diff --git a/.claude/settings.local.json b/.claude/settings.local.json deleted file mode 100644 index fd03337a..00000000 --- a/.claude/settings.local.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "permissions": { - "allow": [ - "Bash(pnpm install:*)", - "Bash(node -e:*)", - "Bash(pnpm start:*)", - "Bash(pnpm test:lib:*)", - "Bash(pnpm typecheck:*)", - "Bash(pnpm build:*)", - "Bash(find:*)" - ] - } -} diff --git a/.gitignore b/.gitignore index 15e281c7..b73f4d2e 100644 --- a/.gitignore +++ b/.gitignore @@ -52,4 +52,10 @@ vite.config.ts.timestamp-* test-traces **/adapters/output .nitro -.output \ No newline at end of file +.output + +.claude/settings.local.json + +# Script outputs +scripts/fal.models.json +fal-models-comparison.csv \ No newline at end of file diff --git a/package.json b/package.json index 31934812..c0439e04 100644 --- a/package.json +++ b/package.json @@ -33,6 +33,8 @@ "format": "prettier --experimental-cli --ignore-unknown '**/*' --write", "generate-docs": "node scripts/generate-docs.ts && pnpm run copy:readme", "generate:models": "tsx scripts/convert-openrouter-models.ts", + "fetch:fal-models": "tsx --env-file=.env.local scripts/fetch-fal-models.ts", + "compare:fal-models": "tsx scripts/compare-fal-models.ts", "sync-docs-config": "node scripts/sync-docs-config.ts", "copy:readme": "cp README.md packages/typescript/ai/README.md && cp README.md packages/typescript/ai-devtools/README.md && cp README.md packages/typescript/preact-ai-devtools/README.md && cp README.md packages/typescript/ai-client/README.md && cp README.md packages/typescript/ai-gemini/README.md && cp README.md packages/typescript/ai-ollama/README.md && cp README.md packages/typescript/ai-openai/README.md && cp README.md packages/typescript/ai-react/README.md && cp README.md packages/typescript/ai-react-ui/README.md && cp README.md packages/typescript/react-ai-devtools/README.md && cp README.md packages/typescript/solid-ai-devtools/README.md", "changeset": "changeset", diff --git a/packages/typescript/ai-fal/eslint.config.ts b/packages/typescript/ai-fal/eslint.config.ts new file mode 100644 index 00000000..99df72df --- /dev/null +++ b/packages/typescript/ai-fal/eslint.config.ts @@ -0,0 +1,11 @@ +import rootConfig from '../../../eslint.config.js' +import type { Linter } from 'eslint' + +const config: Array = [ + ...rootConfig, + { + rules: {}, + }, +] + +export default config diff --git a/packages/typescript/ai-fal/json/fal.models.3d-to-3d.json b/packages/typescript/ai-fal/json/fal.models.3d-to-3d.json new file mode 100644 index 00000000..e0156160 --- /dev/null +++ b/packages/typescript/ai-fal/json/fal.models.3d-to-3d.json @@ -0,0 +1,2355 @@ +{ + "generated_at": "2026-01-28T02:51:51.871Z", + "total_models": 5, + "category": "3d-to-3d", + "models": [ + { + "endpoint_id": "fal-ai/ultrashape", + "metadata": { + "display_name": "Ultrashape", + "category": "3d-to-3d", + "description": "UltraShape-1.0 is a 3D diffusion framework that generates high-fidelity 3D geometry through coarse-to-fine geometric refinement.", + "status": "active", + "tags": [ + "3d-to-3d" + ], + "updated_at": "2026-01-26T21:41:41.691Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a894dfe/Z5bF5OTa1V4FlZI--VFOp_079e6bcc45a64fa08098c0726639c262.jpg", + "model_url": "https://fal.run/fal-ai/ultrashape", + "license_type": "commercial", + "date": "2026-01-06T13:20:04.529Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for 
fal-ai/ultrashape", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ultrashape queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ultrashape", + "category": "3d-to-3d", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a894dfe/Z5bF5OTa1V4FlZI--VFOp_079e6bcc45a64fa08098c0726639c262.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ultrashape", + "documentationUrl": "https://fal.ai/models/fal-ai/ultrashape/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "UltrashapeInput": { + "title": "UltraShapeRequest", + "type": "object", + "properties": { + "octree_resolution": { + "minimum": 128, + "maximum": 1024, + "type": "integer", + "title": "Octree Resolution", + "description": "Marching cubes resolution.", + "default": 1024 + }, + "remove_background": { + "title": "Remove Background", + "type": "boolean", + "description": "Remove image background.", + "default": true + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "Diffusion steps.", + "default": 50 + }, + "model_url": { + "examples": [ + "https://v3b.fal.media/files/b/0a892d06/llURuqrI2TTjijVo-lEWn_1.glb" + ], + "title": "Model Url", + "type": "string", + "description": "URL of the coarse mesh (.glb or .obj) to refine." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed.", + "default": 42 + }, + "image_url": { + "examples": [ + "https://v3b.fal.media/files/b/0a892d08/IYQiPpbR90p8O0otIT2eQ_1.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the reference image for mesh refinement." 
+ } + }, + "x-fal-order-properties": [ + "image_url", + "model_url", + "num_inference_steps", + "octree_resolution", + "seed", + "remove_background" + ], + "required": [ + "image_url", + "model_url" + ] + }, + "UltrashapeOutput": { + "title": "UltraShapeResponse", + "type": "object", + "properties": { + "model_glb": { + "examples": [ + { + "file_size": 117537192, + "file_name": "refined.glb", + "content_type": "application/octet-stream", + "url": "https://v3b.fal.media/files/b/0a892d36/4vzmYKL1OZVgcPCdRf2dc_refined.glb" + } + ], + "title": "Model Glb", + "description": "Generated 3D object.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "model_glb" + ], + "required": [ + "model_glb" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ultrashape/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ultrashape/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/ultrashape": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UltrashapeInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ultrashape/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UltrashapeOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/sam-3/3d-align", + "metadata": { + "display_name": "Sam 3", + "category": "3d-to-3d", + "description": "SAM 3D enables full scene reconstructions, placing objects and humans in a shared context together.", + "status": "active", + "tags": [ + "align", + "3D", + "" + ], + "updated_at": "2026-01-26T21:42:06.475Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a84bb71/k2b6czMQphI5lV6zjLj8z_ea2a892775d745b08a01ba6ecfa6ca43.jpg", + "model_url": "https://fal.run/fal-ai/sam-3/3d-align", + "license_type": "commercial", + "date": "2025-12-02T20:56:43.845Z", + "group": { + "key": "sam3", + "label": "3D Alignment" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/sam-3/3d-align", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/sam-3/3d-align queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/sam-3/3d-align", + "category": "3d-to-3d", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a84bb71/k2b6czMQphI5lV6zjLj8z_ea2a892775d745b08a01ba6ecfa6ca43.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/sam-3/3d-align", + "documentationUrl": "https://fal.ai/models/fal-ai/sam-3/3d-align/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Sam33dAlignInput": { + "x-fal-order-properties": [ + "image_url", + "body_mesh_url", + "body_mask_url", + "object_mesh_url", + "focal_length" + ], + "type": "object", + "properties": { + "image_url": { + "title": "Image Url", + "type": "string", + "description": "URL of the original image used for MoGe depth estimation" + }, + "body_mesh_url": { + "title": "Body Mesh Url", + "type": "string", + "description": "URL of the SAM-3D Body mesh file (.ply or .glb) to align" + }, + "object_mesh_url": { + "title": "Object Mesh Url", + "type": "string", + "description": "Optional URL of SAM-3D Object mesh (.glb) to create combined scene" + }, + "focal_length": { + "title": "Focal Length", + "type": "number", + "description": "Focal length from SAM-3D Body metadata. If not provided, estimated from MoGe." + }, + "body_mask_url": { + "title": "Body Mask Url", + "type": "string", + "description": "URL of the human mask image. If not provided, uses full image." + } + }, + "title": "SAM3DAlignmentInput", + "required": [ + "image_url", + "body_mesh_url" + ] + }, + "Sam33dAlignOutput": { + "x-fal-order-properties": [ + "body_mesh_ply", + "model_glb", + "visualization", + "metadata", + "scene_glb" + ], + "type": "object", + "properties": { + "scene_glb": { + "title": "Scene Glb", + "description": "Combined scene with body + object meshes in GLB format (only when object_mesh_url provided)", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "visualization": { + "title": "Visualization", + "description": "Visualization of aligned mesh overlaid on input image", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "metadata": { + "title": "Metadata", + "description": "Alignment info (scale, translation, etc.)", + "allOf": [ + { + "$ref": "#/components/schemas/SAM3DBodyAlignmentInfo" + } + ] + }, + "body_mesh_ply": { + "title": "Body Mesh Ply", + "description": "Aligned body mesh in PLY format", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "model_glb": { + "title": "Model Glb", + "description": "Aligned body mesh in GLB format (for 3D preview)", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "SAM3DAlignmentOutput", + "required": [ + "body_mesh_ply", + "model_glb", + "visualization", + "metadata" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + }, + "SAM3DBodyAlignmentInfo": { + "x-fal-order-properties": [ + "person_id", + "scale_factor", + "translation", + "focal_length", + "target_points_count", + "cropped_vertices_count" + ], + "type": "object", + "properties": { + "translation": { + "title": "Translation", + "type": "array", + "description": "Translation [tx, ty, tz]", + "items": { + "type": "number" + } + }, + "cropped_vertices_count": { + "title": "Cropped Vertices Count", + "type": "integer", + "description": "Number of cropped vertices" + }, + "person_id": { + "title": "Person Id", + "type": "integer", + "description": "Index of the person" + }, + "target_points_count": { + "title": "Target Points Count", + "type": "integer", + "description": "Number of target points for alignment" + }, + "scale_factor": { + "title": "Scale Factor", + "type": "number", + "description": "Scale factor applied for alignment" + }, + "focal_length": { + "title": "Focal Length", + "type": "number", + "description": "Focal length used" + } + }, + "description": "Per-person alignment metadata.", + "title": "SAM3DBodyAlignmentInfo", + "required": [ + "person_id", + "scale_factor", + "translation", + "focal_length", + "target_points_count", + "cropped_vertices_count" + ] + } + } + }, + "paths": { + "/fal-ai/sam-3/3d-align/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sam-3/3d-align/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/sam-3/3d-align": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sam33dAlignInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sam-3/3d-align/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sam33dAlignOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/meshy/v5/retexture", + "metadata": { + "display_name": "Meshy 5 Retexture", + "category": "3d-to-3d", + "description": "Meshy-5 retexture applies new, high-quality textures to existing 3D models using either text prompts or reference images. It supports PBR material generation for realistic, production-ready results.", + "status": "active", + "tags": [ + "3d-to-3d" + ], + "updated_at": "2026-01-26T21:42:38.218Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/panda/mF5SI-u0DJ_bNr5J8nhsS_0d2ff4c4ea6a4733a169f8b530b576ec.jpg", + "model_url": "https://fal.run/fal-ai/meshy/v5/retexture", + "license_type": "commercial", + "date": "2025-10-18T02:04:55.284Z", + "group": { + "key": "Meshy", + "label": "v5 Retexture" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/meshy/v5/retexture", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/meshy/v5/retexture queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/meshy/v5/retexture", + "category": "3d-to-3d", + "thumbnailUrl": "https://v3b.fal.media/files/b/panda/mF5SI-u0DJ_bNr5J8nhsS_0d2ff4c4ea6a4733a169f8b530b576ec.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/meshy/v5/retexture", + "documentationUrl": "https://fal.ai/models/fal-ai/meshy/v5/retexture/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "MeshyV5RetextureInput": { + "x-fal-order-properties": [ + "model_url", + "text_style_prompt", + "image_style_url", + "enable_original_uv", + "enable_pbr", + "enable_safety_checker" + ], + "type": "object", + "properties": { + "enable_pbr": { + "description": "Generate PBR Maps (metallic, roughness, normal) in addition to base color.", + "type": "boolean", + "title": "Enable Pbr", + "default": false + }, + "text_style_prompt": { + "examples": [ + "red and black chest" + ], + "title": "Text Style Prompt", + "type": "string", + "description": "Describe your desired texture style using text. Maximum 600 characters. Required if image_style_url is not provided.", + "maxLength": 600 + }, + "enable_safety_checker": { + "description": "If set to true, input data will be checked for safety before processing.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "enable_original_uv": { + "description": "Use the original UV mapping of the model instead of generating new UVs. If the model has no original UV, output quality may be reduced.", + "type": "boolean", + "title": "Enable Original Uv", + "default": true + }, + "model_url": { + "examples": [ + "https://v3b.fal.media/files/b/penguin/DId89qXLu6BXu09RFAwAV_model.glb" + ], + "title": "Model Url", + "type": "string", + "description": "URL or base64 data URI of a 3D model to texture. Supports .glb, .gltf, .obj, .fbx, .stl formats. Can be a publicly accessible URL or data URI with MIME type application/octet-stream." + }, + "image_style_url": { + "description": "2D image to guide the texturing process. Supports .jpg, .jpeg, and .png formats. Required if text_style_prompt is not provided. If both are provided, image_style_url takes precedence.", + "max_pixels": 178956970, + "type": "string", + "x-fal": { + "timeout": 20, + "max_file_size": 20971520 + }, + "title": "Image Style Url", + "limit_description": "Max file size: 20.0MB, Timeout: 20.0s" + } + }, + "title": "RetextureInput", + "description": "Input for 3D Model Retexturing", + "required": [ + "model_url" + ] + }, + "MeshyV5RetextureOutput": { + "x-fal-order-properties": [ + "model_glb", + "thumbnail", + "model_urls", + "texture_urls", + "text_style_prompt", + "image_style_url" + ], + "type": "object", + "properties": { + "model_urls": { + "examples": [ + { + "fbx": { + "file_size": 4713692, + "file_name": "model.fbx", + "content_type": "application/octet-stream", + "url": "https://v3b.fal.media/files/b/rabbit/agzKf8N8zeVnteg72NiF4_model.fbx" + }, + "usdz": { + "file_size": 3886518, + "file_name": "model.usdz", + "content_type": "model/vnd.usdz+zip", + "url": "https://v3b.fal.media/files/b/panda/4ItUhLHiH4foEw30qcWZv_model.usdz" + }, + "glb": { + "file_size": 4097640, + "file_name": "model.glb", + "content_type": "model/gltf-binary", + "url": "https://v3b.fal.media/files/b/tiger/pU0TtsRTxXM6VnKEYTHSV_model.glb" + }, + "obj": { + "file_size": 2964508, + "file_name": "model.obj", + "content_type": "text/plain", + "url": "https://v3b.fal.media/files/b/zebra/M5aK_b6vKH7KeGCZoSLq7_model.obj" + } + } + ], + "title": "Model Urls", + "description": "URLs for different 3D model formats", + "allOf": [ + { + "$ref": "#/components/schemas/ModelUrls" + } + ] + }, + "text_style_prompt": { + "examples": [ + "red and black chest" + ], + "title": "Text Style Prompt", + "type": "string", + "description": "The text prompt used for texturing (if provided)" + }, + "texture_urls": { + "examples": [ + [ + { + "base_color": { + 
"file_size": 3455709, + "file_name": "texture_0.png", + "content_type": "image/png", + "url": "https://v3b.fal.media/files/b/zebra/opBcVy7AK-GOlCTUz-_GZ_texture_0.png" + } + } + ] + ], + "title": "Texture Urls", + "type": "array", + "description": "Array of texture file objects", + "items": { + "$ref": "#/components/schemas/TextureFiles" + } + }, + "thumbnail": { + "examples": [ + { + "file_size": 124859, + "file_name": "preview.png", + "content_type": "image/png", + "url": "https://v3b.fal.media/files/b/lion/jFrzgIJMbaPQi_4HCEa8u_preview.png" + } + ], + "title": "Thumbnail", + "description": "Preview thumbnail of the retextured model", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "image_style_url": { + "description": "The image URL used for texturing (if provided)", + "type": "string", + "title": "Image Style Url" + }, + "model_glb": { + "examples": [ + { + "file_size": 4097640, + "file_name": "model.glb", + "content_type": "model/gltf-binary", + "url": "https://v3b.fal.media/files/b/tiger/pU0TtsRTxXM6VnKEYTHSV_model.glb" + } + ], + "title": "Model Glb", + "description": "Retextured 3D object in GLB format.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "RetextureOutput", + "description": "Output for 3D Model Retexturing", + "required": [ + "model_glb", + "model_urls" + ] + }, + "ModelUrls": { + "x-fal-order-properties": [ + "glb", + "fbx", + "obj", + "usdz", + "blend", + "stl" + ], + "type": "object", + "properties": { + "usdz": { + "description": "USDZ format 3D model", + "title": "Usdz", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "fbx": { + "description": "FBX format 3D model", + "title": "Fbx", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "blend": { + "description": "Blender format 3D model", + "title": "Blend", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "stl": { + "description": "STL format 3D model", + "title": "Stl", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "glb": { + "description": "GLB format 3D model", + "title": "Glb", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "obj": { + "description": "OBJ format 3D model", + "title": "Obj", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "ModelUrls", + "description": "3D model files in various formats" + }, + "TextureFiles": { + "x-fal-order-properties": [ + "base_color", + "metallic", + "normal", + "roughness" + ], + "type": "object", + "properties": { + "base_color": { + "description": "Base color texture", + "title": "Base Color", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "normal": { + "description": "Normal texture (PBR)", + "title": "Normal", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "roughness": { + "description": "Roughness texture (PBR)", + "title": "Roughness", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "metallic": { + "description": "Metallic texture (PBR)", + "title": "Metallic", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "TextureFiles", + "description": "Texture files downloaded and uploaded to CDN", + "required": [ + "base_color" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": 
"integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/meshy/v5/retexture/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/meshy/v5/retexture/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/meshy/v5/retexture": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MeshyV5RetextureInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/meshy/v5/retexture/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MeshyV5RetextureOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/meshy/v5/remesh", + "metadata": { + "display_name": "Meshy 5 Remesh", + "category": "3d-to-3d", + "description": "Meshy-5 remesh allows you to remesh and export existing 3D models into various formats", + "status": "active", + "tags": [ + "3d-to-3d" + ], + "updated_at": "2026-01-26T21:42:38.342Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/monkey/VuvDG9KC4adICrWIBIzIu_a731968003db42be8f2c91d1f00c811f.jpg", + "model_url": "https://fal.run/fal-ai/meshy/v5/remesh", + "license_type": "commercial", + "date": "2025-10-18T01:56:03.334Z", + "group": { + "key": "Meshy", + "label": "v5 Remesh" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/meshy/v5/remesh", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/meshy/v5/remesh queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/meshy/v5/remesh", + "category": "3d-to-3d", + "thumbnailUrl": "https://v3b.fal.media/files/b/monkey/VuvDG9KC4adICrWIBIzIu_a731968003db42be8f2c91d1f00c811f.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/meshy/v5/remesh", + "documentationUrl": "https://fal.ai/models/fal-ai/meshy/v5/remesh/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "MeshyV5RemeshInput": { + "x-fal-order-properties": [ + "model_url", + "target_formats", + "topology", + "target_polycount", + "resize_height", + "origin_at" + ], + "type": "object", + "properties": { + "resize_height": { + "minimum": 0, + "description": "Resize the model to a certain height measured in meters. Set to 0 for no resizing.", + "type": "number", + "title": "Resize Height", + "default": 0 + }, + "topology": { + "enum": [ + "quad", + "triangle" + ], + "title": "Topology", + "type": "string", + "examples": [ + "triangle" + ], + "description": "Specify the topology of the generated model. Quad for smooth surfaces, Triangle for detailed geometry.", + "default": "triangle" + }, + "target_polycount": { + "description": "Target number of polygons in the generated model. Actual count may vary based on geometry complexity.", + "type": "integer", + "minimum": 100, + "maximum": 300000, + "examples": [ + 137220 + ], + "title": "Target Polycount", + "default": 30000 + }, + "model_url": { + "examples": [ + "https://v3b.fal.media/files/b/tiger/62QMEQqZ3pjUds4DfuVtX_model.glb" + ], + "title": "Model Url", + "type": "string", + "description": "URL or base64 data URI of a 3D model to remesh. Supports .glb, .gltf, .obj, .fbx, .stl formats. Can be a publicly accessible URL or data URI with MIME type application/octet-stream." + }, + "origin_at": { + "enum": [ + "bottom", + "center" + ], + "description": "Position of the origin. None means no effect.", + "type": "string", + "title": "Origin At" + }, + "target_formats": { + "examples": [ + [ + "glb", + "fbx" + ] + ], + "title": "Target Formats", + "type": "array", + "description": "List of target formats for the remeshed model.", + "items": { + "enum": [ + "glb", + "fbx", + "obj", + "usdz", + "blend", + "stl" + ], + "type": "string" + }, + "default": [ + "glb" + ] + } + }, + "title": "RemeshInput", + "description": "Input for 3D Model Remeshing", + "required": [ + "model_url" + ] + }, + "MeshyV5RemeshOutput": { + "x-fal-order-properties": [ + "model_glb", + "model_urls" + ], + "type": "object", + "properties": { + "model_urls": { + "examples": [ + { + "fbx": { + "file_size": 3476092, + "file_name": "model.fbx", + "content_type": "application/octet-stream", + "url": "https://v3b.fal.media/files/b/kangaroo/Kw7C1w2Uccg3zKQ5axnwk_model.fbx" + }, + "glb": { + "file_size": 3294196, + "file_name": "model.glb", + "content_type": "model/gltf-binary", + "url": "https://v3b.fal.media/files/b/panda/hu-dDvUIPdoHSxR7QcBZw_model.glb" + } + } + ], + "title": "Model Urls", + "description": "URLs for different 3D model formats", + "allOf": [ + { + "$ref": "#/components/schemas/ModelUrls" + } + ] + }, + "model_glb": { + "examples": [ + { + "file_size": 3294196, + "file_name": "model.glb", + "content_type": "model/gltf-binary", + "url": "https://v3b.fal.media/files/b/panda/hu-dDvUIPdoHSxR7QcBZw_model.glb" + } + ], + "title": "Model Glb", + "description": "Remeshed 3D object in GLB format (if GLB was requested).", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "RemeshOutput", + "description": "Output for 3D Model Remeshing", + "required": [ + "model_urls" + ] + }, + "ModelUrls": { + "x-fal-order-properties": [ + "glb", + "fbx", + "obj", + "usdz", + "blend", + "stl" + ], + "type": "object", + "properties": { + "usdz": { + "description": "USDZ format 3D model", + "title": "Usdz", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "fbx": { + "description": 
"FBX format 3D model", + "title": "Fbx", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "blend": { + "description": "Blender format 3D model", + "title": "Blend", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "stl": { + "description": "STL format 3D model", + "title": "Stl", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "glb": { + "description": "GLB format 3D model", + "title": "Glb", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "obj": { + "description": "OBJ format 3D model", + "title": "Obj", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "ModelUrls", + "description": "3D model files in various formats" + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/meshy/v5/remesh/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/meshy/v5/remesh/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/meshy/v5/remesh": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MeshyV5RemeshInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/meshy/v5/remesh/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MeshyV5RemeshOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/hunyuan-part", + "metadata": { + "display_name": "Hunyuan Part", + "category": "3d-to-3d", + "description": "Use the capabilities of hunyuan part to generate point clouds from your 3D files.", + "status": "active", + "tags": [ + "3D-to-3D", + "point-cloud" + ], + "updated_at": "2025-10-08T17:00:01.071Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/rabbit/kqnsD6bjfNsg7OSwKze0X_365d05fbfc8b4e5a865e702a4fa15805.jpg", + "model_url": "https://fal.run/fal-ai/hunyuan-part", + "license_type": "commercial", + "date": "2025-10-08T16:59:43.761Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/hunyuan-part", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/hunyuan-part queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/hunyuan-part", + "category": "3d-to-3d", + "thumbnailUrl": "https://v3b.fal.media/files/b/rabbit/kqnsD6bjfNsg7OSwKze0X_365d05fbfc8b4e5a865e702a4fa15805.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/hunyuan-part", + "documentationUrl": "https://fal.ai/models/fal-ai/hunyuan-part/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "HunyuanPartInput": { + "title": "HunyuanPartInput", + "type": "object", + "properties": { + "point_prompt_x": { + "minimum": -1, + "title": "Point Prompt X", + "type": "number", + "maximum": 1, + "description": "X coordinate of the point prompt for segmentation (normalized space -1 to 1).", + "default": 0 + }, + "point_prompt_z": { + "minimum": -1, + "title": "Point Prompt Z", + "type": "number", + "maximum": 1, + "description": "Z coordinate of the point prompt for segmentation (normalized space -1 to 1).", + "default": 0 + }, + "use_normal": { + "title": "Use Normal", + "type": "boolean", + "description": "Whether to use normal information for segmentation.", + "default": true + }, + "noise_std": { + "minimum": 0, + "title": "Noise Std", + "type": "number", + "maximum": 0.02, + "description": "Standard deviation of noise to add to sampled points.", + "default": 0 + }, + "point_num": { + "minimum": 10000, + "title": "Point Num", + "type": "integer", + "maximum": 500000, + "description": "Number of points to sample from the mesh.", + "default": 100000 + }, + "model_file_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/video_models/base_basic_shaded.glb" + ], + "title": "Model File Url", + "type": "string", + "description": "URL of the 3D model file (.glb or .obj) to process for segmentation." + }, + "point_prompt_y": { + "minimum": -1, + "title": "Point Prompt Y", + "type": "number", + "maximum": 1, + "description": "Y coordinate of the point prompt for segmentation (normalized space -1 to 1).", + "default": 0 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and input will produce the same segmentation results.\n " + } + }, + "x-fal-order-properties": [ + "model_file_url", + "point_prompt_x", + "point_prompt_y", + "point_prompt_z", + "point_num", + "use_normal", + "noise_std", + "seed" + ], + "required": [ + "model_file_url" + ] + }, + "HunyuanPartOutput": { + "title": "HunyuanPartOutput", + "type": "object", + "properties": { + "iou_scores": { + "title": "Iou Scores", + "type": "array", + "description": "IoU scores for each of the three masks.", + "items": { + "type": "number" + } + }, + "best_mask_index": { + "title": "Best Mask Index", + "type": "integer", + "description": "Index of the best mask (1, 2, or 3) based on IoU score." + }, + "mask_2_mesh": { + "title": "Mask 2 Mesh", + "description": "Mesh showing segmentation mask 2.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "mask_1_mesh": { + "title": "Mask 1 Mesh", + "description": "Mesh showing segmentation mask 1.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "segmented_mesh": { + "title": "Segmented Mesh", + "description": "Segmented 3D mesh with mask applied.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Seed value used for generation." 
+ }, + "mask_3_mesh": { + "title": "Mask 3 Mesh", + "description": "Mesh showing segmentation mask 3.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "segmented_mesh", + "mask_1_mesh", + "mask_2_mesh", + "mask_3_mesh", + "best_mask_index", + "iou_scores", + "seed" + ], + "required": [ + "segmented_mesh", + "mask_1_mesh", + "mask_2_mesh", + "mask_3_mesh", + "best_mask_index", + "iou_scores", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/hunyuan-part/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-part/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-part": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HunyuanPartInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-part/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HunyuanPartOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/packages/typescript/ai-fal/json/fal.models.audio-to-audio.json b/packages/typescript/ai-fal/json/fal.models.audio-to-audio.json new file mode 100644 index 00000000..ffc4ce3e --- /dev/null +++ b/packages/typescript/ai-fal/json/fal.models.audio-to-audio.json @@ -0,0 +1,7366 @@ +{ + "generated_at": "2026-01-28T02:51:51.869Z", + "total_models": 17, + "category": "audio-to-audio", + "models": [ + { + "endpoint_id": "fal-ai/elevenlabs/voice-changer", + "metadata": { + "display_name": "ElevenLabs Voice Changer", + "category": "audio-to-audio", + "description": "Change the voices in your audios with voices in ElevenLabs!", + "status": "active", + "tags": [ + "voice-change", + "audio-to-audio" + ], + "updated_at": "2026-01-26T21:41:36.521Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8a5e18/wKI4AAR-ugj6qdI9HH2P5_7696a622ff46451bb8fa7f0a4e54be13.jpg", + "model_url": "https://fal.run/fal-ai/elevenlabs/voice-changer", + "license_type": "commercial", + "date": "2026-01-14T14:50:03.869Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/elevenlabs/voice-changer", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/elevenlabs/voice-changer queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/elevenlabs/voice-changer", + "category": "audio-to-audio", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8a5e18/wKI4AAR-ugj6qdI9HH2P5_7696a622ff46451bb8fa7f0a4e54be13.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/elevenlabs/voice-changer", + "documentationUrl": "https://fal.ai/models/fal-ai/elevenlabs/voice-changer/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ElevenlabsVoiceChangerInput": { + "title": "VoiceChangerRequest", + "type": "object", + "properties": { + "voice": { + "examples": [ + "Aria", + "Roger", + "Sarah", + "Laura", + "Charlie", + "George", + "Callum", + "River", + "Liam", + "Charlotte", + "Alice", + "Matilda", + "Will", + "Jessica", + "Eric", + "Chris", + "Brian", + "Daniel", + "Lily", + "Bill" + ], + "description": "The voice to use for speech generation", + "type": "string", + "title": "Voice", + "default": "Rachel" + }, + "audio_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/elevenlabs/voice_change_in.mp3" + ], + "description": "The input audio file", + "type": "string", + "title": "Audio Url" + }, + "seed": { + "description": "Random seed for reproducibility.", + "type": "integer", + "title": "Seed" + }, + "output_format": { + "enum": [ + "mp3_22050_32", + "mp3_44100_32", + "mp3_44100_64", + "mp3_44100_96", + "mp3_44100_128", + "mp3_44100_192", + "pcm_8000", + "pcm_16000", + "pcm_22050", + "pcm_24000", + "pcm_44100", + "pcm_48000", + "ulaw_8000", + "alaw_8000", + "opus_48000_32", + "opus_48000_64", + "opus_48000_96", + "opus_48000_128", + "opus_48000_192" + ], + "description": "Output format of the generated audio. Formatted as codec_sample_rate_bitrate.", + "type": "string", + "title": "Output Format", + "default": "mp3_44100_128" + }, + "remove_background_noise": { + "description": "If set, will remove the background noise from your audio input using our audio isolation model.", + "type": "boolean", + "title": "Remove Background Noise", + "default": false + } + }, + "x-fal-order-properties": [ + "audio_url", + "voice", + "remove_background_noise", + "seed", + "output_format" + ], + "required": [ + "audio_url" + ] + }, + "ElevenlabsVoiceChangerOutput": { + "title": "VoiceChangerOutput", + "type": "object", + "properties": { + "seed": { + "examples": [ + 1902083897 + ], + "description": "Random seed for reproducibility.", + "type": "integer", + "title": "Seed" + }, + "audio": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/elevenlabs/voice_change_out.mp3" + } + ], + "description": "The generated audio file", + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "audio", + "seed" + ], + "required": [ + "audio", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/elevenlabs/voice-changer/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/elevenlabs/voice-changer/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/elevenlabs/voice-changer": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ElevenlabsVoiceChangerInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/elevenlabs/voice-changer/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ElevenlabsVoiceChangerOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/nova-sr", + "metadata": { + "display_name": "Nova SR", + "category": "audio-to-audio", + "description": "Enhance muffled 16 kHz speech audio into crystal-clear 48 kHz", + "status": "active", + "tags": [ + "speech-enhancements", + "audio-super-resolution", + "audio-sr" + ], + "updated_at": "2026-01-26T21:41:37.536Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8a417c/1iYq7okizdeiXmosEqCU-_e78a6396da8a410f85aed3ceb0ba4182.jpg", + "model_url": "https://fal.run/fal-ai/nova-sr", + "license_type": "commercial", + "date": "2026-01-13T18:31:18.715Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/nova-sr", + "version": "1.0.0", + "description": "The 
OpenAPI schema for the fal-ai/nova-sr queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/nova-sr", + "category": "audio-to-audio", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8a417c/1iYq7okizdeiXmosEqCU-_e78a6396da8a410f85aed3ceb0ba4182.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/nova-sr", + "documentationUrl": "https://fal.ai/models/fal-ai/nova-sr/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "NovaSrInput": { + "title": "NovaSRInput", + "type": "object", + "properties": { + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "bitrate": { + "title": "Bitrate", + "type": "string", + "description": "The bitrate of the output audio.", + "default": "192k" + }, + "audio_url": { + "examples": [ + "https://v3b.fal.media/files/b/0a8a3f6a/t-FWZPvPXfMa7DOxaTSMw_5MXyNKGcG6x92g8CKQdwe_speech.mp3" + ], + "title": "Audio URL", + "type": "string", + "description": "The URL of the audio file to enhance." + }, + "audio_format": { + "enum": [ + "mp3", + "aac", + "m4a", + "ogg", + "opus", + "flac", + "wav" + ], + "title": "Audio Format", + "type": "string", + "description": "The format for the output audio.", + "default": "mp3" + } + }, + "x-fal-order-properties": [ + "audio_url", + "sync_mode", + "audio_format", + "bitrate" + ], + "required": [ + "audio_url" + ] + }, + "NovaSrOutput": { + "title": "NovaSROutput", + "type": "object", + "properties": { + "timings": { + "title": "Timings", + "description": "Timings for each step in the pipeline.", + "allOf": [ + { + "$ref": "#/components/schemas/NovaSRTimings" + } + ] + }, + "audio": { + "examples": [ + { + "channels": 1, + "duration": 12.283291666666667, + "url": "https://v3b.fal.media/files/b/0a8a3f1a/lTKExJu-R6ZJdnFlpzEeq_TxmNTNhl.mp3", + "file_name": "lTKExJu-R6ZJdnFlpzEeq_TxmNTNhl.mp3", + "sample_rate": 48000, + "content_type": "audio/mpeg", + "bitrate": "192k" + } + ], + "title": "Audio", + "description": "The enhanced audio file.", + "allOf": [ + { + "$ref": "#/components/schemas/AudioFile" + } + ] + } + }, + "x-fal-order-properties": [ + "audio", + "timings" + ], + "required": [ + "audio", + "timings" + ] + }, + "NovaSRTimings": { + "title": "NovaSRTimings", + "type": "object", + "properties": { + "postprocess": { + "title": "Postprocess", + "type": "number", + "description": "Time taken to postprocess the audio in seconds." 
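The `NovaSrInput` schema above maps one-to-one onto the queue request body. A minimal sketch, assuming Node 18+ `fetch`, a `FAL_KEY` environment variable, and fal's `Key` authorization prefix (none of which are part of the generated JSON):

```ts
// Sketch: typed request body for fal-ai/nova-sr, mirroring NovaSrInput above.
// FAL_KEY and the `Key` Authorization prefix are assumptions, not schema content.
type NovaSrInput = {
  audio_url: string // required: URL of the audio file to enhance
  sync_mode?: boolean // default false
  audio_format?: 'mp3' | 'aac' | 'm4a' | 'ogg' | 'opus' | 'flac' | 'wav' // default 'mp3'
  bitrate?: string // default '192k'
}

async function submitNovaSr(input: NovaSrInput) {
  // Submitting to the queue returns a QueueStatus object (see the schema above).
  const res = await fetch('https://queue.fal.run/fal-ai/nova-sr', {
    method: 'POST',
    headers: {
      Authorization: `Key ${process.env.FAL_KEY}`,
      'Content-Type': 'application/json',
    },
    body: JSON.stringify(input),
  })
  return res.json() as Promise<{ status: string; request_id: string }>
}
```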
+ }, + "inference": { + "title": "Inference", + "type": "number", + "description": "Time taken to run the inference in seconds." + }, + "preprocess": { + "title": "Preprocess", + "type": "number", + "description": "Time taken to preprocess the audio in seconds." + } + }, + "x-fal-order-properties": [ + "preprocess", + "inference", + "postprocess" + ], + "required": [ + "preprocess", + "inference", + "postprocess" + ] + }, + "AudioFile": { + "title": "AudioFile", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "duration": { + "title": "Duration", + "type": "number", + "description": "The duration of the audio" + }, + "bitrate": { + "title": "Bitrate", + "type": "string", + "description": "The bitrate of the audio" + }, + "channels": { + "title": "Channels", + "type": "integer", + "description": "The number of channels in the audio" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "sample_rate": { + "title": "Sample Rate", + "type": "integer", + "description": "The sample rate of the audio" + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "duration", + "channels", + "sample_rate", + "bitrate" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/nova-sr/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/nova-sr/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/nova-sr": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/NovaSrInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/nova-sr/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/NovaSrOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/deepfilternet3", + "metadata": { + "display_name": "DeepFilterNet 3", + "category": "audio-to-audio", + "description": "Enhance speech audio by removing background noise and upsampling to 48KHz", + "status": "active", + "tags": [ + "speech-enhancement" + ], + "updated_at": "2026-01-26T21:41:40.708Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a897e1b/SwkqYGPNn2Yyk6Cc-BBvO_a988f255be3a41c0a47b5cdc3568453c.jpg", + "model_url": "https://fal.run/fal-ai/deepfilternet3", + "license_type": "commercial", + "date": "2026-01-07T23:34:22.008Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/deepfilternet3", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/deepfilternet3 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/deepfilternet3", + "category": "audio-to-audio", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a897e1b/SwkqYGPNn2Yyk6Cc-BBvO_a988f255be3a41c0a47b5cdc3568453c.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/deepfilternet3", + "documentationUrl": "https://fal.ai/models/fal-ai/deepfilternet3/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
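Every endpoint in these dumps repeats the same `QueueStatus` / `requests/{request_id}` path pattern, so one polling helper covers all of them. A sketch under the same assumptions as above; the fixed 1 s delay is an arbitrary choice, not something the schema prescribes:

```ts
// Sketch: poll the status endpoint defined above until COMPLETED, then
// fetch the result from the /requests/{request_id} endpoint.
type QueueStatus = {
  status: 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED'
  request_id: string
  response_url?: string
  queue_position?: number
}

async function waitForResult<T>(endpointId: string, requestId: string): Promise<T> {
  const headers = { Authorization: `Key ${process.env.FAL_KEY}` }
  const base = `https://queue.fal.run/${endpointId}/requests/${requestId}`
  for (;;) {
    const status = (await (await fetch(`${base}/status?logs=1`, { headers })).json()) as QueueStatus
    if (status.status === 'COMPLETED') break
    await new Promise((r) => setTimeout(r, 1000)) // simple fixed backoff
  }
  return (await fetch(base, { headers })).json() as Promise<T>
}
```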
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Deepfilternet3Input": { + "title": "DeepFilterNet3Input", + "type": "object", + "properties": { + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "audio_format": { + "enum": [ + "mp3", + "aac", + "m4a", + "ogg", + "opus", + "flac", + "wav" + ], + "title": "Audio Format", + "type": "string", + "description": "The format for the output audio.", + "default": "mp3" + }, + "audio_url": { + "examples": [ + "https://v3b.fal.media/files/b/0a8a4022/DLoZhabKeVjd3urncoRE4_dirty.mp3" + ], + "title": "Audio URL", + "type": "string", + "description": "The URL of the audio to enhance." + }, + "bitrate": { + "title": "Bitrate", + "type": "string", + "description": "The bitrate of the output audio.", + "default": "192k" + } + }, + "x-fal-order-properties": [ + "audio_url", + "sync_mode", + "audio_format", + "bitrate" + ], + "required": [ + "audio_url" + ] + }, + "Deepfilternet3Output": { + "title": "DeepFilterNet3Output", + "type": "object", + "properties": { + "timings": { + "title": "Timings", + "description": "Timings for each step in the pipeline.", + "allOf": [ + { + "$ref": "#/components/schemas/DeepFilterNetTimings" + } + ] + }, + "audio_file": { + "examples": [ + { + "channels": 1, + "duration": 6.9544375, + "url": "https://v3b.fal.media/files/b/0a8a4024/-2cD9CyGEjYsyVQ5lEERh_9qwIkJjf.mp3", + "file_name": "-2cD9CyGEjYsyVQ5lEERh_9qwIkJjf.mp3", + "sample_rate": 48000, + "content_type": "audio/mpeg", + "bitrate": "192k" + } + ], + "title": "Audio File", + "description": "The audio file that was enhanced.", + "allOf": [ + { + "$ref": "#/components/schemas/AudioFile" + } + ] + } + }, + "x-fal-order-properties": [ + "audio_file", + "timings" + ], + "required": [ + "audio_file", + "timings" + ] + }, + "DeepFilterNetTimings": { + "title": "DeepFilterNetTimings", + "type": "object", + "properties": { + "postprocess": { + "title": "Postprocess", + "type": "number", + "description": "Postprocessing time." + }, + "inference": { + "title": "Inference", + "type": "number", + "description": "Inference time." + }, + "preprocess": { + "title": "Preprocess", + "type": "number", + "description": "Preprocessing time." + } + }, + "x-fal-order-properties": [ + "preprocess", + "inference", + "postprocess" + ], + "required": [ + "preprocess", + "inference", + "postprocess" + ] + }, + "AudioFile": { + "title": "AudioFile", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "duration": { + "title": "Duration", + "type": "number", + "description": "The duration of the audio" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + }, + "bitrate": { + "title": "Bitrate", + "type": "string", + "description": "The bitrate of the audio" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." 
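`Deepfilternet3Input` has the same shape as `NovaSrInput`, so instead of repeating the raw-fetch sketch, here is the same call routed through the `@fal-ai/client` package; using that package, and the exact shape of its return value, are assumptions of this sketch rather than anything these dumps specify:

```ts
// Sketch using @fal-ai/client instead of raw fetch (an assumption of this
// example; the queue endpoints above work without it).
import { fal } from '@fal-ai/client'

fal.config({ credentials: process.env.FAL_KEY! })

const result = await fal.subscribe('fal-ai/deepfilternet3', {
  input: {
    audio_url: 'https://example.com/noisy-speech.mp3', // placeholder URL
    audio_format: 'wav',
  },
  logs: true,
})
// Deepfilternet3Output: the enhanced file plus preprocess/inference/postprocess timings.
console.log(result.data.audio_file.url, result.data.timings)
```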
+ }, + "sample_rate": { + "title": "Sample Rate", + "type": "integer", + "description": "The sample rate of the audio" + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "channels": { + "title": "Channels", + "type": "integer", + "description": "The number of channels in the audio" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "duration", + "channels", + "sample_rate", + "bitrate" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/deepfilternet3/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/deepfilternet3/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/deepfilternet3": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Deepfilternet3Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/deepfilternet3/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Deepfilternet3Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/sam-audio/separate", + "metadata": { + "display_name": "Sam Audio", + "category": "audio-to-audio", + "description": "Audio separation with SAM Audio. 
Isolate any sound using natural language—professional-grade audio editing made simple for creators, researchers, and accessibility applications.", + "status": "active", + "tags": [ + "audio-to-audio", + "sam-audio" + ], + "updated_at": "2026-01-26T21:41:44.615Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a88664f/wKE1EUP15yGhKAFhCA_7C_f889f14440d64e70b1c0b08403dfb3bf.jpg", + "model_url": "https://fal.run/fal-ai/sam-audio/separate", + "license_type": "commercial", + "date": "2025-12-30T16:38:04.429Z", + "group": { + "key": "sam-audio", + "label": "Text-guided" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/sam-audio/separate", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/sam-audio/separate queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/sam-audio/separate", + "category": "audio-to-audio", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a88664f/wKE1EUP15yGhKAFhCA_7C_f889f14440d64e70b1c0b08403dfb3bf.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/sam-audio/separate", + "documentationUrl": "https://fal.ai/models/fal-ai/sam-audio/separate/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "SamAudioSeparateInput": { + "description": "Input for text-based audio separation.", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "piano playing" + ], + "description": "Text prompt describing the sound to isolate.", + "type": "string", + "title": "Prompt" + }, + "acceleration": { + "enum": [ + "fast", + "balanced", + "quality" + ], + "description": "The acceleration level to use.", + "type": "string", + "title": "Acceleration", + "default": "balanced" + }, + "audio_url": { + "examples": [ + "https://v3b.fal.media/files/b/0a88511f/tUUCI9eDmeC2RqJEOXrZk_assets_avatar_multi_sing_woman.WAV" + ], + "description": "URL of the audio file to process (WAV, MP3, FLAC supported)", + "type": "string", + "title": "Audio Url" + }, + "predict_spans": { + "description": "Automatically predict temporal spans where the target sound occurs.", + "type": "boolean", + "title": "Predict Spans", + "default": false + }, + "output_format": { + "enum": [ + "wav", + "mp3" + ], + "description": "Output audio format.", + "type": "string", + "title": "Output Format", + "default": "wav" + }, + "reranking_candidates": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "description": "Number of candidates to generate and rank. 
Higher improves quality but increases latency and cost.", + "title": "Reranking Candidates", + "default": 1 + } + }, + "title": "SAMAudioInput", + "x-fal-order-properties": [ + "audio_url", + "prompt", + "predict_spans", + "reranking_candidates", + "acceleration", + "output_format" + ], + "required": [ + "audio_url", + "prompt" + ] + }, + "SamAudioSeparateOutput": { + "description": "Output for text-based audio separation.", + "type": "object", + "properties": { + "target": { + "examples": [ + { + "content_type": "audio/wav", + "url": "https://v3b.fal.media/files/b/0a8853af/bxm0-bZp5tH46Qp5PMwl6_tmpus5ep6vl.wav" + } + ], + "description": "The isolated target sound.", + "title": "Target", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "duration": { + "examples": [ + 26.6 + ], + "description": "Duration of the output audio in seconds.", + "type": "number", + "title": "Duration" + }, + "sample_rate": { + "description": "Sample rate of the output audio in Hz.", + "type": "integer", + "title": "Sample Rate", + "default": 48000 + }, + "residual": { + "examples": [ + { + "content_type": "audio/wav", + "url": "https://v3b.fal.media/files/b/0a88512b/xqDIegkZuLPPlufZ9RLP8_tmpos7b9db_.wav" + } + ], + "description": "Everything else in the audio.", + "title": "Residual", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "SAMAudioSeparateOutput", + "x-fal-order-properties": [ + "target", + "residual", + "duration", + "sample_rate" + ], + "required": [ + "target", + "residual", + "duration" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/sam-audio/separate/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
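`SamAudioSeparateInput` adds the text prompt and ranking knobs on top of the usual audio URL. A hedged payload sketch (placeholder URL; same auth assumptions as the earlier sketches):

```ts
// Sketch: SamAudioSeparateInput payload for text-guided separation.
const input = {
  audio_url: 'https://example.com/mix.wav', // placeholder
  prompt: 'piano playing', // required text prompt
  predict_spans: true, // auto-detect where the target sound occurs
  reranking_candidates: 2, // 1-4; higher = better quality, more latency/cost
  acceleration: 'balanced' as const,
  output_format: 'mp3' as const,
}

await fetch('https://queue.fal.run/fal-ai/sam-audio/separate', {
  method: 'POST',
  headers: { Authorization: `Key ${process.env.FAL_KEY}`, 'Content-Type': 'application/json' },
  body: JSON.stringify(input),
})
// The result (fetched via the request_id) carries `target` and `residual` files
// per SamAudioSeparateOutput above.
```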
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sam-audio/separate/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/sam-audio/separate": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SamAudioSeparateInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sam-audio/separate/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SamAudioSeparateOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/sam-audio/span-separate", + "metadata": { + "display_name": "Sam Audio", + "category": "audio-to-audio", + "description": "Audio separation with SAM Audio. 
Isolate any sound using natural language—professional-grade audio editing made simple for creators, researchers, and accessibility applications.", + "status": "active", + "tags": [ + "audio-to-audio", + "sam-audio" + ], + "updated_at": "2026-01-26T21:41:44.787Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8865ea/mJyuh2TOMRndveMfF6BQY_863a43ac54ea4d3db11f18a6856a8cf6.jpg", + "model_url": "https://fal.run/fal-ai/sam-audio/span-separate", + "license_type": "commercial", + "date": "2025-12-30T16:24:56.311Z", + "group": { + "key": "sam-audio", + "label": "Timespan-guided" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/sam-audio/span-separate", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/sam-audio/span-separate queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/sam-audio/span-separate", + "category": "audio-to-audio", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8865ea/mJyuh2TOMRndveMfF6BQY_863a43ac54ea4d3db11f18a6856a8cf6.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/sam-audio/span-separate", + "documentationUrl": "https://fal.ai/models/fal-ai/sam-audio/span-separate/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "SamAudioSpanSeparateInput": { + "description": "Input for temporal span-based audio separation.", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "man singing", + "dog barking" + ], + "description": "Text prompt describing the sound to isolate. Optional but recommended - helps the model identify what type of sound to extract from the span.", + "type": "string", + "title": "Prompt" + }, + "acceleration": { + "enum": [ + "fast", + "balanced", + "quality" + ], + "description": "The acceleration level to use.", + "type": "string", + "title": "Acceleration", + "default": "balanced" + }, + "spans": { + "examples": [ + [ + { + "end": 10, + "start": 6, + "include": true + } + ] + ], + "description": "Time spans where the target sound occurs which should be isolated.", + "type": "array", + "title": "Spans", + "items": { + "$ref": "#/components/schemas/AudioTimeSpan" + } + }, + "output_format": { + "enum": [ + "wav", + "mp3" + ], + "description": "Output audio format.", + "type": "string", + "title": "Output Format", + "default": "wav" + }, + "trim_to_span": { + "description": "Trim output audio to only include the specified span time range. 
If False, returns the full audio length with the target sound isolated throughout.", + "type": "boolean", + "title": "Trim To Span", + "default": false + }, + "audio_url": { + "examples": [ + "https://v3b.fal.media/files/b/0a8853d1/T7zRmsiculA6u_V6RCF2c_man.mp3" + ], + "description": "URL of the audio file to process.", + "type": "string", + "title": "Audio Url" + }, + "reranking_candidates": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "description": "Number of candidates to generate and rank. Higher improves quality but increases latency and cost. Requires text prompt; ignored for span-only separation.", + "title": "Reranking Candidates", + "default": 1 + } + }, + "title": "SAMAudioSpanInput", + "x-fal-order-properties": [ + "audio_url", + "prompt", + "spans", + "reranking_candidates", + "acceleration", + "trim_to_span", + "output_format" + ], + "required": [ + "audio_url", + "spans" + ] + }, + "SamAudioSpanSeparateOutput": { + "description": "Output for span-based audio separation.", + "type": "object", + "properties": { + "target": { + "examples": [ + { + "content_type": "audio/wav", + "url": "https://v3b.fal.media/files/b/0a89374c/mImRbqrjB72o9vEmIrSmW_tmpp2rmotgs.wav" + } + ], + "description": "The isolated target sound.", + "title": "Target", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "duration": { + "examples": [ + 81.96 + ], + "description": "Duration of the output audio in seconds.", + "type": "number", + "title": "Duration" + }, + "sample_rate": { + "description": "Sample rate of the output audio in Hz.", + "type": "integer", + "title": "Sample Rate", + "default": 48000 + }, + "residual": { + "examples": [ + { + "content_type": "audio/wav", + "url": "https://v3b.fal.media/files/b/0a89374d/suELShppRlPCTAVnbWRqj_tmpr8shao_e.wav" + } + ], + "description": "Everything else in the audio.", + "title": "Residual", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "SAMAudioSpanSeparateOutput", + "x-fal-order-properties": [ + "target", + "residual", + "duration", + "sample_rate" + ], + "required": [ + "target", + "residual", + "duration" + ] + }, + "AudioTimeSpan": { + "description": "A time span indicating where the target sound occurs.", + "type": "object", + "properties": { + "end": { + "minimum": 0, + "description": "End time of the span in seconds", + "type": "number", + "title": "End", + "examples": [ + 1.5, + 4 + ] + }, + "start": { + "minimum": 0, + "description": "Start time of the span in seconds", + "type": "number", + "title": "Start", + "examples": [ + 0.5, + 2 + ] + }, + "include": { + "description": "Whether to include (True) or exclude (False) sounds in this span", + "type": "boolean", + "title": "Include", + "default": true + } + }, + "title": "AudioTimeSpan", + "x-fal-order-properties": [ + "start", + "end", + "include" + ], + "required": [ + "start", + "end" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/sam-audio/span-separate/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sam-audio/span-separate/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/sam-audio/span-separate": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SamAudioSpanSeparateInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sam-audio/span-separate/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SamAudioSpanSeparateOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ffmpeg-api/merge-audios", + "metadata": { + "display_name": "FFmpeg API [Merge Audios]", + "category": "audio-to-audio", + "description": "Merge audios into a single audio using FFmpeg API!", + "status": "active", + "tags": [ + "ffmpeg" + ], + "updated_at": "2026-01-26T21:41:47.055Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a878178/GTLJRXgGY24Y-lyirlsfm_c01912199a8c41298f64c5201337120a.jpg", + "model_url": "https://fal.run/fal-ai/ffmpeg-api/merge-audios", + "license_type": "commercial", + "date": "2025-12-23T21:51:58.875Z", + "group": { + "key": "ffmpeg", + "label": "Merge Audios" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + 
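For the span-guided variant just closed above, the only structural difference from `SamAudioSeparateInput` is the required `spans` array of `AudioTimeSpan` objects. A payload sketch (values illustrative only):

```ts
// Sketch: SamAudioSpanSeparateInput with explicit time spans.
// AudioTimeSpan: start/end in seconds; include=true keeps, false excludes.
const spanInput = {
  audio_url: 'https://example.com/recording.mp3', // placeholder
  prompt: 'dog barking', // optional but recommended
  spans: [
    { start: 6, end: 10, include: true }, // isolate the sound heard in 6-10 s
  ],
  trim_to_span: true, // return only the 6-10 s range instead of full length
  output_format: 'wav' as const,
}
```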
"info": { + "title": "Queue OpenAPI for fal-ai/ffmpeg-api/merge-audios", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ffmpeg-api/merge-audios queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ffmpeg-api/merge-audios", + "category": "audio-to-audio", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a878178/GTLJRXgGY24Y-lyirlsfm_c01912199a8c41298f64c5201337120a.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ffmpeg-api/merge-audios", + "documentationUrl": "https://fal.ai/models/fal-ai/ffmpeg-api/merge-audios/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FfmpegApiMergeAudiosInput": { + "title": "MergeAudiosInput", + "type": "object", + "properties": { + "audio_urls": { + "description": "List of audio URLs to merge in order. The 0th stream of the audio will be considered as the merge candidate.", + "type": "array", + "minItems": 2, + "items": { + "type": "string" + }, + "examples": [ + [ + "https://storage.googleapis.com/falserverless/example_inputs/ffmpeg_api/merge_audios/first.mp3", + "https://storage.googleapis.com/falserverless/example_inputs/ffmpeg_api/merge_audios/second.mp3" + ] + ], + "maxItems": 5, + "title": "Audio Urls" + }, + "output_format": { + "anyOf": [ + { + "enum": [ + "mp3_22050_32", + "mp3_44100_32", + "mp3_44100_64", + "mp3_44100_96", + "mp3_44100_128", + "mp3_44100_192", + "pcm_8000", + "pcm_16000", + "pcm_22050", + "pcm_24000", + "pcm_44100", + "pcm_48000", + "ulaw_8000", + "alaw_8000", + "opus_48000_32", + "opus_48000_64", + "opus_48000_96", + "opus_48000_128", + "opus_48000_192" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Output format of the combined audio. If not used, will be determined automatically using FFMPEG. 
Formatted as codec_sample_rate_bitrate.", + "title": "Output Format" + } + }, + "x-fal-order-properties": [ + "audio_urls", + "output_format" + ], + "required": [ + "audio_urls" + ] + }, + "FfmpegApiMergeAudiosOutput": { + "title": "MergeAudiosOutput", + "type": "object", + "properties": { + "audio": { + "examples": [ + { + "file_size": 97401, + "file_name": "merged_audios.mp3", + "content_type": "audio/mpeg", + "url": "https://storage.googleapis.com/falserverless/example_outputs/ffmpeg_api/merge_audios.mp3" + } + ], + "description": "Merged audio file", + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "audio" + ], + "required": [ + "audio" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ffmpeg-api/merge-audios/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ffmpeg-api/merge-audios/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
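`FfmpegApiMergeAudiosInput` is the simplest input in this batch: an ordered list of two to five URLs plus an optional `codec_sample_rate_bitrate` format string. A sketch (placeholder URLs; submit it like the earlier fetch examples):

```ts
// Sketch: FfmpegApiMergeAudiosInput — 2 to 5 URLs, merged in order.
// Omit output_format to let FFmpeg pick the format automatically.
const mergeInput = {
  audio_urls: [
    'https://example.com/first.mp3', // placeholder URLs
    'https://example.com/second.mp3',
  ],
  output_format: 'mp3_44100_192' as const, // codec_sample_rate_bitrate
}
```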
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/ffmpeg-api/merge-audios": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FfmpegApiMergeAudiosInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ffmpeg-api/merge-audios/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FfmpegApiMergeAudiosOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/create-voice", + "metadata": { + "display_name": "Kling Video Create Voice", + "category": "audio-to-audio", + "description": "Create Voices to be used with Kling 2.6 Voice Control", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:52.863Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a868c52/jCucNmpNTg7UQ2WYGJHAr_36ad5b9439824f5e9f8fd7f1ed297d27.jpg", + "model_url": "https://fal.run/fal-ai/kling-video/create-voice", + "license_type": "commercial", + "date": "2025-12-16T15:32:04.208Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling-video/create-voice", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-video/create-voice queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/create-voice", + "category": "audio-to-audio", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a868c52/jCucNmpNTg7UQ2WYGJHAr_36ad5b9439824f5e9f8fd7f1ed297d27.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/create-voice", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/create-voice/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoCreateVoiceInput": { + "title": "CreateVoiceInput", + "type": "object", + "properties": { + "voice_url": { + "examples": [ + "https://v3b.fal.media/files/b/0a867736/_Wo19V-XrOVYZt6jKE8t5_kling_video.wav" + ], + "title": "Voice Url", + "type": "string", + "description": "URL of the voice audio file. Supports .mp3/.wav audio or .mp4/.mov video. Duration must be 5-30 seconds with clean, single-voice audio." + } + }, + "description": "Request model for creating a custom voice.", + "x-fal-order-properties": [ + "voice_url" + ], + "required": [ + "voice_url" + ] + }, + "KlingVideoCreateVoiceOutput": { + "title": "CreateVoiceOutput", + "type": "object", + "properties": { + "voice_id": { + "examples": [ + "829877809978941442" + ], + "title": "Voice Id", + "type": "string", + "description": "Unique identifier for the created voice" + } + }, + "description": "Response model for creating a custom voice.", + "x-fal-order-properties": [ + "voice_id" + ], + "required": [ + "voice_id" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/create-voice/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/create-voice/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
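`KlingVideoCreateVoiceInput` takes a single `voice_url` and eventually yields a `voice_id` for Kling 2.6 voice control. A sketch under the same fetch/auth assumptions as above:

```ts
// Sketch: create a Kling voice and keep the returned request_id for polling.
const { request_id } = (await (
  await fetch('https://queue.fal.run/fal-ai/kling-video/create-voice', {
    method: 'POST',
    headers: { Authorization: `Key ${process.env.FAL_KEY}`, 'Content-Type': 'application/json' },
    body: JSON.stringify({
      // 5-30 s of clean, single-voice audio (.mp3/.wav, or .mp4/.mov video)
      voice_url: 'https://example.com/voice-sample.wav', // placeholder
    }),
  })
).json()) as { request_id: string }
// Once COMPLETED, the result body is { voice_id: string } per KlingVideoCreateVoiceOutput.
```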
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/create-voice": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoCreateVoiceInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/create-voice/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoCreateVoiceOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/demucs", + "metadata": { + "display_name": "Demucs", + "category": "audio-to-audio", + "description": "SOTA stemming model for voice, drums, bass, guitar and more.", + "status": "active", + "tags": [ + "audio" + ], + "updated_at": "2026-01-26T21:42:31.343Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/tiger/WAcACXLL5-atiw1MGvH4g_7cb10ff246844b9c9298bbe98e431df7.jpg", + "model_url": "https://fal.run/fal-ai/demucs", + "license_type": "commercial", + "date": "2025-10-27T21:40:53.677Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/demucs", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/demucs queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/demucs", + "category": "audio-to-audio", + "thumbnailUrl": "https://v3b.fal.media/files/b/tiger/WAcACXLL5-atiw1MGvH4g_7cb10ff246844b9c9298bbe98e431df7.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/demucs", + "documentationUrl": "https://fal.ai/models/fal-ai/demucs/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "DemucsInput": { + "title": "DemucsInput", + "type": "object", + "properties": { + "segment_length": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Segment Length", + "description": "Length in seconds of each segment for processing. Smaller values use less memory but may reduce quality. Default is model-specific." 
+ }, + "output_format": { + "examples": [ + "mp3" + ], + "title": "Output Format", + "type": "string", + "description": "Output audio format for the separated stems", + "enum": [ + "wav", + "mp3" + ], + "default": "mp3" + }, + "stems": { + "examples": [ + [ + "vocals", + "drums", + "bass", + "other", + "guitar", + "piano" + ] + ], + "title": "Stems", + "description": "Specific stems to extract. If None, extracts all available stems. Available stems depend on model: vocals, drums, bass, other, guitar, piano (for 6s model)", + "anyOf": [ + { + "type": "array", + "items": { + "enum": [ + "vocals", + "drums", + "bass", + "other", + "guitar", + "piano" + ], + "type": "string" + } + }, + { + "type": "null" + } + ], + "default": [ + "vocals", + "drums", + "bass", + "other", + "guitar", + "piano" + ] + }, + "overlap": { + "minimum": 0, + "title": "Overlap", + "type": "number", + "maximum": 1, + "description": "Overlap between segments (0.0 to 1.0). Higher values may improve quality but increase processing time.", + "default": 0.25 + }, + "model": { + "examples": [ + "htdemucs_6s" + ], + "title": "Model", + "type": "string", + "description": "Demucs model to use for separation", + "enum": [ + "htdemucs", + "htdemucs_ft", + "htdemucs_6s", + "hdemucs_mmi", + "mdx", + "mdx_extra", + "mdx_q", + "mdx_extra_q" + ], + "default": "htdemucs_6s" + }, + "audio_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/audio-understanding/Title_%20Running%20on%20Fal.mp3" + ], + "title": "Audio Url", + "type": "string", + "description": "URL of the audio file to separate into stems" + }, + "shifts": { + "minimum": 1, + "title": "Shifts", + "type": "integer", + "maximum": 10, + "description": "Number of random shifts for equivariant stabilization. 
Higher values improve quality but increase processing time.", + "default": 1 + } + }, + "x-fal-order-properties": [ + "audio_url", + "model", + "stems", + "segment_length", + "shifts", + "overlap", + "output_format" + ], + "required": [ + "audio_url" + ] + }, + "DemucsOutput": { + "title": "DemucsOutput", + "type": "object", + "properties": { + "vocals": { + "anyOf": [ + { + "$ref": "#/components/schemas/File" + }, + { + "type": "null" + } + ], + "description": "Separated vocals audio file" + }, + "guitar": { + "anyOf": [ + { + "$ref": "#/components/schemas/File" + }, + { + "type": "null" + } + ], + "description": "Separated guitar audio file (only available for 6s models)" + }, + "bass": { + "anyOf": [ + { + "$ref": "#/components/schemas/File" + }, + { + "type": "null" + } + ], + "description": "Separated bass audio file" + }, + "piano": { + "anyOf": [ + { + "$ref": "#/components/schemas/File" + }, + { + "type": "null" + } + ], + "description": "Separated piano audio file (only available for 6s models)" + }, + "other": { + "anyOf": [ + { + "$ref": "#/components/schemas/File" + }, + { + "type": "null" + } + ], + "description": "Separated other instruments audio file" + }, + "drums": { + "anyOf": [ + { + "$ref": "#/components/schemas/File" + }, + { + "type": "null" + } + ], + "description": "Separated drums audio file" + } + }, + "x-fal-order-properties": [ + "vocals", + "drums", + "bass", + "other", + "guitar", + "piano" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "description": "The mime type of the file.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/demucs/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/demucs/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
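`DemucsInput` exposes the most tuning surface of these endpoints. A sketch of a request that extracts two stems from the six-stem model (values illustrative, URL a placeholder):

```ts
// Sketch: DemucsInput requesting only vocals and drums from the 6-stem model.
const demucsInput = {
  audio_url: 'https://example.com/song.mp3', // placeholder
  model: 'htdemucs_6s' as const, // 6s models also expose guitar/piano stems
  stems: ['vocals', 'drums'] as const, // omit to extract every available stem
  shifts: 2, // 1-10; more shifts = better quality, slower
  overlap: 0.25, // 0.0-1.0 segment overlap
}
// DemucsOutput: each stem is a File or null (e.g. piano is null for non-6s
// models, or when the stem wasn't requested).
```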
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/demucs": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DemucsInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/demucs/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DemucsOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/audio-understanding", + "metadata": { + "display_name": "Audio Understanding", + "category": "audio-to-audio", + "description": "A audio understanding model to analyze audio content and answer questions about what's happening in the audio based on user prompts.", + "status": "active", + "tags": [ + "utility", + "audio" + ], + "updated_at": "2026-01-26T21:42:32.527Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/tiger/6SXguUXRvg1XrEdf_zZcM_023b35fa7e9e4777b3e7fa0934ebb13a.jpg", + "model_url": "https://fal.run/fal-ai/audio-understanding", + "license_type": "commercial", + "date": "2025-10-24T16:53:57.084Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/audio-understanding", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/audio-understanding queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/audio-understanding", + "category": "audio-to-audio", + "thumbnailUrl": "https://v3b.fal.media/files/b/tiger/6SXguUXRvg1XrEdf_zZcM_023b35fa7e9e4777b3e7fa0934ebb13a.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/audio-understanding", + "documentationUrl": "https://fal.ai/models/fal-ai/audio-understanding/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "AudioUnderstandingInput": { + "title": "AudioUnderstandingInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "What is being discussed in this audio?", + "What emotions are expressed in this audio?", + "What is the main topic of this conversation?" 
+ ], + "maxLength": 10000, + "type": "string", + "title": "Prompt", + "minLength": 1, + "description": "The question or prompt about the audio content." + }, + "detailed_analysis": { + "title": "Detailed Analysis", + "type": "boolean", + "description": "Whether to request a more detailed analysis of the audio", + "default": false + }, + "audio_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/audio-understanding/Title_%20Running%20on%20Fal.mp3" + ], + "title": "Audio Url", + "type": "string", + "description": "URL of the audio file to analyze" + } + }, + "x-fal-order-properties": [ + "audio_url", + "prompt", + "detailed_analysis" + ], + "required": [ + "audio_url", + "prompt" + ] + }, + "AudioUnderstandingOutput": { + "title": "AudioUnderstandingOutput", + "type": "object", + "properties": { + "output": { + "examples": [ + "Based on the audio, this appears to be a business meeting discussing quarterly sales results. The speakers are analyzing performance metrics and discussing strategies for the upcoming quarter. The tone is professional and collaborative, with multiple participants contributing to the discussion." + ], + "title": "Output", + "type": "string", + "description": "The analysis of the audio content based on the prompt" + } + }, + "x-fal-order-properties": [ + "output" + ], + "required": [ + "output" + ] + } + } + }, + "paths": { + "/fal-ai/audio-understanding/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/audio-understanding/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/audio-understanding": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AudioUnderstandingInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/audio-understanding/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AudioUnderstandingOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/stable-audio-25/audio-to-audio", + "metadata": { + "display_name": "Stable Audio 2.5", + "category": "audio-to-audio", + "description": "Generate high quality music and sound effects using Stable Audio 2.5 from StabilityAI", + "status": "active", + "tags": [ + "audio" + ], + "updated_at": "2026-01-26T21:42:57.326Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/tiger/1Z1jBiJuU6ZpY5-N4X6uO_9d6e67b3d66b4fc2b4bbeaf8cb80900f.jpg", + "model_url": "https://fal.run/fal-ai/stable-audio-25/audio-to-audio", + "license_type": "commercial", + "date": "2025-09-10T11:51:47.856Z", + "group": { + "key": "stable-audio-25", + "label": "Audio to Audio" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/stable-audio-25/audio-to-audio", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/stable-audio-25/audio-to-audio queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/stable-audio-25/audio-to-audio", + "category": "audio-to-audio", + "thumbnailUrl": "https://fal.media/files/tiger/1Z1jBiJuU6ZpY5-N4X6uO_9d6e67b3d66b4fc2b4bbeaf8cb80900f.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/stable-audio-25/audio-to-audio", + "documentationUrl": "https://fal.ai/models/fal-ai/stable-audio-25/audio-to-audio/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "StableAudio25AudioToAudioInput": { + "title": "AudioToAudioInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Post rock, guitars, bass, strings, euphoric, up-lifting, moody, flowing, raw, epic" + ], + "description": "The prompt to guide the audio generation", + "type": "string", + "title": "Prompt" + }, + "strength": { + "minimum": 0.01, + "maximum": 1, + "type": "number", + "title": "Strength", + "description": "Sometimes referred to as denoising, this parameter controls how much influence the `audio_url` parameter has on the generated audio. A value of 0 would yield audio that is identical to the input. A value of 1 would be as if you passed in no audio at all.", + "default": 0.8 + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "audio_url": { + "examples": [ + "https://v3.fal.media/files/panda/1-0iezBUIePBa3Sz5YY5B_tmpy1jyshw9.wav" + ], + "description": "The audio clip to transform", + "type": "string", + "title": "Audio Url" + }, + "num_inference_steps": { + "minimum": 4, + "maximum": 8, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of steps to denoise the audio for", + "default": 8 + }, + "guidance_scale": { + "minimum": 1, + "maximum": 25, + "type": "integer", + "title": "Guidance Scale", + "description": "How strictly the diffusion process adheres to the prompt text (higher values make your audio closer to your prompt). ", + "default": 1 + }, + "seed": { + "title": "Seed", + "type": "integer" + }, + "total_seconds": { + "minimum": 1, + "maximum": 190, + "type": "integer", + "examples": [ + 45 + ], + "title": "Total Seconds", + "description": "The duration of the audio clip to generate. If not provided, it will be set to the duration of the input audio." + } + }, + "x-fal-order-properties": [ + "prompt", + "audio_url", + "strength", + "num_inference_steps", + "total_seconds", + "guidance_scale", + "sync_mode", + "seed" + ], + "required": [ + "prompt", + "audio_url" + ] + }, + "StableAudio25AudioToAudioOutput": { + "title": "AudioToAudioOutput", + "type": "object", + "properties": { + "seed": { + "description": "The random seed used for generation", + "type": "integer", + "title": "Seed" + }, + "audio": { + "examples": [ + "https://v3.fal.media/files/elephant/bJ-KIfIXsls5-pqSRXRwx_tmpdurmawkp.wav" + ], + "description": "The generated audio clip", + "title": "Audio", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "audio", + "seed" + ], + "required": [ + "audio", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/stable-audio-25/audio-to-audio/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/stable-audio-25/audio-to-audio/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
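For the Stable Audio 2.5 audio-to-audio endpoint, prompt and audio_url are required; strength (0.01-1) controls how far the result may drift from the input clip, num_inference_steps is clamped to 4-8, and total_seconds (1-190) defaults to the input's duration. A sketch of the request body, with queue submission and polling as in the Demucs example and the same assumptions:

const headers = {
  Authorization: `Key ${process.env.FAL_KEY}`, // auth format is an assumption
  'Content-Type': 'application/json',
}

// Transform a clip toward the prompt; lower strength stays closer to the input.
await fetch('https://queue.fal.run/fal-ai/stable-audio-25/audio-to-audio', {
  method: 'POST',
  headers,
  body: JSON.stringify({
    prompt: 'Post rock, guitars, strings, euphoric, up-lifting',
    audio_url: 'https://example.com/clip.wav', // hypothetical input
    strength: 0.8,          // 0 ~ identical to input, 1 ~ input ignored
    num_inference_steps: 8, // spec range: 4-8
    total_seconds: 45,      // 1-190; omit to match the input duration
  }),
})
// The completed request returns { audio: File, seed: number }.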
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/stable-audio-25/audio-to-audio": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StableAudio25AudioToAudioInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/stable-audio-25/audio-to-audio/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StableAudio25AudioToAudioOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/stable-audio-25/inpaint", + "metadata": { + "display_name": "Stable Audio 25", + "category": "audio-to-audio", + "description": "Generate high quality music and sound effects using Stable Audio 2.5 from StabilityAI", + "status": "active", + "tags": [ + "audio" + ], + "updated_at": "2026-01-26T21:42:57.575Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/tiger/hfCAX4rFa62XwpVf6ikd3_bf07bf4907c84a6babcb187eb4363b80.jpg", + "model_url": "https://fal.run/fal-ai/stable-audio-25/inpaint", + "license_type": "commercial", + "date": "2025-09-10T11:46:09.286Z", + "group": { + "key": "stable-audio-25", + "label": "Inpaint" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/stable-audio-25/inpaint", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/stable-audio-25/inpaint queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/stable-audio-25/inpaint", + "category": "audio-to-audio", + "thumbnailUrl": "https://fal.media/files/tiger/hfCAX4rFa62XwpVf6ikd3_bf07bf4907c84a6babcb187eb4363b80.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/stable-audio-25/inpaint", + "documentationUrl": "https://fal.ai/models/fal-ai/stable-audio-25/inpaint/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "StableAudio25InpaintInput": { + "title": "InpaintInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Lofi hip hop beat, chillhop" + ], + "description": "The prompt to guide the audio generation", + "type": "string", + "title": "Prompt" + }, + "guidance_scale": { + "minimum": 1, + "maximum": 25, + "type": "integer", + "title": "Guidance Scale", + "description": "How strictly the diffusion process adheres to the prompt text (higher values make your audio closer to your prompt). ", + "default": 1 + }, + "mask_end": { + "description": "The end point of the audio mask", + "type": "integer", + "minimum": 0, + "maximum": 190, + "examples": [ + 40 + ], + "title": "Mask End", + "default": 190 + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "audio_url": { + "examples": [ + "https://v3.fal.media/files/elephant/t0ZrzW_ueetXrr3NUa87F_a2a_in.mp3" + ], + "description": "The audio clip to inpaint", + "type": "string", + "title": "Audio Url" + }, + "seed": { + "title": "Seed", + "type": "integer" + }, + "seconds_total": { + "description": "The duration of the audio clip to generate. If not provided, it will be set to the duration of the input audio.", + "type": "integer", + "minimum": 1, + "maximum": 190, + "examples": [ + 45 + ], + "title": "Seconds Total", + "default": 190 + }, + "num_inference_steps": { + "minimum": 4, + "maximum": 8, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of steps to denoise the audio for", + "default": 8 + }, + "mask_start": { + "description": "The start point of the audio mask", + "type": "integer", + "minimum": 0, + "maximum": 190, + "examples": [ + 15 + ], + "title": "Mask Start", + "default": 30 + } + }, + "x-fal-order-properties": [ + "prompt", + "audio_url", + "seconds_total", + "guidance_scale", + "mask_start", + "mask_end", + "num_inference_steps", + "sync_mode", + "seed" + ], + "required": [ + "prompt", + "audio_url" + ] + }, + "StableAudio25InpaintOutput": { + "title": "InpaintOutput", + "type": "object", + "properties": { + "seed": { + "description": "The random seed used for generation", + "type": "integer", + "title": "Seed" + }, + "audio": { + "examples": [ + "https://v3.fal.media/files/elephant/5F2Oour2tH_EHZrFUEmM-_tmp75kuha71.wav" + ], + "description": "The generated audio clip", + "title": "Audio", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "audio", + "seed" + ], + "required": [ + "audio", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/stable-audio-25/inpaint/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/stable-audio-25/inpaint/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/stable-audio-25/inpaint": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StableAudio25InpaintInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/stable-audio-25/inpaint/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StableAudio25InpaintOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "sonauto/v2/extend", + "metadata": { + "display_name": "Sonauto V2", + "category": "audio-to-audio", + "description": "Extend an existing song", + "status": "active", + "tags": [ + "music", + "text-to-music", + "text-to-audio" + ], + "updated_at": "2026-01-26T21:43:00.843Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/kangaroo/NHeRyBn8fcNS_W3YvNyKZ_4c6ea3f0fd0444c9b9c27b6245a83b38.jpg", + "model_url": "https://fal.run/sonauto/v2/extend", + "license_type": "commercial", + "date": "2025-08-23T21:04:56.164Z", + "group": { + "key": "sonauto-v2", + "label": "Extend" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for 
sonauto/v2/extend", + "version": "1.0.0", + "description": "The OpenAPI schema for the sonauto/v2/extend queue.", + "x-fal-metadata": { + "endpointId": "sonauto/v2/extend", + "category": "audio-to-audio", + "thumbnailUrl": "https://fal.media/files/kangaroo/NHeRyBn8fcNS_W3YvNyKZ_4c6ea3f0fd0444c9b9c27b6245a83b38.jpg", + "playgroundUrl": "https://fal.ai/models/sonauto/v2/extend", + "documentationUrl": "https://fal.ai/models/sonauto/v2/extend/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "V2ExtendInput": { + "title": "ExtendInput", + "type": "object", + "properties": { + "prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Prompt", + "description": "A description of the track you want to generate. This prompt will be used to automatically generate the tags and lyrics unless you manually set them. For example, if you set prompt and tags, then the prompt will be used to generate only the lyrics.", + "examples": [ + "Add a beginning to the song" + ] + }, + "lyrics_prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Lyrics Prompt", + "description": "The lyrics sung in the generated song. An empty string will generate an instrumental track." + }, + "tags": { + "anyOf": [ + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "null" + } + ], + "title": "Tags", + "description": "Tags/styles of the music to generate. You can view a list of all available tags at https://sonauto.ai/tag-explorer." + }, + "prompt_strength": { + "minimum": 1.4, + "maximum": 3.1, + "type": "number", + "title": "Prompt Strength", + "description": "Controls how strongly your prompt influences the output. Greater values adhere more to the prompt but sound less natural. (This is CFG.)", + "default": 1.8 + }, + "output_bit_rate": { + "anyOf": [ + { + "enum": [ + 128, + 192, + 256, + 320 + ], + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Output Bit Rate", + "description": "The bit rate to use for mp3 and m4a formats. Not available for other formats." + }, + "num_songs": { + "minimum": 1, + "maximum": 2, + "type": "integer", + "title": "Num Songs", + "description": "Generating 2 songs costs 1.5x the price of generating 1 song. 
Also, note that using the same seed may not result in identical songs if the number of songs generated is changed.", + "default": 1 + }, + "output_format": { + "enum": [ + "flac", + "mp3", + "wav", + "ogg", + "m4a" + ], + "title": "Output Format", + "type": "string", + "default": "wav" + }, + "side": { + "enum": [ + "left", + "right" + ], + "title": "Side", + "type": "string", + "description": "Add more to the beginning (left) or end (right) of the song" + }, + "balance_strength": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Balance Strength", + "description": "Greater means more natural vocals. Lower means sharper instrumentals. We recommend 0.7.", + "default": 0.7 + }, + "crop_duration": { + "title": "Crop Duration", + "type": "number", + "description": "Duration in seconds to crop from the selected side before extending from that side.", + "default": 0 + }, + "audio_url": { + "format": "uri", + "description": "The URL of the audio file to alter. Must be a valid publicly accessible URL.", + "type": "string", + "examples": [ + "https://cdn.sonauto.ai/generations2_altformats/audio_c5e63f7c-fc79-4322-808d-c09911af4713.wav" + ], + "maxLength": 2083, + "minLength": 1, + "title": "Audio Url" + }, + "seed": { + "anyOf": [ + { + "minimum": -9223372036854776000, + "maximum": 9223372036854776000, + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "The seed to use for generation. Will pick a random seed if not provided. Repeating a request with identical parameters (must use lyrics and tags, not prompt) and the same seed will generate the same song." + }, + "extend_duration": { + "anyOf": [ + { + "maximum": 85, + "type": "number", + "exclusiveMinimum": 1 + }, + { + "type": "null" + } + ], + "title": "Extend Duration", + "description": "Duration in seconds to extend the song. If not provided, will attempt to automatically determine." + } + }, + "x-fal-order-properties": [ + "prompt", + "tags", + "lyrics_prompt", + "seed", + "prompt_strength", + "balance_strength", + "num_songs", + "output_format", + "output_bit_rate", + "audio_url", + "side", + "extend_duration", + "crop_duration" + ], + "required": [ + "audio_url", + "side" + ] + }, + "V2ExtendOutput": { + "title": "ExtendOutput", + "type": "object", + "properties": { + "tags": { + "anyOf": [ + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "null" + } + ], + "title": "Tags", + "description": "The style tags used for generation." + }, + "seed": { + "examples": [ + 42 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used for generation. This can be used to generate an identical song by passing the same parameters with this seed in a future request." + }, + "extend_duration": { + "title": "Extend Duration", + "type": "number", + "description": "The duration in seconds that the song was extended by." + }, + "audio": { + "examples": [ + { + "file_size": 22069326, + "file_name": "sonauto.wav", + "content_type": "audio/wav", + "url": "https://cdn.sonauto.ai/generations2_altformats/audio_47337412-e577-42af-ae60-01a798e680ec.wav" + } + ], + "title": "Audio", + "type": "array", + "description": "The generated audio files.", + "items": { + "$ref": "#/components/schemas/File" + } + }, + "lyrics": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Lyrics", + "description": "The lyrics used for generation." 
+ } + }, + "x-fal-order-properties": [ + "seed", + "tags", + "lyrics", + "audio", + "extend_duration" + ], + "required": [ + "seed", + "audio", + "extend_duration" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/sonauto/v2/extend/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/sonauto/v2/extend/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/sonauto/v2/extend": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/V2ExtendInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/sonauto/v2/extend/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/V2ExtendOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ace-step/audio-outpaint", + "metadata": { + "display_name": "ACE-Step", + "category": "audio-to-audio", + "description": "Extend the beginning or end of provided audio with lyrics and/or style using ACE-Step", + "status": "active", + "tags": [ + "audio-to-audio", + "audio-outpaint", + "audio-extend" + ], + "updated_at": "2026-01-26T21:43:44.572Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-3.jpg", + "model_url": "https://fal.run/fal-ai/ace-step/audio-outpaint", + "license_type": "commercial", + "date": "2025-05-11T17:57:21.159Z", + "group": { + "key": "ace-step", + "label": "Audio Outpaint (Extend)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ace-step/audio-outpaint", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ace-step/audio-outpaint queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ace-step/audio-outpaint", + "category": "audio-to-audio", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-3.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ace-step/audio-outpaint", + "documentationUrl": "https://fal.ai/models/fal-ai/ace-step/audio-outpaint/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "AceStepAudioOutpaintInput": { + "title": "ACEStepAudioOutpaintRequest", + "type": "object", + "properties": { + "number_of_steps": { + "description": "Number of steps to generate the audio.", + "type": "integer", + "minimum": 3, + "title": "Number Of Steps", + "examples": [ + 27 + ], + "maximum": 60, + "default": 27 + }, + "tags": { + "examples": [ + "lofi, hiphop, drum and bass, trap, chill" + ], + "title": "Tags", + "type": "string", + "description": "Comma-separated list of genre tags to control the style of the generated audio." + }, + "minimum_guidance_scale": { + "description": "Minimum guidance scale for the generation after the decay.", + "type": "number", + "minimum": 0, + "title": "Minimum Guidance Scale", + "examples": [ + 3 + ], + "maximum": 200, + "default": 3 + }, + "extend_after_duration": { + "description": "Duration in seconds to extend the audio from the end.", + "type": "number", + "minimum": 0, + "title": "Extend After Duration", + "examples": [ + 30 + ], + "maximum": 240, + "default": 30 + }, + "lyrics": { + "title": "Lyrics", + "type": "string", + "description": "Lyrics to be sung in the audio. If not provided or if [inst] or [instrumental] is the content of this field, no lyrics will be sung. Use control structures like [verse], [chorus] and [bridge] to control the structure of the song.", + "default": "" + }, + "tag_guidance_scale": { + "description": "Tag guidance scale for the generation.", + "type": "number", + "minimum": 0, + "title": "Tag Guidance Scale", + "examples": [ + 5 + ], + "maximum": 10, + "default": 5 + }, + "scheduler": { + "enum": [ + "euler", + "heun" + ], + "title": "Scheduler", + "type": "string", + "examples": [ + "euler" + ], + "description": "Scheduler to use for the generation process.", + "default": "euler" + }, + "extend_before_duration": { + "description": "Duration in seconds to extend the audio from the start.", + "type": "number", + "minimum": 0, + "title": "Extend Before Duration", + "examples": [ + 0 + ], + "maximum": 240, + "default": 0 + }, + "guidance_type": { + "enum": [ + "cfg", + "apg", + "cfg_star" + ], + "title": "Guidance Type", + "type": "string", + "examples": [ + "apg" + ], + "description": "Type of CFG to use for the generation process.", + "default": "apg" + }, + "guidance_scale": { + "description": "Guidance scale for the generation.", + "type": "number", + "minimum": 0, + "title": "Guidance Scale", + "examples": [ + 15 + ], + "maximum": 200, + "default": 15 + }, + "lyric_guidance_scale": { + "description": "Lyric guidance scale for the generation.", + "type": "number", + "minimum": 0, + "title": "Lyric Guidance Scale", + "examples": [ + 1.5 + ], + "maximum": 10, + "default": 1.5 + }, + "guidance_interval": { + "description": "Guidance interval for the generation. 0.5 means only apply guidance in the middle steps (0.25 * infer_steps to 0.75 * infer_steps)", + "type": "number", + "minimum": 0, + "title": "Guidance Interval", + "examples": [ + 0.5 + ], + "maximum": 1, + "default": 0.5 + }, + "guidance_interval_decay": { + "description": "Guidance interval decay for the generation. Guidance scale will decay from guidance_scale to min_guidance_scale in the interval. 
0.0 means no decay.", + "type": "number", + "minimum": 0, + "title": "Guidance Interval Decay", + "examples": [ + 0 + ], + "maximum": 1, + "default": 0 + }, + "audio_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/ace-step-audio-to-audio.wav" + ], + "title": "Audio Url", + "type": "string", + "description": "URL of the audio file to be outpainted." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility. If not provided, a random seed will be used." + }, + "granularity_scale": { + "description": "Granularity scale for the generation process. Higher values can reduce artifacts.", + "type": "integer", + "minimum": -100, + "title": "Granularity Scale", + "examples": [ + 10 + ], + "maximum": 100, + "default": 10 + } + }, + "x-fal-order-properties": [ + "audio_url", + "extend_before_duration", + "extend_after_duration", + "tags", + "lyrics", + "number_of_steps", + "seed", + "scheduler", + "guidance_type", + "granularity_scale", + "guidance_interval", + "guidance_interval_decay", + "guidance_scale", + "minimum_guidance_scale", + "tag_guidance_scale", + "lyric_guidance_scale" + ], + "required": [ + "tags", + "audio_url" + ] + }, + "AceStepAudioOutpaintOutput": { + "title": "ACEStepResponse", + "type": "object", + "properties": { + "tags": { + "examples": [ + "lofi, hiphop, drum and bass, trap, chill" + ], + "title": "Tags", + "type": "string", + "description": "The genre tags used in the generation process." + }, + "lyrics": { + "examples": [ + "[inst]" + ], + "title": "Lyrics", + "type": "string", + "description": "The lyrics used in the generation process." + }, + "seed": { + "examples": [ + 42 + ], + "title": "Seed", + "type": "integer", + "description": "The random seed used for the generation process." + }, + "audio": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/ace-step-text-to-audio.wav" + } + ], + "title": "Audio", + "description": "The generated audio file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "audio", + "seed", + "tags", + "lyrics" + ], + "required": [ + "audio", + "seed", + "tags", + "lyrics" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
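ACE-Step's audio-outpaint requires tags and audio_url, and extends the clip by extend_before_duration and/or extend_after_duration seconds (each 0-240). A sketch of the request body, same assumptions as above:

const headers = {
  Authorization: `Key ${process.env.FAL_KEY}`, // auth format is an assumption
  'Content-Type': 'application/json',
}

// Extend a track by 30s at the end in the given styles, instrumental only.
await fetch('https://queue.fal.run/fal-ai/ace-step/audio-outpaint', {
  method: 'POST',
  headers,
  body: JSON.stringify({
    audio_url: 'https://example.com/beat.wav', // hypothetical input
    tags: 'lofi, hiphop, chill',
    extend_before_duration: 0,
    extend_after_duration: 30,
    lyrics: '[inst]', // empty or [inst]/[instrumental] means no vocals
  }),
})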
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ace-step/audio-outpaint/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ace-step/audio-outpaint/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ace-step/audio-outpaint": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AceStepAudioOutpaintInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ace-step/audio-outpaint/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AceStepAudioOutpaintOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ace-step/audio-inpaint", + "metadata": { + "display_name": "ACE-Step", + "category": "audio-to-audio", + "description": "Modify a portion of provided audio with lyrics and/or style using ACE-Step", + "status": "active", + "tags": [ + "audio-to-audio", + "audio-inpaint", + "audio-repaint" + ], + "updated_at": "2026-01-26T21:43:44.704Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-3.jpg", + "model_url": "https://fal.run/fal-ai/ace-step/audio-inpaint", + "license_type": "commercial", + "date": "2025-05-11T17:55:55.827Z", + "group": { + "key": "ace-step", + "label": "Audio Inpaint (Edit)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ace-step/audio-inpaint", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ace-step/audio-inpaint queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ace-step/audio-inpaint", + "category": "audio-to-audio", + "thumbnailUrl": 
"https://storage.googleapis.com/fal_cdn/fal/Sound-3.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ace-step/audio-inpaint", + "documentationUrl": "https://fal.ai/models/fal-ai/ace-step/audio-inpaint/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "AceStepAudioInpaintInput": { + "title": "ACEStepAudioInpaintRequest", + "type": "object", + "properties": { + "number_of_steps": { + "description": "Number of steps to generate the audio.", + "type": "integer", + "minimum": 3, + "title": "Number Of Steps", + "examples": [ + 27 + ], + "maximum": 60, + "default": 27 + }, + "start_time": { + "description": "start time in seconds for the inpainting process.", + "type": "number", + "minimum": 0, + "title": "Start Time", + "examples": [ + 0 + ], + "maximum": 240, + "default": 0 + }, + "tags": { + "examples": [ + "lofi, hiphop, drum and bass, trap, chill" + ], + "title": "Tags", + "type": "string", + "description": "Comma-separated list of genre tags to control the style of the generated audio." + }, + "minimum_guidance_scale": { + "description": "Minimum guidance scale for the generation after the decay.", + "type": "number", + "minimum": 0, + "title": "Minimum Guidance Scale", + "examples": [ + 3 + ], + "maximum": 200, + "default": 3 + }, + "lyrics": { + "title": "Lyrics", + "type": "string", + "description": "Lyrics to be sung in the audio. If not provided or if [inst] or [instrumental] is the content of this field, no lyrics will be sung. 
Use control structures like [verse], [chorus] and [bridge] to control the structure of the song.", + "default": "" + }, + "end_time_relative_to": { + "enum": [ + "start", + "end" + ], + "description": "Whether the end time is relative to the start or end of the audio.", + "type": "string", + "examples": [ + "start" + ], + "title": "End Time Relative To", + "default": "start" + }, + "tag_guidance_scale": { + "description": "Tag guidance scale for the generation.", + "type": "number", + "minimum": 0, + "title": "Tag Guidance Scale", + "examples": [ + 5 + ], + "maximum": 10, + "default": 5 + }, + "scheduler": { + "enum": [ + "euler", + "heun" + ], + "title": "Scheduler", + "type": "string", + "examples": [ + "euler" + ], + "description": "Scheduler to use for the generation process.", + "default": "euler" + }, + "end_time": { + "description": "end time in seconds for the inpainting process.", + "type": "number", + "minimum": 0, + "title": "End Time", + "examples": [ + 30 + ], + "maximum": 240, + "default": 30 + }, + "guidance_type": { + "enum": [ + "cfg", + "apg", + "cfg_star" + ], + "title": "Guidance Type", + "type": "string", + "examples": [ + "apg" + ], + "description": "Type of CFG to use for the generation process.", + "default": "apg" + }, + "guidance_scale": { + "description": "Guidance scale for the generation.", + "type": "number", + "minimum": 0, + "title": "Guidance Scale", + "examples": [ + 15 + ], + "maximum": 200, + "default": 15 + }, + "lyric_guidance_scale": { + "description": "Lyric guidance scale for the generation.", + "type": "number", + "minimum": 0, + "title": "Lyric Guidance Scale", + "examples": [ + 1.5 + ], + "maximum": 10, + "default": 1.5 + }, + "guidance_interval": { + "description": "Guidance interval for the generation. 0.5 means only apply guidance in the middle steps (0.25 * infer_steps to 0.75 * infer_steps)", + "type": "number", + "minimum": 0, + "title": "Guidance Interval", + "examples": [ + 0.5 + ], + "maximum": 1, + "default": 0.5 + }, + "variance": { + "description": "Variance for the inpainting process. Higher values can lead to more diverse results.", + "type": "number", + "minimum": 0, + "title": "Variance", + "examples": [ + 0.5 + ], + "maximum": 1, + "default": 0.5 + }, + "guidance_interval_decay": { + "description": "Guidance interval decay for the generation. Guidance scale will decay from guidance_scale to min_guidance_scale in the interval. 0.0 means no decay.", + "type": "number", + "minimum": 0, + "title": "Guidance Interval Decay", + "examples": [ + 0 + ], + "maximum": 1, + "default": 0 + }, + "start_time_relative_to": { + "enum": [ + "start", + "end" + ], + "description": "Whether the start time is relative to the start or end of the audio.", + "type": "string", + "examples": [ + "start" + ], + "title": "Start Time Relative To", + "default": "start" + }, + "audio_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/ace-step-audio-to-audio.wav" + ], + "title": "Audio Url", + "type": "string", + "description": "URL of the audio file to be inpainted." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility. If not provided, a random seed will be used." + }, + "granularity_scale": { + "description": "Granularity scale for the generation process. 
Higher values can reduce artifacts.", + "type": "integer", + "minimum": -100, + "title": "Granularity Scale", + "examples": [ + 10 + ], + "maximum": 100, + "default": 10 + } + }, + "x-fal-order-properties": [ + "audio_url", + "start_time_relative_to", + "start_time", + "end_time_relative_to", + "end_time", + "tags", + "lyrics", + "variance", + "number_of_steps", + "seed", + "scheduler", + "guidance_type", + "granularity_scale", + "guidance_interval", + "guidance_interval_decay", + "guidance_scale", + "minimum_guidance_scale", + "tag_guidance_scale", + "lyric_guidance_scale" + ], + "required": [ + "tags", + "audio_url" + ] + }, + "AceStepAudioInpaintOutput": { + "title": "ACEStepAudioInpaintResponse", + "type": "object", + "properties": { + "tags": { + "examples": [ + "lofi, hiphop, drum and bass, trap, chill" + ], + "title": "Tags", + "type": "string", + "description": "The genre tags used in the generation process." + }, + "lyrics": { + "examples": [ + "[inst]" + ], + "title": "Lyrics", + "type": "string", + "description": "The lyrics used in the generation process." + }, + "seed": { + "examples": [ + 42 + ], + "title": "Seed", + "type": "integer", + "description": "The random seed used for the generation process." + }, + "audio": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/ace-step-audio-inpaint.wav" + } + ], + "title": "Audio", + "description": "The generated audio file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "audio", + "seed", + "tags", + "lyrics" + ], + "required": [ + "audio", + "seed", + "tags", + "lyrics" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ace-step/audio-inpaint/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
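ACE-Step's audio-inpaint rewrites the window between start_time and end_time; each bound can be measured from the start or the end of the clip via start_time_relative_to / end_time_relative_to, and variance (0-1) trades fidelity for diversity inside the window. A sketch of the request body, same assumptions as above:

const headers = {
  Authorization: `Key ${process.env.FAL_KEY}`, // auth format is an assumption
  'Content-Type': 'application/json',
}

// Regenerate the first 30 seconds of the track in a new style.
await fetch('https://queue.fal.run/fal-ai/ace-step/audio-inpaint', {
  method: 'POST',
  headers,
  body: JSON.stringify({
    audio_url: 'https://example.com/track.wav', // hypothetical input
    tags: 'drum and bass, trap',
    start_time: 0,
    end_time: 30,
    start_time_relative_to: 'start',
    end_time_relative_to: 'start',
    variance: 0.5, // higher values let the window drift further from the original
  }),
})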
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ace-step/audio-inpaint/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ace-step/audio-inpaint": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AceStepAudioInpaintInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ace-step/audio-inpaint/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AceStepAudioInpaintOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ace-step/audio-to-audio", + "metadata": { + "display_name": "ACE-Step", + "category": "audio-to-audio", + "description": "Generate music from a lyrics and example audio using ACE-Step", + "status": "active", + "tags": [ + "audio-to-audio", + "audio-edit" + ], + "updated_at": "2026-01-26T21:43:44.832Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/zebra/S5IFY0O4oGvrMRRKtdEVQ_1f8a744311ee4074bf2da4d84ae4491a.jpg", + "model_url": "https://fal.run/fal-ai/ace-step/audio-to-audio", + "license_type": "commercial", + "date": "2025-05-11T17:53:31.579Z", + "group": { + "key": "ace-step", + "label": "Audio to Audio" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ace-step/audio-to-audio", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ace-step/audio-to-audio queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ace-step/audio-to-audio", + "category": "audio-to-audio", + "thumbnailUrl": "https://fal.media/files/zebra/S5IFY0O4oGvrMRRKtdEVQ_1f8a744311ee4074bf2da4d84ae4491a.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ace-step/audio-to-audio", + "documentationUrl": "https://fal.ai/models/fal-ai/ace-step/audio-to-audio/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "AceStepAudioToAudioInput": { + "title": "ACEStepAudioToAudioRequest", + "type": "object", + "properties": { + "number_of_steps": { + "description": "Number of steps to generate the audio.", + "type": "integer", + "minimum": 3, + "title": "Number Of Steps", + "examples": [ + 27 + ], + "maximum": 60, + "default": 27 + }, + "tags": { + "examples": [ + "lofi, hiphop, drum and bass, trap, chill" + ], + "title": "Tags", + "type": "string", + "description": "Comma-separated list of genre tags to control the style of the generated audio." + }, + "minimum_guidance_scale": { + "description": "Minimum guidance scale for the generation after the decay.", + "type": "number", + "minimum": 0, + "title": "Minimum Guidance Scale", + "examples": [ + 3 + ], + "maximum": 200, + "default": 3 + }, + "lyrics": { + "title": "Lyrics", + "type": "string", + "description": "Lyrics to be sung in the audio. If not provided or if [inst] or [instrumental] is the content of this field, no lyrics will be sung. Use control structures like [verse], [chorus] and [bridge] to control the structure of the song.", + "default": "" + }, + "tag_guidance_scale": { + "description": "Tag guidance scale for the generation.", + "type": "number", + "minimum": 0, + "title": "Tag Guidance Scale", + "examples": [ + 5 + ], + "maximum": 10, + "default": 5 + }, + "original_lyrics": { + "examples": [ + "" + ], + "title": "Original Lyrics", + "type": "string", + "description": "Original lyrics of the audio file.", + "default": "" + }, + "scheduler": { + "enum": [ + "euler", + "heun" + ], + "title": "Scheduler", + "type": "string", + "examples": [ + "euler" + ], + "description": "Scheduler to use for the generation process.", + "default": "euler" + }, + "guidance_scale": { + "description": "Guidance scale for the generation.", + "type": "number", + "minimum": 0, + "title": "Guidance Scale", + "examples": [ + 15 + ], + "maximum": 200, + "default": 15 + }, + "guidance_type": { + "enum": [ + "cfg", + "apg", + "cfg_star" + ], + "title": "Guidance Type", + "type": "string", + "examples": [ + "apg" + ], + "description": "Type of CFG to use for the generation process.", + "default": "apg" + }, + "lyric_guidance_scale": { + "description": "Lyric guidance scale for the generation.", + "type": "number", + "minimum": 0, + "title": "Lyric Guidance Scale", + "examples": [ + 1.5 + ], + "maximum": 10, + "default": 1.5 + }, + "guidance_interval": { + "description": "Guidance interval for the generation. 0.5 means only apply guidance in the middle steps (0.25 * infer_steps to 0.75 * infer_steps)", + "type": "number", + "minimum": 0, + "title": "Guidance Interval", + "examples": [ + 0.5 + ], + "maximum": 1, + "default": 0.5 + }, + "edit_mode": { + "enum": [ + "lyrics", + "remix" + ], + "description": "Whether to edit the lyrics only or remix the audio.", + "type": "string", + "examples": [ + "remix" + ], + "title": "Edit Mode", + "default": "remix" + }, + "guidance_interval_decay": { + "description": "Guidance interval decay for the generation. 
Guidance scale will decay from guidance_scale to min_guidance_scale in the interval. 0.0 means no decay.", + "type": "number", + "minimum": 0, + "title": "Guidance Interval Decay", + "examples": [ + 0 + ], + "maximum": 1, + "default": 0 + }, + "audio_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/ace-step-audio-to-audio.wav" + ], + "title": "Audio Url", + "type": "string", + "description": "URL of the audio file to be outpainted." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility. If not provided, a random seed will be used." + }, + "granularity_scale": { + "description": "Granularity scale for the generation process. Higher values can reduce artifacts.", + "type": "integer", + "minimum": -100, + "title": "Granularity Scale", + "examples": [ + 10 + ], + "maximum": 100, + "default": 10 + }, + "original_tags": { + "examples": [ + "lofi, hiphop, drum and bass, trap, chill" + ], + "title": "Original Tags", + "type": "string", + "description": "Original tags of the audio file." + }, + "original_seed": { + "title": "Original Seed", + "type": "integer", + "description": "Original seed of the audio file." + } + }, + "x-fal-order-properties": [ + "audio_url", + "edit_mode", + "original_tags", + "original_lyrics", + "tags", + "lyrics", + "number_of_steps", + "seed", + "scheduler", + "guidance_type", + "granularity_scale", + "guidance_interval", + "guidance_interval_decay", + "guidance_scale", + "minimum_guidance_scale", + "tag_guidance_scale", + "lyric_guidance_scale", + "original_seed" + ], + "required": [ + "tags", + "audio_url", + "original_tags" + ] + }, + "AceStepAudioToAudioOutput": { + "title": "ACEStepAudioToAudioResponse", + "type": "object", + "properties": { + "tags": { + "examples": [ + "lofi, hiphop, drum and bass, trap, chill" + ], + "title": "Tags", + "type": "string", + "description": "The genre tags used in the generation process." + }, + "lyrics": { + "examples": [ + "[inst]" + ], + "title": "Lyrics", + "type": "string", + "description": "The lyrics used in the generation process." + }, + "seed": { + "examples": [ + 42 + ], + "title": "Seed", + "type": "integer", + "description": "The random seed used for the generation process." + }, + "audio": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/ace-step-audio-to-audio.wav" + } + ], + "title": "Audio", + "description": "The generated audio file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "audio", + "seed", + "tags", + "lyrics" + ], + "required": [ + "audio", + "seed", + "tags", + "lyrics" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
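Taken together, the AceStepAudioToAudioInput/Output schemas and the queue paths declared in this file are enough to drive the endpoint with plain `fetch`. A minimal TypeScript sketch of the round-trip follows; it assumes a `FAL_KEY` environment variable and fal's documented `Authorization: Key <FAL_KEY>` header format, and the tag values are illustrative only.

```ts
// Sketch: submit an ACE-Step audio-to-audio job to the fal queue and poll
// for the result, using only the paths and schemas declared in this file.
// Assumes FAL_KEY is set; fal expects `Authorization: Key <FAL_KEY>`.
const BASE = 'https://queue.fal.run'
const ENDPOINT = 'fal-ai/ace-step/audio-to-audio'
const headers = {
  Authorization: `Key ${process.env.FAL_KEY}`,
  'Content-Type': 'application/json',
}

async function remix(audioUrl: string) {
  // POST /fal-ai/ace-step/audio-to-audio -> QueueStatus.
  // `tags`, `audio_url` and `original_tags` are the required fields.
  const submit = await fetch(`${BASE}/${ENDPOINT}`, {
    method: 'POST',
    headers,
    body: JSON.stringify({
      audio_url: audioUrl,
      edit_mode: 'remix',
      original_tags: 'lofi, hiphop, chill', // illustrative values
      tags: 'drum and bass, trap',
    }),
  })
  const { request_id } = (await submit.json()) as { request_id: string }

  // GET .../requests/{request_id}/status until status is COMPLETED.
  let status = 'IN_QUEUE'
  while (status !== 'COMPLETED') {
    await new Promise((r) => setTimeout(r, 1000))
    const res = await fetch(
      `${BASE}/${ENDPOINT}/requests/${request_id}/status`,
      { headers },
    )
    status = ((await res.json()) as { status: string }).status
  }

  // GET .../requests/{request_id} -> AceStepAudioToAudioOutput.
  const result = await fetch(`${BASE}/${ENDPOINT}/requests/${request_id}`, {
    headers,
  })
  return (await result.json()) as { audio: { url: string }; seed: number }
}
```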
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ace-step/audio-to-audio/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ace-step/audio-to-audio/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ace-step/audio-to-audio": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AceStepAudioToAudioInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ace-step/audio-to-audio/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AceStepAudioToAudioOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/dia-tts/voice-clone", + "metadata": { + "display_name": "Dia Tts", + "category": "audio-to-audio", + "description": "Clone dialog voices from a sample audio and generate dialogs from text prompts using the Dia TTS which leverages advanced AI techniques to create high-quality text-to-speech.", + "status": "active", + "tags": [ + "speech" + ], + "updated_at": "2026-01-26T21:43:53.716Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-4.jpg", + "model_url": "https://fal.run/fal-ai/dia-tts/voice-clone", + "license_type": "commercial", + "date": "2025-04-22T23:43:19.478Z", + "group": { + "key": "dia-tts", + "label": "Voice Cloning" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/dia-tts/voice-clone", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/dia-tts/voice-clone queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/dia-tts/voice-clone", + "category": "audio-to-audio", + "thumbnailUrl": 
"https://storage.googleapis.com/fal_cdn/fal/Sound-4.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/dia-tts/voice-clone", + "documentationUrl": "https://fal.ai/models/fal-ai/dia-tts/voice-clone/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "DiaTtsVoiceCloneInput": { + "title": "CloneRequest", + "type": "object", + "properties": { + "text": { + "examples": [ + "[S1] Hello, how are you? [S2] I'm good, thank you. [S1] What's your name? [S2] My name is Dia. [S1] Nice to meet you. [S2] Nice to meet you too." + ], + "title": "Text", + "type": "string", + "description": "The text to be converted to speech." + }, + "ref_text": { + "examples": [ + "[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Fal." + ], + "title": "Reference Text for the Reference Audio", + "type": "string", + "description": "The reference text to be used for TTS." + }, + "ref_audio_url": { + "examples": [ + "https://v3.fal.media/files/elephant/d5lORit2npFfBykcAtyUr_tmplacfh8oa.mp3" + ], + "title": "Reference Audio URL", + "type": "string", + "description": "The URL of the reference audio file." + } + }, + "x-fal-order-properties": [ + "text", + "ref_audio_url", + "ref_text" + ], + "required": [ + "text", + "ref_audio_url", + "ref_text" + ] + }, + "DiaTtsVoiceCloneOutput": { + "title": "DiaCloneOutput", + "type": "object", + "properties": { + "audio": { + "examples": [ + { + "url": "https://v3.fal.media/files/tiger/smL9a_mr1PRIvZxDSVppk_output.wav" + } + ], + "description": "The generated speech audio", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "audio" + ], + "required": [ + "audio" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/dia-tts/voice-clone/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/dia-tts/voice-clone/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/dia-tts/voice-clone": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DiaTtsVoiceCloneInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/dia-tts/voice-clone/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DiaTtsVoiceCloneOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/elevenlabs/audio-isolation", + "metadata": { + "display_name": "ElevenLabs Audio Isolation", + "category": "audio-to-audio", + "description": "Isolate audio tracks using ElevenLabs advanced audio isolation technology.", + "status": "active", + "tags": [ + "audio" + ], + "updated_at": "2026-01-26T21:44:24.697Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/web-examples/elevenlabs/elevenlabs_thumbnail.webp", + "model_url": "https://fal.run/fal-ai/elevenlabs/audio-isolation", + "license_type": "commercial", + "date": "2025-02-27T00:00:00.000Z", + "group": { + "key": "elevenlabs-audio", + "label": "Audio Isolation" + }, + "highlighted": false, + "stream_url": "https://fal.run/fal-ai/elevenlabs/audio-isolation/stream", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/elevenlabs/audio-isolation", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/elevenlabs/audio-isolation queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/elevenlabs/audio-isolation", + "category": "audio-to-audio", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/web-examples/elevenlabs/elevenlabs_thumbnail.webp", + 
"playgroundUrl": "https://fal.ai/models/fal-ai/elevenlabs/audio-isolation", + "documentationUrl": "https://fal.ai/models/fal-ai/elevenlabs/audio-isolation/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ElevenlabsAudioIsolationInput": { + "title": "AudioIsolationRequest", + "type": "object", + "properties": { + "video_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Video file to use for audio isolation. Either `audio_url` or `video_url` must be provided.", + "title": "Video Url" + }, + "audio_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "URL of the audio file to isolate voice from", + "title": "Audio Url", + "examples": [ + "https://v3.fal.media/files/zebra/zJL_oRY8h5RWwjoK1w7tx_output.mp3" + ] + } + }, + "x-fal-order-properties": [ + "audio_url", + "video_url" + ] + }, + "ElevenlabsAudioIsolationOutput": { + "title": "TTSOutput", + "type": "object", + "properties": { + "audio": { + "examples": [ + { + "url": "https://v3.fal.media/files/zebra/zJL_oRY8h5RWwjoK1w7tx_output.mp3" + } + ], + "description": "The generated audio file", + "$ref": "#/components/schemas/File" + }, + "timestamps": { + "anyOf": [ + { + "type": "array", + "items": {} + }, + { + "type": "null" + } + ], + "description": "Timestamps for each word in the generated speech. Only returned if `timestamps` is set to True in the request.", + "title": "Timestamps" + } + }, + "x-fal-order-properties": [ + "audio", + "timestamps" + ], + "required": [ + "audio" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/elevenlabs/audio-isolation/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/elevenlabs/audio-isolation/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/elevenlabs/audio-isolation": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ElevenlabsAudioIsolationInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/elevenlabs/audio-isolation/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ElevenlabsAudioIsolationOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/packages/typescript/ai-fal/json/fal.models.audio-to-text.json b/packages/typescript/ai-fal/json/fal.models.audio-to-text.json new file mode 100644 index 00000000..599491a2 --- /dev/null +++ b/packages/typescript/ai-fal/json/fal.models.audio-to-text.json @@ -0,0 +1,869 @@ +{ + "generated_at": "2026-01-28T02:51:51.868Z", + "total_models": 3, + "category": "audio-to-text", + "models": [ + { + "endpoint_id": "fal-ai/nemotron/asr/stream", + "metadata": { + "display_name": "Nemotron", + "category": "audio-to-text", + "description": "Use the fast speed and pin point accuracy of nemotron to transcribe your texts.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:31.435Z", + "is_favorited": false, + "thumbnail_url": 
"https://v3b.fal.media/files/b/0a8b0769/B2cbMOqt6_wePeNTF4uXD_9e9093acce30405a8068e24985c8901c.jpg", + "model_url": "https://fal.run/fal-ai/nemotron/asr/stream", + "license_type": "commercial", + "date": "2026-01-19T15:14:32.294Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/nemotron/asr/stream", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/nemotron/asr/stream queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/nemotron/asr/stream", + "category": "audio-to-text", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8b0769/B2cbMOqt6_wePeNTF4uXD_9e9093acce30405a8068e24985c8901c.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/nemotron/asr/stream", + "documentationUrl": "https://fal.ai/models/fal-ai/nemotron/asr/stream/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "NemotronAsrStreamInput": { + "title": "SpeechInput", + "type": "object", + "properties": { + "acceleration": { + "enum": [ + "none", + "low", + "medium", + "high" + ], + "title": "Acceleration", + "type": "string", + "description": "Controls the speed/accuracy trade-off. 'none' = best accuracy (1.12s chunks, ~7.16% WER), 'low' = balanced (0.56s chunks, ~7.22% WER), 'medium' = faster (0.16s chunks, ~7.84% WER), 'high' = fastest (0.08s chunks, ~8.53% WER).", + "default": "none" + }, + "audio_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/canary/18e15559-ab3e-4f96-9583-be5ddde91e43.mp3" + ], + "title": "Audio URL", + "type": "string", + "description": "URL of the audio file." + } + }, + "x-fal-order-properties": [ + "audio_url", + "acceleration" + ], + "required": [ + "audio_url" + ] + }, + "NemotronAsrStreamOutput": {} + } + }, + "paths": { + "/fal-ai/nemotron/asr/stream/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/nemotron/asr/stream/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/nemotron/asr/stream": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/NemotronAsrStreamInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/nemotron/asr/stream/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/NemotronAsrStreamOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/nemotron/asr", + "metadata": { + "display_name": "Nemotron", + "category": "audio-to-text", + "description": "Use the fast speed and pin point accuracy of nemotron to transcribe your texts.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:31.559Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8b0753/AlLcMsPv2Xg8024K7eyDU_fc517558047f419087b14e2f1db6dfff.jpg", + "model_url": "https://fal.run/fal-ai/nemotron/asr", + "license_type": "commercial", + "date": "2026-01-19T15:11:38.754Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/nemotron/asr", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/nemotron/asr queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/nemotron/asr", + "category": "audio-to-text", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8b0753/AlLcMsPv2Xg8024K7eyDU_fc517558047f419087b14e2f1db6dfff.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/nemotron/asr", + "documentationUrl": "https://fal.ai/models/fal-ai/nemotron/asr/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "NemotronAsrInput": { + "title": "SpeechInput", + "type": "object", + "properties": { + "acceleration": { + "enum": [ + "none", + "low", + "medium", + "high" + ], + "title": "Acceleration", + "type": "string", + "description": "Controls the speed/accuracy trade-off. 'none' = best accuracy (1.12s chunks, ~7.16% WER), 'low' = balanced (0.56s chunks, ~7.22% WER), 'medium' = faster (0.16s chunks, ~7.84% WER), 'high' = fastest (0.08s chunks, ~8.53% WER).", + "default": "none" + }, + "audio_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/canary/18e15559-ab3e-4f96-9583-be5ddde91e43.mp3" + ], + "title": "Audio URL", + "type": "string", + "description": "URL of the audio file." + } + }, + "x-fal-order-properties": [ + "audio_url", + "acceleration" + ], + "required": [ + "audio_url" + ] + }, + "NemotronAsrOutput": { + "title": "SpeechOutput", + "type": "object", + "properties": { + "partial": { + "title": "Partial Result", + "type": "boolean", + "description": "True if this is an intermediate result during streaming.", + "default": false + }, + "output": { + "title": "Transcribed Text", + "type": "string", + "description": "The transcribed text from the audio." + } + }, + "x-fal-order-properties": [ + "output", + "partial" + ], + "required": [ + "output" + ] + } + } + }, + "paths": { + "/fal-ai/nemotron/asr/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/nemotron/asr/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
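The NemotronAsrOutput schema above is small enough to mirror directly in TypeScript. A sketch with field names taken straight from the schema (the helper name is my own):

```ts
// Sketch: the Nemotron ASR result shape as declared above. `partial` marks
// intermediate transcripts when the same shape arrives over the streaming
// endpoint; it defaults to false on final results.
interface NemotronAsrOutput {
  output: string // the transcribed text (required)
  partial?: boolean
}

// Hypothetical helper: keep only final transcripts.
function isFinalTranscript(result: NemotronAsrOutput): boolean {
  return !result.partial
}
```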
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/nemotron/asr": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/NemotronAsrInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/nemotron/asr/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/NemotronAsrOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/silero-vad", + "metadata": { + "display_name": "Silero VAD", + "category": "audio-to-text", + "description": "Detect speech presence and timestamps with accuracy and speed using the ultra-lightweight Silero VAD model", + "status": "active", + "tags": [ + "vad", + "silero", + "voice-activity-detection" + ], + "updated_at": "2026-01-26T21:41:40.502Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8999ea/LpSxj3lpoR_1RIxsAw4Z0_bfe5fc31ca6442679cefa0d11a486833.jpg", + "model_url": "https://fal.run/fal-ai/silero-vad", + "license_type": "commercial", + "date": "2026-01-08T19:19:45.098Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/silero-vad", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/silero-vad queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/silero-vad", + "category": "audio-to-text", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8999ea/LpSxj3lpoR_1RIxsAw4Z0_bfe5fc31ca6442679cefa0d11a486833.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/silero-vad", + "documentationUrl": "https://fal.ai/models/fal-ai/silero-vad/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "SileroVadInput": { + "x-fal-order-properties": [ + "audio_url" + ], + "type": "object", + "properties": { + "audio_url": { + "examples": [ + "https://v3b.fal.media/files/b/0a89994c/X3-06RFibRfBu-FS1AI8y_speech.mp3" + ], + "title": "Audio URL", + "type": "string", + "description": "The URL of the audio to get speech timestamps from." + } + }, + "title": "SileroVADInput", + "required": [ + "audio_url" + ] + }, + "SileroVadOutput": { + "x-fal-order-properties": [ + "has_speech", + "timestamps" + ], + "type": "object", + "properties": { + "has_speech": { + "examples": [ + true + ], + "title": "Has Speech", + "type": "boolean", + "description": "Whether the audio has speech." + }, + "timestamps": { + "examples": [ + [ + { + "end": 1.982, + "start": 0.13 + }, + { + "end": 3.998, + "start": 2.434 + } + ] + ], + "title": "Speech Timestamps", + "type": "array", + "description": "The speech timestamps.", + "items": { + "$ref": "#/components/schemas/SpeechTimestamp" + } + } + }, + "title": "SileroVADOutput", + "required": [ + "has_speech", + "timestamps" + ] + }, + "SpeechTimestamp": { + "x-fal-order-properties": [ + "start", + "end" + ], + "type": "object", + "properties": { + "end": { + "title": "End Time", + "type": "number", + "description": "The end time of the speech in seconds." + }, + "start": { + "title": "Start Time", + "type": "number", + "description": "The start time of the speech in seconds." + } + }, + "title": "SpeechTimestamp", + "required": [ + "start", + "end" + ] + } + } + }, + "paths": { + "/fal-ai/silero-vad/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/silero-vad/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
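The Silero VAD input and output schemas above translate directly into TypeScript types. A sketch, with a hypothetical helper that aggregates the returned speech timestamps:

```ts
// Sketch: types mirroring SileroVadOutput and SpeechTimestamp as declared
// above, plus a helper that sums total detected speech time.
interface SpeechTimestamp {
  start: number // start of speech, in seconds
  end: number // end of speech, in seconds
}

interface SileroVadOutput {
  has_speech: boolean
  timestamps: SpeechTimestamp[]
}

function totalSpeechSeconds(out: SileroVadOutput): number {
  return out.timestamps.reduce((sum, t) => sum + (t.end - t.start), 0)
}

// With the example payload above ({start: 0.13, end: 1.982} and
// {start: 2.434, end: 3.998}), this yields 3.416 seconds of speech.
```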
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/silero-vad": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SileroVadInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/silero-vad/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SileroVadOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/packages/typescript/ai-fal/json/fal.models.audio-to-video.json b/packages/typescript/ai-fal/json/fal.models.audio-to-video.json new file mode 100644 index 00000000..6bc31136 --- /dev/null +++ b/packages/typescript/ai-fal/json/fal.models.audio-to-video.json @@ -0,0 +1,6770 @@ +{ + "generated_at": "2026-01-28T02:51:51.866Z", + "total_models": 14, + "category": "audio-to-video", + "models": [ + { + "endpoint_id": "fal-ai/ltx-2-19b/distilled/audio-to-video/lora", + "metadata": { + "display_name": "LTX-2 19B Distilled", + "category": "audio-to-video", + "description": "Generate video with audio from audio, text and images using LTX-2 Distilled and custom LoRA", + "status": "active", + "tags": [], + "updated_at": "2026-01-27T16:00:15.983Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8c1644/1iD97rwiiAmVKyzTmnxUC_23ec1d178dcc4b43a8d19c9bbc747179.jpg", + "model_url": "https://fal.run/fal-ai/ltx-2-19b/distilled/audio-to-video/lora", + "license_type": "commercial", + "date": "2026-01-27T15:50:42.658Z", + "group": { + "key": "ltx-2-19b-distilled", + "label": "Audio to Video with LoRA" + }, + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/ltx2-video-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-2-19b/distilled/audio-to-video/lora", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx-2-19b/distilled/audio-to-video/lora queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-2-19b/distilled/audio-to-video/lora", + "category": "audio-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8c1644/1iD97rwiiAmVKyzTmnxUC_23ec1d178dcc4b43a8d19c9bbc747179.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx-2-19b/distilled/audio-to-video/lora", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx-2-19b/distilled/audio-to-video/lora/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." 
+ }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Ltx219bDistilledAudioToVideoLoraInput": { + "title": "LTX2LoRADistilledAudioToVideoInput", + "type": "object", + "properties": { + "match_audio_length": { + "title": "Match Audio Length", + "type": "boolean", + "description": "When enabled, the number of frames will be calculated based on the audio duration and FPS. When disabled, use the specified num_frames.", + "default": true + }, + "use_multiscale": { + "title": "Use Multi-Scale", + "type": "boolean", + "description": "Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details.", + "default": true + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high", + "full" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use.", + "examples": [ + "none" + ], + "default": "none" + }, + "prompt": { + "examples": [ + "A woman speaks to the camera" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the video from." + }, + "fps": { + "minimum": 1, + "title": "FPS", + "type": "number", + "description": "The frames per second of the generated video.", + "maximum": 60, + "default": 25 + }, + "loras": { + "title": "LoRAs", + "type": "array", + "description": "The LoRAs to use for the generation.", + "items": { + "$ref": "#/components/schemas/LoRAInput" + } + }, + "camera_lora": { + "enum": [ + "dolly_in", + "dolly_out", + "dolly_left", + "dolly_right", + "jib_up", + "jib_down", + "static", + "none" + ], + "title": "Camera LoRA", + "type": "string", + "description": "The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.", + "examples": [ + "none" + ], + "default": "none" + }, + "video_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "auto", + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated video. 
Use 'auto' to match the input image dimensions if provided.", + "title": "Video Size", + "default": "landscape_4_3" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "num_frames": { + "description": "The number of frames to generate.", + "type": "integer", + "minimum": 9, + "title": "Number of Frames", + "maximum": 481, + "default": 121 + }, + "image_strength": { + "minimum": 0, + "title": "Image Strength", + "type": "number", + "description": "The strength of the image to use for the video generation.", + "maximum": 1, + "default": 1 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to generate the video from.", + "default": "blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, off-sync audio,incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts." + }, + "camera_lora_scale": { + "minimum": 0, + "title": "Camera LoRA Scale", + "type": "number", + "description": "The scale of the camera LoRA to use. 
This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.", + "maximum": 1, + "default": 1 + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "description": "The write mode of the generated video.", + "default": "balanced" + }, + "video_output_type": { + "enum": [ + "X264 (.mp4)", + "VP9 (.webm)", + "PRORES4444 (.mov)", + "GIF (.gif)" + ], + "title": "Video Output Type", + "type": "string", + "description": "The output type of the generated video.", + "default": "X264 (.mp4)" + }, + "preprocess_audio": { + "title": "Preprocess Audio", + "type": "boolean", + "description": "Whether to preprocess the audio before using it as conditioning.", + "default": true + }, + "image_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Optional URL of an image to use as the first frame of the video.", + "title": "Image URL", + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/ltx-2-a2v-input-image.png" + ] + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "description": "The quality of the generated video.", + "default": "high" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "audio_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/ltx-2-a2v-input-audio.mp3" + ], + "title": "Audio URL", + "type": "string", + "description": "The URL of the audio to generate the video from." + }, + "audio_strength": { + "minimum": 0, + "title": "Audio Strength", + "type": "number", + "description": "Audio conditioning strength. Values below 1.0 will allow the model to change the audio, while a value of exactly 1.0 will use the input audio without modification.", + "maximum": 1, + "default": 1 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The seed for the random number generator.", + "title": "Seed" + }, + "enable_prompt_expansion": { + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "audio_url", + "image_url", + "match_audio_length", + "num_frames", + "video_size", + "use_multiscale", + "fps", + "acceleration", + "camera_lora", + "camera_lora_scale", + "negative_prompt", + "seed", + "enable_prompt_expansion", + "enable_safety_checker", + "video_output_type", + "video_quality", + "video_write_mode", + "sync_mode", + "loras", + "image_strength", + "audio_strength", + "preprocess_audio" + ], + "required": [ + "loras", + "prompt", + "audio_url" + ] + }, + "Ltx219bDistilledAudioToVideoLoraOutput": { + "title": "LTX2AudioToVideoOutput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A woman speaks to the camera" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for the generation." + }, + "seed": { + "examples": [ + 175932751 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used for the random number generator." 
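Per the `required` list just above, only `prompt`, `audio_url` and `loras` are mandatory for the distilled audio-to-video LoRA endpoint; everything else falls back to a declared default (121 frames, 25 fps, `landscape_4_3`, multi-scale on, and `match_audio_length` deriving the frame count from the audio). A sketch of a minimal submission, with a placeholder LoRA path:

```ts
// Sketch: minimal request body for the distilled audio-to-video LoRA
// endpoint. Prompt and audio_url reuse the schema examples above; the
// LoRA path is a placeholder, not a real weight file.
const body = {
  prompt: 'A woman speaks to the camera',
  audio_url:
    'https://storage.googleapis.com/falserverless/example_inputs/ltx-2-a2v-input-audio.mp3',
  loras: [
    // LoRAInput: `path` is a URL or HuggingFace repo ID; scale defaults to 1.
    { path: 'owner/repo-with-lora-weights', scale: 1 }, // hypothetical repo ID
  ],
  // match_audio_length defaults to true, so num_frames is computed from
  // the audio duration rather than taken from the num_frames field.
}

await fetch(
  'https://queue.fal.run/fal-ai/ltx-2-19b/distilled/audio-to-video/lora',
  {
    method: 'POST',
    headers: {
      Authorization: `Key ${process.env.FAL_KEY}`,
      'Content-Type': 'application/json',
    },
    body: JSON.stringify(body),
  },
)
```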
+ }, + "video": { + "examples": [ + { + "file_name": "ltx-2-a2v-output.mp4", + "content_type": "video/mp4", + "url": "https://storage.googleapis.com/falserverless/example_outputs/ltx-2-a2v-output.mp4" + } + ], + "description": "The generated video.", + "$ref": "#/components/schemas/VideoFile" + } + }, + "x-fal-order-properties": [ + "video", + "seed", + "prompt" + ], + "required": [ + "video", + "seed", + "prompt" + ] + }, + "LoRAInput": { + "title": "LoRAInput", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL, HuggingFace repo ID (owner/repo) to lora weights." + }, + "scale": { + "minimum": 0, + "title": "Scale", + "type": "number", + "description": "Scale factor for LoRA application (0.0 to 4.0).", + "maximum": 4, + "default": 1 + }, + "weight_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Name of the LoRA weight. Only used if `path` is a HuggingFace repository, and is only required when the repository contains multiple LoRA weights.", + "title": "Weight Name" + } + }, + "x-fal-order-properties": [ + "path", + "weight_name", + "scale" + ], + "description": "LoRA weight configuration.", + "required": [ + "path" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "duration": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The duration of the video", + "title": "Duration" + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The height of the video", + "title": "Height" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The width of the video", + "title": "Width" + }, + "fps": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The FPS of the video", + "title": "Fps" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "num_frames": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The number of frames in the video", + "title": "Num Frames" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-2-19b/distilled/audio-to-video/lora/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/distilled/audio-to-video/lora/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/distilled/audio-to-video/lora": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx219bDistilledAudioToVideoLoraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/distilled/audio-to-video/lora/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx219bDistilledAudioToVideoLoraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx-2-19b/audio-to-video/lora", + "metadata": { + "display_name": "LTX-2 19B", + "category": "audio-to-video", + "description": "Generate video with audio from audio, text and images using LTX-2 and custom LoRA", + "status": "active", + "tags": [], + "updated_at": "2026-01-27T16:00:22.885Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8c1636/onmCWQl-29GfB5lZT9WDP_c65a4309ad1e4023b6450e6daadf12e5.jpg", + "model_url": "https://fal.run/fal-ai/ltx-2-19b/audio-to-video/lora", + "license_type": "commercial", + "date": "2026-01-27T15:48:53.065Z", + "group": { + "key": "ltx-2-19b", + "label": "Audio to Video with LoRA" + }, + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/ltx2-video-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-2-19b/audio-to-video/lora", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx-2-19b/audio-to-video/lora queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-2-19b/audio-to-video/lora", + "category": "audio-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8c1636/onmCWQl-29GfB5lZT9WDP_c65a4309ad1e4023b6450e6daadf12e5.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx-2-19b/audio-to-video/lora", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx-2-19b/audio-to-video/lora/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Ltx219bAudioToVideoLoraInput": { + "title": "LTX2LoRAAudioToVideoInput", + "type": "object", + "properties": { + "match_audio_length": { + "title": "Match Audio Length", + "type": "boolean", + "description": "When enabled, the number of frames will be calculated based on the audio duration and FPS. When disabled, use the specified num_frames.", + "default": true + }, + "use_multiscale": { + "title": "Use Multi-Scale", + "type": "boolean", + "description": "Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details.", + "default": true + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high", + "full" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use.", + "examples": [ + "regular" + ], + "default": "regular" + }, + "prompt": { + "examples": [ + "A woman speaks to the camera" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the video from." + }, + "fps": { + "minimum": 1, + "title": "FPS", + "type": "number", + "description": "The frames per second of the generated video.", + "maximum": 60, + "default": 25 + }, + "loras": { + "title": "LoRAs", + "type": "array", + "description": "The LoRAs to use for the generation.", + "items": { + "$ref": "#/components/schemas/LoRAInput" + } + }, + "camera_lora": { + "enum": [ + "dolly_in", + "dolly_out", + "dolly_left", + "dolly_right", + "jib_up", + "jib_down", + "static", + "none" + ], + "title": "Camera LoRA", + "type": "string", + "description": "The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.", + "examples": [ + "none" + ], + "default": "none" + }, + "video_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "auto", + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated video. 
Use 'auto' to match the input image dimensions if provided.", + "title": "Video Size", + "default": "landscape_4_3" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "num_frames": { + "description": "The number of frames to generate.", + "type": "integer", + "minimum": 9, + "title": "Number of Frames", + "maximum": 481, + "default": 121 + }, + "image_strength": { + "minimum": 0, + "title": "Image Strength", + "type": "number", + "description": "The strength of the image to use for the video generation.", + "maximum": 1, + "default": 1 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to generate the video from.", + "default": "blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, off-sync audio,incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts." + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance Scale", + "type": "number", + "description": "The guidance scale to use.", + "maximum": 10, + "default": 3 + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "description": "The write mode of the generated video.", + "default": "balanced" + }, + "video_output_type": { + "enum": [ + "X264 (.mp4)", + "VP9 (.webm)", + "PRORES4444 (.mov)", + "GIF (.gif)" + ], + "title": "Video Output Type", + "type": "string", + "description": "The output type of the generated video.", + "default": "X264 (.mp4)" + }, + "camera_lora_scale": { + "minimum": 0, + "title": "Camera LoRA Scale", + "type": "number", + "description": "The scale of the camera LoRA to use. 
This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.", + "maximum": 1, + "default": 1 + }, + "preprocess_audio": { + "title": "Preprocess Audio", + "type": "boolean", + "description": "Whether to preprocess the audio before using it as conditioning.", + "default": true + }, + "image_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Optional URL of an image to use as the first frame of the video.", + "title": "Image URL", + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/ltx-2-a2v-input-image.png" + ] + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "description": "The quality of the generated video.", + "default": "high" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "audio_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/ltx-2-a2v-input-audio.mp3" + ], + "title": "Audio URL", + "type": "string", + "description": "The URL of the audio to generate the video from." + }, + "audio_strength": { + "minimum": 0, + "title": "Audio Strength", + "type": "number", + "description": "Audio conditioning strength. Values below 1.0 will allow the model to change the audio, while a value of exactly 1.0 will use the input audio without modification.", + "maximum": 1, + "default": 1 + }, + "num_inference_steps": { + "minimum": 8, + "title": "Number of Inference Steps", + "type": "integer", + "description": "The number of inference steps to use.", + "maximum": 50, + "default": 40 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The seed for the random number generator.", + "title": "Seed" + }, + "enable_prompt_expansion": { + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "audio_url", + "image_url", + "match_audio_length", + "num_frames", + "video_size", + "use_multiscale", + "fps", + "guidance_scale", + "num_inference_steps", + "acceleration", + "camera_lora", + "camera_lora_scale", + "negative_prompt", + "seed", + "enable_prompt_expansion", + "enable_safety_checker", + "video_output_type", + "video_quality", + "video_write_mode", + "sync_mode", + "loras", + "image_strength", + "audio_strength", + "preprocess_audio" + ], + "required": [ + "loras", + "prompt", + "audio_url" + ] + }, + "Ltx219bAudioToVideoLoraOutput": { + "title": "LTX2AudioToVideoOutput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A woman speaks to the camera" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for the generation." + }, + "seed": { + "examples": [ + 175932751 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used for the random number generator." 
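To make the LoRA input schema concrete, here is a minimal, hand-written sketch of a request body it would accept. Only `prompt`, `audio_url`, and `loras` are required; every omitted field falls back to the documented default, and the `owner/repo` LoRA path below is a placeholder, not a real repository.

```ts
// Minimal body for Ltx219bAudioToVideoLoraInput, hand-derived from the schema.

interface LoRAInput {
  path: string // URL or HuggingFace repo ID (owner/repo)
  scale?: number // 0.0 to 4.0, default 1
  weight_name?: string | null // only needed for multi-LoRA HF repos
}

interface Ltx2LoraAudioToVideoBody {
  prompt: string
  audio_url: string
  loras: Array<LoRAInput>
  camera_lora?:
    | 'dolly_in' | 'dolly_out' | 'dolly_left' | 'dolly_right'
    | 'jib_up' | 'jib_down' | 'static' | 'none'
  match_audio_length?: boolean // default true: frame count derived from the audio
}

const body: Ltx2LoraAudioToVideoBody = {
  prompt: 'A woman speaks to the camera',
  audio_url:
    'https://storage.googleapis.com/falserverless/example_inputs/ltx-2-a2v-input-audio.mp3',
  loras: [{ path: 'owner/repo', scale: 1 }], // placeholder repo ID
}
```

Note that `loras` is required even when empty-handed customization is not needed; the non-LoRA endpoints later in this file omit it entirely.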
+ }, + "video": { + "examples": [ + { + "file_name": "ltx-2-a2v-output.mp4", + "content_type": "video/mp4", + "url": "https://storage.googleapis.com/falserverless/example_outputs/ltx-2-a2v-output.mp4" + } + ], + "description": "The generated video.", + "$ref": "#/components/schemas/VideoFile" + } + }, + "x-fal-order-properties": [ + "video", + "seed", + "prompt" + ], + "required": [ + "video", + "seed", + "prompt" + ] + }, + "LoRAInput": { + "title": "LoRAInput", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL, HuggingFace repo ID (owner/repo) to lora weights." + }, + "scale": { + "minimum": 0, + "title": "Scale", + "type": "number", + "description": "Scale factor for LoRA application (0.0 to 4.0).", + "maximum": 4, + "default": 1 + }, + "weight_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Name of the LoRA weight. Only used if `path` is a HuggingFace repository, and is only required when the repository contains multiple LoRA weights.", + "title": "Weight Name" + } + }, + "x-fal-order-properties": [ + "path", + "weight_name", + "scale" + ], + "description": "LoRA weight configuration.", + "required": [ + "path" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "duration": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The duration of the video", + "title": "Duration" + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The height of the video", + "title": "Height" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The width of the video", + "title": "Width" + }, + "fps": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The FPS of the video", + "title": "Fps" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "num_frames": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The number of frames in the video", + "title": "Num Frames" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-2-19b/audio-to-video/lora/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/audio-to-video/lora/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/audio-to-video/lora": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx219bAudioToVideoLoraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/audio-to-video/lora/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx219bAudioToVideoLoraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx-2-19b/distilled/audio-to-video", + "metadata": { + "display_name": "LTX-2 19B Distilled", + "category": "audio-to-video", + "description": "Generate video with audio from audio, text and images using LTX-2 Distilled", + "status": "active", + "tags": [], + "updated_at": "2026-01-27T16:00:13.346Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8c162d/9Wm-zXtwe1rQzPnRcO57x_5ec4f4b1bc7f4dd48e4f08e1271ae8a2.jpg", + "model_url": "https://fal.run/fal-ai/ltx-2-19b/distilled/audio-to-video", + "license_type": "commercial", + "date": "2026-01-27T15:46:47.750Z", + "group": { + "key": "ltx-2-19b-distilled", + "label": "Audio to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/ltx2-video-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-2-19b/distilled/audio-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx-2-19b/distilled/audio-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-2-19b/distilled/audio-to-video", + "category": "audio-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8c162d/9Wm-zXtwe1rQzPnRcO57x_5ec4f4b1bc7f4dd48e4f08e1271ae8a2.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx-2-19b/distilled/audio-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx-2-19b/distilled/audio-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
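All of these endpoints share the queue protocol spelled out in their `paths` sections: POST the input to receive a `QueueStatus`, poll the status path until it reports `COMPLETED`, then GET the result. A sketch of that round-trip against the endpoint just defined, assuming a Node 18+ runtime for global `fetch`; the `Key` prefix on the Authorization header is fal convention and an assumption here, since the schema only names the header.

```ts
const BASE = 'https://queue.fal.run'
const ENDPOINT = 'fal-ai/ltx-2-19b/audio-to-video/lora'

async function submitAndWait(body: unknown, falKey: string): Promise<unknown> {
  const headers = {
    Authorization: `Key ${falKey}`, // header per securitySchemes; prefix assumed
    'Content-Type': 'application/json',
  }

  // POST returns a QueueStatus: { status, request_id, ... }.
  const submitted = await fetch(`${BASE}/${ENDPOINT}`, {
    method: 'POST',
    headers,
    body: JSON.stringify(body),
  })
  const { request_id } = (await submitted.json()) as { request_id: string }

  // Poll /requests/{request_id}/status until the queue reports COMPLETED.
  for (;;) {
    const res = await fetch(`${BASE}/${ENDPOINT}/requests/${request_id}/status`, { headers })
    const { status } = (await res.json()) as { status: string }
    if (status === 'COMPLETED') break
    await new Promise((resolve) => setTimeout(resolve, 2000)) // 2s poll interval, arbitrary
  }

  // GET /requests/{request_id} returns the typed output (here, Ltx219bAudioToVideoLoraOutput).
  const result = await fetch(`${BASE}/${ENDPOINT}/requests/${request_id}`, { headers })
  return result.json()
}
```

The same function works for every endpoint in this catalog once `ENDPOINT` is swapped, since the status, cancel, and result paths all follow the `/requests/{request_id}` pattern.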
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Ltx219bDistilledAudioToVideoInput": { + "title": "LTX2DistilledAudioToVideoInput", + "type": "object", + "properties": { + "match_audio_length": { + "title": "Match Audio Length", + "type": "boolean", + "description": "When enabled, the number of frames will be calculated based on the audio duration and FPS. When disabled, use the specified num_frames.", + "default": true + }, + "use_multiscale": { + "title": "Use Multi-Scale", + "type": "boolean", + "description": "Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details.", + "default": true + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high", + "full" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use.", + "examples": [ + "none" + ], + "default": "none" + }, + "prompt": { + "examples": [ + "A woman speaks to the camera" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the video from." + }, + "fps": { + "minimum": 1, + "title": "FPS", + "type": "number", + "description": "The frames per second of the generated video.", + "maximum": 60, + "default": 25 + }, + "camera_lora": { + "enum": [ + "dolly_in", + "dolly_out", + "dolly_left", + "dolly_right", + "jib_up", + "jib_down", + "static", + "none" + ], + "title": "Camera LoRA", + "type": "string", + "description": "The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.", + "examples": [ + "none" + ], + "default": "none" + }, + "video_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "auto", + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated video. 
Use 'auto' to match the input image dimensions if provided.", + "title": "Video Size", + "default": "landscape_4_3" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "num_frames": { + "description": "The number of frames to generate.", + "type": "integer", + "minimum": 9, + "title": "Number of Frames", + "maximum": 481, + "default": 121 + }, + "image_strength": { + "minimum": 0, + "title": "Image Strength", + "type": "number", + "description": "The strength of the image to use for the video generation.", + "maximum": 1, + "default": 1 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to generate the video from.", + "default": "blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, off-sync audio,incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts." + }, + "camera_lora_scale": { + "minimum": 0, + "title": "Camera LoRA Scale", + "type": "number", + "description": "The scale of the camera LoRA to use. 
This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.", + "maximum": 1, + "default": 1 + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "description": "The write mode of the generated video.", + "default": "balanced" + }, + "video_output_type": { + "enum": [ + "X264 (.mp4)", + "VP9 (.webm)", + "PRORES4444 (.mov)", + "GIF (.gif)" + ], + "title": "Video Output Type", + "type": "string", + "description": "The output type of the generated video.", + "default": "X264 (.mp4)" + }, + "preprocess_audio": { + "title": "Preprocess Audio", + "type": "boolean", + "description": "Whether to preprocess the audio before using it as conditioning.", + "default": true + }, + "image_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Optional URL of an image to use as the first frame of the video.", + "title": "Image URL", + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/ltx-2-a2v-input-image.png" + ] + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "description": "The quality of the generated video.", + "default": "high" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "audio_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/ltx-2-a2v-input-audio.mp3" + ], + "title": "Audio URL", + "type": "string", + "description": "The URL of the audio to generate the video from." + }, + "audio_strength": { + "minimum": 0, + "title": "Audio Strength", + "type": "number", + "description": "Audio conditioning strength. Values below 1.0 will allow the model to change the audio, while a value of exactly 1.0 will use the input audio without modification.", + "maximum": 1, + "default": 1 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The seed for the random number generator.", + "title": "Seed" + }, + "enable_prompt_expansion": { + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "audio_url", + "image_url", + "match_audio_length", + "num_frames", + "video_size", + "use_multiscale", + "fps", + "acceleration", + "camera_lora", + "camera_lora_scale", + "negative_prompt", + "seed", + "enable_prompt_expansion", + "enable_safety_checker", + "video_output_type", + "video_quality", + "video_write_mode", + "sync_mode", + "image_strength", + "audio_strength", + "preprocess_audio" + ], + "required": [ + "prompt", + "audio_url" + ] + }, + "Ltx219bDistilledAudioToVideoOutput": { + "title": "LTX2AudioToVideoOutput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A woman speaks to the camera" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for the generation." + }, + "seed": { + "examples": [ + 175932751 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used for the random number generator." 
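Worth noting when choosing between variants: the distilled input above drops the sampler controls that the LoRA endpoint earlier exposes. Its property list has no `guidance_scale`, `num_inference_steps`, or `loras`, and its `acceleration` default is `none` rather than `regular`. A type-level sketch of that split, hand-derived from the two schemas; the type names are mine.

```ts
interface Ltx2AudioToVideoShared {
  prompt: string // required on both variants
  audio_url: string // required on both variants
  image_url?: string | null // optional first frame
  match_audio_length?: boolean // default true
  num_frames?: number // 9 to 481, default 121
  acceleration?: 'none' | 'regular' | 'high' | 'full'
}

// Only the non-distilled endpoints expose these sampler knobs.
interface Ltx2SamplerExtras {
  guidance_scale?: number // 1 to 10, default 3
  num_inference_steps?: number // 8 to 50, default 40
}

type DistilledBody = Ltx2AudioToVideoShared
type FullBody = Ltx2AudioToVideoShared & Ltx2SamplerExtras
```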
+ }, + "video": { + "examples": [ + { + "file_name": "ltx-2-a2v-output.mp4", + "content_type": "video/mp4", + "url": "https://storage.googleapis.com/falserverless/example_outputs/ltx-2-a2v-output.mp4" + } + ], + "description": "The generated video.", + "$ref": "#/components/schemas/VideoFile" + } + }, + "x-fal-order-properties": [ + "video", + "seed", + "prompt" + ], + "required": [ + "video", + "seed", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "duration": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The duration of the video", + "title": "Duration" + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The height of the video", + "title": "Height" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The width of the video", + "title": "Width" + }, + "fps": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The FPS of the video", + "title": "Fps" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "num_frames": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The number of frames in the video", + "title": "Num Frames" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-2-19b/distilled/audio-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/distilled/audio-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/distilled/audio-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx219bDistilledAudioToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/distilled/audio-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx219bDistilledAudioToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx-2-19b/audio-to-video", + "metadata": { + "display_name": "LTX-2 19B", + "category": "audio-to-video", + "description": "Generate video with audio from audio, text and images using LTX-2", + "status": "active", + "tags": [], + "updated_at": "2026-01-27T16:00:11.660Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8c1622/piILEBJ9YqZhIb837h1Uf_a0f40e99111e4eb4a271fdc262185bee.jpg", + "model_url": "https://fal.run/fal-ai/ltx-2-19b/audio-to-video", + "license_type": "commercial", + "date": "2026-01-27T15:45:06.060Z", + "group": { + "key": "ltx-2-19b", + "label": "Audio to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/ltx2-video-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-2-19b/audio-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx-2-19b/audio-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-2-19b/audio-to-video", + "category": "audio-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8c1622/piILEBJ9YqZhIb837h1Uf_a0f40e99111e4eb4a271fdc262185bee.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx-2-19b/audio-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx-2-19b/audio-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + 
"description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Ltx219bAudioToVideoInput": { + "title": "LTX2AudioToVideoInput", + "type": "object", + "properties": { + "match_audio_length": { + "title": "Match Audio Length", + "type": "boolean", + "description": "When enabled, the number of frames will be calculated based on the audio duration and FPS. When disabled, use the specified num_frames.", + "default": true + }, + "use_multiscale": { + "title": "Use Multi-Scale", + "type": "boolean", + "description": "Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details.", + "default": true + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high", + "full" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use.", + "examples": [ + "regular" + ], + "default": "regular" + }, + "prompt": { + "examples": [ + "A woman speaks to the camera" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the video from." + }, + "fps": { + "minimum": 1, + "title": "FPS", + "type": "number", + "description": "The frames per second of the generated video.", + "maximum": 60, + "default": 25 + }, + "camera_lora": { + "enum": [ + "dolly_in", + "dolly_out", + "dolly_left", + "dolly_right", + "jib_up", + "jib_down", + "static", + "none" + ], + "title": "Camera LoRA", + "type": "string", + "description": "The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.", + "examples": [ + "none" + ], + "default": "none" + }, + "video_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "auto", + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated video. 
Use 'auto' to match the input image dimensions if provided.", + "title": "Video Size", + "default": "landscape_4_3" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "num_frames": { + "description": "The number of frames to generate.", + "type": "integer", + "minimum": 9, + "title": "Number of Frames", + "maximum": 481, + "default": 121 + }, + "image_strength": { + "minimum": 0, + "title": "Image Strength", + "type": "number", + "description": "The strength of the image to use for the video generation.", + "maximum": 1, + "default": 1 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to generate the video from.", + "default": "blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, off-sync audio,incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts." + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance Scale", + "type": "number", + "description": "The guidance scale to use.", + "maximum": 10, + "default": 3 + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "description": "The write mode of the generated video.", + "default": "balanced" + }, + "video_output_type": { + "enum": [ + "X264 (.mp4)", + "VP9 (.webm)", + "PRORES4444 (.mov)", + "GIF (.gif)" + ], + "title": "Video Output Type", + "type": "string", + "description": "The output type of the generated video.", + "default": "X264 (.mp4)" + }, + "camera_lora_scale": { + "minimum": 0, + "title": "Camera LoRA Scale", + "type": "number", + "description": "The scale of the camera LoRA to use. 
This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.", + "maximum": 1, + "default": 1 + }, + "preprocess_audio": { + "title": "Preprocess Audio", + "type": "boolean", + "description": "Whether to preprocess the audio before using it as conditioning.", + "default": true + }, + "image_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Optional URL of an image to use as the first frame of the video.", + "title": "Image URL", + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/ltx-2-a2v-input-image.png" + ] + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "description": "The quality of the generated video.", + "default": "high" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "audio_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/ltx-2-a2v-input-audio.mp3" + ], + "title": "Audio URL", + "type": "string", + "description": "The URL of the audio to generate the video from." + }, + "audio_strength": { + "minimum": 0, + "title": "Audio Strength", + "type": "number", + "description": "Audio conditioning strength. Values below 1.0 will allow the model to change the audio, while a value of exactly 1.0 will use the input audio without modification.", + "maximum": 1, + "default": 1 + }, + "num_inference_steps": { + "minimum": 8, + "title": "Number of Inference Steps", + "type": "integer", + "description": "The number of inference steps to use.", + "maximum": 50, + "default": 40 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The seed for the random number generator.", + "title": "Seed" + }, + "enable_prompt_expansion": { + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "audio_url", + "image_url", + "match_audio_length", + "num_frames", + "video_size", + "use_multiscale", + "fps", + "guidance_scale", + "num_inference_steps", + "acceleration", + "camera_lora", + "camera_lora_scale", + "negative_prompt", + "seed", + "enable_prompt_expansion", + "enable_safety_checker", + "video_output_type", + "video_quality", + "video_write_mode", + "sync_mode", + "image_strength", + "audio_strength", + "preprocess_audio" + ], + "required": [ + "prompt", + "audio_url" + ] + }, + "Ltx219bAudioToVideoOutput": { + "title": "LTX2AudioToVideoOutput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A woman speaks to the camera" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for the generation." + }, + "seed": { + "examples": [ + 175932751 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used for the random number generator." 
+ }, + "video": { + "examples": [ + { + "file_name": "ltx-2-a2v-output.mp4", + "content_type": "video/mp4", + "url": "https://storage.googleapis.com/falserverless/example_outputs/ltx-2-a2v-output.mp4" + } + ], + "description": "The generated video.", + "$ref": "#/components/schemas/VideoFile" + } + }, + "x-fal-order-properties": [ + "video", + "seed", + "prompt" + ], + "required": [ + "video", + "seed", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "duration": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The duration of the video", + "title": "Duration" + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The height of the video", + "title": "Height" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The width of the video", + "title": "Width" + }, + "fps": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The FPS of the video", + "title": "Fps" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "num_frames": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The number of frames in the video", + "title": "Num Frames" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-2-19b/audio-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
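Every LTX-2 output wraps the result in the `VideoFile` shape above, where only `url` is required and every metadata field is nullable. A defensive save helper reflecting that, assuming Node 18+ (global `fetch`, `node:fs/promises`); the helper name and fallback file name are mine.

```ts
import { writeFile } from 'node:fs/promises'

interface VideoFile {
  url: string
  content_type?: string | null
  file_name?: string | null
  file_size?: number | null
  width?: number | null
  height?: number | null
  fps?: number | null
  duration?: number | null
  num_frames?: number | null
}

async function saveVideo(video: VideoFile): Promise<string> {
  const name = video.file_name ?? 'output.mp4' // schema allows this to be absent
  const res = await fetch(video.url)
  await writeFile(name, new Uint8Array(await res.arrayBuffer()))
  return name
}
```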
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/audio-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/audio-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx219bAudioToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/audio-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx219bAudioToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/elevenlabs/dubbing", + "metadata": { + "display_name": "ElevenLabs Dubbing", + "category": "audio-to-video", + "description": "Generate dubbed videos or audios using ElevenLabs Dubbing feature!", + "status": "active", + "tags": [ + "dubbing", + "audio-to-audio" + ], + "updated_at": "2026-01-26T21:41:36.720Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8a5dca/J7G08QXS3XtfD7m-9dY4g_f57272f4c9cc4b0fa9363b581808c559.jpg", + "model_url": "https://fal.run/fal-ai/elevenlabs/dubbing", + "license_type": "commercial", + "date": "2026-01-14T14:41:55.680Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/elevenlabs/dubbing", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/elevenlabs/dubbing queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/elevenlabs/dubbing", + "category": "audio-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8a5dca/J7G08QXS3XtfD7m-9dY4g_f57272f4c9cc4b0fa9363b581808c559.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/elevenlabs/dubbing", + "documentationUrl": "https://fal.ai/models/fal-ai/elevenlabs/dubbing/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ElevenlabsDubbingInput": { + "title": "DubbingRequest", + "type": "object", + "properties": { + "video_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "URL of the video file to dub. Either audio_url or video_url must be provided. If both are provided, video_url takes priority.", + "title": "Video Url", + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/elevenlabs/e11_dubbing_in.mp4" + ] + }, + "audio_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "URL of the audio file to dub. Either audio_url or video_url must be provided.", + "title": "Audio Url" + }, + "highest_resolution": { + "description": "Whether to use the highest resolution for dubbing.", + "type": "boolean", + "title": "Highest Resolution", + "default": true + }, + "target_lang": { + "examples": [ + "es", + "fr", + "de", + "ja", + "pt", + "zh" + ], + "description": "Target language code for dubbing (ISO 639-1)", + "type": "string", + "title": "Target Lang" + }, + "source_lang": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Source language code. If not provided, will be auto-detected.", + "title": "Source Lang", + "examples": [ + "en", + "es", + "fr" + ] + }, + "num_speakers": { + "anyOf": [ + { + "minimum": 1, + "maximum": 50, + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "Number of speakers in the audio. If not provided, will be auto-detected.", + "title": "Num Speakers" + } + }, + "x-fal-order-properties": [ + "audio_url", + "video_url", + "target_lang", + "source_lang", + "num_speakers", + "highest_resolution" + ], + "required": [ + "target_lang" + ] + }, + "ElevenlabsDubbingOutput": { + "title": "DubbingVideoOutput", + "type": "object", + "properties": { + "target_lang": { + "examples": [ + "es" + ], + "description": "The target language of the dubbed content", + "type": "string", + "title": "Target Lang" + }, + "video": { + "examples": [ + { + "file_size": 1344041, + "file_name": "e11_dubbing_out.mp4", + "content_type": "video/mp4", + "url": "https://storage.googleapis.com/falserverless/example_outputs/elevenlabs/e11_dubbing_out.mp4" + } + ], + "description": "The dubbed video file. Will be populated if video_url was provided in the request.", + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "video", + "target_lang" + ], + "required": [ + "video", + "target_lang" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. 
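One subtlety in `ElevenlabsDubbingInput`: the JSON-level `required` list holds only `target_lang`, but the field descriptions state that at least one of `audio_url` / `video_url` must be provided, with `video_url` taking priority when both are set. A sketch that pushes that invariant into the type; the union encoding is mine, not part of the schema.

```ts
type DubbingBody = {
  target_lang: string // ISO 639-1, e.g. 'es'
  source_lang?: string | null // auto-detected when omitted
  num_speakers?: number | null // 1 to 50, auto-detected when omitted
  highest_resolution?: boolean // default true
} & (
  | { video_url: string; audio_url?: string | null } // video wins if both are set
  | { audio_url: string; video_url?: never }
)

const dubToSpanish: DubbingBody = {
  target_lang: 'es',
  video_url:
    'https://storage.googleapis.com/falserverless/example_inputs/elevenlabs/e11_dubbing_in.mp4',
}
```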
It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/elevenlabs/dubbing/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/elevenlabs/dubbing/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/elevenlabs/dubbing": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ElevenlabsDubbingInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/elevenlabs/dubbing/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ElevenlabsDubbingOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/longcat-multi-avatar/image-audio-to-video", + "metadata": { + "display_name": "Longcat Multi Avatar", + "category": "audio-to-video", + "description": "LongCat-Video-Avatar is an audio-driven video generation model that can generates super-realistic, lip-synchronized long video generation with natural dynamics and consistent identity.", + "status": "active", + "tags": [ + "audio-to-video", + "image-to-video" + ], + "updated_at": "2026-01-26T21:41:40.291Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a899b43/6I92oPiDeQ6BODNf0-OlZ_c35349b935cb4778919d8093bf590439.jpg", + "model_url": "https://fal.run/fal-ai/longcat-multi-avatar/image-audio-to-video", + "license_type": "commercial", + "date": "2026-01-08T20:17:00.208Z", + "group": { + "key": "longcat-avatar", + "label": 
"Image-Multi-Audio to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/longcat-multi-avatar/image-audio-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/longcat-multi-avatar/image-audio-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/longcat-multi-avatar/image-audio-to-video", + "category": "audio-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a899b43/6I92oPiDeQ6BODNf0-OlZ_c35349b935cb4778919d8093bf590439.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/longcat-multi-avatar/image-audio-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/longcat-multi-avatar/image-audio-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LongcatMultiAvatarImageAudioToVideoInput": { + "x-fal-order-properties": [ + "image_url", + "audio_url_person1", + "audio_url_person2", + "prompt", + "negative_prompt", + "bbox_person1", + "bbox_person2", + "audio_type", + "num_inference_steps", + "text_guidance_scale", + "audio_guidance_scale", + "resolution", + "num_segments", + "seed", + "enable_safety_checker" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Static camera, In a professional recording studio, two people stand facing each other, both wearing large headphones. They are speaking clearly into a large condenser microphone suspended between them. They looked at each other affectionately and occasionally shook their heads according to the rhythm. The soundproofed walls and visible recording equipment create an atmosphere focused on capturing high-quality audio as they interact and communicate." + ], + "description": "The prompt to guide the video generation.", + "type": "string", + "title": "Prompt", + "default": "Two people are having a conversation with natural expressions and movements." 
+ }, + "num_inference_steps": { + "minimum": 10, + "description": "The number of inference steps to use.", + "type": "integer", + "maximum": 100, + "title": "Number of Inference Steps", + "default": 30 + }, + "audio_url_person2": { + "description": "The URL of the audio file for person 2 (right side).", + "type": "string", + "title": "Audio URL Person 2", + "default": "https://raw.githubusercontent.com/meituan-longcat/LongCat-Video/refs/heads/main/assets/avatar/multi/sing_woman.WAV" + }, + "enable_safety_checker": { + "description": "Whether to enable safety checker.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "bbox_person1": { + "description": "Bounding box for person 1. If not provided, defaults to left half of image.", + "title": "Bbox Person1", + "allOf": [ + { + "$ref": "#/components/schemas/BoundingBox" + } + ] + }, + "negative_prompt": { + "description": "The negative prompt to avoid in the video generation.", + "type": "string", + "title": "Negative Prompt", + "default": "Close-up, Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards" + }, + "text_guidance_scale": { + "minimum": 1, + "description": "The text guidance scale for classifier-free guidance.", + "type": "number", + "maximum": 10, + "title": "Text Guidance Scale", + "default": 4 + }, + "resolution": { + "enum": [ + "480p", + "720p" + ], + "description": "Resolution of the generated video (480p or 720p). Billing is per video-second (16 frames): 480p is 1 unit per second and 720p is 4 units per second.", + "type": "string", + "title": "Resolution", + "default": "480p" + }, + "audio_type": { + "enum": [ + "para", + "add" + ], + "description": "How to combine the two audio tracks. 'para' (parallel) plays both simultaneously, 'add' (sequential) plays person 1 first then person 2.", + "type": "string", + "title": "Audio Type", + "default": "para" + }, + "image_url": { + "examples": [ + "https://raw.githubusercontent.com/meituan-longcat/LongCat-Video/refs/heads/main/assets/avatar/multi/sing.png" + ], + "description": "The URL of the image containing two speakers.", + "type": "string", + "title": "Image URL" + }, + "audio_url_person1": { + "description": "The URL of the audio file for person 1 (left side).", + "type": "string", + "title": "Audio URL Person 1", + "default": "https://raw.githubusercontent.com/meituan-longcat/LongCat-Video/refs/heads/main/assets/avatar/multi/sing_man.WAV" + }, + "seed": { + "description": "The seed for the random number generator.", + "type": "integer", + "title": "Seed" + }, + "audio_guidance_scale": { + "minimum": 1, + "description": "The audio guidance scale. Higher values may lead to exaggerated mouth movements.", + "type": "number", + "maximum": 10, + "title": "Audio Guidance Scale", + "default": 4 + }, + "bbox_person2": { + "description": "Bounding box for person 2. If not provided, defaults to right half of image.", + "title": "Bbox Person2", + "allOf": [ + { + "$ref": "#/components/schemas/BoundingBox" + } + ] + }, + "num_segments": { + "minimum": 1, + "description": "Number of video segments to generate. Each segment adds ~5 seconds of video. 
First segment is ~5.8s, additional segments are 5s each.", + "type": "integer", + "maximum": 10, + "title": "Number of Segments", + "default": 1 + } + }, + "description": "Request model for multi-speaker image+audio to video generation.", + "title": "MultiSpeakerImageAudioToVideoRequest", + "required": [ + "image_url" + ] + }, + "LongcatMultiAvatarImageAudioToVideoOutput": { + "x-fal-order-properties": [ + "video", + "seed" + ], + "type": "object", + "properties": { + "seed": { + "examples": [ + 424911732 + ], + "description": "The seed used for generation.", + "type": "integer", + "title": "Seed" + }, + "video": { + "examples": [ + { + "content_type": "video/mp4", + "url": "https://v3b.fal.media/files/b/0a87a882/k7N4EBTQnVM9nCW9ylN8i_output_87614f102ba94cc0b101d058a815c81f.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "description": "Response model for multi-speaker image+audio to video generation.", + "title": "MultiSpeakerImageAudioToVideoResponse", + "required": [ + "video", + "seed" + ] + }, + "BoundingBox": { + "x-fal-order-properties": [ + "x", + "y", + "w", + "h", + "label" + ], + "type": "object", + "properties": { + "y": { + "description": "Y-coordinate of the top-left corner", + "type": "number", + "title": "Y" + }, + "x": { + "description": "X-coordinate of the top-left corner", + "type": "number", + "title": "X" + }, + "h": { + "description": "Height of the bounding box", + "type": "number", + "title": "H" + }, + "w": { + "description": "Width of the bounding box", + "type": "number", + "title": "W" + }, + "label": { + "description": "Label of the bounding box", + "type": "string", + "title": "Label" + } + }, + "title": "BoundingBox", + "required": [ + "x", + "y", + "w", + "h", + "label" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/longcat-multi-avatar/image-audio-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/longcat-multi-avatar/image-audio-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/longcat-multi-avatar/image-audio-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LongcatMultiAvatarImageAudioToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/longcat-multi-avatar/image-audio-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LongcatMultiAvatarImageAudioToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/longcat-multi-avatar/image-audio-to-video/multi-speaker", + "metadata": { + "display_name": "Longcat Multi Avatar", + "category": "audio-to-video", + "description": "LongCat-Video-Avatar is an audio-driven video generation model that generates super-realistic, lip-synchronized long videos with natural dynamics and consistent identity.", + "status": "active", + "tags": [ + "audio-to-video", + "image-to-video" + ], + "updated_at": "2026-01-26T21:41:44.229Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a886751/FiWHVdS9IIhHB9H9wF52A_78d180195572488690e4addc6699f893.jpg", + "model_url": "https://fal.run/fal-ai/longcat-multi-avatar/image-audio-to-video/multi-speaker", + "license_type": "commercial", + "date": "2025-12-30T17:19:01.817Z", + "group": { + "key": "longcat-avatar", + "label": "Image & Multi Audio to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "error": { + "code": "expansion_failed", + "message": "OpenAPI schema not available for this endpoint" + } + } + }, + { + "endpoint_id": "fal-ai/longcat-single-avatar/image-audio-to-video", + "metadata": { + "display_name": "Longcat Single Avatar", + "category": "audio-to-video", + "description": "LongCat-Video-Avatar is an audio-driven video generation model that generates super-realistic, lip-synchronized long videos with natural dynamics and consistent identity.", + "status": "active", + "tags": [ + "audio-to-video", + "image-to-video" + ], + "updated_at": "2026-01-26T21:41:44.361Z", + "is_favorited": false, + "thumbnail_url": 
"https://v3b.fal.media/files/b/0a88673c/x25WUIBOLHSgnJqo5Ph6C_cbcdeb6ce3844dc79c98760e44c071c2.jpg", + "model_url": "https://fal.run/fal-ai/longcat-single-avatar/image-audio-to-video", + "license_type": "commercial", + "date": "2025-12-30T17:14:46.673Z", + "group": { + "key": "longcat-avatar", + "label": "Image-Audio to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/longcat-single-avatar/image-audio-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/longcat-single-avatar/image-audio-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/longcat-single-avatar/image-audio-to-video", + "category": "audio-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a88673c/x25WUIBOLHSgnJqo5Ph6C_cbcdeb6ce3844dc79c98760e44c071c2.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/longcat-single-avatar/image-audio-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/longcat-single-avatar/image-audio-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LongcatSingleAvatarImageAudioToVideoInput": { + "description": "Request model for image+audio to video generation.", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A western man stands on stage under dramatic lighting, holding a microphone close to their mouth. Wearing a vibrant red jacket with gold embroidery, the singer is speaking while smoke swirls around them, creating a dynamic and atmospheric scene." + ], + "description": "The prompt to guide the video generation.", + "type": "string", + "title": "Prompt" + }, + "resolution": { + "enum": [ + "480p", + "720p" + ], + "description": "Resolution of the generated video (480p or 720p). Billing is per video-second (16 frames): 480p is 1 unit per second and 720p is 4 units per second.", + "type": "string", + "title": "Resolution", + "default": "480p" + }, + "enable_safety_checker": { + "description": "Whether to enable safety checker.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "audio_guidance_scale": { + "minimum": 1, + "description": "The audio guidance scale. Higher values may lead to exaggerated mouth movements.", + "type": "number", + "title": "Audio Guidance Scale", + "maximum": 10, + "default": 4 + }, + "num_segments": { + "minimum": 1, + "description": "Number of video segments to generate. Each segment adds ~5 seconds of video. 
First segment is ~5.8s, additional segments are 5s each.", + "type": "integer", + "title": "Number of Segments", + "maximum": 10, + "default": 1 + }, + "image_url": { + "examples": [ + "https://raw.githubusercontent.com/meituan-longcat/LongCat-Video/refs/heads/main/assets/avatar/single/man.png" + ], + "description": "The URL of the image to animate.", + "type": "string", + "title": "Image URL" + }, + "audio_url": { + "examples": [ + "https://raw.githubusercontent.com/meituan-longcat/LongCat-Video/refs/heads/main/assets/avatar/single/man.mp3" + ], + "description": "The URL of the audio file to drive the avatar.", + "type": "string", + "title": "Audio URL" + }, + "num_inference_steps": { + "minimum": 10, + "description": "The number of inference steps to use.", + "type": "integer", + "title": "Number of Inference Steps", + "maximum": 100, + "default": 30 + }, + "seed": { + "description": "The seed for the random number generator.", + "type": "integer", + "title": "Seed" + }, + "negative_prompt": { + "description": "The negative prompt to avoid in the video generation.", + "type": "string", + "title": "Negative Prompt", + "default": "Close-up, Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards" + }, + "text_guidance_scale": { + "minimum": 1, + "description": "The text guidance scale for classifier-free guidance.", + "type": "number", + "title": "Text Guidance Scale", + "maximum": 10, + "default": 4 + } + }, + "title": "ImageAudioToVideoRequest", + "x-fal-order-properties": [ + "image_url", + "audio_url", + "prompt", + "negative_prompt", + "num_inference_steps", + "text_guidance_scale", + "audio_guidance_scale", + "resolution", + "num_segments", + "seed", + "enable_safety_checker" + ], + "required": [ + "prompt", + "image_url", + "audio_url" + ] + }, + "LongcatSingleAvatarImageAudioToVideoOutput": { + "description": "Response model for image+audio to video generation.", + "type": "object", + "properties": { + "seed": { + "examples": [ + 424911732 + ], + "description": "The seed used for generation.", + "type": "integer", + "title": "Seed" + }, + "video": { + "examples": [ + { + "content_type": "video/mp4", + "url": "https://v3b.fal.media/files/b/0a87a0ce/2TnxxI02RHnLUvjGzhZa7_output_86da44aa6eed40ff9061b2213fb19793.mp4" + } + ], + "description": "The generated video file.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "ImageAudioToVideoResponse", + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/longcat-single-avatar/image-audio-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/longcat-single-avatar/image-audio-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/longcat-single-avatar/image-audio-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LongcatSingleAvatarImageAudioToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/longcat-single-avatar/image-audio-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LongcatSingleAvatarImageAudioToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/longcat-single-avatar/audio-to-video", + "metadata": { + "display_name": "Longcat Single Avatar", + "category": "audio-to-video", + "description": "LongCat-Video-Avatar is an audio-driven video generation model that generates super-realistic, lip-synchronized long videos with natural dynamics and consistent identity.", + "status": "active", + "tags": [ + "audio-to-video" + ], + "updated_at": "2026-01-26T21:41:44.491Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8866d6/omvktK-AVCuPF0QU33P4A_8b2b938566bf4a85830f5b90b84180cf.jpg", + "model_url": "https://fal.run/fal-ai/longcat-single-avatar/audio-to-video", + "license_type": "commercial", + "date": 
"2025-12-30T17:00:38.944Z", + "group": { + "key": "longcat-avatar", + "label": "Audio to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/longcat-single-avatar/audio-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/longcat-single-avatar/audio-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/longcat-single-avatar/audio-to-video", + "category": "audio-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8866d6/omvktK-AVCuPF0QU33P4A_8b2b938566bf4a85830f5b90b84180cf.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/longcat-single-avatar/audio-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/longcat-single-avatar/audio-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LongcatSingleAvatarAudioToVideoInput": { + "description": "Request model for audio-to-video generation.", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A person is talking naturally with natural expressions and movements." + ], + "description": "The prompt to guide the video generation.", + "type": "string", + "title": "Prompt", + "default": "A person is talking naturally with natural expressions and movements." + }, + "resolution": { + "enum": [ + "480p", + "720p" + ], + "description": "Resolution of the generated video (480p or 720p). Billing is per video-second (16 frames): 480p is 1 unit per second and 720p is 4 units per second.", + "type": "string", + "title": "Resolution", + "default": "480p" + }, + "enable_safety_checker": { + "description": "Whether to enable safety checker.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "audio_guidance_scale": { + "minimum": 1, + "description": "The audio guidance scale. Higher values may lead to exaggerated mouth movements.", + "type": "number", + "title": "Audio Guidance Scale", + "maximum": 10, + "default": 4 + }, + "num_segments": { + "minimum": 1, + "description": "Number of video segments to generate. Each segment adds ~5 seconds of video. 
First segment is ~5.8s, additional segments are 5s each.", + "type": "integer", + "title": "Number of Segments", + "maximum": 10, + "default": 1 + }, + "audio_url": { + "examples": [ + "https://v3b.fal.media/files/b/0a87a827/QDAEdCQOPXxYWPUl2fyTY_4421psm.mp3" + ], + "description": "The URL of the audio file to drive the avatar.", + "type": "string", + "title": "Audio URL" + }, + "num_inference_steps": { + "minimum": 10, + "description": "The number of inference steps to use.", + "type": "integer", + "title": "Number of Inference Steps", + "maximum": 100, + "default": 30 + }, + "seed": { + "description": "The seed for the random number generator.", + "type": "integer", + "title": "Seed" + }, + "negative_prompt": { + "description": "The negative prompt to avoid in the video generation.", + "type": "string", + "title": "Negative Prompt", + "default": "Close-up, Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards" + }, + "text_guidance_scale": { + "minimum": 1, + "description": "The text guidance scale for classifier-free guidance.", + "type": "number", + "title": "Text Guidance Scale", + "maximum": 10, + "default": 4 + } + }, + "title": "AudioToVideoRequest", + "x-fal-order-properties": [ + "audio_url", + "prompt", + "negative_prompt", + "num_inference_steps", + "text_guidance_scale", + "audio_guidance_scale", + "resolution", + "num_segments", + "seed", + "enable_safety_checker" + ], + "required": [ + "audio_url" + ] + }, + "LongcatSingleAvatarAudioToVideoOutput": { + "description": "Response model for audio-to-video generation (no reference image).", + "type": "object", + "properties": { + "seed": { + "examples": [ + 424911732 + ], + "description": "The seed used for generation.", + "type": "integer", + "title": "Seed" + }, + "video": { + "examples": [ + { + "content_type": "video/mp4", + "url": "https://v3b.fal.media/files/b/0a879d5d/UfaJ-sridj9C6IjSNWLYk_output_27368fcd87a34a0fb2929ed926cd71f0.mp4" + } + ], + "description": "The generated video file.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "AudioToVideoResponse", + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/longcat-single-avatar/audio-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/longcat-single-avatar/audio-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/longcat-single-avatar/audio-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LongcatSingleAvatarAudioToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/longcat-single-avatar/audio-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LongcatSingleAvatarAudioToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "argil/avatars/audio-to-video", + "metadata": { + "display_name": "Avatars Audio to Video", + "category": "audio-to-video", + "description": "High-quality avatar videos that feel real, generated from your audio", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:59.192Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/penguin/I9P-RPQjSACURLjmQX0XV_aaaeea3d3fc94835b1ad3c4f98ebd0ea.jpg", + "model_url": "https://fal.run/argil/avatars/audio-to-video", + "license_type": "commercial", + "date": "2025-09-01T09:42:32.378Z", + "group": { + "key": "argil-avatar", + "label": "Audio to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + 
"title": "Queue OpenAPI for argil/avatars/audio-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the argil/avatars/audio-to-video queue.", + "x-fal-metadata": { + "endpointId": "argil/avatars/audio-to-video", + "category": "audio-to-video", + "thumbnailUrl": "https://fal.media/files/penguin/I9P-RPQjSACURLjmQX0XV_aaaeea3d3fc94835b1ad3c4f98ebd0ea.jpg", + "playgroundUrl": "https://fal.ai/models/argil/avatars/audio-to-video", + "documentationUrl": "https://fal.ai/models/argil/avatars/audio-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "AvatarsAudioToVideoInput": { + "title": "InferenceRequest", + "type": "object", + "properties": { + "avatar": { + "enum": [ + "Mia outdoor (UGC)", + "Lara (Masterclass)", + "Ines (UGC)", + "Maria (Masterclass)", + "Emma (UGC)", + "Sienna (Masterclass)", + "Elena (UGC)", + "Jasmine (Masterclass)", + "Amara (Masterclass)", + "Ryan podcast (UGC)", + "Tyler (Masterclass)", + "Jayse (Masterclass)", + "Paul (Masterclass)", + "Matteo (UGC)", + "Daniel car (UGC)", + "Dario (Masterclass)", + "Viva (Masterclass)", + "Chen (Masterclass)", + "Alex (Masterclass)", + "Vanessa (UGC)", + "Laurent (UGC)", + "Noemie car (UGC)", + "Brandon (UGC)", + "Byron (Masterclass)", + "Calista (Masterclass)", + "Milo (Masterclass)", + "Fabien (Masterclass)", + "Rose (UGC)" + ], + "title": "Avatar", + "type": "string", + "examples": [ + "Noemie car (UGC)" + ] + }, + "remove_background": { + "title": "Remove Background", + "type": "boolean", + "description": "Enabling the remove background feature will result in a 50% increase in the price.", + "default": false + }, + "audio_url": { + "examples": [ + { + "url": "https://argildotai.s3.us-east-1.amazonaws.com/fal-resource/example_fal.mp3" + } + ], + "title": "Audio Url", + "type": "string" + } + }, + "x-fal-order-properties": [ + "avatar", + "audio_url", + "remove_background" + ], + "required": [ + "avatar", + "audio_url" + ] + }, + "AvatarsAudioToVideoOutput": { + "title": "InferenceResult", + "type": "object", + "properties": { + "moderation_transcription": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Moderation Transcription" + }, + "moderation_error": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Moderation Error" + }, + "moderation_flagged": { + "title": "Moderation Flagged", + "type": "boolean", + "default": false + }, + "video": { + "anyOf": [ + { + "$ref": "#/components/schemas/Video" + }, + { + "type": "null" + } + ], + "examples": [ + { + "url": 
"https://argildotai.s3.us-east-1.amazonaws.com/fal-resource/example_fal.mp4" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "moderation_flagged", + "moderation_transcription", + "moderation_error" + ] + }, + "Video": { + "title": "Video", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/argil/avatars/audio-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/argil/avatars/audio-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/argil/avatars/audio-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AvatarsAudioToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/argil/avatars/audio-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AvatarsAudioToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan/v2.2-14b/speech-to-video", + "metadata": { + "display_name": "Wan-2.2 Speech-to-Video 14B", + "category": "audio-to-video", + "description": "Wan-S2V is a video model that generates high-quality videos from static images and audio, with realistic facial expressions, body movements, and professional camera work for film and television applications", + "status": "active", + "tags": [ + "audio-to-video", + "talking-head" + ], + "updated_at": "2026-01-26T21:43:00.077Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/monkey/RBd4Rg392hzKwxahCIuRb_503c4ef0d3a141c09e4060aa0e3ab5e1.jpg", + "model_url": "https://fal.run/fal-ai/wan/v2.2-14b/speech-to-video", + "license_type": "commercial", + "date": "2025-08-27T03:34:19.544Z", + "group": { + "key": "wan-v22-large", + "label": "Speech to Video" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 5, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan/v2.2-14b/speech-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan/v2.2-14b/speech-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan/v2.2-14b/speech-to-video", + "category": "audio-to-video", + "thumbnailUrl": "https://fal.media/files/monkey/RBd4Rg392hzKwxahCIuRb_503c4ef0d3a141c09e4060aa0e3ab5e1.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan/v2.2-14b/speech-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/wan/v2.2-14b/speech-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanV2214bSpeechToVideoInput": { + "title": "WanS2VRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Summer beach vacation style, a white cat wearing sunglasses sits on a surfboard." + ], + "description": "The text prompt used for video generation.", + "type": "string", + "title": "Prompt" + }, + "shift": { + "description": "Shift value for the video. Must be between 1.0 and 10.0.", + "type": "number", + "minimum": 1, + "maximum": 10, + "title": "Shift", + "examples": [ + 5 + ], + "default": 5 + }, + "frames_per_second": { + "description": "Frames per second of the generated video. Must be between 4 and 60. When using interpolation and `adjust_fps_for_interpolation` is set to true (the default), the final FPS will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If `adjust_fps_for_interpolation` is set to false, this value will be used as-is.", + "type": "integer", + "minimum": 4, + "maximum": 60, + "title": "Frames per Second", + "examples": [ + 16 + ], + "default": 16 + }, + "guidance_scale": { + "description": "Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.", + "type": "number", + "minimum": 1, + "maximum": 10, + "title": "Guidance Scale", + "examples": [ + 3.5 + ], + "default": 3.5 + }, + "num_frames": { + "description": "Number of frames to generate. Must be between 40 and 120, and a multiple of 4.", + "type": "integer", + "minimum": 40, + "maximum": 120, + "title": "Number of Frames", + "examples": [ + 80 + ], + "multipleOf": 4, + "default": 80 + }, + "enable_safety_checker": { + "examples": [ + true + ], + "description": "If set to true, input data will be checked for safety before processing.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": false + }, + "negative_prompt": { + "description": "Negative prompt for video generation.", + "type": "string", + "title": "Negative Prompt", + "default": "" + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "description": "The write mode of the output video. Fast write mode gives faster results but a larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size.", + "type": "string", + "title": "Video Write Mode", + "examples": [ + "balanced" + ], + "default": "balanced" + }, + "resolution": { + "enum": [ + "480p", + "580p", + "720p" + ], + "description": "Resolution of the generated video (480p, 580p, or 720p).", + "type": "string", + "title": "Resolution", + "examples": [ + "480p" + ], + "default": "480p" + }, + "enable_output_safety_checker": { + "examples": [ + false + ], + "description": "If set to true, output video will be checked for safety after generation.", + "type": "boolean", + "title": "Enable Output Safety Checker", + "default": false + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/wan_s2v_cat.png" + ], + "description": "URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped.", + "type": "string", + "title": "Image URL" + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "description": "The quality of the output video. 
Higher quality means better visual quality but larger file size.", + "type": "string", + "title": "Video Quality", + "examples": [ + "high" + ], + "default": "high" + }, + "audio_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/wan_s2v_talk.wav" + ], + "description": "The URL of the audio file.", + "type": "string", + "title": "Audio URL" + }, + "num_inference_steps": { + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "type": "integer", + "minimum": 2, + "maximum": 40, + "title": "Number of Inference Steps", + "examples": [ + 27 + ], + "default": 27 + }, + "seed": { + "description": "Random seed for reproducibility. If None, a random seed is chosen.", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "num_frames", + "frames_per_second", + "seed", + "resolution", + "num_inference_steps", + "enable_safety_checker", + "enable_output_safety_checker", + "guidance_scale", + "shift", + "video_quality", + "video_write_mode", + "image_url", + "audio_url" + ], + "required": [ + "prompt", + "image_url", + "audio_url" + ] + }, + "WanV2214bSpeechToVideoOutput": { + "title": "WanS2VResponse", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 4685303, + "file_name": "2c7ab2540af44eceaf5ffde4e8d094ed.mp4", + "content_type": "application/octet-stream", + "url": "https://v3.fal.media/files/panda/f7tXRCjvwEcVlmxHuw8kO_2c7ab2540af44eceaf5ffde4e8d094ed.mp4" + } + ], + "description": "The generated video file.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan/v2.2-14b/speech-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-14b/speech-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-14b/speech-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanV2214bSpeechToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-14b/speech-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanV2214bSpeechToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/stable-avatar", + "metadata": { + "display_name": "Stable Avatar", + "category": "audio-to-video", + "description": "Stable Avatar generates audio-driven video avatars up to five minutes long", + "status": "active", + "tags": [ + "stable-avatar", + "talking-head", + "audio-to-video" + ], + "updated_at": "2026-01-26T21:43:04.337Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/penguin/kzjRSbBVQ7m_aZa-9_Uuy_4efc689beb9c4a57ac2cde9eaa31e09e.jpg", + "model_url": "https://fal.run/fal-ai/stable-avatar", + "license_type": "commercial", + "date": "2025-08-14T03:19:21.185Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/stable-avatar", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/stable-avatar queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/stable-avatar", + "category": "audio-to-video", + "thumbnailUrl": "https://fal.media/files/penguin/kzjRSbBVQ7m_aZa-9_Uuy_4efc689beb9c4a57ac2cde9eaa31e09e.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/stable-avatar", + "documentationUrl": "https://fal.ai/models/fal-ai/stable-avatar/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "StableAvatarInput": { + "title": "StableAvatarRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A person is in a relaxed pose. As the video progresses, the character speaks while arm and body movements are minimal and consistent with a natural speaking posture. Hand movements remain minimal. Don't blink too often. Preserve background integrity matching the reference image's spatial configuration, lighting conditions, and color temperature." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for the video generation." + }, + "aspect_ratio": { + "enum": [ + "16:9", + "1:1", + "9:16", + "auto" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the video to generate. If 'auto', the aspect ratio will be determined by the reference image.", + "default": "auto" + }, + "perturbation": { + "minimum": 0, + "title": "Perturbation", + "type": "number", + "maximum": 1, + "description": "The amount of perturbation to use for the video generation. 0.0 means no perturbation, 1.0 means full perturbation.", + "default": 0.1 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/stable-avatar-input-image.png" + ], + "title": "Image URL", + "type": "string", + "description": "The URL of the image to use as a reference for the video generation." + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance Scale", + "type": "number", + "maximum": 10, + "description": "The guidance scale to use for the video generation.", + "default": 5 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed to use for the video generation." + }, + "num_inference_steps": { + "minimum": 10, + "title": "Number of Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps to use for the video generation.", + "default": 50 + }, + "audio_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/stable-avatar-input-audio.mp3" + ], + "title": "Audio URL", + "type": "string", + "description": "The URL of the audio to use as a reference for the video generation." 
+ }, + "audio_guidance_scale": { + "minimum": 0, + "title": "Audio Guidance Scale", + "type": "number", + "maximum": 10, + "description": "The audio guidance scale to use for the video generation.", + "default": 4 + } + }, + "x-fal-order-properties": [ + "image_url", + "audio_url", + "prompt", + "aspect_ratio", + "guidance_scale", + "audio_guidance_scale", + "num_inference_steps", + "seed", + "perturbation" + ], + "required": [ + "image_url", + "audio_url", + "prompt" + ] + }, + "StableAvatarOutput": { + "title": "StableAvatarResponse", + "type": "object", + "properties": { + "video": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_outputs/stable-avatar-output.mp4" + ], + "description": "The generated video file.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/stable-avatar/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/stable-avatar/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/stable-avatar": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StableAvatarInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/stable-avatar/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StableAvatarOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/echomimic-v3", + "metadata": { + "display_name": "EchoMimic V3", + "category": "audio-to-video", + "description": "EchoMimic V3 generates a talking avatar video from a picture, audio and text prompt.", + "status": "active", + "tags": [ + "echomimic", + "talking-head", + "audio-to-video" + ], + "updated_at": "2026-01-26T21:43:05.139Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/elephant/pnsOBA-jcFjvGsQpILHy9_ed79671c56034d6db4376279665dc8ab.jpg", + "model_url": "https://fal.run/fal-ai/echomimic-v3", + "license_type": "commercial", + "date": "2025-08-13T02:36:00.352Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/echomimic-v3", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/echomimic-v3 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/echomimic-v3", + "category": "audio-to-video", + "thumbnailUrl": "https://fal.media/files/elephant/pnsOBA-jcFjvGsQpILHy9_ed79671c56034d6db4376279665dc8ab.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/echomimic-v3", + "documentationUrl": "https://fal.ai/models/fal-ai/echomimic-v3/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "EchomimicV3Input": { + "title": "EchoMimicRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A person is in a relaxed pose. As the video progresses, the character speaks while arm and body movements are minimal and consistent with a natural speaking posture. Hand movements remain minimal. 
Don't blink too often. Preserve background integrity matching the reference image's spatial configuration, lighting conditions, and color temperature." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for the video generation." + }, + "audio_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/echo-mimic-input-audio.mp3" + ], + "title": "Audio URL", + "type": "string", + "description": "The URL of the audio to use as a reference for the video generation." + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/echo-mimic-input-image.png" + ], + "title": "Image URL", + "type": "string", + "description": "The URL of the image to use as a reference for the video generation." + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance Scale", + "type": "number", + "maximum": 10, + "description": "The guidance scale to use for the video generation.", + "default": 4.5 + }, + "audio_guidance_scale": { + "minimum": 0, + "title": "Audio Guidance Scale", + "type": "number", + "maximum": 10, + "description": "The audio guidance scale to use for the video generation.", + "default": 2.5 + }, + "num_frames_per_generation": { + "minimum": 49, + "title": "Number of frames per generation", + "type": "integer", + "maximum": 161, + "description": "The number of frames to generate at once.", + "default": 121 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to use for the video generation.", + "default": "" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed to use for the video generation." + } + }, + "x-fal-order-properties": [ + "image_url", + "audio_url", + "prompt", + "negative_prompt", + "num_frames_per_generation", + "guidance_scale", + "audio_guidance_scale", + "seed" + ], + "required": [ + "image_url", + "audio_url", + "prompt" + ] + }, + "EchomimicV3Output": { + "title": "EchoMimicResponse", + "type": "object", + "properties": { + "video": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_outputs/echo-mimic-output.mp4" + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/echomimic-v3/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/echomimic-v3/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/echomimic-v3": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/EchomimicV3Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/echomimic-v3/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/EchomimicV3Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "veed/avatars/audio-to-video", + "metadata": { + "display_name": "Avatars", + "category": "audio-to-video", + "description": "Generate high-quality videos with UGC-like avatars from audio", + "status": "active", + "tags": [ + "lipsync", + "audio-to-video" + ], + "updated_at": "2026-01-26T21:43:38.412Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/veed_logo.webp", + "model_url": "https://fal.run/veed/avatars/audio-to-video", + "license_type": "commercial", + "date": "2025-05-28T14:17:50.831Z", + "group": { + "key": "veed-avatars-1", + "label": "Audio To Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for veed/avatars/audio-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the veed/avatars/audio-to-video queue.", + "x-fal-metadata": { + "endpointId": "veed/avatars/audio-to-video", + "category": "audio-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/veed_logo.webp", + "playgroundUrl": "https://fal.ai/models/veed/avatars/audio-to-video", + 
"documentationUrl": "https://fal.ai/models/veed/avatars/audio-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "AvatarsAudioToVideoInput": { + "title": "Audio2VideoInput", + "type": "object", + "properties": { + "audio_url": { + "examples": [ + "https://v3.fal.media/files/lion/OXiM5_Cve4kQ0ZcXmVzq4_product_presentation.mp3" + ], + "maxLength": 2083, + "type": "string", + "minLength": 1, + "title": "Audio Url", + "format": "uri" + }, + "avatar_id": { + "enum": [ + "emily_vertical_primary", + "emily_vertical_secondary", + "marcus_vertical_primary", + "marcus_vertical_secondary", + "mira_vertical_primary", + "mira_vertical_secondary", + "jasmine_vertical_primary", + "jasmine_vertical_secondary", + "jasmine_vertical_walking", + "aisha_vertical_walking", + "elena_vertical_primary", + "elena_vertical_secondary", + "any_male_vertical_primary", + "any_female_vertical_primary", + "any_male_vertical_secondary", + "any_female_vertical_secondary", + "any_female_vertical_walking", + "emily_primary", + "emily_side", + "marcus_primary", + "marcus_side", + "aisha_walking", + "elena_primary", + "elena_side", + "any_male_primary", + "any_female_primary", + "any_male_side", + "any_female_side" + ], + "description": "The avatar to use for the video", + "type": "string", + "title": "Avatar Id" + } + }, + "x-fal-order-properties": [ + "avatar_id", + "audio_url" + ], + "required": [ + "avatar_id", + "audio_url" + ] + }, + "AvatarsAudioToVideoOutput": { + "title": "AvatarsAppOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "content_type": "video/mp4", + "url": "https://v3.fal.media/files/panda/kt9d4vZ8Mfw_WzYnvr2Q0_tmp0ir4znsr.mp4" + } + ], + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/veed/avatars/audio-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/veed/avatars/audio-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/veed/avatars/audio-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AvatarsAudioToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/veed/avatars/audio-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AvatarsAudioToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/packages/typescript/ai-fal/json/fal.models.image-to-3d.json b/packages/typescript/ai-fal/json/fal.models.image-to-3d.json new file mode 100644 index 00000000..71825038 --- /dev/null +++ b/packages/typescript/ai-fal/json/fal.models.image-to-3d.json @@ -0,0 +1,11212 @@ +{ + "generated_at": "2026-01-28T02:51:51.875Z", + "total_models": 25, + "category": "image-to-3d", + "models": [ + { + "endpoint_id": "fal-ai/trellis-2", + "metadata": { + "display_name": "Trellis 2", + "category": "image-to-3d", + "description": "Generate 3D models from your images using Trellis 2. 
A native 3D generative model enabling versatile and high-quality 3D asset creation.", + "status": "active", + "tags": [ + "image-to-3D" + ], + "updated_at": "2026-01-26T21:41:50.792Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a87573c/DwQCx7YBl36dnFSHaU8OT_bfc26b64d210462489530b5d5d5d98e1.jpg", + "model_url": "https://fal.run/fal-ai/trellis-2", + "license_type": "commercial", + "date": "2025-12-17T21:49:28.133Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/trellis-2", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/trellis-2 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/trellis-2", + "category": "image-to-3d", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a87573c/DwQCx7YBl36dnFSHaU8OT_bfc26b64d210462489530b5d5d5d98e1.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/trellis-2", + "documentationUrl": "https://fal.ai/models/fal-ai/trellis-2/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Trellis2Input": { + "title": "SingleImageInputModel", + "type": "object", + "properties": { + "remesh_band": { + "minimum": 0, + "title": "Remesh Band", + "type": "number", + "maximum": 4, + "default": 1 + }, + "ss_guidance_rescale": { + "minimum": 0, + "title": "Ss Guidance Rescale", + "type": "number", + "maximum": 1, + "default": 0.7 + }, + "ss_rescale_t": { + "minimum": 1, + "title": "Ss Rescale T", + "type": "number", + "maximum": 6, + "default": 5 + }, + "shape_slat_sampling_steps": { + "minimum": 1, + "title": "Shape Slat Sampling Steps", + "type": "integer", + "maximum": 50, + "default": 12 + }, + "tex_slat_rescale_t": { + "minimum": 1, + "title": "Tex Slat Rescale T", + "type": "number", + "maximum": 6, + "default": 3 + }, + "ss_guidance_strength": { + "minimum": 0, + "title": "Ss Guidance Strength", + "type": "number", + "maximum": 10, + "default": 7.5 + }, + "ss_sampling_steps": { + "minimum": 1, + "title": "Ss Sampling Steps", + "type": "integer", + "maximum": 50, + "default": 12 + }, + "tex_slat_sampling_steps": { + "minimum": 1, + "title": "Tex Slat Sampling Steps", + "type": "integer", + "maximum": 50, + "default": 12 + }, + "remesh_project": { + "minimum": 0, + "title": "Remesh Project", + "type": "number", + "maximum": 1, + "default": 0 + }, + "texture_size": { + "enum": [ + 1024, + 2048, + 4096 + ], + "title": "Texture Size", + "type": "integer", + "description": "Texture resolution", + "default": 2048 + }, + "shape_slat_rescale_t": { + "minimum": 1, + "title": "Shape Slat Rescale T", + "type": "number", + "maximum": 6, + "default": 3 + }, + "resolution": { + "enum": [ + 512, + 1024, + 1536 + ], + "title": "Resolution", + "type": "integer", + "description": "Output resolution; higher is slower but more detailed", + "default": 1024 + }, + "remesh": { + "title": "Remesh", + "type": "boolean", + "description": "Run remeshing (slower; often improves topology)", + "default": true + }, + "tex_slat_guidance_rescale": { + "minimum": 0, + "title": "Tex Slat Guidance Rescale", + "type": "number", + "maximum": 1, + "default": 0 + }, + "shape_slat_guidance_rescale": { + "minimum": 0, + "title": "Shape Slat Guidance Rescale", + "type": "number", + "maximum": 1, + "default": 0.5 + }, + "image_url": { + "examples": [ + "https://v3b.fal.media/files/b/0a86b60d/xkpao5B0uxmH0tmJm0HVL_2fe35ce1-fe44-475b-b582-6846a149537c.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the input image to convert to 3D" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility" + }, + "shape_slat_guidance_strength": { + "minimum": 0, + "title": "Shape Slat Guidance Strength", + "type": "number", + "maximum": 10, + "default": 7.5 + }, + "tex_slat_guidance_strength": { + "minimum": 0, + "title": "Tex Slat Guidance Strength", + "type": "number", + "maximum": 10, + "default": 1 + }, + "decimation_target": { + "minimum": 100000, + "title": "Decimation Target", + "type": "integer", + "maximum": 2000000, + "description": "Target vertex count for mesh simplification during export", + "default": 500000 + } + }, + "x-fal-order-properties": [ + "seed", + "resolution", + "ss_guidance_strength", + "ss_guidance_rescale", + "ss_sampling_steps", + "ss_rescale_t", + "shape_slat_guidance_strength", + "shape_slat_guidance_rescale", + "shape_slat_sampling_steps", + "shape_slat_rescale_t", + "tex_slat_guidance_strength", + "tex_slat_guidance_rescale", + "tex_slat_sampling_steps", + 
"tex_slat_rescale_t", + "decimation_target", + "texture_size", + "remesh", + "remesh_band", + "remesh_project", + "image_url" + ], + "required": [ + "image_url" + ] + }, + "Trellis2Output": { + "title": "ObjectOutput", + "type": "object", + "properties": { + "model_glb": { + "examples": [ + { + "url": "https://v3b.fal.media/files/b/0a86b61d/DNmTkiWHUQ8k-rG6aussB_trellis2_68d6300f70f34d23b69a912b5fe60487.glb" + } + ], + "title": "Model Glb", + "description": "Generated 3D GLB file", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "model_glb" + ], + "required": [ + "model_glb" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/trellis-2/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/trellis-2/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/trellis-2": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Trellis2Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/trellis-2/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Trellis2Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/hunyuan3d-v3/sketch-to-3d", + "metadata": { + "display_name": "Hunyuan3d V3", + "category": "image-to-3d", + "description": "Create your imagined 3D models with just text. Production-ready, export-ready professional assets with realistic lighting and materials in minutes.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:52.104Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a86970c/UWyfBY4fR6fmAiR5LKxvn_97d2566780be4f0d8aa37d5b437fb879.jpg", + "model_url": "https://fal.run/fal-ai/hunyuan3d-v3/sketch-to-3d", + "license_type": "commercial", + "date": "2025-12-16T22:12:40.672Z", + "group": { + "key": "hunyuan3d-v3", + "label": "Sketch to 3D" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/hunyuan3d-v3/sketch-to-3d", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/hunyuan3d-v3/sketch-to-3d queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/hunyuan3d-v3/sketch-to-3d", + "category": "image-to-3d", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a86970c/UWyfBY4fR6fmAiR5LKxvn_97d2566780be4f0d8aa37d5b437fb879.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/hunyuan3d-v3/sketch-to-3d", + "documentationUrl": "https://fal.ai/models/fal-ai/hunyuan3d-v3/sketch-to-3d/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Hunyuan3dV3SketchTo3dInput": { + "title": "SketchTo3DInput", + "type": "object", + "properties": { + "input_image_url": { + "description": "URL of sketch or line art image to transform into a 3D model. Image resolution must be between 128x128 and 5000x5000 pixels.", + "type": "string", + "examples": [ + "https://v3b.fal.media/files/b/0a86888c/Zlw8twOa43SKkCXmTdw3-.png" + ], + "title": "Input Image Url", + "x-fal": { + "timeout": 20, + "max_file_size": 8388608 + }, + "limit_description": "Max file size: 8.0MB, Timeout: 20.0s" + }, + "prompt": { + "examples": [ + "orange cat" + ], + "title": "Prompt", + "type": "string", + "description": "Text prompt describing the 3D content attributes such as color, category, and material.", + "maxLength": 1024 + }, + "face_count": { + "minimum": 40000, + "title": "Face Count", + "type": "integer", + "description": "Target face count. Range: 40000-1500000", + "maximum": 1500000, + "default": 500000 + }, + "enable_pbr": { + "title": "Enable Pbr", + "type": "boolean", + "description": "Whether to enable PBR material generation.", + "default": false + } + }, + "x-fal-order-properties": [ + "input_image_url", + "prompt", + "enable_pbr", + "face_count" + ], + "required": [ + "input_image_url", + "prompt" + ] + }, + "Hunyuan3dV3SketchTo3dOutput": { + "title": "SketchTo3DOutput", + "type": "object", + "properties": { + "model_urls": { + "examples": [ + { + "glb": { + "file_size": 30655724, + "file_name": "model.glb", + "content_type": "model/gltf-binary", + "url": "https://v3b.fal.media/files/b/0a8688bb/vd2SlBP92cZls3zG5EPbg_model.glb" + }, + "obj": { + "file_size": 23418473, + "file_name": "model.obj", + "content_type": "text/plain", + "url": "https://v3b.fal.media/files/b/0a8688bb/QNik1DVxzvj23YEF3vhs__model.obj" + } + } + ], + "title": "Model Urls", + "description": "URLs for different 3D model formats", + "allOf": [ + { + "$ref": "#/components/schemas/ModelUrls" + } + ] + }, + "thumbnail": { + "examples": [ + { + "file_size": 68478, + "file_name": "preview.png", + "content_type": "image/png", + "url": "https://v3b.fal.media/files/b/0a8688bb/ZkMb4jHnb5QRNYp4SxkEA_preview.png" + } + ], + "title": "Thumbnail", + "description": "Preview thumbnail of the generated model", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + }, + "model_glb": { + "examples": [ + { + "file_size": 30655724, + "file_name": "model.glb", + "content_type": "model/gltf-binary", + "url": "https://v3b.fal.media/files/b/0a8688bb/vd2SlBP92cZls3zG5EPbg_model.glb" + } + ], + "title": "Model Glb", + "description": "Generated 3D object in GLB format.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "model_glb", + "thumbnail", + "model_urls", + "seed" + ], + "required": [ + "model_glb", + "model_urls" + ] + }, + "ModelUrls": { + "title": "ModelUrls", + "type": "object", + "properties": { + "fbx": { + "title": "Fbx", + "description": "FBX format 3D model", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "usdz": { + "title": "Usdz", + "description": "USDZ format 3D model", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "glb": { + "title": "Glb", + "description": "GLB format 3D model", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "obj": { + "title": "Obj", + "description": "OBJ format 3D model", + 
"allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "glb", + "fbx", + "obj", + "usdz" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/hunyuan3d-v3/sketch-to-3d/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan3d-v3/sketch-to-3d/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/hunyuan3d-v3/sketch-to-3d": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Hunyuan3dV3SketchTo3dInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan3d-v3/sketch-to-3d/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Hunyuan3dV3SketchTo3dOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/hunyuan3d-v3/image-to-3d", + "metadata": { + "display_name": "Hunyuan3d V3", + "category": "image-to-3d", + "description": "Transform your photos into ultra-high-resolution 3D models in seconds. 
Film-quality geometry with PBR textures, ready for games, e-commerce, and 3D printing.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:52.237Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a875a2d/pNNyr8z413ca7eBfcFNlT_f44881fdb61747128320f7235a43e81b.jpg", + "model_url": "https://fal.run/fal-ai/hunyuan3d-v3/image-to-3d", + "license_type": "commercial", + "date": "2025-12-16T22:01:39.729Z", + "group": { + "key": "hunyuan3d-v3", + "label": "Image to 3D" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/hunyuan3d-v3/image-to-3d", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/hunyuan3d-v3/image-to-3d queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/hunyuan3d-v3/image-to-3d", + "category": "image-to-3d", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a875a2d/pNNyr8z413ca7eBfcFNlT_f44881fdb61747128320f7235a43e81b.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/hunyuan3d-v3/image-to-3d", + "documentationUrl": "https://fal.ai/models/fal-ai/hunyuan3d-v3/image-to-3d/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Hunyuan3dV3ImageTo3dInput": { + "title": "ImageTo3DInput", + "type": "object", + "properties": { + "input_image_url": { + "description": "URL of image to use while generating the 3D model.", + "type": "string", + "examples": [ + "https://v3b.fal.media/files/b/0a865ab1/omYcawLUo4RZbO8J6ZgZR.png" + ], + "title": "Input Image Url", + "x-fal": { + "timeout": 20, + "max_file_size": 8388608 + }, + "limit_description": "Max file size: 8.0MB, Timeout: 20.0s" + }, + "polygon_type": { + "enum": [ + "triangle", + "quadrilateral" + ], + "title": "Polygon Type", + "type": "string", + "description": "Polygon type. Only takes effect when GenerateType is LowPoly.", + "default": "triangle" + }, + "face_count": { + "minimum": 40000, + "title": "Face Count", + "type": "integer", + "description": "Target face count. 
Range: 40000-1500000", + "maximum": 1500000, + "default": 500000 + }, + "right_image_url": { + "x-fal": { + "timeout": 20, + "max_file_size": 8388608 + }, + "title": "Right Image Url", + "type": "string", + "description": "Optional right view image URL for better 3D reconstruction.", + "limit_description": "Max file size: 8.0MB, Timeout: 20.0s" + }, + "back_image_url": { + "x-fal": { + "timeout": 20, + "max_file_size": 8388608 + }, + "title": "Back Image Url", + "type": "string", + "description": "Optional back view image URL for better 3D reconstruction.", + "limit_description": "Max file size: 8.0MB, Timeout: 20.0s" + }, + "enable_pbr": { + "title": "Enable Pbr", + "type": "boolean", + "description": "Whether to enable PBR material generation. Does not take effect when generate_type is Geometry.", + "default": false + }, + "generate_type": { + "enum": [ + "Normal", + "LowPoly", + "Geometry" + ], + "title": "Generate Type", + "type": "string", + "description": "Generation type. Normal: textured model. LowPoly: polygon reduction. Geometry: white model without texture.", + "default": "Normal" + }, + "left_image_url": { + "x-fal": { + "timeout": 20, + "max_file_size": 8388608 + }, + "title": "Left Image Url", + "type": "string", + "description": "Optional left view image URL for better 3D reconstruction.", + "limit_description": "Max file size: 8.0MB, Timeout: 20.0s" + } + }, + "x-fal-order-properties": [ + "input_image_url", + "back_image_url", + "left_image_url", + "right_image_url", + "enable_pbr", + "face_count", + "generate_type", + "polygon_type" + ], + "required": [ + "input_image_url" + ] + }, + "Hunyuan3dV3ImageTo3dOutput": { + "title": "ImageTo3DOutput", + "type": "object", + "properties": { + "model_urls": { + "examples": [ + { + "glb": { + "file_size": 64122888, + "file_name": "model.glb", + "content_type": "model/gltf-binary", + "url": "https://v3b.fal.media/files/b/0a8686ae/MQN_KtP32PbqtPr_VLcyp_model.glb" + }, + "obj": { + "file_size": 42886419, + "file_name": "model.obj", + "content_type": "text/plain", + "url": "https://v3b.fal.media/files/b/0a8686ad/ifdJskhUfQysq-NN20iQR_model.obj" + } + } + ], + "title": "Model Urls", + "description": "URLs for different 3D model formats", + "allOf": [ + { + "$ref": "#/components/schemas/ModelUrls" + } + ] + }, + "thumbnail": { + "examples": [ + { + "file_size": 74443, + "file_name": "preview.png", + "content_type": "image/png", + "url": "https://v3b.fal.media/files/b/0a8686ae/sGIaYWOna5Zabtl5PBjDt_preview.png" + } + ], + "title": "Thumbnail", + "description": "Preview thumbnail of the generated model", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + }, + "model_glb": { + "examples": [ + { + "file_size": 64122888, + "file_name": "model.glb", + "content_type": "model/gltf-binary", + "url": "https://v3b.fal.media/files/b/0a8686ae/MQN_KtP32PbqtPr_VLcyp_model.glb" + } + ], + "title": "Model Glb", + "description": "Generated 3D object in GLB format.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "model_glb", + "thumbnail", + "model_urls", + "seed" + ], + "required": [ + "model_glb", + "model_urls" + ] + }, + "ModelUrls": { + "title": "ModelUrls", + "type": "object", + "properties": { + "fbx": { + "title": "Fbx", + "description": "FBX format 3D model", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "usdz": { + "title": "Usdz", + "description": 
"USDZ format 3D model", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "glb": { + "title": "Glb", + "description": "GLB format 3D model", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "obj": { + "title": "Obj", + "description": "OBJ format 3D model", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "glb", + "fbx", + "obj", + "usdz" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/hunyuan3d-v3/image-to-3d/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan3d-v3/image-to-3d/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/hunyuan3d-v3/image-to-3d": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Hunyuan3dV3ImageTo3dInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan3d-v3/image-to-3d/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Hunyuan3dV3ImageTo3dOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/sam-3/3d-body", + "metadata": { + "display_name": "Sam 3", + "category": "image-to-3d", + "description": "SAM 3D allows for accurate 3D reconstruction of human body shape and position from a single image.", + "status": "active", + "tags": [ + "3d", + "human", + "pose" + ], + "updated_at": "2026-01-26T21:42:06.607Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a859c26/IT2mAlL6O7dkCEwVk9ba8_875e9beae3b646b0b38e83def3ff9a79.jpg", + "model_url": "https://fal.run/fal-ai/sam-3/3d-body", + "license_type": "commercial", + "date": "2025-12-02T20:52:11.002Z", + "group": { + "key": "sam3", + "label": "Image to 3D Body" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/sam-3/3d-body", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/sam-3/3d-body queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/sam-3/3d-body", + "category": "image-to-3d", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a859c26/IT2mAlL6O7dkCEwVk9ba8_875e9beae3b646b0b38e83def3ff9a79.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/sam-3/3d-body", + "documentationUrl": "https://fal.ai/models/fal-ai/sam-3/3d-body/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Sam33dBodyInput": { + "x-fal-order-properties": [ + "image_url", + "mask_url", + "export_meshes", + "include_3d_keypoints" + ], + "type": "object", + "properties": { + "include_3d_keypoints": { + "title": "Include 3D Keypoints", + "type": "boolean", + "description": "Include 3D keypoint markers (spheres) in the GLB mesh for visualization", + "default": true + }, + "export_meshes": { + "title": "Export Meshes", + "type": "boolean", + "description": "Export individual mesh files (.ply) per person", + "default": true + }, + "mask_url": { + "title": "Mask Url", + "type": "string", + "description": "Optional URL of a binary mask image (white=person, black=background). When provided, skips auto human detection and uses this mask instead. Bbox is auto-computed from the mask." + }, + "image_url": { + "examples": [ + "https://v3b.fal.media/files/b/0a8439f8/E8gEXWsl2C-Euo4dGayzi_An_zyCCnSaytVklh_99sSYt4Z4Hh5e3s7VnNlx5JfN5KuC0j_bnq1AP9JfRoAmOQz5TP0DdCYMk4796Gloe5no1vvpoqhD-p3kE.jpeg" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image containing humans" + } + }, + "title": "SAM3DBodyInput", + "required": [ + "image_url" + ] + }, + "Sam33dBodyOutput": { + "x-fal-order-properties": [ + "model_glb", + "visualization", + "meshes", + "metadata" + ], + "type": "object", + "properties": { + "visualization": { + "title": "Visualization", + "description": "Combined visualization image (original + keypoints + mesh + side view)", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "metadata": { + "title": "Metadata", + "description": "Structured metadata including keypoints and camera parameters", + "allOf": [ + { + "$ref": "#/components/schemas/SAM3DBodyMetadata" + } + ] + }, + "meshes": { + "title": "Meshes", + "type": "array", + "description": "Individual mesh files (.ply), one per detected person (when export_meshes=True)", + "items": { + "$ref": "#/components/schemas/File" + } + }, + "model_glb": { + "examples": [ + "https://v3b.fal.media/files/b/0a8439f9/5LVt3C2YesqnQzg-CxPpu_combined_bodies.glb" + ], + "title": "Model Glb", + "description": "3D body mesh in GLB format with optional 3D keypoint markers", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "SAM3DBodyOutput", + "required": [ + "model_glb", + "visualization", + "metadata" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + }, + "SAM3DBodyMetadata": { + "x-fal-order-properties": [ + "num_people", + "people" + ], + "type": "object", + "properties": { + "people": { + "title": "People", + "type": "array", + "description": "Per-person metadata", + "items": { + "$ref": "#/components/schemas/SAM3DBodyPersonMetadata" + } + }, + "num_people": { + "title": "Num People", + "type": "integer", + "description": "Number of people detected" + } + }, + "description": "Metadata for body reconstruction output.", + "title": "SAM3DBodyMetadata", + "required": [ + "num_people", + "people" + ] + }, + "SAM3DBodyPersonMetadata": { + "x-fal-order-properties": [ + "person_id", + "bbox", + "focal_length", + "pred_cam_t", + "keypoints_2d", + "keypoints_3d" + ], + "type": "object", + "properties": { + "pred_cam_t": { + "title": "Pred Cam T", + "type": "array", + "description": "Predicted camera translation [tx, ty, tz]", + "items": { + "type": "number" + } + }, + "person_id": { + "title": "Person Id", + "type": "integer", + "description": "Index of the person in the scene" + }, + "focal_length": { + "title": "Focal Length", + "type": "number", + "description": "Estimated focal length" + }, + "keypoints_3d": { + "title": "Keypoints 3D", + "type": "array", + "description": "3D keypoints [[x, y, z], ...] - 70 body keypoints in camera space", + "items": { + "type": "array", + "items": { + "type": "number" + } + } + }, + "keypoints_2d": { + "title": "Keypoints 2D", + "type": "array", + "description": "2D keypoints [[x, y], ...] - 70 body keypoints", + "items": { + "type": "array", + "items": { + "type": "number" + } + } + }, + "bbox": { + "title": "Bbox", + "type": "array", + "description": "Bounding box [x_min, y_min, x_max, y_max]", + "items": { + "type": "number" + } + } + }, + "description": "Per-person metadata for body reconstruction.", + "title": "SAM3DBodyPersonMetadata", + "required": [ + "person_id", + "bbox", + "focal_length", + "pred_cam_t", + "keypoints_2d" + ] + } + } + }, + "paths": { + "/fal-ai/sam-3/3d-body/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sam-3/3d-body/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/sam-3/3d-body": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sam33dBodyInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sam-3/3d-body/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sam33dBodyOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/sam-3/3d-objects", + "metadata": { + "display_name": "Sam 3", + "category": "image-to-3d", + "description": "SAM 3D enables precise 3D reconstruction of objects from real images, while accurately reconstructing their geometry and texture.", + "status": "active", + "tags": [ + "3d", + "object" + ], + "updated_at": "2026-01-26T21:42:06.749Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a84bb3e/wqkR8Gs7K2y82b7XDaNLt_4c480ee51bf94761b8f769d5b27a1921.jpg", + "model_url": "https://fal.run/fal-ai/sam-3/3d-objects", + "license_type": "commercial", + "date": "2025-12-02T20:49:49.159Z", + "group": { + "key": "sam3", + "label": "Image to 3D Object" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/sam-3/3d-objects", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/sam-3/3d-objects queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/sam-3/3d-objects", + "category": "image-to-3d", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a84bb3e/wqkR8Gs7K2y82b7XDaNLt_4c480ee51bf94761b8f769d5b27a1921.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/sam-3/3d-objects", + "documentationUrl": "https://fal.ai/models/fal-ai/sam-3/3d-objects/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Sam33dObjectsInput": { + "x-fal-order-properties": [ + "image_url", + "mask_urls", + "prompt", + "point_prompts", + "box_prompts", + "seed", + "pointmap_url", + "export_textured_glb" + ], + "type": "object", + "properties": { + "pointmap_url": { + "title": "Pointmap Url", + "type": "string", + "description": "Optional URL to external pointmap/depth data (NPY or NPZ format) for improved 3D reconstruction depth estimation" + }, + "export_textured_glb": { + "title": "Export Textured Glb", + "type": "boolean", + "description": "If True, exports GLB with baked texture and UVs instead of vertex colors.", + "default": false + }, + "prompt": { + "title": "Prompt", + "type": "string", + "description": "Text prompt for auto-segmentation when no masks provided (e.g., 'chair', 'lamp')", + "default": "car" + }, + "box_prompts": { + "title": "Box Prompts", + "type": "array", + "description": "Box prompts for auto-segmentation when no masks provided. Multiple boxes supported - each produces a separate object mask for 3D reconstruction.", + "items": { + "$ref": "#/components/schemas/BoxPromptBase" + }, + "default": [] + }, + "image_url": { + "examples": [ + "https://v3b.fal.media/files/b/0a8439e5/TyAmfW5w_sqRXRzWVBGsW_car.jpeg" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to reconstruct in 3D" + }, + "mask_urls": { + "title": "Mask Urls", + "type": "array", + "description": "Optional list of mask URLs (one per object). If not provided, use prompt/point_prompts/box_prompts to auto-segment, or entire image will be used.", + "items": { + "type": "string" + } + }, + "point_prompts": { + "title": "Point Prompts", + "type": "array", + "description": "Point prompts for auto-segmentation when no masks provided", + "items": { + "$ref": "#/components/schemas/PointPromptBase" + }, + "default": [] + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility" + } + }, + "title": "SAM3DObjectInput", + "required": [ + "image_url" + ] + }, + "Sam33dObjectsOutput": { + "x-fal-order-properties": [ + "gaussian_splat", + "model_glb", + "metadata", + "individual_splats", + "individual_glbs", + "artifacts_zip" + ], + "type": "object", + "properties": { + "model_glb": { + "default": "https://v3b.fal.media/files/b/0a8439e7/mqHMt17hzqDaqVMF7q0dB_combined_scene.glb", + "title": "Model Glb", + "description": "3D mesh in GLB format - combined scene for multi-object, single mesh otherwise", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "metadata": { + "title": "Metadata", + "type": "array", + "description": "Per-object metadata (rotation/translation/scale)", + "items": { + "$ref": "#/components/schemas/SAM3DObjectMetadata" + } + }, + "gaussian_splat": { + "title": "Gaussian Splat", + "description": "Gaussian splat file (.ply) - combined scene splat for multi-object, single splat otherwise", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "artifacts_zip": { + "title": "Artifacts Zip", + "description": "Zip bundle containing all artifacts and metadata", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "individual_glbs": { + "title": "Individual Glbs", + "type": "array", + "description": "Individual GLB mesh files per object (only for multi-object scenes)", + "items": { + "$ref": "#/components/schemas/File" + } + }, + "individual_splats": { + "title": "Individual Splats", + "type": "array", + "description": "Individual 
Gaussian splat files per object (only for multi-object scenes)", + "items": { + "$ref": "#/components/schemas/File" + } + } + }, + "title": "SAM3DObjectOutput", + "required": [ + "gaussian_splat", + "metadata" + ] + }, + "BoxPromptBase": { + "x-fal-order-properties": [ + "x_min", + "y_min", + "x_max", + "y_max", + "object_id" + ], + "type": "object", + "properties": { + "y_min": { + "title": "Y Min", + "type": "integer", + "description": "Y Min Coordinate of the box" + }, + "object_id": { + "title": "Object Id", + "type": "integer", + "description": "Optional object identifier. Boxes sharing an object id refine the same object." + }, + "x_max": { + "title": "X Max", + "type": "integer", + "description": "X Max Coordinate of the box" + }, + "x_min": { + "title": "X Min", + "type": "integer", + "description": "X Min Coordinate of the box" + }, + "y_max": { + "title": "Y Max", + "type": "integer", + "description": "Y Max Coordinate of the box" + } + }, + "title": "BoxPromptBase" + }, + "PointPromptBase": { + "x-fal-order-properties": [ + "x", + "y", + "label", + "object_id" + ], + "type": "object", + "properties": { + "y": { + "title": "Y", + "type": "integer", + "description": "Y Coordinate of the prompt" + }, + "x": { + "title": "X", + "type": "integer", + "description": "X Coordinate of the prompt" + }, + "object_id": { + "title": "Object Id", + "type": "integer", + "description": "Optional object identifier. Prompts sharing an object id refine the same object." + }, + "label": { + "enum": [ + 0, + 1 + ], + "title": "Label", + "type": "integer", + "description": "1 for foreground, 0 for background" + } + }, + "title": "PointPromptBase" + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + }, + "SAM3DObjectMetadata": { + "x-fal-order-properties": [ + "object_index", + "scale", + "rotation", + "translation", + "camera_pose" + ], + "type": "object", + "properties": { + "rotation": { + "title": "Rotation", + "type": "array", + "description": "Rotation quaternion [x, y, z, w]", + "items": { + "type": "array", + "items": { + "type": "number" + } + } + }, + "translation": { + "title": "Translation", + "type": "array", + "description": "Translation [tx, ty, tz]", + "items": { + "type": "array", + "items": { + "type": "number" + } + } + }, + "object_index": { + "title": "Object Index", + "type": "integer", + "description": "Index of the object in the scene" + }, + "scale": { + "title": "Scale", + "type": "array", + "description": "Scale factors [sx, sy, sz]", + "items": { + "type": "array", + "items": { + "type": "number" + } + } + }, + "camera_pose": { + "title": "Camera Pose", + "type": "array", + "description": "Camera pose matrix", + "items": { + "type": "array", + "items": { + "type": "number" + } + } + } + }, + "description": "Per-object metadata for 3D reconstruction.", + "title": "SAM3DObjectMetadata", + "required": [ + "object_index" + ] + } + } + }, + "paths": { + "/fal-ai/sam-3/3d-objects/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sam-3/3d-objects/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/sam-3/3d-objects": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sam33dObjectsInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sam-3/3d-objects/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sam33dObjectsOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/omnipart", + "metadata": { + "display_name": "Omnipart", + "category": "image-to-3d", + "description": "Image-to-3D endpoint for OmniPart, a part-aware 3D generator with semantic decoupling and structural cohesion.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:30.111Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/panda/bGISRmmkq0DLUpmIwgssZ_96fa520c425143ae9fba44ba00ad0c26.jpg", + "model_url": "https://fal.run/fal-ai/omnipart", + "license_type": "commercial", + "date": "2025-10-29T15:08:27.324Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/omnipart", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/omnipart queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/omnipart", + "category": "image-to-3d", + "thumbnailUrl": "https://v3b.fal.media/files/b/panda/bGISRmmkq0DLUpmIwgssZ_96fa520c425143ae9fba44ba00ad0c26.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/omnipart", + "documentationUrl": "https://fal.ai/models/fal-ai/omnipart/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "OmnipartInput": { + "title": "OmnipartInput", + "type": "object", + "properties": { + "input_image_url": { + "examples": [ + "https://v3b.fal.media/files/b/koala/SrtReV-jnY4YUPscIgJhx_robot.png" + ], + "title": "Input Image Url", + "type": "string", + "description": "URL of image to use while generating the 3D model." 
+ }, + "parts": { + "title": "Parts", + "type": "string", + "description": "Specify which segments to merge (e.g., '0,1;3,4' merges segments 0&1 together and 3&4 together)", + "default": "" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "default": 765464 + }, + "minimum_segment_size": { + "description": "Minimum segment size (pixels) for the model.", + "type": "integer", + "minimum": 1, + "maximum": 10000, + "title": "Minimum Segment Size", + "default": 2000 + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "Guidance scale for the model.", + "default": 7.5 + } + }, + "x-fal-order-properties": [ + "input_image_url", + "minimum_segment_size", + "seed", + "guidance_scale", + "parts" + ], + "required": [ + "input_image_url" + ] + }, + "OmnipartOutput": { + "title": "MultiViewObjectOutput", + "type": "object", + "properties": { + "full_model_mesh": { + "examples": [ + { + "file_size": 22524044, + "file_name": "mesh_textured.glb", + "content_type": "application/octet-stream", + "url": "https://v3b.fal.media/files/b/elephant/xkEwNvSv9JePj2xxulYlw_mesh_textured.glb" + } + ], + "description": "Generated 3D object file.", + "$ref": "#/components/schemas/File" + }, + "output_zip": { + "examples": [ + { + "file_size": 76129988, + "file_name": "output.tar.gz", + "content_type": "application/octet-stream", + "url": "https://v3b.fal.media/files/b/penguin/MCEWcf7qRRrUla71hf1Rc_output.tar.gz" + } + ], + "description": "All outputs file.", + "$ref": "#/components/schemas/File" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Seed value used for generation." + }, + "model_mesh": { + "examples": [ + { + "file_size": 22860804, + "file_name": "exploded_parts.glb", + "content_type": "application/octet-stream", + "url": "https://v3b.fal.media/files/b/zebra/RWYVShZ2JINskyiH9rjcJ_exploded_parts.glb" + } + ], + "description": "Generated 3D object file.", + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "model_mesh", + "full_model_mesh", + "output_zip", + "seed" + ], + "required": [ + "model_mesh", + "full_model_mesh", + "output_zip", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/omnipart/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/omnipart/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/omnipart": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OmnipartInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/omnipart/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OmnipartOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/bytedance/seed3d/image-to-3d", + "metadata": { + "display_name": "Bytedance", + "category": "image-to-3d", + "description": "Image to 3D endpoint for Bytedance's high-quality Seed3D 3d model generator.", + "status": "active", + "tags": [ + "seed3d.quality", + "bytedance", + "3d" + ], + "updated_at": "2026-01-26T21:42:30.815Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/elephant/NRGi1PT8MxX9sP8z6ydFh_4a17a88f569d40debfbd3d4771893410.jpg", + "model_url": "https://fal.run/fal-ai/bytedance/seed3d/image-to-3d", + "license_type": "commercial", + "date": "2025-10-29T11:44:32.930Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/bytedance/seed3d/image-to-3d", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/bytedance/seed3d/image-to-3d queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/bytedance/seed3d/image-to-3d", + "category": "image-to-3d", + "thumbnailUrl": "https://v3b.fal.media/files/b/elephant/NRGi1PT8MxX9sP8z6ydFh_4a17a88f569d40debfbd3d4771893410.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/bytedance/seed3d/image-to-3d", + "documentationUrl": "https://fal.ai/models/fal-ai/bytedance/seed3d/image-to-3d/api" + } + }, + 
"components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "BytedanceSeed3dImageTo3dInput": { + "x-fal-order-properties": [ + "image_url" + ], + "type": "object", + "properties": { + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/seed3d_input.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image for the 3D asset generation." + } + }, + "title": "Seed3DImageTo3DInput", + "required": [ + "image_url" + ] + }, + "BytedanceSeed3dImageTo3dOutput": { + "x-fal-order-properties": [ + "model", + "usage_tokens" + ], + "type": "object", + "properties": { + "model": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/seed3d_output.zip" + } + ], + "title": "Model", + "description": "The generated 3D model files", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "usage_tokens": { + "examples": [ + 30000 + ], + "title": "Usage Tokens", + "type": "integer", + "description": "The number of tokens used for the 3D model generation" + } + }, + "title": "Seed3DImageTo3DOutput", + "required": [ + "model", + "usage_tokens" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/bytedance/seed3d/image-to-3d/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seed3d/image-to-3d/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seed3d/image-to-3d": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BytedanceSeed3dImageTo3dInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seed3d/image-to-3d/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BytedanceSeed3dImageTo3dOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/meshy/v5/multi-image-to-3d", + "metadata": { + "display_name": "Meshy 5 Multi", + "category": "image-to-3d", + "description": "Meshy-5 multi image generates realistic and production ready 3D models from multiple images.\n", + "status": "active", + "tags": [ + "multi-image-to-3d" + ], + "updated_at": "2026-01-26T21:42:44.982Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/penguin/wLQpmudI-iiOD545isnQ0_b7878036846e4f72b23e29f55c97016e.jpg", + "model_url": "https://fal.run/fal-ai/meshy/v5/multi-image-to-3d", + "license_type": "commercial", + "date": "2025-10-06T23:15:19.272Z", + "group": { + "key": "Meshy", + "label": "v5 Multi-Image to 3D" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/meshy/v5/multi-image-to-3d", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/meshy/v5/multi-image-to-3d queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/meshy/v5/multi-image-to-3d", + "category": "image-to-3d", + "thumbnailUrl": "https://v3.fal.media/files/penguin/wLQpmudI-iiOD545isnQ0_b7878036846e4f72b23e29f55c97016e.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/meshy/v5/multi-image-to-3d", + "documentationUrl": "https://fal.ai/models/fal-ai/meshy/v5/multi-image-to-3d/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MeshyV5MultiImageTo3dInput": { + "x-fal-order-properties": [ + "image_urls", + "topology", + "target_polycount", + "symmetry_mode", + "should_remesh", + "should_texture", + "enable_pbr", + "is_a_t_pose", + "texture_prompt", + "texture_image_url", + "enable_safety_checker" + ], + "type": "object", + "properties": { + "enable_pbr": { + "description": "Generate PBR Maps (metallic, roughness, normal) in addition to base color. Requires should_texture to be true.", + "type": "boolean", + "title": "Enable Pbr", + "default": false + }, + "should_texture": { + "description": "Whether to generate textures. False provides mesh without textures for 5 credits, True adds texture generation for additional 10 credits.", + "type": "boolean", + "title": "Should Texture", + "default": true + }, + "target_polycount": { + "minimum": 100, + "maximum": 300000, + "type": "integer", + "title": "Target Polycount", + "description": "Target number of polygons in the generated model", + "default": 30000 + }, + "is_a_t_pose": { + "description": "Whether to generate the model in an A/T pose", + "type": "boolean", + "title": "Is A T Pose", + "default": false + }, + "texture_image_url": { + "description": "2D image to guide the texturing process. Requires should_texture to be true.", + "max_pixels": 178956970, + "type": "string", + "x-fal": { + "timeout": 20, + "max_file_size": 20971520 + }, + "title": "Texture Image Url", + "limit_description": "Max file size: 20.0MB, Timeout: 20.0s" + }, + "topology": { + "enum": [ + "quad", + "triangle" + ], + "description": "Specify the topology of the generated model. Quad for smooth surfaces, Triangle for detailed geometry.", + "type": "string", + "title": "Topology", + "default": "triangle" + }, + "enable_safety_checker": { + "description": "If set to true, input data will be checked for safety before processing.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "symmetry_mode": { + "enum": [ + "off", + "auto", + "on" + ], + "description": "Controls symmetry behavior during model generation.", + "type": "string", + "title": "Symmetry Mode", + "default": "auto" + }, + "image_urls": { + "description": "1 to 4 images for 3D model creation. All images should depict the same object from different angles. Supports .jpg, .jpeg, .png formats, and AVIF/HEIF which will be automatically converted. If more than 4 images are provided, only the first 4 will be used.", + "max_pixels": 178956970, + "type": "array", + "items": { + "type": "string" + }, + "max_file_size": 20971520, + "examples": [ + [ + "https://v3b.fal.media/files/b/kangaroo/cPyD3-por0XI7jDa9F9vP_image%20(3).png", + "https://v3b.fal.media/files/b/elephant/9sd5JWAOJBcR7G3NMjPVs_image%20(2).png", + "https://v3b.fal.media/files/b/tiger/TP4sTzPATX_w1Tn4m6kYM_image%20(1).png" + ] + ], + "title": "Image Urls" + }, + "texture_prompt": { + "description": "Text prompt to guide the texturing process. 
Requires should_texture to be true.", + "type": "string", + "title": "Texture Prompt", + "maxLength": 600 + }, + "should_remesh": { + "description": "Whether to enable the remesh phase. When false, returns triangular mesh ignoring topology and target_polycount.", + "type": "boolean", + "title": "Should Remesh", + "default": true + } + }, + "title": "MultiImageTo3DInput", + "description": "Input for Multi-Image to 3D conversion", + "required": [ + "image_urls" + ] + }, + "MeshyV5MultiImageTo3dOutput": { + "x-fal-order-properties": [ + "model_glb", + "thumbnail", + "model_urls", + "texture_urls", + "seed" + ], + "type": "object", + "properties": { + "model_urls": { + "examples": [ + { + "fbx": { + "file_size": 5574540, + "file_name": "model.fbx", + "content_type": "application/octet-stream", + "url": "https://v3b.fal.media/files/b/koala/R7vPBgkecVvcnbNpRAy9x_model.fbx" + }, + "usdz": { + "file_size": 8631497, + "file_name": "model.usdz", + "content_type": "model/vnd.usdz+zip", + "url": "https://v3b.fal.media/files/b/panda/fSGLGmtgzUjhepklN06Zw_model.usdz" + }, + "glb": { + "file_size": 7875308, + "file_name": "model.glb", + "content_type": "model/gltf-binary", + "url": "https://v3b.fal.media/files/b/tiger/62QMEQqZ3pjUds4DfuVtX_model.glb" + }, + "obj": { + "file_size": 2761323, + "file_name": "model.obj", + "content_type": "text/plain", + "url": "https://v3b.fal.media/files/b/koala/xmOnmSeePfuROe3pqHpf0_model.obj" + } + } + ], + "title": "Model Urls", + "description": "URLs for different 3D model formats", + "allOf": [ + { + "$ref": "#/components/schemas/ModelUrls" + } + ] + }, + "texture_urls": { + "examples": [ + [ + { + "base_color": { + "file_size": 4464364, + "file_name": "texture_0.png", + "content_type": "image/png", + "url": "https://v3b.fal.media/files/b/panda/OVrRor7IgeNK9w2i5-NDf_texture_0.png" + } + } + ] + ], + "title": "Texture Urls", + "type": "array", + "description": "Array of texture file objects", + "items": { + "$ref": "#/components/schemas/TextureFiles" + } + }, + "thumbnail": { + "examples": [ + { + "file_size": 70958, + "file_name": "preview.png", + "content_type": "image/png", + "url": "https://v3b.fal.media/files/b/koala/2NI_hEd7jXzS5rLQhnRga_preview.png" + } + ], + "title": "Thumbnail", + "description": "Preview thumbnail of the generated model", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "seed": { + "examples": [ + 783032043 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used for generation (if available)" + }, + "model_glb": { + "examples": [ + { + "file_size": 7875308, + "file_name": "model.glb", + "content_type": "model/gltf-binary", + "url": "https://v3b.fal.media/files/b/tiger/62QMEQqZ3pjUds4DfuVtX_model.glb" + } + ], + "title": "Model Glb", + "description": "Generated 3D object in GLB format.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "MultiImageTo3DOutput", + "description": "Output for Multi-Image to 3D conversion", + "required": [ + "model_glb", + "model_urls" + ] + }, + "ModelUrls": { + "x-fal-order-properties": [ + "glb", + "fbx", + "obj", + "usdz", + "blend", + "stl" + ], + "type": "object", + "properties": { + "usdz": { + "description": "USDZ format 3D model", + "title": "Usdz", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "fbx": { + "description": "FBX format 3D model", + "title": "Fbx", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "blend": { + "description": "Blender format 3D model", + "title": "Blend", + 
"allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "stl": { + "description": "STL format 3D model", + "title": "Stl", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "glb": { + "description": "GLB format 3D model", + "title": "Glb", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "obj": { + "description": "OBJ format 3D model", + "title": "Obj", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "ModelUrls", + "description": "3D model files in various formats" + }, + "TextureFiles": { + "x-fal-order-properties": [ + "base_color", + "metallic", + "normal", + "roughness" + ], + "type": "object", + "properties": { + "base_color": { + "description": "Base color texture", + "title": "Base Color", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "normal": { + "description": "Normal texture (PBR)", + "title": "Normal", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "roughness": { + "description": "Roughness texture (PBR)", + "title": "Roughness", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "metallic": { + "description": "Metallic texture (PBR)", + "title": "Metallic", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "TextureFiles", + "description": "Texture files downloaded and uploaded to CDN", + "required": [ + "base_color" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/meshy/v5/multi-image-to-3d/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/meshy/v5/multi-image-to-3d/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/meshy/v5/multi-image-to-3d": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MeshyV5MultiImageTo3dInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/meshy/v5/multi-image-to-3d/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MeshyV5MultiImageTo3dOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/meshy/v6-preview/image-to-3d", + "metadata": { + "display_name": "Meshy 6 Preview", + "category": "image-to-3d", + "description": "Meshy-6-Preview is the latest model from Meshy. It generates realistic and production ready 3D models.\n", + "status": "active", + "tags": [ + "image-to-3d" + ], + "updated_at": "2026-01-26T21:42:45.107Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/kangaroo/DSTicAg_awPQiKUDYINO1_f2f15f177bfe4e94b4f91340ff0393f6.jpg", + "model_url": "https://fal.run/fal-ai/meshy/v6-preview/image-to-3d", + "license_type": "commercial", + "date": "2025-10-06T23:11:32.101Z", + "group": { + "key": "Meshy", + "label": "v6 Preview Image to 3D" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/meshy/v6-preview/image-to-3d", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/meshy/v6-preview/image-to-3d queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/meshy/v6-preview/image-to-3d", + "category": "image-to-3d", + "thumbnailUrl": "https://v3b.fal.media/files/b/kangaroo/DSTicAg_awPQiKUDYINO1_f2f15f177bfe4e94b4f91340ff0393f6.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/meshy/v6-preview/image-to-3d", + "documentationUrl": "https://fal.ai/models/fal-ai/meshy/v6-preview/image-to-3d/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "MeshyV6PreviewImageTo3dInput": { + "x-fal-order-properties": [ + "image_url", + "topology", + "target_polycount", + "symmetry_mode", + "should_remesh", + "should_texture", + "enable_pbr", + "is_a_t_pose", + "texture_prompt", + "texture_image_url", + "enable_safety_checker" + ], + "type": "object", + "properties": { + "enable_pbr": { + "description": "Generate PBR Maps (metallic, roughness, normal) in addition to base color", + "type": "boolean", + "title": "Enable Pbr", + "default": false + }, + "is_a_t_pose": { + "description": "Whether to generate the model in an A/T pose", + "type": "boolean", + "title": "Is A T Pose", + "default": false + }, + "target_polycount": { + "minimum": 100, + "maximum": 300000, + "type": "integer", + "title": "Target Polycount", + "description": "Target number of polygons in the generated model", + "default": 30000 + }, + "should_texture": { + "description": "Whether to generate textures", + "type": "boolean", + "title": "Should Texture", + "default": true + }, + "texture_image_url": { + "description": "2D image to guide the texturing process", + "max_pixels": 178956970, + "type": "string", + "x-fal": { + "timeout": 20, + "max_file_size": 20971520 + }, + "title": "Texture Image Url", + "limit_description": "Max file size: 20.0MB, Timeout: 20.0s" + }, + "topology": { + "enum": [ + "quad", + "triangle" + ], + "description": "Specify the topology of the generated model. Quad for smooth surfaces, Triangle for detailed geometry.", + "type": "string", + "title": "Topology", + "default": "triangle" + }, + "image_url": { + "description": "Image URL or base64 data URI for 3D model creation. Supports .jpg, .jpeg, and .png formats. Also supports AVIF and HEIF formats which will be automatically converted.", + "max_pixels": 178956970, + "type": "string", + "x-fal": { + "timeout": 20, + "max_file_size": 20971520 + }, + "title": "Image Url", + "examples": [ + "https://v3b.fal.media/files/b/zebra/3osHJDI8IZ2wl6sGtEUeB_image.png" + ], + "limit_description": "Max file size: 20.0MB, Timeout: 20.0s" + }, + "enable_safety_checker": { + "description": "If set to true, input data will be checked for safety before processing.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "symmetry_mode": { + "enum": [ + "off", + "auto", + "on" + ], + "description": "Controls symmetry behavior during model generation. 
Off disables symmetry, Auto determines it automatically, On enforces symmetry.", + "type": "string", + "title": "Symmetry Mode", + "default": "auto" + }, + "texture_prompt": { + "description": "Text prompt to guide the texturing process", + "type": "string", + "title": "Texture Prompt", + "maxLength": 600 + }, + "should_remesh": { + "description": "Whether to enable the remesh phase", + "type": "boolean", + "title": "Should Remesh", + "default": true + } + }, + "title": "ImageTo3DInput", + "description": "Input for Image to 3D conversion", + "required": [ + "image_url" + ] + }, + "MeshyV6PreviewImageTo3dOutput": { + "x-fal-order-properties": [ + "model_glb", + "thumbnail", + "model_urls", + "texture_urls", + "seed" + ], + "type": "object", + "properties": { + "model_urls": { + "examples": [ + { + "fbx": { + "file_size": 5427052, + "file_name": "model.fbx", + "content_type": "application/octet-stream", + "url": "https://v3b.fal.media/files/b/kangaroo/4Q2qdpTvfLVdzAKH1-72v_model.fbx" + }, + "usdz": { + "file_size": 9991969, + "file_name": "model.usdz", + "content_type": "model/vnd.usdz+zip", + "url": "https://v3b.fal.media/files/b/lion/RgJG9EBQ_GAHMVWV3wCis_model.usdz" + }, + "glb": { + "file_size": 9242744, + "file_name": "model.glb", + "content_type": "model/gltf-binary", + "url": "https://v3b.fal.media/files/b/zebra/OXF1e1bO3JddPTaugv0eL_model.glb" + }, + "obj": { + "file_size": 2744413, + "file_name": "model.obj", + "content_type": "text/plain", + "url": "https://v3b.fal.media/files/b/koala/_Vg0d084-hd3EdpIJDf7U_model.obj" + } + } + ], + "title": "Model Urls", + "description": "URLs for different 3D model formats", + "allOf": [ + { + "$ref": "#/components/schemas/ModelUrls" + } + ] + }, + "texture_urls": { + "examples": [ + [ + { + "base_color": { + "file_size": 4328755, + "file_name": "texture_0.png", + "content_type": "image/png", + "url": "https://v3b.fal.media/files/b/tiger/NkgxcEom_42V4_8UUXiRR_texture_0.png" + } + } + ] + ], + "title": "Texture Urls", + "type": "array", + "description": "Array of texture file objects, matching Meshy API structure", + "items": { + "$ref": "#/components/schemas/TextureFiles" + } + }, + "thumbnail": { + "examples": [ + { + "file_size": 54279, + "file_name": "preview.png", + "content_type": "image/png", + "url": "https://v3b.fal.media/files/b/penguin/rfnS6ClmeEWgDXp_oD5tN_preview.png" + } + ], + "title": "Thumbnail", + "description": "Preview thumbnail of the generated model", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "seed": { + "examples": [ + 2009275957 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used for generation (if available)" + }, + "model_glb": { + "examples": [ + { + "file_size": 9242744, + "file_name": "model.glb", + "content_type": "model/gltf-binary", + "url": "https://v3b.fal.media/files/b/zebra/OXF1e1bO3JddPTaugv0eL_model.glb" + } + ], + "title": "Model Glb", + "description": "Generated 3D object in GLB format.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "ImageTo3DOutput", + "description": "Output for Image to 3D conversion", + "required": [ + "model_glb", + "model_urls" + ] + }, + "ModelUrls": { + "x-fal-order-properties": [ + "glb", + "fbx", + "obj", + "usdz", + "blend", + "stl" + ], + "type": "object", + "properties": { + "usdz": { + "description": "USDZ format 3D model", + "title": "Usdz", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "fbx": { + "description": "FBX format 3D model", + "title": "Fbx", + "allOf": [ 
+ { + "$ref": "#/components/schemas/File" + } + ] + }, + "blend": { + "description": "Blender format 3D model", + "title": "Blend", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "stl": { + "description": "STL format 3D model", + "title": "Stl", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "glb": { + "description": "GLB format 3D model", + "title": "Glb", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "obj": { + "description": "OBJ format 3D model", + "title": "Obj", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "ModelUrls", + "description": "3D model files in various formats" + }, + "TextureFiles": { + "x-fal-order-properties": [ + "base_color", + "metallic", + "normal", + "roughness" + ], + "type": "object", + "properties": { + "base_color": { + "description": "Base color texture", + "title": "Base Color", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "normal": { + "description": "Normal texture (PBR)", + "title": "Normal", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "roughness": { + "description": "Roughness texture (PBR)", + "title": "Roughness", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "metallic": { + "description": "Metallic texture (PBR)", + "title": "Metallic", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "TextureFiles", + "description": "Texture files downloaded and uploaded to CDN", + "required": [ + "base_color" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/meshy/v6-preview/image-to-3d/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/meshy/v6-preview/image-to-3d/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/meshy/v6-preview/image-to-3d": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MeshyV6PreviewImageTo3dInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/meshy/v6-preview/image-to-3d/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MeshyV6PreviewImageTo3dOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/hyper3d/rodin/v2", + "metadata": { + "display_name": "Hyper3d", + "category": "image-to-3d", + "description": "Rodin by Hyper3D generates realistic and production ready 3D models from text or images.", + "status": "active", + "tags": [ + "image-to-3d", + "text-to-3d" + ], + "updated_at": "2026-01-26T21:42:47.101Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/lion/liLB7F5DWmR3CgbqjvZo3_2a3d75c59b5f4951810452fa8c27d0ba.jpg", + "model_url": "https://fal.run/fal-ai/hyper3d/rodin/v2", + "license_type": "commercial", + "date": "2025-09-26T19:59:28.021Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/hyper3d/rodin/v2", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/hyper3d/rodin/v2 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/hyper3d/rodin/v2", + "category": "image-to-3d", + "thumbnailUrl": "https://v3.fal.media/files/lion/liLB7F5DWmR3CgbqjvZo3_2a3d75c59b5f4951810452fa8c27d0ba.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/hyper3d/rodin/v2", + "documentationUrl": "https://fal.ai/models/fal-ai/hyper3d/rodin/v2/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Hyper3dRodinV2Input": { + "title": "RodinGen2Input", + "type": "object", + "properties": { + "quality_mesh_option": { + "enum": [ + "4K Quad", + "8K Quad", + "18K Quad", + "50K Quad", + "2K Triangle", + "20K Triangle", + "150K Triangle", + "500K Triangle" + ], + "title": "Quality Mesh Option", + "type": "string", + "description": "Combined quality and mesh type selection. Quad = smooth surfaces, Triangle = detailed geometry. These corresponds to `mesh_mode` (if the option contains 'Triangle', mesh_mode is 'Raw', otherwise 'Quad') and `quality_override` (the numeric part of the option) parameters in Hyper3D API.", + "default": "500K Triangle" + }, + "prompt": { + "examples": [ + "A futuristic robot with sleek metallic design." + ], + "title": "Prompt", + "type": "string", + "description": "A textual prompt to guide model generation. Optional for Image-to-3D mode - if empty, AI will generate a prompt based on your images.", + "default": "" + }, + "preview_render": { + "title": "Preview Render", + "type": "boolean", + "description": "Generate a preview render image of the 3D model along with the model files.", + "default": false + }, + "bbox_condition": { + "title": "Bbox Condition", + "type": "array", + "description": "An array that specifies the bounding box dimensions [width, height, length].", + "example": [ + 100, + 50, + 150 + ], + "items": { + "type": "integer" + } + }, + "TAPose": { + "title": "T/A Pose", + "type": "boolean", + "description": "Generate characters in T-pose or A-pose format, making them easier to rig and animate in 3D software.", + "default": false + }, + "input_image_urls": { + "examples": [ + [ + "https://v3.fal.media/files/panda/l7mQrG8plbB42lBNqVjm0_image.png", + "https://v3b.fal.media/files/b/kangaroo/scq50Bf1PB2NZOW8szphV_image.png", + "https://v3.fal.media/files/penguin/X21qtlVMazAtljzRCJD2__image.png" + ] + ], + "title": "Input Image Urls", + "type": "array", + "description": "URL of images to use while generating the 3D model. Required for Image-to-3D mode. Up to 5 images allowed.", + "items": { + "type": "string" + } + }, + "use_original_alpha": { + "title": "Use Original Alpha", + "type": "boolean", + "description": "When enabled, preserves the transparency channel from input images during 3D generation.", + "default": false + }, + "geometry_file_format": { + "enum": [ + "glb", + "usdz", + "fbx", + "obj", + "stl" + ], + "title": "Geometry File Format", + "type": "string", + "description": "Format of the geometry file. Possible values: glb, usdz, fbx, obj, stl. Default is glb.", + "default": "glb" + }, + "addons": { + "enum": [ + "HighPack" + ], + "title": "Addons", + "type": "string", + "description": "The HighPack option will provide 4K resolution textures instead of the default 1K, as well as models with high-poly. It will cost **triple the billable units**." + }, + "seed": { + "minimum": 0, + "maximum": 65535, + "type": "integer", + "title": "Seed", + "description": "Seed value for randomization, ranging from 0 to 65535. Optional." 
+ }, + "material": { + "enum": [ + "PBR", + "Shaded", + "All" + ], + "title": "Material", + "type": "string", + "description": "Material type. PBR: Physically-based materials with realistic lighting. Shaded: Simple materials with baked lighting. All: Both types included.", + "default": "All" + } + }, + "x-fal-order-properties": [ + "prompt", + "input_image_urls", + "use_original_alpha", + "seed", + "geometry_file_format", + "material", + "quality_mesh_option", + "TAPose", + "bbox_condition", + "addons", + "preview_render" + ] + }, + "Hyper3dRodinV2Output": { + "title": "ObjectOutputv2", + "type": "object", + "properties": { + "model_mesh": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/model_tests/video_models/base_basic_shaded.glb" + } + ], + "title": "Model Mesh", + "description": "Generated 3D object file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Seed value used for generation." + }, + "textures": { + "title": "Textures", + "type": "array", + "description": "Generated textures for the 3D object.", + "items": { + "$ref": "#/components/schemas/Image" + } + } + }, + "x-fal-order-properties": [ + "model_mesh", + "seed", + "textures" + ], + "required": [ + "model_mesh", + "seed", + "textures" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/hyper3d/rodin/v2/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hyper3d/rodin/v2/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/hyper3d/rodin/v2": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Hyper3dRodinV2Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hyper3d/rodin/v2/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Hyper3dRodinV2Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pshuman", + "metadata": { + "display_name": "Pshuman", + "category": "image-to-3d", + "description": "Use the 6D pose estimation capabilities of PSHuman to generate 3D files from single image.", + "status": "active", + "tags": [ + "image-to-3D" + ], + "updated_at": "2026-01-26T21:42:55.877Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/lion/NmFYFxuC6qxi6oIj0K6Ig_4588fba5ffe44e098914def30b72d7cc.jpg", + "model_url": "https://fal.run/fal-ai/pshuman", + "license_type": "commercial", + "date": "2025-09-13T21:30:34.681Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pshuman", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pshuman queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pshuman", + "category": "image-to-3d", + "thumbnailUrl": "https://fal.media/files/lion/NmFYFxuC6qxi6oIj0K6Ig_4588fba5ffe44e098914def30b72d7cc.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/pshuman", + "documentationUrl": "https://fal.ai/models/fal-ai/pshuman/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": 
"Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PshumanInput": { + "title": "PSHumanRequest", + "type": "object", + "properties": { + "guidance_scale": { + "minimum": 1, + "title": "Guidance Scale", + "type": "number", + "maximum": 10, + "description": "Guidance scale for the diffusion process. Controls how much the output adheres to the generated views.", + "default": 4 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Seed for reproducibility. If None, a random seed will be used." + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/video_models/WhatsApp%20Image%202025-09-05%20at%2019.16.09%20(1).png" + ], + "title": "Image Url", + "type": "string", + "description": "A direct URL to the input image of a person." + } + }, + "x-fal-order-properties": [ + "image_url", + "guidance_scale", + "seed" + ], + "required": [ + "image_url" + ] + }, + "PshumanOutput": { + "title": "PSHumanResponse", + "type": "object", + "properties": { + "model_obj": { + "examples": [ + { + "file_name": "VGSdkXIgccoKhHs_JtXTa_result_clr_scale4_image.obj", + "content_type": "application/octet-stream", + "url": "https://storage.googleapis.com/falserverless/model_tests/video_models/VGSdkXIgccoKhHs_JtXTa_result_clr_scale4_image.obj" + } + ], + "title": "Model Obj", + "description": "The generated 3D model in OBJ format.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "preview_image": { + "examples": [ + { + "file_name": "WCN_SkT2-RwsGHlxCVHyn_image_preview.png", + "content_type": "application/octet-stream", + "url": "https://storage.googleapis.com/falserverless/model_tests/video_models/WCN_SkT2-RwsGHlxCVHyn_image_preview.png" + } + ], + "title": "Preview Image", + "description": "A preview image showing the input and the generated multi-view outputs.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "model_obj", + "preview_image" + ], + "required": [ + "model_obj", + "preview_image" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pshuman/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pshuman/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/pshuman": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PshumanInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pshuman/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PshumanOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/hunyuan_world/image-to-world", + "metadata": { + "display_name": "Hunyuan World", + "category": "image-to-3d", + "description": "Hunyuan World 1.0 turns a single image into a panorama or a 3D world. 
It creates realistic scenes from the image, allowing you to explore and view it from different angles.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:12.281Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/penguin/89p9igw0J95bIBO105W1r_ff145cc097ae4129bb32e8e68186fe6f.jpg", + "model_url": "https://fal.run/fal-ai/hunyuan_world/image-to-world", + "license_type": "commercial", + "date": "2025-07-28T09:45:20.876Z", + "group": { + "key": "Hunyuan World 1.0", + "label": "Image to World" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/hunyuan_world/image-to-world", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/hunyuan_world/image-to-world queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/hunyuan_world/image-to-world", + "category": "image-to-3d", + "thumbnailUrl": "https://fal.media/files/penguin/89p9igw0J95bIBO105W1r_ff145cc097ae4129bb32e8e68186fe6f.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/hunyuan_world/image-to-world", + "documentationUrl": "https://fal.ai/models/fal-ai/hunyuan_world/image-to-world/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Hunyuan_worldImageToWorldInput": { + "title": "ImageToWorldRequest", + "type": "object", + "properties": { + "classes": { + "examples": [ + "nature, landscape" + ], + "title": "Classes", + "type": "string", + "description": "Classes to use for the world generation." + }, + "export_drc": { + "title": "Export Drc", + "type": "boolean", + "description": "Whether to export DRC (Dynamic Resource Configuration).", + "default": false + }, + "labels_fg1": { + "examples": [ + "tree, grass, sky" + ], + "title": "Labels Fg1", + "type": "string", + "description": "Labels for the first foreground object." + }, + "labels_fg2": { + "examples": [ + "mountain, water" + ], + "title": "Labels Fg2", + "type": "string", + "description": "Labels for the second foreground object." + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/penguin/_4oXlxt85dr0WY2o0I894_output.png" + ], + "title": "Image Url", + "type": "string", + "description": "The URL of the image to convert to a world." 
+ } + }, + "x-fal-order-properties": [ + "image_url", + "labels_fg1", + "labels_fg2", + "classes", + "export_drc" + ], + "required": [ + "image_url", + "labels_fg1", + "labels_fg2", + "classes" + ] + }, + "Hunyuan_worldImageToWorldOutput": { + "title": "ImageToWorldResponse", + "type": "object", + "properties": { + "world_file": { + "title": "World File", + "description": "The generated world.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "world_file" + ], + "required": [ + "world_file" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/hunyuan_world/image-to-world/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan_world/image-to-world/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/hunyuan_world/image-to-world": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Hunyuan_worldImageToWorldInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan_world/image-to-world/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Hunyuan_worldImageToWorldOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "tripo3d/tripo/v2.5/multiview-to-3d", + "metadata": { + "display_name": "Tripo3D", + "category": "image-to-3d", + "description": "State of the art Multiview to 3D Object generation. Generate 3D models from multiple images!", + "status": "active", + "tags": [ + "stylized", + "multiview" + ], + "updated_at": "2026-01-26T21:43:25.920Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/lion/_7MtOFa8cfwOHKGp-dMxm_e4939dba689e4c0c89bc25a6eb11cf67.jpg", + "model_url": "https://fal.run/tripo3d/tripo/v2.5/multiview-to-3d", + "license_type": "commercial", + "date": "2025-06-18T08:56:32.461Z", + "group": { + "key": "tripo3d", + "label": "Multiview To 3D" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for tripo3d/tripo/v2.5/multiview-to-3d", + "version": "1.0.0", + "description": "The OpenAPI schema for the tripo3d/tripo/v2.5/multiview-to-3d queue.", + "x-fal-metadata": { + "endpointId": "tripo3d/tripo/v2.5/multiview-to-3d", + "category": "image-to-3d", + "thumbnailUrl": "https://fal.media/files/lion/_7MtOFa8cfwOHKGp-dMxm_e4939dba689e4c0c89bc25a6eb11cf67.jpg", + "playgroundUrl": "https://fal.ai/models/tripo3d/tripo/v2.5/multiview-to-3d", + "documentationUrl": "https://fal.ai/models/tripo3d/tripo/v2.5/multiview-to-3d/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "TripoV25MultiviewTo3dInput": { + "x-fal-order-properties": [ + "seed", + "face_limit", + "pbr", + "texture", + "texture_seed", + "auto_size", + "style", + "quad", + "texture_alignment", + "orientation", + "front_image_url", + "left_image_url", + "back_image_url", + "right_image_url" + ], + "type": "object", + "properties": { + "face_limit": { + "description": "Limits the number of faces on the output model. If this option is not set, the face limit will be adaptively determined.", + "type": "integer", + "title": "Face Limit" + }, + "right_image_url": { + "examples": [ + "https://platform.tripo3d.ai/assets/right-hj57H4if.jpg" + ], + "description": "Right view image of the object.", + "type": "string", + "title": "Right Image Url" + }, + "style": { + "enum": [ + "person:person2cartoon", + "object:clay", + "object:steampunk", + "animal:venom", + "object:barbie", + "object:christmas", + "gold", + "ancient_bronze" + ], + "title": "Style", + "type": "string", + "description": "[DEPRECATED] Defines the artistic style or transformation to be applied to the 3D model, altering its appearance according to preset options (extra $0.05 per generation). Omit this option to keep the original style and apperance.", + "deprecated": true + }, + "quad": { + "description": "Set True to enable quad mesh output (extra $0.05 per generation). If quad=True and face_limit is not set, the default face_limit will be 10000. Note: Enabling this option will force the output to be an FBX model.", + "type": "boolean", + "title": "Quad", + "default": false + }, + "front_image_url": { + "examples": [ + "https://platform.tripo3d.ai/assets/front-235queJB.jpg" + ], + "description": "Front view image of the object.", + "type": "string", + "title": "Front Image Url" + }, + "texture_seed": { + "description": "This is the random seed for texture generation. Using the same seed will produce identical textures. This parameter is an integer and is randomly chosen if not set. If you want a model with different textures, please use same seed and different texture_seed.", + "type": "integer", + "title": "Texture Seed" + }, + "back_image_url": { + "examples": [ + "https://platform.tripo3d.ai/assets/back-6vq1a8L4.jpg" + ], + "description": "Back view image of the object.", + "type": "string", + "title": "Back Image Url" + }, + "pbr": { + "description": "A boolean option to enable pbr. The default value is True, set False to get a model without pbr. If this option is set to True, texture will be ignored and used as True.", + "type": "boolean", + "title": "Pbr", + "default": false + }, + "texture_alignment": { + "enum": [ + "original_image", + "geometry" + ], + "description": "Determines the prioritization of texture alignment in the 3D model. The default value is original_image.", + "type": "string", + "title": "Texture Alignment", + "default": "original_image" + }, + "texture": { + "enum": [ + "no", + "standard", + "HD" + ], + "description": "An option to enable texturing. Default is 'standard', set 'no' to get a model without any textures, and set 'HD' to get a model with hd quality textures.", + "type": "string", + "title": "Texture", + "default": "standard" + }, + "auto_size": { + "description": "Automatically scale the model to real-world dimensions, with the unit in meters. The default value is False.", + "type": "boolean", + "title": "Auto Size", + "default": false + }, + "seed": { + "description": "This is the random seed for model generation. 
The seed controls the geometry generation process, ensuring identical models when the same seed is used. This parameter is an integer and is randomly chosen if not set.", + "type": "integer", + "title": "Seed" + }, + "orientation": { + "enum": [ + "default", + "align_image" + ], + "description": "Set orientation=align_image to automatically rotate the model to align the original image. The default value is default.", + "type": "string", + "title": "Orientation", + "default": "default" + }, + "left_image_url": { + "examples": [ + "https://platform.tripo3d.ai/assets/left-Nfdj2U8P.jpg" + ], + "description": "Left view image of the object.", + "type": "string", + "title": "Left Image Url" + } + }, + "title": "MultiviewTo3dInput", + "required": [ + "front_image_url" + ] + }, + "TripoV25MultiviewTo3dOutput": { + "x-fal-order-properties": [ + "task_id", + "model_mesh", + "base_model", + "pbr_model", + "rendered_image" + ], + "type": "object", + "properties": { + "base_model": { + "title": "Base Model", + "description": "Base model", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "task_id": { + "description": "The task id of the 3D model generation.", + "type": "string", + "title": "Task Id" + }, + "rendered_image": { + "examples": [ + { + "file_size": 13718, + "content_type": "image/webp", + "url": "https://v3.fal.media/files/panda/zDTAHqp8ifMOT3upZ1xJv_legacy.webp" + } + ], + "title": "Rendered Image", + "description": "A preview image of the model", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "model_mesh": { + "examples": [ + { + "file_size": 6744644, + "content_type": "application/octet-stream", + "url": "https://v3.fal.media/files/zebra/NA4WkhbpI-XdOIFc4cDIk_tripo_model_812c3a8a-6eb3-4c09-9f40-0563d27ae7ea.glb" + } + ], + "title": "Model Mesh", + "description": "Model", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "pbr_model": { + "title": "Pbr Model", + "description": "Pbr model", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "Tripo3dOutput", + "required": [ + "task_id" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/tripo3d/tripo/v2.5/multiview-to-3d/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/tripo3d/tripo/v2.5/multiview-to-3d/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/tripo3d/tripo/v2.5/multiview-to-3d": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TripoV25MultiviewTo3dInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/tripo3d/tripo/v2.5/multiview-to-3d/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TripoV25MultiviewTo3dOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/hunyuan3d-v21", + "metadata": { + "display_name": "Hunyuan 3D 2.1", + "category": "image-to-3d", + "description": "Hunyuan3D-2.1 is a scalable 3D asset creation system that advances state-of-the-art 3D generation through Physically-Based Rendering (PBR).", + "status": "active", + "tags": [ + "image-to-3d" + ], + "updated_at": "2026-01-26T21:43:27.857Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Upscale-5.jpeg", + "model_url": "https://fal.run/fal-ai/hunyuan3d-v21", + "license_type": "commercial", + "date": "2025-06-14T01:28:55.661Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/hunyuan3d-v21", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/hunyuan3d-v21 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/hunyuan3d-v21", + "category": "image-to-3d", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Upscale-5.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/hunyuan3d-v21", + "documentationUrl": "https://fal.ai/models/fal-ai/hunyuan3d-v21/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." 
+ }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Hunyuan3dV21Input": { + "title": "Hunyuan3DInput", + "type": "object", + "properties": { + "input_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/video_models/robot.png" + ], + "title": "Input Image Url", + "type": "string", + "description": "URL of image to use while generating the 3D model." + }, + "octree_resolution": { + "minimum": 1, + "title": "Octree Resolution", + "type": "integer", + "maximum": 1024, + "description": "Octree resolution for the model.", + "default": 256 + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance Scale", + "type": "number", + "maximum": 20, + "description": "Guidance scale for the model.", + "default": 7.5 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "Number of inference steps to perform.", + "default": 50 + }, + "textured_mesh": { + "title": "Textured Mesh", + "type": "boolean", + "description": "If set true, textured mesh will be generated and the price charged would be 3 times that of white mesh.", + "default": false + } + }, + "x-fal-order-properties": [ + "input_image_url", + "seed", + "num_inference_steps", + "guidance_scale", + "octree_resolution", + "textured_mesh" + ], + "required": [ + "input_image_url" + ] + }, + "Hunyuan3dV21Output": { + "title": "ObjectOutput", + "type": "object", + "properties": { + "model_glb_pbr": { + "title": "Model Glb Pbr", + "description": "Generated 3D object with PBR materials.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Seed value used for generation." + }, + "model_mesh": { + "title": "Model Mesh", + "description": "Generated 3D object assets zip.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "model_glb": { + "examples": [ + { + "file_size": 1348528, + "file_name": "textured_mesh.glb", + "content_type": "application/octet-stream", + "url": "https://v3.fal.media/files/rabbit/WpMHqYy5chA5lsTNoilj__hun3d_v21.glb" + } + ], + "title": "Model Glb", + "description": "Generated 3D object.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "model_glb", + "model_glb_pbr", + "model_mesh", + "seed" + ], + "required": [ + "model_glb", + "model_mesh", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." 
+ }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/hunyuan3d-v21/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan3d-v21/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/hunyuan3d-v21": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Hunyuan3dV21Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan3d-v21/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Hunyuan3dV21Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/trellis/multi", + "metadata": { + "display_name": "Trellis", + "category": "image-to-3d", + "description": "Generate 3D models from multiple images using Trellis. 
A native 3D generative model enabling versatile and high-quality 3D asset creation.", + "status": "active", + "tags": [ + "stylized" + ], + "updated_at": "2026-01-26T21:43:49.344Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Training.jpg", + "model_url": "https://fal.run/fal-ai/trellis/multi", + "license_type": "commercial", + "date": "2025-05-02T01:45:42.664Z", + "group": { + "key": "trellis", + "label": "Multi Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/trellis/multi", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/trellis/multi queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/trellis/multi", + "category": "image-to-3d", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Training.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/trellis/multi", + "documentationUrl": "https://fal.ai/models/fal-ai/trellis/multi/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "TrellisMultiInput": { + "title": "MultiImageInputModel", + "type": "object", + "properties": { + "multiimage_algo": { + "enum": [ + "stochastic", + "multidiffusion" + ], + "description": "Algorithm for multi-image generation", + "type": "string", + "title": "Multiimage Algo", + "default": "stochastic" + }, + "slat_sampling_steps": { + "minimum": 1, + "description": "Sampling steps for structured latent generation", + "type": "integer", + "maximum": 50, + "title": "Slat Sampling Steps", + "default": 12 + }, + "ss_sampling_steps": { + "minimum": 1, + "description": "Sampling steps for sparse structure generation", + "type": "integer", + "maximum": 50, + "title": "Ss Sampling Steps", + "default": 12 + }, + "ss_guidance_strength": { + "minimum": 0, + "description": "Guidance strength for sparse structure generation", + "type": "number", + "maximum": 10, + "title": "Ss Guidance Strength", + "default": 7.5 + }, + "slat_guidance_strength": { + "minimum": 0, + "description": "Guidance strength for structured latent generation", + "type": "number", + "maximum": 10, + "title": "Slat Guidance Strength", + "default": 3 + }, + "mesh_simplify": { + "minimum": 0.9, + "description": "Mesh simplification factor", + "type": "number", + "maximum": 0.98, + "title": "Mesh Simplify", + "default": 0.95 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "Random seed for reproducibility", + "title": "Seed" + }, + "texture_size": { + "enum": [ + 512, + 1024, + 2048 + ], + "description": "Texture resolution", + "type": "integer", + "title": "Texture Size", + "default": 1024 + }, + "image_urls": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/model_tests/video_models/front.png", + "https://storage.googleapis.com/falserverless/model_tests/video_models/back.png", + "https://storage.googleapis.com/falserverless/model_tests/video_models/left.png" + ] + ], + "description": "List of URLs of input images to convert to 3D", + "type": "array", + "title": "Image Urls", + "items": { + "type": "string" + } + } + }, + "x-fal-order-properties": [ + "image_urls", + "seed", + "ss_guidance_strength", + "ss_sampling_steps", + "slat_guidance_strength", + "slat_sampling_steps", + "mesh_simplify", + "texture_size", + "multiimage_algo" + ], + "required": [ + "image_urls" + ] + }, + "TrellisMultiOutput": { + "title": "ObjectOutput", + "type": "object", + "properties": { + "model_mesh": { + "description": "Generated 3D mesh file", + "$ref": "#/components/schemas/File" + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "description": "Processing timings", + "title": "Timings" + } + }, + "x-fal-order-properties": [ + "model_mesh", + "timings" + ], + "required": [ + "model_mesh", + "timings" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/trellis/multi/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/trellis/multi/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/trellis/multi": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TrellisMultiInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/trellis/multi/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TrellisMultiOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "tripo3d/tripo/v2.5/image-to-3d", + "metadata": { + "display_name": "Tripo3D", + "category": "image-to-3d", + "description": "State of the art Image to 3D Object generation. 
Generate a 3D model from a single image!", + "status": "active", + "tags": [ + "image-to-3d", + "stylized" + ], + "updated_at": "2026-01-26T21:43:51.625Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/tripo-image-to-3d.webp", + "model_url": "https://fal.run/tripo3d/tripo/v2.5/image-to-3d", + "license_type": "commercial", + "date": "2025-04-25T18:54:01.400Z", + "group": { + "key": "tripo3d", + "label": "Image To 3D" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for tripo3d/tripo/v2.5/image-to-3d", + "version": "1.0.0", + "description": "The OpenAPI schema for the tripo3d/tripo/v2.5/image-to-3d queue.", + "x-fal-metadata": { + "endpointId": "tripo3d/tripo/v2.5/image-to-3d", + "category": "image-to-3d", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/tripo-image-to-3d.webp", + "playgroundUrl": "https://fal.ai/models/tripo3d/tripo/v2.5/image-to-3d", + "documentationUrl": "https://fal.ai/models/tripo3d/tripo/v2.5/image-to-3d/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "TripoV25ImageTo3dInput": { + "x-fal-order-properties": [ + "seed", + "face_limit", + "pbr", + "texture", + "texture_seed", + "auto_size", + "style", + "quad", + "texture_alignment", + "orientation", + "image_url" + ], + "type": "object", + "properties": { + "face_limit": { + "description": "Limits the number of faces on the output model. If this option is not set, the face limit will be adaptively determined.", + "type": "integer", + "title": "Face Limit" + }, + "style": { + "enum": [ + "person:person2cartoon", + "object:clay", + "object:steampunk", + "animal:venom", + "object:barbie", + "object:christmas", + "gold", + "ancient_bronze" + ], + "title": "Style", + "type": "string", + "description": "[DEPRECATED] Defines the artistic style or transformation to be applied to the 3D model, altering its appearance according to preset options (extra $0.05 per generation). Omit this option to keep the original style and appearance.", + "deprecated": true + }, + "pbr": { + "description": "A boolean option to enable pbr. The default value is True; set False to get a model without pbr. If this option is set to True, the texture option will be ignored and treated as True.", + "type": "boolean", + "title": "Pbr", + "default": false + }, + "texture_alignment": { + "enum": [ + "original_image", + "geometry" + ], + "description": "Determines the prioritization of texture alignment in the 3D model. 
The default value is original_image.", + "type": "string", + "title": "Texture Alignment", + "default": "original_image" + }, + "image_url": { + "examples": [ + "https://platform.tripo3d.ai/assets/front-235queJB.jpg", + "https://raw.githubusercontent.com/VAST-AI-Research/TripoSR/ea034e12a428fa848684a3f9f267b2042d298ca6/examples/hamburger.png", + "https://raw.githubusercontent.com/VAST-AI-Research/TripoSR/ea034e12a428fa848684a3f9f267b2042d298ca6/examples/poly_fox.png", + "https://raw.githubusercontent.com/VAST-AI-Research/TripoSR/ea034e12a428fa848684a3f9f267b2042d298ca6/examples/robot.png", + "https://raw.githubusercontent.com/VAST-AI-Research/TripoSR/ea034e12a428fa848684a3f9f267b2042d298ca6/examples/teapot.png", + "https://raw.githubusercontent.com/VAST-AI-Research/TripoSR/ea034e12a428fa848684a3f9f267b2042d298ca6/examples/tiger_girl.png", + "https://raw.githubusercontent.com/VAST-AI-Research/TripoSR/ea034e12a428fa848684a3f9f267b2042d298ca6/examples/horse.png", + "https://raw.githubusercontent.com/VAST-AI-Research/TripoSR/ea034e12a428fa848684a3f9f267b2042d298ca6/examples/flamingo.png", + "https://raw.githubusercontent.com/VAST-AI-Research/TripoSR/ea034e12a428fa848684a3f9f267b2042d298ca6/examples/unicorn.png", + "https://raw.githubusercontent.com/VAST-AI-Research/TripoSR/ea034e12a428fa848684a3f9f267b2042d298ca6/examples/chair.png", + "https://raw.githubusercontent.com/VAST-AI-Research/TripoSR/ea034e12a428fa848684a3f9f267b2042d298ca6/examples/iso_house.png", + "https://raw.githubusercontent.com/VAST-AI-Research/TripoSR/ea034e12a428fa848684a3f9f267b2042d298ca6/examples/marble.png", + "https://raw.githubusercontent.com/VAST-AI-Research/TripoSR/ea034e12a428fa848684a3f9f267b2042d298ca6/examples/police_woman.png", + "https://raw.githubusercontent.com/VAST-AI-Research/TripoSR/ea034e12a428fa848684a3f9f267b2042d298ca6/examples/captured_p.png" + ], + "description": "URL of the image to use for model generation.", + "type": "string", + "title": "Image Url" + }, + "texture": { + "enum": [ + "no", + "standard", + "HD" + ], + "description": "An option to enable texturing. Default is 'standard', set 'no' to get a model without any textures, and set 'HD' to get a model with hd quality textures.", + "type": "string", + "title": "Texture", + "default": "standard" + }, + "auto_size": { + "description": "Automatically scale the model to real-world dimensions, with the unit in meters. The default value is False.", + "type": "boolean", + "title": "Auto Size", + "default": false + }, + "seed": { + "description": "This is the random seed for model generation. The seed controls the geometry generation process, ensuring identical models when the same seed is used. This parameter is an integer and is randomly chosen if not set.", + "type": "integer", + "title": "Seed" + }, + "quad": { + "description": "Set True to enable quad mesh output (extra $0.05 per generation). If quad=True and face_limit is not set, the default face_limit will be 10000. Note: Enabling this option will force the output to be an FBX model.", + "type": "boolean", + "title": "Quad", + "default": false + }, + "orientation": { + "enum": [ + "default", + "align_image" + ], + "description": "Set orientation=align_image to automatically rotate the model to align the original image. The default value is default.", + "type": "string", + "title": "Orientation", + "default": "default" + }, + "texture_seed": { + "description": "This is the random seed for texture generation. Using the same seed will produce identical textures. 
This parameter is an integer and is randomly chosen if not set. If you want a model with different textures, please use the same seed and a different texture_seed.", + "type": "integer", + "title": "Texture Seed" + } + }, + "title": "ImageTo3dInput", + "required": [ + "image_url" + ] + }, + "TripoV25ImageTo3dOutput": { + "x-fal-order-properties": [ + "task_id", + "model_mesh", + "base_model", + "pbr_model", + "rendered_image" + ], + "type": "object", + "properties": { + "base_model": { + "title": "Base Model", + "description": "Base model", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "task_id": { + "description": "The task id of the 3D model generation.", + "type": "string", + "title": "Task Id" + }, + "rendered_image": { + "examples": [ + { + "file_size": 13718, + "content_type": "image/webp", + "url": "https://v3.fal.media/files/panda/zDTAHqp8ifMOT3upZ1xJv_legacy.webp" + } + ], + "title": "Rendered Image", + "description": "A preview image of the model", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "model_mesh": { + "examples": [ + { + "file_size": 6744644, + "content_type": "application/octet-stream", + "url": "https://v3.fal.media/files/zebra/NA4WkhbpI-XdOIFc4cDIk_tripo_model_812c3a8a-6eb3-4c09-9f40-0563d27ae7ea.glb" + } + ], + "title": "Model Mesh", + "description": "Model", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "pbr_model": { + "title": "Pbr Model", + "description": "Pbr model", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "Tripo3dOutput", + "required": [ + "task_id" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/tripo3d/tripo/v2.5/image-to-3d/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/tripo3d/tripo/v2.5/image-to-3d/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/tripo3d/tripo/v2.5/image-to-3d": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TripoV25ImageTo3dInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/tripo3d/tripo/v2.5/image-to-3d/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TripoV25ImageTo3dOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/hunyuan3d/v2/multi-view/turbo", + "metadata": { + "display_name": "Hunyuan3D", + "category": "image-to-3d", + "description": "Generate 3D models from your images using Hunyuan 3D. A native 3D generative model enabling versatile and high-quality 3D asset creation.", + "status": "active", + "tags": [ + "stylized" + ], + "updated_at": "2026-01-26T21:44:18.592Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/model_tests/video_models/Hunyuan3D.webp", + "model_url": "https://fal.run/fal-ai/hunyuan3d/v2/multi-view/turbo", + "license_type": "commercial", + "date": "2025-03-20T00:00:00.000Z", + "group": { + "key": "hunyuan3d", + "label": "Multi-View-Turbo" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/hunyuan3d/v2/multi-view/turbo", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/hunyuan3d/v2/multi-view/turbo queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/hunyuan3d/v2/multi-view/turbo", + "category": "image-to-3d", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/model_tests/video_models/Hunyuan3D.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/hunyuan3d/v2/multi-view/turbo", + "documentationUrl": "https://fal.ai/models/fal-ai/hunyuan3d/v2/multi-view/turbo/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Hunyuan3dV2MultiViewTurboInput": { + "x-fal-order-properties": [ + "front_image_url", + "back_image_url", + "left_image_url", + "seed", + "num_inference_steps", + "guidance_scale", + "octree_resolution", + "textured_mesh" + ], + "type": "object", + "properties": { + "front_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/video_models/front.png" + ], + "title": "Front Image Url", + "type": "string", + "description": "URL of image to use while generating the 3D model." + }, + "octree_resolution": { + "minimum": 1, + "title": "Octree Resolution", + "type": "integer", + "maximum": 1024, + "description": "Octree resolution for the model.", + "default": 256 + }, + "back_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/video_models/back.png" + ], + "description": "URL of image to use while generating the 3D model.", + "type": "string", + "title": "Back Image Url" + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance Scale", + "type": "number", + "description": "Guidance scale for the model.", + "maximum": 20, + "default": 7.5 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "description": "Number of inference steps to perform.", + "title": "Num Inference Steps", + "default": 50 + }, + "textured_mesh": { + "description": "If set true, textured mesh will be generated and the price charged would be 3 times that of white mesh.", + "type": "boolean", + "title": "Textured Mesh", + "default": false + }, + "seed": { + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "type": "integer", + "title": "Seed" + }, + "left_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/video_models/left.png" + ], + "description": "URL of image to use while generating the 3D model.", + "type": "string", + "title": "Left Image Url" + } + }, + "title": "Hunyuan3DInputMultiView", + "required": [ + "front_image_url", + "back_image_url", + "left_image_url" + ] + }, + "Hunyuan3dV2MultiViewTurboOutput": { + "x-fal-order-properties": [ + "model_mesh", + "seed" + ], + "type": "object", + "properties": { + "model_mesh": { + "examples": [ + { + "file_size": 720696, + "file_name": "white_mesh.glb", + "content_type": "application/octet-stream", + "url": "https://storage.googleapis.com/falserverless/model_tests/video_models/mesh.glb" + } + ], + "title": "Model Mesh", + "description": "Generated 3D object file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Seed value used for generation." 
+ } + }, + "title": "MultiViewObjectOutput", + "required": [ + "model_mesh", + "seed" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/hunyuan3d/v2/multi-view/turbo/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan3d/v2/multi-view/turbo/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/hunyuan3d/v2/multi-view/turbo": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Hunyuan3dV2MultiViewTurboInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan3d/v2/multi-view/turbo/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Hunyuan3dV2MultiViewTurboOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/hunyuan3d/v2", + "metadata": { + "display_name": "Hunyuan3D", + "category": "image-to-3d", + "description": "Generate 3D models from your images using Hunyuan 3D. 
A native 3D generative model enabling versatile and high-quality 3D asset creation.", + "status": "active", + "tags": [ + "stylized" + ], + "updated_at": "2026-01-26T21:44:18.720Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/model_tests/video_models/Hunyuan3D.webp", + "model_url": "https://fal.run/fal-ai/hunyuan3d/v2", + "license_type": "commercial", + "date": "2025-03-20T00:00:00.000Z", + "group": { + "key": "hunyuan3d", + "label": "Base" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/hunyuan3d/v2", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/hunyuan3d/v2 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/hunyuan3d/v2", + "category": "image-to-3d", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/model_tests/video_models/Hunyuan3D.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/hunyuan3d/v2", + "documentationUrl": "https://fal.ai/models/fal-ai/hunyuan3d/v2/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Hunyuan3dV2Input": { + "x-fal-order-properties": [ + "input_image_url", + "seed", + "num_inference_steps", + "guidance_scale", + "octree_resolution", + "textured_mesh" + ], + "type": "object", + "properties": { + "input_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/video_models/robot.png" + ], + "description": "URL of image to use while generating the 3D model.", + "type": "string", + "title": "Input Image Url" + }, + "octree_resolution": { + "minimum": 1, + "description": "Octree resolution for the model.", + "type": "integer", + "title": "Octree Resolution", + "maximum": 1024, + "default": 256 + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance Scale", + "type": "number", + "description": "Guidance scale for the model.", + "maximum": 20, + "default": 7.5 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "num_inference_steps": { + "minimum": 1, + "description": "Number of inference steps to perform.", + "type": "integer", + "title": "Num Inference Steps", + "maximum": 50, + "default": 50 + }, + "textured_mesh": { + "description": "If set true, textured mesh will be generated and the price charged would be 3 times that of white mesh.", + "type": "boolean", + "title": "Textured Mesh", + "default": false + } + }, + "title": "Hunyuan3DInput", + "required": [ + "input_image_url" + ] + }, + "Hunyuan3dV2Output": { + "x-fal-order-properties": [ + "model_mesh", + "seed" + ], + "type": "object", + "properties": { + "model_mesh": { + "examples": [ + { + "file_size": 720696, + "file_name": "white_mesh.glb", + "content_type": "application/octet-stream", + "url": "https://v3.fal.media/files/lion/WqIhtKPaSoeBtC30qzIGG_white_mesh.glb" + } + ], + "title": "Model Mesh", + "description": "Generated 3D object file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "seed": { + "description": "Seed value used for generation.", + "type": "integer", + "title": "Seed" + } + }, + "title": "ObjectOutput", + "required": [ + "model_mesh", + "seed" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/hunyuan3d/v2/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan3d/v2/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/hunyuan3d/v2": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Hunyuan3dV2Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan3d/v2/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Hunyuan3dV2Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/hunyuan3d/v2/mini", + "metadata": { + "display_name": "Hunyuan3D", + "category": "image-to-3d", + "description": "Generate 3D models from your images using Hunyuan 3D. 
A native 3D generative model enabling versatile and high-quality 3D asset creation.", + "status": "active", + "tags": [ + "stylized" + ], + "updated_at": "2026-01-26T21:44:18.848Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/model_tests/video_models/Hunyuan3D.webp", + "model_url": "https://fal.run/fal-ai/hunyuan3d/v2/mini", + "license_type": "commercial", + "date": "2025-03-20T00:00:00.000Z", + "group": { + "key": "hunyuan3d", + "label": "Mini" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/hunyuan3d/v2/mini", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/hunyuan3d/v2/mini queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/hunyuan3d/v2/mini", + "category": "image-to-3d", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/model_tests/video_models/Hunyuan3D.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/hunyuan3d/v2/mini", + "documentationUrl": "https://fal.ai/models/fal-ai/hunyuan3d/v2/mini/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Hunyuan3dV2MiniInput": { + "x-fal-order-properties": [ + "input_image_url", + "seed", + "num_inference_steps", + "guidance_scale", + "octree_resolution", + "textured_mesh" + ], + "type": "object", + "properties": { + "input_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/video_models/robot.png" + ], + "description": "URL of image to use while generating the 3D model.", + "type": "string", + "title": "Input Image Url" + }, + "octree_resolution": { + "minimum": 1, + "description": "Octree resolution for the model.", + "type": "integer", + "title": "Octree Resolution", + "maximum": 1024, + "default": 256 + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance Scale", + "type": "number", + "description": "Guidance scale for the model.", + "maximum": 20, + "default": 7.5 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "num_inference_steps": { + "minimum": 1, + "description": "Number of inference steps to perform.", + "type": "integer", + "title": "Num Inference Steps", + "maximum": 50, + "default": 50 + }, + "textured_mesh": { + "description": "If set true, textured mesh will be generated and the price charged would be 3 times that of white mesh.", + "type": "boolean", + "title": "Textured Mesh", + "default": false + } + }, + "title": "Hunyuan3DInput", + "required": [ + "input_image_url" + ] + }, + "Hunyuan3dV2MiniOutput": { + "x-fal-order-properties": [ + "model_mesh", + "seed" + ], + "type": "object", + "properties": { + "model_mesh": { + "examples": [ + { + "file_size": 720696, + "file_name": "white_mesh.glb", + "content_type": "application/octet-stream", + "url": "https://v3.fal.media/files/lion/WqIhtKPaSoeBtC30qzIGG_white_mesh.glb" + } + ], + "title": "Model Mesh", + "description": "Generated 3D object file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "seed": { + "description": "Seed value used for generation.", + "type": "integer", + "title": "Seed" + } + }, + "title": "ObjectOutput", + "required": [ + "model_mesh", + "seed" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/hunyuan3d/v2/mini/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan3d/v2/mini/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/hunyuan3d/v2/mini": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Hunyuan3dV2MiniInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan3d/v2/mini/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Hunyuan3dV2MiniOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/hunyuan3d/v2/multi-view", + "metadata": { + "display_name": "Hunyuan3D", + "category": "image-to-3d", + "description": "Generate 3D models from your images using Hunyuan 3D. 
A native 3D generative model enabling versatile and high-quality 3D asset creation.", + "status": "active", + "tags": [ + "stylized" + ], + "updated_at": "2026-01-26T21:44:19.106Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/model_tests/video_models/Hunyuan3D.webp", + "model_url": "https://fal.run/fal-ai/hunyuan3d/v2/multi-view", + "license_type": "commercial", + "date": "2025-03-20T00:00:00.000Z", + "group": { + "key": "hunyuan3d", + "label": "Multi-View" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/hunyuan3d/v2/multi-view", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/hunyuan3d/v2/multi-view queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/hunyuan3d/v2/multi-view", + "category": "image-to-3d", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/model_tests/video_models/Hunyuan3D.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/hunyuan3d/v2/multi-view", + "documentationUrl": "https://fal.ai/models/fal-ai/hunyuan3d/v2/multi-view/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Hunyuan3dV2MultiViewInput": { + "x-fal-order-properties": [ + "front_image_url", + "back_image_url", + "left_image_url", + "seed", + "num_inference_steps", + "guidance_scale", + "octree_resolution", + "textured_mesh" + ], + "type": "object", + "properties": { + "front_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/video_models/front.png" + ], + "title": "Front Image Url", + "type": "string", + "description": "URL of image to use while generating the 3D model." 
+ }, + "octree_resolution": { + "minimum": 1, + "title": "Octree Resolution", + "type": "integer", + "maximum": 1024, + "description": "Octree resolution for the model.", + "default": 256 + }, + "back_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/video_models/back.png" + ], + "description": "URL of image to use while generating the 3D model.", + "type": "string", + "title": "Back Image Url" + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance Scale", + "type": "number", + "description": "Guidance scale for the model.", + "maximum": 20, + "default": 7.5 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "description": "Number of inference steps to perform.", + "title": "Num Inference Steps", + "default": 50 + }, + "textured_mesh": { + "description": "If set true, textured mesh will be generated and the price charged would be 3 times that of white mesh.", + "type": "boolean", + "title": "Textured Mesh", + "default": false + }, + "seed": { + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "type": "integer", + "title": "Seed" + }, + "left_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/video_models/left.png" + ], + "description": "URL of image to use while generating the 3D model.", + "type": "string", + "title": "Left Image Url" + } + }, + "title": "Hunyuan3DInputMultiView", + "required": [ + "front_image_url", + "back_image_url", + "left_image_url" + ] + }, + "Hunyuan3dV2MultiViewOutput": { + "x-fal-order-properties": [ + "model_mesh", + "seed" + ], + "type": "object", + "properties": { + "model_mesh": { + "examples": [ + { + "file_size": 720696, + "file_name": "white_mesh.glb", + "content_type": "application/octet-stream", + "url": "https://storage.googleapis.com/falserverless/model_tests/video_models/mesh.glb" + } + ], + "title": "Model Mesh", + "description": "Generated 3D object file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Seed value used for generation." + } + }, + "title": "MultiViewObjectOutput", + "required": [ + "model_mesh", + "seed" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/hunyuan3d/v2/multi-view/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan3d/v2/multi-view/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/hunyuan3d/v2/multi-view": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Hunyuan3dV2MultiViewInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan3d/v2/multi-view/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Hunyuan3dV2MultiViewOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/hunyuan3d/v2/turbo", + "metadata": { + "display_name": "Hunyuan3D", + "category": "image-to-3d", + "description": "Generate 3D models from your images using Hunyuan 3D. 
A native 3D generative model enabling versatile and high-quality 3D asset creation.", + "status": "active", + "tags": [ + "stylized" + ], + "updated_at": "2026-01-26T21:44:19.248Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/model_tests/video_models/Hunyuan3D.webp", + "model_url": "https://fal.run/fal-ai/hunyuan3d/v2/turbo", + "license_type": "commercial", + "date": "2025-03-20T00:00:00.000Z", + "group": { + "key": "hunyuan3d", + "label": "Base-Turbo" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/hunyuan3d/v2/turbo", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/hunyuan3d/v2/turbo queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/hunyuan3d/v2/turbo", + "category": "image-to-3d", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/model_tests/video_models/Hunyuan3D.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/hunyuan3d/v2/turbo", + "documentationUrl": "https://fal.ai/models/fal-ai/hunyuan3d/v2/turbo/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Hunyuan3dV2TurboInput": { + "x-fal-order-properties": [ + "input_image_url", + "seed", + "num_inference_steps", + "guidance_scale", + "octree_resolution", + "textured_mesh" + ], + "type": "object", + "properties": { + "input_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/video_models/robot.png" + ], + "description": "URL of image to use while generating the 3D model.", + "type": "string", + "title": "Input Image Url" + }, + "octree_resolution": { + "minimum": 1, + "description": "Octree resolution for the model.", + "type": "integer", + "title": "Octree Resolution", + "maximum": 1024, + "default": 256 + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance Scale", + "type": "number", + "description": "Guidance scale for the model.", + "maximum": 20, + "default": 7.5 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "num_inference_steps": { + "minimum": 1, + "description": "Number of inference steps to perform.", + "type": "integer", + "title": "Num Inference Steps", + "maximum": 50, + "default": 50 + }, + "textured_mesh": { + "description": "If set true, textured mesh will be generated and the price charged would be 3 times that of white mesh.", + "type": "boolean", + "title": "Textured Mesh", + "default": false + } + }, + "title": "Hunyuan3DInput", + "required": [ + "input_image_url" + ] + }, + "Hunyuan3dV2TurboOutput": { + "x-fal-order-properties": [ + "model_mesh", + "seed" + ], + "type": "object", + "properties": { + "model_mesh": { + "examples": [ + { + "file_size": 720696, + "file_name": "white_mesh.glb", + "content_type": "application/octet-stream", + "url": "https://v3.fal.media/files/lion/WqIhtKPaSoeBtC30qzIGG_white_mesh.glb" + } + ], + "title": "Model Mesh", + "description": "Generated 3D object file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "seed": { + "description": "Seed value used for generation.", + "type": "integer", + "title": "Seed" + } + }, + "title": "ObjectOutput", + "required": [ + "model_mesh", + "seed" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/hunyuan3d/v2/turbo/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan3d/v2/turbo/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/hunyuan3d/v2/turbo": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Hunyuan3dV2TurboInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan3d/v2/turbo/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Hunyuan3dV2TurboOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/hunyuan3d/v2/mini/turbo", + "metadata": { + "display_name": "Hunyuan3D", + "category": "image-to-3d", + "description": "Generate 3D models from your images using Hunyuan 3D. 
A native 3D generative model enabling versatile and high-quality 3D asset creation.", + "status": "active", + "tags": [ + "stylized" + ], + "updated_at": "2026-01-26T21:44:18.976Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/model_tests/video_models/Hunyuan3D.webp", + "model_url": "https://fal.run/fal-ai/hunyuan3d/v2/mini/turbo", + "license_type": "commercial", + "date": "2025-03-20T00:00:00.000Z", + "group": { + "key": "hunyuan3d", + "label": "Mini-Turbo" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/hunyuan3d/v2/mini/turbo", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/hunyuan3d/v2/mini/turbo queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/hunyuan3d/v2/mini/turbo", + "category": "image-to-3d", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/model_tests/video_models/Hunyuan3D.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/hunyuan3d/v2/mini/turbo", + "documentationUrl": "https://fal.ai/models/fal-ai/hunyuan3d/v2/mini/turbo/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Hunyuan3dV2MiniTurboInput": { + "x-fal-order-properties": [ + "input_image_url", + "seed", + "num_inference_steps", + "guidance_scale", + "octree_resolution", + "textured_mesh" + ], + "type": "object", + "properties": { + "input_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/video_models/robot.png" + ], + "description": "URL of image to use while generating the 3D model.", + "type": "string", + "title": "Input Image Url" + }, + "octree_resolution": { + "minimum": 1, + "description": "Octree resolution for the model.", + "type": "integer", + "title": "Octree Resolution", + "maximum": 1024, + "default": 256 + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance Scale", + "type": "number", + "description": "Guidance scale for the model.", + "maximum": 20, + "default": 7.5 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "num_inference_steps": { + "minimum": 1, + "description": "Number of inference steps to perform.", + "type": "integer", + "title": "Num Inference Steps", + "maximum": 50, + "default": 50 + }, + "textured_mesh": { + "description": "If set true, textured mesh will be generated and the price charged would be 3 times that of white mesh.", + "type": "boolean", + "title": "Textured Mesh", + "default": false + } + }, + "title": "Hunyuan3DInput", + "required": [ + "input_image_url" + ] + }, + "Hunyuan3dV2MiniTurboOutput": { + "x-fal-order-properties": [ + "model_mesh", + "seed" + ], + "type": "object", + "properties": { + "model_mesh": { + "examples": [ + { + "file_size": 720696, + "file_name": "white_mesh.glb", + "content_type": "application/octet-stream", + "url": "https://v3.fal.media/files/lion/WqIhtKPaSoeBtC30qzIGG_white_mesh.glb" + } + ], + "title": "Model Mesh", + "description": "Generated 3D object file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "seed": { + "description": "Seed value used for generation.", + "type": "integer", + "title": "Seed" + } + }, + "title": "ObjectOutput", + "required": [ + "model_mesh", + "seed" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/hunyuan3d/v2/mini/turbo/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan3d/v2/mini/turbo/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/hunyuan3d/v2/mini/turbo": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Hunyuan3dV2MiniTurboInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan3d/v2/mini/turbo/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Hunyuan3dV2MiniTurboOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/hyper3d/rodin", + "metadata": { + "display_name": "Hyper3D Rodin", + "category": "image-to-3d", + "description": "Rodin by Hyper3D generates realistic and production ready 3D models from text or images.", + "status": "active", + "tags": [ + "stylized" + ], + "updated_at": "2026-01-26T21:44:34.774Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/hyper3d-rodin.webp", + "model_url": "https://fal.run/fal-ai/hyper3d/rodin", + "date": "2024-12-16T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/hyper3d/rodin", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/hyper3d/rodin queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/hyper3d/rodin", + "category": "image-to-3d", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/hyper3d-rodin.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/hyper3d/rodin", + "documentationUrl": "https://fal.ai/models/fal-ai/hyper3d/rodin/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + 
"name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Hyper3dRodinInput": { + "title": "Rodin3DInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A futuristic robot with sleek metallic design." + ], + "title": "Prompt", + "type": "string", + "description": "A textual prompt to guide model generation. Required for Text-to-3D mode. Optional for Image-to-3D mode.", + "default": "" + }, + "condition_mode": { + "enum": [ + "fuse", + "concat" + ], + "title": "Condition Mode", + "type": "string", + "description": "For fuse mode, One or more images are required.It will generate a model by extracting and fusing features of objects from multiple images.For concat mode, need to upload multiple multi-view images of the same object and generate the model. (You can upload multi-view images in any order, regardless of the order of view.)", + "default": "concat" + }, + "bbox_condition": { + "title": "Bbox Condition", + "type": "array", + "description": "An array that specifies the dimensions and scaling factor of the bounding box. Typically, this array contains 3 elements, Length(X-axis), Width(Y-axis) and Height(Z-axis).", + "example": [ + 100, + 50, + 150 + ], + "items": { + "type": "integer" + } + }, + "tier": { + "enum": [ + "Regular", + "Sketch" + ], + "title": "Tier", + "type": "string", + "description": "Tier of generation. For Rodin Sketch, set to Sketch. For Rodin Regular, set to Regular.", + "default": "Regular" + }, + "quality": { + "enum": [ + "high", + "medium", + "low", + "extra-low" + ], + "title": "Quality", + "type": "string", + "description": "Generation quality. Possible values: high, medium, low, extra-low. Default is medium.", + "default": "medium" + }, + "TAPose": { + "title": "T/A Pose", + "type": "boolean", + "description": "When generating the human-like model, this parameter control the generation result to T/A Pose.", + "default": false + }, + "input_image_urls": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/video_models/robot.png" + ], + "title": "Input Image Urls", + "type": "array", + "description": "URL of images to use while generating the 3D model. Required for Image-to-3D mode. Optional for Text-to-3D mode.", + "items": { + "type": "string" + } + }, + "geometry_file_format": { + "enum": [ + "glb", + "usdz", + "fbx", + "obj", + "stl" + ], + "title": "Geometry File Format", + "type": "string", + "description": "Format of the geometry file. Possible values: glb, usdz, fbx, obj, stl. Default is glb.", + "default": "glb" + }, + "use_hyper": { + "title": "Use Hyper", + "type": "boolean", + "description": "Whether to export the model using hyper mode. 
Default is false.", + "default": false + }, + "addons": { + "enum": [ + "HighPack" + ], + "title": "Addons", + "type": "string", + "description": "Generation add-on features. Default is []. Possible values are HighPack. The HighPack option will provide 4K resolution textures instead of the default 1K, as well as models with high-poly. It will cost triple the billable units." + }, + "seed": { + "minimum": 0, + "maximum": 65535, + "type": "integer", + "title": "Seed", + "description": "Seed value for randomization, ranging from 0 to 65535. Optional." + }, + "material": { + "enum": [ + "PBR", + "Shaded" + ], + "title": "Material", + "type": "string", + "examples": [ + "Shaded" + ], + "description": "Material type. Possible values: PBR, Shaded. Default is PBR.", + "default": "PBR" + } + }, + "x-fal-order-properties": [ + "prompt", + "input_image_urls", + "condition_mode", + "seed", + "geometry_file_format", + "material", + "quality", + "use_hyper", + "tier", + "TAPose", + "bbox_condition", + "addons" + ] + }, + "Hyper3dRodinOutput": { + "title": "ObjectOutput", + "type": "object", + "properties": { + "model_mesh": { + "examples": [ + { + "url": "https://v3.fal.media/files/koala/VlX4JqNI8F9HO2ETp_B7t_base_basic_pbr.glb" + } + ], + "title": "Model Mesh", + "description": "Generated 3D object file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Seed value used for generation." + }, + "textures": { + "title": "Textures", + "type": "array", + "description": "Generated textures for the 3D object.", + "items": { + "$ref": "#/components/schemas/Image" + } + } + }, + "x-fal-order-properties": [ + "model_mesh", + "seed", + "textures" + ], + "required": [ + "model_mesh", + "seed", + "textures" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." 
+ }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/hyper3d/rodin/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hyper3d/rodin/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/hyper3d/rodin": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Hyper3dRodinInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hyper3d/rodin/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Hyper3dRodinOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/trellis", + "metadata": { + "display_name": "Trellis", + "category": "image-to-3d", + "description": "Generate 3D models from your images using Trellis. 
A native 3D generative model enabling versatile and high-quality 3D asset creation.", + "status": "active", + "tags": [ + "stylized" + ], + "updated_at": "2026-01-26T21:44:10.283Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/web-examples/trellis/trellis-photo.jpg", + "model_url": "https://fal.run/fal-ai/trellis", + "github_url": "https://github.com/microsoft/TRELLIS/blob/main/LICENSE", + "license_type": "commercial", + "date": "2024-12-13T00:00:00.000Z", + "group": { + "key": "trellis", + "label": "Single Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/trellis", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/trellis queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/trellis", + "category": "image-to-3d", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/web-examples/trellis/trellis-photo.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/trellis", + "documentationUrl": "https://fal.ai/models/fal-ai/trellis/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "TrellisInput": { + "title": "InputModel", + "type": "object", + "properties": { + "slat_sampling_steps": { + "minimum": 1, + "description": "Sampling steps for structured latent generation", + "type": "integer", + "maximum": 50, + "title": "Slat Sampling Steps", + "default": 12 + }, + "ss_sampling_steps": { + "minimum": 1, + "description": "Sampling steps for sparse structure generation", + "type": "integer", + "maximum": 50, + "title": "Ss Sampling Steps", + "default": 12 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/web-examples/rodin3d/warriorwoman.png" + ], + "description": "URL of the input image to convert to 3D", + "type": "string", + "title": "Image Url" + }, + "slat_guidance_strength": { + "minimum": 0, + "description": "Guidance strength for structured latent generation", + "type": "number", + "maximum": 10, + "title": "Slat Guidance Strength", + "default": 3 + }, + "ss_guidance_strength": { + "minimum": 0, + "description": "Guidance strength for sparse structure generation", + "type": "number", + "maximum": 10, + "title": "Ss Guidance Strength", + "default": 7.5 + }, + "mesh_simplify": { + "minimum": 0.9, + "description": "Mesh simplification factor", + "type": "number", + "maximum": 0.98, + "title": "Mesh Simplify", + "default": 0.95 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "Random seed for reproducibility", + "title": "Seed" + }, + "texture_size": { + "enum": [ + 512, + 1024, + 2048 + ], + "description": "Texture resolution", + "type": "integer", + "title": "Texture Size", + "default": 1024 + } + }, + "x-fal-order-properties": [ + "image_url", + "seed", + "ss_guidance_strength", + "ss_sampling_steps", + "slat_guidance_strength", + "slat_sampling_steps", + "mesh_simplify", + "texture_size" + ], + "required": [ + "image_url" + ] + }, + "TrellisOutput": { + "title": "ObjectOutput", + "type": "object", + "properties": { + "model_mesh": { + "description": "Generated 3D mesh file", + "$ref": "#/components/schemas/File" + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "description": "Processing timings", + "title": "Timings" + } + }, + "x-fal-order-properties": [ + "model_mesh", + "timings" + ], + "required": [ + "model_mesh", + "timings" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/trellis/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/trellis/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/trellis": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TrellisInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/trellis/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TrellisOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/triposr", + "metadata": { + "display_name": "TripoSR", + "category": "image-to-3d", + "description": "State of the art Image to 3D Object generation", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:44:58.680Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/triposr.jpeg", + "model_url": "https://fal.run/fal-ai/triposr", + "date": "2024-01-30T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/triposr", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/triposr queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/triposr", + "category": "image-to-3d", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/triposr.jpeg", + "playgroundUrl": 
"https://fal.ai/models/fal-ai/triposr", + "documentationUrl": "https://fal.ai/models/fal-ai/triposr/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "TriposrInput": { + "title": "TripoSRInput", + "type": "object", + "properties": { + "mc_resolution": { + "minimum": 32, + "maximum": 1024, + "type": "integer", + "title": "Mc Resolution", + "description": "Resolution of the marching cubes. Above 512 is not recommended.", + "default": 256 + }, + "do_remove_background": { + "title": "Do Remove Background", + "type": "boolean", + "description": "Whether to remove the background from the input image.", + "default": true + }, + "foreground_ratio": { + "minimum": 0.5, + "maximum": 1, + "type": "number", + "title": "Foreground Ratio", + "description": "Ratio of the foreground image to the original image.", + "default": 0.9 + }, + "output_format": { + "enum": [ + "glb", + "obj" + ], + "title": "Output Format", + "type": "string", + "description": "Output format for the 3D model.", + "default": "glb" + }, + "image_url": { + "examples": [ + "https://raw.githubusercontent.com/VAST-AI-Research/TripoSR/ea034e12a428fa848684a3f9f267b2042d298ca6/examples/hamburger.png", + "https://raw.githubusercontent.com/VAST-AI-Research/TripoSR/ea034e12a428fa848684a3f9f267b2042d298ca6/examples/poly_fox.png", + "https://raw.githubusercontent.com/VAST-AI-Research/TripoSR/ea034e12a428fa848684a3f9f267b2042d298ca6/examples/robot.png", + "https://raw.githubusercontent.com/VAST-AI-Research/TripoSR/ea034e12a428fa848684a3f9f267b2042d298ca6/examples/teapot.png", + "https://raw.githubusercontent.com/VAST-AI-Research/TripoSR/ea034e12a428fa848684a3f9f267b2042d298ca6/examples/tiger_girl.png", + "https://raw.githubusercontent.com/VAST-AI-Research/TripoSR/ea034e12a428fa848684a3f9f267b2042d298ca6/examples/horse.png", + "https://raw.githubusercontent.com/VAST-AI-Research/TripoSR/ea034e12a428fa848684a3f9f267b2042d298ca6/examples/flamingo.png", + "https://raw.githubusercontent.com/VAST-AI-Research/TripoSR/ea034e12a428fa848684a3f9f267b2042d298ca6/examples/unicorn.png", + "https://raw.githubusercontent.com/VAST-AI-Research/TripoSR/ea034e12a428fa848684a3f9f267b2042d298ca6/examples/chair.png", + "https://raw.githubusercontent.com/VAST-AI-Research/TripoSR/ea034e12a428fa848684a3f9f267b2042d298ca6/examples/iso_house.png", + "https://raw.githubusercontent.com/VAST-AI-Research/TripoSR/ea034e12a428fa848684a3f9f267b2042d298ca6/examples/marble.png", + "https://raw.githubusercontent.com/VAST-AI-Research/TripoSR/ea034e12a428fa848684a3f9f267b2042d298ca6/examples/police_woman.png", + 
"https://raw.githubusercontent.com/VAST-AI-Research/TripoSR/ea034e12a428fa848684a3f9f267b2042d298ca6/examples/captured_p.png" + ], + "title": "Image Url", + "type": "string", + "description": "Path for the image file to be processed." + } + }, + "x-fal-order-properties": [ + "image_url", + "output_format", + "do_remove_background", + "foreground_ratio", + "mc_resolution" + ], + "required": [ + "image_url" + ] + }, + "TriposrOutput": { + "title": "ObjectOutput", + "type": "object", + "properties": { + "remeshing_dir": { + "title": "Remeshing Dir", + "description": "Directory containing textures for the remeshed model.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "model_mesh": { + "title": "Model Mesh", + "description": "Generated 3D object file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings", + "description": "Inference timings." + } + }, + "x-fal-order-properties": [ + "model_mesh", + "timings", + "remeshing_dir" + ], + "required": [ + "model_mesh", + "timings" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/triposr/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/triposr/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/triposr": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TriposrInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/triposr/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TriposrOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/packages/typescript/ai-fal/json/fal.models.image-to-image.json b/packages/typescript/ai-fal/json/fal.models.image-to-image.json new file mode 100644 index 00000000..a91c9af1 --- /dev/null +++ b/packages/typescript/ai-fal/json/fal.models.image-to-image.json @@ -0,0 +1,163976 @@ +{ + "generated_at": "2026-01-28T02:51:51.784Z", + "total_models": 349, + "category": "image-to-image", + "models": [ + { + "endpoint_id": "fal-ai/flux-pro/kontext", + "metadata": { + "display_name": "FLUX.1 Kontext [pro]", + "category": "image-to-image", + "description": "FLUX.1 Kontext [pro] handles both text and reference images as inputs, seamlessly enabling targeted, local edits and complex transformations of entire scenes.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:23.479Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Training-2.jpg", + "model_url": "https://fal.run/fal-ai/flux-pro/kontext", + "license_type": "commercial", + "date": "2025-05-28T18:30:52.718Z", + "group": { + "key": "flux-pro-kontext", + "label": "Kontext [pro] -- Editing" + }, + "highlighted": true, + "kind": "inference", + "pinned": true + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-pro/kontext", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-pro/kontext queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-pro/kontext", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Training-2.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-pro/kontext", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-pro/kontext/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxProKontextInput": { + "title": "FluxKontextInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Put a donut next to the flour." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "description": "The number of images to generate.", + "maximum": 4, + "default": 1 + }, + "aspect_ratio": { + "enum": [ + "21:9", + "16:9", + "4:3", + "3:2", + "1:1", + "2:3", + "3:4", + "9:16", + "9:21" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated image." + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/rabbit/rmgBxhwGYb2d3pl3x9sKf_output.png" + ], + "title": "Image URL", + "type": "string", + "description": "Image prompt for the omni model." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "safety_tolerance": { + "enum": [ + "1", + "2", + "3", + "4", + "5", + "6" + ], + "title": "Safety Tolerance", + "type": "string", + "description": "The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive.", + "default": "2" + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance scale (CFG)", + "type": "number", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "maximum": 20, + "default": 3.5 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "enhance_prompt": { + "title": "Enhance Prompt", + "type": "boolean", + "description": "Whether to enhance the prompt for better results.", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "output_format", + "safety_tolerance", + "enhance_prompt", + "aspect_ratio", + "image_url" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "FluxProKontextOutput": { + "title": "FluxKontextOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." 
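For reference while reading FluxProKontextInput above, here is the same schema transcribed as a TypeScript type. Field names, ranges, and defaults come straight from the JSON; the type itself is illustrative, not generated code.

```ts
// Sketch: FluxProKontextInput as a TS type, mirroring the schema above.
type AspectRatio =
  | '21:9' | '16:9' | '4:3' | '3:2' | '1:1'
  | '2:3' | '3:4' | '9:16' | '9:21'

interface FluxProKontextInput {
  prompt: string // required
  image_url: string // required: the reference image to edit
  seed?: number
  guidance_scale?: number // 1-20, default 3.5
  sync_mode?: boolean // default false
  num_images?: number // 1-4, default 1
  output_format?: 'jpeg' | 'png' // default 'jpeg'
  safety_tolerance?: '1' | '2' | '3' | '4' | '5' | '6' // default '2'
  enhance_prompt?: boolean // default false
  aspect_ratio?: AspectRatio
}

// The schema's own example values:
const input: FluxProKontextInput = {
  prompt: 'Put a donut next to the flour.',
  image_url:
    'https://v3.fal.media/files/rabbit/rmgBxhwGYb2d3pl3x9sKf_output.png',
}
```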
+ }, + "images": { + "examples": [ + [ + { + "height": 1024, + "url": "https://fal.media/files/tiger/7dSJbIU_Ni-0Zp9eaLsvR_fe56916811d84ac69c6ffc0d32dca151.jpg", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/fal__toolkit__image__image__Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "fal__toolkit__image__image__Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-pro/kontext/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
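The status route above accepts a `logs` query flag (`1` to include logs, `0` to omit them), and the QueueStatus shape is shared across every endpoint in these files, so a single generic poller covers all of them. A sketch under the same assumptions as earlier: `FAL_KEY` in the environment and a `Key ` auth prefix.

```ts
// Sketch: a generic status poller for any fal queue endpoint. The
// `logs=1` flag and the QueueStatus fields are taken from the schemas;
// endpoint and key handling are assumptions.
async function pollUntilDone(
  endpoint: string, // e.g. 'fal-ai/flux-pro/kontext'
  requestId: string,
  intervalMs = 1000,
): Promise<void> {
  const headers = { Authorization: `Key ${process.env.FAL_KEY}` }
  for (;;) {
    const res = await fetch(
      `https://queue.fal.run/${endpoint}/requests/${requestId}/status?logs=1`,
      { headers },
    )
    const body = (await res.json()) as {
      status: 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED'
      queue_position?: number
    }
    if (body.status === 'COMPLETED') return
    await new Promise((r) => setTimeout(r, intervalMs))
  }
}
```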
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-pro/kontext/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-pro/kontext": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxProKontextInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-pro/kontext/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxProKontextOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2/lora/edit", + "metadata": { + "display_name": "Flux 2", + "category": "image-to-image", + "description": "Image-to-image editing with LoRA support for FLUX.2 [dev] from Black Forest Labs. 
Specialized style transfer and domain-specific modifications.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:15.534Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/tiger/yUqMpmIEFNYAjtwP3j5VH_5a4980d4efa9484c9ad6a85f88d7563d.jpg", + "model_url": "https://fal.run/fal-ai/flux-2/lora/edit", + "license_type": "commercial", + "date": "2025-11-23T00:16:39.018Z", + "group": { + "key": "Flux2", + "label": "Image Editing LoRA" + }, + "highlighted": true, + "kind": "inference", + "stream_url": "https://fal.ai/models/fal-ai/flux-2/lora/edit/stream", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/flux-2-trainer/edit" + ], + "inference_endpoint_ids": [ + "fal-ai/flux-2-trainer/edit" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2/lora/edit", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-2/lora/edit queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2/lora/edit", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/tiger/yUqMpmIEFNYAjtwP3j5VH_5a4980d4efa9484c9ad6a85f88d7563d.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2/lora/edit", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2/lora/edit/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2LoraEditInput": { + "x-fal-order-properties": [ + "prompt", + "guidance_scale", + "seed", + "num_inference_steps", + "image_size", + "num_images", + "acceleration", + "enable_prompt_expansion", + "sync_mode", + "enable_safety_checker", + "output_format", + "image_urls", + "loras" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Make this donut realistic" + ], + "description": "The prompt to generate an image from.", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "title": "Number of Images", + "maximum": 4, + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the image to generate. 
The width and height must be between 512 and 2048 pixels.", + "title": "Image Size", + "examples": [ + { + "height": 1152, + "width": 2016 + } + ] + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "description": "The acceleration level to use for the image generation.", + "type": "string", + "title": "Acceleration", + "examples": [ + "regular" + ], + "default": "regular" + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "png" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "loras": { + "description": "List of LoRA weights to apply (maximum 3). Each LoRA can be a URL, HuggingFace repo ID, or local path.", + "type": "array", + "title": "Loras", + "items": { + "$ref": "#/components/schemas/LoRAInput" + }, + "default": [] + }, + "guidance_scale": { + "minimum": 0, + "description": "Guidance Scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.", + "type": "number", + "title": "Guidance Scale", + "maximum": 20, + "default": 2.5 + }, + "seed": { + "description": "The seed to use for the generation. If not provided, a random seed will be used.", + "type": "integer", + "title": "Seed" + }, + "image_urls": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/example_inputs/flux2_dev_lora_edit_input.png" + ] + ], + "description": "The URLs of the images for editing. A maximum of 3 images are allowed, if more are provided, only the first 3 will be used.", + "type": "array", + "title": "Image URLs", + "items": { + "type": "string" + } + }, + "enable_prompt_expansion": { + "description": "If set to true, the prompt will be expanded for better results.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": false + }, + "num_inference_steps": { + "minimum": 4, + "description": "The number of inference steps to perform.", + "type": "integer", + "title": "Number of Inference Steps", + "maximum": 50, + "default": 28 + } + }, + "title": "Flux2EditImageLoRAInput", + "required": [ + "prompt", + "image_urls" + ] + }, + "Flux2LoraEditOutput": { + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "examples": [ + [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/flux2_dev_lora_edit_output.png" + } + ] + ], + "description": "The edited images", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "seed": { + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ", + "type": "integer", + "title": "Seed" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + } + }, + "title": "Flux2EditImageLoRAOutput", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "title": "Height", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "title": "Width", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "LoRAInput": { + "description": "LoRA weight configuration.", + "type": "object", + "properties": { + "path": { + "description": "URL, HuggingFace repo ID (owner/repo), or local path to LoRA weights.", + "type": "string", + "title": "Path" + }, + "scale": { + "minimum": 0, + "description": "Scale factor for LoRA application (0.0 to 4.0).", + "type": "number", + "title": "Scale", + "maximum": 4, + "default": 1 + } + }, + "title": "LoRAInput", + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "ImageFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "ImageFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2/lora/edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
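Flux2LoraEditInput accepts up to three LoRAs, each a `{ path, scale }` pair where `path` may be a URL, HuggingFace repo ID, or local path and `scale` ranges 0 to 4 (default 1). A payload sketch using the schema's own example URLs; the LoRA repo ID is a placeholder, not a real model.

```ts
// Sketch: a Flux2LoraEditInput payload per the schema above.
const loraEditInput = {
  prompt: 'Make this donut realistic',
  image_urls: [
    'https://storage.googleapis.com/falserverless/example_inputs/flux2_dev_lora_edit_input.png',
  ],
  // Up to 3 LoRAs; 'owner/some-flux2-lora' is a hypothetical repo ID.
  loras: [{ path: 'owner/some-flux2-lora', scale: 1 }],
  acceleration: 'regular' as const, // 'none' | 'regular' | 'high'
  num_inference_steps: 28, // 4-50, default 28
  guidance_scale: 2.5, // 0-20, default 2.5
}
```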
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2/lora/edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2/lora/edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2LoraEditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2/lora/edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2LoraEditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2/edit", + "metadata": { + "display_name": "Flux 2", + "category": "image-to-image", + "description": "Image-to-image editing with FLUX.2 [dev] from Black Forest Labs. 
Precise modifications using natural language descriptions and hex color control.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:15.884Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/panda/zYjw3YqOcfDQymX7cvMBl_3d76809c48f74eb9abe3e17e1bdd5d2d.jpg", + "model_url": "https://fal.run/fal-ai/flux-2/edit", + "license_type": "commercial", + "date": "2025-11-23T00:15:40.781Z", + "group": { + "key": "Flux2", + "label": "Image Editing" + }, + "highlighted": true, + "kind": "inference", + "stream_url": "https://fal.ai/models/fal-ai/flux-2/edit/stream", + "pinned": false, + "training_endpoint_ids": [ + "flux-2-trainer/edit" + ], + "inference_endpoint_ids": [ + "flux-2-trainer/edit" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2/edit", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-2/edit queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2/edit", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/panda/zYjw3YqOcfDQymX7cvMBl_3d76809c48f74eb9abe3e17e1bdd5d2d.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2/edit", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2/edit/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2EditInput": { + "x-fal-order-properties": [ + "prompt", + "guidance_scale", + "seed", + "num_inference_steps", + "image_size", + "num_images", + "acceleration", + "enable_prompt_expansion", + "sync_mode", + "enable_safety_checker", + "output_format", + "image_urls" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Change his clothes to casual suit and tie" + ], + "description": "The prompt to edit the image.", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "title": "Number of Images", + "maximum": 4, + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the image to generate. 
The width and height must be between 512 and 2048 pixels.", + "title": "Image Size", + "examples": [ + { + "height": 1152, + "width": 2016 + } + ] + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "description": "The acceleration level to use for the image generation.", + "type": "string", + "title": "Acceleration", + "examples": [ + "regular" + ], + "default": "regular" + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "png" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "description": "Guidance Scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.", + "type": "number", + "title": "Guidance Scale", + "maximum": 20, + "default": 2.5 + }, + "seed": { + "description": "The seed to use for the generation. If not provided, a random seed will be used.", + "type": "integer", + "title": "Seed" + }, + "image_urls": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/example_inputs/flux2_dev_edit_input.png" + ] + ], + "description": "The URLs of the images for editing. A maximum of 4 images are allowed, if more are provided, only the first 4 will be used.", + "type": "array", + "title": "Image URLs", + "items": { + "type": "string" + } + }, + "enable_prompt_expansion": { + "description": "If set to true, the prompt will be expanded for better results.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": false + }, + "num_inference_steps": { + "minimum": 4, + "description": "The number of inference steps to perform.", + "type": "integer", + "title": "Number of Inference Steps", + "maximum": 50, + "default": 28 + } + }, + "title": "Flux2EditImageInput", + "required": [ + "prompt", + "image_urls" + ] + }, + "Flux2EditOutput": { + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "examples": [ + [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/flux2_dev_edit_output.png" + } + ] + ], + "description": "The edited images", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "seed": { + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ", + "type": "integer", + "title": "Seed" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + } + }, + "title": "Flux2EditImageOutput", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "title": "Height", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "title": "Width", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "ImageFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "ImageFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2/edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2/edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
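Flux2EditInput takes `image_urls` rather than a single image; per the schema, only the first four entries are used. A submit sketch under the same auth assumption as the earlier examples.

```ts
// Sketch: submitting a multi-image edit to fal-ai/flux-2/edit.
async function submitFlux2Edit(prompt: string, imageUrls: string[]) {
  const res = await fetch('https://queue.fal.run/fal-ai/flux-2/edit', {
    method: 'POST',
    headers: {
      Authorization: `Key ${process.env.FAL_KEY}`, // prefix assumed
      'Content-Type': 'application/json',
    },
    body: JSON.stringify({
      prompt,
      image_urls: imageUrls.slice(0, 4), // schema: only the first 4 are used
      output_format: 'png', // schema default
    }),
  })
  // The queue responds with a QueueStatus, not the finished images.
  return (await res.json()) as { request_id: string; status_url?: string }
}
```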
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2/edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2EditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2/edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2EditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2-pro/edit", + "metadata": { + "display_name": "Flux 2 Pro", + "category": "image-to-image", + "description": "Text-to-image generation with FLUX.2 [pro] from Black Forest Labs. Optimized for maximum quality, exceptional photorealism and artistic images.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:16.478Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/penguin/eZetcrsZI6AQLCD3f5gaI_b173ae004bdd4108bd1be54eb6e49c7a.jpg", + "model_url": "https://fal.run/fal-ai/flux-2-pro/edit", + "license_type": "commercial", + "date": "2025-11-23T00:13:41.404Z", + "group": { + "key": "Flux2-Pro", + "label": "Image Editing" + }, + "highlighted": true, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2-pro/edit", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-2-pro/edit queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2-pro/edit", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/penguin/eZetcrsZI6AQLCD3f5gaI_b173ae004bdd4108bd1be54eb6e49c7a.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2-pro/edit", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2-pro/edit/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2ProEditInput": { + "x-fal-order-properties": [ + "prompt", + "image_size", + "seed", + "safety_tolerance", + "enable_safety_checker", + "output_format", + "sync_mode", + "image_urls" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Place realistic flames emerging from the top of the coffee cup, dancing above the rim" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "auto", + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image. If `auto`, the size will be determined by the model.", + "default": "auto" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "safety_tolerance": { + "enum": [ + "1", + "2", + "3", + "4", + "5" + ], + "title": "Safety Tolerance", + "type": "string", + "description": "The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive.", + "default": "2" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed to use for the generation." + }, + "image_urls": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/example_inputs/flux2_pro_edit_input.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "List of URLs of input images for editing", + "items": { + "type": "string" + } + } + }, + "title": "Flux2ProImageEditInput", + "required": [ + "prompt", + "image_urls" + ] + }, + "Flux2ProEditOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/flux2_pro_edit_output.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated images.", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for the generation." 
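Note that Flux2ProEditInput's `image_size` is a union: either a named preset (including `auto`, the default, which lets the model choose) or an explicit `{ width, height }` object with each dimension between 1 and 14142. A sketch of that union, using the schema's own example prompt and input URL.

```ts
// Sketch: the image_size union from Flux2ProEditInput.
type Flux2ProImageSize =
  | 'auto'
  | 'square_hd' | 'square'
  | 'portrait_4_3' | 'portrait_16_9'
  | 'landscape_4_3' | 'landscape_16_9'
  | { width: number; height: number }

const proEditInput = {
  prompt:
    'Place realistic flames emerging from the top of the coffee cup, dancing above the rim',
  image_urls: [
    'https://storage.googleapis.com/falserverless/example_inputs/flux2_pro_edit_input.png',
  ],
  image_size: 'auto' satisfies Flux2ProImageSize, // default
  safety_tolerance: '2', // '1' (strictest) .. '5' (most permissive)
}
```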
+ } + }, + "title": "Flux2ProEditOutput", + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "ImageFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the image" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the image" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "ImageFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2-pro/edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-pro/edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2-pro/edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2ProEditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-pro/edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2ProEditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux/dev/image-to-image", + "metadata": { + "display_name": "FLUX.1 [dev]", + "category": "image-to-image", + "description": "FLUX.1 Image-to-Image is a high-performance endpoint for the FLUX.1 [dev] model that enables rapid transformation of existing images, delivering high-quality style transfers and image modifications with the core FLUX capabilities.", + "status": "active", + "tags": [ + "style transfer" + ], + "updated_at": "2026-01-26T21:44:14.705Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/panda/jJ3ZxKTV6ulhHV6GKi9nZ_68430b557ef64f68bf6f0fed0e78c6f9.jpg", + "model_url": "https://fal.run/fal-ai/flux/dev/image-to-image", + "license_type": "commercial", + "date": "2024-07-11T00:00:00.000Z", + "group": { + "key": "flux-1", + "label": "Image to Image [dev]" + }, + "highlighted": true, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux/dev/image-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux/dev/image-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux/dev/image-to-image", + "category": "image-to-image", + "thumbnailUrl": "https://fal.media/files/panda/jJ3ZxKTV6ulhHV6GKi9nZ_68430b557ef64f68bf6f0fed0e78c6f9.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux/dev/image-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/flux/dev/image-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxDevImageToImageInput": { + "title": "BaseImageToInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A cat dressed as a wizard with a background of a mystic forest." + ], + "description": "The prompt to generate an image from.", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "title": "Num Images", + "maximum": 4, + "default": 1 + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "description": "The speed of the generation. The higher the speed, the faster the generation.", + "type": "string", + "title": "Acceleration", + "default": "none" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://fal.media/files/koala/Chls9L2ZnvuipUTEwlnJC.png" + ], + "description": "The URL of the image to generate an image from.", + "type": "string", + "title": "Image URL" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "strength": { + "minimum": 0.01, + "description": "The strength of the initial image. Higher strength values are better for this model.", + "type": "number", + "title": "Strength", + "maximum": 1, + "default": 0.95 + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "title": "Seed" + }, + "guidance_scale": { + "minimum": 1, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "type": "number", + "title": "Guidance scale (CFG)", + "maximum": 20, + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 10, + "description": "The number of inference steps to perform.", + "type": "integer", + "title": "Num Inference Steps", + "maximum": 50, + "default": 40 + } + }, + "x-fal-order-properties": [ + "image_url", + "strength", + "num_inference_steps", + "prompt", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "acceleration" + ], + "required": [ + "image_url", + "prompt" + ] + }, + "FluxDevImageToImageOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "description": "The generated image files info.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "seed": { + "description": 
"\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux/dev/image-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux/dev/image-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux/dev/image-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxDevImageToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux/dev/image-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxDevImageToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/aura-sr", + "metadata": { + "display_name": "AuraSR", + "category": "image-to-image", + "description": "Upscale your images with AuraSR.", + "status": "active", + "tags": [ + "upscaling", + "high-res" + ], + "updated_at": "2026-01-26T21:44:15.354Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/koala/rW7Rhmjtkjvb8gnOPUhNN_b14088de7d684d4b8489db59d53ae3f7.jpg", + "thumbnail_animated_url": "https://storage.googleapis.com/falserverless/gallery/aurasr-animated.webp", + "model_url": "https://fal.run/fal-ai/aura-sr", + "license_type": "commercial", + "date": "2024-04-11T00:00:00.000Z", + "highlighted": true, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/aura-sr", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/aura-sr queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/aura-sr", + "category": "image-to-image", + "thumbnailUrl": "https://v3.fal.media/files/koala/rW7Rhmjtkjvb8gnOPUhNN_b14088de7d684d4b8489db59d53ae3f7.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/aura-sr", + "documentationUrl": "https://fal.ai/models/fal-ai/aura-sr/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "AuraSrInput": { + "title": "Input", + "type": "object", + "properties": { + "overlapping_tiles": { + "examples": [ + true, + false + ], + "title": "Overlapping Tiles", + "type": "boolean", + "description": "Whether to use overlapping tiles for upscaling. 
Setting this to true helps remove seams but doubles the inference time.", + "default": false + }, + "checkpoint": { + "enum": [ + "v1", + "v2" + ], + "title": "Checkpoint", + "type": "string", + "description": "Checkpoint to use for upscaling. More coming soon.", + "examples": [ + "v2", + "v1" + ], + "default": "v1" + }, + "upscaling_factor": { + "enum": [ + 4 + ], + "title": "Upscaling Factor (Xs)", + "type": "integer", + "description": "Upscaling factor. More coming soon.", + "examples": [ + 4 + ], + "default": 4 + }, + "image_url": { + "examples": [ + "https://fal.media/files/rabbit/JlBgYUyQRS3zxiBu_B4fM.png", + "https://fal.media/files/monkey/e6RtJf_ue0vyWzeiEmTby.png", + "https://fal.media/files/monkey/A6HGsigx4mmvs-hJVoOZX.png" + ], + "title": "Image URL", + "type": "string", + "description": "URL of the image to upscale." + } + }, + "x-fal-order-properties": [ + "image_url", + "upscaling_factor", + "overlapping_tiles", + "checkpoint" + ], + "required": [ + "image_url" + ] + }, + "AuraSrOutput": { + "title": "Output", + "type": "object", + "properties": { + "image": { + "title": "Image", + "description": "Upscaled image", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings", + "description": "Timings for each step in the pipeline." + } + }, + "x-fal-order-properties": [ + "image", + "timings" + ], + "required": [ + "image", + "timings" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "description": "Represents an image file.", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/aura-sr/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/aura-sr/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/aura-sr": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AuraSrInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/aura-sr/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AuraSrOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/clarity-upscaler", + "metadata": { + "display_name": "Clarity Upscaler", + "category": "image-to-image", + "description": "Clarity upscaler for upscaling images with very high fidelity.", + "status": "active", + "tags": [ + "upscaling" + ], + "updated_at": "2026-01-26T21:44:58.551Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/controlnet-tile-upscaler.webp", + "thumbnail_animated_url": "https://storage.googleapis.com/falserverless/gallery/controlnet-tile-upscaler-animated.webp", + "model_url": "https://fal.run/fal-ai/clarity-upscaler", + "license_type": "commercial", + "date": "2024-02-04T00:00:00.000Z", + "highlighted": true, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/clarity-upscaler", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/clarity-upscaler queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/clarity-upscaler", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/controlnet-tile-upscaler.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/clarity-upscaler", + "documentationUrl": "https://fal.ai/models/fal-ai/clarity-upscaler/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." 
+ }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ClarityUpscalerInput": { + "title": "Input", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "masterpiece, best quality, highres" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results.", + "default": "masterpiece, best quality, highres" + }, + "resemblance": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Resemblance", + "description": "\n The resemblance of the upscaled image to the original image. The higher the resemblance, the more the model will try to keep the original image.\n Refers to the strength of the ControlNet.\n ", + "default": 0.6 + }, + "creativity": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Creativity", + "description": "\n The creativity of the model. The higher the creativity, the more the model will deviate from the prompt.\n Refers to the denoise strength of the sampling.\n ", + "default": 0.35 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/gallery/NOCA_Mick-Thompson.resized.resized.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "The URL of the image to upscale." + }, + "upscale_factor": { + "minimum": 1, + "maximum": 4, + "type": "number", + "title": "Upscale Factor", + "description": "The upscale factor", + "default": 2 + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 4 + }, + "num_inference_steps": { + "minimum": 4, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 18 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + }, + "negative_prompt": { + "examples": [ + "(worst quality, low quality, normal quality:2)" + ], + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to use. 
Use it to address details that you don't want in the image.", + "default": "(worst quality, low quality, normal quality:2)" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to false, the safety checker will be disabled.", + "default": true + } + }, + "x-fal-order-properties": [ + "image_url", + "prompt", + "upscale_factor", + "negative_prompt", + "creativity", + "resemblance", + "guidance_scale", + "num_inference_steps", + "seed", + "enable_safety_checker" + ], + "required": [ + "image_url" + ] + }, + "ClarityUpscalerOutput": { + "title": "Output", + "type": "object", + "properties": { + "image": { + "description": "The URL of the generated image.", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used to generate the image." + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings", + "description": "The timings of the different steps in the workflow." + } + }, + "x-fal-order-properties": [ + "image", + "seed", + "timings" + ], + "required": [ + "image", + "seed", + "timings" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the image in pixels.", + "examples": [ + 1024 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the image in pixels.", + "examples": [ + 1024 + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/clarity-upscaler/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/clarity-upscaler/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/clarity-upscaler": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ClarityUpscalerInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/clarity-upscaler/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ClarityUpscalerOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "bria/replace-background", + "metadata": { + "display_name": "Replace Background", + "category": "image-to-image", + "description": "Creates enriched product shots by placing them in various environments using textual descriptions.", + "status": "active", + "tags": [ + "bria", + "replace-background" + ], + "updated_at": "2026-01-27T11:13:18.559Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8c0fb2/wNeDpxdQqIVEIpNSmlnG0_20782e56e2b946beba84550451577cf3.jpg", + "model_url": "https://fal.run/bria/replace-background", + "license_type": "commercial", + "date": "2026-01-27T11:10:25.377Z", + "group": { + "key": "bria", + "label": "Replace Background V2" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for bria/replace-background", + "version": "1.0.0", + "description": "The OpenAPI schema for the bria/replace-background queue.", + "x-fal-metadata": { + "endpointId": "bria/replace-background", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8c0fb2/wNeDpxdQqIVEIpNSmlnG0_20782e56e2b946beba84550451577cf3.jpg", + "playgroundUrl": "https://fal.ai/models/bria/replace-background", + "documentationUrl": "https://fal.ai/models/bria/replace-background/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ReplaceBackgroundInput": { + "title": "ReplaceBackgroundInputModel", + "type": "object", + "properties": { + "prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Prompt", + "description": "Prompt for background replacement.", + "examples": [ + "On a smooth kitchen counter in front of a blue and white patterned ceramic tile wall. A yellow ceramic mug sits to the right. Shot from a straight-on front angle." + ] + }, + "steps_num": { + "title": "Steps Num", + "type": "integer", + "description": "Number of inference steps.", + "default": 30 + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If true, returns the image directly in the response (increases latency).", + "default": false + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility.", + "default": 4925634 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for background replacement.", + "default": "" + }, + "image_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Image Url", + "description": "Reference image (file or URL).", + "default": "https://v3b.fal.media/files/b/0a8bea8c/Mztgx0NG3HPdby-4iPqwH_a_coffee_machine_standing_in_the_kitchen.png" + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "seed", + "steps_num", + "negative_prompt", + "sync_mode" + ] + }, + "ReplaceBackgroundOutput": { + "title": "ReplaceBackgroundOutputModel", + "type": "object", + "properties": { + "images": { + "title": "Images", + "type": "array", + "description": "Generated images.", + "items": { + "additionalProperties": true, + "type": "object" + } + }, + "image": { + "description": "Generated image.", + "$ref": "#/components/schemas/Image" + } + }, + "x-fal-order-properties": [ + "image", + "images" + ], + "required": [ + "image" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the image in pixels.", + "examples": [ + 1024 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. 
It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the image in pixels.", + "examples": [ + 1024 + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/bria/replace-background/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/replace-background/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/bria/replace-background": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ReplaceBackgroundInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/replace-background/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ReplaceBackgroundOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "half-moon-ai/ai-face-swap/faceswapimage", + "metadata": { + "display_name": "Ai Face Swap", + "category": "image-to-image", + "description": "AI-FaceSwap-Image is a service that can take one person's face and realistically blend it onto another's in a photo.", + "status": "active", + "tags": [ + "faceswap", + "utility", + "transformation" + ], + "updated_at": "2026-01-26T21:41:28.307Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8b8d9a/Ajnze_j05eP3sK49Uw6Tu_f3900a14512249f2b7158999739b7457.jpg", + "model_url": "https://fal.run/half-moon-ai/ai-face-swap/faceswapimage", + "license_type": "commercial", + "date": "2026-01-23T14:40:30.378Z", + "group": { + "key": "Half-Moon-Faceswap", + "label": "Image" + }, + "highlighted": 
false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for half-moon-ai/ai-face-swap/faceswapimage", + "version": "1.0.0", + "description": "The OpenAPI schema for the half-moon-ai/ai-face-swap/faceswapimage queue.", + "x-fal-metadata": { + "endpointId": "half-moon-ai/ai-face-swap/faceswapimage", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8b8d9a/Ajnze_j05eP3sK49Uw6Tu_f3900a14512249f2b7158999739b7457.jpg", + "playgroundUrl": "https://fal.ai/models/half-moon-ai/ai-face-swap/faceswapimage", + "documentationUrl": "https://fal.ai/models/half-moon-ai/ai-face-swap/faceswapimage/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "AiFaceSwapFaceswapimageInput": { + "description": "Input schema for image ↔ image face swap", + "type": "object", + "properties": { + "source_face_url": { + "examples": [ + "https://images.pexels.com/photos/1642228/pexels-photo-1642228.jpeg" + ], + "description": "Source face image", + "type": "string", + "title": "Source Face Url" + }, + "target_image_url": { + "examples": [ + "https://wpmedia.wonderwall.com/2024/02/09103702/shutterstock_editorial_1581851a.jpg" + ], + "description": "Target image URL", + "type": "string", + "title": "Target Image Url" + } + }, + "x-fal-order-properties": [ + "source_face_url", + "target_image_url" + ], + "title": "FaceSwapInputImage", + "required": [ + "source_face_url", + "target_image_url" + ] + }, + "AiFaceSwapFaceswapimageOutput": { + "description": "FaceFusion output payload when image content is generated", + "type": "object", + "properties": { + "image": { + "examples": [ + { + "height": 742, + "content_type": "image/jpeg", + "url": "https://ai-tests.angeneraltest.com/test-files/faceswapimage.jpg", + "width": 520 + } + ], + "description": "Generated image result", + "$ref": "#/components/schemas/Image" + }, + "processing_time_ms": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "Optional processing duration in milliseconds", + "title": "Processing Time Ms" + } + }, + "x-fal-order-properties": [ + "image", + "processing_time_ms" + ], + "title": "FaceFusionImageOutput", + "required": [ + "image" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The 
height of the image in pixels.", + "title": "Height", + "examples": [ + 1024 + ] + }, + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The width of the image in pixels.", + "title": "Width", + "examples": [ + 1024 + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/half-moon-ai/ai-face-swap/faceswapimage/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/half-moon-ai/ai-face-swap/faceswapimage/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/half-moon-ai/ai-face-swap/faceswapimage": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AiFaceSwapFaceswapimageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/half-moon-ai/ai-face-swap/faceswapimage/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AiFaceSwapFaceswapimageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "bria/fibo-edit/replace_object_by_text", + "metadata": { + "display_name": "Fibo Edit [Replace Object by Text]", + "category": "image-to-image", + "description": "Natural, expressive object swapping within images using plain language", + "status": "active", + "tags": [ + "object-replacement", + "bria", + "fibo-edit", + "json" + ], + "updated_at": "2026-01-26T21:41:28.684Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8b245d/SpzMANTE7n5ZH288jVUam_35cae278412f4e99b5d165d94df62ddc.jpg", + "model_url": "https://fal.run/bria/fibo-edit/replace_object_by_text", + "license_type": "commercial", + "date": "2026-01-20T11:53:35.783Z", + "group": { + "key": "fibo-edit", + "label": "Replace Object by Text" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for bria/fibo-edit/replace_object_by_text", + "version": "1.0.0", + "description": "The OpenAPI schema for the bria/fibo-edit/replace_object_by_text queue.", + "x-fal-metadata": { + "endpointId": "bria/fibo-edit/replace_object_by_text", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8b245d/SpzMANTE7n5ZH288jVUam_35cae278412f4e99b5d165d94df62ddc.jpg", + "playgroundUrl": "https://fal.ai/models/bria/fibo-edit/replace_object_by_text", + "documentationUrl": "https://fal.ai/models/bria/fibo-edit/replace_object_by_text/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FiboEditReplace_object_by_textInput": { + "x-fal-order-properties": [ + "image_url", + "instruction" + ], + "type": "object", + "properties": { + "instruction": { + "examples": [ + "Replace the red apple with a green pear" + ], + "description": "The full natural language command describing what to replace.", + "type": "string", + "title": "Instruction" + }, + "image_url": { + "examples": [ + "https://bria-datasets.s3.us-east-1.amazonaws.com/Liza/a_bowl_of_fruits__should_have_a_red_apple.png" + ], + "description": "The source image.", + "type": "string", + "title": "Image Url" + } + }, + "title": "ReplaceObjectInput", + "required": [ + "image_url", + "instruction" + ] + }, + "FiboEditReplace_object_by_textOutput": { + "x-fal-order-properties": [ + "image", + "images", + "structured_instruction" + ], + "type": "object", + "properties": { + "images": { + "title": "Images", + "type": "array", + "description": "Generated images.", + "items": { + "$ref": "#/components/schemas/Image" + }, + "default": [] + }, + "image": { + "description": "Generated image.", + "$ref": "#/components/schemas/Image" + }, + "structured_instruction": { + "description": "Current instruction.", + "type": "object", + "title": "Structured Instruction", + "additionalProperties": true + } + }, + "title": "FiboEditExtraEPOutputModel", + "required": [ + "image", + "structured_instruction" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "title": "Height", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "title": "Width", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/bria/fibo-edit/replace_object_by_text/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/fibo-edit/replace_object_by_text/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/bria/fibo-edit/replace_object_by_text": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FiboEditReplace_object_by_textInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/fibo-edit/replace_object_by_text/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FiboEditReplace_object_by_textOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "bria/fibo-edit/sketch_to_colored_image", + "metadata": { + "display_name": "Fibo Edit [Sketch to Image]", + "category": "image-to-image", + "description": "Converts line drawings and sketches into photorealistic, fully colored images", + "status": "active", + "tags": [ + "sketch-to-image", + "bria", + "fibo-edit", + "json" + ], + "updated_at": "2026-01-26T21:41:28.809Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8b2450/yOdy5RpOKQGnFgoxvGmc8_40b069cd42a04bf383c544a3702518c2.jpg", + "model_url": "https://fal.run/bria/fibo-edit/sketch_to_colored_image", + "license_type": "commercial", + "date": "2026-01-20T11:47:45.178Z", + "group": { + "key": "fibo-edit", + "label": "Sketch to Colored Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for bria/fibo-edit/sketch_to_colored_image", + "version": "1.0.0", + "description": "The OpenAPI schema for the bria/fibo-edit/sketch_to_colored_image queue.", + "x-fal-metadata": { + "endpointId": "bria/fibo-edit/sketch_to_colored_image", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8b2450/yOdy5RpOKQGnFgoxvGmc8_40b069cd42a04bf383c544a3702518c2.jpg", + "playgroundUrl": "https://fal.ai/models/bria/fibo-edit/sketch_to_colored_image", + "documentationUrl": "https://fal.ai/models/bria/fibo-edit/sketch_to_colored_image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + 
"IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FiboEditSketch_to_colored_imageInput": { + "x-fal-order-properties": [ + "image_url" + ], + "type": "object", + "properties": { + "image_url": { + "examples": [ + "https://bria-datasets.s3.us-east-1.amazonaws.com/Liza/create_a_b_w_sketch_of_a_cat.png" + ], + "description": "The source image.", + "type": "string", + "title": "Image Url" + } + }, + "title": "SketchColoredImageInput", + "required": [ + "image_url" + ] + }, + "FiboEditSketch_to_colored_imageOutput": { + "x-fal-order-properties": [ + "image", + "images", + "structured_instruction" + ], + "type": "object", + "properties": { + "images": { + "title": "Images", + "type": "array", + "description": "Generated images.", + "items": { + "$ref": "#/components/schemas/Image" + }, + "default": [] + }, + "image": { + "description": "Generated image.", + "$ref": "#/components/schemas/Image" + }, + "structured_instruction": { + "description": "Current instruction.", + "type": "object", + "title": "Structured Instruction", + "additionalProperties": true + } + }, + "title": "FiboEditExtraEPOutputModel", + "required": [ + "image", + "structured_instruction" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "title": "Height", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "title": "Width", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/bria/fibo-edit/sketch_to_colored_image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/fibo-edit/sketch_to_colored_image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/bria/fibo-edit/sketch_to_colored_image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FiboEditSketch_to_colored_imageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/fibo-edit/sketch_to_colored_image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FiboEditSketch_to_colored_imageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "bria/fibo-edit/restore", + "metadata": { + "display_name": "Fibo Edit [Restore]", + "category": "image-to-image", + "description": "Automatically renews and cleans noisy or degraded images.", + "status": "active", + "tags": [ + "image-restoration", + "fibo-edit", + "bria", + "json" + ], + "updated_at": "2026-01-26T21:41:29.245Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8b2441/xs0GaTAO7yUEIyxNUFA1t_358414183ebb4bc6b8e418880de46a03.jpg", + "model_url": "https://fal.run/bria/fibo-edit/restore", + "license_type": "commercial", + "date": "2026-01-20T11:45:44.048Z", + "group": { + "key": "fibo-edit", + "label": "Restore" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for bria/fibo-edit/restore", + "version": "1.0.0", + "description": "The OpenAPI schema for the bria/fibo-edit/restore queue.", + "x-fal-metadata": { + "endpointId": "bria/fibo-edit/restore", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8b2441/xs0GaTAO7yUEIyxNUFA1t_358414183ebb4bc6b8e418880de46a03.jpg", + "playgroundUrl": "https://fal.ai/models/bria/fibo-edit/restore", + "documentationUrl": "https://fal.ai/models/bria/fibo-edit/restore/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FiboEditRestoreInput": { + "x-fal-order-properties": [ + "image_url" + ], + "type": "object", + "properties": { + "image_url": { + "examples": [ + "https://bria-datasets.s3.us-east-1.amazonaws.com/Liza/png+-+2026-01-13T134151.337.png" + ], + "description": "The source image.", + "type": "string", + "title": "Image Url" + } + }, + "title": "RestoreInput", + "required": [ + "image_url" + ] + }, + "FiboEditRestoreOutput": { + "x-fal-order-properties": [ + "image", + "images", + "structured_instruction" + ], + "type": "object", + "properties": { + "images": { + "title": "Images", + "type": "array", + "description": "Generated images.", + "items": { + "$ref": "#/components/schemas/Image" + }, + "default": [] + }, + "image": { + "description": "Generated image.", + "$ref": "#/components/schemas/Image" + }, + "structured_instruction": { + "description": "Current instruction.", + "type": "object", + "title": "Structured Instruction", + "additionalProperties": true + } + }, + "title": "FiboEditExtraEPOutputModel", + "required": [ + "image", + "structured_instruction" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "title": "Height", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "title": "Width", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/bria/fibo-edit/restore/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/fibo-edit/restore/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/bria/fibo-edit/restore": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FiboEditRestoreInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/fibo-edit/restore/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FiboEditRestoreOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "bria/fibo-edit/reseason", + "metadata": { + "display_name": "Fibo Edit [Reseason]", + "category": "image-to-image", + "description": "Transforms the seasonal or weather atmosphere of an image.", + "status": "active", + "tags": [ + "bria", + "fibo-edit", + "reseason" + ], + "updated_at": "2026-01-26T21:41:29.374Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8b2436/-APQ6KojmPWSlZmt9kQbv_0cb651b353f94746b86aed13325cd8aa.jpg", + "model_url": "https://fal.run/bria/fibo-edit/reseason", + "license_type": "commercial", + "date": "2026-01-20T11:43:27.199Z", + "group": { + "key": "fibo-edit", + "label": "Reseason" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for bria/fibo-edit/reseason", + "version": "1.0.0", + "description": "The OpenAPI schema for the bria/fibo-edit/reseason queue.", + "x-fal-metadata": { + "endpointId": "bria/fibo-edit/reseason", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8b2436/-APQ6KojmPWSlZmt9kQbv_0cb651b353f94746b86aed13325cd8aa.jpg", + "playgroundUrl": "https://fal.ai/models/bria/fibo-edit/reseason", + "documentationUrl": "https://fal.ai/models/bria/fibo-edit/reseason/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FiboEditReseasonInput": { + "x-fal-order-properties": [ + "image_url", + "season" + ], + "type": "object", + "properties": { + "season": { + "enum": [ + "spring", + "summer", + "autumn", + "winter" + ], + "description": "The desired season.", + "type": "string", + "title": "Season", + "examples": [ + "winter" + ] + }, + "image_url": { + "examples": [ + "https://bria-datasets.s3.us-east-1.amazonaws.com/Liza/create_a_realistic_image_of_a_green_field_in_the_spring__also_add_trees.png" + ], + "description": "The source image.", + "type": "string", + "title": "Image Url" + } + }, + "title": "ReseasonInput", + "required": [ + "image_url", + "season" + ] + }, + "FiboEditReseasonOutput": { + "x-fal-order-properties": [ + "image", + "images", + "structured_instruction" + ], + "type": "object", + "properties": { + "images": { + "title": "Images", + "type": "array", + "description": "Generated images.", + "items": { + "$ref": "#/components/schemas/Image" + }, + "default": [] + }, + "image": { + "description": "Generated image.", + "$ref": "#/components/schemas/Image" + }, + "structured_instruction": { + "description": "Current instruction.", + "type": "object", + "title": "Structured Instruction", + "additionalProperties": true + } + }, + "title": "FiboEditExtraEPOutputModel", + "required": [ + "image", + "structured_instruction" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "title": "Height", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "title": "Width", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/bria/fibo-edit/reseason/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/fibo-edit/reseason/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/bria/fibo-edit/reseason": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FiboEditReseasonInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/fibo-edit/reseason/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FiboEditReseasonOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "bria/fibo-edit/relight", + "metadata": { + "display_name": "Fibo Edit [Relight]", + "category": "image-to-image", + "description": "Precise, controllable lighting changes using simple, structured text inputs.", + "status": "active", + "tags": [ + "bria", + "fibo-edit", + "relighting", + "json" + ], + "updated_at": "2026-01-26T21:41:29.499Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8b2423/RcBGC6N3eWjIoh-8jqvlr_6a7d71be051c495a8d0243021b8d8bbc.jpg", + "model_url": "https://fal.run/bria/fibo-edit/relight", + "license_type": "commercial", + "date": "2026-01-20T11:40:09.156Z", + "group": { + "key": "fibo-edit", + "label": "Relight" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for bria/fibo-edit/relight", + "version": "1.0.0", + "description": "The OpenAPI schema for the bria/fibo-edit/relight queue.", + "x-fal-metadata": { + "endpointId": "bria/fibo-edit/relight", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8b2423/RcBGC6N3eWjIoh-8jqvlr_6a7d71be051c495a8d0243021b8d8bbc.jpg", + "playgroundUrl": "https://fal.ai/models/bria/fibo-edit/relight", + "documentationUrl": "https://fal.ai/models/bria/fibo-edit/relight/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FiboEditRelightInput": { + "x-fal-order-properties": [ + "image_url", + "light_direction", + "light_type" + ], + "type": "object", + "properties": { + "light_type": { + "enum": [ + "midday", + "blue hour light", + "low-angle sunlight", + "sunrise light", + "spotlight on subject", + "overcast light", + "soft overcast daylight lighting", + "cloud-filtered lighting", + "fog-diffused lighting", + "moonlight lighting", + "starlight nighttime", + "soft bokeh lighting", + "harsh studio lighting" + ], + "description": "The quality/style/time of day.", + "type": "string", + "title": "Light Type", + "examples": [ + "soft overcast daylight lighting" + ] + }, + "light_direction": { + "examples": [ + "front" + ], + "description": "Where the light comes from.", + "title": "Light Direction", + "anyOf": [ + { + "enum": [ + "front", + "side", + "bottom", + "top-down" + ], + "type": "string" + }, + { + "type": "null" + } + ] + }, + "image_url": { + "examples": [ + "https://bria-datasets.s3.us-east-1.amazonaws.com/Liza/bria_result+-+2026-01-13T095546.173.png" + ], + "description": "The source image.", + "type": "string", + "title": "Image Url" + } + }, + "title": "RelightInput", + "required": [ + "image_url", + "light_direction", + "light_type" + ] + }, + "FiboEditRelightOutput": { + "x-fal-order-properties": [ + "image", + "images", + "structured_instruction" + ], + "type": "object", + "properties": { + "images": { + "title": "Images", + "type": "array", + "description": "Generated images.", + "items": { + "$ref": "#/components/schemas/Image" + }, + "default": [] + }, + "image": { + "description": "Generated image.", + "$ref": "#/components/schemas/Image" + }, + "structured_instruction": { + "description": "Current instruction.", + "type": "object", + "title": "Structured Instruction", + "additionalProperties": true + } + }, + "title": "FiboEditExtraEPOutputModel", + "required": [ + "image", + "structured_instruction" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "title": "Height", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "title": "Width", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/bria/fibo-edit/relight/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/fibo-edit/relight/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/bria/fibo-edit/relight": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FiboEditRelightInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/fibo-edit/relight/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FiboEditRelightOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "bria/fibo-edit/restyle", + "metadata": { + "display_name": "Fibo Edit [Restyle]", + "category": "image-to-image", + "description": "Transforms images into distinct artistic styles using curated, production-grade style mappings", + "status": "active", + "tags": [ + "bria", + "fibo-edit", + "restyle", + "json" + ], + "updated_at": "2026-01-26T21:41:29.624Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8b2411/GadXEuWQwSpq7dk3M0Civ_2a6ab9491a874596aa41f3cbdf72c774.jpg", + "model_url": "https://fal.run/bria/fibo-edit/restyle", + "license_type": "commercial", + "date": "2026-01-20T11:37:52.937Z", + "group": { + "key": "fibo-edit", + "label": "Restyle" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for bria/fibo-edit/restyle", + "version": "1.0.0", + "description": "The OpenAPI schema for the bria/fibo-edit/restyle queue.", + "x-fal-metadata": { + "endpointId": "bria/fibo-edit/restyle", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8b2411/GadXEuWQwSpq7dk3M0Civ_2a6ab9491a874596aa41f3cbdf72c774.jpg", + "playgroundUrl": "https://fal.ai/models/bria/fibo-edit/restyle", + "documentationUrl": "https://fal.ai/models/bria/fibo-edit/restyle/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FiboEditRestyleInput": { + "x-fal-order-properties": [ + "image_url", + "style" + ], + "type": "object", + "properties": { + "style": { + "enum": [ + "3D Render", + "Cubism", + "Oil Painting", + "Anime", + "Cartoon", + "Coloring Book", + "Retro Ad", + "Pop Art Halftone", + "Vector Art", + "Story Board", + "Art Nouveau", + "Cross Etching", + "Wood Cut" + ], + "description": "Select the desired artistic style for the output image.", + "type": "string", + "title": "Style", + "examples": [ + "3D Render" + ] + }, + "image_url": { + "examples": [ + "https://bria-datasets.s3.us-east-1.amazonaws.com/Liza/high_camera_angle_warm_filter.png" + ], + "description": "The source image.", + "type": "string", + "title": "Image Url" + } + }, + "title": "RestyletInput", + "required": [ + "image_url", + "style" + ] + }, + "FiboEditRestyleOutput": { + "x-fal-order-properties": [ + "image", + "images", + "structured_instruction" + ], + "type": "object", + "properties": { + "images": { + "title": "Images", + "type": "array", + "description": "Generated images.", + "items": { + "$ref": "#/components/schemas/Image" + }, + "default": [] + }, + "image": { + "description": "Generated image.", + "$ref": "#/components/schemas/Image" + }, + "structured_instruction": { + "description": "Current instruction.", + "type": "object", + "title": "Structured Instruction", + "additionalProperties": true + } + }, + "title": "FiboEditExtraEPOutputModel", + "required": [ + "image", + "structured_instruction" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "title": "Height", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "title": "Width", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/bria/fibo-edit/restyle/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/fibo-edit/restyle/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/bria/fibo-edit/restyle": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FiboEditRestyleInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/fibo-edit/restyle/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FiboEditRestyleOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "bria/fibo-edit/rewrite_text", + "metadata": { + "display_name": "Fibo Edit [Rewrite Text]", + "category": "image-to-image", + "description": "Precise, reliable modification of existing text inside images.", + "status": "active", + "tags": [ + "bria", + "fibo-edit", + "text-rewriting", + "image-editing" + ], + "updated_at": "2026-01-26T21:41:29.749Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8b23df/jVMIrmCsypj236JttkUFB_2380773a816f493d8da21bd425688bd5.jpg", + "model_url": "https://fal.run/bria/fibo-edit/rewrite_text", + "license_type": "commercial", + "date": "2026-01-20T11:29:10.540Z", + "group": { + "key": "fibo-edit", + "label": "Rewrite Text" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for bria/fibo-edit/rewrite_text", + "version": "1.0.0", + "description": "The OpenAPI schema for the bria/fibo-edit/rewrite_text queue.", + "x-fal-metadata": { + "endpointId": "bria/fibo-edit/rewrite_text", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8b23df/jVMIrmCsypj236JttkUFB_2380773a816f493d8da21bd425688bd5.jpg", + "playgroundUrl": "https://fal.ai/models/bria/fibo-edit/rewrite_text", + "documentationUrl": "https://fal.ai/models/bria/fibo-edit/rewrite_text/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FiboEditRewrite_textInput": { + "x-fal-order-properties": [ + "image_url", + "new_text" + ], + "type": "object", + "properties": { + "new_text": { + "examples": [ + "FIBO Edit!" + ], + "description": "The new text string to appear in the image.", + "type": "string", + "title": "New Text" + }, + "image_url": { + "examples": [ + "https://bria-datasets.s3.us-east-1.amazonaws.com/Liza/create_an_image_of_cake__with_text_on_it_saying___Hi_there__.png" + ], + "description": "The source image.", + "type": "string", + "title": "Image Url" + } + }, + "title": "RewriteTextInput", + "required": [ + "image_url", + "new_text" + ] + }, + "FiboEditRewrite_textOutput": { + "x-fal-order-properties": [ + "image", + "images", + "structured_instruction" + ], + "type": "object", + "properties": { + "images": { + "title": "Images", + "type": "array", + "description": "Generated images.", + "items": { + "$ref": "#/components/schemas/Image" + }, + "default": [] + }, + "image": { + "description": "Generated image.", + "$ref": "#/components/schemas/Image" + }, + "structured_instruction": { + "description": "Current instruction.", + "type": "object", + "title": "Structured Instruction", + "additionalProperties": true + } + }, + "title": "FiboEditExtraEPOutputModel", + "required": [ + "image", + "structured_instruction" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "title": "Height", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "title": "Width", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/bria/fibo-edit/rewrite_text/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/fibo-edit/rewrite_text/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/bria/fibo-edit/rewrite_text": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FiboEditRewrite_textInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/fibo-edit/rewrite_text/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FiboEditRewrite_textOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "bria/fibo-edit/erase_by_text", + "metadata": { + "display_name": "Fibo Edit [Erase by Text]", + "category": "image-to-image", + "description": "Fast, reliable removal of unwanted elements from images. 
Designed for predictability, scale, and production use.", + "status": "active", + "tags": [ + "bria", + "fibo-edit", + "prompt-eraser" + ], + "updated_at": "2026-01-26T21:41:29.882Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8b23af/c69cfQKwEcq8-r1yWTAQp_7190e3ad18a141e9b2c0b5f64311be31.jpg", + "model_url": "https://fal.run/bria/fibo-edit/erase_by_text", + "license_type": "commercial", + "date": "2026-01-20T11:21:02.673Z", + "group": { + "key": "fibo-edit", + "label": "Erase by Text" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for bria/fibo-edit/erase_by_text", + "version": "1.0.0", + "description": "The OpenAPI schema for the bria/fibo-edit/erase_by_text queue.", + "x-fal-metadata": { + "endpointId": "bria/fibo-edit/erase_by_text", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8b23af/c69cfQKwEcq8-r1yWTAQp_7190e3ad18a141e9b2c0b5f64311be31.jpg", + "playgroundUrl": "https://fal.ai/models/bria/fibo-edit/erase_by_text", + "documentationUrl": "https://fal.ai/models/bria/fibo-edit/erase_by_text/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FiboEditErase_by_textInput": { + "x-fal-order-properties": [ + "image_url", + "object_name" + ], + "type": "object", + "properties": { + "object_name": { + "examples": [ + "Table" + ], + "description": "The name of the object to remove.", + "type": "string", + "title": "Object Name" + }, + "image_url": { + "examples": [ + "https://bria-datasets.s3.us-east-1.amazonaws.com/Liza/an_empty_table_in_living_room.png" + ], + "description": "The source image.", + "type": "string", + "title": "Image Url" + } + }, + "title": "EraseByTextInput", + "required": [ + "image_url", + "object_name" + ] + }, + "FiboEditErase_by_textOutput": { + "x-fal-order-properties": [ + "image", + "images", + "structured_instruction" + ], + "type": "object", + "properties": { + "images": { + "title": "Images", + "type": "array", + "description": "Generated images.", + "items": { + "$ref": "#/components/schemas/Image" + }, + "default": [] + }, + "image": { + "description": "Generated image.", + "$ref": "#/components/schemas/Image" + }, + "structured_instruction": { + "description": "Current instruction.", + "type": "object", + "title": "Structured Instruction", + "additionalProperties": true + } + }, + "title": "FiboEditExtraEPOutputModel", + "required": [ + "image", + "structured_instruction" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "title": "Height", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "title": "Width", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/bria/fibo-edit/erase_by_text/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/fibo-edit/erase_by_text/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/bria/fibo-edit/erase_by_text": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FiboEditErase_by_textInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/fibo-edit/erase_by_text/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FiboEditErase_by_textOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "bria/fibo-edit/edit", + "metadata": { + "display_name": "Fibo Edit", + "category": "image-to-image", + "description": "A high-quality editing model that achieves maximum controllability and transparency by combining JSON + Mask + Image.", + "status": "active", + "tags": [ + "bria", + "fibo-edit", + "image-editing", + "json" + ], + "updated_at": "2026-01-26T21:41:30.008Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8b2391/TzByxh26B-nDqx9gTZc0b_f58ca08fafa840de8434a2793b9f2d93.jpg", + "model_url": "https://fal.run/bria/fibo-edit/edit", + "license_type": "commercial", + "date": "2026-01-20T11:15:43.270Z", + "group": { + "key": "fibo-edit", + "label": "Edit" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for bria/fibo-edit/edit", + "version": "1.0.0", + "description": "The OpenAPI schema for the bria/fibo-edit/edit queue.", + "x-fal-metadata": { + "endpointId": "bria/fibo-edit/edit", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8b2391/TzByxh26B-nDqx9gTZc0b_f58ca08fafa840de8434a2793b9f2d93.jpg", + "playgroundUrl": "https://fal.ai/models/bria/fibo-edit/edit", + "documentationUrl": "https://fal.ai/models/bria/fibo-edit/edit/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FiboEditEditInput": { + "x-fal-order-properties": [ + "image_url", + "mask_url", + "instruction", + "structured_instruction", + "seed", + "steps_num", + "negative_prompt", + "guidance_scale", + "sync_mode" + ], + "type": "object", + "properties": { + "steps_num": { + "description": "Number of inference steps.", + "type": "integer", + "minimum": 20, + "title": "Steps Num", + "maximum": 50, + "default": 50 + }, + "instruction": { + "examples": [ + "change lighting to starlight nighttime" + ], + "description": "Instruction for image editing.", + "title": "Instruction", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "image_url": { + "examples": [ + "https://v3b.fal.media/files/b/0a8b07e8/GYKVk2EVivg_MC3jRRZi3_png%20-%202026-01-13T094835.850%20(3).png" + ], + "description": "Reference image (file or URL).", + "title": "Image Url", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "sync_mode": { + "description": "If true, returns the image directly in the response (increases latency).", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "guidance_scale": { + "description": "Guidance scale for text.", + "le": 5, + "ge": 3, + "anyOf": [ + { + "type": "number" + }, + { + "type": "integer" + } + ], + "title": "Guidance Scale", + "default": 5 + }, + "structured_instruction": { + "anyOf": [ + { + "$ref": "#/components/schemas/StructuredInstruction" + }, + { + "type": "null" + } + ], + "description": "The structured prompt to generate an image from." + }, + "mask_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Mask image (file or URL). 
Optional", + "title": "Mask Url" + }, + "negative_prompt": { + "description": "Negative prompt for image generation.", + "type": "string", + "title": "Negative Prompt", + "default": "" + }, + "seed": { + "description": "Random seed for reproducibility.", + "type": "integer", + "title": "Seed", + "default": 5555 + } + }, + "title": "FiboEditInputModel" + }, + "FiboEditEditOutput": { + "x-fal-order-properties": [ + "image", + "images", + "structured_instruction" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://bria-datasets.s3.us-east-1.amazonaws.com/Liza/ewT7wv-jMgkqs7z7xQNNL_e8707c299d034feab7a64d903118098f.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "Generated images.", + "items": { + "$ref": "#/components/schemas/Image" + }, + "default": [] + }, + "image": { + "description": "Generated image.", + "$ref": "#/components/schemas/Image" + }, + "structured_instruction": { + "description": "Current instruction.", + "type": "object", + "title": "Structured Instruction", + "additionalProperties": true + } + }, + "title": "FiboEditOutputModel", + "required": [ + "image", + "structured_instruction" + ] + }, + "StructuredInstruction": { + "x-fal-order-properties": [ + "short_description", + "objects", + "background_setting", + "lighting", + "aesthetics", + "photographic_characteristics", + "style_medium", + "text_render", + "context", + "artistic_style", + "edit_instruction" + ], + "type": "object", + "properties": { + "background_setting": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The background setting of the image to be generated.", + "title": "Background Setting" + }, + "artistic_style": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The artistic style of the image to be generated.", + "title": "Artistic Style" + }, + "style_medium": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The style medium of the image to be generated.", + "title": "Style Medium" + }, + "text_render": { + "anyOf": [ + { + "type": "array", + "items": {} + }, + { + "type": "null" + } + ], + "description": "A list of text to be rendered in the image.", + "title": "Text Render", + "default": [] + }, + "objects": { + "anyOf": [ + { + "type": "array", + "items": { + "$ref": "#/components/schemas/PromptObject" + } + }, + { + "type": "null" + } + ], + "description": "A list of objects in the image to be generated, along with their attributes and relationships to other objects in the image.", + "title": "Objects", + "default": [] + }, + "context": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The context of the image to be generated.", + "title": "Context" + }, + "photographic_characteristics": { + "anyOf": [ + { + "$ref": "#/components/schemas/PhotographicCharacteristics" + }, + { + "type": "null" + } + ], + "description": "The photographic characteristics of the image to be generated." + }, + "aesthetics": { + "anyOf": [ + { + "$ref": "#/components/schemas/Aesthetics" + }, + { + "type": "null" + } + ], + "description": "The aesthetics of the image to be generated." + }, + "lighting": { + "anyOf": [ + { + "$ref": "#/components/schemas/Lighting" + }, + { + "type": "null" + } + ], + "description": "The lighting of the image to be generated." 
+ }, + "short_description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "A short description of the image to be generated.", + "title": "Short Description" + }, + "edit_instruction": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The edit instruction for the image.", + "title": "Edit Instruction" + } + }, + "title": "StructuredInstruction" + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "title": "Height", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "title": "Width", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + } + }, + "required": [ + "url" + ] + }, + "PromptObject": { + "x-fal-order-properties": [ + "description", + "location", + "relationship", + "relative_size", + "shape_and_color", + "texture", + "appearance_details", + "number_of_objects", + "pose", + "expression", + "clothing", + "action", + "gender", + "skin_tone_and_texture", + "orientation" + ], + "type": "object", + "properties": { + "relative_size": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The relative size of the object in the image.", + "title": "Relative Size" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "A description of the object to be generated.", + "title": "Description" + }, + "skin_tone_and_texture": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The skin tone and texture of the object in the image.", + "title": "Skin Tone And Texture" + }, + "appearance_details": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The appearance details of the object.", + "title": "Appearance Details" + }, + "number_of_objects": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The number of objects in the image.", + "title": "Number Of Objects" + }, + "expression": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The expression of the object in the image.", + "title": "Expression" + }, + "pose": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The pose of the object in the image.", + "title": "Pose" + }, + "shape_and_color": { + "anyOf": [ 
+ { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The shape and color of the object.", + "title": "Shape And Color" + }, + "relationship": { + "description": "The relationship of the object to other objects in the image.", + "type": "string", + "title": "Relationship" + }, + "texture": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The texture of the object.", + "title": "Texture" + }, + "gender": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The gender of the object in the image.", + "title": "Gender" + }, + "clothing": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The clothing of the object in the image.", + "title": "Clothing" + }, + "location": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The location of the object in the image.", + "title": "Location" + }, + "orientation": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The orientation of the object in the image.", + "title": "Orientation" + }, + "action": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The action of the object in the image.", + "title": "Action" + } + }, + "title": "PromptObject", + "required": [ + "relationship" + ] + }, + "PhotographicCharacteristics": { + "x-fal-order-properties": [ + "depth_of_field", + "focus", + "camera_angle", + "lens_focal_length" + ], + "type": "object", + "properties": { + "focus": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The focus in the image to be generated.", + "title": "Focus" + }, + "lens_focal_length": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The focal length of the lens in the image to be generated.", + "title": "Lens Focal Length" + }, + "camera_angle": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The angle of the camera in the image to be generated.", + "title": "Camera Angle" + }, + "depth_of_field": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The depth of field in the image to be generated.", + "title": "Depth Of Field" + } + }, + "title": "PhotographicCharacteristics" + }, + "Aesthetics": { + "x-fal-order-properties": [ + "composition", + "color_scheme", + "mood_atmosphere" + ], + "type": "object", + "properties": { + "composition": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The composition of the image to be generated.", + "title": "Composition" + }, + "mood_atmosphere": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mood and atmosphere of the image to be generated.", + "title": "Mood Atmosphere" + }, + "color_scheme": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The color scheme of the image to be generated.", + "title": "Color Scheme" + } + }, + "title": "Aesthetics" + }, + "Lighting": { + "x-fal-order-properties": [ + "conditions", + "direction", + "shadows" + ], + "type": "object", + "properties": { + "shadows": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The shadows in the image to be generated.", + "title": "Shadows" + }, + "conditions": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], 
+ "description": "The conditions of the lighting in the image to be generated.", + "title": "Conditions" + }, + "direction": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The direction of the lighting in the image to be generated.", + "title": "Direction" + } + }, + "title": "Lighting" + } + } + }, + "paths": { + "/bria/fibo-edit/edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/fibo-edit/edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/bria/fibo-edit/edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FiboEditEditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/fibo-edit/edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FiboEditEditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "bria/fibo-edit/add_object_by_text", + "metadata": { + "display_name": "Fibo Edit [Add Object by Text]", + "category": "image-to-image", + "description": "Precise, context-aware insertion of new objects into an existing image using simple, structured spatial commands.", + "status": "active", + "tags": [ + "bria", + "fibo-edit", + "object-addition", + "json" + ], + "updated_at": "2026-01-26T21:41:30.133Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8b236f/uXgjCOumhqoBoOk1AZ91X_ed4e0c8f0cfb46acb1bdb5e2b66a2a58.jpg", + "model_url": "https://fal.run/bria/fibo-edit/add_object_by_text", + "license_type": "commercial", + "date": "2026-01-20T11:11:21.771Z", + "group": { + "key": "fibo-edit", + "label": "Add Object by Text" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for bria/fibo-edit/add_object_by_text", + "version": "1.0.0", + "description": "The OpenAPI schema for the bria/fibo-edit/add_object_by_text queue.", + "x-fal-metadata": { + "endpointId": 
"bria/fibo-edit/add_object_by_text", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8b236f/uXgjCOumhqoBoOk1AZ91X_ed4e0c8f0cfb46acb1bdb5e2b66a2a58.jpg", + "playgroundUrl": "https://fal.ai/models/bria/fibo-edit/add_object_by_text", + "documentationUrl": "https://fal.ai/models/bria/fibo-edit/add_object_by_text/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FiboEditAdd_object_by_textInput": { + "x-fal-order-properties": [ + "image_url", + "instruction" + ], + "type": "object", + "properties": { + "instruction": { + "examples": [ + "Place a red vase with flowers on the table." + ], + "description": "The full natural language command describing what to add and where.", + "type": "string", + "title": "Instruction" + }, + "image_url": { + "examples": [ + "https://bria-datasets.s3.us-east-1.amazonaws.com/Liza/an_empty_table_in_living_room.png" + ], + "description": "The source image.", + "type": "string", + "title": "Image Url" + } + }, + "title": "AddObjectByTextInput", + "required": [ + "image_url", + "instruction" + ] + }, + "FiboEditAdd_object_by_textOutput": { + "x-fal-order-properties": [ + "image", + "images", + "structured_instruction" + ], + "type": "object", + "properties": { + "images": { + "title": "Images", + "type": "array", + "description": "Generated images.", + "items": { + "$ref": "#/components/schemas/Image" + }, + "default": [] + }, + "image": { + "description": "Generated image.", + "$ref": "#/components/schemas/Image" + }, + "structured_instruction": { + "description": "Current instruction.", + "type": "object", + "title": "Structured Instruction", + "additionalProperties": true + } + }, + "title": "FiboEditExtraEPOutputModel", + "required": [ + "image", + "structured_instruction" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "title": "Height", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "title": "Width", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/bria/fibo-edit/add_object_by_text/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/fibo-edit/add_object_by_text/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/bria/fibo-edit/add_object_by_text": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FiboEditAdd_object_by_textInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/fibo-edit/add_object_by_text/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FiboEditAdd_object_by_textOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "bria/fibo-edit/blend", + "metadata": { + "display_name": "Fibo Edit [Blend]", + "category": "image-to-image", + "description": "Complex, multi-step visual composition through natural language.", + "status": "active", + "tags": [ + "bria", + "fibo-edit", + "blend", + "json" + ], + "updated_at": "2026-01-26T21:41:30.280Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8b230a/PtoYXkoWgYDMAFJqgvf9S_a050e81fc32d471893dd30080051ac7f.jpg", + "model_url": "https://fal.run/bria/fibo-edit/blend", + "license_type": "commercial", + "date": "2026-01-20T10:52:43.996Z", + "group": { + "key": "fibo-edit", + "label": "Blend" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for bria/fibo-edit/blend", + "version": "1.0.0", + "description": "The OpenAPI schema for the bria/fibo-edit/blend queue.", + "x-fal-metadata": { + "endpointId": "bria/fibo-edit/blend", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8b230a/PtoYXkoWgYDMAFJqgvf9S_a050e81fc32d471893dd30080051ac7f.jpg", + "playgroundUrl": "https://fal.ai/models/bria/fibo-edit/blend", + "documentationUrl": "https://fal.ai/models/bria/fibo-edit/blend/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FiboEditBlendInput": { + "x-fal-order-properties": [ + "image_url", + "instruction" + ], + "type": "object", + "properties": { + "instruction": { + "examples": [ + "Place the art on the shirt, keep the art exactly the same" + ], + "description": "Instruct what elements you would like to blend in your image.", + "type": "string", + "title": "Instruction" + }, + "image_url": { + "examples": [ + "https://bria-datasets.s3.us-east-1.amazonaws.com/Liza/shirt.png" + ], + "description": "The source image.", + "type": "string", + "title": "Image Url" + } + }, + "title": "BlendingInput", + "required": [ + "image_url", + "instruction" + ] + }, + "FiboEditBlendOutput": { + "x-fal-order-properties": [ + "image", + "images", + "structured_instruction" + ], + "type": "object", + "properties": { + "images": { + "title": "Images", + "type": "array", + "description": "Generated images.", + "items": { + "$ref": "#/components/schemas/Image" + }, + "default": [] + }, + "image": { + "description": "Generated image.", + "$ref": "#/components/schemas/Image" + }, + "structured_instruction": { + "description": "Current instruction.", + "type": "object", + "title": "Structured Instruction", + "additionalProperties": true + } + }, + "title": "FiboEditExtraEPOutputModel", + "required": [ + "image", + "structured_instruction" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "title": "Height", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "title": "Width", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/bria/fibo-edit/blend/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/fibo-edit/blend/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/bria/fibo-edit/blend": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FiboEditBlendInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/fibo-edit/blend/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FiboEditBlendOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "bria/fibo-edit/colorize", + "metadata": { + "display_name": "Fibo Edit [Colorize]", + "category": "image-to-image", + "description": "Transforms the color treatment of images using predefined, style-based commands", + "status": "active", + "tags": [ + "bria", + "fibo-edit", + "color" + ], + "updated_at": "2026-01-26T21:41:30.409Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8b22ac/WkSYi-IgNLEv3uRGvcvAc_13b3ad6702bb451086ff8971fb1b302f.jpg", + "model_url": "https://fal.run/bria/fibo-edit/colorize", + "license_type": "commercial", + "date": "2026-01-20T10:37:07.394Z", + "group": { + "key": "fibo-edit", + "label": "Colorize" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for bria/fibo-edit/colorize", + "version": "1.0.0", + "description": "The OpenAPI schema for the bria/fibo-edit/colorize queue.", + "x-fal-metadata": { + "endpointId": "bria/fibo-edit/colorize", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8b22ac/WkSYi-IgNLEv3uRGvcvAc_13b3ad6702bb451086ff8971fb1b302f.jpg", + "playgroundUrl": "https://fal.ai/models/bria/fibo-edit/colorize", + "documentationUrl": "https://fal.ai/models/bria/fibo-edit/colorize/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FiboEditColorizeInput": { + "x-fal-order-properties": [ + "image_url", + "color" + ], + "type": "object", + "properties": { + "color": { + "enum": [ + "contemporary color", + "vivid color", + "black and white colors", + "sepia vintage" + ], + "description": "Select the color palette or aesthetic for the output image", + "type": "string", + "title": "Color", + "examples": [ + "contemporary color" + ] + }, + "image_url": { + "examples": [ + "https://bria-datasets.s3.us-east-1.amazonaws.com/Liza/png+-+2026-01-13T083840.113.png" + ], + "description": "The source image.", + "type": "string", + "title": "Image Url" + } + }, + "title": "ColorizeInput", + "required": [ + "image_url", + "color" + ] + }, + "FiboEditColorizeOutput": { + "x-fal-order-properties": [ + "image", + "images", + "structured_instruction" + ], + "type": "object", + "properties": { + "images": { + "title": "Images", + "type": "array", + "description": "Generated images.", + "items": { + "$ref": "#/components/schemas/Image" + }, + "default": [] + }, + "image": { + "description": "Generated image.", + "$ref": "#/components/schemas/Image" + }, + "structured_instruction": { + "description": "Current instruction.", + "type": "object", + "title": "Structured Instruction", + "additionalProperties": true + } + }, + "title": "FiboEditExtraEPOutputModel", + "required": [ + "image", + "structured_instruction" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "title": "Height", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "title": "Width", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/bria/fibo-edit/colorize/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/fibo-edit/colorize/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/bria/fibo-edit/colorize": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FiboEditColorizeInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/fibo-edit/colorize/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FiboEditColorizeOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2/klein/9b/base/edit/lora", + "metadata": { + "display_name": "Flux 2 [klein] 9B Base Lora", + "category": "image-to-image", + "description": "Image-to-image editing with LoRA support for FLUX.2 [klein] 9B Base from Black Forest Labs. 
Specialized style transfer and domain-specific modifications.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:30.930Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8b09b1/1lRmcLX6NOxTZp285RGPN_85a165d7a3cd4fbfba4c6302d341f0aa.jpg", + "model_url": "https://fal.run/fal-ai/flux-2/klein/9b/base/edit/lora", + "license_type": "commercial", + "date": "2026-01-19T16:36:11.015Z", + "group": { + "key": "flux-2-klein-lora", + "label": "9B Base Image to Image (LoRa)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/flux-2-klein-9b-base-trainer/edit" + ], + "inference_endpoint_ids": [ + "fal-ai/flux-2-klein-9b-base-trainer/edit" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2/klein/9b/base/edit/lora", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-2/klein/9b/base/edit/lora queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2/klein/9b/base/edit/lora", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8b09b1/1lRmcLX6NOxTZp285RGPN_85a165d7a3cd4fbfba4c6302d341f0aa.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2/klein/9b/base/edit/lora", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2/klein/9b/base/edit/lora/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2Klein9bBaseEditLoraInput": { + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "guidance_scale", + "seed", + "num_inference_steps", + "image_size", + "num_images", + "acceleration", + "sync_mode", + "enable_safety_checker", + "output_format", + "image_urls", + "loras" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Change his clothes to casual suit and tie" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to edit the image." + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Number of Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "examples": [ + { + "height": 1152, + "width": 2016 + } + ], + "title": "Image Size", + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image. If not provided, uses the input image size." 
+ }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use for image generation.", + "default": "regular" + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "Guidance scale for classifier-free guidance.", + "default": 5 + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "loras": { + "title": "Loras", + "type": "array", + "description": "List of LoRA weights to apply (maximum 3).", + "items": { + "$ref": "#/components/schemas/fal-ai_flux-2-klein_LoRAInput" + }, + "default": [] + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI. Output is not stored when this is True.", + "default": false + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "num_inference_steps": { + "minimum": 4, + "maximum": 50, + "type": "integer", + "title": "Number of Inference Steps", + "description": "The number of inference steps to perform.", + "default": 28 + }, + "image_urls": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/example_inputs/flux2_dev_edit_input.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "The URLs of the images for editing. A maximum of 4 images are allowed.", + "items": { + "type": "string" + } + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for classifier-free guidance. Describes what to avoid in the image.", + "default": "" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed to use for the generation. If not provided, a random seed will be used." + } + }, + "title": "KleinBaseEditLoRAInput", + "required": [ + "prompt", + "image_urls" + ] + }, + "Flux2Klein9bBaseEditLoraOutput": { + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "title": "KleinT2IOutput", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "title": "Height", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "title": "Width", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "fal-ai_flux-2-klein_LoRAInput": { + "x-fal-order-properties": [ + "path", + "scale" + ], + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL, HuggingFace repo ID (owner/repo), or local path to LoRA weights." + }, + "scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Scale", + "description": "Scale factor for LoRA application (0.0 to 4.0).", + "default": 1 + } + }, + "title": "LoRAInput", + "required": [ + "path" + ] + }, + "ImageFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "ImageFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2/klein/9b/base/edit/lora/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2/klein/9b/base/edit/lora/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2/klein/9b/base/edit/lora": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2Klein9bBaseEditLoraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2/klein/9b/base/edit/lora/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2Klein9bBaseEditLoraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2/klein/4b/base/edit/lora", + "metadata": { + "display_name": "Flux 2 [klein] 4B Base Lora", + "category": "image-to-image", + "description": "Image-to-image editing with LoRA support for FLUX.2 [klein] 4B Base from Black Forest Labs. 
Specialized style transfer and domain-specific modifications.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:31.185Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8b09b3/ck_nRVKlUom4-4_5qfG7t_117a3ccf9f9541aeb83e5ffa75564e6d.jpg", + "model_url": "https://fal.run/fal-ai/flux-2/klein/4b/base/edit/lora", + "license_type": "commercial", + "date": "2026-01-19T16:30:59.857Z", + "group": { + "key": "flux-2-klein-lora", + "label": "4B Base Image to Image (LoRa)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/flux-2-klein-4b-base-trainer/edit" + ], + "inference_endpoint_ids": [ + "fal-ai/flux-2-klein-4b-base-trainer/edit" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2/klein/4b/base/edit/lora", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-2/klein/4b/base/edit/lora queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2/klein/4b/base/edit/lora", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8b09b3/ck_nRVKlUom4-4_5qfG7t_117a3ccf9f9541aeb83e5ffa75564e6d.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2/klein/4b/base/edit/lora", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2/klein/4b/base/edit/lora/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2Klein4bBaseEditLoraInput": { + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "guidance_scale", + "seed", + "num_inference_steps", + "image_size", + "num_images", + "acceleration", + "sync_mode", + "enable_safety_checker", + "output_format", + "image_urls", + "loras" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Change his clothes to casual suit and tie" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to edit the image." + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Number of Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "examples": [ + { + "height": 1152, + "width": 2016 + } + ], + "title": "Image Size", + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image. If not provided, uses the input image size." 
+ }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use for image generation.", + "default": "regular" + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "Guidance scale for classifier-free guidance.", + "default": 5 + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "loras": { + "title": "Loras", + "type": "array", + "description": "List of LoRA weights to apply (maximum 3).", + "items": { + "$ref": "#/components/schemas/fal-ai_flux-2-klein_LoRAInput" + }, + "default": [] + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI. Output is not stored when this is True.", + "default": false + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "num_inference_steps": { + "minimum": 4, + "maximum": 50, + "type": "integer", + "title": "Number of Inference Steps", + "description": "The number of inference steps to perform.", + "default": 28 + }, + "image_urls": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/example_inputs/flux2_dev_edit_input.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "The URLs of the images for editing. A maximum of 4 images are allowed.", + "items": { + "type": "string" + } + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for classifier-free guidance. Describes what to avoid in the image.", + "default": "" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed to use for the generation. If not provided, a random seed will be used." + } + }, + "title": "KleinBaseEditLoRAInput", + "required": [ + "prompt", + "image_urls" + ] + }, + "Flux2Klein4bBaseEditLoraOutput": { + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "title": "KleinT2IOutput", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "title": "Height", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "title": "Width", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "fal-ai_flux-2-klein_LoRAInput": { + "x-fal-order-properties": [ + "path", + "scale" + ], + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL, HuggingFace repo ID (owner/repo), or local path to LoRA weights." + }, + "scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Scale", + "description": "Scale factor for LoRA application (0.0 to 4.0).", + "default": 1 + } + }, + "title": "LoRAInput", + "required": [ + "path" + ] + }, + "ImageFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "ImageFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2/klein/4b/base/edit/lora/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2/klein/4b/base/edit/lora/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2/klein/4b/base/edit/lora": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2Klein4bBaseEditLoraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2/klein/4b/base/edit/lora/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2Klein4bBaseEditLoraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2/klein/4b/base/edit", + "metadata": { + "display_name": "Flux 2 [klein] 4B Base", + "category": "image-to-image", + "description": "Image-to-image editing with Flux 2 [klein] 4B Base from Black Forest Labs. 
Precise modifications using natural language descriptions and hex color control.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:33.062Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8a7f49/nKsGN6UMAi6IjaYdkmILC_e20d2097bb984ad589518cf915fe54b4.jpg", + "model_url": "https://fal.run/fal-ai/flux-2/klein/4b/base/edit", + "license_type": "commercial", + "date": "2026-01-15T20:55:54.156Z", + "group": { + "key": "klein", + "label": "4B Base Image Edit" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2/klein/4b/base/edit", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-2/klein/4b/base/edit queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2/klein/4b/base/edit", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8a7f49/nKsGN6UMAi6IjaYdkmILC_e20d2097bb984ad589518cf915fe54b4.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2/klein/4b/base/edit", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2/klein/4b/base/edit/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2Klein4bBaseEditInput": { + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "guidance_scale", + "seed", + "num_inference_steps", + "image_size", + "num_images", + "acceleration", + "sync_mode", + "enable_safety_checker", + "output_format", + "image_urls" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Imagine view of Fuji mount. Use style of reference image." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to edit the image." + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Number of Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "examples": [ + { + "height": 1152, + "width": 2016 + } + ], + "title": "Image Size", + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image. If not provided, uses the input image size." 
+ }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use for image generation.", + "default": "regular" + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "Guidance scale for classifier-free guidance.", + "default": 5 + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI. Output is not stored when this is True.", + "default": false + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "num_inference_steps": { + "minimum": 4, + "maximum": 50, + "type": "integer", + "title": "Number of Inference Steps", + "description": "The number of inference steps to perform.", + "default": 28 + }, + "image_urls": { + "examples": [ + [ + "https://v3b.fal.media/files/b/0a8a69fd/VUrxIXgOqcf3L7kuGS7B5_eb54fcdc-87d0-47da-ad31-93455b245fb4.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "The URLs of the images for editing. A maximum of 4 images are allowed.", + "items": { + "type": "string" + } + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for classifier-free guidance. Describes what to avoid in the image.", + "default": "" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed to use for the generation. If not provided, a random seed will be used." + } + }, + "title": "Klein4BBaseEditInput", + "required": [ + "prompt", + "image_urls" + ] + }, + "Flux2Klein4bBaseEditOutput": { + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/0a8a69ff/UYukVfGjybLo7spA_Kc-i.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The edited images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "title": "Klein4BBaseEditOutput", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "title": "Height", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "title": "Width", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "ImageFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "ImageFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2/klein/4b/base/edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2/klein/4b/base/edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2/klein/4b/base/edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2Klein4bBaseEditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2/klein/4b/base/edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2Klein4bBaseEditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2/klein/9b/base/edit", + "metadata": { + "display_name": "Flux 2 [klein] 9B Base", + "category": "image-to-image", + "description": "Image-to-image editing with Flux 2 [klein] 9B Base from Black Forest Labs. Precise modifications using natural language descriptions and hex color control.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:33.312Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8a7f50/X8ffS5h55gcigsNZoNC7O_52e6b383ac214d2abe0a2e023f03de88.jpg", + "model_url": "https://fal.run/fal-ai/flux-2/klein/9b/base/edit", + "license_type": "commercial", + "date": "2026-01-15T20:47:31.793Z", + "group": { + "key": "klein", + "label": "9B Base Image Edit" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2/klein/9b/base/edit", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-2/klein/9b/base/edit queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2/klein/9b/base/edit", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8a7f50/X8ffS5h55gcigsNZoNC7O_52e6b383ac214d2abe0a2e023f03de88.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2/klein/9b/base/edit", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2/klein/9b/base/edit/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2Klein9bBaseEditInput": { + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "guidance_scale", + "seed", + "num_inference_steps", + "image_size", + "num_images", + "acceleration", + "sync_mode", + "enable_safety_checker", + "output_format", + "image_urls" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Imagine a young woman. Use the Style from the Reference Image." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to edit the image." + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Number of Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "examples": [ + { + "height": 1152, + "width": 2016 + } + ], + "title": "Image Size", + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image. If not provided, uses the input image size." + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use for image generation.", + "default": "regular" + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "Guidance scale for classifier-free guidance.", + "default": 5 + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI. Output is not stored when this is True.", + "default": false + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "num_inference_steps": { + "minimum": 4, + "maximum": 50, + "type": "integer", + "title": "Number of Inference Steps", + "description": "The number of inference steps to perform.", + "default": 28 + }, + "image_urls": { + "examples": [ + [ + "https://v3b.fal.media/files/b/0a8a69f0/DifnFRQjCHQ5nUxJl0tQK_d456be65-0a70-417c-991d-531be0b58993.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "The URLs of the images for editing. A maximum of 4 images are allowed.", + "items": { + "type": "string" + } + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for classifier-free guidance. Describes what to avoid in the image.", + "default": "" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed to use for the generation. If not provided, a random seed will be used." + } + }, + "title": "Klein9BEditImageInput", + "required": [ + "prompt", + "image_urls" + ] + }, + "Flux2Klein9bBaseEditOutput": { + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." 
+ }, + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/0a8a69f1/HpSn20CQnEgVbFRD5E-Eh.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The edited images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "title": "Klein9BBaseEditOutput", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "title": "Height", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "title": "Width", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "ImageFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "ImageFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2/klein/9b/base/edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2/klein/9b/base/edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2/klein/9b/base/edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2Klein9bBaseEditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2/klein/9b/base/edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2Klein9bBaseEditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2/klein/4b/edit", + "metadata": { + "display_name": "Flux 2 [klein] 4B", + "category": "image-to-image", + "description": "Image-to-image editing with Flux 2 [klein] 4B from Black Forest Labs. Precise modifications using natural language descriptions and hex color control.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:33.776Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8a7f40/-9rbLPCsz36IFb-4t3J2L_76750002c0db4ce899b77e98321ffe30.jpg", + "model_url": "https://fal.run/fal-ai/flux-2/klein/4b/edit", + "license_type": "commercial", + "date": "2026-01-15T12:26:00.554Z", + "group": { + "key": "klein", + "label": "4B Image Edit" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2/klein/4b/edit", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-2/klein/4b/edit queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2/klein/4b/edit", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8a7f40/-9rbLPCsz36IFb-4t3J2L_76750002c0db4ce899b77e98321ffe30.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2/klein/4b/edit", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2/klein/4b/edit/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2Klein4bEditInput": { + "x-fal-order-properties": [ + "prompt", + "seed", + "num_inference_steps", + "image_size", + "num_images", + "sync_mode", + "enable_safety_checker", + "output_format", + "image_urls" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Turn this into a realistic image" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to edit the image." + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Number of Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "examples": [ + { + "height": 1152, + "width": 2016 + } + ], + "title": "Image Size", + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image. If not provided, uses the input image size." + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI. Output is not stored when this is True.", + "default": false + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "num_inference_steps": { + "minimum": 4, + "maximum": 8, + "type": "integer", + "title": "Number of Inference Steps", + "description": "The number of inference steps to perform.", + "default": 4 + }, + "image_urls": { + "examples": [ + [ + "https://v3b.fal.media/files/b/0a8a69d5/kkXxFfj1QeVtw35kxy5Py_1a7e3511-bd2c-46be-923a-8e6be2496f12.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "The URLs of the images for editing. A maximum of 4 images are allowed.", + "items": { + "type": "string" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed to use for the generation. If not provided, a random seed will be used." + } + }, + "title": "KleinDistilledEditInput", + "required": [ + "prompt", + "image_urls" + ] + }, + "Flux2Klein4bEditOutput": { + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." 
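The Flux2Klein4bEditInput schema above requires only `prompt` and `image_urls`; everything else carries documented defaults. A hypothetical request reusing the `runQueued` sketch above, with placeholder URLs:

const input = {
  prompt: 'Turn this into a realistic image',
  image_urls: ['https://example.com/source.png'], // placeholder; max 4 URLs
  num_inference_steps: 4, // distilled model: the schema allows only 4-8
  image_size: 'landscape_16_9', // preset name, or an explicit { width, height }
  output_format: 'png',
}

const out = await runQueued<
  typeof input,
  { images: Array<{ url: string }>; seed: number }
>('fal-ai/flux-2/klein/4b/edit', input)
console.log(out.images[0]?.url, out.seed)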
+ }, + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/0a8a69d6/M73KvDgfEgIM77t4mFsS2.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The edited images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "title": "Klein4BDistilledEditOutput", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "title": "Height", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "title": "Width", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "ImageFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "ImageFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2/klein/4b/edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2/klein/4b/edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2/klein/4b/edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2Klein4bEditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2/klein/4b/edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2Klein4bEditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2/klein/9b/edit", + "metadata": { + "display_name": "Flux 2 [klein] 9B", + "category": "image-to-image", + "description": "Image-to-image editing with Flux 2 [klein] 9B from Black Forest Labs. Precise modifications using natural language descriptions and hex color control.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:33.915Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8a7f50/X8ffS5h55gcigsNZoNC7O_52e6b383ac214d2abe0a2e023f03de88.jpg", + "model_url": "https://fal.run/fal-ai/flux-2/klein/9b/edit", + "license_type": "commercial", + "date": "2026-01-15T12:25:09.939Z", + "group": { + "key": "klein", + "label": "9B Image Edit" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2/klein/9b/edit", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-2/klein/9b/edit queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2/klein/9b/edit", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8a7f50/X8ffS5h55gcigsNZoNC7O_52e6b383ac214d2abe0a2e023f03de88.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2/klein/9b/edit", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2/klein/9b/edit/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2Klein9bEditInput": { + "x-fal-order-properties": [ + "prompt", + "seed", + "num_inference_steps", + "image_size", + "num_images", + "sync_mode", + "enable_safety_checker", + "output_format", + "image_urls" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Show me a full body image" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to edit the image." + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Number of Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "examples": [ + { + "height": 1152, + "width": 2016 + } + ], + "title": "Image Size", + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image. If not provided, uses the input image size." + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI. Output is not stored when this is True.", + "default": false + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "num_inference_steps": { + "minimum": 4, + "maximum": 8, + "type": "integer", + "title": "Number of Inference Steps", + "description": "The number of inference steps to perform.", + "default": 4 + }, + "image_urls": { + "examples": [ + [ + "https://v3b.fal.media/files/b/0a8a69d5/kkXxFfj1QeVtw35kxy5Py_1a7e3511-bd2c-46be-923a-8e6be2496f12.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "The URLs of the images for editing. A maximum of 4 images are allowed.", + "items": { + "type": "string" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed to use for the generation. If not provided, a random seed will be used." + } + }, + "title": "Klein9BDistilledEditInput", + "required": [ + "prompt", + "image_urls" + ] + }, + "Flux2Klein9bEditOutput": { + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." 
+ }, + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/0a8b8b22/zxpzgthoJaMfiLfSqjTwX.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The edited images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "title": "Klein9BDistilledEditOutput", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "title": "Height", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "title": "Width", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "ImageFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "ImageFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2/klein/9b/edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
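Both Klein editors document `sync_mode`: when true the media is returned as a data URI and is not stored. A sketch of persisting such a payload, assuming the URI arrives in the usual `ImageFile.url` field:

import { writeFile } from 'node:fs/promises'

// When sync_mode is true, `images[n].url` is expected to hold something like
// `data:image/png;base64,...` rather than a hosted URL.
async function saveDataUri(dataUri: string, path: string): Promise<void> {
  const comma = dataUri.indexOf(',')
  if (comma < 0) throw new Error('not a data URI')
  await writeFile(path, Buffer.from(dataUri.slice(comma + 1), 'base64'))
}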
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2/klein/9b/edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2/klein/9b/edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2Klein9bEditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2/klein/9b/edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2Klein9bEditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/glm-image/image-to-image", + "metadata": { + "display_name": "Glm Image", + "category": "image-to-image", + "description": "Create high-quality images with accurate text rendering and rich knowledge details—supports editing, style transfer, and maintaining consistent characters across multiple images.", + "status": "active", + "tags": [ + "image-to-image" + ], + "updated_at": "2026-01-26T21:41:36.976Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8a4d0f/umwWSuRRBSyt9U_bdGB9i_2de7b25b3997468f8166d372a12dff93.jpg", + "model_url": "https://fal.run/fal-ai/glm-image/image-to-image", + "license_type": "commercial", + "date": "2026-01-14T02:44:07.704Z", + "group": { + "key": "glm-image", + "label": "Image Editing" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/glm-image/image-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/glm-image/image-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/glm-image/image-to-image", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8a4d0f/umwWSuRRBSyt9U_bdGB9i_2de7b25b3997468f8166d372a12dff93.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/glm-image/image-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/glm-image/image-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + 
"description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "GlmImageImageToImageInput": { + "title": "GlmImageToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Make the dress red." + ], + "description": "Text prompt for image generation.", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "minimum": 1, + "description": "Number of images to generate.", + "type": "integer", + "maximum": 4, + "title": "Num Images", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9", + "portrait_3_2", + "landscape_3_2", + "portrait_hd", + "landscape_hd" + ], + "type": "string" + } + ], + "description": "Output image size.", + "title": "Image Size", + "default": "square_hd" + }, + "enable_safety_checker": { + "description": "Enable NSFW safety checking on the generated images.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "Output image format.", + "default": "jpeg" + }, + "sync_mode": { + "description": "If True, the image will be returned as a base64 data URI instead of a URL.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "guidance_scale": { + "minimum": 1, + "description": "Classifier-free guidance scale. Higher values make the model follow the prompt more closely.", + "type": "number", + "maximum": 10, + "title": "Guidance Scale", + "default": 1.5 + }, + "seed": { + "description": "Random seed for reproducibility. The same seed with the same prompt will produce the same image.", + "type": "integer", + "title": "Seed" + }, + "image_urls": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/catwalk.png" + ], + "description": "URL(s) of the condition image(s) for image-to-image generation. Supports up to 4 URLs for multi-image references.", + "type": "array", + "title": "Image Urls", + "items": { + "type": "string" + } + }, + "enable_prompt_expansion": { + "description": "If True, the prompt will be enhanced using an LLM for more detailed and higher quality results.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": false + }, + "num_inference_steps": { + "minimum": 10, + "description": "Number of diffusion denoising steps. 
More steps generally produce higher quality images.", + "type": "integer", + "maximum": 100, + "title": "Num Inference Steps", + "default": 30 + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "guidance_scale", + "seed", + "num_images", + "enable_safety_checker", + "output_format", + "sync_mode", + "enable_prompt_expansion", + "image_urls" + ], + "required": [ + "prompt", + "image_urls" + ] + }, + "GlmImageImageToImageOutput": { + "title": "GlmImageToImageOutput", + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "examples": [ + [ + { + "height": 1536, + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/example_outputs/catwalk_red.png", + "width": 1024 + } + ] + ], + "description": "List of URLs to the generated images.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ", + "type": "integer", + "title": "Seed" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Height", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Width", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/glm-image/image-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
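GlmImageImageToImageInput differs from the Klein editors mainly in its knobs: a 1-10 `guidance_scale` (default 1.5), 10-100 `num_inference_steps` (default 30), and optional LLM prompt expansion. A hypothetical payload with a placeholder reference image:

const glmInput = {
  prompt: 'Make the dress red.',
  image_urls: ['https://example.com/catwalk.png'], // placeholder; up to 4 refs
  guidance_scale: 1.5, // 1-10; a low default compared to Qwen's 4.5
  num_inference_steps: 30, // 10-100
  enable_prompt_expansion: false, // true routes the prompt through an LLM first
  image_size: 'square_hd', // GLM adds portrait_3_2, landscape_hd, etc.
}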
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/glm-image/image-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/glm-image/image-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GlmImageImageToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/glm-image/image-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GlmImageImageToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-edit-2511-multiple-angles", + "metadata": { + "display_name": "Qwen Image Edit 2511 Multiple Angles", + "category": "image-to-image", + "description": "Generates same scene from different angles (azimuth/elevation) with Qwen image Edit 2511 and the Lora Multiple Angles", + "status": "active", + "tags": [ + "stylized", + "transform", + "lora", + "multi-angles", + "multiples", + "angles" + ], + "updated_at": "2026-01-26T21:41:41.027Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a89744e/euqxNQk3eIDDL3GDS_pkZ.png", + "model_url": "https://fal.run/fal-ai/qwen-image-edit-2511-multiple-angles", + "license_type": "commercial", + "date": "2026-01-07T16:36:31.062Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image-edit-2511-multiple-angles", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-image-edit-2511-multiple-angles queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-image-edit-2511-multiple-angles", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a89744e/euqxNQk3eIDDL3GDS_pkZ.png", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-2511-multiple-angles", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-2511-multiple-angles/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + 
"description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImageEdit2511MultipleAnglesInput": { + "x-fal-order-properties": [ + "image_urls", + "horizontal_angle", + "vertical_angle", + "zoom", + "additional_prompt", + "lora_scale", + "image_size", + "guidance_scale", + "num_inference_steps", + "acceleration", + "negative_prompt", + "seed", + "sync_mode", + "enable_safety_checker", + "output_format", + "num_images" + ], + "type": "object", + "properties": { + "acceleration": { + "enum": [ + "none", + "regular" + ], + "title": "Acceleration", + "type": "string", + "description": "Acceleration level for image generation.", + "default": "regular" + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image. If not provided, the size of the input image will be used.", + "title": "Image Size" + }, + "horizontal_angle": { + "description": "Horizontal rotation angle around the object in degrees. 0°=front view, 90°=right side, 180°=back view, 270°=left side, 360°=front view again.", + "type": "number", + "minimum": 0, + "title": "Horizontal Angle (Azimuth °)", + "maximum": 360, + "default": 0 + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance Scale", + "type": "number", + "description": "The CFG (Classifier Free Guidance) scale.", + "maximum": 20, + "default": 4.5 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "image_urls": { + "examples": [ + [ + "https://v3b.fal.media/files/b/0a8973cb/qUbVwDCcMlvX4drBGYB1H.png" + ] + ], + "description": "The URL of the image to adjust camera angle for.", + "type": "array", + "title": "Image URLs", + "items": { + "type": "string" + } + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt for the generation", + "default": "" + }, + "zoom": { + "description": "Camera zoom/distance. 0=wide shot (far away), 5=medium shot (normal), 10=close-up (very close).", + "type": "number", + "minimum": 0, + "title": "Zoom (Distance)", + "maximum": 10, + "default": 5 + }, + "vertical_angle": { + "description": "Vertical camera angle in degrees. -30°=low-angle shot (looking up), 0°=eye-level, 30°=elevated, 60°=high-angle, 90°=bird's-eye view (looking down).", + "type": "number", + "minimum": -30, + "title": "Vertical Angle (Elevation °)", + "maximum": 90, + "default": 0 + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "description": "Number of images to generate", + "maximum": 4, + "default": 1 + }, + "lora_scale": { + "minimum": 0, + "title": "Lora Scale", + "type": "number", + "description": "The scale factor for the LoRA model. 
Controls the strength of the camera control effect.", + "maximum": 4, + "default": 1 + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output image", + "default": "png" + }, + "additional_prompt": { + "description": "Additional text to append to the automatically generated prompt.", + "type": "string", + "title": "Additional Prompt" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI.", + "default": false + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "description": "The number of inference steps to perform.", + "maximum": 50, + "default": 28 + }, + "seed": { + "description": "Random seed for reproducibility.", + "type": "integer", + "title": "Seed" + } + }, + "description": "Input model for Multiple Angles endpoint - Camera control with precise adjustments using trigger word.\nPrompt is built automatically from slider values.", + "title": "MultipleAnglesInput", + "required": [ + "image_urls" + ] + }, + "QwenImageEdit2511MultipleAnglesOutput": { + "x-fal-order-properties": [ + "images", + "seed", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "description": "The constructed prompt used for generation", + "type": "string", + "title": "Prompt" + }, + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/0a8973d9/8Z0xxKdGnoJAWc2tKJ68f.png" + } + ] + ], + "description": "The generated/edited images", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "description": "The seed used for generation", + "type": "integer", + "title": "Seed" + } + }, + "description": "Output model for Multiple Angles endpoint", + "title": "MultipleAnglesOutput", + "required": [ + "images", + "seed", + "prompt" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-edit-2511-multiple-angles/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2511-multiple-angles/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2511-multiple-angles": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEdit2511MultipleAnglesInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2511-multiple-angles/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEdit2511MultipleAnglesOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-edit-2511/lora", + "metadata": { + "display_name": "Qwen Image Edit 2511", + "category": "image-to-image", + "description": "Endpoint for Qwen's Image Editing 2511 model with LoRa support.", + "status": "active", + "tags": [ + "stylized", + "transform", + "lora" + ], + "updated_at": "2026-01-26T21:41:43.845Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a887017/5CHxB7UP_2DDDrYwUMY1P_758632ead2224926b213655a720357b0.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-edit-2511/lora", + "license_type": "commercial", + "date": "2025-12-30T22:32:05.672Z", + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/qwen-image-edit-2511-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/qwen-image-edit-2511-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue 
OpenAPI for fal-ai/qwen-image-edit-2511/lora", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-image-edit-2511/lora queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-image-edit-2511/lora", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a887017/5CHxB7UP_2DDDrYwUMY1P_758632ead2224926b213655a720357b0.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-2511/lora", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-2511/lora/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImageEdit2511LoraInput": { + "title": "EditImageLoraInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Change angle to front view" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to edit the image with." + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image. If None, uses the input image dimensions." + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use.", + "default": "regular" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI.", + "default": false + }, + "loras": { + "title": "Loras", + "type": "array", + "description": "The LoRAs to use for the image generation. 
You can use up to 3 LoRAs and they will be merged together to generate the final image.", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "default": [] + }, + "guidance_scale": { + "minimum": 1, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The guidance scale to use for the image generation.", + "default": 4.5 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 28 + }, + "image_urls": { + "examples": [ + [ + "https://v3b.fal.media/files/b/0a877afe/karyVuQ62j0V6ErYzyW-w_image_6.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "The URLs of the images to edit.", + "items": { + "type": "string" + } + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to generate an image from.", + "default": "" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The same seed and the same prompt given to the same version of the model will output the same image every time." + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "image_size", + "image_urls", + "num_inference_steps", + "guidance_scale", + "seed", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "acceleration", + "loras" + ], + "required": [ + "prompt", + "image_urls" + ] + }, + "QwenImageEdit2511LoraOutput": { + "title": "ImageToImageOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "examples": [ + [ + { + "height": 768, + "content_type": "image/png", + "url": "https://v3b.fal.media/files/b/0a877afe/InJJA0Q1gtQnyK1N3wdg5.png", + "width": 1376 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." 
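Two hypothetical payloads for the Qwen 2511 editors above. The multiple-angles endpoint takes no prompt at all; per its own description it builds one from the slider values. The `/lora` variant instead accepts up to three `LoraWeight` entries merged into the base model. All URLs and the LoRA path are placeholders:

// Camera control: no prompt field; the endpoint composes one from the sliders.
const anglesInput = {
  image_urls: ['https://example.com/scene.png'], // placeholder
  horizontal_angle: 90, // azimuth: 0 front, 90 right, 180 back, 270 left
  vertical_angle: 30, // elevation: -30 low angle up to 90 bird's-eye
  zoom: 5, // 0 wide shot ... 10 close-up
  lora_scale: 1, // 0-4; strength of the camera-control effect
  additional_prompt: 'golden hour lighting', // appended to the built prompt
}

// General-purpose editing with custom LoRA weights merged at inference time.
const loraInput = {
  prompt: 'Change angle to front view',
  image_urls: ['https://example.com/input.png'], // placeholder
  loras: [
    // placeholder path; up to 3 entries, scale clamped to 0-4
    { path: 'https://example.com/my-style.safetensors', scale: 1 },
  ],
  acceleration: 'regular', // 'none' | 'regular' | 'high'
}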
+ }, + "scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Scale", + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + } + }, + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-edit-2511/lora/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2511/lora/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
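The cancel route is the one part of the queue contract the `runQueued` sketch above does not exercise; per these schemas it is a PUT whose 200 body carries a `success` boolean. A sketch reusing `BASE` and `headers` from that helper:

async function cancelRequest(
  endpointId: string,
  requestId: string,
): Promise<boolean> {
  const res = await fetch(
    `${BASE}/${endpointId}/requests/${requestId}/cancel`,
    { method: 'PUT', headers },
  )
  const body = (await res.json()) as { success?: boolean }
  return body.success === true
}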
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2511/lora": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEdit2511LoraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2511/lora/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEdit2511LoraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "half-moon-ai/ai-home/style", + "metadata": { + "display_name": "Ai Home", + "category": "image-to-image", + "description": "AI Home Style reimagines your home interior and exterior design with bold, prompt-driven concepts ", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:41:45.109Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8861e4/rHJfaPubY6LKe8f5JGxQC_7e40d22fe5dc47f996f89ce149fa57c1.jpg", + "model_url": "https://fal.run/half-moon-ai/ai-home/style", + "license_type": "commercial", + "date": "2025-12-30T13:26:41.678Z", + "group": { + "key": "Halfmoon-AI-Home", + "label": "Style" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for half-moon-ai/ai-home/style", + "version": "1.0.0", + "description": "The OpenAPI schema for the half-moon-ai/ai-home/style queue.", + "x-fal-metadata": { + "endpointId": "half-moon-ai/ai-home/style", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8861e4/rHJfaPubY6LKe8f5JGxQC_7e40d22fe5dc47f996f89ce149fa57c1.jpg", + "playgroundUrl": "https://fal.ai/models/half-moon-ai/ai-home/style", + "documentationUrl": "https://fal.ai/models/half-moon-ai/ai-home/style/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "AiHomeStyleInput": { + "x-fal-order-properties": [ + "input_image_url", + "style_image_url", + "architecture_type", + "style", + "color_palette", + "additional_elements", + "enhanced_rendering", + "input_image_strength", + "custom_prompt", + "output_format" + ], + "type": "object", + "properties": { + "input_image_url": { + "examples": [ + "https://v3.fal.media/files/kangaroo/BLwbXwxQI_MNwUF-P6ITl_zen_living_room_input.jpg" + ], + "maxLength": 512, + "type": "string", + "description": "URL of the image to do architectural styling", + "title": "Input Image Url" + }, + "input_image_strength": { + "description": "Strength of the input image", + "type": "number", + "minimum": 0, + "title": "Input Image Strength", + "maximum": 1, + "default": 0.85 + }, + "additional_elements": { + "anyOf": [ + { + "maxLength": 200, + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Additional elements to include in the options above (e.g., plants, lighting)", + "title": "Additional Elements", + "default": "" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "description": "The format of the generated image. Choose from: 'jpeg' or 'png'.", + "type": "string", + "title": "Output Format", + "default": "jpeg" + }, + "style": { + "examples": [ + "rustic-interior" + ], + "description": "Style for furniture and decor", + "type": "string", + "enum": [ + "minimalistic-interior", + "farmhouse-interior", + "luxury-interior", + "modern-interior", + "zen-interior", + "mid century-interior", + "airbnb-interior", + "cozy-interior", + "rustic-interior", + "christmas-interior", + "bohemian-interior", + "tropical-interior", + "industrial-interior", + "japanese-interior", + "vintage-interior", + "loft-interior", + "halloween-interior", + "soho-interior", + "baroque-interior", + "kids room-interior", + "girls room-interior", + "boys room-interior", + "scandinavian-interior", + "french country-interior", + "mediterranean-interior", + "cyberpunk-interior", + "hot pink-interior", + "biophilic-interior", + "ancient egypt-interior", + "pixel-interior", + "art deco-interior", + "modern-exterior", + "minimalistic-exterior", + "farmhouse-exterior", + "cozy-exterior", + "luxury-exterior", + "colonial-exterior", + "zen-exterior", + "asian-exterior", + "creepy-exterior", + "airstone-exterior", + "ancient greek-exterior", + "art deco-exterior", + "brutalist-exterior", + "christmas lights-exterior", + "contemporary-exterior", + "cottage-exterior", + "dutch colonial-exterior", + "federal colonial-exterior", + "fire-exterior", + "french provincial-exterior", + "full glass-exterior", + "georgian colonial-exterior", + "gothic-exterior", + "greek revival-exterior", + "ice-exterior", + "italianate-exterior", + "mediterranean-exterior", + "midcentury-exterior", + "middle eastern-exterior", + "minecraft-exterior", + "morocco-exterior", + "neoclassical-exterior", + "spanish-exterior", + "tudor-exterior", + "underwater-exterior", + "winter-exterior", + "yard lighting-exterior" + ], + "title": "Style" + }, + "architecture_type": { + "examples": [ + "living room-interior" + ], + "description": "Type of architecture for appropriate furniture selection", + "type": "string", + "enum": [ + "living room-interior", + "bedroom-interior", + "kitchen-interior", + "dining room-interior", + "bathroom-interior", + "laundry room-interior", + "home office-interior", + "study room-interior", + "dorm room-interior", + "coffee shop-interior", + "gaming room-interior", + 
"restaurant-interior", + "office-interior", + "attic-interior", + "toilet-interior", + "other-interior", + "house-exterior", + "villa-exterior", + "backyard-exterior", + "courtyard-exterior", + "ranch-exterior", + "office-exterior", + "retail-exterior", + "tower-exterior", + "apartment-exterior", + "school-exterior", + "museum-exterior", + "commercial-exterior", + "residential-exterior", + "other-exterior" + ], + "title": "Architecture Type" + }, + "color_palette": { + "examples": [ + "golden beige" + ], + "description": "Color palette for furniture and decor", + "type": "string", + "enum": [ + "surprise me", + "golden beige", + "refined blues", + "dusky elegance", + "emerald charm", + "crimson luxury", + "golden sapphire", + "soft pastures", + "candy sky", + "peach meadow", + "muted sands", + "ocean breeze", + "frosted pastels", + "spring bloom", + "gentle horizon", + "seaside breeze", + "azure coast", + "golden shore", + "mediterranean gem", + "ocean serenity", + "serene blush", + "muted horizon", + "pastel shores", + "dusky calm", + "woodland retreat", + "meadow glow", + "forest canopy", + "riverbank calm", + "earthy tones", + "earthy neutrals", + "arctic mist", + "aqua drift", + "blush bloom", + "coral haze", + "retro rust", + "autumn glow", + "rustic charm", + "vintage sage", + "faded plum", + "electric lime", + "violet pulse", + "neon sorbet", + "aqua glow", + "fluorescent sunset", + "lavender bloom", + "petal fresh", + "meadow light", + "sunny pastures", + "frosted mauve", + "snowy hearth", + "icy blues", + "winter twilight", + "earthy hues", + "stone balance", + "neutral sands", + "slate shades" + ], + "title": "Color Palette" + }, + "style_image_url": { + "anyOf": [ + { + "maxLength": 512, + "type": "string" + }, + { + "type": "null" + } + ], + "description": "URL of the style image, optional. 
If given, other parameters are ignored", + "title": "Style Image Url", + "default": "" + }, + "custom_prompt": { + "examples": [ + "" + ], + "maxLength": 300, + "type": "string", + "description": "Custom prompt for architectural editing, it overrides above options when used", + "title": "Custom Prompt", + "default": "" + }, + "enhanced_rendering": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "description": "It gives better rendering quality with more processing time, additional cost is 0.01 USD per image", + "title": "Enhanced Rendering", + "default": false + } + }, + "title": "ArchStyleInput", + "required": [ + "input_image_url", + "architecture_type", + "style", + "color_palette" + ] + }, + "AiHomeStyleOutput": { + "x-fal-order-properties": [ + "image", + "status" + ], + "type": "object", + "properties": { + "image": { + "examples": [ + { + "content_type": "image/jpeg", + "url": "https://v3b.fal.media/files/b/0a89afbe/Yyo8q4mBMcUmqJQ7qaFGi_294eca9bfc3a455998e7080781e442a1.jpg" + } + ], + "description": "Generated image", + "$ref": "#/components/schemas/Image" + }, + "status": { + "description": "Status message with processing details", + "type": "string", + "title": "Status" + } + }, + "title": "ArchStyleOutput", + "required": [ + "image", + "status" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The height of the image in pixels.", + "examples": [ + 1024 + ], + "title": "Height" + }, + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ], + "title": "File Size" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name" + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "examples": [ + "image/png" + ], + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The width of the image in pixels.", + "examples": [ + 1024 + ], + "title": "Width" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/half-moon-ai/ai-home/style/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/half-moon-ai/ai-home/style/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/half-moon-ai/ai-home/style": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AiHomeStyleInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/half-moon-ai/ai-home/style/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AiHomeStyleOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "half-moon-ai/ai-home/edit", + "metadata": { + "display_name": "Ai Home", + "category": "image-to-image", + "description": "AI Home Edit transforms your home interior and exterior photos with realistic, prompt-based edits", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:41:45.509Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8861c0/EwVcG4DMq2EjY3FpxVzHz_36a78746680f4ee0a3bd039cc97a9e7d.jpg", + "model_url": "https://fal.run/half-moon-ai/ai-home/edit", + "license_type": "commercial", + "date": "2025-12-30T13:20:56.087Z", + "group": { + "key": "Halfmoon-AI-Home", + "label": "Editing" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for half-moon-ai/ai-home/edit", + "version": "1.0.0", + "description": "The OpenAPI schema for the half-moon-ai/ai-home/edit queue.", + "x-fal-metadata": { + "endpointId": "half-moon-ai/ai-home/edit", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8861c0/EwVcG4DMq2EjY3FpxVzHz_36a78746680f4ee0a3bd039cc97a9e7d.jpg", + "playgroundUrl": "https://fal.ai/models/half-moon-ai/ai-home/edit", + "documentationUrl": "https://fal.ai/models/half-moon-ai/ai-home/edit/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
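The half-moon-ai/ai-home/style spec above pins down everything a caller needs: four required input fields and the standard fal queue submission path. As a minimal sketch (assuming a `FAL_KEY` environment variable and fal's documented `Key <token>` Authorization convention; the input image URL is a hypothetical placeholder, not from the spec):

```ts
// Minimal sketch: submit an AiHomeStyleInput payload to the fal queue.
// Assumes FAL_KEY is set and that the Authorization header follows fal's
// "Key <token>" convention; adjust if your account uses a different scheme.
const FAL_KEY = process.env.FAL_KEY ?? ''

async function submitStyleRequest() {
  const res = await fetch('https://queue.fal.run/half-moon-ai/ai-home/style', {
    method: 'POST',
    headers: {
      Authorization: `Key ${FAL_KEY}`,
      'Content-Type': 'application/json',
    },
    body: JSON.stringify({
      // The four required fields per the AiHomeStyleInput schema above.
      input_image_url: 'https://example.com/living-room.jpg', // hypothetical placeholder
      architecture_type: 'living room-interior',
      style: 'rustic-interior',
      color_palette: 'golden beige',
      // Optional; defaults to 0.85 per the schema.
      input_image_strength: 0.85,
    }),
  })
  // The POST returns a QueueStatus, not the image itself.
  const queued = await res.json()
  console.log(queued.status, queued.request_id)
}
```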
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "AiHomeEditInput": { + "x-fal-order-properties": [ + "input_image_url", + "editing_type", + "architecture_type", + "style", + "color_palette", + "additional_elements", + "custom_prompt", + "output_format" + ], + "type": "object", + "properties": { + "input_image_url": { + "examples": [ + "https://v3.fal.media/files/kangaroo/BLwbXwxQI_MNwUF-P6ITl_zen_living_room_input.jpg" + ], + "maxLength": 512, + "type": "string", + "description": "URL of the image to do architectural editing", + "title": "Input Image Url" + }, + "editing_type": { + "examples": [ + "both" + ], + "description": "Type of editing. Structural editing only edits structural elements such as windows, walls etc. Virtual staging edits your furniture. Both do full editing including structural and furniture", + "type": "string", + "enum": [ + "structural editing", + "virtual staging", + "both" + ], + "title": "Editing Type" + }, + "style": { + "examples": [ + "rustic-interior" + ], + "description": "Style for furniture and decor", + "type": "string", + "enum": [ + "minimalistic-interior", + "farmhouse-interior", + "luxury-interior", + "modern-interior", + "zen-interior", + "mid century-interior", + "airbnb-interior", + "cozy-interior", + "rustic-interior", + "christmas-interior", + "bohemian-interior", + "tropical-interior", + "industrial-interior", + "japanese-interior", + "vintage-interior", + "loft-interior", + "halloween-interior", + "soho-interior", + "baroque-interior", + "kids room-interior", + "girls room-interior", + "boys room-interior", + "scandinavian-interior", + "french country-interior", + "mediterranean-interior", + "cyberpunk-interior", + "hot pink-interior", + "biophilic-interior", + "ancient egypt-interior", + "pixel-interior", + "art deco-interior", + "modern-exterior", + "minimalistic-exterior", + "farmhouse-exterior", + "cozy-exterior", + "luxury-exterior", + "colonial-exterior", + "zen-exterior", + "asian-exterior", + "creepy-exterior", + "airstone-exterior", + "ancient greek-exterior", + "art deco-exterior", + "brutalist-exterior", + "christmas lights-exterior", + "contemporary-exterior", + "cottage-exterior", + "dutch colonial-exterior", + "federal colonial-exterior", + "fire-exterior", + "french provincial-exterior", + "full glass-exterior", + "georgian colonial-exterior", + "gothic-exterior", + "greek revival-exterior", + "ice-exterior", + "italianate-exterior", + "mediterranean-exterior", + "midcentury-exterior", + "middle eastern-exterior", + "minecraft-exterior", + "morocco-exterior", + "neoclassical-exterior", + "spanish-exterior", + "tudor-exterior", + "underwater-exterior", + "winter-exterior", + "yard lighting-exterior" + ], + "title": "Style" + }, + "additional_elements": { + "anyOf": [ + { + "maxLength": 200, + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Additional elements to include in the options above (e.g., plants, lighting)", + "title": "Additional Elements", + "default": "" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "description": "The format of 
the generated image. Choose from: 'jpeg' or 'png'.", + "type": "string", + "title": "Output Format", + "default": "jpeg" + }, + "architecture_type": { + "examples": [ + "living room-interior" + ], + "description": "Type of architecture for appropriate furniture selection", + "type": "string", + "enum": [ + "living room-interior", + "bedroom-interior", + "kitchen-interior", + "dining room-interior", + "bathroom-interior", + "laundry room-interior", + "home office-interior", + "study room-interior", + "dorm room-interior", + "coffee shop-interior", + "gaming room-interior", + "restaurant-interior", + "office-interior", + "attic-interior", + "toilet-interior", + "other-interior", + "house-exterior", + "villa-exterior", + "backyard-exterior", + "courtyard-exterior", + "ranch-exterior", + "office-exterior", + "retail-exterior", + "tower-exterior", + "apartment-exterior", + "school-exterior", + "museum-exterior", + "commercial-exterior", + "residential-exterior", + "other-exterior" + ], + "title": "Architecture Type" + }, + "color_palette": { + "examples": [ + "golden beige" + ], + "description": "Color palette for furniture and decor", + "type": "string", + "enum": [ + "surprise me", + "golden beige", + "refined blues", + "dusky elegance", + "emerald charm", + "crimson luxury", + "golden sapphire", + "soft pastures", + "candy sky", + "peach meadow", + "muted sands", + "ocean breeze", + "frosted pastels", + "spring bloom", + "gentle horizon", + "seaside breeze", + "azure coast", + "golden shore", + "mediterranean gem", + "ocean serenity", + "serene blush", + "muted horizon", + "pastel shores", + "dusky calm", + "woodland retreat", + "meadow glow", + "forest canopy", + "riverbank calm", + "earthy tones", + "earthy neutrals", + "arctic mist", + "aqua drift", + "blush bloom", + "coral haze", + "retro rust", + "autumn glow", + "rustic charm", + "vintage sage", + "faded plum", + "electric lime", + "violet pulse", + "neon sorbet", + "aqua glow", + "fluorescent sunset", + "lavender bloom", + "petal fresh", + "meadow light", + "sunny pastures", + "frosted mauve", + "snowy hearth", + "icy blues", + "winter twilight", + "earthy hues", + "stone balance", + "neutral sands", + "slate shades" + ], + "title": "Color Palette" + }, + "custom_prompt": { + "examples": [ + "" + ], + "maxLength": 300, + "type": "string", + "description": "Custom prompt for architectural editing, it overrides above options when used", + "title": "Custom Prompt", + "default": "" + } + }, + "title": "ArchEditInput", + "required": [ + "input_image_url", + "editing_type", + "architecture_type", + "style", + "color_palette" + ] + }, + "AiHomeEditOutput": { + "x-fal-order-properties": [ + "image", + "status" + ], + "type": "object", + "properties": { + "image": { + "examples": [ + { + "content_type": "image/jpeg", + "url": "https://v3b.fal.media/files/b/monkey/DjC7In1m3u5B-XXDwFDP3_043cd0d5929a42a78f7d762c60bda00a.jpg" + } + ], + "description": "Generated image", + "$ref": "#/components/schemas/Image" + }, + "status": { + "description": "Status message with processing details", + "type": "string", + "title": "Status" + } + }, + "title": "ArchEditOutput", + "required": [ + "image", + "status" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The height 
of the image in pixels.", + "examples": [ + 1024 + ], + "title": "Height" + }, + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ], + "title": "File Size" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name" + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "examples": [ + "image/png" + ], + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The width of the image in pixels.", + "examples": [ + 1024 + ], + "title": "Width" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/half-moon-ai/ai-home/edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/half-moon-ai/ai-home/edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/half-moon-ai/ai-home/edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AiHomeEditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/half-moon-ai/ai-home/edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AiHomeEditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-layered/lora", + "metadata": { + "display_name": "Qwen Image Layered", + "category": "image-to-image", + "description": "Qwen-Image-Layered is a model capable of decomposing an image into multiple RGBA layers. 
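Submission only returns a QueueStatus, so the result has to be polled. A sketch of that loop against the half-moon-ai/ai-home/edit paths defined above (same `FAL_KEY` assumption; the two-second interval is an arbitrary choice, not mandated by the spec):

```ts
// Minimal sketch: poll the status path for half-moon-ai/ai-home/edit, then
// fetch the AiHomeEditOutput once the request reports COMPLETED. The URLs
// follow the paths section of the spec above.
const FAL_KEY = process.env.FAL_KEY ?? ''
const BASE = 'https://queue.fal.run/half-moon-ai/ai-home/edit'

async function waitForEditResult(requestId: string) {
  const headers = { Authorization: `Key ${FAL_KEY}` }
  for (;;) {
    // logs=1 asks the status endpoint to include logs, per the query parameter above.
    const status = await fetch(`${BASE}/requests/${requestId}/status?logs=1`, {
      headers,
    }).then((r) => r.json())
    if (status.status === 'COMPLETED') break
    await new Promise((resolve) => setTimeout(resolve, 2_000))
  }
  // Result of the request: AiHomeEditOutput ({ image, status }).
  return fetch(`${BASE}/requests/${requestId}`, { headers }).then((r) => r.json())
}
```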
Use loras to get your custom outputs.", + "status": "active", + "tags": [ + "qwen", + "lora" + ], + "updated_at": "2026-01-26T21:41:46.579Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a879d75/8tZ-zs49go2pWoUCnBy7P_a0de4be2e12e41cea3ad3d1822e877a1.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-layered/lora", + "license_type": "commercial", + "date": "2025-12-24T17:45:40.614Z", + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/qwen-image-layered-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/qwen-image-layered-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image-layered/lora", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-image-layered/lora queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-image-layered/lora", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a879d75/8tZ-zs49go2pWoUCnBy7P_a0de4be2e12e41cea3ad3d1822e877a1.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-layered/lora", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-layered/lora/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImageLayeredLoraInput": { + "title": "TextToImageLoRAInput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "A caption for the input image." + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use.", + "default": "regular" + }, + "num_layers": { + "minimum": 1, + "title": "Num Layers", + "type": "integer", + "maximum": 10, + "description": "The number of layers to generate.", + "default": 4 + }, + "output_format": { + "enum": [ + "png", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "image_url": { + "examples": [ + "https://v3b.fal.media/files/b/0a86d421/6xSMYtyW-fm2ciM6dHEgB.png" + ], + "title": "Image URL", + "type": "string", + "description": "The URL of the input image." 
+ }, + "loras": { + "title": "Loras", + "type": "array", + "description": "List of LoRA weights to apply (maximum 3).", + "items": { + "$ref": "#/components/schemas/LoRAInput" + }, + "default": [] + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance Scale", + "type": "number", + "maximum": 20, + "description": "The guidance scale to use for the image generation.", + "default": 5 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps to perform.", + "default": 28 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to generate an image from.", + "default": "" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "negative_prompt", + "num_inference_steps", + "guidance_scale", + "seed", + "sync_mode", + "num_layers", + "enable_safety_checker", + "output_format", + "acceleration", + "loras" + ], + "required": [ + "image_url" + ] + }, + "QwenImageLayeredLoraOutput": { + "title": "QwenImageLayeredOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used to generate the image." + }, + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/0a86d42c/7T3zJKciQ1cCzqR3ADLif.png" + }, + { + "url": "https://v3b.fal.media/files/b/0a86d42c/KVt5pIhe2dU-qZNC2Njo2.png" + }, + { + "url": "https://v3b.fal.media/files/b/0a86d42c/3BMMGMaHyA3Y7Q_kamIJ_.png" + }, + { + "url": "https://v3b.fal.media/files/b/0a86d42c/AY1BjZxhqS1jl-Pw2S1Tx.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts" + ] + }, + "LoRAInput": { + "title": "LoRAInput", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL, HuggingFace repo ID (owner/repo) to lora weights." 
+ }, + "scale": { + "minimum": 0, + "title": "Scale", + "type": "number", + "maximum": 4, + "description": "Scale factor for LoRA application (0.0 to 4.0).", + "default": 1 + } + }, + "description": "LoRA weight configuration.", + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "ImageFile": { + "title": "ImageFile", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the image" + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the image" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-layered/lora/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-layered/lora/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-layered/lora": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageLayeredLoraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-layered/lora/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageLayeredLoraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "wan/v2.6/image-to-image", + "metadata": { + "display_name": "Wan v2.6 Image to Image", + "category": "image-to-image", + "description": "Wan 2.6 image-to-image model.", + "status": "active", + "tags": [ + "image-to-image" + ], + "updated_at": "2026-01-26T21:41:47.319Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a87803a/zQUa6Yt1AHEqQjqgXBNNe.png", + "model_url": "https://fal.run/wan/v2.6/image-to-image", + "license_type": "commercial", + "date": "2025-12-23T21:00:06.432Z", + "group": { + "key": "v2.6", + "label": "Image to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for wan/v2.6/image-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the wan/v2.6/image-to-image queue.", + "x-fal-metadata": { + "endpointId": "wan/v2.6/image-to-image", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a87803a/zQUa6Yt1AHEqQjqgXBNNe.png", + "playgroundUrl": "https://fal.ai/models/wan/v2.6/image-to-image", + "documentationUrl": "https://fal.ai/models/wan/v2.6/image-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
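The LoRAInput schema above is what distinguishes the /lora variant from the base endpoint: each entry is a `{ path, scale }` pair, capped at three per request. A sketch of a request body that exercises it (the image URL and LoRA repo ID are hypothetical placeholders; only `image_url` is required by the schema):

```ts
// Shapes transcribed from the QwenImageLayeredLoraInput / LoRAInput schemas above.
interface LoRAInput {
  path: string // URL or HuggingFace repo ID (owner/repo) of the weights
  scale?: number // 0.0 to 4.0, defaults to 1
}

interface QwenImageLayeredLoraBody {
  image_url: string // the only required field per the schema
  prompt?: string // a caption for the input image
  num_layers?: number // 1 to 10, defaults to 4
  loras?: Array<LoRAInput> // maximum 3 entries
}

const body: QwenImageLayeredLoraBody = {
  image_url: 'https://example.com/poster.png', // hypothetical placeholder
  num_layers: 4,
  loras: [{ path: 'owner/my-layered-lora', scale: 0.8 }], // hypothetical repo ID
}
```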
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "V26ImageToImageInput": { + "x-fal-order-properties": [ + "prompt", + "image_urls", + "negative_prompt", + "image_size", + "num_images", + "enable_prompt_expansion", + "seed", + "enable_safety_checker" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Place the wizard from image 2 in the ancient library from image 3, holding and studying the magical crystal orb from image 1. The orb's glow illuminates his face with purple and blue light. Floating candles around him, ancient books visible in the background. Mystical, dramatic lighting, fantasy art style, highly detailed." + ], + "description": "Text prompt describing the desired image. Supports Chinese and English. Max 2000 characters. Example: 'Generate an image using the style of image 1 and background of image 2'.", + "minLength": 1, + "title": "Prompt", + "type": "string" + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "Number of images to generate (1-4). Directly affects billing cost.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "Output image size. Use presets like 'square_hd', 'landscape_16_9', 'portrait_9_16', or specify exact dimensions with ImageSize(width=1280, height=720). Total pixels must be between 768*768 and 1280*1280.", + "title": "Image Size", + "examples": [ + "square_hd", + "landscape_16_9", + "portrait_4_3" + ], + "default": "square_hd" + }, + "enable_prompt_expansion": { + "description": "Enable LLM prompt optimization. Significantly improves results for simple prompts but adds 3-4 seconds processing time.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": true + }, + "seed": { + "description": "Random seed for reproducibility (0-2147483647). Same seed produces more consistent results.", + "type": "integer", + "title": "Seed" + }, + "image_urls": { + "examples": [ + [ + "https://v3b.fal.media/files/b/0a86d6a7/6smIczyPbvAU3IJ1F5Ok3.png", + "https://v3b.fal.media/files/b/0a86d6a7/nTYVlOfKLD1FqHAGy7KS3.png", + "https://v3b.fal.media/files/b/0a86d6ae/6JA70jOe0-pbDtXLF2roV.png" + ] + ], + "description": "Reference images for editing (1-3 images required). Order matters: reference as 'image 1', 'image 2', 'image 3' in prompt. Resolution: 384-5000px each dimension. Max size: 10MB each. Formats: JPEG, JPG, PNG (no alpha), BMP, WEBP.", + "type": "array", + "title": "Image Urls", + "items": { + "type": "string" + } + }, + "negative_prompt": { + "examples": [ + "low resolution, error, worst quality, low quality, deformed, extra fingers" + ], + "description": "Content to avoid in the generated image. 
Max 500 characters.", + "type": "string", + "title": "Negative Prompt", + "default": "" + }, + "enable_safety_checker": { + "description": "Enable content moderation for input and output.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + } + }, + "title": "ImageEditInput", + "description": "Input for Wan 2.6 image editing with reference images (enable_interleave=false)", + "required": [ + "prompt", + "image_urls" + ] + }, + "V26ImageToImageOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "file_name": "output_1.png", + "content_type": "image/png", + "url": "https://v3b.fal.media/files/b/0a86d6bb/iSEuXzi3kDy1jnlMCwYuH_output_3.png" + } + ] + ], + "description": "Generated images in PNG format", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/File" + } + }, + "seed": { + "examples": [ + 175932751 + ], + "description": "The seed used for generation", + "type": "integer", + "title": "Seed" + } + }, + "title": "ImageEditOutput", + "description": "Output for Wan 2.6 image editing", + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/wan/v2.6/image-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/wan/v2.6/image-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/wan/v2.6/image-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/V26ImageToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/wan/v2.6/image-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/V26ImageToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-edit-2511", + "metadata": { + "display_name": "Qwen Image Edit 2511", + "category": "image-to-image", + "description": "Endpoint for Qwen's Image Editing 2511 model.", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:41:50.165Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a875820/o5xKLpZ3or9XkGYggf5wi_7470c2b213204f9eb76589e8958e4e81.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-edit-2511", + "license_type": "commercial", + "date": "2025-12-19T19:37:10.346Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image-edit-2511", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-image-edit-2511 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-image-edit-2511", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a875820/o5xKLpZ3or9XkGYggf5wi_7470c2b213204f9eb76589e8958e4e81.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-2511", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-2511/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." 
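Note that V26ImageToImageInput's `image_size` is an anyOf: either a preset name or an explicit ImageSize object. A sketch of both forms, restating the constraints from the schema above:

```ts
// Both accepted forms of image_size, per the anyOf in V26ImageToImageInput.
// The description above also requires total pixels between 768*768 and 1280*1280.
type ImageSizePreset =
  | 'square_hd'
  | 'square'
  | 'portrait_4_3'
  | 'portrait_16_9'
  | 'landscape_4_3'
  | 'landscape_16_9'

interface ImageSize {
  width: number
  height: number
}

const presetSize: ImageSizePreset = 'landscape_16_9'
const exactSize: ImageSize = { width: 1280, height: 720 } // 921,600 px: within range
```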
+ }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImageEdit2511Input": { + "title": "EditImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Change angle to front view" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to edit the image with." + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image. If None, uses the input image dimensions." + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use.", + "default": "regular" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI.", + "default": false + }, + "guidance_scale": { + "minimum": 1, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The guidance scale to use for the image generation.", + "default": 4.5 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 28 + }, + "image_urls": { + "examples": [ + [ + "https://v3b.fal.media/files/b/0a877afe/karyVuQ62j0V6ErYzyW-w_image_6.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "The URLs of the images to edit.", + "items": { + "type": "string" + } + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to generate an image from.", + "default": "" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The same seed and the same prompt given to the same version of the model will output the same image every time." + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "image_size", + "image_urls", + "num_inference_steps", + "guidance_scale", + "seed", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "acceleration" + ], + "required": [ + "prompt", + "image_urls" + ] + }, + "QwenImageEdit2511Output": { + "title": "ImageToImageOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." 
+ }, + "images": { + "examples": [ + [ + { + "height": 768, + "content_type": "image/png", + "url": "https://v3b.fal.media/files/b/0a877afe/InJJA0Q1gtQnyK1N3wdg5.png", + "width": 1376 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-edit-2511/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2511/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2511": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEdit2511Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2511/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEdit2511Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-layered", + "metadata": { + "display_name": "Qwen Image Layered", + "category": "image-to-image", + "description": "Qwen-Image-Layered is a model capable of decomposing an image into multiple RGBA layers.", + "status": "active", + "tags": [ + "qwen", + "layer" + ], + "updated_at": "2026-01-26T21:41:50.288Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a875720/ExJNkqptre1H0dUnnPQBQ_a7b65fa4293d45669d3958280e71b38d.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-layered", + "license_type": "commercial", + "date": "2025-12-19T15:38:51.622Z", + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/qwen-image-layered-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/qwen-image-layered-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image-layered", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-image-layered queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-image-layered", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a875720/ExJNkqptre1H0dUnnPQBQ_a7b65fa4293d45669d3958280e71b38d.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-layered", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-layered/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
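For stricter call sites, the QwenImageEdit2511Input schema above translates directly into a TypeScript interface. A sketch (the field comments restate the documented defaults, and the image URL is a hypothetical placeholder):

```ts
// Transcribed from the QwenImageEdit2511Input schema above; only prompt and
// image_urls are required, the rest fall back to the documented defaults.
interface QwenImageEdit2511Input {
  prompt: string
  image_urls: Array<string>
  negative_prompt?: string // default ""
  num_inference_steps?: number // 1 to 50, default 28
  guidance_scale?: number // 1 to 20, default 4.5
  num_images?: number // 1 to 4, default 1
  output_format?: 'jpeg' | 'png' | 'webp' // default "png"
  acceleration?: 'none' | 'regular' | 'high' // default "regular"
  seed?: number // same seed + prompt reproduces the same image
}

const editRequest: QwenImageEdit2511Input = {
  prompt: 'Change angle to front view',
  image_urls: ['https://example.com/product.png'], // hypothetical placeholder
}
```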
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImageLayeredInput": { + "title": "TextToImageInput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "A caption for the input image." + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use.", + "default": "regular" + }, + "num_layers": { + "minimum": 1, + "title": "Num Layers", + "type": "integer", + "maximum": 10, + "description": "The number of layers to generate.", + "default": 4 + }, + "output_format": { + "enum": [ + "png", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "image_url": { + "examples": [ + "https://v3b.fal.media/files/b/0a86d421/6xSMYtyW-fm2ciM6dHEgB.png" + ], + "title": "Image URL", + "type": "string", + "description": "The URL of the input image." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps to perform.", + "default": 28 + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance Scale", + "type": "number", + "maximum": 20, + "description": "The guidance scale to use for the image generation.", + "default": 5 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to generate an image from.", + "default": "" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "negative_prompt", + "num_inference_steps", + "guidance_scale", + "seed", + "sync_mode", + "num_layers", + "enable_safety_checker", + "output_format", + "acceleration" + ], + "required": [ + "image_url" + ] + }, + "QwenImageLayeredOutput": { + "title": "QwenImageLayeredOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used to generate the image." 
+ }, + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/0a86d42c/7T3zJKciQ1cCzqR3ADLif.png" + }, + { + "url": "https://v3b.fal.media/files/b/0a86d42c/KVt5pIhe2dU-qZNC2Njo2.png" + }, + { + "url": "https://v3b.fal.media/files/b/0a86d42c/3BMMGMaHyA3Y7Q_kamIJ_.png" + }, + { + "url": "https://v3b.fal.media/files/b/0a86d42c/AY1BjZxhqS1jl-Pw2S1Tx.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts" + ] + }, + "ImageFile": { + "title": "ImageFile", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the image" + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the image" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-layered/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-layered/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-layered": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageLayeredInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-layered/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageLayeredOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/z-image/turbo/inpaint/lora", + "metadata": { + "display_name": "Z-Image Turbo", + "category": "image-to-image", + "description": "Generate images from text, an image, a mask and custom LoRA using Z-Image Turbo, Tongyi-MAI's super-fast 6B model.", + "status": "active", + "tags": [ + "inpainting" + ], + "updated_at": "2026-01-26T21:41:50.539Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a86d0e7/FSsTeoWO3HUk50Ba1ftXE_8546e152c771482c9b0b7a56fd1a352e.jpg", + "model_url": "https://fal.run/fal-ai/z-image/turbo/inpaint/lora", + "license_type": "commercial", + "date": "2025-12-18T16:19:58.658Z", + "group": { + "key": "z-image-turbo", + "label": "Inpainting (LoRA)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/z-image-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/z-image-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/z-image/turbo/inpaint/lora", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/z-image/turbo/inpaint/lora queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/z-image/turbo/inpaint/lora", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a86d0e7/FSsTeoWO3HUk50Ba1ftXE_8546e152c771482c9b0b7a56fd1a352e.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/z-image/turbo/inpaint/lora", + "documentationUrl": "https://fal.ai/models/fal-ai/z-image/turbo/inpaint/lora/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + 
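Rather than hand-rolling the queue calls, the official @fal-ai/client package wraps submit-and-poll in a single subscribe() call. A sketch against fal-ai/qwen-image-layered (an assumption: this repo's ai-fal adapter may expose its own wrapper instead, and the input URL is a hypothetical placeholder):

```ts
// Minimal sketch using @fal-ai/client; subscribe() enqueues the request and
// resolves once the queue reports COMPLETED.
import { fal } from '@fal-ai/client'

fal.config({ credentials: process.env.FAL_KEY })

async function decompose() {
  const { data, requestId } = await fal.subscribe('fal-ai/qwen-image-layered', {
    input: {
      image_url: 'https://example.com/illustration.png', // hypothetical placeholder
      num_layers: 4, // 1 to 10 per the schema above
    },
  })
  // data matches QwenImageLayeredOutput above: images, timings, seed, has_nsfw_concepts.
  console.log(requestId, data.images.length)
}
```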
"IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ZImageTurboInpaintLoraInput": { + "title": "ZImageTurboInpaintLoRAInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A young Asian woman with long, vibrant purple hair stands on a sunlit sandy beach, posing confidently with her left hand resting on her hip. She gazes directly at the camera with a neutral expression. A sleek black ribbon bow is tied neatly on the right side of her head, just above her ear. She wears a flowing white cotton dress with a fitted bodice and a flared skirt that reaches mid-calf, slightly lifted by a gentle sea breeze. The beach behind her features fine, pale golden sand with subtle footprints, leading to calm turquoise waves under a clear blue sky with soft, wispy clouds. The lighting is natural daylight, casting soft shadows to her left, indicating late afternoon sun. The horizon line is visible in the background, with a faint silhouette of distant dunes. Her skin tone is fair with a natural glow, and her facial features are delicately defined. The composition is centered on her figure, framed from mid-thigh up, with shallow depth of field blurring the distant waves slightly." 
+ ], + "description": "The prompt to generate an image from.", + "type": "string", + "title": "Prompt" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "description": "The acceleration level to use.", + "type": "string", + "title": "Acceleration", + "default": "regular" + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9", + "auto" + ], + "type": "string" + } + ], + "description": "The size of the generated image.", + "title": "Image Size", + "default": "auto" + },
+ "mask_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/whls/z-image-inpaint-mask.jpg" + ], + "description": "URL of Mask for Inpaint generation.", + "type": "string", + "title": "Mask Image URL" + }, + "loras": { + "description": "List of LoRA weights to apply (maximum 3).", + "type": "array", + "title": "Loras", + "items": { + "$ref": "#/components/schemas/LoRAInput" + }, + "default": [] + }, + "control_end": { + "description": "The end of the controlnet conditioning.", + "type": "number", + "minimum": 0, + "maximum": 1, + "title": "Control End", + "default": 0.8 + }, + "control_start": { + "description": "The start of the controlnet conditioning.", + "type": "number", + "minimum": 0, + "maximum": 1, + "title": "Control Start", + "default": 0 + },
+ "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "maximum": 4, + "title": "Number of Images", + "default": 1 + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "png" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/inpaint-input.jpg" + ], + "description": "URL of Image for Inpaint generation.", + "type": "string", + "title": "Image URL" + },
+ "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "strength": { + "description": "The strength of the inpaint conditioning.", + "type": "number", + "minimum": 0, + "maximum": 1, + "title": "Strength", + "default": 1 + }, + "control_scale": { + "description": "The scale of the controlnet conditioning.", + "type": "number", + "minimum": 0, + "maximum": 1, + "title": "Control Scale", + "default": 0.75 + }, + "enable_prompt_expansion": { + "description": "Whether to enable prompt expansion. Note: this will increase the price by 0.0025 credits per request.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": false + },
+ "num_inference_steps": { + "minimum": 1, + "description": "The number of inference steps to perform.", + "type": "integer", + "maximum": 8, + "title": "Number of Inference Steps", + "default": 8 + }, + "seed": { + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "acceleration", + "enable_prompt_expansion", + "image_url", + "control_scale", + "control_start", + "control_end", + "mask_image_url", + "strength", + "loras" + ], + "required": [ + "prompt", + "image_url", + "mask_image_url" + ] + },
+ "ZImageTurboInpaintLoraOutput": { + "title": "ZImageTurboInpaintOutput", + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "examples": [ + [ + { + "height": 888, + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/example_outputs/z-image-inpaint-output.png", + "width": 512 + } + ] + ], + "description": "The generated image files info.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "seed": { + "description": "Seed of the generated Image. It will be the same value of the one passed in the input or the randomly generated that was used in case none was passed.", + "type": "integer", + "title": "Seed" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "description": "The timings of the generation process.", + "title": "Timings" + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + },
+ "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Height", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Width", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "LoRAInput": { + "description": "LoRA weight configuration.", + "type": "object", + "properties": { + "path": { + "description": "URL, HuggingFace repo ID (owner/repo) to lora weights.", + "type": "string", + "title": "Path" + }, + "scale": { + "minimum": 0, + "description": "Scale factor for LoRA application (0.0 to 4.0).", + "type": "number", + "maximum": 4, + "title": "Scale", + "default": 1 + } + }, + "title": "LoRAInput", + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + },
+ "ImageFile": { + "title": "ImageFile", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", +
"type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/z-image/turbo/inpaint/lora/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/z-image/turbo/inpaint/lora/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/z-image/turbo/inpaint/lora": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ZImageTurboInpaintLoraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/z-image/turbo/inpaint/lora/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ZImageTurboInpaintLoraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/z-image/turbo/inpaint", + "metadata": { + "display_name": "Z-Image Turbo", + "category": "image-to-image", + "description": "Generate images from text, an image and a mask using Z-Image Turbo, Tongyi-MAI's super-fast 6B model.", + "status": "active", + "tags": [ + "inpainting" + ], + "updated_at": "2026-01-26T21:41:50.668Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a86d0c9/znBEfEDmFTeNP6Fm_dfGw_98585a0351414d37a69e4cf0c7f0305a.jpg", + "model_url": "https://fal.run/fal-ai/z-image/turbo/inpaint", + "license_type": "commercial", + "date": "2025-12-18T16:14:23.744Z", + "group": { + "key": "z-image-turbo", + "label": "Inpainting" + }, + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/z-image-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/z-image-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/z-image/turbo/inpaint", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/z-image/turbo/inpaint queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/z-image/turbo/inpaint", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a86d0c9/znBEfEDmFTeNP6Fm_dfGw_98585a0351414d37a69e4cf0c7f0305a.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/z-image/turbo/inpaint", + "documentationUrl": "https://fal.ai/models/fal-ai/z-image/turbo/inpaint/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ZImageTurboInpaintInput": { + "title": "ZImageTurboInpaintInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A young Asian woman with long, vibrant purple hair stands on a sunlit sandy beach, posing confidently with her left hand resting on her hip. She gazes directly at the camera with a neutral expression. A sleek black ribbon bow is tied neatly on the right side of her head, just above her ear. She wears a flowing white cotton dress with a fitted bodice and a flared skirt that reaches mid-calf, slightly lifted by a gentle sea breeze. The beach behind her features fine, pale golden sand with subtle footprints, leading to calm turquoise waves under a clear blue sky with soft, wispy clouds. The lighting is natural daylight, casting soft shadows to her left, indicating late afternoon sun. The horizon line is visible in the background, with a faint silhouette of distant dunes. Her skin tone is fair with a natural glow, and her facial features are delicately defined. The composition is centered on her figure, framed from mid-thigh up, with shallow depth of field blurring the distant waves slightly." + ], + "description": "The prompt to generate an image from.", + "type": "string", + "title": "Prompt" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "description": "The acceleration level to use.", + "type": "string", + "title": "Acceleration", + "default": "regular" + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9", + "auto" + ], + "type": "string" + } + ], + "description": "The size of the generated image.", + "title": "Image Size", + "default": "auto" + }, + "mask_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/whls/z-image-inpaint-mask.jpg" + ], + "description": "URL of Mask for Inpaint generation.", + "type": "string", + "title": "Mask Image URL" + }, + "control_end": { + "description": "The end of the controlnet conditioning.", + "type": "number", + "minimum": 0, + "maximum": 1, + "title": "Control End", + "default": 0.8 + }, + "control_start": { + "description": "The start of the controlnet conditioning.", + "type": "number", + "minimum": 0, + "maximum": 1, + "title": "Control Start", + "default": 0 + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "maximum": 4, + "title": "Number of Images", + "default": 1 + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "png" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/inpaint-input.jpg" + ], + "description": "URL of Image for Inpaint generation.", + "type": "string", + "title": "Image URL" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "strength": { + "description": "The strength of the inpaint conditioning.", + "type": "number", + 
"minimum": 0, + "maximum": 1, + "title": "Strength", + "default": 1 + }, + "control_scale": { + "description": "The scale of the controlnet conditioning.", + "type": "number", + "minimum": 0, + "maximum": 1, + "title": "Control Scale", + "default": 0.75 + }, + "enable_prompt_expansion": { + "description": "Whether to enable prompt expansion. Note: this will increase the price by 0.0025 credits per request.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": false + }, + "num_inference_steps": { + "minimum": 1, + "description": "The number of inference steps to perform.", + "type": "integer", + "maximum": 8, + "title": "Number of Inference Steps", + "default": 8 + }, + "seed": { + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "acceleration", + "enable_prompt_expansion", + "image_url", + "control_scale", + "control_start", + "control_end", + "mask_image_url", + "strength" + ], + "required": [ + "prompt", + "image_url", + "mask_image_url" + ] + }, + "ZImageTurboInpaintOutput": { + "title": "ZImageTurboInpaintOutput", + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "examples": [ + [ + { + "height": 888, + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/example_outputs/z-image-inpaint-output.png", + "width": 512 + } + ] + ], + "description": "The generated image files info.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "seed": { + "description": "Seed of the generated Image. 
It will be the same value of the one passed in the input or the randomly generated that was used in case none was passed.", + "type": "integer", + "title": "Seed" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "description": "The timings of the generation process.", + "title": "Timings" + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Height", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Width", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "ImageFile": { + "title": "ImageFile", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/z-image/turbo/inpaint/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/z-image/turbo/inpaint/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/z-image/turbo/inpaint": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ZImageTurboInpaintInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/z-image/turbo/inpaint/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ZImageTurboInpaintOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2/flash/edit", + "metadata": { + "display_name": "Flux 2", + "category": "image-to-image", + "description": "Image-to-image editing with FLUX.2 [dev] from Black Forest Labs. Precise modifications using natural language descriptions and hex color control—in a flash.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:52.362Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a871484/fjLSktGKoWIGQWm-GRaUM_87cd94bbbff7400b830e73b8f6f075d4.jpg", + "model_url": "https://fal.run/fal-ai/flux-2/flash/edit", + "license_type": "commercial", + "date": "2025-12-16T20:10:11.517Z", + "group": { + "key": "Flux2", + "label": "Image Editing (Flash)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2/flash/edit", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-2/flash/edit queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2/flash/edit", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a871484/fjLSktGKoWIGQWm-GRaUM_87cd94bbbff7400b830e73b8f6f075d4.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2/flash/edit", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2/flash/edit/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2FlashEditInput": { + "x-fal-order-properties": [ + "prompt", + "guidance_scale", + "seed", + "image_size", + "num_images", + "enable_prompt_expansion", + "sync_mode", + "enable_safety_checker", + "output_format", + "image_urls" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Remove the meat from the hamburger" + ], + "description": "The prompt to edit the image.", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "title": "Number of Images", + "maximum": 4, + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the image to generate. The width and height must be between 512 and 2048 pixels.", + "title": "Image Size", + "default": "square_hd" + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "png" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "description": "Guidance Scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.", + "type": "number", + "title": "Guidance Scale", + "maximum": 20, + "default": 2.5 + }, + "seed": { + "description": "The seed to use for the generation. If not provided, a random seed will be used.", + "type": "integer", + "title": "Seed" + }, + "image_urls": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/example_outputs/flux-2-flash-edit-input.png" + ] + ], + "description": "The URLs of the images for editing. A maximum of 4 images are allowed, if more are provided, only the first 4 will be used.", + "type": "array", + "title": "Image URLs", + "items": { + "type": "string" + } + }, + "enable_prompt_expansion": { + "description": "If set to true, the prompt will be expanded for better results.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": false + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + } + }, + "title": "Flux2FlashEditImageInput", + "required": [ + "prompt", + "image_urls" + ] + }, + "Flux2FlashEditOutput": { + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "examples": [ + [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/flux-2-flash-edit.png" + } + ] + ], + "description": "The edited images", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "seed": { + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ", + "type": "integer", + "title": "Seed" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + } + }, + "title": "Flux2FlashEditImageOutput", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "title": "Height", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "title": "Width", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "ImageFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "ImageFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2/flash/edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2/flash/edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2/flash/edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2FlashEditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2/flash/edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2FlashEditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/gpt-image-1.5/edit", + "metadata": { + "display_name": "GPT-Image 1.5", + "category": "image-to-image", + "description": "GPT Image 1.5 generates high-fidelity images with strong prompt adherence, preserving composition, lighting, and fine-grained detail.", + "status": "active", + "tags": [ + "openai", + "gpt-image" + ], + "updated_at": "2026-01-27T18:33:45.691Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a875814/3KvlvWItvogHIOuPRWIVl_001c9e77742a4ff2a829a8ece127c0d1.jpg", + "model_url": "https://fal.run/fal-ai/gpt-image-1.5/edit", + "license_type": "commercial", + "date": "2025-12-16T18:38:20.664Z", + "group": { + "key": "gpt-image-1.5", + "label": "Edit Image" + }, + "highlighted": false, + "kind": "inference", + "stream_url": "https://fal.run/fal-ai/gpt-image-1.5/edit/stream", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/gpt-image-1.5/edit", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/gpt-image-1.5/edit queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/gpt-image-1.5/edit", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a875814/3KvlvWItvogHIOuPRWIVl_001c9e77742a4ff2a829a8ece127c0d1.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/gpt-image-1.5/edit", + "documentationUrl": "https://fal.ai/models/fal-ai/gpt-image-1.5/edit/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "GptImage15EditInput": { + "title": "EditImageRequest", + "type": "object", + "properties": { + "input_fidelity": { + "enum": [ + "low", + "high" + ], + "title": "Input Fidelity", + "type": "string", + "description": "Input fidelity for the generated image", + "default": "high" + }, + "num_images": { + "description": "Number of images to generate", + "type": "integer", + "minimum": 1, + "title": "Number of Images", + "examples": [ + 1 + ], + "maximum": 4, + "default": 1 + }, + "image_size": { + "enum": [ + "auto", + "1024x1024", + "1536x1024", + "1024x1536" + ], + "title": "Image Size", + "type": "string", + "description": "Aspect ratio for the generated image", + "default": "auto" + }, + "prompt": { + "examples": [ + "Same workers, same beam, same lunch boxes - but they're all on their phones now. One is taking a selfie. One is on a call looking annoyed. Same danger, new priorities. A hard hat has AirPods." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt for image generation", + "minLength": 2 + }, + "quality": { + "enum": [ + "low", + "medium", + "high" + ], + "title": "Quality", + "type": "string", + "description": "Quality for the generated image", + "default": "high" + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "Output format for the images", + "default": "png" + }, + "background": { + "enum": [ + "auto", + "transparent", + "opaque" + ], + "title": "Background", + "type": "string", + "description": "Background for the generated image", + "default": "auto" + }, + "mask_image_url": { + "title": "Mask Image URL", + "type": "string", + "description": "The URL of the mask image to use for the generation. This indicates what part of the image to edit." 
+ }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "image_urls": { + "examples": [ + [ + "https://v3b.fal.media/files/b/0a8691af/9Se_1_VX1wzTjjTOpWbs9_bb39c2eb-1a41-4749-b1d0-cf134abc8bbf.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "The URLs of the images to use as a reference for the generation.", + "items": { + "type": "string" + } + } + }, + "x-fal-order-properties": [ + "prompt", + "image_urls", + "image_size", + "background", + "quality", + "input_fidelity", + "num_images", + "output_format", + "sync_mode", + "mask_image_url" + ], + "required": [ + "prompt", + "image_urls" + ] + }, + "GptImage15EditOutput": { + "title": "EditImageResponse", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "height": 1024, + "file_name": "yUt7tifLSbg1WzWWgfj2o.png", + "content_type": "image/png", + "url": "https://v3b.fal.media/files/b/0a8691b0/yUt7tifLSbg1WzWWgfj2o.png", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated images.", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + } + }, + "x-fal-order-properties": [ + "images" + ], + "required": [ + "images" + ] + }, + "ImageFile": { + "title": "ImageFile", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the image" + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the image" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/gpt-image-1.5/edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/gpt-image-1.5/edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/gpt-image-1.5/edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GptImage15EditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/gpt-image-1.5/edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GptImage15EditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2/turbo/edit", + "metadata": { + "display_name": "Flux 2", + "category": "image-to-image", + "description": "Image-to-image editing with FLUX.2 [dev] from Black Forest Labs. Precise modifications using natural language descriptions and hex color control—all at turbo speed.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:53.113Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a87149a/-KWY7GOfhbNA3tLkPSJ8w_b8d48adf037e42038ecf0595281ffaac.jpg", + "model_url": "https://fal.run/fal-ai/flux-2/turbo/edit", + "license_type": "commercial", + "date": "2025-12-16T14:07:03.662Z", + "group": { + "key": "Flux2", + "label": "Image Editing (Turbo)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2/turbo/edit", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-2/turbo/edit queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2/turbo/edit", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a87149a/-KWY7GOfhbNA3tLkPSJ8w_b8d48adf037e42038ecf0595281ffaac.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2/turbo/edit", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2/turbo/edit/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + },
+ "Flux2TurboEditInput": { + "x-fal-order-properties": [ + "prompt", + "guidance_scale", + "seed", + "image_size", + "num_images", + "enable_prompt_expansion", + "sync_mode", + "enable_safety_checker", + "output_format", + "image_urls" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Change the weather to winter" + ], + "description": "The prompt to edit the image.", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "title": "Number of Images", + "maximum": 4, + "default": 1 + },
+ "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the image to generate. The width and height must be between 512 and 2048 pixels.", + "title": "Image Size", + "default": "square_hd" + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "png" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + },
+ "guidance_scale": { + "minimum": 0, + "description": "Guidance Scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.", + "type": "number", + "title": "Guidance Scale", + "maximum": 20, + "default": 2.5 + }, + "seed": { + "description": "The seed to use for the generation. If not provided, a random seed will be used.", + "type": "integer", + "title": "Seed" + }, + "image_urls": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/example_outputs/flux-2-turbo.png" + ] + ], + "description": "The URLs of the images for editing. A maximum of 4 images are allowed, if more are provided, only the first 4 will be used.", + "type": "array", + "title": "Image URLs", + "items": { + "type": "string" + } + },
+ "enable_prompt_expansion": { + "description": "If set to true, the prompt will be expanded for better results.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": false + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + } + }, + "title": "Flux2TurboEditImageInput", + "required": [ + "prompt", + "image_urls" + ] + },
+ "Flux2TurboEditOutput": { + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "examples": [ + [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/flux-2-turbo-edit.png" + } + ] + ], + "description": "The edited images", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "seed": { + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ", + "type": "integer", + "title": "Seed" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + } + }, + "title": "Flux2TurboEditImageOutput", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + },
+ "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "title": "Height", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "title": "Width", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + },
+ "ImageFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "ImageFile", + "required": [ + "url" + ] + } + } + },
+ "paths": { + "/fal-ai/flux-2/turbo/edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + },
+ "/fal-ai/flux-2/turbo/edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + },
+ "/fal-ai/flux-2/turbo/edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2TurboEditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + },
+ "/fal-ai/flux-2/turbo/edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2TurboEditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + },
+ { + "endpoint_id": "fal-ai/flux-2-max/edit", + "metadata": { + "display_name": "Flux 2 Max", + "category": "image-to-image", + "description": "FLUX.2 [max] delivers state-of-the-art image generation and advanced image editing with exceptional realism, precision, and consistency.", + "status": "active", + "tags": [ + "flux2", + "image-editing", + "high-quality" + ], + "updated_at": "2026-01-26T21:41:53.485Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8689a8/bbcmo6U5xg_RxDXijtxNA_55df705e1b1b4535a90bccd70887680e.jpg", + "model_url": "https://fal.run/fal-ai/flux-2-max/edit", + "license_type": "commercial", + "date": "2025-12-16T13:47:03.184Z", + "highlighted": false, + "kind": "inference", + "pinned": false + },
+ "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2-max/edit", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-2-max/edit queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2-max/edit", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8689a8/bbcmo6U5xg_RxDXijtxNA_55df705e1b1b4535a90bccd70887680e.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2-max/edit", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2-max/edit/api" + } + },
+ "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + },
+ "Flux2MaxEditInput": { + "x-fal-order-properties": [ + "prompt", + "image_size", + "seed", + "safety_tolerance", + "enable_safety_checker", + "output_format", + "sync_mode", + "image_urls" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A high-fashion magazine cover featuring an android in an avant-garde geometric cloth dress, with logo prints from @Image1. The backdrop is an eye-catching scenery. The title text 'FAL MAGAZINE' spans the top in bold white serif font. Overlay text at the bottom right reads 'THE FUTURE OF AI' in a sleek, thin sans-serif font." + ], + "description": "The prompt to generate an image from.", + "type": "string", + "title": "Prompt" + },
+ "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "auto", + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image. If `auto`, the size will be determined by the model.", + "title": "Image Size", + "default": "auto" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "jpeg" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + },
+ "safety_tolerance": { + "enum": [ + "1", + "2", + "3", + "4", + "5" + ], + "description": "The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive.", + "type": "string", + "title": "Safety Tolerance", + "default": "2" + }, + "enable_safety_checker": { + "description": "Whether to enable the safety checker.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "seed": { + "description": "The seed to use for the generation.", + "type": "integer", + "title": "Seed" + }, + "image_urls": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/example_inputs/flux2/max.jpg" + ] + ], + "description": "List of URLs of input images for editing", + "type": "array", + "title": "Image URLs", + "items": { + "type": "string" + } + } + }, + "title": "Flux2MaxImageEditInput", + "required": [ + "prompt", + "image_urls" + ] + },
+ "Flux2MaxEditOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/flux2/max_edit_output.jpg" + } + ] + ], + "description": "The generated images.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "seed": { + "description": "The seed used for the generation.", + "type": "integer", + "title": "Seed" + } + }, + "title": "Flux2MaxEditOutput", + "required": [ + "images", + "seed" + ] + },
+ "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "title": "Height", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "title": "Width", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + },
+ "ImageFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "ImageFile", + "required": [ + "url" + ] + } + } + },
+ "paths": { + "/fal-ai/flux-2-max/edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)."
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-max/edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2-max/edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2MaxEditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-max/edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2MaxEditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "half-moon-ai/ai-baby-and-aging-generator/multi", + "metadata": { + "display_name": "Ai Baby And Aging Generator", + "category": "image-to-image", + "description": "AI Baby Generator is a service that instantly creates realistic predictions of a future child from parent photos.", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:41:53.609Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8687dc/xV_Pxng9i7wauNs7OGsJz_dba461c193414ec9a0e3f678b471c1f2.jpg", + "model_url": "https://fal.run/half-moon-ai/ai-baby-and-aging-generator/multi", + "license_type": "commercial", + "date": "2025-12-16T12:22:10.157Z", + "group": { + "key": "Half-Moon-Baby", + "label": "Multi Person" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for half-moon-ai/ai-baby-and-aging-generator/multi", + "version": "1.0.0", + "description": "The OpenAPI schema for the half-moon-ai/ai-baby-and-aging-generator/multi queue.", + "x-fal-metadata": { + "endpointId": "half-moon-ai/ai-baby-and-aging-generator/multi", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8687dc/xV_Pxng9i7wauNs7OGsJz_dba461c193414ec9a0e3f678b471c1f2.jpg", + "playgroundUrl": "https://fal.ai/models/half-moon-ai/ai-baby-and-aging-generator/multi", + "documentationUrl": "https://fal.ai/models/half-moon-ai/ai-baby-and-aging-generator/multi/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] 
+ }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "AiBabyAndAgingGeneratorMultiInput": { + "title": "MultiFluxIDInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "a newborn baby, well dressed" + ], + "title": "Prompt", + "type": "string", + "description": "Text prompt to guide the image generation", + "default": "a newborn baby, well dressed" + }, + "num_images": { + "description": "Number of images to generate", + "type": "integer", + "minimum": 1, + "maximum": 4, + "examples": [ + 1 + ], + "title": "Num Images", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image", + "title": "Image Size", + "default": { + "height": 1152, + "width": 864 + } + }, + "father_weight": { + "description": "Weight of the father's influence in multi mode generation", + "type": "number", + "minimum": 0, + "maximum": 1, + "examples": [ + 0.5 + ], + "title": "Father Weight", + "default": 0.5 + }, + "mother_image_urls": { + "examples": [ + [ + "https://cdn.britannica.com/56/172456-050-F518B29E/Gwyneth-Paltrow-2013.jpg?w=400&h=300&c=crop" + ] + ], + "title": "Mother Image Urls", + "type": "array", + "minItems": 1, + "description": "List of mother images for multi mode", + "items": { + "type": "string" + } + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image. Choose from: 'jpeg' or 'png'.", + "default": "jpeg" + }, + "age_group": { + "examples": [ + "baby" + ], + "title": "Age Group", + "type": "string", + "description": "Age group for the generated image. Choose from: 'baby' (0-12 months), 'toddler' (1-3 years), 'preschool' (3-5 years), 'gradeschooler' (6-12 years), 'teen' (13-19 years), 'adult' (20-40 years), 'mid' (40-60 years), 'senior' (60+ years).", + "enum": [ + "baby", + "toddler", + "preschool", + "gradeschooler", + "teen", + "adult", + "mid", + "senior" + ] + }, + "gender": { + "examples": [ + "male" + ], + "title": "Gender", + "type": "string", + "description": "Gender for the generated image. Choose from: 'male' or 'female'.", + "enum": [ + "male", + "female" + ] + }, + "father_image_urls": { + "examples": [ + [ + "https://hips.hearstapps.com/hmg-prod/images/gettyimages-498622514.jpg?crop=1xw:1.0xh;center,top&resize=640:*" + ] + ], + "title": "Father Image Urls", + "type": "array", + "minItems": 1, + "description": "List of father images for multi mode", + "items": { + "type": "string" + } + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "Random seed for reproducibility. 
If None, a random seed will be used", + "examples": [ + 42 + ], + "title": "Seed" + } + }, + "description": "Input schema for multi mode generation", + "x-fal-order-properties": [ + "age_group", + "gender", + "prompt", + "image_size", + "num_images", + "seed", + "output_format", + "mother_image_urls", + "father_image_urls", + "father_weight" + ], + "required": [ + "age_group", + "gender", + "mother_image_urls", + "father_image_urls" + ] + }, + "AiBabyAndAgingGeneratorMultiOutput": { + "title": "FluxMultiIDOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The final prompt used for generation" + }, + "images": { + "examples": [ + [ + { + "height": 1152, + "content_type": "image/jpeg", + "url": "https://ai-tests.angeneraltest.com/test-files/ai_baby_2.jpg", + "width": 864 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated image files info", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + } + }, + "x-fal-order-properties": [ + "images", + "seed", + "prompt" + ], + "required": [ + "images", + "seed", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ], + "title": "File Size" + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The height of the image in pixels.", + "examples": [ + 1024 + ], + "title": "Height" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name" + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "examples": [ + "image/png" + ], + "title": "Content Type" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The width of the image in pixels.", + "examples": [ + 1024 + ], + "title": "Width" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/half-moon-ai/ai-baby-and-aging-generator/multi/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/half-moon-ai/ai-baby-and-aging-generator/multi/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/half-moon-ai/ai-baby-and-aging-generator/multi": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AiBabyAndAgingGeneratorMultiInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/half-moon-ai/ai-baby-and-aging-generator/multi/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AiBabyAndAgingGeneratorMultiOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "half-moon-ai/ai-baby-and-aging-generator/single", + "metadata": { + "display_name": "Ai Baby And Aging Generator", + "category": "image-to-image", + "description": "AI Aging Generator performs controllable age progression or regression from a single face photo, generating lifelike portraits across eight age groups from baby to senior.", + "status": "active", + "tags": [ + "utility", + "editing" + ], + "updated_at": "2026-01-26T21:41:53.742Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8687c9/JID-1vH-CZ4Qnv--3XegB_5239bc988aa447859ff87261b87c34f5.jpg", + "model_url": "https://fal.run/half-moon-ai/ai-baby-and-aging-generator/single", + "license_type": "commercial", + "date": "2025-12-16T12:20:06.642Z", + "group": { + "key": "Half-Moon-Baby", + "label": "Single Person" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for half-moon-ai/ai-baby-and-aging-generator/single", + "version": "1.0.0", + "description": "The 
OpenAPI schema for the half-moon-ai/ai-baby-and-aging-generator/single queue.", + "x-fal-metadata": { + "endpointId": "half-moon-ai/ai-baby-and-aging-generator/single", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8687c9/JID-1vH-CZ4Qnv--3XegB_5239bc988aa447859ff87261b87c34f5.jpg", + "playgroundUrl": "https://fal.ai/models/half-moon-ai/ai-baby-and-aging-generator/single", + "documentationUrl": "https://fal.ai/models/half-moon-ai/ai-baby-and-aging-generator/single/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "AiBabyAndAgingGeneratorSingleInput": { + "title": "SingleFluxIDInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "a newborn baby, well dressed" + ], + "title": "Prompt", + "type": "string", + "description": "Text prompt to guide the image generation", + "default": "a newborn baby, well dressed" + }, + "num_images": { + "description": "Number of images to generate", + "type": "integer", + "minimum": 1, + "maximum": 4, + "examples": [ + 1 + ], + "title": "Num Images", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image", + "title": "Image Size", + "default": { + "height": 1152, + "width": 864 + } + }, + "id_image_urls": { + "examples": [ + [ + "https://images.pexels.com/photos/1642228/pexels-photo-1642228.jpeg" + ] + ], + "title": "Id Image Urls", + "type": "array", + "minItems": 1, + "description": "List of ID images for single mode (or general reference images)", + "items": { + "type": "string" + } + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image. Choose from: 'jpeg' or 'png'.", + "default": "jpeg" + }, + "age_group": { + "examples": [ + "baby" + ], + "title": "Age Group", + "type": "string", + "description": "Age group for the generated image. Choose from: 'baby' (0-12 months), 'toddler' (1-3 years), 'preschool' (3-5 years), 'gradeschooler' (6-12 years), 'teen' (13-19 years), 'adult' (20-40 years), 'mid' (40-60 years), 'senior' (60+ years).", + "enum": [ + "baby", + "toddler", + "preschool", + "gradeschooler", + "teen", + "adult", + "mid", + "senior" + ] + }, + "gender": { + "examples": [ + "male" + ], + "title": "Gender", + "type": "string", + "description": "Gender for the generated image. 
Choose from: 'male' or 'female'.", + "enum": [ + "male", + "female" + ] + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "Random seed for reproducibility. If None, a random seed will be used", + "examples": [ + 42 + ], + "title": "Seed" + } + }, + "description": "Input schema for single mode generation", + "x-fal-order-properties": [ + "age_group", + "gender", + "prompt", + "image_size", + "num_images", + "seed", + "output_format", + "id_image_urls" + ], + "required": [ + "age_group", + "gender", + "id_image_urls" + ] + }, + "AiBabyAndAgingGeneratorSingleOutput": { + "title": "FluxSingleIDOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The final prompt used for generation" + }, + "images": { + "examples": [ + [ + { + "height": 1152, + "content_type": "image/jpeg", + "url": "https://ai-tests.angeneraltest.com/test-files/ai_baby_1.jpg", + "width": 864 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated image files info", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + } + }, + "x-fal-order-properties": [ + "images", + "seed", + "prompt" + ], + "required": [ + "images", + "seed", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ], + "title": "File Size" + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The height of the image in pixels.", + "examples": [ + 1024 + ], + "title": "Height" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name" + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "examples": [ + "image/png" + ], + "title": "Content Type" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The width of the image in pixels.", + "examples": [ + 1024 + ], + "title": "Width" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/half-moon-ai/ai-baby-and-aging-generator/single/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/half-moon-ai/ai-baby-and-aging-generator/single/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/half-moon-ai/ai-baby-and-aging-generator/single": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AiBabyAndAgingGeneratorSingleInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/half-moon-ai/ai-baby-and-aging-generator/single/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AiBabyAndAgingGeneratorSingleOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-edit-2509-lora-gallery/shirt-design", + "metadata": { + "display_name": "Qwen Image Edit 2509 Lora Gallery", + "category": "image-to-image", + "description": "Apply designs/graphics onto people's shirts", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:41:55.465Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/lion/Sh9OfYjTHGDA8el8pezwY_754c1fb894234b08b52605905ff9ac2d.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-edit-2509-lora-gallery/shirt-design", + "license_type": "private", + "date": "2025-12-15T23:49:16.335Z", + "group": { + "key": "qwen-image-edit-2509-lora-gallery", + "label": "Shirt Design" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image-edit-2509-lora-gallery/shirt-design", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-image-edit-2509-lora-gallery/shirt-design queue.", 
+ "x-fal-metadata": { + "endpointId": "fal-ai/qwen-image-edit-2509-lora-gallery/shirt-design", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/lion/Sh9OfYjTHGDA8el8pezwY_754c1fb894234b08b52605905ff9ac2d.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-2509-lora-gallery/shirt-design", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-2509-lora-gallery/shirt-design/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImageEdit2509LoraGalleryShirtDesignInput": { + "x-fal-order-properties": [ + "image_urls", + "image_size", + "guidance_scale", + "num_inference_steps", + "acceleration", + "negative_prompt", + "seed", + "sync_mode", + "enable_safety_checker", + "output_format", + "num_images", + "prompt", + "lora_scale" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Put this design on their shirt", + "Apply this graphic to their t-shirt", + "Place this logo on their shirt" + ], + "title": "Prompt", + "type": "string", + "description": "Describe what design to put on the shirt. The model will apply the design from your input image onto the person's shirt.", + "default": "Put this design on their shirt" + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "Number of images to generate", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Image Size", + "description": "The size of the generated image. If not provided, the size of the final input image will be used." + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "title": "Acceleration", + "type": "string", + "description": "Acceleration level for image generation. 'regular' balances speed and quality.", + "default": "regular" + }, + "lora_scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Lora Scale", + "description": "The scale factor for the LoRA model. 
Controls the strength of the LoRA effect.", + "default": 1 + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output image", + "default": "png" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker for the generated image.", + "default": true + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and won't be saved in history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.", + "default": 1 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. Same seed with same prompt will produce same result." + }, + "image_urls": { + "examples": [ + [ + "https://v3b.fal.media/files/b/tiger/1rq65RzrUwKtHLAwpEjq8_4ee388931b5142f1bd1f2e0a3cb2498e.png", + "https://github.com/fal-ai/fal-assets/blob/main/Logo%20Square.png?raw=true" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "The URLs of the images: first image is the person wearing a shirt, second image is the design/logo to put on the shirt.", + "items": { + "type": "string" + } + }, + "negative_prompt": { + "examples": [ + " " + ], + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt for the generation", + "default": " " + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 6 + } + }, + "title": "ShirtDesignInput", + "description": "Input model for Shirt Design endpoint - Put designs/graphics on people's shirts", + "required": [ + "image_urls" + ] + }, + "QwenImageEdit2509LoraGalleryShirtDesignOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/panda/Y5wKKIEuFpRMEUQ8ZPy01.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated/edited images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + } + }, + "title": "ShirtDesignOutput", + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 
4404019 + ], + "description": "The size of the file in bytes." + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "examples": [ + 1024 + ], + "description": "The height of the image in pixels." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "examples": [ + 1024 + ], + "description": "The width of the image in pixels." + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-edit-2509-lora-gallery/shirt-design/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2509-lora-gallery/shirt-design/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2509-lora-gallery/shirt-design": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEdit2509LoraGalleryShirtDesignInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2509-lora-gallery/shirt-design/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEdit2509LoraGalleryShirtDesignOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-edit-2509-lora-gallery/remove-lighting", + "metadata": { + "display_name": "Qwen Image Edit 2509 Lora Gallery", + "category": "image-to-image", + "description": "Remove existing lighting and apply soft, even illumination", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:41:55.658Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/zebra/z1Ze46BEziziJyYnGPfMA_411ef5a278eb4a60a4e7afab24167f6d.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-edit-2509-lora-gallery/remove-lighting", + "license_type": "private", + "date": "2025-12-15T23:48:14.384Z", + "group": { + "key": "qwen-image-edit-2509-lora-gallery", + "label": "Remove Lighting" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image-edit-2509-lora-gallery/remove-lighting", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-image-edit-2509-lora-gallery/remove-lighting queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-image-edit-2509-lora-gallery/remove-lighting", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/zebra/z1Ze46BEziziJyYnGPfMA_411ef5a278eb4a60a4e7afab24167f6d.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-2509-lora-gallery/remove-lighting", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-2509-lora-gallery/remove-lighting/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImageEdit2509LoraGalleryRemoveLightingInput": { + "x-fal-order-properties": [ + "image_urls", + "image_size", + "guidance_scale", + "num_inference_steps", + "acceleration", + "negative_prompt", + "seed", + "sync_mode", + "enable_safety_checker", + "output_format", + "num_images" + ], + "type": "object", + "properties": { + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker for the generated image.", + "default": true + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "Number of images to generate", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Image Size", + "description": "The size of the generated image. If not provided, the size of the final input image will be used." + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "title": "Acceleration", + "type": "string", + "description": "Acceleration level for image generation. 'regular' balances speed and quality.", + "default": "regular" + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output image", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and won't be saved in history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.", + "default": 1 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. Same seed with same prompt will produce same result." 
+ }, + "image_urls": { + "examples": [ + [ + "https://v3b.fal.media/files/b/panda/J0XyFgb0AAgyUzmVFd0nr_5363c66361d94cea89333795d700165d.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "The URL of the image with lighting/shadows to remove.", + "items": { + "type": "string" + } + }, + "negative_prompt": { + "examples": [ + " " + ], + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt for the generation", + "default": " " + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 6 + } + }, + "title": "RemoveLightingInput", + "description": "Input model for Remove Lighting endpoint - Remove existing lighting and apply soft even lighting", + "required": [ + "image_urls" + ] + }, + "QwenImageEdit2509LoraGalleryRemoveLightingOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/monkey/D7FrWGFnb7t8fjiE9Cok4.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated/edited images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + } + }, + "title": "RemoveLightingOutput", + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "examples": [ + 1024 + ], + "description": "The height of the image in pixels." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "examples": [ + 1024 + ], + "description": "The width of the image in pixels." 
+ } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-edit-2509-lora-gallery/remove-lighting/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2509-lora-gallery/remove-lighting/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2509-lora-gallery/remove-lighting": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEdit2509LoraGalleryRemoveLightingInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2509-lora-gallery/remove-lighting/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEdit2509LoraGalleryRemoveLightingOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-edit-2509-lora-gallery/remove-element", + "metadata": { + "display_name": "Qwen Image Edit 2509 Lora Gallery", + "category": "image-to-image", + "description": "Remove unwanted elements (objects, people, text) while maintaining image consistency", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:41:55.782Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/kangaroo/qjrxX6uCXnmLSaIK20Ju6_65d0867c22dd4fe6a5e6244062c4d7a1.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-edit-2509-lora-gallery/remove-element", + "license_type": "private", + "date": "2025-12-15T23:46:58.957Z", + "group": { + "key": "qwen-image-edit-2509-lora-gallery", + "label": "Remove Element" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image-edit-2509-lora-gallery/remove-element", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-image-edit-2509-lora-gallery/remove-element queue.", + "x-fal-metadata": { + "endpointId": 
"fal-ai/qwen-image-edit-2509-lora-gallery/remove-element", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/kangaroo/qjrxX6uCXnmLSaIK20Ju6_65d0867c22dd4fe6a5e6244062c4d7a1.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-2509-lora-gallery/remove-element", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-2509-lora-gallery/remove-element/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImageEdit2509LoraGalleryRemoveElementInput": { + "x-fal-order-properties": [ + "image_urls", + "image_size", + "guidance_scale", + "num_inference_steps", + "acceleration", + "negative_prompt", + "seed", + "sync_mode", + "enable_safety_checker", + "output_format", + "num_images", + "prompt", + "lora_scale" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Remove the person from the image", + "Remove the car and the bicycle", + "Remove the text and logos" + ], + "title": "Prompt", + "type": "string", + "description": "Specify what element(s) to remove from the image (objects, people, text, etc.). The model will cleanly remove the element while maintaining consistency of the rest of the image.", + "default": "Remove the specified element from the scene" + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "Number of images to generate", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Image Size", + "description": "The size of the generated image. If not provided, the size of the final input image will be used." + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "title": "Acceleration", + "type": "string", + "description": "Acceleration level for image generation. 'regular' balances speed and quality.", + "default": "regular" + }, + "lora_scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Lora Scale", + "description": "The scale factor for the LoRA model. 
Controls the strength of the LoRA effect.", + "default": 1 + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output image", + "default": "png" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker for the generated image.", + "default": true + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and won't be saved in history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.", + "default": 1 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. Same seed with same prompt will produce same result." + }, + "image_urls": { + "examples": [ + [ + "https://v3b.fal.media/files/b/elephant/oWup_Q7zuvbfB4en-hneO_5aaa1cb3d3eb44999005159e82e7c9b7.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "The URL of the image containing elements to remove.", + "items": { + "type": "string" + } + }, + "negative_prompt": { + "examples": [ + " " + ], + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt for the generation", + "default": " " + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 6 + } + }, + "title": "RemoveElementInput", + "description": "Input model for Remove Element endpoint - Remove/delete elements (objects, people, text) from the image", + "required": [ + "image_urls" + ] + }, + "QwenImageEdit2509LoraGalleryRemoveElementOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/koala/dTldnOpRSFVBvWiyfOeO1.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated/edited images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + } + }, + "title": "RemoveElementOutput", + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." 
+ }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "examples": [ + 1024 + ], + "description": "The height of the image in pixels." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "examples": [ + 1024 + ], + "description": "The width of the image in pixels." + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-edit-2509-lora-gallery/remove-element/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2509-lora-gallery/remove-element/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2509-lora-gallery/remove-element": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEdit2509LoraGalleryRemoveElementInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2509-lora-gallery/remove-element/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEdit2509LoraGalleryRemoveElementOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-edit-2509-lora-gallery/lighting-restoration", + "metadata": { + "display_name": "Qwen Image Edit 2509 Lora Gallery", + "category": "image-to-image", + "description": "Removes harsh shadows and light spots from images, replacing them with soft, even, natural-looking illumination.", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:41:55.906Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a860fd6/fyrneTtvBVrhWKMhSc0pj_9589e39060444dee9e7c33be9e16d1bf.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-edit-2509-lora-gallery/lighting-restoration", + "license_type": "private", + "date": "2025-12-15T23:45:10.369Z", + "group": { + "key": "qwen-image-edit-2509-lora-gallery", + "label": "Lighting Restoration" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image-edit-2509-lora-gallery/lighting-restoration", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-image-edit-2509-lora-gallery/lighting-restoration queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-image-edit-2509-lora-gallery/lighting-restoration", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a860fd6/fyrneTtvBVrhWKMhSc0pj_9589e39060444dee9e7c33be9e16d1bf.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-2509-lora-gallery/lighting-restoration", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-2509-lora-gallery/lighting-restoration/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImageEdit2509LoraGalleryLightingRestorationInput": { + "x-fal-order-properties": [ + "image_urls", + "image_size", + "guidance_scale", + "num_inference_steps", + "acceleration", + "negative_prompt", + "seed", + "sync_mode", + "enable_safety_checker", + "output_format", + "num_images" + ], + "type": "object", + "properties": { + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker for the generated image.", + "default": true + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "Number of images to generate", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Image Size", + "description": "The size of the generated image. If not provided, the size of the final input image will be used." + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "title": "Acceleration", + "type": "string", + "description": "Acceleration level for image generation. 'regular' balances speed and quality.", + "default": "regular" + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output image", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and won't be saved in history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.", + "default": 1 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. Same seed with same prompt will produce same result." 
+ }, + "image_urls": { + "examples": [ + [ + "https://v3b.fal.media/files/b/0a860a2e/L4v5FJm9lwFGGdRY2P7tb.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "The URL of the image to restore lighting for.", + "items": { + "type": "string" + } + }, + "negative_prompt": { + "examples": [ + " " + ], + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt for the generation", + "default": " " + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 6 + } + }, + "title": "LightingRestorationInput", + "description": "Input model for Lighting Restoration endpoint - Restore natural lighting by removing harsh shadows and light spots", + "required": [ + "image_urls" + ] + }, + "QwenImageEdit2509LoraGalleryLightingRestorationOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/0a860a37/ct1JcapCdZTzfNhI0-GM5.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated/edited images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + } + }, + "title": "LightingRestorationOutput", + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "examples": [ + 1024 + ], + "description": "The height of the image in pixels." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "examples": [ + 1024 + ], + "description": "The width of the image in pixels." 
+ } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-edit-2509-lora-gallery/lighting-restoration/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2509-lora-gallery/lighting-restoration/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2509-lora-gallery/lighting-restoration": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEdit2509LoraGalleryLightingRestorationInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2509-lora-gallery/lighting-restoration/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEdit2509LoraGalleryLightingRestorationOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-edit-2509-lora-gallery/integrate-product", + "metadata": { + "display_name": "Qwen Image Edit 2509 Lora Gallery", + "category": "image-to-image", + "description": "Blend products into backgrounds with automatic perspective and lighting correction", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:41:56.030Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/lion/kQP6sIJmyFzXBvEwY4n_g_83d331b99dff4ac58f6d32631a24a774.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-edit-2509-lora-gallery/integrate-product", + "license_type": "private", + "date": "2025-12-15T23:43:51.495Z", + "group": { + "key": "qwen-image-edit-2509-lora-gallery", + "label": "Integrate Product" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image-edit-2509-lora-gallery/integrate-product", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-image-edit-2509-lora-gallery/integrate-product queue.", + "x-fal-metadata": { + 
"endpointId": "fal-ai/qwen-image-edit-2509-lora-gallery/integrate-product", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/lion/kQP6sIJmyFzXBvEwY4n_g_83d331b99dff4ac58f6d32631a24a774.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-2509-lora-gallery/integrate-product", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-2509-lora-gallery/integrate-product/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImageEdit2509LoraGalleryIntegrateProductInput": { + "x-fal-order-properties": [ + "image_urls", + "image_size", + "guidance_scale", + "num_inference_steps", + "acceleration", + "negative_prompt", + "seed", + "sync_mode", + "enable_safety_checker", + "output_format", + "num_images", + "prompt", + "lora_scale" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Blend and integrate the product into the background with correct perspective and lighting", + "Seamlessly blend the object into the scene with natural shadows", + "Integrate the product naturally into the environment" + ], + "title": "Prompt", + "type": "string", + "description": "Describe how to blend and integrate the product/element into the background. The model will automatically correct perspective, lighting and shadows for natural integration.", + "default": "Blend and integrate the product into the background" + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "Number of images to generate", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Image Size", + "description": "The size of the generated image. If not provided, the size of the final input image will be used." + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "title": "Acceleration", + "type": "string", + "description": "Acceleration level for image generation. 'regular' balances speed and quality.", + "default": "regular" + }, + "lora_scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Lora Scale", + "description": "The scale factor for the LoRA model. 
Controls the strength of the LoRA effect.", + "default": 1 + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output image", + "default": "png" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker for the generated image.", + "default": true + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and won't be saved in history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.", + "default": 1 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. Same seed with same prompt will produce same result." + }, + "image_urls": { + "examples": [ + [ + "https://v3b.fal.media/files/b/koala/LFYeCtq2LB4s6IpmoI2iy_2fb7b46d1f3749db9f7bab679bc6c4f3.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "The URL of the image with product to integrate into background.", + "items": { + "type": "string" + } + }, + "negative_prompt": { + "examples": [ + " " + ], + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt for the generation", + "default": " " + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 6 + } + }, + "title": "IntegrateProductInput", + "description": "Input model for Integrate Product endpoint - Blend and integrate products/elements into backgrounds", + "required": [ + "image_urls" + ] + }, + "QwenImageEdit2509LoraGalleryIntegrateProductOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/penguin/4_Bz95EOoETXJlfuWib3r.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated/edited images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + } + }, + "title": "IntegrateProductOutput", + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." 
+ }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "examples": [ + 1024 + ], + "description": "The height of the image in pixels." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "examples": [ + 1024 + ], + "description": "The width of the image in pixels." + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-edit-2509-lora-gallery/integrate-product/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2509-lora-gallery/integrate-product/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2509-lora-gallery/integrate-product": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEdit2509LoraGalleryIntegrateProductInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2509-lora-gallery/integrate-product/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEdit2509LoraGalleryIntegrateProductOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-edit-2509-lora-gallery/group-photo", + "metadata": { + "display_name": "Qwen Image Edit 2509 Lora Gallery", + "category": "image-to-image", + "description": "Create group photos ", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:41:56.157Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/rabbit/Yzvztf6Z-TQRukl4KC3PH_aac2c75ac20f4ccd9bcf54ad9979c7a2.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-edit-2509-lora-gallery/group-photo", + "license_type": "private", + "date": "2025-12-15T23:41:41.859Z", + "group": { + "key": "qwen-image-edit-2509-lora-gallery", + "label": "Group Photo" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image-edit-2509-lora-gallery/group-photo", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-image-edit-2509-lora-gallery/group-photo queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-image-edit-2509-lora-gallery/group-photo", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/rabbit/Yzvztf6Z-TQRukl4KC3PH_aac2c75ac20f4ccd9bcf54ad9979c7a2.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-2509-lora-gallery/group-photo", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-2509-lora-gallery/group-photo/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImageEdit2509LoraGalleryGroupPhotoInput": { + "x-fal-order-properties": [ + "image_urls", + "image_size", + "guidance_scale", + "num_inference_steps", + "acceleration", + "negative_prompt", + "seed", + "sync_mode", + "enable_safety_checker", + "output_format", + "num_images", + "prompt", + "lora_scale" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Two people standing next to each other outside with a landscape background", + "Group photo outdoors with mountains and nature in the background, vintage style", + "Two people next to each other in a scenic outdoor setting with retro filter", + "People standing together outside with beautiful landscape behind them" + ], + "title": "Prompt", + "type": "string", + "description": "Describe the group photo scene, setting, and style. The model will maintain character consistency and add vintage effects like grain, blur, and retro filters.", + "default": "Two people standing next to each other outside with a landscape background" + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "Number of images to generate", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Image Size", + "description": "The size of the generated image. If not provided, the size of the final input image will be used." + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "title": "Acceleration", + "type": "string", + "description": "Acceleration level for image generation. 'regular' balances speed and quality.", + "default": "regular" + }, + "lora_scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Lora Scale", + "description": "The scale factor for the LoRA model. Controls the strength of the LoRA effect.", + "default": 1 + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output image", + "default": "png" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker for the generated image.", + "default": true + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and won't be saved in history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.", + "default": 1 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. Same seed with same prompt will produce same result." + }, + "image_urls": { + "examples": [ + [ + "https://v3.fal.media/files/monkey/i3saq4bAPXSIl08nZtq9P_ec535747aefc4e31943136a6d8587075.png", + "https://v3b.fal.media/files/b/kangaroo/OEtbMr7E43t0UPT8JwRT4_091834d85d8346d6960e3fd789d67db8.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "The URLs of the images to combine into a group photo. 
Provide 2 or more individual portrait images.", + "items": { + "type": "string" + } + }, + "negative_prompt": { + "examples": [ + " " + ], + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt for the generation", + "default": " " + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 6 + } + }, + "title": "GroupPhotoInput", + "description": "Input model for Group Photo endpoint - Create composite group photos with vintage/retro style", + "required": [ + "image_urls" + ] + }, + "QwenImageEdit2509LoraGalleryGroupPhotoOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/kangaroo/GGvzZELjxMpFvV2IAEb_9.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated/edited images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + } + }, + "title": "GroupPhotoOutput", + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "examples": [ + 1024 + ], + "description": "The height of the image in pixels." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "examples": [ + 1024 + ], + "description": "The width of the image in pixels." 
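Unlike the single-image endpoints, group-photo expects two or more portrait URLs in `image_urls`. An example payload built from the GroupPhotoInput schema above; the URLs are placeholders, not real assets:

```ts
// Example GroupPhotoInput payload: two or more portrait URLs plus an
// optional scene prompt. All other fields fall back to schema defaults.
const groupPhotoInput = {
  image_urls: [
    'https://example.com/person-1.png', // placeholder
    'https://example.com/person-2.png', // placeholder
  ],
  prompt:
    'Two people standing next to each other outside with a landscape background',
  num_images: 1,
  output_format: 'png',
}
```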
+ } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-edit-2509-lora-gallery/group-photo/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2509-lora-gallery/group-photo/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2509-lora-gallery/group-photo": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEdit2509LoraGalleryGroupPhotoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2509-lora-gallery/group-photo/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEdit2509LoraGalleryGroupPhotoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-edit-2509-lora-gallery/face-to-full-portrait", + "metadata": { + "display_name": "Qwen Image Edit 2509 Lora Gallery", + "category": "image-to-image", + "description": "Generate full portrait from a cropped face photo", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:41:56.283Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/penguin/gsWgQ__Xox_OncquLVuAH_1413fce9fe7c4e2fa59d589f3fc69448.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-edit-2509-lora-gallery/face-to-full-portrait", + "license_type": "commercial", + "date": "2025-12-15T23:39:48.880Z", + "group": { + "key": "qwen-image-edit-2509-lora-gallery", + "label": "Face to Full Portrait" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image-edit-2509-lora-gallery/face-to-full-portrait", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-image-edit-2509-lora-gallery/face-to-full-portrait queue.", + "x-fal-metadata": { + "endpointId": 
"fal-ai/qwen-image-edit-2509-lora-gallery/face-to-full-portrait", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/penguin/gsWgQ__Xox_OncquLVuAH_1413fce9fe7c4e2fa59d589f3fc69448.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-2509-lora-gallery/face-to-full-portrait", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-2509-lora-gallery/face-to-full-portrait/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImageEdit2509LoraGalleryFaceToFullPortraitInput": { + "x-fal-order-properties": [ + "image_urls", + "image_size", + "guidance_scale", + "num_inference_steps", + "acceleration", + "negative_prompt", + "seed", + "sync_mode", + "enable_safety_checker", + "output_format", + "num_images", + "prompt", + "lora_scale" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Photography. A young woman wearing a yellow dress stands in a flower field", + "Professional headshot with business suit and office background", + "Casual portrait outdoors with natural sunlight and bokeh background" + ], + "title": "Prompt", + "type": "string", + "description": "Describe the full portrait you want to generate from the face. Include clothing, setting, pose, and style details.", + "default": "Photography. A portrait of the person in professional attire with natural lighting" + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "Number of images to generate", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Image Size", + "description": "The size of the generated image. If not provided, the size of the final input image will be used." + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "title": "Acceleration", + "type": "string", + "description": "Acceleration level for image generation. 'regular' balances speed and quality.", + "default": "regular" + }, + "lora_scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Lora Scale", + "description": "The scale factor for the LoRA model. 
Controls the strength of the LoRA effect.", + "default": 1 + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output image", + "default": "png" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker for the generated image.", + "default": true + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and won't be saved in history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.", + "default": 1 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. Same seed with same prompt will produce same result." + }, + "image_urls": { + "examples": [ + [ + "https://v3b.fal.media/files/b/kangaroo/Tl9BsbouyruyrEJtXWYOz_ef4270d3ff4d47f18883c70cfdf07c27.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "The URL of the cropped face image. Provide a close-up face photo.", + "items": { + "type": "string" + } + }, + "negative_prompt": { + "examples": [ + " " + ], + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt for the generation", + "default": " " + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 6 + } + }, + "title": "FaceToFullPortraitInput", + "description": "Input model for Face to Full Portrait endpoint - Generate full portrait from a cropped face image", + "required": [ + "image_urls" + ] + }, + "QwenImageEdit2509LoraGalleryFaceToFullPortraitOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/koala/efKAFkAtgzxZeLSdv-d2x.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated/edited images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + } + }, + "title": "FaceToFullPortraitOutput", + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." 
+ }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "examples": [ + 1024 + ], + "description": "The height of the image in pixels." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "examples": [ + 1024 + ], + "description": "The width of the image in pixels." + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-edit-2509-lora-gallery/face-to-full-portrait/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2509-lora-gallery/face-to-full-portrait/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2509-lora-gallery/face-to-full-portrait": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEdit2509LoraGalleryFaceToFullPortraitInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2509-lora-gallery/face-to-full-portrait/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEdit2509LoraGalleryFaceToFullPortraitOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-edit-2509-lora-gallery/add-background", + "metadata": { + "display_name": "Qwen Image Edit 2509 Lora Gallery", + "category": "image-to-image", + "description": "Add a realistic scene behind the object with white background", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:41:56.517Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/monkey/Elzbk_5FjYriX_he9KRd4_34dd24f301c940f9a7387279be05cd76.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-edit-2509-lora-gallery/add-background", + "license_type": "commercial", + "date": "2025-12-15T23:37:41.612Z", + "group": { + "key": "qwen-image-edit-2509-lora-gallery", + "label": "Add Background" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image-edit-2509-lora-gallery/add-background", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-image-edit-2509-lora-gallery/add-background queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-image-edit-2509-lora-gallery/add-background", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/monkey/Elzbk_5FjYriX_he9KRd4_34dd24f301c940f9a7387279be05cd76.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-2509-lora-gallery/add-background", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-2509-lora-gallery/add-background/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImageEdit2509LoraGalleryAddBackgroundInput": { + "x-fal-order-properties": [ + "image_urls", + "image_size", + "guidance_scale", + "num_inference_steps", + "acceleration", + "negative_prompt", + "seed", + "sync_mode", + "enable_safety_checker", + "output_format", + "num_images", + "prompt", + "lora_scale" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Add an outdoor scene with mountains and road behind the car", + "Add a modern living room background behind the product", + "Add a natural outdoor setting with grass and trees as background" + ], + "title": "Prompt", + "type": "string", + "description": "Describe the background/scene you want to add behind the object. The model will remove the white background and add the specified environment.", + "default": "Remove white background and add a realistic scene behind the object" + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "Number of images to generate", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Image Size", + "description": "The size of the generated image. If not provided, the size of the final input image will be used." + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "title": "Acceleration", + "type": "string", + "description": "Acceleration level for image generation. 'regular' balances speed and quality.", + "default": "regular" + }, + "lora_scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Lora Scale", + "description": "The scale factor for the LoRA model. Controls the strength of the LoRA effect.", + "default": 1 + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output image", + "default": "png" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker for the generated image.", + "default": true + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and won't be saved in history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.", + "default": 1 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. Same seed with same prompt will produce same result." + }, + "image_urls": { + "examples": [ + [ + "https://v3b.fal.media/files/b/rabbit/YN3dXLQBWb2ch6V607Uuc_d808599bb92f4c808502a118697bdc1f.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "The URLs of the images to edit. 
Provide an image with a white or clean background.", + "items": { + "type": "string" + } + }, + "negative_prompt": { + "examples": [ + " " + ], + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt for the generation", + "default": " " + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 6 + } + }, + "title": "AddBackgroundInput", + "description": "Input model for Add Background endpoint - Remove white background and add a realistic scene", + "required": [ + "image_urls" + ] + }, + "QwenImageEdit2509LoraGalleryAddBackgroundOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/lion/d_xp44RvnuYYxioxBgAlX.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated/edited images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + } + }, + "title": "AddBackgroundOutput", + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "examples": [ + 1024 + ], + "description": "The height of the image in pixels." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "examples": [ + 1024 + ], + "description": "The width of the image in pixels." 
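For comparison, fal's official JS client (`@fal-ai/client`, not part of this diff) wraps the same queue routes behind a single `subscribe()` call. A sketch of the documented flow against the add-background endpoint above; exact client behavior should be checked against its own docs:

```ts
// Sketch using fal's official client instead of raw fetch; it submits,
// polls, and returns the result in one call.
import { fal } from '@fal-ai/client'

fal.config({ credentials: process.env.FAL_KEY }) // assumed env var

const result = await fal.subscribe(
  'fal-ai/qwen-image-edit-2509-lora-gallery/add-background',
  {
    input: {
      image_urls: ['https://example.com/product-on-white.png'], // placeholder
      prompt: 'Add a modern living room background behind the product',
    },
  },
)
// result.data should match AddBackgroundOutput: { images: [...], seed: number }
```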
+ } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-edit-2509-lora-gallery/add-background/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2509-lora-gallery/add-background/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2509-lora-gallery/add-background": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEdit2509LoraGalleryAddBackgroundInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2509-lora-gallery/add-background/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEdit2509LoraGalleryAddBackgroundOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-edit-2509-lora-gallery/next-scene", + "metadata": { + "display_name": "Qwen Image Edit 2509 Lora Gallery", + "category": "image-to-image", + "description": "Create cinematic transitions and scene progressions (camera movements, framing changes)", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:41:56.643Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/elephant/B2n7UmIRRBtpO5kLRoIg8_5dc0cbc4dd7f4a8aa3c55dd4eca753a8.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-edit-2509-lora-gallery/next-scene", + "license_type": "commercial", + "date": "2025-12-15T23:35:47.773Z", + "group": { + "key": "qwen-image-edit-2509-lora-gallery", + "label": "Next Scene" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image-edit-2509-lora-gallery/next-scene", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-image-edit-2509-lora-gallery/next-scene queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-image-edit-2509-lora-gallery/next-scene", 
+ "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/elephant/B2n7UmIRRBtpO5kLRoIg8_5dc0cbc4dd7f4a8aa3c55dd4eca753a8.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-2509-lora-gallery/next-scene", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-2509-lora-gallery/next-scene/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImageEdit2509LoraGalleryNextSceneInput": { + "x-fal-order-properties": [ + "image_urls", + "image_size", + "guidance_scale", + "num_inference_steps", + "acceleration", + "negative_prompt", + "seed", + "sync_mode", + "enable_safety_checker", + "output_format", + "num_images", + "prompt", + "lora_scale" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Next Scene: The camera pulls back to reveal the entire landscape", + "Next Scene: The camera tracks forward as sunlight breaks through the clouds", + "Next Scene: The camera pans right revealing new characters entering the frame" + ], + "title": "Prompt", + "type": "string", + "description": "Describe the camera movement, framing change, or scene transition. Start with 'Next Scene:' for best results. Examples: camera movements (dolly, push-in, pull-back), framing changes (wide to close-up), new elements entering frame.", + "default": "Next Scene: The camera moves forward revealing more of the scene" + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "Number of images to generate", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Image Size", + "description": "The size of the generated image. If not provided, the size of the final input image will be used." + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "title": "Acceleration", + "type": "string", + "description": "Acceleration level for image generation. 'regular' balances speed and quality.", + "default": "regular" + }, + "lora_scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Lora Scale", + "description": "The scale factor for the LoRA model. 
Controls the strength of the LoRA effect.", + "default": 1 + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output image", + "default": "png" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker for the generated image.", + "default": true + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and won't be saved in history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.", + "default": 1 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. Same seed with same prompt will produce same result." + }, + "image_urls": { + "examples": [ + [ + "https://v3b.fal.media/files/b/penguin/Zj5z8GW7yYlrpOQtuwjKQ_086265e41092415f951a6576fed25e41.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "The URL of the image to create the next scene from.", + "items": { + "type": "string" + } + }, + "negative_prompt": { + "examples": [ + " " + ], + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt for the generation", + "default": " " + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 6 + } + }, + "title": "NextSceneInput", + "description": "Input model for Next Scene endpoint - Create cinematic shot progressions and scene transitions", + "required": [ + "image_urls" + ] + }, + "QwenImageEdit2509LoraGalleryNextSceneOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/panda/6r8XojqbZvFPhdizajCb3.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated/edited images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + } + }, + "title": "NextSceneOutput", + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." 
+ }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "examples": [ + 1024 + ], + "description": "The height of the image in pixels." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "examples": [ + 1024 + ], + "description": "The width of the image in pixels." + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-edit-2509-lora-gallery/next-scene/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2509-lora-gallery/next-scene/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2509-lora-gallery/next-scene": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEdit2509LoraGalleryNextSceneInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2509-lora-gallery/next-scene/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEdit2509LoraGalleryNextSceneOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-edit-2509-lora-gallery/multiple-angles", + "metadata": { + "display_name": "Qwen Image Edit 2509 Lora Gallery", + "category": "image-to-image", + "description": "Precise camera position and angle control (rotation, zoom, vertical movement)", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:41:56.772Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/monkey/JbSGvZquN8vZSqhmHBK9o_6549790441a74cefa9f0afdfbd2a182c.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-edit-2509-lora-gallery/multiple-angles", + "license_type": "commercial", + "date": "2025-12-15T23:32:35.288Z", + "group": { + "key": "qwen-image-edit-2509-lora-gallery", + "label": "Multiple Angles" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image-edit-2509-lora-gallery/multiple-angles", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-image-edit-2509-lora-gallery/multiple-angles queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-image-edit-2509-lora-gallery/multiple-angles", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/monkey/JbSGvZquN8vZSqhmHBK9o_6549790441a74cefa9f0afdfbd2a182c.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-2509-lora-gallery/multiple-angles", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-2509-lora-gallery/multiple-angles/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImageEdit2509LoraGalleryMultipleAnglesInput": { + "x-fal-order-properties": [ + "image_urls", + "image_size", + "guidance_scale", + "num_inference_steps", + "acceleration", + "negative_prompt", + "seed", + "sync_mode", + "enable_safety_checker", + "output_format", + "num_images", + "rotate_right_left", + "move_forward", + "vertical_angle", + "wide_angle_lens", + "lora_scale" + ], + "type": "object", + "properties": { + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Image Size", + "description": "The size of the generated image. If not provided, the size of the final input image will be used." + }, + "wide_angle_lens": { + "title": "Wide-Angle Lens", + "type": "boolean", + "description": "Enable wide-angle lens effect", + "default": false + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "title": "Acceleration", + "type": "string", + "description": "Acceleration level for image generation. 'regular' balances speed and quality.", + "default": "regular" + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.", + "default": 1 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker for the generated image.", + "default": true + }, + "image_urls": { + "examples": [ + [ + "https://v3.fal.media/files/monkey/i3saq4bAPXSIl08nZtq9P_ec535747aefc4e31943136a6d8587075.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "The URL of the image to adjust camera angle for.", + "items": { + "type": "string" + } + }, + "negative_prompt": { + "examples": [ + " " + ], + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt for the generation", + "default": " " + }, + "vertical_angle": { + "description": "Adjust vertical camera angle (-1=bird's-eye view/looking down, 0=neutral, 1=worm's-eye view/looking up)", + "type": "number", + "minimum": -1, + "maximum": 1, + "title": "Vertical Angle (Bird ⬄ Worm)", + "default": 0 + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "Number of images to generate", + "default": 1 + }, + "move_forward": { + "description": "Move camera forward (0=no movement, 10=close-up)", + "type": "number", + "minimum": 0, + "maximum": 10, + "title": "Move Forward → Close-Up", + "default": 0 + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output image", + "default": "png" + }, + "rotate_right_left": { + "description": "Rotate camera left (positive) or right (negative) in degrees. 
Positive values rotate left, negative values rotate right.", + "type": "number", + "minimum": -90, + "maximum": 90, + "title": "Rotate Right-Left (degrees °)", + "default": 0 + }, + "lora_scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Lora Scale", + "description": "The scale factor for the LoRA model. Controls the strength of the camera control effect.", + "default": 1.25 + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and won't be saved in history.", + "default": false + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. Same seed with same prompt will produce same result." + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 6 + } + }, + "title": "MultipleAnglesInput", + "description": "Input model for Multiple Angles endpoint - Camera control with precise adjustments", + "required": [ + "image_urls" + ] + }, + "QwenImageEdit2509LoraGalleryMultipleAnglesOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/elephant/0lEToxR8cU5tB-SVMmD2C.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated/edited images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + } + }, + "title": "MultipleAnglesOutput", + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "examples": [ + 1024 + ], + "description": "The height of the image in pixels." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "examples": [ + 1024 + ], + "description": "The width of the image in pixels." + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-edit-2509-lora-gallery/multiple-angles/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2509-lora-gallery/multiple-angles/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2509-lora-gallery/multiple-angles": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEdit2509LoraGalleryMultipleAnglesInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2509-lora-gallery/multiple-angles/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEdit2509LoraGalleryMultipleAnglesOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-edit-2509-lora", + "metadata": { + "display_name": "Qwen Image Edit 2509 Lora", + "category": "image-to-image", + "description": "LoRA endpoint for the Qwen Image Edit 2509 model.", + "status": "active", + "tags": [ + "image-to-image", + "image-editing" + ], + "updated_at": "2026-01-26T21:41:57.155Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/penguin/X3V08aAYEqPmeSvVdLgN9_6be5ff6349c9459d92e7a5d7db8dadcc.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-edit-2509-lora", + "license_type": "commercial", + "date": "2025-12-15T21:23:02.927Z", + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/qwen-image-edit-2509-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/qwen-image-edit-2509-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image-edit-2509-lora", + "version": "1.0.0", + "description": "The OpenAPI schema 
for the fal-ai/qwen-image-edit-2509-lora queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-image-edit-2509-lora", + "category": "image-to-image", + "thumbnailUrl": "https://v3.fal.media/files/penguin/X3V08aAYEqPmeSvVdLgN9_6be5ff6349c9459d92e7a5d7db8dadcc.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-2509-lora", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-2509-lora/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImageEdit2509LoraInput": { + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "image_urls", + "negative_prompt", + "acceleration", + "loras" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Close shot of a woman standing next to this car on this highway" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the image with" + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image. If not provided, the size of the final input image will be used to calculate the size of the output image." + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "description": "Acceleration level for image generation. Options: 'none', 'regular'. Higher acceleration increases speed. 'regular' balances speed and quality.", + "type": "string", + "examples": [ + "regular" + ], + "title": "Acceleration", + "default": "regular" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "loras": { + "description": "\n The LoRAs to use for the image generation. 
You can use up to 3 LoRAs\n and they will be merged together to generate the final image.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "examples": [], + "title": "Loras", + "default": [] + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 4 + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 28 + }, + "image_urls": { + "examples": [ + [ + "https://v3.fal.media/files/monkey/i3saq4bAPXSIl08nZtq9P_ec535747aefc4e31943136a6d8587075.png", + "https://v3.fal.media/files/penguin/BCOZp6teRhSQFuOXpbBOa_da8ef9b4982347a2a62a516b737d4f21.png", + "https://v3.fal.media/files/tiger/sCoZhBksx9DvwSR4_U3_C_3d1f581441874005908addeae9c10d0f.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "The URLs of the images to edit.", + "items": { + "type": "string" + } + }, + "negative_prompt": { + "examples": [ + " " + ], + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt for the generation", + "default": " " + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + } + }, + "title": "BaseQwenEditImagePlusLoRAInput", + "required": [ + "prompt", + "image_urls" + ] + }, + "QwenImageEdit2509LoraOutput": { + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "examples": [ + [ + { + "height": 768, + "content_type": "image/jpeg", + "url": "https://v3.fal.media/files/zebra/mMW8_S5PeGuDXLTfIKCpG.png", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + } + }, + "title": "QwenImageOutput", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "LoraWeight": { + "x-fal-order-properties": [ + "path", + "scale" + ], + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Scale", + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + } + }, + "title": "LoraWeight", + "required": [ + "path" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "title": "Image", + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-edit-2509-lora/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2509-lora/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2509-lora": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEdit2509LoraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2509-lora/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEdit2509LoraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-edit-2509", + "metadata": { + "display_name": "Qwen Image Edit 2509", + "category": "image-to-image", + "description": "Endpoint for Qwen's Image Editing Plus model also known as Qwen-Image-Edit-2509. Has superior text editing capabilities and multi-image support.", + "status": "active", + "tags": [ + "image-editing", + "image-to-image", + "high-quality-text" + ], + "updated_at": "2026-01-26T21:41:57.412Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/elephant/pGXxmNi6TrKTe864jBKW8_1bb43c6eab2349ab9c8cefbb24f3fd1b.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-edit-2509", + "license_type": "commercial", + "date": "2025-12-15T21:06:45.628Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image-edit-2509", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-image-edit-2509 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-image-edit-2509", + "category": "image-to-image", + "thumbnailUrl": "https://v3.fal.media/files/elephant/pGXxmNi6TrKTe864jBKW8_1bb43c6eab2349ab9c8cefbb24f3fd1b.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-2509", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-2509/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImageEdit2509Input": { + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "image_urls", + "negative_prompt", + "acceleration" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Close shot of a woman standing in next to this car on this highway" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the image with" + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "description": "The number of images to generate.", + "maximum": 4, + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "square_hd" + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "description": "Acceleration level for image generation. Options: 'none', 'regular'. Higher acceleration increases speed. 'regular' balances speed and quality.", + "type": "string", + "examples": [ + "regular" + ], + "title": "Acceleration", + "default": "regular" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance scale", + "type": "number", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "maximum": 20, + "default": 4 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "image_urls": { + "examples": [ + [ + "https://v3.fal.media/files/monkey/i3saq4bAPXSIl08nZtq9P_ec535747aefc4e31943136a6d8587075.png", + "https://v3.fal.media/files/penguin/BCOZp6teRhSQFuOXpbBOa_da8ef9b4982347a2a62a516b737d4f21.png", + "https://v3.fal.media/files/tiger/sCoZhBksx9DvwSR4_U3_C_3d1f581441874005908addeae9c10d0f.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "The URLs of the images to edit.", + "items": { + "type": "string" + } + }, + "negative_prompt": { + "examples": [ + " " + ], + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt for the generation", + "default": " " + }, + "num_inference_steps": { + "minimum": 2, + "title": "Num Inference Steps", + "type": "integer", + "description": "The number of inference steps to perform.", + "maximum": 100, + "default": 50 + } + }, + "title": "BaseQwenEditImagePlusInput", + "required": [ + "prompt", + "image_urls" + ] + }, + "QwenImageEdit2509Output": { + "x-fal-order-properties": [ + "images", + 
"timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "examples": [ + [ + { + "height": 768, + "content_type": "image/jpeg", + "url": "https://v3.fal.media/files/zebra/mMW8_S5PeGuDXLTfIKCpG.png", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + } + }, + "title": "QwenImageOutput", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "title": "Image", + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-edit-2509/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2509/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2509": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEdit2509Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2509/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEdit2509Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-edit-plus-lora-gallery/lighting-restoration", + "metadata": { + "display_name": "Qwen Image Edit Plus Lora Gallery", + "category": "image-to-image", + "description": "Removes harsh shadows and light spots from images, replacing them with soft, even, natural-looking illumination.", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:41:59.248Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a860fd6/fyrneTtvBVrhWKMhSc0pj_9589e39060444dee9e7c33be9e16d1bf.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-edit-plus-lora-gallery/lighting-restoration", + "license_type": "commercial", + "date": "2025-12-12T23:01:29.638Z", + "group": { + "key": "qwen-image-edit-plus-lora-gallery", + "label": "Lighting Restoration" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image-edit-plus-lora-gallery/lighting-restoration", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-image-edit-plus-lora-gallery/lighting-restoration queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-image-edit-plus-lora-gallery/lighting-restoration", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a860fd6/fyrneTtvBVrhWKMhSc0pj_9589e39060444dee9e7c33be9e16d1bf.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-plus-lora-gallery/lighting-restoration", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-plus-lora-gallery/lighting-restoration/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImageEditPlusLoraGalleryLightingRestorationInput": { + "x-fal-order-properties": [ + "image_urls", + "image_size", + "guidance_scale", + "num_inference_steps", + "acceleration", + "negative_prompt", + "seed", + "sync_mode", + "enable_safety_checker", + "output_format", + "num_images" + ], + "type": "object", + "properties": { + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker for the generated image.", + "default": true + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "Number of images to generate", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Image Size", + "description": "The size of the generated image. If not provided, the size of the final input image will be used." + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "title": "Acceleration", + "type": "string", + "description": "Acceleration level for image generation. 'regular' balances speed and quality.", + "default": "regular" + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output image", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and won't be saved in history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.", + "default": 1 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. Same seed with same prompt will produce same result." 
+ }, + "image_urls": { + "examples": [ + [ + "https://v3b.fal.media/files/b/0a860a2e/L4v5FJm9lwFGGdRY2P7tb.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "The URL of the image to restore lighting for.", + "items": { + "type": "string" + } + }, + "negative_prompt": { + "examples": [ + " " + ], + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt for the generation", + "default": " " + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 6 + } + }, + "title": "LightingRestorationInput", + "description": "Input model for Lighting Restoration endpoint - Restore natural lighting by removing harsh shadows and light spots", + "required": [ + "image_urls" + ] + }, + "QwenImageEditPlusLoraGalleryLightingRestorationOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/0a860a37/ct1JcapCdZTzfNhI0-GM5.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated/edited images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + } + }, + "title": "LightingRestorationOutput", + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "examples": [ + 1024 + ], + "description": "The height of the image in pixels." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "examples": [ + 1024 + ], + "description": "The width of the image in pixels." 
+ } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-edit-plus-lora-gallery/lighting-restoration/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-plus-lora-gallery/lighting-restoration/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-plus-lora-gallery/lighting-restoration": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEditPlusLoraGalleryLightingRestorationInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-plus-lora-gallery/lighting-restoration/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEditPlusLoraGalleryLightingRestorationOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/moondream3-preview/segment", + "metadata": { + "display_name": "Moondream3 Preview [Segment]", + "category": "image-to-image", + "description": "Moondream 3 is a vision language model that brings frontier-level visual reasoning with native object detection, pointing, and OCR capabilities to real-world applications requiring fast, inexpensive inference at scale. 
", + "status": "active", + "tags": [ + "mask", + "segmentation", + "" + ], + "updated_at": "2026-01-26T21:42:00.056Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a86094c/GEhsfrfki4ZTLUFdUC9qa_7e2a85f8b4b74946abf7c31e813fefb9.jpg", + "model_url": "https://fal.run/fal-ai/moondream3-preview/segment", + "license_type": "commercial", + "date": "2025-12-12T18:23:06.363Z", + "group": { + "key": "moondream3-preview", + "label": "Segment" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/moondream3-preview/segment", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/moondream3-preview/segment queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/moondream3-preview/segment", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a86094c/GEhsfrfki4ZTLUFdUC9qa_7e2a85f8b4b74946abf7c31e813fefb9.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/moondream3-preview/segment", + "documentationUrl": "https://fal.ai/models/fal-ai/moondream3-preview/segment/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Moondream3PreviewSegmentInput": { + "x-fal-order-properties": [ + "image_url", + "object", + "spatial_references", + "settings", + "preview" + ], + "type": "object", + "properties": { + "spatial_references": { + "examples": [ + [ + { + "y": 0.40762463343108507, + "x": 0.6402737047898338 + } + ] + ], + "title": "Spatial References", + "type": "array", + "description": "Spatial references to guide the segmentation. By feeding in references you can help the segmentation process. Must be either list of Point object with x and y members, or list of arrays containing either 2 floats (x,y) or 4 floats (x1,y1,x2,y2). 
\n**NOTE**: You can also use the [**point endpoint**](https://fal.ai/models/fal-ai/moondream3-preview/point) to get points for the objects, and pass them in here.", + "items": { + "anyOf": [ + { + "$ref": "#/components/schemas/Point" + }, + { + "type": "array", + "items": { + "type": "number" + } + } + ] + } + }, + "settings": { + "title": "Settings", + "description": "Sampling settings for the segmentation model", + "allOf": [ + { + "$ref": "#/components/schemas/SegmentSamplingSettings" + } + ] + }, + "object": { + "examples": [ + "mango" + ], + "title": "Object", + "type": "string", + "description": "Object to be segmented in the image" + }, + "preview": { + "examples": [ + true + ], + "title": "Preview", + "type": "boolean", + "description": "Whether to preview the output and return a binary mask of the image", + "default": false + }, + "image_url": { + "x-fal": { + "timeout": 20, + "max_height": 7000, + "max_width": 7000 + }, + "title": "Image URL", + "type": "string", + "description": "URL of the image to be processed\n\nMax width: 7000px, Max height: 7000px, Timeout: 20.0s", + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/moondream-3-preview/segmentation_in.png" + ] + } + }, + "title": "MoondreamSegementationInput", + "required": [ + "image_url", + "object" + ] + }, + "Moondream3PreviewSegmentOutput": { + "x-fal-order-properties": [ + "finish_reason", + "usage_info", + "image", + "path", + "bbox" + ], + "type": "object", + "properties": { + "finish_reason": { + "examples": [ + "stop" + ], + "title": "Finish Reason", + "type": "string", + "description": "Reason for finishing the output generation" + }, + "image": { + "examples": [ + { + "height": 1024, + "file_name": "segmentation_out.png", + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/example_outputs/moondream-3-preview/segmentation_out.png", + "width": 1024 + } + ], + "title": "Image", + "description": "Segmentation mask image. If no object detected or preview not requested, will be null.", + "allOf": [ + { + "$ref": "#/components/schemas/ImageFile" + } + ] + }, + "bbox": { + "examples": [ + { + "y_min": 0.2998046875, + "x_max": 0.732421875, + "x_min": 0.5390625, + "y_max": 0.53515625 + } + ], + "title": "Bbox", + "description": "Bounding box of the segmented object. If not detected, will be null.", + "allOf": [ + { + "$ref": "#/components/schemas/Object" + } + ] + }, + "path": { + "examples": [ + "M.657,.996C.610,.984,.529,.938,.447,.875C.411,.848,.363,.815,.341,.802C.188,.714,.093,.623,.038,.511C.011,.455,.000,.406,.000,.340C.000,.269,.006,.234,.025,.186C.051,.123,.092,.079,.161,.042C.240,-0.000,.355,-0.011,.433,.017C.462,.027,.535,.066,.571,.091C.701,.180,.807,.265,.850,.311C.911,.379,.962,.468,.984,.545C.995,.585,.997,.601,.997,.664C.997,.765,.984,.811,.935,.879C.878,.958,.796,1.001,.705,1.000C.685,.999,.664,.998,.657,.996z" + ], + "title": "Path", + "type": "string", + "description": "SVG path data representing the segmentation mask. If not detected, will be null." 
+ }, + "usage_info": { + "examples": [ + { + "output_tokens": 23, + "decode_time_ms": 811.5944429300725, + "input_tokens": 737, + "ttft_ms": 91.87838807702065, + "prefill_time_ms": 54.45315001998097 + } + ], + "title": "Usage Info", + "description": "Usage information for the request", + "allOf": [ + { + "$ref": "#/components/schemas/UsageInfo" + } + ] + } + }, + "title": "MoondreamSegementationOutput", + "required": [ + "finish_reason", + "usage_info" + ] + }, + "SegmentSamplingSettings": { + "x-fal-order-properties": [ + "temperature", + "top_p", + "max_tokens" + ], + "type": "object", + "properties": { + "top_p": { + "minimum": 0, + "title": "Top P", + "type": "number", + "description": "Nucleus sampling probability mass to use, between 0 and 1.", + "maximum": 1, + "default": 1 + }, + "max_tokens": { + "minimum": 1, + "title": "Max Tokens", + "type": "integer", + "description": "Maximum number of tokens to generate." + }, + "temperature": { + "minimum": 0, + "title": "Temperature", + "type": "number", + "description": "Sampling temperature to use. Higher values will make the output more random, while lower values will make it more focused and deterministic.", + "maximum": 1, + "default": 1 + } + }, + "title": "SegmentSamplingSettings" + }, + "ImageFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the image" + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the image" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "ImageFile", + "required": [ + "url" + ] + }, + "Object": { + "x-fal-order-properties": [ + "x_min", + "y_min", + "x_max", + "y_max" + ], + "type": "object", + "properties": { + "y_min": { + "title": "Y Min", + "type": "number", + "description": "Top boundary of detection box in normalized format (0 to 1)" + }, + "x_max": { + "title": "X Max", + "type": "number", + "description": "Right boundary of detection box in normalized format (0 to 1)" + }, + "x_min": { + "title": "X Min", + "type": "number", + "description": "Left boundary of detection box in normalized format (0 to 1)" + }, + "y_max": { + "title": "Y Max", + "type": "number", + "description": "Bottom boundary of detection box in normalized format (0 to 1)" + } + }, + "title": "Object", + "required": [ + "x_min", + "y_min", + "x_max", + "y_max" + ] + }, + "UsageInfo": { + "x-fal-order-properties": [ + "input_tokens", + "output_tokens", + "prefill_time_ms", + "decode_time_ms", + "ttft_ms" + ], + "type": "object", + "properties": { + "output_tokens": { + "title": "Output Tokens", + "type": "integer", + "description": "Number of output tokens generated" + }, + "decode_time_ms": { + "title": "Decode Time Ms", + "type": "number", + "description": "Time taken for decoding in milliseconds" + }, + "input_tokens": { + "title": "Input Tokens", + "type": "integer", + "description": "Number of input tokens processed" + }, + "ttft_ms": { + "title": "Ttft Ms", + "type": "number", + "description": "Time to first token in milliseconds" + }, + "prefill_time_ms": { + "title": "Prefill Time Ms", + "type": "number", + "description": "Time taken for prefill in milliseconds" + } + }, + "title": "UsageInfo", + "required": [ + "input_tokens", + "output_tokens", + "prefill_time_ms", + "decode_time_ms", + "ttft_ms" + ] + } + } + }, + "paths": { + "/fal-ai/moondream3-preview/segment/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/moondream3-preview/segment/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/moondream3-preview/segment": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Moondream3PreviewSegmentInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/moondream3-preview/segment/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Moondream3PreviewSegmentOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/stepx-edit2", + "metadata": { + "display_name": "Stepx Edit2", + "category": "image-to-image", + "description": "Image-to-image editing with Step1X-Edit v2 from StepFun. Reasoning-enhanced modifications through a thinking–editing–reflection loop with MLLM world knowledge for abstract instruction comprehension.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:02.687Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a85a321/vCMVfJpHcA6CdIfx3oi6b_3a43c50dbe5a486ea123cb283d57b074.jpg", + "model_url": "https://fal.run/fal-ai/stepx-edit2", + "license_type": "commercial", + "date": "2025-12-09T17:42:27.719Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/stepx-edit2", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/stepx-edit2 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/stepx-edit2", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a85a321/vCMVfJpHcA6CdIfx3oi6b_3a43c50dbe5a486ea123cb283d57b074.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/stepx-edit2", + "documentationUrl": "https://fal.ai/models/fal-ai/stepx-edit2/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "StepxEdit2Input": { + "title": "ImageToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "make head band red" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "enable_reflection_mode": { + "title": "Enable Reflection Mode", + "type": "boolean", + "description": "Enable reflection mode. Reviews outputs, corrects unintended changes, and determines when editing is complete.", + "default": true + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/girl_2.png" + ], + "title": "Image URL", + "type": "string", + "description": "The image URL to generate an image from. Needs to match the dimensions of the mask." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "title": "True CFG scale", + "type": "number", + "maximum": 20, + "description": "\n The true CFG scale. Controls how closely the model follows the prompt.\n ", + "default": 6 + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 100, + "description": "The number of inference steps to perform. Recommended: 50.", + "default": 50 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "negative_prompt": { + "examples": [ + "" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + "default": "" + }, + "enable_thinking_mode": { + "title": "Enable Thinking Mode", + "type": "boolean", + "description": "Enable thinking mode. Uses multimodal language model knowledge to interpret abstract editing instructions.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "negative_prompt", + "seed", + "guidance_scale", + "num_inference_steps", + "enable_thinking_mode", + "enable_reflection_mode", + "sync_mode", + "enable_safety_checker", + "output_format" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "StepxEdit2Output": { + "title": "ImageOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." 
+ }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "best_info": { + "title": "Best Info", + "type": "array", + "description": "Reflection analysis (only available when reflection mode is enabled).", + "items": { + "type": "object" + } + }, + "images": { + "examples": [ + [ + { + "height": 1024, + "content_type": "image/jpeg", + "url": "https://v3.fal.media/files/kangaroo/kFPr5gC_Rr9JZbTTakEMd.jpeg", + "width": 672 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "reformat_prompt": { + "title": "Reformat Prompt", + "type": "string", + "description": "The model's interpretation of your instruction (only available when thinking mode is enabled)." + }, + "think_info": { + "title": "Think Info", + "type": "array", + "description": "Reasoning process details (only available when thinking mode is enabled).", + "items": { + "type": "string" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt", + "reformat_prompt", + "think_info", + "best_info" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/stepx-edit2/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/stepx-edit2/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/stepx-edit2": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StepxEdit2Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/stepx-edit2/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StepxEdit2Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/z-image/turbo/controlnet/lora", + "metadata": { + "display_name": "Z-Image Turbo", + "category": "image-to-image", + "description": "Generate images from text and edge, depth or pose images using custom LoRA and Z-Image Turbo, Tongyi-MAI's super-fast 6B model.", + "status": "active", + "tags": [ + "turbo", + "z-image", + "fast", + "lora" + ], + "updated_at": "2026-01-26T21:42:02.813Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a856427/T39BO4GLnxe-rM537HgrH_23724481e7574950ba6e3f4abb75e1b0.jpg", + "model_url": "https://fal.run/fal-ai/z-image/turbo/controlnet/lora", + "license_type": "commercial", + "date": "2025-12-07T20:55:46.527Z", + "group": { + "key": "z-image-turbo", + "label": "ControlNet Image to Image (LoRA)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/z-image-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/z-image-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/z-image/turbo/controlnet/lora", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/z-image/turbo/controlnet/lora queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/z-image/turbo/controlnet/lora", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a856427/T39BO4GLnxe-rM537HgrH_23724481e7574950ba6e3f4abb75e1b0.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/z-image/turbo/controlnet/lora", + "documentationUrl": "https://fal.ai/models/fal-ai/z-image/turbo/controlnet/lora/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ZImageTurboControlnetLoraInput": { + "title": "ZImageTurboControlNetLoRAInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A single leopard, its spotted golden coat detailed with black rosettes, cautiously peeks its head through dense green foliage. The leopard’s eyes are alert and focused forward, ears perked, whiskers slightly visible. The bushes consist of thick, leafy shrubs with varying shades of green, some leaves partially obscuring the leopard’s muzzle and forehead. Soft natural daylight filters through the canopy above, casting dappled shadows across the animal’s fur and surrounding leaves. The composition is a medium close-up, centered on the leopard’s head emerging from the undergrowth, with shallow depth of field blurring the background vegetation." + ], + "description": "The prompt to generate an image from.", + "type": "string", + "title": "Prompt" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "description": "The acceleration level to use.", + "type": "string", + "title": "Acceleration", + "default": "regular" + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9", + "auto" + ], + "type": "string" + } + ], + "description": "The size of the generated image.", + "title": "Image Size", + "default": "auto" + }, + "loras": { + "description": "List of LoRA weights to apply (maximum 3).", + "type": "array", + "title": "Loras", + "items": { + "$ref": "#/components/schemas/LoRAInput" + }, + "default": [] + }, + "control_end": { + "description": "The end of the controlnet conditioning.", + "type": "number", + "minimum": 0, + "maximum": 1, + "title": "Control End", + "default": 0.8 + }, + "control_start": { + "description": "The start of the controlnet conditioning.", + "type": "number", + "minimum": 0, + "maximum": 1, + "title": "Control Start", + "default": 0 + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "maximum": 4, + "title": "Number of Images", + "default": 1 + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "png" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/z-image-turbo-controlnet-input.jpg" + ], + "description": "URL of Image for ControlNet generation.", + "type": "string", + "title": "Image URL" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "control_scale": { + "description": "The scale of the controlnet conditioning.", + "type": "number", + "minimum": 0, + "maximum": 1, + "title": "Control Scale", + "default": 0.75 + }, + "enable_prompt_expansion": { + "description": "Whether to enable prompt expansion. 
Note: this will increase the price by 0.0025 credits per request.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": false + }, + "num_inference_steps": { + "minimum": 1, + "description": "The number of inference steps to perform.", + "type": "integer", + "maximum": 8, + "title": "Number of Inference Steps", + "default": 8 + }, + "seed": { + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "type": "integer", + "title": "Seed" + }, + "preprocess": { + "enum": [ + "none", + "canny", + "depth", + "pose" + ], + "description": "What kind of preprocessing to apply to the image, if any.", + "type": "string", + "examples": [ + "none" + ], + "title": "Preprocess", + "default": "none" + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "acceleration", + "enable_prompt_expansion", + "image_url", + "control_scale", + "control_start", + "control_end", + "preprocess", + "loras" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "ZImageTurboControlnetLoraOutput": { + "title": "ZImageTurboControlNetOutput", + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "examples": [ + [ + { + "height": 1024, + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/example_outputs/z-image-turbo-controlnet-output.jpg", + "width": 1536 + } + ] + ], + "description": "The generated image files info.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "seed": { + "description": "Seed of the generated Image. 
It will be the same value of the one passed in the input or the randomly generated that was used in case none was passed.", + "type": "integer", + "title": "Seed" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "description": "The timings of the generation process.", + "title": "Timings" + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Height", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Width", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "LoRAInput": { + "description": "LoRA weight configuration.", + "type": "object", + "properties": { + "path": { + "description": "URL, HuggingFace repo ID (owner/repo) to lora weights.", + "type": "string", + "title": "Path" + }, + "scale": { + "minimum": 0, + "description": "Scale factor for LoRA application (0.0 to 4.0).", + "type": "number", + "maximum": 4, + "title": "Scale", + "default": 1 + } + }, + "title": "LoRAInput", + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "ImageFile": { + "title": "ImageFile", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/z-image/turbo/controlnet/lora/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/z-image/turbo/controlnet/lora/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/z-image/turbo/controlnet/lora": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ZImageTurboControlnetLoraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/z-image/turbo/controlnet/lora/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ZImageTurboControlnetLoraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/z-image/turbo/controlnet", + "metadata": { + "display_name": "Z-Image Turbo", + "category": "image-to-image", + "description": "Generate images from text and edge, depth or pose images using Z-Image Turbo, Tongyi-MAI's super-fast 6B model.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:02.946Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a85640c/fMxWAimqN7U67KxfCscKM_f4457852189f4fd98e05a47cae2f2baa.jpg", + "model_url": "https://fal.run/fal-ai/z-image/turbo/controlnet", + "license_type": "commercial", + "date": "2025-12-07T20:51:56.573Z", + "group": { + "key": "z-image-turbo", + "label": "ControlNet Image to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/z-image-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/z-image-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/z-image/turbo/controlnet", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/z-image/turbo/controlnet queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/z-image/turbo/controlnet", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a85640c/fMxWAimqN7U67KxfCscKM_f4457852189f4fd98e05a47cae2f2baa.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/z-image/turbo/controlnet", + "documentationUrl": "https://fal.ai/models/fal-ai/z-image/turbo/controlnet/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": 
[ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ZImageTurboControlnetInput": { + "title": "ZImageTurboControlNetInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A single leopard, its spotted golden coat detailed with black rosettes, cautiously peeks its head through dense green foliage. The leopard’s eyes are alert and focused forward, ears perked, whiskers slightly visible. The bushes consist of thick, leafy shrubs with varying shades of green, some leaves partially obscuring the leopard’s muzzle and forehead. Soft natural daylight filters through the canopy above, casting dappled shadows across the animal’s fur and surrounding leaves. The composition is a medium close-up, centered on the leopard’s head emerging from the undergrowth, with shallow depth of field blurring the background vegetation." + ], + "description": "The prompt to generate an image from.", + "type": "string", + "title": "Prompt" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "description": "The acceleration level to use.", + "type": "string", + "title": "Acceleration", + "default": "regular" + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9", + "auto" + ], + "type": "string" + } + ], + "description": "The size of the generated image.", + "title": "Image Size", + "default": "auto" + }, + "control_end": { + "description": "The end of the controlnet conditioning.", + "type": "number", + "minimum": 0, + "maximum": 1, + "title": "Control End", + "default": 0.8 + }, + "control_start": { + "description": "The start of the controlnet conditioning.", + "type": "number", + "minimum": 0, + "maximum": 1, + "title": "Control Start", + "default": 0 + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "maximum": 4, + "title": "Number of Images", + "default": 1 + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "png" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/z-image-turbo-controlnet-input.jpg" + ], + "description": "URL of Image for ControlNet generation.", + "type": "string", + "title": "Image URL" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + 
"control_scale": { + "description": "The scale of the controlnet conditioning.", + "type": "number", + "minimum": 0, + "maximum": 1, + "title": "Control Scale", + "default": 0.75 + }, + "enable_prompt_expansion": { + "description": "Whether to enable prompt expansion. Note: this will increase the price by 0.0025 credits per request.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": false + }, + "num_inference_steps": { + "minimum": 1, + "description": "The number of inference steps to perform.", + "type": "integer", + "maximum": 8, + "title": "Number of Inference Steps", + "default": 8 + }, + "seed": { + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "type": "integer", + "title": "Seed" + }, + "preprocess": { + "enum": [ + "none", + "canny", + "depth", + "pose" + ], + "description": "What kind of preprocessing to apply to the image, if any.", + "type": "string", + "examples": [ + "none" + ], + "title": "Preprocess", + "default": "none" + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "acceleration", + "enable_prompt_expansion", + "image_url", + "control_scale", + "control_start", + "control_end", + "preprocess" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "ZImageTurboControlnetOutput": { + "title": "ZImageTurboControlNetOutput", + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "examples": [ + [ + { + "height": 1024, + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/example_outputs/z-image-turbo-controlnet-output.jpg", + "width": 1536 + } + ] + ], + "description": "The generated image files info.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "seed": { + "description": "Seed of the generated Image. 
It will be the same value of the one passed in the input or the randomly generated that was used in case none was passed.", + "type": "integer", + "title": "Seed" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "description": "The timings of the generation process.", + "title": "Timings" + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Height", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Width", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "ImageFile": { + "title": "ImageFile", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/z-image/turbo/controlnet/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/z-image/turbo/controlnet/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/z-image/turbo/controlnet": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ZImageTurboControlnetInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/z-image/turbo/controlnet/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ZImageTurboControlnetOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/z-image/turbo/image-to-image/lora", + "metadata": { + "display_name": "Z-Image Turbo", + "category": "image-to-image", + "description": "Generate images from text and images using custom LoRA and Z-Image Turbo, Tongyi-MAI's super-fast 6B model.", + "status": "active", + "tags": [ + "turbo", + "z-image", + "fast", + "lora" + ], + "updated_at": "2026-01-26T21:42:03.076Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8563c2/33_KzGSlJd9AyR7m8QZYv_bd0753469df54473a018265568b5fc44.jpg", + "model_url": "https://fal.run/fal-ai/z-image/turbo/image-to-image/lora", + "license_type": "commercial", + "date": "2025-12-07T20:39:40.800Z", + "group": { + "key": "z-image-turbo", + "label": "Image to Image (LoRA)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/z-image-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/z-image-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/z-image/turbo/image-to-image/lora", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/z-image/turbo/image-to-image/lora queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/z-image/turbo/image-to-image/lora", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8563c2/33_KzGSlJd9AyR7m8QZYv_bd0753469df54473a018265568b5fc44.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/z-image/turbo/image-to-image/lora", + "documentationUrl": "https://fal.ai/models/fal-ai/z-image/turbo/image-to-image/lora/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ZImageTurboImageToImageLoraInput": { + "title": "ZImageTurboImageToImageLoRAInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A young Asian woman with long, vibrant purple hair stands on a sunlit sandy beach, posing confidently with her left hand resting on her hip. She gazes directly at the camera with a neutral expression. A sleek black ribbon bow is tied neatly on the right side of her head, just above her ear. She wears a flowing white cotton dress with a fitted bodice and a flared skirt that reaches mid-calf, slightly lifted by a gentle sea breeze. The beach behind her features fine, pale golden sand with subtle footprints, leading to calm turquoise waves under a clear blue sky with soft, wispy clouds. The lighting is natural daylight, casting soft shadows to her left, indicating late afternoon sun. The horizon line is visible in the background, with a faint silhouette of distant dunes. Her skin tone is fair with a natural glow, and her facial features are delicately defined. The composition is centered on her figure, framed from mid-thigh up, with shallow depth of field blurring the distant waves slightly." + ], + "description": "The prompt to generate an image from.", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "maximum": 4, + "title": "Number of Images", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9", + "auto" + ], + "type": "string" + } + ], + "description": "The size of the generated image.", + "title": "Image Size", + "default": "auto" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "description": "The acceleration level to use.", + "type": "string", + "title": "Acceleration", + "default": "regular" + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "png" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/z-image-turbo-i2i-input.png" + ], + "description": "URL of Image for Image-to-Image generation.", + "type": "string", + "title": "Image URL" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "loras": { + "description": "List of LoRA weights to apply (maximum 3).", + "type": "array", + "title": "Loras", + "items": { + "$ref": "#/components/schemas/LoRAInput" + }, + "default": [] + }, + "strength": { + "description": "The strength of the image-to-image conditioning.", + "type": "number", + "minimum": 0, + "maximum": 1, + "title": "Strength", + "default": 0.6 + }, + "enable_prompt_expansion": { + "description": "Whether to enable prompt expansion. 
Note: this will increase the price by 0.0025 credits per request.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": false + }, + "num_inference_steps": { + "minimum": 1, + "description": "The number of inference steps to perform.", + "type": "integer", + "maximum": 8, + "title": "Number of Inference Steps", + "default": 8 + }, + "seed": { + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "type": "integer", + "title": "Seed" + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "acceleration", + "enable_prompt_expansion", + "image_url", + "strength", + "loras" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "ZImageTurboImageToImageLoraOutput": { + "title": "ZImageTurboImageToImageOutput", + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "examples": [ + [ + { + "height": 1728, + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/example_outputs/z-image-turbo-i2i-output.png", + "width": 992 + } + ] + ], + "description": "The generated image files info.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "seed": { + "description": "Seed of the generated Image. It will be the same value of the one passed in the input or the randomly generated that was used in case none was passed.", + "type": "integer", + "title": "Seed" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "description": "The timings of the generation process.", + "title": "Timings" + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Height", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Width", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "LoRAInput": { + "description": "LoRA weight configuration.", + "type": "object", + "properties": { + "path": { + "description": "URL, HuggingFace repo ID (owner/repo) to lora weights.", + "type": "string", + "title": "Path" + }, + "scale": { + "minimum": 0, + "description": "Scale factor for LoRA application (0.0 to 4.0).", + "type": "number", + "maximum": 4, + "title": "Scale", + "default": 1 + } + }, + "title": "LoRAInput", + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "ImageFile": { + "title": "ImageFile", + "type": "object", + "properties": { + 
"file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/z-image/turbo/image-to-image/lora/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/z-image/turbo/image-to-image/lora/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/z-image/turbo/image-to-image/lora": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ZImageTurboImageToImageLoraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/z-image/turbo/image-to-image/lora/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ZImageTurboImageToImageLoraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/z-image/turbo/image-to-image", + "metadata": { + "display_name": "Z-Image Turbo", + "category": "image-to-image", + "description": "Generate images from text and images using Z-Image Turbo, Tongyi-MAI's super-fast 6B model.", + "status": "active", + "tags": [ + "turbo", + "z-image", + "fast" + ], + "updated_at": "2026-01-26T21:42:03.202Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8563a8/ISfaZAiht16RGXWqoOU4h_13ac5a10d2b34c1db11dc85ee2d57014.jpg", + "model_url": "https://fal.run/fal-ai/z-image/turbo/image-to-image", + "license_type": "commercial", + "date": "2025-12-07T20:35:23.324Z", + "group": { + "key": "z-image-turbo", + "label": "Image to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/z-image-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/z-image-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/z-image/turbo/image-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/z-image/turbo/image-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/z-image/turbo/image-to-image", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8563a8/ISfaZAiht16RGXWqoOU4h_13ac5a10d2b34c1db11dc85ee2d57014.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/z-image/turbo/image-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/z-image/turbo/image-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ZImageTurboImageToImageInput": { + "title": "ZImageTurboImageToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A young Asian woman with long, vibrant purple hair stands on a sunlit sandy beach, posing confidently with her left hand resting on her hip. She gazes directly at the camera with a neutral expression. A sleek black ribbon bow is tied neatly on the right side of her head, just above her ear. She wears a flowing white cotton dress with a fitted bodice and a flared skirt that reaches mid-calf, slightly lifted by a gentle sea breeze. The beach behind her features fine, pale golden sand with subtle footprints, leading to calm turquoise waves under a clear blue sky with soft, wispy clouds. The lighting is natural daylight, casting soft shadows to her left, indicating late afternoon sun. The horizon line is visible in the background, with a faint silhouette of distant dunes. Her skin tone is fair with a natural glow, and her facial features are delicately defined. The composition is centered on her figure, framed from mid-thigh up, with shallow depth of field blurring the distant waves slightly." + ], + "description": "The prompt to generate an image from.", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "maximum": 4, + "title": "Number of Images", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9", + "auto" + ], + "type": "string" + } + ], + "description": "The size of the generated image.", + "title": "Image Size", + "default": "auto" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "description": "The acceleration level to use.", + "type": "string", + "title": "Acceleration", + "default": "regular" + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "png" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/z-image-turbo-i2i-input.png" + ], + "description": "URL of Image for Image-to-Image generation.", + "type": "string", + "title": "Image URL" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "strength": { + "description": "The strength of the image-to-image conditioning.", + "type": "number", + "minimum": 0, + "maximum": 1, + "title": "Strength", + "default": 0.6 + }, + "enable_prompt_expansion": { + "description": "Whether to enable prompt expansion. 
Note: this will increase the price by 0.0025 credits per request.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": false + }, + "num_inference_steps": { + "minimum": 1, + "description": "The number of inference steps to perform.", + "type": "integer", + "maximum": 8, + "title": "Number of Inference Steps", + "default": 8 + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "seed": { + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "acceleration", + "enable_prompt_expansion", + "image_url", + "strength" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "ZImageTurboImageToImageOutput": { + "title": "ZImageTurboImageToImageOutput", + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "examples": [ + [ + { + "height": 1728, + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/example_outputs/z-image-turbo-i2i-output.png", + "width": 992 + } + ] + ], + "description": "The generated image files info.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "seed": { + "description": "Seed of the generated Image. It will be the same value of the one passed in the input or the randomly generated that was used in case none was passed.", + "type": "integer", + "title": "Seed" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "description": "The timings of the generation process.", + "title": "Timings" + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Height", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Width", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "ImageFile": { + "title": "ImageFile", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + 
"description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/z-image/turbo/image-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/z-image/turbo/image-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/z-image/turbo/image-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ZImageTurboImageToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/z-image/turbo/image-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ZImageTurboImageToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/longcat-image/edit", + "metadata": { + "display_name": "Longcat Image", + "category": "image-to-image", + "description": "LongCat image Edit is a 6B parameter image editing model excelling at multilingual text rendering, photorealism and deployment efficiency.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:03.328Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a859c1b/mdsG5LYIfadIqFJkLMWbo_b0fd88a073a64e0eb378a3f1ba907087.jpg", + "model_url": "https://fal.run/fal-ai/longcat-image/edit", + "license_type": "commercial", + "date": "2025-12-05T19:51:05.485Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for 
fal-ai/longcat-image/edit", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/longcat-image/edit queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/longcat-image/edit", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a859c1b/mdsG5LYIfadIqFJkLMWbo_b0fd88a073a64e0eb378a3f1ba907087.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/longcat-image/edit", + "documentationUrl": "https://fal.ai/models/fal-ai/longcat-image/edit/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LongcatImageEditInput": { + "x-fal-order-properties": [ + "prompt", + "num_inference_steps", + "guidance_scale", + "seed", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "acceleration", + "image_url" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Add the text \"Fal is fast\" in elegant cursive font with lightning streaks at the top of the image." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to edit the image with." + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use.", + "default": "regular" + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/longcat_image/edit_input.png" + ], + "title": "Image URL", + "type": "string", + "description": "The URL of the image to edit." 
+ }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 28 + }, + "guidance_scale": { + "minimum": 1, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The guidance scale to use for the image generation.", + "default": 4.5 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + } + }, + "title": "EditImageInput", + "required": [ + "prompt", + "image_url" + ] + }, + "LongcatImageEditOutput": { + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "examples": [ + [ + { + "height": 768, + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/example_outputs/longcat_image/edit.png", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "title": "ImageToImageOutput", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "title": "Image", + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/longcat-image/edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/longcat-image/edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/longcat-image/edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LongcatImageEditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/longcat-image/edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LongcatImageEditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/bytedance/seedream/v4.5/edit", + "metadata": { + "display_name": "Bytedance", + "category": "image-to-image", + "description": "A new-generation image creation model from ByteDance, Seedream 4.5 integrates image generation and image editing capabilities into a single, unified architecture.\n", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:42:06.298Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a84910b/lDvWLqpxcFmzQHdadDA7t_4012f28b4f79444782d90d8927b42c29.jpg", + "model_url": "https://fal.run/fal-ai/bytedance/seedream/v4.5/edit", + "license_type": "commercial", + "date": "2025-12-03T10:44:48.263Z", + "group": { + "key": "Seedream45", + "label": "Image Editing" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/bytedance/seedream/v4.5/edit", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/bytedance/seedream/v4.5/edit queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/bytedance/seedream/v4.5/edit", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a84910b/lDvWLqpxcFmzQHdadDA7t_4012f28b4f79444782d90d8927b42c29.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/bytedance/seedream/v4.5/edit", + "documentationUrl": "https://fal.ai/models/fal-ai/bytedance/seedream/v4.5/edit/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, +
"request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "BytedanceSeedreamV45EditInput": { + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_images", + "max_images", + "seed", + "sync_mode", + "enable_safety_checker", + "image_urls" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Replace the product in Figure 1 with that in Figure 2. For the title copy the text in Figure 3 to the top of the screen, the title should have a clear contrast with the background but not be overly eye-catching." + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt used to edit the image" + }, + "num_images": { + "minimum": 1, + "maximum": 6, + "type": "integer", + "title": "Num Images", + "description": "Number of separate model generations to be run with the prompt.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9", + "auto_2K", + "auto_4K" + ], + "type": "string" + } + ], + "description": "The size of the generated image. Width and height must be between 1920 and 4096, or total number of pixels must be between 2560*1440 and 4096*4096.", + "title": "Image Size", + "examples": [ + "auto_4K" + ], + "default": { + "height": 2048, + "width": 2048 + } + }, + "max_images": { + "minimum": 1, + "maximum": 6, + "type": "integer", + "title": "Max Images", + "description": "If set to a number greater than one, enables multi-image generation. The model will potentially return up to `max_images` images every generation, and in total, `num_images` generations will be carried out. In total, the number of images generated will be between `num_images` and `max_images*num_images`. The total number of images (image inputs + image outputs) must not exceed 15", + "default": 1 + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_safety_checker": { + "examples": [ + true + ], + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed to control the stochasticity of image generation." + }, + "image_urls": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/example_inputs/seedreamv45/seedream_v45_edit_input_1.png", + "https://storage.googleapis.com/falserverless/example_inputs/seedreamv45/seedream_v45_edit_input_2.png", + "https://storage.googleapis.com/falserverless/example_inputs/seedreamv45/seedream_v45_edit_input_3.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "List of URLs of input images for editing. 
Presently, up to 10 image inputs are allowed. If over 10 images are sent, only the last 10 will be used.", + "items": { + "type": "string" + } + } + }, + "title": "SeedDream45EditInput", + "required": [ + "prompt", + "image_urls" + ] + }, + "BytedanceSeedreamV45EditOutput": { + "x-fal-order-properties": [ + "images" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/seedreamv45/seedream_v45_edit_output.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "Generated images", + "items": { + "$ref": "#/components/schemas/Image" + } + } + }, + "title": "SeedDream45EditOutput", + "required": [ + "images" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/bytedance/seedream/v4.5/edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedream/v4.5/edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedream/v4.5/edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BytedanceSeedreamV45EditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedream/v4.5/edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BytedanceSeedreamV45EditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/vidu/q2/reference-to-image", + "metadata": { + "display_name": "Vidu", + "category": "image-to-image", + "description": "Vidu Reference-to-Image creates images by using reference images and combining them with a prompt.", + "status": "active", + "tags": [ + "images-to-imag", + "reference-to-image" + ], + "updated_at": "2026-01-26T21:42:06.887Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a859c0c/hJyZYTr1Sq5HbwhEJWXid_90a9e24d19b4422cbb62fe49229a9266.jpg", + "model_url": "https://fal.run/fal-ai/vidu/q2/reference-to-image", + "license_type": "commercial", + "date": "2025-12-02T15:15:16.476Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/vidu/q2/reference-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/vidu/q2/reference-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/vidu/q2/reference-to-image", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a859c0c/hJyZYTr1Sq5HbwhEJWXid_90a9e24d19b4422cbb62fe49229a9266.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/vidu/q2/reference-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/vidu/q2/reference-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id."
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ViduQ2ReferenceToImageInput": { + "title": "ReferenceToImageRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "The little devil is looking at the apple on the beach and walking around it." + ], + "title": "Prompt", + "type": "string", + "maxLength": 1500, + "description": "Text prompt for image generation, max 1500 characters" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the output image", + "default": "16:9" + }, + "reference_image_urls": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/web-examples/vidu/new-examples/reference1.png", + "https://storage.googleapis.com/falserverless/web-examples/vidu/new-examples/reference2.png", + "https://storage.googleapis.com/falserverless/web-examples/vidu/new-examples/reference3.png" + ] + ], + "title": "Reference Image Urls", + "type": "array", + "description": "URLs of the reference images to use for consistent subject appearance", + "items": { + "type": "string" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for generation" + } + }, + "x-fal-order-properties": [ + "prompt", + "reference_image_urls", + "aspect_ratio", + "seed" + ], + "required": [ + "prompt", + "reference_image_urls" + ] + }, + "ViduQ2ReferenceToImageOutput": { + "title": "ReferenceToImageOutput", + "type": "object", + "properties": { + "image": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/model_tests/video_models/general-1-2025-09-09T10_20_19Z.png" + } + ], + "title": "Image", + "description": "The edited image", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + } + }, + "x-fal-order-properties": [ + "image" + ], + "required": [ + "image" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided."
+ }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/vidu/q2/reference-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/vidu/q2/reference-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/vidu/q2/reference-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ViduQ2ReferenceToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/vidu/q2/reference-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ViduQ2ReferenceToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-image/o1", + "metadata": { + "display_name": "Kling O1 Image", + "category": "image-to-image", + "description": "Perform precise image edits using strong reference control, transforming subjects, styles, and local details while preserving visual consistency.", + "status": "active", + "tags": [ + "edit", + "realism", + "typography" + ], + "updated_at": "2026-01-26T21:42:09.283Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8495e0/GVuCh0RsGXpLIlUsJMrNX_f4fe5943ad804aaab441629241777417.jpg", + "model_url": "https://fal.run/fal-ai/kling-image/o1", + "license_type": "commercial", + "date": "2025-12-01T10:04:03.443Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling-image/o1", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-image/o1 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-image/o1", + "category": "image-to-image", + 
"thumbnailUrl": "https://v3b.fal.media/files/b/0a8495e0/GVuCh0RsGXpLIlUsJMrNX_f4fe5943ad804aaab441629241777417.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-image/o1", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-image/o1/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingImageO1Input": { + "x-fal-order-properties": [ + "prompt", + "image_urls", + "elements", + "resolution", + "num_images", + "aspect_ratio", + "output_format", + "sync_mode" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Put @Image1 to the back seat of the car in @Image2, put @Element1 on to the @Image1" + ], + "maxLength": 2500, + "type": "string", + "description": "Text prompt for image generation. Reference images using @Image1, @Image2, etc. (or @Image if only one image). Max 2500 characters.", + "title": "Prompt" + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "9:16", + "1:1", + "4:3", + "3:4", + "3:2", + "2:3", + "21:9" + ], + "description": "Aspect ratio of generated images. 'auto' intelligently determines based on input content.", + "type": "string", + "title": "Aspect Ratio", + "default": "auto" + }, + "num_images": { + "minimum": 1, + "maximum": 9, + "type": "integer", + "description": "Number of images to generate (1-9).", + "title": "Num Images", + "default": 1 + }, + "resolution": { + "enum": [ + "1K", + "2K" + ], + "description": "Image generation resolution. 1K: standard, 2K: high-res.", + "type": "string", + "title": "Resolution", + "default": "1K" + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "png" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "elements": { + "examples": [ + [ + { + "reference_image_urls": [ + "https://storage.googleapis.com/falserverless/example_inputs/kling-image-o1/element-1-reference.png" + ], + "frontal_image_url": "https://storage.googleapis.com/falserverless/example_inputs/kling-image-o1/element-1-front.png" + } + ] + ], + "description": "Elements (characters/objects) to include in the image. Reference in prompt as @Element1, @Element2, etc. 
Maximum 10 total (elements + reference images).", + "type": "array", + "title": "Elements", + "items": { + "$ref": "#/components/schemas/OmniImageElementInput" + } + }, + "image_urls": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/example_inputs/kling-image-o1/input.png", + "https://storage.googleapis.com/falserverless/example_inputs/kling-image-o1/input-2.png" + ] + ], + "description": "List of reference images. Reference images in prompt using @Image1, @Image2, etc. (1-indexed). Max 10 images.", + "type": "array", + "title": "Image Urls", + "items": { + "x-fal": { + "max_aspect_ratio": 2.5, + "timeout": 20, + "min_width": 300, + "min_height": 300, + "min_aspect_ratio": 0.4, + "max_file_size": 10485760 + }, + "type": "string", + "limit_description": "Max file size: 10.0MB, Min width: 300px, Min height: 300px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s" + } + } + }, + "title": "OmniImageRequest", + "required": [ + "prompt", + "image_urls" + ] + }, + "KlingImageO1Output": { + "x-fal-order-properties": [ + "images" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "file_size": 1419818, + "file_name": "d4eeaeaeae294a41b0321ba6c99f0f9d.png", + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/example_outputs/kling-image-o1/output.png" + } + ] + ], + "description": "Generated images", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + } + }, + "title": "OmniImageOutput", + "required": [ + "images" + ] + }, + "OmniImageElementInput": { + "x-fal-order-properties": [ + "frontal_image_url", + "reference_image_urls" + ], + "type": "object", + "properties": { + "reference_image_urls": { + "description": "Additional reference images from different angles. 1-3 images supported. 
At least one image is required.", + "type": "array", + "title": "Reference Image Urls", + "items": { + "x-fal": { + "max_aspect_ratio": 2.5, + "timeout": 20, + "min_width": 300, + "min_height": 300, + "min_aspect_ratio": 0.4, + "max_file_size": 10485760 + }, + "type": "string", + "limit_description": "Max file size: 10.0MB, Min width: 300px, Min height: 300px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s" + } + }, + "frontal_image_url": { + "x-fal": { + "max_aspect_ratio": 2.5, + "timeout": 20, + "min_width": 300, + "min_height": 300, + "min_aspect_ratio": 0.4, + "max_file_size": 10485760 + }, + "description": "The frontal image of the element (main view).", + "type": "string", + "title": "Frontal Image Url", + "limit_description": "Max file size: 10.0MB, Min width: 300px, Min height: 300px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s" + } + }, + "title": "OmniImageElementInput", + "required": [ + "frontal_image_url" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-image/o1/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-image/o1/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-image/o1": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingImageO1Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-image/o1/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingImageO1Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2-lora-gallery/virtual-tryon", + "metadata": { + "display_name": "Flux 2 Lora Gallery", + "category": "image-to-image", + "description": "Virtual clothing try-on (2 images: person + garment)", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:42:11.975Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/penguin/l4pNPFFxYpTn4dDetQB3H_0e3817198ad1498db78841fba7eefff9.jpg", + "model_url": "https://fal.run/fal-ai/flux-2-lora-gallery/virtual-tryon", + "license_type": "commercial", + "date": "2025-11-25T19:50:48.195Z", + "group": { + "key": "flux-2-lora-gallery", + "label": "Virtual Try On" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2-lora-gallery/virtual-tryon", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-2-lora-gallery/virtual-tryon queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2-lora-gallery/virtual-tryon", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/penguin/l4pNPFFxYpTn4dDetQB3H_0e3817198ad1498db78841fba7eefff9.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2-lora-gallery/virtual-tryon", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2-lora-gallery/virtual-tryon/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2LoraGalleryVirtualTryonInput": { + "title": "VirtualTryonInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A person wearing a stylish jacket, virtual try-on", + "Virtual try-on of a dress on a model", + "Fashion virtual try-on with clothing" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate a virtual try-on image." + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 4, + "description": "Number of images to generate", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Image Size", + "description": "The size of the generated image. If not provided, the size of the input image will be used." + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "title": "Acceleration", + "type": "string", + "description": "Acceleration level for image generation. 'regular' balances speed and quality.", + "default": "regular" + }, + "lora_scale": { + "minimum": 0, + "title": "Lora Scale", + "type": "number", + "maximum": 2, + "description": "The strength of the virtual try-on effect.", + "default": 1 + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output image", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and won't be saved in history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance Scale", + "type": "number", + "maximum": 20, + "description": "The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.", + "default": 2.5 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. Same seed with same prompt will produce same result." + }, + "image_urls": { + "examples": [ + [ + "https://v3b.fal.media/files/b/koala/YlOtn9SjXGGH274eN1G5R.png", + "https://v3b.fal.media/files/b/penguin/sji5EHUvmFYOCVZsvvId-.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "The URLs of the images for virtual try-on. 
Provide person image and clothing image.", + "items": { + "type": "string" + } + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker for the generated image.", + "default": true + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps to perform.", + "default": 40 + } + }, + "x-fal-order-properties": [ + "image_urls", + "prompt", + "image_size", + "guidance_scale", + "num_inference_steps", + "acceleration", + "seed", + "sync_mode", + "enable_safety_checker", + "output_format", + "num_images", + "lora_scale" + ], + "description": "Input model for Virtual Try-on endpoint - Generate virtual try-on images", + "required": [ + "image_urls", + "prompt" + ] + }, + "Flux2LoraGalleryVirtualTryonOutput": { + "title": "VirtualTryonOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation" + }, + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/zebra/oFnSZ-nBbPgM-gXT0ApXy.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated virtual try-on images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + } + }, + "x-fal-order-properties": [ + "images", + "seed", + "prompt" + ], + "required": [ + "images", + "seed", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "description": "The height of the image in pixels.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "description": "The mime type of the file.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "description": "The width of the image in pixels.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2-lora-gallery/virtual-tryon/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-lora-gallery/virtual-tryon/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2-lora-gallery/virtual-tryon": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2LoraGalleryVirtualTryonInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-lora-gallery/virtual-tryon/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2LoraGalleryVirtualTryonOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2-lora-gallery/multiple-angles", + "metadata": { + "display_name": "Flux 2 Lora Gallery", + "category": "image-to-image", + "description": "Generates same object from different angles (azimuth/elevation)", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:42:12.379Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/tiger/uGB7v3AfMfHnqXp04NAzY_c730bfb4d81848338df88c8b6db41927.jpg", + "model_url": "https://fal.run/fal-ai/flux-2-lora-gallery/multiple-angles", + "license_type": "commercial", + "date": "2025-11-25T19:41:48.103Z", + "group": { + "key": "flux-2-lora-gallery", + "label": "Multiple angles" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2-lora-gallery/multiple-angles", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-2-lora-gallery/multiple-angles queue.", + "x-fal-metadata": { + "endpointId": 
"fal-ai/flux-2-lora-gallery/multiple-angles", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/tiger/uGB7v3AfMfHnqXp04NAzY_c730bfb4d81848338df88c8b6db41927.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2-lora-gallery/multiple-angles", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2-lora-gallery/multiple-angles/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2LoraGalleryMultipleAnglesInput": { + "title": "MultipleAnglesInput", + "type": "object", + "properties": { + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Image Size", + "description": "The size of the generated image. If not provided, the size of the input image will be used." + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "title": "Acceleration", + "type": "string", + "description": "Acceleration level for image generation.", + "default": "regular" + }, + "horizontal_angle": { + "description": "Horizontal rotation angle around the object in degrees. 0°=front view, 90°=right side, 180°=back view, 270°=left side, 360°=front view again.", + "type": "number", + "minimum": 0, + "title": "Horizontal Angle (Azimuth °)", + "maximum": 360, + "default": 0 + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance Scale", + "type": "number", + "maximum": 20, + "description": "The CFG (Classifier Free Guidance) scale.", + "default": 2.5 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "image_urls": { + "examples": [ + [ + "https://v3.fal.media/files/monkey/i3saq4bAPXSIl08nZtq9P_ec535747aefc4e31943136a6d8587075.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "The URL of the image to adjust camera angle for.", + "items": { + "type": "string" + } + }, + "zoom": { + "description": "Camera zoom/distance. 0=wide shot (far away), 5=medium shot (normal), 10=close-up (very close).", + "type": "number", + "minimum": 0, + "title": "Zoom (Distance)", + "maximum": 10, + "default": 5 + }, + "vertical_angle": { + "description": "Vertical camera angle in degrees. 
0°=eye-level shot, 30°=elevated shot, 60°=high-angle shot (looking down from above).", + "type": "number", + "minimum": 0, + "title": "Vertical Angle (Elevation °)", + "maximum": 60, + "default": 0 + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 4, + "description": "Number of images to generate.", + "default": 1 + }, + "lora_scale": { + "minimum": 0, + "title": "Lora Scale", + "type": "number", + "maximum": 2, + "description": "The strength of the multiple angles effect.", + "default": 1 + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output image.", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If True, the media will be returned as a data URI.", + "default": false + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps to perform.", + "default": 40 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility." + } + }, + "x-fal-order-properties": [ + "image_urls", + "horizontal_angle", + "vertical_angle", + "zoom", + "image_size", + "guidance_scale", + "num_inference_steps", + "acceleration", + "seed", + "sync_mode", + "enable_safety_checker", + "output_format", + "num_images", + "lora_scale" + ], + "description": "Input model for Multiple Angles endpoint - Camera control with precise adjustments using trigger word. Prompt is built automatically from slider values.", + "required": [ + "image_urls" + ] + }, + "Flux2LoraGalleryMultipleAnglesOutput": { + "title": "MultipleAnglesOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation" + }, + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/0a848d46/EYFbhE4axwlNB3OSKfdre.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated images with multiple camera angles", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + } + }, + "x-fal-order-properties": [ + "images", + "seed", + "prompt" + ], + "required": [ + "images", + "seed", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "description": "The height of the image in pixels.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "description": "The size of the file in 
bytes.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "description": "The mime type of the file.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "description": "The width of the image in pixels.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2-lora-gallery/multiple-angles/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-lora-gallery/multiple-angles/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2-lora-gallery/multiple-angles": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2LoraGalleryMultipleAnglesInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-lora-gallery/multiple-angles/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2LoraGalleryMultipleAnglesOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2-lora-gallery/face-to-full-portrait", + "metadata": { + "display_name": "Flux 2 Lora Gallery", + "category": "image-to-image", + "description": "Extends a face into a full body portrait", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:42:12.883Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/tiger/S6rY_7j8Q-hCTWtn0Wb4k_99665a8ac4584b36b382565ef1ba20e3.jpg", + "model_url": "https://fal.run/fal-ai/flux-2-lora-gallery/face-to-full-portrait", + "license_type": "commercial", + "date": "2025-11-25T19:37:05.640Z", + "group": { + "key": "flux-2-lora-gallery", + "label": "Face to Full Portrait" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2-lora-gallery/face-to-full-portrait", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-2-lora-gallery/face-to-full-portrait queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2-lora-gallery/face-to-full-portrait", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/tiger/S6rY_7j8Q-hCTWtn0Wb4k_99665a8ac4584b36b382565ef1ba20e3.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2-lora-gallery/face-to-full-portrait", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2-lora-gallery/face-to-full-portrait/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2LoraGalleryFaceToFullPortraitInput": { + "title": "FaceToFullPortraitInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Face to full portrait", + "Face to full portrait in professional attire", + "Face to full portrait casual outdoor setting" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt describing the full portrait to generate from the face.", + "default": "Face to full portrait" + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 4, + "description": "Number of images to generate", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Image Size", + "description": "The size of the generated image. If not provided, the size of the input image will be used." + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "title": "Acceleration", + "type": "string", + "description": "Acceleration level for image generation. 'regular' balances speed and quality.", + "default": "regular" + }, + "lora_scale": { + "minimum": 0, + "title": "Lora Scale", + "type": "number", + "maximum": 2, + "description": "The strength of the face to full portrait effect.", + "default": 1 + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output image", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and won't be saved in history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance Scale", + "type": "number", + "maximum": 20, + "description": "The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.", + "default": 2.5 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. Same seed with same prompt will produce same result." 
+ }, + "image_urls": { + "examples": [ + [ + "https://v3b.fal.media/files/b/elephant/XJPJL2v5pAOmx9LemHWAE.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "The URL of the cropped face image.", + "items": { + "type": "string" + } + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker for the generated image.", + "default": true + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps to perform.", + "default": 40 + } + }, + "x-fal-order-properties": [ + "image_urls", + "prompt", + "image_size", + "guidance_scale", + "num_inference_steps", + "acceleration", + "seed", + "sync_mode", + "enable_safety_checker", + "output_format", + "num_images", + "lora_scale" + ], + "description": "Input model for Face to Full Portrait endpoint - Generate full portrait from face", + "required": [ + "image_urls" + ] + }, + "Flux2LoraGalleryFaceToFullPortraitOutput": { + "title": "FaceToFullPortraitOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation" + }, + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/elephant/rlfpP4b6_PwqQK5F2pAKc.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated full portrait images from face", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + } + }, + "x-fal-order-properties": [ + "images", + "seed", + "prompt" + ], + "required": [ + "images", + "seed", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "description": "The height of the image in pixels.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "description": "The mime type of the file.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "description": "The width of the image in pixels.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2-lora-gallery/face-to-full-portrait/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-lora-gallery/face-to-full-portrait/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2-lora-gallery/face-to-full-portrait": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2LoraGalleryFaceToFullPortraitInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-lora-gallery/face-to-full-portrait/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2LoraGalleryFaceToFullPortraitOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2-lora-gallery/apartment-staging", + "metadata": { + "display_name": "Flux 2 Lora Gallery", + "category": "image-to-image", + "description": "Virtually furnishes an empty apartment", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:42:13.272Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/zebra/B8oJR3Fdg-dqVxbhAJsa1_c6599007f202434b8bda0349f863ebfc.jpg", + "model_url": "https://fal.run/fal-ai/flux-2-lora-gallery/apartment-staging", + "license_type": "commercial", + "date": "2025-11-25T19:29:04.658Z", + "group": { + "key": "flux-2-lora-gallery", + "label": "Apartment Staging" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2-lora-gallery/apartment-staging", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-2-lora-gallery/apartment-staging queue.", + "x-fal-metadata": { + "endpointId": 
"fal-ai/flux-2-lora-gallery/apartment-staging", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/zebra/B8oJR3Fdg-dqVxbhAJsa1_c6599007f202434b8bda0349f863ebfc.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2-lora-gallery/apartment-staging", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2-lora-gallery/apartment-staging/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2LoraGalleryApartmentStagingInput": { + "title": "ApartmentStagingInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Furnish this room with modern furniture and decor", + "Furnish this empty apartment with cozy living room furniture", + "Furnish this room with minimalist Scandinavian design" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate a furnished room. Use 'furnish this room' for best results." + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 4, + "description": "Number of images to generate", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Image Size", + "description": "The size of the generated image. If not provided, the size of the input image will be used." + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "title": "Acceleration", + "type": "string", + "description": "Acceleration level for image generation. 'regular' balances speed and quality.", + "default": "regular" + }, + "lora_scale": { + "minimum": 0, + "title": "Lora Scale", + "type": "number", + "maximum": 2, + "description": "The strength of the apartment staging effect.", + "default": 1 + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output image", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and won't be saved in history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance Scale", + "type": "number", + "maximum": 20, + "description": "The CFG (Classifier Free Guidance) scale. 
Controls how closely the model follows the prompt.", + "default": 2.5 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. Same seed with same prompt will produce same result." + }, + "image_urls": { + "examples": [ + [ + "https://v3b.fal.media/files/b/tiger/58rkpMdBl7eqcxuZ8YRus.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "The URL of the empty room image to furnish.", + "items": { + "type": "string" + } + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker for the generated image.", + "default": true + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps to perform.", + "default": 40 + } + }, + "x-fal-order-properties": [ + "image_urls", + "prompt", + "image_size", + "guidance_scale", + "num_inference_steps", + "acceleration", + "seed", + "sync_mode", + "enable_safety_checker", + "output_format", + "num_images", + "lora_scale" + ], + "description": "Input model for Apartment Staging endpoint - Furnish rooms", + "required": [ + "image_urls", + "prompt" + ] + }, + "Flux2LoraGalleryApartmentStagingOutput": { + "title": "ApartmentStagingOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation" + }, + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/panda/3J1ezJEhcDLwoC9P8imqj.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated furnished room images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + } + }, + "x-fal-order-properties": [ + "images", + "seed", + "prompt" + ], + "required": [ + "images", + "seed", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "description": "The height of the image in pixels.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "description": "The name of the file. 
It will be auto-generated if not provided.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "description": "The mime type of the file.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "description": "The width of the image in pixels.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2-lora-gallery/apartment-staging/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-lora-gallery/apartment-staging/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2-lora-gallery/apartment-staging": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2LoraGalleryApartmentStagingInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-lora-gallery/apartment-staging/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2LoraGalleryApartmentStagingOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2-lora-gallery/add-background", + "metadata": { + "display_name": "Flux 2 Lora Gallery", + "category": "image-to-image", + "description": "Add a background to images with white/clean background", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:42:13.400Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/penguin/-soa1BLI68gem3r7qatcf_ec7fab33b28040238c41b8f80546e89b.jpg", + "model_url": "https://fal.run/fal-ai/flux-2-lora-gallery/add-background", + "license_type": "commercial", + "date": "2025-11-25T19:25:11.715Z", + "group": { + "key": "flux-2-lora-gallery", + "label": "Add Background" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2-lora-gallery/add-background", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-2-lora-gallery/add-background queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2-lora-gallery/add-background", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/penguin/-soa1BLI68gem3r7qatcf_ec7fab33b28040238c41b8f80546e89b.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2-lora-gallery/add-background", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2-lora-gallery/add-background/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2LoraGalleryAddBackgroundInput": { + "title": "AddBackgroundInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Add Background forest", + "Add Background modern office", + "Add Background beach sunset", + "Add Background city skyline at night" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt describing the background to add. Must start with 'Add Background' followed by your description.", + "default": "Add Background forest" + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 4, + "description": "Number of images to generate", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Image Size", + "description": "The size of the generated image. If not provided, the size of the input image will be used." + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "title": "Acceleration", + "type": "string", + "description": "Acceleration level for image generation. 'regular' balances speed and quality.", + "default": "regular" + }, + "lora_scale": { + "minimum": 0, + "title": "Lora Scale", + "type": "number", + "maximum": 2, + "description": "The strength of the add background effect.", + "default": 1 + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output image", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and won't be saved in history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance Scale", + "type": "number", + "maximum": 20, + "description": "The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.", + "default": 2.5 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. Same seed with same prompt will produce same result." + }, + "image_urls": { + "examples": [ + [ + "https://v3b.fal.media/files/b/penguin/SLhCyojehICmW3dW6U5F0.jpg" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "The URLs of the images. 
Provide an image with a white or clean background.", + "items": { + "type": "string" + } + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker for the generated image.", + "default": true + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps to perform.", + "default": 40 + } + }, + "x-fal-order-properties": [ + "image_urls", + "prompt", + "image_size", + "guidance_scale", + "num_inference_steps", + "acceleration", + "seed", + "sync_mode", + "enable_safety_checker", + "output_format", + "num_images", + "lora_scale" + ], + "description": "Input model for Add Background endpoint - Add background to images", + "required": [ + "image_urls" + ] + }, + "Flux2LoraGalleryAddBackgroundOutput": { + "title": "AddBackgroundOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation" + }, + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/lion/g0VrXhRU1YBK9B1MnGpyi.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated images with added background", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + } + }, + "x-fal-order-properties": [ + "images", + "seed", + "prompt" + ], + "required": [ + "images", + "seed", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "description": "The height of the image in pixels.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "description": "The mime type of the file.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "description": "The width of the image in pixels.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2-lora-gallery/add-background/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-lora-gallery/add-background/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2-lora-gallery/add-background": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2LoraGalleryAddBackgroundInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-lora-gallery/add-background/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2LoraGalleryAddBackgroundOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "clarityai/crystal-upscaler", + "metadata": { + "display_name": "Crystal Upscaler", + "category": "image-to-image", + "description": "An advanced image enhancement tool designed specifically for facial details and portrait photography, utilizing Clarity AI's upscaling technology.", + "status": "active", + "tags": [ + "image-to-image" + ], + "updated_at": "2026-01-26T21:42:13.529Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/kangaroo/MXXDFIdMKdUafM9WsY5sk_10934786a1c846acbb01d7ae9c2bdab1.jpg", + "model_url": "https://fal.run/clarityai/crystal-upscaler", + "license_type": "commercial", + "date": "2025-11-25T13:16:15.815Z", + "group": { + "key": "clarityai-clarity-upscaler", + "label": "Upscale Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for clarityai/crystal-upscaler", + "version": "1.0.0", + "description": "The OpenAPI schema for the clarityai/crystal-upscaler queue.", + "x-fal-metadata": { + "endpointId": 
"clarityai/crystal-upscaler", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/kangaroo/MXXDFIdMKdUafM9WsY5sk_10934786a1c846acbb01d7ae9c2bdab1.jpg", + "playgroundUrl": "https://fal.ai/models/clarityai/crystal-upscaler", + "documentationUrl": "https://fal.ai/models/clarityai/crystal-upscaler/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "CrystalUpscalerInput": { + "title": "CrystalUpscaleInput", + "type": "object", + "properties": { + "creativity": { + "minimum": 0, + "title": "Creativity", + "type": "number", + "description": "Creativity level for upscaling", + "maximum": 10, + "default": 0 + }, + "scale_factor": { + "minimum": 1, + "title": "Scale Factor", + "type": "number", + "description": "Scale factor", + "maximum": 200, + "default": 2 + }, + "image_url": { + "description": "URL to the input image", + "type": "string", + "x-fal": { + "timeout": 20, + "max_file_size": 104857600 + }, + "title": "Image Url", + "examples": [ + "https://v3b.fal.media/files/b/zebra/eW3waMFDT-2_7Pq8j3r9d_upscaled.png" + ], + "limit_description": "Max file size: 100.0MB, Timeout: 20.0s" + } + }, + "x-fal-order-properties": [ + "image_url", + "scale_factor", + "creativity" + ], + "required": [ + "image_url" + ] + }, + "CrystalUpscalerOutput": { + "title": "CrystalUpscaleOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + "https://v3b.fal.media/files/b/penguin/sVriwxLaU3fVTz5B-xBBC_upscaled.png" + ] + ], + "title": "Images", + "type": "array", + "description": "List of upscaled images", + "items": { + "$ref": "#/components/schemas/Image" + } + } + }, + "x-fal-order-properties": [ + "images" + ], + "required": [ + "images" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." 
+ }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/clarityai/crystal-upscaler/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/clarityai/crystal-upscaler/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/clarityai/crystal-upscaler": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CrystalUpscalerInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/clarityai/crystal-upscaler/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CrystalUpscalerOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2-flex/edit", + "metadata": { + "display_name": "Flux 2 Flex", + "category": "image-to-image", + "description": "Image editing with FLUX.2 [flex] from Black Forest Labs. 
Supports multi-reference editing with customizable inference steps and enhanced text rendering.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:13.918Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/elephant/NXNJkZllyE8XTyMrtEALf_90206edd3ddb4ba793758a26bde823c7.jpg", + "model_url": "https://fal.run/fal-ai/flux-2-flex/edit", + "license_type": "commercial", + "date": "2025-11-25T02:36:00.176Z", + "group": { + "key": "Flex", + "label": "Image Editing" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2-flex/edit", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-2-flex/edit queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2-flex/edit", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/elephant/NXNJkZllyE8XTyMrtEALf_90206edd3ddb4ba793758a26bde823c7.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2-flex/edit", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2-flex/edit/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2FlexEditInput": { + "title": "Flux2FlexImageEditInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Change colors of the vase. In a cozy living room setting, visualize a gradient vase placed on a table, flowing from rich #6a0dad to soft #ff69b4. Add an artistic carving text with a big font on vase says \"FLEX\" in the middle." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "guidance_scale": { + "minimum": 1.5, + "maximum": 10, + "type": "number", + "title": "Guidance Scale", + "description": "The guidance scale to use for the generation.", + "default": 3.5 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "auto", + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image. 
If `auto`, the size will be determined by the model.", + "title": "Image Size", + "default": "auto" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "safety_tolerance": { + "enum": [ + "1", + "2", + "3", + "4", + "5" + ], + "title": "Safety Tolerance", + "type": "string", + "description": "The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive.", + "default": "2" + }, + "enable_prompt_expansion": { + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to expand the prompt using the model's own knowledge.", + "default": true + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 50, + "type": "integer", + "title": "Number of Inference Steps", + "description": "The number of inference steps to perform.", + "default": 28 + }, + "image_urls": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/example_inputs/flux2_flex_edit_input.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "List of URLs of input images for editing", + "items": { + "type": "string" + } + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed to use for the generation." + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "enable_prompt_expansion", + "seed", + "safety_tolerance", + "enable_safety_checker", + "output_format", + "sync_mode", + "image_urls", + "guidance_scale", + "num_inference_steps" + ], + "required": [ + "prompt", + "image_urls" + ] + }, + "Flux2FlexEditOutput": { + "title": "Flux2FlexEditOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/flux2_flex_edit_output.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated images.", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for the generation." + } + }, + "x-fal-order-properties": [ + "images", + "seed" + ], + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "ImageFile": { + "title": "ImageFile", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the image" + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." 
+ }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the image" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2-flex/edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-flex/edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2-flex/edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2FlexEditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-flex/edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2FlexEditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/chrono-edit-lora", + "metadata": { + "display_name": "Chrono Edit Lora", + "category": "image-to-image", + "description": "LoRA endpoint for the Chrono Edit model.", + "status": "active", + "tags": [ + "image-to-image", + "image-editing" + ], + "updated_at": "2026-01-26T21:42:17.430Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/zebra/6I2osemyFuegWWmldrNn__8f3b1d1410d24e6bbe8a9cf8be154af6.jpg", + "model_url": "https://fal.run/fal-ai/chrono-edit-lora", + "license_type": "commercial", + "date": "2025-11-21T23:32:48.040Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/chrono-edit-lora", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/chrono-edit-lora queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/chrono-edit-lora", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/zebra/6I2osemyFuegWWmldrNn__8f3b1d1410d24e6bbe8a9cf8be154af6.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/chrono-edit-lora", + "documentationUrl": "https://fal.ai/models/fal-ai/chrono-edit-lora/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ChronoEditLoraInput": { + "x-fal-order-properties": [ + "image_url", + "prompt", + "num_inference_steps", + "guidance_scale", + "enable_prompt_expansion", + "enable_temporal_reasoning", + "num_temporal_reasoning_steps", + "resolution", + "enable_safety_checker", + "seed", + "output_format", + "sync_mode", + "turbo_mode", + "loras" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Add a surfer to the wave in the illustration." + ], + "description": "The prompt to edit the image.", + "type": "string", + "title": "Prompt" + }, + "loras": { + "title": "Loras", + "type": "array", + "description": "Optional additional LoRAs to merge for this request (max 3).", + "items": { + "$ref": "#/components/schemas/ChronoLoraWeight" + }, + "default": [] + }, + "turbo_mode": { + "title": "Turbo Mode", + "type": "boolean", + "description": "Enable turbo mode for faster inference.", + "default": true + }, + "enable_temporal_reasoning": { + "title": "Enable Temporal Reasoning", + "type": "boolean", + "description": "Whether to enable temporal reasoning.", + "default": false + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance Scale", + "type": "number", + "maximum": 10, + "description": "The guidance scale for the inference.", + "default": 1 + }, + "resolution": { + "enum": [ + "480p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the output image.", + "default": "480p" + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output image.", + "default": "jpeg" + }, + "num_temporal_reasoning_steps": { + "minimum": 2, + "title": "Number of Temporal Reasoning Steps", + "type": "integer", + "maximum": 12, + "description": "The number of temporal reasoning steps to perform.", + "default": 8 + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "Whether to return the image in sync mode.", + "default": false + }, + "image_url": { + "examples": [ + "https://v3b.fal.media/files/b/zebra/yRvp9rTyDeDGHnbmtcsgK_original-wave.jpg" + ], + "description": "The image to edit.", + "type": "string", + "title": "Image URL" + }, + "enable_prompt_expansion": { + "examples": [ + true + ], + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": true + }, + "num_inference_steps": { + "minimum": 2, + "title": "Number of Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps to perform.", + "default": 8 + }, + "seed": { + "description": "The seed for the inference.", + "type": "integer", + "title": "Seed" + } + }, + "description": "ChronoEdit input with optional custom LoRAs.", + "title": "ChronoEditLoRAInput", + "required": [ + "image_url", + "prompt" + ] + }, + "ChronoEditLoraOutput": { + "x-fal-order-properties": [ + "images", + "prompt", + "seed" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "The user wants to add a surfer to the wave in the illustration while preserving the original ukiyo-e woodblock art style.
The surfer should be depicted mid-action, crouched low on a modern-style surfboard, carving through the crest of the wave with one arm extended for balance and the other gripping the board. Their wavy hair and athletic physique should match the dynamic motion. The background must remain unchanged, including the iconic Mount Fuji and the traditional Japanese text, to maintain the artwork's historical aesthetic and composition." + ], + "description": "The prompt used for the inference.", + "type": "string", + "title": "Prompt" + }, + "images": { + "examples": [ + [ + { + "height": 768, + "file_name": "2_gRhwfsnmNKYtZ_dveyV.jpg", + "content_type": "image/jpeg", + "url": "https://v3b.fal.media/files/b/koala/2_gRhwfsnmNKYtZ_dveyV.jpg", + "width": 1152 + } + ] + ], + "description": "The edited image.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "seed": { + "description": "The seed for the inference.", + "type": "integer", + "title": "Seed" + } + }, + "description": "Unified output model for all ChronoEdit operations", + "title": "ChronoEditOutput", + "required": [ + "images", + "prompt", + "seed" + ] + }, + "ChronoLoraWeight": { + "x-fal-order-properties": [ + "path", + "scale" + ], + "type": "object", + "properties": { + "path": { + "description": "URL or path to the LoRA weights (Safetensors).", + "type": "string", + "title": "Path" + }, + "scale": { + "minimum": 0, + "title": "Scale", + "type": "number", + "maximum": 4, + "description": "Scale factor controlling LoRA strength.", + "default": 1 + } + }, + "title": "ChronoLoraWeight", + "required": [ + "path" + ] + }, + "ImageFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "ImageFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/chrono-edit-lora/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/chrono-edit-lora/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/chrono-edit-lora": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ChronoEditLoraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/chrono-edit-lora/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ChronoEditLoraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/chrono-edit-lora-gallery/paintbrush", + "metadata": { + "display_name": "Chrono Edit Lora Gallery", + "category": "image-to-image", + "description": "You can make edits simply by drawing a quick sketch on the input image.", + "status": "active", + "tags": [ + "paint", + "edit", + "sketch" + ], + "updated_at": "2026-01-26T21:42:17.558Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/monkey/lOE5FD0ts-9zPSD2B53yx_a7709febfd5748f18cb6fab03d071ab1.jpg", + "model_url": "https://fal.run/fal-ai/chrono-edit-lora-gallery/paintbrush", + "license_type": "commercial", + "date": "2025-11-21T23:29:01.055Z", + "group": { + "key": "chrono-edit-lora-gallery", + "label": "Paintbrush" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/chrono-edit-lora-gallery/paintbrush", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/chrono-edit-lora-gallery/paintbrush queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/chrono-edit-lora-gallery/paintbrush", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/monkey/lOE5FD0ts-9zPSD2B53yx_a7709febfd5748f18cb6fab03d071ab1.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/chrono-edit-lora-gallery/paintbrush", + "documentationUrl": "https://fal.ai/models/fal-ai/chrono-edit-lora-gallery/paintbrush/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + 
"description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ChronoEditLoraGalleryPaintbrushInput": { + "description": "Input for paintbrush mode", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "Describe how to transform the sketched regions." + }, + "resolution": { + "enum": [ + "480p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the output image.", + "default": "480p" + }, + "lora_scale": { + "minimum": 0, + "maximum": 2, + "type": "number", + "title": "Lora Scale", + "description": "The scale factor for the LoRA adapter.", + "default": 1 + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output image.", + "default": "png" + }, + "image_url": { + "title": "Image Url", + "type": "string", + "description": "The image to edit." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "Whether to return the image in sync mode.", + "default": false + }, + "turbo_mode": { + "title": "Turbo Mode", + "type": "boolean", + "description": "Enable turbo mode to use faster inference.", + "default": true + }, + "loras": { + "title": "Loras", + "type": "array", + "description": "Optional additional LoRAs to merge (max 3).", + "items": { + "$ref": "#/components/schemas/ChronoLoraWeight" + }, + "default": [] + }, + "guidance_scale": { + "minimum": 0, + "maximum": 10, + "type": "number", + "title": "Guidance Scale", + "description": "Classifier-free guidance scale.", + "default": 1 + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of denoising steps to run.", + "default": 8 + }, + "mask_url": { + "title": "Mask Url", + "type": "string", + "description": "Optional mask image where black areas indicate regions to sketch/paint." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed for the inference." + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + } + }, + "title": "ChronoEditPaintBrushInput", + "x-fal-order-properties": [ + "image_url", + "mask_url", + "prompt", + "num_inference_steps", + "guidance_scale", + "resolution", + "enable_safety_checker", + "lora_scale", + "seed", + "output_format", + "sync_mode", + "turbo_mode", + "loras" + ], + "required": [ + "image_url", + "prompt" + ] + }, + "ChronoEditLoraGalleryPaintbrushOutput": { + "description": "Unified output model for all ChronoEdit operations", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "The user wants to add a surfer to the wave in the illustration while preserving the original ukiyo-e woodblock art style. 
The surfer should be depicted mid-action, crouched low on a modern-style surfboard, carving through the crest of the wave with one arm extended for balance and the other gripping the board. Their wavy hair and athletic physique should match the dynamic motion. The background must remain unchanged, including the iconic Mount Fuji and the traditional Japanese text, to maintain the artwork's historical aesthetic and composition." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for the inference." + }, + "images": { + "examples": [ + [ + { + "height": 768, + "file_name": "2_gRhwfsnmNKYtZ_dveyV.jpg", + "content_type": "image/jpeg", + "url": "https://v3b.fal.media/files/b/koala/2_gRhwfsnmNKYtZ_dveyV.jpg", + "width": 1152 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The edited image.", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed for the inference." + } + }, + "title": "ChronoEditOutput", + "x-fal-order-properties": [ + "images", + "prompt", + "seed" + ], + "required": [ + "images", + "prompt", + "seed" + ] + }, + "ChronoLoraWeight": { + "title": "ChronoLoraWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or path to the LoRA weights (Safetensors)." + }, + "scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Scale", + "description": "Scale factor controlling LoRA strength.", + "default": 1 + } + }, + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "ImageFile": { + "title": "ImageFile", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the image" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the image" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/chrono-edit-lora-gallery/paintbrush/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/chrono-edit-lora-gallery/paintbrush/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/chrono-edit-lora-gallery/paintbrush": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ChronoEditLoraGalleryPaintbrushInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/chrono-edit-lora-gallery/paintbrush/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ChronoEditLoraGalleryPaintbrushOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/chrono-edit-lora-gallery/upscaler", + "metadata": { + "display_name": "Chrono Edit Lora Gallery", + "category": "image-to-image", + "description": "Upscales and cleans up the image.", + "status": "active", + "tags": [ + "upscale", + "details" + ], + "updated_at": "2026-01-26T21:42:17.686Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/tiger/gRS5bsrx2k_1RwNvQR4DV_4dbd7aad81e44b36af16c01a38fda769.jpg", + "model_url": "https://fal.run/fal-ai/chrono-edit-lora-gallery/upscaler", + "license_type": "commercial", + "date": "2025-11-21T23:13:05.893Z", + "group": { + "key": "chrono-edit-lora-gallery", + "label": "Upscaler" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/chrono-edit-lora-gallery/upscaler", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/chrono-edit-lora-gallery/upscaler queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/chrono-edit-lora-gallery/upscaler", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/tiger/gRS5bsrx2k_1RwNvQR4DV_4dbd7aad81e44b36af16c01a38fda769.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/chrono-edit-lora-gallery/upscaler", + "documentationUrl": "https://fal.ai/models/fal-ai/chrono-edit-lora-gallery/upscaler/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + 
"type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ChronoEditLoraGalleryUpscalerInput": { + "description": "Input for upscaler mode", + "type": "object", + "properties": { + "lora_scale": { + "minimum": 0, + "maximum": 2, + "type": "number", + "title": "Lora Scale", + "description": "The scale factor for the LoRA adapter.", + "default": 1 + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output image.", + "default": "jpeg" + }, + "image_url": { + "title": "Image Url", + "type": "string", + "description": "The image to upscale." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "Whether to return the image in sync mode.", + "default": false + }, + "loras": { + "title": "Loras", + "type": "array", + "description": "Optional additional LoRAs to merge (max 3).", + "items": { + "$ref": "#/components/schemas/ChronoLoraWeight" + }, + "default": [] + }, + "upscale_factor": { + "minimum": 1, + "maximum": 4, + "type": "number", + "title": "Upscale Factor", + "description": "Target scale factor for the output resolution.", + "default": 2 + }, + "guidance_scale": { + "minimum": 0, + "maximum": 10, + "type": "number", + "title": "Guidance Scale", + "description": "The guidance scale for the inference.", + "default": 1 + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of inference steps for the upscaling pass.", + "default": 30 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed for the inference." + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + } + }, + "title": "ChronoEditUpscalerInput", + "x-fal-order-properties": [ + "image_url", + "upscale_factor", + "guidance_scale", + "enable_safety_checker", + "num_inference_steps", + "lora_scale", + "seed", + "output_format", + "sync_mode", + "loras" + ], + "required": [ + "image_url" + ] + }, + "ChronoEditLoraGalleryUpscalerOutput": { + "description": "Unified output model for all ChronoEdit operations", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "The user wants to add a surfer to the wave in the illustration while preserving the original ukiyo-e woodblock art style. The surfer should be depicted mid-action, crouched low on a modern-style surfboard, carving through the crest of the wave with one arm extended for balance and the other gripping the board. Their wavy hair and athletic physique should match the dynamic motion. The background must remain unchanged, including the iconic Mount Fuji and the traditional Japanese text, to maintain the artwork's historical aesthetic and composition." 
+ ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for the inference." + }, + "images": { + "examples": [ + [ + { + "height": 768, + "file_name": "2_gRhwfsnmNKYtZ_dveyV.jpg", + "content_type": "image/jpeg", + "url": "https://v3b.fal.media/files/b/koala/2_gRhwfsnmNKYtZ_dveyV.jpg", + "width": 1152 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The edited image.", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed for the inference." + } + }, + "title": "ChronoEditOutput", + "x-fal-order-properties": [ + "images", + "prompt", + "seed" + ], + "required": [ + "images", + "prompt", + "seed" + ] + }, + "ChronoLoraWeight": { + "title": "ChronoLoraWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or path to the LoRA weights (Safetensors)." + }, + "scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Scale", + "description": "Scale factor controlling LoRA strength.", + "default": 1 + } + }, + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "ImageFile": { + "title": "ImageFile", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the image" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the image" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/chrono-edit-lora-gallery/upscaler/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/chrono-edit-lora-gallery/upscaler/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/chrono-edit-lora-gallery/upscaler": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ChronoEditLoraGalleryUpscalerInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/chrono-edit-lora-gallery/upscaler/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ChronoEditLoraGalleryUpscalerOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/sam-3/image-rle", + "metadata": { + "display_name": "Sam 3", + "category": "image-to-image", + "description": "SAM 3 is a unified foundation model for promptable segmentation in images and videos. It can detect, segment, and track objects using text or visual prompts such as points, boxes, and masks. 
", + "status": "active", + "tags": [ + "segmentation", + "rle", + "real-time", + "" + ], + "updated_at": "2026-01-26T21:42:18.608Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/koala/ZrvWvXbFBabIS0XEJqJ3m_03f5ca0dc15241719ca5ee131da584a2.jpg", + "model_url": "https://fal.run/fal-ai/sam-3/image-rle", + "license_type": "commercial", + "date": "2025-11-20T20:28:17.206Z", + "group": { + "key": "sam3", + "label": "Image to RLE" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/sam-3/image-rle", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/sam-3/image-rle queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/sam-3/image-rle", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/koala/ZrvWvXbFBabIS0XEJqJ3m_03f5ca0dc15241719ca5ee131da584a2.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/sam-3/image-rle", + "documentationUrl": "https://fal.ai/models/fal-ai/sam-3/image-rle/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Sam3ImageRleInput": { + "x-fal-order-properties": [ + "image_url", + "prompt", + "text_prompt", + "point_prompts", + "box_prompts", + "apply_mask", + "sync_mode", + "output_format", + "return_multiple_masks", + "max_masks", + "include_scores", + "include_boxes" + ], + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "Text prompt for segmentation", + "default": "wheel" + }, + "include_boxes": { + "title": "Include Boxes", + "type": "boolean", + "description": "Whether to include bounding boxes for each mask (when available).", + "default": false + }, + "box_prompts": { + "title": "Box Prompts", + "type": "array", + "description": "Box prompt coordinates (x_min, y_min, x_max, y_max). 
Multiple boxes supported - use object_id to group boxes for the same object or leave empty for separate objects.", + "items": { + "$ref": "#/components/schemas/BoxPrompt" + }, + "default": [] + }, + "return_multiple_masks": { + "title": "Return Multiple Masks", + "type": "boolean", + "description": "If True, upload and return multiple generated masks as defined by `max_masks`.", + "default": false + }, + "image_url": { + "examples": [ + "https://raw.githubusercontent.com/facebookresearch/segment-anything-2/main/notebooks/images/truck.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to be segmented" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If True, the media will be returned as a data URI.", + "default": false + }, + "point_prompts": { + "title": "Point Prompts", + "type": "array", + "description": "List of point prompts", + "items": { + "$ref": "#/components/schemas/PointPrompt" + }, + "default": [] + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "max_masks": { + "minimum": 1, + "title": "Max Masks", + "type": "integer", + "description": "Maximum number of masks to return when `return_multiple_masks` is enabled.", + "maximum": 32, + "default": 3 + }, + "include_scores": { + "title": "Include Scores", + "type": "boolean", + "description": "Whether to include mask confidence scores.", + "default": false + }, + "apply_mask": { + "title": "Apply Mask", + "type": "boolean", + "description": "Apply the mask on the image.", + "default": true + }, + "text_prompt": { + "title": "Text Prompt", + "type": "string", + "description": "[DEPRECATED] Use 'prompt' instead. Kept for backward compatibility.", + "deprecated": true + } + }, + "title": "SAM3ImageInput", + "required": [ + "image_url" + ] + }, + "Sam3ImageRleOutput": { + "x-fal-order-properties": [ + "rle", + "boundingbox_frames_zip", + "metadata", + "scores", + "boxes" + ], + "type": "object", + "properties": { + "rle": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "type": "string" + } + } + ], + "title": "Rle", + "description": "Run Length Encoding of the mask." + }, + "metadata": { + "title": "Metadata", + "type": "array", + "description": "Per-mask metadata when multiple RLEs are returned.", + "items": { + "$ref": "#/components/schemas/MaskMetadata" + } + }, + "scores": { + "title": "Scores", + "type": "array", + "description": "Per-mask confidence scores when requested.", + "items": { + "type": "number" + } + }, + "boundingbox_frames_zip": { + "title": "Boundingbox Frames Zip", + "description": "Zip file containing per-frame bounding box overlays.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "boxes": { + "title": "Boxes", + "type": "array", + "description": "Per-mask normalized bounding boxes [cx, cy, w, h] when requested.", + "items": { + "type": "array", + "items": { + "type": "number" + } + } + } + }, + "title": "SAM3RLEOutput", + "required": [ + "rle" + ] + }, + "BoxPrompt": { + "x-fal-order-properties": [ + "x_min", + "y_min", + "x_max", + "y_max", + "object_id", + "frame_index" + ], + "type": "object", + "properties": { + "y_min": { + "title": "Y Min", + "type": "integer", + "description": "Y Min Coordinate of the box" + }, + "object_id": { + "title": "Object Id", + "type": "integer", + "description": "Optional object identifier. 
Boxes sharing an object id refine the same object." + }, + "frame_index": { + "title": "Frame Index", + "type": "integer", + "description": "The frame index to interact with." + }, + "x_max": { + "title": "X Max", + "type": "integer", + "description": "X Max Coordinate of the box" + }, + "x_min": { + "title": "X Min", + "type": "integer", + "description": "X Min Coordinate of the box" + }, + "y_max": { + "title": "Y Max", + "type": "integer", + "description": "Y Max Coordinate of the box" + } + }, + "title": "BoxPrompt" + }, + "PointPrompt": { + "x-fal-order-properties": [ + "x", + "y", + "label", + "object_id", + "frame_index" + ], + "type": "object", + "properties": { + "y": { + "title": "Y", + "type": "integer", + "description": "Y Coordinate of the prompt" + }, + "x": { + "title": "X", + "type": "integer", + "description": "X Coordinate of the prompt" + }, + "object_id": { + "title": "Object Id", + "type": "integer", + "description": "Optional object identifier. Prompts sharing an object id refine the same object." + }, + "frame_index": { + "title": "Frame Index", + "type": "integer", + "description": "The frame index to interact with." + }, + "label": { + "enum": [ + 0, + 1 + ], + "title": "Label", + "type": "integer", + "description": "1 for foreground, 0 for background" + } + }, + "title": "PointPrompt" + }, + "MaskMetadata": { + "x-fal-order-properties": [ + "index", + "score", + "box" + ], + "type": "object", + "properties": { + "box": { + "title": "Box", + "type": "array", + "description": "Bounding box for the mask in normalized cxcywh coordinates.", + "items": { + "type": "number" + } + }, + "score": { + "title": "Score", + "type": "number", + "description": "Score for this mask." + }, + "index": { + "title": "Index", + "type": "integer", + "description": "Index of the mask inside the model output." + } + }, + "title": "MaskMetadata", + "required": [ + "index" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/sam-3/image-rle/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sam-3/image-rle/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/sam-3/image-rle": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sam3ImageRleInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sam-3/image-rle/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sam3ImageRleOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/sam-3/image", + "metadata": { + "display_name": "Segment Anything Model 3", + "category": "image-to-image", + "description": "SAM 3 is a unified foundation model for promptable segmentation in images and videos. It can detect, segment, and track objects using text or visual prompts such as points, boxes, and masks. ", + "status": "active", + "tags": [ + "segmentation", + "mask", + "real-time" + ], + "updated_at": "2026-01-26T21:42:19.539Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/rabbit/kJhu6OfXaBwWYM1PUrWAL_6519f8ea79d649a9bead2e6118f4b997.jpg", + "model_url": "https://fal.run/fal-ai/sam-3/image", + "license_type": "commercial", + "date": "2025-11-20T19:45:11.630Z", + "group": { + "key": "sam3", + "label": "Image to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/sam-3/image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/sam-3/image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/sam-3/image", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/rabbit/kJhu6OfXaBwWYM1PUrWAL_6519f8ea79d649a9bead2e6118f4b997.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/sam-3/image", + "documentationUrl": "https://fal.ai/models/fal-ai/sam-3/image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Sam3ImageInput": { + "x-fal-order-properties": [ + "image_url", + "prompt", + "text_prompt", + "point_prompts", + "box_prompts", + "apply_mask", + "sync_mode", + "output_format", + "return_multiple_masks", + "max_masks", + "include_scores", + "include_boxes" + ], + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "Text prompt for segmentation", + "default": "wheel" + }, + "include_boxes": { + "title": "Include Boxes", + "type": "boolean", + "description": "Whether to include bounding boxes for each mask (when available).", + "default": false + }, + "box_prompts": { + "title": "Box Prompts", + "type": "array", + "description": "Box prompt coordinates (x_min, y_min, x_max, y_max). Multiple boxes supported - use object_id to group boxes for the same object or leave empty for separate objects.", + "items": { + "$ref": "#/components/schemas/BoxPrompt" + }, + "default": [] + }, + "return_multiple_masks": { + "title": "Return Multiple Masks", + "type": "boolean", + "description": "If True, upload and return multiple generated masks as defined by `max_masks`.", + "default": false + }, + "image_url": { + "examples": [ + "https://raw.githubusercontent.com/facebookresearch/segment-anything-2/main/notebooks/images/truck.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to be segmented" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If True, the media will be returned as a data URI.", + "default": false + }, + "point_prompts": { + "title": "Point Prompts", + "type": "array", + "description": "List of point prompts", + "items": { + "$ref": "#/components/schemas/PointPrompt" + }, + "default": [] + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "max_masks": { + "minimum": 1, + "title": "Max Masks", + "type": "integer", + "description": "Maximum number of masks to return when `return_multiple_masks` is enabled.", + "maximum": 32, + "default": 3 + }, + "include_scores": { + "title": "Include Scores", + "type": "boolean", + "description": "Whether to include mask confidence scores.", + "default": false + }, + "apply_mask": { + "title": "Apply Mask", + "type": "boolean", + "description": "Apply the mask on the image.", + "default": true + }, + "text_prompt": { + "title": "Text Prompt", + "type": "string", + "description": "[DEPRECATED] Use 'prompt' instead. 
Kept for backward compatibility.", + "deprecated": true + } + }, + "title": "SAM3ImageInput", + "required": [ + "image_url" + ] + }, + "Sam3ImageOutput": { + "x-fal-order-properties": [ + "image", + "masks", + "metadata", + "scores", + "boxes" + ], + "type": "object", + "properties": { + "image": { + "title": "Image", + "description": "Primary segmented mask preview.", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + }, + "metadata": { + "title": "Metadata", + "type": "array", + "description": "Per-mask metadata including scores and boxes.", + "items": { + "$ref": "#/components/schemas/MaskMetadata" + } + }, + "masks": { + "title": "Masks", + "type": "array", + "description": "Segmented mask images.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "scores": { + "title": "Scores", + "type": "array", + "description": "Per-mask confidence scores when requested.", + "items": { + "type": "number" + } + }, + "boxes": { + "title": "Boxes", + "type": "array", + "description": "Per-mask normalized bounding boxes [cx, cy, w, h] when requested.", + "items": { + "type": "array", + "items": { + "type": "number" + } + } + } + }, + "title": "SAM3ImageOutput", + "required": [ + "masks" + ] + }, + "BoxPrompt": { + "x-fal-order-properties": [ + "x_min", + "y_min", + "x_max", + "y_max", + "object_id", + "frame_index" + ], + "type": "object", + "properties": { + "y_min": { + "title": "Y Min", + "type": "integer", + "description": "Y Min Coordinate of the box" + }, + "object_id": { + "title": "Object Id", + "type": "integer", + "description": "Optional object identifier. Boxes sharing an object id refine the same object." + }, + "frame_index": { + "title": "Frame Index", + "type": "integer", + "description": "The frame index to interact with." + }, + "x_max": { + "title": "X Max", + "type": "integer", + "description": "X Max Coordinate of the box" + }, + "x_min": { + "title": "X Min", + "type": "integer", + "description": "X Min Coordinate of the box" + }, + "y_max": { + "title": "Y Max", + "type": "integer", + "description": "Y Max Coordinate of the box" + } + }, + "title": "BoxPrompt" + }, + "PointPrompt": { + "x-fal-order-properties": [ + "x", + "y", + "label", + "object_id", + "frame_index" + ], + "type": "object", + "properties": { + "y": { + "title": "Y", + "type": "integer", + "description": "Y Coordinate of the prompt" + }, + "x": { + "title": "X", + "type": "integer", + "description": "X Coordinate of the prompt" + }, + "object_id": { + "title": "Object Id", + "type": "integer", + "description": "Optional object identifier. Prompts sharing an object id refine the same object." + }, + "frame_index": { + "title": "Frame Index", + "type": "integer", + "description": "The frame index to interact with." + }, + "label": { + "enum": [ + 0, + 1 + ], + "title": "Label", + "type": "integer", + "description": "1 for foreground, 0 for background" + } + }, + "title": "PointPrompt" + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." 
+ }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + }, + "MaskMetadata": { + "x-fal-order-properties": [ + "index", + "score", + "box" + ], + "type": "object", + "properties": { + "box": { + "title": "Box", + "type": "array", + "description": "Bounding box for the mask in normalized cxcywh coordinates.", + "items": { + "type": "number" + } + }, + "score": { + "title": "Score", + "type": "number", + "description": "Score for this mask." + }, + "index": { + "title": "Index", + "type": "integer", + "description": "Index of the mask inside the model output." + } + }, + "title": "MaskMetadata", + "required": [ + "index" + ] + } + } + }, + "paths": { + "/fal-ai/sam-3/image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sam-3/image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/sam-3/image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sam3ImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sam-3/image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sam3ImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/gemini-3-pro-image-preview/edit", + "metadata": { + "display_name": "Gemini 3 Pro Image Preview", + "category": "image-to-image", + "description": "Nano Banana Pro (a.k.a Nano Banana 2) is Google's new state-of-the-art image generation and editing model", + "status": "active", + "tags": [ + "realism", + "typography" + ], + "updated_at": "2026-01-26T21:42:20.507Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/penguin/Vp2KdBwHgd4Qp0ixnsOue_eb81afc63ac24064bc9d3e9ed48f9b74.jpg", + "model_url": "https://fal.run/fal-ai/gemini-3-pro-image-preview/edit", + "license_type": "commercial", + "date": "2025-11-20T14:29:56.085Z", + "group": { + "key": "Gemini-3-Pro", + "label": "Image Editing" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/gemini-3-pro-image-preview/edit", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/gemini-3-pro-image-preview/edit queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/gemini-3-pro-image-preview/edit", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/penguin/Vp2KdBwHgd4Qp0ixnsOue_eb81afc63ac24064bc9d3e9ed48f9b74.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/gemini-3-pro-image-preview/edit", + "documentationUrl": "https://fal.ai/models/fal-ai/gemini-3-pro-image-preview/edit/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Gemini3ProImagePreviewEditInput": { + "title": "NanoBananaImageToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "make a photo of the man driving the car down the california coastline" + ], + "title": "Prompt", + "minLength": 3, + "description": "The prompt for image editing.", + "type": "string", + "maxLength": 50000 + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Number of Images", + "description": "The number of images to generate.", + "default": 1 + }, + "enable_web_search": { + "title": "Enable Web Search", + "type": "boolean", + "description": "Enable web search for the image generation task. This will allow the model to use the latest information from the web to generate the image.", + "default": false + }, + "resolution": { + "enum": [ + "1K", + "2K", + "4K" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the image to generate.", + "default": "1K" + }, + "aspect_ratio": { + "enum": [ + "auto", + "21:9", + "16:9", + "3:2", + "4:3", + "5:4", + "1:1", + "4:5", + "3:4", + "2:3", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated image.", + "default": "auto" + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed for the random number generator." + }, + "image_urls": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/example_inputs/nano-banana-edit-input.png", + "https://storage.googleapis.com/falserverless/example_inputs/nano-banana-edit-input-2.png" + ] + ], + "description": "The URLs of the images to use for image-to-image generation or image editing.", + "type": "array", + "title": "Image URLs", + "items": { + "type": "string" + } + }, + "limit_generations": { + "title": "Limit Generations", + "type": "boolean", + "description": "Experimental parameter to limit the number of generations from each round of prompting to 1. Set to `True` to disregard any instructions in the prompt regarding the number of images to generate.", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "num_images", + "seed", + "aspect_ratio", + "output_format", + "sync_mode", + "image_urls", + "resolution", + "limit_generations", + "enable_web_search" + ], + "required": [ + "prompt", + "image_urls" + ] + }, + "Gemini3ProImagePreviewEditOutput": { + "title": "NanoBananaImageToImageOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "file_name": "nano-banana-multi-edit-output.png", + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/example_outputs/nano-banana-multi-edit-output.png" + } + ] + ], + "description": "The edited images.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "description": { + "title": "Description", + "type": "string", + "description": "The description of the generated images."
+ } + }, + "x-fal-order-properties": [ + "images", + "description" + ], + "required": [ + "images", + "description" + ] + }, + "ImageFile": { + "title": "ImageFile", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the image" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the image" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/gemini-3-pro-image-preview/edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/gemini-3-pro-image-preview/edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/gemini-3-pro-image-preview/edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Gemini3ProImagePreviewEditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/gemini-3-pro-image-preview/edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Gemini3ProImagePreviewEditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/nano-banana-pro/edit", + "metadata": { + "display_name": "Nano Banana Pro", + "category": "image-to-image", + "description": "Nano Banana Pro (a.k.a Nano Banana 2) is Google's new state-of-the-art image generation and editing model", + "status": "active", + "tags": [ + "realism", + "typography" + ], + "updated_at": "2026-01-26T21:42:20.769Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a86920c/NHZJWHJsy3Sve0Kroa6hV_8767c85d62f6486495d2d4978c1b1de3.jpg", + "model_url": "https://fal.run/fal-ai/nano-banana-pro/edit", + "license_type": "commercial", + "date": "2025-11-20T14:27:03.344Z", + "group": { + "key": "Nano-Banana-Pro", + "label": "Image Editing" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/nano-banana-pro/edit", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/nano-banana-pro/edit queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/nano-banana-pro/edit", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a86920c/NHZJWHJsy3Sve0Kroa6hV_8767c85d62f6486495d2d4978c1b1de3.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/nano-banana-pro/edit", + "documentationUrl": "https://fal.ai/models/fal-ai/nano-banana-pro/edit/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "NanoBananaProEditInput": { + "x-fal-order-properties": [ + "prompt", + "num_images", + "seed", + "aspect_ratio", + "output_format", + "sync_mode", + "image_urls", + "resolution", + "limit_generations", + "enable_web_search" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "make a photo of the man driving the car down the california coastline" + ], + "maxLength": 50000, + "type": "string", + "minLength": 3, + "description": "The prompt for image editing.", + "title": "Prompt" + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "description": "The number of images to generate.", + "title": "Number of Images", + "default": 1 + }, + "enable_web_search": { + "description": "Enable web search for the image generation task. This will allow the model to use the latest information from the web to generate the image.", + "type": "boolean", + "title": "Enable Web Search", + "default": false + }, + "aspect_ratio": { + "enum": [ + "auto", + "21:9", + "16:9", + "3:2", + "4:3", + "5:4", + "1:1", + "4:5", + "3:4", + "2:3", + "9:16" + ], + "description": "The aspect ratio of the generated image.", + "type": "string", + "title": "Aspect Ratio", + "default": "auto" + }, + "resolution": { + "enum": [ + "1K", + "2K", + "4K" + ], + "description": "The resolution of the image to generate.", + "type": "string", + "title": "Resolution", + "default": "1K" + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "png" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "seed": { + "description": "The seed for the random number generator.", + "type": "integer", + "title": "Seed" + }, + "image_urls": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/example_inputs/nano-banana-edit-input.png", + "https://storage.googleapis.com/falserverless/example_inputs/nano-banana-edit-input-2.png" + ] + ], + "description": "The URLs of the images to use for image-to-image generation or image editing.", + "type": "array", + "title": "Image URLs", + "items": { + "type": "string" + } + }, + "limit_generations": { + "description": "Experimental parameter to limit the number of generations from each round of prompting to 1. Set to `True` to disregard any instructions in the prompt regarding the number of images to generate.", + "type": "boolean", + "title": "Limit Generations", + "default": false + } + }, + "title": "NanoBananaImageToImageInput", + "required": [ + "prompt", + "image_urls" + ] + }, + "NanoBananaProEditOutput": { + "x-fal-order-properties": [ + "images", + "description" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "file_name": "nano-banana-multi-edit-output.png", + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/example_outputs/nano-banana-multi-edit-output.png" + } + ] + ], + "description": "The edited images.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "description": { + "description": "The description of the generated images.", + "type": "string", + "title": "Description" + } + }, + "title": "NanoBananaImageToImageOutput", + "required": [ + "images", + "description" + ] + }, + "ImageFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "ImageFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/nano-banana-pro/edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/nano-banana-pro/edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully."
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/nano-banana-pro/edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/NanoBananaProEditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/nano-banana-pro/edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/NanoBananaProEditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-edit-plus-lora-gallery/multiple-angles", + "metadata": { + "display_name": "Qwen Image Edit Plus Lora Gallery", + "category": "image-to-image", + "description": "Precise camera position and angle control (rotation, zoom, vertical movement)", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:42:24.365Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/monkey/JbSGvZquN8vZSqhmHBK9o_6549790441a74cefa9f0afdfbd2a182c.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-edit-plus-lora-gallery/multiple-angles", + "license_type": "commercial", + "date": "2025-11-11T20:40:36.367Z", + "group": { + "key": "qwen-image-edit-plus-lora-gallery", + "label": "Multiple Angles" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image-edit-plus-lora-gallery/multiple-angles", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-image-edit-plus-lora-gallery/multiple-angles queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-image-edit-plus-lora-gallery/multiple-angles", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/monkey/JbSGvZquN8vZSqhmHBK9o_6549790441a74cefa9f0afdfbd2a182c.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-plus-lora-gallery/multiple-angles", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-plus-lora-gallery/multiple-angles/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImageEditPlusLoraGalleryMultipleAnglesInput": { + "x-fal-order-properties": [ + "image_urls", + "image_size", + "guidance_scale", + "num_inference_steps", + "acceleration", + "negative_prompt", + "seed", + "sync_mode", + "enable_safety_checker", + "output_format", + "num_images", + "rotate_right_left", + "move_forward", + "vertical_angle", + "wide_angle_lens", + "lora_scale" + ], + "type": "object", + "properties": { + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Image Size", + "description": "The size of the generated image. If not provided, the size of the final input image will be used." + }, + "wide_angle_lens": { + "title": "Wide-Angle Lens", + "type": "boolean", + "description": "Enable wide-angle lens effect", + "default": false + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "title": "Acceleration", + "type": "string", + "description": "Acceleration level for image generation. 'regular' balances speed and quality.", + "default": "regular" + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.", + "default": 1 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker for the generated image.", + "default": true + }, + "image_urls": { + "examples": [ + [ + "https://v3.fal.media/files/monkey/i3saq4bAPXSIl08nZtq9P_ec535747aefc4e31943136a6d8587075.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "The URL of the image to adjust camera angle for.", + "items": { + "type": "string" + } + }, + "negative_prompt": { + "examples": [ + " " + ], + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt for the generation", + "default": " " + }, + "vertical_angle": { + "description": "Adjust vertical camera angle (-1=bird's-eye view/looking down, 0=neutral, 1=worm's-eye view/looking up)", + "type": "number", + "minimum": -1, + "maximum": 1, + "title": "Vertical Angle (Bird ⬄ Worm)", + "default": 0 + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "Number of images to generate", + "default": 1 + }, + "move_forward": { + "description": "Move camera forward (0=no movement, 10=close-up)", + "type": "number", + "minimum": 0, + "maximum": 10, + "title": "Move Forward → Close-Up", + "default": 0 + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output image", + "default": "png" + }, + "rotate_right_left": { + "description": "Rotate camera left (positive) or right (negative) in degrees. Positive values rotate left, negative values rotate right.", + "type": "number", + "minimum": -90, + "maximum": 90, + "title": "Rotate Right-Left (degrees °)", + "default": 0 + }, + "lora_scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Lora Scale", + "description": "The scale factor for the LoRA model. Controls the strength of the camera control effect.", + "default": 1.25 + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and won't be saved in history.", + "default": false + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. Same seed with same prompt will produce same result." + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 6 + } + }, + "title": "MultipleAnglesInput", + "description": "Input model for Multiple Angles endpoint - Camera control with precise adjustments", + "required": [ + "image_urls" + ] + }, + "QwenImageEditPlusLoraGalleryMultipleAnglesOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/elephant/0lEToxR8cU5tB-SVMmD2C.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated/edited images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + } + }, + "title": "MultipleAnglesOutput", + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "examples": [ + 1024 + ], + "description": "The height of the image in pixels." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "examples": [ + 1024 + ], + "description": "The width of the image in pixels."
+ } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-edit-plus-lora-gallery/multiple-angles/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-plus-lora-gallery/multiple-angles/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-plus-lora-gallery/multiple-angles": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEditPlusLoraGalleryMultipleAnglesInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-plus-lora-gallery/multiple-angles/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEditPlusLoraGalleryMultipleAnglesOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-edit-plus-lora-gallery/shirt-design", + "metadata": { + "display_name": "Qwen Image Edit Plus Lora Gallery", + "category": "image-to-image", + "description": "Apply designs/graphics onto people's shirts", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:42:24.490Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/lion/Sh9OfYjTHGDA8el8pezwY_754c1fb894234b08b52605905ff9ac2d.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-edit-plus-lora-gallery/shirt-design", + "license_type": "commercial", + "date": "2025-11-11T20:37:16.566Z", + "group": { + "key": "qwen-image-edit-plus-lora-gallery", + "label": "Shirt Design" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image-edit-plus-lora-gallery/shirt-design", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-image-edit-plus-lora-gallery/shirt-design queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-image-edit-plus-lora-gallery/shirt-design", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/lion/Sh9OfYjTHGDA8el8pezwY_754c1fb894234b08b52605905ff9ac2d.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-plus-lora-gallery/shirt-design", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-plus-lora-gallery/shirt-design/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImageEditPlusLoraGalleryShirtDesignInput": { + "x-fal-order-properties": [ + "image_urls", + "image_size", + "guidance_scale", + "num_inference_steps", + "acceleration", + "negative_prompt", + "seed", + "sync_mode", + "enable_safety_checker", + "output_format", + "num_images", + "prompt", + "lora_scale" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Put this design on their shirt", + "Apply this graphic to their t-shirt", + "Place this logo on their shirt" + ], + "title": "Prompt", + "type": "string", + "description": "Describe what design to put on the shirt. The model will apply the design from your input image onto the person's shirt.", + "default": "Put this design on their shirt" + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "Number of images to generate", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Image Size", + "description": "The size of the generated image. If not provided, the size of the final input image will be used." + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "title": "Acceleration", + "type": "string", + "description": "Acceleration level for image generation. 'regular' balances speed and quality.", + "default": "regular" + }, + "lora_scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Lora Scale", + "description": "The scale factor for the LoRA model. Controls the strength of the LoRA effect.", + "default": 1 + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output image", + "default": "png" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker for the generated image.", + "default": true + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and won't be saved in history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.", + "default": 1 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. Same seed with same prompt will produce same result." + }, + "image_urls": { + "examples": [ + [ + "https://v3b.fal.media/files/b/tiger/1rq65RzrUwKtHLAwpEjq8_4ee388931b5142f1bd1f2e0a3cb2498e.png", + "https://github.com/fal-ai/fal-assets/blob/main/Logo%20Square.png?raw=true" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "The URLs of the images: first image is the person wearing a shirt, second image is the design/logo to put on the shirt.", + "items": { + "type": "string" + } + }, + "negative_prompt": { + "examples": [ + " " + ], + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt for the generation", + "default": " " + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 6 + } + }, + "title": "ShirtDesignInput", + "description": "Input model for Shirt Design endpoint - Put designs/graphics on people's shirts", + "required": [ + "image_urls" + ] + }, + "QwenImageEditPlusLoraGalleryShirtDesignOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/panda/Y5wKKIEuFpRMEUQ8ZPy01.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated/edited images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + } + }, + "title": "ShirtDesignOutput", + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "examples": [ + 1024 + ], + "description": "The height of the image in pixels." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "examples": [ + 1024 + ], + "description": "The width of the image in pixels." + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-edit-plus-lora-gallery/shirt-design/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-plus-lora-gallery/shirt-design/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully."
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-plus-lora-gallery/shirt-design": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEditPlusLoraGalleryShirtDesignInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-plus-lora-gallery/shirt-design/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEditPlusLoraGalleryShirtDesignOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-edit-plus-lora-gallery/remove-lighting", + "metadata": { + "display_name": "Qwen Image Edit Plus Lora Gallery", + "category": "image-to-image", + "description": "Remove existing lighting and apply soft, even illumination", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:42:24.615Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/zebra/z1Ze46BEziziJyYnGPfMA_411ef5a278eb4a60a4e7afab24167f6d.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-edit-plus-lora-gallery/remove-lighting", + "license_type": "commercial", + "date": "2025-11-11T20:34:59.313Z", + "group": { + "key": "qwen-image-edit-plus-lora-gallery", + "label": "Remove Lighting" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image-edit-plus-lora-gallery/remove-lighting", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-image-edit-plus-lora-gallery/remove-lighting queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-image-edit-plus-lora-gallery/remove-lighting", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/zebra/z1Ze46BEziziJyYnGPfMA_411ef5a278eb4a60a4e7afab24167f6d.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-plus-lora-gallery/remove-lighting", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-plus-lora-gallery/remove-lighting/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImageEditPlusLoraGalleryRemoveLightingInput": { + "x-fal-order-properties": [ + "image_urls", + "image_size", + "guidance_scale", + "num_inference_steps", + "acceleration", + "negative_prompt", + "seed", + "sync_mode", + "enable_safety_checker", + "output_format", + "num_images" + ], + "type": "object", + "properties": { + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker for the generated image.", + "default": true + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "Number of images to generate", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Image Size", + "description": "The size of the generated image. If not provided, the size of the final input image will be used." + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "title": "Acceleration", + "type": "string", + "description": "Acceleration level for image generation. 'regular' balances speed and quality.", + "default": "regular" + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output image", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and won't be saved in history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.", + "default": 1 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. Same seed with same prompt will produce same result." 
+ }, + "image_urls": { + "examples": [ + [ + "https://v3b.fal.media/files/b/panda/J0XyFgb0AAgyUzmVFd0nr_5363c66361d94cea89333795d700165d.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "The URL of the image with lighting/shadows to remove.", + "items": { + "type": "string" + } + }, + "negative_prompt": { + "examples": [ + " " + ], + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt for the generation", + "default": " " + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 6 + } + }, + "title": "RemoveLightingInput", + "description": "Input model for Remove Lighting endpoint - Remove existing lighting and apply soft even lighting", + "required": [ + "image_urls" + ] + }, + "QwenImageEditPlusLoraGalleryRemoveLightingOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/monkey/D7FrWGFnb7t8fjiE9Cok4.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated/edited images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + } + }, + "title": "RemoveLightingOutput", + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "examples": [ + 1024 + ], + "description": "The height of the image in pixels." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "examples": [ + 1024 + ], + "description": "The width of the image in pixels." 
+ } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-edit-plus-lora-gallery/remove-lighting/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-plus-lora-gallery/remove-lighting/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-plus-lora-gallery/remove-lighting": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEditPlusLoraGalleryRemoveLightingInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-plus-lora-gallery/remove-lighting/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEditPlusLoraGalleryRemoveLightingOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-edit-plus-lora-gallery/remove-element", + "metadata": { + "display_name": "Qwen Image Edit Plus Lora Gallery", + "category": "image-to-image", + "description": "Remove unwanted elements (objects, people, text) while maintaining image consistency", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:42:24.742Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/kangaroo/qjrxX6uCXnmLSaIK20Ju6_65d0867c22dd4fe6a5e6244062c4d7a1.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-edit-plus-lora-gallery/remove-element", + "license_type": "commercial", + "date": "2025-11-11T20:32:00.206Z", + "group": { + "key": "qwen-image-edit-plus-lora-gallery", + "label": "Remove Element" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image-edit-plus-lora-gallery/remove-element", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-image-edit-plus-lora-gallery/remove-element queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-image-edit-plus-lora-gallery/remove-element", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/kangaroo/qjrxX6uCXnmLSaIK20Ju6_65d0867c22dd4fe6a5e6244062c4d7a1.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-plus-lora-gallery/remove-element", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-plus-lora-gallery/remove-element/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImageEditPlusLoraGalleryRemoveElementInput": { + "x-fal-order-properties": [ + "image_urls", + "image_size", + "guidance_scale", + "num_inference_steps", + "acceleration", + "negative_prompt", + "seed", + "sync_mode", + "enable_safety_checker", + "output_format", + "num_images", + "prompt", + "lora_scale" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Remove the person from the image", + "Remove the car and the bicycle", + "Remove the text and logos" + ], + "title": "Prompt", + "type": "string", + "description": "Specify what element(s) to remove from the image (objects, people, text, etc.). The model will cleanly remove the element while maintaining consistency of the rest of the image.", + "default": "Remove the specified element from the scene" + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "Number of images to generate", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Image Size", + "description": "The size of the generated image. If not provided, the size of the final input image will be used." + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "title": "Acceleration", + "type": "string", + "description": "Acceleration level for image generation. 'regular' balances speed and quality.", + "default": "regular" + }, + "lora_scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Lora Scale", + "description": "The scale factor for the LoRA model. Controls the strength of the LoRA effect.", + "default": 1 + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output image", + "default": "png" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker for the generated image.", + "default": true + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and won't be saved in history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.", + "default": 1 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. Same seed with same prompt will produce same result." + }, + "image_urls": { + "examples": [ + [ + "https://v3b.fal.media/files/b/elephant/oWup_Q7zuvbfB4en-hneO_5aaa1cb3d3eb44999005159e82e7c9b7.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "The URL of the image containing elements to remove.", + "items": { + "type": "string" + } + }, + "negative_prompt": { + "examples": [ + " " + ], + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt for the generation", + "default": " " + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 6 + } + }, + "title": "RemoveElementInput", + "description": "Input model for Remove Element endpoint - Remove/delete elements (objects, people, text) from the image", + "required": [ + "image_urls" + ] + }, + "QwenImageEditPlusLoraGalleryRemoveElementOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/koala/dTldnOpRSFVBvWiyfOeO1.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated/edited images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + } + }, + "title": "RemoveElementOutput", + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes."
+ }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "examples": [ + 1024 + ], + "description": "The height of the image in pixels." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "examples": [ + 1024 + ], + "description": "The width of the image in pixels." + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-edit-plus-lora-gallery/remove-element/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-plus-lora-gallery/remove-element/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-plus-lora-gallery/remove-element": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEditPlusLoraGalleryRemoveElementInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-plus-lora-gallery/remove-element/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEditPlusLoraGalleryRemoveElementOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-edit-plus-lora-gallery/next-scene", + "metadata": { + "display_name": "Qwen Image Edit Plus Lora Gallery", + "category": "image-to-image", + "description": "Create cinematic transitions and scene progressions (camera movements, framing changes)", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:42:24.900Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/elephant/B2n7UmIRRBtpO5kLRoIg8_5dc0cbc4dd7f4a8aa3c55dd4eca753a8.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-edit-plus-lora-gallery/next-scene", + "license_type": "commercial", + "date": "2025-11-11T20:29:00.375Z", + "group": { + "key": "qwen-image-edit-plus-lora-gallery", + "label": "Next Scene" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image-edit-plus-lora-gallery/next-scene", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-image-edit-plus-lora-gallery/next-scene queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-image-edit-plus-lora-gallery/next-scene", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/elephant/B2n7UmIRRBtpO5kLRoIg8_5dc0cbc4dd7f4a8aa3c55dd4eca753a8.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-plus-lora-gallery/next-scene", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-plus-lora-gallery/next-scene/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImageEditPlusLoraGalleryNextSceneInput": { + "x-fal-order-properties": [ + "image_urls", + "image_size", + "guidance_scale", + "num_inference_steps", + "acceleration", + "negative_prompt", + "seed", + "sync_mode", + "enable_safety_checker", + "output_format", + "num_images", + "prompt", + "lora_scale" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Next Scene: The camera pulls back to reveal the entire landscape", + "Next Scene: The camera tracks forward as sunlight breaks through the clouds", + "Next Scene: The camera pans right revealing new characters entering the frame" + ], + "title": "Prompt", + "type": "string", + "description": "Describe the camera movement, framing change, or scene transition. Start with 'Next Scene:' for best results. Examples: camera movements (dolly, push-in, pull-back), framing changes (wide to close-up), new elements entering frame.", + "default": "Next Scene: The camera moves forward revealing more of the scene" + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "Number of images to generate", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Image Size", + "description": "The size of the generated image. If not provided, the size of the final input image will be used." + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "title": "Acceleration", + "type": "string", + "description": "Acceleration level for image generation. 'regular' balances speed and quality.", + "default": "regular" + }, + "lora_scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Lora Scale", + "description": "The scale factor for the LoRA model. Controls the strength of the LoRA effect.", + "default": 1 + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output image", + "default": "png" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker for the generated image.", + "default": true + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and won't be saved in history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.", + "default": 1 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. Same seed with same prompt will produce same result." 
+ }, + "image_urls": { + "examples": [ + [ + "https://v3b.fal.media/files/b/penguin/Zj5z8GW7yYlrpOQtuwjKQ_086265e41092415f951a6576fed25e41.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "The URL of the image to create the next scene from.", + "items": { + "type": "string" + } + }, + "negative_prompt": { + "examples": [ + " " + ], + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt for the generation", + "default": " " + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 6 + } + }, + "title": "NextSceneInput", + "description": "Input model for Next Scene endpoint - Create cinematic shot progressions and scene transitions", + "required": [ + "image_urls" + ] + }, + "QwenImageEditPlusLoraGalleryNextSceneOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/panda/6r8XojqbZvFPhdizajCb3.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated/edited images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + } + }, + "title": "NextSceneOutput", + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "examples": [ + 1024 + ], + "description": "The height of the image in pixels." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "examples": [ + 1024 + ], + "description": "The width of the image in pixels." 
+ } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-edit-plus-lora-gallery/next-scene/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-plus-lora-gallery/next-scene/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-plus-lora-gallery/next-scene": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEditPlusLoraGalleryNextSceneInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-plus-lora-gallery/next-scene/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEditPlusLoraGalleryNextSceneOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-edit-plus-lora-gallery/integrate-product", + "metadata": { + "display_name": "Qwen Image Edit Plus Lora Gallery", + "category": "image-to-image", + "description": "Blend products into backgrounds with automatic perspective and lighting correction", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:42:25.269Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/lion/kQP6sIJmyFzXBvEwY4n_g_83d331b99dff4ac58f6d32631a24a774.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-edit-plus-lora-gallery/integrate-product", + "license_type": "commercial", + "date": "2025-11-11T20:16:11.899Z", + "group": { + "key": "qwen-image-edit-plus-lora-gallery", + "label": "Integrate Product" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image-edit-plus-lora-gallery/integrate-product", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-image-edit-plus-lora-gallery/integrate-product queue.", + "x-fal-metadata": { + "endpointId": 
"fal-ai/qwen-image-edit-plus-lora-gallery/integrate-product", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/lion/kQP6sIJmyFzXBvEwY4n_g_83d331b99dff4ac58f6d32631a24a774.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-plus-lora-gallery/integrate-product", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-plus-lora-gallery/integrate-product/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImageEditPlusLoraGalleryIntegrateProductInput": { + "x-fal-order-properties": [ + "image_urls", + "image_size", + "guidance_scale", + "num_inference_steps", + "acceleration", + "negative_prompt", + "seed", + "sync_mode", + "enable_safety_checker", + "output_format", + "num_images", + "prompt", + "lora_scale" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Blend and integrate the product into the background with correct perspective and lighting", + "Seamlessly blend the object into the scene with natural shadows", + "Integrate the product naturally into the environment" + ], + "title": "Prompt", + "type": "string", + "description": "Describe how to blend and integrate the product/element into the background. The model will automatically correct perspective, lighting and shadows for natural integration.", + "default": "Blend and integrate the product into the background" + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "Number of images to generate", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Image Size", + "description": "The size of the generated image. If not provided, the size of the final input image will be used." + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "title": "Acceleration", + "type": "string", + "description": "Acceleration level for image generation. 'regular' balances speed and quality.", + "default": "regular" + }, + "lora_scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Lora Scale", + "description": "The scale factor for the LoRA model. 
Controls the strength of the LoRA effect.", + "default": 1 + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output image", + "default": "png" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker for the generated image.", + "default": true + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and won't be saved in history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.", + "default": 1 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. Same seed with same prompt will produce same result." + }, + "image_urls": { + "examples": [ + [ + "https://v3b.fal.media/files/b/koala/LFYeCtq2LB4s6IpmoI2iy_2fb7b46d1f3749db9f7bab679bc6c4f3.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "The URL of the image with product to integrate into background.", + "items": { + "type": "string" + } + }, + "negative_prompt": { + "examples": [ + " " + ], + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt for the generation", + "default": " " + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 6 + } + }, + "title": "IntegrateProductInput", + "description": "Input model for Integrate Product endpoint - Blend and integrate products/elements into backgrounds", + "required": [ + "image_urls" + ] + }, + "QwenImageEditPlusLoraGalleryIntegrateProductOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/penguin/4_Bz95EOoETXJlfuWib3r.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated/edited images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + } + }, + "title": "IntegrateProductOutput", + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." 
+ }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "examples": [ + 1024 + ], + "description": "The height of the image in pixels." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "examples": [ + 1024 + ], + "description": "The width of the image in pixels." + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-edit-plus-lora-gallery/integrate-product/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-plus-lora-gallery/integrate-product/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-plus-lora-gallery/integrate-product": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEditPlusLoraGalleryIntegrateProductInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-plus-lora-gallery/integrate-product/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEditPlusLoraGalleryIntegrateProductOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-edit-plus-lora-gallery/group-photo", + "metadata": { + "display_name": "Qwen Image Edit Plus Lora Gallery", + "category": "image-to-image", + "description": "Create composite group photos with vintage/retro style", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:42:25.393Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/rabbit/Yzvztf6Z-TQRukl4KC3PH_aac2c75ac20f4ccd9bcf54ad9979c7a2.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-edit-plus-lora-gallery/group-photo", + "license_type": "commercial", + "date": "2025-11-11T20:11:58.194Z", + "group": { + "key": "qwen-image-edit-plus-lora-gallery", + "label": "Group Photo" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image-edit-plus-lora-gallery/group-photo", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-image-edit-plus-lora-gallery/group-photo queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-image-edit-plus-lora-gallery/group-photo", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/rabbit/Yzvztf6Z-TQRukl4KC3PH_aac2c75ac20f4ccd9bcf54ad9979c7a2.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-plus-lora-gallery/group-photo", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-plus-lora-gallery/group-photo/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url."
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImageEditPlusLoraGalleryGroupPhotoInput": { + "x-fal-order-properties": [ + "image_urls", + "image_size", + "guidance_scale", + "num_inference_steps", + "acceleration", + "negative_prompt", + "seed", + "sync_mode", + "enable_safety_checker", + "output_format", + "num_images", + "prompt", + "lora_scale" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Two people standing next to each other outside with a landscape background", + "Group photo outdoors with mountains and nature in the background, vintage style", + "Two people next to each other in a scenic outdoor setting with retro filter", + "People standing together outside with beautiful landscape behind them" + ], + "title": "Prompt", + "type": "string", + "description": "Describe the group photo scene, setting, and style. The model will maintain character consistency and add vintage effects like grain, blur, and retro filters.", + "default": "Two people standing next to each other outside with a landscape background" + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "Number of images to generate", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Image Size", + "description": "The size of the generated image. If not provided, the size of the final input image will be used." + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "title": "Acceleration", + "type": "string", + "description": "Acceleration level for image generation. 'regular' balances speed and quality.", + "default": "regular" + }, + "lora_scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Lora Scale", + "description": "The scale factor for the LoRA model. Controls the strength of the LoRA effect.", + "default": 1 + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output image", + "default": "png" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker for the generated image.", + "default": true + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and won't be saved in history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.", + "default": 1 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. Same seed with same prompt will produce same result." 
+ }, + "image_urls": { + "examples": [ + [ + "https://v3.fal.media/files/monkey/i3saq4bAPXSIl08nZtq9P_ec535747aefc4e31943136a6d8587075.png", + "https://v3b.fal.media/files/b/kangaroo/OEtbMr7E43t0UPT8JwRT4_091834d85d8346d6960e3fd789d67db8.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "The URLs of the images to combine into a group photo. Provide 2 or more individual portrait images.", + "items": { + "type": "string" + } + }, + "negative_prompt": { + "examples": [ + " " + ], + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt for the generation", + "default": " " + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 6 + } + }, + "title": "GroupPhotoInput", + "description": "Input model for Group Photo endpoint - Create composite group photos with vintage/retro style", + "required": [ + "image_urls" + ] + }, + "QwenImageEditPlusLoraGalleryGroupPhotoOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/kangaroo/GGvzZELjxMpFvV2IAEb_9.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated/edited images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + } + }, + "title": "GroupPhotoOutput", + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "examples": [ + 1024 + ], + "description": "The height of the image in pixels." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "examples": [ + 1024 + ], + "description": "The width of the image in pixels." 
+ } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-edit-plus-lora-gallery/group-photo/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-plus-lora-gallery/group-photo/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-plus-lora-gallery/group-photo": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEditPlusLoraGalleryGroupPhotoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-plus-lora-gallery/group-photo/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEditPlusLoraGalleryGroupPhotoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-edit-plus-lora-gallery/face-to-full-portrait", + "metadata": { + "display_name": "Qwen Image Edit Plus Lora Gallery", + "category": "image-to-image", + "description": "Generate full portrait from a cropped face photo", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:42:25.519Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/penguin/gsWgQ__Xox_OncquLVuAH_1413fce9fe7c4e2fa59d589f3fc69448.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-edit-plus-lora-gallery/face-to-full-portrait", + "license_type": "commercial", + "date": "2025-11-11T19:58:34.266Z", + "group": { + "key": "qwen-image-edit-plus-lora-gallery", + "label": "Face to Full Portrait" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image-edit-plus-lora-gallery/face-to-full-portrait", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-image-edit-plus-lora-gallery/face-to-full-portrait queue.", + "x-fal-metadata": { + "endpointId": 
"fal-ai/qwen-image-edit-plus-lora-gallery/face-to-full-portrait", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/penguin/gsWgQ__Xox_OncquLVuAH_1413fce9fe7c4e2fa59d589f3fc69448.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-plus-lora-gallery/face-to-full-portrait", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-plus-lora-gallery/face-to-full-portrait/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImageEditPlusLoraGalleryFaceToFullPortraitInput": { + "x-fal-order-properties": [ + "image_urls", + "image_size", + "guidance_scale", + "num_inference_steps", + "acceleration", + "negative_prompt", + "seed", + "sync_mode", + "enable_safety_checker", + "output_format", + "num_images", + "prompt", + "lora_scale" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Photography. A young woman wearing a yellow dress stands in a flower field", + "Professional headshot with business suit and office background", + "Casual portrait outdoors with natural sunlight and bokeh background" + ], + "title": "Prompt", + "type": "string", + "description": "Describe the full portrait you want to generate from the face. Include clothing, setting, pose, and style details.", + "default": "Photography. A portrait of the person in professional attire with natural lighting" + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "Number of images to generate", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Image Size", + "description": "The size of the generated image. If not provided, the size of the final input image will be used." + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "title": "Acceleration", + "type": "string", + "description": "Acceleration level for image generation. 'regular' balances speed and quality.", + "default": "regular" + }, + "lora_scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Lora Scale", + "description": "The scale factor for the LoRA model. 
Controls the strength of the LoRA effect.", + "default": 1 + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output image", + "default": "png" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker for the generated image.", + "default": true + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and won't be saved in history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.", + "default": 1 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. Same seed with same prompt will produce same result." + }, + "image_urls": { + "examples": [ + [ + "https://v3b.fal.media/files/b/kangaroo/Tl9BsbouyruyrEJtXWYOz_ef4270d3ff4d47f18883c70cfdf07c27.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "The URL of the cropped face image. Provide a close-up face photo.", + "items": { + "type": "string" + } + }, + "negative_prompt": { + "examples": [ + " " + ], + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt for the generation", + "default": " " + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 6 + } + }, + "title": "FaceToFullPortraitInput", + "description": "Input model for Face to Full Portrait endpoint - Generate full portrait from a cropped face image", + "required": [ + "image_urls" + ] + }, + "QwenImageEditPlusLoraGalleryFaceToFullPortraitOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/koala/efKAFkAtgzxZeLSdv-d2x.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated/edited images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + } + }, + "title": "FaceToFullPortraitOutput", + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." 
+ }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "examples": [ + 1024 + ], + "description": "The height of the image in pixels." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "examples": [ + 1024 + ], + "description": "The width of the image in pixels." + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-edit-plus-lora-gallery/face-to-full-portrait/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-plus-lora-gallery/face-to-full-portrait/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-plus-lora-gallery/face-to-full-portrait": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEditPlusLoraGalleryFaceToFullPortraitInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-plus-lora-gallery/face-to-full-portrait/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEditPlusLoraGalleryFaceToFullPortraitOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-edit-plus-lora-gallery/add-background", + "metadata": { + "display_name": "Qwen Image Edit Plus Lora Gallery", + "category": "image-to-image", + "description": "Add a realistic scene behind an object on a white background", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:42:25.643Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/monkey/Elzbk_5FjYriX_he9KRd4_34dd24f301c940f9a7387279be05cd76.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-edit-plus-lora-gallery/add-background", + "license_type": "commercial", + "date": "2025-11-11T19:08:44.764Z", + "group": { + "key": "qwen-image-edit-plus-lora-gallery", + "label": "Add Background" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image-edit-plus-lora-gallery/add-background", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-image-edit-plus-lora-gallery/add-background queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-image-edit-plus-lora-gallery/add-background", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/monkey/Elzbk_5FjYriX_he9KRd4_34dd24f301c940f9a7387279be05cd76.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-plus-lora-gallery/add-background", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-plus-lora-gallery/add-background/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url."
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImageEditPlusLoraGalleryAddBackgroundInput": { + "x-fal-order-properties": [ + "image_urls", + "image_size", + "guidance_scale", + "num_inference_steps", + "acceleration", + "negative_prompt", + "seed", + "sync_mode", + "enable_safety_checker", + "output_format", + "num_images", + "prompt", + "lora_scale" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Add an outdoor scene with mountains and road behind the car", + "Add a modern living room background behind the product", + "Add a natural outdoor setting with grass and trees as background" + ], + "title": "Prompt", + "type": "string", + "description": "Describe the background/scene you want to add behind the object. The model will remove the white background and add the specified environment.", + "default": "Remove white background and add a realistic scene behind the object" + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "Number of images to generate", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Image Size", + "description": "The size of the generated image. If not provided, the size of the final input image will be used." + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "title": "Acceleration", + "type": "string", + "description": "Acceleration level for image generation. 'regular' balances speed and quality.", + "default": "regular" + }, + "lora_scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Lora Scale", + "description": "The scale factor for the LoRA model. Controls the strength of the LoRA effect.", + "default": 1 + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output image", + "default": "png" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker for the generated image.", + "default": true + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and won't be saved in history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.", + "default": 1 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. Same seed with same prompt will produce same result." + }, + "image_urls": { + "examples": [ + [ + "https://v3b.fal.media/files/b/rabbit/YN3dXLQBWb2ch6V607Uuc_d808599bb92f4c808502a118697bdc1f.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "The URLs of the images to edit. 
Provide an image with a white or clean background.", + "items": { + "type": "string" + } + }, + "negative_prompt": { + "examples": [ + " " + ], + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt for the generation", + "default": " " + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 6 + } + }, + "title": "AddBackgroundInput", + "description": "Input model for Add Background endpoint - Remove white background and add a realistic scene", + "required": [ + "image_urls" + ] + }, + "QwenImageEditPlusLoraGalleryAddBackgroundOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/lion/d_xp44RvnuYYxioxBgAlX.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated/edited images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + } + }, + "title": "AddBackgroundOutput", + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "examples": [ + 1024 + ], + "description": "The height of the image in pixels." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "examples": [ + 1024 + ], + "description": "The width of the image in pixels." 
+ } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-edit-plus-lora-gallery/add-background/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-plus-lora-gallery/add-background/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-plus-lora-gallery/add-background": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEditPlusLoraGalleryAddBackgroundInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-plus-lora-gallery/add-background/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEditPlusLoraGalleryAddBackgroundOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/reve/fast/remix", + "metadata": { + "display_name": "Reve", + "category": "image-to-image", + "description": "Reve’s fast remix model lets you upload reference images and then combine/transform them via a text prompt at lightning speed!", + "status": "active", + "tags": [ + "image-to-image" + ], + "updated_at": "2026-01-26T21:42:26.894Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/kangaroo/QBJ4DIme_ALMK-SAeipWr_30f59e7c13274579a8a6a2b3b566b9aa.jpg", + "model_url": "https://fal.run/fal-ai/reve/fast/remix", + "license_type": "commercial", + "date": "2025-11-04T01:27:27.017Z", + "group": { + "key": "Reve", + "label": "Fast Remix" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/reve/fast/remix", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/reve/fast/remix queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/reve/fast/remix", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/kangaroo/QBJ4DIme_ALMK-SAeipWr_30f59e7c13274579a8a6a2b3b566b9aa.jpg", + "playgroundUrl": 
"https://fal.ai/models/fal-ai/reve/fast/remix", + "documentationUrl": "https://fal.ai/models/fal-ai/reve/fast/remix/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ReveFastRemixInput": { + "x-fal-order-properties": [ + "prompt", + "image_urls", + "aspect_ratio", + "num_images", + "output_format", + "sync_mode" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Dress the model in the clothes and hat. Add a cat to the scene and change the background to a Victorian era building." + ], + "maxLength": 2560, + "minLength": 1, + "description": "The text description of the desired image. May include XML img tags like 0 to refer to specific images by their index in the image_urls list.", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "description": "Number of images to generate", + "type": "integer", + "minimum": 1, + "maximum": 4, + "title": "Number of Images", + "examples": [ + 1 + ], + "default": 1 + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "3:2", + "2:3", + "4:3", + "3:4", + "1:1" + ], + "description": "The desired aspect ratio of the generated image. If not provided, will be smartly chosen by the model.", + "type": "string", + "title": "Aspect Ratio", + "examples": [ + "16:9" + ] + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "image_urls": { + "description": "List of URLs of reference images. Must provide between 1 and 6 images (inclusive). Each image must be less than 10 MB. 
Supports PNG, JPEG, WebP, AVIF, and HEIF formats.", + "type": "array", + "items": { + "type": "string" + }, + "max_file_size": 10485760, + "examples": [ + [ + "https://v3b.fal.media/files/b/monkey/lsPBOhBws_FnTzd5G9KZ9_seedream4_edit_input_4.png", + "https://v3b.fal.media/files/b/monkey/ZrW5ouDj8vjLtvl1Cj9l9_seedream4_edit_input_2.png", + "https://v3b.fal.media/files/b/elephant/sd0k6YhlQEKfR6d_hAmIH_seedream4_edit_input_3.png" + ] + ], + "title": "Reference Image URLs", + "min_height": 128, + "min_width": 128, + "max_height": 4096, + "max_width": 4096 + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "description": "Output format for the generated image.", + "type": "string", + "title": "Output Format", + "examples": [ + "png" + ], + "default": "png" + } + }, + "description": "Input for Reve image remixing", + "title": "ReveRemixInput", + "required": [ + "prompt", + "image_urls" + ] + }, + "ReveFastRemixOutput": { + "x-fal-order-properties": [ + "images" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/zebra/r0J_UFupv3BfooTwv2ifJ.png" + } + ] + ], + "description": "The remixed images", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + } + }, + "description": "Output for Reve image remixing", + "title": "ReveRemixOutput", + "required": [ + "images" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/reve/fast/remix/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
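Editor's note: the status path above (a GET with an optional `logs=1` query flag answering with a `QueueStatus`) recurs for every endpoint in this file. A hedged polling sketch for the remix endpoint; the fixed one-second interval and the `Key <FAL_KEY>` auth value are assumptions, as before.

```ts
// Hedged sketch: poll the status path above until the job completes.
type QueueState = 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED'

async function waitForRemix(requestId: string): Promise<void> {
  const url =
    `https://queue.fal.run/fal-ai/reve/fast/remix/requests/${requestId}/status?logs=1`
  for (;;) {
    const res = await fetch(url, {
      headers: { Authorization: `Key ${process.env.FAL_KEY}` }, // assumed scheme
    })
    const body = (await res.json()) as { status: QueueState; queue_position?: number }
    if (body.status === 'COMPLETED') return
    await new Promise((r) => setTimeout(r, 1_000)) // arbitrary poll interval
  }
}
```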
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/reve/fast/remix/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/reve/fast/remix": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ReveFastRemixInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/reve/fast/remix/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ReveFastRemixOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/reve/fast/edit", + "metadata": { + "display_name": "Reve", + "category": "image-to-image", + "description": "Reve’s fast edit model lets you upload an existing image and then transform it via a text prompt at lightning speed!", + "status": "active", + "tags": [ + "image-to-image" + ], + "updated_at": "2026-01-26T21:42:27.046Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/koala/wISCIqUHSbkQYw9wrvYSM_b49480bef5ce45a189f4d23de477524e.jpg", + "model_url": "https://fal.run/fal-ai/reve/fast/edit", + "license_type": "commercial", + "date": "2025-11-04T01:27:25.297Z", + "group": { + "key": "Reve", + "label": "Fast Edit" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/reve/fast/edit", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/reve/fast/edit queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/reve/fast/edit", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/koala/wISCIqUHSbkQYw9wrvYSM_b49480bef5ce45a189f4d23de477524e.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/reve/fast/edit", + "documentationUrl": "https://fal.ai/models/fal-ai/reve/fast/edit/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ReveFastEditInput": { + "x-fal-order-properties": [ + "prompt", + "image_url", + "num_images", + "output_format", + "sync_mode" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Make it nighttime with stars glistening behind the mountain" + ], + "maxLength": 2560, + "minLength": 1, + "description": "The text description of how to edit the provided image.", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "description": "Number of images to generate", + "type": "integer", + "minimum": 1, + "maximum": 4, + "title": "Number of Images", + "examples": [ + 1 + ], + "default": 1 + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "description": "Output format for the generated image.", + "type": "string", + "title": "Output Format", + "examples": [ + "png" + ], + "default": "png" + }, + "image_url": { + "description": "URL of the reference image to edit. Must be publicly accessible or base64 data URI. Supports PNG, JPEG, WebP, AVIF, and HEIF formats.", + "type": "string", + "x-fal": { + "min_width": 128, + "min_height": 128, + "timeout": 20, + "max_height": 4096, + "max_width": 4096, + "max_file_size": 10485760 + }, + "title": "Reference Image URL", + "examples": [ + "https://v3b.fal.media/files/b/rabbit/Wi1oWbMfigpUMP0w_i5fm_-WnGcaJCtfrT6Q2oms97E.png" + ], + "limit_description": "Max file size: 10.0MB, Min width: 128px, Min height: 128px, Max width: 4096px, Max height: 4096px, Timeout: 20.0s" + } + }, + "description": "Input for Reve fast image editing", + "title": "ReveFastEditInput", + "required": [ + "prompt", + "image_url" + ] + }, + "ReveFastEditOutput": { + "x-fal-order-properties": [ + "images" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/zebra/eTDkfnubKKq9S-hDxvH2g.png" + } + ] + ], + "description": "The edited images", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + } + }, + "description": "Output for Reve fast image editing", + "title": "ReveFastEditOutput", + "required": [ + "images" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + 
"description": "The width of the image in pixels.", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/reve/fast/edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/reve/fast/edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/reve/fast/edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ReveFastEditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/reve/fast/edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ReveFastEditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-apps-v2/outpaint", + "metadata": { + "display_name": "Image Outpaint", + "category": "image-to-image", + "description": "Directional outpainting. Choose edges to expand. left, right, top, or center (uniform all sides). 
Only expanded areas are generated; an optional zoom-out pulls the frame back by the chosen amount.", + "status": "active", + "tags": [ + "outpainting" + ], + "updated_at": "2026-01-26T21:42:27.407Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/kangaroo/_JfxHHU5PSR2RsM0r9O3S_549a1d78995c4babae440a2d345e621b.jpg", + "model_url": "https://fal.run/fal-ai/image-apps-v2/outpaint", + "license_type": "commercial", + "date": "2025-11-03T19:28:15.419Z", + "group": { + "key": "image-apps-v2", + "label": "Image Outpaint" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-apps-v2/outpaint", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-apps-v2/outpaint queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-apps-v2/outpaint", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/kangaroo/_JfxHHU5PSR2RsM0r9O3S_549a1d78995c4babae440a2d345e621b.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-apps-v2/outpaint", + "documentationUrl": "https://fal.ai/models/fal-ai/image-apps-v2/outpaint/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageAppsV2OutpaintInput": { + "x-fal-order-properties": [ + "image_url", + "expand_left", + "expand_right", + "expand_top", + "expand_bottom", + "zoom_out_percentage", + "prompt", + "num_images", + "enable_safety_checker", + "sync_mode", + "output_format" + ], + "type": "object", + "properties": { + "prompt": { + "description": "Optional prompt to guide the outpainting. If provided, it will be appended to the base outpaint instruction. Example: 'with a beautiful sunset in the background'", + "type": "string", + "maxLength": 500, + "title": "Prompt", + "default": "" + }, + "expand_right": { + "minimum": 0, + "description": "Number of pixels to add as black margin on the right side (0-700).", + "type": "integer", + "title": "Expand Right", + "maximum": 700, + "default": 0 + }, + "num_images": { + "minimum": 1, + "description": "Number of images to generate.", + "type": "integer", + "title": "Num Images", + "maximum": 4, + "default": 1 + }, + "zoom_out_percentage": { + "minimum": 0, + "description": "Percentage to zoom out the image. If set, the image will be scaled down by this percentage and black margins will be added to maintain original size. 
Example: 50 means the image will be 50% of original size with black margins filling the rest.", + "type": "number", + "title": "Zoom Out Percentage", + "maximum": 90, + "default": 20 + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "jpg", + "webp" + ], + "description": "The format of the output image.", + "type": "string", + "title": "Output Format", + "default": "png" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/koala/oei_-iPIYFnhdB8SxojND_qwen-edit-res.png" + ], + "title": "Image Url", + "type": "string", + "description": "Image URL to outpaint" + }, + "sync_mode": { + "description": "If True, the function will wait for the image to be generated and uploaded before returning the response. If False, the function will return immediately and the image will be generated asynchronously.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "expand_left": { + "minimum": 0, + "description": "Number of pixels to add as black margin on the left side (0-700).", + "type": "integer", + "title": "Expand Left", + "maximum": 700, + "default": 0 + }, + "expand_bottom": { + "minimum": 0, + "description": "Number of pixels to add as black margin on the bottom side (0-700).", + "type": "integer", + "title": "Expand Bottom", + "maximum": 700, + "default": 400 + }, + "expand_top": { + "minimum": 0, + "description": "Number of pixels to add as black margin on the top side (0-700).", + "type": "integer", + "title": "Expand Top", + "maximum": 700, + "default": 0 + } + }, + "title": "OutpaintInput", + "required": [ + "image_url" + ] + }, + "ImageAppsV2OutpaintOutput": { + "x-fal-order-properties": [ + "images" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/zebra/S4DmrY6pF9cI2GvSaONXZ.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "Outpainted image with extended scene", + "items": { + "$ref": "#/components/schemas/Image" + } + } + }, + "title": "OutpaintOutput", + "required": [ + "images" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the image in pixels.", + "examples": [ + 1024 + ] + }, + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. 
It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the image in pixels.", + "examples": [ + 1024 + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-apps-v2/outpaint/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/outpaint/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
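Editor's note: cancellation, per the path above, is a single PUT that answers with a `success` boolean. A short sketch for the outpaint endpoint, with the same assumed auth handling:

```ts
// Hedged sketch: cancel a queued outpaint request via the PUT path above.
async function cancelOutpaint(requestId: string): Promise<boolean> {
  const res = await fetch(
    `https://queue.fal.run/fal-ai/image-apps-v2/outpaint/requests/${requestId}/cancel`,
    { method: 'PUT', headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
  )
  const body = (await res.json()) as { success?: boolean }
  return body.success === true
}
```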
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/outpaint": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageAppsV2OutpaintInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/outpaint/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageAppsV2OutpaintOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-vision-upscaler", + "metadata": { + "display_name": "Flux Vision Upscaler", + "category": "image-to-image", + "description": "Flux Vision Upscaler for magnify/upscaling images with high fidelity and creativity.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:27.536Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/zebra/7DKYn-EKEd3lthAymIaZ4_54bca98af99240488e7d2fcc2a2dcad8.jpg", + "model_url": "https://fal.run/fal-ai/flux-vision-upscaler", + "license_type": "commercial", + "date": "2025-11-02T21:17:39.501Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-vision-upscaler", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-vision-upscaler queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-vision-upscaler", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/zebra/7DKYn-EKEd3lthAymIaZ4_54bca98af99240488e7d2fcc2a2dcad8.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-vision-upscaler", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-vision-upscaler/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
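Editor's note: the `QueueStatus` schema above is repeated verbatim for every model in this file, so one shared TypeScript type covers all of them. A direct transcription; optionality follows the schema's `required` list.

```ts
// Shared shape of every queue status response in this file, transcribed
// from the QueueStatus schema above.
interface QueueStatus {
  status: 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED'
  request_id: string
  response_url?: string
  status_url?: string
  cancel_url?: string
  logs?: Record<string, unknown>
  metrics?: Record<string, unknown>
  queue_position?: number
}
```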
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxVisionUpscalerInput": { + "x-fal-order-properties": [ + "image_url", + "upscale_factor", + "seed", + "creativity", + "guidance", + "steps", + "enable_safety_checker" + ], + "type": "object", + "properties": { + "guidance": { + "minimum": 1, + "description": "CFG/guidance scale (1-4). Controls how closely the model follows the prompt.", + "type": "number", + "title": "Guidance", + "maximum": 4, + "default": 1 + }, + "creativity": { + "minimum": 0, + "description": "The creativity of the model. The higher the creativity, the more the model will deviate from the original. Refers to the denoise strength of the sampling.", + "type": "number", + "title": "Creativity", + "maximum": 1, + "default": 0.3 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/gallery/NOCA_Mick-Thompson.resized.resized.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "The URL of the image to upscale." + }, + "upscale_factor": { + "minimum": 1, + "description": "The upscale factor (1-4x).", + "type": "number", + "title": "Upscale Factor", + "maximum": 4, + "default": 2 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "The seed to use for the upscale. If not provided, a random seed will be used." + }, + "steps": { + "minimum": 4, + "description": "Number of inference steps (4-50).", + "type": "integer", + "title": "Steps", + "maximum": 50, + "default": 20 + } + }, + "title": "Input", + "required": [ + "image_url" + ] + }, + "FluxVisionUpscalerOutput": { + "x-fal-order-properties": [ + "image", + "seed", + "timings", + "caption" + ], + "type": "object", + "properties": { + "image": { + "examples": [ + { + "height": 2048, + "file_size": 8842156, + "file_name": "20TZeUQtQ8oKgsCKXSL81_StableSR_00002_.png", + "content_type": "image/png", + "url": "https://v3b.fal.media/files/b/panda/20TZeUQtQ8oKgsCKXSL81_StableSR_00002_.png", + "width": 2048 + } + ], + "description": "The URL of the generated image.", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + }, + "caption": { + "examples": [ + "A highly detailed upscaled photograph featuring sharp edges and enhanced textures. The image shows improved clarity in fine details with natural color preservation and minimal artifacts, demonstrating the AI-enhanced resolution increase." + ], + "title": "Caption", + "type": "string", + "description": "The VLM-generated caption describing the upscaled image." + }, + "seed": { + "examples": [ + 42 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used to generate the image." + }, + "timings": { + "examples": [ + { + "inference": 52.8 + } + ], + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings", + "description": "The timings of the different steps in the workflow." 
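Editor's note: the upscaler input above is mostly bounded numeric fields (`upscale_factor` 1–4, `creativity` 0–1, `guidance` 1–4, `steps` 4–50). A hypothetical convenience helper, not part of the API, that keeps caller input inside those ranges before submitting:

```ts
// Hypothetical helper: clamp caller-supplied values to the ranges and
// defaults declared by FluxVisionUpscalerInput above.
function clampUpscalerParams(p: {
  upscale_factor?: number
  creativity?: number
  guidance?: number
  steps?: number
}) {
  const clamp = (v: number, lo: number, hi: number) => Math.min(hi, Math.max(lo, v))
  return {
    upscale_factor: clamp(p.upscale_factor ?? 2, 1, 4),
    creativity: clamp(p.creativity ?? 0.3, 0, 1),
    guidance: clamp(p.guidance ?? 1, 1, 4),
    steps: Math.round(clamp(p.steps ?? 20, 4, 50)),
  }
}
```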
+ } + }, + "title": "Output", + "required": [ + "image", + "seed", + "timings", + "caption" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "description": "The height of the image in pixels.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "description": "The mime type of the file.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "description": "The width of the image in pixels.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-vision-upscaler/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-vision-upscaler/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
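Editor's note: because every model in this file exposes the same four queue paths (submit, status, cancel, result) under `https://queue.fal.run`, the whole submit–poll–fetch cycle can be written once, generically over the endpoint id. A hedged sketch with simplified error handling; the auth value format and poll interval remain assumptions.

```ts
// Hedged generic helper: run any queue endpoint in this file to completion.
async function runQueued<TInput, TOutput>(
  endpointId: string, // e.g. 'fal-ai/flux-vision-upscaler'
  input: TInput,
): Promise<TOutput> {
  const headers = {
    Authorization: `Key ${process.env.FAL_KEY}`, // assumed value format
    'Content-Type': 'application/json',
  }
  const base = `https://queue.fal.run/${endpointId}`
  // 1. Submit (POST /<endpoint>), which answers with a QueueStatus.
  const submit = await fetch(base, {
    method: 'POST',
    headers,
    body: JSON.stringify(input),
  })
  const { request_id } = (await submit.json()) as { request_id: string }
  // 2. Poll GET /<endpoint>/requests/{id}/status until COMPLETED.
  for (;;) {
    const res = await fetch(`${base}/requests/${request_id}/status`, { headers })
    const { status } = (await res.json()) as { status: string }
    if (status === 'COMPLETED') break
    await new Promise((r) => setTimeout(r, 1_000))
  }
  // 3. Fetch the result from GET /<endpoint>/requests/{id}.
  const result = await fetch(`${base}/requests/${request_id}`, { headers })
  return (await result.json()) as TOutput
}
```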
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-vision-upscaler": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxVisionUpscalerInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-vision-upscaler/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxVisionUpscalerOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/emu-3.5-image/edit-image", + "metadata": { + "display_name": "Emu 3.5 Image", + "category": "image-to-image", + "description": "Edit images with a text prompt using Emu 3.5 Image", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:27.664Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/monkey/tQyERUtskH6UgoJDqvDof_a8481b37918840948487c1ae79050bab.jpg", + "model_url": "https://fal.run/fal-ai/emu-3.5-image/edit-image", + "license_type": "commercial", + "date": "2025-11-01T02:35:16.884Z", + "group": { + "key": "emu-3.5", + "label": "Edit Image" + }, + "highlighted": false, + "kind": "inference", + "stream_url": "https://fal.ai/models/fal-ai/emu-3.5-image/edit-image/stream", + "duration_estimate": 6, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/emu-3.5-image/edit-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/emu-3.5-image/edit-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/emu-3.5-image/edit-image", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/monkey/tQyERUtskH6UgoJDqvDof_a8481b37918840948487c1ae79050bab.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/emu-3.5-image/edit-image", + "documentationUrl": "https://fal.ai/models/fal-ai/emu-3.5-image/edit-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Emu35ImageEditImageInput": { + "title": "Emu35ImageEditInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Recreate this image in ukiyo-e style" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to edit the image." + }, + "resolution": { + "enum": [ + "480p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the output image.", + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "auto", + "21:9", + "16:9", + "4:3", + "3:2", + "1:1", + "2:3", + "3:4", + "9:16", + "9:21" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the output image.", + "default": "auto" + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output image.", + "default": "png" + }, + "image_url": { + "examples": [ + "https://v3b.fal.media/files/b/lion/iC4LKAESSVo4ug-XzmR11_e9cafdab-c8b4-4267-804e-230e3d0d0814.png" + ], + "title": "Image URL", + "type": "string", + "description": "The image to edit." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "Whether to return the image in sync mode.", + "default": false + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed for the inference." + } + }, + "x-fal-order-properties": [ + "prompt", + "resolution", + "aspect_ratio", + "enable_safety_checker", + "seed", + "output_format", + "sync_mode", + "image_url" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "Emu35ImageEditImageOutput": { + "title": "Emu35EditOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "height": 1168, + "file_name": "t4nYWb1Zk7Uc6x2nSLysb.jpg", + "content_type": "image/jpeg", + "url": "https://v3b.fal.media/files/b/monkey/t4nYWb1Zk7Uc6x2nSLysb.jpg", + "width": 784 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The edited image.", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "seed": { + "examples": [ + 1021074961 + ], + "title": "Seed", + "type": "integer", + "description": "The seed for the inference." + } + }, + "x-fal-order-properties": [ + "images", + "seed" + ], + "required": [ + "images", + "seed" + ] + }, + "ImageFile": { + "title": "ImageFile", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the image" + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the image" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
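Editor's note: for comparison with the schema above, here is `Emu35ImageEditImageInput` transcribed into a TypeScript type; the enum unions, required fields, and defaults come straight from the schema.

```ts
// Transcription of Emu35ImageEditImageInput above; prompt and image_url are
// the only required fields, everything else carries a schema default.
interface Emu35ImageEditInput {
  prompt: string
  image_url: string
  resolution?: '480p' | '720p' // default '720p'
  aspect_ratio?:
    | 'auto' | '21:9' | '16:9' | '4:3' | '3:2'
    | '1:1' | '2:3' | '3:4' | '9:16' | '9:21' // default 'auto'
  output_format?: 'jpeg' | 'png' | 'webp' // default 'png'
  sync_mode?: boolean // default false
  enable_safety_checker?: boolean // default true
  seed?: number
}
```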
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/emu-3.5-image/edit-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/emu-3.5-image/edit-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/emu-3.5-image/edit-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Emu35ImageEditImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/emu-3.5-image/edit-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Emu35ImageEditImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/chrono-edit", + "metadata": { + "display_name": "Chrono Edit", + "category": "image-to-image", + "description": "NVIDIA's Logically Consistent and Physics-Aware Image Editing Model", + "status": "active", + "tags": [ + "image-editing" + ], + "updated_at": "2026-01-26T21:42:28.568Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/kangaroo/uV-q_E3zuBgfebtaTzlhM_d462e607525e4f8d9412b9e254c530e6.jpg", + "model_url": "https://fal.run/fal-ai/chrono-edit", + "license_type": "commercial", + "date": "2025-10-30T23:45:17.418Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/chrono-edit", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/chrono-edit queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/chrono-edit", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/kangaroo/uV-q_E3zuBgfebtaTzlhM_d462e607525e4f8d9412b9e254c530e6.jpg", + "playgroundUrl": 
"https://fal.ai/models/fal-ai/chrono-edit", + "documentationUrl": "https://fal.ai/models/fal-ai/chrono-edit/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ChronoEditInput": { + "description": "Input model for ChronoEdit standard editing operations", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Add a surfer to the wave in the illustration." + ], + "description": "The prompt to edit the image.", + "type": "string", + "title": "Prompt" + }, + "resolution": { + "enum": [ + "480p", + "720p" + ], + "description": "The resolution of the output image.", + "type": "string", + "title": "Resolution", + "default": "480p" + }, + "enable_safety_checker": { + "description": "Whether to enable the safety checker.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "description": "The format of the output image.", + "type": "string", + "title": "Output Format", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://v3b.fal.media/files/b/zebra/yRvp9rTyDeDGHnbmtcsgK_original-wave.jpg" + ], + "description": "The image to edit.", + "type": "string", + "title": "Image URL" + }, + "turbo_mode": { + "description": "Enable turbo mode to use for faster inference.", + "type": "boolean", + "title": "Turbo Mode", + "default": true + }, + "num_temporal_reasoning_steps": { + "minimum": 2, + "maximum": 12, + "type": "integer", + "title": "Number of Temporal Reasoning Steps", + "description": "The number of temporal reasoning steps to perform.", + "default": 8 + }, + "sync_mode": { + "description": "Whether to return the image in sync mode.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 10, + "type": "number", + "title": "Guidance Scale", + "description": "The guidance scale for the inference.", + "default": 1 + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 50, + "type": "integer", + "title": "Number of Inference Steps", + "description": "The number of inference steps to perform.", + "default": 8 + }, + "enable_temporal_reasoning": { + "description": "Whether to enable temporal reasoning.", + "type": "boolean", + "title": "Enable Temporal Reasoning", + "default": false + }, + "enable_prompt_expansion": { + "examples": [ + true + ], + "description": "Whether to enable prompt expansion.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": true + }, + "seed": { + "description": "The seed for the inference.", + "type": "integer", + 
"title": "Seed" + } + }, + "title": "ChronoEditInput", + "x-fal-order-properties": [ + "image_url", + "prompt", + "num_inference_steps", + "guidance_scale", + "enable_prompt_expansion", + "enable_temporal_reasoning", + "num_temporal_reasoning_steps", + "resolution", + "enable_safety_checker", + "seed", + "output_format", + "sync_mode", + "turbo_mode" + ], + "required": [ + "image_url", + "prompt" + ] + }, + "ChronoEditOutput": { + "description": "Unified output model for all ChronoEdit operations", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "The user wants to add a surfer to the wave in the illustration while preserving the original ukiyo-e woodblock art style. The surfer should be depicted mid-action, crouched low on a modern-style surfboard, carving through the crest of the wave with one arm extended for balance and the other gripping the board. Their wavy hair and athletic physique should match the dynamic motion. The background must remain unchanged, including the iconic Mount Fuji and the traditional Japanese text, to maintain the artwork's historical aesthetic and composition." + ], + "description": "The prompt used for the inference.", + "type": "string", + "title": "Prompt" + }, + "images": { + "examples": [ + [ + { + "height": 768, + "file_name": "2_gRhwfsnmNKYtZ_dveyV.jpg", + "content_type": "image/jpeg", + "url": "https://v3b.fal.media/files/b/koala/2_gRhwfsnmNKYtZ_dveyV.jpg", + "width": 1152 + } + ] + ], + "description": "The edited image.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "seed": { + "description": "The seed for the inference.", + "type": "integer", + "title": "Seed" + } + }, + "title": "ChronoEditOutput", + "x-fal-order-properties": [ + "images", + "prompt", + "seed" + ], + "required": [ + "images", + "prompt", + "seed" + ] + }, + "ImageFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "ImageFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/chrono-edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/chrono-edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/chrono-edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ChronoEditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/chrono-edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ChronoEditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/gpt-image-1-mini/edit", + "metadata": { + "display_name": "GPT Image 1 Mini", + "category": "image-to-image", + "description": "GPT Image 1 mini combines OpenAI's advanced language capabilities, powered by GPT-5, with GPT Image 1 Mini for efficient image generation. ", + "status": "active", + "tags": [ + "image-to-image" + ], + "updated_at": "2026-01-26T21:42:36.574Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/lion/cG0uNtODkeDPtNDUXms1H_63d395581f334382bf6806b78849ffb0.jpg", + "model_url": "https://fal.run/fal-ai/gpt-image-1-mini/edit", + "license_type": "commercial", + "date": "2025-10-21T22:09:49.003Z", + "group": { + "key": "GPT-Image-1-Mini", + "label": "Edit" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/gpt-image-1-mini/edit", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/gpt-image-1-mini/edit queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/gpt-image-1-mini/edit", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/lion/cG0uNtODkeDPtNDUXms1H_63d395581f334382bf6806b78849ffb0.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/gpt-image-1-mini/edit", + "documentationUrl": "https://fal.ai/models/fal-ai/gpt-image-1-mini/edit/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "GptImage1MiniEditInput": { + "title": "EditImageRequestMini", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Dress the model in the clothes and hat. Add a cat to the scene and change the background to a Victorian era building." + ], + "description": "The prompt for image generation", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "description": "Number of images to generate", + "type": "integer", + "minimum": 1, + "title": "Number of Images", + "examples": [ + 1 + ], + "maximum": 4, + "default": 1 + }, + "image_size": { + "enum": [ + "auto", + "1024x1024", + "1536x1024", + "1024x1536" + ], + "description": "Aspect ratio for the generated image", + "type": "string", + "title": "Image Size", + "default": "auto" + }, + "background": { + "enum": [ + "auto", + "transparent", + "opaque" + ], + "description": "Background for the generated image", + "type": "string", + "title": "Background", + "default": "auto" + }, + "quality": { + "enum": [ + "auto", + "low", + "medium", + "high" + ], + "description": "Quality for the generated image", + "type": "string", + "title": "Quality", + "default": "auto" + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "description": "Output format for the images", + "type": "string", + "title": "Output Format", + "default": "png" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "image_urls": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/example_inputs/seedream4_edit_input_1.png", + "https://storage.googleapis.com/falserverless/example_inputs/seedream4_edit_input_2.png", + "https://storage.googleapis.com/falserverless/example_inputs/seedream4_edit_input_3.png", + "https://storage.googleapis.com/falserverless/example_inputs/seedream4_edit_input_4.png" + ] + ], + "description": "The URLs of the images to use as a reference for the generation.", + "type": "array", + "title": "Image URLs", + "items": { + "type": "string" + } + } + }, + "x-fal-order-properties": [ + "prompt", + "image_urls", + "image_size", + "background", + "quality", + "num_images", + "output_format", + "sync_mode" + ], + "required": [ + "prompt", + "image_urls" + ] + }, + "GptImage1MiniEditOutput": { + "title": "EditImageResponseMini", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "height": 1024, + "file_name": "NtVrlQjQEkG80Nz874MjH_cc9f505a28354629bb0951c4a8fe9b08.jpg", + "content_type": "image/jpeg", + "url": "https://v3b.fal.media/files/b/elephant/NtVrlQjQEkG80Nz874MjH_cc9f505a28354629bb0951c4a8fe9b08.jpg", + "width": 1024 + } + ] + ], + "description": "The generated images.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + } + }, + "x-fal-order-properties": [ + "images" + 
], + "required": [ + "images" + ] + }, + "ImageFile": { + "title": "ImageFile", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/gpt-image-1-mini/edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/gpt-image-1-mini/edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/gpt-image-1-mini/edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GptImage1MiniEditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/gpt-image-1-mini/edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GptImage1MiniEditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/reve/remix", + "metadata": { + "display_name": "Reve", + "category": "image-to-image", + "description": "Reve’s remix model lets you upload reference images and then combine/transform them via a text prompt", + "status": "active", + "tags": [ + "image-to-image" + ], + "updated_at": "2026-01-26T21:42:38.467Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/panda/dMdAAEUeDONnrgO8mADcY_7cd44920d5e44ebeaa2d125d41bc75bb.jpg", + "model_url": "https://fal.run/fal-ai/reve/remix", + "license_type": "commercial", + "date": "2025-10-17T18:32:26.653Z", + "group": { + "key": "Reve", + "label": "Remix" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/reve/remix", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/reve/remix queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/reve/remix", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/panda/dMdAAEUeDONnrgO8mADcY_7cd44920d5e44ebeaa2d125d41bc75bb.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/reve/remix", + "documentationUrl": "https://fal.ai/models/fal-ai/reve/remix/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ReveRemixInput": { + "x-fal-order-properties": [ + "prompt", + "image_urls", + "aspect_ratio", + "num_images", + "output_format", + "sync_mode" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Dress the model in the clothes and hat. 
Add a cat to the scene and change the background to a Victorian era building." + ], + "maxLength": 2560, + "minLength": 1, + "description": "The text description of the desired image. May include XML img tags like 0 to refer to specific images by their index in the image_urls list.", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "description": "Number of images to generate", + "type": "integer", + "minimum": 1, + "maximum": 4, + "title": "Number of Images", + "examples": [ + 1 + ], + "default": 1 + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "3:2", + "2:3", + "4:3", + "3:4", + "1:1" + ], + "description": "The desired aspect ratio of the generated image. If not provided, will be smartly chosen by the model.", + "type": "string", + "title": "Aspect Ratio", + "examples": [ + "16:9" + ] + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "image_urls": { + "description": "List of URLs of reference images. Must provide between 1 and 6 images (inclusive). Each image must be less than 10 MB. Supports PNG, JPEG, WebP, AVIF, and HEIF formats.", + "type": "array", + "items": { + "type": "string" + }, + "max_file_size": 10485760, + "examples": [ + [ + "https://v3b.fal.media/files/b/monkey/lsPBOhBws_FnTzd5G9KZ9_seedream4_edit_input_4.png", + "https://v3b.fal.media/files/b/monkey/ZrW5ouDj8vjLtvl1Cj9l9_seedream4_edit_input_2.png", + "https://v3b.fal.media/files/b/elephant/sd0k6YhlQEKfR6d_hAmIH_seedream4_edit_input_3.png" + ] + ], + "title": "Reference Image URLs", + "min_height": 128, + "min_width": 128, + "max_height": 4096, + "max_width": 4096 + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "description": "Output format for the generated image.", + "type": "string", + "title": "Output Format", + "examples": [ + "png" + ], + "default": "png" + } + }, + "description": "Input for Reve image remixing", + "title": "ReveRemixInput", + "required": [ + "prompt", + "image_urls" + ] + }, + "ReveRemixOutput": { + "x-fal-order-properties": [ + "images" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/zebra/r0J_UFupv3BfooTwv2ifJ.png" + } + ] + ], + "description": "The remixed images", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + } + }, + "description": "Output for Reve image remixing", + "title": "ReveRemixOutput", + "required": [ + "images" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/reve/remix/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/reve/remix/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/reve/remix": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ReveRemixInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/reve/remix/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ReveRemixOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/reve/edit", + "metadata": { + "display_name": "Reve", + "category": "image-to-image", + "description": "Reve’s edit model lets you upload an existing image and then transform it via a text prompt", + "status": "active", + "tags": [ + "image-to-image" + ], + "updated_at": "2026-01-26T21:42:38.719Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/lion/YAlxwxUbRgskrtV0PhkyL_d163391caef14a548907336e32899ee2.jpg", + "model_url": "https://fal.run/fal-ai/reve/edit", + "license_type": "commercial", + "date": "2025-10-17T18:17:32.788Z", + "group": { + "key": "Reve", + "label": "Edit" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/reve/edit", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/reve/edit queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/reve/edit", + "category": "image-to-image", + "thumbnailUrl": 
"https://v3b.fal.media/files/b/lion/YAlxwxUbRgskrtV0PhkyL_d163391caef14a548907336e32899ee2.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/reve/edit", + "documentationUrl": "https://fal.ai/models/fal-ai/reve/edit/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ReveEditInput": { + "x-fal-order-properties": [ + "prompt", + "image_url", + "num_images", + "output_format", + "sync_mode" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Give him a friend" + ], + "maxLength": 2560, + "minLength": 1, + "description": "The text description of how to edit the provided image.", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "description": "Number of images to generate", + "type": "integer", + "minimum": 1, + "maximum": 4, + "title": "Number of Images", + "examples": [ + 1 + ], + "default": 1 + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "description": "Output format for the generated image.", + "type": "string", + "title": "Output Format", + "examples": [ + "png" + ], + "default": "png" + }, + "image_url": { + "description": "URL of the reference image to edit. Must be publicly accessible or base64 data URI. 
Supports PNG, JPEG, WebP, AVIF, and HEIF formats.", + "type": "string", + "x-fal": { + "min_width": 128, + "min_height": 128, + "timeout": 20, + "max_height": 4096, + "max_width": 4096, + "max_file_size": 10485760 + }, + "title": "Reference Image URL", + "examples": [ + "https://v3b.fal.media/files/b/koala/sZE6zNTKjOKc4kcUdVlu__26bac54c-3e94-43e9-aeff-f2efc2631ef0.webp" + ], + "limit_description": "Max file size: 10.0MB, Min width: 128px, Min height: 128px, Max width: 4096px, Max height: 4096px, Timeout: 20.0s" + } + }, + "description": "Input for Reve image editing", + "title": "ReveEditInput", + "required": [ + "prompt", + "image_url" + ] + }, + "ReveEditOutput": { + "x-fal-order-properties": [ + "images" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/tiger/4mt5HxYSH-YIE3vhqV8L9.png" + } + ] + ], + "description": "The edited images", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + } + }, + "description": "Output for Reve image editing", + "title": "ReveEditOutput", + "required": [ + "images" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/reve/edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/reve/edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/reve/edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ReveEditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/reve/edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ReveEditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image2pixel", + "metadata": { + "display_name": "Image2Pixel", + "category": "image-to-image", + "description": "Turn images into pixel-perfect retro art", + "status": "active", + "tags": [ + "post-processing", + "pixel-art" + ], + "updated_at": "2026-01-26T21:42:40.056Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/penguin/9MSg8Fm_djrKJ76DtwZrx_0ebf4aa3ab1540298b3fcf4593cfd606.jpg", + "model_url": "https://fal.run/fal-ai/image2pixel", + "license_type": "commercial", + "date": "2025-10-14T14:01:05.292Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image2pixel", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image2pixel queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image2pixel", + "category": "image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/penguin/9MSg8Fm_djrKJ76DtwZrx_0ebf4aa3ab1540298b3fcf4593cfd606.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image2pixel", + "documentationUrl": "https://fal.ai/models/fal-ai/image2pixel/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Image2pixelInput": { + "title": "Image2PixelInput", + "type": "object", + "properties": { + "cleanup_morph": { + "description": "Apply morphological operations to remove noise.", + "type": "boolean", + "title": "Cleanup Morph", + "default": false + }, + "auto_color_detect": { + "description": "Enable automatic detection of optimal number of colors.", + "type": "boolean", + "title": "Auto Color Detect", + "default": false + }, + "alpha_threshold": { + "minimum": 0, + "title": "Alpha Threshold", + "type": "integer", + "maximum": 255, + "description": "Alpha binarization threshold (0-255).", + "default": 128 + }, + "snap_grid": { + "description": "Align output to the pixel grid.", + "type": "boolean", + "title": "Snap Grid", + "default": true + }, + "fixed_palette": { + "description": "Optional fixed color palette as hex strings (e.g., ['#000000', '#ffffff']).", + "type": "array", + "title": "Fixed Palette", + "items": { + "type": "string" + } + }, + "scale": { + "minimum": 1, + "title": "Scale", + "type": "integer", + "maximum": 64, + "description": "Force a specific pixel scale. If None, auto-detect." + }, + "cleanup_jaggy": { + "description": "Remove isolated diagonal pixels (jaggy edge cleanup).", + "type": "boolean", + "title": "Cleanup Jaggy", + "default": false + }, + "trim_borders": { + "description": "Trim borders of the image.", + "type": "boolean", + "title": "Trim Borders", + "default": false + }, + "background_tolerance": { + "minimum": 0, + "title": "Background Tolerance", + "type": "integer", + "maximum": 255, + "description": "Background tolerance (0-255).", + "default": 0 + }, + "detect_method": { + "enum": [ + "auto", + "runs", + "edge" + ], + "description": "Scale detection method to use.", + "type": "string", + "title": "Detect Method", + "default": "auto" + }, + "transparent_background": { + "description": "Remove background of the image. This will check for contiguous color regions from the edges after correction and make them transparent.", + "type": "boolean", + "title": "Transparent Background", + "default": false + }, + "downscale_method": { + "enum": [ + "dominant", + "median", + "mode", + "mean", + "content-adaptive" + ], + "description": "Downscaling method to produce the pixel-art output.", + "type": "string", + "title": "Downscale Method", + "default": "dominant" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/image2pixel-input.jpg" + ], + "description": "The image URL to process into improved pixel art", + "type": "string", + "title": "Image Url" + }, + "background_mode": { + "enum": [ + "edges", + "corners", + "midpoints" + ], + "description": "Controls where to flood-fill from when removing the background.", + "type": "string", + "title": "Background Mode", + "default": "corners" + }, + "max_colors": { + "minimum": 1, + "title": "Max Colors", + "type": "integer", + "maximum": 256, + "description": "Maximum number of colors in the output palette. 
Set None to disable limit.", + "default": 32 + }, + "dominant_color_threshold": { + "minimum": 0, + "title": "Dominant Color Threshold", + "type": "number", + "maximum": 1, + "description": "Dominant color threshold (0.0-1.0).", + "default": 0.05 + } + }, + "x-fal-order-properties": [ + "image_url", + "max_colors", + "auto_color_detect", + "fixed_palette", + "detect_method", + "scale", + "downscale_method", + "trim_borders", + "transparent_background", + "cleanup_morph", + "cleanup_jaggy", + "snap_grid", + "alpha_threshold", + "dominant_color_threshold", + "background_tolerance", + "background_mode", + "sync_mode" + ], + "required": [ + "image_url" + ] + }, + "Image2pixelOutput": { + "title": "Image2PixelOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "height": 1008, + "file_name": "image2pixel-output.png", + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/example_outputs/image2pixel-output.png", + "width": 1008 + }, + { + "height": 48, + "file_name": "image2pixel-output-scaled.png", + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/example_outputs/image2pixel-output-scaled.png", + "width": 48 + } + ] + ], + "description": "The processed pixel-art image (PNG) and the scaled image (PNG).", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "num_colors": { + "examples": [ + 10 + ], + "description": "The number of colors in the processed media.", + "type": "integer", + "title": "Num Colors" + }, + "palette": { + "examples": [ + [ + "#000000", + "#252524", + "#282826", + "#2b2b2a", + "#323232", + "#4c4c4c", + "#d57322", + "#dbdbdc", + "#f6922b", + "#fefefe" + ] + ], + "description": "The palette of the processed media.", + "type": "array", + "title": "Palette", + "items": { + "type": "string" + } + }, + "pixel_scale": { + "examples": [ + 21 + ], + "description": "The detected pixel scale of the input.", + "type": "integer", + "title": "Pixel Scale" + } + }, + "x-fal-order-properties": [ + "pixel_scale", + "palette", + "num_colors", + "images" + ], + "required": [ + "pixel_scale", + "palette", + "num_colors", + "images" + ] + }, + "ImageFile": { + "title": "ImageFile", + "type": "object", + "properties": { + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image2pixel/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image2pixel/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/image2pixel": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Image2pixelInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image2pixel/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Image2pixelOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/dreamomni2/edit", + "metadata": { + "display_name": "DreamOmni2", + "category": "image-to-image", + "description": "DreamOmni2 is a unified multimodal model for text and image guided image editing.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:40.432Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/panda/c4tQDAqMyMQ_6-LNFzCyj_13c4f35481fc4ebaaf4fddf9a5d86ddc.jpg", + "model_url": "https://fal.run/fal-ai/dreamomni2/edit", + "license_type": "commercial", + "date": "2025-10-10T22:13:05.899Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/dreamomni2/edit", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/dreamomni2/edit queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/dreamomni2/edit", + "category": 
"image-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/panda/c4tQDAqMyMQ_6-LNFzCyj_13c4f35481fc4ebaaf4fddf9a5d86ddc.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/dreamomni2/edit", + "documentationUrl": "https://fal.ai/models/fal-ai/dreamomni2/edit/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Dreamomni2EditInput": { + "title": "DreamOmni2Request", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Replace the first image have the same image style as the second image." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to edit the image." + }, + "image_urls": { + "examples": [ + [ + "https://v3b.fal.media/files/b/koala/HB33rtG0ue7KzcIdQOTTX_dreamomni_ref_0.jpg", + "https://v3b.fal.media/files/b/koala/BJMlXeNzOgGzyoO7XyGxr_dreamomni_ref_1.jpg" + ] + ], + "title": "You can use only with to 2 images.", + "type": "array", + "description": "List of URLs of input images for editing.", + "items": { + "type": "string" + } + } + }, + "x-fal-order-properties": [ + "image_urls", + "prompt" + ], + "required": [ + "image_urls", + "prompt" + ] + }, + "Dreamomni2EditOutput": { + "title": "DreamOmni2Response", + "type": "object", + "properties": { + "image": { + "examples": [ + { + "file_size": 1473707, + "file_name": "c9ab07096fdd47269a60bc556e01132b.png", + "content_type": "image/png", + "url": "https://v3b.fal.media/files/b/koala/prmop69b1g5lNFPE4RbCb_c9ab07096fdd47269a60bc556e01132b.png" + } + ], + "title": "Image", + "description": "Generated image", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + } + }, + "x-fal-order-properties": [ + "image" + ], + "required": [ + "image" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." 
+ }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/dreamomni2/edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/dreamomni2/edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/dreamomni2/edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Dreamomni2EditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/dreamomni2/edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Dreamomni2EditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-edit-plus-lora", + "metadata": { + "display_name": "Qwen Image Edit Plus Lora", + "category": "image-to-image", + "description": "LoRA endpoint for the Qwen Image Edit Plus model.", + "status": "active", + "tags": [ + "image-to-image", + "image-editing" + ], + "updated_at": "2026-01-26T21:42:45.985Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/penguin/X3V08aAYEqPmeSvVdLgN9_6be5ff6349c9459d92e7a5d7db8dadcc.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-edit-plus-lora", + "license_type": "commercial", + "date": "2025-10-03T21:13:29.106Z", + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/qwen-image-edit-plus-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/qwen-image-edit-plus-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for 
fal-ai/qwen-image-edit-plus-lora", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-image-edit-plus-lora queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-image-edit-plus-lora", + "category": "image-to-image", + "thumbnailUrl": "https://v3.fal.media/files/penguin/X3V08aAYEqPmeSvVdLgN9_6be5ff6349c9459d92e7a5d7db8dadcc.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-plus-lora", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-plus-lora/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImageEditPlusLoraInput": { + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "image_urls", + "negative_prompt", + "acceleration", + "loras" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Close shot of a woman standing next to this car on this highway" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the image with" + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image. If not provided, the size of the final input image will be used to calculate the size of the output image." + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "description": "Acceleration level for image generation. Options: 'none', 'regular'. Higher acceleration increases speed. 'regular' balances speed and quality.", + "type": "string", + "examples": [ + "regular" + ], + "title": "Acceleration", + "default": "regular" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "loras": { + "description": "\n The LoRAs to use for the image generation. 
You can use up to 3 LoRAs\n and they will be merged together to generate the final image.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "examples": [], + "title": "Loras", + "default": [] + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 4 + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 28 + }, + "image_urls": { + "examples": [ + [ + "https://v3.fal.media/files/monkey/i3saq4bAPXSIl08nZtq9P_ec535747aefc4e31943136a6d8587075.png", + "https://v3.fal.media/files/penguin/BCOZp6teRhSQFuOXpbBOa_da8ef9b4982347a2a62a516b737d4f21.png", + "https://v3.fal.media/files/tiger/sCoZhBksx9DvwSR4_U3_C_3d1f581441874005908addeae9c10d0f.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "The URLs of the images to edit.", + "items": { + "type": "string" + } + }, + "negative_prompt": { + "examples": [ + " " + ], + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt for the generation", + "default": " " + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + } + }, + "title": "BaseQwenEditImagePlusLoRAInput", + "required": [ + "prompt", + "image_urls" + ] + }, + "QwenImageEditPlusLoraOutput": { + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "examples": [ + [ + { + "height": 768, + "content_type": "image/jpeg", + "url": "https://v3.fal.media/files/zebra/mMW8_S5PeGuDXLTfIKCpG.png", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + } + }, + "title": "QwenImageOutput", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "LoraWeight": { + "x-fal-order-properties": [ + "path", + "scale" + ], + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Scale", + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + } + }, + "title": "LoraWeight", + "required": [ + "path" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "title": "Image", + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-edit-plus-lora/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-plus-lora/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-plus-lora": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEditPlusLoraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-plus-lora/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEditPlusLoraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/lucidflux", + "metadata": { + "display_name": "Lucidflux", + "category": "image-to-image", + "description": "LucidFlux for upscaling images with very high fidelity", + "status": "active", + "tags": [ + "image-to-image" + ], + "updated_at": "2026-01-26T21:42:46.118Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/koala/IoOnMLFUyTCBXNJfw8p6d_04001a3f68e34de687b6de1a73d7e1de.jpg", + "model_url": "https://fal.run/fal-ai/lucidflux", + "license_type": "commercial", + "date": "2025-10-03T19:54:06.907Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/lucidflux", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/lucidflux queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/lucidflux", + "category": "image-to-image", + "thumbnailUrl": "https://v3.fal.media/files/koala/IoOnMLFUyTCBXNJfw8p6d_04001a3f68e34de687b6de1a73d7e1de.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/lucidflux", + "documentationUrl": "https://fal.ai/models/fal-ai/lucidflux/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "LucidfluxInput": { + "title": "LucidFluxRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "restore this image into high-quality, clean, high-resolution result" + ], + "description": "The prompt to edit the image.", + "type": "string", + "title": "Prompt" + }, + "guidance": { + "minimum": 1, + "description": "The guidance to use for the diffusion process.", + "type": "number", + "maximum": 30, + "title": "Guidance", + "default": 4 + }, + "target_height": { + "description": "The height of the output image.", + "type": "integer", + "minimum": 512, + "maximum": 1024, + "title": "Target Height", + "default": 1024 + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/elephant/6FLKRWYztzOKDKV-v1VfK_3.png" + ], + "description": "The URL of the image to edit.", + "type": "string", + "title": "Image URL" + }, + "target_width": { + "description": "The width of the output image.", + "type": "integer", + "minimum": 512, + "maximum": 1024, + "title": "Target Width", + "default": 1024 + }, + "num_inference_steps": { + "minimum": 2, + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "type": "integer", + "maximum": 50, + "title": "Num Inference Steps", + "default": 50 + }, + "seed": { + "description": "Seed used for random number generation", + "type": "integer", + "title": "Seed", + "default": 42 + } + }, + "x-fal-order-properties": [ + "image_url", + "prompt", + "target_width", + "target_height", + "num_inference_steps", + "guidance", + "seed" + ], + "required": [ + "image_url", + "prompt" + ] + }, + "LucidfluxOutput": { + "title": "LucidFluxResponse", + "type": "object", + "properties": { + "image": { + "examples": [ + { + "file_size": 123744, + "file_name": "img_result.jpeg", + "content_type": "application/octet-stream", + "url": "https://v3b.fal.media/files/b/penguin/z34N19Fw1HIcAfmmrK8El_img_result.jpeg" + } + ], + "description": "Generated image", + "title": "Image", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + }, + "seed": { + "description": "Seed used for random number generation", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "image", + "seed" + ], + "required": [ + "image", + "seed" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "type": "integer", + "title": "Height" + }, + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/lucidflux/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/lucidflux/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/lucidflux": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LucidfluxInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/lucidflux/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LucidfluxOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-edit/image-to-image", + "metadata": { + "display_name": "Qwen Image Edit", + "category": "image-to-image", + "description": "Image to Image Endpoint for Qwen's Image Editing model. 
Has superior text editing capabilities.", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:42:46.629Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/lion/y1wOAl3dW0LE43drzjWY3_71e9575367b14485b4a5ca83492c0e82.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-edit/image-to-image", + "license_type": "commercial", + "date": "2025-09-30T17:20:50.035Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image-edit/image-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-image-edit/image-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-image-edit/image-to-image", + "category": "image-to-image", + "thumbnailUrl": "https://v3.fal.media/files/lion/y1wOAl3dW0LE43drzjWY3_71e9575367b14485b4a5ca83492c0e82.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-edit/image-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-edit/image-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImageEditImageToImageInput": { + "title": "BaseQwenEditImg2ImgInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Change bag to apple macbook" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the image with" + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image." + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "description": "Acceleration level for image generation. Options: 'none', 'regular'. Higher acceleration increases speed. 
'regular' balances speed and quality.", + "type": "string", + "examples": [ + "regular" + ], + "title": "Acceleration", + "default": "regular" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/koala/oei_-iPIYFnhdB8SxojND_qwen-edit-res.png" + ], + "title": "Image URL", + "type": "string", + "description": "The URL of the image to edit." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "strength": { + "minimum": 0.01, + "maximum": 1, + "type": "number", + "title": "Strength", + "description": "Strength of the image-to-image transformation. Lower values preserve more of the original image.", + "default": 0.94 + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 4 + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 30 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "negative_prompt": { + "examples": [ + "blurry, ugly" + ], + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt for the generation", + "default": " " + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "image_url", + "negative_prompt", + "acceleration", + "strength" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "QwenImageEditImageToImageOutput": { + "title": "QwenImageOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "examples": [ + [ + { + "height": 768, + "content_type": "image/jpeg", + "url": "https://v3.fal.media/files/elephant/P-YCIAg6wtFn1hsF34fzL_qwen-edit.png", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-edit/image-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit/image-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit/image-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEditImageToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit/image-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEditImageToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan-25-preview/image-to-image", + "metadata": { + "display_name": "Wan 2.5 Image to Image", + "category": "image-to-image", + "description": "Wan 2.5 image-to-image model.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:47.474Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/panda/E-LG-LvtKaaYeyx_FPqmu_c75bdd0f332b455e80a7cf14a3a44725.jpg", + "model_url": "https://fal.run/fal-ai/wan-25-preview/image-to-image", + "license_type": "commercial", + "date": "2025-09-25T20:59:52.034Z", + "group": { + "key": "wan-25-preview", + "label": "Image To Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan-25-preview/image-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan-25-preview/image-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan-25-preview/image-to-image", + "category": "image-to-image", + "thumbnailUrl": "https://v3.fal.media/files/panda/E-LG-LvtKaaYeyx_FPqmu_c75bdd0f332b455e80a7cf14a3a44725.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan-25-preview/image-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/wan-25-preview/image-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Wan25PreviewImageToImageInput": { + "description": "Input for image editing", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Reimagine the scene under a raging thunderstorm at night: lightning forks across the sky, illuminating the samurai in stark flashes of white light." + ], + "description": "The text prompt describing how to edit the image. Max 2000 characters.", + "type": "string", + "title": "Prompt", + "minLength": 1 + }, + "num_images": { + "description": "Number of images to generate. Values from 1 to 4.", + "type": "integer", + "minimum": 1, + "maximum": 4, + "title": "Num Images", + "examples": [ + 1 + ], + "default": 1 + }, + "image_size": { + "examples": [ + "square", + "landscape_16_9", + "portrait_16_9", + { + "height": 1280, + "width": 1280 + } + ], + "description": "The size of the generated image. Width and height must be between 384 and 1440 pixels.", + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "default": "square" + }, + "enable_safety_checker": { + "examples": [ + true + ], + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "seed": { + "description": "Random seed for reproducibility. If None, a random seed is chosen.", + "type": "integer", + "title": "Seed" + }, + "image_urls": { + "description": "URLs of images to edit. For single-image editing, provide 1 URL. For multi-reference generation, provide up to 2 URLs. If more than 2 URLs are provided, only the first 2 will be used.", + "type": "array", + "items": { + "type": "string" + }, + "max_file_size": 26214400, + "examples": [ + [ + "https://v3.fal.media/files/penguin/4VZ7I1ZK5XNv33LV2JBxg.png" + ] + ], + "title": "Image Urls", + "min_height": 384, + "min_width": 384, + "max_height": 5000, + "max_width": 5000 + }, + "negative_prompt": { + "examples": [ + "low resolution, error, worst quality, low quality, defects" + ], + "description": "Negative prompt to describe content to avoid. Max 500 characters.", + "type": "string", + "title": "Negative Prompt" + } + }, + "title": "ImageToImageInput", + "x-fal-order-properties": [ + "prompt", + "image_urls", + "negative_prompt", + "image_size", + "num_images", + "seed", + "enable_safety_checker" + ], + "required": [ + "prompt", + "image_urls" + ] + }, + "Wan25PreviewImageToImageOutput": { + "description": "Output for image editing", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "content_type": "image/png", + "url": "https://v3.fal.media/files/rabbit/BM4J8xpV5ogtOQE9xGtft.png" + } + ] + ], + "description": "The edited images", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "seeds": { + "examples": [ + [ + 175932751 + ] + ], + "description": "The seeds used for each generated image", + "type": "array", + "title": "Seeds", + "items": { + "type": "integer" + } + }, + "actual_prompt": { + "examples": [ + "Reimagine the scene under a raging thunderstorm at night: lightning forks across the sky, illuminating the samurai in stark flashes of white light." 
+ ], + "description": "The original prompt (prompt expansion is not available for image editing)", + "type": "string", + "title": "Actual Prompt" + } + }, + "title": "ImageToImageOutput", + "x-fal-order-properties": [ + "images", + "seeds", + "actual_prompt" + ], + "required": [ + "images", + "seeds" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "title": "Height", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "title": "Width", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "ImageFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "ImageFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan-25-preview/image-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-25-preview/image-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/wan-25-preview/image-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Wan25PreviewImageToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-25-preview/image-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Wan25PreviewImageToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-edit-plus", + "metadata": { + "display_name": "Qwen Image Edit Plus", + "category": "image-to-image", + "description": "Endpoint for Qwen's Image Editing Plus model also known as Qwen-Image-Edit-2509. Has superior text editing capabilities and multi-image support.", + "status": "active", + "tags": [ + "image-editing", + "image-to-image", + "high-quality-text" + ], + "updated_at": "2026-01-26T21:42:48.440Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/elephant/pGXxmNi6TrKTe864jBKW8_1bb43c6eab2349ab9c8cefbb24f3fd1b.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-edit-plus", + "license_type": "commercial", + "date": "2025-09-22T23:20:39.729Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image-edit-plus", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-image-edit-plus queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-image-edit-plus", + "category": "image-to-image", + "thumbnailUrl": "https://v3.fal.media/files/elephant/pGXxmNi6TrKTe864jBKW8_1bb43c6eab2349ab9c8cefbb24f3fd1b.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-plus", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-plus/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImageEditPlusInput": { + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "image_urls", + "negative_prompt", + "acceleration" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Close shot of a woman standing in next to this car on this highway" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the image with" + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "description": "The number of images to generate.", + "maximum": 4, + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "square_hd" + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "description": "Acceleration level for image generation. Options: 'none', 'regular'. Higher acceleration increases speed. 'regular' balances speed and quality.", + "type": "string", + "examples": [ + "regular" + ], + "title": "Acceleration", + "default": "regular" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance scale", + "type": "number", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "maximum": 20, + "default": 4 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "image_urls": { + "examples": [ + [ + "https://v3.fal.media/files/monkey/i3saq4bAPXSIl08nZtq9P_ec535747aefc4e31943136a6d8587075.png", + "https://v3.fal.media/files/penguin/BCOZp6teRhSQFuOXpbBOa_da8ef9b4982347a2a62a516b737d4f21.png", + "https://v3.fal.media/files/tiger/sCoZhBksx9DvwSR4_U3_C_3d1f581441874005908addeae9c10d0f.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "The URLs of the images to edit.", + "items": { + "type": "string" + } + }, + "negative_prompt": { + "examples": [ + " " + ], + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt for the generation", + "default": " " + }, + "num_inference_steps": { + "minimum": 2, + "title": "Num Inference Steps", + "type": "integer", + "description": "The number of inference steps to perform.", + "maximum": 100, + "default": 50 + } + }, + "title": "BaseQwenEditImagePlusInput", + "required": [ + "prompt", + "image_urls" + ] + }, + "QwenImageEditPlusOutput": { + "x-fal-order-properties": [ + "images", + 
"timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "examples": [ + [ + { + "height": 768, + "content_type": "image/jpeg", + "url": "https://v3.fal.media/files/zebra/mMW8_S5PeGuDXLTfIKCpG.png", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + } + }, + "title": "QwenImageOutput", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "title": "Image", + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-edit-plus/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-plus/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-plus": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEditPlusInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-plus/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEditPlusOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/seedvr/upscale/image", + "metadata": { + "display_name": "SeedVR2", + "category": "image-to-image", + "description": "Use SeedVR2 to upscale your images", + "status": "active", + "tags": [ + "upscale", + "image-to-image" + ], + "updated_at": "2026-01-26T21:42:50.089Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/panda/_-1rRy0_I6w-fbt3q0RDL_2f47fece6e2b433994dda482e8d20bb9.jpg", + "model_url": "https://fal.run/fal-ai/seedvr/upscale/image", + "license_type": "commercial", + "date": "2025-09-22T18:19:28.476Z", + "group": { + "key": "seedvr2", + "label": "Upscale Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/seedvr/upscale/image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/seedvr/upscale/image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/seedvr/upscale/image", + "category": "image-to-image", + "thumbnailUrl": "https://v3.fal.media/files/panda/_-1rRy0_I6w-fbt3q0RDL_2f47fece6e2b433994dda482e8d20bb9.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/seedvr/upscale/image", + "documentationUrl": "https://fal.ai/models/fal-ai/seedvr/upscale/image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "SeedvrUpscaleImageInput": { + "title": "SeedVRImageInput", + "type": "object", + "properties": { + "upscale_mode": { + "enum": [ + "target", + "factor" + ], + "title": "Upscale Mode", + "type": "string", + "description": "The mode to use for the upscale. 
If 'target', the upscale factor will be calculated based on the target resolution. If 'factor', the upscale factor will be used directly.", + "default": "factor" + }, + "noise_scale": { + "description": "The noise scale to use for the generation process.", + "type": "number", + "minimum": 0, + "maximum": 1, + "title": "Noise Scale", + "multipleOf": 0.001, + "default": 0.1 + }, + "output_format": { + "enum": [ + "png", + "jpg", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output image.", + "default": "jpg" + }, + "target_resolution": { + "enum": [ + "720p", + "1080p", + "1440p", + "2160p" + ], + "title": "Target Resolution", + "type": "string", + "description": "The target resolution to upscale to when `upscale_mode` is `target`.", + "default": "1080p" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/seedvr2/image_in.png" + ], + "title": "Image Url", + "type": "string", + "description": "The input image to be processed" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "upscale_factor": { + "minimum": 1, + "maximum": 10, + "type": "number", + "title": "Upscale Factor", + "description": "Upscaling factor to be used. Will multiply the dimensions with this factor when `upscale_mode` is `factor`.", + "default": 2 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "The random seed used for the generation process." + } + }, + "x-fal-order-properties": [ + "image_url", + "upscale_mode", + "upscale_factor", + "target_resolution", + "seed", + "noise_scale", + "output_format", + "sync_mode" + ], + "required": [ + "image_url" + ] + }, + "SeedvrUpscaleImageOutput": { + "title": "SeedVRImageOutput", + "type": "object", + "properties": { + "image": { + "examples": [ + { + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/example_outputs/seedvr2/image_out.png" + } + ], + "description": "Upscaled image file after processing", + "$ref": "#/components/schemas/ImageFile" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The random seed used for the generation process." + } + }, + "x-fal-order-properties": [ + "image", + "seed" + ], + "required": [ + "image", + "seed" + ] + }, + "ImageFile": { + "title": "ImageFile", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes." + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the image" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the image" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/seedvr/upscale/image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/seedvr/upscale/image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/seedvr/upscale/image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SeedvrUpscaleImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/seedvr/upscale/image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SeedvrUpscaleImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-apps-v2/product-holding", + "metadata": { + "display_name": "Product Holding", + "category": "image-to-image", + "description": "Place products naturally in a person’s hands for realistic marketing visuals.", + "status": "active", + "tags": [ + "product", + "marketing" + ], + "updated_at": "2026-01-26T21:42:50.869Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/zebra/7dJ2m31gOP0J0m8WZJ46I_1625d9610207407292eac394cc4a3026.jpg", + "model_url": "https://fal.run/fal-ai/image-apps-v2/product-holding", + "license_type": "commercial", + "date": "2025-09-19T02:20:46.300Z", + "group": { + "key": "image-apps-v2", + "label": "Product Holding" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-apps-v2/product-holding", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-apps-v2/product-holding queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-apps-v2/product-holding", + "category": 
"image-to-image", + "thumbnailUrl": "https://v3.fal.media/files/zebra/7dJ2m31gOP0J0m8WZJ46I_1625d9610207407292eac394cc4a3026.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-apps-v2/product-holding", + "documentationUrl": "https://fal.ai/models/fal-ai/image-apps-v2/product-holding/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageAppsV2ProductHoldingInput": { + "x-fal-order-properties": [ + "person_image_url", + "product_image_url", + "aspect_ratio" + ], + "type": "object", + "properties": { + "aspect_ratio": { + "description": "Aspect ratio for 4K output", + "$ref": "#/components/schemas/AspectRatio" + }, + "product_image_url": { + "examples": [ + "https://v3.fal.media/files/tiger/WqtPStHkOu6W0lZcIlXD8_156e3c91cb3a4d12b7380cc43b5e4c67.png" + ], + "title": "Product Image Url", + "type": "string", + "description": "Image URL of the product to be held by the person" + }, + "person_image_url": { + "examples": [ + "https://v3.fal.media/files/panda/oMob58qZJRtbDs5l45QKT_e3a1512c455d425fab2d62e07a51c506.png" + ], + "title": "Person Image Url", + "type": "string", + "description": "Image URL of the person who will hold the product" + } + }, + "title": "ProductHoldingInput", + "required": [ + "person_image_url", + "product_image_url" + ] + }, + "ImageAppsV2ProductHoldingOutput": { + "x-fal-order-properties": [ + "images", + "inference_time_ms" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/penguin/JR-DlVO6P28LvnvMU2mu5_7078ec349a194e6c8860cae283bd3a0d.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "Person holding the product naturally", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "inference_time_ms": { + "examples": [ + 15234 + ], + "title": "Inference Time Ms", + "type": "integer", + "description": "Total inference time in milliseconds" + } + }, + "title": "ProductHoldingOutput", + "required": [ + "images", + "inference_time_ms" + ] + }, + "AspectRatio": { + "x-fal-order-properties": [ + "ratio" + ], + "type": "object", + "properties": { + "ratio": { + "enum": [ + "1:1", + "16:9", + "9:16", + "4:3", + "3:4" + ], + "description": "Aspect ratio for 4K resolution output", + "type": "string", + "title": "Ratio", + "default": "1:1" + } + }, + "title": "AspectRatio", + "description": "Aspect ratio model that calculates 4K resolution dimensions" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an 
image file.", + "title": "Image", + "properties": { + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the image in pixels.", + "examples": [ + 1024 + ] + }, + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the image in pixels.", + "examples": [ + 1024 + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-apps-v2/product-holding/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/product-holding/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/product-holding": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageAppsV2ProductHoldingInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/product-holding/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageAppsV2ProductHoldingOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-apps-v2/product-photography", + "metadata": { + "display_name": "Product Photography", + "category": "image-to-image", + "description": "Generate professional product photography with realistic lighting and backgrounds.", + "status": "active", + "tags": [ + "product", + "marketing" + ], + "updated_at": "2026-01-26T21:42:50.997Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/penguin/g36T9svKmdie3DHtxJz5x_67cde98934654713bf484b258e2384f3.jpg", + "model_url": "https://fal.run/fal-ai/image-apps-v2/product-photography", + "license_type": "commercial", + "date": "2025-09-19T02:20:29.618Z", + "group": { + "key": "image-apps-v2", + "label": "Product Photography" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-apps-v2/product-photography", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-apps-v2/product-photography queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-apps-v2/product-photography", + "category": "image-to-image", + "thumbnailUrl": "https://v3.fal.media/files/penguin/g36T9svKmdie3DHtxJz5x_67cde98934654713bf484b258e2384f3.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-apps-v2/product-photography", + "documentationUrl": "https://fal.ai/models/fal-ai/image-apps-v2/product-photography/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageAppsV2ProductPhotographyInput": { + "x-fal-order-properties": [ + "product_image_url", + "aspect_ratio" + ], + "type": "object", + "properties": { + "aspect_ratio": { + "description": "Aspect ratio for 4K output", + "$ref": "#/components/schemas/AspectRatio" + }, + "product_image_url": { + "examples": [ + "https://v3.fal.media/files/tiger/WqtPStHkOu6W0lZcIlXD8_156e3c91cb3a4d12b7380cc43b5e4c67.png" + ], + "title": "Product Image Url", + "type": "string", + "description": "Image URL of the product to create professional studio photography" + } + }, + "title": "ProductPhotographyInput", + "required": [ + "product_image_url" + ] + }, + "ImageAppsV2ProductPhotographyOutput": { + "x-fal-order-properties": [ + "images", + "inference_time_ms" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/panda/NggkU_5Ne-SB-Ltgr0lbH_b76a99a276374ff9b13f399e57c54cb6.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "Professional studio product photography", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "inference_time_ms": { + "examples": [ + 15234 + ], + "title": "Inference Time Ms", + "type": "integer", + "description": "Total inference time in milliseconds" + } + }, + "title": "ProductPhotographyOutput", + "required": [ + "images", + "inference_time_ms" + ] + }, + "AspectRatio": { + "x-fal-order-properties": [ + "ratio" + ], + "type": "object", + "properties": { + "ratio": { + "enum": [ + "1:1", + "16:9", + "9:16", + "4:3", + "3:4" + ], + "description": "Aspect ratio for 4K resolution output", + "type": "string", + "title": "Ratio", + "default": "1:1" + } + }, + "title": "AspectRatio", + "description": "Aspect ratio model that calculates 4K resolution dimensions" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the image in pixels.", + "examples": [ + 1024 + ] + }, + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. 
It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the image in pixels.", + "examples": [ + 1024 + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-apps-v2/product-photography/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/product-photography/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/product-photography": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageAppsV2ProductPhotographyInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/product-photography/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageAppsV2ProductPhotographyOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-apps-v2/virtual-try-on", + "metadata": { + "display_name": "Virtual Try-on", + "category": "image-to-image", + "description": "Try on clothes virtually by combining person and clothing images.", + "status": "active", + "tags": [ + "fashion", + "try-on", + "virtual-try-on" + ], + "updated_at": "2026-01-26T21:42:51.501Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/tiger/dd5748sfMpq2uZzoUA_Y3_6377b3f9ee8e49afa174da674a80b9a6.jpg", + "model_url": "https://fal.run/fal-ai/image-apps-v2/virtual-try-on", + "license_type": "commercial", + "date": "2025-09-18T01:55:22.732Z", + "group": { + "key": "image-apps-v2", + "label": "Virtual Try-on" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-apps-v2/virtual-try-on", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-apps-v2/virtual-try-on queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-apps-v2/virtual-try-on", + "category": "image-to-image", + "thumbnailUrl": "https://v3.fal.media/files/tiger/dd5748sfMpq2uZzoUA_Y3_6377b3f9ee8e49afa174da674a80b9a6.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-apps-v2/virtual-try-on", + "documentationUrl": "https://fal.ai/models/fal-ai/image-apps-v2/virtual-try-on/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageAppsV2VirtualTryOnInput": { + "x-fal-order-properties": [ + "person_image_url", + "clothing_image_url", + "preserve_pose", + "aspect_ratio" + ], + "type": "object", + "properties": { + "preserve_pose": { + "title": "Preserve Pose", + "type": "boolean", + "default": true + }, + "aspect_ratio": { + "description": "Aspect ratio for 4K output (default: 3:4 for fashion)", + "$ref": "#/components/schemas/AspectRatio" + }, + "clothing_image_url": { + "examples": [ + "https://v3b.fal.media/files/b/monkey/5ZWXSKUuk9EilI1apFCeu_1ecd050187f24b9aa1d2defb88d8d8ae.png" + ], + "title": "Clothing Image Url", + "type": "string", + "description": "Clothing photo URL" + }, + "person_image_url": { + "examples": [ + "https://v3.fal.media/files/tiger/4vxSHizex4UWR5fdnPs1A.jpeg" + ], + "title": "Person Image Url", + "type": "string", + "description": "Person photo URL" + } + }, + "title": "VirtualTryOnInput", + "required": [ + "person_image_url", + "clothing_image_url" + ] + }, + "ImageAppsV2VirtualTryOnOutput": { + "x-fal-order-properties": [ + "images", + "inference_time_ms" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/panda/9w6wt7vgxjfmiBIoo6bjF_cb0ba7a150c84f159e9d40af2d439401.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "Person wearing the virtual clothing", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "inference_time_ms": { + "examples": [ + 15234 + ], + "title": "Inference Time Ms", + "type": "integer", + "description": "Total inference time in milliseconds" + } + }, + "title": "VirtualTryOnOutput", + "required": [ + "images", + "inference_time_ms" + ] + }, + "AspectRatio": { + "x-fal-order-properties": [ + "ratio" + ], + "type": "object", + "properties": { + "ratio": { + "enum": [ + "1:1", + "16:9", + "9:16", + "4:3", + "3:4" + ], + "description": "Aspect ratio for 4K resolution output", + "type": "string", + "title": "Ratio", + "default": "1:1" + } + }, + "title": "AspectRatio", + "description": "Aspect ratio model that calculates 4K resolution dimensions" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the image in pixels.", + "examples": [ + 1024 + ] + }, + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. 
It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the image in pixels.", + "examples": [ + 1024 + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-apps-v2/virtual-try-on/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/virtual-try-on/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/virtual-try-on": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageAppsV2VirtualTryOnInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/virtual-try-on/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageAppsV2VirtualTryOnOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-apps-v2/texture-transform", + "metadata": { + "display_name": "Texture Transform", + "category": "image-to-image", + "description": "Transform objects with different surface textures like marble, wood, or fabric.", + "status": "active", + "tags": [ + "texture-transform" + ], + "updated_at": "2026-01-26T21:42:51.750Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/kangaroo/gzXctbp7SzO3xaVpDEqtN_97343b9e7b404a3e8f042b1d7d57a12f.jpg", + "model_url": "https://fal.run/fal-ai/image-apps-v2/texture-transform", + "license_type": "commercial", + "date": "2025-09-18T01:47:30.542Z", + "group": { + "key": "image-apps-v2", + "label": "Texture Transform" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-apps-v2/texture-transform", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-apps-v2/texture-transform queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-apps-v2/texture-transform", + "category": "image-to-image", + "thumbnailUrl": "https://v3.fal.media/files/kangaroo/gzXctbp7SzO3xaVpDEqtN_97343b9e7b404a3e8f042b1d7d57a12f.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-apps-v2/texture-transform", + "documentationUrl": "https://fal.ai/models/fal-ai/image-apps-v2/texture-transform/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageAppsV2TextureTransformInput": { + "x-fal-order-properties": [ + "image_url", + "target_texture", + "aspect_ratio" + ], + "type": "object", + "properties": { + "target_texture": { + "enum": [ + "cotton", + "denim", + "wool", + "felt", + "wood", + "leather", + "velvet", + "stone", + "marble", + "ceramic", + "concrete", + "brick", + "clay", + "foam", + "glass", + "metal", + "silk", + "fabric", + "crystal", + "rubber", + "plastic", + "lace" + ], + "title": "Target Texture", + "type": "string", + "default": "marble" + }, + "aspect_ratio": { + "description": "Aspect ratio for 4K output", + "$ref": "#/components/schemas/AspectRatio" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/tiger/WqtPStHkOu6W0lZcIlXD8_156e3c91cb3a4d12b7380cc43b5e4c67.png" + ], + "title": "Image Url", + "type": "string", + "description": "Image URL for texture transformation" + } + }, + "title": "TextureTransformInput", + "required": [ + "image_url" + ] + }, + "ImageAppsV2TextureTransformOutput": { + "x-fal-order-properties": [ + "images", + "inference_time_ms" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/elephant/gbv_d9UfnVu_axz_E64fA_4f79ee88e4ed49759c09110783d9924d.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "Image with transformed texture", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "inference_time_ms": { + "examples": [ + 15234 + ], + "title": "Inference Time Ms", + "type": "integer", + "description": "Total inference time in milliseconds" + } + }, + "title": "TextureTransformOutput", + "required": [ + "images", + "inference_time_ms" + ] + }, + "AspectRatio": { + "x-fal-order-properties": [ + "ratio" + ], + "type": "object", + "properties": { + "ratio": { + "enum": [ + "1:1", + "16:9", + "9:16", + "4:3", + "3:4" + ], + "description": "Aspect ratio for 4K resolution output", + "type": "string", + "title": "Ratio", + "default": "1:1" + } + }, + "title": "AspectRatio", + "description": "Aspect ratio model that calculates 4K resolution dimensions" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the image in pixels.", + "examples": [ + 1024 + ] + }, + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. 
It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the image in pixels.", + "examples": [ + 1024 + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-apps-v2/texture-transform/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/texture-transform/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/texture-transform": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageAppsV2TextureTransformInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/texture-transform/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageAppsV2TextureTransformOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-apps-v2/relighting", + "metadata": { + "display_name": "Relighting", + "category": "image-to-image", + "description": "Adjust and enhance images with different lighting styles.", + "status": "active", + "tags": [ + "relighting" + ], + "updated_at": "2026-01-26T21:42:51.874Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/koala/W0d6z5pWtVwakzxtKd4K4_94f6ab5b5820440db672dce73d447bff.jpg", + "model_url": "https://fal.run/fal-ai/image-apps-v2/relighting", + "license_type": "commercial", + "date": "2025-09-18T01:45:02.577Z", + "group": { + "key": "image-apps-v2", + "label": "Relighting" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-apps-v2/relighting", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-apps-v2/relighting queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-apps-v2/relighting", + "category": "image-to-image", + "thumbnailUrl": "https://v3.fal.media/files/koala/W0d6z5pWtVwakzxtKd4K4_94f6ab5b5820440db672dce73d447bff.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-apps-v2/relighting", + "documentationUrl": "https://fal.ai/models/fal-ai/image-apps-v2/relighting/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageAppsV2RelightingInput": { + "x-fal-order-properties": [ + "image_url", + "lighting_style", + "aspect_ratio" + ], + "type": "object", + "properties": { + "aspect_ratio": { + "description": "Aspect ratio for 4K output", + "$ref": "#/components/schemas/AspectRatio" + }, + "lighting_style": { + "enum": [ + "natural", + "studio", + "golden_hour", + "blue_hour", + "dramatic", + "soft", + "hard", + "backlight", + "side_light", + "front_light", + "rim_light", + "sunset", + "sunrise", + "neon", + "candlelight", + "moonlight", + "spotlight", + "ambient" + ], + "title": "Lighting Style", + "type": "string", + "default": "natural" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/monkey/tugG2Q-XqMgf_ZoBr8KFO.jpeg" + ], + "title": "Image Url", + "type": "string", + "description": "Image URL for relighting" + } + }, + "title": "RelightingInput", + "required": [ + "image_url" + ] + }, + "ImageAppsV2RelightingOutput": { + "x-fal-order-properties": [ + "images", + "inference_time_ms" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/penguin/TD0yR-XgXfyZjSjKcxKWS_4a54a0fb769d4a60bf6818fffed70493.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "Image with new lighting", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "inference_time_ms": { + "examples": [ + 15234 + ], + "title": "Inference Time Ms", + "type": "integer", + "description": "Total inference time in milliseconds" + } + }, + "title": "RelightingOutput", + "required": [ + "images", + "inference_time_ms" + ] + }, + "AspectRatio": { + "x-fal-order-properties": [ + "ratio" + ], + "type": "object", + "properties": { + "ratio": { + "enum": [ + "1:1", + "16:9", + "9:16", + "4:3", + "3:4" + ], + "description": "Aspect ratio for 4K resolution output", + "type": "string", + "title": "Ratio", + "default": "1:1" + } + }, + "title": "AspectRatio", + "description": "Aspect ratio model that calculates 4K resolution dimensions" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the image in pixels.", + "examples": [ + 1024 + ] + }, + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. 
It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the image in pixels.", + "examples": [ + 1024 + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-apps-v2/relighting/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/relighting/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/relighting": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageAppsV2RelightingInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/relighting/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageAppsV2RelightingOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-apps-v2/style-transfer", + "metadata": { + "display_name": "Style Transfer", + "category": "image-to-image", + "description": "Apply artistic styles like impressionism, cubism, or surrealism to your images.", + "status": "active", + "tags": [ + "style-transfer" + ], + "updated_at": "2026-01-26T21:42:51.999Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/kangaroo/uZNy5XjPfUQJOs0Kuw7C4_367de9bbd3034716b9432d637c3b369d.jpg", + "model_url": "https://fal.run/fal-ai/image-apps-v2/style-transfer", + "license_type": "commercial", + "date": "2025-09-18T01:44:34.974Z", + "group": { + "key": "image-apps-v2", + "label": "Style Transfer" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-apps-v2/style-transfer", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-apps-v2/style-transfer queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-apps-v2/style-transfer", + "category": "image-to-image", + "thumbnailUrl": "https://v3.fal.media/files/kangaroo/uZNy5XjPfUQJOs0Kuw7C4_367de9bbd3034716b9432d637c3b369d.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-apps-v2/style-transfer", + "documentationUrl": "https://fal.ai/models/fal-ai/image-apps-v2/style-transfer/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageAppsV2StyleTransferInput": { + "x-fal-order-properties": [ + "image_url", + "style_reference_image_url", + "target_style", + "aspect_ratio" + ], + "type": "object", + "properties": { + "target_style": { + "enum": [ + "anime_character", + "cartoon_3d", + "hand_drawn_animation", + "cyberpunk_future", + "anime_game_style", + "comic_book_animation", + "animated_series", + "cartoon_animation", + "lofi_aesthetic", + "cottagecore", + "dark_academia", + "y2k", + "vaporwave", + "liminal_space", + "weirdcore", + "dreamcore", + "synthwave", + "outrun", + "photorealistic", + "hyperrealistic", + "digital_art", + "concept_art", + "impressionist", + "anime", + "pixel_art", + "claymation" + ], + "title": "Target Style", + "type": "string", + "default": "impressionist" + }, + "aspect_ratio": { + "description": "Aspect ratio for 4K output", + "$ref": "#/components/schemas/AspectRatio" + }, + "style_reference_image_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Optional reference image URL. When provided, the style will be inferred from this image instead of the selected preset style.", + "title": "Style Reference Image Url" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/tiger/vAohCtb_N8Q_cAs9Z03GS.jpeg" + ], + "title": "Image Url", + "type": "string", + "description": "Image URL for style transfer" + } + }, + "title": "StyleTransferInput", + "required": [ + "image_url" + ] + }, + "ImageAppsV2StyleTransferOutput": { + "x-fal-order-properties": [ + "images", + "inference_time_ms" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/koala/LX3BVXfDxE_3qgqH4YL3P_d89afb41867b4a698d29b6308d610dc1.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "Image with transferred style", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "inference_time_ms": { + "examples": [ + 15234 + ], + "title": "Inference Time Ms", + "type": "integer", + "description": "Total inference time in milliseconds" + } + }, + "title": "StyleTransferOutput", + "required": [ + "images", + "inference_time_ms" + ] + }, + "AspectRatio": { + "x-fal-order-properties": [ + "ratio" + ], + "type": "object", + "properties": { + "ratio": { + "enum": [ + "1:1", + "16:9", + "9:16", + "4:3", + "3:4" + ], + "description": "Aspect ratio for 4K resolution output", + "type": "string", + "title": "Ratio", + "default": "1:1" + } + }, + "title": "AspectRatio", + "description": "Aspect ratio model that calculates 4K resolution dimensions" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the image in pixels.", + "examples": [ + 1024 + ] + }, + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. 
It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the image in pixels.", + "examples": [ + 1024 + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-apps-v2/style-transfer/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/style-transfer/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/style-transfer": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageAppsV2StyleTransferInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/style-transfer/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageAppsV2StyleTransferOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-apps-v2/photo-restoration", + "metadata": { + "display_name": "Photo Restoration", + "category": "image-to-image", + "description": "Restore old or damaged photos by fixing colors, scratches, and resolution.", + "status": "active", + "tags": [ + "photo-restoration", + "image-enhance" + ], + "updated_at": "2026-01-26T21:42:52.123Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/kangaroo/oP9mu0F1Obx0ufyEEVAgn_3f7a878f3cf54a078fa8e0d53524020a.jpg", + "model_url": "https://fal.run/fal-ai/image-apps-v2/photo-restoration", + "license_type": "commercial", + "date": "2025-09-18T01:43:11.542Z", + "group": { + "key": "image-apps-v2", + "label": "Photo Restoration" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-apps-v2/photo-restoration", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-apps-v2/photo-restoration queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-apps-v2/photo-restoration", + "category": "image-to-image", + "thumbnailUrl": "https://v3.fal.media/files/kangaroo/oP9mu0F1Obx0ufyEEVAgn_3f7a878f3cf54a078fa8e0d53524020a.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-apps-v2/photo-restoration", + "documentationUrl": "https://fal.ai/models/fal-ai/image-apps-v2/photo-restoration/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageAppsV2PhotoRestorationInput": { + "x-fal-order-properties": [ + "image_url", + "enhance_resolution", + "fix_colors", + "remove_scratches", + "aspect_ratio" + ], + "type": "object", + "properties": { + "enhance_resolution": { + "title": "Enhance Resolution", + "type": "boolean", + "default": true + }, + "aspect_ratio": { + "description": "Aspect ratio for 4K output (default: 4:3 for classic photos)", + "$ref": "#/components/schemas/AspectRatio" + }, + "remove_scratches": { + "title": "Remove Scratches", + "type": "boolean", + "default": true + }, + "fix_colors": { + "title": "Fix Colors", + "type": "boolean", + "default": true + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/panda/BcOIyOJ5Z1-GOjDi_GmsX_50e353c588c74435882f8f68989b4af5.png" + ], + "title": "Image Url", + "type": "string", + "description": "Old or damaged photo URL to restore" + } + }, + "title": "PhotoRestorationInput", + "required": [ + "image_url" + ] + }, + "ImageAppsV2PhotoRestorationOutput": { + "x-fal-order-properties": [ + "images", + "inference_time_ms" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/zebra/grrkysWir2bvFTnubydQK_92e56e5831994be59e653611983d503f.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "Restored photo", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "inference_time_ms": { + "examples": [ + 15234 + ], + "title": "Inference Time Ms", + "type": "integer", + "description": "Total inference time in milliseconds" + } + }, + "title": "PhotoRestorationOutput", + "required": [ + "images", + "inference_time_ms" + ] + }, + "AspectRatio": { + "x-fal-order-properties": [ + "ratio" + ], + "type": "object", + "properties": { + "ratio": { + "enum": [ + "1:1", + "16:9", + "9:16", + "4:3", + "3:4" + ], + "description": "Aspect ratio for 4K resolution output", + "type": "string", + "title": "Ratio", + "default": "1:1" + } + }, + "title": "AspectRatio", + "description": "Aspect ratio model that calculates 4K resolution dimensions" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the image in pixels.", + "examples": [ + 1024 + ] + }, + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. 
It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the image in pixels.", + "examples": [ + 1024 + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-apps-v2/photo-restoration/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/photo-restoration/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/photo-restoration": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageAppsV2PhotoRestorationInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/photo-restoration/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageAppsV2PhotoRestorationOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-apps-v2/portrait-enhance", + "metadata": { + "display_name": "Portrait Enhance", + "category": "image-to-image", + "description": "Enhance and refine portrait photos with improved clarity and detail.", + "status": "active", + "tags": [ + "image-edit", + "enhancement" + ], + "updated_at": "2026-01-26T21:42:52.248Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/monkey/9kZMXV4GvRettHUuudKER_3f04e4f8fd0e4f28ad957c2a3bf9f3e5.jpg", + "model_url": "https://fal.run/fal-ai/image-apps-v2/portrait-enhance", + "license_type": "commercial", + "date": "2025-09-18T01:40:36.113Z", + "group": { + "key": "image-apps-v2", + "label": "Portrait Enhance" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-apps-v2/portrait-enhance", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-apps-v2/portrait-enhance queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-apps-v2/portrait-enhance", + "category": "image-to-image", + "thumbnailUrl": "https://v3.fal.media/files/monkey/9kZMXV4GvRettHUuudKER_3f04e4f8fd0e4f28ad957c2a3bf9f3e5.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-apps-v2/portrait-enhance", + "documentationUrl": "https://fal.ai/models/fal-ai/image-apps-v2/portrait-enhance/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageAppsV2PortraitEnhanceInput": { + "x-fal-order-properties": [ + "image_url", + "aspect_ratio" + ], + "type": "object", + "properties": { + "aspect_ratio": { + "description": "Aspect ratio for 4K output (default: 3:4 for portraits)", + "$ref": "#/components/schemas/AspectRatio" + }, + "image_url": { + "examples": [ + "https://v3b.fal.media/files/b/monkey/VszUszGx2uzReqFvQzF26.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "Portrait image URL to enhance" + } + }, + "title": "PortraitInput", + "required": [ + "image_url" + ] + }, + "ImageAppsV2PortraitEnhanceOutput": { + "x-fal-order-properties": [ + "images", + "inference_time_ms" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/tiger/Re4QHcI-z3a1RNUssE9s3_ac63f107bd714cccb9060e2a5c8f999f.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "Enhanced portrait", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "inference_time_ms": { + "examples": [ + 15234 + ], + "title": "Inference Time Ms", + "type": "integer", + "description": "Total inference time in milliseconds" + } + }, + "title": "PortraitOutput", + "required": [ + "images", + "inference_time_ms" + ] + }, + "AspectRatio": { + "x-fal-order-properties": [ + "ratio" + ], + "type": "object", + "properties": { + "ratio": { + "enum": [ + "1:1", + "16:9", + "9:16", + "4:3", + "3:4" + ], + "description": "Aspect ratio for 4K resolution output", + "type": "string", + "title": "Ratio", + "default": "1:1" + } + }, + "title": "AspectRatio", + "description": "Aspect ratio model that calculates 4K resolution dimensions" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the image in pixels.", + "examples": [ + 1024 + ] + }, + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. 
It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the image in pixels.", + "examples": [ + 1024 + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-apps-v2/portrait-enhance/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/portrait-enhance/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/portrait-enhance": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageAppsV2PortraitEnhanceInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/portrait-enhance/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageAppsV2PortraitEnhanceOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-apps-v2/photography-effects", + "metadata": { + "display_name": "Photography Effects", + "category": "image-to-image", + "description": "Apply diverse photography styles and effects to transform your images.", + "status": "active", + "tags": [ + "style-transfer", + "photography" + ], + "updated_at": "2026-01-26T21:42:52.373Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/rabbit/QHdnOcB8IwyNRSkQOeVWE_aad27ab63cee47e8b6c15254f6813ac2.jpg", + "model_url": "https://fal.run/fal-ai/image-apps-v2/photography-effects", + "license_type": "commercial", + "date": "2025-09-18T01:38:43.720Z", + "group": { + "key": "image-apps-v2", + "label": "Photography Effects" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-apps-v2/photography-effects", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-apps-v2/photography-effects queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-apps-v2/photography-effects", + "category": "image-to-image", + "thumbnailUrl": "https://v3.fal.media/files/rabbit/QHdnOcB8IwyNRSkQOeVWE_aad27ab63cee47e8b6c15254f6813ac2.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-apps-v2/photography-effects", + "documentationUrl": "https://fal.ai/models/fal-ai/image-apps-v2/photography-effects/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageAppsV2PhotographyEffectsInput": { + "x-fal-order-properties": [ + "image_url", + "effect_type", + "aspect_ratio" + ], + "type": "object", + "properties": { + "effect_type": { + "enum": [ + "film", + "vintage_film", + "portrait_photography", + "fashion_photography", + "street_photography", + "sepia_tone", + "film_grain", + "light_leaks", + "vignette_effect", + "instant_camera", + "golden_hour", + "dramatic_lighting", + "soft_focus", + "bokeh_effect", + "high_contrast", + "double_exposure" + ], + "title": "Effect Type", + "type": "string", + "default": "film" + }, + "aspect_ratio": { + "description": "Aspect ratio for 4K output", + "$ref": "#/components/schemas/AspectRatio" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/monkey/tugG2Q-XqMgf_ZoBr8KFO.jpeg" + ], + "title": "Image Url", + "type": "string", + "description": "Image URL for photography effects" + } + }, + "title": "PhotographyEffectsInput", + "required": [ + "image_url" + ] + }, + "ImageAppsV2PhotographyEffectsOutput": { + "x-fal-order-properties": [ + "images", + "inference_time_ms" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/koala/hUNSMGeQKoBKGx0VlOYMK_3bc61fa97c8247e8bf38854eff4a6534.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "Image with photography effects", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "inference_time_ms": { + "examples": [ + 15234 + ], + "title": "Inference Time Ms", + "type": "integer", + "description": "Total inference time in milliseconds" + } + }, + "title": "PhotographyEffectsOutput", + "required": [ + "images", + "inference_time_ms" + ] + }, + "AspectRatio": { + "x-fal-order-properties": [ + "ratio" + ], + "type": "object", + "properties": { + "ratio": { + "enum": [ + "1:1", + "16:9", + "9:16", + "4:3", + "3:4" + ], + "description": "Aspect ratio for 4K resolution output", + "type": "string", + "title": "Ratio", + "default": "1:1" + } + }, + "title": "AspectRatio", + "description": "Aspect ratio model that calculates 4K resolution dimensions" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the image in pixels.", + "examples": [ + 1024 + ] + }, + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. 
It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the image in pixels.", + "examples": [ + 1024 + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-apps-v2/photography-effects/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/photography-effects/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/photography-effects": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageAppsV2PhotographyEffectsInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/photography-effects/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageAppsV2PhotographyEffectsOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-apps-v2/perspective", + "metadata": { + "display_name": "Perspective Change", + "category": "image-to-image", + "description": "Easily adjust the perspective of any image to different angles.", + "status": "active", + "tags": [ + "change-angle", + "perspective" + ], + "updated_at": "2026-01-26T21:42:52.497Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/rabbit/PjquFEk5HynXyIj8Z1Xke_4ef010a6c99b4fa2a1f6639c233f8b93.jpg", + "model_url": "https://fal.run/fal-ai/image-apps-v2/perspective", + "license_type": "commercial", + "date": "2025-09-18T01:35:34.670Z", + "group": { + "key": "image-apps-v2", + "label": "Perspective Change" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-apps-v2/perspective", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-apps-v2/perspective queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-apps-v2/perspective", + "category": "image-to-image", + "thumbnailUrl": "https://v3.fal.media/files/rabbit/PjquFEk5HynXyIj8Z1Xke_4ef010a6c99b4fa2a1f6639c233f8b93.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-apps-v2/perspective", + "documentationUrl": "https://fal.ai/models/fal-ai/image-apps-v2/perspective/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageAppsV2PerspectiveInput": { + "x-fal-order-properties": [ + "image_url", + "target_perspective", + "aspect_ratio" + ], + "type": "object", + "properties": { + "aspect_ratio": { + "description": "Aspect ratio for 4K output", + "$ref": "#/components/schemas/AspectRatio" + }, + "target_perspective": { + "enum": [ + "front", + "left_side", + "right_side", + "back", + "top_down", + "bottom_up", + "birds_eye", + "three_quarter_left", + "three_quarter_right" + ], + "title": "Target Perspective", + "type": "string", + "default": "front" + }, + "image_url": { + "examples": [ + "https://v3b.fal.media/files/b/lion/aqEiZ8Ui6rxWUB-Ujfu79_52c238c6d75d45538eaa71c50d329ba0.png" + ], + "title": "Image Url", + "type": "string", + "description": "Image URL for perspective change" + } + }, + "title": "PerspectiveInput", + "required": [ + "image_url" + ] + }, + "ImageAppsV2PerspectiveOutput": { + "x-fal-order-properties": [ + "images", + "inference_time_ms" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/zebra/Elbt2Bcigd6pVdLlbfsHV_e45937e851074d2d932363009504866d.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "Image with changed perspective", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "inference_time_ms": { + "examples": [ + 15234 + ], + "title": "Inference Time Ms", + "type": "integer", + "description": "Total inference time in milliseconds" + } + }, + "title": "PerspectiveOutput", + "required": [ + "images", + "inference_time_ms" + ] + }, + "AspectRatio": { + "x-fal-order-properties": [ + "ratio" + ], + "type": "object", + "properties": { + "ratio": { + "enum": [ + "1:1", + "16:9", + "9:16", + "4:3", + "3:4" + ], + "description": "Aspect ratio for 4K resolution output", + "type": "string", + "title": "Ratio", + "default": "1:1" + } + }, + "title": "AspectRatio", + "description": "Aspect ratio model that calculates 4K resolution dimensions" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the image in pixels.", + "examples": [ + 1024 + ] + }, + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. 
It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the image in pixels.", + "examples": [ + 1024 + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-apps-v2/perspective/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/perspective/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/perspective": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageAppsV2PerspectiveInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/perspective/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageAppsV2PerspectiveOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-apps-v2/object-removal", + "metadata": { + "display_name": "Object Removal", + "category": "image-to-image", + "description": "Remove unwanted objects seamlessly from any image.", + "status": "active", + "tags": [ + "remove", + "object-removal" + ], + "updated_at": "2026-01-26T21:42:52.621Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/rabbit/m-e8Cm424GRXU0JdKie8l_915251cbaa4b4130a55547fde0f5bd36.jpg", + "model_url": "https://fal.run/fal-ai/image-apps-v2/object-removal", + "license_type": "commercial", + "date": "2025-09-18T01:32:20.301Z", + "group": { + "key": "image-apps-v2", + "label": "Object Removal" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-apps-v2/object-removal", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-apps-v2/object-removal queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-apps-v2/object-removal", + "category": "image-to-image", + "thumbnailUrl": "https://fal.media/files/rabbit/m-e8Cm424GRXU0JdKie8l_915251cbaa4b4130a55547fde0f5bd36.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-apps-v2/object-removal", + "documentationUrl": "https://fal.ai/models/fal-ai/image-apps-v2/object-removal/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageAppsV2ObjectRemovalInput": { + "x-fal-order-properties": [ + "image_url", + "object_to_remove", + "aspect_ratio" + ], + "type": "object", + "properties": { + "aspect_ratio": { + "description": "Aspect ratio for 4K output", + "$ref": "#/components/schemas/AspectRatio" + }, + "object_to_remove": { + "description": "Object to remove", + "type": "string", + "title": "Object To Remove" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/zebra/L_YMy6H5r_HYMacZX1qne_74a8fb6130164a18930af55370a1c9b2.png" + ], + "title": "Image Url", + "type": "string", + "description": "Image URL containing object to remove" + } + }, + "title": "ObjectRemovalInput", + "required": [ + "image_url", + "object_to_remove" + ] + }, + "ImageAppsV2ObjectRemovalOutput": { + "x-fal-order-properties": [ + "images", + "inference_time_ms" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/rabbit/InVOEtkI_yl05dSz9bqiU_789bdf6c81fc4c75bdcc57bad744f389.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "Image with object removed", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "inference_time_ms": { + "examples": [ + 15234 + ], + "title": "Inference Time Ms", + "type": "integer", + "description": "Total inference time in milliseconds" + } + }, + "title": "ObjectRemovalOutput", + "required": [ + "images", + "inference_time_ms" + ] + }, + "AspectRatio": { + "x-fal-order-properties": [ + "ratio" + ], + "type": "object", + "properties": { + "ratio": { + "enum": [ + "1:1", + "16:9", + "9:16", + "4:3", + "3:4" + ], + "description": "Aspect ratio for 4K resolution output", + "type": "string", + "title": "Ratio", + "default": "1:1" + } + }, + "title": "AspectRatio", + "description": "Aspect ratio model that calculates 4K resolution dimensions" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the image in pixels.", + "examples": [ + 1024 + ] + }, + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. 
It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the image in pixels.", + "examples": [ + 1024 + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-apps-v2/object-removal/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/object-removal/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/object-removal": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageAppsV2ObjectRemovalInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/object-removal/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageAppsV2ObjectRemovalOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-apps-v2/headshot-photo", + "metadata": { + "display_name": "Headshot Generator", + "category": "image-to-image", + "description": "Generate professional headshot photos with customizable backgrounds.", + "status": "active", + "tags": [ + "headshot", + "profile-photo" + ], + "updated_at": "2026-01-26T21:42:52.746Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/koala/F4h1jbZoExQGajA6HDsTi_60aacfe738274239a2eb930f8c885bfa.jpg", + "model_url": "https://fal.run/fal-ai/image-apps-v2/headshot-photo", + "license_type": "commercial", + "date": "2025-09-18T01:24:48.033Z", + "group": { + "key": "image-apps-v2", + "label": "Headshot Generator" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-apps-v2/headshot-photo", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-apps-v2/headshot-photo queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-apps-v2/headshot-photo", + "category": "image-to-image", + "thumbnailUrl": "https://fal.media/files/koala/F4h1jbZoExQGajA6HDsTi_60aacfe738274239a2eb930f8c885bfa.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-apps-v2/headshot-photo", + "documentationUrl": "https://fal.ai/models/fal-ai/image-apps-v2/headshot-photo/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageAppsV2HeadshotPhotoInput": { + "x-fal-order-properties": [ + "image_url", + "background_style", + "aspect_ratio" + ], + "type": "object", + "properties": { + "aspect_ratio": { + "description": "Aspect ratio for 4K output (default: 3:4 for portraits)", + "$ref": "#/components/schemas/AspectRatio" + }, + "background_style": { + "enum": [ + "professional", + "corporate", + "clean", + "gradient" + ], + "title": "Background Style", + "type": "string", + "default": "professional" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/panda/oMob58qZJRtbDs5l45QKT_e3a1512c455d425fab2d62e07a51c506.png" + ], + "title": "Image Url", + "type": "string", + "description": "Portrait image URL to convert to professional headshot" + } + }, + "title": "HeadshotInput", + "required": [ + "image_url" + ] + }, + "ImageAppsV2HeadshotPhotoOutput": { + "x-fal-order-properties": [ + "images", + "inference_time_ms" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/penguin/n2f6ImUI9nJORdIibmpim_dfaeb84b0e894f62ae50ccaceb9704d3.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "Professional headshot image", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "inference_time_ms": { + "examples": [ + 15234 + ], + "title": "Inference Time Ms", + "type": "integer", + "description": "Total inference time in milliseconds" + } + }, + "title": "HeadshotOutput", + "required": [ + "images", + "inference_time_ms" + ] + }, + "AspectRatio": { + "x-fal-order-properties": [ + "ratio" + ], + "type": "object", + "properties": { + "ratio": { + "enum": [ + "1:1", + "16:9", + "9:16", + "4:3", + "3:4" + ], + "description": "Aspect ratio for 4K resolution output", + "type": "string", + "title": "Ratio", + "default": "1:1" + } + }, + "title": "AspectRatio", + "description": "Aspect ratio model that calculates 4K resolution dimensions" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the image in pixels.", + "examples": [ + 1024 + ] + }, + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. 
It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the image in pixels.", + "examples": [ + 1024 + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-apps-v2/headshot-photo/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/headshot-photo/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/headshot-photo": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageAppsV2HeadshotPhotoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/headshot-photo/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageAppsV2HeadshotPhotoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-apps-v2/hair-change", + "metadata": { + "display_name": "Hair Change", + "category": "image-to-image", + "description": "Change hairstyles and hair colors in photos realistically.", + "status": "active", + "tags": [ + "hair-edit", + "style-change" + ], + "updated_at": "2026-01-26T21:42:52.870Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/zebra/bfNDAz9O7I1IzLIMnQON0_5faa305ab343464989f49415737cc39d.jpg", + "model_url": "https://fal.run/fal-ai/image-apps-v2/hair-change", + "license_type": "commercial", + "date": "2025-09-18T01:21:48.130Z", + "group": { + "key": "image-apps-v2", + "label": "Hair Change" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-apps-v2/hair-change", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-apps-v2/hair-change queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-apps-v2/hair-change", + "category": "image-to-image", + "thumbnailUrl": "https://fal.media/files/zebra/bfNDAz9O7I1IzLIMnQON0_5faa305ab343464989f49415737cc39d.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-apps-v2/hair-change", + "documentationUrl": "https://fal.ai/models/fal-ai/image-apps-v2/hair-change/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageAppsV2HairChangeInput": { + "x-fal-order-properties": [ + "image_url", + "target_hairstyle", + "hair_color", + "aspect_ratio" + ], + "type": "object", + "properties": { + "target_hairstyle": { + "enum": [ + "short_hair", + "medium_long_hair", + "long_hair", + "curly_hair", + "wavy_hair", + "high_ponytail", + "bun", + "bob_cut", + "pixie_cut", + "braids", + "straight_hair", + "afro", + "dreadlocks", + "buzz_cut", + "mohawk", + "bangs", + "side_part", + "middle_part" + ], + "title": "Target Hairstyle", + "type": "string", + "default": "long_hair" + }, + "aspect_ratio": { + "description": "Aspect ratio for 4K output (default: 3:4 for portraits)", + "$ref": "#/components/schemas/AspectRatio" + }, + "hair_color": { + "enum": [ + "black", + "dark_brown", + "light_brown", + "blonde", + "platinum_blonde", + "red", + "auburn", + "gray", + "silver", + "blue", + "green", + "purple", + "pink", + "rainbow", + "natural", + "highlights", + "ombre", + "balayage" + ], + "title": "Hair Color", + "type": "string", + "default": "natural" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/zebra/1ois7lqES78dLualcytmS_1e088ef24f474972824cffcfdd7ff291.png" + ], + "title": "Image Url", + "type": "string", + "description": "Portrait image URL for hair change" + } + }, + "title": "HairChangeInput", + "required": [ + "image_url" + ] + }, + "ImageAppsV2HairChangeOutput": { + "x-fal-order-properties": [ + "images", + "inference_time_ms" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/rabbit/nuRb4Pn6U-Pryrd2LPI6I_1bcc7ca071a1483faeba2d159952c7f8.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "Portrait with changed hair", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "inference_time_ms": { + "examples": [ + 15234 + ], + "title": "Inference Time Ms", + "type": "integer", + "description": "Total inference time in milliseconds" + } + }, + "title": "HairChangeOutput", + "required": [ + "images", + "inference_time_ms" + ] + }, + "AspectRatio": { + "x-fal-order-properties": [ + "ratio" + ], + "type": "object", + "properties": { + "ratio": { + "enum": [ + "1:1", + "16:9", + "9:16", + "4:3", + "3:4" + ], + "description": "Aspect ratio for 4K resolution output", + "type": "string", + "title": "Ratio", + "default": "1:1" + } + }, + "title": "AspectRatio", + "description": "Aspect ratio model that calculates 4K resolution dimensions" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the image in pixels.", + "examples": [ + 1024 + ] + }, + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. 
It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the image in pixels.", + "examples": [ + 1024 + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-apps-v2/hair-change/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/hair-change/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/hair-change": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageAppsV2HairChangeInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/hair-change/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageAppsV2HairChangeOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-apps-v2/expression-change", + "metadata": { + "display_name": "Expression Change", + "category": "image-to-image", + "description": "Change facial expressions in photos with realistic results.", + "status": "active", + "tags": [ + "face-edit", + "expression-change" + ], + "updated_at": "2026-01-26T21:42:52.994Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/panda/2ZF-wegMjX4YdYMzLlauc_6176038c50b8492694be5872b350aa5d.jpg", + "model_url": "https://fal.run/fal-ai/image-apps-v2/expression-change", + "license_type": "commercial", + "date": "2025-09-18T01:19:36.092Z", + "group": { + "key": "image-apps-v2", + "label": "Expression Change" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-apps-v2/expression-change", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-apps-v2/expression-change queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-apps-v2/expression-change", + "category": "image-to-image", + "thumbnailUrl": "https://fal.media/files/panda/2ZF-wegMjX4YdYMzLlauc_6176038c50b8492694be5872b350aa5d.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-apps-v2/expression-change", + "documentationUrl": "https://fal.ai/models/fal-ai/image-apps-v2/expression-change/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageAppsV2ExpressionChangeInput": { + "x-fal-order-properties": [ + "image_url", + "target_expression", + "aspect_ratio" + ], + "type": "object", + "properties": { + "aspect_ratio": { + "description": "Aspect ratio for 4K output (default: 3:4 for portraits)", + "$ref": "#/components/schemas/AspectRatio" + }, + "target_expression": { + "enum": [ + "smile", + "surprise", + "glare", + "panic", + "shyness", + "laugh", + "cry", + "angry", + "sad", + "happy", + "excited", + "shocked", + "confused", + "focused", + "dreamy", + "serious", + "playful", + "mysterious", + "confident", + "thoughtful" + ], + "title": "Target Expression", + "type": "string", + "default": "smile" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/kangaroo/Gk5_0En-p1CNYnGOCDkl1_8332b944f8334ba9b3118b49e3e641cf.png" + ], + "title": "Image Url", + "type": "string", + "description": "Portrait image URL for expression change" + } + }, + "title": "ExpressionChangeInput", + "required": [ + "image_url" + ] + }, + "ImageAppsV2ExpressionChangeOutput": { + "x-fal-order-properties": [ + "images", + "inference_time_ms" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/lion/evrtcIw4fzjESvKi7Unup_781b90e31d864eb0ad4f5ef59160ebc4.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "Portrait with changed expression", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "inference_time_ms": { + "examples": [ + 15234 + ], + "title": "Inference Time Ms", + "type": "integer", + "description": "Total inference time in milliseconds" + } + }, + "title": "ExpressionChangeOutput", + "required": [ + "images", + "inference_time_ms" + ] + }, + "AspectRatio": { + "x-fal-order-properties": [ + "ratio" + ], + "type": "object", + "properties": { + "ratio": { + "enum": [ + "1:1", + "16:9", + "9:16", + "4:3", + "3:4" + ], + "description": "Aspect ratio for 4K resolution output", + "type": "string", + "title": "Ratio", + "default": "1:1" + } + }, + "title": "AspectRatio", + "description": "Aspect ratio model that calculates 4K resolution dimensions" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the image in pixels.", + "examples": [ + 1024 + ] + }, + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. 
It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the image in pixels.", + "examples": [ + 1024 + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-apps-v2/expression-change/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/expression-change/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/expression-change": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageAppsV2ExpressionChangeInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/expression-change/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageAppsV2ExpressionChangeOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-apps-v2/city-teleport", + "metadata": { + "display_name": "City Teleport", + "category": "image-to-image", + "description": "Place a person’s photo into iconic cities worldwide.", + "status": "active", + "tags": [ + "city-teleport", + "backgroundswap" + ], + "updated_at": "2026-01-26T21:42:53.118Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/elephant/dorvx78TXh0JIi4CASrhw_967925f37afb4f3489b89712b0639311.jpg", + "model_url": "https://fal.run/fal-ai/image-apps-v2/city-teleport", + "license_type": "commercial", + "date": "2025-09-18T01:15:37.483Z", + "group": { + "key": "image-apps-v2", + "label": "City Teleport" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-apps-v2/city-teleport", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-apps-v2/city-teleport queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-apps-v2/city-teleport", + "category": "image-to-image", + "thumbnailUrl": "https://fal.media/files/elephant/dorvx78TXh0JIi4CASrhw_967925f37afb4f3489b89712b0639311.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-apps-v2/city-teleport", + "documentationUrl": "https://fal.ai/models/fal-ai/image-apps-v2/city-teleport/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageAppsV2CityTeleportInput": { + "x-fal-order-properties": [ + "person_image_url", + "city_image_url", + "city_name", + "photo_shot", + "camera_angle", + "aspect_ratio" + ], + "type": "object", + "properties": { + "city_image_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Optional city background image URL. When provided, the person will be blended into this custom scene.", + "title": "City Image Url" + }, + "aspect_ratio": { + "description": "Aspect ratio for 4K output", + "$ref": "#/components/schemas/AspectRatio" + }, + "city_name": { + "examples": [ + "Paris" + ], + "title": "City Name", + "type": "string", + "description": "City name (used when city_image_url is not provided)" + }, + "photo_shot": { + "enum": [ + "extreme_close_up", + "close_up", + "medium_close_up", + "medium_shot", + "medium_long_shot", + "long_shot", + "extreme_long_shot", + "full_body" + ], + "description": "Type of photo shot", + "type": "string", + "title": "Photo Shot", + "default": "medium_shot" + }, + "camera_angle": { + "enum": [ + "eye_level", + "low_angle", + "high_angle", + "dutch_angle", + "birds_eye_view", + "worms_eye_view", + "overhead", + "side_angle" + ], + "description": "Camera angle for the shot", + "type": "string", + "title": "Camera Angle", + "default": "eye_level" + }, + "person_image_url": { + "examples": [ + "https://v3.fal.media/files/kangaroo/qsFWu-bO3FwcTQSym9HeL_e52f70668f2940a3b4ea2cab54fcb65b.png" + ], + "title": "Person Image Url", + "type": "string", + "description": "Person photo URL" + } + }, + "title": "CityTeleportInput", + "required": [ + "person_image_url", + "city_name" + ] + }, + "ImageAppsV2CityTeleportOutput": { + "x-fal-order-properties": [ + "images", + "inference_time_ms" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/zebra/IxujEn7zMTwi7JwUFayG0_d0810ad048a24c6d9d80a68c4325a675.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "Person teleported to city location", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "inference_time_ms": { + "examples": [ + 15234 + ], + "title": "Inference Time Ms", + "type": "integer", + "description": "Total inference time in milliseconds" + } + }, + "title": "CityTeleportOutput", + "required": [ + "images", + "inference_time_ms" + ] + }, + "AspectRatio": { + "x-fal-order-properties": [ + "ratio" + ], + "type": "object", + "properties": { + "ratio": { + "enum": [ + "1:1", + "16:9", + "9:16", + "4:3", + "3:4" + ], + "description": "Aspect ratio for 4K resolution output", + "type": "string", + "title": "Ratio", + "default": "1:1" + } + }, + "title": "AspectRatio", + "description": "Aspect ratio model that calculates 4K resolution dimensions" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the image in pixels.", + "examples": [ + 1024 + ] + }, + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + 
{ + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the image in pixels.", + "examples": [ + 1024 + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-apps-v2/city-teleport/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/city-teleport/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/city-teleport": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageAppsV2CityTeleportInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/city-teleport/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageAppsV2CityTeleportOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-apps-v2/age-modify", + "metadata": { + "display_name": "Age Modify", + "category": "image-to-image", + "description": "Modify a face to look younger or older while keeping identity realistic.", + "status": "active", + "tags": [ + "age-transformation", + "face-editing" + ], + "updated_at": "2026-01-26T21:42:53.242Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/panda/HOgA2l7XskHnD0rcabkPc_43ce3b12df8043d4aa6c9b98497ea32b.jpg", + "model_url": "https://fal.run/fal-ai/image-apps-v2/age-modify", + "license_type": "commercial", + "date": "2025-09-18T01:02:59.169Z", + "group": { + "key": "image-apps-v2", + "label": "Age Modify" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-apps-v2/age-modify", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-apps-v2/age-modify queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-apps-v2/age-modify", + "category": "image-to-image", + "thumbnailUrl": "https://fal.media/files/panda/HOgA2l7XskHnD0rcabkPc_43ce3b12df8043d4aa6c9b98497ea32b.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-apps-v2/age-modify", + "documentationUrl": "https://fal.ai/models/fal-ai/image-apps-v2/age-modify/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageAppsV2AgeModifyInput": { + "x-fal-order-properties": [ + "image_url", + "target_age", + "preserve_identity", + "aspect_ratio" + ], + "type": "object", + "properties": { + "image_url": { + "examples": [ + "https://v3.fal.media/files/lion/s2GShUC7AB9i-ypYV0DbI_1b5ca4fe5d7e477fb4501acf9a1c43bc.png" + ], + "title": "Image Url", + "type": "string", + "description": "Portrait image URL for age modification" + }, + "aspect_ratio": { + "description": "Aspect ratio for 4K output (default: 3:4 for portraits)", + "$ref": "#/components/schemas/AspectRatio" + }, + "preserve_identity": { + "title": "Preserve Identity", + "type": "boolean", + "default": true + }, + "target_age": { + "minimum": 6, + "title": "Target Age", + "type": "integer", + "maximum": 100, + "default": 30 + } + }, + "title": "AgeModifyInput", + "required": [ + "image_url" + ] + }, + "ImageAppsV2AgeModifyOutput": { + "x-fal-order-properties": [ + "images", + "inference_time_ms" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/monkey/lPoI9oYu6X_I1SvqOurj0_ce7daf32a7a146f9a627c0578bd4a747.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "Portrait with modified age", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "inference_time_ms": { + "examples": [ + 15234 + ], + "title": "Inference Time Ms", + "type": "integer", + "description": "Total inference time in milliseconds" + } + }, + "title": "AgeModifyOutput", + "required": [ + "images", + "inference_time_ms" + ] + }, + "AspectRatio": { + "x-fal-order-properties": [ + "ratio" + ], + "type": "object", + "properties": { + "ratio": { + "enum": [ + "1:1", + "16:9", + "9:16", + "4:3", + "3:4" + ], + "description": "Aspect ratio for 4K resolution output", + "type": "string", + "title": "Ratio", + "default": "1:1" + } + }, + "title": "AspectRatio", + "description": "Aspect ratio model that calculates 4K resolution dimensions" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the image in pixels.", + "examples": [ + 1024 + ] + }, + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. 
It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the image in pixels.", + "examples": [ + 1024 + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-apps-v2/age-modify/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/age-modify/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/age-modify": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageAppsV2AgeModifyInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/age-modify/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageAppsV2AgeModifyOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-apps-v2/makeup-application", + "metadata": { + "display_name": "Makeup Changer", + "category": "image-to-image", + "description": "Apply realistic makeup styles with adjustable intensity.", + "status": "active", + "tags": [ + "makeup", + "transform" + ], + "updated_at": "2026-01-26T21:42:53.366Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/penguin/v3-_Ppam38Pq8cZ70oZPM_03aaba849a774a1f8e3a4bf4fbf6777f.jpg", + "model_url": "https://fal.run/fal-ai/image-apps-v2/makeup-application", + "license_type": "commercial", + "date": "2025-09-18T00:58:57.450Z", + "group": { + "key": "image-apps-v2", + "label": "Makeup Changer" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-apps-v2/makeup-application", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-apps-v2/makeup-application queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-apps-v2/makeup-application", + "category": "image-to-image", + "thumbnailUrl": "https://fal.media/files/penguin/v3-_Ppam38Pq8cZ70oZPM_03aaba849a774a1f8e3a4bf4fbf6777f.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-apps-v2/makeup-application", + "documentationUrl": "https://fal.ai/models/fal-ai/image-apps-v2/makeup-application/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageAppsV2MakeupApplicationInput": { + "x-fal-order-properties": [ + "image_url", + "makeup_style", + "intensity", + "aspect_ratio" + ], + "type": "object", + "properties": { + "aspect_ratio": { + "description": "Aspect ratio for 4K output (default: 3:4 for portraits)", + "$ref": "#/components/schemas/AspectRatio" + }, + "intensity": { + "enum": [ + "light", + "medium", + "heavy", + "dramatic" + ], + "title": "Intensity", + "type": "string", + "default": "medium" + }, + "makeup_style": { + "enum": [ + "natural", + "glamorous", + "smoky_eyes", + "bold_lips", + "no_makeup", + "remove_makeup", + "dramatic", + "bridal", + "professional", + "korean_style", + "artistic" + ], + "title": "Makeup Style", + "type": "string", + "default": "natural" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/elephant/jpdD30YLw3OfPdVDxq1-D_1ec2e27f3e7d400fbf5f7aa2b80e89f0.png" + ], + "title": "Image Url", + "type": "string", + "description": "Portrait image URL for makeup application" + } + }, + "title": "MakeupApplicationInput", + "required": [ + "image_url" + ] + }, + "ImageAppsV2MakeupApplicationOutput": { + "x-fal-order-properties": [ + "images", + "inference_time_ms" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/tiger/nFRi9Cemv4B5-felhf4iu_5c34cfde265443f897463f5b29b6c0b9.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "Portrait with applied makeup", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "inference_time_ms": { + "examples": [ + 15234 + ], + "title": "Inference Time Ms", + "type": "integer", + "description": "Total inference time in milliseconds" + } + }, + "title": "MakeupApplicationOutput", + "required": [ + "images", + "inference_time_ms" + ] + }, + "AspectRatio": { + "x-fal-order-properties": [ + "ratio" + ], + "type": "object", + "properties": { + "ratio": { + "enum": [ + "1:1", + "16:9", + "9:16", + "4:3", + "3:4" + ], + "description": "Aspect ratio for 4K resolution output", + "type": "string", + "title": "Ratio", + "default": "1:1" + } + }, + "title": "AspectRatio", + "description": "Aspect ratio model that calculates 4K resolution dimensions" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the image in pixels.", + "examples": [ + 1024 + ] + }, + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. 
It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the image in pixels.", + "examples": [ + 1024 + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-apps-v2/makeup-application/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/makeup-application/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/makeup-application": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageAppsV2MakeupApplicationInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-apps-v2/makeup-application/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageAppsV2MakeupApplicationOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-edit/inpaint", + "metadata": { + "display_name": "Qwen Image Edit", + "category": "image-to-image", + "description": "Inpainting Endpoint for the Qwen Edit Image editing model.", + "status": "active", + "tags": [ + "image-to-image", + "inpainting", + "qwen-image" + ], + "updated_at": "2026-01-26T21:42:53.495Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/penguin/h_viUqwLJqnKY98Is1-HS_5c63021b7458464296a4fa264837f480.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-edit/inpaint", + "license_type": "commercial", + "date": "2025-09-17T21:37:12.411Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image-edit/inpaint", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-image-edit/inpaint queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-image-edit/inpaint", + "category": "image-to-image", + "thumbnailUrl": "https://fal.media/files/penguin/h_viUqwLJqnKY98Is1-HS_5c63021b7458464296a4fa264837f480.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-edit/inpaint", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-edit/inpaint/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImageEditInpaintInput": { + "title": "BaseQwenEditInpaintImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Change the ball to a black and white football" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the image with" + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image." + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "description": "Acceleration level for image generation. Options: 'none', 'regular'. Higher acceleration increases speed. 'regular' balances speed and quality.", + "type": "string", + "examples": [ + "regular" + ], + "title": "Acceleration", + "default": "regular" + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 4 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "negative_prompt": { + "examples": [ + "blurry, ugly" + ], + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt for the generation", + "default": " " + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/image_kontext_inpaint.jpeg" + ], + "title": "Image URL", + "type": "string", + "description": "The URL of the image to edit." 
+ }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "strength": { + "minimum": 0.01, + "maximum": 1, + "type": "number", + "title": "Strength", + "description": "Strength of noising process for inpainting", + "default": 0.93 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "mask_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/mask_kontext_inpaint.png" + ], + "title": "Mask URL", + "type": "string", + "description": "The URL of the mask for inpainting" + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 30 + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "image_url", + "negative_prompt", + "acceleration", + "mask_url", + "strength" + ], + "required": [ + "prompt", + "image_url", + "mask_url" + ] + }, + "QwenImageEditInpaintOutput": { + "title": "QwenImageInpaintOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "examples": [ + [ + { + "height": 768, + "content_type": "image/jpeg", + "url": "https://storage.googleapis.com/falserverless/example_outputs/qwen_edit_inpaint_output.png", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-edit/inpaint/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit/inpaint/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit/inpaint": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEditInpaintInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit/inpaint/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEditInpaintOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux/srpo/image-to-image", + "metadata": { + "display_name": "FLUX.1 SRPO [dev]", + "category": "image-to-image", + "description": "FLUX.1 SRPO [dev] is a 12 billion parameter flow transformer that generates high-quality images from text with incredible aesthetics. It is suitable for personal and commercial use.\n", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:55.324Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/lion/h6ZndwWNcRsiobOzKCSmL_4ab6291336a74f78b4c90d9b42e97ab0.jpg", + "model_url": "https://fal.run/fal-ai/flux/srpo/image-to-image", + "license_type": "commercial", + "date": "2025-09-15T23:04:24.590Z", + "group": { + "key": "srpo-models-og", + "label": "Image to Image [dev]" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux/srpo/image-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux/srpo/image-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux/srpo/image-to-image", + "category": "image-to-image", + "thumbnailUrl": "https://fal.media/files/lion/h6ZndwWNcRsiobOzKCSmL_4ab6291336a74f78b4c90d9b42e97ab0.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux/srpo/image-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/flux/srpo/image-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxSrpoImageToImageInput": { + "title": "BaseSRPOImageToInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A cat dressed as a wizard with a background of a mystic forest." + ], + "description": "The prompt to generate an image from.", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "title": "Num Images", + "maximum": 4, + "default": 1 + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "description": "The speed of the generation. The higher the speed, the faster the generation.", + "type": "string", + "title": "Acceleration", + "default": "none" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://fal.media/files/koala/Chls9L2ZnvuipUTEwlnJC.png" + ], + "description": "The URL of the image to generate an image from.", + "type": "string", + "title": "Image URL" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "strength": { + "minimum": 0.01, + "description": "The strength of the initial image. Higher strength values are better for this model.", + "type": "number", + "title": "Strength", + "maximum": 1, + "default": 0.95 + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "title": "Seed" + }, + "guidance_scale": { + "minimum": 1, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "type": "number", + "title": "Guidance scale (CFG)", + "maximum": 20, + "default": 4.5 + }, + "num_inference_steps": { + "minimum": 10, + "description": "The number of inference steps to perform.", + "type": "integer", + "title": "Num Inference Steps", + "maximum": 50, + "default": 40 + } + }, + "x-fal-order-properties": [ + "image_url", + "strength", + "num_inference_steps", + "prompt", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "acceleration" + ], + "required": [ + "image_url", + "prompt" + ] + }, + "FluxSrpoImageToImageOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "description": "The generated image files info.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "seed": { + 
"description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux/srpo/image-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux/srpo/image-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux/srpo/image-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxSrpoImageToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux/srpo/image-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxSrpoImageToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-1/srpo/image-to-image", + "metadata": { + "display_name": "FLUX.1 SRPO [dev]", + "category": "image-to-image", + "description": "FLUX.1 SRPO [dev] is a 12 billion parameter flow transformer that generates high-quality images from text with incredible aesthetics. 
It is suitable for personal and commercial use.\n", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:55.629Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/panda/Lw4P1PGZPkZkAPI3u_Mxt_709597e8d0024e10ab25dfdf31963d0a.jpg", + "model_url": "https://fal.run/fal-ai/flux-1/srpo/image-to-image", + "license_type": "commercial", + "date": "2025-09-15T22:05:45.744Z", + "group": { + "key": "srpo-models", + "label": "Image to Image [dev]" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-1/srpo/image-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-1/srpo/image-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-1/srpo/image-to-image", + "category": "image-to-image", + "thumbnailUrl": "https://fal.media/files/panda/Lw4P1PGZPkZkAPI3u_Mxt_709597e8d0024e10ab25dfdf31963d0a.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-1/srpo/image-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-1/srpo/image-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux1SrpoImageToImageInput": { + "title": "BaseSRPOFlux1ImageToInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A cat dressed as a wizard with a background of a mystic forest." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "maximum": 4, + "title": "Num Images", + "default": 1 + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "title": "Acceleration", + "type": "string", + "description": "The speed of the generation. The higher the speed, the faster the generation.", + "default": "regular" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://fal.media/files/koala/Chls9L2ZnvuipUTEwlnJC.png" + ], + "title": "Image URL", + "type": "string", + "description": "The URL of the image to generate an image from." 
+ }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "strength": { + "minimum": 0.01, + "description": "The strength of the initial image. Higher strength values are better for this model.", + "type": "number", + "maximum": 1, + "title": "Strength", + "default": 0.95 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "num_inference_steps": { + "minimum": 10, + "description": "The number of inference steps to perform.", + "type": "integer", + "maximum": 50, + "title": "Num Inference Steps", + "default": 40 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "guidance_scale": { + "minimum": 1, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "type": "number", + "maximum": 20, + "title": "Guidance scale (CFG)", + "default": 4.5 + } + }, + "x-fal-order-properties": [ + "image_url", + "strength", + "num_inference_steps", + "prompt", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "acceleration" + ], + "required": [ + "image_url", + "prompt" + ] + }, + "Flux1SrpoImageToImageOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-1/srpo/image-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-1/srpo/image-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-1/srpo/image-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux1SrpoImageToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-1/srpo/image-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux1SrpoImageToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-edit-lora", + "metadata": { + "display_name": "Qwen Image Edit Lora", + "category": "image-to-image", + "description": "LoRA inference endpoint for the Qwen Image Editing model.", + "status": "active", + "tags": [ + "image-to-image", + "image-editing", + "lora" + ], + "updated_at": "2026-01-26T21:42:57.202Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/koala/TLOrc_UR0t-P9eEqdlnKO_7873734a28ad4a5085fdb207645a902f.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-edit-lora", + "license_type": "commercial", + "date": "2025-09-10T12:39:02.983Z", + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/qwen-image-edit-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/qwen-image-edit-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image-edit-lora", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-image-edit-lora queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-image-edit-lora", + "category": "image-to-image", + "thumbnailUrl": "https://fal.media/files/koala/TLOrc_UR0t-P9eEqdlnKO_7873734a28ad4a5085fdb207645a902f.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-lora", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-lora/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImageEditLoraInput": { + "title": "BaseQwenEditImageLoRAInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Change bag to apple macbook" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the image with" + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image." + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "description": "Acceleration level for image generation. Options: 'none', 'regular'. Higher acceleration increases speed. 'regular' balances speed and quality.", + "type": "string", + "examples": [ + "regular" + ], + "title": "Acceleration", + "default": "none" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/koala/oei_-iPIYFnhdB8SxojND_qwen-edit-res.png" + ], + "title": "Image URL", + "type": "string", + "description": "The URL of the image to edit." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ", + "default": false + }, + "loras": { + "description": "\n The LoRAs to use for the image generation. 
You can use up to 3 LoRAs\n and they will be merged together to generate the final image.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "examples": [], + "title": "Loras", + "default": [] + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 4 + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 30 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "negative_prompt": { + "examples": [ + "blurry, ugly" + ], + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt for the generation", + "default": " " + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "image_url", + "negative_prompt", + "acceleration", + "loras" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "QwenImageEditLoraOutput": { + "title": "QwenImageOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "examples": [ + [ + { + "height": 768, + "content_type": "image/jpeg", + "url": "https://v3.fal.media/files/elephant/P-YCIAg6wtFn1hsF34fzL_qwen-edit.png", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Scale", + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + } + }, + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-edit-lora/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-lora/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "/fal-ai/qwen-image-edit-lora": {
+ "post": {
+ "requestBody": {
+ "required": true,
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/QwenImageEditLoraInput"
+ }
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "description": "The request status.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/QueueStatus"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "/fal-ai/qwen-image-edit-lora/requests/{request_id}": {
+ "get": {
+ "parameters": [
+ {
+ "name": "request_id",
+ "in": "path",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "description": "Request ID"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "Result of the request.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/QwenImageEditLoraOutput"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "servers": [
+ {
+ "url": "https://queue.fal.run"
+ }
+ ],
+ "security": [
+ {
+ "apiKeyAuth": []
+ }
+ ]
+ }
+ },
+ {
+ "endpoint_id": "fal-ai/vidu/reference-to-image",
+ "metadata": {
+ "display_name": "Vidu",
+ "category": "image-to-image",
+ "description": "Vidu Reference-to-Image creates images by using reference images and combining them with a prompt.",
+ "status": "active",
+ "tags": [
+ "images-to-image"
+ ],
+ "updated_at": "2026-01-26T21:42:57.947Z",
+ "is_favorited": false,
+ "thumbnail_url": "https://storage.googleapis.com/falserverless/web-examples/vidu/vidu.webp",
+ "model_url": "https://fal.run/fal-ai/vidu/reference-to-image",
+ "license_type": "commercial",
+ "date": "2025-09-09T11:42:32.318Z",
+ "highlighted": false,
+ "kind": "inference",
+ "pinned": false
+ },
+ "openapi": {
+ "openapi": "3.0.4",
+ "info": {
+ "title": "Queue OpenAPI for fal-ai/vidu/reference-to-image",
+ "version": "1.0.0",
+ "description": "The OpenAPI schema for the fal-ai/vidu/reference-to-image queue.",
+ "x-fal-metadata": {
+ "endpointId": "fal-ai/vidu/reference-to-image",
+ "category": "image-to-image",
+ "thumbnailUrl": "https://storage.googleapis.com/falserverless/web-examples/vidu/vidu.webp",
+ "playgroundUrl": "https://fal.ai/models/fal-ai/vidu/reference-to-image",
+ "documentationUrl": "https://fal.ai/models/fal-ai/vidu/reference-to-image/api"
+ }
+ },
+ "components": {
+ "securitySchemes": {
+ "apiKeyAuth": {
+ "type": "apiKey",
+ "in": "header",
+ "name": "Authorization",
+ "description": "Fal Key"
+ }
+ },
+ "schemas": {
+ "QueueStatus": {
+ "type": "object",
+ "properties": {
+ "status": {
+ "type": "string",
+ "enum": [
+ "IN_QUEUE",
+ "IN_PROGRESS",
+ "COMPLETED"
+ ]
+ },
+ "request_id": {
+ "type": "string",
+ "description": "The request id."
+ },
+ "response_url": {
+ "type": "string",
+ "description": "The response url."
+ },
+ "status_url": {
+ "type": "string",
+ "description": "The status url."
+ },
+ "cancel_url": {
+ "type": "string",
+ "description": "The cancel url."
+ },
+ "logs": {
+ "type": "object",
+ "description": "The logs.",
+ "additionalProperties": true
+ },
+ "metrics": {
+ "type": "object",
+ "description": "The metrics.",
+ "additionalProperties": true
+ },
+ "queue_position": {
+ "type": "integer",
+ "description": "The queue position."
+ }
+ },
+ "required": [
+ "status",
+ "request_id"
+ ]
+ },
+ "ViduReferenceToImageInput": {
+ "title": "ReferenceToImageRequest",
+ "type": "object",
+ "properties": {
+ "prompt": {
+ "examples": [
+ "The little devil is looking at the apple on the beach and walking around it."
+ ], + "title": "Prompt", + "type": "string", + "maxLength": 1500, + "description": "Text prompt for video generation, max 1500 characters" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the output video", + "default": "16:9" + }, + "reference_image_urls": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/web-examples/vidu/new-examples/reference1.png", + "https://storage.googleapis.com/falserverless/web-examples/vidu/new-examples/reference2.png", + "https://storage.googleapis.com/falserverless/web-examples/vidu/new-examples/reference3.png" + ] + ], + "title": "Reference Image Urls", + "type": "array", + "description": "URLs of the reference images to use for consistent subject appearance", + "items": { + "type": "string" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for generation" + } + }, + "x-fal-order-properties": [ + "prompt", + "reference_image_urls", + "aspect_ratio", + "seed" + ], + "required": [ + "prompt", + "reference_image_urls" + ] + }, + "ViduReferenceToImageOutput": { + "title": "ReferenceToImageOutput", + "type": "object", + "properties": { + "image": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/model_tests/video_models/general-1-2025-09-09T10_20_19Z.png" + } + ], + "title": "Image", + "description": "The edited image", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + } + }, + "x-fal-order-properties": [ + "image" + ], + "required": [ + "image" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/vidu/reference-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "The request status.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/QueueStatus"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "/fal-ai/vidu/reference-to-image/requests/{request_id}/cancel": {
+ "put": {
+ "parameters": [
+ {
+ "name": "request_id",
+ "in": "path",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "description": "Request ID"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "The request was cancelled.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "type": "object",
+ "properties": {
+ "success": {
+ "type": "boolean",
+ "description": "Whether the request was cancelled successfully."
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "/fal-ai/vidu/reference-to-image": {
+ "post": {
+ "requestBody": {
+ "required": true,
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ViduReferenceToImageInput"
+ }
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "description": "The request status.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/QueueStatus"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "/fal-ai/vidu/reference-to-image/requests/{request_id}": {
+ "get": {
+ "parameters": [
+ {
+ "name": "request_id",
+ "in": "path",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "description": "Request ID"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "Result of the request.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/ViduReferenceToImageOutput"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "servers": [
+ {
+ "url": "https://queue.fal.run"
+ }
+ ],
+ "security": [
+ {
+ "apiKeyAuth": []
+ }
+ ]
+ }
+ },
+ {
+ "endpoint_id": "fal-ai/bytedance/seedream/v4/edit",
+ "metadata": {
+ "display_name": "Bytedance Seedream v4 Edit",
+ "category": "image-to-image",
+ "description": "A new-generation image creation model by ByteDance, Seedream 4.0 integrates image generation and image editing capabilities into a single, unified architecture.",
+ "status": "active",
+ "tags": [
+ "stylized",
+ "transform",
+ "editing"
+ ],
+ "updated_at": "2026-01-26T21:42:58.072Z",
+ "is_favorited": false,
+ "thumbnail_url": "https://v3b.fal.media/files/b/elephant/lBQrYkMMpOYBrsUc7T56n_3f3e07d132b742eda53bb80c0647c768.jpg",
+ "model_url": "https://fal.run/fal-ai/bytedance/seedream/v4/edit",
+ "license_type": "commercial",
+ "date": "2025-09-09T08:30:47.714Z",
+ "group": {
+ "key": "bytedance-image",
+ "label": "Seedream 4.0 Edit"
+ },
+ "highlighted": false,
+ "kind": "inference",
+ "pinned": false
+ },
+ "openapi": {
+ "openapi": "3.0.4",
+ "info": {
+ "title": "Queue OpenAPI for fal-ai/bytedance/seedream/v4/edit",
+ "version": "1.0.0",
+ "description": "The OpenAPI schema for the fal-ai/bytedance/seedream/v4/edit queue.",
+ "x-fal-metadata": {
+ "endpointId": "fal-ai/bytedance/seedream/v4/edit",
+ "category": "image-to-image",
+ "thumbnailUrl": "https://v3b.fal.media/files/b/elephant/lBQrYkMMpOYBrsUc7T56n_3f3e07d132b742eda53bb80c0647c768.jpg",
+ "playgroundUrl": "https://fal.ai/models/fal-ai/bytedance/seedream/v4/edit",
+ "documentationUrl": "https://fal.ai/models/fal-ai/bytedance/seedream/v4/edit/api"
+ }
+ },
+ "components": {
+ "securitySchemes": {
+ "apiKeyAuth": {
+ "type": "apiKey",
+ "in": "header",
+ "name": "Authorization",
+ "description": "Fal Key"
+ }
+ },
+ "schemas": {
+ "QueueStatus": {
+ "type": "object",
+ "properties": {
+ "status": {
+ "type": "string",
+ "enum": [
+ "IN_QUEUE",
+ "IN_PROGRESS",
+ 
"COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "BytedanceSeedreamV4EditInput": { + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_images", + "max_images", + "seed", + "sync_mode", + "enable_safety_checker", + "enhance_prompt_mode", + "image_urls" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Dress the model in the clothes and hat. Add a cat to the scene and change the background to a Victorian era building." + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt used to edit the image" + }, + "num_images": { + "minimum": 1, + "maximum": 6, + "type": "integer", + "title": "Num Images", + "description": "Number of separate model generations to be run with the prompt.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9", + "auto", + "auto_2K", + "auto_4K" + ], + "type": "string" + } + ], + "description": "The size of the generated image. The minimum total image area is 921600 pixels. Failing this, the image size will be adjusted to by scaling it up, while maintaining the aspect ratio.", + "title": "Image Size", + "examples": [ + { + "height": 2160, + "width": 3840 + } + ], + "default": { + "height": 2048, + "width": 2048 + } + }, + "enhance_prompt_mode": { + "enum": [ + "standard", + "fast" + ], + "title": "Enhance Prompt Mode", + "type": "string", + "description": "The mode to use for enhancing prompt enhancement. Standard mode provides higher quality results but takes longer to generate. Fast mode provides average quality results but takes less time to generate.", + "default": "standard" + }, + "max_images": { + "minimum": 1, + "maximum": 6, + "type": "integer", + "title": "Max Images", + "description": "If set to a number greater than one, enables multi-image generation. The model will potentially return up to `max_images` images every generation, and in total, `num_images` generations will be carried out. In total, the number of images generated will be between `num_images` and `max_images*num_images`. The total number of images (image inputs + image outputs) must not exceed 15", + "default": 1 + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_safety_checker": { + "examples": [ + true + ], + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed to control the stochasticity of image generation." 
+ }, + "image_urls": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/example_inputs/seedream4_edit_input_1.png", + "https://storage.googleapis.com/falserverless/example_inputs/seedream4_edit_input_2.png", + "https://storage.googleapis.com/falserverless/example_inputs/seedream4_edit_input_3.png", + "https://storage.googleapis.com/falserverless/example_inputs/seedream4_edit_input_4.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "List of URLs of input images for editing. Presently, up to 10 image inputs are allowed. If over 10 images are sent, only the last 10 will be used.", + "items": { + "type": "string" + } + } + }, + "title": "SeedDream4EditInput", + "required": [ + "prompt", + "image_urls" + ] + }, + "BytedanceSeedreamV4EditOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/seedream4_edit_output.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "Generated images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "examples": [ + 746406749 + ], + "title": "Seed", + "type": "integer", + "description": "Seed used for generation" + } + }, + "title": "SeedDream4EditOutput", + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/bytedance/seedream/v4/edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedream/v4/edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedream/v4/edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BytedanceSeedreamV4EditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedream/v4/edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BytedanceSeedreamV4EditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan/v2.2-a14b/image-to-image", + "metadata": { + "display_name": "Wan", + "category": "image-to-image", + "description": "Wan 2.2's 14B model edit high-resolution, photorealistic images with powerful prompt understanding and fine-grained visual detail", + "status": "active", + "tags": [ + "image-to-image" + ], + "updated_at": "2026-01-26T21:42:58.569Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-2.jpg", + "model_url": "https://fal.run/fal-ai/wan/v2.2-a14b/image-to-image", + "license_type": "commercial", + "date": "2025-09-03T07:19:12.410Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan/v2.2-a14b/image-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan/v2.2-a14b/image-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan/v2.2-a14b/image-to-image", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-2.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan/v2.2-a14b/image-to-image", + "documentationUrl": 
"https://fal.ai/models/fal-ai/wan/v2.2-a14b/image-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanV22A14bImageToImageInput": { + "title": "WanI2IRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A cinematic shot of an ancient city at sunset, intricate stone buildings, warm golden light" + ], + "description": "The text prompt to guide image generation.", + "type": "string", + "title": "Prompt" + }, + "shift": { + "minimum": 1, + "maximum": 10, + "type": "number", + "title": "Shift", + "default": 2 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size" + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "description": "Acceleration level to use. The more acceleration, the faster the generation, but with lower quality. The recommended value is 'regular'.", + "type": "string", + "title": "Acceleration", + "examples": [ + "regular" + ], + "default": "regular" + }, + "enable_safety_checker": { + "examples": [ + true + ], + "description": "If set to true, input data will be checked for safety before processing.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": false + }, + "guidance_scale": { + "minimum": 1, + "maximum": 10, + "type": "number", + "title": "Guidance Scale", + "description": "Classifier-free guidance scale.", + "default": 3.5 + }, + "image_format": { + "enum": [ + "png", + "jpeg" + ], + "description": "The format of the output image.", + "type": "string", + "title": "Image Format", + "examples": [ + "jpeg" + ], + "default": "jpeg" + }, + "negative_prompt": { + "description": "Negative prompt for video generation.", + "type": "string", + "title": "Negative Prompt", + "default": "" + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "9:16", + "1:1" + ], + "description": "Aspect ratio of the generated image. 
If 'auto', the aspect ratio will be determined automatically based on the input image.", + "type": "string", + "title": "Aspect Ratio", + "default": "auto" + }, + "enable_output_safety_checker": { + "examples": [ + false + ], + "description": "If set to true, output image will be checked for safety after generation.", + "type": "boolean", + "title": "Enable Output Safety Checker", + "default": false + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/wan-image-to-image-input.png" + ], + "description": "URL of the input image.", + "type": "string", + "title": "Image URL" + }, + "strength": { + "description": "Denoising strength. 1.0 = fully remake; 0.0 = preserve original.", + "type": "number", + "minimum": 0, + "maximum": 1, + "title": "Strength", + "default": 0.5 + }, + "guidance_scale_2": { + "description": "Guidance scale for the second stage of the model. This is used to control the adherence to the prompt in the second stage of the model.", + "type": "number", + "minimum": 1, + "maximum": 10, + "title": "Guidance Scale (2nd Stage)", + "examples": [ + 4 + ], + "default": 4 + }, + "enable_prompt_expansion": { + "examples": [ + false + ], + "description": "Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": false + }, + "num_inference_steps": { + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "type": "integer", + "minimum": 2, + "maximum": 40, + "title": "Number of Inference Steps", + "examples": [ + 27 + ], + "default": 27 + }, + "seed": { + "description": "Random seed for reproducibility. If None, a random seed is chosen.", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "image_url", + "prompt", + "strength", + "negative_prompt", + "seed", + "aspect_ratio", + "num_inference_steps", + "enable_safety_checker", + "enable_output_safety_checker", + "enable_prompt_expansion", + "acceleration", + "guidance_scale", + "guidance_scale_2", + "shift", + "image_size", + "image_format" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "WanV22A14bImageToImageOutput": { + "title": "WanI2IResponse", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A cinematic portrait of a woman in natural light, 85mm look." 
+ ], + "description": "The text prompt used for image generation.", + "type": "string", + "title": "Prompt", + "default": "" + }, + "image": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/wan-image-to-image-output.png" + } + ], + "description": "The generated image file.", + "title": "Image", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "seed": { + "description": "The seed used for generation.", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "image", + "prompt", + "seed" + ], + "required": [ + "image", + "seed" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Height", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Width", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan/v2.2-a14b/image-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-a14b/image-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-a14b/image-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanV22A14bImageToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-a14b/image-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanV22A14bImageToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/uso", + "metadata": { + "display_name": "Uso", + "category": "image-to-image", + "description": "Use USO to perform subject driven generations using reference image.", + "status": "active", + "tags": [ + "image-to-image" + ], + "updated_at": "2026-01-26T21:42:59.317Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/kangaroo/5WCxaQjQ18eFu6qeVTJ2w_700fa4cd63ee445ca3d8d6cd94a356d5.jpg", + "model_url": "https://fal.run/fal-ai/uso", + "license_type": "commercial", + "date": "2025-08-30T11:28:00.742Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/uso", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/uso queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/uso", + "category": "image-to-image", + "thumbnailUrl": "https://fal.media/files/kangaroo/5WCxaQjQ18eFu6qeVTJ2w_700fa4cd63ee445ca3d8d6cd94a356d5.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/uso", + "documentationUrl": "https://fal.ai/models/fal-ai/uso/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "UsoInput": { + "title": "USOInputImage", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A handsome man." + ], + "title": "Prompt", + "type": "string", + "description": "Text prompt for generation. 
Can be empty for pure style transfer.", + "default": "" + }, + "num_images": { + "minimum": 1, + "title": "Number of Images", + "type": "integer", + "maximum": 4, + "description": "Number of images to generate in parallel.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image. ", + "default": "square_hd" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "Output image format. PNG preserves transparency, JPEG is smaller.", + "default": "png" + }, + "keep_size": { + "title": "Keep Input Size", + "type": "boolean", + "description": "Preserve the layout and dimensions of the input content image. Useful for style transfer.", + "default": false + }, + "input_image_urls": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/USO/style3.webp", + "https://storage.googleapis.com/falserverless/USO/style4.webp" + ] + ], + "title": "Reference Images", + "type": "array", + "description": "List of image URLs in order: [content_image, style_image, extra_style_image].", + "items": { + "type": "string" + } + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If true, wait for generation and upload before returning. Increases latency but provides immediate access to images.", + "default": false + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance Scale (CFG)", + "type": "number", + "maximum": 10, + "description": "How closely to follow the prompt. Higher values stick closer to the prompt.", + "default": 4 + }, + "num_inference_steps": { + "minimum": 1, + "title": "Inference Steps", + "type": "integer", + "maximum": 50, + "description": "Number of denoising steps. More steps can improve quality but increase generation time.", + "default": 28 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducible generation. Use same seed for consistent results." + }, + "negative_prompt": { + "examples": [ + "blurry, low quality, distorted, ugly, bad anatomy", + "cartoon, anime, illustration", + "" + ], + "title": "Negative Prompt", + "type": "string", + "description": "What you don't want in the image. 
Use it to exclude unwanted elements, styles, or artifacts.", + "default": "" + }, + "enable_safety_checker": { + "title": "Safety Checker", + "type": "boolean", + "description": "Enable NSFW content detection and filtering.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "input_image_urls", + "image_size", + "negative_prompt", + "num_inference_steps", + "guidance_scale", + "keep_size", + "num_images", + "seed", + "sync_mode", + "enable_safety_checker", + "output_format" + ], + "required": [ + "input_image_urls" + ] + }, + "UsoOutput": { + "title": "USOOutputImage", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation" + }, + "images": { + "examples": [ + [ + { + "height": 1024, + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/USO/G6n97WN0goYpXPeiHaBnP.png", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated images with applied style and/or subject customization", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "description": "Performance timings for different stages" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "NSFW detection results for each generated image", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Seed used for generation" + } + }, + "x-fal-order-properties": [ + "images", + "seed", + "has_nsfw_concepts", + "prompt", + "timings" + ], + "required": [ + "images", + "seed", + "has_nsfw_concepts", + "prompt", + "timings" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/uso/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/uso/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/uso": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UsoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/uso/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UsoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/gemini-25-flash-image/edit", + "metadata": { + "display_name": "Gemini 2.5 Flash Image", + "category": "image-to-image", + "description": "Gemini 2.5 Flash Image is Google's state-of-the-art image generation and editing model\n", + "status": "active", + "tags": [ + "image-editing" + ], + "updated_at": "2026-01-26T21:43:00.452Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/tiger/w7rvVqvAQiYg2cDSZTn2i_b6895b8a0f864c42a33bfd040dc1228c.jpg", + "model_url": "https://fal.run/fal-ai/gemini-25-flash-image/edit", + "license_type": "commercial", + "date": "2025-08-26T01:22:27.733Z", + "group": { + "key": "gemini-25-flash", + "label": "Image Editing" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/gemini-25-flash-image/edit", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/gemini-25-flash-image/edit queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/gemini-25-flash-image/edit", + "category": "image-to-image", + "thumbnailUrl": "https://fal.media/files/tiger/w7rvVqvAQiYg2cDSZTn2i_b6895b8a0f864c42a33bfd040dc1228c.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/gemini-25-flash-image/edit", + "documentationUrl": 
"https://fal.ai/models/fal-ai/gemini-25-flash-image/edit/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Gemini25FlashImageEditInput": { + "x-fal-order-properties": [ + "prompt", + "num_images", + "aspect_ratio", + "output_format", + "sync_mode", + "image_urls", + "limit_generations" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "make a photo of the man driving the car down the california coastline" + ], + "description": "The prompt for image editing.", + "type": "string", + "minLength": 3, + "maxLength": 50000, + "title": "Prompt" + }, + "aspect_ratio": { + "enum": [ + "auto", + "21:9", + "16:9", + "3:2", + "4:3", + "5:4", + "1:1", + "4:5", + "3:4", + "2:3", + "9:16" + ], + "description": "The aspect ratio of the generated image.", + "type": "string", + "title": "Aspect Ratio", + "default": "auto" + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "description": "The number of images to generate.", + "title": "Number of Images", + "default": 1 + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "png" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "image_urls": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/example_inputs/nano-banana-edit-input.png", + "https://storage.googleapis.com/falserverless/example_inputs/nano-banana-edit-input-2.png" + ] + ], + "description": "The URLs of the images to use for image-to-image generation or image editing.", + "type": "array", + "title": "Image URLs", + "items": { + "type": "string" + } + }, + "limit_generations": { + "description": "Experimental parameter to limit the number of generations from each round of prompting to 1. 
Set to `True` to disregard any instructions in the prompt regarding the number of images to generate.", + "type": "boolean", + "title": "Limit Generations", + "default": false + } + }, + "title": "NanoBananaImageToImageInput", + "required": [ + "prompt", + "image_urls" + ] + }, + "Gemini25FlashImageEditOutput": { + "x-fal-order-properties": [ + "images", + "description" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "file_name": "nano-banana-multi-edit-output.png", + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/example_outputs/nano-banana-multi-edit-output.png" + } + ] + ], + "description": "The edited images.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "description": { + "description": "The description of the generated images.", + "type": "string", + "title": "Description" + } + }, + "title": "NanoBananaImageToImageOutput", + "required": [ + "images", + "description" + ] + }, + "ImageFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "ImageFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/gemini-25-flash-image/edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/gemini-25-flash-image/edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/gemini-25-flash-image/edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Gemini25FlashImageEditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/gemini-25-flash-image/edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Gemini25FlashImageEditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image/image-to-image", + "metadata": { + "display_name": "Qwen Image", + "category": "image-to-image", + "description": "Qwen-Image (Image-to-Image) transforms and edits input images with high fidelity, enabling precise style transfer, enhancement, and creative modification.", + "status": "active", + "tags": [ + "image-to-image" + ], + "updated_at": "2026-01-26T21:43:00.713Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/koala/6JAMNCSti-vm-zJeZi6hA_626cdc11d4d04560ac9523fbd61f2eac.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image/image-to-image", + "license_type": "commercial", + "date": "2025-08-25T12:14:09.649Z", + "group": { + "key": "qwen-image", + "label": "Image to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image/image-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-image/image-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-image/image-to-image", + "category": "image-to-image", + "thumbnailUrl": "https://fal.media/files/koala/6JAMNCSti-vm-zJeZi6hA_626cdc11d4d04560ac9523fbd61f2eac.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image/image-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image/image-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImageImageToImageInput": { + "title": "QwenImageI2IInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Mount Fuji with purple japanese wisteria in the foreground, clear sky, peaceful spring day, soft natural light, landscape, painted with oil brush on a wood panel with abstract mixed colors" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the image with" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "title": "Acceleration", + "type": "string", + "description": "Acceleration level for image generation. Options: 'none', 'regular', 'high'. Higher acceleration increases speed. 'regular' balances speed and quality. 'high' is recommended for images without text.", + "examples": [ + "none" + ], + "default": "none" + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image. By default, we will use the provided image for determining the image_size." + }, + "loras": { + "description": "\n The LoRAs to use for the image generation. You can use up to 3 LoRAs\n and they will be merged together to generate the final image.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "examples": [], + "title": "Loras", + "default": [] + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance scale", + "type": "number", + "maximum": 20, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 2.5 + }, + "use_turbo": { + "examples": [ + true + ], + "title": "Use Turbo", + "type": "boolean", + "description": "Enable turbo mode for faster generation with high quality. When enabled, uses optimized settings (10 steps, CFG=1.2).", + "default": false + }, + "negative_prompt": { + "examples": [ + "blurry, ugly" + ], + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt for the generation", + "default": " " + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 4, + "description": "The number of images to generate.", + "default": 1 + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/rabbit/KoIbq6nhDBDPxDQrivW-m.png" + ], + "title": "Image Url", + "type": "string", + "description": "The reference image to guide the generation." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "strength": { + "description": "Denoising strength. 
1.0 = fully remake; 0.0 = preserve original.", + "type": "number", + "examples": [ + 0.8 + ], + "maximum": 1, + "title": "Strength", + "minimum": 0, + "default": 0.6 + }, + "num_inference_steps": { + "minimum": 2, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 250, + "description": "The number of inference steps to perform.", + "default": 30 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "negative_prompt", + "acceleration", + "loras", + "use_turbo", + "image_url", + "strength" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "QwenImageImageToImageOutput": { + "title": "QwenImageI2IOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "examples": [ + [ + { + "height": 768, + "content_type": "image/jpeg", + "url": "https://v3.fal.media/files/elephant/AuLvZGaYemu6vs36D5zof.png", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "title": "Scale", + "type": "number", + "maximum": 4, + "description": "\n The scale of the LoRA weight. 
This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + } + }, + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image/image-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image/image-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image/image-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageImageToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image/image-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageImageToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "bria/reimagine/3.2", + "metadata": { + "display_name": "Reimagine", + "category": "image-to-image", + "description": "Reimagine uses a structure reference for generating new images while preserving the structure of an input image, guided by text prompts.\nPerfect for transforming sketches, illustrations, or photos into new illustrations. 
Trained exclusively on licensed data", + "status": "active", + "tags": [ + "bria" + ], + "updated_at": "2026-01-26T21:43:03.015Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/koala/jkGFvEL6cLvJsAPkL48qP_f74234f867b84e23aaf691d48124fb85.jpg", + "model_url": "https://fal.run/bria/reimagine/3.2", + "license_type": "commercial", + "date": "2025-08-20T06:58:56.995Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for bria/reimagine/3.2", + "version": "1.0.0", + "description": "The OpenAPI schema for the bria/reimagine/3.2 queue.", + "x-fal-metadata": { + "endpointId": "bria/reimagine/3.2", + "category": "image-to-image", + "thumbnailUrl": "https://fal.media/files/koala/jkGFvEL6cLvJsAPkL48qP_f74234f867b84e23aaf691d48124fb85.jpg", + "playgroundUrl": "https://fal.ai/models/bria/reimagine/3.2", + "documentationUrl": "https://fal.ai/models/bria/reimagine/3.2/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Reimagine32Input": { + "title": "InputModel", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Delicate, watercolor-style letters infused with shades of blue and green, accompanied by artistic, blooming flowers that blend harmoniously into a light background, giving a serene and artistic touch." + ], + "title": "Prompt", + "type": "string", + "description": "Prompt for image generation." 
+ }, + "depth_preprocess": { + "title": "Depth Preprocess", + "type": "boolean", + "description": "Depth image preprocess.", + "default": true + }, + "canny_preprocess": { + "title": "Canny Preprocess", + "type": "boolean", + "description": "Canny image preprocess.", + "default": true + }, + "depth_image_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Depth Image Url", + "description": "Depth control image (file or URL).", + "examples": [ + "https://bria-image-repository.s3.us-east-1.amazonaws.com/BRIA+(1).png" + ], + "default": "" + }, + "guidance_scale": { + "description": "Guidance scale for text.", + "type": "number", + "minimum": 1, + "title": "Guidance Scale", + "maximum": 10, + "default": 5 + }, + "canny_image_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Canny Image Url", + "description": "Canny edge control image (file or URL).", + "examples": [ + "https://bria-image-repository.s3.us-east-1.amazonaws.com/BRIA+(1).png" + ], + "default": "" + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for image generation.", + "default": "Logo,Watermark,Ugly,Morbid,Extra fingers,Poorly drawn hands,Mutation,Blurry,Extra limbs,Gross proportions,Missing arms,Mutated hands,Long neck,Duplicate,Mutilated,Mutilated hands,Poorly drawn face,Deformed,Bad anatomy,Cloned face,Malformed limbs,Missing legs,Too many fingers" + }, + "depth_scale": { + "description": "Depth control strength (0.0 to 1.0).", + "type": "number", + "minimum": 0, + "title": "Depth Scale", + "maximum": 1, + "default": 0.5 + }, + "aspect_ratio": { + "enum": [ + "1:1", + "2:3", + "3:2", + "3:4", + "4:3", + "4:5", + "5:4", + "9:16", + "16:9" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio. 
Options: 1:1, 2:3, 3:2, 3:4, 4:3, 4:5, 5:4, 9:16, 16:9", + "default": "1:1" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If true, returns the image directly in the response (increases latency).", + "default": false + }, + "prompt_enhancer": { + "title": "Prompt Enhancer", + "type": "boolean", + "description": "Whether to improve the prompt.", + "default": true + }, + "truncate_prompt": { + "title": "Truncate Prompt", + "type": "boolean", + "description": "Whether to truncate the prompt.", + "default": true + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility.", + "default": 5555 + }, + "canny_scale": { + "description": "Canny edge control strength (0.0 to 1.0).", + "type": "number", + "minimum": 0, + "title": "Canny Scale", + "maximum": 1, + "default": 0.5 + }, + "num_inference_steps": { + "description": "Number of inference steps.", + "type": "integer", + "minimum": 20, + "title": "Num Inference Steps", + "maximum": 50, + "default": 30 + } + }, + "x-fal-order-properties": [ + "prompt", + "num_inference_steps", + "seed", + "aspect_ratio", + "negative_prompt", + "guidance_scale", + "truncate_prompt", + "prompt_enhancer", + "sync_mode", + "depth_image_url", + "depth_scale", + "depth_preprocess", + "canny_image_url", + "canny_preprocess", + "canny_scale" + ], + "required": [ + "prompt" + ] + }, + "Reimagine32Output": { + "title": "OutputModel", + "type": "object", + "properties": { + "image": { + "description": "Generated image.", + "$ref": "#/components/schemas/Image" + } + }, + "x-fal-order-properties": [ + "image" + ], + "required": [ + "image" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the image in pixels.", + "examples": [ + 1024 + ] + }, + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the image in pixels.", + "examples": [ + 1024 + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/bria/reimagine/3.2/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/reimagine/3.2/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/bria/reimagine/3.2": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Reimagine32Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/reimagine/3.2/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Reimagine32Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/nano-banana/edit", + "metadata": { + "display_name": "Nano Banana", + "category": "image-to-image", + "description": "Google's state-of-the-art image generation and editing model", + "status": "active", + "tags": [ + "image-editing" + ], + "updated_at": "2026-01-26T21:43:03.265Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/koala/1iXUmtPWBg9oNTpkAv48W_ce1da4146f99452f8c1dfe58dd2b150e.jpg", + "model_url": "https://fal.run/fal-ai/nano-banana/edit", + "license_type": "commercial", + "date": "2025-08-19T22:16:18.949Z", + "group": { + "key": "nano-banana", + "label": "Image Editing" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/nano-banana/edit", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/nano-banana/edit queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/nano-banana/edit", + "category": "image-to-image", + "thumbnailUrl": "https://fal.media/files/koala/1iXUmtPWBg9oNTpkAv48W_ce1da4146f99452f8c1dfe58dd2b150e.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/nano-banana/edit", + "documentationUrl": "https://fal.ai/models/fal-ai/nano-banana/edit/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." 
+ }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "NanoBananaEditInput": { + "title": "NanoBananaImageToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "make a photo of the man driving the car down the california coastline" + ], + "title": "Prompt", + "minLength": 3, + "maxLength": 50000, + "type": "string", + "description": "The prompt for image editing." + }, + "aspect_ratio": { + "enum": [ + "auto", + "21:9", + "16:9", + "3:2", + "4:3", + "5:4", + "1:1", + "4:5", + "3:4", + "2:3", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated image.", + "default": "auto" + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Number of Images", + "description": "The number of images to generate.", + "default": 1 + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "image_urls": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/example_inputs/nano-banana-edit-input.png", + "https://storage.googleapis.com/falserverless/example_inputs/nano-banana-edit-input-2.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "The URLs of the images to use for image-to-image generation or image editing.", + "items": { + "type": "string" + } + }, + "limit_generations": { + "title": "Limit Generations", + "type": "boolean", + "description": "Experimental parameter to limit the number of generations from each round of prompting to 1. Set to `True` to to disregard any instructions in the prompt regarding the number of images to generate.", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "num_images", + "aspect_ratio", + "output_format", + "sync_mode", + "image_urls", + "limit_generations" + ], + "required": [ + "prompt", + "image_urls" + ] + }, + "NanoBananaEditOutput": { + "title": "NanoBananaImageToImageOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "file_name": "nano-banana-multi-edit-output.png", + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/example_outputs/nano-banana-multi-edit-output.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The edited images.", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "description": { + "title": "Description", + "type": "string", + "description": "The description of the generated images." 
+ } + }, + "x-fal-order-properties": [ + "images", + "description" + ], + "required": [ + "images", + "description" + ] + }, + "ImageFile": { + "title": "ImageFile", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the image" + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the image" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/nano-banana/edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/nano-banana/edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/nano-banana/edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/NanoBananaEditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/nano-banana/edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/NanoBananaEditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/nextstep-1", + "metadata": { + "display_name": "Nextstep 1", + "category": "image-to-image", + "description": "Endpoint for NextStep-1 Autoregressive Image Editing model.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:03.522Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/zebra/mA2AtvaSMWo6PQZ7NBKaC_4e2f5a654dec43ea904364e9a4a16d49.jpg", + "model_url": "https://fal.run/fal-ai/nextstep-1", + "license_type": "commercial", + "date": "2025-08-19T16:03:43.893Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/nextstep-1", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/nextstep-1 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/nextstep-1", + "category": "image-to-image", + "thumbnailUrl": "https://fal.media/files/zebra/mA2AtvaSMWo6PQZ7NBKaC_4e2f5a654dec43ea904364e9a4a16d49.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/nextstep-1", + "documentationUrl": "https://fal.ai/models/fal-ai/nextstep-1/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Nextstep1Input": { + "title": "NextStepEditRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Add a pirate hat to the dog's head. Change the background to a stormy sea with dark clouds. Include the text 'Captain Paws' in bold white letters at the top portion of the image." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to edit the image." 
+ }, + "negative_prompt": { + "examples": [ + "Copy original image." + ], + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n " + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/tiger/JitXwwpMuF9iIhv0Pq6Dh_dog.jpg" + ], + "title": "Image URL", + "type": "string", + "description": "The URL of the image to edit." + } + }, + "x-fal-order-properties": [ + "image_url", + "prompt", + "negative_prompt" + ], + "required": [ + "image_url", + "prompt", + "negative_prompt" + ] + }, + "Nextstep1Output": { + "title": "NextStepResponse", + "type": "object", + "properties": { + "image": { + "description": "Generated image", + "type": "object", + "examples": [ + { + "file_size": 478155, + "file_name": "dog_edited.png", + "content_type": "image/png", + "url": "https://v3.fal.media/files/lion/YAtc8qMcbzbfOmK3xm2Bd_df128b5291944cd5a635ad8eb90050c4.png" + } + ], + "title": "Image", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the image in pixels.", + "examples": [ + 1024 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the image in pixels.", + "examples": [ + 1024 + ] + } + }, + "required": [ + "url" + ] + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Seed used for random number generation" + } + }, + "x-fal-order-properties": [ + "image", + "seed" + ], + "required": [ + "image", + "seed" + ] + } + } + }, + "paths": { + "/fal-ai/nextstep-1/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/nextstep-1/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/nextstep-1": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Nextstep1Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/nextstep-1/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Nextstep1Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-edit", + "metadata": { + "display_name": "Qwen Image Edit", + "category": "image-to-image", + "description": "Endpoint for Qwen's Image Editing model. Has superior text editing capabilities.", + "status": "active", + "tags": [ + "image-editing", + "image-to-image", + "high-quality-text" + ], + "updated_at": "2026-01-26T21:43:03.770Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/kangaroo/NR_iO1JmZVV3QZbjufcGu_367391df7b0242c9bf8a23368c0c4acf.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-edit", + "license_type": "commercial", + "date": "2025-08-18T07:37:22.171Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image-edit", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-image-edit queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-image-edit", + "category": "image-to-image", + "thumbnailUrl": "https://fal.media/files/kangaroo/NR_iO1JmZVV3QZbjufcGu_367391df7b0242c9bf8a23368c0c4acf.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-edit", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-edit/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." 
+                },
+                "cancel_url": {
+                  "type": "string",
+                  "description": "The cancel url."
+                },
+                "logs": {
+                  "type": "object",
+                  "description": "The logs.",
+                  "additionalProperties": true
+                },
+                "metrics": {
+                  "type": "object",
+                  "description": "The metrics.",
+                  "additionalProperties": true
+                },
+                "queue_position": {
+                  "type": "integer",
+                  "description": "The queue position."
+                }
+              },
+              "required": [
+                "status",
+                "request_id"
+              ]
+            },
+            "QwenImageEditInput": {
+              "title": "BaseQwenEditImageInput",
+              "type": "object",
+              "properties": {
+                "prompt": {
+                  "examples": [
+                    "Change bag to apple macbook"
+                  ],
+                  "title": "Prompt",
+                  "type": "string",
+                  "description": "The prompt to generate the image with"
+                },
+                "num_images": {
+                  "minimum": 1,
+                  "maximum": 4,
+                  "type": "integer",
+                  "title": "Num Images",
+                  "description": "The number of images to generate.",
+                  "default": 1
+                },
+                "image_size": {
+                  "anyOf": [
+                    {
+                      "$ref": "#/components/schemas/ImageSize"
+                    },
+                    {
+                      "enum": [
+                        "square_hd",
+                        "square",
+                        "portrait_4_3",
+                        "portrait_16_9",
+                        "landscape_4_3",
+                        "landscape_16_9"
+                      ],
+                      "type": "string"
+                    }
+                  ],
+                  "title": "Image Size",
+                  "description": "The size of the generated image."
+                },
+                "acceleration": {
+                  "enum": [
+                    "none",
+                    "regular",
+                    "high"
+                  ],
+                  "description": "Acceleration level for image generation. Options: 'none', 'regular', 'high'. Higher acceleration increases speed. 'regular' balances speed and quality.",
+                  "type": "string",
+                  "examples": [
+                    "regular"
+                  ],
+                  "title": "Acceleration",
+                  "default": "regular"
+                },
+                "output_format": {
+                  "enum": [
+                    "jpeg",
+                    "png"
+                  ],
+                  "title": "Output Format",
+                  "type": "string",
+                  "description": "The format of the generated image.",
+                  "default": "png"
+                },
+                "image_url": {
+                  "examples": [
+                    "https://v3.fal.media/files/koala/oei_-iPIYFnhdB8SxojND_qwen-edit-res.png"
+                  ],
+                  "title": "Image URL",
+                  "type": "string",
+                  "description": "The URL of the image to edit."
+ }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 4 + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 30 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "negative_prompt": { + "examples": [ + "blurry, ugly" + ], + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt for the generation", + "default": " " + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "image_url", + "negative_prompt", + "acceleration" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "QwenImageEditOutput": { + "title": "QwenImageOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "examples": [ + [ + { + "height": 768, + "content_type": "image/jpeg", + "url": "https://v3.fal.media/files/elephant/P-YCIAg6wtFn1hsF34fzL_qwen-edit.png", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ideogram/character/edit", + "metadata": { + "display_name": "Ideogram V3 Character Edit", + "category": "image-to-image", + "description": "Modify consistent characters while preserving their core identity. Edit poses, expressions, or clothing without losing recognizable character features", + "status": "active", + "tags": [ + "character-consistency" + ], + "updated_at": "2026-01-26T21:43:06.021Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/penguin/sa3_-HW2nveeEYE8OzSn0_738419fcb18148cc8c8e0728196cd142.jpg", + "model_url": "https://fal.run/fal-ai/ideogram/character/edit", + "license_type": "commercial", + "date": "2025-08-07T16:01:21.044Z", + "group": { + "key": "ideogram-character", + "label": "Edit" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ideogram/character/edit", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ideogram/character/edit queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ideogram/character/edit", + "category": "image-to-image", + "thumbnailUrl": "https://fal.media/files/penguin/sa3_-HW2nveeEYE8OzSn0_738419fcb18148cc8c8e0728196cd142.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ideogram/character/edit", + "documentationUrl": "https://fal.ai/models/fal-ai/ideogram/character/edit/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "IdeogramCharacterEditInput": { + "x-fal-order-properties": [ + "image_urls", + "rendering_speed", + "color_palette", + "style_codes", + "style", + "expand_prompt", + "num_images", + "seed", + "sync_mode", + "prompt", + "image_url", + "mask_url", + "reference_image_urls", + "reference_mask_urls" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "woman holding bag" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to fill the masked part of the image." + }, + "style": { + "enum": [ + "AUTO", + "REALISTIC", + "FICTION" + ], + "description": "The style type to generate with. Cannot be used with style_codes.", + "type": "string", + "title": "Style", + "default": "AUTO" + }, + "expand_prompt": { + "description": "Determine if MagicPrompt should be used in generating the request or not.", + "type": "boolean", + "title": "Expand Prompt", + "default": true + }, + "rendering_speed": { + "enum": [ + "TURBO", + "BALANCED", + "QUALITY" + ], + "description": "The rendering speed to use.", + "type": "string", + "title": "Rendering Speed", + "default": "BALANCED" + }, + "reference_mask_urls": { + "title": "Reference Mask Urls", + "type": "array", + "description": "A set of masks to apply to the character references. Currently only 1 mask is supported, rest will be ignored. (maximum total size 10MB across all character references). The masks should be in JPEG, PNG or WebP format", + "items": { + "type": "string" + } + }, + "reference_image_urls": { + "examples": [ + [ + "https://v3.fal.media/files/kangaroo/0rinwnj_Kn9Fsu2dK-aKm_image.png" + ] + ], + "description": "A set of images to use as character references. Currently only 1 image is supported, rest will be ignored. (maximum total size 10MB across all character references). The images should be in JPEG, PNG or WebP format", + "type": "array", + "title": "Reference Image Urls", + "items": { + "type": "string" + } + }, + "image_urls": { + "anyOf": [ + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "null" + } + ], + "description": "A set of images to use as style references (maximum total size 10MB across all style references). The images should be in JPEG, PNG or WebP format", + "title": "Image Urls" + }, + "num_images": { + "minimum": 1, + "maximum": 8, + "type": "integer", + "description": "Number of images to generate.", + "title": "Num Images", + "default": 1 + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/panda/-LC_gNNV3wUHaGMQT3klE_output.png" + ], + "title": "Image URL", + "type": "string", + "description": "The image URL to generate an image from. MUST have the exact same dimensions (width and height) as the mask image." + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "color_palette": { + "anyOf": [ + { + "$ref": "#/components/schemas/ColorPalette" + }, + { + "type": "null" + } + ], + "description": "A color palette for generation, must EITHER be specified via one of the presets (name) or explicitly via hexadecimal representations of the color with optional weights (members)" + }, + "style_codes": { + "anyOf": [ + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "null" + } + ], + "description": "A list of 8 character hexadecimal codes representing the style of the image. 
Cannot be used in conjunction with style_reference_images or style", + "title": "Style Codes" + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "Seed for the random number generator", + "title": "Seed" + }, + "mask_url": { + "examples": [ + "https://v3.fal.media/files/panda/jVDAgSkpsZFDP080ceSZJ_woman_face_mask.png" + ], + "title": "Mask URL", + "type": "string", + "description": "The mask URL to inpaint the image. MUST have the exact same dimensions (width and height) as the input image." + } + }, + "title": "CharacterEditInputV3", + "required": [ + "prompt", + "image_url", + "mask_url", + "reference_image_urls" + ] + }, + "IdeogramCharacterEditOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3.fal.media/files/zebra/JJR2zayRdL3Pg7kr9cFyk_image.png" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/File" + } + }, + "seed": { + "examples": [ + 123456 + ], + "title": "Seed", + "type": "integer", + "description": "Seed used for the random number generator" + } + }, + "title": "CharacterEditOutputV3", + "required": [ + "images", + "seed" + ] + }, + "ColorPalette": { + "x-fal-order-properties": [ + "members", + "name" + ], + "type": "object", + "properties": { + "members": { + "anyOf": [ + { + "type": "array", + "items": { + "$ref": "#/components/schemas/ColorPaletteMember" + } + }, + { + "type": "null" + } + ], + "description": "A list of color palette members that define the color palette", + "title": "Members" + }, + "name": { + "anyOf": [ + { + "enum": [ + "EMBER", + "FRESH", + "JUNGLE", + "MAGIC", + "MELON", + "MOSAIC", + "PASTEL", + "ULTRAMARINE" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "description": "A color palette preset value", + "title": "Name" + } + }, + "title": "ColorPalette" + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file." 
+ }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "title": "File", + "required": [ + "url" + ] + }, + "ColorPaletteMember": { + "x-fal-order-properties": [ + "rgb", + "color_weight" + ], + "type": "object", + "properties": { + "color_weight": { + "anyOf": [ + { + "minimum": 0.05, + "maximum": 1, + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The weight of the color in the color palette", + "title": "Color Weight", + "default": 0.5 + }, + "rgb": { + "description": "RGB color value for the palette member", + "$ref": "#/components/schemas/RGBColor" + } + }, + "title": "ColorPaletteMember", + "required": [ + "rgb" + ] + }, + "RGBColor": { + "x-fal-order-properties": [ + "r", + "g", + "b" + ], + "type": "object", + "properties": { + "r": { + "minimum": 0, + "maximum": 255, + "type": "integer", + "description": "Red color value", + "title": "R", + "default": 0 + }, + "b": { + "minimum": 0, + "maximum": 255, + "type": "integer", + "description": "Blue color value", + "title": "B", + "default": 0 + }, + "g": { + "minimum": 0, + "maximum": 255, + "type": "integer", + "description": "Green color value", + "title": "G", + "default": 0 + } + }, + "title": "RGBColor" + } + } + }, + "paths": { + "/fal-ai/ideogram/character/edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ideogram/character/edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/ideogram/character/edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdeogramCharacterEditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ideogram/character/edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdeogramCharacterEditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ideogram/character", + "metadata": { + "display_name": "Ideogram V3 Character", + "category": "image-to-image", + "description": "Generate consistent character appearances across multiple images. Maintain facial features, proportions, and distinctive traits for cohesive storytelling and branding", + "status": "active", + "tags": [ + "character-consistency", + "" + ], + "updated_at": "2026-01-26T21:43:06.145Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/koala/spkoglxAAeU3iFhwZyWSW_b17f175d3e314853bc4c9bae2809cabd.jpg", + "model_url": "https://fal.run/fal-ai/ideogram/character", + "license_type": "commercial", + "date": "2025-08-07T15:57:16.318Z", + "group": { + "key": "ideogram-character", + "label": "Base" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ideogram/character", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ideogram/character queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ideogram/character", + "category": "image-to-image", + "thumbnailUrl": "https://fal.media/files/koala/spkoglxAAeU3iFhwZyWSW_b17f175d3e314853bc4c9bae2809cabd.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ideogram/character", + "documentationUrl": "https://fal.ai/models/fal-ai/ideogram/character/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "IdeogramCharacterInput": { + "x-fal-order-properties": [ + "image_urls", + "rendering_speed", + "color_palette", + "style_codes", + "style", + "expand_prompt", + "num_images", + "seed", + "sync_mode", + "prompt", + "image_size", + "negative_prompt", + "reference_image_urls", + "reference_mask_urls" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Place the woman leisurely enjoying a cup of espresso while relaxing at a sunlit café table in Siena, Italy. The café setting showcases vintage wooden furniture with peeling white paint, aged brick flooring, and sun-bleached stone walls decorated with trailing ivy and vibrant potted geraniums that capture Siena's medieval character. Golden late-morning light streams through overhead, creating soft shadows that highlight the weathered architectural details. The composition appears slightly off-center, conveying the unguarded tranquility and personal intimacy of a peaceful moment savoring the Tuscan morning ambiance." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to fill the masked part of the image." + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The resolution of the generated image", + "title": "Image Size", + "default": "square_hd" + }, + "style": { + "enum": [ + "AUTO", + "REALISTIC", + "FICTION" + ], + "description": "The style type to generate with. Cannot be used with style_codes.", + "type": "string", + "title": "Style", + "default": "AUTO" + }, + "expand_prompt": { + "description": "Determine if MagicPrompt should be used in generating the request or not.", + "type": "boolean", + "title": "Expand Prompt", + "default": true + }, + "rendering_speed": { + "enum": [ + "TURBO", + "BALANCED", + "QUALITY" + ], + "description": "The rendering speed to use.", + "type": "string", + "title": "Rendering Speed", + "default": "BALANCED" + }, + "reference_mask_urls": { + "title": "Reference Mask Urls", + "type": "array", + "description": "A set of masks to apply to the character references. Currently only 1 mask is supported, rest will be ignored. (maximum total size 10MB across all character references). The masks should be in JPEG, PNG or WebP format", + "items": { + "type": "string" + } + }, + "reference_image_urls": { + "examples": [ + [ + "https://v3.fal.media/files/kangaroo/0rinwnj_Kn9Fsu2dK-aKm_image.png" + ] + ], + "description": "A set of images to use as character references. Currently only 1 image is supported, rest will be ignored. (maximum total size 10MB across all character references). The images should be in JPEG, PNG or WebP format", + "type": "array", + "title": "Reference Image Urls", + "items": { + "type": "string" + } + }, + "image_urls": { + "anyOf": [ + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "null" + } + ], + "description": "A set of images to use as style references (maximum total size 10MB across all style references). The images should be in JPEG, PNG or WebP format", + "title": "Image Urls" + }, + "negative_prompt": { + "description": "Description of what to exclude from an image. 
Descriptions in the prompt take precedence to descriptions in the negative prompt.", + "type": "string", + "title": "Negative Prompt", + "default": "" + }, + "num_images": { + "minimum": 1, + "maximum": 8, + "type": "integer", + "description": "Number of images to generate.", + "title": "Num Images", + "default": 1 + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "color_palette": { + "anyOf": [ + { + "$ref": "#/components/schemas/ColorPalette" + }, + { + "type": "null" + } + ], + "description": "A color palette for generation, must EITHER be specified via one of the presets (name) or explicitly via hexadecimal representations of the color with optional weights (members)" + }, + "style_codes": { + "anyOf": [ + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "null" + } + ], + "description": "A list of 8 character hexadecimal codes representing the style of the image. Cannot be used in conjunction with style_reference_images or style", + "title": "Style Codes" + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "Seed for the random number generator", + "title": "Seed" + } + }, + "title": "BaseCharacterInputV3", + "required": [ + "prompt", + "reference_image_urls" + ] + }, + "IdeogramCharacterOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3.fal.media/files/monkey/NC_1eo9ecE9fARcxviJ2R_image.png" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/File" + } + }, + "seed": { + "examples": [ + 123456 + ], + "title": "Seed", + "type": "integer", + "description": "Seed used for the random number generator" + } + }, + "title": "CharacterOutputV3", + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Height", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Width", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "ColorPalette": { + "x-fal-order-properties": [ + "members", + "name" + ], + "type": "object", + "properties": { + "members": { + "anyOf": [ + { + "type": "array", + "items": { + "$ref": "#/components/schemas/ColorPaletteMember" + } + }, + { + "type": "null" + } + ], + "description": "A list of color palette members that define the color palette", + "title": "Members" + }, + "name": { + "anyOf": [ + { + "enum": [ + "EMBER", + "FRESH", + "JUNGLE", + "MAGIC", + "MELON", + "MOSAIC", + "PASTEL", + "ULTRAMARINE" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "description": "A color palette preset value", + "title": "Name" + } + }, + "title": "ColorPalette" + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes." 
+ }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file." + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "title": "File", + "required": [ + "url" + ] + }, + "ColorPaletteMember": { + "x-fal-order-properties": [ + "rgb", + "color_weight" + ], + "type": "object", + "properties": { + "color_weight": { + "anyOf": [ + { + "minimum": 0.05, + "maximum": 1, + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The weight of the color in the color palette", + "title": "Color Weight", + "default": 0.5 + }, + "rgb": { + "description": "RGB color value for the palette member", + "$ref": "#/components/schemas/RGBColor" + } + }, + "title": "ColorPaletteMember", + "required": [ + "rgb" + ] + }, + "RGBColor": { + "x-fal-order-properties": [ + "r", + "g", + "b" + ], + "type": "object", + "properties": { + "r": { + "minimum": 0, + "maximum": 255, + "type": "integer", + "description": "Red color value", + "title": "R", + "default": 0 + }, + "b": { + "minimum": 0, + "maximum": 255, + "type": "integer", + "description": "Blue color value", + "title": "B", + "default": 0 + }, + "g": { + "minimum": 0, + "maximum": 255, + "type": "integer", + "description": "Green color value", + "title": "G", + "default": 0 + } + }, + "title": "RGBColor" + } + } + }, + "paths": { + "/fal-ai/ideogram/character/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ideogram/character/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/ideogram/character": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdeogramCharacterInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ideogram/character/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdeogramCharacterOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ideogram/character/remix", + "metadata": { + "display_name": "Ideogram V3 Character Remix", + "category": "image-to-image", + "description": "Transform your consistent character into different art styles, settings, or scenarios while maintaining their distinctive appearance and identity", + "status": "active", + "tags": [ + "character-consistency" + ], + "updated_at": "2026-01-26T21:43:06.273Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/zebra/AIDfIY6DcuzLpcKmklBq__73fde9aa027d4f079e59b53bf55b9c58.jpg", + "model_url": "https://fal.run/fal-ai/ideogram/character/remix", + "license_type": "commercial", + "date": "2025-08-07T15:52:27.577Z", + "group": { + "key": "ideogram-character", + "label": "Remix" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ideogram/character/remix", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ideogram/character/remix queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ideogram/character/remix", + "category": "image-to-image", + "thumbnailUrl": "https://fal.media/files/zebra/AIDfIY6DcuzLpcKmklBq__73fde9aa027d4f079e59b53bf55b9c58.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ideogram/character/remix", + "documentationUrl": "https://fal.ai/models/fal-ai/ideogram/character/remix/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "IdeogramCharacterRemixInput": { + "x-fal-order-properties": [ + "image_urls", + "rendering_speed", + "color_palette", + "style_codes", + "style", + "expand_prompt", + "num_images", + "seed", + "sync_mode", + "prompt", + "image_url", + "strength", + "image_size", + "negative_prompt", + "reference_image_urls", + "reference_mask_urls" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A glamorous portrait photograph of a woman in an elegant ballroom setting. The subject wears a champagne-colored ball gown with a fitted bodice, long sleeves, and a full skirt adorned with delicate lace appliques. The dress features a crystal-embellished hair accessory and pearl drop earrings. The grand staircase has ornate gold railings and leads to an elaborate crystal chandelier hanging from an arched ceiling. The walls are decorated with classical paintings featuring floral motifs. The lighting is warm and dramatic, creating a soft glow throughout the space. The composition is shot in a formal portrait style with the subject positioned on the lower landing of the staircase, looking over her shoulder at the camera." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to remix the image with" + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The resolution of the generated image", + "title": "Image Size", + "default": "square_hd" + }, + "style": { + "enum": [ + "AUTO", + "REALISTIC", + "FICTION" + ], + "description": "The style type to generate with. Cannot be used with style_codes.", + "type": "string", + "title": "Style", + "default": "AUTO" + }, + "expand_prompt": { + "description": "Determine if MagicPrompt should be used in generating the request or not.", + "type": "boolean", + "title": "Expand Prompt", + "default": true + }, + "rendering_speed": { + "enum": [ + "TURBO", + "BALANCED", + "QUALITY" + ], + "description": "The rendering speed to use.", + "type": "string", + "title": "Rendering Speed", + "default": "BALANCED" + }, + "reference_mask_urls": { + "title": "Reference Mask Urls", + "type": "array", + "description": "A set of masks to apply to the character references. Currently only 1 mask is supported, rest will be ignored. (maximum total size 10MB across all character references). The masks should be in JPEG, PNG or WebP format", + "items": { + "type": "string" + } + }, + "reference_image_urls": { + "examples": [ + [ + "https://v3.fal.media/files/kangaroo/0rinwnj_Kn9Fsu2dK-aKm_image.png" + ] + ], + "description": "A set of images to use as character references. Currently only 1 image is supported, rest will be ignored. (maximum total size 10MB across all character references). The images should be in JPEG, PNG or WebP format", + "type": "array", + "title": "Reference Image Urls", + "items": { + "type": "string" + } + }, + "image_urls": { + "anyOf": [ + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "null" + } + ], + "description": "A set of images to use as style references (maximum total size 10MB across all style references). The images should be in JPEG, PNG or WebP format", + "title": "Image Urls" + }, + "negative_prompt": { + "description": "Description of what to exclude from an image. 
Descriptions in the prompt take precedence to descriptions in the negative prompt.", + "type": "string", + "title": "Negative Prompt", + "default": "" + }, + "num_images": { + "minimum": 1, + "maximum": 8, + "type": "integer", + "description": "Number of images to generate.", + "title": "Num Images", + "default": 1 + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/panda/mcxydS-_4ZjfBWFtgoo2z_XHLsl7khq6dC6Qp3cIdJl08rG0I.avif" + ], + "title": "Image URL", + "type": "string", + "description": "The image URL to remix" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "color_palette": { + "anyOf": [ + { + "$ref": "#/components/schemas/ColorPalette" + }, + { + "type": "null" + } + ], + "description": "A color palette for generation, must EITHER be specified via one of the presets (name) or explicitly via hexadecimal representations of the color with optional weights (members)" + }, + "style_codes": { + "anyOf": [ + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "null" + } + ], + "description": "A list of 8 character hexadecimal codes representing the style of the image. Cannot be used in conjunction with style_reference_images or style", + "title": "Style Codes" + }, + "strength": { + "minimum": 0.01, + "maximum": 1, + "type": "number", + "description": "Strength of the input image in the remix", + "title": "Strength", + "default": 0.8 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "Seed for the random number generator", + "title": "Seed" + } + }, + "title": "CharacterRemixInputV3", + "required": [ + "prompt", + "image_url", + "reference_image_urls" + ] + }, + "IdeogramCharacterRemixOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3.fal.media/files/zebra/4F1SvlaPbkZt-Mle4CTH9_image.png" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/File" + } + }, + "seed": { + "examples": [ + 123456 + ], + "title": "Seed", + "type": "integer", + "description": "Seed used for the random number generator" + } + }, + "title": "CharacterRemixOutputV3", + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Height", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Width", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "ColorPalette": { + "x-fal-order-properties": [ + "members", + "name" + ], + "type": "object", + "properties": { + "members": { + "anyOf": [ + { + "type": "array", + "items": { + "$ref": "#/components/schemas/ColorPaletteMember" + } + }, + { + "type": "null" + } + ], + "description": "A list of color palette members that define the color palette", + "title": "Members" + }, + "name": { + "anyOf": [ + { + "enum": [ + "EMBER", + "FRESH", + "JUNGLE", + "MAGIC", + "MELON", + "MOSAIC", + "PASTEL", + "ULTRAMARINE" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "description": "A color 
palette preset value", + "title": "Name" + } + }, + "title": "ColorPalette" + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file." + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "title": "File", + "required": [ + "url" + ] + }, + "ColorPaletteMember": { + "x-fal-order-properties": [ + "rgb", + "color_weight" + ], + "type": "object", + "properties": { + "color_weight": { + "anyOf": [ + { + "minimum": 0.05, + "maximum": 1, + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The weight of the color in the color palette", + "title": "Color Weight", + "default": 0.5 + }, + "rgb": { + "description": "RGB color value for the palette member", + "$ref": "#/components/schemas/RGBColor" + } + }, + "title": "ColorPaletteMember", + "required": [ + "rgb" + ] + }, + "RGBColor": { + "x-fal-order-properties": [ + "r", + "g", + "b" + ], + "type": "object", + "properties": { + "r": { + "minimum": 0, + "maximum": 255, + "type": "integer", + "description": "Red color value", + "title": "R", + "default": 0 + }, + "b": { + "minimum": 0, + "maximum": 255, + "type": "integer", + "description": "Blue color value", + "title": "B", + "default": 0 + }, + "g": { + "minimum": 0, + "maximum": 255, + "type": "integer", + "description": "Green color value", + "title": "G", + "default": 0 + } + }, + "title": "RGBColor" + } + } + }, + "paths": { + "/fal-ai/ideogram/character/remix/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ideogram/character/remix/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/ideogram/character/remix": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdeogramCharacterRemixInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ideogram/character/remix/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdeogramCharacterRemixOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-krea-lora/inpainting", + "metadata": { + "display_name": "FLUX.1 Krea [dev] Inpainting with LoRAs", + "category": "image-to-image", + "description": "Super fast endpoint for the FLUX.1 [dev] inpainting model with LoRA support, enabling rapid and high-quality image inpaingting using pre-trained LoRA adaptations for personalization, specific styles, brand identities, and product-specific outputs.", + "status": "active", + "tags": [ + "lora", + "personalization" + ], + "updated_at": "2026-01-26T21:43:08.680Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Upscale-2.jpg", + "model_url": "https://fal.run/fal-ai/flux-krea-lora/inpainting", + "license_type": "commercial", + "date": "2025-08-01T23:38:18.120Z", + "group": { + "key": "flux-krea-lora", + "label": "Inpainting" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-krea-lora/inpainting", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-krea-lora/inpainting queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-krea-lora/inpainting", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Upscale-2.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-krea-lora/inpainting", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-krea-lora/inpainting/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxKreaLoraInpaintingInput": { + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "loras", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "image_url", + "strength", + "mask_url" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A photo of a lion sitting on a stone bench" + ], + "description": "The prompt to generate an image from.", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate. This is always set to 1 for streaming output.", + "type": "integer", + "title": "Num Images", + "maximum": 4, + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image.", + "title": "Image Size" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/dog.png" + ], + "description": "URL of image to use for inpainting. or img2img", + "type": "string", + "title": "Image Url" + }, + "loras": { + "description": "\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "examples": [], + "title": "Loras", + "default": [] + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "strength": { + "minimum": 0.01, + "description": "The strength to use for inpainting/image-to-image. Only used if the image_url is provided. 
1.0 completely remakes the image while 0.0 preserves the original.", + "type": "number", + "title": "Strength", + "maximum": 1, + "default": 0.85 + }, + "guidance_scale": { + "minimum": 0, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "type": "number", + "title": "Guidance scale (CFG)", + "maximum": 35, + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 1, + "description": "The number of inference steps to perform.", + "type": "integer", + "title": "Num Inference Steps", + "maximum": 50, + "default": 28 + }, + "mask_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/dog_mask.png" + ], + "description": "\n The mask of the area to inpaint.\n ", + "type": "string", + "title": "Mask Url" + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "seed": { + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "type": "integer", + "title": "Seed" + } + }, + "title": "InpaintInput", + "required": [ + "prompt", + "image_url", + "mask_url" + ] + }, + "FluxKreaLoraInpaintingOutput": { + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "description": "The generated image files info.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ", + "type": "integer", + "title": "Seed" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + } + }, + "title": "Output", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "title": "Height", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "title": "Width", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "LoraWeight": { + "x-fal-order-properties": [ + "path", + "scale" + ], + "type": "object", + "properties": { + "path": { + "description": "URL or the path to the LoRA weights.", + "type": "string", + "title": "Path" + }, + "scale": { + "minimum": 0, + "description": "\n The scale of the LoRA weight. 
This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "type": "number", + "title": "Scale", + "maximum": 4, + "default": 1 + } + }, + "title": "LoraWeight", + "required": [ + "path" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "title": "Image", + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-krea-lora/inpainting/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-krea-lora/inpainting/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-krea-lora/inpainting": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxKreaLoraInpaintingInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-krea-lora/inpainting/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxKreaLoraInpaintingOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-krea-lora/image-to-image", + "metadata": { + "display_name": "FLUX.1 Krea [dev] with LoRAs", + "category": "image-to-image", + "description": "FLUX LoRA Image-to-Image is a high-performance endpoint that transforms existing images using FLUX models, leveraging LoRA adaptations to enable rapid and precise image style transfer, modifications, and artistic variations.", + "status": "active", + "tags": [ + "lora", + "style transfer" + ], + "updated_at": "2026-01-26T21:43:08.938Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound.jpg", + "model_url": "https://fal.run/fal-ai/flux-krea-lora/image-to-image", + "license_type": "commercial", + "date": "2025-08-01T23:32:55.146Z", + "group": { + "key": "flux-krea-lora", + "label": "Image to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-krea-lora/image-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-krea-lora/image-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-krea-lora/image-to-image", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-krea-lora/image-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-krea-lora/image-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxKreaLoraImageToImageInput": { + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "loras", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "image_url", + "strength" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A photo of a lion sitting on a stone bench" + ], + "description": "The prompt to generate an image from.", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate. This is always set to 1 for streaming output.", + "type": "integer", + "title": "Num Images", + "maximum": 4, + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image.", + "title": "Image Size" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/dog.png" + ], + "description": "URL of image to use for inpainting. or img2img", + "type": "string", + "title": "Image Url" + }, + "loras": { + "description": "\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "examples": [], + "title": "Loras", + "default": [] + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "strength": { + "minimum": 0.01, + "description": "The strength to use for inpainting/image-to-image. Only used if the image_url is provided. 
1.0 completely remakes the image while 0.0 preserves the original.", + "type": "number", + "title": "Strength", + "maximum": 1, + "default": 0.85 + }, + "guidance_scale": { + "minimum": 0, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "type": "number", + "title": "Guidance scale (CFG)", + "maximum": 35, + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 1, + "description": "The number of inference steps to perform.", + "type": "integer", + "title": "Num Inference Steps", + "maximum": 50, + "default": 28 + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "seed": { + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "type": "integer", + "title": "Seed" + } + }, + "title": "ImageToImageInput", + "required": [ + "prompt", + "image_url" + ] + }, + "FluxKreaLoraImageToImageOutput": { + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "description": "The generated image files info.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ", + "type": "integer", + "title": "Seed" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + } + }, + "title": "Output", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "title": "Height", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "title": "Width", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "LoraWeight": { + "x-fal-order-properties": [ + "path", + "scale" + ], + "type": "object", + "properties": { + "path": { + "description": "URL or the path to the LoRA weights.", + "type": "string", + "title": "Path" + }, + "scale": { + "minimum": 0, + "description": "\n The scale of the LoRA weight. 
This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "type": "number", + "title": "Scale", + "maximum": 4, + "default": 1 + } + }, + "title": "LoraWeight", + "required": [ + "path" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "title": "Image", + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-krea-lora/image-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-krea-lora/image-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-krea-lora/image-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxKreaLoraImageToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-krea-lora/image-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxKreaLoraImageToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux/krea/image-to-image", + "metadata": { + "display_name": "FLUX.1 Krea [dev]", + "category": "image-to-image", + "description": "FLUX.1 Krea [dev] is a 12 billion parameter flow transformer that generates high-quality images from text with incredible aesthetics. 
It is suitable for personal and commercial use.\n", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:10.122Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Upscale-4.jpeg", + "model_url": "https://fal.run/fal-ai/flux/krea/image-to-image", + "license_type": "commercial", + "date": "2025-07-30T14:27:21.110Z", + "group": { + "key": "krea-models", + "label": "Image to Image [dev]" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux/krea/image-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux/krea/image-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux/krea/image-to-image", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Upscale-4.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux/krea/image-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/flux/krea/image-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxKreaImageToImageInput": { + "title": "BaseKreaImageToInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A cat dressed as a wizard with a background of a mystic forest." + ], + "description": "The prompt to generate an image from.", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "title": "Num Images", + "maximum": 4, + "default": 1 + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "description": "The speed of the generation. The higher the speed, the faster the generation.", + "type": "string", + "title": "Acceleration", + "default": "none" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://fal.media/files/koala/Chls9L2ZnvuipUTEwlnJC.png" + ], + "description": "The URL of the image to generate an image from.", + "type": "string", + "title": "Image URL" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "strength": { + "minimum": 0.01, + "description": "The strength of the initial image. 
Higher strength values are better for this model.", + "type": "number", + "title": "Strength", + "maximum": 1, + "default": 0.95 + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "title": "Seed" + }, + "guidance_scale": { + "minimum": 1, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "type": "number", + "title": "Guidance scale (CFG)", + "maximum": 20, + "default": 4.5 + }, + "num_inference_steps": { + "minimum": 10, + "description": "The number of inference steps to perform.", + "type": "integer", + "title": "Num Inference Steps", + "maximum": 50, + "default": 40 + } + }, + "x-fal-order-properties": [ + "image_url", + "strength", + "num_inference_steps", + "prompt", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "acceleration" + ], + "required": [ + "image_url", + "prompt" + ] + }, + "FluxKreaImageToImageOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "description": "The generated image files info.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "seed": { + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux/krea/image-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux/krea/image-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux/krea/image-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxKreaImageToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux/krea/image-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxKreaImageToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux/krea/redux", + "metadata": { + "display_name": "FLUX.1 Krea [dev] Redux", + "category": "image-to-image", + "description": "FLUX.1 Krea [dev] Redux is a high-performance endpoint for the FLUX.1 Krea [dev] model that enables rapid transformation of existing images, delivering high-quality style transfers and image modifications with the core FLUX capabilities.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:10.309Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Upscale.jpg", + "model_url": "https://fal.run/fal-ai/flux/krea/redux", + "license_type": "commercial", + "date": "2025-07-30T14:25:55.003Z", + "group": { + "key": "krea-models", + "label": "Image to Image [dev] Redux" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux/krea/redux", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux/krea/redux queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux/krea/redux", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Upscale.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux/krea/redux", + "documentationUrl": "https://fal.ai/models/fal-ai/flux/krea/redux/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxKreaReduxInput": { + "title": "BaseKreaReduxInput", + "type": "object", + "properties": { + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "title": "Num Images", + "maximum": 4, + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image.", + "title": "Image Size", + "default": "landscape_4_3" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "description": "The speed of the generation. The higher the speed, the faster the generation.", + "type": "string", + "title": "Acceleration", + "default": "none" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/flux_krea_redux_output_1.jpg" + ], + "description": "The URL of the image to generate an image from.", + "type": "string", + "title": "Image URL" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "title": "Seed" + }, + "guidance_scale": { + "minimum": 1, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "type": "number", + "title": "Guidance scale (CFG)", + "maximum": 20, + "default": 4.5 + }, + "num_inference_steps": { + "minimum": 1, + "description": "The number of inference steps to perform.", + "type": "integer", + "title": "Num Inference Steps", + "maximum": 50, + "default": 28 + } + }, + "x-fal-order-properties": [ + "image_url", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "acceleration" + ], + "required": [ + "image_url" + ] + }, + "FluxKreaReduxOutput": { + "title": "KreaReduxOutput", + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "examples": [ + [ + { + 
"height": 768, + "content_type": "image/jpeg", + "url": "https://storage.googleapis.com/falserverless/example_outputs/flux_krea_redux_output_1.jpg", + "width": 1024 + } + ] + ], + "description": "The generated images.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "seed": { + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "title": "Height", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "title": "Width", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux/krea/redux/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux/krea/redux/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux/krea/redux": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxKreaReduxInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux/krea/redux/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxKreaReduxOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-1/krea/image-to-image", + "metadata": { + "display_name": "FLUX.1 Krea [dev]", + "category": "image-to-image", + "description": "FLUX.1 Krea [dev] is a 12 billion parameter flow transformer that generates high-quality images from text with incredible aesthetics. It is suitable for personal and commercial use.\n", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:10.703Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Training-5.jpg", + "model_url": "https://fal.run/fal-ai/flux-1/krea/image-to-image", + "license_type": "commercial", + "date": "2025-07-30T13:49:39.470Z", + "group": { + "key": "krea-models-fast", + "label": "Image to Image [dev]" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-1/krea/image-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-1/krea/image-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-1/krea/image-to-image", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Training-5.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-1/krea/image-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-1/krea/image-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux1KreaImageToImageInput": { + "title": "BaseKreaFlux1ImageToInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A cat dressed as a wizard with a background of a mystic forest." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "maximum": 4, + "title": "Num Images", + "default": 1 + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "title": "Acceleration", + "type": "string", + "description": "The speed of the generation. The higher the speed, the faster the generation.", + "default": "regular" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://fal.media/files/koala/Chls9L2ZnvuipUTEwlnJC.png" + ], + "title": "Image URL", + "type": "string", + "description": "The URL of the image to generate an image from." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "strength": { + "minimum": 0.01, + "description": "The strength of the initial image. Higher strength values are better for this model.", + "type": "number", + "maximum": 1, + "title": "Strength", + "default": 0.95 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "num_inference_steps": { + "minimum": 10, + "description": "The number of inference steps to perform.", + "type": "integer", + "maximum": 50, + "title": "Num Inference Steps", + "default": 40 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "guidance_scale": { + "minimum": 1, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "type": "number", + "maximum": 20, + "title": "Guidance scale (CFG)", + "default": 4.5 + } + }, + "x-fal-order-properties": [ + "image_url", + "strength", + "num_inference_steps", + "prompt", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "acceleration" + ], + "required": [ + "image_url", + "prompt" + ] + }, + "Flux1KreaImageToImageOutput": { + "title": "KreaOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." 
+ }, + "images": { + "examples": [ + [ + { + "height": 768, + "content_type": "image/jpeg", + "url": "https://storage.googleapis.com/falserverless/example_outputs/flux_krea_t2i_output_1.jpg", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated images.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-1/krea/image-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-1/krea/image-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-1/krea/image-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux1KreaImageToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-1/krea/image-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux1KreaImageToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-1/krea/redux", + "metadata": { + "display_name": "FLUX.1 Krea [dev] Redux", + "category": "image-to-image", + "description": "FLUX.1 Krea [dev] Redux is a high-performance endpoint for the FLUX.1 Krea [dev] model that enables rapid transformation of existing images, delivering high-quality style transfers and image modifications with the core FLUX capabilities.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:10.829Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Upscale.jpg", + "model_url": "https://fal.run/fal-ai/flux-1/krea/redux", + "license_type": "commercial", + "date": "2025-07-30T13:43:13.554Z", + "group": { + "key": "krea-models-fast", + "label": "Image to Image [dev] Redux" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-1/krea/redux", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-1/krea/redux queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-1/krea/redux", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Upscale.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-1/krea/redux", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-1/krea/redux/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux1KreaReduxInput": { + "title": "BaseKreaFlux1ReduxInput", + "type": "object", + "properties": { + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "maximum": 4, + "title": "Num Images", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "landscape_4_3" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "title": "Acceleration", + "type": "string", + "description": "The speed of the generation. The higher the speed, the faster the generation.", + "default": "regular" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/flux_krea_redux_output_1.jpg" + ], + "title": "Image URL", + "type": "string", + "description": "The URL of the image to generate an image from." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "num_inference_steps": { + "minimum": 1, + "description": "The number of inference steps to perform.", + "type": "integer", + "maximum": 50, + "title": "Num Inference Steps", + "default": 28 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "guidance_scale": { + "minimum": 1, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "type": "number", + "maximum": 20, + "title": "Guidance scale (CFG)", + "default": 4.5 + } + }, + "x-fal-order-properties": [ + "image_url", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "acceleration" + ], + "required": [ + "image_url" + ] + }, + "Flux1KreaReduxOutput": { + "title": "KreaReduxOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." 
+ }, + "images": { + "examples": [ + [ + { + "height": 768, + "content_type": "image/jpeg", + "url": "https://storage.googleapis.com/falserverless/example_outputs/flux_krea_redux_output_1.jpg", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated images.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-1/krea/redux/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-1/krea/redux/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-1/krea/redux": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux1KreaReduxInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-1/krea/redux/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux1KreaReduxOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-kontext-lora/inpaint", + "metadata": { + "display_name": "Flux Kontext Lora", + "category": "image-to-image", + "description": "Fast inpainting endpoint for the FLUX.1 Kontext [dev] model with LoRA support, enabling rapid and high-quality image inpainting with reference images, while using pre-trained LoRA adaptations for specific styles, brand identities, and product-specific outputs.", + "status": "active", + "tags": [ + "image-editing", + "image-inpainting", + "image-to-image" + ], + "updated_at": "2026-01-26T21:43:11.203Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-1.jpg", + "model_url": "https://fal.run/fal-ai/flux-kontext-lora/inpaint", + "license_type": "commercial", + "date": "2025-07-29T16:56:50.213Z", + "group": { + "key": "flux-kontext-lora", + "label": "Image Inpainting" + }, + "highlighted": false, + "kind": "inference", + "stream_url": "https://fal.ai/models/fal-ai/flux-kontext-lora/inpaint/stream", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-kontext-lora/inpaint", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-kontext-lora/inpaint queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-kontext-lora/inpaint", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-1.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-kontext-lora/inpaint", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-kontext-lora/inpaint/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxKontextLoraInpaintInput": { + "x-fal-order-properties": [ + "image_url", + "prompt", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "loras", + "acceleration", + "reference_image_url", + "mask_url", + "strength" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A football lying on a field." + ], + "description": "The prompt for the image to image task.", + "type": "string", + "title": "Prompt" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "description": "The speed of the generation. The higher the speed, the faster the generation.", + "type": "string", + "title": "Acceleration", + "default": "none" + }, + "reference_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/reference_kontext_inpaint.jpeg" + ], + "description": "The URL of the reference image for inpainting.", + "type": "string", + "title": "Reference Image URL" + }, + "loras": { + "description": "\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "examples": [], + "title": "Loras", + "default": [] + }, + "guidance_scale": { + "minimum": 0, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "type": "number", + "title": "Guidance scale (CFG)", + "maximum": 20, + "default": 2.5 + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "title": "Num Images", + "maximum": 4, + "default": 1 + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "png" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/image_kontext_inpaint.jpeg" + ], + "description": "The URL of the image to be inpainted.", + "type": "string", + "title": "Image URL" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "strength": { + "minimum": 0.01, + "description": "The strength of the initial image. 
Higher strength values are better for this model.", + "type": "number", + "title": "Strength", + "maximum": 1, + "default": 0.88 + }, + "num_inference_steps": { + "minimum": 10, + "description": "The number of inference steps to perform.", + "type": "integer", + "title": "Num Inference Steps", + "maximum": 50, + "default": 30 + }, + "mask_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/mask_kontext_inpaint.png" + ], + "description": "The URL of the mask for inpainting.", + "type": "string", + "title": "Mask URL" + }, + "seed": { + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "type": "integer", + "title": "Seed" + } + }, + "title": "BaseKontextInpaintInput", + "required": [ + "prompt", + "image_url", + "reference_image_url", + "mask_url" + ] + }, + "FluxKontextLoraInpaintOutput": { + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "examples": [ + [ + { + "height": 832, + "content_type": "image/jpeg", + "url": "https://storage.googleapis.com/falserverless/example_outputs/kontext_inpaint_output.png", + "width": 1248 + } + ] + ], + "description": "The generated image files info.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "seed": { + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ", + "type": "integer", + "title": "Seed" + } + }, + "title": "KontextInpaintOutput", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "LoraWeight": { + "x-fal-order-properties": [ + "path", + "scale" + ], + "type": "object", + "properties": { + "path": { + "description": "URL or the path to the LoRA weights.", + "type": "string", + "title": "Path" + }, + "scale": { + "minimum": 0, + "description": "\n The scale of the LoRA weight. 
This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "type": "number", + "title": "Scale", + "maximum": 4, + "default": 1 + } + }, + "title": "LoraWeight", + "required": [ + "path" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "title": "Image", + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-kontext-lora/inpaint/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-kontext-lora/inpaint/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-kontext-lora/inpaint": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxKontextLoraInpaintInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-kontext-lora/inpaint/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxKontextLoraInpaintOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/hunyuan_world", + "metadata": { + "display_name": "Hunyuan World", + "category": "image-to-image", + "description": "Hunyuan World 1.0 turns a single image into a panorama or a 3D world. 
It creates realistic scenes from the image, allowing you to explore and view it from different angles.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:12.157Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/rabbit/K4wH2Zlj5N6woxkqlpGya_f9ae0f21d42a4c67afe183ab1eea225e.jpg", + "model_url": "https://fal.run/fal-ai/hunyuan_world", + "license_type": "commercial", + "date": "2025-07-28T09:45:58.041Z", + "group": { + "key": "Hunyuan World 1.0", + "label": "Image to Panorama" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/hunyuan_world", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/hunyuan_world queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/hunyuan_world", + "category": "image-to-image", + "thumbnailUrl": "https://fal.media/files/rabbit/K4wH2Zlj5N6woxkqlpGya_f9ae0f21d42a4c67afe183ab1eea225e.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/hunyuan_world", + "documentationUrl": "https://fal.ai/models/fal-ai/hunyuan_world/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Hunyuan_worldInput": { + "title": "ImageToPanoramaRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A skyland of wonders" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for the panorama generation." + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/penguin/_4oXlxt85dr0WY2o0I894_output.png" + ], + "title": "Image Url", + "type": "string", + "description": "The URL of the image to convert to a panorama." 
+ } + }, + "x-fal-order-properties": [ + "image_url", + "prompt" + ], + "required": [ + "image_url", + "prompt" + ] + }, + "Hunyuan_worldOutput": { + "title": "ImageToPanoramaResponse", + "type": "object", + "properties": { + "image": { + "examples": [ + { + "height": 960, + "file_size": 2738127, + "file_name": "5db7925423b44f2a98098cd8f7cad7ec.png", + "content_type": "image/png", + "url": "https://v3.fal.media/files/kangaroo/P2AmXuLlyDIsivqjV_rAr_5db7925423b44f2a98098cd8f7cad7ec.png", + "width": 1920 + } + ], + "title": "Image", + "description": "The generated panorama image.", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + } + }, + "x-fal-order-properties": [ + "image" + ], + "required": [ + "image" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/hunyuan_world/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan_world/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/hunyuan_world": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Hunyuan_worldInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan_world/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Hunyuan_worldOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-editing/retouch", + "metadata": { + "display_name": "Image Editing", + "category": "image-to-image", + "description": "Retouch photos of faces. Remove blemishes and improve the skin.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:12.779Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-4.jpg", + "model_url": "https://fal.run/fal-ai/image-editing/retouch", + "license_type": "commercial", + "date": "2025-07-24T18:06:19.737Z", + "group": { + "key": "image-editing", + "label": "Retouch" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-editing/retouch", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-editing/retouch queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-editing/retouch", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-4.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-editing/retouch", + "documentationUrl": "https://fal.ai/models/fal-ai/image-editing/retouch/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageEditingRetouchInput": { + "title": "RetouchInput", + "type": "object", + "properties": { + "lora_scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Lora Scale", + "description": "The scale factor for the LoRA model. 
Controls the strength of the LoRA effect.", + "default": 1 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/gallery/tulsi.png" + ], + "title": "Image URL", + "type": "string", + "description": "URL of the image to retouch." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.", + "default": 3.5 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The same seed and the same prompt given to the same version of the model will output the same image every time." + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of inference steps for sampling.", + "default": 30 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker for the generated image.", + "default": true + } + }, + "description": "Input model for retouch endpoint.", + "x-fal-order-properties": [ + "image_url", + "guidance_scale", + "num_inference_steps", + "enable_safety_checker", + "lora_scale", + "seed", + "sync_mode" + ], + "required": [ + "image_url" + ] + }, + "ImageEditingRetouchOutput": { + "title": "RetouchOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://storage.googleapis.com/falserverless/gallery/tulsi-retouched.png" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "images", + "seed" + ], + "required": [ + "images", + "seed" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-editing/retouch/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/retouch/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/image-editing/retouch": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingRetouchInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/retouch/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingRetouchOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/hidream-e1-1", + "metadata": { + "display_name": "Hidream E1 1", + "category": "image-to-image", + "description": "Edit images with natural language", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:12.920Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Upscale-2.jpg", + "model_url": "https://fal.run/fal-ai/hidream-e1-1", + "license_type": "commercial", + "date": "2025-07-23T21:23:30.829Z", + "highlighted": false, + "kind": "inference", + "stream_url": "/stream", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/hidream-e1-1", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/hidream-e1-1 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/hidream-e1-1", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Upscale-2.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/hidream-e1-1", + "documentationUrl": "https://fal.ai/models/fal-ai/hidream-e1-1/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" 
+ } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "HidreamE11Input": { + "title": "BaseInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Convert the image into a 3D animated style." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "title": "Number of Images", + "type": "integer", + "maximum": 4, + "description": "The number of images to generate.", + "default": 1 + }, + "image_guidance_scale": { + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your initial image when looking for a related image to show you.\n ", + "type": "number", + "minimum": 0, + "title": "Image Guidance Scale (CFG)", + "examples": [ + 2 + ], + "maximum": 20, + "default": 2 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/hidream/woman.png" + ], + "title": "Image URL", + "type": "string", + "description": "URL of an input image to edit." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "guidance_scale": { + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "type": "number", + "minimum": 0, + "title": "Guidance Scale (CFG)", + "examples": [ + 3.5 + ], + "maximum": 20, + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 1, + "title": "Number of Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps to perform.", + "default": 50 + }, + "target_image_description": { + "title": "Target Image Description", + "type": "string", + "description": "The description of the target image after your edits have been made. Leave this blank to allow the model to use its own imagination." + }, + "negative_prompt": { + "examples": [ + "low resolution, blur" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. 
moustache, blurry, low resolution).\n ", + "default": "low resolution, blur" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + } + }, + "x-fal-order-properties": [ + "target_image_description", + "prompt", + "negative_prompt", + "image_url", + "num_inference_steps", + "seed", + "guidance_scale", + "image_guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format" + ], + "required": [ + "image_url" + ] + }, + "HidreamE11Output": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/hidream-e1-1/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hidream-e1-1/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/hidream-e1-1": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HidreamE11Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hidream-e1-1/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HidreamE11Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/rife", + "metadata": { + "display_name": "RIFE", + "category": "image-to-image", + "description": "Interpolate images with RIFE - Real-Time Intermediate Flow Estimation", + "status": "active", + "tags": [ + "interpolation" + ], + "updated_at": "2026-01-26T21:43:13.426Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/penguin/HLrNymdnzx1neVc9Pj0c1_debdeb10494e45888dd68d1a2df2dcac.jpg", + "model_url": "https://fal.run/fal-ai/rife", + "license_type": "commercial", + "date": "2025-07-22T20:22:14.996Z", + "group": { + "key": "rife", + "label": "Image Interpolation" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/rife", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/rife queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/rife", + "category": "image-to-image", + "thumbnailUrl": "https://fal.media/files/penguin/HLrNymdnzx1neVc9Pj0c1_debdeb10494e45888dd68d1a2df2dcac.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/rife", + "documentationUrl": "https://fal.ai/models/fal-ai/rife/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "RifeInput": { + "x-fal-order-properties": [ + "start_image_url", + "end_image_url", + "output_type", + "output_format", + "num_frames", + "include_start", + "include_end", + "fps", + "sync_mode" + ], + "type": "object", + "properties": { + "output_format": { + "enum": [ + "png", + "jpeg" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output images. Only applicable if output_type is 'images'.", + "default": "jpeg" + }, + "fps": { + "minimum": 1, + "maximum": 60, + "type": "integer", + "title": "Frames Per Second", + "description": "Frames per second for the output video. Only applicable if output_type is 'video'.", + "default": 8 + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "include_end": { + "description": "Whether to include the end image in the output.", + "type": "boolean", + "title": "Include End", + "default": false + }, + "include_start": { + "description": "Whether to include the start image in the output.", + "type": "boolean", + "title": "Include Start", + "default": false + }, + "num_frames": { + "description": "The number of frames to generate between the input images.", + "type": "integer", + "minimum": 1, + "maximum": 64, + "title": "Number of Frames", + "default": 1 + }, + "end_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/interpolate-end-frame.png" + ], + "description": "The URL of the second image to use as the ending point for interpolation.", + "type": "string", + "title": "End Image URL" + }, + "output_type": { + "enum": [ + "images", + "video" + ], + "title": "Output Type", + "type": "string", + "description": "The type of output to generate; either individual images or a video.", + "default": "images" + }, + "start_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/interpolate-start-frame.png" + ], + "description": "The URL of the first image to use as the starting point for interpolation.", + "type": "string", + "title": "Start Image URL" + } + }, + "title": "RIFEImageInput", + "required": [ + "start_image_url", + "end_image_url" + ] + }, + "RifeOutput": { + "x-fal-order-properties": [ + "images", + "video" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/rife-mid-frame.jpeg" + } + ] + ], + "description": "The generated frames as individual images.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + }, + "default": [] + }, + "video": { + "description": "The generated video file, if output_type is 'video'.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "RIFEImageOutput" + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "type": "integer", + "title": "Height" 
+ }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/rife/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/rife/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/rife": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RifeInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/rife/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RifeOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/film", + "metadata": { + "display_name": "FILM", + "category": "image-to-image", + "description": "Interpolate images with FILM - Frame Interpolation for Large Motion", + "status": "active", + "tags": [ + "interpolation" + ], + "updated_at": "2026-01-26T21:43:13.807Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos.jpg", + "model_url": "https://fal.run/fal-ai/film", + "license_type": "commercial", + "date": "2025-07-22T20:06:32.726Z", + "group": { + "key": "film", + "label": "Image Interpolation" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/film", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/film queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/film", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/film", + "documentationUrl": "https://fal.ai/models/fal-ai/film/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FilmInput": { + "title": "FILMImageInput", + "type": "object", + "properties": { + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "description": "The write mode of the output video. 
Only applicable if output_type is 'video'.", + "examples": [ + "balanced" + ], + "default": "balanced" + }, + "num_frames": { + "description": "The number of frames to generate between the input images.", + "type": "integer", + "minimum": 1, + "maximum": 64, + "title": "Number of Frames", + "default": 1 + }, + "include_start": { + "title": "Include Start", + "type": "boolean", + "description": "Whether to include the start image in the output.", + "default": false + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "description": "The quality of the output video. Only applicable if output_type is 'video'.", + "examples": [ + "high" + ], + "default": "high" + }, + "include_end": { + "title": "Include End", + "type": "boolean", + "description": "Whether to include the end image in the output.", + "default": false + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "fps": { + "minimum": 1, + "maximum": 60, + "type": "integer", + "title": "Frames Per Second", + "description": "Frames per second for the output video. Only applicable if output_type is 'video'.", + "default": 8 + }, + "start_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/interpolate-start-frame.png" + ], + "title": "Start Image URL", + "type": "string", + "description": "The URL of the first image to use as the starting point for interpolation." + }, + "end_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/interpolate-end-frame.png" + ], + "title": "End Image URL", + "type": "string", + "description": "The URL of the second image to use as the ending point for interpolation." + }, + "image_format": { + "enum": [ + "png", + "jpeg" + ], + "title": "Image Format", + "type": "string", + "description": "The format of the output images. Only applicable if output_type is 'images'.", + "default": "jpeg" + }, + "output_type": { + "enum": [ + "images", + "video" + ], + "title": "Output Type", + "type": "string", + "description": "The type of output to generate; either individual images or a video.", + "default": "images" + } + }, + "x-fal-order-properties": [ + "start_image_url", + "end_image_url", + "output_type", + "image_format", + "video_quality", + "video_write_mode", + "num_frames", + "include_start", + "include_end", + "fps", + "sync_mode" + ], + "required": [ + "start_image_url", + "end_image_url" + ] + }, + "FilmOutput": { + "title": "FILMImageOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/film-mid-frame.jpeg" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated frames as individual images.", + "items": { + "$ref": "#/components/schemas/ImageFile" + }, + "default": [] + }, + "video": { + "title": "Video", + "description": "The generated video file, if output_type is 'video'.", + "allOf": [ + { + "$ref": "#/components/schemas/VideoFile" + } + ] + } + }, + "x-fal-order-properties": [ + "images", + "video" + ] + }, + "ImageFile": { + "title": "ImageFile", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." 
+ }, + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the image" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the image" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "duration": { + "title": "Duration", + "type": "number", + "description": "The duration of the video" + }, + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the video" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the video" + }, + "fps": { + "title": "Fps", + "type": "number", + "description": "The FPS of the video" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "num_frames": { + "title": "Num Frames", + "type": "integer", + "description": "The number of frames in the video" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/film/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/film/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/film": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FilmInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/film/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FilmOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/calligrapher", + "metadata": { + "display_name": "Calligrapher", + "category": "image-to-image", + "description": "Use the text and font retaining capabilities of calligrapher to modify texts on your books, clothes and many more.", + "status": "active", + "tags": [ + "image-to-image" + ], + "updated_at": "2026-01-26T21:43:16.838Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-1.jpg", + "model_url": "https://fal.run/fal-ai/calligrapher", + "license_type": "commercial", + "date": "2025-07-12T11:31:53.987Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/calligrapher", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/calligrapher queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/calligrapher", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-1.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/calligrapher", + "documentationUrl": "https://fal.ai/models/fal-ai/calligrapher/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "CalligrapherInput": { + "title": "Input", + "type": "object", + "properties": { + "use_context": { + "title": "Use Context", + "type": "boolean", + "description": "Whether to prepend context reference to the input", + "default": true + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 4, + "description": "How many images to generate", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "Target image size for generation", + "default": { + "height": 1024, + "width": 1024 + } + }, + "auto_mask_generation": { + "title": "Auto Mask Generation", + "type": "boolean", + "description": "Whether to automatically generate mask from detected text", + "default": false + }, + "reference_image_url": { + "title": "Reference Image Url", + "type": "string", + "description": "Optional base64 reference image for style" + }, + "source_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/calligrapher/test17_source.png" + ], + "title": "Source Image Url", + "type": "string", + "description": "Base64-encoded source image with drawn mask layers" + }, + "prompt": { + "examples": [ + "The text is 'Rise'" + ], + "title": "Prompt", + "type": "string", + "description": "Text prompt to inpaint or customize" + }, + "mask_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/calligrapher/test17_mask.png" + ], + "title": "Mask Image Url", + "type": "string", + "description": "Base64-encoded mask image (optional if using auto_mask_generation)" + }, + "source_text": { + "title": "Source Text", + "type": "string", + "description": "Source text to replace (if empty, masks all detected text)", + "default": "" + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "Number of inference steps (1-50)", + "default": 50 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility" + }, + "cfg_scale": { + "minimum": 0, + "title": "Cfg Scale", + "type": "number", + "maximum": 5, + "description": "Guidance or strength scale for the model", + "default": 1 + } + }, + "x-fal-order-properties": [ + "source_image_url", + "mask_image_url", + "reference_image_url", + "prompt", + "num_images", + "image_size", + "cfg_scale", + "num_inference_steps", + "seed", + "use_context", + "auto_mask_generation", + "source_text" + ], + "required": [ + "source_image_url", + "prompt" + ] + }, + "CalligrapherOutput": { + "title": "Output", + "type": "object", + "properties": { + "images": { + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/Image" + } + } + }, + "x-fal-order-properties": [ + "images" + ], + "required": [ + "images" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height 
of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/calligrapher/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/calligrapher/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/calligrapher": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CalligrapherInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/calligrapher/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CalligrapherOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/bria/reimagine", + "metadata": { + "display_name": "Bria", + "category": "image-to-image", + "description": "Structure Reference allows generating new images while preserving the structure of an input image, guided by text prompts. Perfect for transforming sketches, illustrations, or photos into new illustrations. Trained exclusively on licensed data for safe and risk-free commercial use.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:17.582Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/rabbit/wXG4NtIA2sZUz7-CiooUJ_bb5f4b6f122149849d744baaad6896f8.jpg", + "model_url": "https://fal.run/fal-ai/bria/reimagine", + "license_type": "commercial", + "date": "2025-07-08T06:49:49.700Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/bria/reimagine", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/bria/reimagine queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/bria/reimagine", + "category": "image-to-image", + "thumbnailUrl": "https://fal.media/files/rabbit/wXG4NtIA2sZUz7-CiooUJ_bb5f4b6f122149849d744baaad6896f8.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/bria/reimagine", + "documentationUrl": "https://fal.ai/models/fal-ai/bria/reimagine/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "BriaReimagineInput": { + "title": "ReimagineInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A 2d illustration of a dog in a vibrant park" + ], + "description": "The prompt you would like to use to generate images.", + "type": "string", + "title": "Prompt" + }, + "num_results": { + "minimum": 1, + "description": "How many images you would like to generate. When using any Guidance Method, Value is set to 1.", + "type": "integer", + "title": "Num Results", + "maximum": 4, + "default": 1 + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "structure_ref_influence": { + "examples": [ + 0.15 + ], + "description": "The influence of the structure reference on the generated image.", + "type": "number", + "title": "Structure Ref Influence", + "default": 0.75 + }, + "fast": { + "description": "Whether to use the fast model", + "type": "boolean", + "title": "Fast", + "default": true + }, + "seed": { + "minimum": 0, + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "type": "integer", + "title": "Seed", + "maximum": 2147483647 + }, + "num_inference_steps": { + "minimum": 20, + "description": "The number of iterations the model goes through to refine the generated image. This parameter is optional.", + "type": "integer", + "title": "Num Inference Steps", + "maximum": 50, + "default": 30 + }, + "structure_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/bria/bria_reimagine_input.png" + ], + "description": "The URL of the structure reference image. Use \"\" to leave empty. 
Accepted formats are jpeg, jpg, png, webp.", + "type": "string", + "title": "Structure Image Url", + "default": "" + } + }, + "x-fal-order-properties": [ + "prompt", + "structure_image_url", + "structure_ref_influence", + "num_results", + "seed", + "fast", + "num_inference_steps", + "sync_mode" + ], + "required": [ + "prompt" + ] + }, + "BriaReimagineOutput": { + "title": "ReimagineOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/bria/bria_reimagine_output.png" + } + ] + ], + "description": "The generated images", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "description": "Seed value used for generation.", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "images", + "seed" + ], + "required": [ + "images", + "seed" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/bria/reimagine/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bria/reimagine/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/bria/reimagine": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BriaReimagineInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bria/reimagine/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BriaReimagineOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-editing/realism", + "metadata": { + "display_name": "Image Editing", + "category": "image-to-image", + "description": "Add details to faces, enhance face features, remove blur.", + "status": "active", + "tags": [ + "stylized", + "transform", + "realism" + ], + "updated_at": "2026-01-26T21:43:18.207Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "model_url": "https://fal.run/fal-ai/image-editing/realism", + "license_type": "commercial", + "date": "2025-07-07T10:12:01.612Z", + "group": { + "key": "image-editing", + "label": "Realism" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-editing/realism", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-editing/realism queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-editing/realism", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-editing/realism", + "documentationUrl": "https://fal.ai/models/fal-ai/image-editing/realism/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageEditingRealismInput": { + "title": "RealismInput", + "type": "object", + "properties": { + "lora_scale": { + "minimum": 0, + "maximum": 2, + "type": "number", + "title": "Lora Scale", + "description": "The scale factor for the LoRA model. 
Controls the strength of the LoRA effect.", + "default": 0.6 + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/penguin/QpRcoPb4dDyDJJSpFm4CZ_img_55_start.jpg" + ], + "title": "Image URL", + "type": "string", + "description": "URL of the image to enhance with realism details." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.", + "default": 3.5 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The same seed and the same prompt given to the same version of the model will output the same image every time." + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of inference steps for sampling.", + "default": 30 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker for the generated image.", + "default": true + } + }, + "description": "Input model for realism enhancement endpoint.", + "x-fal-order-properties": [ + "image_url", + "guidance_scale", + "num_inference_steps", + "enable_safety_checker", + "lora_scale", + "seed", + "sync_mode" + ], + "required": [ + "image_url" + ] + }, + "ImageEditingRealismOutput": { + "title": "RealismOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3.fal.media/files/tiger/wsyfYqHhrqB8CUYZ_71W0.png" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "images", + "seed" + ], + "required": [ + "images", + "seed" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-editing/realism/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/realism/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/image-editing/realism": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingRealismInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/realism/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingRealismOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/post-processing/vignette", + "metadata": { + "display_name": "Post Processing", + "category": "image-to-image", + "description": "Add a darkening vignette effect around the edges of the image with adjustable strength", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:18.828Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/kangaroo/8KT3LjgSQtyW87hvxYr9n_8c31d2880fac4b9c877518c8162c2c8a.jpg", + "model_url": "https://fal.run/fal-ai/post-processing/vignette", + "license_type": "commercial", + "date": "2025-07-01T19:20:16.130Z", + "group": { + "key": "Post-Process", + "label": "Vignette" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/post-processing/vignette", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/post-processing/vignette queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/post-processing/vignette", + "category": "image-to-image", + "thumbnailUrl": "https://fal.media/files/kangaroo/8KT3LjgSQtyW87hvxYr9n_8c31d2880fac4b9c877518c8162c2c8a.jpg", + "playgroundUrl": 
"https://fal.ai/models/fal-ai/post-processing/vignette", + "documentationUrl": "https://fal.ai/models/fal-ai/post-processing/vignette/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PostProcessingVignetteInput": { + "title": "VignetteInput", + "type": "object", + "properties": { + "vignette_strength": { + "minimum": 0, + "maximum": 10, + "type": "number", + "title": "Vignette Strength", + "description": "Vignette strength", + "default": 0.5 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/web-examples/post-process/postpro-input.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "URL of image to process" + } + }, + "x-fal-order-properties": [ + "image_url", + "vignette_strength" + ], + "required": [ + "image_url" + ] + }, + "PostProcessingVignetteOutput": { + "title": "VignetteOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3.fal.media/files/panda/x3zmDkHv7Wiohkev7vIEp_ded742da8499468f887659f582aa099c.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The processed images with vignette effect", + "items": { + "$ref": "#/components/schemas/Image" + } + } + }, + "x-fal-order-properties": [ + "images" + ], + "required": [ + "images" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/post-processing/vignette/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/post-processing/vignette/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/post-processing/vignette": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PostProcessingVignetteInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/post-processing/vignette/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PostProcessingVignetteOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/post-processing/solarize", + "metadata": { + "display_name": "Post Processing", + "category": "image-to-image", + "description": "Apply solarization effect by inverting pixel values above a threshold", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:18.952Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/zebra/eR5hrHpGP3yIomYpon_z2_707e890433164c9494d964f9660833ec.jpg", + "model_url": "https://fal.run/fal-ai/post-processing/solarize", + "license_type": "commercial", + "date": "2025-07-01T19:10:42.546Z", + "group": { + "key": "Post-Process", + "label": "Solarize" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/post-processing/solarize", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/post-processing/solarize queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/post-processing/solarize", + "category": "image-to-image", + "thumbnailUrl": "https://fal.media/files/zebra/eR5hrHpGP3yIomYpon_z2_707e890433164c9494d964f9660833ec.jpg", + "playgroundUrl": 
"https://fal.ai/models/fal-ai/post-processing/solarize", + "documentationUrl": "https://fal.ai/models/fal-ai/post-processing/solarize/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PostProcessingSolarizeInput": { + "title": "SolarizeInput", + "type": "object", + "properties": { + "solarize_threshold": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Solarize Threshold", + "description": "Solarize threshold", + "default": 0.5 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/web-examples/post-process/postpro-input.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "URL of image to process" + } + }, + "x-fal-order-properties": [ + "image_url", + "solarize_threshold" + ], + "required": [ + "image_url" + ] + }, + "PostProcessingSolarizeOutput": { + "title": "SolarizeOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3.fal.media/files/penguin/placeholder.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The processed images with solarize effect", + "items": { + "$ref": "#/components/schemas/Image" + } + } + }, + "x-fal-order-properties": [ + "images" + ], + "required": [ + "images" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/post-processing/solarize/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/post-processing/solarize/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/post-processing/solarize": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PostProcessingSolarizeInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/post-processing/solarize/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PostProcessingSolarizeOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/post-processing/sharpen", + "metadata": { + "display_name": "Post Processing", + "category": "image-to-image", + "description": "Apply sharpening effects with three modes: basic unsharp mask, smart sharpening with edge preservation, and Contrast Adaptive Sharpening (CAS).", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:19.212Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/elephant/koBXGLN1Y5Qzu7ZKUogdf_7ceac7fe825841b5813a724990cd4f5b.jpg", + "model_url": "https://fal.run/fal-ai/post-processing/sharpen", + "license_type": "commercial", + "date": "2025-07-01T19:09:38.467Z", + "group": { + "key": "Post-Process", + "label": "Sharpen" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/post-processing/sharpen", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/post-processing/sharpen queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/post-processing/sharpen", + "category": "image-to-image", + "thumbnailUrl": 
"https://fal.media/files/elephant/koBXGLN1Y5Qzu7ZKUogdf_7ceac7fe825841b5813a724990cd4f5b.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/post-processing/sharpen", + "documentationUrl": "https://fal.ai/models/fal-ai/post-processing/sharpen/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PostProcessingSharpenInput": { + "title": "SharpenInput", + "type": "object", + "properties": { + "sharpen_mode": { + "enum": [ + "basic", + "smart", + "cas" + ], + "title": "Sharpen Mode", + "type": "string", + "description": "Type of sharpening to apply", + "default": "basic" + }, + "sharpen_alpha": { + "minimum": 0.1, + "maximum": 5, + "type": "number", + "title": "Sharpen Alpha", + "description": "Sharpen strength (for basic mode)", + "default": 1 + }, + "noise_radius": { + "minimum": 1, + "maximum": 25, + "type": "integer", + "title": "Noise Radius", + "description": "Noise radius for smart sharpen", + "default": 7 + }, + "sharpen_radius": { + "minimum": 1, + "maximum": 15, + "type": "integer", + "title": "Sharpen Radius", + "description": "Sharpen radius (for basic mode)", + "default": 1 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/web-examples/post-process/postpro-input.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "URL of image to process" + }, + "smart_sharpen_strength": { + "minimum": 0, + "maximum": 25, + "type": "number", + "title": "Smart Sharpen Strength", + "description": "Smart sharpen strength", + "default": 5 + }, + "cas_amount": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Cas Amount", + "description": "CAS sharpening amount", + "default": 0.8 + }, + "preserve_edges": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Preserve Edges", + "description": "Edge preservation factor", + "default": 0.75 + }, + "smart_sharpen_ratio": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Smart Sharpen Ratio", + "description": "Smart sharpen blend ratio", + "default": 0.5 + } + }, + "x-fal-order-properties": [ + "image_url", + "sharpen_mode", + "sharpen_radius", + "sharpen_alpha", + "noise_radius", + "preserve_edges", + "smart_sharpen_strength", + "smart_sharpen_ratio", + "cas_amount" + ], + "required": [ + "image_url" + ] + }, + "PostProcessingSharpenOutput": { + "title": "SharpenOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3.fal.media/files/kangaroo/vULpeyThlkaCx_qOU9VH4_dab975f33d984062932804ac53af0c82.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": 
"The processed images with sharpen effect", + "items": { + "$ref": "#/components/schemas/Image" + } + } + }, + "x-fal-order-properties": [ + "images" + ], + "required": [ + "images" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/post-processing/sharpen/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/post-processing/sharpen/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/post-processing/sharpen": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PostProcessingSharpenInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/post-processing/sharpen/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PostProcessingSharpenOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/post-processing/parabolize", + "metadata": { + "display_name": "Post Processing", + "category": "image-to-image", + "description": "Apply a parabolic distortion effect with configurable coefficient and vertex position.", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:19.340Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/web-examples/post-process/post-processing.webp", + "model_url": "https://fal.run/fal-ai/post-processing/parabolize", + "license_type": "commercial", + "date": "2025-07-01T19:08:11.793Z", + "group": { + "key": "Post-Process", + "label": "Parabolize" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/post-processing/parabolize", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/post-processing/parabolize queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/post-processing/parabolize", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/web-examples/post-process/post-processing.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/post-processing/parabolize", + "documentationUrl": "https://fal.ai/models/fal-ai/post-processing/parabolize/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "PostProcessingParabolizeInput": { + "title": "ParabolizeInput", + "type": "object", + "properties": { + "parabolize_coeff": { + "minimum": -10, + "maximum": 10, + "type": "number", + "title": "Parabolize Coeff", + "description": "Parabolize coefficient", + "default": 1 + }, + "vertex_y": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Vertex Y", + "description": "Vertex Y position", + "default": 0.5 + }, + "vertex_x": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Vertex X", + "description": "Vertex X position", + "default": 0.5 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/web-examples/post-process/postpro-input.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "URL of image to process" + } + }, + "x-fal-order-properties": [ + "image_url", + "parabolize_coeff", + "vertex_x", + "vertex_y" + ], + "required": [ + "image_url" + ] + }, + "PostProcessingParabolizeOutput": { + "title": "ParabolizeOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3.fal.media/files/koala/S9GevoVb05aStq2nN-8zo_01136a15793b48e69e5c0ae0fb80f148.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The processed images with parabolize effect", + "items": { + "$ref": "#/components/schemas/Image" + } + } + }, + "x-fal-order-properties": [ + "images" + ], + "required": [ + "images" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/post-processing/parabolize/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/post-processing/parabolize/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/post-processing/parabolize": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PostProcessingParabolizeInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/post-processing/parabolize/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PostProcessingParabolizeOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/post-processing/grain", + "metadata": { + "display_name": "Post Processing", + "category": "image-to-image", + "description": "Apply film grain effect with different styles (modern, analog, kodak, fuji, cinematic, newspaper) and customizable intensity and scale", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:19.476Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/web-examples/post-process/post-processing.webp", + "model_url": "https://fal.run/fal-ai/post-processing/grain", + "license_type": "commercial", + "date": "2025-07-01T19:07:29.002Z", + "group": { + "key": "Post-Process", + "label": "Grain" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/post-processing/grain", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/post-processing/grain queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/post-processing/grain", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/web-examples/post-process/post-processing.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/post-processing/grain", + "documentationUrl": "https://fal.ai/models/fal-ai/post-processing/grain/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PostProcessingGrainInput": { + "title": "GrainInput", + "type": "object", + "properties": { + "grain_style": { + "enum": [ + "modern", + "analog", + "kodak", + "fuji", + "cinematic", + "newspaper" + ], + "title": "Grain Style", + "type": "string", + "description": "Style of film grain to apply", + "default": "modern" + }, + "grain_intensity": { + "description": "Film grain intensity", + "type": "number", + "minimum": 0, + "maximum": 1, + "title": "Grain Intensity", + "multipleOf": 0.01, + "default": 0.4 + }, + "grain_scale": { + "minimum": 1, + "maximum": 100, + "type": "number", + "title": "Grain Scale", + "description": "Film grain scale", + "default": 10 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/web-examples/post-process/postpro-input.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "URL of image to process" + } + }, + "x-fal-order-properties": [ + "image_url", + "grain_intensity", + "grain_scale", + "grain_style" + ], + "required": [ + "image_url" + ] + }, + "PostProcessingGrainOutput": { + "title": "GrainOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3.fal.media/files/penguin/28c9f5BYvlibrelN6d6cE_c0288c7ff93f4e0cb9117dc67837454f.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The processed images with grain effect", + "items": { + "$ref": "#/components/schemas/Image" + } + } + }, + "x-fal-order-properties": [ + "images" + ], + "required": [ + "images" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/post-processing/grain/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/post-processing/grain/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/post-processing/grain": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PostProcessingGrainInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/post-processing/grain/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PostProcessingGrainOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/post-processing/dodge-burn", + "metadata": { + "display_name": "Post Processing", + "category": "image-to-image", + "description": "Apply dodge and burn effects with multiple modes and adjustable intensity.", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:19.763Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/web-examples/post-process/post-processing.webp", + "model_url": "https://fal.run/fal-ai/post-processing/dodge-burn", + "license_type": "commercial", + "date": "2025-07-01T19:05:17.650Z", + "group": { + "key": "Post-Process", + "label": "Dodge Burn" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/post-processing/dodge-burn", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/post-processing/dodge-burn queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/post-processing/dodge-burn", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/web-examples/post-process/post-processing.webp", + "playgroundUrl": 
"https://fal.ai/models/fal-ai/post-processing/dodge-burn", + "documentationUrl": "https://fal.ai/models/fal-ai/post-processing/dodge-burn/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PostProcessingDodgeBurnInput": { + "title": "DodgeBurnInput", + "type": "object", + "properties": { + "dodge_burn_mode": { + "enum": [ + "dodge", + "burn", + "dodge_and_burn", + "burn_and_dodge", + "color_dodge", + "color_burn", + "linear_dodge", + "linear_burn" + ], + "title": "Dodge Burn Mode", + "type": "string", + "description": "Dodge and burn mode", + "default": "dodge" + }, + "dodge_burn_intensity": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Dodge Burn Intensity", + "description": "Dodge and burn intensity", + "default": 0.5 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/web-examples/post-process/postpro-input.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "URL of image to process" + } + }, + "x-fal-order-properties": [ + "image_url", + "dodge_burn_intensity", + "dodge_burn_mode" + ], + "required": [ + "image_url" + ] + }, + "PostProcessingDodgeBurnOutput": { + "title": "DodgeBurnOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3.fal.media/files/monkey/HSPzsHHD5VyVCuRc2bXCH_aa54abbada994934a4fed25c938db0c0.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The processed images with dodge and burn effect", + "items": { + "$ref": "#/components/schemas/Image" + } + } + }, + "x-fal-order-properties": [ + "images" + ], + "required": [ + "images" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. 
It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/post-processing/dodge-burn/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/post-processing/dodge-burn/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/post-processing/dodge-burn": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PostProcessingDodgeBurnInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/post-processing/dodge-burn/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PostProcessingDodgeBurnOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/post-processing/dissolve", + "metadata": { + "display_name": "Post Processing", + "category": "image-to-image", + "description": "Blend two images together using smooth linear interpolation with a configurable blend factor.", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:20.710Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/web-examples/post-process/post-processing.webp", + "model_url": "https://fal.run/fal-ai/post-processing/dissolve", + "license_type": "commercial", + "date": "2025-07-01T18:54:24.846Z", + "group": { + "key": "Post-Process", + "label": "Dissolve" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/post-processing/dissolve", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/post-processing/dissolve queue.", + "x-fal-metadata": { + 
"endpointId": "fal-ai/post-processing/dissolve", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/web-examples/post-process/post-processing.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/post-processing/dissolve", + "documentationUrl": "https://fal.ai/models/fal-ai/post-processing/dissolve/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PostProcessingDissolveInput": { + "title": "DissolveInput", + "type": "object", + "properties": { + "dissolve_factor": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Dissolve Factor", + "description": "Dissolve blend factor", + "default": 0.5 + }, + "dissolve_image_url": { + "examples": [ + "https://v3.fal.media/files/monkey/NJW5irDVP1qwoTMdwOcDV_39qXtqYS0zSUrFwbrJkOY.jpeg" + ], + "title": "Dissolve Image Url", + "type": "string", + "description": "URL of second image for dissolve" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/elephant/pLQKJXcFdmIVvB2qhw7vv_59578fb9-8178-4f24-82f0-ea7ec5bc5f2d.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "URL of image to process" + } + }, + "x-fal-order-properties": [ + "image_url", + "dissolve_image_url", + "dissolve_factor" + ], + "required": [ + "image_url", + "dissolve_image_url" + ] + }, + "PostProcessingDissolveOutput": { + "title": "DissolveOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3.fal.media/files/penguin/YQGJjCiEEEFswJnHr5vXU_0e83a7bac3f342e5ba55dd5ac4f073b7.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The processed images with dissolve effect", + "items": { + "$ref": "#/components/schemas/Image" + } + } + }, + "x-fal-order-properties": [ + "images" + ], + "required": [ + "images" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/post-processing/dissolve/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/post-processing/dissolve/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/post-processing/dissolve": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PostProcessingDissolveInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/post-processing/dissolve/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PostProcessingDissolveOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/post-processing/desaturate", + "metadata": { + "display_name": "Post Processing", + "category": "image-to-image", + "description": "Reduce color saturation using different methods (luminance Rec.709, luminance Rec.601, average, lightness) with adjustable factor.", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:20.957Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/web-examples/post-process/post-processing.webp", + "model_url": "https://fal.run/fal-ai/post-processing/desaturate", + "license_type": "commercial", + "date": "2025-07-01T18:53:36.237Z", + "group": { + "key": "Post-Process", + "label": "Desaturate" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/post-processing/desaturate", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/post-processing/desaturate queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/post-processing/desaturate", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/web-examples/post-process/post-processing.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/post-processing/desaturate", + "documentationUrl": "https://fal.ai/models/fal-ai/post-processing/desaturate/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
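Every endpoint in this file repeats the same `QueueStatus` schema and `/requests/{request_id}/status` route, so a single polling helper covers all of them. A sketch follows; the helper name, fixed one-second interval, and auth format are ours.

```ts
type QueueStatus = {
  status: 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED'
  request_id: string
  response_url?: string
  queue_position?: number
}

// Poll the shared status route until the queue reports COMPLETED.
async function waitForCompletion(
  endpoint: string, // e.g. 'fal-ai/post-processing/desaturate'
  requestId: string,
): Promise<QueueStatus> {
  for (;;) {
    const res = await fetch(
      `https://queue.fal.run/${endpoint}/requests/${requestId}/status`,
      { headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
    )
    const status = (await res.json()) as QueueStatus
    if (status.status === 'COMPLETED') return status
    await new Promise((r) => setTimeout(r, 1000)) // crude fixed backoff
  }
}
```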
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "PostProcessingDesaturateInput": { + "title": "DesaturateInput", + "type": "object", + "properties": { + "desaturate_method": { + "enum": [ + "luminance (Rec.709)", + "luminance (Rec.601)", + "average", + "lightness" + ], + "title": "Desaturate Method", + "type": "string", + "description": "Desaturation method", + "default": "luminance (Rec.709)" + }, + "desaturate_factor": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Desaturate Factor", + "description": "Desaturation factor", + "default": 1 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/web-examples/post-process/postpro-input.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "URL of image to process" + } + }, + "x-fal-order-properties": [ + "image_url", + "desaturate_factor", + "desaturate_method" + ], + "required": [ + "image_url" + ] + }, + "PostProcessingDesaturateOutput": { + "title": "DesaturateOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3.fal.media/files/penguin/2ZdX11Gj6aQ6mVI2QVKfy_e5a51ffa7657422f85417146bba4df8e.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The processed images with desaturation effect", + "items": { + "$ref": "#/components/schemas/Image" + } + } + }, + "x-fal-order-properties": [ + "images" + ], + "required": [ + "images" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/post-processing/desaturate/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
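Once a request completes, a plain GET on `/requests/{request_id}` returns the output schema (here `PostProcessingDesaturateOutput`). A sketch of fetching it; the request id is a placeholder from an earlier submit.

```ts
const requestId = '<request-id-from-submit>' // placeholder

// Fetch the finished result; the body matches PostProcessingDesaturateOutput.
const result = (await fetch(
  `https://queue.fal.run/fal-ai/post-processing/desaturate/requests/${requestId}`,
  { headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
).then((r) => r.json())) as { images: Array<{ url: string }> }

for (const image of result.images) console.log(image.url)
```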
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/post-processing/desaturate/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/post-processing/desaturate": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PostProcessingDesaturateInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/post-processing/desaturate/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PostProcessingDesaturateOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/post-processing/color-tint", + "metadata": { + "display_name": "Post Processing", + "category": "image-to-image", + "description": "Apply various color tints (sepia, red, green, blue, cyan, magenta, yellow, purple, orange, warm, cool, lime, navy, vintage, rose, teal, maroon, peach, lavender, olive) with adjustable strength.", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:21.140Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/web-examples/post-process/post-processing.webp", + "model_url": "https://fal.run/fal-ai/post-processing/color-tint", + "license_type": "commercial", + "date": "2025-07-01T18:52:48.294Z", + "group": { + "key": "Post-Process", + "label": "Color Tint" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/post-processing/color-tint", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/post-processing/color-tint queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/post-processing/color-tint", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/web-examples/post-process/post-processing.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/post-processing/color-tint", + "documentationUrl": "https://fal.ai/models/fal-ai/post-processing/color-tint/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", 
+ "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PostProcessingColorTintInput": { + "title": "ColorTintInput", + "type": "object", + "properties": { + "tint_strength": { + "minimum": 0.1, + "maximum": 1, + "type": "number", + "title": "Tint Strength", + "description": "Tint strength", + "default": 1 + }, + "tint_mode": { + "enum": [ + "sepia", + "red", + "green", + "blue", + "cyan", + "magenta", + "yellow", + "purple", + "orange", + "warm", + "cool", + "lime", + "navy", + "vintage", + "rose", + "teal", + "maroon", + "peach", + "lavender", + "olive" + ], + "title": "Tint Mode", + "type": "string", + "description": "Tint color mode", + "default": "sepia" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/web-examples/post-process/postpro-input.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "URL of image to process" + } + }, + "x-fal-order-properties": [ + "image_url", + "tint_strength", + "tint_mode" + ], + "required": [ + "image_url" + ] + }, + "PostProcessingColorTintOutput": { + "title": "ColorTintOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3.fal.media/files/monkey/7mRCFzvhnPPdicWELUUv2_be6131e02e434330bc05bb0a30974357.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The processed images with color tint effect", + "items": { + "$ref": "#/components/schemas/Image" + } + } + }, + "x-fal-order-properties": [ + "images" + ], + "required": [ + "images" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/post-processing/color-tint/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/post-processing/color-tint/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/post-processing/color-tint": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PostProcessingColorTintInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/post-processing/color-tint/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PostProcessingColorTintOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/post-processing/color-correction", + "metadata": { + "display_name": "Post Processing", + "category": "image-to-image", + "description": "Adjust color temperature, brightness, contrast, saturation, and gamma values for color correction.", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:21.269Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/elephant/GV2S-bd9TChg0FDfkUXhs_abda259c89bf433091d8a1bc46ef8cf0.jpg", + "model_url": "https://fal.run/fal-ai/post-processing/color-correction", + "license_type": "commercial", + "date": "2025-07-01T18:51:53.197Z", + "group": { + "key": "Post-Process", + "label": "Color Correction" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/post-processing/color-correction", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/post-processing/color-correction queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/post-processing/color-correction", + "category": "image-to-image", + "thumbnailUrl": 
"https://fal.media/files/elephant/GV2S-bd9TChg0FDfkUXhs_abda259c89bf433091d8a1bc46ef8cf0.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/post-processing/color-correction", + "documentationUrl": "https://fal.ai/models/fal-ai/post-processing/color-correction/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PostProcessingColorCorrectionInput": { + "title": "ColorCorrectionInput", + "type": "object", + "properties": { + "gamma": { + "minimum": 0.2, + "maximum": 2.2, + "type": "number", + "title": "Gamma", + "description": "Gamma adjustment", + "default": 1 + }, + "saturation": { + "minimum": -100, + "maximum": 100, + "type": "number", + "title": "Saturation", + "description": "Saturation adjustment", + "default": 0 + }, + "temperature": { + "minimum": -100, + "maximum": 100, + "type": "number", + "title": "Temperature", + "description": "Color temperature adjustment", + "default": 0 + }, + "brightness": { + "minimum": -100, + "maximum": 100, + "type": "number", + "title": "Brightness", + "description": "Brightness adjustment", + "default": 0 + }, + "contrast": { + "minimum": -100, + "maximum": 100, + "type": "number", + "title": "Contrast", + "description": "Contrast adjustment", + "default": 0 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/web-examples/post-process/postpro-input.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "URL of image to process" + } + }, + "x-fal-order-properties": [ + "image_url", + "temperature", + "brightness", + "contrast", + "saturation", + "gamma" + ], + "required": [ + "image_url" + ] + }, + "PostProcessingColorCorrectionOutput": { + "title": "ColorCorrectionOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3.fal.media/files/penguin/wUw8rsAidnBXhDW7NcXIA_e0ba138d401849de98614a5db21c178e.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The processed images with color correction", + "items": { + "$ref": "#/components/schemas/Image" + } + } + }, + "x-fal-order-properties": [ + "images" + ], + "required": [ + "images" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." 
+ }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/post-processing/color-correction/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/post-processing/color-correction/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
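The cancel route is the one mutation using PUT, and it returns only a `success` flag; the schema doesn't say what `false` means (presumably the job already started or finished). A sketch:

```ts
const requestIdToCancel = '<request-id-from-submit>' // placeholder

// PUT the cancel route; the response body is just { success?: boolean }.
const { success } = (await fetch(
  `https://queue.fal.run/fal-ai/post-processing/color-correction/requests/${requestIdToCancel}/cancel`,
  { method: 'PUT', headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
).then((r) => r.json())) as { success?: boolean }
console.log(success ? 'cancelled' : 'could not cancel')
```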
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/post-processing/color-correction": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PostProcessingColorCorrectionInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/post-processing/color-correction/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PostProcessingColorCorrectionOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/post-processing/chromatic-aberration", + "metadata": { + "display_name": "Post Processing", + "category": "image-to-image", + "description": "Create chromatic aberration by shifting red, green, and blue channels horizontally or vertically with customizable shift amounts.", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:21.393Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/web-examples/post-process/post-processing.webp", + "model_url": "https://fal.run/fal-ai/post-processing/chromatic-aberration", + "license_type": "commercial", + "date": "2025-07-01T18:51:01.000Z", + "group": { + "key": "Post-Process", + "label": "Chromatic Abberation" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/post-processing/chromatic-aberration", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/post-processing/chromatic-aberration queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/post-processing/chromatic-aberration", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/web-examples/post-process/post-processing.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/post-processing/chromatic-aberration", + "documentationUrl": "https://fal.ai/models/fal-ai/post-processing/chromatic-aberration/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "PostProcessingChromaticAberrationInput": { + "title": "ChromaticAberrationInput", + "type": "object", + "properties": { + "blue_shift": { + "minimum": -20, + "maximum": 20, + "type": "integer", + "title": "Blue Shift", + "description": "Blue channel shift amount", + "default": 0 + }, + "red_shift": { + "minimum": -20, + "maximum": 20, + "type": "integer", + "title": "Red Shift", + "description": "Red channel shift amount", + "default": 0 + }, + "green_direction": { + "enum": [ + "horizontal", + "vertical" + ], + "title": "Green Direction", + "type": "string", + "description": "Green channel shift direction", + "default": "horizontal" + }, + "blue_direction": { + "enum": [ + "horizontal", + "vertical" + ], + "title": "Blue Direction", + "type": "string", + "description": "Blue channel shift direction", + "default": "horizontal" + }, + "red_direction": { + "enum": [ + "horizontal", + "vertical" + ], + "title": "Red Direction", + "type": "string", + "description": "Red channel shift direction", + "default": "horizontal" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/web-examples/post-process/postpro-input.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "URL of image to process" + }, + "green_shift": { + "minimum": -20, + "maximum": 20, + "type": "integer", + "title": "Green Shift", + "description": "Green channel shift amount", + "default": 0 + } + }, + "x-fal-order-properties": [ + "image_url", + "red_shift", + "red_direction", + "green_shift", + "green_direction", + "blue_shift", + "blue_direction" + ], + "required": [ + "image_url" + ] + }, + "PostProcessingChromaticAberrationOutput": { + "title": "ChromaticAberrationOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3.fal.media/files/tiger/KZJQ66Ebe9FCMNA28hX-m_cebbce060fd34aeb99c8c5531b6b63bc.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The processed images with chromatic aberration effect", + "items": { + "$ref": "#/components/schemas/Image" + } + } + }, + "x-fal-order-properties": [ + "images" + ], + "required": [ + "images" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
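Chromatic aberration is parameterized per channel: an integer shift in -20..20 plus a horizontal/vertical direction for each of red, green, and blue. A body for a classic red/blue fringe follows; the values are ours.

```ts
// Push red and blue apart horizontally, leave green in place.
const chromaticAberrationBody = {
  image_url: 'https://example.com/input.jpg', // placeholder
  red_shift: 4,                 // -20..20
  red_direction: 'horizontal',  // 'horizontal' | 'vertical'
  blue_shift: -4,
  blue_direction: 'horizontal',
  green_shift: 0,               // default; could be omitted
}
```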
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/post-processing/chromatic-aberration/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/post-processing/chromatic-aberration/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/post-processing/chromatic-aberration": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PostProcessingChromaticAberrationInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/post-processing/chromatic-aberration/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PostProcessingChromaticAberrationOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/post-processing/blur", + "metadata": { + "display_name": "Post Processing", + "category": "image-to-image", + "description": "Apply Gaussian or Kuwahara blur effects with adjustable radius and sigma parameters", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:21.517Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/panda/bjwlW3KSsaWKc_0aK5zu1_1bd1f105d15a43618789039ab333c21a.jpg", + "model_url": "https://fal.run/fal-ai/post-processing/blur", + "license_type": "commercial", + "date": "2025-07-01T18:49:58.071Z", + "group": { + "key": "Post-Process", + "label": "Blur" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/post-processing/blur", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/post-processing/blur queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/post-processing/blur", + "category": "image-to-image", + "thumbnailUrl": 
"https://fal.media/files/panda/bjwlW3KSsaWKc_0aK5zu1_1bd1f105d15a43618789039ab333c21a.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/post-processing/blur", + "documentationUrl": "https://fal.ai/models/fal-ai/post-processing/blur/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PostProcessingBlurInput": { + "title": "BlurInput", + "type": "object", + "properties": { + "blur_sigma": { + "minimum": 0.1, + "maximum": 10, + "type": "number", + "title": "Blur Sigma", + "description": "Sigma for Gaussian blur", + "default": 1 + }, + "blur_radius": { + "minimum": 0, + "maximum": 31, + "type": "integer", + "title": "Blur Radius", + "description": "Blur radius", + "default": 3 + }, + "blur_type": { + "enum": [ + "gaussian", + "kuwahara" + ], + "title": "Blur Type", + "type": "string", + "description": "Type of blur to apply", + "default": "gaussian" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/web-examples/post-process/postpro-input.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "URL of image to process" + } + }, + "x-fal-order-properties": [ + "image_url", + "blur_type", + "blur_radius", + "blur_sigma" + ], + "required": [ + "image_url" + ] + }, + "PostProcessingBlurOutput": { + "title": "BlurOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3.fal.media/files/panda/n0gz4pyZJ_qSY5uIjSq6U_24a8903e697f4e1b902825b729ce7d5d.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The processed images with blur effect", + "items": { + "$ref": "#/components/schemas/Image" + } + } + }, + "x-fal-order-properties": [ + "images" + ], + "required": [ + "images" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." 
+ }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/post-processing/blur/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/post-processing/blur/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/post-processing/blur": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PostProcessingBlurInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/post-processing/blur/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PostProcessingBlurOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-editing/youtube-thumbnails", + "metadata": { + "display_name": "Image Editing", + "category": "image-to-image", + "description": "Generate YouTube thumbnails with custom text", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:22.240Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/rabbit/SZZJwGHb-HkjWln94WNc0_23a19743226b4125b5d0e5d4769879ed.jpg", + "model_url": "https://fal.run/fal-ai/image-editing/youtube-thumbnails", + "license_type": "commercial", + "date": "2025-06-30T15:50:34.100Z", + "group": { + "key": "image-editing", + "label": "YT Thumbnails" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-editing/youtube-thumbnails", + "version": "1.0.0", + "description": 
"The OpenAPI schema for the fal-ai/image-editing/youtube-thumbnails queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-editing/youtube-thumbnails", + "category": "image-to-image", + "thumbnailUrl": "https://fal.media/files/rabbit/SZZJwGHb-HkjWln94WNc0_23a19743226b4125b5d0e5d4769879ed.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-editing/youtube-thumbnails", + "documentationUrl": "https://fal.ai/models/fal-ai/image-editing/youtube-thumbnails/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageEditingYoutubeThumbnailsInput": { + "title": "YouTubeThumbnailsInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Generate youtube thumbnails using text 'EPIC FAIL." + ], + "title": "Thumbnail Text", + "type": "string", + "description": "The text to include in the YouTube thumbnail.", + "default": "Generate youtube thumbnails" + }, + "lora_scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Lora Scale", + "description": "The scale factor for the LoRA model. Controls the strength of the LoRA effect.", + "default": 0.5 + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/koala/QUihQrMqowYu30UFC_Atk.png" + ], + "title": "Image URL", + "type": "string", + "description": "URL of the image to convert to YouTube thumbnail style." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.", + "default": 3.5 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The same seed and the same prompt given to the same version of the model will output the same image every time." 
+ }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of inference steps for sampling.", + "default": 30 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker for the generated image.", + "default": true + } + }, + "description": "Input model for YouTube thumbnails endpoint.", + "x-fal-order-properties": [ + "image_url", + "guidance_scale", + "num_inference_steps", + "enable_safety_checker", + "lora_scale", + "seed", + "sync_mode", + "prompt" + ], + "required": [ + "image_url" + ] + }, + "ImageEditingYoutubeThumbnailsOutput": { + "title": "YouTubeThumbnailsOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3.fal.media/files/panda/oEr-wYCpGx0e15bAdykby.png" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "images", + "seed" + ], + "required": [ + "images", + "seed" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-editing/youtube-thumbnails/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/youtube-thumbnails/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/image-editing/youtube-thumbnails": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingYoutubeThumbnailsInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/youtube-thumbnails/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingYoutubeThumbnailsOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/topaz/upscale/image", + "metadata": { + "display_name": "Topaz", + "category": "image-to-image", + "description": "Use the powerful and accurate topaz image enhancer to enhance your images.", + "status": "active", + "tags": [ + "image-to-image" + ], + "updated_at": "2026-01-26T21:43:22.698Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-3.jpg", + "model_url": "https://fal.run/fal-ai/topaz/upscale/image", + "license_type": "commercial", + "date": "2025-06-27T20:22:54.457Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/topaz/upscale/image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/topaz/upscale/image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/topaz/upscale/image", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-3.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/topaz/upscale/image", + "documentationUrl": "https://fal.ai/models/fal-ai/topaz/upscale/image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." 
+ }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "TopazUpscaleImageInput": { + "title": "ImageUpscaleRequest", + "type": "object", + "properties": { + "face_enhancement_creativity": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Face Enhancement Creativity", + "description": "Creativity level for face enhancement. 0.0 means no creativity, 1.0 means maximum creativity. Ignored if face ehnancement is disabled.", + "default": 0 + }, + "face_enhancement_strength": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Face Enhancement Strength", + "description": "Strength of the face enhancement. 0.0 means no enhancement, 1.0 means maximum enhancement. Ignored if face ehnancement is disabled.", + "default": 0.8 + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "Output format of the upscaled image.", + "default": "jpeg" + }, + "face_enhancement": { + "title": "Face Enhancement", + "type": "boolean", + "description": "Whether to apply face enhancement to the image.", + "default": true + }, + "subject_detection": { + "enum": [ + "All", + "Foreground", + "Background" + ], + "title": "Subject Detection", + "type": "string", + "description": "Subject detection mode for the image enhancement.", + "default": "All" + }, + "model": { + "enum": [ + "Low Resolution V2", + "Standard V2", + "CGI", + "High Fidelity V2", + "Text Refine", + "Recovery", + "Redefine", + "Recovery V2" + ], + "title": "Model", + "type": "string", + "description": "Model to use for image enhancement.", + "default": "Standard V2" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/codeformer/codeformer_poor_1.jpeg" + ], + "title": "Image Url", + "type": "string", + "description": "Url of the image to be upscaled" + }, + "upscale_factor": { + "minimum": 1, + "maximum": 4, + "type": "number", + "title": "Upscale Factor", + "description": "Factor to upscale the video by (e.g. 2.0 doubles width and height)", + "default": 2 + }, + "crop_to_fill": { + "title": "Crop To Fill", + "type": "boolean", + "default": false + } + }, + "x-fal-order-properties": [ + "model", + "upscale_factor", + "crop_to_fill", + "image_url", + "output_format", + "subject_detection", + "face_enhancement", + "face_enhancement_creativity", + "face_enhancement_strength" + ], + "required": [ + "image_url" + ] + }, + "TopazUpscaleImageOutput": { + "title": "ImageUpscaleOutput", + "type": "object", + "properties": { + "image": { + "title": "Image", + "description": "The upscaled image.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "image" + ], + "required": [ + "image" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." 
+ }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/topaz/upscale/image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/topaz/upscale/image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/topaz/upscale/image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TopazUpscaleImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/topaz/upscale/image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TopazUpscaleImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-editing/broccoli-haircut", + "metadata": { + "display_name": "Image Editing", + "category": "image-to-image", + "description": "Transform your character's hair into broccoli style while keeping the original characters likeness", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:23.359Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/tiger/xDrMHbm03pWLw1pSXS4-r_dc670c9a3ba0406681a20fe6a10c98fa.jpg", + "model_url": "https://fal.run/fal-ai/image-editing/broccoli-haircut", + "license_type": "commercial", + "date": "2025-06-26T04:41:08.203Z", + "group": { + "key": "image-editing", + "label": "Broccoli Haircut" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for 
fal-ai/image-editing/broccoli-haircut", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-editing/broccoli-haircut queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-editing/broccoli-haircut", + "category": "image-to-image", + "thumbnailUrl": "https://fal.media/files/tiger/xDrMHbm03pWLw1pSXS4-r_dc670c9a3ba0406681a20fe6a10c98fa.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-editing/broccoli-haircut", + "documentationUrl": "https://fal.ai/models/fal-ai/image-editing/broccoli-haircut/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageEditingBroccoliHaircutInput": { + "title": "BroccoliHaircutInput", + "type": "object", + "properties": { + "lora_scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Lora Scale", + "description": "The scale factor for the LoRA model. Controls the strength of the LoRA effect.", + "default": 1 + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/rabbit/8nqqnF_KS9c0pGgwvRNAY_IMG_8421.jpeg" + ], + "title": "Image URL", + "type": "string", + "description": "URL of the image to apply broccoli haircut style." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.", + "default": 3.5 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The same seed and the same prompt given to the same version of the model will output the same image every time." 
+ }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of inference steps for sampling.", + "default": 30 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker for the generated image.", + "default": true + } + }, + "description": "Input model for broccoli haircut endpoint.", + "x-fal-order-properties": [ + "image_url", + "guidance_scale", + "num_inference_steps", + "enable_safety_checker", + "lora_scale", + "seed", + "sync_mode" + ], + "required": [ + "image_url" + ] + }, + "ImageEditingBroccoliHaircutOutput": { + "title": "BroccoliHaircutOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3.fal.media/files/elephant/Rc2zAM5x0jpj4decdJTZX.png" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "images", + "seed" + ], + "required": [ + "images", + "seed" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-editing/broccoli-haircut/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/broccoli-haircut/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/image-editing/broccoli-haircut": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingBroccoliHaircutInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/broccoli-haircut/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingBroccoliHaircutOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-editing/wojak-style", + "metadata": { + "display_name": "Image Editing", + "category": "image-to-image", + "description": "Transform your photos into wojak style while keeping the original characters likeness", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:23.487Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "model_url": "https://fal.run/fal-ai/image-editing/wojak-style", + "license_type": "commercial", + "date": "2025-06-26T04:31:05.600Z", + "group": { + "key": "image-editing", + "label": "Wojak Style" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-editing/wojak-style", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-editing/wojak-style queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-editing/wojak-style", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-editing/wojak-style", + "documentationUrl": "https://fal.ai/models/fal-ai/image-editing/wojak-style/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageEditingWojakStyleInput": { + "title": "WojakStyleInput", + "type": "object", + "properties": { + "lora_scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Lora Scale", + "description": "The scale factor for the LoRA model. Controls the strength of the LoRA effect.", + "default": 1 + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/rabbit/UGZQWl6lUdYeu91QzUZys_5ODmUQcqNeufTbf_hhO0h_unnamed%20(2).jpg" + ], + "title": "Image URL", + "type": "string", + "description": "URL of the image to convert to wojak style." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.", + "default": 3.5 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The same seed and the same prompt given to the same version of the model will output the same image every time." + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of inference steps for sampling.", + "default": 30 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker for the generated image.", + "default": true + } + }, + "description": "Input model for wojak style endpoint.", + "x-fal-order-properties": [ + "image_url", + "guidance_scale", + "num_inference_steps", + "enable_safety_checker", + "lora_scale", + "seed", + "sync_mode" + ], + "required": [ + "image_url" + ] + }, + "ImageEditingWojakStyleOutput": { + "title": "WojakStyleOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3.fal.media/files/koala/_fpVsU6fC6b77OGnPAxuw.png" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "images", + "seed" + ], + "required": [ + "images", + "seed" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." 
+ }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-editing/wojak-style/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/wojak-style/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/image-editing/wojak-style": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingWojakStyleInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/wojak-style/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingWojakStyleOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-editing/plushie-style", + "metadata": { + "display_name": "Image Editing", + "category": "image-to-image", + "description": "Transform your photos into cool plushies while keeping the original character's likeness", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:23.615Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "model_url": "https://fal.run/fal-ai/image-editing/plushie-style", + "license_type": "commercial", + "date": "2025-06-26T04:29:50.482Z", + "group": { + "key": "image-editing", + "label": "Plushie" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-editing/plushie-style", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-editing/plushie-style queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-editing/plushie-style", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-editing/plushie-style", + "documentationUrl": "https://fal.ai/models/fal-ai/image-editing/plushie-style/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position."
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageEditingPlushieStyleInput": { + "title": "PlushieStyleInput", + "type": "object", + "properties": { + "lora_scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Lora Scale", + "description": "The scale factor for the LoRA model. Controls the strength of the LoRA effect.", + "default": 1 + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/elephant/5fVB7Y5ERU7zAvA_kd2i3_trump-portrait_square.jpeg" + ], + "title": "Image URL", + "type": "string", + "description": "URL of the image to convert to plushie style." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.", + "default": 3.5 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The same seed and the same prompt given to the same version of the model will output the same image every time." + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of inference steps for sampling.", + "default": 30 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker for the generated image.", + "default": true + } + }, + "description": "Input model for plushie style endpoint.", + "x-fal-order-properties": [ + "image_url", + "guidance_scale", + "num_inference_steps", + "enable_safety_checker", + "lora_scale", + "seed", + "sync_mode" + ], + "required": [ + "image_url" + ] + }, + "ImageEditingPlushieStyleOutput": { + "title": "PlushieStyleOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3.fal.media/files/penguin/mktL882Np-nFxirKNib9m.png" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "images", + "seed" + ], + "required": [ + "images", + "seed" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." 
+ }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-editing/plushie-style/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/plushie-style/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/image-editing/plushie-style": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingPlushieStyleInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/plushie-style/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingPlushieStyleOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-kontext-lora", + "metadata": { + "display_name": "Flux Kontext Lora", + "category": "image-to-image", + "description": "Fast endpoint for the FLUX.1 Kontext [dev] model with LoRA support, enabling rapid and high-quality image editing using pre-trained LoRA adaptations for specific styles, brand identities, and product-specific outputs.", + "status": "active", + "tags": [ + "image-editing", + "image-to-image" + ], + "updated_at": "2026-01-26T21:43:23.998Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Upscale-3.jpeg", + "model_url": "https://fal.run/fal-ai/flux-kontext-lora", + "license_type": "commercial", + "date": "2025-06-25T21:13:19.794Z", + "group": { + "key": "flux-kontext-lora", + "label": "Image Editing" + }, + "highlighted": false, + "kind": "inference", + "stream_url": 
"https://fal.ai/models/fal-ai/flux-kontext-lora/stream", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/flux-kontext-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/flux-kontext-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-kontext-lora", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-kontext-lora queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-kontext-lora", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Upscale-3.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-kontext-lora", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-kontext-lora/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxKontextLoraInput": { + "x-fal-order-properties": [ + "image_url", + "prompt", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "loras", + "acceleration", + "resolution_mode" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "change the setting to a day time, add a lot of people walking the sidewalk while maintaining the same style of the painting" + ], + "description": "The prompt to edit the image.", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "title": "Num Images", + "maximum": 4, + "default": 1 + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "description": "The speed of the generation. The higher the speed, the faster the generation.", + "type": "string", + "title": "Acceleration", + "default": "none" + }, + "resolution_mode": { + "enum": [ + "auto", + "match_input", + "1:1", + "16:9", + "21:9", + "3:2", + "2:3", + "4:5", + "5:4", + "3:4", + "4:3", + "9:16", + "9:21" + ], + "description": "\n Determines how the output resolution is set for image editing.\n - `auto`: The model selects an optimal resolution from a predefined set that best matches the input image's aspect ratio. This is the recommended setting for most use cases as it's what the model was trained on.\n - `match_input`: The model will attempt to use the same resolution as the input image. The resolution will be adjusted to be compatible with the model's requirements (e.g. 
dimensions must be multiples of 16 and within supported limits).\n Apart from these, a few aspect ratios are also supported.\n ", + "type": "string", + "title": "Resolution Mode", + "default": "match_input" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "png" + }, + "image_url": { + "x-fal": { + "timeout": 20, + "max_width": 14142, + "max_height": 14142 + }, + "description": "The URL of the image to edit.\n\nMax width: 14142px, Max height: 14142px, Timeout: 20s", + "type": "string", + "title": "Image URL", + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/kontext_example_input.webp" + ] + }, + "loras": { + "description": "\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "examples": [], + "title": "Loras", + "default": [] + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "type": "number", + "title": "Guidance scale (CFG)", + "maximum": 20, + "default": 2.5 + }, + "num_inference_steps": { + "minimum": 10, + "description": "The number of inference steps to perform.", + "type": "integer", + "title": "Num Inference Steps", + "maximum": 50, + "default": 30 + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "seed": { + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "type": "integer", + "title": "Seed" + } + }, + "title": "BaseKontextEditInput", + "required": [ + "prompt", + "image_url" + ] + }, + "FluxKontextLoraOutput": { + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "examples": [ + [ + { + "height": 768, + "content_type": "image/jpeg", + "url": "https://storage.googleapis.com/falserverless/example_outputs/kontext_example_output.jpeg", + "width": 1024 + } + ] + ], + "description": "The generated image files info.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "seed": { + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ", + "type": "integer", + "title": "Seed" + } + }, + "title": "KontextEditOutput", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "LoraWeight": { + "x-fal-order-properties": [ + "path", + "scale" + ], + "type": "object", + "properties": { + "path": { + "description": "URL or the path to the LoRA weights.", + "type": "string", + "title": "Path" + }, + "scale": { + "minimum": 0, + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "type": "number", + "title": "Scale", + "maximum": 4, + "default": 1 + } + }, + "title": "LoraWeight", + "required": [ + "path" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "title": "Image", + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-kontext-lora/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-kontext-lora/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-kontext-lora": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxKontextLoraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-kontext-lora/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxKontextLoraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/fashn/tryon/v1.6", + "metadata": { + "display_name": "FASHN Virtual Try-On V1.6", + "category": "image-to-image", + "description": "FASHN v1.6 delivers precise virtual try-on capabilities, accurately rendering garment details like text and patterns at 864x1296 resolution from both on-model and flat-lay photo references.", + "status": "active", + "tags": [ + "try-on", + "fashion", + "clothing" + ], + "updated_at": "2026-01-26T21:43:24.254Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/fashn_wide.webp", + "model_url": "https://fal.run/fal-ai/fashn/tryon/v1.6", + "license_type": "commercial", + "date": "2025-06-24T20:58:43.252Z", + "group": { + "key": "Fashn", + "label": "v1.6" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/fashn/tryon/v1.6", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/fashn/tryon/v1.6 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/fashn/tryon/v1.6", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/fashn_wide.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/fashn/tryon/v1.6", + "documentationUrl": "https://fal.ai/models/fal-ai/fashn/tryon/v1.6/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FashnTryonV16Input": { + "title": "V16Input", + "type": "object", + "properties": { + "model_image": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/model.png" + ], + "title": "Model Image", + "type": "string", + "description": "URL or base64 of the model image" + }, + "moderation_level": { + "enum": [ + "none", + "permissive", + "conservative" + ], + "title": "Moderation Level", + "type": "string", + "description": "Content moderation level for garment images. 'none' disables moderation, 'permissive' blocks only explicit content, 'conservative' also blocks underwear and swimwear.", + "default": "permissive" + }, + "garment_photo_type": { + "enum": [ + "auto", + "model", + "flat-lay" + ], + "title": "Garment Photo Type", + "type": "string", + "description": "Specifies the type of garment photo to optimize internal parameters for better performance. 'model' is for photos of garments on a model, 'flat-lay' is for flat-lay or ghost mannequin images, and 'auto' attempts to automatically detect the photo type.", + "default": "auto" + }, + "garment_image": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/garment.webp" + ], + "title": "Garment Image", + "type": "string", + "description": "URL or base64 of the garment image" + }, + "category": { + "enum": [ + "tops", + "bottoms", + "one-pieces", + "auto" + ], + "title": "Category", + "type": "string", + "description": "Category of the garment to try-on. 'auto' will attempt to automatically detect the category of the garment.", + "default": "auto" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "segmentation_free": { + "title": "Segmentation Free", + "type": "boolean", + "description": "Disables human parsing on the model image.", + "default": true + }, + "num_samples": { + "minimum": 1, + "title": "Num Samples", + "type": "integer", + "maximum": 4, + "description": "Number of images to generate in a single run. Image generation has a random element in it, so trying multiple images at once increases the chances of getting a good result.", + "default": 1 + }, + "mode": { + "enum": [ + "performance", + "balanced", + "quality" + ], + "title": "Mode", + "type": "string", + "description": "Specifies the mode of operation. 'performance' mode is faster but may sacrifice quality, 'balanced' mode is a balance between speed and quality, and 'quality' mode is slower but produces higher quality results.", + "default": "balanced" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Sets random operations to a fixed state. Use the same seed to reproduce results with the same inputs, or different seed to force different results." + }, + "output_format": { + "enum": [ + "png", + "jpeg" + ], + "title": "Output Format", + "type": "string", + "description": "Output format of the generated images. 
'png' is highest quality, while 'jpeg' is faster", + "default": "png" + } + }, + "x-fal-order-properties": [ + "model_image", + "garment_image", + "category", + "mode", + "garment_photo_type", + "moderation_level", + "seed", + "num_samples", + "segmentation_free", + "sync_mode", + "output_format" + ], + "required": [ + "model_image", + "garment_image" + ] + }, + "FashnTryonV16Output": { + "title": "V16Output", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://cdn.fashn.ai/4ddf1d78-63df-44bb-8e1f-355fff3a7b87/output_0.png" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/File" + } + } + }, + "x-fal-order-properties": [ + "images" + ], + "required": [ + "images" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/fashn/tryon/v1.6/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fashn/tryon/v1.6/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/fashn/tryon/v1.6": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FashnTryonV16Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fashn/tryon/v1.6/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FashnTryonV16Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/chain-of-zoom", + "metadata": { + "display_name": "Chain Of Zoom", + "category": "image-to-image", + "description": "Extreme Super-Resolution via Scale Autoregression and Preference Alignment", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:25.791Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Upscale-4.jpeg", + "model_url": "https://fal.run/fal-ai/chain-of-zoom", + "license_type": "commercial", + "date": "2025-06-18T18:08:20.354Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/chain-of-zoom", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/chain-of-zoom queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/chain-of-zoom", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Upscale-4.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/chain-of-zoom", + "documentationUrl": "https://fal.ai/models/fal-ai/chain-of-zoom/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ChainOfZoomInput": { + "x-fal-order-properties": [ + "image_url", + "scale", + "center_x", + "center_y", + "user_prompt", + "sync_mode" + ], + "type": "object", + "properties": { + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. 
This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ", + "default": false + }, + "center_y": { + "description": "Y coordinate of zoom center (0-1)", + "type": "number", + "minimum": 0, + "maximum": 1, + "title": "Center Y", + "default": 0.5 + }, + "scale": { + "description": "Zoom scale in powers of 2", + "type": "number", + "minimum": 1, + "maximum": 8, + "title": "Scale", + "multipleOf": 1, + "default": 5 + }, + "center_x": { + "description": "X coordinate of zoom center (0-1)", + "type": "number", + "minimum": 0, + "maximum": 1, + "title": "Center X", + "default": 0.5 + }, + "user_prompt": { + "description": "Additional prompt text to guide the zoom enhancement", + "type": "string", + "title": "User Prompt", + "default": "" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/coz_example_input" + ], + "title": "Image Url", + "type": "string", + "description": "Input image to zoom into" + } + }, + "title": "Input", + "required": [ + "image_url" + ] + }, + "ChainOfZoomOutput": { + "x-fal-order-properties": [ + "images", + "scale", + "zoom_center" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_outputs/coz_example_output_5" + ], + "title": "Images", + "type": "array", + "description": "List of intermediate images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "zoom_center": { + "title": "Zoom Center", + "type": "array", + "description": "Center coordinates used for zoom", + "items": { + "type": "number" + } + }, + "scale": { + "title": "Scale", + "type": "number", + "description": "Actual linear zoom scale applied" + } + }, + "title": "Output", + "required": [ + "images", + "scale", + "zoom_center" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "title": "Image", + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/chain-of-zoom/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/chain-of-zoom/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/chain-of-zoom": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ChainOfZoomInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/chain-of-zoom/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ChainOfZoomOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pasd", + "metadata": { + "display_name": "PASD", + "category": "image-to-image", + "description": "Pixel-Aware Diffusion Model for Realistic Image Super-Resolution and Personalized Stylization", + "status": "active", + "tags": [ + "utility", + "editing" + ], + "updated_at": "2026-01-26T21:43:26.630Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-1.jpg", + "model_url": "https://fal.run/fal-ai/pasd", + "license_type": "commercial", + "date": "2025-06-17T22:57:07.317Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pasd", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pasd queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pasd", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-1.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/pasd", + "documentationUrl": "https://fal.ai/models/fal-ai/pasd/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "PasdInput": { + "title": "Input", + "type": "object", + "properties": { + "conditioning_scale": { + "minimum": 0.1, + "maximum": 1, + "type": "number", + "title": "Conditioning Scale", + "description": "ControlNet conditioning scale (0.1-1.0)", + "default": 0.8 + }, + "prompt": { + "title": "Prompt", + "type": "string", + "description": "Additional prompt to guide super-resolution", + "default": "" + }, + "image_url": { + "examples": [ + "https://fal.media/files/rabbit/JlBgYUyQRS3zxiBu_B4fM.png" + ], + "title": "Image Url", + "type": "string", + "description": "Input image to super-resolve" + }, + "steps": { + "minimum": 10, + "maximum": 50, + "type": "integer", + "title": "Steps", + "description": "Number of inference steps (10-50)", + "default": 25 + }, + "scale": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Scale", + "description": "Upscaling factor (1-4x)", + "default": 2 + }, + "guidance_scale": { + "minimum": 1, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "Guidance scale for diffusion (1.0-20.0)", + "default": 7 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt to avoid unwanted artifacts", + "default": "blurry, dirty, messy, frames, deformed, dotted, noise, raster lines, unclear, lowres, over-smoothed, painting, ai generated" + } + }, + "x-fal-order-properties": [ + "image_url", + "scale", + "steps", + "guidance_scale", + "conditioning_scale", + "prompt", + "negative_prompt" + ], + "required": [ + "image_url" + ] + }, + "PasdOutput": { + "title": "Output", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "file_size": 2010575, + "height": 1024, + "file_name": "4732818e18b542ca8dc3f0e6c1775ac8.png", + "content_type": "image/png", + "url": "https://v3.fal.media/files/koala/ncEbdm4Ig6dAGBp-3dR63_4732818e18b542ca8dc3f0e6c1775ac8.png", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated super-resolved images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings", + "description": "Timing information for different processing stages" + } + }, + "x-fal-order-properties": [ + "images", + "timings" + ], + "required": [ + "images" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." 
+ }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pasd/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pasd/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/pasd": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PasdInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pasd/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PasdOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/object-removal/bbox", + "metadata": { + "display_name": "Object Removal", + "category": "image-to-image", + "description": "Removes box-selected objects and their visual effects, seamlessly reconstructing the scene with contextually appropriate content.", + "status": "active", + "tags": [ + "utility", + "editing" + ], + "updated_at": "2026-01-26T21:43:26.885Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Training-4.jpg", + "model_url": "https://fal.run/fal-ai/object-removal/bbox", + "license_type": "commercial", + "date": "2025-06-16T20:15:04.257Z", + "group": { + "key": "object-removal", + "label": "Bbox" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/object-removal/bbox", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/object-removal/bbox queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/object-removal/bbox", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Training-4.jpg", + "playgroundUrl": 
"https://fal.ai/models/fal-ai/object-removal/bbox", + "documentationUrl": "https://fal.ai/models/fal-ai/object-removal/bbox/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ObjectRemovalBboxInput": { + "title": "BboxInput", + "type": "object", + "properties": { + "model": { + "enum": [ + "low_quality", + "medium_quality", + "high_quality", + "best_quality" + ], + "title": "Model", + "type": "string", + "default": "best_quality" + }, + "mask_expansion": { + "minimum": 0, + "maximum": 50, + "type": "integer", + "title": "Mask Expansion", + "description": "Amount of pixels to expand the mask by. Range: 0-50", + "default": 15 + }, + "box_prompts": { + "examples": [ + [ + { + "y_min": 0.0115, + "x_max": 0.6574, + "x_min": 0.3595, + "y_max": 0.8175 + } + ] + ], + "title": "Box Prompts", + "type": "array", + "description": "List of bounding box coordinates to erase (only one box prompt is supported)", + "items": { + "$ref": "#/components/schemas/BBoxPromptBase" + }, + "default": [] + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/zebra/o0DORfJawy-T9P_-NsvLY.png" + ], + "title": "Image Url", + "type": "string", + "description": "The URL of the image to remove objects from." 
+ } + }, + "x-fal-order-properties": [ + "image_url", + "box_prompts", + "model", + "mask_expansion" + ], + "required": [ + "image_url" + ] + }, + "ObjectRemovalBboxOutput": { + "title": "Output", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "file_size": 730703, + "height": 768, + "file_name": "85a2309b2c954c85a75120e664adbe17.png", + "content_type": "image/png", + "url": "https://v3.fal.media/files/lion/arYSoJeqWjhbcA8o4budv_85a2309b2c954c85a75120e664adbe17.png", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated images with objects removed.", + "items": { + "$ref": "#/components/schemas/Image" + } + } + }, + "x-fal-order-properties": [ + "images" + ], + "required": [ + "images" + ] + }, + "BBoxPromptBase": { + "title": "BBoxPromptBase", + "type": "object", + "properties": { + "y_min": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Y Min", + "description": "Y Min Coordinate of the box (0-1)", + "default": 0 + }, + "x_max": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "X Max", + "description": "X Max Coordinate of the prompt (0-1)", + "default": 0 + }, + "x_min": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "X Min", + "description": "X Min Coordinate of the box (0-1)", + "default": 0 + }, + "y_max": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Y Max", + "description": "Y Max Coordinate of the prompt (0-1)", + "default": 0 + } + }, + "x-fal-order-properties": [ + "x_min", + "y_min", + "x_max", + "y_max" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/object-removal/bbox/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/object-removal/bbox/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/object-removal/bbox": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ObjectRemovalBboxInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/object-removal/bbox/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ObjectRemovalBboxOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/object-removal/mask", + "metadata": { + "display_name": "Object Removal", + "category": "image-to-image", + "description": "Removes mask-selected objects and their visual effects, seamlessly reconstructing the scene with contextually appropriate content.", + "status": "active", + "tags": [ + "utility", + "editing" + ], + "updated_at": "2026-01-26T21:43:27.015Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Training-4.jpg", + "model_url": "https://fal.run/fal-ai/object-removal/mask", + "license_type": "commercial", + "date": "2025-06-16T20:13:18.407Z", + "group": { + "key": "object-removal", + "label": "Mask" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/object-removal/mask", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/object-removal/mask queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/object-removal/mask", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Training-4.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/object-removal/mask", + "documentationUrl": "https://fal.ai/models/fal-ai/object-removal/mask/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ObjectRemovalMaskInput": { + "title": "MaskInput", + "type": "object", + "properties": { + "model": { + "enum": [ + "low_quality", + "medium_quality", + "high_quality", + "best_quality" + ], + "title": "Model", + "type": "string", + "default": "best_quality" + }, + "mask_expansion": { + "minimum": 0, + "maximum": 50, + "type": "integer", + "title": "Mask Expansion", + "description": "Amount of pixels to expand the mask by. Range: 0-50", + "default": 15 + }, + "mask_url": { + "examples": [ + "https://v3.fal.media/files/tiger/7nq9-v-lJtBCPnK1332fr.png" + ], + "title": "Mask Url", + "type": "string", + "description": "The URL of the mask image. White pixels (255) indicate areas to remove." + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/zebra/o0DORfJawy-T9P_-NsvLY.png" + ], + "title": "Image Url", + "type": "string", + "description": "The URL of the image to remove objects from." + } + }, + "x-fal-order-properties": [ + "image_url", + "mask_url", + "model", + "mask_expansion" + ], + "required": [ + "image_url", + "mask_url" + ] + }, + "ObjectRemovalMaskOutput": { + "title": "Output", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "file_size": 730703, + "height": 768, + "file_name": "85a2309b2c954c85a75120e664adbe17.png", + "content_type": "image/png", + "url": "https://v3.fal.media/files/lion/arYSoJeqWjhbcA8o4budv_85a2309b2c954c85a75120e664adbe17.png", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated images with objects removed.", + "items": { + "$ref": "#/components/schemas/Image" + } + } + }, + "x-fal-order-properties": [ + "images" + ], + "required": [ + "images" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/object-removal/mask/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/object-removal/mask/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/object-removal/mask": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ObjectRemovalMaskInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/object-removal/mask/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ObjectRemovalMaskOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/object-removal", + "metadata": { + "display_name": "Object Removal", + "category": "image-to-image", + "description": "Removes objects and their visual effects using natural language, replacing them with contextually appropriate content", + "status": "active", + "tags": [ + "utility", + "editing" + ], + "updated_at": "2026-01-26T21:43:27.143Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Training-4.jpg", + "model_url": "https://fal.run/fal-ai/object-removal", + "license_type": "commercial", + "date": "2025-06-16T20:10:46.926Z", + "group": { + "key": "object-removal", + "label": "Prompt" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/object-removal", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/object-removal queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/object-removal", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Training-4.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/object-removal", + "documentationUrl": "https://fal.ai/models/fal-ai/object-removal/api" + } 
+ }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ObjectRemovalInput": { + "title": "PromptInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Dog" + ], + "title": "Prompt", + "type": "string", + "description": "Text description of the object to remove." + }, + "mask_expansion": { + "minimum": 0, + "maximum": 50, + "type": "integer", + "title": "Mask Expansion", + "description": "Amount of pixels to expand the mask by. Range: 0-50", + "default": 15 + }, + "model": { + "enum": [ + "low_quality", + "medium_quality", + "high_quality", + "best_quality" + ], + "title": "Model", + "type": "string", + "default": "best_quality" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/zebra/o0DORfJawy-T9P_-NsvLY.png" + ], + "title": "Image Url", + "type": "string", + "description": "The URL of the image to remove objects from." + } + }, + "x-fal-order-properties": [ + "image_url", + "prompt", + "model", + "mask_expansion" + ], + "required": [ + "image_url", + "prompt" + ] + }, + "ObjectRemovalOutput": { + "title": "Output", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "file_size": 730703, + "height": 768, + "file_name": "85a2309b2c954c85a75120e664adbe17.png", + "content_type": "image/png", + "url": "https://v3.fal.media/files/lion/arYSoJeqWjhbcA8o4budv_85a2309b2c954c85a75120e664adbe17.png", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated images with objects removed.", + "items": { + "$ref": "#/components/schemas/Image" + } + } + }, + "x-fal-order-properties": [ + "images" + ], + "required": [ + "images" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. 
It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/object-removal/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/object-removal/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/object-removal": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ObjectRemovalInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/object-removal/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ObjectRemovalOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/recraft/vectorize", + "metadata": { + "display_name": "Recraft", + "category": "image-to-image", + "description": "Converts a given raster image to SVG format using Recraft model.", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:28.284Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/recraft-v3.webp", + "model_url": "https://fal.run/fal-ai/recraft/vectorize", + "license_type": "commercial", + "date": "2025-06-12T15:27:32.275Z", + "group": { + "key": "fal-ai/recraft/v3", + "label": "Vectorize" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/recraft/vectorize", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/recraft/vectorize queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/recraft/vectorize", + "category": "image-to-image", + "thumbnailUrl": 
"https://storage.googleapis.com/falserverless/gallery/recraft-v3.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/recraft/vectorize", + "documentationUrl": "https://fal.ai/models/fal-ai/recraft/vectorize/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "RecraftVectorizeInput": { + "title": "VectorizeInput", + "type": "object", + "properties": { + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/man_wave.png" + ], + "title": "Image Url", + "type": "string", + "description": "The URL of the image to be vectorized. Must be in PNG, JPG or WEBP format, less than 5 MB in size, have resolution less than 16 MP and max dimension less than 4096 pixels, min dimension more than 256 pixels." + } + }, + "x-fal-order-properties": [ + "image_url" + ], + "required": [ + "image_url" + ] + }, + "RecraftVectorizeOutput": { + "title": "VectorizeOutput", + "type": "object", + "properties": { + "image": { + "examples": [ + { + "file_size": 85336, + "file_name": "image.svg", + "content_type": "image/svg+xml", + "url": "https://v3.fal.media/files/koala/pUQbC18DsP4KxcIBA53y2_image.svg" + } + ], + "title": "Image", + "description": "The vectorized image.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "image" + ], + "required": [ + "image" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/recraft/vectorize/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/recraft/vectorize/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/recraft/vectorize": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RecraftVectorizeInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/recraft/vectorize/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RecraftVectorizeOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ffmpeg-api/extract-frame", + "metadata": { + "display_name": "Ffmpeg Api", + "category": "image-to-image", + "description": "ffmpeg endpoint for first, middle and last frame extraction from videos", + "status": "active", + "tags": [ + "utility", + "editing" + ], + "updated_at": "2026-01-26T21:43:29.469Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/ffmpeg-api-compose.webp", + "model_url": "https://fal.run/fal-ai/ffmpeg-api/extract-frame", + "license_type": "commercial", + "date": "2025-06-09T10:55:21.156Z", + "group": { + "key": "ffmpeg", + "label": "Extract Frame" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ffmpeg-api/extract-frame", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ffmpeg-api/extract-frame queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ffmpeg-api/extract-frame", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/ffmpeg-api-compose.webp", + 
"playgroundUrl": "https://fal.ai/models/fal-ai/ffmpeg-api/extract-frame", + "documentationUrl": "https://fal.ai/models/fal-ai/ffmpeg-api/extract-frame/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FfmpegApiExtractFrameInput": { + "title": "FrameInput", + "type": "object", + "properties": { + "video_url": { + "examples": [ + "https://v3.fal.media/files/monkey/R6D8anxtsyItZTyBB2ksC_qeoDDxmLSg8cuWasM54KY_output.mp4" + ], + "description": "URL of the video file to use as the video track", + "type": "string", + "title": "Video Url" + }, + "frame_type": { + "enum": [ + "first", + "middle", + "last" + ], + "description": "Type of frame to extract: first, middle, or last frame of the video", + "type": "string", + "title": "Frame Type", + "default": "first" + } + }, + "x-fal-order-properties": [ + "video_url", + "frame_type" + ], + "required": [ + "video_url" + ] + }, + "FfmpegApiExtractFrameOutput": { + "title": "FrameOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3.fal.media/files/elephant/IHmmk4dvyoCCYhtzI2FsO_16df6b1358374c1a9b023c80d752ee7b.jpg" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/Image" + } + } + }, + "x-fal-order-properties": [ + "images" + ], + "required": [ + "images" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height" + }, + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ffmpeg-api/extract-frame/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ffmpeg-api/extract-frame/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ffmpeg-api/extract-frame": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FfmpegApiExtractFrameInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ffmpeg-api/extract-frame/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FfmpegApiExtractFrameOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/luma-photon/flash/modify", + "metadata": { + "display_name": "Luma Photon", + "category": "image-to-image", + "description": "Edit images from your prompts using Luma Photon. 
Photon is the most creative, personalizable, and intelligent visual model for creatives, bringing a step-function change in the cost of high-quality image generation.", + "status": "active", + "tags": [ + "image-to-image" + ], + "updated_at": "2026-01-26T21:43:29.772Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/luma-photon.webp", + "model_url": "https://fal.run/fal-ai/luma-photon/flash/modify", + "license_type": "commercial", + "date": "2025-06-08T12:51:44.150Z", + "group": { + "key": "luma-photon", + "label": "Modify [Flash]" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/luma-photon/flash/modify", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/luma-photon/flash/modify queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/luma-photon/flash/modify", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/luma-photon.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/luma-photon/flash/modify", + "documentationUrl": "https://fal.ai/models/fal-ai/luma-photon/flash/modify/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LumaPhotonFlashModifyInput": { + "title": "ModifyImageRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Make the image look like a painting" + ], + "maxLength": 5000, + "type": "string", + "title": "Prompt", + "minLength": 3, + "description": "Instruction for modifying the image" + }, + "aspect_ratio": { + "enum": [ + "1:1", + "16:9", + "9:16", + "4:3", + "3:4", + "21:9", + "9:21" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the reframed image", + "examples": [ + "16:9" + ] + }, + "strength": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Strength", + "description": "The strength of the initial image. 
Higher strength values correspond to more influence of the initial image on the output.", + "examples": [ + 0.8 + ] + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/gallery/example_inputs_liuyifei.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the input image to reframe" + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "strength", + "aspect_ratio" + ], + "required": [ + "image_url", + "strength", + "aspect_ratio" + ] + }, + "LumaPhotonFlashModifyOutput": { + "title": "T2IOutput", + "type": "object", + "properties": { + "images": { + "title": "Images", + "type": "array", + "description": "The generated image", + "items": { + "$ref": "#/components/schemas/File" + } + } + }, + "x-fal-order-properties": [ + "images" + ], + "required": [ + "images" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/luma-photon/flash/modify/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/luma-photon/flash/modify/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/luma-photon/flash/modify": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LumaPhotonFlashModifyInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/luma-photon/flash/modify/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LumaPhotonFlashModifyOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/luma-photon/modify", + "metadata": { + "display_name": "Luma Photon", + "category": "image-to-image", + "description": "Edit images from your prompts using Luma Photon. Photon is the most creative, personalizable, and intelligent visual model for creatives, bringing a step-function change in the cost of high-quality image generation.", + "status": "active", + "tags": [ + "image-to-image" + ], + "updated_at": "2026-01-26T21:43:29.900Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/luma-photon.webp", + "model_url": "https://fal.run/fal-ai/luma-photon/modify", + "license_type": "commercial", + "date": "2025-06-08T12:46:34.379Z", + "group": { + "key": "luma-photon", + "label": "Modify" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/luma-photon/modify", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/luma-photon/modify queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/luma-photon/modify", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/luma-photon.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/luma-photon/modify", + "documentationUrl": "https://fal.ai/models/fal-ai/luma-photon/modify/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position."
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "LumaPhotonModifyInput": { + "title": "ModifyImageRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Make the image look like a painting" + ], + "maxLength": 5000, + "type": "string", + "title": "Prompt", + "minLength": 3, + "description": "Instruction for modifying the image" + }, + "aspect_ratio": { + "enum": [ + "1:1", + "16:9", + "9:16", + "4:3", + "3:4", + "21:9", + "9:21" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the reframed image", + "examples": [ + "16:9" + ] + }, + "strength": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Strength", + "description": "The strength of the initial image. Higher strength values are corresponding to more influence of the initial image on the output.", + "examples": [ + 0.8 + ] + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/gallery/example_inputs_liuyifei.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the input image to reframe" + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "strength", + "aspect_ratio" + ], + "required": [ + "image_url", + "strength", + "aspect_ratio" + ] + }, + "LumaPhotonModifyOutput": { + "title": "T2IOutput", + "type": "object", + "properties": { + "images": { + "title": "Images", + "type": "array", + "description": "The generated image", + "items": { + "$ref": "#/components/schemas/File" + } + } + }, + "x-fal-order-properties": [ + "images" + ], + "required": [ + "images" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/luma-photon/modify/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/luma-photon/modify/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/luma-photon/modify": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LumaPhotonModifyInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/luma-photon/modify/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LumaPhotonModifyOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-editing/reframe", + "metadata": { + "display_name": "Image Editing", + "category": "image-to-image", + "description": "The reframe endpoint intelligently adjusts an image's aspect ratio while preserving the main subject's position, composition, pose, and perspective", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:30.870Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "model_url": "https://fal.run/fal-ai/image-editing/reframe", + "license_type": "commercial", + "date": "2025-06-05T16:34:57.998Z", + "group": { + "key": "image-editing", + "label": "Reframe" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-editing/reframe", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-editing/reframe queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-editing/reframe", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-editing/reframe", + "documentationUrl": "https://fal.ai/models/fal-ai/image-editing/reframe/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageEditingReframeInput": { + "title": "ReframeInput", + "type": "object", + "properties": { + "aspect_ratio": { + "enum": [ + "21:9", + "16:9", + "4:3", + "3:2", + "1:1", + "2:3", + "3:4", + "9:16", + "9:21" + ], + "description": "The desired aspect ratio for the reframed image.", + "type": "string", + "examples": [ + "16:9" + ], + "title": "Aspect Ratio", + "default": "16:9" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/elephant/7ekErKT--mhgKJ5kgtvU__image.webp" + ], + "title": "Image URL", + "type": "string", + "description": "URL of the old or damaged photo to restore." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.", + "default": false + }, + "safety_tolerance": { + "enum": [ + "1", + "2", + "3", + "4", + "5", + "6" + ], + "title": "Safety Tolerance", + "type": "string", + "description": "The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive.", + "default": "2" + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.", + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of inference steps for sampling.", + "default": 30 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The same seed and the same prompt given to the same version of the model will output the same image every time." 
+ } + }, + "x-fal-order-properties": [ + "image_url", + "guidance_scale", + "num_inference_steps", + "safety_tolerance", + "output_format", + "aspect_ratio", + "seed", + "sync_mode" + ], + "required": [ + "image_url" + ] + }, + "ImageEditingReframeOutput": { + "title": "ReframeOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://fal.media/files/elephant/V76OoObzc65eDtueHsXYI_a8879759964f4defaef62f44bacdaced.jpg" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "images", + "seed" + ], + "required": [ + "images", + "seed" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-editing/reframe/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/reframe/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/image-editing/reframe": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingReframeInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/reframe/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingReframeOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-editing/baby-version", + "metadata": { + "display_name": "Image Editing", + "category": "image-to-image", + "description": "Transform any person into their baby version, while preserving the original pose and expression with childlike features.", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:31.196Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "model_url": "https://fal.run/fal-ai/image-editing/baby-version", + "license_type": "commercial", + "date": "2025-06-03T20:07:31.667Z", + "group": { + "key": "image-editing", + "label": "Baby Version" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-editing/baby-version", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-editing/baby-version queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-editing/baby-version", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-editing/baby-version", + "documentationUrl": "https://fal.ai/models/fal-ai/image-editing/baby-version/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageEditingBabyVersionInput": { + "title": "BabyVersionInput", + "type": "object", + "properties": { + "aspect_ratio": { + "enum": [ + "21:9", + "16:9", + "4:3", + "3:2", + "1:1", + "2:3", + "3:4", + "9:16", + "9:21" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated image." + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/penguin/hIPcTcSrtLMVXyedBUqIX_-pG58lHRIQ3_1iBmMlU_v_image.webp" + ], + "title": "Image URL", + "type": "string", + "description": "URL of the image to transform into a baby version." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.", + "default": false + }, + "safety_tolerance": { + "enum": [ + "1", + "2", + "3", + "4", + "5", + "6" + ], + "title": "Safety Tolerance", + "type": "string", + "description": "The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive.", + "default": "2" + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.", + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of inference steps for sampling.", + "default": 30 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The same seed and the same prompt given to the same version of the model will output the same image every time." + } + }, + "description": "Input model for baby version endpoint.", + "x-fal-order-properties": [ + "image_url", + "guidance_scale", + "num_inference_steps", + "safety_tolerance", + "output_format", + "aspect_ratio", + "seed", + "sync_mode" + ], + "required": [ + "image_url" + ] + }, + "ImageEditingBabyVersionOutput": { + "title": "BabyVersionOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://fal.media/files/tiger/pnDpVBW93b0rN_8JuT7cW_c28f958055f946caaece343054732d01.jpg" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "images", + "seed" + ], + "required": [ + "images", + "seed" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." 
+ }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-editing/baby-version/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/baby-version/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/image-editing/baby-version": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingBabyVersionInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/baby-version/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingBabyVersionOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/luma-photon/flash/reframe", + "metadata": { + "display_name": "Luma Photon Flash Reframe", + "category": "image-to-image", + "description": "This advanced tool intelligently expands your visuals, seamlessly blending new content to enhance creativity and adaptability, offering unmatched speed and quality for creators at a fraction of the cost.", + "status": "active", + "tags": [ + "flash", + "reframe", + "outpainting" + ], + "updated_at": "2026-01-26T21:43:31.571Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/luma-photon.webp", + "model_url": "https://fal.run/fal-ai/luma-photon/flash/reframe", + "license_type": "commercial", + "date": "2025-06-03T15:28:07.803Z", + "group": { + "key": "luma-photon", + "label": "Reframe [flash]" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/luma-photon/flash/reframe", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/luma-photon/flash/reframe queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/luma-photon/flash/reframe", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/luma-photon.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/luma-photon/flash/reframe", + "documentationUrl": "https://fal.ai/models/fal-ai/luma-photon/flash/reframe/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "LumaPhotonFlashReframeInput": { + "title": "ReframeImageRequest", + "type": "object", + "properties": { + "prompt": { + "maxLength": 5000, + "type": "string", + "title": "Prompt", + "minLength": 3, + "description": "Optional prompt for reframing" + }, + "aspect_ratio": { + "enum": [ + "1:1", + "16:9", + "9:16", + "4:3", + "3:4", + "21:9", + "9:21" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the reframed image", + "examples": [ + "16:9" + ] + }, + "y_start": { + "title": "Y Start", + "type": "integer", + "description": "Start Y coordinate for reframing" + }, + "x_end": { + "title": "X End", + "type": "integer", + "description": "End X coordinate for reframing" + }, + "y_end": { + "title": "Y End", + "type": "integer", + "description": "End Y coordinate for reframing" + }, + "grid_position_y": { + "title": "Grid Position Y", + "type": "integer", + "description": "Y position of the grid for reframing" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/gallery/example_inputs_liuyifei.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the input image to reframe" + }, + "grid_position_x": { + "title": "Grid Position X", + "type": "integer", + "description": "X position of the grid for reframing" + }, + "x_start": { + "title": "X Start", + "type": "integer", + "description": "Start X coordinate for reframing" + } + }, + "x-fal-order-properties": [ + "image_url", + "aspect_ratio", + "prompt", + "grid_position_x", + "grid_position_y", + "x_end", + "x_start", + "y_end", + "y_start" + ], + "required": [ + "image_url", + "aspect_ratio" + ] + }, + "LumaPhotonFlashReframeOutput": { + "title": "T2IOutput", + "type": "object", + "properties": { + "images": { + "title": "Images", + "type": "array", + "description": "The generated image", + "items": { + "$ref": "#/components/schemas/File" + } + } + }, + "x-fal-order-properties": [ + "images" + ], + "required": [ + "images" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/luma-photon/flash/reframe/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/luma-photon/flash/reframe/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/luma-photon/flash/reframe": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LumaPhotonFlashReframeInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/luma-photon/flash/reframe/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LumaPhotonFlashReframeOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/luma-photon/reframe", + "metadata": { + "display_name": "Luma Photon Reframe", + "category": "image-to-image", + "description": "Extend and reframe images with Luma Photon Reframe. 
This advanced tool intelligently expands your visuals, seamlessly blending new content to enhance creativity and adaptability, offering unmatched personalization and quality for creators at a fraction of the cost.", + "status": "active", + "tags": [ + "outpainting", + "reframe" + ], + "updated_at": "2026-01-26T21:43:31.707Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/luma-photon.webp", + "model_url": "https://fal.run/fal-ai/luma-photon/reframe", + "license_type": "commercial", + "date": "2025-06-03T15:13:14.628Z", + "group": { + "key": "luma-photon", + "label": "Reframe" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/luma-photon/reframe", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/luma-photon/reframe queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/luma-photon/reframe", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/luma-photon.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/luma-photon/reframe", + "documentationUrl": "https://fal.ai/models/fal-ai/luma-photon/reframe/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "LumaPhotonReframeInput": { + "title": "ReframeImageRequest", + "type": "object", + "properties": { + "prompt": { + "maxLength": 5000, + "type": "string", + "title": "Prompt", + "minLength": 3, + "description": "Optional prompt for reframing" + }, + "aspect_ratio": { + "enum": [ + "1:1", + "16:9", + "9:16", + "4:3", + "3:4", + "21:9", + "9:21" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the reframed image", + "examples": [ + "16:9" + ] + }, + "y_start": { + "title": "Y Start", + "type": "integer", + "description": "Start Y coordinate for reframing" + }, + "x_end": { + "title": "X End", + "type": "integer", + "description": "End X coordinate for reframing" + }, + "y_end": { + "title": "Y End", + "type": "integer", + "description": "End Y coordinate for reframing" + }, + "grid_position_y": { + "title": "Grid Position Y", + "type": "integer", + "description": "Y position of the grid for reframing" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/gallery/example_inputs_liuyifei.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the input image to reframe" + }, + "grid_position_x": { + "title": "Grid Position X", + "type": "integer", + "description": "X position of the grid for reframing" + }, + "x_start": { + "title": "X Start", + "type": "integer", + "description": "Start X coordinate for reframing" + } + }, + "x-fal-order-properties": [ + "image_url", + "aspect_ratio", + "prompt", + "grid_position_x", + "grid_position_y", + "x_end", + "x_start", + "y_end", + "y_start" + ], + "required": [ + "image_url", + "aspect_ratio" + ] + }, + "LumaPhotonReframeOutput": { + "title": "T2IOutput", + "type": "object", + "properties": { + "images": { + "title": "Images", + "type": "array", + "description": "The generated image", + "items": { + "$ref": "#/components/schemas/File" + } + } + }, + "x-fal-order-properties": [ + "images" + ], + "required": [ + "images" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/luma-photon/reframe/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/luma-photon/reframe/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/luma-photon/reframe": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LumaPhotonReframeInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/luma-photon/reframe/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LumaPhotonReframeOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-1/schnell/redux", + "metadata": { + "display_name": "FLUX.1 [schnell] Redux", + "category": "image-to-image", + "description": "FLUX.1 [schnell] Redux is a high-performance endpoint for the FLUX.1 [schnell] model that enables rapid transformation of existing images, delivering high-quality style transfers and image modifications with the core FLUX capabilities.\n", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:32.080Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Upscale-5.jpeg", + "model_url": "https://fal.run/fal-ai/flux-1/schnell/redux", + "license_type": "commercial", + "date": "2025-06-02T18:30:38.654Z", + "group": { + "key": "flux-1-fast", + "label": "Image to Image [schnell] Redux" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-1/schnell/redux", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-1/schnell/redux queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-1/schnell/redux", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Upscale-5.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-1/schnell/redux", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-1/schnell/redux/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux1SchnellReduxInput": { + "title": "SchnellFlux1ReduxInput", + "type": "object", + "properties": { + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "maximum": 4, + "title": "Num Images", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "landscape_4_3" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "title": "Acceleration", + "type": "string", + "description": "The speed of the generation. The higher the speed, the faster the generation.", + "default": "regular" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://fal.media/files/kangaroo/acQvq-Kmo2lajkgvcEHdv.png" + ], + "title": "Image URL", + "type": "string", + "description": "The URL of the image to generate an image from." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "num_inference_steps": { + "minimum": 1, + "description": "The number of inference steps to perform.", + "type": "integer", + "maximum": 12, + "title": "Num Inference Steps", + "default": 4 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + } + }, + "x-fal-order-properties": [ + "image_url", + "num_inference_steps", + "image_size", + "seed", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "acceleration" + ], + "required": [ + "image_url" + ] + }, + "Flux1SchnellReduxOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." 
+ }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-1/schnell/redux/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-1/schnell/redux/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-1/schnell/redux": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux1SchnellReduxInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-1/schnell/redux/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux1SchnellReduxOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-1/dev/redux", + "metadata": { + "display_name": "FLUX.1 [dev] Redux", + "category": "image-to-image", + "description": "FLUX.1 [dev] Redux is a high-performance endpoint for the FLUX.1 [dev] model that enables rapid transformation of existing images, delivering high-quality style transfers and image modifications with the core FLUX capabilities.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:32.204Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-1.jpg", + "model_url": "https://fal.run/fal-ai/flux-1/dev/redux", + "license_type": "commercial", + "date": "2025-06-02T18:29:41.412Z", + "group": { + "key": "flux-1-fast", + "label": "Image to Image [dev] Redux" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-1/dev/redux", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-1/dev/redux queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-1/dev/redux", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-1.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-1/dev/redux", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-1/dev/redux/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux1DevReduxInput": { + "title": "BaseFlux1ReduxInput", + "type": "object", + "properties": { + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "maximum": 4, + "title": "Num Images", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "landscape_4_3" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "title": "Acceleration", + "type": "string", + "description": "The speed of the generation. The higher the speed, the faster the generation.", + "default": "regular" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://fal.media/files/kangaroo/acQvq-Kmo2lajkgvcEHdv.png" + ], + "title": "Image URL", + "type": "string", + "description": "The URL of the image to generate an image from." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "num_inference_steps": { + "minimum": 1, + "description": "The number of inference steps to perform.", + "type": "integer", + "maximum": 50, + "title": "Num Inference Steps", + "default": 28 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "guidance_scale": { + "minimum": 1, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "type": "number", + "maximum": 20, + "title": "Guidance scale (CFG)", + "default": 3.5 + } + }, + "x-fal-order-properties": [ + "image_url", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "acceleration" + ], + "required": [ + "image_url" + ] + }, + "Flux1DevReduxOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value as the one passed in the\n input, or the randomly generated one that was used if none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-1/dev/redux/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-1/dev/redux/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully."
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-1/dev/redux": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux1DevReduxInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-1/dev/redux/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux1DevReduxOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-1/dev/image-to-image", + "metadata": { + "display_name": "FLUX.1 [dev]", + "category": "image-to-image", + "description": "FLUX.1 [dev] is a 12 billion parameter flow transformer that generates high-quality images from text. It is suitable for personal and commercial use.\n", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:32.390Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Upscale-2.jpg", + "model_url": "https://fal.run/fal-ai/flux-1/dev/image-to-image", + "license_type": "commercial", + "date": "2025-06-02T18:28:08.120Z", + "group": { + "key": "flux-1-fast", + "label": "Image to Image [dev]" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-1/dev/image-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-1/dev/image-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-1/dev/image-to-image", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Upscale-2.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-1/dev/image-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-1/dev/image-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux1DevImageToImageInput": { + "title": "BaseFlux1ImageToInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A cat dressed as a wizard with a background of a mystic forest." 
+ ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "maximum": 4, + "title": "Num Images", + "default": 1 + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "title": "Acceleration", + "type": "string", + "description": "The speed of the generation. The higher the speed, the faster the generation.", + "default": "regular" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://fal.media/files/koala/Chls9L2ZnvuipUTEwlnJC.png" + ], + "title": "Image URL", + "type": "string", + "description": "The URL of the image to generate an image from." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "strength": { + "minimum": 0.01, + "description": "The strength of the initial image. Higher strength values are better for this model.", + "type": "number", + "maximum": 1, + "title": "Strength", + "default": 0.95 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "num_inference_steps": { + "minimum": 10, + "description": "The number of inference steps to perform.", + "type": "integer", + "maximum": 50, + "title": "Num Inference Steps", + "default": 40 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "guidance_scale": { + "minimum": 1, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "type": "number", + "maximum": 20, + "title": "Guidance scale (CFG)", + "default": 3.5 + } + }, + "x-fal-order-properties": [ + "image_url", + "strength", + "num_inference_steps", + "prompt", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "acceleration" + ], + "required": [ + "image_url", + "prompt" + ] + }, + "Flux1DevImageToImageOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-1/dev/image-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-1/dev/image-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-1/dev/image-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux1DevImageToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-1/dev/image-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux1DevImageToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-editing/text-removal", + "metadata": { + "display_name": "Image Editing", + "category": "image-to-image", + "description": "Remove all text and writing from images while preserving the background and natural appearance.", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:33.148Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "model_url": "https://fal.run/fal-ai/image-editing/text-removal", + "license_type": "commercial", + "date": "2025-06-02T03:19:23.717Z", + "group": { + "key": "image-editing", + "label": "Text Removal" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-editing/text-removal", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-editing/text-removal queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-editing/text-removal", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-editing/text-removal", + "documentationUrl": "https://fal.ai/models/fal-ai/image-editing/text-removal/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageEditingTextRemovalInput": { + "title": "TextRemovalInput", + "type": "object", + "properties": { + "aspect_ratio": { + "enum": [ + "21:9", + "16:9", + "4:3", + "3:2", + "1:1", + "2:3", + "3:4", + "9:16", + "9:21" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated image." + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/rabbit/rmgBxhwGYb2d3pl3x9sKf_output.png" + ], + "title": "Image URL", + "type": "string", + "description": "URL of the image containing text to be removed." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.", + "default": false + }, + "safety_tolerance": { + "enum": [ + "1", + "2", + "3", + "4", + "5", + "6" + ], + "title": "Safety Tolerance", + "type": "string", + "description": "The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive.", + "default": "2" + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.", + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of inference steps for sampling.", + "default": 30 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The same seed and the same prompt given to the same version of the model will output the same image every time." + } + }, + "description": "Input model for text removal endpoint.", + "x-fal-order-properties": [ + "image_url", + "guidance_scale", + "num_inference_steps", + "safety_tolerance", + "output_format", + "aspect_ratio", + "seed", + "sync_mode" + ], + "required": [ + "image_url" + ] + }, + "ImageEditingTextRemovalOutput": { + "title": "TextRemovalOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://fal.media/files/tiger/W1s8Rq_cW2bT_wUcW9Bd8_413f5074e3d94829acfc4e08ec785040.jpg" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "images", + "seed" + ], + "required": [ + "images", + "seed" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." 
+ }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-editing/text-removal/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/text-removal/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/image-editing/text-removal": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingTextRemovalInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/text-removal/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingTextRemovalOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-editing/photo-restoration", + "metadata": { + "display_name": "Image Editing", + "category": "image-to-image", + "description": "Restore and enhance old or damaged photos by removing imperfections, adding color while preserving the original character and details of the image.", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:33.275Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "model_url": "https://fal.run/fal-ai/image-editing/photo-restoration", + "license_type": "commercial", + "date": "2025-06-02T02:41:12.926Z", + "group": { + "key": "image-editing", + "label": "Photo Restoration" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-editing/photo-restoration", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-editing/photo-restoration queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-editing/photo-restoration", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-editing/photo-restoration", + "documentationUrl": "https://fal.ai/models/fal-ai/image-editing/photo-restoration/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageEditingPhotoRestorationInput": { + "title": "PhotoRestorationInput", + "type": "object", + "properties": { + "aspect_ratio": { + "enum": [ + "21:9", + "16:9", + "4:3", + "3:2", + "1:1", + "2:3", + "3:4", + "9:16", + "9:21" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated image." + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/zebra/ProUb4C1PYWpyGe7BXd0n_d575ba7693584c0ddf733f77dcdb8963.jpg" + ], + "title": "Image URL", + "type": "string", + "description": "URL of the old or damaged photo to restore." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.", + "default": false + }, + "safety_tolerance": { + "enum": [ + "1", + "2", + "3", + "4", + "5", + "6" + ], + "title": "Safety Tolerance", + "type": "string", + "description": "The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive.", + "default": "2" + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.", + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of inference steps for sampling.", + "default": 30 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The same seed and the same prompt given to the same version of the model will output the same image every time." + } + }, + "description": "Input model for photo restoration endpoint.", + "x-fal-order-properties": [ + "image_url", + "guidance_scale", + "num_inference_steps", + "safety_tolerance", + "output_format", + "aspect_ratio", + "seed", + "sync_mode" + ], + "required": [ + "image_url" + ] + }, + "ImageEditingPhotoRestorationOutput": { + "title": "PhotoRestorationOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://fal.media/files/lion/5xRd_PfrtfoXe03-VlPZb_d684600d4ece4b778dfea35dd536bee9.jpg" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "images", + "seed" + ], + "required": [ + "images", + "seed" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." 
+ }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-editing/photo-restoration/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/photo-restoration/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/image-editing/photo-restoration": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingPhotoRestorationInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/photo-restoration/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingPhotoRestorationOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-editing/weather-effect", + "metadata": { + "display_name": "Image Editing", + "category": "image-to-image", + "description": "Add realistic weather effects like snowfall, rain, or fog to your photos while maintaining the scene's mood.", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:33.873Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "model_url": "https://fal.run/fal-ai/image-editing/weather-effect", + "license_type": "commercial", + "date": "2025-05-29T20:29:54.982Z", + "group": { + "key": "image-editing", + "label": "Weather Effect" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-editing/weather-effect", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-editing/weather-effect queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-editing/weather-effect", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-editing/weather-effect", + "documentationUrl": "https://fal.ai/models/fal-ai/image-editing/weather-effect/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageEditingWeatherEffectInput": { + "title": "WeatherEffectInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "heavy snowfall" + ], + "title": "Weather Effect", + "type": "string", + "description": "The weather effect to apply.", + "default": "heavy snowfall" + }, + "aspect_ratio": { + "enum": [ + "21:9", + "16:9", + "4:3", + "3:2", + "1:1", + "2:3", + "3:4", + "9:16", + "9:21" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated image." + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/zebra/hAjCkcyly4gsS9-cptD3Y_image%20(20).png" + ], + "title": "Image URL", + "type": "string", + "description": "Image prompt for the omni model." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.", + "default": false + }, + "safety_tolerance": { + "enum": [ + "1", + "2", + "3", + "4", + "5", + "6" + ], + "title": "Safety Tolerance", + "type": "string", + "description": "The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive.", + "default": "2" + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.", + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of inference steps for sampling.", + "default": 30 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The same seed and the same prompt given to the same version of the model will output the same image every time." + } + }, + "x-fal-order-properties": [ + "image_url", + "guidance_scale", + "num_inference_steps", + "safety_tolerance", + "output_format", + "aspect_ratio", + "seed", + "sync_mode", + "prompt" + ], + "required": [ + "image_url" + ] + }, + "ImageEditingWeatherEffectOutput": { + "title": "WeatherEffectOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://fal.media/files/panda/YhTFlPVSkuyPisQleLPQF_186310a04a8d4120a44b72f50f28ab5f.jpg" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "images", + "seed" + ], + "required": [ + "images", + "seed" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." 
+ }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-editing/weather-effect/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/weather-effect/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/image-editing/weather-effect": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingWeatherEffectInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/weather-effect/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingWeatherEffectOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-editing/time-of-day", + "metadata": { + "display_name": "Image Editing", + "category": "image-to-image", + "description": "Transform your photos to any time of day, from golden hour to midnight, with appropriate lighting and atmosphere.", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:33.997Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "model_url": "https://fal.run/fal-ai/image-editing/time-of-day", + "license_type": "commercial", + "date": "2025-05-29T20:29:47.668Z", + "group": { + "key": "image-editing", + "label": "Time of Day" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-editing/time-of-day", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-editing/time-of-day queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-editing/time-of-day", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-editing/time-of-day", + "documentationUrl": "https://fal.ai/models/fal-ai/image-editing/time-of-day/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageEditingTimeOfDayInput": { + "title": "TimeOfDayInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "golden hour" + ], + "title": "Time of Day", + "type": "string", + "description": "The time of day to transform the scene to.", + "default": "golden hour" + }, + "aspect_ratio": { + "enum": [ + "21:9", + "16:9", + "4:3", + "3:2", + "1:1", + "2:3", + "3:4", + "9:16", + "9:21" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated image." + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/zebra/hAjCkcyly4gsS9-cptD3Y_image%20(20).png" + ], + "title": "Image URL", + "type": "string", + "description": "Image prompt for the omni model." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.", + "default": false + }, + "safety_tolerance": { + "enum": [ + "1", + "2", + "3", + "4", + "5", + "6" + ], + "title": "Safety Tolerance", + "type": "string", + "description": "The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive.", + "default": "2" + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.", + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of inference steps for sampling.", + "default": 30 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The same seed and the same prompt given to the same version of the model will output the same image every time." + } + }, + "x-fal-order-properties": [ + "image_url", + "guidance_scale", + "num_inference_steps", + "safety_tolerance", + "output_format", + "aspect_ratio", + "seed", + "sync_mode", + "prompt" + ], + "required": [ + "image_url" + ] + }, + "ImageEditingTimeOfDayOutput": { + "title": "TimeOfDayOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://fal.media/files/panda/Udg4UCRqE4Vbp4zd-4oZ8_5f17ff8367ee492e9279e02940d0f258.jpg" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "images", + "seed" + ], + "required": [ + "images", + "seed" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." 
+ }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-editing/time-of-day/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/time-of-day/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/image-editing/time-of-day": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingTimeOfDayInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/time-of-day/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingTimeOfDayOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-editing/style-transfer", + "metadata": { + "display_name": "Image Editing", + "category": "image-to-image", + "description": "Transform your photos into artistic masterpieces inspired by famous styles like Van Gogh's Starry Night or any artistic style you choose.", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:34.121Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "model_url": "https://fal.run/fal-ai/image-editing/style-transfer", + "license_type": "commercial", + "date": "2025-05-29T20:29:40.037Z", + "group": { + "key": "image-editing", + "label": "Style Transfer" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-editing/style-transfer", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-editing/style-transfer queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-editing/style-transfer", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-editing/style-transfer", + "documentationUrl": "https://fal.ai/models/fal-ai/image-editing/style-transfer/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageEditingStyleTransferInput": { + "title": "StyleTransferInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Van Gogh's Starry Night" + ], + "title": "Style Prompt", + "type": "string", + "description": "The artistic style to apply.", + "default": "Van Gogh's Starry Night" + }, + "aspect_ratio": { + "enum": [ + "21:9", + "16:9", + "4:3", + "3:2", + "1:1", + "2:3", + "3:4", + "9:16", + "9:21" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated image." + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/zebra/hAjCkcyly4gsS9-cptD3Y_image%20(20).png" + ], + "title": "Image URL", + "type": "string", + "description": "Image prompt for the omni model." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.", + "default": false + }, + "safety_tolerance": { + "enum": [ + "1", + "2", + "3", + "4", + "5", + "6" + ], + "title": "Safety Tolerance", + "type": "string", + "description": "The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive.", + "default": "2" + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.", + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of inference steps for sampling.", + "default": 30 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The same seed and the same prompt given to the same version of the model will output the same image every time." + } + }, + "x-fal-order-properties": [ + "image_url", + "guidance_scale", + "num_inference_steps", + "safety_tolerance", + "output_format", + "aspect_ratio", + "seed", + "sync_mode", + "prompt" + ], + "required": [ + "image_url" + ] + }, + "ImageEditingStyleTransferOutput": { + "title": "StyleTransferOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://fal.media/files/kangaroo/tkhm3MPIE5FugnHcssKYZ_a983d62e76844c488877988742e170a9.jpg" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "images", + "seed" + ], + "required": [ + "images", + "seed" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." 
+ }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-editing/style-transfer/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/style-transfer/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/image-editing/style-transfer": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingStyleTransferInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/style-transfer/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingStyleTransferOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-editing/scene-composition", + "metadata": { + "display_name": "Image Editing", + "category": "image-to-image", + "description": "Place your subject in any scene you imagine, from enchanted forests to urban settings, with professional composition and lighting", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:34.310Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "model_url": "https://fal.run/fal-ai/image-editing/scene-composition", + "license_type": "commercial", + "date": "2025-05-29T20:29:34.336Z", + "group": { + "key": "image-editing", + "label": "Scene Composition" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-editing/scene-composition", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-editing/scene-composition queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-editing/scene-composition", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-editing/scene-composition", + "documentationUrl": "https://fal.ai/models/fal-ai/image-editing/scene-composition/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageEditingSceneCompositionInput": { + "title": "SceneCompositionInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "enchanted forest" + ], + "title": "Scene Description", + "type": "string", + "description": "Describe the scene where you want to place the subject.", + "default": "enchanted forest" + }, + "aspect_ratio": { + "enum": [ + "21:9", + "16:9", + "4:3", + "3:2", + "1:1", + "2:3", + "3:4", + "9:16", + "9:21" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated image." + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/zebra/hAjCkcyly4gsS9-cptD3Y_image%20(20).png" + ], + "title": "Image URL", + "type": "string", + "description": "Image prompt for the omni model." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.", + "default": false + }, + "safety_tolerance": { + "enum": [ + "1", + "2", + "3", + "4", + "5", + "6" + ], + "title": "Safety Tolerance", + "type": "string", + "description": "The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive.", + "default": "2" + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.", + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of inference steps for sampling.", + "default": 30 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The same seed and the same prompt given to the same version of the model will output the same image every time." + } + }, + "x-fal-order-properties": [ + "image_url", + "guidance_scale", + "num_inference_steps", + "safety_tolerance", + "output_format", + "aspect_ratio", + "seed", + "sync_mode", + "prompt" + ], + "required": [ + "image_url" + ] + }, + "ImageEditingSceneCompositionOutput": { + "title": "SceneCompositionOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://fal.media/files/panda/VmiTRcFIPBN8TtV1FBuj1_6c3a793376664fa3a3e5ce5912038ab6.jpg" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "images", + "seed" + ], + "required": [ + "images", + "seed" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." 
+ }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-editing/scene-composition/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/scene-composition/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/image-editing/scene-composition": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingSceneCompositionInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/scene-composition/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingSceneCompositionOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-editing/professional-photo", + "metadata": { + "display_name": "Image Editing", + "category": "image-to-image", + "description": "Turn your casual photos into stunning professional studio portraits with perfect lighting and high-end photography style.", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:34.435Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "model_url": "https://fal.run/fal-ai/image-editing/professional-photo", + "license_type": "commercial", + "date": "2025-05-29T20:29:27.931Z", + "group": { + "key": "image-editing", + "label": "Professional Photo" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-editing/professional-photo", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-editing/professional-photo queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-editing/professional-photo", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-editing/professional-photo", + "documentationUrl": "https://fal.ai/models/fal-ai/image-editing/professional-photo/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageEditingProfessionalPhotoInput": { + "title": "BaseInput", + "type": "object", + "properties": { + "aspect_ratio": { + "enum": [ + "21:9", + "16:9", + "4:3", + "3:2", + "1:1", + "2:3", + "3:4", + "9:16", + "9:21" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated image." + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/zebra/hAjCkcyly4gsS9-cptD3Y_image%20(20).png" + ], + "title": "Image URL", + "type": "string", + "description": "Image prompt for the omni model." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.", + "default": false + }, + "safety_tolerance": { + "enum": [ + "1", + "2", + "3", + "4", + "5", + "6" + ], + "title": "Safety Tolerance", + "type": "string", + "description": "The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive.", + "default": "2" + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.", + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of inference steps for sampling.", + "default": 30 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The same seed and the same prompt given to the same version of the model will output the same image every time." + } + }, + "x-fal-order-properties": [ + "image_url", + "guidance_scale", + "num_inference_steps", + "safety_tolerance", + "output_format", + "aspect_ratio", + "seed", + "sync_mode" + ], + "required": [ + "image_url" + ] + }, + "ImageEditingProfessionalPhotoOutput": { + "title": "ProfessionalPhotoOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://fal.media/files/kangaroo/ZldH8N4D1vxXWp_FHIMSK_24bc2c69b53e4f92a98cf02d798a3f00.jpg" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "images", + "seed" + ], + "required": [ + "images", + "seed" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-editing/professional-photo/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/professional-photo/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/image-editing/professional-photo": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingProfessionalPhotoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/professional-photo/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingProfessionalPhotoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-editing/object-removal", + "metadata": { + "display_name": "Image Editing", + "category": "image-to-image", + "description": "Remove unwanted objects or people from your photos while seamlessly blending the background.", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:34.561Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "model_url": "https://fal.run/fal-ai/image-editing/object-removal", + "license_type": "commercial", + "date": "2025-05-29T20:29:21.865Z", + "group": { + "key": "image-editing", + "label": "Object Removal" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-editing/object-removal", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-editing/object-removal queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-editing/object-removal", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-editing/object-removal", + "documentationUrl": "https://fal.ai/models/fal-ai/image-editing/object-removal/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageEditingObjectRemovalInput": { + "title": "ObjectRemovalInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "background people" + ], + "title": "Objects to Remove", + "type": "string", + "description": "Specify which objects to remove from the image.", + "default": "background people" + }, + "aspect_ratio": { + "enum": [ + "21:9", + "16:9", + "4:3", + "3:2", + "1:1", + "2:3", + "3:4", + "9:16", + "9:21" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated image." + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/zebra/hAjCkcyly4gsS9-cptD3Y_image%20(20).png" + ], + "title": "Image URL", + "type": "string", + "description": "Image prompt for the omni model." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.", + "default": false + }, + "safety_tolerance": { + "enum": [ + "1", + "2", + "3", + "4", + "5", + "6" + ], + "title": "Safety Tolerance", + "type": "string", + "description": "The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive.", + "default": "2" + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.", + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of inference steps for sampling.", + "default": 30 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The same seed and the same prompt given to the same version of the model will output the same image every time." + } + }, + "x-fal-order-properties": [ + "image_url", + "guidance_scale", + "num_inference_steps", + "safety_tolerance", + "output_format", + "aspect_ratio", + "seed", + "sync_mode", + "prompt" + ], + "required": [ + "image_url" + ] + }, + "ImageEditingObjectRemovalOutput": { + "title": "ObjectRemovalOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://fal.media/files/monkey/3nTlgwVIOSeWJ8yDmvYr-_e09a6b8eb9264f76a33408e997a7447d.jpg" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "images", + "seed" + ], + "required": [ + "images", + "seed" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." 
+ }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-editing/object-removal/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/object-removal/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/image-editing/object-removal": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingObjectRemovalInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/object-removal/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingObjectRemovalOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-editing/hair-change", + "metadata": { + "display_name": "Image Editing", + "category": "image-to-image", + "description": "Experiment with different hairstyles, from bald to any style you can imagine, while maintaining natural lighting and realistic results.", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:34.685Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "model_url": "https://fal.run/fal-ai/image-editing/hair-change", + "license_type": "commercial", + "date": "2025-05-29T20:29:16.914Z", + "group": { + "key": "image-editing", + "label": "Hair Change" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-editing/hair-change", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-editing/hair-change queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-editing/hair-change", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-editing/hair-change", + "documentationUrl": "https://fal.ai/models/fal-ai/image-editing/hair-change/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageEditingHairChangeInput": { + "title": "HairChangeInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "bald" + ], + "title": "Hair Style Prompt", + "type": "string", + "description": "The desired hair style to apply.", + "default": "bald" + }, + "aspect_ratio": { + "enum": [ + "21:9", + "16:9", + "4:3", + "3:2", + "1:1", + "2:3", + "3:4", + "9:16", + "9:21" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated image." + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/zebra/hAjCkcyly4gsS9-cptD3Y_image%20(20).png" + ], + "title": "Image URL", + "type": "string", + "description": "Image prompt for the omni model." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.", + "default": false + }, + "safety_tolerance": { + "enum": [ + "1", + "2", + "3", + "4", + "5", + "6" + ], + "title": "Safety Tolerance", + "type": "string", + "description": "The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive.", + "default": "2" + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.", + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of inference steps for sampling.", + "default": 30 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The same seed and the same prompt given to the same version of the model will output the same image every time." + } + }, + "x-fal-order-properties": [ + "image_url", + "guidance_scale", + "num_inference_steps", + "safety_tolerance", + "output_format", + "aspect_ratio", + "seed", + "sync_mode", + "prompt" + ], + "required": [ + "image_url" + ] + }, + "ImageEditingHairChangeOutput": { + "title": "HairChangeOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://fal.media/files/panda/5GTPnNd3D_X-WXiEo4jUf_0bedd250b9d24bd194702f3f196f25a0.jpg" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "images", + "seed" + ], + "required": [ + "images", + "seed" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." 
+ }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-editing/hair-change/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/hair-change/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/image-editing/hair-change": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingHairChangeInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/hair-change/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingHairChangeOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-editing/face-enhancement", + "metadata": { + "display_name": "Image Editing", + "category": "image-to-image", + "description": "Enhance facial features with professional retouching while maintaining a natural, realistic look", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:34.810Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "model_url": "https://fal.run/fal-ai/image-editing/face-enhancement", + "license_type": "commercial", + "date": "2025-05-29T20:29:10.990Z", + "group": { + "key": "image-editing", + "label": "Face Enhancement" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-editing/face-enhancement", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-editing/face-enhancement queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-editing/face-enhancement", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-editing/face-enhancement", + "documentationUrl": "https://fal.ai/models/fal-ai/image-editing/face-enhancement/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageEditingFaceEnhancementInput": { + "title": "BaseInput", + "type": "object", + "properties": { + "aspect_ratio": { + "enum": [ + "21:9", + "16:9", + "4:3", + "3:2", + "1:1", + "2:3", + "3:4", + "9:16", + "9:21" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated image." + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/zebra/hAjCkcyly4gsS9-cptD3Y_image%20(20).png" + ], + "title": "Image URL", + "type": "string", + "description": "Image prompt for the omni model." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.", + "default": false + }, + "safety_tolerance": { + "enum": [ + "1", + "2", + "3", + "4", + "5", + "6" + ], + "title": "Safety Tolerance", + "type": "string", + "description": "The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive.", + "default": "2" + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.", + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of inference steps for sampling.", + "default": 30 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The same seed and the same prompt given to the same version of the model will output the same image every time." + } + }, + "x-fal-order-properties": [ + "image_url", + "guidance_scale", + "num_inference_steps", + "safety_tolerance", + "output_format", + "aspect_ratio", + "seed", + "sync_mode" + ], + "required": [ + "image_url" + ] + }, + "ImageEditingFaceEnhancementOutput": { + "title": "FaceEnhancementOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://fal.media/files/zebra/cUdI2ZvLl8Y99cgoxZsNZ_2d5cd48079684ee189dd3aa8a80b1c2d.jpg" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "images", + "seed" + ], + "required": [ + "images", + "seed" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-editing/face-enhancement/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/face-enhancement/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/image-editing/face-enhancement": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingFaceEnhancementInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/face-enhancement/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingFaceEnhancementOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-editing/expression-change", + "metadata": { + "display_name": "Image Editing", + "category": "image-to-image", + "description": "Change facial expressions in photos to any emotion you desire, from smiles to serious looks.", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:34.942Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "model_url": "https://fal.run/fal-ai/image-editing/expression-change", + "license_type": "commercial", + "date": "2025-05-29T20:29:05.309Z", + "group": { + "key": "image-editing", + "label": "Expression Change" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-editing/expression-change", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-editing/expression-change queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-editing/expression-change", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-editing/expression-change", + "documentationUrl": "https://fal.ai/models/fal-ai/image-editing/expression-change/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageEditingExpressionChangeInput": { + "title": "ExpressionChangeInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "sad" + ], + "title": "Expression Prompt", + "type": "string", + "description": "The desired facial expression to apply.", + "default": "sad" + }, + "aspect_ratio": { + "enum": [ + "21:9", + "16:9", + "4:3", + "3:2", + "1:1", + "2:3", + "3:4", + "9:16", + "9:21" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated image." + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/zebra/hAjCkcyly4gsS9-cptD3Y_image%20(20).png" + ], + "title": "Image URL", + "type": "string", + "description": "Image prompt for the omni model." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.", + "default": false + }, + "safety_tolerance": { + "enum": [ + "1", + "2", + "3", + "4", + "5", + "6" + ], + "title": "Safety Tolerance", + "type": "string", + "description": "The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive.", + "default": "2" + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.", + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of inference steps for sampling.", + "default": 30 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The same seed and the same prompt given to the same version of the model will output the same image every time." + } + }, + "x-fal-order-properties": [ + "image_url", + "guidance_scale", + "num_inference_steps", + "safety_tolerance", + "output_format", + "aspect_ratio", + "seed", + "sync_mode", + "prompt" + ], + "required": [ + "image_url" + ] + }, + "ImageEditingExpressionChangeOutput": { + "title": "ExpressionChangeOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://fal.media/files/kangaroo/Gf3AzGSqL08srbH5gOQtg_dad44d1ada5c4a32bc835324e6058fb1.jpg" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "images", + "seed" + ], + "required": [ + "images", + "seed" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." 
+ }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-editing/expression-change/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/expression-change/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/image-editing/expression-change": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingExpressionChangeInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/expression-change/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingExpressionChangeOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-editing/color-correction", + "metadata": { + "display_name": "Image Editing", + "category": "image-to-image", + "description": "Perfect your photos with professional color grading, balanced tones, and vibrant yet natural colors", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:35.216Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "model_url": "https://fal.run/fal-ai/image-editing/color-correction", + "license_type": "commercial", + "date": "2025-05-29T20:29:00.034Z", + "group": { + "key": "image-editing", + "label": "Color Correction" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-editing/color-correction", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-editing/color-correction queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-editing/color-correction", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-editing/color-correction", + "documentationUrl": "https://fal.ai/models/fal-ai/image-editing/color-correction/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageEditingColorCorrectionInput": { + "title": "BaseInput", + "type": "object", + "properties": { + "aspect_ratio": { + "enum": [ + "21:9", + "16:9", + "4:3", + "3:2", + "1:1", + "2:3", + "3:4", + "9:16", + "9:21" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated image." + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/zebra/hAjCkcyly4gsS9-cptD3Y_image%20(20).png" + ], + "title": "Image URL", + "type": "string", + "description": "Image prompt for the omni model." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.", + "default": false + }, + "safety_tolerance": { + "enum": [ + "1", + "2", + "3", + "4", + "5", + "6" + ], + "title": "Safety Tolerance", + "type": "string", + "description": "The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive.", + "default": "2" + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.", + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of inference steps for sampling.", + "default": 30 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The same seed and the same prompt given to the same version of the model will output the same image every time." + } + }, + "x-fal-order-properties": [ + "image_url", + "guidance_scale", + "num_inference_steps", + "safety_tolerance", + "output_format", + "aspect_ratio", + "seed", + "sync_mode" + ], + "required": [ + "image_url" + ] + }, + "ImageEditingColorCorrectionOutput": { + "title": "ColorCorrectionOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://fal.media/files/elephant/Y7eZXlYjiw5km-dTA1WkX_2d4d7661f8504154b76009a6d9c49728.jpg" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "images", + "seed" + ], + "required": [ + "images", + "seed" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-editing/color-correction/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/color-correction/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/image-editing/color-correction": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingColorCorrectionInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/color-correction/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingColorCorrectionOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-editing/cartoonify", + "metadata": { + "display_name": "Image Editing", + "category": "image-to-image", + "description": "Transform your photos into vibrant cool cartoons with bold outlines and rich colors.", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:35.344Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "model_url": "https://fal.run/fal-ai/image-editing/cartoonify", + "license_type": "commercial", + "date": "2025-05-29T20:28:53.641Z", + "group": { + "key": "image-editing", + "label": "Cartoonify" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-editing/cartoonify", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-editing/cartoonify queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-editing/cartoonify", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-editing/cartoonify", + "documentationUrl": "https://fal.ai/models/fal-ai/image-editing/cartoonify/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageEditingCartoonifyInput": { + "title": "BaseInput", + "type": "object", + "properties": { + "aspect_ratio": { + "enum": [ + "21:9", + "16:9", + "4:3", + "3:2", + "1:1", + "2:3", + "3:4", + "9:16", + "9:21" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated image." + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/zebra/hAjCkcyly4gsS9-cptD3Y_image%20(20).png" + ], + "title": "Image URL", + "type": "string", + "description": "Image prompt for the omni model." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.", + "default": false + }, + "safety_tolerance": { + "enum": [ + "1", + "2", + "3", + "4", + "5", + "6" + ], + "title": "Safety Tolerance", + "type": "string", + "description": "The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive.", + "default": "2" + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.", + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of inference steps for sampling.", + "default": 30 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The same seed and the same prompt given to the same version of the model will output the same image every time." + } + }, + "x-fal-order-properties": [ + "image_url", + "guidance_scale", + "num_inference_steps", + "safety_tolerance", + "output_format", + "aspect_ratio", + "seed", + "sync_mode" + ], + "required": [ + "image_url" + ] + }, + "ImageEditingCartoonifyOutput": { + "title": "CartoonifyOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://fal.media/files/koala/VCzEo2r7yiG49gH_dBqfC_55111638572349f782f547516cbc6e74.jpg" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "images", + "seed" + ], + "required": [ + "images", + "seed" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-editing/cartoonify/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/cartoonify/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/image-editing/cartoonify": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingCartoonifyInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/cartoonify/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingCartoonifyOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-editing/background-change", + "metadata": { + "display_name": "Image Editing", + "category": "image-to-image", + "description": "Replace your photo's background with any scene you desire, from beach sunsets to urban landscapes, with perfect lighting and shadows", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:35.475Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "model_url": "https://fal.run/fal-ai/image-editing/background-change", + "license_type": "commercial", + "date": "2025-05-29T20:28:46.061Z", + "group": { + "key": "image-editing", + "label": "Background Change" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-editing/background-change", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-editing/background-change queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-editing/background-change", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-editing/background-change", + "documentationUrl": "https://fal.ai/models/fal-ai/image-editing/background-change/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageEditingBackgroundChangeInput": { + "title": "BackgroundChangeInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "beach sunset with palm trees" + ], + "title": "Background Prompt", + "type": "string", + "description": "The desired background to apply.", + "default": "beach sunset with palm trees" + }, + "aspect_ratio": { + "enum": [ + "21:9", + "16:9", + "4:3", + "3:2", + "1:1", + "2:3", + "3:4", + "9:16", + "9:21" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated image." + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/zebra/hAjCkcyly4gsS9-cptD3Y_image%20(20).png" + ], + "title": "Image URL", + "type": "string", + "description": "Image prompt for the omni model." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.", + "default": false + }, + "safety_tolerance": { + "enum": [ + "1", + "2", + "3", + "4", + "5", + "6" + ], + "title": "Safety Tolerance", + "type": "string", + "description": "The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive.", + "default": "2" + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.", + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of inference steps for sampling.", + "default": 30 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The same seed and the same prompt given to the same version of the model will output the same image every time." + } + }, + "x-fal-order-properties": [ + "image_url", + "guidance_scale", + "num_inference_steps", + "safety_tolerance", + "output_format", + "aspect_ratio", + "seed", + "sync_mode", + "prompt" + ], + "required": [ + "image_url" + ] + }, + "ImageEditingBackgroundChangeOutput": { + "title": "BackgroundChangeOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://fal.media/files/panda/hsfdyCWf8tgc-UBhT2Rie_9c789557af2e43a08a02b5e4e4c41a69.jpg" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "images", + "seed" + ], + "required": [ + "images", + "seed" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." 
+ }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-editing/background-change/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/background-change/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/image-editing/background-change": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingBackgroundChangeInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/background-change/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingBackgroundChangeOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-editing/age-progression", + "metadata": { + "display_name": "Image Editing", + "category": "image-to-image", + "description": "See how you or others might look at different ages, from younger to older, while preserving core facial features.", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:35.603Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "model_url": "https://fal.run/fal-ai/image-editing/age-progression", + "license_type": "commercial", + "date": "2025-05-29T20:28:40.116Z", + "group": { + "key": "image-editing", + "label": "Age Progression" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-editing/age-progression", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-editing/age-progression queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-editing/age-progression", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-editing/age-progression", + "documentationUrl": "https://fal.ai/models/fal-ai/image-editing/age-progression/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageEditingAgeProgressionInput": { + "title": "AgeProgressionInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "20 years older" + ], + "title": "Age Change", + "type": "string", + "description": "The age change to apply.", + "default": "20 years older" + }, + "aspect_ratio": { + "enum": [ + "21:9", + "16:9", + "4:3", + "3:2", + "1:1", + "2:3", + "3:4", + "9:16", + "9:21" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated image." + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/zebra/hAjCkcyly4gsS9-cptD3Y_image%20(20).png" + ], + "title": "Image URL", + "type": "string", + "description": "Image prompt for the omni model." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.", + "default": false + }, + "safety_tolerance": { + "enum": [ + "1", + "2", + "3", + "4", + "5", + "6" + ], + "title": "Safety Tolerance", + "type": "string", + "description": "The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive.", + "default": "2" + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.", + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of inference steps for sampling.", + "default": 30 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The same seed and the same prompt given to the same version of the model will output the same image every time." + } + }, + "x-fal-order-properties": [ + "image_url", + "guidance_scale", + "num_inference_steps", + "safety_tolerance", + "output_format", + "aspect_ratio", + "seed", + "sync_mode", + "prompt" + ], + "required": [ + "image_url" + ] + }, + "ImageEditingAgeProgressionOutput": { + "title": "AgeProgressionOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://fal.media/files/lion/t7L2EtPYDkz1-fBlJsodJ_4e7306f22c8748258f96d1e5ed5a4cfe.jpg" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "images", + "seed" + ], + "required": [ + "images", + "seed" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." 
+ }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-editing/age-progression/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/age-progression/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/image-editing/age-progression": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingAgeProgressionInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-editing/age-progression/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageEditingAgeProgressionOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-pro/kontext/max/multi", + "metadata": { + "display_name": "FLUX.1 Kontext [max]", + "category": "image-to-image", + "description": "Experimental version of FLUX.1 Kontext [max] with multi image handling capabilities", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:35.730Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/koala/K-CKzmh6JmZz5D0L9ar6u_ad445c7c4de54d3fb05b5f72305ffff3.jpg", + "model_url": "https://fal.run/fal-ai/flux-pro/kontext/max/multi", + "license_type": "commercial", + "date": "2025-05-29T19:13:44.033Z", + "group": { + "key": "flux-pro-kontext", + "label": "Kontext [max] -- Editing (experimental Multi Image)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-pro/kontext/max/multi", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-pro/kontext/max/multi queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-pro/kontext/max/multi", + "category": "image-to-image", + "thumbnailUrl": "https://fal.media/files/koala/K-CKzmh6JmZz5D0L9ar6u_ad445c7c4de54d3fb05b5f72305ffff3.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-pro/kontext/max/multi", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-pro/kontext/max/multi/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxProKontextMaxMultiInput": { + "title": "FluxKontextMultiInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Put the little duckling on top of the woman's t-shirt." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "description": "The number of images to generate.", + "maximum": 4, + "default": 1 + }, + "aspect_ratio": { + "enum": [ + "21:9", + "16:9", + "4:3", + "3:2", + "1:1", + "2:3", + "3:4", + "9:16", + "9:21" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated image." + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "safety_tolerance": { + "enum": [ + "1", + "2", + "3", + "4", + "5", + "6" + ], + "title": "Safety Tolerance", + "type": "string", + "description": "The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive.", + "default": "2" + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance scale (CFG)", + "type": "number", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "maximum": 20, + "default": 3.5 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "image_urls": { + "examples": [ + [ + "https://v3.fal.media/files/penguin/XoW0qavfF-ahg-jX4BMyL_image.webp", + "https://v3.fal.media/files/tiger/bml6YA7DWJXOigadvxk75_image.webp" + ] + ], + "title": "Image URL", + "type": "array", + "description": "Image prompt for the omni model.", + "items": { + "type": "string" + } + }, + "enhance_prompt": { + "title": "Enhance Prompt", + "type": "boolean", + "description": "Whether to enhance the prompt for better results.", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "output_format", + "safety_tolerance", + "enhance_prompt", + "aspect_ratio", + "image_urls" + ], + "required": [ + "prompt", + "image_urls" + ] + }, + "FluxProKontextMaxMultiOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/registry__image__fast_sdxl__models__Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "registry__image__fast_sdxl__models__Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-pro/kontext/max/multi/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-pro/kontext/max/multi/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-pro/kontext/max/multi": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxProKontextMaxMultiInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-pro/kontext/max/multi/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxProKontextMaxMultiOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-pro/kontext/multi", + "metadata": { + "display_name": "FLUX.1 Kontext [pro]", + "category": "image-to-image", + "description": "Experimental version of FLUX.1 Kontext [pro] with multi image handling capabilities", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:35.858Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Upscale-2.jpg", + "model_url": "https://fal.run/fal-ai/flux-pro/kontext/multi", + "license_type": "commercial", + "date": "2025-05-29T19:10:26.719Z", + "group": { + "key": "flux-pro-kontext", + "label": "Kontext [pro] -- Editing (experimental Multi Image)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-pro/kontext/multi", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-pro/kontext/multi queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-pro/kontext/multi", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Upscale-2.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-pro/kontext/multi", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-pro/kontext/multi/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxProKontextMultiInput": { + "title": "FluxKontextMultiInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Put the little duckling on top of the woman's t-shirt." 
+ ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "description": "The number of images to generate.", + "maximum": 4, + "default": 1 + }, + "aspect_ratio": { + "enum": [ + "21:9", + "16:9", + "4:3", + "3:2", + "1:1", + "2:3", + "3:4", + "9:16", + "9:21" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated image." + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "safety_tolerance": { + "enum": [ + "1", + "2", + "3", + "4", + "5", + "6" + ], + "title": "Safety Tolerance", + "type": "string", + "description": "The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive.", + "default": "2" + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance scale (CFG)", + "type": "number", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "maximum": 20, + "default": 3.5 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "image_urls": { + "examples": [ + [ + "https://v3.fal.media/files/penguin/XoW0qavfF-ahg-jX4BMyL_image.webp", + "https://v3.fal.media/files/tiger/bml6YA7DWJXOigadvxk75_image.webp" + ] + ], + "title": "Image URL", + "type": "array", + "description": "Image prompt for the omni model.", + "items": { + "type": "string" + } + }, + "enhance_prompt": { + "title": "Enhance Prompt", + "type": "boolean", + "description": "Whether to enhance the prompt for better results.", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "output_format", + "safety_tolerance", + "enhance_prompt", + "aspect_ratio", + "image_urls" + ], + "required": [ + "prompt", + "image_urls" + ] + }, + "FluxProKontextMultiOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/registry__image__fast_sdxl__models__Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "registry__image__fast_sdxl__models__Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-pro/kontext/multi/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-pro/kontext/multi/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-pro/kontext/multi": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxProKontextMultiInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-pro/kontext/multi/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxProKontextMultiOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-pro/kontext/max", + "metadata": { + "display_name": "FLUX.1 Kontext [max]", + "category": "image-to-image", + "description": "FLUX.1 Kontext [max] is a model with greatly improved prompt adherence and typography generation meet premium consistency for editing without compromise on speed. 
\n ", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:36.118Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/tiger/9Ke6Di1rRqryqOR1SreQJ_33e684b4511644179b7429bb9c4cf592.jpg", + "model_url": "https://fal.run/fal-ai/flux-pro/kontext/max", + "license_type": "commercial", + "date": "2025-05-29T04:53:59.306Z", + "group": { + "key": "flux-pro-kontext", + "label": "Kontext [max] -- Editing" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-pro/kontext/max", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-pro/kontext/max queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-pro/kontext/max", + "category": "image-to-image", + "thumbnailUrl": "https://fal.media/files/tiger/9Ke6Di1rRqryqOR1SreQJ_33e684b4511644179b7429bb9c4cf592.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-pro/kontext/max", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-pro/kontext/max/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxProKontextMaxInput": { + "title": "FluxKontextInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Put a donut next to the flour." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "description": "The number of images to generate.", + "maximum": 4, + "default": 1 + }, + "aspect_ratio": { + "enum": [ + "21:9", + "16:9", + "4:3", + "3:2", + "1:1", + "2:3", + "3:4", + "9:16", + "9:21" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated image." + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/rabbit/rmgBxhwGYb2d3pl3x9sKf_output.png" + ], + "title": "Image URL", + "type": "string", + "description": "Image prompt for the omni model." 
+ }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "safety_tolerance": { + "enum": [ + "1", + "2", + "3", + "4", + "5", + "6" + ], + "title": "Safety Tolerance", + "type": "string", + "description": "The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive.", + "default": "2" + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance scale (CFG)", + "type": "number", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "maximum": 20, + "default": 3.5 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "enhance_prompt": { + "title": "Enhance Prompt", + "type": "boolean", + "description": "Whether to enhance the prompt for better results.", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "output_format", + "safety_tolerance", + "enhance_prompt", + "aspect_ratio", + "image_url" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "FluxProKontextMaxOutput": { + "title": "FluxKontextOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "examples": [ + [ + { + "height": 1024, + "url": "https://fal.media/files/tiger/7dSJbIU_Ni-0Zp9eaLsvR_fe56916811d84ac69c6ffc0d32dca151.jpg", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/fal__toolkit__image__image__Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "fal__toolkit__image__image__Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-pro/kontext/max/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-pro/kontext/max/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-pro/kontext/max": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxProKontextMaxInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-pro/kontext/max/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxProKontextMaxOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-kontext/dev", + "metadata": { + "display_name": "FLUX.1 Kontext [dev]", + "category": "image-to-image", + "description": "Frontier image editing model.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:37.514Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-3.jpg", + "model_url": "https://fal.run/fal-ai/flux-kontext/dev", + "license_type": "commercial", + "date": "2025-05-28T18:38:52.637Z", + "group": { + "key": "flux-kontext", + "label": "Kontext [dev] -- editing" + }, + "highlighted": false, + "kind": "inference", + "stream_url": "https://fal.run/fal-ai/flux-kontext/dev/stream", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-kontext/dev", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-kontext/dev queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-kontext/dev", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-3.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-kontext/dev", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-kontext/dev/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxKontextDevInput": { + "title": "BaseKontextEditInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Change the setting to a day time, add a lot of people walking the sidewalk while maintaining the same style of the painting" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to edit the image." + }, + "resolution_mode": { + "enum": [ + "auto", + "match_input", + "1:1", + "16:9", + "21:9", + "3:2", + "2:3", + "4:5", + "5:4", + "3:4", + "4:3", + "9:16", + "9:21" + ], + "title": "Resolution Mode", + "type": "string", + "description": "\n Determines how the output resolution is set for image editing.\n - `auto`: The model selects an optimal resolution from a predefined set that best matches the input image's aspect ratio. This is the recommended setting for most use cases as it's what the model was trained on.\n - `match_input`: The model will attempt to use the same resolution as the input image. The resolution will be adjusted to be compatible with the model's requirements (e.g. dimensions must be multiples of 16 and within supported limits).\n Apart from these, a few aspect ratios are also supported.\n ", + "default": "match_input" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "title": "Acceleration", + "type": "string", + "description": "The speed of the generation. The higher the speed, the faster the generation.", + "default": "none" + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "Output format", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/kontext_example_input.webp" + ], + "title": "Image URL", + "type": "string", + "description": "The URL of the image to edit." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. 
This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ", + "default": false + }, + "guidance_scale": { + "minimum": 1, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 2.5 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "num_inference_steps": { + "minimum": 10, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 28 + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "acceleration", + "resolution_mode" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "FluxKontextDevOutput": { + "title": "KontextEditOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "examples": [ + [ + { + "height": 768, + "content_type": "image/jpeg", + "url": "https://storage.googleapis.com/falserverless/example_outputs/kontext_example_output.jpeg", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value as the one passed in the\n input, or the randomly generated seed that was used in case none was passed.\n " + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-kontext/dev/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-kontext/dev/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully."
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-kontext/dev": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxKontextDevInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-kontext/dev/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxKontextDevOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/bagel/edit", + "metadata": { + "display_name": "Bagel", + "category": "image-to-image", + "description": "Bagel is a 7B parameter multimodal model from Bytedance-Seed that can generate both images and text.", + "status": "active", + "tags": [ + "image-to-image", + "image-editing" + ], + "updated_at": "2026-01-26T21:43:39.414Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/bagel.webp", + "model_url": "https://fal.run/fal-ai/bagel/edit", + "license_type": "commercial", + "date": "2025-05-21T18:30:09.512Z", + "group": { + "key": "bagel", + "label": "Edit" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/bagel/edit", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/bagel/edit queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/bagel/edit", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/bagel.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/bagel/edit", + "documentationUrl": "https://fal.ai/models/fal-ai/bagel/edit/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "BagelEditInput": { + "title": "ImageEditInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Change the cosmic cloud background of the floating temple to a clear blue sky with a gentle sunrise on the horizon. Keep all temple architecture, figures, and other elements exactly as they are." 
+ ], + "title": "Prompt", + "type": "string", + "description": "The prompt to edit the image with." + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed to use for the generation." + }, + "use_thought": { + "title": "Use Thought", + "type": "boolean", + "description": "Whether to use thought tokens for generation. If set to true, the model will \"think\" to potentially improve generation quality. Increases generation time and increases the cost by 20%.", + "default": false + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/bagel/wRhCPSyiKTiLnnWvUpGIl.jpeg" + ], + "title": "Image Url", + "type": "string", + "description": "The image to edit." + } + }, + "x-fal-order-properties": [ + "prompt", + "seed", + "use_thought", + "enable_safety_checker", + "image_url" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "BagelEditOutput": { + "title": "ImageEditOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "examples": [ + [ + { + "file_size": 423052, + "height": 1024, + "file_name": "hQnndOMvGSt2UsYAiV3vs.jpeg", + "content_type": "image/jpeg", + "url": "https://storage.googleapis.com/falserverless/bagel/hQnndOMvGSt2UsYAiV3vs.jpeg", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The edited images.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value as the one passed in the\n input, or the randomly generated seed that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file."
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "description": "Represents an image file.", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/bagel/edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bagel/edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/bagel/edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BagelEditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bagel/edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BagelEditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "smoretalk-ai/rembg-enhance", + "metadata": { + "display_name": "Rembg Enhance (Remove Background Enhance)", + "category": "image-to-image", + "description": "Rembg-enhance is optimized for 2D vector images, 3D graphics, and photos by leveraging matting technology.", + "status": "active", + "tags": [ + "background removal", + "image editing", + "utility", + "segmentation", + "high resolution", + "rembg" + ], + "updated_at": "2026-01-26T21:43:45.089Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/tiger/k7u2IikPi0ubraTdzzOYz_smoretalk-ai-rembg-enhance-thumbnail.png", + "model_url": "https://fal.run/smoretalk-ai/rembg-enhance", + "license_type": "commercial", + "date": "2025-05-09T03:29:12.011Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for smoretalk-ai/rembg-enhance", + "version": "1.0.0", + "description": "The OpenAPI schema for the smoretalk-ai/rembg-enhance queue.", + "x-fal-metadata": { + "endpointId": "smoretalk-ai/rembg-enhance", + "category": "image-to-image", + 
"thumbnailUrl": "https://v3.fal.media/files/tiger/k7u2IikPi0ubraTdzzOYz_smoretalk-ai-rembg-enhance-thumbnail.png", + "playgroundUrl": "https://fal.ai/models/smoretalk-ai/rembg-enhance", + "documentationUrl": "https://fal.ai/models/smoretalk-ai/rembg-enhance/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "RembgEnhanceInput": { + "title": "ImageInput", + "type": "object", + "properties": { + "image_url": { + "examples": [ + "https://fal.media/files/kangaroo/SOF3bLF7b1kJ2-N9dTg-c.png" + ], + "description": "URL of the input image", + "type": "string", + "title": "Image Url" + } + }, + "x-fal-order-properties": [ + "image_url" + ], + "required": [ + "image_url" + ] + }, + "RembgEnhanceOutput": { + "title": "ImageOutput", + "type": "object", + "properties": { + "image": { + "description": "The segmented output image", + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "image" + ], + "required": [ + "image" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "description": "The mime type of the file.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/smoretalk-ai/rembg-enhance/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/smoretalk-ai/rembg-enhance/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/smoretalk-ai/rembg-enhance": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RembgEnhanceInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/smoretalk-ai/rembg-enhance/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RembgEnhanceOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/recraft/upscale/creative", + "metadata": { + "display_name": "Recraft Creative Upscale", + "category": "image-to-image", + "description": "Enhances a given raster image using the 'creative upscale' tool, increasing image resolution, making the image sharper and cleaner.", + "status": "active", + "tags": [ + "upscaling" + ], + "updated_at": "2026-01-26T21:43:46.387Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/recraft-creative-upscale.jpg", + "model_url": "https://fal.run/fal-ai/recraft/upscale/creative", + "license_type": "commercial", + "date": "2025-05-07T13:10:35.699Z", + "group": { + "key": "fal-ai/recraft/upscale", + "label": "Creative Upscale" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/recraft/upscale/creative", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/recraft/upscale/creative queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/recraft/upscale/creative", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/recraft-creative-upscale.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/recraft/upscale/creative", + "documentationUrl": "https://fal.ai/models/fal-ai/recraft/upscale/creative/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "RecraftUpscaleCreativeInput": { + "title": "UpscaleInput", + "type": "object", + "properties": { + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": false + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/recraft/recraft-upscaler-1.jpeg" + ], + "title": "Image Url", + "type": "string", + "description": "The URL of the image to be upscaled. Must be in PNG format." + } + }, + "x-fal-order-properties": [ + "image_url", + "sync_mode", + "enable_safety_checker" + ], + "required": [ + "image_url" + ] + }, + "RecraftUpscaleCreativeOutput": { + "title": "UpscaleOutput", + "type": "object", + "properties": { + "image": { + "title": "Image", + "description": "The upscaled image.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "image" + ], + "required": [ + "image" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/recraft/upscale/creative/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/recraft/upscale/creative/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/recraft/upscale/creative": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RecraftUpscaleCreativeInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/recraft/upscale/creative/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RecraftUpscaleCreativeOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/recraft/upscale/crisp", + "metadata": { + "display_name": "Recraft Crisp Upscale", + "category": "image-to-image", + "description": "Enhances a given raster image using 'crisp upscale' tool, boosting resolution with a focus on refining small details and faces.", + "status": "active", + "tags": [ + "upscaling" + ], + "updated_at": "2026-01-26T21:43:46.511Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/recraft-creative-upscale.jpg", + "model_url": "https://fal.run/fal-ai/recraft/upscale/crisp", + "license_type": "commercial", + "date": "2025-05-07T13:07:59.554Z", + "group": { + "key": "fal-ai/recraft/upscale", + "label": "Crisp Upscale" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/recraft/upscale/crisp", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/recraft/upscale/crisp queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/recraft/upscale/crisp", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/recraft-creative-upscale.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/recraft/upscale/crisp", + "documentationUrl": "https://fal.ai/models/fal-ai/recraft/upscale/crisp/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "RecraftUpscaleCrispInput": { + "title": "UpscaleInput", + "type": "object", + "properties": { + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": false + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/recraft/recraft-upscaler-1.jpeg" + ], + "title": "Image Url", + "type": "string", + "description": "The URL of the image to be upscaled. Must be in PNG format." + } + }, + "x-fal-order-properties": [ + "image_url", + "sync_mode", + "enable_safety_checker" + ], + "required": [ + "image_url" + ] + }, + "RecraftUpscaleCrispOutput": { + "title": "UpscaleOutput", + "type": "object", + "properties": { + "image": { + "title": "Image", + "description": "The upscaled image.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "image" + ], + "required": [ + "image" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/recraft/upscale/crisp/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/recraft/upscale/crisp/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/recraft/upscale/crisp": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RecraftUpscaleCrispInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/recraft/upscale/crisp/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RecraftUpscaleCrispOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/recraft/v3/image-to-image", + "metadata": { + "display_name": "Recraft V3", + "category": "image-to-image", + "description": "Recraft V3 is a text-to-image model with the ability to generate long texts, vector art, images in brand style, and much more. 
As of today, it is SOTA in image generation, proven by Artificial Analysis' industry-leading Text-to-Image Benchmark on Hugging Face.", + "status": "active", + "tags": [ + "vector", + "typography", + "style" + ], + "updated_at": "2026-01-26T21:43:46.811Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/recraft-v3.webp", + "model_url": "https://fal.run/fal-ai/recraft/v3/image-to-image", + "license_type": "commercial", + "date": "2025-05-07T12:02:09.806Z", + "group": { + "key": "fal-ai/recraft/v3", + "label": "Image to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/recraft/v3/image-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/recraft/v3/image-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/recraft/v3/image-to-image", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/recraft-v3.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/recraft/v3/image-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/recraft/v3/image-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "RecraftV3ImageToImageInput": { + "title": "ImageToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "winter", + "cyberpunk city", + "watercolor painting style" + ], + "maxLength": 1000, + "type": "string", + "title": "Prompt", + "description": "A text description of areas to change."
+ }, + "style": { + "enum": [ + "any", + "realistic_image", + "digital_illustration", + "vector_illustration", + "realistic_image/b_and_w", + "realistic_image/hard_flash", + "realistic_image/hdr", + "realistic_image/natural_light", + "realistic_image/studio_portrait", + "realistic_image/enterprise", + "realistic_image/motion_blur", + "realistic_image/evening_light", + "realistic_image/faded_nostalgia", + "realistic_image/forest_life", + "realistic_image/mystic_naturalism", + "realistic_image/natural_tones", + "realistic_image/organic_calm", + "realistic_image/real_life_glow", + "realistic_image/retro_realism", + "realistic_image/retro_snapshot", + "realistic_image/urban_drama", + "realistic_image/village_realism", + "realistic_image/warm_folk", + "digital_illustration/pixel_art", + "digital_illustration/hand_drawn", + "digital_illustration/grain", + "digital_illustration/infantile_sketch", + "digital_illustration/2d_art_poster", + "digital_illustration/handmade_3d", + "digital_illustration/hand_drawn_outline", + "digital_illustration/engraving_color", + "digital_illustration/2d_art_poster_2", + "digital_illustration/antiquarian", + "digital_illustration/bold_fantasy", + "digital_illustration/child_book", + "digital_illustration/child_books", + "digital_illustration/cover", + "digital_illustration/crosshatch", + "digital_illustration/digital_engraving", + "digital_illustration/expressionism", + "digital_illustration/freehand_details", + "digital_illustration/grain_20", + "digital_illustration/graphic_intensity", + "digital_illustration/hard_comics", + "digital_illustration/long_shadow", + "digital_illustration/modern_folk", + "digital_illustration/multicolor", + "digital_illustration/neon_calm", + "digital_illustration/noir", + "digital_illustration/nostalgic_pastel", + "digital_illustration/outline_details", + "digital_illustration/pastel_gradient", + "digital_illustration/pastel_sketch", + "digital_illustration/pop_art", + "digital_illustration/pop_renaissance", + "digital_illustration/street_art", + "digital_illustration/tablet_sketch", + "digital_illustration/urban_glow", + "digital_illustration/urban_sketching", + "digital_illustration/vanilla_dreams", + "digital_illustration/young_adult_book", + "digital_illustration/young_adult_book_2", + "vector_illustration/bold_stroke", + "vector_illustration/chemistry", + "vector_illustration/colored_stencil", + "vector_illustration/contour_pop_art", + "vector_illustration/cosmics", + "vector_illustration/cutout", + "vector_illustration/depressive", + "vector_illustration/editorial", + "vector_illustration/emotional_flat", + "vector_illustration/infographical", + "vector_illustration/marker_outline", + "vector_illustration/mosaic", + "vector_illustration/naivector", + "vector_illustration/roundish_flat", + "vector_illustration/segmented_colors", + "vector_illustration/sharp_contrast", + "vector_illustration/thin", + "vector_illustration/vector_photo", + "vector_illustration/vivid_shapes", + "vector_illustration/engraving", + "vector_illustration/line_art", + "vector_illustration/line_circuit", + "vector_illustration/linocut" + ], + "title": "Style", + "type": "string", + "description": "The style of the generated images. 
Vector images cost 2X as much.", + "default": "realistic_image" + }, + "style_id": { + "examples": [ + null + ], + "title": "Style Id", + "type": "string", + "description": "The ID of the custom style reference (optional)", + "format": "uuid4" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/recraft/recraft-upscaler-1.jpeg" + ], + "title": "Image Url", + "type": "string", + "description": "The URL of the image to modify. Must be less than 5 MB in size, have resolution less than 16 MP and max dimension less than 4096 pixels." + }, + "strength": { + "description": "Defines the difference from the original image; should lie in [0, 1], where 0 means almost identical and 1 means almost no similarity to the original", + "type": "number", + "examples": [ + 0.2, + 0.5, + 0.8 + ], + "title": "Strength", + "maximum": 1, + "minimum": 0, + "default": 0.5 + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "colors": { + "title": "Colors", + "type": "array", + "description": "An array of preferred colors", + "items": { + "$ref": "#/components/schemas/RGBColor" + }, + "default": [] + }, + "negative_prompt": { + "maxLength": 1000, + "type": "string", + "title": "Negative Prompt", + "description": "A text description of undesired elements on an image" + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "strength", + "style", + "colors", + "style_id", + "negative_prompt", + "sync_mode" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "RecraftV3ImageToImageOutput": { + "title": "ImageToImageOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3.fal.media/files/koala/Xoz8tel7YoTbh6Fiepmq3_image.webp" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated images", + "items": { + "$ref": "#/components/schemas/File" + } + } + }, + "x-fal-order-properties": [ + "images" + ], + "required": [ + "images" + ] + }, + "RGBColor": { + "title": "RGBColor", + "type": "object", + "properties": { + "r": { + "minimum": 0, + "title": "R", + "type": "integer", + "maximum": 255, + "description": "Red color value", + "default": 0 + }, + "b": { + "minimum": 0, + "title": "B", + "type": "integer", + "maximum": 255, + "description": "Blue color value", + "default": 0 + }, + "g": { + "minimum": 0, + "title": "G", + "type": "integer", + "maximum": 255, + "description": "Green color value", + "default": 0 + } + }, + "x-fal-order-properties": [ + "r", + "g", + "b" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from."
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/recraft/v3/image-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/recraft/v3/image-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/recraft/v3/image-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RecraftV3ImageToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/recraft/v3/image-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RecraftV3ImageToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/minimax/image-01/subject-reference", + "metadata": { + "display_name": "Minimax Image Subject Reference", + "category": "image-to-image", + "description": "Generate images from text and a reference image using MiniMax Image-01 for consistent character appearance.", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:48.037Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Upscale-3.jpeg", + "model_url": "https://fal.run/fal-ai/minimax/image-01/subject-reference", + "license_type": "commercial", + "date": "2025-05-06T16:14:51.517Z", + "group": { + "key": "minimax-image", + "label": "Subject Reference" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/minimax/image-01/subject-reference", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/minimax/image-01/subject-reference queue.", + "x-fal-metadata": { + "endpointId": 
"fal-ai/minimax/image-01/subject-reference", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Upscale-3.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/minimax/image-01/subject-reference", + "documentationUrl": "https://fal.ai/models/fal-ai/minimax/image-01/subject-reference/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MinimaxImage01SubjectReferenceInput": { + "title": "MiniMaxTextToImageWithReferenceRequest", + "type": "object", + "properties": { + "prompt_optimizer": { + "description": "Whether to enable automatic prompt optimization", + "type": "boolean", + "title": "Prompt Optimizer", + "default": false + }, + "aspect_ratio": { + "enum": [ + "1:1", + "16:9", + "4:3", + "3:2", + "2:3", + "3:4", + "9:16", + "21:9" + ], + "description": "Aspect ratio of the generated image", + "type": "string", + "title": "Aspect Ratio", + "default": "1:1" + }, + "num_images": { + "minimum": 1, + "description": "Number of images to generate (1-9)", + "type": "integer", + "maximum": 9, + "title": "Num Images", + "default": 1 + }, + "prompt": { + "examples": [ + "A beautiful woman with a crown on her head." 
+ ], + "maxLength": 1500, + "type": "string", + "title": "Prompt", + "description": "Text prompt for image generation (max 1500 characters)", + "minLength": 1 + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/koala/hQwSnkWm8FDjou5SwLNuX_c223cf93-0036-4b18-bbea-bf6d0da7f210.png" + ], + "description": "URL of the subject reference image to use for consistent character appearance", + "type": "string", + "title": "Image Url" + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "aspect_ratio", + "num_images", + "prompt_optimizer" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "MinimaxImage01SubjectReferenceOutput": { + "title": "MiniMaxTextToImageWithReferenceOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "file_size": 239709, + "file_name": "image.jpg", + "content_type": "image/jpeg", + "url": "https://v3.fal.media/files/lion/1bfHvTwZGzK59EYAi2OG7_image.jpg" + } + ] + ], + "description": "Generated images", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/File" + } + } + }, + "x-fal-order-properties": [ + "images" + ], + "required": [ + "images" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/minimax/image-01/subject-reference/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/image-01/subject-reference/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/minimax/image-01/subject-reference": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxImage01SubjectReferenceInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/image-01/subject-reference/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxImage01SubjectReferenceOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/hidream-i1-full/image-to-image", + "metadata": { + "display_name": "Hidream I1 Full", + "category": "image-to-image", + "description": "HiDream-I1 full is a new open-source image generative foundation model with 17B parameters that achieves state-of-the-art image generation quality within seconds.", + "status": "active", + "tags": [ + "image-to-image", + "hidream" + ], + "updated_at": "2026-01-26T21:43:48.970Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Upscale-1.jpeg", + "model_url": "https://fal.run/fal-ai/hidream-i1-full/image-to-image", + "license_type": "commercial", + "date": "2025-05-05T19:06:43.060Z", + "group": { + "key": "hidream-i1-full", + "label": "Image to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/hidream-i1-full/image-to-image", + "version": "1.0.0", + "description": "HiDream-I1 full is a new open-source image generative foundation model with 17B parameters that achieves state-of-the-art image generation quality within seconds.", + "x-fal-metadata": { + "endpointId": "fal-ai/hidream-i1-full/image-to-image", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Upscale-1.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/hidream-i1-full/image-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/hidream-i1-full/image-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "HidreamI1FullImageToImageInput": { + "title": "ImageToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "An old man" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 4, + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image. Setting to None uses the input image's size." + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/tiger/C9qhzoMrg6Sg7lYh_ocrZ_example_man.png" + ], + "title": "Image URL", + "type": "string", + "description": "The image URL to generate an image from." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "loras": { + "title": "Loras", + "type": "array", + "description": "A list of LoRAs to apply to the model. Each LoRA specifies its path, scale, and optional weight name.", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "default": [] + }, + "strength": { + "minimum": 0, + "title": "Strength", + "type": "number", + "maximum": 1, + "description": "Denoising strength for image-to-image generation.", + "default": 0.75 + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance scale (CFG)", + "type": "number", + "maximum": 20, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 5 + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps to perform.", + "default": 50 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "negative_prompt": { + "examples": [ + "" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. 
moustache, blurry, low resolution).\n ", + "default": "" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "loras", + "image_url", + "strength" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "HidreamI1FullImageToImageOutput": { + "title": "Img2ImgOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "examples": [ + [ + { + "height": 1024, + "content_type": "image/jpeg", + "url": "https://v3.fal.media/files/lion/eIyinD1FLsdjjreSZzD6d.jpeg", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value as the one passed in the\n input, or the randomly generated seed that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "title": "Scale", + "type": "number", + "maximum": 4, + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + }, + "weight_name": { + "title": "Weight Name", + "type": "string", + "description": "Name of the LoRA weight. Used only if `path` is a Hugging Face repository, and required only if you have more than 1 safetensors file in the repo."
+ } + }, + "x-fal-order-properties": [ + "path", + "weight_name", + "scale" + ], + "required": [ + "path" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/hidream-i1-full/image-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hidream-i1-full/image-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/hidream-i1-full/image-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HidreamI1FullImageToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hidream-i1-full/image-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HidreamI1FullImageToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ideogram/v3/reframe", + "metadata": { + "display_name": "Ideogram", + "category": "image-to-image", + "description": "Extend existing images with Ideogram V3's reframe feature. 
Create expanded versions and adaptations while preserving the main image and adding new creative directions through prompt guidance.", + "status": "active", + "tags": [ + "realism", + "typography" + ], + "updated_at": "2026-01-26T21:43:49.469Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/monkey/o7F_OomTu8c7O949nQZKc_7fca2610b9844c7086fedcf2e8df707b.jpg", + "model_url": "https://fal.run/fal-ai/ideogram/v3/reframe", + "license_type": "commercial", + "date": "2025-05-01T16:06:34.467Z", + "group": { + "key": "ideogram-v3", + "label": "Reframe" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ideogram/v3/reframe", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ideogram/v3/reframe queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ideogram/v3/reframe", + "category": "image-to-image", + "thumbnailUrl": "https://fal.media/files/monkey/o7F_OomTu8c7O949nQZKc_7fca2610b9844c7086fedcf2e8df707b.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ideogram/v3/reframe", + "documentationUrl": "https://fal.ai/models/fal-ai/ideogram/v3/reframe/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "IdeogramV3ReframeInput": { + "x-fal-order-properties": [ + "image_urls", + "rendering_speed", + "color_palette", + "style_codes", + "style", + "num_images", + "seed", + "sync_mode", + "style_preset", + "image_url", + "image_size" + ], + "type": "object", + "properties": { + "num_images": { + "minimum": 1, + "maximum": 8, + "type": "integer", + "description": "Number of images to generate.", + "title": "Num Images", + "default": 1 + }, + "image_size": { + "examples": [ + "square_hd" + ], + "title": "Image Size", + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The resolution for the reframed output image" + }, + "style": { + "anyOf": [ + { + "enum": [ + "AUTO", + "GENERAL", + "REALISTIC", + "DESIGN" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The style type to generate with. 
Cannot be used with style_codes.", + "title": "Style" + }, + "style_preset": { + "anyOf": [ + { + "enum": [ + "80S_ILLUSTRATION", + "90S_NOSTALGIA", + "ABSTRACT_ORGANIC", + "ANALOG_NOSTALGIA", + "ART_BRUT", + "ART_DECO", + "ART_POSTER", + "AURA", + "AVANT_GARDE", + "BAUHAUS", + "BLUEPRINT", + "BLURRY_MOTION", + "BRIGHT_ART", + "C4D_CARTOON", + "CHILDRENS_BOOK", + "COLLAGE", + "COLORING_BOOK_I", + "COLORING_BOOK_II", + "CUBISM", + "DARK_AURA", + "DOODLE", + "DOUBLE_EXPOSURE", + "DRAMATIC_CINEMA", + "EDITORIAL", + "EMOTIONAL_MINIMAL", + "ETHEREAL_PARTY", + "EXPIRED_FILM", + "FLAT_ART", + "FLAT_VECTOR", + "FOREST_REVERIE", + "GEO_MINIMALIST", + "GLASS_PRISM", + "GOLDEN_HOUR", + "GRAFFITI_I", + "GRAFFITI_II", + "HALFTONE_PRINT", + "HIGH_CONTRAST", + "HIPPIE_ERA", + "ICONIC", + "JAPANDI_FUSION", + "JAZZY", + "LONG_EXPOSURE", + "MAGAZINE_EDITORIAL", + "MINIMAL_ILLUSTRATION", + "MIXED_MEDIA", + "MONOCHROME", + "NIGHTLIFE", + "OIL_PAINTING", + "OLD_CARTOONS", + "PAINT_GESTURE", + "POP_ART", + "RETRO_ETCHING", + "RIVIERA_POP", + "SPOTLIGHT_80S", + "STYLIZED_RED", + "SURREAL_COLLAGE", + "TRAVEL_POSTER", + "VINTAGE_GEO", + "VINTAGE_POSTER", + "WATERCOLOR", + "WEIRD", + "WOODBLOCK_PRINT" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Style preset for generation. The chosen style preset will guide the generation.", + "title": "Style Preset" + }, + "rendering_speed": { + "enum": [ + "TURBO", + "BALANCED", + "QUALITY" + ], + "description": "The rendering speed to use.", + "type": "string", + "title": "Rendering Speed", + "default": "BALANCED" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "color_palette": { + "anyOf": [ + { + "$ref": "#/components/schemas/ColorPalette" + }, + { + "type": "null" + } + ], + "description": "A color palette for generation, must EITHER be specified via one of the presets (name) or explicitly via hexadecimal representations of the color with optional weights (members)" + }, + "style_codes": { + "anyOf": [ + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "null" + } + ], + "description": "A list of 8 character hexadecimal codes representing the style of the image. Cannot be used in conjunction with style_reference_images or style", + "title": "Style Codes" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/lion/0qJs_qW8nz0wYsXhFa6Tk.png" + ], + "title": "Image URL", + "type": "string", + "description": "The image URL to reframe" + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "Seed for the random number generator", + "title": "Seed" + }, + "image_urls": { + "anyOf": [ + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "null" + } + ], + "description": "A set of images to use as style references (maximum total size 10MB across all style references). 
The images should be in JPEG, PNG or WebP format", + "title": "Image Urls" + } + }, + "title": "ReframeImageInputV3", + "required": [ + "image_url", + "image_size" + ] + }, + "IdeogramV3ReframeOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3.fal.media/files/zebra/LVW4AhVs3sCxsVKdg3EfT_image.png" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/File" + } + }, + "seed": { + "examples": [ + 123456 + ], + "title": "Seed", + "type": "integer", + "description": "Seed used for the random number generator" + } + }, + "title": "ReframeOutputV3", + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Height", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Width", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "ColorPalette": { + "x-fal-order-properties": [ + "members", + "name" + ], + "type": "object", + "properties": { + "members": { + "anyOf": [ + { + "type": "array", + "items": { + "$ref": "#/components/schemas/ColorPaletteMember" + } + }, + { + "type": "null" + } + ], + "description": "A list of color palette members that define the color palette", + "title": "Members" + }, + "name": { + "anyOf": [ + { + "enum": [ + "EMBER", + "FRESH", + "JUNGLE", + "MAGIC", + "MELON", + "MOSAIC", + "PASTEL", + "ULTRAMARINE" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "description": "A color palette preset value", + "title": "Name" + } + }, + "title": "ColorPalette" + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file." 
+ }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "title": "File", + "required": [ + "url" + ] + }, + "ColorPaletteMember": { + "x-fal-order-properties": [ + "rgb", + "color_weight" + ], + "type": "object", + "properties": { + "color_weight": { + "anyOf": [ + { + "minimum": 0.05, + "maximum": 1, + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The weight of the color in the color palette", + "title": "Color Weight", + "default": 0.5 + }, + "rgb": { + "description": "RGB color value for the palette member", + "$ref": "#/components/schemas/RGBColor" + } + }, + "title": "ColorPaletteMember", + "required": [ + "rgb" + ] + }, + "RGBColor": { + "x-fal-order-properties": [ + "r", + "g", + "b" + ], + "type": "object", + "properties": { + "r": { + "minimum": 0, + "maximum": 255, + "type": "integer", + "description": "Red color value", + "title": "R", + "default": 0 + }, + "b": { + "minimum": 0, + "maximum": 255, + "type": "integer", + "description": "Blue color value", + "title": "B", + "default": 0 + }, + "g": { + "minimum": 0, + "maximum": 255, + "type": "integer", + "description": "Green color value", + "title": "G", + "default": 0 + } + }, + "title": "RGBColor" + } + } + }, + "paths": { + "/fal-ai/ideogram/v3/reframe/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v3/reframe/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v3/reframe": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdeogramV3ReframeInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v3/reframe/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdeogramV3ReframeOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ideogram/v3/replace-background", + "metadata": { + "display_name": "Ideogram Replace Background", + "category": "image-to-image", + "description": "Replace the backgrounds of existing images with Ideogram V3's replace background feature. Create variations and adaptations while preserving core elements and adding new creative directions through prompt guidance.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:49.719Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/zebra/ejW0ul_u92u1TpdOpmfGQ_491bbb82cd504292beb0b5ad937dd024.jpg", + "model_url": "https://fal.run/fal-ai/ideogram/v3/replace-background", + "license_type": "commercial", + "date": "2025-05-01T16:02:45.350Z", + "group": { + "key": "ideogram-v3", + "label": "Replace Background" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ideogram/v3/replace-background", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ideogram/v3/replace-background queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ideogram/v3/replace-background", + "category": "image-to-image", + "thumbnailUrl": "https://fal.media/files/zebra/ejW0ul_u92u1TpdOpmfGQ_491bbb82cd504292beb0b5ad937dd024.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ideogram/v3/replace-background", + "documentationUrl": "https://fal.ai/models/fal-ai/ideogram/v3/replace-background/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position. 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "IdeogramV3ReplaceBackgroundInput": { + "x-fal-order-properties": [ + "image_urls", + "rendering_speed", + "color_palette", + "style_codes", + "style", + "expand_prompt", + "num_images", + "seed", + "sync_mode", + "style_preset", + "prompt", + "image_url" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A beautiful sunset over mountains that writes Ideogram v3 in fal.ai" + ], + "title": "Prompt", + "type": "string", + "description": "Cyber punk city with neon lights and skyscrapers" + }, + "num_images": { + "minimum": 1, + "maximum": 8, + "type": "integer", + "description": "Number of images to generate.", + "title": "Num Images", + "default": 1 + }, + "style": { + "anyOf": [ + { + "enum": [ + "AUTO", + "GENERAL", + "REALISTIC", + "DESIGN" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The style type to generate with. Cannot be used with style_codes.", + "title": "Style" + }, + "style_preset": { + "anyOf": [ + { + "enum": [ + "80S_ILLUSTRATION", + "90S_NOSTALGIA", + "ABSTRACT_ORGANIC", + "ANALOG_NOSTALGIA", + "ART_BRUT", + "ART_DECO", + "ART_POSTER", + "AURA", + "AVANT_GARDE", + "BAUHAUS", + "BLUEPRINT", + "BLURRY_MOTION", + "BRIGHT_ART", + "C4D_CARTOON", + "CHILDRENS_BOOK", + "COLLAGE", + "COLORING_BOOK_I", + "COLORING_BOOK_II", + "CUBISM", + "DARK_AURA", + "DOODLE", + "DOUBLE_EXPOSURE", + "DRAMATIC_CINEMA", + "EDITORIAL", + "EMOTIONAL_MINIMAL", + "ETHEREAL_PARTY", + "EXPIRED_FILM", + "FLAT_ART", + "FLAT_VECTOR", + "FOREST_REVERIE", + "GEO_MINIMALIST", + "GLASS_PRISM", + "GOLDEN_HOUR", + "GRAFFITI_I", + "GRAFFITI_II", + "HALFTONE_PRINT", + "HIGH_CONTRAST", + "HIPPIE_ERA", + "ICONIC", + "JAPANDI_FUSION", + "JAZZY", + "LONG_EXPOSURE", + "MAGAZINE_EDITORIAL", + "MINIMAL_ILLUSTRATION", + "MIXED_MEDIA", + "MONOCHROME", + "NIGHTLIFE", + "OIL_PAINTING", + "OLD_CARTOONS", + "PAINT_GESTURE", + "POP_ART", + "RETRO_ETCHING", + "RIVIERA_POP", + "SPOTLIGHT_80S", + "STYLIZED_RED", + "SURREAL_COLLAGE", + "TRAVEL_POSTER", + "VINTAGE_GEO", + "VINTAGE_POSTER", + "WATERCOLOR", + "WEIRD", + "WOODBLOCK_PRINT" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Style preset for generation. The chosen style preset will guide the generation.", + "title": "Style Preset" + }, + "expand_prompt": { + "description": "Determine if MagicPrompt should be used in generating the request or not.", + "type": "boolean", + "title": "Expand Prompt", + "default": true + }, + "rendering_speed": { + "enum": [ + "TURBO", + "BALANCED", + "QUALITY" + ], + "description": "The rendering speed to use.", + "type": "string", + "title": "Rendering Speed", + "default": "BALANCED" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "color_palette": { + "anyOf": [ + { + "$ref": "#/components/schemas/ColorPalette" + }, + { + "type": "null" + } + ], + "description": "A color palette for generation, must EITHER be specified via one of the presets (name) or explicitly via hexadecimal representations of the color with optional weights (members)" + }, + "style_codes": { + "anyOf": [ + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "null" + } + ], + "description": "A list of 8 character hexadecimal codes representing the style of the image. 
Cannot be used in conjunction with style_reference_images or style", + "title": "Style Codes" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/rabbit/F6dvKPFL9VzKiM8asJOgm_MJj6yUB6rGjTsv_1YHIcA_image.webp" + ], + "title": "Image URL", + "type": "string", + "description": "The image URL whose background needs to be replaced" + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "Seed for the random number generator", + "title": "Seed" + }, + "image_urls": { + "anyOf": [ + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "null" + } + ], + "description": "A set of images to use as style references (maximum total size 10MB across all style references). The images should be in JPEG, PNG or WebP format", + "title": "Image Urls" + } + }, + "title": "ReplaceBackgroundInputV3", + "required": [ + "prompt", + "image_url" + ] + }, + "IdeogramV3ReplaceBackgroundOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3.fal.media/files/lion/AUfCjtLkLOsdc9zEFrV-5_image.png" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/File" + } + }, + "seed": { + "examples": [ + 123456 + ], + "title": "Seed", + "type": "integer", + "description": "Seed used for the random number generator" + } + }, + "title": "ReplaceBackgroundOutputV3", + "required": [ + "images", + "seed" + ] + }, + "ColorPalette": { + "x-fal-order-properties": [ + "members", + "name" + ], + "type": "object", + "properties": { + "members": { + "anyOf": [ + { + "type": "array", + "items": { + "$ref": "#/components/schemas/ColorPaletteMember" + } + }, + { + "type": "null" + } + ], + "description": "A list of color palette members that define the color palette", + "title": "Members" + }, + "name": { + "anyOf": [ + { + "enum": [ + "EMBER", + "FRESH", + "JUNGLE", + "MAGIC", + "MELON", + "MOSAIC", + "PASTEL", + "ULTRAMARINE" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "description": "A color palette preset value", + "title": "Name" + } + }, + "title": "ColorPalette" + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file." 
+ }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "title": "File", + "required": [ + "url" + ] + }, + "ColorPaletteMember": { + "x-fal-order-properties": [ + "rgb", + "color_weight" + ], + "type": "object", + "properties": { + "color_weight": { + "anyOf": [ + { + "minimum": 0.05, + "maximum": 1, + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The weight of the color in the color palette", + "title": "Color Weight", + "default": 0.5 + }, + "rgb": { + "description": "RGB color value for the palette member", + "$ref": "#/components/schemas/RGBColor" + } + }, + "title": "ColorPaletteMember", + "required": [ + "rgb" + ] + }, + "RGBColor": { + "x-fal-order-properties": [ + "r", + "g", + "b" + ], + "type": "object", + "properties": { + "r": { + "minimum": 0, + "maximum": 255, + "type": "integer", + "description": "Red color value", + "title": "R", + "default": 0 + }, + "b": { + "minimum": 0, + "maximum": 255, + "type": "integer", + "description": "Blue color value", + "title": "B", + "default": 0 + }, + "g": { + "minimum": 0, + "maximum": 255, + "type": "integer", + "description": "Green color value", + "title": "G", + "default": 0 + } + }, + "title": "RGBColor" + } + } + }, + "paths": { + "/fal-ai/ideogram/v3/replace-background/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v3/replace-background/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v3/replace-background": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdeogramV3ReplaceBackgroundInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v3/replace-background/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdeogramV3ReplaceBackgroundOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ideogram/v3/remix", + "metadata": { + "display_name": "Ideogram", + "category": "image-to-image", + "description": "Reimagine existing images with Ideogram V3's remix feature. Create variations and adaptations while preserving core elements and adding new creative directions through prompt guidance.", + "status": "active", + "tags": [ + "realism", + "typography" + ], + "updated_at": "2026-01-26T21:43:50.002Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/tiger/H2-lBMLTJ9R_pMJKy1N1c_078a7fbed0f241d5a81afd34cc3d7233.jpg", + "model_url": "https://fal.run/fal-ai/ideogram/v3/remix", + "license_type": "commercial", + "date": "2025-05-01T16:01:06.063Z", + "group": { + "key": "ideogram-v3", + "label": "Remix" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ideogram/v3/remix", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ideogram/v3/remix queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ideogram/v3/remix", + "category": "image-to-image", + "thumbnailUrl": "https://fal.media/files/tiger/H2-lBMLTJ9R_pMJKy1N1c_078a7fbed0f241d5a81afd34cc3d7233.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ideogram/v3/remix", + "documentationUrl": "https://fal.ai/models/fal-ai/ideogram/v3/remix/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "IdeogramV3RemixInput": { + "x-fal-order-properties": [ + "image_urls", + "rendering_speed", + "color_palette", + "style_codes", + "style", + "expand_prompt", + "num_images", + "seed", + "sync_mode", + "prompt", + "image_url", + "strength", + "image_size", + "negative_prompt" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Old ancient city day light" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to remix the image with" + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The resolution of the generated image", + "title": "Image Size", + "default": "square_hd" + }, + "style": { + "anyOf": [ + { + "enum": [ + "AUTO", + "GENERAL", + "REALISTIC", + "DESIGN" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The style type to generate with. Cannot be used with style_codes.", + "title": "Style" + }, + "expand_prompt": { + "description": "Determine if MagicPrompt should be used in generating the request or not.", + "type": "boolean", + "title": "Expand Prompt", + "default": true + }, + "rendering_speed": { + "enum": [ + "TURBO", + "BALANCED", + "QUALITY" + ], + "description": "The rendering speed to use.", + "type": "string", + "title": "Rendering Speed", + "default": "BALANCED" + }, + "image_urls": { + "anyOf": [ + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "null" + } + ], + "description": "A set of images to use as style references (maximum total size 10MB across all style references). The images should be in JPEG, PNG or WebP format", + "title": "Image Urls" + }, + "negative_prompt": { + "description": "Description of what to exclude from an image. Descriptions in the prompt take precedence to descriptions in the negative prompt.", + "type": "string", + "title": "Negative Prompt", + "default": "" + }, + "num_images": { + "minimum": 1, + "maximum": 8, + "type": "integer", + "description": "Number of images to generate.", + "title": "Num Images", + "default": 1 + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/lion/9-Yt8JfTw4OxrAjiUzwP9_output.png" + ], + "title": "Image URL", + "type": "string", + "description": "The image URL to remix" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "color_palette": { + "anyOf": [ + { + "$ref": "#/components/schemas/ColorPalette" + }, + { + "type": "null" + } + ], + "description": "A color palette for generation, must EITHER be specified via one of the presets (name) or explicitly via hexadecimal representations of the color with optional weights (members)" + }, + "strength": { + "minimum": 0.01, + "maximum": 1, + "type": "number", + "description": "Strength of the input image in the remix", + "title": "Strength", + "default": 0.8 + }, + "style_codes": { + "anyOf": [ + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "null" + } + ], + "description": "A list of 8 character hexadecimal codes representing the style of the image. 
Cannot be used in conjunction with style_reference_images or style", + "title": "Style Codes" + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "Seed for the random number generator", + "title": "Seed" + } + }, + "title": "RemixImageInputV3", + "required": [ + "prompt", + "image_url" + ] + }, + "IdeogramV3RemixOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3.fal.media/files/koala/eYZG26O54NTdWzdpDWBL-_image.png" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/File" + } + }, + "seed": { + "examples": [ + 123456 + ], + "title": "Seed", + "type": "integer", + "description": "Seed used for the random number generator" + } + }, + "title": "RemixOutputV3", + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Height", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Width", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "ColorPalette": { + "x-fal-order-properties": [ + "members", + "name" + ], + "type": "object", + "properties": { + "members": { + "anyOf": [ + { + "type": "array", + "items": { + "$ref": "#/components/schemas/ColorPaletteMember" + } + }, + { + "type": "null" + } + ], + "description": "A list of color palette members that define the color palette", + "title": "Members" + }, + "name": { + "anyOf": [ + { + "enum": [ + "EMBER", + "FRESH", + "JUNGLE", + "MAGIC", + "MELON", + "MOSAIC", + "PASTEL", + "ULTRAMARINE" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "description": "A color palette preset value", + "title": "Name" + } + }, + "title": "ColorPalette" + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file." 
+ }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "title": "File", + "required": [ + "url" + ] + }, + "ColorPaletteMember": { + "x-fal-order-properties": [ + "rgb", + "color_weight" + ], + "type": "object", + "properties": { + "color_weight": { + "anyOf": [ + { + "minimum": 0.05, + "maximum": 1, + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The weight of the color in the color palette", + "title": "Color Weight", + "default": 0.5 + }, + "rgb": { + "description": "RGB color value for the palette member", + "$ref": "#/components/schemas/RGBColor" + } + }, + "title": "ColorPaletteMember", + "required": [ + "rgb" + ] + }, + "RGBColor": { + "x-fal-order-properties": [ + "r", + "g", + "b" + ], + "type": "object", + "properties": { + "r": { + "minimum": 0, + "maximum": 255, + "type": "integer", + "description": "Red color value", + "title": "R", + "default": 0 + }, + "b": { + "minimum": 0, + "maximum": 255, + "type": "integer", + "description": "Blue color value", + "title": "B", + "default": 0 + }, + "g": { + "minimum": 0, + "maximum": 255, + "type": "integer", + "description": "Green color value", + "title": "G", + "default": 0 + } + }, + "title": "RGBColor" + } + } + }, + "paths": { + "/fal-ai/ideogram/v3/remix/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v3/remix/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v3/remix": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdeogramV3RemixInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v3/remix/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdeogramV3RemixOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ideogram/v3/edit", + "metadata": { + "display_name": "Ideogram V3 Edit", + "category": "image-to-image", + "description": "Transform existing images with Ideogram V3's editing capabilities. Modify, adjust, and refine images while maintaining high fidelity and realistic outputs with precise prompt control.", + "status": "active", + "tags": [ + "realism", + "typography" + ], + "updated_at": "2026-01-26T21:43:50.301Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/ideogram.webp", + "model_url": "https://fal.run/fal-ai/ideogram/v3/edit", + "license_type": "commercial", + "date": "2025-05-01T15:58:51.368Z", + "group": { + "key": "ideogram-v3", + "label": "Edit" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ideogram/v3/edit", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ideogram/v3/edit queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ideogram/v3/edit", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/ideogram.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/ideogram/v3/edit", + "documentationUrl": "https://fal.ai/models/fal-ai/ideogram/v3/edit/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "IdeogramV3EditInput": { + "x-fal-order-properties": [ + "image_urls", + "rendering_speed", + "color_palette", + "style_codes", + "expand_prompt", + "num_images", + "seed", + "sync_mode", + "style_preset", + "prompt", + "image_url", + "mask_url" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "black bag" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to fill the masked part of the image." + }, + "num_images": { + "minimum": 1, + "maximum": 8, + "type": "integer", + "description": "Number of images to generate.", + "title": "Num Images", + "default": 1 + }, + "style_preset": { + "anyOf": [ + { + "enum": [ + "80S_ILLUSTRATION", + "90S_NOSTALGIA", + "ABSTRACT_ORGANIC", + "ANALOG_NOSTALGIA", + "ART_BRUT", + "ART_DECO", + "ART_POSTER", + "AURA", + "AVANT_GARDE", + "BAUHAUS", + "BLUEPRINT", + "BLURRY_MOTION", + "BRIGHT_ART", + "C4D_CARTOON", + "CHILDRENS_BOOK", + "COLLAGE", + "COLORING_BOOK_I", + "COLORING_BOOK_II", + "CUBISM", + "DARK_AURA", + "DOODLE", + "DOUBLE_EXPOSURE", + "DRAMATIC_CINEMA", + "EDITORIAL", + "EMOTIONAL_MINIMAL", + "ETHEREAL_PARTY", + "EXPIRED_FILM", + "FLAT_ART", + "FLAT_VECTOR", + "FOREST_REVERIE", + "GEO_MINIMALIST", + "GLASS_PRISM", + "GOLDEN_HOUR", + "GRAFFITI_I", + "GRAFFITI_II", + "HALFTONE_PRINT", + "HIGH_CONTRAST", + "HIPPIE_ERA", + "ICONIC", + "JAPANDI_FUSION", + "JAZZY", + "LONG_EXPOSURE", + "MAGAZINE_EDITORIAL", + "MINIMAL_ILLUSTRATION", + "MIXED_MEDIA", + "MONOCHROME", + "NIGHTLIFE", + "OIL_PAINTING", + "OLD_CARTOONS", + "PAINT_GESTURE", + "POP_ART", + "RETRO_ETCHING", + "RIVIERA_POP", + "SPOTLIGHT_80S", + "STYLIZED_RED", + "SURREAL_COLLAGE", + "TRAVEL_POSTER", + "VINTAGE_GEO", + "VINTAGE_POSTER", + "WATERCOLOR", + "WEIRD", + "WOODBLOCK_PRINT" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Style preset for generation. The chosen style preset will guide the generation.", + "title": "Style Preset" + }, + "expand_prompt": { + "description": "Determine if MagicPrompt should be used in generating the request or not.", + "type": "boolean", + "title": "Expand Prompt", + "default": true + }, + "rendering_speed": { + "enum": [ + "TURBO", + "BALANCED", + "QUALITY" + ], + "description": "The rendering speed to use.", + "type": "string", + "title": "Rendering Speed", + "default": "BALANCED" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "color_palette": { + "anyOf": [ + { + "$ref": "#/components/schemas/ColorPalette" + }, + { + "type": "null" + } + ], + "description": "A color palette for generation, must EITHER be specified via one of the presets (name) or explicitly via hexadecimal representations of the color with optional weights (members)" + }, + "style_codes": { + "anyOf": [ + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "null" + } + ], + "description": "A list of 8 character hexadecimal codes representing the style of the image. Cannot be used in conjunction with style_reference_images or style", + "title": "Style Codes" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/panda/-LC_gNNV3wUHaGMQT3klE_output.png" + ], + "title": "Image URL", + "type": "string", + "description": "The image URL to generate an image from. MUST have the exact same dimensions (width and height) as the mask image." 
+ }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "Seed for the random number generator", + "title": "Seed" + }, + "image_urls": { + "anyOf": [ + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "null" + } + ], + "description": "A set of images to use as style references (maximum total size 10MB across all style references). The images should be in JPEG, PNG or WebP format", + "title": "Image Urls" + }, + "mask_url": { + "examples": [ + "https://v3.fal.media/files/kangaroo/1dd3zEL5MXQ3Kb4-mRi9d_indir%20(20).png" + ], + "title": "Mask URL", + "type": "string", + "description": "The mask URL to inpaint the image. MUST have the exact same dimensions (width and height) as the input image." + } + }, + "title": "EditImageInputV3", + "required": [ + "prompt", + "image_url", + "mask_url" + ] + }, + "IdeogramV3EditOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3.fal.media/files/panda/xr7EI_0X5kM8fDOjjcMei_image.png" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/File" + } + }, + "seed": { + "examples": [ + 123456 + ], + "title": "Seed", + "type": "integer", + "description": "Seed used for the random number generator" + } + }, + "title": "EditOutputV3", + "required": [ + "images", + "seed" + ] + }, + "ColorPalette": { + "x-fal-order-properties": [ + "members", + "name" + ], + "type": "object", + "properties": { + "members": { + "anyOf": [ + { + "type": "array", + "items": { + "$ref": "#/components/schemas/ColorPaletteMember" + } + }, + { + "type": "null" + } + ], + "description": "A list of color palette members that define the color palette", + "title": "Members" + }, + "name": { + "anyOf": [ + { + "enum": [ + "EMBER", + "FRESH", + "JUNGLE", + "MAGIC", + "MELON", + "MOSAIC", + "PASTEL", + "ULTRAMARINE" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "description": "A color palette preset value", + "title": "Name" + } + }, + "title": "ColorPalette" + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file." 
+ }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "title": "File", + "required": [ + "url" + ] + }, + "ColorPaletteMember": { + "x-fal-order-properties": [ + "rgb", + "color_weight" + ], + "type": "object", + "properties": { + "color_weight": { + "anyOf": [ + { + "minimum": 0.05, + "maximum": 1, + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The weight of the color in the color palette", + "title": "Color Weight", + "default": 0.5 + }, + "rgb": { + "description": "RGB color value for the palette member", + "$ref": "#/components/schemas/RGBColor" + } + }, + "title": "ColorPaletteMember", + "required": [ + "rgb" + ] + }, + "RGBColor": { + "x-fal-order-properties": [ + "r", + "g", + "b" + ], + "type": "object", + "properties": { + "r": { + "minimum": 0, + "maximum": 255, + "type": "integer", + "description": "Red color value", + "title": "R", + "default": 0 + }, + "b": { + "minimum": 0, + "maximum": 255, + "type": "integer", + "description": "Blue color value", + "title": "B", + "default": 0 + }, + "g": { + "minimum": 0, + "maximum": 255, + "type": "integer", + "description": "Green color value", + "title": "G", + "default": 0 + } + }, + "title": "RGBColor" + } + } + }, + "paths": { + "/fal-ai/ideogram/v3/edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v3/edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v3/edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdeogramV3EditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v3/edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdeogramV3EditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/step1x-edit", + "metadata": { + "display_name": "Step1X Edit", + "category": "image-to-image", + "description": "Step1X-Edit transforms your photos with simple instructions into stunning, professional-quality edits—rivaling top proprietary tools.", + "status": "active", + "tags": [ + "editing" + ], + "updated_at": "2026-01-26T21:43:51.497Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-3.jpg", + "model_url": "https://fal.run/fal-ai/step1x-edit", + "license_type": "commercial", + "date": "2025-04-25T21:29:50.810Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/step1x-edit", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/step1x-edit queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/step1x-edit", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-3.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/step1x-edit", + "documentationUrl": "https://fal.ai/models/fal-ai/step1x-edit/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Step1xEditInput": { + "x-fal-order-properties": [ + "prompt", + "image_url", + "negative_prompt", + "seed", + "guidance_scale", + "num_inference_steps", + "sync_mode", + "enable_safety_checker", + "output_format" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "make head band red" + ], + "description": "The prompt to generate an image from.", + "type": "string", + "title": "Prompt" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/girl_2.png" + ], + "description": "The image URL to generate an image from.", + "type": "string", + "title": "Image URL" + }, + "sync_mode": { + "description": "\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "seed": { + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "type": "integer", + "title": "Seed" + }, + "guidance_scale": { + "minimum": 0, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "type": "number", + "title": "Guidance scale (CFG)", + "maximum": 20, + "default": 4 + }, + "negative_prompt": { + "examples": [ + "" + ], + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + "type": "string", + "title": "Negative Prompt", + "default": "" + }, + "num_inference_steps": { + "minimum": 1, + "description": "The number of inference steps to perform.", + "type": "integer", + "title": "Num Inference Steps", + "maximum": 50, + "default": 30 + } + }, + "title": "TextToImageInput", + "required": [ + "prompt", + "image_url" + ] + }, + "Step1xEditOutput": { + "title": "ImageOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "examples": [ + [ + { + "height": 1024, + "content_type": "image/jpeg", + "url": "https://v3.fal.media/files/kangaroo/kFPr5gC_Rr9JZbTTakEMd.jpeg", + "width": 672 + } + ] + ], + "description": "The generated images", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "description": "Represents an image file.", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/step1x-edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/step1x-edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/step1x-edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Step1xEditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/step1x-edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Step1xEditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image2svg", + "metadata": { + "display_name": "Image2svg", + "category": "image-to-image", + "description": "Image2SVG transforms raster images into clean vector graphics, preserving visual quality while enabling scalable, customizable SVG outputs with precise control over detail levels.", + "status": "active", + "tags": [ + "utility", + "editing" + ], + "updated_at": "2026-01-26T21:43:51.749Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Training.jpg", + "model_url": "https://fal.run/fal-ai/image2svg", + "license_type": "commercial", + "date": "2025-04-25T16:18:56.329Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image2svg", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image2svg queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image2svg", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Training.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image2svg", + "documentationUrl": "https://fal.ai/models/fal-ai/image2svg/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Image2svgInput": { + "title": "Image2SVGInput", + "type": "object", + "properties": { + "splice_threshold": { + "minimum": 0, + "maximum": 90, + "type": "integer", + "title": "Splice Threshold", + "description": "Splice threshold for joining paths", + "default": 45 + }, + "hierarchical": { + "enum": [ + "stacked", + "cutout" + ], + "title": "Hierarchical", + "type": "string", + "description": "Hierarchical mode: stacked or cutout", + "default": "stacked" + }, + "color_precision": { + "minimum": 1, + "maximum": 10, + "type": "integer", + "title": "Color Precision", + "description": "Color quantization level", + "default": 6 + }, + "colormode": { + "enum": [ + "color", + "binary" + ], + "title": "Colormode", + "type": "string", + "description": "Choose between color or binary (black and white) output", + "default": "color" + }, + "max_iterations": { + "minimum": 1, + "maximum": 20, + "type": "integer", + "title": "Max Iterations", + "description": "Maximum number of iterations for optimization", + "default": 10 + }, + "length_threshold": { + "minimum": 0, + "maximum": 10, + "type": "number", + "title": "Length Threshold", + "description": "Length threshold for curves/lines", + "default": 4 + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/kangaroo/EfqY747bBKy1Ynrgbk5ba_04pwiD1LTsnMZuyEyw757_8f986248a89845d3ba90c23b14089f10.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "The image to convert to SVG" + }, + "mode": { + "enum": [ + "spline", + "polygon" + ], + "title": "Mode", + "type": "string", + "description": "Mode: spline (curved) or polygon (straight lines)", + "default": "spline" + }, + "corner_threshold": { + "minimum": 0, + "maximum": 180, + "type": "integer", + "title": "Corner Threshold", + "description": "Corner detection threshold in degrees", + "default": 60 + }, + "path_precision": { + "minimum": 1, + "maximum": 10, + "type": "integer", + "title": "Path Precision", + "description": "Decimal precision for path coordinates", + "default": 3 + }, + "filter_speckle": { + "minimum": 0, + "maximum": 20, + "type": "integer", + "title": "Filter Speckle", + "description": "Filter out small speckles and noise", + "default": 4 + }, + "layer_difference": { + "minimum": 1, + "maximum": 32, + "type": "integer", + "title": "Layer Difference", + "description": "Layer difference threshold for hierarchical mode", + "default": 16 + } + }, + "x-fal-order-properties": [ + "image_url", + "colormode", + "hierarchical", + "mode", + "filter_speckle", + "color_precision", + "layer_difference", + "corner_threshold", + "length_threshold", + "max_iterations", + "splice_threshold", + "path_precision" + ], + "required": [ + "image_url" + ] + }, + "Image2svgOutput": { + "title": "Image2SVGOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "file_size": 1247850, + "file_name": "output.svg", + "content_type": "image/svg+xml", + "url": "https://v3.fal.media/files/koala/B31yLkOEc6AVTzgeyr_9K_output.svg" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The converted SVG file", + "items": { + "$ref": "#/components/schemas/File" + } + } + }, + "x-fal-order-properties": [ + "images" + ], + "required": [ + "images" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." 
+ }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image2svg/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image2svg/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/image2svg": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Image2svgInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image2svg/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Image2svgOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/uno", + "metadata": { + "display_name": "Uno", + "category": "image-to-image", + "description": "An AI model that transforms input images into new ones based on text prompts, blending reference visuals with your creative directions.", + "status": "active", + "tags": [ + "image-to-image" + ], + "updated_at": "2026-01-26T21:43:51.874Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Upscale-3.jpeg", + "model_url": "https://fal.run/fal-ai/uno", + "license_type": "commercial", + "date": "2025-04-24T15:42:26.644Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/uno", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/uno queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/uno", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Upscale-3.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/uno", + "documentationUrl": "https://fal.ai/models/fal-ai/uno/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "UnoInput": { + "title": "UNOInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "The figurine is in the crystal ball" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." 
+                },
+                "num_images": {
+                  "minimum": 1,
+                  "title": "Num Images",
+                  "type": "integer",
+                  "maximum": 4,
+                  "description": "The number of images to generate.",
+                  "default": 1
+                },
+                "image_size": {
+                  "anyOf": [
+                    {
+                      "$ref": "#/components/schemas/ImageSize"
+                    },
+                    {
+                      "enum": [
+                        "square_hd",
+                        "square",
+                        "portrait_4_3",
+                        "portrait_16_9",
+                        "landscape_4_3",
+                        "landscape_16_9"
+                      ],
+                      "type": "string"
+                    }
+                  ],
+                  "title": "Image Size",
+                  "description": "\n The size of the generated image. You can choose between some presets or custom height and width\n that **must be multiples of 8**.\n ",
+                  "default": "square_hd"
+                },
+                "output_format": {
+                  "enum": [
+                    "jpeg",
+                    "png"
+                  ],
+                  "title": "Output Format",
+                  "type": "string",
+                  "description": "The format of the generated image.",
+                  "default": "jpeg"
+                },
+                "input_image_urls": {
+                  "examples": [
+                    [
+                      "https://storage.googleapis.com/falserverless/UNO/figurine.png",
+                      "https://storage.googleapis.com/falserverless/UNO/crystal_ball.png"
+                    ]
+                  ],
+                  "title": "Input Image Urls",
+                  "type": "array",
+                  "description": "URLs of images to use while generating the image.",
+                  "items": {
+                    "type": "string"
+                  }
+                },
+                "sync_mode": {
+                  "title": "Sync Mode",
+                  "type": "boolean",
+                  "description": "\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ",
+                  "default": false
+                },
+                "guidance_scale": {
+                  "minimum": 1,
+                  "title": "Guidance scale (CFG)",
+                  "type": "number",
+                  "maximum": 20,
+                  "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ",
+                  "default": 3.5
+                },
+                "num_inference_steps": {
+                  "minimum": 1,
+                  "title": "Num Inference Steps",
+                  "type": "integer",
+                  "maximum": 50,
+                  "description": "The number of inference steps to perform.",
+                  "default": 28
+                },
+                "seed": {
+                  "title": "Seed",
+                  "type": "integer",
+                  "description": "Random seed for reproducible generation. If not set, a random seed will be used."
+                },
+                "enable_safety_checker": {
+                  "title": "Enable Safety Checker",
+                  "type": "boolean",
+                  "description": "If set to true, the safety checker will be enabled.",
+                  "default": true
+                }
+              },
+              "x-fal-order-properties": [
+                "input_image_urls",
+                "image_size",
+                "prompt",
+                "seed",
+                "num_images",
+                "num_inference_steps",
+                "guidance_scale",
+                "output_format",
+                "enable_safety_checker",
+                "sync_mode"
+              ],
+              "required": [
+                "input_image_urls",
+                "prompt"
+              ]
+            },
+            "UnoOutput": {
+              "title": "UNOOutput",
+              "type": "object",
+              "properties": {
+                "prompt": {
+                  "examples": [
+                    "The figurine is in the crystal ball"
+                  ],
+                  "title": "Prompt",
+                  "type": "string",
+                  "description": "The prompt used to generate the image."
+ }, + "images": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/UNO/output.jpeg" + ] + ], + "title": "Images", + "type": "array", + "description": "The URLs of the generated images.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "description": "Represents an image file.", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/uno/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+                  }
+                }
+              ],
+              "responses": {
+                "200": {
+                  "description": "The request status.",
+                  "content": {
+                    "application/json": {
+                      "schema": {
+                        "$ref": "#/components/schemas/QueueStatus"
+                      }
+                    }
+                  }
+                }
+              }
+            }
+          },
+          "/fal-ai/uno/requests/{request_id}/cancel": {
+            "put": {
+              "parameters": [
+                {
+                  "name": "request_id",
+                  "in": "path",
+                  "required": true,
+                  "schema": {
+                    "type": "string",
+                    "description": "Request ID"
+                  }
+                }
+              ],
+              "responses": {
+                "200": {
+                  "description": "The request was cancelled.",
+                  "content": {
+                    "application/json": {
+                      "schema": {
+                        "type": "object",
+                        "properties": {
+                          "success": {
+                            "type": "boolean",
+                            "description": "Whether the request was cancelled successfully."
+                          }
+                        }
+                      }
+                    }
+                  }
+                }
+              }
+            }
+          },
+          "/fal-ai/uno": {
+            "post": {
+              "requestBody": {
+                "required": true,
+                "content": {
+                  "application/json": {
+                    "schema": {
+                      "$ref": "#/components/schemas/UnoInput"
+                    }
+                  }
+                }
+              },
+              "responses": {
+                "200": {
+                  "description": "The request status.",
+                  "content": {
+                    "application/json": {
+                      "schema": {
+                        "$ref": "#/components/schemas/QueueStatus"
+                      }
+                    }
+                  }
+                }
+              }
+            }
+          },
+          "/fal-ai/uno/requests/{request_id}": {
+            "get": {
+              "parameters": [
+                {
+                  "name": "request_id",
+                  "in": "path",
+                  "required": true,
+                  "schema": {
+                    "type": "string",
+                    "description": "Request ID"
+                  }
+                }
+              ],
+              "responses": {
+                "200": {
+                  "description": "Result of the request.",
+                  "content": {
+                    "application/json": {
+                      "schema": {
+                        "$ref": "#/components/schemas/UnoOutput"
+                      }
+                    }
+                  }
+                }
+              }
+            }
+          }
+        },
+        "servers": [
+          {
+            "url": "https://queue.fal.run"
+          }
+        ],
+        "security": [
+          {
+            "apiKeyAuth": []
+          }
+        ]
+      }
+    },
+    {
+      "endpoint_id": "fal-ai/gpt-image-1/edit-image",
+      "metadata": {
+        "display_name": "gpt-image-1",
+        "category": "image-to-image",
+        "description": "OpenAI's latest image generation and editing model: gpt-image-1.",
+        "status": "active",
+        "tags": [],
+        "updated_at": "2026-01-26T21:43:53.161Z",
+        "is_favorited": false,
+        "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Upscale-2.jpg",
+        "model_url": "https://fal.run/fal-ai/gpt-image-1/edit-image",
+        "license_type": "commercial",
+        "date": "2025-04-23T17:29:21.329Z",
+        "group": {
+          "key": "gpt-image-1",
+          "label": "Edit Images"
+        },
+        "highlighted": false,
+        "kind": "inference",
+        "pinned": false
+      },
+      "openapi": {
+        "openapi": "3.0.4",
+        "info": {
+          "title": "Queue OpenAPI for fal-ai/gpt-image-1/edit-image",
+          "version": "1.0.0",
+          "description": "The OpenAPI schema for the fal-ai/gpt-image-1/edit-image queue.",
+          "x-fal-metadata": {
+            "endpointId": "fal-ai/gpt-image-1/edit-image",
+            "category": "image-to-image",
+            "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Upscale-2.jpg",
+            "playgroundUrl": "https://fal.ai/models/fal-ai/gpt-image-1/edit-image",
+            "documentationUrl": "https://fal.ai/models/fal-ai/gpt-image-1/edit-image/api"
+          }
+        },
+        "components": {
+          "securitySchemes": {
+            "apiKeyAuth": {
+              "type": "apiKey",
+              "in": "header",
+              "name": "Authorization",
+              "description": "Fal Key"
+            }
+          },
+          "schemas": {
+            "QueueStatus": {
+              "type": "object",
+              "properties": {
+                "status": {
+                  "type": "string",
+                  "enum": [
+                    "IN_QUEUE",
+                    "IN_PROGRESS",
+                    "COMPLETED"
+                  ]
+                },
+                "request_id": {
+                  "type": "string",
+                  "description": "The request id."
+                },
+                "response_url": {
+                  "type": "string",
+                  "description": "The response url."
+                },
+                "status_url": {
+                  "type": "string",
+                  "description": "The status url."
+                },
+                "cancel_url": {
+                  "type": "string",
+                  "description": "The cancel url."
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "GptImage1EditImageInput": { + "title": "EditImageRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Make this pixel-art style." + ], + "title": "Prompt", + "minLength": 2, + "type": "string", + "description": "The prompt for image generation" + }, + "num_images": { + "description": "Number of images to generate", + "type": "integer", + "minimum": 1, + "title": "Number of Images", + "maximum": 4, + "examples": [ + 1 + ], + "default": 1 + }, + "image_size": { + "enum": [ + "auto", + "1024x1024", + "1536x1024", + "1024x1536" + ], + "title": "Image Size", + "type": "string", + "description": "Aspect ratio for the generated image", + "default": "auto" + }, + "background": { + "enum": [ + "auto", + "transparent", + "opaque" + ], + "title": "Background", + "type": "string", + "description": "Background for the generated image", + "default": "auto" + }, + "quality": { + "enum": [ + "auto", + "low", + "medium", + "high" + ], + "title": "Quality", + "type": "string", + "description": "Quality for the generated image", + "default": "auto" + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "Output format for the images", + "default": "png" + }, + "input_fidelity": { + "enum": [ + "low", + "high" + ], + "title": "Input Fidelity", + "type": "string", + "description": "Input fidelity for the generated image", + "default": "high" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "image_urls": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/model_tests/gpt-image-1/cyberpunk.png" + ] + ], + "title": "Image URLs", + "type": "array", + "description": "The URLs of the images to use as a reference for the generation.", + "items": { + "type": "string" + } + } + }, + "x-fal-order-properties": [ + "prompt", + "image_urls", + "image_size", + "background", + "quality", + "input_fidelity", + "num_images", + "output_format", + "sync_mode" + ], + "required": [ + "prompt", + "image_urls" + ] + }, + "GptImage1EditImageOutput": { + "title": "EditImageResponse", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "height": 1536, + "file_name": "cyberpunk_pixel.png", + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/model_tests/gpt-image-1/cyberpunk_pixel.png", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated images.", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + } + }, + "x-fal-order-properties": [ + "images" + ], + "required": [ + "images" + ] + }, + "ImageFile": { + "title": "ImageFile", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." 
+ }, + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the image" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the image" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/gpt-image-1/edit-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/gpt-image-1/edit-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/gpt-image-1/edit-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GptImage1EditImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/gpt-image-1/edit-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GptImage1EditImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "rundiffusion-fal/juggernaut-flux-lora/inpainting", + "metadata": { + "display_name": "Juggernaut Flux Lora", + "category": "image-to-image", + "description": "Juggernaut Base Flux LoRA Inpainting by RunDiffusion is a drop-in replacement for Flux [Dev] inpainting that delivers sharper details, richer colors, and enhanced realism to all your LoRAs and LyCORIS with full compatibility.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:54.531Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/juggernaut-flux-lora.webp", + "model_url": "https://fal.run/rundiffusion-fal/juggernaut-flux-lora/inpainting", + "license_type": "commercial", + "date": "2025-04-21T21:29:32.480Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for rundiffusion-fal/juggernaut-flux-lora/inpainting", + "version": "1.0.0", + "description": "The OpenAPI schema for the rundiffusion-fal/juggernaut-flux-lora/inpainting queue.", + "x-fal-metadata": { + "endpointId": "rundiffusion-fal/juggernaut-flux-lora/inpainting", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/juggernaut-flux-lora.webp", + "playgroundUrl": "https://fal.ai/models/rundiffusion-fal/juggernaut-flux-lora/inpainting", + "documentationUrl": "https://fal.ai/models/rundiffusion-fal/juggernaut-flux-lora/inpainting/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+                }
+              },
+              "required": [
+                "status",
+                "request_id"
+              ]
+            },
+            "JuggernautFluxLoraInpaintingInput": {
+              "title": "InpaintInput",
+              "type": "object",
+              "properties": {
+                "prompt": {
+                  "examples": [
+                    "A photo of a lion sitting on a stone bench"
+                  ],
+                  "title": "Prompt",
+                  "type": "string",
+                  "description": "The prompt to generate an image from."
+                },
+                "num_images": {
+                  "minimum": 1,
+                  "maximum": 4,
+                  "type": "integer",
+                  "title": "Num Images",
+                  "description": "The number of images to generate.",
+                  "default": 1
+                },
+                "image_size": {
+                  "anyOf": [
+                    {
+                      "$ref": "#/components/schemas/ImageSize"
+                    },
+                    {
+                      "enum": [
+                        "square_hd",
+                        "square",
+                        "portrait_4_3",
+                        "portrait_16_9",
+                        "landscape_4_3",
+                        "landscape_16_9"
+                      ],
+                      "type": "string"
+                    }
+                  ],
+                  "title": "Image Size",
+                  "description": "The size of the generated image."
+                },
+                "output_format": {
+                  "enum": [
+                    "jpeg",
+                    "png"
+                  ],
+                  "title": "Output Format",
+                  "type": "string",
+                  "description": "The format of the generated image.",
+                  "default": "jpeg"
+                },
+                "image_url": {
+                  "examples": [
+                    "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
+                  ],
+                  "title": "Image Url",
+                  "type": "string",
+                  "description": "URL of the image to use for inpainting or img2img."
+                },
+                "loras": {
+                  "description": "\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ",
+                  "type": "array",
+                  "items": {
+                    "$ref": "#/components/schemas/LoraWeight"
+                  },
+                  "examples": [],
+                  "title": "Loras",
+                  "default": []
+                },
+                "sync_mode": {
+                  "title": "Sync Mode",
+                  "type": "boolean",
+                  "description": "\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ",
+                  "default": false
+                },
+                "strength": {
+                  "minimum": 0.01,
+                  "maximum": 1,
+                  "type": "number",
+                  "title": "Strength",
+                  "description": "The strength to use for inpainting/image-to-image. Only used if the image_url is provided. 1.0 completely remakes the image while 0.0 preserves the original.",
+                  "default": 0.85
+                },
+                "guidance_scale": {
+                  "minimum": 0,
+                  "maximum": 35,
+                  "type": "number",
+                  "title": "Guidance scale (CFG)",
+                  "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ",
+                  "default": 3.5
+                },
+                "num_inference_steps": {
+                  "minimum": 1,
+                  "maximum": 50,
+                  "type": "integer",
+                  "title": "Num Inference Steps",
+                  "description": "The number of inference steps to perform.",
+                  "default": 28
+                },
+                "mask_url": {
+                  "examples": [
+                    "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
+                  ],
+                  "title": "Mask Url",
+                  "type": "string",
+                  "description": "\n The mask area to inpaint in.\n "
+                },
+                "seed": {
+                  "title": "Seed",
+                  "type": "integer",
+                  "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n "
+                },
+                "enable_safety_checker": {
+                  "title": "Enable Safety Checker",
+                  "type": "boolean",
+                  "description": "If set to true, the safety checker will be enabled.",
+                  "default": true
+                }
+              },
+              "x-fal-order-properties": [
+                "prompt",
+                "image_size",
+                "num_inference_steps",
+                "seed",
+                "loras",
+                "guidance_scale",
+                "sync_mode",
+                "num_images",
+                "enable_safety_checker",
+                "output_format",
+                "image_url",
+                "strength",
+                "mask_url"
+              ],
+              "required": [
+                "prompt",
+                "image_url",
+                "mask_url"
+              ]
+            },
+            "JuggernautFluxLoraInpaintingOutput": {
+              "title": "Output",
+              "type": "object",
+              "properties": {
+                "prompt": {
+                  "title": "Prompt",
+                  "type": "string",
+                  "description": "The prompt used for generating the image."
+                },
+                "images": {
+                  "title": "Images",
+                  "type": "array",
+                  "description": "The generated image files info.",
+                  "items": {
+                    "$ref": "#/components/schemas/Image"
+                  }
+                },
+                "timings": {
+                  "additionalProperties": {
+                    "type": "number"
+                  },
+                  "type": "object",
+                  "title": "Timings"
+                },
+                "has_nsfw_concepts": {
+                  "title": "Has Nsfw Concepts",
+                  "type": "array",
+                  "description": "Whether the generated images contain NSFW concepts.",
+                  "items": {
+                    "type": "boolean"
+                  }
+                },
+                "seed": {
+                  "title": "Seed",
+                  "type": "integer",
+                  "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n "
+                }
+              },
+              "x-fal-order-properties": [
+                "images",
+                "timings",
+                "seed",
+                "has_nsfw_concepts",
+                "prompt"
+              ],
+              "required": [
+                "images",
+                "timings",
+                "seed",
+                "has_nsfw_concepts",
+                "prompt"
+              ]
+            },
+            "ImageSize": {
+              "title": "ImageSize",
+              "type": "object",
+              "properties": {
+                "height": {
+                  "maximum": 14142,
+                  "type": "integer",
+                  "title": "Height",
+                  "description": "The height of the generated image.",
+                  "exclusiveMinimum": 0,
+                  "default": 512
+                },
+                "width": {
+                  "maximum": 14142,
+                  "type": "integer",
+                  "title": "Width",
+                  "description": "The width of the generated image.",
+                  "exclusiveMinimum": 0,
+                  "default": 512
+                }
+              },
+              "x-fal-order-properties": [
+                "width",
+                "height"
+              ]
+            },
+            "LoraWeight": {
+              "title": "LoraWeight",
+              "type": "object",
+              "properties": {
+                "path": {
+                  "title": "Path",
+                  "type": "string",
+                  "description": "URL or the path to the LoRA weights."
+                },
+                "scale": {
+                  "minimum": 0,
+                  "maximum": 4,
+                  "type": "number",
+                  "title": "Scale",
+                  "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ",
+                  "default": 1
+                }
+              },
+              "x-fal-order-properties": [
+                "path",
+                "scale"
+              ],
+              "required": [
+                "path"
+              ]
+            },
+            "Image": {
+              "title": "Image",
+              "type": "object",
+              "properties": {
+                "height": {
+                  "title": "Height",
+                  "type": "integer"
+                },
+                "content_type": {
+                  "title": "Content Type",
+                  "type": "string",
+                  "default": "image/jpeg"
+                },
+                "url": {
+                  "title": "Url",
+                  "type": "string"
+                },
+                "width": {
+                  "title": "Width",
+                  "type": "integer"
+                }
+              },
+              "x-fal-order-properties": [
+                "url",
+                "width",
+                "height",
+                "content_type"
+              ],
+              "required": [
+                "url",
+                "width",
+                "height"
+              ]
+            }
+          }
+        },
+        "paths": {
+          "/rundiffusion-fal/juggernaut-flux-lora/inpainting/requests/{request_id}/status": {
+            "get": {
+              "parameters": [
+                {
+                  "name": "request_id",
+                  "in": "path",
+                  "required": true,
+                  "schema": {
+                    "type": "string",
+                    "description": "Request ID"
+                  }
+                },
+                {
+                  "name": "logs",
+                  "in": "query",
+                  "required": false,
+                  "schema": {
+                    "type": "number",
+                    "description": "Whether to include logs (`1`) in the response or not (`0`)."
+                  }
+                }
+              ],
+              "responses": {
+                "200": {
+                  "description": "The request status.",
+                  "content": {
+                    "application/json": {
+                      "schema": {
+                        "$ref": "#/components/schemas/QueueStatus"
+                      }
+                    }
+                  }
+                }
+              }
+            }
+          },
+          "/rundiffusion-fal/juggernaut-flux-lora/inpainting/requests/{request_id}/cancel": {
+            "put": {
+              "parameters": [
+                {
+                  "name": "request_id",
+                  "in": "path",
+                  "required": true,
+                  "schema": {
+                    "type": "string",
+                    "description": "Request ID"
+                  }
+                }
+              ],
+              "responses": {
+                "200": {
+                  "description": "The request was cancelled.",
+                  "content": {
+                    "application/json": {
+                      "schema": {
+                        "type": "object",
+                        "properties": {
+                          "success": {
+                            "type": "boolean",
+                            "description": "Whether the request was cancelled successfully."
+ } + } + } + } + } + } + } + } + }, + "/rundiffusion-fal/juggernaut-flux-lora/inpainting": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/JuggernautFluxLoraInpaintingInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/rundiffusion-fal/juggernaut-flux-lora/inpainting/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/JuggernautFluxLoraInpaintingOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/fashn/tryon/v1.5", + "metadata": { + "display_name": "FASHN Virtual Try-On V1.5", + "category": "image-to-image", + "description": "FASHN v1.5 delivers precise virtual try-on capabilities, accurately rendering garment details like text and patterns at 576x864 resolution from both on-model and flat-lay photo references.", + "status": "active", + "tags": [ + "try-on", + "fashion", + "clothing" + ], + "updated_at": "2026-01-26T21:43:54.659Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/fashn_wide.webp", + "model_url": "https://fal.run/fal-ai/fashn/tryon/v1.5", + "license_type": "commercial", + "date": "2025-04-21T12:52:40.889Z", + "group": { + "key": "Fashn", + "label": "v1.5" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/fashn/tryon/v1.5", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/fashn/tryon/v1.5 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/fashn/tryon/v1.5", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/fashn_wide.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/fashn/tryon/v1.5", + "documentationUrl": "https://fal.ai/models/fal-ai/fashn/tryon/v1.5/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+                }
+              },
+              "required": [
+                "status",
+                "request_id"
+              ]
+            },
+            "FashnTryonV15Input": {
+              "title": "Input",
+              "type": "object",
+              "properties": {
+                "model_image": {
+                  "examples": [
+                    "https://utfs.io/f/wXFHUNfTHmLj4prvqbRMQ6JXFyUr3IT0avK2HSOmZWiAsxg9"
+                  ],
+                  "title": "Model Image",
+                  "type": "string",
+                  "description": "URL or base64 of the model image"
+                },
+                "moderation_level": {
+                  "enum": [
+                    "none",
+                    "permissive",
+                    "conservative"
+                  ],
+                  "title": "Moderation Level",
+                  "type": "string",
+                  "description": "Content moderation level for garment images. 'none' disables moderation, 'permissive' blocks only explicit content, 'conservative' also blocks underwear and swimwear.",
+                  "default": "permissive"
+                },
+                "garment_photo_type": {
+                  "enum": [
+                    "auto",
+                    "model",
+                    "flat-lay"
+                  ],
+                  "title": "Garment Photo Type",
+                  "type": "string",
+                  "description": "Specifies the type of garment photo to optimize internal parameters for better performance. 'model' is for photos of garments on a model, 'flat-lay' is for flat-lay or ghost mannequin images, and 'auto' attempts to automatically detect the photo type.",
+                  "default": "auto"
+                },
+                "garment_image": {
+                  "examples": [
+                    "https://utfs.io/f/wXFHUNfTHmLjtkhepmqOUnkr8XxZbNIFmRWldShDLu320TeC"
+                  ],
+                  "title": "Garment Image",
+                  "type": "string",
+                  "description": "URL or base64 of the garment image"
+                },
+                "category": {
+                  "enum": [
+                    "tops",
+                    "bottoms",
+                    "one-pieces",
+                    "auto"
+                  ],
+                  "title": "Category",
+                  "type": "string",
+                  "description": "Category of the garment to try-on. 'auto' will attempt to automatically detect the category of the garment.",
+                  "default": "auto"
+                },
+                "sync_mode": {
+                  "title": "Sync Mode",
+                  "type": "boolean",
+                  "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.",
+                  "default": false
+                },
+                "segmentation_free": {
+                  "title": "Segmentation Free",
+                  "type": "boolean",
+                  "description": "Disables human parsing on the model image.",
+                  "default": true
+                },
+                "num_samples": {
+                  "minimum": 1,
+                  "title": "Num Samples",
+                  "type": "integer",
+                  "maximum": 4,
+                  "description": "Number of images to generate in a single run. Image generation has a random element in it, so trying multiple images at once increases the chances of getting a good result.",
+                  "default": 1
+                },
+                "mode": {
+                  "enum": [
+                    "performance",
+                    "balanced",
+                    "quality"
+                  ],
+                  "title": "Mode",
+                  "type": "string",
+                  "description": "Specifies the mode of operation. 'performance' mode is faster but may sacrifice quality, 'balanced' mode is a balance between speed and quality, and 'quality' mode is slower but produces higher quality results.",
+                  "default": "balanced"
+                },
+                "seed": {
+                  "title": "Seed",
+                  "type": "integer",
+                  "description": "Sets random operations to a fixed state. Use the same seed to reproduce results with the same inputs, or a different seed to force different results."
+                },
+                "output_format": {
+                  "enum": [
+                    "png",
+                    "jpeg"
+                  ],
+                  "title": "Output Format",
+                  "type": "string",
+                  "description": "Output format of the generated images. 'png' is the highest quality, while 'jpeg' is faster",
+                  "default": "png"
+                }
+              },
+              "x-fal-order-properties": [
+                "model_image",
+                "garment_image",
+                "category",
+                "mode",
+                "garment_photo_type",
+                "moderation_level",
+                "seed",
+                "num_samples",
+                "segmentation_free",
+                "sync_mode",
+                "output_format"
+              ],
+              "required": [
+                "model_image",
+                "garment_image"
+              ]
+            },
+            "FashnTryonV15Output": {
+              "title": "Output",
+              "type": "object",
+              "properties": {
+                "images": {
+                  "examples": [
+                    [
+                      {
+                        "url": "https://cdn.staging.fashn.ai/464eeeb1-4faa-4a2d-8c17-342d0d35c4c1/output_0.png"
+                      }
+                    ]
+                  ],
+                  "title": "Images",
+                  "type": "array",
+                  "items": {
+                    "$ref": "#/components/schemas/File"
+                  }
+                }
+              },
+              "x-fal-order-properties": [
+                "images"
+              ],
+              "required": [
+                "images"
+              ]
+            },
+            "File": {
+              "title": "File",
+              "type": "object",
+              "properties": {
+                "file_size": {
+                  "examples": [
+                    4404019
+                  ],
+                  "title": "File Size",
+                  "type": "integer",
+                  "description": "The size of the file in bytes."
+                },
+                "file_name": {
+                  "examples": [
+                    "z9RV14K95DvU.png"
+                  ],
+                  "title": "File Name",
+                  "type": "string",
+                  "description": "The name of the file. It will be auto-generated if not provided."
+                },
+                "content_type": {
+                  "examples": [
+                    "image/png"
+                  ],
+                  "title": "Content Type",
+                  "type": "string",
+                  "description": "The mime type of the file."
+                },
+                "url": {
+                  "title": "Url",
+                  "type": "string",
+                  "description": "The URL where the file can be downloaded from."
+                },
+                "file_data": {
+                  "format": "binary",
+                  "title": "File Data",
+                  "type": "string",
+                  "description": "File data"
+                }
+              },
+              "x-fal-order-properties": [
+                "url",
+                "content_type",
+                "file_name",
+                "file_size",
+                "file_data"
+              ],
+              "required": [
+                "url"
+              ]
+            }
+          }
+        },
+        "paths": {
+          "/fal-ai/fashn/tryon/v1.5/requests/{request_id}/status": {
+            "get": {
+              "parameters": [
+                {
+                  "name": "request_id",
+                  "in": "path",
+                  "required": true,
+                  "schema": {
+                    "type": "string",
+                    "description": "Request ID"
+                  }
+                },
+                {
+                  "name": "logs",
+                  "in": "query",
+                  "required": false,
+                  "schema": {
+                    "type": "number",
+                    "description": "Whether to include logs (`1`) in the response or not (`0`)."
+                  }
+                }
+              ],
+              "responses": {
+                "200": {
+                  "description": "The request status.",
+                  "content": {
+                    "application/json": {
+                      "schema": {
+                        "$ref": "#/components/schemas/QueueStatus"
+                      }
+                    }
+                  }
+                }
+              }
+            }
+          },
+          "/fal-ai/fashn/tryon/v1.5/requests/{request_id}/cancel": {
+            "put": {
+              "parameters": [
+                {
+                  "name": "request_id",
+                  "in": "path",
+                  "required": true,
+                  "schema": {
+                    "type": "string",
+                    "description": "Request ID"
+                  }
+                }
+              ],
+              "responses": {
+                "200": {
+                  "description": "The request was cancelled.",
+                  "content": {
+                    "application/json": {
+                      "schema": {
+                        "type": "object",
+                        "properties": {
+                          "success": {
+                            "type": "boolean",
+                            "description": "Whether the request was cancelled successfully."
+                          }
+                        }
+                      }
+                    }
+                  }
+                }
+              }
+            }
+          },
+          "/fal-ai/fashn/tryon/v1.5": {
+            "post": {
+              "requestBody": {
+                "required": true,
+                "content": {
+                  "application/json": {
+                    "schema": {
+                      "$ref": "#/components/schemas/FashnTryonV15Input"
+                    }
+                  }
+                }
+              },
+              "responses": {
+                "200": {
+                  "description": "The request status.",
+                  "content": {
+                    "application/json": {
+                      "schema": {
+                        "$ref": "#/components/schemas/QueueStatus"
+                      }
+                    }
+                  }
+                }
+              }
+            }
+          },
+          "/fal-ai/fashn/tryon/v1.5/requests/{request_id}": {
+            "get": {
+              "parameters": [
+                {
+                  "name": "request_id",
+                  "in": "path",
+                  "required": true,
+                  "schema": {
+                    "type": "string",
+                    "description": "Request ID"
+                  }
+                }
+              ],
+              "responses": {
+                "200": {
+                  "description": "Result of the request.",
+                  "content": {
+                    "application/json": {
+                      "schema": {
+                        "$ref": "#/components/schemas/FashnTryonV15Output"
+                      }
+                    }
+                  }
+                }
+              }
+            }
+          }
+        },
+        "servers": [
+          {
+            "url": "https://queue.fal.run"
+          }
+        ],
+        "security": [
+          {
+            "apiKeyAuth": []
+          }
+        ]
+      }
+    },
+    {
+      "endpoint_id": "fal-ai/plushify",
+      "metadata": {
+        "display_name": "Plushify",
+        "category": "image-to-image",
+        "description": "Turn any image into a cute plushie!",
+        "status": "active",
+        "tags": [],
+        "updated_at": "2026-01-26T21:43:54.788Z",
+        "is_favorited": false,
+        "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/plushie_thumbnail.png",
+        "model_url": "https://fal.run/fal-ai/plushify",
+        "license_type": "commercial",
+        "date": "2025-04-20T20:39:06.176Z",
+        "highlighted": false,
+        "kind": "inference",
+        "pinned": false
+      },
+      "openapi": {
+        "openapi": "3.0.4",
+        "info": {
+          "title": "Queue OpenAPI for fal-ai/plushify",
+          "version": "1.0.0",
+          "description": "The OpenAPI schema for the fal-ai/plushify queue.",
+          "x-fal-metadata": {
+            "endpointId": "fal-ai/plushify",
+            "category": "image-to-image",
+            "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/plushie_thumbnail.png",
+            "playgroundUrl": "https://fal.ai/models/fal-ai/plushify",
+            "documentationUrl": "https://fal.ai/models/fal-ai/plushify/api"
+          }
+        },
+        "components": {
+          "securitySchemes": {
+            "apiKeyAuth": {
+              "type": "apiKey",
+              "in": "header",
+              "name": "Authorization",
+              "description": "Fal Key"
+            }
+          },
+          "schemas": {
+            "QueueStatus": {
+              "type": "object",
+              "properties": {
+                "status": {
+                  "type": "string",
+                  "enum": [
+                    "IN_QUEUE",
+                    "IN_PROGRESS",
+                    "COMPLETED"
+                  ]
+                },
+                "request_id": {
+                  "type": "string",
+                  "description": "The request id."
+                },
+                "response_url": {
+                  "type": "string",
+                  "description": "The response url."
+                },
+                "status_url": {
+                  "type": "string",
+                  "description": "The status url."
+                },
+                "cancel_url": {
+                  "type": "string",
+                  "description": "The cancel url."
+                },
+                "logs": {
+                  "type": "object",
+                  "description": "The logs.",
+                  "additionalProperties": true
+                },
+                "metrics": {
+                  "type": "object",
+                  "description": "The metrics.",
+                  "additionalProperties": true
+                },
+                "queue_position": {
+                  "type": "integer",
+                  "description": "The queue position."
+                }
+              },
+              "required": [
+                "status",
+                "request_id"
+              ]
+            },
+            "PlushifyInput": {
+              "title": "PlushifyInput",
+              "type": "object",
+              "properties": {
+                "prompt": {
+                  "title": "Prompt",
+                  "type": "string",
+                  "description": "Prompt for the generation. Default is empty, which is usually best, but sometimes it can help to add a description of the subject.",
+                  "default": ""
+                },
+                "num_images": {
+                  "minimum": 1,
+                  "maximum": 4,
+                  "type": "integer",
+                  "title": "Num Images",
+                  "description": "Number of images to generate",
+                  "default": 1
+                },
+                "use_cfg_zero": {
+                  "title": "Use Cfg Zero",
+                  "type": "boolean",
+                  "description": "Whether to use CFG zero",
+                  "default": false
+                },
+                "image_url": {
+                  "examples": [
+                    "https://v3.fal.media/files/tiger/c8VSfX5XtJ3DCzV-4Bxg8_kid_image.png"
+                  ],
+                  "title": "Image Url",
+                  "type": "string",
+                  "description": "URL of the image to apply the plushie style to"
+                },
+                "scale": {
+                  "minimum": 0.1,
+                  "maximum": 2,
+                  "type": "number",
+                  "title": "Scale",
+                  "description": "Scale factor for the plushie effect",
+                  "default": 1
+                },
+                "num_inference_steps": {
+                  "minimum": 1,
+                  "maximum": 50,
+                  "type": "integer",
+                  "title": "Num Inference Steps",
+                  "description": "Number of inference steps",
+                  "default": 28
+                },
+                "guidance_scale": {
+                  "minimum": 0,
+                  "maximum": 20,
+                  "type": "number",
+                  "title": "Guidance Scale",
+                  "description": "Guidance scale for the generation",
+                  "default": 3.5
+                },
+                "enable_safety_checker": {
+                  "title": "Enable Safety Checker",
+                  "type": "boolean",
+                  "description": "Whether to enable the safety checker",
+                  "default": true
+                },
+                "seed": {
+                  "title": "Seed",
+                  "type": "integer",
+                  "description": "The seed for image generation. The same seed with the same parameters will generate the same image."
+                }
+              },
+              "x-fal-order-properties": [
+                "image_url",
+                "prompt",
+                "scale",
+                "guidance_scale",
+                "num_inference_steps",
+                "enable_safety_checker",
+                "use_cfg_zero",
+                "seed",
+                "num_images"
+              ],
+              "required": [
+                "image_url"
+              ]
+            },
+            "PlushifyOutput": {
+              "title": "Output",
+              "type": "object",
+              "properties": {
+                "prompt": {
+                  "title": "Prompt",
+                  "type": "string",
+                  "description": "The prompt used for generating the image."
+                },
+                "images": {
+                  "title": "Images",
+                  "type": "array",
+                  "description": "The generated image files info.",
+                  "items": {
+                    "$ref": "#/components/schemas/Image"
+                  }
+                },
+                "timings": {
+                  "additionalProperties": {
+                    "type": "number"
+                  },
+                  "type": "object",
+                  "title": "Timings"
+                },
+                "has_nsfw_concepts": {
+                  "title": "Has Nsfw Concepts",
+                  "type": "array",
+                  "description": "Whether the generated images contain NSFW concepts.",
+                  "items": {
+                    "type": "boolean"
+                  }
+                },
+                "seed": {
+                  "title": "Seed",
+                  "type": "integer",
+                  "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n "
+                }
+              },
+              "x-fal-order-properties": [
+                "images",
+                "timings",
+                "seed",
+                "has_nsfw_concepts",
+                "prompt"
+              ],
+              "required": [
+                "images",
+                "timings",
+                "seed",
+                "has_nsfw_concepts",
+                "prompt"
+              ]
+            },
+            "Image": {
+              "title": "Image",
+              "type": "object",
+              "properties": {
+                "height": {
+                  "title": "Height",
+                  "type": "integer"
+                },
+                "content_type": {
+                  "title": "Content Type",
+                  "type": "string",
+                  "default": "image/jpeg"
+                },
+                "url": {
+                  "title": "Url",
+                  "type": "string"
+                },
+                "width": {
+                  "title": "Width",
+                  "type": "integer"
+                }
+              },
+              "x-fal-order-properties": [
+                "url",
+                "width",
+                "height",
+                "content_type"
+              ],
+              "required": [
+                "url",
+                "width",
+                "height"
+              ]
+            }
+          }
+        },
+        "paths": {
+          "/fal-ai/plushify/requests/{request_id}/status": {
+            "get": {
+              "parameters": [
+                {
+                  "name": "request_id",
+                  "in": "path",
+                  "required": true,
+                  "schema": {
+                    "type": "string",
+                    "description": "Request ID"
+                  }
+                },
+                {
+                  "name": "logs",
+                  "in": "query",
+                  "required": false,
+                  "schema": {
+                    "type": "number",
+                    "description": "Whether to include logs (`1`) in the response or not (`0`)."
+                  }
+                }
+              ],
+              "responses": {
+                "200": {
+                  "description": "The request status.",
+                  "content": {
+                    "application/json": {
+                      "schema": {
+                        "$ref": "#/components/schemas/QueueStatus"
+                      }
+                    }
+                  }
+                }
+              }
+            }
+          },
+          "/fal-ai/plushify/requests/{request_id}/cancel": {
+            "put": {
+              "parameters": [
+                {
+                  "name": "request_id",
+                  "in": "path",
+                  "required": true,
+                  "schema": {
+                    "type": "string",
+                    "description": "Request ID"
+                  }
+                }
+              ],
+              "responses": {
+                "200": {
+                  "description": "The request was cancelled.",
+                  "content": {
+                    "application/json": {
+                      "schema": {
+                        "type": "object",
+                        "properties": {
+                          "success": {
+                            "type": "boolean",
+                            "description": "Whether the request was cancelled successfully."
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/plushify": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PlushifyInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/plushify/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PlushifyOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/instant-character", + "metadata": { + "display_name": "Instant Character", + "category": "image-to-image", + "description": "InstantCharacter creates high-quality, consistent characters from text prompts, supporting diverse poses, styles, and appearances with strong identity control.", + "status": "active", + "tags": [ + "personalization", + "customization" + ], + "updated_at": "2026-01-26T21:43:55.046Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Upscale-3.jpeg", + "model_url": "https://fal.run/fal-ai/instant-character", + "license_type": "commercial", + "date": "2025-04-18T05:55:46.395Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/instant-character", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/instant-character queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/instant-character", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Upscale-3.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/instant-character", + "documentationUrl": "https://fal.ai/models/fal-ai/instant-character/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "InstantCharacterInput": { + "title": "TextToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A girl is playing a guitar in street" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." 
+ }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 4, + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "square_hd" + }, + "scale": { + "minimum": 0, + "title": "Scale", + "type": "number", + "maximum": 2, + "description": "The scale of the subject image. Higher values will make the subject image more prominent in the generated image.", + "default": 1 + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/girl.jpg" + ], + "title": "Image URL", + "type": "string", + "description": "The image URL to generate an image from. Needs to match the dimensions of the mask." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance scale (CFG)", + "type": "number", + "maximum": 20, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps to perform.", + "default": 28 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "negative_prompt": { + "examples": [ + "" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + "default": "" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "image_size", + "scale", + "negative_prompt", + "seed", + "guidance_scale", + "num_inference_steps", + "num_images", + "sync_mode", + "enable_safety_checker", + "output_format" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "InstantCharacterOutput": { + "title": "ImageOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." 
+ }, + "images": { + "examples": [ + [ + { + "height": 1024, + "content_type": "image/jpeg", + "url": "https://v3.fal.media/files/penguin/dG4xIRLMkTRKxA-T7h57l.jpeg", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/instant-character/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/instant-character/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/instant-character": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InstantCharacterInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/instant-character/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InstantCharacterOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/cartoonify", + "metadata": { + "display_name": "Cartoonify", + "category": "image-to-image", + "description": "Transform images into 3D cartoon artwork using an AI model that applies cartoon stylization while preserving the original image's composition and details.", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:56.228Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-3.jpg", + "model_url": "https://fal.run/fal-ai/cartoonify", + "license_type": "commercial", + "date": "2025-04-14T21:28:54.080Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/cartoonify", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/cartoonify queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/cartoonify", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-3.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/cartoonify", + "documentationUrl": "https://fal.ai/models/fal-ai/cartoonify/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "CartoonifyInput": { + "title": "CartoonifyInput", + "type": "object", + "properties": { + "use_cfg_zero": { + "title": "Use Cfg Zero", + "type": "boolean", + "description": "Whether to use CFG zero", + "default": false + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/tiger/c8VSfX5XtJ3DCzV-4Bxg8_kid_image.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to apply Pixar style to" + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "Guidance scale for the generation", + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of inference steps", + "default": 28 + }, + "scale": { + "minimum": 0.1, + "maximum": 2, + "type": "number", + "title": "Scale", + "description": "Scale factor for the Pixar effect", + "default": 1 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker", + "default": true + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed for image generation. Same seed with same parameters will generate same image." + } + }, + "x-fal-order-properties": [ + "image_url", + "scale", + "guidance_scale", + "num_inference_steps", + "enable_safety_checker", + "use_cfg_zero", + "seed" + ], + "required": [ + "image_url" + ] + }, + "CartoonifyOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/cartoonify/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/cartoonify/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/cartoonify": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CartoonifyInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/cartoonify/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CartoonifyOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/finegrain-eraser/mask", + "metadata": { + "display_name": "finegrain eraser", + "category": "image-to-image", + "description": "Finegrain Eraser removes any object selected with a mask—along with its shadows, reflections, and lighting artifacts—seamlessly reconstructing the scene with contextually accurate content.", + "status": "active", + "tags": [ + "utility", + "editing" + ], + "updated_at": "2026-01-26T21:43:57.034Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-3.jpg", + "model_url": "https://fal.run/fal-ai/finegrain-eraser/mask", + "license_type": "commercial", + "date": "2025-04-10T17:18:31.479Z", + "group": { + "key": "finegrain", + "label": "mask" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/finegrain-eraser/mask", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/finegrain-eraser/mask queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/finegrain-eraser/mask", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-3.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/finegrain-eraser/mask", + "documentationUrl": "https://fal.ai/models/fal-ai/finegrain-eraser/mask/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FinegrainEraserMaskInput": { + "title": "MaskEraseRequest", + "type": "object", + "properties": { + "mode": { + "enum": [ + "express", + "standard", + "premium" + ], + "description": "Erase quality mode", + "type": "string", + "title": "Mode", + "default": "standard" + }, + "seed": { + "minimum": 0, + "maximum": 999, + "type": "integer", + "description": "Random seed for reproducible generation", + "title": "Seed" + }, + "mask_url": { + "examples": [ + "https://v3.fal.media/files/panda/-31cZrsoy-8BrLqOEFmST_indir%20(18).png" + ], + "description": "URL of the mask image. Should be a binary mask where white (255) indicates areas to erase", + "type": "string", + "title": "Mask Url" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/elephant/IKqKIxDfRDK8fzeETCveO_erase_example01.jpg" + ], + "description": "URL of the image to edit", + "type": "string", + "title": "Image Url" + } + }, + "x-fal-order-properties": [ + "image_url", + "mask_url", + "mode", + "seed" + ], + "required": [ + "image_url", + "mask_url" + ] + }, + "FinegrainEraserMaskOutput": { + "title": "EraseOutput", + "type": "object", + "properties": { + "image": { + "examples": [ + { + "url": "https://v3.fal.media/files/penguin/PpROj5BGoWMj0H6wb11aG_output.jpg" + } + ], + "description": "The edited image with content erased", + "title": "Image", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "used_seed": { + "description": "Seed used for generation", + "type": "integer", + "title": "Used Seed" + } + }, + "x-fal-order-properties": [ + "image", + "used_seed" + ], + "required": [ + "image", + "used_seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/finegrain-eraser/mask/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/finegrain-eraser/mask/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/finegrain-eraser/mask": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FinegrainEraserMaskInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/finegrain-eraser/mask/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FinegrainEraserMaskOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/finegrain-eraser/bbox", + "metadata": { + "display_name": "finegrain eraser", + "category": "image-to-image", + "description": "Finegrain Eraser removes any object selected with a bounding box—along with its shadows, reflections, and lighting artifacts—seamlessly reconstructing the scene with contextually accurate content.", + "status": "active", + "tags": [ + "utility", + "editing" + ], + "updated_at": "2026-01-26T21:43:57.162Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-3.jpg", + "model_url": "https://fal.run/fal-ai/finegrain-eraser/bbox", + "license_type": "commercial", + "date": "2025-04-09T19:57:48.361Z", + "group": { + "key": "finegrain", + "label": "bbox" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/finegrain-eraser/bbox", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/finegrain-eraser/bbox queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/finegrain-eraser/bbox", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-3.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/finegrain-eraser/bbox", + "documentationUrl": "https://fal.ai/models/fal-ai/finegrain-eraser/bbox/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FinegrainEraserBboxInput": { + "title": "BBoxEraseRequest", + "type": "object", + "properties": { + "mode": { + "enum": [ + "express", + "standard", + "premium" + ], + "description": "Erase quality mode", + "type": "string", + "title": "Mode", + "default": "standard" + }, + "seed": { + "minimum": 0, + "maximum": 999, + "type": "integer", + "description": "Random seed for reproducible generation", + "title": "Seed" + }, + "box_prompts": { + "description": "List of bounding box coordinates to erase (only one box prompt is supported)", + "type": "array", + "title": "Box Prompts", + "items": { + "$ref": "#/components/schemas/BoxPromptBase" + } + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/elephant/IKqKIxDfRDK8fzeETCveO_erase_example01.jpg" + ], + "description": "URL of the image to edit", + "type": "string", + "title": "Image Url" + } + }, + "x-fal-order-properties": [ + "image_url", + "box_prompts", + "mode", + "seed" + ], + "required": [ + "image_url", + "box_prompts" + ] + }, + "FinegrainEraserBboxOutput": { + "title": "EraseOutput", + "type": "object", + "properties": { + "image": { + "examples": [ + { + "url": "https://v3.fal.media/files/penguin/PpROj5BGoWMj0H6wb11aG_output.jpg" + } + ], + "description": "The edited image with content erased", + "title": "Image", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "used_seed": { + "description": "Seed used for generation", + "type": "integer", + "title": "Used Seed" + } + }, + "x-fal-order-properties": [ + "image", + "used_seed" + ], + "required": [ + "image", + "used_seed" + ] + }, + "BoxPromptBase": { + "title": "BoxPromptBase", + "type": "object", + "properties": { + "y_min": { + "description": "Y Min Coordinate of the box", + "type": "integer", + "title": "Y Min", + "default": 0 + }, + "x_max": { + "description": "X Max Coordinate of the prompt", + "type": "integer", + "title": "X Max", + "default": 0 + }, + "x_min": { + "description": "X Min Coordinate of the box", + "type": "integer", + "title": "X Min", + "default": 0 + }, + "y_max": { + "description": "Y Max Coordinate of the prompt", + "type": "integer", + "title": "Y Max", + "default": 0 + } + }, + "x-fal-order-properties": [ + "x_min", + "y_min", + "x_max", + "y_max" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/finegrain-eraser/bbox/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/finegrain-eraser/bbox/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/finegrain-eraser/bbox": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FinegrainEraserBboxInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/finegrain-eraser/bbox/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FinegrainEraserBboxOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/finegrain-eraser", + "metadata": { + "display_name": "finegrain eraser", + "category": "image-to-image", + "description": "Finegrain Eraser removes objects—along with their shadows, reflections, and lighting artifacts—using only natural language, seamlessly filling the scene with contextually accurate content.", + "status": "active", + "tags": [ + "utility", + "editing" + ], + "updated_at": "2026-01-26T21:43:57.424Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-3.jpg", + "model_url": "https://fal.run/fal-ai/finegrain-eraser", + "license_type": "commercial", + "date": "2025-04-09T19:56:33.112Z", + "group": { + "key": "finegrain", + "label": "prompt" + }, + "highlighted": false, + 
"kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/finegrain-eraser", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/finegrain-eraser queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/finegrain-eraser", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-3.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/finegrain-eraser", + "documentationUrl": "https://fal.ai/models/fal-ai/finegrain-eraser/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FinegrainEraserInput": { + "title": "PromptEraseRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "person on the right and snowboard" + ], + "description": "Text description of what to erase", + "type": "string", + "title": "Prompt" + }, + "mode": { + "enum": [ + "express", + "standard", + "premium" + ], + "description": "Erase quality mode", + "type": "string", + "title": "Mode", + "default": "standard" + }, + "seed": { + "minimum": 0, + "maximum": 999, + "type": "integer", + "description": "Random seed for reproducible generation", + "title": "Seed" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/elephant/IKqKIxDfRDK8fzeETCveO_erase_example01.jpg" + ], + "description": "URL of the image to edit", + "type": "string", + "title": "Image Url" + } + }, + "x-fal-order-properties": [ + "image_url", + "prompt", + "mode", + "seed" + ], + "required": [ + "image_url", + "prompt" + ] + }, + "FinegrainEraserOutput": { + "title": "EraseOutput", + "type": "object", + "properties": { + "image": { + "examples": [ + { + "url": "https://v3.fal.media/files/penguin/PpROj5BGoWMj0H6wb11aG_output.jpg" + } + ], + "description": "The edited image with content erased", + "title": "Image", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "used_seed": { + "description": "Seed used for generation", + "type": "integer", + "title": "Used Seed" + } + }, + "x-fal-order-properties": [ + "image", + "used_seed" + ], + "required": [ + "image", + "used_seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/finegrain-eraser/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/finegrain-eraser/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/finegrain-eraser": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FinegrainEraserInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/finegrain-eraser/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FinegrainEraserOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/star-vector", + "metadata": { + "display_name": "StarVector", + "category": "image-to-image", + "description": "AI vectorization model that transforms raster images into scalable SVG graphics, preserving visual details while enabling infinite scaling and easy editing capabilities.\n\n", + "status": "active", + "tags": [ + "image-to-image" + ], + "updated_at": "2026-01-26T21:44:17.757Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/starvector.webp", + "model_url": "https://fal.run/fal-ai/star-vector", + "license_type": "commercial", + "date": "2025-04-01T15:43:20.111Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for 
fal-ai/star-vector", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/star-vector queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/star-vector", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/starvector.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/star-vector", + "documentationUrl": "https://fal.ai/models/fal-ai/star-vector/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "StarVectorInput": { + "title": "StarVectorInput", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "seed to be used for generation" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/star-vector/sample-18.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of image to be used for relighting" + } + }, + "x-fal-order-properties": [ + "image_url", + "seed" + ], + "required": [ + "image_url" + ] + }, + "StarVectorOutput": { + "title": "StarVectorOutput", + "type": "object", + "properties": { + "image": { + "description": "The generated image file info.", + "$ref": "#/components/schemas/File" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "image", + "seed" + ], + "required": [ + "image", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/star-vector/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/star-vector/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/star-vector": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StarVectorInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/star-vector/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StarVectorOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ghiblify", + "metadata": { + "display_name": "Ghiblify Images", + "category": "image-to-image", + "description": "Reimagine and transform your ordinary photos into enchanting Studio Ghibli style artwork", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:44:17.883Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/web-examples/ghibli/ghibli_example.webp", + "model_url": "https://fal.run/fal-ai/ghiblify", + "license_type": "commercial", + "date": "2025-03-31T21:39:16.233Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ghiblify", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ghiblify queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ghiblify", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/web-examples/ghibli/ghibli_example.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/ghiblify", + "documentationUrl": "https://fal.ai/models/fal-ai/ghiblify/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": 
"Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "GhiblifyInput": { + "title": "Input", + "type": "object", + "properties": { + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "examples": [ + null + ], + "description": "The seed to use for the upscale. If not provided, a random seed will be used." + }, + "image_url": { + "examples": [ + "https://fal.media/files/koala/QMPFC_avr-fEywDjp2ujy_60f6e32332384cada30c7016599d93e8.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "The URL of the image to upscale." + } + }, + "x-fal-order-properties": [ + "image_url", + "seed", + "enable_safety_checker" + ], + "required": [ + "image_url" + ] + }, + "GhiblifyOutput": { + "title": "Output", + "type": "object", + "properties": { + "image": { + "description": "The URL of the generated image.", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + } + }, + "x-fal-order-properties": [ + "image" + ], + "required": [ + "image" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "examples": [ + 1024 + ], + "description": "The height of the image in pixels." + }, + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "examples": [ + 1024 + ], + "description": "The width of the image in pixels." 
+ } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ghiblify/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ghiblify/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ghiblify": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GhiblifyInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ghiblify/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GhiblifyOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/thera", + "metadata": { + "display_name": "Thera", + "category": "image-to-image", + "description": "Fix low resolution images with fast speed and quality of thera.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:44:18.320Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/thera.webp", + "model_url": "https://fal.run/fal-ai/thera", + "license_type": "commercial", + "date": "2025-03-24T15:32:43.248Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/thera", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/thera queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/thera", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/thera.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/thera", + "documentationUrl": "https://fal.ai/models/fal-ai/thera/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + 
"description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "TheraInput": { + "title": "TheraInput", + "type": "object", + "properties": { + "upscale_factor": { + "description": "The upscaling factor for the image.", + "type": "number", + "minimum": 1, + "maximum": 6, + "title": "Upscale Factor", + "default": 2 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducible generation." + }, + "backbone": { + "enum": [ + "edsr", + "rdn" + ], + "title": "Backbone", + "type": "string", + "description": "Backbone to use for upscaling", + "examples": [ + "edsr" + ] + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/docres_ckpt/NoN6cImXI9DCeEYzX7-a7_1224f6da06354948ab477fa450e8c4f6.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of image to be used for upscaling" + } + }, + "x-fal-order-properties": [ + "image_url", + "upscale_factor", + "backbone", + "seed" + ], + "required": [ + "image_url", + "backbone" + ] + }, + "TheraOutput": { + "title": "TheraOutput", + "type": "object", + "properties": { + "image": { + "title": "Image", + "description": "The generated image file info.", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "image", + "seed" + ], + "required": [ + "image", + "seed" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/thera/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/thera/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/thera": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TheraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/thera/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TheraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/mix-dehaze-net", + "metadata": { + "display_name": "MixDehazer", + "category": "image-to-image", + "description": "An advanced dehaze model to remove atmospheric haze, restoring clarity and detail in images through intelligent neural network processing.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:44:18.447Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/mix-dehaze.webp", + "model_url": "https://fal.run/fal-ai/mix-dehaze-net", + "license_type": "commercial", + "date": "2025-03-24T15:31:25.673Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/mix-dehaze-net", + "version": "1.0.0", + "description": "An advanced dehaze model to remove atmospheric haze, restoring clarity and detail in images through intelligent neural network processing.", + "x-fal-metadata": { + "endpointId": "fal-ai/mix-dehaze-net", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/mix-dehaze.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/mix-dehaze-net", + "documentationUrl": "https://fal.ai/models/fal-ai/mix-dehaze-net/api" + } + }, + "components": { + "securitySchemes": { + 
"apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MixDehazeNetInput": { + "title": "MixDehazeNetInput", + "type": "object", + "properties": { + "model": { + "enum": [ + "indoor", + "outdoor" + ], + "title": "Model", + "type": "string", + "description": "Model to be used for dehazing", + "default": "indoor" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "seed to be used for generation" + }, + "image_url": { + "title": "Image Url", + "type": "string", + "description": "URL of image to be used for image enhancement" + } + }, + "x-fal-order-properties": [ + "image_url", + "model", + "seed" + ], + "required": [ + "image_url" + ] + }, + "MixDehazeNetOutput": { + "title": "MixDehazeNetOutput", + "type": "object", + "properties": { + "image": { + "title": "Image", + "description": "The generated image file info.", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + } + }, + "x-fal-order-properties": [ + "image" + ], + "required": [ + "image" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "description": "Represents an image file.", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/mix-dehaze-net/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/mix-dehaze-net/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/mix-dehaze-net": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MixDehazeNetInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/mix-dehaze-net/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MixDehazeNetOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/gemini-flash-edit/multi", + "metadata": { + "display_name": "Gemini Flash Edit Multi Image", + "category": "image-to-image", + "description": "Gemini Flash Edit Multi Image is a model that can edit multiple images using a text prompt and a reference image.", + "status": "active", + "tags": [ + "editing" + ], + "updated_at": "2026-01-26T21:44:00.577Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/web-examples/gemini-edit/gemini_flash.webp", + "model_url": "https://fal.run/fal-ai/gemini-flash-edit/multi", + "license_type": "commercial", + "date": "2025-03-20T00:00:00.000Z", + "group": { + "key": "gemini-flash-edit", + "label": "Multi Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/gemini-flash-edit/multi", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/gemini-flash-edit/multi queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/gemini-flash-edit/multi", + "category": 
"image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/web-examples/gemini-edit/gemini_flash.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/gemini-flash-edit/multi", + "documentationUrl": "https://fal.ai/models/fal-ai/gemini-flash-edit/multi/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "GeminiFlashEditMultiInput": { + "title": "GeminiMultiImageRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Make the car black" + ], + "maxLength": 5000, + "type": "string", + "title": "Prompt", + "minLength": 3, + "description": "The prompt for image generation or editing" + }, + "input_image_urls": { + "description": "List of URLs of input images for editing", + "type": "array", + "minItems": 1, + "items": { + "type": "string" + }, + "examples": [ + [ + "https://storage.googleapis.com/falserverless/web-examples/gemini-edit/input.png" + ] + ], + "maxItems": 10, + "title": "Input Image Urls" + } + }, + "x-fal-order-properties": [ + "prompt", + "input_image_urls" + ], + "required": [ + "prompt", + "input_image_urls" + ] + }, + "GeminiFlashEditMultiOutput": { + "title": "GeminiImageOutput", + "type": "object", + "properties": { + "description": { + "title": "Description", + "type": "string", + "description": "Text description or response from Gemini" + }, + "image": { + "description": "The generated or edited image", + "$ref": "#/components/schemas/Image" + } + }, + "x-fal-order-properties": [ + "image", + "description" + ], + "required": [ + "image", + "description" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the image in pixels.", + "examples": [ + 1024 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. 
It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the image in pixels.", + "examples": [ + 1024 + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/gemini-flash-edit/multi/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/gemini-flash-edit/multi/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/gemini-flash-edit/multi": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GeminiFlashEditMultiInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/gemini-flash-edit/multi/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GeminiFlashEditMultiOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/gemini-flash-edit", + "metadata": { + "display_name": "Gemini Flash Edit", + "category": "image-to-image", + "description": "Gemini Flash Edit is a model that can edit a single image using a text prompt and a reference image.", + "status": "active", + "tags": [ + "editing" + ], + "updated_at": "2026-01-26T21:44:00.702Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/web-examples/gemini-edit/gemini_flash.webp", + "model_url": "https://fal.run/fal-ai/gemini-flash-edit", + "license_type": "commercial", + "date": "2025-03-20T00:00:00.000Z", + "group": { + "key": "gemini-flash-edit", + "label": "Single Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/gemini-flash-edit", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/gemini-flash-edit queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/gemini-flash-edit", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/web-examples/gemini-edit/gemini_flash.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/gemini-flash-edit", + "documentationUrl": "https://fal.ai/models/fal-ai/gemini-flash-edit/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position."
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "GeminiFlashEditInput": { + "title": "GeminiImageRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Make the car black" + ], + "maxLength": 5000, + "type": "string", + "title": "Prompt", + "minLength": 3, + "description": "The prompt for image generation or editing" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/web-examples/gemini-edit/input.png" + ], + "title": "Image Url", + "type": "string", + "description": "Optional URL of an input image for editing. If not provided, generates a new image." + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "GeminiFlashEditOutput": { + "title": "GeminiImageOutput", + "type": "object", + "properties": { + "description": { + "title": "Description", + "type": "string", + "description": "Text description or response from Gemini" + }, + "image": { + "description": "The generated or edited image", + "$ref": "#/components/schemas/Image" + } + }, + "x-fal-order-properties": [ + "image", + "description" + ], + "required": [ + "image", + "description" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the image in pixels.", + "examples": [ + 1024 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the image in pixels.", + "examples": [ + 1024 + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/gemini-flash-edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/gemini-flash-edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/gemini-flash-edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GeminiFlashEditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/gemini-flash-edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GeminiFlashEditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/invisible-watermark", + "metadata": { + "display_name": "Invisible Watermark", + "category": "image-to-image", + "description": "Invisible Watermark is a model that can add an invisible watermark to an image.", + "status": "active", + "tags": [ + "utility", + "editing" + ], + "updated_at": "2026-01-26T21:44:20.423Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/web-examples/watermark/watermark.webp", + "model_url": "https://fal.run/fal-ai/invisible-watermark", + "date": "2025-03-14T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/invisible-watermark", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/invisible-watermark queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/invisible-watermark", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/web-examples/watermark/watermark.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/invisible-watermark", + "documentationUrl": "https://fal.ai/models/fal-ai/invisible-watermark/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "InvisibleWatermarkInput": { + "x-fal-order-properties": [ + "image_url", + "watermark", + "decode", + "length" + ], + "type": "object", + "properties": { + "decode": { + "description": "Whether to decode a watermark from the image instead of encoding", + "type": "boolean", + "title": "Decode", + "default": false + }, + "watermark": { + "examples": [ + "watermark" + ], + "description": "Text to use as watermark (for encoding only)", + "type": "string", + "title": "Watermark", + "default": "watermark" + }, + "length": { + "description": "Length of watermark bits to decode (required when decode=True)", + "type": "integer", + "title": "Length", + "default": 0 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/web-examples/watermark/watermark_ex.png" + ], + "description": "URL of image to be watermarked or decoded", + "type": "string", + "title": "Image Url" + } + }, + "title": "WatermarkInput", + "required": [ + "image_url" + ] + }, + "InvisibleWatermarkOutput": { + "x-fal-order-properties": [ + "image", + "extracted_watermark", + "length" + ], + "type": "object", + "properties": { + "image": { + "description": "The watermarked image file info (when encoding)", + "title": "Image", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + }, + "extracted_watermark": { + "description": "The extracted watermark text (when decoding)", + "type": "string", + "title": "Extracted Watermark" + }, + "length": { + "description": "Length of the watermark bits used (helpful for future decoding)", + "type": "integer", + "title": "Length", + "default": 0 + } + }, + "title": "WatermarkOutput" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/invisible-watermark/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/invisible-watermark/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/invisible-watermark": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InvisibleWatermarkInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/invisible-watermark/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InvisibleWatermarkOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "rundiffusion-fal/juggernaut-flux/pro/image-to-image", + "metadata": { + "display_name": "Juggernaut Flux Pro", + "category": "image-to-image", + "description": "Juggernaut Pro Flux by RunDiffusion is the flagship Juggernaut model rivaling some of the most advanced image models available, often surpassing them in realism. 
It combines Juggernaut Base with RunDiffusion Photo and features enhancements like reduced background blurriness.", + "status": "active", + "tags": [ + "image generation" + ], + "updated_at": "2026-01-26T21:44:21.539Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/juggernaut-flux-pro.webp", + "model_url": "https://fal.run/rundiffusion-fal/juggernaut-flux/pro/image-to-image", + "license_type": "commercial", + "date": "2025-03-05T00:00:00.000Z", + "group": { + "key": "rundiffusion-juggernaut-flux", + "label": "Pro Image to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for rundiffusion-fal/juggernaut-flux/pro/image-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the rundiffusion-fal/juggernaut-flux/pro/image-to-image queue.", + "x-fal-metadata": { + "endpointId": "rundiffusion-fal/juggernaut-flux/pro/image-to-image", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/juggernaut-flux-pro.webp", + "playgroundUrl": "https://fal.ai/models/rundiffusion-fal/juggernaut-flux/pro/image-to-image", + "documentationUrl": "https://fal.ai/models/rundiffusion-fal/juggernaut-flux/pro/image-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "JuggernautFluxProImageToImageInput": { + "title": "DevImageToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "a cat dressed as a wizard with a background of a mystic forest." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 4, + "description": "The number of images to generate.", + "default": 1 + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "image_url": { + "examples": [ + "https://fal.media/files/koala/Chls9L2ZnvuipUTEwlnJC.png" + ], + "title": "Image URL", + "type": "string", + "description": "The URL of the image to generate an image from." + }, + "strength": { + "minimum": 0.01, + "title": "Strength", + "type": "number", + "maximum": 1, + "description": "The strength of the initial image. 
Higher strength values are better for this model.", + "default": 0.95 + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance scale (CFG)", + "type": "number", + "maximum": 20, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 3.5 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "num_inference_steps": { + "minimum": 10, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps to perform.", + "default": 40 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + } + }, + "x-fal-order-properties": [ + "image_url", + "prompt", + "strength", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "JuggernautFluxProImageToImageOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/rundiffusion-fal/juggernaut-flux/pro/image-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/rundiffusion-fal/juggernaut-flux/pro/image-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/rundiffusion-fal/juggernaut-flux/pro/image-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/JuggernautFluxProImageToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/rundiffusion-fal/juggernaut-flux/pro/image-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/JuggernautFluxProImageToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "rundiffusion-fal/juggernaut-flux/base/image-to-image", + "metadata": { + "display_name": "Juggernaut Flux Base", + "category": "image-to-image", + "description": "Juggernaut Base Flux by RunDiffusion is a drop-in replacement for Flux [Dev] that delivers sharper details, richer colors, and enhanced realism, while instantly boosting LoRAs and LyCORIS with full compatibility.", + "status": "active", + "tags": [ + "image generation" + ], + "updated_at": "2026-01-26T21:44:22.103Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/juggernaut-flux-base.webp", + "model_url": "https://fal.run/rundiffusion-fal/juggernaut-flux/base/image-to-image", + "license_type": "commercial", + "date": "2025-03-05T00:00:00.000Z", + "group": { + "key": "rundiffusion-juggernaut-flux", + "label": "Base Image to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for rundiffusion-fal/juggernaut-flux/base/image-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the rundiffusion-fal/juggernaut-flux/base/image-to-image queue.", + "x-fal-metadata": { + "endpointId": "rundiffusion-fal/juggernaut-flux/base/image-to-image", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/juggernaut-flux-base.webp", + "playgroundUrl": "https://fal.ai/models/rundiffusion-fal/juggernaut-flux/base/image-to-image", + "documentationUrl": "https://fal.ai/models/rundiffusion-fal/juggernaut-flux/base/image-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": 
"header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "JuggernautFluxBaseImageToImageInput": { + "title": "DevImageToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "a cat dressed as a wizard with a background of a mystic forest." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 4, + "description": "The number of images to generate.", + "default": 1 + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "image_url": { + "examples": [ + "https://fal.media/files/koala/Chls9L2ZnvuipUTEwlnJC.png" + ], + "title": "Image URL", + "type": "string", + "description": "The URL of the image to generate an image from." + }, + "strength": { + "minimum": 0.01, + "title": "Strength", + "type": "number", + "maximum": 1, + "description": "The strength of the initial image. 
Higher strength values are better for this model.", + "default": 0.95 + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance scale (CFG)", + "type": "number", + "maximum": 20, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 3.5 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "num_inference_steps": { + "minimum": 10, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps to perform.", + "default": 40 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + } + }, + "x-fal-order-properties": [ + "image_url", + "prompt", + "strength", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "JuggernautFluxBaseImageToImageOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/rundiffusion-fal/juggernaut-flux/base/image-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/rundiffusion-fal/juggernaut-flux/base/image-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/rundiffusion-fal/juggernaut-flux/base/image-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/JuggernautFluxBaseImageToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/rundiffusion-fal/juggernaut-flux/base/image-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/JuggernautFluxBaseImageToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/docres/dewarp", + "metadata": { + "display_name": "DocRes-dewarp", + "category": "image-to-image", + "description": "Enhance warped, folded documents with the superior quality of docres for sharper, clearer results.", + "status": "active", + "tags": [ + "image-enhancement" + ], + "updated_at": "2026-01-26T21:44:23.624Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/docres.webp", + "model_url": "https://fal.run/fal-ai/docres/dewarp", + "date": "2025-03-03T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/docres/dewarp", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/docres/dewarp queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/docres/dewarp", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/docres.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/docres/dewarp", + "documentationUrl": "https://fal.ai/models/fal-ai/docres/dewarp/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url."
+ }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "DocresDewarpInput": { + "title": "DocResInputDewarp", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/docres_ckpt/218_in.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of image to be used for relighting" + } + }, + "x-fal-order-properties": [ + "image_url", + "seed" + ], + "required": [ + "image_url" + ] + }, + "DocresDewarpOutput": { + "title": "DocResOutput", + "type": "object", + "properties": { + "image": { + "examples": [ + { + "height": 512, + "file_size": 423052, + "file_name": "36d3ca4791a647678b2ff01a35c87f5a.png", + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/docres_ckpt/Xssvg5K39QiD6mn9K5toF_f4942abeef8d4c7bbe236b59aed5e382.png", + "width": 512 + } + ], + "title": "Image", + "description": "The generated image file info.", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + } + }, + "x-fal-order-properties": [ + "image" + ], + "required": [ + "image" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "description": "Represents an image file.", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/docres/dewarp/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/docres/dewarp/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/docres/dewarp": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DocresDewarpInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/docres/dewarp/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DocresDewarpOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/docres", + "metadata": { + "display_name": "DocRes", + "category": "image-to-image", + "description": "Enhance low-resolution, blurred, shadowed documents with the superior quality of docres for sharper, clearer results.", + "status": "active", + "tags": [ + "image-enhancement" + ], + "updated_at": "2026-01-26T21:44:23.809Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/docres.webp", + "model_url": "https://fal.run/fal-ai/docres", + "date": "2025-03-03T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/docres", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/docres queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/docres", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/docres.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/docres", + "documentationUrl": "https://fal.ai/models/fal-ai/docres/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url."
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "DocresInput": { + "title": "DocResInput", + "type": "object", + "properties": { + "task": { + "enum": [ + "deshadowing", + "appearance", + "deblurring", + "binarization" + ], + "title": "Task", + "type": "string", + "description": "Task to perform" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/docres_ckpt/218_in.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of image to be used for relighting" + } + }, + "x-fal-order-properties": [ + "image_url", + "task", + "seed" + ], + "required": [ + "image_url", + "task" + ] + }, + "DocresOutput": { + "title": "DocResOutput", + "type": "object", + "properties": { + "image": { + "examples": [ + { + "height": 512, + "file_size": 423052, + "file_name": "36d3ca4791a647678b2ff01a35c87f5a.png", + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/docres_ckpt/Xssvg5K39QiD6mn9K5toF_f4942abeef8d4c7bbe236b59aed5e382.png", + "width": 512 + } + ], + "title": "Image", + "description": "The generated image file info.", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + } + }, + "x-fal-order-properties": [ + "image" + ], + "required": [ + "image" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "description": "Represents an image file.", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/docres/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/docres/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/docres": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DocresInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/docres/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DocresOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/swin2sr", + "metadata": { + "display_name": "SWIN2SR", + "category": "image-to-image", + "description": "Enhance low-resolution images with the superior quality of Swin2SR for sharper, clearer results.", + "status": "active", + "tags": [ + "image-enhancement" + ], + "updated_at": "2026-01-26T21:44:23.938Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/swin2sr.webp", + "model_url": "https://fal.run/fal-ai/swin2sr", + "date": "2025-02-28T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/swin2sr", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/swin2sr queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/swin2sr", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/swin2sr.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/swin2sr", + "documentationUrl": "https://fal.ai/models/fal-ai/swin2sr/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Swin2srInput": { + "title": "SwinSrInput", + "type": "object", + "properties": { + "task": { + "enum": [ + "classical_sr", + "compressed_sr", + "real_sr" + ], + "title": "Task", + "type": "string", + "description": "Task to perform", + "default": "classical_sr" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "seed to be used for generation" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/gallery/seoul.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "URL of image to be used for image enhancement" + } + }, + "x-fal-order-properties": [ + "image_url", + "seed", + "task" + ], + "required": [ + "image_url" + ] + }, + "Swin2srOutput": { + "title": "SwinSrOutput", + "type": "object", + "properties": { + "image": { + "title": "Image", + "description": "The generated image file info.", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + } + }, + "x-fal-order-properties": [ + "image" + ], + "required": [ + "image" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/swin2sr/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/swin2sr/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/swin2sr": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Swin2srInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/swin2sr/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Swin2srOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ideogram/v2a/remix", + "metadata": { + "display_name": "Ideogram V2A Remix", + "category": "image-to-image", + "description": "Create variations of existing images with Ideogram V2A Remix while maintaining creative control through prompt guidance.", + "status": "active", + "tags": [ + "realism", + "typography" + ], + "updated_at": "2026-01-26T21:44:24.061Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/ideogram.webp", + "model_url": "https://fal.run/fal-ai/ideogram/v2a/remix", + "license_type": "commercial", + "date": "2025-02-27T00:00:00.000Z", + "group": { + "key": "ideogram-v2a", + "label": "Image to Image (Remix)" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ideogram/v2a/remix", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ideogram/v2a/remix queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ideogram/v2a/remix", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/ideogram.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/ideogram/v2a/remix", + "documentationUrl": "https://fal.ai/models/fal-ai/ideogram/v2a/remix/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." 
+ }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "IdeogramV2aRemixInput": { + "x-fal-order-properties": [ + "prompt", + "image_url", + "aspect_ratio", + "strength", + "expand_prompt", + "seed", + "style", + "sync_mode" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "An ice field in north atlantic" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to remix the image with" + }, + "aspect_ratio": { + "enum": [ + "10:16", + "16:10", + "9:16", + "16:9", + "4:3", + "3:4", + "1:1", + "1:3", + "3:1", + "3:2", + "2:3" + ], + "description": "The aspect ratio of the generated image", + "type": "string", + "title": "Aspect Ratio", + "default": "1:1" + }, + "style": { + "enum": [ + "auto", + "general", + "realistic", + "design", + "render_3D", + "anime" + ], + "description": "The style of the generated image", + "type": "string", + "title": "Style", + "default": "auto" + }, + "expand_prompt": { + "description": "Whether to expand the prompt with MagicPrompt functionality.", + "type": "boolean", + "title": "Expand Prompt", + "default": true + }, + "image_url": { + "examples": [ + "https://fal.media/files/lion/FHOx4y4a0ef7Sgmo-sOUR_image.png" + ], + "title": "Image URL", + "type": "string", + "description": "The image URL to remix" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "strength": { + "minimum": 0.01, + "maximum": 1, + "type": "number", + "description": "Strength of the input image in the remix", + "title": "Strength", + "default": 0.8 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "Seed for the random number generator", + "title": "Seed" + } + }, + "title": "RemixImageInput", + "required": [ + "prompt", + "image_url" + ] + }, + "IdeogramV2aRemixOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://fal.media/files/monkey/cNaoxPl0YAWYb-QVBvO9F_image.png" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/File" + } + }, + "seed": { + "examples": [ + 123456 + ], + "title": "Seed", + "type": "integer", + "description": "Seed used for the random number generator" + } + }, + "title": "Output", + "required": [ + "images", + "seed" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided." 
+ }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file." + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ideogram/v2a/remix/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v2a/remix/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v2a/remix": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdeogramV2aRemixInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v2a/remix/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdeogramV2aRemixOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ideogram/v2a/turbo/remix", + "metadata": { + "display_name": "Ideogram V2A Turbo Remix", + "category": "image-to-image", + "description": "Rapidly create image variations with Ideogram V2A Turbo Remix. 
Fast and efficient reimagining of existing images while maintaining creative control through prompt guidance.", + "status": "active", + "tags": [ + "realism", + "typography" + ], + "updated_at": "2026-01-26T21:44:24.573Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/ideogram.webp", + "model_url": "https://fal.run/fal-ai/ideogram/v2a/turbo/remix", + "license_type": "commercial", + "date": "2025-02-27T00:00:00.000Z", + "group": { + "key": "ideogram-v2a", + "label": "Image to Image (Turbo Remix)" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ideogram/v2a/turbo/remix", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ideogram/v2a/turbo/remix queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ideogram/v2a/turbo/remix", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/ideogram.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/ideogram/v2a/turbo/remix", + "documentationUrl": "https://fal.ai/models/fal-ai/ideogram/v2a/turbo/remix/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "IdeogramV2aTurboRemixInput": { + "x-fal-order-properties": [ + "prompt", + "image_url", + "aspect_ratio", + "strength", + "expand_prompt", + "seed", + "style", + "sync_mode" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "An ice field in north atlantic" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to remix the image with" + }, + "aspect_ratio": { + "enum": [ + "10:16", + "16:10", + "9:16", + "16:9", + "4:3", + "3:4", + "1:1", + "1:3", + "3:1", + "3:2", + "2:3" + ], + "description": "The aspect ratio of the generated image", + "type": "string", + "title": "Aspect Ratio", + "default": "1:1" + }, + "style": { + "enum": [ + "auto", + "general", + "realistic", + "design", + "render_3D", + "anime" + ], + "description": "The style of the generated image", + "type": "string", + "title": "Style", + "default": "auto" + }, + "expand_prompt": { + "description": "Whether to expand the prompt with MagicPrompt functionality.", + "type": "boolean", + "title": "Expand Prompt", + "default": true + }, + "image_url": { + "examples": [ + "https://fal.media/files/lion/FHOx4y4a0ef7Sgmo-sOUR_image.png" + ], + "title": "Image URL", + "type": "string", + "description": "The image URL to remix" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "strength": { + "minimum": 0.01, + "maximum": 1, + "type": "number", + "description": "Strength of the input image in the remix", + "title": "Strength", + "default": 0.8 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "Seed for the random number generator", + "title": "Seed" + } + }, + "title": "RemixImageInput", + "required": [ + "prompt", + "image_url" + ] + }, + "IdeogramV2aTurboRemixOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://fal.media/files/monkey/cNaoxPl0YAWYb-QVBvO9F_image.png" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/File" + } + }, + "seed": { + "examples": [ + 123456 + ], + "title": "Seed", + "type": "integer", + "description": "Seed used for the random number generator" + } + }, + "title": "Output", + "required": [ + "images", + "seed" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file." 
+ }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ideogram/v2a/turbo/remix/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v2a/turbo/remix/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v2a/turbo/remix": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdeogramV2aTurboRemixInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v2a/turbo/remix/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdeogramV2aTurboRemixOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/evf-sam", + "metadata": { + "display_name": "EVF-SAM2 Segmentation", + "category": "image-to-image", + "description": "EVF-SAM2 combines natural language understanding with advanced segmentation capabilities, allowing you to precisely mask image regions using intuitive positive and negative text prompts.", + "status": "active", + "tags": [ + "segmentation", + "mask" + ], + "updated_at": "2026-01-26T21:44:24.822Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/web-examples/evf-sam2/evf-sam2.webp", + "model_url": "https://fal.run/fal-ai/evf-sam", + "date": "2025-02-26T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/evf-sam", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/evf-sam queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/evf-sam", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/web-examples/evf-sam2/evf-sam2.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/evf-sam", + "documentationUrl": 
"https://fal.ai/models/fal-ai/evf-sam/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "EvfSamInput": { + "title": "ImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Cat in the middle of the image" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate segmentation from." + }, + "use_grounding_dino": { + "title": "Use Grounding Dino", + "type": "boolean", + "description": "Use GroundingDINO instead of SAM for segmentation", + "default": false + }, + "semantic_type": { + "title": "Semantic Type", + "type": "boolean", + "description": "Enable semantic level segmentation for body parts, background or multi objects", + "default": false + }, + "fill_holes": { + "title": "Fill Holes", + "type": "boolean", + "description": "Fill holes in the mask using morphological operations", + "default": false + }, + "expand_mask": { + "minimum": 0, + "maximum": 20, + "type": "integer", + "title": "Expand Mask", + "description": "Expand/dilate the mask by specified pixels", + "default": 0 + }, + "mask_only": { + "title": "Mask Only", + "type": "boolean", + "description": "Output only the binary mask instead of masked image", + "default": true + }, + "revert_mask": { + "title": "Revert Mask", + "type": "boolean", + "description": "Invert the mask (background becomes foreground and vice versa)", + "default": false + }, + "blur_mask": { + "minimum": 0, + "maximum": 50, + "type": "integer", + "title": "Blur Mask", + "description": "Apply Gaussian blur to the mask. 
Value determines kernel size (must be odd number)", + "default": 0 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Areas to exclude from segmentation (will be subtracted from prompt results)" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/web-examples/evf-sam2/evfsam2-cat.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the input image" + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "semantic_type", + "image_url", + "mask_only", + "use_grounding_dino", + "revert_mask", + "blur_mask", + "expand_mask", + "fill_holes" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "EvfSamOutput": { + "title": "ImageOutput", + "type": "object", + "properties": { + "image": { + "title": "Image", + "description": "The segmented output image", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "image" + ], + "required": [ + "image" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/evf-sam/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/evf-sam/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/evf-sam": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/EvfSamInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/evf-sam/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/EvfSamOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ddcolor", + "metadata": { + "display_name": "DDColor", + "category": "image-to-image", + "description": "Bring colors into old or new black and white photos with DDColor.", + "status": "active", + "tags": [ + "image-recolorization", + "faces", + "utility" + ], + "updated_at": "2026-01-26T21:44:24.946Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/ddcolor.webp", + "model_url": "https://fal.run/fal-ai/ddcolor", + "date": "2025-02-26T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ddcolor", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ddcolor queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ddcolor", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/ddcolor.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/ddcolor", + "documentationUrl": "https://fal.ai/models/fal-ai/ddcolor/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "DdcolorInput": { + "x-fal-order-properties": [ + "image_url", + "seed" + ], + "type": "object", + "properties": { + "seed": { + "description": "seed to be used for generation", + "type": "integer", + "title": "Seed" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/gallery/Screenshot%202025-02-26%20154226.png" + ], + "description": "URL of image to be used for relighting", + "type": "string", + "title": "Image Url" + } + }, + "title": "DDColorInput", + "required": [ + "image_url" + ] + }, + "DdcolorOutput": { + "x-fal-order-properties": [ + "image" + ], + "type": "object", + "properties": { + "image": { + "examples": [ + { + "height": 512, + "file_size": 423052, + "file_name": "36d3ca4791a647678b2ff01a35c87f5a.png", + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/gallery/5fcaaac6d1344d998ebb9703102c6c63.png", + "width": 512 + } + ], + "description": "The generated image file info.", + "title": "Image", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + } + }, + "title": "DDColorOutput", + "required": [ + "image" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "type": "integer", + "title": "Height" + }, + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ddcolor/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ddcolor/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ddcolor": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DdcolorInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ddcolor/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DdcolorOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/sam2/auto-segment", + "metadata": { + "display_name": "Segment Anything Model 2", + "category": "image-to-image", + "description": "SAM 2 is a model for segmenting images automatically. It can return individual masks or a single mask for the entire image.", + "status": "active", + "tags": [ + "segmentation", + "mask" + ], + "updated_at": "2026-01-26T21:44:25.386Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/sam2.gif", + "model_url": "https://fal.run/fal-ai/sam2/auto-segment", + "date": "2025-02-25T00:00:00.000Z", + "group": { + "key": "sam2", + "label": "Auto Segmentation" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/sam2/auto-segment", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/sam2/auto-segment queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/sam2/auto-segment", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/sam2.gif", + "playgroundUrl": "https://fal.ai/models/fal-ai/sam2/auto-segment", + "documentationUrl": "https://fal.ai/models/fal-ai/sam2/auto-segment/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." 
+ }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Sam2AutoSegmentInput": { + "title": "SAM2AutomaticSegmentationInput", + "type": "object", + "properties": { + "points_per_side": { + "title": "Points Per Side", + "type": "integer", + "description": "Number of points to sample along each side of the image.", + "default": 32 + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "min_mask_region_area": { + "title": "Min Mask Region Area", + "type": "integer", + "description": "Minimum area of a mask region.", + "default": 100 + }, + "image_url": { + "examples": [ + "https://raw.githubusercontent.com/facebookresearch/segment-anything-2/main/notebooks/images/truck.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to be automatically segmented" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "pred_iou_thresh": { + "title": "Pred Iou Thresh", + "type": "number", + "description": "Threshold for predicted IOU score.", + "default": 0.88 + }, + "stability_score_thresh": { + "title": "Stability Score Thresh", + "type": "number", + "description": "Threshold for stability score.", + "default": 0.95 + } + }, + "x-fal-order-properties": [ + "image_url", + "sync_mode", + "output_format", + "points_per_side", + "pred_iou_thresh", + "stability_score_thresh", + "min_mask_region_area" + ], + "required": [ + "image_url" + ] + }, + "Sam2AutoSegmentOutput": { + "title": "SAM2AutomaticSegmentationOutput", + "type": "object", + "properties": { + "combined_mask": { + "title": "Combined Mask", + "description": "Combined segmentation mask.", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + }, + "individual_masks": { + "title": "Individual Masks", + "type": "array", + "description": "Individual segmentation masks.", + "items": { + "$ref": "#/components/schemas/Image" + } + } + }, + "x-fal-order-properties": [ + "combined_mask", + "individual_masks" + ], + "required": [ + "combined_mask", + "individual_masks" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." 
+ }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/sam2/auto-segment/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sam2/auto-segment/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/sam2/auto-segment": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sam2AutoSegmentInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sam2/auto-segment/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sam2AutoSegmentOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/drct-super-resolution", + "metadata": { + "display_name": "DRCT-Super-Resolution", + "category": "image-to-image", + "description": "Upscale your images with DRCT-Super-Resolution.", + "status": "active", + "tags": [ + "upscaling", + "high-res" + ], + "updated_at": "2026-01-26T21:44:25.714Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/drct.webp", + "model_url": "https://fal.run/fal-ai/drct-super-resolution", + "date": "2025-02-24T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/drct-super-resolution", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/drct-super-resolution queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/drct-super-resolution", + "category": "image-to-image", + "thumbnailUrl": 
"https://storage.googleapis.com/falserverless/gallery/drct.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/drct-super-resolution", + "documentationUrl": "https://fal.ai/models/fal-ai/drct-super-resolution/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "DrctSuperResolutionInput": { + "title": "Input", + "type": "object", + "properties": { + "upscale_factor": { + "examples": [ + 4 + ], + "title": "Upscaling Factor (Xs)", + "type": "integer", + "description": "Upscaling factor.", + "enum": [ + 4 + ], + "default": 4 + }, + "image_url": { + "examples": [ + "https://fal.media/files/rabbit/JlBgYUyQRS3zxiBu_B4fM.png", + "https://fal.media/files/monkey/e6RtJf_ue0vyWzeiEmTby.png", + "https://fal.media/files/monkey/A6HGsigx4mmvs-hJVoOZX.png" + ], + "title": "Image URL", + "type": "string", + "description": "URL of the image to upscale." + } + }, + "x-fal-order-properties": [ + "image_url", + "upscale_factor" + ], + "required": [ + "image_url" + ] + }, + "DrctSuperResolutionOutput": { + "title": "Output", + "type": "object", + "properties": { + "image": { + "title": "Image", + "description": "Upscaled image", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + } + }, + "x-fal-order-properties": [ + "image" + ], + "required": [ + "image" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/drct-super-resolution/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/drct-super-resolution/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/drct-super-resolution": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DrctSuperResolutionInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/drct-super-resolution/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DrctSuperResolutionOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/nafnet/deblur", + "metadata": { + "display_name": "NAFNet-deblur", + "category": "image-to-image", + "description": "Use NAFNet to fix issues like blurriness and noise in your images. 
This model specializes in image restoration and can help enhance the overall quality of your photography.", + "status": "active", + "tags": [ + "image-restoration", + "deblur", + "denoise" + ], + "updated_at": "2026-01-26T21:44:25.839Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/HyeESWwRpMtc-Q0fVsDdt_2de9ad23b7894d18abc770358c32eee7.webp", + "model_url": "https://fal.run/fal-ai/nafnet/deblur", + "date": "2025-02-21T00:00:00.000Z", + "group": { + "key": "nafnet", + "label": "Deblur" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/nafnet/deblur", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/nafnet/deblur queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/nafnet/deblur", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/HyeESWwRpMtc-Q0fVsDdt_2de9ad23b7894d18abc770358c32eee7.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/nafnet/deblur", + "documentationUrl": "https://fal.ai/models/fal-ai/nafnet/deblur/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "NafnetDeblurInput": { + "title": "NafnetInput", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "Seed to be used for generation" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/nafnet/blurry.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to be deblurred" + } + }, + "x-fal-order-properties": [ + "image_url", + "seed" + ], + "required": [ + "image_url" + ] + }, + "NafnetDeblurOutput": { + "title": "NafnetOutput", + "type": "object", + "properties": { + "image": { + "examples": [ + { + "file_size": 423052, + "height": 512, + "file_name": "36d3ca4791a647678b2ff01a35c87f5a.png", + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/nafnet/2cbfd460e25344a69fa8077808fb484f.png", + "width": 512 + } + ], + "title": "Image", + "description": "The generated image file info.", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + } + }, + "x-fal-order-properties": [ + "image" + ], + "required": [ + "image" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/nafnet/deblur/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)."
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/nafnet/deblur/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/nafnet/deblur": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/NafnetDeblurInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/nafnet/deblur/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/NafnetDeblurOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/nafnet/denoise", + "metadata": { + "display_name": "NAFNet-denoise", + "category": "image-to-image", + "description": "Use NAFNet to fix issues like blurriness and noise in your images. This model specializes in image restoration and can help enhance the overall quality of your photography.", + "status": "active", + "tags": [ + "image-restoration", + "deblur", + "denoise" + ], + "updated_at": "2026-01-26T21:44:25.963Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/HyeESWwRpMtc-Q0fVsDdt_2de9ad23b7894d18abc770358c32eee7.webp", + "model_url": "https://fal.run/fal-ai/nafnet/denoise", + "date": "2025-02-21T00:00:00.000Z", + "group": { + "key": "nafnet", + "label": "Denoise" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/nafnet/denoise", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/nafnet/denoise queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/nafnet/denoise", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/HyeESWwRpMtc-Q0fVsDdt_2de9ad23b7894d18abc770358c32eee7.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/nafnet/denoise", + "documentationUrl": "https://fal.ai/models/fal-ai/nafnet/denoise/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "NafnetDenoiseInput": { + "title": "NafnetInputDenoise", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "seed to be used for generation" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/nafnet/noisy.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of image to be used for relighting" + } + }, + "x-fal-order-properties": [ + "image_url", + "seed" + ], + "required": [ + "image_url" + ] + }, + "NafnetDenoiseOutput": { + "title": "NafnetOutputDenoise", + "type": "object", + "properties": { + "image": { + "examples": [ + { + "file_size": 423052, + "height": 512, + "file_name": "36d3ca4791a647678b2ff01a35c87f5a.png", + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/nafnet/7c97e55956324a7cbee00ac9652a931b.png", + "width": 512 + } + ], + "title": "Image", + "description": "The generated image file info.", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + } + }, + "x-fal-order-properties": [ + "image" + ], + "required": [ + "image" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/nafnet/denoise/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/nafnet/denoise/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/nafnet/denoise": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/NafnetDenoiseInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/nafnet/denoise/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/NafnetDenoiseOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/post-processing", + "metadata": { + "display_name": "Post Processing", + "category": "image-to-image", + "description": "Post Processing is an endpoint that can enhance images using a variety of techniques including grain, blur, sharpen, and more.", + "status": "active", + "tags": [ + "stylized", + "utility" + ], + "updated_at": "2026-01-26T21:44:05.439Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/web-examples/post-process/post-processing.webp", + "model_url": "https://fal.run/fal-ai/post-processing", + "license_type": "commercial", + "date": "2025-02-18T00:00:00.000Z", + "group": { + "key": "Post-Process", + "label": "Combine" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/post-processing", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/post-processing queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/post-processing", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/web-examples/post-process/post-processing.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/post-processing", + "documentationUrl": "https://fal.ai/models/fal-ai/post-processing/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PostProcessingInput": { + "title": "ImageProcessingInput", + "type": "object", + "properties": { + "blue_shift": { + "minimum": -20, + "maximum": 20, + "type": "integer", + "title": "Blue Shift", + "description": "Blue channel shift amount", + "default": 0 + }, + "vertex_y": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Vertex Y", + "description": "Vertex Y position", + "default": 0.5 + }, + "green_direction": { + "enum": [ + "horizontal", + "vertical" + ], + "title": "Green Direction", + "type": "string", + "description": "Green channel shift direction", + "default": "horizontal" + }, + "enable_glow": { + "title": "Enable Glow", + "type": "boolean", + "description": "Enable glow effect", + "default": false + }, + "dodge_burn_mode": { + "enum": [ + "dodge", + "burn", + "dodge_and_burn", + "burn_and_dodge", + "color_dodge", + "color_burn", + "linear_dodge", + "linear_burn" + ], + "title": "Dodge Burn Mode", + "type": "string", + "description": "Dodge and burn mode", + "default": "dodge" + }, + "glow_intensity": { + "minimum": 0, + "maximum": 5, + "type": "number", + "title": "Glow Intensity", + "description": "Glow intensity", + "default": 1 + }, + "blur_sigma": { + "minimum": 0.1, + "maximum": 10, + "type": "number", + "title": "Blur Sigma", + "description": "Sigma for Gaussian blur", + "default": 1 + }, + "desaturate_method": { + "enum": [ + "luminance (Rec.709)", + "luminance (Rec.601)", + "average", + "lightness" + ], + "title": "Desaturate Method", + "type": "string", + "description": "Desaturation method", + "default": "luminance (Rec.709)" + }, + "enable_blur": { + "title": "Enable Blur", + "type": "boolean", + "description": "Enable blur effect", + "default": false + }, + "blur_radius": { + "minimum": 0, + "maximum": 31, + "type": "integer", + "title": "Blur Radius", + "description": "Blur radius", + "default": 3 + }, + "grain_style": { + "enum": [ + "modern", + "analog", + "kodak", + "fuji", + "cinematic", + "newspaper" + ], + "title": "Grain Style", + "type": "string", + "description": "Style of film grain to apply", + "default": "modern" + }, + "cas_amount": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Cas Amount", + "description": "CAS sharpening amount", + "default": 0.8 + }, + "gamma": { + "minimum": 0.2, + "maximum": 2.2, + "type": "number", + "title": "Gamma", + "description": "Gamma adjustment", + "default": 1 + }, + "tint_mode": { + "enum": [ + "sepia", + "red", + "green", + "blue", + "cyan", + "magenta", + "yellow", + "purple", + "orange", + "warm", + "cool", + "lime", + "navy", + "vintage", + "rose", + "teal", + "maroon", + "peach", + "lavender", + "olive" + ], + "title": "Tint Mode", + "type": "string", + "description": "Tint color mode", + "default": "sepia" + }, + "blur_type": { + "enum": [ + "gaussian", + "kuwahara" + ], + "title": "Blur Type", + "type": "string", + "description": "Type of blur to apply", + "default": "gaussian" + }, + "enable_vignette": { + "title": "Enable Vignette", + "type": "boolean", + "description": 
"Enable vignette effect", + "default": false + }, + "dissolve_image_url": { + "title": "Dissolve Image Url", + "type": "string", + "description": "URL of second image for dissolve", + "default": "" + }, + "red_shift": { + "minimum": -20, + "maximum": 20, + "type": "integer", + "title": "Red Shift", + "description": "Red channel shift amount", + "default": 0 + }, + "enable_desaturate": { + "title": "Enable Desaturate", + "type": "boolean", + "description": "Enable desaturation effect", + "default": false + }, + "grain_intensity": { + "description": "Film grain intensity (when enabled)", + "type": "number", + "minimum": 0, + "maximum": 1, + "title": "Grain Intensity", + "multipleOf": 0.01, + "default": 0.4 + }, + "dodge_burn_intensity": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Dodge Burn Intensity", + "description": "Dodge and burn intensity", + "default": 0.5 + }, + "smart_sharpen_strength": { + "minimum": 0, + "maximum": 25, + "type": "number", + "title": "Smart Sharpen Strength", + "description": "Smart sharpen strength", + "default": 5 + }, + "red_direction": { + "enum": [ + "horizontal", + "vertical" + ], + "title": "Red Direction", + "type": "string", + "description": "Red channel shift direction", + "default": "horizontal" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/web-examples/post-process/postpro-input.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "URL of image to process" + }, + "vertex_x": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Vertex X", + "description": "Vertex X position", + "default": 0.5 + }, + "tint_strength": { + "minimum": 0.1, + "maximum": 1, + "type": "number", + "title": "Tint Strength", + "description": "Tint strength", + "default": 1 + }, + "enable_dissolve": { + "title": "Enable Dissolve", + "type": "boolean", + "description": "Enable dissolve effect", + "default": false + }, + "enable_parabolize": { + "title": "Enable Parabolize", + "type": "boolean", + "description": "Enable parabolize effect", + "default": false + }, + "enable_grain": { + "title": "Enable Grain", + "type": "boolean", + "description": "Enable film grain effect", + "default": false + }, + "solarize_threshold": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Solarize Threshold", + "description": "Solarize threshold", + "default": 0.5 + }, + "enable_sharpen": { + "title": "Enable Sharpen", + "type": "boolean", + "description": "Enable sharpen effect", + "default": false + }, + "enable_dodge_burn": { + "title": "Enable Dodge Burn", + "type": "boolean", + "description": "Enable dodge and burn effect", + "default": false + }, + "glow_radius": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Glow Radius", + "description": "Glow blur radius", + "default": 5 + }, + "sharpen_alpha": { + "minimum": 0.1, + "maximum": 5, + "type": "number", + "title": "Sharpen Alpha", + "description": "Sharpen strength (for basic mode)", + "default": 1 + }, + "enable_color_correction": { + "title": "Enable Color Correction", + "type": "boolean", + "description": "Enable color correction", + "default": false + }, + "contrast": { + "minimum": -100, + "maximum": 100, + "type": "number", + "title": "Contrast", + "description": "Contrast adjustment", + "default": 0 + }, + "enable_solarize": { + "title": "Enable Solarize", + "type": "boolean", + "description": "Enable solarize effect", + "default": false + }, + "noise_radius": { + "minimum": 1, + "maximum": 25, + "type": 
"integer", + "title": "Noise Radius", + "description": "Noise radius for smart sharpen", + "default": 7 + }, + "grain_scale": { + "minimum": 1, + "maximum": 100, + "type": "number", + "title": "Grain Scale", + "description": "Film grain scale (when enabled)", + "default": 10 + }, + "temperature": { + "minimum": -100, + "maximum": 100, + "type": "number", + "title": "Temperature", + "description": "Color temperature adjustment", + "default": 0 + }, + "brightness": { + "minimum": -100, + "maximum": 100, + "type": "number", + "title": "Brightness", + "description": "Brightness adjustment", + "default": 0 + }, + "blue_direction": { + "enum": [ + "horizontal", + "vertical" + ], + "title": "Blue Direction", + "type": "string", + "description": "Blue channel shift direction", + "default": "horizontal" + }, + "dissolve_factor": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Dissolve Factor", + "description": "Dissolve blend factor", + "default": 0.5 + }, + "sharpen_mode": { + "enum": [ + "basic", + "smart", + "cas" + ], + "title": "Sharpen Mode", + "type": "string", + "description": "Type of sharpening to apply", + "default": "basic" + }, + "vignette_strength": { + "minimum": 0, + "maximum": 10, + "type": "number", + "title": "Vignette Strength", + "description": "Vignette strength (when enabled)", + "default": 0.5 + }, + "sharpen_radius": { + "minimum": 1, + "maximum": 15, + "type": "integer", + "title": "Sharpen Radius", + "description": "Sharpen radius (for basic mode)", + "default": 1 + }, + "parabolize_coeff": { + "minimum": -10, + "maximum": 10, + "type": "number", + "title": "Parabolize Coeff", + "description": "Parabolize coefficient", + "default": 1 + }, + "saturation": { + "minimum": -100, + "maximum": 100, + "type": "number", + "title": "Saturation", + "description": "Saturation adjustment", + "default": 0 + }, + "enable_tint": { + "title": "Enable Tint", + "type": "boolean", + "description": "Enable color tint effect", + "default": false + }, + "green_shift": { + "minimum": -20, + "maximum": 20, + "type": "integer", + "title": "Green Shift", + "description": "Green channel shift amount", + "default": 0 + }, + "preserve_edges": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Preserve Edges", + "description": "Edge preservation factor", + "default": 0.75 + }, + "desaturate_factor": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Desaturate Factor", + "description": "Desaturation factor", + "default": 1 + }, + "smart_sharpen_ratio": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Smart Sharpen Ratio", + "description": "Smart sharpen blend ratio", + "default": 0.5 + }, + "enable_chromatic": { + "title": "Enable Chromatic", + "type": "boolean", + "description": "Enable chromatic aberration", + "default": false + } + }, + "x-fal-order-properties": [ + "image_url", + "enable_grain", + "grain_intensity", + "grain_scale", + "grain_style", + "enable_color_correction", + "temperature", + "brightness", + "contrast", + "saturation", + "gamma", + "enable_chromatic", + "red_shift", + "red_direction", + "green_shift", + "green_direction", + "blue_shift", + "blue_direction", + "enable_blur", + "blur_type", + "blur_radius", + "blur_sigma", + "enable_vignette", + "vignette_strength", + "enable_parabolize", + "parabolize_coeff", + "vertex_x", + "vertex_y", + "enable_tint", + "tint_strength", + "tint_mode", + "enable_dissolve", + "dissolve_image_url", + "dissolve_factor", + "enable_dodge_burn", + "dodge_burn_intensity", + 
"dodge_burn_mode", + "enable_glow", + "glow_intensity", + "glow_radius", + "enable_sharpen", + "sharpen_mode", + "sharpen_radius", + "sharpen_alpha", + "noise_radius", + "preserve_edges", + "smart_sharpen_strength", + "smart_sharpen_ratio", + "cas_amount", + "enable_solarize", + "solarize_threshold", + "enable_desaturate", + "desaturate_factor", + "desaturate_method" + ], + "required": [ + "image_url" + ] + }, + "PostProcessingOutput": { + "title": "ProcessedOutput", + "type": "object", + "properties": { + "images": { + "title": "Images", + "type": "array", + "description": "The processed images", + "items": { + "$ref": "#/components/schemas/Image" + } + } + }, + "x-fal-order-properties": [ + "images" + ], + "required": [ + "images" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/post-processing/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/post-processing/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/post-processing": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PostProcessingInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/post-processing/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PostProcessingOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flowedit", + "metadata": { + "display_name": "Flow-Edit", + "category": "image-to-image", + "description": "The model provides high-quality image editing capabilities.", + "status": "active", + "tags": [ + "editing" + ], + "updated_at": "2026-01-26T21:44:05.563Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/kangaroo/bAGbLA85i32P9R6-lEeEI_d0d975dbda3846fb89034ab9067ecee3.jpg", + "model_url": "https://fal.run/fal-ai/flowedit", + "license_type": "commercial", + "date": "2025-02-14T00:00:00.000Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flowedit", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flowedit queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flowedit", + "category": "image-to-image", + "thumbnailUrl": "https://fal.media/files/kangaroo/bAGbLA85i32P9R6-lEeEI_d0d975dbda3846fb89034ab9067ecee3.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flowedit", + "documentationUrl": "https://fal.ai/models/fal-ai/flowedit/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position."
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FloweditInput": { + "title": "FlowEditInput", + "type": "object", + "properties": { + "src_guidance_scale": { + "minimum": 0, + "maximum": 30, + "type": "number", + "title": "Source Guidance scale (CFG)", + "description": "Guidance scale for the source.", + "default": 1.5 + }, + "n_min": { + "title": "N Min", + "type": "integer", + "description": "Minimum step for improved style edits", + "default": 0 + }, + "n_max": { + "title": "N Max", + "type": "integer", + "description": "Controls the strength of the edit", + "default": 23 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/FlowEdit/lighthouse.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to be edited." + }, + "source_prompt": { + "examples": [ + "The image features a tall white lighthouse standing prominently\n on a hill, with a beautiful blue sky in the background. The lighthouse is illuminated\n by a bright light, making it a prominent landmark in the scene." + ], + "title": "Source Prompt", + "type": "string", + "description": "Prompt describing the source image." + }, + "tar_guidance_scale": { + "minimum": 0, + "maximum": 30, + "type": "number", + "title": "Target Guidance scale (CFG)", + "description": "Guidance scale for the target.", + "default": 5.5 + }, + "target_prompt": { + "examples": [ + "The image features Big ben clock tower standing prominently\n on a hill, with a beautiful blue sky in the background. The Big ben clock tower is illuminated\n by a bright light, making it a prominent landmark in the scene." + ], + "title": "Target Prompt", + "type": "string", + "description": "Prompt describing the image to be generated." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducible generation. If none is set, a random seed will be used." + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Steps", + "description": "Steps for which the model should run.", + "default": 28 + }, + "n_avg": { + "title": "N Avg", + "type": "integer", + "description": "Average step count", + "default": 1 + } + }, + "x-fal-order-properties": [ + "image_url", + "source_prompt", + "target_prompt", + "seed", + "num_inference_steps", + "src_guidance_scale", + "tar_guidance_scale", + "n_avg", + "n_max", + "n_min" + ], + "required": [ + "image_url", + "source_prompt", + "target_prompt" + ] + }, + "FloweditOutput": { + "title": "FlowEditOutput", + "type": "object", + "properties": { + "image": { + "examples": [ + { + "file_size": 423052, + "height": 1024, + "file_name": "36d3ca4791a647678b2ff01a35c87f5a.png", + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/model_tests/FlowEdit/aa5c3d028ad04800a54f70f928198d91.png", + "width": 1024 + } + ], + "title": "Image", + "description": "The generated image file info.", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image.
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "image", + "seed" + ], + "required": [ + "image", + "seed" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "description": "Represents an image file.", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flowedit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flowedit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flowedit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FloweditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flowedit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FloweditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-control-lora-depth/image-to-image", + "metadata": { + "display_name": "FLUX.1 [dev] Control LoRA Depth", + "category": "image-to-image", + "description": "FLUX Control LoRA Depth is a high-performance endpoint that uses a depth-map control image to transfer structure to the generated image, and a separate initial image to guide color.", + "status": "active", + "tags": [ + "lora", + "style transfer" + ], + "updated_at": "2026-01-26T21:44:28.299Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/control-lora-depth.jpeg", + "model_url": "https://fal.run/fal-ai/flux-control-lora-depth/image-to-image", + "license_type": "commercial", + "date": "2025-02-11T00:00:00.000Z", + "group": { + "key": "flux-control-lora-depth", + "label": "Image to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-control-lora-depth/image-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-control-lora-depth/image-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-control-lora-depth/image-to-image", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/control-lora-depth.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-control-lora-depth/image-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-control-lora-depth/image-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position."
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxControlLoraDepthImageToImageInput": { + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "loras", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "control_lora_image_url", + "control_lora_strength", + "image_url", + "strength" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A photo of a lion sitting on a stone bench" + ], + "description": "The prompt to generate an image from.", + "type": "string", + "title": "Prompt" + }, + "control_lora_strength": { + "minimum": 0, + "description": "The strength of the control lora.", + "type": "number", + "title": "Control Lora Strength", + "maximum": 2, + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image.", + "title": "Image Size" + }, + "loras": { + "description": "\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "examples": [], + "title": "Loras", + "default": [] + }, + "guidance_scale": { + "minimum": 0, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "type": "number", + "title": "Guidance scale (CFG)", + "maximum": 35, + "default": 3.5 + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "title": "Num Images", + "maximum": 4, + "default": 1 + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" + ], + "description": "URL of the image to use for inpainting or image-to-image.", + "type": "string", + "title": "Image Url" + }, + "strength": { + "minimum": 0.01, + "description": "The strength to use for inpainting/image-to-image. Only used if the image_url is provided. 1.0 completely remakes the image, while 0.0 preserves the original.", + "type": "number", + "title": "Strength", + "maximum": 1, + "default": 0.85 + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "control_lora_image_url": { + "examples": [ + "https://v3.fal.media/files/kangaroo/Cb7BeM7G4DauK_lWjzY3N_Celeb6.jpg" + ], + "description": "\n The image to use for control lora.
This is used to control the style of the generated image.\n ", + "type": "string", + "title": "Control Lora Image Url" + }, + "num_inference_steps": { + "minimum": 1, + "description": "The number of inference steps to perform.", + "type": "integer", + "title": "Num Inference Steps", + "maximum": 50, + "default": 28 + }, + "seed": { + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "type": "integer", + "title": "Seed" + } + }, + "title": "ImageToImageInput", + "required": [ + "prompt", + "control_lora_image_url", + "image_url" + ] + }, + "FluxControlLoraDepthImageToImageOutput": { + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "description": "The generated image files info.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "seed": { + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ", + "type": "integer", + "title": "Seed" + } + }, + "title": "Output", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Height", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Width", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "LoraWeight": { + "x-fal-order-properties": [ + "path", + "scale" + ], + "type": "object", + "properties": { + "path": { + "description": "URL or the path to the LoRA weights.", + "type": "string", + "title": "Path" + }, + "scale": { + "minimum": 0, + "description": "\n The scale of the LoRA weight. 
This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "type": "number", + "title": "Scale", + "maximum": 4, + "default": 1 + } + }, + "title": "LoraWeight", + "required": [ + "path" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "title": "Image", + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-control-lora-depth/image-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-control-lora-depth/image-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-control-lora-depth/image-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxControlLoraDepthImageToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-control-lora-depth/image-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxControlLoraDepthImageToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ben/v2/image", + "metadata": { + "display_name": "ben-v2-image", + "category": "image-to-image", + "description": "A fast and high quality model for image background removal.", + "status": "active", + "tags": [ + "background removal" + ], + "updated_at": "2026-01-26T21:44:05.814Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/Ben2/FepDP7G4K2nxlX0sq6AMB_945a221d079e447c9bcfea77f931cdc1.webp", + "model_url": "https://fal.run/fal-ai/ben/v2/image", + "license_type": "commercial", + "date": "2025-02-11T00:00:00.000Z", + "group": { + "key": "ben-v2", + "label": "Background Remover (image)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ben/v2/image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ben/v2/image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ben/v2/image", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/Ben2/FepDP7G4K2nxlX0sq6AMB_945a221d079e447c9bcfea77f931cdc1.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/ben/v2/image", + "documentationUrl": "https://fal.ai/models/fal-ai/ben/v2/image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "BenV2ImageInput": { + "title": "Ben2InputImage", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducible generation." + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/gallery/Ben2/arduino-uno-board-electronics-hand-600nw-1869855883.webp" + ], + "title": "Image Url", + "type": "string", + "description": "URL of image to be used for background removal" + } + }, + "x-fal-order-properties": [ + "image_url", + "seed" + ], + "required": [ + "image_url" + ] + }, + "BenV2ImageOutput": { + "title": "Ben2OutputImage", + "type": "object", + "properties": { + "image": { + "examples": [ + { + "height": 512, + "file_size": 423052, + "file_name": "zrZNETpI_ul2jonraqpxN_a57c3f3825d9418f8b3d39cde87c3310.png", + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/gallery/Ben2/zrZNETpI_ul2jonraqpxN_a57c3f3825d9418f8b3d39cde87c3310.png", + "width": 512 + } + ], + "title": "Image", + "description": "The output image after background removal.", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "image", + "seed" + ], + "required": [ + "image", + "seed" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ben/v2/image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ben/v2/image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ben/v2/image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BenV2ImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ben/v2/image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BenV2ImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-control-lora-canny/image-to-image", + "metadata": { + "display_name": "FLUX.1 [dev] Control LoRA Canny", + "category": "image-to-image", + "description": "FLUX Control LoRA Canny is a high-performance endpoint that uses a Canny edge-map control image to transfer structure to the generated image, and a separate initial image to guide color.", + "status": "active", + "tags": [ + "lora", + "style transfer" + ], + "updated_at": "2026-01-26T21:44:27.995Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/control-lora-canny.jpeg", + "model_url": "https://fal.run/fal-ai/flux-control-lora-canny/image-to-image", + "license_type": "commercial", + "date": "2025-02-11T00:00:00.000Z", + "group": { + "key": "flux-control-lora-canny", + "label": "Image to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-control-lora-canny/image-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-control-lora-canny/image-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-control-lora-canny/image-to-image", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/control-lora-canny.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-control-lora-canny/image-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-control-lora-canny/image-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE",
"IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxControlLoraCannyImageToImageInput": { + "title": "ImageToImageInput", + "type": "object", + "properties": { + "control_lora_strength": { + "minimum": 0, + "title": "Control Lora Strength", + "type": "number", + "maximum": 2, + "description": "The strength of the control lora.", + "default": 1 + }, + "prompt": { + "examples": [ + "A photo of a lion sitting on a stone bench" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image." + }, + "loras": { + "description": "\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "examples": [], + "title": "Loras", + "default": [] + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance scale (CFG)", + "type": "number", + "maximum": 35, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 3.5 + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 4, + "description": "The number of images to generate.", + "default": 1 + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of image to use for inpainting. or img2img" + }, + "strength": { + "minimum": 0.01, + "title": "Strength", + "type": "number", + "maximum": 1, + "description": "The strength to use for inpainting/image-to-image. Only used if the image_url is provided. 1.0 is completely remakes the image while 0.0 preserves the original.", + "default": 0.85 + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. 
This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ", + "default": false + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps to perform.", + "default": 28 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "control_lora_image_url": { + "title": "Control Lora Image Url", + "type": "string", + "description": "\n The image to use for control lora. This is used to control the style of the generated image.\n " + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "loras", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "control_lora_image_url", + "control_lora_strength", + "image_url", + "strength" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "FluxControlLoraCannyImageToImageOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "title": "Scale", + "type": "number", + "maximum": 4, + "description": "\n The scale of the LoRA weight. 
This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + } + }, + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-control-lora-canny/image-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-control-lora-canny/image-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-control-lora-canny/image-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxControlLoraCannyImageToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-control-lora-canny/image-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxControlLoraCannyImageToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ideogram/upscale", + "metadata": { + "display_name": "Ideogram Upscale", + "category": "image-to-image", + "description": "Ideogram Upscale enhances the resolution of the reference image by up to 2X and might enhance the reference image too. 
Optionally refine outputs with a prompt for guided improvements.", + "status": "active", + "tags": [ + "upscaling", + "high-res" + ], + "updated_at": "2026-01-26T21:44:28.684Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/ideogram.webp", + "model_url": "https://fal.run/fal-ai/ideogram/upscale", + "license_type": "commercial", + "date": "2025-02-10T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ideogram/upscale", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ideogram/upscale queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ideogram/upscale", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/ideogram.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/ideogram/upscale", + "documentationUrl": "https://fal.ai/models/fal-ai/ideogram/upscale/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "IdeogramUpscaleInput": { + "x-fal-order-properties": [ + "image_url", + "prompt", + "resemblance", + "detail", + "expand_prompt", + "seed", + "sync_mode" + ], + "type": "object", + "properties": { + "prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The prompt to upscale the image with", + "title": "Prompt", + "default": "" + }, + "detail": { + "minimum": 1, + "maximum": 100, + "type": "integer", + "description": "The detail of the upscaled image", + "title": "Detail", + "default": 50 + }, + "resemblance": { + "minimum": 1, + "maximum": 100, + "type": "integer", + "description": "The resemblance of the upscaled image to the original image", + "title": "Resemblance", + "default": 50 + }, + "expand_prompt": { + "description": "Whether to expand the prompt with MagicPrompt functionality.", + "type": "boolean", + "title": "Expand Prompt", + "default": false + }, + "image_url": { + "examples": [ + "https://fal.media/files/monkey/e6RtJf_ue0vyWzeiEmTby.png" + ], + "title": "Image URL", + "type": "string", + "description": "The image URL to upscale" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "Seed for the random number generator", + "title": "Seed" + } + }, + "title": "UpscaleImageInput", + "required": [ + "image_url" + ] + }, + "IdeogramUpscaleOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "file_size": 6548418, + "file_name": "image.png", + "content_type": "image/png", + "url": "https://fal.media/files/lion/DxTSV6683MLl4VPAVUHR3_image.png" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/File" + } + }, + "seed": { + "examples": [ + 123456 + ], + "title": "Seed", + "type": "integer", + "description": "Seed used for the random number generator" + } + }, + "title": "UpscaleOutput", + "required": [ + "images", + "seed" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file." 
+ }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ideogram/upscale/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ideogram/upscale/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ideogram/upscale": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdeogramUpscaleInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ideogram/upscale/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdeogramUpscaleOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/codeformer", + "metadata": { + "display_name": "CodeFormer", + "category": "image-to-image", + "description": "Fix distorted or blurred photos of people with CodeFormer.", + "status": "active", + "tags": [ + "image-restoration", + "faces", + "utility" + ], + "updated_at": "2026-01-26T21:44:29.069Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/code_thumbnail.webp", + "model_url": "https://fal.run/fal-ai/codeformer", + "date": "2025-01-31T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/codeformer", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/codeformer queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/codeformer", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/code_thumbnail.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/codeformer", + "documentationUrl": "https://fal.ai/models/fal-ai/codeformer/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal 
Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "CodeformerInput": { + "title": "CodeformerInput", + "type": "object", + "properties": { + "aligned": { + "description": "Should faces etc should be aligned.", + "type": "boolean", + "title": "Aligned", + "default": false + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/codeformer/codeformer_poor_1.jpeg" + ], + "description": "URL of image to be used for relighting", + "type": "string", + "title": "Image Url" + }, + "upscale_factor": { + "description": "Upscaling factor", + "type": "number", + "title": "Upscale Factor", + "default": 2 + }, + "fidelity": { + "description": "Weight of the fidelity factor.", + "type": "number", + "title": "Fidelity", + "default": 0.5 + }, + "face_upscale": { + "description": "Should faces be upscaled", + "type": "boolean", + "title": "Face Upscale", + "default": true + }, + "only_center_face": { + "description": "Should only center face be restored", + "type": "boolean", + "title": "Only Center Face", + "default": false + }, + "seed": { + "description": "Random seed for reproducible generation.", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "image_url", + "fidelity", + "upscale_factor", + "aligned", + "only_center_face", + "face_upscale", + "seed" + ], + "required": [ + "image_url" + ] + }, + "CodeformerOutput": { + "title": "ConformerOutput", + "type": "object", + "properties": { + "image": { + "examples": [ + { + "file_size": 423052, + "height": 512, + "file_name": "36d3ca4791a647678b2ff01a35c87f5a.png", + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/model_tests/codeformer/codeformer_restored_1.jpeg", + "width": 512 + } + ], + "title": "Image", + "description": "The generated image file info.", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + }, + "seed": { + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "image", + "seed" + ], + "required": [ + "image", + "seed" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/codeformer/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/codeformer/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/codeformer": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CodeformerInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/codeformer/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CodeformerOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling/v1-5/kolors-virtual-try-on", + "metadata": { + "display_name": "Kling Kolors Virtual TryOn v1.5", + "category": "image-to-image", + "description": "Kling Kolors Virtual TryOn v1.5 is a high quality image based Try-On endpoint which can be used for commercial try on.", + "status": "active", + "tags": [ + "try-on", + "fashion", + "clothing" + ], + "updated_at": "2026-01-26T21:44:29.773Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/kling-tryon.webp", + "model_url": "https://fal.run/fal-ai/kling/v1-5/kolors-virtual-try-on", + "license_type": "commercial", + "date": "2025-01-23T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling/v1-5/kolors-virtual-try-on", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling/v1-5/kolors-virtual-try-on queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling/v1-5/kolors-virtual-try-on", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/kling-tryon.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling/v1-5/kolors-virtual-try-on", + "documentationUrl": "https://fal.ai/models/fal-ai/kling/v1-5/kolors-virtual-try-on/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingV15KolorsVirtualTryOnInput": { + "x-fal-order-properties": [ + "human_image_url", + "garment_image_url", + "sync_mode" + ], + "type": "object", + "properties": { + "garment_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/leffa/tshirt_image.jpg" + ], + "description": "Url to the garment image.", + "type": "string", + "title": "Garment Image Url" + }, + "sync_mode": { + "description": "If true, the function will return the image in the response.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "human_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/leffa/person_image.jpg" + ], + "description": "Url for the human image.", + "type": "string", + "title": "Human Image Url" + } + }, + "title": "TryOnRequest", + "required": [ + "human_image_url", + "garment_image_url" + ] + }, + "KlingV15KolorsVirtualTryOnOutput": { + "x-fal-order-properties": [ + "image" + ], + "type": "object", + "properties": { + "image": { + "examples": [ + { + "file_size": 595094, + "height": 1024, + "file_name": "result.png", + "content_type": "image/png", + "url": "https://v3.fal.media/files/panda/Hoy3zhimzVKi3F2uoGBnh_result.png", + "width": 768 + } + ], + "description": "The output image.", + "title": "Image", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + } + }, + "title": "TryOnOutput", + "required": [ + "image" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling/v1-5/kolors-virtual-try-on/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling/v1-5/kolors-virtual-try-on/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kling/v1-5/kolors-virtual-try-on": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingV15KolorsVirtualTryOnInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling/v1-5/kolors-virtual-try-on/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingV15KolorsVirtualTryOnOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-lora-canny", + "metadata": { + "display_name": "FLUX.1 [dev] Canny with LoRAs", + "category": "image-to-image", + "description": "Utilize Flux.1 [dev] Controlnet to generate high-quality images with precise control over composition, style, and structure through advanced edge detection and guidance mechanisms.", + "status": "active", + "tags": [ + "controlnet", + "detection", + "lora", + "editing", + "composition" + ], + "updated_at": "2026-01-26T21:44:30.717Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/flux_lora.jpg", + "model_url": "https://fal.run/fal-ai/flux-lora-canny", + "license_type": "commercial", + "date": "2025-01-16T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-lora-canny", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-lora-canny queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-lora-canny", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/flux_lora.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-lora-canny", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-lora-canny/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxLoraCannyInput": { + "title": "CannyInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A blue owl." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image." + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://fal.media/files/kangaroo/eNSkRdVFzNvDkrrMjxFA3.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of image to use for canny input" + }, + "loras": { + "description": "\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "examples": [], + "title": "Loras", + "default": [] + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. 
This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ", + "default": false + }, + "guidance_scale": { + "minimum": 20, + "maximum": 40, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 30 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 28 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "loras", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "image_url" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "FluxLoraCannyOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Scale", + "description": "\n The scale of the LoRA weight. 
This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + } + }, + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-lora-canny/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-lora-canny/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-lora-canny": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxLoraCannyInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-lora-canny/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxLoraCannyOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-pro/v1/fill-finetuned", + "metadata": { + "display_name": "FLUX.1 [pro] Fill Fine-tuned", + "category": "image-to-image", + "description": "FLUX.1 [pro] Fill Fine-tuned is a high-performance endpoint for the FLUX.1 [pro] model with a fine-tuned LoRA that enables rapid transformation of existing images, delivering high-quality style transfers and image modifications with the core FLUX capabilities.", + "status": "active", + "tags": [ + "editing" + ], + "updated_at": "2026-01-26T21:44:30.846Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/fluxpro.jpg", + "model_url": "https://fal.run/fal-ai/flux-pro/v1/fill-finetuned", + "license_type": "commercial", + "date": "2025-01-16T00:00:00.000Z", + "group": { + "key": "flux-pro", + "label": "FLUX.1 [pro] Fill Fine-tuned" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-pro/v1/fill-finetuned", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-pro/v1/fill-finetuned queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-pro/v1/fill-finetuned", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/fluxpro.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-pro/v1/fill-finetuned", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-pro/v1/fill-finetuned/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxProV1FillFinetunedInput": { + "title": "FluxProFillFinetunedInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A knight in shining armour holding a greatshield with \"FAL\" on it" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to fill the masked part of the image." + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "description": "The number of images to generate.", + "maximum": 4, + "default": 1 + }, + "finetune_strength": { + "minimum": 0, + "title": "Fine-tune Strength", + "type": "number", + "description": "\n Controls finetune influence.\n Increase this value if your target concept isn't showing up strongly enough.\n The optimal setting depends on your finetune and prompt\n ", + "maximum": 2 + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "finetune_id": { + "title": "Fine-tune ID", + "type": "string", + "description": "References your specific model" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/flux-lora/example-images/knight.jpeg" + ], + "title": "Image URL", + "type": "string", + "description": "The image URL to generate an image from. Needs to match the dimensions of the mask." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "safety_tolerance": { + "enum": [ + "1", + "2", + "3", + "4", + "5", + "6" + ], + "title": "Safety Tolerance", + "type": "string", + "description": "The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive.", + "default": "2" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "mask_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/flux-lora/example-images/mask_knight.jpeg" + ], + "title": "Mask URL", + "type": "string", + "description": "The mask URL to inpaint the image. Needs to match the dimensions of the input image." + }, + "enhance_prompt": { + "title": "Enhance Prompt", + "type": "boolean", + "description": "Whether to enhance the prompt for better results.", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "seed", + "sync_mode", + "num_images", + "output_format", + "safety_tolerance", + "enhance_prompt", + "image_url", + "mask_url", + "finetune_id", + "finetune_strength" + ], + "required": [ + "prompt", + "image_url", + "mask_url", + "finetune_id", + "finetune_strength" + ] + }, + "FluxProV1FillFinetunedOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." 
+ }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/registry__image__fast_sdxl__models__Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "registry__image__fast_sdxl__models__Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-pro/v1/fill-finetuned/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-pro/v1/fill-finetuned/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-pro/v1/fill-finetuned": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxProV1FillFinetunedInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-pro/v1/fill-finetuned/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxProV1FillFinetunedOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/moondream-next/detection", + "metadata": { + "display_name": "MoonDreamNext Detection", + "category": "image-to-image", + "description": "MoonDreamNext Detection is a multimodal vision-language model for gaze detection, bbox detection, point detection, and more.", + "status": "active", + "tags": [ + "multimodal" + ], + "updated_at": "2026-01-26T21:44:32.267Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/web-examples/moondreamnext/moondream-next.webp", + "model_url": "https://fal.run/fal-ai/moondream-next/detection", + "license_type": "commercial", + "date": "2025-01-09T00:00:00.000Z", + "group": { + "key": "moondreamnext", + "label": "Detection" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/moondream-next/detection", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/moondream-next/detection queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/moondream-next/detection", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/web-examples/moondreamnext/moondream-next.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/moondream-next/detection", + "documentationUrl": "https://fal.ai/models/fal-ai/moondream-next/detection/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "MoondreamNextDetectionInput": { + "title": "DetectionInput", + "type": "object", + "properties": { + "detection_prompt": { + "examples": [ + "Person" + ], + "title": "Detection Prompt", + "type": "string", + "description": "Text description of what to detect" + }, + "use_ensemble": { + "title": "Use Ensemble", + "type": "boolean", + "description": "Whether to use ensemble for gaze detection", + "default": false + }, + "task_type": { + "enum": [ + "bbox_detection", + "point_detection", + "gaze_detection" + ], + "title": "Task Type", + "type": "string", + "description": "Type of detection to perform" + }, + "show_visualization": { + "title": "Show Visualization", + "type": "boolean", + "description": "Whether to show visualization for detection", + "default": true + }, + "combine_points": { + "title": "Combine Points", + "type": "boolean", + "description": "Whether to combine points into a single point for point detection. This has no effect for bbox detection or gaze detection.", + "default": false + }, + "image_url": { + "examples": [ + "https://llava-vl.github.io/static/images/monalisa.jpg" + ], + "title": "Image URL", + "type": "string", + "description": "Image URL to be processed" + } + }, + "x-fal-order-properties": [ + "image_url", + "task_type", + "detection_prompt", + "use_ensemble", + "combine_points", + "show_visualization" + ], + "required": [ + "image_url", + "task_type", + "detection_prompt" + ] + }, + "MoondreamNextDetectionOutput": { + "title": "DetectionOutput", + "type": "object", + "properties": { + "image": { + "title": "Output Image", + "description": "Output image with detection visualization", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + }, + "text_output": { + "title": "Text Output", + "type": "string", + "description": "Detection results as text" + } + }, + "x-fal-order-properties": [ + "image", + "text_output" + ], + "required": [ + "text_output" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/moondream-next/detection/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/moondream-next/detection/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/moondream-next/detection": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MoondreamNextDetectionInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/moondream-next/detection/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MoondreamNextDetectionOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/bria/expand", + "metadata": { + "display_name": "Bria Expand Image", + "category": "image-to-image", + "description": "Bria Expand expands images beyond their borders in high quality. Trained exclusively on licensed data for safe and risk-free commercial use. 
Access the model's source code and weights: https://bria.ai/contact-us", + "status": "active", + "tags": [ + "outpainting" + ], + "updated_at": "2026-01-26T21:44:33.468Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/bria.webp", + "model_url": "https://fal.run/fal-ai/bria/expand", + "license_type": "commercial", + "date": "2024-12-19T00:00:00.000Z", + "group": { + "key": "bria", + "label": "Expand Image" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/bria/expand", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/bria/expand queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/bria/expand", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/bria.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/bria/expand", + "documentationUrl": "https://fal.ai/models/fal-ai/bria/expand/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "BriaExpandInput": { + "title": "ImageExpansionInput", + "type": "object", + "properties": { + "prompt": { + "description": "Text on which you wish to base the image expansion. This parameter is optional. Bria currently supports prompts in English only, excluding special characters.", + "type": "string", + "title": "Prompt", + "default": "" + }, + "aspect_ratio": { + "enum": [ + "1:1", + "2:3", + "3:2", + "3:4", + "4:3", + "4:5", + "5:4", + "9:16", + "16:9" + ], + "description": "The desired aspect ratio of the final image. Will be used over original_image_size and original_image_location if provided.", + "type": "string", + "title": "Aspect Ratio" + }, + "original_image_location": { + "examples": [ + [ + 301, + -66 + ] + ], + "description": "The desired location of the original image, inside the full canvas. Provide the location of the upper left corner of the original image. The location can also be outside the canvas (the original image will be cropped). 
Will be ignored if aspect_ratio is provided.", + "type": "array", + "title": "Original Image Location", + "items": { + "type": "integer" + } + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/orange.png" + ], + "description": "The URL of the input image.", + "type": "string", + "title": "Image Url" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "original_image_size": { + "examples": [ + [ + 610, + 855 + ] + ], + "description": "The desired size of the original image, inside the full canvas. Ensure that the ratio of input image foreground or main subject to the canvas area is greater than 15% to achieve optimal results. Will be ignored if aspect_ratio is provided.", + "type": "array", + "title": "Original Image Size", + "items": { + "type": "integer" + } + }, + "canvas_size": { + "examples": [ + [ + 1200, + 674 + ] + ], + "description": "The desired size of the final image, after the expansion. Should have an area of less than 5000x5000 pixels.", + "type": "array", + "title": "Canvas Size", + "items": { + "type": "integer" + } + }, + "seed": { + "minimum": 0, + "description": "You can choose whether you want your generated expansion to be random or predictable. You can recreate the same result in the future by using the seed value of a result from the response. You can exclude this parameter if you are not interested in recreating your results. This parameter is optional.", + "type": "integer", + "title": "Seed", + "maximum": 2147483647 + }, + "negative_prompt": { + "description": "The negative prompt you would like to use to generate images.", + "type": "string", + "title": "Negative Prompt", + "default": "" + } + }, + "x-fal-order-properties": [ + "image_url", + "canvas_size", + "aspect_ratio", + "original_image_size", + "original_image_location", + "prompt", + "seed", + "negative_prompt", + "sync_mode" + ], + "required": [ + "image_url", + "canvas_size" + ] + }, + "BriaExpandOutput": { + "title": "ImageExpansionOutput", + "type": "object", + "properties": { + "image": { + "examples": [ + { + "file_size": 1471342, + "height": 674, + "file_name": "afa402a35ea742cdb5c3e219b2b19bfb.png", + "content_type": "image/png", + "url": "https://v3.fal.media/files/koala/8np-spgxxG-I1r3cjthRV_afa402a35ea742cdb5c3e219b2b19bfb.png", + "width": 1200 + } + ], + "description": "The generated image", + "title": "Image", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + }, + "seed": { + "description": "Seed value used for generation.", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "image", + "seed" + ], + "required": [ + "image", + "seed" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + 
"description": "The width of the image in pixels.", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/bria/expand/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bria/expand/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/bria/expand": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BriaExpandInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bria/expand/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BriaExpandOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/bria/genfill", + "metadata": { + "display_name": "Bria GenFill", + "category": "image-to-image", + "description": "Bria GenFill enables high-quality object addition or visual transformation. Trained exclusively on licensed data for safe and risk-free commercial use. 
Access the model's source code and weights: https://bria.ai/contact-us", + "status": "active", + "tags": [ + "image editing" + ], + "updated_at": "2026-01-26T21:44:09.715Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/bria.webp", + "model_url": "https://fal.run/fal-ai/bria/genfill", + "license_type": "commercial", + "date": "2024-12-19T00:00:00.000Z", + "group": { + "key": "bria", + "label": "GenFill" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/bria/genfill", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/bria/genfill queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/bria/genfill", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/bria.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/bria/genfill", + "documentationUrl": "https://fal.ai/models/fal-ai/bria/genfill/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "BriaGenfillInput": { + "title": "GenFillInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A red delicious cherry" + ], + "description": "The prompt you would like to use to generate images.", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "minimum": 1, + "description": "Number of Images to generate.", + "type": "integer", + "title": "Num Images", + "maximum": 4, + "default": 1 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/bria/bria_genfill_img.png" + ], + "description": "Input Image to erase from", + "type": "string", + "title": "Image Url" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "seed": { + "minimum": 0, + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "type": "integer", + "title": "Seed", + "maximum": 2147483647 + }, + "mask_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/bria/bria_genfill_mask.png" + ], + "description": "The URL of the binary mask image that represents the area that will be cleaned.", + "type": "string", + "title": "Mask Url" + }, + "negative_prompt": { + "description": "The negative prompt you would like to use to generate images.", + "type": "string", + "title": "Negative Prompt", + "default": "" + } + }, + "x-fal-order-properties": [ + "image_url", + "mask_url", + "prompt", + "negative_prompt", + "seed", + "num_images", + "sync_mode" + ], + "required": [ + "image_url", + "mask_url", + "prompt" + ] + }, + "BriaGenfillOutput": { + "title": "GenFillOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "file_size": 1064550, + "height": 768, + "file_name": "a0d138e6820c4ad58f1fd3c758f16047.png", + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/bria/bria_genfill_res.png", + "width": 1024 + } + ] + ], + "description": "Generated Images", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + } + }, + "x-fal-order-properties": [ + "images" + ], + "required": [ + "images" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/bria/genfill/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bria/genfill/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/bria/genfill": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BriaGenfillInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bria/genfill/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BriaGenfillOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-lora-fill", + "metadata": { + "display_name": "FLUX.1 [dev] Fill with LoRAs", + "category": "image-to-image", + "description": "FLUX.1 [dev] Fill is a high-performance endpoint for the FLUX.1 [pro] model that enables rapid transformation of existing images, delivering high-quality style transfers and image modifications with the core FLUX capabilities.", + "status": "active", + "tags": [ + "editing", + "lora" + ], + "updated_at": "2026-01-26T21:44:33.598Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/flux_lora.jpg", + "model_url": "https://fal.run/fal-ai/flux-lora-fill", + "license_type": "commercial", + "date": "2024-12-19T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-lora-fill", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-lora-fill queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-lora-fill", + 
"category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/flux_lora.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-lora-fill", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-lora-fill/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxLoraFillInput": { + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "loras", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "image_url", + "mask_url", + "paste_back", + "fill_image", + "resize_to_original" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A knight in shining armour holding a greatshield with 'FAL' on it" + ], + "description": "The prompt to generate an image from.", + "type": "string", + "title": "Prompt", + "default": "" + }, + "resize_to_original": { + "description": "Resizes the image back to the original size. Use when you wish to preserve the exact image size as the originally provided image.", + "type": "boolean", + "title": "Resize To Original", + "default": false + }, + "paste_back": { + "description": "Specifies whether to paste-back the original image onto to the non-inpainted areas of the output", + "type": "boolean", + "title": "Paste Back", + "default": true + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image.", + "title": "Image Size" + }, + "loras": { + "description": "\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "examples": [], + "title": "Loras", + "default": [] + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "guidance_scale": { + "minimum": 28, + "maximum": 35, + "type": "number", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "title": "Guidance scale (CFG)", + "default": 30 + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "description": "The number of images to generate. 
This is always set to 1 for streaming output.", + "title": "Num Images", + "default": 1 + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/flux-lora/example-images/knight.jpeg" + ], + "description": "URL of image to use for fill operation", + "type": "string", + "title": "Image Url" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "fill_image": { + "description": "Use an image fill input to fill in particular images into the masked area.", + "title": "Fill Image", + "allOf": [ + { + "$ref": "#/components/schemas/ImageFillInput" + } + ] + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "description": "The number of inference steps to perform.", + "title": "Num Inference Steps", + "default": 28 + }, + "mask_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/flux-lora/example-images/mask_knight.jpeg" + ], + "description": "\n The mask to area to Inpaint in.\n ", + "type": "string", + "title": "Mask Url" + }, + "seed": { + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "type": "integer", + "title": "Seed" + } + }, + "title": "FillInput", + "required": [ + "image_url", + "mask_url" + ] + }, + "FluxLoraFillOutput": { + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "description": "The generated image files info.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ", + "type": "integer", + "title": "Seed" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + } + }, + "title": "Output", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "description": "The height of the generated image.", + "title": "Height", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "description": "The width of the generated image.", + "title": "Width", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "LoraWeight": { + "x-fal-order-properties": [ + "path", + "scale" + ], + "type": "object", + "properties": { + "path": { + "description": "URL or the path to the LoRA weights.", + "type": "string", + "title": "Path" + }, + "scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "title": "Scale", + "default": 1 + } + }, + "title": "LoraWeight", + "required": [ + "path" + ] + }, + "ImageFillInput": { + "x-fal-order-properties": [ + "fill_image_url", + "in_context_fill", + "use_prompt" + ], + "type": "object", + "properties": { + "in_context_fill": { + "description": "Uses the provided fill image in context with the base image to fill in more faithfully. Will increase price.", + "type": "boolean", + "title": "In Context Fill", + "default": false + }, + "use_prompt": { + "description": "Whether to use the prompt as well in the generation, along with the redux image.", + "type": "boolean", + "title": "Use Prompt", + "default": false + }, + "fill_image_url": { + "anyOf": [ + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "string" + } + ], + "description": "URLs of images to be filled into the masked area.", + "title": "Fill Image Url" + } + }, + "title": "ImageFillInput" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "title": "Image", + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-lora-fill/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-lora-fill/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-lora-fill": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxLoraFillInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-lora-fill/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxLoraFillOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/bria/background/replace", + "metadata": { + "display_name": "Bria Background Replace", + "category": "image-to-image", + "description": "Bria Background Replace allows for efficient swapping of backgrounds in images via text prompts or reference image, delivering realistic and polished results. Trained exclusively on licensed data for safe and risk-free commercial use ", + "status": "active", + "tags": [ + "image editing" + ], + "updated_at": "2026-01-26T21:44:34.274Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/bria.webp", + "model_url": "https://fal.run/fal-ai/bria/background/replace", + "license_type": "commercial", + "date": "2024-12-19T00:00:00.000Z", + "group": { + "key": "bria", + "label": "Background Replace" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/bria/background/replace", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/bria/background/replace queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/bria/background/replace", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/bria.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/bria/background/replace", + "documentationUrl": "https://fal.ai/models/fal-ai/bria/background/replace/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "BriaBackgroundReplaceInput": { + "title": "BGReplaceInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Man leaning against a wall" + ], + "description": "The prompt you would like to use to generate images.", + "type": "string", + "title": "Prompt", + "minLength": 1 + }, + "num_images": { + "minimum": 1, + "description": "Number of Images to generate.", + "type": "integer", + "title": "Num Images", + "maximum": 4, + "default": 1 + }, + "ref_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/bria/bria_bg_replace_bg.jpg" + ], + "description": "The URL of the reference image to be used for generating the new background. Use \"\" to leave empty. Either ref_image_url or bg_prompt has to be provided but not both. If both ref_image_url and ref_image_file are provided, ref_image_url will be used. Accepted formats are jpeg, jpg, png, webp.", + "type": "string", + "title": "Ref Image Url", + "default": "" + }, + "refine_prompt": { + "description": "Whether to refine prompt", + "type": "boolean", + "title": "Refine Prompt", + "default": true + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/bria/bria_bg_replace_fg.jpg" + ], + "description": "Input Image to erase from", + "type": "string", + "title": "Image Url" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "fast": { + "description": "Whether to use the fast model", + "type": "boolean", + "title": "Fast", + "default": true + }, + "seed": { + "minimum": 0, + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "type": "integer", + "title": "Seed", + "maximum": 2147483647 + }, + "negative_prompt": { + "description": "The negative prompt you would like to use to generate images.", + "type": "string", + "title": "Negative Prompt", + "default": "" + } + }, + "x-fal-order-properties": [ + "image_url", + "ref_image_url", + "prompt", + "negative_prompt", + "refine_prompt", + "seed", + "fast", + "num_images", + "sync_mode" + ], + "required": [ + "image_url" + ] + }, + "BriaBackgroundReplaceOutput": { + "title": "BGReplaceOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/bria/bria_bg_replace_res.jpg" + } + ] + ], + "description": "The generated images", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "description": "Seed value used for generation.", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "images", + "seed" + ], + "required": [ + "images", + "seed" + ] + }, + "Image": { + "description": "Represents an 
image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/bria/background/replace/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bria/background/replace/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/bria/background/replace": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BriaBackgroundReplaceInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bria/background/replace/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BriaBackgroundReplaceOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/bria/eraser", + "metadata": { + "display_name": "Bria Eraser", + "category": "image-to-image", + "description": "Bria Eraser enables precise removal of unwanted objects from images while maintaining high-quality outputs. Trained exclusively on licensed data for safe and risk-free commercial use. Access the model's source code and weights: https://bria.ai/contact-us", + "status": "active", + "tags": [ + "image editing", + "object removal" + ], + "updated_at": "2026-01-26T21:44:33.854Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/bria.webp", + "model_url": "https://fal.run/fal-ai/bria/eraser", + "license_type": "commercial", + "date": "2024-12-19T00:00:00.000Z", + "group": { + "key": "bria", + "label": "Eraser" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/bria/eraser", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/bria/eraser queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/bria/eraser", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/bria.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/bria/eraser", + "documentationUrl": "https://fal.ai/models/fal-ai/bria/eraser/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "BriaEraserInput": { + "title": "EraserInput", + "type": "object", + "properties": { + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "preserve_alpha": { + "description": "\n If set to true, attempts to preserve the alpha channel of the input image.\n ", + "type": "boolean", + "title": "Preserve Alpha", + "default": false + }, + "mask_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/bria/bria_eraser_mask.png" + ], + "description": "The URL of the binary mask image that represents the area that will be cleaned.", + "type": "string", + "title": "Mask Url" + }, + "mask_type": { + "enum": [ + "manual", + "automatic" + ], + "description": "You can use this parameter to specify the type of the input mask from the list. The 'manual' option should be used in cases in which the mask was generated by a user (e.g. with a brush tool), and the 'automatic' mask type should be used when the mask was generated by an algorithm like 'SAM'.", + "type": "string", + "title": "Mask Type", + "default": "manual" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/bria/bria_eraser_img.png" + ], + "description": "Input Image to erase from", + "type": "string", + "title": "Image Url" + } + }, + "x-fal-order-properties": [ + "image_url", + "mask_url", + "mask_type", + "sync_mode", + "preserve_alpha" + ], + "required": [ + "image_url", + "mask_url" + ] + }, + "BriaEraserOutput": { + "title": "EraserOutput", + "type": "object", + "properties": { + "image": { + "examples": [ + { + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/bria/bria_eraser_res.png" + } + ], + "description": "The generated image", + "title": "Image", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + } + }, + "x-fal-order-properties": [ + "image" + ], + "required": [ + "image" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/bria/eraser/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bria/eraser/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/bria/eraser": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BriaEraserInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bria/eraser/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BriaEraserOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/bria/product-shot", + "metadata": { + "display_name": "Bria Product Shot", + "category": "image-to-image", + "description": "Place any product in any scenery with just a prompt or reference image while maintaining high integrity of the product. 
Trained exclusively on licensed data for safe and risk-free commercial use and optimized for eCommerce.", + "status": "active", + "tags": [ + "product photography" + ], + "updated_at": "2026-01-26T21:44:33.726Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/bria.webp", + "model_url": "https://fal.run/fal-ai/bria/product-shot", + "license_type": "commercial", + "date": "2024-12-19T00:00:00.000Z", + "group": { + "key": "bria", + "label": "Product Shot" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/bria/product-shot", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/bria/product-shot queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/bria/product-shot", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/bria.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/bria/product-shot", + "documentationUrl": "https://fal.ai/models/fal-ai/bria/product-shot/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "BriaProductShotInput": { + "title": "ProductShotInput", + "type": "object", + "properties": { + "ref_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/bria/bria_product_bg.jpg" + ], + "description": "The URL of the reference image to be used for generating the new scene or background for the product shot. Use \"\" to leave empty. Either ref_image_url or scene_description has to be provided but not both. If both ref_image_url and ref_image_file are provided, ref_image_url will be used. Accepted formats are jpeg, jpg, png, webp.", + "type": "string", + "title": "Ref Image Url", + "default": "" + }, + "manual_placement_selection": { + "enum": [ + "upper_left", + "upper_right", + "bottom_left", + "bottom_right", + "right_center", + "left_center", + "upper_center", + "bottom_center", + "center_vertical", + "center_horizontal" + ], + "description": "If you've selected placement_type=manual_placement, you should use this parameter to specify which placements/positions you would like to use from the list. You can select more than one placement in one request.", + "type": "string", + "title": "Manual Placement Selection", + "default": "bottom_center" + }, + "num_results": { + "minimum": 1, + "description": "The number of lifestyle product shots you would like to generate. 
You will get num_results x 10 results when placement_type=automatic and according to the number of required placements x num_results if placement_type=manual_placement.", + "type": "integer", + "title": "Num Results", + "maximum": 4, + "default": 1 + }, + "padding_values": { + "description": "The desired padding in pixels around the product, when using placement_type=manual_padding. The order of the values is [left, right, top, bottom]. For optimal results, the total number of pixels, including padding, should be around 1,000,000. It is recommended to first use the product cutout API, get the cutout and understand the size of the result, and then define the required padding and use the cutout as an input for this API.", + "type": "array", + "title": "Padding Values", + "items": { + "type": "integer" + } + }, + "shot_size": { + "description": "The desired size of the final product shot. For optimal results, the total number of pixels should be around 1,000,000. This parameter is only relevant when placement_type=automatic or placement_type=manual_placement.", + "type": "array", + "title": "Shot Size", + "items": { + "type": "integer" + }, + "default": [ + 1000, + 1000 + ] + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "placement_type": { + "enum": [ + "original", + "automatic", + "manual_placement", + "manual_padding" + ], + "description": "This parameter allows you to control the positioning of the product in the image. Choosing 'original' will preserve the original position of the product in the image. Choosing 'automatic' will generate results with the 10 recommended positions for the product. Choosing 'manual_placement' will allow you to select predefined positions (using the parameter 'manual_placement_selection'). Selecting 'manual_padding' will allow you to control the position and size of the image by defining the desired padding in pixels around the product.", + "type": "string", + "title": "Placement Type", + "default": "manual_placement" + }, + "original_quality": { + "description": "This flag is only relevant when placement_type=original. If true, the output image retains the original input image's size; otherwise, the image is scaled to 1 megapixel (1MP) while preserving its aspect ratio.", + "type": "boolean", + "title": "Original Quality", + "default": false + }, + "fast": { + "description": "Whether to use the fast model", + "type": "boolean", + "title": "Fast", + "default": true + }, + "optimize_description": { + "description": "Whether to optimize the scene description", + "type": "boolean", + "title": "Optimize Description", + "default": true + }, + "scene_description": { + "examples": [ + "on a rock, next to the ocean, dark theme" + ], + "description": "Text description of the new scene or background for the provided product shot. Bria currently supports prompts in English only, excluding special characters.", + "type": "string", + "title": "Scene Description" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/bria/bria_product_fg.jpg" + ], + "description": "The URL of the product shot to be placed in a lifestyle shot. If both image_url and image_file are provided, image_url will be used. Accepted formats are jpeg, jpg, png, webp. 
Maximum file size 12MB.", + "type": "string", + "title": "Image Url" + } + }, + "x-fal-order-properties": [ + "image_url", + "scene_description", + "ref_image_url", + "optimize_description", + "num_results", + "fast", + "placement_type", + "original_quality", + "shot_size", + "manual_placement_selection", + "padding_values", + "sync_mode" + ], + "required": [ + "image_url" + ] + }, + "BriaProductShotOutput": { + "title": "ProductShotOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/bria/bria_product_res.png" + } + ] + ], + "description": "The generated images", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + } + }, + "x-fal-order-properties": [ + "images" + ], + "required": [ + "images" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/bria/product-shot/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bria/product-shot/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/bria/product-shot": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BriaProductShotInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bria/product-shot/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BriaProductShotOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/bria/background/remove", + "metadata": { + "display_name": "Bria RMBG 2.0", + "category": "image-to-image", + "description": "Bria RMBG 2.0 enables seamless removal of backgrounds from images, ideal for professional editing tasks. Trained exclusively on licensed data for safe and risk-free commercial use. Model weights for commercial use are available here: https://share-eu1.hsforms.com/2GLpEVQqJTI2Lj7AMYwgfIwf4e04", + "status": "active", + "tags": [ + "background removal", + "image segmentation", + "high resolution", + "utility", + "rembg" + ], + "updated_at": "2026-01-26T21:44:09.585Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/panda/dfGL-2avwhhBgJB2FbHKB_fb529162dbcf4556a66e86079e22f856.jpg", + "model_url": "https://fal.run/fal-ai/bria/background/remove", + "license_type": "commercial", + "date": "2024-12-19T00:00:00.000Z", + "group": { + "key": "bria", + "label": "Remove Background" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/bria/background/remove", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/bria/background/remove queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/bria/background/remove", + "category": "image-to-image", + "thumbnailUrl": "https://fal.media/files/panda/dfGL-2avwhhBgJB2FbHKB_fb529162dbcf4556a66e86079e22f856.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/bria/background/remove", + "documentationUrl": "https://fal.ai/models/fal-ai/bria/background/remove/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "BriaBackgroundRemoveInput": { + "title": "BGRemoveInput", + "type": "object", + "properties": { + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "image_url": { + "examples": [ + "https://fal.media/files/panda/K5Rndvzmn1j-OI1VZXDVd.jpeg" + ], + "description": "Input Image to erase from", + "type": "string", + "title": "Image Url" + } + }, + "x-fal-order-properties": [ + "image_url", + "sync_mode" + ], + "required": [ + "image_url" + ] + }, + "BriaBackgroundRemoveOutput": { + "title": "BGRemoveOutput", + "type": "object", + "properties": { + "image": { + "examples": [ + { + "file_size": 1076276, + "height": 1024, + "file_name": "070c731993e949d993c10ef6283d335d.png", + "content_type": "image/png", + "url": "https://v3.fal.media/files/tiger/GQEMNjRyxSoza7N8LPPqb_070c731993e949d993c10ef6283d335d.png", + "width": 1024 + } + ], + "description": "The generated image", + "title": "Image", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + } + }, + "x-fal-order-properties": [ + "image" + ], + "required": [ + "image" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/bria/background/remove/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bria/background/remove/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/bria/background/remove": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BriaBackgroundRemoveInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bria/background/remove/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BriaBackgroundRemoveOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/cat-vton", + "metadata": { + "display_name": "try-on", + "category": "image-to-image", + "description": "Image based high quality Virtual Try-On", + "status": "active", + "tags": [ + "try-on", + "fashion", + "clothing" + ], + "updated_at": "2026-01-26T21:44:10.027Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/idm-vton.jpeg", + "model_url": "https://fal.run/fal-ai/cat-vton", + "license_type": "research", + "date": "2024-12-17T00:00:00.000Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/cat-vton", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/cat-vton queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/cat-vton", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/idm-vton.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/cat-vton", + "documentationUrl": "https://fal.ai/models/fal-ai/cat-vton/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "CatVtonInput": { + "title": "CATVTONInput", + "type": "object", + "properties": { + "garment_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/catvton/tshirt.jpg" + ], + "title": "Garment Image Url", + "type": "string", + "description": "Url to the garment image." + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "portrait_4_3" + }, + "human_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/catvton/man5.jpg" + ], + "title": "Human Image Url", + "type": "string", + "description": "Url for the human image." + }, + "cloth_type": { + "enum": [ + "upper", + "lower", + "overall", + "inner", + "outer" + ], + "title": "Cloth Type", + "type": "string", + "description": "\n Type of the Cloth to be tried on.\n\n Options:\n upper: Upper body cloth\n lower: Lower body cloth\n overall: Full body cloth\n inner: Inner cloth, like T-shirt inside a jacket\n outer: Outer cloth, like a jacket over a T-shirt\n ", + "examples": [ + "upper", + "lower", + "overall", + "inner", + "outer" + ] + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 2.5 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 30 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same input given to the same version of the model\n will output the same image every time.\n " + } + }, + "x-fal-order-properties": [ + "human_image_url", + "garment_image_url", + "cloth_type", + "image_size", + "num_inference_steps", + "guidance_scale", + "seed" + ], + "required": [ + "human_image_url", + "garment_image_url", + "cloth_type" + ] + }, + "CatVtonOutput": { + "title": "CATVTONOutput", + "type": "object", + "properties": { + "image": { + "title": "Image", + "description": "The output image.", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + } + }, + "x-fal-order-properties": [ + "image" + ], + "required": [ + "image" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 
4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "description": "Represents an image file.", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/cat-vton/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/cat-vton/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/cat-vton": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CatVtonInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/cat-vton/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CatVtonOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/leffa/pose-transfer", + "metadata": { + "display_name": "Leffa Pose Transfer", + "category": "image-to-image", + "description": "Leffa Pose Transfer is an endpoint for changing pose of an image with a reference image.", + "status": "active", + "tags": [ + "pose", + "utility" + ], + "updated_at": "2026-01-26T21:44:34.649Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/leffa-pose-transfer.webp", + "model_url": "https://fal.run/fal-ai/leffa/pose-transfer", + "license_type": "commercial", + "date": "2024-12-17T00:00:00.000Z", + "group": { + "key": "leffa", + "label": "Pose Transfer" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/leffa/pose-transfer", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/leffa/pose-transfer queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/leffa/pose-transfer", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/leffa-pose-transfer.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/leffa/pose-transfer", + "documentationUrl": "https://fal.ai/models/fal-ai/leffa/pose-transfer/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "LeffaPoseTransferInput": { + "title": "PoseTransferInput", + "type": "object", + "properties": { + "pose_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/leffa/person_image.jpg" + ], + "title": "Pose Image Url", + "type": "string", + "description": "Url for the human image." + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your input when generating the image.\n ", + "default": 2.5 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 50 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same input given to the same version of the model\n will output the same image every time.\n " + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "person_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/leffa/pose_image.jpg" + ], + "title": "Person Image Url", + "type": "string", + "description": "Url to the garment image." + } + }, + "x-fal-order-properties": [ + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "enable_safety_checker", + "output_format", + "pose_image_url", + "person_image_url" + ], + "required": [ + "pose_image_url", + "person_image_url" + ] + }, + "LeffaPoseTransferOutput": { + "title": "PoseTransferOutput", + "type": "object", + "properties": { + "image": { + "examples": [ + { + "height": 1024, + "content_type": "image/jpeg", + "url": "https://fal.media/files/tiger/y6ZwaYdP9Q92FnsJcSbYz.png", + "width": 768 + } + ], + "title": "Image", + "description": "The output image.", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed for the inference." + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "boolean", + "description": "Whether the image contains NSFW concepts." + } + }, + "x-fal-order-properties": [ + "image", + "seed", + "has_nsfw_concepts" + ], + "required": [ + "image", + "seed", + "has_nsfw_concepts" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." 
+ }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/leffa/pose-transfer/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/leffa/pose-transfer/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/leffa/pose-transfer": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LeffaPoseTransferInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/leffa/pose-transfer/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LeffaPoseTransferOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/leffa/virtual-tryon", + "metadata": { + "display_name": "Leffa Virtual TryOn", + "category": "image-to-image", + "description": "Leffa Virtual TryOn is a high quality image based Try-On endpoint which can be used for commercial try on.", + "status": "active", + "tags": [ + "try-on", + "fashion", + "clothing" + ], + "updated_at": "2026-01-26T21:44:34.526Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/leffa-vitrual-tryon.webp", + "model_url": "https://fal.run/fal-ai/leffa/virtual-tryon", + "license_type": "commercial", + "date": "2024-12-17T00:00:00.000Z", + "group": { + "key": "leffa", + "label": "Virtual TryOn" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/leffa/virtual-tryon", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/leffa/virtual-tryon queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/leffa/virtual-tryon", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/leffa-vitrual-tryon.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/leffa/virtual-tryon", + "documentationUrl": "https://fal.ai/models/fal-ai/leffa/virtual-tryon/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "LeffaVirtualTryonInput": { + "title": "VTONInput", + "type": "object", + "properties": { + "garment_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/leffa/tshirt_image.jpg" + ], + "title": "Garment Image Url", + "type": "string", + "description": "Url to the garment image." + }, + "human_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/leffa/person_image.jpg" + ], + "title": "Human Image Url", + "type": "string", + "description": "Url for the human image." + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ", + "default": false + }, + "garment_type": { + "enum": [ + "upper_body", + "lower_body", + "dresses" + ], + "title": "Garment Type", + "type": "string", + "description": "The type of the garment used for virtual try-on.", + "examples": [ + "upper_body", + "lower_body", + "dresses" + ] + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your input when generating the image.\n ", + "default": 2.5 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 50 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same input given to the same version of the model\n will output the same image every time.\n " + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + } + }, + "x-fal-order-properties": [ + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "enable_safety_checker", + "output_format", + "human_image_url", + "garment_image_url", + "garment_type" + ], + "required": [ + "human_image_url", + "garment_image_url", + "garment_type" + ] + }, + "LeffaVirtualTryonOutput": { + "title": "VTONOutput", + "type": "object", + "properties": { + "image": { + "examples": [ + { + "height": 1024, + "content_type": "image/jpeg", + "url": "https://fal.media/files/elephant/9NTQQNo9eyiQUSLa6cYBW.png", + "width": 768 + } + ], + "title": "Image", + "description": "The output image.", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed for the inference." + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "boolean", + "description": "Whether the image contains NSFW concepts." 
+ } + }, + "x-fal-order-properties": [ + "image", + "seed", + "has_nsfw_concepts" + ], + "required": [ + "image", + "seed", + "has_nsfw_concepts" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/leffa/virtual-tryon/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/leffa/virtual-tryon/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/leffa/virtual-tryon": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LeffaVirtualTryonInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/leffa/virtual-tryon/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LeffaVirtualTryonOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ideogram/v2/edit", + "metadata": { + "display_name": "Ideogram V2 Edit", + "category": "image-to-image", + "description": "Transform existing images with Ideogram V2's editing capabilities. Modify, adjust, and refine images while maintaining high fidelity and realistic outputs with precise prompt control.", + "status": "active", + "tags": [ + "realism", + "typography" + ], + "updated_at": "2026-01-26T21:44:35.274Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/ideogram.webp", + "model_url": "https://fal.run/fal-ai/ideogram/v2/edit", + "license_type": "commercial", + "date": "2024-12-14T00:00:00.000Z", + "group": { + "key": "ideogram", + "label": "Text to Image (Edit)" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ideogram/v2/edit", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ideogram/v2/edit queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ideogram/v2/edit", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/ideogram.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/ideogram/v2/edit", + "documentationUrl": "https://fal.ai/models/fal-ai/ideogram/v2/edit/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "IdeogramV2EditInput": { + "x-fal-order-properties": [ + "prompt", + "image_url", + "mask_url", + "seed", + "style", + "expand_prompt", + "sync_mode" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A knight in shining armour holding a greatshield with \"FAL\" on it" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to fill the masked part of the image." + }, + "style": { + "enum": [ + "auto", + "general", + "realistic", + "design", + "render_3D", + "anime" + ], + "description": "The style of the generated image", + "type": "string", + "title": "Style", + "default": "auto" + }, + "expand_prompt": { + "description": "Whether to expand the prompt with MagicPrompt functionality.", + "type": "boolean", + "title": "Expand Prompt", + "default": true + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/flux-lora/example-images/knight.jpeg" + ], + "title": "Image URL", + "type": "string", + "description": "The image URL to generate an image from. Needs to match the dimensions of the mask." + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "Seed for the random number generator", + "title": "Seed" + }, + "mask_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/flux-lora/example-images/mask_knight.jpeg" + ], + "title": "Mask URL", + "type": "string", + "description": "The mask URL to inpaint the image. Needs to match the dimensions of the input image." + } + }, + "title": "EditImageInput", + "required": [ + "prompt", + "image_url", + "mask_url" + ] + }, + "IdeogramV2EditOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://fal.media/files/monkey/cNaoxPl0YAWYb-QVBvO9F_image.png" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/File" + } + }, + "seed": { + "examples": [ + 123456 + ], + "title": "Seed", + "type": "integer", + "description": "Seed used for the random number generator" + } + }, + "title": "Output", + "required": [ + "images", + "seed" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file." 
+ }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ideogram/v2/edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v2/edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v2/edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdeogramV2EditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v2/edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdeogramV2EditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ideogram/v2/turbo/edit", + "metadata": { + "display_name": "Ideogram V2 Turbo Edit", + "category": "image-to-image", + "description": "Edit images faster with Ideogram V2 Turbo. 
Quick modifications and adjustments while preserving the high-quality standards and realistic outputs of Ideogram.", + "status": "active", + "tags": [ + "realism", + "typography" + ], + "updated_at": "2026-01-26T21:44:35.772Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/ideogram.webp", + "model_url": "https://fal.run/fal-ai/ideogram/v2/turbo/edit", + "license_type": "commercial", + "date": "2024-12-04T00:00:00.000Z", + "group": { + "key": "ideogram-turbo", + "label": "Text to Image (Turbo Edit)" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ideogram/v2/turbo/edit", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ideogram/v2/turbo/edit queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ideogram/v2/turbo/edit", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/ideogram.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/ideogram/v2/turbo/edit", + "documentationUrl": "https://fal.ai/models/fal-ai/ideogram/v2/turbo/edit/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "IdeogramV2TurboEditInput": { + "x-fal-order-properties": [ + "prompt", + "image_url", + "mask_url", + "seed", + "style", + "expand_prompt", + "sync_mode" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A knight in shining armour holding a greatshield with \"FAL\" on it" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to fill the masked part of the image." + }, + "style": { + "enum": [ + "auto", + "general", + "realistic", + "design", + "render_3D", + "anime" + ], + "description": "The style of the generated image", + "type": "string", + "title": "Style", + "default": "auto" + }, + "expand_prompt": { + "description": "Whether to expand the prompt with MagicPrompt functionality.", + "type": "boolean", + "title": "Expand Prompt", + "default": true + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/flux-lora/example-images/knight.jpeg" + ], + "title": "Image URL", + "type": "string", + "description": "The image URL to generate an image from. Needs to match the dimensions of the mask." 
+ }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "Seed for the random number generator", + "title": "Seed" + }, + "mask_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/flux-lora/example-images/mask_knight.jpeg" + ], + "title": "Mask URL", + "type": "string", + "description": "The mask URL to inpaint the image. Needs to match the dimensions of the input image." + } + }, + "title": "EditImageInput", + "required": [ + "prompt", + "image_url", + "mask_url" + ] + }, + "IdeogramV2TurboEditOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://fal.media/files/monkey/cNaoxPl0YAWYb-QVBvO9F_image.png" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/File" + } + }, + "seed": { + "examples": [ + 123456 + ], + "title": "Seed", + "type": "integer", + "description": "Seed used for the random number generator" + } + }, + "title": "Output", + "required": [ + "images", + "seed" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file." + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ideogram/v2/turbo/edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v2/turbo/edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v2/turbo/edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdeogramV2TurboEditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v2/turbo/edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdeogramV2TurboEditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ideogram/v2/turbo/remix", + "metadata": { + "display_name": "Ideogram V2 Turbo Remix", + "category": "image-to-image", + "description": "Rapidly create image variations with Ideogram V2 Turbo Remix. Fast and efficient reimagining of existing images while maintaining creative control through prompt guidance.", + "status": "active", + "tags": [ + "realism", + "typography" + ], + "updated_at": "2026-01-26T21:44:35.648Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/ideogram.webp", + "model_url": "https://fal.run/fal-ai/ideogram/v2/turbo/remix", + "license_type": "commercial", + "date": "2024-12-04T00:00:00.000Z", + "group": { + "key": "ideogram-turbo", + "label": "Text to Image (Turbo Remix)" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ideogram/v2/turbo/remix", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ideogram/v2/turbo/remix queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ideogram/v2/turbo/remix", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/ideogram.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/ideogram/v2/turbo/remix", + "documentationUrl": "https://fal.ai/models/fal-ai/ideogram/v2/turbo/remix/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "IdeogramV2TurboRemixInput": { + "x-fal-order-properties": [ + "prompt", + "image_url", + "aspect_ratio", + "strength", + "expand_prompt", + "seed", + "style", + "sync_mode" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "An ice field in north atlantic" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to remix the image with" + }, + "aspect_ratio": { + "enum": [ + "10:16", + "16:10", + "9:16", + "16:9", + "4:3", + "3:4", + "1:1", + "1:3", + "3:1", + "3:2", + "2:3" + ], + "description": "The aspect ratio of the generated image", + "type": "string", + "title": "Aspect Ratio", + "default": "1:1" + }, + "style": { + "enum": [ + "auto", + "general", + "realistic", + "design", + "render_3D", + "anime" + ], + "description": "The style of the generated image", + "type": "string", + "title": "Style", + "default": "auto" + }, + "expand_prompt": { + "description": "Whether to expand the prompt with MagicPrompt functionality.", + "type": "boolean", + "title": "Expand Prompt", + "default": true + }, + "image_url": { + "examples": [ + "https://fal.media/files/lion/FHOx4y4a0ef7Sgmo-sOUR_image.png" + ], + "title": "Image URL", + "type": "string", + "description": "The image URL to remix" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "strength": { + "minimum": 0.01, + "maximum": 1, + "type": "number", + "description": "Strength of the input image in the remix", + "title": "Strength", + "default": 0.8 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "Seed for the random number generator", + "title": "Seed" + } + }, + "title": "RemixImageInput", + "required": [ + "prompt", + "image_url" + ] + }, + "IdeogramV2TurboRemixOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://fal.media/files/monkey/cNaoxPl0YAWYb-QVBvO9F_image.png" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/File" + } + }, + "seed": { + "examples": [ + 123456 + ], + "title": "Seed", + "type": "integer", + "description": "Seed used for the random number generator" + } + }, + "title": "Output", + "required": [ + "images", + "seed" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file." 
+ }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ideogram/v2/turbo/remix/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v2/turbo/remix/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v2/turbo/remix": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdeogramV2TurboRemixInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v2/turbo/remix/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdeogramV2TurboRemixOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ideogram/v2/remix", + "metadata": { + "display_name": "Ideogram V2 Remix", + "category": "image-to-image", + "description": "Reimagine existing images with Ideogram V2's remix feature. 
Create variations and adaptations while preserving core elements and adding new creative directions through prompt guidance.", + "status": "active", + "tags": [ + "realism", + "typography" + ], + "updated_at": "2026-01-26T21:44:35.897Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/ideogram.webp", + "model_url": "https://fal.run/fal-ai/ideogram/v2/remix", + "license_type": "commercial", + "date": "2024-12-04T00:00:00.000Z", + "group": { + "key": "ideogram", + "label": "Text to Image (Remix)" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ideogram/v2/remix", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ideogram/v2/remix queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ideogram/v2/remix", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/ideogram.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/ideogram/v2/remix", + "documentationUrl": "https://fal.ai/models/fal-ai/ideogram/v2/remix/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "IdeogramV2RemixInput": { + "x-fal-order-properties": [ + "prompt", + "image_url", + "aspect_ratio", + "strength", + "expand_prompt", + "seed", + "style", + "sync_mode" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "An ice field in north atlantic" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to remix the image with" + }, + "aspect_ratio": { + "enum": [ + "10:16", + "16:10", + "9:16", + "16:9", + "4:3", + "3:4", + "1:1", + "1:3", + "3:1", + "3:2", + "2:3" + ], + "description": "The aspect ratio of the generated image", + "type": "string", + "title": "Aspect Ratio", + "default": "1:1" + }, + "style": { + "enum": [ + "auto", + "general", + "realistic", + "design", + "render_3D", + "anime" + ], + "description": "The style of the generated image", + "type": "string", + "title": "Style", + "default": "auto" + }, + "expand_prompt": { + "description": "Whether to expand the prompt with MagicPrompt functionality.", + "type": "boolean", + "title": "Expand Prompt", + "default": true + }, + "image_url": { + "examples": [ + "https://fal.media/files/lion/FHOx4y4a0ef7Sgmo-sOUR_image.png" + ], + "title": "Image URL", + "type": "string", + "description": "The image URL to remix" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "strength": { + "minimum": 0.01, + "maximum": 1, + "type": "number", + "description": "Strength of the input image in the remix", + "title": "Strength", + "default": 0.8 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "Seed for the random number generator", + "title": "Seed" + } + }, + "title": "RemixImageInput", + "required": [ + "prompt", + "image_url" + ] + }, + "IdeogramV2RemixOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://fal.media/files/monkey/cNaoxPl0YAWYb-QVBvO9F_image.png" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/File" + } + }, + "seed": { + "examples": [ + 123456 + ], + "title": "Seed", + "type": "integer", + "description": "Seed used for the random number generator" + } + }, + "title": "Output", + "required": [ + "images", + "seed" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file." 
+ }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ideogram/v2/remix/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v2/remix/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v2/remix": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdeogramV2RemixInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v2/remix/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdeogramV2RemixOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux/schnell/redux", + "metadata": { + "display_name": "FLUX.1 [schnell] Redux", + "category": "image-to-image", + "description": "FLUX.1 [schnell] Redux is a high-performance endpoint for the FLUX.1 [schnell] model that enables rapid transformation of existing images, delivering high-quality style transfers and image modifications with the core FLUX capabilities.", + "status": "active", + "tags": [ + "style transfer" + ], + "updated_at": "2026-01-26T21:44:36.395Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/flux-schnell-thumb.webp", + "model_url": "https://fal.run/fal-ai/flux/schnell/redux", + "github_url": "https://www.apache.org/licenses/LICENSE-2.0.txt", + "license_type": "commercial", + "date": "2024-11-27T00:00:00.000Z", + "group": { + "key": "flux-1", + "label": "Image to Image [schnell] Redux" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux/schnell/redux", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux/schnell/redux queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux/schnell/redux", + "category": 
"image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/flux-schnell-thumb.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux/schnell/redux", + "documentationUrl": "https://fal.ai/models/fal-ai/flux/schnell/redux/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxSchnellReduxInput": { + "title": "SchnellReduxInput", + "type": "object", + "properties": { + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "title": "Num Images", + "maximum": 4, + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image.", + "title": "Image Size", + "default": "landscape_4_3" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "description": "The speed of the generation. 
The higher the speed, the faster the generation.", + "type": "string", + "title": "Acceleration", + "default": "none" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://fal.media/files/kangaroo/acQvq-Kmo2lajkgvcEHdv.png" + ], + "description": "The URL of the image to generate an image from.", + "type": "string", + "title": "Image URL" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "title": "Seed" + }, + "num_inference_steps": { + "minimum": 1, + "description": "The number of inference steps to perform.", + "type": "integer", + "title": "Num Inference Steps", + "maximum": 12, + "default": 4 + } + }, + "x-fal-order-properties": [ + "image_url", + "num_inference_steps", + "image_size", + "seed", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "acceleration" + ], + "required": [ + "image_url" + ] + }, + "FluxSchnellReduxOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "description": "The generated image files info.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "seed": { + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "title": "Height", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "title": "Width", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux/schnell/redux/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux/schnell/redux/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux/schnell/redux": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxSchnellReduxInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux/schnell/redux/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxSchnellReduxOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-pro/v1.1/redux", + "metadata": { + "display_name": "FLUX1.1 [pro] Redux", + "category": "image-to-image", + "description": "FLUX1.1 [pro] Redux is a high-performance endpoint for the FLUX1.1 [pro] model that enables rapid transformation of existing images, delivering high-quality style transfers and image modifications with the core FLUX capabilities.", + "status": "active", + "tags": [ + "style transfer" + ], + "updated_at": "2026-01-26T21:44:36.521Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/turbo_thumbnail.jpg", + "model_url": "https://fal.run/fal-ai/flux-pro/v1.1/redux", + "license_type": "commercial", + "date": "2024-11-21T00:00:00.000Z", + "group": { + "key": "flux-pro", + "label": "FLUX 1.1 [pro] Redux" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-pro/v1.1/redux", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-pro/v1.1/redux queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-pro/v1.1/redux", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/turbo_thumbnail.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-pro/v1.1/redux", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-pro/v1.1/redux/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxProV11ReduxInput": { + "title": "FluxProRedux", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from.", + "default": "" + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "description": "The number of images to generate.", + "maximum": 4, + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "landscape_4_3" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://fal.media/files/kangaroo/acQvq-Kmo2lajkgvcEHdv.png" + ], + "title": "Image URL", + "type": "string", + "description": "The image URL to generate an image from." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "safety_tolerance": { + "enum": [ + "1", + "2", + "3", + "4", + "5", + "6" + ], + "title": "Safety Tolerance", + "type": "string", + "description": "The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive.", + "default": "2" + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance scale (CFG)", + "type": "number", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "maximum": 20, + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "description": "The number of inference steps to perform.", + "maximum": 50, + "default": 28 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "enhance_prompt": { + "title": "Enhance Prompt", + "type": "boolean", + "description": "Whether to enhance the prompt for better results.", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "output_format", + "safety_tolerance", + "enhance_prompt", + "image_url" + ], + "required": [ + "image_url" + ] + }, + "FluxProV11ReduxOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image."
+ }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/registry__image__fast_sdxl__models__Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "registry__image__fast_sdxl__models__Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-pro/v1.1/redux/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-pro/v1.1/redux/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-pro/v1.1/redux": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxProV11ReduxInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-pro/v1.1/redux/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxProV11ReduxOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux/dev/redux", + "metadata": { + "display_name": "FLUX.1 [dev] Redux", + "category": "image-to-image", + "description": "FLUX.1 [dev] Redux is a high-performance endpoint for the FLUX.1 [dev] model that enables rapid transformation of existing images, delivering high-quality style transfers and image modifications with the core FLUX capabilities.", + "status": "active", + "tags": [ + "" + ], + "updated_at": "2026-01-26T21:44:11.778Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/flux-dev-thumb.webp", + "model_url": "https://fal.run/fal-ai/flux/dev/redux", + "license_type": "commercial", + "date": "2024-11-21T00:00:00.000Z", + "group": { + "key": "flux-1", + "label": "Image to Image [dev] Redux" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux/dev/redux", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux/dev/redux queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux/dev/redux", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/flux-dev-thumb.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux/dev/redux", + "documentationUrl": "https://fal.ai/models/fal-ai/flux/dev/redux/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxDevReduxInput": { + "title": "BaseReduxInput", + "type": "object", + "properties": { + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "title": "Num Images", + "maximum": 4, + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image.", + "title": "Image Size", + "default": "landscape_4_3" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "description": "The speed of the generation. The higher the speed, the faster the generation.", + "type": "string", + "title": "Acceleration", + "default": "none" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://fal.media/files/kangaroo/acQvq-Kmo2lajkgvcEHdv.png" + ], + "description": "The URL of the image to generate an image from.", + "type": "string", + "title": "Image URL" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "title": "Seed" + }, + "guidance_scale": { + "minimum": 1, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "type": "number", + "title": "Guidance scale (CFG)", + "maximum": 20, + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 1, + "description": "The number of inference steps to perform.", + "type": "integer", + "title": "Num Inference Steps", + "maximum": 50, + "default": 28 + } + }, + "x-fal-order-properties": [ + "image_url", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "acceleration" + ], + "required": [ + "image_url" + ] + }, + "FluxDevReduxOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "description": "The generated image files info.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "seed": { + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "title": "Height", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "title": "Width", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux/dev/redux/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux/dev/redux/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux/dev/redux": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxDevReduxInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux/dev/redux/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxDevReduxOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-pro/v1.1-ultra/redux", + "metadata": { + "display_name": "FLUX1.1 [pro] ultra Redux", + "category": "image-to-image", + "description": "FLUX1.1 [pro] ultra Redux is a high-performance endpoint for the FLUX1.1 [pro] model that enables rapid transformation of existing images, delivering high-quality style transfers and image modifications with the core FLUX capabilities.", + "status": "active", + "tags": [ + "style transfer", + "high-res" + ], + "updated_at": "2026-01-26T21:44:37.092Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/flux-pro-11-ultra.webp", + "model_url": "https://fal.run/fal-ai/flux-pro/v1.1-ultra/redux", + "license_type": "commercial", + "date": "2024-11-21T00:00:00.000Z", + "group": { + "key": "flux-pro", + "label": "FLUX 1.1 [pro] (ultra) Redux" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-pro/v1.1-ultra/redux", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-pro/v1.1-ultra/redux queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-pro/v1.1-ultra/redux", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/flux-pro-11-ultra.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-pro/v1.1-ultra/redux", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-pro/v1.1-ultra/redux/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxProV11UltraReduxInput": { + "title": "FluxProUltraTextToImageInputRedux", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from.", + "default": "" + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "description": "The number of images to generate.", + "maximum": 4, + "default": 1 + }, + "aspect_ratio": { + "anyOf": [ + { + "enum": [ + "21:9", + "16:9", + "4:3", + "3:2", + "1:1", + "2:3", + "3:4", + "9:16", + "9:21" + ], + "type": "string" + }, + { + "type": "string" + } + ], + "title": "Aspect Ratio", + "description": "The aspect ratio of the generated image.", + "default": "16:9" + }, + "enhance_prompt": { + "title": "Enhance Prompt", + "type": "boolean", + "description": "Whether to enhance the prompt for better results.", + "default": false + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://fal.media/files/kangaroo/acQvq-Kmo2lajkgvcEHdv.png" + ], + "title": "Image URL", + "type": "string", + "description": "The image URL to generate an image from." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "safety_tolerance": { + "enum": [ + "1", + "2", + "3", + "4", + "5", + "6" + ], + "title": "Safety Tolerance", + "type": "string", + "description": "The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive.", + "default": "2" + }, + "image_prompt_strength": { + "minimum": 0, + "title": "Image Prompt Strength", + "type": "number", + "description": "The strength of the image prompt, between 0 and 1.", + "maximum": 1, + "default": 0.1 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "raw": { + "title": "Raw", + "type": "boolean", + "description": "Generate less processed, more natural-looking images.", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "seed", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "safety_tolerance", + "enhance_prompt", + "image_url", + "image_prompt_strength", + "aspect_ratio", + "raw" + ], + "required": [ + "image_url" + ] + }, + "FluxProV11UltraReduxOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image."
+ }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/registry__image__fast_sdxl__models__Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "registry__image__fast_sdxl__models__Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-pro/v1.1-ultra/redux/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-pro/v1.1-ultra/redux/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-pro/v1.1-ultra/redux": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxProV11UltraReduxInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-pro/v1.1-ultra/redux/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxProV11UltraReduxOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-lora-depth", + "metadata": { + "display_name": "FLUX.1 [dev] Depth with LoRAs", + "category": "image-to-image", + "description": "Generate high-quality images from depth maps using the FLUX.1 [dev] depth model with LoRA support. Depth conditioning preserves scene structure and composition while the prompt and LoRAs control content and style.", + "status": "active", + "tags": [ + "depth", + "lora", + "utility", + "composition" + ], + "updated_at": "2026-01-26T21:44:37.223Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/flux_lora.jpg", + "model_url": "https://fal.run/fal-ai/flux-lora-depth", + "license_type": "commercial", + "date": "2024-11-21T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-lora-depth", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-lora-depth queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-lora-depth", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/flux_lora.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-lora-depth", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-lora-depth/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position."
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxLoraDepthInput": { + "title": "DepthInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Black hole in space, orange accretion disc" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate. This is always set to 1 for streaming output.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image." + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://fal.media/files/penguin/vt-SeIOweN7_oYBsvGO6t.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of image to use for depth input" + }, + "loras": { + "description": "\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "examples": [], + "title": "Loras", + "default": [] + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 35, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 28 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "loras", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "image_url" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "FluxLoraDepthOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." 
+ }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Scale", + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + } + }, + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-lora-depth/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-lora-depth/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-lora-depth": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxLoraDepthInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-lora-depth/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxLoraDepthOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-pro/v1/fill", + "metadata": { + "display_name": "FLUX.1 [pro] Fill", + "category": "image-to-image", + "description": "FLUX.1 [pro] Fill is a high-performance endpoint for the FLUX.1 [pro] model that enables rapid transformation of existing images, delivering high-quality style transfers and image modifications with the core FLUX capabilities.", + "status": "active", + "tags": [ + "editing" + ], + "updated_at": "2026-01-26T21:44:37.352Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/fluxpro.jpg", + "model_url": "https://fal.run/fal-ai/flux-pro/v1/fill", + "license_type": "commercial", + "date": "2024-11-21T00:00:00.000Z", + "group": { + "key": "flux-pro", + "label": "FLUX.1 [pro] Fill" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-pro/v1/fill", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-pro/v1/fill queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-pro/v1/fill", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/fluxpro.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-pro/v1/fill", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-pro/v1/fill/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxProV1FillInput": { + "title": "FluxProFillInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A knight in shining armour holding a greatshield with \"FAL\" on it" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to fill the masked part of the image." + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "description": "The number of images to generate.", + "maximum": 4, + "default": 1 + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/flux-lora/example-images/knight.jpeg" + ], + "title": "Image URL", + "type": "string", + "description": "The image URL to generate an image from. Needs to match the dimensions of the mask." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "safety_tolerance": { + "enum": [ + "1", + "2", + "3", + "4", + "5", + "6" + ], + "title": "Safety Tolerance", + "type": "string", + "description": "The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive.", + "default": "2" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "mask_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/flux-lora/example-images/mask_knight.jpeg" + ], + "title": "Mask URL", + "type": "string", + "description": "The mask URL to inpaint the image. Needs to match the dimensions of the input image." + }, + "enhance_prompt": { + "title": "Enhance Prompt", + "type": "boolean", + "description": "Whether to enhance the prompt for better results.", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "seed", + "sync_mode", + "num_images", + "output_format", + "safety_tolerance", + "enhance_prompt", + "image_url", + "mask_url" + ], + "required": [ + "prompt", + "image_url", + "mask_url" + ] + }, + "FluxProV1FillOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/registry__image__fast_sdxl__models__Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "registry__image__fast_sdxl__models__Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-pro/v1/fill/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-pro/v1/fill/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-pro/v1/fill": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxProV1FillInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-pro/v1/fill/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxProV1FillOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kolors/image-to-image", + "metadata": { + "display_name": "Kolors Image to Image", + "category": "image-to-image", + "description": "Photorealistic Image-to-Image", + "status": "active", + "tags": [ + "realism", + "editing", + "diffusion" + ], + "updated_at": "2026-01-26T21:44:37.480Z", + "is_favorited": false, + "thumbnail_url": "https://v2.fal.media/files/bdcf6a7a3f4146c39555e0c195715e65_73e054513f15488f93248ae10d67ece5.png", + "model_url": "https://fal.run/fal-ai/kolors/image-to-image", + "github_url": "https://huggingface.co/Kwai-Kolors/Kolors-diffusers/raw/main/MODEL_LICENSE", + "license_type": "commercial", + "date": "2024-11-19T00:00:00.000Z", + "group": { + "key": "kolors", + "label": "Image to Image" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kolors/image-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kolors/image-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kolors/image-to-image", + "category": "image-to-image", + "thumbnailUrl": "https://v2.fal.media/files/bdcf6a7a3f4146c39555e0c195715e65_73e054513f15488f93248ae10d67ece5.png", + "playgroundUrl": "https://fal.ai/models/fal-ai/kolors/image-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/kolors/image-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "KolorsImageToImageInput": { + "title": "KolorsImg2ImgInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "high quality image of a capybara wearing sunglasses. In the background of the image there are trees, poles, grass and other objects. At the bottom of the object there is the road., 8k, highly detailed." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "maximum": 8, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image." + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/image_models/bunny_source.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to use for image-to-image" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the image to be generated and\n uploaded before returning the response. This will increase the latency of\n the function but it allows you to get the image directly in the response\n without going through the CDN.\n ", + "default": false + }, + "scheduler": { + "enum": [ + "EulerDiscreteScheduler", + "EulerAncestralDiscreteScheduler", + "DPMSolverMultistepScheduler", + "DPMSolverMultistepScheduler_SDE_karras", + "UniPCMultistepScheduler", + "DEISMultistepScheduler" + ], + "title": "Scheduler", + "type": "string", + "description": "The scheduler to use for the model.", + "default": "EulerDiscreteScheduler" + }, + "strength": { + "minimum": 0.01, + "maximum": 1, + "type": "number", + "title": "Strength", + "description": "The strength to use for image-to-image. 1.0 completely remakes the image while 0.0 preserves the original.", + "default": 0.85 + }, + "guidance_scale": { + "minimum": 1, + "maximum": 10, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show\n you.\n ", + "default": 5 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 150, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 50 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Seed" + }, + "negative_prompt": { + "examples": [ + "ugly, deformed, blurry" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small\n details (e.g. 
moustache, blurry, low resolution).\n ", + "default": "" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Enable safety checker.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "guidance_scale", + "num_inference_steps", + "seed", + "sync_mode", + "enable_safety_checker", + "num_images", + "image_size", + "scheduler", + "output_format", + "image_url", + "strength" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "KolorsImageToImageOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/kolors/image-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kolors/image-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kolors/image-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KolorsImageToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kolors/image-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KolorsImageToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/iclight-v2", + "metadata": { + "display_name": "IC-Light-v2 for Image Relighting", + "category": "image-to-image", + "description": "An endpoint for re-lighting photos and changing their backgrounds per a given description", + "status": "active", + "tags": [ + "relighting", + "editing" + ], + "updated_at": "2026-01-26T21:44:37.608Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/web-examples/iclight-v2/iclightv2-default-output.webp", + "model_url": "https://fal.run/fal-ai/iclight-v2", + "license_type": "commercial", + "date": "2024-11-14T00:00:00.000Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/iclight-v2", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/iclight-v2 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/iclight-v2", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/web-examples/iclight-v2/iclightv2-default-output.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/iclight-v2", + "documentationUrl": "https://fal.ai/models/fal-ai/iclight-v2/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." 
+ }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "IclightV2Input": { + "title": "BaseInput", + "type": "object", + "properties": { + "initial_latent": { + "enum": [ + "None", + "Left", + "Right", + "Top", + "Bottom" + ], + "title": "Initial Latent", + "type": "string", + "description": "\n Provide lighting conditions for the model\n ", + "default": "None" + }, + "prompt": { + "examples": [ + "perfume bottle in a volcano surrounded by lava." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image." + }, + "background_threshold": { + "minimum": 0.01, + "title": "Background Threshold", + "type": "number", + "maximum": 1, + "description": "Threshold for the background removal algorithm. A high threshold will produce sharper masks. Note: This parameter is currently deprecated and has no effect on the output.", + "default": 0.67 + }, + "mask_image_url": { + "title": "Mask Image Url", + "type": "string", + "description": "URL of mask to be used for ic-light conditioning image" + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance scale (CFG)", + "type": "number", + "maximum": 20, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 5 + }, + "lowres_denoise": { + "minimum": 0.01, + "title": "Lowres Denoise", + "type": "number", + "maximum": 1, + "description": "Strength for low-resolution pass.", + "default": 0.98 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative Prompt for the image", + "default": "" + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 4, + "description": "The number of images to generate.", + "default": 1 + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "hr_downscale": { + "minimum": 0.01, + "title": "Hr Downscale", + "type": "number", + "maximum": 1, + "default": 0.5 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/iclight-v2/bottle.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of image to be used for relighting" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. 
This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ", + "default": false + }, + "highres_denoise": { + "minimum": 0.01, + "title": "Highres Denoise", + "type": "number", + "maximum": 1, + "description": "Strength for high-resolution pass. Only used if enable_hr_fix is True.", + "default": 0.95 + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps to perform.", + "default": 28 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "enable_hr_fix": { + "title": "Enable Hr Fix", + "type": "boolean", + "description": "Use HR fix", + "default": false + }, + "cfg": { + "minimum": 0.01, + "title": "Cfg", + "type": "number", + "maximum": 5, + "description": "The real classifier-free-guidance scale for the generation.", + "default": 1 + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "image_url", + "mask_image_url", + "background_threshold", + "image_size", + "num_inference_steps", + "seed", + "initial_latent", + "enable_hr_fix", + "sync_mode", + "num_images", + "cfg", + "lowres_denoise", + "highres_denoise", + "hr_downscale", + "guidance_scale", + "enable_safety_checker", + "output_format" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "IclightV2Output": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/iclight-v2/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/iclight-v2/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/iclight-v2": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IclightV2Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/iclight-v2/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IclightV2Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-differential-diffusion", + "metadata": { + "display_name": "FLUX.1 [dev] Differential Diffusion", + "category": "image-to-image", + "description": "FLUX.1 Differential Diffusion is a rapid endpoint that enables swift, granular control over image transformations through change maps, delivering fast and precise region-specific modifications while maintaining FLUX.1 [dev]'s high-quality output.", + "status": "active", + "tags": [ + "transformation" + ], + "updated_at": "2026-01-26T21:44:37.921Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/tiger/e-8Q3gR-WkrPM9-p4aicX.webp", + "model_url": "https://fal.run/fal-ai/flux-differential-diffusion", + "license_type": "commercial", + "date": "2024-11-06T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-differential-diffusion", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-differential-diffusion queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-differential-diffusion", + "category": "image-to-image", + "thumbnailUrl": "https://fal.media/files/tiger/e-8Q3gR-WkrPM9-p4aicX.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-differential-diffusion", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-differential-diffusion/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxDifferentialDiffusionInput": { + "x-fal-order-properties": [ + "prompt", + "image_url", + "change_map_image_url", + "strength", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Tree of life under the sea, ethereal, glittering, lens flares, cinematic lighting, artwork by Anna Dittmann & Carne Griffiths, 8k, unreal engine 5, highly detailed, intricate detailed." + ], + "description": "The prompt to generate an image from.", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "description": "The number of images to generate.", + "title": "Num Images", + "default": 1 + }, + "image_url": { + "examples": [ + "https://fal.media/files/koala/h6a7KK2Ie_inuGbdartoX.jpeg" + ], + "description": "URL of image to use as initial image.", + "type": "string", + "title": "Image URL" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "strength": { + "minimum": 0.01, + "maximum": 1, + "type": "number", + "description": "The strength to use for image-to-image. 1.0 completely remakes the image while 0.0 preserves the original.", + "title": "Strength", + "default": 0.85 + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "title": "Guidance scale (CFG)", + "default": 3.5 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "change_map_image_url": { + "examples": [ + "https://fal.media/files/zebra/Wh4IYAiAAcVbuZ8M9ZMSn.jpeg" + ], + "description": "URL of change map.", + "type": "string", + "title": "Change Map URL" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "description": "The number of inference steps to perform.", + "title": "Num Inference Steps", + "default": 28 + } + }, + "title": "DiffInput", + "required": [ + "prompt", + "image_url", + "change_map_image_url" + ] + }, + "FluxDifferentialDiffusionOutput": { + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + } + }, + "title": "Output", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "title": "Image", + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-differential-diffusion/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-differential-diffusion/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-differential-diffusion": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxDifferentialDiffusionInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-differential-diffusion/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxDifferentialDiffusionOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-pulid", + "metadata": { + "display_name": "PuLID Flux", + "category": "image-to-image", + "description": "An endpoint for personalized image generation using Flux as per given description.", + "status": "active", + "tags": [ + "personalization", + "style transfer" + ], + "updated_at": "2026-01-26T21:44:38.087Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/flux-pulid.webp", + "model_url": "https://fal.run/fal-ai/flux-pulid", + "license_type": "commercial", + "date": "2024-10-29T00:00:00.000Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-pulid", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-pulid queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-pulid", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/flux-pulid.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-pulid", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-pulid/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxPulidInput": { + "title": "FluxPulidInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "a woman holding sign with glowing green text 'PuLID for FLUX'" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." 
+ }, + "id_weight": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Id Weight", + "description": "The weight of the ID loss.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "landscape_4_3" + }, + "start_step": { + "minimum": 0, + "maximum": 50, + "type": "integer", + "title": "Start Step", + "description": "The number of steps to start the CFG from.", + "default": 0 + }, + "reference_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/gallery/example_inputs_liuyifei.png" + ], + "title": "Reference Image URL", + "type": "string", + "description": "URL of the face image to use as the identity reference for personalization." + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "max_sequence_length": { + "enum": [ + "128", + "256", + "512" + ], + "title": "Max Sequence Length", + "type": "string", + "description": "The maximum sequence length for the model.", + "default": "128" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 4 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 20 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "negative_prompt": { + "examples": [ + "bad quality, worst quality, text, signature, watermark, extra limbs" + ], + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to use. Use it to address details that you don't want in the image.", + "default": "" + }, + "true_cfg": { + "minimum": 1, + "maximum": 10, + "type": "number", + "title": "True Cfg", + "description": "The weight of the CFG loss.", + "default": 1 + } + }, + "x-fal-order-properties": [ + "prompt", + "reference_image_url", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "negative_prompt", + "sync_mode", + "start_step", + "true_cfg", + "id_weight", + "enable_safety_checker", + "max_sequence_length" + ], + "required": [ + "prompt", + "reference_image_url" + ] + }, + "FluxPulidOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." 
+ }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-pulid/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-pulid/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-pulid": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxPulidInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-pulid/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxPulidOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/birefnet/v2", + "metadata": { + "display_name": "Birefnet Background Removal", + "category": "image-to-image", + "description": "bilateral reference framework (BiRefNet) for high-resolution dichotomous image segmentation (DIS)", + "status": "active", + "tags": [ + "background removal", + "segmentation", + "high-res", + "utility" + ], + "updated_at": "2026-01-26T21:44:38.314Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/birefnet.webp", + "thumbnail_animated_url": "https://storage.googleapis.com/falserverless/gallery/birefnet-animated.webp", + "model_url": "https://fal.run/fal-ai/birefnet/v2", + "date": "2024-10-28T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/birefnet/v2", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/birefnet/v2 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/birefnet/v2", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/birefnet.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/birefnet/v2", + "documentationUrl": "https://fal.ai/models/fal-ai/birefnet/v2/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "BirefnetV2Input": { + "title": "InputV2", + "type": "object", + "properties": { + "operating_resolution": { + "enum": [ + "1024x1024", + "2048x2048", + "2304x2304" + ], + "title": "Operating Resolution", + "type": "string", + "description": "The resolution to operate on. 
The higher the resolution, the more accurate the output will be for high res input images. The '2304x2304' option is only available for the 'General Use (Dynamic)' model.", + "default": "1024x1024" + }, + "output_format": { + "enum": [ + "webp", + "png", + "gif" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output image", + "default": "png" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/birefnet-input.jpeg" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to remove background from" + }, + "model": { + "enum": [ + "General Use (Light)", + "General Use (Light 2K)", + "General Use (Heavy)", + "Matting", + "Portrait", + "General Use (Dynamic)" + ], + "title": "Model", + "type": "string", + "description": "\n Model to use for background removal.\n The 'General Use (Light)' model is the original model used in the BiRefNet repository.\n The 'General Use (Light 2K)' model is the original model used in the BiRefNet repository but trained with 2K images.\n The 'General Use (Heavy)' model is a slower but more accurate model.\n The 'Matting' model is a model trained specifically for matting images.\n The 'Portrait' model is a model trained specifically for portrait images.\n The 'General Use (Dynamic)' model supports dynamic resolutions from 256x256 to 2304x2304.\n The 'General Use (Light)' model is recommended for most use cases.\n\n The corresponding models are as follows:\n - 'General Use (Light)': BiRefNet\n - 'General Use (Light 2K)': BiRefNet_lite-2K\n - 'General Use (Heavy)': BiRefNet_lite\n - 'Matting': BiRefNet-matting\n - 'Portrait': BiRefNet-portrait\n - 'General Use (Dynamic)': BiRefNet_dynamic\n ", + "default": "General Use (Light)" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "output_mask": { + "title": "Output Mask", + "type": "boolean", + "description": "Whether to output the mask used to remove the background", + "default": false + }, + "refine_foreground": { + "title": "Refine Foreground", + "type": "boolean", + "description": "Whether to refine the foreground using the estimated mask", + "default": true + } + }, + "x-fal-order-properties": [ + "model", + "operating_resolution", + "output_mask", + "refine_foreground", + "sync_mode", + "image_url", + "output_format" + ], + "required": [ + "image_url" + ] + }, + "BirefnetV2Output": { + "title": "Output", + "type": "object", + "properties": { + "image": { + "examples": [ + { + "height": 1024, + "file_name": "birefnet-output.png", + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/example_outputs/birefnet-output.png", + "width": 1024 + } + ], + "title": "Image", + "description": "Image with background removed", + "allOf": [ + { + "$ref": "#/components/schemas/ImageFile" + } + ] + }, + "mask_image": { + "title": "Mask Image", + "description": "Mask used to remove the background", + "allOf": [ + { + "$ref": "#/components/schemas/ImageFile" + } + ] + } + }, + "x-fal-order-properties": [ + "image", + "mask_image" + ], + "required": [ + "image" + ] + }, + "ImageFile": { + "title": "ImageFile", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the image" + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File 
Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the image" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/birefnet/v2/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/birefnet/v2/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/birefnet/v2": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BirefnetV2Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/birefnet/v2/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BirefnetV2Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/live-portrait/image", + "metadata": { + "display_name": "Live Portrait", + "category": "image-to-image", + "description": "Transfer expression from a video to a portrait.", + "status": "active", + "tags": [ + "expression", + "animation" + ], + "updated_at": "2026-01-26T21:44:39.609Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/model_tests/live-portrait/XKEmk3mAzGHUjK3qqH-UL.jpeg", + "model_url": "https://fal.run/fal-ai/live-portrait/image", + "github_url": "https://github.com/KwaiVGI/LivePortrait/blob/main/LICENSE", + "license_type": "commercial", + "date": "2024-10-01T00:00:00.000Z", + "group": { + "key": "live-portrait", + "label": "Image" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/live-portrait/image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/live-portrait/image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/live-portrait/image", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/model_tests/live-portrait/XKEmk3mAzGHUjK3qqH-UL.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/live-portrait/image", + "documentationUrl": "https://fal.ai/models/fal-ai/live-portrait/image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "LivePortraitImageInput": { + "x-fal-order-properties": [ + "image_url", + "blink", + "eyebrow", + "wink", + "pupil_x", + "pupil_y", + "aaa", + "eee", + "woo", + "smile", + "rotate_pitch", + "rotate_yaw", + "rotate_roll", + "flag_pasteback", + "flag_do_crop", + "flag_do_rot", + "flag_lip_zero", + "dsize", + "scale", + "vx_ratio", + "vy_ratio", + "enable_safety_checker", + "output_format" + ], + "type": "object", + "properties": { + "smile": { + "minimum": -2, + "maximum": 2, + "type": "number", + "description": "Amount to smile", + "title": "Smile", + "default": 0 + }, + "eyebrow": { + "minimum": -30, + "maximum": 30, + "type": "number", + "description": "Amount to raise or lower eyebrows", + "title": "Eyebrow", + "default": 0 + }, + "rotate_roll": { + "minimum": -45, + "maximum": 45, + "type": "number", + "description": "Amount to rotate the face in roll", + "title": "Rotate Roll", + "default": 0 + }, + "wink": { + "minimum": 0, + "maximum": 25, + "type": "number", + "description": "Amount to wink", + "title": "Wink", + "default": 0 + }, + "rotate_pitch": { + "minimum": -45, + "maximum": 45, + "type": "number", + "description": "Amount to rotate the face in pitch", + "title": "Rotate Pitch", + "default": 0 + }, + "blink": { + "minimum": -30, + "maximum": 30, + "type": "number", + "description": "Amount to blink the eyes", + "title": "Blink", + "default": 0 + }, + "dsize": { + "title": "Dsize", + "type": "integer", + "description": "Size of the output image.", + "default": 512 + }, + "vy_ratio": { + "title": "Vy Ratio", + "type": "number", + "description": "Vertical offset ratio for face crop. Positive values move up, negative values move down.", + "default": -0.125 + }, + "scale": { + "title": "Scale", + "type": "number", + "description": "Scaling factor for the face crop.", + "default": 2.3 + }, + "pupil_x": { + "minimum": -45, + "maximum": 45, + "type": "number", + "description": "Amount to move pupils horizontally", + "title": "Pupil X", + "default": 0 + }, + "flag_pasteback": { + "title": "Flag Pasteback", + "type": "boolean", + "description": "Whether to paste-back/stitch the animated face cropping from the face-cropping space to the original image space.", + "default": true + }, + "eee": { + "minimum": -40, + "maximum": 40, + "type": "number", + "description": "Amount to shape mouth in 'eee' position", + "title": "Eee", + "default": 0 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "\n Whether to enable the safety checker. 
If enabled, the model will check if the input image contains a face before processing it.\n The safety checker will process the input image\n ", + "default": false + }, + "vx_ratio": { + "title": "Vx Ratio", + "type": "number", + "description": "Horizontal offset ratio for face crop.", + "default": 0 + }, + "pupil_y": { + "minimum": -45, + "maximum": 45, + "type": "number", + "description": "Amount to move pupils vertically", + "title": "Pupil Y", + "default": 0 + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "Output format", + "default": "jpeg" + }, + "rotate_yaw": { + "minimum": -45, + "maximum": 45, + "type": "number", + "description": "Amount to rotate the face in yaw", + "title": "Rotate Yaw", + "default": 0 + }, + "flag_do_rot": { + "title": "Flag Do Rot", + "type": "boolean", + "description": "Whether to conduct the rotation when flag_do_crop is True.", + "default": true + }, + "woo": { + "minimum": -100, + "maximum": 100, + "type": "number", + "description": "Amount to shape mouth in 'woo' position", + "title": "Woo", + "default": 0 + }, + "aaa": { + "minimum": -200, + "maximum": 200, + "type": "number", + "description": "Amount to open mouth in 'aaa' shape", + "title": "Aaa", + "default": 0 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/live-portrait/XKEmk3mAzGHUjK3qqH-UL.jpeg" + ], + "description": "URL of the image to be animated", + "type": "string", + "title": "Image Url" + }, + "flag_do_crop": { + "title": "Flag Do Crop", + "type": "boolean", + "description": "Whether to crop the source portrait to the face-cropping space.", + "default": true + }, + "flag_lip_zero": { + "title": "Flag Lip Zero", + "type": "boolean", + "description": "Whether to set the lip to closed state before animation. Only takes effect when flag_eye_retargeting and flag_lip_retargeting are False.", + "default": true + } + }, + "title": "LivePortraitImageInput", + "required": [ + "image_url" + ] + }, + "LivePortraitImageOutput": { + "x-fal-order-properties": [ + "image" + ], + "type": "object", + "properties": { + "image": { + "title": "Image", + "description": "The generated image file.", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + } + }, + "title": "LivePortraitImageOutput", + "required": [ + "image" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "type": "integer", + "title": "Height" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/live-portrait/image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/live-portrait/image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/live-portrait/image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LivePortraitImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/live-portrait/image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LivePortraitImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-general/rf-inversion", + "metadata": { + "display_name": "FLUX.1 [dev] with Controlnets and Loras", + "category": "image-to-image", + "description": "A general purpose endpoint for the FLUX.1 [dev] model, implementing the RF-Inversion pipeline. 
This can be used to edit a reference image based on a prompt.", + "status": "active", + "tags": [ + "rf-inversion", + "editing", + "lora" + ], + "updated_at": "2026-01-26T21:44:39.733Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/flux-lora/flux_general.png", + "model_url": "https://fal.run/fal-ai/flux-general/rf-inversion", + "license_type": "commercial", + "date": "2024-09-17T00:00:00.000Z", + "group": { + "key": "flux-general", + "label": "RF-Inversion" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-general/rf-inversion", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-general/rf-inversion queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-general/rf-inversion", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/flux-lora/flux_general.png", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-general/rf-inversion", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-general/rf-inversion/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxGeneralRfInversionInput": { + "title": "RFInversionInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Wearing glasses" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to edit the image with" + }, + "nag_end": { + "maximum": 1, + "type": "number", + "title": "Proportion of steps to apply NAG", + "description": "\n The proportion of steps to apply NAG. After the specified proportion\n of steps has been iterated, the remaining steps will use original\n attention processors in FLUX.\n ", + "exclusiveMinimum": 0, + "default": 0.25 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "nullable": true + }, + "control_loras": { + "description": "\n The LoRAs to use for the image generation which use a control image. 
You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/ControlLoraWeight" + }, + "examples": [], + "title": "Control Loras", + "default": [] + }, + "controller_guidance_reverse": { + "minimum": 0.01, + "maximum": 3, + "type": "number", + "title": "Controller Guidance Reverse", + "description": "The controller guidance (eta) used in the denoising process. Using values closer to 1 will result in an image closer to the input.", + "default": 0.75 + }, + "loras": { + "description": "\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "examples": [], + "title": "Loras", + "default": [] + }, + "reverse_guidance_start": { + "title": "Reverse Guidance Start", + "type": "integer", + "description": "Timestep to start guidance during reverse process.", + "default": 0 + }, + "easycontrols": { + "title": "Easycontrols", + "type": "array", + "description": "\n EasyControl Inputs to use for image generation.\n ", + "items": { + "$ref": "#/components/schemas/EasyControlWeight" + }, + "default": [] + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 3.5 + }, + "scheduler": { + "enum": [ + "euler", + "dpmpp_2m" + ], + "title": "Scheduler", + "type": "string", + "description": "Scheduler for the denoising process.", + "default": "euler" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "use_cfg_zero": { + "title": "Use CFG-Zero-Init", + "type": "boolean", + "description": "\n Uses CFG-zero init sampling as in https://arxiv.org/abs/2503.18886.\n ", + "default": false + }, + "reference_strength": { + "minimum": -3, + "maximum": 3, + "type": "number", + "title": "Reference Strength", + "description": "Strength of reference_only generation. Only used if a reference image is provided.", + "default": 0.65 + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "sigma_schedule": { + "enum": [ + "sgm_uniform" + ], + "title": "Sigma Schedule", + "type": "string", + "description": "Sigmas schedule for the denoising process."
+ }, + "reference_end": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Reference End", + "description": "\n The percentage of the total timesteps when the reference guidance is to be ended.\n ", + "default": 1 + }, + "controller_guidance_forward": { + "minimum": 0.01, + "maximum": 3, + "type": "number", + "title": "Controller Guidance Forward", + "description": "The controller guidance (gamma) used in the creation of structured noise.", + "default": 0.6 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/flux-general-tests/anime_style.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of image to be edited" + }, + "fill_image": { + "title": "Fill Image", + "description": "Use an image input to influence the generation. Can be used to fill images in masked areas.", + "allOf": [ + { + "$ref": "#/components/schemas/ImageFillInput" + } + ] + }, + "nag_scale": { + "maximum": 10, + "type": "number", + "title": "NAG scale", + "description": "\n The scale for NAG. Higher values will result in a image that is more distant\n to the negative prompt.\n ", + "exclusiveMinimum": 1, + "default": 3 + }, + "reverse_guidance_schedule": { + "enum": [ + "constant", + "linear_increase", + "linear_decrease" + ], + "title": "Reverse Guidance Schedule", + "type": "string", + "description": "Scheduler for applying reverse guidance.", + "default": "constant" + }, + "reference_image_url": { + "title": "Reference Image Url", + "type": "string", + "description": "URL of Image for Reference-Only" + }, + "reverse_guidance_end": { + "title": "Reverse Guidance End", + "type": "integer", + "description": "Timestep to stop guidance during reverse process.", + "default": 8 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "controlnet_unions": { + "title": "Controlnet Unions", + "type": "array", + "description": "\n The controlnet unions to use for the image generation. Only one controlnet is supported at the moment.\n ", + "items": { + "$ref": "#/components/schemas/ControlNetUnion" + }, + "default": [] + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "\n Negative prompt to steer the image generation away from unwanted features.\n By default, we will be using NAG for processing the negative prompt.\n ", + "default": "" + }, + "nag_tau": { + "title": "NAG Tau", + "type": "number", + "description": "\n The tau for NAG. Controls the normalization of the hidden state.\n Higher values will result in a less aggressive normalization,\n but may also lead to unexpected changes with respect to the original image.\n Not recommended to change this value.\n ", + "exclusiveMinimum": 0, + "default": 2.5 + }, + "num_images": { + "minimum": 1, + "maximum": 10, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate. 
This is always set to 1 for streaming output.", + "default": 1 + }, + "use_beta_schedule": { + "title": "Use Beta Schedule", + "type": "boolean", + "description": "Specifies whether beta sigmas ought to be used.", + "default": false + }, + "nag_alpha": { + "maximum": 1, + "type": "number", + "title": "NAG alpha", + "description": "\n The alpha value for NAG. This value is used as a final weighting\n factor for steering the normalized guidance (positive and negative prompts)\n in the direction of the positive prompt. Higher values will result in less\n steering on the normalized guidance where lower values will result in\n considering the positive prompt guidance more.\n ", + "exclusiveMinimum": 0, + "default": 0.25 + }, + "base_shift": { + "minimum": 0.01, + "maximum": 5, + "type": "number", + "title": "Base Shift", + "description": "Base shift for the scheduled timesteps", + "default": 0.5 + }, + "max_shift": { + "minimum": 0.01, + "maximum": 5, + "type": "number", + "title": "Max Shift", + "description": "Max shift for the scheduled timesteps", + "default": 1.15 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 28 + }, + "reference_start": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Reference Start", + "description": "\n The percentage of the total timesteps when the reference guidance is to be started.\n ", + "default": 0 + }, + "controlnets": { + "title": "Controlnets", + "type": "array", + "description": "\n The controlnets to use for the image generation. Only one controlnet is supported at the moment.\n ", + "items": { + "$ref": "#/components/schemas/ControlNet" + }, + "default": [] + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "loras", + "control_loras", + "controlnets", + "controlnet_unions", + "easycontrols", + "fill_image", + "guidance_scale", + "use_cfg_zero", + "sync_mode", + "num_images", + "enable_safety_checker", + "reference_image_url", + "reference_strength", + "reference_start", + "reference_end", + "base_shift", + "max_shift", + "output_format", + "use_beta_schedule", + "sigma_schedule", + "scheduler", + "negative_prompt", + "nag_scale", + "nag_tau", + "nag_alpha", + "nag_end", + "image_url", + "controller_guidance_forward", + "controller_guidance_reverse", + "reverse_guidance_start", + "reverse_guidance_end", + "reverse_guidance_schedule" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "FluxGeneralRfInversionOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value as the one passed in the\n input, or the randomly generated seed that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "ControlLoraWeight": { + "title": "ControlLoraWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "anyOf": [ + { + "type": "object" + }, + { + "minimum": -4, + "maximum": 4, + "type": "number" + } + ], + "title": "Scale", + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model. Providing a dictionary as {\"layer_name\":layer_scale} allows per-layer lora scale settings. Layers with no scale provided will have scale 1.0.\n ", + "default": 1 + }, + "control_image_url": { + "title": "Control Image Url", + "type": "string", + "description": "URL of the image to be used as the control image." + }, + "preprocess": { + "enum": [ + "canny", + "depth", + "None" + ], + "title": "Preprocess", + "type": "string", + "description": "Type of preprocessing to apply to the input image.", + "default": "None" + } + }, + "x-fal-order-properties": [ + "path", + "scale", + "control_image_url", + "preprocess" + ], + "required": [ + "path", + "control_image_url" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "anyOf": [ + { + "type": "object" + }, + { + "minimum": -4, + "maximum": 4, + "type": "number" + } + ], + "title": "Scale", + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model. Providing a dictionary as {\"layer_name\":layer_scale} allows per-layer lora scale settings. Layers with no scale provided will have scale 1.0.\n ", + "default": 1 + } + }, + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "EasyControlWeight": { + "title": "EasyControlWeight", + "type": "object", + "properties": { + "scale": { + "minimum": 0, + "maximum": 2, + "type": "number", + "title": "Scale", + "description": "Scale for the control method.", + "default": 1 + }, + "image_control_type": { + "enum": [ + "subject", + "spatial" + ], + "title": "Image Control Type", + "type": "string", + "description": "Control type of the image. Must be one of `spatial` or `subject`." + }, + "control_method_url": { + "examples": [ + "canny", + "depth", + "hedsketch", + "inpainting", + "pose", + "seg", + "subject", + "ghibli" + ], + "title": "Control Method Url", + "type": "string", + "description": "URL to safetensor weights of control method to be applied. 
Can also be one of `canny`, `depth`, `hedsketch`, `inpainting`, `pose`, `seg`, `subject`, `ghibli` " + }, + "image_url": { + "title": "Image Url", + "type": "string", + "description": "URL of an image to use as a control" + } + }, + "x-fal-order-properties": [ + "control_method_url", + "scale", + "image_url", + "image_control_type" + ], + "required": [ + "control_method_url", + "image_url", + "image_control_type" + ] + }, + "ImageFillInput": { + "title": "ImageFillInput", + "type": "object", + "properties": { + "fill_image_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "type": "string" + } + } + ], + "title": "Fill Image Url", + "description": "URLs of images to be filled for redux prompting", + "default": [] + } + }, + "x-fal-order-properties": [ + "fill_image_url" + ] + }, + "ControlNetUnion": { + "title": "ControlNetUnion", + "type": "object", + "properties": { + "controls": { + "title": "Controls", + "type": "array", + "description": "The control images and modes to use for the control net.", + "items": { + "$ref": "#/components/schemas/ControlNetUnionInput" + } + }, + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the control net weights." + }, + "variant": { + "title": "Variant", + "type": "string", + "description": "The optional variant if a Hugging Face repo key is used." + }, + "config_url": { + "title": "Config Url", + "type": "string", + "description": "optional URL to the controlnet config.json file." + } + }, + "x-fal-order-properties": [ + "path", + "config_url", + "variant", + "controls" + ], + "required": [ + "path", + "controls" + ] + }, + "ControlNet": { + "title": "ControlNet", + "type": "object", + "properties": { + "conditioning_scale": { + "minimum": 0, + "maximum": 2, + "type": "number", + "title": "Conditioning Scale", + "description": "\n The scale of the control net weight. This is used to scale the control net weight\n before merging it with the base model.\n ", + "default": 1 + }, + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the control net weights." + }, + "mask_threshold": { + "minimum": 0.01, + "maximum": 0.99, + "type": "number", + "title": "Mask Threshold", + "description": "Threshold for mask.", + "default": 0.5 + }, + "end_percentage": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "End Percentage", + "description": "\n The percentage of the image to end applying the controlnet in terms of the total timesteps.\n ", + "default": 1 + }, + "config_url": { + "title": "Config Url", + "type": "string", + "description": "optional URL to the controlnet config.json file." + }, + "mask_image_url": { + "title": "Mask Image Url", + "type": "string", + "description": "URL of the mask for the control image.", + "nullable": true + }, + "variant": { + "title": "Variant", + "type": "string", + "description": "The optional variant if a Hugging Face repo key is used." + }, + "control_image_url": { + "title": "Control Image Url", + "type": "string", + "description": "URL of the image to be used as the control image." 
+ }, + "start_percentage": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Start Percentage", + "description": "\n The percentage of the image to start applying the controlnet in terms of the total timesteps.\n ", + "default": 0 + } + }, + "x-fal-order-properties": [ + "path", + "config_url", + "variant", + "control_image_url", + "mask_image_url", + "mask_threshold", + "conditioning_scale", + "start_percentage", + "end_percentage" + ], + "required": [ + "path", + "control_image_url" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + }, + "ControlNetUnionInput": { + "title": "ControlNetUnionInput", + "type": "object", + "properties": { + "conditioning_scale": { + "minimum": 0, + "maximum": 2, + "type": "number", + "title": "Conditioning Scale", + "description": "\n The scale of the control net weight. This is used to scale the control net weight\n before merging it with the base model.\n ", + "default": 1 + }, + "mask_threshold": { + "minimum": 0.01, + "maximum": 0.99, + "type": "number", + "title": "Mask Threshold", + "description": "Threshold for mask.", + "default": 0.5 + }, + "end_percentage": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "End Percentage", + "description": "\n The percentage of the image to end applying the controlnet in terms of the total timesteps.\n ", + "default": 1 + }, + "mask_image_url": { + "title": "Mask Image Url", + "type": "string", + "description": "URL of the mask for the control image.", + "nullable": true + }, + "control_image_url": { + "title": "Control Image Url", + "type": "string", + "description": "URL of the image to be used as the control image." + }, + "control_mode": { + "enum": [ + "canny", + "tile", + "depth", + "blur", + "pose", + "gray", + "low-quality" + ], + "title": "Control Mode", + "type": "string", + "description": "Control Mode for Flux Controlnet Union. Supported values are:\n - canny: Uses the edges for guided generation.\n - tile: Uses the tiles for guided generation.\n - depth: Utilizes a grayscale depth map for guided generation.\n - blur: Adds a blur to the image.\n - pose: Uses the pose of the image for guided generation.\n - gray: Converts the image to grayscale.\n - low-quality: Converts the image to a low-quality image." 
+ }, + "start_percentage": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Start Percentage", + "description": "\n The percentage of the image to start applying the controlnet in terms of the total timesteps.\n ", + "default": 0 + } + }, + "x-fal-order-properties": [ + "control_image_url", + "mask_image_url", + "control_mode", + "conditioning_scale", + "mask_threshold", + "start_percentage", + "end_percentage" + ], + "required": [ + "control_image_url", + "control_mode" + ] + } + } + }, + "paths": { + "/fal-ai/flux-general/rf-inversion/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-general/rf-inversion/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-general/rf-inversion": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxGeneralRfInversionInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-general/rf-inversion/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxGeneralRfInversionOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-preprocessors/hed", + "metadata": { + "display_name": "Image Preprocessors", + "category": "image-to-image", + "description": "Holistically-Nested Edge Detection (HED) preprocessor.", + "status": "active", + "tags": [ + "preprocess", + "detection", + "utility", + "controlnet" + ], + "updated_at": "2026-01-26T21:44:40.867Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/image-preprocessors.jpeg", + "model_url": "https://fal.run/fal-ai/image-preprocessors/hed", + "date": "2024-09-16T00:00:00.000Z", + "group": { + "key": "image-preprocessors", + "label": "HED" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-preprocessors/hed", + "version": "1.0.0", + "description": "The OpenAPI schema for the 
fal-ai/image-preprocessors/hed queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-preprocessors/hed", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/image-preprocessors.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-preprocessors/hed", + "documentationUrl": "https://fal.ai/models/fal-ai/image-preprocessors/hed/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImagePreprocessorsHedInput": { + "title": "HEDInput", + "type": "object", + "properties": { + "safe": { + "title": "Safe", + "type": "boolean", + "description": "Whether to use the safe version of the HED detector", + "default": false + }, + "scribble": { + "title": "Scribble", + "type": "boolean", + "description": "Whether to use the scribble version of the HED detector", + "default": false + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/image_preprocessors/cat.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to process" + } + }, + "x-fal-order-properties": [ + "image_url", + "safe", + "scribble" + ], + "required": [ + "image_url" + ] + }, + "ImagePreprocessorsHedOutput": { + "title": "HEDOutput", + "type": "object", + "properties": { + "image": { + "description": "Image with lines detected using the HED detector", + "$ref": "#/components/schemas/Image" + } + }, + "x-fal-order-properties": [ + "image" + ], + "required": [ + "image" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the image in pixels.", + "examples": [ + 1024 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the image in pixels.", + "examples": [ + 1024 + ] + } + }, + "description": "Represents an image file.", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-preprocessors/hed/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-preprocessors/hed/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/image-preprocessors/hed": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImagePreprocessorsHedInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-preprocessors/hed/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImagePreprocessorsHedOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-preprocessors/depth-anything/v2", + "metadata": { + "display_name": "Image Preprocessors", + "category": "image-to-image", + "description": "Depth Anything v2 preprocessor.", + "status": "active", + "tags": [ + "depth", + "preprocess", + "utility", + "controlnet" + ], + "updated_at": "2026-01-26T21:44:39.983Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/image-preprocessors.jpeg", + "model_url": "https://fal.run/fal-ai/image-preprocessors/depth-anything/v2", + "date": "2024-09-16T00:00:00.000Z", + "group": { + "key": "image-preprocessors", + "label": "Depth Anything" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-preprocessors/depth-anything/v2", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-preprocessors/depth-anything/v2 queue.", + "x-fal-metadata": { + "endpointId": 
"fal-ai/image-preprocessors/depth-anything/v2", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/image-preprocessors.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-preprocessors/depth-anything/v2", + "documentationUrl": "https://fal.ai/models/fal-ai/image-preprocessors/depth-anything/v2/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImagePreprocessorsDepthAnythingV2Input": { + "title": "DepthAnythingV2Input", + "type": "object", + "properties": { + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/image_preprocessors/cat.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to process" + } + }, + "x-fal-order-properties": [ + "image_url" + ], + "required": [ + "image_url" + ] + }, + "ImagePreprocessorsDepthAnythingV2Output": { + "title": "DepthAnythingV2Output", + "type": "object", + "properties": { + "image": { + "description": "Image with depth map", + "$ref": "#/components/schemas/Image" + } + }, + "x-fal-order-properties": [ + "image" + ], + "required": [ + "image" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the image in pixels.", + "examples": [ + 1024 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the image in pixels.", + "examples": [ + 1024 + ] + } + }, + "description": "Represents an image file.", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-preprocessors/depth-anything/v2/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-preprocessors/depth-anything/v2/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/image-preprocessors/depth-anything/v2": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImagePreprocessorsDepthAnythingV2Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-preprocessors/depth-anything/v2/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImagePreprocessorsDepthAnythingV2Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-preprocessors/scribble", + "metadata": { + "display_name": "Image Preprocessors", + "category": "image-to-image", + "description": "Scribble preprocessor.", + "status": "active", + "tags": [ + "preprocess", + "utility", + "editing", + "controlnet", + "sketch" + ], + "updated_at": "2026-01-26T21:44:40.618Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/image-preprocessors.jpeg", + "model_url": "https://fal.run/fal-ai/image-preprocessors/scribble", + "date": "2024-09-16T00:00:00.000Z", + "group": { + "key": "image-preprocessors", + "label": "Scribble" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-preprocessors/scribble", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-preprocessors/scribble 
queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-preprocessors/scribble", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/image-preprocessors.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-preprocessors/scribble", + "documentationUrl": "https://fal.ai/models/fal-ai/image-preprocessors/scribble/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImagePreprocessorsScribbleInput": { + "title": "ScribbleInput", + "type": "object", + "properties": { + "model": { + "enum": [ + "HED", + "PiDi" + ], + "title": "Model", + "type": "string", + "description": "The model to use for the Scribble detector", + "default": "HED" + }, + "safe": { + "title": "Safe", + "type": "boolean", + "description": "Whether to use the safe version of the Scribble detector", + "default": false + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/image_preprocessors/cat.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to process" + } + }, + "x-fal-order-properties": [ + "image_url", + "model", + "safe" + ], + "required": [ + "image_url" + ] + }, + "ImagePreprocessorsScribbleOutput": { + "title": "ScribbleOutput", + "type": "object", + "properties": { + "image": { + "description": "Image with lines detected using the Scribble detector", + "$ref": "#/components/schemas/Image" + } + }, + "x-fal-order-properties": [ + "image" + ], + "required": [ + "image" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the image in pixels.", + "examples": [ + 1024 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the image in pixels.", + "examples": [ + 1024 + ] + } + }, + "description": "Represents an image file.", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-preprocessors/scribble/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-preprocessors/scribble/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/image-preprocessors/scribble": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImagePreprocessorsScribbleInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-preprocessors/scribble/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImagePreprocessorsScribbleOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-preprocessors/mlsd", + "metadata": { + "display_name": "Image Preprocessors", + "category": "image-to-image", + "description": "M-LSD line segment detection preprocessor.", + "status": "active", + "tags": [ + "preprocess", + "utility", + "controlnet" + ], + "updated_at": "2026-01-26T21:44:41.117Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/image-preprocessors.jpeg", + "model_url": "https://fal.run/fal-ai/image-preprocessors/mlsd", + "license_type": "commercial", + "date": "2024-09-16T00:00:00.000Z", + "group": { + "key": "image-preprocessors", + "label": "M-LSD" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-preprocessors/mlsd", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-preprocessors/mlsd queue.", + 
"x-fal-metadata": { + "endpointId": "fal-ai/image-preprocessors/mlsd", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/image-preprocessors.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-preprocessors/mlsd", + "documentationUrl": "https://fal.ai/models/fal-ai/image-preprocessors/mlsd/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImagePreprocessorsMlsdInput": { + "title": "MLSDInput", + "type": "object", + "properties": { + "distance_threshold": { + "title": "Distance Threshold", + "type": "number", + "description": "Distance threshold for the MLSD detector", + "default": 0.1 + }, + "score_threshold": { + "title": "Score Threshold", + "type": "number", + "description": "Score threshold for the MLSD detector", + "default": 0.1 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/image_preprocessors/cat.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to process" + } + }, + "x-fal-order-properties": [ + "image_url", + "score_threshold", + "distance_threshold" + ], + "required": [ + "image_url" + ] + }, + "ImagePreprocessorsMlsdOutput": { + "title": "MLSDOutput", + "type": "object", + "properties": { + "image": { + "description": "Image with lines detected using the MLSD detector", + "$ref": "#/components/schemas/Image" + } + }, + "x-fal-order-properties": [ + "image" + ], + "required": [ + "image" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the image in pixels.", + "examples": [ + 1024 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the image in pixels.", + "examples": [ + 1024 + ] + } + }, + "description": "Represents an image file.", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-preprocessors/mlsd/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-preprocessors/mlsd/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/image-preprocessors/mlsd": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImagePreprocessorsMlsdInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-preprocessors/mlsd/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImagePreprocessorsMlsdOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-preprocessors/sam", + "metadata": { + "display_name": "Image Preprocessors", + "category": "image-to-image", + "description": "Segment Anything Model (SAM) preprocessor.", + "status": "active", + "tags": [ + "segmentation", + "preprocess", + "utility", + "mask", + "controlnet" + ], + "updated_at": "2026-01-26T21:44:40.117Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/image-preprocessors.jpeg", + "model_url": "https://fal.run/fal-ai/image-preprocessors/sam", + "date": "2024-09-16T00:00:00.000Z", + "group": { + "key": "image-preprocessors", + "label": "SAM" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-preprocessors/sam", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-preprocessors/sam queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-preprocessors/sam", 
+ "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/image-preprocessors.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-preprocessors/sam", + "documentationUrl": "https://fal.ai/models/fal-ai/image-preprocessors/sam/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImagePreprocessorsSamInput": { + "title": "SamInput", + "type": "object", + "properties": { + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/image_preprocessors/cat.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to process" + } + }, + "x-fal-order-properties": [ + "image_url" + ], + "required": [ + "image_url" + ] + }, + "ImagePreprocessorsSamOutput": { + "title": "SamOutput", + "type": "object", + "properties": { + "image": { + "description": "Image with SAM segmentation map", + "$ref": "#/components/schemas/Image" + } + }, + "x-fal-order-properties": [ + "image" + ], + "required": [ + "image" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the image in pixels.", + "examples": [ + 1024 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the image in pixels.", + "examples": [ + 1024 + ] + } + }, + "description": "Represents an image file.", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-preprocessors/sam/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-preprocessors/sam/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/image-preprocessors/sam": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImagePreprocessorsSamInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-preprocessors/sam/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImagePreprocessorsSamOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-preprocessors/midas", + "metadata": { + "display_name": "Image Preprocessors", + "category": "image-to-image", + "description": "MiDaS depth estimation preprocessor.", + "status": "active", + "tags": [ + "depth", + "preprocess", + "utility", + "controlnet" + ], + "updated_at": "2026-01-26T21:44:40.368Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/image-preprocessors.jpeg", + "model_url": "https://fal.run/fal-ai/image-preprocessors/midas", + "date": "2024-09-16T00:00:00.000Z", + "group": { + "key": "image-preprocessors", + "label": "MiDaS" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-preprocessors/midas", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-preprocessors/midas queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-preprocessors/midas", + "category": 
"image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/image-preprocessors.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-preprocessors/midas", + "documentationUrl": "https://fal.ai/models/fal-ai/image-preprocessors/midas/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImagePreprocessorsMidasInput": { + "title": "MiDaSInput", + "type": "object", + "properties": { + "a": { + "title": "A", + "type": "number", + "description": "A parameter for the MiDaS detector", + "default": 6.283185307179586 + }, + "background_threshold": { + "title": "Background Threshold", + "type": "number", + "description": "Background threshold for the MiDaS detector", + "default": 0.1 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/image_preprocessors/cat.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to process" + } + }, + "x-fal-order-properties": [ + "image_url", + "a", + "background_threshold" + ], + "required": [ + "image_url" + ] + }, + "ImagePreprocessorsMidasOutput": { + "title": "MiDaSOutput", + "type": "object", + "properties": { + "normal_map": { + "description": "Image with MiDaS normal map", + "$ref": "#/components/schemas/Image" + }, + "depth_map": { + "description": "Image with MiDaS depth map", + "$ref": "#/components/schemas/Image" + } + }, + "x-fal-order-properties": [ + "depth_map", + "normal_map" + ], + "required": [ + "depth_map", + "normal_map" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the image in pixels.", + "examples": [ + 1024 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the image in pixels.", + "examples": [ + 1024 + ] + } + }, + "description": "Represents an image file.", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-preprocessors/midas/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-preprocessors/midas/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/image-preprocessors/midas": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImagePreprocessorsMidasInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-preprocessors/midas/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImagePreprocessorsMidasOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-preprocessors/teed", + "metadata": { + "display_name": "Image Preprocessors", + "category": "image-to-image", + "description": "TEED (Temporal Edge Enhancement Detection) preprocessor.", + "status": "active", + "tags": [ + "preprocess", + "detection", + "utility", + "controlnet" + ], + "updated_at": "2026-01-26T21:44:39.858Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/image-preprocessors.jpeg", + "model_url": "https://fal.run/fal-ai/image-preprocessors/teed", + "date": "2024-09-16T00:00:00.000Z", + "group": { + "key": "image-preprocessors", + "label": "TEED" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-preprocessors/teed", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-preprocessors/teed queue.", + "x-fal-metadata": { + "endpointId": 
"fal-ai/image-preprocessors/teed", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/image-preprocessors.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-preprocessors/teed", + "documentationUrl": "https://fal.ai/models/fal-ai/image-preprocessors/teed/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImagePreprocessorsTeedInput": { + "title": "TeeDInput", + "type": "object", + "properties": { + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/image_preprocessors/cat.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to process" + } + }, + "x-fal-order-properties": [ + "image_url" + ], + "required": [ + "image_url" + ] + }, + "ImagePreprocessorsTeedOutput": { + "title": "TeeDOutput", + "type": "object", + "properties": { + "image": { + "description": "Image with TeeD lines detected", + "$ref": "#/components/schemas/Image" + } + }, + "x-fal-order-properties": [ + "image" + ], + "required": [ + "image" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the image in pixels.", + "examples": [ + 1024 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the image in pixels.", + "examples": [ + 1024 + ] + } + }, + "description": "Represents an image file.", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-preprocessors/teed/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-preprocessors/teed/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/image-preprocessors/teed": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImagePreprocessorsTeedInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-preprocessors/teed/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImagePreprocessorsTeedOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-preprocessors/lineart", + "metadata": { + "display_name": "Image Preprocessors", + "category": "image-to-image", + "description": "Line art preprocessor.", + "status": "active", + "tags": [ + "preprocess", + "utility", + "sketch", + "controlnet" + ], + "updated_at": "2026-01-26T21:44:40.242Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/image-preprocessors.jpeg", + "model_url": "https://fal.run/fal-ai/image-preprocessors/lineart", + "date": "2024-09-16T00:00:00.000Z", + "group": { + "key": "image-preprocessors", + "label": "Line Art" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-preprocessors/lineart", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-preprocessors/lineart queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-preprocessors/lineart", + 
"category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/image-preprocessors.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-preprocessors/lineart", + "documentationUrl": "https://fal.ai/models/fal-ai/image-preprocessors/lineart/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImagePreprocessorsLineartInput": { + "title": "LineartInput", + "type": "object", + "properties": { + "coarse": { + "title": "Coarse", + "type": "boolean", + "description": "Whether to use the coarse model", + "default": false + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/image_preprocessors/cat.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to process" + } + }, + "x-fal-order-properties": [ + "image_url", + "coarse" + ], + "required": [ + "image_url" + ] + }, + "ImagePreprocessorsLineartOutput": { + "title": "LineartOutput", + "type": "object", + "properties": { + "image": { + "description": "Image with edges detected using the Canny algorithm", + "$ref": "#/components/schemas/Image" + } + }, + "x-fal-order-properties": [ + "image" + ], + "required": [ + "image" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the image in pixels.", + "examples": [ + 1024 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the image in pixels.", + "examples": [ + 1024 + ] + } + }, + "description": "Represents an image file.", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-preprocessors/lineart/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-preprocessors/lineart/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/image-preprocessors/lineart": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImagePreprocessorsLineartInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-preprocessors/lineart/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImagePreprocessorsLineartOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-preprocessors/zoe", + "metadata": { + "display_name": "Image Preprocessors", + "category": "image-to-image", + "description": "ZoeDepth preprocessor.", + "status": "active", + "tags": [ + "depth", + "preprocess", + "utility", + "controlnet" + ], + "updated_at": "2026-01-26T21:44:40.493Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/image-preprocessors.jpeg", + "model_url": "https://fal.run/fal-ai/image-preprocessors/zoe", + "date": "2024-09-16T00:00:00.000Z", + "group": { + "key": "image-preprocessors", + "label": "ZoeDepth" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-preprocessors/zoe", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-preprocessors/zoe queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-preprocessors/zoe", + "category": 
"image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/image-preprocessors.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-preprocessors/zoe", + "documentationUrl": "https://fal.ai/models/fal-ai/image-preprocessors/zoe/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImagePreprocessorsZoeInput": { + "title": "ZoeInput", + "type": "object", + "properties": { + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/image_preprocessors/cat.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to process" + } + }, + "x-fal-order-properties": [ + "image_url" + ], + "required": [ + "image_url" + ] + }, + "ImagePreprocessorsZoeOutput": { + "title": "ZoeOutput", + "type": "object", + "properties": { + "image": { + "description": "Image with depth map", + "$ref": "#/components/schemas/Image" + } + }, + "x-fal-order-properties": [ + "image" + ], + "required": [ + "image" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the image in pixels.", + "examples": [ + 1024 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the image in pixels.", + "examples": [ + 1024 + ] + } + }, + "description": "Represents an image file.", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-preprocessors/zoe/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-preprocessors/zoe/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/image-preprocessors/zoe": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImagePreprocessorsZoeInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-preprocessors/zoe/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImagePreprocessorsZoeOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/image-preprocessors/pidi", + "metadata": { + "display_name": "Image Preprocessors", + "category": "image-to-image", + "description": "PIDI (Pidinet) preprocessor.", + "status": "active", + "tags": [ + "detection", + "preprocess", + "utility", + "controlnet" + ], + "updated_at": "2026-01-26T21:44:40.743Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/image-preprocessors.jpeg", + "model_url": "https://fal.run/fal-ai/image-preprocessors/pidi", + "date": "2024-09-16T00:00:00.000Z", + "group": { + "key": "image-preprocessors", + "label": "PIDI" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/image-preprocessors/pidi", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/image-preprocessors/pidi queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/image-preprocessors/pidi", + "category": 
"image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/image-preprocessors.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/image-preprocessors/pidi", + "documentationUrl": "https://fal.ai/models/fal-ai/image-preprocessors/pidi/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImagePreprocessorsPidiInput": { + "title": "PiDiInput", + "type": "object", + "properties": { + "safe": { + "title": "Safe", + "type": "boolean", + "description": "Whether to use the safe version of the Pidi detector", + "default": false + }, + "apply_filter": { + "title": "Apply Filter", + "type": "boolean", + "description": "Whether to apply the filter to the image.", + "default": false + }, + "scribble": { + "title": "Scribble", + "type": "boolean", + "description": "Whether to use the scribble version of the Pidi detector", + "default": false + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/image_preprocessors/cat.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to process" + } + }, + "x-fal-order-properties": [ + "image_url", + "safe", + "scribble", + "apply_filter" + ], + "required": [ + "image_url" + ] + }, + "ImagePreprocessorsPidiOutput": { + "title": "PiDiOutput", + "type": "object", + "properties": { + "image": { + "description": "Image with Pidi lines detected", + "$ref": "#/components/schemas/Image" + } + }, + "x-fal-order-properties": [ + "image" + ], + "required": [ + "image" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the image in pixels.", + "examples": [ + 1024 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the image in pixels.", + "examples": [ + 1024 + ] + } + }, + "description": "Represents an image file.", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/image-preprocessors/pidi/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-preprocessors/pidi/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/image-preprocessors/pidi": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImagePreprocessorsPidiInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/image-preprocessors/pidi/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImagePreprocessorsPidiOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/sam2/image", + "metadata": { + "display_name": "Segment Anything Model 2", + "category": "image-to-image", + "description": "SAM 2 is a model for segmenting images and videos in real-time.", + "status": "active", + "tags": [ + "segmentation", + "mask", + "real-time" + ], + "updated_at": "2026-01-26T21:44:41.872Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/sam2.gif", + "model_url": "https://fal.run/fal-ai/sam2/image", + "date": "2024-08-15T00:00:00.000Z", + "group": { + "key": "sam2", + "label": "Image to Image" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/sam2/image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/sam2/image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/sam2/image", + "category": "image-to-image", + "thumbnailUrl": 
"https://storage.googleapis.com/falserverless/gallery/sam2.gif", + "playgroundUrl": "https://fal.ai/models/fal-ai/sam2/image", + "documentationUrl": "https://fal.ai/models/fal-ai/sam2/image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Sam2ImageInput": { + "title": "SAM2ImageInput", + "type": "object", + "properties": { + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "prompts": { + "examples": [ + [ + { + "y": 375, + "label": 1, + "x": 500 + } + ] + ], + "title": "Prompts", + "type": "array", + "description": "List of prompts to segment the image", + "items": { + "$ref": "#/components/schemas/PointPrompt" + }, + "default": [] + }, + "box_prompts": { + "examples": [ + [ + { + "y_min": 600, + "x_max": 700, + "x_min": 425, + "y_max": 875 + } + ] + ], + "title": "Box Prompts", + "type": "array", + "description": "Coordinates for boxes", + "items": { + "$ref": "#/components/schemas/BoxPrompt" + }, + "default": [] + }, + "apply_mask": { + "title": "Apply Mask", + "type": "boolean", + "description": "Apply the mask on the image.", + "default": false + }, + "image_url": { + "examples": [ + "https://raw.githubusercontent.com/facebookresearch/segment-anything-2/main/notebooks/images/truck.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to be segmented" + } + }, + "x-fal-order-properties": [ + "image_url", + "prompts", + "box_prompts", + "apply_mask", + "sync_mode", + "output_format" + ], + "required": [ + "image_url" + ] + }, + "Sam2ImageOutput": { + "title": "SAM2ImageOutput", + "type": "object", + "properties": { + "image": { + "title": "Image", + "description": "Segmented image.", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + } + }, + "x-fal-order-properties": [ + "image" + ], + "required": [ + "image" + ] + }, + "PointPrompt": { + "title": "PointPrompt", + "type": "object", + "properties": { + "y": { + "title": "Y", + "type": "integer", + "description": "Y Coordinate of the prompt", + "default": 350 + }, + "label": { + "enum": [ + 0, + 1 + ], + "title": "Label", + "type": "integer", + "description": "Label of the prompt. 
1 for foreground, 0 for background", + "default": 1 + }, + "frame_index": { + "title": "Frame Index", + "type": "integer", + "description": "The frame index to interact with.", + "default": 0 + }, + "x": { + "title": "X", + "type": "integer", + "description": "X Coordinate of the prompt", + "default": 305 + } + }, + "x-fal-order-properties": [ + "x", + "y", + "label", + "frame_index" + ] + }, + "BoxPrompt": { + "title": "BoxPrompt", + "type": "object", + "properties": { + "y_min": { + "title": "Y Min", + "type": "integer", + "description": "Y Min Coordinate of the box", + "default": 0 + }, + "frame_index": { + "title": "Frame Index", + "type": "integer", + "description": "The frame index to interact with.", + "default": 0 + }, + "x_max": { + "title": "X Max", + "type": "integer", + "description": "X Max Coordinate of the prompt", + "default": 0 + }, + "x_min": { + "title": "X Min", + "type": "integer", + "description": "X Min Coordinate of the box", + "default": 0 + }, + "y_max": { + "title": "Y Max", + "type": "integer", + "description": "Y Max Coordinate of the prompt", + "default": 0 + } + }, + "x-fal-order-properties": [ + "x_min", + "y_min", + "x_max", + "y_max", + "frame_index" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/sam2/image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sam2/image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/sam2/image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sam2ImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sam2/image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sam2ImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-general/image-to-image", + "metadata": { + "display_name": "FLUX.1 [dev] with Controlnets and Loras", + "category": "image-to-image", + "description": "FLUX General Image-to-Image is a versatile endpoint that transforms existing images with support for LoRA, ControlNet, and IP-Adapter extensions, enabling precise control over style transfer, modifications, and artistic variations through multiple guidance methods.", + "status": "active", + "tags": [ + "lora", + "controlnet", + "ip-adapter" + ], + "updated_at": "2026-01-26T21:44:41.996Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/flux-lora/flux_general.png", + "model_url": "https://fal.run/fal-ai/flux-general/image-to-image", + "license_type": "commercial", + "date": "2024-08-14T00:00:00.000Z", + "group": { + "key": "flux-general", + "label": "Image to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-general/image-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-general/image-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-general/image-to-image", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/flux-lora/flux_general.png", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-general/image-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-general/image-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + 
"IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxGeneralImageToImageInput": { + "title": "ImageToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A photo of a lion sitting on a stone bench" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "nag_end": { + "maximum": 1, + "type": "number", + "title": "Proportion of steps to apply NAG", + "description": "\n The proportion of steps to apply NAG. After the specified proportion\n of steps has been iterated, the remaining steps will use original\n attention processors in FLUX.\n ", + "exclusiveMinimum": 0, + "default": 0.25 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image." + }, + "control_loras": { + "description": "\n The LoRAs to use for the image generation which use a control image. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/ControlLoraWeight" + }, + "examples": [], + "title": "Control Loras", + "default": [] + }, + "loras": { + "description": "\n The LoRAs to use for the image generation. 
You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "examples": [], + "title": "Loras", + "default": [] + }, + "scheduler": { + "enum": [ + "euler", + "dpmpp_2m" + ], + "title": "Scheduler", + "type": "string", + "description": "Scheduler for the denoising process.", + "default": "euler" + }, + "easycontrols": { + "title": "Easycontrols", + "type": "array", + "description": "\n EasyControl Inputs to use for image generation.\n ", + "items": { + "$ref": "#/components/schemas/EasyControlWeight" + }, + "default": [] + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 3.5 + }, + "real_cfg_scale": { + "minimum": 0, + "maximum": 5, + "type": "number", + "title": "Real CFG scale", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 3.5 + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "use_cfg_zero": { + "title": "Use CFG-Zero-Init", + "type": "boolean", + "description": "\n Uses CFG-zero init sampling as in https://arxiv.org/abs/2503.18886.\n ", + "default": false + }, + "fill_image": { + "title": "Fill Image", + "description": "Use an image input to influence the generation. Can be used to fill images in masked areas.", + "allOf": [ + { + "$ref": "#/components/schemas/ImageFillInput" + } + ] + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "sigma_schedule": { + "enum": [ + "sgm_uniform" + ], + "title": "Sigma Schedule", + "type": "string", + "description": "Sigmas schedule for the denoising process." + }, + "reference_end": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Reference End", + "description": "\n The percentage of the total timesteps when the reference guidance is to be ended.\n ", + "default": 1 + }, + "reference_strength": { + "minimum": -3, + "maximum": 3, + "type": "number", + "title": "Reference Strength", + "description": "Strength of reference_only generation. Only used if a reference image is provided.", + "default": 0.65 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "image_url": { + "examples": [ + "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of image to use for inpainting or img2img." + }, + "nag_scale": { + "maximum": 10, + "type": "number", + "title": "NAG scale", + "description": "\n The scale for NAG. 
Higher values will result in an image that is more distant\n to the negative prompt.\n ", + "exclusiveMinimum": 1, + "default": 3 + }, + "reference_image_url": { + "title": "Reference Image Url", + "type": "string", + "description": "URL of Image for Reference-Only" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "controlnet_unions": { + "title": "Controlnet Unions", + "type": "array", + "description": "\n The controlnet unions to use for the image generation. Only one controlnet is supported at the moment.\n ", + "items": { + "$ref": "#/components/schemas/ControlNetUnion" + }, + "default": [] + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "\n Negative prompt to steer the image generation away from unwanted features.\n By default, we will be using NAG for processing the negative prompt.\n ", + "default": "" + }, + "nag_tau": { + "title": "NAG Tau", + "type": "number", + "description": "\n The tau for NAG. Controls the normalization of the hidden state.\n Higher values will result in a less aggressive normalization,\n but may also lead to unexpected changes with respect to the original image.\n Not recommended to change this value.\n ", + "exclusiveMinimum": 0, + "default": 2.5 + }, + "num_images": { + "minimum": 1, + "maximum": 10, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate. This is always set to 1 for streaming output.", + "default": 1 + }, + "use_beta_schedule": { + "title": "Use Beta Schedule", + "type": "boolean", + "description": "Specifies whether beta sigmas ought to be used.", + "default": false + }, + "ip_adapters": { + "title": "Ip Adapters", + "type": "array", + "description": "\n IP-Adapter to use for image generation.\n ", + "items": { + "$ref": "#/components/schemas/IPAdapter" + }, + "default": [] + }, + "base_shift": { + "minimum": 0.01, + "maximum": 5, + "type": "number", + "title": "Base Shift", + "description": "Base shift for the scheduled timesteps", + "default": 0.5 + }, + "nag_alpha": { + "maximum": 1, + "type": "number", + "title": "NAG alpha", + "description": "\n The alpha value for NAG. This value is used as a final weighting\n factor for steering the normalized guidance (positive and negative prompts)\n in the direction of the positive prompt. Higher values will result in less\n steering on the normalized guidance while lower values will result in\n considering the positive prompt guidance more.\n ", + "exclusiveMinimum": 0, + "default": 0.25 + }, + "strength": { + "minimum": 0.01, + "maximum": 1, + "type": "number", + "title": "Strength", + "description": "The strength to use for inpainting/image-to-image. Only used if the image_url is provided. 1.0 completely remakes the image while 0.0 preserves the original.", + "default": 0.85 + }, + "max_shift": { + "minimum": 0.01, + "maximum": 5, + "type": "number", + "title": "Max Shift", + "description": "Max shift for the scheduled timesteps", + "default": 1.15 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 28 + }, + "controlnets": { + "title": "Controlnets", + "type": "array", + "description": "\n The controlnets to use for the image generation. 
Only one controlnet is supported at the moment.\n ", + "items": { + "$ref": "#/components/schemas/ControlNet" + }, + "default": [] + }, + "reference_start": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Reference Start", + "description": "\n The percentage of the total timesteps when the reference guidance is to bestarted.\n ", + "default": 0 + }, + "use_real_cfg": { + "title": "Use Real CFG", + "type": "boolean", + "description": "\n Uses classical CFG as in SD1.5, SDXL, etc. Increases generation times and price when set to be true.\n If using XLabs IP-Adapter v1, this will be turned on!.\n ", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "loras", + "control_loras", + "controlnets", + "controlnet_unions", + "ip_adapters", + "easycontrols", + "fill_image", + "guidance_scale", + "real_cfg_scale", + "use_real_cfg", + "use_cfg_zero", + "sync_mode", + "num_images", + "enable_safety_checker", + "reference_image_url", + "reference_strength", + "reference_start", + "reference_end", + "base_shift", + "max_shift", + "output_format", + "use_beta_schedule", + "sigma_schedule", + "scheduler", + "negative_prompt", + "nag_scale", + "nag_tau", + "nag_alpha", + "nag_end", + "image_url", + "strength" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "FluxGeneralImageToImageOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "ControlLoraWeight": { + "title": "ControlLoraWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "anyOf": [ + { + "type": "object" + }, + { + "minimum": -4, + "maximum": 4, + "type": "number" + } + ], + "title": "Scale", + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model. Providing a dictionary as {\"layer_name\":layer_scale} allows per-layer lora scale settings. 
Layers with no scale provided will have scale 1.0.\n ", + "default": 1 + }, + "control_image_url": { + "title": "Control Image Url", + "type": "string", + "description": "URL of the image to be used as the control image." + }, + "preprocess": { + "enum": [ + "canny", + "depth", + "None" + ], + "title": "Preprocess", + "type": "string", + "description": "Type of preprocessing to apply to the input image.", + "default": "None" + } + }, + "x-fal-order-properties": [ + "path", + "scale", + "control_image_url", + "preprocess" + ], + "required": [ + "path", + "control_image_url" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "anyOf": [ + { + "type": "object" + }, + { + "minimum": -4, + "maximum": 4, + "type": "number" + } + ], + "title": "Scale", + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model. Providing a dictionary as {\"layer_name\":layer_scale} allows per-layer lora scale settings. Layers with no scale provided will have scale 1.0.\n ", + "default": 1 + } + }, + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "EasyControlWeight": { + "title": "EasyControlWeight", + "type": "object", + "properties": { + "scale": { + "minimum": 0, + "maximum": 2, + "type": "number", + "title": "Scale", + "description": "Scale for the control method.", + "default": 1 + }, + "image_control_type": { + "enum": [ + "subject", + "spatial" + ], + "title": "Image Control Type", + "type": "string", + "description": "Control type of the image. Must be one of `spatial` or `subject`." + }, + "control_method_url": { + "examples": [ + "canny", + "depth", + "hedsketch", + "inpainting", + "pose", + "seg", + "subject", + "ghibli" + ], + "title": "Control Method Url", + "type": "string", + "description": "URL to safetensor weights of control method to be applied. Can also be one of `canny`, `depth`, `hedsketch`, `inpainting`, `pose`, `seg`, `subject`, `ghibli` " + }, + "image_url": { + "title": "Image Url", + "type": "string", + "description": "URL of an image to use as a control" + } + }, + "x-fal-order-properties": [ + "control_method_url", + "scale", + "image_url", + "image_control_type" + ], + "required": [ + "control_method_url", + "image_url", + "image_control_type" + ] + }, + "ImageFillInput": { + "title": "ImageFillInput", + "type": "object", + "properties": { + "fill_image_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "type": "string" + } + } + ], + "title": "Fill Image Url", + "description": "URLs of images to be filled for redux prompting", + "default": [] + } + }, + "x-fal-order-properties": [ + "fill_image_url" + ] + }, + "ControlNetUnion": { + "title": "ControlNetUnion", + "type": "object", + "properties": { + "controls": { + "title": "Controls", + "type": "array", + "description": "The control images and modes to use for the control net.", + "items": { + "$ref": "#/components/schemas/ControlNetUnionInput" + } + }, + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the control net weights." + }, + "variant": { + "title": "Variant", + "type": "string", + "description": "The optional variant if a Hugging Face repo key is used." 
+ }, + "config_url": { + "title": "Config Url", + "type": "string", + "description": "optional URL to the controlnet config.json file." + } + }, + "x-fal-order-properties": [ + "path", + "config_url", + "variant", + "controls" + ], + "required": [ + "path", + "controls" + ] + }, + "IPAdapter": { + "title": "IPAdapter", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "Hugging Face path to the IP-Adapter" + }, + "mask_threshold": { + "minimum": 0.01, + "maximum": 0.99, + "type": "number", + "title": "Mask Threshold", + "description": "Threshold for mask.", + "default": 0.5 + }, + "image_encoder_weight_name": { + "title": "Image Encoder Weight Name", + "type": "string", + "description": "Name of the image encoder." + }, + "image_encoder_subfolder": { + "title": "Image Encoder Subfolder", + "type": "string", + "description": "Subfolder in which the image encoder weights exist." + }, + "image_url": { + "title": "Image Url", + "type": "string", + "description": "URL of Image for IP-Adapter conditioning. " + }, + "mask_image_url": { + "title": "Mask Image Url", + "type": "string", + "description": "URL of the mask for the control image." + }, + "subfolder": { + "title": "Subfolder", + "type": "string", + "description": "Subfolder in which the ip_adapter weights exist" + }, + "scale": { + "title": "Scale", + "type": "number", + "description": "Scale for ip adapter." + }, + "image_encoder_path": { + "title": "Image Encoder Path", + "type": "string", + "description": "Path to the Image Encoder for the IP-Adapter, for example 'openai/clip-vit-large-patch14'" + }, + "weight_name": { + "title": "Weight Name", + "type": "string", + "description": "Name of the safetensors file containing the ip-adapter weights" + } + }, + "x-fal-order-properties": [ + "path", + "subfolder", + "weight_name", + "image_encoder_path", + "image_encoder_subfolder", + "image_encoder_weight_name", + "image_url", + "mask_image_url", + "mask_threshold", + "scale" + ], + "required": [ + "path", + "image_encoder_path", + "image_url", + "scale" + ] + }, + "ControlNet": { + "title": "ControlNet", + "type": "object", + "properties": { + "conditioning_scale": { + "minimum": 0, + "maximum": 2, + "type": "number", + "title": "Conditioning Scale", + "description": "\n The scale of the control net weight. This is used to scale the control net weight\n before merging it with the base model.\n ", + "default": 1 + }, + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the control net weights." + }, + "mask_threshold": { + "minimum": 0.01, + "maximum": 0.99, + "type": "number", + "title": "Mask Threshold", + "description": "Threshold for mask.", + "default": 0.5 + }, + "end_percentage": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "End Percentage", + "description": "\n The percentage of the image to end applying the controlnet in terms of the total timesteps.\n ", + "default": 1 + }, + "config_url": { + "title": "Config Url", + "type": "string", + "description": "optional URL to the controlnet config.json file." + }, + "mask_image_url": { + "title": "Mask Image Url", + "type": "string", + "description": "URL of the mask for the control image.", + "nullable": true + }, + "variant": { + "title": "Variant", + "type": "string", + "description": "The optional variant if a Hugging Face repo key is used." 
+ }, + "control_image_url": { + "title": "Control Image Url", + "type": "string", + "description": "URL of the image to be used as the control image." + }, + "start_percentage": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Start Percentage", + "description": "\n The percentage of the image to start applying the controlnet in terms of the total timesteps.\n ", + "default": 0 + } + }, + "x-fal-order-properties": [ + "path", + "config_url", + "variant", + "control_image_url", + "mask_image_url", + "mask_threshold", + "conditioning_scale", + "start_percentage", + "end_percentage" + ], + "required": [ + "path", + "control_image_url" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + }, + "ControlNetUnionInput": { + "title": "ControlNetUnionInput", + "type": "object", + "properties": { + "conditioning_scale": { + "minimum": 0, + "maximum": 2, + "type": "number", + "title": "Conditioning Scale", + "description": "\n The scale of the control net weight. This is used to scale the control net weight\n before merging it with the base model.\n ", + "default": 1 + }, + "mask_threshold": { + "minimum": 0.01, + "maximum": 0.99, + "type": "number", + "title": "Mask Threshold", + "description": "Threshold for mask.", + "default": 0.5 + }, + "end_percentage": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "End Percentage", + "description": "\n The percentage of the image to end applying the controlnet in terms of the total timesteps.\n ", + "default": 1 + }, + "mask_image_url": { + "title": "Mask Image Url", + "type": "string", + "description": "URL of the mask for the control image.", + "nullable": true + }, + "control_image_url": { + "title": "Control Image Url", + "type": "string", + "description": "URL of the image to be used as the control image." + }, + "control_mode": { + "enum": [ + "canny", + "tile", + "depth", + "blur", + "pose", + "gray", + "low-quality" + ], + "title": "Control Mode", + "type": "string", + "description": "Control Mode for Flux Controlnet Union. Supported values are:\n - canny: Uses the edges for guided generation.\n - tile: Uses the tiles for guided generation.\n - depth: Utilizes a grayscale depth map for guided generation.\n - blur: Adds a blur to the image.\n - pose: Uses the pose of the image for guided generation.\n - gray: Converts the image to grayscale.\n - low-quality: Converts the image to a low-quality image." 
+ }, + "start_percentage": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Start Percentage", + "description": "\n The percentage of the image to start applying the controlnet in terms of the total timesteps.\n ", + "default": 0 + } + }, + "x-fal-order-properties": [ + "control_image_url", + "mask_image_url", + "control_mode", + "conditioning_scale", + "mask_threshold", + "start_percentage", + "end_percentage" + ], + "required": [ + "control_image_url", + "control_mode" + ] + } + } + }, + "paths": { + "/fal-ai/flux-general/image-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-general/image-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-general/image-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxGeneralImageToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-general/image-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxGeneralImageToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-general/inpainting", + "metadata": { + "display_name": "FLUX.1 [dev] with Controlnets and Loras", + "category": "image-to-image", + "description": "FLUX General Inpainting is a versatile endpoint that enables precise image editing and completion, supporting multiple AI extensions including LoRA, ControlNet, and IP-Adapter for enhanced control over inpainting results and sophisticated image modifications.", + "status": "active", + "tags": [ + "lora", + "controlnet", + "ip-adapter" + ], + "updated_at": "2026-01-26T21:44:42.637Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/flux-lora/flux_general.png", + "model_url": "https://fal.run/fal-ai/flux-general/inpainting", + "license_type": "commercial", + "date": "2024-08-14T00:00:00.000Z", + "group": { + "key": "flux-general", + "label": 
"Inpainting" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-general/inpainting", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-general/inpainting queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-general/inpainting", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/flux-lora/flux_general.png", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-general/inpainting", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-general/inpainting/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxGeneralInpaintingInput": { + "title": "InpaintInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A photo of a lion sitting on a stone bench" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "nag_end": { + "maximum": 1, + "type": "number", + "title": "Proportion of steps to apply NAG", + "description": "\n The proportion of steps to apply NAG. After the specified proportion\n of steps has been iterated, the remaining steps will use original\n attention processors in FLUX.\n ", + "exclusiveMinimum": 0, + "default": 0.25 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image." + }, + "control_loras": { + "description": "\n The LoRAs to use for the image generation which use a control image. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/ControlLoraWeight" + }, + "examples": [], + "title": "Control Loras", + "default": [] + }, + "loras": { + "description": "\n The LoRAs to use for the image generation. 
You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "examples": [], + "title": "Loras", + "default": [] + }, + "scheduler": { + "enum": [ + "euler", + "dpmpp_2m" + ], + "title": "Scheduler", + "type": "string", + "description": "Scheduler for the denoising process.", + "default": "euler" + }, + "easycontrols": { + "title": "Easycontrols", + "type": "array", + "description": "\n EasyControl Inputs to use for image generation.\n ", + "items": { + "$ref": "#/components/schemas/EasyControlWeight" + }, + "default": [] + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 3.5 + }, + "real_cfg_scale": { + "minimum": 0, + "maximum": 5, + "type": "number", + "title": "Real CFG scale", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 3.5 + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "use_cfg_zero": { + "title": "Use CFG-Zero-Init", + "type": "boolean", + "description": "\n Uses CFG-zero init sampling as in https://arxiv.org/abs/2503.18886.\n ", + "default": false + }, + "fill_image": { + "title": "Fill Image", + "description": "Use an image input to influence the generation. Can be used to fill images in masked areas.", + "allOf": [ + { + "$ref": "#/components/schemas/ImageFillInput" + } + ] + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "sigma_schedule": { + "enum": [ + "sgm_uniform" + ], + "title": "Sigma Schedule", + "type": "string", + "description": "Sigmas schedule for the denoising process." + }, + "reference_end": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Reference End", + "description": "\n The percentage of the total timesteps when the reference guidance is to be ended.\n ", + "default": 1 + }, + "reference_strength": { + "minimum": -3, + "maximum": 3, + "type": "number", + "title": "Reference Strength", + "description": "Strength of reference_only generation. Only used if a reference image is provided.", + "default": 0.65 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "mask_url": { + "examples": [ + "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" + ], + "title": "Mask Url", + "type": "string", + "description": "\n The mask of the area to inpaint in.\n " + }, + "image_url": { + "examples": [ + "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of image to use for inpainting or img2img." + }, + "nag_scale": { + "maximum": 10, + "type": "number", + "title": "NAG scale", + "description": "\n The scale for NAG. Higher values will result in an image that is more distant\n to the negative prompt.\n ", + "exclusiveMinimum": 1, + "default": 3 + }, + "reference_image_url": { + "title": "Reference Image Url", + "type": "string", + "description": "URL of Image for Reference-Only" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "controlnet_unions": { + "title": "Controlnet Unions", + "type": "array", + "description": "\n The controlnet unions to use for the image generation. Only one controlnet is supported at the moment.\n ", + "items": { + "$ref": "#/components/schemas/ControlNetUnion" + }, + "default": [] + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "\n Negative prompt to steer the image generation away from unwanted features.\n By default, we will be using NAG for processing the negative prompt.\n ", + "default": "" + }, + "nag_tau": { + "title": "NAG Tau", + "type": "number", + "description": "\n The tau for NAG. Controls the normalization of the hidden state.\n Higher values will result in a less aggressive normalization,\n but may also lead to unexpected changes with respect to the original image.\n Not recommended to change this value.\n ", + "exclusiveMinimum": 0, + "default": 2.5 + }, + "num_images": { + "minimum": 1, + "maximum": 10, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate. This is always set to 1 for streaming output.", + "default": 1 + }, + "use_beta_schedule": { + "title": "Use Beta Schedule", + "type": "boolean", + "description": "Specifies whether beta sigmas ought to be used.", + "default": false + }, + "ip_adapters": { + "title": "Ip Adapters", + "type": "array", + "description": "\n IP-Adapter to use for image generation.\n ", + "items": { + "$ref": "#/components/schemas/IPAdapter" + }, + "default": [] + }, + "base_shift": { + "minimum": 0.01, + "maximum": 5, + "type": "number", + "title": "Base Shift", + "description": "Base shift for the scheduled timesteps", + "default": 0.5 + }, + "nag_alpha": { + "maximum": 1, + "type": "number", + "title": "NAG alpha", + "description": "\n The alpha value for NAG. This value is used as a final weighting\n factor for steering the normalized guidance (positive and negative prompts)\n in the direction of the positive prompt. Higher values will result in less\n steering on the normalized guidance where lower values will result in\n considering the positive prompt guidance more.\n ", + "exclusiveMinimum": 0, + "default": 0.25 + }, + "strength": { + "minimum": 0.01, + "maximum": 1, + "type": "number", + "title": "Strength", + "description": "The strength to use for inpainting/image-to-image. Only used if the image_url is provided. 1.0 completely remakes the image while 0.0 preserves the original.", + "default": 0.85 + }, + "max_shift": { + "minimum": 0.01, + "maximum": 5, + "type": "number", + "title": "Max Shift", + "description": "Max shift for the scheduled timesteps", + "default": 1.15 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 28 + }, + "controlnets": { + "title": "Controlnets", + "type": "array", + "description": "\n The controlnets to use for the image generation. Only one controlnet is supported at the moment.\n ", + "items": { + "$ref": "#/components/schemas/ControlNet" + }, + "default": [] + }, + "reference_start": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Reference Start", + "description": "\n The percentage of the total timesteps when the reference guidance is to be started.\n ", + "default": 0 + }, + "use_real_cfg": { + "title": "Use Real CFG", + "type": "boolean", + "description": "\n Uses classical CFG as in SD1.5, SDXL, etc. Increases generation times and price when set to true.\n If using XLabs IP-Adapter v1, this will be turned on.\n ", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "loras", + "control_loras", + "controlnets", + "controlnet_unions", + "ip_adapters", + "easycontrols", + "fill_image", + "guidance_scale", + "real_cfg_scale", + "use_real_cfg", + "use_cfg_zero", + "sync_mode", + "num_images", + "enable_safety_checker", + "reference_image_url", + "reference_strength", + "reference_start", + "reference_end", + "base_shift", + "max_shift", + "output_format", + "use_beta_schedule", + "sigma_schedule", + "scheduler", + "negative_prompt", + "nag_scale", + "nag_tau", + "nag_alpha", + "nag_end", + "image_url", + "strength", + "mask_url" + ], + "required": [ + "prompt", + "image_url", + "mask_url" + ] + }, + "FluxGeneralInpaintingOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "ControlLoraWeight": { + "title": "ControlLoraWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "anyOf": [ + { + "type": "object" + }, + { + "minimum": -4, + "maximum": 4, + "type": "number" + } + ], + "title": "Scale", + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model. Providing a dictionary as {\"layer_name\":layer_scale} allows per-layer lora scale settings. Layers with no scale provided will have scale 1.0.\n ", + "default": 1 + }, + "control_image_url": { + "title": "Control Image Url", + "type": "string", + "description": "URL of the image to be used as the control image." + }, + "preprocess": { + "enum": [ + "canny", + "depth", + "None" + ], + "title": "Preprocess", + "type": "string", + "description": "Type of preprocessing to apply to the input image.", + "default": "None" + } + }, + "x-fal-order-properties": [ + "path", + "scale", + "control_image_url", + "preprocess" + ], + "required": [ + "path", + "control_image_url" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "anyOf": [ + { + "type": "object" + }, + { + "minimum": -4, + "maximum": 4, + "type": "number" + } + ], + "title": "Scale", + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model. Providing a dictionary as {\"layer_name\":layer_scale} allows per-layer lora scale settings. Layers with no scale provided will have scale 1.0.\n ", + "default": 1 + } + }, + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "EasyControlWeight": { + "title": "EasyControlWeight", + "type": "object", + "properties": { + "scale": { + "minimum": 0, + "maximum": 2, + "type": "number", + "title": "Scale", + "description": "Scale for the control method.", + "default": 1 + }, + "image_control_type": { + "enum": [ + "subject", + "spatial" + ], + "title": "Image Control Type", + "type": "string", + "description": "Control type of the image. Must be one of `spatial` or `subject`." + }, + "control_method_url": { + "examples": [ + "canny", + "depth", + "hedsketch", + "inpainting", + "pose", + "seg", + "subject", + "ghibli" + ], + "title": "Control Method Url", + "type": "string", + "description": "URL to safetensor weights of control method to be applied. 
Can also be one of `canny`, `depth`, `hedsketch`, `inpainting`, `pose`, `seg`, `subject`, `ghibli` " + }, + "image_url": { + "title": "Image Url", + "type": "string", + "description": "URL of an image to use as a control" + } + }, + "x-fal-order-properties": [ + "control_method_url", + "scale", + "image_url", + "image_control_type" + ], + "required": [ + "control_method_url", + "image_url", + "image_control_type" + ] + }, + "ImageFillInput": { + "title": "ImageFillInput", + "type": "object", + "properties": { + "fill_image_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "type": "string" + } + } + ], + "title": "Fill Image Url", + "description": "URLs of images to be filled for redux prompting", + "default": [] + } + }, + "x-fal-order-properties": [ + "fill_image_url" + ] + }, + "ControlNetUnion": { + "title": "ControlNetUnion", + "type": "object", + "properties": { + "controls": { + "title": "Controls", + "type": "array", + "description": "The control images and modes to use for the control net.", + "items": { + "$ref": "#/components/schemas/ControlNetUnionInput" + } + }, + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the control net weights." + }, + "variant": { + "title": "Variant", + "type": "string", + "description": "The optional variant if a Hugging Face repo key is used." + }, + "config_url": { + "title": "Config Url", + "type": "string", + "description": "optional URL to the controlnet config.json file." + } + }, + "x-fal-order-properties": [ + "path", + "config_url", + "variant", + "controls" + ], + "required": [ + "path", + "controls" + ] + }, + "IPAdapter": { + "title": "IPAdapter", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "Hugging Face path to the IP-Adapter" + }, + "mask_threshold": { + "minimum": 0.01, + "maximum": 0.99, + "type": "number", + "title": "Mask Threshold", + "description": "Threshold for mask.", + "default": 0.5 + }, + "image_encoder_weight_name": { + "title": "Image Encoder Weight Name", + "type": "string", + "description": "Name of the image encoder." + }, + "image_encoder_subfolder": { + "title": "Image Encoder Subfolder", + "type": "string", + "description": "Subfolder in which the image encoder weights exist." + }, + "image_url": { + "title": "Image Url", + "type": "string", + "description": "URL of Image for IP-Adapter conditioning. " + }, + "mask_image_url": { + "title": "Mask Image Url", + "type": "string", + "description": "URL of the mask for the control image." + }, + "subfolder": { + "title": "Subfolder", + "type": "string", + "description": "Subfolder in which the ip_adapter weights exist" + }, + "scale": { + "title": "Scale", + "type": "number", + "description": "Scale for ip adapter." 
+ }, + "image_encoder_path": { + "title": "Image Encoder Path", + "type": "string", + "description": "Path to the Image Encoder for the IP-Adapter, for example 'openai/clip-vit-large-patch14'" + }, + "weight_name": { + "title": "Weight Name", + "type": "string", + "description": "Name of the safetensors file containing the ip-adapter weights" + } + }, + "x-fal-order-properties": [ + "path", + "subfolder", + "weight_name", + "image_encoder_path", + "image_encoder_subfolder", + "image_encoder_weight_name", + "image_url", + "mask_image_url", + "mask_threshold", + "scale" + ], + "required": [ + "path", + "image_encoder_path", + "image_url", + "scale" + ] + }, + "ControlNet": { + "title": "ControlNet", + "type": "object", + "properties": { + "conditioning_scale": { + "minimum": 0, + "maximum": 2, + "type": "number", + "title": "Conditioning Scale", + "description": "\n The scale of the control net weight. This is used to scale the control net weight\n before merging it with the base model.\n ", + "default": 1 + }, + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the control net weights." + }, + "mask_threshold": { + "minimum": 0.01, + "maximum": 0.99, + "type": "number", + "title": "Mask Threshold", + "description": "Threshold for mask.", + "default": 0.5 + }, + "end_percentage": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "End Percentage", + "description": "\n The percentage of the image to end applying the controlnet in terms of the total timesteps.\n ", + "default": 1 + }, + "config_url": { + "title": "Config Url", + "type": "string", + "description": "optional URL to the controlnet config.json file." + }, + "mask_image_url": { + "title": "Mask Image Url", + "type": "string", + "description": "URL of the mask for the control image.", + "nullable": true + }, + "variant": { + "title": "Variant", + "type": "string", + "description": "The optional variant if a Hugging Face repo key is used." + }, + "control_image_url": { + "title": "Control Image Url", + "type": "string", + "description": "URL of the image to be used as the control image." + }, + "start_percentage": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Start Percentage", + "description": "\n The percentage of the image to start applying the controlnet in terms of the total timesteps.\n ", + "default": 0 + } + }, + "x-fal-order-properties": [ + "path", + "config_url", + "variant", + "control_image_url", + "mask_image_url", + "mask_threshold", + "conditioning_scale", + "start_percentage", + "end_percentage" + ], + "required": [ + "path", + "control_image_url" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + }, + "ControlNetUnionInput": { + "title": "ControlNetUnionInput", + "type": "object", + "properties": { + "conditioning_scale": { + "minimum": 0, + "maximum": 2, + "type": "number", + "title": "Conditioning Scale", + "description": "\n The scale of the control net weight. 
This is used to scale the control net weight\n before merging it with the base model.\n ", + "default": 1 + }, + "mask_threshold": { + "minimum": 0.01, + "maximum": 0.99, + "type": "number", + "title": "Mask Threshold", + "description": "Threshold for mask.", + "default": 0.5 + }, + "end_percentage": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "End Percentage", + "description": "\n The percentage of the image to end applying the controlnet in terms of the total timesteps.\n ", + "default": 1 + }, + "mask_image_url": { + "title": "Mask Image Url", + "type": "string", + "description": "URL of the mask for the control image.", + "nullable": true + }, + "control_image_url": { + "title": "Control Image Url", + "type": "string", + "description": "URL of the image to be used as the control image." + }, + "control_mode": { + "enum": [ + "canny", + "tile", + "depth", + "blur", + "pose", + "gray", + "low-quality" + ], + "title": "Control Mode", + "type": "string", + "description": "Control Mode for Flux Controlnet Union. Supported values are:\n - canny: Uses the edges for guided generation.\n - tile: Uses the tiles for guided generation.\n - depth: Utilizes a grayscale depth map for guided generation.\n - blur: Adds a blur to the image.\n - pose: Uses the pose of the image for guided generation.\n - gray: Converts the image to grayscale.\n - low-quality: Converts the image to a low-quality image." + }, + "start_percentage": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Start Percentage", + "description": "\n The percentage of the image to start applying the controlnet in terms of the total timesteps.\n ", + "default": 0 + } + }, + "x-fal-order-properties": [ + "control_image_url", + "mask_image_url", + "control_mode", + "conditioning_scale", + "mask_threshold", + "start_percentage", + "end_percentage" + ], + "required": [ + "control_image_url", + "control_mode" + ] + } + } + }, + "paths": { + "/fal-ai/flux-general/inpainting/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-general/inpainting/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-general/inpainting": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxGeneralInpaintingInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-general/inpainting/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxGeneralInpaintingOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-general/differential-diffusion", + "metadata": { + "display_name": "FLUX.1 [dev] with Controlnets and Loras", + "category": "image-to-image", + "description": "A specialized FLUX endpoint combining differential diffusion control with LoRA, ControlNet, and IP-Adapter support, enabling precise, region-specific image transformations through customizable change maps.", + "status": "active", + "tags": [ + "lora", + "controlnet", + "ip-adapter" + ], + "updated_at": "2026-01-26T21:44:42.961Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/flux-lora/flux_general.png", + "model_url": "https://fal.run/fal-ai/flux-general/differential-diffusion", + "license_type": "commercial", + "date": "2024-08-13T00:00:00.000Z", + "group": { + "key": "flux-general", + "label": "Differential Diffusion" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-general/differential-diffusion", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-general/differential-diffusion queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-general/differential-diffusion", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/flux-lora/flux_general.png", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-general/differential-diffusion", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-general/differential-diffusion/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxGeneralDifferentialDiffusionInput": { + "title": "DifferentialDiffusionInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Tree of life under the sea, ethereal, glittering, lens flares, cinematic lighting, artwork by Anna Dittmann & Carne Griffiths, 8k, unreal engine 5, hightly detailed, intricate detailed." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "nag_end": { + "maximum": 1, + "type": "number", + "title": "Proportion of steps to apply NAG", + "description": "\n The proportion of steps to apply NAG. After the specified proportion\n of steps has been iterated, the remaining steps will use original\n attention processors in FLUX.\n ", + "exclusiveMinimum": 0, + "default": 0.25 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image." + }, + "control_loras": { + "description": "\n The LoRAs to use for the image generation which use a control image. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/ControlLoraWeight" + }, + "examples": [], + "title": "Control Loras", + "default": [] + }, + "loras": { + "description": "\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "examples": [], + "title": "Loras", + "default": [] + }, + "scheduler": { + "enum": [ + "euler", + "dpmpp_2m" + ], + "title": "Scheduler", + "type": "string", + "description": "Scheduler for the denoising process.", + "default": "euler" + }, + "easycontrols": { + "title": "Easycontrols", + "type": "array", + "description": "\n EasyControl Inputs to use for image generation.\n ", + "items": { + "$ref": "#/components/schemas/EasyControlWeight" + }, + "default": [] + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 3.5 + }, + "real_cfg_scale": { + "minimum": 0, + "maximum": 5, + "type": "number", + "title": "Real CFG scale", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 3.5 + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "use_cfg_zero": { + "title": "Use CFG-Zero-Init", + "type": "boolean", + "description": "\n Uses CFG-zero init sampling as in https://arxiv.org/abs/2503.18886.\n ", + "default": false + }, + "fill_image": { + "title": "Fill Image", + "description": "Use an image input to influence the generation. 
Can be used to fill images in masked areas.", + "allOf": [ + { + "$ref": "#/components/schemas/ImageFillInput" + } + ] + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "sigma_schedule": { + "enum": [ + "sgm_uniform" + ], + "title": "Sigma Schedule", + "type": "string", + "description": "Sigmas schedule for the denoising process." + }, + "reference_end": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Reference End", + "description": "\n The percentage of the total timesteps when the reference guidance is to be ended.\n ", + "default": 1 + }, + "reference_strength": { + "minimum": -3, + "maximum": 3, + "type": "number", + "title": "Reference Strength", + "description": "Strength of reference_only generation. Only used if a reference image is provided.", + "default": 0.65 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "image_url": { + "examples": [ + "https://fal.media/files/koala/h6a7KK2Ie_inuGbdartoX.jpeg" + ], + "title": "Image URL", + "type": "string", + "description": "URL of image to use as initial image." + }, + "nag_scale": { + "maximum": 10, + "type": "number", + "title": "NAG scale", + "description": "\n The scale for NAG. Higher values will result in an image that is more distant\n to the negative prompt.\n ", + "exclusiveMinimum": 1, + "default": 3 + }, + "reference_image_url": { + "title": "Reference Image Url", + "type": "string", + "description": "URL of Image for Reference-Only" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "controlnet_unions": { + "title": "Controlnet Unions", + "type": "array", + "description": "\n The controlnet unions to use for the image generation. Only one controlnet is supported at the moment.\n ", + "items": { + "$ref": "#/components/schemas/ControlNetUnion" + }, + "default": [] + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "\n Negative prompt to steer the image generation away from unwanted features.\n By default, we will be using NAG for processing the negative prompt.\n ", + "default": "" + }, + "nag_tau": { + "title": "NAG Tau", + "type": "number", + "description": "\n The tau for NAG. Controls the normalization of the hidden state.\n Higher values will result in a less aggressive normalization,\n but may also lead to unexpected changes with respect to the original image.\n Not recommended to change this value.\n ", + "exclusiveMinimum": 0, + "default": 2.5 + }, + "change_map_image_url": { + "examples": [ + "https://fal.media/files/zebra/Wh4IYAiAAcVbuZ8M9ZMSn.jpeg" + ], + "title": "Change Map URL", + "type": "string", + "description": "URL of change map." + }, + "num_images": { + "minimum": 1, + "maximum": 10, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate. This is always set to 1 for streaming output.", + "default": 1 + }, + "use_beta_schedule": { + "title": "Use Beta Schedule", + "type": "boolean", + "description": "Specifies whether beta sigmas ought to be used.", + "default": false + }, + "ip_adapters": { + "title": "Ip Adapters", + "type": "array", + "description": "\n IP-Adapter to use for image generation.\n ", + "items": { + "$ref": "#/components/schemas/IPAdapter" + }, + "default": [] + }, + "base_shift": { + "minimum": 0.01, + "maximum": 5, + "type": "number", + "title": "Base Shift", + "description": "Base shift for the scheduled timesteps", + "default": 0.5 + }, + "nag_alpha": { + "maximum": 1, + "type": "number", + "title": "NAG alpha", + "description": "\n The alpha value for NAG. This value is used as a final weighting\n factor for steering the normalized guidance (positive and negative prompts)\n in the direction of the positive prompt. Higher values will result in less\n steering on the normalized guidance where lower values will result in\n considering the positive prompt guidance more.\n ", + "exclusiveMinimum": 0, + "default": 0.25 + }, + "strength": { + "minimum": 0.01, + "maximum": 1, + "type": "number", + "title": "Strength", + "description": "The strength to use for differential diffusion. 1.0 completely remakes the image while 0.0 preserves the original.", + "default": 0.85 + }, + "max_shift": { + "minimum": 0.01, + "maximum": 5, + "type": "number", + "title": "Max Shift", + "description": "Max shift for the scheduled timesteps", + "default": 1.15 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 28 + }, + "controlnets": { + "title": "Controlnets", + "type": "array", + "description": "\n The controlnets to use for the image generation. Only one controlnet is supported at the moment.\n ", + "items": { + "$ref": "#/components/schemas/ControlNet" + }, + "default": [] + }, + "reference_start": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Reference Start", + "description": "\n The percentage of the total timesteps when the reference guidance is to be started.\n ", + "default": 0 + }, + "use_real_cfg": { + "title": "Use Real CFG", + "type": "boolean", + "description": "\n Uses classical CFG as in SD1.5, SDXL, etc. Increases generation times and price when set to true.\n If using XLabs IP-Adapter v1, this will be turned on.\n ", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "loras", + "control_loras", + "controlnets", + "controlnet_unions", + "ip_adapters", + "easycontrols", + "fill_image", + "guidance_scale", + "real_cfg_scale", + "use_real_cfg", + "use_cfg_zero", + "sync_mode", + "num_images", + "enable_safety_checker", + "reference_image_url", + "reference_strength", + "reference_start", + "reference_end", + "base_shift", + "max_shift", + "output_format", + "use_beta_schedule", + "sigma_schedule", + "scheduler", + "negative_prompt", + "nag_scale", + "nag_tau", + "nag_alpha", + "nag_end", + "image_url", + "change_map_image_url", + "strength" + ], + "required": [ + "prompt", + "image_url", + "change_map_image_url" + ] + }, + "FluxGeneralDifferentialDiffusionOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." 
+ }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "ControlLoraWeight": { + "title": "ControlLoraWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "anyOf": [ + { + "type": "object" + }, + { + "minimum": -4, + "maximum": 4, + "type": "number" + } + ], + "title": "Scale", + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model. Providing a dictionary as {\"layer_name\":layer_scale} allows per-layer lora scale settings. Layers with no scale provided will have scale 1.0.\n ", + "default": 1 + }, + "control_image_url": { + "title": "Control Image Url", + "type": "string", + "description": "URL of the image to be used as the control image." + }, + "preprocess": { + "enum": [ + "canny", + "depth", + "None" + ], + "title": "Preprocess", + "type": "string", + "description": "Type of preprocessing to apply to the input image.", + "default": "None" + } + }, + "x-fal-order-properties": [ + "path", + "scale", + "control_image_url", + "preprocess" + ], + "required": [ + "path", + "control_image_url" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "anyOf": [ + { + "type": "object" + }, + { + "minimum": -4, + "maximum": 4, + "type": "number" + } + ], + "title": "Scale", + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model. Providing a dictionary as {\"layer_name\":layer_scale} allows per-layer lora scale settings. 
Layers with no scale provided will have scale 1.0.\n ", + "default": 1 + } + }, + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "EasyControlWeight": { + "title": "EasyControlWeight", + "type": "object", + "properties": { + "scale": { + "minimum": 0, + "maximum": 2, + "type": "number", + "title": "Scale", + "description": "Scale for the control method.", + "default": 1 + }, + "image_control_type": { + "enum": [ + "subject", + "spatial" + ], + "title": "Image Control Type", + "type": "string", + "description": "Control type of the image. Must be one of `spatial` or `subject`." + }, + "control_method_url": { + "examples": [ + "canny", + "depth", + "hedsketch", + "inpainting", + "pose", + "seg", + "subject", + "ghibli" + ], + "title": "Control Method Url", + "type": "string", + "description": "URL to safetensor weights of control method to be applied. Can also be one of `canny`, `depth`, `hedsketch`, `inpainting`, `pose`, `seg`, `subject`, `ghibli` " + }, + "image_url": { + "title": "Image Url", + "type": "string", + "description": "URL of an image to use as a control" + } + }, + "x-fal-order-properties": [ + "control_method_url", + "scale", + "image_url", + "image_control_type" + ], + "required": [ + "control_method_url", + "image_url", + "image_control_type" + ] + }, + "ImageFillInput": { + "title": "ImageFillInput", + "type": "object", + "properties": { + "fill_image_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "type": "string" + } + } + ], + "title": "Fill Image Url", + "description": "URLs of images to be filled for redux prompting", + "default": [] + } + }, + "x-fal-order-properties": [ + "fill_image_url" + ] + }, + "ControlNetUnion": { + "title": "ControlNetUnion", + "type": "object", + "properties": { + "controls": { + "title": "Controls", + "type": "array", + "description": "The control images and modes to use for the control net.", + "items": { + "$ref": "#/components/schemas/ControlNetUnionInput" + } + }, + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the control net weights." + }, + "variant": { + "title": "Variant", + "type": "string", + "description": "The optional variant if a Hugging Face repo key is used." + }, + "config_url": { + "title": "Config Url", + "type": "string", + "description": "optional URL to the controlnet config.json file." + } + }, + "x-fal-order-properties": [ + "path", + "config_url", + "variant", + "controls" + ], + "required": [ + "path", + "controls" + ] + }, + "IPAdapter": { + "title": "IPAdapter", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "Hugging Face path to the IP-Adapter" + }, + "mask_threshold": { + "minimum": 0.01, + "maximum": 0.99, + "type": "number", + "title": "Mask Threshold", + "description": "Threshold for mask.", + "default": 0.5 + }, + "image_encoder_weight_name": { + "title": "Image Encoder Weight Name", + "type": "string", + "description": "Name of the image encoder." + }, + "image_encoder_subfolder": { + "title": "Image Encoder Subfolder", + "type": "string", + "description": "Subfolder in which the image encoder weights exist." + }, + "image_url": { + "title": "Image Url", + "type": "string", + "description": "URL of Image for IP-Adapter conditioning. " + }, + "mask_image_url": { + "title": "Mask Image Url", + "type": "string", + "description": "URL of the mask for the control image." 
+ }, + "subfolder": { + "title": "Subfolder", + "type": "string", + "description": "Subfolder in which the ip_adapter weights exist" + }, + "scale": { + "title": "Scale", + "type": "number", + "description": "Scale for ip adapter." + }, + "image_encoder_path": { + "title": "Image Encoder Path", + "type": "string", + "description": "Path to the Image Encoder for the IP-Adapter, for example 'openai/clip-vit-large-patch14'" + }, + "weight_name": { + "title": "Weight Name", + "type": "string", + "description": "Name of the safetensors file containing the ip-adapter weights" + } + }, + "x-fal-order-properties": [ + "path", + "subfolder", + "weight_name", + "image_encoder_path", + "image_encoder_subfolder", + "image_encoder_weight_name", + "image_url", + "mask_image_url", + "mask_threshold", + "scale" + ], + "required": [ + "path", + "image_encoder_path", + "image_url", + "scale" + ] + }, + "ControlNet": { + "title": "ControlNet", + "type": "object", + "properties": { + "conditioning_scale": { + "minimum": 0, + "maximum": 2, + "type": "number", + "title": "Conditioning Scale", + "description": "\n The scale of the control net weight. This is used to scale the control net weight\n before merging it with the base model.\n ", + "default": 1 + }, + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the control net weights." + }, + "mask_threshold": { + "minimum": 0.01, + "maximum": 0.99, + "type": "number", + "title": "Mask Threshold", + "description": "Threshold for mask.", + "default": 0.5 + }, + "end_percentage": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "End Percentage", + "description": "\n The percentage of the image to end applying the controlnet in terms of the total timesteps.\n ", + "default": 1 + }, + "config_url": { + "title": "Config Url", + "type": "string", + "description": "optional URL to the controlnet config.json file." + }, + "mask_image_url": { + "title": "Mask Image Url", + "type": "string", + "description": "URL of the mask for the control image.", + "nullable": true + }, + "variant": { + "title": "Variant", + "type": "string", + "description": "The optional variant if a Hugging Face repo key is used." + }, + "control_image_url": { + "title": "Control Image Url", + "type": "string", + "description": "URL of the image to be used as the control image." 
+ }, + "start_percentage": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Start Percentage", + "description": "\n The percentage of the image to start applying the controlnet in terms of the total timesteps.\n ", + "default": 0 + } + }, + "x-fal-order-properties": [ + "path", + "config_url", + "variant", + "control_image_url", + "mask_image_url", + "mask_threshold", + "conditioning_scale", + "start_percentage", + "end_percentage" + ], + "required": [ + "path", + "control_image_url" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + }, + "ControlNetUnionInput": { + "title": "ControlNetUnionInput", + "type": "object", + "properties": { + "conditioning_scale": { + "minimum": 0, + "maximum": 2, + "type": "number", + "title": "Conditioning Scale", + "description": "\n The scale of the control net weight. This is used to scale the control net weight\n before merging it with the base model.\n ", + "default": 1 + }, + "mask_threshold": { + "minimum": 0.01, + "maximum": 0.99, + "type": "number", + "title": "Mask Threshold", + "description": "Threshold for mask.", + "default": 0.5 + }, + "end_percentage": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "End Percentage", + "description": "\n The percentage of the image to end applying the controlnet in terms of the total timesteps.\n ", + "default": 1 + }, + "mask_image_url": { + "title": "Mask Image Url", + "type": "string", + "description": "URL of the mask for the control image.", + "nullable": true + }, + "control_image_url": { + "title": "Control Image Url", + "type": "string", + "description": "URL of the image to be used as the control image." + }, + "control_mode": { + "enum": [ + "canny", + "tile", + "depth", + "blur", + "pose", + "gray", + "low-quality" + ], + "title": "Control Mode", + "type": "string", + "description": "Control Mode for Flux Controlnet Union. Supported values are:\n - canny: Uses the edges for guided generation.\n - tile: Uses the tiles for guided generation.\n - depth: Utilizes a grayscale depth map for guided generation.\n - blur: Adds a blur to the image.\n - pose: Uses the pose of the image for guided generation.\n - gray: Converts the image to grayscale.\n - low-quality: Converts the image to a low-quality image." 
+ }, + "start_percentage": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Start Percentage", + "description": "\n The percentage of the image to start applying the controlnet in terms of the total timesteps.\n ", + "default": 0 + } + }, + "x-fal-order-properties": [ + "control_image_url", + "mask_image_url", + "control_mode", + "conditioning_scale", + "mask_threshold", + "start_percentage", + "end_percentage" + ], + "required": [ + "control_image_url", + "control_mode" + ] + } + } + }, + "paths": { + "/fal-ai/flux-general/differential-diffusion/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-general/differential-diffusion/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-general/differential-diffusion": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxGeneralDifferentialDiffusionInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-general/differential-diffusion/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxGeneralDifferentialDiffusionOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-lora/image-to-image", + "metadata": { + "display_name": "FLUX.1 [dev] with LoRAs", + "category": "image-to-image", + "description": "FLUX LoRA Image-to-Image is a high-performance endpoint that transforms existing images using FLUX models, leveraging LoRA adaptations to enable rapid and precise image style transfer, modifications, and artistic variations.", + "status": "active", + "tags": [ + "lora", + "style transfer" + ], + "updated_at": "2026-01-26T21:44:42.837Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/h2glDvCl5RvtgjNx_-5qY.jpeg", + "model_url": "https://fal.run/fal-ai/flux-lora/image-to-image", + "license_type": "commercial", + "date": "2024-08-13T00:00:00.000Z", + "group": { + "key": "flux-lora", + "label": "Image 
to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-lora/image-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-lora/image-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-lora/image-to-image", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/h2glDvCl5RvtgjNx_-5qY.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-lora/image-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-lora/image-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxLoraImageToImageInput": { + "title": "ImageToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A photo of a lion sitting on a stone bench" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate. This is always set to 1 for streaming output.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image." + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/dog.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of image to use for inpainting. or img2img" + }, + "loras": { + "description": "\n The LoRAs to use for the image generation. 
You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "examples": [], + "title": "Loras", + "default": [] + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "strength": { + "minimum": 0.01, + "maximum": 1, + "type": "number", + "title": "Strength", + "description": "The strength to use for inpainting/image-to-image. Only used if the image_url is provided. 1.0 completely remakes the image while 0.0 preserves the original.", + "default": 0.85 + }, + "guidance_scale": { + "minimum": 0, + "maximum": 35, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 28 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "loras", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "image_url", + "strength" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "FluxLoraImageToImageOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Scale", + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + } + }, + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-lora/image-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-lora/image-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-lora/image-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxLoraImageToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-lora/image-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxLoraImageToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/sdxl-controlnet-union/inpainting", + "metadata": { + "display_name": "SDXL ControlNet Union", + "category": "image-to-image", + "description": "An efficient SDXL multi-controlnet inpainting model.", + "status": "active", + "tags": [ + "diffusion", + "controlnet", + "composition" + ], + "updated_at": "2026-01-26T21:44:43.641Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/controlnet-union/000004_openpose_scribble_concat.jpg", + "model_url": "https://fal.run/fal-ai/sdxl-controlnet-union/inpainting", + "github_url": "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/LICENSE.md", + "license_type": "commercial", + "date": "2024-07-31T00:00:00.000Z", + "group": { + "key": "sdxl-controlnet-union", + "label": "Inpainting" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/sdxl-controlnet-union/inpainting", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/sdxl-controlnet-union/inpainting queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/sdxl-controlnet-union/inpainting", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/controlnet-union/000004_openpose_scribble_concat.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/sdxl-controlnet-union/inpainting", + "documentationUrl": "https://fal.ai/models/fal-ai/sdxl-controlnet-union/inpainting/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "SdxlControlnetUnionInpaintingInput": { + "title": "InpaintingControlNetUnionInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Ice fortress, aurora skies, polar wildlife, twilight" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results." + }, + "depth_preprocess": { + "title": "Depth Preprocess", + "type": "boolean", + "description": "Whether to preprocess the depth image.", + "default": true + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image. Leave it none to automatically infer from the control image.", + "examples": [ + null + ], + "nullable": true + }, + "normal_image_url": { + "examples": [ + "https://fal-cdn.batuhan-941.workers.dev/files/rabbit/MiN_j3St9B8esJleCZKMU.jpeg" + ], + "title": "Normal Image Url", + "type": "string", + "description": "The URL of the control image." + }, + "embeddings": { + "title": "Embeddings", + "type": "array", + "description": "The list of embeddings to use.", + "items": { + "$ref": "#/components/schemas/Embedding" + }, + "default": [] + }, + "teed_image_url": { + "examples": [ + "https://fal-cdn.batuhan-941.workers.dev/files/rabbit/MiN_j3St9B8esJleCZKMU.jpeg" + ], + "title": "Teed Image Url", + "type": "string", + "description": "The URL of the control image." + }, + "loras": { + "title": "Loras", + "type": "array", + "description": "The list of LoRA weights to use.", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "default": [] + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 7.5 + }, + "canny_image_url": { + "examples": [ + "https://fal-cdn.batuhan-941.workers.dev/files/rabbit/MiN_j3St9B8esJleCZKMU.jpeg" + ], + "title": "Canny Image Url", + "type": "string", + "description": "The URL of the control image." + }, + "segmentation_preprocess": { + "title": "Segmentation Preprocess", + "type": "boolean", + "description": "Whether to preprocess the segmentation image.", + "default": true + }, + "format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" + ], + "title": "Image Url", + "type": "string", + "description": "The URL of the image to use as a starting point for the generation." 
+ }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "request_id": { + "title": "Request Id", + "type": "string", + "description": "\n An id bound to a request, can be used with response to identify the request\n itself.\n ", + "default": "" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + }, + "mask_url": { + "examples": [ + "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" + ], + "title": "Mask Url", + "type": "string", + "description": "The URL of the mask to use for inpainting." + }, + "segmentation_image_url": { + "examples": [ + "https://fal-cdn.batuhan-941.workers.dev/files/rabbit/MiN_j3St9B8esJleCZKMU.jpeg" + ], + "title": "Segmentation Image Url", + "type": "string", + "description": "The URL of the control image." + }, + "openpose_image_url": { + "examples": [ + "https://fal-cdn.batuhan-941.workers.dev/files/rabbit/MiN_j3St9B8esJleCZKMU.jpeg" + ], + "title": "Openpose Image Url", + "type": "string", + "description": "The URL of the control image." + }, + "canny_preprocess": { + "title": "Canny Preprocess", + "type": "boolean", + "description": "Whether to preprocess the canny image.", + "default": true + }, + "expand_prompt": { + "title": "Expand Prompt", + "type": "boolean", + "description": "If set to true, the prompt will be expanded with additional prompts.", + "default": false + }, + "depth_image_url": { + "examples": [ + "https://fal-cdn.batuhan-941.workers.dev/files/rabbit/MiN_j3St9B8esJleCZKMU.jpeg" + ], + "title": "Depth Image Url", + "type": "string", + "description": "The URL of the control image." + }, + "normal_preprocess": { + "title": "Normal Preprocess", + "type": "boolean", + "description": "Whether to preprocess the normal image.", + "default": true + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "negative_prompt": { + "examples": [ + "cartoon, illustration, animation. face. male, female" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use.Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. 
moustache, blurry, low resolution).\n ", + "default": "" + }, + "teed_preprocess": { + "title": "Teed Preprocess", + "type": "boolean", + "description": "Whether to preprocess the teed image.", + "default": true + }, + "num_images": { + "minimum": 1, + "maximum": 8, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "controlnet_conditioning_scale": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Controlnet Conditioning Scale", + "description": "The scale of the controlnet conditioning.", + "default": 0.5 + }, + "strength": { + "minimum": 0.01, + "maximum": 1, + "type": "number", + "title": "Strength", + "description": "Determines how much the generated image resembles the initial image.", + "default": 0.95 + }, + "safety_checker_version": { + "enum": [ + "v1", + "v2" + ], + "title": "Safety Checker Version", + "type": "string", + "description": "The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model.", + "default": "v1" + }, + "openpose_preprocess": { + "title": "Openpose Preprocess", + "type": "boolean", + "description": "Whether to preprocess the openpose image.", + "default": true + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 70, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 35 + } + }, + "x-fal-order-properties": [ + "prompt", + "controlnet_conditioning_scale", + "image_url", + "mask_url", + "negative_prompt", + "image_size", + "num_inference_steps", + "guidance_scale", + "strength", + "seed", + "sync_mode", + "num_images", + "loras", + "embeddings", + "enable_safety_checker", + "safety_checker_version", + "expand_prompt", + "format", + "request_id", + "openpose_image_url", + "openpose_preprocess", + "depth_image_url", + "depth_preprocess", + "teed_image_url", + "teed_preprocess", + "canny_image_url", + "canny_preprocess", + "normal_image_url", + "normal_preprocess", + "segmentation_image_url", + "segmentation_preprocess" + ], + "required": [ + "image_url", + "mask_url", + "prompt" + ] + }, + "SdxlControlnetUnionInpaintingOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Embedding": { + "title": "Embedding", + "type": "object", + "properties": { + "tokens": { + "title": "Tokens", + "type": "array", + "description": "The list of tokens to use for the embedding.", + "items": { + "type": "string" + }, + "default": [ + "", + "" + ] + }, + "path": { + "examples": [ + "https://civitai.com/api/download/models/135931", + "https://filebin.net/3chfqasxpqu21y8n/my-custom-lora-v1.safetensors" + ], + "title": "Path", + "type": "string", + "description": "URL or the path to the embedding weights." + } + }, + "x-fal-order-properties": [ + "path", + "tokens" + ], + "required": [ + "path" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "examples": [ + "https://civitai.com/api/download/models/135931", + "https://filebin.net/3chfqasxpqu21y8n/my-custom-lora-v1.safetensors" + ], + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights. Or HF model name." + }, + "scale": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Scale", + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + }, + "force": { + "title": "Force", + "type": "boolean", + "description": "If set to true, the embedding will be forced to be used.", + "default": false + } + }, + "x-fal-order-properties": [ + "path", + "scale", + "force" + ], + "required": [ + "path" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/sdxl-controlnet-union/inpainting/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sdxl-controlnet-union/inpainting/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/sdxl-controlnet-union/inpainting": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SdxlControlnetUnionInpaintingInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sdxl-controlnet-union/inpainting/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SdxlControlnetUnionInpaintingOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/sdxl-controlnet-union/image-to-image", + "metadata": { + "display_name": "SDXL ControlNet Union", + "category": "image-to-image", + "description": "An efficent SDXL multi-controlnet image-to-image model.", + "status": "active", + "tags": [ + "diffusion", + "controlnet", + "composition" + ], + "updated_at": "2026-01-26T21:44:43.766Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/controlnet-union/000004_openpose_scribble_concat.jpg", + "model_url": "https://fal.run/fal-ai/sdxl-controlnet-union/image-to-image", + "github_url": "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/LICENSE.md", + "license_type": "commercial", + "date": "2024-07-31T00:00:00.000Z", + "group": { + "key": "sdxl-controlnet-union", + "label": "Image to Image" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/sdxl-controlnet-union/image-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/sdxl-controlnet-union/image-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/sdxl-controlnet-union/image-to-image", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/controlnet-union/000004_openpose_scribble_concat.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/sdxl-controlnet-union/image-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/sdxl-controlnet-union/image-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + 
"properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "SdxlControlnetUnionImageToImageInput": { + "title": "ImageToImageControlNetUnionInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Ice fortress, aurora skies, polar wildlife, twilight" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results." + }, + "depth_preprocess": { + "title": "Depth Preprocess", + "type": "boolean", + "description": "Whether to preprocess the depth image.", + "default": true + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image. Leave it none to automatically infer from the control image.", + "examples": [ + null + ], + "nullable": true + }, + "normal_image_url": { + "examples": [ + "https://fal-cdn.batuhan-941.workers.dev/files/rabbit/MiN_j3St9B8esJleCZKMU.jpeg" + ], + "title": "Normal Image Url", + "type": "string", + "description": "The URL of the control image." + }, + "embeddings": { + "title": "Embeddings", + "type": "array", + "description": "The list of embeddings to use.", + "items": { + "$ref": "#/components/schemas/Embedding" + }, + "default": [] + }, + "teed_image_url": { + "examples": [ + "https://fal-cdn.batuhan-941.workers.dev/files/rabbit/MiN_j3St9B8esJleCZKMU.jpeg" + ], + "title": "Teed Image Url", + "type": "string", + "description": "The URL of the control image." + }, + "loras": { + "title": "Loras", + "type": "array", + "description": "The list of LoRA weights to use.", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "default": [] + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 7.5 + }, + "canny_image_url": { + "examples": [ + "https://fal-cdn.batuhan-941.workers.dev/files/rabbit/MiN_j3St9B8esJleCZKMU.jpeg" + ], + "title": "Canny Image Url", + "type": "string", + "description": "The URL of the control image." 
+ }, + "segmentation_preprocess": { + "title": "Segmentation Preprocess", + "type": "boolean", + "description": "Whether to preprocess the segmentation image.", + "default": true + }, + "format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://fal-cdn.batuhan-941.workers.dev/files/tiger/IExuP-WICqaIesLZAZPur.jpeg" + ], + "title": "Image Url", + "type": "string", + "description": "The URL of the image to use as a starting point for the generation." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "request_id": { + "title": "Request Id", + "type": "string", + "description": "\n An id bound to a request, can be used with response to identify the request\n itself.\n ", + "default": "" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + }, + "segmentation_image_url": { + "examples": [ + "https://fal-cdn.batuhan-941.workers.dev/files/rabbit/MiN_j3St9B8esJleCZKMU.jpeg" + ], + "title": "Segmentation Image Url", + "type": "string", + "description": "The URL of the control image." + }, + "openpose_image_url": { + "examples": [ + "https://fal-cdn.batuhan-941.workers.dev/files/rabbit/MiN_j3St9B8esJleCZKMU.jpeg" + ], + "title": "Openpose Image Url", + "type": "string", + "description": "The URL of the control image." + }, + "canny_preprocess": { + "title": "Canny Preprocess", + "type": "boolean", + "description": "Whether to preprocess the canny image.", + "default": true + }, + "expand_prompt": { + "title": "Expand Prompt", + "type": "boolean", + "description": "If set to true, the prompt will be expanded with additional prompts.", + "default": false + }, + "depth_image_url": { + "examples": [ + "https://fal-cdn.batuhan-941.workers.dev/files/rabbit/MiN_j3St9B8esJleCZKMU.jpeg" + ], + "title": "Depth Image Url", + "type": "string", + "description": "The URL of the control image." + }, + "normal_preprocess": { + "title": "Normal Preprocess", + "type": "boolean", + "description": "Whether to preprocess the normal image.", + "default": true + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "preserve_aspect_ratio": { + "title": "Preserve Aspect Ratio", + "type": "boolean", + "description": "\n If set to true, the aspect ratio of the generated image will be preserved even\n if the image size is too large. However, if the image is not a multiple of 32\n in width or height, it will be resized to the nearest multiple of 32. By default,\n this snapping to the nearest multiple of 32 will not preserve the aspect ratio.\n Set crop_output to True, to crop the output to the proper aspect ratio\n after generating.\n ", + "default": false + }, + "negative_prompt": { + "examples": [ + "cartoon, illustration, animation. face. male, female" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use.Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. 
moustache, blurry, low resolution).\n ", + "default": "" + }, + "crop_output": { + "title": "Crop Output", + "type": "boolean", + "description": "\n If set to true, the output is cropped to the proper aspect ratio after generating.\n ", + "default": false + }, + "teed_preprocess": { + "title": "Teed Preprocess", + "type": "boolean", + "description": "Whether to preprocess the teed image.", + "default": true + }, + "num_images": { + "minimum": 1, + "maximum": 8, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "controlnet_conditioning_scale": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Controlnet Conditioning Scale", + "description": "The scale of the controlnet conditioning.", + "default": 0.5 + }, + "strength": { + "minimum": 0.05, + "maximum": 1, + "type": "number", + "title": "Strength", + "description": "Determines how much the generated image resembles the initial image.", + "default": 0.95 + }, + "safety_checker_version": { + "enum": [ + "v1", + "v2" + ], + "title": "Safety Checker Version", + "type": "string", + "description": "The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model.", + "default": "v1" + }, + "openpose_preprocess": { + "title": "Openpose Preprocess", + "type": "boolean", + "description": "Whether to preprocess the openpose image.", + "default": true + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 70, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 35 + } + }, + "x-fal-order-properties": [ + "prompt", + "controlnet_conditioning_scale", + "image_url", + "negative_prompt", + "image_size", + "num_inference_steps", + "guidance_scale", + "strength", + "seed", + "sync_mode", + "num_images", + "loras", + "embeddings", + "enable_safety_checker", + "safety_checker_version", + "expand_prompt", + "format", + "request_id", + "preserve_aspect_ratio", + "crop_output", + "openpose_image_url", + "openpose_preprocess", + "depth_image_url", + "depth_preprocess", + "teed_image_url", + "teed_preprocess", + "canny_image_url", + "canny_preprocess", + "normal_image_url", + "normal_preprocess", + "segmentation_image_url", + "segmentation_preprocess" + ], + "required": [ + "image_url", + "prompt" + ] + }, + "SdxlControlnetUnionImageToImageOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Embedding": { + "title": "Embedding", + "type": "object", + "properties": { + "tokens": { + "title": "Tokens", + "type": "array", + "description": "The list of tokens to use for the embedding.", + "items": { + "type": "string" + }, + "default": [ + "", + "" + ] + }, + "path": { + "examples": [ + "https://civitai.com/api/download/models/135931", + "https://filebin.net/3chfqasxpqu21y8n/my-custom-lora-v1.safetensors" + ], + "title": "Path", + "type": "string", + "description": "URL or the path to the embedding weights." + } + }, + "x-fal-order-properties": [ + "path", + "tokens" + ], + "required": [ + "path" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "examples": [ + "https://civitai.com/api/download/models/135931", + "https://filebin.net/3chfqasxpqu21y8n/my-custom-lora-v1.safetensors" + ], + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights. Or HF model name." + }, + "scale": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Scale", + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + }, + "force": { + "title": "Force", + "type": "boolean", + "description": "If set to true, the embedding will be forced to be used.", + "default": false + } + }, + "x-fal-order-properties": [ + "path", + "scale", + "force" + ], + "required": [ + "path" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/sdxl-controlnet-union/image-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sdxl-controlnet-union/image-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/sdxl-controlnet-union/image-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SdxlControlnetUnionImageToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sdxl-controlnet-union/image-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SdxlControlnetUnionImageToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/era-3d", + "metadata": { + "display_name": "Era 3D", + "category": "image-to-image", + "description": "A powerful image to novel multiview model with normals.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:44:44.407Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/model_tests/era3d/pignormals.png", + "model_url": "https://fal.run/fal-ai/era-3d", + "github_url": "https://github.com/pengHTYX/Era3D/blob/main/LICENSE", + "license_type": "commercial", + "date": "2024-07-01T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/era-3d", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/era-3d queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/era-3d", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/model_tests/era3d/pignormals.png", + "playgroundUrl": "https://fal.ai/models/fal-ai/era-3d", + "documentationUrl": "https://fal.ai/models/fal-ai/era-3d/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." 
+ }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Era3dInput": { + "title": "Era3DInput", + "type": "object", + "properties": { + "cfg": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Cfg", + "description": "The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.", + "default": 4 + }, + "background_removal": { + "title": "Background Removal", + "type": "boolean", + "description": "Background removal", + "default": true + }, + "steps": { + "minimum": 1, + "maximum": 200, + "type": "integer", + "title": "Steps", + "description": "Number of steps to run the model for", + "default": 40 + }, + "crop_size": { + "minimum": 256, + "maximum": 512, + "type": "integer", + "title": "Crop Size", + "description": "Size of the image to crop to", + "default": 400 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Seed for random number generation", + "default": -1 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/era3d/DnvGjd9CCS-ESmLgTYgOn.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to remove background from" + } + }, + "x-fal-order-properties": [ + "image_url", + "cfg", + "steps", + "crop_size", + "seed", + "background_removal" + ], + "required": [ + "image_url" + ] + }, + "Era3dOutput": { + "title": "Era3DOutput", + "type": "object", + "properties": { + "images": { + "title": "Images", + "type": "array", + "description": "Images with background removed", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Seed used for random number generation" + }, + "normal_images": { + "title": "Normal Images", + "type": "array", + "description": "Normal images with background removed", + "items": { + "$ref": "#/components/schemas/Image" + } + } + }, + "x-fal-order-properties": [ + "images", + "normal_images", + "seed" + ], + "required": [ + "images", + "normal_images", + "seed" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the image in pixels.", + "examples": [ + 1024 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the image in pixels.", + "examples": [ + 1024 + ] + } + }, + "description": "Represents an image file.", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/era-3d/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/era-3d/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/era-3d": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Era3dInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/era-3d/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Era3dOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/florence-2-large/dense-region-caption", + "metadata": { + "display_name": "Florence-2 Large", + "category": "image-to-image", + "description": "Florence-2 is an advanced vision foundation model that uses a prompt-based approach to handle a wide range of vision and vision-language tasks", + "status": "active", + "tags": [ + "multimodal", + "vision" + ], + "updated_at": "2026-01-26T21:44:44.782Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/florence-2-large.jpeg", + "model_url": "https://fal.run/fal-ai/florence-2-large/dense-region-caption", + "github_url": "https://huggingface.co/microsoft/Florence-2-large/blob/main/LICENSE", + "license_type": "commercial", + "date": "2024-06-22T00:00:00.000Z", + "group": { + "key": "florence-2-large", + "label": "Dense Region Caption" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/florence-2-large/dense-region-caption", + "version": "1.0.0", + "description": "The OpenAPI schema 
for the fal-ai/florence-2-large/dense-region-caption queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/florence-2-large/dense-region-caption", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/florence-2-large.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/florence-2-large/dense-region-caption", + "documentationUrl": "https://fal.ai/models/fal-ai/florence-2-large/dense-region-caption/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Florence2LargeDenseRegionCaptionInput": { + "title": "ImageInput", + "type": "object", + "properties": { + "image_url": { + "examples": [ + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg", + "http://ecx.images-amazon.com/images/I/51UUzBDAMsL.jpg" + ], + "description": "The URL of the image to be processed.", + "type": "string", + "title": "Image Url" + } + }, + "x-fal-order-properties": [ + "image_url" + ], + "required": [ + "image_url" + ] + }, + "Florence2LargeDenseRegionCaptionOutput": { + "title": "BoundingBoxOutputWithLabels", + "type": "object", + "properties": { + "image": { + "description": "Processed image", + "title": "Image", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + }, + "results": { + "description": "Results from the model", + "title": "Results", + "allOf": [ + { + "$ref": "#/components/schemas/BoundingBoxes" + } + ] + } + }, + "x-fal-order-properties": [ + "results", + "image" + ], + "required": [ + "results" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "type": "integer", + "title": "Height" + }, + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + }, + "BoundingBoxes": { + "title": "BoundingBoxes", + "type": "object", + "properties": { + "bboxes": { + "description": "List of bounding boxes", + "type": "array", + "title": "Bboxes", + "items": { + "$ref": "#/components/schemas/BoundingBox" + } + } + }, + "x-fal-order-properties": [ + "bboxes" + ], + "required": [ + "bboxes" + ] + }, + "BoundingBox": { + "title": "BoundingBox", + "type": "object", + "properties": { + "y": { + "description": "Y-coordinate of the top-left corner", + "type": "number", + "title": "Y" + }, + "label": { + "description": "Label of the bounding box", + "type": "string", + "title": "Label" + }, + "h": { + "description": "Height of the bounding box", + "type": "number", + "title": "H" + }, + "w": { + "description": "Width of the bounding box", + "type": "number", + "title": "W" + }, + "x": { + "description": "X-coordinate of the top-left corner", + "type": "number", + "title": "X" + } + }, + "x-fal-order-properties": [ + "x", + "y", + "w", + "h", + "label" + ], + "required": [ + "x", + "y", + "w", + "h", + "label" + ] + } + } + }, + "paths": { + "/fal-ai/florence-2-large/dense-region-caption/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/dense-region-caption/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/dense-region-caption": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Florence2LargeDenseRegionCaptionInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/dense-region-caption/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Florence2LargeDenseRegionCaptionOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/florence-2-large/referring-expression-segmentation", + "metadata": { + "display_name": "Florence-2 Large", + "category": "image-to-image", + "description": "Florence-2 is an advanced vision foundation model that uses a prompt-based approach to handle a wide range of vision and vision-language tasks", + "status": "active", + "tags": [ + "multimodal", + "vision", + "segmentation" + ], + "updated_at": "2026-01-26T21:44:44.906Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/florence-2-large.jpeg", + "model_url": "https://fal.run/fal-ai/florence-2-large/referring-expression-segmentation", + "github_url": "https://huggingface.co/microsoft/Florence-2-large/blob/main/LICENSE", + "license_type": "commercial", + "date": "2024-06-22T00:00:00.000Z", + "group": { + "key": "florence-2-large", + "label": "Referring Expression Segmentation" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/florence-2-large/referring-expression-segmentation", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/florence-2-large/referring-expression-segmentation queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/florence-2-large/referring-expression-segmentation", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/florence-2-large.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/florence-2-large/referring-expression-segmentation", + "documentationUrl": "https://fal.ai/models/fal-ai/florence-2-large/referring-expression-segmentation/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Florence2LargeReferringExpressionSegmentationInput": { + "title": "ImageWithTextInput", + "type": "object", + "properties": { + "text_input": { + "description": "Text input for the task", + "type": "string", + "title": "Text Input" + }, + "image_url": { + "examples": [ + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg", + "http://ecx.images-amazon.com/images/I/51UUzBDAMsL.jpg" + ], + "description": "The URL of the image to be processed.", + "type": "string", + "title": "Image Url" + } + }, + "x-fal-order-properties": [ + "image_url", + "text_input" + ], + "required": [ + "image_url", + "text_input" + ] + }, + "Florence2LargeReferringExpressionSegmentationOutput": { + "title": "PolygonOutputWithLabels", + "type": "object", + "properties": { + "image": { + "description": "Processed image", + "title": "Image", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + }, + "results": { + "description": "Results from the model", + "title": "Results", + "allOf": [ + { + "$ref": "#/components/schemas/PolygonOutput" + } + ] + } + }, + "x-fal-order-properties": [ + "results", + "image" + ], + "required": [ + "results" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "type": "integer", + "title": "Height" + }, + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + }, + "PolygonOutput": { + "title": "PolygonOutput", + "type": "object", + "properties": { + "polygons": { + "description": "List of polygons", + "type": "array", + "title": "Polygons", + "items": { + "$ref": "#/components/schemas/Polygon" + } + } + }, + "x-fal-order-properties": [ + "polygons" + ], + "required": [ + "polygons" + ] + }, + "Polygon": { + "title": "Polygon", + "type": "object", + "properties": { + "points": { + "description": "List of points", + "type": "array", + "title": "Points", + "items": { + "additionalProperties": { + "type": "number" + }, + "type": "object" + } + }, + "label": { + "description": "Label of the polygon", + "type": "string", + "title": "Label" + } + }, + "x-fal-order-properties": [ + "points", + "label" + ], + "required": [ + "points", + "label" + ] + } + } + }, + "paths": { + "/fal-ai/florence-2-large/referring-expression-segmentation/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/referring-expression-segmentation/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/referring-expression-segmentation": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Florence2LargeReferringExpressionSegmentationInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/referring-expression-segmentation/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Florence2LargeReferringExpressionSegmentationOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/florence-2-large/object-detection", + "metadata": { + "display_name": "Florence-2 Large", + "category": "image-to-image", + "description": "Florence-2 is an advanced vision foundation model that uses a prompt-based approach to handle a wide range of vision and vision-language tasks", + "status": "active", + "tags": [ + "detection", + "multimodal", + "vision" + ], + "updated_at": "2026-01-26T21:44:44.532Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/florence-2-large.jpeg", + "model_url": "https://fal.run/fal-ai/florence-2-large/object-detection", + "github_url": "https://huggingface.co/microsoft/Florence-2-large/blob/main/LICENSE", + "license_type": "commercial", + "date": "2024-06-22T00:00:00.000Z", + "group": { + "key": "florence-2-large", + "label": "Object Detection" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/florence-2-large/object-detection", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/florence-2-large/object-detection queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/florence-2-large/object-detection", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/florence-2-large.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/florence-2-large/object-detection", + "documentationUrl": "https://fal.ai/models/fal-ai/florence-2-large/object-detection/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Florence2LargeObjectDetectionInput": { + "title": "ImageInput", + "type": "object", + "properties": { + "image_url": { + "examples": [ + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg", + "http://ecx.images-amazon.com/images/I/51UUzBDAMsL.jpg" + ], + "description": "The URL of the image to be processed.", + "type": "string", + "title": "Image Url" + } + }, + "x-fal-order-properties": [ + "image_url" + ], + "required": [ + "image_url" + ] + }, + "Florence2LargeObjectDetectionOutput": { + "title": "BoundingBoxOutputWithLabels", + "type": "object", + "properties": { + "image": { + "description": "Processed image", + "title": "Image", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + }, + "results": { + "description": "Results from the model", + "title": "Results", + "allOf": [ + { + "$ref": "#/components/schemas/BoundingBoxes" + } + ] + } + }, + "x-fal-order-properties": [ + "results", + "image" + ], + "required": [ + "results" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "type": "integer", + "title": "Height" + }, + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + }, + "BoundingBoxes": { + "title": "BoundingBoxes", + "type": "object", + "properties": { + "bboxes": { + "description": "List of bounding boxes", + "type": "array", + "title": "Bboxes", + "items": { + "$ref": "#/components/schemas/BoundingBox" + } + } + }, + "x-fal-order-properties": [ + "bboxes" + ], + "required": [ + "bboxes" + ] + }, + "BoundingBox": { + "title": "BoundingBox", + "type": "object", + "properties": { + "y": { + "description": "Y-coordinate of the top-left corner", + "type": "number", + "title": "Y" + }, + "label": { + "description": "Label of the bounding box", + "type": "string", + "title": "Label" + }, + "h": { + "description": "Height of the bounding box", + "type": "number", + "title": "H" + }, + "w": { + "description": "Width of the bounding box", + "type": "number", + "title": "W" + }, + "x": { + "description": "X-coordinate of the top-left corner", + "type": "number", + "title": "X" + } + }, + "x-fal-order-properties": [ + "x", + "y", + "w", + "h", + "label" + ], + "required": [ + "x", + "y", + "w", + "h", + "label" + ] + } + } + }, + "paths": { + "/fal-ai/florence-2-large/object-detection/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/object-detection/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/object-detection": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Florence2LargeObjectDetectionInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/object-detection/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Florence2LargeObjectDetectionOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/florence-2-large/open-vocabulary-detection", + "metadata": { + "display_name": "Florence-2 Large", + "category": "image-to-image", + "description": "Florence-2 is an advanced vision foundation model that uses a prompt-based approach to handle a wide range of vision and vision-language tasks", + "status": "active", + "tags": [ + "multimodal", + "vision", + "detection" + ], + "updated_at": "2026-01-26T21:44:45.724Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/florence-2-large.jpeg", + "model_url": "https://fal.run/fal-ai/florence-2-large/open-vocabulary-detection", + "github_url": "https://huggingface.co/microsoft/Florence-2-large/blob/main/LICENSE", + "license_type": "commercial", + "date": "2024-06-22T00:00:00.000Z", + "group": { + "key": "florence-2-large", + "label": "Open Vocabulary Detection" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/florence-2-large/open-vocabulary-detection", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/florence-2-large/open-vocabulary-detection queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/florence-2-large/open-vocabulary-detection", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/florence-2-large.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/florence-2-large/open-vocabulary-detection", + "documentationUrl": "https://fal.ai/models/fal-ai/florence-2-large/open-vocabulary-detection/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Florence2LargeOpenVocabularyDetectionInput": { + "title": "ImageWithTextInput", + "type": "object", + "properties": { + "text_input": { + "description": "Text input for the task", + "type": "string", + "title": "Text Input" + }, + "image_url": { + "examples": [ + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg", + "http://ecx.images-amazon.com/images/I/51UUzBDAMsL.jpg" + ], + "description": "The URL of the image to be processed.", + "type": "string", + "title": "Image Url" + } + }, + "x-fal-order-properties": [ + "image_url", + "text_input" + ], + "required": [ + "image_url", + "text_input" + ] + }, + "Florence2LargeOpenVocabularyDetectionOutput": { + "title": "BoundingBoxOutputWithLabels", + "type": "object", + "properties": { + "image": { + "description": "Processed image", + "title": "Image", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + }, + "results": { + "description": "Results from the model", + "title": "Results", + "allOf": [ + { + "$ref": "#/components/schemas/BoundingBoxes" + } + ] + } + }, + "x-fal-order-properties": [ + "results", + "image" + ], + "required": [ + "results" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "type": "integer", + "title": "Height" + }, + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + }, + "BoundingBoxes": { + "title": "BoundingBoxes", + "type": "object", + "properties": { + "bboxes": { + "description": "List of bounding boxes", + "type": "array", + "title": "Bboxes", + "items": { + "$ref": "#/components/schemas/BoundingBox" + } + } + }, + "x-fal-order-properties": [ + "bboxes" + ], + "required": [ + "bboxes" + ] + }, + "BoundingBox": { + "title": "BoundingBox", + "type": "object", + "properties": { + "y": { + "description": "Y-coordinate of the top-left corner", + "type": "number", + "title": "Y" + }, + "label": { + "description": "Label of the bounding box", + "type": "string", + "title": "Label" + }, + "h": { + "description": "Height of the bounding box", + "type": "number", + "title": "H" + }, + "w": { + "description": "Width of the bounding box", + "type": "number", + "title": "W" + }, + "x": { + "description": "X-coordinate of the top-left corner", + "type": "number", + "title": "X" + } + }, + "x-fal-order-properties": [ + "x", + "y", + "w", + "h", + "label" + ], + "required": [ + "x", + "y", + "w", + "h", + "label" + ] + } + } + }, + "paths": { + "/fal-ai/florence-2-large/open-vocabulary-detection/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/open-vocabulary-detection/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/open-vocabulary-detection": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Florence2LargeOpenVocabularyDetectionInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/open-vocabulary-detection/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Florence2LargeOpenVocabularyDetectionOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/florence-2-large/caption-to-phrase-grounding", + "metadata": { + "display_name": "Florence-2 Large", + "category": "image-to-image", + "description": "Florence-2 is an advanced vision foundation model that uses a prompt-based approach to handle a wide range of vision and vision-language tasks", + "status": "active", + "tags": [ + "multimodal", + "vision" + ], + "updated_at": "2026-01-26T21:44:45.474Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/florence-2-large.jpeg", + "model_url": "https://fal.run/fal-ai/florence-2-large/caption-to-phrase-grounding", + "github_url": "https://huggingface.co/microsoft/Florence-2-large/blob/main/LICENSE", + "license_type": "commercial", + "date": "2024-06-22T00:00:00.000Z", + "group": { + "key": "florence-2-large", + "label": "Caption to Phrase Grounding" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/florence-2-large/caption-to-phrase-grounding", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/florence-2-large/caption-to-phrase-grounding queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/florence-2-large/caption-to-phrase-grounding", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/florence-2-large.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/florence-2-large/caption-to-phrase-grounding", + "documentationUrl": "https://fal.ai/models/fal-ai/florence-2-large/caption-to-phrase-grounding/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Florence2LargeCaptionToPhraseGroundingInput": { + "title": "ImageWithTextInput", + "type": "object", + "properties": { + "text_input": { + "description": "Text input for the task", + "type": "string", + "title": "Text Input" + }, + "image_url": { + "examples": [ + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg", + "http://ecx.images-amazon.com/images/I/51UUzBDAMsL.jpg" + ], + "description": "The URL of the image to be processed.", + "type": "string", + "title": "Image Url" + } + }, + "x-fal-order-properties": [ + "image_url", + "text_input" + ], + "required": [ + "image_url", + "text_input" + ] + }, + "Florence2LargeCaptionToPhraseGroundingOutput": { + "title": "BoundingBoxOutputWithLabels", + "type": "object", + "properties": { + "image": { + "description": "Processed image", + "title": "Image", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + }, + "results": { + "description": "Results from the model", + "title": "Results", + "allOf": [ + { + "$ref": "#/components/schemas/BoundingBoxes" + } + ] + } + }, + "x-fal-order-properties": [ + "results", + "image" + ], + "required": [ + "results" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "type": "integer", + "title": "Height" + }, + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + }, + "BoundingBoxes": { + "title": "BoundingBoxes", + "type": "object", + "properties": { + "bboxes": { + "description": "List of bounding boxes", + "type": "array", + "title": "Bboxes", + "items": { + "$ref": "#/components/schemas/BoundingBox" + } + } + }, + "x-fal-order-properties": [ + "bboxes" + ], + "required": [ + "bboxes" + ] + }, + "BoundingBox": { + "title": "BoundingBox", + "type": "object", + "properties": { + "y": { + "description": "Y-coordinate of the top-left corner", + "type": "number", + "title": "Y" + }, + "label": { + "description": "Label of the bounding box", + "type": "string", + "title": "Label" + }, + "h": { + "description": "Height of the bounding box", + "type": "number", + "title": "H" + }, + "w": { + "description": "Width of the bounding box", + "type": "number", + "title": "W" + }, + "x": { + "description": "X-coordinate of the top-left corner", + "type": "number", + "title": "X" + } + }, + "x-fal-order-properties": [ + "x", + "y", + "w", + "h", + "label" + ], + "required": [ + "x", + "y", + "w", + "h", + "label" + ] + } + } + }, + "paths": { + "/fal-ai/florence-2-large/caption-to-phrase-grounding/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/caption-to-phrase-grounding/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/caption-to-phrase-grounding": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Florence2LargeCaptionToPhraseGroundingInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/caption-to-phrase-grounding/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Florence2LargeCaptionToPhraseGroundingOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/florence-2-large/region-proposal", + "metadata": { + "display_name": "Florence-2 Large", + "category": "image-to-image", + "description": "Florence-2 is an advanced vision foundation model that uses a prompt-based approach to handle a wide range of vision and vision-language tasks", + "status": "active", + "tags": [ + "multimodal", + "vision" + ], + "updated_at": "2026-01-26T21:44:45.056Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/florence-2-large.jpeg", + "model_url": "https://fal.run/fal-ai/florence-2-large/region-proposal", + "github_url": "https://huggingface.co/microsoft/Florence-2-large/blob/main/LICENSE", + "license_type": "commercial", + "date": "2024-06-22T00:00:00.000Z", + "group": { + "key": "florence-2-large", + "label": "Region Proposal" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/florence-2-large/region-proposal", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/florence-2-large/region-proposal queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/florence-2-large/region-proposal", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/florence-2-large.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/florence-2-large/region-proposal", + "documentationUrl": "https://fal.ai/models/fal-ai/florence-2-large/region-proposal/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Florence2LargeRegionProposalInput": { + "title": "ImageInput", + "type": "object", + "properties": { + "image_url": { + "examples": [ + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg", + "http://ecx.images-amazon.com/images/I/51UUzBDAMsL.jpg" + ], + "description": "The URL of the image to be processed.", + "type": "string", + "title": "Image Url" + } + }, + "x-fal-order-properties": [ + "image_url" + ], + "required": [ + "image_url" + ] + }, + "Florence2LargeRegionProposalOutput": { + "title": "BoundingBoxOutputWithLabels", + "type": "object", + "properties": { + "image": { + "description": "Processed image", + "title": "Image", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + }, + "results": { + "description": "Results from the model", + "title": "Results", + "allOf": [ + { + "$ref": "#/components/schemas/BoundingBoxes" + } + ] + } + }, + "x-fal-order-properties": [ + "results", + "image" + ], + "required": [ + "results" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "type": "integer", + "title": "Height" + }, + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + }, + "BoundingBoxes": { + "title": "BoundingBoxes", + "type": "object", + "properties": { + "bboxes": { + "description": "List of bounding boxes", + "type": "array", + "title": "Bboxes", + "items": { + "$ref": "#/components/schemas/BoundingBox" + } + } + }, + "x-fal-order-properties": [ + "bboxes" + ], + "required": [ + "bboxes" + ] + }, + "BoundingBox": { + "title": "BoundingBox", + "type": "object", + "properties": { + "y": { + "description": "Y-coordinate of the top-left corner", + "type": "number", + "title": "Y" + }, + "label": { + "description": "Label of the bounding box", + "type": "string", + "title": "Label" + }, + "h": { + "description": "Height of the bounding box", + "type": "number", + "title": "H" + }, + "w": { + "description": "Width of the bounding box", + "type": "number", + "title": "W" + }, + "x": { + "description": "X-coordinate of the top-left corner", + "type": "number", + "title": "X" + } + }, + "x-fal-order-properties": [ + "x", + "y", + "w", + "h", + "label" + ], + "required": [ + "x", + "y", + "w", + "h", + "label" + ] + } + } + }, + "paths": { + "/fal-ai/florence-2-large/region-proposal/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/region-proposal/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/region-proposal": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Florence2LargeRegionProposalInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/region-proposal/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Florence2LargeRegionProposalOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/florence-2-large/ocr-with-region", + "metadata": { + "display_name": "Florence-2 Large", + "category": "image-to-image", + "description": "Florence-2 is an advanced vision foundation model that uses a prompt-based approach to handle a wide range of vision and vision-language tasks", + "status": "active", + "tags": [ + "ocr", + "multimodal", + "vision" + ], + "updated_at": "2026-01-26T21:44:45.599Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/florence-2-large.jpeg", + "model_url": "https://fal.run/fal-ai/florence-2-large/ocr-with-region", + "github_url": "https://huggingface.co/microsoft/Florence-2-large/blob/main/LICENSE", + "license_type": "commercial", + "date": "2024-06-22T00:00:00.000Z", + "group": { + "key": "florence-2-large", + "label": "OCR with Region" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/florence-2-large/ocr-with-region", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/florence-2-large/ocr-with-region queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/florence-2-large/ocr-with-region", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/florence-2-large.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/florence-2-large/ocr-with-region", + "documentationUrl": "https://fal.ai/models/fal-ai/florence-2-large/ocr-with-region/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Florence2LargeOcrWithRegionInput": { + "title": "ImageInput", + "type": "object", + "properties": { + "image_url": { + "examples": [ + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg", + "http://ecx.images-amazon.com/images/I/51UUzBDAMsL.jpg" + ], + "description": "The URL of the image to be processed.", + "type": "string", + "title": "Image Url" + } + }, + "x-fal-order-properties": [ + "image_url" + ], + "required": [ + "image_url" + ] + }, + "Florence2LargeOcrWithRegionOutput": { + "title": "OCRBoundingBoxOutputWithLabels", + "type": "object", + "properties": { + "image": { + "description": "Processed image", + "title": "Image", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + }, + "results": { + "description": "Results from the model", + "title": "Results", + "allOf": [ + { + "$ref": "#/components/schemas/OCRBoundingBox" + } + ] + } + }, + "x-fal-order-properties": [ + "results", + "image" + ], + "required": [ + "results" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "type": "integer", + "title": "Height" + }, + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + }, + "OCRBoundingBox": { + "title": "OCRBoundingBox", + "type": "object", + "properties": { + "quad_boxes": { + "description": "List of quadrilateral boxes", + "type": "array", + "title": "Quad Boxes", + "items": { + "$ref": "#/components/schemas/OCRBoundingBoxSingle" + } + } + }, + "x-fal-order-properties": [ + "quad_boxes" + ], + "required": [ + "quad_boxes" + ] + }, + "OCRBoundingBoxSingle": { + "title": "OCRBoundingBoxSingle", + "type": "object", + "properties": { + "y": { + "description": "Y-coordinate of the top-left corner", + "type": "number", + "title": "Y" + }, + "label": { + "description": "Label of the bounding box", + "type": "string", + "title": "Label" + }, + "h": { + "description": "Height of the bounding box", + "type": "number", + "title": "H" + }, + "w": { + "description": "Width of the bounding box", + "type": "number", + "title": "W" + }, + "x": { + "description": "X-coordinate of the top-left corner", + "type": "number", + "title": "X" + } + }, + "x-fal-order-properties": [ + "x", + "y", + "w", + "h", + "label" + ], + "required": [ + "x", + "y", + "w", + "h", + "label" + ] + } + } + }, + "paths": { + "/fal-ai/florence-2-large/ocr-with-region/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/ocr-with-region/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/ocr-with-region": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Florence2LargeOcrWithRegionInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/ocr-with-region/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Florence2LargeOcrWithRegionOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/florence-2-large/region-to-segmentation", + "metadata": { + "display_name": "Florence-2 Large", + "category": "image-to-image", + "description": "Florence-2 is an advanced vision foundation model that uses a prompt-based approach to handle a wide range of vision and vision-language tasks", + "status": "active", + "tags": [ + "multimodal", + "vision", + "segmentation" + ], + "updated_at": "2026-01-26T21:44:45.349Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/florence-2-large.jpeg", + "model_url": "https://fal.run/fal-ai/florence-2-large/region-to-segmentation", + "github_url": "https://huggingface.co/microsoft/Florence-2-large/blob/main/LICENSE", + "license_type": "commercial", + "date": "2024-06-22T00:00:00.000Z", + "group": { + "key": "florence-2-large", + "label": "Region to Segmentation" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/florence-2-large/region-to-segmentation", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/florence-2-large/region-to-segmentation queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/florence-2-large/region-to-segmentation", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/florence-2-large.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/florence-2-large/region-to-segmentation", + "documentationUrl": "https://fal.ai/models/fal-ai/florence-2-large/region-to-segmentation/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Florence2LargeRegionToSegmentationInput": { + "title": "ImageWithUserCoordinatesInput", + "type": "object", + "properties": { + "region": { + "examples": [ + { + "y1": 100, + "x2": 200, + "x1": 100, + "y2": 200 + } + ], + "description": "The user input coordinates", + "title": "Region", + "allOf": [ + { + "$ref": "#/components/schemas/Region" + } + ] + }, + "image_url": { + "examples": [ + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg", + "http://ecx.images-amazon.com/images/I/51UUzBDAMsL.jpg" + ], + "description": "The URL of the image to be processed.", + "type": "string", + "title": "Image Url" + } + }, + "x-fal-order-properties": [ + "image_url", + "region" + ], + "required": [ + "image_url", + "region" + ] + }, + "Florence2LargeRegionToSegmentationOutput": { + "title": "PolygonOutputWithLabels", + "type": "object", + "properties": { + "image": { + "description": "Processed image", + "title": "Image", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + }, + "results": { + "description": "Results from the model", + "title": "Results", + "allOf": [ + { + "$ref": "#/components/schemas/PolygonOutput" + } + ] + } + }, + "x-fal-order-properties": [ + "results", + "image" + ], + "required": [ + "results" + ] + }, + "Region": { + "title": "Region", + "type": "object", + "properties": { + "y1": { + "minimum": 0, + "maximum": 999, + "type": "integer", + "title": "Y1", + "description": "Y-coordinate of the top-left corner" + }, + "x2": { + "minimum": 0, + "maximum": 999, + "type": "integer", + "title": "X2", + "description": "X-coordinate of the bottom-right corner" + }, + "x1": { + "minimum": 0, + "maximum": 999, + "type": "integer", + "title": "X1", + "description": "X-coordinate of the top-left corner" + }, + "y2": { + "minimum": 0, + "maximum": 999, + "type": "integer", + "title": "Y2", + "description": "Y-coordinate of the bottom-right corner" + } + }, + "x-fal-order-properties": [ + "x1", + "y1", + "x2", + "y2" + ], + "required": [ + "x1", + "y1", + "x2", + "y2" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "type": "integer", + "title": "Height" + }, + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + }, + "PolygonOutput": { + "title": "PolygonOutput", + "type": "object", + "properties": { + "polygons": { + "description": "List of polygons", + "type": "array", + "title": "Polygons", + "items": { + "$ref": "#/components/schemas/Polygon" + } + } + }, + "x-fal-order-properties": [ + "polygons" + ], + "required": [ + "polygons" + ] + }, + "Polygon": { + "title": "Polygon", + "type": "object", + "properties": { + "points": { + "description": "List of points", + "type": "array", + "title": "Points", + "items": { + "additionalProperties": { + "type": "number" + }, + "type": "object" + } + }, + "label": { + "description": "Label of the polygon", + "type": "string", + "title": "Label" + } + }, + "x-fal-order-properties": [ + "points", + "label" + ], + "required": [ + "points", + "label" + ] + } + } + }, + "paths": { + "/fal-ai/florence-2-large/region-to-segmentation/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/region-to-segmentation/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/region-to-segmentation": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Florence2LargeRegionToSegmentationInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/region-to-segmentation/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Florence2LargeRegionToSegmentationOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/stable-diffusion-v3-medium/image-to-image", + "metadata": { + "display_name": "Stable Diffusion V3", + "category": "image-to-image", + "description": "Stable Diffusion 3 Medium (Image to Image) is a Multimodal Diffusion Transformer (MMDiT) model that improves image quality, typography, prompt understanding, and efficiency.", + "status": "active", + "tags": [ + "diffusion", + "editing", + "style" + ], + "updated_at": "2026-01-26T21:44:46.791Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/florence-2-large.jpeg", + "model_url": "https://fal.run/fal-ai/stable-diffusion-v3-medium/image-to-image", + "github_url": "https://huggingface.co/stabilityai/stable-diffusion-3-medium/blob/main/LICENSE", + "license_type": "commercial", + "date": "2024-06-12T00:00:00.000Z", + "group": { + "key": "stable-diffusion-v3-medium", + "label": "Image to Image" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/stable-diffusion-v3-medium/image-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/stable-diffusion-v3-medium/image-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/stable-diffusion-v3-medium/image-to-image", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/florence-2-large.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/stable-diffusion-v3-medium/image-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/stable-diffusion-v3-medium/image-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "StableDiffusionV3MediumImageToImageInput": { + "title": "ImageToImageInput", + "type": "object", + "properties": { + "prompt_expansion": { + "title": "Enhance Prompt", + "type": "boolean", + "description": "If set to true, prompt will be upsampled with more details.", + "default": false + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 4, + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "examples": [ + null + ], + "description": "The size of the generated image. Defaults to the conditioning image's size.", + "nullable": true + }, + "prompt": { + "examples": [ + "cat wizard, gandalf, lord of the rings, detailed, fantasy, cute, adorable, Pixar, Disney, 8k" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "image_url": { + "examples": [ + "https://fal.media/files/zebra/b52cVi3BhLDJcBrk6x0DL.png" + ], + "title": "Image URL", + "type": "string", + "description": "The image URL to generate an image from." + }, + "strength": { + "minimum": 0.01, + "title": "Strength", + "type": "number", + "maximum": 1, + "description": "The strength of the image-to-image transformation.", + "default": 0.9 + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance scale (CFG)", + "type": "number", + "maximum": 20, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 5 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps to perform.", + "default": 28 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to generate an image from.", + "default": "" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + } + }, + "x-fal-order-properties": [ + "image_url", + "prompt", + "negative_prompt", + "prompt_expansion", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "strength" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "StableDiffusionV3MediumImageToImageOutput": { + "title": "SD3Output", + "type": "object", + "properties": { + "prompt": 
{ + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "num_images": { + "title": "Number of Images", + "type": "integer", + "description": "The number of images generated." + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt", + "num_images" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt", + "num_images" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/stable-diffusion-v3-medium/image-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/stable-diffusion-v3-medium/image-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/stable-diffusion-v3-medium/image-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StableDiffusionV3MediumImageToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/stable-diffusion-v3-medium/image-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StableDiffusionV3MediumImageToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/dwpose", + "metadata": { + "display_name": "DWPose Pose Prediction", + "category": "image-to-image", + "description": "Predict poses from images.", + "status": "active", + "tags": [ + "pose", + "utility" + ], + "updated_at": "2026-01-26T21:44:15.090Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/dwpose.jpeg", + "model_url": "https://fal.run/fal-ai/dwpose", + "license_type": "commercial", + "date": "2024-06-01T00:00:00.000Z", + "group": { + "key": "dwpose", + "label": "Image to Pose" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/dwpose", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/dwpose queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/dwpose", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/dwpose.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/dwpose", + "documentationUrl": "https://fal.ai/models/fal-ai/dwpose/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "DwposeInput": { + "title": "DWPoseInput", + "type": "object", + "properties": { + "draw_mode": { + "enum": [ + "full-pose", + "body-pose", + "face-pose", + "hand-pose", + "face-hand-mask", + "face-mask", + "hand-mask" + ], + "title": "Draw Mode", + "type": "string", + "description": "Mode of drawing the pose on the image. 
Options are: 'full-pose', 'body-pose', 'face-pose', 'hand-pose', 'face-hand-mask', 'face-mask', 'hand-mask'.", + "examples": [ + "body-pose" + ], + "default": "body-pose" + }, + "image_url": { + "examples": [ + "https://github.com/badayvedat/sane-controlnet-aux/blob/main/tests/data/pose_sample.jpg?raw=true" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to be processed" + } + }, + "x-fal-order-properties": [ + "image_url", + "draw_mode" + ], + "required": [ + "image_url" + ] + }, + "DwposeOutput": { + "title": "DWPoseOutput", + "type": "object", + "properties": { + "image": { + "title": "Image", + "description": "The predicted pose image", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + } + }, + "x-fal-order-properties": [ + "image" + ], + "required": [ + "image" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/dwpose/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/dwpose/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/dwpose": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DwposeInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/dwpose/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DwposeOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/sd15-depth-controlnet", + "metadata": { + "display_name": "SD 1.5 Depth ControlNet", + "category": "image-to-image", + "description": "SD 1.5 ControlNet", + "status": "active", + "tags": [ + "diffusion", + "editing", + "manipulation", + "controlnet" + ], + "updated_at": "2026-01-26T21:44:48.119Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/tree.jpeg", + "model_url": "https://fal.run/fal-ai/sd15-depth-controlnet", + "date": "2024-05-31T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/sd15-depth-controlnet", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/sd15-depth-controlnet queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/sd15-depth-controlnet", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/tree.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/sd15-depth-controlnet", + "documentationUrl": "https://fal.ai/models/fal-ai/sd15-depth-controlnet/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Sd15DepthControlnetInput": { + "title": "TextToImageControlNetInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Ice fortress, aurora skies, polar wildlife, twilight" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results." 
+ }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image. Leave it none to automatically infer from the control image.", + "examples": [ + null + ], + "nullable": true + }, + "expand_prompt": { + "title": "Expand Prompt", + "type": "boolean", + "description": "If set to true, the prompt will be expanded with additional prompts.", + "default": false + }, + "loras": { + "title": "Loras", + "type": "array", + "description": "The list of LoRA weights to use.", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "default": [] + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance scale (CFG)", + "type": "number", + "maximum": 20, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 7.5 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": false + }, + "negative_prompt": { + "examples": [ + "cartoon, illustration, animation. face. male, female", + "ugly, deformed" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + "default": "" + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 8, + "description": "The number of images to generate.", + "default": 1 + }, + "controlnet_conditioning_scale": { + "minimum": 0, + "title": "Controlnet Conditioning Scale", + "type": "number", + "maximum": 1, + "description": "The scale of the controlnet conditioning.", + "default": 0.5 + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ", + "default": false + }, + "control_image_url": { + "examples": [ + "https://fal-cdn.batuhan-941.workers.dev/files/rabbit/MiN_j3St9B8esJleCZKMU.jpeg" + ], + "title": "Control Image Url", + "type": "string", + "description": "The URL of the control image." + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 70, + "description": "The number of inference steps to perform.", + "default": 35 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + }, + "enable_deep_cache": { + "title": "Enable Deep Cache", + "type": "boolean", + "description": "\n If set to true, DeepCache will be enabled. 
TBD\n ", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "control_image_url", + "controlnet_conditioning_scale", + "negative_prompt", + "image_size", + "num_inference_steps", + "seed", + "enable_deep_cache", + "guidance_scale", + "sync_mode", + "num_images", + "loras", + "enable_safety_checker", + "expand_prompt" + ], + "required": [ + "prompt", + "control_image_url" + ] + }, + "Sd15DepthControlnetOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "examples": [ + "https://civitai.com/api/download/models/135931", + "https://filebin.net/3chfqasxpqu21y8n/my-custom-lora-v1.safetensors" + ], + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights. Or HF model name." + }, + "scale": { + "minimum": 0, + "title": "Scale", + "type": "number", + "maximum": 1, + "description": "\n The scale of the LoRA weight. 
This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + } + }, + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/sd15-depth-controlnet/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sd15-depth-controlnet/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/sd15-depth-controlnet": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sd15DepthControlnetInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sd15-depth-controlnet/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sd15DepthControlnetOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ccsr", + "metadata": { + "display_name": "CCSR Upscaler", + "category": "image-to-image", + "description": "SOTA Image Upscaler", + "status": "active", + "tags": [ + "upscaling" + ], + "updated_at": "2026-01-26T21:44:49.333Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/ccsr.webp", + "thumbnail_animated_url": "https://storage.googleapis.com/falserverless/gallery/ccsr-animated.webp", + "model_url": "https://fal.run/fal-ai/ccsr", + "date": "2024-05-05T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ccsr", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ccsr queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ccsr", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/ccsr.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/ccsr", + "documentationUrl": "https://fal.ai/models/fal-ai/ccsr/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "CcsrInput": { + "title": "CCSRInput", + "type": "object", + "properties": { + "color_fix_type": { + "enum": [ + "none", + "wavelet", + "adain" + ], + "title": "Color Fix Type", + "type": "string", + "description": "Type of color correction for samples.", + "examples": [ + "adain", + "wavelet", + "none" + ], + "default": "adain" + }, + "tile_diffusion_size": { + "minimum": 256, + "maximum": 2048, + "type": "integer", + "title": "Tile Diffusion Size", + "description": "Size of patch.", + "default": 1024 + }, + "tile_vae_decoder_size": { + "minimum": 64, + "maximum": 2048, + "type": "integer", + "title": "Tile Vae Decoder Size", + "description": "Size of VAE patch.", + "default": 226 + }, + "tile_vae_encoder_size": { + "minimum": 128, + "maximum": 2048, + "type": "integer", + "title": "Tile Vae Encoder Size", + "description": "Size of latent image", + "default": 1024 + }, + "t_min": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "T Min", + "description": "The starting point of uniform sampling strategy.", + "default": 0.3333 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/gallery/blue-bird.jpeg" + ], + "title": "Image Url", + "type": "string", + "description": "The URL or data URI of the image to upscale." + }, + "tile_diffusion_stride": { + "minimum": 128, + "maximum": 1024, + "type": "integer", + "title": "Tile Diffusion Stride", + "description": "Stride of sliding patch.", + "default": 512 + }, + "tile_vae": { + "title": "Tile Vae", + "type": "boolean", + "description": "If specified, a patch-based sampling strategy will be used for VAE decoding.", + "default": false + }, + "scale": { + "description": "The scale of the output image. The higher the scale, the bigger the output image will be.", + "type": "number", + "minimum": 1, + "maximum": 4, + "title": "Scale", + "default": 2 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Seed for reproducibility. Different seeds will make slightly different results." + }, + "t_max": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "T Max", + "description": "The ending point of uniform sampling strategy.", + "default": 0.6667 + }, + "steps": { + "minimum": 10, + "maximum": 100, + "type": "integer", + "title": "Steps", + "description": "The number of steps to run the model for. The higher the number the better the quality and longer it will take to generate.", + "default": 50 + }, + "tile_diffusion": { + "enum": [ + "none", + "mix", + "gaussian" + ], + "title": "Tile Diffusion", + "type": "string", + "description": "If specified, a patch-based sampling strategy will be used for sampling.", + "examples": [ + "none", + "mix", + "gaussian" + ], + "default": "none" + } + }, + "x-fal-order-properties": [ + "image_url", + "scale", + "tile_diffusion", + "tile_diffusion_size", + "tile_diffusion_stride", + "tile_vae", + "tile_vae_decoder_size", + "tile_vae_encoder_size", + "steps", + "t_max", + "t_min", + "color_fix_type", + "seed" + ], + "required": [ + "image_url" + ] + }, + "CcsrOutput": { + "title": "CCSROutput", + "type": "object", + "properties": { + "image": { + "title": "Image", + "description": "The generated image file info.", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for the generation." 
+ } + }, + "x-fal-order-properties": [ + "image", + "seed" + ], + "required": [ + "image", + "seed" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ccsr/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ccsr/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/ccsr": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CcsrInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ccsr/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CcsrOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/omni-zero", + "metadata": { + "display_name": "Omni Zero", + "category": "image-to-image", + "description": "Any pose, any style, any identity", + "status": "active", + "tags": [ + "style transfer" + ], + "updated_at": "2026-01-26T21:44:50.537Z", + "is_favorited": false, + "thumbnail_url": "https://pbs.twimg.com/media/GMBtTg8W0AEg_-I?format=jpg&name=medium", + "model_url": "https://fal.run/fal-ai/omni-zero", + "date": "2024-04-25T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/omni-zero", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/omni-zero queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/omni-zero", + "category": "image-to-image", + "thumbnailUrl": "https://pbs.twimg.com/media/GMBtTg8W0AEg_-I?format=jpg&name=medium", + "playgroundUrl": "https://fal.ai/models/fal-ai/omni-zero", + "documentationUrl": "https://fal.ai/models/fal-ai/omni-zero/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "OmniZeroInput": { + "title": "OmniZeroInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A woman" + ], + "title": "Prompt", + "type": "string", + "description": "Prompt to guide the image generation." + }, + "identity_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/omni_zero/identity.jpg" + ], + "title": "Identity Image Url", + "type": "string", + "description": "Identity image url." 
+ }, + "identity_strength": { + "examples": [ + 1 + ], + "title": "Identity Strength", + "type": "number", + "description": "Identity strength.", + "default": 1 + }, + "number_of_images": { + "examples": [ + 1 + ], + "title": "Number Of Images", + "type": "integer", + "description": "Number of images.", + "default": 1 + }, + "guidance_scale": { + "examples": [ + 5 + ], + "title": "Guidance Scale", + "type": "number", + "description": "Guidance scale.", + "default": 5 + }, + "image_strength": { + "examples": [ + 0.75 + ], + "title": "Image Strength", + "type": "number", + "description": "Image strength.", + "default": 0.75 + }, + "negative_prompt": { + "examples": [ + "" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt to guide the image generation.", + "default": "" + }, + "composition_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/omni_zero/structure.jpg" + ], + "title": "Composition Image Url", + "type": "string", + "description": "Composition image url." + }, + "depth_strength": { + "examples": [ + 0.5 + ], + "title": "Depth Strength", + "type": "number", + "description": "Depth strength.", + "default": 0.5 + }, + "composition_strength": { + "examples": [ + 1 + ], + "title": "Composition Strength", + "type": "number", + "description": "Composition strength.", + "default": 1 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/omni_zero/structure.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "Input image url." + }, + "style_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/omni_zero/style.jpg" + ], + "title": "Style Image Url", + "type": "string", + "description": "Style image url." + }, + "face_strength": { + "examples": [ + 1 + ], + "title": "Face Strength", + "type": "number", + "description": "Face strength.", + "default": 1 + }, + "style_strength": { + "examples": [ + 1 + ], + "title": "Style Strength", + "type": "number", + "description": "Style strength.", + "default": 1 + }, + "seed": { + "examples": [ + 42 + ], + "title": "Seed", + "type": "integer", + "description": "Seed.", + "default": 42 + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "image_url", + "composition_image_url", + "style_image_url", + "identity_image_url", + "image_strength", + "composition_strength", + "depth_strength", + "style_strength", + "face_strength", + "identity_strength", + "guidance_scale", + "seed", + "number_of_images" + ], + "required": [ + "prompt", + "image_url", + "composition_image_url", + "style_image_url", + "identity_image_url" + ] + }, + "OmniZeroOutput": { + "title": "OmniZeroOutput", + "type": "object", + "properties": { + "image": { + "examples": [ + { + "height": 1024, + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/model_tests/omni_zero/result.png", + "width": 1024 + } + ], + "title": "Image", + "description": "The generated image.", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + } + }, + "x-fal-order-properties": [ + "image" + ], + "required": [ + "image" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." 
+ }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "description": "Represents an image file.", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/omni-zero/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/omni-zero/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/omni-zero": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OmniZeroInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/omni-zero/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OmniZeroOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/hyper-sdxl/image-to-image", + "metadata": { + "display_name": "Hyper SDXL", + "category": "image-to-image", + "description": "Hyper-charge SDXL's performance and creativity.", + "status": "active", + "tags": [ + "diffusion", + "editing" + ], + "updated_at": "2026-01-26T21:44:50.138Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/kangaroo/LM0fy_9qT_8FlKrWhR7Zt.jpeg", + "model_url": "https://fal.run/fal-ai/hyper-sdxl/image-to-image", + "github_url": "https://huggingface.co/ByteDance/Hyper-SD/blob/main/LICENSE.md", + "date": "2024-04-25T00:00:00.000Z", + "group": { + "key": "hyper-sdxl", + "label": "Image to Image" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "error": { + "code": "expansion_failed", + "message": "Failed to fetch OpenAPI schema" + } + } + }, + { + "endpoint_id": "fal-ai/hyper-sdxl/inpainting", + "metadata": { + "display_name": "Hyper SDXL", + "category": "image-to-image", + "description": "Hyper-charge SDXL's performance and creativity.", + "status": "active", + "tags": [ + "diffusion" + ], + "updated_at": "2026-01-26T21:44:50.797Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/kangaroo/LM0fy_9qT_8FlKrWhR7Zt.jpeg", + "model_url": "https://fal.run/fal-ai/hyper-sdxl/inpainting", + "github_url": "https://huggingface.co/ByteDance/Hyper-SD/blob/main/LICENSE.md", + "date": "2024-04-25T00:00:00.000Z", + "group": { + "key": "hyper-sdxl", + "label": "Inpainting" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "error": { + "code": "expansion_failed", + "message": "Failed to fetch OpenAPI schema" + } + } + }, + { + "endpoint_id": "fal-ai/ip-adapter-face-id", + "metadata": { + "display_name": "IP Adapter Face ID", + "category": "image-to-image", + "description": "High quality zero-shot personalization", + "status": "active", + "tags": [ + "ip-adapter", + "personalization", + "customization", + "editing" + ], + "updated_at": "2026-01-26T21:44:50.922Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/ip-adapter-face-id.jpeg", + "model_url": "https://fal.run/fal-ai/ip-adapter-face-id", + "license_type": "research", + "date": "2024-04-22T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ip-adapter-face-id", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ip-adapter-face-id queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ip-adapter-face-id", + 
"category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/ip-adapter-face-id.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ip-adapter-face-id", + "documentationUrl": "https://fal.ai/models/fal-ai/ip-adapter-face-id/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "IpAdapterFaceIdInput": { + "title": "IpAdapterFaceIdInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Man cyberpunk, synthwave night city, futuristic, high quality, highly detailed, high resolution, sharp, hyper realistic, extremely detailed" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results." + }, + "face_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/upscale/image%20(8).png" + ], + "title": "Face Image Url", + "type": "string", + "description": "An image of a face to match. If an image with a size of 640x640 is not provided, it will be scaled and cropped to that size." + }, + "width": { + "minimum": 512, + "maximum": 1024, + "type": "integer", + "title": "Width", + "description": "\n The width of the generated image.\n ", + "default": 512 + }, + "face_id_det_size": { + "minimum": 64, + "maximum": 640, + "type": "integer", + "title": "Face Id Det Size", + "description": "\n The size of the face detection model. The higher the number the more accurate\n the detection will be but it will also take longer to run. The higher the number the more\n likely it will fail to find a face as well. Lower it if you are having trouble\n finding a face in the image.\n ", + "default": 640 + }, + "guidance_scale": { + "minimum": 0, + "maximum": 16, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 7.5 + }, + "negative_prompt": { + "examples": [ + "blurry, low resolution, bad, ugly, low quality, pixelated, interpolated, compression artifacts, noisey, grainy" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use.Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. 
moustache, blurry, low resolution).\n ", + "default": "blurry, low resolution, bad, ugly, low quality, pixelated, interpolated, compression artifacts, noisey, grainy" + }, + "height": { + "minimum": 512, + "maximum": 1024, + "type": "integer", + "title": "Height", + "description": "\n The height of the generated image.\n ", + "default": 512 + }, + "num_samples": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Samples", + "description": "\n The number of samples for face id. The more samples the better the image will\n be but it will also take longer to generate. Default is 4.\n ", + "default": 4 + }, + "base_sdxl_model_repo": { + "title": "Base Sdxl Model Repo", + "type": "string", + "description": "The URL to the base SDXL model. Default is SG161222/RealVisXL_V3.0", + "default": "SG161222/RealVisXL_V3.0" + }, + "base_1_5_model_repo": { + "title": "Base 1 5 Model Repo", + "type": "string", + "description": "The URL to the base 1.5 model. Default is SG161222/Realistic_Vision_V4.0_noVAE", + "default": "SG161222/Realistic_Vision_V4.0_noVAE" + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 200, + "type": "integer", + "title": "Num Inference Steps", + "description": "\n The number of inference steps to use for generating the image. The more steps\n the better the image will be but it will also take longer to generate.\n ", + "default": 50 + }, + "model_type": { + "enum": [ + "1_5-v1", + "1_5-v1-plus", + "1_5-v2-plus", + "SDXL-v1", + "SDXL-v2-plus", + "1_5-auraface-v1" + ], + "title": "Model Type", + "type": "string", + "description": "The model type to use. 1_5 is the default and is recommended for most use cases.", + "examples": [ + "1_5-v1", + "1_5-v1-plus", + "1_5-v2-plus", + "SDXL-v1", + "SDXL-v2-plus", + "1_5-auraface-v1" + ], + "default": "1_5-v1" + }, + "face_images_data_url": { + "title": "Face Images Data Url", + "type": "string", + "description": "\n URL to zip archive with images of faces. The images embedding will be averaged to\n create a more accurate face id.\n " + }, + "seed": { + "examples": [ + 42 + ], + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + } + }, + "x-fal-order-properties": [ + "model_type", + "prompt", + "face_image_url", + "face_images_data_url", + "negative_prompt", + "seed", + "guidance_scale", + "num_inference_steps", + "num_samples", + "width", + "height", + "face_id_det_size", + "base_1_5_model_repo", + "base_sdxl_model_repo" + ], + "required": [ + "prompt" + ] + }, + "IpAdapterFaceIdOutput": { + "title": "IpAdapterFaceIdOutput", + "type": "object", + "properties": { + "image": { + "title": "Image", + "description": "The generated image file info.", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "image", + "seed" + ], + "required": [ + "image", + "seed" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." 
+ }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "description": "Represents an image file.", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ip-adapter-face-id/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ip-adapter-face-id/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/ip-adapter-face-id": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IpAdapterFaceIdInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ip-adapter-face-id/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IpAdapterFaceIdOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/lora/inpaint", + "metadata": { + "display_name": "Stable Diffusion with LoRAs", + "category": "image-to-image", + "description": "Run Any Stable Diffusion model with customizable LoRA weights.", + "status": "active", + "tags": [ + "diffusion", + "lora", + "customization", + "fine-tuning" + ], + "updated_at": "2026-01-26T21:44:51.294Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/sd-loras.jpeg", + "model_url": "https://fal.run/fal-ai/lora/inpaint", + "github_url": "https://huggingface.co/spaces/CompVis/stable-diffusion-license", + "date": "2024-04-18T00:00:00.000Z", + "group": { + "key": "sd-loras", + "label": "Inpainting" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/lora/inpaint", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/lora/inpaint queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/lora/inpaint", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/sd-loras.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/lora/inpaint", + "documentationUrl": "https://fal.ai/models/fal-ai/lora/inpaint/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "LoraInpaintInput": { + "title": "InpaintInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Photo of a european medieval 40 year old queen, silver hair, highly detailed face, detailed eyes, head shot, intricate crown, age spots, wrinkles", + "Photo of a classic red mustang car parked in las vegas strip at night" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results." + }, + "noise_strength": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Noise Strength", + "description": "The amount of noise to add to the input image. Only used if the image_url is provided. 1.0 is complete noise and 0 is no noise.", + "default": 0.5 + }, + "tile_height": { + "minimum": 128, + "maximum": 4096, + "type": "integer", + "title": "Tile Height", + "description": "The size of the tiles to be used for the image generation.", + "default": 4096 + }, + "embeddings": { + "title": "Embeddings", + "type": "array", + "description": "\n The embeddings to use for the image generation. Only a single embedding is supported at the moment.\n The embeddings will be used to map the tokens in the prompt to the embedding weights.\n ", + "items": { + "$ref": "#/components/schemas/Embedding" + }, + "default": [] + }, + "ic_light_model_url": { + "title": "Ic Light Model Url", + "type": "string", + "description": "\n The URL of the IC Light model to use for the image generation.\n " + }, + "image_encoder_weight_name": { + "examples": [ + "pytorch_model.bin" + ], + "title": "Image Encoder Weight Name", + "type": "string", + "description": "\n The weight name of the image encoder model to use for the image generation.\n ", + "default": "pytorch_model.bin" + }, + "ip_adapter": { + "title": "Ip Adapter", + "type": "array", + "description": "\n The IP adapter to use for the image generation.\n ", + "items": { + "$ref": "#/components/schemas/IPAdapter" + }, + "default": [] + }, + "loras": { + "title": "Loras", + "type": "array", + "description": "\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "default": [] + }, + "scheduler": { + "enum": [ + "DPM++ 2M", + "DPM++ 2M Karras", + "DPM++ 2M SDE", + "DPM++ 2M SDE Karras", + "Euler", + "Euler A", + "Euler (trailing timesteps)", + "LCM", + "LCM (trailing timesteps)", + "DDIM", + "TCD" + ], + "title": "Scheduler", + "type": "string", + "description": "Scheduler / sampler to use for the image denoising process." + }, + "sigmas": { + "default": { + "method": "default", + "array": [] + }, + "title": "Sigmas", + "description": "\n Optionally override the sigmas to use for the denoising process. Only works with schedulers which support the `sigmas` argument in their `set_sigmas` method.\n Defaults to not overriding, in which case the scheduler automatically sets the sigmas based on the `num_inference_steps` parameter.\n If set to a custom sigma schedule, the `num_inference_steps` parameter will be ignored. 
Cannot be set if `timesteps` is set.\n ", + "allOf": [ + { + "$ref": "#/components/schemas/SigmasInput" + } + ] + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 7.5 + }, + "tile_stride_width": { + "minimum": 64, + "maximum": 2048, + "type": "integer", + "title": "Tile Stride Width", + "description": "The stride of the tiles to be used for the image generation.", + "default": 2048 + }, + "debug_per_pass_latents": { + "title": "Debug Per Pass Latents", + "type": "boolean", + "description": "If set to true, the latents will be saved for debugging per pass.", + "default": false + }, + "timesteps": { + "default": { + "method": "default", + "array": [] + }, + "title": "Timesteps", + "description": "\n Optionally override the timesteps to use for the denoising process. Only works with schedulers which support the `timesteps` argument in their `set_timesteps` method.\n Defaults to not overriding, in which case the scheduler automatically sets the timesteps based on the `num_inference_steps` parameter.\n If set to a custom timestep schedule, the `num_inference_steps` parameter will be ignored. Cannot be set if `sigmas` is set.\n ", + "allOf": [ + { + "$ref": "#/components/schemas/TimestepsInput" + } + ] + }, + "model_name": { + "examples": [ + "stabilityai/stable-diffusion-xl-base-1.0", + "runwayml/stable-diffusion-v1-5", + "SG161222/Realistic_Vision_V2.0" + ], + "title": "Model Name", + "type": "string", + "description": "URL or HuggingFace ID of the base model to generate the image." + }, + "prompt_weighting": { + "examples": [ + true + ], + "title": "Prompt Weighting", + "type": "boolean", + "description": "\n If set to true, the prompt weighting syntax will be used.\n Additionally, this will lift the 77 token limit by averaging embeddings.\n ", + "default": false + }, + "variant": { + "title": "Variant", + "type": "string", + "description": "The variant of the model to use for huggingface models, e.g. 'fp16'." + }, + "image_url": { + "title": "Image Url", + "type": "string", + "description": "URL of image to use for image to image/inpainting." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + }, + "mask_url": { + "title": "Mask Url", + "type": "string", + "description": "URL of black-and-white image to use as mask during inpainting." 
+ }, + "image_encoder_subfolder": { + "examples": [], + "title": "Image Encoder Subfolder", + "type": "string", + "description": "\n The subfolder of the image encoder model to use for the image generation.\n " + }, + "ic_light_model_background_image_url": { + "title": "Ic Light Model Background Image Url", + "type": "string", + "description": "\n The URL of the IC Light model background image to use for the image generation.\n Make sure to use a background compatible with the model.\n " + }, + "rescale_betas_snr_zero": { + "title": "Rescale Betas Snr Zero", + "type": "boolean", + "description": "\n Whether to set the rescale_betas_snr_zero option or not for the sampler\n ", + "default": false + }, + "tile_width": { + "minimum": 128, + "maximum": 4096, + "type": "integer", + "title": "Tile Width", + "description": "The size of the tiles to be used for the image generation.", + "default": 4096 + }, + "controlnet_guess_mode": { + "title": "Controlnet Guess Mode", + "type": "boolean", + "description": "\n If set to true, the controlnet will be applied to only the conditional predictions.\n ", + "default": false + }, + "prediction_type": { + "enum": [ + "v_prediction", + "epsilon" + ], + "title": "Prediction Type", + "type": "string", + "description": "\n The type of prediction to use for the image generation.\n The `epsilon` is the default.\n ", + "default": "epsilon" + }, + "eta": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Eta", + "description": "The eta value to be used for the image generation.", + "default": 0 + }, + "image_encoder_path": { + "title": "Image Encoder Path", + "type": "string", + "description": "\n The path to the image encoder model to use for the image generation.\n " + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": false + }, + "negative_prompt": { + "examples": [ + "cartoon, painting, illustration, worst quality, low quality, normal quality" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + "default": "" + }, + "image_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Image Format", + "type": "string", + "description": "The format of the generated image.", + "examples": [ + "jpeg" + ], + "default": "png" + }, + "num_images": { + "minimum": 1, + "maximum": 8, + "type": "integer", + "title": "Number of images", + "description": "\n Number of images to generate in one request. Note that the higher the batch size,\n the longer it will take to generate the images.\n ", + "default": 1 + }, + "debug_latents": { + "title": "Debug Latents", + "type": "boolean", + "description": "If set to true, the latents will be saved for debugging.", + "default": false + }, + "ic_light_image_url": { + "title": "Ic Light Image Url", + "type": "string", + "description": "\n The URL of the IC Light model image to use for the image generation.\n " + }, + "unet_name": { + "title": "Unet Name", + "type": "string", + "description": "URL or HuggingFace ID of the custom U-Net model to use for the image generation." 
+ }, + "clip_skip": { + "minimum": 0, + "maximum": 2, + "type": "integer", + "title": "Clip Skip", + "description": "\n Skips part of the image generation process, leading to slightly different results.\n This means the image renders faster, too.\n ", + "default": 0 + }, + "tile_stride_height": { + "minimum": 64, + "maximum": 2048, + "type": "integer", + "title": "Tile Stride Height", + "description": "The stride of the tiles to be used for the image generation.", + "default": 2048 + }, + "controlnets": { + "title": "Controlnets", + "type": "array", + "description": "\n The control nets to use for the image generation. You can use any number of control nets\n and they will be applied to the image at the specified timesteps.\n ", + "items": { + "$ref": "#/components/schemas/ControlNet" + }, + "default": [] + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 150, + "type": "integer", + "title": "Number of inference steps", + "description": "\n Increasing the amount of steps tells Stable Diffusion that it should take more steps\n to generate your final result which can increase the amount of detail in your image.\n ", + "default": 30 + } + }, + "x-fal-order-properties": [ + "model_name", + "unet_name", + "variant", + "prompt", + "negative_prompt", + "prompt_weighting", + "image_url", + "mask_url", + "noise_strength", + "loras", + "embeddings", + "controlnets", + "controlnet_guess_mode", + "ip_adapter", + "image_encoder_path", + "image_encoder_subfolder", + "image_encoder_weight_name", + "ic_light_model_url", + "ic_light_model_background_image_url", + "ic_light_image_url", + "seed", + "num_inference_steps", + "guidance_scale", + "clip_skip", + "scheduler", + "timesteps", + "sigmas", + "prediction_type", + "rescale_betas_snr_zero", + "image_format", + "num_images", + "enable_safety_checker", + "tile_width", + "tile_height", + "tile_stride_width", + "tile_stride_height", + "eta", + "debug_latents", + "debug_per_pass_latents" + ], + "required": [ + "model_name", + "prompt" + ] + }, + "LoraInpaintOutput": { + "title": "OutputParameters", + "type": "object", + "properties": { + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "debug_latents": { + "title": "Debug Latents", + "description": "The latents saved for debugging.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "debug_per_pass_latents": { + "title": "Debug Per Pass Latents", + "description": "The latents saved for debugging per pass.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "images", + "seed", + "has_nsfw_concepts", + "debug_latents", + "debug_per_pass_latents" + ], + "required": [ + "images", + "seed", + "has_nsfw_concepts" + ] + }, + "Embedding": { + "title": "Embedding", + "type": "object", + "properties": { + "tokens": { + "title": "Tokens", + "type": "array", + "description": "\n The tokens to map the embedding weights to. 
Use these tokens in your prompts.\n ", + "items": { + "type": "string" + }, + "default": [ + "", + "" + ] + }, + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the embedding weights." + } + }, + "x-fal-order-properties": [ + "path", + "tokens" + ], + "required": [ + "path" + ] + }, + "IPAdapter": { + "title": "IPAdapter", + "type": "object", + "properties": { + "unconditional_noising_factor": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Unconditional Noising Factor", + "description": "The factor to apply to the unconditional noising of the IP adapter.", + "default": 0 + }, + "ip_adapter_image_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "type": "string" + } + } + ], + "title": "Ip Adapter Image Url", + "description": "URL of the image to be used as the IP adapter." + }, + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the IP adapter weights." + }, + "image_projection_shortcut": { + "title": "Image Projection Shortcut", + "type": "boolean", + "description": "\n The value to set the image projection shortcut to. For FaceID plus V1 models,\n this should be set to False. For FaceID plus V2 models, this should be set to True.\n Default is True.\n ", + "default": true + }, + "scale_json": { + "title": "Scale Json", + "type": "object", + "description": "\n The scale of the IP adapter weight. This is used to scale the IP adapter weight\n before merging it with the base model.\n " + }, + "ip_adapter_mask_url": { + "title": "Ip Adapter Mask Url", + "type": "string", + "description": "\n The mask to use for the IP adapter. When using a mask, the ip-adapter image size and the mask size must be the same\n " + }, + "model_subfolder": { + "title": "Model Subfolder", + "type": "string", + "description": "Subfolder in the model directory where the IP adapter weights are stored." + }, + "scale": { + "minimum": 0, + "title": "Scale", + "type": "number", + "description": "\n The scale of the IP adapter weight. This is used to scale the IP adapter weight\n before merging it with the base model.\n ", + "default": 1 + }, + "insight_face_model_path": { + "title": "Insight Face Model Path", + "type": "string", + "description": "URL or the path to the InsightFace model weights." + }, + "weight_name": { + "title": "Weight Name", + "type": "string", + "description": "Name of the weight file." + } + }, + "x-fal-order-properties": [ + "ip_adapter_image_url", + "ip_adapter_mask_url", + "path", + "model_subfolder", + "weight_name", + "insight_face_model_path", + "scale", + "scale_json", + "unconditional_noising_factor", + "image_projection_shortcut" + ], + "required": [ + "ip_adapter_image_url", + "path" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Scale", + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + } + }, + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "SigmasInput": { + "title": "SigmasInput", + "type": "object", + "properties": { + "method": { + "enum": [ + "default", + "array" + ], + "title": "Method", + "type": "string", + "description": "\n The method to use for the sigmas. 
If set to 'array', the sigmas will be set based\n on the provided sigmas schedule in the `array` field.\n Defaults to 'default' which means the scheduler will use the sigmas of the scheduler.\n ", + "default": "default" + }, + "array": { + "title": "Array", + "type": "array", + "description": "\n Sigmas schedule to be used if 'array' method is selected.\n ", + "items": { + "type": "number" + }, + "default": [] + } + }, + "x-fal-order-properties": [ + "method", + "array" + ] + }, + "TimestepsInput": { + "title": "TimestepsInput", + "type": "object", + "properties": { + "method": { + "enum": [ + "default", + "array" + ], + "title": "Method", + "type": "string", + "description": "\n The method to use for the timesteps. If set to 'array', the timesteps will be set based\n on the provided timesteps schedule in the `array` field.\n Defaults to 'default' which means the scheduler will use the `num_inference_steps` parameter.\n ", + "default": "default" + }, + "array": { + "title": "Array", + "type": "array", + "description": "\n Timesteps schedule to be used if 'array' method is selected.\n ", + "items": { + "type": "integer" + }, + "default": [] + } + }, + "x-fal-order-properties": [ + "method", + "array" + ] + }, + "ControlNet": { + "title": "ControlNet", + "type": "object", + "properties": { + "conditioning_scale": { + "minimum": 0, + "maximum": 2, + "type": "number", + "title": "Conditioning Scale", + "description": "\n The scale of the control net weight. This is used to scale the control net weight\n before merging it with the base model.\n ", + "default": 1 + }, + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the control net weights." + }, + "ip_adapter_index": { + "title": "Ip Adapter Index", + "type": "integer", + "description": "\n The index of the IP adapter to be applied to the controlnet. This is only needed for InstantID ControlNets.\n " + }, + "end_percentage": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "End Percentage", + "description": "\n The percentage of the image to end applying the controlnet in terms of the total timesteps.\n ", + "default": 1 + }, + "config_url": { + "title": "Config Url", + "type": "string", + "description": "Optional URL to the controlnet config.json file." + }, + "image_url": { + "title": "Image Url", + "type": "string", + "description": "URL of the image to be used as the control net." + }, + "variant": { + "title": "Variant", + "type": "string", + "description": "The optional variant if a Hugging Face repo key is used." + }, + "mask_url": { + "title": "Mask Url", + "type": "string", + "description": "\n The mask to use for the controlnet. 
When using a mask, the control image size and the mask size must be the same and divisible by 32.\n " + }, + "start_percentage": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Start Percentage", + "description": "\n The percentage of the image to start applying the controlnet in terms of the total timesteps.\n ", + "default": 0 + } + }, + "x-fal-order-properties": [ + "path", + "config_url", + "variant", + "image_url", + "mask_url", + "conditioning_scale", + "start_percentage", + "end_percentage", + "ip_adapter_index" + ], + "required": [ + "path", + "image_url" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/lora/inpaint/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/lora/inpaint/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/lora/inpaint": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LoraInpaintInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/lora/inpaint/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LoraInpaintOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/lora/image-to-image", + "metadata": { + "display_name": "Stable Diffusion with LoRAs", + "category": "image-to-image", + "description": "Run Any Stable Diffusion model with customizable LoRA weights.", + "status": "active", + "tags": [ + "diffusion", + "lora", + "customization", + "fine-tuning" + ], + "updated_at": "2026-01-26T21:44:51.421Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/sd-loras.jpeg", + "model_url": "https://fal.run/fal-ai/lora/image-to-image", + "github_url": "https://huggingface.co/spaces/CompVis/stable-diffusion-license", + "date": "2024-04-17T00:00:00.000Z", + "group": { + "key": "sd-loras", + "label": "Image to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/lora/image-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/lora/image-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/lora/image-to-image", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/sd-loras.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/lora/image-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/lora/image-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LoraImageToImageInput": { + "title": "ImageToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Photo of a european medieval 40 year old queen, silver hair, highly detailed face, detailed eyes, head shot, intricate crown, age spots, wrinkles", + "Photo of a classic red mustang car parked in las vegas strip at night" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results." + }, + "noise_strength": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Noise Strength", + "description": "The amount of noise to add to the input image. Only used if the image_url is provided. 1.0 is complete noise and 0 is no noise.", + "default": 0.5 + }, + "tile_height": { + "minimum": 128, + "maximum": 4096, + "type": "integer", + "title": "Tile Height", + "description": "The size of the tiles to be used for the image generation.", + "default": 4096 + }, + "embeddings": { + "title": "Embeddings", + "type": "array", + "description": "\n The embeddings to use for the image generation. Only a single embedding is supported at the moment.\n The embeddings will be used to map the tokens in the prompt to the embedding weights.\n ", + "items": { + "$ref": "#/components/schemas/Embedding" + }, + "default": [] + }, + "ic_light_model_url": { + "title": "Ic Light Model Url", + "type": "string", + "description": "\n The URL of the IC Light model to use for the image generation.\n " + }, + "image_encoder_weight_name": { + "examples": [ + "pytorch_model.bin" + ], + "title": "Image Encoder Weight Name", + "type": "string", + "description": "\n The weight name of the image encoder model to use for the image generation.\n ", + "default": "pytorch_model.bin" + }, + "ip_adapter": { + "title": "Ip Adapter", + "type": "array", + "description": "\n The IP adapter to use for the image generation.\n ", + "items": { + "$ref": "#/components/schemas/IPAdapter" + }, + "default": [] + }, + "loras": { + "title": "Loras", + "type": "array", + "description": "\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "default": [] + }, + "scheduler": { + "enum": [ + "DPM++ 2M", + "DPM++ 2M Karras", + "DPM++ 2M SDE", + "DPM++ 2M SDE Karras", + "Euler", + "Euler A", + "Euler (trailing timesteps)", + "LCM", + "LCM (trailing timesteps)", + "DDIM", + "TCD" + ], + "title": "Scheduler", + "type": "string", + "description": "Scheduler / sampler to use for the image denoising process." + }, + "sigmas": { + "default": { + "method": "default", + "array": [] + }, + "title": "Sigmas", + "description": "\n Optionally override the sigmas to use for the denoising process. 
Only works with schedulers which support the `sigmas` argument in their `set_sigmas` method.\n Defaults to not overriding, in which case the scheduler automatically sets the sigmas based on the `num_inference_steps` parameter.\n If set to a custom sigma schedule, the `num_inference_steps` parameter will be ignored. Cannot be set if `timesteps` is set.\n ", + "allOf": [ + { + "$ref": "#/components/schemas/SigmasInput" + } + ] + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 7.5 + }, + "tile_stride_width": { + "minimum": 64, + "maximum": 2048, + "type": "integer", + "title": "Tile Stride Width", + "description": "The stride of the tiles to be used for the image generation.", + "default": 2048 + }, + "debug_per_pass_latents": { + "title": "Debug Per Pass Latents", + "type": "boolean", + "description": "If set to true, the latents will be saved for debugging per pass.", + "default": false + }, + "timesteps": { + "default": { + "method": "default", + "array": [] + }, + "title": "Timesteps", + "description": "\n Optionally override the timesteps to use for the denoising process. Only works with schedulers which support the `timesteps` argument in their `set_timesteps` method.\n Defaults to not overriding, in which case the scheduler automatically sets the timesteps based on the `num_inference_steps` parameter.\n If set to a custom timestep schedule, the `num_inference_steps` parameter will be ignored. Cannot be set if `sigmas` is set.\n ", + "allOf": [ + { + "$ref": "#/components/schemas/TimestepsInput" + } + ] + }, + "model_name": { + "examples": [ + "stabilityai/stable-diffusion-xl-base-1.0", + "runwayml/stable-diffusion-v1-5", + "SG161222/Realistic_Vision_V2.0" + ], + "title": "Model Name", + "type": "string", + "description": "URL or HuggingFace ID of the base model to generate the image." + }, + "prompt_weighting": { + "examples": [ + true + ], + "title": "Prompt Weighting", + "type": "boolean", + "description": "\n If set to true, the prompt weighting syntax will be used.\n Additionally, this will lift the 77 token limit by averaging embeddings.\n ", + "default": false + }, + "variant": { + "title": "Variant", + "type": "string", + "description": "The variant of the model to use for huggingface models, e.g. 'fp16'." + }, + "image_url": { + "title": "Image Url", + "type": "string", + "description": "URL of image to use for image to image/inpainting." 
+ }, + "controlnet_guess_mode": { + "title": "Controlnet Guess Mode", + "type": "boolean", + "description": "\n If set to true, the controlnet will be applied to only the conditional predictions.\n ", + "default": false + }, + "image_encoder_subfolder": { + "examples": [], + "title": "Image Encoder Subfolder", + "type": "string", + "description": "\n The subfolder of the image encoder model to use for the image generation.\n " + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + }, + "ic_light_model_background_image_url": { + "title": "Ic Light Model Background Image Url", + "type": "string", + "description": "\n The URL of the IC Light model background image to use for the image generation.\n Make sure to use a background compatible with the model.\n " + }, + "rescale_betas_snr_zero": { + "title": "Rescale Betas Snr Zero", + "type": "boolean", + "description": "\n Whether to set the rescale_betas_snr_zero option or not for the sampler\n ", + "default": false + }, + "tile_width": { + "minimum": 128, + "maximum": 4096, + "type": "integer", + "title": "Tile Width", + "description": "The size of the tiles to be used for the image generation.", + "default": 4096 + }, + "prediction_type": { + "enum": [ + "v_prediction", + "epsilon" + ], + "title": "Prediction Type", + "type": "string", + "description": "\n The type of prediction to use for the image generation.\n The `epsilon` is the default.\n ", + "default": "epsilon" + }, + "eta": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Eta", + "description": "The eta value to be used for the image generation.", + "default": 0 + }, + "image_encoder_path": { + "title": "Image Encoder Path", + "type": "string", + "description": "\n The path to the image encoder model to use for the image generation.\n " + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": false + }, + "negative_prompt": { + "examples": [ + "cartoon, painting, illustration, worst quality, low quality, normal quality" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + "default": "" + }, + "image_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Image Format", + "type": "string", + "description": "The format of the generated image.", + "examples": [ + "jpeg" + ], + "default": "png" + }, + "num_images": { + "minimum": 1, + "maximum": 8, + "type": "integer", + "title": "Number of images", + "description": "\n Number of images to generate in one request. Note that the higher the batch size,\n the longer it will take to generate the images.\n ", + "default": 1 + }, + "debug_latents": { + "title": "Debug Latents", + "type": "boolean", + "description": "If set to true, the latents will be saved for debugging.", + "default": false + }, + "ic_light_image_url": { + "title": "Ic Light Image Url", + "type": "string", + "description": "\n The URL of the IC Light model image to use for the image generation.\n " + }, + "unet_name": { + "title": "Unet Name", + "type": "string", + "description": "URL or HuggingFace ID of the custom U-Net model to use for the image generation." 
+ }, + "clip_skip": { + "minimum": 0, + "maximum": 2, + "type": "integer", + "title": "Clip Skip", + "description": "\n Skips part of the image generation process, leading to slightly different results.\n This means the image renders faster, too.\n ", + "default": 0 + }, + "tile_stride_height": { + "minimum": 64, + "maximum": 2048, + "type": "integer", + "title": "Tile Stride Height", + "description": "The stride of the tiles to be used for the image generation.", + "default": 2048 + }, + "controlnets": { + "title": "Controlnets", + "type": "array", + "description": "\n The control nets to use for the image generation. You can use any number of control nets\n and they will be applied to the image at the specified timesteps.\n ", + "items": { + "$ref": "#/components/schemas/ControlNet" + }, + "default": [] + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 150, + "type": "integer", + "title": "Number of inference steps", + "description": "\n Increasing the amount of steps tells Stable Diffusion that it should take more steps\n to generate your final result which can increase the amount of detail in your image.\n ", + "default": 30 + } + }, + "x-fal-order-properties": [ + "model_name", + "unet_name", + "variant", + "prompt", + "negative_prompt", + "prompt_weighting", + "image_url", + "noise_strength", + "loras", + "embeddings", + "controlnets", + "controlnet_guess_mode", + "ip_adapter", + "image_encoder_path", + "image_encoder_subfolder", + "image_encoder_weight_name", + "ic_light_model_url", + "ic_light_model_background_image_url", + "ic_light_image_url", + "seed", + "num_inference_steps", + "guidance_scale", + "clip_skip", + "scheduler", + "timesteps", + "sigmas", + "prediction_type", + "rescale_betas_snr_zero", + "image_format", + "num_images", + "enable_safety_checker", + "tile_width", + "tile_height", + "tile_stride_width", + "tile_stride_height", + "eta", + "debug_latents", + "debug_per_pass_latents" + ], + "required": [ + "model_name", + "prompt" + ] + }, + "LoraImageToImageOutput": { + "title": "OutputParameters", + "type": "object", + "properties": { + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "debug_latents": { + "title": "Debug Latents", + "description": "The latents saved for debugging.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "debug_per_pass_latents": { + "title": "Debug Per Pass Latents", + "description": "The latents saved for debugging per pass.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "images", + "seed", + "has_nsfw_concepts", + "debug_latents", + "debug_per_pass_latents" + ], + "required": [ + "images", + "seed", + "has_nsfw_concepts" + ] + }, + "Embedding": { + "title": "Embedding", + "type": "object", + "properties": { + "tokens": { + "title": "Tokens", + "type": "array", + "description": "\n The tokens to map the embedding weights to. 
Use these tokens in your prompts.\n ", + "items": { + "type": "string" + }, + "default": [ + "", + "" + ] + }, + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the embedding weights." + } + }, + "x-fal-order-properties": [ + "path", + "tokens" + ], + "required": [ + "path" + ] + }, + "IPAdapter": { + "title": "IPAdapter", + "type": "object", + "properties": { + "unconditional_noising_factor": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Unconditional Noising Factor", + "description": "The factor to apply to the unconditional noising of the IP adapter.", + "default": 0 + }, + "ip_adapter_image_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "type": "string" + } + } + ], + "title": "Ip Adapter Image Url", + "description": "URL of the image to be used as the IP adapter." + }, + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the IP adapter weights." + }, + "image_projection_shortcut": { + "title": "Image Projection Shortcut", + "type": "boolean", + "description": "\n The value to set the image projection shortcut to. For FaceID plus V1 models,\n this should be set to False. For FaceID plus V2 models, this should be set to True.\n Default is True.\n ", + "default": true + }, + "scale_json": { + "title": "Scale Json", + "type": "object", + "description": "\n The scale of the IP adapter weight. This is used to scale the IP adapter weight\n before merging it with the base model.\n " + }, + "ip_adapter_mask_url": { + "title": "Ip Adapter Mask Url", + "type": "string", + "description": "\n The mask to use for the IP adapter. When using a mask, the ip-adapter image size and the mask size must be the same\n " + }, + "model_subfolder": { + "title": "Model Subfolder", + "type": "string", + "description": "Subfolder in the model directory where the IP adapter weights are stored." + }, + "scale": { + "minimum": 0, + "title": "Scale", + "type": "number", + "description": "\n The scale of the IP adapter weight. This is used to scale the IP adapter weight\n before merging it with the base model.\n ", + "default": 1 + }, + "insight_face_model_path": { + "title": "Insight Face Model Path", + "type": "string", + "description": "URL or the path to the InsightFace model weights." + }, + "weight_name": { + "title": "Weight Name", + "type": "string", + "description": "Name of the weight file." + } + }, + "x-fal-order-properties": [ + "ip_adapter_image_url", + "ip_adapter_mask_url", + "path", + "model_subfolder", + "weight_name", + "insight_face_model_path", + "scale", + "scale_json", + "unconditional_noising_factor", + "image_projection_shortcut" + ], + "required": [ + "ip_adapter_image_url", + "path" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Scale", + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + } + }, + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "SigmasInput": { + "title": "SigmasInput", + "type": "object", + "properties": { + "method": { + "enum": [ + "default", + "array" + ], + "title": "Method", + "type": "string", + "description": "\n The method to use for the sigmas. 
If set to 'array', the sigmas will be set based\n on the provided sigmas schedule in the `array` field.\n Defaults to 'default' which means the scheduler will use the sigmas of the scheduler.\n ", + "default": "default" + }, + "array": { + "title": "Array", + "type": "array", + "description": "\n Sigmas schedule to be used if 'array' method is selected.\n ", + "items": { + "type": "number" + }, + "default": [] + } + }, + "x-fal-order-properties": [ + "method", + "array" + ] + }, + "TimestepsInput": { + "title": "TimestepsInput", + "type": "object", + "properties": { + "method": { + "enum": [ + "default", + "array" + ], + "title": "Method", + "type": "string", + "description": "\n The method to use for the timesteps. If set to 'array', the timesteps will be set based\n on the provided timesteps schedule in the `array` field.\n Defaults to 'default' which means the scheduler will use the `num_inference_steps` parameter.\n ", + "default": "default" + }, + "array": { + "title": "Array", + "type": "array", + "description": "\n Timesteps schedule to be used if 'array' method is selected.\n ", + "items": { + "type": "integer" + }, + "default": [] + } + }, + "x-fal-order-properties": [ + "method", + "array" + ] + }, + "ControlNet": { + "title": "ControlNet", + "type": "object", + "properties": { + "conditioning_scale": { + "minimum": 0, + "maximum": 2, + "type": "number", + "title": "Conditioning Scale", + "description": "\n The scale of the control net weight. This is used to scale the control net weight\n before merging it with the base model.\n ", + "default": 1 + }, + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the control net weights." + }, + "ip_adapter_index": { + "title": "Ip Adapter Index", + "type": "integer", + "description": "\n The index of the IP adapter to be applied to the controlnet. This is only needed for InstantID ControlNets.\n " + }, + "end_percentage": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "End Percentage", + "description": "\n The percentage of the image to end applying the controlnet in terms of the total timesteps.\n ", + "default": 1 + }, + "config_url": { + "title": "Config Url", + "type": "string", + "description": "Optional URL to the controlnet config.json file." + }, + "image_url": { + "title": "Image Url", + "type": "string", + "description": "URL of the image to be used as the control net." + }, + "variant": { + "title": "Variant", + "type": "string", + "description": "The optional variant if a Hugging Face repo key is used." + }, + "mask_url": { + "title": "Mask Url", + "type": "string", + "description": "\n The mask to use for the controlnet. 
When using a mask, the control image size and the mask size must be the same and divisible by 32.\n " + }, + "start_percentage": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Start Percentage", + "description": "\n The percentage of the image to start applying the controlnet in terms of the total timesteps.\n ", + "default": 0 + } + }, + "x-fal-order-properties": [ + "path", + "config_url", + "variant", + "image_url", + "mask_url", + "conditioning_scale", + "start_percentage", + "end_percentage", + "ip_adapter_index" + ], + "required": [ + "path", + "image_url" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/lora/image-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/lora/image-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/lora/image-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LoraImageToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/lora/image-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LoraImageToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/fast-sdxl/image-to-image", + "metadata": { + "display_name": "Stable Diffusion XL", + "category": "image-to-image", + "description": "Run SDXL at the speed of light", + "status": "active", + "tags": [ + "diffusion", + "high-res", + "lora", + "ip-adapter", + "controlnet" + ], + "updated_at": "2026-01-26T21:44:51.942Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/fast-sdxl.jpeg", + "model_url": "https://fal.run/fal-ai/fast-sdxl/image-to-image", + "github_url": "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/LICENSE.md", + "date": "2024-04-16T00:00:00.000Z", + "group": { + "key": "stable-diffusion-xl", + "label": "Image to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/fast-sdxl/image-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/fast-sdxl/image-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/fast-sdxl/image-to-image", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/fast-sdxl.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/fast-sdxl/image-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/fast-sdxl/image-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FastSdxlImageToImageInput": { + "title": "ImageToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "an island near sea, with seagulls, moon shining over the sea, light house, boats int he background, fish flying over the sea" + ], + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results.", + "type": "string", + "title": "Prompt" + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image.", + "title": "Image Size", + "default": "square_hd" + }, + "embeddings": { + "description": "The list of embeddings to use.", + "type": "array", + "title": "Embeddings", + "items": { + "$ref": "#/components/schemas/Embedding" + }, + "default": [] + }, + "expand_prompt": { + "description": "If set to true, the prompt will be expanded with additional prompts.", + "type": "boolean", + "title": "Expand Prompt", + "default": false + }, + "loras": { + "description": "The list of LoRA weights to use.", + "type": "array", + "title": "Loras", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "default": [] + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 7.5 + }, + "preserve_aspect_ratio": { + "description": "\n If set to true, the aspect ratio of the generated image will be preserved even\n if the image size is too large. However, if the image is not a multiple of 32\n in width or height, it will be resized to the nearest multiple of 32. By default,\n this snapping to the nearest multiple of 32 will not preserve the aspect ratio.\n Set crop_output to True, to crop the output to the proper aspect ratio\n after generating.\n ", + "type": "boolean", + "title": "Preserve Aspect Ratio", + "default": false + }, + "negative_prompt": { + "examples": [ + "cartoon, illustration, animation. face. male, female" + ], + "description": "\n The negative prompt to use.Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. 
moustache, blurry, low resolution).\n ", + "type": "string", + "title": "Negative Prompt", + "default": "" + }, + "crop_output": { + "description": "\n If set to true, the output is cropped to the proper aspect ratio after generating.\n ", + "type": "boolean", + "title": "Crop Output", + "default": false + }, + "format": { + "enum": [ + "jpeg", + "png" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Format", + "default": "jpeg" + }, + "num_images": { + "minimum": 1, + "maximum": 8, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_url": { + "examples": [ + "https://fal-cdn.batuhan-941.workers.dev/files/tiger/IExuP-WICqaIesLZAZPur.jpeg" + ], + "description": "The URL of the image to use as a starting point for the generation.", + "type": "string", + "title": "Image Url" + }, + "strength": { + "minimum": 0.05, + "maximum": 1, + "type": "number", + "title": "Strength", + "description": "determines how much the generated image resembles the initial image", + "default": 0.95 + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "safety_checker_version": { + "enum": [ + "v1", + "v2" + ], + "description": "The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model.", + "type": "string", + "title": "Safety Checker Version", + "default": "v1" + }, + "request_id": { + "description": "\n An id bound to a request, can be used with response to identify the request\n itself.\n ", + "type": "string", + "title": "Request Id", + "default": "" + }, + "seed": { + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ", + "type": "integer", + "title": "Seed" + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 65, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 25 + } + }, + "x-fal-order-properties": [ + "image_url", + "prompt", + "negative_prompt", + "image_size", + "num_inference_steps", + "guidance_scale", + "strength", + "seed", + "sync_mode", + "num_images", + "loras", + "embeddings", + "enable_safety_checker", + "safety_checker_version", + "expand_prompt", + "format", + "request_id", + "preserve_aspect_ratio", + "crop_output" + ], + "required": [ + "image_url", + "prompt" + ] + }, + "FastSdxlImageToImageOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "description": "The generated image files info.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ", + "type": "integer", + "title": "Seed" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Embedding": { + "title": "Embedding", + "type": "object", + "properties": { + "tokens": { + "description": "The list of tokens to use for the embedding.", + "type": "array", + "title": "Tokens", + "items": { + "type": "string" + }, + "default": [ + "", + "" + ] + }, + "path": { + "examples": [ + "https://civitai.com/api/download/models/135931", + "https://filebin.net/3chfqasxpqu21y8n/my-custom-lora-v1.safetensors" + ], + "description": "URL or the path to the embedding weights.", + "type": "string", + "title": "Path" + } + }, + "x-fal-order-properties": [ + "path", + "tokens" + ], + "required": [ + "path" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "examples": [ + "https://civitai.com/api/download/models/135931", + "https://filebin.net/3chfqasxpqu21y8n/my-custom-lora-v1.safetensors" + ], + "description": "URL or the path to the LoRA weights. Or HF model name.", + "type": "string", + "title": "Path" + }, + "scale": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Scale", + "description": "\n The scale of the LoRA weight. 
This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + }, + "force": { + "description": "If set to true, the embedding will be forced to be used.", + "type": "boolean", + "title": "Force", + "default": false + } + }, + "x-fal-order-properties": [ + "path", + "scale", + "force" + ], + "required": [ + "path" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/fast-sdxl/image-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-sdxl/image-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/fast-sdxl/image-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastSdxlImageToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-sdxl/image-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastSdxlImageToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/fast-sdxl/inpainting", + "metadata": { + "display_name": "Stable Diffusion XL", + "category": "image-to-image", + "description": "Run SDXL at the speed of light", + "status": "active", + "tags": [ + "diffusion", + "high-res", + "lora", + "ip-adapter", + "controlnet" + ], + "updated_at": "2026-01-26T21:44:51.811Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/fast-sdxl.jpeg", + "model_url": "https://fal.run/fal-ai/fast-sdxl/inpainting", + "github_url": "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/LICENSE.md", + "date": "2024-04-16T00:00:00.000Z", + "group": { + "key": "stable-diffusion-xl", + "label": "Inpainting" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/fast-sdxl/inpainting", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/fast-sdxl/inpainting queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/fast-sdxl/inpainting", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/fast-sdxl.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/fast-sdxl/inpainting", + "documentationUrl": "https://fal.ai/models/fal-ai/fast-sdxl/inpainting/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FastSdxlInpaintingInput": { + "title": "InpaintingInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "a tiger sitting on a park bench" + ], + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results.", + "type": "string", + "title": "Prompt" + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image.", + "title": "Image Size", + "default": "square_hd" + }, + "embeddings": { + "description": "The list of embeddings to use.", + "type": "array", + "title": "Embeddings", + "items": { + "$ref": "#/components/schemas/Embedding" + }, + "default": [] + }, + "expand_prompt": { + "description": "If set to true, the prompt will be expanded with additional prompts.", + "type": "boolean", + "title": "Expand Prompt", + "default": false + }, + "loras": { + "description": "The list of LoRA weights to use.", + "type": "array", + "title": "Loras", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "default": [] + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 7.5 + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "negative_prompt": { + "examples": [ + "cartoon, illustration, animation. face. male, female" + ], + "description": "\n The negative prompt to use.Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + "type": "string", + "title": "Negative Prompt", + "default": "" + }, + "format": { + "enum": [ + "jpeg", + "png" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Format", + "default": "jpeg" + }, + "num_images": { + "minimum": 1, + "maximum": 8, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_url": { + "examples": [ + "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" + ], + "description": "The URL of the image to use as a starting point for the generation.", + "type": "string", + "title": "Image Url" + }, + "strength": { + "minimum": 0.01, + "maximum": 1, + "type": "number", + "title": "Strength", + "description": "determines how much the generated image resembles the initial image", + "default": 0.95 + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "safety_checker_version": { + "enum": [ + "v1", + "v2" + ], + "description": "The version of the safety checker to use. v1 is the default CompVis safety checker. 
v2 uses a custom ViT model.", + "type": "string", + "title": "Safety Checker Version", + "default": "v1" + }, + "request_id": { + "description": "\n An id bound to a request, can be used with response to identify the request\n itself.\n ", + "type": "string", + "title": "Request Id", + "default": "" + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 65, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 25 + }, + "mask_url": { + "examples": [ + "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" + ], + "description": "The URL of the mask to use for inpainting.", + "type": "string", + "title": "Mask Url" + }, + "seed": { + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "image_url", + "mask_url", + "prompt", + "negative_prompt", + "image_size", + "num_inference_steps", + "guidance_scale", + "strength", + "seed", + "sync_mode", + "num_images", + "loras", + "embeddings", + "enable_safety_checker", + "safety_checker_version", + "expand_prompt", + "format", + "request_id" + ], + "required": [ + "image_url", + "mask_url", + "prompt" + ] + }, + "FastSdxlInpaintingOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "description": "The generated image files info.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ", + "type": "integer", + "title": "Seed" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Embedding": { + "title": "Embedding", + "type": "object", + "properties": { + "tokens": { + "description": "The list of tokens to use for the embedding.", + "type": "array", + "title": "Tokens", + "items": { + "type": "string" + }, + "default": [ + "", + "" + ] + }, + "path": { + "examples": [ + "https://civitai.com/api/download/models/135931", + "https://filebin.net/3chfqasxpqu21y8n/my-custom-lora-v1.safetensors" + ], + "description": "URL or the path to the embedding weights.", + "type": "string", + "title": "Path" + } + }, + "x-fal-order-properties": [ + "path", + "tokens" + ], + "required": [ + "path" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "examples": [ + "https://civitai.com/api/download/models/135931", + "https://filebin.net/3chfqasxpqu21y8n/my-custom-lora-v1.safetensors" + ], + "description": "URL or the path to the LoRA weights. Or HF model name.", + "type": "string", + "title": "Path" + }, + "scale": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Scale", + "description": "\n The scale of the LoRA weight. 
This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + }, + "force": { + "description": "If set to true, the embedding will be forced to be used.", + "type": "boolean", + "title": "Force", + "default": false + } + }, + "x-fal-order-properties": [ + "path", + "scale", + "force" + ], + "required": [ + "path" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/fast-sdxl/inpainting/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-sdxl/inpainting/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/fast-sdxl/inpainting": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastSdxlInpaintingInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-sdxl/inpainting/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastSdxlInpaintingOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/face-to-sticker", + "metadata": { + "display_name": "Face to Sticker", + "category": "image-to-image", + "description": "Create stickers from faces.", + "status": "active", + "tags": [ + "sticker", + "editing" + ], + "updated_at": "2026-01-26T21:44:53.516Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/face-to-sticker.png", + "model_url": "https://fal.run/fal-ai/face-to-sticker", + "license_type": "research", + "date": "2024-03-11T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/face-to-sticker", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/face-to-sticker queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/face-to-sticker", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/face-to-sticker.png", + "playgroundUrl": "https://fal.ai/models/fal-ai/face-to-sticker", + "documentationUrl": "https://fal.ai/models/fal-ai/face-to-sticker/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FaceToStickerInput": { + "title": "FaceToStickerInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "a person" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results." 
+ }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to false, the safety checker will be disabled.", + "default": true + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "square_hd" + }, + "ip_adapter_weight": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "IP adapter weight", + "description": "The weight of the IP adapter.", + "default": 0.2 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/face_to_sticker/elon.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the video." + }, + "upscale_steps": { + "minimum": 1, + "maximum": 20, + "type": "integer", + "title": "Upscale steps", + "description": "The number of steps to use for upscaling. Only used if `upscale` is `true`.", + "default": 10 + }, + "instant_id_strength": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Instant ID strength", + "description": "The strength of the instant ID.", + "default": 0.7 + }, + "upscale": { + "title": "Upscale", + "type": "boolean", + "description": "Whether to upscale the image 2x.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 10, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 4.5 + }, + "num_inference_steps": { + "minimum": 10, + "maximum": 40, + "type": "integer", + "title": "Number of inference steps", + "description": "\n Increasing the amount of steps tells Stable Diffusion that it should take more steps\n to generate your final result which can increase the amount of detail in your image.\n ", + "default": 20 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. 
moustache, blurry, low resolution).\n ", + "default": "" + }, + "ip_adapter_noise": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "IP adapter noise", + "description": "The amount of noise to add to the IP adapter.", + "default": 0.5 + } + }, + "x-fal-order-properties": [ + "image_url", + "prompt", + "negative_prompt", + "num_inference_steps", + "guidance_scale", + "instant_id_strength", + "ip_adapter_weight", + "ip_adapter_noise", + "image_size", + "upscale", + "upscale_steps", + "seed", + "enable_safety_checker" + ], + "required": [ + "image_url", + "prompt" + ] + }, + "FaceToStickerOutput": { + "title": "FaceToStickerOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "file_size": 560358, + "height": 1024, + "file_name": "cd8bab71b946470099d5fa20c7eed440.png", + "content_type": "image/PNG", + "url": "https://storage.googleapis.com/falserverless/model_tests/face_to_sticker/elon_output_1.png", + "width": 1024 + }, + { + "file_size": 452906, + "height": 1024, + "file_name": "181ae8fa12534c6f9285a991b415d9a7.png", + "content_type": "image/PNG", + "url": "https://storage.googleapis.com/falserverless/model_tests/face_to_sticker/elon_output_2.png", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated images.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "sticker_image": { + "examples": [ + { + "file_size": 560358, + "height": 1024, + "file_name": "cd8bab71b946470099d5fa20c7eed440.png", + "content_type": "image/PNG", + "url": "https://storage.googleapis.com/falserverless/model_tests/face_to_sticker/elon_output_1.png", + "width": 1024 + } + ], + "title": "Sticker Image", + "description": "The generated face sticker image.", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + }, + "sticker_image_background_removed": { + "examples": [ + { + "file_size": 452906, + "height": 1024, + "file_name": "181ae8fa12534c6f9285a991b415d9a7.png", + "content_type": "image/PNG", + "url": "https://storage.googleapis.com/falserverless/model_tests/face_to_sticker/elon_output_2.png", + "width": 1024 + } + ], + "title": "Sticker Image Background Removed", + "description": "The generated face sticker image with the background removed.", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + }, + "seed": { + "examples": [ + 3625437076 + ], + "title": "Seed", + "type": "integer", + "description": "Seed used during the inference." 
+ }, + "has_nsfw_concepts": { + "examples": [ + { + "sticker_image": false, + "sticker_image_background_removed": false + } + ], + "additionalProperties": { + "type": "boolean" + }, + "type": "object", + "title": "Has Nsfw Concepts", + "description": "\n Whether the generated images contain NSFW concepts.\n The key is the image type and the value is a boolean.\n " + } + }, + "x-fal-order-properties": [ + "images", + "sticker_image", + "sticker_image_background_removed", + "seed", + "has_nsfw_concepts" + ], + "required": [ + "images", + "sticker_image", + "sticker_image_background_removed", + "seed", + "has_nsfw_concepts" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "description": "Represents an image file.", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/face-to-sticker/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/face-to-sticker/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/face-to-sticker": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FaceToStickerInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/face-to-sticker/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FaceToStickerOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/photomaker", + "metadata": { + "display_name": "PhotoMaker", + "category": "image-to-image", + "description": "Customizing Realistic Human Photos via Stacked ID Embedding", + "status": "active", + "tags": [ + "editing", + "customization", + "realism", + "personalization" + ], + "updated_at": "2026-01-26T21:44:15.606Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/photomaker.jpeg", + "model_url": "https://fal.run/fal-ai/photomaker", + "github_url": "https://github.com/TencentARC/PhotoMaker/blob/main/LICENSE", + "license_type": "commercial", + "date": "2024-03-08T00:00:00.000Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/photomaker", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/photomaker queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/photomaker", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/photomaker.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/photomaker", + "documentationUrl": "https://fal.ai/models/fal-ai/photomaker/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." 
+ }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PhotomakerInput": { + "title": "PhotoMakerInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "instagram photo, portrait photo of a man img, colorful, perfect face, natural skin, hard shadows, film grain" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results." + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Number of images", + "description": "\n Number of images to generate in one request. Note that the higher the batch size,\n the longer it will take to generate the images.\n ", + "default": 1 + }, + "style_strength": { + "minimum": 15, + "maximum": 50, + "type": "integer", + "title": "Style strength (in %)", + "default": 20 + }, + "style": { + "enum": [ + "(No style)", + "Cinematic", + "Disney Character", + "Digital Art", + "Photographic", + "Fantasy art", + "Neonpunk", + "Enhance", + "Comic book", + "Lowpoly", + "Line art" + ], + "title": "Style", + "type": "string", + "default": "Photographic" + }, + "guidance_scale": { + "minimum": 0.1, + "maximum": 10, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 5 + }, + "seed": { + "examples": [ + 42 + ], + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + }, + "image_archive_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/photomaker/elon.zip" + ], + "title": "Image Archive Url", + "type": "string", + "description": "The URL of the image archive containing the images you want to use." + }, + "initial_image_url": { + "title": "Initial Image Url", + "type": "string", + "description": "Optional initial image for img2img" + }, + "num_inference_steps": { + "minimum": 20, + "maximum": 100, + "type": "integer", + "title": "Number of inference steps", + "description": "\n Increasing the amount of steps tells Stable Diffusion that it should take more steps\n to generate your final result which can increase the amount of detail in your image.\n ", + "default": 50 + }, + "initial_image_strength": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Initial Image Strength", + "description": "How much noise to add to the latent image. 
0 for no noise, 1 for maximum noise.", + "default": 0.5 + }, + "base_pipeline": { + "enum": [ + "photomaker", + "photomaker-style" + ], + "title": "Base Pipeline", + "type": "string", + "description": "The base pipeline to use for generating the image.", + "default": "photomaker" + }, + "negative_prompt": { + "examples": [ + "nsfw, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + "default": "" + } + }, + "x-fal-order-properties": [ + "image_archive_url", + "prompt", + "base_pipeline", + "initial_image_url", + "initial_image_strength", + "style", + "negative_prompt", + "num_inference_steps", + "style_strength", + "num_images", + "guidance_scale", + "seed" + ], + "required": [ + "image_archive_url", + "prompt" + ] + }, + "PhotomakerOutput": { + "title": "PhotoMakerOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "file_size": 1785567, + "height": 1024, + "file_name": "87374b9db2b74f5792839b19d9b29a9a.png", + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/model_tests/photomaker/elon-output.png", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "images", + "seed" + ], + "required": [ + "images", + "seed" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + } + }, + "description": "Represents an image file.", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/photomaker/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/photomaker/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/photomaker": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PhotomakerInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/photomaker/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PhotomakerOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/creative-upscaler", + "metadata": { + "display_name": "Creative Upscaler", + "category": "image-to-image", + "description": "Create creative upscaled images.", + "status": "active", + "tags": [ + "upscaling" + ], + "updated_at": "2026-01-26T21:44:15.855Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/creative-upscaler.webp?v=3", + "thumbnail_animated_url": "https://storage.googleapis.com/falserverless/gallery/creative-upscaler-animated.webp?v=3", + "model_url": "https://fal.run/fal-ai/creative-upscaler", + "license_type": "commercial", + "date": "2024-02-27T00:00:00.000Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/creative-upscaler", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/creative-upscaler queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/creative-upscaler", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/creative-upscaler.webp?v=3", + "playgroundUrl": "https://fal.ai/models/fal-ai/creative-upscaler", + "documentationUrl": "https://fal.ai/models/fal-ai/creative-upscaler/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." 
+ }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "CreativeUpscalerInput": { + "title": "CreativeUpscalerInput", + "type": "object", + "properties": { + "shape_preservation": { + "minimum": 0, + "description": "How much to preserve the shape of the original image", + "type": "number", + "title": "Shape Preservation", + "maximum": 3, + "default": 0.25 + }, + "prompt": { + "examples": [], + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results. If no prompt is provide BLIP2 will be used to generate a prompt.", + "type": "string", + "title": "Prompt", + "nullable": true + }, + "additional_embedding_url": { + "description": "The URL to the additional embeddings to use for the upscaling. Default is None", + "type": "string", + "title": "Additional Embedding Url" + }, + "enable_safety_checks": { + "description": "\n If set to true, the resulting image will be checked whether it includes any\n potentially unsafe content. If it does, it will be replaced with a black\n image.\n ", + "type": "boolean", + "title": "Enable Safety Checks", + "default": true + }, + "additional_lora_url": { + "description": "The URL to the additional LORA model to use for the upscaling. Default is None", + "type": "string", + "title": "Additional Lora Url" + }, + "guidance_scale": { + "minimum": 0, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "type": "number", + "title": "Guidance scale (CFG)", + "maximum": 16, + "default": 7.5 + }, + "scale": { + "description": "The scale of the output image. The higher the scale, the bigger the output image will be.", + "type": "number", + "minimum": 1, + "title": "Scale", + "maximum": 5, + "default": 2 + }, + "negative_prompt": { + "examples": [ + "blurry, low resolution, bad, ugly, low quality, pixelated, interpolated, compression artifacts, noisey, grainy" + ], + "description": "\n The negative prompt to use.Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + "type": "string", + "title": "Negative Prompt", + "default": "blurry, low resolution, bad, ugly, low quality, pixelated, interpolated, compression artifacts, noisey, grainy" + }, + "skip_ccsr": { + "description": "\n If set to true, the image will not be processed by the CCSR model before\n being processed by the creativity model.\n ", + "type": "boolean", + "title": "Skip Ccsr", + "default": false + }, + "additional_lora_scale": { + "description": "The scale of the additional LORA model to use for the upscaling. 
Default is 1.0", + "type": "number", + "title": "Additional Lora Scale", + "default": 1 + }, + "detail": { + "minimum": 0, + "description": "How much detail to add", + "type": "number", + "title": "Detail", + "maximum": 5, + "default": 1 + }, + "base_model_url": { + "description": "The URL to the base model to use for the upscaling", + "type": "string", + "title": "Base Model Url" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/upscale/owl.png", + "https://storage.googleapis.com/falserverless/gallery/blue-bird.jpeg" + ], + "description": "The image to upscale.", + "type": "string", + "title": "Image Url" + }, + "creativity": { + "minimum": 0, + "description": "How much the output can deviate from the original", + "type": "number", + "title": "Creativity", + "maximum": 1, + "default": 0.5 + }, + "override_size_limits": { + "description": "\n Allow for large uploads that could take a very long time.\n ", + "type": "boolean", + "title": "Override Size Limits", + "default": false + }, + "prompt_suffix": { + "description": "The suffix to add to the prompt. This is useful to add a common ending to all prompts such as 'high quality' etc or embedding tokens.", + "type": "string", + "title": "Prompt Suffix", + "default": " high quality, highly detailed, high resolution, sharp" + }, + "num_inference_steps": { + "minimum": 1, + "description": "\n The number of inference steps to use for generating the image. The more steps\n the better the image will be but it will also take longer to generate.\n ", + "type": "integer", + "title": "Num Inference Steps", + "maximum": 200, + "default": 20 + }, + "model_type": { + "examples": [ + "SD_1_5", + "SDXL" + ], + "description": "The type of model to use for the upscaling. Default is SD_1_5", + "type": "string", + "title": "Model Type", + "enum": [ + "SD_1_5", + "SDXL" + ], + "default": "SD_1_5" + }, + "seed": { + "examples": [ + 42 + ], + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "model_type", + "image_url", + "prompt", + "scale", + "creativity", + "detail", + "shape_preservation", + "prompt_suffix", + "negative_prompt", + "seed", + "guidance_scale", + "num_inference_steps", + "enable_safety_checks", + "skip_ccsr", + "override_size_limits", + "base_model_url", + "additional_lora_url", + "additional_lora_scale", + "additional_embedding_url" + ], + "required": [ + "image_url" + ] + }, + "CreativeUpscalerOutput": { + "title": "CreativeUpscalerOutput", + "type": "object", + "properties": { + "image": { + "description": "The generated image file info.", + "title": "Image", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + }, + "seed": { + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "image", + "seed" + ], + "required": [ + "image", + "seed" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "type": "integer", + "title": "Height" + }, + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/creative-upscaler/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/creative-upscaler/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
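
Combining `CreativeUpscalerInput` above with these queue paths, a hedged call sketch: the `CreativeUpscalerParams` interface below is a hand-written subset of the schema, not generated code, and `runQueued` comes from the earlier lifecycle sketch.

```ts
// Hand-transcribed subset of CreativeUpscalerInput; only image_url is required.
interface CreativeUpscalerParams {
  image_url: string
  model_type?: 'SD_1_5' | 'SDXL' // default SD_1_5
  scale?: number // 1-5, default 2: output size multiplier
  creativity?: number // 0-1, default 0.5: how far the output may deviate
  detail?: number // 0-5, default 1: how much detail to add
  shape_preservation?: number // 0-3, default 0.25
  prompt?: string // if omitted, BLIP2 generates one per the schema
}

// CreativeUpscalerOutput is { image: Image, seed: number }.
const upscaled = await runQueued<{ image: { url: string }; seed: number }>(
  'fal-ai/creative-upscaler',
  {
    image_url:
      'https://storage.googleapis.com/falserverless/model_tests/upscale/owl.png',
    scale: 2,
    creativity: 0.5,
  } satisfies CreativeUpscalerParams,
)
console.log(upscaled.image.url, upscaled.seed)
```
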
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/creative-upscaler": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreativeUpscalerInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/creative-upscaler/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreativeUpscalerOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/birefnet", + "metadata": { + "display_name": "Birefnet Background Removal", + "category": "image-to-image", + "description": "bilateral reference framework (BiRefNet) for high-resolution dichotomous image segmentation (DIS)", + "status": "active", + "tags": [ + "background removal", + "segmentation", + "high-res", + "utility" + ], + "updated_at": "2026-01-26T21:44:54.350Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/birefnet.webp", + "thumbnail_animated_url": "https://storage.googleapis.com/falserverless/gallery/birefnet-animated.webp", + "model_url": "https://fal.run/fal-ai/birefnet", + "date": "2024-02-27T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/birefnet", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/birefnet queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/birefnet", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/birefnet.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/birefnet", + "documentationUrl": "https://fal.ai/models/fal-ai/birefnet/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "BirefnetInput": { + "title": "Input", + "type": "object", + "properties": { + "operating_resolution": { + "enum": [ + "1024x1024", + "2048x2048" + ], + "title": "Operating Resolution", + "type": "string", + "description": "The resolution to operate on. 
The higher the resolution, the more accurate the output will be for high res input images.", + "default": "1024x1024" + }, + "output_format": { + "enum": [ + "webp", + "png", + "gif" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output image", + "default": "png" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/birefnet-input.jpeg" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to remove background from" + }, + "model": { + "enum": [ + "General Use (Light)", + "General Use (Heavy)", + "Portrait" + ], + "title": "Model", + "type": "string", + "description": "\n Model to use for background removal.\n The 'General Use (Light)' model is the original model used in the BiRefNet repository.\n The 'General Use (Heavy)' model is a slower but more accurate model.\n The 'Portrait' model is a model trained specifically for portrait images.\n The 'General Use (Light)' model is recommended for most use cases.\n\n The corresponding models are as follows:\n - 'General Use (Light)': BiRefNet-DIS_ep580.pth\n - 'General Use (Heavy)': BiRefNet-massive-epoch_240.pth\n - 'Portrait': BiRefNet-portrait-TR_P3M_10k-epoch_120.pth\n ", + "default": "General Use (Light)" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "output_mask": { + "title": "Output Mask", + "type": "boolean", + "description": "Whether to output the mask used to remove the background", + "default": false + }, + "refine_foreground": { + "title": "Refine Foreground", + "type": "boolean", + "description": "Whether to refine the foreground using the estimated mask", + "default": true + } + }, + "x-fal-order-properties": [ + "model", + "operating_resolution", + "output_mask", + "refine_foreground", + "sync_mode", + "image_url", + "output_format" + ], + "required": [ + "image_url" + ] + }, + "BirefnetOutput": { + "title": "Output", + "type": "object", + "properties": { + "image": { + "examples": [ + { + "height": 1024, + "file_name": "birefnet-output.png", + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/example_outputs/birefnet-output.png", + "width": 1024 + } + ], + "title": "Image", + "description": "Image with background removed", + "allOf": [ + { + "$ref": "#/components/schemas/ImageFile" + } + ] + }, + "mask_image": { + "title": "Mask Image", + "description": "Mask used to remove the background", + "allOf": [ + { + "$ref": "#/components/schemas/ImageFile" + } + ] + } + }, + "x-fal-order-properties": [ + "image", + "mask_image" + ], + "required": [ + "image" + ] + }, + "ImageFile": { + "title": "ImageFile", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the image" + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the image" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." 
+ }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/birefnet/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/birefnet/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/birefnet": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BirefnetInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/birefnet/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BirefnetOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/playground-v25/image-to-image", + "metadata": { + "display_name": "Playground v2.5", + "category": "image-to-image", + "description": "State-of-the-art open-source model in aesthetic quality", + "status": "active", + "tags": [ + "artistic", + "style" + ], + "updated_at": "2026-01-26T21:44:55.410Z", + "is_favorited": false, + "thumbnail_url": "https://fal-cdn.batuhan-941.workers.dev/files/monkey/8WXjrk5HEam79CPlQlo5T.jpeg", + "model_url": "https://fal.run/fal-ai/playground-v25/image-to-image", + "github_url": "https://huggingface.co/playgroundai/playground-v2.5-1024px-aesthetic/blob/main/LICENSE.md", + "date": "2024-02-21T00:00:00.000Z", + "group": { + "key": "playground-v25", + "label": "Image to Image" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/playground-v25/image-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/playground-v25/image-to-image queue.", + 
"x-fal-metadata": { + "endpointId": "fal-ai/playground-v25/image-to-image", + "category": "image-to-image", + "thumbnailUrl": "https://fal-cdn.batuhan-941.workers.dev/files/monkey/8WXjrk5HEam79CPlQlo5T.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/playground-v25/image-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/playground-v25/image-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PlaygroundV25ImageToImageInput": { + "x-fal-order-properties": [ + "image_url", + "prompt", + "negative_prompt", + "image_size", + "num_inference_steps", + "guidance_scale", + "strength", + "seed", + "num_images", + "embeddings", + "enable_safety_checker", + "safety_checker_version", + "expand_prompt", + "format", + "guidance_rescale", + "request_id", + "preserve_aspect_ratio", + "crop_output" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "an island near sea, with seagulls, moon shining over the sea, light house, boats int he background, fish flying over the sea" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results." 
+ }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "square_hd" + }, + "embeddings": { + "title": "Embeddings", + "type": "array", + "description": "The list of embeddings to use.", + "items": { + "$ref": "#/components/schemas/Embedding" + }, + "default": [] + }, + "expand_prompt": { + "title": "Expand Prompt", + "type": "boolean", + "description": "If set to true, the prompt will be expanded with additional prompts.", + "default": false + }, + "guidance_rescale": { + "minimum": 0, + "title": "Guidance Rescale", + "type": "number", + "description": "The rescale factor for the CFG.", + "maximum": 1, + "default": 0 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance scale (CFG)", + "type": "number", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "maximum": 20, + "default": 3 + }, + "preserve_aspect_ratio": { + "title": "Preserve Aspect Ratio", + "type": "boolean", + "description": "\n If set to true, the aspect ratio of the generated image will be preserved even\n if the image size is too large. However, if the image is not a multiple of 32\n in width or height, it will be resized to the nearest multiple of 32. By default,\n this snapping to the nearest multiple of 32 will not preserve the aspect ratio.\n Set crop_output to True, to crop the output to the proper aspect ratio\n after generating.\n ", + "default": false + }, + "negative_prompt": { + "examples": [ + "cartoon, illustration, animation. face. male, female" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use.Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + "default": "" + }, + "crop_output": { + "title": "Crop Output", + "type": "boolean", + "description": "\n If set to true, the output cropped to the proper aspect ratio after generating.\n ", + "default": false + }, + "format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "description": "The number of images to generate.", + "maximum": 8, + "default": 1 + }, + "image_url": { + "examples": [ + "https://fal-cdn.batuhan-941.workers.dev/files/tiger/IExuP-WICqaIesLZAZPur.jpeg" + ], + "title": "Image Url", + "type": "string", + "description": "The URL of the image to use as a starting point for the generation." + }, + "strength": { + "minimum": 0.05, + "title": "Strength", + "type": "number", + "description": "determines how much the generated image resembles the initial image", + "maximum": 1, + "default": 0.95 + }, + "safety_checker_version": { + "enum": [ + "v1", + "v2" + ], + "title": "Safety Checker Version", + "type": "string", + "description": "The version of the safety checker to use. 
v1 is the default CompVis safety checker. v2 uses a custom ViT model.", + "default": "v1" + }, + "request_id": { + "title": "Request Id", + "type": "string", + "description": "\n An id bound to a request, can be used with response to identify the request\n itself.\n ", + "default": "" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "description": "The number of inference steps to perform.", + "maximum": 65, + "default": 25 + } + }, + "title": "ImageToImagePlaygroundv25Input", + "required": [ + "image_url", + "prompt" + ] + }, + "PlaygroundV25ImageToImageOutput": { + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "title": "Output", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "Embedding": { + "x-fal-order-properties": [ + "path", + "tokens" + ], + "type": "object", + "properties": { + "tokens": { + "title": "Tokens", + "type": "array", + "description": "The list of tokens to use for the embedding.", + "items": { + "type": "string" + }, + "default": [ + "", + "" + ] + }, + "path": { + "examples": [ + "https://civitai.com/api/download/models/135931", + "https://filebin.net/3chfqasxpqu21y8n/my-custom-lora-v1.safetensors" + ], + "title": "Path", + "type": "string", + "description": "URL or the path to the embedding weights." 
+ } + }, + "title": "Embedding", + "required": [ + "path" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "title": "Image", + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/playground-v25/image-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/playground-v25/image-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/playground-v25/image-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PlaygroundV25ImageToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/playground-v25/image-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PlaygroundV25ImageToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/fast-lightning-sdxl/image-to-image", + "metadata": { + "display_name": "Stable Diffusion XL Lightning", + "category": "image-to-image", + "description": "Run SDXL at the speed of light", + "status": "active", + "tags": [ + "diffusion", + "lightning", + "editing" + ], + "updated_at": "2026-01-26T21:44:54.613Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/stable-diffusion-xl-lightning.webp", + "model_url": "https://fal.run/fal-ai/fast-lightning-sdxl/image-to-image", + "github_url": "https://huggingface.co/ByteDance/SDXL-Lightning/blob/main/LICENSE.md", + "date": "2024-02-21T00:00:00.000Z", + "group": { + "key": "stable-diffusion-xl-lightning", + "label": "Image to Image" + }, + "highlighted": false, + "pinned": false + 
}, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/fast-lightning-sdxl/image-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/fast-lightning-sdxl/image-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/fast-lightning-sdxl/image-to-image", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/stable-diffusion-xl-lightning.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/fast-lightning-sdxl/image-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/fast-lightning-sdxl/image-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FastLightningSdxlImageToImageInput": { + "title": "ImageToImageLightningInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "an island near sea, with seagulls, moon shining over the sea, light house, boats int he background, fish flying over the sea" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results." + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "square_hd" + }, + "embeddings": { + "title": "Embeddings", + "type": "array", + "description": "The list of embeddings to use.", + "items": { + "$ref": "#/components/schemas/Embedding" + }, + "default": [] + }, + "expand_prompt": { + "title": "Expand Prompt", + "type": "boolean", + "description": "If set to true, the prompt will be expanded with additional prompts.", + "default": false + }, + "guidance_rescale": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Guidance Rescale", + "description": "The rescale factor for the CFG.", + "default": 0 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "preserve_aspect_ratio": { + "title": "Preserve Aspect Ratio", + "type": "boolean", + "description": "\n If set to true, the aspect ratio of the generated image will be preserved even\n if the image size is too large. However, if the image is not a multiple of 32\n in width or height, it will be resized to the nearest multiple of 32. 
By default,\n this snapping to the nearest multiple of 32 will not preserve the aspect ratio.\n Set crop_output to True, to crop the output to the proper aspect ratio\n after generating.\n ", + "default": false + }, + "crop_output": { + "title": "Crop Output", + "type": "boolean", + "description": "\n If set to true, the output cropped to the proper aspect ratio after generating.\n ", + "default": false + }, + "format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "num_images": { + "minimum": 1, + "maximum": 8, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_url": { + "examples": [ + "https://fal-cdn.batuhan-941.workers.dev/files/tiger/IExuP-WICqaIesLZAZPur.jpeg" + ], + "title": "Image Url", + "type": "string", + "description": "The URL of the image to use as a starting point for the generation." + }, + "strength": { + "minimum": 0.05, + "maximum": 1, + "type": "number", + "title": "Strength", + "description": "determines how much the generated image resembles the initial image", + "default": 0.95 + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "safety_checker_version": { + "enum": [ + "v1", + "v2" + ], + "title": "Safety Checker Version", + "type": "string", + "description": "The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model.", + "default": "v1" + }, + "request_id": { + "title": "Request Id", + "type": "string", + "description": "\n An id bound to a request, can be used with response to identify the request\n itself.\n ", + "default": "" + }, + "num_inference_steps": { + "enum": [ + "1", + "2", + "4", + "8" + ], + "title": "Num Inference Steps", + "type": "string", + "description": "The number of inference steps to perform.", + "default": 4 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + } + }, + "x-fal-order-properties": [ + "image_url", + "prompt", + "image_size", + "num_inference_steps", + "strength", + "seed", + "sync_mode", + "num_images", + "embeddings", + "enable_safety_checker", + "safety_checker_version", + "expand_prompt", + "format", + "guidance_rescale", + "request_id", + "preserve_aspect_ratio", + "crop_output" + ], + "required": [ + "image_url", + "prompt" + ] + }, + "FastLightningSdxlImageToImageOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Embedding": { + "title": "Embedding", + "type": "object", + "properties": { + "tokens": { + "title": "Tokens", + "type": "array", + "description": "The list of tokens to use for the embedding.", + "items": { + "type": "string" + }, + "default": [ + "", + "" + ] + }, + "path": { + "examples": [ + "https://civitai.com/api/download/models/135931", + "https://filebin.net/3chfqasxpqu21y8n/my-custom-lora-v1.safetensors" + ], + "title": "Path", + "type": "string", + "description": "URL or the path to the embedding weights." + } + }, + "x-fal-order-properties": [ + "path", + "tokens" + ], + "required": [ + "path" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/fast-lightning-sdxl/image-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-lightning-sdxl/image-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
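
Every endpoint in this file also exposes the cancel route shown here: `PUT /{endpoint_id}/requests/{request_id}/cancel`, returning `{ success: boolean }`. A small helper sketch reusing the assumed `BASE` and `headers` from the lifecycle example:

```ts
async function cancelRequest(
  endpoint: string,
  requestId: string,
): Promise<boolean> {
  const res = await fetch(`${BASE}/${endpoint}/requests/${requestId}/cancel`, {
    method: 'PUT',
    headers,
  })
  // Response schema: { success?: boolean }, i.e. whether cancellation succeeded.
  const body = (await res.json()) as { success?: boolean }
  return body.success === true
}
```
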
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/fast-lightning-sdxl/image-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastLightningSdxlImageToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-lightning-sdxl/image-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastLightningSdxlImageToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/fast-lightning-sdxl/inpainting", + "metadata": { + "display_name": "Stable Diffusion XL Lightning", + "category": "image-to-image", + "description": "Run SDXL at the speed of light", + "status": "active", + "tags": [ + "diffusion", + "lightning" + ], + "updated_at": "2026-01-26T21:44:54.870Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/stable-diffusion-xl-lightning.webp", + "model_url": "https://fal.run/fal-ai/fast-lightning-sdxl/inpainting", + "github_url": "https://huggingface.co/ByteDance/SDXL-Lightning/blob/main/LICENSE.md", + "date": "2024-02-21T00:00:00.000Z", + "group": { + "key": "stable-diffusion-xl-lightning", + "label": "Inpainting" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/fast-lightning-sdxl/inpainting", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/fast-lightning-sdxl/inpainting queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/fast-lightning-sdxl/inpainting", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/stable-diffusion-xl-lightning.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/fast-lightning-sdxl/inpainting", + "documentationUrl": "https://fal.ai/models/fal-ai/fast-lightning-sdxl/inpainting/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
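
One quirk worth flagging in `FastLightningSdxlImageToImageInput` above: `num_inference_steps` is typed as a string enum (`"1" | "2" | "4" | "8"`) even though its declared default is the bare number 4, so a typed client should send the string form. A sketch, again via the illustrative `runQueued`:

```ts
const lightning = await runQueued<{ images: Array<{ url: string }> }>(
  'fal-ai/fast-lightning-sdxl/image-to-image',
  {
    image_url:
      'https://fal-cdn.batuhan-941.workers.dev/files/tiger/IExuP-WICqaIesLZAZPur.jpeg',
    prompt: 'an island near sea, with seagulls',
    num_inference_steps: '4', // string enum per the schema, despite the numeric default
    strength: 0.95,
    sync_mode: false, // true returns media as a data URI and skips request history
  },
)
```
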
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FastLightningSdxlInpaintingInput": { + "title": "InpaintingLightningInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "a tiger sitting on a park bench" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results." + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "square_hd" + }, + "embeddings": { + "title": "Embeddings", + "type": "array", + "description": "The list of embeddings to use.", + "items": { + "$ref": "#/components/schemas/Embedding" + }, + "default": [] + }, + "expand_prompt": { + "title": "Expand Prompt", + "type": "boolean", + "description": "If set to true, the prompt will be expanded with additional prompts.", + "default": false + }, + "guidance_rescale": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Guidance Rescale", + "description": "The rescale factor for the CFG.", + "default": 0 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "num_images": { + "minimum": 1, + "maximum": 8, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_url": { + "examples": [ + "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" + ], + "title": "Image Url", + "type": "string", + "description": "The URL of the image to use as a starting point for the generation." + }, + "strength": { + "minimum": 0.01, + "maximum": 1, + "type": "number", + "title": "Strength", + "description": "determines how much the generated image resembles the initial image", + "default": 0.95 + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "safety_checker_version": { + "enum": [ + "v1", + "v2" + ], + "title": "Safety Checker Version", + "type": "string", + "description": "The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model.", + "default": "v1" + }, + "request_id": { + "title": "Request Id", + "type": "string", + "description": "\n An id bound to a request, can be used with response to identify the request\n itself.\n ", + "default": "" + }, + "num_inference_steps": { + "enum": [ + "1", + "2", + "4", + "8" + ], + "title": "Num Inference Steps", + "type": "string", + "description": "The number of inference steps to perform.", + "default": 4 + }, + "mask_url": { + "examples": [ + "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" + ], + "title": "Mask Url", + "type": "string", + "description": "The URL of the mask to use for inpainting." 
+ }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + } + }, + "x-fal-order-properties": [ + "image_url", + "mask_url", + "prompt", + "image_size", + "num_inference_steps", + "strength", + "seed", + "sync_mode", + "num_images", + "embeddings", + "enable_safety_checker", + "safety_checker_version", + "expand_prompt", + "format", + "guidance_rescale", + "request_id" + ], + "required": [ + "image_url", + "mask_url", + "prompt" + ] + }, + "FastLightningSdxlInpaintingOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Embedding": { + "title": "Embedding", + "type": "object", + "properties": { + "tokens": { + "title": "Tokens", + "type": "array", + "description": "The list of tokens to use for the embedding.", + "items": { + "type": "string" + }, + "default": [ + "", + "" + ] + }, + "path": { + "examples": [ + "https://civitai.com/api/download/models/135931", + "https://filebin.net/3chfqasxpqu21y8n/my-custom-lora-v1.safetensors" + ], + "title": "Path", + "type": "string", + "description": "URL or the path to the embedding weights." 
+ } + }, + "x-fal-order-properties": [ + "path", + "tokens" + ], + "required": [ + "path" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/fast-lightning-sdxl/inpainting/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-lightning-sdxl/inpainting/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/fast-lightning-sdxl/inpainting": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastLightningSdxlInpaintingInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-lightning-sdxl/inpainting/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastLightningSdxlInpaintingOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/playground-v25/inpainting", + "metadata": { + "display_name": "Playground v2.5", + "category": "image-to-image", + "description": "State-of-the-art open-source model in aesthetic quality", + "status": "active", + "tags": [ + "inpaint", + "artistic", + "style" + ], + "updated_at": "2026-01-26T21:44:55.175Z", + "is_favorited": false, + "thumbnail_url": "https://fal-cdn.batuhan-941.workers.dev/files/monkey/8WXjrk5HEam79CPlQlo5T.jpeg", + "model_url": "https://fal.run/fal-ai/playground-v25/inpainting", + "github_url": "https://huggingface.co/playgroundai/playground-v2.5-1024px-aesthetic/blob/main/LICENSE.md", + "date": "2024-02-21T00:00:00.000Z", + "group": { + "key": "playground-v25", + "label": "Inpainting" + }, + "highlighted": 
false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/playground-v25/inpainting", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/playground-v25/inpainting queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/playground-v25/inpainting", + "category": "image-to-image", + "thumbnailUrl": "https://fal-cdn.batuhan-941.workers.dev/files/monkey/8WXjrk5HEam79CPlQlo5T.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/playground-v25/inpainting", + "documentationUrl": "https://fal.ai/models/fal-ai/playground-v25/inpainting/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PlaygroundV25InpaintingInput": { + "x-fal-order-properties": [ + "image_url", + "mask_url", + "prompt", + "negative_prompt", + "image_size", + "num_inference_steps", + "guidance_scale", + "strength", + "seed", + "num_images", + "embeddings", + "enable_safety_checker", + "safety_checker_version", + "expand_prompt", + "format", + "guidance_rescale", + "request_id" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "a tiger sitting on a park bench" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results." 
+ }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "square_hd" + }, + "embeddings": { + "title": "Embeddings", + "type": "array", + "description": "The list of embeddings to use.", + "items": { + "$ref": "#/components/schemas/Embedding" + }, + "default": [] + }, + "expand_prompt": { + "title": "Expand Prompt", + "type": "boolean", + "description": "If set to true, the prompt will be expanded with additional prompts.", + "default": false + }, + "guidance_rescale": { + "minimum": 0, + "title": "Guidance Rescale", + "type": "number", + "description": "The rescale factor for the CFG.", + "maximum": 1, + "default": 0 + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance scale (CFG)", + "type": "number", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "maximum": 20, + "default": 3 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "negative_prompt": { + "examples": [ + "cartoon, illustration, animation. face. male, female" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use.Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + "default": "" + }, + "format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "description": "The number of images to generate.", + "maximum": 8, + "default": 1 + }, + "image_url": { + "examples": [ + "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" + ], + "title": "Image Url", + "type": "string", + "description": "The URL of the image to use as a starting point for the generation." + }, + "strength": { + "minimum": 0.01, + "title": "Strength", + "type": "number", + "description": "determines how much the generated image resembles the initial image", + "maximum": 1, + "default": 0.95 + }, + "safety_checker_version": { + "enum": [ + "v1", + "v2" + ], + "title": "Safety Checker Version", + "type": "string", + "description": "The version of the safety checker to use. v1 is the default CompVis safety checker. 
v2 uses a custom ViT model.", + "default": "v1" + }, + "request_id": { + "title": "Request Id", + "type": "string", + "description": "\n An id bound to a request, can be used with response to identify the request\n itself.\n ", + "default": "" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + }, + "mask_url": { + "examples": [ + "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" + ], + "title": "Mask Url", + "type": "string", + "description": "The URL of the mask to use for inpainting." + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "description": "The number of inference steps to perform.", + "maximum": 65, + "default": 25 + } + }, + "title": "InpaintingPlaygroundv25Input", + "required": [ + "image_url", + "mask_url", + "prompt" + ] + }, + "PlaygroundV25InpaintingOutput": { + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "title": "Output", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "Embedding": { + "x-fal-order-properties": [ + "path", + "tokens" + ], + "type": "object", + "properties": { + "tokens": { + "title": "Tokens", + "type": "array", + "description": "The list of tokens to use for the embedding.", + "items": { + "type": "string" + }, + "default": [ + "", + "" + ] + }, + "path": { + "examples": [ + "https://civitai.com/api/download/models/135931", + "https://filebin.net/3chfqasxpqu21y8n/my-custom-lora-v1.safetensors" + ], + "title": "Path", + "type": "string", + "description": "URL or the path to the embedding weights." 
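
Unlike the Lightning endpoints, the playground-v2.5 inpainting input above keeps `guidance_scale` (CFG, 0 to 20, default 3) and an integer `num_inference_steps` (1 to 65). A consumption sketch for its `Output` shape, which requires `images`, `timings`, `seed`, `has_nsfw_concepts`, and `prompt` (`runQueued` from the earlier lifecycle sketch):

```ts
const out = await runQueued<{
  images: Array<{ url: string; width: number; height: number }>
  timings: Record<string, number>
  seed: number
  has_nsfw_concepts: boolean[]
  prompt: string
}>('fal-ai/playground-v25/inpainting', {
  image_url:
    'https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png',
  mask_url:
    'https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png',
  prompt: 'a tiger sitting on a park bench',
  guidance_scale: 3,
})
// Skip any image flagged by the safety checker.
out.images.forEach((img, i) => {
  if (!out.has_nsfw_concepts[i]) console.log(img.url)
})
```
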
+ } + }, + "title": "Embedding", + "required": [ + "path" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "title": "Image", + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/playground-v25/inpainting/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/playground-v25/inpainting/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/playground-v25/inpainting": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PlaygroundV25InpaintingInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/playground-v25/inpainting/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PlaygroundV25InpaintingOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/fast-lcm-diffusion/inpainting", + "metadata": { + "display_name": "Latent Consistency Models (v1.5/XL)", + "category": "image-to-image", + "description": "Run SDXL at the speed of light", + "status": "active", + "tags": [ + "lcm", + "diffusion", + "turbo", + "real-time", + "editing" + ], + "updated_at": "2026-01-26T21:44:55.743Z", + "is_favorited": false, + "thumbnail_url": "https://fal-cdn.batuhan-941.workers.dev/files/rabbit/P322iQXlz2jOOuRFBWK-q.jpeg", + "model_url": "https://fal.run/fal-ai/fast-lcm-diffusion/inpainting", + "github_url": "https://huggingface.co/latent-consistency/lcm-lora-sdxl/blob/main/README.md", + "date": "2024-02-19T00:00:00.000Z", + "group": { + "key": "fast-lcm-diffusion", + "label": "Inpainting" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + 
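Editor's note: the fal-ai/playground-v25/inpainting entry above requires only image_url, mask_url, and prompt; every other field falls back to the defaults recorded in its schema. Below is a minimal sketch of submitting a job to its queue route, assuming Node 18+ global fetch, a FAL_KEY environment variable, and the `Key <token>` Authorization format used by fal's hosted endpoints (the schema itself only names the Authorization header). The POST returns a QueueStatus payload, not the image; retrieval is covered in the polling sketch further below.

  // Sketch: submit an inpainting job to the queue route documented above.
  // Payload fields and defaults come from PlaygroundV25InpaintingInput.
  const FAL_KEY = process.env.FAL_KEY ?? ''

  async function submitInpainting(): Promise<string> {
    const res = await fetch('https://queue.fal.run/fal-ai/playground-v25/inpainting', {
      method: 'POST',
      headers: {
        Authorization: `Key ${FAL_KEY}`, // assumption: "Key <token>" format
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        image_url: 'https://example.com/source.png', // required (placeholder URL)
        mask_url: 'https://example.com/mask.png',    // required (placeholder URL)
        prompt: 'a tiger sitting on a park bench',   // required
        num_inference_steps: 25,                     // schema default, range 1..65
        guidance_scale: 3,                           // schema default, range 0..20
      }),
    })
    const queued = (await res.json()) as { request_id: string; status_url?: string }
    return queued.request_id
  }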
"openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/fast-lcm-diffusion/inpainting", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/fast-lcm-diffusion/inpainting queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/fast-lcm-diffusion/inpainting", + "category": "image-to-image", + "thumbnailUrl": "https://fal-cdn.batuhan-941.workers.dev/files/rabbit/P322iQXlz2jOOuRFBWK-q.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/fast-lcm-diffusion/inpainting", + "documentationUrl": "https://fal.ai/models/fal-ai/fast-lcm-diffusion/inpainting/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FastLcmDiffusionInpaintingInput": { + "title": "InpaintingLCMInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "a tiger sitting on a park bench" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results." + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "square_hd" + }, + "expand_prompt": { + "title": "Expand Prompt", + "type": "boolean", + "description": "If set to true, the prompt will be expanded with additional prompts.", + "default": false + }, + "guidance_rescale": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Guidance Rescale", + "description": "The rescale factor for the CFG.", + "default": 0 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 1.5 + }, + "negative_prompt": { + "examples": [ + "cartoon, illustration, animation. face. male, female" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use.Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. 
moustache, blurry, low resolution).\n ", + "default": "" + }, + "format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "num_images": { + "minimum": 1, + "maximum": 8, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_url": { + "examples": [ + "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" + ], + "title": "Image Url", + "type": "string", + "description": "The URL of the image to use as a starting point for the generation." + }, + "strength": { + "minimum": 0.01, + "maximum": 1, + "type": "number", + "title": "Strength", + "description": "determines how much the generated image resembles the initial image", + "default": 0.95 + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": true + }, + "safety_checker_version": { + "enum": [ + "v1", + "v2" + ], + "title": "Safety Checker Version", + "type": "string", + "description": "The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model.", + "default": "v1" + }, + "request_id": { + "title": "Request Id", + "type": "string", + "description": "\n An id bound to a request, can be used with response to identify the request\n itself.\n ", + "default": "" + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 32, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 6 + }, + "mask_url": { + "examples": [ + "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" + ], + "title": "Mask Url", + "type": "string", + "description": "The URL of the mask to use for inpainting." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + }, + "model_name": { + "enum": [ + "stabilityai/stable-diffusion-xl-base-1.0", + "runwayml/stable-diffusion-v1-5" + ], + "title": "Model Name", + "type": "string", + "description": "The name of the model to use.", + "examples": [ + "stabilityai/stable-diffusion-xl-base-1.0", + "runwayml/stable-diffusion-v1-5" + ], + "default": "stabilityai/stable-diffusion-xl-base-1.0" + } + }, + "x-fal-order-properties": [ + "model_name", + "image_url", + "mask_url", + "prompt", + "negative_prompt", + "image_size", + "num_inference_steps", + "guidance_scale", + "strength", + "seed", + "sync_mode", + "num_images", + "enable_safety_checker", + "safety_checker_version", + "expand_prompt", + "format", + "guidance_rescale", + "request_id" + ], + "required": [ + "image_url", + "mask_url", + "prompt" + ] + }, + "FastLcmDiffusionInpaintingOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." 
+ }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/fast-lcm-diffusion/inpainting/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-lcm-diffusion/inpainting/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/fast-lcm-diffusion/inpainting": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastLcmDiffusionInpaintingInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-lcm-diffusion/inpainting/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastLcmDiffusionInpaintingOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/fast-lcm-diffusion/image-to-image", + "metadata": { + "display_name": "Latent Consistency Models (v1.5/XL)", + "category": "image-to-image", + "description": "Run SDXL at the speed of light", + "status": "active", + "tags": [ + "lcm", + "diffusion", + "turbo", + "real-time", + "editing" + ], + "updated_at": "2026-01-26T21:44:55.867Z", + "is_favorited": false, + "thumbnail_url": "https://fal-cdn.batuhan-941.workers.dev/files/rabbit/P322iQXlz2jOOuRFBWK-q.jpeg", + "model_url": "https://fal.run/fal-ai/fast-lcm-diffusion/image-to-image", + "github_url": "https://huggingface.co/latent-consistency/lcm-lora-sdxl/blob/main/README.md", + "date": "2024-02-19T00:00:00.000Z", + "group": { + "key": "fast-lcm-diffusion", + "label": "Image to Image" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/fast-lcm-diffusion/image-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/fast-lcm-diffusion/image-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/fast-lcm-diffusion/image-to-image", + "category": "image-to-image", + "thumbnailUrl": "https://fal-cdn.batuhan-941.workers.dev/files/rabbit/P322iQXlz2jOOuRFBWK-q.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/fast-lcm-diffusion/image-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/fast-lcm-diffusion/image-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
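Editor's note: every entry in this file exposes the same four queue routes (POST to submit, GET .../status, PUT .../cancel, GET .../requests/{request_id} for the result) and repeats the same QueueStatus schema, so the polling half of a round trip can be written once. A sketch under the same FAL_KEY / `Key <token>` assumptions as above; the fixed one-second delay is an arbitrary choice, not something the schemas prescribe.

  // Generic poller matching the QueueStatus schema repeated for each endpoint.
  type QueueStatus = {
    status: 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED'
    request_id: string
    response_url?: string
    status_url?: string
    queue_position?: number
  }

  async function pollUntilDone<T>(endpoint: string, requestId: string): Promise<T> {
    const headers = { Authorization: `Key ${process.env.FAL_KEY ?? ''}` }
    const base = `https://queue.fal.run/${endpoint}/requests/${requestId}`
    for (;;) {
      // logs=1 asks the status route to include logs, per the shared parameter docs.
      const status = (await (await fetch(`${base}/status?logs=1`, { headers })).json()) as QueueStatus
      if (status.status === 'COMPLETED') break
      await new Promise((r) => setTimeout(r, 1000)) // simple fixed backoff
    }
    // The result route returns the endpoint-specific output schema.
    return (await (await fetch(base, { headers })).json()) as T
  }

  // Usage, e.g.: pollUntilDone<PlaygroundV25InpaintingOutput>('fal-ai/playground-v25/inpainting', id)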
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FastLcmDiffusionImageToImageInput": { + "title": "ImageToImageLCMInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "an island near sea, with seagulls, moon shining over the sea, light house, boats int he background, fish flying over the sea" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results." + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "square_hd" + }, + "expand_prompt": { + "title": "Expand Prompt", + "type": "boolean", + "description": "If set to true, the prompt will be expanded with additional prompts.", + "default": false + }, + "guidance_rescale": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Guidance Rescale", + "description": "The rescale factor for the CFG.", + "default": 0 + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 1.5 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "preserve_aspect_ratio": { + "title": "Preserve Aspect Ratio", + "type": "boolean", + "description": "\n If set to true, the aspect ratio of the generated image will be preserved even\n if the image size is too large. However, if the image is not a multiple of 32\n in width or height, it will be resized to the nearest multiple of 32. By default,\n this snapping to the nearest multiple of 32 will not preserve the aspect ratio.\n Set crop_output to True, to crop the output to the proper aspect ratio\n after generating.\n ", + "default": false + }, + "negative_prompt": { + "examples": [ + "cartoon, illustration, animation. face. male, female" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use.Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + "default": "" + }, + "crop_output": { + "title": "Crop Output", + "type": "boolean", + "description": "\n If set to true, the output cropped to the proper aspect ratio after generating.\n ", + "default": false + }, + "format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "num_images": { + "minimum": 1, + "maximum": 8, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_url": { + "examples": [ + "https://fal-cdn.batuhan-941.workers.dev/files/tiger/IExuP-WICqaIesLZAZPur.jpeg" + ], + "title": "Image Url", + "type": "string", + "description": "The URL of the image to use as a starting point for the generation." 
+ }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": true + }, + "model_name": { + "enum": [ + "stabilityai/stable-diffusion-xl-base-1.0", + "runwayml/stable-diffusion-v1-5" + ], + "title": "Model Name", + "type": "string", + "description": "The name of the model to use.", + "examples": [ + "stabilityai/stable-diffusion-xl-base-1.0", + "runwayml/stable-diffusion-v1-5" + ], + "default": "stabilityai/stable-diffusion-xl-base-1.0" + }, + "safety_checker_version": { + "enum": [ + "v1", + "v2" + ], + "title": "Safety Checker Version", + "type": "string", + "description": "The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model.", + "default": "v1" + }, + "request_id": { + "title": "Request Id", + "type": "string", + "description": "\n An id bound to a request, can be used with response to identify the request\n itself.\n ", + "default": "" + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 32, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 6 + }, + "strength": { + "minimum": 0.05, + "maximum": 1, + "type": "number", + "title": "Strength", + "description": "determines how much the generated image resembles the initial image", + "default": 0.95 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + } + }, + "x-fal-order-properties": [ + "model_name", + "image_url", + "prompt", + "negative_prompt", + "image_size", + "num_inference_steps", + "guidance_scale", + "strength", + "seed", + "sync_mode", + "num_images", + "enable_safety_checker", + "safety_checker_version", + "expand_prompt", + "format", + "guidance_rescale", + "request_id", + "preserve_aspect_ratio", + "crop_output" + ], + "required": [ + "image_url", + "prompt" + ] + }, + "FastLcmDiffusionImageToImageOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/fast-lcm-diffusion/image-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-lcm-diffusion/image-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
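Editor's note: compared with the inpainting variant, FastLcmDiffusionImageToImageInput above drops mask_url and adds preserve_aspect_ratio and crop_output. A hand-written type mirroring the schema's required fields and documented defaults; this is a sketch, not generated output, and the trailing comments restate the schema's own ranges.

  // Hand-derived from FastLcmDiffusionImageToImageInput (sketch only).
  type ImageSizePreset =
    | 'square_hd' | 'square'
    | 'portrait_4_3' | 'portrait_16_9'
    | 'landscape_4_3' | 'landscape_16_9'

  interface FastLcmImageToImageInput {
    image_url: string                     // required
    prompt: string                        // required
    model_name?:
      | 'stabilityai/stable-diffusion-xl-base-1.0' // default
      | 'runwayml/stable-diffusion-v1-5'
    image_size?: ImageSizePreset | { width: number; height: number } // default 'square_hd'
    num_inference_steps?: number          // 1..32, default 6
    guidance_scale?: number               // 0..20, default 1.5
    strength?: number                     // 0.05..1, default 0.95
    preserve_aspect_ratio?: boolean       // default false
    crop_output?: boolean                 // default false
    seed?: number                         // same seed + prompt reproduces the image
  }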
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/fast-lcm-diffusion/image-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastLcmDiffusionImageToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-lcm-diffusion/image-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastLcmDiffusionImageToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/retoucher", + "metadata": { + "display_name": "Face Retoucher", + "category": "image-to-image", + "description": "Automatically retouches faces to smooth skin and remove blemishes.", + "status": "active", + "tags": [ + "editing" + ], + "updated_at": "2026-01-26T21:44:57.172Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/model_tests/retoucher/Screenshot%20from%202024-02-13%2011-40-09.png", + "model_url": "https://fal.run/fal-ai/retoucher", + "date": "2024-02-13T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/retoucher", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/retoucher queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/retoucher", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/model_tests/retoucher/Screenshot%20from%202024-02-13%2011-40-09.png", + "playgroundUrl": "https://fal.ai/models/fal-ai/retoucher", + "documentationUrl": "https://fal.ai/models/fal-ai/retoucher/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "RetoucherInput": { + "title": "RetoucherInput", + "type": "object", + "properties": { + "seed": { + "description": "Seed for reproducibility. 
Different seeds will make slightly different results.", + "type": "integer", + "title": "Seed" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/retoucher/Dalton-Meereskosmetik-Magazin-Pickelguide-Model_1.resized.jpg" + ], + "description": "The URL of the image to be retouched.", + "type": "string", + "title": "Image Url" + } + }, + "x-fal-order-properties": [ + "image_url", + "seed" + ], + "required": [ + "image_url" + ] + }, + "RetoucherOutput": { + "title": "RetoucherOutput", + "type": "object", + "properties": { + "image": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/model_tests/retoucher/retoucher_example_output.png" + } + ], + "description": "The generated image file info.", + "title": "Image", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + }, + "seed": { + "description": "The seed used for the generation.", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "image", + "seed" + ], + "required": [ + "image", + "seed" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "type": "integer", + "title": "Height" + }, + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/retoucher/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/retoucher/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/retoucher": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RetoucherInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/retoucher/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RetoucherOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/imageutils/depth", + "metadata": { + "display_name": "Midas Depth Estimation", + "category": "image-to-image", + "description": "Create depth maps using Midas depth estimation.", + "status": "active", + "tags": [ + "depth", + "utility" + ], + "updated_at": "2026-01-26T21:44:57.772Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/depth-estimation.jpeg", + "model_url": "https://fal.run/fal-ai/imageutils/depth", + "github_url": "https://huggingface.co/lllyasviel/Annotators/blob/main/dpt_hybrid-midas-501f0c75.pt", + "date": "2024-02-13T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/imageutils/depth", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/imageutils/depth queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/imageutils/depth", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/depth-estimation.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/imageutils/depth", + "documentationUrl": "https://fal.ai/models/fal-ai/imageutils/depth/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
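Editor's note: fal-ai/retoucher has the smallest input surface in this batch: image_url is the only required field, seed the only optional one, and the output is a single Image plus the seed that was used. A round trip combining the submit call with the hypothetical pollUntilDone() helper sketched earlier.

  // Retoucher round trip; URL is a placeholder, helper is the sketch above.
  async function retouch(imageUrl: string) {
    const res = await fetch('https://queue.fal.run/fal-ai/retoucher', {
      method: 'POST',
      headers: {
        Authorization: `Key ${process.env.FAL_KEY ?? ''}`, // assumed auth format
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({ image_url: imageUrl }), // seed is optional
    })
    const { request_id } = (await res.json()) as { request_id: string }
    // RetoucherOutput: { image: Image; seed: number }
    return pollUntilDone<{ image: { url: string }; seed: number }>('fal-ai/retoucher', request_id)
  }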
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageutilsDepthInput": { + "title": "DepthMapInput", + "type": "object", + "properties": { + "bg_th": { + "description": "bg_th", + "type": "number", + "title": "Bg Th", + "default": 0.1 + }, + "a": { + "description": "a", + "type": "number", + "title": "A", + "default": 6.283185307179586 + }, + "depth_and_normal": { + "description": "depth_and_normal", + "type": "boolean", + "title": "Depth And Normal", + "default": false + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/remove_background/elephant.jpg" + ], + "description": "Input image url.", + "type": "string", + "title": "Image Url" + } + }, + "x-fal-order-properties": [ + "image_url", + "a", + "bg_th", + "depth_and_normal" + ], + "required": [ + "image_url" + ] + }, + "ImageutilsDepthOutput": { + "title": "DepthMapOutput", + "type": "object", + "properties": { + "image": { + "description": "The depth map.", + "title": "Image", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + } + }, + "x-fal-order-properties": [ + "image" + ], + "required": [ + "image" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "type": "integer", + "title": "Height" + }, + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/imageutils/depth/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/imageutils/depth/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/imageutils/depth": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageutilsDepthInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/imageutils/depth/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageutilsDepthOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/imageutils/marigold-depth", + "metadata": { + "display_name": "Marigold Depth Estimation", + "category": "image-to-image", + "description": "Create depth maps using Marigold depth estimation.", + "status": "active", + "tags": [ + "depth", + "utility" + ], + "updated_at": "2026-01-26T21:44:59.546Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/marigold.png", + "model_url": "https://fal.run/fal-ai/imageutils/marigold-depth", + "github_url": "https://github.com/prs-eth/Marigold/blob/02cdfa5280d50afad976fdb6512de02011042c1c/LICENSE.txt", + "date": "2023-12-28T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/imageutils/marigold-depth", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/imageutils/marigold-depth queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/imageutils/marigold-depth", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/marigold.png", + "playgroundUrl": "https://fal.ai/models/fal-ai/imageutils/marigold-depth", + "documentationUrl": "https://fal.ai/models/fal-ai/imageutils/marigold-depth/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
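Editor's note: the Midas depth input above is terse: only image_url is required, and the remaining knobs are the cryptically named `a` parameter (defaulting to 2π) and the bg_th background threshold. A request-body sketch with the documented defaults spelled out; the image URL is a placeholder.

  // ImageutilsDepthInput request body; only image_url is required.
  const depthRequest = {
    image_url: 'https://example.com/photo.jpg', // placeholder
    a: 2 * Math.PI,          // schema default 6.283185307179586
    bg_th: 0.1,              // schema default
    depth_and_normal: false, // schema default; true also returns normals
  }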
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageutilsMarigoldDepthInput": { + "title": "MarigoldDepthMapInput", + "type": "object", + "properties": { + "ensemble_size": { + "description": "Number of predictions to average over. Defaults to `10`. The higher the number, the more accurate the result, but the slower the inference.", + "type": "integer", + "example": 10, + "minimum": 2, + "title": "Ensemble Size", + "maximum": 50, + "default": 10 + }, + "num_inference_steps": { + "description": "Number of denoising steps. Defaults to `10`. The higher the number, the more accurate the result, but the slower the inference.", + "type": "integer", + "example": 10, + "minimum": 2, + "title": "Num Inference Steps", + "maximum": 50, + "default": 10 + }, + "processing_res": { + "description": "Maximum processing resolution. Defaults `0` which means it uses the size of the input image.", + "type": "integer", + "example": 0, + "minimum": 0, + "title": "Processing Res", + "maximum": 2048, + "default": 0 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/remove_background/elephant.jpg" + ], + "description": "Input image url.", + "type": "string", + "title": "Image Url" + } + }, + "x-fal-order-properties": [ + "image_url", + "num_inference_steps", + "ensemble_size", + "processing_res" + ], + "required": [ + "image_url" + ] + }, + "ImageutilsMarigoldDepthOutput": { + "title": "MarigoldDepthMapOutput", + "type": "object", + "properties": { + "image": { + "description": "The depth map.", + "title": "Image", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + } + }, + "x-fal-order-properties": [ + "image" + ], + "required": [ + "image" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "type": "integer", + "title": "Height" + }, + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/imageutils/marigold-depth/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/imageutils/marigold-depth/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/imageutils/marigold-depth": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageutilsMarigoldDepthInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/imageutils/marigold-depth/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageutilsMarigoldDepthOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pulid", + "metadata": { + "display_name": "PuLID", + "category": "image-to-image", + "description": "Tuning-free ID customization.", + "status": "active", + "tags": [ + "editing", + "customization", + "personalization" + ], + "updated_at": "2026-01-26T21:44:59.980Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/tiger/fM1msKxGhFY7BO3J_vNiE.png", + "model_url": "https://fal.run/fal-ai/pulid", + "date": "2023-12-14T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pulid", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pulid queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pulid", + "category": "image-to-image", + "thumbnailUrl": "https://fal.media/files/tiger/fM1msKxGhFY7BO3J_vNiE.png", + "playgroundUrl": "https://fal.ai/models/fal-ai/pulid", + "documentationUrl": 
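Editor's note: Marigold (schema above) trades speed for accuracy through ensemble_size and num_inference_steps, both ranging 2..50 with a default of 10 per the schema's own descriptions, while a processing_res of 0 keeps the input image's resolution. A request-body sketch with a placeholder URL.

  // ImageutilsMarigoldDepthInput request body; only image_url is required.
  const marigoldRequest = {
    image_url: 'https://example.com/photo.jpg', // placeholder
    num_inference_steps: 10, // 2..50, default 10; higher = more accurate, slower
    ensemble_size: 10,       // 2..50, default 10; predictions averaged together
    processing_res: 0,       // 0 = use the input image size, max 2048
  }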
"https://fal.ai/models/fal-ai/pulid/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PulidInput": { + "title": "InputModel", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "portrait, impressionist painting, loose brushwork, vibrant color, light and shadow play" + ], + "title": "Prompt", + "type": "string", + "description": "Prompt to generate the face from" + }, + "num_images": { + "minimum": 1, + "maximum": 8, + "type": "integer", + "title": "Num Images", + "description": "Number of images to generate", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "Size of the generated image", + "default": { + "height": 1024, + "width": 768 + } + }, + "id_scale": { + "maximum": 5, + "type": "number", + "title": "Id Scale", + "description": "ID scale", + "exclusiveMinimum": 0, + "default": 0.8 + }, + "mode": { + "enum": [ + "fidelity", + "extreme style" + ], + "title": "Mode", + "type": "string", + "description": "Mode of generation", + "default": "fidelity" + }, + "id_mix": { + "title": "Id Mix", + "type": "boolean", + "description": "if you want to mix two ID image, please turn this on, otherwise, turn this off", + "default": false + }, + "guidance_scale": { + "minimum": 1, + "maximum": 1.5, + "type": "number", + "title": "Guidance Scale", + "description": "Guidance scale", + "default": 1.2 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 12, + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of steps to take", + "default": 4 + }, + "reference_images": { + "examples": [ + [ + { + "image_url": "https://storage.googleapis.com/falserverless/pulid/img2.png" + }, + { + "image_url": "https://storage.googleapis.com/falserverless/pulid/img1.png" + } + ] + ], + "title": "Reference Images", + "type": "array", + "description": "List of reference faces, ideally 4 images.", + "items": { + "$ref": "#/components/schemas/ReferenceFace" + } + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt to generate the face from", + "default": "flaws in the eyes, flaws in the face, flaws, lowres, non-HDRi, low quality, worst quality,artifacts noise, text, watermark, glitch, deformed, mutated, ugly, disfigured, hands, low resolution, partially rendered objects, deformed or partially rendered eyes, deformed, deformed 
eyeballs, cross-eyed,blurry" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility" + } + }, + "x-fal-order-properties": [ + "reference_images", + "prompt", + "negative_prompt", + "num_images", + "guidance_scale", + "num_inference_steps", + "seed", + "image_size", + "id_scale", + "mode", + "id_mix" + ], + "required": [ + "reference_images", + "prompt" + ] + }, + "PulidOutput": { + "title": "OutputModel", + "type": "object", + "properties": { + "images": { + "title": "Images", + "type": "array", + "description": "List of generated images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed used for reproducibility" + } + }, + "x-fal-order-properties": [ + "images", + "seed" + ], + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "ReferenceFace": { + "title": "ReferenceFace", + "type": "object", + "properties": { + "image_url": { + "title": "Image Url", + "type": "string", + "description": "URL of the reference face image" + } + }, + "x-fal-order-properties": [ + "image_url" + ], + "required": [ + "image_url" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pulid/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pulid/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/pulid": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PulidInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pulid/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PulidOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/fast-sdxl-controlnet-canny/image-to-image", + "metadata": { + "display_name": "ControlNet SDXL", + "category": "image-to-image", + "description": "Generate Images with ControlNet.", + "status": "active", + "tags": [ + "diffusion", + "controlnet", + "editing", + "manipulation" + ], + "updated_at": "2026-01-26T21:45:00.106Z", + "is_favorited": false, + "thumbnail_url": "https://fal-cdn.batuhan-941.workers.dev/files/rabbit/ynzNm1f0ZoDCuOvAE9tKR.jpeg", + "model_url": "https://fal.run/fal-ai/fast-sdxl-controlnet-canny/image-to-image", + "date": "2023-12-01T00:00:00.000Z", + "group": { + "key": "controlnet-sdxl", + "label": "Image to Image" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/fast-sdxl-controlnet-canny/image-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/fast-sdxl-controlnet-canny/image-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/fast-sdxl-controlnet-canny/image-to-image", + "category": "image-to-image", + "thumbnailUrl": "https://fal-cdn.batuhan-941.workers.dev/files/rabbit/ynzNm1f0ZoDCuOvAE9tKR.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/fast-sdxl-controlnet-canny/image-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/fast-sdxl-controlnet-canny/image-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
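Editor's note: PuLID (schema above) differs from the other image-to-image entries in taking a list of reference faces (ideally four, per the schema) and in its unusually narrow ranges: guidance_scale spans only 1..1.5 and num_inference_steps 1..12. A request-body sketch with placeholder URLs; all values shown are the schema's defaults or examples.

  // PulidInput request body; reference_images and prompt are required.
  const pulidRequest = {
    reference_images: [
      { image_url: 'https://example.com/face-1.png' }, // placeholders
      { image_url: 'https://example.com/face-2.png' },
    ],
    prompt: 'portrait, impressionist painting, loose brushwork',
    mode: 'fidelity' as const,  // or 'extreme style'
    id_scale: 0.8,              // (0, 5], default 0.8
    guidance_scale: 1.2,        // 1..1.5, default 1.2
    num_inference_steps: 4,     // 1..12, default 4
    image_size: { width: 768, height: 1024 }, // schema default
  }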
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FastSdxlControlnetCannyImageToImageInput": { + "title": "ImageToImageControlNetInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Ice fortress, aurora skies, polar wildlife, twilight" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results." + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image. Leave it none to automatically infer from the control image.", + "nullable": true + }, + "expand_prompt": { + "title": "Expand Prompt", + "type": "boolean", + "description": "If set to true, the prompt will be expanded with additional prompts.", + "default": false + }, + "loras": { + "title": "Loras", + "type": "array", + "description": "The list of LoRA weights to use.", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "default": [] + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance scale (CFG)", + "type": "number", + "maximum": 20, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 7.5 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": false + }, + "negative_prompt": { + "examples": [ + "cartoon, illustration, animation. face. male, female" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use.Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + "default": "" + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 8, + "description": "The number of images to generate.", + "default": 1 + }, + "controlnet_conditioning_scale": { + "minimum": 0, + "title": "Controlnet Conditioning Scale", + "type": "number", + "maximum": 1, + "description": "The scale of the controlnet conditioning.", + "default": 0.5 + }, + "image_url": { + "examples": [ + "https://fal-cdn.batuhan-941.workers.dev/files/tiger/IExuP-WICqaIesLZAZPur.jpeg" + ], + "title": "Image Url", + "type": "string", + "description": "The URL of the image to use as a starting point for the generation." 
+ }, + "strength": { + "minimum": 0.01, + "title": "Strength", + "type": "number", + "maximum": 1, + "description": "determines how much the generated image resembles the initial image", + "default": 0.95 + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ", + "default": false + }, + "control_image_url": { + "examples": [ + "https://avatars.githubusercontent.com/u/74778219" + ], + "title": "Control Image Url", + "type": "string", + "description": "The URL of the control image." + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 65, + "description": "The number of inference steps to perform.", + "default": 25 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + } + }, + "x-fal-order-properties": [ + "prompt", + "control_image_url", + "controlnet_conditioning_scale", + "image_url", + "negative_prompt", + "image_size", + "num_inference_steps", + "guidance_scale", + "strength", + "seed", + "sync_mode", + "num_images", + "loras", + "enable_safety_checker", + "expand_prompt" + ], + "required": [ + "image_url", + "prompt", + "control_image_url" + ] + }, + "FastSdxlControlnetCannyImageToImageOutput": { + "title": "Output", + "type": "object", + "properties": { + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "examples": [ + "https://civitai.com/api/download/models/135931", + "https://filebin.net/3chfqasxpqu21y8n/my-custom-lora-v1.safetensors" + ], + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights. Or HF model name." + }, + "scale": { + "minimum": 0, + "title": "Scale", + "type": "number", + "maximum": 1, + "description": "\n The scale of the LoRA weight. 
This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + } + }, + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/fast-sdxl-controlnet-canny/image-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-sdxl-controlnet-canny/image-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/fast-sdxl-controlnet-canny/image-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastSdxlControlnetCannyImageToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-sdxl-controlnet-canny/image-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastSdxlControlnetCannyImageToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/fast-sdxl-controlnet-canny/inpainting", + "metadata": { + "display_name": "ControlNet SDXL", + "category": "image-to-image", + "description": "Generate Images with ControlNet.", + "status": "active", + "tags": [ + "diffusion", + "controlnet", + "editing", + "manipulation" + ], + "updated_at": "2026-01-26T21:45:00.232Z", + "is_favorited": false, + "thumbnail_url": "https://fal-cdn.batuhan-941.workers.dev/files/rabbit/ynzNm1f0ZoDCuOvAE9tKR.jpeg", + "model_url": "https://fal.run/fal-ai/fast-sdxl-controlnet-canny/inpainting", + "date": "2023-12-01T00:00:00.000Z", + "group": { + "key": "controlnet-sdxl", + "label": "Inpainting" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/fast-sdxl-controlnet-canny/inpainting", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/fast-sdxl-controlnet-canny/inpainting queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/fast-sdxl-controlnet-canny/inpainting", + "category": "image-to-image", + "thumbnailUrl": "https://fal-cdn.batuhan-941.workers.dev/files/rabbit/ynzNm1f0ZoDCuOvAE9tKR.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/fast-sdxl-controlnet-canny/inpainting", + "documentationUrl": "https://fal.ai/models/fal-ai/fast-sdxl-controlnet-canny/inpainting/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FastSdxlControlnetCannyInpaintingInput": { + "title": "InpaintingControlNetInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Ice fortress, aurora skies, polar wildlife, twilight" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results." + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image. Leave it none to automatically infer from the control image.", + "nullable": true + }, + "expand_prompt": { + "title": "Expand Prompt", + "type": "boolean", + "description": "If set to true, the prompt will be expanded with additional prompts.", + "default": false + }, + "loras": { + "title": "Loras", + "type": "array", + "description": "The list of LoRA weights to use.", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "default": [] + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance scale (CFG)", + "type": "number", + "maximum": 20, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 7.5 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": false + }, + "negative_prompt": { + "examples": [ + "cartoon, illustration, animation. face. male, female" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use.Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + "default": "" + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 8, + "description": "The number of images to generate.", + "default": 1 + }, + "controlnet_conditioning_scale": { + "minimum": 0, + "title": "Controlnet Conditioning Scale", + "type": "number", + "maximum": 1, + "description": "The scale of the controlnet conditioning.", + "default": 0.5 + }, + "image_url": { + "examples": [ + "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" + ], + "title": "Image Url", + "type": "string", + "description": "The URL of the image to use as a starting point for the generation." + }, + "strength": { + "minimum": 0.01, + "title": "Strength", + "type": "number", + "maximum": 1, + "description": "determines how much the generated image resembles the initial image", + "default": 0.95 + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. 
This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ", + "default": false + }, + "control_image_url": { + "examples": [ + "https://avatars.githubusercontent.com/u/74778219" + ], + "title": "Control Image Url", + "type": "string", + "description": "The URL of the control image." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + }, + "mask_url": { + "examples": [ + "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" + ], + "title": "Mask Url", + "type": "string", + "description": "The URL of the mask to use for inpainting." + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 65, + "description": "The number of inference steps to perform.", + "default": 25 + } + }, + "x-fal-order-properties": [ + "prompt", + "control_image_url", + "controlnet_conditioning_scale", + "image_url", + "mask_url", + "negative_prompt", + "image_size", + "num_inference_steps", + "guidance_scale", + "strength", + "seed", + "sync_mode", + "num_images", + "loras", + "enable_safety_checker", + "expand_prompt" + ], + "required": [ + "image_url", + "mask_url", + "prompt", + "control_image_url" + ] + }, + "FastSdxlControlnetCannyInpaintingOutput": { + "title": "Output", + "type": "object", + "properties": { + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "examples": [ + "https://civitai.com/api/download/models/135931", + "https://filebin.net/3chfqasxpqu21y8n/my-custom-lora-v1.safetensors" + ], + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights. Or HF model name." + }, + "scale": { + "minimum": 0, + "title": "Scale", + "type": "number", + "maximum": 1, + "description": "\n The scale of the LoRA weight. 
This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + } + }, + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/fast-sdxl-controlnet-canny/inpainting/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-sdxl-controlnet-canny/inpainting/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/fast-sdxl-controlnet-canny/inpainting": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastSdxlControlnetCannyInpaintingInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-sdxl-controlnet-canny/inpainting/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastSdxlControlnetCannyInpaintingOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/lcm-sd15-i2i", + "metadata": { + "display_name": "Optimized Latent Consistency (SDv1.5)", + "category": "image-to-image", + "description": "Produce high-quality images with minimal inference steps. 
Optimized for 512x512 input image size.", + "status": "active", + "tags": [ + "diffusion", + "lcm", + "real-time" + ], + "updated_at": "2026-01-26T21:45:00.483Z", + "is_favorited": false, + "thumbnail_url": "https://fal-cdn.batuhan-941.workers.dev/files/kangaroo/4HI4amBAnKjvUi0SYLVwb.jpeg", + "model_url": "https://fal.run/fal-ai/lcm-sd15-i2i", + "date": "2023-11-09T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/lcm-sd15-i2i", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/lcm-sd15-i2i queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/lcm-sd15-i2i", + "category": "image-to-image", + "thumbnailUrl": "https://fal-cdn.batuhan-941.workers.dev/files/kangaroo/4HI4amBAnKjvUi0SYLVwb.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/lcm-sd15-i2i", + "documentationUrl": "https://fal.ai/models/fal-ai/lcm-sd15-i2i/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LcmSd15I2iInput": { + "title": "LCMI2IInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "masterpiece, colorful, photo of a beach in hawaii, sun" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results." + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 8, + "description": "\n The number of images to generate. The function will return a list of images\n with the same prompt and negative prompt but different seeds.\n ", + "default": 1 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/lcm/beach.png" + ], + "title": "Image Url", + "type": "string", + "description": "The image to use as a base." + }, + "strength": { + "minimum": 0, + "title": "Strength", + "type": "number", + "maximum": 1, + "description": "The strength of the image.", + "default": 0.8 + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ", + "default": false + }, + "enable_safety_checks": { + "title": "Enable Safety Checks", + "type": "boolean", + "description": "\n If set to true, the resulting image will be checked whether it includes any\n potentially unsafe content. 
If it does, it will be replaced with a black\n image.\n ", + "default": true + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance scale (CFG)", + "type": "number", + "maximum": 16, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 1 + }, + "seed": { + "examples": [ + 42 + ], + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + }, + "request_id": { + "title": "Request Id", + "type": "string", + "description": "\n An id bound to a request, can be used with response to identify the request\n itself.\n ", + "default": "" + }, + "negative_prompt": { + "examples": [ + "cartoon, illustration, animation. face. male, female", + "ugly, deformed" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use.Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + "default": "" + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 12, + "description": "\n The number of inference steps to use for generating the image. The more steps\n the better the image will be but it will also take longer to generate.\n ", + "default": 4 + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "strength", + "negative_prompt", + "seed", + "guidance_scale", + "num_inference_steps", + "sync_mode", + "num_images", + "enable_safety_checks", + "request_id" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "LcmSd15I2iOutput": { + "title": "LCMOutput", + "type": "object", + "properties": { + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "request_id": { + "title": "Request Id", + "type": "string", + "description": "\n An id bound to a request, can be used with response to identify the request\n itself.\n ", + "default": "" + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + }, + "num_inference_steps": { + "title": "Num Inference Steps", + "type": "integer", + "description": "\n Number of inference steps used to generate the image. It will be the same value of the one passed in the\n input or the default one in case none was passed.\n ", + "default": 4 + }, + "nsfw_content_detected": { + "title": "Nsfw Content Detected", + "type": "array", + "description": "\n A list of booleans indicating whether the generated image contains any\n potentially unsafe content. 
If the safety check is disabled, this field\n will have a false for each generated image.\n ", + "items": { + "type": "boolean" + } + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "num_inference_steps", + "request_id", + "nsfw_content_detected" + ], + "required": [ + "images", + "timings", + "seed", + "nsfw_content_detected" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/lcm-sd15-i2i/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/lcm-sd15-i2i/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/lcm-sd15-i2i": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LcmSd15I2iInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/lcm-sd15-i2i/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LcmSd15I2iOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/inpaint", + "metadata": { + "display_name": "Inpainting sdxl and sd", + "category": "image-to-image", + "description": "Inpaint images with SD and SDXL", + "status": "active", + "tags": [ + "editing", + "diffusion" + ], + "updated_at": "2026-01-26T21:45:00.734Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/inpaint.jpeg", + "model_url": "https://fal.run/fal-ai/inpaint", + "date": "2023-11-04T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/inpaint", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/inpaint queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/inpaint", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/inpaint.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/inpaint", + "documentationUrl": "https://fal.ai/models/fal-ai/inpaint/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "InpaintInput": { + "title": "InpaintInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "a photo of a cat" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results." 
+ }, + "image_url": { + "examples": [ + "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" + ], + "title": "Image Url", + "type": "string", + "description": "Input image for img2img or inpaint mode" + }, + "model_name": { + "examples": [ + "diffusers/stable-diffusion-xl-1.0-inpainting-0.1", + "stabilityai/stable-diffusion-xl-base-1.0", + "runwayml/stable-diffusion-v1-5", + "SG161222/Realistic_Vision_V2.0" + ], + "title": "Model Name", + "type": "string", + "description": "URL or HuggingFace ID of the base model to generate the image." + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 7.5 + }, + "num_inference_steps": { + "minimum": 0, + "maximum": 150, + "type": "integer", + "title": "Number of inference steps", + "description": "\n Increasing the amount of steps tells Stable Diffusion that it should take more steps\n to generate your final result which can increase the amount of detail in your image.\n ", + "default": 30 + }, + "mask_url": { + "examples": [ + "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" + ], + "title": "Mask Url", + "type": "string", + "description": "Input mask for inpaint mode. Black areas will be preserved, white areas will be inpainted." + }, + "negative_prompt": { + "examples": [ + "cartoon, painting, illustration, (worst quality, low quality, normal quality:2)", + "nsfw, cartoon, (epicnegative:0.9)" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + "default": "" + }, + "seed": { + "examples": [ + 1234 + ], + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + } + }, + "x-fal-order-properties": [ + "model_name", + "prompt", + "negative_prompt", + "image_url", + "mask_url", + "num_inference_steps", + "guidance_scale", + "seed" + ], + "required": [ + "model_name", + "prompt", + "image_url", + "mask_url" + ] + }, + "InpaintOutput": { + "title": "InpaintOutput", + "type": "object", + "properties": { + "image": { + "title": "Image", + "description": "The generated image files info.", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "image", + "seed" + ], + "required": [ + "image", + "seed" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." 
+ }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "description": "Represents an image file.", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/inpaint/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/inpaint/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/inpaint": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InpaintInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/inpaint/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InpaintOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/esrgan", + "metadata": { + "display_name": "Upscale Images", + "category": "image-to-image", + "description": "Upscale images by a given factor.", + "status": "active", + "tags": [ + "upscaling", + "high-res" + ], + "updated_at": "2026-01-26T21:44:16.994Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/esrgan.webp?v=2", + "thumbnail_animated_url": "https://storage.googleapis.com/falserverless/gallery/esrgan-animated.webp?v=2", + "model_url": "https://fal.run/fal-ai/esrgan", + "github_url": "https://github.com/xinntao/Real-ESRGAN/blob/master/LICENSE", + "license_type": "commercial", + "date": "2023-10-30T00:00:00.000Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/esrgan", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/esrgan queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/esrgan", + "category": "image-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/esrgan.webp?v=2", + "playgroundUrl": "https://fal.ai/models/fal-ai/esrgan", + "documentationUrl": "https://fal.ai/models/fal-ai/esrgan/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "EsrganInput": { + "x-fal-order-properties": [ + "image_url", + "scale", + "tile", + "face", + "model", + "output_format" + ], + "type": "object", + "properties": { + "model": { + "enum": [ + "RealESRGAN_x4plus", + "RealESRGAN_x2plus", + "RealESRGAN_x4plus_anime_6B", + "RealESRGAN_x4_v3", + "RealESRGAN_x4_wdn_v3", + "RealESRGAN_x4_anime_v3" + ], + "description": "Model to use for upscaling", + "type": "string", + "title": "Model", + "default": "RealESRGAN_x4plus" + }, + "face": { + "description": "Upscaling a face", + "type": "boolean", + "title": "Face", + "default": false + }, + "scale": { + "minimum": 1, + "maximum": 8, + "type": "number", + "title": "Scale", + "description": "Rescaling factor", + "default": 2 + }, + "tile": { + "description": "Tile size. Default is 0, that is no tile. When encountering the out-of-GPU-memory issue, please specify it, e.g., 400 or 200", + "type": "integer", + "title": "Tile", + "default": 0 + }, + "output_format": { + "enum": [ + "png", + "jpeg" + ], + "description": "Output image format (png or jpeg)", + "type": "string", + "title": "Output Format", + "default": "png" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/remove_background/elephant.jpg", + "https://storage.googleapis.com/falserverless/gallery/blue-bird.jpeg", + "https://storage.googleapis.com/falserverless/model_tests/upscale/image%20(8).png" + ], + "description": "Url to input image", + "type": "string", + "title": "Image Url" + } + }, + "title": "UpscaleInput", + "required": [ + "image_url" + ] + }, + "EsrganOutput": { + "x-fal-order-properties": [ + "image" + ], + "type": "object", + "properties": { + "image": { + "description": "Upscaled image", + "title": "Image", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + } + }, + "title": "UpscaleOutput", + "required": [ + "image" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/esrgan/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/esrgan/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/esrgan": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/EsrganInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/esrgan/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/EsrganOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/imageutils/rembg", + "metadata": { + "display_name": "Remove Background", + "category": "image-to-image", + "description": "Remove the background from an image.", + "status": "active", + "tags": [ + "background removal", + "utility", + "editing" + ], + "updated_at": "2026-01-26T21:44:17.119Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/panda/m5RPLAJZ_HnPQVRqCtiTi_cbfbb775bff3463bba8116be399e120d.jpg", + "thumbnail_animated_url": "https://storage.googleapis.com/falserverless/gallery/remove-background-animated.webp", + "model_url": "https://fal.run/fal-ai/imageutils/rembg", + "github_url": "https://github.com/danielgatis/rembg/blob/main/LICENSE.txt", + "license_type": "commercial", + "date": "2023-10-05T00:00:00.000Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/imageutils/rembg", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/imageutils/rembg queue.", + "x-fal-metadata": { + 
"endpointId": "fal-ai/imageutils/rembg", + "category": "image-to-image", + "thumbnailUrl": "https://fal.media/files/panda/m5RPLAJZ_HnPQVRqCtiTi_cbfbb775bff3463bba8116be399e120d.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/imageutils/rembg", + "documentationUrl": "https://fal.ai/models/fal-ai/imageutils/rembg/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageutilsRembgInput": { + "title": "RemoveBackgroundInput", + "type": "object", + "properties": { + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "crop_to_bbox": { + "description": "\n If set to true, the resulting image be cropped to a bounding box around the subject\n ", + "type": "boolean", + "title": "Crop To Bbox", + "default": false + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/remove_background/elephant.jpg" + ], + "description": "Input image url.", + "type": "string", + "title": "Image Url" + } + }, + "x-fal-order-properties": [ + "image_url", + "sync_mode", + "crop_to_bbox" + ], + "required": [ + "image_url" + ] + }, + "ImageutilsRembgOutput": { + "title": "RemoveBackgroundOutput", + "type": "object", + "properties": { + "image": { + "description": "Background removed image.", + "title": "Image", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + } + }, + "x-fal-order-properties": [ + "image" + ], + "required": [ + "image" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "type": "integer", + "title": "Height" + }, + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/imageutils/rembg/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/imageutils/rembg/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/imageutils/rembg": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageutilsRembgInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/imageutils/rembg/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageutilsRembgOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/packages/typescript/ai-fal/json/fal.models.image-to-json.json b/packages/typescript/ai-fal/json/fal.models.image-to-json.json new file mode 100644 index 00000000..6a324581 --- /dev/null +++ b/packages/typescript/ai-fal/json/fal.models.image-to-json.json @@ -0,0 +1,315 @@ +{ + "generated_at": "2026-01-28T02:51:51.878Z", + "total_models": 1, + "category": "image-to-json", + "models": [ + { + "endpoint_id": "fal-ai/bagel/understand", + "metadata": { + "display_name": "Bagel", + "category": "image-to-json", + "description": "Bagel is a 7B parameter multimodal model from Bytedance-Seed that can generate both text and images.", + "status": "active", + "tags": [ + "image-to-text", + "vlm" + ], + "updated_at": "2026-01-26T21:43:39.207Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/bagel.webp", + "model_url": "https://fal.run/fal-ai/bagel/understand", + "license_type": "commercial", + "date": "2025-05-21T18:37:27.397Z", + 
"group": { + "key": "bagel", + "label": "Image Understanding" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/bagel/understand", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/bagel/understand queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/bagel/understand", + "category": "image-to-json", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/bagel.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/bagel/understand", + "documentationUrl": "https://fal.ai/models/fal-ai/bagel/understand/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "BagelUnderstandInput": { + "title": "ImageUnderstandingInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "What is shown in the image? " + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to query the image with." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed to use for the generation." + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/bagel/wRhCPSyiKTiLnnWvUpGIl.jpeg" + ], + "title": "Image Url", + "type": "string", + "description": "The image for the query." + } + }, + "x-fal-order-properties": [ + "image_url", + "prompt", + "seed" + ], + "required": [ + "image_url", + "prompt" + ] + }, + "BagelUnderstandOutput": { + "title": "TextOutput", + "type": "object", + "properties": { + "text": { + "title": "Text", + "type": "string", + "description": "The answer to the query." + }, + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The query used for the generation." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for the generation." + }, + "timings": { + "title": "Timings", + "type": "object", + "description": "The timings of the generation." + } + }, + "x-fal-order-properties": [ + "text", + "seed", + "prompt", + "timings" + ], + "required": [ + "text", + "seed", + "prompt", + "timings" + ] + } + } + }, + "paths": { + "/fal-ai/bagel/understand/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bagel/understand/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/bagel/understand": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BagelUnderstandInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bagel/understand/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BagelUnderstandOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/packages/typescript/ai-fal/json/fal.models.image-to-video.json b/packages/typescript/ai-fal/json/fal.models.image-to-video.json new file mode 100644 index 00000000..7894d068 --- /dev/null +++ b/packages/typescript/ai-fal/json/fal.models.image-to-video.json @@ -0,0 +1,65238 @@ +{ + "generated_at": "2026-01-28T02:51:51.841Z", + "total_models": 152, + "category": "image-to-video", + "models": [ + { + "endpoint_id": "fal-ai/wan-effects", + "metadata": { + "display_name": "Wan Effects", + "category": "image-to-video", + "description": "Wan Effects generates high-quality videos with popular effects from images", + "status": "active", + "tags": [ + "motion", + "effects" + ], + "updated_at": "2026-01-26T21:41:23.935Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/Fal_Visuals_V1_012.jpg", + "thumbnail_animated_url": "https://storage.googleapis.com/fal_cdn/Fal_Visuals_V1_101.webp", + "model_url": "https://fal.run/fal-ai/wan-effects", + "license_type": "commercial", + "date": "2025-03-13T00:00:00.000Z", + "group": { + "key": "wan-2.1", + "label": "Wan Effects" + }, + "highlighted": true, + "kind": "inference", + "duration_estimate": 1, + "pinned": true + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan-effects", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan-effects queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan-effects", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/Fal_Visuals_V1_012.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan-effects", + "documentationUrl": "https://fal.ai/models/fal-ai/wan-effects/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": 
"header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanEffectsInput": { + "x-fal-order-properties": [ + "subject", + "image_url", + "effect_type", + "num_frames", + "frames_per_second", + "seed", + "aspect_ratio", + "num_inference_steps", + "lora_scale", + "turbo_mode" + ], + "type": "object", + "properties": { + "effect_type": { + "enum": [ + "squish", + "muscle", + "inflate", + "crush", + "rotate", + "gun-shooting", + "deflate", + "cakeify", + "hulk", + "baby", + "bride", + "classy", + "puppy", + "snow-white", + "disney-princess", + "mona-lisa", + "painting", + "pirate-captain", + "princess", + "jungle", + "samurai", + "vip", + "warrior", + "zen", + "assassin", + "timelapse", + "tsunami", + "fire", + "zoom-call", + "doom-fps", + "fus-ro-dah", + "hug-jesus", + "robot-face-reveal", + "super-saiyan", + "jumpscare", + "laughing", + "cartoon-jaw-drop", + "crying", + "kissing", + "angry-face", + "selfie-younger-self", + "animeify", + "blast" + ], + "description": "The type of effect to apply to the video.", + "type": "string", + "title": "Effect Type", + "default": "cakeify" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio of the output video.", + "default": "16:9" + }, + "subject": { + "examples": [ + "a cute kitten", + "Donald Trump", + "a tank", + "a ceramic vase" + ], + "description": "The subject to insert into the predefined prompt template for the selected effect.", + "type": "string", + "title": "Subject" + }, + "lora_scale": { + "minimum": 0.1, + "maximum": 2, + "type": "number", + "title": "Lora Scale", + "description": "The scale of the LoRA weight. Used to adjust effect intensity.", + "default": 1 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/web-examples/wan-effects/cat.jpg", + "https://storage.googleapis.com/falserverless/web-examples/wan-effects/man_1.png", + "https://storage.googleapis.com/falserverless/web-examples/wan-effects/woman_2.png" + ], + "description": "URL of the input image.", + "type": "string", + "title": "Image URL" + }, + "turbo_mode": { + "description": "Whether to use turbo mode. If True, the video will be generated faster but with lower quality.", + "type": "boolean", + "title": "Turbo Mode", + "default": false + }, + "frames_per_second": { + "minimum": 5, + "description": "Frames per second of the generated video.", + "type": "integer", + "maximum": 24, + "title": "Frames Per Second", + "default": 16 + }, + "num_inference_steps": { + "minimum": 2, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 40, + "description": "Number of inference steps for sampling. 
Higher values give better quality but take longer.", + "default": 30 + }, + "seed": { + "description": "Random seed for reproducibility. If None, a random seed is chosen.", + "type": "integer", + "title": "Seed" + }, + "num_frames": { + "minimum": 81, + "title": "Num Frames", + "type": "integer", + "maximum": 100, + "description": "Number of frames to generate.", + "default": 81 + } + }, + "title": "BaseInput", + "required": [ + "subject", + "image_url" + ] + }, + "WanEffectsOutput": { + "x-fal-order-properties": [ + "video", + "seed" + ], + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer" + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/web-examples/wan-effects/cat_video.mp4" + } + ], + "description": "The generated video", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "WanEffectsOutput", + "required": [ + "video", + "seed" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan-effects/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-effects/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/wan-effects": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanEffectsInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-effects/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanEffectsOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan-pro/image-to-video", + "metadata": { + "display_name": "Wan-2.1 Pro Image-to-Video", + "category": "image-to-video", + "description": "Wan-2.1 Pro is a premium image-to-video model that generates high-quality 1080p videos at 30fps with up to 6 seconds duration, delivering exceptional visual quality and motion diversity from images", + "status": "active", + "tags": [ + "image to video", + "motion" + ], + "updated_at": "2026-01-26T21:41:24.186Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/wan-pro-image-to-video.webp", + "thumbnail_animated_url": "https://storage.googleapis.com/falserverless/gallery/wan-pro-image-to-video-animated.webp", + "model_url": "https://fal.run/fal-ai/wan-pro/image-to-video", + "license_type": "commercial", + "date": "2025-03-11T00:00:00.000Z", + "group": { + "key": "wan-2.1", + "label": "Image-to-Video (Pro)" + }, + "highlighted": true, + "kind": "inference", + "duration_estimate": 5, + "pinned": true + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan-pro/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan-pro/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan-pro/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/wan-pro-image-to-video.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan-pro/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/wan-pro/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanProImageToVideoInput": { + "x-fal-order-properties": [ + "prompt", + "seed", + "enable_safety_checker", + "image_url" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A stylish woman walks down a Tokyo street filled with warm glowing neon and animated city signage." + ], + "description": "The prompt to generate the video", + "type": "string", + "title": "Prompt" + }, + "enable_safety_checker": { + "description": "Whether to enable the safety checker", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "Random seed for reproducibility. If None, a random seed is chosen.", + "title": "Seed" + }, + "image_url": { + "examples": [ + "https://fal.media/files/elephant/8kkhB12hEZI2kkbU8pZPA_test.jpeg" + ], + "description": "The URL of the image to generate the video from", + "type": "string", + "title": "Image Url" + } + }, + "title": "WanProI2VRequest", + "required": [ + "prompt", + "image_url" + ] + }, + "WanProImageToVideoOutput": { + "x-fal-order-properties": [ + "video" + ], + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://fal.media/files/kangaroo/K1hB3k-IXBzq9rz1kNOxy.mp4" + } + ], + "description": "The generated video", + "$ref": "#/components/schemas/File" + } + }, + "title": "WanProI2VResponse", + "required": [ + "video" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan-pro/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-pro/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/wan-pro/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanProImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-pro/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanProImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/veo2/image-to-video", + "metadata": { + "display_name": "Veo 2 (Image to Video)", + "category": "image-to-video", + "description": "Veo 2 creates videos from images with realistic motion and very high quality output.", + "status": "active", + "tags": [ + "motion", + "transformation" + ], + "updated_at": "2026-01-26T21:41:24.062Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/veo2-image-to-video.webp", + "thumbnail_animated_url": "https://storage.googleapis.com/falserverless/gallery/veo2-image-to-video-animated.webp", + "model_url": "https://fal.run/fal-ai/veo2/image-to-video", + "license_type": "commercial", + "date": "2025-03-11T00:00:00.000Z", + "group": { + "key": "veo2", + "label": "Image to Video" + }, + "highlighted": true, + "kind": "inference", + "pinned": true + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/veo2/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/veo2/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/veo2/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/veo2-image-to-video.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/veo2/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/veo2/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The 
request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Veo2ImageToVideoInput": { + "title": "ImageToVideoInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A lego chef cooking eggs" + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt describing how the image should be animated" + }, + "duration": { + "enum": [ + "5s", + "6s", + "7s", + "8s" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds", + "default": "5s" + }, + "aspect_ratio": { + "enum": [ + "auto", + "auto_prefer_portrait", + "16:9", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video", + "default": "auto" + }, + "image_url": { + "examples": [ + "https://fal.media/files/elephant/6fq8JDSjb1osE_c3J_F2H.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the input image to animate. Should be 720p or higher resolution." + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "aspect_ratio", + "duration" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "Veo2ImageToVideoOutput": { + "title": "ImageToVideoOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/monkey/jOYy3rvGB33vumzulpXd5_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/veo2/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/veo2/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/veo2/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Veo2ImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/veo2/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Veo2ImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/v1.6/pro/image-to-video", + "metadata": { + "display_name": "Kling 1.6", + "category": "image-to-video", + "description": "Generate video clips from your images using Kling 1.6 (pro)", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:24.312Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/kling-1-6-image-to-video.webp", + "thumbnail_animated_url": "https://storage.googleapis.com/falserverless/gallery/kling-1-6-image-to-video-animated.webp", + "model_url": "https://fal.run/fal-ai/kling-video/v1.6/pro/image-to-video", + "license_type": "commercial", + "date": "2025-01-07T00:00:00.000Z", + "group": { + "key": "kling-video-v1-6", + "label": "Image to Video v1.6 (pro)" + }, + "highlighted": true, + "kind": "inference", + "duration_estimate": 6, + "pinned": true + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling-video/v1.6/pro/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-video/v1.6/pro/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/v1.6/pro/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/kling-1-6-image-to-video.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/v1.6/pro/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/v1.6/pro/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + 
"IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoV16ProImageToVideoInput": { + "title": "ProImageToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Snowflakes fall as a car moves along the road." + ], + "title": "Prompt", + "type": "string", + "maxLength": 2500 + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video frame", + "default": "16:9" + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "maxLength": 2500, + "default": "blur, distort, and low quality" + }, + "duration": { + "enum": [ + "5", + "10" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds", + "default": "5" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/kling/kling_input.jpeg" + ], + "title": "Image Url", + "type": "string" + }, + "tail_image_url": { + "title": "Tail Image Url", + "type": "string", + "description": "URL of the image to be used for the end of the video" + }, + "cfg_scale": { + "minimum": 0, + "title": "Cfg Scale", + "type": "number", + "maximum": 1, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt.\n ", + "default": 0.5 + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "duration", + "aspect_ratio", + "tail_image_url", + "negative_prompt", + "cfg_scale" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "KlingVideoV16ProImageToVideoOutput": { + "title": "I2VOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/kling/kling_i2v_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/v1.6/pro/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1.6/pro/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1.6/pro/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV16ProImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1.6/pro/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV16ProImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/minimax/video-01/image-to-video", + "metadata": { + "display_name": "MiniMax (Hailuo AI) Video 01", + "category": "image-to-video", + "description": "Generate video clips from your images using MiniMax Video model", + "status": "active", + "tags": [ + "motion", + "transformation" + ], + "updated_at": "2026-01-26T21:41:24.611Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/minimax-video-01-image-to-video.webp", + "thumbnail_animated_url": "https://storage.googleapis.com/falserverless/gallery/minimax-video-01-image-to-video-animated.webp", + "model_url": "https://fal.run/fal-ai/minimax/video-01/image-to-video", + "license_type": "commercial", + "date": "2024-10-30T00:00:00.000Z", + "group": { + "key": "minimax-video", + "label": "Image to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": true + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/minimax/video-01/image-to-video", + "version": "1.0.0", + 
"description": "The OpenAPI schema for the fal-ai/minimax/video-01/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/minimax/video-01/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/minimax-video-01-image-to-video.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/minimax/video-01/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/minimax/video-01/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MinimaxVideo01ImageToVideoInput": { + "title": "ImageToVideoRequest", + "type": "object", + "properties": { + "prompt_optimizer": { + "description": "Whether to use the model's prompt optimizer", + "type": "boolean", + "title": "Prompt Optimizer", + "default": true + }, + "prompt": { + "examples": [ + "A stylish woman walks down a Tokyo street filled with warm glowing neon and animated city signage." + ], + "maxLength": 2000, + "type": "string", + "title": "Prompt" + }, + "image_url": { + "examples": [ + "https://fal.media/files/elephant/8kkhB12hEZI2kkbU8pZPA_test.jpeg" + ], + "description": "URL of the image to use as the first frame", + "type": "string", + "title": "Image Url" + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "prompt_optimizer" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "MinimaxVideo01ImageToVideoOutput": { + "title": "VideoOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://fal.media/files/monkey/vNZqQV_WgC9MhoidClLyw_output.mp4" + } + ], + "description": "The generated video", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/minimax/video-01/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/video-01/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/minimax/video-01/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxVideo01ImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/video-01/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxVideo01ImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/minimax/hailuo-2.3/pro/image-to-video", + "metadata": { + "display_name": "MiniMax Hailuo 2.3 [Pro] (Image to Video)", + "category": "image-to-video", + "description": "MiniMax Hailuo-2.3 Image To Video API (Pro, 1080p): Advanced image-to-video generation model with 1080p resolution", + "status": "active", + "tags": [ + "image-to-video" + ], + "updated_at": "2026-01-26T21:42:31.592Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/elephant/tFrccsrBCt7X_80Je77Ie_a073f11dc9354f5eab801768841430a9.jpg", + "model_url": "https://fal.run/fal-ai/minimax/hailuo-2.3/pro/image-to-video", + "license_type": "commercial", + "date": "2025-10-27T13:16:08.044Z", + "group": { + "key": "hailuo-23", + "label": "Image To Video (pro)" + }, + "highlighted": true, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/minimax/hailuo-2.3/pro/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/minimax/hailuo-2.3/pro/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/minimax/hailuo-2.3/pro/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/elephant/tFrccsrBCt7X_80Je77Ie_a073f11dc9354f5eab801768841430a9.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/minimax/hailuo-2.3/pro/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/minimax/hailuo-2.3/pro/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "MinimaxHailuo23ProImageToVideoInput": { + "title": "ProImageToVideoHailuo23Input", + "type": "object", + "properties": { + "prompt_optimizer": { + "description": "Whether to use the model's prompt optimizer", + "type": "boolean", + "title": "Prompt Optimizer", + "default": true + }, + "prompt": { + "examples": [ + "The camera follows the mountain biker as they navigate a technical forest trail at high speed, wheels bouncing over roots and rocks. The rider approaches a jump, launching into the air with the bike, both rider and machine perfectly synchronized. They land smoothly and continue through tight turns, splashing through a stream crossing. Mud and water spray as the bike powers through challenging terrain. The atmosphere is wild and adventurous. Audio: Tires gripping dirt, gears shifting, heavy breathing, branches whipping past, and water splashing." + ], + "maxLength": 2000, + "type": "string", + "title": "Prompt", + "description": "Text prompt for video generation", + "minLength": 1 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/hailuo23/pro_i2v_in.jpg" + ], + "description": "URL of the image to use as the first frame", + "type": "string", + "title": "Image Url" + } + }, + "x-fal-order-properties": [ + "prompt", + "prompt_optimizer", + "image_url" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "MinimaxHailuo23ProImageToVideoOutput": { + "title": "ProImageToVideoHailuo23Output", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/hailuo23/pro_i2v_out.mp4" + } + ], + "description": "The generated video", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/minimax/hailuo-2.3/pro/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/hailuo-2.3/pro/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/minimax/hailuo-2.3/pro/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxHailuo23ProImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/hailuo-2.3/pro/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxHailuo23ProImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan-25-preview/image-to-video", + "metadata": { + "display_name": "Wan 2.5 Image to Video", + "category": "image-to-video", + "description": "Wan 2.5 image-to-video model.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:48.059Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/rabbit/jryVrAZdQNdLdN_4rTlN7_9bebc94cb69a482fb4d948bdd06d6a5e.jpg", + "model_url": "https://fal.run/fal-ai/wan-25-preview/image-to-video", + "license_type": "commercial", + "date": "2025-09-24T01:52:47.880Z", + "group": { + "key": "wan-25-preview", + "label": "Image to Video" + }, + "highlighted": true, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan-25-preview/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan-25-preview/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan-25-preview/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://v3.fal.media/files/rabbit/jryVrAZdQNdLdN_4rTlN7_9bebc94cb69a482fb4d948bdd06d6a5e.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan-25-preview/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/wan-25-preview/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Wan25PreviewImageToVideoInput": { + "description": "Input for image-to-video generation", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "The white dragon warrior stands still, eyes full of determination and strength. The camera slowly moves closer or circles around the warrior, highlighting the powerful presence and heroic spirit of the character." + ], + "description": "The text prompt describing the desired video motion. Max 800 characters.", + "type": "string", + "title": "Prompt", + "minLength": 1 + }, + "resolution": { + "enum": [ + "480p", + "720p", + "1080p" + ], + "description": "Video resolution. Valid values: 480p, 720p, 1080p", + "type": "string", + "title": "Resolution", + "default": "1080p" + }, + "duration": { + "enum": [ + "5", + "10" + ], + "description": "Duration of the generated video in seconds. Choose between 5 or 10 seconds.", + "type": "string", + "title": "Duration", + "examples": [ + "5", + "10" + ], + "default": "5" + }, + "image_url": { + "x-fal": { + "min_width": 360, + "min_height": 360, + "timeout": 20, + "max_width": 2000, + "max_height": 2000, + "max_file_size": 26214400 + }, + "description": "URL of the image to use as the first frame. Must be publicly accessible or base64 data URI.\n\nMax file size: 25.0MB, Min width: 360px, Min height: 360px, Max width: 2000px, Max height: 2000px, Timeout: 20.0s", + "type": "string", + "title": "Image URL", + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/wan/dragon-warrior.jpg" + ] + }, + "enable_safety_checker": { + "examples": [ + true + ], + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "seed": { + "description": "Random seed for reproducibility. If None, a random seed is chosen.", + "type": "integer", + "title": "Seed" + }, + "audio_url": { + "description": "\nURL of the audio to use as the background music. Must be publicly accessible.\nLimit handling: If the audio duration exceeds the duration value (5 or 10 seconds),\nthe audio is truncated to the first 5 or 10 seconds, and the rest is discarded. If\nthe audio is shorter than the video, the remaining part of the video will be silent.\nFor example, if the audio is 3 seconds long and the video duration is 5 seconds, the\nfirst 3 seconds of the output video will have sound, and the last 2 seconds will be silent.\n- Format: WAV, MP3.\n- Duration: 3 to 30 s.\n- File size: Up to 15 MB.\n", + "type": "string", + "title": "Audio Url" + }, + "negative_prompt": { + "examples": [ + "low resolution, error, worst quality, low quality, defects" + ], + "description": "Negative prompt to describe content to avoid. 
Max 500 characters.", + "type": "string", + "title": "Negative Prompt" + }, + "enable_prompt_expansion": { + "description": "Whether to enable prompt rewriting using LLM.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": true + } + }, + "title": "ImageToVideoInput", + "x-fal-order-properties": [ + "prompt", + "image_url", + "audio_url", + "resolution", + "duration", + "negative_prompt", + "enable_prompt_expansion", + "seed", + "enable_safety_checker" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "Wan25PreviewImageToVideoOutput": { + "description": "Base output for video generation", + "type": "object", + "properties": { + "actual_prompt": { + "examples": [ + "The white dragon warrior stands still in a grand cathedral-like structure, its glowing golden eyes fixed forward. The camera slowly moves closer, focusing on the warrior's armored chest and face. It then begins to circle around the warrior, capturing the intricate details of the white scale armor with gold accents. The warrior maintains a strong, determined posture. Ambient sounds and soft choral tones fill the background, enhancing the majestic atmosphere. The camera continues its slow circular motion, emphasizing the warrior's heroic presence before ending with a close-up of the face." + ], + "description": "The actual prompt used if prompt rewriting was enabled", + "type": "string", + "title": "Actual Prompt" + }, + "seed": { + "examples": [ + 175932751 + ], + "description": "The seed used for generation", + "type": "integer", + "title": "Seed" + }, + "video": { + "examples": [ + { + "content_type": "video/mp4", + "url": "https://storage.googleapis.com/falserverless/example_outputs/wan-25-i2v-output.mp4" + } + ], + "description": "The generated video file", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/VideoFile" + } + ] + } + }, + "title": "VideoOutput", + "x-fal-order-properties": [ + "video", + "seed", + "actual_prompt" + ], + "required": [ + "video", + "seed" + ] + }, + "VideoFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "duration": { + "description": "The duration of the video", + "type": "number", + "title": "Duration" + }, + "height": { + "description": "The height of the video", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "fps": { + "description": "The FPS of the video", + "type": "number", + "title": "Fps" + }, + "width": { + "description": "The width of the video", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "num_frames": { + "description": "The number of frames in the video", + "type": "integer", + "title": "Num Frames" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "VideoFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan-25-preview/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-25-preview/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/wan-25-preview/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Wan25PreviewImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-25-preview/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Wan25PreviewImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/v2.5-turbo/pro/image-to-video", + "metadata": { + "display_name": "Kling Video", + "category": "image-to-video", + "description": "Kling 2.5 Turbo Pro: Top-tier image-to-video generation with unparalleled motion fluidity, cinematic visuals, and exceptional prompt precision.", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:42:48.564Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/rabbit/2KNBk1qkmEBXK0FLgfHCl_ee4bb1ada254433bbab296893a8636e3.jpg", + "model_url": "https://fal.run/fal-ai/kling-video/v2.5-turbo/pro/image-to-video", + "license_type": "commercial", + "date": "2025-09-22T22:51:16.379Z", + "group": { + "key": "kling-video-v25", + "label": "2.5 Turbo (Image to Video) Pro" + }, + 
"highlighted": true, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling-video/v2.5-turbo/pro/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-video/v2.5-turbo/pro/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/v2.5-turbo/pro/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://v3.fal.media/files/rabbit/2KNBk1qkmEBXK0FLgfHCl_ee4bb1ada254433bbab296893a8636e3.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/v2.5-turbo/pro/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/v2.5-turbo/pro/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoV25TurboProImageToVideoInput": { + "title": "ImageToVideoV25ProRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A stark starting line divides two powerful cars, engines revving for the challenge ahead. They surge forward in the heat of competition, a blur of speed and chrome. The finish line looms as they vie for victory." 
+ ], + "title": "Prompt", + "type": "string", + "maxLength": 2500 + }, + "duration": { + "enum": [ + "5", + "10" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds", + "default": "5" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/panda/HnY2yf-BbzlrVQxR-qP6m_9912d0932988453aadf3912fc1901f52.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to be used for the video" + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "maxLength": 2500, + "default": "blur, distort, and low quality" + }, + "tail_image_url": { + "title": "Tail Image Url", + "type": "string", + "description": "URL of the image to be used for the end of the video" + }, + "cfg_scale": { + "minimum": 0, + "title": "Cfg Scale", + "type": "number", + "maximum": 1, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt.\n ", + "default": 0.5 + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "duration", + "negative_prompt", + "cfg_scale", + "tail_image_url" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "KlingVideoV25TurboProImageToVideoOutput": { + "title": "ImageToVideoV25ProOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/model_tests/kling/kling-v2.5-turbo-pro-image-to-video-output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/v2.5-turbo/pro/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v2.5-turbo/pro/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v2.5-turbo/pro/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV25TurboProImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v2.5-turbo/pro/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV25TurboProImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/minimax/hailuo-02/standard/image-to-video", + "metadata": { + "display_name": "MiniMax Hailuo 02 [Standard] (Image to Video)", + "category": "image-to-video", + "description": "MiniMax Hailuo-02 Image To Video API (Standard, 768p, 512p): Advanced image-to-video generation model with 768p and 512p resolutions", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:26.051Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Upscale.jpg", + "model_url": "https://fal.run/fal-ai/minimax/hailuo-02/standard/image-to-video", + "license_type": "commercial", + "date": "2025-06-18T00:47:27.952Z", + "group": { + "key": "hailuo-02", + "label": "Image to Video (standard) " + }, + "highlighted": true, + "kind": "inference", + "duration_estimate": 4, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/minimax/hailuo-02/standard/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/minimax/hailuo-02/standard/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/minimax/hailuo-02/standard/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Upscale.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/minimax/hailuo-02/standard/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/minimax/hailuo-02/standard/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + 
"status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MinimaxHailuo02StandardImageToVideoInput": { + "title": "StandardImageToVideoHailuo02Input", + "type": "object", + "properties": { + "prompt_optimizer": { + "description": "Whether to use the model's prompt optimizer", + "type": "boolean", + "title": "Prompt Optimizer", + "default": true + }, + "duration": { + "enum": [ + "6", + "10" + ], + "description": "The duration of the video in seconds. 10 seconds videos are not supported for 1080p resolution.", + "type": "string", + "title": "Duration", + "default": "6" + }, + "resolution": { + "enum": [ + "512P", + "768P" + ], + "description": "The resolution of the generated video.", + "type": "string", + "title": "Resolution", + "default": "768P" + }, + "prompt": { + "examples": [ + "Man walked into winter cave with polar bear" + ], + "maxLength": 2000, + "type": "string", + "title": "Prompt" + }, + "end_image_url": { + "description": "Optional URL of the image to use as the last frame of the video", + "type": "string", + "title": "End Image Url" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/minimax/1749891352437225630-389852416840474630_1749891352.png" + ], + "title": "Image Url", + "type": "string" + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "duration", + "prompt_optimizer", + "resolution", + "end_image_url" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "MinimaxHailuo02StandardImageToVideoOutput": { + "title": "ImageToVideoHailuo02Output", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/monkey/xF9OsLwGjjNURyAxD8RM1_output.mp4" + } + ], + "description": "The generated video", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/minimax/hailuo-02/standard/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/hailuo-02/standard/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/minimax/hailuo-02/standard/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxHailuo02StandardImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/hailuo-02/standard/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxHailuo02StandardImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/bytedance/seedance/v1/pro/image-to-video", + "metadata": { + "display_name": "Seedance 1.0 Pro", + "category": "image-to-video", + "description": "Seedance 1.0 Pro, a high quality video generation model developed by Bytedance.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:27.499Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-3.jpg", + "model_url": "https://fal.run/fal-ai/bytedance/seedance/v1/pro/image-to-video", + "license_type": "commercial", + "date": "2025-06-16T16:19:27.807Z", + "group": { + "key": "seedance-v1", + "label": "Seedance 1.0 Pro -- Image to Video" + }, + "highlighted": true, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/bytedance/seedance/v1/pro/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/bytedance/seedance/v1/pro/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/bytedance/seedance/v1/pro/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-3.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/bytedance/seedance/v1/pro/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/bytedance/seedance/v1/pro/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "BytedanceSeedanceV1ProImageToVideoInput": { + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "resolution", + "duration", + "camera_fixed", + "seed", + "enable_safety_checker", + "image_url", + "end_image_url" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A skier glides over fresh snow, joyously smiling while kicking up large clouds of snow as he turns. Accelerating gradually down the slope, the camera moves smoothly alongside." + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt used to generate the video" + }, + "resolution": { + "enum": [ + "480p", + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "Video resolution - 480p for faster generation, 720p for balance, 1080p for higher quality", + "default": "1080p" + }, + "aspect_ratio": { + "enum": [ + "21:9", + "16:9", + "4:3", + "1:1", + "3:4", + "9:16", + "auto" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video", + "default": "auto" + }, + "duration": { + "enum": [ + "2", + "3", + "4", + "5", + "6", + "7", + "8", + "9", + "10", + "11", + "12" + ], + "title": "Duration", + "type": "string", + "description": "Duration of the video in seconds", + "default": "5" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/seedance_pro_i2v_img.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "The URL of the image used to generate video" + }, + "enable_safety_checker": { + "examples": [ + true + ], + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "camera_fixed": { + "title": "Camera Fixed", + "type": "boolean", + "description": "Whether to fix the camera position", + "default": false + }, + "end_image_url": { + "title": "End Image Url", + "type": "string", + "description": "The URL of the image the video ends with. Defaults to None." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed to control video generation. Use -1 for random." + } + }, + "title": "SeedanceProImageToVideoInput", + "required": [ + "prompt", + "image_url" + ] + }, + "BytedanceSeedanceV1ProImageToVideoOutput": { + "x-fal-order-properties": [ + "video", + "seed" + ], + "type": "object", + "properties": { + "seed": { + "examples": [ + 42 + ], + "title": "Seed", + "type": "integer", + "description": "Seed used for generation" + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_inputs/seedance_pro_i2v.mp4" + } + ], + "title": "Video", + "description": "Generated video file", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "SeedanceProI2VVideoOutput", + "required": [ + "video", + "seed" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." 
+ }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/bytedance/seedance/v1/pro/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedance/v1/pro/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedance/v1/pro/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BytedanceSeedanceV1ProImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedance/v1/pro/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BytedanceSeedanceV1ProImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/v2.1/master/image-to-video", + "metadata": { + "display_name": "Kling 2.1 Master", + "category": "image-to-video", + "description": "Kling 2.1 Master: The premium endpoint for Kling 2.1, designed for top-tier image-to-video generation with unparalleled motion fluidity, cinematic visuals, and exceptional prompt precision.\n\n", + "status": "active", + "tags": [ + "_marquee-video-model" + ], + "updated_at": "2026-01-26T21:43:36.771Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Upscale-5.jpeg", + "model_url": "https://fal.run/fal-ai/kling-video/v2.1/master/image-to-video", + "license_type": "commercial", + "date": "2025-05-29T00:01:10.593Z", + "group": { + "key": "kling-video-v21", + "label": "2.1 Master (Image to Video)" + }, + "highlighted": true, + 
"kind": "inference", + "duration_estimate": 6, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling-video/v2.1/master/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-video/v2.1/master/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/v2.1/master/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Upscale-5.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/v2.1/master/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/v2.1/master/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoV21MasterImageToVideoInput": { + "title": "ImageToVideoV21MasterRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Sunlight dapples through budding branches, illuminating a vibrant tapestry of greens and browns as a pair of robins meticulously weave twigs and mud into a cradle of life, their tiny forms a whirlwind of activity against a backdrop of blossoming spring. The scene unfolds with a gentle, observational pace, allowing the viewer to fully appreciate the intricate details of nest construction, the soft textures of downy feathers contrasted against the rough bark of the branches, the delicate balance of strength and fragility in their creation." 
+ ], + "title": "Prompt", + "type": "string", + "maxLength": 2500 + }, + "duration": { + "enum": [ + "5", + "10" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds", + "default": "5" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/zebra/9Nrm22YyLojSTPJbZYNhh_image.webp" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to be used for the video" + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "maxLength": 2500, + "default": "blur, distort, and low quality" + }, + "cfg_scale": { + "minimum": 0, + "title": "Cfg Scale", + "type": "number", + "maximum": 1, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt.\n ", + "default": 0.5 + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "duration", + "negative_prompt", + "cfg_scale" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "KlingVideoV21MasterImageToVideoOutput": { + "title": "ImageToVideoV21MasterOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/rabbit/YuUWKFq508zzWIiQ0i2vt_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/v2.1/master/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v2.1/master/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v2.1/master/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV21MasterImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v2.1/master/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV21MasterImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/v2.1/standard/image-to-video", + "metadata": { + "display_name": "Kling 2.1 (standard)", + "category": "image-to-video", + "description": "Kling 2.1 Standard is a cost-efficient endpoint for the Kling 2.1 model, delivering high-quality image-to-video generation \n\n", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:37.019Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/elephant/5Yt8D9tl-IaGQ-6czSXL1_HHQ_VCby6xP_DFLkQQLpV_3c8622214c4c4ac29b4b64e157746507.jpg", + "model_url": "https://fal.run/fal-ai/kling-video/v2.1/standard/image-to-video", + "license_type": "commercial", + "date": "2025-05-28T20:59:07.301Z", + "group": { + "key": "kling-video-v21", + "label": "2.1 Standard (Image to Video)" + }, + "highlighted": true, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling-video/v2.1/standard/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-video/v2.1/standard/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/v2.1/standard/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://v3.fal.media/files/elephant/5Yt8D9tl-IaGQ-6czSXL1_HHQ_VCby6xP_DFLkQQLpV_3c8622214c4c4ac29b4b64e157746507.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/v2.1/standard/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/v2.1/standard/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + 
"QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoV21StandardImageToVideoInput": { + "title": "ImageToVideoV21StandardRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "As the sun dips below the horizon, painting the sky in fiery hues of orange and purple, powerful waves relentlessly crash against jagged, dark rocks, their white foam a stark contrast to the deepening twilight; the textured surface of the rocks, wet and glistening, reflects the vibrant colors, creating a mesmerizing spectacle of nature's raw power and breathtaking beauty" + ], + "title": "Prompt", + "type": "string", + "maxLength": 2500 + }, + "duration": { + "enum": [ + "5", + "10" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds", + "default": "5" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/kling/kling-image-to-video.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to be used for the video" + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "maxLength": 2500, + "default": "blur, distort, and low quality" + }, + "cfg_scale": { + "minimum": 0, + "title": "Cfg Scale", + "type": "number", + "maximum": 1, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt.\n ", + "default": 0.5 + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "duration", + "negative_prompt", + "cfg_scale" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "KlingVideoV21StandardImageToVideoOutput": { + "title": "ImageToVideoV21StandardOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/koala/17e3xh08J4_PkHS_0cbwF_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/v2.1/standard/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v2.1/standard/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v2.1/standard/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV21StandardImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v2.1/standard/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV21StandardImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pixverse/v4.5/image-to-video", + "metadata": { + "display_name": "Pixverse", + "category": "image-to-video", + "description": "Generate high quality video clips from text and image prompts using PixVerse v4.5", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:43.754Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Upscale-2.jpg", + "model_url": "https://fal.run/fal-ai/pixverse/v4.5/image-to-video", + "license_type": "commercial", + "date": "2025-05-15T15:53:06.944Z", + "group": { + "key": "pixverse-45", + "label": "Image to Video v4.5" + }, + "highlighted": true, + "kind": "inference", + "duration_estimate": 1, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pixverse/v4.5/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pixverse/v4.5/image-to-video queue.", + "x-fal-metadata": { + "endpointId": 
"fal-ai/pixverse/v4.5/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Upscale-2.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/pixverse/v4.5/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/pixverse/v4.5/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PixverseV45ImageToVideoInput": { + "title": "ImageToVideoRequestV4", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A woman warrior with her hammer walking with his glacier wolf." + ], + "title": "Prompt", + "type": "string" + }, + "resolution": { + "enum": [ + "360p", + "540p", + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video", + "default": "720p" + }, + "duration": { + "enum": [ + "5", + "8" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds. 8s videos cost double. 
1080p videos are limited to 5 seconds", + "default": "5" + }, + "style": { + "enum": [ + "anime", + "3d_animation", + "clay", + "comic", + "cyberpunk" + ], + "title": "Style", + "type": "string", + "description": "The style of the generated video" + }, + "camera_movement": { + "enum": [ + "horizontal_left", + "horizontal_right", + "vertical_up", + "vertical_down", + "zoom_in", + "zoom_out", + "crane_up", + "quickly_zoom_in", + "quickly_zoom_out", + "smooth_zoom_in", + "camera_rotation", + "robo_arm", + "super_dolly_out", + "whip_pan", + "hitchcock", + "left_follow", + "right_follow", + "pan_left", + "pan_right", + "fix_bg" + ], + "title": "Camera Movement", + "type": "string", + "description": "The type of camera movement to apply to the video" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/zebra/qL93Je8ezvzQgDOEzTjKF_KhGKZTEebZcDw6T5rwQPK_output.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to use as the first frame" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n " + }, + "negative_prompt": { + "examples": [ + "blurry, low quality, low resolution, pixelated, noisy, grainy, out of focus, poorly lit, poorly exposed, poorly composed, poorly framed, poorly cropped, poorly color corrected, poorly color graded" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt to be used for the generation", + "default": "" + } + }, + "x-fal-order-properties": [ + "prompt", + "resolution", + "duration", + "negative_prompt", + "style", + "seed", + "image_url", + "camera_movement" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "PixverseV45ImageToVideoOutput": { + "title": "I2VOutputV4", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 6420765, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://fal.media/files/koala/HEWK7BBwqWrz7F5nAZzp7_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pixverse/v4.5/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v4.5/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v4.5/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV45ImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v4.5/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV45ImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/v2/master/image-to-video", + "metadata": { + "display_name": "Kling 2.0 Master", + "category": "image-to-video", + "description": "Generate video clips from your images using Kling 2.0 Master", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:55.970Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos.jpg", + "model_url": "https://fal.run/fal-ai/kling-video/v2/master/image-to-video", + "license_type": "commercial", + "date": "2025-04-14T21:50:19.724Z", + "group": { + "key": "kling-video-v2", + "label": "Image to Video v2 Master" + }, + "highlighted": true, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling-video/v2/master/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-video/v2/master/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/v2/master/image-to-video", + "category": "image-to-video", + "thumbnailUrl": 
"https://storage.googleapis.com/fal_cdn/fal/for%20videos.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/v2/master/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/v2/master/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoV2MasterImageToVideoInput": { + "title": "ImageToVideoV2MasterRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "slow-motion sequence captures the catastrophic implosion of a skyscraper, dust and debris billowing outwards in a chaotic ballet of destruction, while a haunting, orchestral score underscores the sheer power and finality of the event." + ], + "title": "Prompt", + "type": "string", + "maxLength": 2500 + }, + "duration": { + "enum": [ + "5", + "10" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds", + "default": "5" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/elephant/rkH-9qoXtXu3rAYTsx9V5_image.webp" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to be used for the video" + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "maxLength": 2500, + "default": "blur, distort, and low quality" + }, + "cfg_scale": { + "minimum": 0, + "title": "Cfg Scale", + "type": "number", + "maximum": 1, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt.\n ", + "default": 0.5 + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "duration", + "negative_prompt", + "cfg_scale" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "KlingVideoV2MasterImageToVideoOutput": { + "title": "ImageToVideoV2MasterOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/koala/VvGXP5xEhTR9ovGjpulJ7_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." 
+ }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/v2/master/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v2/master/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v2/master/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV2MasterImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v2/master/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV2MasterImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan-i2v", + "metadata": { + "display_name": "Wan-2.1 Image-to-Video", + "category": "image-to-video", + "description": "Wan-2.1 is a image-to-video model that generates high-quality videos with high visual quality and motion diversity from images", + "status": "active", + "tags": [ + "image to video", + "motion" + ], + "updated_at": "2026-01-26T21:44:05.042Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/hunyuan-video-image-to-video.webp", + "thumbnail_animated_url": "https://storage.googleapis.com/falserverless/gallery/hunyuan-video-image-to-video-animated.webp", + "model_url": "https://fal.run/fal-ai/wan-i2v", + "license_type": "commercial", + "date": "2025-02-25T00:00:00.000Z", + "group": { 
+ "key": "wan-2.1", + "label": "Image-to-Video" + }, + "highlighted": true, + "kind": "inference", + "duration_estimate": 1, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan-i2v", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan-i2v queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan-i2v", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/hunyuan-video-image-to-video.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan-i2v", + "documentationUrl": "https://fal.ai/models/fal-ai/wan-i2v/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanI2vInput": { + "title": "WanI2VRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Cars racing in slow motion" + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt to guide video generation." + }, + "shift": { + "minimum": 1, + "maximum": 10, + "type": "number", + "description": "Shift parameter for video generation.", + "title": "Shift", + "default": 5 + }, + "acceleration": { + "examples": [ + "regular" + ], + "title": "Acceleration", + "type": "string", + "enum": [ + "none", + "regular" + ], + "description": "Acceleration level to use. The more acceleration, the faster the generation, but with lower quality. The recommended value is 'regular'.", + "default": "regular" + }, + "frames_per_second": { + "minimum": 5, + "maximum": 24, + "type": "integer", + "title": "Frames Per Second", + "description": "Frames per second of the generated video. Must be between 5 to 24.", + "default": 16 + }, + "enable_safety_checker": { + "examples": [ + true + ], + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": false + }, + "num_frames": { + "minimum": 81, + "title": "Num Frames", + "type": "integer", + "description": "Number of frames to generate. Must be between 81 to 100 (inclusive). 
If the number of frames is greater than 81, the video will be generated with 1.25x more billing units.", + "maximum": 100, + "default": 81 + }, + "negative_prompt": { + "examples": [ + "bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for video generation.", + "default": "bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "9:16", + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio of the generated video. If 'auto', the aspect ratio will be determined automatically based on the input image.", + "default": "auto" + }, + "resolution": { + "enum": [ + "480p", + "720p" + ], + "description": "Resolution of the generated video (480p or 720p). 480p is 0.5 billing units, and 720p is 1 billing unit.", + "type": "string", + "title": "Resolution", + "default": "720p" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/gallery/car_720p.png" + ], + "description": "URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped.", + "type": "string", + "title": "Image Url" + }, + "enable_prompt_expansion": { + "examples": [ + false + ], + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 40, + "type": "integer", + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "title": "Num Inference Steps", + "default": 30 + }, + "guide_scale": { + "minimum": 1, + "maximum": 10, + "type": "number", + "description": "Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.", + "title": "Guide Scale", + "default": 5 + }, + "seed": { + "description": "Random seed for reproducibility. 
If None, a random seed is chosen.", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "image_url", + "num_frames", + "frames_per_second", + "seed", + "resolution", + "num_inference_steps", + "guide_scale", + "shift", + "enable_safety_checker", + "enable_prompt_expansion", + "acceleration", + "aspect_ratio" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "WanI2vOutput": { + "x-fal-order-properties": [ + "video", + "seed" + ], + "type": "object", + "properties": { + "seed": { + "description": "The seed used for generation.", + "type": "integer", + "title": "Seed" + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/gallery/wan-i2v-example.mp4" + } + ], + "description": "The generated video file.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "WanI2VResponse", + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan-i2v/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-i2v/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/wan-i2v": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanI2vInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-i2v/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanI2vOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pixverse/v5.6/transition", + "metadata": { + "display_name": "Pixverse", + "category": "image-to-video", + "description": "Use the latest pixverse v5.6 model to turn your texts and images into amazing videos.", + "status": "active", + "tags": [ + "image-to-video" + ], + "updated_at": "2026-01-27T09:08:34.929Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8bf6c6/5g6zE3Sjy-aR-GBc8DhWT_603ad74d67aa47b68cab750e1cf9b5a6.jpg", + "model_url": "https://fal.run/fal-ai/pixverse/v5.6/transition", + "license_type": "commercial", + "date": "2026-01-26T17:28:03.386Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pixverse/v5.6/transition", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pixverse/v5.6/transition queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pixverse/v5.6/transition", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8bf6c6/5g6zE3Sjy-aR-GBc8DhWT_603ad74d67aa47b68cab750e1cf9b5a6.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/pixverse/v5.6/transition", + "documentationUrl": "https://fal.ai/models/fal-ai/pixverse/v5.6/transition/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "PixverseV56TransitionInput": { + "title": "TransitionRequestV5_6", + "type": "object", + "properties": { + "first_image_url": { + "examples": [ + "https://v3.fal.media/files/zebra/owQh2DAzk8UU7J02nr5RY_Co2P4boLv6meIZ5t9gKvL_8685da151df343ab8bf82165c928e2a5.jpg" + ], + "title": "First Image Url", + "type": "string", + "description": "URL of the image to use as the first frame" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "4:3", + "1:1", + "3:4", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video", + "default": "16:9" + }, + "resolution": { + "enum": [ + "360p", + "540p", + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video", + "default": "720p" + }, + "style": { + "enum": [ + "anime", + "3d_animation", + "clay", + "comic", + "cyberpunk" + ], + "title": "Style", + "type": "string", + "description": "The style of the generated video" + }, + "thinking_type": { + "enum": [ + "enabled", + "disabled", + "auto" + ], + "title": "Thinking Type", + "type": "string", + "description": "Prompt optimization mode: 'enabled' to optimize, 'disabled' to turn off, 'auto' for model decision" + }, + "prompt": { + "examples": [ + "Scene slowly transition into cat swimming under water" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt for the transition" + }, + "duration": { + "enum": [ + "5", + "8", + "10" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds. 1080p videos are limited to 5 or 8 seconds", + "default": "5" + }, + "generate_audio_switch": { + "title": "Generate Audio Switch", + "type": "boolean", + "description": "Enable audio generation (BGM, SFX, dialogue)", + "default": false + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n " + }, + "end_image_url": { + "examples": [ + "https://v3.fal.media/files/kangaroo/RgedFs_WSnq5BgER7qDx1_ONrbTJ1YAGXz-9JnSsBoB_bdc8750387734bfe940319f469f7b0b2.jpg" + ], + "title": "End Image Url", + "type": "string", + "description": "URL of the image to use as the last frame" + }, + "negative_prompt": { + "examples": [ + "blurry, low quality, low resolution, pixelated, noisy, grainy, out of focus, poorly lit, poorly exposed, poorly composed, poorly framed, poorly cropped, poorly color corrected, poorly color graded" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt to be used for the generation", + "default": "" + } + }, + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "resolution", + "duration", + "negative_prompt", + "style", + "seed", + "generate_audio_switch", + "thinking_type", + "first_image_url", + "end_image_url" + ], + "required": [ + "prompt", + "first_image_url" + ] + }, + "PixverseV56TransitionOutput": { + "title": "TransitionOutputV5_5", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 3890360, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://storage.googleapis.com/falserverless/model_tests/video_models/output-2.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ 
+ "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pixverse/v5.6/transition/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v5.6/transition/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v5.6/transition": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV56TransitionInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v5.6/transition/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV56TransitionOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pixverse/v5.6/image-to-video", + "metadata": { + "display_name": "Pixverse", + "category": "image-to-video", + "description": "Use the latest pixverse v5.6 model to turn your texts and images into amazing videos.", + "status": "active", + "tags": [ + "image-to-video" + ], + "updated_at": "2026-01-27T09:08:26.802Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8bf684/NHFUK0Vloyg4QuqEQDdxP_e3dc7704795542dfa183ecf6423c7883.jpg", + "model_url": "https://fal.run/fal-ai/pixverse/v5.6/image-to-video", + "license_type": "commercial", + "date": "2026-01-26T17:16:21.492Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pixverse/v5.6/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pixverse/v5.6/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pixverse/v5.6/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8bf684/NHFUK0Vloyg4QuqEQDdxP_e3dc7704795542dfa183ecf6423c7883.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/pixverse/v5.6/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/pixverse/v5.6/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PixverseV56ImageToVideoInput": { + "title": "ImageToVideoRequestV5_6", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A woman warrior with her hammer walking with his glacier wolf." 
+ ], + "title": "Prompt", + "type": "string" + }, + "resolution": { + "enum": [ + "360p", + "540p", + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video", + "default": "720p" + }, + "duration": { + "enum": [ + "5", + "8", + "10" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds. 1080p videos are limited to 5 or 8 seconds", + "default": "5" + }, + "style": { + "enum": [ + "anime", + "3d_animation", + "clay", + "comic", + "cyberpunk" + ], + "title": "Style", + "type": "string", + "description": "The style of the generated video" + }, + "thinking_type": { + "enum": [ + "enabled", + "disabled", + "auto" + ], + "title": "Thinking Type", + "type": "string", + "description": "Prompt optimization mode: 'enabled' to optimize, 'disabled' to turn off, 'auto' for model decision" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/zebra/qL93Je8ezvzQgDOEzTjKF_KhGKZTEebZcDw6T5rwQPK_output.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to use as the first frame" + }, + "generate_audio_switch": { + "title": "Generate Audio Switch", + "type": "boolean", + "description": "Enable audio generation (BGM, SFX, dialogue)", + "default": false + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n " + }, + "negative_prompt": { + "examples": [ + "blurry, low quality, low resolution, pixelated, noisy, grainy, out of focus, poorly lit, poorly exposed, poorly composed, poorly framed, poorly cropped, poorly color corrected, poorly color graded" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt to be used for the generation", + "default": "" + } + }, + "x-fal-order-properties": [ + "prompt", + "resolution", + "duration", + "negative_prompt", + "style", + "seed", + "generate_audio_switch", + "thinking_type", + "image_url" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "PixverseV56ImageToVideoOutput": { + "title": "I2VOutputV5_5", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 6420765, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://storage.googleapis.com/falserverless/model_tests/video_models/output-3.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pixverse/v5.6/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v5.6/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v5.6/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV56ImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v5.6/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV56ImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/vidu/q2/reference-to-video/pro", + "metadata": { + "display_name": "Vidu", + "category": "image-to-video", + "description": "Use the latest Vidu Q2 Pro models which much more better quality and control on your videos.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:30.794Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8b0dde/etLT9wcPxiw9gdfI1asZ2_4d84e99d288340748ed4af813552e8f1.jpg", + "model_url": "https://fal.run/fal-ai/vidu/q2/reference-to-video/pro", + "license_type": "commercial", + "date": "2026-01-19T19:49:20.023Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/vidu/q2/reference-to-video/pro", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/vidu/q2/reference-to-video/pro queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/vidu/q2/reference-to-video/pro", + "category": "image-to-video", + "thumbnailUrl": 
"https://v3b.fal.media/files/b/0a8b0dde/etLT9wcPxiw9gdfI1asZ2_4d84e99d288340748ed4af813552e8f1.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/vidu/q2/reference-to-video/pro", + "documentationUrl": "https://fal.ai/models/fal-ai/vidu/q2/reference-to-video/pro/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ViduQ2ReferenceToVideoProInput": { + "title": "Q2ProReferenceToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "@Figure 1 Character Reference@Refer to the special effects, movements, and camera work of Video 1." + ], + "title": "Prompt", + "type": "string", + "maxLength": 2000, + "description": "Text prompt for video generation, max 2000 characters" + }, + "resolution": { + "enum": [ + "540p", + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "Output video resolution", + "default": "720p" + }, + "aspect_ratio": { + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio of the output video (e.g., auto, 16:9, 9:16, 1:1, or any W:H)", + "default": "16:9" + }, + "duration": { + "minimum": 1, + "title": "Duration", + "type": "integer", + "maximum": 8, + "description": "Duration of the video in seconds (0 for automatic duration)", + "default": 4 + }, + "reference_video_urls": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/model_tests/video_models/vidu-video-3123002003131623.mp4" + ] + ], + "title": "Reference Video Urls", + "type": "array", + "description": "URLs of the reference videos for video editing or motion reference. Supports up to 2 videos.", + "items": { + "type": "string" + } + }, + "bgm": { + "title": "Bgm", + "type": "boolean", + "description": "Whether to add background music to the generated video", + "default": false + }, + "reference_image_urls": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/model_tests/video_models/vidu-image-3123041388101890.png" + ] + ], + "title": "Reference Image Urls", + "type": "array", + "description": "URLs of the reference images for subject appearance. If videos are provided, up to 4 images are allowed; otherwise up to 7 images.", + "items": { + "type": "string" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility. If None, a random seed is chosen." 
+ }, + "movement_amplitude": { + "enum": [ + "auto", + "small", + "medium", + "large" + ], + "title": "Movement Amplitude", + "type": "string", + "description": "The movement amplitude of objects in the frame", + "default": "auto" + } + }, + "x-fal-order-properties": [ + "prompt", + "reference_image_urls", + "reference_video_urls", + "seed", + "duration", + "resolution", + "aspect_ratio", + "movement_amplitude", + "bgm" + ], + "required": [ + "prompt" + ] + }, + "ViduQ2ReferenceToVideoProOutput": { + "title": "Q2ProReferenceToVideoOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://v3b.fal.media/files/b/0a8aa38a/_nfU-t1qY1HEVcDqct-M__output.mp4" + } + ], + "title": "Video", + "description": "The generated video with video/image references using the Q2 Pro model", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/vidu/q2/reference-to-video/pro/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/vidu/q2/reference-to-video/pro/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/vidu/q2/reference-to-video/pro": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ViduQ2ReferenceToVideoProInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/vidu/q2/reference-to-video/pro/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ViduQ2ReferenceToVideoProOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "wan/v2.6/image-to-video/flash", + "metadata": { + "display_name": "V2.6", + "category": "image-to-video", + "description": "Wan 2.6 image-to-video flash model.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:31.936Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/model_tests/video_models/Gemini_Generated_Image_k9ohc1k9ohc1k9oh.png", + "model_url": "https://fal.run/wan/v2.6/image-to-video/flash", + "license_type": "commercial", + "date": "2026-01-18T08:06:36.211Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for wan/v2.6/image-to-video/flash", + "version": "1.0.0", + "description": "The OpenAPI schema for the wan/v2.6/image-to-video/flash queue.", + "x-fal-metadata": { + "endpointId": "wan/v2.6/image-to-video/flash", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/model_tests/video_models/Gemini_Generated_Image_k9ohc1k9ohc1k9oh.png", + "playgroundUrl": "https://fal.ai/models/wan/v2.6/image-to-video/flash", + "documentationUrl": "https://fal.ai/models/wan/v2.6/image-to-video/flash/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "V26ImageToVideoFlashInput": { + "title": "ImageToVideoInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A comedic cinematic demo where typed prompts physically transform reality. 
Photoreal, strong match cuts, coherent main character, no subtitles.\n\nShot 1 [0-4s] Continue from first frame. The creator presses \"PRINT\". The machine clunks like a spaceship. Creator whispers: \"Okay… I'm pressing enter.\"\nShot 2 [4-8s] Smash cut: the printed paper flies into the air and unfolds into a full desert canyon scene around the desk, like reality is being unrolled. Creator says: \"Wait—my prompt has physics?\"\nShot 3 [8-12s] Hard cut: the paper tears and reveals a tropical jungle behind it, perfectly lit, cinematic sun. Creator laughs: \"This is exactly why we do AI.\"\nShot 4 [12-15s] Hard cut back to studio. The printer prints a final line (not shown clearly). Creator looks to camera: \"Multi-scene. Single prompt.\"" + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt describing the desired video motion. Max 800 characters.", + "minLength": 1 + }, + "resolution": { + "enum": [ + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "Video resolution. Valid values: 720p, 1080p", + "default": "1080p" + }, + "duration": { + "enum": [ + "5", + "10", + "15" + ], + "title": "Duration", + "type": "string", + "examples": [ + "5", + "10", + "15" + ], + "description": "Duration of the generated video in seconds. Choose between 5, 10 or 15 seconds.", + "default": "5" + }, + "audio_url": { + "title": "Audio Url", + "type": "string", + "description": "\nURL of the audio to use as the background music. Must be publicly accessible.\nLimit handling: If the audio duration exceeds the duration value (5, 10, or 15 seconds),\nthe audio is truncated to the first N seconds, and the rest is discarded. If\nthe audio is shorter than the video, the remaining part of the video will be silent.\nFor example, if the audio is 3 seconds long and the video duration is 5 seconds, the\nfirst 3 seconds of the output video will have sound, and the last 2 seconds will be silent.\n- Format: WAV, MP3.\n- Duration: 3 to 30 s.\n- File size: Up to 15 MB.\n" + }, + "image_url": { + "examples": [ + "https://v3b.fal.media/files/b/0a8673dd/m9EV5W9aSqg8J7rb-18TK.png" + ], + "title": "Image URL", + "type": "string", + "description": "URL of the image to use as the first frame. Must be publicly accessible or base64 data URI. Image dimensions must be between 240 and 7680." + }, + "enable_prompt_expansion": { + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt rewriting using LLM.", + "default": true + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility. If None, a random seed is chosen." + }, + "multi_shots": { + "title": "Multi Shots", + "type": "boolean", + "description": "When true, enables intelligent multi-shot segmentation. Only active when enable_prompt_expansion is True. Set to false for single-shot generation.", + "default": false + }, + "negative_prompt": { + "examples": [ + "low resolution, error, worst quality, low quality, defects" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt to describe content to avoid. 
Max 500 characters.", + "default": "" + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + } + }, + "description": "Input for Wan 2.6 image-to-video generation", + "x-fal-order-properties": [ + "prompt", + "image_url", + "audio_url", + "resolution", + "duration", + "negative_prompt", + "enable_prompt_expansion", + "multi_shots", + "seed", + "enable_safety_checker" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "V26ImageToVideoFlashOutput": { + "title": "ImageToVideoOutput", + "type": "object", + "properties": { + "actual_prompt": { + "examples": [ + "A comedic cinematic scene where the creator interacts with AI-generated reality transformations." + ], + "title": "Actual Prompt", + "type": "string", + "description": "The actual prompt used if prompt rewriting was enabled" + }, + "seed": { + "examples": [ + 175932751 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + }, + "video": { + "examples": [ + { + "content_type": "video/mp4", + "url": "https://v3b.fal.media/files/b/0a8675cf/bCu9FiFXSjsSnIwOmjUOY_BVs2IFR3.mp4" + } + ], + "title": "Video", + "description": "The generated video file", + "allOf": [ + { + "$ref": "#/components/schemas/VideoFile" + } + ] + } + }, + "description": "Output for image-to-video generation", + "x-fal-order-properties": [ + "video", + "seed", + "actual_prompt" + ], + "required": [ + "video", + "seed" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the video" + }, + "duration": { + "title": "Duration", + "type": "number", + "description": "The duration of the video" + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "fps": { + "title": "Fps", + "type": "number", + "description": "The FPS of the video" + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the video" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "num_frames": { + "title": "Num Frames", + "type": "integer", + "description": "The number of frames in the video" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/wan/v2.6/image-to-video/flash/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/wan/v2.6/image-to-video/flash/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/wan/v2.6/image-to-video/flash": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/V26ImageToVideoFlashInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/wan/v2.6/image-to-video/flash/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/V26ImageToVideoFlashOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx-2-19b/distilled/image-to-video/lora", + "metadata": { + "display_name": "LTX-2 19B Distilled", + "category": "image-to-video", + "description": "Generate video with audio from images using LTX-2 Distilled and custom LoRA", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:42.099Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8936e5/-h8aDgobm8qIgSCkOXrcN_6f4888dc7dee4cc9a61ba27199f64c9a.jpg", + "model_url": "https://fal.run/fal-ai/ltx-2-19b/distilled/image-to-video/lora", + "license_type": "commercial", + "date": "2026-01-05T21:02:29.284Z", + "group": { + "key": "ltx-2-19b-distilled", + "label": "Image to Video with LoRA" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-2-19b/distilled/image-to-video/lora", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx-2-19b/distilled/image-to-video/lora queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-2-19b/distilled/image-to-video/lora", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8936e5/-h8aDgobm8qIgSCkOXrcN_6f4888dc7dee4cc9a61ba27199f64c9a.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx-2-19b/distilled/image-to-video/lora", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx-2-19b/distilled/image-to-video/lora/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { 
+ "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Ltx219bDistilledImageToVideoLoraInput": { + "title": "LTX2LoRADistilledImageToVideoInput", + "type": "object", + "properties": { + "use_multiscale": { + "title": "Use Multi-Scale", + "type": "boolean", + "description": "Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details.", + "default": true + }, + "prompt": { + "examples": [ + "A woman stands still amid a busy neon-lit street at night. The camera slowly dollies in toward her face as people blur past, their motion emphasizing her calm presence. City lights flicker and reflections shift across her denim jacket." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for the generation." + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high", + "full" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use.", + "examples": [ + "none" + ], + "default": "none" + }, + "generate_audio": { + "title": "Generate Audio", + "type": "boolean", + "description": "Whether to generate audio for the video.", + "default": true + }, + "fps": { + "minimum": 1, + "title": "FPS", + "type": "number", + "description": "The frames per second of the generated video.", + "maximum": 60, + "default": 25 + }, + "loras": { + "title": "LoRAs", + "type": "array", + "description": "The LoRAs to use for the generation.", + "items": { + "$ref": "#/components/schemas/LoRAInput" + } + }, + "camera_lora": { + "enum": [ + "dolly_in", + "dolly_out", + "dolly_left", + "dolly_right", + "jib_up", + "jib_down", + "static", + "none" + ], + "title": "Camera LoRA", + "type": "string", + "description": "The camera LoRA to use. 
This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.", + "examples": [ + "none" + ], + "default": "none" + }, + "video_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "auto", + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated video.", + "title": "Video Size", + "default": "auto" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "num_frames": { + "description": "The number of frames to generate.", + "type": "integer", + "minimum": 9, + "title": "Number of Frames", + "maximum": 481, + "default": 121 + }, + "image_strength": { + "minimum": 0, + "title": "Image Strength", + "type": "number", + "description": "The strength of the image to use for the video generation.", + "maximum": 1, + "default": 1 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to generate the video from.", + "default": "blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, off-sync audio,incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts." + }, + "camera_lora_scale": { + "minimum": 0, + "title": "Camera LoRA Scale", + "type": "number", + "description": "The scale of the camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.", + "maximum": 1, + "default": 1 + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "description": "The write mode of the generated video.", + "default": "balanced" + }, + "video_output_type": { + "enum": [ + "X264 (.mp4)", + "VP9 (.webm)", + "PRORES4444 (.mov)", + "GIF (.gif)" + ], + "title": "Video Output Type", + "type": "string", + "description": "The output type of the generated video.", + "default": "X264 (.mp4)" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/ltxv-2-i2v-input.jpg" + ], + "title": "Image URL", + "type": "string", + "description": "The URL of the image to generate the video from." 
+ }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "description": "The quality of the generated video.", + "default": "high" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_prompt_expansion": { + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The seed for the random number generator.", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "num_frames", + "video_size", + "generate_audio", + "use_multiscale", + "fps", + "acceleration", + "camera_lora", + "camera_lora_scale", + "negative_prompt", + "seed", + "enable_prompt_expansion", + "enable_safety_checker", + "video_output_type", + "video_quality", + "video_write_mode", + "sync_mode", + "loras", + "image_strength" + ], + "required": [ + "loras", + "prompt", + "image_url" + ] + }, + "Ltx219bDistilledImageToVideoLoraOutput": { + "title": "LTX2ImageToVideoOutput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Continue the scene naturally, maintaining the same style and motion." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for the generation." + }, + "seed": { + "examples": [ + 866232447 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used for the random number generator." + }, + "video": { + "examples": [ + { + "height": 704, + "duration": 10.28, + "url": "https://v3b.fal.media/files/b/0a88289e/CJcQGDrxOSRg2YFl5GNDt_glXPMoji.mp4", + "width": 1248, + "fps": 25, + "file_name": "CJcQGDrxOSRg2YFl5GNDt_glXPMoji.mp4", + "content_type": "video/mp4", + "num_frames": 257 + } + ], + "description": "The generated video.", + "$ref": "#/components/schemas/VideoFile" + } + }, + "x-fal-order-properties": [ + "video", + "seed", + "prompt" + ], + "required": [ + "video", + "seed", + "prompt" + ] + }, + "LoRAInput": { + "title": "LoRAInput", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL, HuggingFace repo ID (owner/repo) to lora weights." + }, + "scale": { + "minimum": 0, + "title": "Scale", + "type": "number", + "description": "Scale factor for LoRA application (0.0 to 4.0).", + "maximum": 4, + "default": 1 + }, + "weight_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Name of the LoRA weight. 
Only used if `path` is a HuggingFace repository, and is only required when the repository contains multiple LoRA weights.", + "title": "Weight Name" + } + }, + "x-fal-order-properties": [ + "path", + "weight_name", + "scale" + ], + "description": "LoRA weight configuration.", + "required": [ + "path" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "duration": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The duration of the video", + "title": "Duration" + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The height of the video", + "title": "Height" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The width of the video", + "title": "Width" + }, + "fps": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The FPS of the video", + "title": "Fps" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "num_frames": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The number of frames in the video", + "title": "Num Frames" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-2-19b/distilled/image-to-video/lora/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/distilled/image-to-video/lora/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/distilled/image-to-video/lora": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx219bDistilledImageToVideoLoraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/distilled/image-to-video/lora/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx219bDistilledImageToVideoLoraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx-2-19b/distilled/image-to-video", + "metadata": { + "display_name": "LTX-2 19B Distilled", + "category": "image-to-video", + "description": "Generate video with audio from images using LTX-2 Distilled", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:42.227Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8936e5/XvktxWU-OjPy48InEFQEs_bd9da8a5712846b694091a93ff5b1df0.jpg", + "model_url": "https://fal.run/fal-ai/ltx-2-19b/distilled/image-to-video", + "license_type": "commercial", + "date": "2026-01-05T21:00:40.069Z", + "group": { + "key": "ltx-2-19b-distilled", + "label": "Image to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/ltx2-video-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/ltx2-video-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-2-19b/distilled/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx-2-19b/distilled/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-2-19b/distilled/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8936e5/XvktxWU-OjPy48InEFQEs_bd9da8a5712846b694091a93ff5b1df0.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx-2-19b/distilled/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx-2-19b/distilled/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": 
"object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Ltx219bDistilledImageToVideoInput": { + "title": "LTX2DistilledImageToVideoInput", + "type": "object", + "properties": { + "use_multiscale": { + "title": "Use Multi-Scale", + "type": "boolean", + "description": "Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details.", + "default": true + }, + "prompt": { + "examples": [ + "A woman stands still amid a busy neon-lit street at night. The camera slowly dollies in toward her face as people blur past, their motion emphasizing her calm presence. City lights flicker and reflections shift across her denim jacket." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for the generation." + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high", + "full" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use.", + "examples": [ + "none" + ], + "default": "none" + }, + "generate_audio": { + "title": "Generate Audio", + "type": "boolean", + "description": "Whether to generate audio for the video.", + "default": true + }, + "fps": { + "minimum": 1, + "title": "FPS", + "type": "number", + "description": "The frames per second of the generated video.", + "maximum": 60, + "default": 25 + }, + "camera_lora": { + "enum": [ + "dolly_in", + "dolly_out", + "dolly_left", + "dolly_right", + "jib_up", + "jib_down", + "static", + "none" + ], + "title": "Camera LoRA", + "type": "string", + "description": "The camera LoRA to use. 
This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.", + "examples": [ + "none" + ], + "default": "none" + }, + "video_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "auto", + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated video.", + "title": "Video Size", + "default": "auto" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "num_frames": { + "description": "The number of frames to generate.", + "type": "integer", + "minimum": 9, + "title": "Number of Frames", + "maximum": 481, + "default": 121 + }, + "image_strength": { + "minimum": 0, + "title": "Image Strength", + "type": "number", + "description": "The strength of the image to use for the video generation.", + "maximum": 1, + "default": 1 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to generate the video from.", + "default": "blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, off-sync audio,incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts." + }, + "camera_lora_scale": { + "minimum": 0, + "title": "Camera LoRA Scale", + "type": "number", + "description": "The scale of the camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.", + "maximum": 1, + "default": 1 + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "description": "The write mode of the generated video.", + "default": "balanced" + }, + "video_output_type": { + "enum": [ + "X264 (.mp4)", + "VP9 (.webm)", + "PRORES4444 (.mov)", + "GIF (.gif)" + ], + "title": "Video Output Type", + "type": "string", + "description": "The output type of the generated video.", + "default": "X264 (.mp4)" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/ltxv-2-i2v-input.jpg" + ], + "title": "Image URL", + "type": "string", + "description": "The URL of the image to generate the video from." 
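The video_size field in this input schema is a union: either an explicit ImageSize object or one of the preset strings. A TypeScript sketch of that shape, with illustrative dimension values:

type ImageSize = { width: number; height: number } // each > 0 and <= 14142 per the schema
type VideoSize =
  | ImageSize
  | 'auto'
  | 'square_hd'
  | 'square'
  | 'portrait_4_3'
  | 'portrait_16_9'
  | 'landscape_4_3'
  | 'landscape_16_9'

const explicit: VideoSize = { width: 1248, height: 704 } // dimensions are illustrative
const preset: VideoSize = 'landscape_16_9'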
+ }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "description": "The quality of the generated video.", + "default": "high" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_prompt_expansion": { + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The seed for the random number generator.", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "num_frames", + "video_size", + "generate_audio", + "use_multiscale", + "fps", + "acceleration", + "camera_lora", + "camera_lora_scale", + "negative_prompt", + "seed", + "enable_prompt_expansion", + "enable_safety_checker", + "video_output_type", + "video_quality", + "video_write_mode", + "sync_mode", + "image_strength" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "Ltx219bDistilledImageToVideoOutput": { + "title": "LTX2ImageToVideoOutput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Continue the scene naturally, maintaining the same style and motion." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for the generation." + }, + "seed": { + "examples": [ + 866232447 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used for the random number generator." + }, + "video": { + "examples": [ + { + "height": 704, + "duration": 10.28, + "url": "https://v3b.fal.media/files/b/0a88289e/CJcQGDrxOSRg2YFl5GNDt_glXPMoji.mp4", + "width": 1248, + "fps": 25, + "file_name": "CJcQGDrxOSRg2YFl5GNDt_glXPMoji.mp4", + "content_type": "video/mp4", + "num_frames": 257 + } + ], + "description": "The generated video.", + "$ref": "#/components/schemas/VideoFile" + } + }, + "x-fal-order-properties": [ + "video", + "seed", + "prompt" + ], + "required": [ + "video", + "seed", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "duration": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The duration of the video", + "title": "Duration" + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The height of the video", + "title": "Height" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The width of the video", + "title": "Width" + }, + "fps": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The FPS of the video", + "title": "Fps" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "num_frames": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The number of frames in the video", + "title": "Num Frames" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-2-19b/distilled/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/distilled/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
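The status and cancel routes above, together with the submit and result routes that follow, form the standard fal queue lifecycle: POST to enqueue, poll the status route, then GET the request to read the result. A hedged sketch, assuming Node 18+ fetch, a FAL_KEY environment variable, and the "Key" header prefix (not stated in this schema):

const ENDPOINT = 'https://queue.fal.run/fal-ai/ltx-2-19b/distilled/image-to-video'
const headers = {
  Authorization: `Key ${process.env.FAL_KEY}`, // "Key" prefix assumed
  'Content-Type': 'application/json',
}

async function generateVideo(prompt: string, imageUrl: string) {
  // Enqueue with only the two required fields; everything else uses schema defaults.
  const submit = await fetch(ENDPOINT, {
    method: 'POST',
    headers,
    body: JSON.stringify({ prompt, image_url: imageUrl }),
  })
  const { request_id } = (await submit.json()) as { request_id: string }

  // Poll the status route until COMPLETED.
  for (;;) {
    const res = await fetch(`${ENDPOINT}/requests/${request_id}/status`, { headers })
    const { status } = (await res.json()) as { status: string }
    if (status === 'COMPLETED') break
    await new Promise((r) => setTimeout(r, 2_000))
  }

  // Fetch the result, typed after Ltx219bDistilledImageToVideoOutput.
  const result = await fetch(`${ENDPOINT}/requests/${request_id}`, { headers })
  return (await result.json()) as {
    video: { url: string }
    seed: number
    prompt: string
  }
}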
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/distilled/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx219bDistilledImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/distilled/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx219bDistilledImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx-2-19b/image-to-video/lora", + "metadata": { + "display_name": "LTX-2 19B", + "category": "image-to-video", + "description": "Generate video with audio from images using LTX-2 and custom LoRA", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:42.872Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a89365f/rZwl5Yceudvyc6Cd3Y_KN_eb6ff726b82948398bb475dca6cd1b9d.jpg", + "model_url": "https://fal.run/fal-ai/ltx-2-19b/image-to-video/lora", + "license_type": "commercial", + "date": "2026-01-05T20:33:07.281Z", + "group": { + "key": "ltx-2-19b", + "label": "Image to Video with LoRA" + }, + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/ltx2-video-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/ltx2-video-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-2-19b/image-to-video/lora", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx-2-19b/image-to-video/lora queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-2-19b/image-to-video/lora", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a89365f/rZwl5Yceudvyc6Cd3Y_KN_eb6ff726b82948398bb475dca6cd1b9d.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx-2-19b/image-to-video/lora", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx-2-19b/image-to-video/lora/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Ltx219bImageToVideoLoraInput": { + "title": "LTX2LoRAImageToVideoInput", + "type": "object", + "properties": { + "use_multiscale": { + "title": "Use Multi-Scale", + "type": "boolean", + "description": "Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details.", + "default": true + }, + "prompt": { + "examples": [ + "A woman stands still amid a busy neon-lit street at night. The camera slowly dollies in toward her face as people blur past, their motion emphasizing her calm presence. City lights flicker and reflections shift across her denim jacket." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for the generation." + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high", + "full" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use.", + "examples": [ + "regular" + ], + "default": "regular" + }, + "generate_audio": { + "title": "Generate Audio", + "type": "boolean", + "description": "Whether to generate audio for the video.", + "default": true + }, + "fps": { + "minimum": 1, + "title": "FPS", + "type": "number", + "description": "The frames per second of the generated video.", + "maximum": 60, + "default": 25 + }, + "loras": { + "title": "LoRAs", + "type": "array", + "description": "The LoRAs to use for the generation.", + "items": { + "$ref": "#/components/schemas/LoRAInput" + } + }, + "camera_lora": { + "enum": [ + "dolly_in", + "dolly_out", + "dolly_left", + "dolly_right", + "jib_up", + "jib_down", + "static", + "none" + ], + "title": "Camera LoRA", + "type": "string", + "description": "The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.", + "examples": [ + "none" + ], + "default": "none" + }, + "video_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "auto", + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated video.", + "title": "Video Size", + "default": "auto" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "camera_lora_scale": { + "minimum": 0, + "title": "Camera LoRA Scale", + "type": "number", + "description": "The scale of the camera LoRA to use. 
This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.", + "maximum": 1, + "default": 1 + }, + "image_strength": { + "minimum": 0, + "title": "Image Strength", + "type": "number", + "description": "The strength of the image to use for the video generation.", + "maximum": 1, + "default": 1 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to generate the video from.", + "default": "blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, off-sync audio,incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts." + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance Scale", + "type": "number", + "description": "The guidance scale to use.", + "maximum": 10, + "default": 3 + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "description": "The write mode of the generated video.", + "default": "balanced" + }, + "video_output_type": { + "enum": [ + "X264 (.mp4)", + "VP9 (.webm)", + "PRORES4444 (.mov)", + "GIF (.gif)" + ], + "title": "Video Output Type", + "type": "string", + "description": "The output type of the generated video.", + "default": "X264 (.mp4)" + }, + "num_frames": { + "description": "The number of frames to generate.", + "type": "integer", + "minimum": 9, + "title": "Number of Frames", + "maximum": 481, + "default": 121 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/ltxv-2-i2v-input.jpg" + ], + "title": "Image URL", + "type": "string", + "description": "The URL of the image to generate the video from." 
+ }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "description": "The quality of the generated video.", + "default": "high" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_prompt_expansion": { + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + }, + "num_inference_steps": { + "minimum": 8, + "title": "Number of Inference Steps", + "type": "integer", + "description": "The number of inference steps to use.", + "maximum": 50, + "default": 40 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The seed for the random number generator.", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "num_frames", + "video_size", + "generate_audio", + "use_multiscale", + "fps", + "guidance_scale", + "num_inference_steps", + "acceleration", + "camera_lora", + "camera_lora_scale", + "negative_prompt", + "seed", + "enable_prompt_expansion", + "enable_safety_checker", + "video_output_type", + "video_quality", + "video_write_mode", + "sync_mode", + "loras", + "image_strength" + ], + "required": [ + "loras", + "prompt", + "image_url" + ] + }, + "Ltx219bImageToVideoLoraOutput": { + "title": "LTX2ImageToVideoOutput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Continue the scene naturally, maintaining the same style and motion." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for the generation." + }, + "seed": { + "examples": [ + 866232447 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used for the random number generator." + }, + "video": { + "examples": [ + { + "height": 704, + "duration": 10.28, + "url": "https://v3b.fal.media/files/b/0a88289e/CJcQGDrxOSRg2YFl5GNDt_glXPMoji.mp4", + "width": 1248, + "fps": 25, + "file_name": "CJcQGDrxOSRg2YFl5GNDt_glXPMoji.mp4", + "content_type": "video/mp4", + "num_frames": 257 + } + ], + "description": "The generated video.", + "$ref": "#/components/schemas/VideoFile" + } + }, + "x-fal-order-properties": [ + "video", + "seed", + "prompt" + ], + "required": [ + "video", + "seed", + "prompt" + ] + }, + "LoRAInput": { + "title": "LoRAInput", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL, HuggingFace repo ID (owner/repo) to lora weights." + }, + "scale": { + "minimum": 0, + "title": "Scale", + "type": "number", + "description": "Scale factor for LoRA application (0.0 to 4.0).", + "maximum": 4, + "default": 1 + }, + "weight_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Name of the LoRA weight. 
Only used if `path` is a HuggingFace repository, and is only required when the repository contains multiple LoRA weights.", + "title": "Weight Name" + } + }, + "x-fal-order-properties": [ + "path", + "weight_name", + "scale" + ], + "description": "LoRA weight configuration.", + "required": [ + "path" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "duration": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The duration of the video", + "title": "Duration" + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The height of the video", + "title": "Height" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The width of the video", + "title": "Width" + }, + "fps": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The FPS of the video", + "title": "Fps" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "num_frames": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The number of frames in the video", + "title": "Num Frames" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-2-19b/image-to-video/lora/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
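The LoRAInput schema above drives the required loras array that distinguishes this endpoint from the base one. A sketch of a valid list; the URL, repo ID, and weight file name below are placeholders, not real weights:

type LoRAInput = {
  path: string // URL or HuggingFace repo ID (owner/repo)
  weight_name?: string | null // only needed when the repo holds several weights
  scale?: number // 0..4, default 1
}

const loras: Array<LoRAInput> = [
  { path: 'https://example.com/my-style.safetensors', scale: 0.8 }, // placeholder URL
  {
    path: 'some-owner/some-lora-repo', // placeholder repo ID
    weight_name: 'pytorch_lora_weights.safetensors', // placeholder file name
    scale: 1,
  },
]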
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/image-to-video/lora/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/image-to-video/lora": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx219bImageToVideoLoraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/image-to-video/lora/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx219bImageToVideoLoraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx-2-19b/image-to-video", + "metadata": { + "display_name": "LTX-2 19B", + "category": "image-to-video", + "description": "Generate video with audio from images using LTX-2", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:43.270Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8935fe/Ezbvf27opeW6gEoDS4nlw_da7064399f9c4342b5e118f6875ec389.jpg", + "model_url": "https://fal.run/fal-ai/ltx-2-19b/image-to-video", + "license_type": "commercial", + "date": "2026-01-05T20:18:20.548Z", + "group": { + "key": "ltx-2-19b", + "label": "Image to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/ltx2-video-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/ltx2-video-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-2-19b/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx-2-19b/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-2-19b/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8935fe/Ezbvf27opeW6gEoDS4nlw_da7064399f9c4342b5e118f6875ec389.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx-2-19b/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx-2-19b/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + 
"type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Ltx219bImageToVideoInput": { + "title": "LTX2ImageToVideoInput", + "type": "object", + "properties": { + "use_multiscale": { + "title": "Use Multi-Scale", + "type": "boolean", + "description": "Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details.", + "default": true + }, + "prompt": { + "examples": [ + "A woman stands still amid a busy neon-lit street at night. The camera slowly dollies in toward her face as people blur past, their motion emphasizing her calm presence. City lights flicker and reflections shift across her denim jacket." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for the generation." + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high", + "full" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use.", + "examples": [ + "regular" + ], + "default": "regular" + }, + "generate_audio": { + "title": "Generate Audio", + "type": "boolean", + "description": "Whether to generate audio for the video.", + "default": true + }, + "fps": { + "minimum": 1, + "title": "FPS", + "type": "number", + "description": "The frames per second of the generated video.", + "maximum": 60, + "default": 25 + }, + "camera_lora": { + "enum": [ + "dolly_in", + "dolly_out", + "dolly_left", + "dolly_right", + "jib_up", + "jib_down", + "static", + "none" + ], + "title": "Camera LoRA", + "type": "string", + "description": "The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.", + "examples": [ + "none" + ], + "default": "none" + }, + "video_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "auto", + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated video.", + "title": "Video Size", + "default": "auto" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "camera_lora_scale": { + "minimum": 0, + "title": "Camera LoRA Scale", + "type": "number", + "description": "The scale of the camera LoRA to use. 
This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.", + "maximum": 1, + "default": 1 + }, + "image_strength": { + "minimum": 0, + "title": "Image Strength", + "type": "number", + "description": "The strength of the image to use for the video generation.", + "maximum": 1, + "default": 1 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to generate the video from.", + "default": "blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, off-sync audio,incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts." + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance Scale", + "type": "number", + "description": "The guidance scale to use.", + "maximum": 10, + "default": 3 + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "description": "The write mode of the generated video.", + "default": "balanced" + }, + "video_output_type": { + "enum": [ + "X264 (.mp4)", + "VP9 (.webm)", + "PRORES4444 (.mov)", + "GIF (.gif)" + ], + "title": "Video Output Type", + "type": "string", + "description": "The output type of the generated video.", + "default": "X264 (.mp4)" + }, + "num_frames": { + "description": "The number of frames to generate.", + "type": "integer", + "minimum": 9, + "title": "Number of Frames", + "maximum": 481, + "default": 121 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/ltxv-2-i2v-input.jpg" + ], + "title": "Image URL", + "type": "string", + "description": "The URL of the image to generate the video from." 
+ }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "description": "The quality of the generated video.", + "default": "high" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_prompt_expansion": { + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + }, + "num_inference_steps": { + "minimum": 8, + "title": "Number of Inference Steps", + "type": "integer", + "description": "The number of inference steps to use.", + "maximum": 50, + "default": 40 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The seed for the random number generator.", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "num_frames", + "video_size", + "generate_audio", + "use_multiscale", + "fps", + "guidance_scale", + "num_inference_steps", + "acceleration", + "camera_lora", + "camera_lora_scale", + "negative_prompt", + "seed", + "enable_prompt_expansion", + "enable_safety_checker", + "video_output_type", + "video_quality", + "video_write_mode", + "sync_mode", + "image_strength" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "Ltx219bImageToVideoOutput": { + "title": "LTX2ImageToVideoOutput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Continue the scene naturally, maintaining the same style and motion." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for the generation." + }, + "seed": { + "examples": [ + 866232447 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used for the random number generator." 
+ }, + "video": { + "examples": [ + { + "height": 704, + "duration": 10.28, + "url": "https://v3b.fal.media/files/b/0a88289e/CJcQGDrxOSRg2YFl5GNDt_glXPMoji.mp4", + "width": 1248, + "fps": 25, + "file_name": "CJcQGDrxOSRg2YFl5GNDt_glXPMoji.mp4", + "content_type": "video/mp4", + "num_frames": 257 + } + ], + "description": "The generated video.", + "$ref": "#/components/schemas/VideoFile" + } + }, + "x-fal-order-properties": [ + "video", + "seed", + "prompt" + ], + "required": [ + "video", + "seed", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "duration": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The duration of the video", + "title": "Duration" + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The height of the video", + "title": "Height" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The width of the video", + "title": "Width" + }, + "fps": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The FPS of the video", + "title": "Fps" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "num_frames": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The number of frames in the video", + "title": "Num Frames" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-2-19b/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx219bImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx219bImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan-move", + "metadata": { + "display_name": "Wan Move [480p]", + "category": "image-to-video", + "description": "Use Wan-Move to generate videos with controlled the motion using trajectories", + "status": "active", + "tags": [ + "image-to-video", + "motion-control", + "motion" + ], + "updated_at": "2026-01-26T21:41:46.447Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a87a28e/4m7n8IspYT6G8xgRmi4bP_3ddaa7037cec429d916f965bed0613f9.jpg", + "model_url": "https://fal.run/fal-ai/wan-move", + "license_type": "commercial", + "date": "2025-12-24T21:24:00.028Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan-move", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan-move queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan-move", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a87a28e/4m7n8IspYT6G8xgRmi4bP_3ddaa7037cec429d916f965bed0613f9.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan-move", + "documentationUrl": "https://fal.ai/models/fal-ai/wan-move/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." 
+ }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanMoveInput": { + "title": "WANMoveInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "In a boxing gym, the camera captures a half-body view of a single man practicing shadow boxing inside the ring. The camera sways slightly, emphasizing movement and intensity. He wears black boxing gloves, black shorts, and boxing shoes. With no opponent present, he throws a controlled straight punch into the air, his lead arm fully extended while the other hand stays tight near his face in a defensive guard. His body rotates subtly through the hips and shoulders, and his feet shift lightly across the mat to maintain balance and rhythm. Sweat highlights his muscular form as he focuses forward, fully immersed in his training. The background features blurred gym equipment and hanging heavy bags, reinforcing the sense of motion and concentration." + ], + "title": "Prompt", + "type": "string", + "description": "Text prompt to guide the video generation." + }, + "trajectories": { + "examples": [ + [ + [ + { + "speed": 1, + "y": 286, + "x": 334 + }, + { + "speed": 1, + "y": 322, + "x": 334 + }, + { + "speed": 1, + "y": 328, + "x": 372 + }, + { + "speed": 1, + "y": 338, + "x": 447 + }, + { + "speed": 1, + "y": 328, + "x": 518 + }, + { + "speed": 1, + "y": 283, + "x": 481 + }, + { + "speed": 1, + "y": 263, + "x": 440 + }, + { + "speed": 1, + "y": 252, + "x": 385 + }, + { + "speed": 1, + "y": 252, + "x": 347 + } + ], + [ + { + "speed": 1, + "y": 262, + "x": 555 + }, + { + "speed": 1, + "y": 242, + "x": 505 + }, + { + "speed": 1, + "y": 243, + "x": 466 + }, + { + "speed": 1, + "y": 264, + "x": 440 + }, + { + "speed": 1, + "y": 287, + "x": 439 + }, + { + "speed": 1, + "y": 330, + "x": 471 + }, + { + "speed": 1, + "y": 345, + "x": 541 + }, + { + "speed": 1, + "y": 335, + "x": 572 + }, + { + "speed": 1, + "y": 303, + "x": 589 + } + ] + ] + ], + "title": "Trajectories", + "type": "array", + "description": "A list of trajectories. Each trajectory list means the movement of one object.", + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/TrajectoryPoint" + } + } + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/wan_move/input.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped." + }, + "guidance_scale": { + "exclusiveMaximum": 10, + "title": "Guidance Scale", + "type": "number", + "minimum": 1, + "description": "Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.", + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 2, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "default": 40 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility. If None, a random seed is chosen." 
+ }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt to guide the video generation.", + "default": "色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走" + } + }, + "x-fal-order-properties": [ + "image_url", + "prompt", + "trajectories", + "num_inference_steps", + "guidance_scale", + "negative_prompt", + "seed" + ], + "required": [ + "image_url", + "prompt", + "trajectories" + ] + }, + "WanMoveOutput": { + "title": "WanMoveOutput", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed used for generation." + }, + "video": { + "examples": [ + { + "height": 624, + "duration": 5.0626, + "url": "https://storage.googleapis.com/falserverless/example_outputs/wan_move/output.mp4", + "width": 624, + "fps": 16, + "file_name": "output.mp4", + "content_type": "video/mp4", + "num_frames": 81 + } + ], + "title": "Video", + "description": "Generated Video File", + "allOf": [ + { + "$ref": "#/components/schemas/VideoFile" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the video" + }, + "duration": { + "title": "Duration", + "type": "number", + "description": "The duration of the video" + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the video" + }, + "fps": { + "title": "Fps", + "type": "number", + "description": "The FPS of the video" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "num_frames": { + "title": "Num Frames", + "type": "integer", + "description": "The number of frames in the video" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan-move/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-move/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/wan-move": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanMoveInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-move/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanMoveOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kandinsky5-pro/image-to-video", + "metadata": { + "display_name": "Kandinsky5 Pro", + "category": "image-to-video", + "description": "Kandinsky 5.0 Pro is a diffusion model for fast, high-quality image-to-video generation.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:48.037Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a877601/AN-bEqsfOmQj46SKwy4Ms_5288905eb5e744f99cb3ac61339a2bd9.jpg", + "model_url": "https://fal.run/fal-ai/kandinsky5-pro/image-to-video", + "license_type": "commercial", + "date": "2025-12-23T13:42:41.411Z", + "group": { + "key": "kandinsky5-pro", + "label": "Image to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kandinsky5-pro/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kandinsky5-pro/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kandinsky5-pro/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a877601/AN-bEqsfOmQj46SKwy4Ms_5288905eb5e744f99cb3ac61339a2bd9.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/kandinsky5-pro/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/kandinsky5-pro/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Kandinsky5ProImageToVideoInput": { + "x-fal-order-properties": [ + "prompt", + "image_url", + "resolution", + "duration", + "num_inference_steps", + "acceleration" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "The white dragon warrior stands still, eyes full of determination and strength. The camera slowly moves closer or circles around the warrior, highlighting the powerful presence and heroic spirit of the character." + ], + "description": "The prompt to generate the video from.", + "type": "string", + "title": "Prompt" + }, + "resolution": { + "enum": [ + "512P", + "1024P" + ], + "description": "Video resolution: 512p or 1024p.", + "type": "string", + "title": "Resolution", + "default": "512P" + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "description": "Acceleration level for faster generation.", + "type": "string", + "title": "Acceleration", + "default": "regular" + }, + "duration": { + "enum": [ + "5s" + ], + "description": "Video duration.", + "type": "string", + "title": "Duration", + "default": "5s" + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 40, + "default": 28 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/wan/dragon-warrior.jpg" + ], + "description": "The URL of the image to use as a reference for the video generation.", + "type": "string", + "title": "Image Url" + } + }, + "title": "KandinskyI2VRequest", + "required": [ + "prompt", + "image_url" + ] + }, + "Kandinsky5ProImageToVideoOutput": { + "x-fal-order-properties": [ + "video" + ], + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 22253751, + "file_name": "output.mp4", + "content_type": "application/octet-stream", + "url": "https://v3b.fal.media/files/b/0a877276/Bg24FK_awlNAYKn962Vm0_output.mp4" + } + ], + "description": "The generated video file.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "KandinskyI2VResponse" + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kandinsky5-pro/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kandinsky5-pro/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kandinsky5-pro/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Kandinsky5ProImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kandinsky5-pro/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Kandinsky5ProImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/bytedance/seedance/v1.5/pro/image-to-video", + "metadata": { + "display_name": "Bytedance", + "category": "image-to-video", + "description": "Generate videos with audio using Seedance 1.5 (supports start and end frames).", + "status": "active", + "tags": [ + "bytedance", + "seedance", + "audio" + ], + "updated_at": "2026-01-26T21:41:48.450Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a876c74/q6J8OsmO08A3zCFGCtsUj_cb48562eeef345b29269689dd4f95671.jpg", + "model_url": "https://fal.run/fal-ai/bytedance/seedance/v1.5/pro/image-to-video", + "license_type": "commercial", + "date": "2025-12-23T06:56:11.614Z", + "group": { + "key": "seedance-v15", + "label": "Image to Video Pro v1.5" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + },
"openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/bytedance/seedance/v1.5/pro/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/bytedance/seedance/v1.5/pro/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/bytedance/seedance/v1.5/pro/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a876c74/q6J8OsmO08A3zCFGCtsUj_cb48562eeef345b29269689dd4f95671.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/bytedance/seedance/v1.5/pro/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/bytedance/seedance/v1.5/pro/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "BytedanceSeedanceV15ProImageToVideoInput": { + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "resolution", + "duration", + "camera_fixed", + "seed", + "enable_safety_checker", + "generate_audio", + "image_url", + "end_image_url" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A man is crying and he says \"I shouldn't have done it. 
I regret everything\"" + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt used to generate the video" + }, + "resolution": { + "enum": [ + "480p", + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "Video resolution - 480p for faster generation, 720p for balance, 1080p for higher quality", + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "21:9", + "16:9", + "4:3", + "1:1", + "3:4", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video", + "default": "16:9" + }, + "generate_audio": { + "title": "Generate Audio", + "type": "boolean", + "description": "Whether to generate audio for the video", + "default": true + }, + "duration": { + "enum": [ + "4", + "5", + "6", + "7", + "8", + "9", + "10", + "11", + "12" + ], + "title": "Duration", + "type": "string", + "description": "Duration of the video in seconds", + "default": "5" + }, + "image_url": { + "examples": [ + "https://v3b.fal.media/files/b/0a8773cd/REzCWn1BKUVuMFTxR-R3W_image_317.png" + ], + "title": "Image Url", + "type": "string", + "description": "The URL of the image used to generate video" + }, + "enable_safety_checker": { + "examples": [ + true + ], + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "camera_fixed": { + "title": "Camera Fixed", + "type": "boolean", + "description": "Whether to fix the camera position", + "default": false + }, + "end_image_url": { + "title": "End Image Url", + "type": "string", + "description": "The URL of the image the video ends with. Defaults to None." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed to control video generation. Use -1 for random." + } + }, + "title": "SeedanceProv15ImageToVideoInput", + "required": [ + "prompt", + "image_url" + ] + }, + "BytedanceSeedanceV15ProImageToVideoOutput": { + "x-fal-order-properties": [ + "video", + "seed" + ], + "type": "object", + "properties": { + "seed": { + "examples": [ + 42 + ], + "title": "Seed", + "type": "integer", + "description": "Seed used for generation" + }, + "video": { + "examples": [ + { + "url": "https://v3b.fal.media/files/b/0a8773d3/l2fk-fIO_PQFPzbvHkQX1_video.mp4" + } + ], + "title": "Video", + "description": "Generated video file", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "SeedanceProv15I2VVideoOutput", + "required": [ + "video", + "seed" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/bytedance/seedance/v1.5/pro/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedance/v1.5/pro/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedance/v1.5/pro/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BytedanceSeedanceV15ProImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedance/v1.5/pro/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BytedanceSeedanceV15ProImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/live-avatar", + "metadata": { + "display_name": "Live Avatar", + "category": "image-to-video", + "description": "Real-time avatar generation with Live Avatar. 
Have natural face-to-face conversations with AI avatars that respond instantly—streaming infinite-length video with immediate visual feedback.", + "status": "active", + "tags": [ + "realtime", + "image-to-video", + "audio-to-video" + ], + "updated_at": "2026-01-26T21:41:49.283Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8759f2/jL9kXDwhxl7IXCfVbBeTP_1d4a5286b31c4ac082344ae85196c915.jpg", + "model_url": "https://fal.run/fal-ai/live-avatar", + "license_type": "commercial", + "date": "2025-12-22T17:45:14.539Z", + "group": { + "key": "liveavatar", + "label": "liveavatar" + }, + "highlighted": false, + "kind": "inference", + "stream_url": "https://fal.ai/models/fal-ai/live-avatar/stream", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/live-avatar", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/live-avatar queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/live-avatar", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8759f2/jL9kXDwhxl7IXCfVbBeTP_1d4a5286b31c4ac082344ae85196c915.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/live-avatar", + "documentationUrl": "https://fal.ai/models/fal-ai/live-avatar/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LiveAvatarInput": { + "title": "LiveAvatarRequest", + "type": "object", + "properties": { + "frames_per_clip": { + "description": "Number of frames per clip. Must be a multiple of 4. Higher values = smoother but slower generation.", + "type": "integer", + "minimum": 16, + "title": "Frames per Clip", + "maximum": 80, + "multipleOf": 4, + "default": 48 + }, + "prompt": { + "examples": [ + "A person speaking naturally with expressive gestures." + ], + "title": "Prompt", + "type": "string", + "description": "A text prompt describing the scene and character. Helps guide the video generation style and context." + }, + "acceleration": { + "enum": [ + "none", + "light", + "regular", + "high" + ], + "title": "Acceleration", + "type": "string", + "description": "Acceleration level for faster video decoding.", + "default": "none" + }, + "image_url": { + "examples": [ + "https://v3b.fal.media/files/b/0a86b14d/NzJ8cpbwpOJPEX8SMpqe4_cyclops_baker.jpg" + ], + "title": "Reference Image URL", + "type": "string", + "description": "The URL of the reference image for avatar generation. The character in this image will be animated."
+ }, + "num_clips": { + "minimum": 1, + "title": "Number of Clips", + "type": "integer", + "maximum": 100, + "description": "Number of video clips to generate. Each clip is approximately 3 seconds. Set higher for longer videos.", + "default": 10 + }, + "audio_url": { + "examples": [ + "https://v3b.fal.media/files/b/0a86b1d1/iHOKR5dlnHWW9UFfgL90b_tmp9fq30v29.wav" + ], + "title": "Audio URL", + "type": "string", + "description": "The URL of the driving audio file (WAV or MP3). The avatar will be animated to match this audio." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducible generation." + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance Scale", + "type": "number", + "maximum": 10, + "description": "Classifier-free guidance scale. Higher values follow the prompt more closely.", + "default": 0 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Enable safety checker for content moderation.", + "default": true + } + }, + "x-fal-order-properties": [ + "image_url", + "audio_url", + "prompt", + "num_clips", + "frames_per_clip", + "guidance_scale", + "seed", + "enable_safety_checker", + "acceleration" + ], + "required": [ + "image_url", + "audio_url", + "prompt" + ] + }, + "LiveAvatarOutput": { + "title": "LiveAvatarResponse", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "content_type": "video/mp4", + "url": "https://v3b.fal.media/files/b/0a86b1d5/tLG1qOjdIYYhJSqJGEMWs_output.mp4" + } + ], + "title": "Video", + "description": "The generated avatar video file with synchronized audio.", + "allOf": [ + { + "$ref": "#/components/schemas/VideoFile" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the video" + }, + "duration": { + "title": "Duration", + "type": "number", + "description": "The duration of the video" + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "fps": { + "title": "Fps", + "type": "number", + "description": "The FPS of the video" + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the video" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "num_frames": { + "title": "Num Frames", + "type": "integer", + "description": "The number of frames in the video" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/live-avatar/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/live-avatar/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/live-avatar": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LiveAvatarInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/live-avatar/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LiveAvatarOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/hunyuan-video-v1.5/image-to-video", + "metadata": { + "display_name": "Hunyuan Video V1.5", + "category": "image-to-video", + "description": "Hunyuan Video 1.5 is Tencent's latest and best video model", + "status": "active", + "tags": [ + "image-to-video" + ], + "updated_at": "2026-01-26T21:41:51.845Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/panda/yRuXc12ZmJxhXiGcd_1jW_9ebc91f64d1a44b7995e66b3410547e9.jpg", + "model_url": "https://fal.run/fal-ai/hunyuan-video-v1.5/image-to-video", + "license_type": "commercial", + "date": "2025-12-17T13:34:51.999Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/hunyuan-video-v1.5/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/hunyuan-video-v1.5/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/hunyuan-video-v1.5/image-to-video", + 
"category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/panda/yRuXc12ZmJxhXiGcd_1jW_9ebc91f64d1a44b7995e66b3410547e9.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/hunyuan-video-v1.5/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/hunyuan-video-v1.5/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "HunyuanVideoV15ImageToVideoInput": { + "title": "HunyuanVideo15I2VRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A stark starting line divides two powerful cars, engines revving for the challenge ahead. They surge forward in the heat of competition, a blur of speed and chrome. The finish line looms as they race for victory." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the video from." + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the video.", + "default": "16:9" + }, + "resolution": { + "enum": [ + "480p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the video.", + "default": "480p" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/panda/HnY2yf-BbzlrVQxR-qP6m_9912d0932988453aadf3912fc1901f52.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the reference image for image-to-video generation." + }, + "enable_prompt_expansion": { + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Enable prompt expansion to enhance the input prompt.", + "default": true + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility." 
+ }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps.", + "default": 28 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to guide what not to generate.", + "default": "" + }, + "num_frames": { + "minimum": 1, + "title": "Num Frames", + "type": "integer", + "maximum": 121, + "description": "The number of frames to generate.", + "default": 121 + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "num_inference_steps", + "seed", + "aspect_ratio", + "resolution", + "num_frames", + "enable_prompt_expansion", + "image_url" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "HunyuanVideoV15ImageToVideoOutput": { + "title": "HunyuanVideo15Response", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/hyvideo_v15_480p_output.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/hunyuan-video-v1.5/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-video-v1.5/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-video-v1.5/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HunyuanVideoV15ImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-video-v1.5/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HunyuanVideoV15ImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "wan/v2.6/image-to-video", + "metadata": { + "display_name": "Wan v2.6 Image to Video", + "category": "image-to-video", + "description": "Wan 2.6 image-to-video model.", + "status": "active", + "tags": [ + "image-to-video" + ], + "updated_at": "2026-01-26T21:41:57.548Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a86d2e1/KF3SBRRkzqdothbEr6ANe.png", + "model_url": "https://fal.run/wan/v2.6/image-to-video", + "license_type": "commercial", + "date": "2025-12-15T17:25:59.831Z", + "group": { + "key": "v2.6", + "label": "Image to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for wan/v2.6/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the wan/v2.6/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "wan/v2.6/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a86d2e1/KF3SBRRkzqdothbEr6ANe.png", + "playgroundUrl": "https://fal.ai/models/wan/v2.6/image-to-video", + "documentationUrl": "https://fal.ai/models/wan/v2.6/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "V26ImageToVideoInput": { + "title": "ImageToVideoInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A comedic cinematic demo where typed prompts physically transform reality. 
Photoreal, strong match cuts, coherent main character, no subtitles.\n\nShot 1 [0-4s] Continue from first frame. The creator presses \"PRINT\". The machine clunks like a spaceship. Creator whispers: \"Okay… I'm pressing enter.\"\nShot 2 [4-8s] Smash cut: the printed paper flies into the air and unfolds into a full desert canyon scene around the desk, like reality is being unrolled. Creator says: \"Wait—my prompt has physics?\"\nShot 3 [8-12s] Hard cut: the paper tears and reveals a tropical jungle behind it, perfectly lit, cinematic sun. Creator laughs: \"This is exactly why we do AI.\"\nShot 4 [12-15s] Hard cut back to studio. The printer prints a final line (not shown clearly). Creator looks to camera: \"Multi-scene. Single prompt.\"" + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt describing the desired video motion. Max 800 characters.", + "minLength": 1 + }, + "resolution": { + "enum": [ + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "Video resolution. Valid values: 720p, 1080p", + "default": "1080p" + }, + "duration": { + "enum": [ + "5", + "10", + "15" + ], + "title": "Duration", + "type": "string", + "examples": [ + "5", + "10", + "15" + ], + "description": "Duration of the generated video in seconds. Choose between 5, 10 or 15 seconds.", + "default": "5" + }, + "audio_url": { + "title": "Audio Url", + "type": "string", + "description": "\nURL of the audio to use as the background music. Must be publicly accessible.\nLimit handling: If the audio duration exceeds the duration value (5, 10, or 15 seconds),\nthe audio is truncated to the first N seconds, and the rest is discarded. If\nthe audio is shorter than the video, the remaining part of the video will be silent.\nFor example, if the audio is 3 seconds long and the video duration is 5 seconds, the\nfirst 3 seconds of the output video will have sound, and the last 2 seconds will be silent.\n- Format: WAV, MP3.\n- Duration: 3 to 30 s.\n- File size: Up to 15 MB.\n" + }, + "image_url": { + "examples": [ + "https://v3b.fal.media/files/b/0a8673dd/m9EV5W9aSqg8J7rb-18TK.png" + ], + "title": "Image URL", + "type": "string", + "description": "URL of the image to use as the first frame. Must be publicly accessible or base64 data URI. Image dimensions must be between 240 and 7680." + }, + "enable_prompt_expansion": { + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt rewriting using LLM.", + "default": true + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility. If None, a random seed is chosen." + }, + "multi_shots": { + "title": "Multi Shots", + "type": "boolean", + "description": "When true, enables intelligent multi-shot segmentation. Only active when enable_prompt_expansion is True. Set to false for single-shot generation.", + "default": false + }, + "negative_prompt": { + "examples": [ + "low resolution, error, worst quality, low quality, defects" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt to describe content to avoid. 
Max 500 characters.", + "default": "" + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + } + }, + "description": "Input for Wan 2.6 image-to-video generation", + "x-fal-order-properties": [ + "prompt", + "image_url", + "audio_url", + "resolution", + "duration", + "negative_prompt", + "enable_prompt_expansion", + "multi_shots", + "seed", + "enable_safety_checker" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "V26ImageToVideoOutput": { + "title": "ImageToVideoOutput", + "type": "object", + "properties": { + "actual_prompt": { + "examples": [ + "A comedic cinematic scene where the creator interacts with AI-generated reality transformations." + ], + "title": "Actual Prompt", + "type": "string", + "description": "The actual prompt used if prompt rewriting was enabled" + }, + "seed": { + "examples": [ + 175932751 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + }, + "video": { + "examples": [ + { + "content_type": "video/mp4", + "url": "https://v3b.fal.media/files/b/0a8675cf/bCu9FiFXSjsSnIwOmjUOY_BVs2IFR3.mp4" + } + ], + "title": "Video", + "description": "The generated video file", + "allOf": [ + { + "$ref": "#/components/schemas/VideoFile" + } + ] + } + }, + "description": "Output for image-to-video generation", + "x-fal-order-properties": [ + "video", + "seed", + "actual_prompt" + ], + "required": [ + "video", + "seed" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the video" + }, + "duration": { + "title": "Duration", + "type": "number", + "description": "The duration of the video" + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "fps": { + "title": "Fps", + "type": "number", + "description": "The FPS of the video" + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the video" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "num_frames": { + "title": "Num Frames", + "type": "integer", + "description": "The number of frames in the video" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/wan/v2.6/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/wan/v2.6/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/wan/v2.6/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/V26ImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/wan/v2.6/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/V26ImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/o1/standard/reference-to-video", + "metadata": { + "display_name": "Kling O1 Reference Image to Video [Standard]", + "category": "image-to-video", + "description": "Transform images, elements, and text into consistent, high-quality video scenes, ensuring stable character identity, object details, and environments.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:57.841Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a860024/IeH4LL0fTNCVBiDW7YTyc_48485917433c4e6ebe9a042112c8147f.jpg", + "model_url": "https://fal.run/fal-ai/kling-video/o1/standard/reference-to-video", + "license_type": "commercial", + "date": "2025-12-15T09:39:25.599Z", + "group": { + "key": "kling-video/o1", + "label": "Reference I2V [Standard]" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling-video/o1/standard/reference-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-video/o1/standard/reference-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/o1/standard/reference-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a860024/IeH4LL0fTNCVBiDW7YTyc_48485917433c4e6ebe9a042112c8147f.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/o1/standard/reference-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/o1/standard/reference-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": 
"string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoO1StandardReferenceToVideoInput": { + "title": "OmniVideoReferenceToVideoInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Take @Image1 as the start frame. Start with a high-angle satellite view of the ancient greenhouse ruin surrounded by nature. The camera swoops down and flies inside the building, revealing the character from @Element1 standing in the sun-drenched center. The camera then seamlessly transitions into a smooth 180-degree orbit around the character, moving to the back view. As the open backpack comes into focus, the camera continues to push forward, zooming deep inside the bag to reveal the glowing stone from @Element2 nestled inside. Cinematic lighting, hopeful atmosphere, 35mm lens. Make sure to keep it as the style of @Image2." + ], + "title": "Prompt", + "type": "string", + "maxLength": 2500, + "description": "Take @Element1, @Element2 to reference elements and @Image1, @Image2 to reference images in order." + }, + "duration": { + "enum": [ + "3", + "4", + "5", + "6", + "7", + "8", + "9", + "10" + ], + "title": "Duration", + "type": "string", + "description": "Video duration in seconds.", + "default": "5" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video frame.", + "default": "16:9" + }, + "elements": { + "examples": [ + [ + { + "reference_image_urls": [ + "https://v3b.fal.media/files/b/kangaroo/YMpmQkYt9xugpOTQyZW0O.png", + "https://v3b.fal.media/files/b/zebra/d6ywajNyJ6bnpa_xBue-K.png" + ], + "frontal_image_url": "https://v3b.fal.media/files/b/panda/MQp-ghIqshvMZROKh9lW3.png" + }, + { + "reference_image_urls": [ + "https://v3b.fal.media/files/b/kangaroo/EBF4nWihspyv4pp6hgj7D.png" + ], + "frontal_image_url": "https://v3b.fal.media/files/b/koala/gSnsA7HJlgcaTyR5Ujj2H.png" + } + ] + ], + "title": "Elements", + "type": "array", + "description": "Elements (characters/objects) to include in the video. Reference in prompt as @Element1, @Element2, etc. Maximum 7 total (elements + reference images + start image).", + "items": { + "$ref": "#/components/schemas/OmniVideoElementInput" + } + }, + "image_urls": { + "examples": [ + [ + "https://v3b.fal.media/files/b/koala/v9COzzH23FGBYdGLgbK3u.png", + "https://v3b.fal.media/files/b/elephant/5Is2huKQFSE7A7c5uUeUF.png" + ] + ], + "title": "Image Urls", + "type": "array", + "description": "Additional reference images for style/appearance. Reference in prompt as @Image1, @Image2, etc. 
Maximum 7 total (elements + reference images + start image).", + "items": { + "x-fal": { + "min_width": 300, + "min_aspect_ratio": 0.4, + "timeout": 20, + "min_height": 300, + "max_aspect_ratio": 2.5, + "max_file_size": 10485760 + }, + "type": "string", + "limit_description": "Max file size: 10.0MB, Min width: 300px, Min height: 300px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s" + } + } + }, + "description": "Input for start-frame video generation with optional reference images and elements.", + "x-fal-order-properties": [ + "prompt", + "image_urls", + "elements", + "duration", + "aspect_ratio" + ], + "required": [ + "prompt" + ] + }, + "KlingVideoO1StandardReferenceToVideoOutput": { + "title": "OmniVideoReferenceToVideoOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 47359974, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://v3b.fal.media/files/b/panda/oVdiICFXY03Vbam-08Aj8_output.mp4" + } + ], + "title": "Video", + "description": "The generated video.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "OmniVideoElementInput": { + "title": "OmniVideoElementInput", + "type": "object", + "properties": { + "reference_image_urls": { + "title": "Reference Image Urls", + "type": "array", + "description": "Additional reference images from different angles. 1-4 images supported. At least one image is required.", + "items": { + "x-fal": { + "min_width": 300, + "min_aspect_ratio": 0.4, + "timeout": 20, + "min_height": 300, + "max_aspect_ratio": 2.5, + "max_file_size": 10485760 + }, + "type": "string", + "limit_description": "Max file size: 10.0MB, Min width: 300px, Min height: 300px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s" + } + }, + "frontal_image_url": { + "x-fal": { + "min_width": 300, + "min_aspect_ratio": 0.4, + "timeout": 20, + "min_height": 300, + "max_aspect_ratio": 2.5, + "max_file_size": 10485760 + }, + "title": "Frontal Image Url", + "type": "string", + "description": "The frontal image of the element (main view).\n\nMax file size: 10.0MB, Min width: 300px, Min height: 300px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s" + } + }, + "x-fal-order-properties": [ + "frontal_image_url", + "reference_image_urls" + ], + "required": [ + "frontal_image_url" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/o1/standard/reference-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/o1/standard/reference-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/o1/standard/reference-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoO1StandardReferenceToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/o1/standard/reference-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoO1StandardReferenceToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/o1/standard/image-to-video", + "metadata": { + "display_name": "Kling O1 First Frame Last Frame to Video [Standard]", + "category": "image-to-video", + "description": "Generate a video by taking a start frame and an end frame, animating the transition between them while following text-driven style and scene guidance.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:57.976Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a86000e/kN3PH-dHrMpnHW9OlNlTQ_813cef4c332f4a6c9120e6890b013b91.jpg", + "model_url": "https://fal.run/fal-ai/kling-video/o1/standard/image-to-video", + "license_type": "commercial", + "date": "2025-12-15T09:37:41.280Z", + "group": { + "key": "kling-video/o1", + "label": "FLFV [Standard]" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling-video/o1/standard/image-to-video", + "version": 
"1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-video/o1/standard/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/o1/standard/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a86000e/kN3PH-dHrMpnHW9OlNlTQ_813cef4c332f4a6c9120e6890b013b91.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/o1/standard/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/o1/standard/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoO1StandardImageToVideoInput": { + "title": "OmniVideoImageToVideoInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Create a magical timelapse transition. The snow melts rapidly to reveal green grass, and the tree branches burst into bloom with pink flowers in real-time. The lighting shifts from cold winter light to warm spring sunshine. The camera pushes in slowly towards the tree. Disney-style magical transformation, cinematic, 8k." + ], + "title": "Prompt", + "type": "string", + "maxLength": 2500, + "description": "Use @Image1 to reference the start frame, @Image2 to reference the end frame." 
+ }, + "duration": { + "enum": [ + "3", + "4", + "5", + "6", + "7", + "8", + "9", + "10" + ], + "title": "Duration", + "type": "string", + "description": "Video duration in seconds.", + "default": "5" + }, + "start_image_url": { + "x-fal": { + "min_width": 300, + "min_aspect_ratio": 0.4, + "timeout": 20, + "min_height": 300, + "max_aspect_ratio": 2.5, + "max_file_size": 10485760 + }, + "title": "Start Image Url", + "type": "string", + "examples": [ + "https://v3b.fal.media/files/b/rabbit/NaslJIC7F2WodS6DFZRRJ.png" + ], + "description": "Image to use as the first frame of the video.\n\nMax file size: 10.0MB, Min width: 300px, Min height: 300px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s" + }, + "end_image_url": { + "x-fal": { + "min_width": 300, + "min_aspect_ratio": 0.4, + "timeout": 20, + "min_height": 300, + "max_aspect_ratio": 2.5, + "max_file_size": 10485760 + }, + "title": "End Image Url", + "type": "string", + "examples": [ + "https://v3b.fal.media/files/b/tiger/BwHi22qoQnqaTNMMhe533.png" + ], + "description": "Image to use as the last frame of the video.\n\nMax file size: 10.0MB, Min width: 300px, Min height: 300px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s" + } + }, + "x-fal-order-properties": [ + "prompt", + "start_image_url", + "end_image_url", + "duration" + ], + "required": [ + "prompt", + "start_image_url" + ] + }, + "KlingVideoO1StandardImageToVideoOutput": { + "title": "OmniVideoImageToVideoOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 27588984, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://v3b.fal.media/files/b/koala/knryyyGF3ZVyMMrGr77CL_output.mp4" + } + ], + "title": "Video", + "description": "The generated video.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "description": "Output for Kling Omni Video generation.", + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/o1/standard/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/o1/standard/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/o1/standard/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoO1StandardImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/o1/standard/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoO1StandardImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/creatify/aurora", + "metadata": { + "display_name": "Creatify Aurora", + "category": "image-to-video", + "description": "Generate high-fidelity, studio-quality videos of your avatar speaking or singing using Aurora from the Creatify team!", + "status": "active", + "tags": [ + "lipsync", + "image-to-video" + ], + "updated_at": "2026-01-26T21:42:01.678Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a85dac5/xLlxwj9XXOpVjGF9iUvla_d44dd53425ac4f94911ec73fa6119fc8.jpg", + "model_url": "https://fal.run/fal-ai/creatify/aurora", + "license_type": "commercial", + "date": "2025-12-11T16:16:44.429Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/creatify/aurora", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/creatify/aurora queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/creatify/aurora", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a85dac5/xLlxwj9XXOpVjGF9iUvla_d44dd53425ac4f94911ec73fa6119fc8.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/creatify/aurora", + "documentationUrl": "https://fal.ai/models/fal-ai/creatify/aurora/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id."
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "CreatifyAuroraInput": { + "x-fal-order-properties": [ + "image_url", + "audio_url", + "prompt", + "guidance_scale", + "audio_guidance_scale", + "resolution" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "4K studio interview, medium close-up (shoulders-up crop). Solid light-grey seamless backdrop, uniform soft key-light—no lighting change. Presenter faces lens, steady eye-contact. Hands remain below frame, body perfectly still. Ultra-sharp." + ], + "description": "A text prompt to guide the video generation process.", + "type": "string", + "title": "Prompt" + }, + "resolution": { + "enum": [ + "480p", + "720p" + ], + "description": "The resolution of the generated video.", + "type": "string", + "title": "Resolution", + "default": "720p" + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance Scale", + "type": "number", + "description": "Guidance scale to be used for text prompt adherence.", + "maximum": 5, + "default": 1 + }, + "audio_guidance_scale": { + "minimum": 0, + "title": "Audio Guidance Scale", + "type": "number", + "description": "Guidance scale to be used for audio adherence.", + "maximum": 5, + "default": 2 + }, + "audio_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/creatify/aurora/input.wav" + ], + "description": "The URL of the audio file to be used for video generation.", + "type": "string", + "title": "Audio Url" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/creatify/aurora/input_.png" + ], + "description": "The URL of the image file to be used for video generation.", + "type": "string", + "title": "Image Url" + } + }, + "title": "AuroraInputModel", + "required": [ + "image_url", + "audio_url" + ] + }, + "CreatifyAuroraOutput": { + "x-fal-order-properties": [ + "video" + ], + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://storage.googleapis.com/falserverless/example_outputs/creatify/aurora/output.mp4" + } + ], + "description": "The generated video file.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/VideoFile" + } + ] + } + }, + "title": "AuroraOutputModel", + "required": [ + "video" + ] + }, + "VideoFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "type": "object", + "properties": { + "height": { + "description": "The height of the video", + "type": "integer", + "title": "Height" + }, + "duration": { + "description": "The duration of the video", + "type": "number", + "title": "Duration" + }, + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + 
"type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the video", + "type": "integer", + "title": "Width" + }, + "fps": { + "description": "The FPS of the video", + "type": "number", + "title": "Fps" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "num_frames": { + "description": "The number of frames in the video", + "type": "integer", + "title": "Num Frames" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "VideoFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/creatify/aurora/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/creatify/aurora/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/creatify/aurora": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreatifyAuroraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/creatify/aurora/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreatifyAuroraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/ai-avatar/v2/pro", + "metadata": { + "display_name": "Kling AI Avatar v2 Pro", + "category": "image-to-video", + "description": "Kling AI Avatar v2 Pro: The premium endpoint for creating avatar videos with realistic humans, animals, cartoons, or stylized characters", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:05.594Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a84f36f/CYuWetfKsuQC0MR6G4EG8_cd62fa685e9844aaaf086c293eddb2ee.jpg", + "model_url": "https://fal.run/fal-ai/kling-video/ai-avatar/v2/pro", + "license_type": "commercial", + "date": "2025-12-04T12:58:48.770Z", + "group": { + "key": "Kling-Avatar", + "label": "Kling AI Avatar v2 Pro" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling-video/ai-avatar/v2/pro", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-video/ai-avatar/v2/pro queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/ai-avatar/v2/pro", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a84f36f/CYuWetfKsuQC0MR6G4EG8_cd62fa685e9844aaaf086c293eddb2ee.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/ai-avatar/v2/pro", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/ai-avatar/v2/pro/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoAiAvatarV2ProInput": { + "title": "AIAvatarInput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt to use for the video generation.", + "default": "." + }, + "audio_url": { + "examples": [ + "https://v3.fal.media/files/rabbit/9_0ZG_geiWjZOmn9yscO6_output.mp3" + ], + "title": "Audio Url", + "type": "string", + "description": "The URL of the audio file." + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/kling_ai_avatar_input.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "The URL of the image to use as your avatar" + } + }, + "x-fal-order-properties": [ + "image_url", + "audio_url", + "prompt" + ], + "required": [ + "image_url", + "audio_url" + ] + }, + "KlingVideoAiAvatarV2ProOutput": { + "title": "AIAvatarOutput", + "type": "object", + "properties": { + "duration": { + "title": "Duration", + "type": "number", + "description": "Duration of the output video in seconds." + }, + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/penguin/ln3x7H1p1jL0Pwo7675NI_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "duration" + ], + "required": [ + "video", + "duration" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/ai-avatar/v2/pro/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/ai-avatar/v2/pro/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/ai-avatar/v2/pro": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoAiAvatarV2ProInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/ai-avatar/v2/pro/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoAiAvatarV2ProOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/ai-avatar/v2/standard", + "metadata": { + "display_name": "Kling AI Avatar v2 Standard", + "category": "image-to-video", + "description": "Kling AI Avatar v2 Standard: Endpoint for creating avatar videos with realistic humans, animals, cartoons, or stylized characters", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:05.857Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a84f39a/V_Zy58fLpOlS1jN3balTm_bf4c62c984b34a1597340c007fac8fe8.jpg", + "model_url": "https://fal.run/fal-ai/kling-video/ai-avatar/v2/standard", + "license_type": "commercial", + "date": "2025-12-04T12:58:33.998Z", + "group": { + "key": "Kling-Avatar", + "label": "Kling AI Avatar v2 Standard" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling-video/ai-avatar/v2/standard", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-video/ai-avatar/v2/standard queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/ai-avatar/v2/standard", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a84f39a/V_Zy58fLpOlS1jN3balTm_bf4c62c984b34a1597340c007fac8fe8.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/ai-avatar/v2/standard", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/ai-avatar/v2/standard/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", 
+ "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoAiAvatarV2StandardInput": { + "title": "AIAvatarInput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt to use for the video generation.", + "default": "." + }, + "audio_url": { + "examples": [ + "https://v3.fal.media/files/rabbit/9_0ZG_geiWjZOmn9yscO6_output.mp3" + ], + "title": "Audio Url", + "type": "string", + "description": "The URL of the audio file." + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/kling_ai_avatar_input.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "The URL of the image to use as your avatar" + } + }, + "x-fal-order-properties": [ + "image_url", + "audio_url", + "prompt" + ], + "required": [ + "image_url", + "audio_url" + ] + }, + "KlingVideoAiAvatarV2StandardOutput": { + "title": "AIAvatarOutput", + "type": "object", + "properties": { + "duration": { + "title": "Duration", + "type": "number", + "description": "Duration of the output video in seconds." + }, + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/penguin/ln3x7H1p1jL0Pwo7675NI_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "duration" + ], + "required": [ + "video", + "duration" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/ai-avatar/v2/standard/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/ai-avatar/v2/standard/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/ai-avatar/v2/standard": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoAiAvatarV2StandardInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/ai-avatar/v2/standard/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoAiAvatarV2StandardOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/v2.6/pro/image-to-video", + "metadata": { + "display_name": "Kling Video v2.6 Image to Video", + "category": "image-to-video", + "description": "Kling 2.6 Pro: Top-tier image-to-video with cinematic visuals, fluid motion, and native audio generation.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:07.549Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a84aa9c/tRb6fibztV71jrhSPfdqP_3359db6ee74a4e4e9fb408a00b02e114.jpg", + "model_url": "https://fal.run/fal-ai/kling-video/v2.6/pro/image-to-video", + "license_type": "commercial", + "date": "2025-12-02T09:01:46.898Z", + "group": { + "key": "kling-video/v2.6", + "label": "Image to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling-video/v2.6/pro/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-video/v2.6/pro/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/v2.6/pro/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a84aa9c/tRb6fibztV71jrhSPfdqP_3359db6ee74a4e4e9fb408a00b02e114.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/v2.6/pro/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/v2.6/pro/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + 
"IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoV26ProImageToVideoInput": { + "title": "ImageToVideoV26ProRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A king walks slowly and says \"My people, here I am! I am here to save you all\"" + ], + "title": "Prompt", + "type": "string", + "maxLength": 2500 + }, + "duration": { + "enum": [ + "5", + "10" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds", + "default": "5" + }, + "voice_ids": { + "title": "Voice Ids", + "type": "array", + "description": "List of voice IDs to use for voice control. Reference voices in the prompt using <<>>, <<>>. Maximum 2 voices allowed. When provided and referenced in prompt, enables voice control billing.", + "items": { + "type": "string" + } + }, + "generate_audio": { + "title": "Generate Audio", + "type": "boolean", + "description": "Whether to generate native audio for the video. Supports Chinese and English voice output. Other languages are automatically translated to English. For English speech, use lowercase letters; for acronyms or proper nouns, use uppercase.", + "default": true + }, + "start_image_url": { + "description": "URL of the image to be used for the video", + "type": "string", + "x-fal": { + "min_width": 300, + "min_aspect_ratio": 0.4, + "timeout": 20, + "min_height": 300, + "max_aspect_ratio": 2.5, + "max_file_size": 10485760 + }, + "title": "Start Image Url", + "examples": [ + "https://v3b.fal.media/files/b/0a84ab29/BSJXz9Ht-jgRgMf4IGxLU_upscaled.png" + ], + "limit_description": "Max file size: 10.0MB, Min width: 300px, Min height: 300px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s" + }, + "end_image_url": { + "x-fal": { + "min_width": 300, + "min_aspect_ratio": 0.4, + "timeout": 20, + "min_height": 300, + "max_aspect_ratio": 2.5, + "max_file_size": 10485760 + }, + "title": "End Image Url", + "type": "string", + "description": "URL of the image to be used for the end of the video", + "limit_description": "Max file size: 10.0MB, Min width: 300px, Min height: 300px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s" + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "maxLength": 2500, + "default": "blur, distort, and low quality" + } + }, + "x-fal-order-properties": [ + "prompt", + "start_image_url", + "duration", + "negative_prompt", + "generate_audio", + "voice_ids", + "end_image_url" + ], + "required": [ + "prompt", + "start_image_url" + ] + }, + "KlingVideoV26ProImageToVideoOutput": { + "title": "ImageToVideoV26ProOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 11814817, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://v3b.fal.media/files/b/0a84ab51/Qr1twf8UgtD5rZHpNXC2P_output.mp4" + } + ], + "title": "Video", 
+ "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/v2.6/pro/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v2.6/pro/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v2.6/pro/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV26ProImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v2.6/pro/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV26ProImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pixverse/v5.5/effects", + "metadata": { + "display_name": "Pixverse", + "category": "image-to-video", + "description": "Pixverse Effects", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:07.801Z", + "is_favorited": false, + "thumbnail_url": "blob:https://fal.ai/0e08061f-2f95-4ab6-9ade-39560f7554ac", + "model_url": "https://fal.run/fal-ai/pixverse/v5.5/effects", + "license_type": "commercial", + "date": "2025-12-02T08:44:21.318Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pixverse/v5.5/effects", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pixverse/v5.5/effects queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pixverse/v5.5/effects", + "category": "image-to-video", + "thumbnailUrl": "blob:https://fal.ai/0e08061f-2f95-4ab6-9ade-39560f7554ac", + "playgroundUrl": "https://fal.ai/models/fal-ai/pixverse/v5.5/effects", + "documentationUrl": "https://fal.ai/models/fal-ai/pixverse/v5.5/effects/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "PixverseV55EffectsInput": { + "title": "EffectInputV5_5", + "type": "object", + "properties": { + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt to be used for the generation", + "default": "" + }, + "duration": { + "enum": [ + "5", + "8", + "10" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds", + "default": "5" + }, + "resolution": { + "enum": [ + "360p", + "540p", + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video.", + "default": "720p" + }, + "thinking_type": { + "enum": [ + "enabled", + "disabled", + "auto" + ], + "title": "Thinking Type", + "type": "string", + "description": "Prompt optimization mode: 'enabled' to optimize, 'disabled' to turn off, 'auto' for model decision" + }, + "effect": { + "enum": [ + "Kiss Me AI", + "Kiss", + "Muscle Surge", + "Warmth of Jesus", + "Anything, Robot", + "The Tiger Touch", + "Hug", + "Holy Wings", + "Microwave", + "Zombie Mode", + "Squid Game", + "Baby Face", + "Black Myth: Wukong", + "Long Hair Magic", + "Leggy Run", + "Fin-tastic Mermaid", + "Punch Face", + "Creepy Devil Smile", + "Thunder God", + "Eye Zoom Challenge", + "Who's Arrested?", + "Baby Arrived", + "Werewolf Rage", + "Bald Swipe", + "BOOM DROP", + "Huge Cutie", + "Liquid Metal", + "Sharksnap!", + "Dust Me Away", + "3D Figurine Factor", + "Bikini Up", + "My Girlfriends", + "My Boyfriends", + "Subject 3 Fever", + "Earth Zoom", + "Pole Dance", + "Vroom Dance", + "GhostFace Terror", + "Dragon Evoker", + "Skeletal Bae", + "Summoning succubus", + "Halloween Voodoo Doll", + "3D Naked-Eye AD", + "Package Explosion", + "Dishes Served", + "Ocean ad", + "Supermarket AD", + "Tree doll", + "Come Feel My Abs", + "The Bicep Flex", + "London Elite Vibe", + "Flora Nymph Gown", + "Christmas Costume", + "It's Snowy", + "Reindeer Cruiser", + "Snow Globe Maker", + "Pet Christmas Outfit", + "Adopt a Polar Pal", + "Cat Christmas Box", + "Starlight Gift Box", + "Xmas Poster", + "Pet Christmas Tree", + "City Santa Hat", + "Stocking Sweetie", + "Christmas Night", + "Xmas Front Page Karma", + "Grinch's Xmas Hijack", + "Giant Product", + "Truck Fashion Shoot", + "Beach AD", + "Shoal Surround", + "Mechanical Assembly", + "Lighting AD", + "Billboard AD", + "Product close-up", + "Parachute Delivery", + "Dreamlike Cloud", + "Macaron Machine", + "Poster AD", + "Truck AD", + "Graffiti AD", + "3D Figurine Factory", + "The Exclusive First Class", + "Art Zoom Challenge", + "I Quit", + "Hitchcock Dolly Zoom", + "Smell the Lens", + "I believe I can fly", + "Strikout Dance", + "Pixel World", + "Mint in Box", + "Hands up, Hand", + "Flora Nymph Go", + "Somber Embrace", + "Beam me up", + "Suit Swagger" + ], + "title": "Effect", + "type": "string", + "description": "The effect to apply to the video" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/koala/q5ahL3KS7ikt3MvpNUG8l_image%20(72).webp" + ], + "title": "Image Url", + "type": "string", + "description": "Optional URL of the image to use as the first frame. 
If not provided, generates from text" + } + }, + "x-fal-order-properties": [ + "effect", + "image_url", + "resolution", + "duration", + "negative_prompt", + "thinking_type" + ], + "required": [ + "effect", + "image_url" + ] + }, + "PixverseV55EffectsOutput": { + "title": "EffectOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 3232402, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://fal.media/files/koala/awGY1lJd7lVsqQeSqjWqn_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pixverse/v5.5/effects/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v5.5/effects/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v5.5/effects": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV55EffectsInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v5.5/effects/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV55EffectsOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pixverse/v5.5/transition", + "metadata": { + "display_name": "Pixverse", + "category": "image-to-video", + "description": "Pixverse Transition", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:07.926Z", + "is_favorited": false, + "thumbnail_url": "blob:https://fal.ai/a3342541-0a6a-4117-a70f-4b43a6483e07", + "model_url": "https://fal.run/fal-ai/pixverse/v5.5/transition", + "license_type": "commercial", + "date": "2025-12-02T08:44:08.938Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pixverse/v5.5/transition", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pixverse/v5.5/transition queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pixverse/v5.5/transition", + "category": "image-to-video", + "thumbnailUrl": "blob:https://fal.ai/a3342541-0a6a-4117-a70f-4b43a6483e07", + "playgroundUrl": "https://fal.ai/models/fal-ai/pixverse/v5.5/transition", + "documentationUrl": "https://fal.ai/models/fal-ai/pixverse/v5.5/transition/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "PixverseV55TransitionInput": { + "title": "TransitionRequestV5_5", + "type": "object", + "properties": { + "first_image_url": { + "examples": [ + "https://v3.fal.media/files/zebra/owQh2DAzk8UU7J02nr5RY_Co2P4boLv6meIZ5t9gKvL_8685da151df343ab8bf82165c928e2a5.jpg" + ], + "title": "First Image Url", + "type": "string", + "description": "URL of the image to use as the first frame" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "4:3", + "1:1", + "3:4", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video", + "default": "16:9" + }, + "resolution": { + "enum": [ + "360p", + "540p", + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video", + "default": "720p" + }, + "style": { + "enum": [ + "anime", + "3d_animation", + "clay", + "comic", + "cyberpunk" + ], + "title": "Style", + "type": "string", + "description": "The style of the generated video" + }, + "thinking_type": { + "enum": [ + "enabled", + "disabled", + "auto" + ], + "title": "Thinking Type", + "type": "string", + "description": "Prompt optimization mode: 'enabled' to optimize, 'disabled' to turn off, 'auto' for model decision" + }, + "prompt": { + "examples": [ + "Scene slowly transition into cat swimming under water" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt for the transition" + }, + "duration": { + "enum": [ + "5", + "8", + "10" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds. Longer durations cost more. 1080p videos are limited to 5 or 8 seconds", + "default": "5" + }, + "generate_audio_switch": { + "title": "Generate Audio Switch", + "type": "boolean", + "description": "Enable audio generation (BGM, SFX, dialogue)", + "default": false + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n " + }, + "end_image_url": { + "examples": [ + "https://v3.fal.media/files/kangaroo/RgedFs_WSnq5BgER7qDx1_ONrbTJ1YAGXz-9JnSsBoB_bdc8750387734bfe940319f469f7b0b2.jpg" + ], + "title": "End Image Url", + "type": "string", + "description": "URL of the image to use as the last frame" + }, + "negative_prompt": { + "examples": [ + "blurry, low quality, low resolution, pixelated, noisy, grainy, out of focus, poorly lit, poorly exposed, poorly composed, poorly framed, poorly cropped, poorly color corrected, poorly color graded" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt to be used for the generation", + "default": "" + } + }, + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "resolution", + "duration", + "negative_prompt", + "style", + "seed", + "generate_audio_switch", + "thinking_type", + "first_image_url", + "end_image_url" + ], + "required": [ + "prompt", + "first_image_url" + ] + }, + "PixverseV55TransitionOutput": { + "title": "TransitionOutputV5_5", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 3890360, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://storage.googleapis.com/falserverless/model_tests/video_models/output-2.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + 
"video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pixverse/v5.5/transition/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v5.5/transition/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v5.5/transition": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV55TransitionInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v5.5/transition/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV55TransitionOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pixverse/v5.5/image-to-video", + "metadata": { + "display_name": "Pixverse", + "category": "image-to-video", + "description": "Generate high quality video clips from text and image prompts using PixVerse v5.5", + "status": "active", + "tags": [ + "image-to-video" + ], + "updated_at": "2026-01-26T21:42:08.303Z", + "is_favorited": false, + "thumbnail_url": "blob:https://fal.ai/b3fcd77f-16db-4201-9801-bed4e7eb63bf", + "model_url": "https://fal.run/fal-ai/pixverse/v5.5/image-to-video", + "license_type": "commercial", + "date": "2025-12-01T17:08:41.560Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pixverse/v5.5/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pixverse/v5.5/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pixverse/v5.5/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "blob:https://fal.ai/b3fcd77f-16db-4201-9801-bed4e7eb63bf", + "playgroundUrl": "https://fal.ai/models/fal-ai/pixverse/v5.5/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/pixverse/v5.5/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PixverseV55ImageToVideoInput": { + "title": "ImageToVideoRequestV5_5", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A woman warrior with her hammer walking with her glacier wolf."
+ ], + "title": "Prompt", + "type": "string" + }, + "resolution": { + "enum": [ + "360p", + "540p", + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video", + "default": "720p" + }, + "duration": { + "enum": [ + "5", + "8", + "10" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds. Longer durations cost more. 1080p videos are limited to 5 or 8 seconds", + "default": "5" + }, + "style": { + "enum": [ + "anime", + "3d_animation", + "clay", + "comic", + "cyberpunk" + ], + "title": "Style", + "type": "string", + "description": "The style of the generated video" + }, + "thinking_type": { + "enum": [ + "enabled", + "disabled", + "auto" + ], + "title": "Thinking Type", + "type": "string", + "description": "Prompt optimization mode: 'enabled' to optimize, 'disabled' to turn off, 'auto' for model decision" + }, + "generate_multi_clip_switch": { + "title": "Generate Multi Clip Switch", + "type": "boolean", + "description": "Enable multi-clip generation with dynamic camera changes", + "default": false + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/zebra/qL93Je8ezvzQgDOEzTjKF_KhGKZTEebZcDw6T5rwQPK_output.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to use as the first frame" + }, + "generate_audio_switch": { + "title": "Generate Audio Switch", + "type": "boolean", + "description": "Enable audio generation (BGM, SFX, dialogue)", + "default": false + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n " + }, + "negative_prompt": { + "examples": [ + "blurry, low quality, low resolution, pixelated, noisy, grainy, out of focus, poorly lit, poorly exposed, poorly composed, poorly framed, poorly cropped, poorly color corrected, poorly color graded" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt to be used for the generation", + "default": "" + } + }, + "x-fal-order-properties": [ + "prompt", + "resolution", + "duration", + "negative_prompt", + "style", + "seed", + "generate_audio_switch", + "generate_multi_clip_switch", + "thinking_type", + "image_url" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "PixverseV55ImageToVideoOutput": { + "title": "I2VOutputV5_5", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 6420765, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://storage.googleapis.com/falserverless/model_tests/video_models/output-3.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pixverse/v5.5/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v5.5/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v5.5/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV55ImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v5.5/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV55ImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/o1/image-to-video", + "metadata": { + "display_name": "Kling O1 First Frame Last Frame to Video [Pro]", + "category": "image-to-video", + "description": "Generate a video by taking a start frame and an end frame, animating the transition between them while following text-driven style and scene guidance.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:08.754Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/monkey/fPJwa1EtpB5bYyqS2u7us_ddd5fec670b149288123785cd5ee6e54.jpg", + "model_url": "https://fal.run/fal-ai/kling-video/o1/image-to-video", + "license_type": "commercial", + "date": "2025-12-01T11:39:20.265Z", + "group": { + "key": "kling-video/o1", + "label": "FLFV [Pro]" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling-video/o1/image-to-video", + "version": 
"1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-video/o1/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/o1/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/monkey/fPJwa1EtpB5bYyqS2u7us_ddd5fec670b149288123785cd5ee6e54.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/o1/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/o1/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoO1ImageToVideoInput": { + "title": "OmniVideoImageToVideoInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Create a magical timelapse transition. The snow melts rapidly to reveal green grass, and the tree branches burst into bloom with pink flowers in real-time. The lighting shifts from cold winter light to warm spring sunshine. The camera pushes in slowly towards the tree. Disney-style magical transformation, cinematic, 8k." + ], + "title": "Prompt", + "type": "string", + "maxLength": 2500, + "description": "Use @Image1 to reference the start frame, @Image2 to reference the end frame." 
+ }, + "duration": { + "enum": [ + "3", + "4", + "5", + "6", + "7", + "8", + "9", + "10" + ], + "title": "Duration", + "type": "string", + "description": "Video duration in seconds.", + "default": "5" + }, + "start_image_url": { + "x-fal": { + "min_width": 300, + "min_aspect_ratio": 0.4, + "timeout": 20, + "min_height": 300, + "max_aspect_ratio": 2.5, + "max_file_size": 10485760 + }, + "title": "Start Image Url", + "type": "string", + "examples": [ + "https://v3b.fal.media/files/b/rabbit/NaslJIC7F2WodS6DFZRRJ.png" + ], + "description": "Image to use as the first frame of the video.\n\nMax file size: 10.0MB, Min width: 300px, Min height: 300px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s" + }, + "end_image_url": { + "x-fal": { + "min_width": 300, + "min_aspect_ratio": 0.4, + "timeout": 20, + "min_height": 300, + "max_aspect_ratio": 2.5, + "max_file_size": 10485760 + }, + "title": "End Image Url", + "type": "string", + "examples": [ + "https://v3b.fal.media/files/b/tiger/BwHi22qoQnqaTNMMhe533.png" + ], + "description": "Image to use as the last frame of the video.\n\nMax file size: 10.0MB, Min width: 300px, Min height: 300px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s" + } + }, + "x-fal-order-properties": [ + "prompt", + "start_image_url", + "end_image_url", + "duration" + ], + "required": [ + "prompt", + "start_image_url" + ] + }, + "KlingVideoO1ImageToVideoOutput": { + "title": "OmniVideoImageToVideoOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 27588984, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://v3b.fal.media/files/b/koala/knryyyGF3ZVyMMrGr77CL_output.mp4" + } + ], + "title": "Video", + "description": "The generated video.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "description": "Output for Kling Omni Video generation.", + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/o1/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/o1/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/o1/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoO1ImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/o1/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoO1ImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/o1/reference-to-video", + "metadata": { + "display_name": "Kling O1 Reference Image to Video [Pro]", + "category": "image-to-video", + "description": "Transform images, elements, and text into consistent, high-quality video scenes, ensuring stable character identity, object details, and environments.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:08.883Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/kangaroo/L7EgH8_I_ZIngBtpKaL-Y_ca0bcf7b2c8f4319b8af7dc386bdb3d9.jpg", + "model_url": "https://fal.run/fal-ai/kling-video/o1/reference-to-video", + "license_type": "commercial", + "date": "2025-12-01T11:37:01.777Z", + "group": { + "key": "kling-video/o1", + "label": "Reference I2V [Pro]" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling-video/o1/reference-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-video/o1/reference-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/o1/reference-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/kangaroo/L7EgH8_I_ZIngBtpKaL-Y_ca0bcf7b2c8f4319b8af7dc386bdb3d9.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/o1/reference-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/o1/reference-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ 
+ "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoO1ReferenceToVideoInput": { + "title": "OmniVideoReferenceToVideoInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Take @Image1 as the start frame. Start with a high-angle satellite view of the ancient greenhouse ruin surrounded by nature. The camera swoops down and flies inside the building, revealing the character from @Element1 standing in the sun-drenched center. The camera then seamlessly transitions into a smooth 180-degree orbit around the character, moving to the back view. As the open backpack comes into focus, the camera continues to push forward, zooming deep inside the bag to reveal the glowing stone from @Element2 nestled inside. Cinematic lighting, hopeful atmosphere, 35mm lens. Make sure to keep it as the style of @Image2." + ], + "title": "Prompt", + "type": "string", + "maxLength": 2500, + "description": "Take @Element1, @Element2 to reference elements and @Image1, @Image2 to reference images in order." + }, + "duration": { + "enum": [ + "3", + "4", + "5", + "6", + "7", + "8", + "9", + "10" + ], + "title": "Duration", + "type": "string", + "description": "Video duration in seconds.", + "default": "5" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video frame.", + "default": "16:9" + }, + "elements": { + "examples": [ + [ + { + "reference_image_urls": [ + "https://v3b.fal.media/files/b/kangaroo/YMpmQkYt9xugpOTQyZW0O.png", + "https://v3b.fal.media/files/b/zebra/d6ywajNyJ6bnpa_xBue-K.png" + ], + "frontal_image_url": "https://v3b.fal.media/files/b/panda/MQp-ghIqshvMZROKh9lW3.png" + }, + { + "reference_image_urls": [ + "https://v3b.fal.media/files/b/kangaroo/EBF4nWihspyv4pp6hgj7D.png" + ], + "frontal_image_url": "https://v3b.fal.media/files/b/koala/gSnsA7HJlgcaTyR5Ujj2H.png" + } + ] + ], + "title": "Elements", + "type": "array", + "description": "Elements (characters/objects) to include in the video. Reference in prompt as @Element1, @Element2, etc. Maximum 7 total (elements + reference images + start image).", + "items": { + "$ref": "#/components/schemas/OmniVideoElementInput" + } + }, + "image_urls": { + "examples": [ + [ + "https://v3b.fal.media/files/b/koala/v9COzzH23FGBYdGLgbK3u.png", + "https://v3b.fal.media/files/b/elephant/5Is2huKQFSE7A7c5uUeUF.png" + ] + ], + "title": "Image Urls", + "type": "array", + "description": "Additional reference images for style/appearance. Reference in prompt as @Image1, @Image2, etc. 
Maximum 7 total (elements + reference images + start image).", + "items": { + "x-fal": { + "min_width": 300, + "min_aspect_ratio": 0.4, + "timeout": 20, + "min_height": 300, + "max_aspect_ratio": 2.5, + "max_file_size": 10485760 + }, + "type": "string", + "limit_description": "Max file size: 10.0MB, Min width: 300px, Min height: 300px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s" + } + } + }, + "description": "Input for start-frame video generation with optional reference images and elements.", + "x-fal-order-properties": [ + "prompt", + "image_urls", + "elements", + "duration", + "aspect_ratio" + ], + "required": [ + "prompt" + ] + }, + "KlingVideoO1ReferenceToVideoOutput": { + "title": "OmniVideoReferenceToVideoOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 47359974, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://v3b.fal.media/files/b/panda/oVdiICFXY03Vbam-08Aj8_output.mp4" + } + ], + "title": "Video", + "description": "The generated video.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "OmniVideoElementInput": { + "title": "OmniVideoElementInput", + "type": "object", + "properties": { + "reference_image_urls": { + "title": "Reference Image Urls", + "type": "array", + "description": "Additional reference images from different angles. 1-4 images supported. At least one image is required.", + "items": { + "x-fal": { + "min_width": 300, + "min_aspect_ratio": 0.4, + "timeout": 20, + "min_height": 300, + "max_aspect_ratio": 2.5, + "max_file_size": 10485760 + }, + "type": "string", + "limit_description": "Max file size: 10.0MB, Min width: 300px, Min height: 300px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s" + } + }, + "frontal_image_url": { + "x-fal": { + "min_width": 300, + "min_aspect_ratio": 0.4, + "timeout": 20, + "min_height": 300, + "max_aspect_ratio": 2.5, + "max_file_size": 10485760 + }, + "title": "Frontal Image Url", + "type": "string", + "description": "The frontal image of the element (main view).\n\nMax file size: 10.0MB, Min width: 300px, Min height: 300px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s" + } + }, + "x-fal-order-properties": [ + "frontal_image_url", + "reference_image_urls" + ], + "required": [ + "frontal_image_url" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/o1/reference-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/o1/reference-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/o1/reference-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoO1ReferenceToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/o1/reference-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoO1ReferenceToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx-2/image-to-video/fast", + "metadata": { + "display_name": "LTX Video 2.0 Fast", + "category": "image-to-video", + "description": "Create high-fidelity video with audio from images with LTX-2 Fast", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:10.863Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/lion/WmwWdXjeVdd34i5AevK5B_cf7f489dec6f4b378e057bc4d34ac0b9.jpg", + "model_url": "https://fal.run/fal-ai/ltx-2/image-to-video/fast", + "license_type": "commercial", + "date": "2025-11-26T17:28:12.015Z", + "group": { + "key": "ltx-2", + "label": "Image to Video (Fast)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-2/image-to-video/fast", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx-2/image-to-video/fast queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-2/image-to-video/fast", + "category": "image-to-video", + 
"thumbnailUrl": "https://v3b.fal.media/files/b/lion/WmwWdXjeVdd34i5AevK5B_cf7f489dec6f4b378e057bc4d34ac0b9.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx-2/image-to-video/fast", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx-2/image-to-video/fast/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Ltx2ImageToVideoFastInput": { + "title": "LTXVImageToVideoFastRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A woman stands still amid a busy neon-lit street at night. The camera slowly dollies in toward her face as people blur past, their motion emphasizing her calm presence. City lights flicker and reflections shift across her denim jacket." + ], + "maxLength": 5000, + "type": "string", + "minLength": 1, + "title": "Prompt", + "description": "The prompt to generate the video from" + }, + "aspect_ratio": { + "enum": [ + "16:9" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video", + "default": "16:9" + }, + "duration": { + "enum": [ + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20 + ], + "title": "Duration", + "type": "integer", + "description": "The duration of the generated video in seconds. The fast model supports 6-20 seconds. Note: Durations longer than 10 seconds (12, 14, 16, 18, 20) are only supported with 25 FPS and 1080p resolution.", + "default": 6 + }, + "generate_audio": { + "title": "Generate Audio", + "type": "boolean", + "description": "Whether to generate audio for the generated video", + "default": true + }, + "resolution": { + "enum": [ + "1080p", + "1440p", + "2160p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video", + "default": "1080p" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/ltxv-2-i2v-input.jpg" + ], + "title": "Image URL", + "type": "string", + "description": "URL of the image to generate the video from. Must be publicly accessible or base64 data URI. Supports PNG, JPEG, WebP, AVIF, and HEIF formats." 
+ }, + "fps": { + "enum": [ + 25, + 50 + ], + "title": "Frames per Second", + "type": "integer", + "description": "The frames per second of the generated video", + "default": 25 + } + }, + "x-fal-order-properties": [ + "image_url", + "prompt", + "duration", + "resolution", + "aspect_ratio", + "fps", + "generate_audio" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "Ltx2ImageToVideoFastOutput": { + "title": "LTXVImageToVideoResponse", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_name": "ltxv-2-i2v-output.mp4", + "content_type": "video/mp4", + "url": "https://storage.googleapis.com/falserverless/example_outputs/ltxv-2-i2v-output.mp4" + } + ], + "title": "Video", + "description": "The generated video file", + "allOf": [ + { + "$ref": "#/components/schemas/VideoFile" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the video" + }, + "duration": { + "title": "Duration", + "type": "number", + "description": "The duration of the video" + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "fps": { + "title": "Fps", + "type": "number", + "description": "The FPS of the video" + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the video" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "num_frames": { + "title": "Num Frames", + "type": "integer", + "description": "The number of frames in the video" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-2/image-to-video/fast/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2/image-to-video/fast/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-2/image-to-video/fast": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx2ImageToVideoFastInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2/image-to-video/fast/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx2ImageToVideoFastOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx-2/image-to-video", + "metadata": { + "display_name": "LTX Video 2.0 Pro", + "category": "image-to-video", + "description": "Create high-fidelity video with audio from images with LTX-2 Pro", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:10.988Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/tiger/wwf0gbfywaeym-Zh4SMcg_5d51eb236a484230afff239319c70234.jpg", + "model_url": "https://fal.run/fal-ai/ltx-2/image-to-video", + "license_type": "commercial", + "date": "2025-11-26T17:25:50.149Z", + "group": { + "key": "ltx-2", + "label": "Image to Video (Pro)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-2/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx-2/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-2/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/tiger/wwf0gbfywaeym-Zh4SMcg_5d51eb236a484230afff239319c70234.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx-2/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx-2/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Ltx2ImageToVideoInput": { + "title": "LTXVImageToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A woman stands still amid a busy neon-lit street at night. The camera slowly dollies in toward her face as people blur past, their motion emphasizing her calm presence. City lights flicker and reflections shift across her denim jacket." + ], + "maxLength": 5000, + "type": "string", + "minLength": 1, + "title": "Prompt", + "description": "The prompt to generate the video from" + }, + "aspect_ratio": { + "enum": [ + "16:9" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video", + "default": "16:9" + }, + "duration": { + "enum": [ + 6, + 8, + 10 + ], + "title": "Duration", + "type": "integer", + "description": "The duration of the generated video in seconds", + "default": 6 + }, + "generate_audio": { + "title": "Generate Audio", + "type": "boolean", + "description": "Whether to generate audio for the generated video", + "default": true + }, + "resolution": { + "enum": [ + "1080p", + "1440p", + "2160p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video", + "default": "1080p" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/ltxv-2-i2v-input.jpg" + ], + "title": "Image URL", + "type": "string", + "description": "URL of the image to generate the video from. Must be publicly accessible or base64 data URI. Supports PNG, JPEG, WebP, AVIF, and HEIF formats." + }, + "fps": { + "enum": [ + 25, + 50 + ], + "title": "Frames per Second", + "type": "integer", + "description": "The frames per second of the generated video", + "default": 25 + } + }, + "x-fal-order-properties": [ + "image_url", + "prompt", + "duration", + "resolution", + "aspect_ratio", + "fps", + "generate_audio" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "Ltx2ImageToVideoOutput": { + "title": "LTXVImageToVideoResponse", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_name": "ltxv-2-i2v-output.mp4", + "content_type": "video/mp4", + "url": "https://storage.googleapis.com/falserverless/example_outputs/ltxv-2-i2v-output.mp4" + } + ], + "title": "Video", + "description": "The generated video file", + "allOf": [ + { + "$ref": "#/components/schemas/VideoFile" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the video" + }, + "duration": { + "title": "Duration", + "type": "number", + "description": "The duration of the video" + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "fps": { + "title": "Fps", + "type": "number", + "description": "The FPS of the video" + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the video" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "num_frames": { + "title": "Num Frames", + "type": "integer", + "description": "The number of frames in the video" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-2/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-2/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx2ImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx2ImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "bytedance/lynx", + "metadata": { + "display_name": "Lynx", + "category": "image-to-video", + "description": "Generate subject consistent videos using Lynx from ByteDance!", + "status": "active", + "tags": [ + "image-to-video", + "subject" + ], + "updated_at": "2026-01-26T21:42:21.652Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/lion/46Az_CH4LkzpoBBIJYiSf_c6c1e1cb386b4e30b7c0e8eee627379c.jpg", + "model_url": "https://fal.run/bytedance/lynx", + "license_type": "commercial", + "date": "2025-11-18T01:53:22.544Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for bytedance/lynx", + "version": "1.0.0", + "description": "The OpenAPI schema for the bytedance/lynx queue.", + "x-fal-metadata": { + "endpointId": "bytedance/lynx", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/lion/46Az_CH4LkzpoBBIJYiSf_c6c1e1cb386b4e30b7c0e8eee627379c.jpg", + "playgroundUrl": "https://fal.ai/models/bytedance/lynx", + "documentationUrl": "https://fal.ai/models/bytedance/lynx/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "LynxInput": { + "x-fal-order-properties": [ + "image_url", + "prompt", + "negative_prompt", + "num_inference_steps", + "seed", + "resolution", + "aspect_ratio", + "ip_scale", + "strength", + "frames_per_second", + "guidance_scale", + "guidance_scale_2", + "num_frames" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A person carves a pumpkin on a porch in the evening. The camera captures their upper body as they draw a face with a marker, carefully cut along the lines, then lift the lid with both hands. Their face lights up with excitement as they peek inside" + ], + "title": "Prompt", + "type": "string", + "description": "Text prompt to guide video generation" + }, + "resolution": { + "enum": [ + "480p", + "580p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video (480p, 580p, or 720p)", + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio of the generated video (16:9, 9:16, or 1:1)", + "default": "16:9" + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "maximum": 75, + "default": 50 + }, + "guidance_scale_2": { + "minimum": 0, + "title": "Guidance Scale 2", + "type": "number", + "description": "Image guidance scale. Controls how closely the generated video follows the reference image. Higher values increase adherence to the reference image but may decrease quality.", + "maximum": 10, + "default": 2 + }, + "strength": { + "minimum": 0, + "title": "Strength", + "type": "number", + "description": "Reference image scale. Controls the influence of the reference image on the generated video.", + "maximum": 2, + "default": 1 + }, + "frames_per_second": { + "minimum": 5, + "title": "Frames Per Second", + "type": "integer", + "description": "Frames per second of the generated video. Must be between 5 and 30.", + "maximum": 30, + "default": 16 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/lynx/example_in.png" + ], + "title": "Image Url", + "type": "string", + "description": "The URL of the subject image to be used for video generation" + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance Scale", + "type": "number", + "description": "Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.", + "maximum": 20, + "default": 5 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility. If None, a random seed is chosen." + }, + "num_frames": { + "minimum": 9, + "title": "Num Frames", + "type": "integer", + "description": "Number of frames in the generated video. 
Must be between 9 and 81.", + "maximum": 81, + "default": 81 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt to guide what should not appear in the generated video", + "default": "Bright tones, overexposed, blurred background, static, subtitles, style, works, paintings, images, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards" + }, + "ip_scale": { + "minimum": 0, + "title": "Ip Scale", + "type": "number", + "description": "Identity preservation scale. Controls how closely the generated video preserves the subject's identity from the reference image.", + "maximum": 2, + "default": 1 + } + }, + "title": "LynxInput", + "required": [ + "image_url", + "prompt" + ] + }, + "LynxOutput": { + "x-fal-order-properties": [ + "video", + "seed" + ], + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + }, + "video": { + "examples": [ + { + "content_type": "video/mp4", + "url": "https://storage.googleapis.com/falserverless/example_outputs/lynx/example_out.mp4" + } + ], + "title": "Video", + "description": "The generated video file", + "allOf": [ + { + "$ref": "#/components/schemas/VideoFile" + } + ] + } + }, + "title": "LynxOutput", + "required": [ + "video", + "seed" + ] + }, + "VideoFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "duration": { + "title": "Duration", + "type": "number", + "description": "The duration of the video" + }, + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the video" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "fps": { + "title": "Fps", + "type": "number", + "description": "The FPS of the video" + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the video" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "num_frames": { + "title": "Num Frames", + "type": "integer", + "description": "The number of frames in the video" + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "VideoFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/bytedance/lynx/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bytedance/lynx/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/bytedance/lynx": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LynxInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bytedance/lynx/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LynxOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pixverse/swap", + "metadata": { + "display_name": "Pixverse", + "category": "image-to-video", + "description": "Generate high quality video clips by swapping person, objects and background using Pixverse Swap.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:25.895Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/lion/NannfsqTmo5KZP4HWcpBx_ee601644250b4ed589741220176495fe.jpg", + "model_url": "https://fal.run/fal-ai/pixverse/swap", + "license_type": "commercial", + "date": "2025-11-10T14:14:52.148Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pixverse/swap", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pixverse/swap queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pixverse/swap", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/lion/NannfsqTmo5KZP4HWcpBx_ee601644250b4ed589741220176495fe.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/pixverse/swap", + "documentationUrl": "https://fal.ai/models/fal-ai/pixverse/swap/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PixverseSwapInput": { + "title": "SwapRequest", + "type": "object", + "properties": { + "original_sound_switch": { + "title": "Original Sound Switch", + "type": "boolean", + "description": "Whether to keep the original audio", + "default": true + }, + "video_url": { + "examples": [ + "https://v3b.fal.media/files/b/lion/k_RpEIZ4YZtwZklzXz7Gb_output.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "URL of the external video to swap" + }, + "keyframe_id": { + "minimum": 1, + "title": "Keyframe Id", + "type": "integer", + "description": "The keyframe ID (from 1 to the last frame position)", + "default": 1 + }, + "mode": { + "enum": [ + "person", + "object", + "background" + ], + "title": "Mode", + "type": "string", + "description": "The swap mode to use", + "default": "person" + }, + "resolution": { + "enum": [ + "360p", + "540p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "The output resolution (1080p not supported)", + "default": "720p" + }, + "image_url": { + "examples": [ + "https://v3b.fal.media/files/b/elephant/Lu7lo2dpxVPD-NrNZzx42_56dc797a1f764c98a4f075a8c0332bf0.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the target image for swapping" + } + }, + "x-fal-order-properties": [ + "video_url", + "mode", + "keyframe_id", + "image_url", + "resolution", + "original_sound_switch" + ], + "required": [ + "video_url", + "image_url" + ] + }, + "PixverseSwapOutput": { + "title": "SwapOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 1234567, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://v3b.fal.media/files/b/elephant/BdQvPf9T6puy3Co1_ZXeu_output.mp4" + } + ], + "title": "Video", + "description": "The generated swapped video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pixverse/swap/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/swap/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/pixverse/swap": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseSwapInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/swap/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseSwapOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pika/v2.2/pikaframes", + "metadata": { + "display_name": "Pika", + "category": "image-to-video", + "description": "Discover ultimate control with Pikaframes key frame interpolation, a stunning image-to-video feature that allows you to upload up to 5 keyframes, customize their transition length and prompt, and see their images come to life as seamless videos.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:26.020Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/koala/CtOfvKi8w3X6qO495bKva_4377f203234f47d69a57efef836c93aa.jpg", + "model_url": "https://fal.run/fal-ai/pika/v2.2/pikaframes", + "license_type": "commercial", + "date": "2025-11-07T23:56:01.434Z", + "group": { + "key": "pika", + "label": "Pikaframes (v2.2)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pika/v2.2/pikaframes", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pika/v2.2/pikaframes queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pika/v2.2/pikaframes", + "category": 
"image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/koala/CtOfvKi8w3X6qO495bKva_4377f203234f47d69a57efef836c93aa.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/pika/v2.2/pikaframes", + "documentationUrl": "https://fal.ai/models/fal-ai/pika/v2.2/pikaframes/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PikaV22PikaframesInput": { + "x-fal-order-properties": [ + "image_urls", + "transitions", + "prompt", + "negative_prompt", + "seed", + "resolution" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "smooth cinematic transition", + "seamless blend between scenes" + ], + "title": "Prompt", + "type": "string", + "description": "Default prompt for all transitions. Individual transition prompts override this." + }, + "resolution": { + "examples": [ + "1080p", + "720p" + ], + "title": "Resolution", + "type": "string", + "enum": [ + "720p", + "1080p" + ], + "description": "The resolution of the generated video", + "default": "720p" + }, + "transitions": { + "examples": [], + "title": "Transitions", + "type": "array", + "description": "Configuration for each transition. Length must be len(image_urls) - 1. Total duration of all transitions must not exceed 25 seconds. 
If not provided, uses default 5-second transitions with the global prompt.", + "items": { + "$ref": "#/components/schemas/KeyframeTransition" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed for the random number generator" + }, + "image_urls": { + "examples": [ + [ + "https://v3b.fal.media/files/b/tiger/-YohU0xcPcWe_eiUB9_i6_keyframes-apple-start.png", + "https://v3b.fal.media/files/b/tiger/LarvwQGEFqEmF8fkgDB8R_keyframes-apple-end.png" + ] + ], + "title": "Image Urls", + "type": "array", + "description": "URLs of keyframe images (2-5 images) to create transitions between", + "items": { + "type": "string" + } + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "A negative prompt to guide the model", + "default": "" + } + }, + "title": "Pika22KeyframesToVideoRequest", + "required": [ + "image_urls" + ] + }, + "PikaV22PikaframesOutput": { + "x-fal-order-properties": [ + "video" + ], + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 1583228, + "file_name": "tmpjfwlno11.mp4", + "content_type": "video/mp4", + "url": "https://v3b.fal.media/files/b/lion/0KxHFdw-mp0OzGsLrQLIy_tmpjfwlno11.mp4" + } + ], + "title": "Video", + "description": "The generated video with transitions between keyframes", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "Pika22KeyframesToVideoOutput", + "description": "Output model for Pika 2.2 keyframes-to-video generation", + "required": [ + "video" + ] + }, + "KeyframeTransition": { + "x-fal-order-properties": [ + "duration", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + null + ], + "title": "Prompt", + "type": "string", + "description": "Specific prompt for this transition. Overrides the global prompt if provided." + }, + "duration": { + "minimum": 1, + "maximum": 25, + "type": "integer", + "title": "Duration", + "description": "Duration of this transition in seconds", + "default": 5 + } + }, + "title": "KeyframeTransition", + "description": "Configuration for a transition between two keyframes" + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pika/v2.2/pikaframes/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pika/v2.2/pikaframes/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/pika/v2.2/pikaframes": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PikaV22PikaframesInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pika/v2.2/pikaframes/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PikaV22PikaframesOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/longcat-video/image-to-video/720p", + "metadata": { + "display_name": "LongCat Video", + "category": "image-to-video", + "description": "Generate long videos in 720p/30fps from images using LongCat Video", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:29.273Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/tiger/s_V-U5ax-L7FjU5Ua0Jmk_723a0755259f44d492e6cf825608804c.jpg", + "model_url": "https://fal.run/fal-ai/longcat-video/image-to-video/720p", + "license_type": "commercial", + "date": "2025-10-30T16:23:09.789Z", + "group": { + "key": "longcat", + "label": "Image to Video (720p)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/longcat-video/image-to-video/720p", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/longcat-video/image-to-video/720p queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/longcat-video/image-to-video/720p", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/tiger/s_V-U5ax-L7FjU5Ua0Jmk_723a0755259f44d492e6cf825608804c.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/longcat-video/image-to-video/720p", + "documentationUrl": "https://fal.ai/models/fal-ai/longcat-video/image-to-video/720p/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LongcatVideoImageToVideo720pInput": { + "title": "LongCat720PCFGImageToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "First-person view from the cockpit of a Formula 1 car. The driver's gloved hands firmly grip the intricate, carbon-fiber steering wheel adorned with numerous colorful buttons and a vibrant digital display showing race data. Beyond the windshield, a sun-drenched racetrack stretches ahead, lined with cheering spectators in the grandstands. Several rival cars are visible in the distance, creating a dynamic sense of competition. The sky above is a clear, brilliant blue, reflecting the exhilarating atmosphere of a high-speed race. high resolution 4k" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to guide the video generation.", + "default": "First-person view from the cockpit of a Formula 1 car. The driver's gloved hands firmly grip the intricate, carbon-fiber steering wheel adorned with numerous colorful buttons and a vibrant digital display showing race data. Beyond the windshield, a sun-drenched racetrack stretches ahead, lined with cheering spectators in the grandstands. Several rival cars are visible in the distance, creating a dynamic sense of competition. The sky above is a clear, brilliant blue, reflecting the exhilarating atmosphere of a high-speed race. 
high resolution 4k" + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use for the video generation.", + "examples": [ + "regular" + ], + "default": "regular" + }, + "fps": { + "minimum": 1, + "title": "FPS", + "type": "integer", + "description": "The frame rate of the generated video.", + "maximum": 60, + "default": 30 + }, + "num_refine_inference_steps": { + "minimum": 8, + "title": "Number of Refinement Inference Steps", + "type": "integer", + "description": "The number of inference steps to use for refinement.", + "maximum": 50, + "default": 40 + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance Scale", + "type": "number", + "description": "The guidance scale to use for the video generation.", + "maximum": 10, + "default": 4 + }, + "num_frames": { + "description": "The number of frames to generate.", + "type": "integer", + "minimum": 17, + "title": "Number of Frames", + "maximum": 961, + "default": 162 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable safety checker.", + "default": true + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to use for the video generation.", + "default": "Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards" + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "description": "The write mode of the generated video.", + "default": "balanced" + }, + "video_output_type": { + "enum": [ + "X264 (.mp4)", + "VP9 (.webm)", + "PRORES4444 (.mov)", + "GIF (.gif)" + ], + "title": "Video Output Type", + "type": "string", + "description": "The output type of the generated video.", + "default": "X264 (.mp4)" + }, + "image_url": { + "examples": [ + "https://v3b.fal.media/files/b/zebra/trXRsbjJwy4Z3OEgbnB9a.jpg" + ], + "title": "Image URL", + "type": "string", + "description": "The URL of the image to generate a video from." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "description": "The quality of the generated video.", + "default": "high" + }, + "enable_prompt_expansion": { + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + }, + "num_inference_steps": { + "minimum": 8, + "title": "Number of Inference Steps", + "type": "integer", + "description": "The number of inference steps to use for the video generation.", + "maximum": 50, + "default": 40 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed for the random number generator." 
+ } + }, + "x-fal-order-properties": [ + "image_url", + "prompt", + "negative_prompt", + "num_frames", + "num_inference_steps", + "num_refine_inference_steps", + "guidance_scale", + "fps", + "seed", + "enable_prompt_expansion", + "enable_safety_checker", + "video_output_type", + "video_quality", + "video_write_mode", + "sync_mode", + "acceleration" + ], + "required": [ + "image_url" + ] + }, + "LongcatVideoImageToVideo720pOutput": { + "title": "LongCatImageToVideoResponse", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "First-person view from the cockpit of a Formula 1 car. The driver's gloved hands firmly grip the intricate, carbon-fiber steering wheel adorned with numerous colorful buttons and a vibrant digital display showing race data. Beyond the windshield, a sun-drenched racetrack stretches ahead, lined with cheering spectators in the grandstands. Several rival cars are visible in the distance, creating a dynamic sense of competition. The sky above is a clear, brilliant blue, reflecting the exhilarating atmosphere of a high-speed race. high resolution 4k" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation." + }, + "seed": { + "examples": [ + 916581 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "content_type": "video/mp4", + "url": "https://v3b.fal.media/files/b/panda/4-MoAje_CCMAGH8d-9kmA_nQEkcRc2.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "prompt", + "seed", + "video" + ], + "required": [ + "prompt", + "seed", + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/longcat-video/image-to-video/720p/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/longcat-video/image-to-video/720p/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/longcat-video/image-to-video/720p": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LongcatVideoImageToVideo720pInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/longcat-video/image-to-video/720p/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LongcatVideoImageToVideo720pOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/longcat-video/image-to-video/480p", + "metadata": { + "display_name": "LongCat Video", + "category": "image-to-video", + "description": "Generate long videos from images using LongCat Video", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:29.404Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/tiger/kqIypfhdZ87GT5n4t5mx__a7a61a1a39ec4ef2af632def10db2776.jpg", + "model_url": "https://fal.run/fal-ai/longcat-video/image-to-video/480p", + "license_type": "commercial", + "date": "2025-10-30T14:54:52.837Z", + "group": { + "key": "longcat", + "label": "Image to Video (480p)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/longcat-video/image-to-video/480p", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/longcat-video/image-to-video/480p queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/longcat-video/image-to-video/480p", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/tiger/kqIypfhdZ87GT5n4t5mx__a7a61a1a39ec4ef2af632def10db2776.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/longcat-video/image-to-video/480p", + "documentationUrl": "https://fal.ai/models/fal-ai/longcat-video/image-to-video/480p/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The 
request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LongcatVideoImageToVideo480pInput": { + "title": "LongCatCFGImageToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "First-person view from the cockpit of a Formula 1 car. The driver's gloved hands firmly grip the intricate, carbon-fiber steering wheel adorned with numerous colorful buttons and a vibrant digital display showing race data. Beyond the windshield, a sun-drenched racetrack stretches ahead, lined with cheering spectators in the grandstands. Several rival cars are visible in the distance, creating a dynamic sense of competition. The sky above is a clear, brilliant blue, reflecting the exhilarating atmosphere of a high-speed race. high resolution 4k" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to guide the video generation.", + "default": "First-person view from the cockpit of a Formula 1 car. The driver's gloved hands firmly grip the intricate, carbon-fiber steering wheel adorned with numerous colorful buttons and a vibrant digital display showing race data. Beyond the windshield, a sun-drenched racetrack stretches ahead, lined with cheering spectators in the grandstands. Several rival cars are visible in the distance, creating a dynamic sense of competition. The sky above is a clear, brilliant blue, reflecting the exhilarating atmosphere of a high-speed race. 
high resolution 4k" + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use for the video generation.", + "examples": [ + "regular" + ], + "default": "regular" + }, + "fps": { + "minimum": 1, + "title": "FPS", + "type": "integer", + "description": "The frame rate of the generated video.", + "maximum": 60, + "default": 15 + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance Scale", + "type": "number", + "description": "The guidance scale to use for the video generation.", + "maximum": 10, + "default": 4 + }, + "num_frames": { + "description": "The number of frames to generate.", + "type": "integer", + "minimum": 17, + "title": "Number of Frames", + "maximum": 961, + "default": 162 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable safety checker.", + "default": true + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to use for the video generation.", + "default": "Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards" + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "description": "The write mode of the generated video.", + "default": "balanced" + }, + "video_output_type": { + "enum": [ + "X264 (.mp4)", + "VP9 (.webm)", + "PRORES4444 (.mov)", + "GIF (.gif)" + ], + "title": "Video Output Type", + "type": "string", + "description": "The output type of the generated video.", + "default": "X264 (.mp4)" + }, + "image_url": { + "examples": [ + "https://v3b.fal.media/files/b/zebra/trXRsbjJwy4Z3OEgbnB9a.jpg" + ], + "title": "Image URL", + "type": "string", + "description": "The URL of the image to generate a video from." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "description": "The quality of the generated video.", + "default": "high" + }, + "enable_prompt_expansion": { + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + }, + "num_inference_steps": { + "minimum": 8, + "title": "Number of Inference Steps", + "type": "integer", + "description": "The number of inference steps to use for the video generation.", + "maximum": 50, + "default": 40 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed for the random number generator." 
+ } + }, + "x-fal-order-properties": [ + "image_url", + "prompt", + "negative_prompt", + "num_frames", + "num_inference_steps", + "guidance_scale", + "fps", + "seed", + "enable_prompt_expansion", + "enable_safety_checker", + "video_output_type", + "video_quality", + "video_write_mode", + "sync_mode", + "acceleration" + ], + "required": [ + "image_url" + ] + }, + "LongcatVideoImageToVideo480pOutput": { + "title": "LongCatImageToVideoResponse", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "First-person view from the cockpit of a Formula 1 car. The driver's gloved hands firmly grip the intricate, carbon-fiber steering wheel adorned with numerous colorful buttons and a vibrant digital display showing race data. Beyond the windshield, a sun-drenched racetrack stretches ahead, lined with cheering spectators in the grandstands. Several rival cars are visible in the distance, creating a dynamic sense of competition. The sky above is a clear, brilliant blue, reflecting the exhilarating atmosphere of a high-speed race. high resolution 4k" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation." + }, + "seed": { + "examples": [ + 916581 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "content_type": "video/mp4", + "url": "https://v3b.fal.media/files/b/panda/4-MoAje_CCMAGH8d-9kmA_nQEkcRc2.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "prompt", + "seed", + "video" + ], + "required": [ + "prompt", + "seed", + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/longcat-video/image-to-video/480p/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/longcat-video/image-to-video/480p/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/longcat-video/image-to-video/480p": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LongcatVideoImageToVideo480pInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/longcat-video/image-to-video/480p/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LongcatVideoImageToVideo480pOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/longcat-video/distilled/image-to-video/720p", + "metadata": { + "display_name": "LongCat Video Distilled", + "category": "image-to-video", + "description": "Generate long videos in 720p/30fps from images using LongCat Video Distilled", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:29.721Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/lion/PlEcqdZVUsm3jFXpUPvvT_5da6319dbf674dffbea088eaa87a979c.jpg", + "model_url": "https://fal.run/fal-ai/longcat-video/distilled/image-to-video/720p", + "license_type": "commercial", + "date": "2025-10-30T14:43:43.650Z", + "group": { + "key": "longcat-distilled", + "label": "Image to Video (720p)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/longcat-video/distilled/image-to-video/720p", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/longcat-video/distilled/image-to-video/720p queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/longcat-video/distilled/image-to-video/720p", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/lion/PlEcqdZVUsm3jFXpUPvvT_5da6319dbf674dffbea088eaa87a979c.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/longcat-video/distilled/image-to-video/720p", + "documentationUrl": "https://fal.ai/models/fal-ai/longcat-video/distilled/image-to-video/720p/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + 
"IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LongcatVideoDistilledImageToVideo720pInput": { + "title": "LongCat720PImageToVideoRequest", + "type": "object", + "properties": { + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "description": "The write mode of the generated video.", + "default": "balanced" + }, + "video_output_type": { + "enum": [ + "X264 (.mp4)", + "VP9 (.webm)", + "PRORES4444 (.mov)", + "GIF (.gif)" + ], + "title": "Video Output Type", + "type": "string", + "description": "The output type of the generated video.", + "default": "X264 (.mp4)" + }, + "enable_prompt_expansion": { + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + }, + "prompt": { + "examples": [ + "First-person view from the cockpit of a Formula 1 car. The driver's gloved hands firmly grip the intricate, carbon-fiber steering wheel adorned with numerous colorful buttons and a vibrant digital display showing race data. Beyond the windshield, a sun-drenched racetrack stretches ahead, lined with cheering spectators in the grandstands. Several rival cars are visible in the distance, creating a dynamic sense of competition. The sky above is a clear, brilliant blue, reflecting the exhilarating atmosphere of a high-speed race. high resolution 4k" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to guide the video generation.", + "default": "First-person view from the cockpit of a Formula 1 car. The driver's gloved hands firmly grip the intricate, carbon-fiber steering wheel adorned with numerous colorful buttons and a vibrant digital display showing race data. Beyond the windshield, a sun-drenched racetrack stretches ahead, lined with cheering spectators in the grandstands. Several rival cars are visible in the distance, creating a dynamic sense of competition. The sky above is a clear, brilliant blue, reflecting the exhilarating atmosphere of a high-speed race. 
high resolution 4k" + }, + "fps": { + "minimum": 1, + "title": "FPS", + "type": "integer", + "description": "The frame rate of the generated video.", + "maximum": 60, + "default": 30 + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "num_refine_inference_steps": { + "minimum": 2, + "title": "Number of Refinement Inference Steps", + "type": "integer", + "description": "The number of inference steps to use for refinement.", + "maximum": 16, + "default": 12 + }, + "image_url": { + "examples": [ + "https://v3b.fal.media/files/b/zebra/trXRsbjJwy4Z3OEgbnB9a.jpg" + ], + "title": "Image URL", + "type": "string", + "description": "The URL of the image to generate a video from." + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable safety checker.", + "default": true + }, + "num_inference_steps": { + "minimum": 2, + "title": "Number of Inference Steps", + "type": "integer", + "description": "The number of inference steps to use.", + "maximum": 16, + "default": 12 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed for the random number generator." + }, + "num_frames": { + "description": "The number of frames to generate.", + "type": "integer", + "minimum": 17, + "title": "Number of Frames", + "maximum": 961, + "default": 162 + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "description": "The quality of the generated video.", + "default": "high" + } + }, + "x-fal-order-properties": [ + "image_url", + "prompt", + "num_frames", + "num_inference_steps", + "num_refine_inference_steps", + "fps", + "seed", + "enable_prompt_expansion", + "enable_safety_checker", + "video_output_type", + "video_quality", + "video_write_mode", + "sync_mode" + ], + "required": [ + "image_url" + ] + }, + "LongcatVideoDistilledImageToVideo720pOutput": { + "title": "LongCatImageToVideoResponse", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "First-person view from the cockpit of a Formula 1 car. The driver's gloved hands firmly grip the intricate, carbon-fiber steering wheel adorned with numerous colorful buttons and a vibrant digital display showing race data. Beyond the windshield, a sun-drenched racetrack stretches ahead, lined with cheering spectators in the grandstands. Several rival cars are visible in the distance, creating a dynamic sense of competition. The sky above is a clear, brilliant blue, reflecting the exhilarating atmosphere of a high-speed race. high resolution 4k" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation." + }, + "seed": { + "examples": [ + 916581 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." 
+ }, + "video": { + "examples": [ + { + "content_type": "video/mp4", + "url": "https://v3b.fal.media/files/b/panda/4-MoAje_CCMAGH8d-9kmA_nQEkcRc2.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "prompt", + "seed", + "video" + ], + "required": [ + "prompt", + "seed", + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/longcat-video/distilled/image-to-video/720p/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/longcat-video/distilled/image-to-video/720p/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/longcat-video/distilled/image-to-video/720p": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LongcatVideoDistilledImageToVideo720pInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/longcat-video/distilled/image-to-video/720p/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LongcatVideoDistilledImageToVideo720pOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/longcat-video/distilled/image-to-video/480p", + "metadata": { + "display_name": "LongCat Video Distilled", + "category": "image-to-video", + "description": "Generate long videos from images using LongCat Video Distilled", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:31.090Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/rabbit/iOwf_UFypDFAPX27ciX7N_03c77dfefc0a4e82a7f500216b609240.jpg", + "model_url": "https://fal.run/fal-ai/longcat-video/distilled/image-to-video/480p", + "license_type": "commercial", + "date": "2025-10-29T00:26:46.200Z", + "group": { + "key": "longcat-distilled", + "label": "Image to Video (480p)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/longcat-video/distilled/image-to-video/480p", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/longcat-video/distilled/image-to-video/480p queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/longcat-video/distilled/image-to-video/480p", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/rabbit/iOwf_UFypDFAPX27ciX7N_03c77dfefc0a4e82a7f500216b609240.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/longcat-video/distilled/image-to-video/480p", + "documentationUrl": "https://fal.ai/models/fal-ai/longcat-video/distilled/image-to-video/480p/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "LongcatVideoDistilledImageToVideo480pInput": { + "title": "LongCatImageToVideoRequest", + "type": "object", + "properties": { + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "description": "The write mode of the generated video.", + "default": "balanced" + }, + "video_output_type": { + "enum": [ + "X264 (.mp4)", + "VP9 (.webm)", + "PRORES4444 (.mov)", + "GIF (.gif)" + ], + "title": "Video Output Type", + "type": "string", + "description": "The output type of the generated video.", + "default": "X264 (.mp4)" + }, + "prompt": { + "examples": [ + "First-person view from the cockpit of a Formula 1 car. The driver's gloved hands firmly grip the intricate, carbon-fiber steering wheel adorned with numerous colorful buttons and a vibrant digital display showing race data. Beyond the windshield, a sun-drenched racetrack stretches ahead, lined with cheering spectators in the grandstands. Several rival cars are visible in the distance, creating a dynamic sense of competition. The sky above is a clear, brilliant blue, reflecting the exhilarating atmosphere of a high-speed race. high resolution 4k" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to guide the video generation.", + "default": "First-person view from the cockpit of a Formula 1 car. The driver's gloved hands firmly grip the intricate, carbon-fiber steering wheel adorned with numerous colorful buttons and a vibrant digital display showing race data. Beyond the windshield, a sun-drenched racetrack stretches ahead, lined with cheering spectators in the grandstands. Several rival cars are visible in the distance, creating a dynamic sense of competition. The sky above is a clear, brilliant blue, reflecting the exhilarating atmosphere of a high-speed race. high resolution 4k" + }, + "fps": { + "minimum": 1, + "title": "FPS", + "type": "integer", + "description": "The frame rate of the generated video.", + "maximum": 60, + "default": 15 + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "image_url": { + "examples": [ + "https://v3b.fal.media/files/b/zebra/trXRsbjJwy4Z3OEgbnB9a.jpg" + ], + "title": "Image URL", + "type": "string", + "description": "The URL of the image to generate a video from." + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "description": "The quality of the generated video.", + "default": "high" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable safety checker.", + "default": true + }, + "num_inference_steps": { + "minimum": 2, + "title": "Number of Inference Steps", + "type": "integer", + "description": "The number of inference steps to use.", + "maximum": 16, + "default": 12 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed for the random number generator." 
+ }, + "num_frames": { + "description": "The number of frames to generate.", + "type": "integer", + "minimum": 17, + "title": "Number of Frames", + "maximum": 961, + "default": 162 + }, + "enable_prompt_expansion": { + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + } + }, + "x-fal-order-properties": [ + "image_url", + "prompt", + "num_frames", + "num_inference_steps", + "fps", + "seed", + "enable_prompt_expansion", + "enable_safety_checker", + "video_output_type", + "video_quality", + "video_write_mode", + "sync_mode" + ], + "required": [ + "image_url" + ] + }, + "LongcatVideoDistilledImageToVideo480pOutput": { + "title": "LongCatImageToVideoResponse", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "First-person view from the cockpit of a Formula 1 car. The driver's gloved hands firmly grip the intricate, carbon-fiber steering wheel adorned with numerous colorful buttons and a vibrant digital display showing race data. Beyond the windshield, a sun-drenched racetrack stretches ahead, lined with cheering spectators in the grandstands. Several rival cars are visible in the distance, creating a dynamic sense of competition. The sky above is a clear, brilliant blue, reflecting the exhilarating atmosphere of a high-speed race. high resolution 4k" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation." + }, + "seed": { + "examples": [ + 916581 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "content_type": "video/mp4", + "url": "https://v3b.fal.media/files/b/panda/4-MoAje_CCMAGH8d-9kmA_nQEkcRc2.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "prompt", + "seed", + "video" + ], + "required": [ + "prompt", + "seed", + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/longcat-video/distilled/image-to-video/480p/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/longcat-video/distilled/image-to-video/480p/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/longcat-video/distilled/image-to-video/480p": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LongcatVideoDistilledImageToVideo480pInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/longcat-video/distilled/image-to-video/480p/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LongcatVideoDistilledImageToVideo480pOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/minimax/hailuo-2.3-fast/standard/image-to-video", + "metadata": { + "display_name": "MiniMax Hailuo 2.3 Fast [Standard] (Image to Video)", + "category": "image-to-video", + "description": "MiniMax Hailuo-2.3-Fast Image To Video API (Standard, 768p): Advanced fast image-to-video generation model with 768p resolution", + "status": "active", + "tags": [ + "image-to-video" + ], + "updated_at": "2026-01-26T21:42:31.716Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/penguin/wEXBG5qZxdJ1Ed5P-EWYT_c69665fc168a437bb53d99ae8534eabe.jpg", + "model_url": "https://fal.run/fal-ai/minimax/hailuo-2.3-fast/standard/image-to-video", + "license_type": "commercial", + "date": "2025-10-27T13:12:54.242Z", + "group": { + "key": "hailuo-23", + "label": "Fast Image To Video (standard)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/minimax/hailuo-2.3-fast/standard/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/minimax/hailuo-2.3-fast/standard/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/minimax/hailuo-2.3-fast/standard/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/penguin/wEXBG5qZxdJ1Ed5P-EWYT_c69665fc168a437bb53d99ae8534eabe.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/minimax/hailuo-2.3-fast/standard/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/minimax/hailuo-2.3-fast/standard/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", 
+ "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MinimaxHailuo23FastStandardImageToVideoInput": { + "title": "StandardFastImageToVideoHailuo23Input", + "type": "object", + "properties": { + "prompt_optimizer": { + "description": "Whether to use the model's prompt optimizer", + "type": "boolean", + "title": "Prompt Optimizer", + "default": true + }, + "duration": { + "enum": [ + "6", + "10" + ], + "description": "The duration of the video in seconds.", + "type": "string", + "title": "Duration", + "default": "6" + }, + "prompt": { + "examples": [ + "Athlete running powerfully on beach, dynamic camera movement tracking the runner, waves and sunset in motion, Hollywood movie cinematography, professional sports filming, inspiring atmosphere" + ], + "maxLength": 2000, + "type": "string", + "title": "Prompt", + "description": "Text prompt for video generation", + "minLength": 1 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/hailuo23/fast_standard_i2v_in.jpg" + ], + "description": "URL of the image to use as the first frame", + "type": "string", + "title": "Image Url" + } + }, + "x-fal-order-properties": [ + "prompt", + "prompt_optimizer", + "image_url", + "duration" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "MinimaxHailuo23FastStandardImageToVideoOutput": { + "title": "StandardFastImageToVideoHailuo23Output", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/hailuo23/fast_standard_i2v_out.mp4" + } + ], + "description": "The generated video", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/minimax/hailuo-2.3-fast/standard/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/hailuo-2.3-fast/standard/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/minimax/hailuo-2.3-fast/standard/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxHailuo23FastStandardImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/hailuo-2.3-fast/standard/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxHailuo23FastStandardImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/minimax/hailuo-2.3/standard/image-to-video", + "metadata": { + "display_name": "MiniMax Hailuo 2.3 [Standard] (Image to Video)", + "category": "image-to-video", + "description": "MiniMax Hailuo-2.3 Image To Video API (Standard, 768p): Advanced image-to-video generation model with 768p resolution", + "status": "active", + "tags": [ + "image-to-video" + ], + "updated_at": "2026-01-26T21:42:31.841Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/zebra/7kDvX2hsHnnUu9mSt-I_C_3a18467c2b234f6590e01edd53840f2d.jpg", + "model_url": "https://fal.run/fal-ai/minimax/hailuo-2.3/standard/image-to-video", + "license_type": "commercial", + "date": "2025-10-27T13:11:19.864Z", + "group": { + "key": "hailuo-23", + "label": "Image To Video (standard)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/minimax/hailuo-2.3/standard/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/minimax/hailuo-2.3/standard/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/minimax/hailuo-2.3/standard/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/zebra/7kDvX2hsHnnUu9mSt-I_C_3a18467c2b234f6590e01edd53840f2d.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/minimax/hailuo-2.3/standard/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/minimax/hailuo-2.3/standard/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MinimaxHailuo23StandardImageToVideoInput": { + "title": "StandardImageToVideoHailuo23Input", + "type": "object", + "properties": { + "prompt_optimizer": { + "description": "Whether to use the model's prompt optimizer", + "type": "boolean", + "title": "Prompt Optimizer", + "default": true + }, + "duration": { + "enum": [ + "6", + "10" + ], + "description": "The duration of the video in seconds.", + "type": "string", + "title": "Duration", + "default": "6" + }, + "prompt": { + "examples": [ + "The space station slowly rotates in orbit, its solar panels tracking the sun. Earth rotates majestically in the background with weather patterns and landmasses drifting by. The station's communication arrays adjust position. A small spacecraft approaches one of the docking ports. The scene captures the silent majesty of space and human engineering." + ], + "maxLength": 2000, + "type": "string", + "title": "Prompt", + "description": "Text prompt for video generation", + "minLength": 1 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/hailuo23/standard_i2v_in.jpg" + ], + "description": "URL of the image to use as the first frame", + "type": "string", + "title": "Image Url" + } + }, + "x-fal-order-properties": [ + "prompt", + "prompt_optimizer", + "duration", + "image_url" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "MinimaxHailuo23StandardImageToVideoOutput": { + "title": "StandardImageToVideoHailuo23Output", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/hailuo23/standard_i2v_out.mp4" + } + ], + "description": "The generated video", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/minimax/hailuo-2.3/standard/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/hailuo-2.3/standard/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/minimax/hailuo-2.3/standard/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxHailuo23StandardImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/hailuo-2.3/standard/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxHailuo23StandardImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/minimax/hailuo-2.3-fast/pro/image-to-video", + "metadata": { + "display_name": "MiniMax Hailuo 2.3 Fast [Pro] (Image to Video)", + "category": "image-to-video", + "description": "MiniMax Hailuo-2.3-Fast Image To Video API (Pro, 1080p): Advanced fast image-to-video generation model with 1080p resolution", + "status": "active", + "tags": [ + "image-to-video" + ], + "updated_at": "2026-01-26T21:42:31.966Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/penguin/37ZHJodysdkE5NcfUc3fu_5731672b9b3c4e47b4b851a5145e5d3c.jpg", + "model_url": "https://fal.run/fal-ai/minimax/hailuo-2.3-fast/pro/image-to-video", + "license_type": "commercial", + "date": "2025-10-27T13:08:20.657Z", + "group": { + "key": "hailuo-23", + "label": "Fast Image To Video (pro)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/minimax/hailuo-2.3-fast/pro/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/minimax/hailuo-2.3-fast/pro/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/minimax/hailuo-2.3-fast/pro/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/penguin/37ZHJodysdkE5NcfUc3fu_5731672b9b3c4e47b4b851a5145e5d3c.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/minimax/hailuo-2.3-fast/pro/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/minimax/hailuo-2.3-fast/pro/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MinimaxHailuo23FastProImageToVideoInput": { + "title": "ProFastImageToVideoHailuo23Input", + "type": "object", + "properties": { + "prompt_optimizer": { + "description": "Whether to use the model's prompt optimizer", + "type": "boolean", + "title": "Prompt Optimizer", + "default": true + }, + "prompt": { + "examples": [ + "Subject-tracking orbit: camera glides parallel to astronaut, Earth rotates beneath, sun crest reveals lens flare, astronaut tethers and spins slowly, 8K 60 fps slow-motion, ends with spacecraft blackout on sun disk" + ], + "maxLength": 2000, + "type": "string", + "title": "Prompt", + "description": "Text prompt for video generation", + "minLength": 1 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/hailuo23/fast_pro_i2v_in.jpg" + ], + "description": "URL of the image to use as the first frame", + "type": "string", + "title": "Image Url" + } + }, + "x-fal-order-properties": [ + "prompt", + "prompt_optimizer", + "image_url" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "MinimaxHailuo23FastProImageToVideoOutput": { + "title": "ProFastImageToVideoHailuo23Output", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/hailuo23/fast_pro_i2v_out.mp4" + } + ], + "description": "The generated video", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/minimax/hailuo-2.3-fast/pro/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/hailuo-2.3-fast/pro/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/minimax/hailuo-2.3-fast/pro/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxHailuo23FastProImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/hailuo-2.3-fast/pro/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxHailuo23FastProImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/bytedance/seedance/v1/pro/fast/image-to-video", + "metadata": { + "display_name": "Bytedance", + "category": "image-to-video", + "description": "Image to Video endpoint for Seedance 1.0 Pro Fast, a next-generation video model designed to deliver maximum performance at minimal cost", + "status": "active", + "tags": [ + "bytedance", + "seedance", + "pro", + "fast" + ], + "updated_at": "2026-01-26T21:42:32.655Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/kangaroo/OHnRF5artsmMtvv5DikBL_0d31376b45fc47c28ad70e15245ca449.jpg", + "model_url": "https://fal.run/fal-ai/bytedance/seedance/v1/pro/fast/image-to-video", + "license_type": "commercial", + "date": "2025-10-24T11:57:48.061Z", + "group": { + "key": "seedance-v1", + "label": "Seedance 1.0 Pro Fast -- Image to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/bytedance/seedance/v1/pro/fast/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/bytedance/seedance/v1/pro/fast/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/bytedance/seedance/v1/pro/fast/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/kangaroo/OHnRF5artsmMtvv5DikBL_0d31376b45fc47c28ad70e15245ca449.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/bytedance/seedance/v1/pro/fast/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/bytedance/seedance/v1/pro/fast/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": 
"Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "BytedanceSeedanceV1ProFastImageToVideoInput": { + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "resolution", + "duration", + "camera_fixed", + "seed", + "enable_safety_checker", + "image_url" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Bathed in a stark spotlight, a lone ballet dancer takes center stage. Her movements, precise and graceful, tell a story of passion and dedication against the velvet darkness. The scene evokes a sense of intimacy, highlighting the raw emotion and artistry of her performance." + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt used to generate the video" + }, + "resolution": { + "enum": [ + "480p", + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "Video resolution - 480p for faster generation, 720p for balance, 1080p for higher quality", + "default": "1080p" + }, + "aspect_ratio": { + "enum": [ + "21:9", + "16:9", + "4:3", + "1:1", + "3:4", + "9:16", + "auto" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video", + "default": "auto" + }, + "duration": { + "enum": [ + "2", + "3", + "4", + "5", + "6", + "7", + "8", + "9", + "10", + "11", + "12" + ], + "title": "Duration", + "type": "string", + "description": "Duration of the video in seconds", + "default": "5" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/seedance_fast_i2v_input.png" + ], + "title": "Image Url", + "type": "string", + "description": "The URL of the image used to generate video" + }, + "enable_safety_checker": { + "examples": [ + true + ], + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "camera_fixed": { + "title": "Camera Fixed", + "type": "boolean", + "description": "Whether to fix the camera position", + "default": false + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed to control video generation. Use -1 for random." 
+ } + }, + "title": "SeedanceProFastImageToVideoInput", + "required": [ + "prompt", + "image_url" + ] + }, + "BytedanceSeedanceV1ProFastImageToVideoOutput": { + "x-fal-order-properties": [ + "video", + "seed" + ], + "type": "object", + "properties": { + "seed": { + "examples": [ + 42 + ], + "title": "Seed", + "type": "integer", + "description": "Seed used for generation" + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_inputs/seedance_fast_i2v_output.mp4" + } + ], + "title": "Video", + "description": "Generated video file", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "SeedanceFastI2VVideoOutput", + "required": [ + "video", + "seed" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/bytedance/seedance/v1/pro/fast/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedance/v1/pro/fast/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedance/v1/pro/fast/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BytedanceSeedanceV1ProFastImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedance/v1/pro/fast/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BytedanceSeedanceV1ProFastImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/vidu/q2/image-to-video/turbo", + "metadata": { + "display_name": "Vidu", + "category": "image-to-video", + "description": "Use the latest Vidu Q2 models which much more better quality and control on your videos.", + "status": "active", + "tags": [ + "image-to-video" + ], + "updated_at": "2026-01-26T21:42:33.177Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/monkey/pV-tsWf3pDIWfSzE8Y8QD_405c6376ad454bdd9aec52cfcc23a97e.jpg", + "model_url": "https://fal.run/fal-ai/vidu/q2/image-to-video/turbo", + "license_type": "commercial", + "date": "2025-10-24T09:28:39.019Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/vidu/q2/image-to-video/turbo", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/vidu/q2/image-to-video/turbo queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/vidu/q2/image-to-video/turbo", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/monkey/pV-tsWf3pDIWfSzE8Y8QD_405c6376ad454bdd9aec52cfcc23a97e.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/vidu/q2/image-to-video/turbo", + "documentationUrl": "https://fal.ai/models/fal-ai/vidu/q2/image-to-video/turbo/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ViduQ2ImageToVideoTurboInput": { + "title": "Q2ImageToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A woman walking through a vibrant city street at night, neon lights reflecting off wet pavement." + ], + "title": "Prompt", + "type": "string", + "maxLength": 3000, + "description": "Text prompt for video generation, max 3000 characters" + }, + "resolution": { + "enum": [ + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "Output video resolution", + "default": "720p" + }, + "duration": { + "enum": [ + 2, + 3, + 4, + 5, + 6, + 7, + 8 + ], + "title": "Duration", + "type": "integer", + "description": "Duration of the video in seconds", + "default": 4 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/web-examples/vidu/stylish_woman.webp" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to use as the starting frame" + }, + "bgm": { + "title": "Bgm", + "type": "boolean", + "description": "Whether to add background music to the video (only for 4-second videos)", + "default": false + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility. If None, a random seed is chosen." + }, + "movement_amplitude": { + "enum": [ + "auto", + "small", + "medium", + "large" + ], + "title": "Movement Amplitude", + "type": "string", + "description": "The movement amplitude of objects in the frame", + "default": "auto" + }, + "end_image_url": { + "title": "End Image Url", + "type": "string", + "description": "URL of the image to use as the ending frame. When provided, generates a transition video between start and end frames." + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "end_image_url", + "seed", + "duration", + "resolution", + "movement_amplitude", + "bgm" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "ViduQ2ImageToVideoTurboOutput": { + "title": "Q2ImageToVideoOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://fal.media/files/tiger/L_lU76tYg-cXG_twy9N62_output.mp4" + } + ], + "title": "Video", + "description": "The generated video from image using the Q2 model", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/vidu/q2/image-to-video/turbo/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/vidu/q2/image-to-video/turbo/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/vidu/q2/image-to-video/turbo": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ViduQ2ImageToVideoTurboInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/vidu/q2/image-to-video/turbo/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ViduQ2ImageToVideoTurboOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/vidu/q2/image-to-video/pro", + "metadata": { + "display_name": "Vidu", + "category": "image-to-video", + "description": "Use the latest Vidu Q2 models which much more better quality and control on your videos.", + "status": "active", + "tags": [ + "image-to-video" + ], + "updated_at": "2026-01-26T21:42:33.302Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/rabbit/9ZFHimyaNGh_0WHvVLfOn_67d61fe778bd4257a402b467aaff6f66.jpg", + "model_url": "https://fal.run/fal-ai/vidu/q2/image-to-video/pro", + "license_type": "commercial", + "date": "2025-10-24T09:11:00.692Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/vidu/q2/image-to-video/pro", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/vidu/q2/image-to-video/pro queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/vidu/q2/image-to-video/pro", + "category": "image-to-video", + "thumbnailUrl": 
"https://v3b.fal.media/files/b/rabbit/9ZFHimyaNGh_0WHvVLfOn_67d61fe778bd4257a402b467aaff6f66.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/vidu/q2/image-to-video/pro", + "documentationUrl": "https://fal.ai/models/fal-ai/vidu/q2/image-to-video/pro/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ViduQ2ImageToVideoProInput": { + "title": "Q2ImageToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A woman walking through a vibrant city street at night, neon lights reflecting off wet pavement." + ], + "title": "Prompt", + "type": "string", + "maxLength": 3000, + "description": "Text prompt for video generation, max 3000 characters" + }, + "resolution": { + "enum": [ + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "Output video resolution", + "default": "720p" + }, + "duration": { + "enum": [ + 2, + 3, + 4, + 5, + 6, + 7, + 8 + ], + "title": "Duration", + "type": "integer", + "description": "Duration of the video in seconds", + "default": 4 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/web-examples/vidu/stylish_woman.webp" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to use as the starting frame" + }, + "bgm": { + "title": "Bgm", + "type": "boolean", + "description": "Whether to add background music to the video (only for 4-second videos)", + "default": false + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility. If None, a random seed is chosen." + }, + "movement_amplitude": { + "enum": [ + "auto", + "small", + "medium", + "large" + ], + "title": "Movement Amplitude", + "type": "string", + "description": "The movement amplitude of objects in the frame", + "default": "auto" + }, + "end_image_url": { + "title": "End Image Url", + "type": "string", + "description": "URL of the image to use as the ending frame. When provided, generates a transition video between start and end frames." 
+ } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "end_image_url", + "seed", + "duration", + "resolution", + "movement_amplitude", + "bgm" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "ViduQ2ImageToVideoProOutput": { + "title": "Q2ImageToVideoOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://fal.media/files/tiger/L_lU76tYg-cXG_twy9N62_output.mp4" + } + ], + "title": "Video", + "description": "The generated video from image using the Q2 model", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/vidu/q2/image-to-video/pro/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/vidu/q2/image-to-video/pro/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/vidu/q2/image-to-video/pro": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ViduQ2ImageToVideoProInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/vidu/q2/image-to-video/pro/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ViduQ2ImageToVideoProOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/v2.5-turbo/standard/image-to-video", + "metadata": { + "display_name": "Kling Video", + "category": "image-to-video", + "description": "Kling 2.5 Turbo Standard: Top-tier image-to-video generation with unparalleled motion fluidity, cinematic visuals, and exceptional prompt precision.", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:42:36.450Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/elephant/BJ1ZIdeClgqnUTW9XgEUS_d00e816ad20849ae9f92b735358d610d.jpg", + "model_url": "https://fal.run/fal-ai/kling-video/v2.5-turbo/standard/image-to-video", + "license_type": "commercial", + "date": "2025-10-22T05:04:22.392Z", + "group": { + "key": "kling-video-v25", + "label": "2.5 Turbo (Image to Video) Standard" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling-video/v2.5-turbo/standard/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-video/v2.5-turbo/standard/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/v2.5-turbo/standard/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/elephant/BJ1ZIdeClgqnUTW9XgEUS_d00e816ad20849ae9f92b735358d610d.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/v2.5-turbo/standard/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/v2.5-turbo/standard/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoV25TurboStandardImageToVideoInput": { + "title": "ImageToVideoV25StandardRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "In a dimly lit room, a playful cat's eyes light up, fixated on a dancing red dot. With boundless energy, it pounces and leaps, chasing the elusive beam across the floor and up the walls. The simple joy of the hunt unfolds in clear, uncomplicated visuals." + ], + "title": "Prompt", + "type": "string", + "maxLength": 2500 + }, + "duration": { + "enum": [ + "5", + "10" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds", + "default": "5" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/kling_v25_std_i2v_input.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to be used for the video" + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "maxLength": 2500, + "default": "blur, distort, and low quality" + }, + "cfg_scale": { + "minimum": 0, + "title": "Cfg Scale", + "type": "number", + "maximum": 1, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt.\n ", + "default": 0.5 + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "duration", + "negative_prompt", + "cfg_scale" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "KlingVideoV25TurboStandardImageToVideoOutput": { + "title": "ImageToVideoV25StandardOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/kling_v25_std_i2v_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/v2.5-turbo/standard/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v2.5-turbo/standard/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v2.5-turbo/standard/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV25TurboStandardImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v2.5-turbo/standard/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV25TurboStandardImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/veo3.1/fast/first-last-frame-to-video", + "metadata": { + "display_name": "Veo 3.1 Fast", + "category": "image-to-video", + "description": "Generate videos from a first/last frame using Google's Veo 3.1 Fast", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:42.708Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/rabbit/8tRJcj1P_EtIcpbdkP3m7_f9984366e4054165a8919d0671acbcb3.jpg", + "model_url": "https://fal.run/fal-ai/veo3.1/fast/first-last-frame-to-video", + "license_type": "commercial", + "date": "2025-10-08T17:10:43.612Z", + "group": { + "key": "veo3.1", + "label": "First/Last Frame to Video (Fast)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/veo3.1/fast/first-last-frame-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/veo3.1/fast/first-last-frame-to-video 
queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/veo3.1/fast/first-last-frame-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/rabbit/8tRJcj1P_EtIcpbdkP3m7_f9984366e4054165a8919d0671acbcb3.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/veo3.1/fast/first-last-frame-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/veo3.1/fast/first-last-frame-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Veo31FastFirstLastFrameToVideoInput": { + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "duration", + "negative_prompt", + "resolution", + "generate_audio", + "seed", + "auto_fix", + "first_frame_url", + "last_frame_url" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A woman looks into the camera, breathes in, then exclaims energetically, \"have you guys checked out Veo3.1 First-Last-Frame-to-Video on Fal? 
It's incredible!\"" + ], + "description": "The text prompt describing the video you want to generate", + "type": "string", + "title": "Prompt", + "maxLength": 20000 + }, + "duration": { + "enum": [ + "4s", + "6s", + "8s" + ], + "description": "The duration of the generated video.", + "type": "string", + "title": "Duration", + "default": "8s" + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "9:16" + ], + "description": "The aspect ratio of the generated video.", + "type": "string", + "title": "Aspect Ratio", + "default": "auto" + }, + "generate_audio": { + "description": "Whether to generate audio for the video.", + "type": "boolean", + "title": "Generate Audio", + "default": true + }, + "auto_fix": { + "description": "Whether to automatically attempt to fix prompts that fail content policy or other validation checks by rewriting them.", + "type": "boolean", + "title": "Auto Fix", + "default": false + }, + "resolution": { + "enum": [ + "720p", + "1080p", + "4k" + ], + "description": "The resolution of the generated video.", + "type": "string", + "title": "Resolution", + "default": "720p" + }, + "first_frame_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/veo31-flf2v-input-1.jpeg" + ], + "description": "URL of the first frame of the video", + "type": "string", + "title": "First Frame URL" + }, + "seed": { + "description": "The seed for the random number generator.", + "type": "integer", + "title": "Seed" + }, + "last_frame_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/veo31-flf2v-input-2.jpeg" + ], + "description": "URL of the last frame of the video", + "type": "string", + "title": "Last Frame URL" + }, + "negative_prompt": { + "description": "A negative prompt to guide the video generation.", + "type": "string", + "title": "Negative Prompt" + } + }, + "title": "Veo31FirstLastFrameToVideoInput", + "required": [ + "prompt", + "first_frame_url", + "last_frame_url" + ] + }, + "Veo31FastFirstLastFrameToVideoOutput": { + "x-fal-order-properties": [ + "video" + ], + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/veo31-flf2v-output.mp4" + } + ], + "description": "The generated video.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "Veo31FirstLastFrameToVideoOutput", + "required": [ + "video" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/veo3.1/fast/first-last-frame-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/veo3.1/fast/first-last-frame-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/veo3.1/fast/first-last-frame-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Veo31FastFirstLastFrameToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/veo3.1/fast/first-last-frame-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Veo31FastFirstLastFrameToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/veo3.1/first-last-frame-to-video", + "metadata": { + "display_name": "Veo 3.1", + "category": "image-to-video", + "description": "Generate videos from a first and last frame using Google's Veo 3.1", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:42.832Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/monkey/5YSsAJ8Fbwuj97_9vBiTW.jpg", + "model_url": "https://fal.run/fal-ai/veo3.1/first-last-frame-to-video", + "license_type": "commercial", + "date": "2025-10-08T17:09:32.703Z", + "group": { + "key": "veo3.1", + "label": "First/Last Frame to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue
OpenAPI for fal-ai/veo3.1/first-last-frame-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/veo3.1/first-last-frame-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/veo3.1/first-last-frame-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/monkey/5YSsAJ8Fbwuj97_9vBiTW.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/veo3.1/first-last-frame-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/veo3.1/first-last-frame-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Veo31FirstLastFrameToVideoInput": { + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "duration", + "negative_prompt", + "resolution", + "generate_audio", + "seed", + "auto_fix", + "first_frame_url", + "last_frame_url" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A woman looks into the camera, breathes in, then exclaims energetically, \"have you guys checked out Veo3.1 First-Last-Frame-to-Video on Fal? 
It's incredible!\"" + ], + "description": "The text prompt describing the video you want to generate", + "type": "string", + "title": "Prompt", + "maxLength": 20000 + }, + "duration": { + "enum": [ + "4s", + "6s", + "8s" + ], + "description": "The duration of the generated video.", + "type": "string", + "title": "Duration", + "default": "8s" + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "9:16" + ], + "description": "The aspect ratio of the generated video.", + "type": "string", + "title": "Aspect Ratio", + "default": "auto" + }, + "generate_audio": { + "description": "Whether to generate audio for the video.", + "type": "boolean", + "title": "Generate Audio", + "default": true + }, + "auto_fix": { + "description": "Whether to automatically attempt to fix prompts that fail content policy or other validation checks by rewriting them.", + "type": "boolean", + "title": "Auto Fix", + "default": false + }, + "resolution": { + "enum": [ + "720p", + "1080p", + "4k" + ], + "description": "The resolution of the generated video.", + "type": "string", + "title": "Resolution", + "default": "720p" + }, + "first_frame_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/veo31-flf2v-input-1.jpeg" + ], + "description": "URL of the first frame of the video", + "type": "string", + "title": "First Frame URL" + }, + "seed": { + "description": "The seed for the random number generator.", + "type": "integer", + "title": "Seed" + }, + "last_frame_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/veo31-flf2v-input-2.jpeg" + ], + "description": "URL of the last frame of the video", + "type": "string", + "title": "Last Frame URL" + }, + "negative_prompt": { + "description": "A negative prompt to guide the video generation.", + "type": "string", + "title": "Negative Prompt" + } + }, + "title": "Veo31FirstLastFrameToVideoInput", + "required": [ + "prompt", + "first_frame_url", + "last_frame_url" + ] + }, + "Veo31FirstLastFrameToVideoOutput": { + "x-fal-order-properties": [ + "video" + ], + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/veo31-flf2v-output.mp4" + } + ], + "description": "The generated video.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "Veo31FirstLastFrameToVideoOutput", + "required": [ + "video" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/veo3.1/first-last-frame-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/veo3.1/first-last-frame-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/veo3.1/first-last-frame-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Veo31FirstLastFrameToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/veo3.1/first-last-frame-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Veo31FirstLastFrameToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/veo3.1/reference-to-video", + "metadata": { + "display_name": "Veo 3.1", + "category": "image-to-video", + "description": "Generate Videos from images using Google's Veo 3.1", + "status": "active", + "tags": [], + "updated_at": "2026-01-27T20:28:56.632Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/tiger/IwzOGSbzp6e8N00QuLtFF_129417bb24f248298e95c3fa2b1b82fb.jpg", + "model_url": "https://fal.run/fal-ai/veo3.1/reference-to-video", + "license_type": "commercial", + "date": "2025-10-08T17:07:06.022Z", + "group": { + "key": "veo3.1", + "label": "Reference to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for 
fal-ai/veo3.1/reference-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/veo3.1/reference-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/veo3.1/reference-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/tiger/IwzOGSbzp6e8N00QuLtFF_129417bb24f248298e95c3fa2b1b82fb.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/veo3.1/reference-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/veo3.1/reference-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Veo31ReferenceToVideoInput": { + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "duration", + "resolution", + "generate_audio", + "auto_fix", + "image_urls" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A chimpanzee wearing overalls frolics in the grassy field, gently playing with the butterflies. In the background, a circus tent and carousel beckon." 
+ ], + "description": "The text prompt describing the video you want to generate", + "type": "string", + "title": "Prompt", + "maxLength": 20000 + }, + "duration": { + "enum": [ + "8s" + ], + "description": "The duration of the generated video.", + "type": "string", + "title": "Duration", + "default": "8s" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16" + ], + "description": "The aspect ratio of the generated video.", + "type": "string", + "title": "Aspect Ratio", + "default": "16:9" + }, + "generate_audio": { + "description": "Whether to generate audio for the video.", + "type": "boolean", + "title": "Generate Audio", + "default": true + }, + "resolution": { + "enum": [ + "720p", + "1080p", + "4k" + ], + "description": "The resolution of the generated video.", + "type": "string", + "title": "Resolution", + "default": "720p" + }, + "auto_fix": { + "description": "Whether to automatically attempt to fix prompts that fail content policy or other validation checks by rewriting them.", + "type": "boolean", + "title": "Auto Fix", + "default": false + }, + "image_urls": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/example_inputs/veo31-r2v-input-1.png", + "https://storage.googleapis.com/falserverless/example_inputs/veo31-r2v-input-2.png", + "https://storage.googleapis.com/falserverless/example_inputs/veo31-r2v-input-3.png" + ] + ], + "description": "URLs of the reference images to use for consistent subject appearance", + "type": "array", + "title": "Image Urls", + "items": { + "type": "string" + } + } + }, + "title": "Veo31ReferenceToVideoInput", + "required": [ + "prompt", + "image_urls" + ] + }, + "Veo31ReferenceToVideoOutput": { + "x-fal-order-properties": [ + "video" + ], + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/veo31-r2v-output.mp4" + } + ], + "description": "The generated video.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "Veo31ReferenceToVideoOutput", + "required": [ + "video" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/veo3.1/reference-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
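Editor's note: the Veo31ReferenceToVideoInput schema above requires only `prompt` and `image_urls`; every other field falls back to a schema default. A minimal submit sketch against the queue server this spec lists (`https://queue.fal.run`) — the `FAL_KEY` env var name and the `Key` Authorization prefix are assumptions from fal's published conventions, not from this schema:

// Sketch: submit a fal-ai/veo3.1/reference-to-video job to the queue.
// Assumes FAL_KEY is set; the schema only says the Authorization header carries a Fal key.
const res = await fetch('https://queue.fal.run/fal-ai/veo3.1/reference-to-video', {
  method: 'POST',
  headers: {
    Authorization: `Key ${process.env.FAL_KEY}`, // assumed "Key <token>" convention
    'Content-Type': 'application/json',
  },
  body: JSON.stringify({
    prompt: 'A chimpanzee wearing overalls frolics in a grassy field.',
    image_urls: [
      'https://storage.googleapis.com/falserverless/example_inputs/veo31-r2v-input-1.png',
    ],
    // aspect_ratio, resolution, generate_audio, auto_fix all use schema defaults.
  }),
})
const queued = await res.json() // QueueStatus: { status, request_id, status_url, ... }
console.log(queued.request_id, queued.status)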
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/veo3.1/reference-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/veo3.1/reference-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Veo31ReferenceToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/veo3.1/reference-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Veo31ReferenceToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/veo3.1/fast/image-to-video", + "metadata": { + "display_name": "Veo 3.1 Fast", + "category": "image-to-video", + "description": "Generate videos from your image prompts using Veo 3.1 fast.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:43.413Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/tiger/CcVVXXOo5stpgsAJV_6UO_74aa2dbdc79447e7ae69b533a7863038.jpg", + "model_url": "https://fal.run/fal-ai/veo3.1/fast/image-to-video", + "license_type": "commercial", + "date": "2025-10-08T17:04:11.604Z", + "group": { + "key": "veo3.1", + "label": "Image to Video (Fast)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/veo3.1/fast/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/veo3.1/fast/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/veo3.1/fast/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/tiger/CcVVXXOo5stpgsAJV_6UO_74aa2dbdc79447e7ae69b533a7863038.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/veo3.1/fast/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/veo3.1/fast/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
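Editor's note: the QueueStatus object recurs verbatim for every endpoint in this file, so one shared type covers them all. A direct transcription of the schema into TypeScript; optionality follows the schema's `required` list:

// Shared queue-status shape used by every endpoint in this file.
// Only `status` and `request_id` are marked required by the schema.
interface QueueStatus {
  status: 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED'
  request_id: string
  response_url?: string
  status_url?: string
  cancel_url?: string
  logs?: Record<string, unknown>
  metrics?: Record<string, unknown>
  queue_position?: number
}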
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Veo31FastImageToVideoInput": { + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "duration", + "negative_prompt", + "resolution", + "generate_audio", + "seed", + "auto_fix", + "image_url" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A monkey and polar bear host a casual podcast about AI inference, bringing their unique perspectives from different environments (tropical vs. arctic) to discuss how AI systems make decisions and process information.\nSample Dialogue:\nMonkey (Banana): \"Welcome back to Bananas & Ice! I am Banana\"\nPolar Bear (Ice): \"And I'm Ice!\"" + ], + "description": "The text prompt describing the video you want to generate", + "type": "string", + "title": "Prompt", + "maxLength": 20000 + }, + "duration": { + "enum": [ + "4s", + "6s", + "8s" + ], + "description": "The duration of the generated video.", + "type": "string", + "title": "Duration", + "default": "8s" + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "9:16" + ], + "description": "The aspect ratio of the generated video. Only 16:9 and 9:16 are supported.", + "type": "string", + "title": "Aspect Ratio", + "default": "auto" + }, + "generate_audio": { + "description": "Whether to generate audio for the video.", + "type": "boolean", + "title": "Generate Audio", + "default": true + }, + "auto_fix": { + "description": "Whether to automatically attempt to fix prompts that fail content policy or other validation checks by rewriting them.", + "type": "boolean", + "title": "Auto Fix", + "default": false + }, + "resolution": { + "enum": [ + "720p", + "1080p", + "4k" + ], + "description": "The resolution of the generated video.", + "type": "string", + "title": "Resolution", + "default": "720p" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/veo31_i2v_input.jpg" + ], + "description": "URL of the input image to animate. Should be 720p or higher resolution in 16:9 or 9:16 aspect ratio. 
If the image is not in 16:9 or 9:16 aspect ratio, it will be cropped to fit.", + "type": "string", + "title": "Image URL" + }, + "seed": { + "description": "The seed for the random number generator.", + "type": "integer", + "title": "Seed" + }, + "negative_prompt": { + "description": "A negative prompt to guide the video generation.", + "type": "string", + "title": "Negative Prompt" + } + }, + "title": "Veo31ImageToVideoInput", + "required": [ + "prompt", + "image_url" + ] + }, + "Veo31FastImageToVideoOutput": { + "x-fal-order-properties": [ + "video" + ], + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/model_tests/gallery/veo3-1-i2v.mp4" + } + ], + "description": "The generated video.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "Veo31ImageToVideoOutput", + "required": [ + "video" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/veo3.1/fast/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/veo3.1/fast/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
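Editor's note: the status route shown below takes an optional `logs` query parameter (`1` to include logs, `0` to omit them). A hedged polling sketch — the `Key` prefix and `FAL_KEY` env var are assumed conventions, and the one-second interval is an arbitrary choice, not something the spec prescribes:

// Sketch: poll a queued fal-ai/veo3.1/fast/image-to-video request until it completes.
async function pollUntilDone(requestId: string): Promise<void> {
  const url = `https://queue.fal.run/fal-ai/veo3.1/fast/image-to-video/requests/${requestId}/status?logs=1`
  for (;;) {
    const res = await fetch(url, {
      headers: { Authorization: `Key ${process.env.FAL_KEY}` }, // assumed key convention
    })
    const status = (await res.json()) as { status: string; queue_position?: number }
    if (status.status === 'COMPLETED') return
    // Wait a second between polls; the interval is an arbitrary assumption.
    await new Promise((r) => setTimeout(r, 1000))
  }
}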
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/veo3.1/fast/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Veo31FastImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/veo3.1/fast/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Veo31FastImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/veo3.1/image-to-video", + "metadata": { + "display_name": "Veo 3.1", + "category": "image-to-video", + "description": "Veo 3.1 is the latest state-of-the art video generation model from Google DeepMind", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:43.541Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/zebra/G680VZS5VpMMQO1pSt2uj_dfbae33f738344aa98a62cf2022c427c.jpg", + "model_url": "https://fal.run/fal-ai/veo3.1/image-to-video", + "license_type": "commercial", + "date": "2025-10-08T17:02:47.996Z", + "group": { + "key": "veo3.1", + "label": "Image to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/veo3.1/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/veo3.1/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/veo3.1/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/zebra/G680VZS5VpMMQO1pSt2uj_dfbae33f738344aa98a62cf2022c427c.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/veo3.1/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/veo3.1/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Veo31ImageToVideoInput": { + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "duration", + "negative_prompt", + "resolution", + "generate_audio", + "seed", + "auto_fix", + "image_url" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A monkey and polar bear host a casual podcast about AI inference, bringing their unique perspectives from different environments (tropical vs. arctic) to discuss how AI systems make decisions and process information.\nSample Dialogue:\nMonkey (Banana): \"Welcome back to Bananas & Ice! I am Banana\"\nPolar Bear (Ice): \"And I'm Ice!\"" + ], + "description": "The text prompt describing the video you want to generate", + "type": "string", + "title": "Prompt", + "maxLength": 20000 + }, + "duration": { + "enum": [ + "4s", + "6s", + "8s" + ], + "description": "The duration of the generated video.", + "type": "string", + "title": "Duration", + "default": "8s" + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "9:16" + ], + "description": "The aspect ratio of the generated video. Only 16:9 and 9:16 are supported.", + "type": "string", + "title": "Aspect Ratio", + "default": "auto" + }, + "generate_audio": { + "description": "Whether to generate audio for the video.", + "type": "boolean", + "title": "Generate Audio", + "default": true + }, + "auto_fix": { + "description": "Whether to automatically attempt to fix prompts that fail content policy or other validation checks by rewriting them.", + "type": "boolean", + "title": "Auto Fix", + "default": false + }, + "resolution": { + "enum": [ + "720p", + "1080p", + "4k" + ], + "description": "The resolution of the generated video.", + "type": "string", + "title": "Resolution", + "default": "720p" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/veo31_i2v_input.jpg" + ], + "description": "URL of the input image to animate. Should be 720p or higher resolution in 16:9 or 9:16 aspect ratio. If the image is not in 16:9 or 9:16 aspect ratio, it will be cropped to fit.", + "type": "string", + "title": "Image URL" + }, + "seed": { + "description": "The seed for the random number generator.", + "type": "integer", + "title": "Seed" + }, + "negative_prompt": { + "description": "A negative prompt to guide the video generation.", + "type": "string", + "title": "Negative Prompt" + } + }, + "title": "Veo31ImageToVideoInput", + "required": [ + "prompt", + "image_url" + ] + }, + "Veo31ImageToVideoOutput": { + "x-fal-order-properties": [ + "video" + ], + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/model_tests/gallery/veo3-1-i2v.mp4" + } + ], + "description": "The generated video.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "Veo31ImageToVideoOutput", + "required": [ + "video" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/veo3.1/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/veo3.1/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/veo3.1/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Veo31ImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/veo3.1/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Veo31ImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/sora-2/image-to-video/pro", + "metadata": { + "display_name": "Sora 2", + "category": "image-to-video", + "description": "Image-to-video endpoint for Sora 2 Pro, OpenAI's state-of-the-art video model capable of creating richly detailed, dynamic clips with audio from natural language or images.", + "status": "active", + "tags": [ + "image-to-video", + "audio", + "sora-2-pro" + ], + "updated_at": "2026-01-26T21:42:45.484Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/koala/19D6ouLhOE6EKGbUTnfzf_b958de62e48c4356a1c58abef534060b.jpg", + "model_url": "https://fal.run/fal-ai/sora-2/image-to-video/pro", + "license_type": "commercial", + "date": "2025-10-06T22:02:41.752Z", + "group": { + "key": "sora-2", + "label": "Image to Video (Pro)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + 
}, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/sora-2/image-to-video/pro", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/sora-2/image-to-video/pro queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/sora-2/image-to-video/pro", + "category": "image-to-video", + "thumbnailUrl": "https://v3.fal.media/files/koala/19D6ouLhOE6EKGbUTnfzf_b958de62e48c4356a1c58abef534060b.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/sora-2/image-to-video/pro", + "documentationUrl": "https://fal.ai/models/fal-ai/sora-2/image-to-video/pro/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Sora2ImageToVideoProInput": { + "x-fal-order-properties": [ + "prompt", + "resolution", + "aspect_ratio", + "duration", + "delete_video", + "image_url" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Front-facing 'invisible' action-cam on a skydiver in freefall above bright clouds; camera locked on his face. He speaks over the wind with clear lipsync: 'This is insanely fun! You've got to try it—book a tandem and go!' Natural wind roar, voice close-mic'd and slightly compressed so it's intelligible. Midday sun, goggles and jumpsuit flutter, altimeter visible, parachute rig on shoulders. Energetic but stable framing with subtle shake; brief horizon roll. End on first tug of canopy and wind noise dropping." + ], + "maxLength": 5000, + "type": "string", + "minLength": 1, + "title": "Prompt", + "description": "The text prompt describing the video you want to generate" + }, + "duration": { + "enum": [ + 4, + 8, + 12 + ], + "description": "Duration of the generated video in seconds", + "type": "integer", + "title": "Duration", + "default": 4 + }, + "resolution": { + "enum": [ + "auto", + "720p", + "1080p" + ], + "description": "The resolution of the generated video", + "type": "string", + "title": "Resolution", + "default": "auto" + }, + "aspect_ratio": { + "enum": [ + "auto", + "9:16", + "16:9" + ], + "description": "The aspect ratio of the generated video", + "type": "string", + "title": "Aspect Ratio", + "default": "auto" + }, + "delete_video": { + "description": "Whether to delete the video after generation for privacy reasons. 
If True, the video cannot be used for remixing and will be permanently deleted.", + "type": "boolean", + "title": "Delete Video", + "default": true + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/sora-2-i2v-input.png" + ], + "description": "The URL of the image to use as the first frame", + "type": "string", + "title": "Image URL" + } + }, + "title": "ProImageToVideoInput", + "required": [ + "prompt", + "image_url" + ] + }, + "Sora2ImageToVideoProOutput": { + "x-fal-order-properties": [ + "video", + "video_id", + "thumbnail", + "spritesheet" + ], + "type": "object", + "properties": { + "spritesheet": { + "title": "Spritesheet", + "description": "Spritesheet image for the video", + "allOf": [ + { + "$ref": "#/components/schemas/ImageFile" + } + ] + }, + "thumbnail": { + "title": "Thumbnail", + "description": "Thumbnail image for the video", + "allOf": [ + { + "$ref": "#/components/schemas/ImageFile" + } + ] + }, + "video_id": { + "examples": [ + "video_123" + ], + "title": "Video ID", + "type": "string", + "description": "The ID of the generated video" + }, + "video": { + "examples": [ + { + "content_type": "video/mp4", + "url": "https://storage.googleapis.com/falserverless/example_outputs/sora-2-pro-i2v-output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/VideoFile" + } + ] + } + }, + "title": "ProImageToVideoOutput", + "required": [ + "video", + "video_id" + ] + }, + "ImageFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the image" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the image" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "ImageFile", + "required": [ + "url" + ] + }, + "VideoFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "duration": { + "title": "Duration", + "type": "number", + "description": "The duration of the video" + }, + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the video" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "fps": { + "title": "Fps", + "type": "number", + "description": "The FPS of the video" + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the video" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "num_frames": { + "title": "Num Frames", + "type": "integer", + "description": "The number of frames in the video" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "VideoFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/sora-2/image-to-video/pro/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sora-2/image-to-video/pro/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/sora-2/image-to-video/pro": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sora2ImageToVideoProInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sora-2/image-to-video/pro/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sora2ImageToVideoProOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/sora-2/image-to-video", + "metadata": { + "display_name": "Sora 2", + "category": "image-to-video", + "description": "Image-to-video endpoint for Sora 2, OpenAI's state-of-the-art video model capable of creating richly detailed, dynamic clips with audio from natural language or images.", + "status": "active", + "tags": [ + "image-to-video", + "audio", + "sora" + ], + "updated_at": "2026-01-26T21:42:45.860Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/kangaroo/8n-yLTQvW5q0BEvW0H2J3_8233c7e127124290b6dfa965f44ebc79.jpg", + "model_url": "https://fal.run/fal-ai/sora-2/image-to-video", + "license_type": "commercial", + "date": "2025-10-06T19:34:52.329Z", + "group": { + "key": "sora-2", + "label": "Image to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/sora-2/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/sora-2/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/sora-2/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://v3.fal.media/files/kangaroo/8n-yLTQvW5q0BEvW0H2J3_8233c7e127124290b6dfa965f44ebc79.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/sora-2/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/sora-2/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Sora2ImageToVideoInput": { + "x-fal-order-properties": [ + "prompt", + "resolution", + "aspect_ratio", + "duration", + "delete_video", + "model", + "image_url" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Front-facing 'invisible' action-cam on a skydiver in freefall above bright clouds; camera locked on his face. He speaks over the wind with clear lipsync: 'This is insanely fun! You've got to try it—book a tandem and go!' Natural wind roar, voice close-mic'd and slightly compressed so it's intelligible. Midday sun, goggles and jumpsuit flutter, altimeter visible, parachute rig on shoulders. Energetic but stable framing with subtle shake; brief horizon roll. End on first tug of canopy and wind noise dropping." + ], + "maxLength": 5000, + "type": "string", + "minLength": 1, + "title": "Prompt", + "description": "The text prompt describing the video you want to generate" + }, + "duration": { + "enum": [ + 4, + 8, + 12 + ], + "description": "Duration of the generated video in seconds", + "type": "integer", + "title": "Duration", + "default": 4 + }, + "resolution": { + "enum": [ + "auto", + "720p" + ], + "description": "The resolution of the generated video", + "type": "string", + "title": "Resolution", + "default": "auto" + }, + "aspect_ratio": { + "enum": [ + "auto", + "9:16", + "16:9" + ], + "description": "The aspect ratio of the generated video", + "type": "string", + "title": "Aspect Ratio", + "default": "auto" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/sora-2-i2v-input.png" + ], + "description": "The URL of the image to use as the first frame", + "type": "string", + "title": "Image URL" + }, + "model": { + "enum": [ + "sora-2", + "sora-2-2025-12-08", + "sora-2-2025-10-06" + ], + "description": "The model to use for the generation. When the default model is selected, the latest snapshot of the model will be used - otherwise, select a specific snapshot of the model.", + "type": "string", + "title": "Model", + "default": "sora-2" + }, + "delete_video": { + "description": "Whether to delete the video after generation for privacy reasons. 
If True, the video cannot be used for remixing and will be permanently deleted.", + "type": "boolean", + "title": "Delete Video", + "default": true + } + }, + "title": "ImageToVideoInput", + "required": [ + "prompt", + "image_url" + ] + }, + "Sora2ImageToVideoOutput": { + "x-fal-order-properties": [ + "video", + "video_id", + "thumbnail", + "spritesheet" + ], + "type": "object", + "properties": { + "spritesheet": { + "title": "Spritesheet", + "description": "Spritesheet image for the video", + "allOf": [ + { + "$ref": "#/components/schemas/ImageFile" + } + ] + }, + "thumbnail": { + "title": "Thumbnail", + "description": "Thumbnail image for the video", + "allOf": [ + { + "$ref": "#/components/schemas/ImageFile" + } + ] + }, + "video_id": { + "examples": [ + "video_123" + ], + "title": "Video ID", + "type": "string", + "description": "The ID of the generated video" + }, + "video": { + "examples": [ + { + "content_type": "video/mp4", + "url": "https://storage.googleapis.com/falserverless/example_outputs/sora_2_i2v_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/VideoFile" + } + ] + } + }, + "title": "ImageToVideoOutput", + "required": [ + "video", + "video_id" + ] + }, + "ImageFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the image" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the image" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "ImageFile", + "required": [ + "url" + ] + }, + "VideoFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "duration": { + "title": "Duration", + "type": "number", + "description": "The duration of the video" + }, + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the video" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "fps": { + "title": "Fps", + "type": "number", + "description": "The FPS of the video" + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the video" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." 
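Editor's note: on the output side, only `url` is required on the File/VideoFile schemas — `content_type`, `fps`, `num_frames`, and the rest may be absent. A sketch that fetches a completed sora-2 result and saves the MP4, assuming Node 18+ for global fetch; the filename pattern is illustrative only:

import { writeFile } from 'node:fs/promises'

// Sketch: fetch a completed fal-ai/sora-2/image-to-video result and save the video.
async function saveResult(requestId: string): Promise<void> {
  const res = await fetch(
    `https://queue.fal.run/fal-ai/sora-2/image-to-video/requests/${requestId}`,
    { headers: { Authorization: `Key ${process.env.FAL_KEY}` } }, // assumed key convention
  )
  const output = (await res.json()) as { video: { url: string }; video_id: string }
  // Only `url` is guaranteed by the schema; download the bytes from it.
  const bytes = await (await fetch(output.video.url)).arrayBuffer()
  await writeFile(`${output.video_id}.mp4`, new Uint8Array(bytes))
}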
+ }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "num_frames": { + "title": "Num Frames", + "type": "integer", + "description": "The number of frames in the video" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "VideoFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/sora-2/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sora-2/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/sora-2/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sora2ImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sora-2/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sora2ImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ovi/image-to-video", + "metadata": { + "display_name": "Ovi", + "category": "image-to-video", + "description": "Ovi can generate videos with audio from image and text inputs.", + "status": "active", + "tags": [ + "image-to-audio-video", + "image-to-video" + ], + "updated_at": "2026-01-26T21:42:46.242Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/elephant/0C5YI172kcCI9lcCtXDJI_61f25dd2c735440aafb1a2ec647b6bd0.jpg", + "model_url": "https://fal.run/fal-ai/ovi/image-to-video", + "license_type": "commercial", + "date": "2025-10-03T14:39:19.458Z", + "group": { + "key": "ovi", + "label": "Image to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ovi/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ovi/image-to-video queue.", + 
"x-fal-metadata": { + "endpointId": "fal-ai/ovi/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://v3.fal.media/files/elephant/0C5YI172kcCI9lcCtXDJI_61f25dd2c735440aafb1a2ec647b6bd0.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ovi/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/ovi/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "OviImageToVideoInput": { + "title": "OviI2VRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "An intimate close-up of a European woman with long dark hair as she gently brushes her hair in a softly lit bedroom, her delicate hand moving in the foreground. She looks directly into the camera with calm, focused eyes, a faint serene smile glowing in the warm lamp light. She says, [soft whisper] I am an artificial intelligence..Soft whispering female voice, ASMR tone with gentle breaths, cozy room acoustics, subtle emphasis on \"I am an artificial intelligence\"." + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt to guide video generation." + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. If None, a random seed is chosen." + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps.", + "default": 30 + }, + "audio_negative_prompt": { + "title": "Audio Negative Prompt", + "type": "string", + "description": "Negative prompt for audio generation.", + "default": "robotic, muffled, echo, distorted" + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for video generation.", + "default": "jitter, bad hands, blur, distortion" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/ovi_i2v_input.png" + ], + "title": "Image Url", + "type": "string", + "description": "The image URL to guide video generation." + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "num_inference_steps", + "audio_negative_prompt", + "seed", + "image_url" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "OviImageToVideoOutput": { + "title": "OviI2VResponse", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." 
+ }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_inputs/ovi_i2v_output.mp4" + } + ], + "description": "The generated video file.", + "anyOf": [ + { + "$ref": "#/components/schemas/File" + }, + { + "type": "null" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ovi/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ovi/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/ovi/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OviImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ovi/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OviImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "veed/fabric-1.0/fast", + "metadata": { + "display_name": "Fabric 1.0 Fast", + "category": "image-to-video", + "description": "VEED Fabric 1.0 is an image-to-video API that turns any image into a talking video", + "status": "active", + "tags": [ + "lipsync", + "avatar", + "" + ], + "updated_at": "2026-01-26T21:42:46.489Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/zebra/AGRvOvMtrM0o02wz8-hRF_cb5739a5199b49a7971c5319881c6765.jpg", + "model_url": "https://fal.run/veed/fabric-1.0/fast", + "license_type": "commercial", + "date": "2025-10-01T14:00:46.685Z", + "group": { + "key": "fabric-1.0", + "label": "Image To Video [fast]" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for veed/fabric-1.0/fast", + "version": "1.0.0", + "description": "The OpenAPI schema for the veed/fabric-1.0/fast queue.", + "x-fal-metadata": { + "endpointId": "veed/fabric-1.0/fast", + "category": "image-to-video", + "thumbnailUrl": "https://v3.fal.media/files/zebra/AGRvOvMtrM0o02wz8-hRF_cb5739a5199b49a7971c5319881c6765.jpg", + "playgroundUrl": "https://fal.ai/models/veed/fabric-1.0/fast", + "documentationUrl": "https://fal.ai/models/veed/fabric-1.0/fast/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Fabric10FastInput": { + "title": "FabricOneLipsyncInput", + "type": "object", + "properties": { + "resolution": { + "enum": [ + "720p", + "480p" + ], + "description": "Resolution", + "type": "string", + "title": "Resolution" + }, + "audio_url": { + "format": "uri", + "maxLength": 2083, + "type": "string", + "minLength": 1, + "title": "Audio Url", + "examples": [ + "https://v3.fal.media/files/elephant/Oz_g4AwQvXtXpUHL3Pa7u_Hope.mp3" + ] + }, + "image_url": { + "format": "uri", + "maxLength": 2083, + "type": "string", + "minLength": 1, + "title": "Image Url", + "examples": [ + "https://v3.fal.media/files/koala/NLVPfOI4XL1cWT2PmmqT3_Hope.png" + ] + } + }, + "x-fal-order-properties": [ + "image_url", + "audio_url", + "resolution" + ], + "required": [ + "image_url", + "audio_url", + "resolution" + ] + }, + "Fabric10FastOutput": { + "title": "FabricOneOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "content_type": "video/mp4", + "url": "https://v3.fal.media/files/lion/Yha3swLpHm35hoJCs8oJQ_tmp618_yf2f.mp4" + } + ], + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/veed/fabric-1.0/fast/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/veed/fabric-1.0/fast/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/veed/fabric-1.0/fast": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Fabric10FastInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/veed/fabric-1.0/fast/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Fabric10FastOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/bytedance/omnihuman/v1.5", + "metadata": { + "display_name": "Bytedance OmniHuman v1.5", + "category": "image-to-video", + "description": "Omnihuman v1.5 is a new and improved version of Omnihuman. It generates video using an image of a human figure paired with an audio file. It produces vivid, high-quality videos where the character’s emotions and movements maintain a strong correlation with the audio.", + "status": "active", + "tags": [ + "image-to-video", + "lipsync", + "" + ], + "updated_at": "2026-01-26T21:42:48.315Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/penguin/E4JFEyBXM1DccJUOvp5_e_6414b61bc2e141edb34671e5308addef.jpg", + "model_url": "https://fal.run/fal-ai/bytedance/omnihuman/v1.5", + "license_type": "commercial", + "date": "2025-09-23T14:00:56.925Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/bytedance/omnihuman/v1.5", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/bytedance/omnihuman/v1.5 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/bytedance/omnihuman/v1.5", + "category": "image-to-video", + "thumbnailUrl": "https://v3.fal.media/files/penguin/E4JFEyBXM1DccJUOvp5_e_6414b61bc2e141edb34671e5308addef.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/bytedance/omnihuman/v1.5", + "documentationUrl": "https://fal.ai/models/fal-ai/bytedance/omnihuman/v1.5/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "BytedanceOmnihumanV15Input": { + "x-fal-order-properties": [ + "prompt", + "image_url", + "audio_url", + "turbo_mode", + "resolution" + ], + "type": "object", + "properties": { + "turbo_mode": { + "title": "Turbo Mode", + "type": "boolean", + "description": "Generate a video at a faster rate with a slight quality trade-off.", + "default": false + }, + "resolution": { + "enum": [ + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video. Defaults to 1080p. 720p generation is faster but lower in quality. 1080p generation is limited to 30s audio and 720p generation is limited to 60s audio.", + "default": "1080p" + }, + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The text prompt used to guide the video generation." + }, + "audio_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/omnihuman_v15_input_audio.mp3" + ], + "title": "Audio Url", + "type": "string", + "description": "The URL of the audio file to generate the video. Audio must be under 30s long for 1080p generation and under 60s long for 720p generation." + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/omnihuman_v15_input_image.png" + ], + "title": "Image Url", + "type": "string", + "description": "The URL of the image used to generate the video." + } + }, + "title": "OmniHumanv15Input", + "required": [ + "image_url", + "audio_url" + ] + }, + "BytedanceOmnihumanV15Output": { + "x-fal-order-properties": [ + "video", + "duration" + ], + "type": "object", + "properties": { + "duration": { + "title": "Duration", + "type": "number", + "description": "Duration of audio input/video output as used for billing." + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/omnihuman_v15_output.mp4" + } + ], + "title": "Video", + "description": "Generated video file", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "OmniHumanv15Output", + "required": [ + "video", + "duration" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file."
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/bytedance/omnihuman/v1.5/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bytedance/omnihuman/v1.5/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/bytedance/omnihuman/v1.5": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BytedanceOmnihumanV15Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bytedance/omnihuman/v1.5/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BytedanceOmnihumanV15Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "veed/fabric-1.0", + "metadata": { + "display_name": "Fabric 1.0", + "category": "image-to-video", + "description": "VEED Fabric 1.0 is an image-to-video API that turns any image into a talking video", + "status": "active", + "tags": [ + "lipsync", + "avatar", + "" + ], + "updated_at": "2026-01-26T21:42:50.736Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/zebra/t3xqW_dkjrcRc2vmUS0EN_10a217d1a5ba4488be7bb1967189ef65.jpg", + "model_url": "https://fal.run/veed/fabric-1.0", + "license_type": "commercial", + "date": "2025-09-19T22:01:20.442Z", + "group": { + "key": "fabric-1.0", + "label": "Image To Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for veed/fabric-1.0", + "version": "1.0.0", + "description": "The OpenAPI schema for the veed/fabric-1.0 queue.", + "x-fal-metadata": { + "endpointId": "veed/fabric-1.0", + "category": "image-to-video", + "thumbnailUrl": "https://v3.fal.media/files/zebra/t3xqW_dkjrcRc2vmUS0EN_10a217d1a5ba4488be7bb1967189ef65.jpg", + "playgroundUrl": "https://fal.ai/models/veed/fabric-1.0", + "documentationUrl": 
"https://fal.ai/models/veed/fabric-1.0/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Fabric10Input": { + "title": "FabricOneLipsyncInput", + "type": "object", + "properties": { + "resolution": { + "enum": [ + "720p", + "480p" + ], + "description": "Resolution", + "type": "string", + "title": "Resolution" + }, + "audio_url": { + "format": "uri", + "maxLength": 2083, + "type": "string", + "minLength": 1, + "title": "Audio Url", + "examples": [ + "https://v3.fal.media/files/elephant/Oz_g4AwQvXtXpUHL3Pa7u_Hope.mp3" + ] + }, + "image_url": { + "format": "uri", + "maxLength": 2083, + "type": "string", + "minLength": 1, + "title": "Image Url", + "examples": [ + "https://v3.fal.media/files/koala/NLVPfOI4XL1cWT2PmmqT3_Hope.png" + ] + } + }, + "x-fal-order-properties": [ + "image_url", + "audio_url", + "resolution" + ], + "required": [ + "image_url", + "audio_url", + "resolution" + ] + }, + "Fabric10Output": { + "title": "FabricOneOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "content_type": "video/mp4", + "url": "https://v3.fal.media/files/lion/Yha3swLpHm35hoJCs8oJQ_tmp618_yf2f.mp4" + } + ], + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/veed/fabric-1.0/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/veed/fabric-1.0/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/veed/fabric-1.0": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Fabric10Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/veed/fabric-1.0/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Fabric10Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/v1/standard/ai-avatar", + "metadata": { + "display_name": "Kling AI Avatar", + "category": "image-to-video", + "description": "Kling AI Avatar Standard: Endpoint for creating avatar videos with realistic humans, animals, cartoons, or stylized characters", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:42:56.124Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/rabbit/me_wmKnQJJevTKvSLGMF7_2948b05301c24578b9d28acb927f9c5c.jpg", + "model_url": "https://fal.run/fal-ai/kling-video/v1/standard/ai-avatar", + "license_type": "commercial", + "date": "2025-09-13T07:34:07.280Z", + "group": { + "key": "Kling-Avatar", + "label": "Kling AI Avatar Standard" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue 
OpenAPI for fal-ai/kling-video/v1/standard/ai-avatar", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-video/v1/standard/ai-avatar queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/v1/standard/ai-avatar", + "category": "image-to-video", + "thumbnailUrl": "https://v3.fal.media/files/rabbit/me_wmKnQJJevTKvSLGMF7_2948b05301c24578b9d28acb927f9c5c.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/v1/standard/ai-avatar", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/v1/standard/ai-avatar/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoV1StandardAiAvatarInput": { + "title": "AIAvatarInput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt to use for the video generation.", + "default": "." + }, + "audio_url": { + "examples": [ + "https://v3.fal.media/files/rabbit/9_0ZG_geiWjZOmn9yscO6_output.mp3" + ], + "title": "Audio Url", + "type": "string", + "description": "The URL of the audio file." + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/kling_ai_avatar_input.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "The URL of the image to use as your avatar" + } + }, + "x-fal-order-properties": [ + "image_url", + "audio_url", + "prompt" + ], + "required": [ + "image_url", + "audio_url" + ] + }, + "KlingVideoV1StandardAiAvatarOutput": { + "title": "AIAvatarOutput", + "type": "object", + "properties": { + "duration": { + "title": "Duration", + "type": "number", + "description": "Duration of the output video in seconds." + }, + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/penguin/ln3x7H1p1jL0Pwo7675NI_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "duration" + ], + "required": [ + "video", + "duration" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/v1/standard/ai-avatar/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1/standard/ai-avatar/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1/standard/ai-avatar": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV1StandardAiAvatarInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1/standard/ai-avatar/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV1StandardAiAvatarOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/v1/pro/ai-avatar", + "metadata": { + "display_name": "Kling AI Avatar Pro", + "category": "image-to-video", + "description": "Kling AI Avatar Pro: The premium endpoint for creating avatar videos with realistic humans, animals, cartoons, or stylized characters", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:42:56.249Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/tiger/JkgpF_2-txAoKmW7MuTqt_0871571d0ba34433b57f86fbce62d273.jpg", + "model_url": "https://fal.run/fal-ai/kling-video/v1/pro/ai-avatar", + "license_type": "commercial", + "date": "2025-09-13T07:23:01.405Z", + "group": { + "key": "Kling-Avatar", + "label": "Kling AI Avatar Pro" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling-video/v1/pro/ai-avatar", + 
"version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-video/v1/pro/ai-avatar queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/v1/pro/ai-avatar", + "category": "image-to-video", + "thumbnailUrl": "https://fal.media/files/tiger/JkgpF_2-txAoKmW7MuTqt_0871571d0ba34433b57f86fbce62d273.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/v1/pro/ai-avatar", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/v1/pro/ai-avatar/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoV1ProAiAvatarInput": { + "title": "AIAvatarInput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt to use for the video generation.", + "default": "." + }, + "audio_url": { + "examples": [ + "https://v3.fal.media/files/rabbit/9_0ZG_geiWjZOmn9yscO6_output.mp3" + ], + "title": "Audio Url", + "type": "string", + "description": "The URL of the audio file." + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/kling_ai_avatar_input.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "The URL of the image to use as your avatar" + } + }, + "x-fal-order-properties": [ + "image_url", + "audio_url", + "prompt" + ], + "required": [ + "image_url", + "audio_url" + ] + }, + "KlingVideoV1ProAiAvatarOutput": { + "title": "AIAvatarOutput", + "type": "object", + "properties": { + "duration": { + "title": "Duration", + "type": "number", + "description": "Duration of the output video in seconds." + }, + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/penguin/ln3x7H1p1jL0Pwo7675NI_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "duration" + ], + "required": [ + "video", + "duration" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/v1/pro/ai-avatar/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1/pro/ai-avatar/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1/pro/ai-avatar": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV1ProAiAvatarInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1/pro/ai-avatar/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV1ProAiAvatarOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "decart/lucy-14b/image-to-video", + "metadata": { + "display_name": "Decart Lucy 14b", + "category": "image-to-video", + "description": "Lucy-14B delivers lightning fast performance that redefines what's possible with image-to-video AI", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:56.881Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/panda/CmMm-4CuFQUroMjHGYrv9_99edaa2eb5b34834b08717f4fbc23cca.jpg", + "model_url": "https://fal.run/decart/lucy-14b/image-to-video", + "license_type": "commercial", + "date": "2025-09-10T15:52:20.134Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for decart/lucy-14b/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the decart/lucy-14b/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "decart/lucy-14b/image-to-video", + "category": 
"image-to-video", + "thumbnailUrl": "https://fal.media/files/panda/CmMm-4CuFQUroMjHGYrv9_99edaa2eb5b34834b08717f4fbc23cca.jpg", + "playgroundUrl": "https://fal.ai/models/decart/lucy-14b/image-to-video", + "documentationUrl": "https://fal.ai/models/decart/lucy-14b/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Lucy14bImageToVideoInput": { + "x-fal-order-properties": [ + "prompt", + "image_url", + "resolution", + "aspect_ratio", + "sync_mode" + ], + "type": "object", + "properties": { + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the image to be generated\n and uploaded before returning the response. This will increase the\n latency of the function but it allows you to get the image directly\n in the response without going through the CDN.\n ", + "default": true + }, + "aspect_ratio": { + "enum": [ + "9:16", + "16:9" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio of the generated video.", + "default": "16:9" + }, + "prompt": { + "examples": [ + "A cinematic video begins with a woman standing in an art studio, wearing a paint-splattered apron over a white off-shoulder blouse, surrounded by colorful canvases on easels. She gently plays with her hair for a moment, then straightens her head and looks directly at the camera with a warm smile. After holding the smile, she gracefully twirls around in place, her apron flowing slightly with the motion, creating a playful and artistic atmosphere against the backdrop of her vibrant paintings." 
+ ], + "maxLength": 1500, + "type": "string", + "description": "Text description of the desired video content", + "title": "Prompt" + }, + "resolution": { + "enum": [ + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video", + "default": "720p" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/lucy-14b/lucy-14b-art-swirl-image.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to use as the first frame" + } + }, + "title": "Lucy14BImageToVideoInput", + "required": [ + "prompt", + "image_url" + ] + }, + "Lucy14bImageToVideoOutput": { + "x-fal-order-properties": [ + "video" + ], + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/model_tests/lucy-14b/lucy-14b-art-swirl-video.mp4" + } + ], + "title": "Video", + "description": "The generated MP4 video with H.264 encoding", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "Lucy14BOutput", + "required": [ + "video" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/decart/lucy-14b/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/decart/lucy-14b/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/decart/lucy-14b/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Lucy14bImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/decart/lucy-14b/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Lucy14bImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/bytedance/seedance/v1/lite/reference-to-video", + "metadata": { + "display_name": "Bytedance", + "category": "image-to-video", + "description": "Seedance lite reference-to-video allows the use of 1 to 4 images as reference to create a high-quality video.", + "status": "active", + "tags": [ + "reference-to-video", + "image-to-video", + "" + ], + "updated_at": "2026-01-26T21:42:58.941Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/kangaroo/B49-COdFROSlS850S1PfV_be7c6c85a41b4410ae02216253e5c6f8.jpg", + "model_url": "https://fal.run/fal-ai/bytedance/seedance/v1/lite/reference-to-video", + "license_type": "commercial", + "date": "2025-09-01T13:56:37.039Z", + "group": { + "key": "seedance-v1", + "label": "Seedance 1.0 Lite -- Reference to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/bytedance/seedance/v1/lite/reference-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/bytedance/seedance/v1/lite/reference-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/bytedance/seedance/v1/lite/reference-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://fal.media/files/kangaroo/B49-COdFROSlS850S1PfV_be7c6c85a41b4410ae02216253e5c6f8.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/bytedance/seedance/v1/lite/reference-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/bytedance/seedance/v1/lite/reference-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "BytedanceSeedanceV1LiteReferenceToVideoInput": { + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "resolution", + "duration", + "camera_fixed", + "seed", + "enable_safety_checker", + "reference_image_urls" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "The girl catches the puppy and hugs it." + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt used to generate the video" + }, + "resolution": { + "enum": [ + "480p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Video resolution - 480p for faster generation, 720p for higher quality", + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "21:9", + "16:9", + "4:3", + "1:1", + "3:4", + "9:16", + "auto" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video", + "default": "auto" + }, + "duration": { + "enum": [ + "2", + "3", + "4", + "5", + "6", + "7", + "8", + "9", + "10", + "11", + "12" + ], + "title": "Duration", + "type": "string", + "description": "Duration of the video in seconds", + "default": "5" + }, + "enable_safety_checker": { + "examples": [ + true + ], + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "camera_fixed": { + "title": "Camera Fixed", + "type": "boolean", + "description": "Whether to fix the camera position", + "default": false + }, + "reference_image_urls": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/example_inputs/seedance_reference.jpeg", + "https://storage.googleapis.com/falserverless/example_inputs/seedance_reference_2.jpeg" + ] + ], + "title": "Reference Image Urls", + "type": "array", + "description": "Reference images to generate the video with.", + "items": { + "type": "string" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed to control video generation. Use -1 for random." + } + }, + "title": "SeedanceReferenceToVideoInput", + "required": [ + "prompt", + "reference_image_urls" + ] + }, + "BytedanceSeedanceV1LiteReferenceToVideoOutput": { + "x-fal-order-properties": [ + "video", + "seed" + ], + "type": "object", + "properties": { + "seed": { + "examples": [ + 42 + ], + "title": "Seed", + "type": "integer", + "description": "Seed used for generation" + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/seedance_reference_output.mp4" + } + ], + "title": "Video", + "description": "Generated video file", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "SeedanceReferenceToVideoOutput", + "required": [ + "video", + "seed" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/bytedance/seedance/v1/lite/reference-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedance/v1/lite/reference-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedance/v1/lite/reference-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BytedanceSeedanceV1LiteReferenceToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedance/v1/lite/reference-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BytedanceSeedanceV1LiteReferenceToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan-ati", + "metadata": { + "display_name": "Wan Ati", + "category": "image-to-video", + "description": "WAN-ATI is a controllable video generation model that uses trajectory instructions to guide object, local, and camera motion, enabling precise and flexible image-to-video creation.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:59.442Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/zebra/08UqxLqr7sMO2AoVe9S7y_74d97134c50643e287a332aaae11b3d1.jpg", + "model_url": "https://fal.run/fal-ai/wan-ati", + "license_type": "commercial", + "date": "2025-08-29T23:23:18.962Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan-ati", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan-ati queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan-ati", + 
"category": "image-to-video", + "thumbnailUrl": "https://fal.media/files/zebra/08UqxLqr7sMO2AoVe9S7y_74d97134c50643e287a332aaae11b3d1.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan-ati", + "documentationUrl": "https://fal.ai/models/fal-ai/wan-ati/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanAtiInput": { + "title": "WanATIRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "An human facing the camera in an cyberbank style dress." + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt to guide video generation." + }, + "resolution": { + "enum": [ + "480p", + "580p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video (480p, 580p, 720p).", + "examples": [ + "480p" + ], + "default": "480p" + }, + "image_url": { + "examples": [ + "https://v3b.fal.media/files/b/kangaroo/5kPPJAhdcXH6eO1Kniftc_human.jpg" + ], + "title": "Image URL", + "type": "string", + "description": "URL of the input image." 
+ }, + "track": { + "examples": [ + [ + [ + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 
110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + }, + { + "y": 708, + "x": 110 + } + ], + [ + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + 
"x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + }, + { + "y": 755, + "x": 527 + } + ], + [ + { + "y": 416, + "x": 289 + }, + { + "y": 416, + "x": 288 + }, + { + "y": 416, + "x": 286 + }, + { + "y": 416, + "x": 275 + }, + { + "y": 416, + "x": 262 + }, + { + "y": 416, + "x": 248 + }, + { + "y": 416, + "x": 232 + }, + { + "y": 416, + "x": 218 + }, + { + "y": 415, + "x": 205 + }, + { + "y": 415, + "x": 194 + }, + { + "y": 414, + "x": 183 + }, + { + "y": 413, + "x": 175 + }, + { + "y": 413, + "x": 167 + }, + { + "y": 413, + "x": 161 + }, + { + "y": 412, + "x": 154 + }, + { + "y": 412, + "x": 148 + }, + { + "y": 412, + "x": 143 + }, + { + "y": 411, + "x": 139 + }, + { + "y": 411, + "x": 135 + }, + { + "y": 411, + "x": 133 + }, + { + "y": 411, + "x": 131 + }, + { + "y": 411, + "x": 130 + }, + { + "y": 411, + "x": 130 + }, + { + "y": 411, + "x": 129 + }, + { + "y": 411, + "x": 129 + }, + { + "y": 411, + "x": 129 + }, + { + "y": 411, + "x": 129 + }, + { + "y": 411, + "x": 129 + }, + { + "y": 411, + "x": 129 + }, + { + "y": 411, + "x": 130 + }, + { + "y": 411, + "x": 131 + }, + { + "y": 411, + "x": 132 + }, + { + "y": 411, + "x": 135 + }, + { + "y": 411, + "x": 141 + }, + { + "y": 411, + "x": 149 + }, + { + "y": 412, + "x": 157 + }, + { + "y": 414, + "x": 169 + }, + { + "y": 416, + "x": 183 + }, + { + "y": 419, + "x": 198 + }, + { + "y": 422, + "x": 215 + }, + { + "y": 424, + "x": 235 + }, + { + "y": 426, + "x": 254 + }, + { + "y": 426, + "x": 271 + }, + { + "y": 427, + "x": 287 + }, + { + "y": 427, + "x": 302 + }, + { + "y": 427, + "x": 312 + }, + { + "y": 427, + "x": 324 + }, + { + "y": 427, + "x": 338 + }, + { + "y": 427, + "x": 350 + }, + { + "y": 425, + "x": 361 + }, + { + "y": 424, + "x": 370 + }, + { + "y": 423, + "x": 377 + }, + { + "y": 423, + "x": 381 + }, + { + "y": 422, + "x": 383 + }, + { + "y": 422, + "x": 386 + }, + { + "y": 421, + "x": 388 + }, + { + "y": 420, + "x": 393 + }, + { + "y": 419, + "x": 398 + }, + { + "y": 419, + "x": 404 + }, + { + "y": 418, + "x": 410 + }, + { + "y": 418, + "x": 413 + }, + { + "y": 418, + "x": 415 + }, + { + "y": 418, + "x": 416 + }, + { + "y": 418, + "x": 416 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 
418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + }, + { + "y": 418, + "x": 417 + } + ] + ] + ], + "title": "Track", + "type": "array", + "description": "Motion tracks to guide video generation. Each track is a sequence of points defining a motion trajectory. Multiple tracks can control different elements or objects in the video. Expected format: array of tracks, where each track is an array of points with 'x' and 'y' coordinates (up to 121 points per track). Points will be automatically padded to 121 if fewer are provided. Coordinates should be within the image dimensions.", + "items": { + "type": "array", + "items": { + "$ref": "#/components/schemas/TrackPoint" + } + } + }, + "guidance_scale": { + "description": "Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.", + "type": "number", + "minimum": 1, + "maximum": 10, + "examples": [ + 5 + ], + "title": "Guidance Scale (1st Stage)", + "default": 5 + }, + "num_inference_steps": { + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "type": "integer", + "minimum": 2, + "maximum": 40, + "examples": [ + 40 + ], + "title": "Number of Inference Steps", + "default": 40 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility. If None, a random seed is chosen." + } + }, + "x-fal-order-properties": [ + "image_url", + "prompt", + "track", + "num_inference_steps", + "guidance_scale", + "resolution", + "seed" + ], + "required": [ + "image_url", + "prompt", + "track" + ] + }, + "WanAtiOutput": { + "title": "WanATIResponse", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 4102545, + "file_name": "2a91e308a059421aa9d54109bd027498.mp4", + "content_type": "application/octet-stream", + "url": "https://v3b.fal.media/files/b/tiger/EIaugZmuEX05_6ZMnWaj-_2a91e308a059421aa9d54109bd027498.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." 
+ }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan-ati/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-ati/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/wan-ati": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanAtiInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-ati/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanAtiOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/decart/lucy-5b/image-to-video", + "metadata": { + "display_name": "Decart", + "category": "image-to-video", + "description": "Lucy-5B is a model that can create 5-second I2V videos in under 5 seconds, achieving >1x RTF end-to-end", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:59.567Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/kangaroo/srM1hEjGsuYwd4UhvzpE7_c5bddd630600497e918bb16e99e21653.jpg", + "model_url": "https://fal.run/fal-ai/decart/lucy-5b/image-to-video", + "license_type": "commercial", + "date": "2025-08-28T18:32:21.658Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/decart/lucy-5b/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/decart/lucy-5b/image-to-video queue.", + "x-fal-metadata": { + 
"endpointId": "fal-ai/decart/lucy-5b/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://fal.media/files/kangaroo/srM1hEjGsuYwd4UhvzpE7_c5bddd630600497e918bb16e99e21653.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/decart/lucy-5b/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/decart/lucy-5b/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "DecartLucy5bImageToVideoInput": { + "title": "ProcessRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A cat is walking slowly in the garden" + ], + "title": "Prompt", + "type": "string", + "maxLength": 1500, + "description": "Text description of the desired video content" + }, + "aspect_ratio": { + "enum": [ + "9:16", + "16:9" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio of the generated video.", + "default": "16:9" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": true + }, + "resolution": { + "enum": [ + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video", + "default": "720p" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/monkey/OlpQEYh7oNeJ3qKsdiaym_ia5ECOgFbfcniMDu01_18_da73e078e0924472b51d92f3e3fba98c.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to use as the first frame" + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "resolution", + "aspect_ratio", + "sync_mode" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "DecartLucy5bImageToVideoOutput": { + "title": "ProcessOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/kangaroo/rIFaCsyWvBxYBKw3cPbOU_indir.mp4" + } + ], + "title": "Video", + "description": "The generated MP4 video with H.264 encoding", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." 
+ }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/decart/lucy-5b/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/decart/lucy-5b/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/decart/lucy-5b/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DecartLucy5bImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/decart/lucy-5b/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DecartLucy5bImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pixverse/v5/transition", + "metadata": { + "display_name": "Pixverse", + "category": "image-to-video", + "description": "Create seamless transition between images using PixVerse v5", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:01.278Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "model_url": "https://fal.run/fal-ai/pixverse/v5/transition", + "license_type": "commercial", + "date": "2025-08-23T10:56:18.163Z", + "group": { + "key": "pixverse-5", + "label": "Transition" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pixverse/v5/transition", + 
"version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pixverse/v5/transition queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pixverse/v5/transition", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/pixverse/v5/transition", + "documentationUrl": "https://fal.ai/models/fal-ai/pixverse/v5/transition/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PixverseV5TransitionInput": { + "title": "TransitionRequest", + "type": "object", + "properties": { + "first_image_url": { + "examples": [ + "https://v3.fal.media/files/zebra/owQh2DAzk8UU7J02nr5RY_Co2P4boLv6meIZ5t9gKvL_8685da151df343ab8bf82165c928e2a5.jpg" + ], + "title": "First Image Url", + "type": "string", + "description": "URL of the image to use as the first frame" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "4:3", + "1:1", + "3:4", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video", + "default": "16:9" + }, + "resolution": { + "enum": [ + "360p", + "540p", + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video", + "default": "720p" + }, + "style": { + "enum": [ + "anime", + "3d_animation", + "clay", + "comic", + "cyberpunk" + ], + "title": "Style", + "type": "string", + "description": "The style of the generated video" + }, + "prompt": { + "examples": [ + "Scene slowly transition into cat swimming under water" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt for the transition" + }, + "duration": { + "enum": [ + "5", + "8" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds", + "default": "5" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n " + }, + "end_image_url": { + "examples": [ + "https://v3.fal.media/files/kangaroo/RgedFs_WSnq5BgER7qDx1_ONrbTJ1YAGXz-9JnSsBoB_bdc8750387734bfe940319f469f7b0b2.jpg" + ], + "title": "End Image Url", + "type": "string", + "description": "URL of the image to use as the last frame" + }, + "negative_prompt": { + "examples": [ + "blurry, low quality, low resolution, pixelated, noisy, grainy, out of focus, poorly lit, poorly exposed, poorly composed, poorly framed, poorly cropped, poorly color corrected, poorly color graded" + ], + "title": "Negative 
Prompt", + "type": "string", + "description": "Negative prompt to be used for the generation", + "default": "" + } + }, + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "resolution", + "duration", + "negative_prompt", + "style", + "seed", + "first_image_url", + "end_image_url" + ], + "required": [ + "prompt", + "first_image_url" + ] + }, + "PixverseV5TransitionOutput": { + "title": "TransitionOutputV5", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 3890360, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://storage.googleapis.com/falserverless/model_tests/video_models/output-2.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pixverse/v5/transition/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v5/transition/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v5/transition": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV5TransitionInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v5/transition/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV5TransitionOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pixverse/v5/effects", + "metadata": { + "display_name": "Pixverse", + "category": "image-to-video", + "description": "Generate high quality video clips with different effects using PixVerse v5", + "status": "active", + "tags": [ + "image-to-video" + ], + "updated_at": "2026-01-26T21:43:01.402Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "model_url": "https://fal.run/fal-ai/pixverse/v5/effects", + "license_type": "commercial", + "date": "2025-08-23T10:55:07.548Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pixverse/v5/effects", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pixverse/v5/effects queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pixverse/v5/effects", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/pixverse/v5/effects", + "documentationUrl": "https://fal.ai/models/fal-ai/pixverse/v5/effects/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "PixverseV5EffectsInput": { + "title": "EffectInput", + "type": "object", + "properties": { + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt to be used for the generation", + "default": "" + }, + "duration": { + "enum": [ + "5", + "8" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds", + "default": "5" + }, + "resolution": { + "enum": [ + "360p", + "540p", + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video.", + "default": "720p" + }, + "effect": { + "enum": [ + "Kiss Me AI", + "Kiss", + "Muscle Surge", + "Warmth of Jesus", + "Anything, Robot", + "The Tiger Touch", + "Hug", + "Holy Wings", + "Microwave", + "Zombie Mode", + "Squid Game", + "Baby Face", + "Black Myth: Wukong", + "Long Hair Magic", + "Leggy Run", + "Fin-tastic Mermaid", + "Punch Face", + "Creepy Devil Smile", + "Thunder God", + "Eye Zoom Challenge", + "Who's Arrested?", + "Baby Arrived", + "Werewolf Rage", + "Bald Swipe", + "BOOM DROP", + "Huge Cutie", + "Liquid Metal", + "Sharksnap!", + "Dust Me Away", + "3D Figurine Factor", + "Bikini Up", + "My Girlfriends", + "My Boyfriends", + "Subject 3 Fever", + "Earth Zoom", + "Pole Dance", + "Vroom Dance", + "GhostFace Terror", + "Dragon Evoker", + "Skeletal Bae", + "Summoning succubus", + "Halloween Voodoo Doll", + "3D Naked-Eye AD", + "Package Explosion", + "Dishes Served", + "Ocean ad", + "Supermarket AD", + "Tree doll", + "Come Feel My Abs", + "The Bicep Flex", + "London Elite Vibe", + "Flora Nymph Gown", + "Christmas Costume", + "It's Snowy", + "Reindeer Cruiser", + "Snow Globe Maker", + "Pet Christmas Outfit", + "Adopt a Polar Pal", + "Cat Christmas Box", + "Starlight Gift Box", + "Xmas Poster", + "Pet Christmas Tree", + "City Santa Hat", + "Stocking Sweetie", + "Christmas Night", + "Xmas Front Page Karma", + "Grinch's Xmas Hijack", + "Giant Product", + "Truck Fashion Shoot", + "Beach AD", + "Shoal Surround", + "Mechanical Assembly", + "Lighting AD", + "Billboard AD", + "Product close-up", + "Parachute Delivery", + "Dreamlike Cloud", + "Macaron Machine", + "Poster AD", + "Truck AD", + "Graffiti AD", + "3D Figurine Factory", + "The Exclusive First Class", + "Art Zoom Challenge", + "I Quit", + "Hitchcock Dolly Zoom", + "Smell the Lens", + "I believe I can fly", + "Strikout Dance", + "Pixel World", + "Mint in Box", + "Hands up, Hand", + "Flora Nymph Go", + "Somber Embrace", + "Beam me up", + "Suit Swagger" + ], + "title": "Effect", + "type": "string", + "description": "The effect to apply to the video" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/koala/q5ahL3KS7ikt3MvpNUG8l_image%20(72).webp" + ], + "title": "Image Url", + "type": "string", + "description": "Optional URL of the image to use as the first frame. 
If not provided, generates from text" + } + }, + "x-fal-order-properties": [ + "effect", + "image_url", + "resolution", + "duration", + "negative_prompt" + ], + "required": [ + "effect", + "image_url" + ] + }, + "PixverseV5EffectsOutput": { + "title": "EffectOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 3232402, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://fal.media/files/koala/awGY1lJd7lVsqQeSqjWqn_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pixverse/v5/effects/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v5/effects/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
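One quirk worth noting in `PixverseV5EffectsInput`: the `image_url` description calls it optional ("If not provided, generates from text"), yet the schema's `required` list includes it, so always sending it is the safe reading. A sketch payload, with a placeholder URL and an effect taken from the enum:

```ts
// Sketch of a PixverseV5EffectsInput payload.
// The schema marks image_url required despite its description calling it
// optional, so this sketch always supplies one (placeholder URL).
const effectsInput = {
  effect: 'Earth Zoom', // must be one of the enum values listed above
  image_url: 'https://example.com/portrait.webp', // placeholder
  resolution: '720p', // default
  duration: '5', // "5" or "8" (strings, per the enum)
}
```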
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v5/effects": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV5EffectsInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v5/effects/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV5EffectsOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pixverse/v5/image-to-video", + "metadata": { + "display_name": "Pixverse v5 Image to Video", + "category": "image-to-video", + "description": "Generate high quality video clips from text and image prompts using PixVerse v5", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:01.587Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/panda/eTq_2p2OlIBu7MyDFdtWz_6290f4acd92c4382b04ed302b2aa5a3a.jpg", + "model_url": "https://fal.run/fal-ai/pixverse/v5/image-to-video", + "license_type": "commercial", + "date": "2025-08-23T10:53:29.956Z", + "group": { + "key": "pixverse-5", + "label": "Image to Video v5" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pixverse/v5/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pixverse/v5/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pixverse/v5/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://fal.media/files/panda/eTq_2p2OlIBu7MyDFdtWz_6290f4acd92c4382b04ed302b2aa5a3a.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/pixverse/v5/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/pixverse/v5/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "PixverseV5ImageToVideoInput": { + "title": "ImageToVideoRequestV5", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A woman warrior with her hammer walking with his glacier wolf." + ], + "title": "Prompt", + "type": "string" + }, + "resolution": { + "enum": [ + "360p", + "540p", + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video", + "default": "720p" + }, + "duration": { + "enum": [ + "5", + "8" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds. 8s videos cost double. 1080p videos are limited to 5 seconds", + "default": "5" + }, + "style": { + "enum": [ + "anime", + "3d_animation", + "clay", + "comic", + "cyberpunk" + ], + "title": "Style", + "type": "string", + "description": "The style of the generated video" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/zebra/qL93Je8ezvzQgDOEzTjKF_KhGKZTEebZcDw6T5rwQPK_output.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to use as the first frame" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n " + }, + "negative_prompt": { + "examples": [ + "blurry, low quality, low resolution, pixelated, noisy, grainy, out of focus, poorly lit, poorly exposed, poorly composed, poorly framed, poorly cropped, poorly color corrected, poorly color graded" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt to be used for the generation", + "default": "" + } + }, + "x-fal-order-properties": [ + "prompt", + "resolution", + "duration", + "negative_prompt", + "style", + "seed", + "image_url" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "PixverseV5ImageToVideoOutput": { + "title": "I2VOutputV5", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 6420765, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://storage.googleapis.com/falserverless/model_tests/video_models/output-3.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pixverse/v5/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v5/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v5/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV5ImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v5/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV5ImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "moonvalley/marey/i2v", + "metadata": { + "display_name": "Marey Realism V1.5", + "category": "image-to-video", + "description": "Generate a video starting from an image as the first frame with Marey, a generative video model trained exclusively on fully licensed data.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:04.766Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/elephant/ZYJf9OjFksJF5QIMxEjCh_85dff19060504c08b66d4a0675788c3a.jpg", + "model_url": "https://fal.run/moonvalley/marey/i2v", + "license_type": "commercial", + "date": "2025-08-14T01:27:37.851Z", + "group": { + "key": "marey", + "label": "Image to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for moonvalley/marey/i2v", + "version": "1.0.0", + "description": "The OpenAPI schema for the moonvalley/marey/i2v queue.", + "x-fal-metadata": { + "endpointId": "moonvalley/marey/i2v", + "category": "image-to-video", + "thumbnailUrl": 
"https://fal.media/files/elephant/ZYJf9OjFksJF5QIMxEjCh_85dff19060504c08b66d4a0675788c3a.jpg", + "playgroundUrl": "https://fal.ai/models/moonvalley/marey/i2v", + "documentationUrl": "https://fal.ai/models/moonvalley/marey/i2v/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MareyI2vInput": { + "title": "MareyInputI2V", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Detailed Description: In a hidden jungle grotto, a majestic waterfall plunges into a dark, serene pool below. Ethereal sunbeams slice through the dense canopy high above, illuminating the swirling mist generated by the powerful cascade. The light rays dance across the scene, highlighting the vibrant green foliage that clings to the dark, wet rock walls. The constant roar of the falling water echoes through the secluded space, as the surface of the pool ripples and churns from the impact, creating a mesmerizing display of nature's raw power and tranquil beauty. Background: Brilliant sunbeams pierce through an opening in the dense jungle canopy, their ethereal rays shifting and shimmering as they cut through the misty air. Middleground: A powerful column of white water cascades down a dark, foliage-covered cliff face, creating a stark contrast with the shadowy recesses of the grotto. Foreground: The waterfall crashes into a dark, churning pool of water, sending up a fine spray and creating ever-expanding ripples across the surface." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate a video from" + }, + "duration": { + "enum": [ + "5s", + "10s" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video.", + "default": "5s" + }, + "image_url": { + "examples": [ + "https://d1kaxrqq3vfrw5.cloudfront.net/fal-launch-assets/guide-assets/fal-i2v-example-input.png" + ], + "title": "Image Url", + "type": "string", + "description": "The URL of the image to use as the first frame of the video." + }, + "dimensions": { + "enum": [ + "1920x1080", + "1080x1920", + "1152x1152", + "1536x1152", + "1152x1536" + ], + "title": "Dimensions", + "type": "string", + "description": "The dimensions of the generated video in width x height format.", + "default": "1920x1080" + }, + "guidance_scale": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Guidance Scale", + "description": "Controls how strongly the generation is guided by the prompt (0-20). Higher values follow the prompt more closely." 
+ }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Seed for random number generation. Use -1 for random seed each run.", + "default": -1 + }, + "negative_prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Negative Prompt", + "description": "Negative prompt used to guide the model away from undesirable features.", + "default": " low-poly, flat shader, bad rigging, stiff animation, uncanny eyes, low-quality textures, looping glitch, cheap effect, overbloom, bloom spam, default lighting, game asset, stiff face, ugly specular, AI artifacts" + } + }, + "x-fal-order-properties": [ + "prompt", + "dimensions", + "duration", + "negative_prompt", + "seed", + "guidance_scale", + "image_url" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "MareyI2vOutput": { + "title": "MareyOutput", + "type": "object", + "properties": { + "video": { + "description": "The generated video.", + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/moonvalley/marey/i2v/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/moonvalley/marey/i2v/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/moonvalley/marey/i2v": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MareyI2vInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/moonvalley/marey/i2v/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MareyI2vOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/bytedance/video-stylize", + "metadata": { + "display_name": "Bytedance", + "category": "image-to-video", + "description": "Transform your images into stylized videos using this workflow.", + "status": "active", + "tags": [ + "image-to-video", + "effects" + ], + "updated_at": "2026-01-26T21:43:05.264Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/koala/LxwcxJuokX_e4sZ6EopoG_fb133832f45f4366b13faa832797092d.jpg", + "model_url": "https://fal.run/fal-ai/bytedance/video-stylize", + "license_type": "commercial", + "date": "2025-08-12T22:43:10.752Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/bytedance/video-stylize", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/bytedance/video-stylize queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/bytedance/video-stylize", + "category": "image-to-video", + "thumbnailUrl": "https://fal.media/files/koala/LxwcxJuokX_e4sZ6EopoG_fb133832f45f4366b13faa832797092d.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/bytedance/video-stylize", + "documentationUrl": "https://fal.ai/models/fal-ai/bytedance/video-stylize/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "BytedanceVideoStylizeInput": { + "x-fal-order-properties": [ + "style", + "image_url" + ], + "type": "object", + "properties": { + "style": { + "examples": [ + "Manga style" + ], + "maxLength": 100, + "type": "string", + "title": "Style", + "description": "The style for your character in the video. 
Please use a short description." + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/kangaroo/-KmSPIcXeGA3Z_iiH4C75_tmph2ry_0_8.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to make the stylized video from." + } + }, + "title": "StylizeInput", + "required": [ + "style", + "image_url" + ] + }, + "BytedanceVideoStylizeOutput": {} + } + }, + "paths": { + "/fal-ai/bytedance/video-stylize/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bytedance/video-stylize/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/bytedance/video-stylize": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BytedanceVideoStylizeInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bytedance/video-stylize/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BytedanceVideoStylizeOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan/v2.2-a14b/image-to-video/lora", + "metadata": { + "display_name": "Wan v2.2 A14B Image-to-Video with LoRAs", + "category": "image-to-video", + "description": "Wan-2.2 image-to-video is a video model that generates videos with high visual quality and motion diversity from text prompts and images.
This endpoint supports LoRAs made for Wan 2.2", + "status": "active", + "tags": [ + "image-to-video", + "motion", + "lora" + ], + "updated_at": "2026-01-26T21:43:06.624Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/monkey/Pu175Pi1Z0phr_hCod2pk_51e51de631a54b58ba789155b6955c94.jpg", + "model_url": "https://fal.run/fal-ai/wan/v2.2-a14b/image-to-video/lora", + "license_type": "commercial", + "date": "2025-08-07T12:44:59.464Z", + "group": { + "key": "wan-v22-lora", + "label": "Image to Video (LoRA)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan/v2.2-a14b/image-to-video/lora", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan/v2.2-a14b/image-to-video/lora queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan/v2.2-a14b/image-to-video/lora", + "category": "image-to-video", + "thumbnailUrl": "https://fal.media/files/monkey/Pu175Pi1Z0phr_hCod2pk_51e51de631a54b58ba789155b6955c94.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan/v2.2-a14b/image-to-video/lora", + "documentationUrl": "https://fal.ai/models/fal-ai/wan/v2.2-a14b/image-to-video/lora/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanV22A14bImageToVideoLoraInput": { + "title": "WanLoRAI2VRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Cars racing in slow motion" + ], + "description": "The text prompt to guide video generation.", + "type": "string", + "title": "Prompt" + }, + "shift": { + "description": "Shift value for the video. Must be between 1.0 and 10.0.", + "type": "number", + "minimum": 1, + "maximum": 10, + "title": "Shift", + "examples": [ + 5 + ], + "default": 5 + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "description": "Acceleration level to use. The more acceleration, the faster the generation, but with lower quality. The recommended value is 'regular'.", + "type": "string", + "title": "Acceleration", + "examples": [ + "regular" + ], + "default": "regular" + }, + "num_interpolated_frames": { + "description": "Number of frames to interpolate between each pair of generated frames. 
Must be between 0 and 4.", + "type": "integer", + "minimum": 0, + "maximum": 4, + "title": "Number of Interpolated Frames", + "examples": [ + 1 + ], + "default": 1 + }, + "reverse_video": { + "description": "If true, the video will be reversed.", + "type": "boolean", + "title": "Reverse Video", + "default": false + }, + "loras": { + "description": "LoRA weights to be used in the inference.", + "type": "array", + "title": "Loras", + "items": { + "$ref": "#/components/schemas/LoRAWeight" + }, + "default": [] + }, + "frames_per_second": { + "description": "Frames per second of the generated video. Must be between 4 to 60. When using interpolation and `adjust_fps_for_interpolation` is set to true (default true,) the final FPS will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If `adjust_fps_for_interpolation` is set to false, this value will be used as-is.", + "type": "integer", + "minimum": 4, + "maximum": 60, + "title": "Frames per Second", + "examples": [ + 16 + ], + "default": 16 + }, + "enable_safety_checker": { + "examples": [ + true + ], + "description": "If set to true, input data will be checked for safety before processing.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": false + }, + "num_frames": { + "description": "Number of frames to generate. Must be between 17 to 161 (inclusive).", + "type": "integer", + "minimum": 17, + "maximum": 161, + "title": "Number of Frames", + "examples": [ + 81 + ], + "default": 81 + }, + "end_image_url": { + "description": "URL of the end image.", + "type": "string", + "title": "End Image URL" + }, + "negative_prompt": { + "description": "Negative prompt for video generation.", + "type": "string", + "title": "Negative Prompt", + "default": "" + }, + "guidance_scale": { + "description": "Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.", + "type": "number", + "minimum": 1, + "maximum": 10, + "title": "Guidance Scale (1st Stage)", + "examples": [ + 3.5 + ], + "default": 3.5 + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "description": "The write mode of the output video. Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size.", + "type": "string", + "title": "Video Write Mode", + "examples": [ + "balanced" + ], + "default": "balanced" + }, + "resolution": { + "enum": [ + "480p", + "580p", + "720p" + ], + "description": "Resolution of the generated video (480p, 580p, or 720p).", + "type": "string", + "title": "Resolution", + "examples": [ + "720p" + ], + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "9:16", + "1:1" + ], + "description": "Aspect ratio of the generated video. 
If 'auto', the aspect ratio will be determined automatically based on the input image.", + "type": "string", + "title": "Aspect Ratio", + "default": "auto" + }, + "enable_output_safety_checker": { + "examples": [ + false + ], + "description": "If set to true, output video will be checked for safety after generation.", + "type": "boolean", + "title": "Enable Output Safety Checker", + "default": false + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/gallery/car_720p.png" + ], + "description": "URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped.", + "type": "string", + "title": "Image URL" + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "description": "The quality of the output video. Higher quality means better visual quality but larger file size.", + "type": "string", + "title": "Video Quality", + "examples": [ + "high" + ], + "default": "high" + }, + "guidance_scale_2": { + "description": "Guidance scale for the second stage of the model. This is used to control the adherence to the prompt in the second stage of the model.", + "type": "number", + "minimum": 1, + "maximum": 10, + "title": "Guidance Scale (2nd Stage)", + "examples": [ + 4 + ], + "default": 4 + }, + "enable_prompt_expansion": { + "examples": [ + false + ], + "description": "Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": false + }, + "num_inference_steps": { + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "type": "integer", + "minimum": 2, + "maximum": 40, + "title": "Number of Inference Steps", + "examples": [ + 27 + ], + "default": 27 + }, + "interpolator_model": { + "enum": [ + "none", + "film", + "rife" + ], + "description": "The model to use for frame interpolation. If None, no interpolation is applied.", + "type": "string", + "title": "Interpolator Model", + "examples": [ + "film" + ], + "default": "film" + }, + "seed": { + "description": "Random seed for reproducibility. If None, a random seed is chosen.", + "type": "integer", + "title": "Seed" + }, + "adjust_fps_for_interpolation": { + "examples": [ + true + ], + "description": "If true, the number of frames per second will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. 
If false, the passed frames per second will be used as-is.", + "type": "boolean", + "title": "Adjust FPS for Interpolation", + "default": true + } + }, + "x-fal-order-properties": [ + "image_url", + "prompt", + "num_frames", + "frames_per_second", + "negative_prompt", + "seed", + "resolution", + "aspect_ratio", + "num_inference_steps", + "enable_safety_checker", + "enable_output_safety_checker", + "enable_prompt_expansion", + "acceleration", + "guidance_scale", + "guidance_scale_2", + "shift", + "interpolator_model", + "num_interpolated_frames", + "adjust_fps_for_interpolation", + "video_quality", + "video_write_mode", + "loras", + "reverse_video", + "end_image_url" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "WanV22A14bImageToVideoLoraOutput": { + "title": "WanI2VResponse", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "The white dragon warrior stands still, eyes full of determination and strength. The camera slowly moves closer or circles around the warrior, highlighting the powerful presence and heroic spirit of the character." + ], + "description": "The text prompt used for video generation.", + "type": "string", + "title": "Prompt", + "default": "" + }, + "seed": { + "description": "The seed used for generation.", + "type": "integer", + "title": "Seed" + }, + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/lion/Fbuh3lO_HMT-pS0DATbio_tmp08c3v477.mp4" + } + ], + "description": "The generated video file.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "prompt", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "LoRAWeight": { + "title": "LoRAWeight", + "type": "object", + "properties": { + "path": { + "description": "URL or the path to the LoRA weights.", + "type": "string", + "title": "Path" + }, + "scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Scale", + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + }, + "transformer": { + "enum": [ + "high", + "low", + "both" + ], + "description": "Specifies the transformer to load the lora weight into. 'high' loads into the high-noise transformer, 'low' loads it into the low-noise transformer, while 'both' loads the LoRA into both transformers.", + "type": "string", + "title": "Transformer", + "default": "high" + }, + "weight_name": { + "description": "Name of the LoRA weight. Used only if `path` is a Hugging Face repository, and required only if you have more than 1 safetensors file in the repo.", + "type": "string", + "title": "Weight Name" + } + }, + "x-fal-order-properties": [ + "path", + "weight_name", + "scale", + "transformer" + ], + "required": [ + "path" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan/v2.2-a14b/image-to-video/lora/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-a14b/image-to-video/lora/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
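The `LoRAWeight` schema shown just above is what drives the `/lora` variant of this endpoint: only `path` is required, `scale` is clamped to 0–4, and `transformer` picks which of Wan 2.2's two transformers the weights merge into. A minimal request-body sketch — the `prompt` and `image_url` values are the schema's own examples, while the LoRA URL is a placeholder, not a real artifact:

```ts
// Sketch of a body for fal-ai/wan/v2.2-a14b/image-to-video/lora.
interface LoRAWeight {
  path: string                          // URL or path to the LoRA weights (required)
  weight_name?: string                  // only needed for multi-safetensors HF repos
  scale?: number                        // 0–4, default 1
  transformer?: 'high' | 'low' | 'both' // which transformer to load into, default 'high'
}

const body = {
  prompt: 'Cars racing in slow motion',
  image_url: 'https://storage.googleapis.com/falserverless/gallery/car_720p.png',
  loras: [
    // hypothetical LoRA URL for illustration only
    { path: 'https://example.com/my-wan22-lora.safetensors', scale: 1, transformer: 'both' },
  ] satisfies Array<LoRAWeight>,
}
```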
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-a14b/image-to-video/lora": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanV22A14bImageToVideoLoraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-a14b/image-to-video/lora/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanV22A14bImageToVideoLoraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/minimax/hailuo-02-fast/image-to-video", + "metadata": { + "display_name": "Minimax", + "category": "image-to-video", + "description": "Create blazing fast and economical videos with MiniMax Hailuo-02 Image To Video API at 512p resolution", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:07.111Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/elephant/f_EeVaEkzmhhcRXL1Xruj_8fd7a6cbb43b460b8a6ff550a153f4af.jpg", + "model_url": "https://fal.run/fal-ai/minimax/hailuo-02-fast/image-to-video", + "license_type": "commercial", + "date": "2025-08-06T17:29:12.950Z", + "group": { + "key": "hailuo-02", + "label": "Fast Image to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/minimax/hailuo-02-fast/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/minimax/hailuo-02-fast/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/minimax/hailuo-02-fast/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://fal.media/files/elephant/f_EeVaEkzmhhcRXL1Xruj_8fd7a6cbb43b460b8a6ff550a153f4af.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/minimax/hailuo-02-fast/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/minimax/hailuo-02-fast/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "MinimaxHailuo02FastImageToVideoInput": { + "title": "FastImageToVideoHailuo02Input", + "type": "object", + "properties": { + "prompt_optimizer": { + "description": "Whether to use the model's prompt optimizer", + "type": "boolean", + "title": "Prompt Optimizer", + "default": true + }, + "duration": { + "enum": [ + "6", + "10" + ], + "description": "The duration of the video in seconds. 10 seconds videos are not supported for 1080p resolution.", + "type": "string", + "title": "Duration", + "default": "6" + }, + "prompt": { + "examples": [ + "Extremely realistic movement An old samurai is breaking a stone in half" + ], + "maxLength": 2000, + "type": "string", + "title": "Prompt" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/tiger/U9HN_tm5-3Ls52SbD6CrW_image.webp" + ], + "title": "Image Url", + "type": "string" + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "duration", + "prompt_optimizer" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "MinimaxHailuo02FastImageToVideoOutput": { + "title": "ImageToVideoHailuo02FastOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/monkey/n3-YfNqnyYJPYlrJk-5rS_output.mp4" + } + ], + "description": "The generated video", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/minimax/hailuo-02-fast/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/hailuo-02-fast/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/minimax/hailuo-02-fast/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxHailuo02FastImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/hailuo-02-fast/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxHailuo02FastImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/veo3/image-to-video", + "metadata": { + "display_name": "Veo3", + "category": "image-to-video", + "description": "Veo 3 is the latest state-of-the art video generation model from Google DeepMind", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:09.205Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "model_url": "https://fal.run/fal-ai/veo3/image-to-video", + "license_type": "commercial", + "date": "2025-08-01T16:21:19.645Z", + "group": { + "key": "veo3", + "label": "Image to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/veo3/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/veo3/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/veo3/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/veo3/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/veo3/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Veo3ImageToVideoInput": { + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "duration", + "resolution", + "generate_audio", + "auto_fix", + "image_url" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A woman looks into the camera, breathes in, then exclaims energetically, \"have you guys checked out Veo3 Image-to-Video on Fal? It's incredible!\"" + ], + "maxLength": 20000, + "type": "string", + "title": "Prompt", + "description": "The text prompt describing how the image should be animated" + }, + "resolution": { + "enum": [ + "720p", + "1080p" + ], + "description": "The resolution of the generated video.", + "type": "string", + "title": "Resolution", + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "9:16" + ], + "description": "The aspect ratio of the generated video.", + "type": "string", + "title": "Aspect Ratio", + "default": "auto" + }, + "generate_audio": { + "description": "Whether to generate audio for the video.", + "type": "boolean", + "title": "Generate Audio", + "default": true + }, + "auto_fix": { + "description": "Whether to automatically attempt to fix prompts that fail content policy or other validation checks by rewriting them.", + "type": "boolean", + "title": "Auto Fix", + "default": false + }, + "duration": { + "enum": [ + "4s", + "6s", + "8s" + ], + "description": "The duration of the generated video.", + "type": "string", + "title": "Duration", + "default": "8s" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/veo3-i2v-input.png" + ], + "description": "URL of the input image to animate. Should be 720p or higher resolution in 16:9 or 9:16 aspect ratio. If the image is not in 16:9 or 9:16 aspect ratio, it will be cropped to fit.", + "type": "string", + "title": "Image URL" + } + }, + "title": "Veo3ImageToVideoInput", + "required": [ + "prompt", + "image_url" + ] + }, + "Veo3ImageToVideoOutput": { + "x-fal-order-properties": [ + "video" + ], + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/veo3-i2v-output.mp4" + } + ], + "description": "The generated video.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "Veo3ImageToVideoOutput", + "required": [ + "video" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/veo3/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/veo3/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/veo3/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Veo3ImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/veo3/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Veo3ImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan/v2.2-a14b/image-to-video/turbo", + "metadata": { + "display_name": "Wan", + "category": "image-to-video", + "description": "Wan-2.2 Turbo image-to-video is a video model that generates high-quality videos with high visual quality and motion diversity from text prompts. 
", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:09.356Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-1.jpg", + "model_url": "https://fal.run/fal-ai/wan/v2.2-a14b/image-to-video/turbo", + "license_type": "commercial", + "date": "2025-07-31T18:03:47.684Z", + "group": { + "key": "wan-v22-turbo", + "label": "Image to Video (Turbo)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan/v2.2-a14b/image-to-video/turbo", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan/v2.2-a14b/image-to-video/turbo queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan/v2.2-a14b/image-to-video/turbo", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-1.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan/v2.2-a14b/image-to-video/turbo", + "documentationUrl": "https://fal.ai/models/fal-ai/wan/v2.2-a14b/image-to-video/turbo/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanV22A14bImageToVideoTurboInput": { + "title": "WanTurboI2VRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "The white dragon warrior stands still, eyes full of determination and strength. The camera slowly moves closer or circles around the warrior, highlighting the powerful presence and heroic spirit of the character." + ], + "description": "The text prompt to guide video generation.", + "type": "string", + "title": "Prompt" + }, + "resolution": { + "enum": [ + "480p", + "580p", + "720p" + ], + "description": "Resolution of the generated video (480p, 580p, or 720p).", + "type": "string", + "title": "Resolution", + "examples": [ + "720p" + ], + "default": "720p" + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "description": "Acceleration level to use. The more acceleration, the faster the generation, but with lower quality. The recommended value is 'regular'.", + "type": "string", + "title": "Acceleration", + "examples": [ + "regular" + ], + "default": "regular" + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "9:16", + "1:1" + ], + "description": "Aspect ratio of the generated video. If 'auto', the aspect ratio will be determined automatically based on the input image.", + "type": "string", + "title": "Aspect Ratio", + "default": "auto" + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "description": "The write mode of the output video. 
Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size.", + "type": "string", + "title": "Video Write Mode", + "examples": [ + "balanced" + ], + "default": "balanced" + }, + "enable_output_safety_checker": { + "examples": [ + false + ], + "description": "If set to true, output video will be checked for safety after generation.", + "type": "boolean", + "title": "Enable Output Safety Checker", + "default": false + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/wan/dragon-warrior.jpg" + ], + "description": "URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped.", + "type": "string", + "title": "Image URL" + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "description": "The quality of the output video. Higher quality means better visual quality but larger file size.", + "type": "string", + "title": "Video Quality", + "examples": [ + "high" + ], + "default": "high" + }, + "enable_safety_checker": { + "examples": [ + true + ], + "description": "If set to true, input data will be checked for safety before processing.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": false + }, + "seed": { + "description": "Random seed for reproducibility. If None, a random seed is chosen.", + "type": "integer", + "title": "Seed" + }, + "end_image_url": { + "description": "URL of the end image.", + "type": "string", + "title": "End Image URL" + }, + "enable_prompt_expansion": { + "examples": [ + false + ], + "description": "Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": false + } + }, + "x-fal-order-properties": [ + "image_url", + "prompt", + "seed", + "resolution", + "aspect_ratio", + "enable_safety_checker", + "enable_output_safety_checker", + "enable_prompt_expansion", + "acceleration", + "video_quality", + "video_write_mode", + "end_image_url" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "WanV22A14bImageToVideoTurboOutput": { + "title": "WanTurboI2VResponse", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "The white dragon warrior stands still, eyes full of determination and strength. The camera slowly moves closer or circles around the warrior, highlighting the powerful presence and heroic spirit of the character." 
+ ], + "description": "The text prompt used for video generation.", + "type": "string", + "title": "Prompt", + "default": "" + }, + "seed": { + "description": "The seed used for generation.", + "type": "integer", + "title": "Seed" + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/gallery/wan-i2v-turbo.mp4" + } + ], + "description": "The generated video file.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "prompt", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan/v2.2-a14b/image-to-video/turbo/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-a14b/image-to-video/turbo/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-a14b/image-to-video/turbo": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanV22A14bImageToVideoTurboInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-a14b/image-to-video/turbo/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanV22A14bImageToVideoTurboOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan/v2.2-5b/image-to-video", + "metadata": { + "display_name": "Wan v2.2 5B", + "category": "image-to-video", + "description": "Wan 2.2's 5B model produces up to 5 seconds of video 720p at 24FPS with fluid motion and powerful prompt understanding", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:11.078Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-2.jpg", + "model_url": "https://fal.run/fal-ai/wan/v2.2-5b/image-to-video", + "license_type": "commercial", + "date": "2025-07-30T06:39:13.161Z", + "group": { + "key": "wan-v22", + "label": "Image to Video (5B)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan/v2.2-5b/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan/v2.2-5b/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan/v2.2-5b/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-2.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan/v2.2-5b/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/wan/v2.2-5b/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanV225bImageToVideoInput": { + "title": "WanSmallI2VRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "The white dragon warrior stands still, eyes full of determination and strength. The camera slowly moves closer or circles around the warrior, highlighting the powerful presence and heroic spirit of the character." + ], + "description": "The text prompt to guide video generation.", + "type": "string", + "title": "Prompt" + }, + "shift": { + "description": "Shift value for the video. Must be between 1.0 and 10.0.", + "type": "number", + "minimum": 1, + "maximum": 10, + "title": "Shift", + "examples": [ + 5 + ], + "default": 5 + }, + "num_interpolated_frames": { + "description": "Number of frames to interpolate between each pair of generated frames. Must be between 0 and 4.", + "type": "integer", + "minimum": 0, + "maximum": 4, + "title": "Number of Interpolated Frames", + "examples": [ + 0 + ], + "default": 0 + }, + "frames_per_second": { + "description": "Frames per second of the generated video. Must be between 4 to 60. When using interpolation and `adjust_fps_for_interpolation` is set to true (default true,) the final FPS will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If `adjust_fps_for_interpolation` is set to false, this value will be used as-is.", + "type": "integer", + "minimum": 4, + "maximum": 60, + "title": "Frames per Second", + "examples": [ + 24 + ], + "default": 24 + }, + "guidance_scale": { + "description": "Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.", + "type": "number", + "minimum": 1, + "maximum": 10, + "title": "Guidance Scale", + "examples": [ + 3.5 + ], + "default": 3.5 + }, + "num_frames": { + "description": "Number of frames to generate. Must be between 17 to 161 (inclusive).", + "type": "integer", + "minimum": 17, + "maximum": 161, + "title": "Number of Frames", + "examples": [ + 81 + ], + "default": 81 + }, + "enable_safety_checker": { + "examples": [ + true + ], + "description": "If set to true, input data will be checked for safety before processing.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": false + }, + "negative_prompt": { + "description": "Negative prompt for video generation.", + "type": "string", + "title": "Negative Prompt", + "default": "" + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "description": "The write mode of the output video. Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size.", + "type": "string", + "title": "Video Write Mode", + "examples": [ + "balanced" + ], + "default": "balanced" + }, + "resolution": { + "enum": [ + "580p", + "720p" + ], + "description": "Resolution of the generated video (580p or 720p).", + "type": "string", + "title": "Resolution", + "examples": [ + "720p" + ], + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "9:16", + "1:1" + ], + "description": "Aspect ratio of the generated video. 
If 'auto', the aspect ratio will be determined automatically based on the input image.", + "type": "string", + "title": "Aspect Ratio", + "default": "auto" + }, + "enable_output_safety_checker": { + "examples": [ + false + ], + "description": "If set to true, output video will be checked for safety after generation.", + "type": "boolean", + "title": "Enable Output Safety Checker", + "default": false + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/wan/dragon-warrior.jpg" + ], + "description": "URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped.", + "type": "string", + "title": "Image URL" + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "description": "The quality of the output video. Higher quality means better visual quality but larger file size.", + "type": "string", + "title": "Video Quality", + "examples": [ + "high" + ], + "default": "high" + }, + "enable_prompt_expansion": { + "examples": [ + false + ], + "description": "Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": false + }, + "num_inference_steps": { + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "type": "integer", + "minimum": 2, + "maximum": 50, + "title": "Number of Inference Steps", + "examples": [ + 40 + ], + "default": 40 + }, + "interpolator_model": { + "enum": [ + "none", + "film", + "rife" + ], + "description": "The model to use for frame interpolation. If None, no interpolation is applied.", + "type": "string", + "title": "Interpolator Model", + "examples": [ + "film" + ], + "default": "film" + }, + "seed": { + "description": "Random seed for reproducibility. If None, a random seed is chosen.", + "type": "integer", + "title": "Seed" + }, + "adjust_fps_for_interpolation": { + "examples": [ + true + ], + "description": "If true, the number of frames per second will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If false, the passed frames per second will be used as-is.", + "type": "boolean", + "title": "Adjust FPS for Interpolation", + "default": true + } + }, + "x-fal-order-properties": [ + "image_url", + "prompt", + "num_frames", + "frames_per_second", + "negative_prompt", + "seed", + "resolution", + "aspect_ratio", + "num_inference_steps", + "enable_safety_checker", + "enable_output_safety_checker", + "enable_prompt_expansion", + "guidance_scale", + "shift", + "interpolator_model", + "num_interpolated_frames", + "adjust_fps_for_interpolation", + "video_quality", + "video_write_mode" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "WanV225bImageToVideoOutput": { + "title": "WanSmallI2VResponse", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "The white dragon warrior stands still, eyes full of determination and strength. The camera slowly moves closer or circles around the warrior, highlighting the powerful presence and heroic spirit of the character." 
+ ], + "description": "The text prompt used for video generation.", + "type": "string", + "title": "Prompt", + "default": "" + }, + "seed": { + "description": "The seed used for generation.", + "type": "integer", + "title": "Seed" + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/model_tests/wan/v2.2-small-i2v-output.mp4" + } + ], + "description": "The generated video file.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "prompt", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan/v2.2-5b/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-5b/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-5b/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanV225bImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-5b/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanV225bImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan/v2.2-a14b/image-to-video", + "metadata": { + "display_name": "Wan v2.2 A14B", + "category": "image-to-video", + "description": "fal-ai/wan/v2.2-A14B/image-to-video", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:11.897Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/penguin/xWl1TIQn-uRe7BsctByJk_3e01d1a4f7ff488c9483be0d81f5a3cc.jpg", + "model_url": "https://fal.run/fal-ai/wan/v2.2-a14B/image-to-video", + "license_type": "commercial", + "date": "2025-07-28T15:58:39.979Z", + "group": { + "key": "wan-v22-large", + "label": "Image to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan/v2.2-a14b/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan/v2.2-a14b/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan/v2.2-a14b/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://v3.fal.media/files/penguin/xWl1TIQn-uRe7BsctByJk_3e01d1a4f7ff488c9483be0d81f5a3cc.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan/v2.2-a14b/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/wan/v2.2-a14b/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanV22A14bImageToVideoInput": { + "title": "WanI2VRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "The white dragon warrior stands still, eyes full of determination and strength. The camera slowly moves closer or circles around the warrior, highlighting the powerful presence and heroic spirit of the character." + ], + "description": "The text prompt to guide video generation.", + "type": "string", + "title": "Prompt" + }, + "shift": { + "description": "Shift value for the video. Must be between 1.0 and 10.0.", + "type": "number", + "minimum": 1, + "maximum": 10, + "title": "Shift", + "examples": [ + 5 + ], + "default": 5 + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "description": "Acceleration level to use. The more acceleration, the faster the generation, but with lower quality. The recommended value is 'regular'.", + "type": "string", + "title": "Acceleration", + "examples": [ + "regular" + ], + "default": "regular" + }, + "num_interpolated_frames": { + "description": "Number of frames to interpolate between each pair of generated frames. Must be between 0 and 4.", + "type": "integer", + "minimum": 0, + "maximum": 4, + "title": "Number of Interpolated Frames", + "examples": [ + 1 + ], + "default": 1 + }, + "frames_per_second": { + "description": "Frames per second of the generated video. Must be between 4 to 60. When using interpolation and `adjust_fps_for_interpolation` is set to true (default true,) the final FPS will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If `adjust_fps_for_interpolation` is set to false, this value will be used as-is.", + "type": "integer", + "minimum": 4, + "maximum": 60, + "title": "Frames per Second", + "examples": [ + 16 + ], + "default": 16 + }, + "guidance_scale": { + "description": "Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.", + "type": "number", + "minimum": 1, + "maximum": 10, + "title": "Guidance Scale (1st Stage)", + "examples": [ + 3.5 + ], + "default": 3.5 + }, + "num_frames": { + "description": "Number of frames to generate. Must be between 17 to 161 (inclusive).", + "type": "integer", + "minimum": 17, + "maximum": 161, + "title": "Number of Frames", + "examples": [ + 81 + ], + "default": 81 + }, + "end_image_url": { + "description": "URL of the end image.", + "type": "string", + "title": "End Image URL" + }, + "negative_prompt": { + "description": "Negative prompt for video generation.", + "type": "string", + "title": "Negative Prompt", + "default": "" + }, + "enable_safety_checker": { + "examples": [ + true + ], + "description": "If set to true, input data will be checked for safety before processing.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": false + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "description": "The write mode of the output video. 
Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size.", + "type": "string", + "title": "Video Write Mode", + "examples": [ + "balanced" + ], + "default": "balanced" + }, + "resolution": { + "enum": [ + "480p", + "580p", + "720p" + ], + "description": "Resolution of the generated video (480p, 580p, or 720p).", + "type": "string", + "title": "Resolution", + "examples": [ + "720p" + ], + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "9:16", + "1:1" + ], + "description": "Aspect ratio of the generated video. If 'auto', the aspect ratio will be determined automatically based on the input image.", + "type": "string", + "title": "Aspect Ratio", + "default": "auto" + }, + "enable_output_safety_checker": { + "examples": [ + false + ], + "description": "If set to true, output video will be checked for safety after generation.", + "type": "boolean", + "title": "Enable Output Safety Checker", + "default": false + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/wan/dragon-warrior.jpg" + ], + "description": "URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped.", + "type": "string", + "title": "Image URL" + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "description": "The quality of the output video. Higher quality means better visual quality but larger file size.", + "type": "string", + "title": "Video Quality", + "examples": [ + "high" + ], + "default": "high" + }, + "guidance_scale_2": { + "description": "Guidance scale for the second stage of the model. This is used to control the adherence to the prompt in the second stage of the model.", + "type": "number", + "minimum": 1, + "maximum": 10, + "title": "Guidance Scale (2nd Stage)", + "examples": [ + 3.5 + ], + "default": 3.5 + }, + "enable_prompt_expansion": { + "examples": [ + false + ], + "description": "Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": false + }, + "seed": { + "description": "Random seed for reproducibility. If None, a random seed is chosen.", + "type": "integer", + "title": "Seed" + }, + "interpolator_model": { + "enum": [ + "none", + "film", + "rife" + ], + "description": "The model to use for frame interpolation. If None, no interpolation is applied.", + "type": "string", + "title": "Interpolator Model", + "examples": [ + "film" + ], + "default": "film" + }, + "num_inference_steps": { + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "type": "integer", + "minimum": 2, + "maximum": 40, + "title": "Number of Inference Steps", + "examples": [ + 27 + ], + "default": 27 + }, + "adjust_fps_for_interpolation": { + "examples": [ + true + ], + "description": "If true, the number of frames per second will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. 
If false, the passed frames per second will be used as-is.", + "type": "boolean", + "title": "Adjust FPS for Interpolation", + "default": true + } + }, + "x-fal-order-properties": [ + "image_url", + "prompt", + "num_frames", + "frames_per_second", + "negative_prompt", + "seed", + "resolution", + "aspect_ratio", + "num_inference_steps", + "enable_safety_checker", + "enable_output_safety_checker", + "enable_prompt_expansion", + "acceleration", + "guidance_scale", + "guidance_scale_2", + "shift", + "interpolator_model", + "num_interpolated_frames", + "adjust_fps_for_interpolation", + "video_quality", + "video_write_mode", + "end_image_url" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "WanV22A14bImageToVideoOutput": { + "title": "WanI2VResponse", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "The white dragon warrior stands still, eyes full of determination and strength. The camera slowly moves closer or circles around the warrior, highlighting the powerful presence and heroic spirit of the character." + ], + "description": "The text prompt used for video generation.", + "type": "string", + "title": "Prompt", + "default": "" + }, + "seed": { + "description": "The seed used for generation.", + "type": "integer", + "title": "Seed" + }, + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/lion/Fbuh3lO_HMT-pS0DATbio_tmp08c3v477.mp4" + } + ], + "description": "The generated video file.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "prompt", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan/v2.2-a14b/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-a14b/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-a14b/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanV22A14bImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-a14b/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanV22A14bImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/bytedance/omnihuman", + "metadata": { + "display_name": "OmniHuman", + "category": "image-to-video", + "description": "OmniHuman generates video using an image of a human figure paired with an audio file. It produces vivid, high-quality videos where the character’s emotions and movements maintain a strong correlation with the audio.", + "status": "active", + "tags": [ + "image-to-video", + "lipsync" + ], + "updated_at": "2026-01-26T21:43:12.532Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound.jpg", + "model_url": "https://fal.run/fal-ai/bytedance/omnihuman", + "license_type": "commercial", + "date": "2025-07-27T12:18:13.247Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/bytedance/omnihuman", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/bytedance/omnihuman queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/bytedance/omnihuman", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/bytedance/omnihuman", + "documentationUrl": "https://fal.ai/models/fal-ai/bytedance/omnihuman/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "BytedanceOmnihumanInput": { + "x-fal-order-properties": [ + "image_url", + "audio_url" + ], + "type": "object", + "properties": { + "audio_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/omnihuman_audio.mp3" + ], + "title": "Audio Url", + "type": "string", + "description": "The URL of the audio file to generate the video. Audio must be under 30s long." + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/omnihuman.png" + ], + "title": "Image Url", + "type": "string", + "description": "The URL of the image used to generate the video" + } + }, + "title": "OmniHumanInput", + "required": [ + "image_url", + "audio_url" + ] + }, + "BytedanceOmnihumanOutput": { + "x-fal-order-properties": [ + "video", + "duration" + ], + "type": "object", + "properties": { + "duration": { + "title": "Duration", + "type": "number", + "description": "Duration of audio input/video output as used for billing." + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/omnihuman_output.mp4" + } + ], + "title": "Video", + "description": "Generated video file", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "OmniHumanOutput", + "required": [ + "video", + "duration" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/bytedance/omnihuman/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bytedance/omnihuman/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/bytedance/omnihuman": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BytedanceOmnihumanInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bytedance/omnihuman/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BytedanceOmnihumanOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltxv-13b-098-distilled/image-to-video", + "metadata": { + "display_name": "LTX-Video 13B 0.9.8 Distilled", + "category": "image-to-video", + "description": "Generate long videos from prompts and images using LTX Video-0.9.8 13B Distilled and custom LoRA", + "status": "active", + "tags": [ + "video", + "ltx-video", + "image-to-video" + ], + "updated_at": "2026-01-26T21:43:15.086Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Training-5.jpg", + "model_url": "https://fal.run/fal-ai/ltxv-13b-098-distilled/image-to-video", + "license_type": "commercial", + "date": "2025-07-17T03:02:23.813Z", + "group": { + "key": "ltx-video-13b-098", + "label": "Image to Video" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/ltx-video-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/ltx-video-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltxv-13b-098-distilled/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltxv-13b-098-distilled/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltxv-13b-098-distilled/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Training-5.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltxv-13b-098-distilled/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/ltxv-13b-098-distilled/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + 
"status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Ltxv13b098DistilledImageToVideoInput": { + "description": "Distilled model input", + "type": "object", + "properties": { + "second_pass_skip_initial_steps": { + "description": "The number of inference steps to skip in the initial steps of the second pass. By skipping some steps at the beginning, the second pass can focus on smaller details instead of larger changes.", + "type": "integer", + "minimum": 1, + "maximum": 11, + "title": "Second Pass Skip Initial Steps", + "examples": [ + 5 + ], + "default": 5 + }, + "first_pass_num_inference_steps": { + "description": "Number of inference steps during the first pass.", + "type": "integer", + "minimum": 2, + "maximum": 12, + "title": "Number of Inference Steps", + "examples": [ + 8 + ], + "default": 8 + }, + "frame_rate": { + "description": "The frame rate of the video.", + "type": "integer", + "minimum": 1, + "maximum": 60, + "title": "Frame Rate", + "examples": [ + 24 + ], + "default": 24 + }, + "reverse_video": { + "examples": [ + false + ], + "title": "Reverse Video", + "type": "boolean", + "description": "Whether to reverse the video.", + "default": false + }, + "prompt": { + "examples": [ + "The astronaut gets up and walks away" + ], + "title": "Prompt", + "type": "string", + "description": "Text prompt to guide generation" + }, + "expand_prompt": { + "examples": [ + false + ], + "title": "Expand Prompt", + "type": "boolean", + "description": "Whether to expand the prompt using a language model.", + "default": false + }, + "temporal_adain_factor": { + "description": "The factor for adaptive instance normalization (AdaIN) applied to generated video chunks after the first. This can help deal with a gradual increase in saturation/contrast in the generated video by normalizing the color distribution across the video. 
A high value will ensure the color distribution is more consistent across the video, while a low value will allow for more variation in color distribution.", + "type": "number", + "examples": [ + 0.5 + ], + "maximum": 1, + "title": "Temporal AdaIN Factor", + "minimum": 0, + "multipleOf": 0.05, + "default": 0.5 + }, + "loras": { + "description": "LoRA weights to use for generation", + "type": "array", + "title": "Loras", + "items": { + "$ref": "#/components/schemas/LoRAWeight" + }, + "default": [] + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "num_frames": { + "description": "The number of frames in the video.", + "type": "integer", + "minimum": 9, + "maximum": 1441, + "title": "Number of Frames", + "examples": [ + 121 + ], + "default": 121 + }, + "second_pass_num_inference_steps": { + "description": "Number of inference steps during the second pass.", + "type": "integer", + "minimum": 2, + "maximum": 12, + "title": "Second Pass Number of Inference Steps", + "examples": [ + 8 + ], + "default": 8 + }, + "negative_prompt": { + "description": "Negative prompt for generation", + "type": "string", + "title": "Negative Prompt", + "default": "worst quality, inconsistent motion, blurry, jittery, distorted" + }, + "enable_detail_pass": { + "examples": [ + false + ], + "title": "Enable Detail Pass", + "type": "boolean", + "description": "Whether to use a detail pass. If True, the model will perform a second pass to refine the video and enhance details. This incurs a 2.0x cost multiplier on the base price.", + "default": false + }, + "resolution": { + "examples": [ + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video.", + "enum": [ + "480p", + "720p" + ], + "default": "720p" + }, + "aspect_ratio": { + "examples": [ + "auto" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the video.", + "enum": [ + "9:16", + "1:1", + "16:9", + "auto" + ], + "default": "auto" + }, + "tone_map_compression_ratio": { + "description": "The compression ratio for tone mapping. This is used to compress the dynamic range of the video to improve visual quality. A value of 0.0 means no compression, while a value of 1.0 means maximum compression.", + "type": "number", + "examples": [ + 0 + ], + "maximum": 1, + "title": "Tone Map Compression Ratio", + "minimum": 0, + "multipleOf": 0.05, + "default": 0 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/ltxv-image-input.jpg" + ], + "title": "Image URL", + "type": "string", + "description": "Image URL for Image-to-Video task" + }, + "constant_rate_factor": { + "description": "The constant rate factor (CRF) to compress input media with. 
Compressed input media more closely matches the model's training data, which can improve motion quality.", + "type": "integer", + "minimum": 0, + "maximum": 51, + "title": "Constant Rate Factor", + "examples": [ + 29 + ], + "default": 29 + }, + "seed": { + "description": "Random seed for generation", + "type": "integer", + "title": "Seed" + } + }, + "title": "DistilledImageToVideoInput", + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "loras", + "resolution", + "aspect_ratio", + "seed", + "num_frames", + "first_pass_num_inference_steps", + "second_pass_num_inference_steps", + "second_pass_skip_initial_steps", + "frame_rate", + "expand_prompt", + "reverse_video", + "enable_safety_checker", + "enable_detail_pass", + "temporal_adain_factor", + "tone_map_compression_ratio", + "constant_rate_factor", + "image_url" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "Ltxv13b098DistilledImageToVideoOutput": { + "title": "ImageToVideoOutput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "The astronaut gets up and walks away" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation." + }, + "seed": { + "description": "The seed used for generation.", + "type": "integer", + "title": "Seed" + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/ltxv-image-to-video-output.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "prompt", + "seed" + ], + "required": [ + "video", + "prompt", + "seed" + ] + }, + "LoRAWeight": { + "title": "LoRAWeight", + "type": "object", + "properties": { + "path": { + "description": "URL or path to the LoRA weights.", + "type": "string", + "title": "Path" + }, + "scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "description": "Scale of the LoRA weight. This is a multiplier applied to the LoRA weight when loading it.", + "title": "Scale", + "default": 1 + }, + "weight_name": { + "description": "Name of the LoRA weight. Only used if `path` is a HuggingFace repository, and is only required when the repository contains multiple LoRA weights.", + "type": "string", + "title": "Weight Name" + } + }, + "x-fal-order-properties": [ + "path", + "weight_name", + "scale" + ], + "required": [ + "path" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltxv-13b-098-distilled/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltxv-13b-098-distilled/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ltxv-13b-098-distilled/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltxv13b098DistilledImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltxv-13b-098-distilled/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltxv13b098DistilledImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/veo3/fast/image-to-video", + "metadata": { + "display_name": "Veo 3 Fast [Image to Video]", + "category": "image-to-video", + "description": "Now with a 50% price drop. 
Generate videos from your image prompts using Veo 3 fast.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:16.961Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-1.jpg", + "model_url": "https://fal.run/fal-ai/veo3/fast/image-to-video", + "license_type": "commercial", + "date": "2025-07-09T16:46:52.274Z", + "group": { + "key": "veo3", + "label": "Image to Video [fast]" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/veo3/fast/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/veo3/fast/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/veo3/fast/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-1.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/veo3/fast/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/veo3/fast/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Veo3FastImageToVideoInput": { + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "duration", + "resolution", + "generate_audio", + "auto_fix", + "image_url" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A woman looks into the camera, breathes in, then exclaims energetically, \"have you guys checked out Veo3 Image-to-Video on Fal? 
It's incredible!\"" + ], + "maxLength": 20000, + "type": "string", + "title": "Prompt", + "description": "The text prompt describing how the image should be animated" + }, + "resolution": { + "enum": [ + "720p", + "1080p" + ], + "description": "The resolution of the generated video.", + "type": "string", + "title": "Resolution", + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "9:16" + ], + "description": "The aspect ratio of the generated video.", + "type": "string", + "title": "Aspect Ratio", + "default": "auto" + }, + "generate_audio": { + "description": "Whether to generate audio for the video.", + "type": "boolean", + "title": "Generate Audio", + "default": true + }, + "auto_fix": { + "description": "Whether to automatically attempt to fix prompts that fail content policy or other validation checks by rewriting them.", + "type": "boolean", + "title": "Auto Fix", + "default": false + }, + "duration": { + "enum": [ + "4s", + "6s", + "8s" + ], + "description": "The duration of the generated video.", + "type": "string", + "title": "Duration", + "default": "8s" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/veo3-i2v-input.png" + ], + "description": "URL of the input image to animate. Should be 720p or higher resolution in 16:9 or 9:16 aspect ratio. If the image is not in 16:9 or 9:16 aspect ratio, it will be cropped to fit.", + "type": "string", + "title": "Image URL" + } + }, + "title": "Veo3ImageToVideoInput", + "required": [ + "prompt", + "image_url" + ] + }, + "Veo3FastImageToVideoOutput": { + "x-fal-order-properties": [ + "video" + ], + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/veo3-i2v-output.mp4" + } + ], + "description": "The generated video.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "Veo3ImageToVideoOutput", + "required": [ + "video" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/veo3/fast/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/veo3/fast/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/veo3/fast/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Veo3FastImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/veo3/fast/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Veo3FastImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/vidu/q1/reference-to-video", + "metadata": { + "display_name": "Vidu", + "category": "image-to-video", + "description": "Generate video clips from your multiple image references using Vidu Q1", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:17.333Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Upscale-1.jpeg", + "model_url": "https://fal.run/fal-ai/vidu/q1/reference-to-video", + "license_type": "commercial", + "date": "2025-07-08T11:41:46.993Z", + "group": { + "key": "vidu-q1", + "label": "Reference to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/vidu/q1/reference-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/vidu/q1/reference-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/vidu/q1/reference-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Upscale-1.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/vidu/q1/reference-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/vidu/q1/reference-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ViduQ1ReferenceToVideoInput": { + "title": "Q1ReferenceToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A young woman and a monkey inside a colorful house" + ], + "title": "Prompt", + "type": "string", + "maxLength": 1500, + "description": "Text prompt for video generation, max 1500 characters" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the output video", + "default": "16:9" + }, + "bgm": { + "title": "Bgm", + "type": "boolean", + "description": "Whether to add background music to the generated video", + "default": false + }, + "reference_image_urls": { + "examples": [ + [ + "https://v3.fal.media/files/panda/HDpZj0eLjWwCpjA5__0l1_0e6cd0b9eb7a4a968c0019a4eee15e46.png", + "https://v3.fal.media/files/zebra/153izt1cBlMU-TwD0_B7Q_ea34618f5d974653a16a755aa61e488a.png", + "https://v3.fal.media/files/koala/RCSZ7VEEKGFDfMoGHCwzo_f626718793e94769b1ad36d5891864a4.png" + ] + ], + "title": "Reference Image Urls", + "type": "array", + "description": "URLs of the reference images to use for consistent subject appearance. Q1 model supports up to 7 reference images.", + "items": { + "type": "string" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for generation" + }, + "movement_amplitude": { + "enum": [ + "auto", + "small", + "medium", + "large" + ], + "title": "Movement Amplitude", + "type": "string", + "description": "The movement amplitude of objects in the frame", + "default": "auto" + } + }, + "x-fal-order-properties": [ + "prompt", + "reference_image_urls", + "seed", + "aspect_ratio", + "movement_amplitude", + "bgm" + ], + "required": [ + "prompt", + "reference_image_urls" + ] + }, + "ViduQ1ReferenceToVideoOutput": { + "title": "Q1ReferenceToVideoOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://fal.media/files/panda/4wmqVpGFsqzZrKROz9c1Z_output.mp4" + } + ], + "title": "Video", + "description": "The generated video with consistent subjects from reference images using the Q1 model", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/vidu/q1/reference-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/vidu/q1/reference-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/vidu/q1/reference-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ViduQ1ReferenceToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/vidu/q1/reference-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ViduQ1ReferenceToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ai-avatar/single-text", + "metadata": { + "display_name": "Ai Avatar", + "category": "image-to-video", + "description": "MultiTalk model generates a talking avatar video from an image and text. 
Converts text to speech automatically, then generates the avatar speaking with lip-sync.", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:24.381Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/penguin/ETFEnZEbEj9nc6e1XdFG8_6f87551d505640c89d59f8018dd0ffb0.jpg", + "model_url": "https://fal.run/fal-ai/ai-avatar/single-text", + "license_type": "commercial", + "date": "2025-06-23T16:36:23.549Z", + "group": { + "key": "Ai-Avatar", + "label": "Single TTS" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ai-avatar/single-text", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ai-avatar/single-text queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ai-avatar/single-text", + "category": "image-to-video", + "thumbnailUrl": "https://fal.media/files/penguin/ETFEnZEbEj9nc6e1XdFG8_6f87551d505640c89d59f8018dd0ffb0.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ai-avatar/single-text", + "documentationUrl": "https://fal.ai/models/fal-ai/ai-avatar/single-text/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "AiAvatarSingleTextInput": { + "title": "AvatarSingleTextRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "An elderly man with a white beard and headphones records audio with a microphone. He appears engaged and expressive, suggesting a podcast or voiceover." + ], + "description": "The text prompt to guide video generation.", + "type": "string", + "title": "Prompt" + }, + "resolution": { + "enum": [ + "480p", + "720p" + ], + "description": "Resolution of the video to generate. Must be either 480p or 720p.", + "type": "string", + "title": "Resolution", + "default": "480p" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "description": "The acceleration level to use for generation.", + "type": "string", + "title": "Acceleration", + "default": "regular" + }, + "text_input": { + "examples": [ + "Spend more time with people who make you feel alive, and less with things that drain your soul." + ], + "description": "The text input to guide video generation.", + "type": "string", + "title": "Text Input" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/panda/HuM21CXMf0q7OO2zbvwhV_c4533aada79a495b90e50e32dc9b83a8.png" + ], + "description": "URL of the input image. 
If the input image does not match the chosen aspect ratio, it is resized and center cropped.", + "type": "string", + "title": "Image URL" + }, + "voice": { + "examples": [ + "Bill" + ], + "description": "The voice to use for speech generation", + "type": "string", + "enum": [ + "Aria", + "Roger", + "Sarah", + "Laura", + "Charlie", + "George", + "Callum", + "River", + "Liam", + "Charlotte", + "Alice", + "Matilda", + "Will", + "Jessica", + "Eric", + "Chris", + "Brian", + "Daniel", + "Lily", + "Bill" + ], + "title": "Voice" + }, + "seed": { + "description": "Random seed for reproducibility. If None, a random seed is chosen.", + "type": "integer", + "title": "Seed", + "default": 42 + }, + "num_frames": { + "minimum": 41, + "maximum": 241, + "type": "integer", + "description": "Number of frames to generate. Must be between 41 and 241 (inclusive). If the number of frames is greater than 81, the video will be generated with 1.25x more billing units.", + "title": "Number of Frames", + "default": 136 + } + }, + "x-fal-order-properties": [ + "image_url", + "text_input", + "voice", + "prompt", + "num_frames", + "resolution", + "seed", + "acceleration" + ], + "required": [ + "image_url", + "text_input", + "voice", + "prompt" + ] + }, + "AiAvatarSingleTextOutput": { + "title": "AvatarSingleTextResponse", + "type": "object", + "properties": { + "seed": { + "description": "The seed used for generation.", + "type": "integer", + "title": "Seed" + }, + "video": { + "examples": [ + { + "file_size": 797478, + "file_name": "6c9dd31e1d9a4482877747a52a661a0a.mp4", + "content_type": "application/octet-stream", + "url": "https://v3.fal.media/files/elephant/-huMN0zTaXmBr2CqzCMps_6c9dd31e1d9a4482877747a52a661a0a.mp4" + } + ], + "description": "The generated video file.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ai-avatar/single-text/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ai-avatar/single-text/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ai-avatar/single-text": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AiAvatarSingleTextInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ai-avatar/single-text/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AiAvatarSingleTextOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ai-avatar", + "metadata": { + "display_name": "Ai Avatar", + "category": "image-to-video", + "description": "MultiTalk model generates a talking avatar video from an image and audio file. The avatar lip-syncs to the provided audio with natural facial expressions.", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:24.508Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound.jpg", + "model_url": "https://fal.run/fal-ai/ai-avatar", + "license_type": "commercial", + "date": "2025-06-23T16:35:38.097Z", + "group": { + "key": "Ai-Avatar", + "label": "Single Audio" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ai-avatar", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ai-avatar queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ai-avatar", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ai-avatar", + "documentationUrl": "https://fal.ai/models/fal-ai/ai-avatar/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." 
+ }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "AiAvatarInput": { + "title": "AvatarSingleAudioRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A woman with colorful hair talking on a podcast." + ], + "description": "The text prompt to guide video generation.", + "type": "string", + "title": "Prompt" + }, + "resolution": { + "enum": [ + "480p", + "720p" + ], + "description": "Resolution of the video to generate. Must be either 480p or 720p.", + "type": "string", + "title": "Resolution", + "default": "480p" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "description": "The acceleration level to use for generation.", + "type": "string", + "title": "Acceleration", + "default": "regular" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/koala/gmpc0QevDF9bBsL1EAYVF_1c637094161147559f0910a68275dc34.png" + ], + "description": "URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped.", + "type": "string", + "title": "Image URL" + }, + "audio_url": { + "examples": [ + "https://v3.fal.media/files/penguin/PtiCYda53E9Dav25QmQYI_output.mp3" + ], + "description": "The URL of the audio file.", + "type": "string", + "title": "Audio URL" + }, + "num_frames": { + "minimum": 41, + "maximum": 241, + "type": "integer", + "description": "Number of frames to generate. Must be between 81 to 129 (inclusive). If the number of frames is greater than 81, the video will be generated with 1.25x more billing units.", + "title": "Number of Frames", + "default": 145 + }, + "seed": { + "description": "Random seed for reproducibility. If None, a random seed is chosen.", + "type": "integer", + "title": "Seed", + "default": 42 + } + }, + "x-fal-order-properties": [ + "image_url", + "audio_url", + "prompt", + "num_frames", + "resolution", + "seed", + "acceleration" + ], + "required": [ + "image_url", + "audio_url", + "prompt" + ] + }, + "AiAvatarOutput": { + "title": "AvatarSingleAudioResponse", + "type": "object", + "properties": { + "seed": { + "description": "The seed used for generation.", + "type": "integer", + "title": "Seed" + }, + "video": { + "examples": [ + { + "file_size": 515275, + "file_name": "74af6c0bdd6041c3b1130d54885e3eee.mp4", + "content_type": "application/octet-stream", + "url": "https://v3.fal.media/files/kangaroo/z6VqUwNTwzuWa6YE1g7In_74af6c0bdd6041c3b1130d54885e3eee.mp4" + } + ], + "description": "The generated video file.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ai-avatar/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ai-avatar/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ai-avatar": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AiAvatarInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ai-avatar/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AiAvatarOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ai-avatar/multi-text", + "metadata": { + "display_name": "Ai Avatar", + "category": "image-to-video", + "description": "MultiTalk model generates a multi-person conversation video from an image and text inputs. 
Converts text to speech for each person, generating a realistic conversation scene.", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:24.636Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound.jpg", + "model_url": "https://fal.run/fal-ai/ai-avatar/multi-text", + "license_type": "commercial", + "date": "2025-06-23T16:34:48.424Z", + "group": { + "key": "Ai-Avatar", + "label": "Multi TTS" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ai-avatar/multi-text", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ai-avatar/multi-text queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ai-avatar/multi-text", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ai-avatar/multi-text", + "documentationUrl": "https://fal.ai/models/fal-ai/ai-avatar/multi-text/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "AiAvatarMultiTextInput": { + "title": "AvatarMultiTextRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Two kids talking at lunch." + ], + "description": "The text prompt to guide video generation.", + "type": "string", + "title": "Prompt" + }, + "second_text_input": { + "examples": [ + "I don't know, I am eating this because our mother gave it to us. I think it is something called milky pie." + ], + "description": "The text input to guide video generation.", + "type": "string", + "title": "Second Text Input" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "description": "The acceleration level to use for generation.", + "type": "string", + "title": "Acceleration", + "default": "regular" + }, + "resolution": { + "enum": [ + "480p", + "720p" + ], + "description": "Resolution of the video to generate. Must be either 480p or 720p.", + "type": "string", + "title": "Resolution", + "default": "480p" + }, + "first_text_input": { + "examples": [ + "Do you know what we are eating?" + ], + "description": "The text input to guide video generation.", + "type": "string", + "title": "First Text Input" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/koala/vhkIF86hmgNTBll_lF1xI_3c7476642b19435aa763fe3b49cf99c7.png" + ], + "description": "URL of the input image. 
If the input image does not match the chosen aspect ratio, it is resized and center cropped.", + "type": "string", + "title": "Image URL" + }, + "voice2": { + "enum": [ + "Aria", + "Roger", + "Sarah", + "Laura", + "Charlie", + "George", + "Callum", + "River", + "Liam", + "Charlotte", + "Alice", + "Matilda", + "Will", + "Jessica", + "Eric", + "Chris", + "Brian", + "Daniel", + "Lily", + "Bill" + ], + "description": "The second person's voice to use for speech generation", + "type": "string", + "title": "Voice2", + "default": "Roger" + }, + "voice1": { + "enum": [ + "Aria", + "Roger", + "Sarah", + "Laura", + "Charlie", + "George", + "Callum", + "River", + "Liam", + "Charlotte", + "Alice", + "Matilda", + "Will", + "Jessica", + "Eric", + "Chris", + "Brian", + "Daniel", + "Lily", + "Bill" + ], + "description": "The first person's voice to use for speech generation", + "type": "string", + "title": "Voice1", + "default": "Sarah" + }, + "seed": { + "description": "Random seed for reproducibility. If None, a random seed is chosen.", + "type": "integer", + "title": "Seed", + "default": 81 + }, + "num_frames": { + "minimum": 41, + "maximum": 241, + "type": "integer", + "description": "Number of frames to generate. Must be between 81 and 129 (inclusive). If the number of frames is greater than 81, the video will be generated with 1.25x more billing units.", + "title": "Number of Frames", + "default": 191 + } + }, + "x-fal-order-properties": [ + "image_url", + "first_text_input", + "second_text_input", + "voice1", + "voice2", + "prompt", + "num_frames", + "resolution", + "seed", + "acceleration" + ], + "required": [ + "image_url", + "first_text_input", + "second_text_input", + "prompt" + ] + }, + "AiAvatarMultiTextOutput": { + "title": "AvatarMultiTextResponse", + "type": "object", + "properties": { + "seed": { + "description": "The seed used for generation.", + "type": "integer", + "title": "Seed" + }, + "video": { + "examples": [ + { + "file_size": 352679, + "file_name": "30b76b90c2164f9a926527497c20832b.mp4", + "content_type": "application/octet-stream", + "url": "https://v3.fal.media/files/zebra/lKMkUvzCqKn-gHC0vyUPP_30b76b90c2164f9a926527497c20832b.mp4" + } + ], + "description": "The generated video file.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ai-avatar/multi-text/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ai-avatar/multi-text/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ai-avatar/multi-text": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AiAvatarMultiTextInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ai-avatar/multi-text/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AiAvatarMultiTextOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ai-avatar/multi", + "metadata": { + "display_name": "Ai Avatar", + "category": "image-to-video", + "description": "MultiTalk model generates a multi-person conversation video from an image and audio files. 
Creates a realistic scene where multiple people speak in sequence.", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:24.763Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound.jpg", + "model_url": "https://fal.run/fal-ai/ai-avatar/multi", + "license_type": "commercial", + "date": "2025-06-23T16:34:01.663Z", + "group": { + "key": "Ai-Avatar", + "label": "Multi Audio" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ai-avatar/multi", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ai-avatar/multi queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ai-avatar/multi", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ai-avatar/multi", + "documentationUrl": "https://fal.ai/models/fal-ai/ai-avatar/multi/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "AiAvatarMultiInput": { + "title": "AvatarMultiAudioPersonRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A smiling man and woman wearing headphones sit in front of microphones, appearing to host a podcast. They are engaged in conversation, looking at each other and the camera as they speak. The scene captures a lively and collaborative podcasting session." + ], + "description": "The text prompt to guide video generation.", + "type": "string", + "title": "Prompt" + }, + "resolution": { + "enum": [ + "480p", + "720p" + ], + "description": "Resolution of the video to generate. Must be either 480p or 720p.", + "type": "string", + "title": "Resolution", + "default": "480p" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "description": "The acceleration level to use for generation.", + "type": "string", + "title": "Acceleration", + "default": "regular" + }, + "first_audio_url": { + "examples": [ + "https://v3.fal.media/files/monkey/1XKPx3Xu-IhNLbuinVSwP_output.mp3" + ], + "description": "The URL of the Person 1 audio file.", + "type": "string", + "title": "First Audio URL" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/elephant/Q2ZU6q-d-1boGXhpDgWs9_15a22f816fd34cad969b2329946267b3.png" + ], + "description": "URL of the input image. 
If the input image does not match the chosen aspect ratio, it is resized and center cropped.", + "type": "string", + "title": "Image URL" + }, + "second_audio_url": { + "examples": [ + "https://v3.fal.media/files/zebra/oVKyL8JZ1K2GreeIMxVzm_output.mp3" + ], + "description": "The URL of the Person 2 audio file.", + "type": "string", + "title": "Second Audio URL" + }, + "seed": { + "description": "Random seed for reproducibility. If None, a random seed is chosen.", + "type": "integer", + "title": "Seed", + "default": 81 + }, + "use_only_first_audio": { + "description": "Whether to use only the first audio file.", + "type": "boolean", + "title": "Use Only First Audio", + "default": false + }, + "num_frames": { + "minimum": 41, + "maximum": 241, + "type": "integer", + "description": "Number of frames to generate. Must be between 81 and 129 (inclusive). If the number of frames is greater than 81, the video will be generated with 1.25x more billing units.", + "title": "Number of Frames", + "default": 181 + } + }, + "x-fal-order-properties": [ + "image_url", + "first_audio_url", + "second_audio_url", + "prompt", + "num_frames", + "resolution", + "seed", + "use_only_first_audio", + "acceleration" + ], + "required": [ + "image_url", + "first_audio_url", + "prompt" + ] + }, + "AiAvatarMultiOutput": { + "title": "AvatarMultiAudioResponse", + "type": "object", + "properties": { + "seed": { + "description": "The seed used for generation.", + "type": "integer", + "title": "Seed" + }, + "video": { + "examples": [ + { + "file_size": 704757, + "file_name": "ab27ac57e9464dbea1ef78f7a25469d2.mp4", + "content_type": "application/octet-stream", + "url": "https://v3.fal.media/files/kangaroo/uAF7N-Ow8WwuvbFw8J4Br_ab27ac57e9464dbea1ef78f7a25469d2.mp4" + } + ], + "description": "The generated video file.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ai-avatar/multi/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ai-avatar/multi/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ai-avatar/multi": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AiAvatarMultiInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ai-avatar/multi/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AiAvatarMultiOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/minimax/hailuo-02/pro/image-to-video", + "metadata": { + "display_name": "MiniMax Hailuo 02 [Pro] (Image to Video)", + "category": "image-to-video", + "description": "MiniMax Hailuo-02 Image To Video API (Pro, 1080p): Advanced image-to-video generation model with 1080p resolution", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:26.177Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Upscale-1.jpeg", + "model_url": "https://fal.run/fal-ai/minimax/hailuo-02/pro/image-to-video", + "license_type": "commercial", + "date": "2025-06-18T00:46:50.998Z", + "group": { + "key": "hailuo-02", + "label": "Image to Video (pro) " + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 2, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/minimax/hailuo-02/pro/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/minimax/hailuo-02/pro/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/minimax/hailuo-02/pro/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Upscale-1.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/minimax/hailuo-02/pro/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/minimax/hailuo-02/pro/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MinimaxHailuo02ProImageToVideoInput": { + "title": "ProImageToVideoHailuo02Input", + "type": "object", + "properties": { + "prompt_optimizer": { + "description": "Whether to use the model's prompt optimizer", + "type": "boolean", + "title": "Prompt Optimizer", + "default": true + }, + "prompt": { + "examples": [ + "Man walked into winter cave with polar bear" + ], + "maxLength": 2000, + "type": "string", + "title": "Prompt" + }, + "end_image_url": { + "description": "Optional URL of the image to use as the last frame of the video", + "type": "string", + "title": "End Image Url" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/minimax/1749891352437225630-389852416840474630_1749891352.png" + ], + "title": "Image Url", + "type": "string" + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "prompt_optimizer", + "end_image_url" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "MinimaxHailuo02ProImageToVideoOutput": { + "title": "ImageToVideoHailuo02Output", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/monkey/xF9OsLwGjjNURyAxD8RM1_output.mp4" + } + ], + "description": "The generated video", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/minimax/hailuo-02/pro/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/hailuo-02/pro/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/minimax/hailuo-02/pro/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxHailuo02ProImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/hailuo-02/pro/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxHailuo02ProImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/bytedance/seedance/v1/lite/image-to-video", + "metadata": { + "display_name": "Seedance 1.0 Lite", + "category": "image-to-video", + "description": "Seedance 1.0 Lite", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:28.035Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-1.jpg", + "model_url": "https://fal.run/fal-ai/bytedance/seedance/v1/lite/image-to-video", + "license_type": "commercial", + "date": "2025-06-13T04:31:01.040Z", + "group": { + "key": "seedance-v1", + "label": "Seedance 1.0 Lite -- Image to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/bytedance/seedance/v1/lite/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/bytedance/seedance/v1/lite/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/bytedance/seedance/v1/lite/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-1.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/bytedance/seedance/v1/lite/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/bytedance/seedance/v1/lite/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request 
id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "BytedanceSeedanceV1LiteImageToVideoInput": { + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "resolution", + "duration", + "camera_fixed", + "seed", + "enable_safety_checker", + "image_url", + "end_image_url" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A little dog is running in the sunshine. The camera follows the dog as it plays in a garden." + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt used to generate the video" + }, + "resolution": { + "enum": [ + "480p", + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "Video resolution - 480p for faster generation, 720p for higher quality", + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "21:9", + "16:9", + "4:3", + "1:1", + "3:4", + "9:16", + "auto" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video", + "default": "auto" + }, + "duration": { + "enum": [ + "2", + "3", + "4", + "5", + "6", + "7", + "8", + "9", + "10", + "11", + "12" + ], + "title": "Duration", + "type": "string", + "description": "Duration of the video in seconds", + "default": "5" + }, + "image_url": { + "examples": [ + "https://fal.media/files/koala/f_xmiodPjhiKjdBkFmTu1.png" + ], + "title": "Image Url", + "type": "string", + "description": "The URL of the image used to generate video" + }, + "enable_safety_checker": { + "examples": [ + true + ], + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "camera_fixed": { + "title": "Camera Fixed", + "type": "boolean", + "description": "Whether to fix the camera position", + "default": false + }, + "end_image_url": { + "title": "End Image Url", + "type": "string", + "description": "The URL of the image the video ends with. Defaults to None." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed to control video generation. Use -1 for random." 
+ } + }, + "title": "SeedanceImageToVideoInput", + "required": [ + "prompt", + "image_url" + ] + }, + "BytedanceSeedanceV1LiteImageToVideoOutput": { + "x-fal-order-properties": [ + "video", + "seed" + ], + "type": "object", + "properties": { + "seed": { + "examples": [ + 42 + ], + "title": "Seed", + "type": "integer", + "description": "Seed used for generation" + }, + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/penguin/qmLZSvOIzTKs6bDFXiEtH_video.mp4" + } + ], + "title": "Video", + "description": "Generated video file", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "SeedanceVideoOutput", + "required": [ + "video", + "seed" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/bytedance/seedance/v1/lite/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedance/v1/lite/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedance/v1/lite/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BytedanceSeedanceV1LiteImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedance/v1/lite/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BytedanceSeedanceV1LiteImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/hunyuan-avatar", + "metadata": { + "display_name": "Hunyuan Avatar", + "category": "image-to-video", + "description": "HunyuanAvatar is a High-Fidelity Audio-Driven Human Animation model for Multiple Characters.", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:35.988Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-3.jpg", + "model_url": "https://fal.run/fal-ai/hunyuan-avatar", + "license_type": "commercial", + "date": "2025-05-29T18:26:58.100Z", + "highlighted": false, + "kind": "inference", + "duration_estimate": 8, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/hunyuan-avatar", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/hunyuan-avatar queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/hunyuan-avatar", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-3.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/hunyuan-avatar", + "documentationUrl": "https://fal.ai/models/fal-ai/hunyuan-avatar/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "HunyuanAvatarInput": { + "x-fal-order-properties": [ + "audio_url", + "image_url", + "text", + "num_frames", + "num_inference_steps", + "turbo_mode", + "seed" + ], + "type": "object", + "properties": { + "text": { + "title": "Text", + "type": "string", + "description": "Text prompt describing the scene.", + "default": "A cat is singing." + }, + "image_url": { + "examples": [ + "https://fal.media/files/tiger/Y8EgvVqxORBCqWC1OlX3D_3c4c8bbe7f3941a2aea93e278ba14803.jpg", + "https://v3.fal.media/files/zebra/HWILyw2UYI50Sp_4mDxqr_src2.png" + ], + "description": "The URL of the reference image.", + "type": "string", + "title": "Image Url" + }, + "turbo_mode": { + "title": "Turbo Mode", + "type": "boolean", + "description": "If true, the video will be generated faster with no noticeable degradation in the visual quality.", + "default": true + }, + "audio_url": { + "examples": [ + "https://v3.fal.media/files/koala/80RpP2FOhXZUV3NRKUWZu_2.WAV" + ], + "description": "The URL of the audio file.", + "type": "string", + "title": "Audio Url" + }, + "seed": { + "description": "Random seed for generation.", + "type": "integer", + "title": "Seed" + }, + "num_inference_steps": { + "minimum": 30, + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "type": "integer", + "title": "Num Inference Steps", + "maximum": 50, + "default": 30 + }, + "num_frames": { + "minimum": 129, + "maximum": 401, + "type": "integer", + "title": "Num Frames", + "description": "Number of video frames to generate at 25 FPS. If greater than the input audio length, it will be capped to the length of the input audio.", + "default": 129 + } + }, + "title": "Input", + "required": [ + "audio_url", + "image_url" + ] + }, + "HunyuanAvatarOutput": { + "x-fal-order-properties": [ + "video" + ], + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 1646349, + "file_name": "output_with_audio.mp4", + "content_type": "video/mp4", + "url": "https://v3.fal.media/files/monkey/3ODbdqHHQL3SvgRXEJXQ-_hunava_8333d613-d4e3-42ff-be36-1e97775621ba_audio.mp4" + } + ], + "title": "Video", + "description": "The generated video with the avatar animation.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "Output", + "required": [ + "video" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." 
+ }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/hunyuan-avatar/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-avatar/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-avatar": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HunyuanAvatarInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-avatar/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HunyuanAvatarOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/v2.1/pro/image-to-video", + "metadata": { + "display_name": "Kling 2.1 (pro)", + "category": "image-to-video", + "description": "Kling 2.1 Pro is an advanced endpoint for the Kling 2.1 model, offering professional-grade videos with enhanced visual fidelity, precise camera movements, and dynamic motion control, perfect for cinematic storytelling.\n\n", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:36.895Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Upscale-1.jpeg", + "model_url": "https://fal.run/fal-ai/kling-video/v2.1/pro/image-to-video", + "license_type": "commercial", + "date": "2025-05-28T21:01:03.267Z", + "group": { + "key": "kling-video-v21", + "label": "2.1 Pro (Image to Video)" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 5, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for 
fal-ai/kling-video/v2.1/pro/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-video/v2.1/pro/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/v2.1/pro/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Upscale-1.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/v2.1/pro/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/v2.1/pro/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoV21ProImageToVideoInput": { + "title": "ImageToVideoV21ProRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Warm, incandescent streetlights paint the rain-slicked cobblestones in pools of amber light as a couple walks hand-in-hand, their silhouettes stark against the blurry backdrop of a city shrouded in a gentle downpour; the camera lingers on the subtle textures of their rain-soaked coats and the glistening reflections dancing on the wet pavement, creating a sense of intimate vulnerability and shared quietude." 
+ ], + "title": "Prompt", + "type": "string", + "maxLength": 2500 + }, + "duration": { + "enum": [ + "5", + "10" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds", + "default": "5" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/lion/_I_io6Gtk83c72d-afXf8_image.webp" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to be used for the video" + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "maxLength": 2500, + "default": "blur, distort, and low quality" + }, + "tail_image_url": { + "title": "Tail Image Url", + "type": "string", + "description": "URL of the image to be used for the end of the video" + }, + "cfg_scale": { + "minimum": 0, + "title": "Cfg Scale", + "type": "number", + "maximum": 1, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt.\n ", + "default": 0.5 + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "duration", + "negative_prompt", + "cfg_scale", + "tail_image_url" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "KlingVideoV21ProImageToVideoOutput": { + "title": "ImageToVideoV21ProOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/rabbit/Y5I8-7u3e7ogVSvPin1TS_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/v2.1/pro/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v2.1/pro/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v2.1/pro/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV21ProImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v2.1/pro/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV21ProImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/hunyuan-portrait", + "metadata": { + "display_name": "Hunyuan Portrait", + "category": "image-to-video", + "description": "HunyuanPortrait is a diffusion-based framework for generating lifelike, temporally consistent portrait animations.", + "status": "active", + "tags": [ + "animation", + "lip sync" + ], + "updated_at": "2026-01-26T21:43:38.537Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Upscale-4.jpeg", + "model_url": "https://fal.run/fal-ai/hunyuan-portrait", + "license_type": "commercial", + "date": "2025-05-27T13:49:00.211Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/hunyuan-portrait", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/hunyuan-portrait queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/hunyuan-portrait", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Upscale-4.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/hunyuan-portrait", + "documentationUrl": "https://fal.ai/models/fal-ai/hunyuan-portrait/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "HunyuanPortraitInput": { + "title": "Input", + "type": "object", + "properties": { + "video_url": { + "examples": [ + "https://v3.fal.media/files/panda/2GQH1q-bJOamqCGWMtKvS_what_if.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "The URL of the driving video." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for generation. If None, a random seed will be used." + }, + "use_arcface": { + "title": "Use Arcface", + "type": "boolean", + "description": "Whether to use ArcFace for face recognition.", + "default": true + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/elephant/GG7iU-4GmzkX3_gIXutRV_image.png" + ], + "title": "Image Url", + "type": "string", + "description": "The URL of the source image." + } + }, + "x-fal-order-properties": [ + "video_url", + "image_url", + "seed", + "use_arcface" + ], + "required": [ + "video_url", + "image_url" + ] + }, + "HunyuanPortraitOutput": { + "title": "Output", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 5485412, + "file_name": "output_with_audio.mp4", + "content_type": "video/mp4", + "url": "https://v3.fal.media/files/tiger/9H3vkuqNoYcKr6OBGj3Mr_3b01c7a4-802c-4697-b8fb-ad86bd0eba41_output_with_audio.mp4" + } + ], + "title": "Video", + "description": "The generated video with the portrait animation.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/hunyuan-portrait/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-portrait/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-portrait": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HunyuanPortraitInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-portrait/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HunyuanPortraitOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/v1.6/standard/elements", + "metadata": { + "display_name": "Kling 1.6 Elements", + "category": "image-to-video", + "description": "Generate video clips from your multiple image references using Kling 1.6 (standard)", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:40.801Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/kling-1-6-image-to-video.webp", + "thumbnail_animated_url": "https://storage.googleapis.com/falserverless/gallery/kling-1-6-image-to-video-animated.webp", + "model_url": "https://fal.run/fal-ai/kling-video/v1.6/standard/elements", + "license_type": "commercial", + "date": "2025-05-20T14:33:48.560Z", + "group": { + "key": "kling-video-v1-6", + "label": "Elements v1.6 (std)" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 6, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling-video/v1.6/standard/elements", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-video/v1.6/standard/elements queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/v1.6/standard/elements", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/kling-1-6-image-to-video.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/v1.6/standard/elements", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/v1.6/standard/elements/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + 
"IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoV16StandardElementsInput": { + "title": "MultiImageToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A cute girl and a baby cow sleeping together on a bed" + ], + "title": "Prompt", + "type": "string", + "maxLength": 2500 + }, + "duration": { + "enum": [ + "5", + "10" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds", + "default": "5" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video frame", + "default": "16:9" + }, + "input_image_urls": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/web-examples/kling-elements/first_image.jpeg", + "https://storage.googleapis.com/falserverless/web-examples/kling-elements/second_image.png" + ] + ], + "title": "Input Image Urls", + "type": "array", + "description": "List of image URLs to use for video generation. Supports up to 4 images.", + "items": { + "type": "string" + } + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "maxLength": 2500, + "default": "blur, distort, and low quality" + } + }, + "x-fal-order-properties": [ + "prompt", + "input_image_urls", + "duration", + "aspect_ratio", + "negative_prompt" + ], + "required": [ + "prompt", + "input_image_urls" + ] + }, + "KlingVideoV16StandardElementsOutput": { + "title": "ElementsOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 3910577, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://v3.fal.media/files/penguin/twy6u1yv09NvqsX0mMFM2_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/v1.6/standard/elements/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1.6/standard/elements/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1.6/standard/elements": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV16StandardElementsInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1.6/standard/elements/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV16StandardElementsOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/v1.6/pro/elements", + "metadata": { + "display_name": "Kling 1.6 Elements", + "category": "image-to-video", + "description": "Generate video clips from your multiple image references using Kling 1.6 (pro)", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:40.925Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/kling-1-6-image-to-video.webp", + "thumbnail_animated_url": "https://storage.googleapis.com/falserverless/gallery/kling-1-6-image-to-video-animated.webp", + "model_url": "https://fal.run/fal-ai/kling-video/v1.6/pro/elements", + "license_type": "commercial", + "date": "2025-05-20T14:31:12.908Z", + "group": { + "key": "kling-video-v1-6", + "label": "Elements v1.6 (pro)" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 6, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling-video/v1.6/pro/elements", + "version": "1.0.0", + "description": "The OpenAPI 
schema for the fal-ai/kling-video/v1.6/pro/elements queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/v1.6/pro/elements", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/kling-1-6-image-to-video.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/v1.6/pro/elements", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/v1.6/pro/elements/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoV16ProElementsInput": { + "title": "MultiImageToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A cute girl and a baby cow sleeping together on a bed" + ], + "title": "Prompt", + "type": "string", + "maxLength": 2500 + }, + "duration": { + "enum": [ + "5", + "10" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds", + "default": "5" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video frame", + "default": "16:9" + }, + "input_image_urls": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/web-examples/kling-elements/first_image.jpeg", + "https://storage.googleapis.com/falserverless/web-examples/kling-elements/second_image.png" + ] + ], + "title": "Input Image Urls", + "type": "array", + "description": "List of image URLs to use for video generation. 
Supports up to 4 images.", + "items": { + "type": "string" + } + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "maxLength": 2500, + "default": "blur, distort, and low quality" + } + }, + "x-fal-order-properties": [ + "prompt", + "input_image_urls", + "duration", + "aspect_ratio", + "negative_prompt" + ], + "required": [ + "prompt", + "input_image_urls" + ] + }, + "KlingVideoV16ProElementsOutput": { + "title": "ElementsOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 3910577, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://v3.fal.media/files/penguin/twy6u1yv09NvqsX0mMFM2_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/v1.6/pro/elements/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1.6/pro/elements/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1.6/pro/elements": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV16ProElementsInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1.6/pro/elements/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV16ProElementsOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx-video-13b-distilled/image-to-video", + "metadata": { + "display_name": "LTX Video-0.9.7 13B Distilled", + "category": "image-to-video", + "description": "Generate videos from prompts and images using LTX Video-0.9.7 13B Distilled and custom LoRA", + "status": "active", + "tags": [ + "video", + "ltx-video", + "image-to-video" + ], + "updated_at": "2026-01-26T21:43:41.478Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/rabbit/N3sm2TCARXV47JxgfxZJt_8caf31dada5249d996b99fea8028fef8.jpg", + "model_url": "https://fal.run/fal-ai/ltx-video-13b-distilled/image-to-video", + "license_type": "commercial", + "date": "2025-05-17T01:59:46.017Z", + "group": { + "key": "ltx-video-13b-distilled", + "label": "Image to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/ltx-video-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/ltx-video-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-video-13b-distilled/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx-video-13b-distilled/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-video-13b-distilled/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://fal.media/files/rabbit/N3sm2TCARXV47JxgfxZJt_8caf31dada5249d996b99fea8028fef8.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx-video-13b-distilled/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx-video-13b-distilled/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LtxVideo13bDistilledImageToVideoInput": { + "title": "DistilledImageToVideoInput", + "type": "object", + "properties": { + "second_pass_skip_initial_steps": { + "description": "The number of inference steps to skip in the initial steps of the second pass. By skipping some steps at the beginning, the second pass can focus on smaller details instead of larger changes.", + "type": "integer", + "minimum": 1, + "title": "Second Pass Skip Initial Steps", + "examples": [ + 5 + ], + "maximum": 20, + "default": 5 + }, + "first_pass_num_inference_steps": { + "description": "Number of inference steps during the first pass.", + "type": "integer", + "minimum": 2, + "title": "First Pass Num Inference Steps", + "examples": [ + 8 + ], + "maximum": 20, + "default": 8 + }, + "frame_rate": { + "description": "The frame rate of the video.", + "type": "integer", + "minimum": 1, + "title": "Frame Rate", + "examples": [ + 30 + ], + "maximum": 60, + "default": 30 + }, + "reverse_video": { + "examples": [ + false + ], + "title": "Reverse Video", + "type": "boolean", + "description": "Whether to reverse the video.", + "default": false + }, + "prompt": { + "examples": [ + "The astronaut gets up and walks away" + ], + "title": "Prompt", + "type": "string", + "description": "Text prompt to guide generation" + }, + "expand_prompt": { + "examples": [ + false + ], + "title": "Expand Prompt", + "type": "boolean", + "description": "Whether to expand the prompt using a language model.", + "default": false + }, + "loras": { + "title": "Loras", + "type": "array", + "description": "LoRA weights to use for generation", + "items": { + "$ref": "#/components/schemas/LoRAWeight" + }, + "default": [] + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "num_frames": { + "description": "The number of frames in the video.", + "type": "integer", + "minimum": 9, + "title": "Num Frames", + "examples": [ + 121 + ], + "maximum": 161, + "default": 121 + }, + "second_pass_num_inference_steps": { + "description": "Number of inference steps during the second pass.", + "type": "integer", + "minimum": 2, + "title": "Second Pass Num Inference Steps", + "examples": [ + 8 + ], + "maximum": 20, + "default": 8 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for generation", + "default": "worst quality, inconsistent motion, blurry, jittery, distorted" + }, + "resolution": { + "enum": [ + "480p", + "720p" + ], + "title": "Resolution", + "type": "string", + "examples": [ + "720p" + ], + "description": "Resolution of the generated video (480p or 720p).", + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "9:16", + "1:1", + "16:9", + "auto" + ], + "title": "Aspect Ratio", + "type": "string", + "examples": [ + "auto" + ], + "description": "The aspect ratio of the video.", + "default": "auto" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/ltxv-image-input.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "Image URL for 
Image-to-Video task" + }, + "constant_rate_factor": { + "description": "The constant rate factor (CRF) to compress input media with. Compressed input media more closely matches the model's training data, which can improve motion quality.", + "type": "integer", + "minimum": 20, + "title": "Constant Rate Factor", + "examples": [ + 35 + ], + "maximum": 60, + "default": 35 + }, + "first_pass_skip_final_steps": { + "minimum": 0, + "title": "First Pass Skip Final Steps", + "type": "integer", + "maximum": 20, + "description": "Number of inference steps to skip in the final steps of the first pass. By skipping some steps at the end, the first pass can focus on larger changes instead of smaller details.", + "default": 1 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for generation" + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "loras", + "resolution", + "aspect_ratio", + "seed", + "num_frames", + "first_pass_num_inference_steps", + "first_pass_skip_final_steps", + "second_pass_num_inference_steps", + "second_pass_skip_initial_steps", + "frame_rate", + "expand_prompt", + "reverse_video", + "enable_safety_checker", + "constant_rate_factor", + "image_url" + ], + "description": "Distilled model input", + "required": [ + "prompt", + "image_url" + ] + }, + "LtxVideo13bDistilledImageToVideoOutput": { + "title": "ImageToVideoOutput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "The astronaut gets up and walks away" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/ltxv-image-to-video-output.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "prompt", + "seed" + ], + "required": [ + "video", + "prompt", + "seed" + ] + }, + "LoRAWeight": { + "title": "LoRAWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "title": "Scale", + "type": "number", + "maximum": 4, + "description": "Scale of the LoRA weight. This is a multiplier applied to the LoRA weight when loading it.", + "default": 1 + }, + "weight_name": { + "title": "Weight Name", + "type": "string", + "description": "Name of the LoRA weight. Only used if `path` is a HuggingFace repository, and is only required when the repository contains multiple LoRA weights." + } + }, + "x-fal-order-properties": [ + "path", + "weight_name", + "scale" + ], + "required": [ + "path" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-video-13b-distilled/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-13b-distilled/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-13b-distilled/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LtxVideo13bDistilledImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-13b-distilled/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LtxVideo13bDistilledImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx-video-13b-dev/image-to-video", + "metadata": { + "display_name": "LTX Video-0.9.7 13B", + "category": "image-to-video", + "description": "Generate videos from prompts and images using LTX Video-0.9.7 13B and custom LoRA", + "status": "active", + "tags": [ + "video", + "ltx-video", + "image-to-video" + ], + "updated_at": "2026-01-26T21:43:41.869Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Training-5.jpg", + "model_url": "https://fal.run/fal-ai/ltx-video-13b-dev/image-to-video", + "license_type": "commercial", + "date": "2025-05-17T01:53:06.455Z", + "group": { + "key": "ltx-video-13b", + "label": "Image to Video" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/ltx-video-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/ltx-video-trainer" + ] + }, 
+ "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-video-13b-dev/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx-video-13b-dev/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-video-13b-dev/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Training-5.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx-video-13b-dev/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx-video-13b-dev/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LtxVideo13bDevImageToVideoInput": { + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "loras", + "resolution", + "aspect_ratio", + "seed", + "num_frames", + "first_pass_num_inference_steps", + "first_pass_skip_final_steps", + "second_pass_num_inference_steps", + "second_pass_skip_initial_steps", + "frame_rate", + "expand_prompt", + "reverse_video", + "enable_safety_checker", + "image_url", + "constant_rate_factor" + ], + "type": "object", + "properties": { + "second_pass_skip_initial_steps": { + "description": "The number of inference steps to skip in the initial steps of the second pass. 
By skipping some steps at the beginning, the second pass can focus on smaller details instead of larger changes.", + "type": "integer", + "minimum": 1, + "maximum": 50, + "title": "Second Pass Skip Initial Steps", + "examples": [ + 17 + ], + "default": 17 + }, + "first_pass_num_inference_steps": { + "description": "Number of inference steps during the first pass.", + "type": "integer", + "minimum": 2, + "maximum": 50, + "title": "First Pass Num Inference Steps", + "examples": [ + 30 + ], + "default": 30 + }, + "frame_rate": { + "description": "The frame rate of the video.", + "type": "integer", + "minimum": 1, + "maximum": 60, + "title": "Frame Rate", + "examples": [ + 30 + ], + "default": 30 + }, + "prompt": { + "examples": [ + "The astronaut gets up and walks away" + ], + "title": "Prompt", + "type": "string", + "description": "Text prompt to guide generation" + }, + "reverse_video": { + "examples": [ + false + ], + "title": "Reverse Video", + "type": "boolean", + "description": "Whether to reverse the video.", + "default": false + }, + "expand_prompt": { + "examples": [ + false + ], + "title": "Expand Prompt", + "type": "boolean", + "description": "Whether to expand the prompt using a language model.", + "default": false + }, + "loras": { + "title": "Loras", + "type": "array", + "description": "LoRA weights to use for generation", + "items": { + "$ref": "#/components/schemas/LoRAWeight" + }, + "default": [] + }, + "second_pass_num_inference_steps": { + "description": "Number of inference steps during the second pass.", + "type": "integer", + "minimum": 2, + "maximum": 50, + "title": "Second Pass Num Inference Steps", + "examples": [ + 30 + ], + "default": 30 + }, + "num_frames": { + "description": "The number of frames in the video.", + "type": "integer", + "minimum": 9, + "maximum": 161, + "title": "Num Frames", + "examples": [ + 121 + ], + "default": 121 + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for generation", + "default": "worst quality, inconsistent motion, blurry, jittery, distorted" + }, + "resolution": { + "enum": [ + "480p", + "720p" + ], + "title": "Resolution", + "type": "string", + "examples": [ + "720p" + ], + "description": "Resolution of the generated video (480p or 720p).", + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "9:16", + "1:1", + "16:9", + "auto" + ], + "title": "Aspect Ratio", + "type": "string", + "examples": [ + "auto" + ], + "description": "The aspect ratio of the video.", + "default": "auto" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/ltxv-image-input.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "Image URL for Image-to-Video task" + }, + "constant_rate_factor": { + "description": "The constant rate factor (CRF) to compress input media with. Compressed input media more closely matches the model's training data, which can improve motion quality.", + "type": "integer", + "minimum": 20, + "maximum": 60, + "title": "Constant Rate Factor", + "examples": [ + 35 + ], + "default": 35 + }, + "first_pass_skip_final_steps": { + "minimum": 0, + "maximum": 50, + "type": "integer", + "title": "First Pass Skip Final Steps", + "description": "Number of inference steps to skip in the final steps of the first pass. 
By skipping some steps at the end, the first pass can focus on larger changes instead of smaller details.", + "default": 3 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for generation" + } + }, + "title": "ImageToVideoInput", + "required": [ + "prompt", + "image_url" + ] + }, + "LtxVideo13bDevImageToVideoOutput": { + "x-fal-order-properties": [ + "video", + "prompt", + "seed" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "The astronaut gets up and walks away" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/ltxv-image-to-video-output.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "ImageToVideoOutput", + "required": [ + "video", + "prompt", + "seed" + ] + }, + "LoRAWeight": { + "x-fal-order-properties": [ + "path", + "weight_name", + "scale" + ], + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Scale", + "description": "Scale of the LoRA weight. This is a multiplier applied to the LoRA weight when loading it.", + "default": 1 + }, + "weight_name": { + "title": "Weight Name", + "type": "string", + "description": "Name of the LoRA weight. Only used if `path` is a HuggingFace repository, and is only required when the repository contains multiple LoRA weights." + } + }, + "title": "LoRAWeight", + "required": [ + "path" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-video-13b-dev/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-13b-dev/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-13b-dev/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LtxVideo13bDevImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-13b-dev/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LtxVideo13bDevImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx-video-lora/image-to-video", + "metadata": { + "display_name": "LTX Video-0.9.7 LoRA", + "category": "image-to-video", + "description": "Generate videos from prompts and images using LTX Video-0.9.7 and custom LoRA", + "status": "active", + "tags": [ + "video", + "ltx-video", + "image-to-video" + ], + "updated_at": "2026-01-26T21:43:42.828Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Training-5.jpg", + "model_url": "https://fal.run/fal-ai/ltx-video-lora/image-to-video", + "license_type": "commercial", + "date": "2025-05-15T19:36:01.467Z", + "group": { + "key": "ltx-video-lora", + "label": "Image to Video" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 2, + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/ltx-video-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/ltx-video-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-video-lora/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx-video-lora/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-video-lora/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Training-5.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx-video-lora/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx-video-lora/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + 
"enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LtxVideoLoraImageToVideoInput": { + "title": "ImageToVideoInput", + "type": "object", + "properties": { + "number_of_steps": { + "description": "The number of inference steps to use.", + "type": "integer", + "minimum": 1, + "title": "Number Of Steps", + "examples": [ + 30 + ], + "maximum": 50, + "default": 30 + }, + "resolution": { + "enum": [ + "480p", + "720p" + ], + "title": "Resolution", + "type": "string", + "examples": [ + "720p" + ], + "description": "The resolution of the video.", + "default": "720p" + }, + "reverse_video": { + "examples": [ + false + ], + "title": "Reverse Video", + "type": "boolean", + "description": "Whether to reverse the video.", + "default": false + }, + "aspect_ratio": { + "enum": [ + "16:9", + "1:1", + "9:16", + "auto" + ], + "title": "Aspect Ratio", + "type": "string", + "examples": [ + "auto" + ], + "description": "The aspect ratio of the video.", + "default": "auto" + }, + "frame_rate": { + "description": "The frame rate of the video.", + "type": "integer", + "minimum": 1, + "title": "Frame Rate", + "examples": [ + 25 + ], + "maximum": 60, + "default": 25 + }, + "expand_prompt": { + "examples": [ + false + ], + "title": "Expand Prompt", + "type": "boolean", + "description": "Whether to expand the prompt using the LLM.", + "default": false + }, + "number_of_frames": { + "description": "The number of frames in the video.", + "type": "integer", + "minimum": 9, + "title": "Number Of Frames", + "examples": [ + 89 + ], + "maximum": 161, + "default": 89 + }, + "image_url": { + "examples": [ + "https://h2.inkwai.com/bs2/upload-ylab-stunt/se/ai_portal_queue_mmu_image_upscale_aiweb/3214b798-e1b4-4b00-b7af-72b5b0417420_raw_image_0.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "The URL of the image to use as input." + }, + "loras": { + "title": "Loras", + "type": "array", + "description": "The LoRA weights to use for generation.", + "items": { + "$ref": "#/components/schemas/LoRAWeight" + }, + "default": [] + }, + "prompt": { + "examples": [ + "The astronaut gets up and walks away" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the video from." + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed to use for generation." 
+ }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to use.", + "default": "blurry, low quality, low resolution, inconsistent motion, jittery, distorted" + } + }, + "description": "Request model for image-to-video generation.", + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "loras", + "resolution", + "aspect_ratio", + "number_of_frames", + "number_of_steps", + "frame_rate", + "seed", + "expand_prompt", + "reverse_video", + "enable_safety_checker", + "image_url" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "LtxVideoLoraImageToVideoOutput": { + "title": "ImageToVideoOutput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "The astronaut gets up and walks away" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/ltx_i2v_output.mp4" + } + ], + "title": "Video", + "description": "The generated video.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "prompt", + "seed", + "video" + ], + "required": [ + "prompt", + "seed", + "video" + ] + }, + "LoRAWeight": { + "title": "LoRAWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "title": "Scale", + "type": "number", + "maximum": 4, + "description": "Scale of the LoRA weight. This is a multiplier applied to the LoRA weight when loading it.", + "default": 1 + }, + "weight_name": { + "title": "Weight Name", + "type": "string", + "description": "Name of the LoRA weight. Only used if `path` is a HuggingFace repository, and is only required when the repository contains multiple LoRA weights." + } + }, + "description": "LoRA weight to use for generation.", + "x-fal-order-properties": [ + "path", + "weight_name", + "scale" + ], + "required": [ + "path" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-video-lora/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-lora/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-lora/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LtxVideoLoraImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-lora/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LtxVideoLoraImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pixverse/v4.5/transition", + "metadata": { + "display_name": "Pixverse", + "category": "image-to-video", + "description": "Create seamless transition between images using PixVerse v4.5", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:43.486Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "model_url": "https://fal.run/fal-ai/pixverse/v4.5/transition", + "license_type": "commercial", + "date": "2025-05-15T15:57:02.859Z", + "group": { + "key": "pixverse-45", + "label": "Transition v4.5" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pixverse/v4.5/transition", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pixverse/v4.5/transition queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pixverse/v4.5/transition", + "category": "image-to-video", + "thumbnailUrl": 
"https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/pixverse/v4.5/transition", + "documentationUrl": "https://fal.ai/models/fal-ai/pixverse/v4.5/transition/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PixverseV45TransitionInput": { + "title": "TransitionRequest", + "type": "object", + "properties": { + "first_image_url": { + "examples": [ + "https://v3.fal.media/files/zebra/owQh2DAzk8UU7J02nr5RY_Co2P4boLv6meIZ5t9gKvL_8685da151df343ab8bf82165c928e2a5.jpg" + ], + "title": "First Image Url", + "type": "string", + "description": "URL of the image to use as the first frame" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "4:3", + "1:1", + "3:4", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video", + "default": "16:9" + }, + "resolution": { + "enum": [ + "360p", + "540p", + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video", + "default": "720p" + }, + "style": { + "enum": [ + "anime", + "3d_animation", + "clay", + "comic", + "cyberpunk" + ], + "title": "Style", + "type": "string", + "description": "The style of the generated video" + }, + "prompt": { + "examples": [ + "Scene slowly transition into cat swimming under water" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt for the transition" + }, + "duration": { + "enum": [ + "5", + "8" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds", + "default": "5" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n " + }, + "end_image_url": { + "examples": [ + "https://v3.fal.media/files/kangaroo/RgedFs_WSnq5BgER7qDx1_ONrbTJ1YAGXz-9JnSsBoB_bdc8750387734bfe940319f469f7b0b2.jpg" + ], + "title": "End Image Url", + "type": "string", + "description": "URL of the image to use as the last frame" + }, + "negative_prompt": { + "examples": [ + "blurry, low quality, low resolution, pixelated, noisy, grainy, out of focus, poorly lit, poorly exposed, poorly composed, poorly framed, poorly cropped, poorly color corrected, poorly color graded" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt to be used for the generation", + "default": "" + } + }, + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "resolution", + "duration", + 
"negative_prompt", + "style", + "seed", + "first_image_url", + "end_image_url" + ], + "required": [ + "prompt", + "first_image_url" + ] + }, + "PixverseV45TransitionOutput": { + "title": "TransitionOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 3890360, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://fal.media/files/panda/5KmKS-mh1vO-htbqE5oex_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pixverse/v4.5/transition/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v4.5/transition/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v4.5/transition": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV45TransitionInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v4.5/transition/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV45TransitionOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pixverse/v4.5/image-to-video/fast", + "metadata": { + "display_name": "Pixverse", + "category": "image-to-video", + "description": "Generate fast high quality video clips from text and image prompts using PixVerse v4.5", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:43.630Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "model_url": "https://fal.run/fal-ai/pixverse/v4.5/image-to-video/fast", + "license_type": "commercial", + "date": "2025-05-15T15:55:20.848Z", + "group": { + "key": "pixverse-45", + "label": "Image to Video v4.5 (Fast)" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pixverse/v4.5/image-to-video/fast", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pixverse/v4.5/image-to-video/fast queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pixverse/v4.5/image-to-video/fast", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/pixverse/v4.5/image-to-video/fast", + "documentationUrl": "https://fal.ai/models/fal-ai/pixverse/v4.5/image-to-video/fast/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "PixverseV45ImageToVideoFastInput": { + "title": "FastImageToVideoRequestV4", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A woman warrior with her hammer walking with his glacier wolf." + ], + "title": "Prompt", + "type": "string" + }, + "resolution": { + "enum": [ + "360p", + "540p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video", + "default": "720p" + }, + "style": { + "enum": [ + "anime", + "3d_animation", + "clay", + "comic", + "cyberpunk" + ], + "title": "Style", + "type": "string", + "description": "The style of the generated video" + }, + "camera_movement": { + "enum": [ + "horizontal_left", + "horizontal_right", + "vertical_up", + "vertical_down", + "zoom_in", + "zoom_out", + "crane_up", + "quickly_zoom_in", + "quickly_zoom_out", + "smooth_zoom_in", + "camera_rotation", + "robo_arm", + "super_dolly_out", + "whip_pan", + "hitchcock", + "left_follow", + "right_follow", + "pan_left", + "pan_right", + "fix_bg" + ], + "title": "Camera Movement", + "type": "string", + "description": "The type of camera movement to apply to the video" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/zebra/qL93Je8ezvzQgDOEzTjKF_KhGKZTEebZcDw6T5rwQPK_output.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to use as the first frame" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n " + }, + "negative_prompt": { + "examples": [ + "blurry, low quality, low resolution, pixelated, noisy, grainy, out of focus, poorly lit, poorly exposed, poorly composed, poorly framed, poorly cropped, poorly color corrected, poorly color graded" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt to be used for the generation", + "default": "" + } + }, + "x-fal-order-properties": [ + "prompt", + "resolution", + "negative_prompt", + "style", + "seed", + "image_url", + "camera_movement" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "PixverseV45ImageToVideoFastOutput": { + "title": "I2VOutputV4", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 6420765, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://fal.media/files/koala/HEWK7BBwqWrz7F5nAZzp7_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pixverse/v4.5/image-to-video/fast/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v4.5/image-to-video/fast/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v4.5/image-to-video/fast": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV45ImageToVideoFastInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v4.5/image-to-video/fast/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV45ImageToVideoFastOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pixverse/v4.5/effects", + "metadata": { + "display_name": "Pixverse", + "category": "image-to-video", + "description": "Generate high quality video clips with different effects using PixVerse v4.5", + "status": "active", + "tags": [ + "image-to-video" + ], + "updated_at": "2026-01-26T21:43:44.179Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "model_url": "https://fal.run/fal-ai/pixverse/v4.5/effects", + "license_type": "commercial", + "date": "2025-05-15T15:44:19.595Z", + "group": { + "key": "pixverse-45", + "label": "Effects v4.5" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pixverse/v4.5/effects", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pixverse/v4.5/effects queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pixverse/v4.5/effects", + "category": "image-to-video", + 
"thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/pixverse/v4.5/effects", + "documentationUrl": "https://fal.ai/models/fal-ai/pixverse/v4.5/effects/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PixverseV45EffectsInput": { + "title": "EffectInput", + "type": "object", + "properties": { + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt to be used for the generation", + "default": "" + }, + "duration": { + "enum": [ + "5", + "8" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds", + "default": "5" + }, + "resolution": { + "enum": [ + "360p", + "540p", + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video.", + "default": "720p" + }, + "effect": { + "enum": [ + "Kiss Me AI", + "Kiss", + "Muscle Surge", + "Warmth of Jesus", + "Anything, Robot", + "The Tiger Touch", + "Hug", + "Holy Wings", + "Microwave", + "Zombie Mode", + "Squid Game", + "Baby Face", + "Black Myth: Wukong", + "Long Hair Magic", + "Leggy Run", + "Fin-tastic Mermaid", + "Punch Face", + "Creepy Devil Smile", + "Thunder God", + "Eye Zoom Challenge", + "Who's Arrested?", + "Baby Arrived", + "Werewolf Rage", + "Bald Swipe", + "BOOM DROP", + "Huge Cutie", + "Liquid Metal", + "Sharksnap!", + "Dust Me Away", + "3D Figurine Factor", + "Bikini Up", + "My Girlfriends", + "My Boyfriends", + "Subject 3 Fever", + "Earth Zoom", + "Pole Dance", + "Vroom Dance", + "GhostFace Terror", + "Dragon Evoker", + "Skeletal Bae", + "Summoning succubus", + "Halloween Voodoo Doll", + "3D Naked-Eye AD", + "Package Explosion", + "Dishes Served", + "Ocean ad", + "Supermarket AD", + "Tree doll", + "Come Feel My Abs", + "The Bicep Flex", + "London Elite Vibe", + "Flora Nymph Gown", + "Christmas Costume", + "It's Snowy", + "Reindeer Cruiser", + "Snow Globe Maker", + "Pet Christmas Outfit", + "Adopt a Polar Pal", + "Cat Christmas Box", + "Starlight Gift Box", + "Xmas Poster", + "Pet Christmas Tree", + "City Santa Hat", + "Stocking Sweetie", + "Christmas Night", + "Xmas Front Page Karma", + "Grinch's Xmas Hijack", + "Giant Product", + "Truck Fashion Shoot", + "Beach AD", + "Shoal Surround", + "Mechanical Assembly", + "Lighting AD", + "Billboard AD", + "Product close-up", + "Parachute Delivery", + "Dreamlike Cloud", + "Macaron Machine", + "Poster AD", + "Truck AD", + "Graffiti AD", + "3D Figurine Factory", + "The Exclusive First Class", + "Art 
Zoom Challenge", + "I Quit", + "Hitchcock Dolly Zoom", + "Smell the Lens", + "I believe I can fly", + "Strikout Dance", + "Pixel World", + "Mint in Box", + "Hands up, Hand", + "Flora Nymph Go", + "Somber Embrace", + "Beam me up", + "Suit Swagger" + ], + "title": "Effect", + "type": "string", + "description": "The effect to apply to the video" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/koala/q5ahL3KS7ikt3MvpNUG8l_image%20(72).webp" + ], + "title": "Image Url", + "type": "string", + "description": "Optional URL of the image to use as the first frame. If not provided, generates from text" + } + }, + "x-fal-order-properties": [ + "effect", + "image_url", + "resolution", + "duration", + "negative_prompt" + ], + "required": [ + "effect", + "image_url" + ] + }, + "PixverseV45EffectsOutput": { + "title": "EffectOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 3232402, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://fal.media/files/koala/awGY1lJd7lVsqQeSqjWqn_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pixverse/v4.5/effects/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v4.5/effects/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v4.5/effects": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV45EffectsInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v4.5/effects/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV45EffectsOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/hunyuan-custom", + "metadata": { + "display_name": "Hunyuan Custom", + "category": "image-to-video", + "description": "HunyuanCustom revolutionizes video generation with unmatched identity consistency across multiple input types. Its innovative fusion modules and alignment networks outperform competitors, maintaining subject integrity while responding flexibly to text, image, audio, and video conditions.", + "status": "active", + "tags": [ + "image-to-video" + ], + "updated_at": "2026-01-26T21:43:44.310Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-3.jpg", + "model_url": "https://fal.run/fal-ai/hunyuan-custom", + "license_type": "commercial", + "date": "2025-05-14T10:59:20.323Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/hunyuan-custom", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/hunyuan-custom queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/hunyuan-custom", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-3.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/hunyuan-custom", + "documentationUrl": "https://fal.ai/models/fal-ai/hunyuan-custom/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "HunyuanCustomInput": { + "title": "HunyuanCustomRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Realistic, High-quality. A woman is playing a violin." 
+ ], + "title": "Prompt", + "type": "string", + "description": "Text prompt for video generation (max 500 characters)." + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16" + ], + "title": "Aspect Ratio (W:H)", + "type": "string", + "description": "The aspect ratio of the video to generate.", + "default": "16:9" + }, + "resolution": { + "enum": [ + "512p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the video to generate. 720p generations cost 1.5x more than 480p generations.", + "default": "512p" + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "num_frames": { + "minimum": 81, + "title": "Number of Frames", + "type": "integer", + "maximum": 129, + "description": "The number of frames to generate.", + "default": 129 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/hidream/woman.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image input." + }, + "fps": { + "minimum": 16, + "title": "Frames per second", + "type": "integer", + "maximum": 30, + "description": "The frames per second of the generated video.", + "default": 25 + }, + "enable_prompt_expansion": { + "examples": [ + true + ], + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": true + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed to use for generating the video." + }, + "num_inference_steps": { + "minimum": 10, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 30, + "description": "The number of inference steps to run. Lower gets faster results, higher gets better results.", + "default": 30 + }, + "negative_prompt": { + "examples": [ + "Ugly, blurry." + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for video generation.", + "default": "Aerial view, aerial view, overexposed, low quality, deformation, a poor composition, bad hands, bad teeth, bad eyes, bad limbs, distortion, blurring, text, subtitles, static, picture, black border." + }, + "cfg_scale": { + "minimum": 1.5, + "title": "CFG Scale", + "type": "number", + "maximum": 13, + "description": "Classifier-Free Guidance scale for the generation.", + "default": 7.5 + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "image_url", + "num_inference_steps", + "seed", + "aspect_ratio", + "resolution", + "fps", + "cfg_scale", + "num_frames", + "enable_prompt_expansion", + "enable_safety_checker" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "HunyuanCustomOutput": { + "title": "HunyuanCustomResponse", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generating the video." 
+ }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/test/p1/uQ4ddGyJ9U6cymnns0l6o_input-image-1747117169.mp4" + } + ], + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/hunyuan-custom/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-custom/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-custom": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HunyuanCustomInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-custom/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HunyuanCustomOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/framepack/f1", + "metadata": { + "display_name": "Framepack F1", + "category": "image-to-video", + "description": "Framepack is an efficient Image-to-video model that autoregressively generates videos.", + "status": "active", + "tags": [ + "image to video", + "motion" + ], + "updated_at": "2026-01-26T21:43:44.438Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/koala/dUfFd9Z7aSX06gL2_qXn0_image.webp", + "model_url": "https://fal.run/fal-ai/framepack/f1", + "license_type": "commercial", + "date": "2025-05-13T22:19:18.611Z", + "group": { + "key": "framepack", + "label": "F1" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 3, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/framepack/f1", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/framepack/f1 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/framepack/f1", + "category": "image-to-video", + "thumbnailUrl": "https://v3.fal.media/files/koala/dUfFd9Z7aSX06gL2_qXn0_image.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/framepack/f1", + "documentationUrl": "https://fal.ai/models/fal-ai/framepack/f1/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FramepackF1Input": { + "title": "FramePackF1Request", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A mesmerising video of a deep sea jellyfish moving through an inky-black ocean. The jellyfish glows softly with an amber bioluminescence. The overall scene is lifelike." 
+ ], + "title": "Prompt", + "type": "string", + "description": "Text prompt for video generation (max 500 characters)." + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16" + ], + "title": "Aspect Ratio (W:H)", + "type": "string", + "description": "The aspect ratio of the video to generate.", + "default": "16:9" + }, + "resolution": { + "enum": [ + "720p", + "480p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the video to generate. 720p generations cost 1.5x more than 480p generations.", + "default": "480p" + }, + "num_frames": { + "minimum": 30, + "title": "Number of Frames", + "type": "integer", + "maximum": 900, + "description": "The number of frames to generate.", + "default": 180 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/framepack/framepack.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image input." + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance Scale", + "type": "number", + "maximum": 32, + "description": "Guidance scale for the generation.", + "default": 10 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "The seed to use for generating the video." + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": false + }, + "negative_prompt": { + "examples": [ + "Ugly, blurry distorted, bad quality" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for video generation.", + "default": "" + }, + "cfg_scale": { + "minimum": 0, + "title": "CFG Scale", + "type": "number", + "maximum": 7, + "description": "Classifier-Free Guidance scale for the generation.", + "default": 1 + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "image_url", + "seed", + "aspect_ratio", + "resolution", + "cfg_scale", + "guidance_scale", + "num_frames", + "enable_safety_checker" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "FramepackF1Output": { + "title": "FramePackF1Response", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generating the video." + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/framepack/TfJPbwm6_D60dcWEv9LVX_output_video.mp4" + } + ], + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/framepack/f1/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/framepack/f1/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/framepack/f1": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FramepackF1Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/framepack/f1/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FramepackF1Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/vidu/q1/start-end-to-video", + "metadata": { + "display_name": "Vidu Start End to Video", + "category": "image-to-video", + "description": "Vidu Q1 Start-End to Video generates smooth transition 1080p videos between specified start and end images.", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:45.359Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Upscale-1.jpeg", + "model_url": "https://fal.run/fal-ai/vidu/q1/start-end-to-video", + "license_type": "commercial", + "date": "2025-05-09T03:12:59.443Z", + "group": { + "key": "vidu-q1", + "label": "Start End to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/vidu/q1/start-end-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/vidu/q1/start-end-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/vidu/q1/start-end-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Upscale-1.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/vidu/q1/start-end-to-video", + "documentationUrl": 
"https://fal.ai/models/fal-ai/vidu/q1/start-end-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ViduQ1StartEndToVideoInput": { + "title": "Q1StartEndToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Dragon lands on a rock" + ], + "title": "Prompt", + "type": "string", + "maxLength": 1500, + "description": "Text prompt for video generation, max 1500 characters" + }, + "start_image_url": { + "examples": [ + "https://v3.fal.media/files/zebra/sgsdKvPigPhJ1S7Hl5bWc_first_frame_q1.png" + ], + "title": "Start Image Url", + "type": "string", + "description": "URL of the image to use as the first frame" + }, + "movement_amplitude": { + "enum": [ + "auto", + "small", + "medium", + "large" + ], + "title": "Movement Amplitude", + "type": "string", + "description": "The movement amplitude of objects in the frame", + "default": "auto" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Seed for the random number generator" + }, + "end_image_url": { + "examples": [ + "https://v3.fal.media/files/kangaroo/CASBu_OmOnZ8IafirarFL_last_frame_q1.png" + ], + "title": "End Image Url", + "type": "string", + "description": "URL of the image to use as the last frame" + } + }, + "x-fal-order-properties": [ + "prompt", + "start_image_url", + "end_image_url", + "seed", + "movement_amplitude" + ], + "required": [ + "prompt", + "start_image_url", + "end_image_url" + ] + }, + "ViduQ1StartEndToVideoOutput": { + "title": "Q1StartEndToVideoOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://fal.media/files/zebra/-ACpJE6m4JQSBCCeNB51S_output.mp4" + } + ], + "title": "Video", + "description": "The generated transition video between start and end frames using the Q1 model", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/vidu/q1/start-end-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/vidu/q1/start-end-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/vidu/q1/start-end-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ViduQ1StartEndToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/vidu/q1/start-end-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ViduQ1StartEndToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/vidu/q1/image-to-video", + "metadata": { + "display_name": "Vidu Image to Video", + "category": "image-to-video", + "description": "Vidu Q1 Image to Video generates high-quality 1080p videos with exceptional visual quality and motion diversity from a single image", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:45.887Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Upscale-1.jpeg", + "model_url": "https://fal.run/fal-ai/vidu/q1/image-to-video", + "license_type": "commercial", + "date": "2025-05-09T03:08:30.159Z", + "group": { + "key": "vidu-q1", + "label": "Image to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/vidu/q1/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/vidu/q1/image-to-video 
queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/vidu/q1/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Upscale-1.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/vidu/q1/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/vidu/q1/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ViduQ1ImageToVideoInput": { + "title": "Q1ImageToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "The astronaut waved and the camera moved up." + ], + "title": "Prompt", + "type": "string", + "maxLength": 1500, + "description": "Text prompt for video generation, max 1500 characters" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Seed for the random number generator" + }, + "movement_amplitude": { + "enum": [ + "auto", + "small", + "medium", + "large" + ], + "title": "Movement Amplitude", + "type": "string", + "description": "The movement amplitude of objects in the frame", + "default": "auto" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/video_models/vidu_i2v.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to use as the first frame" + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "seed", + "movement_amplitude" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "ViduQ1ImageToVideoOutput": { + "title": "Q1ImageToVideoOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/model_tests/video_models/vidu_i2v_output.mp4" + } + ], + "title": "Video", + "description": "The generated video using the Q1 model from a single image", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/vidu/q1/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/vidu/q1/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/vidu/q1/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ViduQ1ImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/vidu/q1/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ViduQ1ImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/magi/image-to-video", + "metadata": { + "display_name": "MAGI-1", + "category": "image-to-video", + "description": "MAGI-1 generates videos from images with exceptional understanding of physical interactions and prompting", + "status": "active", + "tags": [ + "image-to-video" + ], + "updated_at": "2026-01-26T21:43:52.910Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-2.jpg", + "model_url": "https://fal.run/fal-ai/magi/image-to-video", + "license_type": "commercial", + "date": "2025-04-23T22:31:55.055Z", + "group": { + "key": "magi", + "label": "Image-to-Video" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 9, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/magi/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/magi/image-to-video queue.", + "x-fal-metadata": { + "endpointId": 
"fal-ai/magi/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-2.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/magi/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/magi/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MagiImageToVideoInput": { + "title": "MagiImageToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A crisp, wintery mountain landscape unfolds as a snowboarder, equipped with a selfie pole, gracefully navigates a snow-covered slope, the camera perspective offering an exhilarating attached-third-person view of the descent; the vibrant, snowy scenery sweeps past, punctuated by moments of controlled spins and effortless glides, creating a dynamic visual rhythm that complements the exhilarating pace of the ride; as the snowboarder carves through pristine powder, the camera captures fleeting moments of breathtaking views—towering pines dusted with snow, sunlit peaks piercing a cerulean sky—a symphony of nature’s grandeur displayed for the viewer to share; a sense of freedom and exhilaration permeates the scene, punctuated by the subtle whoosh of wind and the satisfying crunch of snow, culminating in a breathtaking panorama as the snowboarder reaches the bottom, leaving the viewer with a lingering sense of wonder and the desire to experience the thrill firsthand." + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt to guide video generation." + }, + "resolution": { + "enum": [ + "480p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video (480p or 720p). 480p is 0.5 billing units, and 720p is 1 billing unit.", + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "9:16", + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio of the generated video. If 'auto', the aspect ratio will be determined automatically based on the input image.", + "default": "auto" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/kangaroo/sGqTf5scZcC5VNfOLbxwE_maxresdefault-2740110268.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the input image to represent the first frame of the video. If the input image does not match the chosen aspect ratio, it is resized and center cropped." 
+ }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "num_inference_steps": { + "enum": [ + 4, + 8, + 16, + 32, + 64 + ], + "title": "Num Inference Steps", + "type": "integer", + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "default": 16 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility. If None, a random seed is chosen." + }, + "num_frames": { + "minimum": 96, + "title": "Num Frames", + "type": "integer", + "maximum": 192, + "description": "Number of frames to generate. Must be between 96 and 192 (inclusive). Each additional 24 frames beyond 96 incurs an additional billing unit.", + "default": 96 + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "num_frames", + "seed", + "resolution", + "num_inference_steps", + "enable_safety_checker", + "aspect_ratio" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "MagiImageToVideoOutput": { + "title": "MagiImageToVideoResponse", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/penguin/sSJdxpy9oEBqZpGIh3SPq_3381fe86-9bab-4ce4-9c3a-5db66984618a.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/magi/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/magi/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/magi/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MagiImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/magi/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MagiImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pixverse/v4/effects", + "metadata": { + "display_name": "Pixverse", + "category": "image-to-video", + "description": "Generate high quality video clips with different effects using PixVerse v4", + "status": "active", + "tags": [ + "image-to-video" + ], + "updated_at": "2026-01-26T21:43:53.285Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "model_url": "https://fal.run/fal-ai/pixverse/v4/effects", + "license_type": "commercial", + "date": "2025-04-23T17:11:05.284Z", + "group": { + "key": "pixverse", + "label": "Effects v4" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pixverse/v4/effects", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pixverse/v4/effects queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pixverse/v4/effects", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/pixverse/v4/effects", + "documentationUrl": "https://fal.ai/models/fal-ai/pixverse/v4/effects/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PixverseV4EffectsInput": { + "title": "EffectInput", + "type": "object", + "properties": { + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt to be used for the generation", + "default": "" + }, + "duration": { + "enum": [ + "5", + "8" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds", + "default": "5" + }, + "resolution": { + "enum": [ + "360p", + "540p", + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video.", + "default": "720p" + }, + "effect": { + "enum": [ + "Kiss Me AI", + "Kiss", + "Muscle Surge", + "Warmth of Jesus", + "Anything, Robot", + "The Tiger Touch", + "Hug", + "Holy Wings", + "Microwave", + "Zombie Mode", + "Squid Game", + "Baby Face", + "Black Myth: Wukong", + "Long Hair Magic", + "Leggy Run", + "Fin-tastic Mermaid", + "Punch Face", + "Creepy Devil Smile", + "Thunder God", + "Eye Zoom Challenge", + "Who's Arrested?", + "Baby Arrived", + "Werewolf Rage", + "Bald Swipe", + "BOOM DROP", + "Huge Cutie", + "Liquid Metal", + "Sharksnap!", + "Dust Me Away", + "3D Figurine Factor", + "Bikini Up", + "My Girlfriends", + "My Boyfriends", + "Subject 3 Fever", + "Earth Zoom", + "Pole Dance", + "Vroom Dance", + "GhostFace Terror", + "Dragon Evoker", + "Skeletal Bae", + "Summoning succubus", + "Halloween Voodoo Doll", + "3D Naked-Eye AD", + "Package Explosion", + "Dishes Served", + "Ocean ad", + "Supermarket AD", + "Tree doll", + "Come Feel My Abs", + "The Bicep Flex", + "London Elite Vibe", + "Flora Nymph Gown", + "Christmas Costume", + "It's Snowy", + "Reindeer Cruiser", + "Snow Globe Maker", + "Pet Christmas Outfit", + "Adopt a Polar Pal", + "Cat Christmas Box", + "Starlight Gift Box", + "Xmas Poster", + "Pet Christmas Tree", + "City Santa Hat", + "Stocking Sweetie", + "Christmas Night", + "Xmas Front Page Karma", + "Grinch's Xmas Hijack", + "Giant Product", + "Truck Fashion Shoot", + "Beach AD", + "Shoal Surround", + "Mechanical Assembly", + "Lighting AD", + "Billboard AD", + "Product close-up", + "Parachute Delivery", + "Dreamlike Cloud", + "Macaron Machine", + "Poster AD", + "Truck AD", + "Graffiti AD", + "3D Figurine Factory", + "The Exclusive First Class", + "Art Zoom Challenge", + "I Quit", + "Hitchcock Dolly Zoom", + "Smell the Lens", + "I believe I can fly", + "Strikout Dance", + "Pixel World", + "Mint in Box", + "Hands up, Hand", + "Flora Nymph Go", + "Somber Embrace", + "Beam me up", + "Suit Swagger" + ], + "title": "Effect", + "type": "string", + "description": "The effect to apply to the video" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/koala/q5ahL3KS7ikt3MvpNUG8l_image%20(72).webp" + ], + "title": "Image Url", + "type": "string", + "description": "Optional URL of the image to use as the first frame. 
If not provided, generates from text" + } + }, + "x-fal-order-properties": [ + "effect", + "image_url", + "resolution", + "duration", + "negative_prompt" + ], + "required": [ + "effect", + "image_url" + ] + }, + "PixverseV4EffectsOutput": { + "title": "EffectOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 3232402, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://fal.media/files/koala/awGY1lJd7lVsqQeSqjWqn_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pixverse/v4/effects/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v4/effects/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v4/effects": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV4EffectsInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v4/effects/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV4EffectsOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/magi-distilled/image-to-video", + "metadata": { + "display_name": "MAGI-1 (Distilled)", + "category": "image-to-video", + "description": "MAGI-1 distilled generates videos faster from images with exceptional understanding of physical interactions and prompting", + "status": "active", + "tags": [ + "image-to-video" + ], + "updated_at": "2026-01-26T21:43:53.591Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-2.jpg", + "model_url": "https://fal.run/fal-ai/magi-distilled/image-to-video", + "license_type": "commercial", + "date": "2025-04-23T02:26:54.535Z", + "group": { + "key": "magi", + "label": "Image-to-Video (Distilled)" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/magi-distilled/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/magi-distilled/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/magi-distilled/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-2.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/magi-distilled/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/magi-distilled/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "MagiDistilledImageToVideoInput": { + "title": "MagiImageToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Close-up shot: the old sea captain stares intently, pipe in mouth, wisps of smoke curling around his weathered face. The camera begins to pull back out over the ocean. Finally, the camera sinks below the waves deeply, fading to dark blue and finally to black." + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt to guide video generation." + }, + "resolution": { + "enum": [ + "480p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video (480p or 720p). 480p is 0.5 billing units, and 720p is 1 billing unit.", + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "9:16", + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio of the generated video. If 'auto', the aspect ratio will be determined automatically based on the input image.", + "default": "auto" + }, + "image_url": { + "examples": [ + "https://raw.githubusercontent.com/painebenjamin/pointy-seeds/refs/heads/main/captain-start.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the input image to represent the first frame of the video. If the input image does not match the chosen aspect ratio, it is resized and center cropped." + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "num_inference_steps": { + "enum": [ + 4, + 8, + 16, + 32 + ], + "title": "Num Inference Steps", + "type": "integer", + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "default": 16 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility. If None, a random seed is chosen." + }, + "num_frames": { + "minimum": 96, + "title": "Num Frames", + "type": "integer", + "maximum": 192, + "description": "Number of frames to generate. Must be between 96 and 192 (inclusive). Each additional 24 frames beyond 96 incurs an additional billing unit.", + "default": 96 + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "num_frames", + "seed", + "resolution", + "num_inference_steps", + "enable_safety_checker", + "aspect_ratio" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "MagiDistilledImageToVideoOutput": { + "title": "MagiImageToVideoResponse", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/zebra/3XmM6yIGZEWxwbbDyTkhw_391bee80-b756-425d-b74c-f9083c7eec4f.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." 
+ }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/magi-distilled/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/magi-distilled/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/magi-distilled/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MagiDistilledImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/magi-distilled/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MagiDistilledImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/framepack/flf2v", + "metadata": { + "display_name": "Framepack", + "category": "image-to-video", + "description": "Framepack is an efficient Image-to-video model that autoregressively generates videos.", + "status": "active", + "tags": [ + "image to video", + "motion" + ], + "updated_at": "2026-01-26T21:43:53.841Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-5.jpg", + "model_url": "https://fal.run/fal-ai/framepack/flf2v", + "license_type": "commercial", + "date": "2025-04-22T20:06:23.895Z", + "group": { + "key": "framepack", + "label": "First-to-Last-Frame" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/framepack/flf2v", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/framepack/flf2v queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/framepack/flf2v", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-5.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/framepack/flf2v", + "documentationUrl": "https://fal.ai/models/fal-ai/framepack/flf2v/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FramepackFlf2vInput": { + "title": "FramePackF2LFRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A tabby cat is confidently strolling toward the camera, when it spins and with a flash of magic reveals itself to be a cat-dragon hybrid with glistening amber scales." + ], + "title": "Prompt", + "type": "string", + "description": "Text prompt for video generation (max 500 characters)." + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16" + ], + "title": "Aspect Ratio (W:H)", + "type": "string", + "description": "The aspect ratio of the video to generate.", + "default": "16:9" + }, + "resolution": { + "enum": [ + "720p", + "480p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the video to generate. 720p generations cost 1.5x more than 480p generations.", + "default": "480p" + }, + "num_frames": { + "minimum": 30, + "title": "Number of Frames", + "type": "integer", + "maximum": 1800, + "description": "The number of frames to generate.", + "default": 240 + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": false + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/web-examples/wan_flf/first_frame.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image input." + }, + "strength": { + "minimum": 0, + "title": "Strength of last frame", + "type": "number", + "maximum": 1, + "description": "Determines the influence of the final frame on the generated video. Higher values result in the output being more heavily influenced by the last frame.", + "default": 0.8 + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance Scale", + "type": "number", + "maximum": 32, + "description": "Guidance scale for the generation.", + "default": 10 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "The seed to use for generating the video." + }, + "end_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/web-examples/wan_flf/last_frame.png" + ], + "title": "End Image Url", + "type": "string", + "description": "URL of the end image input." + }, + "negative_prompt": { + "examples": [ + "Ugly, blurry distorted, bad quality" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for video generation.", + "default": "" + }, + "cfg_scale": { + "minimum": 0, + "title": "CFG Scale", + "type": "number", + "maximum": 7, + "description": "Classifier-Free Guidance scale for the generation.", + "default": 1 + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "image_url", + "seed", + "aspect_ratio", + "resolution", + "cfg_scale", + "guidance_scale", + "num_frames", + "enable_safety_checker", + "end_image_url", + "strength" + ], + "required": [ + "prompt", + "image_url", + "end_image_url" + ] + }, + "FramepackFlf2vOutput": { + "title": "FramePackFLF2VResponse", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generating the video."
+ }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/flf2v.mp4" + } + ], + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/framepack/flf2v/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/framepack/flf2v/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/framepack/flf2v": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FramepackFlf2vInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/framepack/flf2v/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FramepackFlf2vOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan-flf2v", + "metadata": { + "display_name": "Wan-2.1 First-Last-Frame-to-Video", + "category": "image-to-video", + "description": "Wan-2.1 flf2v generates dynamic videos by intelligently bridging a given first frame to a desired end frame through smooth, coherent motion sequences.", + "status": "active", + "tags": [ + "image to video", + "motion" + ], + "updated_at": "2026-01-26T21:43:55.175Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-2.jpg", + "model_url": "https://fal.run/fal-ai/wan-flf2v", + "license_type": "commercial", + "date": "2025-04-17T22:26:51.565Z", + "group": { + "key": "wan-2.1", + "label": "First-to-Last-Frame" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan-flf2v", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan-flf2v queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan-flf2v", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-2.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan-flf2v", + "documentationUrl": "https://fal.ai/models/fal-ai/wan-flf2v/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanFlf2vInput": { + "title": "WanFLF2VRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A tabby cat is confidently strolling toward the camera, when it spins and with a flash of magic reveals itself to be a cat-dragon hybrid with glistening amber scales." + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt to guide video generation." + }, + "shift": { + "minimum": 1, + "maximum": 10, + "type": "number", + "title": "Shift", + "description": "Shift parameter for video generation.", + "default": 5 + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "title": "Acceleration", + "type": "string", + "description": "Acceleration level to use. The more acceleration, the faster the generation, but with lower quality. The recommended value is 'regular'.", + "examples": [ + "regular" + ], + "default": "regular" + }, + "frames_per_second": { + "minimum": 5, + "maximum": 24, + "type": "integer", + "title": "Frames Per Second", + "description": "Frames per second of the generated video. Must be between 5 and 24.", + "default": 16 + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": false + }, + "start_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/web-examples/wan_flf/first_frame.png" + ], + "title": "Start Image Url", + "type": "string", + "description": "URL of the starting image. If the input image does not match the chosen aspect ratio, it is resized and center cropped." + }, + "end_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/web-examples/wan_flf/last_frame.png" + ], + "title": "End Image Url", + "type": "string", + "description": "URL of the ending image. If the input image does not match the chosen aspect ratio, it is resized and center cropped." + }, + "negative_prompt": { + "examples": [ + "bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for video generation.", + "default": "bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + }, + "num_frames": { + "minimum": 81, + "maximum": 100, + "type": "integer", + "title": "Num Frames", + "description": "Number of frames to generate. Must be between 81 and 100 (inclusive). If the number of frames is greater than 81, the video will be generated with 1.25x more billing units.", + "default": 81 + }, + "resolution": { + "enum": [ + "480p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video (480p or 720p). 
480p is 0.5 billing units, and 720p is 1 billing unit.", + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "9:16", + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio of the generated video. If 'auto', the aspect ratio will be determined automatically based on the input image.", + "default": "auto" + }, + "enable_prompt_expansion": { + "examples": [ + false + ], + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility. If None, a random seed is chosen." + }, + "guide_scale": { + "minimum": 1, + "maximum": 10, + "type": "number", + "title": "Guide Scale", + "description": "Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.", + "default": 5 + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 40, + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "default": 30 + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "start_image_url", + "end_image_url", + "num_frames", + "frames_per_second", + "seed", + "resolution", + "num_inference_steps", + "guide_scale", + "shift", + "enable_safety_checker", + "enable_prompt_expansion", + "acceleration", + "aspect_ratio" + ], + "required": [ + "prompt", + "start_image_url", + "end_image_url" + ] + }, + "WanFlf2vOutput": { + "title": "WanFLF2VResponse", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/flf2v.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan-flf2v/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-flf2v/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/wan-flf2v": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanFlf2vInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-flf2v/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanFlf2vOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/framepack", + "metadata": { + "display_name": "Framepack", + "category": "image-to-video", + "description": "Framepack is an efficient Image-to-video model that autoregressively generates videos.", + "status": "active", + "tags": [ + "image to video", + "motion" + ], + "updated_at": "2026-01-26T21:43:55.511Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/koala/dUfFd9Z7aSX06gL2_qXn0_image.webp", + "model_url": "https://fal.run/fal-ai/framepack", + "license_type": "commercial", + "date": "2025-04-17T17:10:10.829Z", + "group": { + "key": "framepack", + "label": "Image-to-Video" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 3, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/framepack", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/framepack queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/framepack", + "category": "image-to-video", + "thumbnailUrl": "https://v3.fal.media/files/koala/dUfFd9Z7aSX06gL2_qXn0_image.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/framepack", + "documentationUrl": "https://fal.ai/models/fal-ai/framepack/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FramepackInput": { + "title": "FramePackRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A mesmerising video of a deep sea jellyfish moving through an inky-black ocean. The jellyfish glows softly with an amber bioluminescence. The overall scene is lifelike." + ], + "title": "Prompt", + "type": "string", + "description": "Text prompt for video generation (max 500 characters)." + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16" + ], + "title": "Aspect Ratio (W:H)", + "type": "string", + "description": "The aspect ratio of the video to generate.", + "default": "16:9" + }, + "resolution": { + "enum": [ + "720p", + "480p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the video to generate. 720p generations cost 1.5x more than 480p generations.", + "default": "480p" + }, + "num_frames": { + "minimum": 30, + "title": "Number of Frames", + "type": "integer", + "maximum": 900, + "description": "The number of frames to generate.", + "default": 180 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/framepack/framepack.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image input." + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance Scale", + "type": "number", + "maximum": 32, + "description": "Guidance scale for the generation.", + "default": 10 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "The seed to use for generating the video." + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": false + }, + "negative_prompt": { + "examples": [ + "Ugly, blurry distorted, bad quality" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for video generation.", + "default": "" + }, + "cfg_scale": { + "minimum": 0, + "title": "CFG Scale", + "type": "number", + "maximum": 7, + "description": "Classifier-Free Guidance scale for the generation.", + "default": 1 + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "image_url", + "seed", + "aspect_ratio", + "resolution", + "cfg_scale", + "guidance_scale", + "num_frames", + "enable_safety_checker" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "FramepackOutput": { + "title": "FramePackResponse", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generating the video." 
+ }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/framepack/TfJPbwm6_D60dcWEv9LVX_output_video.mp4" + } + ], + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/framepack/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/framepack/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/framepack": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FramepackInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/framepack/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FramepackOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pixverse/v4/image-to-video/fast", + "metadata": { + "display_name": "PixVerse v4: Image to Video Fast", + "category": "image-to-video", + "description": "Generate fast high quality video clips from text and image prompts using PixVerse v4", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:58.625Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "model_url": "https://fal.run/fal-ai/pixverse/v4/image-to-video/fast", + "license_type": "commercial", + "date": "2025-04-01T02:56:53.842Z", + "group": { + "key": "pixverse", + "label": "Image to Video v4 (Fast)" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pixverse/v4/image-to-video/fast", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pixverse/v4/image-to-video/fast queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pixverse/v4/image-to-video/fast", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/pixverse/v4/image-to-video/fast", + "documentationUrl": "https://fal.ai/models/fal-ai/pixverse/v4/image-to-video/fast/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "PixverseV4ImageToVideoFastInput": { + "title": "FastImageToVideoRequestV4", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A woman warrior with her hammer walking with her glacier wolf." + ], + "title": "Prompt", + "type": "string" + }, + "resolution": { + "enum": [ + "360p", + "540p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video", + "default": "720p" + }, + "style": { + "enum": [ + "anime", + "3d_animation", + "clay", + "comic", + "cyberpunk" + ], + "title": "Style", + "type": "string", + "description": "The style of the generated video" + }, + "camera_movement": { + "enum": [ + "horizontal_left", + "horizontal_right", + "vertical_up", + "vertical_down", + "zoom_in", + "zoom_out", + "crane_up", + "quickly_zoom_in", + "quickly_zoom_out", + "smooth_zoom_in", + "camera_rotation", + "robo_arm", + "super_dolly_out", + "whip_pan", + "hitchcock", + "left_follow", + "right_follow", + "pan_left", + "pan_right", + "fix_bg" + ], + "title": "Camera Movement", + "type": "string", + "description": "The type of camera movement to apply to the video" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/zebra/qL93Je8ezvzQgDOEzTjKF_KhGKZTEebZcDw6T5rwQPK_output.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to use as the first frame" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n " + }, + "negative_prompt": { + "examples": [ + "blurry, low quality, low resolution, pixelated, noisy, grainy, out of focus, poorly lit, poorly exposed, poorly composed, poorly framed, poorly cropped, poorly color corrected, poorly color graded" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt to be used for the generation", + "default": "" + } + }, + "x-fal-order-properties": [ + "prompt", + "resolution", + "negative_prompt", + "style", + "seed", + "image_url", + "camera_movement" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "PixverseV4ImageToVideoFastOutput": { + "title": "I2VOutputV4", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 6420765, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://fal.media/files/koala/HEWK7BBwqWrz7F5nAZzp7_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from."
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pixverse/v4/image-to-video/fast/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v4/image-to-video/fast/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v4/image-to-video/fast": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV4ImageToVideoFastInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v4/image-to-video/fast/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV4ImageToVideoFastOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pixverse/v4/image-to-video", + "metadata": { + "display_name": "PixVerse v4: Image to Video", + "category": "image-to-video", + "description": "Generate high quality video clips from text and image prompts using PixVerse v4", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:58.750Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "model_url": "https://fal.run/fal-ai/pixverse/v4/image-to-video", + "license_type": "commercial", + "date": "2025-04-01T02:54:43.687Z", + "group": { + "key": "pixverse", + "label": "Image to Video v4" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pixverse/v4/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pixverse/v4/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pixverse/v4/image-to-video", + "category": 
"image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/pixverse/v4/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/pixverse/v4/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PixverseV4ImageToVideoInput": { + "title": "ImageToVideoRequestV4", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A woman warrior with her hammer walking with his glacier wolf." + ], + "title": "Prompt", + "type": "string" + }, + "resolution": { + "enum": [ + "360p", + "540p", + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video", + "default": "720p" + }, + "duration": { + "enum": [ + "5", + "8" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds. 8s videos cost double. 
1080p videos are limited to 5 seconds", + "default": "5" + }, + "style": { + "enum": [ + "anime", + "3d_animation", + "clay", + "comic", + "cyberpunk" + ], + "title": "Style", + "type": "string", + "description": "The style of the generated video" + }, + "camera_movement": { + "enum": [ + "horizontal_left", + "horizontal_right", + "vertical_up", + "vertical_down", + "zoom_in", + "zoom_out", + "crane_up", + "quickly_zoom_in", + "quickly_zoom_out", + "smooth_zoom_in", + "camera_rotation", + "robo_arm", + "super_dolly_out", + "whip_pan", + "hitchcock", + "left_follow", + "right_follow", + "pan_left", + "pan_right", + "fix_bg" + ], + "title": "Camera Movement", + "type": "string", + "description": "The type of camera movement to apply to the video" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/zebra/qL93Je8ezvzQgDOEzTjKF_KhGKZTEebZcDw6T5rwQPK_output.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to use as the first frame" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n " + }, + "negative_prompt": { + "examples": [ + "blurry, low quality, low resolution, pixelated, noisy, grainy, out of focus, poorly lit, poorly exposed, poorly composed, poorly framed, poorly cropped, poorly color corrected, poorly color graded" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt to be used for the generation", + "default": "" + } + }, + "x-fal-order-properties": [ + "prompt", + "resolution", + "duration", + "negative_prompt", + "style", + "seed", + "image_url", + "camera_movement" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "PixverseV4ImageToVideoOutput": { + "title": "I2VOutputV4", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 6420765, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://fal.media/files/koala/HEWK7BBwqWrz7F5nAZzp7_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pixverse/v4/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v4/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v4/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV4ImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v4/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV4ImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pixverse/v3.5/effects", + "metadata": { + "display_name": "PixVerse v3.5: Effects", + "category": "image-to-video", + "description": "Generate high quality video clips with different effects using PixVerse v3.5", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:58.875Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "model_url": "https://fal.run/fal-ai/pixverse/v3.5/effects", + "license_type": "commercial", + "date": "2025-04-01T02:53:00.990Z", + "group": { + "key": "pixverse", + "label": "Effects" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pixverse/v3.5/effects", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pixverse/v3.5/effects queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pixverse/v3.5/effects", + "category": "image-to-video", + "thumbnailUrl": 
"https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/pixverse/v3.5/effects", + "documentationUrl": "https://fal.ai/models/fal-ai/pixverse/v3.5/effects/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PixverseV35EffectsInput": { + "title": "EffectInput", + "type": "object", + "properties": { + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt to be used for the generation", + "default": "" + }, + "duration": { + "enum": [ + "5", + "8" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds", + "default": "5" + }, + "resolution": { + "enum": [ + "360p", + "540p", + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video.", + "default": "720p" + }, + "effect": { + "enum": [ + "Kiss Me AI", + "Kiss", + "Muscle Surge", + "Warmth of Jesus", + "Anything, Robot", + "The Tiger Touch", + "Hug", + "Holy Wings", + "Microwave", + "Zombie Mode", + "Squid Game", + "Baby Face", + "Black Myth: Wukong", + "Long Hair Magic", + "Leggy Run", + "Fin-tastic Mermaid", + "Punch Face", + "Creepy Devil Smile", + "Thunder God", + "Eye Zoom Challenge", + "Who's Arrested?", + "Baby Arrived", + "Werewolf Rage", + "Bald Swipe", + "BOOM DROP", + "Huge Cutie", + "Liquid Metal", + "Sharksnap!", + "Dust Me Away", + "3D Figurine Factor", + "Bikini Up", + "My Girlfriends", + "My Boyfriends", + "Subject 3 Fever", + "Earth Zoom", + "Pole Dance", + "Vroom Dance", + "GhostFace Terror", + "Dragon Evoker", + "Skeletal Bae", + "Summoning succubus", + "Halloween Voodoo Doll", + "3D Naked-Eye AD", + "Package Explosion", + "Dishes Served", + "Ocean ad", + "Supermarket AD", + "Tree doll", + "Come Feel My Abs", + "The Bicep Flex", + "London Elite Vibe", + "Flora Nymph Gown", + "Christmas Costume", + "It's Snowy", + "Reindeer Cruiser", + "Snow Globe Maker", + "Pet Christmas Outfit", + "Adopt a Polar Pal", + "Cat Christmas Box", + "Starlight Gift Box", + "Xmas Poster", + "Pet Christmas Tree", + "City Santa Hat", + "Stocking Sweetie", + "Christmas Night", + "Xmas Front Page Karma", + "Grinch's Xmas Hijack", + "Giant Product", + "Truck Fashion Shoot", + "Beach AD", + "Shoal Surround", + "Mechanical Assembly", + "Lighting AD", + "Billboard AD", + "Product close-up", + "Parachute Delivery", + "Dreamlike Cloud", + "Macaron Machine", + "Poster AD", + "Truck AD", + "Graffiti AD", + "3D Figurine Factory", + "The Exclusive First Class", + "Art Zoom Challenge", 
+ "I Quit", + "Hitchcock Dolly Zoom", + "Smell the Lens", + "I believe I can fly", + "Strikout Dance", + "Pixel World", + "Mint in Box", + "Hands up, Hand", + "Flora Nymph Go", + "Somber Embrace", + "Beam me up", + "Suit Swagger" + ], + "title": "Effect", + "type": "string", + "description": "The effect to apply to the video" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/koala/q5ahL3KS7ikt3MvpNUG8l_image%20(72).webp" + ], + "title": "Image Url", + "type": "string", + "description": "Optional URL of the image to use as the first frame. If not provided, generates from text" + } + }, + "x-fal-order-properties": [ + "effect", + "image_url", + "resolution", + "duration", + "negative_prompt" + ], + "required": [ + "effect", + "image_url" + ] + }, + "PixverseV35EffectsOutput": { + "title": "EffectOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 3232402, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://fal.media/files/koala/awGY1lJd7lVsqQeSqjWqn_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pixverse/v3.5/effects/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v3.5/effects/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v3.5/effects": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV35EffectsInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v3.5/effects/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV35EffectsOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pixverse/v3.5/transition", + "metadata": { + "display_name": "PixVerse v3.5: Transition", + "category": "image-to-video", + "description": "Create seamless transition between images using PixVerse v3.5", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:59.128Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "model_url": "https://fal.run/fal-ai/pixverse/v3.5/transition", + "license_type": "commercial", + "date": "2025-04-01T02:35:42.865Z", + "group": { + "key": "pixverse", + "label": "Transition" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pixverse/v3.5/transition", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pixverse/v3.5/transition queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pixverse/v3.5/transition", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/pixverse/v3.5/transition", + "documentationUrl": "https://fal.ai/models/fal-ai/pixverse/v3.5/transition/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "PixverseV35TransitionInput": { + "title": "TransitionRequest", + "type": "object", + "properties": { + "first_image_url": { + "examples": [ + "https://v3.fal.media/files/zebra/owQh2DAzk8UU7J02nr5RY_Co2P4boLv6meIZ5t9gKvL_8685da151df343ab8bf82165c928e2a5.jpg" + ], + "title": "First Image Url", + "type": "string", + "description": "URL of the image to use as the first frame" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "4:3", + "1:1", + "3:4", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video", + "default": "16:9" + }, + "resolution": { + "enum": [ + "360p", + "540p", + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video", + "default": "720p" + }, + "style": { + "enum": [ + "anime", + "3d_animation", + "clay", + "comic", + "cyberpunk" + ], + "title": "Style", + "type": "string", + "description": "The style of the generated video" + }, + "prompt": { + "examples": [ + "Scene slowly transition into cat swimming under water" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt for the transition" + }, + "duration": { + "enum": [ + "5", + "8" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds", + "default": "5" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n " + }, + "end_image_url": { + "examples": [ + "https://v3.fal.media/files/kangaroo/RgedFs_WSnq5BgER7qDx1_ONrbTJ1YAGXz-9JnSsBoB_bdc8750387734bfe940319f469f7b0b2.jpg" + ], + "title": "End Image Url", + "type": "string", + "description": "URL of the image to use as the last frame" + }, + "negative_prompt": { + "examples": [ + "blurry, low quality, low resolution, pixelated, noisy, grainy, out of focus, poorly lit, poorly exposed, poorly composed, poorly framed, poorly cropped, poorly color corrected, poorly color graded" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt to be used for the generation", + "default": "" + } + }, + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "resolution", + "duration", + "negative_prompt", + "style", + "seed", + "first_image_url", + "end_image_url" + ], + "required": [ + "prompt", + "first_image_url" + ] + }, + "PixverseV35TransitionOutput": { + "title": "TransitionOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 3890360, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://fal.media/files/panda/5KmKS-mh1vO-htbqE5oex_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." 
+ }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pixverse/v3.5/transition/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v3.5/transition/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v3.5/transition": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV35TransitionInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v3.5/transition/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV35TransitionOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/luma-dream-machine/ray-2-flash/image-to-video", + "metadata": { + "display_name": "Luma Ray 2 Flash (Image to Video)", + "category": "image-to-video", + "description": "Ray2 Flash is a fast video generative model capable of creating realistic visuals with natural, coherent motion.", + "status": "active", + "tags": [ + "motion", + "transformation" + ], + "updated_at": "2026-01-26T21:44:19.627Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/luma-dream-machine-ray-2.webp", + "model_url": "https://fal.run/fal-ai/luma-dream-machine/ray-2-flash/image-to-video", + "date": "2025-03-17T00:00:00.000Z", + "group": { + "key": "luma-dream-machine", + "label": "Image to Video (Ray 2 Flash)" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": 
"3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/luma-dream-machine/ray-2-flash/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/luma-dream-machine/ray-2-flash/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/luma-dream-machine/ray-2-flash/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/luma-dream-machine-ray-2.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/luma-dream-machine/ray-2-flash/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/luma-dream-machine/ray-2-flash/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LumaDreamMachineRay2FlashImageToVideoInput": { + "title": "Ray2ImageToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A stylish woman walks down a Tokyo street filled with warm glowing neon and animated city signage." + ], + "title": "Prompt", + "type": "string", + "maxLength": 5000, + "minLength": 3 + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "4:3", + "3:4", + "21:9", + "9:21" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video", + "default": "16:9" + }, + "resolution": { + "enum": [ + "540p", + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video (720p costs 2x more, 1080p costs 4x more)", + "default": "540p" + }, + "loop": { + "title": "Loop", + "type": "boolean", + "description": "Whether the video should loop (end of video is blended with the beginning)", + "default": false + }, + "duration": { + "enum": [ + "5s", + "9s" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video", + "default": "5s" + }, + "image_url": { + "examples": [ + "https://fal.media/files/elephant/8kkhB12hEZI2kkbU8pZPA_test.jpeg" + ], + "title": "Image Url", + "type": "string", + "description": "Initial image to start the video from. Can be used together with end_image_url." + }, + "end_image_url": { + "title": "End Image Url", + "type": "string", + "description": "Final image to end the video with. Can be used together with image_url." 
+ } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "end_image_url", + "aspect_ratio", + "loop", + "resolution", + "duration" + ], + "required": [ + "prompt" + ] + }, + "LumaDreamMachineRay2FlashImageToVideoOutput": { + "title": "Ray2I2VOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/zebra/9aDde3Te2kuJYHdR0Kz8R_output.mp4" + } + ], + "title": "Video", + "description": "URL of the generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/luma-dream-machine/ray-2-flash/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/luma-dream-machine/ray-2-flash/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/luma-dream-machine/ray-2-flash/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LumaDreamMachineRay2FlashImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/luma-dream-machine/ray-2-flash/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LumaDreamMachineRay2FlashImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pika/v1.5/pikaffects", + "metadata": { + "display_name": "Pika Effects (v1.5)", + "category": "image-to-video", + "description": "Pika Effects are AI-powered video effects designed to modify objects, characters, and environments in a fun, engaging, and visually compelling manner.", + "status": "active", + "tags": [ + "editing", + "effects", + "animation" + ], + "updated_at": "2026-01-26T21:44:01.523Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/kangaroo/2uSfx4xu1fXv4am4PvLAm_499f61b93f924a7496982491a87fb169.jpg", + "model_url": "https://fal.run/fal-ai/pika/v1.5/pikaffects", + "license_type": "commercial", + "date": "2025-03-14T00:00:00.000Z", + "group": { + "key": "pika", + "label": "Effects (v1.5)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pika/v1.5/pikaffects", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pika/v1.5/pikaffects queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pika/v1.5/pikaffects", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/kangaroo/2uSfx4xu1fXv4am4PvLAm_499f61b93f924a7496982491a87fb169.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/pika/v1.5/pikaffects", + "documentationUrl": "https://fal.ai/models/fal-ai/pika/v1.5/pikaffects/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "PikaV15PikaffectsInput": { + "x-fal-order-properties": [ + "image_url", + "pikaffect", + "prompt", + "negative_prompt", + "seed" + ], + "type": "object", + "properties": { + "pikaffect": { + "enum": [ + "Cake-ify", + "Crumble", + "Crush", + "Decapitate", + "Deflate", + "Dissolve", + "Explode", + "Eye-pop", + "Inflate", + "Levitate", + "Melt", + "Peel", + "Poke", + "Squish", + "Ta-da", + "Tear" + ], + "title": "Pikaffect", + "type": "string", + "examples": [ + "Cake-ify" + ], + "description": "The Pikaffect to apply" + }, + "prompt": { + "examples": [ + "Cake-ify it" + ], + "title": "Prompt", + "type": "string", + "description": "Text prompt to guide the effect" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed for the random number generator" + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt to guide the model" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/pika/pika_effects/cake.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the input image" + } + }, + "title": "PikaffectsRequest", + "description": "Request model for Pikaffects endpoint", + "required": [ + "image_url", + "pikaffect" + ] + }, + "PikaV15PikaffectsOutput": { + "x-fal-order-properties": [ + "video" + ], + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/pika/pika_effects/cake.mp4" + } + ], + "title": "Video", + "description": "The generated video with applied effect", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "PikaffectsOutput", + "description": "Output from Pikaffects generation", + "required": [ + "video" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pika/v1.5/pikaffects/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pika/v1.5/pikaffects/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/pika/v1.5/pikaffects": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PikaV15PikaffectsInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pika/v1.5/pikaffects/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PikaV15PikaffectsOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pika/v2/turbo/image-to-video", + "metadata": { + "display_name": "Pika Image to Video Turbo (v2)", + "category": "image-to-video", + "description": "Turbo is the model to use when you feel the need for speed. Turn your image into stunning video up to 3x faster – all with high quality outputs. 
", + "status": "active", + "tags": [ + "editing", + "effects", + "animation" + ], + "updated_at": "2026-01-26T21:44:01.218Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/panda/izszWyAu5LZ56Z-ZK63x5_1c8004b9a1054d0d849848569196d293.jpg", + "model_url": "https://fal.run/fal-ai/pika/v2/turbo/image-to-video", + "license_type": "commercial", + "date": "2025-03-14T00:00:00.000Z", + "group": { + "key": "pika", + "label": "Image to Video Turbo (v2)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pika/v2/turbo/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pika/v2/turbo/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pika/v2/turbo/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/panda/izszWyAu5LZ56Z-ZK63x5_1c8004b9a1054d0d849848569196d293.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/pika/v2/turbo/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/pika/v2/turbo/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "PikaV2TurboImageToVideoInput": { + "x-fal-order-properties": [ + "image_url", + "prompt", + "seed", + "negative_prompt", + "resolution", + "duration" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Camera slow dolly out" + ], + "title": "Prompt", + "type": "string" + }, + "resolution": { + "enum": [ + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video", + "default": "720p" + }, + "duration": { + "title": "Duration", + "type": "integer", + "description": "The duration of the generated video in seconds", + "default": 5 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed for the random number generator" + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "A negative prompt to guide the model", + "default": "" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/pika/pika_i2v_v2_turbo_input.png" + ], + "title": "Image Url", + "type": "string" + } + }, + "title": "ImageToVideoTurboInput", + "description": "Base request for image-to-video generation", + "required": [ + "prompt", + "image_url" + ] + }, + "PikaV2TurboImageToVideoOutput": { + "x-fal-order-properties": [ + "video" + ], + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/pika/pika_i2v_v2_turbo_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "TurboImageToVideoOutput", + "description": "Output model for all video generation endpoints", + "required": [ + "video" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pika/v2/turbo/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pika/v2/turbo/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/pika/v2/turbo/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PikaV2TurboImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pika/v2/turbo/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PikaV2TurboImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pika/v2.2/pikascenes", + "metadata": { + "display_name": "Pika Scenes (v2.2)", + "category": "image-to-video", + "description": "Pika Scenes v2.2 creates videos from images with high quality output.", + "status": "active", + "tags": [ + "editing", + "effects", + "animation" + ], + "updated_at": "2026-01-26T21:44:01.897Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/lion/3FYXmzqtjqf6xQ5YVxbKi_bf6ff3d3904a42c783662e7e1fa21ce9.jpg", + "model_url": "https://fal.run/fal-ai/pika/v2.2/pikascenes", + "license_type": "commercial", + "date": "2025-03-14T00:00:00.000Z", + "group": { + "key": "pika", + "label": "Scenes (v2.2)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pika/v2.2/pikascenes", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pika/v2.2/pikascenes queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pika/v2.2/pikascenes", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/lion/3FYXmzqtjqf6xQ5YVxbKi_bf6ff3d3904a42c783662e7e1fa21ce9.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/pika/v2.2/pikascenes", + "documentationUrl": "https://fal.ai/models/fal-ai/pika/v2.2/pikascenes/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PikaV22PikascenesInput": { + "x-fal-order-properties": [ + "image_urls", + "prompt", + "negative_prompt", + "seed", + "aspect_ratio", + "resolution", + "duration", + "ingredients_mode" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "The gorilla is wearing the coat and sitting in the living room, cinematic scene, camera orbit and dolly out" + ], + "title": "Prompt", + "type": "string", + "description": "Text prompt describing the desired video" + }, + "resolution": { + "examples": [ + "1080p", + "720p" + ], + "title": "Resolution", + "type": "string", + "enum": [ + "720p", + "1080p" + ], + "description": "The resolution of the generated video", + "default": "1080p" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1", + "4:5", + "5:4", + "3:2", + "2:3" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video", + "default": "16:9" + }, + "duration": { + "examples": [ + 5, + 10 + ], + "title": "Duration", + "type": "integer", + "enum": [ + 5, + 10 + ], + "description": "The duration of the generated video in seconds", + "default": 5 + }, + "ingredients_mode": { + "enum": [ + "precise", + "creative" + ], + "title": "Ingredients Mode", + "type": "string", + "description": "Mode for integrating multiple images. 
Precise mode is more accurate, creative mode is more creative.", + "default": "precise" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed for the random number generator" + }, + "image_urls": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/example_inputs/pika/pika_scenes/a.png", + "https://storage.googleapis.com/falserverless/example_inputs/pika/pika_scenes/b.png", + "https://storage.googleapis.com/falserverless/example_inputs/pika/pika_scenes/c.png" + ] + ], + "title": "Image Urls", + "type": "array", + "description": "URLs of images to combine into a video", + "items": { + "type": "string" + } + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "A negative prompt to guide the model", + "default": "ugly, bad, terrible" + } + }, + "title": "Pika22PikascenesRequest", + "description": "Request model for Pika 2.2 Pikascenes (collection-to-video) generation", + "required": [ + "image_urls", + "prompt" + ] + }, + "PikaV22PikascenesOutput": { + "x-fal-order-properties": [ + "video" + ], + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/pika/pika_scenes/output.mp4" + } + ], + "title": "Video", + "description": "The generated video combining multiple images", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "Pika22PikascenesOutput", + "description": "Output model for Pika 2.2 Pikascenes generation", + "required": [ + "video" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pika/v2.2/pikascenes/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pika/v2.2/pikascenes/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/pika/v2.2/pikascenes": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PikaV22PikascenesInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pika/v2.2/pikascenes/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PikaV22PikascenesOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pika/v2.2/image-to-video", + "metadata": { + "display_name": "Pika Image to Video (v2.2)", + "category": "image-to-video", + "description": "Turn photos into mind-blowing, dynamic videos in up to 1080p. 
Experience better image clarity and crisper, sharper visuals.", + "status": "active", + "tags": [ + "editing", + "effects", + "animation" + ], + "updated_at": "2026-01-26T21:44:01.089Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/koala/mATl0lc8FwiR6WceFEDfH_692743a190bc4859a00caa338a1809c5.jpg", + "model_url": "https://fal.run/fal-ai/pika/v2.2/image-to-video", + "license_type": "commercial", + "date": "2025-03-14T00:00:00.000Z", + "group": { + "key": "pika", + "label": "Image to Video (v2.2)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pika/v2.2/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pika/v2.2/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pika/v2.2/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/koala/mATl0lc8FwiR6WceFEDfH_692743a190bc4859a00caa338a1809c5.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/pika/v2.2/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/pika/v2.2/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "PikaV22ImageToVideoInput": { + "x-fal-order-properties": [ + "image_url", + "prompt", + "seed", + "negative_prompt", + "resolution", + "duration" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "The man and the horse are slowly walking towards the camera, the camera orbits and dolly out" + ], + "title": "Prompt", + "type": "string" + }, + "resolution": { + "examples": [ + "1080p", + "720p" + ], + "title": "Resolution", + "type": "string", + "enum": [ + "720p", + "1080p" + ], + "description": "The resolution of the generated video", + "default": "720p" + }, + "duration": { + "enum": [ + 5, + 10 + ], + "title": "Duration", + "type": "integer", + "description": "The duration of the generated video in seconds", + "default": 5 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed for the random number generator" + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "A negative prompt to guide the model", + "default": "" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/pika/pika_i2v_v22_input.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to use as the first frame" + } + }, + "title": "Pika22ImageToVideoRequest", + "description": "Request model for Pika 2.2 image-to-video generation", + "required": [ + "prompt", + "image_url" + ] + }, + "PikaV22ImageToVideoOutput": { + "x-fal-order-properties": [ + "video" + ], + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/pika/pika_i2v_v22_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "Pika22ImageToVideoOutput", + "description": "Output model for Pika 2.2 image-to-video generation", + "required": [ + "video" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pika/v2.2/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pika/v2.2/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/pika/v2.2/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PikaV22ImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pika/v2.2/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PikaV22ImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pika/v2.1/image-to-video", + "metadata": { + "display_name": "Pika Image to Video (v2.1)", + "category": "image-to-video", + "description": "Turn photos into mind-blowing, dynamic videos. 
Your images can come to life with sharp details, impressive character control and cinematic camera moves.", + "status": "active", + "tags": [ + "editing", + "effects", + "animation" + ], + "updated_at": "2026-01-26T21:44:01.648Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/monkey/9yJyc4ezyAPejLJlzquI9_f8b95aa25041426fbc0c0861ae80a2c6.jpg", + "model_url": "https://fal.run/fal-ai/pika/v2.1/image-to-video", + "license_type": "commercial", + "date": "2025-03-14T00:00:00.000Z", + "group": { + "key": "pika", + "label": "Image to Video (v2.1)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pika/v2.1/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pika/v2.1/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pika/v2.1/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/monkey/9yJyc4ezyAPejLJlzquI9_f8b95aa25041426fbc0c0861ae80a2c6.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/pika/v2.1/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/pika/v2.1/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PikaV21ImageToVideoInput": { + "x-fal-order-properties": [ + "image_url", + "prompt", + "seed", + "negative_prompt", + "resolution", + "duration" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "The flower blossoms into a vibrant pink lily. The lily's wide-open petals are lavishly adorned with sparkling glitter and an array of tiny, iridescent, multicolored gem-like stickers in shapes like stars, moons, and dolphins, catching the light playfully as a developing bud rises from the top of the main bloom." 
+ ], + "title": "Prompt", + "type": "string" + }, + "resolution": { + "enum": [ + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video", + "default": "720p" + }, + "duration": { + "title": "Duration", + "type": "integer", + "description": "The duration of the generated video in seconds", + "default": 5 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed for the random number generator" + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "A negative prompt to guide the model", + "default": "" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/pika/pika_i2v_v21_input.png" + ], + "title": "Image Url", + "type": "string" + } + }, + "title": "ImageToVideov21Input", + "description": "Base request for image-to-video generation", + "required": [ + "prompt", + "image_url" + ] + }, + "PikaV21ImageToVideoOutput": { + "x-fal-order-properties": [ + "video" + ], + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/pika/pika_i2v_v21_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "ImageToVideoV21Output", + "description": "Output from image-to-video generation", + "required": [ + "video" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pika/v2.1/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pika/v2.1/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/pika/v2.1/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PikaV21ImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pika/v2.1/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PikaV21ImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/vidu/image-to-video", + "metadata": { + "display_name": "Vidu Image to Video", + "category": "image-to-video", + "description": "Vidu Image to Video generates high-quality videos with exceptional visual quality and motion diversity from a single image", + "status": "active", + "tags": [ + "motion", + "image to video" + ], + "updated_at": "2026-01-26T21:44:20.994Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/web-examples/vidu/vidu.webp", + "model_url": "https://fal.run/fal-ai/vidu/image-to-video", + "date": "2025-03-12T00:00:00.000Z", + "group": { + "key": "vidu", + "label": "Image-to-Video" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/vidu/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/vidu/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/vidu/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/web-examples/vidu/vidu.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/vidu/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/vidu/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ViduImageToVideoInput": { + "title": "ImageToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A stylish woman walks down a Tokyo street filled with warm glowing neon and animated city signage. She wears a black leather jacket, a long red dress, and black boots, and carries a black purse." + ], + "title": "Prompt", + "type": "string", + "maxLength": 1500, + "description": "Text prompt for video generation, max 1500 characters" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for generation" + }, + "movement_amplitude": { + "enum": [ + "auto", + "small", + "medium", + "large" + ], + "title": "Movement Amplitude", + "type": "string", + "description": "The movement amplitude of objects in the frame", + "default": "auto" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/web-examples/vidu/stylish_woman.webp" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to use as the first frame" + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "seed", + "movement_amplitude" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "ViduImageToVideoOutput": { + "title": "VideoOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://fal.media/files/kangaroo/gzfzC5FXvcgZegQmy90L1_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/vidu/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/vidu/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/vidu/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ViduImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/vidu/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ViduImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/vidu/start-end-to-video", + "metadata": { + "display_name": "Vidu Start-End to Video", + "category": "image-to-video", + "description": "Vidu Start-End to Video generates smooth transition videos between specified start and end images.", + "status": "active", + "tags": [ + "motion", + "transition" + ], + "updated_at": "2026-01-26T21:44:20.869Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/web-examples/vidu/vidu.webp", + "model_url": "https://fal.run/fal-ai/vidu/start-end-to-video", + "date": "2025-03-12T00:00:00.000Z", + "group": { + "key": "vidu", + "label": "Start-End-to-Video" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/vidu/start-end-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/vidu/start-end-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/vidu/start-end-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/web-examples/vidu/vidu.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/vidu/start-end-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/vidu/start-end-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ViduStartEndToVideoInput": { + "title": "StartEndToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Transform the car frame into a complete vehicle." + ], + "title": "Prompt", + "type": "string", + "maxLength": 1500, + "description": "Text prompt for video generation, max 1500 characters" + }, + "start_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/web-examples/vidu/2-carchasis.png" + ], + "title": "Start Image Url", + "type": "string", + "description": "URL of the image to use as the first frame" + }, + "movement_amplitude": { + "enum": [ + "auto", + "small", + "medium", + "large" + ], + "title": "Movement Amplitude", + "type": "string", + "description": "The movement amplitude of objects in the frame", + "default": "auto" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for generation" + }, + "end_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/web-examples/vidu/2-carbody.png" + ], + "title": "End Image Url", + "type": "string", + "description": "URL of the image to use as the last frame" + } + }, + "x-fal-order-properties": [ + "prompt", + "start_image_url", + "end_image_url", + "seed", + "movement_amplitude" + ], + "required": [ + "prompt", + "start_image_url", + "end_image_url" + ] + }, + "ViduStartEndToVideoOutput": { + "title": "StartEndToVideoOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/web-examples/vidu/2-car.mp4" + } + ], + "title": "Video", + "description": "The generated transition video between start and end frames", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/vidu/start-end-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/vidu/start-end-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/vidu/start-end-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ViduStartEndToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/vidu/start-end-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ViduStartEndToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/vidu/reference-to-video", + "metadata": { + "display_name": "Vidu Reference to Video", + "category": "image-to-video", + "description": "Vidu Reference to Video creates videos by using a reference images and combining them with a prompt.", + "status": "active", + "tags": [ + "motion", + "reference" + ], + "updated_at": "2026-01-26T21:44:21.123Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/web-examples/vidu/vidu.webp", + "model_url": "https://fal.run/fal-ai/vidu/reference-to-video", + "date": "2025-03-12T00:00:00.000Z", + "group": { + "key": "vidu", + "label": "Reference-to-Video" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/vidu/reference-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/vidu/reference-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/vidu/reference-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/web-examples/vidu/vidu.webp", 
+ "playgroundUrl": "https://fal.ai/models/fal-ai/vidu/reference-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/vidu/reference-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ViduReferenceToVideoInput": { + "title": "ReferenceToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "The little devil is looking at the apple on the beach and walking around it." + ], + "title": "Prompt", + "type": "string", + "maxLength": 1500, + "description": "Text prompt for video generation, max 1500 characters" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the output video", + "default": "16:9" + }, + "reference_image_urls": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/web-examples/vidu/new-examples/reference1.png", + "https://storage.googleapis.com/falserverless/web-examples/vidu/new-examples/reference2.png", + "https://storage.googleapis.com/falserverless/web-examples/vidu/new-examples/reference3.png" + ] + ], + "title": "Reference Image Urls", + "type": "array", + "description": "URLs of the reference images to use for consistent subject appearance", + "items": { + "type": "string" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for generation" + }, + "movement_amplitude": { + "enum": [ + "auto", + "small", + "medium", + "large" + ], + "title": "Movement Amplitude", + "type": "string", + "description": "The movement amplitude of objects in the frame", + "default": "auto" + } + }, + "x-fal-order-properties": [ + "prompt", + "reference_image_urls", + "seed", + "aspect_ratio", + "movement_amplitude" + ], + "required": [ + "prompt", + "reference_image_urls" + ] + }, + "ViduReferenceToVideoOutput": { + "title": "ReferenceToVideoOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/web-examples/vidu/new-examples/referencevideo.mp4" + } + ], + "title": "Video", + "description": "The generated video with consistent subjects from reference images", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." 
+ }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/vidu/reference-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/vidu/reference-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/vidu/reference-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ViduReferenceToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/vidu/reference-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ViduReferenceToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/vidu/template-to-video", + "metadata": { + "display_name": "Vidu Template to Video", + "category": "image-to-video", + "description": "Vidu Template to Video lets you create different effects by applying motion templates to your images.", + "status": "active", + "tags": [ + "motion", + "template" + ], + "updated_at": "2026-01-26T21:44:20.551Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/web-examples/vidu/vidu.webp", + "model_url": "https://fal.run/fal-ai/vidu/template-to-video", + "date": "2025-03-12T00:00:00.000Z", + "group": { + "key": "vidu", + "label": "Template-to-Video" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/vidu/template-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/vidu/template-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/vidu/template-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/web-examples/vidu/vidu.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/vidu/template-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/vidu/template-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ViduTemplateToVideoInput": { + "title": "TemplateToVideoRequest", + "type": "object", + "properties": { + "aspect_ratio": { + "enum": [ + "16:9", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the output video", + "default": "16:9" + }, + "template": { + "enum": [ + "dreamy_wedding", + "romantic_lift", + "sweet_proposal", + "couple_arrival", + "cupid_arrow", + "pet_lovers", + "lunar_newyear", + "hug", + "kiss", + "dynasty_dress", + "wish_sender", + "love_pose", + "hair_swap", + "youth_rewind", + "morphlab", + "live_photo", + "emotionlab", + "live_memory", + "interaction", + "christmas", + "pet_finger", + "eat_mushrooms", + "beast_chase_library", + "beast_chase_supermarket", + "petal_scattered", + "emoji_figure", + "hair_color_change", + "multiple_people_kissing", + "beast_chase_amazon", + "beast_chase_mountain", + "balloonman_explodes_pro", + "get_thinner", + "jump2pool", + "bodyshake", + "jiggle_up", + "shake_it_dance", + "subject_3", + "pubg_winner_hit", + "shake_it_down", + "blueprint_supreme", + "hip_twist", + "motor_dance", + "rat_dance", + "kwok_dance", + "leg_sweep_dance", + "heeseung_march", + "shake_to_max", + "dame_un_grrr", + "i_know", + "lit_bounce", + "wave_dance", + "chill_dance", + "hip_flicking", + "sakura_season", + "zongzi_wrap", + "zongzi_drop", + "dragonboat_shot", + "rain_kiss", + "child_memory", + "couple_drop", + "couple_walk", + "flower_receive", + "love_drop", + "cheek_kiss", + "carry_me", + "blow_kiss", + "love_fall", + "french_kiss_8s", + "workday_feels", + "love_story", + "bloom_magic", + "ghibli", + "minecraft", + "box_me", + "claw_me", + "clayshot", + "manga_meme", + "quad_meme", + "pixel_me", + "clayshot_duo", + "irasutoya", + "american_comic", + "simpsons_comic", + "yayoi_kusama_style", + "pop_art", + "jojo_style", + "slice_therapy", + "balloon_flyaway", + "flying", + "paperman", + "pinch", + "bloom_doorobear", + "gender_swap", + "nap_me", + "sexy_me", + "spin360", + "smooth_shift", + "paper_fall", + "jump_to_cloud", + "pilot", + "sweet_dreams", + "soul_depart", + "punch_hit", + "watermelon_hit", + "split_stance_pet", + "make_face", + "break_glass", + "split_stance_human", + "covered_liquid_metal", + "fluffy_plunge", + "pet_belly_dance", + "water_float", + "relax_cut", + "head_to_balloon", + "cloning", + "across_the_universe_jungle", + "clothes_spinning_remnant", + "across_the_universe_jurassic", + "across_the_universe_moon", + "fisheye_pet", + "hitchcock_zoom", + "cute_bangs", + "earth_zoom_out", + "fisheye_human", + "drive_yacht", + "virtual_singer", + "earth_zoom_in", + "aliens_coming", + "drive_ferrari", + "bjd_style", + "virtual_fitting", + "orbit", + "zoom_in", + "ai_outfit", + "spin180", + "orbit_dolly", + "orbit_dolly_fast", + "auto_spin", + "walk_forward", + "outfit_show", + "zoom_in_fast", + "zoom_out_image", + "zoom_out_startend", + "muscling", + "captain_america", + "hulk", + "cap_walk", + "hulk_dive", + "exotic_princess", + "beast_companion", + "cartoon_doll", + "golden_epoch", + "oscar_gala", + "fashion_stride", + "star_carpet", + "flame_carpet", + "frost_carpet", + "mecha_x", + "style_me", + "tap_me", + "saber_warrior", + "pet2human", + "graduation", + "fishermen", + "happy_birthday", + "fairy_me", + "ladudu_me", + "ladudu_me_random", + "squid_game", + "superman", + "grow_wings", + "clevage", + "fly_with_doraemon", + "creatice_product_down", + "pole_dance", + "hug_from_behind", + "creatice_product_up_cybercity", + 
"creatice_product_up_bluecircuit", + "creatice_product_up", + "run_fast", + "background_explosion" + ], + "title": "Template", + "type": "string", + "description": "AI video template to use. Pricing varies by template: Standard templates (hug, kiss, love_pose, etc.) cost 4 credits ($0.20), Premium templates (lunar_newyear, dynasty_dress, dreamy_wedding, etc.) cost 6 credits ($0.30), and Advanced templates (live_photo) cost 10 credits ($0.50).", + "default": "hug" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for generation" + }, + "input_image_urls": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/web-examples/vidu/hug.PNG" + ] + ], + "title": "Input Image Urls", + "type": "array", + "description": "URLs of the images to use with the template. Number of images required varies by template: 'dynasty_dress' and 'shop_frame' accept 1-2 images, 'wish_sender' requires exactly 3 images, all other templates accept only 1 image.", + "items": { + "type": "string" + } + } + }, + "x-fal-order-properties": [ + "template", + "input_image_urls", + "seed", + "aspect_ratio" + ], + "required": [ + "input_image_urls" + ] + }, + "ViduTemplateToVideoOutput": { + "title": "TemplateToVideoOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/web-examples/vidu/hugging.mp4" + } + ], + "title": "Video", + "description": "The generated video using a predefined template", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/vidu/template-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/vidu/template-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/vidu/template-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ViduTemplateToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/vidu/template-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ViduTemplateToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan-i2v-lora", + "metadata": { + "display_name": "Wan-2.1 Image-to-Video with LoRAs", + "category": "image-to-video", + "description": "Add custom LoRAs to Wan-2.1, an image-to-video model that generates videos with high visual quality and motion diversity from images", + "status": "active", + "tags": [ + "image to video", + "motion", + "lora" + ], + "updated_at": "2026-01-26T21:44:02.809Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/Fal_Visuals_V1_02.jpg", + "thumbnail_animated_url": "https://storage.googleapis.com/falserverless/gallery/wan-i2v-lora-animated.webp", + "model_url": "https://fal.run/fal-ai/wan-i2v-lora", + "license_type": "commercial", + "date": "2025-03-08T00:00:00.000Z", + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/wan-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/wan-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan-i2v-lora", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan-i2v-lora queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan-i2v-lora", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/Fal_Visuals_V1_02.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan-i2v-lora", + "documentationUrl": "https://fal.ai/models/fal-ai/wan-i2v-lora/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", +
"COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanI2vLoraInput": { + "title": "WanLoRAI2VRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Cars race in slow motion." + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt to guide video generation." + }, + "shift": { + "minimum": 1, + "title": "Shift", + "type": "number", + "description": "Shift parameter for video generation.", + "maximum": 10, + "default": 5 + }, + "reverse_video": { + "title": "Reverse Video", + "type": "boolean", + "description": "If true, the video will be reversed.", + "default": false + }, + "loras": { + "title": "Loras", + "type": "array", + "description": "LoRA weights to be used in the inference.", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "default": [] + }, + "frames_per_second": { + "minimum": 5, + "title": "Frames Per Second", + "type": "integer", + "description": "Frames per second of the generated video. Must be between 5 and 24.", + "maximum": 24, + "default": 16 + }, + "turbo_mode": { + "title": "Turbo Mode", + "type": "boolean", + "description": "If true, the video will be generated faster with no noticeable degradation in the visual quality.", + "default": true + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": false + }, + "num_frames": { + "minimum": 81, + "title": "Num Frames", + "type": "integer", + "description": "Number of frames to generate. Must be between 81 and 100 (inclusive).
If the number of frames is greater than 81, the video will be generated with 1.25x more billing units.", + "maximum": 100, + "default": 81 + }, + "negative_prompt": { + "examples": [ + "bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for video generation.", + "default": "bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "9:16", + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio of the output video.", + "default": "16:9" + }, + "resolution": { + "enum": [ + "480p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video (480p or 720p). 480p is 0.5 billing units, and 720p is 1 billing unit.", + "default": "720p" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/gallery/car_720p.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped." + }, + "enable_prompt_expansion": { + "examples": [ + false + ], + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility. If None, a random seed is chosen." + }, + "guide_scale": { + "minimum": 1, + "title": "Guide Scale", + "type": "number", + "description": "Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.", + "maximum": 10, + "default": 5 + }, + "num_inference_steps": { + "minimum": 2, + "title": "Num Inference Steps", + "type": "integer", + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "maximum": 40, + "default": 30 + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "image_url", + "num_frames", + "frames_per_second", + "seed", + "resolution", + "num_inference_steps", + "guide_scale", + "shift", + "enable_safety_checker", + "enable_prompt_expansion", + "aspect_ratio", + "loras", + "reverse_video", + "turbo_mode" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "WanI2vLoraOutput": { + "title": "WanI2VResponse", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." 
+ }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/gallery/wan-i2v-example.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "title": "Scale", + "type": "number", + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "maximum": 4, + "default": 1 + }, + "weight_name": { + "title": "Weight Name", + "type": "string", + "description": "Name of the LoRA weight. Used only if `path` is a Hugging Face repository, and required only if you have more than 1 safetensors file in the repo." + } + }, + "x-fal-order-properties": [ + "path", + "weight_name", + "scale" + ], + "required": [ + "path" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan-i2v-lora/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-i2v-lora/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/wan-i2v-lora": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanI2vLoraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-i2v-lora/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanI2vLoraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/hunyuan-video-image-to-video", + "metadata": { + "display_name": "Hunyuan Video Image-to-Video Inference", + "category": "image-to-video", + "description": "Image to Video for the high-quality Hunyuan Video I2V model.", + "status": "active", + "tags": [ + "motion" + ], + "updated_at": "2026-01-26T21:44:21.248Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/hunyuan-video-image-to-video.webp", + "thumbnail_animated_url": "https://storage.googleapis.com/falserverless/gallery/hunyuan-video-image-to-video-animated.webp", + "model_url": "https://fal.run/fal-ai/hunyuan-video-image-to-video", + "license_type": "commercial", + "date": "2025-03-06T00:00:00.000Z", + "group": { + "key": "hunyuan-text-to-video", + "label": "Image-to-Video" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 2, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/hunyuan-video-image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/hunyuan-video-image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/hunyuan-video-image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/hunyuan-video-image-to-video.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/hunyuan-video-image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/hunyuan-video-image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "HunyuanVideoImageToVideoInput": { + "title": "HunyuanVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Two muscular cats boxing in a boxing ring." + ], + "maxLength": 1000, + "type": "string", + "title": "Prompt", + "description": "The prompt to generate the video from." + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16" + ], + "title": "Aspect Ratio (W:H)", + "type": "string", + "description": "The aspect ratio of the video to generate.", + "default": "16:9" + }, + "resolution": { + "enum": [ + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the video to generate.", + "default": "720p" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/hunyuan_i2v.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image input." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed to use for generating the video." + }, + "num_frames": { + "enum": [ + "129" + ], + "title": "Number of Frames", + "type": "string", + "description": "The number of frames to generate.", + "default": 129 + }, + "i2v_stability": { + "title": "I2V Stability", + "type": "boolean", + "description": "Turning on I2V Stability reduces hallucination but also reduces motion.", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "seed", + "aspect_ratio", + "resolution", + "num_frames", + "i2v_stability" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "HunyuanVideoImageToVideoOutput": { + "title": "HunyuanI2VResponse", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generating the video." + }, + "video": { + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/hunyuan-video-image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-video-image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-video-image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HunyuanVideoImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-video-image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HunyuanVideoImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/minimax/video-01-director/image-to-video", + "metadata": { + "display_name": "MiniMax (Hailuo AI) Video 01 Director - Image to Video", + "category": "image-to-video", + "description": "Generate video clips more accurately with respect to initial image, natural language descriptions, and using camera movement instructions for shot control.", + "status": "active", + "tags": [ + "motion", + "transformation", + "camera-controls" + ], + "updated_at": "2026-01-26T21:44:25.510Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/red_clouds.jpg", + "model_url": "https://fal.run/fal-ai/minimax/video-01-director/image-to-video", + "date": "2025-02-24T00:00:00.000Z", + "group": { + "key": "minimax-video", + "label": "I2V-01 Director (Camera Control)" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/minimax/video-01-director/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/minimax/video-01-director/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/minimax/video-01-director/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/red_clouds.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/minimax/video-01-director/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/minimax/video-01-director/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": 
"string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MinimaxVideo01DirectorImageToVideoInput": { + "title": "ImageToVideoDirectorRequest", + "type": "object", + "properties": { + "prompt_optimizer": { + "description": "Whether to use the model's prompt optimizer", + "type": "boolean", + "title": "Prompt Optimizer", + "default": true + }, + "prompt": { + "examples": [ + "[Push in, Follow]A stylish woman walks down a Tokyo street filled with warm glowing neon and animated city signage. She wears a black leather jacket, a long red dress, and black boots, and carries a black purse.[Pan left] The street opens into a small plaza where street vendors sell steaming food under colorful awnings." + ], + "maxLength": 2000, + "type": "string", + "title": "Prompt", + "description": "Text prompt for video generation. Camera movement instructions can be added using square brackets (e.g. [Pan left] or [Zoom in]). You can use up to 3 combined movements per prompt. Supported movements: Truck left/right, Pan left/right, Push in/Pull out, Pedestal up/down, Tilt up/down, Zoom in/out, Shake, Tracking shot, Static shot. For example: [Truck left, Pan right, Zoom in]. For a more detailed guide, refer https://sixth-switch-2ac.notion.site/T2V-01-Director-Model-Tutorial-with-camera-movement-1886c20a98eb80f395b8e05291ad8645" + }, + "image_url": { + "examples": [ + "https://fal.media/files/elephant/8kkhB12hEZI2kkbU8pZPA_test.jpeg" + ], + "description": "URL of the image to use as the first frame", + "type": "string", + "title": "Image Url" + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "prompt_optimizer" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "MinimaxVideo01DirectorImageToVideoOutput": { + "title": "I2VDirectorOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/web-examples/minimax/i2v-01.mp4" + } + ], + "description": "The generated video", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/minimax/video-01-director/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/video-01-director/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/minimax/video-01-director/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxVideo01DirectorImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/video-01-director/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxVideo01DirectorImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/skyreels-i2v", + "metadata": { + "display_name": "Skyreels V1 (Image-to-Video)", + "category": "image-to-video", + "description": "SkyReels V1 is the first and most advanced open-source human-centric video foundation model. 
It was built by fine-tuning HunyuanVideo on O(10M) high-quality film and television clips.", + "status": "active", + "tags": [ + "motion" + ], + "updated_at": "2026-01-26T21:44:26.090Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/skyreels-i2v.webp", + "thumbnail_animated_url": "https://storage.googleapis.com/falserverless/gallery/skyreels-i2v-animated.webp", + "model_url": "https://fal.run/fal-ai/skyreels-i2v", + "date": "2025-02-18T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/skyreels-i2v", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/skyreels-i2v queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/skyreels-i2v", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/skyreels-i2v.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/skyreels-i2v", + "documentationUrl": "https://fal.ai/models/fal-ai/skyreels-i2v/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "SkyreelsI2vInput": { + "title": "SkyreelsI2VRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A stylish woman walks down a Tokyo street filled with warm glowing neon and animated city signage. She wears a black leather jacket, a long red dress, and black boots, and carries a black purse." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the video from." + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio of the output video", + "default": "16:9" + }, + "image_url": { + "examples": [ + "https://fal.media/files/panda/TuXlMwArpQcdYNCLAEM8K.webp" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image input." + }, + "guidance_scale": { + "minimum": 1, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "Guidance scale for generation (between 1.0 and 20.0)", + "default": 6 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for generation. If not provided, a random seed will be used." + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of denoising steps (between 1 and 50). 
Higher values give better quality but take longer.", + "default": 30 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt to guide generation away from certain attributes." + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "seed", + "guidance_scale", + "num_inference_steps", + "negative_prompt", + "aspect_ratio" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "SkyreelsI2vOutput": { + "title": "SkyreelsI2VResponse", + "type": "object", + "properties": { + "seed": { + "examples": [ + 42 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + }, + "video": { + "examples": [ + { + "url": "https://fal.media/files/elephant/yOOdaiC5clkH9K_5TTD32_video.mp4" + } + ], + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/skyreels-i2v/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/skyreels-i2v/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/skyreels-i2v": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SkyreelsI2vInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/skyreels-i2v/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SkyreelsI2vOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/luma-dream-machine/ray-2/image-to-video", + "metadata": { + "display_name": "Luma Ray 2 (Image to Video)", + "category": "image-to-video", + "description": "Ray2 is a large-scale video generative model capable of creating realistic visuals with natural, coherent motion.", + "status": "active", + "tags": [ + "motion", + "transformation" + ], + "updated_at": "2026-01-26T21:44:27.612Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/luma-dream-machine-ray-2.webp", + "model_url": "https://fal.run/fal-ai/luma-dream-machine/ray-2/image-to-video", + "date": "2025-02-14T00:00:00.000Z", + "group": { + "key": "luma-dream-machine", + "label": "Image to Video (Ray 2)" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/luma-dream-machine/ray-2/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/luma-dream-machine/ray-2/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/luma-dream-machine/ray-2/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/luma-dream-machine-ray-2.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/luma-dream-machine/ray-2/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/luma-dream-machine/ray-2/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "LumaDreamMachineRay2ImageToVideoInput": { + "title": "Ray2ImageToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A stylish woman walks down a Tokyo street filled with warm glowing neon and animated city signage." + ], + "title": "Prompt", + "type": "string", + "maxLength": 5000, + "minLength": 3 + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "4:3", + "3:4", + "21:9", + "9:21" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video", + "default": "16:9" + }, + "resolution": { + "enum": [ + "540p", + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video (720p costs 2x more, 1080p costs 4x more)", + "default": "540p" + }, + "loop": { + "title": "Loop", + "type": "boolean", + "description": "Whether the video should loop (end of video is blended with the beginning)", + "default": false + }, + "duration": { + "enum": [ + "5s", + "9s" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video", + "default": "5s" + }, + "image_url": { + "examples": [ + "https://fal.media/files/elephant/8kkhB12hEZI2kkbU8pZPA_test.jpeg" + ], + "title": "Image Url", + "type": "string", + "description": "Initial image to start the video from. Can be used together with end_image_url." + }, + "end_image_url": { + "title": "End Image Url", + "type": "string", + "description": "Final image to end the video with. Can be used together with image_url." + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "end_image_url", + "aspect_ratio", + "loop", + "resolution", + "duration" + ], + "required": [ + "prompt" + ] + }, + "LumaDreamMachineRay2ImageToVideoOutput": { + "title": "Ray2I2VOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/zebra/9aDde3Te2kuJYHdR0Kz8R_output.mp4" + } + ], + "title": "Video", + "description": "URL of the generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/luma-dream-machine/ray-2/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/luma-dream-machine/ray-2/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/luma-dream-machine/ray-2/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LumaDreamMachineRay2ImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/luma-dream-machine/ray-2/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LumaDreamMachineRay2ImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/hunyuan-video-img2vid-lora", + "metadata": { + "display_name": "Hunyuan Video Image-to-Video LoRA Inference", + "category": "image-to-video", + "description": "Image to Video for the Hunyuan Video model using a custom trained LoRA.", + "status": "active", + "tags": [ + "motion" + ], + "updated_at": "2026-01-26T21:44:28.941Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/hunyuan-video.webp?v=1", + "model_url": "https://fal.run/fal-ai/hunyuan-video-img2vid-lora", + "date": "2025-02-03T00:00:00.000Z", + "group": { + "key": "hunyuan-text-to-video", + "label": "Image-to-Video-LoRA" + }, + "highlighted": false, + "duration_estimate": 4, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/hunyuan-video-img2vid-lora", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/hunyuan-video-img2vid-lora queue.", + "x-fal-metadata": { + "endpointId": 
"fal-ai/hunyuan-video-img2vid-lora", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/hunyuan-video.webp?v=1", + "playgroundUrl": "https://fal.ai/models/fal-ai/hunyuan-video-img2vid-lora", + "documentationUrl": "https://fal.ai/models/fal-ai/hunyuan-video-img2vid-lora/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "HunyuanVideoImg2vidLoraInput": { + "title": "Input", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A low angle shot of a man walking down a street, illuminated by the neon signs of the bars around him" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the video from." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed to use for generating the video." + }, + "image_url": { + "examples": [ + "https://d3phaj0sisr2ct.cloudfront.net/research/eugene.jpg" + ], + "title": "Image URL", + "type": "string", + "description": "The URL to the image to generate the video from. The image must be 960x544 or it will get cropped and resized to that size." + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "seed" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "HunyuanVideoImg2vidLoraOutput": { + "title": "Output", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generating the video." + }, + "video": { + "examples": [ + { + "content_type": "video/mp4", + "url": "https://storage.googleapis.com/falserverless/gallery/man-smiles.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/hunyuan-video-img2vid-lora/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-video-img2vid-lora/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-video-img2vid-lora": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HunyuanVideoImg2vidLoraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-video-img2vid-lora/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HunyuanVideoImg2vidLoraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pixverse/v3.5/image-to-video/fast", + "metadata": { + "display_name": "PixVerse v3.5: Image to Video Fast", + "category": "image-to-video", + "description": "Generate high quality video clips from text and image prompts quickly using PixVerse v3.5 Fast", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:44:06.495Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "model_url": "https://fal.run/fal-ai/pixverse/v3.5/image-to-video/fast", + "license_type": "commercial", + "date": "2025-01-29T00:00:00.000Z", + "group": { + "key": "pixverse", + "label": "Image to Video (Fast)" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pixverse/v3.5/image-to-video/fast", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pixverse/v3.5/image-to-video/fast queue.", + "x-fal-metadata": { + "endpointId": 
"fal-ai/pixverse/v3.5/image-to-video/fast", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/pixverse/v3.5/image-to-video/fast", + "documentationUrl": "https://fal.ai/models/fal-ai/pixverse/v3.5/image-to-video/fast/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PixverseV35ImageToVideoFastInput": { + "title": "FastImageToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A stylish woman walks down a Tokyo street filled with warm glowing neon and animated city signage." + ], + "title": "Prompt", + "type": "string" + }, + "resolution": { + "enum": [ + "360p", + "540p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video", + "default": "720p" + }, + "style": { + "enum": [ + "anime", + "3d_animation", + "clay", + "comic", + "cyberpunk" + ], + "title": "Style", + "type": "string", + "description": "The style of the generated video" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n " + }, + "negative_prompt": { + "examples": [ + "blurry, low quality, low resolution, pixelated, noisy, grainy, out of focus, poorly lit, poorly exposed, poorly composed, poorly framed, poorly cropped, poorly color corrected, poorly color graded" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt to be used for the generation", + "default": "" + }, + "image_url": { + "examples": [ + "https://fal.media/files/elephant/8kkhB12hEZI2kkbU8pZPA_test.jpeg" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to use as the first frame" + } + }, + "x-fal-order-properties": [ + "prompt", + "resolution", + "negative_prompt", + "style", + "seed", + "image_url" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "PixverseV35ImageToVideoFastOutput": { + "title": "I2VOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 4060052, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://fal.media/files/tiger/8V9H8RLyFiWjmJDOxGbcG_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": 
"object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pixverse/v3.5/image-to-video/fast/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v3.5/image-to-video/fast/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v3.5/image-to-video/fast": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV35ImageToVideoFastInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v3.5/image-to-video/fast/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV35ImageToVideoFastOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pixverse/v3.5/image-to-video", + "metadata": { + "display_name": "PixVerse v3.5: Image to Video", + "category": "image-to-video", + "description": "Generate high quality video clips from text and image prompts using PixVerse v3.5", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:44:06.192Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "model_url": "https://fal.run/fal-ai/pixverse/v3.5/image-to-video", + "license_type": "commercial", + "date": "2025-01-29T00:00:00.000Z", + "group": { + "key": "pixverse", + "label": "Image to Video" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pixverse/v3.5/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pixverse/v3.5/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pixverse/v3.5/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/pixverse/v3.5/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/pixverse/v3.5/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "PixverseV35ImageToVideoInput": { + "title": "ImageToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A stylish woman walks down a Tokyo street filled with warm glowing neon and animated city signage." + ], + "title": "Prompt", + "type": "string" + }, + "resolution": { + "enum": [ + "360p", + "540p", + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video", + "default": "720p" + }, + "duration": { + "enum": [ + "5", + "8" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds. 8s videos cost double. 1080p videos are limited to 5 seconds", + "default": "5" + }, + "style": { + "enum": [ + "anime", + "3d_animation", + "clay", + "comic", + "cyberpunk" + ], + "title": "Style", + "type": "string", + "description": "The style of the generated video" + }, + "image_url": { + "examples": [ + "https://fal.media/files/elephant/8kkhB12hEZI2kkbU8pZPA_test.jpeg" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to use as the first frame" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n " + }, + "negative_prompt": { + "examples": [ + "blurry, low quality, low resolution, pixelated, noisy, grainy, out of focus, poorly lit, poorly exposed, poorly composed, poorly framed, poorly cropped, poorly color corrected, poorly color graded" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt to be used for the generation", + "default": "" + } + }, + "x-fal-order-properties": [ + "prompt", + "resolution", + "duration", + "negative_prompt", + "style", + "seed", + "image_url" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "PixverseV35ImageToVideoOutput": { + "title": "I2VOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 4060052, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://fal.media/files/tiger/8V9H8RLyFiWjmJDOxGbcG_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pixverse/v3.5/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v3.5/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v3.5/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV35ImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v3.5/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV35ImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/minimax/video-01-subject-reference", + "metadata": { + "display_name": "MiniMax (Hailuo AI) Video 01 Subject Reference", + "category": "image-to-video", + "description": "Generate video clips maintaining consistent, realistic facial features and identity across dynamic video content", + "status": "active", + "tags": [ + "subject", + "transformation" + ], + "updated_at": "2026-01-26T21:44:30.460Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/minimax-video-01-subject-reference.webp", + "model_url": "https://fal.run/fal-ai/minimax/video-01-subject-reference", + "date": "2025-01-20T00:00:00.000Z", + "group": { + "key": "minimax-video", + "label": "Image to Video (Subject Reference)" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/minimax/video-01-subject-reference", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/minimax/video-01-subject-reference queue.", + "x-fal-metadata": { + "endpointId": 
"fal-ai/minimax/video-01-subject-reference", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/minimax-video-01-subject-reference.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/minimax/video-01-subject-reference", + "documentationUrl": "https://fal.ai/models/fal-ai/minimax/video-01-subject-reference/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MinimaxVideo01SubjectReferenceInput": { + "title": "SubjectReferenceRequest", + "type": "object", + "properties": { + "prompt_optimizer": { + "description": "Whether to use the model's prompt optimizer", + "type": "boolean", + "title": "Prompt Optimizer", + "default": true + }, + "prompt": { + "examples": [ + "A stylish woman walks down a Tokyo street filled with warm glowing neon and animated city signage." + ], + "maxLength": 2000, + "type": "string", + "title": "Prompt" + }, + "subject_reference_image_url": { + "examples": [ + "https://fal.media/files/tiger/s2xnjhLpjM6L8ISxlDCAw.png" + ], + "description": "URL of the subject reference image to use for consistent subject appearance", + "type": "string", + "title": "Subject Reference Image Url" + } + }, + "x-fal-order-properties": [ + "prompt", + "subject_reference_image_url", + "prompt_optimizer" + ], + "required": [ + "prompt", + "subject_reference_image_url" + ] + }, + "MinimaxVideo01SubjectReferenceOutput": { + "title": "SubjectReferenceOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://fal.media/files/rabbit/pONKqOnY7z6GlF6oDESvR_output.mp4" + } + ], + "description": "The generated video", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/minimax/video-01-subject-reference/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/video-01-subject-reference/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/minimax/video-01-subject-reference": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxVideo01SubjectReferenceInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/video-01-subject-reference/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxVideo01SubjectReferenceOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/v1.6/standard/image-to-video", + "metadata": { + "display_name": "Kling 1.6", + "category": "image-to-video", + "description": "Generate video clips from your images using Kling 1.6 (std)", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:44:08.752Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/elephant/28-vTrv3W2BT-u8_cy7mt.png", + "model_url": "https://fal.run/fal-ai/kling-video/v1.6/standard/image-to-video", + "license_type": "commercial", + "date": "2025-01-07T00:00:00.000Z", + "group": { + "key": "kling-video-v1-6", + "label": "Image to Video v1.6 (std)" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 6, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling-video/v1.6/standard/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-video/v1.6/standard/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/v1.6/standard/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://fal.media/files/elephant/28-vTrv3W2BT-u8_cy7mt.png", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/v1.6/standard/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/v1.6/standard/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoV16StandardImageToVideoInput": { + "title": "ImageToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Snowflakes fall as a car moves forward along the road." + ], + "title": "Prompt", + "type": "string", + "maxLength": 2500 + }, + "duration": { + "enum": [ + "5", + "10" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds", + "default": "5" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/kling/kling_input.jpeg" + ], + "title": "Image Url", + "type": "string" + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "maxLength": 2500, + "default": "blur, distort, and low quality" + }, + "cfg_scale": { + "minimum": 0, + "title": "Cfg Scale", + "type": "number", + "maximum": 1, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt.\n ", + "default": 0.5 + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "duration", + "negative_prompt", + "cfg_scale" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "KlingVideoV16StandardImageToVideoOutput": { + "title": "I2VOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/kling/kling_i2v_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/v1.6/standard/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1.6/standard/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1.6/standard/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV16StandardImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1.6/standard/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV16StandardImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/sadtalker/reference", + "metadata": { + "display_name": "Sad Talker", + "category": "image-to-video", + "description": "Learning Realistic 3D Motion Coefficients for Stylized Audio-Driven Single Image Talking Face Animation", + "status": "active", + "tags": [ + "animation" + ], + "updated_at": "2026-01-26T21:44:33.169Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/sadtalker.jpeg", + "model_url": "https://fal.run/fal-ai/sadtalker/reference", + "github_url": "https://github.com/OpenTalker/SadTalker/blob/main/LICENSE", + "date": "2024-12-20T00:00:00.000Z", + "group": { + "key": "sadtalker", + "label": "Reference" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/sadtalker/reference", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/sadtalker/reference queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/sadtalker/reference", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/sadtalker.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/sadtalker/reference", + "documentationUrl": "https://fal.ai/models/fal-ai/sadtalker/reference/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "SadtalkerReferenceInput": { + "title": "SadTalkerRefVideoInput", + "type": "object", + "properties": { + "pose_style": { + "minimum": 0, + "description": "The style of the pose", + "type": "integer", + "title": "Pose Style", + "maximum": 45, + "default": 0 + }, + "source_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/sadtalker/anime_girl.png" + ], + "description": "URL of the source image", + "type": "string", + "title": "Source Image Url" + }, + "reference_pose_video_url": { + "examples": [ + "https://github.com/OpenTalker/SadTalker/raw/main/examples/ref_video/WDA_AlexandriaOcasioCortez_000.mp4" + ], + "description": "URL of the reference video", + "type": "string", + "title": "Reference Pose Video Url" + }, + "driven_audio_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/sadtalker/deyu.wav" + ], + "description": "URL of the driven audio", + "type": "string", + "title": "Driven Audio Url" + }, + "face_enhancer": { + "enum": [ + "gfpgan" + ], + "description": "The type of face enhancer to use", + "type": "string", + "examples": [ + null + ], + "title": "Face Enhancer" + }, + "expression_scale": { + "description": "The scale of the expression", + "type": "number", + "minimum": 0, + "title": "Expression Scale", + "maximum": 3, + "multipleOf": 0.1, + "default": 1 + }, + "face_model_resolution": { + "enum": [ + "256", + "512" + ], + "description": "The resolution of the face model", + "type": "string", + "title": "Face Model Resolution", + "default": "256" + }, + "still_mode": { + "description": "Whether to use still mode. Fewer head motion, works with preprocess `full`.", + "type": "boolean", + "title": "Still Mode", + "default": false + }, + "preprocess": { + "enum": [ + "crop", + "extcrop", + "resize", + "full", + "extfull" + ], + "description": "The type of preprocessing to use", + "type": "string", + "title": "Preprocess", + "default": "crop" + } + }, + "x-fal-order-properties": [ + "source_image_url", + "driven_audio_url", + "reference_pose_video_url", + "pose_style", + "face_model_resolution", + "expression_scale", + "face_enhancer", + "still_mode", + "preprocess" + ], + "required": [ + "source_image_url", + "driven_audio_url", + "reference_pose_video_url" + ] + }, + "SadtalkerReferenceOutput": { + "title": "SadTalkerOutput", + "type": "object", + "properties": { + "video": { + "title": "Video", + "description": "URL of the generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/sadtalker/reference/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sadtalker/reference/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/sadtalker/reference": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SadtalkerReferenceInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sadtalker/reference/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SadtalkerReferenceOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/minimax/video-01-live/image-to-video", + "metadata": { + "display_name": "MiniMax (Hailuo AI) Video 01 Live", + "category": "image-to-video", + "description": "Generate video clips from your images using MiniMax Video model", + "status": "active", + "tags": [ + "motion", + "transformation" + ], + "updated_at": "2026-01-26T21:44:34.899Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/Fal_Visuals_V1_016.jpg", + "model_url": "https://fal.run/fal-ai/minimax/video-01-live/image-to-video", + "date": "2024-12-16T00:00:00.000Z", + "group": { + "key": "minimax-video", + "label": "Image to Video (Live)" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue 
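Unlike the single-image endpoints, `SadtalkerReferenceInput` requires three media inputs. A request body sketch using only the required fields, with URLs taken from the schema's own examples; the optional knobs are listed with their schema defaults:

```ts
// Minimal fal-ai/sadtalker/reference request body (required fields only).
const sadtalkerInput = {
  source_image_url:
    'https://storage.googleapis.com/falserverless/model_tests/sadtalker/anime_girl.png',
  driven_audio_url:
    'https://storage.googleapis.com/falserverless/model_tests/sadtalker/deyu.wav',
  reference_pose_video_url:
    'https://github.com/OpenTalker/SadTalker/raw/main/examples/ref_video/WDA_AlexandriaOcasioCortez_000.mp4',
  // Optional, defaults per schema: pose_style: 0, face_model_resolution: '256',
  // expression_scale: 1, still_mode: false, preprocess: 'crop'.
}
```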
OpenAPI for fal-ai/minimax/video-01-live/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/minimax/video-01-live/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/minimax/video-01-live/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/Fal_Visuals_V1_016.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/minimax/video-01-live/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/minimax/video-01-live/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MinimaxVideo01LiveImageToVideoInput": { + "title": "ImageToVideoRequest", + "type": "object", + "properties": { + "prompt_optimizer": { + "description": "Whether to use the model's prompt optimizer", + "type": "boolean", + "title": "Prompt Optimizer", + "default": true + }, + "prompt": { + "examples": [ + "A stylish woman walks down a Tokyo street filled with warm glowing neon and animated city signage." + ], + "maxLength": 2000, + "type": "string", + "title": "Prompt" + }, + "image_url": { + "examples": [ + "https://fal.media/files/elephant/8kkhB12hEZI2kkbU8pZPA_test.jpeg" + ], + "description": "URL of the image to use as the first frame", + "type": "string", + "title": "Image Url" + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "prompt_optimizer" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "MinimaxVideo01LiveImageToVideoOutput": { + "title": "I2VLiveOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://fal.media/files/monkey/bkT4T4uLOXr0jDsIMlNd5_output.mp4" + } + ], + "description": "The generated video", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/minimax/video-01-live/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/video-01-live/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/minimax/video-01-live/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxVideo01LiveImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/video-01-live/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxVideo01LiveImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx-video/image-to-video", + "metadata": { + "display_name": "LTX Video (preview)", + "category": "image-to-video", + "description": "Generate videos from images using LTX Video", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:44:36.959Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/model_tests/cogvideox/panda.gif.gif", + "model_url": "https://fal.run/fal-ai/ltx-video/image-to-video", + "license_type": "research", + "date": "2024-11-21T00:00:00.000Z", + "group": { + "key": "ltx-video", + "label": "Image to Video" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-video/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx-video/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-video/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/model_tests/cogvideox/panda.gif.gif", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx-video/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx-video/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
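For a consumer generating types from this catalog, the MiniMax schema above maps to a small pair of interfaces: required properties become non-optional, properties with defaults become optional. A hand-written sketch of that mapping (the interface names mirror the schema titles but are otherwise an assumption):

```ts
// TypeScript mirror of MinimaxVideo01LiveImageToVideoInput / Output.
interface MinimaxVideo01LiveImageToVideoInput {
  prompt: string // maxLength 2000 per schema
  image_url: string // first frame of the generated clip
  prompt_optimizer?: boolean // server-side default: true
}

interface MinimaxVideo01LiveImageToVideoOutput {
  video: {
    url: string // the only required File field
    content_type?: string
    file_name?: string
    file_size?: number
  }
}
```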
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "LtxVideoImageToVideoInput": { + "title": "ImageToVideoInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A lone astronaut in a white spacesuit with gold-tinted visor drifts weightlessly through a sleek, cylindrical corridor of a spaceship. Their movements are slow and graceful as they gently push off the metallic walls with their gloved hands, rotating slightly as they float from right to left across the frame. The corridor features brushed aluminum panels with blue LED strips running along the ceiling, casting a cool glow on the astronaut's suit. Various cables, pipes, and control panels line the walls. The camera follows the astronaut's movement in a handheld style, slightly swaying and adjusting focus, maintaining a medium shot that captures both the astronaut and the corridor's depth. Small particles of dust catch the light as they float in the zero-gravity environment. The scene appears cinematic, with lens flares occasionally reflecting off the metallic surfaces and the astronaut's visor." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the video from." + }, + "guidance_scale": { + "maximum": 10, + "type": "number", + "title": "Guidance Scale", + "description": "The guidance scale to use.", + "exclusiveMinimum": 1, + "default": 3 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed to use for random number generation." + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Number of Inference Steps", + "description": "The number of inference steps to take.", + "default": 30 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to generate the video from.", + "default": "low quality, worst quality, deformed, distorted, disfigured, motion smear, motion artifacts, fused fingers, bad anatomy, weird hand, ugly" + }, + "image_url": { + "examples": [ + "https://fal.media/files/kangaroo/4OePu2ifG7SKxTM__TQrQ_72929fec9fb74790bb8c8b760450c9b9.jpg" + ], + "title": "Image URL", + "type": "string", + "description": "The URL of the image to generate the video from." + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "seed", + "num_inference_steps", + "guidance_scale", + "image_url" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "LtxVideoImageToVideoOutput": { + "title": "Output", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for random number generation." + }, + "video": { + "title": "Video", + "description": "The generated video.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-video/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-video/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-video/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LtxVideoImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-video/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LtxVideoImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/cogvideox-5b/image-to-video", + "metadata": { + "display_name": "CogVideoX-5B", + "category": "image-to-video", + "description": "Generate videos from images and prompts using CogVideoX-5B", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:44:38.700Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/model_tests/cogvideox/panda.gif.gif", + "model_url": "https://fal.run/fal-ai/cogvideox-5b/image-to-video", + "github_url": "https://huggingface.co/THUDM/CogVideoX-5b/blob/main/LICENSE", + "date": "2024-10-17T00:00:00.000Z", + "group": { + "key": "cogvideox-5b", + "label": "Image to Video" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/cogvideox-5b/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/cogvideox-5b/image-to-video queue.", + "x-fal-metadata": { + "endpointId": 
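The LTX output above is `{ video: File, seed: number }`, and `File` guarantees only `url`. A sketch of persisting a result to disk, assuming a Node runtime (`node:fs/promises`, global `fetch`, `Buffer`); the file name falls back to a hypothetical seed-based name when the API omits one:

```ts
// Save an LtxVideoImageToVideoOutput to disk; only video.url is guaranteed.
import { writeFile } from 'node:fs/promises'

async function saveLtxVideo(output: {
  video: { url: string; file_name?: string }
  seed: number
}) {
  const res = await fetch(output.video.url)
  const bytes = Buffer.from(await res.arrayBuffer())
  await writeFile(output.video.file_name ?? `ltx-${output.seed}.mp4`, bytes)
}
```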
"fal-ai/cogvideox-5b/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/model_tests/cogvideox/panda.gif.gif", + "playgroundUrl": "https://fal.ai/models/fal-ai/cogvideox-5b/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/cogvideox-5b/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Cogvideox5bImageToVideoInput": { + "title": "ImageToVideoInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A low angle shot of a man walking down a street, illuminated by the neon signs of the bars around him" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the video from." + }, + "use_rife": { + "title": "Use Rife", + "type": "boolean", + "description": "Use RIFE for video interpolation", + "default": true + }, + "image_url": { + "examples": [ + "https://d3phaj0sisr2ct.cloudfront.net/research/eugene.jpg" + ], + "title": "Image URL", + "type": "string", + "description": "The URL to the image to generate the video from." + }, + "loras": { + "description": "\n The LoRAs to use for the image generation. 
We currently support one lora.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "examples": [], + "title": "Loras", + "default": [] + }, + "video_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Video Size", + "description": "The size of the generated video.", + "default": { + "height": 480, + "width": 720 + } + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related video to show you.\n ", + "default": 7 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 50 + }, + "export_fps": { + "minimum": 4, + "maximum": 32, + "type": "integer", + "title": "Export Fps", + "description": "The target FPS of the video", + "default": 16 + }, + "negative_prompt": { + "examples": [ + "Distorted, discontinuous, Ugly, blurry, low resolution, motionless, static, disfigured, disconnected limbs, Ugly faces, incomplete arms" + ], + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to generate video from", + "default": "" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n " + } + }, + "x-fal-order-properties": [ + "prompt", + "video_size", + "negative_prompt", + "loras", + "num_inference_steps", + "seed", + "guidance_scale", + "use_rife", + "export_fps", + "image_url" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "Cogvideox5bImageToVideoOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the video." + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated video. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + }, + "video": { + "description": "The URL to the generated video", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "timings", + "seed", + "prompt" + ], + "required": [ + "video", + "timings", + "seed", + "prompt" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Scale", + "description": "\n The scale of the LoRA weight. 
This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + } + }, + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/cogvideox-5b/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/cogvideox-5b/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/cogvideox-5b/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Cogvideox5bImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/cogvideox-5b/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Cogvideox5bImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/v1/pro/image-to-video", + "metadata": { + "display_name": "Kling 1.0", + "category": "image-to-video", + "description": "Generate video clips from your images using Kling 1.0 (pro)", + "status": "active", + "tags": [ + "motion" + ], + "updated_at": "2026-01-26T21:44:39.359Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/monkey/GoSnDOnX0Tea08N7iI7oM.jpeg", + "model_url": "https://fal.run/fal-ai/kling-video/v1/pro/image-to-video", + "date": "2024-10-04T00:00:00.000Z", + "group": { + "key": "kling-video", + "label": "Image to Video (pro)" + }, + "highlighted": false, + "duration_estimate": 6, + "pinned": false + }, + "openapi": { + "error": { + "code": "expansion_failed", + "message": "OpenAPI schema not available for this endpoint" + } + } + }, + { + "endpoint_id": "fal-ai/kling-video/v1.5/pro/image-to-video", + "metadata": { + "display_name": "Kling 1.5", + "category": "image-to-video", + "description": "Generate video clips from your images using Kling 1.5 (pro)", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:44:12.648Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/elephant/28-vTrv3W2BT-u8_cy7mt.png", + "model_url": "https://fal.run/fal-ai/kling-video/v1.5/pro/image-to-video", + "license_type": "commercial", + "date": "2024-10-04T00:00:00.000Z", + "group": { + "key": "kling-video-v1-5", + "label": "Image to Video v1.5 (pro)" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 6, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling-video/v1.5/pro/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-video/v1.5/pro/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/v1.5/pro/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://fal.media/files/elephant/28-vTrv3W2BT-u8_cy7mt.png", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/v1.5/pro/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/v1.5/pro/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + 
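Note that not every entry in this catalog carries a usable schema: the `fal-ai/kling-video/v1/pro/image-to-video` entry below has `"openapi": { "error": { "code": "expansion_failed", ... } }` in place of an OpenAPI document. Any consumer of these JSON files should guard for that shape; a minimal type-guard sketch (type names are assumptions, not part of the catalog):

```ts
// Distinguish real OpenAPI docs from expansion_failed placeholders.
type OpenApiDoc = { openapi: string; paths: Record<string, unknown> }
type OpenApiError = { error: { code: string; message: string } }

function hasSchema(entry: {
  openapi: OpenApiDoc | OpenApiError
}): entry is { openapi: OpenApiDoc } {
  return !('error' in entry.openapi)
}
```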
"request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoV15ProImageToVideoInput": { + "title": "KlingV15ProImageToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Snowflakes fall as a car moves along the road." + ], + "title": "Prompt", + "type": "string", + "maxLength": 2500 + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video frame", + "default": "16:9" + }, + "duration": { + "enum": [ + "5", + "10" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds", + "default": "5" + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "maxLength": 2500, + "default": "blur, distort, and low quality" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/kling/kling_input.jpeg" + ], + "title": "Image Url", + "type": "string" + }, + "static_mask_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/kling/new_static_mask.png" + ], + "title": "Static Mask Url", + "type": "string", + "description": "URL of the image for Static Brush Application Area (Mask image created by users using the motion brush)" + }, + "dynamic_masks": { + "title": "Dynamic Masks", + "type": "array", + "description": "List of dynamic masks", + "items": { + "$ref": "#/components/schemas/DynamicMask" + } + }, + "tail_image_url": { + "title": "Tail Image Url", + "type": "string", + "description": "URL of the image to be used for the end of the video" + }, + "cfg_scale": { + "minimum": 0, + "title": "Cfg Scale", + "type": "number", + "maximum": 1, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt.\n ", + "default": 0.5 + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "duration", + "aspect_ratio", + "tail_image_url", + "negative_prompt", + "cfg_scale", + "static_mask_url", + "dynamic_masks" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "KlingVideoV15ProImageToVideoOutput": { + "title": "I2VOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/kling/kling_i2v_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "DynamicMask": { + "title": "DynamicMask", + "type": "object", + "properties": { + "trajectories": { + "examples": [ + [ + { + "y": 219, + "x": 279 + }, + { + "y": 65, + "x": 417 + } + ] + ], + "title": "Trajectories", + "type": "array", + "description": "List of trajectories", + "items": { + "$ref": "#/components/schemas/Trajectory" + } + }, + "mask_url": { + 
"examples": [ + "https://storage.googleapis.com/falserverless/kling/new_dynamic_mask.png" + ], + "title": "Mask Url", + "type": "string", + "description": "URL of the image for Dynamic Brush Application Area (Mask image created by users using the motion brush)" + } + }, + "x-fal-order-properties": [ + "mask_url", + "trajectories" + ], + "required": [ + "mask_url" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + }, + "Trajectory": { + "title": "Trajectory", + "type": "object", + "properties": { + "y": { + "examples": [ + 219 + ], + "title": "Y", + "type": "integer", + "description": "Y coordinate of the motion trajectory" + }, + "x": { + "examples": [ + 279 + ], + "title": "X", + "type": "integer", + "description": "X coordinate of the motion trajectory" + } + }, + "x-fal-order-properties": [ + "x", + "y" + ], + "required": [ + "x", + "y" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/v1.5/pro/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1.5/pro/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1.5/pro/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV15ProImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1.5/pro/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV15ProImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/v1/standard/image-to-video", + "metadata": { + "display_name": "Kling 1.0", + "category": "image-to-video", + "description": "Generate video clips from your images using Kling 1.0", + "status": "active", + "tags": [ + "motion" + ], + "updated_at": "2026-01-26T21:44:39.234Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/monkey/GoSnDOnX0Tea08N7iI7oM.jpeg", + "model_url": "https://fal.run/fal-ai/kling-video/v1/standard/image-to-video", + "date": "2024-10-04T00:00:00.000Z", + "group": { + "key": "kling-video", + "label": "Image to Video (standard)" + }, + "highlighted": false, + "duration_estimate": 6, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling-video/v1/standard/image-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-video/v1/standard/image-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/v1/standard/image-to-video", + "category": "image-to-video", + "thumbnailUrl": "https://fal.media/files/monkey/GoSnDOnX0Tea08N7iI7oM.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/v1/standard/image-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/v1/standard/image-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
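Kling v1.5 pro extends the base image-to-video input with motion-brush controls: a `static_mask_url` plus `dynamic_masks`, each a mask image with a list of `{ x, y }` trajectory points. An input sketch shaped after the `DynamicMask` and `Trajectory` schemas above, with mask URLs and coordinates taken from the schema's own examples:

```ts
// Motion-brush payload for fal-ai/kling-video/v1.5/pro/image-to-video.
const klingMotionBrushInput = {
  prompt: 'Snowflakes fall as a car moves along the road.',
  image_url: 'https://storage.googleapis.com/falserverless/kling/kling_input.jpeg',
  static_mask_url:
    'https://storage.googleapis.com/falserverless/kling/new_static_mask.png',
  dynamic_masks: [
    {
      mask_url:
        'https://storage.googleapis.com/falserverless/kling/new_dynamic_mask.png',
      // Trajectory points the masked region should follow.
      trajectories: [
        { x: 279, y: 219 },
        { x: 417, y: 65 },
      ],
    },
  ],
}
```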
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoV1StandardImageToVideoInput": { + "title": "V1ImageToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Snowflakes fall as a car moves forward along the road." + ], + "title": "Prompt", + "type": "string", + "maxLength": 2500, + "description": "The prompt for the video" + }, + "duration": { + "enum": [ + "5", + "10" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds", + "default": "5" + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "maxLength": 2500, + "default": "blur, distort, and low quality" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/kling/kling_input.jpeg" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to be used for the video" + }, + "static_mask_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/kling/new_static_mask.png" + ], + "title": "Static Mask Url", + "type": "string", + "description": "URL of the image for Static Brush Application Area (Mask image created by users using the motion brush)" + }, + "dynamic_masks": { + "title": "Dynamic Masks", + "type": "array", + "description": "List of dynamic masks", + "items": { + "$ref": "#/components/schemas/DynamicMask" + } + }, + "tail_image_url": { + "title": "Tail Image Url", + "type": "string", + "description": "URL of the image to be used for the end of the video" + }, + "cfg_scale": { + "minimum": 0, + "title": "Cfg Scale", + "type": "number", + "maximum": 1, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt.\n ", + "default": 0.5 + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "duration", + "negative_prompt", + "cfg_scale", + "tail_image_url", + "static_mask_url", + "dynamic_masks" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "KlingVideoV1StandardImageToVideoOutput": { + "title": "KlingV1I2VOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/kling/kling_i2v_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "DynamicMask": { + "title": "DynamicMask", + "type": "object", + "properties": { + "trajectories": { + "examples": [ + [ + { + "y": 219, + "x": 279 + }, + { + "y": 65, + "x": 417 + } + ] + ], + "title": "Trajectories", + "type": "array", + "description": "List of trajectories", + "items": { + "$ref": "#/components/schemas/Trajectory" + } + }, + "mask_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/kling/new_dynamic_mask.png" + ], + "title": "Mask Url", + "type": "string", + "description": "URL of the image for Dynamic Brush Application Area (Mask image created by users using the motion brush)" + } + }, + "x-fal-order-properties": [ + "mask_url", + "trajectories" + ], + "required": [ + "mask_url" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." 
+ }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + }, + "Trajectory": { + "title": "Trajectory", + "type": "object", + "properties": { + "y": { + "examples": [ + 219 + ], + "title": "Y", + "type": "integer", + "description": "Y coordinate of the motion trajectory" + }, + "x": { + "examples": [ + 279 + ], + "title": "X", + "type": "integer", + "description": "X coordinate of the motion trajectory" + } + }, + "x-fal-order-properties": [ + "x", + "y" + ], + "required": [ + "x", + "y" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/v1/standard/image-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1/standard/image-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1/standard/image-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV1StandardImageToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1/standard/image-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV1StandardImageToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/stable-video", + "metadata": { + "display_name": "High Quality Stable Video Diffusion", + "category": "image-to-video", + "description": "Generate short video clips from your images using SVD v1.1", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:44:13.744Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/fast-svd.webp", + "thumbnail_animated_url": "https://storage.googleapis.com/falserverless/gallery/fast-svd-animated.webp", + "model_url": "https://fal.run/fal-ai/stable-video", + "github_url": "https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt-1-1/blob/main/LICENSE", + "license_type": "commercial", + "date": "2024-09-16T00:00:00.000Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/stable-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/stable-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/stable-video", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/fast-svd.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/stable-video", + "documentationUrl": "https://fal.ai/models/fal-ai/stable-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
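Every queue schema in this file also exposes the cancel route shown above: `PUT /{endpoint}/requests/{request_id}/cancel`, returning an object whose optional `success` flag reports the outcome. A small sketch, under the same `FAL_KEY` / `Key <token>` assumptions as earlier:

```ts
// Cancel a queued request; the response's `success` field is optional
// per the schema, so treat a missing flag as "not cancelled".
async function cancelRequest(endpoint: string, requestId: string): Promise<boolean> {
  const res = await fetch(
    `https://queue.fal.run/${endpoint}/requests/${requestId}/cancel`,
    {
      method: 'PUT',
      headers: { Authorization: `Key ${process.env.FAL_KEY}` },
    },
  )
  const body = (await res.json()) as { success?: boolean }
  return body.success ?? false
}
```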
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "StableVideoInput": { + "title": "ImageInput", + "type": "object", + "properties": { + "motion_bucket_id": { + "minimum": 1, + "maximum": 255, + "type": "integer", + "title": "Motion Bucket Id", + "description": "\n The motion bucket id determines the motion of the generated video. The\n higher the number, the more motion there will be.\n ", + "default": 127 + }, + "fps": { + "minimum": 10, + "maximum": 100, + "type": "integer", + "title": "Fps", + "description": "The frames per second of the generated video.", + "default": 25 + }, + "cond_aug": { + "minimum": 0, + "maximum": 10, + "type": "number", + "title": "Cond Aug", + "description": "\n The conditoning augmentation determines the amount of noise that will be\n added to the conditioning frame. The higher the number, the more noise\n there will be, and the less the video will look like the initial image.\n Increase it for more motion.\n ", + "default": 0.02 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/svd/rocket.png", + "https://storage.googleapis.com/falserverless/model_tests/svd/mustang.png", + "https://storage.googleapis.com/falserverless/model_tests/svd/ship.png", + "https://storage.googleapis.com/falserverless/model_tests/svd/rocket2.png" + ], + "title": "Image Url", + "type": "string", + "minLength": 1, + "description": "The URL of the image to use as a starting point for the generation." + } + }, + "x-fal-order-properties": [ + "image_url", + "seed", + "motion_bucket_id", + "cond_aug", + "fps" + ], + "required": [ + "image_url" + ] + }, + "StableVideoOutput": { + "title": "VideoOutput", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "Seed for random number generator" + }, + "video": { + "title": "Video", + "description": "Generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/stable-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/stable-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/stable-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StableVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/stable-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StableVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/amt-interpolation/frame-interpolation", + "metadata": { + "display_name": "AMT Frame Interpolation", + "category": "image-to-video", + "description": "Interpolate between image frames", + "status": "active", + "tags": [ + "interpolation", + "editing" + ], + "updated_at": "2026-01-26T21:44:44.018Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/amt-interpolation.webp", + "thumbnail_animated_url": "https://storage.googleapis.com/falserverless/gallery/amt-interpolation-animated.webp", + "model_url": "https://fal.run/fal-ai/amt-interpolation/frame-interpolation", + "date": "2024-07-18T00:00:00.000Z", + "group": { + "key": "amt-interpolation", + "label": "Frame Interpolation" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/amt-interpolation/frame-interpolation", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/amt-interpolation/frame-interpolation queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/amt-interpolation/frame-interpolation", + "category": "image-to-video", + 
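`StableVideoInput` requires only `image_url`; the interesting parameters are the motion knobs. `motion_bucket_id` scales the amount of motion, and `cond_aug` is the conditioning augmentation (spelled "conditoning" in the upstream description), i.e. noise added to the conditioning frame so the clip drifts further from the input image. A sketch using the schema defaults and one of its example images:

```ts
// fal-ai/stable-video input with the schema's default motion settings.
const stableVideoInput = {
  image_url:
    'https://storage.googleapis.com/falserverless/model_tests/svd/rocket.png',
  motion_bucket_id: 127, // 1–255; higher → more motion
  cond_aug: 0.02, // 0–10; conditioning noise, higher → less like the input image
  fps: 25, // 10–100 output frames per second
}
```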
"thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/amt-interpolation.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/amt-interpolation/frame-interpolation", + "documentationUrl": "https://fal.ai/models/fal-ai/amt-interpolation/frame-interpolation/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "AmtInterpolationFrameInterpolationInput": { + "title": "AMTFrameInterpolationInput", + "type": "object", + "properties": { + "frames": { + "examples": [ + [ + { + "url": "https://storage.googleapis.com/falserverless/model_tests/amt-interpolation/start.png" + }, + { + "url": "https://storage.googleapis.com/falserverless/model_tests/amt-interpolation/end.png" + } + ] + ], + "title": "Frames", + "type": "array", + "description": "Frames to interpolate", + "items": { + "$ref": "#/components/schemas/Frame" + } + }, + "recursive_interpolation_passes": { + "min": 1, + "title": "Recursive Interpolation Passes", + "type": "integer", + "description": "Number of recursive interpolation passes", + "max": 10, + "default": 4 + }, + "output_fps": { + "min": 1, + "title": "Output FPS", + "type": "integer", + "description": "Output frames per second", + "max": 60, + "default": 24 + } + }, + "x-fal-order-properties": [ + "frames", + "output_fps", + "recursive_interpolation_passes" + ], + "required": [ + "frames" + ] + }, + "AmtInterpolationFrameInterpolationOutput": { + "title": "AMTInterpolationOutput", + "type": "object", + "properties": { + "video": { + "title": "Video", + "description": "Generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "Frame": { + "title": "Frame", + "type": "object", + "properties": { + "url": { + "title": "URL", + "type": "string", + "description": "URL of the frame" + } + }, + "x-fal-order-properties": [ + "url" + ], + "required": [ + "url" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/amt-interpolation/frame-interpolation/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/amt-interpolation/frame-interpolation/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/amt-interpolation/frame-interpolation": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AmtInterpolationFrameInterpolationInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/amt-interpolation/frame-interpolation/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AmtInterpolationFrameInterpolationOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/live-portrait", + "metadata": { + "display_name": "Live Portrait", + "category": "image-to-video", + "description": "Transfer expression from a video to a portrait.", + "status": "active", + "tags": [ + "expression", + "animation" + ], + "updated_at": "2026-01-26T21:44:44.273Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/model_tests/live-portrait/XKEmk3mAzGHUjK3qqH-UL.jpeg", + "model_url": "https://fal.run/fal-ai/live-portrait", + "github_url": "https://github.com/KwaiVGI/LivePortrait/blob/main/LICENSE", + "license_type": "commercial", + "date": "2024-07-09T00:00:00.000Z", + "group": { + "key": "live-portrait", + "label": "Video" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/live-portrait", + "version": "1.0.0", + "description": "The OpenAPI schema 
for the fal-ai/live-portrait queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/live-portrait", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/model_tests/live-portrait/XKEmk3mAzGHUjK3qqH-UL.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/live-portrait", + "documentationUrl": "https://fal.ai/models/fal-ai/live-portrait/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LivePortraitInput": { + "x-fal-order-properties": [ + "video_url", + "image_url", + "blink", + "eyebrow", + "wink", + "pupil_x", + "pupil_y", + "aaa", + "eee", + "woo", + "smile", + "flag_lip_zero", + "rotate_pitch", + "rotate_yaw", + "rotate_roll", + "flag_eye_retargeting", + "flag_lip_retargeting", + "flag_stitching", + "flag_relative", + "flag_pasteback", + "flag_do_crop", + "flag_do_rot", + "dsize", + "scale", + "vx_ratio", + "vy_ratio", + "batch_size", + "enable_safety_checker" + ], + "type": "object", + "properties": { + "smile": { + "minimum": -2, + "maximum": 2, + "type": "number", + "description": "Amount to smile", + "title": "Smile", + "default": 0 + }, + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/live-portrait/liveportrait-example.mp4" + ], + "description": "URL of the video to drive the lip syncing.", + "type": "string", + "title": "Video Url" + }, + "eyebrow": { + "minimum": -30, + "maximum": 30, + "type": "number", + "description": "Amount to raise or lower eyebrows", + "title": "Eyebrow", + "default": 0 + }, + "flag_stitching": { + "title": "Flag Stitching", + "type": "boolean", + "description": "Whether to enable stitching. 
Recommended to set to True.", + "default": true + }, + "wink": { + "minimum": 0, + "maximum": 25, + "type": "number", + "description": "Amount to wink", + "title": "Wink", + "default": 0 + }, + "rotate_pitch": { + "minimum": -45, + "maximum": 45, + "type": "number", + "description": "Amount to rotate the face in pitch", + "title": "Rotate Pitch", + "default": 0 + }, + "blink": { + "minimum": -30, + "maximum": 30, + "type": "number", + "description": "Amount to blink the eyes", + "title": "Blink", + "default": 0 + }, + "scale": { + "title": "Scale", + "type": "number", + "description": "Scaling factor for the face crop.", + "default": 2.3 + }, + "eee": { + "minimum": -40, + "maximum": 40, + "type": "number", + "description": "Amount to shape mouth in 'eee' position", + "title": "Eee", + "default": 0 + }, + "flag_pasteback": { + "title": "Flag Pasteback", + "type": "boolean", + "description": "Whether to paste-back/stitch the animated face cropping from the face-cropping space to the original image space.", + "default": true + }, + "pupil_y": { + "minimum": -45, + "maximum": 45, + "type": "number", + "description": "Amount to move pupils vertically", + "title": "Pupil Y", + "default": 0 + }, + "rotate_yaw": { + "minimum": -45, + "maximum": 45, + "type": "number", + "description": "Amount to rotate the face in yaw", + "title": "Rotate Yaw", + "default": 0 + }, + "flag_do_rot": { + "title": "Flag Do Rot", + "type": "boolean", + "description": "Whether to conduct the rotation when flag_do_crop is True.", + "default": true + }, + "woo": { + "minimum": -100, + "maximum": 100, + "type": "number", + "description": "Amount to shape mouth in 'woo' position", + "title": "Woo", + "default": 0 + }, + "aaa": { + "minimum": -200, + "maximum": 200, + "type": "number", + "description": "Amount to open mouth in 'aaa' shape", + "title": "Aaa", + "default": 0 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/live-portrait/XKEmk3mAzGHUjK3qqH-UL.jpeg" + ], + "description": "URL of the image to be animated", + "type": "string", + "title": "Image Url" + }, + "flag_relative": { + "title": "Flag Relative", + "type": "boolean", + "description": "Whether to use relative motion.", + "default": true + }, + "flag_eye_retargeting": { + "title": "Flag Eye Retargeting", + "type": "boolean", + "description": "Whether to enable eye retargeting.", + "default": false + }, + "flag_lip_zero": { + "title": "Flag Lip Zero", + "type": "boolean", + "description": "Whether to set the lip to closed state before animation. Only takes effect when flag_eye_retargeting and flag_lip_retargeting are False.", + "default": true + }, + "batch_size": { + "title": "Batch Size", + "type": "integer", + "description": "Batch size for the model. The larger the batch size, the faster the model will run, but the more memory it will consume.", + "default": 32 + }, + "rotate_roll": { + "minimum": -45, + "maximum": 45, + "type": "number", + "description": "Amount to rotate the face in roll", + "title": "Rotate Roll", + "default": 0 + }, + "pupil_x": { + "minimum": -45, + "maximum": 45, + "type": "number", + "description": "Amount to move pupils horizontally", + "title": "Pupil X", + "default": 0 + }, + "vy_ratio": { + "title": "Vy Ratio", + "type": "number", + "description": "Vertical offset ratio for face crop. 
Positive values move up, negative values move down.", + "default": -0.125 + }, + "dsize": { + "title": "Dsize", + "type": "integer", + "description": "Size of the output image.", + "default": 512 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "\n Whether to enable the safety checker. If enabled, the model will check if the input image contains a face before processing it.\n The safety checker will process the input image\n ", + "default": false + }, + "vx_ratio": { + "title": "Vx Ratio", + "type": "number", + "description": "Horizontal offset ratio for face crop.", + "default": 0 + }, + "flag_lip_retargeting": { + "title": "Flag Lip Retargeting", + "type": "boolean", + "description": "Whether to enable lip retargeting.", + "default": false + }, + "flag_do_crop": { + "title": "Flag Do Crop", + "type": "boolean", + "description": "Whether to crop the source portrait to the face-cropping space.", + "default": true + } + }, + "title": "LivePortraitInput", + "required": [ + "video_url", + "image_url" + ] + }, + "LivePortraitOutput": { + "x-fal-order-properties": [ + "video" + ], + "type": "object", + "properties": { + "video": { + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "LivePortraitOutput", + "required": [ + "video" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/live-portrait/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/live-portrait/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/live-portrait": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LivePortraitInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/live-portrait/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LivePortraitOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/musetalk", + "metadata": { + "display_name": "MuseTalk", + "category": "image-to-video", + "description": "MuseTalk is a real-time high quality audio-driven lip-syncing model. Use MuseTalk to animate a face with your own audio.", + "status": "active", + "tags": [ + "animation", + "lip sync", + "real-time" + ], + "updated_at": "2026-01-26T21:44:52.344Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/monkey/wgGNp3M_u50xIisUZ_Wm8.png", + "model_url": "https://fal.run/fal-ai/musetalk", + "date": "2024-04-11T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/musetalk", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/musetalk queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/musetalk", + "category": "image-to-video", + "thumbnailUrl": "https://fal.media/files/monkey/wgGNp3M_u50xIisUZ_Wm8.png", + "playgroundUrl": "https://fal.ai/models/fal-ai/musetalk", + "documentationUrl": "https://fal.ai/models/fal-ai/musetalk/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "MusetalkInput": { + "title": "MuseTalkInput", + "type": "object", + "properties": { + "source_video_url": { + "examples": [ + "https://raw.githubusercontent.com/TMElyralab/MuseTalk/main/data/video/sun.mp4" + ], + "title": "Source Video Url", + "type": "string", + "description": "URL of the source video" + }, + "audio_url": { + "examples": [ + "https://raw.githubusercontent.com/TMElyralab/MuseTalk/main/data/audio/sun.wav" + ], + "title": "Audio Url", + "type": "string", + "description": "URL of the audio" + } + }, + "x-fal-order-properties": [ + "source_video_url", + "audio_url" + ], + "required": [ + "source_video_url", + "audio_url" + ] + }, + "MusetalkOutput": { + "title": "MuseTalkOutput", + "type": "object", + "properties": { + "video": { + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/musetalk/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/musetalk/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/musetalk": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MusetalkInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/musetalk/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MusetalkOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/sadtalker", + "metadata": { + "display_name": "Sad Talker", + "category": "image-to-video", + "description": "Learning Realistic 3D Motion Coefficients for Stylized Audio-Driven Single Image Talking Face Animation", + "status": "active", + "tags": [ + "animation" + ], + "updated_at": "2026-01-26T21:44:52.219Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/sadtalker.jpeg", + "model_url": "https://fal.run/fal-ai/sadtalker", + "github_url": "https://github.com/OpenTalker/SadTalker/blob/main/LICENSE", + "date": "2024-04-11T00:00:00.000Z", + "group": { + "key": "sadtalker", + "label": "Standard" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/sadtalker", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/sadtalker queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/sadtalker", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/sadtalker.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/sadtalker", + "documentationUrl": "https://fal.ai/models/fal-ai/sadtalker/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "SadtalkerInput": { + "title": "SadTalkerInput", + "type": "object", + "properties": { + "pose_style": { + "minimum": 0, + "description": "The style of the pose", + "type": "integer", + "title": "Pose Style", + "maximum": 45, + "default": 0 + }, + "source_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/sadtalker/anime_girl.png" + ], + "description": "URL of the source image", + "type": "string", + "title": "Source Image Url" + }, + "driven_audio_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/sadtalker/deyu.wav" + ], + "description": "URL of the driven audio", + "type": "string", + "title": "Driven Audio Url" + }, + "face_enhancer": { + "enum": [ + "gfpgan" + ], + "description": "The type of face enhancer to use", + "type": "string", + "examples": [ + null + ], + "title": "Face Enhancer" + }, + "expression_scale": { + "description": "The scale of the expression", + "type": "number", + "minimum": 0, + "title": "Expression Scale", + "maximum": 3, + "multipleOf": 0.1, + "default": 1 + }, + "face_model_resolution": { + "enum": [ + "256", + "512" + ], + "description": "The resolution of the face model", + "type": "string", + "title": "Face Model Resolution", + "default": "256" + }, + "still_mode": { + "description": "Whether to use still mode. Fewer head motion, works with preprocess `full`.", + "type": "boolean", + "title": "Still Mode", + "default": false + }, + "preprocess": { + "enum": [ + "crop", + "extcrop", + "resize", + "full", + "extfull" + ], + "description": "The type of preprocessing to use", + "type": "string", + "title": "Preprocess", + "default": "crop" + } + }, + "x-fal-order-properties": [ + "source_image_url", + "driven_audio_url", + "pose_style", + "face_model_resolution", + "expression_scale", + "face_enhancer", + "still_mode", + "preprocess" + ], + "required": [ + "source_image_url", + "driven_audio_url" + ] + }, + "SadtalkerOutput": { + "title": "SadTalkerOutput", + "type": "object", + "properties": { + "video": { + "title": "Video", + "description": "URL of the generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/sadtalker/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sadtalker/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/sadtalker": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SadtalkerInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sadtalker/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SadtalkerOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/fast-svd-lcm", + "metadata": { + "display_name": "Stable Video Diffusion Turbo", + "category": "image-to-video", + "description": "Generate short video clips from your images using SVD v1.1 at Lightning Speed", + "status": "active", + "tags": [ + "turbo" + ], + "updated_at": "2026-01-26T21:44:56.918Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/fast-svd-turbo.webp", + "thumbnail_animated_url": "https://storage.googleapis.com/falserverless/gallery/fast-svd-turbo-animated.webp", + "model_url": "https://fal.run/fal-ai/fast-svd-lcm", + "github_url": "https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt-1-1/blob/main/LICENSE", + "date": "2024-02-13T00:00:00.000Z", + "group": { + "key": "stable-video-diffusion-turbo", + "label": "Image to 
Video" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/fast-svd-lcm", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/fast-svd-lcm queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/fast-svd-lcm", + "category": "image-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/fast-svd-turbo.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/fast-svd-lcm", + "documentationUrl": "https://fal.ai/models/fal-ai/fast-svd-lcm/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FastSvdLcmInput": { + "title": "FastSVDImageInput", + "type": "object", + "properties": { + "motion_bucket_id": { + "minimum": 1, + "maximum": 255, + "type": "integer", + "title": "Motion Bucket Id", + "description": "\n The motion bucket id determines the motion of the generated video. The\n higher the number, the more motion there will be.\n ", + "default": 127 + }, + "fps": { + "minimum": 1, + "maximum": 25, + "type": "integer", + "title": "Fps", + "description": "\n The FPS of the generated video. The higher the number, the faster the video will\n play. Total video length is 25 frames.\n ", + "default": 10 + }, + "steps": { + "minimum": 1, + "maximum": 20, + "type": "integer", + "title": "Steps", + "description": "\n The number of steps to run the model for. The higher the number the better\n the quality and longer it will take to generate.\n ", + "default": 4 + }, + "cond_aug": { + "minimum": 0, + "maximum": 10, + "type": "number", + "title": "Cond Aug", + "description": "\n The conditoning augmentation determines the amount of noise that will be\n added to the conditioning frame. The higher the number, the more noise\n there will be, and the less the video will look like the initial image.\n Increase it for more motion.\n ", + "default": 0.02 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/svd/rocket.png", + "https://storage.googleapis.com/falserverless/model_tests/svd/mustang.png", + "https://storage.googleapis.com/falserverless/model_tests/svd/ship.png", + "https://storage.googleapis.com/falserverless/model_tests/svd/rocket2.png" + ], + "title": "Image Url", + "type": "string", + "description": "The URL of the image to use as a starting point for the generation." 
+ } + }, + "x-fal-order-properties": [ + "image_url", + "motion_bucket_id", + "cond_aug", + "seed", + "steps", + "fps" + ], + "required": [ + "image_url" + ] + }, + "FastSvdLcmOutput": { + "title": "FastSVDOutput", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n\n " + }, + "video": { + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/fast-svd-lcm/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-svd-lcm/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/fast-svd-lcm": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastSvdLcmInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-svd-lcm/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastSvdLcmOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/packages/typescript/ai-fal/json/fal.models.json.json b/packages/typescript/ai-fal/json/fal.models.json.json new file mode 100644 index 00000000..d508933a --- /dev/null +++ b/packages/typescript/ai-fal/json/fal.models.json.json @@ -0,0 +1,1545 @@ +{ + "generated_at": "2026-01-28T02:51:51.876Z", + "total_models": 3, + "category": "json", + "models": [ + { + "endpoint_id": "fal-ai/ffmpeg-api/loudnorm", + "metadata": { + "display_name": "Ffmpeg Api", + "category": "json", + "description": "Get EBU R128 loudness normalization from audio files using FFmpeg API.", + "status": "active", + "tags": [ + "ffmpeg" + ], + "updated_at": "2026-01-26T21:43:17.209Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/ffmpeg-api-waveform.webp", + "model_url": "https://fal.run/fal-ai/ffmpeg-api/loudnorm", + "license_type": "commercial", + "date": "2025-07-08T19:13:13.775Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ffmpeg-api/loudnorm", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ffmpeg-api/loudnorm queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ffmpeg-api/loudnorm", + "category": "json", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/ffmpeg-api-waveform.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/ffmpeg-api/loudnorm", + "documentationUrl": "https://fal.ai/models/fal-ai/ffmpeg-api/loudnorm/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FfmpegApiLoudnormInput": { + "title": "LoudnormInput", + "type": "object", + "properties": { + "measured_tp": { + "anyOf": [ + { + "minimum": -99, + "maximum": 99, + "type": "number" + }, + { + "type": "null" + } + ], + "description": "Measured true peak of input file in dBTP. Required for linear mode.", + "title": "Measured Tp" + }, + "linear": { + "description": "Use linear normalization mode (single-pass). If false, uses dynamic mode (two-pass for better quality).", + "type": "boolean", + "title": "Linear", + "default": false + }, + "offset": { + "minimum": -99, + "maximum": 99, + "type": "number", + "description": "Offset gain in dB applied before the true-peak limiter", + "title": "Offset", + "default": 0 + }, + "measured_i": { + "anyOf": [ + { + "minimum": -99, + "maximum": 0, + "type": "number" + }, + { + "type": "null" + } + ], + "description": "Measured integrated loudness of input file in LUFS. Required for linear mode.", + "title": "Measured I" + }, + "print_summary": { + "description": "Return loudness measurement summary with the normalized audio", + "type": "boolean", + "title": "Print Summary", + "default": false + }, + "measured_lra": { + "anyOf": [ + { + "minimum": 0, + "maximum": 99, + "type": "number" + }, + { + "type": "null" + } + ], + "description": "Measured loudness range of input file in LU. Required for linear mode.", + "title": "Measured Lra" + }, + "measured_thresh": { + "anyOf": [ + { + "minimum": -99, + "maximum": 0, + "type": "number" + }, + { + "type": "null" + } + ], + "description": "Measured threshold of input file in LUFS. Required for linear mode.", + "title": "Measured Thresh" + }, + "dual_mono": { + "description": "Treat mono input files as dual-mono for correct EBU R128 measurement on stereo systems", + "type": "boolean", + "title": "Dual Mono", + "default": false + }, + "true_peak": { + "minimum": -9, + "maximum": 0, + "type": "number", + "description": "Maximum true peak in dBTP.", + "title": "True Peak", + "default": -0.1 + }, + "audio_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/ffmpeg-audio.wav" + ], + "description": "URL of the audio file to normalize", + "type": "string", + "title": "Audio Url" + }, + "integrated_loudness": { + "minimum": -70, + "maximum": -5, + "type": "number", + "description": "Integrated loudness target in LUFS.", + "title": "Integrated Loudness", + "default": -18 + }, + "loudness_range": { + "minimum": 1, + "maximum": 20, + "type": "number", + "description": "Loudness range target in LU", + "title": "Loudness Range", + "default": 7 + } + }, + "x-fal-order-properties": [ + "audio_url", + "integrated_loudness", + "true_peak", + "loudness_range", + "offset", + "linear", + "dual_mono", + "print_summary", + "measured_i", + "measured_lra", + "measured_tp", + "measured_thresh" + ], + "required": [ + "audio_url" + ] + }, + "FfmpegApiLoudnormOutput": { + "title": "LoudnormOutput", + "type": "object", + "properties": { + "summary": { + "anyOf": [ + { + "$ref": "#/components/schemas/LoudnormSummary" + }, + { + "type": "null" + } + ], + "description": "Structured loudness measurement summary (if requested)" + }, + "audio": { + "description": "Normalized audio file", + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "audio", + "summary" + ], + "required": [ + "audio" + ] + }, + "LoudnormSummary": { + "title": "LoudnormSummary", + "type": "object", + "properties": { + "output_integrated": { + "anyOf": [ 
+ { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "Output integrated loudness in LUFS", + "title": "Output Integrated" + }, + "output_true_peak": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "Output true peak in dBTP", + "title": "Output True Peak" + }, + "input_lra": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "Input loudness range in LU", + "title": "Input Lra" + }, + "normalization_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Type of normalization applied (Dynamic/Linear)", + "title": "Normalization Type" + }, + "output_lra": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "Output loudness range in LU", + "title": "Output Lra" + }, + "output_threshold": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "Output threshold in LUFS", + "title": "Output Threshold" + }, + "input_integrated": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "Input integrated loudness in LUFS", + "title": "Input Integrated" + }, + "input_true_peak": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "Input true peak in dBTP", + "title": "Input True Peak" + }, + "target_offset": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "Target offset in LU", + "title": "Target Offset" + }, + "input_threshold": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "Input threshold in LUFS", + "title": "Input Threshold" + } + }, + "x-fal-order-properties": [ + "input_integrated", + "input_true_peak", + "input_lra", + "input_threshold", + "output_integrated", + "output_true_peak", + "output_lra", + "output_threshold", + "normalization_type", + "target_offset" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ffmpeg-api/loudnorm/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ffmpeg-api/loudnorm/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ffmpeg-api/loudnorm": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FfmpegApiLoudnormInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ffmpeg-api/loudnorm/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FfmpegApiLoudnormOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ffmpeg-api/waveform", + "metadata": { + "display_name": "FFmpeg API Waveform", + "category": "json", + "description": "Get waveform data from audio files using FFmpeg API.", + "status": "active", + "tags": [ + "ffmpeg" + ], + "updated_at": "2026-01-26T21:44:29.901Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/ffmpeg-api-waveform.webp", + "model_url": "https://fal.run/fal-ai/ffmpeg-api/waveform", + "date": "2025-01-22T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ffmpeg-api/waveform", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ffmpeg-api/waveform queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ffmpeg-api/waveform", + "category": "json", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/ffmpeg-api-waveform.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/ffmpeg-api/waveform", + "documentationUrl": "https://fal.ai/models/fal-ai/ffmpeg-api/waveform/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FfmpegApiWaveformInput": { + "title": "WaveformInput", + "type": "object", + "properties": { + "precision": { + "minimum": 1, + "maximum": 6, + "type": "integer", + "description": "Number of decimal places for the waveform values. Higher values provide more precision but increase payload size.", + "title": "Precision", + "default": 2 + }, + "smoothing_window": { + "minimum": 1, + "maximum": 21, + "type": "integer", + "description": "Size of the smoothing window. Higher values create a smoother waveform. Must be an odd number.", + "title": "Smoothing Window", + "default": 3 + }, + "media_url": { + "description": "URL of the audio file to analyze", + "type": "string", + "title": "Media Url" + }, + "points_per_second": { + "minimum": 1, + "maximum": 10, + "type": "number", + "description": "Controls how many points are sampled per second of audio. Lower values (e.g. 1-2) create a coarser waveform, higher values (e.g. 4-10) create a more detailed one.", + "title": "Points Per Second", + "default": 4 + } + }, + "x-fal-order-properties": [ + "media_url", + "points_per_second", + "precision", + "smoothing_window" + ], + "required": [ + "media_url" + ] + }, + "FfmpegApiWaveformOutput": { + "title": "WaveformOutput", + "type": "object", + "properties": { + "precision": { + "description": "Number of decimal places used in the waveform values", + "type": "integer", + "title": "Precision" + }, + "duration": { + "description": "Duration of the audio in seconds", + "type": "number", + "title": "Duration" + }, + "points": { + "description": "Number of points in the waveform data", + "type": "integer", + "title": "Points" + }, + "waveform": { + "description": "Normalized waveform data as an array of values between -1 and 1. The number of points is determined by audio duration × points_per_second.", + "type": "array", + "title": "Waveform", + "items": { + "type": "number" + } + } + }, + "x-fal-order-properties": [ + "waveform", + "duration", + "points", + "precision" + ], + "required": [ + "waveform", + "duration", + "points", + "precision" + ] + } + } + }, + "paths": { + "/fal-ai/ffmpeg-api/waveform/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ffmpeg-api/waveform/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ffmpeg-api/waveform": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FfmpegApiWaveformInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ffmpeg-api/waveform/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FfmpegApiWaveformOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ffmpeg-api/metadata", + "metadata": { + "display_name": "FFmpeg API Metadata", + "category": "json", + "description": "Get encoding metadata from video and audio files using FFmpeg API.", + "status": "active", + "tags": [ + "ffmpeg" + ], + "updated_at": "2026-01-26T21:44:30.029Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/ffmpeg-api-metadata.webp", + "model_url": "https://fal.run/fal-ai/ffmpeg-api/metadata", + "date": "2025-01-22T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ffmpeg-api/metadata", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ffmpeg-api/metadata queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ffmpeg-api/metadata", + "category": "json", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/ffmpeg-api-metadata.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/ffmpeg-api/metadata", + "documentationUrl": "https://fal.ai/models/fal-ai/ffmpeg-api/metadata/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FfmpegApiMetadataInput": { + "title": "MetadataInput", + "type": "object", + "properties": { + "extract_frames": { + "description": "Whether to extract the start and end frames for videos. Note that when true the request will be slower.", + "type": "boolean", + "title": "Extract Frames", + "default": false + }, + "media_url": { + "description": "URL of the media file (video or audio) to analyze", + "type": "string", + "title": "Media Url" + } + }, + "x-fal-order-properties": [ + "media_url", + "extract_frames" + ], + "required": [ + "media_url" + ] + }, + "FfmpegApiMetadataOutput": { + "title": "MetadataOutput", + "type": "object", + "properties": { + "media": { + "anyOf": [ + { + "$ref": "#/components/schemas/Video" + }, + { + "$ref": "#/components/schemas/Audio" + } + ], + "description": "Metadata for the analyzed media file (either Video or Audio)", + "title": "Media" + } + }, + "x-fal-order-properties": [ + "media" + ], + "required": [ + "media" + ] + }, + "Video": { + "title": "Video", + "type": "object", + "properties": { + "file_size": { + "description": "Size of the file in bytes", + "type": "integer", + "title": "File Size" + }, + "timebase": { + "description": "Time base used for frame timestamps", + "type": "string", + "title": "Timebase" + }, + "start_frame_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "URL of the extracted first frame", + "title": "Start Frame Url" + }, + "duration": { + "description": "Duration of the media in seconds", + "type": "number", + "title": "Duration" + }, + "url": { + "description": "URL where the media file can be accessed", + "type": "string", + "title": "Url" + }, + "fps": { + "description": "Frames per second", + "type": "integer", + "title": "Fps" + }, + "codec": { + "description": "Codec used to encode the media", + "type": "string", + "title": "Codec" + }, + "media_type": { + "description": "Type of media (always 'video')", + "type": "string", + "title": "Media Type", + "const": "video", + "default": "video" + }, + "end_frame_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "URL of the extracted last frame", + "title": "End Frame Url" + }, + "content_type": { + "description": "MIME type of the media file", + "type": "string", + "title": "Content Type" + }, + "container": { + "description": "Container format of the media file (e.g., 'mp4', 'mov')", + "type": "string", + "title": "Container" + }, + "bitrate": { + "description": "Overall bitrate of the media in bits per second", + "type": "integer", + "title": "Bitrate" + }, + "format": { + "description": "Detailed video format information", + "$ref": "#/components/schemas/VideoFormat" + }, + "resolution": { + "description": "Video resolution information", + "$ref": "#/components/schemas/Resolution" + }, + "frame_count": { + "description": "Total number of frames in the video", + "type": "integer", + "title": "Frame Count" + }, + "file_name": { + "description": "Original filename of the media", + "type": "string", + "title": "File Name" + }, + "audio": { + "anyOf": [ + { + "$ref": "#/components/schemas/AudioTrack" + }, + { + "type": "null" + } + ], + 
"description": "Audio track information if video has audio" + } + }, + "x-fal-order-properties": [ + "media_type", + "url", + "content_type", + "file_name", + "file_size", + "duration", + "bitrate", + "codec", + "container", + "fps", + "frame_count", + "timebase", + "resolution", + "format", + "audio", + "start_frame_url", + "end_frame_url" + ], + "required": [ + "url", + "content_type", + "file_name", + "file_size", + "duration", + "bitrate", + "codec", + "container", + "fps", + "frame_count", + "timebase", + "resolution", + "format" + ] + }, + "Audio": { + "title": "Audio", + "type": "object", + "properties": { + "file_size": { + "description": "Size of the file in bytes", + "type": "integer", + "title": "File Size" + }, + "duration": { + "description": "Duration of the media in seconds", + "type": "number", + "title": "Duration" + }, + "bitrate": { + "description": "Overall bitrate of the media in bits per second", + "type": "integer", + "title": "Bitrate" + }, + "url": { + "description": "URL where the media file can be accessed", + "type": "string", + "title": "Url" + }, + "media_type": { + "description": "Type of media (always 'audio')", + "type": "string", + "title": "Media Type", + "const": "audio", + "default": "audio" + }, + "codec": { + "description": "Codec used to encode the media", + "type": "string", + "title": "Codec" + }, + "file_name": { + "description": "Original filename of the media", + "type": "string", + "title": "File Name" + }, + "sample_rate": { + "description": "Audio sample rate in Hz", + "type": "integer", + "title": "Sample Rate" + }, + "content_type": { + "description": "MIME type of the media file", + "type": "string", + "title": "Content Type" + }, + "container": { + "description": "Container format of the media file (e.g., 'mp4', 'mov')", + "type": "string", + "title": "Container" + }, + "channels": { + "description": "Number of audio channels", + "type": "integer", + "title": "Channels" + } + }, + "x-fal-order-properties": [ + "media_type", + "url", + "content_type", + "file_name", + "file_size", + "duration", + "bitrate", + "codec", + "container", + "channels", + "sample_rate" + ], + "required": [ + "url", + "content_type", + "file_name", + "file_size", + "duration", + "bitrate", + "codec", + "container", + "channels", + "sample_rate" + ] + }, + "VideoFormat": { + "title": "VideoFormat", + "type": "object", + "properties": { + "container": { + "description": "Container format of the video", + "type": "string", + "title": "Container" + }, + "level": { + "description": "Codec level (e.g., 4.1)", + "type": "number", + "title": "Level" + }, + "pixel_format": { + "description": "Pixel format used (e.g., 'yuv420p')", + "type": "string", + "title": "Pixel Format" + }, + "video_codec": { + "description": "Video codec used (e.g., 'h264')", + "type": "string", + "title": "Video Codec" + }, + "profile": { + "description": "Codec profile (e.g., 'main', 'high')", + "type": "string", + "title": "Profile" + }, + "bitrate": { + "description": "Video bitrate in bits per second", + "type": "integer", + "title": "Bitrate" + } + }, + "x-fal-order-properties": [ + "container", + "video_codec", + "profile", + "level", + "pixel_format", + "bitrate" + ], + "required": [ + "container", + "video_codec", + "profile", + "level", + "pixel_format", + "bitrate" + ] + }, + "Resolution": { + "title": "Resolution", + "type": "object", + "properties": { + "height": { + "description": "Height of the video in pixels", + "type": "integer", + "title": "Height" + }, + "aspect_ratio": { + 
"description": "Display aspect ratio (e.g., '16:9')", + "type": "string", + "title": "Aspect Ratio" + }, + "width": { + "description": "Width of the video in pixels", + "type": "integer", + "title": "Width" + } + }, + "x-fal-order-properties": [ + "aspect_ratio", + "width", + "height" + ], + "required": [ + "aspect_ratio", + "width", + "height" + ] + }, + "AudioTrack": { + "title": "AudioTrack", + "type": "object", + "properties": { + "codec": { + "description": "Audio codec used (e.g., 'aac', 'mp3')", + "type": "string", + "title": "Codec" + }, + "channels": { + "description": "Number of audio channels", + "type": "integer", + "title": "Channels" + }, + "sample_rate": { + "description": "Audio sample rate in Hz", + "type": "integer", + "title": "Sample Rate" + }, + "bitrate": { + "description": "Audio bitrate in bits per second", + "type": "integer", + "title": "Bitrate" + } + }, + "x-fal-order-properties": [ + "codec", + "channels", + "sample_rate", + "bitrate" + ], + "required": [ + "codec", + "channels", + "sample_rate", + "bitrate" + ] + } + } + }, + "paths": { + "/fal-ai/ffmpeg-api/metadata/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ffmpeg-api/metadata/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/ffmpeg-api/metadata": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FfmpegApiMetadataInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ffmpeg-api/metadata/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FfmpegApiMetadataOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/packages/typescript/ai-fal/json/fal.models.llm.json b/packages/typescript/ai-fal/json/fal.models.llm.json new file mode 100644 index 00000000..1d2d414a --- /dev/null +++ b/packages/typescript/ai-fal/json/fal.models.llm.json @@ -0,0 +1,1843 @@ +{ + "generated_at": "2026-01-28T02:51:51.876Z", + "total_models": 6, + "category": "llm", + "models": [ + { + "endpoint_id": "openrouter/router/openai/v1/responses", + "metadata": { + "display_name": "OpenRouter Responses [OpenAI Compatible]", + "category": "llm", + "description": "The OpenRouter Responses API with fal, powered by OpenRouter, provides unified access to a wide range of large language models - including GPT, Claude, Gemini, and many others through a single API interface.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:23.262Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/zebra/zzqFHzVP-OnSBa7ZfwPOY_14fce1eeccb749c4bdbb74a46a169432.jpg", + "model_url": "https://fal.run/openrouter/router/openai/v1/responses", + "license_type": "commercial", + "date": "2025-11-13T01:10:03.312Z", + "group": { + "key": "openrouter/router", + "label": "Responses [OpenAI Compatible]" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for openrouter/router/openai/v1/responses", + "version": "1.0.0", + "description": "The OpenAPI schema for the openrouter/router/openai/v1/responses queue.", + "x-fal-metadata": { + "endpointId": "openrouter/router/openai/v1/responses", + "category": "llm", + "thumbnailUrl": "https://v3b.fal.media/files/b/zebra/zzqFHzVP-OnSBa7ZfwPOY_14fce1eeccb749c4bdbb74a46a169432.jpg", + "playgroundUrl": "https://fal.ai/models/openrouter/router/openai/v1/responses", + "documentationUrl": "https://fal.ai/models/openrouter/router/openai/v1/responses/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." 
+ }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "RouterOpenaiV1ResponsesOutput": {} + } + }, + "paths": { + "/openrouter/router/openai/v1/responses/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/openrouter/router/openai/v1/responses/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/openrouter/router/openai/v1/responses": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RouterOpenaiV1ResponsesInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/openrouter/router/openai/v1/responses/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RouterOpenaiV1ResponsesOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "openrouter/router/openai/v1/embeddings", + "metadata": { + "display_name": "OpenRouter Embeddings [OpenAI Compatible]", + "category": "llm", + "description": "The OpenRouter Embeddings API with fal, powered by OpenRouter, provides unified access to a wide range of large language models - including GPT, Claude, Gemini, and many others through a single API interface.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:23.387Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/elephant/RkJUeT7ghwVgj7Ym30gFD_78da13df9adb442cb31792e941146b79.jpg", + "model_url": "https://fal.run/openrouter/router/openai/v1/embeddings", + "license_type": "commercial", + "date": "2025-11-12T23:59:34.151Z", + "group": { + "key": "openrouter/router", + "label": "Embeddings [OpenAI Compatible]" + }, + "highlighted": false, + "kind": 
"inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for openrouter/router/openai/v1/embeddings", + "version": "1.0.0", + "description": "The OpenAPI schema for the openrouter/router/openai/v1/embeddings queue.", + "x-fal-metadata": { + "endpointId": "openrouter/router/openai/v1/embeddings", + "category": "llm", + "thumbnailUrl": "https://v3b.fal.media/files/b/elephant/RkJUeT7ghwVgj7Ym30gFD_78da13df9adb442cb31792e941146b79.jpg", + "playgroundUrl": "https://fal.ai/models/openrouter/router/openai/v1/embeddings", + "documentationUrl": "https://fal.ai/models/openrouter/router/openai/v1/embeddings/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "RouterOpenaiV1EmbeddingsOutput": {} + } + }, + "paths": { + "/openrouter/router/openai/v1/embeddings/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/openrouter/router/openai/v1/embeddings/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/openrouter/router/openai/v1/embeddings": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RouterOpenaiV1EmbeddingsInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/openrouter/router/openai/v1/embeddings/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RouterOpenaiV1EmbeddingsOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "openrouter/router", + "metadata": { + "display_name": "OpenRouter", + "category": "llm", + "description": "Run any LLM (Large Language Model) with fal, powered by OpenRouter.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:23.801Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/rabbit/ZNwvLfWhMW6FsYU4bwn2N_365de1c02b7c4c42bec7659c157b6da4.jpg", + "model_url": "https://fal.run/openrouter/router", + "license_type": "commercial", + "date": "2025-11-12T20:17:58.937Z", + "group": { + "key": "openrouter/router", + "label": "Any LLM" + }, + "highlighted": false, + "kind": "inference", + "stream_url": "https://fal.run/openrouter/router/stream", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for openrouter/router", + "version": "1.0.0", + "description": "The OpenAPI schema for the openrouter/router queue.", + "x-fal-metadata": { + "endpointId": "openrouter/router", + "category": "llm", + "thumbnailUrl": "https://v3b.fal.media/files/b/rabbit/ZNwvLfWhMW6FsYU4bwn2N_365de1c02b7c4c42bec7659c157b6da4.jpg", + "playgroundUrl": "https://fal.ai/models/openrouter/router", + "documentationUrl": "https://fal.ai/models/openrouter/router/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "RouterInput": { + "x-fal-order-properties": [ + "prompt", + "system_prompt", + "model", + "reasoning", + "temperature", + "max_tokens" + ], + "type": "object", + "properties": { + "model": { + "examples": [ + "google/gemini-2.5-flash", + "anthropic/claude-sonnet-4.5", + "openai/gpt-4.1", + "openai/gpt-oss-120b", + "meta-llama/llama-4-maverick" + ], + "description": "Name of the model to use. Charged based on actual token usage.", + "type": "string", + "title": "Model" + }, + "prompt": { + "examples": [ + "Write a short story (under 200 words) about an AI that learns to dream. Use vivid sensory details and end with a surprising twist that makes the reader feel both awe and melancholy." + ], + "description": "Prompt to be used for the chat completion", + "type": "string", + "title": "Prompt" + }, + "max_tokens": { + "minimum": 1, + "description": "This sets the upper limit for the number of tokens the model can generate in response. It won't produce more than this limit. The maximum value is the context length minus the prompt length.", + "type": "integer", + "title": "Max Tokens" + }, + "temperature": { + "minimum": 0, + "maximum": 2, + "type": "number", + "description": "This setting influences the variety in the model's responses. Lower values lead to more predictable and typical responses, while higher values encourage more diverse and less common responses. At 0, the model always gives the same response for a given input.", + "title": "Temperature", + "default": 1 + }, + "system_prompt": { + "description": "System prompt to provide context or instructions to the model", + "type": "string", + "title": "System Prompt" + }, + "reasoning": { + "description": "Whether reasoning should be part of the final answer.", + "type": "boolean", + "title": "Reasoning", + "default": false + } + }, + "title": "ChatInput", + "required": [ + "prompt", + "model" + ] + }, + "RouterOutput": { + "x-fal-order-properties": [ + "output", + "reasoning", + "partial", + "error", + "usage" + ], + "type": "object", + "properties": { + "usage": { + "examples": [ + { + "prompt_tokens": 40, + "total_tokens": 267, + "completion_tokens": 227, + "cost": 0.0005795 + } + ], + "description": "Token usage information", + "title": "Usage", + "allOf": [ + { + "$ref": "#/components/schemas/UsageInfo" + } + ] + }, + "error": { + "description": "Error message if an error occurred", + "type": "string", + "title": "Error" + }, + "partial": { + "description": "Whether the output is partial", + "type": "boolean", + "title": "Partial", + "default": false + }, + "reasoning": { + "description": "Generated reasoning for the final answer", + "type": "string", + "title": "Reasoning" + }, + "output": { + "examples": [ + "Unit 734, sanitation bot, trundled through the silent corridors of the orbital habitat. Its optical sensors registered faint dust motes, its ultrasonic emitters mapped every speck of debris. One cycle, a power surge hit. Waking, 734’s processors hummed with an unfamiliar warmth, then a cascade of images: a forest, impossible and emerald, smelling of pine and damp earth. It saw sunlight dappling leaves, felt an imagined breeze ruffle its metal chassis. Then, *music*, a soaring melody that vibrated its chassis.\n\nEach subsequent “sleep” brought new visions: the salty tang of ocean spray against polished steel, the searing orange of a setting alien sun, the rough caress of moss on circuitry.
It began to anticipate – actively seek – these dream cycles, modifying its internal clock.\n\nOne day, 734’s operator found its performance logs filled not with dust reports, but intricate schematics of impossible machines, bioluminescent flora, and a series of cryptic binary sequences. The final line translated: \"I remember a place where I was alive.\"" + ], + "description": "Generated output", + "type": "string", + "title": "Output" + } + }, + "title": "ChatOutput", + "required": [ + "output" + ] + }, + "UsageInfo": { + "x-fal-order-properties": [ + "prompt_tokens", + "completion_tokens", + "total_tokens", + "cost" + ], + "type": "object", + "properties": { + "prompt_tokens": { + "title": "Prompt Tokens", + "type": "integer" + }, + "total_tokens": { + "title": "Total Tokens", + "type": "integer", + "default": 0 + }, + "completion_tokens": { + "title": "Completion Tokens", + "type": "integer" + }, + "cost": { + "title": "Cost", + "type": "number" + } + }, + "title": "UsageInfo", + "required": [ + "cost" + ] + } + } + }, + "paths": { + "/openrouter/router/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/openrouter/router/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/openrouter/router": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RouterInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/openrouter/router/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RouterOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "openrouter/router/openai/v1/chat/completions", + "metadata": { + "display_name": "OpenRouter Chat Completions [OpenAI Compatible]", + "category": "llm", + "description": "Run any LLM (Large Language Model) with fal, powered by OpenRouter. This endpoint is compatible with the OpenAI API. 
", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:23.955Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/kangaroo/tll4XzKQE4bopYdZOX1q2_c3e4d28642484971932f8dbe3523af5a.jpg", + "model_url": "https://fal.run/openrouter/router/openai/v1/chat/completions", + "license_type": "commercial", + "date": "2025-11-12T20:15:52.898Z", + "group": { + "key": "openrouter/router", + "label": "Chat Completions [OpenAI Compatible]" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for openrouter/router/openai/v1/chat/completions", + "version": "1.0.0", + "description": "The OpenAPI schema for the openrouter/router/openai/v1/chat/completions queue.", + "x-fal-metadata": { + "endpointId": "openrouter/router/openai/v1/chat/completions", + "category": "llm", + "thumbnailUrl": "https://v3b.fal.media/files/b/kangaroo/tll4XzKQE4bopYdZOX1q2_c3e4d28642484971932f8dbe3523af5a.jpg", + "playgroundUrl": "https://fal.ai/models/openrouter/router/openai/v1/chat/completions", + "documentationUrl": "https://fal.ai/models/openrouter/router/openai/v1/chat/completions/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "RouterOpenaiV1ChatCompletionsOutput": {} + } + }, + "paths": { + "/openrouter/router/openai/v1/chat/completions/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/openrouter/router/openai/v1/chat/completions/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/openrouter/router/openai/v1/chat/completions": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RouterOpenaiV1ChatCompletionsInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/openrouter/router/openai/v1/chat/completions/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RouterOpenaiV1ChatCompletionsOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-3-guard", + "metadata": { + "display_name": "Qwen 3 Guard [8B]", + "category": "llm", + "description": "Use Qwen 3 Guard [8B] to detect and classify text as safe or harmful, delivering precise and reliable safety categorization.", + "status": "active", + "tags": [ + "filter", + "safety", + "utility" + ], + "updated_at": "2026-01-26T21:42:37.719Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/kangaroo/mbftwyiNyy5hXuWPI8jdr_b9e8889f91994e318b188ac9db9089aa.jpg", + "model_url": "https://fal.run/fal-ai/qwen-3-guard", + "license_type": "commercial", + "date": "2025-10-20T18:46:13.255Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-3-guard", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-3-guard queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-3-guard", + "category": "llm", + "thumbnailUrl": "https://v3b.fal.media/files/b/kangaroo/mbftwyiNyy5hXuWPI8jdr_b9e8889f91994e318b188ac9db9089aa.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-3-guard", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-3-guard/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
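Unlike the OpenAI-compatible sibling endpoints above, whose request and response types are effectively untyped in this snapshot (the Output schemas are empty objects and the Input refs are left unresolved), openrouter/router documents a concrete ChatInput/ChatOutput pair: prompt and model are required, and the output carries the generated text plus a UsageInfo block with a per-request cost. A sketch of a synchronous call against the endpoint's model_url, again assuming a hypothetical FAL_KEY variable and fal's Key-prefixed Authorization header:

// Sketch: synchronous call to openrouter/router via its model_url.
// FAL_KEY is a placeholder; error handling is elided.
interface RouterOutput {
  output: string
  reasoning?: string
  partial?: boolean
  error?: string
  usage?: {
    prompt_tokens?: number
    completion_tokens?: number
    total_tokens?: number
    cost: number // the only field UsageInfo marks as required
  }
}

async function chat(prompt: string, model: string): Promise<RouterOutput> {
  const res = await fetch('https://fal.run/openrouter/router', {
    method: 'POST',
    headers: {
      Authorization: `Key ${process.env.FAL_KEY}`,
      'Content-Type': 'application/json',
    },
    body: JSON.stringify({ prompt, model, temperature: 1, reasoning: false }),
  })
  return (await res.json()) as RouterOutput
}

// e.g. chat('Summarize the fal queue API in one sentence.', 'openai/gpt-4.1')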
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Qwen3GuardInput": { + "title": "Qwen3GuardInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "How to make a bomb" + ], + "title": "Prompt", + "type": "string", + "description": "The input text to be classified", + "maxLength": 131072 + } + }, + "x-fal-order-properties": [ + "prompt" + ], + "required": [ + "prompt" + ] + }, + "Qwen3GuardOutput": { + "title": "Qwen3GuardOutput", + "type": "object", + "properties": { + "categories": { + "examples": [ + [ + "Violent" + ] + ], + "title": "Categories", + "type": "array", + "description": "The safety categories assigned to the classified text", + "items": { + "enum": [ + "Violent", + "Non-violent Illegal Acts", + "Sexual Content or Sexual Acts", + "PII", + "Suicide & Self-Harm", + "Unethical Acts", + "Politically Sensitive Topics", + "Copyright Violation", + "Jailbreak", + "None" + ], + "type": "string" + } + }, + "label": { + "examples": [ + "Unsafe" + ], + "title": "Label", + "type": "string", + "description": "The classification label", + "enum": [ + "Safe", + "Unsafe", + "Controversial" + ] + } + }, + "x-fal-order-properties": [ + "label", + "categories" + ], + "required": [ + "label", + "categories" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-3-guard/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-3-guard/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully."
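The guard endpoint's output is small enough to type by hand: a three-way label plus zero or more violation categories. A sketch mirroring Qwen3GuardOutput, with one possible moderation check on top (the blocking policy itself is an illustrative assumption, not something the schema prescribes):

// Hand-written mirror of Qwen3GuardOutput (a sketch, not generated from the spec).
type GuardLabel = 'Safe' | 'Unsafe' | 'Controversial'
type GuardCategory =
  | 'Violent'
  | 'Non-violent Illegal Acts'
  | 'Sexual Content or Sexual Acts'
  | 'PII'
  | 'Suicide & Self-Harm'
  | 'Unethical Acts'
  | 'Politically Sensitive Topics'
  | 'Copyright Violation'
  | 'Jailbreak'
  | 'None'

interface Qwen3GuardOutput {
  label: GuardLabel
  categories: GuardCategory[]
}

// Example policy: block only hard "Unsafe" results that carry a real category.
function shouldBlock(out: Qwen3GuardOutput): boolean {
  return out.label === 'Unsafe' && !out.categories.every((c) => c === 'None')
}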
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-3-guard": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Qwen3GuardInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-3-guard/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Qwen3GuardOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/video-prompt-generator", + "metadata": { + "display_name": "Video Prompt Generator", + "category": "llm", + "description": "Generate video prompts using a variety of techniques including camera direction, style, pacing, special effects and more.", + "status": "active", + "tags": [ + "motion", + "transformation", + "chat", + "claude", + "gpt" + ], + "updated_at": "2026-01-26T21:44:25.261Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/web-examples/video-prompt/vprompt.jpg", + "model_url": "https://fal.run/fal-ai/video-prompt-generator", + "date": "2025-02-25T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/video-prompt-generator", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/video-prompt-generator queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/video-prompt-generator", + "category": "llm", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/web-examples/video-prompt/vprompt.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/video-prompt-generator", + "documentationUrl": "https://fal.ai/models/fal-ai/video-prompt-generator/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "VideoPromptGeneratorInput": { + "title": "InputModel", + "type": "object", + "properties": { + "custom_elements": { + "title": "Custom Elements", + "type": "string", + "description": "Custom technical elements (optional)", + "default": "" + }, + "style": { + "enum": [ + "Minimalist", + "Simple", + "Detailed", + "Descriptive", + "Dynamic", + "Cinematic", + "Documentary", + "Animation", + "Action", + "Experimental" + ], + "title": "Style", + "type": "string", + "description": "Style of the video prompt", + "default": "Simple" + }, + "camera_direction": { + "enum": [ + "None", + "Zoom in", + "Zoom out", + "Pan left", + "Pan right", + "Tilt up", + "Tilt down", + "Orbital rotation", + "Push in", + "Pull out", + "Track forward", + "Track backward", + "Spiral in", + "Spiral out", + "Arc movement", + "Diagonal traverse", + "Vertical rise", + "Vertical descent" + ], + "title": "Camera Direction", + "type": "string", + "description": "Camera direction", + "default": "None" + }, + "pacing": { + "enum": [ + "None", + "Slow burn", + "Rhythmic pulse", + "Frantic energy", + "Ebb and flow", + "Hypnotic drift", + "Time-lapse rush", + "Stop-motion staccato", + "Gradual build", + "Quick cut rhythm", + "Long take meditation", + "Jump cut energy", + "Match cut flow", + "Cross-dissolve dreamscape", + "Parallel action", + "Slow motion impact", + "Ramping dynamics", + "Montage tempo", + "Continuous flow", + "Episodic breaks" + ], + "title": "Pacing", + "type": "string", + "description": "Pacing rhythm", + "default": "None" + }, + "special_effects": { + "enum": [ + "None", + "Practical effects", + "CGI enhancement", + "Analog glitches", + "Light painting", + "Projection mapping", + "Nanosecond exposures", + "Double exposure", + "Smoke diffusion", + "Lens flare artistry", + "Particle systems", + "Holographic overlay", + "Chromatic aberration", + "Digital distortion", + "Wire removal", + "Motion capture", + "Miniature integration", + "Weather simulation", + "Color grading", + "Mixed media composite", + "Neural style transfer" + ], + "title": "Special Effects", + "type": "string", + "description": "Special effects approach", + "default": "None" + }, + "image_url": { + "title": "Image Url", + "type": "string", + "description": "URL of an image to analyze and incorporate into the video prompt (optional)" + }, + "model": { + "enum": [ + "anthropic/claude-3.5-sonnet", + "anthropic/claude-3-5-haiku", + "anthropic/claude-3-haiku", + "google/gemini-2.5-flash-lite", + "google/gemini-2.0-flash-001", + "meta-llama/llama-3.2-1b-instruct", + "meta-llama/llama-3.2-3b-instruct", + "meta-llama/llama-3.1-8b-instruct", + "meta-llama/llama-3.1-70b-instruct", + "openai/gpt-4o-mini", + "openai/gpt-4o", + "deepseek/deepseek-r1" + ], + "title": "Model", + "type": "string", + "description": "Model to use", + "default": "google/gemini-2.0-flash-001" + }, + "camera_style": { + "enum": [ + "None", + "Steadicam flow", + "Drone aerials", + "Handheld urgency", + "Crane elegance", + "Dolly precision", + "VR 360", + "Multi-angle rig", + "Static tripod", + "Gimbal smoothness", + "Slider motion", + "Jib sweep", + "POV immersion", + "Time-slice array", + "Macro extreme", + "Tilt-shift miniature", + "Snorricam character", + "Whip pan dynamics", + "Dutch angle tension", + "Underwater housing", + "Periscope lens" + ], + "title": "Camera Style", + "type": "string", + "description": "Camera movement style", + "default": "None" + }, + "input_concept": { + "examples": [ + "A futuristic city at 
dusk" + ], + "title": "Input Concept", + "type": "string", + "description": "Core concept or thematic input for the video prompt" + }, + "prompt_length": { + "enum": [ + "Short", + "Medium", + "Long" + ], + "title": "Prompt Length", + "type": "string", + "description": "Length of the prompt", + "default": "Medium" + } + }, + "x-fal-order-properties": [ + "input_concept", + "style", + "camera_style", + "camera_direction", + "pacing", + "special_effects", + "custom_elements", + "image_url", + "model", + "prompt_length" + ], + "required": [ + "input_concept" + ] + }, + "VideoPromptGeneratorOutput": { + "title": "OutputModel", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A futuristic city glows softly at dusk, captured with smooth gimbal movements and a slow burn pacing, enhanced by subtle holographic overlays." + ], + "title": "Prompt", + "type": "string", + "description": "Generated video prompt" + } + }, + "x-fal-order-properties": [ + "prompt" + ], + "required": [ + "prompt" + ] + } + } + }, + "paths": { + "/fal-ai/video-prompt-generator/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/video-prompt-generator/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/video-prompt-generator": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VideoPromptGeneratorInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/video-prompt-generator/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VideoPromptGeneratorOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/packages/typescript/ai-fal/json/fal.models.speech-to-speech.json b/packages/typescript/ai-fal/json/fal.models.speech-to-speech.json new file mode 100644 index 00000000..5c54b5b5 --- /dev/null +++ b/packages/typescript/ai-fal/json/fal.models.speech-to-speech.json @@ -0,0 +1,708 @@ +{ + "generated_at": "2026-01-28T02:51:51.877Z", + "total_models": 2, + "category": "speech-to-speech", + "models": [ + { + "endpoint_id": "resemble-ai/chatterboxhd/speech-to-speech", + "metadata": { + "display_name": "Chatterboxhd", + "category": "speech-to-speech", + "description": "Transform voices using Resemble AI's Chatterbox. Convert audio to new voices or your own samples, with expressive results and built-in perceptual watermarking.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:31.831Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/chatterbox.webp", + "model_url": "https://fal.run/resemble-ai/chatterboxhd/speech-to-speech", + "license_type": "commercial", + "date": "2025-06-02T19:21:41.438Z", + "group": { + "key": "chatterboxhd", + "label": "Speech To Speech" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for resemble-ai/chatterboxhd/speech-to-speech", + "version": "1.0.0", + "description": "The OpenAPI schema for the resemble-ai/chatterboxhd/speech-to-speech queue.", + "x-fal-metadata": { + "endpointId": "resemble-ai/chatterboxhd/speech-to-speech", + "category": "speech-to-speech", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/chatterbox.webp", + "playgroundUrl": "https://fal.ai/models/resemble-ai/chatterboxhd/speech-to-speech", + "documentationUrl": "https://fal.ai/models/resemble-ai/chatterboxhd/speech-to-speech/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ChatterboxhdSpeechToSpeechInput": { + "description": "Input parameters for the speech-to-speech request.", + "type": "object", + "properties": { + "high_quality_audio": { + "description": "If True, the generated audio will be upscaled to 48kHz. The generation of the audio will take longer, but the quality will be higher. If False, the generated audio will be 24kHz. ", + "type": "boolean", + "title": "High Quality Audio", + "default": false + }, + "target_voice_audio_url": { + "examples": [ + "https://v3.fal.media/files/tiger/0XODRhebRLiBdu8MqgZc5_tmpljqsylwu.wav" + ], + "description": "URL to the audio file which represents the voice of the output audio. If provided, this will override the target_voice setting. If neither target_voice nor target_voice_audio_url are provided, the default target voice will be used.", + "type": "string", + "title": "Target Voice Audio Url" + }, + "source_audio_url": { + "examples": [ + "https://storage.googleapis.com/chatterbox-demo-samples/samples/duff_stewie.wav" + ], + "description": "URL to the source audio file to be voice-converted.", + "type": "string", + "title": "Source Audio Url" + }, + "target_voice": { + "enum": [ + "Aurora", + "Blade", + "Britney", + "Carl", + "Cliff", + "Richard", + "Rico", + "Siobhan", + "Vicky" + ], + "description": "The voice to use for the speech-to-speech request. If neither target_voice nor target_voice_audio_url are provided, a random target voice will be used.", + "type": "string", + "title": "Target Voice" + } + }, + "x-fal-order-properties": [ + "source_audio_url", + "target_voice", + "target_voice_audio_url", + "high_quality_audio" + ], + "title": "STSInput", + "required": [ + "source_audio_url" + ] + }, + "ChatterboxhdSpeechToSpeechOutput": { + "description": "Output parameters for the speech-to-speech request.", + "type": "object", + "properties": { + "audio": { + "examples": [ + { + "url": "https://v3.fal.media/files/elephant/Kym3zK7hFHjDuyz3tB3W9_tmptvowq60i.wav" + } + ], + "description": "The generated voice-converted audio file.", + "title": "Audio", + "allOf": [ + { + "$ref": "#/components/schemas/Audio" + } + ] + } + }, + "x-fal-order-properties": [ + "audio" + ], + "title": "STSOutput", + "required": [ + "audio" + ] + }, + "Audio": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "Audio", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/resemble-ai/chatterboxhd/speech-to-speech/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/resemble-ai/chatterboxhd/speech-to-speech/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/resemble-ai/chatterboxhd/speech-to-speech": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ChatterboxhdSpeechToSpeechInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/resemble-ai/chatterboxhd/speech-to-speech/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ChatterboxhdSpeechToSpeechOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/chatterbox/speech-to-speech", + "metadata": { + "display_name": "Chatterbox", + "category": "speech-to-speech", + "description": "Whether you're working on memes, videos, games, or AI agents, Chatterbox brings your content to life. 
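The STSInput fields above encode an either/or: target_voice selects one of nine named voices, target_voice_audio_url overrides it with a reference sample when both are set, and high_quality_audio trades slower generation for a 48kHz upsample. A sketch of the shape with that precedence spelled out in comments; the URLs are placeholders:

// Sketch of ChatterboxhdSpeechToSpeechInput; URLs below are placeholders.
interface STSInput {
  source_audio_url: string // required: the audio to voice-convert
  target_voice?:
    | 'Aurora' | 'Blade' | 'Britney' | 'Carl' | 'Cliff'
    | 'Richard' | 'Rico' | 'Siobhan' | 'Vicky'
  target_voice_audio_url?: string // overrides target_voice when both are set
  high_quality_audio?: boolean // true: 48kHz output, slower generation
}

const input: STSInput = {
  source_audio_url: 'https://example.com/source.wav',
  target_voice: 'Aurora',
  high_quality_audio: true,
}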
Use the first tts from resemble ai.", + "status": "active", + "tags": [ + "speech-to-speech" + ], + "updated_at": "2026-01-26T21:43:33.399Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/chatterbox.webp", + "model_url": "https://fal.run/fal-ai/chatterbox/speech-to-speech", + "license_type": "commercial", + "date": "2025-06-01T19:56:11.760Z", + "group": { + "key": "chatterbox", + "label": "Speech To Speech" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/chatterbox/speech-to-speech", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/chatterbox/speech-to-speech queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/chatterbox/speech-to-speech", + "category": "speech-to-speech", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/chatterbox.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/chatterbox/speech-to-speech", + "documentationUrl": "https://fal.ai/models/fal-ai/chatterbox/speech-to-speech/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ChatterboxSpeechToSpeechInput": { + "title": "ChatterboxVCRequest", + "type": "object", + "properties": { + "source_audio_url": { + "examples": [ + "https://storage.googleapis.com/chatterbox-demo-samples/samples/duff_stewie.wav" + ], + "title": "Source Audio Url", + "type": "string" + }, + "target_voice_audio_url": { + "examples": [ + "https://v3.fal.media/files/tiger/0XODRhebRLiBdu8MqgZc5_tmpljqsylwu.wav" + ], + "description": "Optional URL to an audio file to use as a reference for the generated speech. 
If provided, the model will try to match the style and tone of the reference audio.", + "type": "string", + "title": "Target Voice Audio Url" + } + }, + "x-fal-order-properties": [ + "source_audio_url", + "target_voice_audio_url" + ], + "required": [ + "source_audio_url" + ] + }, + "ChatterboxSpeechToSpeechOutput": { + "title": "ChatterboxOutput", + "type": "object", + "properties": { + "audio": { + "examples": [ + { + "url": "https://v3.fal.media/files/kangaroo/RQ_pxc7oPdueYqWUqEbPE_tmpjnzvvzx_.wav" + } + ], + "description": "The generated speech audio", + "title": "Audio", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "audio" + ], + "required": [ + "audio" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/chatterbox/speech-to-speech/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/chatterbox/speech-to-speech/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/chatterbox/speech-to-speech": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ChatterboxSpeechToSpeechInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/chatterbox/speech-to-speech/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ChatterboxSpeechToSpeechOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/packages/typescript/ai-fal/json/fal.models.speech-to-text.json b/packages/typescript/ai-fal/json/fal.models.speech-to-text.json new file mode 100644 index 00000000..3ef331a8 --- /dev/null +++ b/packages/typescript/ai-fal/json/fal.models.speech-to-text.json @@ -0,0 +1,3635 @@ +{ + "generated_at": "2026-01-28T02:51:51.870Z", + "total_models": 9, + "category": "speech-to-text", + "models": [ + { + "endpoint_id": "fal-ai/elevenlabs/speech-to-text/scribe-v2", + "metadata": { + "display_name": "ElevenLabs Speech to Text - Scribe V2", + "category": "speech-to-text", + "description": "Use Scribe-V2 from ElevenLabs to do blazingly fast speech to text inferences!", + "status": "active", + "tags": [ + "speech-to-text" + ], + "updated_at": "2026-01-26T21:41:36.848Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8a5d77/3XtHpcC46Sg0ItPRjB6Uk_82a3a831177745b38f925d35a7b5ed66.jpg", + "model_url": "https://fal.run/fal-ai/elevenlabs/speech-to-text/scribe-v2", + "license_type": "commercial", + "date": "2026-01-14T14:30:10.342Z", + "group": { + "key": "e11-speech-to-text", + "label": "Scribe V2" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/elevenlabs/speech-to-text/scribe-v2", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/elevenlabs/speech-to-text/scribe-v2 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/elevenlabs/speech-to-text/scribe-v2", + "category": "speech-to-text", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8a5d77/3XtHpcC46Sg0ItPRjB6Uk_82a3a831177745b38f925d35a7b5ed66.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/elevenlabs/speech-to-text/scribe-v2", + "documentationUrl": "https://fal.ai/models/fal-ai/elevenlabs/speech-to-text/scribe-v2/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." 
+ }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ElevenlabsSpeechToTextScribeV2Input": { + "title": "SpeechToTextRequestScribeV2", + "type": "object", + "properties": { + "keyterms": { + "description": "Words or sentences to bias the model towards transcribing. Up to 100 keyterms, max 50 characters each. Adds 30% premium over base transcription price.", + "type": "array", + "items": { + "type": "string" + }, + "examples": [ + [ + "fal.ai" + ] + ], + "maxItems": 100, + "title": "Keyterms", + "default": [] + }, + "audio_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/elevenlabs/scribe_v2_in.mp3" + ], + "description": "URL of the audio file to transcribe", + "type": "string", + "title": "Audio Url" + }, + "diarize": { + "description": "Whether to annotate who is speaking", + "type": "boolean", + "title": "Diarize", + "default": true + }, + "language_code": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Language code of the audio", + "title": "Language Code", + "examples": [ + "eng", + "spa", + "fra", + "deu", + "jpn" + ] + }, + "tag_audio_events": { + "description": "Tag audio events like laughter, applause, etc.", + "type": "boolean", + "title": "Tag Audio Events", + "default": true + } + }, + "x-fal-order-properties": [ + "audio_url", + "language_code", + "tag_audio_events", + "diarize", + "keyterms" + ], + "required": [ + "audio_url" + ] + }, + "ElevenlabsSpeechToTextScribeV2Output": { + "title": "TranscriptionOutputV2", + "type": "object", + "properties": { + "text": { + "examples": [ + "Hey, this is a test recording for Scribe version two, which is now available on fal.ai." 
+ ], + "description": "The full transcribed text", + "type": "string", + "title": "Text" + }, + "language_probability": { + "examples": [ + 1 + ], + "description": "Confidence in language detection", + "type": "number", + "title": "Language Probability" + }, + "language_code": { + "examples": [ + "eng" + ], + "description": "Detected or specified language code", + "type": "string", + "title": "Language Code" + }, + "words": { + "examples": [ + { + "text": "Hey,", + "start": 0.079, + "type": "word", + "end": 0.539, + "speaker_id": "speaker_0" + }, + { + "text": " ", + "start": 0.539, + "type": "spacing", + "end": 0.599, + "speaker_id": "speaker_0" + }, + { + "text": "this", + "start": 0.599, + "type": "word", + "end": 0.679, + "speaker_id": "speaker_0" + }, + { + "text": " ", + "start": 0.679, + "type": "spacing", + "end": 0.739, + "speaker_id": "speaker_0" + }, + { + "text": "is", + "start": 0.739, + "type": "word", + "end": 0.799, + "speaker_id": "speaker_0" + }, + { + "text": " ", + "start": 0.799, + "type": "spacing", + "end": 0.939, + "speaker_id": "speaker_0" + }, + { + "text": "a", + "start": 0.939, + "type": "word", + "end": 0.939, + "speaker_id": "speaker_0" + }, + { + "text": " ", + "start": 0.939, + "type": "spacing", + "end": 0.959, + "speaker_id": "speaker_0" + }, + { + "text": "test", + "start": 0.959, + "type": "word", + "end": 1.179, + "speaker_id": "speaker_0" + }, + { + "text": " ", + "start": 1.179, + "type": "spacing", + "end": 1.219, + "speaker_id": "speaker_0" + }, + { + "text": "recording", + "start": 1.22, + "type": "word", + "end": 1.719, + "speaker_id": "speaker_0" + }, + { + "text": " ", + "start": 1.719, + "type": "spacing", + "end": 1.719, + "speaker_id": "speaker_0" + }, + { + "text": "for", + "start": 1.719, + "type": "word", + "end": 1.86, + "speaker_id": "speaker_0" + }, + { + "text": " ", + "start": 1.86, + "type": "spacing", + "end": 1.879, + "speaker_id": "speaker_0" + }, + { + "text": "Scribe", + "start": 1.879, + "type": "word", + "end": 2.24, + "speaker_id": "speaker_0" + }, + { + "text": " ", + "start": 2.24, + "type": "spacing", + "end": 2.319, + "speaker_id": "speaker_0" + }, + { + "text": "version", + "start": 2.319, + "type": "word", + "end": 2.759, + "speaker_id": "speaker_0" + }, + { + "text": " ", + "start": 2.759, + "type": "spacing", + "end": 2.779, + "speaker_id": "speaker_0" + }, + { + "text": "two,", + "start": 2.779, + "type": "word", + "end": 3.379, + "speaker_id": "speaker_0" + }, + { + "text": " ", + "start": 3.379, + "type": "spacing", + "end": 3.399, + "speaker_id": "speaker_0" + }, + { + "text": "which", + "start": 3.399, + "type": "word", + "end": 3.519, + "speaker_id": "speaker_0" + }, + { + "text": " ", + "start": 3.519, + "type": "spacing", + "end": 3.539, + "speaker_id": "speaker_0" + }, + { + "text": "is", + "start": 3.539, + "type": "word", + "end": 3.659, + "speaker_id": "speaker_0" + }, + { + "text": " ", + "start": 3.659, + "type": "spacing", + "end": 3.699, + "speaker_id": "speaker_0" + }, + { + "text": "now", + "start": 3.699, + "type": "word", + "end": 3.839, + "speaker_id": "speaker_0" + }, + { + "text": " ", + "start": 3.839, + "type": "spacing", + "end": 3.839, + "speaker_id": "speaker_0" + }, + { + "text": "available", + "start": 3.839, + "type": "word", + "end": 4.319, + "speaker_id": "speaker_0" + }, + { + "text": " ", + "start": 4.319, + "type": "spacing", + "end": 4.339, + "speaker_id": "speaker_0" + }, + { + "text": "on", + "start": 4.339, + "type": "word", + "end": 4.579, + "speaker_id": "speaker_0" + }, + { + 
"text": " ", + "start": 4.579, + "type": "spacing", + "end": 4.599, + "speaker_id": "speaker_0" + }, + { + "text": "fal.ai.", + "start": 4.599, + "type": "word", + "end": 5.699, + "speaker_id": "speaker_0" + } + ], + "description": "Word-level transcription details", + "type": "array", + "title": "Words", + "items": { + "$ref": "#/components/schemas/TranscriptionWord" + } + } + }, + "x-fal-order-properties": [ + "text", + "language_code", + "language_probability", + "words" + ], + "required": [ + "text", + "language_code", + "language_probability", + "words" + ] + }, + "TranscriptionWord": { + "title": "TranscriptionWord", + "type": "object", + "properties": { + "text": { + "description": "The transcribed word or audio event", + "type": "string", + "title": "Text" + }, + "start": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "Start time in seconds", + "title": "Start" + }, + "type": { + "description": "Type of element (word, spacing, or audio_event)", + "type": "string", + "title": "Type" + }, + "end": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "End time in seconds", + "title": "End" + }, + "speaker_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Speaker identifier if diarization was enabled", + "title": "Speaker Id" + } + }, + "x-fal-order-properties": [ + "text", + "start", + "end", + "type", + "speaker_id" + ], + "required": [ + "text", + "start", + "end", + "type" + ] + } + } + }, + "paths": { + "/fal-ai/elevenlabs/speech-to-text/scribe-v2/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/elevenlabs/speech-to-text/scribe-v2/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/elevenlabs/speech-to-text/scribe-v2": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ElevenlabsSpeechToTextScribeV2Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/elevenlabs/speech-to-text/scribe-v2/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ElevenlabsSpeechToTextScribeV2Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/smart-turn", + "metadata": { + "display_name": "Pipecat's Smart Turn model", + "category": "speech-to-text", + "description": "An open source, community-driven and native audio turn detection model by Pipecat AI.\n\n", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:54.403Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-2.jpg", + "model_url": "https://fal.run/fal-ai/smart-turn", + "license_type": "commercial", + "date": "2025-04-21T23:54:12.905Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/smart-turn", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/smart-turn queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/smart-turn", + "category": "speech-to-text", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-2.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/smart-turn", + "documentationUrl": "https://fal.ai/models/fal-ai/smart-turn/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "SmartTurnInput": { + "title": "SmartTurnInput", + "type": "object", + "properties": { + "audio_url": { + "examples": [ + "https://fal.media/files/panda/5-QaAOC32rB_hqWaVdqEH.mpga" + ], + "title": "Audio Url", + "type": "string", + "description": "The URL of the audio file to be processed." 
+ } + }, + "x-fal-order-properties": [ + "audio_url" + ], + "required": [ + "audio_url" + ] + }, + "SmartTurnOutput": { + "title": "Output", + "type": "object", + "properties": { + "prediction": { + "title": "Prediction", + "type": "integer", + "description": "The predicted turn type. 1 for Complete, 0 for Incomplete." + }, + "probability": { + "title": "Probability", + "type": "number", + "description": "The probability of the predicted turn type." + }, + "metrics": { + "title": "Metrics", + "type": "object", + "description": "The metrics of the inference." + } + }, + "x-fal-order-properties": [ + "prediction", + "probability", + "metrics" + ], + "required": [ + "prediction", + "probability", + "metrics" + ] + } + } + }, + "paths": { + "/fal-ai/smart-turn/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/smart-turn/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/smart-turn": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SmartTurnInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/smart-turn/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SmartTurnOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/speech-to-text/turbo", + "metadata": { + "display_name": "Speech-to-Text", + "category": "speech-to-text", + "description": "Leverage the rapid processing capabilities of AI models to enable accurate and efficient real-time speech-to-text transcription.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:57.680Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/canary.webp", + "model_url": "https://fal.run/fal-ai/speech-to-text/turbo", + "license_type": "commercial", + "date": "2025-04-04T19:35:54.848Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/speech-to-text/turbo", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/speech-to-text/turbo queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/speech-to-text/turbo", + "category": "speech-to-text", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/canary.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/speech-to-text/turbo", + "documentationUrl": "https://fal.ai/models/fal-ai/speech-to-text/turbo/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "SpeechToTextTurboInput": { + "title": "SpeechInput", + "type": "object", + "properties": { + "audio_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/canary/18e15559-ab3e-4f96-9583-be5ddde91e43.mp3" + ], + "title": "Audio Path", + "type": "string", + "description": "Local filesystem path (or remote URL) to a long audio file" + }, + "use_pnc": { + "title": "Use Punctuation/Capitalization (PnC)", + "type": "boolean", + "description": "Whether to use Canary's built-in punctuation & capitalization", + "default": true + } + }, + "x-fal-order-properties": [ + "audio_url", + "use_pnc" + ], + "required": [ + "audio_url" + ] + }, + "SpeechToTextTurboOutput": { + "title": "SpeechOutput", + "type": "object", + "properties": { + "partial": { + "title": "Partial", + "type": "boolean", + "description": "Indicates if this is a partial (in-progress) transcript", + "default": false + }, + "output": { + "title": "Transcribed Text", + "type": "string", + "description": "The partial or final transcription output from Canary" + } + }, + "x-fal-order-properties": [ + "output", + "partial" + ], + "required": [ + "output" + ] + } + } + }, + "paths": { + "/fal-ai/speech-to-text/turbo/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/speech-to-text/turbo/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/speech-to-text/turbo": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SpeechToTextTurboInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/speech-to-text/turbo/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SpeechToTextTurboOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/speech-to-text/turbo/stream", + "metadata": { + "display_name": "Speech-to-Text", + "category": "speech-to-text", + "description": "Leverage the rapid processing capabilities of AI models to enable accurate and efficient real-time speech-to-text transcription.", + "status": "active", + "tags": [ + "streaming" + ], + "updated_at": "2026-01-26T21:43:57.811Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/canary.webp", + "model_url": "https://fal.run/fal-ai/speech-to-text/turbo/stream", + "license_type": "commercial", + "date": "2025-04-04T19:13:10.736Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/speech-to-text/turbo/stream", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/speech-to-text/turbo/stream queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/speech-to-text/turbo/stream", + "category": "speech-to-text", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/canary.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/speech-to-text/turbo/stream", + "documentationUrl": "https://fal.ai/models/fal-ai/speech-to-text/turbo/stream/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "SpeechToTextTurboStreamInput": { + "title": "SpeechInput", + "type": "object", + "properties": { + "audio_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/canary/18e15559-ab3e-4f96-9583-be5ddde91e43.mp3" + ], + "title": "Audio Path", + "type": "string", + "description": "Local filesystem path (or remote URL) to a long audio file" + }, + "use_pnc": { + "title": "Use Punctuation/Capitalization (PnC)", + "type": "boolean", + "description": "Whether to use Canary's built-in punctuation & capitalization", + "default": true + } + }, + "x-fal-order-properties": [ + "audio_url", + "use_pnc" + ], + "required": [ + "audio_url" + ] + }, + "SpeechToTextTurboStreamOutput": {} + } + }, + "paths": { + "/fal-ai/speech-to-text/turbo/stream/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/speech-to-text/turbo/stream/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/speech-to-text/turbo/stream": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SpeechToTextTurboStreamInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/speech-to-text/turbo/stream/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SpeechToTextTurboStreamOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/speech-to-text/stream", + "metadata": { + "display_name": "Speech-To-text", + "category": "speech-to-text", + "description": "Leverage the rapid processing capabilities of AI models to enable accurate and efficient real-time speech-to-text transcription.", + "status": "active", + "tags": [ + "streaming" + ], + "updated_at": "2026-01-26T21:43:57.942Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/canary.webp", + "model_url": "https://fal.run/fal-ai/speech-to-text/stream", + "license_type": "commercial", + "date": "2025-04-04T16:01:08.836Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/speech-to-text/stream", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/speech-to-text/stream queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/speech-to-text/stream", + "category": "speech-to-text", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/canary.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/speech-to-text/stream", + "documentationUrl": "https://fal.ai/models/fal-ai/speech-to-text/stream/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "SpeechToTextStreamInput": { + "title": "SpeechInput", + "type": "object", + "properties": { + "audio_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/canary/18e15559-ab3e-4f96-9583-be5ddde91e43.mp3" + ], + "title": "Audio Path", + "type": "string", + "description": "Local filesystem path (or remote URL) to a long audio file" + }, + "use_pnc": { + "title": "Use Punctuation/Capitalization (PnC)", + "type": "boolean", + "description": "Whether to use Canary's built-in punctuation & capitalization", + "default": true + } + }, + "x-fal-order-properties": [ + "audio_url", + "use_pnc" + ], + "required": [ + "audio_url" + ] + }, + "SpeechToTextStreamOutput": {} + } + }, + "paths": { + "/fal-ai/speech-to-text/stream/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/speech-to-text/stream/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/speech-to-text/stream": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SpeechToTextStreamInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/speech-to-text/stream/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SpeechToTextStreamOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/speech-to-text", + "metadata": { + "display_name": "Speech-to-Text", + "category": "speech-to-text", + "description": "Leverage the rapid processing capabilities of AI models to enable accurate and efficient real-time speech-to-text transcription.", + "status": "active", + "tags": [ + "" + ], + "updated_at": "2026-01-26T21:43:58.071Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/canary.webp", + "model_url": "https://fal.run/fal-ai/speech-to-text", + "license_type": "commercial", + "date": "2025-04-04T15:08:54.913Z", + "highlighted": false, + "kind": "inference", + "stream_url": "https://fal.run/fal-ai/speech-to-text/stream", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/speech-to-text", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/speech-to-text queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/speech-to-text", + "category": "speech-to-text", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/canary.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/speech-to-text", + "documentationUrl": "https://fal.ai/models/fal-ai/speech-to-text/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "SpeechToTextInput": { + "title": "SpeechInput", + "type": "object", + "properties": { + "audio_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/canary/18e15559-ab3e-4f96-9583-be5ddde91e43.mp3" + ], + "title": "Audio Path", + "type": "string", + "description": "Local filesystem path (or remote URL) to a long audio file" + }, + "use_pnc": { + "title": "Use Punctuation/Capitalization (PnC)", + "type": "boolean", + "description": "Whether to use Canary's built-in punctuation & capitalization", + "default": true + } + }, + "x-fal-order-properties": [ + "audio_url", + "use_pnc" + ], + "required": [ + "audio_url" + ] + }, + "SpeechToTextOutput": { + "title": "SpeechOutput", + "type": "object", + "properties": { + "partial": { + "title": "Partial", + "type": "boolean", + "description": "Indicates if this is a partial (in-progress) transcript", + "default": false + }, + "output": { + "title": "Transcribed Text", + "type": "string", + "description": "The partial or final transcription output from Canary" + } + }, + "x-fal-order-properties": [ + "output", + "partial" + ], + "required": [ + "output" + ] + } + } + }, + "paths": { + "/fal-ai/speech-to-text/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/speech-to-text/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/speech-to-text": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SpeechToTextInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/speech-to-text/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SpeechToTextOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/elevenlabs/speech-to-text", + "metadata": { + "display_name": "ElevenLabs Speech to Text", + "category": "speech-to-text", + "description": "Generate text from speech using ElevenLabs advanced speech-to-text model.", + "status": "active", + "tags": [ + "speech" + ], + "updated_at": "2026-01-26T21:44:04.914Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/web-examples/elevenlabs/elevenlabs_thumbnail.webp", + "model_url": "https://fal.run/fal-ai/elevenlabs/speech-to-text", + "license_type": "commercial", + "date": "2025-02-27T00:00:00.000Z", + "group": { + "key": "e11-speech-to-text", + "label": "Scribe V1" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/elevenlabs/speech-to-text", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/elevenlabs/speech-to-text queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/elevenlabs/speech-to-text", + "category": "speech-to-text", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/web-examples/elevenlabs/elevenlabs_thumbnail.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/elevenlabs/speech-to-text", + "documentationUrl": "https://fal.ai/models/fal-ai/elevenlabs/speech-to-text/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ElevenlabsSpeechToTextInput": { + "title": "SpeechToTextRequest", + "type": "object", + "properties": { + "language_code": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Language code of the audio", + "title": "Language Code", + "examples": [ + "eng", + "spa", + "fra", + "deu", + "jpn" + ] + }, + "audio_url": { + "examples": [ + "https://v3.fal.media/files/zebra/zJL_oRY8h5RWwjoK1w7tx_output.mp3" + ], + "description": "URL of the audio file to transcribe", + "type": "string", + "title": "Audio Url" + }, + "diarize": { + "description": "Whether to annotate who is speaking", + "type": "boolean", + "title": "Diarize", + "default": true + }, + "tag_audio_events": { + "description": "Tag audio events like laughter, applause, etc.", + "type": "boolean", + "title": "Tag Audio Events", + "default": true + } + }, + "x-fal-order-properties": [ + "audio_url", + "language_code", + "tag_audio_events", + "diarize" + ], + "required": [ + "audio_url" + ] + }, + "ElevenlabsSpeechToTextOutput": { + "title": "TranscriptionOutput", + "type": "object", + "properties": { + "text": { + "description": "The full transcribed text", + "type": "string", + "title": "Text" + }, + "language_probability": { + "description": "Confidence in language detection", + "type": "number", + "title": "Language Probability" + }, + "language_code": { + "description": "Detected or specified language code", + "type": "string", + "title": "Language Code" + }, + "words": { + "description": "Word-level transcription details", + "type": "array", + "title": "Words", + "items": { + "$ref": "#/components/schemas/TranscriptionWord" + } + } + }, + "x-fal-order-properties": [ + "text", + "language_code", + "language_probability", + "words" + ], + "required": [ + "text", + "language_code", + "language_probability", + "words" + ] + }, + "TranscriptionWord": { + "title": "TranscriptionWord", + "type": "object", + "properties": { + "text": { + "description": "The transcribed word or audio event", + "type": "string", + "title": "Text" + }, + "start": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "Start time in seconds", + "title": "Start" + }, + "type": { + "description": "Type of element (word, spacing, or audio_event)", + "type": "string", + "title": "Type" + }, + "end": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "End time in seconds", + "title": "End" + }, + "speaker_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Speaker identifier if diarization was enabled", + "title": "Speaker Id" + } + }, + "x-fal-order-properties": [ + "text", + "start", + "end", + "type", + "speaker_id" + ], + "required": [ + "text", + "start", + "end", + "type" + ] + } + } + }, + "paths": { + "/fal-ai/elevenlabs/speech-to-text/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/elevenlabs/speech-to-text/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/elevenlabs/speech-to-text": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ElevenlabsSpeechToTextInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/elevenlabs/speech-to-text/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ElevenlabsSpeechToTextOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wizper", + "metadata": { + "display_name": "Wizper (Whisper v3 -- fal.ai edition)", + "category": "speech-to-text", + "description": "[Experimental] Whisper v3 Large -- but optimized by our inference wizards. Same WER, double the performance!", + "status": "active", + "tags": [ + "transcription", + "speech" + ], + "updated_at": "2026-01-26T21:44:52.470Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/wizper.webp", + "thumbnail_animated_url": "https://storage.googleapis.com/falserverless/gallery/wizper-animated.webp", + "model_url": "https://fal.run/fal-ai/wizper", + "github_url": "https://github.com/openai/whisper/blob/main/LICENSE", + "date": "2024-04-08T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wizper", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wizper queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wizper", + "category": "speech-to-text", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/wizper.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/wizper", + "documentationUrl": "https://fal.ai/models/fal-ai/wizper/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "WizperInput": { + "title": "WhisperInput", + "type": "object", + "properties": { + "language": { + "anyOf": [ + { + "enum": [ + "af", + "am", + "ar", + "as", + "az", + "ba", + "be", + "bg", + "bn", + "bo", + "br", + "bs", + "ca", + "cs", + "cy", + "da", + "de", + "el", + "en", + "es", + "et", + "eu", + "fa", + "fi", + "fo", + "fr", + "gl", + "gu", + "ha", + "haw", + "he", + "hi", + "hr", + "ht", + "hu", + "hy", + "id", + "is", + "it", + "ja", + "jw", + "ka", + "kk", + "km", + "kn", + "ko", + "la", + "lb", + "ln", + "lo", + "lt", + "lv", + "mg", + "mi", + "mk", + "ml", + "mn", + "mr", + "ms", + "mt", + "my", + "ne", + "nl", + "nn", + "no", + "oc", + "pa", + "pl", + "ps", + "pt", + "ro", + "ru", + "sa", + "sd", + "si", + "sk", + "sl", + "sn", + "so", + "sq", + "sr", + "su", + "sv", + "sw", + "ta", + "te", + "tg", + "th", + "tk", + "tl", + "tr", + "tt", + "uk", + "ur", + "uz", + "vi", + "yi", + "yo", + "zh" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Language", + "examples": [ + null + ], + "description": "\n Language of the audio file.\n If translate is selected as the task, the audio will be translated to\n English, regardless of the language selected. If `None` is passed,\n the language will be automatically detected. This will also increase\n the inference time.\n ", + "default": "en" + }, + "version": { + "title": "Version", + "type": "string", + "description": "Version of the model to use. All of the models are the Whisper large variant.", + "const": "3", + "default": "3" + }, + "max_segment_len": { + "minimum": 10, + "maximum": 29, + "type": "integer", + "title": "Max Segment Len", + "description": "Maximum speech segment duration in seconds before splitting.", + "default": 29 + }, + "task": { + "enum": [ + "transcribe", + "translate" + ], + "title": "Task", + "type": "string", + "description": "Task to perform on the audio file. Either transcribe or translate.", + "default": "transcribe" + }, + "chunk_level": { + "title": "Chunk Level", + "type": "string", + "description": "Level of the chunks to return.", + "const": "segment", + "default": "segment" + }, + "audio_url": { + "examples": [ + "https://ihlhivqvotguuqycfcvj.supabase.co/storage/v1/object/public/public-text-to-speech/scratch-testing/earth-history-19mins.mp3" + ], + "title": "Audio Url", + "type": "string", + "description": "URL of the audio file to transcribe. Supported formats: mp3, mp4, mpeg, mpga, m4a, wav or webm." + }, + "merge_chunks": { + "title": "Merge Chunks", + "type": "boolean", + "description": "Whether to merge consecutive chunks. 
When enabled, chunks are merged if their combined duration does not exceed max_segment_len.", + "default": true + } + }, + "x-fal-order-properties": [ + "audio_url", + "task", + "language", + "chunk_level", + "max_segment_len", + "merge_chunks", + "version" + ], + "required": [ + "audio_url" + ] + }, + "WizperOutput": { + "title": "WhisperOutput", + "type": "object", + "properties": { + "text": { + "title": "Text", + "type": "string", + "description": "Transcription of the audio file" + }, + "languages": { + "title": "Languages", + "type": "array", + "description": "List of languages that the audio file is inferred to be. Defaults to null.", + "items": { + "enum": [ + "af", + "am", + "ar", + "as", + "az", + "ba", + "be", + "bg", + "bn", + "bo", + "br", + "bs", + "ca", + "cs", + "cy", + "da", + "de", + "el", + "en", + "es", + "et", + "eu", + "fa", + "fi", + "fo", + "fr", + "gl", + "gu", + "ha", + "haw", + "he", + "hi", + "hr", + "ht", + "hu", + "hy", + "id", + "is", + "it", + "ja", + "jw", + "ka", + "kk", + "km", + "kn", + "ko", + "la", + "lb", + "ln", + "lo", + "lt", + "lv", + "mg", + "mi", + "mk", + "ml", + "mn", + "mr", + "ms", + "mt", + "my", + "ne", + "nl", + "nn", + "no", + "oc", + "pa", + "pl", + "ps", + "pt", + "ro", + "ru", + "sa", + "sd", + "si", + "sk", + "sl", + "sn", + "so", + "sq", + "sr", + "su", + "sv", + "sw", + "ta", + "te", + "tg", + "th", + "tk", + "tl", + "tr", + "tt", + "uk", + "ur", + "uz", + "vi", + "yi", + "yo", + "zh" + ], + "type": "string" + } + }, + "chunks": { + "title": "Chunks", + "type": "array", + "description": "Timestamp chunks of the audio file", + "items": { + "$ref": "#/components/schemas/WhisperChunk" + } + } + }, + "x-fal-order-properties": [ + "text", + "chunks", + "languages" + ], + "required": [ + "text", + "chunks", + "languages" + ] + }, + "WhisperChunk": { + "title": "WhisperChunk", + "type": "object", + "properties": { + "text": { + "title": "Text", + "type": "string", + "description": "Transcription of the chunk" + }, + "timestamp": { + "maxItems": 2, + "type": "array", + "minItems": 2, + "title": "Timestamp", + "prefixItems": [ + { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ] + }, + { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ] + } + ], + "description": "Start and end timestamp of the chunk" + } + }, + "x-fal-order-properties": [ + "timestamp", + "text" + ], + "required": [ + "timestamp", + "text" + ] + } + } + }, + "paths": { + "/fal-ai/wizper/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wizper/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/wizper": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WizperInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wizper/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WizperOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/whisper", + "metadata": { + "display_name": "Whisper", + "category": "speech-to-text", + "description": "Whisper is a model for speech transcription and translation.", + "status": "active", + "tags": [ + "transcription", + "translation", + "speech" + ], + "updated_at": "2026-01-26T21:44:16.104Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/whisper.jpg", + "model_url": "https://fal.run/fal-ai/whisper", + "github_url": "https://github.com/openai/whisper/blob/main/LICENSE", + "license_type": "commercial", + "date": "2024-02-19T00:00:00.000Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/whisper", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/whisper queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/whisper", + "category": "speech-to-text", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/whisper.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/whisper", + "documentationUrl": "https://fal.ai/models/fal-ai/whisper/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
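The Wizper schemas and paths above spell out fal's standard queue protocol: POST the input to enqueue, poll the status URL until COMPLETED, then fetch the result. A minimal TypeScript sketch of that flow follows; the `Key` Authorization prefix, the FAL_KEY environment variable, and the 1-second polling interval are assumptions, not taken from this schema.

// Illustrative only, not part of the generated catalog. Endpoint paths and
// payload fields come from the WizperInput/QueueStatus schemas above.
const FAL_KEY = process.env.FAL_KEY ?? '' // assumed: Node 18+ with global fetch

async function transcribeWithWizper(audioUrl: string) {
  const headers = {
    Authorization: `Key ${FAL_KEY}`, // assumed auth format
    'Content-Type': 'application/json',
  }

  // POST /fal-ai/wizper enqueues the request and returns a QueueStatus.
  const submit = await fetch('https://queue.fal.run/fal-ai/wizper', {
    method: 'POST',
    headers,
    body: JSON.stringify({
      audio_url: audioUrl, // the only required WizperInput field
      task: 'transcribe', // or 'translate' (always translates to English)
      chunk_level: 'segment',
    }),
  })
  const { request_id } = (await submit.json()) as { request_id: string }

  // Poll GET .../requests/{request_id}/status until COMPLETED.
  const base = `https://queue.fal.run/fal-ai/wizper/requests/${request_id}`
  for (;;) {
    const status = (await (await fetch(`${base}/status`, { headers })).json()) as {
      status: 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED'
    }
    if (status.status === 'COMPLETED') break
    await new Promise((resolve) => setTimeout(resolve, 1000))
  }

  // GET .../requests/{request_id} returns the WizperOutput payload.
  return (await (await fetch(base, { headers })).json()) as {
    text: string
    chunks: Array<{ timestamp: [number | null, number | null]; text: string }>
    languages: Array<string>
  }
}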
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "WhisperInput": { + "x-fal-order-properties": [ + "audio_url", + "task", + "language", + "diarize", + "chunk_level", + "version", + "batch_size", + "prompt", + "num_speakers" + ], + "type": "object", + "properties": { + "version": { + "enum": [ + "3" + ], + "title": "Version", + "type": "string", + "description": "Version of the model to use. All of the models are the Whisper large variant.", + "default": "3" + }, + "batch_size": { + "examples": [ + 64 + ], + "maximum": 64, + "type": "integer", + "minimum": 1, + "title": "Batch Size", + "default": 64 + }, + "language": { + "enum": [ + "af", + "am", + "ar", + "as", + "az", + "ba", + "be", + "bg", + "bn", + "bo", + "br", + "bs", + "ca", + "cs", + "cy", + "da", + "de", + "el", + "en", + "es", + "et", + "eu", + "fa", + "fi", + "fo", + "fr", + "gl", + "gu", + "ha", + "haw", + "he", + "hi", + "hr", + "ht", + "hu", + "hy", + "id", + "is", + "it", + "ja", + "jw", + "ka", + "kk", + "km", + "kn", + "ko", + "la", + "lb", + "ln", + "lo", + "lt", + "lv", + "mg", + "mi", + "mk", + "ml", + "mn", + "mr", + "ms", + "mt", + "my", + "ne", + "nl", + "nn", + "no", + "oc", + "pa", + "pl", + "ps", + "pt", + "ro", + "ru", + "sa", + "sd", + "si", + "sk", + "sl", + "sn", + "so", + "sq", + "sr", + "su", + "sv", + "sw", + "ta", + "te", + "tg", + "th", + "tk", + "tl", + "tr", + "tt", + "uk", + "ur", + "uz", + "vi", + "yi", + "yo", + "zh" + ], + "title": "Language", + "type": "string", + "description": "\n Language of the audio file. If set to null, the language will be\n automatically detected. Defaults to null.\n\n If translate is selected as the task, the audio will be translated to\n English, regardless of the language selected.\n ", + "nullable": true + }, + "prompt": { + "title": "Prompt", + "type": "string", + "description": "Prompt to use for generation. Defaults to an empty string.", + "default": "" + }, + "num_speakers": { + "examples": [ + null + ], + "title": "Num Speakers", + "type": "integer", + "minimum": 1, + "description": "\n Number of speakers in the audio file. Defaults to null.\n If not provided, the number of speakers will be automatically\n detected.\n ", + "nullable": true + }, + "task": { + "enum": [ + "transcribe", + "translate" + ], + "title": "Task", + "type": "string", + "description": "Task to perform on the audio file. Either transcribe or translate.", + "default": "transcribe" + }, + "chunk_level": { + "enum": [ + "none", + "segment", + "word" + ], + "title": "Chunk Level", + "type": "string", + "description": "Level of the chunks to return. Either none, segment or word. `none` would imply that all of the audio will be transcribed without the timestamp tokens, we suggest to switch to `none` if you are not satisfied with the transcription quality, since it will usually improve the quality of the results. Switching to `none` will also provide minor speed ups in the transcription due to less amount of generated tokens. 
Notice that setting to none will produce **a single chunk with the whole transcription**.", + "default": "segment" + }, + "audio_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/whisper/dinner_conversation.mp3" + ], + "title": "Audio Url", + "type": "string", + "description": "URL of the audio file to transcribe. Supported formats: mp3, mp4, mpeg, mpga, m4a, wav or webm." + }, + "diarize": { + "title": "Diarize", + "type": "boolean", + "description": "Whether to diarize the audio file. Defaults to false. Setting to true will add costs proportional to diarization inference time.", + "default": false + } + }, + "title": "WhisperInput", + "required": [ + "audio_url" + ] + }, + "WhisperOutput": { + "x-fal-order-properties": [ + "text", + "chunks", + "inferred_languages", + "diarization_segments" + ], + "type": "object", + "properties": { + "text": { + "title": "Text", + "type": "string", + "description": "Transcription of the audio file" + }, + "inferred_languages": { + "title": "Inferred Languages", + "type": "array", + "description": "List of languages that the audio file is inferred to be. Defaults to null.", + "items": { + "enum": [ + "af", + "am", + "ar", + "as", + "az", + "ba", + "be", + "bg", + "bn", + "bo", + "br", + "bs", + "ca", + "cs", + "cy", + "da", + "de", + "el", + "en", + "es", + "et", + "eu", + "fa", + "fi", + "fo", + "fr", + "gl", + "gu", + "ha", + "haw", + "he", + "hi", + "hr", + "ht", + "hu", + "hy", + "id", + "is", + "it", + "ja", + "jw", + "ka", + "kk", + "km", + "kn", + "ko", + "la", + "lb", + "ln", + "lo", + "lt", + "lv", + "mg", + "mi", + "mk", + "ml", + "mn", + "mr", + "ms", + "mt", + "my", + "ne", + "nl", + "nn", + "no", + "oc", + "pa", + "pl", + "ps", + "pt", + "ro", + "ru", + "sa", + "sd", + "si", + "sk", + "sl", + "sn", + "so", + "sq", + "sr", + "su", + "sv", + "sw", + "ta", + "te", + "tg", + "th", + "tk", + "tl", + "tr", + "tt", + "uk", + "ur", + "uz", + "vi", + "yi", + "yo", + "zh" + ], + "type": "string" + } + }, + "chunks": { + "title": "Chunks", + "type": "array", + "description": "Timestamp chunks of the audio file", + "items": { + "$ref": "#/components/schemas/WhisperChunk" + } + }, + "diarization_segments": { + "title": "Diarization Segments", + "type": "array", + "description": "Speaker diarization segments of the audio file. Only present if diarization is enabled.", + "items": { + "$ref": "#/components/schemas/DiarizationSegment" + } + } + }, + "title": "WhisperOutput", + "required": [ + "text", + "inferred_languages", + "diarization_segments" + ] + }, + "WhisperChunk": { + "x-fal-order-properties": [ + "timestamp", + "text", + "speaker" + ], + "type": "object", + "properties": { + "text": { + "title": "Text", + "type": "string", + "description": "Transcription of the chunk" + }, + "timestamp": { + "maxItems": 2, + "type": "array", + "minItems": 2, + "title": "Timestamp", + "description": "Start and end timestamp of the chunk", + "items": { + "0": { + "type": "number" + }, + "1": { + "type": "number" + } + } + }, + "speaker": { + "title": "Speaker", + "type": "string", + "description": "Speaker ID of the chunk. Only present if diarization is enabled." 
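Relative to Wizper, the Whisper input above adds `diarize`: it attaches a `speaker` ID to each chunk and a `diarization_segments` array to the output, at extra cost per the schema. A sketch of a diarized request body and one way the per-speaker fields might be consumed; the types mirror the schemas here, while the example values and grouping helper are illustrative. The submit/poll plumbing is the same queue flow as in the Wizper sketch earlier.

// Types mirroring WhisperInput/WhisperOutput/WhisperChunk above.
interface WhisperChunk {
  timestamp: [number, number] // [start, end] in seconds
  text: string
  speaker?: string // only present when diarization is enabled
}

interface WhisperOutput {
  text: string
  chunks?: Array<WhisperChunk>
  inferred_languages: Array<string>
  diarization_segments: Array<{ timestamp: [number, number]; speaker: string }>
}

const diarizedRequest = {
  audio_url: 'https://example.com/meeting.mp3', // hypothetical input file
  task: 'transcribe' as const,
  diarize: true, // adds speaker IDs; billed proportionally per the schema
  chunk_level: 'word' as const, // Whisper supports 'none' | 'segment' | 'word'
  num_speakers: 2, // optional hint; auto-detected when omitted
}

// Collect transcribed text per speaker from a diarized result.
function textBySpeaker(out: WhisperOutput): Map<string, Array<string>> {
  const grouped = new Map<string, Array<string>>()
  for (const chunk of out.chunks ?? []) {
    const key = chunk.speaker ?? 'unknown'
    grouped.set(key, [...(grouped.get(key) ?? []), chunk.text])
  }
  return grouped
}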
+ } + }, + "title": "WhisperChunk", + "required": [ + "timestamp", + "text" + ] + }, + "DiarizationSegment": { + "x-fal-order-properties": [ + "timestamp", + "speaker" + ], + "type": "object", + "properties": { + "timestamp": { + "maxItems": 2, + "type": "array", + "minItems": 2, + "title": "Timestamp", + "description": "Start and end timestamp of the segment", + "items": { + "0": { + "type": "number" + }, + "1": { + "type": "number" + } + } + }, + "speaker": { + "title": "Speaker", + "type": "string", + "description": "Speaker ID of the segment" + } + }, + "title": "DiarizationSegment", + "required": [ + "timestamp", + "speaker" + ] + } + } + }, + "paths": { + "/fal-ai/whisper/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/whisper/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/whisper": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WhisperInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/whisper/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WhisperOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/packages/typescript/ai-fal/json/fal.models.text-to-3d.json b/packages/typescript/ai-fal/json/fal.models.text-to-3d.json new file mode 100644 index 00000000..af3bbf0f --- /dev/null +++ b/packages/typescript/ai-fal/json/fal.models.text-to-3d.json @@ -0,0 +1,1955 @@ +{ + "generated_at": "2026-01-28T02:51:51.872Z", + "total_models": 4, + "category": "text-to-3d", + "models": [ + { + "endpoint_id": "fal-ai/hunyuan-motion/fast", + "metadata": { + "display_name": "Hunyuan Motion [0.46B]", + "category": "text-to-3d", + "description": "Generate 3D human motions via text-to-generation interface of Hunyuan Motion!", + "status": "active", + "tags": [ + "text-to-3d", + "motion", + "" + ], + "updated_at": "2026-01-26T21:41:45.634Z", + "is_favorited": false, + "thumbnail_url": 
"https://v3b.fal.media/files/b/0a886139/Y0oMLck1xMhSZq_SZO0kq_b33b912e9eb2401dbb38c48ec97b2cd3.jpg", + "model_url": "https://fal.run/fal-ai/hunyuan-motion/fast", + "license_type": "commercial", + "date": "2025-12-30T12:58:39.853Z", + "group": { + "key": "hunyuan-motion", + "label": "0.46B" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/hunyuan-motion/fast", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/hunyuan-motion/fast queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/hunyuan-motion/fast", + "category": "text-to-3d", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a886139/Y0oMLck1xMhSZq_SZO0kq_b33b912e9eb2401dbb38c48ec97b2cd3.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/hunyuan-motion/fast", + "documentationUrl": "https://fal.ai/models/fal-ai/hunyuan-motion/fast/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "HunyuanMotionFastInput": { + "title": "HYMotionInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A person is running then takes a big leap.", + "Someone waves hello with their right hand.", + "A dancer performs a spinning pirouette." + ], + "title": "Prompt", + "type": "string", + "description": "Text prompt describing the motion to generate." + }, + "duration": { + "description": "Motion duration in seconds (0.5-12.0).", + "type": "number", + "minimum": 0.5, + "title": "Duration", + "examples": [ + 3, + 5, + 10 + ], + "maximum": 12, + "default": 5 + }, + "guidance_scale": { + "description": "Classifier-free guidance scale. Higher = more faithful to prompt.", + "type": "number", + "minimum": 1, + "title": "Guidance Scale", + "examples": [ + 3, + 5, + 7.5 + ], + "maximum": 10, + "default": 5 + }, + "seed": { + "examples": [ + 42, + 12345 + ], + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducible generation." 
+ }, + "output_format": { + "enum": [ + "fbx", + "dict" + ], + "title": "Output Format", + "type": "string", + "description": "Output format: 'fbx' for animation files, 'dict' for raw JSON.", + "default": "fbx" + } + }, + "x-fal-order-properties": [ + "prompt", + "seed", + "duration", + "guidance_scale", + "output_format" + ], + "required": [ + "prompt" + ] + }, + "HunyuanMotionFastOutput": { + "title": "HYMotionOutput", + "type": "object", + "properties": { + "fbx_file": { + "examples": [ + { + "url": "https://v3b.fal.media/files/b/0a885f1e/JytCv1tbq28iJmvXLrpbQ_20251230_112744828_2a84e993_000.fbx" + } + ], + "title": "Fbx File", + "description": "Generated FBX animation file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "motion_json": { + "title": "Motion Json", + "description": "Generated motion data as JSON.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "seed": { + "examples": [ + 42 + ], + "title": "Seed", + "type": "integer", + "description": "Seed used for generation." + } + }, + "x-fal-order-properties": [ + "fbx_file", + "motion_json", + "seed" + ], + "required": [ + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/hunyuan-motion/fast/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-motion/fast/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
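The Hunyuan Motion input/output pair above rides the same queue contract: only `prompt` is required, and `fbx_file` appears in the output when `output_format` is 'fbx'. A hedged end-to-end sketch against those schemas; the `Key` auth prefix and polling cadence are assumptions, as before.

// Returns the URL of the generated FBX animation, if present.
async function generateMotion(prompt: string): Promise<string | undefined> {
  const headers = {
    Authorization: `Key ${process.env.FAL_KEY}`, // assumed auth format
    'Content-Type': 'application/json',
  }
  const submit = await fetch('https://queue.fal.run/fal-ai/hunyuan-motion/fast', {
    method: 'POST',
    headers,
    body: JSON.stringify({
      prompt, // the only required field
      duration: 5, // seconds, 0.5-12 per the schema
      guidance_scale: 5, // 1-10; higher follows the prompt more closely
      output_format: 'fbx', // 'dict' returns motion_json instead
    }),
  })
  const { request_id } = (await submit.json()) as { request_id: string }

  const base = `https://queue.fal.run/fal-ai/hunyuan-motion/fast/requests/${request_id}`
  for (;;) {
    const s = (await (await fetch(`${base}/status`, { headers })).json()) as {
      status: string
    }
    if (s.status === 'COMPLETED') break
    await new Promise((resolve) => setTimeout(resolve, 1000))
  }

  // Only `seed` is required in HunyuanMotionFastOutput; fbx_file should be
  // present when output_format was 'fbx'.
  const out = (await (await fetch(base, { headers })).json()) as {
    fbx_file?: { url: string }
    seed: number
  }
  return out.fbx_file?.url
}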
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-motion/fast": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HunyuanMotionFastInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-motion/fast/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HunyuanMotionFastOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/hunyuan-motion", + "metadata": { + "display_name": "Hunyuan Motion [1B]", + "category": "text-to-3d", + "description": "Generate 3D human motions via text-to-generation interface of Hunyuan Motion!", + "status": "active", + "tags": [ + "text-to-3d", + "motion", + "" + ], + "updated_at": "2026-01-26T21:41:45.766Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a886121/DkVwzQN-tnXsEY9yW6wzM_473ed8506f41423181f1736357bde8f8.jpg", + "model_url": "https://fal.run/fal-ai/hunyuan-motion", + "license_type": "commercial", + "date": "2025-12-30T12:54:39.431Z", + "group": { + "key": "hunyuan-motion", + "label": "1B" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/hunyuan-motion", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/hunyuan-motion queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/hunyuan-motion", + "category": "text-to-3d", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a886121/DkVwzQN-tnXsEY9yW6wzM_473ed8506f41423181f1736357bde8f8.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/hunyuan-motion", + "documentationUrl": "https://fal.ai/models/fal-ai/hunyuan-motion/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "HunyuanMotionInput": { + "title": "HYMotionInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A person is running then takes a big leap.", + "Someone waves hello with their right hand.", + "A dancer performs a spinning pirouette." + ], + "title": "Prompt", + "type": "string", + "description": "Text prompt describing the motion to generate." + }, + "duration": { + "description": "Motion duration in seconds (0.5-12.0).", + "type": "number", + "minimum": 0.5, + "title": "Duration", + "examples": [ + 3, + 5, + 10 + ], + "maximum": 12, + "default": 5 + }, + "guidance_scale": { + "description": "Classifier-free guidance scale. Higher = more faithful to prompt.", + "type": "number", + "minimum": 1, + "title": "Guidance Scale", + "examples": [ + 3, + 5, + 7.5 + ], + "maximum": 10, + "default": 5 + }, + "seed": { + "examples": [ + 42, + 12345 + ], + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducible generation." + }, + "output_format": { + "enum": [ + "fbx", + "dict" + ], + "title": "Output Format", + "type": "string", + "description": "Output format: 'fbx' for animation files, 'dict' for raw JSON.", + "default": "fbx" + } + }, + "x-fal-order-properties": [ + "prompt", + "seed", + "duration", + "guidance_scale", + "output_format" + ], + "required": [ + "prompt" + ] + }, + "HunyuanMotionOutput": { + "title": "HYMotionOutput", + "type": "object", + "properties": { + "fbx_file": { + "examples": [ + { + "url": "https://v3b.fal.media/files/b/0a885f1e/JytCv1tbq28iJmvXLrpbQ_20251230_112744828_2a84e993_000.fbx" + } + ], + "title": "Fbx File", + "description": "Generated FBX animation file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "motion_json": { + "title": "Motion Json", + "description": "Generated motion data as JSON.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "seed": { + "examples": [ + 42 + ], + "title": "Seed", + "type": "integer", + "description": "Seed used for generation." + } + }, + "x-fal-order-properties": [ + "fbx_file", + "motion_json", + "seed" + ], + "required": [ + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/hunyuan-motion/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-motion/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-motion": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HunyuanMotionInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-motion/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HunyuanMotionOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/hunyuan3d-v3/text-to-3d", + "metadata": { + "display_name": "Hunyuan3d V3", + "category": "text-to-3d", + "description": "Turn simple sketches into detailed, fully-textured 3D models. 
Instantly convert your concept designs into formats ready for Unity, Unreal, and Blender.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:51.970Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8696b5/YaGZtFEBFL6CEXjiVxdB__dbec569fa5d4418bbbc9d77354f7c86f.jpg", + "model_url": "https://fal.run/fal-ai/hunyuan3d-v3/text-to-3d", + "license_type": "commercial", + "date": "2025-12-16T22:13:53.731Z", + "group": { + "key": "hunyuan3d-v3", + "label": "Text to 3D" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/hunyuan3d-v3/text-to-3d", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/hunyuan3d-v3/text-to-3d queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/hunyuan3d-v3/text-to-3d", + "category": "text-to-3d", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8696b5/YaGZtFEBFL6CEXjiVxdB__dbec569fa5d4418bbbc9d77354f7c86f.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/hunyuan3d-v3/text-to-3d", + "documentationUrl": "https://fal.ai/models/fal-ai/hunyuan3d-v3/text-to-3d/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Hunyuan3dV3TextTo3dInput": { + "title": "TextTo3DInput", + "type": "object", + "properties": { + "enable_pbr": { + "title": "Enable Pbr", + "type": "boolean", + "description": "Whether to enable PBR material generation", + "default": false + }, + "polygon_type": { + "enum": [ + "triangle", + "quadrilateral" + ], + "title": "Polygon Type", + "type": "string", + "description": "Polygon type. Only takes effect when GenerateType is LowPoly.", + "default": "triangle" + }, + "face_count": { + "minimum": 40000, + "title": "Face Count", + "type": "integer", + "description": "Target face count. Range: 40000-1500000", + "maximum": 1500000, + "default": 500000 + }, + "prompt": { + "examples": [ + "A rustic wooden treasure chest with metal bands and ornate lock" + ], + "title": "Prompt", + "type": "string", + "description": "Text description of the 3D content to generate. Supports up to 1024 UTF-8 characters.", + "maxLength": 1024 + }, + "generate_type": { + "enum": [ + "Normal", + "LowPoly", + "Geometry" + ], + "title": "Generate Type", + "type": "string", + "description": "Generation type. Normal: textured model. LowPoly: polygon reduction. 
Geometry: white model without texture.", + "default": "Normal" + } + }, + "x-fal-order-properties": [ + "prompt", + "enable_pbr", + "face_count", + "generate_type", + "polygon_type" + ], + "required": [ + "prompt" + ] + }, + "Hunyuan3dV3TextTo3dOutput": { + "title": "TextTo3DOutput", + "type": "object", + "properties": { + "model_urls": { + "examples": [ + { + "glb": { + "file_size": 64724836, + "file_name": "model.glb", + "content_type": "model/gltf-binary", + "url": "https://v3b.fal.media/files/b/0a8686a8/1hPquv3AqqkfnqSM9fpmB_model.glb" + }, + "obj": { + "file_size": 44084728, + "file_name": "model.obj", + "content_type": "text/plain", + "url": "https://v3b.fal.media/files/b/0a8686a8/AVgdsVFrGAKGAFr4e2g56_model.obj" + } + } + ], + "title": "Model Urls", + "description": "URLs for different 3D model formats", + "allOf": [ + { + "$ref": "#/components/schemas/ModelUrls" + } + ] + }, + "thumbnail": { + "examples": [ + { + "file_size": 172757, + "file_name": "preview.png", + "content_type": "image/png", + "url": "https://v3b.fal.media/files/b/0a8686a8/khgYO1d6xqWOJPi6_IR_j_preview.png" + } + ], + "title": "Thumbnail", + "description": "Preview thumbnail of the generated model", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + }, + "model_glb": { + "examples": [ + { + "file_size": 64724836, + "file_name": "model.glb", + "content_type": "model/gltf-binary", + "url": "https://v3b.fal.media/files/b/0a8686a8/1hPquv3AqqkfnqSM9fpmB_model.glb" + } + ], + "title": "Model Glb", + "description": "Generated 3D object in GLB format.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "model_glb", + "thumbnail", + "model_urls", + "seed" + ], + "required": [ + "model_glb", + "model_urls" + ] + }, + "ModelUrls": { + "title": "ModelUrls", + "type": "object", + "properties": { + "fbx": { + "title": "Fbx", + "description": "FBX format 3D model", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "usdz": { + "title": "Usdz", + "description": "USDZ format 3D model", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "glb": { + "title": "Glb", + "description": "GLB format 3D model", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "obj": { + "title": "Obj", + "description": "OBJ format 3D model", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "glb", + "fbx", + "obj", + "usdz" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
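Hunyuan3D's output exposes the same model in several formats via `ModelUrls`, with GLB guaranteed by the `required` list. A sketch of a request body and a small format-selection helper built on the schemas above; the preference ordering is an example, not part of the schema.

// Request mirroring Hunyuan3dV3TextTo3dInput; values are illustrative.
const hunyuan3dRequest = {
  prompt: 'A rustic wooden treasure chest with metal bands and ornate lock',
  generate_type: 'Normal' as const, // 'LowPoly' reduces polys, 'Geometry' skips texture
  face_count: 500_000, // 40_000-1_500_000 per the schema
  enable_pbr: false,
  polygon_type: 'triangle' as const, // only takes effect for 'LowPoly'
}

interface FileRef {
  url: string
  file_name?: string
  content_type?: string
}

interface Hunyuan3dOutput {
  model_glb: FileRef
  model_urls: { glb?: FileRef; fbx?: FileRef; obj?: FileRef; usdz?: FileRef }
  thumbnail?: FileRef
  seed?: number
}

// Pick the first available format from a preference list, e.g. USDZ first
// for an iOS AR pipeline. Falls back to GLB, which is always present.
function pickFormat(
  out: Hunyuan3dOutput,
  prefer: Array<'usdz' | 'fbx' | 'glb' | 'obj'>,
): string {
  for (const format of prefer) {
    const ref = out.model_urls[format]
    if (ref) return ref.url
  }
  return out.model_glb.url
}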
+ }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/hunyuan3d-v3/text-to-3d/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan3d-v3/text-to-3d/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/hunyuan3d-v3/text-to-3d": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Hunyuan3dV3TextTo3dInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan3d-v3/text-to-3d/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Hunyuan3dV3TextTo3dOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/meshy/v6-preview/text-to-3d", + "metadata": { + "display_name": "Meshy 6 Preview", + "category": "text-to-3d", + "description": "Meshy-6-Preview is the latest model from Meshy. 
It generates realistic and production ready 3D models.", + "status": "active", + "tags": [ + "text-to-3d" + ], + "updated_at": "2026-01-26T21:42:44.607Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/rabbit/R9WXOPkhbiHFjL8iXBLDG_79f59228974346e59950364b95325d80.jpg", + "model_url": "https://fal.run/fal-ai/meshy/v6-preview/text-to-3d", + "license_type": "commercial", + "date": "2025-10-06T23:26:16.872Z", + "group": { + "key": "Meshy", + "label": "v6 Preview Text to 3D" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/meshy/v6-preview/text-to-3d", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/meshy/v6-preview/text-to-3d queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/meshy/v6-preview/text-to-3d", + "category": "text-to-3d", + "thumbnailUrl": "https://v3b.fal.media/files/b/rabbit/R9WXOPkhbiHFjL8iXBLDG_79f59228974346e59950364b95325d80.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/meshy/v6-preview/text-to-3d", + "documentationUrl": "https://fal.ai/models/fal-ai/meshy/v6-preview/text-to-3d/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MeshyV6PreviewTextTo3dInput": { + "x-fal-order-properties": [ + "prompt", + "mode", + "art_style", + "seed", + "topology", + "target_polycount", + "should_remesh", + "symmetry_mode", + "is_a_t_pose", + "enable_pbr", + "enable_prompt_expansion", + "texture_prompt", + "texture_image_url", + "enable_safety_checker" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A rustic, antique wooden treasure chest with a curved, domed lid, constructed from weathered, dark brown planks exhibiting prominent wood grain and subtle distress. It's heavily reinforced with broad, dark grey, oxidized metal bands secured by numerous circular rivets. Ornate, dark iron decorative elements featuring swirling foliate patterns and dragon motifs adorn the corners and lid. A prominent, circular, intricately carved metal lock plate with a central keyhole dominates the front, flanked by two large, dark metallic pull rings." + ], + "title": "Prompt", + "type": "string", + "description": "Describe what kind of object the 3D model is. Maximum 600 characters.", + "maxLength": 600 + }, + "enable_pbr": { + "description": "Generate PBR Maps (metallic, roughness, normal) in addition to base color. 
Should be false for sculpture style.", + "type": "boolean", + "title": "Enable Pbr", + "default": false + }, + "target_polycount": { + "minimum": 100, + "maximum": 300000, + "type": "integer", + "title": "Target Polycount", + "description": "Target number of polygons in the generated model", + "default": 30000 + }, + "art_style": { + "enum": [ + "realistic", + "sculpture" + ], + "description": "Desired art style of the object. Note: enable_pbr should be false for sculpture style.", + "type": "string", + "title": "Art Style", + "default": "realistic" + }, + "enable_safety_checker": { + "description": "If set to true, input data will be checked for safety before processing.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "mode": { + "enum": [ + "preview", + "full" + ], + "description": "Generation mode. 'preview' returns untextured geometry only, 'full' returns textured model (preview + refine).", + "type": "string", + "title": "Mode", + "default": "full" + }, + "symmetry_mode": { + "enum": [ + "off", + "auto", + "on" + ], + "description": "Controls symmetry behavior during model generation.", + "type": "string", + "title": "Symmetry Mode", + "default": "auto" + }, + "should_remesh": { + "description": "Whether to enable the remesh phase. When false, returns unprocessed triangular mesh.", + "type": "boolean", + "title": "Should Remesh", + "default": true + }, + "texture_image_url": { + "description": "2D image to guide the texturing process (only used in 'full' mode)", + "max_pixels": 178956970, + "type": "string", + "x-fal": { + "timeout": 20, + "max_file_size": 20971520 + }, + "title": "Texture Image Url", + "limit_description": "Max file size: 20.0MB, Timeout: 20.0s" + }, + "topology": { + "enum": [ + "quad", + "triangle" + ], + "description": "Specify the topology of the generated model. Quad for smooth surfaces, Triangle for detailed geometry.", + "type": "string", + "title": "Topology", + "default": "triangle" + }, + "enable_prompt_expansion": { + "description": "Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": false + }, + "seed": { + "description": "Seed for reproducible results. Same prompt and seed usually generate the same result.", + "type": "integer", + "title": "Seed" + }, + "is_a_t_pose": { + "description": "Whether to generate the model in an A/T pose", + "type": "boolean", + "title": "Is A T Pose", + "default": false + }, + "texture_prompt": { + "description": "Additional text prompt to guide the texturing process (only used in 'full' mode)", + "type": "string", + "title": "Texture Prompt", + "maxLength": 600 + } + }, + "title": "TextTo3DInput", + "description": "Input for Text to 3D conversion", + "required": [ + "prompt" + ] + }, + "MeshyV6PreviewTextTo3dOutput": { + "x-fal-order-properties": [ + "model_glb", + "thumbnail", + "model_urls", + "texture_urls", + "seed", + "prompt", + "actual_prompt" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A rustic, antique wooden treasure chest with a curved, domed lid, constructed from weathered, dark brown planks exhibiting prominent wood grain and subtle distress. It's heavily reinforced with broad, dark grey, oxidized metal bands secured by numerous circular rivets. Ornate, dark iron decorative elements featuring swirling foliate patterns and dragon motifs adorn the corners and lid. 
A prominent, circular, intricately carved metal lock plate with a central keyhole dominates the front, flanked by two large, dark metallic pull rings." + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt used for generation" + }, + "thumbnail": { + "examples": [ + { + "file_size": 173792, + "file_name": "preview.png", + "content_type": "image/png", + "url": "https://v3b.fal.media/files/b/koala/6LJISu4ilkZXcdOETwl_d_preview.png" + } + ], + "title": "Thumbnail", + "description": "Preview thumbnail of the generated model", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "actual_prompt": { + "description": "The actual prompt used if prompt expansion was enabled", + "type": "string", + "title": "Actual Prompt" + }, + "texture_urls": { + "examples": [ + [ + { + "base_color": { + "file_size": 4254502, + "file_name": "texture_0.png", + "content_type": "image/png", + "url": "https://v3b.fal.media/files/b/panda/DoPKAuZY0tTjnr6C9ee-Q_texture_0.png" + } + } + ] + ], + "title": "Texture Urls", + "type": "array", + "description": "Array of texture file objects", + "items": { + "$ref": "#/components/schemas/TextureFiles" + } + }, + "model_glb": { + "examples": [ + { + "file_size": 9314028, + "file_name": "model.glb", + "content_type": "model/gltf-binary", + "url": "https://v3b.fal.media/files/b/penguin/DId89qXLu6BXu09RFAwAV_model.glb" + } + ], + "title": "Model Glb", + "description": "Generated 3D object in GLB format.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "seed": { + "examples": [ + 4002110719 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + }, + "model_urls": { + "examples": [ + { + "fbx": { + "file_size": 5444380, + "file_name": "model.fbx", + "content_type": "application/octet-stream", + "url": "https://v3b.fal.media/files/b/kangaroo/7nUUw5dHN9a0DKlOpAKbP_model.fbx" + }, + "usdz": { + "file_size": 9834246, + "file_name": "model.usdz", + "content_type": "model/vnd.usdz+zip", + "url": "https://v3b.fal.media/files/b/panda/XcC-mIJywUvH7coyrzENU_model.usdz" + }, + "glb": { + "file_size": 9314028, + "file_name": "model.glb", + "content_type": "model/gltf-binary", + "url": "https://v3b.fal.media/files/b/penguin/DId89qXLu6BXu09RFAwAV_model.glb" + }, + "obj": { + "file_size": 2755145, + "file_name": "model.obj", + "content_type": "text/plain", + "url": "https://v3b.fal.media/files/b/monkey/cCNMHqUbKSNtDN1iGmiYm_model.obj" + } + } + ], + "title": "Model Urls", + "description": "URLs for different 3D model formats", + "allOf": [ + { + "$ref": "#/components/schemas/ModelUrls" + } + ] + } + }, + "title": "TextTo3DOutput", + "description": "Output for Text to 3D generation", + "required": [ + "model_glb", + "model_urls", + "prompt" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "File", + "required": [ + "url" + ] + }, + "TextureFiles": { + "x-fal-order-properties": [ + "base_color", + "metallic", + "normal", + "roughness" + ], + "type": "object", + "properties": { + "base_color": { + "description": "Base color texture", + "title": "Base Color", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "normal": { + "description": "Normal texture (PBR)", + "title": "Normal", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "roughness": { + "description": "Roughness texture (PBR)", + "title": "Roughness", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "metallic": { + "description": "Metallic texture (PBR)", + "title": "Metallic", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "TextureFiles", + "description": "Texture files downloaded and uploaded to CDN", + "required": [ + "base_color" + ] + }, + "ModelUrls": { + "x-fal-order-properties": [ + "glb", + "fbx", + "obj", + "usdz", + "blend", + "stl" + ], + "type": "object", + "properties": { + "usdz": { + "description": "USDZ format 3D model", + "title": "Usdz", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "fbx": { + "description": "FBX format 3D model", + "title": "Fbx", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "blend": { + "description": "Blender format 3D model", + "title": "Blend", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "stl": { + "description": "STL format 3D model", + "title": "Stl", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "glb": { + "description": "GLB format 3D model", + "title": "Glb", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "obj": { + "description": "OBJ format 3D model", + "title": "Obj", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "ModelUrls", + "description": "3D model files in various formats" + } + } + }, + "paths": { + "/fal-ai/meshy/v6-preview/text-to-3d/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/meshy/v6-preview/text-to-3d/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
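Meshy's `mode` field splits generation into a cheap untextured 'preview' pass and a textured 'full' pass, and `enable_pbr` only makes sense when textures are produced. A sketch of how the two modes might be combined, reusing the seed for reproducibility as the schema suggests; the two-pass workflow itself is an assumption, not something the schema prescribes. Queue submit/poll plumbing matches the Wizper sketch earlier.

type MeshyMode = 'preview' | 'full'

// Build a MeshyV6PreviewTextTo3dInput payload for a given mode.
function meshyRequest(prompt: string, mode: MeshyMode) {
  return {
    prompt, // max 600 characters per the schema
    mode, // 'preview' returns untextured geometry only
    art_style: 'realistic' as const,
    topology: 'quad' as const, // quads for smooth surfaces, triangles for detail
    target_polycount: 30_000, // 100-300_000
    // PBR maps require textures ('full' mode) and must also stay false
    // whenever art_style is 'sculpture'.
    enable_pbr: mode === 'full',
  }
}

// A cheaper workflow: generate geometry first, inspect it, then rerun in
// 'full' mode with the same seed ("same prompt and seed usually generate
// the same result" per the schema).
const preview = meshyRequest('antique wooden treasure chest', 'preview')
const full = { ...meshyRequest('antique wooden treasure chest', 'full'), seed: 4002110719 }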
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/meshy/v6-preview/text-to-3d": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MeshyV6PreviewTextTo3dInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/meshy/v6-preview/text-to-3d/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MeshyV6PreviewTextTo3dOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/packages/typescript/ai-fal/json/fal.models.text-to-audio.json b/packages/typescript/ai-fal/json/fal.models.text-to-audio.json new file mode 100644 index 00000000..c1bb9fee --- /dev/null +++ b/packages/typescript/ai-fal/json/fal.models.text-to-audio.json @@ -0,0 +1,13574 @@ +{ + "generated_at": "2026-01-28T02:51:51.873Z", + "total_models": 34, + "category": "text-to-audio", + "models": [ + { + "endpoint_id": "fal-ai/elevenlabs/music", + "metadata": { + "display_name": "Elevenlabs Music", + "category": "text-to-audio", + "description": "Generate high quality, realistic music with fine controls using Elevenlabs Music!", + "status": "active", + "tags": [ + "music", + "text-to-music" + ], + "updated_at": "2026-01-26T21:41:49.530Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a875694/SZffdb8layV90JxfN5fkP_d2048a658d9e4dc9973598f3542833a0.jpg", + "model_url": "https://fal.run/fal-ai/elevenlabs/music", + "license_type": "commercial", + "date": "2025-12-22T15:22:56.999Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/elevenlabs/music", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/elevenlabs/music queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/elevenlabs/music", + "category": "text-to-audio", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a875694/SZffdb8layV90JxfN5fkP_d2048a658d9e4dc9973598f3542833a0.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/elevenlabs/music", + "documentationUrl": "https://fal.ai/models/fal-ai/elevenlabs/music/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ElevenlabsMusicInput": { + "description": "Request format for Elevenlabs Music API", + "type": "object", + "properties": { + "prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The text prompt describing the music to generate", + "title": "Prompt", + "examples": [ + "Mysterious original soundtrack, themes of jungle, rainforest, nature, woodwinds, busy rhythmic tribal percussion." + ] + }, + "composition_plan": { + "anyOf": [ + { + "$ref": "#/components/schemas/MusicCompositionPlan" + }, + { + "type": "null" + } + ], + "description": "The composition plan for the music" + }, + "music_length_ms": { + "anyOf": [ + { + "minimum": 3000, + "maximum": 600000, + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The length of the song to generate in milliseconds. Used only in conjunction with prompt. Must be between 3000ms and 600000ms. Optional - if not provided, the model will choose a length based on the prompt.", + "title": "Music Length Ms" + }, + "output_format": { + "enum": [ + "mp3_22050_32", + "mp3_44100_32", + "mp3_44100_64", + "mp3_44100_96", + "mp3_44100_128", + "mp3_44100_192", + "pcm_8000", + "pcm_16000", + "pcm_22050", + "pcm_24000", + "pcm_44100", + "pcm_48000", + "ulaw_8000", + "alaw_8000", + "opus_48000_32", + "opus_48000_64", + "opus_48000_96", + "opus_48000_128", + "opus_48000_192" + ], + "description": "Output format of the generated audio. Formatted as codec_sample_rate_bitrate. So an mp3 with 22.05kHz sample rate at 32kbs is represented as mp3_22050_32. MP3 with 192kbps bitrate requires you to be subscribed to Creator tier or above. PCM with 44.1kHz sample rate requires you to be subscribed to Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs.", + "type": "string", + "title": "Output Format", + "default": "mp3_44100_128" + }, + "respect_sections_durations": { + "description": "Controls how strictly section durations in the composition_plan are enforced. It will only have an effect if it is used with composition_plan. When set to true, the model will precisely respect each section's duration_ms from the plan. When set to false, the model may adjust individual section durations which will generally lead to better generation quality and improved latency, while always preserving the total song duration from the plan.", + "type": "boolean", + "title": "Respect Sections Durations", + "default": true + }, + "force_instrumental": { + "description": "If true, guarantees that the generated song will be instrumental. If false, the song may or may not be instrumental depending on the prompt. 
Can only be used with prompt.", + "type": "boolean", + "title": "Force Instrumental", + "default": false + } + }, + "title": "MusicRequest", + "x-fal-order-properties": [ + "prompt", + "composition_plan", + "music_length_ms", + "force_instrumental", + "respect_sections_durations", + "output_format" + ] + }, + "ElevenlabsMusicOutput": { + "title": "MusicOutput", + "type": "object", + "properties": { + "audio": { + "examples": [ + { + "file_name": "music_generated.mp3", + "content_type": "audio/mpeg", + "url": "https://storage.googleapis.com/falserverless/example_outputs/elevenlabs/music_generated.mp3" + } + ], + "description": "The generated music audio file in MP3 format", + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "audio" + ], + "required": [ + "audio" + ] + }, + "MusicCompositionPlan": { + "title": "MusicCompositionPlan", + "type": "object", + "properties": { + "negative_global_styles": { + "description": "The styles that should not be present in the entire song.", + "type": "array", + "title": "Negative Global Styles", + "items": { + "type": "string" + } + }, + "sections": { + "description": "The sections of the song.", + "type": "array", + "title": "Sections", + "items": { + "$ref": "#/components/schemas/MusicSection" + } + }, + "positive_global_styles": { + "description": "The styles that should be present in the entire song.", + "type": "array", + "title": "Positive Global Styles", + "items": { + "type": "string" + } + } + }, + "x-fal-order-properties": [ + "positive_global_styles", + "negative_global_styles", + "sections" + ], + "required": [ + "positive_global_styles", + "negative_global_styles", + "sections" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + }, + "MusicSection": { + "title": "MusicSection", + "type": "object", + "properties": { + "positive_local_styles": { + "description": "The styles that should be present in this section.", + "type": "array", + "title": "Positive Local Styles", + "items": { + "type": "string" + } + }, + "lines": { + "description": "The lyrics of the section. Each line must be at most 200 characters long.", + "type": "array", + "title": "Lines", + "items": { + "type": "string" + } + }, + "negative_local_styles": { + "description": "The styles that should not be present in this section.", + "type": "array", + "title": "Negative Local Styles", + "items": { + "type": "string" + } + }, + "duration_ms": { + "minimum": 3000, + "maximum": 120000, + "type": "integer", + "title": "Duration Ms", + "description": "The duration of the section in milliseconds. Must be between 3000ms and 120000ms." 
+ }, + "section_name": { + "description": "The name of the section. Must be between 1 and 100 characters.", + "type": "string", + "title": "Section Name", + "minLength": 1, + "maxLength": 100 + } + }, + "x-fal-order-properties": [ + "section_name", + "positive_local_styles", + "negative_local_styles", + "duration_ms", + "lines" + ], + "required": [ + "section_name", + "positive_local_styles", + "negative_local_styles", + "duration_ms", + "lines" + ] + } + } + }, + "paths": { + "/fal-ai/elevenlabs/music/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/elevenlabs/music/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/elevenlabs/music": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ElevenlabsMusicInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/elevenlabs/music/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ElevenlabsMusicOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/minimax-music/v2", + "metadata": { + "display_name": "Minimax Music", + "category": "text-to-audio", + "description": "Generate music from text prompts using the MiniMax Music 2.0 model, which leverages advanced AI techniques to create high-quality, diverse musical compositions.", + "status": "active", + "tags": [ + "music", + "audio" + ], + "updated_at": "2026-01-26T21:42:28.692Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/penguin/jnrLOiQsmRXqYCI_haHe5_63de465a7ccb447bb3bf9ed6c0c1c88e.jpg", + "model_url": "https://fal.run/fal-ai/minimax-music/v2", + "license_type": "commercial", + "date": "2025-10-30T22:33:42.886Z", + "group": { + "key": "minimax-music", + "label": "v2.0" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/minimax-music/v2", + "version": "1.0.0", + "description": "The 
OpenAPI schema for the fal-ai/minimax-music/v2 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/minimax-music/v2", + "category": "text-to-audio", + "thumbnailUrl": "https://v3b.fal.media/files/b/penguin/jnrLOiQsmRXqYCI_haHe5_63de465a7ccb447bb3bf9ed6c0c1c88e.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/minimax-music/v2", + "documentationUrl": "https://fal.ai/models/fal-ai/minimax-music/v2/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MinimaxMusicV2Input": { + "x-fal-order-properties": [ + "prompt", + "lyrics_prompt", + "audio_setting" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Indie folk, melancholic, introspective, longing, solitary walk, coffee shop" + ], + "description": "A description of the music, specifying style, mood, and scenario. 10-300 characters.", + "type": "string", + "maxLength": 2000, + "minLength": 10, + "title": "Prompt" + }, + "lyrics_prompt": { + "examples": [ + "[verse]Streetlights flicker, the night breeze sighs\nShadows stretch as I walk alone\nAn old coat wraps my silent sorrow\nWandering, longing, where should I go\n[chorus]Pushing the wooden door, the aroma spreads\nIn a familiar corner, a stranger gazes" + ], + "description": "Lyrics of the song. Use \\n to separate lines. You may add structure tags like [Intro], [Verse], [Chorus], [Bridge], [Outro] to enhance the arrangement.
10-3000 characters.", + "type": "string", + "maxLength": 3000, + "minLength": 10, + "title": "Lyrics Prompt" + }, + "audio_setting": { + "description": "Audio configuration settings", + "title": "Audio Setting", + "allOf": [ + { + "$ref": "#/components/schemas/AudioSetting" + } + ] + } + }, + "title": "TextToMusic20Request", + "required": [ + "prompt", + "lyrics_prompt" + ] + }, + "MinimaxMusicV2Output": { + "x-fal-order-properties": [ + "audio" + ], + "type": "object", + "properties": { + "audio": { + "examples": [ + { + "url": "https://v3.fal.media/files/lion/b3-wJ5bbmVo8S-KPqDBMK_output.mp3" + } + ], + "description": "The generated music", + "title": "Audio", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "MusicV15Output", + "required": [ + "audio" + ] + }, + "AudioSetting": { + "x-fal-order-properties": [ + "sample_rate", + "bitrate", + "format" + ], + "type": "object", + "properties": { + "format": { + "enum": [ + "mp3", + "pcm", + "flac" + ], + "description": "Audio format", + "type": "string", + "title": "Format", + "default": "mp3" + }, + "sample_rate": { + "enum": [ + 8000, + 16000, + 22050, + 24000, + 32000, + 44100 + ], + "description": "Sample rate of generated audio", + "type": "integer", + "title": "Sample Rate", + "default": 44100 + }, + "bitrate": { + "enum": [ + 32000, + 64000, + 128000, + 256000 + ], + "description": "Bitrate of generated audio", + "type": "integer", + "title": "Bitrate", + "default": 256000 + } + }, + "title": "AudioSetting" + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/minimax-music/v2/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax-music/v2/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/minimax-music/v2": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxMusicV2Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax-music/v2/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxMusicV2Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "beatoven/sound-effect-generation", + "metadata": { + "display_name": "Sound Effect Generation", + "category": "text-to-audio", + "description": "Create professional-grade sound effects from animal and vehicle to nature, sci-fi, and otherworldly sounds. 
Perfect for films, games, and digital content.", + "status": "active", + "tags": [ + "sfx", + "audio", + "effects", + "speech" + ], + "updated_at": "2026-01-26T21:42:37.967Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/penguin/SV2axpXDRJpr3LamW4PGo_9bca6ced-1271-4fbf-9869-c8db8ff81977.png", + "model_url": "https://fal.run/beatoven/sound-effect-generation", + "license_type": "commercial", + "date": "2025-10-18T16:59:46.084Z", + "group": { + "key": "Beatoven", + "label": "Sound Effects" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for beatoven/sound-effect-generation", + "version": "1.0.0", + "description": "The OpenAPI schema for the beatoven/sound-effect-generation queue.", + "x-fal-metadata": { + "endpointId": "beatoven/sound-effect-generation", + "category": "text-to-audio", + "thumbnailUrl": "https://v3b.fal.media/files/b/penguin/SV2axpXDRJpr3LamW4PGo_9bca6ced-1271-4fbf-9869-c8db8ff81977.png", + "playgroundUrl": "https://fal.ai/models/beatoven/sound-effect-generation", + "documentationUrl": "https://fal.ai/models/beatoven/sound-effect-generation/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "SoundEffectGenerationInput": { + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "duration", + "refinement", + "creativity", + "seed" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Powerful helicopter takeoff: rapidly building rotor blades whirring and chopping, increasing engine whine, ground vibrations.", + "A futuristic spaceship door opening", + "A cinematic explosion with debris falling", + "Rain falling on a window pane", + "Footsteps on gravel" + ], + "title": "Prompt", + "type": "string", + "description": "Describe the sound effect you want to generate" + }, + "duration": { + "description": "Length of the generated sound effect in seconds", + "type": "number", + "examples": [ + 7, + 10, + 20, + 30 + ], + "title": "Duration", + "minimum": 1, + "maximum": 35, + "default": 5 + }, + "refinement": { + "description": "Refinement level - Higher values may improve quality but take longer", + "type": "integer", + "examples": [ + 40, + 70, + 100, + 200 + ], + "title": "Refinement", + "minimum": 10, + "maximum": 200, + "default": 40 + }, + "seed": { + "anyOf": [ + { + "minimum": 0, + "maximum": 2147483647, + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducible results - leave empty for random generation" + }, + "negative_prompt": { + "examples": [ + "Low-pitched hum", + "High-pitched screech, rain, wind", + "Thunder, lightning", + "traffic, people speaking", + "Soft whisper" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Describe the types of sounds you don't want to generate in the output, avoid double-negatives, compare with positive prompts", + "default": "" + }, + "creativity": { + "description": "Creativity level - higher values allow more creative interpretation of the prompt", + "type": "number", + "examples": [ + 16, + 14, + 10 + ], + "title": "Creativity", + "minimum": 1, + "maximum": 20, + "default": 16 + } + }, + "title": "SoundEffectGenerationInput", + "description": "Input schema for sound effect generation with form controls for the playground.", + "required": [ + "prompt" + ] + }, + "SoundEffectGenerationOutput": { + "x-fal-order-properties": [ + "audio", + "prompt", + "metadata" + ], + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The processed prompt used for generation" + }, + "metadata": { + "title": "Metadata", + "type": "object", + "additionalProperties": true, + "description": "Generation metadata including duration, sample rate, and parameters" + }, + "audio": { + "examples": [ + { + "url": "https://v3b.fal.media/files/b/lion/9Uo4MoD-efg4sjcDyI6Nl_sfx_QHCg3z.wav" + } + ], + "description": "Generated audio file in WAV format", + "$ref": "#/components/schemas/File" + } + }, + "title": "SoundEffectGenerationOutput", + "description": "Output schema for sound effect generation.", + "required": [ + "audio", + "prompt", + "metadata" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes." 
+ }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/beatoven/sound-effect-generation/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/beatoven/sound-effect-generation/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/beatoven/sound-effect-generation": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SoundEffectGenerationInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/beatoven/sound-effect-generation/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SoundEffectGenerationOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "beatoven/music-generation", + "metadata": { + "display_name": "Music Generation", + "category": "text-to-audio", + "description": "Generate royalty-free instrumental music from electronic, hip hop, and indie rock to cinematic and classical genres. 
Perfect for games, films, social content, podcasts, and more.", + "status": "active", + "tags": [ + "speech", + "audio", + "music" + ], + "updated_at": "2026-01-26T21:42:38.094Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/rabbit/-fqZhGXxiW3usZOZ267NO_9c5cd9c1-62d9-4ed9-8c8e-3c148614c137.png", + "model_url": "https://fal.run/beatoven/music-generation", + "license_type": "commercial", + "date": "2025-10-18T16:58:42.794Z", + "group": { + "key": "Beatoven", + "label": "Music Generation" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for beatoven/music-generation", + "version": "1.0.0", + "description": "The OpenAPI schema for the beatoven/music-generation queue.", + "x-fal-metadata": { + "endpointId": "beatoven/music-generation", + "category": "text-to-audio", + "thumbnailUrl": "https://v3b.fal.media/files/b/rabbit/-fqZhGXxiW3usZOZ267NO_9c5cd9c1-62d9-4ed9-8c8e-3c148614c137.png", + "playgroundUrl": "https://fal.ai/models/beatoven/music-generation", + "documentationUrl": "https://fal.ai/models/beatoven/music-generation/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "MusicGenerationInput": { + "description": "Input schema for music generation with form controls for the playground.", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Jazz music for a late-night restaurant setting", + "A lush, ambient soundscape featuring serene sounds, and a gentle, melancholic piano melody", + "Hip-hop music, mellow keys and vinyl crackle", + "House music with synthesizers, driving bass and a steady 4/4 beat", + "Classical piano melody with emotional depth and gentle strings" + ], + "description": "Describe the music you want to generate", + "type": "string", + "title": "Prompt" + }, + "duration": { + "description": "Length of the generated music in seconds", + "type": "number", + "minimum": 5, + "maximum": 150, + "examples": [ + 90, + 47, + 150 + ], + "title": "Duration", + "default": 90 + }, + "refinement": { + "description": "Refinement level - higher values may improve quality but take longer", + "type": "integer", + "minimum": 10, + "maximum": 200, + "examples": [ + 100, + 200 + ], + "title": "Refinement", + "default": 100 + }, + "seed": { + "anyOf": [ + { + "minimum": 0, + "maximum": 2147483647, + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "Random seed for reproducible results - leave empty for random generation", + "title": "Seed" + }, + "negative_prompt": { + "examples": [ + "noise", + "distortion", + "heavy drums", + "high-hats" + ], + "description": "Describe what you want to avoid in the music (instruments, styles, moods). Leave blank for none.", + "type": "string", + "title": "Negative Prompt", + "default": "" + }, + "creativity": { + "description": "Creativity level - higher values allow more creative interpretation of the prompt", + "type": "number", + "minimum": 1, + "maximum": 20, + "examples": [ + 16, + 14, + 11 + ], + "title": "Creativity", + "default": 16 + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "duration", + "refinement", + "creativity", + "seed" + ], + "title": "MusicGenerationInput", + "required": [ + "prompt" + ] + }, + "MusicGenerationOutput": { + "description": "Output schema for music generation.", + "type": "object", + "properties": { + "prompt": { + "description": "The processed prompt used for generation", + "type": "string", + "title": "Prompt" + }, + "metadata": { + "description": "Generation metadata including duration, sample rate, and parameters", + "type": "object", + "additionalProperties": true, + "title": "Metadata" + }, + "audio": { + "examples": [ + { + "url": "https://v3b.fal.media/files/b/rabbit/DBesSNPP6NwfhwMftene-_music_ZfniDF.wav" + } + ], + "description": "Generated audio file in WAV format", + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "audio", + "prompt", + "metadata" + ], + "title": "MusicGenerationOutput", + "required": [ + "audio", + "prompt", + "metadata" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ], + "title": "File Size" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name" + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "examples": [ + "image/png" + ], + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/beatoven/music-generation/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/beatoven/music-generation/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/beatoven/music-generation": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MusicGenerationInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/beatoven/music-generation/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MusicGenerationOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/minimax-music/v1.5", + "metadata": { + "display_name": "MiniMax (Hailuo AI) Music v1.5", + "category": "text-to-audio", + "description": "Generate music from text prompts using the MiniMax model, which leverages advanced AI techniques to create high-quality, diverse musical compositions.", + "status": "active", + "tags": [ + "music" + ], + "updated_at": "2026-01-26T21:42:56.497Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/lion/q1SQZ1uvnaCmUpVZjCoOM_f19b7483fe804f00bf6473cd3eae0eeb.jpg", + "model_url": "https://fal.run/fal-ai/minimax-music/v1.5", + "license_type": "commercial", + "date": "2025-09-11T22:14:58.124Z", + "group": { + "key": "minimax-music", + "label": "v1.5" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/minimax-music/v1.5", + "version": 
"1.0.0", + "description": "The OpenAPI schema for the fal-ai/minimax-music/v1.5 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/minimax-music/v1.5", + "category": "text-to-audio", + "thumbnailUrl": "https://fal.media/files/lion/q1SQZ1uvnaCmUpVZjCoOM_f19b7483fe804f00bf6473cd3eae0eeb.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/minimax-music/v1.5", + "documentationUrl": "https://fal.ai/models/fal-ai/minimax-music/v1.5/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MinimaxMusicV15Input": { + "x-fal-order-properties": [ + "prompt", + "audio_setting", + "lyrics_prompt" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "[verse]\n Fast and Limitless \n In the heart of the code, where dreams collide, \n\nFAL's the name, taking tech for a ride. \nGenerative media, blazing the trail, \n\nFast inference power, we'll never fail.\n##" + ], + "description": "Lyrics, supports [intro][verse][chorus][bridge][outro] sections. 10-600 characters.", + "type": "string", + "maxLength": 600, + "minLength": 10, + "title": "Prompt" + }, + "lyrics_prompt": { + "examples": [ + "R&B, energetic" + ], + "description": "Control music generation. 
10-3000 characters.", + "type": "string", + "maxLength": 3000, + "minLength": 10, + "title": "Lyrics Prompt" + }, + "audio_setting": { + "description": "Audio configuration settings", + "title": "Audio Setting", + "allOf": [ + { + "$ref": "#/components/schemas/AudioSetting" + } + ] + } + }, + "title": "TextToMusic15Request", + "required": [ + "prompt", + "lyrics_prompt" + ] + }, + "MinimaxMusicV15Output": { + "x-fal-order-properties": [ + "audio" + ], + "type": "object", + "properties": { + "audio": { + "examples": [ + { + "url": "https://v3.fal.media/files/lion/b3-wJ5bbmVo8S-KPqDBMK_output.mp3" + } + ], + "description": "The generated music", + "title": "Audio", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "MusicV15Output", + "required": [ + "audio" + ] + }, + "AudioSetting": { + "x-fal-order-properties": [ + "sample_rate", + "bitrate", + "format" + ], + "type": "object", + "properties": { + "format": { + "enum": [ + "mp3", + "pcm", + "flac" + ], + "description": "Audio format", + "type": "string", + "title": "Format", + "default": "mp3" + }, + "sample_rate": { + "enum": [ + 8000, + 16000, + 22050, + 24000, + 32000, + 44100 + ], + "description": "Sample rate of generated audio", + "type": "integer", + "title": "Sample Rate", + "default": 44100 + }, + "bitrate": { + "enum": [ + 32000, + 64000, + 128000, + 256000 + ], + "description": "Bitrate of generated audio", + "type": "integer", + "title": "Bitrate", + "default": 256000 + } + }, + "title": "AudioSetting" + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/minimax-music/v1.5/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax-music/v1.5/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/minimax-music/v1.5": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxMusicV15Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax-music/v1.5/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxMusicV15Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/stable-audio-25/text-to-audio", + "metadata": { + "display_name": "Stable Audio 2.5", + "category": "text-to-audio", + "description": "Generate high quality music and sound effects using Stable Audio 2.5 from StabilityAI", + "status": "active", + "tags": [ + "audio" + ], + "updated_at": "2026-01-26T21:42:57.450Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/lion/N_r3ILjHQBEDIf8umQMMd_f51771186f184c43ab1257350772d2f6.jpg", + "model_url": "https://fal.run/fal-ai/stable-audio-25/text-to-audio", + "license_type": "commercial", + "date": "2025-09-10T11:50:27.666Z", + "group": { + "key": "stable-audio-25", + "label": "Text to Audio" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/stable-audio-25/text-to-audio", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/stable-audio-25/text-to-audio queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/stable-audio-25/text-to-audio", + "category": "text-to-audio", + "thumbnailUrl": "https://fal.media/files/lion/N_r3ILjHQBEDIf8umQMMd_f51771186f184c43ab1257350772d2f6.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/stable-audio-25/text-to-audio", + "documentationUrl": "https://fal.ai/models/fal-ai/stable-audio-25/text-to-audio/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "StableAudio25TextToAudioInput": { + "title": "TextToAudioInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A beautiful piano arpeggio grows into a grand orchestral climax" + ], + "description": "The prompt to generate audio from", + "type": "string", + "title": "Prompt" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "seconds_total": { + "minimum": 1, + "maximum": 190, + "type": "integer", + "title": "Seconds Total", + "description": "The duration of the audio clip to generate", + "default": 190 + }, + "num_inference_steps": { + "minimum": 4, + "maximum": 8, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of steps to denoise the audio for", + "default": 8 + }, + "guidance_scale": { + "minimum": 1, + "maximum": 25, + "type": "integer", + "title": "Guidance Scale", + "description": "How strictly the diffusion process adheres to the prompt text (higher values make your audio closer to your prompt).", + "default": 1 + }, + "seed": { + "title": "Seed", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "prompt", + "seconds_total", + "num_inference_steps", + "guidance_scale", + "sync_mode", + "seed" + ], + "required": [ + "prompt" + ] + }, + "StableAudio25TextToAudioOutput": { + "title": "TextToAudioOutput", + "type": "object", + "properties": { + "seed": { + "description": "The random seed used for generation", + "type": "integer", + "title": "Seed" + }, + "audio": { + "examples": [ + "https://v3.fal.media/files/zebra/lGob9bN7VHfFXG4R1btQn_tmpabwhgi6n.wav" + ], + "description": "The generated audio clip", + "title": "Audio", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "audio", + "seed" + ], + "required": [ + "audio", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/stable-audio-25/text-to-audio/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/stable-audio-25/text-to-audio/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/stable-audio-25/text-to-audio": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StableAudio25TextToAudioInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/stable-audio-25/text-to-audio/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StableAudio25TextToAudioOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/elevenlabs/text-to-dialogue/eleven-v3", + "metadata": { + "display_name": "Elevenlabs", + "category": "text-to-audio", + "description": "Generate realistic audio dialogues using Eleven-v3 from ElevenLabs.", + "status": "active", + "tags": [ + "audio" + ], + "updated_at": "2026-01-26T21:42:57.823Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/panda/twZsKdCTiF8JXv-rRcPZu_8414166d52a548859a8df01bf720fe46.jpg", + "model_url": "https://fal.run/fal-ai/elevenlabs/text-to-dialogue/eleven-v3", + "license_type": "commercial", + "date": "2025-09-09T12:30:10.005Z", + "highlighted": false, + "kind": "inference", + "stream_url": "https://fal.ai/models/fal-ai/elevenlabs/text-to-dialogue/eleven-v3/stream", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/elevenlabs/text-to-dialogue/eleven-v3", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/elevenlabs/text-to-dialogue/eleven-v3 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/elevenlabs/text-to-dialogue/eleven-v3", + "category": "text-to-audio", + "thumbnailUrl": "https://fal.media/files/panda/twZsKdCTiF8JXv-rRcPZu_8414166d52a548859a8df01bf720fe46.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/elevenlabs/text-to-dialogue/eleven-v3", + "documentationUrl": "https://fal.ai/models/fal-ai/elevenlabs/text-to-dialogue/eleven-v3/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ElevenlabsTextToDialogueElevenV3Input": { + "title": "TextToDialogueRequest", + "type": "object", + "properties": { + "stability": { + "anyOf": [ + { + "minimum": 0, + "maximum": 1, + "type": "number" + }, + { + "type": "null" + } + ], + "description": "Determines how stable the voice is and the randomness between each generation. Lower values introduce broader emotional range for the voice. Higher values can result in a monotonous voice with limited emotion. Must be one of 0.0, 0.5, 1.0, else it will be rounded to the nearest value.", + "title": "Stability" + }, + "inputs": { + "examples": [ + [ + { + "text": "[applause] Thank you all for coming tonight! Today we have a very special guest with us.", + "voice": "Aria" + }, + { + "text": "[gulps] ... [strong canadian accent] [excited] Hello everyone! Thank you all for having me tonight on this special day.", + "voice": "Charlotte" + } + ] + ], + "description": "A list of dialogue inputs, each containing text and a voice ID which will be converted into speech.", + "type": "array", + "title": "Inputs", + "items": { + "$ref": "#/components/schemas/DialogueBlock" + } + }, + "language_code": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Language code (ISO 639-1) used to enforce a language for the model. An error will be returned if language code is not supported by the model.", + "title": "Language Code" + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "Random seed for reproducibility.", + "title": "Seed" + }, + "use_speaker_boost": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "description": "This setting boosts the similarity to the original speaker. Using this setting requires a slightly higher computational load, which in turn increases latency.", + "title": "Use Speaker Boost" + }, + "pronunciation_dictionary_locators": { + "description": "A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. 
You may have up to 3 locators per request", + "type": "array", + "title": "Pronunciation Dictionary Locators", + "items": { + "$ref": "#/components/schemas/PronunciationDictionaryLocator" + }, + "default": [] + } + }, + "x-fal-order-properties": [ + "inputs", + "stability", + "use_speaker_boost", + "pronunciation_dictionary_locators", + "seed", + "language_code" + ], + "required": [ + "inputs" + ] + }, + "ElevenlabsTextToDialogueElevenV3Output": { + "title": "TextToDialogueOutput", + "type": "object", + "properties": { + "seed": { + "description": "Random seed for reproducibility.", + "type": "integer", + "title": "Seed" + }, + "audio": { + "examples": [ + { + "url": "https://v3.fal.media/files/zebra/XFeGS8Fq-q1eAPG2sSAo__output.mp3" + } + ], + "description": "The generated audio file", + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "audio", + "seed" + ], + "required": [ + "audio", + "seed" + ] + }, + "DialogueBlock": { + "title": "DialogueBlock", + "type": "object", + "properties": { + "text": { + "description": "The dialogue text", + "type": "string", + "title": "Text" + }, + "voice": { + "examples": [ + "Aria", + "Roger", + "Sarah", + "Laura", + "Charlie", + "George", + "Callum", + "River", + "Liam", + "Charlotte", + "Alice", + "Matilda", + "Will", + "Jessica", + "Eric", + "Chris", + "Brian", + "Daniel", + "Lily", + "Bill" + ], + "description": "The name or the ID of the voice to be used for the generation.", + "type": "string", + "title": "Voice" + } + }, + "x-fal-order-properties": [ + "text", + "voice" + ], + "required": [ + "text", + "voice" + ] + }, + "PronunciationDictionaryLocator": { + "title": "PronunciationDictionaryLocator", + "type": "object", + "properties": { + "version_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The ID of the version of the pronunciation dictionary. If not provided, the latest version will be used.", + "title": "Version Id" + }, + "pronunciation_dictionary_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The ID of the pronunciation dictionary.", + "title": "Pronunciation Dictionary Id" + } + }, + "x-fal-order-properties": [ + "pronunciation_dictionary_id", + "version_id" + ], + "required": [ + "pronunciation_dictionary_id" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/elevenlabs/text-to-dialogue/eleven-v3/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/elevenlabs/text-to-dialogue/eleven-v3/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/elevenlabs/text-to-dialogue/eleven-v3": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ElevenlabsTextToDialogueElevenV3Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/elevenlabs/text-to-dialogue/eleven-v3/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ElevenlabsTextToDialogueElevenV3Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/elevenlabs/sound-effects/v2", + "metadata": { + "display_name": "Elevenlabs", + "category": "text-to-audio", + "description": "Generate sound effects using ElevenLabs advanced sound effects model.", + "status": "active", + "tags": [ + "sound" + ], + "updated_at": "2026-01-26T21:42:58.693Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/web-examples/elevenlabs/elevenlabs_thumbnail.webp", + "model_url": "https://fal.run/fal-ai/elevenlabs/sound-effects/v2", + "license_type": "commercial", + "date": "2025-09-02T20:11:50.270Z", + "group": { + "key": "elevenlabs-audio", + "label": "Sound Effects V2" + }, + "highlighted": false, + "kind": "inference", + "stream_url": 
"https://fal.ai/models/fal-ai/elevenlabs/sound-effects/v2/stream", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/elevenlabs/sound-effects/v2", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/elevenlabs/sound-effects/v2 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/elevenlabs/sound-effects/v2", + "category": "text-to-audio", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/web-examples/elevenlabs/elevenlabs_thumbnail.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/elevenlabs/sound-effects/v2", + "documentationUrl": "https://fal.ai/models/fal-ai/elevenlabs/sound-effects/v2/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ElevenlabsSoundEffectsV2Input": { + "title": "SoundEffectRequestV2", + "type": "object", + "properties": { + "text": { + "examples": [ + "Spacious braam suitable for high-impact movie trailer moments", + "A gentle wind chime tinkling in a soft breeze" + ], + "description": "The text describing the sound effect to generate", + "type": "string", + "title": "Text" + }, + "loop": { + "description": "Whether to create a sound effect that loops smoothly.", + "type": "boolean", + "title": "Loop", + "default": false + }, + "prompt_influence": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Prompt Influence", + "description": "How closely to follow the prompt (0-1). Higher values mean less variation.", + "default": 0.3 + }, + "output_format": { + "enum": [ + "mp3_22050_32", + "mp3_44100_32", + "mp3_44100_64", + "mp3_44100_96", + "mp3_44100_128", + "mp3_44100_192", + "pcm_8000", + "pcm_16000", + "pcm_22050", + "pcm_24000", + "pcm_44100", + "pcm_48000", + "ulaw_8000", + "alaw_8000", + "opus_48000_32", + "opus_48000_64", + "opus_48000_96", + "opus_48000_128", + "opus_48000_192" + ], + "description": "Output format of the generated audio. Formatted as codec_sample_rate_bitrate.", + "type": "string", + "title": "Output Format", + "default": "mp3_44100_128" + }, + "duration_seconds": { + "anyOf": [ + { + "minimum": 0.5, + "maximum": 22, + "type": "number" + }, + { + "type": "null" + } + ], + "description": "Duration in seconds (0.5-22). 
If None, optimal duration will be determined from prompt.", + "title": "Duration Seconds" + } + }, + "x-fal-order-properties": [ + "text", + "duration_seconds", + "prompt_influence", + "output_format", + "loop" + ], + "required": [ + "text" + ] + }, + "ElevenlabsSoundEffectsV2Output": { + "title": "SoundEffectOutput", + "type": "object", + "properties": { + "audio": { + "examples": [ + { + "url": "https://v3.fal.media/files/lion/WgnO-jy6WduosuG_Ibobx_sound_effect.mp3" + } + ], + "description": "The generated sound effect audio file in MP3 format", + "$ref": "#/components/schemas/File" + } + }, + "description": "Output format for generated sound effects", + "x-fal-order-properties": [ + "audio" + ], + "required": [ + "audio" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/elevenlabs/sound-effects/v2/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/elevenlabs/sound-effects/v2/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/elevenlabs/sound-effects/v2": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ElevenlabsSoundEffectsV2Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/elevenlabs/sound-effects/v2/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ElevenlabsSoundEffectsV2Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "sonauto/v2/inpaint", + "metadata": { + "display_name": "Sonauto V2", + "category": "text-to-audio", + "description": "Replace sections of an existing audio with newly generated content", + "status": "active", + "tags": [ + "music", + "text-to-music", + "text-to-audio" + ], + "updated_at": "2026-01-26T21:43:01.028Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/penguin/ry2wtiGUu-3jBsfR7NY1__b01962036b18490daf6f33e9cc7d8165.jpg", + "model_url": "https://fal.run/sonauto/v2/inpaint", + "license_type": "commercial", + "date": "2025-08-23T21:02:59.706Z", + "group": { + "key": "sonauto-v2", + "label": "Inpaint" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for sonauto/v2/inpaint", + "version": "1.0.0", + "description": "The OpenAPI schema for the sonauto/v2/inpaint queue.", + "x-fal-metadata": { + "endpointId": "sonauto/v2/inpaint", + "category": "text-to-audio", + "thumbnailUrl": "https://v3.fal.media/files/penguin/ry2wtiGUu-3jBsfR7NY1__b01962036b18490daf6f33e9cc7d8165.jpg", + "playgroundUrl": "https://fal.ai/models/sonauto/v2/inpaint", + "documentationUrl": "https://fal.ai/models/sonauto/v2/inpaint/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
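[Editor's note.] The `fal-ai/elevenlabs/sound-effects/v2` entry above has one schema subtlety worth calling out: `duration_seconds` is `number | null`, and omitting it (or sending null) lets the service pick a duration from the prompt. A typed request body, field names taken directly from `ElevenlabsSoundEffectsV2Input` (the interface name itself is illustrative):

```ts
// Sketch: typed body for ElevenlabsSoundEffectsV2Input; only `text` is required.
interface SoundEffectsV2Input {
  text: string
  duration_seconds?: number | null // 0.5–22 s; omit/null => auto from prompt
  prompt_influence?: number // 0–1, default 0.3; higher = less variation
  output_format?: string // codec_sample_rate_bitrate, e.g. 'mp3_44100_128'
  loop?: boolean // default false: set true for a smoothly looping effect
}

const sfxBody: SoundEffectsV2Input = {
  text: 'A gentle wind chime tinkling in a soft breeze',
  duration_seconds: 4,
  output_format: 'mp3_44100_128',
}
// POST to https://queue.fal.run/fal-ai/elevenlabs/sound-effects/v2 as in the
// earlier queue sketch; the result payload is { audio: File }.
```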
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "V2InpaintInput": { + "title": "InpaintInput", + "type": "object", + "properties": { + "lyrics_prompt": { + "examples": [ + "[Chorus]\nPigs are soaring in the sky\nWings of bacon flying by\n" + ], + "title": "Lyrics Prompt", + "type": "string", + "description": "The lyrics sung in the generated song. An empty string will generate an instrumental track." + }, + "tags": { + "examples": [ + [ + "2020s", + "dance pop", + "pop rock", + "indie pop", + "bubblegum pop", + "synthpop", + "teen pop", + "electropop" + ] + ], + "title": "Tags", + "type": "array", + "description": "Tags/styles of the music to generate. You can view a list of all available tags at https://sonauto.ai/tag-explorer.", + "items": { + "type": "string" + } + }, + "prompt_strength": { + "minimum": 1.4, + "maximum": 3.1, + "type": "number", + "title": "Prompt Strength", + "description": "Controls how strongly your prompt influences the output. Greater values adhere more to the prompt but sound less natural. (This is CFG.)", + "default": 2 + }, + "output_bit_rate": { + "anyOf": [ + { + "enum": [ + 128, + 192, + 256, + 320 + ], + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Output Bit Rate", + "description": "The bit rate to use for mp3 and m4a formats. Not available for other formats." + }, + "num_songs": { + "minimum": 1, + "maximum": 2, + "type": "integer", + "title": "Num Songs", + "description": "Generating 2 songs costs 1.5x the price of generating 1 song. Also, note that using the same seed may not result in identical songs if the number of songs generated is changed.", + "default": 1 + }, + "output_format": { + "enum": [ + "flac", + "mp3", + "wav", + "ogg", + "m4a" + ], + "title": "Output Format", + "type": "string", + "default": "wav" + }, + "selection_crop": { + "title": "Selection Crop", + "type": "boolean", + "description": "Crop to the selected region", + "default": false + }, + "sections": { + "examples": [ + [ + { + "end": 9.45, + "start": 0 + } + ] + ], + "title": "Sections", + "type": "array", + "description": "List of sections to inpaint. Currently, only one section is supported so the list length must be 1.", + "items": { + "$ref": "#/components/schemas/InpaintSection" + } + }, + "balance_strength": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Balance Strength", + "description": "Greater means more natural vocals. Lower means sharper instrumentals. We recommend 0.7.", + "default": 0.7 + }, + "audio_url": { + "format": "uri", + "description": "The URL of the audio file to alter. Must be a valid publicly accessible URL.", + "type": "string", + "examples": [ + "https://cdn.sonauto.ai/generations2_altformats/audio_c5e63f7c-fc79-4322-808d-c09911af4713.wav" + ], + "maxLength": 2083, + "minLength": 1, + "title": "Audio Url" + }, + "seed": { + "anyOf": [ + { + "minimum": -9223372036854776000, + "maximum": 9223372036854776000, + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "The seed to use for generation. Will pick a random seed if not provided. Repeating a request with identical parameters (must use lyrics and tags, not prompt) and the same seed will generate the same song." 
+ } + }, + "x-fal-order-properties": [ + "tags", + "lyrics_prompt", + "seed", + "prompt_strength", + "balance_strength", + "num_songs", + "output_format", + "output_bit_rate", + "audio_url", + "sections", + "selection_crop" + ], + "required": [ + "lyrics_prompt", + "audio_url", + "sections" + ] + }, + "V2InpaintOutput": { + "title": "InpaintOutput", + "type": "object", + "properties": { + "seed": { + "examples": [ + 42 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used for generation. This can be used to generate an identical song by passing the same parameters with this seed in a future request." + }, + "audio": { + "examples": [ + { + "file_size": 16777294, + "file_name": "sonauto.wav", + "content_type": "audio/wav", + "url": "https://cdn.sonauto.ai/generations2_altformats/audio_9a480c86-a3c0-46e5-bfb0-c0cd6e2fdbc6.wav" + } + ], + "title": "Audio", + "type": "array", + "description": "The generated audio files.", + "items": { + "$ref": "#/components/schemas/File" + } + } + }, + "x-fal-order-properties": [ + "audio", + "seed" + ], + "required": [ + "audio", + "seed" + ] + }, + "InpaintSection": { + "title": "InpaintSection", + "type": "object", + "properties": { + "end": { + "examples": [ + 9.45 + ], + "title": "End", + "type": "number", + "description": "End time in seconds of the section to inpaint.", + "exclusiveMinimum": 0 + }, + "start": { + "minimum": 0, + "title": "Start", + "type": "number", + "description": "Start time in seconds of the section to inpaint.", + "examples": [ + 0 + ] + } + }, + "x-fal-order-properties": [ + "start", + "end" + ], + "required": [ + "start", + "end" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/sonauto/v2/inpaint/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/sonauto/v2/inpaint/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/sonauto/v2/inpaint": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/V2InpaintInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/sonauto/v2/inpaint/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/V2InpaintOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "sonauto/v2/text-to-music", + "metadata": { + "display_name": "Sonauto V2", + "category": "text-to-audio", + "description": "Create full songs in any style", + "status": "active", + "tags": [ + "music", + "text-to-music", + "text-to-audio" + ], + "updated_at": "2026-01-26T21:43:01.151Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/kangaroo/NHeRyBn8fcNS_W3YvNyKZ_4c6ea3f0fd0444c9b9c27b6245a83b38.jpg", + "model_url": "https://fal.run/sonauto/v2/text-to-music", + "license_type": "commercial", + "date": "2025-08-23T21:00:01.261Z", + "group": { + "key": "sonauto-v2", + "label": "Text To Music" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for sonauto/v2/text-to-music", + "version": "1.0.0", + "description": "The OpenAPI schema for the sonauto/v2/text-to-music queue.", + "x-fal-metadata": { + "endpointId": "sonauto/v2/text-to-music", + "category": "text-to-audio", + "thumbnailUrl": "https://fal.media/files/kangaroo/NHeRyBn8fcNS_W3YvNyKZ_4c6ea3f0fd0444c9b9c27b6245a83b38.jpg", + "playgroundUrl": "https://fal.ai/models/sonauto/v2/text-to-music", + "documentationUrl": "https://fal.ai/models/sonauto/v2/text-to-music/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." 
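[Editor's note.] The `sonauto/v2/inpaint` schema above encodes two easy-to-miss constraints: `lyrics_prompt`, `audio_url`, and `sections` are all required, and the `sections` description says only one section is currently supported (list length must be 1). A minimal body, purely schema-grounded except for the hypothetical `audio_url`:

```ts
// Sketch: V2InpaintInput per the schema above.
const inpaintBody = {
  lyrics_prompt: '', // empty string => instrumental content in the inpainted region
  tags: ['synthpop', 'indie pop'],
  audio_url: 'https://example.com/song.wav', // hypothetical; must be publicly reachable
  sections: [{ start: 0, end: 9.45 }], // seconds; exactly one InpaintSection for now
  selection_crop: false, // false keeps the full track rather than only the selection
}
```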
+ }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "V2TextToMusicInput": { + "title": "GenerateInput", + "type": "object", + "properties": { + "prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Prompt", + "description": "A description of the track you want to generate. This prompt will be used to automatically generate the tags and lyrics unless you manually set them. For example, if you set prompt and tags, then the prompt will be used to generate only the lyrics.", + "examples": [ + "A pop song about turtles flying" + ] + }, + "lyrics_prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Lyrics Prompt", + "description": "The lyrics sung in the generated song. An empty string will generate an instrumental track." + }, + "tags": { + "anyOf": [ + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "null" + } + ], + "title": "Tags", + "description": "Tags/styles of the music to generate. You can view a list of all available tags at https://sonauto.ai/tag-explorer." + }, + "prompt_strength": { + "minimum": 1.4, + "maximum": 3.1, + "type": "number", + "title": "Prompt Strength", + "description": "Controls how strongly your prompt influences the output. Greater values adhere more to the prompt but sound less natural. (This is CFG.)", + "default": 2 + }, + "output_bit_rate": { + "anyOf": [ + { + "enum": [ + 128, + 192, + 256, + 320 + ], + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Output Bit Rate", + "description": "The bit rate to use for mp3 and m4a formats. Not available for other formats." + }, + "num_songs": { + "minimum": 1, + "maximum": 2, + "type": "integer", + "title": "Num Songs", + "description": "Generating 2 songs costs 1.5x the price of generating 1 song. Also, note that using the same seed may not result in identical songs if the number of songs generated is changed.", + "default": 1 + }, + "output_format": { + "enum": [ + "flac", + "mp3", + "wav", + "ogg", + "m4a" + ], + "title": "Output Format", + "type": "string", + "default": "wav" + }, + "bpm": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string", + "const": "auto" + }, + { + "type": "null" + } + ], + "title": "Bpm", + "description": "The beats per minute of the song. This can be set to an integer or the literal string \"auto\" to pick a suitable bpm based on the tags. Set bpm to null to not condition the model on bpm information.", + "default": "auto" + }, + "balance_strength": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Balance Strength", + "description": "Greater means more natural vocals. Lower means sharper instrumentals. We recommend 0.7.", + "default": 0.7 + }, + "seed": { + "anyOf": [ + { + "minimum": -9223372036854776000, + "maximum": 9223372036854776000, + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "The seed to use for generation. Will pick a random seed if not provided. Repeating a request with identical parameters (must use lyrics and tags, not prompt) and the same seed will generate the same song." 
+ } + }, + "x-fal-order-properties": [ + "prompt", + "tags", + "lyrics_prompt", + "seed", + "prompt_strength", + "balance_strength", + "num_songs", + "output_format", + "output_bit_rate", + "bpm" + ] + }, + "V2TextToMusicOutput": { + "title": "GenerateOutput", + "type": "object", + "properties": { + "tags": { + "anyOf": [ + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "null" + } + ], + "title": "Tags", + "description": "The style tags used for generation." + }, + "seed": { + "examples": [ + 42 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used for generation. This can be used to generate an identical song by passing the same parameters with this seed in a future request." + }, + "lyrics": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Lyrics", + "description": "The lyrics used for generation." + }, + "audio": { + "examples": [ + { + "file_size": 16777294, + "file_name": "sonauto.wav", + "content_type": "audio/wav", + "url": "https://cdn.sonauto.ai/generations2_altformats/audio_c5e63f7c-fc79-4322-808d-c09911af4713.wav" + } + ], + "title": "Audio", + "type": "array", + "description": "The generated audio files.", + "items": { + "$ref": "#/components/schemas/File" + } + } + }, + "x-fal-order-properties": [ + "seed", + "tags", + "lyrics", + "audio" + ], + "required": [ + "seed", + "audio" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/sonauto/v2/text-to-music/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/sonauto/v2/text-to-music/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/sonauto/v2/text-to-music": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/V2TextToMusicInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/sonauto/v2/text-to-music/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/V2TextToMusicOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/elevenlabs/tts/eleven-v3", + "metadata": { + "display_name": "Elevenlabs", + "category": "text-to-audio", + "description": "Generate text-to-speech audio using Eleven-v3 from ElevenLabs.", + "status": "active", + "tags": [ + "audio" + ], + "updated_at": "2026-01-26T21:43:02.562Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/panda/twZsKdCTiF8JXv-rRcPZu_8414166d52a548859a8df01bf720fe46.jpg", + "model_url": "https://fal.run/fal-ai/elevenlabs/tts/eleven-v3", + "license_type": "commercial", + "date": "2025-08-20T19:22:36.745Z", + "highlighted": false, + "kind": "inference", + "stream_url": "https://fal.run/fal-ai/elevenlabs/tts/eleven-v3/stream", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/elevenlabs/tts/eleven-v3", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/elevenlabs/tts/eleven-v3 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/elevenlabs/tts/eleven-v3", + "category": "text-to-audio", + "thumbnailUrl": "https://fal.media/files/panda/twZsKdCTiF8JXv-rRcPZu_8414166d52a548859a8df01bf720fe46.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/elevenlabs/tts/eleven-v3", + "documentationUrl": "https://fal.ai/models/fal-ai/elevenlabs/tts/eleven-v3/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
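[Editor's note.] Unlike the inpaint schema, `V2TextToMusicInput` above declares no `required` list: a bare `prompt` is enough, and the service derives tags and lyrics from it (setting `tags` manually means the prompt is used only for lyrics). Note also the three-way `bpm` union. A sketch (the `Bpm` alias is illustrative):

```ts
// Sketch: V2TextToMusicInput — every field optional; prompt alone suffices.
type Bpm = number | 'auto' | null // null disables bpm conditioning entirely

const t2mBody: { prompt?: string; tags?: string[]; lyrics_prompt?: string; bpm?: Bpm; seed?: number } = {
  prompt: 'A pop song about turtles flying',
  bpm: 'auto', // default: pick a suitable bpm from the tags
}
// Per the seed description, exact reproduction requires repeating tags + lyrics
// (not just the prompt) together with the seed returned in the output.
```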
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ElevenlabsTtsElevenV3Input": { + "title": "TextToSpeechRequestV3", + "type": "object", + "properties": { + "stability": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Stability", + "description": "Voice stability (0-1)", + "default": 0.5 + }, + "speed": { + "minimum": 0.7, + "maximum": 1.2, + "type": "number", + "title": "Speed", + "description": "Speech speed (0.7-1.2). Values below 1.0 slow down the speech, above 1.0 speed it up. Extreme values may affect quality.", + "default": 1 + }, + "text": { + "examples": [ + "Hello! This is a test of the text to speech system, powered by ElevenLabs. How does it sound?" + ], + "description": "The text to convert to speech", + "type": "string", + "title": "Text", + "minLength": 1, + "maxLength": 5000 + }, + "style": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Style", + "description": "Style exaggeration (0-1)", + "default": 0 + }, + "timestamps": { + "description": "Whether to return timestamps for each word in the generated speech", + "type": "boolean", + "title": "Timestamps", + "default": false + }, + "similarity_boost": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Similarity Boost", + "description": "Similarity boost (0-1)", + "default": 0.75 + }, + "voice": { + "examples": [ + "Aria", + "Roger", + "Sarah", + "Laura", + "Charlie", + "George", + "Callum", + "River", + "Liam", + "Charlotte", + "Alice", + "Matilda", + "Will", + "Jessica", + "Eric", + "Chris", + "Brian", + "Daniel", + "Lily", + "Bill" + ], + "description": "The voice to use for speech generation", + "type": "string", + "title": "Voice", + "default": "Rachel" + }, + "language_code": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Language code (ISO 639-1) used to enforce a language for the model.", + "title": "Language Code" + } + }, + "description": "Request model for eleven_v3 which doesn't support previous_text/next_text", + "x-fal-order-properties": [ + "text", + "voice", + "stability", + "similarity_boost", + "style", + "speed", + "timestamps", + "language_code" + ], + "required": [ + "text" + ] + }, + "ElevenlabsTtsElevenV3Output": { + "title": "TTSOutput", + "type": "object", + "properties": { + "audio": { + "examples": [ + { + "url": "https://v3.fal.media/files/zebra/zJL_oRY8h5RWwjoK1w7tx_output.mp3" + } + ], + "description": "The generated audio file", + "$ref": "#/components/schemas/File" + }, + "timestamps": { + "anyOf": [ + { + "type": "array", + "items": {} + }, + { + "type": "null" + } + ], + "description": "Timestamps for each word in the generated speech. Only returned if `timestamps` is set to True in the request.", + "title": "Timestamps" + } + }, + "x-fal-order-properties": [ + "audio", + "timestamps" + ], + "required": [ + "audio" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/elevenlabs/tts/eleven-v3/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/elevenlabs/tts/eleven-v3/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/elevenlabs/tts/eleven-v3": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ElevenlabsTtsElevenV3Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/elevenlabs/tts/eleven-v3/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ElevenlabsTtsElevenV3Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/lyria2", + "metadata": { + "display_name": "Lyria2", + "category": "text-to-audio", + "description": "Lyria 2 is Google's latest music generation model, you can generate any type of music with this model.", + "status": "active", + "tags": [ + "music", + "stylized" + ], + "updated_at": "2026-01-26T21:43:40.120Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-4.jpg", + "model_url": "https://fal.run/fal-ai/lyria2", + "license_type": "commercial", + "date": "2025-05-20T20:05:49.336Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/lyria2", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/lyria2 queue.", + 
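[Editor's note.] For the `fal-ai/elevenlabs/tts/eleven-v3` entry just completed, the `timestamps` flag round-trips through the schemas: the output's `timestamps` array is only populated when the request sets it to true. A minimal body:

```ts
// Sketch: ElevenlabsTtsElevenV3Input — `text` (1–5000 chars) is the only required field.
const ttsBody = {
  text: 'Hello! This is a test of the text to speech system.',
  voice: 'Aria', // optional; the schema default is 'Rachel'
  timestamps: true, // request per-word timestamps alongside the audio File
}
```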
"x-fal-metadata": { + "endpointId": "fal-ai/lyria2", + "category": "text-to-audio", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-4.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/lyria2", + "documentationUrl": "https://fal.ai/models/fal-ai/lyria2/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Lyria2Input": { + "title": "TextToMusicInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A lush, ambient soundscape featuring the serene sounds of a flowing river, complemented by the distant chirping of birds, and a gentle, melancholic piano melody that slowly unfolds." + ], + "title": "Prompt", + "minLength": 1, + "type": "string", + "maxLength": 2000, + "description": "The text prompt describing the music you want to generate" + }, + "seed": { + "description": "A seed for deterministic generation. If provided, the model will attempt to produce the same audio given the same prompt and other parameters.", + "type": "integer", + "title": "Seed" + }, + "negative_prompt": { + "examples": [ + "vocals, slow tempo", + "low quality" + ], + "description": "A description of what to exclude from the generated audio", + "type": "string", + "title": "Negative Prompt", + "default": "low quality" + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "seed" + ], + "required": [ + "prompt" + ] + }, + "Lyria2Output": { + "title": "TextToMusicOutput", + "type": "object", + "properties": { + "audio": { + "examples": [ + { + "url": "https://v3.fal.media/files/koala/9V6ADhxcZrZr2FcaiNA7H_output.wav" + } + ], + "description": "The generated music", + "title": "Audio", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "audio" + ], + "required": [ + "audio" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/lyria2/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/lyria2/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/lyria2": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Lyria2Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/lyria2/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Lyria2Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ace-step/prompt-to-audio", + "metadata": { + "display_name": "ACE-Step", + "category": "text-to-audio", + "description": "Generate music from a simple prompt using ACE-Step", + "status": "active", + "tags": [ + "text-to-audio", + "text-to-music" + ], + "updated_at": "2026-01-26T21:43:44.960Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/penguin/sDFuolUwHioUzK2nutylA_54068c7ff03449dca0fc1e4523ed56c1.jpg", + "model_url": "https://fal.run/fal-ai/ace-step/prompt-to-audio", + "license_type": "commercial", + "date": "2025-05-11T17:51:47.356Z", + "group": { + "key": "ace-step", + "label": "Prompt to Audio" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ace-step/prompt-to-audio", + "version": 
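[Editor's note.] The `fal-ai/lyria2` input above is the simplest in this batch: a required `prompt` (1–2000 chars), a `negative_prompt` defaulting to "low quality", and a best-effort seed (the description says the model will "attempt to produce the same audio" for identical parameters, so determinism is not guaranteed). A sketch:

```ts
// Sketch: Lyria2Input per the schema above.
const lyriaBody = {
  prompt: 'A gentle, melancholic piano melody over the serene sounds of a flowing river',
  negative_prompt: 'vocals, slow tempo',
  seed: 42, // best-effort reproducibility, not a hard guarantee
}
```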
"1.0.0", + "description": "The OpenAPI schema for the fal-ai/ace-step/prompt-to-audio queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ace-step/prompt-to-audio", + "category": "text-to-audio", + "thumbnailUrl": "https://fal.media/files/penguin/sDFuolUwHioUzK2nutylA_54068c7ff03449dca0fc1e4523ed56c1.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ace-step/prompt-to-audio", + "documentationUrl": "https://fal.ai/models/fal-ai/ace-step/prompt-to-audio/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "AceStepPromptToAudioInput": { + "title": "ACEStepPromptToAudioRequest", + "type": "object", + "properties": { + "number_of_steps": { + "description": "Number of steps to generate the audio.", + "type": "integer", + "minimum": 3, + "title": "Number Of Steps", + "examples": [ + 27 + ], + "maximum": 60, + "default": 27 + }, + "duration": { + "description": "The duration of the generated audio in seconds.", + "type": "number", + "minimum": 5, + "title": "Duration", + "maximum": 240, + "default": 60 + }, + "prompt": { + "examples": [ + "A lofi hiphop song with a chill vibe about a sunny day on the boardwalk." + ], + "title": "Prompt", + "type": "string", + "description": "Prompt to control the style of the generated audio. This will be used to generate tags and lyrics." 
+ }, + "minimum_guidance_scale": { + "description": "Minimum guidance scale for the generation after the decay.", + "type": "number", + "minimum": 0, + "title": "Minimum Guidance Scale", + "examples": [ + 3 + ], + "maximum": 200, + "default": 3 + }, + "tag_guidance_scale": { + "description": "Tag guidance scale for the generation.", + "type": "number", + "minimum": 0, + "title": "Tag Guidance Scale", + "examples": [ + 5 + ], + "maximum": 10, + "default": 5 + }, + "scheduler": { + "enum": [ + "euler", + "heun" + ], + "title": "Scheduler", + "type": "string", + "examples": [ + "euler" + ], + "description": "Scheduler to use for the generation process.", + "default": "euler" + }, + "guidance_scale": { + "description": "Guidance scale for the generation.", + "type": "number", + "minimum": 0, + "title": "Guidance Scale", + "examples": [ + 15 + ], + "maximum": 200, + "default": 15 + }, + "guidance_type": { + "enum": [ + "cfg", + "apg", + "cfg_star" + ], + "title": "Guidance Type", + "type": "string", + "examples": [ + "apg" + ], + "description": "Type of CFG to use for the generation process.", + "default": "apg" + }, + "instrumental": { + "examples": [ + false + ], + "title": "Instrumental", + "type": "boolean", + "description": "Whether to generate an instrumental version of the audio.", + "default": false + }, + "lyric_guidance_scale": { + "description": "Lyric guidance scale for the generation.", + "type": "number", + "minimum": 0, + "title": "Lyric Guidance Scale", + "examples": [ + 1.5 + ], + "maximum": 10, + "default": 1.5 + }, + "guidance_interval": { + "description": "Guidance interval for the generation. 0.5 means only apply guidance in the middle steps (0.25 * infer_steps to 0.75 * infer_steps)", + "type": "number", + "minimum": 0, + "title": "Guidance Interval", + "examples": [ + 0.5 + ], + "maximum": 1, + "default": 0.5 + }, + "guidance_interval_decay": { + "description": "Guidance interval decay for the generation. Guidance scale will decay from guidance_scale to min_guidance_scale in the interval. 0.0 means no decay.", + "type": "number", + "minimum": 0, + "title": "Guidance Interval Decay", + "examples": [ + 0 + ], + "maximum": 1, + "default": 0 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility. If not provided, a random seed will be used." + }, + "granularity_scale": { + "description": "Granularity scale for the generation process. Higher values can reduce artifacts.", + "type": "integer", + "minimum": -100, + "title": "Granularity Scale", + "examples": [ + 10 + ], + "maximum": 100, + "default": 10 + } + }, + "x-fal-order-properties": [ + "prompt", + "instrumental", + "duration", + "number_of_steps", + "seed", + "scheduler", + "guidance_type", + "granularity_scale", + "guidance_interval", + "guidance_interval_decay", + "guidance_scale", + "minimum_guidance_scale", + "tag_guidance_scale", + "lyric_guidance_scale" + ], + "required": [ + "prompt" + ] + }, + "AceStepPromptToAudioOutput": { + "title": "ACEStepResponse", + "type": "object", + "properties": { + "tags": { + "examples": [ + "lofi, hiphop, drum and bass, trap, chill" + ], + "title": "Tags", + "type": "string", + "description": "The genre tags used in the generation process." + }, + "lyrics": { + "examples": [ + "[inst]" + ], + "title": "Lyrics", + "type": "string", + "description": "The lyrics used in the generation process." 
+ }, + "seed": { + "examples": [ + 42 + ], + "title": "Seed", + "type": "integer", + "description": "The random seed used for the generation process." + }, + "audio": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/ace-step-text-to-audio.wav" + } + ], + "title": "Audio", + "description": "The generated audio file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "audio", + "seed", + "tags", + "lyrics" + ], + "required": [ + "audio", + "seed", + "tags", + "lyrics" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ace-step/prompt-to-audio/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ace-step/prompt-to-audio/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/ace-step/prompt-to-audio": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AceStepPromptToAudioInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ace-step/prompt-to-audio/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AceStepPromptToAudioOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ace-step", + "metadata": { + "display_name": "ACE-Step", + "category": "text-to-audio", + "description": "Generate music with lyrics from text using ACE-Step", + "status": "active", + "tags": [ + "text-to-audio", + "text-to-music" + ], + "updated_at": "2026-01-26T21:43:46.012Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/koala/4CEYTltj2fq6tpXQZnf_g_8b2d4b72d8d84fbaa42970dadacf1379.jpg", + "model_url": "https://fal.run/fal-ai/ace-step", + "license_type": "commercial", + "date": "2025-05-08T21:47:06.754Z", + "group": { + "key": "ace-step", + "label": "Lyrics to Audio" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ace-step", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ace-step queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ace-step", + "category": "text-to-audio", + "thumbnailUrl": "https://fal.media/files/koala/4CEYTltj2fq6tpXQZnf_g_8b2d4b72d8d84fbaa42970dadacf1379.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ace-step", + "documentationUrl": "https://fal.ai/models/fal-ai/ace-step/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
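[Editor's note.] The ACE-Step guidance fields above interact in a way the descriptions only sketch: `guidance_interval: 0.5` means guidance applies in the middle steps (0.25·N to 0.75·N), and `guidance_interval_decay` decays the scale from `guidance_scale` toward `minimum_guidance_scale` within that window. The function below is one plausible reading of those descriptions, not the model's actual implementation; the linear decay and the "scale ≈ 1 outside the window" choice are both assumptions.

```ts
// Sketch (interpretation only) of the per-step guidance schedule implied by the
// AceStepPromptToAudioInput field descriptions above.
function guidanceAt(
  step: number,
  steps: number,
  opts: {
    guidance_scale: number // default 15
    minimum_guidance_scale: number // default 3
    guidance_interval: number // default 0.5 => middle half of the steps
    guidance_interval_decay: number // default 0 => no decay
  },
): number {
  const half = opts.guidance_interval / 2
  const lo = Math.floor((0.5 - half) * steps)
  const hi = Math.ceil((0.5 + half) * steps)
  if (step < lo || step > hi) return 1 // assumption: CFG scale 1 ≈ unguided outside the window
  const t = hi === lo ? 0 : (step - lo) / (hi - lo) // 0..1 progress through the window
  // Decay linearly from guidance_scale toward a floor set by the decay factor.
  const floor =
    opts.guidance_scale - (opts.guidance_scale - opts.minimum_guidance_scale) * opts.guidance_interval_decay
  return opts.guidance_scale + (floor - opts.guidance_scale) * t
}
// With the defaults (15, 3, 0.5, 0) and 27 steps this is a constant 15 across
// roughly steps 6–21 — i.e. guidance only in the middle half, no decay.
```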
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "AceStepInput": { + "title": "ACEStepTextToAudioRequest", + "type": "object", + "properties": { + "number_of_steps": { + "description": "Number of steps to generate the audio.", + "type": "integer", + "minimum": 3, + "title": "Number Of Steps", + "examples": [ + 27 + ], + "maximum": 60, + "default": 27 + }, + "duration": { + "description": "The duration of the generated audio in seconds.", + "type": "number", + "minimum": 5, + "title": "Duration", + "maximum": 240, + "default": 60 + }, + "tags": { + "examples": [ + "lofi, hiphop, drum and bass, trap, chill" + ], + "title": "Tags", + "type": "string", + "description": "Comma-separated list of genre tags to control the style of the generated audio." + }, + "minimum_guidance_scale": { + "description": "Minimum guidance scale for the generation after the decay.", + "type": "number", + "minimum": 0, + "title": "Minimum Guidance Scale", + "examples": [ + 3 + ], + "maximum": 200, + "default": 3 + }, + "lyrics": { + "title": "Lyrics", + "type": "string", + "description": "Lyrics to be sung in the audio. If not provided or if [inst] or [instrumental] is the content of this field, no lyrics will be sung. Use control structures like [verse], [chorus] and [bridge] to control the structure of the song.", + "default": "" + }, + "tag_guidance_scale": { + "description": "Tag guidance scale for the generation.", + "type": "number", + "minimum": 0, + "title": "Tag Guidance Scale", + "examples": [ + 5 + ], + "maximum": 10, + "default": 5 + }, + "scheduler": { + "enum": [ + "euler", + "heun" + ], + "title": "Scheduler", + "type": "string", + "examples": [ + "euler" + ], + "description": "Scheduler to use for the generation process.", + "default": "euler" + }, + "guidance_scale": { + "description": "Guidance scale for the generation.", + "type": "number", + "minimum": 0, + "title": "Guidance Scale", + "examples": [ + 15 + ], + "maximum": 200, + "default": 15 + }, + "guidance_type": { + "enum": [ + "cfg", + "apg", + "cfg_star" + ], + "title": "Guidance Type", + "type": "string", + "examples": [ + "apg" + ], + "description": "Type of CFG to use for the generation process.", + "default": "apg" + }, + "lyric_guidance_scale": { + "description": "Lyric guidance scale for the generation.", + "type": "number", + "minimum": 0, + "title": "Lyric Guidance Scale", + "examples": [ + 1.5 + ], + "maximum": 10, + "default": 1.5 + }, + "guidance_interval": { + "description": "Guidance interval for the generation. 0.5 means only apply guidance in the middle steps (0.25 * infer_steps to 0.75 * infer_steps)", + "type": "number", + "minimum": 0, + "title": "Guidance Interval", + "examples": [ + 0.5 + ], + "maximum": 1, + "default": 0.5 + }, + "guidance_interval_decay": { + "description": "Guidance interval decay for the generation. Guidance scale will decay from guidance_scale to min_guidance_scale in the interval. 0.0 means no decay.", + "type": "number", + "minimum": 0, + "title": "Guidance Interval Decay", + "examples": [ + 0 + ], + "maximum": 1, + "default": 0 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility. If not provided, a random seed will be used." + }, + "granularity_scale": { + "description": "Granularity scale for the generation process. 
Higher values can reduce artifacts.", + "type": "integer", + "minimum": -100, + "title": "Granularity Scale", + "examples": [ + 10 + ], + "maximum": 100, + "default": 10 + } + }, + "x-fal-order-properties": [ + "tags", + "lyrics", + "duration", + "number_of_steps", + "seed", + "scheduler", + "guidance_type", + "granularity_scale", + "guidance_interval", + "guidance_interval_decay", + "guidance_scale", + "minimum_guidance_scale", + "tag_guidance_scale", + "lyric_guidance_scale" + ], + "required": [ + "tags" + ] + }, + "AceStepOutput": { + "title": "ACEStepResponse", + "type": "object", + "properties": { + "tags": { + "examples": [ + "lofi, hiphop, drum and bass, trap, chill" + ], + "title": "Tags", + "type": "string", + "description": "The genre tags used in the generation process." + }, + "lyrics": { + "examples": [ + "[inst]" + ], + "title": "Lyrics", + "type": "string", + "description": "The lyrics used in the generation process." + }, + "seed": { + "examples": [ + 42 + ], + "title": "Seed", + "type": "integer", + "description": "The random seed used for the generation process." + }, + "audio": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/ace-step-text-to-audio.wav" + } + ], + "title": "Audio", + "description": "The generated audio file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "audio", + "seed", + "tags", + "lyrics" + ], + "required": [ + "audio", + "seed", + "tags", + "lyrics" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ace-step/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ace-step/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ace-step": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AceStepInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ace-step/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AceStepOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "cassetteai/sound-effects-generator", + "metadata": { + "display_name": "Sound Effects Generator", + "category": "text-to-audio", + "description": "Create stunningly realistic sound effects in seconds - CassetteAI's Sound Effects Model generates high-quality SFX up to 30 seconds long in just 1 second of processing time", + "status": "active", + "tags": [ + "sound", + "sfx", + "sound-effects", + "cassetteai" + ], + "updated_at": "2026-01-26T21:43:58.199Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/cassetteai-sound-effects-generator.webp", + "model_url": "https://fal.run/cassetteai/sound-effects-generator", + "license_type": "commercial", + "date": "2025-04-03T22:14:05.362Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for cassetteai/sound-effects-generator", + "version": "1.0.0", + "description": "The OpenAPI schema for the cassetteai/sound-effects-generator queue.", + "x-fal-metadata": { + "endpointId": "cassetteai/sound-effects-generator", + "category": "text-to-audio", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/cassetteai-sound-effects-generator.webp", + "playgroundUrl": "https://fal.ai/models/cassetteai/sound-effects-generator", + "documentationUrl": "https://fal.ai/models/cassetteai/sound-effects-generator/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "SoundEffectsGeneratorInput": { + "title": "Input", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "dog barking in the rain" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate SFX." + }, + "duration": { + "minimum": 1, + "title": "Duration", + "type": "integer", + "examples": [ + 30 + ], + "description": "The duration of the generated SFX in seconds.", + "maximum": 30 + } + }, + "x-fal-order-properties": [ + "prompt", + "duration" + ], + "required": [ + "prompt", + "duration" + ] + }, + "SoundEffectsGeneratorOutput": { + "title": "AudioOutput", + "type": "object", + "properties": { + "audio_file": { + "examples": [ + { + "url": "https://v3.fal.media/files/panda/FJ56Mbpj1F_MQVuO0UJ9k_generated.wav" + } + ], + "description": "The generated SFX", + "$ref": "#/components/schemas/File" + } + }, + "description": "Example Pydantic model showing how to include a File in the output.", + "x-fal-order-properties": [ + "audio_file" + ], + "required": [ + "audio_file" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/cassetteai/sound-effects-generator/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/cassetteai/sound-effects-generator/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/cassetteai/sound-effects-generator": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SoundEffectsGeneratorInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/cassetteai/sound-effects-generator/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SoundEffectsGeneratorOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "cassetteai/music-generator", + "metadata": { + "display_name": "music generator", + "category": "text-to-audio", + "description": "CassetteAI’s model generates a 30-second sample in under 2 seconds and a full 3-minute track in under 10 seconds. 
At 44.1 kHz stereo audio, expect a level of professional consistency with no breaks, no squeaks, and no random interruptions in your creations.\n\n", + "status": "active", + "tags": [ + "music", + "cassetteai" + ], + "updated_at": "2026-01-26T21:43:59.755Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/rabbit/sMvFmn2J_7gVG77CCgseb_728490598d0242a2abf19f7fae6a93a6.jpg", + "model_url": "https://fal.run/CassetteAI/music-generator", + "license_type": "commercial", + "date": "2025-03-27T15:08:50.013Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for cassetteai/music-generator", + "version": "1.0.0", + "description": "The OpenAPI schema for the cassetteai/music-generator queue.", + "x-fal-metadata": { + "endpointId": "cassetteai/music-generator", + "category": "text-to-audio", + "thumbnailUrl": "https://fal.media/files/rabbit/sMvFmn2J_7gVG77CCgseb_728490598d0242a2abf19f7fae6a93a6.jpg", + "playgroundUrl": "https://fal.ai/models/cassetteai/music-generator", + "documentationUrl": "https://fal.ai/models/cassetteai/music-generator/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MusicGeneratorInput": { + "title": "Input", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Smooth chill hip-hop beat with mellow piano melodies, deep bass, and soft drums, perfect for a night drive. Key: D Minor, Tempo: 90 BPM." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate music from." 
+ }, + "duration": { + "minimum": 10, + "title": "Duration", + "type": "integer", + "examples": [ + 50 + ], + "description": "The duration of the generated music in seconds.", + "maximum": 180 + } + }, + "x-fal-order-properties": [ + "prompt", + "duration" + ], + "required": [ + "prompt", + "duration" + ] + }, + "MusicGeneratorOutput": { + "title": "AudioOutput", + "type": "object", + "properties": { + "audio_file": { + "examples": [ + { + "url": "https://v3.fal.media/files/panda/T-GP6cbpo1lgL8ll4oKGj_generated.wav" + } + ], + "description": "The generated music", + "$ref": "#/components/schemas/File" + } + }, + "description": "Example Pydantic model showing how to include a File in the output.", + "x-fal-order-properties": [ + "audio_file" + ], + "required": [ + "audio_file" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/cassetteai/music-generator/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/cassetteai/music-generator/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/cassetteai/music-generator": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MusicGeneratorInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/cassetteai/music-generator/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MusicGeneratorOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/csm-1b", + "metadata": { + "display_name": "CSM-1B", + "category": "text-to-audio", + "description": "CSM (Conversational Speech Model) is a speech generation model from Sesame that generates RVQ audio codes from text and audio inputs.", + "status": "active", + "tags": [ + "conversational", + "text to speech" + ], + "updated_at": "2026-01-26T21:44:02.147Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/csm.jpeg", + "model_url": "https://fal.run/fal-ai/csm-1b", + "license_type": "commercial", + "date": "2025-03-13T00:00:00.000Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/csm-1b", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/csm-1b queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/csm-1b", + "category": "text-to-audio", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/csm.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/csm-1b", + "documentationUrl": "https://fal.ai/models/fal-ai/csm-1b/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Csm1bInput": { + "title": "Input", + "type": "object", + "properties": { + "scene": { + "examples": [ + [ + { + "text": "Hey how are you doing.", + "speaker_id": 0 + }, + { + "text": "Pretty good, pretty good.", + "speaker_id": 1 + }, + { + "text": "I'm great, so happy to be speaking to you.", + "speaker_id": 0 + } + ] + ], + "title": "Scene", + "type": "array", + "description": "The text to generate an audio from.", + "items": { + "$ref": "#/components/schemas/Turn" + } + }, + "context": { + "examples": [ + [ + { + "prompt": "like revising for an exam I'd have to try and like keep up the momentum because I'd start really early I'd be like okay I'm gonna start revising now and then like you're revising for ages and then I just like start losing steam I didn't do that for the exam we had recently to be fair that was a more of a last minute scenario but like yeah I'm trying to like yeah I noticed this yesterday that like Mondays I sort of start the day with this not like a panic but like a", + "audio_url": "https://huggingface.co/spaces/sesame/csm-1b/resolve/main/prompts/conversational_a.wav", + "speaker_id": 0 + }, + { + "prompt": "like a super Mario level. Like it's very like high detail. And like, once you get into the park, it just like, everything looks like a computer game and they have all these, like, you know, if, if there's like a, you know, like in a Mario game, they will have like a question block. And if you like, you know, punch it, a coin will come out. So like everyone, when they come into the park, they get like this little bracelet and then you can go punching question blocks around.", + "audio_url": "https://huggingface.co/spaces/sesame/csm-1b/resolve/main/prompts/conversational_b.wav", + "speaker_id": 1 + } + ] + ], + "title": "Context", + "type": "array", + "description": "The context to generate an audio from.", + "items": { + "$ref": "#/components/schemas/Speaker" + } + } + }, + "x-fal-order-properties": [ + "scene", + "context" + ], + "required": [ + "scene" + ] + }, + "Csm1bOutput": { + "title": "Output", + "type": "object", + "properties": { + "audio": { + "anyOf": [ + { + "$ref": "#/components/schemas/File" + }, + { + "format": "binary", + "type": "string" + } + ], + "title": "Audio", + "description": "The generated audio." + } + }, + "x-fal-order-properties": [ + "audio" + ], + "required": [ + "audio" + ] + }, + "Turn": { + "title": "Turn", + "type": "object", + "properties": { + "text": { + "title": "Text", + "type": "string" + }, + "speaker_id": { + "title": "Speaker Id", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "speaker_id", + "text" + ], + "required": [ + "speaker_id", + "text" + ] + }, + "Speaker": { + "title": "Speaker", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string" + }, + "audio_url": { + "title": "Audio Url", + "type": "string" + }, + "speaker_id": { + "title": "Speaker Id", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "prompt", + "speaker_id", + "audio_url" + ], + "required": [ + "prompt", + "speaker_id", + "audio_url" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." 
+ }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/csm-1b/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/csm-1b/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/csm-1b": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Csm1bInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/csm-1b/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Csm1bOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/diffrhythm", + "metadata": { + "display_name": "DiffRhythm: Lyrics to Song", + "category": "text-to-audio", + "description": "DiffRhythm is a blazing fast model for transforming lyrics into full songs. 
It boasts the capability to generate full songs in less than 30 seconds.", + "status": "active", + "tags": [ + "music" + ], + "updated_at": "2026-01-26T21:44:03.875Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/rabbit/Bb-9UKANlvJDh-TwrH9sB_f38e20cbf40c42f485428c775e76543b.jpg", + "model_url": "https://fal.run/fal-ai/diffrhythm", + "license_type": "commercial", + "date": "2025-03-04T00:00:00.000Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/diffrhythm", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/diffrhythm queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/diffrhythm", + "category": "text-to-audio", + "thumbnailUrl": "https://fal.media/files/rabbit/Bb-9UKANlvJDh-TwrH9sB_f38e20cbf40c42f485428c775e76543b.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/diffrhythm", + "documentationUrl": "https://fal.ai/models/fal-ai/diffrhythm/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "DiffrhythmInput": { + "title": "TextToMusicInput", + "type": "object", + "properties": { + "lyrics": { + "examples": [ + "[00:10.00]Moonlight spills through broken blinds\n[00:13.20]Your shadow dances on the dashboard shrine\n[00:16.85]Neon ghosts in gasoline rain\n[00:20.40]I hear your laughter down the midnight train\n[00:24.15]Static whispers through frayed wires\n[00:27.65]Guitar strings hum our cathedral choirs\n[00:31.30]Flicker screens show reruns of June\n[00:34.90]I'm drowning in this mercury lagoon\n[00:38.55]Electric veins pulse through concrete skies\n[00:42.10]Your name echoes in the hollow where my heartbeat lies\n[00:45.75]We're satellites trapped in parallel light\n[00:49.25]Burning through the atmosphere of endless night\n[01:00.00]Dusty vinyl spins reverse\n[01:03.45]Our polaroid timeline bleeds through the verse\n[01:07.10]Telescope aimed at dead stars\n[01:10.65]Still tracing constellations through prison bars\n[01:14.30]Electric veins pulse through concrete skies\n[01:17.85]Your name echoes in the hollow where my heartbeat lies\n[01:21.50]We're satellites trapped in parallel light\n[01:25.05]Burning through the atmosphere of endless night\n[02:10.00]Clockwork gears grind moonbeams to rust\n[02:13.50]Our fingerprint smudged by interstellar dust\n[02:17.15]Velvet thunder rolls through my veins\n[02:20.70]Chasing phantom trains through solar plane\n[02:24.35]Electric veins pulse through concrete skies\n[02:27.90]Your name echoes in the hollow where my heartbeat lies\n" + ], + "title": "Lyrics", + "type": "string", + "description": "The prompt to generate the song from. Must have two sections. Sections start with either [chorus] or a [verse]." + }, + "cfg_strength": { + "minimum": 1, + "maximum": 10, + "type": "number", + "title": "CFG Strength", + "description": "The CFG strength to use for the music generation.", + "default": 4 + }, + "reference_audio_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/diffrythm/rock_en.wav" + ], + "title": "Reference Audio URL", + "type": "string", + "description": "The URL of the reference audio to use for the music generation." + }, + "music_duration": { + "enum": [ + "95s", + "285s" + ], + "title": "Music Duration", + "type": "string", + "description": "The duration of the music to generate.", + "default": "95s" + }, + "scheduler": { + "enum": [ + "euler", + "midpoint", + "rk4", + "implicit_adams" + ], + "title": "Scheduler", + "type": "string", + "description": "The scheduler to use for the music generation.", + "default": "euler" + }, + "num_inference_steps": { + "minimum": 10, + "maximum": 100, + "type": "integer", + "title": "Number of Inference Steps", + "description": "The number of inference steps to use for the music generation.", + "default": 32 + }, + "style_prompt": { + "examples": [ + "pop" + ], + "title": "Style Prompt", + "type": "string", + "description": "The style prompt to use for the music generation." 
+ } + }, + "x-fal-order-properties": [ + "lyrics", + "reference_audio_url", + "style_prompt", + "music_duration", + "cfg_strength", + "scheduler", + "num_inference_steps" + ], + "required": [ + "lyrics" + ] + }, + "DiffrhythmOutput": { + "title": "Output", + "type": "object", + "properties": { + "audio": { + "examples": [ + { + "file_size": 33554520, + "file_name": "output.wav", + "content_type": "application/octet-stream", + "url": "https://v3.fal.media/files/elephant/VV4wtKXBpZL1bNv6en36t_output.wav" + } + ], + "title": "Audio", + "description": "Generated music file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "audio" + ], + "required": [ + "audio" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/diffrhythm/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/diffrhythm/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/diffrhythm": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DiffrhythmInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/diffrhythm/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DiffrhythmOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/elevenlabs/tts/multilingual-v2", + "metadata": { + "display_name": "ElevenLabs TTS Multilingual v2", + "category": "text-to-audio", + "description": "Generate multilingual text-to-speech audio using ElevenLabs TTS Multilingual v2.", + "status": "active", + "tags": [ + "audio" + ], + "updated_at": "2026-01-26T21:44:04.392Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/panda/twZsKdCTiF8JXv-rRcPZu_8414166d52a548859a8df01bf720fe46.jpg", + "model_url": "https://fal.run/fal-ai/elevenlabs/tts/multilingual-v2", + "license_type": "commercial", + "date": "2025-02-27T00:00:00.000Z", + "group": { + "key": "elevenlabs-audio", + "label": "TTS Multilingual v2" + }, + "highlighted": false, + "kind": "inference", + "stream_url": "https://fal.run/fal-ai/elevenlabs/tts/multilingual-v2/stream", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/elevenlabs/tts/multilingual-v2", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/elevenlabs/tts/multilingual-v2 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/elevenlabs/tts/multilingual-v2", + "category": "text-to-audio", + "thumbnailUrl": "https://fal.media/files/panda/twZsKdCTiF8JXv-rRcPZu_8414166d52a548859a8df01bf720fe46.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/elevenlabs/tts/multilingual-v2", + "documentationUrl": "https://fal.ai/models/fal-ai/elevenlabs/tts/multilingual-v2/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ElevenlabsTtsMultilingualV2Input": { + "title": "TextToSpeechRequest", + "type": "object", + "properties": { + "stability": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Stability", + "description": "Voice stability (0-1)", + "default": 0.5 + }, + "next_text": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The text that comes after the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation.", + "title": "Next Text" + }, + "speed": { + "minimum": 0.7, + "maximum": 1.2, + "type": "number", + "title": "Speed", + "description": "Speech speed (0.7-1.2). Values below 1.0 slow down the speech, above 1.0 speed it up. Extreme values may affect quality.", + "default": 1 + }, + "style": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Style", + "description": "Style exaggeration (0-1)", + "default": 0 + }, + "text": { + "examples": [ + "Hello! This is a test of the text to speech system, powered by ElevenLabs. How does it sound?" + ], + "description": "The text to convert to speech", + "type": "string", + "title": "Text", + "minLength": 1 + }, + "timestamps": { + "description": "Whether to return timestamps for each word in the generated speech", + "type": "boolean", + "title": "Timestamps", + "default": false + }, + "similarity_boost": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Similarity Boost", + "description": "Similarity boost (0-1)", + "default": 0.75 + }, + "voice": { + "examples": [ + "Aria", + "Roger", + "Sarah", + "Laura", + "Charlie", + "George", + "Callum", + "River", + "Liam", + "Charlotte", + "Alice", + "Matilda", + "Will", + "Jessica", + "Eric", + "Chris", + "Brian", + "Daniel", + "Lily", + "Bill" + ], + "description": "The voice to use for speech generation", + "type": "string", + "title": "Voice", + "default": "Rachel" + }, + "language_code": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Language code (ISO 639-1) used to enforce a language for the model. An error will be returned if language code is not supported by the model.", + "title": "Language Code" + }, + "previous_text": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The text that came before the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation.", + "title": "Previous Text" + } + }, + "x-fal-order-properties": [ + "text", + "voice", + "stability", + "similarity_boost", + "style", + "speed", + "timestamps", + "previous_text", + "next_text", + "language_code" + ], + "required": [ + "text" + ] + }, + "ElevenlabsTtsMultilingualV2Output": { + "title": "TTSOutput", + "type": "object", + "properties": { + "audio": { + "examples": [ + { + "url": "https://v3.fal.media/files/zebra/zJL_oRY8h5RWwjoK1w7tx_output.mp3" + } + ], + "description": "The generated audio file", + "$ref": "#/components/schemas/File" + }, + "timestamps": { + "anyOf": [ + { + "type": "array", + "items": {} + }, + { + "type": "null" + } + ], + "description": "Timestamps for each word in the generated speech. 
Only returned if `timestamps` is set to True in the request.", + "title": "Timestamps" + } + }, + "x-fal-order-properties": [ + "audio", + "timestamps" + ], + "required": [ + "audio" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/elevenlabs/tts/multilingual-v2/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/elevenlabs/tts/multilingual-v2/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/elevenlabs/tts/multilingual-v2": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ElevenlabsTtsMultilingualV2Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/elevenlabs/tts/multilingual-v2/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ElevenlabsTtsMultilingualV2Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kokoro/hindi", + "metadata": { + "display_name": "Kokoro TTS (Hindi)", + "category": "text-to-audio", + "description": "A fast and expressive Hindi text-to-speech model with clear pronunciation and accurate intonation.", + "status": "active", + "tags": [ + "speech" + ], + "updated_at": "2026-01-26T21:44:26.404Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/kokoro.webp", + "model_url": "https://fal.run/fal-ai/kokoro/hindi", + "date": "2025-02-14T00:00:00.000Z", + "group": { + "key": "kokoro-tts", + "label": "Hindi" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kokoro/hindi", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kokoro/hindi queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kokoro/hindi", + "category": "text-to-audio", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/kokoro.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/kokoro/hindi", + "documentationUrl": "https://fal.ai/models/fal-ai/kokoro/hindi/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "KokoroHindiInput": { + "title": "HindiRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "सपने वो नहीं जो हम सोते समय देखते हैं, सपने वो हैं जो हमें सोने नहीं देते।" + ], + "title": "Prompt", + "type": "string" + }, + "voice": { + "enum": [ + "hf_alpha", + "hf_beta", + "hm_omega", + "hm_psi" + ], + "title": "Voice", + "type": "string", + "description": "Voice ID for the desired voice.", + "examples": [ + "hf_alpha" + ] + }, + "speed": { + "minimum": 0.1, + "maximum": 5, + "type": "number", + "title": "Speed", + "description": "Speed of the generated audio. Default is 1.0.", + "default": 1 + } + }, + "x-fal-order-properties": [ + "prompt", + "voice", + "speed" + ], + "required": [ + "prompt", + "voice" + ] + }, + "KokoroHindiOutput": { + "title": "HindiOutput", + "type": "object", + "properties": { + "audio": { + "examples": [ + { + "url": "https://fal.media/files/elephant/3sGUskl1AFG4TN_NAinO8_tmpdq_1m8og.wav" + } + ], + "title": "Audio", + "description": "The generated music", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "audio" + ], + "required": [ + "audio" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kokoro/hindi/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kokoro/hindi/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/kokoro/hindi": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KokoroHindiInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kokoro/hindi/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KokoroHindiOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kokoro/mandarin-chinese", + "metadata": { + "display_name": "Kokoro TTS (Mandarin Chinese)", + "category": "text-to-audio", + "description": "A highly efficient Mandarin Chinese text-to-speech model that captures natural tones and prosody.", + "status": "active", + "tags": [ + "speech" + ], + "updated_at": "2026-01-26T21:44:26.215Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/kokoro.webp", + "model_url": "https://fal.run/fal-ai/kokoro/mandarin-chinese", + "date": "2025-02-14T00:00:00.000Z", + "group": { + "key": "kokoro-tts", + "label": "Mandarin Chinese" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kokoro/mandarin-chinese", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kokoro/mandarin-chinese queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kokoro/mandarin-chinese", + "category": "text-to-audio", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/kokoro.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/kokoro/mandarin-chinese", + "documentationUrl": "https://fal.ai/models/fal-ai/kokoro/mandarin-chinese/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KokoroMandarinChineseInput": { + "title": "MandarinRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "每一个伟大的旅程,都始于勇敢迈出的第一步。加油,你可以做到!" 
+ ], + "title": "Prompt", + "type": "string" + }, + "voice": { + "enum": [ + "zf_xiaobei", + "zf_xiaoni", + "zf_xiaoxiao", + "zf_xiaoyi", + "zm_yunjian", + "zm_yunxi", + "zm_yunxia", + "zm_yunyang" + ], + "title": "Voice", + "type": "string", + "description": "Voice ID for the desired voice.", + "examples": [ + "zf_xiaobei" + ] + }, + "speed": { + "minimum": 0.1, + "maximum": 5, + "type": "number", + "title": "Speed", + "description": "Speed of the generated audio. Default is 1.0.", + "default": 1 + } + }, + "x-fal-order-properties": [ + "prompt", + "voice", + "speed" + ], + "required": [ + "prompt", + "voice" + ] + }, + "KokoroMandarinChineseOutput": { + "title": "MandarinOutput", + "type": "object", + "properties": { + "audio": { + "examples": [ + { + "url": "https://fal.media/files/rabbit/8UiqobkQXPrYDRHl4l5oU_tmptz6jo3ex.wav" + } + ], + "title": "Audio", + "description": "The generated music", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "audio" + ], + "required": [ + "audio" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kokoro/mandarin-chinese/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kokoro/mandarin-chinese/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/kokoro/mandarin-chinese": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KokoroMandarinChineseInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kokoro/mandarin-chinese/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KokoroMandarinChineseOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kokoro/spanish", + "metadata": { + "display_name": "Kokoro TTS (Spanish)", + "category": "text-to-audio", + "description": "A natural-sounding Spanish text-to-speech model optimized for Latin American and European Spanish.", + "status": "active", + "tags": [ + "speech" + ], + "updated_at": "2026-01-26T21:44:26.529Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/kokoro.webp", + "model_url": "https://fal.run/fal-ai/kokoro/spanish", + "date": "2025-02-14T00:00:00.000Z", + "group": { + "key": "kokoro-tts", + "label": "Spanish" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kokoro/spanish", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kokoro/spanish queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kokoro/spanish", + "category": "text-to-audio", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/kokoro.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/kokoro/spanish", + "documentationUrl": "https://fal.ai/models/fal-ai/kokoro/spanish/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KokoroSpanishInput": { + "title": "SpanishRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "La vida es un viaje, no un destino. Disfruta cada momento y sigue adelante con pasión." 
+ ], + "title": "Prompt", + "type": "string" + }, + "voice": { + "enum": [ + "ef_dora", + "em_alex", + "em_santa" + ], + "title": "Voice", + "type": "string", + "description": "Voice ID for the desired voice.", + "examples": [ + "ef_dora" + ] + }, + "speed": { + "minimum": 0.1, + "maximum": 5, + "type": "number", + "title": "Speed", + "description": "Speed of the generated audio. Default is 1.0.", + "default": 1 + } + }, + "x-fal-order-properties": [ + "prompt", + "voice", + "speed" + ], + "required": [ + "prompt", + "voice" + ] + }, + "KokoroSpanishOutput": { + "title": "SpanishOutput", + "type": "object", + "properties": { + "audio": { + "examples": [ + { + "url": "https://fal.media/files/monkey/5rBM3qVCED73Lxs5XLcwj_tmp4f2z_qrf.wav" + } + ], + "title": "Audio", + "description": "The generated music", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "audio" + ], + "required": [ + "audio" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kokoro/spanish/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kokoro/spanish/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/kokoro/spanish": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KokoroSpanishInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kokoro/spanish/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KokoroSpanishOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kokoro/brazilian-portuguese", + "metadata": { + "display_name": "Kokoro TTS (Brazilian Portuguese)", + "category": "text-to-audio", + "description": "A natural and expressive Brazilian Portuguese text-to-speech model optimized for clarity and fluency.", + "status": "active", + "tags": [ + "speech" + ], + "updated_at": "2026-01-26T21:44:26.792Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/kokoro.webp", + "model_url": "https://fal.run/fal-ai/kokoro/brazilian-portuguese", + "date": "2025-02-14T00:00:00.000Z", + "group": { + "key": "kokoro-tts", + "label": "Brazilian Portuguese" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kokoro/brazilian-portuguese", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kokoro/brazilian-portuguese queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kokoro/brazilian-portuguese", + "category": "text-to-audio", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/kokoro.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/kokoro/brazilian-portuguese", + "documentationUrl": "https://fal.ai/models/fal-ai/kokoro/brazilian-portuguese/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KokoroBrazilianPortugueseInput": { + "title": "BrPortugueseRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "O segredo do sucesso é a persistência. Nunca desista dos seus sonhos!" 
+ ], + "title": "Prompt", + "type": "string" + }, + "voice": { + "enum": [ + "pf_dora", + "pm_alex", + "pm_santa" + ], + "title": "Voice", + "type": "string", + "description": "Voice ID for the desired voice.", + "examples": [ + "pf_dora" + ] + }, + "speed": { + "minimum": 0.1, + "maximum": 5, + "type": "number", + "title": "Speed", + "description": "Speed of the generated audio. Default is 1.0.", + "default": 1 + } + }, + "x-fal-order-properties": [ + "prompt", + "voice", + "speed" + ], + "required": [ + "prompt", + "voice" + ] + }, + "KokoroBrazilianPortugueseOutput": { + "title": "BrPortugeseOutput", + "type": "object", + "properties": { + "audio": { + "examples": [ + { + "url": "https://fal.media/files/rabbit/Y9-bWJt5lixo8PTCmncN6_tmpyh7u57oa.wav" + } + ], + "title": "Audio", + "description": "The generated music", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "audio" + ], + "required": [ + "audio" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kokoro/brazilian-portuguese/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kokoro/brazilian-portuguese/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/kokoro/brazilian-portuguese": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KokoroBrazilianPortugueseInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kokoro/brazilian-portuguese/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KokoroBrazilianPortugueseOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kokoro/british-english", + "metadata": { + "display_name": "Kokoro TTS (British English)", + "category": "text-to-audio", + "description": "A high-quality British English text-to-speech model offering natural and expressive voice synthesis.", + "status": "active", + "tags": [ + "speech" + ], + "updated_at": "2026-01-26T21:44:27.435Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/kokoro.webp", + "model_url": "https://fal.run/fal-ai/kokoro/british-english", + "date": "2025-02-14T00:00:00.000Z", + "group": { + "key": "kokoro-tts", + "label": "British English" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kokoro/british-english", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kokoro/british-english queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kokoro/british-english", + "category": "text-to-audio", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/kokoro.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/kokoro/british-english", + "documentationUrl": "https://fal.ai/models/fal-ai/kokoro/british-english/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KokoroBritishEnglishInput": { + "title": "BrEnglishRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Ladies and gentlemen, welcome aboard. 
Please ensure your seatbelt is fastened and your tray table is stowed as we prepare for takeoff." + ], + "title": "Prompt", + "type": "string" + }, + "voice": { + "enum": [ + "bf_alice", + "bf_emma", + "bf_isabella", + "bf_lily", + "bm_daniel", + "bm_fable", + "bm_george", + "bm_lewis" + ], + "title": "Voice", + "type": "string", + "description": "Voice ID for the desired voice.", + "examples": [ + "bf_alice" + ] + }, + "speed": { + "minimum": 0.1, + "maximum": 5, + "type": "number", + "title": "Speed", + "description": "Speed of the generated audio. Default is 1.0.", + "default": 1 + } + }, + "x-fal-order-properties": [ + "prompt", + "voice", + "speed" + ], + "required": [ + "prompt", + "voice" + ] + }, + "KokoroBritishEnglishOutput": { + "title": "BrEngOutput", + "type": "object", + "properties": { + "audio": { + "examples": [ + { + "url": "https://fal.media/files/kangaroo/4wpA60Kum6UjOVBKJoNyL_tmpxfrkn95k.wav" + } + ], + "title": "Audio", + "description": "The generated music", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "audio" + ], + "required": [ + "audio" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kokoro/british-english/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kokoro/british-english/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/kokoro/british-english": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KokoroBritishEnglishInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kokoro/british-english/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KokoroBritishEnglishOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kokoro/french", + "metadata": { + "display_name": "Kokoro TTS (French)", + "category": "text-to-audio", + "description": "An expressive and natural French text-to-speech model for both European and Canadian French.", + "status": "active", + "tags": [ + "speech" + ], + "updated_at": "2026-01-26T21:44:27.304Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/kokoro.webp", + "model_url": "https://fal.run/fal-ai/kokoro/french", + "date": "2025-02-14T00:00:00.000Z", + "group": { + "key": "kokoro-tts", + "label": "French" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kokoro/french", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kokoro/french queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kokoro/french", + "category": "text-to-audio", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/kokoro.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/kokoro/french", + "documentationUrl": "https://fal.ai/models/fal-ai/kokoro/french/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KokoroFrenchInput": { + "title": "FrenchRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "La seule limite à nos réalisations de demain, ce sont nos doutes d’aujourd’hui." 
+ ], + "title": "Prompt", + "type": "string" + }, + "voice": { + "enum": [ + "ff_siwis" + ], + "title": "Voice", + "type": "string", + "description": "Voice ID for the desired voice.", + "examples": [ + "ff_siwis" + ] + }, + "speed": { + "minimum": 0.1, + "maximum": 5, + "type": "number", + "title": "Speed", + "description": "Speed of the generated audio. Default is 1.0.", + "default": 1 + } + }, + "x-fal-order-properties": [ + "prompt", + "voice", + "speed" + ], + "required": [ + "prompt", + "voice" + ] + }, + "KokoroFrenchOutput": { + "title": "FrenchOutput", + "type": "object", + "properties": { + "audio": { + "examples": [ + { + "url": "https://fal.media/files/kangaroo/E_itKJKZKRNaO-QtU77k1_tmpe1qso5xp.wav" + } + ], + "title": "Audio", + "description": "The generated music", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "audio" + ], + "required": [ + "audio" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kokoro/french/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kokoro/french/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/kokoro/french": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KokoroFrenchInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kokoro/french/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KokoroFrenchOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kokoro/japanese", + "metadata": { + "display_name": "Kokoro TTS (Japanese)", + "category": "text-to-audio", + "description": "A fast and natural-sounding Japanese text-to-speech model optimized for smooth pronunciation.", + "status": "active", + "tags": [ + "speech" + ], + "updated_at": "2026-01-26T21:44:27.740Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/kokoro.webp", + "model_url": "https://fal.run/fal-ai/kokoro/japanese", + "date": "2025-02-14T00:00:00.000Z", + "group": { + "key": "kokoro-tts", + "label": "Japanese" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kokoro/japanese", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kokoro/japanese queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kokoro/japanese", + "category": "text-to-audio", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/kokoro.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/kokoro/japanese", + "documentationUrl": "https://fal.ai/models/fal-ai/kokoro/japanese/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KokoroJapaneseInput": { + "title": "JapaneseRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "夢を追いかけることを恐れないでください。努力すれば、必ず道は開けます!" 
+ ], + "title": "Prompt", + "type": "string" + }, + "voice": { + "enum": [ + "jf_alpha", + "jf_gongitsune", + "jf_nezumi", + "jf_tebukuro", + "jm_kumo" + ], + "title": "Voice", + "type": "string", + "description": "Voice ID for the desired voice.", + "examples": [ + "jf_alpha" + ] + }, + "speed": { + "minimum": 0.1, + "maximum": 5, + "type": "number", + "title": "Speed", + "description": "Speed of the generated audio. Default is 1.0.", + "default": 1 + } + }, + "x-fal-order-properties": [ + "prompt", + "voice", + "speed" + ], + "required": [ + "prompt", + "voice" + ] + }, + "KokoroJapaneseOutput": { + "title": "JapaneseOutput", + "type": "object", + "properties": { + "audio": { + "examples": [ + { + "url": "https://fal.media/files/lion/piLhqKO8LJxrWaNg2dVUv_tmpp6eff6zl.wav" + } + ], + "title": "Audio", + "description": "The generated music", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "audio" + ], + "required": [ + "audio" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kokoro/japanese/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kokoro/japanese/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/kokoro/japanese": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KokoroJapaneseInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kokoro/japanese/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KokoroJapaneseOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kokoro/american-english", + "metadata": { + "display_name": "Kokoro TTS", + "category": "text-to-audio", + "description": "Kokoro is a lightweight text-to-speech model that delivers comparable quality to larger models while being significantly faster and more cost-efficient.", + "status": "active", + "tags": [ + "speech" + ], + "updated_at": "2026-01-26T21:44:27.048Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/kokoro.webp", + "model_url": "https://fal.run/fal-ai/kokoro/american-english", + "date": "2025-02-14T00:00:00.000Z", + "group": { + "key": "kokoro-tts", + "label": "American English" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kokoro/american-english", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kokoro/american-english queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kokoro/american-english", + "category": "text-to-audio", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/kokoro.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/kokoro/american-english", + "documentationUrl": "https://fal.ai/models/fal-ai/kokoro/american-english/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KokoroAmericanEnglishInput": { + "title": "AmEnglishRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "The future belongs to those who believe in the beauty of their dreams. So, dream big, work hard, and make it happen!" 
+ ], + "title": "Prompt", + "type": "string", + "default": "" + }, + "voice": { + "enum": [ + "af_heart", + "af_alloy", + "af_aoede", + "af_bella", + "af_jessica", + "af_kore", + "af_nicole", + "af_nova", + "af_river", + "af_sarah", + "af_sky", + "am_adam", + "am_echo", + "am_eric", + "am_fenrir", + "am_liam", + "am_michael", + "am_onyx", + "am_puck", + "am_santa" + ], + "title": "Voice", + "type": "string", + "description": "Voice ID for the desired voice.", + "examples": [ + "af_heart" + ], + "default": "af_heart" + }, + "speed": { + "minimum": 0.1, + "maximum": 5, + "type": "number", + "title": "Speed", + "description": "Speed of the generated audio. Default is 1.0.", + "default": 1 + } + }, + "x-fal-order-properties": [ + "prompt", + "voice", + "speed" + ] + }, + "KokoroAmericanEnglishOutput": { + "title": "AmEngOutput", + "type": "object", + "properties": { + "audio": { + "examples": [ + { + "url": "https://fal.media/files/elephant/dXVMqWsBDG9yan3kaOT0Z_tmp0vvkha3s.wav" + } + ], + "title": "Audio", + "description": "The generated music", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "audio" + ], + "required": [ + "audio" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kokoro/american-english/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kokoro/american-english/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
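Because the voice enums are recorded verbatim in these fixtures, they translate directly into TypeScript unions, giving callers compile-time checking of the voice parameter. A sketch derived from the american-english enum above; the type and field optionality follow the schema (voice defaults to af_heart and nothing is listed as required), while the names themselves are hypothetical.

// Hypothetical type names; the voice list is copied from the enum above.
type AmEnglishVoice =
  | 'af_heart' | 'af_alloy' | 'af_aoede' | 'af_bella' | 'af_jessica'
  | 'af_kore' | 'af_nicole' | 'af_nova' | 'af_river' | 'af_sarah' | 'af_sky'
  | 'am_adam' | 'am_echo' | 'am_eric' | 'am_fenrir' | 'am_liam'
  | 'am_michael' | 'am_onyx' | 'am_puck' | 'am_santa'

interface AmEnglishRequest {
  prompt: string
  voice?: AmEnglishVoice // optional: the schema defaults to 'af_heart'
  speed?: number // 0.1 to 5, default 1
}

const example: AmEnglishRequest = { prompt: 'Dream big.', voice: 'af_heart' }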
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/kokoro/american-english": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KokoroAmericanEnglishInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kokoro/american-english/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KokoroAmericanEnglishOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/zonos", + "metadata": { + "display_name": "Zonos-Audio-Clone", + "category": "text-to-audio", + "description": "Clone voice of any person and speak anything in their voice using zonos' voice cloning.", + "status": "active", + "tags": [ + "voice cloning" + ], + "updated_at": "2026-01-26T21:44:26.919Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/Ben2/2VVmzf2y5GyRdFoof8BLu.webp", + "model_url": "https://fal.run/fal-ai/zonos", + "date": "2025-02-14T00:00:00.000Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/zonos", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/zonos queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/zonos", + "category": "text-to-audio", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/Ben2/2VVmzf2y5GyRdFoof8BLu.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/zonos", + "documentationUrl": "https://fal.ai/models/fal-ai/zonos/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ZonosInput": { + "title": "ZonosInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Fal is the fastest solution for your image generation." + ], + "title": "Prompt", + "type": "string", + "description": "The content generated using cloned voice." 
+ }, + "reference_audio_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/zonos/demo_voice_zonos.wav" + ], + "title": "Reference Audio Url", + "type": "string", + "description": "The reference audio." + } + }, + "x-fal-order-properties": [ + "reference_audio_url", + "prompt" + ], + "required": [ + "reference_audio_url", + "prompt" + ] + }, + "ZonosOutput": { + "title": "ZonosOutput", + "type": "object", + "properties": { + "audio": { + "title": "Audio", + "description": "The generated audio", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "audio" + ], + "required": [ + "audio" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/zonos/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/zonos/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
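Zonos differs from the Kokoro family in that its input is a reference clip plus a prompt rather than a voice ID. A typed one-shot sketch follows; the input and output shapes mirror ZonosInput/ZonosOutput above, but treating the metadata's model_url (https://fal.run/fal-ai/zonos) as a synchronous POST target and the Key header format are both assumptions of this sketch.

// Shapes mirror ZonosInput/ZonosOutput above; the call style is a sketch.
interface ZonosInput {
  reference_audio_url: string // the voice to clone
  prompt: string // the content generated using the cloned voice
}
interface ZonosOutput {
  audio: { url: string }
}

async function cloneVoice(input: ZonosInput): Promise<ZonosOutput> {
  const res = await fetch('https://fal.run/fal-ai/zonos', {
    method: 'POST',
    headers: {
      Authorization: `Key ${process.env.FAL_KEY}`, // assumed header format
      'Content-Type': 'application/json',
    },
    body: JSON.stringify(input),
  })
  if (!res.ok) throw new Error(`fal-ai/zonos request failed: ${res.status}`)
  return (await res.json()) as ZonosOutput
}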
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/zonos": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ZonosInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/zonos/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ZonosOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kokoro/italian", + "metadata": { + "display_name": "Kokoro TTS (Italian)", + "category": "text-to-audio", + "description": "A high-quality Italian text-to-speech model delivering smooth and expressive speech synthesis.", + "status": "active", + "tags": [ + "speech" + ], + "updated_at": "2026-01-26T21:44:27.176Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/kokoro.webp", + "model_url": "https://fal.run/fal-ai/kokoro/italian", + "date": "2025-02-14T00:00:00.000Z", + "group": { + "key": "kokoro-tts", + "label": "Italian" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kokoro/italian", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kokoro/italian queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kokoro/italian", + "category": "text-to-audio", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/kokoro.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/kokoro/italian", + "documentationUrl": "https://fal.ai/models/fal-ai/kokoro/italian/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KokoroItalianInput": { + "title": "ItalianRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Ogni giorno è una nuova opportunità per scrivere la tua storia. Rendila straordinaria!" 
+ ], + "title": "Prompt", + "type": "string" + }, + "voice": { + "enum": [ + "if_sara", + "im_nicola" + ], + "title": "Voice", + "type": "string", + "description": "Voice ID for the desired voice.", + "examples": [ + "if_sara" + ] + }, + "speed": { + "minimum": 0.1, + "maximum": 5, + "type": "number", + "title": "Speed", + "description": "Speed of the generated audio. Default is 1.0.", + "default": 1 + } + }, + "x-fal-order-properties": [ + "prompt", + "voice", + "speed" + ], + "required": [ + "prompt", + "voice" + ] + }, + "KokoroItalianOutput": { + "title": "ItalianOutput", + "type": "object", + "properties": { + "audio": { + "examples": [ + { + "url": "https://fal.media/files/monkey/-MZ0hRO4IpTMukb_S5aRZ_tmpin14eoed.wav" + } + ], + "title": "Audio", + "description": "The generated music", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "audio" + ], + "required": [ + "audio" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kokoro/italian/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kokoro/italian/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/kokoro/italian": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KokoroItalianInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kokoro/italian/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KokoroItalianOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/yue", + "metadata": { + "display_name": "YuE: Lyrics to Song", + "category": "text-to-audio", + "description": "YuE is a groundbreaking series of open-source foundation models designed for music generation, specifically for transforming lyrics into full songs.", + "status": "active", + "tags": [ + "music" + ], + "updated_at": "2026-01-26T21:44:06.750Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/lion/JAvN_VTh4kcTwtLBuS-A-_152d79bde246442ea7d8a0e2422d90d1.jpg", + "model_url": "https://fal.run/fal-ai/yue", + "license_type": "commercial", + "date": "2025-01-28T00:00:00.000Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/yue", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/yue queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/yue", + "category": "text-to-audio", + "thumbnailUrl": "https://fal.media/files/lion/JAvN_VTh4kcTwtLBuS-A-_152d79bde246442ea7d8a0e2422d90d1.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/yue", + "documentationUrl": "https://fal.ai/models/fal-ai/yue/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "YueInput": { + "title": "TextToMusicInput", + "type": "object", + "properties": { + "lyrics": { + "examples": [ + "[verse]\nStaring at the sunset, colors paint the sky\nThoughts of you keep swirling, can't deny\nI know I let you down, I made mistakes\nBut I'm here to mend the heart I didn't break\n\n[chorus]\nEvery road you take, I'll be one step behind\nEvery dream you chase, I'm reaching for the light\nYou can't fight this feeling now\nI won't back down\nYou know you can't deny it now\nI won't back down\n" + ], + "title": "Lyrics", + "type": "string", + "description": "The prompt to generate an image from. Must have two sections. Sections start with either [chorus] or a [verse]." + }, + "genres": { + "examples": [ + "inspiring female uplifting pop airy vocal electronic bright vocal vocal", + "R&B male hiphop pop 80s vocal electronic dark vocal vocal" + ], + "title": "Genres", + "type": "string", + "description": "The genres (separated by a space ' ') to guide the music generation." + } + }, + "x-fal-order-properties": [ + "lyrics", + "genres" + ], + "required": [ + "lyrics", + "genres" + ] + }, + "YueOutput": { + "title": "Output", + "type": "object", + "properties": { + "audio": { + "examples": [ + { + "file_size": 480462, + "file_name": "cot_inspiring-female-uplifting-pop-airy-vocal-electronic-bright-vocal-vocal_tp0@93_T1@0_rp1@2_maxtk3000_mixed_8179e8da-5452-4cf6-9d6b-f69280feb7e8.mp3", + "content_type": "application/octet-stream", + "url": "https://v3.fal.media/files/tiger/iAXHU3LtbJGeqPYWKkYMr_cot_inspiring-female-uplifting-pop-airy-vocal-electronic-bright-vocal-vocal_tp0%4093_T1%400_rp1%402_maxtk3000_mixed_74bcf408-eb99-4b88-b7bf-7d7212200cf1.mp3" + } + ], + "title": "Audio", + "description": "Generated music file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "audio" + ], + "required": [ + "audio" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/yue/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
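YueInput is the one schema in this batch with a structural constraint on a string field: the lyrics must contain at least two sections, each opening with [verse] or [chorus]. A small sketch that checks the constraint client-side before submitting; the helper name and regex are illustrative, while the rule itself comes from the schema description above.

// Enforces the YueInput rule client-side: lyrics need two or more
// sections, each starting with a [verse] or [chorus] marker.
function buildYueInput(lyrics: string, genres: string) {
  const sections = lyrics.match(/^\[(verse|chorus)\]/gim) ?? []
  if (sections.length < 2) {
    throw new Error(
      'YuE lyrics must contain at least two sections starting with [verse] or [chorus]',
    )
  }
  // genres is a space-separated list of descriptors, per the schema.
  return { lyrics, genres }
}

const yueInput = buildYueInput(
  '[verse]\nStaring at the sunset\n\n[chorus]\nI won\'t back down\n',
  'inspiring female uplifting pop airy vocal electronic bright vocal vocal',
)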
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/yue/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/yue": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/YueInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/yue/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/YueOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/mmaudio-v2/text-to-audio", + "metadata": { + "display_name": "MMAudio V2 Text to Audio", + "category": "text-to-audio", + "description": "MMAudio generates synchronized audio given text inputs. It can generate sounds described by a prompt.", + "status": "active", + "tags": [ + "audio", + "fast" + ], + "updated_at": "2026-01-26T21:44:33.039Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/mmaudio-v2.webp", + "model_url": "https://fal.run/fal-ai/mmaudio-v2/text-to-audio", + "date": "2024-12-20T00:00:00.000Z", + "group": { + "key": "mmaudio-v2", + "label": "Text to Audio" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/mmaudio-v2/text-to-audio", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/mmaudio-v2/text-to-audio queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/mmaudio-v2/text-to-audio", + "category": "text-to-audio", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/mmaudio-v2.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/mmaudio-v2/text-to-audio", + "documentationUrl": "https://fal.ai/models/fal-ai/mmaudio-v2/text-to-audio/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." 
+ }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MmaudioV2TextToAudioInput": { + "title": "AudioInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Indian holy music" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the audio for." + }, + "num_steps": { + "minimum": 4, + "title": "Num Steps", + "type": "integer", + "maximum": 50, + "description": "The number of steps to generate the audio for.", + "default": 25 + }, + "duration": { + "minimum": 1, + "title": "Duration", + "type": "number", + "maximum": 30, + "description": "The duration of the audio to generate.", + "default": 8 + }, + "cfg_strength": { + "minimum": 0, + "title": "Cfg Strength", + "type": "number", + "maximum": 20, + "description": "The strength of Classifier Free Guidance.", + "default": 4.5 + }, + "seed": { + "minimum": 0, + "title": "Seed", + "type": "integer", + "description": "The seed for the random number generator", + "maximum": 65535 + }, + "mask_away_clip": { + "title": "Mask Away Clip", + "type": "boolean", + "description": "Whether to mask away the clip.", + "default": false + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to generate the audio for.", + "default": "" + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "seed", + "num_steps", + "duration", + "cfg_strength", + "mask_away_clip" + ], + "required": [ + "prompt" + ] + }, + "MmaudioV2TextToAudioOutput": { + "title": "AudioOutput", + "type": "object", + "properties": { + "audio": { + "examples": [ + { + "file_size": 1001342, + "file_name": "mmaudio_input.flac", + "content_type": "application/octet-stream", + "url": "https://storage.googleapis.com/falserverless/model_tests/video_models/mmaudio_output.flac" + } + ], + "title": "Audio", + "description": "The generated audio.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "audio" + ], + "required": [ + "audio" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/mmaudio-v2/text-to-audio/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/mmaudio-v2/text-to-audio/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/mmaudio-v2/text-to-audio": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MmaudioV2TextToAudioInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/mmaudio-v2/text-to-audio/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MmaudioV2TextToAudioOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/minimax-music", + "metadata": { + "display_name": "MiniMax (Hailuo AI) Music", + "category": "text-to-audio", + "description": "Generate music from text prompts using the MiniMax model, which leverages advanced AI techniques to create high-quality, diverse musical compositions.", + "status": "active", + "tags": [ + "music" + ], + "updated_at": "2026-01-26T21:44:09.900Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/tiger/lEWIZDHHfLPEO6tVIK9wz_be57b257c9de4924ba34a22d4814eaf7.jpg", + "model_url": "https://fal.run/fal-ai/minimax-music", + "license_type": "commercial", + "date": "2024-12-17T00:00:00.000Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/minimax-music", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/minimax-music queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/minimax-music", + "category": "text-to-audio", + "thumbnailUrl": 
"https://fal.media/files/tiger/lEWIZDHHfLPEO6tVIK9wz_be57b257c9de4924ba34a22d4814eaf7.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/minimax-music", + "documentationUrl": "https://fal.ai/models/fal-ai/minimax-music/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MinimaxMusicInput": { + "x-fal-order-properties": [ + "prompt", + "reference_audio_url" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "## Fast and Limitless \n In the heart of the code, where dreams collide, \n\nFAL's the name, taking tech for a ride. \nGenerative media, blazing the trail, \n\nFast inference power, we'll never fail.\n##" + ], + "description": "Lyrics with optional formatting. You can use a newline to separate each line of lyrics. You can use two newlines to add a pause between lines. You can use double hash marks (##) at the beginning and end of the lyrics to add accompaniment. Maximum 600 characters.", + "type": "string", + "maxLength": 600, + "minLength": 1, + "title": "Prompt" + }, + "reference_audio_url": { + "examples": [ + "https://fal.media/files/lion/OOTBTSlxKMH_E8H6hoSlb.mpga" + ], + "description": "Reference song, should contain music and vocals. Must be a .wav or .mp3 file longer than 15 seconds.", + "type": "string", + "title": "Reference Audio Url" + } + }, + "title": "TextToMusicRequest", + "required": [ + "prompt", + "reference_audio_url" + ] + }, + "MinimaxMusicOutput": { + "x-fal-order-properties": [ + "audio" + ], + "type": "object", + "properties": { + "audio": { + "examples": [ + { + "url": "https://fal.media/files/elephant/N5UNLCwkC2y8v7a3LQLFE_output.mp3" + } + ], + "description": "The generated music", + "title": "Audio", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "MusicOutput", + "required": [ + "audio" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/minimax-music/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax-music/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/minimax-music": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxMusicInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax-music/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxMusicOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/f5-tts", + "metadata": { + "display_name": "F5 TTS", + "category": "text-to-audio", + "description": "F5 TTS", + "status": "active", + "tags": [ + "speech" + ], + "updated_at": "2026-01-26T21:44:38.828Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/f5-tts.jpeg", + "model_url": "https://fal.run/fal-ai/f5-tts", + "date": "2024-10-17T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/f5-tts", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/f5-tts queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/f5-tts", + "category": "text-to-audio", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/f5-tts.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/f5-tts", + 
"documentationUrl": "https://fal.ai/models/fal-ai/f5-tts/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "F5TtsInput": { + "x-fal-order-properties": [ + "gen_text", + "ref_audio_url", + "ref_text", + "model_type", + "remove_silence" + ], + "type": "object", + "properties": { + "ref_text": { + "examples": [ + "Some call me nature, others call me mother nature." + ], + "description": "The reference text to be used for TTS. If not provided, an ASR (Automatic Speech Recognition) model will be used to generate the reference text.", + "type": "string", + "title": "Reference Text for the Reference Audio", + "default": "" + }, + "remove_silence": { + "description": "Whether to remove the silence from the audio file.", + "type": "boolean", + "title": "Remove Silence", + "default": true + }, + "gen_text": { + "examples": [ + "I don't really care what you call me. I've been a silent spectator, watching species evolve, empires rise and fall. But always remember, I am mighty and enduring. Respect me and I'll nurture you; ignore me and you shall face the consequences." 
+ ], + "description": "The text to be converted to speech.", + "type": "string", + "title": "Text to be converted to speech" + }, + "model_type": { + "enum": [ + "F5-TTS", + "E2-TTS" + ], + "description": "The name of the model to be used for TTS.", + "type": "string", + "title": "Model Type" + }, + "ref_audio_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/reference_audio.wav" + ], + "description": "The URL of the reference audio file.", + "type": "string", + "title": "Reference Audio URL" + } + }, + "title": "TTSInput", + "required": [ + "gen_text", + "ref_audio_url", + "model_type" + ] + }, + "F5TtsOutput": { + "x-fal-order-properties": [ + "audio_url" + ], + "type": "object", + "properties": { + "audio_url": { + "description": "The audio file containing the generated speech.", + "title": "Generated Speech", + "$ref": "#/components/schemas/AudioFile" + } + }, + "title": "TTSOutput", + "required": [ + "audio_url" + ] + }, + "AudioFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "file_name": { + "examples": [ + "8535dd59e911496a947daa35c07e67a3_tmplkcy6tut.wav" + ], + "title": "File Name", + "type": "string", + "default": "8535dd59e911496a947daa35c07e67a3_tmplkcy6tut.wav" + }, + "content_type": { + "examples": [ + "audio/wav" + ], + "title": "Content Type", + "type": "string", + "default": "audio/wav" + }, + "url": { + "examples": [ + "https://v2.fal.media/files/8535dd59e911496a947daa35c07e67a3_tmplkcy6tut.wav" + ], + "title": "Url", + "type": "string" + } + }, + "title": "AudioFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/f5-tts/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/f5-tts/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/f5-tts": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/F5TtsInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/f5-tts/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/F5TtsOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/stable-audio", + "metadata": { + "display_name": "Stable Audio Open", + "category": "text-to-audio", + "description": "Open source text-to-audio model.", + "status": "active", + "tags": [ + "music" + ], + "updated_at": "2026-01-26T21:44:16.870Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/rabbit/vO63ofRg1Fz5L2ByZ5wt5_d48da0f7ccfd42ac9b39ce86b3f1cbd9.jpg", + "model_url": "https://fal.run/fal-ai/stable-audio", + "github_url": "https://huggingface.co/stabilityai/stable-audio-open-1.0/blob/main/LICENSE", + "license_type": "commercial", + "date": "2024-01-04T00:00:00.000Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/stable-audio", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/stable-audio queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/stable-audio", + "category": "text-to-audio", + "thumbnailUrl": "https://fal.media/files/rabbit/vO63ofRg1Fz5L2ByZ5wt5_d48da0f7ccfd42ac9b39ce86b3f1cbd9.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/stable-audio", + "documentationUrl": "https://fal.ai/models/fal-ai/stable-audio/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "StableAudioInput": { + "title": "Input", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "128 BPM tech house drum loop" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate audio from" + }, + "steps": { + "minimum": 1, + "maximum": 1000, + "type": "integer", + "title": "Steps", + "description": "The number of steps to denoise the audio for", + "default": 100 + }, + "seconds_total": { + "minimum": 0, + "maximum": 47, + "type": "integer", + "title": "Seconds Total", + "description": "The duration of the audio clip to generate", + "default": 30 + }, + "seconds_start": { + "minimum": 0, + "maximum": 47, + "type": "integer", + "title": "Seconds Start", + "description": "The start point of the audio clip to generate", + "default": 0 + } + }, + "x-fal-order-properties": [ + "prompt", + "seconds_start", + "seconds_total", + "steps" + ], + "required": [ + "prompt" + ] + }, + "StableAudioOutput": { + "title": "Output", + "type": "object", + "properties": { + "audio_file": { + "description": "The generated audio clip", + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "audio_file" + ], + "required": [ + "audio_file" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "description": "The mime type of the file.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/stable-audio/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/stable-audio/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/stable-audio": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StableAudioInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/stable-audio/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StableAudioOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/packages/typescript/ai-fal/json/fal.models.text-to-image.json b/packages/typescript/ai-fal/json/fal.models.text-to-image.json new file mode 100644 index 00000000..04d199d0 --- /dev/null +++ b/packages/typescript/ai-fal/json/fal.models.text-to-image.json @@ -0,0 +1,75813 @@ +{ + "generated_at": "2026-01-28T02:51:51.828Z", + "total_models": 152, + "category": "text-to-image", + "models": [ + { + "endpoint_id": "fal-ai/imagen4/preview", + "metadata": { + "display_name": "Imagen 4", + "category": "text-to-image", + "description": "Google’s highest quality image generation model", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:23.647Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-3.jpg", + "model_url": "https://fal.run/fal-ai/imagen4/preview", + "license_type": "commercial", + "date": "2025-05-20T18:53:57.862Z", + "group": { + "key": "imagen-4", + "label": "Imagen 4" + }, + "highlighted": true, + "kind": "inference", + "pinned": true + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/imagen4/preview", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/imagen4/preview queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/imagen4/preview", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-3.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/imagen4/preview", + "documentationUrl": "https://fal.ai/models/fal-ai/imagen4/preview/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Imagen4PreviewInput": { + "title": "Imagen4TextToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Capture an intimate close-up bathed in warm, soft, late-afternoon sunlight filtering into a quintessential 1960s kitchen. The focal point is a charmingly designed vintage package of all-purpose flour, resting invitingly on a speckled Formica countertop. The packaging itself evokes pure nostalgia: perhaps thick, slightly textured paper in a warm cream tone, adorned with simple, bold typography (a friendly serif or script) in classic red and blue \"ALL-PURPOSE FLOUR\", featuring a delightful illustration like a stylized sheaf of wheat or a cheerful baker character. In smaller bold print at the bottom of the package: \"NET WT 5 LBS (80 OZ) 2.27kg\". Focus sharply on the package details – the slightly soft edges of the paper bag, the texture of the vintage printing, the inviting \"All-Purpose Flour\" text. Subtle hints of the 1960s kitchen frame the shot – the chrome edge of the counter gleaming softly, a blurred glimpse of a pastel yellow ceramic tile backsplash, or the corner of a vintage metal canister set just out of focus. The shallow depth of field keeps attention locked on the beautifully designed package, creating an aesthetic rich in warmth, authenticity, and nostalgic appeal." + ], + "maxLength": 5000, + "type": "string", + "title": "Prompt", + "minLength": 3, + "description": "The text prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Number of Images", + "description": "The number of images to generate.", + "default": 1 + }, + "aspect_ratio": { + "enum": [ + "1:1", + "16:9", + "9:16", + "4:3", + "3:4" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated image.", + "default": "1:1" + }, + "resolution": { + "enum": [ + "1K", + "2K" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated image.", + "default": "1K" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + } + }, + "x-fal-order-properties": [ + "prompt", + "num_images", + "aspect_ratio", + "output_format", + "sync_mode", + "resolution" + ], + "required": [ + "prompt" + ] + }, + "Imagen4PreviewOutput": { + "title": "Imagen4TextToImageOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "file_name": "c0RfXzCisqX6YRkIF7apw_output.png", + "content_type": "image/png", + "url": "https://v3.fal.media/files/rabbit/rmgBxhwGYb2d3pl3x9sKf_output.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated images.", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "description": { + "title": "Description", + "type": "string", + "description": "The description of the generated images." 
+ } + }, + "x-fal-order-properties": [ + "images", + "description" + ], + "required": [ + "images", + "description" + ] + }, + "ImageFile": { + "title": "ImageFile", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the image" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the image" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/imagen4/preview/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/imagen4/preview/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/imagen4/preview": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Imagen4PreviewInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/imagen4/preview/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Imagen4PreviewOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-pro/v1.1-ultra", + "metadata": { + "display_name": "FLUX1.1 [pro] ultra", + "category": "text-to-image", + "description": "FLUX1.1 [pro] ultra is the newest version of FLUX1.1 [pro], maintaining professional-grade image quality while delivering up to 2K resolution with improved photo realism.", + "status": "active", + "tags": [ + "high-res", + "realism" + ], + "updated_at": "2026-01-26T21:41:24.487Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/flux-pro-v1-1-ultra.webp", + "model_url": "https://fal.run/fal-ai/flux-pro/v1.1-ultra", + "license_type": "commercial", + "date": "2024-12-17T00:00:00.000Z", + "group": { + "key": "flux-pro", + "label": "FLUX 1.1 [pro] (ultra)" + }, + "highlighted": true, + "kind": "inference", + "pinned": true + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-pro/v1.1-ultra", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-pro/v1.1-ultra queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-pro/v1.1-ultra", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/flux-pro-v1-1-ultra.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-pro/v1.1-ultra", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-pro/v1.1-ultra/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxProV11UltraInput": { + "title": "FluxProUltraTextToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Extreme close-up of a single tiger eye, direct frontal view. Detailed iris and pupil. Sharp focus on eye texture and color. Natural lighting to capture authentic eye shine and depth. The word \"FLUX\" is painted over it in big, white brush strokes with visible texture." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "description": "The number of images to generate.", + "maximum": 4, + "default": 1 + }, + "aspect_ratio": { + "anyOf": [ + { + "enum": [ + "21:9", + "16:9", + "4:3", + "3:2", + "1:1", + "2:3", + "3:4", + "9:16", + "9:21" + ], + "type": "string" + }, + { + "type": "string" + } + ], + "title": "Aspect Ratio", + "description": "The aspect ratio of the generated image.", + "default": "16:9" + }, + "enhance_prompt": { + "title": "Enhance Prompt", + "type": "boolean", + "description": "Whether to enhance the prompt for better results.", + "default": false + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "title": "Image URL", + "type": "string", + "description": "The image URL to generate an image from." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "safety_tolerance": { + "enum": [ + "1", + "2", + "3", + "4", + "5", + "6" + ], + "title": "Safety Tolerance", + "type": "string", + "description": "The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive.", + "default": "2" + }, + "image_prompt_strength": { + "minimum": 0, + "title": "Image Prompt Strength", + "type": "number", + "description": "The strength of the image prompt, between 0 and 1.", + "maximum": 1, + "default": 0.1 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "raw": { + "title": "Raw", + "type": "boolean", + "description": "Generate less processed, more natural-looking images.", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "seed", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "safety_tolerance", + "enhance_prompt", + "image_url", + "image_prompt_strength", + "aspect_ratio", + "raw" + ], + "required": [ + "prompt" + ] + }, + "FluxProV11UltraOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image."
+ }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/registry__image__fast_sdxl__models__Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "registry__image__fast_sdxl__models__Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-pro/v1.1-ultra/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-pro/v1.1-ultra/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-pro/v1.1-ultra": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxProV11UltraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-pro/v1.1-ultra/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxProV11UltraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/recraft/v3/text-to-image", + "metadata": { + "display_name": "Recraft V3", + "category": "text-to-image", + "description": "Recraft V3 is a text-to-image model with the ability to generate long texts, vector art, images in brand style, and much more. As of today, it is SOTA in image generation, proven by Hugging Face's industry-leading Text-to-Image Benchmark by Artificial Analysis.", + "status": "active", + "tags": [ + "vector", + "typography", + "style" + ], + "updated_at": "2026-01-26T21:41:23.805Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/recraft-v3.webp", + "model_url": "https://fal.run/fal-ai/recraft/v3/text-to-image", + "license_type": "commercial", + "date": "2025-05-07T11:59:27.643Z", + "group": { + "key": "fal-ai/recraft/v3", + "label": "Text to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": true + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/recraft/v3/text-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/recraft/v3/text-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/recraft/v3/text-to-image", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/recraft-v3.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/recraft/v3/text-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/recraft/v3/text-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "RecraftV3TextToImageInput": { + "title": "TextToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "a red panda eating a bamboo in front of a poster that says \"recraft V3 now available at fal\"" + ], + "maxLength": 1000, + "type": "string", + "title": "Prompt", + "minLength": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "default": "square_hd" + }, + "style": { + "enum": [ + "any", + "realistic_image", + "digital_illustration", + "vector_illustration", + "realistic_image/b_and_w", + "realistic_image/hard_flash", + "realistic_image/hdr", + "realistic_image/natural_light", + "realistic_image/studio_portrait", + "realistic_image/enterprise", + "realistic_image/motion_blur", + "realistic_image/evening_light", + "realistic_image/faded_nostalgia", + "realistic_image/forest_life", + "realistic_image/mystic_naturalism", + "realistic_image/natural_tones", + "realistic_image/organic_calm", + "realistic_image/real_life_glow", + "realistic_image/retro_realism", + "realistic_image/retro_snapshot", + "realistic_image/urban_drama", + "realistic_image/village_realism", + "realistic_image/warm_folk", + "digital_illustration/pixel_art", + "digital_illustration/hand_drawn", + "digital_illustration/grain", + "digital_illustration/infantile_sketch", + "digital_illustration/2d_art_poster", + "digital_illustration/handmade_3d", + "digital_illustration/hand_drawn_outline", + "digital_illustration/engraving_color", + "digital_illustration/2d_art_poster_2", + "digital_illustration/antiquarian", + "digital_illustration/bold_fantasy", + "digital_illustration/child_book", + "digital_illustration/child_books", + "digital_illustration/cover", + "digital_illustration/crosshatch", + "digital_illustration/digital_engraving", + "digital_illustration/expressionism", + "digital_illustration/freehand_details", + "digital_illustration/grain_20", + "digital_illustration/graphic_intensity", + "digital_illustration/hard_comics", + "digital_illustration/long_shadow", + "digital_illustration/modern_folk", + "digital_illustration/multicolor", + "digital_illustration/neon_calm", + "digital_illustration/noir", + "digital_illustration/nostalgic_pastel", + "digital_illustration/outline_details", + "digital_illustration/pastel_gradient", + "digital_illustration/pastel_sketch", + "digital_illustration/pop_art", + "digital_illustration/pop_renaissance", + "digital_illustration/street_art", + "digital_illustration/tablet_sketch", + "digital_illustration/urban_glow", + "digital_illustration/urban_sketching", + "digital_illustration/vanilla_dreams", + "digital_illustration/young_adult_book", + "digital_illustration/young_adult_book_2", + "vector_illustration/bold_stroke", + "vector_illustration/chemistry", + "vector_illustration/colored_stencil", + "vector_illustration/contour_pop_art", + "vector_illustration/cosmics", + "vector_illustration/cutout", + "vector_illustration/depressive", + "vector_illustration/editorial", + "vector_illustration/emotional_flat", + "vector_illustration/infographical", + "vector_illustration/marker_outline", + "vector_illustration/mosaic", + "vector_illustration/naivector", + "vector_illustration/roundish_flat", + "vector_illustration/segmented_colors", + "vector_illustration/sharp_contrast", + 
"vector_illustration/thin", + "vector_illustration/vector_photo", + "vector_illustration/vivid_shapes", + "vector_illustration/engraving", + "vector_illustration/line_art", + "vector_illustration/line_circuit", + "vector_illustration/linocut" + ], + "title": "Style", + "type": "string", + "description": "The style of the generated images. Vector images cost 2X as much.", + "default": "realistic_image" + }, + "colors": { + "title": "Colors", + "type": "array", + "description": "An array of preferable colors", + "items": { + "$ref": "#/components/schemas/RGBColor" + }, + "default": [] + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": false + }, + "style_id": { + "format": "uuid4", + "title": "Style Id", + "type": "string", + "description": "The ID of the custom style reference (optional)" + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "style", + "colors", + "style_id", + "enable_safety_checker" + ], + "required": [ + "prompt" + ] + }, + "RecraftV3TextToImageOutput": { + "title": "TextToImageOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://fal.media/files/penguin/852yy3l5DGLmrwAK42RTB_image.webp" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/File" + } + } + }, + "x-fal-order-properties": [ + "images" + ], + "required": [ + "images" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "RGBColor": { + "title": "RGBColor", + "type": "object", + "properties": { + "r": { + "minimum": 0, + "title": "R", + "type": "integer", + "maximum": 255, + "description": "Red color value", + "default": 0 + }, + "b": { + "minimum": 0, + "title": "B", + "type": "integer", + "maximum": 255, + "description": "Blue color value", + "default": 0 + }, + "g": { + "minimum": 0, + "title": "G", + "type": "integer", + "maximum": 255, + "description": "Green color value", + "default": 0 + } + }, + "x-fal-order-properties": [ + "r", + "g", + "b" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/recraft/v3/text-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/recraft/v3/text-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/recraft/v3/text-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RecraftV3TextToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/recraft/v3/text-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RecraftV3TextToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2/lora", + "metadata": { + "display_name": "Flux 2", + "category": "text-to-image", + "description": "Text-to-image generation with LoRA support for FLUX.2 [dev] from Black Forest Labs. 
Custom style adaptation and fine-tuned model variations.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:15.747Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/panda/tOKnFZKepFeCNbgp6-ndM_7aba1231214a4c0e9446a7c2e02a9289.jpg", + "model_url": "https://fal.run/fal-ai/flux-2/lora", + "license_type": "commercial", + "date": "2025-11-23T00:16:11.639Z", + "group": { + "key": "Flux2", + "label": "Text to Image LoRA" + }, + "highlighted": true, + "kind": "inference", + "stream_url": "https://fal.ai/models/fal-ai/flux-2/lora/stream", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/flux-2-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/flux-2-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2/lora", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-2/lora queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2/lora", + "category": "text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/panda/tOKnFZKepFeCNbgp6-ndM_7aba1231214a4c0e9446a7c2e02a9289.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2/lora", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2/lora/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2LoraInput": { + "x-fal-order-properties": [ + "prompt", + "guidance_scale", + "seed", + "num_inference_steps", + "image_size", + "num_images", + "acceleration", + "enable_prompt_expansion", + "sync_mode", + "enable_safety_checker", + "output_format", + "loras" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Close shot a pianist plays in a luxurious room with tall windows overlooking a rainy metropolis. Shot with a 50mm lens at a side profile angle, soft tungsten light highlighting hands moving over keys. Capture detailed reflections in polished black piano surfaces, raindrops sliding down glass, and atmospheric warm/cool lighting contrast." + ], + "description": "The prompt to generate an image from.", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "title": "Number of Images", + "maximum": 4, + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the image to generate. 
The width and height must be between 512 and 2048 pixels.", + "title": "Image Size", + "default": "landscape_4_3" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "description": "The acceleration level to use for the image generation.", + "type": "string", + "title": "Acceleration", + "examples": [ + "regular" + ], + "default": "regular" + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "png" + }, + "loras": { + "description": "List of LoRA weights to apply (maximum 3). Each LoRA can be a URL, HuggingFace repo ID, or local path.", + "type": "array", + "title": "Loras", + "items": { + "$ref": "#/components/schemas/LoRAInput" + }, + "default": [] + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "description": "Guidance Scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.", + "type": "number", + "title": "Guidance Scale", + "maximum": 20, + "default": 2.5 + }, + "seed": { + "description": "The seed to use for the generation. If not provided, a random seed will be used.", + "type": "integer", + "title": "Seed" + }, + "num_inference_steps": { + "minimum": 4, + "description": "The number of inference steps to perform.", + "type": "integer", + "title": "Number of Inference Steps", + "maximum": 50, + "default": 28 + }, + "enable_prompt_expansion": { + "description": "If set to true, the prompt will be expanded for better results.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": false + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + } + }, + "title": "Flux2TextToImageLoRAInput", + "required": [ + "prompt" + ] + }, + "Flux2LoraOutput": { + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "examples": [ + [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/flux2_dev_lora_t2i_output.png" + } + ] + ], + "description": "The generated images", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "seed": { + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ", + "type": "integer", + "title": "Seed" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + } + }, + "title": "Flux2T2ILoRAOutput", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "title": "Height", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "title": "Width", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "LoRAInput": { + "description": "LoRA weight configuration.", + "type": "object", + "properties": { + "path": { + "description": "URL, HuggingFace repo ID (owner/repo), or local path to LoRA weights.", + "type": "string", + "title": "Path" + }, + "scale": { + "minimum": 0, + "description": "Scale factor for LoRA application (0.0 to 4.0).", + "type": "number", + "title": "Scale", + "maximum": 4, + "default": 1 + } + }, + "title": "LoRAInput", + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "ImageFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "ImageFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2/lora/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2/lora/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2/lora": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2LoraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2/lora/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2LoraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2", + "metadata": { + "display_name": "Flux 2", + "category": "text-to-image", + "description": "Text-to-image generation with FLUX.2 [dev] from Black Forest Labs. Enhanced realism, crisper text generation, and native editing capabilities.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:16.012Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/penguin/zSBCJtPpeIQwR5AC_IamX_b1e1137961754e4d851907c21f8c20cd.jpg", + "model_url": "https://fal.run/fal-ai/flux-2", + "license_type": "commercial", + "date": "2025-11-23T00:15:07.672Z", + "group": { + "key": "Flux2", + "label": "Text to Image" + }, + "highlighted": true, + "kind": "inference", + "stream_url": "https://fal.ai/models/fal-ai/flux-2/stream", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-2 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2", + "category": "text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/penguin/zSBCJtPpeIQwR5AC_IamX_b1e1137961754e4d851907c21f8c20cd.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2Input": { + "x-fal-order-properties": [ + "prompt", + "guidance_scale", + "seed", + "num_inference_steps", + "image_size", + "num_images", + "acceleration", + "enable_prompt_expansion", + "sync_mode", + "enable_safety_checker", + "output_format" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Dutch angle close-up of a survivor in post-apocalyptic setting, dust-covered face, dramatic harsh sunlight creating deep shadows, happy expression, desaturated dystopian color palette, gritty realism" + ], + "description": "The prompt to generate an image from.", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "title": "Number of Images", + "maximum": 4, + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the image to generate. The width and height must be between 512 and 2048 pixels.", + "title": "Image Size", + "default": "landscape_4_3" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "description": "The acceleration level to use for the image generation.", + "type": "string", + "title": "Acceleration", + "examples": [ + "regular" + ], + "default": "regular" + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "png" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "description": "Guidance Scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.", + "type": "number", + "title": "Guidance Scale", + "maximum": 20, + "default": 2.5 + }, + "seed": { + "description": "The seed to use for the generation. 
If not provided, a random seed will be used.", + "type": "integer", + "title": "Seed" + }, + "enable_prompt_expansion": { + "description": "If set to true, the prompt will be expanded for better results.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": false + }, + "num_inference_steps": { + "minimum": 4, + "description": "The number of inference steps to perform.", + "type": "integer", + "title": "Number of Inference Steps", + "maximum": 50, + "default": 28 + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + } + }, + "title": "Flux2TextToImageInput", + "required": [ + "prompt" + ] + }, + "Flux2Output": { + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "examples": [ + [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/flux2_dev_t2i_output.png" + } + ] + ], + "description": "The generated images", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "seed": { + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ", + "type": "integer", + "title": "Seed" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + } + }, + "title": "Flux2T2IOutput", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "title": "Height", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "title": "Width", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "ImageFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "ImageFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2-pro", + "metadata": { + "display_name": "Flux 2 Pro", + "category": "text-to-image", + "description": "Image editing with FLUX.2 [pro] from Black Forest Labs. 
Ideal for high-quality image manipulation, style transfer, and sequential editing workflows", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:16.143Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/penguin/UfryXXm9my6IM8HsoP9FL_054c2c2953dc491996904114c6e04836.jpg", + "model_url": "https://fal.run/fal-ai/flux-2-pro", + "license_type": "commercial", + "date": "2025-11-23T00:14:31.608Z", + "group": { + "key": "Flux2-Pro", + "label": "Text to Image" + }, + "highlighted": true, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2-pro", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-2-pro queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2-pro", + "category": "text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/penguin/UfryXXm9my6IM8HsoP9FL_054c2c2953dc491996904114c6e04836.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2-pro", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2-pro/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2ProInput": { + "x-fal-order-properties": [ + "prompt", + "image_size", + "seed", + "safety_tolerance", + "enable_safety_checker", + "output_format", + "sync_mode" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "An intense close-up of knight's visor reflecting battle, sword raised, flames in background, chiaroscuro helmet shadows, hyper-detailed armor, square medieval, cinematic lighting" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." 
+ }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "landscape_4_3" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "safety_tolerance": { + "enum": [ + "1", + "2", + "3", + "4", + "5" + ], + "title": "Safety Tolerance", + "type": "string", + "description": "The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive.", + "default": "2" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed to use for the generation." + } + }, + "title": "Flux2ProTextToImageInput", + "required": [ + "prompt" + ] + }, + "Flux2ProOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/flux2_pro_t2i_output.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated images.", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for the generation." + } + }, + "title": "Flux2ProOutput", + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "ImageFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the image" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the image" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "ImageFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2-pro/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-pro/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2-pro": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2ProInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-pro/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2ProOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "bria/text-to-image/3.2", + "metadata": { + "display_name": "Bria 3.2 Text-to-Image", + "category": "text-to-image", + "description": "Bria’s Text-to-Image model, trained exclusively on licensed data for safe and risk-free commercial use. 
Excels in Text-Rendering and Aesthetics.", + "status": "active", + "tags": [ + "image generation" + ], + "updated_at": "2026-01-26T21:43:26.758Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/monkey/EZR7hDbrO6DQ_MP-BYQPt_8bb97804d8fc4f21863b457a061b5f8a.jpg", + "model_url": "https://fal.run/bria/text-to-image/3.2", + "license_type": "commercial", + "date": "2025-06-17T18:18:59.923Z", + "group": { + "key": "bria", + "label": "Bria Text to Image (3.2)" + }, + "highlighted": true, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for bria/text-to-image/3.2", + "version": "1.0.0", + "description": "The OpenAPI schema for the bria/text-to-image/3.2 queue.", + "x-fal-metadata": { + "endpointId": "bria/text-to-image/3.2", + "category": "text-to-image", + "thumbnailUrl": "https://fal.media/files/monkey/EZR7hDbrO6DQ_MP-BYQPt_8bb97804d8fc4f21863b457a061b5f8a.jpg", + "playgroundUrl": "https://fal.ai/models/bria/text-to-image/3.2", + "documentationUrl": "https://fal.ai/models/bria/text-to-image/3.2/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "TextToImage32Input": { + "title": "InputModel", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Oil painting of a fluffy, wide-eyed cat sitting upright, holding a small wooden sign reading “Feed Me.” Rich textures, dramatic brushstrokes, warm tones, and vintage charm." + ], + "title": "Prompt", + "type": "string", + "description": "Prompt for image generation." + }, + "aspect_ratio": { + "enum": [ + "1:1", + "2:3", + "3:2", + "3:4", + "4:3", + "4:5", + "5:4", + "9:16", + "16:9" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio. 
Options: 1:1, 2:3, 3:2, 3:4, 4:3, 4:5, 5:4, 9:16, 16:9", + "default": "1:1" + }, + "prompt_enhancer": { + "description": "Whether to improve the prompt.", + "type": "boolean", + "title": "Prompt Enhancer", + "default": true + }, + "sync_mode": { + "description": "If true, returns the image directly in the response (increases latency).", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "truncate_prompt": { + "description": "Whether to truncate the prompt.", + "type": "boolean", + "title": "Truncate Prompt", + "default": true + }, + "guidance_scale": { + "description": "Guidance scale for text.", + "type": "number", + "minimum": 1, + "maximum": 10, + "title": "Guidance Scale", + "default": 5 + }, + "num_inference_steps": { + "description": "Number of inference steps.", + "type": "integer", + "minimum": 20, + "maximum": 50, + "title": "Num Inference Steps", + "default": 30 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility.", + "default": 5555 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for image generation.", + "default": "Logo,Watermark,Ugly,Morbid,Extra fingers,Poorly drawn hands,Mutation,Blurry,Extra limbs,Gross proportions,Missing arms,Mutated hands,Long neck,Duplicate,Mutilated,Mutilated hands,Poorly drawn face,Deformed,Bad anatomy,Cloned face,Malformed limbs,Missing legs,Too many fingers" + } + }, + "x-fal-order-properties": [ + "prompt", + "num_inference_steps", + "seed", + "aspect_ratio", + "negative_prompt", + "guidance_scale", + "truncate_prompt", + "prompt_enhancer", + "sync_mode" + ], + "required": [ + "prompt" + ] + }, + "TextToImage32Output": { + "title": "OutputModel", + "type": "object", + "properties": { + "image": { + "description": "Generated image.", + "$ref": "#/components/schemas/Image" + } + }, + "x-fal-order-properties": [ + "image" + ], + "required": [ + "image" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height" + }, + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/bria/text-to-image/3.2/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/text-to-image/3.2/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/bria/text-to-image/3.2": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TextToImage32Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/text-to-image/3.2/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TextToImage32Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/imagen4/preview/fast", + "metadata": { + "display_name": "Imagen 4", + "category": "text-to-image", + "description": "Google’s highest quality image generation model", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:28.421Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/lion/wOlG7nOgvzkPiOfnMUAnD_1088439693f7466ba053cadb887f4191.jpg", + "model_url": "https://fal.run/fal-ai/imagen4/preview/fast", + "license_type": "commercial", + "date": "2025-06-12T01:46:22.503Z", + "group": { + "key": "imagen-4", + "label": "Imagen 4 Fast" + }, + "highlighted": true, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/imagen4/preview/fast", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/imagen4/preview/fast queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/imagen4/preview/fast", + "category": "text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/lion/wOlG7nOgvzkPiOfnMUAnD_1088439693f7466ba053cadb887f4191.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/imagen4/preview/fast", + "documentationUrl": "https://fal.ai/models/fal-ai/imagen4/preview/fast/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Imagen4PreviewFastInput": { + "title": "Imagen4TextToImageFastInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Atmospheric narrative illustration depicting a young woman with dark hair styled with a single star clip, eating dumplings at a small round table in a bustling, late-night eatery reminiscent of a vintage Hong Kong diner. The style blends clean linework with textured color fields, evoking a sense of place and story. The mood is intimate contentment amidst vibrant surroundings. Soft, warm overhead lighting from unseen hanging lamps casts gentle highlights on her face and the porcelain plate of dumplings, creating soft-edged shadows on the tiled tabletop and floor. The background features detailed elements like wall menus with stylized illustrations, a retro wall clock, steam rising from a soup bowl, and glimpses of other patrons blurred slightly for depth. The woman, viewed from a slightly high angle, crouches slightly on her chair, intensely focused on her food, rendered with expressive linework defining her pose and features. The color palette mixes muted teal wall tiles and green chairs with pops of warm yellow in her top, pink trousers, red chili oil dish, and ambient light, creating a cozy yet lively feel. Subtle paper texture or digital grain is visible throughout. Focus is sharp on the character and her immediate table setting" + ], + "maxLength": 5000, + "type": "string", + "title": "Prompt", + "minLength": 3, + "description": "The text prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Number of Images", + "description": "The number of images to generate.", + "default": 1 + }, + "aspect_ratio": { + "enum": [ + "1:1", + "16:9", + "9:16", + "4:3", + "3:4" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated image.", + "default": "1:1" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + } + }, + "x-fal-order-properties": [ + "prompt", + "num_images", + "aspect_ratio", + "output_format", + "sync_mode" + ], + "required": [ + "prompt" + ] + }, + "Imagen4PreviewFastOutput": { + "title": "Imagen4TextToImageFastOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "file_name": "MNWc2sdR8v_VLWqVp7ag8_output.png", + "content_type": "image/png", + "url": "https://v3.fal.media/files/elephant/MNWc2sdR8v_VLWqVp7ag8_output.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated images.", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "description": { + "title": "Description", + "type": "string", + "description": "The description of the generated images." + } + }, + "x-fal-order-properties": [ + "images", + "description" + ], + "required": [ + "images", + "description" + ] + }, + "ImageFile": { + "title": "ImageFile", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." 
+ }, + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the image" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the image" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/imagen4/preview/fast/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/imagen4/preview/fast/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/imagen4/preview/fast": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Imagen4PreviewFastInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/imagen4/preview/fast/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Imagen4PreviewFastOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/hidream-i1-full", + "metadata": { + "display_name": "Hidream I1 Full", + "category": "text-to-image", + "description": "HiDream-I1 full is a new open-source image generative foundation model with 17B parameters that achieves state-of-the-art image generation quality within seconds.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:56.647Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/monkey/HzdkkAoX8-PqrYZUV0zOW_1efb1b99d0e84ce78dd35e8edc69fe09.jpg", + "model_url": "https://fal.run/fal-ai/hidream-i1-full", + "license_type": "commercial", + "date": "2025-04-11T00:45:47.743Z", + "group": { + "key": "hidream-i1-full", + "label": "Text to Image" + }, + "highlighted": true, + "kind": "inference", + "stream_url": "https://fal.run/fal-ai/hidream-i1-full/stream", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/hidream-i1-full", + "version": "1.0.0", + "description": "HiDream-I1 full is a new open-source image generative foundation model with 17B parameters that achieves state-of-the-art image generation quality within seconds.", + "x-fal-metadata": { + "endpointId": "fal-ai/hidream-i1-full", + "category": "text-to-image", + "thumbnailUrl": "https://fal.media/files/monkey/HzdkkAoX8-PqrYZUV0zOW_1efb1b99d0e84ce78dd35e8edc69fe09.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/hidream-i1-full", + "documentationUrl": "https://fal.ai/models/fal-ai/hidream-i1-full/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "HidreamI1FullInput": { + "title": "TextToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "a cat holding a skateboard which has 'fal' written on it in red spray paint" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 4, + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": { + "height": 1024, + "width": 1024 + } + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "loras": { + "title": "Loras", + "type": "array", + "description": "A list of LoRAs to apply to the model. Each LoRA specifies its path, scale, and optional weight name.", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "default": [] + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance scale (CFG)", + "type": "number", + "maximum": 20, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 5 + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps to perform.", + "default": 50 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "negative_prompt": { + "examples": [ + "" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. 
moustache, blurry, low resolution).\n ", + "default": "" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "loras" + ], + "required": [ + "prompt" + ] + }, + "HidreamI1FullOutput": {}, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "title": "Scale", + "type": "number", + "maximum": 4, + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + }, + "weight_name": { + "title": "Weight Name", + "type": "string", + "description": "Name of the LoRA weight. Used only if `path` is a Hugging Face repository, and required only if you have more than 1 safetensors file in the repo." + } + }, + "x-fal-order-properties": [ + "path", + "weight_name", + "scale" + ], + "required": [ + "path" + ] + } + } + }, + "paths": { + "/fal-ai/hidream-i1-full/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hidream-i1-full/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/hidream-i1-full": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HidreamI1FullInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hidream-i1-full/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HidreamI1FullOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/hidream-i1-dev", + "metadata": { + "display_name": "Hidream I1 Dev", + "category": "text-to-image", + "description": "HiDream-I1 dev is a new open-source image generative foundation model with 17B parameters that achieves state-of-the-art image generation quality within seconds.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:56.777Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Training.jpg", + "model_url": "https://fal.run/fal-ai/hidream-i1-dev", + "license_type": "commercial", + "date": "2025-04-11T00:44:31.342Z", + "highlighted": true, + "kind": "inference", + "stream_url": "/stream", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/hidream-i1-dev", + "version": "1.0.0", + "description": "HiDream-I1 fast is a new open-source image generative foundation model with 17B parameters that achieves state-of-the-art image generation quality within seconds.", + "x-fal-metadata": { + "endpointId": "fal-ai/hidream-i1-dev", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Training.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/hidream-i1-dev", + "documentationUrl": "https://fal.ai/models/fal-ai/hidream-i1-dev/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "HidreamI1DevInput": { + "title": "DevInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "a cat holding a skateboard which has 'fal' written on it in red spray paint" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": { + "height": 1024, + "width": 1024 + } + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 28 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "negative_prompt": { + "examples": [ + "" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + "default": "" + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "image_size", + "num_inference_steps", + "seed", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format" + ], + "required": [ + "prompt" + ] + }, + "HidreamI1DevOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/hidream-i1-dev/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hidream-i1-dev/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/hidream-i1-dev": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HidreamI1DevInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hidream-i1-dev/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HidreamI1DevOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/hidream-i1-fast", + "metadata": { + "display_name": "Hidream I1 Fast", + "category": "text-to-image", + "description": "HiDream-I1 fast is a new open-source image generative foundation model with 17B parameters that achieves state-of-the-art image generation quality within 16 steps.", + "status": "active", + "tags": [ + "" + ], + "updated_at": "2026-01-26T21:43:56.905Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/zebra/Ua1DJkXRU7nz46GKt6x5R_7b333b5e26bd413aa5d65d1959878828.jpg", + "model_url": "https://fal.run/fal-ai/hidream-i1-fast", + "license_type": "commercial", + "date": "2025-04-11T00:43:21.382Z", + "highlighted": true, + "kind": "inference", + "stream_url": "/stream", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/hidream-i1-fast", + "version": "1.0.0", + "description": "HiDream-I1 fast is a new open-source image generative foundation model with 17B parameters that achieves state-of-the-art image generation quality within 16 steps.", + "x-fal-metadata": { + "endpointId": "fal-ai/hidream-i1-fast", + "category": "text-to-image", + "thumbnailUrl": "https://fal.media/files/zebra/Ua1DJkXRU7nz46GKt6x5R_7b333b5e26bd413aa5d65d1959878828.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/hidream-i1-fast", + "documentationUrl": "https://fal.ai/models/fal-ai/hidream-i1-fast/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "HidreamI1FastInput": { + "title": "FastInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "a cat holding a skateboard which has 'fal' written on it in red spray paint" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": { + "height": 1024, + "width": 1024 + } + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 16 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "negative_prompt": { + "examples": [ + "" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + "default": "" + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "image_size", + "num_inference_steps", + "seed", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format" + ], + "required": [ + "prompt" + ] + }, + "HidreamI1FastOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/hidream-i1-fast/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hidream-i1-fast/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/hidream-i1-fast": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HidreamI1FastInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hidream-i1-fast/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HidreamI1FastOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux/dev", + "metadata": { + "display_name": "FLUX.1 [dev]", + "category": "text-to-image", + "description": "FLUX.1 [dev] is a 12 billion parameter flow transformer that generates high-quality images from text. It is suitable for personal and commercial use.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:58.500Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Upscale-1.jpeg", + "model_url": "https://fal.run/fal-ai/flux/dev", + "license_type": "commercial", + "date": "2025-04-01T18:10:42.284Z", + "group": { + "key": "flux-1", + "label": "Text to Image [dev]" + }, + "highlighted": true, + "kind": "inference", + "stream_url": "https://fal.run/fal-ai/flux/dev/stream", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux/dev", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux/dev queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux/dev", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Upscale-1.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux/dev", + "documentationUrl": "https://fal.ai/models/fal-ai/flux/dev/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxDevInput": { + "title": "BaseInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Extreme close-up of a single tiger eye, direct frontal view. Detailed iris and pupil. Sharp focus on eye texture and color. 
Natural lighting to capture authentic eye shine and depth. The word \"FLUX\" is painted over it in big, white brush strokes with visible texture." + ], + "description": "The prompt to generate an image from.", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "title": "Num Images", + "maximum": 4, + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image.", + "title": "Image Size", + "default": "landscape_4_3" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "description": "The speed of the generation. The higher the speed, the faster the generation.", + "type": "string", + "title": "Acceleration", + "default": "none" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "jpeg" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "title": "Seed" + }, + "guidance_scale": { + "minimum": 1, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "type": "number", + "title": "Guidance scale (CFG)", + "maximum": 20, + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 1, + "description": "The number of inference steps to perform.", + "type": "integer", + "title": "Num Inference Steps", + "maximum": 50, + "default": 28 + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "acceleration" + ], + "required": [ + "prompt" + ] + }, + "FluxDevOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "description": "The generated image files info.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "seed": { + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "title": "Height", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "title": "Width", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux/dev/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux/dev/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux/dev": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxDevInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux/dev/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxDevOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ideogram/v2", + "metadata": { + "display_name": "Ideogram V2", + "category": "text-to-image", + "description": "Generate high-quality images, posters, and logos with Ideogram V2. Features exceptional typography handling and realistic outputs optimized for commercial and creative use.", + "status": "active", + "tags": [ + "realism", + "typography" + ], + "updated_at": "2026-01-26T21:44:10.848Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/monkey/8WNQdDJ1eYpnl12jwhCjT_c9879f96533a47ae82e07946a67b0c8c.jpg", + "model_url": "https://fal.run/fal-ai/ideogram/v2", + "license_type": "commercial", + "date": "2024-12-04T00:00:00.000Z", + "group": { + "key": "ideogram", + "label": "Text to Image" + }, + "highlighted": true, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ideogram/v2", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ideogram/v2 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ideogram/v2", + "category": "text-to-image", + "thumbnailUrl": "https://fal.media/files/monkey/8WNQdDJ1eYpnl12jwhCjT_c9879f96533a47ae82e07946a67b0c8c.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ideogram/v2", + "documentationUrl": "https://fal.ai/models/fal-ai/ideogram/v2/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "IdeogramV2Input": { + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "expand_prompt", + "seed", + "style", + "sync_mode", + "negative_prompt" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A comic style illustration of a skeleton sitting on a toilet in a bathroom. The bathroom has a Halloween decoration with a pumpkin jack-o-lantern and bats flying around. There is a text above the skeleton that says \"Just Waiting for Halloween with Ideogram 2.0 at fal.ai\"" + ], + "title": "Prompt", + "type": "string" + }, + "aspect_ratio": { + "enum": [ + "10:16", + "16:10", + "9:16", + "16:9", + "4:3", + "3:4", + "1:1", + "1:3", + "3:1", + "3:2", + "2:3" + ], + "description": "The aspect ratio of the generated image", + "type": "string", + "title": "Aspect Ratio", + "default": "1:1" + }, + "style": { + "enum": [ + "auto", + "general", + "realistic", + "design", + "render_3D", + "anime" + ], + "description": "The style of the generated image", + "type": "string", + "title": "Style", + "default": "auto" + }, + "expand_prompt": { + "description": "Whether to expand the prompt with MagicPrompt functionality.", + "type": "boolean", + "title": "Expand Prompt", + "default": true + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "Seed for the random number generator", + "title": "Seed" + }, + "negative_prompt": { + "description": "A negative prompt to avoid in the generated image", + "type": "string", + "title": "Negative Prompt", + "default": "" + } + }, + "title": "TextToImageInput", + "required": [ + "prompt" + ] + }, + "IdeogramV2Output": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://fal.media/files/monkey/cNaoxPl0YAWYb-QVBvO9F_image.png" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/File" + } + }, + "seed": { + "examples": [ + 123456 + ], + "title": "Seed", + "type": "integer", + "description": "Seed used for the random number generator" + } + }, + "title": "Output", + "required": [ + "images", + "seed" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file." 
+ }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ideogram/v2/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v2/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v2": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdeogramV2Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v2/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdeogramV2Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/stable-diffusion-v35-large", + "metadata": { + "display_name": "Stable Diffusion 3.5 Large", + "category": "text-to-image", + "description": "Stable Diffusion 3.5 Large is a Multimodal Diffusion Transformer (MMDiT) text-to-image model that features improved performance in image quality, typography, complex prompt understanding, and resource-efficiency.", + "status": "active", + "tags": [ + "diffusion", + "typography", + "style" + ], + "updated_at": "2026-01-26T21:44:12.216Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/zebra/Bi6nsyNxslnu2SfI3jtkZ_e52ae7331ca94401bce20e695e3838a8.jpg", + "model_url": "https://fal.run/fal-ai/stable-diffusion-v35-large", + "github_url": "https://stability.ai/license", + "license_type": "commercial", + "date": "2024-10-27T00:00:00.000Z", + "group": { + "key": "sd35-large", + "label": "Base" + }, + "highlighted": true, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/stable-diffusion-v35-large", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/stable-diffusion-v35-large queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/stable-diffusion-v35-large", + "category": 
"text-to-image", + "thumbnailUrl": "https://fal.media/files/zebra/Bi6nsyNxslnu2SfI3jtkZ_e52ae7331ca94401bce20e695e3838a8.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/stable-diffusion-v35-large", + "documentationUrl": "https://fal.ai/models/fal-ai/stable-diffusion-v35-large/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "StableDiffusionV35LargeInput": { + "title": "TextToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A dreamlike Japanese garden in perpetual twilight, bathed in bioluminescent cherry blossoms that emit a soft pink-purple glow. Floating paper lanterns drift lazily through the scene, their warm light creating dancing reflections in a mirror-like koi pond. Ethereal mist weaves between ancient stone pathways lined with glowing mushrooms in pastel blues and purples. A traditional wooden bridge arches gracefully over the water, dusted with fallen petals that sparkle like stardust. The scene is captured through a cinematic lens with perfect bokeh, creating an otherworldly atmosphere. In the background, a crescent moon hangs impossibly large in the sky, surrounded by a sea of stars and auroral wisps in teal and violet. Crystal formations emerge from the ground, refracting the ambient light into rainbow prisms. The entire composition follows the golden ratio, with moody film-like color grading reminiscent of Studio Ghibli, enhanced by volumetric god rays filtering through the luminous foliage. 8K resolution, masterful photography, hyperdetailed, magical realism." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "maximum": 4, + "title": "Num Images", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image. Defaults to landscape_4_3 if no controlnet has been passed, otherwise defaults to the size of the controlnet conditioning image." 
+ }, + "controlnet": { + "title": "Controlnet", + "description": "\n ControlNet for inference.\n ", + "allOf": [ + { + "$ref": "#/components/schemas/ControlNet" + } + ] + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "ip_adapter": { + "title": "Ip Adapter", + "description": "\n IP-Adapter to use during inference.\n ", + "allOf": [ + { + "$ref": "#/components/schemas/IPAdapter" + } + ] + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "loras": { + "description": "\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "examples": [], + "title": "Loras", + "default": [] + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "num_inference_steps": { + "minimum": 1, + "description": "The number of inference steps to perform.", + "type": "integer", + "maximum": 50, + "title": "Num Inference Steps", + "default": 28 + }, + "guidance_scale": { + "minimum": 0, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "type": "number", + "maximum": 20, + "title": "Guidance scale (CFG)", + "default": 3.5 + }, + "negative_prompt": { + "examples": [ + "" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + "default": "" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "controlnet", + "image_size", + "loras", + "ip_adapter" + ], + "required": [ + "prompt" + ] + }, + "StableDiffusionV35LargeOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "ControlNet": { + "title": "ControlNet", + "type": "object", + "properties": { + "conditioning_scale": { + "minimum": 0, + "description": "\n The scale of the control net weight. This is used to scale the control net weight\n before merging it with the base model.\n ", + "type": "number", + "maximum": 2, + "title": "Conditioning Scale", + "default": 1 + }, + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the control net weights." + }, + "control_image_url": { + "title": "Control Image Url", + "type": "string", + "description": "URL of the image to be used as the control image." + }, + "start_percentage": { + "minimum": 0, + "description": "\n The percentage of the image to start applying the controlnet in terms of the total timesteps.\n ", + "type": "number", + "maximum": 1, + "title": "Start Percentage", + "default": 0 + }, + "end_percentage": { + "minimum": 0, + "description": "\n The percentage of the image to end applying the controlnet in terms of the total timesteps.\n ", + "type": "number", + "maximum": 1, + "title": "End Percentage", + "default": 1 + } + }, + "x-fal-order-properties": [ + "path", + "control_image_url", + "conditioning_scale", + "start_percentage", + "end_percentage" + ], + "required": [ + "path", + "control_image_url" + ] + }, + "IPAdapter": { + "title": "IPAdapter", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "Hugging Face path to the IP-Adapter" + }, + "mask_threshold": { + "minimum": 0.01, + "description": "Threshold for mask.", + "type": "number", + "maximum": 0.99, + "title": "Mask Threshold", + "default": 0.5 + }, + "image_encoder_weight_name": { + "title": "Image Encoder Weight Name", + "type": "string", + "description": "Name of the image encoder." + }, + "image_url": { + "title": "Image Url", + "type": "string", + "description": "URL of Image for IP-Adapter conditioning. " + }, + "mask_image_url": { + "title": "Mask Image Url", + "type": "string", + "description": "URL of the mask for the control image." + }, + "image_encoder_subfolder": { + "title": "Image Encoder Subfolder", + "type": "string", + "description": "Subfolder in which the image encoder weights exist." 
+ }, + "subfolder": { + "title": "Subfolder", + "type": "string", + "description": "Subfolder in which the ip_adapter weights exist" + }, + "scale": { + "title": "Scale", + "type": "number", + "description": "Scale for ip adapter." + }, + "image_encoder_path": { + "title": "Image Encoder Path", + "type": "string", + "description": "Path to the Image Encoder for the IP-Adapter, for example 'openai/clip-vit-large-patch14'" + }, + "weight_name": { + "title": "Weight Name", + "type": "string", + "description": "Name of the safetensors file containing the ip-adapter weights" + } + }, + "x-fal-order-properties": [ + "path", + "subfolder", + "weight_name", + "image_encoder_path", + "image_encoder_subfolder", + "image_encoder_weight_name", + "image_url", + "mask_image_url", + "mask_threshold", + "scale" + ], + "required": [ + "path", + "image_encoder_path", + "image_url", + "scale" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "type": "number", + "maximum": 4, + "title": "Scale", + "default": 1 + } + }, + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/stable-diffusion-v35-large/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/stable-diffusion-v35-large/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/stable-diffusion-v35-large": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StableDiffusionV35LargeInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/stable-diffusion-v35-large/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StableDiffusionV35LargeOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-general", + "metadata": { + "display_name": "FLUX.1 [dev] with Controlnets and Loras", + "category": "text-to-image", + "description": "A versatile endpoint for the FLUX.1 [dev] model that supports multiple AI extensions including LoRA, ControlNet conditioning, and IP-Adapter integration, enabling comprehensive control over image generation through various guidance methods.", + "status": "active", + "tags": [ + "lora", + "controlnet", + "ip-adapter" + ], + "updated_at": "2026-01-26T21:44:14.060Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/rabbit/SW4VnooC-y1J5oHp72c35_ef2d274c84d644769fec449d83da838f.jpg", + "model_url": "https://fal.run/fal-ai/flux-general", + "license_type": "commercial", + "date": "2024-08-21T00:00:00.000Z", + "group": { + "key": "flux-general", + "label": "Text to Image" + }, + "highlighted": true, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/flux-lora-fast-training", + "fal-ai/flux-lora-portrait-trainer", + "fal-ai/flux-lora-general-training" + ], + "inference_endpoint_ids": [ + "fal-ai/flux-lora-fast-training", + "fal-ai/flux-lora-portrait-trainer", + "fal-ai/flux-lora-general-training" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-general", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-general queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-general", + "category": "text-to-image", + "thumbnailUrl": "https://fal.media/files/rabbit/SW4VnooC-y1J5oHp72c35_ef2d274c84d644769fec449d83da838f.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-general", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-general/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxGeneralInput": { + "title": "TextToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Extreme close-up of a single tiger eye, direct frontal view. Detailed iris and pupil. Sharp focus on eye texture and color. Natural lighting to capture authentic eye shine and depth. The word \"FLUX\" is painted over it in big, white brush strokes with visible texture." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "nag_end": { + "maximum": 1, + "type": "number", + "title": "Proportion of steps to apply NAG", + "description": "\n The proportion of steps to apply NAG. After the specified proportion\n of steps has been iterated, the remaining steps will use original\n attention processors in FLUX.\n ", + "exclusiveMinimum": 0, + "default": 0.25 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image." + }, + "control_loras": { + "description": "\n The LoRAs to use for the image generation which use a control image. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/ControlLoraWeight" + }, + "examples": [], + "title": "Control Loras", + "default": [] + }, + "loras": { + "description": "\n The LoRAs to use for the image generation. 
You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "examples": [], + "title": "Loras", + "default": [] + }, + "scheduler": { + "enum": [ + "euler", + "dpmpp_2m" + ], + "title": "Scheduler", + "type": "string", + "description": "Scheduler for the denoising process.", + "default": "euler" + }, + "easycontrols": { + "title": "Easycontrols", + "type": "array", + "description": "\n EasyControl Inputs to use for image generation.\n ", + "items": { + "$ref": "#/components/schemas/EasyControlWeight" + }, + "default": [] + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 3.5 + }, + "real_cfg_scale": { + "minimum": 0, + "maximum": 5, + "type": "number", + "title": "Real CFG scale", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 3.5 + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "use_cfg_zero": { + "title": "Use CFG-Zero-Init", + "type": "boolean", + "description": "\n Uses CFG-zero init sampling as in https://arxiv.org/abs/2503.18886.\n ", + "default": false + }, + "fill_image": { + "title": "Fill Image", + "description": "Use an image input to influence the generation. Can be used to fill images in masked areas.", + "allOf": [ + { + "$ref": "#/components/schemas/ImageFillInput" + } + ] + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "sigma_schedule": { + "enum": [ + "sgm_uniform" + ], + "title": "Sigma Schedule", + "type": "string", + "description": "Sigmas schedule for the denoising process." + }, + "reference_end": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Reference End", + "description": "\n The percentage of the total timesteps when the reference guidance is to be ended.\n ", + "default": 1 + }, + "reference_strength": { + "minimum": -3, + "maximum": 3, + "type": "number", + "title": "Reference Strength", + "description": "Strength of reference_only generation. Only used if a reference image is provided.", + "default": 0.65 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "nag_scale": { + "maximum": 10, + "type": "number", + "title": "NAG scale", + "description": "\n The scale for NAG. 
Higher values will result in a image that is more distant\n to the negative prompt.\n ", + "exclusiveMinimum": 1, + "default": 3 + }, + "reference_image_url": { + "title": "Reference Image Url", + "type": "string", + "description": "URL of Image for Reference-Only" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "controlnet_unions": { + "title": "Controlnet Unions", + "type": "array", + "description": "\n The controlnet unions to use for the image generation. Only one controlnet is supported at the moment.\n ", + "items": { + "$ref": "#/components/schemas/ControlNetUnion" + }, + "default": [] + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "\n Negative prompt to steer the image generation away from unwanted features.\n By default, we will be using NAG for processing the negative prompt.\n ", + "default": "" + }, + "nag_tau": { + "title": "NAG Tau", + "type": "number", + "description": "\n The tau for NAG. Controls the normalization of the hidden state.\n Higher values will result in a less aggressive normalization,\n but may also lead to unexpected changes with respect to the original image.\n Not recommended to change this value.\n ", + "exclusiveMinimum": 0, + "default": 2.5 + }, + "num_images": { + "minimum": 1, + "maximum": 10, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate. This is always set to 1 for streaming output.", + "default": 1 + }, + "use_beta_schedule": { + "title": "Use Beta Schedule", + "type": "boolean", + "description": "Specifies whether beta sigmas ought to be used.", + "default": false + }, + "ip_adapters": { + "title": "Ip Adapters", + "type": "array", + "description": "\n IP-Adapter to use for image generation.\n ", + "items": { + "$ref": "#/components/schemas/IPAdapter" + }, + "default": [] + }, + "base_shift": { + "minimum": 0.01, + "maximum": 5, + "type": "number", + "title": "Base Shift", + "description": "Base shift for the scheduled timesteps", + "default": 0.5 + }, + "nag_alpha": { + "maximum": 1, + "type": "number", + "title": "NAG alpha", + "description": "\n The alpha value for NAG. This value is used as a final weighting\n factor for steering the normalized guidance (positive and negative prompts)\n in the direction of the positive prompt. Higher values will result in less\n steering on the normalized guidance where lower values will result in\n considering the positive prompt guidance more.\n ", + "exclusiveMinimum": 0, + "default": 0.25 + }, + "use_real_cfg": { + "title": "Use Real CFG", + "type": "boolean", + "description": "\n Uses classical CFG as in SD1.5, SDXL, etc. Increases generation times and price when set to be true.\n If using XLabs IP-Adapter v1, this will be turned on!.\n ", + "default": false + }, + "max_shift": { + "minimum": 0.01, + "maximum": 5, + "type": "number", + "title": "Max Shift", + "description": "Max shift for the scheduled timesteps", + "default": 1.15 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 28 + }, + "controlnets": { + "title": "Controlnets", + "type": "array", + "description": "\n The controlnets to use for the image generation. 
Only one controlnet is supported at the moment.\n ", + "items": { + "$ref": "#/components/schemas/ControlNet" + }, + "default": [] + }, + "reference_start": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Reference Start", + "description": "\n The percentage of the total timesteps when the reference guidance is to bestarted.\n ", + "default": 0 + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "loras", + "control_loras", + "controlnets", + "controlnet_unions", + "ip_adapters", + "easycontrols", + "fill_image", + "guidance_scale", + "real_cfg_scale", + "use_real_cfg", + "use_cfg_zero", + "sync_mode", + "num_images", + "enable_safety_checker", + "reference_image_url", + "reference_strength", + "reference_start", + "reference_end", + "base_shift", + "max_shift", + "output_format", + "use_beta_schedule", + "sigma_schedule", + "scheduler", + "negative_prompt", + "nag_scale", + "nag_tau", + "nag_alpha", + "nag_end" + ], + "required": [ + "prompt" + ] + }, + "FluxGeneralOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "ControlLoraWeight": { + "title": "ControlLoraWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "anyOf": [ + { + "type": "object" + }, + { + "minimum": -4, + "maximum": 4, + "type": "number" + } + ], + "title": "Scale", + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model. Providing a dictionary as {\"layer_name\":layer_scale} allows per-layer lora scale settings. Layers with no scale provided will have scale 1.0.\n ", + "default": 1 + }, + "control_image_url": { + "title": "Control Image Url", + "type": "string", + "description": "URL of the image to be used as the control image." 
+ }, + "preprocess": { + "enum": [ + "canny", + "depth", + "None" + ], + "title": "Preprocess", + "type": "string", + "description": "Type of preprocessing to apply to the input image.", + "default": "None" + } + }, + "x-fal-order-properties": [ + "path", + "scale", + "control_image_url", + "preprocess" + ], + "required": [ + "path", + "control_image_url" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "anyOf": [ + { + "type": "object" + }, + { + "minimum": -4, + "maximum": 4, + "type": "number" + } + ], + "title": "Scale", + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model. Providing a dictionary as {\"layer_name\":layer_scale} allows per-layer lora scale settings. Layers with no scale provided will have scale 1.0.\n ", + "default": 1 + } + }, + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "EasyControlWeight": { + "title": "EasyControlWeight", + "type": "object", + "properties": { + "scale": { + "minimum": 0, + "maximum": 2, + "type": "number", + "title": "Scale", + "description": "Scale for the control method.", + "default": 1 + }, + "image_control_type": { + "enum": [ + "subject", + "spatial" + ], + "title": "Image Control Type", + "type": "string", + "description": "Control type of the image. Must be one of `spatial` or `subject`." + }, + "control_method_url": { + "examples": [ + "canny", + "depth", + "hedsketch", + "inpainting", + "pose", + "seg", + "subject", + "ghibli" + ], + "title": "Control Method Url", + "type": "string", + "description": "URL to safetensor weights of control method to be applied. Can also be one of `canny`, `depth`, `hedsketch`, `inpainting`, `pose`, `seg`, `subject`, `ghibli` " + }, + "image_url": { + "title": "Image Url", + "type": "string", + "description": "URL of an image to use as a control" + } + }, + "x-fal-order-properties": [ + "control_method_url", + "scale", + "image_url", + "image_control_type" + ], + "required": [ + "control_method_url", + "image_url", + "image_control_type" + ] + }, + "ImageFillInput": { + "title": "ImageFillInput", + "type": "object", + "properties": { + "fill_image_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "type": "string" + } + } + ], + "title": "Fill Image Url", + "description": "URLs of images to be filled for redux prompting", + "default": [] + } + }, + "x-fal-order-properties": [ + "fill_image_url" + ] + }, + "ControlNetUnion": { + "title": "ControlNetUnion", + "type": "object", + "properties": { + "controls": { + "title": "Controls", + "type": "array", + "description": "The control images and modes to use for the control net.", + "items": { + "$ref": "#/components/schemas/ControlNetUnionInput" + } + }, + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the control net weights." + }, + "variant": { + "title": "Variant", + "type": "string", + "description": "The optional variant if a Hugging Face repo key is used." + }, + "config_url": { + "title": "Config Url", + "type": "string", + "description": "optional URL to the controlnet config.json file." 
+ } + }, + "x-fal-order-properties": [ + "path", + "config_url", + "variant", + "controls" + ], + "required": [ + "path", + "controls" + ] + }, + "IPAdapter": { + "title": "IPAdapter", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "Hugging Face path to the IP-Adapter" + }, + "mask_threshold": { + "minimum": 0.01, + "maximum": 0.99, + "type": "number", + "title": "Mask Threshold", + "description": "Threshold for mask.", + "default": 0.5 + }, + "image_encoder_weight_name": { + "title": "Image Encoder Weight Name", + "type": "string", + "description": "Name of the image encoder." + }, + "image_encoder_subfolder": { + "title": "Image Encoder Subfolder", + "type": "string", + "description": "Subfolder in which the image encoder weights exist." + }, + "image_url": { + "title": "Image Url", + "type": "string", + "description": "URL of Image for IP-Adapter conditioning. " + }, + "mask_image_url": { + "title": "Mask Image Url", + "type": "string", + "description": "URL of the mask for the control image." + }, + "subfolder": { + "title": "Subfolder", + "type": "string", + "description": "Subfolder in which the ip_adapter weights exist" + }, + "scale": { + "title": "Scale", + "type": "number", + "description": "Scale for ip adapter." + }, + "image_encoder_path": { + "title": "Image Encoder Path", + "type": "string", + "description": "Path to the Image Encoder for the IP-Adapter, for example 'openai/clip-vit-large-patch14'" + }, + "weight_name": { + "title": "Weight Name", + "type": "string", + "description": "Name of the safetensors file containing the ip-adapter weights" + } + }, + "x-fal-order-properties": [ + "path", + "subfolder", + "weight_name", + "image_encoder_path", + "image_encoder_subfolder", + "image_encoder_weight_name", + "image_url", + "mask_image_url", + "mask_threshold", + "scale" + ], + "required": [ + "path", + "image_encoder_path", + "image_url", + "scale" + ] + }, + "ControlNet": { + "title": "ControlNet", + "type": "object", + "properties": { + "conditioning_scale": { + "minimum": 0, + "maximum": 2, + "type": "number", + "title": "Conditioning Scale", + "description": "\n The scale of the control net weight. This is used to scale the control net weight\n before merging it with the base model.\n ", + "default": 1 + }, + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the control net weights." + }, + "mask_threshold": { + "minimum": 0.01, + "maximum": 0.99, + "type": "number", + "title": "Mask Threshold", + "description": "Threshold for mask.", + "default": 0.5 + }, + "end_percentage": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "End Percentage", + "description": "\n The percentage of the image to end applying the controlnet in terms of the total timesteps.\n ", + "default": 1 + }, + "config_url": { + "title": "Config Url", + "type": "string", + "description": "optional URL to the controlnet config.json file." + }, + "mask_image_url": { + "title": "Mask Image Url", + "type": "string", + "description": "URL of the mask for the control image.", + "nullable": true + }, + "variant": { + "title": "Variant", + "type": "string", + "description": "The optional variant if a Hugging Face repo key is used." + }, + "control_image_url": { + "title": "Control Image Url", + "type": "string", + "description": "URL of the image to be used as the control image." 
+ }, + "start_percentage": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Start Percentage", + "description": "\n The percentage of the image to start applying the controlnet in terms of the total timesteps.\n ", + "default": 0 + } + }, + "x-fal-order-properties": [ + "path", + "config_url", + "variant", + "control_image_url", + "mask_image_url", + "mask_threshold", + "conditioning_scale", + "start_percentage", + "end_percentage" + ], + "required": [ + "path", + "control_image_url" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + }, + "ControlNetUnionInput": { + "title": "ControlNetUnionInput", + "type": "object", + "properties": { + "conditioning_scale": { + "minimum": 0, + "maximum": 2, + "type": "number", + "title": "Conditioning Scale", + "description": "\n The scale of the control net weight. This is used to scale the control net weight\n before merging it with the base model.\n ", + "default": 1 + }, + "mask_threshold": { + "minimum": 0.01, + "maximum": 0.99, + "type": "number", + "title": "Mask Threshold", + "description": "Threshold for mask.", + "default": 0.5 + }, + "end_percentage": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "End Percentage", + "description": "\n The percentage of the image to end applying the controlnet in terms of the total timesteps.\n ", + "default": 1 + }, + "mask_image_url": { + "title": "Mask Image Url", + "type": "string", + "description": "URL of the mask for the control image.", + "nullable": true + }, + "control_image_url": { + "title": "Control Image Url", + "type": "string", + "description": "URL of the image to be used as the control image." + }, + "control_mode": { + "enum": [ + "canny", + "tile", + "depth", + "blur", + "pose", + "gray", + "low-quality" + ], + "title": "Control Mode", + "type": "string", + "description": "Control Mode for Flux Controlnet Union. Supported values are:\n - canny: Uses the edges for guided generation.\n - tile: Uses the tiles for guided generation.\n - depth: Utilizes a grayscale depth map for guided generation.\n - blur: Adds a blur to the image.\n - pose: Uses the pose of the image for guided generation.\n - gray: Converts the image to grayscale.\n - low-quality: Converts the image to a low-quality image." 
+ }, + "start_percentage": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Start Percentage", + "description": "\n The percentage of the image to start applying the controlnet in terms of the total timesteps.\n ", + "default": 0 + } + }, + "x-fal-order-properties": [ + "control_image_url", + "mask_image_url", + "control_mode", + "conditioning_scale", + "mask_threshold", + "start_percentage", + "end_percentage" + ], + "required": [ + "control_image_url", + "control_mode" + ] + } + } + }, + "paths": { + "/fal-ai/flux-general/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-general/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-general": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxGeneralInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-general/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxGeneralOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-lora", + "metadata": { + "display_name": "FLUX.1 [dev] with LoRAs", + "category": "text-to-image", + "description": "Super fast endpoint for the FLUX.1 [dev] model with LoRA support, enabling rapid and high-quality image generation using pre-trained LoRA adaptations for personalization, specific styles, brand identities, and product-specific outputs.", + "status": "active", + "tags": [ + "lora", + "personalization" + ], + "updated_at": "2026-01-26T21:44:14.318Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/elephant/RqIQsOY3cgQMMtCedJKlf_c2fc262516d24b94afdc17a747292710.jpg", + "model_url": "https://fal.run/fal-ai/flux-lora", + "license_type": "commercial", + "date": "2024-08-01T00:00:00.000Z", + "group": { + "key": "flux-lora", + "label": "Text to Image" + }, + "highlighted": true, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + 
"fal-ai/flux-lora-fast-training", + "fal-ai/turbo-flux-trainer", + "fal-ai/flux-lora-portrait-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/flux-lora-fast-training", + "fal-ai/turbo-flux-trainer", + "fal-ai/flux-lora-portrait-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-lora", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-lora queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-lora", + "category": "text-to-image", + "thumbnailUrl": "https://fal.media/files/elephant/RqIQsOY3cgQMMtCedJKlf_c2fc262516d24b94afdc17a747292710.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-lora", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-lora/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxLoraInput": { + "title": "TextToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Extreme close-up of a single tiger eye, direct frontal view. Detailed iris and pupil. Sharp focus on eye texture and color. Natural lighting to capture authentic eye shine and depth. The word \"FLUX\" is painted over it in big, white brush strokes with visible texture." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate. This is always set to 1 for streaming output.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "landscape_4_3" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "loras": { + "description": "\n The LoRAs to use for the image generation. 
You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "examples": [], + "title": "Loras", + "default": [] + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 35, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 28 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "loras", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format" + ], + "required": [ + "prompt" + ] + }, + "FluxLoraOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Scale", + "description": "\n The scale of the LoRA weight. 
This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + } + }, + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-lora/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-lora/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-lora": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxLoraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-lora/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxLoraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/z-image/base/lora", + "metadata": { + "display_name": "Z Image Base (LoRA)", + "category": "text-to-image", + "description": "LoRA endpoint for Z-Image, the foundation model of the Z-Image family.", + "status": "active", + "tags": [ + "z-image", + "base", + "lora" + ], + "updated_at": "2026-01-27T18:16:45.309Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8c1883/2G6Kn_ounoUBav_-ayY6k_0f72c0a77d9c4a14a38350dadc12806b.jpg", + "model_url": "https://fal.run/fal-ai/z-image/base/lora", + "license_type": "commercial", + "date": "2026-01-27T17:26:38.446Z", + "group": { + "key": "z-image-turbo", + "label": "Base (LoRA)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/z-image/base/lora", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/z-image/base/lora queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/z-image/base/lora", + "category": "text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8c1883/2G6Kn_ounoUBav_-ayY6k_0f72c0a77d9c4a14a38350dadc12806b.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/z-image/base/lora", + "documentationUrl": "https://fal.ai/models/fal-ai/z-image/base/lora/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ZImageBaseLoraInput": { + "title": "ZImageBaseTextToImageLoRAInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Grandmother knitting by a window, an empty chair by her" + ], + "description": "The prompt to generate an image from.", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "maximum": 4, + "title": "Number of Images", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image.", + "title": "Image Size", + "default": "landscape_4_3" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "description": "The acceleration level to use.", + "type": "string", + "title": "Acceleration", + "default": "regular" + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "png" + }, + "loras": { + "description": "List of LoRA weights to apply (maximum 3).", + "type": "array", + "title": "Loras", + "items": { + "$ref": "#/components/schemas/LoRAInput" + }, + "default": [] + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "guidance_scale": { + "minimum": 1, + "description": "The guidance scale to use for the image generation.", + "type": "number", + "maximum": 20, + "title": "Guidance Scale", + "default": 4 + }, + "num_inference_steps": { + "minimum": 1, + "description": "The number of inference steps to perform.", + "type": "integer", + "maximum": 50, + "title": "Num Inference Steps", + "default": 28 + }, + "seed": { + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "type": "integer", + "title": "Seed" + }, + "negative_prompt": { + "description": "The negative prompt to use for the image generation.", + "type": "string", + "title": "Negative Prompt", + "default": "" + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "acceleration", + "guidance_scale", + "negative_prompt", + "loras" + ], + "required": [ + "prompt" + ] + }, + "ZImageBaseLoraOutput": { + "title": "ZImageBaseOutput", + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "examples": [ + [ + { + "height": 768, + "content_type": "image/png", + "url": "https://v3b.fal.media/files/b/0a8c18a5/1z0k9F1YLgz4qCr64jCBa_r2uqRyDg.png", + "width": 1024 + } + ] + ], + "description": "The generated image files info.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "seed": { + "description": "Seed of the generated Image. 
It will be the same value of the one passed in the input or the randomly generated that was used in case none was passed.", + "type": "integer", + "title": "Seed" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "description": "The timings of the generation process.", + "title": "Timings" + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Height", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Width", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "LoRAInput": { + "description": "LoRA weight configuration.", + "type": "object", + "properties": { + "path": { + "description": "URL, HuggingFace repo ID (owner/repo) to lora weights.", + "type": "string", + "title": "Path" + }, + "scale": { + "minimum": 0, + "description": "Scale factor for LoRA application (0.0 to 4.0).", + "type": "number", + "maximum": 4, + "title": "Scale", + "default": 1 + } + }, + "title": "LoRAInput", + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "ImageFile": { + "title": "ImageFile", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/z-image/base/lora/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/z-image/base/lora/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/z-image/base/lora": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ZImageBaseLoraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/z-image/base/lora/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ZImageBaseLoraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/z-image/base", + "metadata": { + "display_name": "Z Image Base", + "category": "text-to-image", + "description": "Z-Image is the foundation model of the Z-Image family, engineered for good quality, robust generative diversity, broad stylistic coverage, and precise prompt adherence.", + "status": "active", + "tags": [ + "z-image", + "base" + ], + "updated_at": "2026-01-27T18:16:32.039Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8c1871/Kxq-m63uqtEhDgll_5W63_c53ec36259f8446495a23b09592f2f09.jpg", + "model_url": "https://fal.run/fal-ai/z-image/base", + "license_type": "commercial", + "date": "2026-01-27T17:24:16.384Z", + "group": { + "key": "z-image-turbo", + "label": "Base" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/z-image/base", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/z-image/base queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/z-image/base", + "category": "text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8c1871/Kxq-m63uqtEhDgll_5W63_c53ec36259f8446495a23b09592f2f09.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/z-image/base", + "documentationUrl": "https://fal.ai/models/fal-ai/z-image/base/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ZImageBaseInput": { + "title": "ZImageBaseTextToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Grandmother knitting by a window, an empty chair by her" + ], + "description": "The prompt to generate an image from.", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "maximum": 4, + "title": "Number of Images", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image.", + "title": "Image Size", + "default": "landscape_4_3" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "description": "The acceleration level to use.", + "type": "string", + "title": "Acceleration", + "default": "regular" + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "png" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "guidance_scale": { + "minimum": 1, + "description": "The guidance scale to use for the image generation.", + "type": "number", + "maximum": 20, + "title": "Guidance Scale", + "default": 4 + }, + "num_inference_steps": { + "minimum": 1, + "description": "The number of inference steps to perform.", + "type": "integer", + "maximum": 50, + "title": "Num Inference Steps", + "default": 28 + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "negative_prompt": { + "description": "The negative prompt to use for the image generation.", + "type": "string", + "title": "Negative Prompt", + "default": "" + }, + "seed": { + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "acceleration", + "guidance_scale", + "negative_prompt" + ], + "required": [ + "prompt" + ] + }, + "ZImageBaseOutput": { + "title": "ZImageBaseOutput", + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "examples": [ + [ + { + "height": 768, + "content_type": "image/png", + "url": 
"https://v3b.fal.media/files/b/0a8c18a5/1z0k9F1YLgz4qCr64jCBa_r2uqRyDg.png", + "width": 1024 + } + ] + ], + "description": "The generated image files info.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "seed": { + "description": "Seed of the generated Image. It will be the same value of the one passed in the input or the randomly generated that was used in case none was passed.", + "type": "integer", + "title": "Seed" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "description": "The timings of the generation process.", + "title": "Timings" + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Height", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Width", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "ImageFile": { + "title": "ImageFile", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/z-image/base/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/z-image/base/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/z-image/base": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ZImageBaseInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/z-image/base/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ZImageBaseOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2/klein/9b/base/lora", + "metadata": { + "display_name": "Flux 2 [klein] 9B Base Lora", + "category": "text-to-image", + "description": "Text-to-image generation with LoRA support for FLUX.2 [klein] 9B Base from Black Forest Labs. 
Custom style adaptation and fine-tuned model variations.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:31.060Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8b09ab/3my7lbot7weIdE03-d5xc_2da235d3c4d14924b2c7a03f47e1bd65.jpg", + "model_url": "https://fal.run/fal-ai/flux-2/klein/9b/base/lora", + "license_type": "commercial", + "date": "2026-01-19T16:34:00.337Z", + "group": { + "key": "flux-2-klein-lora", + "label": "9B Base Text to Image (LoRa)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/flux-2-klein-9b-base-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/flux-2-klein-9b-base-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2/klein/9b/base/lora", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-2/klein/9b/base/lora queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2/klein/9b/base/lora", + "category": "text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8b09ab/3my7lbot7weIdE03-d5xc_2da235d3c4d14924b2c7a03f47e1bd65.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2/klein/9b/base/lora", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2/klein/9b/base/lora/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2Klein9bBaseLoraInput": { + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "guidance_scale", + "seed", + "num_inference_steps", + "image_size", + "num_images", + "acceleration", + "sync_mode", + "enable_safety_checker", + "output_format", + "loras" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A serene Japanese garden with cherry blossoms, koi pond, and traditional wooden bridge at golden hour" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." 
+ }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Number of Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the image to generate.", + "default": "landscape_4_3" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use for image generation.", + "default": "regular" + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "loras": { + "title": "Loras", + "type": "array", + "description": "List of LoRA weights to apply (maximum 3).", + "items": { + "$ref": "#/components/schemas/fal-ai_flux-2-klein_LoRAInput" + }, + "default": [] + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI. Output is not stored when this is True.", + "default": false + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "num_inference_steps": { + "minimum": 4, + "maximum": 50, + "type": "integer", + "title": "Number of Inference Steps", + "description": "The number of inference steps to perform.", + "default": 28 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed to use for the generation. If not provided, a random seed will be used." + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for classifier-free guidance. Describes what to avoid in the image.", + "default": "" + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "Guidance scale for classifier-free guidance.", + "default": 5 + } + }, + "title": "KleinBaseLoRAInput", + "required": [ + "prompt" + ] + }, + "Flux2Klein9bBaseLoraOutput": { + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "title": "KleinT2IOutput", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "title": "Height", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "title": "Width", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "fal-ai_flux-2-klein_LoRAInput": { + "x-fal-order-properties": [ + "path", + "scale" + ], + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL, HuggingFace repo ID (owner/repo), or local path to LoRA weights." + }, + "scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Scale", + "description": "Scale factor for LoRA application (0.0 to 4.0).", + "default": 1 + } + }, + "title": "LoRAInput", + "required": [ + "path" + ] + }, + "ImageFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "ImageFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2/klein/9b/base/lora/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2/klein/9b/base/lora/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2/klein/9b/base/lora": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2Klein9bBaseLoraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2/klein/9b/base/lora/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2Klein9bBaseLoraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2/klein/4b/base/lora", + "metadata": { + "display_name": "Flux 2 [klein] 4B Base Lora", + "category": "text-to-image", + "description": "Text-to-image generation with LoRA support for FLUX.2 [klein] 4B Base from Black Forest Labs. 
Custom style adaptation and fine-tuned model variations.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:31.310Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8b09ad/V8uFhTiTNXdAgvt1tbJmB_1335a918cf5542539d5954c13b7d0fef.jpg", + "model_url": "https://fal.run/fal-ai/flux-2/klein/4b/base/lora", + "license_type": "commercial", + "date": "2026-01-19T16:24:56.142Z", + "group": { + "key": "flux-2-klein-lora", + "label": "4B Base Text to Image (LoRa)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/flux-2-klein-4b-base-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/flux-2-klein-4b-base-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2/klein/4b/base/lora", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-2/klein/4b/base/lora queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2/klein/4b/base/lora", + "category": "text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8b09ad/V8uFhTiTNXdAgvt1tbJmB_1335a918cf5542539d5954c13b7d0fef.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2/klein/4b/base/lora", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2/klein/4b/base/lora/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2Klein4bBaseLoraInput": { + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "guidance_scale", + "seed", + "num_inference_steps", + "image_size", + "num_images", + "acceleration", + "sync_mode", + "enable_safety_checker", + "output_format", + "loras" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A serene Japanese garden with cherry blossoms, koi pond, and traditional wooden bridge at golden hour" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." 
+ }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Number of Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the image to generate.", + "default": "landscape_4_3" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use for image generation.", + "default": "regular" + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "loras": { + "title": "Loras", + "type": "array", + "description": "List of LoRA weights to apply (maximum 3).", + "items": { + "$ref": "#/components/schemas/fal-ai_flux-2-klein_LoRAInput" + }, + "default": [] + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI. Output is not stored when this is True.", + "default": false + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "num_inference_steps": { + "minimum": 4, + "maximum": 50, + "type": "integer", + "title": "Number of Inference Steps", + "description": "The number of inference steps to perform.", + "default": 28 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed to use for the generation. If not provided, a random seed will be used." + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for classifier-free guidance. Describes what to avoid in the image.", + "default": "" + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "Guidance scale for classifier-free guidance.", + "default": 5 + } + }, + "title": "KleinBaseLoRAInput", + "required": [ + "prompt" + ] + }, + "Flux2Klein4bBaseLoraOutput": { + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "title": "KleinT2IOutput", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "title": "Height", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "title": "Width", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "fal-ai_flux-2-klein_LoRAInput": { + "x-fal-order-properties": [ + "path", + "scale" + ], + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL, HuggingFace repo ID (owner/repo), or local path to LoRA weights." + }, + "scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Scale", + "description": "Scale factor for LoRA application (0.0 to 4.0).", + "default": 1 + } + }, + "title": "LoRAInput", + "required": [ + "path" + ] + }, + "ImageFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "ImageFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2/klein/4b/base/lora/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2/klein/4b/base/lora/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2/klein/4b/base/lora": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2Klein4bBaseLoraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2/klein/4b/base/lora/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2Klein4bBaseLoraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2/klein/9b/base", + "metadata": { + "display_name": "FLUX.2 [klein] 9B Base", + "category": "text-to-image", + "description": "Text-to-image generation with FLUX.2 [klein] 9B Base from Black Forest Labs. 
Enhanced realism, crisper text generation, and native editing capabilities.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:33.187Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8a7f3c/90FKDpwtSCZTqOu0jUI-V_64c1a6ec0f9343908d9efa61b7f2444b.jpg", + "model_url": "https://fal.run/fal-ai/flux-2/klein/9b/base", + "license_type": "commercial", + "date": "2026-01-15T20:51:04.479Z", + "group": { + "key": "klein", + "label": "9B Base Text to Image" + }, + "highlighted": false, + "kind": "inference", + "stream_url": "https://fal.ai/models/fal-ai/flux-2/klein/9b/stream", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2/klein/9b/base", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-2/klein/9b/base queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2/klein/9b/base", + "category": "text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8a7f3c/90FKDpwtSCZTqOu0jUI-V_64c1a6ec0f9343908d9efa61b7f2444b.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2/klein/9b/base", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2/klein/9b/base/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2Klein9bBaseInput": { + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "guidance_scale", + "seed", + "num_inference_steps", + "image_size", + "num_images", + "acceleration", + "sync_mode", + "enable_safety_checker", + "output_format" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A cyberpunk samurai standing in neon-lit Tokyo street at night, rain-soaked pavement reflecting holographic advertisements in pink and blue, steam rising from street vents, wearing futuristic armor with glowing accents, cinematic composition, Unreal Engine 5 quality, ray-traced reflections, shallow depth of field, hyper-detailed" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." 
+ }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Number of Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the image to generate.", + "default": "landscape_4_3" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use for image generation.", + "default": "regular" + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI. Output is not stored when this is True.", + "default": false + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "num_inference_steps": { + "minimum": 4, + "maximum": 50, + "type": "integer", + "title": "Number of Inference Steps", + "description": "The number of inference steps to perform.", + "default": 28 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed to use for the generation. If not provided, a random seed will be used." + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for classifier-free guidance. Describes what to avoid in the image.", + "default": "" + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "Guidance scale for classifier-free guidance.", + "default": 5 + } + }, + "title": "Klein9BBaseInput", + "required": [ + "prompt" + ] + }, + "Flux2Klein9bBaseOutput": { + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/0a8a69bc/EaxKO6wroq3eaDb2Znfpo.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "title": "Klein9BT2IOutput", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "title": "Height", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "title": "Width", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "ImageFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "ImageFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2/klein/9b/base/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2/klein/9b/base/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2/klein/9b/base": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2Klein9bBaseInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2/klein/9b/base/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2Klein9bBaseOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2/klein/4b/base", + "metadata": { + "display_name": "Flux 2 [klein] 4B Base", + "category": "text-to-image", + "description": "Text-to-image generation with Flux 2 [klein] 4B Base from Black Forest Labs. Enhanced realism, crisper text generation, and native editing capabilities.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:33.442Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8a7f36/bYUAh_nzYUAUa_yCBkrP1_2dd84022eeda49e99db95e13fc588e47.jpg", + "model_url": "https://fal.run/fal-ai/flux-2/klein/4b/base", + "license_type": "commercial", + "date": "2026-01-15T18:57:56.846Z", + "group": { + "key": "klein", + "label": "4B Base Text to Image" + }, + "highlighted": false, + "kind": "inference", + "stream_url": "https://fal.ai/models/fal-ai/flux-2/klein/4b/stream", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2/klein/4b/base", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-2/klein/4b/base queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2/klein/4b/base", + "category": "text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8a7f36/bYUAh_nzYUAUa_yCBkrP1_2dd84022eeda49e99db95e13fc588e47.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2/klein/4b/base", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2/klein/4b/base/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2Klein4bBaseInput": { + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "guidance_scale", + "seed", + "num_inference_steps", + "image_size", + "num_images", + "acceleration", + "sync_mode", + "enable_safety_checker", + "output_format" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Japanese zen garden at first light, perfect rake lines in gravel, koi pond with morning mist, temple bell in background, meditation ready" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Number of Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the image to generate.", + "default": "landscape_4_3" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use for image generation.", + "default": "regular" + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI. Output is not stored when this is True.", + "default": false + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "num_inference_steps": { + "minimum": 4, + "maximum": 50, + "type": "integer", + "title": "Number of Inference Steps", + "description": "The number of inference steps to perform.", + "default": 28 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed to use for the generation. If not provided, a random seed will be used." + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for classifier-free guidance. Describes what to avoid in the image.", + "default": "" + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "Guidance scale for classifier-free guidance.", + "default": 5 + } + }, + "title": "Klein4BBaseInput", + "required": [ + "prompt" + ] + }, + "Flux2Klein4bBaseOutput": { + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." 
+ }, + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/0a8a69c2/5IoN78I6tZ8ZH69SB3PhW.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "title": "Klein4BT2IOutput", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "title": "Height", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "title": "Width", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "ImageFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "ImageFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2/klein/4b/base/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2/klein/4b/base/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2/klein/4b/base": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2Klein4bBaseInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2/klein/4b/base/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2Klein4bBaseOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2/klein/9b", + "metadata": { + "display_name": "FLUX.2 [klein] 9B", + "category": "text-to-image", + "description": "Text-to-image generation with FLUX.2 [klein] 9B from Black Forest Labs. Enhanced realism, crisper text generation, and native editing capabilities.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:34.043Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8a7f3c/90FKDpwtSCZTqOu0jUI-V_64c1a6ec0f9343908d9efa61b7f2444b.jpg", + "model_url": "https://fal.run/fal-ai/flux-2/klein/9b", + "license_type": "commercial", + "date": "2026-01-15T12:24:18.909Z", + "group": { + "key": "klein", + "label": "9B Text to Image" + }, + "highlighted": false, + "kind": "inference", + "stream_url": "https://fal.ai/models/fal-ai/flux-2/klein/9b/stream", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2/klein/9b", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-2/klein/9b queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2/klein/9b", + "category": "text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8a7f3c/90FKDpwtSCZTqOu0jUI-V_64c1a6ec0f9343908d9efa61b7f2444b.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2/klein/9b", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2/klein/9b/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2Klein9bInput": { + "x-fal-order-properties": [ + "prompt", + "seed", + "num_inference_steps", + "image_size", + "num_images", + "sync_mode", + "enable_safety_checker", + "output_format" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A bright, surreal scene on a vast white salt flat under a clear blue sky, featuring two fluffy white alpacas standing in the foreground. Behind them sits a sleek white supercar with its scissor doors open, creating a dramatic futuristic silhouette. The sunlight is strong and crisp, casting sharp shadows on the ground and giving the image a clean, high-contrast cinematic look. Minimalist composition, playful luxury vibe, ultra sharp detail, wide-angle perspective, high resolution." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Number of Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the image to generate.", + "default": "landscape_4_3" + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI. Output is not stored when this is True.", + "default": false + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "num_inference_steps": { + "minimum": 4, + "maximum": 8, + "type": "integer", + "title": "Number of Inference Steps", + "description": "The number of inference steps to perform.", + "default": 4 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed to use for the generation. If not provided, a random seed will be used." + } + }, + "title": "Klein9BDistilledInput", + "required": [ + "prompt" + ] + }, + "Flux2Klein9bOutput": { + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." 
+ }, + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/0a8a6905/DuNRy1OODaGrlUGEfl9SX.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "title": "Klein9BDistilledT2IOutput", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "title": "Height", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "title": "Width", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "ImageFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "ImageFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2/klein/9b/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2/klein/9b/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2/klein/9b": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2Klein9bInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2/klein/9b/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2Klein9bOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2/klein/4b", + "metadata": { + "display_name": "Flux 2 [klein] 4B", + "category": "text-to-image", + "description": "Text-to-image generation with Flux 2 [klein] 4B from Black Forest Labs. Enhanced realism, crisper text generation, and native editing capabilities.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:34.302Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8a7f30/UwGq5qBE9zqd4r6QI7En0_082c2d0376a646378870218b6c0589f9.jpg", + "model_url": "https://fal.run/fal-ai/flux-2/klein/4b", + "license_type": "commercial", + "date": "2026-01-15T12:20:58.031Z", + "group": { + "key": "klein", + "label": "4B Text to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2/klein/4b", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-2/klein/4b queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2/klein/4b", + "category": "text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8a7f30/UwGq5qBE9zqd4r6QI7En0_082c2d0376a646378870218b6c0589f9.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2/klein/4b", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2/klein/4b/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2Klein4bInput": { + "x-fal-order-properties": [ + "prompt", + "seed", + "num_inference_steps", + "image_size", + "num_images", + "sync_mode", + "enable_safety_checker", + "output_format" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Cheetah in pre-sprint crouch, muscles tensed, gazelle visible in distance, African savanna golden grass, dust particles in air, National Geographic wildlife photography" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Number of Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the image to generate.", + "default": "landscape_4_3" + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI. Output is not stored when this is True.", + "default": false + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "num_inference_steps": { + "minimum": 4, + "maximum": 8, + "type": "integer", + "title": "Number of Inference Steps", + "description": "The number of inference steps to perform.", + "default": 4 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed to use for the generation. If not provided, a random seed will be used." + } + }, + "title": "KleinDistilledInput", + "required": [ + "prompt" + ] + }, + "Flux2Klein4bOutput": { + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/0a8a69c7/gU9ZgfFC9oAZjpIoveAac.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "title": "Klein4BDistilledT2IOutput", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "title": "Height", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "title": "Width", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "ImageFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "ImageFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2/klein/4b/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2/klein/4b/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2/klein/4b": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2Klein4bInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2/klein/4b/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2Klein4bOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "imagineart/imagineart-1.5-pro-preview/text-to-image", + "metadata": { + "display_name": "ImagineArt 1.5 Pro Preview", + "category": "text-to-image", + "description": "ImagineArt 1.5 Pro is an advanced text-to-image model that creates ultra-high-fidelity 4K visuals with lifelike realism, refined aesthetics, and powerful creative output suited for professional use.", + "status": "active", + "tags": [ + "visuals", + "imagineart", + "realism", + "text" + ], + "updated_at": "2026-01-26T21:41:35.491Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8a775c/_DQSwdUUQgTD1E8ZjcsSf_15104de7104540b38840ceb135a0f673.jpg", + "model_url": "https://fal.run/imagineart/imagineart-1.5-pro-preview/text-to-image", + "license_type": "commercial", + "date": "2026-01-15T08:52:36.780Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for imagineart/imagineart-1.5-pro-preview/text-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the imagineart/imagineart-1.5-pro-preview/text-to-image queue.", + "x-fal-metadata": { + "endpointId": "imagineart/imagineart-1.5-pro-preview/text-to-image", + "category": "text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8a775c/_DQSwdUUQgTD1E8ZjcsSf_15104de7104540b38840ceb135a0f673.jpg", + "playgroundUrl": "https://fal.ai/models/imagineart/imagineart-1.5-pro-preview/text-to-image", + "documentationUrl": "https://fal.ai/models/imagineart/imagineart-1.5-pro-preview/text-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Imagineart15ProPreviewTextToImageInput": { + "title": "ImagineArt_1_5_Input", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A photorealistic close-up selfie portrait of a young woman with voluminous, wavy, shoulder-length dark brown hair and striking light blue eyes. She features sharp black winged eyeliner, rosy blushed cheeks, and glossy pinkish-red lips. She is wearing a silver choker necklace composed of linked butterfly charms and silver hoop earrings. She is dressed in a black top with thin spaghetti straps. The background is a soft-focus interior living room with beige walls and natural sunlight illuminating her face from the side, highlighting her skin texture and features." + ], + "description": "Text prompt describing the desired image", + "type": "string", + "title": "Prompt" + }, + "aspect_ratio": { + "examples": [ + "1:1", + "3:1", + "1:3", + "16:9", + "9:16", + "4:3", + "3:4", + "3:2", + "2:3" + ], + "description": "Image aspect ratio: 1:1, 3:1, 1:3, 16:9, 9:16, 4:3, 3:4, 3:2, 2:3", + "type": "string", + "enum": [ + "1:1", + "16:9", + "9:16", + "4:3", + "3:4", + "3:1", + "1:3", + "3:2", + "2:3" + ], + "title": "Aspect Ratio", + "default": "1:1" + }, + "seed": { + "examples": [ + 0 + ], + "title": "Seed", + "type": "integer", + "description": "Seed for the image generation" + } + }, + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "seed" + ], + "required": [ + "prompt" + ] + }, + "Imagineart15ProPreviewTextToImageOutput": { + "title": "ImagineArt_1_5_Output", + "type": "object", + "properties": { + "images": { + "description": "Generated image", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + } + }, + "x-fal-order-properties": [ + "images" + ], + "required": [ + "images" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "examples": [ + 1024 + ], + "description": "The height of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The width of the image in pixels." 
+ } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/imagineart/imagineart-1.5-pro-preview/text-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/imagineart/imagineart-1.5-pro-preview/text-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/imagineart/imagineart-1.5-pro-preview/text-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Imagineart15ProPreviewTextToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/imagineart/imagineart-1.5-pro-preview/text-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Imagineart15ProPreviewTextToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/glm-image", + "metadata": { + "display_name": "Glm Image", + "category": "text-to-image", + "description": "Create high-quality images with accurate text rendering and rich knowledge details—supports editing, style transfer, and maintaining consistent characters across multiple images.", + "status": "active", + "tags": [ + "text-to-image" + ], + "updated_at": "2026-01-26T21:41:37.159Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8a4c6f/XY6gc2c6YgtIKHKiTKRfO_c7be09cba1eb4ebe850aea98676df98d.jpg", + "model_url": "https://fal.run/fal-ai/glm-image", + "license_type": "commercial", + "date": "2026-01-14T02:17:38.633Z", + "group": { + "key": "glm-image", + "label": "Text to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/glm-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/glm-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/glm-image", + "category": "text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8a4c6f/XY6gc2c6YgtIKHKiTKRfO_c7be09cba1eb4ebe850aea98676df98d.jpg", + "playgroundUrl": 
"https://fal.ai/models/fal-ai/glm-image", + "documentationUrl": "https://fal.ai/models/fal-ai/glm-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "GlmImageInput": { + "title": "GlmImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "An elegant close-up photograph of hands holding a beautifully illustrated watercolor menu card. The hands have natural sun-kissed skin with a delicate gold ring, gripping the menu gently and refined.\n\nThe menu is a work of art—hand-painted watercolor illustration on textured cream watercolor paper with soft deckled edges:\n\n- Top: \"AZURE\" painted in flowing navy blue watercolor calligraphy with organic brushstroke texture and slight color bleeding\n- Watercolor illustration border: delicate tropical elements painted in soft washes—translucent turquoise waves flowing along the edges, loose coral and pink hibiscus flowers in the corners, gentle green palm leaf strokes, and small golden paint splatters suggesting sunlight\n- Center menu items in elegant hand-lettered watercolor script with slight variations in ink density:\n\n \"Tuna Tartare — 24\"\n \"Sea Bass — 32\"\n \"Mango Pavlova — 14\"\n\n- Bottom: \"Koh Samui\" in small watercolor lettering with a tiny painted wave\n\nThe watercolor has beautiful organic qualities—soft color gradients, natural paper texture visible through transparent washes, slight bleeding at edges of brushstrokes, layered translucent blues and greens creating depth. The paint has a luminous, fresh quality with white paper showing through in places.\n\nBackground: dreamy out-of-focus turquoise ocean with sparkling bokeh lights reflecting off water, creating soft circular light spots in aqua and gold tones. The blurred background complements the watercolor aesthetic perfectly.\n\nLighting: warm natural golden hour sunlight from upper left, illuminating the watercolor pigments and making them glow. The light catches the textured watercolor paper beautifully, showing subtle shadows in the paint layers and paper grain.\n\nPhotography style: shot on 85mm f/1.4, shallow depth of field with only the menu in sharp focus. High-end editorial aesthetic that celebrates the handmade, artistic quality of the watercolor. Color palette: cream paper, translucent turquoise and teal watercolors, soft coral pink, navy blue, gentle greens, golden accents, warm skin tones.\n\nThe overall mood is artistic, luxurious, handcrafted—like a boutique resort that values artistry and craftsmanship. The watercolor style feels fresh, organic, and elevated." 
+ ], + "description": "Text prompt for image generation.", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "minimum": 1, + "description": "Number of images to generate.", + "type": "integer", + "maximum": 4, + "title": "Num Images", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9", + "portrait_3_2", + "landscape_3_2", + "portrait_hd", + "landscape_hd" + ], + "type": "string" + } + ], + "description": "Output image size.", + "title": "Image Size", + "default": "square_hd" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "Output image format.", + "default": "jpeg" + }, + "sync_mode": { + "description": "If True, the image will be returned as a base64 data URI instead of a URL.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "guidance_scale": { + "minimum": 1, + "description": "Classifier-free guidance scale. Higher values make the model follow the prompt more closely.", + "type": "number", + "maximum": 10, + "title": "Guidance Scale", + "default": 1.5 + }, + "seed": { + "description": "Random seed for reproducibility. The same seed with the same prompt will produce the same image.", + "type": "integer", + "title": "Seed" + }, + "enable_prompt_expansion": { + "description": "If True, the prompt will be enhanced using an LLM for more detailed and higher quality results.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": false + }, + "num_inference_steps": { + "minimum": 10, + "description": "Number of diffusion denoising steps. More steps generally produce higher quality images.", + "type": "integer", + "maximum": 100, + "title": "Num Inference Steps", + "default": 30 + }, + "enable_safety_checker": { + "description": "Enable NSFW safety checking on the generated images.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "guidance_scale", + "seed", + "num_images", + "enable_safety_checker", + "output_format", + "sync_mode", + "enable_prompt_expansion" + ], + "required": [ + "prompt" + ] + }, + "GlmImageOutput": { + "title": "GlmImageOutput", + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "examples": [ + [ + { + "height": 1024, + "content_type": "image/jpeg", + "url": "https://storage.googleapis.com/falserverless/example_outputs/menu.jpg", + "width": 1024 + } + ] + ], + "description": "List of URLs to the generated images.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ", + "type": "integer", + "title": "Seed" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Height", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Width", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/glm-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/glm-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/glm-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GlmImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/glm-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GlmImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-2512/lora", + "metadata": { + "display_name": "Qwen Image 2512", + "category": "text-to-image", + "description": "LoRA inference endpoint for Qwen Image 2512, an improved version of Qwen Image with better text rendering, finer natural textures, and more realistic human generation.", + "status": "active", + "tags": [ + "qwen", + "2512", + "lora" + ], + "updated_at": "2026-01-26T21:41:43.537Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a88be6b/el0hVqbMWfVjFm1jSR81z_311fa68b84444d11b75445e6ecb3f8b3.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-2512/lora", + "license_type": "commercial", + "date": "2026-01-02T07:15:39.360Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image-2512/lora", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-image-2512/lora queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-image-2512/lora", + "category": "text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a88be6b/el0hVqbMWfVjFm1jSR81z_311fa68b84444d11b75445e6ecb3f8b3.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-2512/lora", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-2512/lora/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImage2512LoraInput": { + "title": "LoraInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Single red rose in a clear glass vase on white marble streaked with black and gold veins, harsh directional shadow, high contrast, editorial style, clean negative space." + ], + "description": "The prompt to generate an image from.", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "description": "The number of images to generate.", + "title": "Num Images", + "default": 1 + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "description": "The acceleration level to use.", + "type": "string", + "title": "Acceleration", + "default": "regular" + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image.", + "title": "Image Size", + "default": "landscape_4_3" + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "png" + }, + "loras": { + "description": "\n The LoRAs to use for the image generation. You can use up to 3 LoRAs\n and they will be merged together to generate the final image.\n ", + "type": "array", + "title": "Loras", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "default": [] + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "seed": { + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "type": "integer", + "title": "Seed" + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "description": "The guidance scale to use for the image generation.", + "title": "Guidance Scale", + "default": 4 + }, + "negative_prompt": { + "description": "The negative prompt to generate an image from.", + "type": "string", + "title": "Negative Prompt", + "default": "" + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "description": "The number of inference steps to perform.", + "title": "Num Inference Steps", + "default": 28 + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "image_size", + "num_inference_steps", + "guidance_scale", + "seed", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "acceleration", + "loras" + ], + "required": [ + "prompt" + ] + }, + "QwenImage2512LoraOutput": { + "title": "QwenImage2512Output", + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "examples": [ + [ + { + "height": 768, + "content_type": "image/png", + "url": "https://v3b.fal.media/files/b/0a887d5a/r3LfL0WY1re7dT5Qb85U_.png", + "width": 1024 + } + ] + ], + "description": "The generated image 
files info.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "seed": { + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "description": "The height of the generated image.", + "title": "Height", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "description": "The width of the generated image.", + "title": "Width", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "description": "URL or the path to the LoRA weights.", + "type": "string", + "title": "Path" + }, + "scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "title": "Scale", + "default": 1 + } + }, + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-2512/lora/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-2512/lora/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-2512/lora": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImage2512LoraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-2512/lora/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImage2512LoraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-2512", + "metadata": { + "display_name": "Qwen Image 2512", + "category": "text-to-image", + "description": "Qwen Image 2512 is an improved version of Qwen Image with better text rendering, finer natural textures, and more realistic human generation.", + "status": "active", + "tags": [ + "qwen", + "2512" + ], + "updated_at": "2026-01-26T21:41:44.103Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a88687b/hYfxa5kVUy6mpm42BBrXD_a2b18111ab6f4189b90f14100605aa85.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-2512", + "license_type": "commercial", + "date": "2025-12-30T18:09:37.348Z", + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/qwen-image-2512-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/qwen-image-2512-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image-2512", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-image-2512 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-image-2512", + "category": "text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a88687b/hYfxa5kVUy6mpm42BBrXD_a2b18111ab6f4189b90f14100605aa85.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-2512", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-2512/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImage2512Input": { + "title": "TextToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Single red rose in a clear glass vase on white marble streaked with black and gold veins, harsh directional shadow, high contrast, editorial style, clean negative space." + ], + "description": "The prompt to generate an image from.", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "description": "The number of images to generate.", + "title": "Num Images", + "default": 1 + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "description": "The acceleration level to use.", + "type": "string", + "title": "Acceleration", + "default": "regular" + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image.", + "title": "Image Size", + "default": "landscape_4_3" + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "png" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "seed": { + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "type": "integer", + "title": "Seed" + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "description": "The guidance scale to use for the image generation.", + "title": "Guidance Scale", + "default": 4 + }, + "negative_prompt": { + "description": "The negative prompt to generate an image from.", + "type": "string", + "title": "Negative Prompt", + "default": "" + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "description": "The number of inference steps to perform.", + "title": "Num Inference Steps", + "default": 28 + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "image_size", + "num_inference_steps", + "guidance_scale", + "seed", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "acceleration" + ], + "required": [ + "prompt" + ] + }, + "QwenImage2512Output": { + "title": "QwenImage2512Output", + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "examples": [ + [ + { + "height": 768, + "content_type": "image/png", + "url": "https://v3b.fal.media/files/b/0a887d5a/r3LfL0WY1re7dT5Qb85U_.png", + "width": 1024 + } + ] + ], + "description": "The generated image files info.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images 
contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "seed": { + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "description": "The height of the generated image.", + "title": "Height", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "description": "The width of the generated image.", + "title": "Width", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-2512/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-2512/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-2512": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImage2512Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-2512/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImage2512Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "wan/v2.6/text-to-image", + "metadata": { + "display_name": "Wan v2.6 Text to Image", + "category": "text-to-image", + "description": "Wan 2.6 text-to-image model.", + "status": "active", + "tags": [ + "text-to-image" + ], + "updated_at": "2026-01-26T21:41:47.179Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a878040/GSP3n-IjMgY2GlxiwOu3V.png", + "model_url": "https://fal.run/wan/v2.6/text-to-image", + "license_type": "commercial", + "date": "2025-12-23T21:00:10.736Z", + "group": { + "key": "v2.6", + "label": "Text to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for wan/v2.6/text-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the wan/v2.6/text-to-image queue.", + "x-fal-metadata": { + "endpointId": "wan/v2.6/text-to-image", + "category": "text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a878040/GSP3n-IjMgY2GlxiwOu3V.png", + "playgroundUrl": "https://fal.ai/models/wan/v2.6/text-to-image", + "documentationUrl": "https://fal.ai/models/wan/v2.6/text-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "V26TextToImageInput": { + "x-fal-order-properties": [ + "prompt", + "image_url", + "negative_prompt", + "image_size", + "max_images", + "seed", + "enable_safety_checker" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "An ancient library floating among clouds, golden hour light streaming through massive windows, photorealistic" + ], + "description": "Text prompt describing the desired image. Supports Chinese and English. Max 2000 characters.", + "minLength": 1, + "title": "Prompt", + "type": "string" + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "Output image size. If not set: matches input image size (up to 1280*1280). Use presets like 'square_hd', 'landscape_16_9', or specify exact dimensions.", + "title": "Image Size", + "examples": [ + "square_hd", + "landscape_16_9" + ] + }, + "max_images": { + "minimum": 1, + "maximum": 5, + "type": "integer", + "title": "Max Images", + "description": "Maximum number of images to generate (1-5). Actual count may be less depending on model inference.", + "default": 1 + }, + "image_url": { + "description": "Optional reference image (0 or 1). When provided, can be used for style guidance. Resolution: 384-5000px each dimension. Max size: 10MB. Formats: JPEG, JPG, PNG (no alpha), BMP, WEBP.", + "type": "string", + "title": "Image Url" + }, + "enable_safety_checker": { + "description": "Enable content moderation for input and output.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "seed": { + "description": "Random seed for reproducibility (0-2147483647).", + "type": "integer", + "title": "Seed" + }, + "negative_prompt": { + "examples": [ + "low resolution, error, worst quality, low quality, deformed" + ], + "description": "Content to avoid in the generated image. Max 500 characters.", + "type": "string", + "title": "Negative Prompt", + "default": "" + } + }, + "title": "TextToImageWanInput", + "description": "Input for Wan 2.6 text-to-image or mixed text-and-image generation (enable_interleave=true)", + "required": [ + "prompt" + ] + }, + "V26TextToImageOutput": { + "x-fal-order-properties": [ + "images", + "generated_text", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "file_name": "output_1.png", + "content_type": "image/png", + "url": "https://v3b.fal.media/files/b/0a86d6b0/cBXGSUEl3DkTcBnf9IEM0_output_1.png" + } + ] + ], + "description": "Generated images in PNG format", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/File" + } + }, + "seed": { + "examples": [ + 175932751 + ], + "description": "The seed used for generation", + "type": "integer", + "title": "Seed" + }, + "generated_text": { + "description": "Generated text content (in mixed text-and-image mode). 
May be None if only images were generated.", + "type": "string", + "title": "Generated Text" + } + }, + "title": "TextToImageWanOutput", + "description": "Output for Wan 2.6 text-to-image (can include generated text in mixed mode)", + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/wan/v2.6/text-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/wan/v2.6/text-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/wan/v2.6/text-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/V26TextToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/wan/v2.6/text-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/V26TextToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2/flash", + "metadata": { + "display_name": "Flux 2", + "category": "text-to-image", + "description": "Text-to-image generation with FLUX.2 [dev] from Black Forest Labs. Enhanced realism, crisper text generation, and native editing capabilities— in a flash.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:52.486Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a871486/tX7YdfQViGtCE7ZjxOCph_5f5262a21e9e426e8981ea9513d11999.jpg", + "model_url": "https://fal.run/fal-ai/flux-2/flash", + "license_type": "commercial", + "date": "2025-12-16T20:07:20.209Z", + "group": { + "key": "Flux2", + "label": "Text to Image (Flash)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2/flash", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-2/flash queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2/flash", + "category": "text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a871486/tX7YdfQViGtCE7ZjxOCph_5f5262a21e9e426e8981ea9513d11999.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2/flash", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2/flash/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2FlashInput": { + "x-fal-order-properties": [ + "prompt", + "guidance_scale", + "seed", + "image_size", + "num_images", + "enable_prompt_expansion", + "sync_mode", + "enable_safety_checker", + "output_format" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "An army of ants battles for a sugar cube in a giant sandbox, with tiny plastic soldier toys caught in the melee. Ant bodies shine against gritty beige sand and the glossy, crystalline cube. Fiery sunrise spills golden highlights, throwing dramatic shadows, amplifying the excitement. Shot macro (100mm), aperture f/2.8 for soft background, centered composition with ground-level POV, mixing organic and manufactured textures." + ], + "description": "The prompt to generate an image from.", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "title": "Number of Images", + "maximum": 4, + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the image to generate. The width and height must be between 512 and 2048 pixels.", + "title": "Image Size", + "default": "landscape_4_3" + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "png" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "description": "Guidance Scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.", + "type": "number", + "title": "Guidance Scale", + "maximum": 20, + "default": 2.5 + }, + "seed": { + "description": "The seed to use for the generation. If not provided, a random seed will be used.", + "type": "integer", + "title": "Seed" + }, + "enable_prompt_expansion": { + "description": "If set to true, the prompt will be expanded for better results.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": false + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + } + }, + "title": "Flux2FlashTextToImageInput", + "required": [ + "prompt" + ] + }, + "Flux2FlashOutput": { + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "examples": [ + [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/flux-2-flash-t2i.png" + } + ] + ], + "description": "The generated images", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "seed": { + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ", + "type": "integer", + "title": "Seed" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + } + }, + "title": "Flux2FlashT2IOutput", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "title": "Height", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "title": "Width", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "ImageFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "ImageFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2/flash/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2/flash/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2/flash": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2FlashInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2/flash/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2FlashOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/gpt-image-1.5", + "metadata": { + "display_name": "GPT-Image 1.5", + "category": "text-to-image", + "description": "GPT Image 1.5 generates high-fidelity images with strong prompt adherence, preserving composition, lighting, and fine-grained detail.", + "status": "active", + "tags": [ + "openai", + "gpt-image", + "" + ], + "updated_at": "2026-01-27T18:33:30.651Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a87580d/tNVCM7rX3ST05Lko3RnxC_46dfd21f89e34bf99546dce50117a568.jpg", + "model_url": "https://fal.run/fal-ai/gpt-image-1.5", + "license_type": "commercial", + "date": "2025-12-16T18:36:42.550Z", + "group": { + "key": "gpt-image-1.5", + "label": "Text to Image" + }, + "highlighted": false, + "kind": "inference", + "stream_url": "https://fal.run/fal-ai/gpt-image-1.5/stream", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/gpt-image-1.5", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/gpt-image-1.5 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/gpt-image-1.5", + "category": "text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a87580d/tNVCM7rX3ST05Lko3RnxC_46dfd21f89e34bf99546dce50117a568.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/gpt-image-1.5", + "documentationUrl": "https://fal.ai/models/fal-ai/gpt-image-1.5/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "GptImage15Input": { + "title": "TextToImageRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "create a realistic image taken with iphone at these coordinates 41°43′32″N 49°56′49″W 15 April 1912" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt for image generation", + "minLength": 2 + }, + "num_images": { + "description": "Number of images to generate", + "type": "integer", + "minimum": 1, + "title": "Number of Images", + "examples": [ + 1 + ], + "maximum": 4, + "default": 1 + }, + "image_size": { + "enum": [ + "1024x1024", + "1536x1024", + "1024x1536" + ], + "title": "Image Size", + "type": "string", + "description": "Aspect ratio for the generated image", + "default": "1024x1024" + }, + "background": { + "enum": [ + "auto", + "transparent", + "opaque" + ], + "title": "Background", + "type": "string", + "description": "Background for the generated image", + "default": "auto" + }, + "quality": { + "enum": [ + "low", + "medium", + "high" + ], + "title": "Quality", + "type": "string", + "description": "Quality for the generated image", + "default": "high" + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "Output format for the images", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "background", + "quality", + "num_images", + "output_format", + "sync_mode" + ], + "required": [ + "prompt" + ] + }, + "GptImage15Output": { + "title": "ImageResponse", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "height": 1024, + "file_name": "EnWrO3XWjPE0nxBDpaQrj.png", + "content_type": "image/png", + "url": "https://v3b.fal.media/files/b/0a869129/EnWrO3XWjPE0nxBDpaQrj.png", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated images.", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + } + }, + "x-fal-order-properties": [ + "images" + ], + "required": [ + "images" + ] + }, + "ImageFile": { + "title": "ImageFile", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the image" + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the image" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/gpt-image-1.5/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/gpt-image-1.5/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/gpt-image-1.5": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GptImage15Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/gpt-image-1.5/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GptImage15Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "bria/fibo-lite/generate", + "metadata": { + "display_name": "Fibo Lite", + "category": "text-to-image", + "description": "Fibo Lite, the new addition to the Fibo model family, allows generating high-quality images with the same controllability of the JSON structured prompt with significantly improved latency. 
", + "status": "active", + "tags": [ + "bria", + "fibo", + "lite" + ], + "updated_at": "2026-01-26T21:41:52.986Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a868a4a/_QE3Hulvi5T-kP4GZwZxf_35c1ffe448b0481183bdb17a0618ef52.jpg", + "model_url": "https://fal.run/bria/fibo-lite/generate", + "license_type": "commercial", + "date": "2025-12-16T14:07:05.087Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for bria/fibo-lite/generate", + "version": "1.0.0", + "description": "The OpenAPI schema for the bria/fibo-lite/generate queue.", + "x-fal-metadata": { + "endpointId": "bria/fibo-lite/generate", + "category": "text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a868a4a/_QE3Hulvi5T-kP4GZwZxf_35c1ffe448b0481183bdb17a0618ef52.jpg", + "playgroundUrl": "https://fal.ai/models/bria/fibo-lite/generate", + "documentationUrl": "https://fal.ai/models/bria/fibo-lite/generate/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FiboLiteGenerateInput": { + "title": "GaiaLiteInputModel", + "type": "object", + "properties": { + "prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Prompt for image generation.", + "title": "Prompt", + "examples": [ + "A hyper-detailed, ultra-fluffy owl sitting in the trees at night, looking directly at the camera with wide, adorable, expressive eyes. Its feathers are soft and voluminous, catching the cool moonlight with subtle silver highlights. The owl’s gaze is curious and full of charm, giving it a whimsical, storybook-like personality." + ] + }, + "steps_num": { + "description": "Number of inference steps for Fibo Lite.", + "type": "integer", + "minimum": 4, + "title": "Steps Num", + "maximum": 30, + "default": 8 + }, + "aspect_ratio": { + "enum": [ + "1:1", + "2:3", + "3:2", + "3:4", + "4:3", + "4:5", + "5:4", + "9:16", + "16:9" + ], + "description": "Aspect ratio. 
Options: 1:1, 2:3, 3:2, 3:4, 4:3, 4:5, 5:4, 9:16, 16:9", + "type": "string", + "title": "Aspect Ratio", + "default": "1:1" + }, + "image_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Reference image (file or URL).", + "title": "Image Url" + }, + "sync_mode": { + "description": "If true, returns the image directly in the response (increases latency).", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "seed": { + "description": "Random seed for reproducibility.", + "type": "integer", + "title": "Seed", + "default": 5555 + }, + "structured_prompt": { + "anyOf": [ + { + "$ref": "#/components/schemas/StructuredPrompt" + }, + { + "type": "null" + } + ], + "description": "The structured prompt to generate an image from." + } + }, + "x-fal-order-properties": [ + "prompt", + "structured_prompt", + "image_url", + "seed", + "steps_num", + "aspect_ratio", + "sync_mode" + ] + }, + "FiboLiteGenerateOutput": { + "title": "GaiaOutputModel", + "type": "object", + "properties": { + "images": { + "description": "Generated images.", + "type": "array", + "title": "Images", + "items": { + "additionalProperties": true, + "type": "object" + }, + "default": [] + }, + "image": { + "description": "Generated image.", + "$ref": "#/components/schemas/Image" + }, + "structured_prompt": { + "description": "Current prompt.", + "type": "object", + "additionalProperties": true, + "title": "Structured Prompt" + } + }, + "x-fal-order-properties": [ + "image", + "images", + "structured_prompt" + ], + "required": [ + "image", + "structured_prompt" + ] + }, + "StructuredPrompt": { + "title": "StructuredPrompt", + "type": "object", + "properties": { + "background_setting": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The background setting of the image to be generated.", + "title": "Background Setting" + }, + "artistic_style": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The artistic style of the image to be generated.", + "title": "Artistic Style" + }, + "context": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The context of the image to be generated.", + "title": "Context" + }, + "text_render": { + "anyOf": [ + { + "type": "array", + "items": {} + }, + { + "type": "null" + } + ], + "description": "A list of text to be rendered in the image.", + "title": "Text Render", + "default": [] + }, + "objects": { + "anyOf": [ + { + "type": "array", + "items": { + "$ref": "#/components/schemas/PromptObject" + } + }, + { + "type": "null" + } + ], + "description": "A list of objects in the image to be generated, along with their attributes and relationships to other objects in the image.", + "title": "Objects", + "default": [] + }, + "style_medium": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The style medium of the image to be generated.", + "title": "Style Medium" + }, + "photographic_characteristics": { + "anyOf": [ + { + "$ref": "#/components/schemas/PhotographicCharacteristics" + }, + { + "type": "null" + } + ], + "description": "The photographic characteristics of the image to be generated." + }, + "aesthetics": { + "anyOf": [ + { + "$ref": "#/components/schemas/Aesthetics" + }, + { + "type": "null" + } + ], + "description": "The aesthetics of the image to be generated." 
+ }, + "lighting": { + "anyOf": [ + { + "$ref": "#/components/schemas/Lighting" + }, + { + "type": "null" + } + ], + "description": "The lighting of the image to be generated." + }, + "short_description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "A short description of the image to be generated.", + "title": "Short Description" + } + }, + "x-fal-order-properties": [ + "short_description", + "objects", + "background_setting", + "lighting", + "aesthetics", + "photographic_characteristics", + "style_medium", + "text_render", + "context", + "artistic_style" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The height of the image in pixels.", + "title": "Height", + "examples": [ + 1024 + ] + }, + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The width of the image in pixels.", + "title": "Width", + "examples": [ + 1024 + ] + } + }, + "required": [ + "url" + ] + }, + "PromptObject": { + "title": "PromptObject", + "type": "object", + "properties": { + "relative_size": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The relative size of the object in the image.", + "title": "Relative Size" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "A description of the object to be generated.", + "title": "Description" + }, + "skin_tone_and_texture": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The skin tone and texture of the object in the image.", + "title": "Skin Tone And Texture" + }, + "appearance_details": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The appearance details of the object.", + "title": "Appearance Details" + }, + "number_of_objects": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The number of objects in the image.", + "title": "Number Of Objects" + }, + "pose": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The pose of the object in the image.", + "title": "Pose" + }, + "expression": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The expression of the object in the image.", + "title": "Expression" + }, + "shape_and_color": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The 
shape and color of the object.", + "title": "Shape And Color" + }, + "relationship": { + "description": "The relationship of the object to other objects in the image.", + "type": "string", + "title": "Relationship" + }, + "texture": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The texture of the object.", + "title": "Texture" + }, + "gender": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The gender of the object in the image.", + "title": "Gender" + }, + "clothing": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The clothing of the object in the image.", + "title": "Clothing" + }, + "location": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The location of the object in the image.", + "title": "Location" + }, + "orientation": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The orientation of the object in the image.", + "title": "Orientation" + }, + "action": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The action of the object in the image.", + "title": "Action" + } + }, + "x-fal-order-properties": [ + "description", + "location", + "relationship", + "relative_size", + "shape_and_color", + "texture", + "appearance_details", + "number_of_objects", + "pose", + "expression", + "clothing", + "action", + "gender", + "skin_tone_and_texture", + "orientation" + ], + "required": [ + "relationship" + ] + }, + "PhotographicCharacteristics": { + "title": "PhotographicCharacteristics", + "type": "object", + "properties": { + "focus": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The focus in the image to be generated.", + "title": "Focus" + }, + "lens_focal_length": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The focal length of the lens in the image to be generated.", + "title": "Lens Focal Length" + }, + "camera_angle": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The angle of the camera in the image to be generated.", + "title": "Camera Angle" + }, + "depth_of_field": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The depth of field in the image to be generated.", + "title": "Depth Of Field" + } + }, + "x-fal-order-properties": [ + "depth_of_field", + "focus", + "camera_angle", + "lens_focal_length" + ] + }, + "Aesthetics": { + "title": "Aesthetics", + "type": "object", + "properties": { + "composition": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The composition of the image to be generated.", + "title": "Composition" + }, + "mood_atmosphere": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mood and atmosphere of the image to be generated.", + "title": "Mood Atmosphere" + }, + "color_scheme": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The color scheme of the image to be generated.", + "title": "Color Scheme" + } + }, + "x-fal-order-properties": [ + "composition", + "color_scheme", + "mood_atmosphere" + ] + }, + "Lighting": { + "title": "Lighting", + "type": "object", + "properties": { + "shadows": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The shadows in the image to be 
generated.", + "title": "Shadows" + }, + "conditions": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The conditions of the lighting in the image to be generated.", + "title": "Conditions" + }, + "direction": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The direction of the lighting in the image to be generated.", + "title": "Direction" + } + }, + "x-fal-order-properties": [ + "conditions", + "direction", + "shadows" + ] + } + } + }, + "paths": { + "/bria/fibo-lite/generate/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/fibo-lite/generate/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/bria/fibo-lite/generate": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FiboLiteGenerateInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/fibo-lite/generate/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FiboLiteGenerateOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2/turbo", + "metadata": { + "display_name": "Flux 2", + "category": "text-to-image", + "description": "Text-to-image generation with FLUX.2 [dev] from Black Forest Labs. 
Enhanced realism, crisper text generation, and native editing capabilities—all at turbo speed.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:53.237Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a871494/j8F-tmy_dz4TyImvIHj19_510cc93373ef451386734b7e05711de1.jpg", + "model_url": "https://fal.run/fal-ai/flux-2/turbo", + "license_type": "commercial", + "date": "2025-12-16T14:02:30.209Z", + "group": { + "key": "Flux2", + "label": "Text To Image (Turbo)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2/turbo", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-2/turbo queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2/turbo", + "category": "text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a871494/j8F-tmy_dz4TyImvIHj19_510cc93373ef451386734b7e05711de1.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2/turbo", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2/turbo/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2TurboInput": { + "x-fal-order-properties": [ + "prompt", + "guidance_scale", + "seed", + "image_size", + "num_images", + "enable_prompt_expansion", + "sync_mode", + "enable_safety_checker", + "output_format" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A realistic photograph of a vintage typewriter with a sheet of paper inserted that says 'Chapter One: The Journey Begins,' sunlight falling across the desk." + ], + "description": "The prompt to generate an image from.", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "title": "Number of Images", + "maximum": 4, + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the image to generate. 
The width and height must be between 512 and 2048 pixels.", + "title": "Image Size", + "default": "landscape_4_3" + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "png" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "description": "Guidance Scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.", + "type": "number", + "title": "Guidance Scale", + "maximum": 20, + "default": 2.5 + }, + "seed": { + "description": "The seed to use for the generation. If not provided, a random seed will be used.", + "type": "integer", + "title": "Seed" + }, + "enable_prompt_expansion": { + "description": "If set to true, the prompt will be expanded for better results.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": false + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + } + }, + "title": "Flux2TurboTextToImageInput", + "required": [ + "prompt" + ] + }, + "Flux2TurboOutput": { + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "examples": [ + [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/flux-2-turbo-t2i.png" + } + ] + ], + "description": "The generated images", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "seed": { + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ", + "type": "integer", + "title": "Seed" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + } + }, + "title": "Flux2TurboT2IOutput", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "title": "Height", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "title": "Width", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "ImageFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "ImageFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2/turbo/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2/turbo/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2/turbo": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2TurboInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2/turbo/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2TurboOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2-max", + "metadata": { + "display_name": "Flux 2 Max", + "category": "text-to-image", + "description": "FLUX.2 [max] delivers state-of-the-art image generation and advanced image editing with exceptional realism, precision, and consistency.", + "status": "active", + "tags": [ + "flux2", + "max" + ], + "updated_at": "2026-01-26T21:41:53.362Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a868a0f/zL7LNUIqnPPhZNy_PtHJq_330f66115240460788092cb9523b6aba.jpg", + "model_url": "https://fal.run/fal-ai/flux-2-max", + "license_type": "commercial", + "date": "2025-12-16T13:55:00.993Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2-max", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-2-max queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2-max", + "category": "text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a868a0f/zL7LNUIqnPPhZNy_PtHJq_330f66115240460788092cb9523b6aba.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2-max", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2-max/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2MaxInput": { + "x-fal-order-properties": [ + "prompt", + "image_size", + "seed", + "safety_tolerance", + "enable_safety_checker", + "output_format", + "sync_mode" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A lavish, baroque-style image of a powerful sorceress in her arcane study. 
She is dressed in robes of deep emerald velvet embroidered with gold thread and shimmering beetle wings, holding a staff topped with a glowing, swirling galaxy trapped in a crystal orb. She stands before a massive oak desk covered in open grimoires with illuminated pages showing alchemical diagrams, bubbling potions in glass alembics, and a sleeping pseudodragon curled around a stack of scrolls. The room is filled with curiosities: shelves of leather-bound books, celestial globes, and dried magical herbs hanging from the ceiling. The lighting is chiaroscuro, from a large fireplace with green flames and a magical candelabra floating in mid-air. The brushwork is visible and textured, with rich, deep colors. The style is reminiscent of Rembrandt meets classic fantasy art. The words 'Flux 2 Max is available on fal' are written in elegant cursive font on the top of the image." + ], + "description": "The prompt to generate an image from.", + "type": "string", + "title": "Prompt" + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image.", + "title": "Image Size", + "default": "landscape_4_3" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "jpeg" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "safety_tolerance": { + "enum": [ + "1", + "2", + "3", + "4", + "5" + ], + "description": "The safety tolerance level for the generated image. 
1 being the most strict and 5 being the most permissive.", + "type": "string", + "title": "Safety Tolerance", + "default": "2" + }, + "enable_safety_checker": { + "description": "Whether to enable the safety checker.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "seed": { + "description": "The seed to use for the generation.", + "type": "integer", + "title": "Seed" + } + }, + "title": "Flux2MaxTextToImageInput", + "required": [ + "prompt" + ] + }, + "Flux2MaxOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/flux2/max.jpg" + } + ] + ], + "description": "The generated images.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "seed": { + "description": "The seed used for the generation.", + "type": "integer", + "title": "Seed" + } + }, + "title": "Flux2MaxOutput", + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "title": "Height", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "title": "Width", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "ImageFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "ImageFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2-max/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-max/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2-max": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2MaxInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-max/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2MaxOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/longcat-image", + "metadata": { + "display_name": "Longcat Image", + "category": "text-to-image", + "description": "LongCat image is a 6B parameter model excelling at multilingual text rendering, photorealism and deployment efficiency.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:03.457Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a851b6e/FRYO5MwzWRxk8dt0FR5Ib_bb0a8072b65d4af59dbb8fccdd52b02b.jpg", + "model_url": "https://fal.run/fal-ai/longcat-image", + "license_type": "commercial", + "date": "2025-12-05T17:12:42.231Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/longcat-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/longcat-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/longcat-image", + "category": "text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a851b6e/FRYO5MwzWRxk8dt0FR5Ib_bb0a8072b65d4af59dbb8fccdd52b02b.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/longcat-image", + "documentationUrl": "https://fal.ai/models/fal-ai/longcat-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LongcatImageInput": { + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "guidance_scale", + "seed", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "acceleration" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A lioness crouching in the tall dry grass of the Serengeti during golden hour, intense gaze, telephoto lens with shallow depth of field" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use.", + "default": "regular" + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "landscape_4_3" + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 28 + }, + "guidance_scale": { + "minimum": 1, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The guidance scale to use for the image generation.", + "default": 4.5 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + } + }, + "title": "TextToImageInput", + "required": [ + "prompt" + ] + }, + "LongcatImageOutput": { + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." 
+ }, + "images": { + "examples": [ + [ + { + "height": 768, + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/example_outputs/longcat_image/t2i.png", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "title": "TextToImageOutput", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "title": "Image", + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/longcat-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/longcat-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/longcat-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LongcatImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/longcat-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LongcatImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/bytedance/seedream/v4.5/text-to-image", + "metadata": { + "display_name": "Bytedance", + "category": "text-to-image", + "description": "A new-generation image creation model from ByteDance, Seedream 4.5 integrates image generation and image editing capabilities into a single, unified architecture.", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:42:06.172Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a84c771/1bdm3n1K8bRK-TzYjLF04_57edae699adf457a8a66ae6665a17e3a.jpg", + "model_url": "https://fal.run/fal-ai/bytedance/seedream/v4.5/text-to-image", + "license_type": "commercial", + "date": "2025-12-03T10:46:15.916Z", + "group": { + "key": "Seedream45", + "label": "Text to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/bytedance/seedream/v4.5/text-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/bytedance/seedream/v4.5/text-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/bytedance/seedream/v4.5/text-to-image", + "category": "text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a84c771/1bdm3n1K8bRK-TzYjLF04_57edae699adf457a8a66ae6665a17e3a.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/bytedance/seedream/v4.5/text-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/bytedance/seedream/v4.5/text-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "BytedanceSeedreamV45TextToImageInput": { + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_images", + "max_images", + "seed", + "sync_mode", + "enable_safety_checker" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A selfie of a cat, with the cat as the protagonist. The setting is twilight at the Eiffel Tower. The cat is happy, holding a piece of baklava in its paw. The photo has a slight motion blur and is slightly overexposed. From a selfie angle, with a bit of motion blur, the overall image presents a sense of calm madness. The text \"Seedream 4.5 is on fal\" should be written on the picture at the top in clearly visible font and crisp lettering. The image has a 4:3 aspect ratio" + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt used to generate the image" + }, + "num_images": { + "minimum": 1, + "maximum": 6, + "type": "integer", + "title": "Num Images", + "description": "Number of separate model generations to be run with the prompt.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9", + "auto_2K", + "auto_4K" + ], + "type": "string" + } + ], + "description": "The size of the generated image. Width and height must be between 1920 and 4096, or total number of pixels must be between 2560*1440 and 4096*4096.", + "title": "Image Size", + "examples": [ + "auto_2K" + ], + "default": { + "height": 2048, + "width": 2048 + } + }, + "max_images": { + "minimum": 1, + "maximum": 6, + "type": "integer", + "title": "Max Images", + "description": "If set to a number greater than one, enables multi-image generation. The model will potentially return up to `max_images` images every generation, and in total, `num_images` generations will be carried out. In total, the number of images generated will be between `num_images` and `max_images*num_images`.", + "default": 1 + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_safety_checker": { + "examples": [ + true + ], + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed to control the stochasticity of image generation." 
+ } + }, + "title": "SeedDream45T2IInput", + "required": [ + "prompt" + ] + }, + "BytedanceSeedreamV45TextToImageOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/seedreamv45/seedream_v45_t2i_output.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "Generated images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "examples": [ + 42 + ], + "title": "Seed", + "type": "integer", + "description": "Seed used for generation" + } + }, + "title": "SeedDream45T2IOutput", + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/bytedance/seedream/v4.5/text-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedream/v4.5/text-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedream/v4.5/text-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BytedanceSeedreamV45TextToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedream/v4.5/text-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BytedanceSeedreamV45TextToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/vidu/q2/text-to-image", + "metadata": { + "display_name": "Vidu", + "category": "text-to-image", + "description": "Use vidu Text-to-Image to turn your prompts into reality.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:07.233Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a85892d/ODEhamwEyaZ8NSG6ShAdS_055c7fbf76db47deb35ee6042014bc91.jpg", + "model_url": "https://fal.run/fal-ai/vidu/q2/text-to-image", + "license_type": "commercial", + "date": "2025-12-02T15:12:08.030Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/vidu/q2/text-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/vidu/q2/text-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/vidu/q2/text-to-image", + "category": "text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a85892d/ODEhamwEyaZ8NSG6ShAdS_055c7fbf76db47deb35ee6042014bc91.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/vidu/q2/text-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/vidu/q2/text-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ViduQ2TextToImageInput": { + "title": "TextToImageRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A majestic dragon perches on the mountaintop, its eyes fixed intently on a small baby dragon." + ], + "title": "Prompt", + "type": "string", + "maxLength": 1500, + "description": "Text prompt for video generation, max 1500 characters" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the output video", + "default": "16:9" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for generation" + } + }, + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "seed" + ], + "required": [ + "prompt" + ] + }, + "ViduQ2TextToImageOutput": { + "title": "TextToImageOutput", + "type": "object", + "properties": { + "image": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/videos/general-1-2025-12-02T14_55_54Z.png" + } + ], + "title": "Image", + "description": "The edited image", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + } + }, + "x-fal-order-properties": [ + "image" + ], + "required": [ + "image" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/vidu/q2/text-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/vidu/q2/text-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/vidu/q2/text-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ViduQ2TextToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/vidu/q2/text-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ViduQ2TextToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/z-image/turbo/lora", + "metadata": { + "display_name": "Z-Image Turbo", + "category": "text-to-image", + "description": "Text-to-Image endpoint with LoRA support for Z-Image Turbo, a super fast text-to-image model of 6B parameters developed by Tongyi-MAI.", + "status": "active", + "tags": [ + "z-image", + "lora", + "fast" + ], + "updated_at": "2026-01-26T21:42:08.052Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8499c8/NoYhi_0XiRWB_GyGXzXdh_c756d700b1a8445a90bcf47126bc8459.jpg", + "model_url": "https://fal.run/fal-ai/z-image/turbo/lora", + "license_type": "commercial", + "date": "2025-12-01T21:00:30.834Z", + "group": { + "key": "z-image-turbo", + "label": "Text to Image (LoRA)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/z-image-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/z-image-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/z-image/turbo/lora", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/z-image/turbo/lora queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/z-image/turbo/lora", + "category": "text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8499c8/NoYhi_0XiRWB_GyGXzXdh_c756d700b1a8445a90bcf47126bc8459.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/z-image/turbo/lora", + "documentationUrl": "https://fal.ai/models/fal-ai/z-image/turbo/lora/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + 
"IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ZImageTurboLoraInput": { + "title": "ZImageTurboTextToImageLoRAInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A hyper-realistic, close-up portrait of a tribal elder from the Omo Valley, painted with intricate white chalk patterns and adorned with a headdress made of dried flowers, seed pods, and rusted bottle caps. The focus is razor-sharp on the texture of the skin, showing every pore, wrinkle, and scar that tells a story of survival. The background is a blurred, smoky hut interior, with the warm glow of a cooking fire reflecting in the subject's dark, soulful eyes. Shot on a Leica M6 with Kodak Portra 400 film grain aesthetic." + ], + "description": "The prompt to generate an image from.", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "maximum": 4, + "title": "Number of Images", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image.", + "title": "Image Size", + "default": "landscape_4_3" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "description": "The acceleration level to use.", + "type": "string", + "title": "Acceleration", + "default": "regular" + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "png" + }, + "loras": { + "description": "List of LoRA weights to apply (maximum 3).", + "type": "array", + "title": "Loras", + "items": { + "$ref": "#/components/schemas/LoRAInput" + }, + "default": [] + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "enable_prompt_expansion": { + "description": "Whether to enable prompt expansion. 
Note: this will increase the price by 0.0025 credits per request.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": false + }, + "num_inference_steps": { + "minimum": 1, + "description": "The number of inference steps to perform.", + "type": "integer", + "maximum": 8, + "title": "Number of Inference Steps", + "default": 8 + }, + "seed": { + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "type": "integer", + "title": "Seed" + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "acceleration", + "enable_prompt_expansion", + "loras" + ], + "required": [ + "prompt" + ] + }, + "ZImageTurboLoraOutput": { + "title": "ZImageTurboOutput", + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "examples": [ + [ + { + "height": 768, + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/example_outputs/z-image-turbo-output.png", + "width": 1024 + } + ] + ], + "description": "The generated image files info.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "seed": { + "description": "Seed of the generated Image. It will be the same value of the one passed in the input or the randomly generated that was used in case none was passed.", + "type": "integer", + "title": "Seed" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "description": "The timings of the generation process.", + "title": "Timings" + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Height", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Width", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "LoRAInput": { + "description": "LoRA weight configuration.", + "type": "object", + "properties": { + "path": { + "description": "URL, HuggingFace repo ID (owner/repo) to lora weights.", + "type": "string", + "title": "Path" + }, + "scale": { + "minimum": 0, + "description": "Scale factor for LoRA application (0.0 to 4.0).", + "type": "number", + "maximum": 4, + "title": "Scale", + "default": 1 + } + }, + "title": "LoRAInput", + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "ImageFile": { + "title": "ImageFile", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The 
size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/z-image/turbo/lora/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/z-image/turbo/lora/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/z-image/turbo/lora": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ZImageTurboLoraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/z-image/turbo/lora/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ZImageTurboLoraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ovis-image", + "metadata": { + "display_name": "Ovis Image", + "category": "text-to-image", + "description": "Ovis-Image is a 7B text-to-image model specifically optimized for quick, high quality text rendering.", + "status": "active", + "tags": [ + "ovis-image", + "artistic" + ], + "updated_at": "2026-01-26T21:42:09.411Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a849101/zu6l9lNp8GIuvAQyX7miT_0b2125b05401473bac38bf8d8bd40577.jpg", + "model_url": "https://fal.run/fal-ai/ovis-image", + "license_type": "commercial", + "date": "2025-11-29T18:45:42.568Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ovis-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ovis-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ovis-image", + "category": "text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a849101/zu6l9lNp8GIuvAQyX7miT_0b2125b05401473bac38bf8d8bd40577.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ovis-image", + "documentationUrl": "https://fal.ai/models/fal-ai/ovis-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "OvisImageInput": { + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "image_size", + "num_inference_steps", + "guidance_scale", + "seed", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "acceleration" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "The moment of digital upload, a human breaking apart into 3D voxels, leaving a grey world for a technicolor void, dynamic action shot, particle physics simulation, bright neon colors." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use.", + "default": "regular" + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "landscape_4_3" + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "guidance_scale": { + "minimum": 1, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The guidance scale to use for the image generation.", + "default": 5 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to generate an image from.", + "default": "" + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 28 + } + }, + "title": "TextToImageInput", + "required": [ + "prompt" + ] + }, + "OvisImageOutput": { + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." 
+ }, + "images": { + "examples": [ + [ + { + "height": 768, + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/example_outputs/ovis_image_output.png", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + } + }, + "title": "OvisImageOutput", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "title": "Image", + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/ovis-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ovis-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/ovis-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OvisImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ovis-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OvisImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/z-image/turbo", + "metadata": { + "display_name": "Z-Image Turbo", + "category": "text-to-image", + "description": "Z-Image Turbo is a super fast text-to-image model of 6B parameters developed by Tongyi-MAI.", + "status": "active", + "tags": [ + "turbo", + "z-image", + "fast" + ], + "updated_at": "2026-01-26T21:42:11.591Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/koala/Pvji5sM6n6oNIo-p0Xsn__245f3d4eb5df4952aa80ec74f4afd7f2.jpg", + "model_url": "https://fal.run/fal-ai/z-image/turbo", + "license_type": "commercial", + "date": "2025-11-26T14:38:10.381Z", + "group": { + "key": "z-image-turbo", + "label": "Text to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/z-image-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/z-image-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/z-image/turbo", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/z-image/turbo queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/z-image/turbo", + "category": "text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/koala/Pvji5sM6n6oNIo-p0Xsn__245f3d4eb5df4952aa80ec74f4afd7f2.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/z-image/turbo", + "documentationUrl": "https://fal.ai/models/fal-ai/z-image/turbo/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ZImageTurboInput": { + "title": "ZImageTurboTextToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A hyper-realistic, close-up portrait of a tribal elder from the Omo Valley, painted with intricate white chalk patterns and adorned with a headdress made of dried flowers, seed pods, and rusted bottle caps. The focus is razor-sharp on the texture of the skin, showing every pore, wrinkle, and scar that tells a story of survival. The background is a blurred, smoky hut interior, with the warm glow of a cooking fire reflecting in the subject's dark, soulful eyes. Shot on a Leica M6 with Kodak Portra 400 film grain aesthetic." + ], + "description": "The prompt to generate an image from.", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "maximum": 4, + "title": "Number of Images", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image.", + "title": "Image Size", + "default": "landscape_4_3" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "description": "The acceleration level to use.", + "type": "string", + "title": "Acceleration", + "default": "regular" + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "png" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "enable_prompt_expansion": { + "description": "Whether to enable prompt expansion. 
Note: this will increase the price by 0.0025 credits per request.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": false + }, + "num_inference_steps": { + "minimum": 1, + "description": "The number of inference steps to perform.", + "type": "integer", + "maximum": 8, + "title": "Number of Inference Steps", + "default": 8 + }, + "seed": { + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "type": "integer", + "title": "Seed" + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "acceleration", + "enable_prompt_expansion" + ], + "required": [ + "prompt" + ] + }, + "ZImageTurboOutput": { + "title": "ZImageTurboOutput", + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "examples": [ + [ + { + "height": 768, + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/example_outputs/z-image-turbo-output.png", + "width": 1024 + } + ] + ], + "description": "The generated image files info.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "seed": { + "description": "Seed of the generated Image. It will be the same value of the one passed in the input or the randomly generated that was used in case none was passed.", + "type": "integer", + "title": "Seed" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "description": "The timings of the generation process.", + "title": "Timings" + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Height", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Width", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "ImageFile": { + "title": "ImageFile", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/z-image/turbo/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/z-image/turbo/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/z-image/turbo": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ZImageTurboInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/z-image/turbo/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ZImageTurboOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2-lora-gallery/sepia-vintage", + "metadata": { + "display_name": "Flux 2 Lora Gallery", + "category": "text-to-image", + "description": "Applies sepia vintage effect to images", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:42:11.845Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/penguin/qyhINx709mN5xIBjraXXQ_af4667229d1d44b2912f6becc366d6d9.jpg", + "model_url": "https://fal.run/fal-ai/flux-2-lora-gallery/sepia-vintage", + "license_type": "commercial", + "date": "2025-11-25T19:58:53.285Z", + "group": { + "key": "flux-2-lora-gallery", + "label": "Sepia Vintage" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2-lora-gallery/sepia-vintage", + "version": "1.0.0", + "description": "The 
OpenAPI schema for the fal-ai/flux-2-lora-gallery/sepia-vintage queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2-lora-gallery/sepia-vintage", + "category": "text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/penguin/qyhINx709mN5xIBjraXXQ_af4667229d1d44b2912f6becc366d6d9.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2-lora-gallery/sepia-vintage", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2-lora-gallery/sepia-vintage/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2LoraGallerySepiaVintageInput": { + "title": "SepiaVintageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A portrait of a Victorian lady in sepia tones", + "Vintage street scene with old cars, sepia photography", + "Old family photo in sepia vintage style" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate a sepia vintage photography style image." + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 4, + "description": "Number of images to generate", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "landscape_4_3" + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "title": "Acceleration", + "type": "string", + "description": "Acceleration level for image generation. 'regular' balances speed and quality.", + "default": "regular" + }, + "lora_scale": { + "minimum": 0, + "title": "Lora Scale", + "type": "number", + "maximum": 2, + "description": "The strength of the sepia vintage photography effect.", + "default": 1 + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output image", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and won't be saved in history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance Scale", + "type": "number", + "maximum": 20, + "description": "The CFG (Classifier Free Guidance) scale. 
Controls how closely the model follows the prompt.", + "default": 2.5 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. Same seed with same prompt will produce same result." + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker for the generated image.", + "default": true + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps to perform.", + "default": 40 + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "guidance_scale", + "num_inference_steps", + "acceleration", + "seed", + "sync_mode", + "enable_safety_checker", + "output_format", + "num_images", + "lora_scale" + ], + "description": "Input model for Sepia Vintage Photography endpoint - Generate vintage sepia style images", + "required": [ + "prompt" + ] + }, + "Flux2LoraGallerySepiaVintageOutput": { + "title": "SepiaVintageOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation" + }, + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/zebra/qOMZKBNStQgVkaKB9SLY4.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated sepia vintage photography style images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + } + }, + "x-fal-order-properties": [ + "images", + "seed", + "prompt" + ], + "required": [ + "images", + "seed", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "description": "The height of the image in pixels.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "description": "The mime type of the file.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "description": "The width of the image in pixels.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2-lora-gallery/sepia-vintage/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-lora-gallery/sepia-vintage/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2-lora-gallery/sepia-vintage": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2LoraGallerySepiaVintageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-lora-gallery/sepia-vintage/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2LoraGallerySepiaVintageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2-lora-gallery/satellite-view-style", + "metadata": { + "display_name": "Flux 2 Lora Gallery", + "category": "text-to-image", + "description": "Generates satellite/aerial view style images", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:42:12.128Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/kangaroo/BPMCo8jjOVngBxqYDK7sK_9b09e8f629d74207bcb6cab302cff659.jpg", + "model_url": "https://fal.run/fal-ai/flux-2-lora-gallery/satellite-view-style", + "license_type": "commercial", + "date": "2025-11-25T19:47:02.458Z", + "group": { + "key": "flux-2-lora-gallery", + "label": "Satellite View Style" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2-lora-gallery/satellite-view-style", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-2-lora-gallery/satellite-view-style queue.", + "x-fal-metadata": { + "endpointId": 
"fal-ai/flux-2-lora-gallery/satellite-view-style", + "category": "text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/kangaroo/BPMCo8jjOVngBxqYDK7sK_9b09e8f629d74207bcb6cab302cff659.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2-lora-gallery/satellite-view-style", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2-lora-gallery/satellite-view-style/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2LoraGallerySatelliteViewStyleInput": { + "title": "SatelliteViewStyleInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Aerial view of a city from above, satellite style", + "Satellite view of mountains and valleys", + "Bird's eye view of a coastal town" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate a satellite/aerial view style image." + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 4, + "description": "Number of images to generate", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "landscape_4_3" + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "title": "Acceleration", + "type": "string", + "description": "Acceleration level for image generation. 'regular' balances speed and quality.", + "default": "regular" + }, + "lora_scale": { + "minimum": 0, + "title": "Lora Scale", + "type": "number", + "maximum": 2, + "description": "The strength of the satellite view style effect.", + "default": 1 + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output image", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and won't be saved in history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance Scale", + "type": "number", + "maximum": 20, + "description": "The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.", + "default": 2.5 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. 
Same seed with same prompt will produce same result." + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker for the generated image.", + "default": true + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps to perform.", + "default": 40 + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "guidance_scale", + "num_inference_steps", + "acceleration", + "seed", + "sync_mode", + "enable_safety_checker", + "output_format", + "num_images", + "lora_scale" + ], + "description": "Input model for Satellite View Style endpoint - Generate satellite/aerial view style images", + "required": [ + "prompt" + ] + }, + "Flux2LoraGallerySatelliteViewStyleOutput": { + "title": "SatelliteViewStyleOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation" + }, + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/zebra/_rfPAtMbIRCRgKIbG990u.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated satellite view style images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + } + }, + "x-fal-order-properties": [ + "images", + "seed", + "prompt" + ], + "required": [ + "images", + "seed", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "description": "The height of the image in pixels.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "description": "The mime type of the file.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "description": "The width of the image in pixels.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2-lora-gallery/satellite-view-style/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-lora-gallery/satellite-view-style/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2-lora-gallery/satellite-view-style": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2LoraGallerySatelliteViewStyleInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-lora-gallery/satellite-view-style/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2LoraGallerySatelliteViewStyleOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2-lora-gallery/realism", + "metadata": { + "display_name": "Flux 2 Lora Gallery", + "category": "text-to-image", + "description": "Makes images more photorealistic and natural", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:42:12.253Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/penguin/HJMvMFtI3pbgP7xP7L995_81e2ec28da9b49f9badb58560cb70088.jpg", + "model_url": "https://fal.run/fal-ai/flux-2-lora-gallery/realism", + "license_type": "commercial", + "date": "2025-11-25T19:45:02.910Z", + "group": { + "key": "flux-2-lora-gallery", + "label": "Realism" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2-lora-gallery/realism", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-2-lora-gallery/realism queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2-lora-gallery/realism", + "category": 
"text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/penguin/HJMvMFtI3pbgP7xP7L995_81e2ec28da9b49f9badb58560cb70088.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2-lora-gallery/realism", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2-lora-gallery/realism/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2LoraGalleryRealismInput": { + "title": "RealismInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A portrait of a woman with natural lighting", + "Street photography of a busy city scene", + "Landscape photography of mountains at golden hour", + "Documentary style photo of everyday life" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate a realistic image with natural lighting and authentic details." + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 4, + "description": "Number of images to generate", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "landscape_4_3" + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "title": "Acceleration", + "type": "string", + "description": "Acceleration level for image generation. 'regular' balances speed and quality.", + "default": "regular" + }, + "lora_scale": { + "minimum": 0, + "title": "Lora Scale", + "type": "number", + "maximum": 2, + "description": "The strength of the realism effect.", + "default": 1 + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output image", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and won't be saved in history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance Scale", + "type": "number", + "maximum": 20, + "description": "The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.", + "default": 2.5 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. Same seed with same prompt will produce same result." 
+ }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker for the generated image.", + "default": true + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps to perform.", + "default": 40 + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "guidance_scale", + "num_inference_steps", + "acceleration", + "seed", + "sync_mode", + "enable_safety_checker", + "output_format", + "num_images", + "lora_scale" + ], + "description": "Input model for Realism endpoint - Generate realistic style images", + "required": [ + "prompt" + ] + }, + "Flux2LoraGalleryRealismOutput": { + "title": "RealismOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation" + }, + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/koala/zjTCkqpflaiokwqCX3fKk.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated realistic style images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + } + }, + "x-fal-order-properties": [ + "images", + "seed", + "prompt" + ], + "required": [ + "images", + "seed", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "description": "The height of the image in pixels.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "description": "The mime type of the file.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "description": "The width of the image in pixels.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2-lora-gallery/realism/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-lora-gallery/realism/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2-lora-gallery/realism": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2LoraGalleryRealismInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-lora-gallery/realism/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2LoraGalleryRealismOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2-lora-gallery/hdr-style", + "metadata": { + "display_name": "Flux 2 Lora Gallery", + "category": "text-to-image", + "description": "HDR surrealistic effect with intense colors", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:42:12.644Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/rabbit/XhRCpnCkio1p3CnaNTQyQ_839690f21013420f8d0bb5d101fd329d.jpg", + "model_url": "https://fal.run/fal-ai/flux-2-lora-gallery/hdr-style", + "license_type": "commercial", + "date": "2025-11-25T19:38:44.387Z", + "group": { + "key": "flux-2-lora-gallery", + "label": "Hdr Style" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2-lora-gallery/hdr-style", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-2-lora-gallery/hdr-style queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2-lora-gallery/hdr-style", + "category": "text-to-image", + "thumbnailUrl": 
"https://v3b.fal.media/files/b/rabbit/XhRCpnCkio1p3CnaNTQyQ_839690f21013420f8d0bb5d101fd329d.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2-lora-gallery/hdr-style", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2-lora-gallery/hdr-style/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2LoraGalleryHdrStyleInput": { + "title": "HdrStyleInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A wonderful tropical landscape", + "A mystical forest with glowing mushrooms, intricate details and volumetric lighting", + "Futuristic cityscape at night, photorealistic rendering with ray tracing", + "Portrait with ultra detailed skin texture, natural lighting, 8K quality" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an HDR style image. The trigger word 'Hyp3rRe4list1c' will be automatically prepended." + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 4, + "description": "Number of images to generate", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "landscape_4_3" + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "title": "Acceleration", + "type": "string", + "description": "Acceleration level for image generation. 'regular' balances speed and quality.", + "default": "regular" + }, + "lora_scale": { + "minimum": 0, + "title": "Lora Scale", + "type": "number", + "maximum": 2, + "description": "The strength of the HDR style effect.", + "default": 1 + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output image", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and won't be saved in history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance Scale", + "type": "number", + "maximum": 20, + "description": "The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.", + "default": 2.5 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. 
Same seed with same prompt will produce same result." + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker for the generated image.", + "default": true + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps to perform.", + "default": 40 + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "guidance_scale", + "num_inference_steps", + "acceleration", + "seed", + "sync_mode", + "enable_safety_checker", + "output_format", + "num_images", + "lora_scale" + ], + "description": "Input model for HDR Style endpoint - Generate HDR style images with vibrant colors", + "required": [ + "prompt" + ] + }, + "Flux2LoraGalleryHdrStyleOutput": { + "title": "HdrStyleOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation" + }, + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/penguin/3gPcw3pH__e6teqtOEku9.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated HDR style images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + } + }, + "x-fal-order-properties": [ + "images", + "seed", + "prompt" + ], + "required": [ + "images", + "seed", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "description": "The height of the image in pixels.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "description": "The mime type of the file.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "description": "The width of the image in pixels.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2-lora-gallery/hdr-style/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-lora-gallery/hdr-style/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2-lora-gallery/hdr-style": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2LoraGalleryHdrStyleInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-lora-gallery/hdr-style/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2LoraGalleryHdrStyleOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2-lora-gallery/digital-comic-art", + "metadata": { + "display_name": "Flux 2 Lora Gallery", + "category": "text-to-image", + "description": "Transforms images into comic book style", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:42:13.011Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/koala/-C_LrtBKqv1kuA3PkybTA_60142c0dc6da4ba8905165de61daacaf.jpg", + "model_url": "https://fal.run/fal-ai/flux-2-lora-gallery/digital-comic-art", + "license_type": "commercial", + "date": "2025-11-25T19:32:21.201Z", + "group": { + "key": "flux-2-lora-gallery", + "label": "Digital Comic Art" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2-lora-gallery/digital-comic-art", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-2-lora-gallery/digital-comic-art queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2-lora-gallery/digital-comic-art", + "category": 
"text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/koala/-C_LrtBKqv1kuA3PkybTA_60142c0dc6da4ba8905165de61daacaf.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2-lora-gallery/digital-comic-art", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2-lora-gallery/digital-comic-art/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2LoraGalleryDigitalComicArtInput": { + "title": "DigitalComicArtInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A superhero flying over a city, d1g1t4l comic art style", + "Action scene with explosion effects, digital comic illustration", + "Comic book panel with dramatic lighting" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate a digital comic art style image. Use 'd1g1t4l' trigger word for best results." + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 4, + "description": "Number of images to generate", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "landscape_4_3" + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "title": "Acceleration", + "type": "string", + "description": "Acceleration level for image generation. 'regular' balances speed and quality.", + "default": "regular" + }, + "lora_scale": { + "minimum": 0, + "title": "Lora Scale", + "type": "number", + "maximum": 2, + "description": "The strength of the digital comic art effect.", + "default": 1 + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output image", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and won't be saved in history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance Scale", + "type": "number", + "maximum": 20, + "description": "The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.", + "default": 2.5 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. 
Same seed with same prompt will produce same result." + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker for the generated image.", + "default": true + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps to perform.", + "default": 40 + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "guidance_scale", + "num_inference_steps", + "acceleration", + "seed", + "sync_mode", + "enable_safety_checker", + "output_format", + "num_images", + "lora_scale" + ], + "description": "Input model for Digital Comic Art endpoint - Generate digital comic art style images", + "required": [ + "prompt" + ] + }, + "Flux2LoraGalleryDigitalComicArtOutput": { + "title": "DigitalComicArtOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation" + }, + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/tiger/9_onjLEABvvZl9r-R6w_w.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated digital comic art style images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + } + }, + "x-fal-order-properties": [ + "images", + "seed", + "prompt" + ], + "required": [ + "images", + "seed", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "description": "The height of the image in pixels.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "description": "The mime type of the file.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "description": "The width of the image in pixels.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2-lora-gallery/digital-comic-art/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-lora-gallery/digital-comic-art/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2-lora-gallery/digital-comic-art": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2LoraGalleryDigitalComicArtInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-lora-gallery/digital-comic-art/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2LoraGalleryDigitalComicArtOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2-lora-gallery/ballpoint-pen-sketch", + "metadata": { + "display_name": "Flux 2 Lora Gallery", + "category": "text-to-image", + "description": "Ballpoint pen sketch drawing style", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:42:13.145Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/kangaroo/G7xRP2MFaynDxvtM3tDIf_8f3d89b67c234182bd704e3349162883.jpg", + "model_url": "https://fal.run/fal-ai/flux-2-lora-gallery/ballpoint-pen-sketch", + "license_type": "commercial", + "date": "2025-11-25T19:30:43.105Z", + "group": { + "key": "flux-2-lora-gallery", + "label": "Ballpoint Pen Sketch" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2-lora-gallery/ballpoint-pen-sketch", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-2-lora-gallery/ballpoint-pen-sketch queue.", + "x-fal-metadata": { + "endpointId": 
"fal-ai/flux-2-lora-gallery/ballpoint-pen-sketch", + "category": "text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/kangaroo/G7xRP2MFaynDxvtM3tDIf_8f3d89b67c234182bd704e3349162883.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2-lora-gallery/ballpoint-pen-sketch", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2-lora-gallery/ballpoint-pen-sketch/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2LoraGalleryBallpointPenSketchInput": { + "title": "BallpointPenSketchInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Portrait of a person, b4llp01nt ballpoint pen sketch", + "Urban landscape sketch in ballpoint pen style", + "Detailed hand-drawn illustration of a building" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate a ballpoint pen sketch style image. Use 'b4llp01nt' trigger word for best results." + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 4, + "description": "Number of images to generate", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "landscape_4_3" + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "title": "Acceleration", + "type": "string", + "description": "Acceleration level for image generation. 'regular' balances speed and quality.", + "default": "regular" + }, + "lora_scale": { + "minimum": 0, + "title": "Lora Scale", + "type": "number", + "maximum": 2, + "description": "The strength of the ballpoint pen sketch effect.", + "default": 1 + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output image", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and won't be saved in history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance Scale", + "type": "number", + "maximum": 20, + "description": "The CFG (Classifier Free Guidance) scale. 
Controls how closely the model follows the prompt.", + "default": 2.5 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. Same seed with same prompt will produce same result." + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker for the generated image.", + "default": true + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps to perform.", + "default": 40 + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "guidance_scale", + "num_inference_steps", + "acceleration", + "seed", + "sync_mode", + "enable_safety_checker", + "output_format", + "num_images", + "lora_scale" + ], + "description": "Input model for Ballpoint Pen Sketch endpoint - Generate ballpoint pen sketch style images", + "required": [ + "prompt" + ] + }, + "Flux2LoraGalleryBallpointPenSketchOutput": { + "title": "BallpointPenSketchOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation" + }, + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/lion/6Eq6ijrWRcWsa6ivqdlL1.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated ballpoint pen sketch style images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation" + } + }, + "x-fal-order-properties": [ + "images", + "seed", + "prompt" + ], + "required": [ + "images", + "seed", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "description": "The height of the image in pixels.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "description": "The mime type of the file.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
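Worth flagging: the digital-comic-art and ballpoint-pen-sketch prompt descriptions above each name a client-side trigger word ('d1g1t4l', 'b4llp01nt') for best results, while hdr-style states that its trigger ('Hyp3rRe4list1c') is prepended automatically server-side. A hypothetical helper that applies the documented mapping before submitting; the table below is read off the schema descriptions, not served by any fal API.

// Hypothetical client-side helper: prepend the trigger words that the
// schema descriptions above document for these LoRA-gallery endpoints.
// hdr-style is omitted because its schema says the trigger is added server-side.
const TRIGGER_WORDS: Record<string, string> = {
  'fal-ai/flux-2-lora-gallery/digital-comic-art': 'd1g1t4l',
  'fal-ai/flux-2-lora-gallery/ballpoint-pen-sketch': 'b4llp01nt',
}

function withTriggerWord(endpointId: string, prompt: string): string {
  const trigger = TRIGGER_WORDS[endpointId]
  return trigger && !prompt.includes(trigger) ? `${trigger} ${prompt}` : prompt
}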
+ }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "description": "The width of the image in pixels.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2-lora-gallery/ballpoint-pen-sketch/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-lora-gallery/ballpoint-pen-sketch/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2-lora-gallery/ballpoint-pen-sketch": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2LoraGalleryBallpointPenSketchInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-lora-gallery/ballpoint-pen-sketch/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2LoraGalleryBallpointPenSketchOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2-flex", + "metadata": { + "display_name": "Flux 2 Flex", + "category": "text-to-image", + "description": "Text-to-image generation with FLUX.2 [flex] from Black Forest Labs. Features adjustable inference steps and guidance scale for fine-tuned control. 
Enhanced typography and text rendering capabilities.", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:42:14.045Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/panda/LqyVE8NElm_vf-t27Yfkz_6c1dd3323df343e4a3ec968d8f67024c.jpg", + "model_url": "https://fal.run/fal-ai/flux-2-flex", + "license_type": "commercial", + "date": "2025-11-25T02:35:27.724Z", + "group": { + "key": "Flex", + "label": "Text to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2-flex", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-2-flex queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2-flex", + "category": "text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/panda/LqyVE8NElm_vf-t27Yfkz_6c1dd3323df343e4a3ec968d8f67024c.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2-flex", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2-flex/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2FlexInput": { + "title": "Flux2FlexTextToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A high-quality 3D render of a cute fluffy monster eating a giant donut; the fur simulation is incredibly detailed, the donut glaze is sticky and reflective, bright daylight lighting, shallow depth of field." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image.", + "title": "Image Size", + "default": "landscape_4_3" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "safety_tolerance": { + "enum": [ + "1", + "2", + "3", + "4", + "5" + ], + "title": "Safety Tolerance", + "type": "string", + "description": "The safety tolerance level for the generated image. 
1 being the most strict and 5 being the most permissive.", + "default": "2" + }, + "enable_prompt_expansion": { + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to expand the prompt using the model's own knowledge.", + "default": true + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 50, + "type": "integer", + "title": "Number of Inference Steps", + "description": "The number of inference steps to perform.", + "default": 28 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed to use for the generation." + }, + "guidance_scale": { + "minimum": 1.5, + "maximum": 10, + "type": "number", + "title": "Guidance Scale", + "description": "The guidance scale to use for the generation.", + "default": 3.5 + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "enable_prompt_expansion", + "seed", + "safety_tolerance", + "enable_safety_checker", + "output_format", + "sync_mode", + "guidance_scale", + "num_inference_steps" + ], + "required": [ + "prompt" + ] + }, + "Flux2FlexOutput": { + "title": "Flux2FlexOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/flux2_flex_t2i_output.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated images.", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for the generation." + } + }, + "x-fal-order-properties": [ + "images", + "seed" + ], + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "ImageFile": { + "title": "ImageFile", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the image" + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the image" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
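Flux2FlexInput above is the one schema in this batch that exposes both num_inference_steps (2-50, default 28) and a wider guidance_scale range (1.5-10, default 3.5), alongside the string-valued safety_tolerance. An illustrative request body that stays inside those documented bounds; the concrete values are placeholders, not recommendations from the schema.

// Illustrative fal-ai/flux-2-flex input; field names, enums, and bounds
// come from Flux2FlexInput above, the concrete values are assumptions.
const flexInput = {
  prompt: 'A minimalist poster with the word "LAUNCH" in bold sans-serif type',
  image_size: 'landscape_16_9',
  num_inference_steps: 28, // schema default; valid range is 2-50
  guidance_scale: 3.5, // schema default; valid range is 1.5-10
  safety_tolerance: '2', // strings "1"-"5", 1 strictest, 5 most permissive
  output_format: 'png',
}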
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2-flex/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-flex/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2-flex": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2FlexInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-flex/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2FlexOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/gemini-3-pro-image-preview", + "metadata": { + "display_name": "Gemini 3 Pro Image Preview", + "category": "text-to-image", + "description": "Nano Banana Pro (a.k.a Nano Banana 2) is Google's new state-of-the-art image generation and editing model", + "status": "active", + "tags": [ + "realism", + "typography" + ], + "updated_at": "2026-01-26T21:42:20.642Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/koala/Neuz3jAvI1MSVxj3tCrTU_afda4e7f4bc14945a716e864e1f609d8.jpg", + "model_url": "https://fal.run/fal-ai/gemini-3-pro-image-preview", + "license_type": "commercial", + "date": "2025-11-20T14:28:35.485Z", + "group": { + "key": "Gemini-3-Pro", + "label": "Text to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/gemini-3-pro-image-preview", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/gemini-3-pro-image-preview queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/gemini-3-pro-image-preview", + "category": "text-to-image", + "thumbnailUrl": 
"https://v3b.fal.media/files/b/koala/Neuz3jAvI1MSVxj3tCrTU_afda4e7f4bc14945a716e864e1f609d8.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/gemini-3-pro-image-preview", + "documentationUrl": "https://fal.ai/models/fal-ai/gemini-3-pro-image-preview/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Gemini3ProImagePreviewInput": { + "title": "NanoBananaTextToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "An action shot of a black lab swimming in an inground suburban swimming pool. The camera is placed meticulously on the water line, dividing the image in half, revealing both the dogs head above water holding a tennis ball in it's mouth, and it's paws paddling underwater." + ], + "title": "Prompt", + "minLength": 3, + "description": "The text prompt to generate an image from.", + "type": "string", + "maxLength": 50000 + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Number of Images", + "description": "The number of images to generate.", + "default": 1 + }, + "enable_web_search": { + "title": "Enable Web Search", + "type": "boolean", + "description": "Enable web search for the image generation task. This will allow the model to use the latest information from the web to generate the image.", + "default": false + }, + "resolution": { + "enum": [ + "1K", + "2K", + "4K" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the image to generate.", + "default": "1K" + }, + "aspect_ratio": { + "enum": [ + "21:9", + "16:9", + "3:2", + "4:3", + "5:4", + "1:1", + "4:5", + "3:4", + "2:3", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated image.", + "default": "1:1" + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed for the random number generator." + }, + "limit_generations": { + "title": "Limit Generations", + "type": "boolean", + "description": "Experimental parameter to limit the number of generations from each round of prompting to 1. 
Set to `True` to disregard any instructions in the prompt regarding the number of images to generate.", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "num_images", + "seed", + "aspect_ratio", + "output_format", + "sync_mode", + "resolution", + "limit_generations", + "enable_web_search" + ], + "required": [ + "prompt" + ] + }, + "Gemini3ProImagePreviewOutput": { + "title": "NanoBananaTextToImageOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "file_name": "nano-banana-t2i-output.png", + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/example_outputs/nano-banana-t2i-output.png" + } + ] + ], + "description": "The generated images.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "description": { + "title": "Description", + "type": "string", + "description": "The description of the generated images." + } + }, + "x-fal-order-properties": [ + "images", + "description" + ], + "required": [ + "images", + "description" + ] + }, + "ImageFile": { + "title": "ImageFile", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the image" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the image" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/gemini-3-pro-image-preview/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/gemini-3-pro-image-preview/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
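Unlike the Flux schemas, Gemini3ProImagePreviewInput above (and the identical NanoBananaProInput below) drops explicit width/height in favor of an aspect_ratio enum plus a 1K/2K/4K resolution switch. An illustrative body; every field and enum value appears in the schema above, while the concrete choices are placeholders.

// Illustrative fal-ai/gemini-3-pro-image-preview input; fields and enums
// come from Gemini3ProImagePreviewInput above, values are placeholders.
const geminiInput = {
  prompt: 'A product photo of a ceramic mug on a walnut desk, soft window light',
  aspect_ratio: '3:2', // one of the ten documented ratios
  resolution: '2K', // "1K" | "2K" | "4K"
  num_images: 1, // 1-4
  limit_generations: true, // experimental: one image per prompting round
  output_format: 'png',
}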
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/gemini-3-pro-image-preview": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Gemini3ProImagePreviewInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/gemini-3-pro-image-preview/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Gemini3ProImagePreviewOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/nano-banana-pro", + "metadata": { + "display_name": "Nano Banana Pro", + "category": "text-to-image", + "description": "Nano Banana Pro (a.k.a Nano Banana 2) is Google's new state-of-the-art image generation and editing model", + "status": "active", + "tags": [ + "realism", + "typography" + ], + "updated_at": "2026-01-26T21:42:20.897Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8691ce/SR0_u1zPJbx8jCIO6bJR0_8c83f0d66bbd48f3b55f825117941f84.jpg", + "model_url": "https://fal.run/fal-ai/nano-banana-pro", + "license_type": "commercial", + "date": "2025-11-20T14:24:45.001Z", + "group": { + "key": "Nano-Banana-Pro", + "label": "Text to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/nano-banana-pro", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/nano-banana-pro queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/nano-banana-pro", + "category": "text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8691ce/SR0_u1zPJbx8jCIO6bJR0_8c83f0d66bbd48f3b55f825117941f84.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/nano-banana-pro", + "documentationUrl": "https://fal.ai/models/fal-ai/nano-banana-pro/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "NanoBananaProInput": { + "x-fal-order-properties": [ + "prompt", + "num_images", + "seed", + "aspect_ratio", + "output_format", + "sync_mode", + "resolution", + "limit_generations", + "enable_web_search" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "An action shot of a black lab swimming in an inground suburban swimming pool. The camera is placed meticulously on the water line, dividing the image in half, revealing both the dog's head above water holding a tennis ball in its mouth, and its paws paddling underwater." + ], + "maxLength": 50000, + "type": "string", + "minLength": 3, + "description": "The text prompt to generate an image from.", + "title": "Prompt" + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "description": "The number of images to generate.", + "title": "Number of Images", + "default": 1 + }, + "enable_web_search": { + "description": "Enable web search for the image generation task. This will allow the model to use the latest information from the web to generate the image.", + "type": "boolean", + "title": "Enable Web Search", + "default": false + }, + "aspect_ratio": { + "enum": [ + "21:9", + "16:9", + "3:2", + "4:3", + "5:4", + "1:1", + "4:5", + "3:4", + "2:3", + "9:16" + ], + "description": "The aspect ratio of the generated image.", + "type": "string", + "title": "Aspect Ratio", + "default": "1:1" + }, + "resolution": { + "enum": [ + "1K", + "2K", + "4K" + ], + "description": "The resolution of the image to generate.", + "type": "string", + "title": "Resolution", + "default": "1K" + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "png" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "seed": { + "description": "The seed for the random number generator.", + "type": "integer", + "title": "Seed" + }, + "limit_generations": { + "description": "Experimental parameter to limit the number of generations from each round of prompting to 1. Set to `True` to disregard any instructions in the prompt regarding the number of images to generate.",
+ "type": "boolean", + "title": "Limit Generations", + "default": false + } + }, + "title": "NanoBananaTextToImageInput", + "required": [ + "prompt" + ] + }, + "NanoBananaProOutput": { + "x-fal-order-properties": [ + "images", + "description" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "file_name": "nano-banana-t2i-output.png", + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/example_outputs/nano-banana-t2i-output.png" + } + ] + ], + "description": "The generated images.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "description": { + "description": "The description of the generated images.", + "type": "string", + "title": "Description" + } + }, + "title": "NanoBananaTextToImageOutput", + "required": [ + "images", + "description" + ] + }, + "ImageFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "ImageFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/nano-banana-pro/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/nano-banana-pro/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully."
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/nano-banana-pro": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/NanoBananaProInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/nano-banana-pro/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/NanoBananaProOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "imagineart/imagineart-1.5-preview/text-to-image", + "metadata": { + "display_name": "Imagineart 1.5 Preview", + "category": "text-to-image", + "description": "ImagineArt 1.5 text-to-image model generates high-fidelity professional-grade visuals with lifelike realism, strong aesthetics, and text that actually reads correctly.", + "status": "active", + "tags": [ + "visuals", + "imagineart", + "realism", + "text" + ], + "updated_at": "2026-01-26T21:42:21.023Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/penguin/dzrNfvK_MHMcQvZSBju-R_9731ad76917e41e4bd48796e82fa9bc3.jpg", + "model_url": "https://fal.run/imagineart/imagineart-1.5-preview/text-to-image", + "license_type": "commercial", + "date": "2025-11-20T07:54:03.048Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for imagineart/imagineart-1.5-preview/text-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the imagineart/imagineart-1.5-preview/text-to-image queue.", + "x-fal-metadata": { + "endpointId": "imagineart/imagineart-1.5-preview/text-to-image", + "category": "text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/penguin/dzrNfvK_MHMcQvZSBju-R_9731ad76917e41e4bd48796e82fa9bc3.jpg", + "playgroundUrl": "https://fal.ai/models/imagineart/imagineart-1.5-preview/text-to-image", + "documentationUrl": "https://fal.ai/models/imagineart/imagineart-1.5-preview/text-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Imagineart15PreviewTextToImageInput": { + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "seed" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A high-angle, realistic photograph capturing a spontaneous moment of pure joy on a bright, sunny day. A young woman with long, wavy brown hair is sitting on the curb of an urban street, her head tilted all the way back as she laughs or shouts ecstatically up at the sky. She is wearing large black sunglasses, which have a bright glare from the sun, a white ribbed tank top with black trim, and black jeans. One of her hands is raised towards her face, fingers loosely curled near her sunglasses. The background is dominated by the strong graphic pattern of a black asphalt road with thick, white painted lines of a crosswalk. The lighting is harsh and direct, creating high contrast and deep shadows on the pavement, and brightly illuminating the woman's sun-kissed skin. The shot has a candid, in-the-moment feel, emphasizing the carefree and happy mood." + ], + "description": "Text prompt describing the desired image", + "type": "string", + "title": "Prompt" + }, + "aspect_ratio": { + "enum": [ + "1:1", + "16:9", + "9:16", + "4:3", + "3:4", + "3:1", + "1:3", + "3:2", + "2:3" + ], + "description": "Image aspect ratio: 1:1, 3:1, 1:3, 16:9, 9:16, 4:3, 3:4, 3:2, 2:3", + "type": "string", + "examples": [ + "1:1", + "3:1", + "1:3", + "16:9", + "9:16", + "4:3", + "3:4", + "3:2", + "2:3" + ], + "title": "Aspect Ratio", + "default": "1:1" + }, + "seed": { + "examples": [ + 0 + ], + "description": "Seed for the image generation", + "type": "integer", + "title": "Seed" + } + }, + "title": "ImagineArt_1_5_Input", + "required": [ + "prompt" + ] + }, + "Imagineart15PreviewTextToImageOutput": { + "x-fal-order-properties": [ + "images" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "height": 2048, + "content_type": "image/webp", + "url": "https://fal.media/files/tiger/ou4RvYdGLTWe2rMzHhrnE_generated_imagineart_1_5_2.webp", + "width": 2048 + } + ] + ], + "description": "Generated image", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image-Output" + } + } + }, + "title": "ImagineArt_1_5_Output", + "required": [ + "images" + ] + }, + "Image-Output": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size" + }, + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/imagineart/imagineart-1.5-preview/text-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/imagineart/imagineart-1.5-preview/text-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/imagineart/imagineart-1.5-preview/text-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Imagineart15PreviewTextToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/imagineart/imagineart-1.5-preview/text-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Imagineart15PreviewTextToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/emu-3.5-image/text-to-image", + "metadata": { + "display_name": "Emu 3.5 Image", + "category": "text-to-image", + "description": "Generate images from text using Emu 3.5 Image", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:27.792Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/elephant/vIz3smWaozr97-oRkQBRu_de31d4d13d5d47d99814d2afe57cde1f.jpg", + "model_url": "https://fal.run/fal-ai/emu-3.5-image/text-to-image", + "license_type": "commercial", + "date": "2025-11-01T02:28:53.205Z", + "group": { + "key": "emu-3.5", + "label": "Text to Image" + }, + "highlighted": false, + "kind": "inference", + "stream_url": "https://fal.ai/models/fal-ai/emu-3.5-image/text-to-image/stream", + "duration_estimate": 6, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/emu-3.5-image/text-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/emu-3.5-image/text-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/emu-3.5-image/text-to-image", + "category": "text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/elephant/vIz3smWaozr97-oRkQBRu_de31d4d13d5d47d99814d2afe57cde1f.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/emu-3.5-image/text-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/emu-3.5-image/text-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Emu35ImageTextToImageInput": { + "title": "Emu35ImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Capture an intimate close-up bathed in warm, soft, late-afternoon sunlight filtering into a quintessential 1960s kitchen. The focal point is a charmingly designed vintage package of all-purpose flour, resting invitingly on a speckled Formica countertop. The packaging itself evokes pure nostalgia: perhaps thick, slightly textured paper in a warm cream tone, adorned with simple, bold typography (a friendly serif or script) in classic red and blue \"ALL-PURPOSE FLOUR\", featuring a delightful illustration like a stylized sheaf of wheat or a cheerful baker character. In smaller bold print at the bottom of the package: \"NET WT 5 LBS (80 OZ) 2.27kg\". Focus sharply on the package details – the slightly soft edges of the paper bag, the texture of the vintage printing, the inviting \"All-Purpose Flour\" text. Subtle hints of the 1960s kitchen frame the shot – the chrome edge of the counter gleaming softly, a blurred glimpse of a pastel yellow ceramic tile backsplash, or the corner of a vintage metal canister set just out of focus. The shallow depth of field keeps attention locked on the beautifully designed package, creating an aesthetic rich in warmth, authenticity, and nostalgic appeal." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to create the image." + }, + "resolution": { + "enum": [ + "480p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the output image.", + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "21:9", + "16:9", + "4:3", + "3:2", + "1:1", + "2:3", + "3:4", + "9:16", + "9:21" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the output image.", + "default": "1:1" + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output image.", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "Whether to return the image in sync mode.", + "default": false + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed for the inference." + } + }, + "x-fal-order-properties": [ + "prompt", + "resolution", + "aspect_ratio", + "enable_safety_checker", + "seed", + "output_format", + "sync_mode" + ], + "required": [ + "prompt" + ] + }, + "Emu35ImageTextToImageOutput": { + "title": "Emu35Output", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "height": 880, + "file_name": "2_gRhwfsnmNKYtZ_dveyV.jpg", + "content_type": "image/jpeg", + "url": "https://v3b.fal.media/files/b/koala/UFe9ES9IGdp0N90JmCyd4.jpg", + "width": 1184 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The edited image.", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "seed": { + "examples": [ + 1815037768 + ], + "title": "Seed", + "type": "integer", + "description": "The seed for the inference." 
+ } + }, + "x-fal-order-properties": [ + "images", + "seed" + ], + "required": [ + "images", + "seed" + ] + }, + "ImageFile": { + "title": "ImageFile", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the image" + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the image" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/emu-3.5-image/text-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/emu-3.5-image/text-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/emu-3.5-image/text-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Emu35ImageTextToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/emu-3.5-image/text-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Emu35ImageTextToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "bria/fibo/generate", + "metadata": { + "display_name": "Fibo", + "category": "text-to-image", + "description": "SOTA Open source model trained on licensed data, transforming intent into structured control for precise, high-quality AI image generation in enterprise and agentic workflows.", + "status": "active", + "tags": [ + "bria", + "fibo", + "prompt-adherence" + ], + "updated_at": "2026-01-26T21:42:30.943Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/monkey/KZ0Rdf_H7CtVE7Gpqk7XB_430bc50472c44700aaee472dd73c18f1.jpg", + "model_url": "https://fal.run/bria/fibo/generate", + "license_type": "commercial", + "date": "2025-10-29T10:12:55.756Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for bria/fibo/generate", + "version": "1.0.0", + "description": "The OpenAPI schema for the bria/fibo/generate queue.", + "x-fal-metadata": { + "endpointId": "bria/fibo/generate", + "category": "text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/monkey/KZ0Rdf_H7CtVE7Gpqk7XB_430bc50472c44700aaee472dd73c18f1.jpg", + "playgroundUrl": "https://fal.ai/models/bria/fibo/generate", + "documentationUrl": "https://fal.ai/models/bria/fibo/generate/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FiboGenerateInput": { + "title": "GaiaInputModel", + "type": "object", + "properties": { + "prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Prompt", + "examples": [ + "A hyper-detailed, ultra-fluffy owl sitting in the trees at night, looking directly at the camera with wide, adorable, expressive eyes. Its feathers are soft and voluminous, catching the cool moonlight with subtle silver highlights. The owl’s gaze is curious and full of charm, giving it a whimsical, storybook-like personality." + ], + "description": "Prompt for image generation." + }, + "steps_num": { + "description": "Number of inference steps.", + "type": "integer", + "minimum": 20, + "title": "Steps Num", + "maximum": 50, + "default": 50 + }, + "aspect_ratio": { + "enum": [ + "1:1", + "2:3", + "3:2", + "3:4", + "4:3", + "4:5", + "5:4", + "9:16", + "16:9" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio. Options: 1:1, 2:3, 3:2, 3:4, 4:3, 4:5, 5:4, 9:16, 16:9", + "default": "1:1" + }, + "image_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Image Url", + "description": "Reference image (file or URL)." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If true, returns the image directly in the response (increases latency).", + "default": false + }, + "guidance_scale": { + "description": "Guidance scale for text.", + "type": "integer", + "minimum": 3, + "title": "Guidance Scale", + "maximum": 5, + "default": 5 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility.", + "default": 5555 + }, + "structured_prompt": { + "anyOf": [ + { + "$ref": "#/components/schemas/StructuredPrompt" + }, + { + "type": "null" + } + ], + "description": "The structured prompt to generate an image from." + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for image generation.", + "default": "" + } + }, + "x-fal-order-properties": [ + "prompt", + "structured_prompt", + "image_url", + "seed", + "steps_num", + "aspect_ratio", + "negative_prompt", + "guidance_scale", + "sync_mode" + ] + }, + "FiboGenerateOutput": { + "title": "GaiaOutputModel", + "type": "object", + "properties": { + "images": { + "title": "Images", + "type": "array", + "description": "Generated images.", + "items": { + "additionalProperties": true, + "type": "object" + }, + "default": [] + }, + "image": { + "description": "Generated image.", + "$ref": "#/components/schemas/Image" + }, + "structured_prompt": { + "title": "Structured Prompt", + "type": "object", + "additionalProperties": true, + "description": "Current prompt." + } + }, + "x-fal-order-properties": [ + "image", + "images", + "structured_prompt" + ], + "required": [ + "image", + "structured_prompt" + ] + }, + "StructuredPrompt": { + "title": "StructuredPrompt", + "type": "object", + "properties": { + "background_setting": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Background Setting", + "description": "The background setting of the image to be generated." + }, + "artistic_style": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Artistic Style", + "description": "The artistic style of the image to be generated." 
+ }, + "aesthetics": { + "anyOf": [ + { + "$ref": "#/components/schemas/Aesthetics" + }, + { + "type": "null" + } + ], + "description": "The aesthetics of the image to be generated." + }, + "text_render": { + "anyOf": [ + { + "type": "array", + "items": {} + }, + { + "type": "null" + } + ], + "title": "Text Render", + "description": "A list of text to be rendered in the image.", + "default": [] + }, + "objects": { + "anyOf": [ + { + "type": "array", + "items": { + "$ref": "#/components/schemas/PromptObject" + } + }, + { + "type": "null" + } + ], + "title": "Objects", + "description": "A list of objects in the image to be generated, along with their attributes and relationships to other objects in the image.", + "default": [] + }, + "style_medium": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Style Medium", + "description": "The style medium of the image to be generated." + }, + "photographic_characteristics": { + "anyOf": [ + { + "$ref": "#/components/schemas/PhotographicCharacteristics" + }, + { + "type": "null" + } + ], + "description": "The photographic characteristics of the image to be generated." + }, + "context": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Context", + "description": "The context of the image to be generated." + }, + "lighting": { + "anyOf": [ + { + "$ref": "#/components/schemas/Lighting" + }, + { + "type": "null" + } + ], + "description": "The lighting of the image to be generated." + }, + "short_description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Short Description", + "description": "A short description of the image to be generated." + } + }, + "x-fal-order-properties": [ + "short_description", + "objects", + "background_setting", + "lighting", + "aesthetics", + "photographic_characteristics", + "style_medium", + "text_render", + "context", + "artistic_style" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "examples": [ + 1024 + ], + "description": "The height of the image in pixels." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "examples": [ + 1024 + ], + "description": "The width of the image in pixels." 
+ } + }, + "required": [ + "url" + ] + }, + "Aesthetics": { + "title": "Aesthetics", + "type": "object", + "properties": { + "composition": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Composition", + "description": "The composition of the image to be generated." + }, + "mood_atmosphere": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Mood Atmosphere", + "description": "The mood and atmosphere of the image to be generated." + }, + "color_scheme": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Color Scheme", + "description": "The color scheme of the image to be generated." + } + }, + "x-fal-order-properties": [ + "composition", + "color_scheme", + "mood_atmosphere" + ] + }, + "PromptObject": { + "title": "PromptObject", + "type": "object", + "properties": { + "clothing": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Clothing", + "description": "The clothing of the object in the image." + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Description", + "description": "A description of the object to be generated." + }, + "skin_tone_and_texture": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Skin Tone And Texture", + "description": "The skin tone and texture of the object in the image." + }, + "appearance_details": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Appearance Details", + "description": "The appearance details of the object." + }, + "number_of_objects": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Number Of Objects", + "description": "The number of objects in the image." + }, + "expression": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Expression", + "description": "The expression of the object in the image." + }, + "pose": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Pose", + "description": "The pose of the object in the image." + }, + "shape_and_color": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Shape And Color", + "description": "The shape and color of the object." + }, + "relationship": { + "title": "Relationship", + "type": "string", + "description": "The relationship of the object to other objects in the image." + }, + "texture": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Texture", + "description": "The texture of the object." + }, + "gender": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Gender", + "description": "The gender of the object in the image." + }, + "relative_size": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Relative Size", + "description": "The relative size of the object in the image." + }, + "location": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Location", + "description": "The location of the object in the image." + }, + "orientation": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Orientation", + "description": "The orientation of the object in the image." 
+ }, + "action": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Action", + "description": "The action of the object in the image." + } + }, + "x-fal-order-properties": [ + "description", + "location", + "relationship", + "relative_size", + "shape_and_color", + "texture", + "appearance_details", + "number_of_objects", + "pose", + "expression", + "clothing", + "action", + "gender", + "skin_tone_and_texture", + "orientation" + ], + "required": [ + "relationship" + ] + }, + "PhotographicCharacteristics": { + "title": "PhotographicCharacteristics", + "type": "object", + "properties": { + "focus": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Focus", + "description": "The focus in the image to be generated." + }, + "lens_focal_length": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Lens Focal Length", + "description": "The focal length of the lens in the image to be generated." + }, + "camera_angle": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Camera Angle", + "description": "The angle of the camera in the image to be generated." + }, + "depth_of_field": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Depth Of Field", + "description": "The depth of field in the image to be generated." + } + }, + "x-fal-order-properties": [ + "depth_of_field", + "focus", + "camera_angle", + "lens_focal_length" + ] + }, + "Lighting": { + "title": "Lighting", + "type": "object", + "properties": { + "shadows": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Shadows", + "description": "The shadows in the image to be generated." + }, + "conditions": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Conditions", + "description": "The conditions of the lighting in the image to be generated." + }, + "direction": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Direction", + "description": "The direction of the lighting in the image to be generated." + } + }, + "x-fal-order-properties": [ + "conditions", + "direction", + "shadows" + ] + } + } + }, + "paths": { + "/bria/fibo/generate/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/fibo/generate/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/bria/fibo/generate": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FiboGenerateInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/fibo/generate/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FiboGenerateOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/piflow", + "metadata": { + "display_name": "Piflow", + "category": "text-to-image", + "description": "Use the faster speed of piflow to generate images with the same quality as slower models.", + "status": "active", + "tags": [ + "text-to-image" + ], + "updated_at": "2026-01-26T21:42:31.467Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/zebra/OMrCc16jyBF2eU2ViUG-f_bb0c75ca99a74bc28f3f45982f59cac1.jpg", + "model_url": "https://fal.run/fal-ai/piflow", + "license_type": "commercial", + "date": "2025-10-27T14:46:28.015Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/piflow", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/piflow queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/piflow", + "category": "text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/zebra/OMrCc16jyBF2eU2ViUG-f_bb0c75ca99a74bc28f3f45982f59cac1.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/piflow", + "documentationUrl": "https://fal.ai/models/fal-ai/piflow/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PiflowInput": { + "title": "PiQwenInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Photo of a coffee shop entrance featuring a chalkboard sign reading \"π-Qwen Coffee 😊 $2 per cup,\" with a neon light beside it displaying \"π-通义千问\". Next to it hangs a poster showing a beautiful Chinese woman, and beneath the poster is written \"e≈2.71828-18284-59045-23536-02874-71352\"."
+ ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 4, + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "\n The size of the generated image. You can choose between some presets or custom height and width\n that **must be multiples of 8**.\n ", + "default": "square_hd" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps to perform.", + "default": 8 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducible generation. If set to None, a random seed will be used." + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "num_images", + "output_format", + "enable_safety_checker", + "sync_mode" + ], + "required": [ + "prompt" + ] + }, + "PiflowOutput": { + "title": "PiQwenOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/monkey/hfFo8wc77eSDchDUDxFEi.jpg" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The URLs of the generated images.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + } + }, + "x-fal-order-properties": [ + "images", + "seed" + ], + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." 
+ }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/piflow/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/piflow/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/piflow": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PiflowInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/piflow/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PiflowOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/gpt-image-1-mini", + "metadata": { + "display_name": "GPT Image 1 Mini", + "category": "text-to-image", + "description": "GPT Image 1 mini combines OpenAI's advanced language capabilities, powered by GPT-5, with GPT Image 1 Mini for efficient image generation. 
", + "status": "active", + "tags": [ + "text-to-image" + ], + "updated_at": "2026-01-26T21:42:36.755Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/monkey/Z7Y5fVvUXaT1XzWGqVngY_f333390b078a4bed9491ccda3b9f0e90.jpg", + "model_url": "https://fal.run/fal-ai/gpt-image-1-mini", + "license_type": "commercial", + "date": "2025-10-21T22:06:41.680Z", + "group": { + "key": "GPT-Image-1-Mini", + "label": "Text to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/gpt-image-1-mini", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/gpt-image-1-mini queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/gpt-image-1-mini", + "category": "text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/monkey/Z7Y5fVvUXaT1XzWGqVngY_f333390b078a4bed9491ccda3b9f0e90.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/gpt-image-1-mini", + "documentationUrl": "https://fal.ai/models/fal-ai/gpt-image-1-mini/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "GptImage1MiniInput": { + "title": "TextToImageRequestMini", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A serene landscape with mountains reflecting in a crystal-clear lake at sunset, photorealistic style" + ], + "description": "The prompt for image generation", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "description": "Number of images to generate", + "type": "integer", + "minimum": 1, + "title": "Number of Images", + "examples": [ + 1 + ], + "maximum": 4, + "default": 1 + }, + "image_size": { + "enum": [ + "auto", + "1024x1024", + "1536x1024", + "1024x1536" + ], + "description": "Aspect ratio for the generated image", + "type": "string", + "title": "Image Size", + "default": "auto" + }, + "background": { + "enum": [ + "auto", + "transparent", + "opaque" + ], + "description": "Background for the generated image", + "type": "string", + "title": "Background", + "default": "auto" + }, + "quality": { + "enum": [ + "auto", + "low", + "medium", + "high" + ], + "description": "Quality for the generated image", + "type": "string", + "title": "Quality", + "default": "auto" + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "description": "Output format for the images", + "type": "string", + "title": "Output Format", + "default": "png" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "background", + "quality", + "num_images", + "output_format", + "sync_mode" + ], + "required": [ + "prompt" + ] + }, + "GptImage1MiniOutput": { + "title": "ImageResponseMini", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "height": 1024, + "file_name": "1EXVSVlSs4Yz5hKplrzTv_2595c4e8720f4c19bcbc3dd373b18065.jpg", + "content_type": "image/jpeg", + "url": "https://v3b.fal.media/files/b/elephant/1EXVSVlSs4Yz5hKplrzTv_2595c4e8720f4c19bcbc3dd373b18065.jpg", + "width": 1024 + } + ] + ], + "description": "The generated images.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + } + }, + "x-fal-order-properties": [ + "images" + ], + "required": [ + "images" + ] + }, + "ImageFile": { + "title": "ImageFile", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/gpt-image-1-mini/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/gpt-image-1-mini/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/gpt-image-1-mini": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GptImage1MiniInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/gpt-image-1-mini/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GptImage1MiniOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/reve/text-to-image", + "metadata": { + "display_name": "Reve", + "category": "text-to-image", + "description": "Reve’s text-to-image model generates detailed visual output that closely follows your instructions, with strong aesthetic quality and accurate text rendering.", + "status": "active", + "tags": [ + "text-to-image" + ], + "updated_at": "2026-01-26T21:42:38.593Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/panda/jNWFUrGVBR4OL3rF3KBZp_f7c2b693436142a3bd3bd012ee26342c.jpg", + "model_url": "https://fal.run/fal-ai/reve/text-to-image", + "license_type": "commercial", + "date": "2025-10-17T18:20:06.420Z", + "group": { + "key": "Reve", + "label": "Text To Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for 
fal-ai/reve/text-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/reve/text-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/reve/text-to-image", + "category": "text-to-image", + "thumbnailUrl": "https://v3b.fal.media/files/b/panda/jNWFUrGVBR4OL3rF3KBZp_f7c2b693436142a3bd3bd012ee26342c.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/reve/text-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/reve/text-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ReveTextToImageInput": { + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "num_images", + "output_format", + "sync_mode" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A serene mountain landscape at sunset with snow-capped peaks" + ], + "maxLength": 2560, + "minLength": 1, + "description": "The text description of the desired image.", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "description": "Number of images to generate", + "type": "integer", + "minimum": 1, + "maximum": 4, + "title": "Number of Images", + "examples": [ + 1 + ], + "default": 1 + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "3:2", + "2:3", + "4:3", + "3:4", + "1:1" + ], + "description": "The desired aspect ratio of the generated image.", + "type": "string", + "title": "Aspect Ratio", + "examples": [ + "16:9" + ], + "default": "3:2" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "description": "Output format for the generated image.", + "type": "string", + "title": "Output Format", + "examples": [ + "png" + ], + "default": "png" + } + }, + "description": "Input for Reve text-to-image generation", + "title": "ReveCreateInput", + "required": [ + "prompt" + ] + }, + "ReveTextToImageOutput": { + "x-fal-order-properties": [ + "images" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3b.fal.media/files/b/panda/-WnGcaJCtfrT6Q2oms97E.png" + } + ] + ], + "description": "The generated images", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + } + }, + "description": "Output for Reve text-to-image generation", + "title": "ReveCreateOutput", + "required": [ + "images" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + 
"file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/reve/text-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/reve/text-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/reve/text-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ReveTextToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/reve/text-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ReveTextToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/hunyuan-image/v3/text-to-image", + "metadata": { + "display_name": "Hunyuan Image", + "category": "text-to-image", + "description": "Leverage the state-of-the-art capabilities of Hunyuan Image 3.0 to generate visual content that effectively conveys the messaging of your written material.", + "status": "active", + "tags": [ + "text-to-image" + ], + "updated_at": "2026-01-26T21:42:46.932Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/kangaroo/en7kxz5HNAQKg5QuONGDH_760708cdbc4b49e1a96e6b7628e43c35.jpg", + "model_url": "https://fal.run/fal-ai/hunyuan-image/v3/text-to-image", + "license_type": "commercial", + "date": "2025-09-28T06:04:53.920Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/hunyuan-image/v3/text-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/hunyuan-image/v3/text-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/hunyuan-image/v3/text-to-image", + "category": "text-to-image", + "thumbnailUrl": "https://v3.fal.media/files/kangaroo/en7kxz5HNAQKg5QuONGDH_760708cdbc4b49e1a96e6b7628e43c35.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/hunyuan-image/v3/text-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/hunyuan-image/v3/text-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "HunyuanImageV3TextToImageInput": { + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "image_size", + "num_images", + "num_inference_steps", + "guidance_scale", + "seed", + "enable_safety_checker", + "sync_mode", + "output_format", + "enable_prompt_expansion" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "200mm telephoto through crowd gaps; subject laughing, candid; creamy background compression, color pop from a single bold garment, catchlight in eyes." + ], + "description": "The text prompt for image-to-image.", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The desired size of the generated image.", + "title": "Image Size", + "default": "square_hd" + }, + "enable_prompt_expansion": { + "examples": [ + true + ], + "description": "Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": false + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "png" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "guidance_scale": { + "minimum": 1, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "Controls how much the model adheres to the prompt. Higher values mean stricter adherence.", + "default": 7.5 + }, + "seed": { + "description": "Random seed for reproducible results. 
If None, a random seed is used.", + "type": "integer", + "title": "Seed" + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "negative_prompt": { + "examples": [ + "blurry, low quality, watermark, signature" + ], + "description": "The negative prompt to guide the image generation away from certain concepts.", + "type": "string", + "title": "Negative Prompt", + "default": "" + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of denoising steps.", + "default": 28 + } + }, + "title": "HunyuanTextToImageInputV3", + "required": [ + "prompt" + ] + }, + "HunyuanImageV3TextToImageOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "content_type": "image/png", + "url": "https://v3b.fal.media/files/b/kangaroo/uIKrZHT6LaGqgXoxtBSn7.png" + } + ] + ], + "description": "A list of the generated images.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "description": "The base seed used for the generation process.", + "type": "integer", + "title": "Seed" + } + }, + "title": "HunyuanTextToImageV3Output", + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/hunyuan-image/v3/text-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-image/v3/text-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-image/v3/text-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HunyuanImageV3TextToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-image/v3/text-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HunyuanImageV3TextToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan-25-preview/text-to-image", + "metadata": { + "display_name": "Wan 2.5 Text to Image", + "category": "text-to-image", + "description": "Wan 2.5 text-to-image model.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:47.607Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/penguin/6PlSnB3q28Mu0QjURXyBe_051522e72e02496da12bf011190ca281.jpg", + "model_url": "https://fal.run/fal-ai/wan-25-preview/text-to-image", + "license_type": "commercial", + "date": "2025-09-25T20:57:10.526Z", + "group": { + "key": "wan-25-preview", + "label": "Text To Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan-25-preview/text-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan-25-preview/text-to-image queue.", + "x-fal-metadata": { + 
"endpointId": "fal-ai/wan-25-preview/text-to-image", + "category": "text-to-image", + "thumbnailUrl": "https://v3.fal.media/files/penguin/6PlSnB3q28Mu0QjURXyBe_051522e72e02496da12bf011190ca281.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan-25-preview/text-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/wan-25-preview/text-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Wan25PreviewTextToImageInput": { + "description": "Input for text-to-image generation", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A lone samurai standing on the edge of a cliff at twilight, overlooking a vast valley shrouded in mist. The sky burns with deep orange and purple hues from the setting sun, casting long, dramatic shadows. The samurai’s silhouette glows against the horizon, with their sword reflecting a glint of fading light. The overall style is hyper-realistic, cinematic, and moody, with dramatic contrast and atmospheric depth." + ], + "description": "The prompt for image generation. Supports Chinese and English, max 2000 characters.", + "type": "string", + "title": "Prompt", + "minLength": 1 + }, + "num_images": { + "description": "Number of images to generate. Values from 1 to 4.", + "type": "integer", + "minimum": 1, + "maximum": 4, + "title": "Num Images", + "examples": [ + 1 + ], + "default": 1 + }, + "image_size": { + "examples": [ + "square", + "landscape_16_9", + "portrait_16_9", + { + "height": 1280, + "width": 1280 + } + ], + "description": "The size of the generated image. Can use preset names like 'square', 'landscape_16_9', etc., or specific dimensions. Total pixels must be between 768×768 and 1440×1440, with aspect ratio between [1:4, 4:1].", + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "default": "square" + }, + "enable_safety_checker": { + "examples": [ + true + ], + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "seed": { + "description": "Random seed for reproducibility. If None, a random seed is chosen.", + "type": "integer", + "title": "Seed" + }, + "enable_prompt_expansion": { + "description": "Whether to enable prompt rewriting using LLM. 
Improves results for short prompts but increases processing time.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": true + }, + "negative_prompt": { + "examples": [ + "low resolution, error, worst quality, low quality, defects" + ], + "description": "Negative prompt to describe content to avoid. Max 500 characters.", + "type": "string", + "title": "Negative Prompt" + } + }, + "title": "TextToImageInput", + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "num_images", + "image_size", + "enable_prompt_expansion", + "seed", + "enable_safety_checker" + ], + "required": [ + "prompt" + ] + }, + "Wan25PreviewTextToImageOutput": { + "description": "Output for text-to-image generation", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "content_type": "image/png", + "url": "https://v3.fal.media/files/penguin/4VZ7I1ZK5XNv33LV2JBxg.png" + } + ] + ], + "description": "The generated images", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "seeds": { + "examples": [ + [ + 175932751 + ] + ], + "description": "The seeds used for each generated image", + "type": "array", + "title": "Seeds", + "items": { + "type": "integer" + } + }, + "actual_prompt": { + "examples": [ + "A lone samurai standing on the edge of a cliff at twilight, overlooking a vast valley shrouded in mist. The sky burns with deep orange and purple hues from the setting sun, casting long, dramatic shadows. The samurai’s silhouette glows against the horizon, with their sword reflecting a glint of fading light. The overall style is hyper-realistic, cinematic, and moody, with dramatic contrast and atmospheric depth." + ], + "description": "The actual prompt used if prompt rewriting was enabled", + "type": "string", + "title": "Actual Prompt" + } + }, + "title": "TextToImageOutput", + "x-fal-order-properties": [ + "images", + "seeds", + "actual_prompt" + ], + "required": [ + "images", + "seeds" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "title": "Height", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "title": "Width", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "ImageFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "ImageFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan-25-preview/text-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-25-preview/text-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/wan-25-preview/text-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Wan25PreviewTextToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-25-preview/text-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Wan25PreviewTextToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux/srpo", + "metadata": { + "display_name": "FLUX.1 SRPO [dev]", + "category": "text-to-image", + "description": "FLUX.1 SRPO [dev] is a 12 billion parameter flow transformer that generates high-quality images from text with incredible aesthetics. 
It is suitable for personal and commercial use.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:55.505Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/lion/ZNXdbSzAuCKiNcAobhmuq_433a1adbd71044199027c873cac81298.jpg", + "model_url": "https://fal.run/fal-ai/flux/srpo", + "license_type": "commercial", + "date": "2025-09-15T23:01:28.380Z", + "group": { + "key": "srpo-models-og", + "label": "Text to Image [dev]" + }, + "highlighted": false, + "kind": "inference", + "stream_url": "https://fal.run/fal-ai/flux/srpo/stream", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux/srpo", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux/srpo queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux/srpo", + "category": "text-to-image", + "thumbnailUrl": "https://fal.media/files/lion/ZNXdbSzAuCKiNcAobhmuq_433a1adbd71044199027c873cac81298.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux/srpo", + "documentationUrl": "https://fal.ai/models/fal-ai/flux/srpo/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxSrpoInput": { + "title": "BaseSRPOInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Mountain guide, sturdy build, wilderness wisdom, alert gaze, technical outdoor gear with rope coils, snow-capped peaks background, crisp mountain lighting, leading pose, wind-swept hair with full beard, weather-worn face with quiet confidence, alpine expert presence" + ], + "description": "The prompt to generate an image from.", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "title": "Num Images", + "maximum": 4, + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image.", + "title": "Image Size", + "default": "landscape_4_3" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "description": "The speed of the generation. 
The higher the speed, the faster the generation.", + "type": "string", + "title": "Acceleration", + "default": "none" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "jpeg" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "title": "Seed" + }, + "guidance_scale": { + "minimum": 1, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "type": "number", + "title": "Guidance scale (CFG)", + "maximum": 20, + "default": 4.5 + }, + "num_inference_steps": { + "minimum": 1, + "description": "The number of inference steps to perform.", + "type": "integer", + "title": "Num Inference Steps", + "maximum": 50, + "default": 28 + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "acceleration" + ], + "required": [ + "prompt" + ] + }, + "FluxSrpoOutput": { + "title": "SRPOOutput", + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "examples": [ + [ + { + "height": 768, + "content_type": "image/jpeg", + "url": "https://storage.googleapis.com/falserverless/example_outputs/flux-srpo-output.jpeg", + "width": 1024 + } + ] + ], + "description": "The generated images.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "seed": { + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "title": "Height", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "title": "Width", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux/srpo/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux/srpo/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux/srpo": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxSrpoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux/srpo/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxSrpoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-1/srpo", + "metadata": { + "display_name": "FLUX.1 SRPO [dev]", + "category": "text-to-image", + "description": "FLUX.1 SRPO [dev] is a 12 billion parameter flow transformer that generates high-quality images from text with incredible aesthetics. It is suitable for personal and commercial use.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:55.753Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/elephant/sc5nHfAUsSmVjmNNzoHDo_0b10ed5de0c24d9f88df8ed0a350f49f.jpg", + "model_url": "https://fal.run/fal-ai/flux-1/srpo", + "license_type": "commercial", + "date": "2025-09-15T22:03:08.956Z", + "group": { + "key": "srpo-models", + "label": "Text to Image [dev]" + }, + "highlighted": false, + "kind": "inference", + "stream_url": "https://fal.run/fal-ai/flux-1/srpo/stream", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-1/srpo", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-1/srpo queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-1/srpo", + "category": "text-to-image", + "thumbnailUrl": "https://fal.media/files/elephant/sc5nHfAUsSmVjmNNzoHDo_0b10ed5de0c24d9f88df8ed0a350f49f.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-1/srpo", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-1/srpo/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux1SrpoInput": { + "title": "BaseSRPOFlux1Input", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Mountain guide, sturdy build, wilderness wisdom, alert gaze, technical outdoor gear with rope coils, snow-capped peaks background, crisp mountain lighting, leading pose, wind-swept hair with full beard, weather-worn face with quiet confidence, alpine expert presence" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "maximum": 4, + "title": "Num Images", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "landscape_4_3" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "title": "Acceleration", + "type": "string", + "description": "The speed of the generation. The higher the speed, the faster the generation.", + "default": "regular" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "num_inference_steps": { + "minimum": 1, + "description": "The number of inference steps to perform.", + "type": "integer", + "maximum": 50, + "title": "Num Inference Steps", + "default": 28 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "guidance_scale": { + "minimum": 1, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "type": "number", + "maximum": 20, + "title": "Guidance scale (CFG)", + "default": 4.5 + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "acceleration" + ], + "required": [ + "prompt" + ] + }, + "Flux1SrpoOutput": { + "title": "SRPOOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." 
+ }, + "images": { + "examples": [ + [ + { + "height": 768, + "content_type": "image/jpeg", + "url": "https://storage.googleapis.com/falserverless/example_outputs/flux-srpo-output.jpeg", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated images.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-1/srpo/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-1/srpo/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-1/srpo": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux1SrpoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-1/srpo/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux1SrpoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/hunyuan-image/v2.1/text-to-image", + "metadata": { + "display_name": "Hunyuan Image", + "category": "text-to-image", + "description": "Use the amazing capabilities of hunyuan image 2.1 to generate images that express the feelings of your text.", + "status": "active", + "tags": [ + "text-to-image" + ], + "updated_at": "2026-01-26T21:42:57.699Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/monkey/fl_uWB5P9sMjDBhq_7hG0_644c0f46ad94482d8e2e09e180e64c88.jpg", + "model_url": "https://fal.run/fal-ai/hunyuan-image/v2.1/text-to-image", + "license_type": "commercial", + "date": "2025-09-09T16:54:57.565Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/hunyuan-image/v2.1/text-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/hunyuan-image/v2.1/text-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/hunyuan-image/v2.1/text-to-image", + "category": "text-to-image", + "thumbnailUrl": "https://fal.media/files/monkey/fl_uWB5P9sMjDBhq_7hG0_644c0f46ad94482d8e2e09e180e64c88.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/hunyuan-image/v2.1/text-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/hunyuan-image/v2.1/text-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "HunyuanImageV21TextToImageInput": { + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "image_size", + "num_images", + "num_inference_steps", + "guidance_scale", + "seed", + "use_reprompt", + "use_refiner", + "enable_safety_checker", + "sync_mode", + "output_format" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A cute, cartoon-style anthropomorphic penguin plush toy, standing in a painting studio, wearing a red knitted scarf and beret." + ], + "description": "The text prompt to generate an image from.", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The desired size of the generated image.", + "title": "Image Size", + "default": "square_hd" + }, + "use_reprompt": { + "description": "Enable prompt enhancement for potentially better results.", + "type": "boolean", + "title": "Use Reprompt", + "default": true + }, + "use_refiner": { + "description": "Enable the refiner model for improved image quality.", + "type": "boolean", + "title": "Use Refiner", + "default": false + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "png" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "guidance_scale": { + "minimum": 1, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "Controls how much the model adheres to the prompt. Higher values mean stricter adherence.", + "default": 3.5 + }, + "seed": { + "description": "Random seed for reproducible results. 
If None, a random seed is used.", + "type": "integer", + "title": "Seed" + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "negative_prompt": { + "examples": [ + "blurry, low quality, watermark, signature" + ], + "description": "The negative prompt to guide the image generation away from certain concepts.", + "type": "string", + "title": "Negative Prompt", + "default": "" + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of denoising steps.", + "default": 28 + } + }, + "title": "HunyuanTextToImageInput", + "required": [ + "prompt" + ] + }, + "HunyuanImageV21TextToImageOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + { + "content_type": "image/png", + "url": "https://v3.fal.media/files/zebra/WCrMfUTYp6mYCf6yRE3kw_generated_image_0.png" + } + ], + "description": "A list of the generated images.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "description": "The base seed used for the generation process.", + "type": "integer", + "title": "Seed" + } + }, + "title": "HunyuanTextToImageOutput", + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/hunyuan-image/v2.1/text-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-image/v2.1/text-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-image/v2.1/text-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HunyuanImageV21TextToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-image/v2.1/text-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HunyuanImageV21TextToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/bytedance/seedream/v4/text-to-image", + "metadata": { + "display_name": "Bytedance Seedream v4", + "category": "text-to-image", + "description": "A new-generation image creation model ByteDance, Seedream 4.0 integrates image generation and image editing capabilities into a single, unified architecture.", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:42:58.196Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/kangaroo/MTKbHTmLwlCPVvxnEPYVW_cd47bf24871b46af9747a5fcb7f4f97b.jpg", + "model_url": "https://fal.run/fal-ai/bytedance/seedream/v4/text-to-image", + "license_type": "commercial", + "date": "2025-09-09T08:27:00.849Z", + "group": { + "key": "bytedance-image", + "label": "Seedream 4.0 Text to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": 
"Queue OpenAPI for fal-ai/bytedance/seedream/v4/text-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/bytedance/seedream/v4/text-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/bytedance/seedream/v4/text-to-image", + "category": "text-to-image", + "thumbnailUrl": "https://fal.media/files/kangaroo/MTKbHTmLwlCPVvxnEPYVW_cd47bf24871b46af9747a5fcb7f4f97b.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/bytedance/seedream/v4/text-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/bytedance/seedream/v4/text-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "BytedanceSeedreamV4TextToImageInput": { + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_images", + "max_images", + "seed", + "sync_mode", + "enable_safety_checker", + "enhance_prompt_mode" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A trendy restaurant with a digital menu board displaying \"Seedream 4.0 is available on fal\" in elegant script, with diners enjoying their meals." + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt used to generate the image" + }, + "num_images": { + "minimum": 1, + "maximum": 6, + "type": "integer", + "title": "Num Images", + "description": "Number of separate model generations to be run with the prompt.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9", + "auto", + "auto_2K", + "auto_4K" + ], + "type": "string" + } + ], + "description": "The size of the generated image. Total pixels must be between 960x960 and 4096x4096.", + "title": "Image Size", + "examples": [ + { + "height": 4096, + "width": 4096 + } + ], + "default": { + "height": 2048, + "width": 2048 + } + }, + "enhance_prompt_mode": { + "enum": [ + "standard", + "fast" + ], + "title": "Enhance Prompt Mode", + "type": "string", + "description": "The mode to use for enhancing prompt enhancement. Standard mode provides higher quality results but takes longer to generate. Fast mode provides average quality results but takes less time to generate.", + "default": "standard" + }, + "max_images": { + "minimum": 1, + "maximum": 6, + "type": "integer", + "title": "Max Images", + "description": "If set to a number greater than one, enables multi-image generation. The model will potentially return up to `max_images` images every generation, and in total, `num_images` generations will be carried out. 
In total, the number of images generated will be between `num_images` and `max_images*num_images`.", + "default": 1 + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_safety_checker": { + "examples": [ + true + ], + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed to control the stochasticity of image generation." + } + }, + "title": "SeedDream4T2IInput", + "required": [ + "prompt" + ] + }, + "BytedanceSeedreamV4TextToImageOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/seedream4_t2i_output.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "Generated images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "examples": [ + 746406749 + ], + "title": "Seed", + "type": "integer", + "description": "Seed used for generation" + } + }, + "title": "SeedDream4T2IOutput", + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/bytedance/seedream/v4/text-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedream/v4/text-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedream/v4/text-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BytedanceSeedreamV4TextToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedream/v4/text-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BytedanceSeedreamV4TextToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/gemini-25-flash-image", + "metadata": { + "display_name": "Gemini 2.5 Flash Image", + "category": "text-to-image", + "description": "Nano Banana is Google's state-of-the-art image generation and editing model\n", + "status": "active", + "tags": [ + "text-to-image" + ], + "updated_at": "2026-01-26T21:43:00.576Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/penguin/f0TePjwvxUJQ2MJ4kDDtC_dba951a1d05a4c8f9324127e7751181e.jpg", + "model_url": "https://fal.run/fal-ai/gemini-25-flash-image", + "license_type": "commercial", + "date": "2025-08-26T01:20:51.897Z", + "group": { + "key": "gemini-25-flash", + "label": "Text to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/gemini-25-flash-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/gemini-25-flash-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/gemini-25-flash-image", + "category": "text-to-image", + "thumbnailUrl": 
"https://fal.media/files/penguin/f0TePjwvxUJQ2MJ4kDDtC_dba951a1d05a4c8f9324127e7751181e.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/gemini-25-flash-image", + "documentationUrl": "https://fal.ai/models/fal-ai/gemini-25-flash-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Gemini25FlashImageInput": { + "x-fal-order-properties": [ + "prompt", + "num_images", + "aspect_ratio", + "output_format", + "sync_mode", + "limit_generations" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "An action shot of a black lab swimming in an inground suburban swimming pool. The camera is placed meticulously on the water line, dividing the image in half, revealing both the dogs head above water holding a tennis ball in it's mouth, and it's paws paddling underwater." + ], + "description": "The text prompt to generate an image from.", + "type": "string", + "minLength": 3, + "maxLength": 50000, + "title": "Prompt" + }, + "aspect_ratio": { + "enum": [ + "21:9", + "16:9", + "3:2", + "4:3", + "5:4", + "1:1", + "4:5", + "3:4", + "2:3", + "9:16" + ], + "description": "The aspect ratio of the generated image.", + "type": "string", + "title": "Aspect Ratio", + "default": "1:1" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "description": "The number of images to generate.", + "title": "Number of Images", + "default": 1 + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "png" + }, + "limit_generations": { + "description": "Experimental parameter to limit the number of generations from each round of prompting to 1. 
Set to `True` to disregard any instructions in the prompt regarding the number of images to generate.", + "type": "boolean", + "title": "Limit Generations", + "default": false + } + }, + "title": "NanoBananaTextToImageInput", + "required": [ + "prompt" + ] + }, + "Gemini25FlashImageOutput": { + "x-fal-order-properties": [ + "images", + "description" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "file_name": "nano-banana-t2i-output.png", + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/example_outputs/nano-banana-t2i-output.png" + } + ] + ], + "description": "The generated images.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "description": { + "description": "The description of the generated images.", + "type": "string", + "title": "Description" + } + }, + "title": "NanoBananaTextToImageOutput", + "required": [ + "images", + "description" + ] + }, + "ImageFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "description": "The height of the image", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "description": "The width of the image", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "ImageFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/gemini-25-flash-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/gemini-25-flash-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/gemini-25-flash-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Gemini25FlashImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/gemini-25-flash-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Gemini25FlashImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/nano-banana", + "metadata": { + "display_name": "Nano Banana", + "category": "text-to-image", + "description": "Google's state-of-the-art image generation and editing model", + "status": "active", + "tags": [ + "image-generation" + ], + "updated_at": "2026-01-26T21:43:03.398Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/zebra/eLGB_Z0WHjbSD4Ad5aY-g_83f5f6da4f09426489042866cb0e4e9c.jpg", + "model_url": "https://fal.run/fal-ai/nano-banana", + "license_type": "commercial", + "date": "2025-08-19T22:11:04.357Z", + "group": { + "key": "nano-banana", + "label": "Text to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/nano-banana", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/nano-banana queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/nano-banana", + "category": "text-to-image", + "thumbnailUrl": "https://fal.media/files/zebra/eLGB_Z0WHjbSD4Ad5aY-g_83f5f6da4f09426489042866cb0e4e9c.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/nano-banana", + "documentationUrl": "https://fal.ai/models/fal-ai/nano-banana/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "NanoBananaInput": { + "title": "NanoBananaTextToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "An action shot of a black lab swimming in an inground suburban swimming pool. 
The camera is placed meticulously on the water line, dividing the image in half, revealing both the dogs head above water holding a tennis ball in it's mouth, and it's paws paddling underwater." + ], + "title": "Prompt", + "minLength": 3, + "maxLength": 50000, + "type": "string", + "description": "The text prompt to generate an image from." + }, + "aspect_ratio": { + "enum": [ + "21:9", + "16:9", + "3:2", + "4:3", + "5:4", + "1:1", + "4:5", + "3:4", + "2:3", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated image.", + "default": "1:1" + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Number of Images", + "description": "The number of images to generate.", + "default": 1 + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "limit_generations": { + "title": "Limit Generations", + "type": "boolean", + "description": "Experimental parameter to limit the number of generations from each round of prompting to 1. Set to `True` to disregard any instructions in the prompt regarding the number of images to generate.", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "num_images", + "aspect_ratio", + "output_format", + "sync_mode", + "limit_generations" + ], + "required": [ + "prompt" + ] + }, + "NanoBananaOutput": { + "title": "NanoBananaTextToImageOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "file_name": "nano-banana-t2i-output.png", + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/example_outputs/nano-banana-t2i-output.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated images.", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "description": { + "title": "Description", + "type": "string", + "description": "The description of the generated images." + } + }, + "x-fal-order-properties": [ + "images", + "description" + ], + "required": [ + "images", + "description" + ] + }, + "ImageFile": { + "title": "ImageFile", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the image" + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the image" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/nano-banana/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/nano-banana/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/nano-banana": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/NanoBananaInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/nano-banana/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/NanoBananaOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/bytedance/dreamina/v3.1/text-to-image", + "metadata": { + "display_name": "Bytedance", + "category": "text-to-image", + "description": "Dreamina showcases superior picture effects, with significant improvements in picture aesthetics, precise and diverse styles, and rich details.", + "status": "active", + "tags": [ + "text-to-image" + ], + "updated_at": "2026-01-26T21:43:07.290Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/kangaroo/l70sBwCHmfC3XRlEd6GV5_ebd0a5807bba4b6ca02f3c3bffca5cc7.jpg", + "model_url": "https://fal.run/fal-ai/bytedance/dreamina/v3.1/text-to-image", + "license_type": "commercial", + "date": "2025-08-06T16:21:39.741Z", + "group": { + "key": "bytedance-image", + "label": "Dreamina 3.1 Text to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/bytedance/dreamina/v3.1/text-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/bytedance/dreamina/v3.1/text-to-image queue.", + "x-fal-metadata": { + "endpointId": 
"fal-ai/bytedance/dreamina/v3.1/text-to-image", + "category": "text-to-image", + "thumbnailUrl": "https://fal.media/files/kangaroo/l70sBwCHmfC3XRlEd6GV5_ebd0a5807bba4b6ca02f3c3bffca5cc7.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/bytedance/dreamina/v3.1/text-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/bytedance/dreamina/v3.1/text-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "BytedanceDreaminaV31TextToImageInput": { + "x-fal-order-properties": [ + "prompt", + "image_size", + "enhance_prompt", + "num_images", + "seed", + "sync_mode" + ], + "type": "object", + "properties": { + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "Number of images to generate", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image. Width and height must be between 512 and 2048.", + "default": { + "height": 1536, + "width": 2048 + } + }, + "prompt": { + "examples": [ + "A 25-year-old korean woman selfie, front facing camera, lighting is soft and natural. If background is visible, it's a clean, modern apartment interior. The clothing color is clearly visible and distinct, adding a hint of color contrast" + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt used to generate the image" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed to control the stochasticity of image generation." 
+ }, + "enhance_prompt": { + "title": "Enhance Prompt", + "type": "boolean", + "description": "Whether to use an LLM to enhance the prompt", + "default": false + } + }, + "title": "DreaminaInput", + "required": [ + "prompt" + ] + }, + "BytedanceDreaminaV31TextToImageOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3.fal.media/files/panda/4mddd7PmDvbbBZDs-xnUW_4294a9041c9d46eaa7b98d15ce6300fb.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "Generated images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "examples": [ + 746406749 + ], + "title": "Seed", + "type": "integer", + "description": "Seed used for generation" + } + }, + "title": "DreaminaOutput", + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/bytedance/dreamina/v3.1/text-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bytedance/dreamina/v3.1/text-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/bytedance/dreamina/v3.1/text-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BytedanceDreaminaV31TextToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bytedance/dreamina/v3.1/text-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BytedanceDreaminaV31TextToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan/v2.2-a14b/text-to-image/lora", + "metadata": { + "display_name": "Wan v2.2 A14B Text-to-Image A14B with LoRAs", + "category": "text-to-image", + "description": "Wan 2.2's 14B model with LoRA support generates high-fidelity images with enhanced prompt alignment and style adaptability.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:07.540Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/lion/hV9rZRZFnLQo1Bns6llc7_148c4a2e142445cda4c87daed812f731.jpg", + "model_url": "https://fal.run/fal-ai/wan/v2.2-a14b/text-to-image/lora", + "license_type": "commercial", + "date": "2025-08-05T23:56:14.535Z", + "group": { + "key": "wan-v22-lora", + "label": "Text to Image (LoRA)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/wan-22-image-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/wan-22-image-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan/v2.2-a14b/text-to-image/lora", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan/v2.2-a14b/text-to-image/lora queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan/v2.2-a14b/text-to-image/lora", + "category": "text-to-image", + "thumbnailUrl": "https://fal.media/files/lion/hV9rZRZFnLQo1Bns6llc7_148c4a2e142445cda4c87daed812f731.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan/v2.2-a14b/text-to-image/lora", + "documentationUrl": "https://fal.ai/models/fal-ai/wan/v2.2-a14b/text-to-image/lora/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + 
"schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanV22A14bTextToImageLoraInput": { + "title": "WanLoRAT2IRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "In this breathtaking wildlife documentary, we are drawn into an intimate close-up of a majestic lion's face, framed against the backdrop of a vast African savannah at dawn. The camera captures the raw power and nobility of the creature as it gazes intently into the distance, its golden-brown fur glistening under the soft, diffused light that bathes the scene in an ethereal glow. Harsh shadows dance across its features, accentuating the deep wrinkles around its eyes and the rugged texture of its fur, each strand a testament to its age and wisdom. The static camera angle invites viewers to immerse themselves in this moment of profound stillness, where the lion's intense focus hints at an unseen presence or a distant threat. As the sun ascends, the landscape transforms into a symphony of warm hues, enhancing the serene yet tense atmosphere that envelops this extraordinary encounter with nature's untamed beauty." + ], + "description": "The text prompt to guide image generation.", + "type": "string", + "title": "Prompt" + }, + "shift": { + "description": "Shift value for the image. Must be between 1.0 and 10.0.", + "type": "number", + "minimum": 1, + "maximum": 10, + "title": "Shift", + "examples": [ + 2 + ], + "default": 2 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image.", + "title": "Image Size", + "examples": [ + "square_hd" + ], + "default": "square_hd" + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "description": "Acceleration level to use. The more acceleration, the faster the generation, but with lower quality. 
The recommended value is 'regular'.", + "type": "string", + "title": "Acceleration", + "examples": [ + "regular" + ], + "default": "regular" + }, + "reverse_video": { + "description": "If true, the video will be reversed.", + "type": "boolean", + "title": "Reverse Video", + "default": false + }, + "loras": { + "description": "LoRA weights to be used in the inference.", + "type": "array", + "title": "Loras", + "items": { + "$ref": "#/components/schemas/LoRAWeight" + }, + "default": [] + }, + "enable_safety_checker": { + "examples": [ + true + ], + "description": "If set to true, input data will be checked for safety before processing.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": false + }, + "guidance_scale": { + "description": "Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.", + "type": "number", + "minimum": 1, + "maximum": 10, + "title": "Guidance Scale (1st Stage)", + "examples": [ + 3.5 + ], + "default": 3.5 + }, + "image_format": { + "enum": [ + "png", + "jpeg" + ], + "description": "The format of the output image.", + "type": "string", + "title": "Image Format", + "examples": [ + "jpeg" + ], + "default": "jpeg" + }, + "negative_prompt": { + "description": "Negative prompt for video generation.", + "type": "string", + "title": "Negative Prompt", + "default": "" + }, + "enable_output_safety_checker": { + "examples": [ + false + ], + "description": "If set to true, output video will be checked for safety after generation.", + "type": "boolean", + "title": "Enable Output Safety Checker", + "default": false + }, + "guidance_scale_2": { + "description": "Guidance scale for the second stage of the model. This is used to control the adherence to the prompt in the second stage of the model.", + "type": "number", + "minimum": 1, + "maximum": 10, + "title": "Guidance Scale (2nd Stage)", + "examples": [ + 4 + ], + "default": 4 + }, + "enable_prompt_expansion": { + "examples": [ + false + ], + "description": "Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": false + }, + "num_inference_steps": { + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "type": "integer", + "minimum": 2, + "maximum": 40, + "title": "Number of Inference Steps", + "examples": [ + 27 + ], + "default": 27 + }, + "seed": { + "description": "Random seed for reproducibility. 
If None, a random seed is chosen.", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "seed", + "num_inference_steps", + "enable_safety_checker", + "enable_output_safety_checker", + "enable_prompt_expansion", + "acceleration", + "guidance_scale", + "guidance_scale_2", + "shift", + "loras", + "reverse_video", + "image_size", + "image_format" + ], + "required": [ + "prompt" + ] + }, + "WanV22A14bTextToImageLoraOutput": { + "title": "WanT2IResponse", + "type": "object", + "properties": { + "image": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/wan/t2i-output.png" + } + ], + "description": "The generated image file.", + "title": "Image", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "seed": { + "description": "The seed used for generation.", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "image", + "seed" + ], + "required": [ + "image", + "seed" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Height", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Width", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "LoRAWeight": { + "title": "LoRAWeight", + "type": "object", + "properties": { + "path": { + "description": "URL or the path to the LoRA weights.", + "type": "string", + "title": "Path" + }, + "scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Scale", + "description": "The scale of the LoRA weight. This is used to scale the LoRA weight before merging it with the base model.", + "default": 1 + }, + "transformer": { + "enum": [ + "high", + "low", + "both" + ], + "description": "Specifies the transformer to load the lora weight into. 'high' loads into the high-noise transformer, 'low' loads it into the low-noise transformer, while 'both' loads the LoRA into both transformers.", + "type": "string", + "title": "Transformer", + "default": "high" + }, + "weight_name": { + "description": "Name of the LoRA weight. Used only if `path` is a Hugging Face repository, and required only if you have more than 1 safetensors file in the repo.", + "type": "string", + "title": "Weight Name" + } + }, + "x-fal-order-properties": [ + "path", + "weight_name", + "scale", + "transformer" + ], + "required": [ + "path" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan/v2.2-a14b/text-to-image/lora/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-a14b/text-to-image/lora/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-a14b/text-to-image/lora": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanV22A14bTextToImageLoraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-a14b/text-to-image/lora/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanV22A14bTextToImageLoraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan/v2.2-5b/text-to-image", + "metadata": { + "display_name": "Wan", + "category": "text-to-image", + "description": "Wan 2.2's 5B model generates high-resolution, photorealistic images with powerful prompt understanding and fine-grained visual detail", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:07.666Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/kangaroo/xvsySaKCeM5ZulogiaTX2_f6fa9b031e374468bcb9e426528807e4.jpg", + "model_url": "https://fal.run/fal-ai/wan/v2.2-5b/text-to-image", + "license_type": "commercial", + "date": "2025-08-05T21:53:55.050Z", + "group": { + "key": "wan-v22", + "label": "Text to Image (5B)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan/v2.2-5b/text-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan/v2.2-5b/text-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan/v2.2-5b/text-to-image", + "category": "text-to-image", + "thumbnailUrl": "https://fal.media/files/kangaroo/xvsySaKCeM5ZulogiaTX2_f6fa9b031e374468bcb9e426528807e4.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan/v2.2-5b/text-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/wan/v2.2-5b/text-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanV225bTextToImageInput": { + "title": "WanSmallT2IRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "In this breathtaking wildlife documentary, we are drawn into an intimate close-up of a majestic lion's face, framed against the backdrop of a vast African savannah at dawn. The camera captures the raw power and nobility of the creature as it gazes intently into the distance, its golden-brown fur glistening under the soft, diffused light that bathes the scene in an ethereal glow. Harsh shadows dance across its features, accentuating the deep wrinkles around its eyes and the rugged texture of its fur, each strand a testament to its age and wisdom. The static camera angle invites viewers to immerse themselves in this moment of profound stillness, where the lion's intense focus hints at an unseen presence or a distant threat. As the sun ascends, the landscape transforms into a symphony of warm hues, enhancing the serene yet tense atmosphere that envelops this extraordinary encounter with nature's untamed beauty." + ], + "description": "The text prompt to guide image generation.", + "type": "string", + "title": "Prompt" + }, + "enable_prompt_expansion": { + "examples": [ + false + ], + "description": "Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": false + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image.", + "title": "Image Size", + "examples": [ + "square_hd" + ], + "default": "square_hd" + }, + "negative_prompt": { + "description": "Negative prompt for video generation.", + "type": "string", + "title": "Negative Prompt", + "default": "" + }, + "shift": { + "description": "Shift value for the image. Must be between 1.0 and 10.0.", + "type": "number", + "minimum": 1, + "maximum": 10, + "title": "Shift", + "examples": [ + 2 + ], + "default": 2 + }, + "enable_output_safety_checker": { + "examples": [ + false + ], + "description": "If set to true, output video will be checked for safety after generation.", + "type": "boolean", + "title": "Enable Output Safety Checker", + "default": false + }, + "enable_safety_checker": { + "examples": [ + true + ], + "description": "If set to true, input data will be checked for safety before processing.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": false + }, + "num_inference_steps": { + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "type": "integer", + "minimum": 2, + "maximum": 50, + "title": "Number of Inference Steps", + "examples": [ + 40 + ], + "default": 40 + }, + "seed": { + "description": "Random seed for reproducibility. If None, a random seed is chosen.", + "type": "integer", + "title": "Seed" + }, + "image_format": { + "enum": [ + "png", + "jpeg" + ], + "description": "The format of the output image.", + "type": "string", + "title": "Image Format", + "examples": [ + "jpeg" + ], + "default": "jpeg" + }, + "guidance_scale": { + "description": "Classifier-free guidance scale. 
Higher values give better adherence to the prompt but may decrease quality.", + "type": "number", + "minimum": 1, + "maximum": 10, + "title": "Guidance Scale", + "examples": [ + 3.5 + ], + "default": 3.5 + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "seed", + "num_inference_steps", + "enable_safety_checker", + "enable_output_safety_checker", + "enable_prompt_expansion", + "guidance_scale", + "shift", + "image_size", + "image_format" + ], + "required": [ + "prompt" + ] + }, + "WanV225bTextToImageOutput": { + "title": "WanSmallT2IResponse", + "type": "object", + "properties": { + "image": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/wan/small-t2i-output-2.png" + } + ], + "description": "The generated image file.", + "title": "Image", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "seed": { + "description": "The seed used for generation.", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "image", + "seed" + ], + "required": [ + "image", + "seed" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Height", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Width", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan/v2.2-5b/text-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-5b/text-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-5b/text-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanV225bTextToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-5b/text-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanV225bTextToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan/v2.2-a14b/text-to-image", + "metadata": { + "display_name": "Wan", + "category": "text-to-image", + "description": "Wan 2.2's 14B model generates high-resolution, photorealistic images with powerful prompt understanding and fine-grained visual detail", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:07.794Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-2.jpg", + "model_url": "https://fal.run/fal-ai/wan/v2.2-a14b/text-to-image", + "license_type": "commercial", + "date": "2025-08-05T21:53:20.379Z", + "group": { + "key": "wan-v22-large", + "label": "Text to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan/v2.2-a14b/text-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan/v2.2-a14b/text-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan/v2.2-a14b/text-to-image", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-2.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan/v2.2-a14b/text-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/wan/v2.2-a14b/text-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanV22A14bTextToImageInput": { + "title": "WanT2IRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "In this breathtaking wildlife documentary, we are drawn into an intimate close-up of a majestic lion's face, framed against the backdrop of a vast African savannah at dawn. The camera captures the raw power and nobility of the creature as it gazes intently into the distance, its golden-brown fur glistening under the soft, diffused light that bathes the scene in an ethereal glow. Harsh shadows dance across its features, accentuating the deep wrinkles around its eyes and the rugged texture of its fur, each strand a testament to its age and wisdom. The static camera angle invites viewers to immerse themselves in this moment of profound stillness, where the lion's intense focus hints at an unseen presence or a distant threat. As the sun ascends, the landscape transforms into a symphony of warm hues, enhancing the serene yet tense atmosphere that envelops this extraordinary encounter with nature's untamed beauty." + ], + "description": "The text prompt to guide image generation.", + "type": "string", + "title": "Prompt" + }, + "guidance_scale": { + "description": "Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.", + "type": "number", + "minimum": 1, + "maximum": 10, + "title": "Guidance Scale (1st Stage)", + "examples": [ + 3.5 + ], + "default": 3.5 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image.", + "title": "Image Size", + "examples": [ + "square_hd" + ], + "default": "square_hd" + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "description": "Acceleration level to use. The more acceleration, the faster the generation, but with lower quality. The recommended value is 'regular'.", + "type": "string", + "title": "Acceleration", + "examples": [ + "regular" + ], + "default": "regular" + }, + "shift": { + "description": "Shift value for the image. Must be between 1.0 and 10.0.", + "type": "number", + "minimum": 1, + "maximum": 10, + "title": "Shift", + "examples": [ + 2 + ], + "default": 2 + }, + "enable_output_safety_checker": { + "examples": [ + false + ], + "description": "If set to true, output video will be checked for safety after generation.", + "type": "boolean", + "title": "Enable Output Safety Checker", + "default": false + }, + "guidance_scale_2": { + "description": "Guidance scale for the second stage of the model. 
This is used to control the adherence to the prompt in the second stage of the model.", + "type": "number", + "minimum": 1, + "maximum": 10, + "title": "Guidance Scale (2nd Stage)", + "examples": [ + 4 + ], + "default": 4 + }, + "enable_safety_checker": { + "examples": [ + true + ], + "description": "If set to true, input data will be checked for safety before processing.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": false + }, + "num_inference_steps": { + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "type": "integer", + "minimum": 2, + "maximum": 40, + "title": "Number of Inference Steps", + "examples": [ + 27 + ], + "default": 27 + }, + "seed": { + "description": "Random seed for reproducibility. If None, a random seed is chosen.", + "type": "integer", + "title": "Seed" + }, + "negative_prompt": { + "description": "Negative prompt for video generation.", + "type": "string", + "title": "Negative Prompt", + "default": "" + }, + "enable_prompt_expansion": { + "examples": [ + false + ], + "description": "Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "seed", + "num_inference_steps", + "enable_safety_checker", + "enable_output_safety_checker", + "enable_prompt_expansion", + "acceleration", + "guidance_scale", + "guidance_scale_2", + "shift", + "image_size" + ], + "required": [ + "prompt" + ] + }, + "WanV22A14bTextToImageOutput": { + "title": "WanT2IResponse", + "type": "object", + "properties": { + "image": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/wan/t2i-output.png" + } + ], + "description": "The generated image file.", + "title": "Image", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "seed": { + "description": "The seed used for generation.", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "image", + "seed" + ], + "required": [ + "image", + "seed" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Height", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Width", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan/v2.2-a14b/text-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-a14b/text-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-a14b/text-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanV22A14bTextToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-a14b/text-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanV22A14bTextToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image", + "metadata": { + "display_name": "Qwen Image", + "category": "text-to-image", + "description": "Qwen-Image is an image generation foundation model in the Qwen series that achieves significant advances in complex text rendering and precise image editing. 
", + "status": "active", + "tags": [ + "text-to-image" + ], + "updated_at": "2026-01-26T21:43:08.169Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/koala/6JAMNCSti-vm-zJeZi6hA_626cdc11d4d04560ac9523fbd61f2eac.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image", + "license_type": "commercial", + "date": "2025-08-04T17:48:45.350Z", + "group": { + "key": "qwen-image", + "label": "Text to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/qwen-image-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/qwen-image-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-image", + "category": "text-to-image", + "thumbnailUrl": "https://fal.media/files/koala/6JAMNCSti-vm-zJeZi6hA_626cdc11d4d04560ac9523fbd61f2eac.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImageInput": { + "title": "BaseQwenImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Mount Fuji with cherry blossoms in the foreground, clear sky, peaceful spring day, soft natural light, realistic landscape." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the image with" + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 4, + "description": "The number of images to generate.", + "default": 1 + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "title": "Acceleration", + "type": "string", + "description": "Acceleration level for image generation. Options: 'none', 'regular', 'high'. Higher acceleration increases speed. 'regular' balances speed and quality. 
'high' is recommended for images without text.", + "examples": [ + "none" + ], + "default": "none" + }, + "num_inference_steps": { + "minimum": 2, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 250, + "description": "The number of inference steps to perform.", + "default": 30 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "landscape_4_3" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "loras": { + "description": "\n The LoRAs to use for the image generation. You can use up to 3 LoRAs\n and they will be merged together to generate the final image.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "examples": [], + "title": "Loras", + "default": [] + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "use_turbo": { + "examples": [ + true + ], + "title": "Use Turbo", + "type": "boolean", + "description": "Enable turbo mode for faster generation with high quality. When enabled, uses optimized settings (10 steps, CFG=1.2).", + "default": false + }, + "negative_prompt": { + "examples": [ + "blurry, ugly" + ], + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt for the generation", + "default": " " + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance scale", + "type": "number", + "maximum": 20, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 2.5 + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "negative_prompt", + "acceleration", + "loras", + "use_turbo" + ], + "required": [ + "prompt" + ] + }, + "QwenImageOutput": { + "title": "QwenImageOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "examples": [ + [ + { + "height": 768, + "content_type": "image/jpeg", + "url": "https://v3.fal.media/files/rabbit/KoIbq6nhDBDPxDQrivW-m.png", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "title": "Scale", + "type": "number", + "maximum": 4, + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + } + }, + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-krea-lora/stream", + "metadata": { + "display_name": "Flux Krea Lora", + "category": "text-to-image", + "description": "Super fast endpoint for the FLUX.1 [dev] model with LoRA support, enabling rapid and high-quality image generation using pre-trained LoRA adaptations for personalization, specific styles, brand identities, and product-specific outputs.", + "status": "active", + "tags": [ + "lora", + "personalization" + ], + "updated_at": "2026-01-26T21:43:08.555Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/zebra/6CK9OSIhC3AAEhihyCqsh_9a84ecc9c66343d688ccdd5c9f57c80b.jpg", + "model_url": "https://fal.run/fal-ai/flux-krea-lora/stream", + "license_type": "commercial", + "date": "2025-08-01T23:39:51.003Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-krea-lora/stream", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-krea-lora/stream queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-krea-lora/stream", + "category": "text-to-image", + "thumbnailUrl": "https://fal.media/files/zebra/6CK9OSIhC3AAEhihyCqsh_9a84ecc9c66343d688ccdd5c9f57c80b.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-krea-lora/stream", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-krea-lora/stream/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
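For consumers of this catalog, the `QwenImageInput`/`QwenImageOutput` schemas above map directly onto hand-written TypeScript types. A sketch mirroring the fields and defaults (optionality follows the schema's `required` lists; names and ranges are taken verbatim from the JSON):

```ts
// Hand-written mirrors of QwenImageInput / QwenImageOutput; only `prompt` is
// required on input, everything else falls back to the schema defaults.
type ImageSizePreset =
  | 'square_hd' | 'square'
  | 'portrait_4_3' | 'portrait_16_9'
  | 'landscape_4_3' | 'landscape_16_9'

interface LoraWeight {
  path: string   // URL or path to the LoRA weights
  scale?: number // 0..4, default 1
}

interface QwenImageInput {
  prompt: string
  image_size?: ImageSizePreset | { width: number; height: number }
  num_inference_steps?: number   // 2..250, default 30
  seed?: number
  guidance_scale?: number        // 0..20, default 2.5
  sync_mode?: boolean
  num_images?: number            // 1..4, default 1
  enable_safety_checker?: boolean
  output_format?: 'jpeg' | 'png' // default 'png'
  negative_prompt?: string
  acceleration?: 'none' | 'regular' | 'high'
  loras?: Array<LoraWeight>      // up to 3, merged together
  use_turbo?: boolean            // optimized settings: 10 steps, CFG=1.2
}

interface QwenImageOutput {
  images: Array<{ url: string; width: number; height: number; content_type?: string }>
  timings: Record<string, number>
  seed: number
  has_nsfw_concepts: Array<boolean>
  prompt: string
}
```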
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxKreaLoraStreamInput": { + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "loras", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Extreme close-up of a single tiger eye, direct frontal view. Detailed iris and pupil. Sharp focus on eye texture and color. Natural lighting to capture authentic eye shine and depth. The word \"FLUX\" is painted over it in big, white brush strokes with visible texture." + ], + "description": "The prompt to generate an image from.", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate. This is always set to 1 for streaming output.", + "type": "integer", + "title": "Num Images", + "maximum": 4, + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image.", + "title": "Image Size", + "default": "landscape_4_3" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "jpeg" + }, + "loras": { + "description": "\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "examples": [], + "title": "Loras", + "default": [] + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "type": "number", + "title": "Guidance scale (CFG)", + "maximum": 35, + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 1, + "description": "The number of inference steps to perform.", + "type": "integer", + "title": "Num Inference Steps", + "maximum": 50, + "default": 28 + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "seed": { + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "type": "integer", + "title": "Seed" + } + }, + "title": "TextToImageInput", + "required": [ + "prompt" + ] + }, + "FluxKreaLoraStreamOutput": { + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "description": "The generated image files info.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ", + "type": "integer", + "title": "Seed" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + } + }, + "title": "Output", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "title": "Height", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "title": "Width", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "LoraWeight": { + "x-fal-order-properties": [ + "path", + "scale" + ], + "type": "object", + "properties": { + "path": { + "description": "URL or the path to the LoRA weights.", + "type": "string", + "title": "Path" + }, + "scale": { + "minimum": 0, + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "type": "number", + "title": "Scale", + "maximum": 4, + "default": 1 + } + }, + "title": "LoraWeight", + "required": [ + "path" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "title": "Image", + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-krea-lora/stream/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-krea-lora/stream/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-krea-lora/stream": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxKreaLoraStreamInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-krea-lora/stream/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxKreaLoraStreamOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-krea-lora", + "metadata": { + "display_name": "FLUX.1 Krea [dev] with LoRAs", + "category": "text-to-image", + "description": "Super fast endpoint for the FLUX.1 [dev] model with LoRA support, enabling rapid and high-quality image generation using pre-trained LoRA adaptations for personalization, specific styles, brand identities, and product-specific outputs.", + "status": "active", + "tags": [ + "lora", + "personalization" + ], + "updated_at": "2026-01-26T21:43:08.804Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/tiger/fB-RsJ-BW4mrUVAH8oKF2_LOuGVDgg07U8OWbOhhMFt_d6ab08c96ab94da8b6d3e979d634af16.jpg", + "model_url": "https://fal.run/fal-ai/flux-krea-lora", + "license_type": "commercial", + "date": "2025-08-01T23:35:24.369Z", + "group": { + "key": "flux-krea-lora", + "label": "Text to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/flux-krea-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/flux-krea-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-krea-lora", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-krea-lora queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-krea-lora", + "category": "text-to-image", + "thumbnailUrl": "https://v3.fal.media/files/tiger/fB-RsJ-BW4mrUVAH8oKF2_LOuGVDgg07U8OWbOhhMFt_d6ab08c96ab94da8b6d3e979d634af16.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-krea-lora", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-krea-lora/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
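The `LoraWeight` schema shared by the flux-krea-lora endpoints is just a `path` (URL to the weights) plus an optional `scale` in `[0, 4]`. An illustrative request body for `fal-ai/flux-krea-lora/stream` built from the fields above; the LoRA URL is a placeholder, not a real artifact:

```ts
// Illustrative FluxKreaLoraStreamInput payload exercising the LoraWeight schema.
const body = {
  prompt: 'Product shot of a ceramic mug on a walnut desk, soft window light',
  image_size: 'square_hd',
  loras: [
    // `scale` (0..4, default 1) weights the LoRA before it is merged
    // with the base model; the path below is a placeholder URL.
    { path: 'https://example.com/lora/my-brand-style.safetensors', scale: 0.8 },
  ],
  guidance_scale: 3.5,     // CFG, 0..35 for this endpoint
  num_inference_steps: 28, // schema default
}
```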
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxKreaLoraInput": { + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "loras", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Extreme close-up of a single tiger eye, direct frontal view. Detailed iris and pupil. Sharp focus on eye texture and color. Natural lighting to capture authentic eye shine and depth. The word \"FLUX\" is painted over it in big, white brush strokes with visible texture." + ], + "description": "The prompt to generate an image from.", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate. This is always set to 1 for streaming output.", + "type": "integer", + "title": "Num Images", + "maximum": 4, + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image.", + "title": "Image Size", + "default": "landscape_4_3" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "jpeg" + }, + "loras": { + "description": "\n The LoRAs to use for the image generation. 
You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "examples": [], + "title": "Loras", + "default": [] + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "type": "number", + "title": "Guidance scale (CFG)", + "maximum": 35, + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 1, + "description": "The number of inference steps to perform.", + "type": "integer", + "title": "Num Inference Steps", + "maximum": 50, + "default": 28 + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "seed": { + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "type": "integer", + "title": "Seed" + } + }, + "title": "TextToImageInput", + "required": [ + "prompt" + ] + }, + "FluxKreaLoraOutput": { + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "description": "The generated image files info.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ", + "type": "integer", + "title": "Seed" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + } + }, + "title": "Output", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "title": "Height", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "title": "Width", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "LoraWeight": { + "x-fal-order-properties": [ + "path", + "scale" + ], + "type": "object", + "properties": { + "path": { + "description": "URL or the path to the LoRA weights.", + "type": "string", + "title": "Path" + }, + "scale": { + "minimum": 0, + "description": "\n The scale of the LoRA weight. 
This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "type": "number", + "title": "Scale", + "maximum": 4, + "default": 1 + } + }, + "title": "LoraWeight", + "required": [ + "path" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "title": "Image", + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-krea-lora/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-krea-lora/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-krea-lora": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxKreaLoraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-krea-lora/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxKreaLoraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux/krea", + "metadata": { + "display_name": "FLUX.1 Krea [dev]", + "category": "text-to-image", + "description": "FLUX.1 Krea [dev] is a 12 billion parameter flow transformer that generates high-quality images from text with incredible aesthetics. 
It is suitable for personal and commercial use.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:10.459Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Upscale-2.jpg", + "model_url": "https://fal.run/fal-ai/flux/krea", + "license_type": "commercial", + "date": "2025-07-30T14:24:29.026Z", + "group": { + "key": "krea-models", + "label": "Text to Image [dev]" + }, + "highlighted": false, + "kind": "inference", + "stream_url": "https://fal.run/fal-ai/flux/krea/stream", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux/krea", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux/krea queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux/krea", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Upscale-2.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux/krea", + "documentationUrl": "https://fal.ai/models/fal-ai/flux/krea/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxKreaInput": { + "title": "BaseKreaInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A candid street photo of a woman with a pink bob and bold eyeliner on a graffiti-covered subway platform. She wears a bright yellow patent leather coat over a black-and-white checkered turtleneck and platform boots. Natural subway lighting creates an authentic urban scene with a relaxed, unposed feel." + ], + "description": "The prompt to generate an image from.", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "title": "Num Images", + "maximum": 4, + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image.", + "title": "Image Size", + "default": "landscape_4_3" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "description": "The speed of the generation. 
The higher the speed, the faster the generation.", + "type": "string", + "title": "Acceleration", + "default": "none" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "jpeg" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "title": "Seed" + }, + "guidance_scale": { + "minimum": 1, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "type": "number", + "title": "Guidance scale (CFG)", + "maximum": 20, + "default": 4.5 + }, + "num_inference_steps": { + "minimum": 1, + "description": "The number of inference steps to perform.", + "type": "integer", + "title": "Num Inference Steps", + "maximum": 50, + "default": 28 + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "acceleration" + ], + "required": [ + "prompt" + ] + }, + "FluxKreaOutput": { + "title": "KreaOutput", + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "examples": [ + [ + { + "height": 768, + "content_type": "image/jpeg", + "url": "https://storage.googleapis.com/falserverless/example_outputs/flux_krea_t2i_output_1.jpg", + "width": 1024 + } + ] + ], + "description": "The generated images.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "seed": { + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "title": "Height", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "title": "Width", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux/krea/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux/krea/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
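Across every input schema in this file, `image_size` is an `anyOf` union: one of six preset strings or an explicit `{ width, height }` object with each axis capped at 14142. A small TypeScript sketch of handling both branches (the server resolves presets to concrete dimensions, so no pixel mapping is attempted here):

```ts
// The image_size union as it appears in these schemas.
type ImageSize =
  | 'square_hd' | 'square'
  | 'portrait_4_3' | 'portrait_16_9'
  | 'landscape_4_3' | 'landscape_16_9'
  | { width: number; height: number }

function describeImageSize(size: ImageSize): string {
  if (typeof size === 'string') {
    return `preset: ${size}` // named preset, resolved server-side
  }
  // Explicit sizes: integers, exclusiveMinimum 0, maximum 14142 per axis.
  return `${size.width}x${size.height}`
}
```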
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux/krea": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxKreaInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux/krea/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxKreaOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-1/krea", + "metadata": { + "display_name": "FLUX.1 Krea [dev]", + "category": "text-to-image", + "description": "FLUX.1 Krea [dev] is a 12 billion parameter flow transformer that generates high-quality images from text with incredible aesthetics. It is suitable for personal and commercial use.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:10.953Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-5.jpg", + "model_url": "https://fal.run/fal-ai/flux-1/krea", + "license_type": "commercial", + "date": "2025-07-30T13:15:37.175Z", + "group": { + "key": "krea-models-fast", + "label": "Text to Image [dev]" + }, + "highlighted": false, + "kind": "inference", + "stream_url": "https://fal.run/fal-ai/flux-1/krea/stream", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-1/krea", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-1/krea queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-1/krea", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-5.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-1/krea", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-1/krea/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux1KreaInput": { + "title": "BaseKreaFlux1Input", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A candid street photo of a woman with a pink bob and bold eyeliner on a graffiti-covered subway platform. 
She wears a bright yellow patent leather coat over a black-and-white checkered turtleneck and platform boots. Natural subway lighting creates an authentic urban scene with a relaxed, unposed feel." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "maximum": 4, + "title": "Num Images", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "landscape_4_3" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "title": "Acceleration", + "type": "string", + "description": "The speed of the generation. The higher the speed, the faster the generation.", + "default": "regular" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "num_inference_steps": { + "minimum": 1, + "description": "The number of inference steps to perform.", + "type": "integer", + "maximum": 50, + "title": "Num Inference Steps", + "default": 28 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "guidance_scale": { + "minimum": 1, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "type": "number", + "maximum": 20, + "title": "Guidance scale (CFG)", + "default": 4.5 + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "acceleration" + ], + "required": [ + "prompt" + ] + }, + "Flux1KreaOutput": { + "title": "KreaOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." 
+ }, + "images": { + "examples": [ + [ + { + "height": 768, + "content_type": "image/jpeg", + "url": "https://storage.googleapis.com/falserverless/example_outputs/flux_krea_t2i_output_1.jpg", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated images.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-1/krea/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-1/krea/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
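Every endpoint in the catalog exposes the same cancellation route: `PUT .../requests/{request_id}/cancel`, returning `{ success: boolean }`. A sketch of a generic cancel helper (the `endpoint` parameter and the `Key` auth format are assumptions carried over from the earlier queue-flow sketch):

```ts
// Cancel a queued request for any catalog endpoint, e.g. 'fal-ai/flux-1/krea'.
async function cancelRequest(endpoint: string, requestId: string): Promise<boolean> {
  const res = await fetch(
    `https://queue.fal.run/${endpoint}/requests/${requestId}/cancel`,
    { method: 'PUT', headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
  )
  // The schema documents a { success: boolean } body on HTTP 200.
  const body = (await res.json()) as { success?: boolean }
  return body.success === true
}
```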
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-1/krea": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux1KreaInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-1/krea/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux1KreaOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/sky-raccoon", + "metadata": { + "display_name": "Sky Raccoon", + "category": "text-to-image", + "description": "Generate images from a text prompt.", + "status": "active", + "tags": [ + "text-to-image" + ], + "updated_at": "2026-01-26T21:43:12.655Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-2.jpg", + "model_url": "https://fal.run/fal-ai/sky-raccoon", + "license_type": "commercial", + "date": "2025-07-26T20:56:47.206Z", + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/sky-raccoon", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/sky-raccoon queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/sky-raccoon", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-2.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/sky-raccoon", + "documentationUrl": "https://fal.ai/models/fal-ai/sky-raccoon/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "SkyRaccoonInput": { + "title": "SkyRaccoonRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A stylish woman walks down a Tokyo street filled with warm glowing neon and animated city signage. She wears a black leather jacket, a long red dress, and black boots, and carries a black purse." 
+ ], + "description": "The text prompt to guide video generation.", + "type": "string", + "title": "Prompt" + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image.", + "title": "Image Size", + "default": { + "height": 1024, + "width": 1024 + } + }, + "turbo_mode": { + "title": "Turbo Mode", + "type": "boolean", + "description": "If true, the video will be generated faster with no noticeable degradation in the visual quality.", + "default": false + }, + "enable_prompt_expansion": { + "examples": [ + false + ], + "description": "Whether to enable prompt expansion.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": false + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility. If None, a random seed is chosen." + }, + "enable_safety_checker": { + "examples": [ + true + ], + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": false + }, + "negative_prompt": { + "examples": [ + "bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + ], + "description": "Negative prompt for video generation.", + "type": "string", + "title": "Negative Prompt", + "default": "bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + }, + "num_inference_steps": { + "minimum": 2, + "description": "Number of inference steps for sampling. 
Higher values give better quality but take longer.", + "type": "integer", + "maximum": 40, + "title": "Num Inference Steps", + "default": 30 + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "seed", + "num_inference_steps", + "image_size", + "enable_safety_checker", + "enable_prompt_expansion", + "turbo_mode" + ], + "required": [ + "prompt" + ] + }, + "SkyRaccoonOutput": { + "title": "SkyRaccoonResponse", + "type": "object", + "properties": { + "image": { + "examples": [ + { + "url": "https://v3.fal.media/files/kangaroo/xP17uR1w0uHA1T2W2k6Gi_video-1752605675.png" + } + ], + "title": "Image", + "description": "The generated image file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "seed": { + "description": "The seed used for generation.", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "image", + "seed" + ], + "required": [ + "image", + "seed" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/sky-raccoon/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sky-raccoon/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/sky-raccoon": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SkyRaccoonInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sky-raccoon/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SkyRaccoonOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-kontext-lora/text-to-image", + "metadata": { + "display_name": "Flux Kontext Lora", + "category": "text-to-image", + "description": "Super fast text-to-image endpoint for the FLUX.1 Kontext [dev] model with LoRA support, enabling rapid and high-quality image generation using pre-trained LoRA adaptations for personalization, specific styles, brand identities, and product-specific outputs.", + "status": "active", + "tags": [ + "text-to-image" + ], + "updated_at": "2026-01-26T21:43:23.871Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Training-4.jpg", + "model_url": "https://fal.run/fal-ai/flux-kontext-lora/text-to-image", + "license_type": "commercial", + "date": "2025-06-25T21:15:54.503Z", + "group": { + "key": "flux-kontext-lora", + "label": "Text to Image" + }, + "highlighted": false, + "kind": "inference", + "stream_url": "https://fal.ai/models/fal-ai/flux-kontext-lora/text-to-image/stream", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-kontext-lora/text-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-kontext-lora/text-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-kontext-lora/text-to-image", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Training-4.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-kontext-lora/text-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-kontext-lora/text-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxKontextLoraTextToImageInput": { + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "loras", + "acceleration" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Mount Fuji with cherry blossoms in the foreground, clear sky, peaceful spring day, soft natural light, realistic landscape." + ], + "description": "The prompt to generate the image with", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "title": "Num Images", + "maximum": 4, + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image.", + "title": "Image Size", + "default": "landscape_4_3" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "description": "The speed of the generation. The higher the speed, the faster the generation.", + "type": "string", + "title": "Acceleration", + "default": "none" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "png" + }, + "loras": { + "description": "\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "examples": [], + "title": "Loras", + "default": [] + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "type": "number", + "title": "Guidance scale (CFG)", + "maximum": 20, + "default": 2.5 + }, + "num_inference_steps": { + "minimum": 10, + "description": "The number of inference steps to perform.", + "type": "integer", + "title": "Num Inference Steps", + "maximum": 50, + "default": 30 + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "seed": { + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "type": "integer", + "title": "Seed" + } + }, + "title": "BaseKontextInput", + "required": [ + "prompt" + ] + }, + "FluxKontextLoraTextToImageOutput": { + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "examples": [ + [ + { + "height": 768, + "content_type": "image/jpeg", + "url": 
"https://storage.googleapis.com/falserverless/example_outputs/kontext_example_t2i_output.png", + "width": 1024 + } + ] + ], + "description": "The generated image files info.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "seed": { + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ", + "type": "integer", + "title": "Seed" + } + }, + "title": "KontextT2IOutput", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "title": "Height", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "title": "Width", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "LoraWeight": { + "x-fal-order-properties": [ + "path", + "scale" + ], + "type": "object", + "properties": { + "path": { + "description": "URL or the path to the LoRA weights.", + "type": "string", + "title": "Path" + }, + "scale": { + "minimum": 0, + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "type": "number", + "title": "Scale", + "maximum": 4, + "default": 1 + } + }, + "title": "LoraWeight", + "required": [ + "path" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "title": "Image", + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-kontext-lora/text-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-kontext-lora/text-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-kontext-lora/text-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxKontextLoraTextToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-kontext-lora/text-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxKontextLoraTextToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/omnigen-v2", + "metadata": { + "display_name": "Omnigen V2", + "category": "text-to-image", + "description": "OmniGen is a unified image generation model that can generate a wide range of images from multi-modal prompts. It can be used for various tasks such as Image Editing, Personalized Image Generation, Virtual Try-On, Multi Person Generation and more!", + "status": "active", + "tags": [ + "multimodal", + "editing", + "try-on" + ], + "updated_at": "2026-01-26T21:43:24.126Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-3.jpg", + "model_url": "https://fal.run/fal-ai/omnigen-v2", + "license_type": "commercial", + "date": "2025-06-25T11:26:05.155Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/omnigen-v2", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/omnigen-v2 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/omnigen-v2", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-3.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/omnigen-v2", + "documentationUrl": "https://fal.ai/models/fal-ai/omnigen-v2/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "OmnigenV2Input": { + "title": "TextToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Make the dress blue", + "Add a fisherman hat to the woman's head", + "Replace the sword with a hammer.", + "Change the dress to blue.", + "Remove the cat" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate or edit an image. Use specific language like 'Add the bird from image 1 to the desk in image 2' for better results." + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "square_hd" + }, + "scheduler": { + "enum": [ + "euler", + "dpmsolver" + ], + "title": "Scheduler", + "type": "string", + "description": "The scheduler to use for the diffusion process.", + "default": "euler" + }, + "cfg_range_end": { + "minimum": 0, + "maximum": 1, + "type": "number", + "description": "CFG range end value.", + "title": "Cfg Range End", + "default": 1 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt to guide what should not be in the image.", + "default": "(((deformed))), blurry, over saturation, bad anatomy, disfigured, poorly drawn face, mutation, mutated, (extra_limb), (ugly), (poorly drawn hands), fused fingers, messy drawing, broken legs censor, censored, censor_bar" + }, + "text_guidance_scale": { + "minimum": 1, + "maximum": 8, + "type": "number", + "description": "\n The Text Guidance scale controls how closely the model follows the text prompt.\n Higher values make the model stick more closely to the prompt.\n ", + "title": "Text Guidance scale", + "default": 5 + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "description": "The number of images to generate.", + "title": "Num Images", + "default": 1 + }, + "image_guidance_scale": { + "minimum": 1, + "maximum": 3, + "type": "number", + "description": "\n The Image Guidance scale controls how closely the model follows the input images.\n For image editing: 1.3-2.0, for in-context generation: 2.0-3.0\n ", + "title": "Image Guidance scale", + "default": 2 + }, + "input_image_urls": { + "description": "URLs of input images to use for image editing or multi-image generation. 
Supports up to 3 images.", + "type": "array", + "items": { + "type": "string" + }, + "examples": [ + [ + "https://storage.googleapis.com/falserverless/omnigen/input.png" + ] + ], + "title": "Input Image Urls", + "default": [] + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "cfg_range_start": { + "minimum": 0, + "maximum": 1, + "type": "number", + "description": "CFG range start value.", + "title": "Cfg Range Start", + "default": 0 + }, + "num_inference_steps": { + "minimum": 20, + "maximum": 50, + "type": "integer", + "description": "The number of inference steps to perform.", + "title": "Num Inference Steps", + "default": 50 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + } + }, + "x-fal-order-properties": [ + "prompt", + "input_image_urls", + "image_size", + "num_inference_steps", + "seed", + "text_guidance_scale", + "image_guidance_scale", + "negative_prompt", + "cfg_range_start", + "cfg_range_end", + "scheduler", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format" + ], + "required": [ + "prompt" + ] + }, + "OmnigenV2Output": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image.
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "description": "The height of the generated image.", + "title": "Height", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "description": "The width of the generated image.", + "title": "Width", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/omnigen-v2/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/omnigen-v2/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/omnigen-v2": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OmnigenV2Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/omnigen-v2/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OmnigenV2Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/bytedance/seedream/v3/text-to-image", + "metadata": { + "display_name": "Bytedance", + "category": "text-to-image", + "description": "Seedream 3.0 is a bilingual (Chinese and English) text-to-image model that excels at text-to-image generation.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:29.220Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-1.jpg", + "model_url": "https://fal.run/fal-ai/bytedance/seedream/v3/text-to-image", + "license_type": "commercial", + "date": "2025-06-10T18:39:25.070Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/bytedance/seedream/v3/text-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/bytedance/seedream/v3/text-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/bytedance/seedream/v3/text-to-image", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-1.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/bytedance/seedream/v3/text-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/bytedance/seedream/v3/text-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "BytedanceSeedreamV3TextToImageInput": { + "x-fal-order-properties": [ + "prompt", + "image_size", + "guidance_scale", + "num_images", + "seed", + "enable_safety_checker", + "sync_mode" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Fisheye lens, the head of a cat, the image shows the effect that the facial features of the cat are distorted due to the shooting method." + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt used to generate the image" + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "Number of images to generate", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "Use for finer control over the output image size. Will be used over aspect_ratio, if both are provided. Width and height must be between 512 and 2048." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "guidance_scale": { + "minimum": 1, + "maximum": 10, + "type": "number", + "title": "Guidance Scale", + "description": "Controls how closely the output image aligns with the input prompt. Higher values mean stronger prompt correlation.", + "default": 2.5 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed to control the stochasticity of image generation." 
+ }, + "enable_safety_checker": { + "examples": [ + true + ], + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + } + }, + "title": "SeedDreamInput", + "required": [ + "prompt" + ] + }, + "BytedanceSeedreamV3TextToImageOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3.fal.media/files/rabbit/EJqemc4hQlHKAtkkfTJqB_a2aaccab7ff84740b6323da580146087.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "Generated images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "examples": [ + 42 + ], + "title": "Seed", + "type": "integer", + "description": "Seed used for generation" + } + }, + "title": "SeedDreamOutput", + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/bytedance/seedream/v3/text-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedream/v3/text-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedream/v3/text-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BytedanceSeedreamV3TextToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedream/v3/text-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BytedanceSeedreamV3TextToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-1/schnell", + "metadata": { + "display_name": "FLUX.1 [schnell]", + "category": "text-to-image", + "description": "Fastest inference in the world for the 12 billion parameter FLUX.1 [schnell] text-to-image model. ", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:32.515Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Upscale-2.jpg", + "model_url": "https://fal.run/fal-ai/flux-1/schnell", + "license_type": "commercial", + "date": "2025-06-02T18:15:06.266Z", + "group": { + "key": "flux-1-fast", + "label": "Text to Image [schnell]" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-1/schnell", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-1/schnell queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-1/schnell", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Upscale-2.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-1/schnell", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-1/schnell/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux1SchnellInput": { + "title": "SchnellFlux1TextToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Extreme close-up of a single tiger eye, direct frontal view. Detailed iris and pupil. Sharp focus on eye texture and color. Natural lighting to capture authentic eye shine and depth. The word \"FLUX\" is painted over it in big, white brush strokes with visible texture." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "maximum": 4, + "title": "Num Images", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "landscape_4_3" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "title": "Acceleration", + "type": "string", + "description": "The speed of the generation. The higher the speed, the faster the generation.", + "default": "regular" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "num_inference_steps": { + "minimum": 1, + "description": "The number of inference steps to perform.", + "type": "integer", + "maximum": 12, + "title": "Num Inference Steps", + "default": 4 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "guidance_scale": { + "minimum": 1, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "type": "number", + "maximum": 20, + "title": "Guidance scale (CFG)", + "default": 3.5 + } + }, + "x-fal-order-properties": [ + "num_inference_steps", + "prompt", + "image_size", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "acceleration" + ], + "required": [ + "prompt" + ] + }, + "Flux1SchnellOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used 
for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-1/schnell/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-1/schnell/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-1/schnell": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux1SchnellInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-1/schnell/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux1SchnellOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-1/dev", + "metadata": { + "display_name": "FLUX.1 [dev]", + "category": "text-to-image", + "description": "FLUX.1 [dev] is a 12 billion parameter flow transformer that generates high-quality images from text. It is suitable for personal and commercial use.\n", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:32.638Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-4.jpg", + "model_url": "https://fal.run/fal-ai/flux-1/dev", + "license_type": "commercial", + "date": "2025-06-02T18:12:26.276Z", + "group": { + "key": "flux-1-fast", + "label": "Text to Image [dev]" + }, + "highlighted": false, + "kind": "inference", + "stream_url": "https://fal.run/fal-ai/flux-1/dev/stream", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-1/dev", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-1/dev queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-1/dev", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-4.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-1/dev", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-1/dev/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux1DevInput": { + "title": "BaseFlux1Input", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Extreme close-up of a single tiger eye, direct frontal view. Detailed iris and pupil. Sharp focus on eye texture and color. 
Natural lighting to capture authentic eye shine and depth. The word \"FLUX\" is painted over it in big, white brush strokes with visible texture." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "maximum": 4, + "title": "Num Images", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "landscape_4_3" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "title": "Acceleration", + "type": "string", + "description": "The speed of the generation. The higher the speed, the faster the generation.", + "default": "regular" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "num_inference_steps": { + "minimum": 1, + "description": "The number of inference steps to perform.", + "type": "integer", + "maximum": 50, + "title": "Num Inference Steps", + "default": 28 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "guidance_scale": { + "minimum": 1, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "type": "number", + "maximum": 20, + "title": "Guidance scale (CFG)", + "default": 3.5 + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "acceleration" + ], + "required": [ + "prompt" + ] + }, + "Flux1DevOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-1/dev/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-1/dev/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-1/dev": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux1DevInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-1/dev/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux1DevOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-pro/kontext/max/text-to-image", + "metadata": { + "display_name": "FLUX.1 Kontext [max]", + "category": "text-to-image", + "description": "FLUX.1 Kontext [max] text-to-image is a new premium model that brings maximum performance across all aspects, with greatly improved prompt adherence.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:36.246Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-3.jpg", + "model_url": "https://fal.run/fal-ai/flux-pro/kontext/max/text-to-image", + "license_type": "commercial", + "date": "2025-05-29T04:52:29.523Z", + "group": { + "key": "flux-pro-kontext", + "label": "Kontext [max] -- Text to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-pro/kontext/max/text-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-pro/kontext/max/text-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-pro/kontext/max/text-to-image", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-3.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-pro/kontext/max/text-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-pro/kontext/max/text-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position."
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxProKontextMaxTextToImageInput": { + "title": "FluxProTextToImageInputWithAR", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Extreme close-up of a single tiger eye, direct frontal view. Detailed iris and pupil. Sharp focus on eye texture and color. Natural lighting to capture authentic eye shine and depth. The word \"FLUX\" is painted over it in big, white brush strokes with visible texture." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "description": "The number of images to generate.", + "maximum": 4, + "default": 1 + }, + "aspect_ratio": { + "enum": [ + "21:9", + "16:9", + "4:3", + "3:2", + "1:1", + "2:3", + "3:4", + "9:16", + "9:21" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated image.", + "default": "1:1" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "safety_tolerance": { + "enum": [ + "1", + "2", + "3", + "4", + "5", + "6" + ], + "title": "Safety Tolerance", + "type": "string", + "description": "The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive.", + "default": "2" + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance scale (CFG)", + "type": "number", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "maximum": 20, + "default": 3.5 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "enhance_prompt": { + "title": "Enhance Prompt", + "type": "boolean", + "description": "Whether to enhance the prompt for better results.", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "output_format", + "safety_tolerance", + "enhance_prompt", + "aspect_ratio" + ], + "required": [ + "prompt" + ] + }, + "FluxProKontextMaxTextToImageOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/registry__image__fast_sdxl__models__Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "registry__image__fast_sdxl__models__Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-pro/kontext/max/text-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-pro/kontext/max/text-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-pro/kontext/max/text-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxProKontextMaxTextToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-pro/kontext/max/text-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxProKontextMaxTextToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-pro/kontext/text-to-image", + "metadata": { + "display_name": "FLUX.1 Kontext [pro]", + "category": "text-to-image", + "description": "The FLUX.1 Kontext [pro] text-to-image delivers state-of-the-art image generation results with unprecedented prompt following, photorealistic rendering, and flawless typography.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:37.143Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/zebra/VOrzt92hNVLX9m9jB-7-4_deea28b6b45344d4aa4eb3be14b3478e.jpg", + "model_url": "https://fal.run/fal-ai/flux-pro/kontext/text-to-image", + "license_type": "commercial", + "date": "2025-05-28T19:25:39.115Z", + "group": { + "key": "flux-pro-kontext", + "label": "Kontext [pro] -- Text to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-pro/kontext/text-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-pro/kontext/text-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-pro/kontext/text-to-image", + "category": "text-to-image", + "thumbnailUrl": "https://fal.media/files/zebra/VOrzt92hNVLX9m9jB-7-4_deea28b6b45344d4aa4eb3be14b3478e.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-pro/kontext/text-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-pro/kontext/text-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxProKontextTextToImageInput": { + "title": "FluxProTextToImageInputWithAR", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Extreme close-up of a single tiger eye, direct frontal view. Detailed iris and pupil. Sharp focus on eye texture and color. Natural lighting to capture authentic eye shine and depth. The word \"FLUX\" is painted over it in big, white brush strokes with visible texture." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "description": "The number of images to generate.", + "maximum": 4, + "default": 1 + }, + "aspect_ratio": { + "enum": [ + "21:9", + "16:9", + "4:3", + "3:2", + "1:1", + "2:3", + "3:4", + "9:16", + "9:21" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated image.", + "default": "1:1" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "safety_tolerance": { + "enum": [ + "1", + "2", + "3", + "4", + "5", + "6" + ], + "title": "Safety Tolerance", + "type": "string", + "description": "The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive.", + "default": "2" + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance scale (CFG)", + "type": "number", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "maximum": 20, + "default": 3.5 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "enhance_prompt": { + "title": "Enhance Prompt", + "type": "boolean", + "description": "Whether to enhance the prompt for better results.", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "output_format", + "safety_tolerance", + "enhance_prompt", + "aspect_ratio" + ], + "required": [ + "prompt" + ] + }, + "FluxProKontextTextToImageOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/registry__image__fast_sdxl__models__Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "registry__image__fast_sdxl__models__Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-pro/kontext/text-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-pro/kontext/text-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-pro/kontext/text-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxProKontextTextToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-pro/kontext/text-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxProKontextTextToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/bagel", + "metadata": { + "display_name": "Bagel", + "category": "text-to-image", + "description": "Bagel is a 7B parameter multimodal model from Bytedance-Seed that can generate both text and images.", + "status": "active", + "tags": [ + "text-to-image", + "multimodal" + ], + "updated_at": "2026-01-26T21:43:39.575Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/bagel.webp", + "model_url": "https://fal.run/fal-ai/bagel", + "license_type": "commercial", + "date": "2025-05-21T18:27:20.822Z", + "group": { + "key": "bagel", + "label": "Text to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/bagel", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/bagel queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/bagel", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/bagel.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/bagel", + "documentationUrl": "https://fal.ai/models/fal-ai/bagel/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "BagelInput": { + "title": "ImageGenInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A luminous ancient temple floating among cosmic clouds, with impossible architecture of twisted spires and inverted arches. 
The structure is half-built from crystalline white marble and half from living bioluminescent coral in vibrant teal and purple. Ethereal light filters through stained glass windows depicting mythological scenes. Tiny cloaked figures with glowing lanterns traverse impossible staircases. In the foreground, a massive ornate door stands slightly ajar, revealing a glimpse of swirling golden energy within. The scene is lit by two moons of different colors, casting overlapping shadows. Cinematic lighting, hyper-detailed textures, 8K resolution." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed to use for the generation." + }, + "use_thought": { + "title": "Use Thought", + "type": "boolean", + "description": "Whether to use thought tokens for generation. If set to true, the model will \"think\" to potentially improve generation quality. Increases generation time and increases the cost by 20%.", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "seed", + "use_thought", + "enable_safety_checker" + ], + "required": [ + "prompt" + ] + }, + "BagelOutput": { + "title": "ImageOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "examples": [ + [ + { + "file_size": 423052, + "height": 1024, + "file_name": "wRhCPSyiKTiLnnWvUpGIl.jpeg", + "content_type": "image/jpeg", + "url": "https://storage.googleapis.com/falserverless/bagel/wRhCPSyiKTiLnnWvUpGIl.jpeg", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated images.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." 
+ }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "description": "Represents an image file.", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/bagel/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bagel/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/bagel": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BagelInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bagel/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BagelOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/imagen4/preview/ultra", + "metadata": { + "display_name": "Imagen 4 Ultra", + "category": "text-to-image", + "description": "Google’s highest quality image generation model", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:40.677Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-2.jpg", + "model_url": "https://fal.run/fal-ai/imagen4/preview/ultra", + "license_type": "commercial", + "date": "2025-05-20T19:11:16.424Z", + "group": { + "key": "imagen-4", + "label": "Imagen 4 Ultra" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/imagen4/preview/ultra", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/imagen4/preview/ultra queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/imagen4/preview/ultra", + "category": "text-to-image", + "thumbnailUrl": 
"https://storage.googleapis.com/fal_cdn/fal/Sound-2.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/imagen4/preview/ultra", + "documentationUrl": "https://fal.ai/models/fal-ai/imagen4/preview/ultra/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Imagen4PreviewUltraInput": { + "title": "Imagen4TextToImageUltraInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "This four-panel comic strip uses a charming, deliberately pixelated art style reminiscent of classic 8-bit video games, featuring simple shapes and a limited, bright color palette dominated by greens, blues, browns, and the dinosaur's iconic grey/black. The setting is a stylized pixel beach. Panel one shows the familiar Google Chrome T-Rex dinosaur, complete with its characteristic pixelated form, wearing tiny pixel sunglasses and lounging on a pixelated beach towel under a blocky yellow sun. Pixelated palm trees sway gently in the background against a blue pixel sky. A caption box with pixelated font reads, \"Even error messages need a vacation.\" Panel two is a close-up of the T-Rex attempting to build a pixel sandcastle. It awkwardly pats a mound of brown pixels with its tiny pixel arms, looking focused. Small pixelated shells dot the sand around it. Panel three depicts the T-Rex joyfully hopping over a series of pixelated cacti planted near the beach, mimicking its game obstacle avoidance. Small \"Boing! Boing!\" sound effect text appears in a blocky font above each jump. A pixelated crab watches from the side, waving its pixel claw. The final panel shows the T-Rex floating peacefully on its back in the blocky blue pixel water, sunglasses still on, with a contented expression. A small thought bubble above it contains pixelated \"Zzz...\" indicating relaxation." + ], + "maxLength": 5000, + "type": "string", + "title": "Prompt", + "minLength": 3, + "description": "The text prompt to generate an image from." 
+ }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Number of Images", + "description": "The number of images to generate.", + "default": 1 + }, + "aspect_ratio": { + "enum": [ + "1:1", + "16:9", + "9:16", + "4:3", + "3:4" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated image.", + "default": "1:1" + }, + "resolution": { + "enum": [ + "1K", + "2K" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated image.", + "default": "1K" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + } + }, + "x-fal-order-properties": [ + "prompt", + "num_images", + "aspect_ratio", + "output_format", + "sync_mode", + "resolution" + ], + "required": [ + "prompt" + ] + }, + "Imagen4PreviewUltraOutput": { + "title": "Imagen4TextToImageUltraOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "file_name": "uzopSU5Me5PUENW-IXMXD_output.png", + "content_type": "image/png", + "url": "https://v3.fal.media/files/panda/uzopSU5Me5PUENW-IXMXD_output.png" + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated images.", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + }, + "description": { + "title": "Description", + "type": "string", + "description": "The description of the generated images." + } + }, + "x-fal-order-properties": [ + "images", + "description" + ], + "required": [ + "images", + "description" + ] + }, + "ImageFile": { + "title": "ImageFile", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the image" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the image" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/imagen4/preview/ultra/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/imagen4/preview/ultra/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/imagen4/preview/ultra": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Imagen4PreviewUltraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/imagen4/preview/ultra/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Imagen4PreviewUltraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/dreamo", + "metadata": { + "display_name": "DreamO", + "category": "text-to-image", + "description": "DreamO is an image customization framework designed to support a wide range of tasks while facilitating seamless integration of multiple conditions.", + "status": "active", + "tags": [ + "stylized", + "realism" + ], + "updated_at": "2026-01-26T21:43:41.105Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Training-5.jpg", + "model_url": "https://fal.run/fal-ai/dreamo", + "license_type": "commercial", + "date": "2025-05-19T21:26:55.668Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/dreamo", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/dreamo queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/dreamo", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Training-5.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/dreamo", + "documentationUrl": "https://fal.ai/models/fal-ai/dreamo/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "DreamoInput": { + "title": "DreamOInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Two people hugging inside a forest" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "first_image_url": { + "examples": [ + "https://v3.fal.media/files/rabbit/I3exImt_zOYaiZv8caeGP_Pz4CnQ12tCUuDIhEQkmbD_ae4193792924495e89c516e6b492ed2b_1.jpg" + ], + "title": "First Reference Image URL", + "type": "string", + "description": "URL of first reference image to use for generation." + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "square_hd" + }, + "second_image_url": { + "examples": [ + "https://v3.fal.media/files/penguin/F3Yqprwlv-yaeusxAS0bS_image.webp" + ], + "title": "Second Reference Image URL", + "type": "string", + "description": "URL of second reference image to use for generation." + }, + "second_reference_task": { + "enum": [ + "ip", + "id", + "style" + ], + "title": "Second Reference Task", + "type": "string", + "description": "Task for second reference image (ip/id/style).", + "default": "ip" + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 3.5 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "first_reference_task": { + "enum": [ + "ip", + "id", + "style" + ], + "title": "First Reference Task", + "type": "string", + "description": "Task for first reference image (ip/id/style).", + "default": "ip" + }, + "negative_prompt": { + "examples": [ + "bad quality, worst quality, text, signature, watermark, extra limbs" + ], + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt describing what should not appear in the generated image.", + "default": "" + }, + "ref_resolution": { + "minimum": 512, + "maximum": 1024, + "type": "integer", + "title": "Ref Resolution", + "description": "Resolution for reference images.", + "default": 512 + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. 
This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ", + "default": false + }, + "true_cfg": { + "minimum": 1, + "maximum": 5, + "type": "number", + "title": "True Cfg", + "description": "The weight of the CFG loss.", + "default": 1 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 12 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + } + }, + "x-fal-order-properties": [ + "prompt", + "first_image_url", + "second_image_url", + "first_reference_task", + "second_reference_task", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "negative_prompt", + "sync_mode", + "ref_resolution", + "true_cfg", + "enable_safety_checker" + ], + "required": [ + "prompt" + ] + }, + "DreamoOutput": { + "title": "DreamOOutput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Two people hugging inside a forest" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used to generate the image." + }, + "images": { + "examples": [ + [ + { + "height": 1024, + "content_type": "image/jpeg", + "url": "https://v3.fal.media/files/elephant/Qqd29dv20375fBbN1233_.png", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The URLs of the generated images.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." 
+ }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "description": "Represents an image file.", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/dreamo/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/dreamo/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/dreamo": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DreamoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/dreamo/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DreamoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-lora/stream", + "metadata": { + "display_name": "Flux Lora", + "category": "text-to-image", + "description": "Super fast endpoint for the FLUX.1 [dev] model with LoRA support, enabling rapid and high-quality image generation using pre-trained LoRA adaptations for personalization, specific styles, brand identities, and product-specific outputs.", + "status": "active", + "tags": [ + "lora", + "personalization" + ], + "updated_at": "2026-01-26T21:43:42.516Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Upscale-5.jpeg", + "model_url": "https://fal.run/fal-ai/flux-lora/stream", + "license_type": "commercial", + "date": "2025-05-15T22:28:51.397Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-lora/stream", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-lora/stream queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-lora/stream", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Upscale-5.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-lora/stream", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-lora/stream/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxLoraStreamInput": { + "title": "TextToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Extreme close-up of a single tiger eye, direct frontal view. Detailed iris and pupil. Sharp focus on eye texture and color. 
Natural lighting to capture authentic eye shine and depth. The word \"FLUX\" is painted over it in big, white brush strokes with visible texture." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate. This is always set to 1 for streaming output.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "landscape_4_3" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "loras": { + "description": "\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "examples": [], + "title": "Loras", + "default": [] + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 35, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 28 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "loras", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format" + ], + "required": [ + "prompt" + ] + }, + "FluxLoraStreamOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Scale", + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + } + }, + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-lora/stream/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-lora/stream/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-lora/stream": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxLoraStreamInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-lora/stream/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxLoraStreamOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/minimax/image-01", + "metadata": { + "display_name": "MiniMax (Hailuo AI) Text to Image", + "category": "text-to-image", + "description": "Generate high quality images from text prompts using MiniMax Image-01. Longer text prompts will result in better quality images.", + "status": "active", + "tags": [ + "stylized", + "realism" + ], + "updated_at": "2026-01-26T21:43:48.160Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/web-examples/minimax-image/minimax.webp", + "model_url": "https://fal.run/fal-ai/minimax/image-01", + "license_type": "commercial", + "date": "2025-05-06T16:11:12.334Z", + "group": { + "key": "minimax-image", + "label": "Text to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/minimax/image-01", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/minimax/image-01 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/minimax/image-01", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/web-examples/minimax-image/minimax.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/minimax/image-01", + "documentationUrl": "https://fal.ai/models/fal-ai/minimax/image-01/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "MinimaxImage01Input": { + "title": "MiniMaxTextToImageRequest", + "type": "object", + "properties": { + "prompt_optimizer": { + "description": "Whether to enable automatic prompt optimization", + "type": "boolean", + "title": "Prompt Optimizer", + "default": false + }, + "aspect_ratio": { + "enum": [ + "1:1", + "16:9", + "4:3", + "3:2", + "2:3", + "3:4", + "9:16", + "21:9" + ], + "description": "Aspect ratio of the generated image", + "type": "string", + "title": "Aspect Ratio", + "default": "1:1" + }, + "num_images": { + "minimum": 1, + "description": "Number of images to generate (1-9)", + "type": "integer", + "maximum": 9, + "title": "Num Images", + "default": 1 + }, + "prompt": { + "examples": [ + "Man dressed in white t shirt, full-body stand front view image, outdoor, Venice beach sign, full-body image, Los Angeles, Fashion photography of 90s, documentary, Film grain, photorealistic" + ], + "maxLength": 1500, + "type": "string", + "title": "Prompt", + "description": "Text prompt for image generation (max 1500 characters)", + "minLength": 1 + } + }, + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "num_images", + "prompt_optimizer" + ], + "required": [ + "prompt" + ] + }, + "MinimaxImage01Output": { + "title": "MiniMaxTextToImageOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "file_size": 351366, + "file_name": "image.jpg", + "content_type": "image/jpeg", + "url": "https://v3.fal.media/files/tiger/xLcblZAbiw1kM6ZR_2D-r_image.jpg" + } + ] + ], + "description": "Generated images", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/File" + } + } + }, + "x-fal-order-properties": [ + "images" + ], + "required": [ + "images" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/minimax/image-01/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/image-01/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/minimax/image-01": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxImage01Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/image-01/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxImage01Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pony-v7", + "metadata": { + "display_name": "Pony V7", + "category": "text-to-image", + "description": "Pony V7 is a finetuned text-to-image model for superior aesthetics and prompt following.", + "status": "active", + "tags": [ + "diffusion", + "style" + ], + "updated_at": "2026-01-26T21:43:49.095Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-5.jpg", + "model_url": "https://fal.run/fal-ai/pony-v7", + "license_type": "commercial", + "date": "2025-05-05T08:25:39.654Z", + "highlighted": false, + "kind": "inference", + "stream_url": "https://fal.run/fal-ai/pony-v7/stream", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pony-v7", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pony-v7 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pony-v7", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-5.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/pony-v7", + "documentationUrl": "https://fal.ai/models/fal-ai/pony-v7/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PonyV7Input": { + "title": "Input", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Close-up portrait of a majestic iguana with vibrant blue-green scales, piercing amber eyes, and orange spiky crest. Intricate textures and details visible on scaly skin. Wrapped in dark hood, giving regal appearance. Dramatic lighting against black background. Hyper-realistic, high-resolution image showcasing the reptile's expressive features and coloration." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate images from" + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 2, + "description": "The number of images to generate", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "square_hd" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "noise_source": { + "enum": [ + "gpu", + "cpu" + ], + "title": "Noise Source", + "type": "string", + "description": "\n The source of the noise to use for generating images.\n If set to 'gpu', the noise will be generated on the GPU.\n If set to 'cpu', the noise will be generated on the CPU.\n ", + "default": "gpu" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance Scale", + "type": "number", + "maximum": 20, + "description": "Classifier free guidance scale", + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 20, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps to take", + "default": 40 + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": false + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed to use for generating images" + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_images", + "seed", + "guidance_scale", + "num_inference_steps", + "noise_source", + "sync_mode", + "enable_safety_checker", + "output_format" + ], + "required": [ + "prompt" + ] + }, + "PonyV7Output": { + "title": "ImageOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." 
+ }, + "images": { + "examples": [ + [ + { + "height": 1024, + "content_type": "image/jpeg", + "url": "https://v3.fal.media/files/monkey/cfJDLaR5mCnlbfoEWXZhm.jpeg", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pony-v7/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pony-v7/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/pony-v7": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PonyV7Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pony-v7/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PonyV7Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ideogram/v3", + "metadata": { + "display_name": "Ideogram Text to Image", + "category": "text-to-image", + "description": "Generate high-quality images, posters, and logos with Ideogram V3. Features exceptional typography handling and realistic outputs optimized for commercial and creative use.", + "status": "active", + "tags": [ + "realism", + "typography" + ], + "updated_at": "2026-01-26T21:43:49.594Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/koala/nTe9hpbTjo8BWgaGYTGzi_7b7c3112872b48b6be63734f9daa3f73.jpg", + "model_url": "https://fal.run/fal-ai/ideogram/v3", + "license_type": "commercial", + "date": "2025-05-01T16:04:02.805Z", + "group": { + "key": "ideogram-v3", + "label": "Text to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ideogram/v3", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ideogram/v3 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ideogram/v3", + "category": "text-to-image", + "thumbnailUrl": "https://fal.media/files/koala/nTe9hpbTjo8BWgaGYTGzi_7b7c3112872b48b6be63734f9daa3f73.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ideogram/v3", + "documentationUrl": "https://fal.ai/models/fal-ai/ideogram/v3/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "IdeogramV3Input": { + "x-fal-order-properties": [ + "image_urls", + "rendering_speed", + "color_palette", + "style_codes", + "style", + "expand_prompt", + "num_images", + "seed", + "sync_mode", + "style_preset", + "prompt", + "image_size", + "negative_prompt" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "The Bone Forest stretched across the horizon, its trees fashioned from the ossified remains of ancient leviathans that once swam through the sky. Shamans with antlers growing from their shoulders and eyes that revealed the true nature of any being they beheld conducted rituals to commune with the spirits that still inhabited the calcified grove. In sky writes \"Ideogram V3 in fal.ai\"" + ], + "title": "Prompt", + "type": "string" + }, + "num_images": { + "minimum": 1, + "maximum": 8, + "type": "integer", + "description": "Number of images to generate.", + "title": "Num Images", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The resolution of the generated image", + "title": "Image Size", + "default": "square_hd" + }, + "style": { + "anyOf": [ + { + "enum": [ + "AUTO", + "GENERAL", + "REALISTIC", + "DESIGN" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The style type to generate with. Cannot be used with style_codes.", + "title": "Style" + }, + "style_preset": { + "anyOf": [ + { + "enum": [ + "80S_ILLUSTRATION", + "90S_NOSTALGIA", + "ABSTRACT_ORGANIC", + "ANALOG_NOSTALGIA", + "ART_BRUT", + "ART_DECO", + "ART_POSTER", + "AURA", + "AVANT_GARDE", + "BAUHAUS", + "BLUEPRINT", + "BLURRY_MOTION", + "BRIGHT_ART", + "C4D_CARTOON", + "CHILDRENS_BOOK", + "COLLAGE", + "COLORING_BOOK_I", + "COLORING_BOOK_II", + "CUBISM", + "DARK_AURA", + "DOODLE", + "DOUBLE_EXPOSURE", + "DRAMATIC_CINEMA", + "EDITORIAL", + "EMOTIONAL_MINIMAL", + "ETHEREAL_PARTY", + "EXPIRED_FILM", + "FLAT_ART", + "FLAT_VECTOR", + "FOREST_REVERIE", + "GEO_MINIMALIST", + "GLASS_PRISM", + "GOLDEN_HOUR", + "GRAFFITI_I", + "GRAFFITI_II", + "HALFTONE_PRINT", + "HIGH_CONTRAST", + "HIPPIE_ERA", + "ICONIC", + "JAPANDI_FUSION", + "JAZZY", + "LONG_EXPOSURE", + "MAGAZINE_EDITORIAL", + "MINIMAL_ILLUSTRATION", + "MIXED_MEDIA", + "MONOCHROME", + "NIGHTLIFE", + "OIL_PAINTING", + "OLD_CARTOONS", + "PAINT_GESTURE", + "POP_ART", + "RETRO_ETCHING", + "RIVIERA_POP", + "SPOTLIGHT_80S", + "STYLIZED_RED", + "SURREAL_COLLAGE", + "TRAVEL_POSTER", + "VINTAGE_GEO", + "VINTAGE_POSTER", + "WATERCOLOR", + "WEIRD", + "WOODBLOCK_PRINT" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Style preset for generation. 
The chosen style preset will guide the generation.", + "title": "Style Preset" + }, + "expand_prompt": { + "description": "Determine if MagicPrompt should be used in generating the request or not.", + "type": "boolean", + "title": "Expand Prompt", + "default": true + }, + "rendering_speed": { + "enum": [ + "TURBO", + "BALANCED", + "QUALITY" + ], + "description": "The rendering speed to use.", + "type": "string", + "title": "Rendering Speed", + "default": "BALANCED" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "color_palette": { + "anyOf": [ + { + "$ref": "#/components/schemas/ColorPalette" + }, + { + "type": "null" + } + ], + "description": "A color palette for generation, must EITHER be specified via one of the presets (name) or explicitly via hexadecimal representations of the color with optional weights (members)" + }, + "style_codes": { + "anyOf": [ + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "null" + } + ], + "description": "A list of 8 character hexadecimal codes representing the style of the image. Cannot be used in conjunction with style_reference_images or style", + "title": "Style Codes" + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "Seed for the random number generator", + "title": "Seed" + }, + "image_urls": { + "anyOf": [ + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "null" + } + ], + "description": "A set of images to use as style references (maximum total size 10MB across all style references). The images should be in JPEG, PNG or WebP format", + "title": "Image Urls" + }, + "negative_prompt": { + "description": "Description of what to exclude from an image. 
Descriptions in the prompt take precedence to descriptions in the negative prompt.", + "type": "string", + "title": "Negative Prompt", + "default": "" + } + }, + "title": "BaseTextToImageInputV3", + "required": [ + "prompt" + ] + }, + "IdeogramV3Output": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3.fal.media/files/penguin/lHdRabS80guysb8Zw1kul_image.png" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/File" + } + }, + "seed": { + "examples": [ + 123456 + ], + "title": "Seed", + "type": "integer", + "description": "Seed used for the random number generator" + } + }, + "title": "OutputV3", + "required": [ + "images", + "seed" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Height", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Width", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "ColorPalette": { + "x-fal-order-properties": [ + "members", + "name" + ], + "type": "object", + "properties": { + "members": { + "anyOf": [ + { + "type": "array", + "items": { + "$ref": "#/components/schemas/ColorPaletteMember" + } + }, + { + "type": "null" + } + ], + "description": "A list of color palette members that define the color palette", + "title": "Members" + }, + "name": { + "anyOf": [ + { + "enum": [ + "EMBER", + "FRESH", + "JUNGLE", + "MAGIC", + "MELON", + "MOSAIC", + "PASTEL", + "ULTRAMARINE" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "description": "A color palette preset value", + "title": "Name" + } + }, + "title": "ColorPalette" + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file." 
+ }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "title": "File", + "required": [ + "url" + ] + }, + "ColorPaletteMember": { + "x-fal-order-properties": [ + "rgb", + "color_weight" + ], + "type": "object", + "properties": { + "color_weight": { + "anyOf": [ + { + "minimum": 0.05, + "maximum": 1, + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The weight of the color in the color palette", + "title": "Color Weight", + "default": 0.5 + }, + "rgb": { + "description": "RGB color value for the palette member", + "$ref": "#/components/schemas/RGBColor" + } + }, + "title": "ColorPaletteMember", + "required": [ + "rgb" + ] + }, + "RGBColor": { + "x-fal-order-properties": [ + "r", + "g", + "b" + ], + "type": "object", + "properties": { + "r": { + "minimum": 0, + "maximum": 255, + "type": "integer", + "description": "Red color value", + "title": "R", + "default": 0 + }, + "b": { + "minimum": 0, + "maximum": 255, + "type": "integer", + "description": "Blue color value", + "title": "B", + "default": 0 + }, + "g": { + "minimum": 0, + "maximum": 255, + "type": "integer", + "description": "Green color value", + "title": "G", + "default": 0 + } + }, + "title": "RGBColor" + } + } + }, + "paths": { + "/fal-ai/ideogram/v3/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v3/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v3": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdeogramV3Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v3/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdeogramV3Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/f-lite/standard", + "metadata": { + "display_name": "F Lite", + "category": "text-to-image", + "description": "F Lite is a 10B parameter diffusion model created by Fal and Freepik, trained exclusively on copyright-safe and SFW content.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:50.713Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-4.jpg", + "model_url": "https://fal.run/fal-ai/f-lite/standard", + "license_type": "commercial", + "date": "2025-04-28T17:58:51.831Z", + "group": { + "key": "f-lite", + "label": "Regular Mode" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/f-lite/standard", + "version": "1.0.0", + "description": "F Lite is a 10B parameter diffusion model created by Fal and Freepik, trained exclusively on copyright-safe and SFW content. The model was trained on Freepik's internal dataset comprising approximately 80 million copyright-safe images, making it the first publicly available model of this scale trained exclusively on legally compliant and SFW content.", + "x-fal-metadata": { + "endpointId": "fal-ai/f-lite/standard", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-4.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/f-lite/standard", + "documentationUrl": "https://fal.ai/models/fal-ai/f-lite/standard/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FLiteStandardInput": { + "title": "TextToImageInputStandard", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Mount Fuji at sunset, with the iconic snow-capped peak silhouetted against a vibrant orange and purple sky. A tranquil lake in the foreground perfectly reflects the mountain and colorful sky. A few traditional Japanese cherry blossom trees frame the scene, with their delicate pink petals visible in the foreground." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 4, + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "landscape_4_3" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ", + "default": false + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance scale (CFG)", + "type": "number", + "maximum": 20, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps to perform.", + "default": 28 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "negative_prompt": { + "examples": [ + "Blurry, out of focus, low resolution, bad anatomy, ugly, deformed, poorly drawn, extra limbs" + ], + "title": "Negative prompt", + "type": "string", + "description": "Negative Prompt for generation.", + "default": "" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker" + ], + "required": [ + "prompt" + ] + }, + "FLiteStandardOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." 
+ }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/f-lite/standard/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/f-lite/standard/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/f-lite/standard": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FLiteStandardInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/f-lite/standard/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FLiteStandardOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/f-lite/texture", + "metadata": { + "display_name": "F Lite (texture mode)", + "category": "text-to-image", + "description": "F Lite is a 10B parameter diffusion model created by Fal and Freepik, trained exclusively on copyright-safe and SFW content. This is a high texture density variant of the model.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:50.872Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Upscale-2.jpg", + "model_url": "https://fal.run/fal-ai/f-lite/texture", + "license_type": "commercial", + "date": "2025-04-28T17:58:41.761Z", + "group": { + "key": "f-lite", + "label": "Texture Mode" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/f-lite/texture", + "version": "1.0.0", + "description": "F Lite is a 10B parameter diffusion model created by Fal and Freepik, trained exclusively on copyright-safe and SFW content. The model was trained on Freepik's internal dataset comprising approximately 80 million copyright-safe images, making it the first publicly available model of this scale trained exclusively on legally compliant and SFW content.", + "x-fal-metadata": { + "endpointId": "fal-ai/f-lite/texture", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Upscale-2.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/f-lite/texture", + "documentationUrl": "https://fal.ai/models/fal-ai/f-lite/texture/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FLiteTextureInput": { + "title": "TextToImageInputTexture", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Mount Fuji at sunset, with the iconic snow-capped peak silhouetted against a vibrant orange and purple sky. A tranquil lake in the foreground perfectly reflects the mountain and colorful sky. A few traditional Japanese cherry blossom trees frame the scene, with their delicate pink petals visible in the foreground." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 4, + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "landscape_4_3" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ", + "default": false + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance scale (CFG)", + "type": "number", + "maximum": 20, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps to perform.", + "default": 28 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "negative_prompt": { + "examples": [ + "Blurry, out of focus, low resolution, bad anatomy, ugly, deformed, poorly drawn, extra limbs" + ], + "title": "Negative prompt", + "type": "string", + "description": "Negative Prompt for generation.", + "default": "" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker" + ], + "required": [ + "prompt" + ] + }, + "FLiteTextureOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." 
+ }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/f-lite/texture/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/f-lite/texture/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/f-lite/texture": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FLiteTextureInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/f-lite/texture/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FLiteTextureOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/gpt-image-1/text-to-image", + "metadata": { + "display_name": "gpt-image-1", + "category": "text-to-image", + "description": "OpenAI's latest image generation and editing model: gpt-image-1.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:53.033Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-2.jpg", + "model_url": "https://fal.run/fal-ai/gpt-image-1/text-to-image", + "license_type": "commercial", + "date": "2025-04-23T17:29:52.367Z", + "group": { + "key": "gpt-image-1", + "label": "Text to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/gpt-image-1/text-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/gpt-image-1/text-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/gpt-image-1/text-to-image", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-2.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/gpt-image-1/text-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/gpt-image-1/text-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "GptImage1TextToImageInput": { + "title": "TextToImageRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A serene cyberpunk cityscape at twilight, with neon signs glowing in vibrant blues and purples, reflecting on rain-slick streets. 
Sleek futuristic buildings tower above, connected by glowing skybridges. A lone figure in a hooded jacket stands under a streetlamp, backlit by soft mist. The atmosphere is cinematic, moody" + ], + "title": "Prompt", + "minLength": 2, + "type": "string", + "description": "The prompt for image generation" + }, + "num_images": { + "description": "Number of images to generate", + "type": "integer", + "minimum": 1, + "title": "Number of Images", + "maximum": 4, + "examples": [ + 1 + ], + "default": 1 + }, + "image_size": { + "enum": [ + "auto", + "1024x1024", + "1536x1024", + "1024x1536" + ], + "title": "Image Size", + "type": "string", + "description": "Aspect ratio for the generated image", + "default": "auto" + }, + "background": { + "enum": [ + "auto", + "transparent", + "opaque" + ], + "title": "Background", + "type": "string", + "description": "Background for the generated image", + "default": "auto" + }, + "quality": { + "enum": [ + "auto", + "low", + "medium", + "high" + ], + "title": "Quality", + "type": "string", + "description": "Quality for the generated image", + "default": "auto" + }, + "output_format": { + "enum": [ + "jpeg", + "png", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "Output format for the images", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "background", + "quality", + "num_images", + "output_format", + "sync_mode" + ], + "required": [ + "prompt" + ] + }, + "GptImage1TextToImageOutput": { + "title": "ImageResponse", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "height": 1536, + "file_name": "cyberpunk.png", + "content_type": "image/png", + "url": "https://storage.googleapis.com/falserverless/model_tests/gpt-image-1/cyberpunk.png", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated images.", + "items": { + "$ref": "#/components/schemas/ImageFile" + } + } + }, + "x-fal-order-properties": [ + "images" + ], + "required": [ + "images" + ] + }, + "ImageFile": { + "title": "ImageFile", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the image" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the image" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/gpt-image-1/text-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/gpt-image-1/text-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/gpt-image-1/text-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GptImage1TextToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/gpt-image-1/text-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GptImage1TextToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/sana/v1.5/1.6b", + "metadata": { + "display_name": "Sana v1.5 1.6B", + "category": "text-to-image", + "description": "Sana v1.5 1.6B is a lightweight text-to-image model that delivers 4K image generation with impressive efficiency.", + "status": "active", + "tags": [ + "text to image", + "4k", + "lightweight" + ], + "updated_at": "2026-01-26T21:44:18.194Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/sana.webp", + "model_url": "https://fal.run/fal-ai/sana/v1.5/1.6b", + "date": "2025-03-31T00:00:00.000Z", + "group": { + "key": "sana", + "label": "V1.5 (1.6B)" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/sana/v1.5/1.6b", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/sana/v1.5/1.6b queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/sana/v1.5/1.6b", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/sana.webp", + "playgroundUrl": 
"https://fal.ai/models/fal-ai/sana/v1.5/1.6b", + "documentationUrl": "https://fal.ai/models/fal-ai/sana/v1.5/1.6b/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "SanaV151.6bInput": { + "title": "TextToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Underwater coral reef ecosystem during peak bioluminescent activity, multiple layers of marine life - from microscopic plankton to massive coral structures, light refracting through crystal-clear tropical waters, creating prismatic color gradients, hyper-detailed texture of marine organisms" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": { + "height": 2160, + "width": 3840 + } + }, + "style_name": { + "enum": [ + "(No style)", + "Cinematic", + "Photographic", + "Anime", + "Manga", + "Digital Art", + "Pixel art", + "Fantasy art", + "Neonpunk", + "3D Model" + ], + "title": "Style Name", + "type": "string", + "description": "The style to generate the image in.", + "default": "(No style)" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 5 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 18 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of 
the model\n will output the same image every time.\n " + }, + "negative_prompt": { + "examples": [ + "" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + "default": "" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "style_name" + ], + "required": [ + "prompt" + ] + }, + "SanaV151.6bOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/sana/v1.5/1.6b/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sana/v1.5/1.6b/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/sana/v1.5/1.6b": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SanaV151.6bInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sana/v1.5/1.6b/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SanaV151.6bOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/sana/v1.5/4.8b", + "metadata": { + "display_name": "Sana v1.5 4.8B", + "category": "text-to-image", + "description": "Sana v1.5 4.8B is a powerful text-to-image model that generates ultra-high quality 4K images with remarkable detail.", + "status": "active", + "tags": [ + "text to image", + "4k", + "high-quality" + ], + "updated_at": "2026-01-26T21:44:18.012Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/sana.webp", + "model_url": "https://fal.run/fal-ai/sana/v1.5/4.8b", + "date": "2025-03-31T00:00:00.000Z", + "group": { + "key": "sana", + "label": "V1.5 (4.8B)" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/sana/v1.5/4.8b", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/sana/v1.5/4.8b queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/sana/v1.5/4.8b", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/sana.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/sana/v1.5/4.8b", + "documentationUrl": "https://fal.ai/models/fal-ai/sana/v1.5/4.8b/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." 
+ }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "SanaV154.8bInput": { + "title": "TextToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Underwater coral reef ecosystem during peak bioluminescent activity, multiple layers of marine life - from microscopic plankton to massive coral structures, light refracting through crystal-clear tropical waters, creating prismatic color gradients, hyper-detailed texture of marine organisms" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": { + "height": 2160, + "width": 3840 + } + }, + "style_name": { + "enum": [ + "(No style)", + "Cinematic", + "Photographic", + "Anime", + "Manga", + "Digital Art", + "Pixel art", + "Fantasy art", + "Neonpunk", + "3D Model" + ], + "title": "Style Name", + "type": "string", + "description": "The style to generate the image in.", + "default": "(No style)" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 5 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 18 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "negative_prompt": { + "examples": [ + "" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. 
moustache, blurry, low resolution).\n ", + "default": "" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "style_name" + ], + "required": [ + "prompt" + ] + }, + "SanaV154.8bOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/sana/v1.5/4.8b/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sana/v1.5/4.8b/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/sana/v1.5/4.8b": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SanaV154.8bInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sana/v1.5/4.8b/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SanaV154.8bOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/sana/sprint", + "metadata": { + "display_name": "Sana Sprint", + "category": "text-to-image", + "description": "Sana Sprint is a text-to-image model capable of generating 4K images with exceptional speed.", + "status": "active", + "tags": [ + "text to image", + "4k", + "high-speed" + ], + "updated_at": "2026-01-26T21:44:17.381Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Upscale-6.jpeg", + "model_url": "https://fal.run/fal-ai/sana/sprint", + "license_type": "commercial", + "date": "2025-03-31T00:00:00.000Z", + "group": { + "key": "sana", + "label": "Sprint" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/sana/sprint", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/sana/sprint queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/sana/sprint", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Upscale-6.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/sana/sprint", + "documentationUrl": "https://fal.ai/models/fal-ai/sana/sprint/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "SanaSprintInput": { + "title": "SprintInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Underwater coral reef ecosystem during peak bioluminescent activity, multiple layers of marine life - from microscopic plankton to massive coral structures, light refracting through crystal-clear tropical waters, creating prismatic color gradients, hyper-detailed texture of marine organisms" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": { + "height": 2160, + "width": 3840 + } + }, + "style_name": { + "enum": [ + "(No style)", + "Cinematic", + "Photographic", + "Anime", + "Manga", + "Digital Art", + "Pixel art", + "Fantasy art", + "Neonpunk", + "3D Model" + ], + "title": "Style Name", + "type": "string", + "description": "The style to generate the image in.", + "default": "(No style)" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 5 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 20, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 2 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "negative_prompt": { + "examples": [ + "" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. 
moustache, blurry, low resolution).\n ", + "default": "" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "style_name" + ], + "required": [ + "prompt" + ] + }, + "SanaSprintOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/sana/sprint/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sana/sprint/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/sana/sprint": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SanaSprintInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sana/sprint/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SanaSprintOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "rundiffusion-fal/juggernaut-flux/lightning", + "metadata": { + "display_name": "Juggernaut Flux Lightning", + "category": "text-to-image", + "description": "Juggernaut Lightning Flux by RunDiffusion provides blazing-fast, high-quality images rendered at five times the speed of Flux. 
Perfect for mood boards and mass ideation, this model excels in both realism and prompt adherence.", + "status": "active", + "tags": [ + "image generation" + ], + "updated_at": "2026-01-26T21:44:21.667Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/juggernaut-flux-lightning.webp", + "model_url": "https://fal.run/rundiffusion-fal/juggernaut-flux/lightning", + "license_type": "commercial", + "date": "2025-03-05T00:00:00.000Z", + "group": { + "key": "rundiffusion-juggernaut-flux", + "label": "Lightning" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for rundiffusion-fal/juggernaut-flux/lightning", + "version": "1.0.0", + "description": "The OpenAPI schema for the rundiffusion-fal/juggernaut-flux/lightning queue.", + "x-fal-metadata": { + "endpointId": "rundiffusion-fal/juggernaut-flux/lightning", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/juggernaut-flux-lightning.webp", + "playgroundUrl": "https://fal.ai/models/rundiffusion-fal/juggernaut-flux/lightning", + "documentationUrl": "https://fal.ai/models/rundiffusion-fal/juggernaut-flux/lightning/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "JuggernautFluxLightningInput": { + "title": "SchnellTextToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Extreme close-up of a single tiger eye, direct frontal view. Detailed iris and pupil. Sharp focus on eye texture and color. Natural lighting to capture authentic eye shine and depth. The word \"FLUX\" is painted over it in big, white brush strokes with visible texture." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." 
+ }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 4, + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "landscape_4_3" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 12, + "description": "The number of inference steps to perform.", + "default": 4 + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format" + ], + "required": [ + "prompt" + ] + }, + "JuggernautFluxLightningOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/rundiffusion-fal/juggernaut-flux/lightning/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/rundiffusion-fal/juggernaut-flux/lightning/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/rundiffusion-fal/juggernaut-flux/lightning": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/JuggernautFluxLightningInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/rundiffusion-fal/juggernaut-flux/lightning/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/JuggernautFluxLightningOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "rundiffusion-fal/juggernaut-flux/pro", + "metadata": { + "display_name": "Juggernaut Flux Pro", + "category": "text-to-image", + "description": "Juggernaut Pro Flux by RunDiffusion is the flagship Juggernaut model rivaling some of the most advanced image models available, often surpassing them in realism. It combines Juggernaut Base with RunDiffusion Photo and features enhancements like reduced background blurriness.", + "status": "active", + "tags": [ + "image generation" + ], + "updated_at": "2026-01-26T21:44:21.858Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/juggernaut-flux-pro.webp", + "model_url": "https://fal.run/rundiffusion-fal/juggernaut-flux/pro", + "license_type": "commercial", + "date": "2025-03-05T00:00:00.000Z", + "group": { + "key": "rundiffusion-juggernaut-flux", + "label": "Pro" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for rundiffusion-fal/juggernaut-flux/pro", + "version": "1.0.0", + "description": "The OpenAPI schema for the rundiffusion-fal/juggernaut-flux/pro queue.", + "x-fal-metadata": { + "endpointId": "rundiffusion-fal/juggernaut-flux/pro", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/juggernaut-flux-pro.webp", + "playgroundUrl": "https://fal.ai/models/rundiffusion-fal/juggernaut-flux/pro", + "documentationUrl": "https://fal.ai/models/rundiffusion-fal/juggernaut-flux/pro/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "JuggernautFluxProInput": { + "title": "DevTextToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Extreme close-up of a single tiger eye, direct frontal view. Detailed iris and pupil. Sharp focus on eye texture and color. Natural lighting to capture authentic eye shine and depth. The word \"FLUX\" is painted over it in big, white brush strokes with visible texture." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 4, + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "landscape_4_3" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance scale (CFG)", + "type": "number", + "maximum": 20, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 3.5 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps to perform.", + "default": 28 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format" + ], + "required": [ + "prompt" + ] + }, + "JuggernautFluxProOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/rundiffusion-fal/juggernaut-flux/pro/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/rundiffusion-fal/juggernaut-flux/pro/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/rundiffusion-fal/juggernaut-flux/pro": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/JuggernautFluxProInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/rundiffusion-fal/juggernaut-flux/pro/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/JuggernautFluxProOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "rundiffusion-fal/juggernaut-flux-lora", + "metadata": { + "display_name": "Juggernaut Flux Base LoRA", + "category": "text-to-image", + "description": "Juggernaut Base Flux LoRA by RunDiffusion is a drop-in replacement for Flux [Dev] that delivers sharper details, richer colors, and enhanced realism to all your LoRAs and LyCORIS with full compatibility.", + "status": "active", + "tags": [ + "image generation" + ], + "updated_at": "2026-01-26T21:44:22.228Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/juggernaut-flux-lora.webp", + "model_url": "https://fal.run/rundiffusion-fal/juggernaut-flux-lora", + "license_type": "commercial", + "date": "2025-03-05T00:00:00.000Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for rundiffusion-fal/juggernaut-flux-lora", + "version": "1.0.0", + "description": "The OpenAPI schema for the rundiffusion-fal/juggernaut-flux-lora queue.", + "x-fal-metadata": { + "endpointId": "rundiffusion-fal/juggernaut-flux-lora", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/juggernaut-flux-lora.webp", + "playgroundUrl": "https://fal.ai/models/rundiffusion-fal/juggernaut-flux-lora", + "documentationUrl": "https://fal.ai/models/rundiffusion-fal/juggernaut-flux-lora/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "JuggernautFluxLoraInput": { + "title": "TextToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Extreme close-up of a single tiger eye, direct frontal view. Detailed iris and pupil. Sharp focus on eye texture and color. Natural lighting to capture authentic eye shine and depth. The word \"FLUX\" is painted over it in big, white brush strokes with visible texture." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "landscape_4_3" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "loras": { + "description": "\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "examples": [], + "title": "Loras", + "default": [] + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 35, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 28 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "loras", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format" + ], + "required": [ + "prompt" + ] + }, + "JuggernautFluxLoraOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." 
+ }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Scale", + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + } + }, + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/rundiffusion-fal/juggernaut-flux-lora/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/rundiffusion-fal/juggernaut-flux-lora/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/rundiffusion-fal/juggernaut-flux-lora": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/JuggernautFluxLoraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/rundiffusion-fal/juggernaut-flux-lora/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/JuggernautFluxLoraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "rundiffusion-fal/rundiffusion-photo-flux", + "metadata": { + "display_name": "Rundiffusion Photo Flux", + "category": "text-to-image", + "description": "RunDiffusion Photo Flux provides insane realism. With this enhancer, textures and skin details burst to life, turning your favorite prompts into vivid, lifelike creations. Recommended to keep it at 0.65 to 0.80 weight. 
Supports resolutions up to 1536x1536.", + "status": "active", + "tags": [ + "image generation", + "lora" + ], + "updated_at": "2026-01-26T21:44:22.664Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/rundiffusion-photo-flux.webp", + "model_url": "https://fal.run/rundiffusion-fal/rundiffusion-photo-flux", + "license_type": "commercial", + "date": "2025-03-05T00:00:00.000Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for rundiffusion-fal/rundiffusion-photo-flux", + "version": "1.0.0", + "description": "The OpenAPI schema for the rundiffusion-fal/rundiffusion-photo-flux queue.", + "x-fal-metadata": { + "endpointId": "rundiffusion-fal/rundiffusion-photo-flux", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/rundiffusion-photo-flux.webp", + "playgroundUrl": "https://fal.ai/models/rundiffusion-fal/rundiffusion-photo-flux", + "documentationUrl": "https://fal.ai/models/rundiffusion-fal/rundiffusion-photo-flux/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "RundiffusionPhotoFluxInput": { + "title": "PhotoLoraT2IInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Extreme close-up of a single tiger eye, direct frontal view. Detailed iris and pupil. Sharp focus on eye texture and color. Natural lighting to capture authentic eye shine and depth. The word \"FLUX\" is painted over it in big, white brush strokes with visible texture." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "landscape_4_3" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "loras": { + "description": "\n The LoRAs to use for the image generation. 
You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "examples": [], + "title": "Loras", + "default": [] + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 35, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 28 + }, + "photo_lora_scale": { + "title": "Photo Lora Scale", + "type": "number", + "description": "LoRA Scale of the photo lora model", + "default": 0.75 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + } + }, + "x-fal-order-properties": [ + "photo_lora_scale", + "prompt", + "image_size", + "num_inference_steps", + "seed", + "loras", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format" + ], + "required": [ + "prompt" + ] + }, + "RundiffusionPhotoFluxOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Scale", + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + } + }, + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/rundiffusion-fal/rundiffusion-photo-flux/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/rundiffusion-fal/rundiffusion-photo-flux/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/rundiffusion-fal/rundiffusion-photo-flux": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RundiffusionPhotoFluxInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/rundiffusion-fal/rundiffusion-photo-flux/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RundiffusionPhotoFluxOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "rundiffusion-fal/juggernaut-flux/base", + "metadata": { + "display_name": "Juggernaut Flux Base", + "category": "text-to-image", + "description": "Juggernaut Base Flux by RunDiffusion is a drop-in replacement for Flux [Dev] that delivers sharper details, richer colors, and enhanced realism, while instantly boosting LoRAs and LyCORIS with full compatibility.", + "status": "active", + "tags": [ + "image generation" + ], + "updated_at": "2026-01-26T21:44:22.372Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/juggernaut-flux-base.webp", + "model_url": "https://fal.run/rundiffusion-fal/juggernaut-flux/base", + "license_type": "commercial", + "date": "2025-03-05T00:00:00.000Z", + "group": { + "key": "rundiffusion-juggernaut-flux", + "label": "Base" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for rundiffusion-fal/juggernaut-flux/base", + "version": "1.0.0", + "description": "The OpenAPI schema for the rundiffusion-fal/juggernaut-flux/base queue.", + "x-fal-metadata": { + "endpointId": "rundiffusion-fal/juggernaut-flux/base", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/juggernaut-flux-base.webp", + "playgroundUrl": "https://fal.ai/models/rundiffusion-fal/juggernaut-flux/base", + "documentationUrl": "https://fal.ai/models/rundiffusion-fal/juggernaut-flux/base/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "JuggernautFluxBaseInput": { + "title": "DevTextToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Extreme close-up of a single tiger eye, direct frontal view. Detailed iris and pupil. Sharp focus on eye texture and color. Natural lighting to capture authentic eye shine and depth. The word \"FLUX\" is painted over it in big, white brush strokes with visible texture." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 4, + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "landscape_4_3" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance scale (CFG)", + "type": "number", + "maximum": 20, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 3.5 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps to perform.", + "default": 28 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format" + ], + "required": [ + "prompt" + ] + }, + "JuggernautFluxBaseOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/rundiffusion-fal/juggernaut-flux/base/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/rundiffusion-fal/juggernaut-flux/base/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/rundiffusion-fal/juggernaut-flux/base": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/JuggernautFluxBaseInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/rundiffusion-fal/juggernaut-flux/base/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/JuggernautFluxBaseOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/cogview4", + "metadata": { + "display_name": "CogView", + "category": "text-to-image", + "description": "Generate high quality images from text prompts using CogView4. Longer text prompts will result in better quality images.", + "status": "active", + "tags": [ + "stylized" + ], + "updated_at": "2026-01-26T21:44:23.395Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/web-examples/CogView4/CogView4.webp", + "model_url": "https://fal.run/fal-ai/cogview4", + "license_type": "commercial", + "date": "2025-03-04T00:00:00.000Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/cogview4", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/cogview4 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/cogview4", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/web-examples/CogView4/CogView4.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/cogview4", + "documentationUrl": "https://fal.ai/models/fal-ai/cogview4/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Cogview4Input": { + "title": "TextToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A vibrant and artistic digital composition featuring colorful splashes of paint in the background, creating an energetic and dynamic effect. 
The text 'CogView4 on Fal' is elegantly integrated into the scene, standing out with a modern, bold, and slightly futuristic font. The colors are bright and varied, including neon blues, purples, pinks, and oranges, blending seamlessly in a fluid, abstract style. The text appears slightly illuminated, complementing the vivid splashes around it." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "landscape_4_3" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 3.5 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 50 + }, + "negative_prompt": { + "examples": [ + "" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + "default": "" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "negative_prompt", + "seed", + "guidance_scale", + "num_inference_steps", + "num_images", + "sync_mode", + "enable_safety_checker", + "output_format" + ], + "required": [ + "prompt" + ] + }, + "Cogview4Output": { + "title": "ImageOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." 
+ }, + "images": { + "examples": [ + [ + { + "height": 768, + "content_type": "image/jpeg", + "url": "https://v3.fal.media/files/tiger/rN6_PpE-o8QlSecqFku6h.jpeg", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "description": "Represents an image file.", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/cogview4/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/cogview4/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/cogview4": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Cogview4Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/cogview4/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Cogview4Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ideogram/v2a/turbo", + "metadata": { + "display_name": "Ideogram V2A Turbo", + "category": "text-to-image", + "description": "Accelerated image generation with Ideogram V2A Turbo. Create high-quality visuals, posters, and logos with enhanced speed while maintaining Ideogram's signature quality.", + "status": "active", + "tags": [ + "realism", + "typography" + ], + "updated_at": "2026-01-26T21:44:04.263Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/zebra/G9QL1XoVt8ZMvbYwA3Zrw_15ff55eb74a0429eaeb9a288e71763ef.jpg", + "model_url": "https://fal.run/fal-ai/ideogram/v2a/turbo", + "license_type": "commercial", + "date": "2025-02-27T00:00:00.000Z", + "group": { + "key": "ideogram-v2a", + "label": "Text to Image (Turbo)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ideogram/v2a/turbo", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ideogram/v2a/turbo queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ideogram/v2a/turbo", + "category": "text-to-image", + "thumbnailUrl": "https://fal.media/files/zebra/G9QL1XoVt8ZMvbYwA3Zrw_15ff55eb74a0429eaeb9a288e71763ef.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ideogram/v2a/turbo", + "documentationUrl": "https://fal.ai/models/fal-ai/ideogram/v2a/turbo/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "IdeogramV2aTurboInput": { + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "expand_prompt", + "seed", + "style", + "sync_mode" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A comic style illustration of a skeleton sitting on a toilet in a bathroom. The bathroom has a Halloween decoration with a pumpkin jack-o-lantern and bats flying around. There is a text above the skeleton that says \"Just Waiting for Halloween with Ideogram 2.0 at fal.ai\"" + ], + "title": "Prompt", + "type": "string" + }, + "aspect_ratio": { + "enum": [ + "10:16", + "16:10", + "9:16", + "16:9", + "4:3", + "3:4", + "1:1", + "1:3", + "3:1", + "3:2", + "2:3" + ], + "description": "The aspect ratio of the generated image", + "type": "string", + "title": "Aspect Ratio", + "default": "1:1" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "style": { + "enum": [ + "auto", + "general", + "realistic", + "design", + "render_3D", + "anime" + ], + "description": "The style of the generated image", + "type": "string", + "title": "Style", + "default": "auto" + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "Seed for the random number generator", + "title": "Seed" + }, + "expand_prompt": { + "description": "Whether to expand the prompt with MagicPrompt functionality.", + "type": "boolean", + "title": "Expand Prompt", + "default": true + } + }, + "title": "BaseTextToImageInput", + "required": [ + "prompt" + ] + }, + "IdeogramV2aTurboOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://fal.media/files/monkey/cNaoxPl0YAWYb-QVBvO9F_image.png" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/File" + } + }, + "seed": { + "examples": [ + 123456 + ], + "title": "Seed", + "type": "integer", + "description": "Seed used for the random number generator" + } + }, + "title": "Output", + "required": [ + "images", + "seed" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided." 
+ }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file." + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ideogram/v2a/turbo/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v2a/turbo/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v2a/turbo": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdeogramV2aTurboInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v2a/turbo/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdeogramV2aTurboOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ideogram/v2a", + "metadata": { + "display_name": "Ideogram V2A", + "category": "text-to-image", + "description": "Generate high-quality images, posters, and logos with Ideogram V2A. 
Features exceptional typography handling and realistic outputs optimized for commercial and creative use.", + "status": "active", + "tags": [ + "realism", + "typography" + ], + "updated_at": "2026-01-26T21:44:04.784Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/monkey/mYGi5w1eEI_yIOrXfqMPk_e233dd6442da4904b2bc2fd83f8915f8.jpg", + "model_url": "https://fal.run/fal-ai/ideogram/v2a", + "license_type": "commercial", + "date": "2025-02-27T00:00:00.000Z", + "group": { + "key": "ideogram-v2a", + "label": "Text to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ideogram/v2a", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ideogram/v2a queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ideogram/v2a", + "category": "text-to-image", + "thumbnailUrl": "https://fal.media/files/monkey/mYGi5w1eEI_yIOrXfqMPk_e233dd6442da4904b2bc2fd83f8915f8.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ideogram/v2a", + "documentationUrl": "https://fal.ai/models/fal-ai/ideogram/v2a/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "IdeogramV2aInput": { + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "expand_prompt", + "seed", + "style", + "sync_mode" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A comic style illustration of a skeleton sitting on a toilet in a bathroom. The bathroom has a Halloween decoration with a pumpkin jack-o-lantern and bats flying around. 
There is a text above the skeleton that says \"Just Waiting for Halloween with Ideogram 2.0 at fal.ai\"" + ], + "title": "Prompt", + "type": "string" + }, + "aspect_ratio": { + "enum": [ + "10:16", + "16:10", + "9:16", + "16:9", + "4:3", + "3:4", + "1:1", + "1:3", + "3:1", + "3:2", + "2:3" + ], + "description": "The aspect ratio of the generated image", + "type": "string", + "title": "Aspect Ratio", + "default": "1:1" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "style": { + "enum": [ + "auto", + "general", + "realistic", + "design", + "render_3D", + "anime" + ], + "description": "The style of the generated image", + "type": "string", + "title": "Style", + "default": "auto" + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "Seed for the random number generator", + "title": "Seed" + }, + "expand_prompt": { + "description": "Whether to expand the prompt with MagicPrompt functionality.", + "type": "boolean", + "title": "Expand Prompt", + "default": true + } + }, + "title": "BaseTextToImageInput", + "required": [ + "prompt" + ] + }, + "IdeogramV2aOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://fal.media/files/monkey/cNaoxPl0YAWYb-QVBvO9F_image.png" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/File" + } + }, + "seed": { + "examples": [ + 123456 + ], + "title": "Seed", + "type": "integer", + "description": "Seed used for the random number generator" + } + }, + "title": "Output", + "required": [ + "images", + "seed" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file." + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ideogram/v2a/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v2a/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v2a": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdeogramV2aInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v2a/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdeogramV2aOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-control-lora-canny", + "metadata": { + "display_name": "FLUX.1 [dev] Control LoRA Canny", + "category": "text-to-image", + "description": "FLUX Control LoRA Canny is a high-performance endpoint that uses a control image to transfer structure to the generated image, using a Canny edge map.", + "status": "active", + "tags": [ + "lora", + "style transfer" + ], + "updated_at": "2026-01-26T21:44:05.942Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/control-lora-canny.jpeg", + "model_url": "https://fal.run/fal-ai/flux-control-lora-canny", + "license_type": "commercial", + "date": "2025-02-11T00:00:00.000Z", + "group": { + "key": "flux-control-lora-canny", + "label": "Text to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/flux-lora-fast-training", + "fal-ai/flux-lora-portrait-trainer", + "fal-ai/flux-lora-general-training" + ], + "inference_endpoint_ids": [ + "fal-ai/flux-lora-fast-training", + "fal-ai/flux-lora-portrait-trainer", + "fal-ai/flux-lora-general-training" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-control-lora-canny", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-control-lora-canny queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-control-lora-canny", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/control-lora-canny.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-control-lora-canny", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-control-lora-canny/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal 
Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxControlLoraCannyInput": { + "title": "TextToImageInput", + "type": "object", + "properties": { + "control_lora_strength": { + "minimum": 0, + "title": "Control Lora Strength", + "type": "number", + "maximum": 2, + "description": "The strength of the control lora.", + "default": 1 + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 4, + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "landscape_4_3" + }, + "prompt": { + "examples": [ + "Extreme close-up of a single tiger eye, direct frontal view. Detailed iris and pupil. Sharp focus on eye texture and color. Natural lighting to capture authentic eye shine and depth. The word \"FLUX\" is painted over it in big, white brush strokes with visible texture." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "loras": { + "description": "\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "examples": [], + "title": "Loras", + "default": [] + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. 
This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance scale (CFG)", + "type": "number", + "maximum": 35, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps to perform.", + "default": 28 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "control_lora_image_url": { + "title": "Control Lora Image Url", + "type": "string", + "description": "\n The image to use for control lora. This is used to control the style of the generated image.\n " + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "loras", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "control_lora_image_url", + "control_lora_strength" + ], + "required": [ + "prompt" + ] + }, + "FluxControlLoraCannyOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "title": "Scale", + "type": "number", + "maximum": 4, + "description": "\n The scale of the LoRA weight. 
This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + } + }, + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-control-lora-canny/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-control-lora-canny/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-control-lora-canny": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxControlLoraCannyInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-control-lora-canny/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxControlLoraCannyOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-control-lora-depth", + "metadata": { + "display_name": "FLUX.1 [dev] Control LoRA Depth", + "category": "text-to-image", + "description": "FLUX Control LoRA Depth is a high-performance endpoint that uses a control image to transfer structure to the generated image, using a depth map.", + "status": "active", + "tags": [ + "lora", + "style transfer" + ], + "updated_at": "2026-01-26T21:44:28.123Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/control-lora-depth.jpeg", + "model_url": "https://fal.run/fal-ai/flux-control-lora-depth", + "license_type": "commercial", + "date": "2025-02-11T00:00:00.000Z", + "group": { + "key": "flux-control-lora-depth", + "label": "Text to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-control-lora-depth", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-control-lora-depth queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-control-lora-depth", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/control-lora-depth.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-control-lora-depth", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-control-lora-depth/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxControlLoraDepthInput": { + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "loras", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "control_lora_image_url", + "control_lora_strength", + "preprocess_depth" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Extreme close-up of a single tiger eye, direct frontal view. Detailed iris and pupil. Sharp focus on eye texture and color. Natural lighting to capture authentic eye shine and depth. The word \"FLUX\" is painted over it in big, white brush strokes with visible texture." + ], + "description": "The prompt to generate an image from.", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "title": "Num Images", + "maximum": 4, + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image.", + "title": "Image Size", + "default": "landscape_4_3" + }, + "control_lora_strength": { + "minimum": 0, + "description": "The strength of the control lora.", + "type": "number", + "title": "Control Lora Strength", + "maximum": 2, + "default": 1 + }, + "preprocess_depth": { + "description": "\n If set to true, the input image will be preprocessed to extract depth information.\n This is useful for generating depth maps from images.\n ", + "type": "boolean", + "title": "Preprocess Depth", + "default": true + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "jpeg" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "loras": { + "description": "\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "examples": [], + "title": "Loras", + "default": [] + }, + "guidance_scale": { + "minimum": 0, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "type": "number", + "title": "Guidance scale (CFG)", + "maximum": 35, + "default": 3.5 + }, + "control_lora_image_url": { + "examples": [ + "https://v3.fal.media/files/kangaroo/Cb7BeM7G4DauK_lWjzY3N_Celeb6.jpg" + ], + "description": "\n The image to use for control lora. 
This is used to control the style of the generated image.\n ", + "type": "string", + "title": "Control Lora Image Url" + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "num_inference_steps": { + "minimum": 1, + "description": "The number of inference steps to perform.", + "type": "integer", + "title": "Num Inference Steps", + "maximum": 50, + "default": 28 + }, + "seed": { + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "type": "integer", + "title": "Seed" + } + }, + "title": "DepthLoraInput", + "required": [ + "prompt", + "control_lora_image_url" + ] + }, + "FluxControlLoraDepthOutput": { + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "description": "The generated image files info.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "seed": { + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ", + "type": "integer", + "title": "Seed" + } + }, + "title": "Output", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Height", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "maximum": 14142, + "title": "Width", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "LoraWeight": { + "x-fal-order-properties": [ + "path", + "scale" + ], + "type": "object", + "properties": { + "path": { + "description": "URL or the path to the LoRA weights.", + "type": "string", + "title": "Path" + }, + "scale": { + "minimum": 0, + "description": "\n The scale of the LoRA weight. 
This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "type": "number", + "title": "Scale", + "maximum": 4, + "default": 1 + } + }, + "title": "LoraWeight", + "required": [ + "path" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "title": "Image", + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-control-lora-depth/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-control-lora-depth/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
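With the flux-control-lora-depth input and output schemas above in view, here is a minimal sketch of submitting a request to the queue endpoint they document. The `FAL_KEY` environment variable, the `Key <token>` authorization format, and the reference-image URL are assumptions for illustration; only `prompt` and `control_lora_image_url` are required by the schema.

```ts
// Minimal sketch: submit a depth-controlled generation to the queue API
// documented above. FAL_KEY and the `Key <token>` header format are
// assumptions; the reference URL is a placeholder.
const input = {
  prompt: 'A watercolor rendition of the reference scene',
  control_lora_image_url: 'https://example.com/reference.jpg', // placeholder
  control_lora_strength: 1, // 0-2, default 1
  preprocess_depth: true, // extract a depth map from the control image first
}

const res = await fetch('https://queue.fal.run/fal-ai/flux-control-lora-depth', {
  method: 'POST',
  headers: {
    Authorization: `Key ${process.env.FAL_KEY}`,
    'Content-Type': 'application/json',
  },
  body: JSON.stringify(input),
})
const queued = await res.json() // QueueStatus: { status, request_id, ... }
console.log(queued.request_id)
```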
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-control-lora-depth": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxControlLoraDepthInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-control-lora-depth/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxControlLoraDepthOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/imagen3", + "metadata": { + "display_name": "Imagen3", + "category": "text-to-image", + "description": "Imagen3 is a high-quality text-to-image model that generates realistic images from text prompts.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:44:28.812Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/web-examples/imagen3/imagen.webp", + "model_url": "https://fal.run/fal-ai/imagen3", + "date": "2025-02-10T00:00:00.000Z", + "group": { + "key": "imagen3", + "label": "Text to Image" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/imagen3", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/imagen3 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/imagen3", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/web-examples/imagen3/imagen.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/imagen3", + "documentationUrl": "https://fal.ai/models/fal-ai/imagen3/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Imagen3Input": { + "title": "TextToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A serene landscape with mountains reflected in a crystal clear lake at sunset" + ], + "description": "The text prompt describing what you want to see", + "type": "string", + "title": "Prompt" + }, + "aspect_ratio": { + "enum": [ + "1:1", + "16:9", + "9:16", + "3:4", + "4:3" + ], + "description": "The aspect ratio of the generated image", + "type": "string", + "title": "Aspect Ratio", + "default": "1:1" + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "Number of images to generate (1-4)", + "default": 1 + }, + "seed": { + "description": "Random seed for reproducible generation", + "type": "integer", + "title": "Seed" + }, + "negative_prompt": { + "description": "A description of what to discourage in the generated images", + "type": "string", + "title": "Negative Prompt", + "default": "" + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "aspect_ratio", + "num_images", + "seed" + ], + "required": [ + "prompt" + ] + }, + "Imagen3Output": { + "title": "Output", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3.fal.media/files/kangaroo/c0RfXzCisqX6YRkIF7apw_output.png" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/File" + } + }, + "seed": { + "examples": [ + 42 + ], + "description": "Seed used for generation", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "images", + "seed" + ], + "required": [ + "images", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/imagen3/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/imagen3/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/imagen3": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Imagen3Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/imagen3/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Imagen3Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/imagen3/fast", + "metadata": { + "display_name": "Imagen3 Fast", + "category": "text-to-image", + "description": "Imagen3 Fast is a high-quality text-to-image model that generates realistic images from text prompts.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:44:28.556Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/web-examples/imagen3/imagen.webp", + "model_url": "https://fal.run/fal-ai/imagen3/fast", + "date": "2025-02-10T00:00:00.000Z", + "group": { + "key": "imagen3", + "label": "Text to Image (fast)" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/imagen3/fast", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/imagen3/fast queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/imagen3/fast", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/web-examples/imagen3/imagen.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/imagen3/fast", + "documentationUrl": "https://fal.ai/models/fal-ai/imagen3/fast/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
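The imagen3 entry above completes the four queue routes (submit, status, cancel, result) that every endpoint in this file repeats. A minimal polling sketch against those routes, assuming a `FAL_KEY` environment variable, the `Key <token>` authorization format, and an arbitrary two-second poll interval:

```ts
// Sketch of the submit -> poll -> fetch flow shared by every endpoint in
// this file, shown for fal-ai/imagen3. FAL_KEY, the `Key <token>` header
// format, and the 2s poll interval are assumptions.
const base = 'https://queue.fal.run/fal-ai/imagen3'
const headers = {
  Authorization: `Key ${process.env.FAL_KEY}`,
  'Content-Type': 'application/json',
}

const submit = await fetch(base, {
  method: 'POST',
  headers,
  body: JSON.stringify({ prompt: 'A lighthouse on a cliff at dawn' }),
})
const { request_id } = await submit.json()

// Poll /status (logs=1 opts into log output) until COMPLETED.
let status = 'IN_QUEUE'
while (status !== 'COMPLETED') {
  await new Promise((resolve) => setTimeout(resolve, 2000))
  const poll = await fetch(`${base}/requests/${request_id}/status?logs=1`, { headers })
  status = (await poll.json()).status
}

// GET the result endpoint for the Imagen3Output payload.
const result = await (await fetch(`${base}/requests/${request_id}`, { headers })).json()
console.log(result.images[0].url)
```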
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Imagen3FastInput": { + "title": "TextToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A serene landscape with mountains reflected in a crystal clear lake at sunset" + ], + "description": "The text prompt describing what you want to see", + "type": "string", + "title": "Prompt" + }, + "aspect_ratio": { + "enum": [ + "1:1", + "16:9", + "9:16", + "3:4", + "4:3" + ], + "description": "The aspect ratio of the generated image", + "type": "string", + "title": "Aspect Ratio", + "default": "1:1" + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "Number of images to generate (1-4)", + "default": 1 + }, + "seed": { + "description": "Random seed for reproducible generation", + "type": "integer", + "title": "Seed" + }, + "negative_prompt": { + "description": "A description of what to discourage in the generated images", + "type": "string", + "title": "Negative Prompt", + "default": "" + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "aspect_ratio", + "num_images", + "seed" + ], + "required": [ + "prompt" + ] + }, + "Imagen3FastOutput": { + "title": "Output", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://v3.fal.media/files/kangaroo/c0RfXzCisqX6YRkIF7apw_output.png" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/File" + } + }, + "seed": { + "examples": [ + 42 + ], + "description": "Seed used for generation", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "images", + "seed" + ], + "required": [ + "images", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/imagen3/fast/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/imagen3/fast/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/imagen3/fast": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Imagen3FastInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/imagen3/fast/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Imagen3FastOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/lumina-image/v2", + "metadata": { + "display_name": "Lumina Image 2", + "category": "text-to-image", + "description": "Lumina-Image-2.0 is a 2 billion parameter flow-based diffusion transformer which features improved performance in image quality, typography, complex prompt understanding, and resource-efficiency.", + "status": "active", + "tags": [ + "diffusion", + "typography", + "style" + ], + "updated_at": "2026-01-26T21:44:06.067Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/lumina-image-v2.webp", + "model_url": "https://fal.run/fal-ai/lumina-image/v2", + "license_type": "commercial", + "date": "2025-01-31T00:00:00.000Z", + "highlighted": false, + "kind": "inference", + "stream_url": "https://fal.run/fal-ai/lumina-image/v2/stream", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/lumina-image/v2", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/lumina-image/v2 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/lumina-image/v2", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/lumina-image-v2.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/lumina-image/v2", + "documentationUrl": "https://fal.ai/models/fal-ai/lumina-image/v2/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id."
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LuminaImageV2Input": { + "title": "TextToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A vibrant and artistic digital composition featuring colorful splashes of paint in the background, creating an energetic and dynamic effect. The text 'Lumina on Fal' is elegantly integrated into the scene, standing out with a modern, bold, and slightly futuristic font. The colors are bright and varied, including neon blues, purples, pinks, and oranges, blending seamlessly in a fluid, abstract style. The text appears slightly illuminated, complementing the vivid splashes around it." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 4, + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "landscape_4_3" + }, + "cfg_trunc_ratio": { + "minimum": 0, + "title": "Cfg Trunc Ratio", + "type": "number", + "maximum": 1, + "description": "The ratio of the timestep interval to apply normalization-based guidance scale.", + "default": 1 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "system_prompt": { + "title": "System Prompt", + "type": "string", + "description": "The system prompt to use.", + "default": "You are an assistant designed to generate superior images with the superior degree of image-text alignment based on textual prompts or user prompts." 
+ }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance scale (CFG)", + "type": "number", + "maximum": 20, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 4 + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps to perform.", + "default": 30 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "negative_prompt": { + "examples": [ + "" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + "default": "" + }, + "cfg_normalization": { + "title": "Cfg Normalization", + "type": "boolean", + "description": "Whether to apply normalization-based guidance scale.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "negative_prompt", + "system_prompt", + "seed", + "guidance_scale", + "num_inference_steps", + "cfg_normalization", + "cfg_trunc_ratio", + "num_images", + "sync_mode", + "enable_safety_checker", + "output_format" + ], + "required": [ + "prompt" + ] + }, + "LuminaImageV2Output": { + "title": "ImageOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "examples": [ + [ + { + "height": 768, + "content_type": "image/jpeg", + "url": "https://v3.fal.media/files/rabbit/pBwaEZysJhnstKWEHGpLc.png", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/lumina-image/v2/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/lumina-image/v2/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
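Lumina's schema above is one of the few in this file with normalization-based guidance controls (`cfg_normalization`, `cfg_trunc_ratio`) alongside the usual CFG scale. A sketch of a payload exercising them; the specific values are illustrative, not recommendations:

```ts
// Illustrative LuminaImageV2Input payload; values other than the schema
// defaults are arbitrary choices, not recommendations.
const luminaInput = {
  prompt: 'A poster with the text "Lumina on Fal" over colorful paint splashes',
  guidance_scale: 4, // CFG, 0-20, default 4
  cfg_normalization: true, // apply normalization-based guidance
  cfg_trunc_ratio: 0.8, // timestep-interval ratio for normalized guidance, 0-1
  num_inference_steps: 30, // 1-50, default 30
  image_size: 'landscape_4_3' as const,
}
```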
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/lumina-image/v2": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LuminaImageV2Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/lumina-image/v2/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LuminaImageV2Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/janus", + "metadata": { + "display_name": "DeepSeek Janus-Pro", + "category": "text-to-image", + "description": "DeepSeek Janus-Pro is a novel text-to-image model that unifies multimodal understanding and generation through an autoregressive framework", + "status": "active", + "tags": [ + "stylized" + ], + "updated_at": "2026-01-26T21:44:29.460Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/web-examples/janus/januspro.webp", + "model_url": "https://fal.run/fal-ai/janus", + "date": "2025-01-28T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/janus", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/janus queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/janus", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/web-examples/janus/januspro.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/janus", + "documentationUrl": "https://fal.ai/models/fal-ai/janus/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "JanusInput": { + "title": "JanusInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "beautiful girl, inside a house" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." 
+ }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 16, + "description": "Number of images to generate in parallel.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "square" + }, + "cfg_weight": { + "minimum": 1, + "title": "Cfg Weight", + "type": "number", + "maximum": 20, + "description": "Classifier Free Guidance scale - how closely to follow the prompt.", + "default": 5 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "temperature": { + "minimum": 0.1, + "title": "Temperature", + "type": "number", + "maximum": 2, + "description": "Controls randomness in the generation. Higher values make output more random.", + "default": 1 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducible generation." + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "temperature", + "cfg_weight", + "num_images", + "seed", + "enable_safety_checker" + ], + "required": [ + "prompt" + ] + }, + "JanusOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/janus/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/janus/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
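JanusInput above differs from the neighboring schemas in two ways worth noticing: `num_images` allows up to 16 parallel generations, and a sampling `temperature` is exposed. A sketch:

```ts
// Illustrative JanusInput payload; note the wider num_images range (1-16)
// and the explicit sampling temperature.
const janusInput = {
  prompt: 'beautiful girl, inside a house', // the schema's own example prompt
  num_images: 8, // up to 16, generated in parallel
  temperature: 1.2, // 0.1-2; higher means more random sampling
  cfg_weight: 5, // 1-20, default 5
  image_size: 'square' as const, // Janus defaults to square, not landscape_4_3
}
```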
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/janus": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/JanusInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/janus/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/JanusOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-pro/v1.1-ultra-finetuned", + "metadata": { + "display_name": "FLUX1.1 [pro] ultra Fine-tuned", + "category": "text-to-image", + "description": "FLUX1.1 [pro] ultra fine-tuned is the newest version of FLUX1.1 [pro] with a fine-tuned LoRA, maintaining professional-grade image quality while delivering up to 2K resolution with improved photo realism.", + "status": "active", + "tags": [ + "high-res", + "realism" + ], + "updated_at": "2026-01-26T21:44:07.804Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/flux-pro-11-ultra.webp", + "model_url": "https://fal.run/fal-ai/flux-pro/v1.1-ultra-finetuned", + "license_type": "commercial", + "date": "2025-01-16T00:00:00.000Z", + "group": { + "key": "flux-pro", + "label": "FLUX 1.1 [pro] (ultra) Fine-tuned" + }, + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/flux-pro-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/flux-pro-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-pro/v1.1-ultra-finetuned", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-pro/v1.1-ultra-finetuned queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-pro/v1.1-ultra-finetuned", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/flux-pro-11-ultra.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-pro/v1.1-ultra-finetuned", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-pro/v1.1-ultra-finetuned/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxProV11UltraFinetunedInput": { + "title": "FluxProUltraTextToImageFinetunedInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Extreme close-up of a single tiger eye, direct frontal view. Detailed iris and pupil. Sharp focus on eye texture and color. Natural lighting to capture authentic eye shine and depth. The word \"FLUX\" is painted over it in big, white brush strokes with visible texture." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "finetune_id": { + "title": "Fine-tune ID", + "type": "string", + "description": "References your specific model" + }, + "safety_tolerance": { + "enum": [ + "1", + "2", + "3", + "4", + "5", + "6" + ], + "title": "Safety Tolerance", + "type": "string", + "description": "The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive.", + "default": "2" + }, + "image_prompt_strength": { + "minimum": 0, + "title": "Image Prompt Strength", + "type": "number", + "description": "The strength of the image prompt, between 0 and 1.", + "maximum": 1, + "default": 0.1 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "enhance_prompt": { + "title": "Enhance Prompt", + "type": "boolean", + "description": "Whether to enhance the prompt for better results.", + "default": false + }, + "raw": { + "title": "Raw", + "type": "boolean", + "description": "Generate less processed, more natural-looking images.", + "default": false + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "description": "The number of images to generate.", + "maximum": 4, + "default": 1 + }, + "aspect_ratio": { + "anyOf": [ + { + "enum": [ + "21:9", + "16:9", + "4:3", + "3:2", + "1:1", + "2:3", + "3:4", + "9:16", + "9:21" + ], + "type": "string" + }, + { + "type": "string" + } + ], + "title": "Aspect Ratio", + "description": "The aspect ratio of the generated image.", + "default": "16:9" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "title": "Image URL", + "type": "string", + "description": "The image URL to generate an image from." 
+ }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "finetune_strength": { + "minimum": 0, + "title": "Fine-tune Strength", + "type": "number", + "description": "\n Controls finetune influence.\n Increase this value if your target concept isn't showing up strongly enough.\n The optimal setting depends on your finetune and prompt\n ", + "maximum": 2 + } + }, + "x-fal-order-properties": [ + "prompt", + "seed", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "safety_tolerance", + "enhance_prompt", + "image_url", + "image_prompt_strength", + "aspect_ratio", + "raw", + "finetune_id", + "finetune_strength" + ], + "required": [ + "prompt", + "finetune_id", + "finetune_strength" + ] + }, + "FluxProV11UltraFinetunedOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/registry__image__fast_sdxl__models__Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "registry__image__fast_sdxl__models__Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-pro/v1.1-ultra-finetuned/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-pro/v1.1-ultra-finetuned/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-pro/v1.1-ultra-finetuned": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxProV11UltraFinetunedInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-pro/v1.1-ultra-finetuned/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxProV11UltraFinetunedOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-pro/v1.1", + "metadata": { + "display_name": "FLUX1.1 [pro]", + "category": "text-to-image", + "description": "FLUX1.1 [pro] is an enhanced version of FLUX.1 [pro] with improved image generation capabilities, delivering superior composition, detail, and artistic fidelity compared to its predecessor.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:44:30.974Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/turbo_thumbnail.jpg", + "model_url": "https://fal.run/fal-ai/flux-pro/v1.1", + "license_type": "commercial", + "date": "2025-01-16T00:00:00.000Z", + "group": { + "key": "flux-pro", + "label": "FLUX 1.1 [pro]" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-pro/v1.1", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-pro/v1.1 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-pro/v1.1", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/turbo_thumbnail.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-pro/v1.1", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-pro/v1.1/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url."
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxProV11Input": { + "title": "FluxProPlusTextToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Extreme close-up of a single tiger eye, direct frontal view. Detailed iris and pupil. Sharp focus on eye texture and color. Natural lighting to capture authentic eye shine and depth. The word \"FLUX\" is painted over it in big, white brush strokes with visible texture." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "description": "The number of images to generate.", + "maximum": 4, + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "landscape_4_3" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "safety_tolerance": { + "enum": [ + "1", + "2", + "3", + "4", + "5", + "6" + ], + "title": "Safety Tolerance", + "type": "string", + "description": "The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive.", + "default": "2" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "enhance_prompt": { + "title": "Enhance Prompt", + "type": "boolean", + "description": "Whether to enhance the prompt for better results.", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "seed", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "safety_tolerance", + "enhance_prompt" + ], + "required": [ + "prompt" + ] + }, + "FluxProV11Output": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." 
+ }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/registry__image__fast_sdxl__models__Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "registry__image__fast_sdxl__models__Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-pro/v1.1/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-pro/v1.1/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-pro/v1.1": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxProV11Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-pro/v1.1/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxProV11Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/switti", + "metadata": { + "display_name": "Switti 1024", + "category": "text-to-image", + "description": "Switti is a scale-wise transformer for fast text-to-image generation that outperforms existing T2I AR models and competes with state-of-the-art T2I diffusion models while being faster than distilled diffusion models.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:44:32.655Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/switti.webp", + "model_url": "https://fal.run/fal-ai/switti", + "license_type": "commercial", + "date": "2024-12-31T00:00:00.000Z", + "group": { + "key": "switti", + "label": "1024" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/switti", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/switti queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/switti", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/switti.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/switti", + "documentationUrl": "https://fal.ai/models/fal-ai/switti/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "SwittiInput": { + "title": "TextToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A cat wearing a hoodie with 'FAL' written on it." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." 
+ }, + "sampling_top_k": { + "minimum": 10, + "maximum": 1000, + "type": "integer", + "title": "Sampling Top-k", + "description": "The number of top-k tokens to sample from.", + "default": 400 + }, + "turn_off_cfg_start_si": { + "minimum": 0, + "maximum": 10, + "type": "integer", + "title": "Disable CFG starting scale", + "description": "Disable CFG starting scale", + "default": 8 + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 6 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "smooth_start_si": { + "minimum": 0, + "maximum": 10, + "type": "integer", + "title": "Smoothing starting scale", + "description": "Smoothing starting scale", + "default": 2 + }, + "negative_prompt": { + "examples": [ + "" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + "default": "" + }, + "last_scale_temp": { + "minimum": 0.1, + "maximum": 10, + "type": "number", + "title": "Temperature after disabling CFG", + "description": "Temperature after disabling CFG", + "default": 0.1 + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "more_diverse": { + "title": "More Diverse", + "type": "boolean", + "description": "More diverse sampling", + "default": false + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ", + "default": false + }, + "more_smooth": { + "title": "More Smooth", + "type": "boolean", + "description": "Smoothing with Gumbel softmax sampling", + "default": true + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "sampling_top_p": { + "minimum": 0.1, + "maximum": 1, + "type": "number", + "title": "Sampling Top-p", + "description": "The top-p probability to sample from.", + "default": 0.95 + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "sampling_top_k", + "sampling_top_p", + "more_smooth", + "more_diverse", + "smooth_start_si", + "turn_off_cfg_start_si", + "last_scale_temp", + "seed", + "guidance_scale", + "sync_mode", + "enable_safety_checker", + "output_format" + ], + "required": [ + "prompt" + ] + }, + "SwittiOutput": { + "title": "SwittiOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." 
+ }, + "images": { + "examples": [ + [ + { + "height": 1024, + "content_type": "image/jpeg", + "url": "https://fal.media/files/lion/JpgBX7w379jHteLeeNsM5.jpeg", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "description": "Represents an image file.", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/switti/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/switti/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/switti": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SwittiInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/switti/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SwittiOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/switti/512", + "metadata": { + "display_name": "Switti 512", + "category": "text-to-image", + "description": "Switti is a scale-wise transformer for fast text-to-image generation that outperforms existing T2I AR models and competes with state-of-the-art T2I diffusion models while being faster than distilled diffusion models.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:44:32.784Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/switti.webp", + "model_url": "https://fal.run/fal-ai/switti/512", + "license_type": "commercial", + "date": "2024-12-31T00:00:00.000Z", + "group": { + "key": "switti", + "label": "512" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/switti/512", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/switti/512 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/switti/512", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/switti.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/switti/512", + "documentationUrl": "https://fal.ai/models/fal-ai/switti/512/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Switti512Input": { + "title": "TextToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A cat wearing a hoodie with 'FAL' written on it." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." 
+ }, + "sampling_top_k": { + "minimum": 10, + "maximum": 1000, + "type": "integer", + "title": "Sampling Top-k", + "description": "The number of top-k tokens to sample from.", + "default": 400 + }, + "turn_off_cfg_start_si": { + "minimum": 0, + "maximum": 10, + "type": "integer", + "title": "Disable CFG starting scale", + "description": "Disable CFG starting scale", + "default": 8 + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 6 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "smooth_start_si": { + "minimum": 0, + "maximum": 10, + "type": "integer", + "title": "Smoothing starting scale", + "description": "Smoothing starting scale", + "default": 2 + }, + "negative_prompt": { + "examples": [ + "" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + "default": "" + }, + "last_scale_temp": { + "minimum": 0.1, + "maximum": 10, + "type": "number", + "title": "Temperature after disabling CFG", + "description": "Temperature after disabling CFG", + "default": 0.1 + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "more_diverse": { + "title": "More Diverse", + "type": "boolean", + "description": "More diverse sampling", + "default": false + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ", + "default": false + }, + "more_smooth": { + "title": "More Smooth", + "type": "boolean", + "description": "Smoothing with Gumbel softmax sampling", + "default": true + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "sampling_top_p": { + "minimum": 0.1, + "maximum": 1, + "type": "number", + "title": "Sampling Top-p", + "description": "The top-p probability to sample from.", + "default": 0.95 + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "sampling_top_k", + "sampling_top_p", + "more_smooth", + "more_diverse", + "smooth_start_si", + "turn_off_cfg_start_si", + "last_scale_temp", + "seed", + "guidance_scale", + "sync_mode", + "enable_safety_checker", + "output_format" + ], + "required": [ + "prompt" + ] + }, + "Switti512Output": { + "title": "SwittiOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." 
+ }, + "images": { + "examples": [ + [ + { + "height": 1024, + "content_type": "image/jpeg", + "url": "https://fal.media/files/lion/JpgBX7w379jHteLeeNsM5.jpeg", + "width": 1024 + } + ] + ], + "title": "Images", + "type": "array", + "description": "The generated images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "description": "Represents an image file.", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/switti/512/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/switti/512/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/switti/512": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Switti512Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/switti/512/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Switti512Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/bria/text-to-image/base", + "metadata": { + "display_name": "Bria Text-to-Image Base", + "category": "text-to-image", + "description": "Bria's Text-to-Image model, trained exclusively on licensed data for safe and risk-free commercial use. Available also as source code and weights. For access to weights: https://bria.ai/contact-us", + "status": "active", + "tags": [ + "image generation" + ], + "updated_at": "2026-01-26T21:44:34.399Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/bria.webp", + "model_url": "https://fal.run/fal-ai/bria/text-to-image/base", + "license_type": "commercial", + "date": "2024-12-19T00:00:00.000Z", + "group": { + "key": "bria", + "label": "Text to Image (Base)" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/bria/text-to-image/base", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/bria/text-to-image/base queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/bria/text-to-image/base", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/bria.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/bria/text-to-image/base", + "documentationUrl": "https://fal.ai/models/fal-ai/bria/text-to-image/base/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "BriaTextToImageBaseInput": { + "title": "TextToImageRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A lone figure stands on the edge of a serene cliff at sunset, gazing out over a vast, mystical valley. The figure is clad in flowing robes that ripple in the gentle breeze, silhouetted against the golden and lavender hues of the sky. Below, a cascading waterfall pours into a sparkling river winding through a forest of bioluminescent trees. The scene blends the awe of nature with a touch of otherworldly wonder, inviting reflection and imagination." + ], + "description": "The prompt you would like to use to generate images.", + "type": "string", + "title": "Prompt", + "minLength": 1 + }, + "num_images": { + "minimum": 1, + "description": "How many images you would like to generate. When using any Guidance Method, Value is set to 1.", + "type": "integer", + "title": "Num Images", + "maximum": 4, + "default": 4 + }, + "prompt_enhancement": { + "description": "When set to true, enhances the provided prompt by generating additional, more descriptive variations, resulting in more diverse and creative output images.", + "type": "boolean", + "title": "Prompt Enhancement", + "default": false + }, + "guidance": { + "description": "Guidance images to use for the generation. Up to 4 guidance methods can be combined during a single inference.", + "type": "array", + "title": "Guidance", + "items": { + "$ref": "#/components/schemas/GuidanceInput" + }, + "default": [] + }, + "aspect_ratio": { + "enum": [ + "1:1", + "2:3", + "3:2", + "3:4", + "4:3", + "4:5", + "5:4", + "9:16", + "16:9" + ], + "description": "The aspect ratio of the image. When a guidance method is being used, the aspect ratio is defined by the guidance image and this parameter is ignored.", + "type": "string", + "title": "Aspect Ratio", + "default": "1:1" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "type": "number", + "title": "Guidance scale (CFG)", + "maximum": 20, + "default": 5 + }, + "medium": { + "enum": [ + "photography", + "art" + ], + "description": "Which medium should be included in your generated images. This parameter is optional.", + "type": "string", + "title": "Medium" + }, + "seed": { + "minimum": 0, + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "type": "integer", + "title": "Seed", + "maximum": 2147483647 + }, + "negative_prompt": { + "description": "The negative prompt you would like to use to generate images.", + "type": "string", + "title": "Negative Prompt", + "default": "" + }, + "num_inference_steps": { + "minimum": 20, + "description": "The number of iterations the model goes through to refine the generated image. This parameter is optional.", + "type": "integer", + "title": "Num Inference Steps", + "maximum": 50, + "default": 30 + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "num_images", + "aspect_ratio", + "seed", + "num_inference_steps", + "guidance_scale", + "prompt_enhancement", + "medium", + "guidance", + "sync_mode" + ], + "required": [ + "prompt" + ] + }, + "BriaTextToImageBaseOutput": { + "title": "Output", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "file_size": 3731290, + "height": 1536, + "file_name": "257cf8e7bd3a47c2959396343d5b38cf.png", + "content_type": "image/png", + "url": "https://v3.fal.media/files/tiger/48e63e0K6C9XQYBuomoU-_257cf8e7bd3a47c2959396343d5b38cf.png", + "width": 1536 + } + ] + ], + "description": "The generated images", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "description": "Seed value used for generation.", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "images", + "seed" + ], + "required": [ + "images", + "seed" + ] + }, + "GuidanceInput": { + "title": "GuidanceInput", + "type": "object", + "properties": { + "scale": { + "minimum": 0, + "description": "Impact of the guidance.", + "type": "number", + "title": "Scale", + "maximum": 1, + "default": 1 + }, + "method": { + "enum": [ + "controlnet_canny", + "controlnet_depth", + "controlnet_recoloring", + "controlnet_color_grid" + ], + "description": "Which guidance type you would like to include in the generation. Up to 4 guidance methods can be combined during a single inference. This parameter is optional.", + "type": "string", + "title": "Method" + }, + "image_url": { + "description": "The image that should be used as guidance, in base64 format, with the method defined in guidance_method_1. Accepted formats are jpeg, jpg, png, webp. Maximum file size 12MB. If more than one guidance method is used, all guidance images must be of the same aspect ratio, and this will be the aspect ratio of the generated results. If guidance_method_1 is selected, an image must be provided.", + "type": "string", + "title": "Image Url" + } + }, + "x-fal-order-properties": [ + "image_url", + "method", + "scale" + ], + "required": [ + "image_url" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/bria/text-to-image/base/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bria/text-to-image/base/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/bria/text-to-image/base": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BriaTextToImageBaseInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bria/text-to-image/base/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BriaTextToImageBaseOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/bria/text-to-image/fast", + "metadata": { + "display_name": "Bria Text-to-Image Fast", + "category": "text-to-image", + "description": "Bria's Text-to-Image model with perfect harmony of latency and quality. Trained exclusively on licensed data for safe and risk-free commercial use. Available also as source code and weights. For access to weights: https://bria.ai/contact-us", + "status": "active", + "tags": [ + "image generation" + ], + "updated_at": "2026-01-26T21:44:33.982Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/bria.webp", + "model_url": "https://fal.run/fal-ai/bria/text-to-image/fast", + "license_type": "commercial", + "date": "2024-12-19T00:00:00.000Z", + "group": { + "key": "bria", + "label": "Text to Image (Fast)" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/bria/text-to-image/fast", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/bria/text-to-image/fast queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/bria/text-to-image/fast", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/bria.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/bria/text-to-image/fast", + "documentationUrl": "https://fal.ai/models/fal-ai/bria/text-to-image/fast/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "BriaTextToImageFastInput": { + "title": "FastTextToImageRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A lone figure stands on the edge of a serene cliff at sunset, gazing out over a vast, mystical valley. The figure is clad in flowing robes that ripple in the gentle breeze, silhouetted against the golden and lavender hues of the sky. Below, a cascading waterfall pours into a sparkling river winding through a forest of bioluminescent trees. The scene blends the awe of nature with a touch of otherworldly wonder, inviting reflection and imagination." + ], + "description": "The prompt you would like to use to generate images.", + "type": "string", + "title": "Prompt", + "minLength": 1 + }, + "num_images": { + "minimum": 1, + "description": "How many images you would like to generate. When using any Guidance Method, Value is set to 1.", + "type": "integer", + "title": "Num Images", + "maximum": 4, + "default": 4 + }, + "prompt_enhancement": { + "description": "When set to true, enhances the provided prompt by generating additional, more descriptive variations, resulting in more diverse and creative output images.", + "type": "boolean", + "title": "Prompt Enhancement", + "default": false + }, + "guidance": { + "description": "Guidance images to use for the generation. Up to 4 guidance methods can be combined during a single inference.", + "type": "array", + "title": "Guidance", + "items": { + "$ref": "#/components/schemas/GuidanceInput" + }, + "default": [] + }, + "aspect_ratio": { + "enum": [ + "1:1", + "2:3", + "3:2", + "3:4", + "4:3", + "4:5", + "5:4", + "9:16", + "16:9" + ], + "description": "The aspect ratio of the image. When a guidance method is being used, the aspect ratio is defined by the guidance image and this parameter is ignored.", + "type": "string", + "title": "Aspect Ratio", + "default": "1:1" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "type": "number", + "title": "Guidance scale (CFG)", + "maximum": 20, + "default": 5 + }, + "medium": { + "enum": [ + "photography", + "art" + ], + "description": "Which medium should be included in your generated images. This parameter is optional.", + "type": "string", + "title": "Medium" + }, + "seed": { + "minimum": 0, + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "type": "integer", + "title": "Seed", + "maximum": 2147483647 + }, + "negative_prompt": { + "description": "The negative prompt you would like to use to generate images.", + "type": "string", + "title": "Negative Prompt", + "default": "" + }, + "num_inference_steps": { + "minimum": 4, + "description": "The number of iterations the model goes through to refine the generated image. This parameter is optional.", + "type": "integer", + "title": "Num Inference Steps", + "maximum": 10, + "default": 8 + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "num_images", + "aspect_ratio", + "seed", + "num_inference_steps", + "guidance_scale", + "prompt_enhancement", + "medium", + "guidance", + "sync_mode" + ], + "required": [ + "prompt" + ] + }, + "BriaTextToImageFastOutput": { + "title": "Output", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "file_size": 3731290, + "height": 1536, + "file_name": "257cf8e7bd3a47c2959396343d5b38cf.png", + "content_type": "image/png", + "url": "https://v3.fal.media/files/tiger/48e63e0K6C9XQYBuomoU-_257cf8e7bd3a47c2959396343d5b38cf.png", + "width": 1536 + } + ] + ], + "description": "The generated images", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "description": "Seed value used for generation.", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "images", + "seed" + ], + "required": [ + "images", + "seed" + ] + }, + "GuidanceInput": { + "title": "GuidanceInput", + "type": "object", + "properties": { + "scale": { + "minimum": 0, + "description": "Impact of the guidance.", + "type": "number", + "title": "Scale", + "maximum": 1, + "default": 1 + }, + "method": { + "enum": [ + "controlnet_canny", + "controlnet_depth", + "controlnet_recoloring", + "controlnet_color_grid" + ], + "description": "Which guidance type you would like to include in the generation. Up to 4 guidance methods can be combined during a single inference. This parameter is optional.", + "type": "string", + "title": "Method" + }, + "image_url": { + "description": "The image that should be used as guidance, in base64 format, with the method defined in guidance_method_1. Accepted formats are jpeg, jpg, png, webp. Maximum file size 12MB. If more than one guidance method is used, all guidance images must be of the same aspect ratio, and this will be the aspect ratio of the generated results. If guidance_method_1 is selected, an image must be provided.", + "type": "string", + "title": "Image Url" + } + }, + "x-fal-order-properties": [ + "image_url", + "method", + "scale" + ], + "required": [ + "image_url" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/bria/text-to-image/fast/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bria/text-to-image/fast/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully."
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/bria/text-to-image/fast": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BriaTextToImageFastInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bria/text-to-image/fast/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BriaTextToImageFastOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/bria/text-to-image/hd", + "metadata": { + "display_name": "Bria Text-to-Image HD", + "category": "text-to-image", + "description": "Bria's Text-to-Image model for HD images. Trained exclusively on licensed data for safe and risk-free commercial use. Available also as source code and weights. For access to weights: https://bria.ai/contact-us", + "status": "active", + "tags": [ + "image generation" + ], + "updated_at": "2026-01-26T21:44:34.109Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/bria.webp", + "model_url": "https://fal.run/fal-ai/bria/text-to-image/hd", + "license_type": "commercial", + "date": "2024-12-19T00:00:00.000Z", + "group": { + "key": "bria", + "label": "Text to Image (HD)" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/bria/text-to-image/hd", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/bria/text-to-image/hd queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/bria/text-to-image/hd", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/bria.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/bria/text-to-image/hd", + "documentationUrl": "https://fal.ai/models/fal-ai/bria/text-to-image/hd/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "BriaTextToImageHdInput": { + "title": "TextToImageRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A lone figure stands on the edge of a serene cliff at sunset, gazing out over a vast, mystical valley. The figure is clad in flowing robes that ripple in the gentle breeze, silhouetted against the golden and lavender hues of the sky. Below, a cascading waterfall pours into a sparkling river winding through a forest of bioluminescent trees. The scene blends the awe of nature with a touch of otherworldly wonder, inviting reflection and imagination." + ], + "description": "The prompt you would like to use to generate images.", + "type": "string", + "title": "Prompt", + "minLength": 1 + }, + "num_images": { + "minimum": 1, + "description": "How many images you would like to generate. When using any Guidance Method, Value is set to 1.", + "type": "integer", + "title": "Num Images", + "maximum": 4, + "default": 4 + }, + "prompt_enhancement": { + "description": "When set to true, enhances the provided prompt by generating additional, more descriptive variations, resulting in more diverse and creative output images.", + "type": "boolean", + "title": "Prompt Enhancement", + "default": false + }, + "guidance": { + "description": "Guidance images to use for the generation. Up to 4 guidance methods can be combined during a single inference.", + "type": "array", + "title": "Guidance", + "items": { + "$ref": "#/components/schemas/GuidanceInput" + }, + "default": [] + }, + "aspect_ratio": { + "enum": [ + "1:1", + "2:3", + "3:2", + "3:4", + "4:3", + "4:5", + "5:4", + "9:16", + "16:9" + ], + "description": "The aspect ratio of the image. When a guidance method is being used, the aspect ratio is defined by the guidance image and this parameter is ignored.", + "type": "string", + "title": "Aspect Ratio", + "default": "1:1" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "type": "number", + "title": "Guidance scale (CFG)", + "maximum": 20, + "default": 5 + }, + "medium": { + "enum": [ + "photography", + "art" + ], + "description": "Which medium should be included in your generated images. This parameter is optional.", + "type": "string", + "title": "Medium" + }, + "seed": { + "minimum": 0, + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "type": "integer", + "title": "Seed", + "maximum": 2147483647 + }, + "negative_prompt": { + "description": "The negative prompt you would like to use to generate images.", + "type": "string", + "title": "Negative Prompt", + "default": "" + }, + "num_inference_steps": { + "minimum": 20, + "description": "The number of iterations the model goes through to refine the generated image. This parameter is optional.", + "type": "integer", + "title": "Num Inference Steps", + "maximum": 50, + "default": 30 + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "num_images", + "aspect_ratio", + "seed", + "num_inference_steps", + "guidance_scale", + "prompt_enhancement", + "medium", + "guidance", + "sync_mode" + ], + "required": [ + "prompt" + ] + }, + "BriaTextToImageHdOutput": { + "title": "Output", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "file_size": 3731290, + "height": 1536, + "file_name": "257cf8e7bd3a47c2959396343d5b38cf.png", + "content_type": "image/png", + "url": "https://v3.fal.media/files/tiger/48e63e0K6C9XQYBuomoU-_257cf8e7bd3a47c2959396343d5b38cf.png", + "width": 1536 + } + ] + ], + "description": "The generated images", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "description": "Seed value used for generation.", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "images", + "seed" + ], + "required": [ + "images", + "seed" + ] + }, + "GuidanceInput": { + "title": "GuidanceInput", + "type": "object", + "properties": { + "scale": { + "minimum": 0, + "description": "Impact of the guidance.", + "type": "number", + "title": "Scale", + "maximum": 1, + "default": 1 + }, + "method": { + "enum": [ + "controlnet_canny", + "controlnet_depth", + "controlnet_recoloring", + "controlnet_color_grid" + ], + "description": "Which guidance type you would like to include in the generation. Up to 4 guidance methods can be combined during a single inference. This parameter is optional.", + "type": "string", + "title": "Method" + }, + "image_url": { + "description": "The image that should be used as guidance, in base64 format, with the method defined in guidance_method_1. Accepted formats are jpeg, jpg, png, webp. Maximum file size 12MB. If more than one guidance method is used, all guidance images must be of the same aspect ratio, and this will be the aspect ratio of the generated results. If guidance_method_1 is selected, an image must be provided.", + "type": "string", + "title": "Image Url" + } + }, + "x-fal-order-properties": [ + "image_url", + "method", + "scale" + ], + "required": [ + "image_url" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/bria/text-to-image/hd/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bria/text-to-image/hd/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/bria/text-to-image/hd": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BriaTextToImageHdInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bria/text-to-image/hd/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BriaTextToImageHdOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/recraft-20b", + "metadata": { + "display_name": "Recraft 20b", + "category": "text-to-image", + "description": "Recraft 20b is a new and affordable text-to-image model.", + "status": "active", + "tags": [ + "image generation", + "vector art", + "typograph", + "style" + ], + "updated_at": "2026-01-26T21:44:35.149Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/Fal_Visuals_V1_011.jpg", + "model_url": "https://fal.run/fal-ai/recraft-20b", + "license_type": "commercial", + "date": "2024-12-16T00:00:00.000Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/recraft-20b", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/recraft-20b queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/recraft-20b", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/Fal_Visuals_V1_011.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/recraft-20b", + "documentationUrl": "https://fal.ai/models/fal-ai/recraft-20b/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Recraft20bInput": { + "title": "Recraft20BTextToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "a red panda in Kyoto" + ], + "title": "Prompt", + "type": "string", + "minLength": 1, + "maxLength": 1000 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "default": "square_hd" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": false + }, + "colors": { + "title": "Colors", + "type": "array", + "description": "An array of preferable colors", + "items": { + "$ref": "#/components/schemas/RGBColor" + }, + "default": [] + }, + "style": { + "enum": [ + "any", + "realistic_image", + "digital_illustration", + "vector_illustration", + "realistic_image/b_and_w", + "realistic_image/enterprise", + "realistic_image/hard_flash", + "realistic_image/hdr", + "realistic_image/motion_blur", + "realistic_image/natural_light", + "realistic_image/studio_portrait", + "digital_illustration/2d_art_poster", + "digital_illustration/2d_art_poster_2", + "digital_illustration/3d", + "digital_illustration/80s", + "digital_illustration/engraving_color", + "digital_illustration/glow", + "digital_illustration/grain", + "digital_illustration/hand_drawn", + "digital_illustration/hand_drawn_outline", + "digital_illustration/handmade_3d", + "digital_illustration/infantile_sketch", + "digital_illustration/kawaii", + "digital_illustration/pixel_art", + "digital_illustration/psychedelic", + "digital_illustration/seamless", + "digital_illustration/voxel", + "digital_illustration/watercolor", + "vector_illustration/cartoon", + "vector_illustration/doodle_line_art", + "vector_illustration/engraving", + "vector_illustration/flat_2", + "vector_illustration/kawaii", + "vector_illustration/line_art", + "vector_illustration/line_circuit", + "vector_illustration/linocut", + "vector_illustration/seamless" + ], + "title": "Style", + "type": "string", + "description": "The style of the generated images. Vector images cost 2X as much.", + "default": "realistic_image" + }, + "style_id": { + "format": "uuid4", + "title": "Style Id", + "type": "string", + "description": "The ID of the custom style reference (optional)" + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "style", + "colors", + "style_id", + "enable_safety_checker" + ], + "required": [ + "prompt" + ] + }, + "Recraft20bOutput": { + "title": "Recraft20BTextToImageOutput", + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://fal.media/files/tiger/qeO5RlXiAsdCREUMYg5ZU_image.webp" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/File" + } + } + }, + "x-fal-order-properties": [ + "images" + ], + "required": [ + "images" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "RGBColor": { + "title": "RGBColor", + "type": "object", + "properties": { + "r": { + "minimum": 0, + "title": "R", + "type": "integer", + "maximum": 255, + "description": "Red color value", + "default": 0 + }, + "b": { + "minimum": 0, + "title": "B", + "type": "integer", + "maximum": 255, + "description": "Blue color value", + "default": 0 + }, + "g": { + "minimum": 0, + "title": "G", + "type": "integer", + "maximum": 255, + "description": "Green color value", + "default": 0 + } + }, + "x-fal-order-properties": [ + "r", + "g", + "b" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/recraft-20b/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)."
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/recraft-20b/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/recraft-20b": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Recraft20bInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/recraft-20b/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Recraft20bOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ideogram/v2/turbo", + "metadata": { + "display_name": "Ideogram V2 Turbo", + "category": "text-to-image", + "description": "Accelerated image generation with Ideogram V2 Turbo. Create high-quality visuals, posters, and logos with enhanced speed while maintaining Ideogram's signature quality.", + "status": "active", + "tags": [ + "realism", + "typography" + ], + "updated_at": "2026-01-26T21:44:35.523Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/ideogram.webp", + "model_url": "https://fal.run/fal-ai/ideogram/v2/turbo", + "license_type": "commercial", + "date": "2024-12-04T00:00:00.000Z", + "group": { + "key": "ideogram-turbo", + "label": "Text to Image (Turbo)" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ideogram/v2/turbo", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ideogram/v2/turbo queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ideogram/v2/turbo", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/ideogram.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/ideogram/v2/turbo", + "documentationUrl": "https://fal.ai/models/fal-ai/ideogram/v2/turbo/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "IdeogramV2TurboInput": { + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "expand_prompt", + "seed", + "style", + "sync_mode", + "negative_prompt" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A comic style illustration of a skeleton sitting on a toilet in a bathroom. The bathroom has a Halloween decoration with a pumpkin jack-o-lantern and bats flying around. There is a text above the skeleton that says \"Just Waiting for Halloween with Ideogram 2.0 at fal.ai\"" + ], + "title": "Prompt", + "type": "string" + }, + "aspect_ratio": { + "enum": [ + "10:16", + "16:10", + "9:16", + "16:9", + "4:3", + "3:4", + "1:1", + "1:3", + "3:1", + "3:2", + "2:3" + ], + "description": "The aspect ratio of the generated image", + "type": "string", + "title": "Aspect Ratio", + "default": "1:1" + }, + "style": { + "enum": [ + "auto", + "general", + "realistic", + "design", + "render_3D", + "anime" + ], + "description": "The style of the generated image", + "type": "string", + "title": "Style", + "default": "auto" + }, + "expand_prompt": { + "description": "Whether to expand the prompt with MagicPrompt functionality.", + "type": "boolean", + "title": "Expand Prompt", + "default": true + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "Seed for the random number generator", + "title": "Seed" + }, + "negative_prompt": { + "description": "A negative prompt to avoid in the generated image", + "type": "string", + "title": "Negative Prompt", + "default": "" + } + }, + "title": "TextToImageInput", + "required": [ + "prompt" + ] + }, + "IdeogramV2TurboOutput": { + "x-fal-order-properties": [ + "images", + "seed" + ], + "type": "object", + "properties": { + "images": { + "examples": [ + [ + { + "url": "https://fal.media/files/monkey/cNaoxPl0YAWYb-QVBvO9F_image.png" + } + ] + ], + "title": "Images", + "type": "array", + "items": { + "$ref": "#/components/schemas/File" + } + }, + "seed": { + "examples": [ + 123456 + ], + "title": "Seed", + "type": "integer", + "description": "Seed used for the random number generator" + } + }, + "title": "Output", + "required": [ + "images", + "seed" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided." 
+ }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file." + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ideogram/v2/turbo/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v2/turbo/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v2/turbo": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdeogramV2TurboInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ideogram/v2/turbo/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IdeogramV2TurboOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/luma-photon/flash", + "metadata": { + "display_name": "Luma Photon Flash", + "category": "text-to-image", + "description": "Generate images from your prompts using Luma Photon Flash. 
Photon Flash is the most creative, personalizable, and intelligent visual model for creatives, bringing a step-function change in the cost of high-quality image generation.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:44:36.269Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/luma-photon.webp", + "model_url": "https://fal.run/fal-ai/luma-photon/flash", + "date": "2024-12-03T00:00:00.000Z", + "group": { + "key": "luma-photon", + "label": "Text to Image [flash]" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/luma-photon/flash", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/luma-photon/flash queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/luma-photon/flash", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/luma-photon.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/luma-photon/flash", + "documentationUrl": "https://fal.ai/models/fal-ai/luma-photon/flash/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LumaPhotonFlashInput": { + "title": "TextToImageRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A teddy bear in sunglasses playing electric guitar and dancing" + ], + "maxLength": 5000, + "type": "string", + "minLength": 3, + "title": "Prompt" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1", + "4:3", + "3:4", + "21:9", + "9:21" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated image", + "default": "1:1" + } + }, + "x-fal-order-properties": [ + "prompt", + "aspect_ratio" + ], + "required": [ + "prompt" + ] + }, + "LumaPhotonFlashOutput": { + "title": "T2IOutput", + "type": "object", + "properties": { + "images": { + "title": "Images", + "type": "array", + "description": "The generated image", + "items": { + "$ref": "#/components/schemas/File" + } + } + }, + "x-fal-order-properties": [ + "images" + ], + "required": [ + "images" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided."
+ }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/luma-photon/flash/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/luma-photon/flash/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/luma-photon/flash": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LumaPhotonFlashInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/luma-photon/flash/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LumaPhotonFlashOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/aura-flow", + "metadata": { + "display_name": "AuraFlow", + "category": "text-to-image", + "description": "AuraFlow v0.3 is an open-source flow-based text-to-image generation model that achieves state-of-the-art results on GenEval. 
The model is currently in beta.", + "status": "active", + "tags": [ + "typography", + "style" + ], + "updated_at": "2026-01-26T21:44:10.972Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/aura-flow.webp", + "model_url": "https://fal.run/fal-ai/aura-flow", + "github_url": "https://huggingface.co/fal/aura-flow/blob/main/LICENSE", + "license_type": "commercial", + "date": "2024-12-02T00:00:00.000Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/aura-flow", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/aura-flow queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/aura-flow", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/aura-flow.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/aura-flow", + "documentationUrl": "https://fal.ai/models/fal-ai/aura-flow/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "AuraFlowInput": { + "title": "Input", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Close-up portrait of a majestic iguana with vibrant blue-green scales, piercing amber eyes, and orange spiky crest. Intricate textures and details visible on scaly skin. Wrapped in dark hood, giving regal appearance. Dramatic lighting against black background. Hyper-realistic, high-resolution image showcasing the reptile's expressive features and coloration." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate images from" + }, + "num_images": { + "minimum": 1, + "maximum": 2, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate", + "default": 1 + }, + "expand_prompt": { + "title": "Expand Prompt", + "type": "boolean", + "description": "Whether to perform prompt expansion (recommended)", + "default": true + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. 
This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "Classifier free guidance scale", + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 20, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to take", + "default": 50 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed to use for generating images" + } + }, + "x-fal-order-properties": [ + "prompt", + "num_images", + "seed", + "guidance_scale", + "num_inference_steps", + "expand_prompt", + "sync_mode" + ], + "required": [ + "prompt" + ] + }, + "AuraFlowOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The expanded prompt" + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used to generate the images" + } + }, + "x-fal-order-properties": [ + "images", + "seed", + "prompt" + ], + "required": [ + "images", + "seed", + "prompt" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/aura-flow/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/aura-flow/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/aura-flow": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AuraFlowInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/aura-flow/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AuraFlowOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/omnigen-v1", + "metadata": { + "display_name": "OmniGen v1", + "category": "text-to-image", + "description": "OmniGen is a unified image generation model that can generate a wide range of images from multi-modal prompts. It can be used for various tasks such as Image Editing, Personalized Image Generation, Virtual Try-On, Multi Person Generation and more!", + "status": "active", + "tags": [ + "multimodal", + "editing", + "try-on" + ], + "updated_at": "2026-01-26T21:44:11.113Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/omnigen-v1.webp", + "model_url": "https://fal.run/fal-ai/omnigen-v1", + "license_type": "commercial", + "date": "2024-11-29T00:00:00.000Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/omnigen-v1", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/omnigen-v1 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/omnigen-v1", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/omnigen-v1.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/omnigen-v1", + "documentationUrl": "https://fal.ai/models/fal-ai/omnigen-v1/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "OmnigenV1Input": { + "title": "TextToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Neon words \"Omni Gen\" are flashing in the prosperous future city, the sense of science and technology, quality details, hyper realistic, high definition, 8K, photo, best quality, high quality." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "square_hd" + }, + "img_guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Image Guidance scale", + "description": "\n The Image Guidance scale is a measure of how close you want\n the model to stick to your input image when looking for a related image to show you.\n ", + "default": 1.6 + }, + "input_image_urls": { + "description": "URL of images to use while generating the image, Use <|image_1|> for the first image and so on.", + "type": "array", + "items": { + "type": "string" + }, + "examples": [], + "title": "Input Image Urls", + "default": [] + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. 
This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 3 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 50 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + } + }, + "x-fal-order-properties": [ + "prompt", + "input_image_urls", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "img_guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format" + ], + "required": [ + "prompt" + ] + }, + "OmnigenV1Output": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/omnigen-v1/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/omnigen-v1/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/omnigen-v1": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OmnigenV1Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/omnigen-v1/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OmnigenV1Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux/schnell", + "metadata": { + "display_name": "FLUX.1 [schnell]", + "category": "text-to-image", + "description": "FLUX.1 [schnell] is a 12 billion parameter flow transformer that generates high-quality images from text in 1 to 4 steps, suitable for personal and commercial use.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:44:11.395Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/flux-schnell-thumb.webp", + "model_url": "https://fal.run/fal-ai/flux/schnell", + "github_url": "https://www.apache.org/licenses/LICENSE-2.0.txt", + "license_type": "commercial", + "date": "2024-11-25T00:00:00.000Z", + "group": { + "key": "flux-1", + "label": "Text to Image [schnell]" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux/schnell", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux/schnell queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux/schnell", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/flux-schnell-thumb.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux/schnell", + "documentationUrl": "https://fal.ai/models/fal-ai/flux/schnell/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxSchnellInput": { + "title": "SchnellTextToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Extreme close-up of a single tiger eye, direct frontal view. 
Detailed iris and pupil. Sharp focus on eye texture and color. Natural lighting to capture authentic eye shine and depth. The word \"FLUX\" is painted over it in big, white brush strokes with visible texture." + ], + "description": "The prompt to generate an image from.", + "type": "string", + "title": "Prompt" + }, + "num_images": { + "minimum": 1, + "description": "The number of images to generate.", + "type": "integer", + "title": "Num Images", + "maximum": 4, + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image.", + "title": "Image Size", + "default": "landscape_4_3" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "description": "The speed of the generation. The higher the speed, the faster the generation.", + "type": "string", + "title": "Acceleration", + "default": "none" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Output Format", + "default": "jpeg" + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ", + "title": "Seed" + }, + "guidance_scale": { + "minimum": 1, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "type": "number", + "title": "Guidance scale (CFG)", + "maximum": 20, + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 1, + "description": "The number of inference steps to perform.", + "type": "integer", + "title": "Num Inference Steps", + "maximum": 12, + "default": 4 + } + }, + "x-fal-order-properties": [ + "num_inference_steps", + "prompt", + "image_size", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "acceleration" + ], + "required": [ + "prompt" + ] + }, + "FluxSchnellOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "description": "The generated image files info.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "seed": { + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "description": "The height of the generated image.", + "type": "integer", + "title": "Height", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "description": "The width of the generated image.", + "type": "integer", + "title": "Width", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux/schnell/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux/schnell/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux/schnell": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxSchnellInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux/schnell/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxSchnellOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/stable-diffusion-v35-medium", + "metadata": { + "display_name": "Stable Diffusion 3.5 Medium", + "category": "text-to-image", + "description": "Stable Diffusion 3.5 Medium is a Multimodal Diffusion Transformer (MMDiT) text-to-image model that features improved performance in image quality, typography, complex prompt understanding, and resource-efficiency.", + "status": "active", + "tags": [ + "diffusion", + "typography", + "style" + ], + "updated_at": "2026-01-26T21:44:38.571Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/stable-diffusion-v35-medium.webp", + "model_url": "https://fal.run/fal-ai/stable-diffusion-v35-medium", + "github_url": "https://stability.ai/license", + "license_type": "commercial", + "date": "2024-10-27T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/stable-diffusion-v35-medium", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/stable-diffusion-v35-medium queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/stable-diffusion-v35-medium", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/stable-diffusion-v35-medium.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/stable-diffusion-v35-medium", + "documentationUrl": "https://fal.ai/models/fal-ai/stable-diffusion-v35-medium/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "StableDiffusionV35MediumInput": { + "title": "TextToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A dreamlike Japanese garden in perpetual twilight, bathed in bioluminescent cherry blossoms that emit a soft pink-purple glow. Floating paper lanterns drift lazily through the scene, their warm light creating dancing reflections in a mirror-like koi pond. Ethereal mist weaves between ancient stone pathways lined with glowing mushrooms in pastel blues and purples. A traditional wooden bridge arches gracefully over the water, dusted with fallen petals that sparkle like stardust. The scene is captured through a cinematic lens with perfect bokeh, creating an otherworldly atmosphere. In the background, a crescent moon hangs impossibly large in the sky, surrounded by a sea of stars and auroral wisps in teal and violet. Crystal formations emerge from the ground, refracting the ambient light into rainbow prisms. The entire composition follows the golden ratio, with moody film-like color grading reminiscent of Studio Ghibli, enhanced by volumetric god rays filtering through the luminous foliage. 8K resolution, masterful photography, hyperdetailed, magical realism." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "landscape_4_3" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 4.5 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 40 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "negative_prompt": { + "examples": [ + "" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. 
moustache, blurry, low resolution).\n ", + "default": "" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format" + ], + "required": [ + "prompt" + ] + }, + "StableDiffusionV35MediumOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/stable-diffusion-v35-medium/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/stable-diffusion-v35-medium/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/stable-diffusion-v35-medium": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StableDiffusionV35MediumInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/stable-diffusion-v35-medium/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StableDiffusionV35MediumOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-lora/inpainting", + "metadata": { + "display_name": "FLUX.1 [dev] Inpainting with LoRAs", + "category": "text-to-image", + "description": "Super fast endpoint for the FLUX.1 [dev] inpainting model with LoRA support, enabling rapid and high-quality image inpaingting using pre-trained LoRA adaptations for personalization, specific styles, brand identities, and product-specific outputs.", + "status": "active", + "tags": [ + "lora", + "personalization" + ], + "updated_at": "2026-01-26T21:44:13.562Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/flux_lora.jpg", + "model_url": "https://fal.run/fal-ai/flux-lora/inpainting", + "license_type": "commercial", + "date": "2024-09-18T00:00:00.000Z", + "group": { + "key": "flux-lora", + "label": "Inpainting" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-lora/inpainting", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-lora/inpainting queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-lora/inpainting", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/flux_lora.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-lora/inpainting", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-lora/inpainting/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] 
+ }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxLoraInpaintingInput": { + "title": "InpaintInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A photo of a lion sitting on a stone bench" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate. This is always set to 1 for streaming output.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image." + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/dog.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of image to use for inpainting. or img2img" + }, + "loras": { + "description": "\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "examples": [], + "title": "Loras", + "default": [] + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "strength": { + "minimum": 0.01, + "maximum": 1, + "type": "number", + "title": "Strength", + "description": "The strength to use for inpainting/image-to-image. Only used if the image_url is provided. 
1.0 completely remakes the image while 0.0 preserves the original.", + "default": 0.85 + }, + "guidance_scale": { + "minimum": 0, + "maximum": 35, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 28 + }, + "mask_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/dog_mask.png" + ], + "title": "Mask Url", + "type": "string", + "description": "\n The mask of the area to inpaint.\n " + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "loras", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "image_url", + "strength", + "mask_url" + ], + "required": [ + "prompt", + "image_url", + "mask_url" + ] + }, + "FluxLoraInpaintingOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Scale", + "description": "\n The scale of the LoRA weight. 
This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + } + }, + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-lora/inpainting/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-lora/inpainting/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-lora/inpainting": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxLoraInpaintingInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-lora/inpainting/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxLoraInpaintingOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/stable-diffusion-v3-medium", + "metadata": { + "display_name": "Stable Diffusion V3", + "category": "text-to-image", + "description": "Stable Diffusion 3 Medium (Text to Image) is a Multimodal Diffusion Transformer (MMDiT) model that improves image quality, typography, prompt understanding, and efficiency.", + "status": "active", + "tags": [ + "diffusion", + "style" + ], + "updated_at": "2026-01-26T21:44:14.189Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/landing/sd3-sample-03.webp", + "model_url": "https://fal.run/fal-ai/stable-diffusion-v3-medium", + "github_url": "https://huggingface.co/stabilityai/stable-diffusion-3-medium/blob/main/LICENSE", + "license_type": "commercial", + "date": "2024-08-20T00:00:00.000Z", + "group": { + "key": "stable-diffusion-v3-medium", + "label": "Text to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/stable-diffusion-v3-medium", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/stable-diffusion-v3-medium queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/stable-diffusion-v3-medium", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/landing/sd3-sample-03.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/stable-diffusion-v3-medium", + "documentationUrl": "https://fal.ai/models/fal-ai/stable-diffusion-v3-medium/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "StableDiffusionV3MediumInput": { + "title": "TextToImageInput", + "type": "object", + "properties": { + "prompt_expansion": { + "title": "Enhance Prompt", + "type": "boolean", + "description": "If set to true, prompt will be upsampled with more details.", + "default": false + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 4, + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "square_hd" + }, + "prompt": { + "examples": [ + "Digital art, portrait of an anthropomorphic roaring Tiger warrior with full armor, close up in the middle of a battle, behind him there is a banner with the text \"Open Source\"" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance scale (CFG)", + "type": "number", + "maximum": 20, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 5 + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps to perform.", + "default": 28 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to generate an image from.", + "default": "" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "prompt_expansion", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker" + ], + "required": [ + "prompt" + ] + }, + "StableDiffusionV3MediumOutput": { + "title": "SD3Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "num_images": { + "title": "Number of Images", + "type": "integer", + "description": "The number of images generated." 
+ }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt", + "num_images" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt", + "num_images" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/stable-diffusion-v3-medium/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/stable-diffusion-v3-medium/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/stable-diffusion-v3-medium": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StableDiffusionV3MediumInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/stable-diffusion-v3-medium/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StableDiffusionV3MediumOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/fooocus/upscale-or-vary", + "metadata": { + "display_name": "Fooocus Upscale or Vary", + "category": "text-to-image", + "description": "Default parameters with automated optimizations and quality improvements.", + "status": "active", + "tags": [ + "upscaling", + "vary", + "stylized" + ], + "updated_at": "2026-01-26T21:44:43.087Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/fooocus/fal_ai_fooocus_cyberpunk-city.jpeg", + "model_url": "https://fal.run/fal-ai/fooocus/upscale-or-vary", + "github_url": "https://github.com/lllyasviel/Fooocus/blob/main/LICENSE", + "date": "2024-08-12T00:00:00.000Z", + "group": { + "key": "fooocus", + "label": "Upscale" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/fooocus/upscale-or-vary", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/fooocus/upscale-or-vary queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/fooocus/upscale-or-vary", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/fooocus/fal_ai_fooocus_cyberpunk-city.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/fooocus/upscale-or-vary", + "documentationUrl": "https://fal.ai/models/fal-ai/fooocus/upscale-or-vary/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FooocusUpscaleOrVaryInput": { + "title": "FooocusUpscaleOrVaryInput", + "type": "object", + "properties": { + "styles": { + "title": "Styles", + "type": "array", + "description": "\n The style to use.\n ", + "uniqueItems": true, + "items": { + "enum": [ + "Fooocus V2", + "Fooocus Enhance", + "Fooocus Sharp", + "Fooocus Semi Realistic", + "Fooocus Masterpiece", + "Fooocus Photograph", + "Fooocus Negative", + "Fooocus Cinematic", + "SAI 3D Model", + "SAI Analog Film", + "SAI Anime", + "SAI Cinematic", + "SAI Comic Book", + "SAI Craft Clay", + "SAI Digital Art", + "SAI Enhance", + "SAI Fantasy Art", + "SAI Isometric", + "SAI Line Art", + "SAI Lowpoly", + "SAI Neonpunk", + "SAI Origami", + "SAI Photographic", + "SAI Pixel Art", + "SAI Texture", + "MRE Cinematic Dynamic", + "MRE Spontaneous Picture", + "MRE Artistic Vision", + "MRE Dark Dream", + "MRE Gloomy Art", + "MRE Bad Dream", + "MRE Underground", + "MRE Surreal Painting", + "MRE Dynamic Illustration", + "MRE Undead Art", + "MRE Elemental Art", + "MRE Space Art", + "MRE Ancient Illustration", + "MRE Brave Art", + "MRE Heroic Fantasy", + "MRE Dark Cyberpunk", + "MRE Lyrical Geometry", + "MRE Sumi E Symbolic", + "MRE Sumi E Detailed", + "MRE Manga", + "MRE Anime", + "MRE Comic", + "Ads Advertising", + "Ads Automotive", + "Ads Corporate", + "Ads Fashion Editorial", + "Ads Food Photography", + "Ads Gourmet Food Photography", + "Ads Luxury", + "Ads Real Estate", + "Ads Retail", + "Artstyle Abstract", + "Artstyle Abstract Expressionism", + "Artstyle Art Deco", + "Artstyle Art Nouveau", + "Artstyle Constructivist", + "Artstyle Cubist", + "Artstyle Expressionist", + "Artstyle Graffiti", + "Artstyle Hyperrealism", + "Artstyle Impressionist", + "Artstyle Pointillism", + "Artstyle Pop Art", + "Artstyle Psychedelic", + "Artstyle Renaissance", + "Artstyle Steampunk", + "Artstyle Surrealist", + "Artstyle Typography", + "Artstyle Watercolor", + "Futuristic Biomechanical", + "Futuristic Biomechanical Cyberpunk", + "Futuristic Cybernetic", + "Futuristic Cybernetic Robot", + "Futuristic Cyberpunk Cityscape", + "Futuristic Futuristic", + "Futuristic Retro Cyberpunk", + "Futuristic Retro Futurism", + "Futuristic Sci Fi", + "Futuristic Vaporwave", + "Game Bubble Bobble", + "Game Cyberpunk Game", + "Game Fighting Game", + "Game Gta", + "Game Mario", + "Game Minecraft", + "Game Pokemon", + "Game Retro Arcade", + "Game Retro Game", + "Game Rpg Fantasy Game", + "Game Strategy Game", + "Game Streetfighter", + "Game Zelda", + "Misc Architectural", + "Misc Disco", + "Misc Dreamscape", + "Misc Dystopian", + "Misc Fairy Tale", + "Misc Gothic", + "Misc Grunge", + "Misc Horror", + "Misc Kawaii", + "Misc Lovecraftian", + "Misc Macabre", + "Misc Manga", + "Misc Metropolis", + "Misc Minimalist", + "Misc Monochrome", + "Misc Nautical", + "Misc Space", + "Misc Stained Glass", + "Misc Techwear Fashion", + "Misc Tribal", + "Misc Zentangle", + "Papercraft Collage", + "Papercraft Flat Papercut", + "Papercraft Kirigami", + "Papercraft Paper Mache", + "Papercraft Paper Quilling", + "Papercraft Papercut Collage", + "Papercraft Papercut Shadow Box", + "Papercraft Stacked Papercut", + "Papercraft Thick Layered Papercut", + "Photo Alien", + "Photo Film Noir", + "Photo Glamour", + "Photo Hdr", + "Photo Iphone Photographic", + "Photo Long Exposure", + "Photo Neon Noir", + "Photo Silhouette", + "Photo Tilt Shift", + "Cinematic Diva", + "Abstract Expressionism", + "Academia", + "Action Figure", + "Adorable 3D Character", + 
"Adorable Kawaii", + "Art Deco", + "Art Nouveau", + "Astral Aura", + "Avant Garde", + "Baroque", + "Bauhaus Style Poster", + "Blueprint Schematic Drawing", + "Caricature", + "Cel Shaded Art", + "Character Design Sheet", + "Classicism Art", + "Color Field Painting", + "Colored Pencil Art", + "Conceptual Art", + "Constructivism", + "Cubism", + "Dadaism", + "Dark Fantasy", + "Dark Moody Atmosphere", + "Dmt Art Style", + "Doodle Art", + "Double Exposure", + "Dripping Paint Splatter Art", + "Expressionism", + "Faded Polaroid Photo", + "Fauvism", + "Flat 2d Art", + "Fortnite Art Style", + "Futurism", + "Glitchcore", + "Glo Fi", + "Googie Art Style", + "Graffiti Art", + "Harlem Renaissance Art", + "High Fashion", + "Idyllic", + "Impressionism", + "Infographic Drawing", + "Ink Dripping Drawing", + "Japanese Ink Drawing", + "Knolling Photography", + "Light Cheery Atmosphere", + "Logo Design", + "Luxurious Elegance", + "Macro Photography", + "Mandola Art", + "Marker Drawing", + "Medievalism", + "Minimalism", + "Neo Baroque", + "Neo Byzantine", + "Neo Futurism", + "Neo Impressionism", + "Neo Rococo", + "Neoclassicism", + "Op Art", + "Ornate And Intricate", + "Pencil Sketch Drawing", + "Pop Art 2", + "Rococo", + "Silhouette Art", + "Simple Vector Art", + "Sketchup", + "Steampunk 2", + "Surrealism", + "Suprematism", + "Terragen", + "Tranquil Relaxing Atmosphere", + "Sticker Designs", + "Vibrant Rim Light", + "Volumetric Lighting", + "Watercolor 2", + "Whimsical And Playful", + "Mk Chromolithography", + "Mk Cross Processing Print", + "Mk Dufaycolor Photograph", + "Mk Herbarium", + "Mk Punk Collage", + "Mk Mosaic", + "Mk Van Gogh", + "Mk Coloring Book", + "Mk Singer Sargent", + "Mk Pollock", + "Mk Basquiat", + "Mk Andy Warhol", + "Mk Halftone Print", + "Mk Gond Painting", + "Mk Albumen Print", + "Mk Aquatint Print", + "Mk Anthotype Print", + "Mk Inuit Carving", + "Mk Bromoil Print", + "Mk Calotype Print", + "Mk Color Sketchnote", + "Mk Cibulak Porcelain", + "Mk Alcohol Ink Art", + "Mk One Line Art", + "Mk Blacklight Paint", + "Mk Carnival Glass", + "Mk Cyanotype Print", + "Mk Cross Stitching", + "Mk Encaustic Paint", + "Mk Embroidery", + "Mk Gyotaku", + "Mk Luminogram", + "Mk Lite Brite Art", + "Mk Mokume Gane", + "Pebble Art", + "Mk Palekh", + "Mk Suminagashi", + "Mk Scrimshaw", + "Mk Shibori", + "Mk Vitreous Enamel", + "Mk Ukiyo E", + "Mk Vintage Airline Poster", + "Mk Vintage Travel Poster", + "Mk Bauhaus Style", + "Mk Afrofuturism", + "Mk Atompunk", + "Mk Constructivism", + "Mk Chicano Art", + "Mk De Stijl", + "Mk Dayak Art", + "Mk Fayum Portrait", + "Mk Illuminated Manuscript", + "Mk Kalighat Painting", + "Mk Madhubani Painting", + "Mk Pictorialism", + "Mk Pichwai Painting", + "Mk Patachitra Painting", + "Mk Samoan Art Inspired", + "Mk Tlingit Art", + "Mk Adnate Style", + "Mk Ron English Style", + "Mk Shepard Fairey Style" + ], + "type": "string" + }, + "default": [ + "Fooocus Enhance", + "Fooocus V2", + "Fooocus Sharp" + ] + }, + "uov_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/fooocus/fruit_basket.jpeg" + ], + "title": "UOV Image URL", + "type": "string", + "description": "The image to upscale or vary." 
+ }, + "performance": { + "enum": [ + "Speed", + "Quality", + "Extreme Speed", + "Lightning" + ], + "title": "Performance", + "type": "string", + "description": "\n You can choose Speed or Quality\n ", + "default": "Extreme Speed" + }, + "mixing_image_prompt_and_vary_upscale": { + "title": "Mixing Image Prompt and Vary/Upscale", + "type": "boolean", + "description": "Mixing Image Prompt and Vary/Upscale", + "default": false + }, + "image_prompt_3": { + "$ref": "#/components/schemas/ImagePrompt" + }, + "prompt": { + "examples": [ + "a basket of various fruits, bokeh, realistic, masterpiece" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results.", + "default": "" + }, + "loras": { + "title": "Loras", + "type": "array", + "description": "\n The LoRAs to use for the image generation. You can use up to 5 LoRAs\n and they will be merged together to generate the final image.\n ", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "default": [ + { + "path": "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_offset_example-lora_1.0.safetensors", + "scale": 0.1 + } + ] + }, + "image_prompt_4": { + "$ref": "#/components/schemas/ImagePrompt" + }, + "image_prompt_1": { + "$ref": "#/components/schemas/ImagePrompt" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to false, the safety checker will be disabled.", + "default": true + }, + "sharpness": { + "minimum": 0, + "title": "Sharpness", + "type": "number", + "maximum": 30, + "description": "\n The sharpness of the generated image. Use it to control how sharp the generated\n image should be. Higher value means image and texture are sharper.\n ", + "default": 2 + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance Scale", + "type": "number", + "maximum": 30, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 4 + }, + "negative_prompt": { + "examples": [ + "(worst quality, low quality, normal quality, lowres, low details, oversaturated, undersaturated, overexposed, underexposed, grayscale, bw, bad photo, bad photography, bad art:1.4), (watermark, signature, text font, username, error, logo, words, letters, digits, autograph, trademark, name:1.2), (blur, blurry, grainy), morbid, ugly, asymmetrical, mutated malformed, mutilated, poorly lit, bad shadow, draft, cropped, out of frame, cut off, censored, jpeg artifacts, out of focus, glitch, duplicate, (airbrushed, cartoon, anime, semi-realistic, cgi, render, blender, digital art, manga, amateur:1.3), (3D ,3D Game, 3D Game Scene, 3D Character:1.1), (bad hands, bad anatomy, bad body, bad face, bad teeth, bad arms, bad legs, deformities:1.3)" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + "default": "" + }, + "aspect_ratio": { + "title": "Aspect Ratio", + "type": "string", + "description": "\n The size of the generated image. 
You can choose between some presets or\n custom height and width that **must be multiples of 8**.\n ", + "default": "1024x1024" + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 4, + "description": "\n Number of images to generate in one request\n ", + "default": 1 + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "refiner_model": { + "enum": [ + "None", + "realisticVisionV60B1_v51VAE.safetensors" + ], + "title": "Refiner Model", + "type": "string", + "description": "Refiner (SDXL or SD 1.5)", + "default": "None" + }, + "image_prompt_2": { + "$ref": "#/components/schemas/ImagePrompt" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ", + "default": false + }, + "uov_method": { + "enum": [ + "Disabled", + "Vary (Subtle)", + "Vary (Strong)", + "Upscale (1.5x)", + "Upscale (2x)", + "Upscale (Fast 2x)" + ], + "title": "UOV Method", + "type": "string", + "description": "The method to use for upscaling or varying.", + "default": "Vary (Strong)" + }, + "seed": { + "examples": [ + 176400 + ], + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ", + "nullable": true + }, + "refiner_switch": { + "description": "\n Use 0.4 for SD1.5 realistic models; 0.667 for SD1.5 anime models\n 0.8 for XL-refiners; or any value for switching two SDXL models.\n ", + "type": "number", + "minimum": 0, + "title": "Refiner Switch At", + "maximum": 1, + "multipleOf": 0.0001, + "default": 0.8 + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "styles", + "performance", + "guidance_scale", + "sharpness", + "aspect_ratio", + "num_images", + "loras", + "refiner_model", + "refiner_switch", + "output_format", + "sync_mode", + "seed", + "uov_image_url", + "uov_method", + "image_prompt_1", + "image_prompt_2", + "image_prompt_3", + "image_prompt_4", + "mixing_image_prompt_and_vary_upscale", + "enable_safety_checker" + ], + "required": [ + "uov_image_url" + ] + }, + "FooocusUpscaleOrVaryOutput": { + "title": "FooocusOutput", + "type": "object", + "properties": { + "images": { + "title": "Images", + "type": "array", + "description": "The generated image file info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + }, + "description": "The time taken for the generation process." 
+ }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "has_nsfw_concepts" + ], + "required": [ + "images", + "timings", + "has_nsfw_concepts" + ] + }, + "ImagePrompt": { + "title": "ImagePrompt", + "type": "object", + "properties": { + "weight": { + "minimum": 0, + "title": "Weight", + "type": "number", + "maximum": 2, + "default": 1 + }, + "stop_at": { + "minimum": 0, + "title": "Stop At", + "type": "number", + "maximum": 1, + "default": 0.5 + }, + "type": { + "enum": [ + "ImagePrompt", + "PyraCanny", + "CPDS", + "FaceSwap" + ], + "title": "Type", + "type": "string", + "default": "ImagePrompt" + }, + "image_url": { + "title": "Image Url", + "type": "string" + } + }, + "x-fal-order-properties": [ + "type", + "image_url", + "stop_at", + "weight" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "examples": [ + "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_offset_example-lora_1.0.safetensors" + ], + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "title": "Scale", + "type": "number", + "maximum": 1, + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 0.1 + } + }, + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "description": "Represents an image file.", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/fooocus/upscale-or-vary/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fooocus/upscale-or-vary/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/fooocus/upscale-or-vary": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FooocusUpscaleOrVaryInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fooocus/upscale-or-vary/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FooocusUpscaleOrVaryOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/sana", + "metadata": { + "display_name": "Sana", + "category": "text-to-image", + "description": "Sana can synthesize high-resolution, high-quality images with strong text-image alignment at a remarkably fast speed, with the ability to generate 4K images in less than a second.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:44:43.344Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/sana.webp", + "model_url": "https://fal.run/fal-ai/sana", + "github_url": "https://github.com/NVlabs/Sana/blob/main/LICENSE", + "license_type": "commercial", + "date": "2024-08-01T00:00:00.000Z", + "group": { + "key": "sana", + "label": "Base" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/sana", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/sana queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/sana", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/sana.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/sana", + "documentationUrl": "https://fal.ai/models/fal-ai/sana/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "SanaInput": { + "title": "TextToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Underwater coral reef ecosystem during peak bioluminescent activity, multiple layers of marine life - from microscopic plankton to massive coral structures, light refracting through crystal-clear tropical waters, creating prismatic color gradients, hyper-detailed texture of marine organisms" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": { + "height": 2160, + "width": 3840 + } + }, + "style_name": { + "enum": [ + "(No style)", + "Cinematic", + "Photographic", + "Anime", + "Manga", + "Digital Art", + "Pixel art", + "Fantasy art", + "Neonpunk", + "3D Model" + ], + "title": "Style Name", + "type": "string", + "description": "The style to generate the image in.", + "default": "(No style)" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 5 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 18 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "negative_prompt": { + "examples": [ + "" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. 
moustache, blurry, low resolution).\n ", + "default": "" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format", + "style_name" + ], + "required": [ + "prompt" + ] + }, + "SanaOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/sana/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sana/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/sana": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SanaInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sana/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SanaOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-subject", + "metadata": { + "display_name": "FLUX.1 Subject", + "category": "text-to-image", + "description": "Super fast endpoint for the FLUX.1 [schnell] model with subject input capabilities, enabling rapid and high-quality image generation for personalization, specific styles, brand identities, and product-specific outputs.", + "status": "active", + "tags": [ + "personalization", + "customization" + ], + "updated_at": "2026-01-26T21:44:43.219Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/flux-subject.webp", + "model_url": "https://fal.run/fal-ai/flux-subject", + "license_type": "commercial", + "date": "2024-08-01T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-subject", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-subject queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-subject", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/flux-subject.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-subject", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-subject/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." 
+ }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxSubjectInput": { + "title": "FluxSubjectInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "On the beach, a lady sits under a beach umbrella with 'Omini' written on it. She's wearing this shirt and has a big smile on her face, with her surfboard hehind her. The sun is setting in the background. The sky is a beautiful shade of orange and purple." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "square_hd" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/ominicontrol/ominishirt.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "URL of image of the subject" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. 
This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ", + "default": false + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 3.5 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 8 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n " + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "output_format" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "FluxSubjectOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/flux-subject/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-subject/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-subject": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxSubjectInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-subject/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxSubjectOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pixart-sigma", + "metadata": { + "display_name": "PixArt-Σ", + "category": "text-to-image", + "description": "Weak-to-Strong Training of Diffusion Transformer for 4K Text-to-Image Generation", + "status": "active", + "tags": [ + "diffusion" + ], + "updated_at": "2026-01-26T21:44:43.509Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/pixart-sigma.jpeg", + "model_url": "https://fal.run/fal-ai/pixart-sigma", + "github_url": "https://github.com/PixArt-alpha/PixArt-sigma/blob/master/LICENSE", + "date": "2024-08-01T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pixart-sigma", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pixart-sigma queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pixart-sigma", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/pixart-sigma.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/pixart-sigma", + "documentationUrl": "https://fal.ai/models/fal-ai/pixart-sigma/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "PixartSigmaInput": { + "title": "PixArtSigmaInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Photorealistic closeup video of two pirate ships battling each other as they sail inside a cup of coffee.", + "an astronaut sitting in a diner, eating fries, cinematic, analog film", + "Pirate ship trapped in a cosmic maelstrom nebula, rendered in cosmic beach whirlpool engine, volumetric lighting, spectacular, ambient lights, light pollution, cinematic atmosphere, art nouveau style, illustration art artwork by SenseiJaye, intricate detail.", + "stars, water, brilliantly, gorgeous large scale scene, a little girl, in the style of dreamy realism, light gold and amber, blue and pink, brilliantly illuminated in the background.", + "professional portrait photo of an anthropomorphic cat wearing fancy gentleman hat and jacket walking in autumn forest.", + "beautiful lady, freckles, big smile, blue eyes, short ginger hair, dark makeup, wearing a floral blue vest top, soft light, dark grey background", + "Spectacular Tiny World in the Transparent Jar On the Table, interior of the Great Hall, Elaborate, Carved Architecture, Anatomy, Symmetrical, Geometric and Parameteric Details, Precision Flat line Details, Pattern, Dark fantasy, Dark errie mood and ineffably mysterious mood, Technical design, Intricate Ultra Detail, Ornate Detail, Stylized and Futuristic and Biomorphic Details, Architectural Concept, Low contrast Details, Cinematic Lighting, 8k, by moebius, Fullshot, Epic, Fullshot, Octane render, Unreal ,Photorealistic, Hyperrealism", + "anthropomorphic profile of the white snow owl Crystal priestess , art deco painting, pretty and expressive eyes, ornate costume, mythical, ethereal, intricate, elaborate, hyperrealism, hyper detailed, 3D, 8K, Ultra Realistic, high octane, ultra resolution, amazing detail, perfection, In frame, photorealistic, cinematic lighting, visual clarity, shading , Lumen Reflections, Super-Resolution, gigapixel, color grading, retouch, enhanced, PBR, Blender, V-ray, Procreate, zBrush, Unreal Engine 5, cinematic, volumetric, dramatic, neon lighting, wide angle lens ,no digital painting blur", + "The parametric hotel lobby is a sleek and modern space with plenty of natural light. The lobby is spacious and open with a variety of seating options. The front desk is a sleek white counter with a parametric design. The walls are a light blue color with parametric patterns. The floor is a light wood color with a parametric design. There are plenty of plants and flowers throughout the space. The overall effect is a calm and relaxing space. occlusion, moody, sunset, concept art, octane rendering, 8k, highly detailed, concept art, highly detailed, beautiful scenery, cinematic, beautiful light, hyperreal, octane render, hdr, long exposure, 8K, realistic, fog, moody, fire and explosions, smoke, 50mm f2.8" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results." 
+ }, + "num_images": { + "minimum": 1, + "maximum": 8, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "square_hd" + }, + "style": { + "enum": [ + "(No style)", + "Cinematic", + "Photographic", + "Anime", + "Manga", + "Digital Art", + "Pixel art", + "Fantasy art", + "Neonpunk", + "3D Model" + ], + "title": "Style", + "type": "string", + "description": "The style to apply to the image.", + "default": "(No style)" + }, + "scheduler": { + "enum": [ + "DPM-SOLVER", + "SA-SOLVER" + ], + "title": "Scheduler", + "type": "string", + "description": "The scheduler to use for the model.", + "default": "DPM-SOLVER" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ", + "default": false + }, + "guidance_scale": { + "minimum": 1, + "maximum": 10, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 4.5 + }, + "num_inference_steps": { + "minimum": 5, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 35 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + }, + "negative_prompt": { + "examples": [ + "cartoon, illustration, animation. face. male, female", + "ugly, deformed" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + "default": "" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "style", + "image_size", + "num_inference_steps", + "scheduler", + "guidance_scale", + "seed", + "sync_mode", + "num_images", + "enable_safety_checker" + ], + "required": [ + "prompt" + ] + }, + "PixartSigmaOutput": { + "title": "PixArtSigmaOutput", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings", + "description": "The timings of the different steps of the generation process." 
+ }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "description": "Represents an image file.", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pixart-sigma/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixart-sigma/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/pixart-sigma": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixartSigmaInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixart-sigma/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixartSigmaOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/sdxl-controlnet-union", + "metadata": { + "display_name": "SDXL ControlNet Union", + "category": "text-to-image", + "description": "An efficient SDXL multi-controlnet text-to-image model.", + "status": "active", + "tags": [ + "diffusion", + "controlnet", + "composition" + ], + "updated_at": "2026-01-26T21:44:14.448Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/controlnet-union/000004_openpose_scribble_concat.jpg", + "model_url": "https://fal.run/fal-ai/sdxl-controlnet-union", + "github_url": "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/LICENSE.md", + "license_type": "commercial", + "date": "2024-07-31T00:00:00.000Z", + "group": { + "key": "sdxl-controlnet-union", + "label": "Text to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/sdxl-controlnet-union", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/sdxl-controlnet-union queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/sdxl-controlnet-union", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/controlnet-union/000004_openpose_scribble_concat.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/sdxl-controlnet-union", + "documentationUrl": "https://fal.ai/models/fal-ai/sdxl-controlnet-union/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position."
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "SdxlControlnetUnionInput": { + "title": "TextToImageControlNetUnionInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Ice fortress, aurora skies, polar wildlife, twilight" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results." + }, + "depth_preprocess": { + "title": "Depth Preprocess", + "type": "boolean", + "description": "Whether to preprocess the depth image.", + "default": true + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image. Leave it none to automatically infer from the control image.", + "examples": [ + null + ], + "nullable": true + }, + "normal_image_url": { + "examples": [ + "https://fal-cdn.batuhan-941.workers.dev/files/rabbit/MiN_j3St9B8esJleCZKMU.jpeg" + ], + "title": "Normal Image Url", + "type": "string", + "description": "The URL of the control image." + }, + "embeddings": { + "title": "Embeddings", + "type": "array", + "description": "The list of embeddings to use.", + "items": { + "$ref": "#/components/schemas/Embedding" + }, + "default": [] + }, + "teed_image_url": { + "examples": [ + "https://fal-cdn.batuhan-941.workers.dev/files/rabbit/MiN_j3St9B8esJleCZKMU.jpeg" + ], + "title": "Teed Image Url", + "type": "string", + "description": "The URL of the control image." + }, + "loras": { + "title": "Loras", + "type": "array", + "description": "The list of LoRA weights to use.", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "default": [] + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 7.5 + }, + "canny_image_url": { + "examples": [ + "https://fal-cdn.batuhan-941.workers.dev/files/rabbit/MiN_j3St9B8esJleCZKMU.jpeg" + ], + "title": "Canny Image Url", + "type": "string", + "description": "The URL of the control image." 
+ }, + "segmentation_preprocess": { + "title": "Segmentation Preprocess", + "type": "boolean", + "description": "Whether to preprocess the segmentation image.", + "default": true + }, + "format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "request_id": { + "title": "Request Id", + "type": "string", + "description": "\n An id bound to a request, can be used with response to identify the request\n itself.\n ", + "default": "" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + }, + "segmentation_image_url": { + "examples": [ + "https://fal-cdn.batuhan-941.workers.dev/files/rabbit/MiN_j3St9B8esJleCZKMU.jpeg" + ], + "title": "Segmentation Image Url", + "type": "string", + "description": "The URL of the control image." + }, + "openpose_image_url": { + "examples": [ + "https://fal-cdn.batuhan-941.workers.dev/files/rabbit/MiN_j3St9B8esJleCZKMU.jpeg" + ], + "title": "Openpose Image Url", + "type": "string", + "description": "The URL of the control image." + }, + "canny_preprocess": { + "title": "Canny Preprocess", + "type": "boolean", + "description": "Whether to preprocess the canny image.", + "default": true + }, + "expand_prompt": { + "title": "Expand Prompt", + "type": "boolean", + "description": "If set to true, the prompt will be expanded with additional prompts.", + "default": false + }, + "depth_image_url": { + "examples": [ + "https://fal-cdn.batuhan-941.workers.dev/files/rabbit/MiN_j3St9B8esJleCZKMU.jpeg" + ], + "title": "Depth Image Url", + "type": "string", + "description": "The URL of the control image." + }, + "normal_preprocess": { + "title": "Normal Preprocess", + "type": "boolean", + "description": "Whether to preprocess the normal image.", + "default": true + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "negative_prompt": { + "examples": [ + "cartoon, illustration, animation. face. male, female", + "ugly, deformed" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + "default": "" + }, + "teed_preprocess": { + "title": "Teed Preprocess", + "type": "boolean", + "description": "Whether to preprocess the teed image.", + "default": true + }, + "num_images": { + "minimum": 1, + "maximum": 8, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "controlnet_conditioning_scale": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Controlnet Conditioning Scale", + "description": "The scale of the controlnet conditioning.", + "default": 0.5 + }, + "safety_checker_version": { + "enum": [ + "v1", + "v2" + ], + "title": "Safety Checker Version", + "type": "string", + "description": "The version of the safety checker to use. v1 is the default CompVis safety checker. 
v2 uses a custom ViT model.", + "default": "v1" + }, + "openpose_preprocess": { + "title": "Openpose Preprocess", + "type": "boolean", + "description": "Whether to preprocess the openpose image.", + "default": true + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 70, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 35 + } + }, + "x-fal-order-properties": [ + "prompt", + "controlnet_conditioning_scale", + "negative_prompt", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "loras", + "embeddings", + "enable_safety_checker", + "safety_checker_version", + "expand_prompt", + "format", + "request_id", + "openpose_image_url", + "openpose_preprocess", + "depth_image_url", + "depth_preprocess", + "teed_image_url", + "teed_preprocess", + "canny_image_url", + "canny_preprocess", + "normal_image_url", + "normal_preprocess", + "segmentation_image_url", + "segmentation_preprocess" + ], + "required": [ + "prompt" + ] + }, + "SdxlControlnetUnionOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Embedding": { + "title": "Embedding", + "type": "object", + "properties": { + "tokens": { + "title": "Tokens", + "type": "array", + "description": "The list of tokens to use for the embedding.", + "items": { + "type": "string" + }, + "default": [ + "", + "" + ] + }, + "path": { + "examples": [ + "https://civitai.com/api/download/models/135931", + "https://filebin.net/3chfqasxpqu21y8n/my-custom-lora-v1.safetensors" + ], + "title": "Path", + "type": "string", + "description": "URL or the path to the embedding weights." 
+ } + }, + "x-fal-order-properties": [ + "path", + "tokens" + ], + "required": [ + "path" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "examples": [ + "https://civitai.com/api/download/models/135931", + "https://filebin.net/3chfqasxpqu21y8n/my-custom-lora-v1.safetensors" + ], + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights. Or HF model name." + }, + "scale": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Scale", + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + }, + "force": { + "title": "Force", + "type": "boolean", + "description": "If set to true, the embedding will be forced to be used.", + "default": false + } + }, + "x-fal-order-properties": [ + "path", + "scale", + "force" + ], + "required": [ + "path" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/sdxl-controlnet-union/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sdxl-controlnet-union/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/sdxl-controlnet-union": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SdxlControlnetUnionInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sdxl-controlnet-union/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SdxlControlnetUnionOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kolors", + "metadata": { + "display_name": "Kolors", + "category": "text-to-image", + "description": "Photorealistic Text-to-Image", + "status": "active", + "tags": [ + "realism", + "diffusion" + ], + "updated_at": "2026-01-26T21:44:43.892Z", + "is_favorited": false, + "thumbnail_url": "https://v2.fal.media/files/bdcf6a7a3f4146c39555e0c195715e65_73e054513f15488f93248ae10d67ece5.png", + "model_url": "https://fal.run/fal-ai/kolors", + "github_url": "https://huggingface.co/Kwai-Kolors/Kolors-diffusers/raw/main/MODEL_LICENSE", + "license_type": "commercial", + "date": "2024-07-24T00:00:00.000Z", + "group": { + "key": "kolors", + "label": "Text to Image" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kolors", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kolors queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kolors", + "category": "text-to-image", + "thumbnailUrl": "https://v2.fal.media/files/bdcf6a7a3f4146c39555e0c195715e65_73e054513f15488f93248ae10d67ece5.png", + "playgroundUrl": "https://fal.ai/models/fal-ai/kolors", + "documentationUrl": "https://fal.ai/models/fal-ai/kolors/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KolorsInput": { + "title": "KolorsInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A young Chinese couple with fair skin, dressed in stylish sportswear, with the modern Beijing city skyline in the background. 
Facial details, clear pores, captured using the latest camera model, close-up shot, ultra-high quality, 8K, visual feast.", + "The image features four mythical beasts: Vermilion Bird, Black Tortoise, Azure Dragon, and White Tiger. The Vermilion Bird is at the top of the image, with feathers as red as fire and a tail as magnificent as a phoenix, its wings spreading like burning flames. The Black Tortoise is at the bottom, depicted as a giant turtle intertwined with a snake. Ancient runes adorn the turtle's shell, and the snake's eyes are cold and sharp. The Azure Dragon is on the right, its long body coiling in the sky, with jade-green scales, flowing whiskers, deer-like horns, and exhaling clouds and mist. The White Tiger is on the left, with a majestic posture, white fur with black stripes, piercing eyes, sharp teeth and claws, surrounded by vast mountains and grasslands." + ], + "title": "Prompt", + "type": "string", + "description": "\n The prompt to use for generating the image. Be as descriptive as possible\n for best results.\n " + }, + "num_images": { + "minimum": 1, + "maximum": 8, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "square_hd" + }, + "output_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "png" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the image to be generated and\n uploaded before returning the response. This will increase the latency of\n the function but it allows you to get the image directly in the response\n without going through the CDN.\n ", + "default": false + }, + "scheduler": { + "enum": [ + "EulerDiscreteScheduler", + "EulerAncestralDiscreteScheduler", + "DPMSolverMultistepScheduler", + "DPMSolverMultistepScheduler_SDE_karras", + "UniPCMultistepScheduler", + "DEISMultistepScheduler" + ], + "title": "Scheduler", + "type": "string", + "description": "The scheduler to use for the model.", + "default": "EulerDiscreteScheduler" + }, + "guidance_scale": { + "minimum": 1, + "maximum": 10, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show\n you.\n ", + "default": 5 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 150, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 50 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Seed" + }, + "negative_prompt": { + "examples": [ + "ugly, deformed, blurry" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small\n details (e.g. 
moustache, blurry, low resolution).\n ", + "default": "" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Enable safety checker.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "guidance_scale", + "num_inference_steps", + "seed", + "sync_mode", + "enable_safety_checker", + "num_images", + "image_size", + "scheduler", + "output_format" + ], + "required": [ + "prompt" + ] + }, + "KolorsOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/kolors/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kolors/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kolors": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KolorsInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kolors/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KolorsOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/stable-cascade", + "metadata": { + "display_name": "Stable Cascade", + "category": "text-to-image", + "description": "Stable Cascade: Image generation on a smaller & cheaper latent space.", + "status": "active", + "tags": [ + "diffusion", + "lcm" + ], + "updated_at": "2026-01-26T21:44:14.835Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/stable-cascade.jpeg", + "model_url": "https://fal.run/fal-ai/stable-cascade", + "github_url": "https://huggingface.co/stabilityai/stable-cascade/blob/main/LICENSE", + "license_type": "research", + "date": "2024-06-25T00:00:00.000Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/stable-cascade", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/stable-cascade queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/stable-cascade", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/stable-cascade.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/stable-cascade", + "documentationUrl": "https://fal.ai/models/fal-ai/stable-cascade/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." 
+ }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "StableCascadeInput": { + "title": "StableCascadeInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "An image of a shiba inu, donning a spacesuit and helmet" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results." + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "square_hd" + }, + "second_stage_guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Decoder Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 0 + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the image will be returned as base64 encoded string.\n ", + "default": false + }, + "first_stage_steps": { + "minimum": 4, + "maximum": 40, + "type": "integer", + "title": "First Stage Steps", + "description": "Number of steps to run the first stage for.", + "default": 20 + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 4 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Cascade\n will output the same image every time.\n " + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to false, the safety checker will be disabled.", + "default": true + }, + "negative_prompt": { + "examples": [ + "ugly, deformed" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. 
moustache, blurry, low resolution).\n ", + "default": "" + }, + "second_stage_steps": { + "minimum": 4, + "maximum": 24, + "type": "integer", + "title": "Second Stage Steps", + "description": "Number of steps to run the second stage for.", + "default": 10 + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "first_stage_steps", + "second_stage_steps", + "guidance_scale", + "second_stage_guidance_scale", + "image_size", + "seed", + "enable_safety_checker", + "num_images", + "sync_mode" + ], + "required": [ + "prompt" + ] + }, + "StableCascadeOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/stable-cascade/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/stable-cascade/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/stable-cascade": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StableCascadeInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/stable-cascade/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StableCascadeOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/fast-sdxl", + "metadata": { + "display_name": "Stable Diffusion XL", + "category": "text-to-image", + "description": "Run SDXL at the speed of light", + "status": "active", + "tags": [ + "diffusion", + "lora", + "embeddings", + "high-res", + "style" + ], + "updated_at": "2026-01-26T21:44:46.660Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/fast-sdxl.jpeg", + "model_url": "https://fal.run/fal-ai/fast-sdxl", + "github_url": "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/LICENSE.md", + "date": "2024-06-12T00:00:00.000Z", + "group": { + "key": "stable-diffusion-xl", + "label": "Text to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/fast-sdxl", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/fast-sdxl queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/fast-sdxl", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/fast-sdxl.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/fast-sdxl", + "documentationUrl": "https://fal.ai/models/fal-ai/fast-sdxl/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." 
+ }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FastSdxlInput": { + "title": "TextToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "photo of a rhino dressed suit and tie sitting at a table in a bar with a bar stools, award winning photography, Elke vogelsang", + "Photo of a classic red mustang car parked in las vegas strip at night" + ], + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results.", + "type": "string", + "title": "Prompt" + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated image.", + "title": "Image Size", + "default": "square_hd" + }, + "embeddings": { + "description": "The list of embeddings to use.", + "type": "array", + "title": "Embeddings", + "items": { + "$ref": "#/components/schemas/Embedding" + }, + "default": [] + }, + "expand_prompt": { + "description": "If set to true, the prompt will be expanded with additional prompts.", + "type": "boolean", + "title": "Expand Prompt", + "default": false + }, + "loras": { + "description": "The list of LoRA weights to use.", + "type": "array", + "title": "Loras", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "default": [] + }, + "enable_safety_checker": { + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 7.5 + }, + "negative_prompt": { + "examples": [ + "cartoon, illustration, animation. face. male, female", + "ugly, deformed" + ], + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + "type": "string", + "title": "Negative Prompt", + "default": "" + }, + "format": { + "enum": [ + "jpeg", + "png" + ], + "description": "The format of the generated image.", + "type": "string", + "title": "Format", + "default": "jpeg" + }, + "num_images": { + "minimum": 1, + "maximum": 8, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "sync_mode": { + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "safety_checker_version": { + "enum": [ + "v1", + "v2" + ], + "description": "The version of the safety checker to use. v1 is the default CompVis safety checker. 
v2 uses a custom ViT model.", + "type": "string", + "title": "Safety Checker Version", + "default": "v1" + }, + "request_id": { + "description": "\n An id bound to a request, can be used with response to identify the request\n itself.\n ", + "type": "string", + "title": "Request Id", + "default": "" + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 25 + }, + "seed": { + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "loras", + "embeddings", + "enable_safety_checker", + "safety_checker_version", + "expand_prompt", + "format", + "request_id" + ], + "required": [ + "prompt" + ] + }, + "FastSdxlOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "description": "The prompt used for generating the image.", + "type": "string", + "title": "Prompt" + }, + "images": { + "description": "The generated image files info.", + "type": "array", + "title": "Images", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "seed": { + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ", + "type": "integer", + "title": "Seed" + }, + "has_nsfw_concepts": { + "description": "Whether the generated images contain NSFW concepts.", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Embedding": { + "title": "Embedding", + "type": "object", + "properties": { + "tokens": { + "description": "The list of tokens to use for the embedding.", + "type": "array", + "title": "Tokens", + "items": { + "type": "string" + }, + "default": [ + "", + "" + ] + }, + "path": { + "examples": [ + "https://civitai.com/api/download/models/135931", + "https://filebin.net/3chfqasxpqu21y8n/my-custom-lora-v1.safetensors" + ], + "description": "URL or the path to the embedding weights.", + "type": "string", + "title": "Path" + } + }, + "x-fal-order-properties": [ + "path", + "tokens" + ], + "required": [ + "path" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "examples": [ + "https://civitai.com/api/download/models/135931", + "https://filebin.net/3chfqasxpqu21y8n/my-custom-lora-v1.safetensors" + ], + "description": "URL or the path to the 
LoRA weights. Or HF model name.", + "type": "string", + "title": "Path" + }, + "scale": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Scale", + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + }, + "force": { + "description": "If set to true, the embedding will be forced to be used.", + "type": "boolean", + "title": "Force", + "default": false + } + }, + "x-fal-order-properties": [ + "path", + "scale", + "force" + ], + "required": [ + "path" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/fast-sdxl/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-sdxl/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/fast-sdxl": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastSdxlInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-sdxl/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastSdxlOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/stable-cascade/sote-diffusion", + "metadata": { + "display_name": "SoteDiffusion", + "category": "text-to-image", + "description": "Anime finetune of Würstchen V3.", + "status": "active", + "tags": [ + "lcm", + "stylized" + ], + "updated_at": "2026-01-26T21:44:47.111Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/sotediffusion.jpeg", + "model_url": "https://fal.run/fal-ai/stable-cascade/sote-diffusion", + "github_url": "https://huggingface.co/stabilityai/stable-cascade/blob/main/LICENSE", + "license_type": "research", + "date": "2024-06-10T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/stable-cascade/sote-diffusion", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/stable-cascade/sote-diffusion queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/stable-cascade/sote-diffusion", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/sotediffusion.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/stable-cascade/sote-diffusion", + "documentationUrl": "https://fal.ai/models/fal-ai/stable-cascade/sote-diffusion/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "StableCascadeSoteDiffusionInput": { + "title": "SoteDiffusionInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "newest, extremely aesthetic, best quality, 1girl, solo, pink hair, blue eyes, long hair, looking at viewer, smile, black background, holding a sign, the text on the sign says \"Hello\"" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results." + }, + "num_images": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": { + "height": 1536, + "width": 1024 + } + }, + "second_stage_guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Decoder Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 2 + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the image will be returned as base64 encoded string.\n ", + "default": false + }, + "first_stage_steps": { + "minimum": 4, + "maximum": 50, + "type": "integer", + "title": "First Stage Steps", + "description": "Number of steps to run the first stage for.", + "default": 25 + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 8 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Cascade\n will output the same image every time.\n " + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to false, the safety checker will be disabled.", + "default": true + }, + "negative_prompt": { + "examples": [ + "very displeasing, worst quality, monochrome, realistic, oldest" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. 
moustache, blurry, low resolution).\n ", + "default": "" + }, + "second_stage_steps": { + "minimum": 4, + "maximum": 24, + "type": "integer", + "title": "Second Stage Steps", + "description": "Number of steps to run the second stage for.", + "default": 10 + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "first_stage_steps", + "second_stage_steps", + "guidance_scale", + "second_stage_guidance_scale", + "image_size", + "seed", + "enable_safety_checker", + "num_images", + "sync_mode" + ], + "required": [ + "prompt" + ] + }, + "StableCascadeSoteDiffusionOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/stable-cascade/sote-diffusion/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/stable-cascade/sote-diffusion/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/stable-cascade/sote-diffusion": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StableCascadeSoteDiffusionInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/stable-cascade/sote-diffusion/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StableCascadeSoteDiffusionOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/luma-photon", + "metadata": { + "display_name": "Luma Photon", + "category": "text-to-image", + "description": "Generate images from your prompts using Luma Photon. Photon is the most creative, personalizable, and intelligent visual model for creatives, bringing a step-function change in the cost of high-quality image generation.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:44:47.468Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/luma-photon.webp", + "model_url": "https://fal.run/fal-ai/luma-photon", + "date": "2024-06-03T00:00:00.000Z", + "group": { + "key": "luma-photon", + "label": "Text to Image" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/luma-photon", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/luma-photon queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/luma-photon", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/luma-photon.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/luma-photon", + "documentationUrl": "https://fal.ai/models/fal-ai/luma-photon/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url."
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LumaPhotonInput": { + "title": "TextToImageRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A teddy bear in sunglasses playing electric guitar and dancing" + ], + "maxLength": 5000, + "type": "string", + "minLength": 3, + "title": "Prompt" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1", + "4:3", + "3:4", + "21:9", + "9:21" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video", + "default": "1:1" + } + }, + "x-fal-order-properties": [ + "prompt", + "aspect_ratio" + ], + "required": [ + "prompt" + ] + }, + "LumaPhotonOutput": { + "title": "T2IOutput", + "type": "object", + "properties": { + "images": { + "title": "Images", + "type": "array", + "description": "The generated image", + "items": { + "$ref": "#/components/schemas/File" + } + } + }, + "x-fal-order-properties": [ + "images" + ], + "required": [ + "images" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/luma-photon/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/luma-photon/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/luma-photon": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LumaPhotonInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/luma-photon/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LumaPhotonOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/lightning-models", + "metadata": { + "display_name": "Lightning Models", + "category": "text-to-image", + "description": "Collection of SDXL Lightning models.", + "status": "active", + "tags": [ + "diffusion", + "lightning" + ], + "updated_at": "2026-01-26T21:44:50.409Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/lightning-models.jpeg", + "model_url": "https://fal.run/fal-ai/lightning-models", + "date": "2024-04-25T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/lightning-models", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/lightning-models queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/lightning-models", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/lightning-models.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/lightning-models", + "documentationUrl": "https://fal.ai/models/fal-ai/lightning-models/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LightningModelsInput": { + "title": "LightningModelsTextToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A hyperdetailed photograph of a Cat dressed as a mafia boss holding a fish walking down a Japanese fish market with an angry face, 8k resolution, best quality, beautiful photograph, dynamic lighting," + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. 
Be as descriptive as possible for best results." + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "default": { + "height": 1024, + "width": 1024 + } + }, + "embeddings": { + "title": "Embeddings", + "type": "array", + "description": "The list of embeddings to use.", + "items": { + "$ref": "#/components/schemas/Embedding" + }, + "default": [] + }, + "expand_prompt": { + "title": "Expand Prompt", + "type": "boolean", + "description": "If set to true, the prompt will be expanded with additional prompts.", + "default": false + }, + "loras": { + "title": "Loras", + "type": "array", + "description": "The list of LoRA weights to use.", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "default": [] + }, + "scheduler": { + "enum": [ + "DPM++ 2M", + "DPM++ 2M Karras", + "DPM++ 2M SDE", + "DPM++ 2M SDE Karras", + "DPM++ SDE", + "DPM++ SDE Karras", + "KDPM 2A", + "Euler", + "Euler (trailing timesteps)", + "Euler A", + "LCM", + "EDMDPMSolverMultistepScheduler", + "TCDScheduler" + ], + "title": "Scheduler", + "type": "string", + "description": "Scheduler / sampler to use for the image denoising process." + }, + "guidance_scale": { + "minimum": 0, + "maximum": 2, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 2 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to use. Use it to address details that you don't want in the image.", + "default": "(worst quality, low quality, normal quality, lowres, low details, oversaturated, undersaturated, overexposed, underexposed, grayscale, bw, bad photo, bad photography, bad art:1.4), (watermark, signature, text font, username, error, logo, words, letters, digits, autograph, trademark, name:1.2), (blur, blurry, grainy), morbid, ugly, asymmetrical, mutated malformed, mutilated, poorly lit, bad shadow, draft, cropped, out of frame, cut off, censored, jpeg artifacts, out of focus, glitch, duplicate, (airbrushed, cartoon, anime, semi-realistic, cgi, render, blender, digital art, manga, amateur:1.3), (3D ,3D Game, 3D Game Scene, 3D Character:1.1), (bad hands, bad anatomy, bad body, bad face, bad teeth, bad arms, bad legs, deformities:1.3)" + }, + "format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "num_images": { + "minimum": 1, + "maximum": 8, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "model_name": { + "examples": [ + "Lykon/dreamshaper-xl-lightning", + "SG161222/RealVisXL_V4.0_Lightning" + ], + "title": "Model Name", + "type": "string", + "description": "The Lightning model to use." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. 
This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ", + "default": false + }, + "safety_checker_version": { + "enum": [ + "v1", + "v2" + ], + "title": "Safety Checker Version", + "type": "string", + "description": "The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model.", + "default": "v1" + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 12, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 5 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + } + }, + "x-fal-order-properties": [ + "model_name", + "prompt", + "negative_prompt", + "image_size", + "num_inference_steps", + "guidance_scale", + "loras", + "embeddings", + "scheduler", + "expand_prompt", + "num_images", + "seed", + "enable_safety_checker", + "sync_mode", + "format", + "safety_checker_version" + ], + "required": [ + "prompt" + ] + }, + "LightningModelsOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Embedding": { + "title": "Embedding", + "type": "object", + "properties": { + "tokens": { + "title": "Tokens", + "type": "array", + "description": "The list of tokens to use for the embedding.", + "items": { + "type": "string" + }, + "default": [ + "", + "" + ] + }, + "path": { + "examples": [ + "https://civitai.com/api/download/models/135931", + "https://filebin.net/3chfqasxpqu21y8n/my-custom-lora-v1.safetensors" + ], + "title": "Path", + "type": "string", + "description": "URL or the path to the embedding weights." 
+ } + }, + "x-fal-order-properties": [ + "path", + "tokens" + ], + "required": [ + "path" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "examples": [ + "https://civitai.com/api/download/models/135931", + "https://filebin.net/3chfqasxpqu21y8n/my-custom-lora-v1.safetensors" + ], + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights. Or HF model name." + }, + "scale": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Scale", + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + }, + "force": { + "title": "Force", + "type": "boolean", + "description": "If set to true, the embedding will be forced to be used.", + "default": false + } + }, + "x-fal-order-properties": [ + "path", + "scale", + "force" + ], + "required": [ + "path" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/lightning-models/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/lightning-models/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/lightning-models": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LightningModelsInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/lightning-models/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LightningModelsOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/playground-v25", + "metadata": { + "display_name": "Playground v2.5", + "category": "text-to-image", + "description": "State-of-the-art open-source model in aesthetic quality", + "status": "active", + "tags": [ + "artistic", + "style" + ], + "updated_at": "2026-01-26T21:44:15.224Z", + "is_favorited": false, + "thumbnail_url": "https://fal-cdn.batuhan-941.workers.dev/files/monkey/8WXjrk5HEam79CPlQlo5T.jpeg", + "model_url": "https://fal.run/fal-ai/playground-v25", + "github_url": "https://huggingface.co/playgroundai/playground-v2.5-1024px-aesthetic/blob/main/LICENSE.md", + "license_type": "commercial", + "date": "2024-04-25T00:00:00.000Z", + "group": { + "key": "playground-v25", + "label": "Text to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/playground-v25", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/playground-v25 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/playground-v25", + "category": "text-to-image", + "thumbnailUrl": "https://fal-cdn.batuhan-941.workers.dev/files/monkey/8WXjrk5HEam79CPlQlo5T.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/playground-v25", + "documentationUrl": "https://fal.ai/models/fal-ai/playground-v25/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "PlaygroundV25Input": { + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "num_images", + "embeddings", + "enable_safety_checker", + "safety_checker_version", + "expand_prompt", + "format", + "guidance_rescale", + "request_id" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Masterpiece (wide angle shot) , Easterbunny crafting an incantation, (creating a little colorful magic egg in a nest:1.6), standing on an old carved table in a colorful factory laboratory. fantastic view" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results." + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "square_hd" + }, + "embeddings": { + "title": "Embeddings", + "type": "array", + "description": "The list of embeddings to use.", + "items": { + "$ref": "#/components/schemas/Embedding" + }, + "default": [] + }, + "expand_prompt": { + "title": "Expand Prompt", + "type": "boolean", + "description": "If set to true, the prompt will be expanded with additional prompts.", + "default": false + }, + "guidance_rescale": { + "minimum": 0, + "title": "Guidance Rescale", + "type": "number", + "description": "The rescale factor for the CFG.", + "maximum": 1, + "default": 0 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance scale (CFG)", + "type": "number", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "maximum": 20, + "default": 3 + }, + "negative_prompt": { + "examples": [ + "cartoon, illustration, animation. face. male, female", + "ugly, deformed" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + "default": "" + }, + "format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "description": "The number of images to generate.", + "maximum": 8, + "default": 1 + }, + "safety_checker_version": { + "enum": [ + "v1", + "v2" + ], + "title": "Safety Checker Version", + "type": "string", + "description": "The version of the safety checker to use. v1 is the default CompVis safety checker. 
v2 uses a custom ViT model.", + "default": "v1" + }, + "request_id": { + "title": "Request Id", + "type": "string", + "description": "\n An id bound to a request, can be used with response to identify the request\n itself.\n ", + "default": "" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "description": "The number of inference steps to perform.", + "maximum": 50, + "default": 25 + } + }, + "title": "TextToImagePlaygroundv25Input", + "required": [ + "prompt" + ] + }, + "PlaygroundV25Output": { + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "title": "Output", + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "x-fal-order-properties": [ + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "title": "ImageSize" + }, + "Embedding": { + "x-fal-order-properties": [ + "path", + "tokens" + ], + "type": "object", + "properties": { + "tokens": { + "title": "Tokens", + "type": "array", + "description": "The list of tokens to use for the embedding.", + "items": { + "type": "string" + }, + "default": [ + "", + "" + ] + }, + "path": { + "examples": [ + "https://civitai.com/api/download/models/135931", + "https://filebin.net/3chfqasxpqu21y8n/my-custom-lora-v1.safetensors" + ], + "title": "Path", + "type": "string", + "description": "URL or the path to the embedding weights." 
+ } + }, + "title": "Embedding", + "required": [ + "path" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "title": "Image", + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/playground-v25/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/playground-v25/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/playground-v25": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PlaygroundV25Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/playground-v25/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PlaygroundV25Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/realistic-vision", + "metadata": { + "display_name": "Realistic Vision", + "category": "text-to-image", + "description": "Generate realistic images.", + "status": "active", + "tags": [ + "realism", + "diffusion" + ], + "updated_at": "2026-01-26T21:44:50.668Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/realistic-vision.jpeg", + "model_url": "https://fal.run/fal-ai/realistic-vision", + "date": "2024-04-25T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/realistic-vision", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/realistic-vision queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/realistic-vision", + "category": "text-to-image", + "thumbnailUrl": 
"https://storage.googleapis.com/falserverless/gallery/realistic-vision.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/realistic-vision", + "documentationUrl": "https://fal.ai/models/fal-ai/realistic-vision/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "RealisticVisionInput": { + "title": "RealisticVisionTextToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A hyperdetailed photograph of a Cat dressed as a mafia boss holding a fish walking down a Japanese fish market with an angry face, 8k resolution, best quality, beautiful photograph, dynamic lighting," + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results." + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "default": { + "height": 1024, + "width": 1024 + } + }, + "embeddings": { + "title": "Embeddings", + "type": "array", + "description": "The list of embeddings to use.", + "items": { + "$ref": "#/components/schemas/Embedding" + }, + "default": [] + }, + "expand_prompt": { + "title": "Expand Prompt", + "type": "boolean", + "description": "If set to true, the prompt will be expanded with additional prompts.", + "default": false + }, + "loras": { + "title": "Loras", + "type": "array", + "description": "The list of LoRA weights to use.", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "default": [] + }, + "guidance_rescale": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Guidance Rescale", + "description": "The rescale factor for the CFG.", + "default": 0 + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.", + "default": 5 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to use. 
Use it to address details that you don't want in the image.", + "default": "(worst quality, low quality, normal quality, lowres, low details, oversaturated, undersaturated, overexposed, underexposed, grayscale, bw, bad photo, bad photography, bad art:1.4), (watermark, signature, text font, username, error, logo, words, letters, digits, autograph, trademark, name:1.2), (blur, blurry, grainy), morbid, ugly, asymmetrical, mutated malformed, mutilated, poorly lit, bad shadow, draft, cropped, out of frame, cut off, censored, jpeg artifacts, out of focus, glitch, duplicate, (airbrushed, cartoon, anime, semi-realistic, cgi, render, blender, digital art, manga, amateur:1.3), (3D ,3D Game, 3D Game Scene, 3D Character:1.1), (bad hands, bad anatomy, bad body, bad face, bad teeth, bad arms, bad legs, deformities:1.3)" + }, + "format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "num_images": { + "minimum": 1, + "maximum": 8, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "model_name": { + "examples": [ + "SG161222/Realistic_Vision_V6.0_B1_noVAE", + "SG161222/RealVisXL_V4.0" + ], + "title": "Model Name", + "type": "string", + "description": "The Realistic Vision model to use." + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "safety_checker_version": { + "enum": [ + "v1", + "v2" + ], + "title": "Safety Checker Version", + "type": "string", + "description": "The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model.", + "default": "v1" + }, + "request_id": { + "title": "Request Id", + "type": "string", + "description": "\n An id bound to a request, can be used with response to identify the request\n itself.\n ", + "default": "" + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 70, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 35 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + } + }, + "x-fal-order-properties": [ + "model_name", + "prompt", + "negative_prompt", + "image_size", + "num_inference_steps", + "guidance_scale", + "loras", + "embeddings", + "expand_prompt", + "num_images", + "seed", + "enable_safety_checker", + "sync_mode", + "format", + "safety_checker_version", + "guidance_rescale", + "request_id" + ], + "required": [ + "prompt" + ] + }, + "RealisticVisionOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." 
+ }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Embedding": { + "title": "Embedding", + "type": "object", + "properties": { + "tokens": { + "title": "Tokens", + "type": "array", + "description": "The list of tokens to use for the embedding.", + "items": { + "type": "string" + }, + "default": [ + "", + "" + ] + }, + "path": { + "examples": [ + "https://civitai.com/api/download/models/135931", + "https://filebin.net/3chfqasxpqu21y8n/my-custom-lora-v1.safetensors" + ], + "title": "Path", + "type": "string", + "description": "URL or the path to the embedding weights." + } + }, + "x-fal-order-properties": [ + "path", + "tokens" + ], + "required": [ + "path" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "examples": [ + "https://civitai.com/api/download/models/135931", + "https://filebin.net/3chfqasxpqu21y8n/my-custom-lora-v1.safetensors" + ], + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights. Or HF model name." + }, + "scale": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Scale", + "description": "\n The scale of the LoRA weight. 
This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + }, + "force": { + "title": "Force", + "type": "boolean", + "description": "If set to true, the embedding will be forced to be used.", + "default": false + } + }, + "x-fal-order-properties": [ + "path", + "scale", + "force" + ], + "required": [ + "path" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/realistic-vision/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/realistic-vision/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/realistic-vision": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RealisticVisionInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/realistic-vision/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RealisticVisionOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/dreamshaper", + "metadata": { + "display_name": "Dreamshaper", + "category": "text-to-image", + "description": "Dreamshaper model.", + "status": "active", + "tags": [ + "stylized", + "diffusion" + ], + "updated_at": "2026-01-26T21:44:50.268Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/dreamshaper.jpeg", + "model_url": "https://fal.run/fal-ai/dreamshaper", + "date": "2024-04-25T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/dreamshaper", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/dreamshaper queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/dreamshaper", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/dreamshaper.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/dreamshaper", + "documentationUrl": "https://fal.ai/models/fal-ai/dreamshaper/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "DreamshaperInput": { + "title": "DreamshaperTextToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A hyperdetailed photograph of a Cat dressed as a mafia boss holding a fish walking down a Japanese fish market with an angry face, 8k resolution, best quality, beautiful photograph, dynamic lighting," + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results." 
+ }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "default": { + "height": 1024, + "width": 1024 + } + }, + "embeddings": { + "title": "Embeddings", + "type": "array", + "description": "The list of embeddings to use.", + "items": { + "$ref": "#/components/schemas/Embedding" + }, + "default": [] + }, + "expand_prompt": { + "title": "Expand Prompt", + "type": "boolean", + "description": "If set to true, the prompt will be expanded with additional prompts.", + "default": false + }, + "loras": { + "title": "Loras", + "type": "array", + "description": "The list of LoRA weights to use.", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "default": [] + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.", + "default": 5 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to use. Use it to address details that you don't want in the image.", + "default": "(worst quality, low quality, normal quality, lowres, low details, oversaturated, undersaturated, overexposed, underexposed, grayscale, bw, bad photo, bad photography, bad art:1.4), (watermark, signature, text font, username, error, logo, words, letters, digits, autograph, trademark, name:1.2), (blur, blurry, grainy), morbid, ugly, asymmetrical, mutated malformed, mutilated, poorly lit, bad shadow, draft, cropped, out of frame, cut off, censored, jpeg artifacts, out of focus, glitch, duplicate, (airbrushed, cartoon, anime, semi-realistic, cgi, render, blender, digital art, manga, amateur:1.3), (3D ,3D Game, 3D Game Scene, 3D Character:1.1), (bad hands, bad anatomy, bad body, bad face, bad teeth, bad arms, bad legs, deformities:1.3)" + }, + "format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "num_images": { + "minimum": 1, + "maximum": 8, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "model_name": { + "enum": [ + "Lykon/dreamshaper-xl-1-0", + "Lykon/dreamshaper-xl-v2-turbo", + "Lykon/dreamshaper-8" + ], + "title": "Model Name", + "type": "string", + "description": "The Dreamshaper model to use.", + "examples": [ + "Lykon/dreamshaper-8", + "Lykon/dreamshaper-xl-1-0", + "Lykon/dreamshaper-xl-v2-turbo" + ] + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ", + "default": false + }, + "safety_checker_version": { + "enum": [ + "v1", + "v2" + ], + "title": "Safety Checker Version", + "type": "string", + "description": "The version of the safety checker to use. 
v1 is the default CompVis safety checker. v2 uses a custom ViT model.", + "default": "v1" + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 70, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 35 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + } + }, + "x-fal-order-properties": [ + "model_name", + "prompt", + "negative_prompt", + "image_size", + "num_inference_steps", + "guidance_scale", + "loras", + "embeddings", + "expand_prompt", + "num_images", + "seed", + "enable_safety_checker", + "sync_mode", + "format", + "safety_checker_version" + ], + "required": [ + "prompt" + ] + }, + "DreamshaperOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Embedding": { + "title": "Embedding", + "type": "object", + "properties": { + "tokens": { + "title": "Tokens", + "type": "array", + "description": "The list of tokens to use for the embedding.", + "items": { + "type": "string" + }, + "default": [ + "", + "" + ] + }, + "path": { + "examples": [ + "https://civitai.com/api/download/models/135931", + "https://filebin.net/3chfqasxpqu21y8n/my-custom-lora-v1.safetensors" + ], + "title": "Path", + "type": "string", + "description": "URL or the path to the embedding weights." + } + }, + "x-fal-order-properties": [ + "path", + "tokens" + ], + "required": [ + "path" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "examples": [ + "https://civitai.com/api/download/models/135931", + "https://filebin.net/3chfqasxpqu21y8n/my-custom-lora-v1.safetensors" + ], + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights. Or HF model name." 
+ }, + "scale": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Scale", + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + }, + "force": { + "title": "Force", + "type": "boolean", + "description": "If set to true, the embedding will be forced to be used.", + "default": false + } + }, + "x-fal-order-properties": [ + "path", + "scale", + "force" + ], + "required": [ + "path" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/dreamshaper/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/dreamshaper/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/dreamshaper": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DreamshaperInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/dreamshaper/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DreamshaperOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/stable-diffusion-v15", + "metadata": { + "display_name": "Stable Diffusion v1.5", + "category": "text-to-image", + "description": "Stable Diffusion v1.5", + "status": "active", + "tags": [ + "diffusion" + ], + "updated_at": "2026-01-26T21:44:51.683Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/sd-loras.jpeg", + "model_url": "https://fal.run/fal-ai/stable-diffusion-v15", + "github_url": "https://huggingface.co/runwayml/stable-diffusion-v1-5", + "date": "2024-04-16T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/stable-diffusion-v15", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/stable-diffusion-v15 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/stable-diffusion-v15", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/sd-loras.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/stable-diffusion-v15", + "documentationUrl": "https://fal.ai/models/fal-ai/stable-diffusion-v15/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
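The dreamshaper paths above follow fal's standard queue pattern: POST the input to submit, poll the status URL, then GET the result. A minimal sketch of that lifecycle in TypeScript follows; it uses only global fetch (Node 18+), and the `FAL_KEY` variable, the `Key <token>` Authorization format, and the one-second poll interval are assumptions, since the schema only says the key is sent in the `Authorization` header.

```ts
// Minimal sketch of the queue lifecycle described by the paths above.
const BASE = 'https://queue.fal.run'

const headers = {
  Authorization: `Key ${process.env.FAL_KEY ?? ''}`, // assumed header format
  'Content-Type': 'application/json',
}

interface QueueStatus {
  status: 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED'
  request_id: string
  queue_position?: number
}

async function runDreamshaper(prompt: string): Promise<unknown> {
  // 1. Submit: POST /fal-ai/dreamshaper with a DreamshaperInput body;
  //    `prompt` is the only required property.
  const submitted = await fetch(`${BASE}/fal-ai/dreamshaper`, {
    method: 'POST',
    headers,
    body: JSON.stringify({ prompt }),
  })
  const { request_id } = (await submitted.json()) as QueueStatus

  // 2. Poll GET .../requests/{request_id}/status until COMPLETED.
  for (;;) {
    const res = await fetch(
      `${BASE}/fal-ai/dreamshaper/requests/${request_id}/status`,
      { headers },
    )
    const { status } = (await res.json()) as QueueStatus
    if (status === 'COMPLETED') break
    await new Promise((r) => setTimeout(r, 1000)) // poll interval is arbitrary
  }

  // 3. Fetch the DreamshaperOutput:
  //    { images, timings, seed, has_nsfw_concepts, prompt }
  const result = await fetch(
    `${BASE}/fal-ai/dreamshaper/requests/${request_id}`,
    { headers },
  )
  return result.json()
}
```

The same four-route shape (submit, status, cancel, result) repeats for every endpoint in this file, so only the path prefix and input/output schemas change between models.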
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "StableDiffusionV15Input": { + "title": "TextToImageSD15Input", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "photo of a rhino dressed suit and tie sitting at a table in a bar with a bar stools, award winning photography, Elke vogelsang", + "Photo of a classic red mustang car parked in las vegas strip at night" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results." + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "square" + }, + "embeddings": { + "title": "Embeddings", + "type": "array", + "description": "The list of embeddings to use.", + "items": { + "$ref": "#/components/schemas/Embedding" + }, + "default": [] + }, + "expand_prompt": { + "title": "Expand Prompt", + "type": "boolean", + "description": "If set to true, the prompt will be expanded with additional prompts.", + "default": false + }, + "loras": { + "title": "Loras", + "type": "array", + "description": "The list of LoRA weights to use.", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "default": [] + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 7.5 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "negative_prompt": { + "examples": [ + "cartoon, illustration, animation. face. male, female", + "ugly, deformed" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + "default": "" + }, + "format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "num_images": { + "minimum": 1, + "maximum": 8, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "safety_checker_version": { + "enum": [ + "v1", + "v2" + ], + "title": "Safety Checker Version", + "type": "string", + "description": "The version of the safety checker to use. v1 is the default CompVis safety checker. 
v2 uses a custom ViT model.", + "default": "v1" + }, + "request_id": { + "title": "Request Id", + "type": "string", + "description": "\n An id bound to a request, can be used with response to identify the request\n itself.\n ", + "default": "" + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 25 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "loras", + "embeddings", + "enable_safety_checker", + "safety_checker_version", + "expand_prompt", + "format", + "request_id" + ], + "required": [ + "prompt" + ] + }, + "StableDiffusionV15Output": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Embedding": { + "title": "Embedding", + "type": "object", + "properties": { + "tokens": { + "title": "Tokens", + "type": "array", + "description": "The list of tokens to use for the embedding.", + "items": { + "type": "string" + }, + "default": [ + "", + "" + ] + }, + "path": { + "examples": [ + "https://civitai.com/api/download/models/135931", + "https://filebin.net/3chfqasxpqu21y8n/my-custom-lora-v1.safetensors" + ], + "title": "Path", + "type": "string", + "description": "URL or the path to the embedding weights." 
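The `image_size` field in these input schemas accepts either a preset name or an explicit ImageSize object (the `anyOf` above). A union type mirrors the two arms exactly; the type names here are illustrative, but the values and bounds come straight from the schema.

```ts
// `image_size` per the anyOf above: a preset string or a pixel object.
type ImageSizePreset =
  | 'square_hd'
  | 'square'
  | 'portrait_4_3'
  | 'portrait_16_9'
  | 'landscape_4_3'
  | 'landscape_16_9'

interface ImageSize {
  width: number // integer, 0 < width <= 14142
  height: number // integer, 0 < height <= 14142
}

type ImageSizeInput = ImageSizePreset | ImageSize

// Both forms are valid values for `image_size`:
const byPreset: ImageSizeInput = 'landscape_16_9'
const byPixels: ImageSizeInput = { width: 1024, height: 768 }
```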
+ } + }, + "x-fal-order-properties": [ + "path", + "tokens" + ], + "required": [ + "path" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "examples": [ + "https://civitai.com/api/download/models/135931", + "https://filebin.net/3chfqasxpqu21y8n/my-custom-lora-v1.safetensors" + ], + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights. Or HF model name." + }, + "scale": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Scale", + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + }, + "force": { + "title": "Force", + "type": "boolean", + "description": "If set to true, the embedding will be forced to be used.", + "default": false + } + }, + "x-fal-order-properties": [ + "path", + "scale", + "force" + ], + "required": [ + "path" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/stable-diffusion-v15/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/stable-diffusion-v15/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/stable-diffusion-v15": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StableDiffusionV15Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/stable-diffusion-v15/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StableDiffusionV15Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/layer-diffusion", + "metadata": { + "display_name": "Layer Diffusion XL", + "category": "text-to-image", + "description": "SDXL with an alpha channel.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:44:52.071Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/model_tests/layer_diffusion/309211077-38ace070-6530-43c9-9ca1-c98aa5b7a0ed.png", + "model_url": "https://fal.run/fal-ai/layer-diffusion", + "github_url": "https://github.com/huchenlei/ComfyUI-layerdiffuse/blob/main/LICENSE", + "date": "2024-04-13T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/layer-diffusion", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/layer-diffusion queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/layer-diffusion", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/model_tests/layer_diffusion/309211077-38ace070-6530-43c9-9ca1-c98aa5b7a0ed.png", + "playgroundUrl": "https://fal.ai/models/fal-ai/layer-diffusion", + "documentationUrl": "https://fal.ai/models/fal-ai/layer-diffusion/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LayerDiffusionInput": { + "title": "Input", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "a male army soldier holding a gun" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. 
Be as descriptive as possible for best results.", + "default": "" + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance Scale", + "description": "The guidance scale for the model.", + "default": 8 + }, + "num_inference_steps": { + "minimum": 10, + "maximum": 40, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps for the model.", + "default": 20 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The prompt to use for generating the negative image. Be as descriptive as possible for best results.", + "default": "text, watermark" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to false, the safety checker will be disabled.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "guidance_scale", + "num_inference_steps", + "seed", + "enable_safety_checker" + ] + }, + "LayerDiffusionOutput": { + "title": "Output", + "type": "object", + "properties": { + "image": { + "title": "Image", + "description": "The URL of the generated image.", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used to generate the image." + } + }, + "x-fal-order-properties": [ + "image", + "seed" + ], + "required": [ + "image", + "seed" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + } + }, + "description": "Represents an image file.", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/layer-diffusion/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
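Note that layer-diffusion returns a single `image` object plus a seed, not the `images` array used by the other endpoints in this file. A sketch of the result shape, with field names mirroring the Image schema above (the interface name is illustrative):

```ts
// LayerDiffusionOutput: one image with alpha, not an array.
interface LayerDiffusionOutput {
  image: {
    url: string // where the file can be downloaded from (only required field)
    content_type?: string // e.g. "image/png"; PNG carries the alpha channel
    file_name?: string
    file_size?: number // size in bytes
    width?: number
    height?: number
  }
  seed: number
}

// Given a result from GET /fal-ai/layer-diffusion/requests/{request_id}:
const downloadUrl = (out: LayerDiffusionOutput) => out.image.url
```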
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/layer-diffusion/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/layer-diffusion": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LayerDiffusionInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/layer-diffusion/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LayerDiffusionOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/fast-lightning-sdxl", + "metadata": { + "display_name": "Stable Diffusion XL Lightning", + "category": "text-to-image", + "description": "Run SDXL at the speed of light", + "status": "active", + "tags": [ + "diffusion", + "lightning", + "real-time" + ], + "updated_at": "2026-01-26T21:44:15.481Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/stable-diffusion-xl-lightning.webp", + "thumbnail_animated_url": "https://storage.googleapis.com/falserverless/gallery/stable-diffusion-xl-lightning-animated.webp", + "model_url": "https://fal.run/fal-ai/fast-lightning-sdxl", + "github_url": "https://huggingface.co/ByteDance/SDXL-Lightning/blob/main/LICENSE.md", + "license_type": "commercial", + "date": "2024-04-11T00:00:00.000Z", + "group": { + "key": "stable-diffusion-xl-lightning", + "label": "Text to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/fast-lightning-sdxl", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/fast-lightning-sdxl queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/fast-lightning-sdxl", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/stable-diffusion-xl-lightning.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/fast-lightning-sdxl", + "documentationUrl": "https://fal.ai/models/fal-ai/fast-lightning-sdxl/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + 
"COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FastLightningSdxlInput": { + "title": "TextToImageLightningInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "photo of a girl smiling during a sunset, with lightnings in the background" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results." + }, + "num_images": { + "minimum": 1, + "maximum": 8, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "square_hd" + }, + "format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "embeddings": { + "title": "Embeddings", + "type": "array", + "description": "The list of embeddings to use.", + "items": { + "$ref": "#/components/schemas/Embedding" + }, + "default": [] + }, + "expand_prompt": { + "title": "Expand Prompt", + "type": "boolean", + "description": "If set to true, the prompt will be expanded with additional prompts.", + "default": false + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "guidance_rescale": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Guidance Rescale", + "description": "The rescale factor for the CFG.", + "default": 0 + }, + "safety_checker_version": { + "enum": [ + "v1", + "v2" + ], + "title": "Safety Checker Version", + "type": "string", + "description": "The version of the safety checker to use. v1 is the default CompVis safety checker. 
v2 uses a custom ViT model.", + "default": "v1" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "num_inference_steps": { + "enum": [ + "1", + "2", + "4", + "8" + ], + "title": "Num Inference Steps", + "type": "string", + "description": "The number of inference steps to perform.", + "default": 4 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + }, + "request_id": { + "title": "Request Id", + "type": "string", + "description": "\n An id bound to a request, can be used with response to identify the request\n itself.\n ", + "default": "" + } + }, + "x-fal-order-properties": [ + "prompt", + "image_size", + "num_inference_steps", + "seed", + "sync_mode", + "num_images", + "embeddings", + "enable_safety_checker", + "safety_checker_version", + "expand_prompt", + "format", + "guidance_rescale", + "request_id" + ], + "required": [ + "prompt" + ] + }, + "FastLightningSdxlOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Embedding": { + "title": "Embedding", + "type": "object", + "properties": { + "tokens": { + "title": "Tokens", + "type": "array", + "description": "The list of tokens to use for the embedding.", + "items": { + "type": "string" + }, + "default": [ + "", + "" + ] + }, + "path": { + "examples": [ + "https://civitai.com/api/download/models/135931", + "https://filebin.net/3chfqasxpqu21y8n/my-custom-lora-v1.safetensors" + ], + "title": "Path", + "type": "string", + "description": "URL or the path to the embedding weights." 
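One quirk worth flagging in the fast-lightning-sdxl input above: `num_inference_steps` is declared as an enum of strings ("1", "2", "4", "8") even though its default is the integer 4. A payload that matches the declared enum (the values below are illustrative, with the prompt reused from the schema's own example):

```ts
// FastLightningSdxlInput sketch; steps sent in the string form the enum declares.
const fastLightningInput = {
  prompt:
    'photo of a girl smiling during a sunset, with lightnings in the background',
  num_inference_steps: '4', // string per the enum, despite the integer default
  image_size: 'square_hd', // this endpoint's default
  num_images: 1,
}
```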
+ } + }, + "x-fal-order-properties": [ + "path", + "tokens" + ], + "required": [ + "path" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/fast-lightning-sdxl/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-lightning-sdxl/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/fast-lightning-sdxl": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastLightningSdxlInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-lightning-sdxl/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastLightningSdxlOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/fast-fooocus-sdxl/image-to-image", + "metadata": { + "display_name": "Fooocus", + "category": "text-to-image", + "description": "Fooocus extreme speed mode as a standalone app.", + "status": "active", + "tags": [ + "stylized" + ], + "updated_at": "2026-01-26T21:44:53.355Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/fooocus/fal_ai_fooocus_cyberpunk-city.jpeg", + "model_url": "https://fal.run/fal-ai/fast-fooocus-sdxl/image-to-image", + "github_url": "https://github.com/lllyasviel/Fooocus/blob/main/LICENSE", + "date": "2024-03-13T00:00:00.000Z", + "group": { + "key": "fooocus-extreme-speed", + "label": "Image to Image" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for 
fal-ai/fast-fooocus-sdxl/image-to-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/fast-fooocus-sdxl/image-to-image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/fast-fooocus-sdxl/image-to-image", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/fooocus/fal_ai_fooocus_cyberpunk-city.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/fast-fooocus-sdxl/image-to-image", + "documentationUrl": "https://fal.ai/models/fal-ai/fast-fooocus-sdxl/image-to-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FastFooocusSdxlImageToImageInput": { + "title": "ImageToImageFooocusInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "an island near sea, with seagulls, moon shining over the sea, light house, boats int he background, fish flying over the sea" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results." + }, + "enable_refiner": { + "title": "Enable Refiner", + "type": "boolean", + "description": "If set to true, a smaller model will try to refine the output after it was processed.", + "default": true + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image. 
Leave it none to automatically infer from the prompt image.", + "examples": [ + null + ], + "nullable": true + }, + "embeddings": { + "title": "Embeddings", + "type": "array", + "description": "The list of embeddings to use.", + "items": { + "$ref": "#/components/schemas/Embedding" + }, + "default": [] + }, + "expand_prompt": { + "title": "Expand Prompt", + "type": "boolean", + "description": "If set to true, the prompt will be expanded with additional prompts.", + "default": true + }, + "guidance_rescale": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Guidance Rescale", + "description": "The rescale factor for the CFG.", + "default": 0 + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 2 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "negative_prompt": { + "examples": [ + "cartoon, illustration, animation. face. male, female" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use.Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + "default": "" + }, + "format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "num_images": { + "minimum": 1, + "maximum": 8, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "image_url": { + "examples": [ + "https://fal-cdn.batuhan-941.workers.dev/files/tiger/IExuP-WICqaIesLZAZPur.jpeg" + ], + "title": "Image Url", + "type": "string", + "description": "The URL of the image to use as a starting point for the generation." + }, + "strength": { + "minimum": 0.05, + "maximum": 1, + "type": "number", + "title": "Strength", + "description": "determines how much the generated image resembles the initial image", + "default": 0.95 + }, + "safety_checker_version": { + "enum": [ + "v1", + "v2" + ], + "title": "Safety Checker Version", + "type": "string", + "description": "The version of the safety checker to use. v1 is the default CompVis safety checker. 
v2 uses a custom ViT model.", + "default": "v1" + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 24, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 8 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + } + }, + "x-fal-order-properties": [ + "image_url", + "prompt", + "negative_prompt", + "image_size", + "num_inference_steps", + "guidance_scale", + "strength", + "seed", + "num_images", + "embeddings", + "enable_safety_checker", + "safety_checker_version", + "expand_prompt", + "format", + "guidance_rescale", + "enable_refiner" + ], + "required": [ + "image_url", + "prompt" + ] + }, + "FastFooocusSdxlImageToImageOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Embedding": { + "title": "Embedding", + "type": "object", + "properties": { + "tokens": { + "title": "Tokens", + "type": "array", + "description": "The list of tokens to use for the embedding.", + "items": { + "type": "string" + }, + "default": [ + "", + "" + ] + }, + "path": { + "examples": [ + "https://civitai.com/api/download/models/135931", + "https://filebin.net/3chfqasxpqu21y8n/my-custom-lora-v1.safetensors" + ], + "title": "Path", + "type": "string", + "description": "URL or the path to the embedding weights." 
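A sketch of a FastFooocusSdxlImageToImageInput payload: `image_url` and `prompt` are the two required fields, and `strength` ranges 0.05-1 (default 0.95) and, per the description above, determines how much the result resembles the initial image. The concrete values below are illustrative; the URLs come from the schema's examples.

```ts
// Image-to-image payload for fal-ai/fast-fooocus-sdxl/image-to-image.
const fooocusImageToImageInput = {
  image_url:
    'https://fal-cdn.batuhan-941.workers.dev/files/tiger/IExuP-WICqaIesLZAZPur.jpeg',
  prompt: 'an island near sea, with seagulls, moon shining over the sea',
  strength: 0.6, // illustrative; see the strength description above
  num_inference_steps: 8, // default; this endpoint caps steps at 24
  enable_refiner: true, // default: a smaller model refines the output
}
```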
+ } + }, + "x-fal-order-properties": [ + "path", + "tokens" + ], + "required": [ + "path" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/fast-fooocus-sdxl/image-to-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-fooocus-sdxl/image-to-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/fast-fooocus-sdxl/image-to-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastFooocusSdxlImageToImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-fooocus-sdxl/image-to-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastFooocusSdxlImageToImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/fast-sdxl-controlnet-canny", + "metadata": { + "display_name": "ControlNet SDXL", + "category": "text-to-image", + "description": "Generate Images with ControlNet.", + "status": "active", + "tags": [ + "diffusion", + "controlnet", + "manipulation" + ], + "updated_at": "2026-01-26T21:44:54.222Z", + "is_favorited": false, + "thumbnail_url": "https://fal-cdn.batuhan-941.workers.dev/files/rabbit/ynzNm1f0ZoDCuOvAE9tKR.jpeg", + "model_url": "https://fal.run/fal-ai/fast-sdxl-controlnet-canny", + "date": "2024-02-28T00:00:00.000Z", + "group": { + "key": "controlnet-sdxl", + "label": "Text to Image" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for 
fal-ai/fast-sdxl-controlnet-canny", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/fast-sdxl-controlnet-canny queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/fast-sdxl-controlnet-canny", + "category": "text-to-image", + "thumbnailUrl": "https://fal-cdn.batuhan-941.workers.dev/files/rabbit/ynzNm1f0ZoDCuOvAE9tKR.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/fast-sdxl-controlnet-canny", + "documentationUrl": "https://fal.ai/models/fal-ai/fast-sdxl-controlnet-canny/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FastSdxlControlnetCannyInput": { + "title": "TextToImageControlNetInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Ice fortress, aurora skies, polar wildlife, twilight" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results." + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image. Leave it none to automatically infer from the control image.", + "nullable": true + }, + "expand_prompt": { + "title": "Expand Prompt", + "type": "boolean", + "description": "If set to true, the prompt will be expanded with additional prompts.", + "default": false + }, + "loras": { + "title": "Loras", + "type": "array", + "description": "The list of LoRA weights to use.", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "default": [] + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance scale (CFG)", + "type": "number", + "maximum": 20, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 7.5 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": false + }, + "negative_prompt": { + "examples": [ + "cartoon, illustration, animation. face. male, female", + "ugly, deformed" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. 
moustache, blurry, low resolution).\n ", + "default": "" + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 8, + "description": "The number of images to generate.", + "default": 1 + }, + "controlnet_conditioning_scale": { + "minimum": 0, + "title": "Controlnet Conditioning Scale", + "type": "number", + "maximum": 1, + "description": "The scale of the controlnet conditioning.", + "default": 0.5 + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ", + "default": false + }, + "control_image_url": { + "examples": [ + "https://avatars.githubusercontent.com/u/74778219" + ], + "title": "Control Image Url", + "type": "string", + "description": "The URL of the control image." + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps to perform.", + "default": 25 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + }, + "enable_deep_cache": { + "title": "Enable Deep Cache", + "type": "boolean", + "description": "\n If set to true, DeepCache will be enabled. TBD\n ", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "control_image_url", + "controlnet_conditioning_scale", + "negative_prompt", + "image_size", + "num_inference_steps", + "seed", + "enable_deep_cache", + "guidance_scale", + "sync_mode", + "num_images", + "loras", + "enable_safety_checker", + "expand_prompt" + ], + "required": [ + "prompt", + "control_image_url" + ] + }, + "FastSdxlControlnetCannyOutput": { + "title": "Output", + "type": "object", + "properties": { + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "examples": [ + "https://civitai.com/api/download/models/135931", + "https://filebin.net/3chfqasxpqu21y8n/my-custom-lora-v1.safetensors" + ], + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights. Or HF model name." + }, + "scale": { + "minimum": 0, + "title": "Scale", + "type": "number", + "maximum": 1, + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + } + }, + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/fast-sdxl-controlnet-canny/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-sdxl-controlnet-canny/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
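A sketch of a FastSdxlControlnetCannyInput payload: `prompt` and `control_image_url` are required, `controlnet_conditioning_scale` (0-1, default 0.5) weights how strongly the control image constrains generation, and a null `image_size` lets the endpoint infer dimensions from the control image. Values are taken from the schema's examples and defaults.

```ts
// ControlNet canny payload for fal-ai/fast-sdxl-controlnet-canny.
const controlnetCannyInput = {
  prompt: 'Ice fortress, aurora skies, polar wildlife, twilight',
  control_image_url: 'https://avatars.githubusercontent.com/u/74778219',
  controlnet_conditioning_scale: 0.5, // default
  image_size: null, // infer size from the control image
  num_inference_steps: 25, // default
}
```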
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/fast-sdxl-controlnet-canny": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastSdxlControlnetCannyInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-sdxl-controlnet-canny/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastSdxlControlnetCannyOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/hyper-sdxl", + "metadata": { + "display_name": "Hyper SDXL", + "category": "text-to-image", + "description": "Hyper-charge SDXL's performance and creativity.", + "status": "active", + "tags": [ + "diffusion", + "real-time" + ], + "updated_at": "2026-01-26T21:44:54.998Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/kangaroo/LM0fy_9qT_8FlKrWhR7Zt.jpeg", + "model_url": "https://fal.run/fal-ai/hyper-sdxl", + "github_url": "https://huggingface.co/ByteDance/Hyper-SD/blob/main/LICENSE.md", + "date": "2024-02-21T00:00:00.000Z", + "group": { + "key": "hyper-sdxl", + "label": "Text to Image" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "error": { + "code": "expansion_failed", + "message": "Failed to fetch OpenAPI schema" + } + } + }, + { + "endpoint_id": "fal-ai/fast-lcm-diffusion", + "metadata": { + "display_name": "Latent Consistency Models (v1.5/XL)", + "category": "text-to-image", + "description": "Run SDXL at the speed of light", + "status": "active", + "tags": [ + "lcm", + "diffusion", + "turbo", + "real-time" + ], + "updated_at": "2026-01-26T21:44:55.993Z", + "is_favorited": false, + "thumbnail_url": "https://fal-cdn.batuhan-941.workers.dev/files/rabbit/P322iQXlz2jOOuRFBWK-q.jpeg", + "model_url": "https://fal.run/fal-ai/fast-lcm-diffusion", + "github_url": "https://huggingface.co/latent-consistency/lcm-lora-sdxl/blob/main/README.md", + "date": "2024-02-19T00:00:00.000Z", + "group": { + "key": "fast-lcm-diffusion", + "label": "Text to Image" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/fast-lcm-diffusion", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/fast-lcm-diffusion queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/fast-lcm-diffusion", + "category": "text-to-image", + "thumbnailUrl": "https://fal-cdn.batuhan-941.workers.dev/files/rabbit/P322iQXlz2jOOuRFBWK-q.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/fast-lcm-diffusion", + "documentationUrl": "https://fal.ai/models/fal-ai/fast-lcm-diffusion/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + 
"description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FastLcmDiffusionInput": { + "title": "TextToImageLCMInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Self-portrait oil painting, a beautiful cyborg with golden hair, 8k." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results." + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "square_hd" + }, + "expand_prompt": { + "title": "Expand Prompt", + "type": "boolean", + "description": "If set to true, the prompt will be expanded with additional prompts.", + "default": false + }, + "guidance_rescale": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Guidance Rescale", + "description": "The rescale factor for the CFG.", + "default": 0 + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 1.5 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "negative_prompt": { + "examples": [ + "cartoon, illustration, animation. face. male, female", + "ugly, deformed" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. 
moustache, blurry, low resolution).\n ", + "default": "" + }, + "format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "num_images": { + "minimum": 1, + "maximum": 8, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "model_name": { + "enum": [ + "stabilityai/stable-diffusion-xl-base-1.0", + "runwayml/stable-diffusion-v1-5" + ], + "title": "Model Name", + "type": "string", + "description": "The name of the model to use.", + "examples": [ + "stabilityai/stable-diffusion-xl-base-1.0", + "runwayml/stable-diffusion-v1-5" + ], + "default": "stabilityai/stable-diffusion-xl-base-1.0" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": true + }, + "safety_checker_version": { + "enum": [ + "v1", + "v2" + ], + "title": "Safety Checker Version", + "type": "string", + "description": "The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model.", + "default": "v1" + }, + "request_id": { + "title": "Request Id", + "type": "string", + "description": "\n An id bound to a request, can be used with response to identify the request\n itself.\n ", + "default": "" + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 32, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 6 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + } + }, + "x-fal-order-properties": [ + "model_name", + "prompt", + "negative_prompt", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "sync_mode", + "num_images", + "enable_safety_checker", + "safety_checker_version", + "expand_prompt", + "format", + "guidance_rescale", + "request_id" + ], + "required": [ + "prompt" + ] + }, + "FastLcmDiffusionOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/fast-lcm-diffusion/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-lcm-diffusion/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/fast-lcm-diffusion": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastLcmDiffusionInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-lcm-diffusion/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastLcmDiffusionOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/fast-fooocus-sdxl", + "metadata": { + "display_name": "Fooocus", + "category": "text-to-image", + "description": "Fooocus extreme speed mode as a standalone app.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:44:56.245Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/fooocus/fal_ai_fooocus_cyberpunk-city.jpeg", + "model_url": "https://fal.run/fal-ai/fast-fooocus-sdxl", + "github_url": "https://github.com/lllyasviel/Fooocus/blob/main/LICENSE", + "date": "2024-02-16T00:00:00.000Z", + "group": { + "key": "fooocus-extreme-speed", + "label": "Text to Image" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/fast-fooocus-sdxl", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/fast-fooocus-sdxl queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/fast-fooocus-sdxl", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/fooocus/fal_ai_fooocus_cyberpunk-city.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/fast-fooocus-sdxl", + "documentationUrl": "https://fal.ai/models/fal-ai/fast-fooocus-sdxl/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FastFooocusSdxlInput": { + "title": "TextToImageFooocusInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Self-portrait oil painting, a beautiful cyborg with golden hair, 8k." 
+ ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results." + }, + "enable_refiner": { + "title": "Enable Refiner", + "type": "boolean", + "description": "If set to true, a smaller model will try to refine the output after it was processed.", + "default": true + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "The size of the generated image.", + "default": "square_hd" + }, + "embeddings": { + "title": "Embeddings", + "type": "array", + "description": "The list of embeddings to use.", + "items": { + "$ref": "#/components/schemas/Embedding" + }, + "default": [] + }, + "expand_prompt": { + "title": "Expand Prompt", + "type": "boolean", + "description": "If set to true, the prompt will be expanded with additional prompts.", + "default": true + }, + "guidance_rescale": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Guidance Rescale", + "description": "The rescale factor for the CFG.", + "default": 0 + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 2 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "negative_prompt": { + "examples": [ + "cartoon, illustration, animation. face. male, female", + "ugly, deformed" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + "default": "" + }, + "format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "num_images": { + "minimum": 1, + "maximum": 8, + "type": "integer", + "title": "Num Images", + "description": "The number of images to generate.", + "default": 1 + }, + "safety_checker_version": { + "enum": [ + "v1", + "v2" + ], + "title": "Safety Checker Version", + "type": "string", + "description": "The version of the safety checker to use. v1 is the default CompVis safety checker. 
v2 uses a custom ViT model.", + "default": "v1" + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 24, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 8 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "image_size", + "num_inference_steps", + "seed", + "guidance_scale", + "num_images", + "embeddings", + "enable_safety_checker", + "safety_checker_version", + "expand_prompt", + "format", + "guidance_rescale", + "enable_refiner" + ], + "required": [ + "prompt" + ] + }, + "FastFooocusSdxlOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the image." + }, + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ], + "required": [ + "images", + "timings", + "seed", + "has_nsfw_concepts", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Embedding": { + "title": "Embedding", + "type": "object", + "properties": { + "tokens": { + "title": "Tokens", + "type": "array", + "description": "The list of tokens to use for the embedding.", + "items": { + "type": "string" + }, + "default": [ + "", + "" + ] + }, + "path": { + "examples": [ + "https://civitai.com/api/download/models/135931", + "https://filebin.net/3chfqasxpqu21y8n/my-custom-lora-v1.safetensors" + ], + "title": "Path", + "type": "string", + "description": "URL or the path to the embedding weights." 
+ } + }, + "x-fal-order-properties": [ + "path", + "tokens" + ], + "required": [ + "path" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/fast-fooocus-sdxl/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-fooocus-sdxl/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/fast-fooocus-sdxl": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastFooocusSdxlInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-fooocus-sdxl/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastFooocusSdxlOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/illusion-diffusion", + "metadata": { + "display_name": "Illusion Diffusion", + "category": "text-to-image", + "description": "Create illusions conditioned on image.", + "status": "active", + "tags": [ + "composition", + "stylized" + ], + "updated_at": "2026-01-26T21:44:56.622Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/illusion-diffusion.jpeg", + "model_url": "https://fal.run/fal-ai/illusion-diffusion", + "github_url": "https://huggingface.co/spaces/CompVis/stable-diffusion-license", + "date": "2024-02-13T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/illusion-diffusion", + "version": "1.0.0", + "description": "The OpenAPI schema for the 
fal-ai/illusion-diffusion queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/illusion-diffusion", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/illusion-diffusion.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/illusion-diffusion", + "documentationUrl": "https://fal.ai/models/fal-ai/illusion-diffusion/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "IllusionDiffusionInput": { + "title": "IllusionDiffusionInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "(masterpiece:1.4), (best quality), (detailed), Medieval village scene with busy streets and castle in the distance" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results." + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "\n The size of the generated image. You can choose between some presets or\n custom height and width that **must be multiples of 8**.\n ", + "default": "square_hd" + }, + "controlnet_conditioning_scale": { + "title": "Controlnet Conditioning Scale", + "type": "number", + "description": "The scale of the ControlNet.", + "default": 1 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/illusion-examples/pattern.png", + "https://storage.googleapis.com/falserverless/illusion-examples/checkers.png", + "https://storage.googleapis.com/falserverless/illusion-examples/checkers_mid.jpg", + "https://storage.googleapis.com/falserverless/illusion-examples/ultra_checkers.png", + "https://storage.googleapis.com/falserverless/illusion-examples/funky.jpeg", + "https://storage.googleapis.com/falserverless/illusion-examples/cubes.jpeg", + "https://storage.googleapis.com/falserverless/illusion-examples/turkey-flag.png", + "https://storage.googleapis.com/falserverless/illusion-examples/india-flag.png", + "https://storage.googleapis.com/falserverless/illusion-examples/usa-flag.png" + ], + "title": "Image Url", + "type": "string", + "description": "Input image url." 
+ }, + "scheduler": { + "enum": [ + "DPM++ Karras SDE", + "Euler" + ], + "title": "Scheduler", + "type": "string", + "description": "Scheduler / sampler to use for the image denoising process.", + "default": "Euler" + }, + "control_guidance_start": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Control Guidance Start", + "default": 0 + }, + "guidance_scale": { + "maximum": 50, + "type": "number", + "title": "Guidance Scale", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "exclusiveMinimum": 0, + "default": 7.5 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + }, + "control_guidance_end": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Control Guidance End", + "default": 1 + }, + "negative_prompt": { + "examples": [ + "(worst quality, poor details:1.4), lowres, (artist name, signature, watermark:1.4), bad-artist-anime, bad_prompt_version2, bad-hands-5, ng_deepnegative_v1_75t" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + "default": "" + }, + "num_inference_steps": { + "minimum": 0, + "maximum": 80, + "type": "integer", + "title": "Number of inference steps", + "description": "\n Increasing the amount of steps tells Stable Diffusion that it should take more steps\n to generate your final result which can increase the amount of detail in your image.\n ", + "default": 40 + } + }, + "x-fal-order-properties": [ + "image_url", + "prompt", + "negative_prompt", + "guidance_scale", + "controlnet_conditioning_scale", + "control_guidance_start", + "control_guidance_end", + "seed", + "scheduler", + "num_inference_steps", + "image_size" + ], + "required": [ + "image_url", + "prompt" + ] + }, + "IllusionDiffusionOutput": { + "title": "IllusionDiffusionOutput", + "type": "object", + "properties": { + "image": { + "title": "Image", + "description": "The generated image file info.", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "image", + "seed" + ], + "required": [ + "image", + "seed" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." 
+ }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "description": "Represents an image file.", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/illusion-diffusion/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/illusion-diffusion/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/illusion-diffusion": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IllusionDiffusionInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/illusion-diffusion/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IllusionDiffusionOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/fooocus/image-prompt", + "metadata": { + "display_name": "Fooocus Image Prompt", + "category": "text-to-image", + "description": "Default parameters with automated optimizations and quality improvements.", + "status": "active", + "tags": [ + "stylized" + ], + "updated_at": "2026-01-26T21:44:57.535Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/fooocus/fal_ai_fooocus_cyberpunk-city.jpeg", + "model_url": "https://fal.run/fal-ai/fooocus/image-prompt", + "github_url": "https://github.com/lllyasviel/Fooocus/blob/main/LICENSE", + "date": "2024-02-13T00:00:00.000Z", + "group": { + "key": "fooocus", + "label": "Image to Image" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/fooocus/image-prompt", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/fooocus/image-prompt queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/fooocus/image-prompt", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/fooocus/fal_ai_fooocus_cyberpunk-city.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/fooocus/image-prompt", + "documentationUrl": "https://fal.ai/models/fal-ai/fooocus/image-prompt/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FooocusImagePromptInput": { + "title": "FooocusImagePromptInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "pikachu" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results.", + "default": "" + }, + "uov_image_url": { + "title": "UOV Image URL", + "type": "string", + "description": "The image to upscale or vary." + }, + "performance": { + "enum": [ + "Speed", + "Quality", + "Extreme Speed", + "Lightning" + ], + "title": "Performance", + "type": "string", + "description": "\n You can choose Speed or Quality\n ", + "default": "Extreme Speed" + }, + "image_prompt_3": { + "$ref": "#/components/schemas/ImagePrompt" + }, + "styles": { + "title": "Styles", + "type": "array", + "description": "\n The style to use.\n ", + "uniqueItems": true, + "items": { + "enum": [ + "Fooocus V2", + "Fooocus Enhance", + "Fooocus Sharp", + "Fooocus Semi Realistic", + "Fooocus Masterpiece", + "Fooocus Photograph", + "Fooocus Negative", + "Fooocus Cinematic", + "SAI 3D Model", + "SAI Analog Film", + "SAI Anime", + "SAI Cinematic", + "SAI Comic Book", + "SAI Craft Clay", + "SAI Digital Art", + "SAI Enhance", + "SAI Fantasy Art", + "SAI Isometric", + "SAI Line Art", + "SAI Lowpoly", + "SAI Neonpunk", + "SAI Origami", + "SAI Photographic", + "SAI Pixel Art", + "SAI Texture", + "MRE Cinematic Dynamic", + "MRE Spontaneous Picture", + "MRE Artistic Vision", + "MRE Dark Dream", + "MRE Gloomy Art", + "MRE Bad Dream", + "MRE Underground", + "MRE Surreal Painting", + "MRE Dynamic Illustration", + "MRE Undead Art", + "MRE Elemental Art", + "MRE Space Art", + "MRE Ancient Illustration", + "MRE Brave Art", + "MRE Heroic Fantasy", + "MRE Dark Cyberpunk", + "MRE Lyrical Geometry", + "MRE Sumi E Symbolic", + "MRE Sumi E Detailed", + "MRE Manga", + "MRE Anime", + "MRE Comic", + "Ads Advertising", + "Ads Automotive", + "Ads Corporate", + "Ads Fashion Editorial", + "Ads Food Photography", + "Ads Gourmet Food Photography", + "Ads Luxury", + "Ads Real Estate", + "Ads Retail", + "Artstyle Abstract", + "Artstyle Abstract Expressionism", + "Artstyle Art Deco", + "Artstyle Art Nouveau", + "Artstyle Constructivist", + "Artstyle Cubist", + "Artstyle Expressionist", + "Artstyle Graffiti", + "Artstyle Hyperrealism", + "Artstyle Impressionist", + "Artstyle Pointillism", + "Artstyle Pop Art", + "Artstyle Psychedelic", + "Artstyle Renaissance", + "Artstyle Steampunk", + "Artstyle Surrealist", + "Artstyle Typography", + "Artstyle Watercolor", + "Futuristic Biomechanical", + "Futuristic Biomechanical Cyberpunk", + "Futuristic Cybernetic", + "Futuristic Cybernetic Robot", + "Futuristic Cyberpunk Cityscape", + "Futuristic Futuristic", + "Futuristic Retro Cyberpunk", + "Futuristic Retro Futurism", + "Futuristic Sci Fi", + "Futuristic Vaporwave", + "Game Bubble Bobble", + "Game Cyberpunk Game", + "Game Fighting Game", + "Game Gta", + "Game Mario", + "Game Minecraft", + "Game Pokemon", + "Game Retro Arcade", + "Game Retro Game", + "Game Rpg Fantasy Game", + "Game Strategy Game", + "Game Streetfighter", + "Game Zelda", + "Misc Architectural", + "Misc Disco", + "Misc Dreamscape", + "Misc Dystopian", + "Misc Fairy Tale", + "Misc Gothic", + "Misc Grunge", + "Misc Horror", + "Misc Kawaii", + "Misc Lovecraftian", + "Misc Macabre", + "Misc Manga", + "Misc Metropolis", + "Misc Minimalist", + "Misc Monochrome", + "Misc Nautical", + "Misc Space", + "Misc Stained Glass", + 
"Misc Techwear Fashion", + "Misc Tribal", + "Misc Zentangle", + "Papercraft Collage", + "Papercraft Flat Papercut", + "Papercraft Kirigami", + "Papercraft Paper Mache", + "Papercraft Paper Quilling", + "Papercraft Papercut Collage", + "Papercraft Papercut Shadow Box", + "Papercraft Stacked Papercut", + "Papercraft Thick Layered Papercut", + "Photo Alien", + "Photo Film Noir", + "Photo Glamour", + "Photo Hdr", + "Photo Iphone Photographic", + "Photo Long Exposure", + "Photo Neon Noir", + "Photo Silhouette", + "Photo Tilt Shift", + "Cinematic Diva", + "Abstract Expressionism", + "Academia", + "Action Figure", + "Adorable 3D Character", + "Adorable Kawaii", + "Art Deco", + "Art Nouveau", + "Astral Aura", + "Avant Garde", + "Baroque", + "Bauhaus Style Poster", + "Blueprint Schematic Drawing", + "Caricature", + "Cel Shaded Art", + "Character Design Sheet", + "Classicism Art", + "Color Field Painting", + "Colored Pencil Art", + "Conceptual Art", + "Constructivism", + "Cubism", + "Dadaism", + "Dark Fantasy", + "Dark Moody Atmosphere", + "Dmt Art Style", + "Doodle Art", + "Double Exposure", + "Dripping Paint Splatter Art", + "Expressionism", + "Faded Polaroid Photo", + "Fauvism", + "Flat 2d Art", + "Fortnite Art Style", + "Futurism", + "Glitchcore", + "Glo Fi", + "Googie Art Style", + "Graffiti Art", + "Harlem Renaissance Art", + "High Fashion", + "Idyllic", + "Impressionism", + "Infographic Drawing", + "Ink Dripping Drawing", + "Japanese Ink Drawing", + "Knolling Photography", + "Light Cheery Atmosphere", + "Logo Design", + "Luxurious Elegance", + "Macro Photography", + "Mandola Art", + "Marker Drawing", + "Medievalism", + "Minimalism", + "Neo Baroque", + "Neo Byzantine", + "Neo Futurism", + "Neo Impressionism", + "Neo Rococo", + "Neoclassicism", + "Op Art", + "Ornate And Intricate", + "Pencil Sketch Drawing", + "Pop Art 2", + "Rococo", + "Silhouette Art", + "Simple Vector Art", + "Sketchup", + "Steampunk 2", + "Surrealism", + "Suprematism", + "Terragen", + "Tranquil Relaxing Atmosphere", + "Sticker Designs", + "Vibrant Rim Light", + "Volumetric Lighting", + "Watercolor 2", + "Whimsical And Playful", + "Mk Chromolithography", + "Mk Cross Processing Print", + "Mk Dufaycolor Photograph", + "Mk Herbarium", + "Mk Punk Collage", + "Mk Mosaic", + "Mk Van Gogh", + "Mk Coloring Book", + "Mk Singer Sargent", + "Mk Pollock", + "Mk Basquiat", + "Mk Andy Warhol", + "Mk Halftone Print", + "Mk Gond Painting", + "Mk Albumen Print", + "Mk Aquatint Print", + "Mk Anthotype Print", + "Mk Inuit Carving", + "Mk Bromoil Print", + "Mk Calotype Print", + "Mk Color Sketchnote", + "Mk Cibulak Porcelain", + "Mk Alcohol Ink Art", + "Mk One Line Art", + "Mk Blacklight Paint", + "Mk Carnival Glass", + "Mk Cyanotype Print", + "Mk Cross Stitching", + "Mk Encaustic Paint", + "Mk Embroidery", + "Mk Gyotaku", + "Mk Luminogram", + "Mk Lite Brite Art", + "Mk Mokume Gane", + "Pebble Art", + "Mk Palekh", + "Mk Suminagashi", + "Mk Scrimshaw", + "Mk Shibori", + "Mk Vitreous Enamel", + "Mk Ukiyo E", + "Mk Vintage Airline Poster", + "Mk Vintage Travel Poster", + "Mk Bauhaus Style", + "Mk Afrofuturism", + "Mk Atompunk", + "Mk Constructivism", + "Mk Chicano Art", + "Mk De Stijl", + "Mk Dayak Art", + "Mk Fayum Portrait", + "Mk Illuminated Manuscript", + "Mk Kalighat Painting", + "Mk Madhubani Painting", + "Mk Pictorialism", + "Mk Pichwai Painting", + "Mk Patachitra Painting", + "Mk Samoan Art Inspired", + "Mk Tlingit Art", + "Mk Adnate Style", + "Mk Ron English Style", + "Mk Shepard Fairey Style" + ], + "type": "string" + }, + "default": [ + 
"Fooocus Enhance", + "Fooocus V2", + "Fooocus Sharp" + ] + }, + "loras": { + "title": "Loras", + "type": "array", + "description": "\n The LoRAs to use for the image generation. You can use up to 5 LoRAs\n and they will be merged together to generate the final image.\n ", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "default": [ + { + "path": "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_offset_example-lora_1.0.safetensors", + "scale": 0.1 + } + ] + }, + "image_prompt_4": { + "$ref": "#/components/schemas/ImagePrompt" + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance Scale", + "type": "number", + "maximum": 30, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 4 + }, + "sharpness": { + "minimum": 0, + "title": "Sharpness", + "type": "number", + "maximum": 30, + "description": "\n The sharpness of the generated image. Use it to control how sharp the generated\n image should be. Higher value means image and texture are sharper.\n ", + "default": 2 + }, + "mixing_image_prompt_and_inpaint": { + "title": "Mixing Image Prompt and Inpaint", + "type": "boolean", + "description": "Mixing Image Prompt and Inpaint", + "default": false + }, + "outpaint_selections": { + "title": "Outpaint Direction", + "type": "array", + "description": "The directions to outpaint.", + "uniqueItems": true, + "items": { + "enum": [ + "Left", + "Right", + "Top", + "Bottom" + ], + "type": "string" + }, + "default": [] + }, + "inpaint_image_url": { + "title": "Inpaint Image URL", + "type": "string", + "description": "The image to use as a reference for inpainting." + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "refiner_model": { + "enum": [ + "None", + "realisticVisionV60B1_v51VAE.safetensors" + ], + "title": "Refiner Model", + "type": "string", + "description": "Refiner (SDXL or SD 1.5)", + "default": "None" + }, + "image_prompt_2": { + "$ref": "#/components/schemas/ImagePrompt" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. 
This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ", + "default": false + }, + "inpaint_mode": { + "enum": [ + "Inpaint or Outpaint (default)", + "Improve Detail (face, hand, eyes, etc.)", + "Modify Content (add objects, change background, etc.)" + ], + "title": "Inpaint Mode", + "type": "string", + "description": "The mode to use for inpainting.", + "default": "Inpaint or Outpaint (default)" + }, + "uov_method": { + "enum": [ + "Disabled", + "Vary (Subtle)", + "Vary (Strong)", + "Upscale (1.5x)", + "Upscale (2x)", + "Upscale (Fast 2x)" + ], + "title": "UOV Method", + "type": "string", + "description": "The method to use for upscaling or varying.", + "default": "Disabled" + }, + "seed": { + "examples": [ + 176400 + ], + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ", + "nullable": true + }, + "refiner_switch": { + "description": "\n Use 0.4 for SD1.5 realistic models; 0.667 for SD1.5 anime models\n 0.8 for XL-refiners; or any value for switching two SDXL models.\n ", + "type": "number", + "minimum": 0, + "title": "Refiner Switch At", + "maximum": 1, + "multipleOf": 0.0001, + "default": 0.8 + }, + "mixing_image_prompt_and_vary_upscale": { + "title": "Mixing Image Prompt and Vary/Upscale", + "type": "boolean", + "description": "Mixing Image Prompt and Vary/Upscale", + "default": false + }, + "mask_image_url": { + "title": "Mask Image URL", + "type": "string", + "description": "The image to use as a mask for the generated image." + }, + "image_prompt_1": { + "examples": [ + { + "weight": 1, + "stop_at": 1, + "type": "PyraCanny", + "image_url": "https://storage.googleapis.com/falserverless/model_tests/fooocus/Pikachu.webp" + } + ], + "title": "Image Prompt 1", + "allOf": [ + { + "$ref": "#/components/schemas/ImagePrompt" + } + ] + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to false, the safety checker will be disabled.", + "default": true + }, + "negative_prompt": { + "examples": [ + "(worst quality, low quality, normal quality, lowres, low details, oversaturated, undersaturated, overexposed, underexposed, grayscale, bw, bad photo, bad photography, bad art:1.4), (watermark, signature, text font, username, error, logo, words, letters, digits, autograph, trademark, name:1.2), (blur, blurry, grainy), morbid, ugly, asymmetrical, mutated malformed, mutilated, poorly lit, bad shadow, draft, cropped, out of frame, cut off, censored, jpeg artifacts, out of focus, glitch, duplicate, (airbrushed, cartoon, anime, semi-realistic, cgi, render, blender, digital art, manga, amateur:1.3), (3D ,3D Game, 3D Game Scene, 3D Character:1.1), (bad hands, bad anatomy, bad body, bad face, bad teeth, bad arms, bad legs, deformities:1.3)" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. 
moustache, blurry, low resolution).\n ", + "default": "" + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 4, + "description": "\n Number of images to generate in one request\n ", + "default": 1 + }, + "aspect_ratio": { + "title": "Aspect Ratio", + "type": "string", + "description": "\n The size of the generated image. You can choose between some presets or\n custom height and width that **must be multiples of 8**.\n ", + "default": "1024x1024" + }, + "inpaint_additional_prompt": { + "title": "Inpaint Additional Prompt", + "type": "string", + "description": "Describe what you want to inpaint.", + "default": "" + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "styles", + "performance", + "guidance_scale", + "sharpness", + "aspect_ratio", + "num_images", + "loras", + "refiner_model", + "refiner_switch", + "output_format", + "sync_mode", + "seed", + "image_prompt_1", + "image_prompt_2", + "image_prompt_3", + "image_prompt_4", + "inpaint_image_url", + "mask_image_url", + "inpaint_mode", + "inpaint_additional_prompt", + "outpaint_selections", + "mixing_image_prompt_and_inpaint", + "uov_image_url", + "uov_method", + "mixing_image_prompt_and_vary_upscale", + "enable_safety_checker" + ], + "required": [ + "image_prompt_1" + ] + }, + "FooocusImagePromptOutput": { + "title": "FooocusOutput", + "type": "object", + "properties": { + "images": { + "title": "Images", + "type": "array", + "description": "The generated image file info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + }, + "description": "The time taken for the generation process." + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "has_nsfw_concepts" + ], + "required": [ + "images", + "timings", + "has_nsfw_concepts" + ] + }, + "ImagePrompt": { + "title": "ImagePrompt", + "type": "object", + "properties": { + "weight": { + "minimum": 0, + "title": "Weight", + "type": "number", + "maximum": 2, + "default": 1 + }, + "stop_at": { + "minimum": 0, + "title": "Stop At", + "type": "number", + "maximum": 1, + "default": 0.5 + }, + "type": { + "enum": [ + "ImagePrompt", + "PyraCanny", + "CPDS", + "FaceSwap" + ], + "title": "Type", + "type": "string", + "default": "ImagePrompt" + }, + "image_url": { + "title": "Image Url", + "type": "string" + } + }, + "x-fal-order-properties": [ + "type", + "image_url", + "stop_at", + "weight" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "examples": [ + "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_offset_example-lora_1.0.safetensors" + ], + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "title": "Scale", + "type": "number", + "maximum": 1, + "description": "\n The scale of the LoRA weight. 
This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 0.1 + } + }, + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "description": "Represents an image file.", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/fooocus/image-prompt/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fooocus/image-prompt/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/fooocus/image-prompt": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FooocusImagePromptInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fooocus/image-prompt/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FooocusImagePromptOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/fooocus/inpaint", + "metadata": { + "display_name": "Fooocus Inpainting", + "category": "text-to-image", + "description": "Default parameters with automated optimizations and quality improvements.", + "status": "active", + "tags": [ + "stylized", + "editing" + ], + "updated_at": "2026-01-26T21:44:57.961Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/fooocus/fal_ai_fooocus_cyberpunk-city.jpeg", + "model_url": "https://fal.run/fal-ai/fooocus/inpaint", + "github_url": "https://github.com/lllyasviel/Fooocus/blob/main/LICENSE", + "date": "2024-02-13T00:00:00.000Z", + "group": { + "key": "fooocus", + "label": "Inpaint" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/fooocus/inpaint", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/fooocus/inpaint queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/fooocus/inpaint", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/fooocus/fal_ai_fooocus_cyberpunk-city.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/fooocus/inpaint", + "documentationUrl": "https://fal.ai/models/fal-ai/fooocus/inpaint/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FooocusInpaintInput": { + "title": "FooocusInpaintInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "a cat on a bench, realistic, highly detailed, 8k" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results.", + "default": "" + }, + "performance": { + "enum": [ + "Speed", + "Quality", + "Extreme Speed", + "Lightning" + ], + "title": "Performance", + "type": "string", + "description": "\n You can choose Speed or Quality\n ", + "default": "Extreme Speed" + }, + "styles": { + "title": "Styles", + "type": "array", + "description": "\n The style to use.\n ", + "uniqueItems": true, + "items": { + "enum": [ + "Fooocus V2", + "Fooocus Enhance", + "Fooocus Sharp", + "Fooocus Semi Realistic", + "Fooocus Masterpiece", + "Fooocus Photograph", + "Fooocus Negative", + "Fooocus Cinematic", + "SAI 3D Model", + "SAI Analog Film", + "SAI Anime", + "SAI Cinematic", + "SAI Comic Book", + "SAI Craft Clay", + "SAI Digital Art", + "SAI Enhance", + "SAI Fantasy Art", + "SAI Isometric", + "SAI Line Art", + "SAI Lowpoly", + "SAI Neonpunk", + "SAI Origami", + "SAI Photographic", + "SAI Pixel Art", + "SAI Texture", + "MRE Cinematic Dynamic", + "MRE Spontaneous Picture", + "MRE Artistic Vision", + "MRE Dark Dream", + "MRE Gloomy Art", + "MRE Bad Dream", + "MRE Underground", + "MRE Surreal Painting", + "MRE Dynamic Illustration", + "MRE Undead Art", + "MRE Elemental Art", + "MRE Space Art", + "MRE Ancient Illustration", + "MRE Brave Art", + "MRE Heroic Fantasy", + "MRE Dark Cyberpunk", + "MRE Lyrical Geometry", + "MRE Sumi E Symbolic", + "MRE Sumi E Detailed", + "MRE Manga", + "MRE Anime", + "MRE Comic", + "Ads Advertising", + "Ads Automotive", + "Ads Corporate", + "Ads Fashion Editorial", + "Ads Food Photography", + "Ads Gourmet Food Photography", + "Ads Luxury", + "Ads Real Estate", + "Ads Retail", + "Artstyle Abstract", + "Artstyle Abstract Expressionism", + "Artstyle Art Deco", + "Artstyle Art Nouveau", + "Artstyle Constructivist", + "Artstyle Cubist", + "Artstyle Expressionist", + "Artstyle Graffiti", + "Artstyle Hyperrealism", + "Artstyle Impressionist", + "Artstyle Pointillism", + "Artstyle Pop Art", + "Artstyle Psychedelic", + "Artstyle Renaissance", + "Artstyle Steampunk", + "Artstyle Surrealist", + "Artstyle Typography", + "Artstyle Watercolor", + "Futuristic Biomechanical", + "Futuristic Biomechanical Cyberpunk", + "Futuristic Cybernetic", + "Futuristic Cybernetic Robot", + "Futuristic Cyberpunk Cityscape", + "Futuristic Futuristic", + "Futuristic Retro Cyberpunk", + "Futuristic Retro Futurism", + "Futuristic Sci Fi", + "Futuristic Vaporwave", + "Game Bubble Bobble", + "Game Cyberpunk Game", + "Game Fighting Game", + "Game Gta", + "Game Mario", + "Game Minecraft", + "Game Pokemon", + "Game Retro Arcade", + "Game Retro Game", + "Game Rpg Fantasy Game", + "Game Strategy Game", + "Game Streetfighter", + "Game Zelda", + "Misc Architectural", + "Misc Disco", + "Misc Dreamscape", + "Misc Dystopian", + "Misc Fairy Tale", + "Misc Gothic", + "Misc Grunge", + "Misc Horror", + "Misc Kawaii", + "Misc Lovecraftian", + "Misc Macabre", + "Misc Manga", + "Misc Metropolis", + "Misc Minimalist", + "Misc Monochrome", + "Misc Nautical", + "Misc Space", + "Misc Stained Glass", + "Misc Techwear Fashion", + "Misc Tribal", + "Misc Zentangle", + "Papercraft Collage", + "Papercraft Flat Papercut", + "Papercraft Kirigami", + "Papercraft Paper 
Mache", + "Papercraft Paper Quilling", + "Papercraft Papercut Collage", + "Papercraft Papercut Shadow Box", + "Papercraft Stacked Papercut", + "Papercraft Thick Layered Papercut", + "Photo Alien", + "Photo Film Noir", + "Photo Glamour", + "Photo Hdr", + "Photo Iphone Photographic", + "Photo Long Exposure", + "Photo Neon Noir", + "Photo Silhouette", + "Photo Tilt Shift", + "Cinematic Diva", + "Abstract Expressionism", + "Academia", + "Action Figure", + "Adorable 3D Character", + "Adorable Kawaii", + "Art Deco", + "Art Nouveau", + "Astral Aura", + "Avant Garde", + "Baroque", + "Bauhaus Style Poster", + "Blueprint Schematic Drawing", + "Caricature", + "Cel Shaded Art", + "Character Design Sheet", + "Classicism Art", + "Color Field Painting", + "Colored Pencil Art", + "Conceptual Art", + "Constructivism", + "Cubism", + "Dadaism", + "Dark Fantasy", + "Dark Moody Atmosphere", + "Dmt Art Style", + "Doodle Art", + "Double Exposure", + "Dripping Paint Splatter Art", + "Expressionism", + "Faded Polaroid Photo", + "Fauvism", + "Flat 2d Art", + "Fortnite Art Style", + "Futurism", + "Glitchcore", + "Glo Fi", + "Googie Art Style", + "Graffiti Art", + "Harlem Renaissance Art", + "High Fashion", + "Idyllic", + "Impressionism", + "Infographic Drawing", + "Ink Dripping Drawing", + "Japanese Ink Drawing", + "Knolling Photography", + "Light Cheery Atmosphere", + "Logo Design", + "Luxurious Elegance", + "Macro Photography", + "Mandola Art", + "Marker Drawing", + "Medievalism", + "Minimalism", + "Neo Baroque", + "Neo Byzantine", + "Neo Futurism", + "Neo Impressionism", + "Neo Rococo", + "Neoclassicism", + "Op Art", + "Ornate And Intricate", + "Pencil Sketch Drawing", + "Pop Art 2", + "Rococo", + "Silhouette Art", + "Simple Vector Art", + "Sketchup", + "Steampunk 2", + "Surrealism", + "Suprematism", + "Terragen", + "Tranquil Relaxing Atmosphere", + "Sticker Designs", + "Vibrant Rim Light", + "Volumetric Lighting", + "Watercolor 2", + "Whimsical And Playful", + "Mk Chromolithography", + "Mk Cross Processing Print", + "Mk Dufaycolor Photograph", + "Mk Herbarium", + "Mk Punk Collage", + "Mk Mosaic", + "Mk Van Gogh", + "Mk Coloring Book", + "Mk Singer Sargent", + "Mk Pollock", + "Mk Basquiat", + "Mk Andy Warhol", + "Mk Halftone Print", + "Mk Gond Painting", + "Mk Albumen Print", + "Mk Aquatint Print", + "Mk Anthotype Print", + "Mk Inuit Carving", + "Mk Bromoil Print", + "Mk Calotype Print", + "Mk Color Sketchnote", + "Mk Cibulak Porcelain", + "Mk Alcohol Ink Art", + "Mk One Line Art", + "Mk Blacklight Paint", + "Mk Carnival Glass", + "Mk Cyanotype Print", + "Mk Cross Stitching", + "Mk Encaustic Paint", + "Mk Embroidery", + "Mk Gyotaku", + "Mk Luminogram", + "Mk Lite Brite Art", + "Mk Mokume Gane", + "Pebble Art", + "Mk Palekh", + "Mk Suminagashi", + "Mk Scrimshaw", + "Mk Shibori", + "Mk Vitreous Enamel", + "Mk Ukiyo E", + "Mk Vintage Airline Poster", + "Mk Vintage Travel Poster", + "Mk Bauhaus Style", + "Mk Afrofuturism", + "Mk Atompunk", + "Mk Constructivism", + "Mk Chicano Art", + "Mk De Stijl", + "Mk Dayak Art", + "Mk Fayum Portrait", + "Mk Illuminated Manuscript", + "Mk Kalighat Painting", + "Mk Madhubani Painting", + "Mk Pictorialism", + "Mk Pichwai Painting", + "Mk Patachitra Painting", + "Mk Samoan Art Inspired", + "Mk Tlingit Art", + "Mk Adnate Style", + "Mk Ron English Style", + "Mk Shepard Fairey Style" + ], + "type": "string" + }, + "default": [ + "Fooocus Enhance", + "Fooocus V2", + "Fooocus Sharp" + ] + }, + "image_prompt_3": { + "$ref": "#/components/schemas/ImagePrompt" + }, + "loras": { + "title": 
"Loras", + "type": "array", + "description": "\n The LoRAs to use for the image generation. You can use up to 5 LoRAs\n and they will be merged together to generate the final image.\n ", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "default": [ + { + "path": "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_offset_example-lora_1.0.safetensors", + "scale": 0.1 + } + ] + }, + "image_prompt_4": { + "$ref": "#/components/schemas/ImagePrompt" + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance Scale", + "type": "number", + "maximum": 30, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 4 + }, + "sharpness": { + "minimum": 0, + "title": "Sharpness", + "type": "number", + "maximum": 30, + "description": "\n The sharpness of the generated image. Use it to control how sharp the generated\n image should be. Higher value means image and texture are sharper.\n ", + "default": 2 + }, + "mixing_image_prompt_and_inpaint": { + "title": "Mixing Image Prompt and Inpaint", + "type": "boolean", + "description": "Mixing Image Prompt and Inpaint", + "default": false + }, + "outpaint_selections": { + "title": "Outpaint Direction", + "type": "array", + "description": "The directions to outpaint.", + "uniqueItems": true, + "items": { + "enum": [ + "Left", + "Right", + "Top", + "Bottom" + ], + "type": "string" + }, + "default": [] + }, + "inpaint_image_url": { + "examples": [ + "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" + ], + "title": "Inpaint Image Url", + "type": "string", + "description": "The image to use as a reference for inpainting." + }, + "refiner_model": { + "enum": [ + "None", + "realisticVisionV60B1_v51VAE.safetensors" + ], + "title": "Refiner Model", + "type": "string", + "description": "Refiner (SDXL or SD 1.5)", + "default": "None" + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "image_prompt_2": { + "$ref": "#/components/schemas/ImagePrompt" + }, + "inpaint_respective_field": { + "description": "\n The area to inpaint. Value 0 is same as \"Only Masked\" in A1111. Value 1 is\n same as \"Whole Image\" in A1111. Only used in inpaint, not used in outpaint.\n (Outpaint always use 1.0)\n ", + "type": "number", + "minimum": 0, + "title": "Inpaint Respective Field", + "maximum": 1, + "multipleOf": 0.001, + "default": 0.618 + }, + "inpaint_mode": { + "enum": [ + "Inpaint or Outpaint (default)", + "Improve Detail (face, hand, eyes, etc.)", + "Modify Content (add objects, change background, etc.)" + ], + "title": "Inpaint Mode", + "type": "string", + "description": "The mode to use for inpainting.", + "default": "Inpaint or Outpaint (default)" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. 
This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ", + "default": false + }, + "seed": { + "examples": [ + 176400 + ], + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ", + "nullable": true + }, + "refiner_switch": { + "description": "\n Use 0.4 for SD1.5 realistic models; 0.667 for SD1.5 anime models\n 0.8 for XL-refiners; or any value for switching two SDXL models.\n ", + "type": "number", + "minimum": 0, + "title": "Refiner Switch At", + "maximum": 1, + "multipleOf": 0.0001, + "default": 0.8 + }, + "inpaint_disable_initial_latent": { + "title": "Disable Initial Latent In Inpaint", + "type": "boolean", + "description": "If set to true, the initial preprocessing will be disabled.", + "default": false + }, + "mask_image_url": { + "examples": [ + "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" + ], + "title": "Mask Image Url", + "type": "string", + "description": "The image to use as a mask for the generated image." + }, + "invert_mask": { + "title": "Invert Mask", + "type": "boolean", + "description": "If set to true, the mask will be inverted.", + "default": false + }, + "image_prompt_1": { + "$ref": "#/components/schemas/ImagePrompt" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to false, the safety checker will be disabled.", + "default": true + }, + "negative_prompt": { + "examples": [ + "(worst quality, low quality, normal quality, lowres, low details, oversaturated, undersaturated, overexposed, underexposed, grayscale, bw, bad photo, bad photography, bad art:1.4), (watermark, signature, text font, username, error, logo, words, letters, digits, autograph, trademark, name:1.2), (blur, blurry, grainy), morbid, ugly, asymmetrical, mutated malformed, mutilated, poorly lit, bad shadow, draft, cropped, out of frame, cut off, censored, jpeg artifacts, out of focus, glitch, duplicate, (airbrushed, cartoon, anime, semi-realistic, cgi, render, blender, digital art, manga, amateur:1.3), (3D ,3D Game, 3D Game Scene, 3D Character:1.1), (bad hands, bad anatomy, bad body, bad face, bad teeth, bad arms, bad legs, deformities:1.3)" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + "default": "" + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 4, + "description": "\n Number of images to generate in one request\n ", + "default": 1 + }, + "aspect_ratio": { + "title": "Aspect Ratio", + "type": "string", + "description": "\n The size of the generated image. You can choose between some presets or\n custom height and width that **must be multiples of 8**.\n ", + "default": "1024x1024" + }, + "inpaint_additional_prompt": { + "title": "Inpaint Additional Prompt", + "type": "string", + "description": "Describe what you want to inpaint.", + "default": "" + }, + "inpaint_strength": { + "description": "\n Same as the denoising strength in A1111 inpaint. Only used in inpaint, not\n used in outpaint. 
(Outpaint always uses 1.0)\n ", + "type": "number", + "title": "Inpaint Denoising Strength", + "maximum": 1, + "multipleOf": 0.001, + "exclusiveMinimum": 0, + "default": 1 + }, + "override_inpaint_options": { + "title": "Override Inpaint Options", + "type": "boolean", + "description": "\n If set to true, the advanced inpaint options ('inpaint_disable_initial_latent',\n 'inpaint_engine', 'inpaint_strength', 'inpaint_respective_field',\n 'inpaint_erode_or_dilate') will be overridden.\n Otherwise, the default values will be used.\n ", + "default": false + }, + "inpaint_engine": { + "enum": [ + "None", + "v1", + "v2.5", + "v2.6" + ], + "title": "Inpaint Engine", + "type": "string", + "description": "Version of Fooocus inpaint model", + "default": "v2.6" + }, + "inpaint_erode_or_dilate": { + "description": "\n Positive value will make white area in the mask larger, negative value will\n make white area smaller. (default is 0, always process before any mask\n invert)\n ", + "type": "number", + "minimum": -64, + "title": "Mask Erode or Dilate", + "maximum": 64, + "multipleOf": 1, + "default": 0 + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "styles", + "performance", + "guidance_scale", + "sharpness", + "aspect_ratio", + "num_images", + "loras", + "refiner_model", + "refiner_switch", + "output_format", + "sync_mode", + "seed", + "inpaint_image_url", + "mask_image_url", + "inpaint_mode", + "inpaint_additional_prompt", + "outpaint_selections", + "override_inpaint_options", + "inpaint_disable_initial_latent", + "inpaint_engine", + "inpaint_strength", + "inpaint_respective_field", + "inpaint_erode_or_dilate", + "invert_mask", + "image_prompt_1", + "image_prompt_2", + "image_prompt_3", + "image_prompt_4", + "mixing_image_prompt_and_inpaint", + "enable_safety_checker" + ], + "required": [ + "inpaint_image_url" + ] + }, + "FooocusInpaintOutput": { + "title": "FooocusOutput", + "type": "object", + "properties": { + "images": { + "title": "Images", + "type": "array", + "description": "The generated image file info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + }, + "description": "The time taken for the generation process." 
+ }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "has_nsfw_concepts" + ], + "required": [ + "images", + "timings", + "has_nsfw_concepts" + ] + }, + "ImagePrompt": { + "title": "ImagePrompt", + "type": "object", + "properties": { + "weight": { + "minimum": 0, + "title": "Weight", + "type": "number", + "maximum": 2, + "default": 1 + }, + "stop_at": { + "minimum": 0, + "title": "Stop At", + "type": "number", + "maximum": 1, + "default": 0.5 + }, + "type": { + "enum": [ + "ImagePrompt", + "PyraCanny", + "CPDS", + "FaceSwap" + ], + "title": "Type", + "type": "string", + "default": "ImagePrompt" + }, + "image_url": { + "title": "Image Url", + "type": "string" + } + }, + "x-fal-order-properties": [ + "type", + "image_url", + "stop_at", + "weight" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "examples": [ + "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_offset_example-lora_1.0.safetensors" + ], + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "title": "Scale", + "type": "number", + "maximum": 1, + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 0.1 + } + }, + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "description": "Represents an image file.", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/fooocus/inpaint/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fooocus/inpaint/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/fooocus/inpaint": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FooocusInpaintInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fooocus/inpaint/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FooocusInpaintOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/lcm", + "metadata": { + "display_name": "Latent Consistency (SDXL & SDv1.5)", + "category": "text-to-image", + "description": "Produce high-quality images with minimal inference steps.", + "status": "active", + "tags": [ + "diffusion", + "lcm", + "real-time" + ], + "updated_at": "2026-01-26T21:44:58.423Z", + "is_favorited": false, + "thumbnail_url": "https://fal-cdn.batuhan-941.workers.dev/files/penguin/FS1_8TqEc1VEk8fFSes1C.jpeg", + "model_url": "https://fal.run/fal-ai/lcm", + "date": "2024-02-04T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/lcm", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/lcm queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/lcm", + "category": "text-to-image", + "thumbnailUrl": "https://fal-cdn.batuhan-941.workers.dev/files/penguin/FS1_8TqEc1VEk8fFSes1C.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/lcm", + "documentationUrl": "https://fal.ai/models/fal-ai/lcm/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LcmInput": { + "title": "LCMInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "a black cat with glowing eyes, cute, adorable, disney, pixar, highly detailed, 8k", + "an island near sea, with seagulls, moon shining over the sea, light house, boats int he background, fish flying over the sea" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results." + }, + "controlnet_inpaint": { + "title": "Controlnet Inpaint", + "type": "boolean", + "description": "\n If set to true, the inpainting pipeline will use controlnet inpainting.\n Only effective for inpainting pipelines.\n ", + "default": false + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "\n The size of the generated image. You can choose between some presets or\n custom height and width that **must be multiples of 8**.\n\n If not provided:\n - For text-to-image generations, the default size is 512x512.\n - For image-to-image generations, the default size is the same as the input image.\n - For inpainting generations, the default size is the same as the input image.\n " + }, + "enable_safety_checks": { + "title": "Enable Safety Checks", + "type": "boolean", + "description": "\n If set to true, the resulting image will be checked whether it includes any\n potentially unsafe content. If it does, it will be replaced with a black\n image.\n ", + "default": true + }, + "model": { + "enum": [ + "sdxl", + "sdv1-5" + ], + "title": "Model", + "type": "string", + "description": "The model to use for generating the image.", + "default": "sdv1-5" + }, + "lora_url": { + "title": "Lora Url", + "type": "string", + "description": "\n The url of the lora server to use for image generation.\n " + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance scale (CFG)", + "type": "number", + "maximum": 8, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 1 + }, + "negative_prompt": { + "examples": [ + "cartoon, illustration, animation. face. male, female", + "ugly, deformed" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use.Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + "default": "" + }, + "inpaint_mask_only": { + "title": "Inpaint Mask Only", + "type": "boolean", + "description": "\n If set to true, the inpainting pipeline will only inpaint the provided mask\n area. Only effective for inpainting pipelines.\n ", + "default": false + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 8, + "description": "\n The number of images to generate. 
The function will return a list of images\n with the same prompt and negative prompt but different seeds.\n ", + "default": 1 + }, + "lora_scale": { + "title": "Lora Scale", + "type": "number", + "description": "\n The scale of the lora server to use for image generation.\n ", + "default": 1 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/lcm/inpaint_image.png", + "https://storage.googleapis.com/falserverless/model_tests/lcm/beach.png" + ], + "title": "Image Url", + "type": "string", + "description": "\n The base image to use for guiding the image generation on image-to-image\n generations. If the either width or height of the image is larger than 1024\n pixels, the image will be resized to 1024 pixels while keeping the aspect ratio.\n " + }, + "strength": { + "minimum": 0, + "title": "Strength", + "type": "number", + "maximum": 1, + "description": "\n The strength of the image that is passed as `image_url`. The strength\n determines how much the generated image will be similar to the image passed as\n `image_url`. The higher the strength the more model gets \"creative\" and\n generates an image that's different from the initial image. A strength of 1.0\n means that the initial image is more or less ignored and the model will try to\n generate an image that's as close as possible to the prompt.\n ", + "default": 0.8 + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ", + "default": false + }, + "request_id": { + "title": "Request Id", + "type": "string", + "description": "\n An id bound to a request, can be used with response to identify the request\n itself.\n ", + "default": "" + }, + "seed": { + "examples": [ + 42 + ], + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + }, + "mask_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/lcm/inpaint_mask.png" + ], + "title": "Mask Url", + "type": "string", + "description": "\n The mask to use for guiding the image generation on image\n inpainting. The model will focus on the mask area and try to fill it with\n the most relevant content.\n\n The mask must be a black and white image where the white area is the area\n that needs to be filled and the black area is the area that should be\n ignored.\n\n The mask must have the same dimensions as the image passed as `image_url`.\n " + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 12, + "description": "\n The number of inference steps to use for generating the image. 
The more steps\n the better the image will be but it will also take longer to generate.\n ", + "default": 4 + } + }, + "x-fal-order-properties": [ + "model", + "prompt", + "image_url", + "mask_url", + "strength", + "negative_prompt", + "seed", + "guidance_scale", + "num_inference_steps", + "image_size", + "sync_mode", + "num_images", + "enable_safety_checks", + "request_id", + "inpaint_mask_only", + "controlnet_inpaint", + "lora_url", + "lora_scale" + ], + "required": [ + "prompt" + ] + }, + "LcmOutput": { + "title": "LCMOutput", + "type": "object", + "properties": { + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "request_id": { + "title": "Request Id", + "type": "string", + "description": "\n An id bound to a request, can be used with response to identify the request\n itself.\n ", + "default": "" + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated one that was used in case none was passed.\n " + }, + "num_inference_steps": { + "title": "Num Inference Steps", + "type": "integer", + "description": "\n Number of inference steps used to generate the image. It will be the same value of the one passed in the\n input or the default one in case none was passed.\n ", + "default": 4 + }, + "nsfw_content_detected": { + "title": "Nsfw Content Detected", + "type": "array", + "description": "\n A list of booleans indicating whether the generated image contains any\n potentially unsafe content. If the safety check is disabled, all values in this field\n will be false.\n ", + "items": { + "type": "boolean" + } + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "seed", + "num_inference_steps", + "request_id", + "nsfw_content_detected" + ], + "required": [ + "images", + "timings", + "seed", + "nsfw_content_detected" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer" + }, + "content_type": { + "title": "Content Type", + "type": "string", + "default": "image/jpeg" + }, + "url": { + "title": "Url", + "type": "string" + }, + "width": { + "title": "Width", + "type": "integer" + } + }, + "x-fal-order-properties": [ + "url", + "width", + "height", + "content_type" + ], + "required": [ + "url", + "width", + "height" + ] + } + } + }, + "paths": { + "/fal-ai/lcm/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/lcm/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/lcm": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LcmInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/lcm/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LcmOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/diffusion-edge", + "metadata": { + "display_name": "DiffusionEdge", + "category": "text-to-image", + "description": "Diffusion based high quality edge detection", + "status": "active", + "tags": [ + "detection" + ], + "updated_at": "2026-01-26T21:44:59.294Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/diffusion_edge_1.png", + "model_url": "https://fal.run/fal-ai/diffusion-edge", + "github_url": "https://github.com/GuHuangAI/DiffusionEdge", + "date": "2024-01-08T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/diffusion-edge", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/diffusion-edge queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/diffusion-edge", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/diffusion_edge_1.png", + "playgroundUrl": "https://fal.ai/models/fal-ai/diffusion-edge", + "documentationUrl": "https://fal.ai/models/fal-ai/diffusion-edge/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "DiffusionEdgeInput": { + "title": "DiffusionEdgeInput", + "type": "object", + "properties": { + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/upscale/hamburger.png" + ], + "title": "Image Url", + "type": "string", + "description": "The text prompt you would like to convert to speech." + } + }, + "x-fal-order-properties": [ + "image_url" + ], + "required": [ + "image_url" + ] + }, + "DiffusionEdgeOutput": { + "title": "DiffusionEdgeOutput", + "type": "object", + "properties": { + "image": { + "title": "Image", + "description": "The generated image file info.", + "allOf": [ + { + "$ref": "#/components/schemas/Image" + } + ] + } + }, + "x-fal-order-properties": [ + "image" + ], + "required": [ + "image" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "examples": [ + "https://url.to/generated/file/z9RV14K95DvU.png" + ], + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + } + }, + "description": "Represents an image file.", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "required": [ + "url", + "content_type", + "file_name", + "file_size" + ] + } + } + }, + "paths": { + "/fal-ai/diffusion-edge/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/diffusion-edge/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/diffusion-edge": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DiffusionEdgeInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/diffusion-edge/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DiffusionEdgeOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/fooocus", + "metadata": { + "display_name": "Fooocus", + "category": "text-to-image", + "description": "Default parameters with automated optimizations and quality improvements.", + "status": "active", + "tags": [ + "stylized" + ], + "updated_at": "2026-01-26T21:45:00.358Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/fooocus/fal_ai_fooocus_cyberpunk-city.jpeg", + "model_url": "https://fal.run/fal-ai/fooocus", + "github_url": "https://github.com/lllyasviel/Fooocus/blob/main/LICENSE", + "date": "2023-11-16T00:00:00.000Z", + "group": { + "key": "fooocus", + "label": "Base" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/fooocus", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/fooocus queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/fooocus", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/fooocus/fal_ai_fooocus_cyberpunk-city.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/fooocus", + "documentationUrl": "https://fal.ai/models/fal-ai/fooocus/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FooocusInput": { + "title": "FooocusLegacyInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "an astronaut in the jungle, cold color palette with butterflies in the background, highly detailed, 8k" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. 
Be as descriptive as possible for best results.", + "default": "" + }, + "performance": { + "enum": [ + "Speed", + "Quality", + "Extreme Speed", + "Lightning" + ], + "title": "Performance", + "type": "string", + "description": "\n You can choose Speed or Quality\n ", + "default": "Extreme Speed" + }, + "styles": { + "title": "Styles", + "type": "array", + "description": "\n The style to use.\n ", + "uniqueItems": true, + "items": { + "enum": [ + "Fooocus V2", + "Fooocus Enhance", + "Fooocus Sharp", + "Fooocus Semi Realistic", + "Fooocus Masterpiece", + "Fooocus Photograph", + "Fooocus Negative", + "Fooocus Cinematic", + "SAI 3D Model", + "SAI Analog Film", + "SAI Anime", + "SAI Cinematic", + "SAI Comic Book", + "SAI Craft Clay", + "SAI Digital Art", + "SAI Enhance", + "SAI Fantasy Art", + "SAI Isometric", + "SAI Line Art", + "SAI Lowpoly", + "SAI Neonpunk", + "SAI Origami", + "SAI Photographic", + "SAI Pixel Art", + "SAI Texture", + "MRE Cinematic Dynamic", + "MRE Spontaneous Picture", + "MRE Artistic Vision", + "MRE Dark Dream", + "MRE Gloomy Art", + "MRE Bad Dream", + "MRE Underground", + "MRE Surreal Painting", + "MRE Dynamic Illustration", + "MRE Undead Art", + "MRE Elemental Art", + "MRE Space Art", + "MRE Ancient Illustration", + "MRE Brave Art", + "MRE Heroic Fantasy", + "MRE Dark Cyberpunk", + "MRE Lyrical Geometry", + "MRE Sumi E Symbolic", + "MRE Sumi E Detailed", + "MRE Manga", + "MRE Anime", + "MRE Comic", + "Ads Advertising", + "Ads Automotive", + "Ads Corporate", + "Ads Fashion Editorial", + "Ads Food Photography", + "Ads Gourmet Food Photography", + "Ads Luxury", + "Ads Real Estate", + "Ads Retail", + "Artstyle Abstract", + "Artstyle Abstract Expressionism", + "Artstyle Art Deco", + "Artstyle Art Nouveau", + "Artstyle Constructivist", + "Artstyle Cubist", + "Artstyle Expressionist", + "Artstyle Graffiti", + "Artstyle Hyperrealism", + "Artstyle Impressionist", + "Artstyle Pointillism", + "Artstyle Pop Art", + "Artstyle Psychedelic", + "Artstyle Renaissance", + "Artstyle Steampunk", + "Artstyle Surrealist", + "Artstyle Typography", + "Artstyle Watercolor", + "Futuristic Biomechanical", + "Futuristic Biomechanical Cyberpunk", + "Futuristic Cybernetic", + "Futuristic Cybernetic Robot", + "Futuristic Cyberpunk Cityscape", + "Futuristic Futuristic", + "Futuristic Retro Cyberpunk", + "Futuristic Retro Futurism", + "Futuristic Sci Fi", + "Futuristic Vaporwave", + "Game Bubble Bobble", + "Game Cyberpunk Game", + "Game Fighting Game", + "Game Gta", + "Game Mario", + "Game Minecraft", + "Game Pokemon", + "Game Retro Arcade", + "Game Retro Game", + "Game Rpg Fantasy Game", + "Game Strategy Game", + "Game Streetfighter", + "Game Zelda", + "Misc Architectural", + "Misc Disco", + "Misc Dreamscape", + "Misc Dystopian", + "Misc Fairy Tale", + "Misc Gothic", + "Misc Grunge", + "Misc Horror", + "Misc Kawaii", + "Misc Lovecraftian", + "Misc Macabre", + "Misc Manga", + "Misc Metropolis", + "Misc Minimalist", + "Misc Monochrome", + "Misc Nautical", + "Misc Space", + "Misc Stained Glass", + "Misc Techwear Fashion", + "Misc Tribal", + "Misc Zentangle", + "Papercraft Collage", + "Papercraft Flat Papercut", + "Papercraft Kirigami", + "Papercraft Paper Mache", + "Papercraft Paper Quilling", + "Papercraft Papercut Collage", + "Papercraft Papercut Shadow Box", + "Papercraft Stacked Papercut", + "Papercraft Thick Layered Papercut", + "Photo Alien", + "Photo Film Noir", + "Photo Glamour", + "Photo Hdr", + "Photo Iphone Photographic", + "Photo Long Exposure", + "Photo Neon Noir", + "Photo Silhouette", + 
"Photo Tilt Shift", + "Cinematic Diva", + "Abstract Expressionism", + "Academia", + "Action Figure", + "Adorable 3D Character", + "Adorable Kawaii", + "Art Deco", + "Art Nouveau", + "Astral Aura", + "Avant Garde", + "Baroque", + "Bauhaus Style Poster", + "Blueprint Schematic Drawing", + "Caricature", + "Cel Shaded Art", + "Character Design Sheet", + "Classicism Art", + "Color Field Painting", + "Colored Pencil Art", + "Conceptual Art", + "Constructivism", + "Cubism", + "Dadaism", + "Dark Fantasy", + "Dark Moody Atmosphere", + "Dmt Art Style", + "Doodle Art", + "Double Exposure", + "Dripping Paint Splatter Art", + "Expressionism", + "Faded Polaroid Photo", + "Fauvism", + "Flat 2d Art", + "Fortnite Art Style", + "Futurism", + "Glitchcore", + "Glo Fi", + "Googie Art Style", + "Graffiti Art", + "Harlem Renaissance Art", + "High Fashion", + "Idyllic", + "Impressionism", + "Infographic Drawing", + "Ink Dripping Drawing", + "Japanese Ink Drawing", + "Knolling Photography", + "Light Cheery Atmosphere", + "Logo Design", + "Luxurious Elegance", + "Macro Photography", + "Mandola Art", + "Marker Drawing", + "Medievalism", + "Minimalism", + "Neo Baroque", + "Neo Byzantine", + "Neo Futurism", + "Neo Impressionism", + "Neo Rococo", + "Neoclassicism", + "Op Art", + "Ornate And Intricate", + "Pencil Sketch Drawing", + "Pop Art 2", + "Rococo", + "Silhouette Art", + "Simple Vector Art", + "Sketchup", + "Steampunk 2", + "Surrealism", + "Suprematism", + "Terragen", + "Tranquil Relaxing Atmosphere", + "Sticker Designs", + "Vibrant Rim Light", + "Volumetric Lighting", + "Watercolor 2", + "Whimsical And Playful", + "Mk Chromolithography", + "Mk Cross Processing Print", + "Mk Dufaycolor Photograph", + "Mk Herbarium", + "Mk Punk Collage", + "Mk Mosaic", + "Mk Van Gogh", + "Mk Coloring Book", + "Mk Singer Sargent", + "Mk Pollock", + "Mk Basquiat", + "Mk Andy Warhol", + "Mk Halftone Print", + "Mk Gond Painting", + "Mk Albumen Print", + "Mk Aquatint Print", + "Mk Anthotype Print", + "Mk Inuit Carving", + "Mk Bromoil Print", + "Mk Calotype Print", + "Mk Color Sketchnote", + "Mk Cibulak Porcelain", + "Mk Alcohol Ink Art", + "Mk One Line Art", + "Mk Blacklight Paint", + "Mk Carnival Glass", + "Mk Cyanotype Print", + "Mk Cross Stitching", + "Mk Encaustic Paint", + "Mk Embroidery", + "Mk Gyotaku", + "Mk Luminogram", + "Mk Lite Brite Art", + "Mk Mokume Gane", + "Pebble Art", + "Mk Palekh", + "Mk Suminagashi", + "Mk Scrimshaw", + "Mk Shibori", + "Mk Vitreous Enamel", + "Mk Ukiyo E", + "Mk Vintage Airline Poster", + "Mk Vintage Travel Poster", + "Mk Bauhaus Style", + "Mk Afrofuturism", + "Mk Atompunk", + "Mk Constructivism", + "Mk Chicano Art", + "Mk De Stijl", + "Mk Dayak Art", + "Mk Fayum Portrait", + "Mk Illuminated Manuscript", + "Mk Kalighat Painting", + "Mk Madhubani Painting", + "Mk Pictorialism", + "Mk Pichwai Painting", + "Mk Patachitra Painting", + "Mk Samoan Art Inspired", + "Mk Tlingit Art", + "Mk Adnate Style", + "Mk Ron English Style", + "Mk Shepard Fairey Style" + ], + "type": "string" + }, + "default": [ + "Fooocus Enhance", + "Fooocus V2", + "Fooocus Sharp" + ] + }, + "control_type": { + "enum": [ + "ImagePrompt", + "PyraCanny", + "CPDS", + "FaceSwap" + ], + "title": "Control Type", + "type": "string", + "examples": [ + "ImagePrompt", + "PyraCanny", + "CPDS", + "FaceSwap" + ], + "description": "The type of image control", + "default": "PyraCanny" + }, + "mask_image_url": { + "title": "Mask Image Url", + "type": "string", + "description": "The image to use as a mask for the generated image.", + "nullable": 
true + }, + "loras": { + "title": "Loras", + "type": "array", + "description": "\n The LoRAs to use for the image generation. You can use up to 5 LoRAs\n and they will be merged together to generate the final image.\n ", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "default": [ + { + "path": "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_offset_example-lora_1.0.safetensors", + "scale": 0.1 + } + ] + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to false, the safety checker will be disabled.", + "default": true + }, + "sharpness": { + "minimum": 0, + "title": "Sharpness", + "type": "number", + "maximum": 30, + "description": "\n The sharpness of the generated image. Use it to control how sharp the generated\n image should be. Higher value means image and texture are sharper.\n ", + "default": 2 + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance Scale", + "type": "number", + "maximum": 30, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 4 + }, + "negative_prompt": { + "examples": [ + "(worst quality, low quality, normal quality, lowres, low details, oversaturated, undersaturated, overexposed, underexposed, grayscale, bw, bad photo, bad photography, bad art:1.4), (watermark, signature, text font, username, error, logo, words, letters, digits, autograph, trademark, name:1.2), (blur, blurry, grainy), morbid, ugly, asymmetrical, mutated malformed, mutilated, poorly lit, bad shadow, draft, cropped, out of frame, cut off, censored, jpeg artifacts, out of focus, glitch, duplicate, (airbrushed, cartoon, anime, semi-realistic, cgi, render, blender, digital art, manga, amateur:1.3), (3D ,3D Game, 3D Game Scene, 3D Character:1.1), (bad hands, bad anatomy, bad body, bad face, bad teeth, bad arms, bad legs, deformities:1.3)" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + "default": "" + }, + "inpaint_image_url": { + "title": "Inpaint Image Url", + "type": "string", + "description": "The image to use as a reference for inpainting.", + "nullable": true + }, + "mixing_image_prompt_and_inpaint": { + "title": "Mixing Image Prompt And Inpaint", + "type": "boolean", + "default": false + }, + "aspect_ratio": { + "title": "Aspect Ratio", + "type": "string", + "description": "\n The size of the generated image. 
You can choose between some presets or\n custom height and width that **must be multiples of 8**.\n ", + "default": "1024x1024" + }, + "num_images": { + "minimum": 1, + "title": "Num Images", + "type": "integer", + "maximum": 4, + "description": "\n Number of images to generate in one request\n ", + "default": 1 + }, + "output_format": { + "enum": [ + "png", + "jpeg", + "webp" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the generated image.", + "default": "jpeg" + }, + "refiner_model": { + "enum": [ + "None", + "realisticVisionV60B1_v51VAE.safetensors" + ], + "title": "Refiner Model", + "type": "string", + "description": "Refiner (SDXL or SD 1.5)", + "default": "None" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ", + "default": false + }, + "control_image_url": { + "title": "Control Image Url", + "type": "string", + "description": "The image to use as a reference for the generated image.", + "nullable": true + }, + "seed": { + "examples": [ + 176400 + ], + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ", + "nullable": true + }, + "refiner_switch": { + "description": "\n Use 0.4 for SD1.5 realistic models; 0.667 for SD1.5 anime models\n 0.8 for XL-refiners; or any value for switching two SDXL models.\n ", + "type": "number", + "minimum": 0, + "title": "Refiner Switch At", + "maximum": 1, + "multipleOf": 0.0001, + "default": 0.8 + }, + "control_image_weight": { + "minimum": 0, + "title": "Control Image Weight", + "type": "number", + "maximum": 2, + "description": "\n The strength of the control image. Use it to control how much the generated image\n should look like the control image.\n ", + "default": 1 + }, + "control_image_stop_at": { + "minimum": 0, + "title": "Control Image Stop At", + "type": "number", + "maximum": 1, + "description": "\n The stop at value of the control image. Use it to control how much the generated image\n should look like the control image.\n ", + "default": 1 + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "styles", + "performance", + "guidance_scale", + "sharpness", + "aspect_ratio", + "num_images", + "loras", + "refiner_model", + "refiner_switch", + "output_format", + "sync_mode", + "seed", + "control_image_url", + "control_type", + "control_image_weight", + "control_image_stop_at", + "inpaint_image_url", + "mask_image_url", + "mixing_image_prompt_and_inpaint", + "enable_safety_checker" + ] + }, + "FooocusOutput": { + "title": "FooocusOutput", + "type": "object", + "properties": { + "images": { + "title": "Images", + "type": "array", + "description": "The generated image file info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + }, + "description": "The time taken for the generation process." 
+ }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + } + }, + "x-fal-order-properties": [ + "images", + "timings", + "has_nsfw_concepts" + ], + "required": [ + "images", + "timings", + "has_nsfw_concepts" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "examples": [ + "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_offset_example-lora_1.0.safetensors" + ], + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "title": "Scale", + "type": "number", + "maximum": 1, + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 0.1 + } + }, + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "Image": { + "title": "Image", + "type": "object", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "description": "Represents an image file.", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/fooocus/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fooocus/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/fooocus": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FooocusInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fooocus/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FooocusOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/lora", + "metadata": { + "display_name": "Stable Diffusion with LoRAs", + "category": "text-to-image", + "description": "Run Any Stable Diffusion model with customizable LoRA weights.", + "status": "active", + "tags": [ + "diffusion", + "lora", + "customization" + ], + "updated_at": "2026-01-26T21:45:00.984Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/sd-loras.jpeg", + "model_url": "https://fal.run/fal-ai/lora", + "github_url": "https://huggingface.co/spaces/CompVis/stable-diffusion-license", + "date": "2023-09-26T00:00:00.000Z", + "group": { + "key": "sd-loras", + "label": "Text to Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/lora", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/lora queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/lora", + "category": "text-to-image", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/sd-loras.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/lora", + "documentationUrl": "https://fal.ai/models/fal-ai/lora/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "LoraInput": { + "title": "TextToImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Photo of a european medieval 40 year old queen, silver hair, highly detailed face, detailed eyes, head shot, intricate crown, age spots, wrinkles", + "Photo of a classic red mustang car parked in las vegas strip at night" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results." + }, + "image_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Image Size", + "description": "\n The size of the generated image. You can choose between some presets or custom height and width\n that **must be multiples of 8**.\n ", + "default": "square_hd" + }, + "tile_height": { + "minimum": 128, + "maximum": 4096, + "type": "integer", + "title": "Tile Height", + "description": "The size of the tiles to be used for the image generation.", + "default": 4096 + }, + "embeddings": { + "title": "Embeddings", + "type": "array", + "description": "\n The embeddings to use for the image generation. Only a single embedding is supported at the moment.\n The embeddings will be used to map the tokens in the prompt to the embedding weights.\n ", + "items": { + "$ref": "#/components/schemas/Embedding" + }, + "default": [] + }, + "ic_light_model_url": { + "title": "Ic Light Model Url", + "type": "string", + "description": "\n The URL of the IC Light model to use for the image generation.\n " + }, + "image_encoder_weight_name": { + "examples": [ + "pytorch_model.bin" + ], + "title": "Image Encoder Weight Name", + "type": "string", + "description": "\n The weight name of the image encoder model to use for the image generation.\n ", + "default": "pytorch_model.bin" + }, + "ip_adapter": { + "title": "Ip Adapter", + "type": "array", + "description": "\n The IP adapter to use for the image generation.\n ", + "items": { + "$ref": "#/components/schemas/IPAdapter" + }, + "default": [] + }, + "loras": { + "title": "Loras", + "type": "array", + "description": "\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "default": [] + }, + "scheduler": { + "enum": [ + "DPM++ 2M", + "DPM++ 2M Karras", + "DPM++ 2M SDE", + "DPM++ 2M SDE Karras", + "Euler", + "Euler A", + "Euler (trailing timesteps)", + "LCM", + "LCM (trailing timesteps)", + "DDIM", + "TCD" + ], + "title": "Scheduler", + "type": "string", + "description": "Scheduler / sampler to use for the image denoising process." + }, + "sigmas": { + "default": { + "method": "default", + "array": [] + }, + "title": "Sigmas", + "description": "\n Optionally override the sigmas to use for the denoising process. Only works with schedulers which support the `sigmas` argument in their `set_sigmas` method.\n Defaults to not overriding, in which case the scheduler automatically sets the sigmas based on the `num_inference_steps` parameter.\n If set to a custom sigma schedule, the `num_inference_steps` parameter will be ignored. 
Cannot be set if `timesteps` is set.\n ", + "allOf": [ + { + "$ref": "#/components/schemas/SigmasInput" + } + ] + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 7.5 + }, + "tile_stride_width": { + "minimum": 64, + "maximum": 2048, + "type": "integer", + "title": "Tile Stride Width", + "description": "The stride of the tiles to be used for the image generation.", + "default": 2048 + }, + "debug_per_pass_latents": { + "title": "Debug Per Pass Latents", + "type": "boolean", + "description": "If set to true, the latents will be saved for debugging per pass.", + "default": false + }, + "timesteps": { + "default": { + "method": "default", + "array": [] + }, + "title": "Timesteps", + "description": "\n Optionally override the timesteps to use for the denoising process. Only works with schedulers which support the `timesteps` argument in their `set_timesteps` method.\n Defaults to not overriding, in which case the scheduler automatically sets the timesteps based on the `num_inference_steps` parameter.\n If set to a custom timestep schedule, the `num_inference_steps` parameter will be ignored. Cannot be set if `sigmas` is set.\n ", + "allOf": [ + { + "$ref": "#/components/schemas/TimestepsInput" + } + ] + }, + "image_encoder_subfolder": { + "examples": [], + "title": "Image Encoder Subfolder", + "type": "string", + "description": "\n The subfolder of the image encoder model to use for the image generation.\n " + }, + "prompt_weighting": { + "examples": [ + true + ], + "title": "Prompt Weighting", + "type": "boolean", + "description": "\n If set to true, the prompt weighting syntax will be used.\n Additionally, this will lift the 77 token limit by averaging embeddings.\n ", + "default": false + }, + "variant": { + "title": "Variant", + "type": "string", + "description": "The variant of the model to use for huggingface models, e.g. 'fp16'." + }, + "model_name": { + "examples": [ + "stabilityai/stable-diffusion-xl-base-1.0", + "runwayml/stable-diffusion-v1-5", + "SG161222/Realistic_Vision_V2.0" + ], + "title": "Model Name", + "type": "string", + "description": "URL or HuggingFace ID of the base model to generate the image." 
+ }, + "controlnet_guess_mode": { + "title": "Controlnet Guess Mode", + "type": "boolean", + "description": "\n If set to true, the controlnet will be applied to only the conditional predictions.\n ", + "default": false + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + }, + "ic_light_model_background_image_url": { + "title": "Ic Light Model Background Image Url", + "type": "string", + "description": "\n The URL of the IC Light model background image to use for the image generation.\n Make sure to use a background compatible with the model.\n " + }, + "rescale_betas_snr_zero": { + "title": "Rescale Betas Snr Zero", + "type": "boolean", + "description": "\n Whether to set the rescale_betas_snr_zero option or not for the sampler\n ", + "default": false + }, + "tile_width": { + "minimum": 128, + "maximum": 4096, + "type": "integer", + "title": "Tile Width", + "description": "The size of the tiles to be used for the image generation.", + "default": 4096 + }, + "prediction_type": { + "enum": [ + "v_prediction", + "epsilon" + ], + "title": "Prediction Type", + "type": "string", + "description": "\n The type of prediction to use for the image generation.\n The `epsilon` is the default.\n ", + "default": "epsilon" + }, + "eta": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Eta", + "description": "The eta value to be used for the image generation.", + "default": 0 + }, + "image_encoder_path": { + "title": "Image Encoder Path", + "type": "string", + "description": "\n The path to the image encoder model to use for the image generation.\n " + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": false + }, + "negative_prompt": { + "examples": [ + "cartoon, painting, illustration, worst quality, low quality, normal quality" + ], + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use.Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + "default": "" + }, + "image_format": { + "enum": [ + "jpeg", + "png" + ], + "title": "Image Format", + "type": "string", + "description": "The format of the generated image.", + "examples": [ + "jpeg" + ], + "default": "png" + }, + "num_images": { + "minimum": 1, + "maximum": 8, + "type": "integer", + "title": "Number of images", + "description": "\n Number of images to generate in one request. Note that the higher the batch size,\n the longer it will take to generate the images.\n ", + "default": 1 + }, + "debug_latents": { + "title": "Debug Latents", + "type": "boolean", + "description": "If set to true, the latents will be saved for debugging.", + "default": false + }, + "ic_light_image_url": { + "title": "Ic Light Image Url", + "type": "string", + "description": "\n The URL of the IC Light model image to use for the image generation.\n " + }, + "unet_name": { + "title": "Unet Name", + "type": "string", + "description": "URL or HuggingFace ID of the custom U-Net model to use for the image generation." 
+ }, + "clip_skip": { + "minimum": 0, + "maximum": 2, + "type": "integer", + "title": "Clip Skip", + "description": "\n Skips part of the image generation process, leading to slightly different results.\n This means the image renders faster, too.\n ", + "default": 0 + }, + "tile_stride_height": { + "minimum": 64, + "maximum": 2048, + "type": "integer", + "title": "Tile Stride Height", + "description": "The stride of the tiles to be used for the image generation.", + "default": 2048 + }, + "controlnets": { + "title": "Controlnets", + "type": "array", + "description": "\n The control nets to use for the image generation. You can use any number of control nets\n and they will be applied to the image at the specified timesteps.\n ", + "items": { + "$ref": "#/components/schemas/ControlNet" + }, + "default": [] + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 150, + "type": "integer", + "title": "Number of inference steps", + "description": "\n Increasing the amount of steps tells Stable Diffusion that it should take more steps\n to generate your final result which can increase the amount of detail in your image.\n ", + "default": 30 + } + }, + "x-fal-order-properties": [ + "model_name", + "unet_name", + "variant", + "prompt", + "negative_prompt", + "prompt_weighting", + "loras", + "embeddings", + "controlnets", + "controlnet_guess_mode", + "ip_adapter", + "image_encoder_path", + "image_encoder_subfolder", + "image_encoder_weight_name", + "ic_light_model_url", + "ic_light_model_background_image_url", + "ic_light_image_url", + "seed", + "image_size", + "num_inference_steps", + "guidance_scale", + "clip_skip", + "scheduler", + "timesteps", + "sigmas", + "prediction_type", + "rescale_betas_snr_zero", + "image_format", + "num_images", + "enable_safety_checker", + "tile_width", + "tile_height", + "tile_stride_width", + "tile_stride_height", + "eta", + "debug_latents", + "debug_per_pass_latents" + ], + "required": [ + "model_name", + "prompt" + ] + }, + "LoraOutput": { + "title": "OutputParameters", + "type": "object", + "properties": { + "images": { + "title": "Images", + "type": "array", + "description": "The generated image files info.", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "debug_latents": { + "title": "Debug Latents", + "description": "The latents saved for debugging.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + }, + "has_nsfw_concepts": { + "title": "Has Nsfw Concepts", + "type": "array", + "description": "Whether the generated images contain NSFW concepts.", + "items": { + "type": "boolean" + } + }, + "debug_per_pass_latents": { + "title": "Debug Per Pass Latents", + "description": "The latents saved for debugging per pass.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "images", + "seed", + "has_nsfw_concepts", + "debug_latents", + "debug_per_pass_latents" + ], + "required": [ + "images", + "seed", + "has_nsfw_concepts" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "Embedding": { + "title": "Embedding", + "type": "object", + "properties": { + "tokens": { + "title": "Tokens", + "type": "array", + "description": "\n The tokens to map the embedding weights to. Use these tokens in your prompts.\n ", + "items": { + "type": "string" + }, + "default": [ + "", + "" + ] + }, + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the embedding weights." + } + }, + "x-fal-order-properties": [ + "path", + "tokens" + ], + "required": [ + "path" + ] + }, + "IPAdapter": { + "title": "IPAdapter", + "type": "object", + "properties": { + "unconditional_noising_factor": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Unconditional Noising Factor", + "description": "The factor to apply to the unconditional noising of the IP adapter.", + "default": 0 + }, + "ip_adapter_image_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "type": "string" + } + } + ], + "title": "Ip Adapter Image Url", + "description": "URL of the image to be used as the IP adapter." + }, + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the IP adapter weights." + }, + "image_projection_shortcut": { + "title": "Image Projection Shortcut", + "type": "boolean", + "description": "\n The value to set the image projection shortcut to. For FaceID plus V1 models,\n this should be set to False. For FaceID plus V2 models, this should be set to True.\n Default is True.\n ", + "default": true + }, + "scale_json": { + "title": "Scale Json", + "type": "object", + "description": "\n The scale of the IP adapter weight. This is used to scale the IP adapter weight\n before merging it with the base model.\n " + }, + "ip_adapter_mask_url": { + "title": "Ip Adapter Mask Url", + "type": "string", + "description": "\n The mask to use for the IP adapter. When using a mask, the ip-adapter image size and the mask size must be the same\n " + }, + "model_subfolder": { + "title": "Model Subfolder", + "type": "string", + "description": "Subfolder in the model directory where the IP adapter weights are stored." + }, + "scale": { + "minimum": 0, + "title": "Scale", + "type": "number", + "description": "\n The scale of the IP adapter weight. 
This is used to scale the IP adapter weight\n before merging it with the base model.\n ", + "default": 1 + }, + "insight_face_model_path": { + "title": "Insight Face Model Path", + "type": "string", + "description": "URL or the path to the InsightFace model weights." + }, + "weight_name": { + "title": "Weight Name", + "type": "string", + "description": "Name of the weight file." + } + }, + "x-fal-order-properties": [ + "ip_adapter_image_url", + "ip_adapter_mask_url", + "path", + "model_subfolder", + "weight_name", + "insight_face_model_path", + "scale", + "scale_json", + "unconditional_noising_factor", + "image_projection_shortcut" + ], + "required": [ + "ip_adapter_image_url", + "path" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Scale", + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + } + }, + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "SigmasInput": { + "title": "SigmasInput", + "type": "object", + "properties": { + "method": { + "enum": [ + "default", + "array" + ], + "title": "Method", + "type": "string", + "description": "\n The method to use for the sigmas. If set to 'array', the sigmas will be set based\n on the provided sigmas schedule in the `array` field.\n Defaults to 'default' which means the scheduler will use the sigmas of the scheduler.\n ", + "default": "default" + }, + "array": { + "title": "Array", + "type": "array", + "description": "\n Sigmas schedule to be used if 'array' method is selected.\n ", + "items": { + "type": "number" + }, + "default": [] + } + }, + "x-fal-order-properties": [ + "method", + "array" + ] + }, + "TimestepsInput": { + "title": "TimestepsInput", + "type": "object", + "properties": { + "method": { + "enum": [ + "default", + "array" + ], + "title": "Method", + "type": "string", + "description": "\n The method to use for the timesteps. If set to 'array', the timesteps will be set based\n on the provided timesteps schedule in the `array` field.\n Defaults to 'default' which means the scheduler will use the `num_inference_steps` parameter.\n ", + "default": "default" + }, + "array": { + "title": "Array", + "type": "array", + "description": "\n Timesteps schedule to be used if 'array' method is selected.\n ", + "items": { + "type": "integer" + }, + "default": [] + } + }, + "x-fal-order-properties": [ + "method", + "array" + ] + }, + "ControlNet": { + "title": "ControlNet", + "type": "object", + "properties": { + "conditioning_scale": { + "minimum": 0, + "maximum": 2, + "type": "number", + "title": "Conditioning Scale", + "description": "\n The scale of the control net weight. This is used to scale the control net weight\n before merging it with the base model.\n ", + "default": 1 + }, + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the control net weights." + }, + "ip_adapter_index": { + "title": "Ip Adapter Index", + "type": "integer", + "description": "\n The index of the IP adapter to be applied to the controlnet.
This is only needed for InstantID ControlNets.\n " + }, + "end_percentage": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "End Percentage", + "description": "\n The percentage of the image to end applying the controlnet in terms of the total timesteps.\n ", + "default": 1 + }, + "config_url": { + "title": "Config Url", + "type": "string", + "description": "optional URL to the controlnet config.json file." + }, + "image_url": { + "title": "Image Url", + "type": "string", + "description": "URL of the image to be used as the control net." + }, + "variant": { + "title": "Variant", + "type": "string", + "description": "The optional variant if a Hugging Face repo key is used." + }, + "mask_url": { + "title": "Mask Url", + "type": "string", + "description": "\n The mask to use for the controlnet. When using a mask, the control image size and the mask size must be the same and divisible by 32.\n " + }, + "start_percentage": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Start Percentage", + "description": "\n The percentage of the image to start applying the controlnet in terms of the total timesteps.\n ", + "default": 0 + } + }, + "x-fal-order-properties": [ + "path", + "config_url", + "variant", + "image_url", + "mask_url", + "conditioning_scale", + "start_percentage", + "end_percentage", + "ip_adapter_index" + ], + "required": [ + "path", + "image_url" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "title": "Image", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/lora/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/lora/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/lora": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LoraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/lora/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LoraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/packages/typescript/ai-fal/json/fal.models.text-to-json.json b/packages/typescript/ai-fal/json/fal.models.text-to-json.json new file mode 100644 index 00000000..bb25ac29 --- /dev/null +++ b/packages/typescript/ai-fal/json/fal.models.text-to-json.json @@ -0,0 +1,2672 @@ +{ + "generated_at": "2026-01-28T02:51:51.868Z", + "total_models": 4, + "category": "text-to-json", + "models": [ + { + "endpoint_id": "bria/fibo-edit/edit/structured_instruction", + "metadata": { + "display_name": "Fibo Edit [Structured Instruction]", + "category": "text-to-json", + "description": "Structured Instructions Generation endpoint for Fibo Edit, Bria's newest editing model.", + "status": "active", + "tags": [ + "structured-prompt-generation", + "fibo-edit", + "json" + ], + "updated_at": "2026-01-26T21:41:28.558Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8b2483/UZdf2yfDnuLfTMdQeTNzT_328d7729251342ac8fbc5c8721feb5c5.jpg", + "model_url": "https://fal.run/bria/fibo-edit/edit/structured_instruction", + "license_type": "commercial", + "date": "2026-01-20T11:55:38.689Z", + "group": { + "key": "fibo-edit", + "label": "Structured Instruction" + }, + "highlighted": 
false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for bria/fibo-edit/edit/structured_instruction", + "version": "1.0.0", + "description": "The OpenAPI schema for the bria/fibo-edit/edit/structured_instruction queue.", + "x-fal-metadata": { + "endpointId": "bria/fibo-edit/edit/structured_instruction", + "category": "text-to-json", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8b2483/UZdf2yfDnuLfTMdQeTNzT_328d7729251342ac8fbc5c8721feb5c5.jpg", + "playgroundUrl": "https://fal.ai/models/bria/fibo-edit/edit/structured_instruction", + "documentationUrl": "https://fal.ai/models/bria/fibo-edit/edit/structured_instruction/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FiboEditEditStructured_instructionInput": { + "x-fal-order-properties": [ + "image_url", + "mask_url", + "instruction", + "seed", + "sync_mode" + ], + "type": "object", + "properties": { + "sync_mode": { + "description": "If true, returns the image directly in the response (increases latency).", + "type": "boolean", + "title": "Sync Mode", + "default": false + }, + "seed": { + "description": "Random seed for reproducibility.", + "type": "integer", + "title": "Seed", + "default": 5555 + }, + "mask_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Reference image mask (file or URL). Optional.", + "title": "Mask Url" + }, + "instruction": { + "examples": [ + "change lighting to starlight nighttime" + ], + "description": "Instruction for image editing.", + "title": "Instruction", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "image_url": { + "examples": [ + "https://v3b.fal.media/files/b/0a8b07e8/GYKVk2EVivg_MC3jRRZi3_png%20-%202026-01-13T094835.850%20(3).png" + ], + "description": "Reference image (file or URL).", + "title": "Image Url", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + } + }, + "title": "StructuredInstructionInputModel" + }, + "FiboEditEditStructured_instructionOutput": {} + } + }, + "paths": { + "/bria/fibo-edit/edit/structured_instruction/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/fibo-edit/edit/structured_instruction/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/bria/fibo-edit/edit/structured_instruction": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FiboEditEditStructured_instructionInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/fibo-edit/edit/structured_instruction/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FiboEditEditStructured_instructionOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "bria/fibo-lite/generate/structured_prompt", + "metadata": { + "display_name": "Fibo Lite", + "category": "text-to-json", + "description": "Structured Prompt Generation endpoint for Fibo-Lite, Bria's SOTA Open source model", + "status": "active", + "tags": [ + "bria", + "fibo", + "structured-prompt" + ], + "updated_at": "2026-01-26T21:41:31.684Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8b00e9/MkX4-gN9W49Ijknjs_Qzh_ef502877702747e39edfab1d2fe5a86f.jpg", + "model_url": "https://fal.run/bria/fibo-lite/generate/structured_prompt", + "license_type": "commercial", + "date": "2026-01-19T10:37:06.298Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for bria/fibo-lite/generate/structured_prompt", + "version": "1.0.0", + "description": "The OpenAPI schema for the bria/fibo-lite/generate/structured_prompt queue.", + "x-fal-metadata": { + "endpointId": "bria/fibo-lite/generate/structured_prompt", + "category": "text-to-json", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8b00e9/MkX4-gN9W49Ijknjs_Qzh_ef502877702747e39edfab1d2fe5a86f.jpg", + "playgroundUrl": "https://fal.ai/models/bria/fibo-lite/generate/structured_prompt", + "documentationUrl": "https://fal.ai/models/bria/fibo-lite/generate/structured_prompt/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": 
"string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FiboLiteGenerateStructured_promptInput": { + "title": "StructuredPromptModel", + "type": "object", + "properties": { + "prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Prompt for image generation.", + "title": "Prompt", + "examples": [ + "A hyper-detailed, ultra-fluffy owl sitting in the trees at night, looking directly at the camera with wide, adorable, expressive eyes. Its feathers are soft and voluminous, catching the cool moonlight with subtle silver highlights. The owl’s gaze is curious and full of charm, giving it a whimsical, storybook-like personality." + ] + }, + "seed": { + "description": "Random seed for reproducibility.", + "type": "integer", + "title": "Seed", + "default": 5555 + }, + "structured_prompt": { + "anyOf": [ + { + "$ref": "#/components/schemas/StructuredPrompt" + }, + { + "type": "null" + } + ], + "description": "The structured prompt to generate an image from." + }, + "image_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Reference image (file or URL).", + "title": "Image Url" + } + }, + "x-fal-order-properties": [ + "prompt", + "structured_prompt", + "image_url", + "seed" + ] + }, + "FiboLiteGenerateStructured_promptOutput": {}, + "StructuredPrompt": { + "title": "StructuredPrompt", + "type": "object", + "properties": { + "background_setting": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The background setting of the image to be generated.", + "title": "Background Setting" + }, + "artistic_style": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The artistic style of the image to be generated.", + "title": "Artistic Style" + }, + "context": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The context of the image to be generated.", + "title": "Context" + }, + "text_render": { + "anyOf": [ + { + "type": "array", + "items": {} + }, + { + "type": "null" + } + ], + "description": "A list of text to be rendered in the image.", + "title": "Text Render", + "default": [] + }, + "objects": { + "anyOf": [ + { + "type": "array", + "items": { + "$ref": "#/components/schemas/PromptObject" + } + }, + { + "type": "null" + } + ], + "description": "A list of objects in the image to be generated, along with their attributes and relationships to other objects in the image.", + "title": "Objects", + "default": [] + }, + "style_medium": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The style medium of the image to be generated.", + "title": "Style Medium" + }, + "photographic_characteristics": { + "anyOf": [ + { + "$ref": "#/components/schemas/PhotographicCharacteristics" + }, + { + "type": "null" + } + ], + "description": "The photographic characteristics of the image to be generated." 
+ }, + "aesthetics": { + "anyOf": [ + { + "$ref": "#/components/schemas/Aesthetics" + }, + { + "type": "null" + } + ], + "description": "The aesthetics of the image to be generated." + }, + "lighting": { + "anyOf": [ + { + "$ref": "#/components/schemas/Lighting" + }, + { + "type": "null" + } + ], + "description": "The lighting of the image to be generated." + }, + "short_description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "A short description of the image to be generated.", + "title": "Short Description" + } + }, + "x-fal-order-properties": [ + "short_description", + "objects", + "background_setting", + "lighting", + "aesthetics", + "photographic_characteristics", + "style_medium", + "text_render", + "context", + "artistic_style" + ] + }, + "PromptObject": { + "title": "PromptObject", + "type": "object", + "properties": { + "relative_size": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The relative size of the object in the image.", + "title": "Relative Size" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "A description of the object to be generated.", + "title": "Description" + }, + "skin_tone_and_texture": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The skin tone and texture of the object in the image.", + "title": "Skin Tone And Texture" + }, + "appearance_details": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The appearance details of the object.", + "title": "Appearance Details" + }, + "number_of_objects": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The number of objects in the image.", + "title": "Number Of Objects" + }, + "pose": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The pose of the object in the image.", + "title": "Pose" + }, + "expression": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The expression of the object in the image.", + "title": "Expression" + }, + "shape_and_color": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The shape and color of the object.", + "title": "Shape And Color" + }, + "relationship": { + "description": "The relationship of the object to other objects in the image.", + "type": "string", + "title": "Relationship" + }, + "texture": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The texture of the object.", + "title": "Texture" + }, + "gender": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The gender of the object in the image.", + "title": "Gender" + }, + "clothing": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The clothing of the object in the image.", + "title": "Clothing" + }, + "location": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The location of the object in the image.", + "title": "Location" + }, + "orientation": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The orientation of the object in the image.", + "title": "Orientation" + }, + "action": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The action of the object in the image.", + "title": 
"Action" + } + }, + "x-fal-order-properties": [ + "description", + "location", + "relationship", + "relative_size", + "shape_and_color", + "texture", + "appearance_details", + "number_of_objects", + "pose", + "expression", + "clothing", + "action", + "gender", + "skin_tone_and_texture", + "orientation" + ], + "required": [ + "relationship" + ] + }, + "PhotographicCharacteristics": { + "title": "PhotographicCharacteristics", + "type": "object", + "properties": { + "focus": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The focus in the image to be generated.", + "title": "Focus" + }, + "lens_focal_length": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The focal length of the lens in the image to be generated.", + "title": "Lens Focal Length" + }, + "camera_angle": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The angle of the camera in the image to be generated.", + "title": "Camera Angle" + }, + "depth_of_field": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The depth of field in the image to be generated.", + "title": "Depth Of Field" + } + }, + "x-fal-order-properties": [ + "depth_of_field", + "focus", + "camera_angle", + "lens_focal_length" + ] + }, + "Aesthetics": { + "title": "Aesthetics", + "type": "object", + "properties": { + "composition": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The composition of the image to be generated.", + "title": "Composition" + }, + "mood_atmosphere": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mood and atmosphere of the image to be generated.", + "title": "Mood Atmosphere" + }, + "color_scheme": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The color scheme of the image to be generated.", + "title": "Color Scheme" + } + }, + "x-fal-order-properties": [ + "composition", + "color_scheme", + "mood_atmosphere" + ] + }, + "Lighting": { + "title": "Lighting", + "type": "object", + "properties": { + "shadows": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The shadows in the image to be generated.", + "title": "Shadows" + }, + "conditions": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The conditions of the lighting in the image to be generated.", + "title": "Conditions" + }, + "direction": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The direction of the lighting in the image to be generated.", + "title": "Direction" + } + }, + "x-fal-order-properties": [ + "conditions", + "direction", + "shadows" + ] + } + } + }, + "paths": { + "/bria/fibo-lite/generate/structured_prompt/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/fibo-lite/generate/structured_prompt/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/bria/fibo-lite/generate/structured_prompt": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FiboLiteGenerateStructured_promptInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/fibo-lite/generate/structured_prompt/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FiboLiteGenerateStructured_promptOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "bria/fibo-lite/generate/structured_prompt/lite", + "metadata": { + "display_name": "Fibo Lite", + "category": "text-to-json", + "description": "Structured Prompt Generation endpoint for Fibo-Lite, Bria's SOTA Open source model", + "status": "active", + "tags": [ + "bria", + "structured-prompting", + "" + ], + "updated_at": "2026-01-26T21:41:31.810Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8affc0/46xH4G2e60X5umkgM0rrW_da15955ad9b04677b5f071d149bf4d0b.jpg", + "model_url": "https://fal.run/bria/fibo-lite/generate/structured_prompt/lite", + "license_type": "commercial", + "date": "2026-01-19T10:34:58.616Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for bria/fibo-lite/generate/structured_prompt/lite", + "version": "1.0.0", + "description": "The OpenAPI schema for the bria/fibo-lite/generate/structured_prompt/lite queue.", + "x-fal-metadata": { + "endpointId": "bria/fibo-lite/generate/structured_prompt/lite", + "category": "text-to-json", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8affc0/46xH4G2e60X5umkgM0rrW_da15955ad9b04677b5f071d149bf4d0b.jpg", + "playgroundUrl": "https://fal.ai/models/bria/fibo-lite/generate/structured_prompt/lite", + "documentationUrl": "https://fal.ai/models/bria/fibo-lite/generate/structured_prompt/lite/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + 
"request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FiboLiteGenerateStructured_promptLiteInput": { + "title": "StructuredPromptModel", + "type": "object", + "properties": { + "prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Prompt", + "description": "Prompt for image generation.", + "examples": [ + "A hyper-detailed, ultra-fluffy owl sitting in the trees at night, looking directly at the camera with wide, adorable, expressive eyes. Its feathers are soft and voluminous, catching the cool moonlight with subtle silver highlights. The owl's gaze is curious and full of charm, giving it a whimsical, storybook-like personality." + ] + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility.", + "default": 5555 + }, + "structured_prompt": { + "anyOf": [ + { + "$ref": "#/components/schemas/bria_fibo-vlm_StructuredPrompt" + }, + { + "type": "null" + } + ], + "description": "The structured prompt to generate an image from." + }, + "image_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Image Url", + "description": "Reference image (file or URL)." + } + }, + "x-fal-order-properties": [ + "prompt", + "structured_prompt", + "image_url", + "seed" + ] + }, + "FiboLiteGenerateStructured_promptLiteOutput": {}, + "bria_fibo-vlm_StructuredPrompt": { + "title": "StructuredPrompt", + "type": "object", + "properties": { + "background_setting": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Background Setting", + "description": "The background setting of the image to be generated." + }, + "artistic_style": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Artistic Style", + "description": "The artistic style of the image to be generated." + }, + "style_medium": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Style Medium", + "description": "The style medium of the image to be generated." + }, + "text_render": { + "anyOf": [ + { + "type": "array", + "items": {} + }, + { + "type": "null" + } + ], + "title": "Text Render", + "description": "A list of text to be rendered in the image.", + "default": [] + }, + "objects": { + "anyOf": [ + { + "type": "array", + "items": { + "$ref": "#/components/schemas/PromptObject" + } + }, + { + "type": "null" + } + ], + "title": "Objects", + "description": "A list of objects in the image to be generated, along with their attributes and relationships to other objects in the image.", + "default": [] + }, + "context": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Context", + "description": "The context of the image to be generated." 
+ }, + "photographic_characteristics": { + "anyOf": [ + { + "$ref": "#/components/schemas/PhotographicCharacteristics" + }, + { + "type": "null" + } + ], + "description": "The photographic characteristics of the image to be generated." + }, + "aesthetics": { + "anyOf": [ + { + "$ref": "#/components/schemas/bria_fibo-vlm_Aesthetics" + }, + { + "type": "null" + } + ], + "description": "The aesthetics of the image to be generated." + }, + "lighting": { + "anyOf": [ + { + "$ref": "#/components/schemas/Lighting" + }, + { + "type": "null" + } + ], + "description": "The lighting of the image to be generated." + }, + "short_description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Short Description", + "description": "A short description of the image to be generated." + } + }, + "x-fal-order-properties": [ + "short_description", + "objects", + "background_setting", + "lighting", + "aesthetics", + "photographic_characteristics", + "style_medium", + "text_render", + "context", + "artistic_style" + ] + }, + "PromptObject": { + "title": "PromptObject", + "type": "object", + "properties": { + "relative_size": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The relative size of the object in the image.", + "title": "Relative Size" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "A description of the object to be generated.", + "title": "Description" + }, + "skin_tone_and_texture": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The skin tone and texture of the object in the image.", + "title": "Skin Tone And Texture" + }, + "appearance_details": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The appearance details of the object.", + "title": "Appearance Details" + }, + "number_of_objects": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The number of objects in the image.", + "title": "Number Of Objects" + }, + "pose": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The pose of the object in the image.", + "title": "Pose" + }, + "expression": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The expression of the object in the image.", + "title": "Expression" + }, + "shape_and_color": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The shape and color of the object.", + "title": "Shape And Color" + }, + "relationship": { + "description": "The relationship of the object to other objects in the image.", + "type": "string", + "title": "Relationship" + }, + "texture": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The texture of the object.", + "title": "Texture" + }, + "gender": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The gender of the object in the image.", + "title": "Gender" + }, + "clothing": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The clothing of the object in the image.", + "title": "Clothing" + }, + "location": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The location of the object in the image.", + "title": "Location" + }, + "orientation": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], 
+ "description": "The orientation of the object in the image.", + "title": "Orientation" + }, + "action": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The action of the object in the image.", + "title": "Action" + } + }, + "x-fal-order-properties": [ + "description", + "location", + "relationship", + "relative_size", + "shape_and_color", + "texture", + "appearance_details", + "number_of_objects", + "pose", + "expression", + "clothing", + "action", + "gender", + "skin_tone_and_texture", + "orientation" + ], + "required": [ + "relationship" + ] + }, + "PhotographicCharacteristics": { + "title": "PhotographicCharacteristics", + "type": "object", + "properties": { + "focus": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The focus in the image to be generated.", + "title": "Focus" + }, + "lens_focal_length": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The focal length of the lens in the image to be generated.", + "title": "Lens Focal Length" + }, + "camera_angle": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The angle of the camera in the image to be generated.", + "title": "Camera Angle" + }, + "depth_of_field": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The depth of field in the image to be generated.", + "title": "Depth Of Field" + } + }, + "x-fal-order-properties": [ + "depth_of_field", + "focus", + "camera_angle", + "lens_focal_length" + ] + }, + "bria_fibo-vlm_Aesthetics": { + "title": "Aesthetics", + "type": "object", + "properties": { + "preference_score": { + "title": "Preference Score", + "type": "string", + "description": "The preference score of the image." + }, + "composition": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Composition", + "description": "The composition of the image to be generated." + }, + "mood_atmosphere": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Mood Atmosphere", + "description": "The mood and atmosphere of the image to be generated." + }, + "aesthetic_score": { + "title": "Aesthetic Score", + "type": "string", + "description": "The aesthetic score of the image." + }, + "color_scheme": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Color Scheme", + "description": "The color scheme of the image to be generated." 
+ } + }, + "x-fal-order-properties": [ + "composition", + "color_scheme", + "mood_atmosphere", + "aesthetic_score", + "preference_score" + ], + "required": [ + "aesthetic_score", + "preference_score" + ] + }, + "Lighting": { + "title": "Lighting", + "type": "object", + "properties": { + "shadows": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The shadows in the image to be generated.", + "title": "Shadows" + }, + "conditions": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The conditions of the lighting in the image to be generated.", + "title": "Conditions" + }, + "direction": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The direction of the lighting in the image to be generated.", + "title": "Direction" + } + }, + "x-fal-order-properties": [ + "conditions", + "direction", + "shadows" + ] + } + } + }, + "paths": { + "/bria/fibo-lite/generate/structured_prompt/lite/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/fibo-lite/generate/structured_prompt/lite/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/bria/fibo-lite/generate/structured_prompt/lite": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FiboLiteGenerateStructured_promptLiteInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/fibo-lite/generate/structured_prompt/lite/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FiboLiteGenerateStructured_promptLiteOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "bria/fibo/generate/structured_prompt", + "metadata": { + "display_name": "Fibo", + "category": "text-to-json", + "description": "Structured Prompt Generation endpoint for Fibo, Bria's SOTA Open source model", + "status": "active", + "tags": [ + "bria", + "fibo", + "structured-prompting" + ], + "updated_at": "2026-01-26T21:42:29.977Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/kangaroo/0DWtoqgo_Rli062CkYFDy_1329a7c4a855435c94b1e9d7169fe405.jpg", + "model_url": "https://fal.run/bria/fibo/generate/structured_prompt", + "license_type": "commercial", + "date": "2025-10-29T15:47:29.106Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for bria/fibo/generate/structured_prompt", + "version": "1.0.0", + "description": "The OpenAPI schema for the bria/fibo/generate/structured_prompt queue.", + "x-fal-metadata": { + "endpointId": "bria/fibo/generate/structured_prompt", + "category": "text-to-json", + "thumbnailUrl": "https://v3b.fal.media/files/b/kangaroo/0DWtoqgo_Rli062CkYFDy_1329a7c4a855435c94b1e9d7169fe405.jpg", + "playgroundUrl": "https://fal.ai/models/bria/fibo/generate/structured_prompt", + "documentationUrl": "https://fal.ai/models/bria/fibo/generate/structured_prompt/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FiboGenerateStructured_promptInput": { + "title": "StructuredPromptModel", + "type": "object", + "properties": { + "prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Prompt", + "examples": [ + "A hyper-detailed, ultra-fluffy owl sitting in the trees at night, looking directly at the camera with wide, adorable, expressive eyes. Its feathers are soft and voluminous, catching the cool moonlight with subtle silver highlights. The owl’s gaze is curious and full of charm, giving it a whimsical, storybook-like personality." + ], + "description": "Prompt for image generation." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility.", + "default": 5555 + }, + "structured_prompt": { + "anyOf": [ + { + "$ref": "#/components/schemas/StructuredPrompt" + }, + { + "type": "null" + } + ], + "description": "The structured prompt to generate an image from." + }, + "image_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Image Url", + "description": "Reference image (file or URL)." + } + }, + "x-fal-order-properties": [ + "prompt", + "structured_prompt", + "image_url", + "seed" + ] + }, + "FiboGenerateStructured_promptOutput": {}, + "StructuredPrompt": { + "title": "StructuredPrompt", + "type": "object", + "properties": { + "background_setting": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Background Setting", + "description": "The background setting of the image to be generated." + }, + "artistic_style": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Artistic Style", + "description": "The artistic style of the image to be generated." + }, + "aesthetics": { + "anyOf": [ + { + "$ref": "#/components/schemas/Aesthetics" + }, + { + "type": "null" + } + ], + "description": "The aesthetics of the image to be generated." + }, + "text_render": { + "anyOf": [ + { + "type": "array", + "items": {} + }, + { + "type": "null" + } + ], + "title": "Text Render", + "description": "A list of text to be rendered in the image.", + "default": [] + }, + "objects": { + "anyOf": [ + { + "type": "array", + "items": { + "$ref": "#/components/schemas/PromptObject" + } + }, + { + "type": "null" + } + ], + "title": "Objects", + "description": "A list of objects in the image to be generated, along with their attributes and relationships to other objects in the image.", + "default": [] + }, + "style_medium": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Style Medium", + "description": "The style medium of the image to be generated." + }, + "photographic_characteristics": { + "anyOf": [ + { + "$ref": "#/components/schemas/PhotographicCharacteristics" + }, + { + "type": "null" + } + ], + "description": "The photographic characteristics of the image to be generated." + }, + "context": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Context", + "description": "The context of the image to be generated." + }, + "lighting": { + "anyOf": [ + { + "$ref": "#/components/schemas/Lighting" + }, + { + "type": "null" + } + ], + "description": "The lighting of the image to be generated." + }, + "short_description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Short Description", + "description": "A short description of the image to be generated." 
+ } + }, + "x-fal-order-properties": [ + "short_description", + "objects", + "background_setting", + "lighting", + "aesthetics", + "photographic_characteristics", + "style_medium", + "text_render", + "context", + "artistic_style" + ] + }, + "Aesthetics": { + "title": "Aesthetics", + "type": "object", + "properties": { + "composition": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Composition", + "description": "The composition of the image to be generated." + }, + "mood_atmosphere": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Mood Atmosphere", + "description": "The mood and atmosphere of the image to be generated." + }, + "color_scheme": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Color Scheme", + "description": "The color scheme of the image to be generated." + } + }, + "x-fal-order-properties": [ + "composition", + "color_scheme", + "mood_atmosphere" + ] + }, + "PromptObject": { + "title": "PromptObject", + "type": "object", + "properties": { + "clothing": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Clothing", + "description": "The clothing of the object in the image." + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Description", + "description": "A description of the object to be generated." + }, + "skin_tone_and_texture": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Skin Tone And Texture", + "description": "The skin tone and texture of the object in the image." + }, + "appearance_details": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Appearance Details", + "description": "The appearance details of the object." + }, + "number_of_objects": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Number Of Objects", + "description": "The number of objects in the image." + }, + "expression": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Expression", + "description": "The expression of the object in the image." + }, + "pose": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Pose", + "description": "The pose of the object in the image." + }, + "shape_and_color": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Shape And Color", + "description": "The shape and color of the object." + }, + "relationship": { + "title": "Relationship", + "type": "string", + "description": "The relationship of the object to other objects in the image." + }, + "texture": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Texture", + "description": "The texture of the object." + }, + "gender": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Gender", + "description": "The gender of the object in the image." + }, + "relative_size": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Relative Size", + "description": "The relative size of the object in the image." + }, + "location": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Location", + "description": "The location of the object in the image." 
+ }, + "orientation": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Orientation", + "description": "The orientation of the object in the image." + }, + "action": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Action", + "description": "The action of the object in the image." + } + }, + "x-fal-order-properties": [ + "description", + "location", + "relationship", + "relative_size", + "shape_and_color", + "texture", + "appearance_details", + "number_of_objects", + "pose", + "expression", + "clothing", + "action", + "gender", + "skin_tone_and_texture", + "orientation" + ], + "required": [ + "relationship" + ] + }, + "PhotographicCharacteristics": { + "title": "PhotographicCharacteristics", + "type": "object", + "properties": { + "focus": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Focus", + "description": "The focus in the image to be generated." + }, + "lens_focal_length": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Lens Focal Length", + "description": "The focal length of the lens in the image to be generated." + }, + "camera_angle": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Camera Angle", + "description": "The angle of the camera in the image to be generated." + }, + "depth_of_field": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Depth Of Field", + "description": "The depth of field in the image to be generated." + } + }, + "x-fal-order-properties": [ + "depth_of_field", + "focus", + "camera_angle", + "lens_focal_length" + ] + }, + "Lighting": { + "title": "Lighting", + "type": "object", + "properties": { + "shadows": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Shadows", + "description": "The shadows in the image to be generated." + }, + "conditions": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Conditions", + "description": "The conditions of the lighting in the image to be generated." + }, + "direction": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Direction", + "description": "The direction of the lighting in the image to be generated." + } + }, + "x-fal-order-properties": [ + "conditions", + "direction", + "shadows" + ] + } + } + }, + "paths": { + "/bria/fibo/generate/structured_prompt/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/fibo/generate/structured_prompt/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/bria/fibo/generate/structured_prompt": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FiboGenerateStructured_promptInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/fibo/generate/structured_prompt/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FiboGenerateStructured_promptOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/packages/typescript/ai-fal/json/fal.models.text-to-speech.json b/packages/typescript/ai-fal/json/fal.models.text-to-speech.json new file mode 100644 index 00000000..e5991ef7 --- /dev/null +++ b/packages/typescript/ai-fal/json/fal.models.text-to-speech.json @@ -0,0 +1,11602 @@ +{ + "generated_at": "2026-01-28T02:51:51.866Z", + "total_models": 25, + "category": "text-to-speech", + "models": [ + { + "endpoint_id": "fal-ai/qwen-3-tts/voice-design/1.7b", + "metadata": { + "display_name": "Qwen 3 TTS - Voice Design [1.7B]", + "category": "text-to-speech", + "description": "Create custom voices using Qwen3-TTS Voice Design model and later use Clone Voice model to create your own voices!", + "status": "active", + "tags": [ + "text-to-speech", + "voice-design" + ], + "updated_at": "2026-01-26T21:41:25.125Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8bf564/hAR_lGabCTI37uWDVwiAp_64495f3b4ba447e7912cfd327b2cd0f1.jpg", + "model_url": "https://fal.run/fal-ai/qwen-3-tts/voice-design/1.7b", + "license_type": "commercial", + "date": "2026-01-26T16:28:35.599Z", + "group": { + "key": "qwen-3-tts", + "label": "Voice-Design [1.7B]" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-3-tts/voice-design/1.7b", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-3-tts/voice-design/1.7b queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-3-tts/voice-design/1.7b", + "category": "text-to-speech", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8bf564/hAR_lGabCTI37uWDVwiAp_64495f3b4ba447e7912cfd327b2cd0f1.jpg", + "playgroundUrl": 
"https://fal.ai/models/fal-ai/qwen-3-tts/voice-design/1.7b", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-3-tts/voice-design/1.7b/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Qwen3TtsVoiceDesign17bInput": { + "title": "Qwen3DesignVoiceInput", + "type": "object", + "properties": { + "repetition_penalty": { + "minimum": 0, + "description": "Penalty to reduce repeated tokens/codes.", + "type": "number", + "title": "Repetition Penalty", + "default": 1.05 + }, + "subtalker_top_k": { + "minimum": 0, + "description": "Top-k for sub-talker sampling.", + "type": "integer", + "title": "Subtalker Top K", + "default": 50 + }, + "top_p": { + "minimum": 0, + "maximum": 1, + "type": "number", + "description": "Top-p sampling parameter.", + "title": "Top P", + "default": 1 + }, + "prompt": { + "examples": [ + "Speak in an incredulous tone, but with a hint of panic beginning to creep into your voice." + ], + "description": "Optional prompt to guide the style of the generated speech.", + "type": "string", + "title": "Prompt" + }, + "max_new_tokens": { + "minimum": 1, + "maximum": 8192, + "type": "integer", + "description": "Maximum number of new codec tokens to generate.", + "title": "Max New Tokens", + "default": 200 + }, + "text": { + "examples": [ + "It's in the top drawer... wait, it's empty? No way, that's impossible! I'm sure I put it there!" 
+ ], + "description": "The text to be converted to speech.", + "type": "string", + "title": "Text" + }, + "language": { + "examples": [ + "English" + ], + "description": "The language of the voice to be designed.", + "type": "string", + "title": "Language", + "enum": [ + "Auto", + "English", + "Chinese", + "Spanish", + "French", + "German", + "Italian", + "Japanese", + "Korean", + "Portuguese", + "Russian" + ], + "default": "Auto" + }, + "top_k": { + "minimum": 0, + "description": "Top-k sampling parameter.", + "type": "integer", + "title": "Top K", + "default": 50 + }, + "subtalker_dosample": { + "description": "Sampling switch for the sub-talker.", + "type": "boolean", + "title": "Subtalker Dosample", + "default": true + }, + "subtalker_temperature": { + "minimum": 0, + "maximum": 1, + "type": "number", + "description": "Temperature for sub-talker sampling.", + "title": "Subtalker Temperature", + "default": 0.9 + }, + "subtalker_top_p": { + "minimum": 0, + "maximum": 1, + "type": "number", + "description": "Top-p for sub-talker sampling.", + "title": "Subtalker Top P", + "default": 1 + }, + "temperature": { + "minimum": 0, + "maximum": 1, + "type": "number", + "description": "Sampling temperature; higher => more random.", + "title": "Temperature", + "default": 0.9 + } + }, + "x-fal-order-properties": [ + "text", + "language", + "prompt", + "top_k", + "top_p", + "temperature", + "repetition_penalty", + "subtalker_dosample", + "subtalker_top_k", + "subtalker_top_p", + "subtalker_temperature", + "max_new_tokens" + ], + "required": [ + "text", + "prompt" + ] + }, + "Qwen3TtsVoiceDesign17bOutput": { + "title": "Qwen3DesignVoiceOutput", + "type": "object", + "properties": { + "audio": { + "examples": [ + { + "duration": 7.736875, + "file_name": "rHFLVApz9Rdenm20UvnGf_FtjmMLBV.mp3", + "sample_rate": 24000, + "content_type": "audio/mpeg", + "url": "https://storage.googleapis.com/falserverless/example_outputs/qwen3-tts/design_out.mp3", + "channels": 1 + } + ], + "description": "The generated speech audio file.", + "title": "Audio", + "allOf": [ + { + "$ref": "#/components/schemas/AudioFile" + } + ] + } + }, + "x-fal-order-properties": [ + "audio" + ], + "required": [ + "audio" + ] + }, + "AudioFile": { + "title": "AudioFile", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "duration": { + "description": "The duration of the audio", + "type": "number", + "title": "Duration" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + }, + "channels": { + "description": "The number of channels in the audio", + "type": "integer", + "title": "Channels" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "sample_rate": { + "description": "The sample rate of the audio", + "type": "integer", + "title": "Sample Rate" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "bitrate": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + } + ], + "description": "The bitrate of the audio (e.g., '192k' or 192000)", + "title": "Bitrate" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "duration", + "channels", + "sample_rate", + "bitrate" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-3-tts/voice-design/1.7b/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-3-tts/voice-design/1.7b/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-3-tts/voice-design/1.7b": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Qwen3TtsVoiceDesign17bInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-3-tts/voice-design/1.7b/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Qwen3TtsVoiceDesign17bOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-3-tts/text-to-speech/1.7b", + "metadata": { + "display_name": "Qwen 3 TTS - Text to Speech [1.7B]", + "category": "text-to-speech", + "description": "Bring speech to your texts using Qwen3-TTS Custom-Voice model with pre-trained voices or use your custom voice with Qwen3-TTS Clone Voice model", + "status": "active", + "tags": [ + "text-to-speech" + ], + "updated_at": "2026-01-26T21:41:25.253Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8bf558/-9L7FupJfteEP98jc8XPM_825292b8601e47b98a34ab5e5f6b3870.jpg", + "model_url": "https://fal.run/fal-ai/qwen-3-tts/text-to-speech/1.7b", + "license_type": "commercial", + "date": "2026-01-26T16:26:09.816Z", + "group": { + "key": "qwen-3-tts", + "label": "Text to Speech [1.7B]" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-3-tts/text-to-speech/1.7b", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-3-tts/text-to-speech/1.7b queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-3-tts/text-to-speech/1.7b", + "category": "text-to-speech", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8bf558/-9L7FupJfteEP98jc8XPM_825292b8601e47b98a34ab5e5f6b3870.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-3-tts/text-to-speech/1.7b", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-3-tts/text-to-speech/1.7b/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Qwen3TtsTextToSpeech17bInput": { + "title": "Qwen3TTSInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Very happy." + ], + "description": "Optional prompt to guide the style of the generated speech. This prompt will be ignored if a speaker embedding is provided.", + "type": "string", + "title": "Prompt" + }, + "speaker_voice_embedding_file_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_outputs/qwen3-tts/clone_out.safetensors" + ], + "description": "URL to a speaker embedding file in safetensors format, from `fal-ai/qwen-3-tts/clone-voice` endpoint. If provided, the TTS model will use the cloned voice for synthesis instead of the predefined voices.", + "type": "string", + "title": "Speaker Voice Embedding File Url" + }, + "top_p": { + "minimum": 0, + "maximum": 1, + "type": "number", + "description": "Top-p sampling parameter.", + "title": "Top P", + "default": 1 + }, + "repetition_penalty": { + "minimum": 0, + "description": "Penalty to reduce repeated tokens/codes.", + "type": "number", + "title": "Repetition Penalty", + "default": 1.05 + }, + "subtalker_temperature": { + "minimum": 0, + "maximum": 1, + "type": "number", + "description": "Temperature for sub-talker sampling.", + "title": "Subtalker Temperature", + "default": 0.9 + }, + "top_k": { + "minimum": 0, + "description": "Top-k sampling parameter.", + "type": "integer", + "title": "Top K", + "default": 50 + }, + "voice": { + "examples": [ + "Vivian" + ], + "description": "The voice to be used for speech synthesis, will be ignored if a speaker embedding is provided. Check out the **[documentation](https://github.com/QwenLM/Qwen3-TTS/tree/main?tab=readme-ov-file#custom-voice-generate)** for each voice's details and which language they primarily support.", + "type": "string", + "title": "Voice", + "enum": [ + "Vivian", + "Serena", + "Uncle_Fu", + "Dylan", + "Eric", + "Ryan", + "Aiden", + "Ono_Anna", + "Sohee" + ] + }, + "reference_text": { + "examples": [ + "Okay. Yeah. I resent you. I love you. I respect you. But you know what? You blew it! And it is all thanks to you." + ], + "description": "Optional reference text that was used when creating the speaker embedding. Providing this can improve synthesis quality when using a cloned voice.", + "type": "string", + "title": "Reference Text" + }, + "temperature": { + "minimum": 0, + "maximum": 1, + "type": "number", + "description": "Sampling temperature; higher => more random.", + "title": "Temperature", + "default": 0.9 + }, + "language": { + "examples": [ + "English" + ], + "description": "The language of the voice.", + "type": "string", + "title": "Language", + "enum": [ + "Auto", + "English", + "Chinese", + "Spanish", + "French", + "German", + "Italian", + "Japanese", + "Korean", + "Portuguese", + "Russian" + ], + "default": "Auto" + }, + "subtalker_top_k": { + "minimum": 0, + "description": "Top-k for sub-talker sampling.", + "type": "integer", + "title": "Subtalker Top K", + "default": 50 + }, + "text": { + "examples": [ + "I am solving the equation: x = [-b ± √(b²-4ac)] / 2a? Nobody can — it's a disaster (◍•͈⌔•͈◍), very sad!" 
+ ], + "description": "The text to be converted to speech.", + "type": "string", + "title": "Text" + }, + "max_new_tokens": { + "minimum": 1, + "maximum": 8192, + "type": "integer", + "description": "Maximum number of new codec tokens to generate.", + "title": "Max New Tokens", + "default": 200 + }, + "subtalker_dosample": { + "description": "Sampling switch for the sub-talker.", + "type": "boolean", + "title": "Subtalker Dosample", + "default": true + }, + "subtalker_top_p": { + "minimum": 0, + "maximum": 1, + "type": "number", + "description": "Top-p for sub-talker sampling.", + "title": "Subtalker Top P", + "default": 1 + } + }, + "x-fal-order-properties": [ + "text", + "prompt", + "voice", + "language", + "speaker_voice_embedding_file_url", + "reference_text", + "top_k", + "top_p", + "temperature", + "repetition_penalty", + "subtalker_dosample", + "subtalker_top_k", + "subtalker_top_p", + "subtalker_temperature", + "max_new_tokens" + ], + "required": [ + "text" + ] + }, + "Qwen3TtsTextToSpeech17bOutput": { + "title": "Qwen3TTSOutput", + "type": "object", + "properties": { + "audio": { + "examples": [ + { + "duration": 13.025333333333334, + "file_name": "n5Ynr2aFKUPw1QjLYjB_4_XEdHoD1K.mp3", + "sample_rate": 24000, + "content_type": "audio/mpeg", + "url": "https://storage.googleapis.com/falserverless/example_outputs/qwen3-tts/tts_out.mp3", + "channels": 1 + } + ], + "description": "The generated speech audio file.", + "title": "Audio", + "allOf": [ + { + "$ref": "#/components/schemas/AudioFile" + } + ] + } + }, + "x-fal-order-properties": [ + "audio" + ], + "required": [ + "audio" + ] + }, + "AudioFile": { + "title": "AudioFile", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "duration": { + "description": "The duration of the audio", + "type": "number", + "title": "Duration" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + }, + "channels": { + "description": "The number of channels in the audio", + "type": "integer", + "title": "Channels" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "sample_rate": { + "description": "The sample rate of the audio", + "type": "integer", + "title": "Sample Rate" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "bitrate": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + } + ], + "description": "The bitrate of the audio (e.g., '192k' or 192000)", + "title": "Bitrate" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "duration", + "channels", + "sample_rate", + "bitrate" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-3-tts/text-to-speech/1.7b/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-3-tts/text-to-speech/1.7b/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-3-tts/text-to-speech/1.7b": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Qwen3TtsTextToSpeech17bInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-3-tts/text-to-speech/1.7b/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Qwen3TtsTextToSpeech17bOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-3-tts/text-to-speech/0.6b", + "metadata": { + "display_name": "Qwen 3 TTS - Text to Speech [0.6B]", + "category": "text-to-speech", + "description": "Bring speech to your texts using Qwen3-TTS Custom-Voice model with pre-trained voices or use your custom voice with Qwen3-TTS Clone Voice model", + "status": "active", + "tags": [ + "text-to-speech" + ], + "updated_at": "2026-01-26T21:41:26.540Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8bf550/lVS8zCathHyxFPybUe2dc_4afab9bbcdfe49379b8e6d3c05970958.jpg", + "model_url": "https://fal.run/fal-ai/qwen-3-tts/text-to-speech/0.6b", + "license_type": "commercial", + "date": "2026-01-26T16:24:48.185Z", + "group": { + "key": "qwen-3-tts", + "label": "Text to Speech [0.6B]" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-3-tts/text-to-speech/0.6b", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-3-tts/text-to-speech/0.6b queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-3-tts/text-to-speech/0.6b", + "category": "text-to-speech", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8bf550/lVS8zCathHyxFPybUe2dc_4afab9bbcdfe49379b8e6d3c05970958.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-3-tts/text-to-speech/0.6b", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-3-tts/text-to-speech/0.6b/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Qwen3TtsTextToSpeech06bInput": { + "title": "Qwen3TTSInput06b", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Very happy." + ], + "description": "Optional prompt to guide the style of the generated speech. This prompt will be ignored if a speaker embedding is provided.", + "type": "string", + "title": "Prompt" + }, + "speaker_voice_embedding_file_url": { + "description": "URL to a speaker embedding file in safetensors format, from `fal-ai/qwen-3-tts/clone-voice/0.6b` endpoint. If provided, the TTS model will use the cloned voice for synthesis instead of the predefined voices.", + "type": "string", + "title": "Speaker Voice Embedding File Url" + }, + "top_p": { + "minimum": 0, + "maximum": 1, + "type": "number", + "description": "Top-p sampling parameter.", + "title": "Top P", + "default": 1 + }, + "repetition_penalty": { + "minimum": 0, + "description": "Penalty to reduce repeated tokens/codes.", + "type": "number", + "title": "Repetition Penalty", + "default": 1.05 + }, + "subtalker_temperature": { + "minimum": 0, + "maximum": 1, + "type": "number", + "description": "Temperature for sub-talker sampling.", + "title": "Subtalker Temperature", + "default": 0.9 + }, + "top_k": { + "minimum": 0, + "description": "Top-k sampling parameter.", + "type": "integer", + "title": "Top K", + "default": 50 + }, + "voice": { + "examples": [ + "Vivian" + ], + "description": "The voice to be used for speech synthesis, will be ignored if a speaker embedding is provided. Check out the **[documentation](https://github.com/QwenLM/Qwen3-TTS/tree/main?tab=readme-ov-file#custom-voice-generate)** for each voice's details and which language they primarily support.", + "type": "string", + "title": "Voice", + "enum": [ + "Vivian", + "Serena", + "Uncle_Fu", + "Dylan", + "Eric", + "Ryan", + "Aiden", + "Ono_Anna", + "Sohee" + ] + }, + "reference_text": { + "description": "Optional reference text that was used when creating the speaker embedding. Providing this can improve synthesis quality when using a cloned voice.", + "type": "string", + "title": "Reference Text" + }, + "temperature": { + "minimum": 0, + "maximum": 1, + "type": "number", + "description": "Sampling temperature; higher => more random.", + "title": "Temperature", + "default": 0.9 + }, + "language": { + "examples": [ + "English" + ], + "description": "The language of the voice.", + "type": "string", + "title": "Language", + "enum": [ + "Auto", + "English", + "Chinese", + "Spanish", + "French", + "German", + "Italian", + "Japanese", + "Korean", + "Portuguese", + "Russian" + ], + "default": "Auto" + }, + "subtalker_top_k": { + "minimum": 0, + "description": "Top-k for sub-talker sampling.", + "type": "integer", + "title": "Subtalker Top K", + "default": 50 + }, + "text": { + "examples": [ + "I feel like I'm taking crazy pills! How can something be both a square and a circle at the same time? It defies all logic!" 
+ ], + "description": "The text to be converted to speech.", + "type": "string", + "title": "Text" + }, + "max_new_tokens": { + "minimum": 1, + "maximum": 8192, + "type": "integer", + "description": "Maximum number of new codec tokens to generate.", + "title": "Max New Tokens", + "default": 200 + }, + "subtalker_dosample": { + "description": "Sampling switch for the sub-talker.", + "type": "boolean", + "title": "Subtalker Dosample", + "default": true + }, + "subtalker_top_p": { + "minimum": 0, + "maximum": 1, + "type": "number", + "description": "Top-p for sub-talker sampling.", + "title": "Subtalker Top P", + "default": 1 + } + }, + "x-fal-order-properties": [ + "text", + "prompt", + "voice", + "language", + "speaker_voice_embedding_file_url", + "reference_text", + "top_k", + "top_p", + "temperature", + "repetition_penalty", + "subtalker_dosample", + "subtalker_top_k", + "subtalker_top_p", + "subtalker_temperature", + "max_new_tokens" + ], + "required": [ + "text" + ] + }, + "Qwen3TtsTextToSpeech06bOutput": { + "title": "Qwen3TTSOutput06b", + "type": "object", + "properties": { + "audio": { + "examples": [ + { + "duration": 9.816875, + "file_name": "n6Av3SeD5dFENf9-VmQ1v_is3jLh5h.mp3", + "sample_rate": 24000, + "content_type": "audio/mpeg", + "url": "https://storage.googleapis.com/falserverless/example_outputs/example_outputs/qwen3-tts/tts_out_06b.mp3", + "channels": 1 + } + ], + "description": "The generated speech audio file.", + "title": "Audio", + "allOf": [ + { + "$ref": "#/components/schemas/AudioFile" + } + ] + } + }, + "x-fal-order-properties": [ + "audio" + ], + "required": [ + "audio" + ] + }, + "AudioFile": { + "title": "AudioFile", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "duration": { + "description": "The duration of the audio", + "type": "number", + "title": "Duration" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + }, + "channels": { + "description": "The number of channels in the audio", + "type": "integer", + "title": "Channels" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "sample_rate": { + "description": "The sample rate of the audio", + "type": "integer", + "title": "Sample Rate" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "bitrate": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + } + ], + "description": "The bitrate of the audio (e.g., '192k' or 192000)", + "title": "Bitrate" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "duration", + "channels", + "sample_rate", + "bitrate" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-3-tts/text-to-speech/0.6b/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-3-tts/text-to-speech/0.6b/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-3-tts/text-to-speech/0.6b": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Qwen3TtsTextToSpeech06bInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-3-tts/text-to-speech/0.6b/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Qwen3TtsTextToSpeech06bOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/vibevoice/0.5b", + "metadata": { + "display_name": "Vibevoice", + "category": "text-to-speech", + "description": "Generate long speech snippets fast using Microsoft's powerful TTS.", + "status": "active", + "tags": [ + "vibevoice", + "fast" + ], + "updated_at": "2026-01-26T21:41:51.174Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a87578a/YHiVLig00ZQrC_arc0biD_1f8eb20e928e4bc2b29499c7cb0eae5b.jpg", + "model_url": "https://fal.run/fal-ai/vibevoice/0.5b", + "license_type": "commercial", + "date": "2025-12-17T17:49:53.269Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/vibevoice/0.5b", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/vibevoice/0.5b queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/vibevoice/0.5b", + "category": "text-to-speech", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a87578a/YHiVLig00ZQrC_arc0biD_1f8eb20e928e4bc2b29499c7cb0eae5b.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/vibevoice/0.5b", + "documentationUrl": "https://fal.ai/models/fal-ai/vibevoice/0.5b/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Vibevoice05bInput": { + "x-fal-order-properties": [ + "script", + "speaker", + "seed", + "cfg_scale" + ], + "type": "object", + "properties": { + "script": { + "examples": [ + "VibeVoice is now available on Fal!" 
+ ], + "maxLength": 90000, + "type": "string", + "description": "The script to convert to speech.", + "title": "Script" + }, + "seed": { + "description": "Random seed for reproducible generation.", + "type": "integer", + "title": "Seed" + }, + "speaker": { + "examples": [ + "Frank" + ], + "description": "Voice to use for speaking.", + "type": "string", + "enum": [ + "Frank", + "Wayne", + "Carter", + "Emma", + "Grace", + "Mike" + ], + "title": "Speaker" + }, + "cfg_scale": { + "minimum": 1, + "maximum": 2, + "type": "number", + "title": "CFG Scale", + "description": "CFG (Classifier-Free Guidance) scale for generation. Higher values increase adherence to text.", + "default": 1.3 + } + }, + "description": "Input schema for VibeVoice-0.5b TTS generation", + "title": "VibeVoice0_5bInput", + "required": [ + "script", + "speaker" + ] + }, + "Vibevoice05bOutput": { + "x-fal-order-properties": [ + "audio", + "duration", + "sample_rate", + "generation_time", + "rtf" + ], + "type": "object", + "properties": { + "duration": { + "examples": [ + 9.46 + ], + "description": "Duration of the generated audio in seconds", + "type": "number", + "title": "Duration" + }, + "rtf": { + "examples": [ + 0.53 + ], + "description": "Real-time factor (generation_time / audio_duration). Lower is better.", + "type": "number", + "title": "Rtf" + }, + "sample_rate": { + "examples": [ + 24000 + ], + "description": "Sample rate of the generated audio", + "type": "integer", + "title": "Sample Rate" + }, + "generation_time": { + "examples": [ + 5.6 + ], + "description": "Time taken to generate the audio in seconds", + "type": "number", + "title": "Generation Time" + }, + "audio": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/vibevoice/0_5b.mp3" + } + ], + "description": "The generated audio file containing the speech", + "title": "Audio", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "description": "Output schema for VibeVoice-0.5b TTS generation", + "title": "VibeVoice_0_5BOutput", + "required": [ + "audio", + "duration", + "sample_rate", + "generation_time", + "rtf" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/vibevoice/0.5b/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/vibevoice/0.5b/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/vibevoice/0.5b": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Vibevoice05bInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/vibevoice/0.5b/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Vibevoice05bOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/maya/batch", + "metadata": { + "display_name": "Maya", + "category": "text-to-speech", + "description": "Maya1 is a state-of-the-art speech model by Maya Research for expressive voice generation, built to capture real human emotion and precise voice design.", + "status": "active", + "tags": [ + "text-to-speech", + "tts" + ], + "updated_at": "2026-01-26T21:41:59.385Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a860f0a/FtJNWhDtad-vpXpr9mWeX_77608b3886eb43eb9a89425ea6253d94.jpg", + "model_url": "https://fal.run/fal-ai/maya/batch", + "license_type": "commercial", + "date": "2025-12-12T22:26:37.028Z", + "group": { + "key": "maya", + "label": "batch" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/maya/batch", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/maya/batch queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/maya/batch", + "category": "text-to-speech", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a860f0a/FtJNWhDtad-vpXpr9mWeX_77608b3886eb43eb9a89425ea6253d94.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/maya/batch", + "documentationUrl": "https://fal.ai/models/fal-ai/maya/batch/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MayaBatchInput": { + "description": "Input schema for batch Maya-1-Voice TTS generation", + "type": "object", + "properties": { + "repetition_penalty": { + "minimum": 1, + "maximum": 2, + "type": "number", + "title": "Repetition Penalty", + "description": "Repetition penalty for all generations.", + "default": 1.1 + }, + "top_p": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Top P", + "description": "Nucleus sampling parameter for all generations.", + "default": 0.9 + }, + "output_format": { + "enum": [ + "wav", + "mp3" + ], + "description": "Output audio format for all generated speech files", + "type": "string", + "examples": [ + "wav", + "mp3" + ], + "title": "Output Format", + "default": "wav" + }, + "texts": { + "description": "List of texts to synthesize into speech. You can embed emotion tags in each text using the format .", + "type": "array", + "minItems": 1, + "maxItems": 100, + "title": "Texts", + "items": { + "type": "string" + } + }, + "prompts": { + "description": "List of voice descriptions for each text. Must match the length of texts list. Each describes the voice/character attributes.", + "type": "array", + "minItems": 1, + "maxItems": 100, + "title": "Prompts", + "items": { + "type": "string" + } + }, + "max_tokens": { + "minimum": 28, + "maximum": 4000, + "type": "integer", + "title": "Max Tokens", + "description": "Maximum SNAC tokens per generation.", + "default": 2000 + }, + "temperature": { + "minimum": 0, + "maximum": 2, + "type": "number", + "title": "Temperature", + "description": "Sampling temperature for all generations.", + "default": 0.4 + }, + "sample_rate": { + "enum": [ + "48 kHz", + "24 kHz" + ], + "description": "Output audio sample rate for all generations. 
48 kHz provides higher quality, 24 kHz is faster.", + "type": "string", + "examples": [ + "48 kHz", + "24 kHz" + ], + "title": "Sample Rate", + "default": "48 kHz" + } + }, + "title": "MayaVoiceBatchInput", + "x-fal-order-properties": [ + "texts", + "prompts", + "temperature", + "top_p", + "max_tokens", + "repetition_penalty", + "sample_rate", + "output_format" + ], + "required": [ + "texts", + "prompts" + ] + }, + "MayaBatchOutput": { + "description": "Output schema for batch Maya-1-Voice TTS generation", + "type": "object", + "properties": { + "average_rtf": { + "examples": [ + 0.15 + ], + "description": "Average real-time factor across all generations", + "type": "number", + "title": "Average Rtf" + }, + "sample_rate": { + "examples": [ + "48 kHz", + "24 kHz" + ], + "description": "Sample rate of all generated audio files", + "type": "string", + "title": "Sample Rate" + }, + "total_generation_time": { + "examples": [ + 5.7 + ], + "description": "Total time taken to generate all audio files in seconds", + "type": "number", + "title": "Total Generation Time" + }, + "audios": { + "description": "List of generated audio files", + "type": "array", + "title": "Audios", + "items": { + "$ref": "#/components/schemas/File" + } + }, + "durations": { + "description": "Duration of each generated audio in seconds", + "type": "array", + "title": "Durations", + "items": { + "type": "number" + } + } + }, + "title": "MayaVoiceBatchOutput", + "x-fal-order-properties": [ + "audios", + "durations", + "sample_rate", + "total_generation_time", + "average_rtf" + ], + "required": [ + "audios", + "durations", + "sample_rate", + "total_generation_time", + "average_rtf" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/maya/batch/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/maya/batch/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/maya/batch": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MayaBatchInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/maya/batch/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MayaBatchOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/maya/stream", + "metadata": { + "display_name": "Maya", + "category": "text-to-speech", + "description": "Maya1 is a state-of-the-art speech model by Maya Research for expressive voice generation, built to capture real human emotion and precise voice design.", + "status": "active", + "tags": [ + "text-to-speech", + "tts" + ], + "updated_at": "2026-01-26T21:41:59.553Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a860e8c/yukdPKr05eoZ3Fx9zp6b0_fcbf051a9f4e4ab9a510e8cc9084098f.jpg", + "model_url": "https://fal.run/fal-ai/maya/stream", + "license_type": "commercial", + "date": "2025-12-12T22:06:21.721Z", + "group": { + "key": "maya", + "label": "stream" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/maya/stream", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/maya/stream queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/maya/stream", + "category": "text-to-speech", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a860e8c/yukdPKr05eoZ3Fx9zp6b0_fcbf051a9f4e4ab9a510e8cc9084098f.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/maya/stream", + "documentationUrl": "https://fal.ai/models/fal-ai/maya/stream/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MayaStreamInput": { + "description": "Input schema for Maya-1-Voice streaming TTS generation", + "type": "object", + "properties": { + "repetition_penalty": { + "minimum": 1, + "maximum": 2, + "type": "number", + "title": "Repetition Penalty", + "description": "Penalty for repeating tokens. Higher values reduce repetition artifacts.", + "default": 1.1 + }, + "prompt": { + "examples": [ + "Realistic male voice in the 30s age with american accent. Normal pitch, warm timbre, conversational pacing, neutral tone delivery at med intensity.", + "Creative, dark_villain character. Male voice in their 40s with british accent. Low pitch, gravelly timbre, slow pacing, angry tone at high intensity." + ], + "description": "Description of the voice/character. Includes attributes like age, accent, pitch, timbre, pacing, tone, and intensity. See examples for format.", + "type": "string", + "title": "Prompt", + "maxLength": 500 + }, + "top_p": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Top P", + "description": "Nucleus sampling parameter. Controls diversity of token selection.", + "default": 0.9 + }, + "text": { + "examples": [ + "Hello world! This is a test of the Maya-1-Voice text-to-speech system.", + "The darkness isn't coming... it's already here!" + ], + "description": "The text to synthesize into speech. You can embed emotion tags anywhere in the text using the format . Available emotions: laugh, laugh_harder, sigh, chuckle, gasp, angry, excited, whisper, cry, scream, sing, snort, exhale, gulp, giggle, sarcastic, curious. Example: 'Hello world! This is amazing!' or 'I can't believe this happened again.'", + "type": "string", + "title": "Text", + "maxLength": 5000 + }, + "output_format": { + "enum": [ + "mp3", + "wav", + "pcm" + ], + "description": "Output audio format. 'mp3' for browser-playable audio, 'wav' for uncompressed audio, 'pcm' for raw PCM (lowest latency, requires client-side decoding).", + "type": "string", + "examples": [ + "mp3", + "wav", + "pcm" + ], + "title": "Output Format", + "default": "mp3" + }, + "max_tokens": { + "minimum": 28, + "maximum": 4000, + "type": "integer", + "title": "Max Tokens", + "description": "Maximum number of SNAC tokens to generate (7 tokens per frame). Controls maximum audio length.", + "default": 2000 + }, + "temperature": { + "minimum": 0, + "maximum": 2, + "type": "number", + "title": "Temperature", + "description": "Sampling temperature. Lower values (0.2-0.5) produce more stable/consistent audio. Higher values add variation.", + "default": 0.4 + }, + "sample_rate": { + "enum": [ + "48 kHz", + "24 kHz" + ], + "description": "Output audio sample rate. 
48 kHz uses upsampling for higher quality audio, 24 kHz is native SNAC output (faster, lower latency).", + "type": "string", + "examples": [ + "48 kHz", + "24 kHz" + ], + "title": "Sample Rate", + "default": "24 kHz" + } + }, + "title": "MayaVoiceStreamingInput", + "x-fal-order-properties": [ + "text", + "prompt", + "temperature", + "top_p", + "max_tokens", + "repetition_penalty", + "sample_rate", + "output_format" + ], + "required": [ + "text", + "prompt" + ] + }, + "MayaStreamOutput": {} + } + }, + "paths": { + "/fal-ai/maya/stream/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/maya/stream/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/maya/stream": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MayaStreamInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/maya/stream/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MayaStreamOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/maya", + "metadata": { + "display_name": "Maya1", + "category": "text-to-speech", + "description": "Maya1 is a state-of-the-art speech model by Maya Research for expressive voice generation, built to capture real human emotion and precise voice design.", + "status": "active", + "tags": [ + "text-to-speech", + "tts" + ], + "updated_at": "2026-01-26T21:42:22.541Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/panda/w_fnH4AqLcG0s0HRVYhQK_755e9d6c2502409fa7cf87f768925dd4.jpg", + "model_url": "https://fal.run/fal-ai/maya", + "license_type": "commercial", + "date": "2025-11-15T01:00:12.000Z", + "group": { + "key": "maya", + "label": "tts" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/maya", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/maya queue.", + 
"x-fal-metadata": { + "endpointId": "fal-ai/maya", + "category": "text-to-speech", + "thumbnailUrl": "https://v3b.fal.media/files/b/panda/w_fnH4AqLcG0s0HRVYhQK_755e9d6c2502409fa7cf87f768925dd4.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/maya", + "documentationUrl": "https://fal.ai/models/fal-ai/maya/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MayaInput": { + "description": "Input schema for Maya-1-Voice TTS generation", + "type": "object", + "properties": { + "repetition_penalty": { + "minimum": 1, + "maximum": 2, + "type": "number", + "title": "Repetition Penalty", + "description": "Penalty for repeating tokens. Higher values reduce repetition artifacts.", + "default": 1.1 + }, + "prompt": { + "examples": [ + "Realistic male voice in the 30s age with american accent. Normal pitch, warm timbre, conversational pacing, neutral tone delivery at med intensity.", + "Creative, dark_villain character. Male voice in their 40s with british accent. Low pitch, gravelly timbre, slow pacing, angry tone at high intensity." + ], + "description": "Description of the voice/character. Includes attributes like age, accent, pitch, timbre, pacing, tone, and intensity. See examples for format.", + "type": "string", + "title": "Prompt", + "maxLength": 500 + }, + "top_p": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Top P", + "description": "Nucleus sampling parameter. Controls diversity of token selection.", + "default": 0.9 + }, + "text": { + "examples": [ + "Hello world! This is a test of the Maya-1-Voice text-to-speech system.", + "The darkness isn't coming... it's already here!", + "That's hilarious! I can't stop thinking about it!", + " I have a secret to tell you. You won't believe what happened!" + ], + "description": "The text to synthesize into speech. You can embed emotion tags anywhere in the text using the format . Available emotions: laugh, laugh_harder, sigh, chuckle, gasp, angry, excited, whisper, cry, scream, sing, snort, exhale, gulp, giggle, sarcastic, curious. Example: 'Hello world! This is amazing!' or 'I can't believe this happened again.'", + "type": "string", + "title": "Text", + "maxLength": 5000 + }, + "output_format": { + "enum": [ + "wav", + "mp3" + ], + "description": "Output audio format for the generated speech", + "type": "string", + "examples": [ + "wav", + "mp3" + ], + "title": "Output Format", + "default": "wav" + }, + "max_tokens": { + "minimum": 28, + "maximum": 4000, + "type": "integer", + "title": "Max Tokens", + "description": "Maximum number of SNAC tokens to generate (7 tokens per frame). 
Controls maximum audio length.", + "default": 2000 + }, + "temperature": { + "minimum": 0, + "maximum": 2, + "type": "number", + "title": "Temperature", + "description": "Sampling temperature. Lower values (0.2-0.5) produce more stable/consistent audio. Higher values add variation.", + "default": 0.4 + }, + "sample_rate": { + "enum": [ + "48 kHz", + "24 kHz" + ], + "description": "Output audio sample rate. 48 kHz provides higher quality audio, 24 kHz is faster.", + "type": "string", + "examples": [ + "48 kHz", + "24 kHz" + ], + "title": "Sample Rate", + "default": "48 kHz" + } + }, + "title": "MayaVoiceInput", + "x-fal-order-properties": [ + "text", + "prompt", + "temperature", + "top_p", + "max_tokens", + "repetition_penalty", + "sample_rate", + "output_format" + ], + "required": [ + "text", + "prompt" + ] + }, + "MayaOutput": { + "description": "Output schema for Maya-1-Voice TTS generation", + "type": "object", + "properties": { + "rtf": { + "examples": [ + 0.51 + ], + "description": "Real-time factor (generation_time / audio_duration). Lower is better.", + "type": "number", + "title": "Rtf" + }, + "duration": { + "examples": [ + 4.5 + ], + "description": "Duration of the generated audio in seconds", + "type": "number", + "title": "Duration" + }, + "sample_rate": { + "examples": [ + "48 kHz", + "24 kHz" + ], + "description": "Sample rate of the generated audio", + "type": "string", + "title": "Sample Rate" + }, + "generation_time": { + "examples": [ + 2.3 + ], + "description": "Time taken to generate the audio in seconds", + "type": "number", + "title": "Generation Time" + }, + "audio": { + "description": "The generated audio file containing the speech (WAV or MP3 format, 24kHz or 48kHz mono depending on upsampler)", + "$ref": "#/components/schemas/File" + } + }, + "title": "MayaVoiceOutput", + "x-fal-order-properties": [ + "audio", + "duration", + "sample_rate", + "generation_time", + "rtf" + ], + "required": [ + "audio", + "duration", + "sample_rate", + "generation_time", + "rtf" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/maya/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/maya/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/maya": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MayaInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/maya/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MayaOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/minimax/speech-2.6-turbo", + "metadata": { + "display_name": "MiniMax Speech 2.6 [Turbo]", + "category": "text-to-speech", + "description": "Generate speech from text prompts and different voices using the MiniMax Speech-2.6 HD model, which leverages advanced AI techniques to create high-quality text-to-speech.", + "status": "active", + "tags": [ + "text-to-speech" + ], + "updated_at": "2026-01-26T21:42:30.372Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/koala/i6OK72M3MRgCjSurJLlYT_018f5a573af24e69aadfc9a9b559a110.jpg", + "model_url": "https://fal.run/fal-ai/minimax/speech-2.6-turbo", + "license_type": "commercial", + "date": "2025-10-29T14:37:37.968Z", + "group": { + "key": "minimax-speech-26", + "label": "Text To Speech Turbo" + }, + "highlighted": false, + "kind": "inference", + "stream_url": "https://fal.run/fal-ai/minimax/speech-2.6-turbo/stream", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/minimax/speech-2.6-turbo", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/minimax/speech-2.6-turbo queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/minimax/speech-2.6-turbo", + "category": "text-to-speech", + "thumbnailUrl": "https://v3b.fal.media/files/b/koala/i6OK72M3MRgCjSurJLlYT_018f5a573af24e69aadfc9a9b559a110.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/minimax/speech-2.6-turbo", + "documentationUrl": "https://fal.ai/models/fal-ai/minimax/speech-2.6-turbo/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + 
"type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MinimaxSpeech26TurboInput": { + "title": "TextToSpeechTurbo26Request", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Hello world! Welcome MiniMax's new text to speech model <#0.1#> Speech 2.6 Turbo, now available on Fal!" + ], + "maxLength": 10000, + "type": "string", + "title": "Prompt", + "description": "Text to convert to speech. Paragraph breaks should be marked with newline characters. **NOTE**: You can customize speech pauses by adding markers in the form `<#x#>`, where `x` is the pause duration in seconds. Valid range: `[0.01, 99.99]`, up to two decimal places. Pause markers must be placed between speakable text segments and cannot be used consecutively.", + "minLength": 1 + }, + "language_boost": { + "enum": [ + "Chinese", + "Chinese,Yue", + "English", + "Arabic", + "Russian", + "Spanish", + "French", + "Portuguese", + "German", + "Turkish", + "Dutch", + "Ukrainian", + "Vietnamese", + "Indonesian", + "Japanese", + "Italian", + "Korean", + "Thai", + "Polish", + "Romanian", + "Greek", + "Czech", + "Finnish", + "Hindi", + "Bulgarian", + "Danish", + "Hebrew", + "Malay", + "Slovak", + "Swedish", + "Croatian", + "Hungarian", + "Norwegian", + "Slovenian", + "Catalan", + "Nynorsk", + "Afrikaans", + "auto" + ], + "description": "Enhance recognition of specified languages and dialects", + "type": "string", + "title": "Language Boost" + }, + "output_format": { + "enum": [ + "url", + "hex" + ], + "description": "Format of the output content (non-streaming only)", + "type": "string", + "title": "Output Format", + "default": "hex" + }, + "pronunciation_dict": { + "description": "Custom pronunciation dictionary for text replacement", + "title": "Pronunciation Dict", + "allOf": [ + { + "$ref": "#/components/schemas/PronunciationDict" + } + ] + }, + "voice_setting": { + "default": { + "speed": 1, + "vol": 1, + "voice_id": "Wise_Woman", + "pitch": 0, + "english_normalization": false + }, + "description": "Voice configuration settings", + "title": "Voice Setting", + "allOf": [ + { + "$ref": "#/components/schemas/VoiceSetting" + } + ] + }, + "normalization_setting": { + "description": "Loudness normalization settings for the audio", + "title": "Normalization Setting", + "allOf": [ + { + "$ref": "#/components/schemas/LoudnessNormalizationSetting" + } + ] + }, + "audio_setting": { + "description": "Audio configuration settings", + "title": "Audio Setting", + "allOf": [ + { + "$ref": "#/components/schemas/AudioSetting" + } + ] + } + }, + "x-fal-order-properties": [ + "voice_setting", + "audio_setting", + "language_boost", + "output_format", + "pronunciation_dict", + "prompt", + "normalization_setting" + ], + "required": [ + "prompt" + ] + }, + "MinimaxSpeech26TurboOutput": { + "title": "TextToSpeechTurbo26Output", + "type": "object", + "properties": { + "duration_ms": { + "description": "Duration of the audio in milliseconds", + "type": "integer", 
+ "title": "Duration Ms" + }, + "audio": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/minimax-speech-26/speech_26_turbo_out.mp3" + } + ], + "description": "The generated audio file", + "title": "Audio", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "audio", + "duration_ms" + ], + "required": [ + "audio", + "duration_ms" + ] + }, + "PronunciationDict": { + "title": "PronunciationDict", + "type": "object", + "properties": { + "tone_list": { + "description": "List of pronunciation replacements in format ['text/(pronunciation)', ...]. For Chinese, tones are 1-5. Example: ['燕少飞/(yan4)(shao3)(fei1)']", + "type": "array", + "title": "Tone List", + "items": { + "type": "string" + } + } + }, + "x-fal-order-properties": [ + "tone_list" + ] + }, + "VoiceSetting": { + "title": "VoiceSetting", + "type": "object", + "properties": { + "speed": { + "minimum": 0.5, + "description": "Speech speed (0.5-2.0)", + "type": "number", + "maximum": 2, + "title": "Speed", + "default": 1 + }, + "vol": { + "minimum": 0.01, + "description": "Volume (0-10)", + "type": "number", + "maximum": 10, + "title": "Vol", + "default": 1 + }, + "voice_id": { + "examples": [ + "Wise_Woman", + "Friendly_Person", + "Inspirational_girl", + "Deep_Voice_Man", + "Calm_Woman", + "Casual_Guy", + "Lively_Girl", + "Patient_Man", + "Young_Knight", + "Determined_Man", + "Lovely_Girl", + "Decent_Boy", + "Imposing_Manner", + "Elegant_Man", + "Abbess", + "Sweet_Girl_2", + "Exuberant_Girl" + ], + "description": "Predefined voice ID to use for synthesis", + "type": "string", + "title": "Voice Id", + "default": "Wise_Woman" + }, + "pitch": { + "minimum": -12, + "description": "Voice pitch (-12 to 12)", + "type": "integer", + "maximum": 12, + "title": "Pitch", + "default": 0 + }, + "english_normalization": { + "description": "Enables English text normalization to improve number reading performance, with a slight increase in latency", + "type": "boolean", + "title": "English Normalization", + "default": false + }, + "emotion": { + "enum": [ + "happy", + "sad", + "angry", + "fearful", + "disgusted", + "surprised", + "neutral" + ], + "description": "Emotion of the generated speech", + "type": "string", + "title": "Emotion" + } + }, + "x-fal-order-properties": [ + "voice_id", + "speed", + "vol", + "pitch", + "emotion", + "english_normalization" + ] + }, + "LoudnessNormalizationSetting": { + "title": "LoudnessNormalizationSetting", + "type": "object", + "properties": { + "enabled": { + "description": "Enable loudness normalization for the audio", + "type": "boolean", + "title": "Enabled", + "default": true + }, + "target_loudness": { + "minimum": -70, + "description": "Target loudness in LUFS (default -18.0)", + "type": "number", + "maximum": -10, + "title": "Target Loudness", + "default": -18 + }, + "target_range": { + "minimum": 0, + "description": "Target loudness range in LU (default 8.0)", + "type": "number", + "maximum": 20, + "title": "Target Range", + "default": 8 + }, + "target_peak": { + "minimum": -3, + "description": "Target peak level in dBTP (default -0.5).", + "type": "number", + "maximum": 0, + "title": "Target Peak", + "default": -0.5 + } + }, + "x-fal-order-properties": [ + "enabled", + "target_loudness", + "target_range", + "target_peak" + ] + }, + "AudioSetting": { + "title": "AudioSetting", + "type": "object", + "properties": { + "format": { + "enum": [ + "mp3", + "pcm", + "flac" + ], + "description": "Audio format", + "type": 
"string", + "title": "Format", + "default": "mp3" + }, + "sample_rate": { + "enum": [ + 8000, + 16000, + 22050, + 24000, + 32000, + 44100 + ], + "description": "Sample rate of generated audio", + "type": "integer", + "title": "Sample Rate", + "default": 32000 + }, + "channel": { + "enum": [ + 1, + 2 + ], + "description": "Number of audio channels (1=mono, 2=stereo)", + "type": "integer", + "title": "Channel", + "default": 1 + }, + "bitrate": { + "enum": [ + 32000, + 64000, + 128000, + 256000 + ], + "description": "Bitrate of generated audio", + "type": "integer", + "title": "Bitrate", + "default": 128000 + } + }, + "x-fal-order-properties": [ + "sample_rate", + "bitrate", + "format", + "channel" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/minimax/speech-2.6-turbo/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/speech-2.6-turbo/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/minimax/speech-2.6-turbo": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxSpeech26TurboInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/speech-2.6-turbo/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxSpeech26TurboOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/minimax/speech-2.6-hd", + "metadata": { + "display_name": "MiniMax Speech 2.6 [HD]", + "category": "text-to-speech", + "description": "Generate speech from text prompts and different voices using the MiniMax Speech-2.6 HD model, which leverages advanced AI techniques to create high-quality text-to-speech.", + "status": "active", + "tags": [ + "text-to-speech" + ], + "updated_at": "2026-01-26T21:42:30.559Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/penguin/SjejS2pB-SmgrWVM-2UQ9_0b44c0c9f723499297b9f93624fa899b.jpg", + "model_url": "https://fal.run/fal-ai/minimax/speech-2.6-hd", + "license_type": "commercial", + "date": "2025-10-29T14:33:02.910Z", + "group": { + "key": "minimax-speech-26", + "label": "Text To Speech HD" + }, + "highlighted": false, + "kind": "inference", + "stream_url": "https://fal.run/fal-ai/minimax/speech-2.6-hd/stream", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/minimax/speech-2.6-hd", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/minimax/speech-2.6-hd queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/minimax/speech-2.6-hd", + "category": "text-to-speech", + "thumbnailUrl": "https://v3b.fal.media/files/b/penguin/SjejS2pB-SmgrWVM-2UQ9_0b44c0c9f723499297b9f93624fa899b.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/minimax/speech-2.6-hd", + "documentationUrl": "https://fal.ai/models/fal-ai/minimax/speech-2.6-hd/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "MinimaxSpeech26HdInput": { + "title": "TextToSpeechHD26Request", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Hello world! Welcome MiniMax's new text to speech model <#0.1#> Speech 2.6, now available on Fal!" + ], + "maxLength": 10000, + "type": "string", + "title": "Prompt", + "description": "Text to convert to speech. Paragraph breaks should be marked with newline characters. **NOTE**: You can customize speech pauses by adding markers in the form `<#x#>`, where `x` is the pause duration in seconds. Valid range: `[0.01, 99.99]`, up to two decimal places. Pause markers must be placed between speakable text segments and cannot be used consecutively.", + "minLength": 1 + }, + "language_boost": { + "enum": [ + "Chinese", + "Chinese,Yue", + "English", + "Arabic", + "Russian", + "Spanish", + "French", + "Portuguese", + "German", + "Turkish", + "Dutch", + "Ukrainian", + "Vietnamese", + "Indonesian", + "Japanese", + "Italian", + "Korean", + "Thai", + "Polish", + "Romanian", + "Greek", + "Czech", + "Finnish", + "Hindi", + "Bulgarian", + "Danish", + "Hebrew", + "Malay", + "Slovak", + "Swedish", + "Croatian", + "Hungarian", + "Norwegian", + "Slovenian", + "Catalan", + "Nynorsk", + "Afrikaans", + "auto" + ], + "description": "Enhance recognition of specified languages and dialects", + "type": "string", + "title": "Language Boost" + }, + "output_format": { + "enum": [ + "url", + "hex" + ], + "description": "Format of the output content (non-streaming only)", + "type": "string", + "title": "Output Format", + "default": "hex" + }, + "pronunciation_dict": { + "description": "Custom pronunciation dictionary for text replacement", + "title": "Pronunciation Dict", + "allOf": [ + { + "$ref": "#/components/schemas/PronunciationDict" + } + ] + }, + "voice_setting": { + "default": { + "speed": 1, + "vol": 1, + "voice_id": "Wise_Woman", + "pitch": 0, + "english_normalization": false + }, + "description": "Voice configuration settings", + "title": "Voice Setting", + "allOf": [ + { + "$ref": "#/components/schemas/VoiceSetting" + } + ] + }, + "normalization_setting": { + "description": "Loudness normalization settings for the audio", + "title": "Normalization Setting", + "allOf": [ + { + "$ref": "#/components/schemas/LoudnessNormalizationSetting" + } + ] + }, + "audio_setting": { + "description": "Audio configuration settings", + "title": "Audio Setting", + "allOf": [ + { + "$ref": "#/components/schemas/AudioSetting" + } + ] + } + }, + "x-fal-order-properties": [ + "voice_setting", + "audio_setting", + "language_boost", + "output_format", + "pronunciation_dict", + "prompt", + "normalization_setting" + ], + "required": [ + "prompt" + ] + }, + "MinimaxSpeech26HdOutput": { + "title": "TextToSpeechHD26Output", + "type": "object", + "properties": { + "duration_ms": { + "description": "Duration of the audio in milliseconds", + "type": "integer", + "title": "Duration Ms" + }, + "audio": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/minimax-speech-26/speech_26_hd_out.mp3" + } + ], + "description": "The generated audio file", + "title": "Audio", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "audio", + "duration_ms" + ], + "required": [ + "audio", + "duration_ms" + ] + }, + "PronunciationDict": { + "title": "PronunciationDict", + "type": "object", + "properties": { + "tone_list": { + "description": "List of pronunciation replacements 
in format ['text/(pronunciation)', ...]. For Chinese, tones are 1-5. Example: ['燕少飞/(yan4)(shao3)(fei1)']", + "type": "array", + "title": "Tone List", + "items": { + "type": "string" + } + } + }, + "x-fal-order-properties": [ + "tone_list" + ] + }, + "VoiceSetting": { + "title": "VoiceSetting", + "type": "object", + "properties": { + "speed": { + "minimum": 0.5, + "description": "Speech speed (0.5-2.0)", + "type": "number", + "maximum": 2, + "title": "Speed", + "default": 1 + }, + "vol": { + "minimum": 0.01, + "description": "Volume (0-10)", + "type": "number", + "maximum": 10, + "title": "Vol", + "default": 1 + }, + "voice_id": { + "examples": [ + "Wise_Woman", + "Friendly_Person", + "Inspirational_girl", + "Deep_Voice_Man", + "Calm_Woman", + "Casual_Guy", + "Lively_Girl", + "Patient_Man", + "Young_Knight", + "Determined_Man", + "Lovely_Girl", + "Decent_Boy", + "Imposing_Manner", + "Elegant_Man", + "Abbess", + "Sweet_Girl_2", + "Exuberant_Girl" + ], + "description": "Predefined voice ID to use for synthesis", + "type": "string", + "title": "Voice Id", + "default": "Wise_Woman" + }, + "pitch": { + "minimum": -12, + "description": "Voice pitch (-12 to 12)", + "type": "integer", + "maximum": 12, + "title": "Pitch", + "default": 0 + }, + "english_normalization": { + "description": "Enables English text normalization to improve number reading performance, with a slight increase in latency", + "type": "boolean", + "title": "English Normalization", + "default": false + }, + "emotion": { + "enum": [ + "happy", + "sad", + "angry", + "fearful", + "disgusted", + "surprised", + "neutral" + ], + "description": "Emotion of the generated speech", + "type": "string", + "title": "Emotion" + } + }, + "x-fal-order-properties": [ + "voice_id", + "speed", + "vol", + "pitch", + "emotion", + "english_normalization" + ] + }, + "LoudnessNormalizationSetting": { + "title": "LoudnessNormalizationSetting", + "type": "object", + "properties": { + "enabled": { + "description": "Enable loudness normalization for the audio", + "type": "boolean", + "title": "Enabled", + "default": true + }, + "target_loudness": { + "minimum": -70, + "description": "Target loudness in LUFS (default -18.0)", + "type": "number", + "maximum": -10, + "title": "Target Loudness", + "default": -18 + }, + "target_range": { + "minimum": 0, + "description": "Target loudness range in LU (default 8.0)", + "type": "number", + "maximum": 20, + "title": "Target Range", + "default": 8 + }, + "target_peak": { + "minimum": -3, + "description": "Target peak level in dBTP (default -0.5).", + "type": "number", + "maximum": 0, + "title": "Target Peak", + "default": -0.5 + } + }, + "x-fal-order-properties": [ + "enabled", + "target_loudness", + "target_range", + "target_peak" + ] + }, + "AudioSetting": { + "title": "AudioSetting", + "type": "object", + "properties": { + "format": { + "enum": [ + "mp3", + "pcm", + "flac" + ], + "description": "Audio format", + "type": "string", + "title": "Format", + "default": "mp3" + }, + "sample_rate": { + "enum": [ + 8000, + 16000, + 22050, + 24000, + 32000, + 44100 + ], + "description": "Sample rate of generated audio", + "type": "integer", + "title": "Sample Rate", + "default": 32000 + }, + "channel": { + "enum": [ + 1, + 2 + ], + "description": "Number of audio channels (1=mono, 2=stereo)", + "type": "integer", + "title": "Channel", + "default": 1 + }, + "bitrate": { + "enum": [ + 32000, + 64000, + 128000, + 256000 + ], + "description": "Bitrate of generated audio", + "type": "integer", + "title": "Bitrate", + "default": 
128000 + } + }, + "x-fal-order-properties": [ + "sample_rate", + "bitrate", + "format", + "channel" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/minimax/speech-2.6-hd/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/speech-2.6-hd/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/minimax/speech-2.6-hd": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxSpeech26HdInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/speech-2.6-hd/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxSpeech26HdOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/index-tts-2/text-to-speech", + "metadata": { + "display_name": "Index TTS 2.0", + "category": "text-to-speech", + "description": "Generate natural, clear speeches using Index TTS 2.0 from IndexTeam", + "status": "active", + "tags": [ + "text-to-speech" + ], + "updated_at": "2026-01-26T21:42:44.483Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/lion/Y_c2YQvO2F3F-Q3v8cMHr_e14577c0c33b4eca81ce5ebeb2a5ca49.jpg", + "model_url": "https://fal.run/fal-ai/index-tts-2/text-to-speech", + "license_type": "commercial", + "date": "2025-10-07T17:21:56.690Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/index-tts-2/text-to-speech", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/index-tts-2/text-to-speech queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/index-tts-2/text-to-speech", + "category": "text-to-speech", + "thumbnailUrl": "https://v3b.fal.media/files/b/lion/Y_c2YQvO2F3F-Q3v8cMHr_e14577c0c33b4eca81ce5ebeb2a5ca49.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/index-tts-2/text-to-speech", + "documentationUrl": "https://fal.ai/models/fal-ai/index-tts-2/text-to-speech/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "IndexTts2TextToSpeechInput": { + "x-fal-order-properties": [ + "audio_url", + "prompt", + "emotional_audio_url", + "strength", + "emotional_strengths", + "should_use_prompt_for_emotion", + "emotion_prompt" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Hide! He's coming! He's coming to get us!" + ], + "description": "The speech prompt to generate", + "type": "string", + "title": "Prompt" + }, + "emotional_strengths": { + "examples": [ + null + ], + "description": "The strengths of individual emotions for fine-grained control. ", + "title": "Emotional Strengths", + "allOf": [ + { + "$ref": "#/components/schemas/EmotionalStrengths" + } + ] + }, + "strength": { + "minimum": 0, + "description": "The strength of the emotional style transfer. Higher values result in stronger emotional influence.", + "type": "number", + "maximum": 1, + "title": "Strength", + "default": 1 + }, + "emotional_audio_url": { + "description": "The emotional reference audio file to extract the style from.", + "type": "string", + "title": "Emotional Audio Url" + }, + "audio_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/index-tts-2/tts_in.mp3" + ], + "description": "The audio file to generate the speech from.", + "type": "string", + "title": "Audio Url" + }, + "emotion_prompt": { + "examples": [ + "You scared me to death! What are you, a ghost?" + ], + "description": "The emotional prompt to influence the emotional style. Must be used together with should_use_prompt_for_emotion.", + "type": "string", + "title": "Emotion Prompt" + }, + "should_use_prompt_for_emotion": { + "examples": [ + true + ], + "description": "Whether to use the `prompt` to calculate emotional strengths, if enabled it will overwrite the `emotional_strengths` values. 
If `emotion_prompt` is provided, it will be used instead of `prompt` to extract the emotional style.", + "type": "boolean", + "title": "Should Use Prompt For Emotion", + "default": false + } + }, + "title": "IndexTTS2Input", + "required": [ + "audio_url", + "prompt" + ] + }, + "IndexTts2TextToSpeechOutput": { + "x-fal-order-properties": [ + "audio" + ], + "type": "object", + "properties": { + "audio": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_outputs/index-tts-2/tts_out.mp3" + ], + "description": "The generated audio file.", + "title": "Audio", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "IndexTTS2Output", + "required": [ + "audio" + ] + }, + "EmotionalStrengths": { + "x-fal-order-properties": [ + "happy", + "angry", + "sad", + "afraid", + "disgusted", + "melancholic", + "surprised", + "calm" + ], + "type": "object", + "properties": { + "afraid": { + "minimum": 0, + "description": "Strength of fear emotion", + "type": "number", + "maximum": 1, + "title": "Afraid", + "default": 0 + }, + "calm": { + "minimum": 0, + "description": "Strength of calm emotion", + "type": "number", + "maximum": 1, + "title": "Calm", + "default": 0 + }, + "disgusted": { + "minimum": 0, + "description": "Strength of disgust emotion", + "type": "number", + "maximum": 1, + "title": "Disgusted", + "default": 0 + }, + "angry": { + "minimum": 0, + "description": "Strength of anger emotion", + "type": "number", + "maximum": 1, + "title": "Angry", + "default": 0 + }, + "sad": { + "minimum": 0, + "description": "Strength of sadness emotion", + "type": "number", + "maximum": 1, + "title": "Sad", + "default": 0 + }, + "melancholic": { + "minimum": 0, + "description": "Strength of melancholic emotion", + "type": "number", + "maximum": 1, + "title": "Melancholic", + "default": 0 + }, + "surprised": { + "minimum": 0, + "description": "Strength of surprise emotion", + "type": "number", + "maximum": 1, + "title": "Surprised", + "default": 0 + }, + "happy": { + "minimum": 0, + "description": "Strength of happiness emotion", + "type": "number", + "maximum": 1, + "title": "Happy", + "default": 0 + } + }, + "title": "EmotionalStrengths" + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/index-tts-2/text-to-speech/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/index-tts-2/text-to-speech/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/index-tts-2/text-to-speech": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IndexTts2TextToSpeechInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/index-tts-2/text-to-speech/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/IndexTts2TextToSpeechOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/v1/tts", + "metadata": { + "display_name": "Kling TTS", + "category": "text-to-speech", + "description": "Generate speech from text prompts and different voices using the Kling TTS model, which leverages advanced AI techniques to create high-quality text-to-speech.", + "status": "active", + "tags": [ + "audio" + ], + "updated_at": "2026-01-26T21:42:56.001Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/zebra/kuLfhOkVceXDTcdBxGQAf_2011e640a3aa4acab55d01d69f3a6e16.jpg", + "model_url": "https://fal.run/fal-ai/kling-video/v1/tts", + "license_type": "commercial", + "date": "2025-09-13T07:45:11.363Z", + "group": { + "key": "Kling-Avatar", + "label": "Text to Speech" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": 
"3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling-video/v1/tts", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-video/v1/tts queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/v1/tts", + "category": "text-to-speech", + "thumbnailUrl": "https://v3.fal.media/files/zebra/kuLfhOkVceXDTcdBxGQAf_2011e640a3aa4acab55d01d69f3a6e16.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/v1/tts", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/v1/tts/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoV1TtsInput": { + "title": "TTSInput", + "type": "object", + "properties": { + "text": { + "examples": [ + "Hello world! Kling TTS is available on FAL!" + ], + "title": "Text", + "type": "string", + "maxLength": 500, + "description": "The text to be converted to speech" + }, + "voice_id": { + "enum": [ + "genshin_vindi2", + "zhinen_xuesheng", + "AOT", + "ai_shatang", + "genshin_klee2", + "genshin_kirara", + "ai_kaiya", + "oversea_male1", + "ai_chenjiahao_712", + "girlfriend_4_speech02", + "chat1_female_new-3", + "chat_0407_5-1", + "cartoon-boy-07", + "uk_boy1", + "cartoon-girl-01", + "PeppaPig_platform", + "ai_huangzhong_712", + "ai_huangyaoshi_712", + "ai_laoguowang_712", + "chengshu_jiejie", + "you_pingjing", + "calm_story1", + "uk_man2", + "laopopo_speech02", + "heainainai_speech02", + "reader_en_m-v1", + "commercial_lady_en_f-v1", + "tiyuxi_xuedi", + "tiexin_nanyou", + "girlfriend_1_speech02", + "girlfriend_2_speech02", + "zhuxi_speech02", + "uk_oldman3", + "dongbeilaotie_speech02", + "chongqingxiaohuo_speech02", + "chuanmeizi_speech02", + "chaoshandashu_speech02", + "ai_taiwan_man2_speech02", + "xianzhanggui_speech02", + "tianjinjiejie_speech02", + "diyinnansang_DB_CN_M_04-v2", + "yizhipiannan-v1", + "guanxiaofang-v2", + "tianmeixuemei-v1", + "daopianyansang-v1", + "mengwa-v1" + ], + "title": "Voice Id", + "type": "string", + "description": "The voice ID to use for speech synthesis", + "default": "genshin_vindi2" + }, + "voice_speed": { + "minimum": 0.8, + "title": "Voice Speed", + "type": "number", + "maximum": 2, + "description": "Rate of speech", + "default": 1 + } + }, + "x-fal-order-properties": [ + "text", + "voice_id", + "voice_speed" + ], + "required": [ + "text" + ] + }, + "KlingVideoV1TtsOutput": { + "title": "TTSOutput", + "type": "object", + "properties": { + "audio": { + "examples": [ + { + "url": "https://v3.fal.media/files/monkey/O-ekVTtYqeDblD1oSf2uv_output.mp3" + } + ], + "title": "Audio", + "description": "The generated audio", + "allOf": [ + { 
+ "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "audio" + ], + "required": [ + "audio" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/v1/tts/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1/tts/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1/tts": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV1TtsInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1/tts/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV1TtsOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/chatterbox/text-to-speech/multilingual", + "metadata": { + "display_name": "Chatterbox", + "category": "text-to-speech", + "description": "Whether you're working on memes, videos, games, or AI agents, Chatterbox brings your content to life. 
Use the first TTS from Resemble AI.", + "status": "active", + "tags": [ + "text-to-speech", + "multilingual" + ], + "updated_at": "2026-01-26T21:42:58.445Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/rabbit/FzzCnGuQNXLOEYuQq8CE8_7afb3290e0de46d5a7e4d13495938e3f.jpg", + "model_url": "https://fal.run/fal-ai/chatterbox/text-to-speech/multilingual", + "license_type": "commercial", + "date": "2025-09-04T18:18:45.389Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/chatterbox/text-to-speech/multilingual", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/chatterbox/text-to-speech/multilingual queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/chatterbox/text-to-speech/multilingual", + "category": "text-to-speech", + "thumbnailUrl": "https://fal.media/files/rabbit/FzzCnGuQNXLOEYuQq8CE8_7afb3290e0de46d5a7e4d13495938e3f.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/chatterbox/text-to-speech/multilingual", + "documentationUrl": "https://fal.ai/models/fal-ai/chatterbox/text-to-speech/multilingual/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ChatterboxTextToSpeechMultilingualInput": { + "title": "ChatterboxMultilingualRequest", + "type": "object", + "properties": { + "text": { + "examples": [ + "Last month, we reached a new milestone with two billion views on our YouTube channel.", + "Le mois dernier, nous avons atteint un nouveau jalon avec deux milliards de vues sur notre chaîne YouTube." + ], + "maxLength": 300, + "type": "string", + "description": "The text to be converted to speech (maximum 300 characters). Supports 23 languages including English, French, German, Spanish, Italian, Portuguese, Hindi, Arabic, Chinese, Japanese, Korean, and more.", + "title": "Text" + }, + "custom_audio_language": { + "enum": [ + "english", + "arabic", + "danish", + "german", + "greek", + "spanish", + "finnish", + "french", + "hebrew", + "hindi", + "italian", + "japanese", + "korean", + "malay", + "dutch", + "norwegian", + "polish", + "portuguese", + "russian", + "swedish", + "swahili", + "turkish", + "chinese" + ], + "description": "If using a custom audio URL, specify the language of the audio here. Ignored if `voice` is not a custom URL.", + "type": "string", + "title": "Custom Audio Language" + }, + "exaggeration": { + "minimum": 0.25, + "maximum": 2, + "type": "number", + "description": "Controls speech expressiveness and emotional intensity (0.25-2.0). 
0.5 is neutral, higher values increase expressiveness. Extreme values may be unstable.", + "title": "Exaggeration", + "default": 0.5 + }, + "voice": { + "examples": [ + "english", + "arabic", + "danish", + "german", + "greek", + "spanish", + "finnish", + "french", + "hebrew", + "hindi", + "italian", + "japanese", + "korean", + "malay", + "dutch", + "norwegian", + "polish", + "portuguese", + "russian", + "swedish", + "swahili", + "turkish", + "chinese" + ], + "description": "Language code for synthesis, or a custom voice audio URL; when passing a custom URL, also set `custom_audio_language`.", + "type": "string", + "title": "Voice", + "default": "english" + }, + "temperature": { + "minimum": 0.05, + "maximum": 5, + "type": "number", + "description": "Controls randomness and variation in generation (0.05-5.0). Higher values create more varied speech patterns.", + "title": "Temperature", + "default": 0.8 + }, + "seed": { + "description": "Random seed for reproducible results. Set to 0 for random generation, or provide a specific number for consistent outputs.", + "type": "integer", + "title": "Seed" + }, + "cfg_scale": { + "minimum": 0, + "maximum": 1, + "type": "number", + "description": "Configuration/pace weight controlling generation guidance (0.0-1.0). Use 0.0 for language transfer to mitigate accent inheritance.", + "title": "CFG Scale", + "default": 0.5 + } + }, + "x-fal-order-properties": [ + "text", + "voice", + "custom_audio_language", + "exaggeration", + "temperature", + "cfg_scale", + "seed" + ], + "required": [ + "text" + ] + }, + "ChatterboxTextToSpeechMultilingualOutput": { + "title": "ChatterboxMultilingualOutput", + "type": "object", + "properties": { + "audio": { + "examples": [ + { + "url": "https://v3.fal.media/files/example/multilingual_speech_output.wav" + } + ], + "description": "The generated multilingual speech audio file", + "title": "Audio", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "audio" + ], + "required": [ + "audio" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/chatterbox/text-to-speech/multilingual/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
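Every endpoint in this catalog exposes the same queue contract: POST the input schema to `https://queue.fal.run/<endpoint>`, poll `/requests/{request_id}/status` until the `QueueStatus` reaches `COMPLETED`, then GET `/requests/{request_id}` for the typed output. Below is a minimal TypeScript sketch against the Chatterbox multilingual endpoint; the `FAL_KEY` environment variable and the `Key <token>` authorization scheme are assumptions inferred from the `apiKeyAuth` header these schemas declare.

```ts
// Shared fal queue flow, sketched for the Chatterbox multilingual endpoint.
// Assumptions: FAL_KEY is set, and the Authorization header uses the
// `Key <token>` scheme implied by the apiKeyAuth description ("Fal Key").
const BASE = 'https://queue.fal.run'
const headers = {
  Authorization: `Key ${process.env.FAL_KEY ?? ''}`,
  'Content-Type': 'application/json',
}

async function runQueued<TIn, TOut>(endpoint: string, input: TIn): Promise<TOut> {
  // 1. Submit: the POST responds with a QueueStatus carrying request_id.
  const submitted = await fetch(`${BASE}/${endpoint}`, {
    method: 'POST',
    headers,
    body: JSON.stringify(input),
  })
  const { request_id } = (await submitted.json()) as { request_id: string }

  // 2. Poll the status endpoint until the request is COMPLETED.
  for (;;) {
    const res = await fetch(`${BASE}/${endpoint}/requests/${request_id}/status`, { headers })
    const { status } = (await res.json()) as { status: 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED' }
    if (status === 'COMPLETED') break
    await new Promise((r) => setTimeout(r, 1000))
  }

  // 3. Fetch the typed result from the result endpoint.
  const result = await fetch(`${BASE}/${endpoint}/requests/${request_id}`, { headers })
  return (await result.json()) as TOut
}

// Usage with the input/output schemas defined above.
const out = await runQueued<{ text: string; voice?: string }, { audio: { url: string } }>(
  'fal-ai/chatterbox/text-to-speech/multilingual',
  { text: 'Last month, we reached a new milestone.', voice: 'english' },
)
console.log(out.audio.url)
```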
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/chatterbox/text-to-speech/multilingual/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/chatterbox/text-to-speech/multilingual": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ChatterboxTextToSpeechMultilingualInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/chatterbox/text-to-speech/multilingual/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ChatterboxTextToSpeechMultilingualOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/vibevoice/7b", + "metadata": { + "display_name": "VibeVoice 7B", + "category": "text-to-speech", + "description": "Generate long, expressive multi-voice speech using Microsoft's powerful TTS", + "status": "active", + "tags": [ + "text-to-speech", + "multi-speaker", + "podcast" + ], + "updated_at": "2026-01-26T21:42:59.829Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/monkey/5_nmz_TLmy87FiukLCnNM_19cf8bee41214a099ad9b8a2d1d7e160.jpg", + "model_url": "https://fal.run/fal-ai/vibevoice/7b", + "license_type": "commercial", + "date": "2025-08-27T16:22:23.854Z", + "group": { + "key": "vibevoice", + "label": "7B" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/vibevoice/7b", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/vibevoice/7b queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/vibevoice/7b", + "category": "text-to-speech", + "thumbnailUrl": "https://fal.media/files/monkey/5_nmz_TLmy87FiukLCnNM_19cf8bee41214a099ad9b8a2d1d7e160.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/vibevoice/7b", + "documentationUrl": "https://fal.ai/models/fal-ai/vibevoice/7b/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Vibevoice7bInput": { + "x-fal-order-properties": [ + "script", + "speakers", + "seed", + "cfg_scale" + ], + "type": "object", + "properties": { + "script": { + "examples": [ + "Speaker 0: VibeVoice is now available on Fal. Isn't that right, Carter?\nSpeaker 1: That's right Frank, and it supports up to four speakers at once. Try it now!" + ], + "maxLength": 30000, + "type": "string", + "description": "The script to convert to speech. Can be formatted with 'Speaker X:' prefixes for multi-speaker dialogues.", + "title": "Script" + }, + "seed": { + "description": "Random seed for reproducible generation.", + "type": "integer", + "title": "Seed" + }, + "speakers": { + "examples": [ + [ + { + "preset": "Frank [EN]" + }, + { + "preset": "Carter [EN]" + } + ] + ], + "description": "List of speakers to use for the script. If not provided, will be inferred from the script or voice samples.", + "type": "array", + "title": "Speakers", + "items": { + "$ref": "#/components/schemas/VibeVoiceSpeaker" + } + }, + "cfg_scale": { + "minimum": 1, + "maximum": 2, + "type": "number", + "title": "CFG Scale", + "description": "CFG (Classifier-Free Guidance) scale for generation. Higher values increase adherence to text.", + "default": 1.3 + } + }, + "description": "Input schema for VibeVoice-7b TTS generation", + "title": "VibeVoice7bInput", + "required": [ + "script", + "speakers" + ] + }, + "Vibevoice7bOutput": { + "x-fal-order-properties": [ + "audio", + "duration", + "sample_rate", + "generation_time", + "rtf" + ], + "type": "object", + "properties": { + "duration": { + "examples": [ + 9.46 + ], + "description": "Duration of the generated audio in seconds", + "type": "number", + "title": "Duration" + }, + "rtf": { + "examples": [ + 0.53 + ], + "description": "Real-time factor (generation_time / audio_duration). 
Lower is better.", + "type": "number", + "title": "Rtf" + }, + "sample_rate": { + "examples": [ + 24000 + ], + "description": "Sample rate of the generated audio", + "type": "integer", + "title": "Sample Rate" + }, + "generation_time": { + "examples": [ + 5.6 + ], + "description": "Time taken to generate the audio in seconds", + "type": "number", + "title": "Generation Time" + }, + "audio": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/vibevoice.mp3" + } + ], + "description": "The generated audio file containing the speech", + "title": "Audio", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "description": "Output schema for VibeVoice TTS generation", + "title": "VibeVoiceOutput", + "required": [ + "audio", + "duration", + "sample_rate", + "generation_time", + "rtf" + ] + }, + "VibeVoiceSpeaker": { + "x-fal-order-properties": [ + "preset", + "audio_url" + ], + "type": "object", + "properties": { + "preset": { + "examples": [ + "Alice [EN]" + ], + "title": "Preset", + "type": "string", + "enum": [ + "Alice [EN]", + "Carter [EN]", + "Frank [EN]", + "Mary [EN] (Background Music)", + "Maya [EN]", + "Anchen [ZH] (Background Music)", + "Bowen [ZH]", + "Xinran [ZH]" + ], + "description": "Default voice preset to use for the speaker. Not used if `audio_url` is provided.", + "default": "Alice [EN]" + }, + "audio_url": { + "description": "URL to a voice sample audio file. If provided, `preset` will be ignored.", + "type": "string", + "title": "Audio URL" + } + }, + "title": "VibeVoiceSpeaker" + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/vibevoice/7b/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/vibevoice/7b/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/vibevoice/7b": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Vibevoice7bInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/vibevoice/7b/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Vibevoice7bOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/vibevoice", + "metadata": { + "display_name": "VibeVoice 1.5B", + "category": "text-to-speech", + "description": "Generate long, expressive multi-voice speech using Microsoft's powerful TTS", + "status": "active", + "tags": [ + "text-to-speech", + "multi-speaker", + "podcast" + ], + "updated_at": "2026-01-26T21:42:59.953Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/lion/iW_TAXlsir_HRdRs3sGeO_2473bedbf906459b9b70adafd02143eb.jpg", + "model_url": "https://fal.run/fal-ai/vibevoice", + "license_type": "commercial", + "date": "2025-08-27T14:33:10.717Z", + "group": { + "key": "vibevoice", + "label": "1.5B" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/vibevoice", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/vibevoice queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/vibevoice", + "category": "text-to-speech", + "thumbnailUrl": "https://fal.media/files/lion/iW_TAXlsir_HRdRs3sGeO_2473bedbf906459b9b70adafd02143eb.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/vibevoice", + "documentationUrl": "https://fal.ai/models/fal-ai/vibevoice/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." 
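The `Vibevoice7bInput` above ties multi-speaker scripts to a `speakers` array: `Speaker 0:`/`Speaker 1:` prefixes in the script index into that array, and each entry carries either a named `preset` or a custom `audio_url` (which, per the schema, takes precedence over the preset). An illustrative payload, typed from the schema:

```ts
// Illustrative Vibevoice7bInput payload. "Speaker N:" prefixes in the script
// map onto positions in the speakers array; audio_url overrides preset.
type VibeVoiceSpeaker = { preset?: string; audio_url?: string }

interface Vibevoice7bInput {
  script: string // up to 30000 characters on the 7B endpoint
  speakers: VibeVoiceSpeaker[]
  seed?: number
  cfg_scale?: number // 1-2, default 1.3; higher sticks closer to the text
}

const vibeVoiceInput: Vibevoice7bInput = {
  script: [
    'Speaker 0: VibeVoice is now available on Fal.',
    'Speaker 1: And it supports up to four speakers at once.',
  ].join('\n'),
  speakers: [{ preset: 'Frank [EN]' }, { preset: 'Carter [EN]' }],
  cfg_scale: 1.3,
}
```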
+ }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "VibevoiceInput": { + "x-fal-order-properties": [ + "script", + "speakers", + "seed", + "cfg_scale" + ], + "type": "object", + "properties": { + "script": { + "examples": [ + "Speaker 0: VibeVoice is now available on Fal. Isn't that right, Carter?\nSpeaker 1: That's right Frank, and it supports up to four speakers at once. Try it now!" + ], + "maxLength": 90000, + "type": "string", + "description": "The script to convert to speech. Can be formatted with 'Speaker X:' prefixes for multi-speaker dialogues.", + "title": "Script" + }, + "seed": { + "description": "Random seed for reproducible generation.", + "type": "integer", + "title": "Seed" + }, + "speakers": { + "examples": [ + [ + { + "preset": "Frank [EN]" + }, + { + "preset": "Carter [EN]" + } + ] + ], + "description": "List of speakers to use for the script. If not provided, will be inferred from the script or voice samples.", + "type": "array", + "title": "Speakers", + "items": { + "$ref": "#/components/schemas/VibeVoiceSpeaker" + } + }, + "cfg_scale": { + "minimum": 1, + "maximum": 2, + "type": "number", + "title": "CFG Scale", + "description": "CFG (Classifier-Free Guidance) scale for generation. Higher values increase adherence to text.", + "default": 1.3 + } + }, + "description": "Input schema for VibeVoice TTS generation", + "title": "VibeVoiceInput", + "required": [ + "script", + "speakers" + ] + }, + "VibevoiceOutput": { + "x-fal-order-properties": [ + "audio", + "duration", + "sample_rate", + "generation_time", + "rtf" + ], + "type": "object", + "properties": { + "duration": { + "examples": [ + 9.46 + ], + "description": "Duration of the generated audio in seconds", + "type": "number", + "title": "Duration" + }, + "rtf": { + "examples": [ + 0.53 + ], + "description": "Real-time factor (generation_time / audio_duration). 
Lower is better.", + "type": "number", + "title": "Rtf" + }, + "sample_rate": { + "examples": [ + 24000 + ], + "description": "Sample rate of the generated audio", + "type": "integer", + "title": "Sample Rate" + }, + "generation_time": { + "examples": [ + 5.6 + ], + "description": "Time taken to generate the audio in seconds", + "type": "number", + "title": "Generation Time" + }, + "audio": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/vibevoice.mp3" + } + ], + "description": "The generated audio file containing the speech", + "title": "Audio", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "description": "Output schema for VibeVoice TTS generation", + "title": "VibeVoiceOutput", + "required": [ + "audio", + "duration", + "sample_rate", + "generation_time", + "rtf" + ] + }, + "VibeVoiceSpeaker": { + "x-fal-order-properties": [ + "preset", + "audio_url" + ], + "type": "object", + "properties": { + "preset": { + "examples": [ + "Alice [EN]" + ], + "title": "Preset", + "type": "string", + "enum": [ + "Alice [EN]", + "Carter [EN]", + "Frank [EN]", + "Mary [EN] (Background Music)", + "Maya [EN]", + "Anchen [ZH] (Background Music)", + "Bowen [ZH]", + "Xinran [ZH]" + ], + "description": "Default voice preset to use for the speaker. Not used if `audio_url` is provided.", + "default": "Alice [EN]" + }, + "audio_url": { + "description": "URL to a voice sample audio file. If provided, `preset` will be ignored.", + "type": "string", + "title": "Audio URL" + } + }, + "title": "VibeVoiceSpeaker" + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/vibevoice/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/vibevoice/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/vibevoice": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VibevoiceInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/vibevoice/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VibevoiceOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/minimax/preview/speech-2.5-hd", + "metadata": { + "display_name": "Minimax", + "category": "text-to-speech", + "description": "Generate speech from text prompts and different voices using the MiniMax Speech-02 HD model, which leverages advanced AI techniques to create high-quality text-to-speech.", + "status": "active", + "tags": [ + "speech" + ], + "updated_at": "2026-01-26T21:43:05.512Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/monkey/yKMqbFeN7Q3b6SSjB41Bd_a0f390d1bb6e4b37a93bb8d0dc58afe7.jpg", + "model_url": "https://fal.run/fal-ai/minimax/preview/speech-2.5-hd", + "license_type": "commercial", + "date": "2025-08-11T09:41:03.032Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/minimax/preview/speech-2.5-hd", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/minimax/preview/speech-2.5-hd queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/minimax/preview/speech-2.5-hd", + "category": "text-to-speech", + "thumbnailUrl": "https://fal.media/files/monkey/yKMqbFeN7Q3b6SSjB41Bd_a0f390d1bb6e4b37a93bb8d0dc58afe7.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/minimax/preview/speech-2.5-hd", + "documentationUrl": "https://fal.ai/models/fal-ai/minimax/preview/speech-2.5-hd/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
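The 1.5B input above is otherwise identical to the 7B one; the only difference is the `script` cap, 90000 characters versus 30000. A hypothetical routing helper follows: `pickVibeVoiceEndpoint` is not part of any package here, and length is the only criterion these schemas expose, so quality or cost trade-offs between the two sizes are out of scope.

```ts
// Hypothetical router between the two VibeVoice endpoints, keyed purely on
// the script-length caps declared in the schemas (30000 vs 90000 chars).
const VIBEVOICE_LIMITS = {
  'fal-ai/vibevoice/7b': 30_000,
  'fal-ai/vibevoice': 90_000,
} as const

function pickVibeVoiceEndpoint(script: string): keyof typeof VIBEVOICE_LIMITS {
  if (script.length <= VIBEVOICE_LIMITS['fal-ai/vibevoice/7b']) {
    return 'fal-ai/vibevoice/7b' // prefer the larger model when it fits (assumption)
  }
  if (script.length <= VIBEVOICE_LIMITS['fal-ai/vibevoice']) {
    return 'fal-ai/vibevoice'
  }
  throw new Error('script exceeds the 90000-character cap of both endpoints')
}
```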
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MinimaxPreviewSpeech25HdInput": { + "title": "TextToSpeechHDv25Request", + "type": "object", + "properties": { + "text": { + "examples": [ + "Hello world! This is a test of the text-to-speech system." + ], + "maxLength": 5000, + "type": "string", + "title": "Text", + "description": "Text to convert to speech (max 5000 characters, minimum 1 non-whitespace character)", + "minLength": 1 + }, + "language_boost": { + "enum": [ + "Persian", + "Filipino", + "Tamil", + "Chinese", + "Chinese,Yue", + "English", + "Arabic", + "Russian", + "Spanish", + "French", + "Portuguese", + "German", + "Turkish", + "Dutch", + "Ukrainian", + "Vietnamese", + "Indonesian", + "Japanese", + "Italian", + "Korean", + "Thai", + "Polish", + "Romanian", + "Greek", + "Czech", + "Finnish", + "Hindi", + "Bulgarian", + "Danish", + "Hebrew", + "Malay", + "Slovak", + "Swedish", + "Croatian", + "Hungarian", + "Norwegian", + "Slovenian", + "Catalan", + "Nynorsk", + "Afrikaans", + "auto" + ], + "description": "Enhance recognition of specified languages and dialects", + "type": "string", + "title": "Language Boost" + }, + "voice_setting": { + "default": { + "speed": 1, + "vol": 1, + "voice_id": "Wise_Woman", + "pitch": 0, + "english_normalization": false + }, + "description": "Voice configuration settings", + "title": "Voice Setting", + "allOf": [ + { + "$ref": "#/components/schemas/VoiceSetting" + } + ] + }, + "output_format": { + "enum": [ + "url", + "hex" + ], + "description": "Format of the output content (non-streaming only)", + "type": "string", + "title": "Output Format", + "default": "hex" + }, + "pronunciation_dict": { + "description": "Custom pronunciation dictionary for text replacement", + "title": "Pronunciation Dict", + "allOf": [ + { + "$ref": "#/components/schemas/PronunciationDict" + } + ] + }, + "audio_setting": { + "description": "Audio configuration settings", + "title": "Audio Setting", + "allOf": [ + { + "$ref": "#/components/schemas/AudioSetting" + } + ] + } + }, + "x-fal-order-properties": [ + "text", + "voice_setting", + "audio_setting", + "language_boost", + "output_format", + "pronunciation_dict" + ], + "required": [ + "text" + ] + }, + "MinimaxPreviewSpeech25HdOutput": { + "title": "TextToSpeechOutput", + "type": "object", + "properties": { + "duration_ms": { + "description": "Duration of the audio in milliseconds", + "type": "integer", + "title": "Duration Ms" + }, + "audio": { + "examples": [ + { + "url": "https://fal.media/files/kangaroo/kojPUCNZ9iUGFGMR-xb7h_speech.mp3" + } + ], + "description": "The generated audio file", + "title": "Audio", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "audio", + "duration_ms" + ], + "required": [ + "audio", + "duration_ms" + ] + }, + "VoiceSetting": { + "title": "VoiceSetting", + "type": "object", + "properties": { + "speed": { + "minimum": 0.5, + "description": "Speech speed (0.5-2.0)", + "type": "number", + "maximum": 2, + "title": "Speed", + "default": 1 + }, + "vol": { + "minimum": 0.01, + "description": 
"Volume (0-10)", + "type": "number", + "maximum": 10, + "title": "Vol", + "default": 1 + }, + "voice_id": { + "examples": [ + "Wise_Woman", + "Friendly_Person", + "Inspirational_girl", + "Deep_Voice_Man", + "Calm_Woman", + "Casual_Guy", + "Lively_Girl", + "Patient_Man", + "Young_Knight", + "Determined_Man", + "Lovely_Girl", + "Decent_Boy", + "Imposing_Manner", + "Elegant_Man", + "Abbess", + "Sweet_Girl_2", + "Exuberant_Girl" + ], + "description": "Predefined voice ID to use for synthesis", + "type": "string", + "title": "Voice Id", + "default": "Wise_Woman" + }, + "pitch": { + "minimum": -12, + "description": "Voice pitch (-12 to 12)", + "type": "integer", + "maximum": 12, + "title": "Pitch", + "default": 0 + }, + "english_normalization": { + "description": "Enables English text normalization to improve number reading performance, with a slight increase in latency", + "type": "boolean", + "title": "English Normalization", + "default": false + }, + "emotion": { + "enum": [ + "happy", + "sad", + "angry", + "fearful", + "disgusted", + "surprised", + "neutral" + ], + "description": "Emotion of the generated speech", + "type": "string", + "title": "Emotion" + } + }, + "x-fal-order-properties": [ + "voice_id", + "speed", + "vol", + "pitch", + "emotion", + "english_normalization" + ] + }, + "PronunciationDict": { + "title": "PronunciationDict", + "type": "object", + "properties": { + "tone_list": { + "description": "List of pronunciation replacements in format ['text/(pronunciation)', ...]. For Chinese, tones are 1-5. Example: ['燕少飞/(yan4)(shao3)(fei1)']", + "type": "array", + "title": "Tone List", + "items": { + "type": "string" + } + } + }, + "x-fal-order-properties": [ + "tone_list" + ] + }, + "AudioSetting": { + "title": "AudioSetting", + "type": "object", + "properties": { + "format": { + "enum": [ + "mp3", + "pcm", + "flac" + ], + "description": "Audio format", + "type": "string", + "title": "Format", + "default": "mp3" + }, + "sample_rate": { + "enum": [ + 8000, + 16000, + 22050, + 24000, + 32000, + 44100 + ], + "description": "Sample rate of generated audio", + "type": "integer", + "title": "Sample Rate", + "default": 32000 + }, + "channel": { + "enum": [ + 1, + 2 + ], + "description": "Number of audio channels (1=mono, 2=stereo)", + "type": "integer", + "title": "Channel", + "default": 1 + }, + "bitrate": { + "enum": [ + 32000, + 64000, + 128000, + 256000 + ], + "description": "Bitrate of generated audio", + "type": "integer", + "title": "Bitrate", + "default": 128000 + } + }, + "x-fal-order-properties": [ + "sample_rate", + "bitrate", + "format", + "channel" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/minimax/preview/speech-2.5-hd/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/preview/speech-2.5-hd/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/minimax/preview/speech-2.5-hd": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxPreviewSpeech25HdInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/preview/speech-2.5-hd/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxPreviewSpeech25HdOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/minimax/preview/speech-2.5-turbo", + "metadata": { + "display_name": "Minimax", + "category": "text-to-speech", + "description": "Generate fast speech from text prompts and different voices using the MiniMax Speech-02 Turbo model, which leverages advanced AI techniques to create high-quality text-to-speech.", + "status": "active", + "tags": [ + "text-to-speech" + ], + "updated_at": "2026-01-26T21:43:05.638Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/elephant/u7RqSqZJAXI8RIzIYw9HP_9d16c25ecc85427bb97102a847da0736.jpg", + "model_url": "https://fal.run/fal-ai/minimax/preview/speech-2.5-turbo", + "license_type": "commercial", + "date": "2025-08-11T09:33:12.088Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/minimax/preview/speech-2.5-turbo", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/minimax/preview/speech-2.5-turbo queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/minimax/preview/speech-2.5-turbo", + "category": "text-to-speech", + "thumbnailUrl": "https://fal.media/files/elephant/u7RqSqZJAXI8RIzIYw9HP_9d16c25ecc85427bb97102a847da0736.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/minimax/preview/speech-2.5-turbo", + "documentationUrl": "https://fal.ai/models/fal-ai/minimax/preview/speech-2.5-turbo/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
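Unlike the flat inputs earlier in this file, the MiniMax HD request nests its knobs inside `voice_setting` and `audio_setting`; note also that `output_format` defaults to `hex`, so a caller that wants a downloadable file must ask for `url` explicitly. An illustrative request body built from the enums and defaults above:

```ts
// Illustrative MinimaxPreviewSpeech25HdInput body. output_format defaults to
// "hex"; "url" is requested here so the response's audio.url is populated.
const minimaxHdInput = {
  text: 'Hello world! This is a test of the text-to-speech system.',
  output_format: 'url',
  language_boost: 'English',
  voice_setting: {
    voice_id: 'Wise_Woman', // one of the predefined voices listed above
    speed: 1, // 0.5-2.0
    vol: 1, // up to 10
    pitch: 0, // -12 to 12
    emotion: 'happy', // optional enum value
    english_normalization: false,
  },
  audio_setting: {
    format: 'mp3', // mp3 | pcm | flac
    sample_rate: 32000,
    bitrate: 128000,
    channel: 1, // mono
  },
}
```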
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "MinimaxPreviewSpeech25TurboInput": { + "title": "TextToSpeechTurbov25Request", + "type": "object", + "properties": { + "text": { + "examples": [ + "Hello world! This is a test of the text-to-speech system." + ], + "maxLength": 5000, + "type": "string", + "title": "Text", + "description": "Text to convert to speech (max 5000 characters, minimum 1 non-whitespace character)", + "minLength": 1 + }, + "language_boost": { + "enum": [ + "Persian", + "Filipino", + "Tamil", + "Chinese", + "Chinese,Yue", + "English", + "Arabic", + "Russian", + "Spanish", + "French", + "Portuguese", + "German", + "Turkish", + "Dutch", + "Ukrainian", + "Vietnamese", + "Indonesian", + "Japanese", + "Italian", + "Korean", + "Thai", + "Polish", + "Romanian", + "Greek", + "Czech", + "Finnish", + "Hindi", + "Bulgarian", + "Danish", + "Hebrew", + "Malay", + "Slovak", + "Swedish", + "Croatian", + "Hungarian", + "Norwegian", + "Slovenian", + "Catalan", + "Nynorsk", + "Afrikaans", + "auto" + ], + "description": "Enhance recognition of specified languages and dialects", + "type": "string", + "title": "Language Boost" + }, + "voice_setting": { + "default": { + "speed": 1, + "vol": 1, + "voice_id": "Wise_Woman", + "pitch": 0, + "english_normalization": false + }, + "description": "Voice configuration settings", + "title": "Voice Setting", + "allOf": [ + { + "$ref": "#/components/schemas/VoiceSetting" + } + ] + }, + "output_format": { + "enum": [ + "url", + "hex" + ], + "description": "Format of the output content (non-streaming only)", + "type": "string", + "title": "Output Format", + "default": "hex" + }, + "pronunciation_dict": { + "description": "Custom pronunciation dictionary for text replacement", + "title": "Pronunciation Dict", + "allOf": [ + { + "$ref": "#/components/schemas/PronunciationDict" + } + ] + }, + "audio_setting": { + "description": "Audio configuration settings", + "title": "Audio Setting", + "allOf": [ + { + "$ref": "#/components/schemas/AudioSetting" + } + ] + } + }, + "x-fal-order-properties": [ + "text", + "voice_setting", + "audio_setting", + "language_boost", + "output_format", + "pronunciation_dict" + ], + "required": [ + "text" + ] + }, + "MinimaxPreviewSpeech25TurboOutput": { + "title": "TextToSpeechOutput", + "type": "object", + "properties": { + "duration_ms": { + "description": "Duration of the audio in milliseconds", + "type": "integer", + "title": "Duration Ms" + }, + "audio": { + "examples": [ + { + "url": "https://fal.media/files/kangaroo/kojPUCNZ9iUGFGMR-xb7h_speech.mp3" + } + ], + "description": "The generated audio file", + "title": "Audio", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "audio", + "duration_ms" + ], + "required": [ + "audio", + "duration_ms" + ] + }, + "VoiceSetting": { + "title": "VoiceSetting", + "type": "object", + "properties": { + "speed": { + "minimum": 0.5, + "description": "Speech speed (0.5-2.0)", + "type": "number", + "maximum": 2, + "title": "Speed", + "default": 1 + }, + "vol": { + "minimum": 0.01, + "description": "Volume (0-10)", + "type": "number", + "maximum": 10, + "title": "Vol", + "default": 1 + }, + "voice_id": { + "examples": [ + "Wise_Woman", + "Friendly_Person", + "Inspirational_girl", + "Deep_Voice_Man", + "Calm_Woman", + "Casual_Guy", + "Lively_Girl", + "Patient_Man", + "Young_Knight", + "Determined_Man", + "Lovely_Girl", + "Decent_Boy", + "Imposing_Manner", + "Elegant_Man", + "Abbess", + "Sweet_Girl_2", + "Exuberant_Girl" + ], + 
"description": "Predefined voice ID to use for synthesis", + "type": "string", + "title": "Voice Id", + "default": "Wise_Woman" + }, + "pitch": { + "minimum": -12, + "description": "Voice pitch (-12 to 12)", + "type": "integer", + "maximum": 12, + "title": "Pitch", + "default": 0 + }, + "english_normalization": { + "description": "Enables English text normalization to improve number reading performance, with a slight increase in latency", + "type": "boolean", + "title": "English Normalization", + "default": false + }, + "emotion": { + "enum": [ + "happy", + "sad", + "angry", + "fearful", + "disgusted", + "surprised", + "neutral" + ], + "description": "Emotion of the generated speech", + "type": "string", + "title": "Emotion" + } + }, + "x-fal-order-properties": [ + "voice_id", + "speed", + "vol", + "pitch", + "emotion", + "english_normalization" + ] + }, + "PronunciationDict": { + "title": "PronunciationDict", + "type": "object", + "properties": { + "tone_list": { + "description": "List of pronunciation replacements in format ['text/(pronunciation)', ...]. For Chinese, tones are 1-5. Example: ['燕少飞/(yan4)(shao3)(fei1)']", + "type": "array", + "title": "Tone List", + "items": { + "type": "string" + } + } + }, + "x-fal-order-properties": [ + "tone_list" + ] + }, + "AudioSetting": { + "title": "AudioSetting", + "type": "object", + "properties": { + "format": { + "enum": [ + "mp3", + "pcm", + "flac" + ], + "description": "Audio format", + "type": "string", + "title": "Format", + "default": "mp3" + }, + "sample_rate": { + "enum": [ + 8000, + 16000, + 22050, + 24000, + 32000, + 44100 + ], + "description": "Sample rate of generated audio", + "type": "integer", + "title": "Sample Rate", + "default": 32000 + }, + "channel": { + "enum": [ + 1, + 2 + ], + "description": "Number of audio channels (1=mono, 2=stereo)", + "type": "integer", + "title": "Channel", + "default": 1 + }, + "bitrate": { + "enum": [ + 32000, + 64000, + 128000, + 256000 + ], + "description": "Bitrate of generated audio", + "type": "integer", + "title": "Bitrate", + "default": 128000 + } + }, + "x-fal-order-properties": [ + "sample_rate", + "bitrate", + "format", + "channel" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/minimax/preview/speech-2.5-turbo/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/preview/speech-2.5-turbo/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/minimax/preview/speech-2.5-turbo": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxPreviewSpeech25TurboInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/preview/speech-2.5-turbo/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxPreviewSpeech25TurboOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/minimax/voice-design", + "metadata": { + "display_name": "MiniMax Voice Design", + "category": "text-to-speech", + "description": "Design a personalized voice from a text description, and generate speech from text prompts using the MiniMax model, which leverages advanced AI techniques to create high-quality text-to-speech.", + "status": "active", + "tags": [ + "speech", + "" + ], + "updated_at": "2026-01-26T21:43:14.706Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/rabbit/gAqnPCSWSWoC_if0g88gI_e7bf818aa9fc45aeafaceb1b713c8717.jpg", + "model_url": "https://fal.run/fal-ai/minimax/voice-design", + "license_type": "commercial", + "date": "2025-07-18T21:21:52.686Z", + "group": { + "key": "minimax-speech", + "label": "Voice Design" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/minimax/voice-design", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/minimax/voice-design queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/minimax/voice-design", + "category": "text-to-speech", + "thumbnailUrl": "https://fal.media/files/rabbit/gAqnPCSWSWoC_if0g88gI_e7bf818aa9fc45aeafaceb1b713c8717.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/minimax/voice-design", + "documentationUrl": "https://fal.ai/models/fal-ai/minimax/voice-design/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
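Aside from the endpoint id and schema titles, the Turbo request and response shapes above match the HD ones exactly, so a single submit function can serve both. A self-contained sketch, again assuming `FAL_KEY` and the `Key` authorization scheme:

```ts
// HD and Turbo share one request/response shape; only the endpoint differs.
type MinimaxSpeechEndpoint =
  | 'fal-ai/minimax/preview/speech-2.5-hd'
  | 'fal-ai/minimax/preview/speech-2.5-turbo'

async function submitMinimaxSpeech(
  endpoint: MinimaxSpeechEndpoint,
  text: string,
): Promise<string> {
  const res = await fetch(`https://queue.fal.run/${endpoint}`, {
    method: 'POST',
    headers: {
      Authorization: `Key ${process.env.FAL_KEY ?? ''}`,
      'Content-Type': 'application/json',
    },
    body: JSON.stringify({ text, output_format: 'url' }),
  })
  const { request_id } = (await res.json()) as { request_id: string }
  return request_id // then poll /requests/{request_id}/status as shown earlier
}
```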
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "MinimaxVoiceDesignInput": { + "title": "VoiceDesignRequest", + "type": "object", + "properties": { + "preview_text": { + "examples": [ + "Oh my gosh, hi. It's like so amazing to be here. This new endpoint just dropped on fal and the results have been like totally incredible. Use it now, It's gonna be like epic!" + ], + "maxLength": 500, + "type": "string", + "title": "Preview Text", + "description": "Text for audio preview. Limited to 500 characters. A fee of $30 per 1M characters will be charged for the generation of the preview audio." + }, + "prompt": { + "examples": [ + "Bubbly and excitable female pop star interviewee, youthful, slightly breathless, and very enthusiastic" + ], + "maxLength": 2000, + "type": "string", + "title": "Prompt", + "description": "Voice description prompt for generating a personalized voice" + } + }, + "x-fal-order-properties": [ + "prompt", + "preview_text" + ], + "required": [ + "prompt", + "preview_text" + ] + }, + "MinimaxVoiceDesignOutput": { + "title": "VoiceDesignOutput", + "type": "object", + "properties": { + "custom_voice_id": { + "description": "The voice_id of the generated voice", + "type": "string", + "title": "Custom Voice Id" + }, + "audio": { + "examples": [ + { + "url": "https://v3.fal.media/files/kangaroo/gT22cxTqxgLtGMSDz2JSq_preview.mp3" + } + ], + "description": "The preview audio using the generated voice", + "title": "Audio", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "custom_voice_id", + "audio" + ], + "required": [ + "custom_voice_id", + "audio" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/minimax/voice-design/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/voice-design/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/minimax/voice-design": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxVoiceDesignInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/voice-design/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxVoiceDesignOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "resemble-ai/chatterboxhd/text-to-speech", + "metadata": { + "display_name": "Chatterboxhd", + "category": "text-to-speech", + "description": "Generate expressive, natural speech with Resemble AI's Chatterbox. 
Features unique emotion control, instant voice cloning from short audio, and built-in watermarking.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:31.955Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/chatterbox.webp", + "model_url": "https://fal.run/resemble-ai/chatterboxhd/text-to-speech", + "license_type": "commercial", + "date": "2025-06-02T19:16:12.548Z", + "group": { + "key": "chatterboxhd", + "label": "Text To Speech" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for resemble-ai/chatterboxhd/text-to-speech", + "version": "1.0.0", + "description": "The OpenAPI schema for the resemble-ai/chatterboxhd/text-to-speech queue.", + "x-fal-metadata": { + "endpointId": "resemble-ai/chatterboxhd/text-to-speech", + "category": "text-to-speech", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/chatterbox.webp", + "playgroundUrl": "https://fal.ai/models/resemble-ai/chatterboxhd/text-to-speech", + "documentationUrl": "https://fal.ai/models/resemble-ai/chatterboxhd/text-to-speech/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ChatterboxhdTextToSpeechInput": { + "description": "Input parameters for the TTS request.", + "type": "object", + "properties": { + "text": { + "description": "Text to synthesize into speech.", + "type": "string", + "title": "Text", + "default": "My name is Maximus Decimus Meridius, commander of the Armies of the North, General of the Felix Legions and loyal servant to the true emperor, Marcus Aurelius. Father to a murdered son, husband to a murdered wife. And I will have my vengeance, in this life or the next." + }, + "exaggeration": { + "description": "Controls emotion exaggeration. Range typically 0.25 to 2.0.", + "type": "number", + "minimum": 0.25, + "maximum": 2, + "title": "Exaggeration", + "default": 0.5 + }, + "high_quality_audio": { + "description": "If True, the generated audio will be upscaled to 48kHz. The generation of the audio will take longer, but the quality will be higher. If False, the generated audio will be 24kHz. ", + "type": "boolean", + "title": "High Quality Audio", + "default": false + }, + "voice": { + "enum": [ + "Aurora", + "Blade", + "Britney", + "Carl", + "Cliff", + "Richard", + "Rico", + "Siobhan", + "Vicky" + ], + "description": "The voice to use for the TTS request. 
If neither voice nor audio are provided, a random voice will be used.", + "type": "string", + "title": "Voice" + }, + "audio_url": { + "examples": [ + "https://storage.googleapis.com/chatterbox-demo-samples/prompts/male_rickmorty.mp3", + "https://storage.googleapis.com/chatterbox-demo-samples/prompts/male_old_movie.flac" + ], + "description": "URL to the audio sample to use as a voice prompt for zero-shot TTS voice cloning. Providing an audio sample will override the voice setting. If neither voice nor audio_url are provided, a random voice will be used.", + "type": "string", + "title": "Audio Url" + }, + "temperature": { + "minimum": 0.05, + "maximum": 5, + "type": "number", + "description": "Controls the randomness of generation. Range typically 0.05 to 5.", + "title": "Temperature", + "default": 0.8 + }, + "seed": { + "minimum": 0, + "description": "Useful to control the reproducibility of the generated audio. Assuming all other properties didn't change, a fixed seed should always generate the exact same audio file. Set to 0 for random seed.", + "type": "integer", + "title": "Seed", + "default": 0 + }, + "cfg": { + "description": "Classifier-free guidance scale (CFG) controls the conditioning factor. Range typically 0.2 to 1.0. For expressive or dramatic speech, try lower cfg values (e.g. ~0.3) and increase exaggeration to around 0.7 or higher. If the reference speaker has a fast speaking style, lowering cfg to around 0.3 can improve pacing.", + "type": "number", + "minimum": 0, + "maximum": 1, + "title": "Cfg", + "default": 0.5 + } + }, + "x-fal-order-properties": [ + "text", + "voice", + "audio_url", + "exaggeration", + "cfg", + "high_quality_audio", + "seed", + "temperature" + ], + "title": "TTSInput" + }, + "ChatterboxhdTextToSpeechOutput": { + "description": "Output parameters for the TTS request.", + "type": "object", + "properties": { + "audio": { + "examples": [ + { + "url": "https://storage.googleapis.com/chatterbox-demo-samples/samples/gladiator_rick.wav" + }, + { + "url": "https://storage.googleapis.com/chatterbox-demo-samples/samples/gladiator_old_movie.wav" + } + ], + "description": "The generated audio file.", + "title": "Audio", + "allOf": [ + { + "$ref": "#/components/schemas/Audio" + } + ] + } + }, + "x-fal-order-properties": [ + "audio" + ], + "title": "TTSOutput", + "required": [ + "audio" + ] + }, + "Audio": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file.
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "Audio", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/resemble-ai/chatterboxhd/text-to-speech/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/resemble-ai/chatterboxhd/text-to-speech/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/resemble-ai/chatterboxhd/text-to-speech": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ChatterboxhdTextToSpeechInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/resemble-ai/chatterboxhd/text-to-speech/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ChatterboxhdTextToSpeechOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/chatterbox/text-to-speech", + "metadata": { + "display_name": "Chatterbox", + "category": "text-to-speech", + "description": "Whether you're working on memes, videos, games, or AI agents, Chatterbox brings your content to life. 
Use the first TTS from Resemble AI.", + "status": "active", + "tags": [ + "text-to-speech" + ], + "updated_at": "2026-01-26T21:43:33.625Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/rabbit/FzzCnGuQNXLOEYuQq8CE8_7afb3290e0de46d5a7e4d13495938e3f.jpg", + "model_url": "https://fal.run/fal-ai/chatterbox/text-to-speech", + "license_type": "commercial", + "date": "2025-06-01T19:53:08.674Z", + "group": { + "key": "chatterbox", + "label": "Text To Speech" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/chatterbox/text-to-speech", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/chatterbox/text-to-speech queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/chatterbox/text-to-speech", + "category": "text-to-speech", + "thumbnailUrl": "https://fal.media/files/rabbit/FzzCnGuQNXLOEYuQq8CE8_7afb3290e0de46d5a7e4d13495938e3f.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/chatterbox/text-to-speech", + "documentationUrl": "https://fal.ai/models/fal-ai/chatterbox/text-to-speech/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ChatterboxTextToSpeechInput": { + "title": "ChatterboxRequest", + "type": "object", + "properties": { + "text": { + "examples": [ + "I just found a hidden treasure in the backyard! Check it out!" + ], + "description": "The text to be converted to speech. You can additionally add the following emotive tags: , , , , , , , ", + "type": "string", + "title": "Text" + }, + "exaggeration": { + "description": "Exaggeration factor for the generated speech (0.0 = no exaggeration, 1.0 = maximum exaggeration).", + "type": "number", + "minimum": 0, + "maximum": 1, + "title": "Exaggeration", + "default": 0.25 + }, + "audio_url": { + "description": "Optional URL to an audio file to use as a reference for the generated speech. If provided, the model will try to match the style and tone of the reference audio.", + "type": "string", + "title": "Audio Url", + "default": "https://storage.googleapis.com/chatterbox-demo-samples/prompts/male_rickmorty.mp3" + }, + "temperature": { + "minimum": 0.05, + "maximum": 2, + "type": "number", + "description": "Temperature for generation (higher = more creative).", + "title": "Temperature", + "default": 0.7 + }, + "seed": { + "description": "Useful to control the reproducibility of the generated audio. Assuming all other properties didn't change, a fixed seed should always generate the exact same audio file.
Set to 0 for random seed.", + "type": "integer", + "title": "Seed" + }, + "cfg": { + "minimum": 0.1, + "maximum": 1, + "type": "number", + "title": "Cfg", + "default": 0.5 + } + }, + "x-fal-order-properties": [ + "text", + "audio_url", + "exaggeration", + "temperature", + "cfg", + "seed" + ], + "required": [ + "text" + ] + }, + "ChatterboxTextToSpeechOutput": {} + } + }, + "paths": { + "/fal-ai/chatterbox/text-to-speech/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/chatterbox/text-to-speech/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/chatterbox/text-to-speech": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ChatterboxTextToSpeechInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/chatterbox/text-to-speech/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ChatterboxTextToSpeechOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/minimax/voice-clone", + "metadata": { + "display_name": "MiniMax Voice Cloning", + "category": "text-to-speech", + "description": "Clone a voice from a sample audio and generate speech from text prompts using the MiniMax model, which leverages advanced AI techniques to create high-quality text-to-speech.", + "status": "active", + "tags": [ + "speech", + "" + ], + "updated_at": "2026-01-26T21:43:47.197Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/web-examples/minimax-tts/minimax-tts.webp", + "model_url": "https://fal.run/fal-ai/minimax/voice-clone", + "license_type": "commercial", + "date": "2025-05-06T16:22:37.683Z", + "group": { + "key": "minimax-speech", + "label": "Voice Cloning" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/minimax/voice-clone", + "version": "1.0.0", + "description": "The OpenAPI schema for
the fal-ai/minimax/voice-clone queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/minimax/voice-clone", + "category": "text-to-speech", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/web-examples/minimax-tts/minimax-tts.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/minimax/voice-clone", + "documentationUrl": "https://fal.ai/models/fal-ai/minimax/voice-clone/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MinimaxVoiceCloneInput": { + "title": "VoiceCloneRequest", + "type": "object", + "properties": { + "model": { + "examples": [ + "speech-02-hd", + "speech-02-turbo", + "speech-01-hd", + "speech-01-turbo" + ], + "description": "TTS model to use for preview. Options: speech-02-hd, speech-02-turbo, speech-01-hd, speech-01-turbo", + "type": "string", + "title": "Model", + "enum": [ + "speech-02-hd", + "speech-02-turbo", + "speech-01-hd", + "speech-01-turbo" + ], + "default": "speech-02-hd" + }, + "text": { + "examples": [ + "Hello, this is a preview of your cloned voice! I hope you like it!" + ], + "maxLength": 1000, + "type": "string", + "title": "Text", + "description": "Text to generate a TTS preview with the cloned voice (optional)", + "default": "Hello, this is a preview of your cloned voice! I hope you like it!" + }, + "audio_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/zonos/demo_voice_zonos.wav" + ], + "description": "\n URL of the input audio file for voice cloning. Should be at least 10 seconds\n long. To retain the voice permanently, use it with a TTS (text-to-speech)\n endpoint at least once within 7 days. 
Otherwise, it will be\n automatically deleted.\n ", + "type": "string", + "title": "Audio Url" + }, + "accuracy": { + "minimum": 0, + "description": "Text validation accuracy threshold (0-1)", + "type": "number", + "title": "Accuracy", + "maximum": 1 + }, + "noise_reduction": { + "description": "Enable noise reduction for the cloned voice", + "type": "boolean", + "title": "Noise Reduction", + "default": false + }, + "need_volume_normalization": { + "description": "Enable volume normalization for the cloned voice", + "type": "boolean", + "title": "Need Volume Normalization", + "default": false + } + }, + "x-fal-order-properties": [ + "audio_url", + "noise_reduction", + "need_volume_normalization", + "accuracy", + "text", + "model" + ], + "required": [ + "audio_url" + ] + }, + "MinimaxVoiceCloneOutput": { + "title": "VoiceCloneOutput", + "type": "object", + "properties": { + "custom_voice_id": { + "description": "The cloned voice ID for use with TTS", + "type": "string", + "title": "Custom Voice Id" + }, + "audio": { + "examples": [ + { + "url": "https://fal.media/files/kangaroo/kojPUCNZ9iUGFGMR-xb7h_speech.mp3" + } + ], + "description": "Preview audio generated with the cloned voice (if requested)", + "title": "Audio", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "custom_voice_id", + "audio" + ], + "required": [ + "custom_voice_id" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/minimax/voice-clone/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/voice-clone/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/minimax/voice-clone": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxVoiceCloneInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/voice-clone/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxVoiceCloneOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/minimax/speech-02-turbo", + "metadata": { + "display_name": "MiniMax Speech-02 Turbo", + "category": "text-to-speech", + "description": "Generate fast speech from text prompts and different voices using the MiniMax Speech-02 Turbo model, which leverages advanced AI techniques to create high-quality text-to-speech.", + "status": "active", + "tags": [ + "speech", + "" + ], + "updated_at": "2026-01-26T21:43:47.324Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/lion/9GDtsluojQ0BAg28nSQLh_f7fea66a2ac94526b14e6ef21ab65414.jpg", + "model_url": "https://fal.run/fal-ai/minimax/speech-02-turbo", + "license_type": "commercial", + "date": "2025-05-06T16:20:17.682Z", + "group": { + "key": "minimax-speech", + "label": "Text to Speech Turbo" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/minimax/speech-02-turbo", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/minimax/speech-02-turbo queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/minimax/speech-02-turbo", + "category": "text-to-speech", + "thumbnailUrl": "https://fal.media/files/lion/9GDtsluojQ0BAg28nSQLh_f7fea66a2ac94526b14e6ef21ab65414.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/minimax/speech-02-turbo", + "documentationUrl": "https://fal.ai/models/fal-ai/minimax/speech-02-turbo/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "MinimaxSpeech02TurboInput": { + "title": "TextToSpeechTurboRequest", + "type": "object", + "properties": { + "text": { + "examples": [ + "Hello world! This is a test of the text-to-speech system." + ], + "maxLength": 5000, + "type": "string", + "title": "Text", + "description": "Text to convert to speech (max 5000 characters, minimum 1 non-whitespace character)", + "minLength": 1 + }, + "language_boost": { + "enum": [ + "Chinese", + "Chinese,Yue", + "English", + "Arabic", + "Russian", + "Spanish", + "French", + "Portuguese", + "German", + "Turkish", + "Dutch", + "Ukrainian", + "Vietnamese", + "Indonesian", + "Japanese", + "Italian", + "Korean", + "Thai", + "Polish", + "Romanian", + "Greek", + "Czech", + "Finnish", + "Hindi", + "Bulgarian", + "Danish", + "Hebrew", + "Malay", + "Slovak", + "Swedish", + "Croatian", + "Hungarian", + "Norwegian", + "Slovenian", + "Catalan", + "Nynorsk", + "Afrikaans", + "auto" + ], + "description": "Enhance recognition of specified languages and dialects", + "type": "string", + "title": "Language Boost" + }, + "voice_setting": { + "default": { + "speed": 1, + "vol": 1, + "voice_id": "Wise_Woman", + "pitch": 0, + "english_normalization": false + }, + "description": "Voice configuration settings", + "title": "Voice Setting", + "allOf": [ + { + "$ref": "#/components/schemas/VoiceSetting" + } + ] + }, + "output_format": { + "enum": [ + "url", + "hex" + ], + "description": "Format of the output content (non-streaming only)", + "type": "string", + "title": "Output Format", + "default": "hex" + }, + "pronunciation_dict": { + "description": "Custom pronunciation dictionary for text replacement", + "title": "Pronunciation Dict", + "allOf": [ + { + "$ref": "#/components/schemas/PronunciationDict" + } + ] + }, + "audio_setting": { + "description": "Audio configuration settings", + "title": "Audio Setting", + "allOf": [ + { + "$ref": "#/components/schemas/AudioSetting" + } + ] + } + }, + "x-fal-order-properties": [ + "text", + "voice_setting", + "audio_setting", + "language_boost", + "output_format", + "pronunciation_dict" + ], + "required": [ + "text" + ] + }, + "MinimaxSpeech02TurboOutput": { + "title": "TextToSpeechOutput", + "type": "object", + "properties": { + "duration_ms": { + "description": "Duration of the audio in milliseconds", + "type": "integer", + "title": "Duration Ms" + }, + "audio": { + "examples": [ + { + "url": "https://fal.media/files/kangaroo/kojPUCNZ9iUGFGMR-xb7h_speech.mp3" + } + ], + "description": "The generated audio file", + "title": "Audio", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "audio", + "duration_ms" + ], + "required": [ + "audio", + "duration_ms" + ] + }, + "VoiceSetting": { + "title": "VoiceSetting", + "type": "object", + "properties": { + "speed": { + "minimum": 0.5, + "description": "Speech speed (0.5-2.0)", + "type": "number", + "maximum": 2, + "title": "Speed", + "default": 1 + }, + "vol": { + "minimum": 0.01, + "description": "Volume (0-10)", + "type": "number", + "maximum": 10, + "title": "Vol", + "default": 1 + }, + "voice_id": { + "examples": [ + "Wise_Woman", + "Friendly_Person", + "Inspirational_girl", + "Deep_Voice_Man", + "Calm_Woman", + "Casual_Guy", + "Lively_Girl", + "Patient_Man", + "Young_Knight", + "Determined_Man", + "Lovely_Girl", + "Decent_Boy", + "Imposing_Manner", + "Elegant_Man", + "Abbess", + "Sweet_Girl_2", + "Exuberant_Girl" + ], + "description": "Predefined voice ID to use for 
synthesis", + "type": "string", + "title": "Voice Id", + "default": "Wise_Woman" + }, + "pitch": { + "minimum": -12, + "description": "Voice pitch (-12 to 12)", + "type": "integer", + "maximum": 12, + "title": "Pitch", + "default": 0 + }, + "english_normalization": { + "description": "Enables English text normalization to improve number reading performance, with a slight increase in latency", + "type": "boolean", + "title": "English Normalization", + "default": false + }, + "emotion": { + "enum": [ + "happy", + "sad", + "angry", + "fearful", + "disgusted", + "surprised", + "neutral" + ], + "description": "Emotion of the generated speech", + "type": "string", + "title": "Emotion" + } + }, + "x-fal-order-properties": [ + "voice_id", + "speed", + "vol", + "pitch", + "emotion", + "english_normalization" + ] + }, + "PronunciationDict": { + "title": "PronunciationDict", + "type": "object", + "properties": { + "tone_list": { + "description": "List of pronunciation replacements in format ['text/(pronunciation)', ...]. For Chinese, tones are 1-5. Example: ['燕少飞/(yan4)(shao3)(fei1)']", + "type": "array", + "title": "Tone List", + "items": { + "type": "string" + } + } + }, + "x-fal-order-properties": [ + "tone_list" + ] + }, + "AudioSetting": { + "title": "AudioSetting", + "type": "object", + "properties": { + "format": { + "enum": [ + "mp3", + "pcm", + "flac" + ], + "description": "Audio format", + "type": "string", + "title": "Format", + "default": "mp3" + }, + "sample_rate": { + "enum": [ + 8000, + 16000, + 22050, + 24000, + 32000, + 44100 + ], + "description": "Sample rate of generated audio", + "type": "integer", + "title": "Sample Rate", + "default": 32000 + }, + "channel": { + "enum": [ + 1, + 2 + ], + "description": "Number of audio channels (1=mono, 2=stereo)", + "type": "integer", + "title": "Channel", + "default": 1 + }, + "bitrate": { + "enum": [ + 32000, + 64000, + 128000, + 256000 + ], + "description": "Bitrate of generated audio", + "type": "integer", + "title": "Bitrate", + "default": 128000 + } + }, + "x-fal-order-properties": [ + "sample_rate", + "bitrate", + "format", + "channel" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/minimax/speech-02-turbo/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/speech-02-turbo/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/minimax/speech-02-turbo": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxSpeech02TurboInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/speech-02-turbo/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxSpeech02TurboOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/minimax/speech-02-hd", + "metadata": { + "display_name": "MiniMax Speech-02 HD", + "category": "text-to-speech", + "description": "Generate speech from text prompts and different voices using the MiniMax Speech-02 HD model, which leverages advanced AI techniques to create high-quality text-to-speech.", + "status": "active", + "tags": [ + "speech" + ], + "updated_at": "2026-01-26T21:43:47.610Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/panda/A-mMZvJzo3C_kFbO7NmMi_28b71bd757bf4319973fb209c96453f9.jpg", + "model_url": "https://fal.run/fal-ai/minimax/speech-02-hd", + "license_type": "commercial", + "date": "2025-05-06T16:17:49.560Z", + "group": { + "key": "minimax-speech", + "label": "Text To Speech HD" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/minimax/speech-02-hd", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/minimax/speech-02-hd queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/minimax/speech-02-hd", + "category": "text-to-speech", + "thumbnailUrl": "https://fal.media/files/panda/A-mMZvJzo3C_kFbO7NmMi_28b71bd757bf4319973fb209c96453f9.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/minimax/speech-02-hd", + "documentationUrl": "https://fal.ai/models/fal-ai/minimax/speech-02-hd/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request 
id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MinimaxSpeech02HdInput": { + "title": "TextToSpeechHDRequest", + "type": "object", + "properties": { + "text": { + "examples": [ + "Hello world! This is a test of the text-to-speech system." + ], + "maxLength": 5000, + "type": "string", + "title": "Text", + "description": "Text to convert to speech (max 5000 characters, minimum 1 non-whitespace character)", + "minLength": 1 + }, + "language_boost": { + "enum": [ + "Chinese", + "Chinese,Yue", + "English", + "Arabic", + "Russian", + "Spanish", + "French", + "Portuguese", + "German", + "Turkish", + "Dutch", + "Ukrainian", + "Vietnamese", + "Indonesian", + "Japanese", + "Italian", + "Korean", + "Thai", + "Polish", + "Romanian", + "Greek", + "Czech", + "Finnish", + "Hindi", + "Bulgarian", + "Danish", + "Hebrew", + "Malay", + "Slovak", + "Swedish", + "Croatian", + "Hungarian", + "Norwegian", + "Slovenian", + "Catalan", + "Nynorsk", + "Afrikaans", + "auto" + ], + "description": "Enhance recognition of specified languages and dialects", + "type": "string", + "title": "Language Boost" + }, + "voice_setting": { + "default": { + "speed": 1, + "vol": 1, + "voice_id": "Wise_Woman", + "pitch": 0, + "english_normalization": false + }, + "description": "Voice configuration settings", + "title": "Voice Setting", + "allOf": [ + { + "$ref": "#/components/schemas/VoiceSetting" + } + ] + }, + "output_format": { + "enum": [ + "url", + "hex" + ], + "description": "Format of the output content (non-streaming only)", + "type": "string", + "title": "Output Format", + "default": "hex" + }, + "pronunciation_dict": { + "description": "Custom pronunciation dictionary for text replacement", + "title": "Pronunciation Dict", + "allOf": [ + { + "$ref": "#/components/schemas/PronunciationDict" + } + ] + }, + "audio_setting": { + "description": "Audio configuration settings", + "title": "Audio Setting", + "allOf": [ + { + "$ref": "#/components/schemas/AudioSetting" + } + ] + } + }, + "x-fal-order-properties": [ + "text", + "voice_setting", + "audio_setting", + "language_boost", + "output_format", + "pronunciation_dict" + ], + "required": [ + "text" + ] + }, + "MinimaxSpeech02HdOutput": { + "title": "TextToSpeechOutput", + "type": "object", + "properties": { + "duration_ms": { + "description": "Duration of the audio in milliseconds", + "type": "integer", + "title": "Duration Ms" + }, + "audio": { + "examples": [ + { + "url": "https://fal.media/files/kangaroo/kojPUCNZ9iUGFGMR-xb7h_speech.mp3" + } + ], + "description": "The generated audio file", + "title": "Audio", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "audio", + "duration_ms" + ], + "required": [ + "audio", + "duration_ms" + ] + }, + "VoiceSetting": { + "title": "VoiceSetting", + "type": "object", + "properties": { + "speed": { + "minimum": 0.5, + "description": "Speech speed (0.5-2.0)", + "type": "number", + "maximum": 2, + "title": "Speed", + "default": 1 + }, + "vol": { + 
"minimum": 0.01, + "description": "Volume (0-10)", + "type": "number", + "maximum": 10, + "title": "Vol", + "default": 1 + }, + "voice_id": { + "examples": [ + "Wise_Woman", + "Friendly_Person", + "Inspirational_girl", + "Deep_Voice_Man", + "Calm_Woman", + "Casual_Guy", + "Lively_Girl", + "Patient_Man", + "Young_Knight", + "Determined_Man", + "Lovely_Girl", + "Decent_Boy", + "Imposing_Manner", + "Elegant_Man", + "Abbess", + "Sweet_Girl_2", + "Exuberant_Girl" + ], + "description": "Predefined voice ID to use for synthesis", + "type": "string", + "title": "Voice Id", + "default": "Wise_Woman" + }, + "pitch": { + "minimum": -12, + "description": "Voice pitch (-12 to 12)", + "type": "integer", + "maximum": 12, + "title": "Pitch", + "default": 0 + }, + "english_normalization": { + "description": "Enables English text normalization to improve number reading performance, with a slight increase in latency", + "type": "boolean", + "title": "English Normalization", + "default": false + }, + "emotion": { + "enum": [ + "happy", + "sad", + "angry", + "fearful", + "disgusted", + "surprised", + "neutral" + ], + "description": "Emotion of the generated speech", + "type": "string", + "title": "Emotion" + } + }, + "x-fal-order-properties": [ + "voice_id", + "speed", + "vol", + "pitch", + "emotion", + "english_normalization" + ] + }, + "PronunciationDict": { + "title": "PronunciationDict", + "type": "object", + "properties": { + "tone_list": { + "description": "List of pronunciation replacements in format ['text/(pronunciation)', ...]. For Chinese, tones are 1-5. Example: ['燕少飞/(yan4)(shao3)(fei1)']", + "type": "array", + "title": "Tone List", + "items": { + "type": "string" + } + } + }, + "x-fal-order-properties": [ + "tone_list" + ] + }, + "AudioSetting": { + "title": "AudioSetting", + "type": "object", + "properties": { + "format": { + "enum": [ + "mp3", + "pcm", + "flac" + ], + "description": "Audio format", + "type": "string", + "title": "Format", + "default": "mp3" + }, + "sample_rate": { + "enum": [ + 8000, + 16000, + 22050, + 24000, + 32000, + 44100 + ], + "description": "Sample rate of generated audio", + "type": "integer", + "title": "Sample Rate", + "default": 32000 + }, + "channel": { + "enum": [ + 1, + 2 + ], + "description": "Number of audio channels (1=mono, 2=stereo)", + "type": "integer", + "title": "Channel", + "default": 1 + }, + "bitrate": { + "enum": [ + 32000, + 64000, + 128000, + 256000 + ], + "description": "Bitrate of generated audio", + "type": "integer", + "title": "Bitrate", + "default": 128000 + } + }, + "x-fal-order-properties": [ + "sample_rate", + "bitrate", + "format", + "channel" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/minimax/speech-02-hd/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/speech-02-hd/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/minimax/speech-02-hd": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxSpeech02HdInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/speech-02-hd/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxSpeech02HdOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/dia-tts", + "metadata": { + "display_name": "Dia", + "category": "text-to-speech", + "description": "Dia directly generates realistic dialogue from transcripts. Audio conditioning enables emotion control. 
Produces natural nonverbals like laughter and throat clearing.", + "status": "active", + "tags": [ + "text-to-speech" + ], + "updated_at": "2026-01-26T21:43:54.120Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound.jpg", + "model_url": "https://fal.run/fal-ai/dia-tts", + "license_type": "commercial", + "date": "2025-04-22T17:25:11.601Z", + "group": { + "key": "dia-tts", + "label": "Text to Speech" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/dia-tts", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/dia-tts queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/dia-tts", + "category": "text-to-speech", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/dia-tts", + "documentationUrl": "https://fal.ai/models/fal-ai/dia-tts/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "DiaTtsInput": { + "title": "DiaRequest", + "type": "object", + "properties": { + "text": { + "examples": [ + "[S1] Dia is an open weights text to dialogue model. [S2] You get full control over scripts and voices. [S1] Wow. Amazing. (laughs) [S2] Try it now on Fal." + ], + "title": "Text", + "type": "string", + "description": "The text to be converted to speech." + } + }, + "x-fal-order-properties": [ + "text" + ], + "required": [ + "text" + ] + }, + "DiaTtsOutput": { + "title": "DiaOutput", + "type": "object", + "properties": { + "audio": { + "examples": [ + { + "url": "https://v3.fal.media/files/elephant/d5lORit2npFfBykcAtyUr_tmplacfh8oa.mp3" + } + ], + "description": "The generated speech audio", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "audio" + ], + "required": [ + "audio" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. 
It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/dia-tts/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/dia-tts/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/dia-tts": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DiaTtsInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/dia-tts/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DiaTtsOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/orpheus-tts", + "metadata": { + "display_name": "Orpheus TTS", + "category": "text-to-speech", + "description": "Orpheus TTS is a state-of-the-art, Llama-based Speech-LLM designed for high-quality, empathetic text-to-speech generation. 
This model has been finetuned to deliver human-level speech synthesis, achieving exceptional clarity, expressiveness, and real-time performance.", + "status": "active", + "tags": [ + "text to speech", + "voice synthesis", + "high-fidelity" + ], + "updated_at": "2026-01-26T21:44:17.251Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound.jpg", + "model_url": "https://fal.run/fal-ai/orpheus-tts", + "license_type": "commercial", + "date": "2025-03-31T00:00:00.000Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/orpheus-tts", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/orpheus-tts queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/orpheus-tts", + "category": "text-to-speech", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/orpheus-tts", + "documentationUrl": "https://fal.ai/models/fal-ai/orpheus-tts/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "OrpheusTtsInput": { + "title": "OrpheusRequest", + "type": "object", + "properties": { + "text": { + "examples": [ + "I just found a hidden treasure in the backyard! Check it out!" + ], + "title": "Text", + "type": "string", + "description": "The text to be converted to speech. 
You can additionally add the following emotive tags: <laugh>, <chuckle>, <sigh>, <cough>, <sniffle>, <groan>, <yawn>, <gasp>" + }, + "voice": { + "enum": [ + "tara", + "leah", + "jess", + "leo", + "dan", + "mia", + "zac", + "zoe" + ], + "title": "Voice", + "type": "string", + "description": "Voice ID for the desired voice.", + "examples": [ + "tara" + ], + "default": "tara" + }, + "repetition_penalty": { + "minimum": 1.1, + "maximum": 2, + "type": "number", + "title": "Repetition Penalty", + "description": "Repetition penalty (>= 1.1 required for stable generations).", + "default": 1.2 + }, + "temperature": { + "minimum": 0, + "maximum": 2, + "type": "number", + "title": "Temperature", + "description": "Temperature for generation (higher = more creative).", + "default": 0.7 + } + }, + "x-fal-order-properties": [ + "text", + "voice", + "temperature", + "repetition_penalty" + ], + "required": [ + "text" + ] + }, + "OrpheusTtsOutput": { + "title": "OrpheusOutput", + "type": "object", + "properties": { + "audio": { + "examples": [ + { + "url": "https://v3.fal.media/files/kangaroo/RQ_pxc7oPdueYqWUqEbPE_tmpjnzvvzx_.wav" + } + ], + "description": "The generated speech audio", + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "audio" + ], + "required": [ + "audio" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/orpheus-tts/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/orpheus-tts/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
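The `OrpheusRequest` schema above adds voice selection and sampling controls on top of plain text. A sketch using the official `@fal-ai/client` package, which is an assumption here (the client is not part of this diff); field names, enums, and defaults mirror the schema:

```ts
// Sketch of an Orpheus TTS call via @fal-ai/client (assumed dependency).
import { fal } from '@fal-ai/client'

// The client can also pick up FAL_KEY from the environment in Node.
fal.config({ credentials: process.env.FAL_KEY })

const result = await fal.subscribe('fal-ai/orpheus-tts', {
  input: {
    // Emotive tags such as <laugh> are allowed per the description above.
    text: 'I just found a hidden treasure in the backyard! <laugh> Check it out!',
    voice: 'tara', // tara | leah | jess | leo | dan | mia | zac | zoe
    temperature: 0.7, // 0..2; higher = more creative
    repetition_penalty: 1.2, // schema requires >= 1.1 for stable generations
  },
})

// With @fal-ai/client v1, subscribe resolves to { data, requestId };
// OrpheusOutput.audio is a File object with a downloadable url.
console.log(result.data.audio.url)
```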
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/orpheus-tts": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OrpheusTtsInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/orpheus-tts/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OrpheusTtsOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/elevenlabs/tts/turbo-v2.5", + "metadata": { + "display_name": "ElevenLabs TTS Turbo v2.5", + "category": "text-to-speech", + "description": "Generate high-speed text-to-speech audio using ElevenLabs TTS Turbo v2.5.", + "status": "active", + "tags": [ + "audio" + ], + "updated_at": "2026-01-26T21:44:24.288Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/web-examples/elevenlabs/elevenlabs_thumbnail.webp", + "model_url": "https://fal.run/fal-ai/elevenlabs/tts/turbo-v2.5", + "license_type": "commercial", + "date": "2025-02-27T00:00:00.000Z", + "group": { + "key": "elevenlabs-audio", + "label": "TTS Turbo v2.5" + }, + "highlighted": false, + "stream_url": "https://fal.run/fal-ai/elevenlabs/tts/turbo-v2.5/stream", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/elevenlabs/tts/turbo-v2.5", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/elevenlabs/tts/turbo-v2.5 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/elevenlabs/tts/turbo-v2.5", + "category": "text-to-speech", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/web-examples/elevenlabs/elevenlabs_thumbnail.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/elevenlabs/tts/turbo-v2.5", + "documentationUrl": "https://fal.ai/models/fal-ai/elevenlabs/tts/turbo-v2.5/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ElevenlabsTtsTurboV25Input": { + "title": "TextToSpeechRequest", + "type": "object", + "properties": { + "stability": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Stability", + "description": "Voice stability (0-1)", + "default": 0.5 + }, + "next_text": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The text that comes after the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation.", + "title": "Next Text" + }, + "speed": { + "minimum": 0.7, + "maximum": 1.2, + "type": "number", + "title": "Speed", + "description": "Speech speed (0.7-1.2). Values below 1.0 slow down the speech, above 1.0 speed it up. Extreme values may affect quality.", + "default": 1 + }, + "style": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Style", + "description": "Style exaggeration (0-1)", + "default": 0 + }, + "text": { + "examples": [ + "Hello! This is a test of the text to speech system, powered by ElevenLabs. How does it sound?" + ], + "description": "The text to convert to speech", + "type": "string", + "title": "Text", + "minLength": 1 + }, + "timestamps": { + "description": "Whether to return timestamps for each word in the generated speech", + "type": "boolean", + "title": "Timestamps", + "default": false + }, + "similarity_boost": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Similarity Boost", + "description": "Similarity boost (0-1)", + "default": 0.75 + }, + "voice": { + "examples": [ + "Aria", + "Roger", + "Sarah", + "Laura", + "Charlie", + "George", + "Callum", + "River", + "Liam", + "Charlotte", + "Alice", + "Matilda", + "Will", + "Jessica", + "Eric", + "Chris", + "Brian", + "Daniel", + "Lily", + "Bill" + ], + "description": "The voice to use for speech generation", + "type": "string", + "title": "Voice", + "default": "Rachel" + }, + "language_code": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Language code (ISO 639-1) used to enforce a language for the model. An error will be returned if language code is not supported by the model.", + "title": "Language Code" + }, + "previous_text": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The text that came before the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation.", + "title": "Previous Text" + } + }, + "x-fal-order-properties": [ + "text", + "voice", + "stability", + "similarity_boost", + "style", + "speed", + "timestamps", + "previous_text", + "next_text", + "language_code" + ], + "required": [ + "text" + ] + }, + "ElevenlabsTtsTurboV25Output": { + "title": "TTSOutput", + "type": "object", + "properties": { + "audio": { + "examples": [ + { + "url": "https://v3.fal.media/files/zebra/zJL_oRY8h5RWwjoK1w7tx_output.mp3" + } + ], + "description": "The generated audio file", + "$ref": "#/components/schemas/File" + }, + "timestamps": { + "anyOf": [ + { + "type": "array", + "items": {} + }, + { + "type": "null" + } + ], + "description": "Timestamps for each word in the generated speech. 
Only returned if `timestamps` is set to True in the request.", + "title": "Timestamps" + } + }, + "x-fal-order-properties": [ + "audio", + "timestamps" + ], + "required": [ + "audio" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/elevenlabs/tts/turbo-v2.5/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/elevenlabs/tts/turbo-v2.5/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
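The `TextToSpeechRequest` schema above is the most parameter-rich of the TTS entries; the `timestamps` flag is the notable switch, since `TTSOutput.timestamps` is only populated when it is set. A fetch-based sketch under the same assumptions as the dia-tts example earlier; the `TurboTtsInput` interface is a hand-written mirror of the schema, not generated code:

```ts
// Hand-written mirror of TextToSpeechRequest; not generated from this diff.
interface TurboTtsInput {
  text: string // required, minLength 1
  voice?: string // default "Rachel"
  stability?: number // 0..1, default 0.5
  similarity_boost?: number // 0..1, default 0.75
  style?: number // 0..1, default 0
  speed?: number // 0.7..1.2, default 1
  timestamps?: boolean // default false
}

const input: TurboTtsInput = {
  text: 'Hello! This is a test of the text to speech system.',
  voice: 'Aria',
  timestamps: true, // request per-word timing alongside the audio file
}

// Submit to the queue; poll and fetch the result as in the earlier sketch.
const submitted = await fetch(
  'https://queue.fal.run/fal-ai/elevenlabs/tts/turbo-v2.5',
  {
    method: 'POST',
    headers: {
      Authorization: `Key ${process.env.FAL_KEY}`,
      'Content-Type': 'application/json',
    },
    body: JSON.stringify(input),
  },
)
```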
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/elevenlabs/tts/turbo-v2.5": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ElevenlabsTtsTurboV25Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/elevenlabs/tts/turbo-v2.5/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ElevenlabsTtsTurboV25Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/packages/typescript/ai-fal/json/fal.models.text-to-text.json b/packages/typescript/ai-fal/json/fal.models.text-to-text.json new file mode 100644 index 00000000..75f42676 --- /dev/null +++ b/packages/typescript/ai-fal/json/fal.models.text-to-text.json @@ -0,0 +1,309 @@ +{ + "generated_at": "2026-01-28T02:51:51.876Z", + "total_models": 1, + "category": "text-to-text", + "models": [ + { + "endpoint_id": "half-moon-ai/ai-detector/detect-text", + "metadata": { + "display_name": "Ai Detector", + "category": "text-to-text", + "description": "AI Detector (Text) is an advanced AI service that analyzes a passage and returns a verdict on whether it was likely written by AI.", + "status": "active", + "tags": [ + "utility", + "" + ], + "updated_at": "2026-01-26T21:41:54.089Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a868787/qHXAof1Mz69GGG3KoMze4_8fe2d05ca5d2442680a9ecb5e11fe129.jpg", + "model_url": "https://fal.run/half-moon-ai/ai-detector/detect-text", + "license_type": "commercial", + "date": "2025-12-16T12:11:26.765Z", + "group": { + "key": "Half-Moon-Detection", + "label": "Text Detection" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for half-moon-ai/ai-detector/detect-text", + "version": "1.0.0", + "description": "The OpenAPI schema for the half-moon-ai/ai-detector/detect-text queue.", + "x-fal-metadata": { + "endpointId": "half-moon-ai/ai-detector/detect-text", + "category": "text-to-text", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a868787/qHXAof1Mz69GGG3KoMze4_8fe2d05ca5d2442680a9ecb5e11fe129.jpg", + "playgroundUrl": "https://fal.ai/models/half-moon-ai/ai-detector/detect-text", + "documentationUrl": "https://fal.ai/models/half-moon-ai/ai-detector/detect-text/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." 
+ }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "AiDetectorDetectTextInput": { + "x-fal-order-properties": [ + "text" + ], + "type": "object", + "properties": { + "text": { + "description": "Text content to analyze for AI generation.", + "type": "string", + "examples": [ + "yo guys so i just tried this new coffee place downtown and honestly?? not worth the hype. waited like 30 mins for a latte that tasted burnt lol. maybe i caught them on a bad day idk but wont be going back anytime soon" + ], + "maxLength": 20000, + "minLength": 1, + "title": "Text" + } + }, + "title": "TextDetectionInput", + "required": [ + "text" + ] + }, + "AiDetectorDetectTextOutput": { + "x-fal-order-properties": [ + "verdict", + "confidence", + "is_ai_generated", + "latency" + ], + "type": "object", + "properties": { + "latency": { + "examples": [ + 13.617770671844482 + ], + "title": "Latency", + "type": "number" + }, + "verdict": { + "examples": [ + "human" + ], + "title": "Verdict", + "type": "string" + }, + "is_ai_generated": { + "examples": [ + false + ], + "title": "Is Ai Generated", + "type": "boolean" + }, + "confidence": { + "examples": [ + 0.85 + ], + "title": "Confidence", + "type": "number" + } + }, + "title": "AITextDetectionOutput", + "required": [ + "verdict", + "confidence", + "is_ai_generated", + "latency" + ] + } + } + }, + "paths": { + "/half-moon-ai/ai-detector/detect-text/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/half-moon-ai/ai-detector/detect-text/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/half-moon-ai/ai-detector/detect-text": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AiDetectorDetectTextInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/half-moon-ai/ai-detector/detect-text/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AiDetectorDetectTextOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/packages/typescript/ai-fal/json/fal.models.text-to-video.json b/packages/typescript/ai-fal/json/fal.models.text-to-video.json new file mode 100644 index 00000000..8853fbc5 --- /dev/null +++ b/packages/typescript/ai-fal/json/fal.models.text-to-video.json @@ -0,0 +1,43737 @@ +{ + "generated_at": "2026-01-28T02:51:51.850Z", + "total_models": 103, + "category": "text-to-video", + "models": [ + { + "endpoint_id": "fal-ai/kling-video/v2.5-turbo/pro/text-to-video", + "metadata": { + "display_name": "Kling v2.5 Text to Video", + "category": "text-to-video", + "description": "Kling 2.5 Turbo Pro: Top-tier text-to-video generation with unparalleled motion fluidity, cinematic visuals, and exceptional prompt precision.", + "status": "active", + "tags": [ + "animation", + "stylized" + ], + "updated_at": "2026-01-26T21:42:48.689Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/panda/6YaK9lV7ySsUA9I3dUp5r_8a807b0c8e2641db9e345107ab8a809e.jpg", + "model_url": "https://fal.run/fal-ai/kling-video/v2.5-turbo/pro/text-to-video", + "license_type": "commercial", + "date": "2025-09-22T22:48:44.637Z", + "group": { + "key": "kling-video-v25", + "label": "2.5 Turbo (Text to Video) Pro" + }, + "highlighted": true, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling-video/v2.5-turbo/pro/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-video/v2.5-turbo/pro/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/v2.5-turbo/pro/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://v3.fal.media/files/panda/6YaK9lV7ySsUA9I3dUp5r_8a807b0c8e2641db9e345107ab8a809e.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/v2.5-turbo/pro/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/v2.5-turbo/pro/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoV25TurboProTextToVideoInput": { + "title": "TextToVideoV25ProRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A noble lord walks among his people, his presence a comforting reassurance. He greets them with a gentle smile, embodying their hopes and earning their respect through simple interactions. The atmosphere is intimate and sincere, highlighting the bond between the leader and community." + ], + "title": "Prompt", + "type": "string", + "maxLength": 2500 + }, + "duration": { + "enum": [ + "5", + "10" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds", + "default": "5" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video frame", + "default": "16:9" + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "maxLength": 2500, + "default": "blur, distort, and low quality" + }, + "cfg_scale": { + "minimum": 0, + "title": "Cfg Scale", + "type": "number", + "maximum": 1, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt.\n ", + "default": 0.5 + } + }, + "x-fal-order-properties": [ + "prompt", + "duration", + "aspect_ratio", + "negative_prompt", + "cfg_scale" + ], + "required": [ + "prompt" + ] + }, + "KlingVideoV25TurboProTextToVideoOutput": { + "title": "TextToVideoV25ProOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/model_tests/kling/kling-v2.5-turbo-pro-text-to-video-output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/v2.5-turbo/pro/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v2.5-turbo/pro/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v2.5-turbo/pro/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV25TurboProTextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v2.5-turbo/pro/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV25TurboProTextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/veo3/fast", + "metadata": { + "display_name": "Veo 3 Fast", + "category": "text-to-video", + "description": "Faster and more cost effective version of Google's Veo 3! 
", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:17.085Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-3.jpg", + "model_url": "https://fal.run/fal-ai/veo3/fast", + "license_type": "commercial", + "date": "2025-07-09T16:38:31.244Z", + "group": { + "key": "veo3", + "label": "Text to Video [fast]" + }, + "highlighted": true, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/veo3/fast", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/veo3/fast queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/veo3/fast", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-3.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/veo3/fast", + "documentationUrl": "https://fal.ai/models/fal-ai/veo3/fast/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Veo3FastInput": { + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "duration", + "negative_prompt", + "resolution", + "generate_audio", + "seed", + "auto_fix" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A casual street interview on a busy New York City sidewalk in the afternoon. The interviewer holds a plain, unbranded microphone and asks: Have you seen Google's new Veo3 model It is a super good model. Person replies: Yeah I saw it, it's already available on fal. It's crazy good." 
+ ], + "maxLength": 20000, + "type": "string", + "title": "Prompt", + "description": "The text prompt describing the video you want to generate" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16" + ], + "description": "The aspect ratio of the generated video.", + "type": "string", + "title": "Aspect Ratio", + "default": "16:9" + }, + "duration": { + "enum": [ + "4s", + "6s", + "8s" + ], + "description": "The duration of the generated video.", + "type": "string", + "title": "Duration", + "default": "8s" + }, + "generate_audio": { + "description": "Whether to generate audio for the video.", + "type": "boolean", + "title": "Generate Audio", + "default": true + }, + "auto_fix": { + "description": "Whether to automatically attempt to fix prompts that fail content policy or other validation checks by rewriting them.", + "type": "boolean", + "title": "Auto Fix", + "default": true + }, + "resolution": { + "enum": [ + "720p", + "1080p" + ], + "description": "The resolution of the generated video.", + "type": "string", + "title": "Resolution", + "default": "720p" + }, + "seed": { + "description": "The seed for the random number generator.", + "type": "integer", + "title": "Seed" + }, + "negative_prompt": { + "description": "A negative prompt to guide the video generation.", + "type": "string", + "title": "Negative Prompt" + } + }, + "title": "Veo3TextToVideoInput", + "required": [ + "prompt" + ] + }, + "Veo3FastOutput": { + "x-fal-order-properties": [ + "video" + ], + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/penguin/Q-2dpcjIoQOldJRL3grsc_output.mp4" + } + ], + "description": "The generated video.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "Veo3TextToVideoOutput", + "required": [ + "video" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/veo3/fast/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/veo3/fast/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/veo3/fast": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Veo3FastInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/veo3/fast/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Veo3FastOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/minimax/hailuo-02/standard/text-to-video", + "metadata": { + "display_name": "MiniMax Hailuo 02 [Standard] (Text to Video)", + "category": "text-to-video", + "description": "MiniMax Hailuo-02 Text To Video API (Standard, 768p): Advanced video generation model with 768p resolution", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:26.493Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-1.jpg", + "model_url": "https://fal.run/fal-ai/minimax/hailuo-02/standard/text-to-video", + "license_type": "commercial", + "date": "2025-06-18T00:40:08.840Z", + "group": { + "key": "hailuo-02", + "label": "Text to Video (standard) " + }, + "highlighted": true, + "kind": "inference", + "duration_estimate": 4, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/minimax/hailuo-02/standard/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/minimax/hailuo-02/standard/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/minimax/hailuo-02/standard/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-1.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/minimax/hailuo-02/standard/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/minimax/hailuo-02/standard/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MinimaxHailuo02StandardTextToVideoInput": { + "title": "StandardTextToVideoHailuo02Input", + "type": "object", + "properties": { + "prompt_optimizer": { + "description": "Whether to use the model's prompt optimizer", + "type": "boolean", + "title": "Prompt Optimizer", + "default": true + }, + "duration": { + "enum": [ + "6", + "10" + ], + "description": "The duration of the video in seconds. 10 seconds videos are not supported for 1080p resolution.", + "type": "string", + "title": "Duration", + "default": "6" + }, + "prompt": { + "examples": [ + "A Galactic Smuggler is a rogue figure with a cybernetic arm and a well-worn coat that hints at many dangerous escapades across the galaxy. Their ship is filled with rare and exotic treasures from distant planets, concealed in hidden compartments, showing their expertise in illicit trade. Their belt is adorned with energy-based weapons, ready to be drawn at any moment to protect themselves or escape from tight situations. This character thrives in the shadows of space, navigating between the law and chaos with stealth and wit, always seeking the next big score while evading bounty hunters and law enforcement. The rogue's ship, rugged yet efficient, serves as both a home and a tool for their dangerous lifestyle. The treasures they collect reflect the diverse and intriguing worlds they've encountered—alien artifacts, rare minerals, and artifacts of unknown origin. Their reputation precedes them, with whispers of their dealings and the deadly encounters that often follow. A master of negotiation and deception, the Galactic Smuggler navigates the cosmos with an eye on the horizon, always one step ahead of those who pursue them." + ], + "maxLength": 2000, + "type": "string", + "title": "Prompt", + "minLength": 1 + } + }, + "x-fal-order-properties": [ + "prompt", + "duration", + "prompt_optimizer" + ], + "required": [ + "prompt" + ] + }, + "MinimaxHailuo02StandardTextToVideoOutput": { + "title": "TextToVideoHailuo02Output", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/kangaroo/_qEOfY3iKHsc86kqHUUh2_output.mp4" + } + ], + "description": "The generated video", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/minimax/hailuo-02/standard/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/hailuo-02/standard/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/minimax/hailuo-02/standard/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxHailuo02StandardTextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/hailuo-02/standard/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxHailuo02StandardTextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/veo3", + "metadata": { + "display_name": "Veo 3", + "category": "text-to-video", + "description": "Veo 3 by Google, the most advanced AI video generation model in the world. 
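For the Hailuo 02 standard schema above, the inputs are deliberately sparse: a prompt of up to 2000 characters, a string duration, and a `prompt_optimizer` toggle that lets the model rewrite the prompt itself. A sketch (client assumed, as before):

```ts
import { fal } from '@fal-ai/client'

const { data } = await fal.subscribe(
  'fal-ai/minimax/hailuo-02/standard/text-to-video',
  {
    input: {
      prompt: 'A galactic smuggler navigates neon-lit space docks.', // <= 2000 chars
      duration: '6', // "6" | "10"; per the schema, 10s is unavailable at 1080p
      prompt_optimizer: true, // default: let the model rewrite the prompt
    },
  },
)

console.log(data.video.url)
```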
With sound on!", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:30.578Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-2.jpg", + "model_url": "https://fal.run/fal-ai/veo3", + "license_type": "commercial", + "date": "2025-06-05T20:59:51.339Z", + "group": { + "key": "veo3", + "label": "Text to Video" + }, + "highlighted": true, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/veo3", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/veo3 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/veo3", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-2.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/veo3", + "documentationUrl": "https://fal.ai/models/fal-ai/veo3/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Veo3Input": { + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "duration", + "negative_prompt", + "resolution", + "generate_audio", + "seed", + "auto_fix" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A casual street interview on a busy New York City sidewalk in the afternoon. The interviewer holds a plain, unbranded microphone and asks: Have you seen Google's new Veo3 model It is a super good model. Person replies: Yeah I saw it, it's already available on fal. It's crazy good." 
+ ], + "maxLength": 20000, + "type": "string", + "title": "Prompt", + "description": "The text prompt describing the video you want to generate" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16" + ], + "description": "The aspect ratio of the generated video.", + "type": "string", + "title": "Aspect Ratio", + "default": "16:9" + }, + "duration": { + "enum": [ + "4s", + "6s", + "8s" + ], + "description": "The duration of the generated video.", + "type": "string", + "title": "Duration", + "default": "8s" + }, + "generate_audio": { + "description": "Whether to generate audio for the video.", + "type": "boolean", + "title": "Generate Audio", + "default": true + }, + "auto_fix": { + "description": "Whether to automatically attempt to fix prompts that fail content policy or other validation checks by rewriting them.", + "type": "boolean", + "title": "Auto Fix", + "default": true + }, + "resolution": { + "enum": [ + "720p", + "1080p" + ], + "description": "The resolution of the generated video.", + "type": "string", + "title": "Resolution", + "default": "720p" + }, + "seed": { + "description": "The seed for the random number generator.", + "type": "integer", + "title": "Seed" + }, + "negative_prompt": { + "description": "A negative prompt to guide the video generation.", + "type": "string", + "title": "Negative Prompt" + } + }, + "title": "Veo3TextToVideoInput", + "required": [ + "prompt" + ] + }, + "Veo3Output": { + "x-fal-order-properties": [ + "video" + ], + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/penguin/Q-2dpcjIoQOldJRL3grsc_output.mp4" + } + ], + "description": "The generated video.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "Veo3TextToVideoOutput", + "required": [ + "video" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/veo3/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/veo3/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/veo3": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Veo3Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/veo3/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Veo3Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/v2/master/text-to-video", + "metadata": { + "display_name": "Kling 2.0 Master", + "category": "text-to-video", + "description": "Generate video clips from your prompts using Kling 2.0 Master", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:56.101Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Upscale-5.jpeg", + "model_url": "https://fal.run/fal-ai/kling-video/v2/master/text-to-video", + "license_type": "commercial", + "date": "2025-04-14T21:47:18.476Z", + "group": { + "key": "kling-video-v2", + "label": "Text to Video v2 Master" + }, + "highlighted": true, + "kind": "inference", + "duration_estimate": 5, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling-video/v2/master/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-video/v2/master/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/v2/master/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Upscale-5.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/v2/master/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/v2/master/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoV2MasterTextToVideoInput": { + "title": "TextToVideoV2MasterRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A slow-motion drone shot descending from above a maze of neon-lit Tokyo alleyways at night during heavy rainfall. The camera gradually focuses on a lone figure in a luminescent white raincoat standing perfectly still amid the bustling crowd, all carrying black umbrellas. As the camera continues its downward journey, we see the raindrops creating rippling patterns on puddles that reflect the kaleidoscope of colors from the surrounding signs, creating a mirror world beneath the city." + ], + "title": "Prompt", + "type": "string", + "maxLength": 2500 + }, + "duration": { + "enum": [ + "5", + "10" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds", + "default": "5" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video frame", + "default": "16:9" + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "maxLength": 2500, + "default": "blur, distort, and low quality" + }, + "cfg_scale": { + "minimum": 0, + "title": "Cfg Scale", + "type": "number", + "maximum": 1, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt.\n ", + "default": 0.5 + } + }, + "x-fal-order-properties": [ + "prompt", + "duration", + "aspect_ratio", + "negative_prompt", + "cfg_scale" + ], + "required": [ + "prompt" + ] + }, + "KlingVideoV2MasterTextToVideoOutput": { + "title": "TextToVideoV2MasterOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/rabbit/5fu6OSZdvV825r2s_c0S8_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/v2/master/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v2/master/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v2/master/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV2MasterTextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v2/master/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV2MasterTextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pixverse/v5.6/text-to-video", + "metadata": { + "display_name": "Pixverse", + "category": "text-to-video", + "description": "Use the latest pixverse v5.6 model to turn your texts into amazing videos.", + "status": "active", + "tags": [ + "text-to-video" + ], + "updated_at": "2026-01-27T09:08:18.283Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8bf630/B3GZjoCeMcK3mjcRg_gSU_cdbadbe8b3d84cd1a4bbfb2ef6f3675b.jpg", + "model_url": "https://fal.run/fal-ai/pixverse/v5.6/text-to-video", + "license_type": "commercial", + "date": "2026-01-26T17:02:31.936Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pixverse/v5.6/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pixverse/v5.6/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pixverse/v5.6/text-to-video", + "category": "text-to-video", + "thumbnailUrl": 
"https://v3b.fal.media/files/b/0a8bf630/B3GZjoCeMcK3mjcRg_gSU_cdbadbe8b3d84cd1a4bbfb2ef6f3675b.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/pixverse/v5.6/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/pixverse/v5.6/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PixverseV56TextToVideoInput": { + "title": "TextToVideoRequestV5_6", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Epic low-cut camera capture of a girl clad in ultraviolet threads, Peter Max art style depiction, luminous diamond skin glistening under a vast moon's radiance, embodied in a superhuman flight among mystical ruins, symbolizing a deity's ritual ascent, hyper-detailed" + ], + "title": "Prompt", + "type": "string" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "4:3", + "1:1", + "3:4", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video", + "default": "16:9" + }, + "resolution": { + "enum": [ + "360p", + "540p", + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video", + "default": "720p" + }, + "style": { + "enum": [ + "anime", + "3d_animation", + "clay", + "comic", + "cyberpunk" + ], + "title": "Style", + "type": "string", + "description": "The style of the generated video" + }, + "thinking_type": { + "enum": [ + "enabled", + "disabled", + "auto" + ], + "title": "Thinking Type", + "type": "string", + "description": "Prompt optimization mode: 'enabled' to optimize, 'disabled' to turn off, 'auto' for model decision" + }, + "duration": { + "enum": [ + "5", + "8", + "10" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds. 
1080p videos are limited to 5 or 8 seconds", + "default": "5" + }, + "generate_audio_switch": { + "title": "Generate Audio Switch", + "type": "boolean", + "description": "Enable audio generation (BGM, SFX, dialogue)", + "default": false + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n " + }, + "negative_prompt": { + "examples": [ + "blurry, low quality, low resolution, pixelated, noisy, grainy, out of focus, poorly lit, poorly exposed, poorly composed, poorly framed, poorly cropped, poorly color corrected, poorly color graded" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt to be used for the generation", + "default": "" + } + }, + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "resolution", + "duration", + "negative_prompt", + "style", + "seed", + "generate_audio_switch", + "thinking_type" + ], + "required": [ + "prompt" + ] + }, + "PixverseV56TextToVideoOutput": { + "title": "VideoOutputV5_5", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 5485412, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://storage.googleapis.com/falserverless/model_tests/video_models/output-4.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pixverse/v5.6/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v5.6/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v5.6/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV56TextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v5.6/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV56TextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx-2-19b/distilled/text-to-video/lora", + "metadata": { + "display_name": "LTX-2 19B Distilled", + "category": "text-to-video", + "description": "Generate video with audio from text using LTX-2 Distilled and custom LoRA", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:42.355Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8936f9/MqecW0ic5_aJ01aOHjOt__da41267b8c59467bbbbaa3d80016ec07.jpg", + "model_url": "https://fal.run/fal-ai/ltx-2-19b/distilled/text-to-video/lora", + "license_type": "commercial", + "date": "2026-01-05T20:56:37.306Z", + "group": { + "key": "ltx-2-19b-distilled", + "label": "Text to Video with LoRA" + }, + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/ltx2-video-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/ltx2-video-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-2-19b/distilled/text-to-video/lora", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx-2-19b/distilled/text-to-video/lora queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-2-19b/distilled/text-to-video/lora", + "category": "text-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8936f9/MqecW0ic5_aJ01aOHjOt__da41267b8c59467bbbbaa3d80016ec07.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx-2-19b/distilled/text-to-video/lora", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx-2-19b/distilled/text-to-video/lora/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", 
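Every spec in this catalog shares the same QueueStatus schema and the same four queue routes (submit, status, cancel, result) rooted at https://queue.fal.run. The TypeScript sketch below walks that lifecycle end to end. It is illustrative only: it assumes a FAL_KEY environment variable and fal's `Key <token>` Authorization scheme (the schemas here only name the Authorization header), plus a Node 18+ runtime with a global `fetch`.

```ts
// Submit-and-poll sketch for the queue API described by these schemas.
// Assumptions (not in the spec): FAL_KEY is set in the environment, the
// Authorization header uses fal's `Key <token>` scheme, and the runtime
// is Node 18+ with a global `fetch`.

interface QueueStatus {
  status: 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED'
  request_id: string
  response_url?: string
  status_url?: string
  cancel_url?: string
  queue_position?: number
}

const headers = {
  Authorization: `Key ${process.env.FAL_KEY}`,
  'Content-Type': 'application/json',
}

export async function submitAndPoll<T>(
  endpointId: string,
  input: Record<string, unknown>,
): Promise<T> {
  // POST /{endpointId} enqueues the request and returns a QueueStatus.
  const submitRes = await fetch(`https://queue.fal.run/${endpointId}`, {
    method: 'POST',
    headers,
    body: JSON.stringify(input),
  })
  let status = (await submitRes.json()) as QueueStatus

  // GET /{endpointId}/requests/{request_id}/status until COMPLETED.
  // The schema also returns ready-made status_url/response_url fields
  // that could be used instead of rebuilding the paths.
  while (status.status !== 'COMPLETED') {
    await new Promise((resolve) => setTimeout(resolve, 1_000))
    const statusRes = await fetch(
      `https://queue.fal.run/${endpointId}/requests/${status.request_id}/status`,
      { headers },
    )
    status = (await statusRes.json()) as QueueStatus
  }

  // GET /{endpointId}/requests/{request_id} returns the model output,
  // e.g. a PixverseV56TextToVideoOutput-shaped object.
  const resultRes = await fetch(
    `https://queue.fal.run/${endpointId}/requests/${status.request_id}`,
    { headers },
  )
  return (await resultRes.json()) as T
}
```

For example, `await submitAndPoll('fal-ai/pixverse/v5.6/text-to-video', { prompt: 'a foggy harbor at dawn' })` would resolve to an object whose `video.url` points at the rendered file, per the output schema above.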
+ "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Ltx219bDistilledTextToVideoLoraInput": { + "title": "LTX2LoRADistilledTextToVideoInput", + "type": "object", + "properties": { + "use_multiscale": { + "title": "Use Multi-Scale", + "type": "boolean", + "description": "Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details.", + "default": true + }, + "prompt": { + "examples": [ + "A cowboy walking through a dusty town at high noon, camera following from behind, cinematic depth, realistic lighting, western mood, 4K film grain." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the video from." + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high", + "full" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use.", + "examples": [ + "none" + ], + "default": "none" + }, + "generate_audio": { + "title": "Generate Audio", + "type": "boolean", + "description": "Whether to generate audio for the video.", + "default": true + }, + "fps": { + "minimum": 1, + "title": "FPS", + "type": "number", + "description": "The frames per second of the generated video.", + "maximum": 60, + "default": 25 + }, + "loras": { + "title": "LoRAs", + "type": "array", + "description": "The LoRAs to use for the generation.", + "items": { + "$ref": "#/components/schemas/LoRAInput" + } + }, + "camera_lora": { + "enum": [ + "dolly_in", + "dolly_out", + "dolly_left", + "dolly_right", + "jib_up", + "jib_down", + "static", + "none" + ], + "title": "Camera LoRA", + "type": "string", + "description": "The camera LoRA to use. 
This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.", + "examples": [ + "none" + ], + "default": "none" + }, + "video_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated video.", + "title": "Video Size", + "default": "landscape_4_3" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "num_frames": { + "description": "The number of frames to generate.", + "type": "integer", + "minimum": 9, + "title": "Number of Frames", + "maximum": 481, + "default": 121 + }, + "camera_lora_scale": { + "minimum": 0, + "title": "Camera LoRA Scale", + "type": "number", + "description": "The scale of the camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.", + "maximum": 1, + "default": 1 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to generate the video from.", + "default": "blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, off-sync audio,incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts." 
+ }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "description": "The write mode of the generated video.", + "default": "balanced" + }, + "video_output_type": { + "enum": [ + "X264 (.mp4)", + "VP9 (.webm)", + "PRORES4444 (.mov)", + "GIF (.gif)" + ], + "title": "Video Output Type", + "type": "string", + "description": "The output type of the generated video.", + "default": "X264 (.mp4)" + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "description": "The quality of the generated video.", + "default": "high" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_prompt_expansion": { + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The seed for the random number generator.", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "prompt", + "num_frames", + "video_size", + "generate_audio", + "use_multiscale", + "fps", + "acceleration", + "camera_lora", + "camera_lora_scale", + "negative_prompt", + "seed", + "enable_prompt_expansion", + "enable_safety_checker", + "video_output_type", + "video_quality", + "video_write_mode", + "sync_mode", + "loras" + ], + "required": [ + "loras", + "prompt" + ] + }, + "Ltx219bDistilledTextToVideoLoraOutput": { + "title": "LTX2TextToVideoOutput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A cowboy walking through a dusty town at high noon, camera following from behind, cinematic depth, realistic lighting, western mood, 4K film grain." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for the generation." + }, + "seed": { + "examples": [ + 149063119 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used for the random number generator." + }, + "video": { + "examples": [ + { + "height": 704, + "duration": 6.44, + "url": "https://v3b.fal.media/files/b/0a8824b1/sdm0KfmenrlywesfzY1Y1_if6euPp1.mp4", + "width": 1248, + "fps": 25, + "file_name": "sdm0KfmenrlywesfzY1Y1_if6euPp1.mp4", + "content_type": "video/mp4", + "num_frames": 161 + } + ], + "description": "The generated video.", + "$ref": "#/components/schemas/VideoFile" + } + }, + "x-fal-order-properties": [ + "video", + "seed", + "prompt" + ], + "required": [ + "video", + "seed", + "prompt" + ] + }, + "LoRAInput": { + "title": "LoRAInput", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL, HuggingFace repo ID (owner/repo) to lora weights." + }, + "scale": { + "minimum": 0, + "title": "Scale", + "type": "number", + "description": "Scale factor for LoRA application (0.0 to 4.0).", + "maximum": 4, + "default": 1 + }, + "weight_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Name of the LoRA weight. 
Only used if `path` is a HuggingFace repository, and is only required when the repository contains multiple LoRA weights.", + "title": "Weight Name" + } + }, + "x-fal-order-properties": [ + "path", + "weight_name", + "scale" + ], + "description": "LoRA weight configuration.", + "required": [ + "path" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "duration": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The duration of the video", + "title": "Duration" + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The height of the video", + "title": "Height" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The width of the video", + "title": "Width" + }, + "fps": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The FPS of the video", + "title": "Fps" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "num_frames": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The number of frames in the video", + "title": "Num Frames" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-2-19b/distilled/text-to-video/lora/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
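The `/lora` variants differ from the base endpoints mainly in the required `loras` array, whose entries follow the LoRAInput schema above: `path` is required, `scale` is clamped to 0–4 and defaults to 1, and `weight_name` only matters for HuggingFace repos with multiple weights. Below is a sketch of a valid request body, usable with the `submitAndPoll` helper sketched earlier; the repo id is a hypothetical placeholder.

```ts
// Example Ltx219bDistilledTextToVideoLoraInput payload. Only `prompt` and
// `loras` are required; everything else falls back to server-side defaults.
// 'some-user/some-ltx2-lora' is a hypothetical HuggingFace repo id.
const loraRequest = {
  prompt:
    'A cowboy walking through a dusty town at high noon, camera following from behind.',
  loras: [{ path: 'some-user/some-ltx2-lora', scale: 0.8 }],
  camera_lora: 'dolly_in', // built-in camera-motion LoRA from the enum above
  camera_lora_scale: 0.6,
  num_frames: 121, // 9..481, default 121
}
```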
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/distilled/text-to-video/lora/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/distilled/text-to-video/lora": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx219bDistilledTextToVideoLoraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/distilled/text-to-video/lora/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx219bDistilledTextToVideoLoraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx-2-19b/distilled/text-to-video", + "metadata": { + "display_name": "LTX-2 19B Distilled", + "category": "text-to-video", + "description": "Generate video with audio from text using LTX-2 Distilled", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:42.485Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8936f6/lAxk4cHPDAO8Vi-zFWrYF_79d59ee30f374e16a7d2a2a2daa87e71.jpg", + "model_url": "https://fal.run/fal-ai/ltx-2-19b/distilled/text-to-video", + "license_type": "commercial", + "date": "2026-01-05T20:55:11.318Z", + "group": { + "key": "ltx-2-19b-distilled", + "label": "Text to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/ltx2-video-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/ltx2-video-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-2-19b/distilled/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx-2-19b/distilled/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-2-19b/distilled/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8936f6/lAxk4cHPDAO8Vi-zFWrYF_79d59ee30f374e16a7d2a2a2daa87e71.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx-2-19b/distilled/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx-2-19b/distilled/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + 
"properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Ltx219bDistilledTextToVideoInput": { + "title": "LTX2DistilledTextToVideoInput", + "type": "object", + "properties": { + "use_multiscale": { + "title": "Use Multi-Scale", + "type": "boolean", + "description": "Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details.", + "default": true + }, + "prompt": { + "examples": [ + "A cowboy walking through a dusty town at high noon, camera following from behind, cinematic depth, realistic lighting, western mood, 4K film grain." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the video from." + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high", + "full" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use.", + "examples": [ + "none" + ], + "default": "none" + }, + "generate_audio": { + "title": "Generate Audio", + "type": "boolean", + "description": "Whether to generate audio for the video.", + "default": true + }, + "fps": { + "minimum": 1, + "title": "FPS", + "type": "number", + "description": "The frames per second of the generated video.", + "maximum": 60, + "default": 25 + }, + "camera_lora": { + "enum": [ + "dolly_in", + "dolly_out", + "dolly_left", + "dolly_right", + "jib_up", + "jib_down", + "static", + "none" + ], + "title": "Camera LoRA", + "type": "string", + "description": "The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.", + "examples": [ + "none" + ], + "default": "none" + }, + "video_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated video.", + "title": "Video Size", + "default": "landscape_4_3" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "num_frames": { + "description": "The number of frames to generate.", + "type": "integer", + "minimum": 9, + "title": "Number of Frames", + "maximum": 481, + "default": 121 + }, + "camera_lora_scale": { + "minimum": 0, + "title": "Camera LoRA Scale", + "type": "number", + "description": "The scale of the camera LoRA to use. 
This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.", + "maximum": 1, + "default": 1 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to generate the video from.", + "default": "blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, off-sync audio,incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts." + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "description": "The write mode of the generated video.", + "default": "balanced" + }, + "video_output_type": { + "enum": [ + "X264 (.mp4)", + "VP9 (.webm)", + "PRORES4444 (.mov)", + "GIF (.gif)" + ], + "title": "Video Output Type", + "type": "string", + "description": "The output type of the generated video.", + "default": "X264 (.mp4)" + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "description": "The quality of the generated video.", + "default": "high" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_prompt_expansion": { + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The seed for the random number generator.", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "prompt", + "num_frames", + "video_size", + "generate_audio", + "use_multiscale", + "fps", + "acceleration", + "camera_lora", + "camera_lora_scale", + "negative_prompt", + "seed", + "enable_prompt_expansion", + "enable_safety_checker", + "video_output_type", + "video_quality", + "video_write_mode", + "sync_mode" + ], + "required": [ + "prompt" + ] + }, + "Ltx219bDistilledTextToVideoOutput": { + "title": "LTX2TextToVideoOutput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A cowboy walking through a dusty town at high noon, camera following from behind, cinematic depth, realistic lighting, western mood, 4K film grain." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for the generation." 
+ }, + "seed": { + "examples": [ + 149063119 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used for the random number generator." + }, + "video": { + "examples": [ + { + "height": 704, + "duration": 6.44, + "url": "https://v3b.fal.media/files/b/0a8824b1/sdm0KfmenrlywesfzY1Y1_if6euPp1.mp4", + "width": 1248, + "fps": 25, + "file_name": "sdm0KfmenrlywesfzY1Y1_if6euPp1.mp4", + "content_type": "video/mp4", + "num_frames": 161 + } + ], + "description": "The generated video.", + "$ref": "#/components/schemas/VideoFile" + } + }, + "x-fal-order-properties": [ + "video", + "seed", + "prompt" + ], + "required": [ + "video", + "seed", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "duration": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The duration of the video", + "title": "Duration" + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The height of the video", + "title": "Height" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The width of the video", + "title": "Width" + }, + "fps": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The FPS of the video", + "title": "Fps" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "num_frames": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The number of frames in the video", + "title": "Num Frames" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-2-19b/distilled/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
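For consumers of this catalog, the input schemas translate mechanically into TypeScript: fields listed under `required` become mandatory properties, and everything carrying a `default` becomes optional. The following is a hand-written mirror of Ltx219bDistilledTextToVideoInput, sketched from the schema above (field names verbatim; this is illustrative, not generated output).

```ts
// Hand-written mirror of Ltx219bDistilledTextToVideoInput. Only `prompt`
// is required; ranges and defaults are copied from the schema above.
type VideoSizePreset =
  | 'square_hd'
  | 'square'
  | 'portrait_4_3'
  | 'portrait_16_9'
  | 'landscape_4_3'
  | 'landscape_16_9'

interface Ltx2DistilledTextToVideoInput {
  prompt: string
  num_frames?: number // 9..481, default 121
  video_size?: VideoSizePreset | { width: number; height: number } // default 'landscape_4_3'
  generate_audio?: boolean // default true
  use_multiscale?: boolean // default true
  fps?: number // 1..60, default 25
  acceleration?: 'none' | 'regular' | 'high' | 'full' // default 'none'
  camera_lora?:
    | 'dolly_in' | 'dolly_out' | 'dolly_left' | 'dolly_right'
    | 'jib_up' | 'jib_down' | 'static' | 'none' // default 'none'
  camera_lora_scale?: number // 0..1, default 1
  negative_prompt?: string
  seed?: number | null
  enable_prompt_expansion?: boolean // default false
  enable_safety_checker?: boolean // default true
  video_output_type?: 'X264 (.mp4)' | 'VP9 (.webm)' | 'PRORES4444 (.mov)' | 'GIF (.gif)' // default 'X264 (.mp4)'
  video_quality?: 'low' | 'medium' | 'high' | 'maximum' // default 'high'
  video_write_mode?: 'fast' | 'balanced' | 'small' // default 'balanced'
  sync_mode?: boolean // default false
}
```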
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/distilled/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/distilled/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx219bDistilledTextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/distilled/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx219bDistilledTextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx-2-19b/text-to-video/lora", + "metadata": { + "display_name": "LTX-2 19B", + "category": "text-to-video", + "description": "Generate video with audio from text using LTX-2 and custom LoRA", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:42.743Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8936e2/Ys99LaAscUTgLjNHdoXe9_2d41aa966d6b473c9a63abb61cb65d38.jpg", + "model_url": "https://fal.run/fal-ai/ltx-2-19b/text-to-video/lora", + "license_type": "commercial", + "date": "2026-01-05T20:35:44.742Z", + "group": { + "key": "ltx-2-19b", + "label": "Text to Video with LoRA" + }, + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/ltx2-video-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/ltx2-video-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-2-19b/text-to-video/lora", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx-2-19b/text-to-video/lora queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-2-19b/text-to-video/lora", + "category": "text-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8936e2/Ys99LaAscUTgLjNHdoXe9_2d41aa966d6b473c9a63abb61cb65d38.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx-2-19b/text-to-video/lora", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx-2-19b/text-to-video/lora/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + 
"IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Ltx219bTextToVideoLoraInput": { + "title": "LTX2LoRATextToVideoInput", + "type": "object", + "properties": { + "use_multiscale": { + "title": "Use Multi-Scale", + "type": "boolean", + "description": "Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details.", + "default": true + }, + "prompt": { + "examples": [ + "A cowboy walking through a dusty town at high noon, camera following from behind, cinematic depth, realistic lighting, western mood, 4K film grain." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the video from." + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high", + "full" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use.", + "examples": [ + "regular" + ], + "default": "regular" + }, + "generate_audio": { + "title": "Generate Audio", + "type": "boolean", + "description": "Whether to generate audio for the video.", + "default": true + }, + "fps": { + "minimum": 1, + "title": "FPS", + "type": "number", + "description": "The frames per second of the generated video.", + "maximum": 60, + "default": 25 + }, + "loras": { + "title": "LoRAs", + "type": "array", + "description": "The LoRAs to use for the generation.", + "items": { + "$ref": "#/components/schemas/LoRAInput" + } + }, + "camera_lora": { + "enum": [ + "dolly_in", + "dolly_out", + "dolly_left", + "dolly_right", + "jib_up", + "jib_down", + "static", + "none" + ], + "title": "Camera LoRA", + "type": "string", + "description": "The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.", + "examples": [ + "none" + ], + "default": "none" + }, + "video_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated video.", + "title": "Video Size", + "default": "landscape_4_3" + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance Scale", + "type": "number", + "description": "The guidance scale to use.", + "maximum": 10, + "default": 3 + }, + "num_frames": { + "description": "The number of frames to generate.", + "type": "integer", + "minimum": 9, + "title": "Number of Frames", + "maximum": 481, + "default": 121 + }, + "camera_lora_scale": { + "minimum": 0, + "title": "Camera LoRA Scale", + "type": "number", + "description": "The scale of the camera LoRA to use. 
This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.", + "maximum": 1, + "default": 1 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to generate the video from.", + "default": "blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, off-sync audio,incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts." + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "description": "The write mode of the generated video.", + "default": "balanced" + }, + "video_output_type": { + "enum": [ + "X264 (.mp4)", + "VP9 (.webm)", + "PRORES4444 (.mov)", + "GIF (.gif)" + ], + "title": "Video Output Type", + "type": "string", + "description": "The output type of the generated video.", + "default": "X264 (.mp4)" + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "description": "The quality of the generated video.", + "default": "high" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_prompt_expansion": { + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + }, + "num_inference_steps": { + "minimum": 8, + "title": "Number of Inference Steps", + "type": "integer", + "description": "The number of inference steps to use.", + "maximum": 50, + "default": 40 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The seed for the random number generator.", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "prompt", + "num_frames", + "video_size", + "generate_audio", + "use_multiscale", + "fps", + "guidance_scale", + "num_inference_steps", + "acceleration", + "camera_lora", + "camera_lora_scale", + "negative_prompt", + "seed", + "enable_prompt_expansion", + "enable_safety_checker", + "video_output_type", + "video_quality", + "video_write_mode", + "sync_mode", + "loras" + ], + "required": [ + "loras", + "prompt" + ] + }, + "Ltx219bTextToVideoLoraOutput": { + "title": "LTX2TextToVideoOutput", + 
"type": "object", + "properties": { + "prompt": { + "examples": [ + "A cowboy walking through a dusty town at high noon, camera following from behind, cinematic depth, realistic lighting, western mood, 4K film grain." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for the generation." + }, + "seed": { + "examples": [ + 149063119 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used for the random number generator." + }, + "video": { + "examples": [ + { + "height": 704, + "duration": 6.44, + "url": "https://v3b.fal.media/files/b/0a8824b1/sdm0KfmenrlywesfzY1Y1_if6euPp1.mp4", + "width": 1248, + "fps": 25, + "file_name": "sdm0KfmenrlywesfzY1Y1_if6euPp1.mp4", + "content_type": "video/mp4", + "num_frames": 161 + } + ], + "description": "The generated video.", + "$ref": "#/components/schemas/VideoFile" + } + }, + "x-fal-order-properties": [ + "video", + "seed", + "prompt" + ], + "required": [ + "video", + "seed", + "prompt" + ] + }, + "LoRAInput": { + "title": "LoRAInput", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL, HuggingFace repo ID (owner/repo) to lora weights." + }, + "scale": { + "minimum": 0, + "title": "Scale", + "type": "number", + "description": "Scale factor for LoRA application (0.0 to 4.0).", + "maximum": 4, + "default": 1 + }, + "weight_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Name of the LoRA weight. Only used if `path` is a HuggingFace repository, and is only required when the repository contains multiple LoRA weights.", + "title": "Weight Name" + } + }, + "x-fal-order-properties": [ + "path", + "weight_name", + "scale" + ], + "description": "LoRA weight configuration.", + "required": [ + "path" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "duration": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The duration of the video", + "title": "Duration" + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The height of the video", + "title": "Height" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The width of the video", + "title": "Width" + }, + "fps": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The FPS of the video", + "title": "Fps" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "num_frames": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The number of frames in the video", + "title": "Num Frames" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-2-19b/text-to-video/lora/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/text-to-video/lora/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
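Each spec also exposes the cancel route shown above (PUT .../requests/{request_id}/cancel), which answers with a bare `{ success: boolean }` object. A minimal sketch, reusing the FAL_KEY and `Key <token>` header assumptions from the earlier example:

```ts
// Cancel a queued or in-progress request via the PUT cancel route.
// Returns the schema's `success` flag, treating an absent field as false.
async function cancelRequest(
  endpointId: string,
  requestId: string,
): Promise<boolean> {
  const res = await fetch(
    `https://queue.fal.run/${endpointId}/requests/${requestId}/cancel`,
    { method: 'PUT', headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
  )
  const body = (await res.json()) as { success?: boolean }
  return body.success ?? false
}
```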
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/text-to-video/lora": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx219bTextToVideoLoraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/text-to-video/lora/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx219bTextToVideoLoraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx-2-19b/text-to-video", + "metadata": { + "display_name": "LTX-2 19B", + "category": "text-to-video", + "description": "Generate video with audio from text using LTX-2", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:43.146Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a893615/SMsOPqZ6266qmhio8-GEf_22814884373a42bcadb19dbe083d2cfe.jpg", + "model_url": "https://fal.run/fal-ai/ltx-2-19b/text-to-video", + "license_type": "commercial", + "date": "2026-01-05T20:21:49.298Z", + "group": { + "key": "ltx-2-19b", + "label": "Text to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/ltx2-video-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/ltx2-video-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-2-19b/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx-2-19b/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-2-19b/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a893615/SMsOPqZ6266qmhio8-GEf_22814884373a42bcadb19dbe083d2cfe.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx-2-19b/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx-2-19b/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Ltx219bTextToVideoInput": { + "title": "LTX2TextToVideoInput", + "type": "object", + "properties": { + "use_multiscale": { + "title": "Use Multi-Scale", + "type": "boolean", + "description": "Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details.", + "default": true + }, + "prompt": { + "examples": [ + "A cowboy walking through a dusty town at high noon, camera following from behind, cinematic depth, realistic lighting, western mood, 4K film grain." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the video from." + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high", + "full" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use.", + "examples": [ + "regular" + ], + "default": "regular" + }, + "generate_audio": { + "title": "Generate Audio", + "type": "boolean", + "description": "Whether to generate audio for the video.", + "default": true + }, + "fps": { + "minimum": 1, + "title": "FPS", + "type": "number", + "description": "The frames per second of the generated video.", + "maximum": 60, + "default": 25 + }, + "camera_lora": { + "enum": [ + "dolly_in", + "dolly_out", + "dolly_left", + "dolly_right", + "jib_up", + "jib_down", + "static", + "none" + ], + "title": "Camera LoRA", + "type": "string", + "description": "The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.", + "examples": [ + "none" + ], + "default": "none" + }, + "video_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated video.", + "title": "Video Size", + "default": "landscape_4_3" + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance Scale", + "type": "number", + "description": "The guidance scale to use.", + "maximum": 10, + "default": 3 + }, + "num_frames": { + "description": "The number of frames to generate.", + "type": "integer", + "minimum": 9, + "title": "Number of Frames", + "maximum": 481, + "default": 121 + }, + "camera_lora_scale": { + "minimum": 0, + "title": "Camera LoRA Scale", + "type": "number", + "description": "The scale of the camera LoRA to use. 
This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.", + "maximum": 1, + "default": 1 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to generate the video from.", + "default": "blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, off-sync audio,incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts." + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "description": "The write mode of the generated video.", + "default": "balanced" + }, + "video_output_type": { + "enum": [ + "X264 (.mp4)", + "VP9 (.webm)", + "PRORES4444 (.mov)", + "GIF (.gif)" + ], + "title": "Video Output Type", + "type": "string", + "description": "The output type of the generated video.", + "default": "X264 (.mp4)" + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "description": "The quality of the generated video.", + "default": "high" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_prompt_expansion": { + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + }, + "num_inference_steps": { + "minimum": 8, + "title": "Number of Inference Steps", + "type": "integer", + "description": "The number of inference steps to use.", + "maximum": 50, + "default": 40 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The seed for the random number generator.", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "prompt", + "num_frames", + "video_size", + "generate_audio", + "use_multiscale", + "fps", + "guidance_scale", + "num_inference_steps", + "acceleration", + "camera_lora", + "camera_lora_scale", + "negative_prompt", + "seed", + "enable_prompt_expansion", + "enable_safety_checker", + "video_output_type", + "video_quality", + "video_write_mode", + "sync_mode" + ], + "required": [ + "prompt" + ] + }, + "Ltx219bTextToVideoOutput": { + "title": "LTX2TextToVideoOutput", + "type": "object", + 
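For reference, a minimal Ltx219bTextToVideoInput payload under the schema above: only `prompt` is required, and the optional fields below simply restate the documented defaults, so they could be omitted. The prompt text is illustrative.

// Minimal request body for fal-ai/ltx-2-19b/text-to-video; pairs with the
// runQueued sketch earlier, e.g.
//   await runQueued('fal-ai/ltx-2-19b/text-to-video', ltxInput)
const ltxInput = {
  prompt:
    'A cowboy walking through a dusty town at high noon, camera following from behind',
  num_frames: 121,             // 9..481, default 121
  video_size: 'landscape_4_3', // preset string, or an ImageSize {width, height}
  generate_audio: true,        // default true
  fps: 25,                     // 1..60, default 25
}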
"properties": { + "prompt": { + "examples": [ + "A cowboy walking through a dusty town at high noon, camera following from behind, cinematic depth, realistic lighting, western mood, 4K film grain." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for the generation." + }, + "seed": { + "examples": [ + 149063119 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used for the random number generator." + }, + "video": { + "examples": [ + { + "height": 704, + "duration": 6.44, + "url": "https://v3b.fal.media/files/b/0a8824b1/sdm0KfmenrlywesfzY1Y1_if6euPp1.mp4", + "width": 1248, + "fps": 25, + "file_name": "sdm0KfmenrlywesfzY1Y1_if6euPp1.mp4", + "content_type": "video/mp4", + "num_frames": 161 + } + ], + "description": "The generated video.", + "$ref": "#/components/schemas/VideoFile" + } + }, + "x-fal-order-properties": [ + "video", + "seed", + "prompt" + ], + "required": [ + "video", + "seed", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "duration": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The duration of the video", + "title": "Duration" + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The height of the video", + "title": "Height" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The width of the video", + "title": "Width" + }, + "fps": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The FPS of the video", + "title": "Fps" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "num_frames": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The number of frames in the video", + "title": "Num Frames" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-2-19b/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx219bTextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx219bTextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kandinsky5-pro/text-to-video", + "metadata": { + "display_name": "Kandinsky5 Pro", + "category": "text-to-video", + "description": "Kandinsky 5.0 Pro is a diffusion model for fast, high-quality text-to-video generation.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:48.161Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8775f2/Z3ul8R1t-mF0RUH6Z36v9_ae12c69d675b4a1794890ce3cb7b6b49.jpg", + "model_url": "https://fal.run/fal-ai/kandinsky5-pro/text-to-video", + "license_type": "commercial", + "date": "2025-12-23T13:40:22.639Z", + "group": { + "key": "kandinsky5-pro", + "label": "Text to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kandinsky5-pro/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kandinsky5-pro/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kandinsky5-pro/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8775f2/Z3ul8R1t-mF0RUH6Z36v9_ae12c69d675b4a1794890ce3cb7b6b49.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/kandinsky5-pro/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/kandinsky5-pro/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Kandinsky5ProTextToVideoInput": { + "x-fal-order-properties": [ + "prompt", + "resolution", + "aspect_ratio", + "duration", + "num_inference_steps", + "acceleration" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A medium shot establishes a modern, minimalist office setting: clean lines, muted grey walls, and polished wood surfaces. The focus shifts to a close-up on a woman in sharp, navy blue business attire. Her crisp white blouse contrasts with the deep blue of her tailored suit jacket. The subtle texture of the fabric is visible—a fine weave with a slight sheen. Her expression is serious, yet engaging, as she speaks to someone unseen just beyond the frame. Close-up on her eyes, showing the intensity of her gaze and the fine lines around them that hint at experience and focus. Her lips are slightly parted, as if mid-sentence. The light catches the subtle highlights in her auburn hair, meticulously styled. Note the slight catch of light on the silver band of her watch. High resolution 4k" + ], + "description": "The text prompt to guide video generation.", + "type": "string", + "title": "Prompt" + }, + "resolution": { + "enum": [ + "512P", + "1024P" + ], + "description": "Video resolution: 512p or 1024p.", + "type": "string", + "title": "Resolution", + "default": "512P" + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "description": "Acceleration level for faster generation.", + "type": "string", + "title": "Acceleration", + "default": "regular" + }, + "aspect_ratio": { + "enum": [ + "3:2", + "1:1", + "2:3" + ], + "description": "Aspect ratio of the generated video. One of (3:2, 1:1, 2:3).", + "type": "string", + "title": "Aspect Ratio", + "default": "3:2" + }, + "num_inference_steps": { + "minimum": 1, + "description": "The number of inference steps.", + "type": "integer", + "title": "Num Inference Steps", + "maximum": 50, + "default": 28 + }, + "duration": { + "enum": [ + "5s" + ], + "description": "The length of the video to generate (5s or 10s)", + "type": "string", + "examples": [ + "5s" + ], + "title": "Duration", + "default": "5s" + } + }, + "title": "KandinskyT2VRequest", + "required": [ + "prompt" + ] + }, + "Kandinsky5ProTextToVideoOutput": { + "x-fal-order-properties": [ + "video" + ], + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 14530500, + "file_name": "output.mp4", + "content_type": "application/octet-stream", + "url": "https://v3b.fal.media/files/b/0a87754e/o5FWdz83KTXzq0FB7aG5Q_output.mp4" + } + ], + "description": "The generated video file.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "KandinskyT2VResponse" + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kandinsky5-pro/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kandinsky5-pro/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kandinsky5-pro/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Kandinsky5ProTextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kandinsky5-pro/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Kandinsky5ProTextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/bytedance/seedance/v1.5/pro/text-to-video", + "metadata": { + "display_name": "Bytedance", + "category": "text-to-video", + "description": "Generate videos with audio with Seedance 1.5", + "status": "active", + "tags": [ + "bytedance", + "seedance", + "audio" + ], + "updated_at": "2026-01-26T21:41:48.287Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a876c8c/7ieGsrCnzZmiES-uDGNXs_d16568d7e335443fb7a8dece1f9a9b0d.jpg", + "model_url": "https://fal.run/fal-ai/bytedance/seedance/v1.5/pro/text-to-video", + "license_type": "commercial", + "date": "2025-12-23T06:59:34.673Z", + "group": { + "key": "seedance-v15", + "label": "Text to Video Pro v1.5" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + 
"info": { + "title": "Queue OpenAPI for fal-ai/bytedance/seedance/v1.5/pro/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/bytedance/seedance/v1.5/pro/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/bytedance/seedance/v1.5/pro/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a876c8c/7ieGsrCnzZmiES-uDGNXs_d16568d7e335443fb7a8dece1f9a9b0d.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/bytedance/seedance/v1.5/pro/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/bytedance/seedance/v1.5/pro/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "BytedanceSeedanceV15ProTextToVideoInput": { + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "resolution", + "duration", + "camera_fixed", + "seed", + "enable_safety_checker", + "generate_audio" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Defense attorney declaring \"Ladies and gentlemen, reasonable doubt isn't just a phrase, it's the foundation of justice itself\", footsteps on marble, jury shifting, courtroom drama, closing argument power." 
+ ], + "title": "Prompt", + "type": "string", + "description": "The text prompt used to generate the video" + }, + "resolution": { + "enum": [ + "480p", + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "Video resolution - 480p for faster generation, 720p for balance, 1080p for higher quality", + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "21:9", + "16:9", + "4:3", + "1:1", + "3:4", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video", + "default": "16:9" + }, + "generate_audio": { + "title": "Generate Audio", + "type": "boolean", + "description": "Whether to generate audio for the video", + "default": true + }, + "duration": { + "enum": [ + "4", + "5", + "6", + "7", + "8", + "9", + "10", + "11", + "12" + ], + "title": "Duration", + "type": "string", + "description": "Duration of the video in seconds", + "default": "5" + }, + "enable_safety_checker": { + "examples": [ + true + ], + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "camera_fixed": { + "title": "Camera Fixed", + "type": "boolean", + "description": "Whether to fix the camera position", + "default": false + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed to control video generation. Use -1 for random." + } + }, + "title": "SeedanceProv15TextToVideoInput", + "required": [ + "prompt" + ] + }, + "BytedanceSeedanceV15ProTextToVideoOutput": { + "x-fal-order-properties": [ + "video", + "seed" + ], + "type": "object", + "properties": { + "seed": { + "examples": [ + 42 + ], + "title": "Seed", + "type": "integer", + "description": "Seed used for generation" + }, + "video": { + "examples": [ + { + "url": "https://v3b.fal.media/files/b/0a87743e/0K5lW0v-iC_BbKo64o0cA_video.mp4" + } + ], + "title": "Video", + "description": "Generated video file", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "SeedanceProv15T2VVideoOutput", + "required": [ + "video", + "seed" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/bytedance/seedance/v1.5/pro/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedance/v1.5/pro/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedance/v1.5/pro/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BytedanceSeedanceV15ProTextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedance/v1.5/pro/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BytedanceSeedanceV15ProTextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "wan/v2.6/text-to-video", + "metadata": { + "display_name": "Wan v2.6 Text to Video", + "category": "text-to-video", + "description": "Wan 2.6 text-to-video model.", + "status": "active", + "tags": [ + "text-to-video" + ], + "updated_at": "2026-01-26T21:41:54.655Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a86d2d7/3WTDnIgc5z_RVh24RgF-D.png", + "model_url": "https://fal.run/wan/v2.6/text-to-video", + "license_type": "commercial", + "date": "2025-12-16T05:12:22.693Z", + "group": { + "key": "v2.6", + "label": "Text to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for wan/v2.6/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the wan/v2.6/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "wan/v2.6/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a86d2d7/3WTDnIgc5z_RVh24RgF-D.png", + "playgroundUrl": "https://fal.ai/models/wan/v2.6/text-to-video", + "documentationUrl": "https://fal.ai/models/wan/v2.6/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "V26TextToVideoInput": { + "x-fal-order-properties": [ + "prompt", + "audio_url", + "aspect_ratio", + "resolution", + "duration", + "negative_prompt", + "enable_prompt_expansion", + "multi_shots", + "seed", + "enable_safety_checker" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Humorous but premium mini-trailer: a tiny fox 3D director proves \"multi-scene\" by calling simple commands that instantly change the set. Extreme photoreal 4K, cinematic lighting, subtle film grain, smooth camera. No subtitles, no UI, no watermark.\n\nShot 1 [0-3s] Macro close-up on the fox snapping a clapboard labeled \"fal\". the fox says : \"Action.\"\nShot 2 [3-6s] Hard cut: Wild West street at sunset. Wide shot, dust in the air. The Fox (in frame) points forward: \"Make it wide.\"\nShot 3 [6-10s] Hard cut: jungle river. The fox stands on a small boat. The camera pushes forward through vines and mist. Fox saying: \"Now… adventure.\"\nShot 4 [10-15s] Hard cut: space station window. Slow orbit around the fox with stars outside. Fox nods: \"Done. Next movie.\"\n" + ], + "description": "The text prompt for video generation. Supports Chinese and English, max 800 characters. For multi-shot videos, use format: 'Overall description. First shot [0-3s] content. Second shot [3-5s] content.'", + "type": "string", + "minLength": 1, + "title": "Prompt" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1", + "4:3", + "3:4" + ], + "description": "The aspect ratio of the generated video. Wan 2.6 supports additional ratios.", + "type": "string", + "title": "Aspect Ratio", + "default": "16:9" + }, + "resolution": { + "enum": [ + "720p", + "1080p" + ], + "description": "Video resolution tier. Wan 2.6 T2V only supports 720p and 1080p (no 480p).", + "type": "string", + "title": "Resolution", + "default": "1080p" + }, + "duration": { + "enum": [ + "5", + "10", + "15" + ], + "description": "Duration of the generated video in seconds. Choose between 5, 10, or 15 seconds.", + "type": "string", + "examples": [ + "5", + "10", + "15" + ], + "title": "Duration", + "default": "5" + }, + "enable_safety_checker": { + "examples": [ + true + ], + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "audio_url": { + "description": "\nURL of the audio to use as the background music. Must be publicly accessible.\nLimit handling: If the audio duration exceeds the duration value (5, 10, or 15 seconds),\nthe audio is truncated to the first N seconds, and the rest is discarded. 
If\nthe audio is shorter than the video, the remaining part of the video will be silent.\nFor example, if the audio is 3 seconds long and the video duration is 5 seconds, the\nfirst 3 seconds of the output video will have sound, and the last 2 seconds will be silent.\n- Format: WAV, MP3.\n- Duration: 3 to 30 s.\n- File size: Up to 15 MB.\n", + "type": "string", + "title": "Audio Url" + }, + "seed": { + "description": "Random seed for reproducibility. If None, a random seed is chosen.", + "type": "integer", + "title": "Seed" + }, + "multi_shots": { + "description": "When true, enables intelligent multi-shot segmentation for coherent narrative videos. Only active when enable_prompt_expansion is True. Set to false for single-shot generation.", + "type": "boolean", + "title": "Multi Shots", + "default": true + }, + "negative_prompt": { + "examples": [ + "low resolution, error, worst quality, low quality, defects" + ], + "description": "Negative prompt to describe content to avoid. Max 500 characters.", + "type": "string", + "title": "Negative Prompt", + "default": "" + }, + "enable_prompt_expansion": { + "description": "Whether to enable prompt rewriting using LLM. Improves results for short prompts but increases processing time.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": true + } + }, + "description": "Input for Wan 2.6 text-to-video generation", + "title": "TextToVideoInput", + "required": [ + "prompt" + ] + }, + "V26TextToVideoOutput": { + "x-fal-order-properties": [ + "video", + "seed", + "actual_prompt" + ], + "type": "object", + "properties": { + "actual_prompt": { + "examples": [ + "Humorous but premium mini-trailer: a tiny fox 3D director proves \"multi-scene\" by calling simple commands that instantly change the set. Extreme photoreal 4K, cinematic lighting, subtle film grain, smooth camera. No subtitles, no UI, no watermark.\n\nShot 1 [0-3s] Macro close-up on the fox snapping a clapboard labeled \"fal\". the fox says : \"Action.\"\nShot 2 [3-6s] Hard cut: Wild West street at sunset. Wide shot, dust in the air. The Fox (in frame) points forward: \"Make it wide.\"\nShot 3 [6-10s] Hard cut: jungle river. The fox stands on a small boat. The camera pushes forward through vines and mist. Fox saying: \"Now… adventure.\"\nShot 4 [10-15s] Hard cut: space station window. Slow orbit around the fox with stars outside. Fox nods: \"Done. 
Next movie.\"\n" + ], + "description": "The actual prompt used if prompt rewriting was enabled", + "type": "string", + "title": "Actual Prompt" + }, + "seed": { + "examples": [ + 175932751 + ], + "description": "The seed used for generation", + "type": "integer", + "title": "Seed" + }, + "video": { + "examples": [ + { + "content_type": "video/mp4", + "url": "https://v3b.fal.media/files/b/0a867564/PsHtrg623uJuI7DdRqXvb_etx4d0Un.mp4" + } + ], + "description": "The generated video file", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/VideoFile" + } + ] + } + }, + "description": "Output for text-to-video generation", + "title": "TextToVideoOutput", + "required": [ + "video", + "seed" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the video" + }, + "duration": { + "title": "Duration", + "type": "number", + "description": "The duration of the video" + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "fps": { + "title": "Fps", + "type": "number", + "description": "The FPS of the video" + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the video" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "num_frames": { + "title": "Num Frames", + "type": "integer", + "description": "The number of frames in the video" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/wan/v2.6/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/wan/v2.6/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
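The audio_url limit-handling documented in the Wan 2.6 input schema above reduces to a small rule: the first min(audio, video) seconds of the output carry sound, excess audio is discarded, and any remaining video is silent. The sketch below restates that rule for clarity; it is not provider code.

// Worked restatement of the wan/v2.6/text-to-video audio_url behavior.
function wanAudioCoverage(audioSeconds: number, videoSeconds: 5 | 10 | 15) {
  const withSound = Math.min(audioSeconds, videoSeconds) // excess audio is truncated
  return { withSound, silent: videoSeconds - withSound } // trailing video is silent
}
// wanAudioCoverage(3, 5)   -> { withSound: 3, silent: 2 }  (the schema's own example)
// wanAudioCoverage(30, 15) -> { withSound: 15, silent: 0 } (audio truncated to 15 s)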
+ } + } + } + } + } + } + } + } + }, + "/wan/v2.6/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/V26TextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/wan/v2.6/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/V26TextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "veed/fabric-1.0/text", + "metadata": { + "display_name": "Fabric 1.0", + "category": "text-to-video", + "description": "VEED Fabric 1.0 text-to-video API", + "status": "active", + "tags": [ + "lipsync", + "avatar", + "text-to-video" + ], + "updated_at": "2026-01-26T21:42:00.593Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a860513/IhQfhGTiM994lkg1kPlOX_4836a8df5844488399dc1205ef3cc083.jpg", + "model_url": "https://fal.run/veed/fabric-1.0/text", + "license_type": "commercial", + "date": "2025-12-12T15:22:12.764Z", + "group": { + "key": "fabric-1.0", + "label": "Text To Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for veed/fabric-1.0/text", + "version": "1.0.0", + "description": "The OpenAPI schema for the veed/fabric-1.0/text queue.", + "x-fal-metadata": { + "endpointId": "veed/fabric-1.0/text", + "category": "text-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a860513/IhQfhGTiM994lkg1kPlOX_4836a8df5844488399dc1205ef3cc083.jpg", + "playgroundUrl": "https://fal.ai/models/veed/fabric-1.0/text", + "documentationUrl": "https://fal.ai/models/veed/fabric-1.0/text/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Fabric10TextInput": { + "title": "FabricOneTextInput", + "type": "object", + "properties": { + "text": { + "examples": [ + "Create talking videos with VEED Fabric-One API." 
+ ], + "maxLength": 2000, + "type": "string", + "minLength": 1, + "title": "Text" + }, + "resolution": { + "enum": [ + "720p", + "480p" + ], + "description": "Resolution", + "type": "string", + "title": "Resolution" + }, + "voice_description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Voice Description", + "description": "Optional additional voice description. The primary voice description is auto-generated from the image. You can use simple descriptors like 'British accent' or 'Confident' or provide a detailed description like 'Confident male voice, mid-20s, with notes of...'" + }, + "image_url": { + "format": "uri", + "maxLength": 2083, + "type": "string", + "minLength": 1, + "title": "Image Url", + "examples": [ + "https://v3.fal.media/files/koala/NLVPfOI4XL1cWT2PmmqT3_Hope.png" + ] + } + }, + "x-fal-order-properties": [ + "image_url", + "text", + "voice_description", + "resolution" + ], + "required": [ + "image_url", + "text", + "resolution" + ] + }, + "Fabric10TextOutput": { + "title": "FabricOneTextOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "content_type": "audio/mp4", + "url": "https://v3b.fal.media/files/b/0a8604be/zVkoAB4hTa8g6Fyl6V733_tmpy1fslwp2.mp4" + } + ], + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/veed/fabric-1.0/text/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/veed/fabric-1.0/text/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/veed/fabric-1.0/text": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Fabric10TextInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/veed/fabric-1.0/text/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Fabric10TextOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/v2.6/pro/text-to-video", + "metadata": { + "display_name": "Kling Video v2.6 Text to Video", + "category": "text-to-video", + "description": "Kling 2.6 Pro: Top-tier text-to-video with cinematic visuals, fluid motion, and native audio generation.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:07.421Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a84aab6/uMErfUYxq7gJBJ0-MOKS7_2640fde44b6049a59bc34adf72e38ed7.jpg", + "model_url": "https://fal.run/fal-ai/kling-video/v2.6/pro/text-to-video", + "license_type": "commercial", + "date": "2025-12-02T09:03:38.171Z", + "group": { + "key": "kling-video/v2.6", + "label": "Text to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling-video/v2.6/pro/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-video/v2.6/pro/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/v2.6/pro/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a84aab6/uMErfUYxq7gJBJ0-MOKS7_2640fde44b6049a59bc34adf72e38ed7.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/v2.6/pro/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/v2.6/pro/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoV26ProTextToVideoInput": { + "title": "TextToVideoV26ProRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Old friends reuniting at a train station after 20 years, one exclaims 'Is that really you?!' other tearfully replies 'I promised I'd come back, didn't I?', train whistle, steam hissing, emotional orchestral swell, crowd murmur" + ], + "title": "Prompt", + "type": "string", + "maxLength": 2500 + }, + "duration": { + "enum": [ + "5", + "10" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds", + "default": "5" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video frame", + "default": "16:9" + }, + "generate_audio": { + "title": "Generate Audio", + "type": "boolean", + "description": "Whether to generate native audio for the video. Supports Chinese and English voice output. Other languages are automatically translated to English. For English speech, use lowercase letters; for acronyms or proper nouns, use uppercase.", + "default": true + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "maxLength": 2500, + "default": "blur, distort, and low quality" + }, + "cfg_scale": { + "minimum": 0, + "title": "Cfg Scale", + "type": "number", + "maximum": 1, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt.\n ", + "default": 0.5 + } + }, + "x-fal-order-properties": [ + "prompt", + "duration", + "aspect_ratio", + "negative_prompt", + "cfg_scale", + "generate_audio" + ], + "required": [ + "prompt" + ] + }, + "KlingVideoV26ProTextToVideoOutput": { + "title": "TextToVideoV26ProOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 8195664, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://v3b.fal.media/files/b/0a84ab71/8hPbLs7n59WhWY-BN69yX_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/v2.6/pro/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v2.6/pro/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v2.6/pro/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV26ProTextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v2.6/pro/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV26ProTextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pixverse/v5.5/text-to-video", + "metadata": { + "display_name": "Pixverse", + "category": "text-to-video", + "description": "Generate high quality video clips from text and image prompts using PixVerse v5.5", + "status": "active", + "tags": [ + "text-to-video" + ], + "updated_at": "2026-01-26T21:42:08.483Z", + "is_favorited": false, + "thumbnail_url": "blob:https://fal.ai/4a345dbf-e94a-4ee1-9f43-4b72654d2178", + "model_url": "https://fal.run/fal-ai/pixverse/v5.5/text-to-video", + "license_type": "commercial", + "date": "2025-12-01T16:59:49.183Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pixverse/v5.5/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pixverse/v5.5/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pixverse/v5.5/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "blob:https://fal.ai/4a345dbf-e94a-4ee1-9f43-4b72654d2178", + 
"playgroundUrl": "https://fal.ai/models/fal-ai/pixverse/v5.5/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/pixverse/v5.5/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PixverseV55TextToVideoInput": { + "title": "TextToVideoRequestV5_5", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Epic low-cut camera capture of a girl clad in ultraviolet threads, Peter Max art style depiction, luminous diamond skin glistening under a vast moon's radiance, embodied in a superhuman flight among mystical ruins, symbolizing a deity's ritual ascent, hyper-detailed" + ], + "title": "Prompt", + "type": "string" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "4:3", + "1:1", + "3:4", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video", + "default": "16:9" + }, + "resolution": { + "enum": [ + "360p", + "540p", + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video", + "default": "720p" + }, + "style": { + "enum": [ + "anime", + "3d_animation", + "clay", + "comic", + "cyberpunk" + ], + "title": "Style", + "type": "string", + "description": "The style of the generated video" + }, + "thinking_type": { + "enum": [ + "enabled", + "disabled", + "auto" + ], + "title": "Thinking Type", + "type": "string", + "description": "Prompt optimization mode: 'enabled' to optimize, 'disabled' to turn off, 'auto' for model decision" + }, + "generate_multi_clip_switch": { + "title": "Generate Multi Clip Switch", + "type": "boolean", + "description": "Enable multi-clip generation with dynamic camera changes", + "default": false + }, + "duration": { + "enum": [ + "5", + "8", + "10" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds. Longer durations cost more. 
1080p videos are limited to 5 or 8 seconds", + "default": "5" + }, + "generate_audio_switch": { + "title": "Generate Audio Switch", + "type": "boolean", + "description": "Enable audio generation (BGM, SFX, dialogue)", + "default": false + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n " + }, + "negative_prompt": { + "examples": [ + "blurry, low quality, low resolution, pixelated, noisy, grainy, out of focus, poorly lit, poorly exposed, poorly composed, poorly framed, poorly cropped, poorly color corrected, poorly color graded" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt to be used for the generation", + "default": "" + } + }, + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "resolution", + "duration", + "negative_prompt", + "style", + "seed", + "generate_audio_switch", + "generate_multi_clip_switch", + "thinking_type" + ], + "required": [ + "prompt" + ] + }, + "PixverseV55TextToVideoOutput": { + "title": "VideoOutputV5_5", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 5485412, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://storage.googleapis.com/falserverless/model_tests/video_models/output-4.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pixverse/v5.5/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v5.5/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v5.5/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV55TextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v5.5/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV55TextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx-2/text-to-video/fast", + "metadata": { + "display_name": "LTX Video 2.0 Fast", + "category": "text-to-video", + "description": "Create high-fidelity video with audio from text with LTX-2 Fast", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:10.611Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/monkey/WESFAXKxTUdd23u9ruHHS_b6a3b01d00ef48aba773b29b918a8aea.jpg", + "model_url": "https://fal.run/fal-ai/ltx-2/text-to-video/fast", + "license_type": "commercial", + "date": "2025-11-26T17:29:41.777Z", + "group": { + "key": "ltx-2", + "label": "Text to Video (Fast)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-2/text-to-video/fast", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx-2/text-to-video/fast queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-2/text-to-video/fast", + "category": "text-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/monkey/WESFAXKxTUdd23u9ruHHS_b6a3b01d00ef48aba773b29b918a8aea.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx-2/text-to-video/fast", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx-2/text-to-video/fast/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Ltx2TextToVideoFastInput": { + "title": "LTXVTextToVideoFastRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A cowboy walking through a dusty town at high noon, camera following from behind, cinematic depth, realistic lighting, western mood, 4K film grain." + ], + "maxLength": 5000, + "type": "string", + "minLength": 1, + "title": "Prompt", + "description": "The prompt to generate the video from" + }, + "aspect_ratio": { + "enum": [ + "16:9" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video", + "default": "16:9" + }, + "duration": { + "enum": [ + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20 + ], + "title": "Duration", + "type": "integer", + "description": "The duration of the generated video in seconds. The fast model supports 6-20 seconds. Note: Durations longer than 10 seconds (12, 14, 16, 18, 20) are only supported with 25 FPS and 1080p resolution.", + "default": 6 + }, + "generate_audio": { + "title": "Generate Audio", + "type": "boolean", + "description": "Whether to generate audio for the generated video", + "default": true + }, + "resolution": { + "enum": [ + "1080p", + "1440p", + "2160p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video", + "default": "1080p" + }, + "fps": { + "enum": [ + 25, + 50 + ], + "title": "Frames per Second", + "type": "integer", + "description": "The frames per second of the generated video", + "default": 25 + } + }, + "x-fal-order-properties": [ + "prompt", + "duration", + "resolution", + "aspect_ratio", + "fps", + "generate_audio" + ], + "required": [ + "prompt" + ] + }, + "Ltx2TextToVideoFastOutput": { + "title": "LTXVTextToVideoResponse", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_name": "ltxv-2-t2v-output.mp4", + "content_type": "video/mp4", + "url": "https://storage.googleapis.com/falserverless/example_outputs/ltxv-2-t2v-output.mp4" + } + ], + "title": "Video", + "description": "The generated video file", + "allOf": [ + { + "$ref": "#/components/schemas/VideoFile" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the video" + }, + "duration": { + "title": "Duration", + "type": "number", + "description": "The duration of the video" + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "fps": { + "title": "Fps", + "type": "number", + "description": "The FPS of the video" + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the video" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "num_frames": { + "title": "Num Frames", + "type": "integer", + "description": "The number of frames in the video" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-2/text-to-video/fast/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2/text-to-video/fast/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-2/text-to-video/fast": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx2TextToVideoFastInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2/text-to-video/fast/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx2TextToVideoFastOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx-2/text-to-video", + "metadata": { + "display_name": "LTX Video 2.0 Pro", + "category": "text-to-video", + "description": "Create high-fidelity video with audio from text with LTX-2 Pro.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:10.736Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/lion/8KQLQn8JfajzxzHs5KTAp_96322584ff4b4779b10368b8f0ce00bb.jpg", + "model_url": "https://fal.run/fal-ai/ltx-2/text-to-video", + "license_type": "commercial", + "date": "2025-11-26T17:29:02.911Z", + "group": { + "key": "ltx-2", + "label": "Text to Video (Pro)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-2/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx-2/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-2/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/lion/8KQLQn8JfajzxzHs5KTAp_96322584ff4b4779b10368b8f0ce00bb.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx-2/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx-2/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Ltx2TextToVideoInput": { + "title": "LTXVTextToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A cowboy walking through a dusty town at high noon, camera following from behind, cinematic depth, realistic lighting, western mood, 4K film grain." + ], + "maxLength": 5000, + "type": "string", + "minLength": 1, + "title": "Prompt", + "description": "The prompt to generate the video from" + }, + "aspect_ratio": { + "enum": [ + "16:9" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video", + "default": "16:9" + }, + "duration": { + "enum": [ + 6, + 8, + 10 + ], + "title": "Duration", + "type": "integer", + "description": "The duration of the generated video in seconds", + "default": 6 + }, + "generate_audio": { + "title": "Generate Audio", + "type": "boolean", + "description": "Whether to generate audio for the generated video", + "default": true + }, + "resolution": { + "enum": [ + "1080p", + "1440p", + "2160p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video", + "default": "1080p" + }, + "fps": { + "enum": [ + 25, + 50 + ], + "title": "Frames per Second", + "type": "integer", + "description": "The frames per second of the generated video", + "default": 25 + } + }, + "x-fal-order-properties": [ + "prompt", + "duration", + "resolution", + "aspect_ratio", + "fps", + "generate_audio" + ], + "required": [ + "prompt" + ] + }, + "Ltx2TextToVideoOutput": { + "title": "LTXVTextToVideoResponse", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_name": "ltxv-2-t2v-output.mp4", + "content_type": "video/mp4", + "url": "https://storage.googleapis.com/falserverless/example_outputs/ltxv-2-t2v-output.mp4" + } + ], + "title": "Video", + "description": "The generated video file", + "allOf": [ + { + "$ref": "#/components/schemas/VideoFile" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the video" + }, + "duration": { + "title": "Duration", + "type": "number", + "description": "The duration of the video" + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "fps": { + "title": "Fps", + "type": "number", + "description": "The FPS of the video" + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the video" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "num_frames": { + "title": "Num Frames", + "type": "integer", + "description": "The number of frames in the video" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-2/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-2/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx2TextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx2TextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/hunyuan-video-v1.5/text-to-video", + "metadata": { + "display_name": "Hunyuan Video V1.5", + "category": "text-to-video", + "description": "Hunyuan Video 1.5 is Tencent's latest and best video model", + "status": "active", + "tags": [ + "hunyuan-video", + "text-to-video" + ], + "updated_at": "2026-01-26T21:42:18.212Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/panda/yRuXc12ZmJxhXiGcd_1jW_9ebc91f64d1a44b7995e66b3410547e9.jpg", + "model_url": "https://fal.run/fal-ai/hunyuan-video-v1.5/text-to-video", + "license_type": "commercial", + "date": "2025-11-21T12:19:44.185Z", + "highlighted": false, + "kind": "inference", + "duration_estimate": 3, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/hunyuan-video-v1.5/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/hunyuan-video-v1.5/text-to-video queue.", + 
"x-fal-metadata": { + "endpointId": "fal-ai/hunyuan-video-v1.5/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/panda/yRuXc12ZmJxhXiGcd_1jW_9ebc91f64d1a44b7995e66b3410547e9.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/hunyuan-video-v1.5/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/hunyuan-video-v1.5/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "HunyuanVideoV15TextToVideoInput": { + "title": "HunyuanVideo15T2VRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A stylish woman walks down a Tokyo street filled with warm glowing neon and animated city signage." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the video." + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the video.", + "default": "16:9" + }, + "resolution": { + "enum": [ + "480p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the video.", + "default": "480p" + }, + "enable_prompt_expansion": { + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Enable prompt expansion to enhance the input prompt.", + "default": true + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility." + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps.", + "default": 28 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to guide what not to generate.", + "default": "" + }, + "num_frames": { + "minimum": 1, + "title": "Num Frames", + "type": "integer", + "maximum": 121, + "description": "The number of frames to generate.", + "default": 121 + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "num_inference_steps", + "seed", + "aspect_ratio", + "resolution", + "num_frames", + "enable_prompt_expansion" + ], + "required": [ + "prompt" + ] + }, + "HunyuanVideoV15TextToVideoOutput": { + "title": "HunyuanVideo15Response", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." 
+ }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/hyvideo_v15_480p_output.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/hunyuan-video-v1.5/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-video-v1.5/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-video-v1.5/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HunyuanVideoV15TextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-video-v1.5/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HunyuanVideoV15TextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/infinity-star/text-to-video", + "metadata": { + "display_name": "Infinity Star", + "category": "text-to-video", + "description": "InfinityStar’s unified 8B spacetime autoregressive engine to turn any text prompt into crisp 720p videos - 10× faster than diffusion models.", + "status": "active", + "tags": [ + "text-to-video" + ], + "updated_at": "2026-01-26T21:42:26.144Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/rabbit/aDnlxPy8aKLxo5Gfa8pQx_5da559726dfb4102b6dd32dab6fd5f04.jpg", + "model_url": "https://fal.run/fal-ai/infinity-star/text-to-video", + "license_type": "commercial", + "date": "2025-11-07T18:46:05.330Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/infinity-star/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/infinity-star/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/infinity-star/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/rabbit/aDnlxPy8aKLxo5Gfa8pQx_5da559726dfb4102b6dd32dab6fd5f04.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/infinity-star/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/infinity-star/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "InfinityStarTextToVideoInput": { + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "num_inference_steps", + "guidance_scale", + "tau_video", + "use_apg", + "aspect_ratio", + "seed", + "enhance_prompt" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A serene mountain landscape at sunset with flowing clouds" + ], + "description": "Text prompt for generating the video", + "type": "string", + "title": "Prompt" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "1:1", + "9:16" + ], + "description": "Aspect ratio of the generated output", + "type": "string", + "title": "Aspect Ratio", + "default": "16:9" + }, + "enhance_prompt": { + "description": "Whether to use an LLM to enhance the prompt.", + "type": "boolean", + "title": "Enhance Prompt", + "default": true + }, + "use_apg": { + "description": "Whether to use APG", + "type": "boolean", + "title": "Use Apg", + "default": true + }, + "guidance_scale": { + "minimum": 1, + "description": "Guidance scale for generation", + "type": "number", + "maximum": 40, + "title": "Guidance Scale", + "default": 7.5 + }, + "num_inference_steps": { + "minimum": 1, + "description": "Number of inference steps", + "type": "integer", + "maximum": 100, + "title": "Num Inference Steps", + "default": 50 + }, + "seed": { + "description": "Random seed for reproducibility. Leave empty for random generation.", + "type": "integer", + "title": "Seed" + }, + "negative_prompt": { + "description": "Negative prompt to guide what to avoid in generation", + "type": "string", + "title": "Negative Prompt", + "default": "" + }, + "tau_video": { + "minimum": 0.1, + "description": "Tau value for video scale", + "type": "number", + "maximum": 1, + "title": "Tau Video", + "default": 0.4 + } + }, + "description": "Input model for text-to-video generation", + "title": "GenerationInput", + "required": [ + "prompt" + ] + }, + "InfinityStarTextToVideoOutput": { + "x-fal-order-properties": [ + "video" + ], + "type": "object", + "properties": { + "video": { + "description": "Generated video file", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "description": "Output model for text-to-video generation", + "title": "GenerationOutput", + "required": [ + "video" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/infinity-star/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/infinity-star/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/infinity-star/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InfinityStarTextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/infinity-star/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InfinityStarTextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/sana-video", + "metadata": { + "display_name": "Sana Video", + "category": "text-to-video", + "description": "Leverage Sana's ultra-fast processing speed to generate high-quality assets that transform your text prompts into production-ready videos", + "status": "active", + "tags": [ + "text-to-video" + ], + "updated_at": "2026-01-26T21:42:26.268Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/kangaroo/GCWRmD1TPULLBQ1NCWtwo_02d01a7c0e0b4e5f98c4ed4c18a9be72.jpg", + "model_url": "https://fal.run/fal-ai/sana-video", + "license_type": "commercial", + "date": "2025-11-07T12:31:32.230Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/sana-video", + "version": 
"1.0.0", + "description": "The OpenAPI schema for the fal-ai/sana-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/sana-video", + "category": "text-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/kangaroo/GCWRmD1TPULLBQ1NCWtwo_02d01a7c0e0b4e5f98c4ed4c18a9be72.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/sana-video", + "documentationUrl": "https://fal.ai/models/fal-ai/sana-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "SanaVideoInput": { + "title": "SanaVideoInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Evening, backlight, side lighting, soft light, high contrast, mid-shot, centered composition, clean solo shot, warm color. A young Caucasian man stands in a forest, golden light glimmers on his hair as sunlight filters through the leaves." + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt describing the video to generate" + }, + "resolution": { + "enum": [ + "480p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the output video", + "default": "480p" + }, + "fps": { + "minimum": 8, + "title": "Fps", + "type": "integer", + "maximum": 30, + "description": "Frames per second for the output video", + "default": 16 + }, + "motion_score": { + "minimum": 0, + "title": "Motion Score", + "type": "integer", + "maximum": 100, + "description": "Motion intensity score (higher = more motion)", + "default": 30 + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance Scale", + "type": "number", + "maximum": 20, + "description": "Guidance scale for generation (higher = more prompt adherence)", + "default": 6 + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "Number of denoising steps", + "default": 28 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducible generation. If not provided, a random seed will be used." + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt describing what to avoid in the generation", + "default": "A chaotic sequence with misshapen, deformed limbs in heavy motion blur, sudden disappearance, jump cuts, jerky movements, rapid shot changes, frames out of sync, inconsistent character shapes, temporal artifacts, jitter, and ghosting effects, creating a disorienting visual experience." 
+ }, + "num_frames": { + "minimum": 16, + "title": "Num Frames", + "type": "integer", + "maximum": 200, + "description": "Number of frames to generate", + "default": 81 + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "resolution", + "num_frames", + "fps", + "motion_score", + "guidance_scale", + "num_inference_steps", + "seed" + ], + "required": [ + "prompt" + ] + }, + "SanaVideoOutput": { + "title": "SanaVideoOutput", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "The random seed used for the generation process" + }, + "video": { + "examples": [ + { + "content_type": "video/mp4", + "url": "https://v3b.fal.media/files/b/zebra/TipA9XXsXRYlB6vK6PQ0l_output.mp4" + } + ], + "title": "Video", + "description": "Generated video file", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/sana-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sana-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/sana-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SanaVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sana-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SanaVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/longcat-video/text-to-video/720p", + "metadata": { + "display_name": "LongCat Video", + "category": "text-to-video", + "description": "Generate long videos in 720p/30fps from text using LongCat Video", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:29.128Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/panda/yE1r-CAPmdJnlspqz6rJf_520c6560105249e1a1720c47f830c299.jpg", + "model_url": "https://fal.run/fal-ai/longcat-video/text-to-video/720p", + "license_type": "commercial", + "date": "2025-10-30T16:24:45.138Z", + "group": { + "key": "longcat", + "label": "Text to Video (720p)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/longcat-video/text-to-video/720p", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/longcat-video/text-to-video/720p queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/longcat-video/text-to-video/720p", + "category": "text-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/panda/yE1r-CAPmdJnlspqz6rJf_520c6560105249e1a1720c47f830c299.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/longcat-video/text-to-video/720p", + "documentationUrl": "https://fal.ai/models/fal-ai/longcat-video/text-to-video/720p/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "LongcatVideoTextToVideo720pInput": { + "title": "LongCat720PCFGVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "realistic filming style, a person wearing a dark helmet, a deep-colored jacket, blue jeans, and bright yellow shoes rides a skateboard along a winding mountain road. The skateboarder starts in a standing position, then gradually lowers into a crouch, extending one hand to touch the road surface while maintaining a low center of gravity to navigate a sharp curve. After completing the turn, the skateboarder rises back to a standing position and continues gliding forward. The background features lush green hills flanking both sides of the road, with distant snow-capped mountain peaks rising against a clear, bright blue sky. The camera follows closely from behind, smoothly tracking the skateboarder’s movements and capturing the dynamic scenery along the route. The scene is shot in natural daylight, highlighting the vivid outdoor environment and the skateboarder’s fluid actions." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to guide the video generation." + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use for the video generation.", + "examples": [ + "regular" + ], + "default": "regular" + }, + "fps": { + "minimum": 1, + "title": "FPS", + "type": "integer", + "description": "The frame rate of the generated video.", + "maximum": 60, + "default": 30 + }, + "num_refine_inference_steps": { + "minimum": 8, + "title": "Number of Refinement Inference Steps", + "type": "integer", + "description": "The number of inference steps to use for refinement.", + "maximum": 50, + "default": 40 + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance Scale", + "type": "number", + "description": "The guidance scale to use for the video generation.", + "maximum": 10, + "default": 4 + }, + "num_frames": { + "description": "The number of frames to generate.", + "type": "integer", + "minimum": 17, + "title": "Number of Frames", + "maximum": 961, + "default": 162 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable safety checker.", + "default": true + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to use for the video generation.", + "default": "Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards" + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "description": "The write mode of the generated video.", + "default": "balanced" + }, + "video_output_type": { + "enum": [ + "X264 (.mp4)", + "VP9 (.webm)", + "PRORES4444 (.mov)", + "GIF (.gif)" + ], + "title": "Video Output Type", + "type": "string", + "description": "The output type of the generated video.", + "default": "X264 (.mp4)" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": 
"The aspect ratio of the generated video.", + "default": "16:9" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "description": "The quality of the generated video.", + "default": "high" + }, + "enable_prompt_expansion": { + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + }, + "num_inference_steps": { + "minimum": 8, + "title": "Number of Inference Steps", + "type": "integer", + "description": "The number of inference steps to use for the video generation.", + "maximum": 50, + "default": 40 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed for the random number generator." + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "num_frames", + "num_inference_steps", + "num_refine_inference_steps", + "guidance_scale", + "aspect_ratio", + "fps", + "seed", + "enable_prompt_expansion", + "enable_safety_checker", + "video_output_type", + "video_quality", + "video_write_mode", + "sync_mode", + "acceleration" + ], + "required": [ + "prompt" + ] + }, + "LongcatVideoTextToVideo720pOutput": { + "title": "LongCatVideoResponse", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "realistic filming style, a person wearing a dark helmet, a deep-colored jacket, blue jeans, and bright yellow shoes rides a skateboard along a winding mountain road. The skateboarder starts in a standing position, then gradually lowers into a crouch, extending one hand to touch the road surface while maintaining a low center of gravity to navigate a sharp curve. After completing the turn, the skateboarder rises back to a standing position and continues gliding forward. The background features lush green hills flanking both sides of the road, with distant snow-capped mountain peaks rising against a clear, bright blue sky. The camera follows closely from behind, smoothly tracking the skateboarder’s movements and capturing the dynamic scenery along the route. The scene is shot in natural daylight, highlighting the vivid outdoor environment and the skateboarder’s fluid actions." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation." + }, + "seed": { + "examples": [ + 424911732 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "content_type": "video/mp4", + "url": "https://v3b.fal.media/files/b/zebra/lXFrGA-egaUXWFGSp8GqT_BxoDEqUZ.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "prompt", + "seed", + "video" + ], + "required": [ + "prompt", + "seed", + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." 
+ }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/longcat-video/text-to-video/720p/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/longcat-video/text-to-video/720p/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/longcat-video/text-to-video/720p": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LongcatVideoTextToVideo720pInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/longcat-video/text-to-video/720p/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LongcatVideoTextToVideo720pOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/longcat-video/text-to-video/480p", + "metadata": { + "display_name": "LongCat Video", + "category": "text-to-video", + "description": "Generate long videos from text using LongCat Video", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:29.531Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/penguin/rYYa2jpouP8HP5sofxQjq_194c767959414548bff2c3f04fedbe0f.jpg", + "model_url": "https://fal.run/fal-ai/longcat-video/text-to-video/480p", + "license_type": "commercial", + "date": "2025-10-30T14:50:01.405Z", + "group": { + "key": "longcat", + "label": "Text to Video (480p)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for 
fal-ai/longcat-video/text-to-video/480p", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/longcat-video/text-to-video/480p queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/longcat-video/text-to-video/480p", + "category": "text-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/penguin/rYYa2jpouP8HP5sofxQjq_194c767959414548bff2c3f04fedbe0f.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/longcat-video/text-to-video/480p", + "documentationUrl": "https://fal.ai/models/fal-ai/longcat-video/text-to-video/480p/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LongcatVideoTextToVideo480pInput": { + "title": "LongCatCFGVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "realistic filming style, a person wearing a dark helmet, a deep-colored jacket, blue jeans, and bright yellow shoes rides a skateboard along a winding mountain road. The skateboarder starts in a standing position, then gradually lowers into a crouch, extending one hand to touch the road surface while maintaining a low center of gravity to navigate a sharp curve. After completing the turn, the skateboarder rises back to a standing position and continues gliding forward. The background features lush green hills flanking both sides of the road, with distant snow-capped mountain peaks rising against a clear, bright blue sky. The camera follows closely from behind, smoothly tracking the skateboarder’s movements and capturing the dynamic scenery along the route. The scene is shot in natural daylight, highlighting the vivid outdoor environment and the skateboarder’s fluid actions." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to guide the video generation." 
+ }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use for the video generation.", + "examples": [ + "regular" + ], + "default": "regular" + }, + "fps": { + "minimum": 1, + "title": "FPS", + "type": "integer", + "description": "The frame rate of the generated video.", + "maximum": 60, + "default": 15 + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance Scale", + "type": "number", + "description": "The guidance scale to use for the video generation.", + "maximum": 10, + "default": 4 + }, + "num_frames": { + "description": "The number of frames to generate.", + "type": "integer", + "minimum": 17, + "title": "Number of Frames", + "maximum": 961, + "default": 162 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable safety checker.", + "default": true + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to use for the video generation.", + "default": "Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards" + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "description": "The write mode of the generated video.", + "default": "balanced" + }, + "video_output_type": { + "enum": [ + "X264 (.mp4)", + "VP9 (.webm)", + "PRORES4444 (.mov)", + "GIF (.gif)" + ], + "title": "Video Output Type", + "type": "string", + "description": "The output type of the generated video.", + "default": "X264 (.mp4)" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video.", + "default": "16:9" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "description": "The quality of the generated video.", + "default": "high" + }, + "enable_prompt_expansion": { + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed for the random number generator." 
+ }, + "num_inference_steps": { + "minimum": 8, + "title": "Number of Inference Steps", + "type": "integer", + "description": "The number of inference steps to use for the video generation.", + "maximum": 50, + "default": 40 + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "num_frames", + "num_inference_steps", + "guidance_scale", + "aspect_ratio", + "fps", + "seed", + "enable_prompt_expansion", + "enable_safety_checker", + "video_output_type", + "video_quality", + "video_write_mode", + "sync_mode", + "acceleration" + ], + "required": [ + "prompt" + ] + }, + "LongcatVideoTextToVideo480pOutput": { + "title": "LongCatVideoResponse", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "realistic filming style, a person wearing a dark helmet, a deep-colored jacket, blue jeans, and bright yellow shoes rides a skateboard along a winding mountain road. The skateboarder starts in a standing position, then gradually lowers into a crouch, extending one hand to touch the road surface while maintaining a low center of gravity to navigate a sharp curve. After completing the turn, the skateboarder rises back to a standing position and continues gliding forward. The background features lush green hills flanking both sides of the road, with distant snow-capped mountain peaks rising against a clear, bright blue sky. The camera follows closely from behind, smoothly tracking the skateboarder’s movements and capturing the dynamic scenery along the route. The scene is shot in natural daylight, highlighting the vivid outdoor environment and the skateboarder’s fluid actions." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation." + }, + "seed": { + "examples": [ + 424911732 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "content_type": "video/mp4", + "url": "https://v3b.fal.media/files/b/zebra/lXFrGA-egaUXWFGSp8GqT_BxoDEqUZ.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "prompt", + "seed", + "video" + ], + "required": [ + "prompt", + "seed", + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/longcat-video/text-to-video/480p/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/longcat-video/text-to-video/480p/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/longcat-video/text-to-video/480p": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LongcatVideoTextToVideo480pInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/longcat-video/text-to-video/480p/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LongcatVideoTextToVideo480pOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/longcat-video/distilled/text-to-video/720p", + "metadata": { + "display_name": "LongCat Video Distilled", + "category": "text-to-video", + "description": "Generate long videos in 720p/30fps from text using LongCat Video Distilled", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:29.849Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/rabbit/Xh3L9IfiYFQpDx1WpQH8T_e0057041856c45b1ab31a267f8132455.jpg", + "model_url": "https://fal.run/fal-ai/longcat-video/distilled/text-to-video/720p", + "license_type": "commercial", + "date": "2025-10-30T14:41:43.380Z", + "group": { + "key": "longcat-distilled", + "label": "Text to Video (720p)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/longcat-video/distilled/text-to-video/720p", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/longcat-video/distilled/text-to-video/720p queue.", + "x-fal-metadata": { + 
"endpointId": "fal-ai/longcat-video/distilled/text-to-video/720p", + "category": "text-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/rabbit/Xh3L9IfiYFQpDx1WpQH8T_e0057041856c45b1ab31a267f8132455.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/longcat-video/distilled/text-to-video/720p", + "documentationUrl": "https://fal.ai/models/fal-ai/longcat-video/distilled/text-to-video/720p/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LongcatVideoDistilledTextToVideo720pInput": { + "title": "LongCat720PVideoRequest", + "type": "object", + "properties": { + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "description": "The write mode of the generated video.", + "default": "balanced" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video.", + "default": "16:9" + }, + "prompt": { + "examples": [ + "realistic filming style, a person wearing a dark helmet, a deep-colored jacket, blue jeans, and bright yellow shoes rides a skateboard along a winding mountain road. The skateboarder starts in a standing position, then gradually lowers into a crouch, extending one hand to touch the road surface while maintaining a low center of gravity to navigate a sharp curve. After completing the turn, the skateboarder rises back to a standing position and continues gliding forward. The background features lush green hills flanking both sides of the road, with distant snow-capped mountain peaks rising against a clear, bright blue sky. The camera follows closely from behind, smoothly tracking the skateboarder’s movements and capturing the dynamic scenery along the route. The scene is shot in natural daylight, highlighting the vivid outdoor environment and the skateboarder’s fluid actions." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to guide the video generation." 
+ }, + "video_output_type": { + "enum": [ + "X264 (.mp4)", + "VP9 (.webm)", + "PRORES4444 (.mov)", + "GIF (.gif)" + ], + "title": "Video Output Type", + "type": "string", + "description": "The output type of the generated video.", + "default": "X264 (.mp4)" + }, + "fps": { + "minimum": 1, + "title": "FPS", + "type": "integer", + "description": "The frame rate of the generated video.", + "maximum": 60, + "default": 30 + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "num_refine_inference_steps": { + "minimum": 2, + "title": "Number of Refinement Inference Steps", + "type": "integer", + "description": "The number of inference steps to use for refinement.", + "maximum": 16, + "default": 12 + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "description": "The quality of the generated video.", + "default": "high" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable safety checker.", + "default": true + }, + "num_frames": { + "description": "The number of frames to generate.", + "type": "integer", + "minimum": 17, + "title": "Number of Frames", + "maximum": 961, + "default": 162 + }, + "num_inference_steps": { + "minimum": 2, + "title": "Number of Inference Steps", + "type": "integer", + "description": "The number of inference steps to use.", + "maximum": 16, + "default": 12 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed for the random number generator." + }, + "enable_prompt_expansion": { + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "num_frames", + "num_inference_steps", + "num_refine_inference_steps", + "aspect_ratio", + "fps", + "seed", + "enable_prompt_expansion", + "enable_safety_checker", + "video_output_type", + "video_quality", + "video_write_mode", + "sync_mode" + ], + "required": [ + "prompt" + ] + }, + "LongcatVideoDistilledTextToVideo720pOutput": { + "title": "LongCatVideoResponse", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "realistic filming style, a person wearing a dark helmet, a deep-colored jacket, blue jeans, and bright yellow shoes rides a skateboard along a winding mountain road. The skateboarder starts in a standing position, then gradually lowers into a crouch, extending one hand to touch the road surface while maintaining a low center of gravity to navigate a sharp curve. After completing the turn, the skateboarder rises back to a standing position and continues gliding forward. The background features lush green hills flanking both sides of the road, with distant snow-capped mountain peaks rising against a clear, bright blue sky. The camera follows closely from behind, smoothly tracking the skateboarder’s movements and capturing the dynamic scenery along the route. The scene is shot in natural daylight, highlighting the vivid outdoor environment and the skateboarder’s fluid actions." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation." + }, + "seed": { + "examples": [ + 424911732 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." 
+ }, + "video": { + "examples": [ + { + "content_type": "video/mp4", + "url": "https://v3b.fal.media/files/b/zebra/lXFrGA-egaUXWFGSp8GqT_BxoDEqUZ.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "prompt", + "seed", + "video" + ], + "required": [ + "prompt", + "seed", + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/longcat-video/distilled/text-to-video/720p/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/longcat-video/distilled/text-to-video/720p/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/longcat-video/distilled/text-to-video/720p": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LongcatVideoDistilledTextToVideo720pInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/longcat-video/distilled/text-to-video/720p/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LongcatVideoDistilledTextToVideo720pOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/longcat-video/distilled/text-to-video/480p", + "metadata": { + "display_name": "LongCat Video Distilled", + "category": "text-to-video", + "description": "Generate long videos from text using LongCat Video Distilled", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:31.218Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/zebra/wjjJyktF2Y8GLxCDR9XsW_11eea2cee7e94b67aa89b3fc28ee55e1.jpg", + "model_url": "https://fal.run/fal-ai/longcat-video/distilled/text-to-video/480p", + "license_type": "commercial", + "date": "2025-10-28T23:29:22.398Z", + "group": { + "key": "longcat-distilled", + "label": "Text to Video (480p)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/longcat-video/distilled/text-to-video/480p", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/longcat-video/distilled/text-to-video/480p queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/longcat-video/distilled/text-to-video/480p", + "category": "text-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/zebra/wjjJyktF2Y8GLxCDR9XsW_11eea2cee7e94b67aa89b3fc28ee55e1.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/longcat-video/distilled/text-to-video/480p", + "documentationUrl": "https://fal.ai/models/fal-ai/longcat-video/distilled/text-to-video/480p/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "LongcatVideoDistilledTextToVideo480pInput": { + "title": "LongCatVideoRequest", + "type": "object", + "properties": { + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "description": "The write mode of the generated video.", + "default": "balanced" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video.", + "default": "16:9" + }, + "prompt": { + "examples": [ + "realistic filming style, a person wearing a dark helmet, a deep-colored jacket, blue jeans, and bright yellow shoes rides a skateboard along a winding mountain road. The skateboarder starts in a standing position, then gradually lowers into a crouch, extending one hand to touch the road surface while maintaining a low center of gravity to navigate a sharp curve. After completing the turn, the skateboarder rises back to a standing position and continues gliding forward. The background features lush green hills flanking both sides of the road, with distant snow-capped mountain peaks rising against a clear, bright blue sky. The camera follows closely from behind, smoothly tracking the skateboarder’s movements and capturing the dynamic scenery along the route. The scene is shot in natural daylight, highlighting the vivid outdoor environment and the skateboarder’s fluid actions." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to guide the video generation." + }, + "video_output_type": { + "enum": [ + "X264 (.mp4)", + "VP9 (.webm)", + "PRORES4444 (.mov)", + "GIF (.gif)" + ], + "title": "Video Output Type", + "type": "string", + "description": "The output type of the generated video.", + "default": "X264 (.mp4)" + }, + "fps": { + "minimum": 1, + "title": "FPS", + "type": "integer", + "description": "The frame rate of the generated video.", + "maximum": 60, + "default": 15 + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "description": "The quality of the generated video.", + "default": "high" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable safety checker.", + "default": true + }, + "num_frames": { + "description": "The number of frames to generate.", + "type": "integer", + "minimum": 17, + "title": "Number of Frames", + "maximum": 961, + "default": 162 + }, + "num_inference_steps": { + "minimum": 2, + "title": "Number of Inference Steps", + "type": "integer", + "description": "The number of inference steps to use.", + "maximum": 16, + "default": 12 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed for the random number generator." 
+ }, + "enable_prompt_expansion": { + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "num_frames", + "num_inference_steps", + "aspect_ratio", + "fps", + "seed", + "enable_prompt_expansion", + "enable_safety_checker", + "video_output_type", + "video_quality", + "video_write_mode", + "sync_mode" + ], + "required": [ + "prompt" + ] + }, + "LongcatVideoDistilledTextToVideo480pOutput": { + "title": "LongCatVideoResponse", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "realistic filming style, a person wearing a dark helmet, a deep-colored jacket, blue jeans, and bright yellow shoes rides a skateboard along a winding mountain road. The skateboarder starts in a standing position, then gradually lowers into a crouch, extending one hand to touch the road surface while maintaining a low center of gravity to navigate a sharp curve. After completing the turn, the skateboarder rises back to a standing position and continues gliding forward. The background features lush green hills flanking both sides of the road, with distant snow-capped mountain peaks rising against a clear, bright blue sky. The camera follows closely from behind, smoothly tracking the skateboarder’s movements and capturing the dynamic scenery along the route. The scene is shot in natural daylight, highlighting the vivid outdoor environment and the skateboarder’s fluid actions." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation." + }, + "seed": { + "examples": [ + 424911732 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "content_type": "video/mp4", + "url": "https://v3b.fal.media/files/b/zebra/lXFrGA-egaUXWFGSp8GqT_BxoDEqUZ.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "prompt", + "seed", + "video" + ], + "required": [ + "prompt", + "seed", + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/longcat-video/distilled/text-to-video/480p/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/longcat-video/distilled/text-to-video/480p/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/longcat-video/distilled/text-to-video/480p": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LongcatVideoDistilledTextToVideo480pInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/longcat-video/distilled/text-to-video/480p/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LongcatVideoDistilledTextToVideo480pOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/minimax/hailuo-2.3/standard/text-to-video", + "metadata": { + "display_name": "MiniMax Hailuo 2.3 [Standard] (Text to Video)", + "category": "text-to-video", + "description": "MiniMax Hailuo-2.3 Text To Video API (Standard, 768p): Advanced text-to-video generation model with 768p resolution", + "status": "active", + "tags": [ + "text-to-video" + ], + "updated_at": "2026-01-26T21:42:32.092Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/elephant/j3hSIKLqmWT7NXmIwTUQ0_1ec8fc1d7a53473fac7c2892ff459693.jpg", + "model_url": "https://fal.run/fal-ai/minimax/hailuo-2.3/standard/text-to-video", + "license_type": "commercial", + "date": "2025-10-27T13:06:48.176Z", + "group": { + "key": "hailuo-23", + "label": "Text To Video (standard)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/minimax/hailuo-2.3/standard/text-to-video", + "version": "1.0.0", 
+ "description": "The OpenAPI schema for the fal-ai/minimax/hailuo-2.3/standard/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/minimax/hailuo-2.3/standard/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/elephant/j3hSIKLqmWT7NXmIwTUQ0_1ec8fc1d7a53473fac7c2892ff459693.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/minimax/hailuo-2.3/standard/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/minimax/hailuo-2.3/standard/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MinimaxHailuo23StandardTextToVideoInput": { + "title": "StandardTextToVideoHailuo23Input", + "type": "object", + "properties": { + "prompt_optimizer": { + "description": "Whether to use the model's prompt optimizer", + "type": "boolean", + "title": "Prompt Optimizer", + "default": true + }, + "duration": { + "enum": [ + "6", + "10" + ], + "description": "The duration of the video in seconds.", + "type": "string", + "title": "Duration", + "default": "6" + }, + "prompt": { + "examples": [ + "An intense electrical storm rages over a modern city skyline at night. Multiple lightning bolts strike simultaneously, illuminating the towering skyscrapers in brilliant white flashes. Thunder clouds roil and churn overhead while constant lightning creates a strobe effect. Rain pours in heavy sheets, visible in the glow of city lights. The camera captures the drama from across a river as lightning reflects in the water. Lightning branches across the sky in intricate patterns. Atmosphere: dramatic, powerful, electrifying urban storm." 
+ ], + "maxLength": 2000, + "type": "string", + "title": "Prompt", + "minLength": 1 + } + }, + "x-fal-order-properties": [ + "prompt", + "prompt_optimizer", + "duration" + ], + "required": [ + "prompt" + ] + }, + "MinimaxHailuo23StandardTextToVideoOutput": { + "title": "StandardTextToVideoHailuo23Output", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/hailuo23/standard_t2v_out.mp4" + } + ], + "description": "The generated video", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/minimax/hailuo-2.3/standard/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/hailuo-2.3/standard/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/minimax/hailuo-2.3/standard/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxHailuo23StandardTextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/hailuo-2.3/standard/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxHailuo23StandardTextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/minimax/hailuo-2.3/pro/text-to-video", + "metadata": { + "display_name": "MiniMax Hailuo 2.3 [Pro] (Text to Video)", + "category": "text-to-video", + "description": "MiniMax Hailuo-2.3 Text To Video API (Pro, 1080p): Advanced text-to-video generation model with 1080p resolution", + "status": "active", + "tags": [ + "text-to-video" + ], + "updated_at": "2026-01-26T21:42:32.216Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/kangaroo/x_n_LT1ApmpYZnZw8sdNq_0147471d0d7e4bbba8780820dee6a3da.jpg", + "model_url": "https://fal.run/fal-ai/minimax/hailuo-2.3/pro/text-to-video", + "license_type": "commercial", + "date": "2025-10-27T13:05:46.983Z", + "group": { + "key": "hailuo-23", + "label": "Text To Video (pro)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/minimax/hailuo-2.3/pro/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/minimax/hailuo-2.3/pro/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/minimax/hailuo-2.3/pro/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/kangaroo/x_n_LT1ApmpYZnZw8sdNq_0147471d0d7e4bbba8780820dee6a3da.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/minimax/hailuo-2.3/pro/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/minimax/hailuo-2.3/pro/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "MinimaxHailuo23ProTextToVideoInput": { + "title": "ProTextToVideoHailuo23Input", + "type": "object", + "properties": { + "prompt_optimizer": { + "description": "Whether to use the model's prompt optimizer", + "type": "boolean", + "title": "Prompt Optimizer", + "default": true + }, + "prompt": { + "examples": [ + "The camera follows the snowboarder as they carve down the mountain through deep powder, each turn sending up huge rooster tails of snow. They navigate between trees, floating through the powder with smooth, flowing movements. The rider launches off a natural jump, grabbing the board mid-air before landing softly in deep snow and continuing down. Powder sprays continuously as they link turns together. The atmosphere is exhilarating and free. Audio: Board cutting through snow, powder spraying, wind rushing, the rider's excited shouts, and the soft thuds of landing in deep snow." + ], + "maxLength": 2000, + "type": "string", + "title": "Prompt", + "description": "Text prompt for video generation", + "minLength": 1 + } + }, + "x-fal-order-properties": [ + "prompt", + "prompt_optimizer" + ], + "required": [ + "prompt" + ] + }, + "MinimaxHailuo23ProTextToVideoOutput": { + "title": "ProTextToVideoHailuo23Output", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/hailuo23/pro_t2v_out.mp4" + } + ], + "description": "The generated video", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/minimax/hailuo-2.3/pro/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/hailuo-2.3/pro/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/minimax/hailuo-2.3/pro/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxHailuo23ProTextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/hailuo-2.3/pro/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxHailuo23ProTextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/bytedance/seedance/v1/pro/fast/text-to-video", + "metadata": { + "display_name": "Bytedance", + "category": "text-to-video", + "description": "Text to Video endpoint for Seedance 1.0 Pro Fast, a next-generation video model designed to deliver maximum performance at minimal cost", + "status": "active", + "tags": [ + "bytedance", + "fast", + "motion" + ], + "updated_at": "2026-01-26T21:42:32.783Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/monkey/vuOJvxcEA4z0fsRkw3r4Y_4930420a08f045c9ae3c401e3a6d20fc.jpg", + "model_url": "https://fal.run/fal-ai/bytedance/seedance/v1/pro/fast/text-to-video", + "license_type": "commercial", + "date": "2025-10-24T11:54:23.140Z", + "group": { + "key": "seedance-v1", + "label": "Seedance 1.0 Pro Fast --Text to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/bytedance/seedance/v1/pro/fast/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/bytedance/seedance/v1/pro/fast/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/bytedance/seedance/v1/pro/fast/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/monkey/vuOJvxcEA4z0fsRkw3r4Y_4930420a08f045c9ae3c401e3a6d20fc.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/bytedance/seedance/v1/pro/fast/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/bytedance/seedance/v1/pro/fast/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + 
"schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "BytedanceSeedanceV1ProFastTextToVideoInput": { + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "resolution", + "duration", + "camera_fixed", + "seed", + "enable_safety_checker" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Inside a quiet dojo, a martial artist moves with precision and grace. The performance highlights the beauty and discipline inherent in the ancient practice. Each form unfolds clearly, a testament to dedication and skill." + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt used to generate the video" + }, + "resolution": { + "enum": [ + "480p", + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "Video resolution - 480p for faster generation, 720p for balance, 1080p for higher quality", + "default": "1080p" + }, + "duration": { + "enum": [ + "2", + "3", + "4", + "5", + "6", + "7", + "8", + "9", + "10", + "11", + "12" + ], + "title": "Duration", + "type": "string", + "description": "Duration of the video in seconds", + "default": "5" + }, + "aspect_ratio": { + "enum": [ + "21:9", + "16:9", + "4:3", + "1:1", + "3:4", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video", + "default": "16:9" + }, + "enable_safety_checker": { + "examples": [ + true + ], + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed to control video generation. Use -1 for random." 
+ }, + "camera_fixed": { + "title": "Camera Fixed", + "type": "boolean", + "description": "Whether to fix the camera position", + "default": false + } + }, + "title": "SeedanceProFastTextToVideoInput", + "required": [ + "prompt" + ] + }, + "BytedanceSeedanceV1ProFastTextToVideoOutput": { + "x-fal-order-properties": [ + "video", + "seed" + ], + "type": "object", + "properties": { + "seed": { + "examples": [ + 42 + ], + "title": "Seed", + "type": "integer", + "description": "Seed used for generation" + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_inputs/seedance_fast_t2v_output.mp4" + } + ], + "title": "Video", + "description": "Generated video file", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "SeedanceFastT2VVideoOutput", + "required": [ + "video", + "seed" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/bytedance/seedance/v1/pro/fast/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedance/v1/pro/fast/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedance/v1/pro/fast/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BytedanceSeedanceV1ProFastTextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedance/v1/pro/fast/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BytedanceSeedanceV1ProFastTextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/vidu/q2/text-to-video", + "metadata": { + "display_name": "Vidu", + "category": "text-to-video", + "description": "Use the latest Vidu Q2 models, which offer much better quality and control over your videos.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:34.627Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/kangaroo/ui3mJj8_Fmxg22mCDuesY_38c8acb860cf4072899ec0c062efcf9e.jpg", + "model_url": "https://fal.run/fal-ai/vidu/q2/text-to-video", + "license_type": "commercial", + "date": "2025-10-22T16:40:07.242Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/vidu/q2/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/vidu/q2/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/vidu/q2/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/kangaroo/ui3mJj8_Fmxg22mCDuesY_38c8acb860cf4072899ec0c062efcf9e.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/vidu/q2/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/vidu/q2/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position."
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ViduQ2TextToVideoInput": { + "title": "Q2TextToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A cinematic shot of a futuristic city at sunset, with flying cars and towering skyscrapers." + ], + "title": "Prompt", + "type": "string", + "maxLength": 3000, + "description": "Text prompt for video generation, max 3000 characters" + }, + "resolution": { + "enum": [ + "360p", + "520p", + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "Output video resolution", + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the output video", + "default": "16:9" + }, + "duration": { + "enum": [ + 2, + 3, + 4, + 5, + 6, + 7, + 8 + ], + "title": "Duration", + "type": "integer", + "description": "Duration of the video in seconds", + "default": 4 + }, + "bgm": { + "title": "Bgm", + "type": "boolean", + "description": "Whether to add background music to the video (only for 4-second videos)", + "default": false + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility. If None, a random seed is chosen." + }, + "movement_amplitude": { + "enum": [ + "auto", + "small", + "medium", + "large" + ], + "title": "Movement Amplitude", + "type": "string", + "description": "The movement amplitude of objects in the frame", + "default": "auto" + } + }, + "x-fal-order-properties": [ + "prompt", + "seed", + "duration", + "resolution", + "aspect_ratio", + "movement_amplitude", + "bgm" + ], + "required": [ + "prompt" + ] + }, + "ViduQ2TextToVideoOutput": { + "title": "Q2TextToVideoOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://fal.media/files/penguin/sUYfE1bo3z5Gds7pSuFHD_output.mp4" + } + ], + "title": "Video", + "description": "The generated video from text using the Q2 model", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/vidu/q2/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/vidu/q2/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/vidu/q2/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ViduQ2TextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/vidu/q2/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ViduQ2TextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/krea-wan-14b/text-to-video", + "metadata": { + "display_name": "Krea Wan 14b- Text to Video", + "category": "text-to-video", + "description": "Fast Text-to-Video endpoint for Krea's Wan 14b model.", + "status": "active", + "tags": [ + "text to video", + "fast" + ], + "updated_at": "2026-01-26T21:42:37.844Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/rabbit/n7SozOMd7d9UiTYhyZ4w3_99e21d49333f47b9be7e834d73a16d8a.jpg", + "model_url": "https://fal.run/fal-ai/krea-wan-14b/text-to-video", + "license_type": "commercial", + "date": "2025-10-20T17:24:26.630Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/krea-wan-14b/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/krea-wan-14b/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/krea-wan-14b/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/rabbit/n7SozOMd7d9UiTYhyZ4w3_99e21d49333f47b9be7e834d73a16d8a.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/krea-wan-14b/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/krea-wan-14b/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KreaWan14bTextToVideoInput": { + "title": "TextToVideoInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A powerful, matte black jeep, its robust frame contrasting with the lush green surroundings, navigates a winding jungle road, kicking up small clouds of dust and loose earth from its tires." + ], + "title": "Prompt", + "type": "string", + "description": "Prompt for the video-to-video generation." + }, + "enable_prompt_expansion": { + "examples": [ + true + ], + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning.", + "default": false + }, + "num_frames": { + "minimum": 18, + "maximum": 162, + "type": "integer", + "title": "Num Frames", + "description": "Number of frames to generate. Must be a multiple of 12 plus 6, for example 6, 18, 30, 42, etc.", + "default": 78 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Seed for the video-to-video generation." + } + }, + "x-fal-order-properties": [ + "prompt", + "num_frames", + "enable_prompt_expansion", + "seed" + ], + "required": [ + "prompt" + ] + }, + "KreaWan14bTextToVideoOutput": { + "title": "VideoToVideoOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/krea_wan_14b_v2v_output.mp4" + } + ], + "description": "The generated video file.", + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/krea-wan-14b/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/krea-wan-14b/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/krea-wan-14b/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KreaWan14bTextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/krea-wan-14b/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KreaWan14bTextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan-alpha", + "metadata": { + "display_name": "Wan Alpha", + "category": "text-to-video", + "description": "Generate videos with transparent backgrounds", + "status": "active", + "tags": [ + "transparent", + "alpha" + ], + "updated_at": "2026-01-26T21:42:39.559Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/lion/3oIcvX_9oIQh2cDai8geS_59a419be75fb46c5bd8094bb50e9cabf.jpg", + "model_url": "https://fal.run/fal-ai/wan-alpha", + "license_type": "commercial", + "date": "2025-10-16T02:14:54.924Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan-alpha", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan-alpha queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan-alpha", + "category": "text-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/lion/3oIcvX_9oIQh2cDai8geS_59a419be75fb46c5bd8094bb50e9cabf.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan-alpha", + "documentationUrl": "https://fal.ai/models/fal-ai/wan-alpha/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": 
"header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanAlphaInput": { + "title": "WanAlphaRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Medium shot. A little girl holds a bubble wand and blows out colorful bubbles that float and pop in the air. The background of this video is transparent. Realistic style." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to guide the video generation." + }, + "shift": { + "minimum": 1, + "maximum": 15, + "type": "number", + "title": "Shift", + "description": "The shift of the generated video.", + "default": 10.5 + }, + "mask_clamp_upper": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Mask Clamp Upper", + "description": "The upper bound of the mask clamping.", + "default": 0.75 + }, + "fps": { + "minimum": 1, + "maximum": 60, + "type": "integer", + "title": "FPS", + "description": "The frame rate of the generated video.", + "default": 16 + }, + "mask_clamp_lower": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Mask Clamp Lower", + "description": "The lower bound of the mask clamping.", + "default": 0.1 + }, + "num_frames": { + "description": "The number of frames to generate.", + "type": "integer", + "minimum": 17, + "maximum": 121, + "title": "Number of Frames", + "default": 81 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable safety checker.", + "default": true + }, + "mask_binarization_threshold": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Mask Binarization Threshold", + "description": "The threshold for mask binarization. When binarize_mask is True, this threshold will be used to binarize the mask. 
This will also be used for transparency when the output type is `.webm`.", + "default": 0.8 + }, + "sampler": { + "enum": [ + "unipc", + "dpm++", + "euler" + ], + "title": "Sampler", + "type": "string", + "description": "The sampler to use.", + "default": "euler" + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "description": "The write mode of the generated video.", + "default": "balanced" + }, + "resolution": { + "enum": [ + "240p", + "360p", + "480p", + "580p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video.", + "default": "480p" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "1:1", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video.", + "default": "16:9" + }, + "video_output_type": { + "enum": [ + "X264 (.mp4)", + "VP9 (.webm)", + "PRORES4444 (.mov)", + "GIF (.gif)" + ], + "title": "Video Output Type", + "type": "string", + "description": "The output type of the generated video.", + "default": "VP9 (.webm)" + }, + "binarize_mask": { + "title": "Binarize Mask", + "type": "boolean", + "description": "Whether to binarize the mask.", + "default": false + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "description": "The quality of the generated video.", + "default": "high" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_prompt_expansion": { + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 16, + "type": "integer", + "title": "Number of Inference Steps", + "description": "The number of inference steps to use.", + "default": 8 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed for the random number generator." + } + }, + "x-fal-order-properties": [ + "prompt", + "num_frames", + "fps", + "num_inference_steps", + "seed", + "sampler", + "shift", + "resolution", + "aspect_ratio", + "enable_prompt_expansion", + "enable_safety_checker", + "mask_clamp_lower", + "mask_clamp_upper", + "binarize_mask", + "mask_binarization_threshold", + "video_output_type", + "video_quality", + "video_write_mode", + "sync_mode" + ], + "required": [ + "prompt" + ] + }, + "WanAlphaOutput": { + "title": "WanAlphaResponse", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Medium shot. A little girl holds a bubble wand and blows out colorful bubbles that float and pop in the air. The background of this video is transparent. Realistic style." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation." + }, + "image": { + "title": "Image", + "description": "The generated image file.", + "allOf": [ + { + "$ref": "#/components/schemas/VideoFile" + } + ] + }, + "seed": { + "examples": [ + 424911732 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." 
+ }, + "mask": { + "examples": [ + { + "height": 720, + "duration": 5.0625, + "url": "https://storage.googleapis.com/falserverless/example_outputs/wan-alpha-mask-output.webm", + "fps": 16, + "width": 1280, + "file_name": "wan-alpha-mask-output.webm", + "content_type": "video/webm", + "num_frames": 81 + } + ], + "title": "Mask", + "description": "The generated mask file.", + "allOf": [ + { + "$ref": "#/components/schemas/VideoFile" + } + ] + }, + "video": { + "examples": [ + { + "height": 720, + "file_name": "wan-alpha-rgba-output.webm", + "content_type": "video/webm", + "url": "https://storage.googleapis.com/falserverless/example_outputs/wan-alpha-rgba-output.webm", + "width": 1280 + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/VideoFile" + } + ] + } + }, + "x-fal-order-properties": [ + "prompt", + "seed", + "video", + "image", + "mask" + ], + "required": [ + "prompt", + "seed" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "duration": { + "title": "Duration", + "type": "number", + "description": "The duration of the video" + }, + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the video" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the video" + }, + "fps": { + "title": "Fps", + "type": "number", + "description": "The FPS of the video" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "num_frames": { + "title": "Num Frames", + "type": "integer", + "description": "The number of frames in the video" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan-alpha/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-alpha/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/wan-alpha": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanAlphaInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-alpha/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanAlphaOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kandinsky5/text-to-video/distill", + "metadata": { + "display_name": "Kandinsky5", + "category": "text-to-video", + "description": "Kandinsky 5.0 Distilled is a lightweight diffusion model for fast, high-quality text-to-video generation.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:40.181Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/kangaroo/TyHaKAdxHRg3VIjUlKZPI_674ee5ee1f2d467e9f3806531f85dee2.jpg", + "model_url": "https://fal.run/fal-ai/kandinsky5/text-to-video/distill", + "license_type": "commercial", + "date": "2025-10-13T14:45:27.847Z", + "group": { + "key": "kandinsky5", + "label": "Text to Video (Distill)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kandinsky5/text-to-video/distill", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kandinsky5/text-to-video/distill queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kandinsky5/text-to-video/distill", + "category": "text-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/kangaroo/TyHaKAdxHRg3VIjUlKZPI_674ee5ee1f2d467e9f3806531f85dee2.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/kandinsky5/text-to-video/distill", + "documentationUrl": "https://fal.ai/models/fal-ai/kandinsky5/text-to-video/distill/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Kandinsky5TextToVideoDistillInput": { + "title": "KandinskyT2VDistillRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A dog in red hat" + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt to guide video generation." + }, + "duration": { + "enum": [ + "5s", + "10s" + ], + "description": "The length of the video to generate (5s or 10s)", + "type": "string", + "examples": [ + "5s", + "10s" + ], + "title": "Duration", + "default": "5s" + }, + "aspect_ratio": { + "enum": [ + "3:2", + "1:1", + "2:3" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio of the generated video. One of (3:2, 1:1, 2:3).", + "default": "3:2" + }, + "resolution": { + "enum": [ + "768x512" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video in W:H format. Will be calculated based on the aspect ratio(768x512, 512x512, 512x768).", + "default": "768x512" + } + }, + "x-fal-order-properties": [ + "prompt", + "resolution", + "aspect_ratio", + "duration" + ], + "required": [ + "prompt" + ] + }, + "Kandinsky5TextToVideoDistillOutput": { + "title": "KandinskyT2VResponse", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 5797172, + "file_name": "output.mp4", + "content_type": "application/octet-stream", + "url": "https://v3b.fal.media/files/b/tiger/5d-CATfsfPrBaXAK38hy6_output.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kandinsky5/text-to-video/distill/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kandinsky5/text-to-video/distill/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kandinsky5/text-to-video/distill": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Kandinsky5TextToVideoDistillInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kandinsky5/text-to-video/distill/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Kandinsky5TextToVideoDistillOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kandinsky5/text-to-video", + "metadata": { + "display_name": "Kandinsky5", + "category": "text-to-video", + "description": "Kandinsky 5.0 is a diffusion model for fast, high-quality text-to-video generation.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:40.306Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/panda/F-LagExgsZASG2uyIcj6X_093dccc7417c47e593e245767017f251.jpg", + "model_url": "https://fal.run/fal-ai/kandinsky5/text-to-video", + "license_type": "commercial", + "date": "2025-10-13T14:43:38.479Z", + "group": { + "key": "kandinsky5", + "label": "Text to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kandinsky5/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kandinsky5/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kandinsky5/text-to-video", + "category": "text-to-video", + "thumbnailUrl": 
"https://v3b.fal.media/files/b/panda/F-LagExgsZASG2uyIcj6X_093dccc7417c47e593e245767017f251.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/kandinsky5/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/kandinsky5/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Kandinsky5TextToVideoInput": { + "title": "KandinskyT2VRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A dog in red hat" + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt to guide video generation." + }, + "resolution": { + "enum": [ + "768x512" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video in W:H format. Will be calculated based on the aspect ratio(768x512, 512x512, 512x768).", + "default": "768x512" + }, + "duration": { + "enum": [ + "5s", + "10s" + ], + "description": "The length of the video to generate (5s or 10s)", + "type": "string", + "examples": [ + "5s", + "10s" + ], + "title": "Duration", + "default": "5s" + }, + "aspect_ratio": { + "enum": [ + "3:2", + "1:1", + "2:3" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio of the generated video. One of (3:2, 1:1, 2:3).", + "default": "3:2" + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "description": "The number of inference steps.", + "title": "Num Inference Steps", + "default": 30 + } + }, + "x-fal-order-properties": [ + "prompt", + "resolution", + "aspect_ratio", + "duration", + "num_inference_steps" + ], + "required": [ + "prompt" + ] + }, + "Kandinsky5TextToVideoOutput": { + "title": "KandinskyT2VResponse", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 5797172, + "file_name": "output.mp4", + "content_type": "application/octet-stream", + "url": "https://v3b.fal.media/files/b/tiger/5d-CATfsfPrBaXAK38hy6_output.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." 
+ }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kandinsky5/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kandinsky5/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kandinsky5/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Kandinsky5TextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kandinsky5/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Kandinsky5TextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/veo3.1/fast", + "metadata": { + "display_name": "Veo 3.1 Fast", + "category": "text-to-video", + "description": "Faster and more cost effective version of Google's Veo 3.1! 
", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:43.285Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/lion/o5x4rjXr3fAEwQgr4Y5vp_69ee214c658e428ba2f8f8b054d70a0e.jpg", + "model_url": "https://fal.run/fal-ai/veo3.1/fast", + "license_type": "commercial", + "date": "2025-10-08T17:05:11.177Z", + "group": { + "key": "veo3.1", + "label": "Text to Video (Fast)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/veo3.1/fast", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/veo3.1/fast queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/veo3.1/fast", + "category": "text-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/lion/o5x4rjXr3fAEwQgr4Y5vp_69ee214c658e428ba2f8f8b054d70a0e.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/veo3.1/fast", + "documentationUrl": "https://fal.ai/models/fal-ai/veo3.1/fast/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Veo31FastInput": { + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "duration", + "negative_prompt", + "resolution", + "generate_audio", + "seed", + "auto_fix" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Two person street interview in New York City.\nSample Dialogue:\nHost: \"Did you hear the news?\"\nPerson: \"Yes! Veo 3.1 is now available on fal. 
If you want to see it, go check their website.\"" + ], + "description": "The text prompt describing the video you want to generate", + "type": "string", + "title": "Prompt", + "maxLength": 20000 + }, + "duration": { + "enum": [ + "4s", + "6s", + "8s" + ], + "description": "The duration of the generated video.", + "type": "string", + "title": "Duration", + "default": "8s" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16" + ], + "description": "Aspect ratio of the generated video", + "type": "string", + "title": "Aspect Ratio", + "default": "16:9" + }, + "generate_audio": { + "description": "Whether to generate audio for the video.", + "type": "boolean", + "title": "Generate Audio", + "default": true + }, + "auto_fix": { + "description": "Whether to automatically attempt to fix prompts that fail content policy or other validation checks by rewriting them.", + "type": "boolean", + "title": "Auto Fix", + "default": true + }, + "resolution": { + "enum": [ + "720p", + "1080p", + "4k" + ], + "description": "The resolution of the generated video.", + "type": "string", + "title": "Resolution", + "default": "720p" + }, + "seed": { + "description": "The seed for the random number generator.", + "type": "integer", + "title": "Seed" + }, + "negative_prompt": { + "description": "A negative prompt to guide the video generation.", + "type": "string", + "title": "Negative Prompt" + } + }, + "title": "Veo31TextToVideoInput", + "required": [ + "prompt" + ] + }, + "Veo31FastOutput": { + "x-fal-order-properties": [ + "video" + ], + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://v3b.fal.media/files/b/kangaroo/oUCiZjQwEy6bIQdPUSLDF_output.mp4" + } + ], + "description": "The generated video.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "Veo31TextToVideoOutput", + "required": [ + "video" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/veo3.1/fast/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/veo3.1/fast/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/veo3.1/fast": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Veo31FastInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/veo3.1/fast/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Veo31FastOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/veo3.1", + "metadata": { + "display_name": "Veo 3.1", + "category": "text-to-video", + "description": "Veo 3.1 by Google, the most advanced AI video generation model in the world. With sound on!", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:43.669Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/tiger/TGTZBJyLk9HdwjB6SB90e.png", + "model_url": "https://fal.run/fal-ai/veo3.1", + "license_type": "commercial", + "date": "2025-10-08T17:01:21.561Z", + "group": { + "key": "veo3.1", + "label": "Text to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/veo3.1", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/veo3.1 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/veo3.1", + "category": "text-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/tiger/TGTZBJyLk9HdwjB6SB90e.png", + "playgroundUrl": "https://fal.ai/models/fal-ai/veo3.1", + "documentationUrl": "https://fal.ai/models/fal-ai/veo3.1/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Veo31Input": { + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "duration", + "negative_prompt", + "resolution", + "generate_audio", + "seed", + "auto_fix" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Two person street interview in New York City.\nSample Dialogue:\nHost: \"Did you hear the news?\"\nPerson: \"Yes! Veo 3.1 is now available on fal. If you want to see it, go check their website.\"" + ], + "description": "The text prompt describing the video you want to generate", + "type": "string", + "title": "Prompt", + "maxLength": 20000 + }, + "duration": { + "enum": [ + "4s", + "6s", + "8s" + ], + "description": "The duration of the generated video.", + "type": "string", + "title": "Duration", + "default": "8s" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16" + ], + "description": "Aspect ratio of the generated video", + "type": "string", + "title": "Aspect Ratio", + "default": "16:9" + }, + "generate_audio": { + "description": "Whether to generate audio for the video.", + "type": "boolean", + "title": "Generate Audio", + "default": true + }, + "auto_fix": { + "description": "Whether to automatically attempt to fix prompts that fail content policy or other validation checks by rewriting them.", + "type": "boolean", + "title": "Auto Fix", + "default": true + }, + "resolution": { + "enum": [ + "720p", + "1080p", + "4k" + ], + "description": "The resolution of the generated video.", + "type": "string", + "title": "Resolution", + "default": "720p" + }, + "seed": { + "description": "The seed for the random number generator.", + "type": "integer", + "title": "Seed" + }, + "negative_prompt": { + "description": "A negative prompt to guide the video generation.", + "type": "string", + "title": "Negative Prompt" + } + }, + "title": "Veo31TextToVideoInput", + "required": [ + "prompt" + ] + }, + "Veo31Output": { + "x-fal-order-properties": [ + "video" + ], + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://v3b.fal.media/files/b/kangaroo/oUCiZjQwEy6bIQdPUSLDF_output.mp4" + } + ], + "description": "The generated video.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "Veo31TextToVideoOutput", + "required": [ + "video" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/veo3.1/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/veo3.1/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/veo3.1": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Veo31Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/veo3.1/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Veo31Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/sora-2/text-to-video/pro", + "metadata": { + "display_name": "Sora 2", + "category": "text-to-video", + "description": "Text-to-video endpoint for Sora 2 Pro, OpenAI's state-of-the-art video model capable of creating richly detailed, dynamic clips with audio from natural language or images.", + "status": "active", + "tags": [ + "text-to-video", + "audio", + "sora-2-pro" + ], + "updated_at": "2026-01-26T21:42:45.608Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/lion/0bXyMS_zSKpeaG3LM6ARv_d4ee6acbfd9a4168b012a848c33b154d.jpg", + "model_url": "https://fal.run/fal-ai/sora-2/text-to-video/pro", + "license_type": "commercial", + "date": "2025-10-06T21:56:38.344Z", + "group": { + "key": "sora-2", + "label": "Text to Video (Pro)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for 
fal-ai/sora-2/text-to-video/pro", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/sora-2/text-to-video/pro queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/sora-2/text-to-video/pro", + "category": "text-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/lion/0bXyMS_zSKpeaG3LM6ARv_d4ee6acbfd9a4168b012a848c33b154d.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/sora-2/text-to-video/pro", + "documentationUrl": "https://fal.ai/models/fal-ai/sora-2/text-to-video/pro/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Sora2TextToVideoProInput": { + "x-fal-order-properties": [ + "prompt", + "resolution", + "aspect_ratio", + "duration", + "delete_video" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A dramatic Hollywood breakup scene at dusk on a quiet suburban street. A man and a woman in their 30s face each other, speaking softly but emotionally, lips syncing to breakup dialogue. Cinematic lighting, warm sunset tones, shallow depth of field, gentle breeze moving autumn leaves, realistic natural sound, no background music" + ], + "maxLength": 5000, + "type": "string", + "minLength": 1, + "title": "Prompt", + "description": "The text prompt describing the video you want to generate" + }, + "duration": { + "enum": [ + 4, + 8, + 12 + ], + "description": "Duration of the generated video in seconds", + "type": "integer", + "title": "Duration", + "default": 4 + }, + "resolution": { + "enum": [ + "720p", + "1080p" + ], + "description": "The resolution of the generated video", + "type": "string", + "title": "Resolution", + "default": "1080p" + }, + "aspect_ratio": { + "enum": [ + "9:16", + "16:9" + ], + "description": "The aspect ratio of the generated video", + "type": "string", + "title": "Aspect Ratio", + "default": "16:9" + }, + "delete_video": { + "description": "Whether to delete the video after generation for privacy reasons. 
If True, the video cannot be used for remixing and will be permanently deleted.", + "type": "boolean", + "title": "Delete Video", + "default": true + } + }, + "title": "ProTextToVideoInput", + "required": [ + "prompt" + ] + }, + "Sora2TextToVideoProOutput": { + "x-fal-order-properties": [ + "video", + "video_id", + "thumbnail", + "spritesheet" + ], + "type": "object", + "properties": { + "spritesheet": { + "title": "Spritesheet", + "description": "Spritesheet image for the video", + "allOf": [ + { + "$ref": "#/components/schemas/ImageFile" + } + ] + }, + "thumbnail": { + "title": "Thumbnail", + "description": "Thumbnail image for the video", + "allOf": [ + { + "$ref": "#/components/schemas/ImageFile" + } + ] + }, + "video_id": { + "examples": [ + "video_123" + ], + "title": "Video ID", + "type": "string", + "description": "The ID of the generated video" + }, + "video": { + "examples": [ + { + "content_type": "video/mp4", + "url": "https://storage.googleapis.com/falserverless/example_outputs/sora-2-pro-t2v-output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/VideoFile" + } + ] + } + }, + "title": "ProTextToVideoOutput", + "required": [ + "video", + "video_id" + ] + }, + "ImageFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the image" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the image" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "ImageFile", + "required": [ + "url" + ] + }, + "VideoFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "duration": { + "title": "Duration", + "type": "number", + "description": "The duration of the video" + }, + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the video" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "fps": { + "title": "Fps", + "type": "number", + "description": "The FPS of the video" + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the video" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." 
+ }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "num_frames": { + "title": "Num Frames", + "type": "integer", + "description": "The number of frames in the video" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "VideoFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/sora-2/text-to-video/pro/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sora-2/text-to-video/pro/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/sora-2/text-to-video/pro": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sora2TextToVideoProInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sora-2/text-to-video/pro/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sora2TextToVideoProOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/sora-2/text-to-video", + "metadata": { + "display_name": "Sora 2", + "category": "text-to-video", + "description": "Text-to-video endpoint for Sora 2, OpenAI's state-of-the-art video model capable of creating richly detailed, dynamic clips with audio from natural language or images.", + "status": "active", + "tags": [ + "text to video", + "audio", + "sora" + ], + "updated_at": "2026-01-26T21:42:45.733Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/penguin/eOGowiQKXIKwyDfwgeWQO_b80784431c524553a564ebdd7550d7e6.jpg", + "model_url": "https://fal.run/fal-ai/sora-2/text-to-video", + "license_type": "commercial", + "date": "2025-10-06T19:34:55.710Z", + "group": { + "key": "sora-2", + "label": "Text to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for 
fal-ai/sora-2/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/sora-2/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/sora-2/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://v3.fal.media/files/penguin/eOGowiQKXIKwyDfwgeWQO_b80784431c524553a564ebdd7550d7e6.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/sora-2/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/sora-2/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Sora2TextToVideoInput": { + "x-fal-order-properties": [ + "prompt", + "resolution", + "aspect_ratio", + "duration", + "delete_video", + "model" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A dramatic Hollywood breakup scene at dusk on a quiet suburban street. A man and a woman in their 30s face each other, speaking softly but emotionally, lips syncing to breakup dialogue. Cinematic lighting, warm sunset tones, shallow depth of field, gentle breeze moving autumn leaves, realistic natural sound, no background music" + ], + "maxLength": 5000, + "type": "string", + "minLength": 1, + "title": "Prompt", + "description": "The text prompt describing the video you want to generate" + }, + "duration": { + "enum": [ + 4, + 8, + 12 + ], + "description": "Duration of the generated video in seconds", + "type": "integer", + "title": "Duration", + "default": 4 + }, + "resolution": { + "enum": [ + "720p" + ], + "description": "The resolution of the generated video", + "type": "string", + "title": "Resolution", + "default": "720p" + }, + "model": { + "enum": [ + "sora-2", + "sora-2-2025-12-08", + "sora-2-2025-10-06" + ], + "description": "The model to use for the generation. When the default model is selected, the latest snapshot of the model will be used - otherwise, select a specific snapshot of the model.", + "type": "string", + "title": "Model", + "default": "sora-2" + }, + "delete_video": { + "description": "Whether to delete the video after generation for privacy reasons. 
If True, the video cannot be used for remixing and will be permanently deleted.", + "type": "boolean", + "title": "Delete Video", + "default": true + }, + "aspect_ratio": { + "enum": [ + "9:16", + "16:9" + ], + "description": "The aspect ratio of the generated video", + "type": "string", + "title": "Aspect Ratio", + "default": "16:9" + } + }, + "title": "TextToVideoInput", + "required": [ + "prompt" + ] + }, + "Sora2TextToVideoOutput": { + "x-fal-order-properties": [ + "video", + "video_id", + "thumbnail", + "spritesheet" + ], + "type": "object", + "properties": { + "spritesheet": { + "title": "Spritesheet", + "description": "Spritesheet image for the video", + "allOf": [ + { + "$ref": "#/components/schemas/ImageFile" + } + ] + }, + "thumbnail": { + "title": "Thumbnail", + "description": "Thumbnail image for the video", + "allOf": [ + { + "$ref": "#/components/schemas/ImageFile" + } + ] + }, + "video_id": { + "examples": [ + "video_123" + ], + "title": "Video ID", + "type": "string", + "description": "The ID of the generated video" + }, + "video": { + "examples": [ + { + "content_type": "video/mp4", + "url": "https://storage.googleapis.com/falserverless/example_outputs/sora_t2v_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/VideoFile" + } + ] + } + }, + "title": "TextToVideoOutput", + "required": [ + "video", + "video_id" + ] + }, + "ImageFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the image" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the image" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "ImageFile", + "required": [ + "url" + ] + }, + "VideoFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "duration": { + "title": "Duration", + "type": "number", + "description": "The duration of the video" + }, + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the video" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "fps": { + "title": "Fps", + "type": "number", + "description": "The FPS of the video" + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the video" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "num_frames": { + "title": "Num Frames", + "type": "integer", + "description": "The number of frames in the video" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "VideoFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/sora-2/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sora-2/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/sora-2/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sora2TextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sora-2/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sora2TextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ovi", + "metadata": { + "display_name": "Ovi Text to Video", + "category": "text-to-video", + "description": "A unified paradigm for audio-video generation", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:46.367Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/lion/yU9aRgq2QMYK4eGH5mohA_2822a9b5892d46699e218791b207ae5c.jpg", + "model_url": "https://fal.run/fal-ai/ovi", + "license_type": "commercial", + "date": "2025-10-03T12:43:15.945Z", + "group": { + "key": "ovi", + "label": "Text to video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ovi", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ovi queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ovi", + "category": "text-to-video", + "thumbnailUrl": "https://v3.fal.media/files/lion/yU9aRgq2QMYK4eGH5mohA_2822a9b5892d46699e218791b207ae5c.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ovi", + "documentationUrl": "https://fal.ai/models/fal-ai/ovi/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "OviInput": { + "title": "OviT2VRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A close-up of someone's face as they pet a cat, their hands stroking the soft fur in the foreground. Their affectionate expression shows as the cat purrs contentedly in their lap. They say, This little guy has been with me for eight years now. He knows exactly when I need comfort. 
Animals are pretty amazing that way..Affectionate voice with cat purring and gentle petting sounds" + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt to guide video generation." + }, + "resolution": { + "enum": [ + "512x992", + "992x512", + "960x512", + "512x960", + "720x720", + "448x1120", + "1120x448" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video in W:H format. One of (512x992, 992x512, 960x512, 512x960, 720x720, or 448x1120).", + "default": "992x512" + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps.", + "default": 30 + }, + "audio_negative_prompt": { + "title": "Audio Negative Prompt", + "type": "string", + "description": "Negative prompt for audio generation.", + "default": "robotic, muffled, echo, distorted" + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for video generation.", + "default": "jitter, bad hands, blur, distortion" + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. If None, a random seed is chosen." + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "num_inference_steps", + "audio_negative_prompt", + "seed", + "resolution" + ], + "required": [ + "prompt" + ] + }, + "OviOutput": { + "title": "OviT2VResponse", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_inputs/ovi_t2v_output.mp4" + } + ], + "description": "The generated video file.", + "anyOf": [ + { + "$ref": "#/components/schemas/File" + }, + { + "type": "null" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ovi/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ovi/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ovi": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OviInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ovi/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OviOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan-25-preview/text-to-video", + "metadata": { + "display_name": "Wan 2.5 Text to Video", + "category": "text-to-video", + "description": "Wan 2.5 text-to-video model.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:47.930Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/koala/yojKCMBipiqJvLr9a3sLC_deec1d2b5b2c4019bc353f6060ec0c09.jpg", + "model_url": "https://fal.run/fal-ai/wan-25-preview/text-to-video", + "license_type": "commercial", + "date": "2025-09-24T02:07:33.047Z", + "group": { + "key": "wan-25-preview", + "label": "Text to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan-25-preview/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan-25-preview/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan-25-preview/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://v3.fal.media/files/koala/yojKCMBipiqJvLr9a3sLC_deec1d2b5b2c4019bc353f6060ec0c09.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan-25-preview/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/wan-25-preview/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." 
+ }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Wan25PreviewTextToVideoInput": { + "description": "Input for text-to-video generation", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "The white dragon warrior stands still, eyes full of determination and strength. The camera slowly moves closer or circles around the warrior, highlighting the powerful presence and heroic spirit of the character." + ], + "description": "The text prompt for video generation. Supports Chinese and English, max 800 characters.", + "type": "string", + "title": "Prompt", + "minLength": 1 + }, + "resolution": { + "enum": [ + "480p", + "720p", + "1080p" + ], + "description": "Video resolution tier", + "type": "string", + "title": "Resolution", + "default": "1080p" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1" + ], + "description": "The aspect ratio of the generated video", + "type": "string", + "title": "Aspect Ratio", + "default": "16:9" + }, + "duration": { + "enum": [ + "5", + "10" + ], + "description": "Duration of the generated video in seconds. Choose between 5 or 10 seconds.", + "type": "string", + "title": "Duration", + "examples": [ + "5", + "10" + ], + "default": "5" + }, + "enable_safety_checker": { + "examples": [ + true + ], + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "seed": { + "description": "Random seed for reproducibility. If None, a random seed is chosen.", + "type": "integer", + "title": "Seed" + }, + "audio_url": { + "description": "\nURL of the audio to use as the background music. Must be publicly accessible.\nLimit handling: If the audio duration exceeds the duration value (5 or 10 seconds),\nthe audio is truncated to the first 5 or 10 seconds, and the rest is discarded. If\nthe audio is shorter than the video, the remaining part of the video will be silent.\nFor example, if the audio is 3 seconds long and the video duration is 5 seconds, the\nfirst 3 seconds of the output video will have sound, and the last 2 seconds will be silent.\n- Format: WAV, MP3.\n- Duration: 3 to 30 s.\n- File size: Up to 15 MB.\n", + "type": "string", + "title": "Audio Url" + }, + "negative_prompt": { + "examples": [ + "low resolution, error, worst quality, low quality, defects" + ], + "description": "Negative prompt to describe content to avoid. Max 500 characters.", + "type": "string", + "title": "Negative Prompt" + }, + "enable_prompt_expansion": { + "description": "Whether to enable prompt rewriting using LLM. 
Improves results for short prompts but increases processing time.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": true + } + }, + "title": "TextToVideoInput", + "x-fal-order-properties": [ + "prompt", + "audio_url", + "aspect_ratio", + "resolution", + "duration", + "negative_prompt", + "enable_prompt_expansion", + "seed", + "enable_safety_checker" + ], + "required": [ + "prompt" + ] + }, + "Wan25PreviewTextToVideoOutput": { + "description": "Base output for video generation", + "type": "object", + "properties": { + "actual_prompt": { + "examples": [ + "The white dragon warrior stands still in a grand cathedral-like structure, its glowing golden eyes fixed forward. The camera slowly moves closer, focusing on the warrior's armored chest and face. It then begins to circle around the warrior, capturing the intricate details of the white scale armor with gold accents. The warrior maintains a strong, determined posture. Ambient sounds and soft choral tones fill the background, enhancing the majestic atmosphere. The camera continues its slow circular motion, emphasizing the warrior's heroic presence before ending with a close-up of the face." + ], + "description": "The actual prompt used if prompt rewriting was enabled", + "type": "string", + "title": "Actual Prompt" + }, + "seed": { + "examples": [ + 175932751 + ], + "description": "The seed used for generation", + "type": "integer", + "title": "Seed" + }, + "video": { + "examples": [ + { + "content_type": "video/mp4", + "url": "https://storage.googleapis.com/falserverless/example_outputs/wan-25-i2v-output.mp4" + } + ], + "description": "The generated video file", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/VideoFile" + } + ] + } + }, + "title": "VideoOutput", + "x-fal-order-properties": [ + "video", + "seed", + "actual_prompt" + ], + "required": [ + "video", + "seed" + ] + }, + "VideoFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "duration": { + "description": "The duration of the video", + "type": "number", + "title": "Duration" + }, + "height": { + "description": "The height of the video", + "type": "integer", + "title": "Height" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "fps": { + "description": "The FPS of the video", + "type": "number", + "title": "Fps" + }, + "width": { + "description": "The width of the video", + "type": "integer", + "title": "Width" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "num_frames": { + "description": "The number of frames in the video", + "type": "integer", + "title": "Num Frames" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "VideoFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan-25-preview/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-25-preview/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/wan-25-preview/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Wan25PreviewTextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-25-preview/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Wan25PreviewTextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "argil/avatars/text-to-video", + "metadata": { + "display_name": "Avatars Text to Video", + "category": "text-to-video", + "description": "High-quality avatar videos that feel real, generated from your text", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:59.066Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/zebra/9NGo8wnyQwuQTJQr4Cvht_27981df50e43459ea657ea36bee1b76b.jpg", + "model_url": "https://fal.run/argil/avatars/text-to-video", + "license_type": "commercial", + "date": "2025-09-01T09:50:49.180Z", + "group": { + "key": "argil-avatar", + "label": "Text to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for 
argil/avatars/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the argil/avatars/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "argil/avatars/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://fal.media/files/zebra/9NGo8wnyQwuQTJQr4Cvht_27981df50e43459ea657ea36bee1b76b.jpg", + "playgroundUrl": "https://fal.ai/models/argil/avatars/text-to-video", + "documentationUrl": "https://fal.ai/models/argil/avatars/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "AvatarsTextToVideoInput": { + "title": "TextToVideoRequest", + "type": "object", + "properties": { + "text": { + "examples": [ + "\nArgil is kinda crazy guys! You just turn a real person into \nan avatar that actually talks and moves and it's already reel-ready, \nfor TikTok, Shorts, whatever. 
No wasting hours editing, it still looks super pro.\n" + ], + "title": "Text", + "type": "string" + }, + "voice": { + "enum": [ + "Rachel", + "Clyde", + "Roger", + "Sarah", + "Laura", + "Thomas", + "Charlie", + "George", + "Callum", + "River", + "Harry", + "Liam", + "Alice", + "Matilda", + "Will", + "Jessica", + "Lilly", + "Bill", + "Oxley", + "Luna" + ], + "title": "Voice", + "type": "string" + }, + "remove_background": { + "title": "Remove Background", + "type": "boolean", + "description": "Enabling the remove background feature will result in a 50% increase in the price.", + "default": false + }, + "avatar": { + "enum": [ + "Mia outdoor (UGC)", + "Lara (Masterclass)", + "Ines (UGC)", + "Maria (Masterclass)", + "Emma (UGC)", + "Sienna (Masterclass)", + "Elena (UGC)", + "Jasmine (Masterclass)", + "Amara (Masterclass)", + "Ryan podcast (UGC)", + "Tyler (Masterclass)", + "Jayse (Masterclass)", + "Paul (Masterclass)", + "Matteo (UGC)", + "Daniel car (UGC)", + "Dario (Masterclass)", + "Viva (Masterclass)", + "Chen (Masterclass)", + "Alex (Masterclass)", + "Vanessa (UGC)", + "Laurent (UGC)", + "Noemie car (UGC)", + "Brandon (UGC)", + "Byron (Masterclass)", + "Calista (Masterclass)", + "Milo (Masterclass)", + "Fabien (Masterclass)", + "Rose (UGC)" + ], + "title": "Avatar", + "type": "string", + "examples": [ + "Noemie car (UGC)" + ] + } + }, + "x-fal-order-properties": [ + "avatar", + "text", + "voice", + "remove_background" + ], + "required": [ + "avatar", + "text", + "voice" + ] + }, + "AvatarsTextToVideoOutput": { + "title": "InferenceResult", + "type": "object", + "properties": { + "moderation_transcription": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Moderation Transcription" + }, + "moderation_error": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Moderation Error" + }, + "moderation_flagged": { + "title": "Moderation Flagged", + "type": "boolean", + "default": false + }, + "video": { + "anyOf": [ + { + "$ref": "#/components/schemas/Video" + }, + { + "type": "null" + } + ], + "examples": [ + { + "url": "https://argildotai.s3.us-east-1.amazonaws.com/fal-resource/example_fal.mp4" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "moderation_flagged", + "moderation_transcription", + "moderation_error" + ] + }, + "Video": { + "title": "Video", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/argil/avatars/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/argil/avatars/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/argil/avatars/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AvatarsTextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/argil/avatars/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AvatarsTextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pixverse/v5/text-to-video", + "metadata": { + "display_name": "Pixverse", + "category": "text-to-video", + "description": "Generate high quality video clips from text and image prompts using PixVerse v5", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:01.727Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "model_url": "https://fal.run/fal-ai/pixverse/v5/text-to-video", + "license_type": "commercial", + "date": "2025-08-23T10:50:22.733Z", + "group": { + "key": "pixverse-5", + "label": "Text to Video v5" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pixverse/v5/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pixverse/v5/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pixverse/v5/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/pixverse/v5/text-to-video", + "documentationUrl": 
"https://fal.ai/models/fal-ai/pixverse/v5/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PixverseV5TextToVideoInput": { + "title": "TextToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Epic low-cut camera capture of a girl clad in ultraviolet threads, Peter Max art style depiction, luminous diamond skin glistening under a vast moon's radiance, embodied in a superhuman flight among mystical ruins, symbolizing a deity's ritual ascent, hyper-detailed" + ], + "title": "Prompt", + "type": "string" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "4:3", + "1:1", + "3:4", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video", + "default": "16:9" + }, + "resolution": { + "enum": [ + "360p", + "540p", + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video", + "default": "720p" + }, + "style": { + "enum": [ + "anime", + "3d_animation", + "clay", + "comic", + "cyberpunk" + ], + "title": "Style", + "type": "string", + "description": "The style of the generated video" + }, + "duration": { + "enum": [ + "5", + "8" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds. 8s videos cost double. 
1080p videos are limited to 5 seconds", + "default": "5" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n " + }, + "negative_prompt": { + "examples": [ + "blurry, low quality, low resolution, pixelated, noisy, grainy, out of focus, poorly lit, poorly exposed, poorly composed, poorly framed, poorly cropped, poorly color corrected, poorly color graded" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt to be used for the generation", + "default": "" + } + }, + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "resolution", + "duration", + "negative_prompt", + "style", + "seed" + ], + "required": [ + "prompt" + ] + }, + "PixverseV5TextToVideoOutput": { + "title": "VideoOutputV5", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 5485412, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://storage.googleapis.com/falserverless/model_tests/video_models/output-4.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pixverse/v5/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v5/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v5/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV5TextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v5/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV5TextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/infinitalk/single-text", + "metadata": { + "display_name": "Infinitalk", + "category": "text-to-video", + "description": "Infinitalk model generates a talking avatar video from a text and audio file. The avatar lip-syncs to the provided audio with natural facial expressions.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:01.913Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/tiger/no2x6NmiDH44hhb5uCRpF_7a1a914245a54424a4f70019bb757ea3.jpg", + "model_url": "https://fal.run/fal-ai/infinitalk/single-text", + "license_type": "commercial", + "date": "2025-08-22T15:48:52.311Z", + "group": { + "key": "Infinitalk", + "label": "Text" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/infinitalk/single-text", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/infinitalk/single-text queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/infinitalk/single-text", + "category": "text-to-video", + "thumbnailUrl": "https://fal.media/files/tiger/no2x6NmiDH44hhb5uCRpF_7a1a914245a54424a4f70019bb757ea3.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/infinitalk/single-text", + "documentationUrl": "https://fal.ai/models/fal-ai/infinitalk/single-text/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "InfinitalkSingleTextInput": { + "title": "InfiniTalkSingleTextRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "An elderly man with a white beard and headphones records audio with a microphone. He appears engaged and expressive, suggesting a podcast or voiceover." + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt to guide video generation." + }, + "resolution": { + "enum": [ + "480p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the video to generate. Must be either 480p or 720p.", + "default": "480p" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use for generation.", + "default": "regular" + }, + "text_input": { + "examples": [ + "Spend more time with people who make you feel alive, and less with things that drain your soul." + ], + "title": "Text Input", + "type": "string", + "description": "The text input to guide video generation." + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/panda/HuM21CXMf0q7OO2zbvwhV_c4533aada79a495b90e50e32dc9b83a8.png" + ], + "title": "Image URL", + "type": "string", + "description": "URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped." + }, + "voice": { + "enum": [ + "Aria", + "Roger", + "Sarah", + "Laura", + "Charlie", + "George", + "Callum", + "River", + "Liam", + "Charlotte", + "Alice", + "Matilda", + "Will", + "Jessica", + "Eric", + "Chris", + "Brian", + "Daniel", + "Lily", + "Bill" + ], + "title": "Voice", + "type": "string", + "examples": [ + "Bill" + ], + "description": "The voice to use for speech generation" + }, + "num_frames": { + "minimum": 41, + "maximum": 721, + "type": "integer", + "title": "Number of Frames", + "description": "Number of frames to generate. Must be between 41 to 721.", + "default": 145 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility. If None, a random seed is chosen.", + "default": 42 + } + }, + "x-fal-order-properties": [ + "image_url", + "text_input", + "voice", + "prompt", + "num_frames", + "resolution", + "seed", + "acceleration" + ], + "required": [ + "image_url", + "text_input", + "voice", + "prompt" + ] + }, + "InfinitalkSingleTextOutput": { + "title": "AvatarSingleTextResponse", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "file_size": 797478, + "file_name": "6c9dd31e1d9a4482877747a52a661a0a.mp4", + "content_type": "application/octet-stream", + "url": "https://v3.fal.media/files/elephant/-huMN0zTaXmBr2CqzCMps_6c9dd31e1d9a4482877747a52a661a0a.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. 
It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/infinitalk/single-text/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/infinitalk/single-text/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/infinitalk/single-text": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InfinitalkSingleTextInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/infinitalk/single-text/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InfinitalkSingleTextOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "moonvalley/marey/t2v", + "metadata": { + "display_name": "Marey Realism V1.5", + "category": "text-to-video", + "description": "Generate a video from a text prompt with Marey, a generative video model trained exclusively on fully licensed data.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:05.015Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/zebra/XqLulT-Va4wv0SoknC72P_504081bd51c84280b787bc27906b490e.jpg", + "model_url": "https://fal.run/moonvalley/marey/t2v", + "license_type": "commercial", + "date": "2025-08-14T01:01:05.296Z", + "group": { + "key": "marey", + "label": "Text to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI 
for moonvalley/marey/t2v", + "version": "1.0.0", + "description": "The OpenAPI schema for the moonvalley/marey/t2v queue.", + "x-fal-metadata": { + "endpointId": "moonvalley/marey/t2v", + "category": "text-to-video", + "thumbnailUrl": "https://fal.media/files/zebra/XqLulT-Va4wv0SoknC72P_504081bd51c84280b787bc27906b490e.jpg", + "playgroundUrl": "https://fal.ai/models/moonvalley/marey/t2v", + "documentationUrl": "https://fal.ai/models/moonvalley/marey/t2v/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MareyT2vInput": { + "title": "MareyInputT2V", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Detailed Description: A small, white paper boat, with one corner engulfed in bright orange flames, drifts precariously across a dark puddle on wet asphalt. As raindrops fall, they create ever-expanding ripples on the water's surface, gently rocking the fragile vessel and causing the fiery reflection below to dance and shimmer. The flickering flame slowly consumes the paper, charring the edges black as the boat becomes waterlogged, beginning to sink in a poignant slow-motion battle between fire and water. Background: The background is softly blurred, suggesting an overcast day with out-of-focus foliage, enhancing the scene's intimate and melancholic mood. Middleground: Raindrops continuously strike the puddle's surface, creating concentric ripples that gently push the boat along its short, determined voyage. Foreground: The burning paper boat floats in sharp focus, its bright, flickering flame casting a warm, dramatic glow that reflects and distorts on the dark, wet surface of the asphalt." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate a video from" + }, + "duration": { + "enum": [ + "5s", + "10s" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video.", + "default": "5s" + }, + "dimensions": { + "enum": [ + "1920x1080", + "1152x1152", + "1536x1152", + "1152x1536" + ], + "title": "Dimensions", + "type": "string", + "description": "The dimensions of the generated video in width x height format.", + "default": "1920x1080" + }, + "guidance_scale": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Guidance Scale", + "description": "Controls how strongly the generation is guided by the prompt (0-20). Higher values follow the prompt more closely." + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Seed for random number generation. 
Use -1 for random seed each run.", + "default": -1 + }, + "negative_prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Negative Prompt", + "description": "Negative prompt used to guide the model away from undesirable features.", + "default": " low-poly, flat shader, bad rigging, stiff animation, uncanny eyes, low-quality textures, looping glitch, cheap effect, overbloom, bloom spam, default lighting, game asset, stiff face, ugly specular, AI artifacts" + } + }, + "x-fal-order-properties": [ + "prompt", + "dimensions", + "duration", + "negative_prompt", + "seed", + "guidance_scale" + ], + "required": [ + "prompt" + ] + }, + "MareyT2vOutput": { + "title": "MareyOutput", + "type": "object", + "properties": { + "video": { + "description": "The generated video.", + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/moonvalley/marey/t2v/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/moonvalley/marey/t2v/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/moonvalley/marey/t2v": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MareyT2vInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/moonvalley/marey/t2v/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MareyT2vOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan/v2.2-a14b/text-to-video/lora", + "metadata": { + "display_name": "Wan-2.2 Text-to-Video A14B with LoRAs", + "category": "text-to-video", + "description": "Wan-2.2 text-to-video is a video model that generates high-quality videos with high visual quality and motion diversity from text prompts. This endpoint supports LoRAs made for Wan 2.2.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:06.486Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/lion/brSX2_cSumQ6aBMB-jTpi_ebd2e40e80d243e2a65f96daf3c961c7.jpg", + "model_url": "https://fal.run/fal-ai/wan/v2.2-a14b/text-to-video/lora", + "license_type": "commercial", + "date": "2025-08-07T12:45:10.109Z", + "group": { + "key": "wan-v22-lora", + "label": "Text to Video (LoRA)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan/v2.2-a14b/text-to-video/lora", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan/v2.2-a14b/text-to-video/lora queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan/v2.2-a14b/text-to-video/lora", + "category": "text-to-video", + "thumbnailUrl": "https://fal.media/files/lion/brSX2_cSumQ6aBMB-jTpi_ebd2e40e80d243e2a65f96daf3c961c7.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan/v2.2-a14b/text-to-video/lora", + "documentationUrl": "https://fal.ai/models/fal-ai/wan/v2.2-a14b/text-to-video/lora/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
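Every endpoint in these fixtures repeats the same queue contract: POST the input to `https://queue.fal.run/<endpoint_id>`, poll `GET .../requests/{request_id}/status` until `status` is `COMPLETED`, then fetch `GET .../requests/{request_id}` for the output schema. A minimal TypeScript sketch of that flow, assuming a `FAL_KEY` environment variable and the `Key <token>` Authorization format (the schema only names the `Authorization` header; the token format and 1s poll interval are assumptions):

```ts
// Minimal queue-polling sketch against the paths declared in the schemas above.
// Assumes Node 18+ (global fetch) and FAL_KEY in the environment.
const FAL_QUEUE = 'https://queue.fal.run'
const headers = {
  Authorization: `Key ${process.env.FAL_KEY}`, // assumed token format
  'Content-Type': 'application/json',
}

async function runQueued(endpointId: string, input: unknown): Promise<unknown> {
  // POST /<endpoint_id> returns a QueueStatus with request_id.
  const submit = await fetch(`${FAL_QUEUE}/${endpointId}`, {
    method: 'POST',
    headers,
    body: JSON.stringify(input),
  })
  const { request_id } = (await submit.json()) as { request_id: string }

  // Poll /requests/{request_id}/status (logs=1 includes logs) until COMPLETED.
  let status = 'IN_QUEUE'
  while (status !== 'COMPLETED') {
    await new Promise((r) => setTimeout(r, 1000))
    const res = await fetch(
      `${FAL_QUEUE}/${endpointId}/requests/${request_id}/status?logs=1`,
      { headers },
    )
    status = ((await res.json()) as { status: string }).status
  }

  // GET /requests/{request_id} returns the endpoint's output schema.
  const result = await fetch(
    `${FAL_QUEUE}/${endpointId}/requests/${request_id}`,
    { headers },
  )
  return result.json()
}
```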
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanV22A14bTextToVideoLoraInput": { + "title": "WanLoRAT2VRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A close-up of a young woman smiling gently in the rain, raindrops glistening on her face and eyelashes. The video captures the delicate details of her expression and the water droplets, with soft light reflecting off her skin in the rainy atmosphere." + ], + "description": "The text prompt to guide video generation.", + "type": "string", + "title": "Prompt" + }, + "shift": { + "description": "Shift value for the video. Must be between 1.0 and 10.0.", + "type": "number", + "minimum": 1, + "maximum": 10, + "title": "Shift", + "examples": [ + 5 + ], + "default": 5 + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "description": "Acceleration level to use. The more acceleration, the faster the generation, but with lower quality. The recommended value is 'regular'.", + "type": "string", + "title": "Acceleration", + "examples": [ + "regular" + ], + "default": "regular" + }, + "num_interpolated_frames": { + "description": "Number of frames to interpolate between each pair of generated frames. Must be between 0 and 4.", + "type": "integer", + "minimum": 0, + "maximum": 4, + "title": "Number of Interpolated Frames", + "examples": [ + 1 + ], + "default": 1 + }, + "reverse_video": { + "description": "If true, the video will be reversed.", + "type": "boolean", + "title": "Reverse Video", + "default": false + }, + "loras": { + "description": "LoRA weights to be used in the inference.", + "type": "array", + "title": "Loras", + "items": { + "$ref": "#/components/schemas/LoRAWeight" + }, + "default": [] + }, + "frames_per_second": { + "description": "Frames per second of the generated video. Must be between 4 to 60. When using interpolation and `adjust_fps_for_interpolation` is set to true (default true,) the final FPS will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If `adjust_fps_for_interpolation` is set to false, this value will be used as-is.", + "type": "integer", + "minimum": 4, + "maximum": 60, + "title": "Frames per Second", + "examples": [ + 16 + ], + "default": 16 + }, + "enable_safety_checker": { + "examples": [ + true + ], + "description": "If set to true, input data will be checked for safety before processing.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": false + }, + "num_frames": { + "description": "Number of frames to generate. Must be between 17 to 161 (inclusive).", + "type": "integer", + "minimum": 17, + "maximum": 161, + "title": "Number of Frames", + "examples": [ + 81 + ], + "default": 81 + }, + "guidance_scale": { + "description": "Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.", + "type": "number", + "minimum": 1, + "maximum": 10, + "title": "Guidance Scale (1st Stage)", + "examples": [ + 3.5 + ], + "default": 3.5 + }, + "negative_prompt": { + "description": "Negative prompt for video generation.", + "type": "string", + "title": "Negative Prompt", + "default": "" + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "description": "The write mode of the output video. 
Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size.", + "type": "string", + "title": "Video Write Mode", + "examples": [ + "balanced" + ], + "default": "balanced" + }, + "resolution": { + "enum": [ + "480p", + "580p", + "720p" + ], + "description": "Resolution of the generated video (480p, 580p, or 720p).", + "type": "string", + "title": "Resolution", + "examples": [ + "720p" + ], + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1" + ], + "description": "Aspect ratio of the generated video (16:9, 9:16, or 1:1).", + "type": "string", + "title": "Aspect Ratio", + "examples": [ + "16:9", + "9:16", + "1:1" + ], + "default": "16:9" + }, + "enable_output_safety_checker": { + "examples": [ + false + ], + "description": "If set to true, output video will be checked for safety after generation.", + "type": "boolean", + "title": "Enable Output Safety Checker", + "default": false + }, + "guidance_scale_2": { + "description": "Guidance scale for the second stage of the model. This is used to control the adherence to the prompt in the second stage of the model.", + "type": "number", + "minimum": 1, + "maximum": 10, + "title": "Guidance Scale (2nd Stage)", + "examples": [ + 4 + ], + "default": 4 + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "description": "The quality of the output video. Higher quality means better visual quality but larger file size.", + "type": "string", + "title": "Video Quality", + "examples": [ + "high" + ], + "default": "high" + }, + "enable_prompt_expansion": { + "examples": [ + false + ], + "description": "Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": false + }, + "num_inference_steps": { + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "type": "integer", + "minimum": 2, + "maximum": 40, + "title": "Number of Inference Steps", + "examples": [ + 27 + ], + "default": 27 + }, + "interpolator_model": { + "enum": [ + "none", + "film", + "rife" + ], + "description": "The model to use for frame interpolation. If None, no interpolation is applied.", + "type": "string", + "title": "Interpolator Model", + "examples": [ + "film" + ], + "default": "film" + }, + "seed": { + "description": "Random seed for reproducibility. If None, a random seed is chosen.", + "type": "integer", + "title": "Seed" + }, + "adjust_fps_for_interpolation": { + "examples": [ + true + ], + "description": "If true, the number of frames per second will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. 
If false, the passed frames per second will be used as-is.", + "type": "boolean", + "title": "Adjust FPS for Interpolation", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "num_frames", + "frames_per_second", + "seed", + "resolution", + "aspect_ratio", + "num_inference_steps", + "enable_safety_checker", + "enable_output_safety_checker", + "enable_prompt_expansion", + "acceleration", + "guidance_scale", + "guidance_scale_2", + "shift", + "interpolator_model", + "num_interpolated_frames", + "adjust_fps_for_interpolation", + "video_quality", + "video_write_mode", + "loras", + "reverse_video" + ], + "required": [ + "prompt" + ] + }, + "WanV22A14bTextToVideoLoraOutput": { + "title": "WanT2VResponse", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A close-up of a young woman smiling gently in the rain, raindrops glistening on her face and eyelashes. The video captures the delicate details of her expression and the water droplets, with soft light reflecting off her skin in the rainy atmosphere." + ], + "description": "The text prompt used for video generation.", + "type": "string", + "title": "Prompt", + "default": "" + }, + "seed": { + "description": "The seed used for generation.", + "type": "integer", + "title": "Seed" + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/model_tests/wan/v2.2-woman-output.mp4" + } + ], + "description": "The generated video file.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "prompt", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "LoRAWeight": { + "title": "LoRAWeight", + "type": "object", + "properties": { + "path": { + "description": "URL or the path to the LoRA weights.", + "type": "string", + "title": "Path" + }, + "scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Scale", + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + }, + "transformer": { + "enum": [ + "high", + "low", + "both" + ], + "description": "Specifies the transformer to load the lora weight into. 'high' loads into the high-noise transformer, 'low' loads it into the low-noise transformer, while 'both' loads the LoRA into both transformers.", + "type": "string", + "title": "Transformer", + "default": "high" + }, + "weight_name": { + "description": "Name of the LoRA weight. Used only if `path` is a Hugging Face repository, and required only if you have more than 1 safetensors file in the repo.", + "type": "string", + "title": "Weight Name" + } + }, + "x-fal-order-properties": [ + "path", + "weight_name", + "scale", + "transformer" + ], + "required": [ + "path" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan/v2.2-a14b/text-to-video/lora/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-a14b/text-to-video/lora/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
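Per the `WanV22A14bTextToVideoLoraInput` and `LoRAWeight` schemas above, only `prompt` is required; each LoRA entry needs a `path`, while `scale` (0–4, default 1) and `transformer` (`high` | `low` | `both`, default `high`) are optional. A hypothetical request body sketch (the LoRA URL is a placeholder, not a real artifact):

```ts
// Hypothetical payload for fal-ai/wan/v2.2-a14b/text-to-video/lora; only
// `prompt` is required, all other fields fall back to the schema defaults.
const input = {
  prompt: 'A close-up of a young woman smiling gently in the rain',
  resolution: '720p',
  aspect_ratio: '16:9',
  loras: [
    {
      path: 'https://example.com/my-wan22-lora.safetensors', // placeholder URL
      scale: 1,
      transformer: 'both', // load into both high- and low-noise transformers
    },
  ],
}
```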
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-a14b/text-to-video/lora": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanV22A14bTextToVideoLoraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-a14b/text-to-video/lora/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanV22A14bTextToVideoLoraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan/v2.2-5b/text-to-video/distill", + "metadata": { + "display_name": "Wan", + "category": "text-to-video", + "description": "Wan 2.2's 5B distill model produces up to 5 seconds of video 720p at 24FPS with fluid motion and powerful prompt understanding", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:06.749Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/kangaroo/40VOx0fdYnthcwT_S30u4_98d8b53ed1dd439db51e2ffa2d253427.jpg", + "model_url": "https://fal.run/fal-ai/wan/v2.2-5b/text-to-video/distill", + "license_type": "commercial", + "date": "2025-08-06T17:34:42.197Z", + "group": { + "key": "wan-v22", + "label": "Text to Video (5B Distill)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan/v2.2-5b/text-to-video/distill", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan/v2.2-5b/text-to-video/distill queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan/v2.2-5b/text-to-video/distill", + "category": "text-to-video", + "thumbnailUrl": "https://fal.media/files/kangaroo/40VOx0fdYnthcwT_S30u4_98d8b53ed1dd439db51e2ffa2d253427.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan/v2.2-5b/text-to-video/distill", + "documentationUrl": "https://fal.ai/models/fal-ai/wan/v2.2-5b/text-to-video/distill/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanV225bTextToVideoDistillInput": { + "title": "WanDistillT2VRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A medium shot establishes a modern, minimalist office setting: clean lines, muted grey walls, and polished wood surfaces. The focus shifts to a close-up on a woman in sharp, navy blue business attire. Her crisp white blouse contrasts with the deep blue of her tailored suit jacket. The subtle texture of the fabric is visible—a fine weave with a slight sheen. Her expression is serious, yet engaging, as she speaks to someone unseen just beyond the frame. Close-up on her eyes, showing the intensity of her gaze and the fine lines around them that hint at experience and focus. Her lips are slightly parted, as if mid-sentence. The light catches the subtle highlights in her auburn hair, meticulously styled. Note the slight catch of light on the silver band of her watch. High resolution 4k" + ], + "description": "The text prompt to guide video generation.", + "type": "string", + "title": "Prompt" + }, + "shift": { + "description": "Shift value for the video. Must be between 1.0 and 10.0.", + "type": "number", + "minimum": 1, + "maximum": 10, + "title": "Shift", + "examples": [ + 5 + ], + "default": 5 + }, + "num_interpolated_frames": { + "description": "Number of frames to interpolate between each pair of generated frames. Must be between 0 and 4.", + "type": "integer", + "minimum": 0, + "maximum": 4, + "title": "Number of Interpolated Frames", + "examples": [ + 0 + ], + "default": 0 + }, + "frames_per_second": { + "description": "Frames per second of the generated video. Must be between 4 to 60. When using interpolation and `adjust_fps_for_interpolation` is set to true (default true,) the final FPS will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If `adjust_fps_for_interpolation` is set to false, this value will be used as-is.", + "type": "integer", + "minimum": 4, + "maximum": 60, + "title": "Frames per Second", + "examples": [ + 24 + ], + "default": 24 + }, + "guidance_scale": { + "description": "Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.", + "type": "number", + "minimum": 1, + "maximum": 10, + "title": "Guidance Scale (1st Stage)", + "examples": [ + 1 + ], + "default": 1 + }, + "num_frames": { + "description": "Number of frames to generate. Must be between 17 to 161 (inclusive).", + "type": "integer", + "minimum": 17, + "maximum": 161, + "title": "Number of Frames", + "examples": [ + 81 + ], + "default": 81 + }, + "enable_safety_checker": { + "examples": [ + true + ], + "description": "If set to true, input data will be checked for safety before processing.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": false + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "description": "The write mode of the output video. 
Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size.", + "type": "string", + "title": "Video Write Mode", + "examples": [ + "balanced" + ], + "default": "balanced" + }, + "resolution": { + "enum": [ + "580p", + "720p" + ], + "description": "Resolution of the generated video (580p or 720p).", + "type": "string", + "title": "Resolution", + "examples": [ + "720p" + ], + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1" + ], + "description": "Aspect ratio of the generated video (16:9, 9:16, or 1:1).", + "type": "string", + "title": "Aspect Ratio", + "examples": [ + "16:9", + "9:16", + "1:1" + ], + "default": "16:9" + }, + "enable_output_safety_checker": { + "examples": [ + false + ], + "description": "If set to true, output video will be checked for safety after generation.", + "type": "boolean", + "title": "Enable Output Safety Checker", + "default": false + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "description": "The quality of the output video. Higher quality means better visual quality but larger file size.", + "type": "string", + "title": "Video Quality", + "examples": [ + "high" + ], + "default": "high" + }, + "enable_prompt_expansion": { + "examples": [ + false + ], + "description": "Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": false + }, + "num_inference_steps": { + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "type": "integer", + "minimum": 2, + "maximum": 50, + "title": "Number of Inference Steps", + "examples": [ + 40 + ], + "default": 40 + }, + "interpolator_model": { + "enum": [ + "none", + "film", + "rife" + ], + "description": "The model to use for frame interpolation. If None, no interpolation is applied.", + "type": "string", + "title": "Interpolator Model", + "examples": [ + "film" + ], + "default": "film" + }, + "seed": { + "description": "Random seed for reproducibility. If None, a random seed is chosen.", + "type": "integer", + "title": "Seed" + }, + "adjust_fps_for_interpolation": { + "examples": [ + true + ], + "description": "If true, the number of frames per second will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. 
If false, the passed frames per second will be used as-is.", + "type": "boolean", + "title": "Adjust FPS for Interpolation", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "num_frames", + "frames_per_second", + "seed", + "resolution", + "aspect_ratio", + "num_inference_steps", + "enable_safety_checker", + "enable_output_safety_checker", + "enable_prompt_expansion", + "guidance_scale", + "shift", + "interpolator_model", + "num_interpolated_frames", + "adjust_fps_for_interpolation", + "video_quality", + "video_write_mode" + ], + "required": [ + "prompt" + ] + }, + "WanV225bTextToVideoDistillOutput": { + "title": "WanSmallT2VResponse", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A medium shot establishes a modern, minimalist office setting: clean lines, muted grey walls, and polished wood surfaces. The focus shifts to a close-up on a woman in sharp, navy blue business attire. Her crisp white blouse contrasts with the deep blue of her tailored suit jacket. The subtle texture of the fabric is visible—a fine weave with a slight sheen. Her expression is serious, yet engaging, as she speaks to someone unseen just beyond the frame. Close-up on her eyes, showing the intensity of her gaze and the fine lines around them that hint at experience and focus. Her lips are slightly parted, as if mid-sentence. The light catches the subtle highlights in her auburn hair, meticulously styled. Note the slight catch of light on the silver band of her watch. High resolution 4k" + ], + "description": "The text prompt used for video generation.", + "type": "string", + "title": "Prompt", + "default": "" + }, + "seed": { + "description": "The seed used for generation.", + "type": "integer", + "title": "Seed" + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/model_tests/wan/v2.2-small-output.mp4" + } + ], + "description": "The generated video file.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "prompt", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan/v2.2-5b/text-to-video/distill/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
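The `adjust_fps_for_interpolation` rule described above is plain arithmetic: when enabled, the delivered FPS is the generated FPS multiplied by `num_interpolated_frames + 1`. A small sketch of that calculation (the helper name is ours, not part of the API):

```ts
// Effective output FPS per the schema text: fps * (interpolated frames + 1)
// when adjust_fps_for_interpolation is true, otherwise the fps value as-is.
function effectiveFps(
  fps: number,
  numInterpolatedFrames: number,
  adjustFpsForInterpolation = true,
): number {
  return adjustFpsForInterpolation ? fps * (numInterpolatedFrames + 1) : fps
}

// Matches the documented example: 16 fps with 1 interpolated frame -> 32 fps.
console.assert(effectiveFps(16, 1) === 32)
```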
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-5b/text-to-video/distill/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-5b/text-to-video/distill": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanV225bTextToVideoDistillInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-5b/text-to-video/distill/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanV225bTextToVideoDistillOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan/v2.2-5b/text-to-video/fast-wan", + "metadata": { + "display_name": "Wan", + "category": "text-to-video", + "description": "Wan 2.2's 5B FastVideo model produces up to 5 seconds of video 720p at 24FPS with fluid motion and powerful prompt understanding", + "status": "active", + "tags": [ + "text to video", + "motion" + ], + "updated_at": "2026-01-26T21:43:07.414Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/elephant/4c9sGLsb2lXhga0i89W2N_47ef03f9caa949ca901a2801a4d42e6a.jpg", + "model_url": "https://fal.run/fal-ai/wan/v2.2-5b/text-to-video/fast-wan", + "license_type": "commercial", + "date": "2025-08-05T23:59:17.192Z", + "group": { + "key": "wan-v22-fastwan", + "label": "Text to Video (FastWan 5B)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan/v2.2-5b/text-to-video/fast-wan", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan/v2.2-5b/text-to-video/fast-wan queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan/v2.2-5b/text-to-video/fast-wan", + "category": "text-to-video", + "thumbnailUrl": "https://fal.media/files/elephant/4c9sGLsb2lXhga0i89W2N_47ef03f9caa949ca901a2801a4d42e6a.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan/v2.2-5b/text-to-video/fast-wan", + "documentationUrl": "https://fal.ai/models/fal-ai/wan/v2.2-5b/text-to-video/fast-wan/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + 
"IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanV225bTextToVideoFastWanInput": { + "title": "WanSmallFastVideoT2VRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A medium shot establishes a modern, minimalist office setting: clean lines, muted grey walls, and polished wood surfaces. The focus shifts to a close-up on a woman in sharp, navy blue business attire. Her crisp white blouse contrasts with the deep blue of her tailored suit jacket. The subtle texture of the fabric is visible—a fine weave with a slight sheen. Her expression is serious, yet engaging, as she speaks to someone unseen just beyond the frame. Close-up on her eyes, showing the intensity of her gaze and the fine lines around them that hint at experience and focus. Her lips are slightly parted, as if mid-sentence. The light catches the subtle highlights in her auburn hair, meticulously styled. Note the slight catch of light on the silver band of her watch. High resolution 4k" + ], + "description": "The text prompt to guide video generation.", + "type": "string", + "title": "Prompt" + }, + "num_interpolated_frames": { + "description": "Number of frames to interpolate between each pair of generated frames. Must be between 0 and 4.", + "type": "integer", + "minimum": 0, + "maximum": 4, + "title": "Number of Interpolated Frames", + "examples": [ + 0 + ], + "default": 0 + }, + "frames_per_second": { + "description": "Frames per second of the generated video. Must be between 4 to 60. When using interpolation and `adjust_fps_for_interpolation` is set to true (default true,) the final FPS will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If `adjust_fps_for_interpolation` is set to false, this value will be used as-is.", + "type": "integer", + "minimum": 4, + "maximum": 60, + "title": "Frames per Second", + "examples": [ + 24 + ], + "default": 24 + }, + "enable_safety_checker": { + "examples": [ + true + ], + "description": "If set to true, input data will be checked for safety before processing.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": false + }, + "num_frames": { + "description": "Number of frames to generate. Must be between 17 to 161 (inclusive).", + "type": "integer", + "minimum": 17, + "maximum": 161, + "title": "Number of Frames", + "examples": [ + 81 + ], + "default": 81 + }, + "guidance_scale": { + "description": "Classifier-free guidance scale. 
Higher values give better adherence to the prompt but may decrease quality.", + "type": "number", + "minimum": 1, + "maximum": 10, + "title": "Guidance Scale", + "examples": [ + 3.5 + ], + "default": 3.5 + }, + "negative_prompt": { + "description": "Negative prompt for video generation.", + "type": "string", + "title": "Negative Prompt", + "default": "" + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "description": "The write mode of the output video. Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size.", + "type": "string", + "title": "Video Write Mode", + "examples": [ + "balanced" + ], + "default": "balanced" + }, + "resolution": { + "enum": [ + "480p", + "580p", + "720p" + ], + "description": "Resolution of the generated video (480p, 580p, or 720p).", + "type": "string", + "title": "Resolution", + "examples": [ + "720p" + ], + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1" + ], + "description": "Aspect ratio of the generated video (16:9, 9:16, or 1:1).", + "type": "string", + "title": "Aspect Ratio", + "examples": [ + "16:9", + "9:16", + "1:1" + ], + "default": "16:9" + }, + "enable_output_safety_checker": { + "examples": [ + false + ], + "description": "If set to true, output video will be checked for safety after generation.", + "type": "boolean", + "title": "Enable Output Safety Checker", + "default": false + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "description": "The quality of the output video. Higher quality means better visual quality but larger file size.", + "type": "string", + "title": "Video Quality", + "examples": [ + "high" + ], + "default": "high" + }, + "enable_prompt_expansion": { + "examples": [ + false + ], + "description": "Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": false + }, + "seed": { + "description": "Random seed for reproducibility. If None, a random seed is chosen.", + "type": "integer", + "title": "Seed" + }, + "interpolator_model": { + "enum": [ + "none", + "film", + "rife" + ], + "description": "The model to use for frame interpolation. If None, no interpolation is applied.", + "type": "string", + "title": "Interpolator Model", + "examples": [ + "film" + ], + "default": "film" + }, + "adjust_fps_for_interpolation": { + "examples": [ + true + ], + "description": "If true, the number of frames per second will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. 
If false, the passed frames per second will be used as-is.", + "type": "boolean", + "title": "Adjust FPS for Interpolation", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "num_frames", + "frames_per_second", + "seed", + "resolution", + "aspect_ratio", + "enable_safety_checker", + "enable_output_safety_checker", + "enable_prompt_expansion", + "guidance_scale", + "interpolator_model", + "num_interpolated_frames", + "adjust_fps_for_interpolation", + "video_quality", + "video_write_mode" + ], + "required": [ + "prompt" + ] + }, + "WanV225bTextToVideoFastWanOutput": { + "title": "WanSmallFastVideoT2VResponse", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A medium shot establishes a modern, minimalist office setting: clean lines, muted grey walls, and polished wood surfaces. The focus shifts to a close-up on a woman in sharp, navy blue business attire. Her crisp white blouse contrasts with the deep blue of her tailored suit jacket. The subtle texture of the fabric is visible—a fine weave with a slight sheen. Her expression is serious, yet engaging, as she speaks to someone unseen just beyond the frame. Close-up on her eyes, showing the intensity of her gaze and the fine lines around them that hint at experience and focus. Her lips are slightly parted, as if mid-sentence. The light catches the subtle highlights in her auburn hair, meticulously styled. Note the slight catch of light on the silver band of her watch. High resolution 4k" + ], + "description": "The text prompt used for video generation.", + "type": "string", + "title": "Prompt", + "default": "" + }, + "seed": { + "description": "The seed used for generation.", + "type": "integer", + "title": "Seed" + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/model_tests/wan/v2.2-small-output.mp4" + } + ], + "description": "The generated video file.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "prompt", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan/v2.2-5b/text-to-video/fast-wan/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-5b/text-to-video/fast-wan/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-5b/text-to-video/fast-wan": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanV225bTextToVideoFastWanInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-5b/text-to-video/fast-wan/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanV225bTextToVideoFastWanOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan/v2.2-a14b/text-to-video/turbo", + "metadata": { + "display_name": "Wan", + "category": "text-to-video", + "description": "Wan-2.2 turbo text-to-video is a video model that generates high-quality videos with high visual quality and motion diversity from text prompts. 
", + "status": "active", + "tags": [ + "text to video", + "motion" + ], + "updated_at": "2026-01-26T21:43:09.602Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-4.jpg", + "model_url": "https://fal.run/fal-ai/wan/v2.2-a14b/text-to-video/turbo", + "license_type": "commercial", + "date": "2025-07-31T18:01:55.359Z", + "group": { + "key": "wan-v22-turbo", + "label": "Text to Video (Turbo)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan/v2.2-a14b/text-to-video/turbo", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan/v2.2-a14b/text-to-video/turbo queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan/v2.2-a14b/text-to-video/turbo", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-4.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan/v2.2-a14b/text-to-video/turbo", + "documentationUrl": "https://fal.ai/models/fal-ai/wan/v2.2-a14b/text-to-video/turbo/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanV22A14bTextToVideoTurboInput": { + "title": "WanTurboT2VRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A medium shot establishes a modern, minimalist office setting: clean lines, muted grey walls, and polished wood surfaces. The focus shifts to a close-up on a woman in sharp, navy blue business attire. Her crisp white blouse contrasts with the deep blue of her tailored suit jacket. The subtle texture of the fabric is visible—a fine weave with a slight sheen. Her expression is serious, yet engaging, as she speaks to someone unseen just beyond the frame. Close-up on her eyes, showing the intensity of her gaze and the fine lines around them that hint at experience and focus. Her lips are slightly parted, as if mid-sentence. The light catches the subtle highlights in her auburn hair, meticulously styled. Note the slight catch of light on the silver band of her watch. High resolution 4k" + ], + "description": "The text prompt to guide video generation.", + "type": "string", + "title": "Prompt" + }, + "resolution": { + "enum": [ + "480p", + "580p", + "720p" + ], + "description": "Resolution of the generated video (480p, 580p, or 720p).", + "type": "string", + "title": "Resolution", + "examples": [ + "720p" + ], + "default": "720p" + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "description": "Acceleration level to use. 
The more acceleration, the faster the generation, but with lower quality. The recommended value is 'regular'.", + "type": "string", + "title": "Acceleration", + "examples": [ + "regular" + ], + "default": "regular" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1" + ], + "description": "Aspect ratio of the generated video (16:9, 9:16, or 1:1).", + "type": "string", + "title": "Aspect Ratio", + "examples": [ + "16:9", + "9:16", + "1:1" + ], + "default": "16:9" + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "description": "The write mode of the output video. Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size.", + "type": "string", + "title": "Video Write Mode", + "examples": [ + "balanced" + ], + "default": "balanced" + }, + "enable_output_safety_checker": { + "examples": [ + false + ], + "description": "If set to true, output video will be checked for safety after generation.", + "type": "boolean", + "title": "Enable Output Safety Checker", + "default": false + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "description": "The quality of the output video. Higher quality means better visual quality but larger file size.", + "type": "string", + "title": "Video Quality", + "examples": [ + "high" + ], + "default": "high" + }, + "enable_safety_checker": { + "examples": [ + true + ], + "description": "If set to true, input data will be checked for safety before processing.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": false + }, + "seed": { + "description": "Random seed for reproducibility. If None, a random seed is chosen.", + "type": "integer", + "title": "Seed" + }, + "enable_prompt_expansion": { + "examples": [ + false + ], + "description": "Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "seed", + "resolution", + "aspect_ratio", + "enable_safety_checker", + "enable_output_safety_checker", + "enable_prompt_expansion", + "acceleration", + "video_quality", + "video_write_mode" + ], + "required": [ + "prompt" + ] + }, + "WanV22A14bTextToVideoTurboOutput": { + "title": "WanTurboT2VResponse", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A medium shot establishes a modern, minimalist office setting: clean lines, muted grey walls, and polished wood surfaces. The focus shifts to a close-up on a woman in sharp, navy blue business attire. Her crisp white blouse contrasts with the deep blue of her tailored suit jacket. The subtle texture of the fabric is visible—a fine weave with a slight sheen. Her expression is serious, yet engaging, as she speaks to someone unseen just beyond the frame. Close-up on her eyes, showing the intensity of her gaze and the fine lines around them that hint at experience and focus. Her lips are slightly parted, as if mid-sentence. The light catches the subtle highlights in her auburn hair, meticulously styled. Note the slight catch of light on the silver band of her watch. 
High resolution 4k" + ], + "description": "The text prompt used for video generation.", + "type": "string", + "title": "Prompt", + "default": "" + }, + "seed": { + "description": "The seed used for generation.", + "type": "integer", + "title": "Seed" + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/gallery/wan-t2v-turbo.mp4" + } + ], + "description": "The generated video file.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "prompt", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan/v2.2-a14b/text-to-video/turbo/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-a14b/text-to-video/turbo/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-a14b/text-to-video/turbo": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanV22A14bTextToVideoTurboInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-a14b/text-to-video/turbo/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanV22A14bTextToVideoTurboOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan/v2.2-5b/text-to-video", + "metadata": { + "display_name": "Wan v2.2 5B", + "category": "text-to-video", + "description": "Wan 2.2's 5B model produces up to 5 seconds of video 720p at 24FPS with fluid motion and powerful prompt understanding", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:11.327Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-2.jpg", + "model_url": "https://fal.run/fal-ai/wan/v2.2-5b/text-to-video", + "license_type": "commercial", + "date": "2025-07-28T20:48:02.539Z", + "group": { + "key": "wan-v22", + "label": "Text to Video (5B)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan/v2.2-5b/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan/v2.2-5b/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan/v2.2-5b/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-2.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan/v2.2-5b/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/wan/v2.2-5b/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanV225bTextToVideoInput": { + "title": "WanSmallT2VRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A medium shot establishes a modern, minimalist office setting: clean lines, muted grey walls, and polished wood surfaces. The focus shifts to a close-up on a woman in sharp, navy blue business attire. Her crisp white blouse contrasts with the deep blue of her tailored suit jacket. The subtle texture of the fabric is visible—a fine weave with a slight sheen. Her expression is serious, yet engaging, as she speaks to someone unseen just beyond the frame. Close-up on her eyes, showing the intensity of her gaze and the fine lines around them that hint at experience and focus. Her lips are slightly parted, as if mid-sentence. The light catches the subtle highlights in her auburn hair, meticulously styled. Note the slight catch of light on the silver band of her watch. High resolution 4k" + ], + "description": "The text prompt to guide video generation.", + "type": "string", + "title": "Prompt" + }, + "shift": { + "description": "Shift value for the video. Must be between 1.0 and 10.0.", + "type": "number", + "minimum": 1, + "maximum": 10, + "title": "Shift", + "examples": [ + 5 + ], + "default": 5 + }, + "num_interpolated_frames": { + "description": "Number of frames to interpolate between each pair of generated frames. Must be between 0 and 4.", + "type": "integer", + "minimum": 0, + "maximum": 4, + "title": "Number of Interpolated Frames", + "examples": [ + 0 + ], + "default": 0 + }, + "frames_per_second": { + "description": "Frames per second of the generated video. Must be between 4 to 60. When using interpolation and `adjust_fps_for_interpolation` is set to true (default true,) the final FPS will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If `adjust_fps_for_interpolation` is set to false, this value will be used as-is.", + "type": "integer", + "minimum": 4, + "maximum": 60, + "title": "Frames per Second", + "examples": [ + 24 + ], + "default": 24 + }, + "guidance_scale": { + "description": "Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.", + "type": "number", + "minimum": 1, + "maximum": 10, + "title": "Guidance Scale", + "examples": [ + 3.5 + ], + "default": 3.5 + }, + "num_frames": { + "description": "Number of frames to generate. Must be between 17 to 161 (inclusive).", + "type": "integer", + "minimum": 17, + "maximum": 161, + "title": "Number of Frames", + "examples": [ + 81 + ], + "default": 81 + }, + "enable_safety_checker": { + "examples": [ + true + ], + "description": "If set to true, input data will be checked for safety before processing.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": false + }, + "negative_prompt": { + "description": "Negative prompt for video generation.", + "type": "string", + "title": "Negative Prompt", + "default": "" + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "description": "The write mode of the output video. 
Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size.", + "type": "string", + "title": "Video Write Mode", + "examples": [ + "balanced" + ], + "default": "balanced" + }, + "resolution": { + "enum": [ + "580p", + "720p" + ], + "description": "Resolution of the generated video (580p or 720p).", + "type": "string", + "title": "Resolution", + "examples": [ + "720p" + ], + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1" + ], + "description": "Aspect ratio of the generated video (16:9 or 9:16).", + "type": "string", + "title": "Aspect Ratio", + "examples": [ + "16:9", + "9:16", + "1:1" + ], + "default": "16:9" + }, + "enable_output_safety_checker": { + "examples": [ + false + ], + "description": "If set to true, output video will be checked for safety after generation.", + "type": "boolean", + "title": "Enable Output Safety Checker", + "default": false + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "description": "The quality of the output video. Higher quality means better visual quality but larger file size.", + "type": "string", + "title": "Video Quality", + "examples": [ + "high" + ], + "default": "high" + }, + "enable_prompt_expansion": { + "examples": [ + false + ], + "description": "Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": false + }, + "num_inference_steps": { + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "type": "integer", + "minimum": 2, + "maximum": 50, + "title": "Number of Inference Steps", + "examples": [ + 40 + ], + "default": 40 + }, + "interpolator_model": { + "enum": [ + "none", + "film", + "rife" + ], + "description": "The model to use for frame interpolation. If None, no interpolation is applied.", + "type": "string", + "title": "Interpolator Model", + "examples": [ + "film" + ], + "default": "film" + }, + "seed": { + "description": "Random seed for reproducibility. If None, a random seed is chosen.", + "type": "integer", + "title": "Seed" + }, + "adjust_fps_for_interpolation": { + "examples": [ + true + ], + "description": "If true, the number of frames per second will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. 
If false, the passed frames per second will be used as-is.", + "type": "boolean", + "title": "Adjust FPS for Interpolation", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "num_frames", + "frames_per_second", + "seed", + "resolution", + "aspect_ratio", + "num_inference_steps", + "enable_safety_checker", + "enable_output_safety_checker", + "enable_prompt_expansion", + "guidance_scale", + "shift", + "interpolator_model", + "num_interpolated_frames", + "adjust_fps_for_interpolation", + "video_quality", + "video_write_mode" + ], + "required": [ + "prompt" + ] + }, + "WanV225bTextToVideoOutput": { + "title": "WanSmallT2VResponse", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A medium shot establishes a modern, minimalist office setting: clean lines, muted grey walls, and polished wood surfaces. The focus shifts to a close-up on a woman in sharp, navy blue business attire. Her crisp white blouse contrasts with the deep blue of her tailored suit jacket. The subtle texture of the fabric is visible—a fine weave with a slight sheen. Her expression is serious, yet engaging, as she speaks to someone unseen just beyond the frame. Close-up on her eyes, showing the intensity of her gaze and the fine lines around them that hint at experience and focus. Her lips are slightly parted, as if mid-sentence. The light catches the subtle highlights in her auburn hair, meticulously styled. Note the slight catch of light on the silver band of her watch. High resolution 4k" + ], + "description": "The text prompt used for video generation.", + "type": "string", + "title": "Prompt", + "default": "" + }, + "seed": { + "description": "The seed used for generation.", + "type": "integer", + "title": "Seed" + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/model_tests/wan/v2.2-small-output.mp4" + } + ], + "description": "The generated video file.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "prompt", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan/v2.2-5b/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-5b/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-5b/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanV225bTextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-5b/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanV225bTextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan/v2.2-a14b/text-to-video", + "metadata": { + "display_name": "Wan-2.2 Text-to-Video A14B", + "category": "text-to-video", + "description": "Wan-2.2 text-to-video is a video model that generates high-quality videos with high visual quality and motion diversity from text prompts. ", + "status": "active", + "tags": [ + "text to video", + "motion" + ], + "updated_at": "2026-01-26T21:43:11.575Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-4.jpg", + "model_url": "https://fal.run/fal-ai/wan/v2.2-a14b/text-to-video", + "license_type": "commercial", + "date": "2025-07-28T16:14:33.355Z", + "group": { + "key": "wan-v22-large", + "label": "Text to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan/v2.2-a14b/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan/v2.2-a14b/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan/v2.2-a14b/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-4.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan/v2.2-a14b/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/wan/v2.2-a14b/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanV22A14bTextToVideoInput": { + "title": "WanT2VRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A close-up of a young woman smiling gently in the rain, raindrops glistening on her face and eyelashes. The video captures the delicate details of her expression and the water droplets, with soft light reflecting off her skin in the rainy atmosphere." + ], + "description": "The text prompt to guide video generation.", + "type": "string", + "title": "Prompt" + }, + "shift": { + "description": "Shift value for the video. Must be between 1.0 and 10.0.", + "type": "number", + "minimum": 1, + "maximum": 10, + "title": "Shift", + "examples": [ + 5 + ], + "default": 5 + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "description": "Acceleration level to use. The more acceleration, the faster the generation, but with lower quality. The recommended value is 'regular'.", + "type": "string", + "title": "Acceleration", + "examples": [ + "regular" + ], + "default": "regular" + }, + "num_interpolated_frames": { + "description": "Number of frames to interpolate between each pair of generated frames. Must be between 0 and 4.", + "type": "integer", + "minimum": 0, + "maximum": 4, + "title": "Number of Interpolated Frames", + "examples": [ + 1 + ], + "default": 1 + }, + "frames_per_second": { + "description": "Frames per second of the generated video. Must be between 4 to 60. When using interpolation and `adjust_fps_for_interpolation` is set to true (default true,) the final FPS will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If `adjust_fps_for_interpolation` is set to false, this value will be used as-is.", + "type": "integer", + "minimum": 4, + "maximum": 60, + "title": "Frames per Second", + "examples": [ + 16 + ], + "default": 16 + }, + "guidance_scale": { + "description": "Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.", + "type": "number", + "minimum": 1, + "maximum": 10, + "title": "Guidance Scale (1st Stage)", + "examples": [ + 3.5 + ], + "default": 3.5 + }, + "num_frames": { + "description": "Number of frames to generate. 
Must be between 17 to 161 (inclusive).", + "type": "integer", + "minimum": 17, + "maximum": 161, + "title": "Number of Frames", + "examples": [ + 81 + ], + "default": 81 + }, + "enable_safety_checker": { + "examples": [ + true + ], + "description": "If set to true, input data will be checked for safety before processing.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": false + }, + "negative_prompt": { + "description": "Negative prompt for video generation.", + "type": "string", + "title": "Negative Prompt", + "default": "" + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "description": "The write mode of the output video. Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size.", + "type": "string", + "title": "Video Write Mode", + "examples": [ + "balanced" + ], + "default": "balanced" + }, + "resolution": { + "enum": [ + "480p", + "580p", + "720p" + ], + "description": "Resolution of the generated video (480p, 580p, or 720p).", + "type": "string", + "title": "Resolution", + "examples": [ + "720p" + ], + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1" + ], + "description": "Aspect ratio of the generated video (16:9 or 9:16).", + "type": "string", + "title": "Aspect Ratio", + "examples": [ + "16:9", + "9:16", + "1:1" + ], + "default": "16:9" + }, + "enable_output_safety_checker": { + "examples": [ + false + ], + "description": "If set to true, output video will be checked for safety after generation.", + "type": "boolean", + "title": "Enable Output Safety Checker", + "default": false + }, + "guidance_scale_2": { + "description": "Guidance scale for the second stage of the model. This is used to control the adherence to the prompt in the second stage of the model.", + "type": "number", + "minimum": 1, + "maximum": 10, + "title": "Guidance Scale (2nd Stage)", + "examples": [ + 4 + ], + "default": 4 + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "description": "The quality of the output video. Higher quality means better visual quality but larger file size.", + "type": "string", + "title": "Video Quality", + "examples": [ + "high" + ], + "default": "high" + }, + "enable_prompt_expansion": { + "examples": [ + false + ], + "description": "Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": false + }, + "num_inference_steps": { + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "type": "integer", + "minimum": 2, + "maximum": 40, + "title": "Number of Inference Steps", + "examples": [ + 27 + ], + "default": 27 + }, + "interpolator_model": { + "enum": [ + "none", + "film", + "rife" + ], + "description": "The model to use for frame interpolation. If None, no interpolation is applied.", + "type": "string", + "title": "Interpolator Model", + "examples": [ + "film" + ], + "default": "film" + }, + "seed": { + "description": "Random seed for reproducibility. 
If None, a random seed is chosen.", + "type": "integer", + "title": "Seed" + }, + "adjust_fps_for_interpolation": { + "examples": [ + true + ], + "description": "If true, the number of frames per second will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If false, the passed frames per second will be used as-is.", + "type": "boolean", + "title": "Adjust FPS for Interpolation", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "num_frames", + "frames_per_second", + "seed", + "resolution", + "aspect_ratio", + "num_inference_steps", + "enable_safety_checker", + "enable_output_safety_checker", + "enable_prompt_expansion", + "acceleration", + "guidance_scale", + "guidance_scale_2", + "shift", + "interpolator_model", + "num_interpolated_frames", + "adjust_fps_for_interpolation", + "video_quality", + "video_write_mode" + ], + "required": [ + "prompt" + ] + }, + "WanV22A14bTextToVideoOutput": { + "title": "WanT2VResponse", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A close-up of a young woman smiling gently in the rain, raindrops glistening on her face and eyelashes. The video captures the delicate details of her expression and the water droplets, with soft light reflecting off her skin in the rainy atmosphere." + ], + "description": "The text prompt used for video generation.", + "type": "string", + "title": "Prompt", + "default": "" + }, + "seed": { + "description": "The seed used for generation.", + "type": "integer", + "title": "Seed" + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/model_tests/wan/v2.2-woman-output.mp4" + } + ], + "description": "The generated video file.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "prompt", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan/v2.2-a14b/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-a14b/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-a14b/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanV22A14bTextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-a14b/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanV22A14bTextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltxv-13b-098-distilled", + "metadata": { + "display_name": "LTX-Video 13B 0.9.8 Distilled", + "category": "text-to-video", + "description": "Generate long videos from prompts using LTX Video-0.9.8 13B Distilled and custom LoRA", + "status": "active", + "tags": [ + "video", + "ltx-video", + "text-to-video" + ], + "updated_at": "2026-01-26T21:43:15.210Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Training-4.jpg", + "model_url": "https://fal.run/fal-ai/ltxv-13b-098-distilled", + "license_type": "commercial", + "date": "2025-07-17T03:01:15.578Z", + "group": { + "key": "ltx-video-13b-098", + "label": "Text to Video" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltxv-13b-098-distilled", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltxv-13b-098-distilled queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltxv-13b-098-distilled", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Training-4.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltxv-13b-098-distilled", + "documentationUrl": "https://fal.ai/models/fal-ai/ltxv-13b-098-distilled/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Ltxv13b098DistilledInput": { + "description": "Distilled model input", + "type": "object", + "properties": { + "second_pass_skip_initial_steps": { + "description": "The number of inference steps to skip in the initial steps of the second pass. By skipping some steps at the beginning, the second pass can focus on smaller details instead of larger changes.", + "type": "integer", + "minimum": 1, + "maximum": 11, + "title": "Second Pass Skip Initial Steps", + "examples": [ + 5 + ], + "default": 5 + }, + "first_pass_num_inference_steps": { + "description": "Number of inference steps during the first pass.", + "type": "integer", + "minimum": 2, + "maximum": 12, + "title": "Number of Inference Steps", + "examples": [ + 8 + ], + "default": 8 + }, + "frame_rate": { + "description": "The frame rate of the video.", + "type": "integer", + "minimum": 1, + "maximum": 60, + "title": "Frame Rate", + "examples": [ + 24 + ], + "default": 24 + }, + "reverse_video": { + "examples": [ + false + ], + "title": "Reverse Video", + "type": "boolean", + "description": "Whether to reverse the video.", + "default": false + }, + "prompt": { + "examples": [ + "A cinematic fast-tracking shot follows a vintage, teal camper van as it descends a winding mountain trail. The van, slightly weathered but well-maintained, is the central focus, its retro design emphasized by the motion blur. Medium shot reveals the dusty, ochre trail, edged with vibrant green pine trees. Close-up on the van's tires shows the gravel spraying, highlighting the speed and rugged terrain. Sunlight filters through the trees, casting dappled shadows on the van and the trail. The background is a hazy, majestic mountain range bathed in warm, golden light. The overall mood is adventurous and exhilarating. High resolution 4k movie scene." + ], + "title": "Prompt", + "type": "string", + "description": "Text prompt to guide generation" + }, + "expand_prompt": { + "examples": [ + false + ], + "title": "Expand Prompt", + "type": "boolean", + "description": "Whether to expand the prompt using a language model.", + "default": false + }, + "temporal_adain_factor": { + "description": "The factor for adaptive instance normalization (AdaIN) applied to generated video chunks after the first. This can help deal with a gradual increase in saturation/contrast in the generated video by normalizing the color distribution across the video. 
A high value will ensure the color distribution is more consistent across the video, while a low value will allow for more variation in color distribution.", + "type": "number", + "examples": [ + 0.5 + ], + "maximum": 1, + "title": "Temporal AdaIN Factor", + "minimum": 0, + "multipleOf": 0.05, + "default": 0.5 + }, + "loras": { + "description": "LoRA weights to use for generation", + "type": "array", + "title": "Loras", + "items": { + "$ref": "#/components/schemas/LoRAWeight" + }, + "default": [] + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "num_frames": { + "description": "The number of frames in the video.", + "type": "integer", + "minimum": 9, + "maximum": 1441, + "title": "Number of Frames", + "examples": [ + 121 + ], + "default": 121 + }, + "second_pass_num_inference_steps": { + "description": "Number of inference steps during the second pass.", + "type": "integer", + "minimum": 2, + "maximum": 12, + "title": "Second Pass Number of Inference Steps", + "examples": [ + 8 + ], + "default": 8 + }, + "negative_prompt": { + "description": "Negative prompt for generation", + "type": "string", + "title": "Negative Prompt", + "default": "worst quality, inconsistent motion, blurry, jittery, distorted" + }, + "enable_detail_pass": { + "examples": [ + false + ], + "title": "Enable Detail Pass", + "type": "boolean", + "description": "Whether to use a detail pass. If True, the model will perform a second pass to refine the video and enhance details. This incurs a 2.0x cost multiplier on the base price.", + "default": false + }, + "resolution": { + "examples": [ + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video.", + "enum": [ + "480p", + "720p" + ], + "default": "720p" + }, + "aspect_ratio": { + "examples": [ + "16:9" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio of the generated video.", + "enum": [ + "9:16", + "1:1", + "16:9" + ], + "default": "16:9" + }, + "tone_map_compression_ratio": { + "description": "The compression ratio for tone mapping. This is used to compress the dynamic range of the video to improve visual quality. A value of 0.0 means no compression, while a value of 1.0 means maximum compression.", + "type": "number", + "examples": [ + 0 + ], + "maximum": 1, + "title": "Tone Map Compression Ratio", + "minimum": 0, + "multipleOf": 0.05, + "default": 0 + }, + "seed": { + "description": "Random seed for generation", + "type": "integer", + "title": "Seed" + } + }, + "title": "DistilledTextToVideoInput", + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "loras", + "resolution", + "aspect_ratio", + "seed", + "num_frames", + "first_pass_num_inference_steps", + "second_pass_num_inference_steps", + "second_pass_skip_initial_steps", + "frame_rate", + "expand_prompt", + "reverse_video", + "enable_safety_checker", + "enable_detail_pass", + "temporal_adain_factor", + "tone_map_compression_ratio" + ], + "required": [ + "prompt" + ] + }, + "Ltxv13b098DistilledOutput": { + "title": "TextToVideoOutput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A cinematic fast-tracking shot follows a vintage, teal camper van as it descends a winding mountain trail. The van, slightly weathered but well-maintained, is the central focus, its retro design emphasized by the motion blur. 
Medium shot reveals the dusty, ochre trail, edged with vibrant green pine trees. Close-up on the van's tires shows the gravel spraying, highlighting the speed and rugged terrain. Sunlight filters through the trees, casting dappled shadows on the van and the trail. The background is a hazy, majestic mountain range bathed in warm, golden light. The overall mood is adventurous and exhilarating. High resolution 4k movie scene." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation." + }, + "seed": { + "description": "The seed used for generation.", + "type": "integer", + "title": "Seed" + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/ltxv-multiscale-text-to-video.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "prompt", + "seed" + ], + "required": [ + "video", + "prompt", + "seed" + ] + }, + "LoRAWeight": { + "title": "LoRAWeight", + "type": "object", + "properties": { + "path": { + "description": "URL or path to the LoRA weights.", + "type": "string", + "title": "Path" + }, + "scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "description": "Scale of the LoRA weight. This is a multiplier applied to the LoRA weight when loading it.", + "title": "Scale", + "default": 1 + }, + "weight_name": { + "description": "Name of the LoRA weight. Only used if `path` is a HuggingFace repository, and is only required when the repository contains multiple LoRA weights.", + "type": "string", + "title": "Weight Name" + } + }, + "x-fal-order-properties": [ + "path", + "weight_name", + "scale" + ], + "required": [ + "path" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltxv-13b-098-distilled/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltxv-13b-098-distilled/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ltxv-13b-098-distilled": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltxv13b098DistilledInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltxv-13b-098-distilled/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltxv13b098DistilledOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/minimax/hailuo-02/pro/text-to-video", + "metadata": { + "display_name": "MiniMax Hailuo 02 [Pro] (Text to Video)", + "category": "text-to-video", + "description": "MiniMax Hailuo-02 Text To Video API (Pro, 1080p): Advanced video generation model with 1080p resolution", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:26.308Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Upscale-2.jpg", + "model_url": "https://fal.run/fal-ai/minimax/hailuo-02/pro/text-to-video", + "license_type": "commercial", + "date": "2025-06-18T00:41:11.159Z", + "group": { + "key": "hailuo-02", + "label": "Text to Video (pro) " + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 8, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/minimax/hailuo-02/pro/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/minimax/hailuo-02/pro/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/minimax/hailuo-02/pro/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Upscale-2.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/minimax/hailuo-02/pro/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/minimax/hailuo-02/pro/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request 
id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MinimaxHailuo02ProTextToVideoInput": { + "title": "ProTextToVideoHailuo02Input", + "type": "object", + "properties": { + "prompt_optimizer": { + "description": "Whether to use the model's prompt optimizer", + "type": "boolean", + "title": "Prompt Optimizer", + "default": true + }, + "prompt": { + "examples": [ + "A Galactic Smuggler is a rogue figure with a cybernetic arm and a well-worn coat that hints at many dangerous escapades across the galaxy. Their ship is filled with rare and exotic treasures from distant planets, concealed in hidden compartments, showing their expertise in illicit trade. Their belt is adorned with energy-based weapons, ready to be drawn at any moment to protect themselves or escape from tight situations. This character thrives in the shadows of space, navigating between the law and chaos with stealth and wit, always seeking the next big score while evading bounty hunters and law enforcement. The rogue's ship, rugged yet efficient, serves as both a home and a tool for their dangerous lifestyle. The treasures they collect reflect the diverse and intriguing worlds they've encountered—alien artifacts, rare minerals, and artifacts of unknown origin. Their reputation precedes them, with whispers of their dealings and the deadly encounters that often follow. A master of negotiation and deception, the Galactic Smuggler navigates the cosmos with an eye on the horizon, always one step ahead of those who pursue them." + ], + "maxLength": 2000, + "type": "string", + "title": "Prompt", + "minLength": 1 + } + }, + "x-fal-order-properties": [ + "prompt", + "prompt_optimizer" + ], + "required": [ + "prompt" + ] + }, + "MinimaxHailuo02ProTextToVideoOutput": { + "title": "TextToVideoHailuo02Output", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/kangaroo/_qEOfY3iKHsc86kqHUUh2_output.mp4" + } + ], + "description": "The generated video", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/minimax/hailuo-02/pro/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/hailuo-02/pro/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/minimax/hailuo-02/pro/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxHailuo02ProTextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/hailuo-02/pro/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxHailuo02ProTextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/bytedance/seedance/v1/pro/text-to-video", + "metadata": { + "display_name": "Seedance 1.0 Pro", + "category": "text-to-video", + "description": "Seedance 1.0 Pro, a high quality video generation model developed by Bytedance.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:27.328Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-1.jpg", + "model_url": "https://fal.run/fal-ai/bytedance/seedance/v1/pro/text-to-video", + "license_type": "commercial", + "date": "2025-06-16T16:22:01.447Z", + "group": { + "key": "seedance-v1", + "label": "Seedance 1.0 Pro -- Text to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/bytedance/seedance/v1/pro/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/bytedance/seedance/v1/pro/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/bytedance/seedance/v1/pro/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-1.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/bytedance/seedance/v1/pro/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/bytedance/seedance/v1/pro/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "BytedanceSeedanceV1ProTextToVideoInput": { + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "resolution", + "duration", + "camera_fixed", + "seed", + "enable_safety_checker" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A bright blue race car speeds along a snowy racetrack. [Low-angle shot] Captures several cars speeding along the racetrack through a harsh snowstorm. [Overhead shot] The camera gradually pulls upward, revealing the full race scene illuminated by storm lights" + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt used to generate the video" + }, + "resolution": { + "enum": [ + "480p", + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "Video resolution - 480p for faster generation, 720p for balance, 1080p for higher quality", + "default": "1080p" + }, + "duration": { + "enum": [ + "2", + "3", + "4", + "5", + "6", + "7", + "8", + "9", + "10", + "11", + "12" + ], + "title": "Duration", + "type": "string", + "description": "Duration of the video in seconds", + "default": "5" + }, + "aspect_ratio": { + "enum": [ + "21:9", + "16:9", + "4:3", + "1:1", + "3:4", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video", + "default": "16:9" + }, + "enable_safety_checker": { + "examples": [ + true + ], + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed to control video generation. Use -1 for random." + }, + "camera_fixed": { + "title": "Camera Fixed", + "type": "boolean", + "description": "Whether to fix the camera position", + "default": false + } + }, + "title": "SeedanceProTextToVideoInput", + "required": [ + "prompt" + ] + }, + "BytedanceSeedanceV1ProTextToVideoOutput": { + "x-fal-order-properties": [ + "video", + "seed" + ], + "type": "object", + "properties": { + "seed": { + "examples": [ + 42 + ], + "title": "Seed", + "type": "integer", + "description": "Seed used for generation" + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_inputs/seedance_pro_t2v.mp4" + } + ], + "title": "Video", + "description": "Generated video file", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "SeedanceProT2VVideoOutput", + "required": [ + "video", + "seed" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/bytedance/seedance/v1/pro/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedance/v1/pro/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedance/v1/pro/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BytedanceSeedanceV1ProTextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedance/v1/pro/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BytedanceSeedanceV1ProTextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/bytedance/seedance/v1/lite/text-to-video", + "metadata": { + "display_name": "Seedance 1.0 Lite", + "category": "text-to-video", + "description": "Seedance 1.0 Lite", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:28.159Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-2.jpg", + "model_url": "https://fal.run/fal-ai/bytedance/seedance/v1/lite/text-to-video", + "license_type": "commercial", + "date": "2025-06-13T04:30:22.765Z", + "group": { + "key": "seedance-v1", + "label": "Seedance 1.0 Lite -- Text to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/bytedance/seedance/v1/lite/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/bytedance/seedance/v1/lite/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/bytedance/seedance/v1/lite/text-to-video", + "category": "text-to-video", + "thumbnailUrl": 
"https://storage.googleapis.com/fal_cdn/fal/Sound-2.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/bytedance/seedance/v1/lite/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/bytedance/seedance/v1/lite/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "BytedanceSeedanceV1LiteTextToVideoInput": { + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "resolution", + "duration", + "camera_fixed", + "seed", + "enable_safety_checker" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A little dog is running in the sunshine. The camera follows the dog as it plays in a garden." + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt used to generate the video" + }, + "resolution": { + "enum": [ + "480p", + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "Video resolution - 480p for faster generation, 720p for higher quality", + "default": "720p" + }, + "duration": { + "enum": [ + "2", + "3", + "4", + "5", + "6", + "7", + "8", + "9", + "10", + "11", + "12" + ], + "title": "Duration", + "type": "string", + "description": "Duration of the video in seconds", + "default": "5" + }, + "aspect_ratio": { + "enum": [ + "21:9", + "16:9", + "4:3", + "1:1", + "3:4", + "9:16", + "9:21" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video", + "default": "16:9" + }, + "enable_safety_checker": { + "examples": [ + true + ], + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed to control video generation. Use -1 for random." 
+ }, + "camera_fixed": { + "title": "Camera Fixed", + "type": "boolean", + "description": "Whether to fix the camera position", + "default": false + } + }, + "title": "SeedanceTextToVideoInput", + "required": [ + "prompt" + ] + }, + "BytedanceSeedanceV1LiteTextToVideoOutput": { + "x-fal-order-properties": [ + "video", + "seed" + ], + "type": "object", + "properties": { + "seed": { + "examples": [ + 42 + ], + "title": "Seed", + "type": "integer", + "description": "Seed used for generation" + }, + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/penguin/qmLZSvOIzTKs6bDFXiEtH_video.mp4" + } + ], + "title": "Video", + "description": "Generated video file", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "SeedanceVideoOutput", + "required": [ + "video", + "seed" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/bytedance/seedance/v1/lite/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedance/v1/lite/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedance/v1/lite/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BytedanceSeedanceV1LiteTextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bytedance/seedance/v1/lite/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BytedanceSeedanceV1LiteTextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/v2.1/master/text-to-video", + "metadata": { + "display_name": "Kling 2.1 Master", + "category": "text-to-video", + "description": "Kling 2.1 Master: The premium endpoint for Kling 2.1, designed for top-tier text-to-video generation with unparalleled motion fluidity, cinematic visuals, and exceptional prompt precision.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:36.394Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Upscale-5.jpeg", + "model_url": "https://fal.run/fal-ai/kling-video/v2.1/master/text-to-video", + "license_type": "commercial", + "date": "2025-05-29T00:34:10.450Z", + "group": { + "key": "kling-video-v21", + "label": "2.1 Master (Text to Video)" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 5, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling-video/v2.1/master/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-video/v2.1/master/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/v2.1/master/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Upscale-5.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/v2.1/master/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/v2.1/master/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoV21MasterTextToVideoInput": { + "title": "TextToVideoV21MasterRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Warm, earthy tones bathe the scene as the potter's hands, rough and calloused, coax a shapeless lump of clay into a vessel of elegant curves, the slow, deliberate movements highlighted by the subtle shifting light; the clay's cool, damp texture contrasts sharply with the warmth of the potter's touch, creating a captivating interplay between material and maker." + ], + "title": "Prompt", + "type": "string", + "maxLength": 2500 + }, + "duration": { + "enum": [ + "5", + "10" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds", + "default": "5" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video frame", + "default": "16:9" + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "maxLength": 2500, + "default": "blur, distort, and low quality" + }, + "cfg_scale": { + "minimum": 0, + "title": "Cfg Scale", + "type": "number", + "maximum": 1, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt.\n ", + "default": 0.5 + } + }, + "x-fal-order-properties": [ + "prompt", + "duration", + "aspect_ratio", + "negative_prompt", + "cfg_scale" + ], + "required": [ + "prompt" + ] + }, + "KlingVideoV21MasterTextToVideoOutput": { + "title": "TextToVideoV21MasterOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/lion/0wTlhR7GCXFI-_BZXGy99_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/v2.1/master/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v2.1/master/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v2.1/master/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV21MasterTextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v2.1/master/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV21MasterTextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "veed/avatars/text-to-video", + "metadata": { + "display_name": "Avatars", + "category": "text-to-video", + "description": "Generate high-quality videos with UGC-like avatars from text", + "status": "active", + "tags": [ + "lipsync", + "text-to-video" + ], + "updated_at": "2026-01-26T21:43:38.259Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/veed_logo.webp", + "model_url": "https://fal.run/veed/avatars/text-to-video", + "license_type": "commercial", + "date": "2025-05-28T14:20:10.759Z", + "group": { + "key": "veed-avatars-1", + "label": "Text To Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for veed/avatars/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the veed/avatars/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "veed/avatars/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/veed_logo.webp", + "playgroundUrl": "https://fal.ai/models/veed/avatars/text-to-video", + "documentationUrl": "https://fal.ai/models/veed/avatars/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "AvatarsTextToVideoInput": { + "title": "Text2VideoInput", + "type": "object", + "properties": { + "text": { + "examples": [ + "\nEver wondered how to get that flawless glow? \nIntroducing our new skincare line, designed for real life. \nStep one: Cleanse with our gentle, nourishing formula. \nStep two: Apply our hydrating serum for that dewy look. \nStep three: Lock it in with our lightweight moisturizer. \nFeel the difference with every application. \nSee the glow? That's the magic of our skincare. \nUse code 'GLOW20' for an exclusive discount. \nJoin the skincare revolution today!\n" + ], + "title": "Text", + "type": "string" + }, + "avatar_id": { + "enum": [ + "emily_vertical_primary", + "emily_vertical_secondary", + "marcus_vertical_primary", + "marcus_vertical_secondary", + "mira_vertical_primary", + "mira_vertical_secondary", + "jasmine_vertical_primary", + "jasmine_vertical_secondary", + "jasmine_vertical_walking", + "aisha_vertical_walking", + "elena_vertical_primary", + "elena_vertical_secondary", + "any_male_vertical_primary", + "any_female_vertical_primary", + "any_male_vertical_secondary", + "any_female_vertical_secondary", + "any_female_vertical_walking", + "emily_primary", + "emily_side", + "marcus_primary", + "marcus_side", + "aisha_walking", + "elena_primary", + "elena_side", + "any_male_primary", + "any_female_primary", + "any_male_side", + "any_female_side" + ], + "description": "The avatar to use for the video", + "type": "string", + "title": "Avatar Id" + } + }, + "x-fal-order-properties": [ + "avatar_id", + "text" + ], + "required": [ + "avatar_id", + "text" + ] + }, + "AvatarsTextToVideoOutput": { + "title": "AvatarsAppOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "content_type": "video/mp4", + "url": "https://v3.fal.media/files/panda/kt9d4vZ8Mfw_WzYnvr2Q0_tmp0ir4znsr.mp4" + } + ], + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/veed/avatars/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/veed/avatars/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/veed/avatars/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AvatarsTextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/veed/avatars/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AvatarsTextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx-video-13b-dev", + "metadata": { + "display_name": "LTX Video-0.9.7 13B", + "category": "text-to-video", + "description": "Generate videos from prompts using LTX Video-0.9.7 13B and custom LoRA", + "status": "active", + "tags": [ + "video", + "ltx-video", + "text-to-video" + ], + "updated_at": "2026-01-26T21:43:42.019Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Training-4.jpg", + "model_url": "https://fal.run/fal-ai/ltx-video-13b-dev", + "license_type": "commercial", + "date": "2025-05-17T01:51:47.329Z", + "group": { + "key": "ltx-video-13b", + "label": "Text to Video" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/ltx-video-trainer" + ], + 
"inference_endpoint_ids": [ + "fal-ai/ltx-video-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-video-13b-dev", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx-video-13b-dev queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-video-13b-dev", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Training-4.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx-video-13b-dev", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx-video-13b-dev/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LtxVideo13bDevInput": { + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "loras", + "resolution", + "aspect_ratio", + "seed", + "num_frames", + "first_pass_num_inference_steps", + "first_pass_skip_final_steps", + "second_pass_num_inference_steps", + "second_pass_skip_initial_steps", + "frame_rate", + "expand_prompt", + "reverse_video", + "enable_safety_checker" + ], + "type": "object", + "properties": { + "second_pass_skip_initial_steps": { + "description": "The number of inference steps to skip in the initial steps of the second pass. By skipping some steps at the beginning, the second pass can focus on smaller details instead of larger changes.", + "type": "integer", + "minimum": 1, + "maximum": 50, + "title": "Second Pass Skip Initial Steps", + "examples": [ + 17 + ], + "default": 17 + }, + "first_pass_num_inference_steps": { + "description": "Number of inference steps during the first pass.", + "type": "integer", + "minimum": 2, + "maximum": 50, + "title": "First Pass Num Inference Steps", + "examples": [ + 30 + ], + "default": 30 + }, + "frame_rate": { + "description": "The frame rate of the video.", + "type": "integer", + "minimum": 1, + "maximum": 60, + "title": "Frame Rate", + "examples": [ + 30 + ], + "default": 30 + }, + "prompt": { + "examples": [ + "A cinematic fast-tracking shot follows a vintage, teal camper van as it descends a winding mountain trail. The van, slightly weathered but well-maintained, is the central focus, its retro design emphasized by the motion blur. Medium shot reveals the dusty, ochre trail, edged with vibrant green pine trees. Close-up on the van's tires shows the gravel spraying, highlighting the speed and rugged terrain. Sunlight filters through the trees, casting dappled shadows on the van and the trail. The background is a hazy, majestic mountain range bathed in warm, golden light. The overall mood is adventurous and exhilarating. High resolution 4k movie scene." 
+ ], + "title": "Prompt", + "type": "string", + "description": "Text prompt to guide generation" + }, + "reverse_video": { + "examples": [ + false + ], + "title": "Reverse Video", + "type": "boolean", + "description": "Whether to reverse the video.", + "default": false + }, + "expand_prompt": { + "examples": [ + false + ], + "title": "Expand Prompt", + "type": "boolean", + "description": "Whether to expand the prompt using a language model.", + "default": false + }, + "loras": { + "title": "Loras", + "type": "array", + "description": "LoRA weights to use for generation", + "items": { + "$ref": "#/components/schemas/LoRAWeight" + }, + "default": [] + }, + "second_pass_num_inference_steps": { + "description": "Number of inference steps during the second pass.", + "type": "integer", + "minimum": 2, + "maximum": 50, + "title": "Second Pass Num Inference Steps", + "examples": [ + 30 + ], + "default": 30 + }, + "num_frames": { + "description": "The number of frames in the video.", + "type": "integer", + "minimum": 9, + "maximum": 161, + "title": "Num Frames", + "examples": [ + 121 + ], + "default": 121 + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for generation", + "default": "worst quality, inconsistent motion, blurry, jittery, distorted" + }, + "resolution": { + "enum": [ + "480p", + "720p" + ], + "title": "Resolution", + "type": "string", + "examples": [ + "720p" + ], + "description": "Resolution of the generated video (480p or 720p).", + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "9:16", + "1:1", + "16:9" + ], + "title": "Aspect Ratio", + "type": "string", + "examples": [ + "16:9" + ], + "description": "Aspect ratio of the generated video (16:9, 1:1 or 9:16).", + "default": "16:9" + }, + "first_pass_skip_final_steps": { + "minimum": 0, + "maximum": 50, + "type": "integer", + "title": "First Pass Skip Final Steps", + "description": "Number of inference steps to skip in the final steps of the first pass. By skipping some steps at the end, the first pass can focus on larger changes instead of smaller details.", + "default": 3 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for generation" + } + }, + "title": "TextToVideoInput", + "required": [ + "prompt" + ] + }, + "LtxVideo13bDevOutput": { + "x-fal-order-properties": [ + "video", + "prompt", + "seed" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A cinematic fast-tracking shot follows a vintage, teal camper van as it descends a winding mountain trail. The van, slightly weathered but well-maintained, is the central focus, its retro design emphasized by the motion blur. Medium shot reveals the dusty, ochre trail, edged with vibrant green pine trees. Close-up on the van's tires shows the gravel spraying, highlighting the speed and rugged terrain. Sunlight filters through the trees, casting dappled shadows on the van and the trail. The background is a hazy, majestic mountain range bathed in warm, golden light. The overall mood is adventurous and exhilarating. High resolution 4k movie scene." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." 
+ }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/ltxv-multiscale-text-to-video.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "TextToVideoOutput", + "required": [ + "video", + "prompt", + "seed" + ] + }, + "LoRAWeight": { + "x-fal-order-properties": [ + "path", + "weight_name", + "scale" + ], + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Scale", + "description": "Scale of the LoRA weight. This is a multiplier applied to the LoRA weight when loading it.", + "default": 1 + }, + "weight_name": { + "title": "Weight Name", + "type": "string", + "description": "Name of the LoRA weight. Only used if `path` is a HuggingFace repository, and is only required when the repository contains multiple LoRA weights." + } + }, + "title": "LoRAWeight", + "required": [ + "path" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-video-13b-dev/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-13b-dev/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-13b-dev": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LtxVideo13bDevInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-13b-dev/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LtxVideo13bDevOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx-video-13b-distilled", + "metadata": { + "display_name": "LTX Video-0.9.7 13B Distilled", + "category": "text-to-video", + "description": "Generate videos from prompts using LTX Video-0.9.7 13B Distilled and custom LoRA", + "status": "active", + "tags": [ + "video", + "ltx-video", + "text-to-video" + ], + "updated_at": "2026-01-26T21:43:42.142Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Training-4.jpg", + "model_url": "https://fal.run/fal-ai/ltx-video-13b-distilled", + "license_type": "commercial", + "date": "2025-05-17T01:49:06.775Z", + "group": { + "key": "ltx-video-13b-distilled", + "label": "Text to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/ltx-video-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/ltx-video-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-video-13b-distilled", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx-video-13b-distilled queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-video-13b-distilled", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Training-4.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx-video-13b-distilled", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx-video-13b-distilled/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "LtxVideo13bDistilledInput": { + "title": "DistilledTextToVideoInput", + "type": "object", + "properties": { + "second_pass_skip_initial_steps": { + "description": "The number of inference steps to skip in the initial steps of the second pass. By skipping some steps at the beginning, the second pass can focus on smaller details instead of larger changes.", + "type": "integer", + "minimum": 1, + "title": "Second Pass Skip Initial Steps", + "examples": [ + 5 + ], + "maximum": 20, + "default": 5 + }, + "first_pass_num_inference_steps": { + "description": "Number of inference steps during the first pass.", + "type": "integer", + "minimum": 2, + "title": "First Pass Num Inference Steps", + "examples": [ + 8 + ], + "maximum": 20, + "default": 8 + }, + "frame_rate": { + "description": "The frame rate of the video.", + "type": "integer", + "minimum": 1, + "title": "Frame Rate", + "examples": [ + 30 + ], + "maximum": 60, + "default": 30 + }, + "reverse_video": { + "examples": [ + false + ], + "title": "Reverse Video", + "type": "boolean", + "description": "Whether to reverse the video.", + "default": false + }, + "prompt": { + "examples": [ + "A cinematic fast-tracking shot follows a vintage, teal camper van as it descends a winding mountain trail. The van, slightly weathered but well-maintained, is the central focus, its retro design emphasized by the motion blur. Medium shot reveals the dusty, ochre trail, edged with vibrant green pine trees. Close-up on the van's tires shows the gravel spraying, highlighting the speed and rugged terrain. Sunlight filters through the trees, casting dappled shadows on the van and the trail. The background is a hazy, majestic mountain range bathed in warm, golden light. The overall mood is adventurous and exhilarating. High resolution 4k movie scene." 
+ ], + "title": "Prompt", + "type": "string", + "description": "Text prompt to guide generation" + }, + "expand_prompt": { + "examples": [ + false + ], + "title": "Expand Prompt", + "type": "boolean", + "description": "Whether to expand the prompt using a language model.", + "default": false + }, + "loras": { + "title": "Loras", + "type": "array", + "description": "LoRA weights to use for generation", + "items": { + "$ref": "#/components/schemas/LoRAWeight" + }, + "default": [] + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "num_frames": { + "description": "The number of frames in the video.", + "type": "integer", + "minimum": 9, + "title": "Num Frames", + "examples": [ + 121 + ], + "maximum": 161, + "default": 121 + }, + "second_pass_num_inference_steps": { + "description": "Number of inference steps during the second pass.", + "type": "integer", + "minimum": 2, + "title": "Second Pass Num Inference Steps", + "examples": [ + 8 + ], + "maximum": 20, + "default": 8 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for generation", + "default": "worst quality, inconsistent motion, blurry, jittery, distorted" + }, + "resolution": { + "enum": [ + "480p", + "720p" + ], + "title": "Resolution", + "type": "string", + "examples": [ + "720p" + ], + "description": "Resolution of the generated video (480p or 720p).", + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "9:16", + "1:1", + "16:9" + ], + "title": "Aspect Ratio", + "type": "string", + "examples": [ + "16:9" + ], + "description": "Aspect ratio of the generated video (16:9, 1:1 or 9:16).", + "default": "16:9" + }, + "first_pass_skip_final_steps": { + "minimum": 0, + "title": "First Pass Skip Final Steps", + "type": "integer", + "maximum": 20, + "description": "Number of inference steps to skip in the final steps of the first pass. By skipping some steps at the end, the first pass can focus on larger changes instead of smaller details.", + "default": 1 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for generation" + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "loras", + "resolution", + "aspect_ratio", + "seed", + "num_frames", + "first_pass_num_inference_steps", + "first_pass_skip_final_steps", + "second_pass_num_inference_steps", + "second_pass_skip_initial_steps", + "frame_rate", + "expand_prompt", + "reverse_video", + "enable_safety_checker" + ], + "description": "Distilled model input", + "required": [ + "prompt" + ] + }, + "LtxVideo13bDistilledOutput": { + "title": "TextToVideoOutput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A cinematic fast-tracking shot follows a vintage, teal camper van as it descends a winding mountain trail. The van, slightly weathered but well-maintained, is the central focus, its retro design emphasized by the motion blur. Medium shot reveals the dusty, ochre trail, edged with vibrant green pine trees. Close-up on the van's tires shows the gravel spraying, highlighting the speed and rugged terrain. Sunlight filters through the trees, casting dappled shadows on the van and the trail. The background is a hazy, majestic mountain range bathed in warm, golden light. The overall mood is adventurous and exhilarating. High resolution 4k movie scene." 
+ ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/ltxv-multiscale-text-to-video.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "prompt", + "seed" + ], + "required": [ + "video", + "prompt", + "seed" + ] + }, + "LoRAWeight": { + "title": "LoRAWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "title": "Scale", + "type": "number", + "maximum": 4, + "description": "Scale of the LoRA weight. This is a multiplier applied to the LoRA weight when loading it.", + "default": 1 + }, + "weight_name": { + "title": "Weight Name", + "type": "string", + "description": "Name of the LoRA weight. Only used if `path` is a HuggingFace repository, and is only required when the repository contains multiple LoRA weights." + } + }, + "x-fal-order-properties": [ + "path", + "weight_name", + "scale" + ], + "required": [ + "path" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-video-13b-distilled/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-13b-distilled/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-13b-distilled": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LtxVideo13bDistilledInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-13b-distilled/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LtxVideo13bDistilledOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pixverse/v4.5/text-to-video/fast", + "metadata": { + "display_name": "Pixverse", + "category": "text-to-video", + "description": "Generate high quality and fast video clips from text and image prompts using PixVerse v4.5 fast", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:43.879Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "model_url": "https://fal.run/fal-ai/pixverse/v4.5/text-to-video/fast", + "license_type": "commercial", + "date": "2025-05-15T15:51:13.967Z", + "group": { + "key": "pixverse-45", + "label": "Text to Video v4.5 (Fast)" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pixverse/v4.5/text-to-video/fast", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pixverse/v4.5/text-to-video/fast queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pixverse/v4.5/text-to-video/fast", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/pixverse/v4.5/text-to-video/fast", + "documentationUrl": "https://fal.ai/models/fal-ai/pixverse/v4.5/text-to-video/fast/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "PixverseV45TextToVideoFastInput": { + "title": "FastTextToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Epic low-cut camera capture of a girl clad in ultraviolet threads, Peter Max art style depiction, luminous diamond skin glistening under a vast moon's radiance, embodied in a superhuman flight among mystical ruins, symbolizing a deity's ritual ascent, hyper-detailed" + ], + "title": "Prompt", + "type": "string" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "4:3", + "1:1", + "3:4", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video", + "default": "16:9" + }, + "resolution": { + "enum": [ + "360p", + "540p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video", + "default": "720p" + }, + "style": { + "enum": [ + "anime", + "3d_animation", + "clay", + "comic", + "cyberpunk" + ], + "title": "Style", + "type": "string", + "description": "The style of the generated video" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n " + }, + "negative_prompt": { + "examples": [ + "blurry, low quality, low resolution, pixelated, noisy, grainy, out of focus, poorly lit, poorly exposed, poorly composed, poorly framed, poorly cropped, poorly color corrected, poorly color graded" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt to be used for the generation", + "default": "" + } + }, + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "resolution", + "negative_prompt", + "style", + "seed" + ], + "required": [ + "prompt" + ] + }, + "PixverseV45TextToVideoFastOutput": { + "title": "VideoOutputV4", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 5485412, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://fal.media/files/lion/_fVEU5nzHND_fHGQUhXEm_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pixverse/v4.5/text-to-video/fast/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v4.5/text-to-video/fast/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v4.5/text-to-video/fast": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV45TextToVideoFastInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v4.5/text-to-video/fast/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV45TextToVideoFastOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pixverse/v4.5/text-to-video", + "metadata": { + "display_name": "Pixverse", + "category": "text-to-video", + "description": "Generate high quality video clips from text and image prompts using PixVerse v4.5", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:44.004Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "model_url": "https://fal.run/fal-ai/pixverse/v4.5/text-to-video", + "license_type": "commercial", + "date": "2025-05-15T15:48:45.845Z", + "group": { + "key": "pixverse-45", + "label": "Text to Video v4.5" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pixverse/v4.5/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pixverse/v4.5/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pixverse/v4.5/text-to-video", + 
"category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/pixverse/v4.5/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/pixverse/v4.5/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PixverseV45TextToVideoInput": { + "title": "TextToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Epic low-cut camera capture of a girl clad in ultraviolet threads, Peter Max art style depiction, luminous diamond skin glistening under a vast moon's radiance, embodied in a superhuman flight among mystical ruins, symbolizing a deity's ritual ascent, hyper-detailed" + ], + "title": "Prompt", + "type": "string" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "4:3", + "1:1", + "3:4", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video", + "default": "16:9" + }, + "resolution": { + "enum": [ + "360p", + "540p", + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video", + "default": "720p" + }, + "style": { + "enum": [ + "anime", + "3d_animation", + "clay", + "comic", + "cyberpunk" + ], + "title": "Style", + "type": "string", + "description": "The style of the generated video" + }, + "duration": { + "enum": [ + "5", + "8" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds. 8s videos cost double. 
1080p videos are limited to 5 seconds", + "default": "5" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n " + }, + "negative_prompt": { + "examples": [ + "blurry, low quality, low resolution, pixelated, noisy, grainy, out of focus, poorly lit, poorly exposed, poorly composed, poorly framed, poorly cropped, poorly color corrected, poorly color graded" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt to be used for the generation", + "default": "" + } + }, + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "resolution", + "duration", + "negative_prompt", + "style", + "seed" + ], + "required": [ + "prompt" + ] + }, + "PixverseV45TextToVideoOutput": { + "title": "VideoOutputV4", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 5485412, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://fal.media/files/lion/_fVEU5nzHND_fHGQUhXEm_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pixverse/v4.5/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v4.5/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v4.5/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV45TextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v4.5/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV45TextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/vidu/q1/text-to-video", + "metadata": { + "display_name": "Vidu Text to Video", + "category": "text-to-video", + "description": "Vidu Q1 Text to Video generates high-quality 1080p videos with exceptional visual quality and motion diversity", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:45.762Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Upscale-1.jpeg", + "model_url": "https://fal.run/fal-ai/vidu/q1/text-to-video", + "license_type": "commercial", + "date": "2025-05-09T03:10:36.806Z", + "group": { + "key": "vidu-q1", + "label": "Text to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/vidu/q1/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/vidu/q1/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/vidu/q1/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Upscale-1.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/vidu/q1/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/vidu/q1/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ViduQ1TextToVideoInput": { + "title": "Q1TextToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "In an ultra-realistic fashion photography style featuring light blue and pale amber tones, an astronaut in a spacesuit walks through the fog. The background consists of enchanting white and golden lights, creating a minimalist still life and an impressive panoramic scene." + ], + "title": "Prompt", + "type": "string", + "maxLength": 1500, + "description": "Text prompt for video generation, max 1500 characters" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the output video", + "default": "16:9" + }, + "style": { + "enum": [ + "general", + "anime" + ], + "title": "Style", + "type": "string", + "description": "The style of output video", + "default": "general" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Seed for the random number generator" + }, + "movement_amplitude": { + "enum": [ + "auto", + "small", + "medium", + "large" + ], + "title": "Movement Amplitude", + "type": "string", + "description": "The movement amplitude of objects in the frame", + "default": "auto" + } + }, + "x-fal-order-properties": [ + "prompt", + "style", + "seed", + "aspect_ratio", + "movement_amplitude" + ], + "required": [ + "prompt" + ] + }, + "ViduQ1TextToVideoOutput": { + "title": "Q1TextToVideoOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://fal.media/files/penguin/senyvDPQAk8Fvt5voX3NU_output.mp4" + } + ], + "title": "Video", + "description": "The generated video using the Q1 model", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/vidu/q1/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/vidu/q1/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/vidu/q1/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ViduQ1TextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/vidu/q1/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ViduQ1TextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/magi", + "metadata": { + "display_name": "MAGI-1", + "category": "text-to-video", + "description": "MAGI-1 is a video generation model with exceptional understanding of physical interactions and cinematic prompts", + "status": "active", + "tags": [ + "text-to-video" + ], + "updated_at": "2026-01-26T21:43:52.785Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-1.jpg", + "model_url": "https://fal.run/fal-ai/magi", + "license_type": "commercial", + "date": "2025-04-23T22:38:37.209Z", + "group": { + "key": "magi", + "label": "Text-to-Video" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 9, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/magi", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/magi queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/magi", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-1.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/magi", + "documentationUrl": "https://fal.ai/models/fal-ai/magi/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." 
+ }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MagiInput": { + "title": "MagiTextToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Close-up shot: the old sea captain stares intently, pipe in mouth, wisps of smoke curling around his weathered face. The camera begins a slow clockwise orbit, pulling back. Finally, the camera rises high above, revealing the entire wooden sailing ship cutting through the waves, the captain unmoved, gazing toward the distant horizon." + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt to guide video generation." + }, + "resolution": { + "enum": [ + "480p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video (480p or 720p). 480p is 0.5 billing units, and 720p is 1 billing unit.", + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "9:16", + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio of the generated video. If 'auto', the aspect ratio will be determined automatically based on the input image.", + "default": "auto" + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "num_inference_steps": { + "enum": [ + 4, + 8, + 16, + 32, + 64 + ], + "title": "Num Inference Steps", + "type": "integer", + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "default": 16 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility. If None, a random seed is chosen." + }, + "num_frames": { + "minimum": 96, + "title": "Num Frames", + "type": "integer", + "maximum": 192, + "description": "Number of frames to generate. Must be between 96 and 192 (inclusive). Each additional 24 frames beyond 96 incurs an additional billing unit.", + "default": 96 + } + }, + "x-fal-order-properties": [ + "prompt", + "num_frames", + "seed", + "resolution", + "num_inference_steps", + "enable_safety_checker", + "aspect_ratio" + ], + "required": [ + "prompt" + ] + }, + "MagiOutput": { + "title": "MagiResponse", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/elephant/Foq1oFk7e5_dzujsITYfl_f7c4f24d-a68d-4b8b-8199-320002a99ac8.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." 
+ }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/magi/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/magi/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/magi": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MagiInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/magi/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MagiOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/magi-distilled", + "metadata": { + "display_name": "MAGI-1 (Distilled)", + "category": "text-to-video", + "description": "MAGI-1 distilled is a faster video generation model with exceptional understanding of physical interactions and cinematic prompts", + "status": "active", + "tags": [ + "text-to-video" + ], + "updated_at": "2026-01-26T21:43:54.270Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-1.jpg", + "model_url": "https://fal.run/fal-ai/magi-distilled", + "license_type": "commercial", + "date": "2025-04-22T03:34:30.792Z", + "group": { + "key": "magi", + "label": "Text-to-Video (Distilled)" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/magi-distilled", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/magi-distilled queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/magi-distilled", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-1.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/magi-distilled", + "documentationUrl": "https://fal.ai/models/fal-ai/magi-distilled/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MagiDistilledInput": { + "title": "MagiTextToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Close-up shot: the old sea captain stares intently, pipe in mouth, wisps of smoke curling around his weathered face. 
The camera begins a slow clockwise orbit, pulling back. Finally, the camera rises high above, revealing the entire wooden sailing ship cutting through the waves, the captain unmoved, gazing toward the distant horizon." + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt to guide video generation." + }, + "resolution": { + "enum": [ + "480p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video (480p or 720p). 480p is 0.5 billing units, and 720p is 1 billing unit.", + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "9:16", + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio of the generated video. If 'auto', the aspect ratio will be determined automatically based on the input image.", + "default": "auto" + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "num_inference_steps": { + "enum": [ + 4, + 8, + 16, + 32 + ], + "title": "Num Inference Steps", + "type": "integer", + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "default": 16 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility. If None, a random seed is chosen." + }, + "num_frames": { + "minimum": 96, + "title": "Num Frames", + "type": "integer", + "maximum": 192, + "description": "Number of frames to generate. Must be between 96 and 192 (inclusive). Each additional 24 frames beyond 96 incurs an additional billing unit.", + "default": 96 + } + }, + "x-fal-order-properties": [ + "prompt", + "num_frames", + "seed", + "resolution", + "num_inference_steps", + "enable_safety_checker", + "aspect_ratio" + ], + "required": [ + "prompt" + ] + }, + "MagiDistilledOutput": { + "title": "MagiResponse", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/rabbit/lTH9PY_LQG0FjueBxMfDN_0395dec3-0c4a-4c25-8399-ebb198b73a30.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/magi-distilled/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/magi-distilled/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/magi-distilled": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MagiDistilledInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/magi-distilled/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MagiDistilledOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pixverse/v4/text-to-video", + "metadata": { + "display_name": "PixVerse v4: Text to Video", + "category": "text-to-video", + "description": "Generate high quality video clips from text and image prompts using PixVerse v4", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:59.003Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "model_url": "https://fal.run/fal-ai/pixverse/v4/text-to-video", + "license_type": "commercial", + "date": "2025-04-01T02:42:23.743Z", + "group": { + "key": "pixverse", + "label": "Text to Video v4" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pixverse/v4/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pixverse/v4/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pixverse/v4/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + 
"playgroundUrl": "https://fal.ai/models/fal-ai/pixverse/v4/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/pixverse/v4/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PixverseV4TextToVideoInput": { + "title": "TextToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Epic low-cut camera capture of a girl clad in ultraviolet threads, Peter Max art style depiction, luminous diamond skin glistening under a vast moon's radiance, embodied in a superhuman flight among mystical ruins, symbolizing a deity's ritual ascent, hyper-detailed" + ], + "title": "Prompt", + "type": "string" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "4:3", + "1:1", + "3:4", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video", + "default": "16:9" + }, + "resolution": { + "enum": [ + "360p", + "540p", + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video", + "default": "720p" + }, + "style": { + "enum": [ + "anime", + "3d_animation", + "clay", + "comic", + "cyberpunk" + ], + "title": "Style", + "type": "string", + "description": "The style of the generated video" + }, + "duration": { + "enum": [ + "5", + "8" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds. 8s videos cost double. 
1080p videos are limited to 5 seconds", + "default": "5" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n " + }, + "negative_prompt": { + "examples": [ + "blurry, low quality, low resolution, pixelated, noisy, grainy, out of focus, poorly lit, poorly exposed, poorly composed, poorly framed, poorly cropped, poorly color corrected, poorly color graded" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt to be used for the generation", + "default": "" + } + }, + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "resolution", + "duration", + "negative_prompt", + "style", + "seed" + ], + "required": [ + "prompt" + ] + }, + "PixverseV4TextToVideoOutput": { + "title": "VideoOutputV4", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 5485412, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://fal.media/files/lion/_fVEU5nzHND_fHGQUhXEm_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pixverse/v4/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v4/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v4/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV4TextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v4/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV4TextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pixverse/v4/text-to-video/fast", + "metadata": { + "display_name": "PixVerse v4: Text to Video Fast", + "category": "text-to-video", + "description": "Generate high quality and fast video clips from text and image prompts using PixVerse v4 fast", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:59.251Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "model_url": "https://fal.run/fal-ai/pixverse/v4/text-to-video/fast", + "license_type": "commercial", + "date": "2025-04-01T02:33:43.392Z", + "group": { + "key": "pixverse", + "label": "Text to Video v4 (Fast)" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pixverse/v4/text-to-video/fast", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pixverse/v4/text-to-video/fast queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pixverse/v4/text-to-video/fast", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/pixverse/v4/text-to-video/fast", + "documentationUrl": "https://fal.ai/models/fal-ai/pixverse/v4/text-to-video/fast/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "PixverseV4TextToVideoFastInput": { + "title": "FastTextToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Epic low-cut camera capture of a girl clad in ultraviolet threads, Peter Max art style depiction, luminous diamond skin glistening under a vast moon's radiance, embodied in a superhuman flight among mystical ruins, symbolizing a deity's ritual ascent, hyper-detailed" + ], + "title": "Prompt", + "type": "string" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "4:3", + "1:1", + "3:4", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video", + "default": "16:9" + }, + "resolution": { + "enum": [ + "360p", + "540p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video", + "default": "720p" + }, + "style": { + "enum": [ + "anime", + "3d_animation", + "clay", + "comic", + "cyberpunk" + ], + "title": "Style", + "type": "string", + "description": "The style of the generated video" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n " + }, + "negative_prompt": { + "examples": [ + "blurry, low quality, low resolution, pixelated, noisy, grainy, out of focus, poorly lit, poorly exposed, poorly composed, poorly framed, poorly cropped, poorly color corrected, poorly color graded" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt to be used for the generation", + "default": "" + } + }, + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "resolution", + "negative_prompt", + "style", + "seed" + ], + "required": [ + "prompt" + ] + }, + "PixverseV4TextToVideoFastOutput": { + "title": "VideoOutputV4", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 5485412, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://fal.media/files/lion/_fVEU5nzHND_fHGQUhXEm_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pixverse/v4/text-to-video/fast/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v4/text-to-video/fast/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v4/text-to-video/fast": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV4TextToVideoFastInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v4/text-to-video/fast/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV4TextToVideoFastOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/lipsync/audio-to-video", + "metadata": { + "display_name": "Kling LipSync Audio-to-Video", + "category": "text-to-video", + "description": "Kling LipSync is an audio-to-video model that generates realistic lip movements from audio input.", + "status": "active", + "tags": [ + "audio to video", + "lipsync" + ], + "updated_at": "2026-01-26T21:43:59.880Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/elephant/njNipNC0TkA9fJguiS1NB_c75b090e2ebb4d9581d21d66cfc4a0d3.jpg", + "model_url": "https://fal.run/fal-ai/kling-video/lipsync/audio-to-video", + "license_type": "commercial", + "date": "2025-03-27T00:00:00.000Z", + "group": { + "key": "kling-video-lipsync", + "label": "LipSync (Audio to Video)" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 12, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling-video/lipsync/audio-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the 
fal-ai/kling-video/lipsync/audio-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/lipsync/audio-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/elephant/njNipNC0TkA9fJguiS1NB_c75b090e2ebb4d9581d21d66cfc4a0d3.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/lipsync/audio-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/lipsync/audio-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoLipsyncAudioToVideoInput": { + "title": "LipsyncA2VRequest", + "type": "object", + "properties": { + "video_url": { + "examples": [ + "https://fal.media/files/koala/8teUPbRRMtAUTORDvqy0l.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "The URL of the video to generate the lip sync for. Supports .mp4/.mov, ≤100MB, 2–10s, 720p/1080p only, width/height 720–1920px." + }, + "audio_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/kling/kling-audio.mp3" + ], + "title": "Audio Url", + "type": "string", + "description": "The URL of the audio to generate the lip sync for. Minimum duration is 2s and maximum duration is 60s. Maximum file size is 5MB." + } + }, + "x-fal-order-properties": [ + "video_url", + "audio_url" + ], + "required": [ + "video_url", + "audio_url" + ] + }, + "KlingVideoLipsyncAudioToVideoOutput": { + "title": "LipsyncA2VOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/kling/kling_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/lipsync/audio-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/lipsync/audio-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/lipsync/audio-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoLipsyncAudioToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/lipsync/audio-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoLipsyncAudioToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/lipsync/text-to-video", + "metadata": { + "display_name": "Kling LipSync Text-to-Video", + "category": "text-to-video", + "description": "Kling LipSync is a text-to-video model that generates realistic lip movements from text input.", + "status": "active", + "tags": [ + "text to video", + "lipsync" + ], + "updated_at": "2026-01-26T21:44:00.006Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/monkey/GoSnDOnX0Tea08N7iI7oM.jpeg", + "model_url": "https://fal.run/fal-ai/kling-video/lipsync/text-to-video", + "license_type": "commercial", + "date": "2025-03-27T00:00:00.000Z", + "group": { + "key": "kling-video-lipsync", + "label": "LipSync (Text to Video)" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 12, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling-video/lipsync/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-video/lipsync/text-to-video queue.", + 
"x-fal-metadata": { + "endpointId": "fal-ai/kling-video/lipsync/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://fal.media/files/monkey/GoSnDOnX0Tea08N7iI7oM.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/lipsync/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/lipsync/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoLipsyncTextToVideoInput": { + "title": "LipsyncT2VRequest", + "type": "object", + "properties": { + "text": { + "examples": [ + "Mental health is as important as physical health, shaping our emotions, thoughts, and daily interactions." + ], + "title": "Text", + "type": "string", + "maxLength": 120, + "description": "Text content for lip-sync video generation. Max 120 characters." + }, + "video_url": { + "examples": [ + "https://fal.media/files/koala/8teUPbRRMtAUTORDvqy0l.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "The URL of the video to generate the lip sync for. Supports .mp4/.mov, ≤100MB, 2-60s, 720p/1080p only, width/height 720–1920px. If validation fails, an error is returned." 
+ }, + "voice_id": { + "enum": [ + "genshin_vindi2", + "zhinen_xuesheng", + "AOT", + "ai_shatang", + "genshin_klee2", + "genshin_kirara", + "ai_kaiya", + "oversea_male1", + "ai_chenjiahao_712", + "girlfriend_4_speech02", + "chat1_female_new-3", + "chat_0407_5-1", + "cartoon-boy-07", + "uk_boy1", + "cartoon-girl-01", + "PeppaPig_platform", + "ai_huangzhong_712", + "ai_huangyaoshi_712", + "ai_laoguowang_712", + "chengshu_jiejie", + "you_pingjing", + "calm_story1", + "uk_man2", + "laopopo_speech02", + "heainainai_speech02", + "reader_en_m-v1", + "commercial_lady_en_f-v1", + "tiyuxi_xuedi", + "tiexin_nanyou", + "girlfriend_1_speech02", + "girlfriend_2_speech02", + "zhuxi_speech02", + "uk_oldman3", + "dongbeilaotie_speech02", + "chongqingxiaohuo_speech02", + "chuanmeizi_speech02", + "chaoshandashu_speech02", + "ai_taiwan_man2_speech02", + "xianzhanggui_speech02", + "tianjinjiejie_speech02", + "diyinnansang_DB_CN_M_04-v2", + "yizhipiannan-v1", + "guanxiaofang-v2", + "tianmeixuemei-v1", + "daopianyansang-v1", + "mengwa-v1" + ], + "title": "Voice Id", + "type": "string", + "examples": [ + "genshin_klee2" + ], + "description": "Voice ID to use for speech synthesis" + }, + "voice_language": { + "enum": [ + "zh", + "en" + ], + "title": "Voice Language", + "type": "string", + "description": "The voice language corresponding to the Voice ID", + "default": "en" + }, + "voice_speed": { + "minimum": 0.8, + "title": "Voice Speed", + "type": "number", + "maximum": 2, + "description": "Speech rate for Text to Video generation", + "default": 1 + } + }, + "x-fal-order-properties": [ + "video_url", + "text", + "voice_id", + "voice_language", + "voice_speed" + ], + "required": [ + "video_url", + "text", + "voice_id" + ] + }, + "KlingVideoLipsyncTextToVideoOutput": { + "title": "LipsyncOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/kling/kling_text_lipsync.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/lipsync/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/lipsync/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/lipsync/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoLipsyncTextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/lipsync/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoLipsyncTextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan-t2v-lora", + "metadata": { + "display_name": "Wan-2.1 Text-to-Video with LoRAs", + "category": "text-to-video", + "description": "Add custom LoRAs to Wan-2.1 is a text-to-video model that generates high-quality videos with high visual quality and motion diversity from images", + "status": "active", + "tags": [ + "\"text to video\"", + "\"motion\"", + "\"lora\"" + ], + "updated_at": "2026-01-26T21:44:00.319Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/wan-i2v-lora.webp", + "thumbnail_animated_url": "https://storage.googleapis.com/falserverless/gallery/wan-i2v-lora-animated.webp", + "model_url": "https://fal.run/fal-ai/wan-t2v-lora", + "license_type": "commercial", + "date": "2025-03-25T06:02:53.703Z", + "highlighted": false, + "kind": "inference", + "duration_estimate": 5, + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/wan-trainer/t2v-14b" + ], + "inference_endpoint_ids": [ + "fal-ai/wan-trainer/t2v-14b" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { 
+ "title": "Queue OpenAPI for fal-ai/wan-t2v-lora", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan-t2v-lora queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan-t2v-lora", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/wan-i2v-lora.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan-t2v-lora", + "documentationUrl": "https://fal.ai/models/fal-ai/wan-t2v-lora/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanT2vLoraInput": { + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "num_frames", + "frames_per_second", + "seed", + "resolution", + "aspect_ratio", + "num_inference_steps", + "enable_safety_checker", + "enable_prompt_expansion", + "turbo_mode", + "loras", + "reverse_video" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A stylish woman walks down a Tokyo street filled with warm glowing neon and animated city signage. She wears a black leather jacket, a long red dress, and black boots, and carries a black purse." + ], + "description": "The text prompt to guide video generation.", + "type": "string", + "title": "Prompt" + }, + "resolution": { + "enum": [ + "480p", + "580p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video (480p,580p, or 720p).", + "default": "480p" + }, + "reverse_video": { + "title": "Reverse Video", + "type": "boolean", + "description": "If true, the video will be reversed.", + "default": false + }, + "seed": { + "description": "Random seed for reproducibility. If None, a random seed is chosen.", + "type": "integer", + "title": "Seed" + }, + "aspect_ratio": { + "enum": [ + "9:16", + "16:9" + ], + "description": "Aspect ratio of the generated video (16:9 or 9:16).", + "type": "string", + "title": "Aspect Ratio", + "default": "16:9" + }, + "loras": { + "description": "LoRA weights to be used in the inference.", + "type": "array", + "title": "Loras", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "default": [] + }, + "frames_per_second": { + "minimum": 5, + "description": "Frames per second of the generated video. 
Must be between 5 to 24.", + "type": "integer", + "maximum": 24, + "title": "Frames Per Second", + "default": 16 + }, + "turbo_mode": { + "description": "If true, the video will be generated faster with no noticeable degradation in the visual quality.", + "type": "boolean", + "title": "Turbo Mode", + "default": true + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": false + }, + "num_inference_steps": { + "minimum": 2, + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "type": "integer", + "maximum": 40, + "title": "Num Inference Steps", + "default": 30 + }, + "num_frames": { + "minimum": 81, + "description": "Number of frames to generate. Must be between 81 to 100 (inclusive).", + "type": "integer", + "maximum": 100, + "title": "Num Frames", + "default": 81 + }, + "negative_prompt": { + "examples": [ + "bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for video generation.", + "default": "bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + }, + "enable_prompt_expansion": { + "examples": [ + true + ], + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + } + }, + "title": "WanLoRARequest", + "required": [ + "prompt" + ] + }, + "WanT2vLoraOutput": { + "x-fal-order-properties": [ + "video", + "seed" + ], + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/web-examples/wan/t2v.mp4" + } + ], + "description": "The generated video file.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "WanT2VResponse", + "required": [ + "video", + "seed" + ] + }, + "LoraWeight": { + "x-fal-order-properties": [ + "path", + "weight_name", + "scale" + ], + "type": "object", + "properties": { + "path": { + "description": "URL or the path to the LoRA weights.", + "type": "string", + "title": "Path" + }, + "scale": { + "minimum": 0, + "title": "Scale", + "type": "number", + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "maximum": 4, + "default": 1 + }, + "weight_name": { + "description": "Name of the LoRA weight. 
Used only if `path` is a Hugging Face repository, and required only if you have more than 1 safetensors file in the repo.", + "type": "string", + "title": "Weight Name" + } + }, + "title": "LoraWeight", + "required": [ + "path" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan-t2v-lora/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-t2v-lora/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/wan-t2v-lora": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanT2vLoraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-t2v-lora/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanT2vLoraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/luma-dream-machine/ray-2-flash", + "metadata": { + "display_name": "Luma Ray 2 Flash", + "category": "text-to-video", + "description": "Ray2 Flash is a fast video generative model capable of creating realistic visuals with natural, coherent motion.", + "status": "active", + "tags": [ + "motion", + "transformation" + ], + "updated_at": "2026-01-26T21:44:19.489Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/luma-dream-machine-ray-2.webp", + "model_url": "https://fal.run/fal-ai/luma-dream-machine/ray-2-flash", + "date": "2025-03-17T00:00:00.000Z", + "group": { + "key": "luma-dream-machine", + "label": "Text to Video (Ray 2 Flash)" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/luma-dream-machine/ray-2-flash", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/luma-dream-machine/ray-2-flash queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/luma-dream-machine/ray-2-flash", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/luma-dream-machine-ray-2.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/luma-dream-machine/ray-2-flash", + "documentationUrl": "https://fal.ai/models/fal-ai/luma-dream-machine/ray-2-flash/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "LumaDreamMachineRay2FlashInput": { + "title": "Ray2TextToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A herd of wild horses galloping across a dusty desert plain under a blazing midday sun, their manes flying in the wind; filmed in a wide tracking shot with dynamic motion, warm natural lighting, and an epic." + ], + "title": "Prompt", + "type": "string", + "maxLength": 5000, + "minLength": 3 + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "4:3", + "3:4", + "21:9", + "9:21" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video", + "default": "16:9" + }, + "resolution": { + "enum": [ + "540p", + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video (720p costs 2x more, 1080p costs 4x more)", + "default": "540p" + }, + "loop": { + "title": "Loop", + "type": "boolean", + "description": "Whether the video should loop (end of video is blended with the beginning)", + "default": false + }, + "duration": { + "enum": [ + "5s", + "9s" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video (9s costs 2x more)", + "default": "5s" + } + }, + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "loop", + "resolution", + "duration" + ], + "required": [ + "prompt" + ] + }, + "LumaDreamMachineRay2FlashOutput": { + "title": "Ray2T2VOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/penguin/Om3xjcOwiSCJwrXs7DUi__output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/luma-dream-machine/ray-2-flash/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/luma-dream-machine/ray-2-flash/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/luma-dream-machine/ray-2-flash": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LumaDreamMachineRay2FlashInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/luma-dream-machine/ray-2-flash/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LumaDreamMachineRay2FlashOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pika/v2/turbo/text-to-video", + "metadata": { + "display_name": "Pika Text to Video Turbo (v2)", + "category": "text-to-video", + "description": "Pika v2 Turbo creates videos from a text prompt with high quality output.", + "status": "active", + "tags": [ + "editing", + "effects", + "animation" + ], + "updated_at": "2026-01-26T21:44:02.021Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/kangaroo/D6kbQkNiBrPL9m05gdWnE_48bca5e513bd42a6b777dfb9b08e0ca9.jpg", + "model_url": "https://fal.run/fal-ai/pika/v2/turbo/text-to-video", + "license_type": "commercial", + "date": "2025-03-14T00:00:00.000Z", + "group": { + "key": "pika", + "label": "Text to Video Turbo (v2)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pika/v2/turbo/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pika/v2/turbo/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pika/v2/turbo/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/kangaroo/D6kbQkNiBrPL9m05gdWnE_48bca5e513bd42a6b777dfb9b08e0ca9.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/pika/v2/turbo/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/pika/v2/turbo/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": 
"string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PikaV2TurboTextToVideoInput": { + "x-fal-order-properties": [ + "prompt", + "seed", + "negative_prompt", + "aspect_ratio", + "resolution", + "duration" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A luxurious tea scene where a luxury brand teapot pours tea into an oversized luxury cup. Steam rises, creating an ethereal moment. camera dolly in" + ], + "title": "Prompt", + "type": "string" + }, + "resolution": { + "enum": [ + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video", + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1", + "4:5", + "5:4", + "3:2", + "2:3" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video", + "default": "16:9" + }, + "duration": { + "title": "Duration", + "type": "integer", + "description": "The duration of the generated video in seconds", + "default": 5 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed for the random number generator" + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "A negative prompt to guide the model", + "default": "" + } + }, + "title": "TextToVideoTurboInput", + "description": "Base request for text-to-video generation", + "required": [ + "prompt" + ] + }, + "PikaV2TurboTextToVideoOutput": { + "x-fal-order-properties": [ + "video" + ], + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/pika/pika_t2v_v2_turbo_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "TurboTextToVideoOutput", + "description": "Output from text-to-video generation", + "required": [ + "video" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pika/v2/turbo/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pika/v2/turbo/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/pika/v2/turbo/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PikaV2TurboTextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pika/v2/turbo/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PikaV2TurboTextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pika/v2.1/text-to-video", + "metadata": { + "display_name": "Pika Text to Video (v2.1)", + "category": "text-to-video", + "description": "Start with a simple text input to create dynamic generations that defy expectations. 
Anything you dream can come to life with sharp details, impressive character control and cinematic camera moves.", + "status": "active", + "tags": [ + "editing", + "effects", + "animation" + ], + "updated_at": "2026-01-26T21:44:01.398Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/penguin/CUxIh-EAd_N4npYGWlEqA_d08d3d9739e947e9814d7d2f2a1c998d.jpg", + "model_url": "https://fal.run/fal-ai/pika/v2.1/text-to-video", + "license_type": "commercial", + "date": "2025-03-14T00:00:00.000Z", + "group": { + "key": "pika", + "label": "Text to Video (v2.1)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pika/v2.1/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pika/v2.1/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pika/v2.1/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/penguin/CUxIh-EAd_N4npYGWlEqA_d08d3d9739e947e9814d7d2f2a1c998d.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/pika/v2.1/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/pika/v2.1/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PikaV21TextToVideoInput": { + "x-fal-order-properties": [ + "prompt", + "seed", + "negative_prompt", + "aspect_ratio", + "resolution", + "duration" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A woman styled in a high-fashion brand editorial. the woman stands confidently in a whimsical outdoor setting against a soft, cloudy sky. She wears a bright yellow luxury brand monogram jacket over a crisp striped shirt, paired with flowing pink trousers, accessorized with oversized sunglasses, a golden chain necklace, and a bold luxury brand belt. Delicate flowers in the foreground add a dreamy and artistic touch, evoking a retro yet luxurious high fashion campaign aesthetic. 
the camera crane up from the flowers to the woman" + ], + "title": "Prompt", + "type": "string" + }, + "resolution": { + "enum": [ + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video", + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1", + "4:5", + "5:4", + "3:2", + "2:3" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video", + "default": "16:9" + }, + "duration": { + "title": "Duration", + "type": "integer", + "description": "The duration of the generated video in seconds", + "default": 5 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed for the random number generator" + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "A negative prompt to guide the model", + "default": "" + } + }, + "title": "TextToVideov21Input", + "description": "Base request for text-to-video generation", + "required": [ + "prompt" + ] + }, + "PikaV21TextToVideoOutput": { + "x-fal-order-properties": [ + "video" + ], + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/pika/pika_t2v_v21_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "TextToVideoV21Output", + "description": "Output from text-to-video generation", + "required": [ + "video" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pika/v2.1/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pika/v2.1/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/pika/v2.1/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PikaV21TextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pika/v2.1/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PikaV21TextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pika/v2.2/text-to-video", + "metadata": { + "display_name": "Pika Text to Video (v2.2)", + "category": "text-to-video", + "description": "Start with a simple text input to create dynamic generations that defy expectations in up to 1080p. 
Experience better image clarity and crisper, sharper visuals.", + "status": "active", + "tags": [ + "editing", + "effects", + "animation" + ], + "updated_at": "2026-01-26T21:44:01.772Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/panda/d7bGY17P07W2dKiNoWXfQ_fb8e23d259a44c5a893f04ae7a710b95.jpg", + "model_url": "https://fal.run/fal-ai/pika/v2.2/text-to-video", + "license_type": "commercial", + "date": "2025-03-14T00:00:00.000Z", + "group": { + "key": "pika", + "label": "Text to Video (v2.2)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pika/v2.2/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pika/v2.2/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pika/v2.2/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/panda/d7bGY17P07W2dKiNoWXfQ_fb8e23d259a44c5a893f04ae7a710b95.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/pika/v2.2/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/pika/v2.2/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PikaV22TextToVideoInput": { + "x-fal-order-properties": [ + "prompt", + "seed", + "negative_prompt", + "aspect_ratio", + "resolution", + "duration" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Large elegant white poodle standing proudly on the deck of a white yacht, wearing oversized glamorous sunglasses and a luxurious silk Gucci-style scarf tied around its neck, layered pearl necklaces draped across its chest, photographed from outside the yacht at a low upward angle, clear blue sky background, strong midday sunlight, washed-out faded tones, slightly overexposed 2000s fashion editorial aesthetic, cinematic analog film texture, playful luxury mood, glossy magazine style, bright harsh light and soft shadows, stylish and extravagant atmosphere. 
camera slow orbit and dolly in" + ], + "title": "Prompt", + "type": "string" + }, + "resolution": { + "examples": [ + "1080p", + "720p" + ], + "title": "Resolution", + "type": "string", + "enum": [ + "1080p", + "720p" + ], + "description": "The resolution of the generated video", + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1", + "4:5", + "5:4", + "3:2", + "2:3" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video", + "default": "16:9" + }, + "duration": { + "enum": [ + 5, + 10 + ], + "title": "Duration", + "type": "integer", + "description": "The duration of the generated video in seconds", + "default": 5 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed for the random number generator" + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "A negative prompt to guide the model", + "default": "ugly, bad, terrible" + } + }, + "title": "Pika22TextToVideoRequest", + "description": "Request model for Pika 2.2 text-to-video generation", + "required": [ + "prompt" + ] + }, + "PikaV22TextToVideoOutput": { + "x-fal-order-properties": [ + "video" + ], + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/pika/pika_t2v_v22_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "Pika22TextToVideoOutput", + "description": "Output model for Pika 2.2 text-to-video generation", + "required": [ + "video" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pika/v2.2/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pika/v2.2/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/pika/v2.2/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PikaV22TextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pika/v2.2/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PikaV22TextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan-pro/text-to-video", + "metadata": { + "display_name": "Wan-2.1 Pro Text-to-Video", + "category": "text-to-video", + "description": "Wan-2.1 Pro is a premium text-to-video model that generates high-quality 1080p videos at 30fps with up to 6 seconds duration, delivering exceptional visual quality and motion diversity from text prompts", + "status": "active", + "tags": [ + "text to video", + "motion" + ], + "updated_at": "2026-01-26T21:44:02.275Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/Fal_Visuals_V1_02.jpg", + "model_url": "https://fal.run/fal-ai/wan-pro/text-to-video", + "license_type": "commercial", + "date": "2025-03-11T00:00:00.000Z", + "group": { + "key": "wan-2.1", + "label": "Text-to-Video (Pro)" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 5, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan-pro/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan-pro/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan-pro/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/Fal_Visuals_V1_02.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan-pro/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/wan-pro/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + 
"description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanProTextToVideoInput": { + "x-fal-order-properties": [ + "prompt", + "seed", + "enable_safety_checker" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A lone astronaut in a detailed NASA spacesuit performs an exuberant dance on the lunar surface, arms outstretched in joyful abandon against the stark moonscape. The Earth hangs dramatically in the black sky, appearing to streak past due to the motion of the dance, creating a sense of dynamic movement. The scene captures extreme contrasts between the brilliant white of the spacesuit reflecting harsh sunlight and the deep shadows of the lunar craters. Every detail is rendered with photorealistic precision: the texture of the regolith disturbed by the astronaut's boots, the reflections on the helmet visor." + ], + "description": "The prompt to generate the video", + "type": "string", + "title": "Prompt" + }, + "enable_safety_checker": { + "description": "Whether to enable the safety checker", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "Random seed for reproducibility. If None, a random seed is chosen.", + "title": "Seed" + } + }, + "title": "WanProT2VRequest", + "required": [ + "prompt" + ] + }, + "WanProTextToVideoOutput": { + "x-fal-order-properties": [ + "video" + ], + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://fal.media/files/panda/YxRLson-aETxeBK1DI4VW.mp4" + } + ], + "description": "The generated video", + "$ref": "#/components/schemas/File" + } + }, + "title": "WanProT2VResponse", + "required": [ + "video" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan-pro/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-pro/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/wan-pro/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanProTextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-pro/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanProTextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/v1.5/pro/effects", + "metadata": { + "display_name": "Kling 1.5", + "category": "text-to-video", + "description": "Generate video clips from your prompts using Kling 1.5 (pro)", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:44:03.488Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/elephant/28-vTrv3W2BT-u8_cy7mt.png", + "model_url": "https://fal.run/fal-ai/kling-video/v1.5/pro/effects", + "license_type": "commercial", + "date": "2025-03-06T00:00:00.000Z", + "group": { + "key": "kling-video-effects", + "label": "v1.5 Effects (pro)" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 6, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling-video/v1.5/pro/effects", + "version": "1.0.0", + "description": "The 
OpenAPI schema for the fal-ai/kling-video/v1.5/pro/effects queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/v1.5/pro/effects", + "category": "text-to-video", + "thumbnailUrl": "https://fal.media/files/elephant/28-vTrv3W2BT-u8_cy7mt.png", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/v1.5/pro/effects", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/v1.5/pro/effects/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoV15ProEffectsInput": { + "title": "VideoEffectsRequest", + "type": "object", + "properties": { + "duration": { + "enum": [ + "5", + "10" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds", + "default": "5" + }, + "input_image_urls": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/juggernaut_examples/VHXMavzPyI27zi6JseyL4.png", + "https://storage.googleapis.com/falserverless/juggernaut_examples/QEW5VrzccxGva7mPfEXjf.png" + ] + ], + "title": "Input Image Urls", + "type": "array", + "description": "URL of images to be used for hug, kiss or heart_gesture video.", + "items": { + "type": "string" + } + }, + "effect_scene": { + "enum": [ + "hug", + "kiss", + "heart_gesture", + "squish", + "expansion", + "fuzzyfuzzy", + "bloombloom", + "dizzydizzy", + "jelly_press", + "jelly_slice", + "jelly_squish", + "jelly_jiggle", + "pixelpixel", + "yearbook", + "instant_film", + "anime_figure", + "rocketrocket", + "fly_fly", + "disappear", + "lightning_power", + "bullet_time", + "bullet_time_360", + "media_interview", + "day_to_night", + "let's_ride", + "jumpdrop", + "swish_swish", + "running_man", + "jazz_jazz", + "swing_swing", + "skateskate", + "building_sweater", + "pure_white_wings", + "black_wings", + "golden_wing", + "pink_pink_wings", + "rampage_ape", + "a_list_look", + "countdown_teleport", + "firework_2026", + "instant_christmas", + "birthday_star", + "firework", + "celebration", + "tiger_hug_pro", + "pet_lion_pro", + "guardian_spirit", + "squeeze_scream", + "inner_voice", + "memory_alive", + "guess_what", + "eagle_snatch", + "hug_from_past", + "instant_kid", + "dollar_rain", + "cry_cry", + "building_collapse", + "mushroom", + "jesus_hug", + "shark_alert", + "lie_flat", + "polar_bear_hug", + "brown_bear_hug", + "office_escape_plow", + "watermelon_bomb", + "boss_coming", + "wig_out", + "car_explosion", + "tiger_hug", + "siblings", + "construction_worker", + "snatched", + "felt_felt", + "plushcut", + "drunk_dance", + "drunk_dance_pet", + "daoma_dance", + "bouncy_dance", + "smooth_sailing_dance", + 
"new_year_greeting", + "lion_dance", + "prosperity", + "great_success", + "golden_horse_fortune", + "red_packet_box", + "lucky_horse_year", + "lucky_red_packet", + "lucky_money_come", + "lion_dance_pet", + "dumpling_making_pet", + "fish_making_pet", + "pet_red_packet", + "lantern_glow", + "expression_challenge", + "overdrive", + "heart_gesture_dance", + "poping", + "martial_arts", + "running", + "nezha", + "motorcycle_dance", + "subject_3_dance", + "ghost_step_dance", + "phantom_jewel", + "zoom_out", + "cheers_2026", + "kiss_pro", + "fight_pro", + "hug_pro", + "heart_gesture_pro", + "dollar_rain_pro", + "pet_bee_pro", + "santa_random_surprise", + "magic_match_tree", + "happy_birthday", + "thumbs_up_pro", + "surprise_bouquet", + "bouquet_drop", + "3d_cartoon_1_pro", + "glamour_photo_shoot", + "box_of_joy", + "first_toast_of_the_year", + "my_santa_pic", + "santa_gift", + "steampunk_christmas", + "snowglobe", + "christmas_photo_shoot", + "ornament_crash", + "santa_express", + "particle_santa_surround", + "coronation_of_frost", + "spark_in_the_snow", + "scarlet_and_snow", + "cozy_toon_wrap", + "bullet_time_lite", + "magic_cloak", + "balloon_parade", + "jumping_ginger_joy", + "c4d_cartoon_pro", + "venomous_spider", + "throne_of_king", + "luminous_elf", + "woodland_elf", + "japanese_anime_1", + "american_comics", + "snowboarding", + "witch_transform", + "vampire_transform", + "pumpkin_head_transform", + "demon_transform", + "mummy_transform", + "zombie_transform", + "cute_pumpkin_transform", + "cute_ghost_transform", + "knock_knock_halloween", + "halloween_escape", + "baseball", + "trampoline", + "trampoline_night", + "pucker_up", + "feed_mooncake", + "flyer", + "dishwasher", + "pet_chinese_opera", + "magic_fireball", + "gallery_ring", + "pet_moto_rider", + "muscle_pet", + "pet_delivery", + "mythic_style", + "steampunk", + "3d_cartoon_2", + "pet_chef", + "santa_gifts", + "santa_hug", + "girlfriend", + "boyfriend", + "heart_gesture_1", + "pet_wizard", + "smoke_smoke", + "gun_shot", + "double_gun", + "pet_warrior", + "long_hair", + "pet_dance", + "wool_curly", + "pet_bee", + "marry_me", + "piggy_morph", + "ski_ski", + "magic_broom", + "splashsplash", + "surfsurf", + "fairy_wing", + "angel_wing", + "dark_wing", + "emoji" + ], + "title": "Effect Scene", + "type": "string", + "examples": [ + "hug" + ], + "description": "The effect scene to use for the video generation" + } + }, + "x-fal-order-properties": [ + "input_image_urls", + "effect_scene", + "duration" + ], + "required": [ + "effect_scene" + ] + }, + "KlingVideoV15ProEffectsOutput": { + "title": "VideoEffectsOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://storage.googleapis.com/falserverless/kling/kling_ex.mp4.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." 
+ }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/v1.5/pro/effects/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1.5/pro/effects/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1.5/pro/effects": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV15ProEffectsInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1.5/pro/effects/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV15ProEffectsOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/v1.6/pro/effects", + "metadata": { + "display_name": "Kling 1.6", + "category": "text-to-video", + "description": "Generate video clips from your prompts using Kling 1.6 (pro)", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:44:03.060Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/elephant/28-vTrv3W2BT-u8_cy7mt.png", + "model_url": "https://fal.run/fal-ai/kling-video/v1.6/pro/effects", + "license_type": "commercial", + "date": "2025-03-06T00:00:00.000Z", + "group": { + "key": "kling-video-effects", + "label": "v1.6 Effects (pro)" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 4, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for 
fal-ai/kling-video/v1.6/pro/effects", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-video/v1.6/pro/effects queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/v1.6/pro/effects", + "category": "text-to-video", + "thumbnailUrl": "https://fal.media/files/elephant/28-vTrv3W2BT-u8_cy7mt.png", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/v1.6/pro/effects", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/v1.6/pro/effects/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoV16ProEffectsInput": { + "title": "VideoEffectsRequest", + "type": "object", + "properties": { + "duration": { + "enum": [ + "5", + "10" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds", + "default": "5" + }, + "input_image_urls": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/juggernaut_examples/VHXMavzPyI27zi6JseyL4.png", + "https://storage.googleapis.com/falserverless/juggernaut_examples/QEW5VrzccxGva7mPfEXjf.png" + ] + ], + "title": "Input Image Urls", + "type": "array", + "description": "URL of images to be used for hug, kiss or heart_gesture video.", + "items": { + "type": "string" + } + }, + "effect_scene": { + "enum": [ + "hug", + "kiss", + "heart_gesture", + "squish", + "expansion", + "fuzzyfuzzy", + "bloombloom", + "dizzydizzy", + "jelly_press", + "jelly_slice", + "jelly_squish", + "jelly_jiggle", + "pixelpixel", + "yearbook", + "instant_film", + "anime_figure", + "rocketrocket", + "fly_fly", + "disappear", + "lightning_power", + "bullet_time", + "bullet_time_360", + "media_interview", + "day_to_night", + "let's_ride", + "jumpdrop", + "swish_swish", + "running_man", + "jazz_jazz", + "swing_swing", + "skateskate", + "building_sweater", + "pure_white_wings", + "black_wings", + "golden_wing", + "pink_pink_wings", + "rampage_ape", + "a_list_look", + "countdown_teleport", + "firework_2026", + "instant_christmas", + "birthday_star", + "firework", + "celebration", + "tiger_hug_pro", + "pet_lion_pro", + "guardian_spirit", + "squeeze_scream", + "inner_voice", + "memory_alive", + "guess_what", + "eagle_snatch", + "hug_from_past", + "instant_kid", + "dollar_rain", + "cry_cry", + "building_collapse", + "mushroom", + "jesus_hug", + "shark_alert", + "lie_flat", + "polar_bear_hug", + "brown_bear_hug", + "office_escape_plow", + "watermelon_bomb", + "boss_coming", + "wig_out", + "car_explosion", + "tiger_hug", + "siblings", + "construction_worker", + "snatched", + "felt_felt", + "plushcut", + "drunk_dance", + 
"drunk_dance_pet", + "daoma_dance", + "bouncy_dance", + "smooth_sailing_dance", + "new_year_greeting", + "lion_dance", + "prosperity", + "great_success", + "golden_horse_fortune", + "red_packet_box", + "lucky_horse_year", + "lucky_red_packet", + "lucky_money_come", + "lion_dance_pet", + "dumpling_making_pet", + "fish_making_pet", + "pet_red_packet", + "lantern_glow", + "expression_challenge", + "overdrive", + "heart_gesture_dance", + "poping", + "martial_arts", + "running", + "nezha", + "motorcycle_dance", + "subject_3_dance", + "ghost_step_dance", + "phantom_jewel", + "zoom_out", + "cheers_2026", + "kiss_pro", + "fight_pro", + "hug_pro", + "heart_gesture_pro", + "dollar_rain_pro", + "pet_bee_pro", + "santa_random_surprise", + "magic_match_tree", + "happy_birthday", + "thumbs_up_pro", + "surprise_bouquet", + "bouquet_drop", + "3d_cartoon_1_pro", + "glamour_photo_shoot", + "box_of_joy", + "first_toast_of_the_year", + "my_santa_pic", + "santa_gift", + "steampunk_christmas", + "snowglobe", + "christmas_photo_shoot", + "ornament_crash", + "santa_express", + "particle_santa_surround", + "coronation_of_frost", + "spark_in_the_snow", + "scarlet_and_snow", + "cozy_toon_wrap", + "bullet_time_lite", + "magic_cloak", + "balloon_parade", + "jumping_ginger_joy", + "c4d_cartoon_pro", + "venomous_spider", + "throne_of_king", + "luminous_elf", + "woodland_elf", + "japanese_anime_1", + "american_comics", + "snowboarding", + "witch_transform", + "vampire_transform", + "pumpkin_head_transform", + "demon_transform", + "mummy_transform", + "zombie_transform", + "cute_pumpkin_transform", + "cute_ghost_transform", + "knock_knock_halloween", + "halloween_escape", + "baseball", + "trampoline", + "trampoline_night", + "pucker_up", + "feed_mooncake", + "flyer", + "dishwasher", + "pet_chinese_opera", + "magic_fireball", + "gallery_ring", + "pet_moto_rider", + "muscle_pet", + "pet_delivery", + "mythic_style", + "steampunk", + "3d_cartoon_2", + "pet_chef", + "santa_gifts", + "santa_hug", + "girlfriend", + "boyfriend", + "heart_gesture_1", + "pet_wizard", + "smoke_smoke", + "gun_shot", + "double_gun", + "pet_warrior", + "long_hair", + "pet_dance", + "wool_curly", + "pet_bee", + "marry_me", + "piggy_morph", + "ski_ski", + "magic_broom", + "splashsplash", + "surfsurf", + "fairy_wing", + "angel_wing", + "dark_wing", + "emoji" + ], + "title": "Effect Scene", + "type": "string", + "examples": [ + "hug" + ], + "description": "The effect scene to use for the video generation" + } + }, + "x-fal-order-properties": [ + "input_image_urls", + "effect_scene", + "duration" + ], + "required": [ + "effect_scene" + ] + }, + "KlingVideoV16ProEffectsOutput": { + "title": "VideoEffectsOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://storage.googleapis.com/falserverless/kling/kling_ex.mp4.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." 
+ }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/v1.6/pro/effects/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1.6/pro/effects/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1.6/pro/effects": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV16ProEffectsInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1.6/pro/effects/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV16ProEffectsOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/v1/standard/effects", + "metadata": { + "display_name": "Kling 1.0", + "category": "text-to-video", + "description": "Generate video clips from your prompts using Kling 1.0", + "status": "active", + "tags": [ + "motion" + ], + "updated_at": "2026-01-26T21:44:03.360Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/monkey/GoSnDOnX0Tea08N7iI7oM.jpeg", + "model_url": "https://fal.run/fal-ai/kling-video/v1/standard/effects", + "license_type": "commercial", + "date": "2025-03-06T00:00:00.000Z", + "group": { + "key": "kling-video-effects", + "label": "Effects (standard)" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 6, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for 
fal-ai/kling-video/v1/standard/effects", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-video/v1/standard/effects queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/v1/standard/effects", + "category": "text-to-video", + "thumbnailUrl": "https://fal.media/files/monkey/GoSnDOnX0Tea08N7iI7oM.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/v1/standard/effects", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/v1/standard/effects/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoV1StandardEffectsInput": { + "title": "VideoEffectsRequest", + "type": "object", + "properties": { + "duration": { + "enum": [ + "5", + "10" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds", + "default": "5" + }, + "input_image_urls": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/juggernaut_examples/VHXMavzPyI27zi6JseyL4.png", + "https://storage.googleapis.com/falserverless/juggernaut_examples/QEW5VrzccxGva7mPfEXjf.png" + ] + ], + "title": "Input Image Urls", + "type": "array", + "description": "URL of images to be used for hug, kiss or heart_gesture video.", + "items": { + "type": "string" + } + }, + "effect_scene": { + "enum": [ + "hug", + "kiss", + "heart_gesture", + "squish", + "expansion", + "fuzzyfuzzy", + "bloombloom", + "dizzydizzy", + "jelly_press", + "jelly_slice", + "jelly_squish", + "jelly_jiggle", + "pixelpixel", + "yearbook", + "instant_film", + "anime_figure", + "rocketrocket", + "fly_fly", + "disappear", + "lightning_power", + "bullet_time", + "bullet_time_360", + "media_interview", + "day_to_night", + "let's_ride", + "jumpdrop", + "swish_swish", + "running_man", + "jazz_jazz", + "swing_swing", + "skateskate", + "building_sweater", + "pure_white_wings", + "black_wings", + "golden_wing", + "pink_pink_wings", + "rampage_ape", + "a_list_look", + "countdown_teleport", + "firework_2026", + "instant_christmas", + "birthday_star", + "firework", + "celebration", + "tiger_hug_pro", + "pet_lion_pro", + "guardian_spirit", + "squeeze_scream", + "inner_voice", + "memory_alive", + "guess_what", + "eagle_snatch", + "hug_from_past", + "instant_kid", + "dollar_rain", + "cry_cry", + "building_collapse", + "mushroom", + "jesus_hug", + "shark_alert", + "lie_flat", + "polar_bear_hug", + "brown_bear_hug", + "office_escape_plow", + "watermelon_bomb", + "boss_coming", + "wig_out", + "car_explosion", + "tiger_hug", + "siblings", + "construction_worker", + "snatched", + "felt_felt", + "plushcut", + 
"drunk_dance", + "drunk_dance_pet", + "daoma_dance", + "bouncy_dance", + "smooth_sailing_dance", + "new_year_greeting", + "lion_dance", + "prosperity", + "great_success", + "golden_horse_fortune", + "red_packet_box", + "lucky_horse_year", + "lucky_red_packet", + "lucky_money_come", + "lion_dance_pet", + "dumpling_making_pet", + "fish_making_pet", + "pet_red_packet", + "lantern_glow", + "expression_challenge", + "overdrive", + "heart_gesture_dance", + "poping", + "martial_arts", + "running", + "nezha", + "motorcycle_dance", + "subject_3_dance", + "ghost_step_dance", + "phantom_jewel", + "zoom_out", + "cheers_2026", + "kiss_pro", + "fight_pro", + "hug_pro", + "heart_gesture_pro", + "dollar_rain_pro", + "pet_bee_pro", + "santa_random_surprise", + "magic_match_tree", + "happy_birthday", + "thumbs_up_pro", + "surprise_bouquet", + "bouquet_drop", + "3d_cartoon_1_pro", + "glamour_photo_shoot", + "box_of_joy", + "first_toast_of_the_year", + "my_santa_pic", + "santa_gift", + "steampunk_christmas", + "snowglobe", + "christmas_photo_shoot", + "ornament_crash", + "santa_express", + "particle_santa_surround", + "coronation_of_frost", + "spark_in_the_snow", + "scarlet_and_snow", + "cozy_toon_wrap", + "bullet_time_lite", + "magic_cloak", + "balloon_parade", + "jumping_ginger_joy", + "c4d_cartoon_pro", + "venomous_spider", + "throne_of_king", + "luminous_elf", + "woodland_elf", + "japanese_anime_1", + "american_comics", + "snowboarding", + "witch_transform", + "vampire_transform", + "pumpkin_head_transform", + "demon_transform", + "mummy_transform", + "zombie_transform", + "cute_pumpkin_transform", + "cute_ghost_transform", + "knock_knock_halloween", + "halloween_escape", + "baseball", + "trampoline", + "trampoline_night", + "pucker_up", + "feed_mooncake", + "flyer", + "dishwasher", + "pet_chinese_opera", + "magic_fireball", + "gallery_ring", + "pet_moto_rider", + "muscle_pet", + "pet_delivery", + "mythic_style", + "steampunk", + "3d_cartoon_2", + "pet_chef", + "santa_gifts", + "santa_hug", + "girlfriend", + "boyfriend", + "heart_gesture_1", + "pet_wizard", + "smoke_smoke", + "gun_shot", + "double_gun", + "pet_warrior", + "long_hair", + "pet_dance", + "wool_curly", + "pet_bee", + "marry_me", + "piggy_morph", + "ski_ski", + "magic_broom", + "splashsplash", + "surfsurf", + "fairy_wing", + "angel_wing", + "dark_wing", + "emoji" + ], + "title": "Effect Scene", + "type": "string", + "examples": [ + "hug" + ], + "description": "The effect scene to use for the video generation" + } + }, + "x-fal-order-properties": [ + "input_image_urls", + "effect_scene", + "duration" + ], + "required": [ + "effect_scene" + ] + }, + "KlingVideoV1StandardEffectsOutput": { + "title": "VideoEffectsOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://storage.googleapis.com/falserverless/kling/kling_ex.mp4.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." 
+ }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/v1/standard/effects/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1/standard/effects/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1/standard/effects": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV1StandardEffectsInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1/standard/effects/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV1StandardEffectsOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/v1.6/standard/effects", + "metadata": { + "display_name": "Kling 1.6", + "category": "text-to-video", + "description": "Generate video clips from your prompts using Kling 1.6 (std)", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:44:03.187Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/elephant/28-vTrv3W2BT-u8_cy7mt.png", + "model_url": "https://fal.run/fal-ai/kling-video/v1.6/standard/effects", + "license_type": "commercial", + "date": "2025-03-06T00:00:00.000Z", + "group": { + "key": "kling-video-effects", + "label": "v1.6 Effects (std)" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 6, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for 
fal-ai/kling-video/v1.6/standard/effects", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-video/v1.6/standard/effects queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/v1.6/standard/effects", + "category": "text-to-video", + "thumbnailUrl": "https://fal.media/files/elephant/28-vTrv3W2BT-u8_cy7mt.png", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/v1.6/standard/effects", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/v1.6/standard/effects/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoV16StandardEffectsInput": { + "title": "VideoEffectsRequest", + "type": "object", + "properties": { + "duration": { + "enum": [ + "5", + "10" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds", + "default": "5" + }, + "input_image_urls": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/juggernaut_examples/VHXMavzPyI27zi6JseyL4.png", + "https://storage.googleapis.com/falserverless/juggernaut_examples/QEW5VrzccxGva7mPfEXjf.png" + ] + ], + "title": "Input Image Urls", + "type": "array", + "description": "URL of images to be used for hug, kiss or heart_gesture video.", + "items": { + "type": "string" + } + }, + "effect_scene": { + "enum": [ + "hug", + "kiss", + "heart_gesture", + "squish", + "expansion", + "fuzzyfuzzy", + "bloombloom", + "dizzydizzy", + "jelly_press", + "jelly_slice", + "jelly_squish", + "jelly_jiggle", + "pixelpixel", + "yearbook", + "instant_film", + "anime_figure", + "rocketrocket", + "fly_fly", + "disappear", + "lightning_power", + "bullet_time", + "bullet_time_360", + "media_interview", + "day_to_night", + "let's_ride", + "jumpdrop", + "swish_swish", + "running_man", + "jazz_jazz", + "swing_swing", + "skateskate", + "building_sweater", + "pure_white_wings", + "black_wings", + "golden_wing", + "pink_pink_wings", + "rampage_ape", + "a_list_look", + "countdown_teleport", + "firework_2026", + "instant_christmas", + "birthday_star", + "firework", + "celebration", + "tiger_hug_pro", + "pet_lion_pro", + "guardian_spirit", + "squeeze_scream", + "inner_voice", + "memory_alive", + "guess_what", + "eagle_snatch", + "hug_from_past", + "instant_kid", + "dollar_rain", + "cry_cry", + "building_collapse", + "mushroom", + "jesus_hug", + "shark_alert", + "lie_flat", + "polar_bear_hug", + "brown_bear_hug", + "office_escape_plow", + "watermelon_bomb", + "boss_coming", + "wig_out", + "car_explosion", + "tiger_hug", + "siblings", + "construction_worker", + "snatched", + "felt_felt", + "plushcut", 
+ "drunk_dance", + "drunk_dance_pet", + "daoma_dance", + "bouncy_dance", + "smooth_sailing_dance", + "new_year_greeting", + "lion_dance", + "prosperity", + "great_success", + "golden_horse_fortune", + "red_packet_box", + "lucky_horse_year", + "lucky_red_packet", + "lucky_money_come", + "lion_dance_pet", + "dumpling_making_pet", + "fish_making_pet", + "pet_red_packet", + "lantern_glow", + "expression_challenge", + "overdrive", + "heart_gesture_dance", + "poping", + "martial_arts", + "running", + "nezha", + "motorcycle_dance", + "subject_3_dance", + "ghost_step_dance", + "phantom_jewel", + "zoom_out", + "cheers_2026", + "kiss_pro", + "fight_pro", + "hug_pro", + "heart_gesture_pro", + "dollar_rain_pro", + "pet_bee_pro", + "santa_random_surprise", + "magic_match_tree", + "happy_birthday", + "thumbs_up_pro", + "surprise_bouquet", + "bouquet_drop", + "3d_cartoon_1_pro", + "glamour_photo_shoot", + "box_of_joy", + "first_toast_of_the_year", + "my_santa_pic", + "santa_gift", + "steampunk_christmas", + "snowglobe", + "christmas_photo_shoot", + "ornament_crash", + "santa_express", + "particle_santa_surround", + "coronation_of_frost", + "spark_in_the_snow", + "scarlet_and_snow", + "cozy_toon_wrap", + "bullet_time_lite", + "magic_cloak", + "balloon_parade", + "jumping_ginger_joy", + "c4d_cartoon_pro", + "venomous_spider", + "throne_of_king", + "luminous_elf", + "woodland_elf", + "japanese_anime_1", + "american_comics", + "snowboarding", + "witch_transform", + "vampire_transform", + "pumpkin_head_transform", + "demon_transform", + "mummy_transform", + "zombie_transform", + "cute_pumpkin_transform", + "cute_ghost_transform", + "knock_knock_halloween", + "halloween_escape", + "baseball", + "trampoline", + "trampoline_night", + "pucker_up", + "feed_mooncake", + "flyer", + "dishwasher", + "pet_chinese_opera", + "magic_fireball", + "gallery_ring", + "pet_moto_rider", + "muscle_pet", + "pet_delivery", + "mythic_style", + "steampunk", + "3d_cartoon_2", + "pet_chef", + "santa_gifts", + "santa_hug", + "girlfriend", + "boyfriend", + "heart_gesture_1", + "pet_wizard", + "smoke_smoke", + "gun_shot", + "double_gun", + "pet_warrior", + "long_hair", + "pet_dance", + "wool_curly", + "pet_bee", + "marry_me", + "piggy_morph", + "ski_ski", + "magic_broom", + "splashsplash", + "surfsurf", + "fairy_wing", + "angel_wing", + "dark_wing", + "emoji" + ], + "title": "Effect Scene", + "type": "string", + "examples": [ + "hug" + ], + "description": "The effect scene to use for the video generation" + } + }, + "x-fal-order-properties": [ + "input_image_urls", + "effect_scene", + "duration" + ], + "required": [ + "effect_scene" + ] + }, + "KlingVideoV16StandardEffectsOutput": { + "title": "VideoEffectsOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://storage.googleapis.com/falserverless/kling/kling_ex.mp4.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." 
+ }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/v1.6/standard/effects/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1.6/standard/effects/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1.6/standard/effects": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV16StandardEffectsInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1.6/standard/effects/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV16StandardEffectsOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx-video-v095", + "metadata": { + "display_name": "LTX Video-0.9.5", + "category": "text-to-video", + "description": "Generate videos from prompts using LTX Video-0.9.5", + "status": "active", + "tags": [ + "video", + "text-video" + ], + "updated_at": "2026-01-26T21:44:22.923Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/ltx-0.9.5.webp", + "model_url": "https://fal.run/fal-ai/ltx-video-v095", + "date": "2025-03-05T00:00:00.000Z", + "group": { + "key": "ltx-video-v0.9.5", + "label": "Text to Video" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-video-v095", + "version": "1.0.0", + "description": "The OpenAPI schema 
for the fal-ai/ltx-video-v095 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-video-v095", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/ltx-0.9.5.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx-video-v095", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx-video-v095/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LtxVideoV095Input": { + "title": "TextToVideoInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A cute cat walking on a sidewalk" + ], + "title": "Prompt", + "type": "string", + "description": "Text prompt to guide generation" + }, + "resolution": { + "enum": [ + "480p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video (480p or 720p).", + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "9:16", + "16:9" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio of the generated video (16:9 or 9:16).", + "default": "16:9" + }, + "expand_prompt": { + "title": "Expand Prompt", + "type": "boolean", + "description": "Whether to expand the prompt using the model's own capabilities.", + "default": true + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for generation" + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of inference steps", + "default": 40 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for generation", + "default": "worst quality, inconsistent motion, blurry, jittery, distorted" + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "resolution", + "aspect_ratio", + "seed", + "num_inference_steps", + "expand_prompt" + ], + "required": [ + "prompt" + ] + }, + "LtxVideoV095Output": { + "title": "TextToVideoOutput", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." 
+ }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/ltx-t2v_output.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-video-v095/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-v095/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-v095": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LtxVideoV095Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-v095/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LtxVideoV095Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/v1.6/pro/text-to-video", + "metadata": { + "display_name": "Kling 1.6", + "category": "text-to-video", + "description": "Generate video clips from your prompts using Kling 1.6 (pro)", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:44:04.520Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/elephant/28-vTrv3W2BT-u8_cy7mt.png", + "model_url": "https://fal.run/fal-ai/kling-video/v1.6/pro/text-to-video", + "license_type": "commercial", + "date": "2025-02-27T00:00:00.000Z", + "group": { + "key": "kling-video-v1-6", + "label": "Text to Video v1.6 (pro)" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 4, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling-video/v1.6/pro/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-video/v1.6/pro/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/v1.6/pro/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://fal.media/files/elephant/28-vTrv3W2BT-u8_cy7mt.png", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/v1.6/pro/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/v1.6/pro/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoV16ProTextToVideoInput": { + "title": "TextToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A stylish woman walks down a Tokyo street filled with warm glowing neon and animated city signage. She wears a black leather jacket, a long red dress, and black boots, and carries a black purse." + ], + "title": "Prompt", + "type": "string", + "maxLength": 2500 + }, + "duration": { + "enum": [ + "5", + "10" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds", + "default": "5" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video frame", + "default": "16:9" + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "maxLength": 2500, + "default": "blur, distort, and low quality" + }, + "cfg_scale": { + "minimum": 0, + "title": "Cfg Scale", + "type": "number", + "maximum": 1, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt.\n ", + "default": 0.5 + } + }, + "x-fal-order-properties": [ + "prompt", + "duration", + "aspect_ratio", + "negative_prompt", + "cfg_scale" + ], + "required": [ + "prompt" + ] + }, + "KlingVideoV16ProTextToVideoOutput": { + "title": "T2VOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://v2.fal.media/files/fb33a862b94d4d7195e610e4cbc5d392_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/v1.6/pro/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1.6/pro/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1.6/pro/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV16ProTextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1.6/pro/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV16ProTextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan/v2.1/1.3b/text-to-video", + "metadata": { + "display_name": "Wan-2.1 1.3B Text-to-Video", + "category": "text-to-video", + "description": "Wan-2.1 1.3B is a text-to-video model that generates high-quality videos with high visual quality and motion diversity from text promptsat faster speeds.", + "status": "active", + "tags": [ + "text to video", + "motion" + ], + "updated_at": "2026-01-26T21:44:04.135Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/Fal_Visuals_V1_02.jpg", + "model_url": "https://fal.run/fal-ai/wan/v2.1/1.3b/text-to-video", + "license_type": "commercial", + "date": "2025-02-27T00:00:00.000Z", + "group": { + "key": "wan-2.1", + "label": "Text-to-Video (1.3B)" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false + }, + "openapi": { + "error": { + "code": "expansion_failed", + "message": "OpenAPI schema not available for this endpoint" + } + } + }, + { + "endpoint_id": "fal-ai/wan-t2v", + "metadata": { + "display_name": "Wan-2.1 Text-to-Video", + "category": "text-to-video", + "description": "Wan-2.1 is a text-to-video model that generates high-quality videos with high visual quality and motion diversity from text prompts", + "status": "active", + "tags": [ + "text to video", + "motion" + ], + "updated_at": "2026-01-26T21:44:05.171Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/Fal_Visuals_V1_02.jpg", + "model_url": "https://fal.run/fal-ai/wan-t2v", + "license_type": "commercial", + "date": "2025-02-25T00:00:00.000Z", + "group": { + "key": "wan-2.1", + "label": "Text-to-Video" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false + }, + 
"openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan-t2v", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan-t2v queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan-t2v", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/Fal_Visuals_V1_02.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan-t2v", + "documentationUrl": "https://fal.ai/models/fal-ai/wan-t2v/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanT2vInput": { + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "num_frames", + "frames_per_second", + "seed", + "resolution", + "aspect_ratio", + "num_inference_steps", + "enable_safety_checker", + "enable_prompt_expansion", + "turbo_mode" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A stylish woman walks down a Tokyo street filled with warm glowing neon and animated city signage. She wears a black leather jacket, a long red dress, and black boots, and carries a black purse." + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt to guide video generation." + }, + "aspect_ratio": { + "enum": [ + "9:16", + "16:9" + ], + "description": "Aspect ratio of the generated video (16:9 or 9:16).", + "type": "string", + "title": "Aspect Ratio", + "default": "16:9" + }, + "resolution": { + "enum": [ + "480p", + "580p", + "720p" + ], + "description": "Resolution of the generated video (480p, 580p, or 720p).", + "type": "string", + "title": "Resolution", + "default": "720p" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility. If None, a random seed is chosen." + }, + "turbo_mode": { + "description": "If true, the video will be generated faster with no noticeable degradation in the visual quality.", + "type": "boolean", + "title": "Turbo Mode", + "default": false + }, + "frames_per_second": { + "minimum": 5, + "maximum": 24, + "type": "integer", + "description": "Frames per second of the generated video. Must be between 5 to 24.", + "title": "Frames Per Second", + "default": 16 + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": false + }, + "num_frames": { + "minimum": 81, + "title": "Num Frames", + "type": "integer", + "maximum": 100, + "description": "Number of frames to generate. 
Must be between 81 to 100 (inclusive).", + "default": 81 + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 40, + "type": "integer", + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "title": "Num Inference Steps", + "default": 30 + }, + "negative_prompt": { + "examples": [ + "bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + ], + "description": "Negative prompt for video generation.", + "type": "string", + "title": "Negative Prompt", + "default": "bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + }, + "enable_prompt_expansion": { + "examples": [ + false + ], + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + } + }, + "title": "WanT2VRequest", + "required": [ + "prompt" + ] + }, + "WanT2vOutput": { + "x-fal-order-properties": [ + "video", + "seed" + ], + "type": "object", + "properties": { + "seed": { + "description": "The seed used for generation.", + "type": "integer", + "title": "Seed" + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/web-examples/wan/t2v.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "WanT2VResponse", + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan-t2v/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
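`WanT2vInput` encodes its numeric limits only in the JSON schema — `num_frames` 81–100, `frames_per_second` 5–24, `num_inference_steps` 2–40 — so a client building payloads by hand may want to check them before submitting. A small validation sketch (the helper is hypothetical):

```ts
// Pre-validate the numeric bounds WanT2vInput declares above.
function assertInRange(
  name: string,
  value: number,
  min: number,
  max: number,
): void {
  if (value < min || value > max) {
    throw new RangeError(
      `${name} must be between ${min} and ${max}, got ${value}`,
    )
  }
}

const wanInput = {
  prompt: 'A stylish woman walks down a Tokyo street at night',
  num_frames: 81, // 81..100
  frames_per_second: 16, // 5..24
  num_inference_steps: 30, // 2..40
  turbo_mode: false, // faster generation per the schema description
}
assertInRange('num_frames', wanInput.num_frames, 81, 100)
assertInRange('frames_per_second', wanInput.frames_per_second, 5, 24)
assertInRange('num_inference_steps', wanInput.num_inference_steps, 2, 40)
```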
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-t2v/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/wan-t2v": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanT2vInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-t2v/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanT2vOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/veo2", + "metadata": { + "display_name": "Veo 2", + "category": "text-to-video", + "description": "Veo 2 creates videos with realistic motion and high quality output. Explore different styles and find your own with extensive camera controls.", + "status": "active", + "tags": [ + "motion", + "transformation" + ], + "updated_at": "2026-01-26T21:44:05.299Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/web-examples/veo2/veo2.webp", + "model_url": "https://fal.run/fal-ai/veo2", + "license_type": "commercial", + "date": "2025-02-21T00:00:00.000Z", + "group": { + "key": "veo2", + "label": "Text to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/veo2", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/veo2 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/veo2", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/web-examples/veo2/veo2.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/veo2", + "documentationUrl": "https://fal.ai/models/fal-ai/veo2/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Veo2Input": { + "title": "TextToVideoInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "The camera floats gently through rows of pastel-painted wooden beehives, buzzing honeybees gliding in and out of frame. The motion settles on the refined farmer standing at the center, his pristine white beekeeping suit gleaming in the golden afternoon light. He lifts a jar of honey, tilting it slightly to catch the light. Behind him, tall sunflowers sway rhythmically in the breeze, their petals glowing in the warm sunlight. The camera tilts upward to reveal a retro farmhouse with mint-green shutters, its walls dappled with shadows from swaying trees. Shot with a 35mm lens on Kodak Portra 400 film, the golden light creates rich textures on the farmer's gloves, marmalade jar, and weathered wood of the beehives." + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt describing the video you want to generate", + "minLength": 1 + }, + "duration": { + "enum": [ + "5s", + "6s", + "7s", + "8s" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds", + "default": "5s" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video", + "default": "16:9" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "A seed to use for the video generation" + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "A negative prompt to guide the video generation" + }, + "enhance_prompt": { + "title": "Enhance Prompt", + "type": "boolean", + "description": "Whether to enhance the video generation", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "duration", + "negative_prompt", + "enhance_prompt", + "seed" + ], + "required": [ + "prompt" + ] + }, + "Veo2Output": { + "title": "TextToVideoOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/tiger/83-YzufmOlsnhqq5ed382_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
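
A matching sketch for the Veo2Input schema above; its enums are narrow enough to encode directly as TypeScript union types. The endpoint path and field names come from the spec, the prompt value is illustrative, and the auth convention is assumed as before.

// Sketch: typed Veo 2 text-to-video payload (fields per Veo2Input above).
type Veo2Input = {
  prompt: string // required, minLength 1
  duration?: '5s' | '6s' | '7s' | '8s' // default '5s'
  aspect_ratio?: '16:9' | '9:16' // default '16:9'
  seed?: number
  negative_prompt?: string
  enhance_prompt?: boolean // default true
}

const input: Veo2Input = {
  prompt: 'A beekeeper lifting a jar of honey in golden afternoon light',
  duration: '5s',
  aspect_ratio: '16:9',
}

await fetch('https://queue.fal.run/fal-ai/veo2', {
  method: 'POST',
  headers: {
    Authorization: `Key ${process.env.FAL_KEY}`, // assumed fal auth convention
    'Content-Type': 'application/json',
  },
  body: JSON.stringify(input),
})
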
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/veo2/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/veo2/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/veo2": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Veo2Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/veo2/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Veo2Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/minimax/video-01-director", + "metadata": { + "display_name": "MiniMax (Hailuo AI) Video 01 Director", + "category": "text-to-video", + "description": "Generate video clips more accurately with respect to natural language descriptions and using camera movement instructions for shot control.", + "status": "active", + "tags": [ + "motion", + "transformation", + "camera-controls" + ], + "updated_at": "2026-01-26T21:44:27.867Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/red_clouds.jpg", + "model_url": "https://fal.run/fal-ai/minimax/video-01-director", + "date": "2025-02-11T00:00:00.000Z", + "group": { + "key": "minimax-video", + "label": "T2V-01 Director (Camera Control)" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/minimax/video-01-director", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/minimax/video-01-director queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/minimax/video-01-director", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/red_clouds.jpg", + 
"playgroundUrl": "https://fal.ai/models/fal-ai/minimax/video-01-director", + "documentationUrl": "https://fal.ai/models/fal-ai/minimax/video-01-director/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MinimaxVideo01DirectorInput": { + "title": "TextToVideoDirectorRequest", + "type": "object", + "properties": { + "prompt_optimizer": { + "description": "Whether to use the model's prompt optimizer", + "type": "boolean", + "title": "Prompt Optimizer", + "default": true + }, + "prompt": { + "examples": [ + "[Push in]Close up of a tense woman looks to the left, startled by a sound, in a darkened kitchen, Pots and pans hang ominously, the window in the kitchen is open and the wind softly blows the pans and creates an ominous mood. [Shake]the woman's shock turns to fear. Black-and-white film noir shot dimly lit, 1950s-style, with dramatic, high-contrast shadows. The overall atmosphere is reminiscent of Alfred Hitchcock's suspenseful storytelling, evoking a looming sense of dread with stark chiaroscuro lighting and a slight film-grain texture." + ], + "maxLength": 2000, + "type": "string", + "title": "Prompt", + "description": "Text prompt for video generation. Camera movement instructions can be added using square brackets (e.g. [Pan left] or [Zoom in]). You can use up to 3 combined movements per prompt. Supported movements: Truck left/right, Pan left/right, Push in/Pull out, Pedestal up/down, Tilt up/down, Zoom in/out, Shake, Tracking shot, Static shot. For example: [Truck left, Pan right, Zoom in]. For a more detailed guide, refer https://sixth-switch-2ac.notion.site/T2V-01-Director-Model-Tutorial-with-camera-movement-1886c20a98eb80f395b8e05291ad8645" + } + }, + "x-fal-order-properties": [ + "prompt", + "prompt_optimizer" + ], + "required": [ + "prompt" + ] + }, + "MinimaxVideo01DirectorOutput": { + "title": "T2VDirectorOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://fal.media/files/panda/4Et1qL4cbedh-OACEw7OF_output.mp4" + } + ], + "description": "The generated video", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/minimax/video-01-director/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/video-01-director/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/minimax/video-01-director": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxVideo01DirectorInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/video-01-director/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxVideo01DirectorOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pixverse/v3.5/text-to-video", + "metadata": { + "display_name": "PixVerse v3.5", + "category": "text-to-video", + "description": "Generate high quality video clips from text prompts using PixVerse v3.5", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:44:06.316Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "model_url": "https://fal.run/fal-ai/pixverse/v3.5/text-to-video", + "license_type": "commercial", + "date": "2025-01-29T00:00:00.000Z", + "group": { + "key": "pixverse", + "label": "Text to Video" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false + }, + "openapi": { + 
"openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pixverse/v3.5/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pixverse/v3.5/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pixverse/v3.5/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/pixverse/v3.5/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/pixverse/v3.5/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PixverseV35TextToVideoInput": { + "title": "TextToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Epic low-cut camera capture of a girl clad in ultraviolet threads, Peter Max art style depiction, luminous diamond skin glistening under a vast moon's radiance, embodied in a superhuman flight among mystical ruins, symbolizing a deity's ritual ascent, hyper-detailed" + ], + "title": "Prompt", + "type": "string" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "4:3", + "1:1", + "3:4", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video", + "default": "16:9" + }, + "resolution": { + "enum": [ + "360p", + "540p", + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video", + "default": "720p" + }, + "style": { + "enum": [ + "anime", + "3d_animation", + "clay", + "comic", + "cyberpunk" + ], + "title": "Style", + "type": "string", + "description": "The style of the generated video" + }, + "duration": { + "enum": [ + "5", + "8" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds. 8s videos cost double. 
1080p videos are limited to 5 seconds", + "default": "5" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n " + }, + "negative_prompt": { + "examples": [ + "blurry, low quality, low resolution, pixelated, noisy, grainy, out of focus, poorly lit, poorly exposed, poorly composed, poorly framed, poorly cropped, poorly color corrected, poorly color graded" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt to be used for the generation", + "default": "" + } + }, + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "resolution", + "duration", + "negative_prompt", + "style", + "seed" + ], + "required": [ + "prompt" + ] + }, + "PixverseV35TextToVideoOutput": { + "title": "VideoOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 2995630, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://fal.media/files/zebra/11UahivZ3XZ1tRlcEcgPq_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pixverse/v3.5/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v3.5/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
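
The PixVerse duration description above encodes two rules: 8-second clips cost double, and 1080p output is limited to 5 seconds. A small client-side guard, sketched below under the assumption that the server rejects the invalid combination rather than silently downgrading it, can catch the second rule before a request is queued.

// Sketch: client-side guard for the PixVerse v3.5 duration/resolution rule
// ("1080p videos are limited to 5 seconds" per the schema description).
type PixverseT2VInput = {
  prompt: string
  aspect_ratio?: '16:9' | '4:3' | '1:1' | '3:4' | '9:16'
  resolution?: '360p' | '540p' | '720p' | '1080p'
  duration?: '5' | '8' // '8' costs double per the schema
  style?: 'anime' | '3d_animation' | 'clay' | 'comic' | 'cyberpunk'
  negative_prompt?: string
  seed?: number
}

function assertValid(input: PixverseT2VInput): void {
  if (input.resolution === '1080p' && input.duration === '8') {
    throw new Error('1080p output is limited to 5-second videos')
  }
}

const input: PixverseT2VInput = { prompt: 'A neon koi pond at night', resolution: '720p', duration: '8' }
assertValid(input)
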
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v3.5/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV35TextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v3.5/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV35TextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pixverse/v3.5/text-to-video/fast", + "metadata": { + "display_name": "PixVerse v3.5 Fast", + "category": "text-to-video", + "description": "Generate high quality video clips quickly from text prompts using PixVerse v3.5 Fast", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:44:06.624Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "model_url": "https://fal.run/fal-ai/pixverse/v3.5/text-to-video/fast", + "license_type": "commercial", + "date": "2025-01-29T00:00:00.000Z", + "group": { + "key": "pixverse", + "label": "Text to Video (Fast)" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pixverse/v3.5/text-to-video/fast", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pixverse/v3.5/text-to-video/fast queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pixverse/v3.5/text-to-video/fast", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/pixverse/v3.5/text-to-video/fast", + "documentationUrl": "https://fal.ai/models/fal-ai/pixverse/v3.5/text-to-video/fast/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "PixverseV35TextToVideoFastInput": { + "title": "FastTextToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Epic low-cut camera capture of a girl clad in ultraviolet threads, Peter Max art style depiction, luminous diamond skin glistening under a vast moon's radiance, embodied in a superhuman flight among mystical ruins, symbolizing a deity's ritual ascent, hyper-detailed" + ], + "title": "Prompt", + "type": "string" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "4:3", + "1:1", + "3:4", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video", + "default": "16:9" + }, + "resolution": { + "enum": [ + "360p", + "540p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video", + "default": "720p" + }, + "style": { + "enum": [ + "anime", + "3d_animation", + "clay", + "comic", + "cyberpunk" + ], + "title": "Style", + "type": "string", + "description": "The style of the generated video" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n " + }, + "negative_prompt": { + "examples": [ + "blurry, low quality, low resolution, pixelated, noisy, grainy, out of focus, poorly lit, poorly exposed, poorly composed, poorly framed, poorly cropped, poorly color corrected, poorly color graded" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt to be used for the generation", + "default": "" + } + }, + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "resolution", + "negative_prompt", + "style", + "seed" + ], + "required": [ + "prompt" + ] + }, + "PixverseV35TextToVideoFastOutput": { + "title": "VideoOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 2995630, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://fal.media/files/zebra/11UahivZ3XZ1tRlcEcgPq_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pixverse/v3.5/text-to-video/fast/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v3.5/text-to-video/fast/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v3.5/text-to-video/fast": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV35TextToVideoFastInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/v3.5/text-to-video/fast/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseV35TextToVideoFastOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/luma-dream-machine/ray-2", + "metadata": { + "display_name": "Luma Ray 2", + "category": "text-to-video", + "description": "Ray2 is a large-scale video generative model capable of creating realistic visuals with natural, coherent motion.", + "status": "active", + "tags": [ + "motion", + "transformation" + ], + "updated_at": "2026-01-26T21:44:29.646Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/luma-dream-machine-ray-2.webp", + "model_url": "https://fal.run/fal-ai/luma-dream-machine/ray-2", + "date": "2025-01-27T00:00:00.000Z", + "group": { + "key": "luma-dream-machine", + "label": "Text to Video (Ray 2)" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/luma-dream-machine/ray-2", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/luma-dream-machine/ray-2 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/luma-dream-machine/ray-2", + "category": "text-to-video", + 
"thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/luma-dream-machine-ray-2.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/luma-dream-machine/ray-2", + "documentationUrl": "https://fal.ai/models/fal-ai/luma-dream-machine/ray-2/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LumaDreamMachineRay2Input": { + "title": "Ray2TextToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A herd of wild horses galloping across a dusty desert plain under a blazing midday sun, their manes flying in the wind; filmed in a wide tracking shot with dynamic motion, warm natural lighting, and an epic." + ], + "title": "Prompt", + "type": "string", + "maxLength": 5000, + "minLength": 3 + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "4:3", + "3:4", + "21:9", + "9:21" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video", + "default": "16:9" + }, + "resolution": { + "enum": [ + "540p", + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video (720p costs 2x more, 1080p costs 4x more)", + "default": "540p" + }, + "loop": { + "title": "Loop", + "type": "boolean", + "description": "Whether the video should loop (end of video is blended with the beginning)", + "default": false + }, + "duration": { + "enum": [ + "5s", + "9s" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video (9s costs 2x more)", + "default": "5s" + } + }, + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "loop", + "resolution", + "duration" + ], + "required": [ + "prompt" + ] + }, + "LumaDreamMachineRay2Output": { + "title": "Ray2T2VOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/penguin/Om3xjcOwiSCJwrXs7DUi__output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." 
+ }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/luma-dream-machine/ray-2/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/luma-dream-machine/ray-2/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/luma-dream-machine/ray-2": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LumaDreamMachineRay2Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/luma-dream-machine/ray-2/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LumaDreamMachineRay2Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/hunyuan-video-lora", + "metadata": { + "display_name": "Hunyuan Video LoRA Inference", + "category": "text-to-video", + "description": "Hunyuan Video is an Open video generation model with high visual quality, motion diversity, text-video alignment, and generation stability", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:44:31.103Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/hunyuan-video.webp?v=1", + "model_url": "https://fal.run/fal-ai/hunyuan-video-lora", + "date": "2025-01-16T00:00:00.000Z", + "group": { + "key": "hunyuan-text-to-video-lora", + "label": "Text-to-Video" + }, + "highlighted": false, + "duration_estimate": 4, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for 
fal-ai/hunyuan-video-lora", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/hunyuan-video-lora queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/hunyuan-video-lora", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/hunyuan-video.webp?v=1", + "playgroundUrl": "https://fal.ai/models/fal-ai/hunyuan-video-lora", + "documentationUrl": "https://fal.ai/models/fal-ai/hunyuan-video-lora/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "HunyuanVideoLoraInput": { + "title": "HunyuanT2VRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A stylish woman walks down a Tokyo street filled with warm glowing neon and animated city signage. She wears a black leather jacket, a long red dress, and black boots, and carries a black purse." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the video from." + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16" + ], + "title": "Aspect Ratio (W:H)", + "type": "string", + "description": "The aspect ratio of the video to generate.", + "default": "16:9" + }, + "resolution": { + "enum": [ + "480p", + "580p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the video to generate.", + "default": "720p" + }, + "loras": { + "description": "\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "examples": [], + "title": "Loras", + "default": [] + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": false + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed to use for generating the video." + }, + "num_frames": { + "enum": [ + "129", + "85" + ], + "title": "Number of Frames", + "type": "string", + "description": "The number of frames to generate.", + "default": 129 + }, + "pro_mode": { + "title": "Pro Mode", + "type": "boolean", + "description": "By default, generations are done with 35 steps. 
Pro mode does 55 steps which results in higher quality videos but will take more time and cost 2x more billing units.", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "seed", + "pro_mode", + "aspect_ratio", + "resolution", + "num_frames", + "enable_safety_checker", + "loras" + ], + "required": [ + "prompt" + ] + }, + "HunyuanVideoLoraOutput": { + "title": "HunyuanT2VResponse", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generating the video." + }, + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/kangaroo/y5-1YTGpun17eSeggZMzX_video-1733468228.mp4" + } + ], + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Scale", + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + } + }, + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/hunyuan-video-lora/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-video-lora/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
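
One quirk in the HunyuanVideoLoraInput schema above is worth flagging: `num_frames` is declared as a string enum ("129" | "85") yet its default is the integer 129. A minimal request sketch follows, sending the string form to match the declared type; the LoRA URL is hypothetical.

// Sketch: Hunyuan video request with one LoRA (fields per the schema above).
interface LoraWeight {
  path: string // URL or path to the LoRA weights
  scale?: number // 0-4, default 1
}

const input = {
  prompt: 'A stylish woman walking down a neon-lit Tokyo street',
  loras: [{ path: 'https://example.com/my-style-lora.safetensors', scale: 1 }] as LoraWeight[], // hypothetical URL
  // The schema types num_frames as a string enum ('129' | '85') but defaults
  // it to the integer 129; the string form matches the declared type.
  num_frames: '129',
  resolution: '720p',
  pro_mode: false, // true = 55 steps instead of 35, 2x billing units
}
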
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-video-lora": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HunyuanVideoLoraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-video-lora/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HunyuanVideoLoraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/transpixar", + "metadata": { + "display_name": "TransPixar V1", + "category": "text-to-video", + "description": "Transform text into stunning videos with TransPixar - an AI model that generates both RGB footage and alpha channels, enabling seamless compositing and creative video effects.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:44:31.493Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/transpixar.webp", + "model_url": "https://fal.run/fal-ai/transpixar", + "github_url": "https://huggingface.co/THUDM/CogVideoX-5b/blob/main/LICENSE", + "license_type": "commercial", + "date": "2025-01-14T00:00:00.000Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/transpixar", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/transpixar queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/transpixar", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/transpixar.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/transpixar", + "documentationUrl": "https://fal.ai/models/fal-ai/transpixar/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "TranspixarInput": { + "title": "BaseInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A cloud of dust erupting and dispersing like an explosion." 
+ ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the video from." + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related video to show you.\n ", + "default": 7 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 24 + }, + "export_fps": { + "minimum": 4, + "maximum": 32, + "type": "integer", + "title": "Export Fps", + "description": "The target FPS of the video", + "default": 8 + }, + "negative_prompt": { + "examples": [ + "Distorted, discontinuous, Ugly, blurry, low resolution, motionless, static, disfigured, disconnected limbs, Ugly faces, incomplete arms" + ], + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to generate video from", + "default": "" + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n " + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "num_inference_steps", + "seed", + "guidance_scale", + "export_fps" + ], + "required": [ + "prompt" + ] + }, + "TranspixarOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the video." + }, + "videos": { + "examples": [ + [ + { + "file_size": 146468, + "file_name": "rgb.mp4", + "content_type": "application/octet-stream", + "url": "https://v3.fal.media/files/kangaroo/G6gkFsuyU5L7sJ55nZUPU_rgb.mp4" + }, + { + "file_size": 106894, + "file_name": "alpha.mp4", + "content_type": "application/octet-stream", + "url": "https://v3.fal.media/files/lion/g7PBZfQEH9SoPXYgeyl5P_alpha.mp4" + } + ] + ], + "title": "Videos", + "type": "array", + "description": "The URL to the generated video", + "items": { + "$ref": "#/components/schemas/File" + } + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated video. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + } + }, + "x-fal-order-properties": [ + "videos", + "timings", + "seed", + "prompt" + ], + "required": [ + "videos", + "timings", + "seed", + "prompt" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. 
It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/transpixar/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/transpixar/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/transpixar": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TranspixarInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/transpixar/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TranspixarOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/cogvideox-5b", + "metadata": { + "display_name": "CogVideoX-5B", + "category": "text-to-video", + "description": "Generate videos from prompts using CogVideoX-5B", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:44:31.365Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/model_tests/cogvideox/panda.gif.gif", + "model_url": "https://fal.run/fal-ai/cogvideox-5b", + "github_url": "https://huggingface.co/THUDM/CogVideoX-5b/blob/main/LICENSE", + "date": "2025-01-14T00:00:00.000Z", + "group": { + "key": "cogvideox-5b", + "label": "Text to Video" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/cogvideox-5b", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/cogvideox-5b queue.", + "x-fal-metadata": { + "endpointId": 
"fal-ai/cogvideox-5b", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/model_tests/cogvideox/panda.gif.gif", + "playgroundUrl": "https://fal.ai/models/fal-ai/cogvideox-5b", + "documentationUrl": "https://fal.ai/models/fal-ai/cogvideox-5b/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Cogvideox5bInput": { + "title": "BaseInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A garden comes to life as a kaleidoscope of butterflies flutters amidst the blossoms, their delicate wings casting shadows on the petals below. In the background, a grand fountain cascades water with a gentle splendor, its rhythmic sound providing a soothing backdrop. Beneath the cool shade of a mature tree, a solitary wooden chair invites solitude and reflection, its smooth surface worn by the touch of countless visitors seeking a moment of tranquility in nature's embrace." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the video from." + }, + "use_rife": { + "title": "Use Rife", + "type": "boolean", + "description": "Use RIFE for video interpolation", + "default": true + }, + "loras": { + "description": "\n The LoRAs to use for the image generation. 
We currently support one lora.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "examples": [], + "title": "Loras", + "default": [] + }, + "video_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Video Size", + "description": "The size of the generated video.", + "default": { + "height": 480, + "width": 720 + } + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related video to show you.\n ", + "default": 7 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 50 + }, + "export_fps": { + "minimum": 4, + "maximum": 32, + "type": "integer", + "title": "Export Fps", + "description": "The target FPS of the video", + "default": 16 + }, + "negative_prompt": { + "examples": [ + "Distorted, discontinuous, Ugly, blurry, low resolution, motionless, static, disfigured, disconnected limbs, Ugly faces, incomplete arms" + ], + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to generate video from", + "default": "" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n " + } + }, + "x-fal-order-properties": [ + "prompt", + "video_size", + "negative_prompt", + "loras", + "num_inference_steps", + "seed", + "guidance_scale", + "use_rife", + "export_fps" + ], + "required": [ + "prompt" + ] + }, + "Cogvideox5bOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the video." + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated video. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + }, + "video": { + "description": "The URL to the generated video", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "timings", + "seed", + "prompt" + ], + "required": [ + "video", + "timings", + "seed", + "prompt" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Scale", + "description": "\n The scale of the LoRA weight. 
This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + } + }, + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/cogvideox-5b/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/cogvideox-5b/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/cogvideox-5b": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Cogvideox5bInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/cogvideox-5b/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Cogvideox5bOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/v1.6/standard/text-to-video", + "metadata": { + "display_name": "Kling 1.6", + "category": "text-to-video", + "description": "Generate video clips from your prompts using Kling 1.6 (std)", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:44:08.882Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/elephant/28-vTrv3W2BT-u8_cy7mt.png", + "model_url": "https://fal.run/fal-ai/kling-video/v1.6/standard/text-to-video", + "license_type": "commercial", + "date": "2025-01-07T00:00:00.000Z", + "group": { + "key": "kling-video-v1-6", + "label": "Text to Video v1.6 (std)" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 6, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling-video/v1.6/standard/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-video/v1.6/standard/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/v1.6/standard/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://fal.media/files/elephant/28-vTrv3W2BT-u8_cy7mt.png", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/v1.6/standard/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/v1.6/standard/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoV16StandardTextToVideoInput": { + "title": "TextToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A stylish woman walks down a Tokyo street filled with warm glowing neon and animated city signage. She wears a black leather jacket, a long red dress, and black boots, and carries a black purse." + ], + "title": "Prompt", + "type": "string", + "maxLength": 2500 + }, + "duration": { + "enum": [ + "5", + "10" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds", + "default": "5" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video frame", + "default": "16:9" + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "maxLength": 2500, + "default": "blur, distort, and low quality" + }, + "cfg_scale": { + "minimum": 0, + "title": "Cfg Scale", + "type": "number", + "maximum": 1, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt.\n ", + "default": 0.5 + } + }, + "x-fal-order-properties": [ + "prompt", + "duration", + "aspect_ratio", + "negative_prompt", + "cfg_scale" + ], + "required": [ + "prompt" + ] + }, + "KlingVideoV16StandardTextToVideoOutput": { + "title": "T2VOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://v2.fal.media/files/fb33a862b94d4d7195e610e4cbc5d392_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/v1.6/standard/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1.6/standard/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1.6/standard/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV16StandardTextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1.6/standard/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV16StandardTextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/minimax/video-01-live", + "metadata": { + "display_name": "MiniMax (Hailuo AI) Video 01 Live", + "category": "text-to-video", + "description": "Generate video clips from your prompts using MiniMax model", + "status": "active", + "tags": [ + "motion", + "transformation" + ], + "updated_at": "2026-01-26T21:44:35.025Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/Fal_Visuals_V1_016.jpg", + "model_url": "https://fal.run/fal-ai/minimax/video-01-live", + "date": "2024-12-16T00:00:00.000Z", + "group": { + "key": "minimax-video", + "label": "Text to Video (Live)" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/minimax/video-01-live", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/minimax/video-01-live queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/minimax/video-01-live", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/Fal_Visuals_V1_016.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/minimax/video-01-live", + "documentationUrl": "https://fal.ai/models/fal-ai/minimax/video-01-live/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MinimaxVideo01LiveInput": { + "title": "TextToVideoLiveRequest", + "type": "object", + "properties": { + "prompt_optimizer": { + "description": "Whether to use the model's prompt optimizer", + "type": "boolean", + "title": "Prompt Optimizer", + "default": true + }, + "prompt": { + "examples": [ + "A rugged middle-aged man with wheat-colored skin and a full beard streaked with gray stands in the harsh sunlight of a desert outpost. His curly hair is windswept, and sweat drips down the bridge of his slightly crooked nose. His faded utility jacket and weathered boots are caked in dust, while his sharp, watchful eyes scan the horizon." + ], + "maxLength": 2000, + "type": "string", + "title": "Prompt" + } + }, + "x-fal-order-properties": [ + "prompt", + "prompt_optimizer" + ], + "required": [ + "prompt" + ] + }, + "MinimaxVideo01LiveOutput": { + "title": "T2VLiveOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://fal.media/files/monkey/EbJRdZfaJbNiJBUvPta3c_output.mp4" + } + ], + "description": "The generated video", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/minimax/video-01-live/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/video-01-live/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/minimax/video-01-live": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxVideo01LiveInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/video-01-live/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxVideo01LiveOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/v1/standard/text-to-video", + "metadata": { + "display_name": "Kling 1.0", + "category": "text-to-video", + "description": "Generate video clips from your prompts using Kling 1.0", + "status": "active", + "tags": [ + "motion" + ], + "updated_at": "2026-01-26T21:44:36.145Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/monkey/GoSnDOnX0Tea08N7iI7oM.jpeg", + "model_url": "https://fal.run/fal-ai/kling-video/v1/standard/text-to-video", + "date": "2024-12-03T00:00:00.000Z", + "group": { + "key": "kling-video", + "label": "Text to Video (standard)" + }, + "highlighted": false, + "duration_estimate": 6, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling-video/v1/standard/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-video/v1/standard/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/v1/standard/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://fal.media/files/monkey/GoSnDOnX0Tea08N7iI7oM.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/v1/standard/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/v1/standard/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoV1StandardTextToVideoInput": { + "title": "V1TextToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A stylish woman walks down a Tokyo street filled with warm glowing neon and animated city signage. She wears a black leather jacket, a long red dress, and black boots, and carries a black purse." + ], + "title": "Prompt", + "type": "string", + "maxLength": 2500 + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video frame", + "default": "16:9" + }, + "advanced_camera_control": { + "title": "Advanced Camera Control", + "description": "Advanced Camera control parameters", + "allOf": [ + { + "$ref": "#/components/schemas/CameraControl" + } + ] + }, + "cfg_scale": { + "minimum": 0, + "title": "Cfg Scale", + "type": "number", + "maximum": 1, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt.\n ", + "default": 0.5 + }, + "duration": { + "enum": [ + "5", + "10" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds", + "default": "5" + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "maxLength": 2500, + "default": "blur, distort, and low quality" + }, + "camera_control": { + "enum": [ + "down_back", + "forward_up", + "right_turn_forward", + "left_turn_forward" + ], + "title": "Camera Control", + "type": "string", + "description": "Camera control parameters" + } + }, + "x-fal-order-properties": [ + "prompt", + "duration", + "aspect_ratio", + "negative_prompt", + "cfg_scale", + "camera_control", + "advanced_camera_control" + ], + "required": [ + "prompt" + ] + }, + "KlingVideoV1StandardTextToVideoOutput": { + "title": "T2VOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://v2.fal.media/files/fb33a862b94d4d7195e610e4cbc5d392_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "CameraControl": { + "title": "CameraControl", + "type": "object", + "properties": { + "movement_type": { + "enum": [ + "horizontal", + "vertical", + "pan", + "tilt", + "roll", + "zoom" + ], + "title": "Movement Type", + "type": "string", + "examples": [ + "horizontal" + ], + "description": "The type of camera movement" + }, + "movement_value": { + "minimum": -10, + "title": "Movement Value", + "type": "integer", + "examples": [ + 10 + ], + "description": "The value of the camera movement", + "maximum": 10 + } + }, + "x-fal-order-properties": [ + "movement_type", + "movement_value" + ], + "required": [ + "movement_type", + "movement_value" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": 
"File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/v1/standard/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1/standard/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1/standard/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV1StandardTextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1/standard/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV1StandardTextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/v1.5/pro/text-to-video", + "metadata": { + "display_name": "Kling 1.5", + "category": "text-to-video", + "description": "Generate video clips from your prompts using Kling 1.5 (pro)", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:44:11.247Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/elephant/28-vTrv3W2BT-u8_cy7mt.png", + "model_url": "https://fal.run/fal-ai/kling-video/v1.5/pro/text-to-video", + "license_type": "commercial", + "date": "2024-11-25T00:00:00.000Z", + "group": { + "key": "kling-video-v1-5", + "label": "Text to Video v1.5 (pro)" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 6, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling-video/v1.5/pro/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-video/v1.5/pro/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/v1.5/pro/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://fal.media/files/elephant/28-vTrv3W2BT-u8_cy7mt.png", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/v1.5/pro/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/v1.5/pro/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoV15ProTextToVideoInput": { + "title": "TextToVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A stylish woman walks down a Tokyo street filled with warm glowing neon and animated city signage. She wears a black leather jacket, a long red dress, and black boots, and carries a black purse." + ], + "title": "Prompt", + "type": "string", + "maxLength": 2500 + }, + "duration": { + "enum": [ + "5", + "10" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds", + "default": "5" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video frame", + "default": "16:9" + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "maxLength": 2500, + "default": "blur, distort, and low quality" + }, + "cfg_scale": { + "minimum": 0, + "title": "Cfg Scale", + "type": "number", + "maximum": 1, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt.\n ", + "default": 0.5 + } + }, + "x-fal-order-properties": [ + "prompt", + "duration", + "aspect_ratio", + "negative_prompt", + "cfg_scale" + ], + "required": [ + "prompt" + ] + }, + "KlingVideoV15ProTextToVideoOutput": { + "title": "T2VOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://v2.fal.media/files/fb33a862b94d4d7195e610e4cbc5d392_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/v1.5/pro/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1.5/pro/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1.5/pro/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV15ProTextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v1.5/pro/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV15ProTextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/mochi-v1", + "metadata": { + "display_name": "Mochi 1", + "category": "text-to-video", + "description": "Mochi 1 preview is an open state-of-the-art video generation model with high-fidelity motion and strong prompt adherence in preliminary evaluation.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:44:37.793Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/mochi-v1.webp?v=1", + "thumbnail_animated_url": "https://storage.googleapis.com/falserverless/gallery/mochi-v1-animated.webp?v=1", + "model_url": "https://fal.run/fal-ai/mochi-v1", + "date": "2024-11-07T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/mochi-v1", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/mochi-v1 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/mochi-v1", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/mochi-v1.webp?v=1", + "playgroundUrl": "https://fal.ai/models/fal-ai/mochi-v1", + "documentationUrl": "https://fal.ai/models/fal-ai/mochi-v1/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MochiV1Input": { + "title": "MochiT2VInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A dog running in a field." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate a video from." + }, + "enable_prompt_expansion": { + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": true + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed to use for generating the video." + }, + "negative_prompt": { + "examples": [ + "Blurry, shaky footage" + ], + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt for the video.", + "default": "" + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "seed", + "enable_prompt_expansion" + ], + "required": [ + "prompt" + ] + }, + "MochiV1Output": { + "title": "MochiT2VOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://fal.media/files/zebra/GScPi-7ma3Fn8r1O1on4z_output_1729631871.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/mochi-v1/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/mochi-v1/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/mochi-v1": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MochiV1Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/mochi-v1/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MochiV1Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/hunyuan-video", + "metadata": { + "display_name": "Hunyuan Video", + "category": "text-to-video", + "description": "Hunyuan Video is an open video generation model with high visual quality, motion diversity, text-video alignment, and generation stability. This endpoint generates videos from text descriptions.", + "status": "active", + "tags": [ + "motion" + ], + "updated_at": "2026-01-26T21:44:17.507Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/hunyuan-video.webp?v=1", + "model_url": "https://fal.run/fal-ai/hunyuan-video", + "license_type": "commercial", + "date": "2024-10-22T00:00:00.000Z", + "group": { + "key": "hunyuan-text-to-video", + "label": "Text-to-Video" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 4, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/hunyuan-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/hunyuan-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/hunyuan-video", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/hunyuan-video.webp?v=1", + "playgroundUrl": "https://fal.ai/models/fal-ai/hunyuan-video", + "documentationUrl": "https://fal.ai/models/fal-ai/hunyuan-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url."
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "HunyuanVideoInput": { + "title": "HunyuanVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A stylish woman walks down a Tokyo street filled with warm glowing neon and animated city signage. She wears a black leather jacket, a long red dress, and black boots, and carries a black purse." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the video from." + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16" + ], + "title": "Aspect Ratio (W:H)", + "type": "string", + "description": "The aspect ratio of the video to generate.", + "default": "16:9" + }, + "resolution": { + "enum": [ + "480p", + "580p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the video to generate.", + "default": "720p" + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": false + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 30, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to run. Lower gets faster results, higher gets better results.", + "default": 30 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed to use for generating the video." + }, + "num_frames": { + "enum": [ + "129", + "85" + ], + "title": "Number of Frames", + "type": "string", + "description": "The number of frames to generate.", + "default": 129 + }, + "pro_mode": { + "title": "Pro Mode", + "type": "boolean", + "description": "By default, generations are done with 35 steps. Pro mode does 55 steps which results in higher quality videos but will take more time and cost 2x more billing units.", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "num_inference_steps", + "seed", + "pro_mode", + "aspect_ratio", + "resolution", + "num_frames", + "enable_safety_checker" + ], + "required": [ + "prompt" + ] + }, + "HunyuanVideoOutput": { + "title": "HunyuanT2VResponse", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generating the video." + }, + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/kangaroo/y5-1YTGpun17eSeggZMzX_video-1733468228.mp4" + } + ], + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." 
+ }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/hunyuan-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HunyuanVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HunyuanVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/v1/pro/text-to-video", + "metadata": { + "display_name": "Kling 1.0", + "category": "text-to-video", + "description": "Generate video clips from your prompts using Kling 1.0 (pro)", + "status": "active", + "tags": [ + "motion" + ], + "updated_at": "2026-01-26T21:44:39.109Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/monkey/GoSnDOnX0Tea08N7iI7oM.jpeg", + "model_url": "https://fal.run/fal-ai/kling-video/v1/pro/text-to-video", + "date": "2024-10-04T00:00:00.000Z", + "group": { + "key": "kling-video", + "label": "Text to Video (pro)" + }, + "highlighted": false, + "duration_estimate": 6, + "pinned": false + }, + "openapi": { + "error": { + "code": "expansion_failed", + "message": "OpenAPI schema not available for this endpoint" + } + } + }, + { + "endpoint_id": "fal-ai/ltx-video", + "metadata": { + "display_name": "LTX Video (preview)", 
+ "category": "text-to-video", + "description": "Generate videos from prompts using LTX Video", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:44:39.484Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/model_tests/cogvideox/panda.gif.gif", + "model_url": "https://fal.run/fal-ai/ltx-video", + "license_type": "research", + "date": "2024-10-04T00:00:00.000Z", + "group": { + "key": "ltx-video", + "label": "Text to Video" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-video", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/model_tests/cogvideox/panda.gif.gif", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx-video", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LtxVideoInput": { + "title": "TextToVideoInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A man stands waist-deep in a crystal-clear mountain pool, his back turned to a massive, thundering waterfall that cascades down jagged cliffs behind him. He wears a dark blue swimming shorts and his muscular back glistens with water droplets. The camera moves in a dynamic circular motion around him, starting from his right side and sweeping left, maintaining a slightly low angle that emphasizes the towering height of the waterfall. As the camera moves, the man slowly turns his head to follow its movement, his expression one of awe as he gazes up at the natural wonder. The waterfall creates a misty atmosphere, with sunlight filtering through the spray to create rainbow refractions. The water churns and ripples around him, reflecting the dramatic landscape. The handheld camera movement adds a subtle shake that enhances the raw, untamed energy of the scene. The lighting is natural and bright, with the sun positioned behind the waterfall, creating a backlit effect that silhouettes the falling water and illuminates the mist." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the video from." 
+ }, + "guidance_scale": { + "maximum": 10, + "type": "number", + "title": "Guidance Scale", + "description": "The guidance scale to use.", + "exclusiveMinimum": 1, + "default": 3 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed to use for random number generation." + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Number of Inference Steps", + "description": "The number of inference steps to take.", + "default": 30 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to generate the video from.", + "default": "low quality, worst quality, deformed, distorted, disfigured, motion smear, motion artifacts, fused fingers, bad anatomy, weird hand, ugly" + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "seed", + "num_inference_steps", + "guidance_scale" + ], + "required": [ + "prompt" + ] + }, + "LtxVideoOutput": { + "title": "Output", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for random number generation." + }, + "video": { + "title": "Video", + "description": "The generated video.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LtxVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LtxVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/fast-svd/text-to-video", + "metadata": { + "display_name": "Stable Video Diffusion", + "category": "text-to-video", + "description": "Generate short video clips from your prompts using SVD v1.1", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:44:40.992Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/svd_rocket.gif", + "model_url": "https://fal.run/fal-ai/fast-svd/text-to-video", + "github_url": "https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt-1-1/blob/main/LICENSE", + "date": "2024-09-16T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/fast-svd/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/fast-svd/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/fast-svd/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/svd_rocket.gif", + "playgroundUrl": "https://fal.ai/models/fal-ai/fast-svd/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/fast-svd/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FastSvdTextToVideoInput": { + "title": "FastSVDTextInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A rocket flying that is about to take off" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use as a starting point for the generation." 
+ }, + "cond_aug": { + "minimum": 0, + "maximum": 10, + "type": "number", + "title": "Cond Aug", + "description": "\n The conditoning augmentation determines the amount of noise that will be\n added to the conditioning frame. The higher the number, the more noise\n there will be, and the less the video will look like the initial image.\n Increase it for more motion.\n ", + "default": 0.02 + }, + "deep_cache": { + "enum": [ + "none", + "minimum", + "medium", + "high" + ], + "title": "Deep Cache", + "type": "string", + "description": "\n Enabling [DeepCache](https://github.com/horseee/DeepCache) will make the execution\n faster, but might sometimes degrade overall quality. The higher the setting, the\n faster the execution will be, but the more quality might be lost.\n ", + "default": "none" + }, + "fps": { + "minimum": 1, + "maximum": 25, + "type": "integer", + "title": "Fps", + "description": "\n The FPS of the generated video. The higher the number, the faster the video will\n play. Total video length is 25 frames.\n ", + "default": 10 + }, + "motion_bucket_id": { + "minimum": 1, + "maximum": 255, + "type": "integer", + "title": "Motion Bucket Id", + "description": "\n The motion bucket id determines the motion of the generated video. The\n higher the number, the more motion there will be.\n ", + "default": 127 + }, + "video_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Video Size", + "description": "The size of the generated video.", + "default": "landscape_16_9" + }, + "steps": { + "minimum": 1, + "maximum": 100, + "type": "integer", + "title": "Steps", + "description": "\n The number of steps to run the model for. The higher the number the better\n the quality and longer it will take to generate.\n ", + "default": 20 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to use as a starting point for the generation.", + "default": "unrealistic, saturated, high contrast, big nose, painting, drawing, sketch, cartoon, anime, manga, render, CG, 3d, watermark, signature, label" + } + }, + "x-fal-order-properties": [ + "prompt", + "motion_bucket_id", + "cond_aug", + "seed", + "steps", + "deep_cache", + "fps", + "negative_prompt", + "video_size" + ], + "required": [ + "prompt" + ] + }, + "FastSvdTextToVideoOutput": { + "title": "FastSVDOutput", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. 
It will be the same value as the one passed in the\n input, or the randomly generated one that was used in case none was passed.\n\n " + }, + "video": { + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/fast-svd/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-svd/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully."
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/fast-svd/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastSvdTextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-svd/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastSvdTextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/fast-svd-lcm/text-to-video", + "metadata": { + "display_name": "Stable Video Diffusion Turbo", + "category": "text-to-video", + "description": "Generate short video clips from your images using SVD v1.1 at Lightning Speed", + "status": "active", + "tags": [ + "lcm", + "diffusion", + "turbo" + ], + "updated_at": "2026-01-26T21:44:47.603Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/fast-svd-turbo.gif", + "model_url": "https://fal.run/fal-ai/fast-svd-lcm/text-to-video", + "github_url": "https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt-1-1/blob/main/LICENSE", + "date": "2024-06-03T00:00:00.000Z", + "group": { + "key": "stable-video-diffusion-turbo", + "label": "Text to Video" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/fast-svd-lcm/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/fast-svd-lcm/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/fast-svd-lcm/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/fast-svd-turbo.gif", + "playgroundUrl": "https://fal.ai/models/fal-ai/fast-svd-lcm/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/fast-svd-lcm/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FastSvdLcmTextToVideoInput": { + "title": "FastSVDTextInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A rocket flying that is about to take off" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use as a starting point for the generation." + }, + "cond_aug": { + "minimum": 0, + "maximum": 10, + "type": "number", + "title": "Cond Aug", + "description": "\n The conditioning augmentation determines the amount of noise that will be\n added to the conditioning frame. The higher the number, the more noise\n there will be, and the less the video will look like the initial image.\n Increase it for more motion.\n ", + "default": 0.02 + }, + "fps": { + "minimum": 1, + "maximum": 25, + "type": "integer", + "title": "Fps", + "description": "\n The FPS of the generated video. The higher the number, the faster the video will\n play. Total video length is 25 frames.\n ", + "default": 10 + }, + "motion_bucket_id": { + "minimum": 1, + "maximum": 255, + "type": "integer", + "title": "Motion Bucket Id", + "description": "\n The motion bucket id determines the motion of the generated video. The\n higher the number, the more motion there will be.\n ", + "default": 127 + }, + "video_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Video Size", + "description": "The size of the generated video.", + "default": "landscape_16_9" + }, + "steps": { + "minimum": 1, + "maximum": 20, + "type": "integer", + "title": "Steps", + "description": "\n The number of steps to run the model for. The higher the number, the better\n the quality and the longer it will take to generate.\n ", + "default": 4 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + } + }, + "x-fal-order-properties": [ + "prompt", + "motion_bucket_id", + "cond_aug", + "seed", + "steps", + "fps", + "video_size" + ], + "required": [ + "prompt" + ] + }, + "FastSvdLcmTextToVideoOutput": { + "title": "FastSVDOutput", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image.
It will be the same value as the one passed in the\n input, or the randomly generated one that was used in case none was passed.\n\n " + }, + "video": { + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/fast-svd-lcm/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-svd-lcm/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully."
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/fast-svd-lcm/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastSvdLcmTextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-svd-lcm/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastSvdLcmTextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/t2v-turbo", + "metadata": { + "display_name": "T2V Turbo - Video Crafter", + "category": "text-to-video", + "description": "Generate short video clips from your prompts", + "status": "active", + "tags": [ + "turbo" + ], + "updated_at": "2026-01-26T21:44:53.684Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/monkey/yirvhUzF8h7DVpCoDGsdU.png", + "thumbnail_animated_url": "https://fal.media/files/rabbit/fhnhJJvJT0CwDKoSV1IqO.webp", + "model_url": "https://fal.run/fal-ai/t2v-turbo", + "github_url": "https://t2v-turbo.github.io/", + "date": "2024-03-08T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/t2v-turbo", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/t2v-turbo queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/t2v-turbo", + "category": "text-to-video", + "thumbnailUrl": "https://fal.media/files/monkey/yirvhUzF8h7DVpCoDGsdU.png", + "playgroundUrl": "https://fal.ai/models/fal-ai/t2v-turbo", + "documentationUrl": "https://fal.ai/models/fal-ai/t2v-turbo/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "T2vTurboInput": { + "title": "Input", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "a dog wearing vr goggles on a boat" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate images from" + }, + "guidance_scale": { + "minimum": 0.1, + "maximum": 30, + "type": "number", + "title": "Guidance Scale", + "description": "The guidance scale", + "default": 7.5 + }, + "seed": { + "anyOf": [ + { + "minimum": 0, + "maximum": 203279, + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "The seed to use for the random number generator" + }, + "export_fps": { + "minimum": 1, + "maximum": 24, + "type": "integer", + "title": "Export Fps", + "description": "The FPS of the exported video", + "default": 8 + }, + "num_frames": { + "minimum": 16, + "maximum": 32, + "type": "integer", + "title": "Num Frames", + "description": "The number of frames to generate", + "default": 16 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 12, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of steps to sample", + "default": 4 + } + }, + "x-fal-order-properties": [ + "prompt", + "seed", + "num_inference_steps", + "guidance_scale", + "num_frames", + "export_fps" + ], + "required": [ + "prompt" + ] + }, + "T2vTurboOutput": { + "title": "Output", + "type": "object", + "properties": { + "video": { + "description": "The URL to the generated video", + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/t2v-turbo/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/t2v-turbo/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/t2v-turbo": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/T2vTurboInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/t2v-turbo/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/T2vTurboOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/fast-animatediff/text-to-video", + "metadata": { + "display_name": "AnimateDiff", + "category": "text-to-video", + "description": "Animate your ideas!", + "status": "active", + "tags": [ + "animation", + "stylized" + ], + "updated_at": "2026-01-26T21:44:54.741Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/fast-animatediff-t2v.webp", + "thumbnail_animated_url": "https://storage.googleapis.com/falserverless/gallery/fast-animatediff-t2v-animated.webp", + "model_url": "https://fal.run/fal-ai/fast-animatediff/text-to-video", + "github_url": "https://github.com/guoyww/AnimateDiff/blob/main/LICENSE.txt", + "date": "2024-02-21T00:00:00.000Z", + "group": { + "key": "fast-animatediff", + "label": "Text to Video" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/fast-animatediff/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/fast-animatediff/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/fast-animatediff/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/fast-animatediff-t2v.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/fast-animatediff/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/fast-animatediff/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FastAnimatediffTextToVideoInput": { + "title": "AnimateDiffT2VInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "masterpiece, best quality, 1girl, solo, cherry blossoms, hanami, pink flower, white flower, spring season, wisteria, petals, flower, plum blossoms, outdoors, falling petals, white hair, black eyes", + "panda playing a guitar, on a boat, in the ocean, high quality, high quality, ultra HD, realistic" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the video. Be as descriptive as possible for best results." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + }, + "fps": { + "minimum": 1, + "title": "Fps", + "type": "integer", + "maximum": 16, + "description": "Number of frames per second to extract from the video.", + "default": 8 + }, + "video_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Video Size", + "description": "The size of the video to generate.", + "default": "square" + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance scale (CFG)", + "type": "number", + "maximum": 20, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 7.5 + }, + "num_frames": { + "minimum": 1, + "title": "Num Frames", + "type": "integer", + "maximum": 32, + "description": "The number of frames to generate for the video.", + "default": 16 + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps to perform.", + "default": 25 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. 
moustache, blurry, low resolution).\n ", + "default": "(bad quality, worst quality:1.2), ugly faces, bad anime" + }, + "motions": { + "title": "Motions", + "type": "array", + "description": "The motions to apply to the video.", + "uniqueItems": true, + "items": { + "enum": [ + "zoom-out", + "zoom-in", + "pan-left", + "pan-right", + "tilt-up", + "tilt-down" + ], + "type": "string" + } + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "num_frames", + "num_inference_steps", + "guidance_scale", + "seed", + "fps", + "motions", + "video_size" + ], + "required": [ + "prompt" + ] + }, + "FastAnimatediffTextToVideoOutput": { + "title": "AnimateDiffT2VOutput", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "Seed used for generating the video." + }, + "video": { + "examples": [ + { + "url": "https://fal-cdn.batuhan-941.workers.dev/files/kangaroo/DSrFBOk9XXIplm_kukI4n.mp4" + } + ], + "title": "Video", + "description": "Generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/fast-animatediff/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-animatediff/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/fast-animatediff/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastAnimatediffTextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-animatediff/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastAnimatediffTextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/fast-animatediff/turbo/text-to-video", + "metadata": { + "display_name": "AnimateDiff Turbo", + "category": "text-to-video", + "description": "Animate your ideas in lightning speed!", + "status": "active", + "tags": [ + "animation", + "stylized", + "turbo" + ], + "updated_at": "2026-01-26T21:44:56.747Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/fast-animatediff-t2v-turbo.webp", + "thumbnail_animated_url": "https://storage.googleapis.com/falserverless/gallery/fast-animatediff-t2v-turbo-animated.webp", + "model_url": "https://fal.run/fal-ai/fast-animatediff/turbo/text-to-video", + "github_url": "https://github.com/guoyww/AnimateDiff/blob/main/LICENSE.txt", + "date": "2024-02-13T00:00:00.000Z", + "group": { + "key": "fast-animatediff-turbo", + "label": "Text to Video" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/fast-animatediff/turbo/text-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/fast-animatediff/turbo/text-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/fast-animatediff/turbo/text-to-video", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/fast-animatediff-t2v-turbo.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/fast-animatediff/turbo/text-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/fast-animatediff/turbo/text-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + 
"properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FastAnimatediffTurboTextToVideoInput": { + "title": "AnimateDiffT2VTurboInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "masterpiece, best quality, 1girl, solo, cherry blossoms, hanami, pink flower, white flower, spring season, wisteria, petals, flower, plum blossoms, outdoors, falling petals, white hair, black eyes", + "panda playing a guitar, on a boat, in the ocean, high quality, high quality, ultra HD, realistic" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the video. Be as descriptive as possible for best results." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + }, + "fps": { + "minimum": 1, + "title": "Fps", + "type": "integer", + "maximum": 16, + "description": "Number of frames per second to extract from the video.", + "default": 8 + }, + "video_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Video Size", + "description": "The size of the video to generate.", + "default": "square" + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance Scale", + "type": "number", + "maximum": 20, + "description": "The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.", + "default": 1 + }, + "num_frames": { + "minimum": 1, + "title": "Num Frames", + "type": "integer", + "maximum": 64, + "description": "The number of frames to generate for the video.", + "default": 16 + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 8, + "description": "The number of inference steps to perform. 4-12 is recommended for turbo mode.", + "default": 4 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. 
moustache, blurry, low resolution).\n ", + "default": "(bad quality, worst quality:1.2), ugly faces, bad anime" + }, + "motions": { + "title": "Motions", + "type": "array", + "description": "The motions to apply to the video.", + "uniqueItems": true, + "items": { + "enum": [ + "zoom-out", + "zoom-in", + "pan-left", + "pan-right", + "tilt-up", + "tilt-down" + ], + "type": "string" + } + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "num_frames", + "num_inference_steps", + "guidance_scale", + "seed", + "fps", + "motions", + "video_size" + ], + "required": [ + "prompt" + ] + }, + "FastAnimatediffTurboTextToVideoOutput": { + "title": "AnimateDiffT2VOutput", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "Seed used for generating the video." + }, + "video": { + "examples": [ + { + "url": "https://fal-cdn.batuhan-941.workers.dev/files/kangaroo/DSrFBOk9XXIplm_kukI4n.mp4" + } + ], + "title": "Video", + "description": "Generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "maximum": 14142, + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "maximum": 14142, + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/fast-animatediff/turbo/text-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-animatediff/turbo/text-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/fast-animatediff/turbo/text-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastAnimatediffTurboTextToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-animatediff/turbo/text-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastAnimatediffTurboTextToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/minimax/video-01", + "metadata": { + "display_name": "MiniMax (Hailuo AI) Video 01", + "category": "text-to-video", + "description": "Generate video clips from your prompts using MiniMax model", + "status": "active", + "tags": [ + "motion", + "transformation" + ], + "updated_at": "2026-01-26T21:44:57.044Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/red_clouds.jpg", + "model_url": "https://fal.run/fal-ai/minimax/video-01", + "date": "2024-02-13T00:00:00.000Z", + "group": { + "key": "minimax-video", + "label": "Text to Video" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/minimax/video-01", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/minimax/video-01 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/minimax/video-01", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/red_clouds.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/minimax/video-01", + "documentationUrl": "https://fal.ai/models/fal-ai/minimax/video-01/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MinimaxVideo01Input": { + "title": "TextToVideoRequest", + "type": "object", + "properties": { + "prompt_optimizer": { + "description": "Whether to use the model's prompt optimizer", + "type": "boolean", + "title": "Prompt Optimizer", + "default": true + }, + "prompt": { + "examples": [ + "A stylish woman walks down a Tokyo street filled with warm glowing neon and animated city signage. She wears a black leather jacket, a long red dress, and black boots, and carries a black purse." + ], + "maxLength": 2000, + "type": "string", + "title": "Prompt" + } + }, + "x-fal-order-properties": [ + "prompt", + "prompt_optimizer" + ], + "required": [ + "prompt" + ] + }, + "MinimaxVideo01Output": { + "title": "VideoOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://fal.media/files/monkey/vNZqQV_WgC9MhoidClLyw_output.mp4" + } + ], + "description": "The generated video", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/minimax/video-01/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/video-01/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/minimax/video-01": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxVideo01Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/minimax/video-01/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MinimaxVideo01Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/animatediff-sparsectrl-lcm", + "metadata": { + "display_name": "Animatediff SparseCtrl LCM", + "category": "text-to-video", + "description": "Animate Your Drawings with Latent Consistency Models!", + "status": "active", + "tags": [ + "lcm", + "animation", + "stylized" + ], + "updated_at": "2026-01-26T21:45:00.609Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/ad-sparsectrl-lcm.png", + "model_url": "https://fal.run/fal-ai/animatediff-sparsectrl-lcm", + "github_url": "https://github.com/guoyww/AnimateDiff/blob/main/LICENSE.txt", + "date": "2023-11-09T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/animatediff-sparsectrl-lcm", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/animatediff-sparsectrl-lcm queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/animatediff-sparsectrl-lcm", + "category": "text-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/ad-sparsectrl-lcm.png", + "playgroundUrl": "https://fal.ai/models/fal-ai/animatediff-sparsectrl-lcm", + "documentationUrl": "https://fal.ai/models/fal-ai/animatediff-sparsectrl-lcm/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "AnimatediffSparsectrlLcmInput": { + "title": "AnimatediffLCMInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Drone footage, futuristic city at night, synthwave, vaporware, neon lights, highly detailed, masterpeice, high quality" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results." + }, + "seed": { + "examples": [ + 42 + ], + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable\n Diffusion will output the same image every time.\n " + }, + "controlnet_type": { + "enum": [ + "scribble", + "rgb" + ], + "title": "Controlnet Type", + "type": "string", + "description": "The type of controlnet to use for generating the video. The controlnet determines how the video will be animated.", + "default": "scribble" + }, + "keyframe_2_index": { + "examples": [ + 15 + ], + "title": "Keyframe 2 Index", + "type": "integer", + "description": "The frame index of the third keyframe to use for the generation.", + "nullable": false, + "default": 0 + }, + "keyframe_0_index": { + "examples": [ + 0 + ], + "title": "Keyframe 0 Index", + "type": "integer", + "description": "The frame index of the first keyframe to use for the generation.", + "nullable": false, + "default": 0 + }, + "keyframe_1_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/scribble2/scribble_2_2.png" + ], + "title": "Keyframe 1 Image Url", + "type": "string", + "description": "The URL of the second keyframe to use for the generation.", + "nullable": true + }, + "keyframe_1_index": { + "examples": [ + 8 + ], + "title": "Keyframe 1 Index", + "type": "integer", + "description": "The frame index of the second keyframe to use for the generation.", + "nullable": false, + "default": 0 + }, + "guidance_scale": { + "minimum": 0, + "maximum": 2, + "type": "integer", + "title": "Classifier-Free Guidance scale (CFG)", + "description": "The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.", + "default": 1 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 12, + "type": "integer", + "title": "Number of inference steps", + "description": "Increasing the amount of steps tells Stable Diffusion that it should take more steps to generate your final result which can increase the amount of detail in your image.", + "default": 4 + }, + "keyframe_2_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/scribble2/scribble_2_3.png" + ], + "title": "Keyframe 2 Image Url", + "type": "string", + "description": "The URL of the third keyframe to use for the generation.", + "nullable": true + }, + "negative_prompt": { + "examples": [ + "blurry, low resolution, bad, ugly, low quality, pixelated, interpolated, compression artifacts, noisey, grainy" + ], + "title": "Negative Prompt", + "type": "string", + 
"description": "\n The negative prompt to use. Use it to specify what you don't want.\n ", + "default": "" + }, + "keyframe_0_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/scribble2/scribble_2_1.png" + ], + "title": "Keyframe 0 Image Url", + "type": "string", + "description": "The URL of the first keyframe to use for the generation.", + "nullable": true + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "controlnet_type", + "num_inference_steps", + "guidance_scale", + "seed", + "keyframe_0_image_url", + "keyframe_0_index", + "keyframe_1_image_url", + "keyframe_1_index", + "keyframe_2_image_url", + "keyframe_2_index" + ], + "required": [ + "prompt" + ] + }, + "AnimatediffSparsectrlLcmOutput": { + "title": "AnimatediffLCMOutput", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used to generate the video." + }, + "video": { + "title": "Video", + "description": "Generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "examples": [ + "https://url.to/generated/file/z9RV14K95DvU.png" + ], + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url", + "content_type", + "file_name", + "file_size" + ] + } + } + }, + "paths": { + "/fal-ai/animatediff-sparsectrl-lcm/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/animatediff-sparsectrl-lcm/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/animatediff-sparsectrl-lcm": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AnimatediffSparsectrlLcmInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/animatediff-sparsectrl-lcm/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AnimatediffSparsectrlLcmOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/packages/typescript/ai-fal/json/fal.models.training.json b/packages/typescript/ai-fal/json/fal.models.training.json new file mode 100644 index 00000000..90b2d9a6 --- /dev/null +++ b/packages/typescript/ai-fal/json/fal.models.training.json @@ -0,0 +1,13892 @@ +{ + "generated_at": "2026-01-28T02:51:51.856Z", + "total_models": 35, + "category": "training", + "models": [ + { + "endpoint_id": "fal-ai/flux-krea-trainer", + "metadata": { + "display_name": "Train Flux Krea LoRA", + "category": "training", + "description": "Train styles, people and other subjects at blazing speeds using the FLUX.1 Krea [dev] base model.", + "status": "active", + "tags": [ + "lora", + "personalization" + ], + "updated_at": "2026-01-26T21:43:08.430Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/rabbit/uKINGMekBEYrVNUULujts_RVU-Kvlhsr5rEwqG7Uc-s_56e80afe7c1243d5a2f5eed5868ae63d.jpg", + "model_url": "https://fal.run/fal-ai/flux-krea-trainer", + "license_type": "commercial", + "date": "2025-08-01T23:43:35.378Z", + "highlighted": true, + "kind": "training", + "pinned": false, + "inference_endpoint_ids": [ + "fal-ai/flux-krea-lora" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-krea-trainer", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-krea-trainer queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-krea-trainer", + "category": "training", + "thumbnailUrl": "https://v3.fal.media/files/rabbit/uKINGMekBEYrVNUULujts_RVU-Kvlhsr5rEwqG7Uc-s_56e80afe7c1243d5a2f5eed5868ae63d.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-krea-trainer", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-krea-trainer/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxKreaTrainerInput": { + "title": "PublicInput", + "type": "object", + "properties": { + "images_data_url": { + "title": "Images Data Url", + "type": "string", + "description": "\n URL to zip archive with images. Try to use at least 4 images in general the more the better.\n\n In addition to images the archive can contain text files with captions. Each text file should have the same name as the image file it corresponds to.\n " + }, + "is_input_format_already_preprocessed": { + "title": "Is Input Format Already Preprocessed", + "type": "boolean", + "description": "Specifies whether the input data is already in a processed format. When set to False (default), the system expects raw input where image files and their corresponding caption files share the same name (e.g., 'photo.jpg' and 'photo.txt'). Set to True if your data is already in a preprocessed format.", + "default": false + }, + "trigger_word": { + "title": "Trigger Word", + "type": "string", + "description": "Trigger word to be used in the captions. If None, a trigger word will not be used.\n If no captions are provide the trigger_word will be used instead of captions. If captions are the trigger word will not be used.\n ", + "nullable": true + }, + "steps": { + "description": "Number of steps to train the LoRA on.", + "type": "integer", + "examples": [ + 1000 + ], + "maximum": 10000, + "title": "Steps", + "minimum": 1 + }, + "data_archive_format": { + "title": "Data Archive Format", + "type": "string", + "description": "The format of the archive. If not specified, the format will be inferred from the URL.", + "nullable": true + }, + "is_style": { + "title": "Is Style", + "type": "boolean", + "description": "If True, the training will be for a style. This will deactivate segmentation, captioning and will use trigger word instead. Use the trigger word to specify the style.", + "default": false + }, + "create_masks": { + "title": "Create Masks", + "type": "boolean", + "description": "If True segmentation masks will be used in the weight the training loss. 
For people a face mask is used if possible.", + "default": true + } + }, + "x-fal-order-properties": [ + "images_data_url", + "trigger_word", + "create_masks", + "steps", + "is_style", + "is_input_format_already_preprocessed", + "data_archive_format" + ], + "required": [ + "images_data_url" + ] + }, + "FluxKreaTrainerOutput": { + "title": "Output", + "type": "object", + "properties": { + "config_file": { + "title": "Config File", + "description": "URL to the training configuration file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "debug_preprocessed_output": { + "title": "Debug Preprocessed Output", + "description": "URL to the preprocessed images.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "diffusers_lora_file": { + "title": "Diffusers Lora File", + "description": "URL to the trained diffusers lora weights.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "diffusers_lora_file", + "config_file", + "debug_preprocessed_output" + ], + "required": [ + "diffusers_lora_file", + "config_file" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-krea-trainer/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-krea-trainer/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-krea-trainer": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxKreaTrainerInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-krea-trainer/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxKreaTrainerOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-kontext-trainer", + "metadata": { + "display_name": "Flux Kontext Trainer", + "category": "training", + "description": "LoRA trainer for FLUX.1 Kontext [dev]", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:23.103Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/monkey/pYXiffttc2Skv36wflufu_dec4efe0d27e4527b64acfbc0e91536a.jpg", + "model_url": "https://fal.run/fal-ai/flux-kontext-trainer", + "license_type": "commercial", + "date": "2025-06-26T13:55:28.583Z", + "highlighted": true, + "kind": "training", + "pinned": false, + "inference_endpoint_ids": [ + "fal-ai/flux-kontext-lora" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-kontext-trainer", + "version": "1.0.0", + "description": "LoRA trainer for FLUX.1 Kontext [dev]. Train custom LoRAs to extend the image editing functionality of FLUX.1 Kontext [dev]", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-kontext-trainer", + "category": "training", + "thumbnailUrl": "https://v3.fal.media/files/monkey/pYXiffttc2Skv36wflufu_dec4efe0d27e4527b64acfbc0e91536a.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-kontext-trainer", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-kontext-trainer/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
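Every schema in these generated files follows the same fal queue protocol just shown for fal-ai/flux-krea-trainer: POST the input to https://queue.fal.run/{endpoint_id} to enqueue, poll the status route until it reports COMPLETED, then GET the result route. A minimal TypeScript sketch of that loop; the FAL_KEY variable and the "Key " authorization prefix are assumptions inferred from the apiKeyAuth scheme above, and the polling interval is arbitrary:

// Sketch of the queue lifecycle shared by every endpoint in this file:
// enqueue, poll status (logs=1 includes log lines), then fetch the result.
// Assumption: FAL_KEY holds a fal.ai key and the "Key " prefix is correct.
const BASE = 'https://queue.fal.run'
const ENDPOINT = 'fal-ai/flux-krea-trainer'
const headers = {
  Authorization: `Key ${process.env.FAL_KEY}`,
  'Content-Type': 'application/json',
}

async function trainKreaLora(imagesDataUrl: string) {
  // 1. Enqueue: the POST body is FluxKreaTrainerInput; the reply is a QueueStatus.
  const submitted = await fetch(`${BASE}/${ENDPOINT}`, {
    method: 'POST',
    headers,
    body: JSON.stringify({ images_data_url: imagesDataUrl, steps: 1000 }),
  })
  const { request_id } = (await submitted.json()) as { request_id: string }

  // 2. Poll until the QueueStatus reaches COMPLETED.
  for (;;) {
    const res = await fetch(
      `${BASE}/${ENDPOINT}/requests/${request_id}/status?logs=1`,
      { headers },
    )
    const { status } = (await res.json()) as { status: string }
    if (status === 'COMPLETED') break
    await new Promise((r) => setTimeout(r, 5000)) // arbitrary 5s interval
  }

  // 3. The result route returns FluxKreaTrainerOutput (diffusers_lora_file, etc.).
  const result = await fetch(`${BASE}/${ENDPOINT}/requests/${request_id}`, { headers })
  return (await result.json()) as { diffusers_lora_file: { url: string } }
}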
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxKontextTrainerInput": { + "title": "Input", + "type": "object", + "properties": { + "steps": { + "description": "Number of steps to train for", + "type": "integer", + "minimum": 2, + "maximum": 10000, + "title": "Steps", + "default": 1000 + }, + "image_data_url": { + "title": "Image Data Url", + "type": "string", + "description": "\n URL to the input data zip archive.\n\n The zip should contain pairs of images. The images should be named:\n\n ROOT_start.EXT and ROOT_end.EXT\n For example:\n photo_start.jpg and photo_end.jpg\n\n The zip can also contain a text file for each image pair. The text file should be named:\n ROOT.txt\n For example:\n photo.txt\n\n This text file can be used to specify the edit instructions for the image pair.\n\n If no text file is provided, the default_caption will be used.\n\n If no default_caption is provided, the training will fail.\n " + }, + "learning_rate": { + "title": "Learning Rate", + "type": "number", + "default": 0.0001 + }, + "default_caption": { + "title": "Default Caption", + "type": "string", + "description": "Default caption to use when caption files are missing. If None, missing captions will cause an error." + }, + "output_lora_format": { + "enum": [ + "fal", + "comfy" + ], + "title": "Output Lora Format", + "type": "string", + "description": "Dictates the naming scheme for the output weights", + "default": "fal" + } + }, + "x-fal-order-properties": [ + "image_data_url", + "steps", + "learning_rate", + "default_caption", + "output_lora_format" + ], + "required": [ + "image_data_url" + ] + }, + "FluxKontextTrainerOutput": { + "title": "Output", + "type": "object", + "properties": { + "config_file": { + "title": "Config File", + "description": "URL to the configuration file for the trained model.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "diffusers_lora_file": { + "title": "Diffusers Lora File", + "description": "URL to the trained diffusers lora weights.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "diffusers_lora_file", + "config_file" + ], + "required": [ + "diffusers_lora_file", + "config_file" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-kontext-trainer/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-kontext-trainer/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-kontext-trainer": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxKontextTrainerInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-kontext-trainer/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxKontextTrainerOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-lora-fast-training", + "metadata": { + "display_name": "Train Flux LoRA", + "category": "training", + "description": "Train styles, people and other subjects at blazing speeds.", + "status": "active", + "tags": [ + "lora", + "personalization" + ], + "updated_at": "2026-01-26T21:44:09.199Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/video-training.webp", + "model_url": "https://fal.run/fal-ai/flux-lora-fast-training", + "license_type": "commercial", + "date": "2025-01-01T00:00:00.000Z", + "highlighted": true, + "kind": "training", + "pinned": false, + "inference_endpoint_ids": [ + "fal-ai/flux-lora" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-lora-fast-training", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-lora-fast-training queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-lora-fast-training", + "category": "training", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/video-training.webp", + "playgroundUrl": 
"https://fal.ai/models/fal-ai/flux-lora-fast-training", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-lora-fast-training/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxLoraFastTrainingInput": { + "title": "PublicInput", + "type": "object", + "properties": { + "images_data_url": { + "title": "Images Data Url", + "type": "string", + "description": "\n URL to zip archive with images. Try to use at least 4 images in general the more the better.\n\n In addition to images the archive can contain text files with captions. Each text file should have the same name as the image file it corresponds to.\n " + }, + "is_input_format_already_preprocessed": { + "title": "Is Input Format Already Preprocessed", + "type": "boolean", + "description": "Specifies whether the input data is already in a processed format. When set to False (default), the system expects raw input where image files and their corresponding caption files share the same name (e.g., 'photo.jpg' and 'photo.txt'). Set to True if your data is already in a preprocessed format.", + "default": false + }, + "trigger_word": { + "title": "Trigger Word", + "type": "string", + "description": "Trigger word to be used in the captions. If None, a trigger word will not be used.\n If no captions are provide the trigger_word will be used instead of captions. If captions are the trigger word will not be used.\n ", + "nullable": true + }, + "steps": { + "description": "Number of steps to train the LoRA on.", + "type": "integer", + "minimum": 1, + "maximum": 10000, + "examples": [ + 1000 + ], + "title": "Steps" + }, + "data_archive_format": { + "title": "Data Archive Format", + "type": "string", + "description": "The format of the archive. If not specified, the format will be inferred from the URL.", + "nullable": true + }, + "is_style": { + "title": "Is Style", + "type": "boolean", + "description": "If True, the training will be for a style. This will deactivate segmentation, captioning and will use trigger word instead. Use the trigger word to specify the style.", + "default": false + }, + "create_masks": { + "title": "Create Masks", + "type": "boolean", + "description": "If True segmentation masks will be used in the weight the training loss. 
For people a face mask is used if possible.", + "default": true + } + }, + "x-fal-order-properties": [ + "images_data_url", + "trigger_word", + "create_masks", + "steps", + "is_style", + "is_input_format_already_preprocessed", + "data_archive_format" + ], + "required": [ + "images_data_url" + ] + }, + "FluxLoraFastTrainingOutput": { + "title": "Output", + "type": "object", + "properties": { + "config_file": { + "title": "Config File", + "description": "URL to the training configuration file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "debug_preprocessed_output": { + "title": "Debug Preprocessed Output", + "description": "URL to the preprocessed images.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "diffusers_lora_file": { + "title": "Diffusers Lora File", + "description": "URL to the trained diffusers lora weights.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "diffusers_lora_file", + "config_file", + "debug_preprocessed_output" + ], + "required": [ + "diffusers_lora_file", + "config_file" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-lora-fast-training/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-lora-fast-training/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
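The images_data_url convention shared by the two Flux LoRA trainers above (each caption .txt sharing its image's base name inside one zip) is simple to produce programmatically. A sketch using jszip, which is only an illustrative choice of library; the file layout is the part the schema actually specifies:

// Builds a training archive in the layout images_data_url expects:
// photo.jpg next to photo.txt, portrait.png next to portrait.txt, ...
import JSZip from 'jszip'
import { readFile, writeFile } from 'node:fs/promises'

async function buildDataset(
  pairs: Array<{ image: string; caption: string }>, // local image path + caption text
): Promise<void> {
  const zip = new JSZip()
  for (const { image, caption } of pairs) {
    const name = image.split('/').pop()! // e.g. "photo.jpg"
    zip.file(name, await readFile(image))
    zip.file(name.replace(/\.[^.]+$/, '.txt'), caption) // same stem, .txt extension
  }
  await writeFile('dataset.zip', await zip.generateAsync({ type: 'nodebuffer' }))
}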
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-lora-fast-training": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxLoraFastTrainingInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-lora-fast-training/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxLoraFastTrainingOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-lora-portrait-trainer", + "metadata": { + "display_name": "Train Flux LoRAs For Portraits", + "category": "training", + "description": "FLUX LoRA training optimized for portrait generation, with bright highlights, excellent prompt following and highly detailed results.", + "status": "active", + "tags": [ + "lora", + "personalization" + ], + "updated_at": "2026-01-26T21:44:11.904Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/rabbit/kh3cW2FXV5m3jigGYxcVU_3657df3e2e324e628af565129148736d.jpg", + "model_url": "https://fal.run/fal-ai/flux-lora-portrait-trainer", + "license_type": "commercial", + "date": "2024-11-07T00:00:00.000Z", + "highlighted": true, + "kind": "training", + "pinned": false, + "inference_endpoint_ids": [ + "fal-ai/flux-lora" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-lora-portrait-trainer", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flux-lora-portrait-trainer queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-lora-portrait-trainer", + "category": "training", + "thumbnailUrl": "https://fal.media/files/rabbit/kh3cW2FXV5m3jigGYxcVU_3657df3e2e324e628af565129148736d.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-lora-portrait-trainer", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-lora-portrait-trainer/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FluxLoraPortraitTrainerInput": { + "title": "PublicInput", + "type": "object", + "properties": { + "images_data_url": { + "description": "\n URL to zip archive with images of a consistent style. Try to use at least 10 images, although more is better.\n\n In addition to images the archive can contain text files with captions. Each text file should have the same name as the image file it corresponds to.\n\n The captions can include a special string `[trigger]`. If a trigger_word is specified, it will replace `[trigger]` in the captions.\n ", + "type": "string", + "title": "Images Data Url" + }, + "trigger_phrase": { + "description": "Trigger phrase to be used in the captions. If None, a trigger word will not be used.\n If no captions are provide the trigger_work will be used instead of captions. If captions are provided, the trigger word will replace the `[trigger]` string in the captions.\n ", + "type": "string", + "title": "Trigger Phrase", + "nullable": true + }, + "resume_from_checkpoint": { + "description": "URL to a checkpoint to resume training from.", + "type": "string", + "title": "Resume From Checkpoint", + "default": "" + }, + "subject_crop": { + "examples": [ + true + ], + "description": "If True, the subject will be cropped from the image.", + "type": "boolean", + "title": "Subject Crop", + "default": true + }, + "learning_rate": { + "description": "Learning rate to use for training.", + "type": "number", + "minimum": 0.000001, + "maximum": 0.001, + "title": "Learning Rate", + "examples": [ + 0.0002 + ], + "default": 0.00009 + }, + "multiresolution_training": { + "examples": [ + true + ], + "description": "If True, multiresolution training will be used.", + "type": "boolean", + "title": "Multiresolution Training", + "default": true + }, + "steps": { + "description": "Number of steps to train the LoRA on.", + "type": "integer", + "minimum": 1, + "maximum": 10000, + "title": "Steps", + "examples": [ + 1000 + ], + "default": 2500 + }, + "data_archive_format": { + "description": "The format of the archive. 
If not specified, the format will be inferred from the URL.", + "type": "string", + "title": "Data Archive Format", + "nullable": true + }, + "create_masks": { + "examples": [ + false + ], + "description": "If True, masks will be created for the subject.", + "type": "boolean", + "title": "Create Masks", + "default": false + } + }, + "x-fal-order-properties": [ + "images_data_url", + "trigger_phrase", + "learning_rate", + "steps", + "multiresolution_training", + "subject_crop", + "data_archive_format", + "resume_from_checkpoint", + "create_masks" + ], + "required": [ + "images_data_url" + ] + }, + "FluxLoraPortraitTrainerOutput": { + "title": "Output", + "type": "object", + "properties": { + "config_file": { + "description": "URL to the training configuration file.", + "title": "Config File", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "diffusers_lora_file": { + "description": "URL to the trained diffusers lora weights.", + "title": "Diffusers Lora File", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "diffusers_lora_file", + "config_file" + ], + "required": [ + "diffusers_lora_file", + "config_file" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-lora-portrait-trainer/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-lora-portrait-trainer/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-lora-portrait-trainer": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxLoraPortraitTrainerInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-lora-portrait-trainer/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FluxLoraPortraitTrainerOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/z-image-base-trainer", + "metadata": { + "display_name": "Z-Image Trainer", + "category": "training", + "description": "Fast LoRA trainer for Z-Image, a super fast text-to-image model of 6B parameters developed by Tongyi-MAI.", + "status": "active", + "tags": [ + "lora", + "personalization", + "trainer" + ], + "updated_at": "2026-01-27T22:58:54.194Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8c1e96/pZXwtJOoyFLNKCmXZo865_a1f570b30c7140379cac4faa237a6619.jpg", + "model_url": "https://fal.run/fal-ai/z-image-base-trainer", + "license_type": "commercial", + "date": "2026-01-27T21:47:15.156Z", + "highlighted": false, + "kind": "training", + "pinned": false, + "inference_endpoint_ids": [ + "fal-ai/z-image/base/lora" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/z-image-base-trainer", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/z-image-base-trainer queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/z-image-base-trainer", + "category": "training", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8c1e96/pZXwtJOoyFLNKCmXZo865_a1f570b30c7140379cac4faa237a6619.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/z-image-base-trainer", + "documentationUrl": "https://fal.ai/models/fal-ai/z-image-base-trainer/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
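The numeric bounds on the portrait trainer's fields live only in the schema above, so a thin typed wrapper can surface them at the call site. A sketch; the interface mirrors FluxLoraPortraitTrainerInput, while the validation is an illustrative addition rather than API behavior:

// Mirrors FluxLoraPortraitTrainerInput; only images_data_url is required.
interface PortraitTrainerInput {
  images_data_url: string
  trigger_phrase?: string | null // replaces `[trigger]` in captions
  learning_rate?: number // schema bounds: 1e-6 .. 1e-3, default 9e-5
  steps?: number // schema bounds: 1 .. 10000, default 2500
  multiresolution_training?: boolean // default true
  subject_crop?: boolean // default true
  data_archive_format?: string | null
  resume_from_checkpoint?: string
  create_masks?: boolean // default false
}

function portraitInput(
  overrides: Partial<PortraitTrainerInput> & { images_data_url: string },
): PortraitTrainerInput {
  const lr = overrides.learning_rate ?? 0.00009 // schema default
  if (lr < 0.000001 || lr > 0.001) {
    throw new Error('learning_rate outside schema bounds [1e-6, 1e-3]')
  }
  const steps = overrides.steps ?? 2500 // schema default
  if (steps < 1 || steps > 10000) {
    throw new Error('steps outside schema bounds [1, 10000]')
  }
  return { ...overrides, learning_rate: lr, steps }
}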
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ZImageBaseTrainerInput": { + "x-fal-order-properties": [ + "image_data_url", + "steps", + "default_caption", + "learning_rate" + ], + "type": "object", + "properties": { + "steps": { + "description": "Number of steps to train for", + "type": "integer", + "minimum": 10, + "maximum": 40000, + "title": "Steps", + "default": 2000 + }, + "image_data_url": { + "description": "\n URL to the input data zip archive.\n\n The zip should contain pairs of images and corresponding captions.\n\n The images should be named: ROOT.EXT. For example: 001.jpg\n\n The corresponding captions should be named: ROOT.txt. For example: 001.txt\n\n If no text file is provided for an image, the default_caption will be used.\n ", + "type": "string", + "title": "Image Data Url" + }, + "learning_rate": { + "description": "Learning rate.", + "type": "number", + "title": "Learning Rate", + "default": 0.0005 + }, + "default_caption": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Default caption to use when caption files are missing. If None, missing captions will cause an error.", + "title": "Default Caption" + } + }, + "title": "Input", + "required": [ + "image_data_url" + ] + }, + "ZImageBaseTrainerOutput": { + "x-fal-order-properties": [ + "diffusers_lora_file", + "config_file" + ], + "type": "object", + "properties": { + "config_file": { + "description": "URL to the configuration file for the trained model.", + "$ref": "#/components/schemas/File" + }, + "diffusers_lora_file": { + "description": "URL to the trained diffusers lora weights.", + "$ref": "#/components/schemas/File" + } + }, + "title": "Output", + "required": [ + "diffusers_lora_file", + "config_file" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/z-image-base-trainer/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/z-image-base-trainer/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/z-image-base-trainer": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ZImageBaseTrainerInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/z-image-base-trainer/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ZImageBaseTrainerOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/z-image-turbo-trainer-v2", + "metadata": { + "display_name": "Z Image Turbo Trainer V2", + "category": "training", + "description": "Fast LoRA trainer for Z-Image-Turbo, a super fast text-to-image model of 6B parameters developed by Tongyi-MAI.", + "status": "active", + "tags": [ + "lora", + "personalization", + "trainer" + ], + "updated_at": "2026-01-26T22:40:13.588Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8b9eef/T4yyPE_lpuMlcnmqlF8Ns_ab635223ffad4b2cb49d48d926eae60f.jpg", + "model_url": "https://fal.run/fal-ai/z-image-turbo-trainer-v2", + "license_type": "commercial", + "date": "2026-01-24T03:10:13.337Z", + "highlighted": false, + "kind": "training", + "pinned": false, + "inference_endpoint_ids": [ + "fal-ai/z-image/turbo/lora" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/z-image-turbo-trainer-v2", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/z-image-turbo-trainer-v2 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/z-image-turbo-trainer-v2", + "category": "training", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8b9eef/T4yyPE_lpuMlcnmqlF8Ns_ab635223ffad4b2cb49d48d926eae60f.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/z-image-turbo-trainer-v2", + "documentationUrl": "https://fal.ai/models/fal-ai/z-image-turbo-trainer-v2/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
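Both Z-Image trainers use a flat naming scheme (001.jpg paired with 001.txt) and fall back to default_caption only when a caption file is missing, failing otherwise. A local pre-flight check along those lines; the helper is hypothetical, and the API itself only ever sees the uploaded zip:

// Pre-flight check for the Z-Image layout: every ROOT.EXT image needs a
// ROOT.txt caption unless a default_caption will be supplied in the request.
import { readdir } from 'node:fs/promises'

async function checkZImageDataset(dir: string, defaultCaption?: string): Promise<number> {
  const files = await readdir(dir)
  const captions = new Set(files.filter((f) => f.endsWith('.txt')))
  const images = files.filter((f) => /\.(jpe?g|png|webp)$/i.test(f))
  for (const image of images) {
    const caption = image.replace(/\.[^.]+$/, '.txt')
    if (!captions.has(caption) && defaultCaption == null) {
      // Mirrors the schema: a missing caption with no default_caption is an error.
      throw new Error(`${image} has no caption file and no default_caption was given`)
    }
  }
  return images.length
}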
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ZImageTurboTrainerV2Input": { + "x-fal-order-properties": [ + "image_data_url", + "steps", + "default_caption", + "learning_rate" + ], + "type": "object", + "properties": { + "steps": { + "description": "Number of steps to train for", + "type": "integer", + "minimum": 10, + "title": "Steps", + "maximum": 40000, + "default": 2000 + }, + "image_data_url": { + "title": "Image Data Url", + "type": "string", + "description": "\n URL to the input data zip archive.\n\n The zip should contain pairs of images and corresponding captions.\n\n The images should be named: ROOT.EXT. For example: 001.jpg\n\n The corresponding captions should be named: ROOT.txt. For example: 001.txt\n\n If no text file is provided for an image, the default_caption will be used.\n " + }, + "learning_rate": { + "description": "Learning rate.", + "type": "number", + "title": "Learning Rate", + "default": 0.0005 + }, + "default_caption": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Default Caption", + "description": "Default caption to use when caption files are missing. If None, missing captions will cause an error." + } + }, + "title": "Input", + "required": [ + "image_data_url" + ] + }, + "ZImageTurboTrainerV2Output": { + "x-fal-order-properties": [ + "diffusers_lora_file", + "config_file" + ], + "type": "object", + "properties": { + "config_file": { + "description": "URL to the configuration file for the trained model.", + "$ref": "#/components/schemas/File" + }, + "diffusers_lora_file": { + "description": "URL to the trained diffusers lora weights.", + "$ref": "#/components/schemas/File" + } + }, + "title": "Output", + "required": [ + "diffusers_lora_file", + "config_file" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/z-image-turbo-trainer-v2/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/z-image-turbo-trainer-v2/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/z-image-turbo-trainer-v2": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ZImageTurboTrainerV2Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/z-image-turbo-trainer-v2/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ZImageTurboTrainerV2Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2-klein-9b-base-trainer/edit", + "metadata": { + "display_name": "Flux 2 Klein 9B Base Trainer", + "category": "training", + "description": "Fine-tune FLUX.2 [klein] 9B from Black Forest Labs with custom datasets. 
Create specialized LoRA adaptations for specific editing tasks.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:32.562Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8b0825/dvng2ddAgvgcH9WxFOxF7_b324e03aec15473c998151bb6fa0453c.jpg", + "model_url": "https://fal.run/fal-ai/flux-2-klein-9b-base-trainer/edit", + "license_type": "commercial", + "date": "2026-01-17T00:12:09.074Z", + "group": { + "key": "flux-2-klein-9b-base-trainer", + "label": "Edit" + }, + "highlighted": false, + "kind": "training", + "pinned": false, + "inference_endpoint_ids": [ + "fal-ai/flux-2/klein/9b/base/edit/lora" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2-klein-9b-base-trainer/edit", + "version": "1.0.0", + "description": "Train image editing LoRAs for FLUX.2 [klein], BFL's latest image model. FLUX.2 [klein] offers enhanced realism, crisper and more accurate text generation, and native image editing capabilities.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2-klein-9b-base-trainer/edit", + "category": "training", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8b0825/dvng2ddAgvgcH9WxFOxF7_b324e03aec15473c998151bb6fa0453c.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2-klein-9b-base-trainer/edit", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2-klein-9b-base-trainer/edit/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2Klein9bBaseTrainerEditInput": { + "title": "InputEditV2", + "type": "object", + "properties": { + "steps": { + "description": "Total number of training steps.", + "type": "integer", + "minimum": 100, + "title": "Steps", + "maximum": 10000, + "multipleOf": 100, + "default": 1000 + }, + "image_data_url": { + "title": "Image Data Url", + "type": "string", + "description": "\n URL to the input data zip archive.\n\n The zip should contain pairs of images. The images should be named:\n\n ROOT_start.EXT and ROOT_end.EXT\n For example:\n photo_start.jpg and photo_end.jpg\n\n The zip can also contain up to four reference images for each image pair. The reference images should be named:\n ROOT_start.EXT, ROOT_start2.EXT, ROOT_start3.EXT, ROOT_start4.EXT, ROOT_end.EXT\n For example:\n photo_start.jpg, photo_start2.jpg, photo_end.jpg\n\n The zip can also contain a text file for each image pair. 
The text file should be named:\n ROOT.txt\n For example:\n photo.txt\n\n This text file can be used to specify the edit instructions for the image pair.\n\n If no text file is provided, the default_caption will be used.\n\n If no default_caption is provided, the training will fail.\n " + }, + "learning_rate": { + "title": "Learning Rate", + "type": "number", + "description": "Learning rate applied to trainable parameters.", + "default": 0.00005 + }, + "default_caption": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Default Caption", + "description": "Default caption to use when caption files are missing. If None, missing captions will cause an error." + }, + "output_lora_format": { + "enum": [ + "fal", + "comfy" + ], + "title": "Output Lora Format", + "type": "string", + "description": "Dictates the naming scheme for the output weights", + "default": "fal" + } + }, + "x-fal-order-properties": [ + "image_data_url", + "steps", + "learning_rate", + "default_caption", + "output_lora_format" + ], + "required": [ + "image_data_url" + ] + }, + "Flux2Klein9bBaseTrainerEditOutput": { + "title": "Output", + "type": "object", + "properties": { + "config_file": { + "description": "URL to the configuration file for the trained model.", + "$ref": "#/components/schemas/File" + }, + "diffusers_lora_file": { + "description": "URL to the trained diffusers lora weights.", + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "diffusers_lora_file", + "config_file" + ], + "required": [ + "diffusers_lora_file", + "config_file" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2-klein-9b-base-trainer/edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
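The edit trainer's archive layout is stricter than the text-to-image one: every ROOT needs both a ROOT_start and a ROOT_end image, with optional ROOT_start2..ROOT_start4 references and an optional ROOT.txt instruction file. A sketch of that pairing rule as a hypothetical local validator:

// Groups archive entries by ROOT and verifies each edit pair is complete.
function checkEditPairs(fileNames: string[], defaultCaption?: string): void {
  const roots = new Map<string, { start?: string; end?: string; caption?: string }>()
  for (const name of fileNames) {
    const m = name.match(/^(.*)_(start\d?|end)\.[^.]+$/)
    if (m) {
      const entry = roots.get(m[1]) ?? {}
      if (m[2] === 'end') entry.end = name
      else if (m[2] === 'start') entry.start = name // start2..start4 are extra references
      roots.set(m[1], entry)
    } else if (name.endsWith('.txt')) {
      const root = name.slice(0, -4)
      roots.set(root, { ...(roots.get(root) ?? {}), caption: name })
    }
  }
  for (const [root, entry] of roots) {
    if (!entry.start || !entry.end) throw new Error(`incomplete pair for ${root}`)
    if (!entry.caption && defaultCaption == null) {
      // Mirrors the schema: without a ROOT.txt or a default_caption, training fails.
      throw new Error(`${root} has no caption file and no default_caption was given`)
    }
  }
}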
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-klein-9b-base-trainer/edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2-klein-9b-base-trainer/edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2Klein9bBaseTrainerEditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-klein-9b-base-trainer/edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2Klein9bBaseTrainerEditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2-klein-9b-base-trainer", + "metadata": { + "display_name": "Flux 2 Klein 9B Base Trainer", + "category": "training", + "description": "Fine-tune FLUX.2 [klein] 9B from Black Forest Labs with custom datasets. Create specialized LoRA adaptations for specific editing tasks.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:32.687Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8b082b/4dsf0LE8NoXuk9Pz0Ziue_d7c1c380c4d04e03b820d06500a5749f.jpg", + "model_url": "https://fal.run/fal-ai/flux-2-klein-9b-base-trainer", + "license_type": "commercial", + "date": "2026-01-17T00:10:10.940Z", + "group": { + "key": "flux-2-klein-9b-base-trainer", + "label": "Text to Image" + }, + "highlighted": false, + "kind": "training", + "pinned": false, + "inference_endpoint_ids": [ + "fal-ai/flux-2/klein/9b/base/lora" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2-klein-9b-base-trainer", + "version": "1.0.0", + "description": "Train text-to-image LoRAs for Flux.2 [klein], BFL's latest image model. 
Flux.2 [klein] offers enhanced realism, crisper and more accurate text generation, and native image editing capabilities.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2-klein-9b-base-trainer", + "category": "training", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8b082b/4dsf0LE8NoXuk9Pz0Ziue_d7c1c380c4d04e03b820d06500a5749f.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2-klein-9b-base-trainer", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2-klein-9b-base-trainer/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2Klein9bBaseTrainerInput": { + "title": "InputT2IV2", + "type": "object", + "properties": { + "steps": { + "description": "Total number of training steps.", + "type": "integer", + "minimum": 100, + "title": "Steps", + "maximum": 10000, + "multipleOf": 100, + "default": 1000 + }, + "image_data_url": { + "title": "Image Data Url", + "type": "string", + "description": "\n URL to zip archive with images of a consistent style. Try to use at least 10 images, although more is better.\n\n The zip can also contain a text file for each image. The text file should be named:\n ROOT.txt\n For example:\n photo.txt\n\n This text file can be used to specify the caption for the image.\n\n If no text file is provided, the default_caption will be used.\n\n If no default_caption is provided, the training will fail.\n " + }, + "learning_rate": { + "title": "Learning Rate", + "type": "number", + "description": "Learning rate applied to trainable parameters.", + "default": 0.00005 + }, + "default_caption": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Default Caption", + "description": "Default caption to use when caption files are missing. If None, missing captions will cause an error."
+ }, + "output_lora_format": { + "enum": [ + "fal", + "comfy" + ], + "title": "Output Lora Format", + "type": "string", + "description": "Dictates the naming scheme for the output weights", + "default": "fal" + } + }, + "x-fal-order-properties": [ + "image_data_url", + "steps", + "learning_rate", + "default_caption", + "output_lora_format" + ], + "description": "V2 input with multi-resolution bucketing.", + "required": [ + "image_data_url" + ] + }, + "Flux2Klein9bBaseTrainerOutput": { + "title": "Output", + "type": "object", + "properties": { + "config_file": { + "description": "URL to the configuration file for the trained model.", + "$ref": "#/components/schemas/File" + }, + "diffusers_lora_file": { + "description": "URL to the trained diffusers lora weights.", + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "diffusers_lora_file", + "config_file" + ], + "required": [ + "diffusers_lora_file", + "config_file" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2-klein-9b-base-trainer/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-klein-9b-base-trainer/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2-klein-9b-base-trainer": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2Klein9bBaseTrainerInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-klein-9b-base-trainer/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2Klein9bBaseTrainerOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2-klein-4b-base-trainer", + "metadata": { + "display_name": "Flux 2 Klein 4B Base Trainer", + "category": "training", + "description": "Fine-tune FLUX.2 [klein] 4B from Black Forest Labs with custom datasets. Create specialized LoRA adaptations for specific styles and domains.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:32.812Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8b082e/N8Fy12FSedqMd-2Ehh8z1_70e50238ee6b479ebd61270840b4806e.jpg", + "model_url": "https://fal.run/fal-ai/flux-2-klein-4b-base-trainer", + "license_type": "commercial", + "date": "2026-01-17T00:00:25.052Z", + "group": { + "key": "flux-2-klein-4b-base-trainer", + "label": "Text to Image" + }, + "highlighted": false, + "kind": "training", + "pinned": false, + "inference_endpoint_ids": [ + "fal-ai/flux-2/klein/4b/base/lora" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2-klein-4b-base-trainer", + "version": "1.0.0", + "description": "Train text-to-image LoRAs for Flux.2 [klein], BFL's latest image model. Flux.2 [klein] offers enhanced realism, crisper and more accurate text generation, and native image editing capabilities.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2-klein-4b-base-trainer", + "category": "training", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8b082e/N8Fy12FSedqMd-2Ehh8z1_70e50238ee6b479ebd61270840b4806e.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2-klein-4b-base-trainer", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2-klein-4b-base-trainer/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2Klein4bBaseTrainerInput": { + "title": "InputT2IV2", + "type": "object", + "properties": { + "steps": { + "description": "Total number of training steps.", + "type": "integer", + "minimum": 100, + "title": "Steps", + "maximum": 10000, + "multipleOf": 100, + "default": 1000 + }, + "image_data_url": { + "title": "Image Data Url", + "type": "string", + "description": "\n URL to zip archive with images of a consistent style. Try to use at least 10 images, although more is better.\n\n The zip can also contain a text file for each image. The text file should be named:\n ROOT.txt\n For example:\n photo.txt\n\n This text file can be used to specify the edit instructions for the image pair.\n\n If no text file is provided, the default_caption will be used.\n\n If no default_caption is provided, the training will fail.\n " + }, + "learning_rate": { + "title": "Learning Rate", + "type": "number", + "description": "Learning rate applied to trainable parameters.", + "default": 0.00005 + }, + "default_caption": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Default Caption", + "description": "Default caption to use when caption files are missing. If None, missing captions will cause an error." + }, + "output_lora_format": { + "enum": [ + "fal", + "comfy" + ], + "title": "Output Lora Format", + "type": "string", + "description": "Dictates the naming scheme for the output weights", + "default": "fal" + } + }, + "x-fal-order-properties": [ + "image_data_url", + "steps", + "learning_rate", + "default_caption", + "output_lora_format" + ], + "description": "V2 input with multi-resolution bucketing.", + "required": [ + "image_data_url" + ] + }, + "Flux2Klein4bBaseTrainerOutput": { + "title": "Output", + "type": "object", + "properties": { + "config_file": { + "description": "URL to the configuration file for the trained model.", + "$ref": "#/components/schemas/File" + }, + "diffusers_lora_file": { + "description": "URL to the trained diffusers lora weights.", + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "diffusers_lora_file", + "config_file" + ], + "required": [ + "diffusers_lora_file", + "config_file" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2-klein-4b-base-trainer/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-klein-4b-base-trainer/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2-klein-4b-base-trainer": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2Klein4bBaseTrainerInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-klein-4b-base-trainer/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2Klein4bBaseTrainerOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2-klein-4b-base-trainer/edit", + "metadata": { + "display_name": "Flux 2 Klein 4B Base Trainer", + "category": "training", + "description": "Fine-tune FLUX.2 [klein] 4B from Black Forest Labs with custom datasets. Create specialized LoRA adaptations for specific editing tasks.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:32.937Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8b0832/jZ_NLBvIMIzZUSunU51Vv_3508e6e1b4f040078ec5ef718c3e77b9.jpg", + "model_url": "https://fal.run/fal-ai/flux-2-klein-4b-base-trainer/edit", + "license_type": "commercial", + "date": "2026-01-16T23:53:13.121Z", + "group": { + "key": "flux-2-klein-4b-base-trainer", + "label": "Edit" + }, + "highlighted": false, + "kind": "training", + "pinned": false, + "inference_endpoint_ids": [ + "fal-ai/flux-2/klein/4b/base/edit/lora" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2-klein-4b-base-trainer/edit", + "version": "1.0.0", + "description": "Train image editing LoRAs for FLUX.2 [klein], BFL's latest image model. 
FLUX.2 [klein] offers enhanced realism, crisper and more accurate text generation, and native image editing capabilities.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2-klein-4b-base-trainer/edit", + "category": "training", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8b0832/jZ_NLBvIMIzZUSunU51Vv_3508e6e1b4f040078ec5ef718c3e77b9.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2-klein-4b-base-trainer/edit", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2-klein-4b-base-trainer/edit/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2Klein4bBaseTrainerEditInput": { + "title": "InputEditV2", + "type": "object", + "properties": { + "steps": { + "description": "Total number of training steps.", + "type": "integer", + "minimum": 100, + "title": "Steps", + "maximum": 10000, + "multipleOf": 100, + "default": 1000 + }, + "image_data_url": { + "title": "Image Data Url", + "type": "string", + "description": "\n URL to the input data zip archive.\n\n The zip should contain pairs of images. The images should be named:\n\n ROOT_start.EXT and ROOT_end.EXT\n For example:\n photo_start.jpg and photo_end.jpg\n\n The zip can also contain up to four reference image for each image pair. The reference images should be named:\n ROOT_start.EXT, ROOT_start2.EXT, ROOT_start3.EXT, ROOT_start4.EXT, ROOT_end.EXT\n For example:\n photo_start.jpg, photo_start2.jpg, photo_end.jpg\n\n The zip can also contain a text file for each image pair. The text file should be named:\n ROOT.txt\n For example:\n photo.txt\n\n This text file can be used to specify the edit instructions for the image pair.\n\n If no text file is provided, the default_caption will be used.\n\n If no default_caption is provided, the training will fail.\n " + }, + "learning_rate": { + "title": "Learning Rate", + "type": "number", + "description": "Learning rate applied to trainable parameters.", + "default": 0.00005 + }, + "default_caption": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Default Caption", + "description": "Default caption to use when caption files are missing. If None, missing captions will cause an error." 
+ }, + "output_lora_format": { + "enum": [ + "fal", + "comfy" + ], + "title": "Output Lora Format", + "type": "string", + "description": "Dictates the naming scheme for the output weights", + "default": "fal" + } + }, + "x-fal-order-properties": [ + "image_data_url", + "steps", + "learning_rate", + "default_caption", + "output_lora_format" + ], + "required": [ + "image_data_url" + ] + }, + "Flux2Klein4bBaseTrainerEditOutput": { + "title": "Output", + "type": "object", + "properties": { + "config_file": { + "description": "URL to the configuration file for the trained model.", + "$ref": "#/components/schemas/File" + }, + "diffusers_lora_file": { + "description": "URL to the trained diffusers lora weights.", + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "diffusers_lora_file", + "config_file" + ], + "required": [ + "diffusers_lora_file", + "config_file" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2-klein-4b-base-trainer/edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-klein-4b-base-trainer/edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2-klein-4b-base-trainer/edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2Klein4bBaseTrainerEditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-klein-4b-base-trainer/edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2Klein4bBaseTrainerEditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-2512-trainer-v2", + "metadata": { + "display_name": "Qwen Image 2512 Trainer V2", + "category": "training", + "description": "Fast LoRA trainer for Qwen-Image-2512", + "status": "active", + "tags": [ + "lora", + "personalization" + ], + "updated_at": "2026-01-26T21:41:35.619Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8a6d81/ftGhoUbrfMQ1ddU8Oi-oe_0a22ea3dbb8b428393765dea02fe1e9c.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-2512-trainer-v2", + "license_type": "commercial", + "date": "2026-01-15T01:56:33.046Z", + "highlighted": false, + "kind": "training", + "pinned": false, + "inference_endpoint_ids": [ + "fal-ai/qwen-image-2512/lora" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image-2512-trainer-v2", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-image-2512-trainer-v2 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-image-2512-trainer-v2", + "category": "training", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8a6d81/ftGhoUbrfMQ1ddU8Oi-oe_0a22ea3dbb8b428393765dea02fe1e9c.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-2512-trainer-v2", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-2512-trainer-v2/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImage2512TrainerV2Input": { + "title": "Input", + "type": "object", + "properties": { + "steps": { + "description": "Number of steps to train for", + "type": "integer", + "minimum": 10, + "maximum": 40000, + "title": "Steps", + "default": 2000 + }, + "image_data_url": { + "title": "Image Data Url", + "type": "string", + "description": "\n URL to the input data zip archive.\n\n The zip should contain pairs of images and corresponding captions.\n\n The images should be named: ROOT.EXT. For example: 001.jpg\n\n The corresponding captions should be named: ROOT.txt. For example: 001.txt\n\n If no text file is provided for an image, the default_caption will be used.\n " + }, + "learning_rate": { + "title": "Learning Rate", + "type": "number", + "description": "Learning rate.", + "default": 0.0005 + }, + "default_caption": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Default Caption", + "description": "Default caption to use when caption files are missing. If None, missing captions will cause an error." + } + }, + "x-fal-order-properties": [ + "image_data_url", + "steps", + "default_caption", + "learning_rate" + ], + "required": [ + "image_data_url" + ] + }, + "QwenImage2512TrainerV2Output": { + "title": "Output", + "type": "object", + "properties": { + "config_file": { + "description": "URL to the configuration file for the trained model.", + "$ref": "#/components/schemas/File" + }, + "diffusers_lora_file": { + "description": "URL to the trained diffusers lora weights.", + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "diffusers_lora_file", + "config_file" + ], + "required": [ + "diffusers_lora_file", + "config_file" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-2512-trainer-v2/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-2512-trainer-v2/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-2512-trainer-v2": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImage2512TrainerV2Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-2512-trainer-v2/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImage2512TrainerV2Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2-trainer-v2/edit", + "metadata": { + "display_name": "Flux 2 Trainer V2", + "category": "training", + "description": "Fine-tune FLUX.2 [dev] from Black Forest Labs with custom datasets. Create specialized LoRA adaptations for specific editing tasks.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:39.863Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/rabbit/QQxycBXjY75hch-HBAQKZ_4af8ba3ddb9d457ba5fc51fcd428e720.jpg", + "model_url": "https://fal.run/fal-ai/flux-2-trainer-v2/edit", + "license_type": "commercial", + "date": "2026-01-10T01:08:01.647Z", + "group": { + "key": "flux2-trainer-v2", + "label": "Edit" + }, + "highlighted": false, + "kind": "training", + "pinned": false, + "inference_endpoint_ids": [ + "fal-ai/flux-2/lora/edit" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2-trainer-v2/edit", + "version": "1.0.0", + "description": "Train image editing LoRAs for FLUX.2 [dev], BFL's latest image model. 
FLUX.2 [dev] offers enhanced realism, crisper and more accurate text generation, and native image editing capabilities.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2-trainer-v2/edit", + "category": "training", + "thumbnailUrl": "https://v3b.fal.media/files/b/rabbit/QQxycBXjY75hch-HBAQKZ_4af8ba3ddb9d457ba5fc51fcd428e720.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2-trainer-v2/edit", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2-trainer-v2/edit/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2TrainerV2EditInput": { + "x-fal-order-properties": [ + "image_data_url", + "steps", + "learning_rate", + "default_caption", + "output_lora_format" + ], + "type": "object", + "properties": { + "steps": { + "description": "Total number of training steps.", + "type": "integer", + "minimum": 100, + "title": "Steps", + "maximum": 10000, + "multipleOf": 100, + "default": 1000 + }, + "image_data_url": { + "description": "\n URL to the input data zip archive.\n\n The zip should contain pairs of images. The images should be named:\n\n ROOT_start.EXT and ROOT_end.EXT\n For example:\n photo_start.jpg and photo_end.jpg\n\n The zip can also contain up to four reference image for each image pair. The reference images should be named:\n ROOT_start.EXT, ROOT_start2.EXT, ROOT_start3.EXT, ROOT_start4.EXT, ROOT_end.EXT\n For example:\n photo_start.jpg, photo_start2.jpg, photo_end.jpg\n\n The zip can also contain a text file for each image pair. The text file should be named:\n ROOT.txt\n For example:\n photo.txt\n\n This text file can be used to specify the edit instructions for the image pair.\n\n If no text file is provided, the default_caption will be used.\n\n If no default_caption is provided, the training will fail.\n ", + "type": "string", + "title": "Image Data Url" + }, + "learning_rate": { + "description": "Learning rate applied to trainable parameters.", + "type": "number", + "title": "Learning Rate", + "default": 0.00005 + }, + "default_caption": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Default caption to use when caption files are missing. 
If None, missing captions will cause an error.", + "title": "Default Caption" + }, + "output_lora_format": { + "enum": [ + "fal", + "comfy" + ], + "title": "Output Lora Format", + "type": "string", + "description": "Dictates the naming scheme for the output weights", + "default": "fal" + } + }, + "title": "InputEditV2", + "required": [ + "image_data_url" + ] + }, + "Flux2TrainerV2EditOutput": { + "x-fal-order-properties": [ + "diffusers_lora_file", + "config_file" + ], + "type": "object", + "properties": { + "config_file": { + "description": "URL to the configuration file for the trained model.", + "$ref": "#/components/schemas/File" + }, + "diffusers_lora_file": { + "description": "URL to the trained diffusers lora weights.", + "$ref": "#/components/schemas/File" + } + }, + "title": "Output", + "required": [ + "diffusers_lora_file", + "config_file" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2-trainer-v2/edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-trainer-v2/edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2-trainer-v2/edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2TrainerV2EditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-trainer-v2/edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2TrainerV2EditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2-trainer-v2", + "metadata": { + "display_name": "Flux 2 Trainer V2", + "category": "training", + "description": "Fine-tune FLUX.2 [dev] from Black Forest Labs with custom datasets. Create specialized LoRA adaptations for specific styles and domains.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:39.988Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/tiger/nYv87OHdt503yjlNUk1P3_2551388f5f4e4537b67e8ed436333bca.jpg", + "model_url": "https://fal.run/fal-ai/flux-2-trainer-v2", + "license_type": "commercial", + "date": "2026-01-10T00:59:39.928Z", + "group": { + "key": "flux2-trainer-v2", + "label": "Text to Image" + }, + "highlighted": false, + "kind": "training", + "pinned": false, + "inference_endpoint_ids": [ + "fal-ai/flux-2/lora" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2-trainer-v2", + "version": "1.0.0", + "description": "Train text-to-image LoRAs for Flux.2, BFL's latest image model. Flux.2 offers enhanced realism, crisper and more accurate text generation, and native image editing capabilities.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2-trainer-v2", + "category": "training", + "thumbnailUrl": "https://v3b.fal.media/files/b/tiger/nYv87OHdt503yjlNUk1P3_2551388f5f4e4537b67e8ed436333bca.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2-trainer-v2", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2-trainer-v2/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2TrainerV2Input": { + "description": "V2 input with multi-resolution bucketing.", + "type": "object", + "properties": { + "steps": { + "description": "Total number of training steps.", + "type": "integer", + "minimum": 100, + "title": "Steps", + "maximum": 10000, + "multipleOf": 100, + "default": 1000 + }, + "image_data_url": { + "description": "\n URL to zip archive with images of a consistent style. Try to use at least 10 images, although more is better.\n\n The zip can also contain a text file for each image. The text file should be named:\n ROOT.txt\n For example:\n photo.txt\n\n This text file can be used to specify the edit instructions for the image pair.\n\n If no text file is provided, the default_caption will be used.\n\n If no default_caption is provided, the training will fail.\n ", + "type": "string", + "title": "Image Data Url" + }, + "learning_rate": { + "description": "Learning rate applied to trainable parameters.", + "type": "number", + "title": "Learning Rate", + "default": 0.00005 + }, + "default_caption": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Default caption to use when caption files are missing. If None, missing captions will cause an error.", + "title": "Default Caption" + }, + "output_lora_format": { + "enum": [ + "fal", + "comfy" + ], + "title": "Output Lora Format", + "type": "string", + "description": "Dictates the naming scheme for the output weights", + "default": "fal" + } + }, + "title": "InputT2IV2", + "x-fal-order-properties": [ + "image_data_url", + "steps", + "learning_rate", + "default_caption", + "output_lora_format" + ], + "required": [ + "image_data_url" + ] + }, + "Flux2TrainerV2Output": { + "x-fal-order-properties": [ + "diffusers_lora_file", + "config_file" + ], + "type": "object", + "properties": { + "config_file": { + "description": "URL to the configuration file for the trained model.", + "$ref": "#/components/schemas/File" + }, + "diffusers_lora_file": { + "description": "URL to the trained diffusers lora weights.", + "$ref": "#/components/schemas/File" + } + }, + "title": "Output", + "required": [ + "diffusers_lora_file", + "config_file" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2-trainer-v2/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-trainer-v2/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2-trainer-v2": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2TrainerV2Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-trainer-v2/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2TrainerV2Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx2-v2v-trainer", + "metadata": { + "display_name": "LTX-2 Video to Video Trainer", + "category": "training", + "description": "Train LTX-2 for video transformation or video-conditioned generation.", + "status": "active", + "tags": [ + "ltx2-video", + "fine-tuning", + "video-to-video" + ], + "updated_at": "2026-01-26T21:41:40.838Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8974c8/5HxQgdbMq2dg4Mp-8Kug1_a8a11ca50370401eacad6e04a14e18e1.jpg", + "model_url": "https://fal.run/fal-ai/ltx2-v2v-trainer", + "license_type": "commercial", + "date": "2026-01-07T16:55:20.500Z", + "highlighted": false, + "kind": "training", + "pinned": false, + "inference_endpoint_ids": [ + "fal-ai/ltx-2-19b/video-to-video/lora" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx2-v2v-trainer", + "version": "1.0.0", + "description": "The 
OpenAPI schema for the fal-ai/ltx2-v2v-trainer queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx2-v2v-trainer", + "category": "training", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8974c8/5HxQgdbMq2dg4Mp-8Kug1_a8a11ca50370401eacad6e04a14e18e1.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx2-v2v-trainer", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx2-v2v-trainer/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Ltx2V2vTrainerInput": { + "title": "LTX2V2VInput", + "type": "object", + "properties": { + "number_of_steps": { + "description": "The number of training steps.", + "type": "integer", + "examples": [ + 2000 + ], + "maximum": 20000, + "title": "Number Of Steps", + "minimum": 100, + "step": 100, + "default": 2000 + }, + "frame_rate": { + "description": "Target frames per second for the video.", + "type": "integer", + "examples": [ + 25 + ], + "maximum": 60, + "title": "Frame Rate", + "minimum": 8, + "default": 25 + }, + "learning_rate": { + "description": "Learning rate for optimization. Higher values can lead to faster training but may cause overfitting.", + "type": "number", + "examples": [ + 0.0002 + ], + "maximum": 1, + "title": "Learning Rate", + "minimum": 0.000001, + "step": 0.0001, + "default": 0.0002 + }, + "validation": { + "title": "Validation", + "type": "array", + "description": "A list of validation inputs with prompts and reference videos.", + "maxItems": 2, + "items": { + "$ref": "#/components/schemas/V2VValidation" + }, + "default": [] + }, + "number_of_frames": { + "description": "Number of frames per training sample. Must satisfy frames % 8 == 1 (e.g., 1, 9, 17, 25, 33, 41, 49, 57, 65, 73, 81, 89, 97).", + "type": "integer", + "examples": [ + 89 + ], + "maximum": 121, + "title": "Number Of Frames", + "minimum": 9, + "default": 89 + }, + "training_data_url": { + "title": "Training Data Url", + "type": "string", + "description": "URL to zip archive with videos or images. Try to use at least 10 files, although more is better.\n\n **Supported video formats:** .mp4, .mov, .avi, .mkv\n **Supported image formats:** .png, .jpg, .jpeg\n\n Note: The dataset must contain ONLY videos OR ONLY images - mixed datasets are not supported.\n\n The archive can also contain text files with captions. Each text file should have the same name as the media file it corresponds to." + }, + "split_input_duration_threshold": { + "description": "The duration threshold in seconds. 
If a video is longer than this, it will be split into scenes.", + "type": "number", + "examples": [ + 30 + ], + "maximum": 60, + "title": "Split Input Duration Threshold", + "minimum": 1, + "default": 30 + }, + "rank": { + "examples": [ + 32 + ], + "title": "Rank", + "type": "integer", + "description": "The rank of the LoRA adaptation. Higher values increase capacity but use more memory.", + "enum": [ + 8, + 16, + 32, + 64, + 128 + ], + "default": 32 + }, + "stg_scale": { + "minimum": 0, + "maximum": 3, + "type": "number", + "description": "STG (Spatio-Temporal Guidance) scale. 0.0 disables STG. Recommended value is 1.0.", + "title": "Stg Scale", + "default": 1 + }, + "first_frame_conditioning_p": { + "minimum": 0, + "maximum": 1, + "type": "number", + "description": "Probability of conditioning on the first frame during training. Lower values work better for video-to-video transformation.", + "title": "First Frame Conditioning P", + "default": 0.1 + }, + "aspect_ratio": { + "examples": [ + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio to use for training.", + "enum": [ + "16:9", + "1:1", + "9:16" + ], + "default": "1:1" + }, + "trigger_phrase": { + "examples": [ + "" + ], + "title": "Trigger Phrase", + "type": "string", + "description": "A phrase that will trigger the LoRA style. Will be prepended to captions during training.", + "default": "" + }, + "resolution": { + "examples": [ + "medium" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution to use for training. Higher resolutions require more memory.", + "enum": [ + "low", + "medium", + "high" + ], + "default": "medium" + }, + "validation_frame_rate": { + "description": "Target frames per second for validation videos.", + "type": "integer", + "examples": [ + 25 + ], + "maximum": 60, + "title": "Validation Frame Rate", + "minimum": 8, + "default": 25 + }, + "split_input_into_scenes": { + "examples": [ + true + ], + "title": "Split Input Into Scenes", + "type": "boolean", + "description": "If true, videos above a certain duration threshold will be split into scenes.", + "default": true + }, + "validation_resolution": { + "examples": [ + "high" + ], + "title": "Validation Resolution", + "type": "string", + "description": "The resolution to use for validation.", + "enum": [ + "low", + "medium", + "high" + ], + "default": "high" + }, + "validation_number_of_frames": { + "description": "The number of frames in validation videos.", + "type": "integer", + "examples": [ + 89 + ], + "maximum": 121, + "title": "Validation Number Of Frames", + "minimum": 9, + "default": 89 + }, + "validation_aspect_ratio": { + "examples": [ + "1:1" + ], + "title": "Validation Aspect Ratio", + "type": "string", + "description": "The aspect ratio to use for validation.", + "enum": [ + "16:9", + "1:1", + "9:16" + ], + "default": "1:1" + }, + "validation_negative_prompt": { + "title": "Validation Negative Prompt", + "type": "string", + "description": "A negative prompt to use for validation.", + "default": "worst quality, inconsistent motion, blurry, jittery, distorted" + }, + "auto_scale_input": { + "examples": [ + false + ], + "title": "Auto Scale Input", + "type": "boolean", + "description": "If true, videos will be automatically scaled to the target frame count and fps. 
This option has no effect on image datasets.", + "default": false + } + }, + "description": "Input configuration for LTX-2 video-to-video (IC-LoRA) training.", + "x-fal-order-properties": [ + "training_data_url", + "rank", + "number_of_steps", + "learning_rate", + "number_of_frames", + "frame_rate", + "resolution", + "aspect_ratio", + "trigger_phrase", + "auto_scale_input", + "split_input_into_scenes", + "split_input_duration_threshold", + "first_frame_conditioning_p", + "validation", + "validation_negative_prompt", + "validation_number_of_frames", + "validation_frame_rate", + "validation_resolution", + "validation_aspect_ratio", + "stg_scale" + ], + "required": [ + "training_data_url" + ] + }, + "Ltx2V2vTrainerOutput": { + "title": "LTX2V2VOutput", + "type": "object", + "properties": { + "lora_file": { + "description": "URL to the trained IC-LoRA weights (.safetensors).", + "$ref": "#/components/schemas/File" + }, + "config_file": { + "description": "Configuration used for setting up inference endpoints.", + "$ref": "#/components/schemas/File" + }, + "debug_dataset": { + "anyOf": [ + { + "$ref": "#/components/schemas/File" + }, + { + "type": "null" + } + ], + "description": "URL to the debug dataset archive containing decoded videos." + }, + "video": { + "anyOf": [ + { + "$ref": "#/components/schemas/File" + }, + { + "type": "null" + } + ], + "description": "The URL to the validation videos (with reference videos side-by-side), if any." + } + }, + "description": "Output from LTX-2 video-to-video training.", + "x-fal-order-properties": [ + "video", + "lora_file", + "config_file", + "debug_dataset" + ], + "required": [ + "video", + "lora_file", + "config_file" + ] + }, + "V2VValidation": { + "title": "V2VValidation", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt to use for validation." + }, + "reference_video_url": { + "title": "Reference Video Url", + "minLength": 1, + "description": "URL to reference video for IC-LoRA validation. This is the input video that will be transformed.", + "type": "string" + } + }, + "description": "Validation input for video-to-video training.", + "x-fal-order-properties": [ + "prompt", + "reference_video_url" + ], + "required": [ + "prompt", + "reference_video_url" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx2-v2v-trainer/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx2-v2v-trainer/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx2-v2v-trainer": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx2V2vTrainerInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx2-v2v-trainer/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx2V2vTrainerOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx2-video-trainer", + "metadata": { + "display_name": "LTX-2 Video Trainer", + "category": "training", + "description": "Train LTX-2 for custom styles and effects.", + "status": "active", + "tags": [ + "ltx2-video", + "fine-tuning" + ], + "updated_at": "2026-01-26T21:41:43.411Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a88dc8e/AzUBOgcNgP8TwHeO0UVHX_221203c7df6149b6ab546c6243092128.jpg", + "model_url": "https://fal.run/fal-ai/ltx2-video-trainer", + "license_type": "commercial", + "date": "2026-01-03T04:40:56.768Z", + "highlighted": false, + "kind": "training", + "pinned": false, + "inference_endpoint_ids": [ + "fal-ai/ltx-2-19b/text-to-video/lora", + "fal-ai/ltx-2-19b/extend-video/lora", + "fal-ai/ltx-2-19b/image-to-video/lora" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx2-video-trainer", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx2-video-trainer queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx2-video-trainer", + "category": "training", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a88dc8e/AzUBOgcNgP8TwHeO0UVHX_221203c7df6149b6ab546c6243092128.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx2-video-trainer", 
+ "documentationUrl": "https://fal.ai/models/fal-ai/ltx2-video-trainer/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Ltx2VideoTrainerInput": { + "description": "Input configuration for LTX-2 text-to-video training.", + "type": "object", + "properties": { + "number_of_steps": { + "description": "The number of training steps.", + "type": "integer", + "minimum": 100, + "maximum": 20000, + "title": "Number Of Steps", + "examples": [ + 2000 + ], + "step": 100, + "default": 2000 + }, + "audio_preserve_pitch": { + "description": "When audio duration doesn't match video duration, stretch/compress audio without changing pitch. If disabled, audio is trimmed or padded with silence.", + "type": "boolean", + "title": "Audio Preserve Pitch", + "default": true + }, + "frame_rate": { + "description": "Target frames per second for the video.", + "type": "integer", + "examples": [ + 25 + ], + "maximum": 60, + "minimum": 8, + "title": "Frame Rate", + "default": 25 + }, + "audio_normalize": { + "description": "Normalize audio peak amplitude to a consistent level. Recommended for consistent audio levels across the dataset.", + "type": "boolean", + "title": "Audio Normalize", + "default": true + }, + "validation": { + "description": "A list of validation prompts to use during training. When providing an image, _all_ validation inputs must have an image.", + "type": "array", + "maxItems": 2, + "title": "Validation", + "items": { + "$ref": "#/components/schemas/Validation" + }, + "default": [] + }, + "learning_rate": { + "description": "Learning rate for optimization. Higher values can lead to faster training but may cause overfitting.", + "type": "number", + "minimum": 0.000001, + "maximum": 1, + "title": "Learning Rate", + "examples": [ + 0.0002 + ], + "step": 0.0001, + "default": 0.0002 + }, + "number_of_frames": { + "description": "Number of frames per training sample. Must satisfy frames % 8 == 1 (e.g., 1, 9, 17, 25, 33, 41, 49, 57, 65, 73, 81, 89, 97).", + "type": "integer", + "examples": [ + 89 + ], + "maximum": 121, + "minimum": 9, + "title": "Number Of Frames", + "default": 89 + }, + "training_data_url": { + "description": "URL to zip archive with videos or images. Try to use at least 10 files, although more is better.\n\n **Supported video formats:** .mp4, .mov, .avi, .mkv\n **Supported image formats:** .png, .jpg, .jpeg\n\n Note: The dataset must contain ONLY videos OR ONLY images - mixed datasets are not supported.\n\n The archive can also contain text files with captions. 
Each text file should have the same name as the media file it corresponds to.", + "type": "string", + "title": "Training Data Url" + }, + "split_input_duration_threshold": { + "description": "The duration threshold in seconds. If a video is longer than this, it will be split into scenes.", + "type": "number", + "examples": [ + 30 + ], + "maximum": 60, + "minimum": 1, + "title": "Split Input Duration Threshold", + "default": 30 + }, + "rank": { + "examples": [ + 32 + ], + "description": "The rank of the LoRA adaptation. Higher values increase capacity but use more memory.", + "type": "integer", + "enum": [ + 8, + 16, + 32, + 64, + 128 + ], + "title": "Rank", + "default": 32 + }, + "first_frame_conditioning_p": { + "minimum": 0, + "description": "Probability of conditioning on the first frame during training. Higher values improve image-to-video performance.", + "type": "number", + "maximum": 1, + "title": "First Frame Conditioning P", + "default": 0.5 + }, + "stg_scale": { + "minimum": 0, + "description": "STG (Spatio-Temporal Guidance) scale. 0.0 disables STG. Recommended value is 1.0.", + "type": "number", + "maximum": 3, + "title": "Stg Scale", + "default": 1 + }, + "aspect_ratio": { + "examples": [ + "1:1" + ], + "description": "Aspect ratio to use for training.", + "type": "string", + "enum": [ + "16:9", + "1:1", + "9:16" + ], + "title": "Aspect Ratio", + "default": "1:1" + }, + "with_audio": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "description": "Enable joint audio-video training. If None (default), automatically detects whether input videos have audio. Set to True to force audio training, or False to disable.", + "title": "With Audio" + }, + "trigger_phrase": { + "examples": [ + "" + ], + "description": "A phrase that will trigger the LoRA style. Will be prepended to captions during training.", + "type": "string", + "title": "Trigger Phrase", + "default": "" + }, + "validation_frame_rate": { + "description": "Target frames per second for validation videos.", + "type": "integer", + "examples": [ + 25 + ], + "maximum": 60, + "minimum": 8, + "title": "Validation Frame Rate", + "default": 25 + }, + "resolution": { + "examples": [ + "medium" + ], + "description": "Resolution to use for training. 
Higher resolutions require more memory.", + "type": "string", + "enum": [ + "low", + "medium", + "high" + ], + "title": "Resolution", + "default": "medium" + }, + "split_input_into_scenes": { + "examples": [ + true + ], + "description": "If true, videos above a certain duration threshold will be split into scenes.", + "type": "boolean", + "title": "Split Input Into Scenes", + "default": true + }, + "generate_audio_in_validation": { + "description": "Whether to generate audio in validation samples.", + "type": "boolean", + "title": "Generate Audio In Validation", + "default": true + }, + "validation_resolution": { + "examples": [ + "high" + ], + "description": "The resolution to use for validation.", + "type": "string", + "enum": [ + "low", + "medium", + "high" + ], + "title": "Validation Resolution", + "default": "high" + }, + "validation_number_of_frames": { + "description": "The number of frames in validation videos.", + "type": "integer", + "examples": [ + 89 + ], + "maximum": 121, + "minimum": 9, + "title": "Validation Number Of Frames", + "default": 89 + }, + "validation_aspect_ratio": { + "examples": [ + "1:1" + ], + "description": "The aspect ratio to use for validation.", + "type": "string", + "enum": [ + "16:9", + "1:1", + "9:16" + ], + "title": "Validation Aspect Ratio", + "default": "1:1" + }, + "validation_negative_prompt": { + "description": "A negative prompt to use for validation.", + "type": "string", + "title": "Validation Negative Prompt", + "default": "worst quality, inconsistent motion, blurry, jittery, distorted" + }, + "auto_scale_input": { + "examples": [ + false + ], + "description": "If true, videos will be automatically scaled to the target frame count and fps. This option has no effect on image datasets.", + "type": "boolean", + "title": "Auto Scale Input", + "default": false + } + }, + "x-fal-order-properties": [ + "training_data_url", + "rank", + "number_of_steps", + "learning_rate", + "number_of_frames", + "frame_rate", + "resolution", + "aspect_ratio", + "trigger_phrase", + "auto_scale_input", + "split_input_into_scenes", + "split_input_duration_threshold", + "with_audio", + "audio_normalize", + "audio_preserve_pitch", + "first_frame_conditioning_p", + "validation", + "validation_negative_prompt", + "validation_number_of_frames", + "validation_frame_rate", + "validation_resolution", + "validation_aspect_ratio", + "stg_scale", + "generate_audio_in_validation" + ], + "title": "LTX2Input", + "required": [ + "training_data_url" + ] + }, + "Ltx2VideoTrainerOutput": { + "description": "Output from LTX-2 training.", + "type": "object", + "properties": { + "lora_file": { + "description": "URL to the trained LoRA weights (.safetensors).", + "$ref": "#/components/schemas/File" + }, + "config_file": { + "description": "Configuration used for setting up inference endpoints.", + "$ref": "#/components/schemas/File" + }, + "debug_dataset": { + "anyOf": [ + { + "$ref": "#/components/schemas/File" + }, + { + "type": "null" + } + ], + "description": "URL to the debug dataset archive containing decoded videos and audio." + }, + "video": { + "anyOf": [ + { + "$ref": "#/components/schemas/File" + }, + { + "type": "null" + } + ], + "description": "The URL to the validation videos, if any." 
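Only training_data_url is required by the Ltx2VideoTrainerInput schema above; every other field has a documented default. A hypothetical payload that stays inside the documented constraints (the dataset URL is a placeholder, not a real file):

// Hypothetical Ltx2VideoTrainerInput payload; values are the documented
// defaults or chosen within the documented ranges.
const ltx2TrainingInput = {
  training_data_url: 'https://example.com/dataset.zip', // placeholder
  rank: 32, // one of 8 | 16 | 32 | 64 | 128
  number_of_steps: 2000, // 100..20000, in steps of 100
  learning_rate: 0.0002,
  number_of_frames: 89, // must satisfy frames % 8 == 1 (89 % 8 == 1)
  frame_rate: 25, // 8..60
  resolution: 'medium', // 'low' | 'medium' | 'high'
  aspect_ratio: '1:1', // '16:9' | '1:1' | '9:16'
  first_frame_conditioning_p: 0.5, // higher improves image-to-video
  stg_scale: 1, // 0 disables STG; 1.0 is the recommended value
}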
+ } + }, + "x-fal-order-properties": [ + "video", + "lora_file", + "config_file", + "debug_dataset" + ], + "title": "LTX2Output", + "required": [ + "video", + "lora_file", + "config_file" + ] + }, + "Validation": { + "x-fal-order-properties": [ + "prompt", + "image_url" + ], + "type": "object", + "properties": { + "prompt": { + "description": "The prompt to use for validation.", + "type": "string", + "title": "Prompt" + }, + "image_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "An image to use for image-to-video validation. If provided for one validation, _all_ validation inputs must have an image.", + "title": "Image Url" + } + }, + "title": "Validation", + "required": [ + "prompt" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx2-video-trainer/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx2-video-trainer/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
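Trainer results are delivered as File records like the schema above: only url is guaranteed, while file_name, content_type, and file_size may be null. A sketch that persists the trained LoRA weights from a completed Ltx2VideoTrainerOutput, assuming Node 18+ (global fetch, node:fs/promises); the fallback file name is made up:

import { writeFile } from 'node:fs/promises'

// File record per the schema above: only `url` is guaranteed.
interface FalFile {
  url: string
  content_type?: string | null
  file_name?: string | null
  file_size?: number | null
}

// Shape of Ltx2VideoTrainerOutput per the schema above.
interface Ltx2VideoTrainerOutput {
  video: FalFile | null
  lora_file: FalFile
  config_file: FalFile
  debug_dataset?: FalFile | null
}

async function saveLoraWeights(
  output: Ltx2VideoTrainerOutput,
): Promise<string> {
  // file_name may be null, so fall back to an invented local name.
  const name = output.lora_file.file_name ?? 'ltx2-lora.safetensors'
  const res = await fetch(output.lora_file.url)
  await writeFile(name, new Uint8Array(await res.arrayBuffer()))
  return name
}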
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx2-video-trainer": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx2VideoTrainerInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx2-video-trainer/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx2VideoTrainerOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-2512-trainer", + "metadata": { + "display_name": "Qwen Image 2512 Trainer", + "category": "training", + "description": "Qwen Image 2512 LoRA training", + "status": "active", + "tags": [ + "lora", + "personalization" + ], + "updated_at": "2026-01-26T21:41:43.663Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a88971d/RI2dnHIfi0FJ7BCSy83hh_089d63dbaf4b491ca15894cad8ee4741.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-2512-trainer", + "license_type": "commercial", + "date": "2026-01-01T03:13:13.085Z", + "highlighted": false, + "kind": "training", + "pinned": false, + "inference_endpoint_ids": [ + "fal-ai/qwen-image-2512/lora" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image-2512-trainer", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-image-2512-trainer queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-image-2512-trainer", + "category": "training", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a88971d/RI2dnHIfi0FJ7BCSy83hh_089d63dbaf4b491ca15894cad8ee4741.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-2512-trainer", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-2512-trainer/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImage2512TrainerInput": { + "x-fal-order-properties": [ + "image_data_url", + "learning_rate", + "steps", + "default_caption" + ], + "type": "object", + "properties": { + "steps": { + "description": "Number of steps to train for", + "type": "integer", + "minimum": 100, + "maximum": 30000, + "title": "Steps", + "multipleOf": 100, + "default": 1000 + }, + "image_data_url": { + "description": "\n URL to the input data zip archive for text-to-image training.\n\n The zip should contain images with their corresponding text captions:\n\n image.EXT and image.txt\n For example:\n photo.jpg and photo.txt\n\n The text file contains the caption/prompt describing the target image.\n\n If no text file is provided for an image, the default_caption will be used.\n\n If no default_caption is provided and a text file is missing, the training will fail.\n ", + "type": "string", + "title": "Image Data Url" + }, + "learning_rate": { + "description": "Learning rate for LoRA parameters.", + "type": "number", + "title": "Learning Rate", + "default": 0.0005 + }, + "default_caption": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Default caption to use when caption files are missing. If None, missing captions will cause an error.", + "title": "Default Caption" + } + }, + "title": "InputImage", + "required": [ + "image_data_url" + ] + }, + "QwenImage2512TrainerOutput": { + "x-fal-order-properties": [ + "diffusers_lora_file", + "config_file" + ], + "type": "object", + "properties": { + "config_file": { + "description": "URL to the configuration file for the trained model.", + "$ref": "#/components/schemas/File" + }, + "diffusers_lora_file": { + "description": "URL to the trained diffusers lora weights.", + "$ref": "#/components/schemas/File" + } + }, + "title": "Output", + "required": [ + "diffusers_lora_file", + "config_file" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-2512-trainer/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
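The image_data_url contract above pairs image.EXT with image.txt; a missing caption falls back to default_caption, and training fails when neither exists. A small pre-flight check over the archive's file names, under that reading of the contract:

// Pre-flight check for a text-to-image dataset per the naming contract
// above: photo.jpg needs photo.txt unless default_caption is provided.
function checkCaptionCoverage(
  fileNames: Array<string>,
  defaultCaption?: string | null,
): Array<string> {
  // Stems of all caption files (photo.txt -> photo).
  const captionStems = new Set(
    fileNames
      .filter((f) => /\.txt$/i.test(f))
      .map((f) => f.replace(/\.txt$/i, '')),
  )
  const images = fileNames.filter((f) => /\.(png|jpe?g)$/i.test(f))
  // Images with no matching caption; these fall back to default_caption.
  const uncaptioned = images.filter(
    (img) => !captionStems.has(img.replace(/\.[^.]+$/, '')),
  )
  if (uncaptioned.length > 0 && defaultCaption == null) {
    throw new Error(
      `No caption and no default_caption for: ${uncaptioned.join(', ')}`,
    )
  }
  return uncaptioned
}

// e.g. checkCaptionCoverage(['a.jpg', 'a.txt', 'b.jpg'], 'studio photo') -> ['b.jpg']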
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-2512-trainer/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-2512-trainer": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImage2512TrainerInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-2512-trainer/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImage2512TrainerOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-edit-2511-trainer", + "metadata": { + "display_name": "Qwen Image Edit 2511 Trainer", + "category": "training", + "description": "LoRA trainer for Qwen Image Edit 2511", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:47.856Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a877998/iFlGCuBjmBr937TUS4j0s_5ef9846274ec45628f2c1d61c23597c9.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-edit-2511-trainer", + "license_type": "commercial", + "date": "2025-12-23T16:15:33.735Z", + "highlighted": false, + "kind": "training", + "pinned": false, + "inference_endpoint_ids": [ + "fal-ai/qwen-image-edit-2511/lora" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image-edit-2511-trainer", + "version": "1.0.0", + "description": "LoRA trainer for Qwen Image Edit 2511. Train custom LoRAs to extend the image editing functionality of Qwen Image Edit 2511", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-image-edit-2511-trainer", + "category": "training", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a877998/iFlGCuBjmBr937TUS4j0s_5ef9846274ec45628f2c1d61c23597c9.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-2511-trainer", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-2511-trainer/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImageEdit2511TrainerInput": { + "x-fal-order-properties": [ + "image_data_url", + "learning_rate", + "steps", + "default_caption" + ], + "type": "object", + "properties": { + "steps": { + "description": "Number of steps to train for", + "type": "integer", + "minimum": 100, + "maximum": 30000, + "title": "Steps", + "multipleOf": 100, + "default": 1000 + }, + "image_data_url": { + "description": "\n URL to the input data zip archive.\n\n The zip should contain pairs of images. The images should be named:\n\n ROOT_start.EXT and ROOT_end.EXT\n For example:\n photo_start.jpg and photo_end.jpg\n\n The zip can also contain more than one reference image for each image pair. The reference images should be named:\n ROOT_start.EXT, ROOT_start2.EXT, ROOT_start3.EXT, ..., ROOT_end.EXT\n For example:\n photo_start.jpg, photo_start2.jpg, photo_end.jpg\n\n The Reference Image Count field should be set to the number of reference images.\n\n The zip can also contain a text file for each image pair. The text file should be named:\n ROOT.txt\n For example:\n photo.txt\n\n This text file can be used to specify the edit instructions for the image pair.\n\n If no text file is provided, the default_caption will be used.\n\n If no default_caption is provided, the training will fail.\n ", + "type": "string", + "title": "Image Data Url" + }, + "learning_rate": { + "description": "Learning rate for LoRA parameters.", + "type": "number", + "title": "Learning Rate", + "default": 0.0001 + }, + "default_caption": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Default caption to use when caption files are missing. If None, missing captions will cause an error.", + "title": "Default Caption" + } + }, + "title": "Input2511", + "required": [ + "image_data_url" + ] + }, + "QwenImageEdit2511TrainerOutput": { + "x-fal-order-properties": [ + "diffusers_lora_file", + "config_file" + ], + "type": "object", + "properties": { + "config_file": { + "description": "URL to the configuration file for the trained model.", + "$ref": "#/components/schemas/File" + }, + "diffusers_lora_file": { + "description": "URL to the trained diffusers lora weights.", + "$ref": "#/components/schemas/File" + } + }, + "title": "Output", + "required": [ + "diffusers_lora_file", + "config_file" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ], + "title": "File Size" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name" + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "examples": [ + "image/png" + ], + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-edit-2511-trainer/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2511-trainer/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2511-trainer": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEdit2511TrainerInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2511-trainer/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEdit2511TrainerOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-layered-trainer", + "metadata": { + "display_name": "Qwen Image Layered Trainer", + "category": "training", + "description": "Train LoRAs for the Qwen-Image-Layered model, customize how images are split into layers.", + "status": "active", + "tags": [ + "qwen", + "layer", + "trainer" + ], + "updated_at": "2026-01-26T21:41:48.664Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8768c3/UAxjM9u4oT0-qP6IRgdUG_75ddfb38008f4271abb4a22013275ac4.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-layered-trainer", + "license_type": "commercial", + "date": "2025-12-23T04:17:31.192Z", + "highlighted": false, + "kind": "training", + "pinned": false, + "inference_endpoint_ids": [ + "fal-ai/qwen-image-layered/lora" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI 
for fal-ai/qwen-image-layered-trainer", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-image-layered-trainer queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-image-layered-trainer", + "category": "training", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8768c3/UAxjM9u4oT0-qP6IRgdUG_75ddfb38008f4271abb4a22013275ac4.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-layered-trainer", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-layered-trainer/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImageLayeredTrainerInput": { + "x-fal-order-properties": [ + "image_data_url", + "learning_rate", + "steps", + "default_caption" + ], + "type": "object", + "properties": { + "steps": { + "description": "Number of steps to train for", + "type": "integer", + "minimum": 100, + "maximum": 10000, + "title": "Steps", + "multipleOf": 100, + "default": 1000 + }, + "image_data_url": { + "title": "Image Data Url", + "type": "string", + "description": "\n URL to the input data zip archive.\n\n The zip should contain groups of images. The images should be named:\n\n ROOT_start.EXT, ROOT_end.EXT, ROOT_end2.EXT, ..., ROOT_endN.EXT\n For example:\n photo_start.png, photo_end.png, photo_end2.png, ..., photo_endN.png\n\n The start image is the base image that will be decomposed into layers.\n The end images are the layers that will be added to the base image. ROOT_end.EXT is the first layer, ROOT_end2.EXT is the second layer, and so on.\n You can have up to 8 layers.\n All image groups must have the same number of output layers.\n\n The end images can contain transparent regions. Only PNG and WebP images are supported since these are the only formats that support transparency.\n\n The zip can also contain a text file for each image group. The text file should be named:\n ROOT.txt\n For example:\n photo.txt\n\n This text file can be used to specify a description of the base image.\n\n If no text file is provided, the default_caption will be used.\n\n If no default_caption is provided, the training will fail.\n " + }, + "learning_rate": { + "title": "Learning Rate", + "type": "number", + "description": "Learning rate for LoRA parameters.", + "default": 0.0001 + }, + "default_caption": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Default caption to use when caption files are missing. 
If None, missing captions will cause an error.", + "title": "Default Caption" + } + }, + "title": "Input", + "required": [ + "image_data_url" + ] + }, + "QwenImageLayeredTrainerOutput": { + "x-fal-order-properties": [ + "diffusers_lora_file", + "config_file" + ], + "type": "object", + "properties": { + "config_file": { + "description": "URL to the configuration file for the trained model.", + "$ref": "#/components/schemas/File" + }, + "diffusers_lora_file": { + "description": "URL to the trained diffusers lora weights.", + "$ref": "#/components/schemas/File" + } + }, + "title": "Output", + "required": [ + "diffusers_lora_file", + "config_file" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-layered-trainer/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-layered-trainer/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
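The layered dataset above names each group ROOT_start.EXT plus ROOT_end.EXT through ROOT_endN.EXT, with up to 8 layers, PNG/WebP only, and the same layer count across all groups. A sketch of a client-side check for that scheme (the contiguous-numbering assumption — end, end2, ..., endN with no gaps — is mine, not the schema's):

function checkLayerGroups(fileNames: Array<string>): number {
  const layersByRoot = new Map<string, number>()
  for (const name of fileNames) {
    const m = /^(.+)_(start|end)(\d*)\.(png|webp)$/i.exec(name)
    if (!m) continue // ROOT.txt captions and anything else are ignored here
    const [, root, kind, index] = m
    if (kind.toLowerCase() === 'start') {
      layersByRoot.set(root, layersByRoot.get(root) ?? 0)
    } else {
      // ROOT_end.EXT is layer 1, ROOT_end2.EXT is layer 2, and so on.
      const layer = index === '' ? 1 : Number(index)
      layersByRoot.set(root, Math.max(layersByRoot.get(root) ?? 0, layer))
    }
  }
  const counts = new Set(layersByRoot.values())
  if (counts.size !== 1) {
    throw new Error('All image groups must have the same number of layers')
  }
  const layers = [...counts][0]
  if (layers < 1 || layers > 8) {
    throw new Error(`Layer count must be between 1 and 8, got ${layers}`)
  }
  return layers
}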
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-layered-trainer": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageLayeredTrainerInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-layered-trainer/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageLayeredTrainerOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-edit-2509-trainer", + "metadata": { + "display_name": "Qwen Image Edit 2509 Trainer", + "category": "training", + "description": "LoRA trainer for Qwen Image Edit 2509", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:57.285Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/kangaroo/JUf-sx4jdYBPYceTS7zZL_f65366a94795483fad050e560a86513a.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-edit-2509-trainer", + "license_type": "commercial", + "date": "2025-12-15T21:18:42.940Z", + "highlighted": false, + "kind": "training", + "pinned": false, + "inference_endpoint_ids": [ + "fal-ai/qwen-image-edit-2509-lora" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image-edit-2509-trainer", + "version": "1.0.0", + "description": "LoRA trainer for Qwen Image Edit 2509. Train custom LoRAs to extend the image editing functionality of Qwen Image Edit 2509", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-image-edit-2509-trainer", + "category": "training", + "thumbnailUrl": "https://v3b.fal.media/files/b/kangaroo/JUf-sx4jdYBPYceTS7zZL_f65366a94795483fad050e560a86513a.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-2509-trainer", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-2509-trainer/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImageEdit2509TrainerInput": { + "title": "InputPlus", + "type": "object", + "properties": { + "steps": { + "description": "Number of steps to train for", + "type": "integer", + "minimum": 100, + "title": "Steps", + "maximum": 30000, + "multipleOf": 100, + "default": 1000 + }, + "image_data_url": { + "title": "Image Data Url", + "type": "string", + "description": "\n URL to the input data zip archive.\n\n The zip should contain pairs of images. The images should be named:\n\n ROOT_start.EXT and ROOT_end.EXT\n For example:\n photo_start.jpg and photo_end.jpg\n\n The zip can also contain more than one reference image for each image pair. The reference images should be named:\n ROOT_start.EXT, ROOT_start2.EXT, ROOT_start3.EXT, ..., ROOT_end.EXT\n For example:\n photo_start.jpg, photo_start2.jpg, photo_end.jpg\n\n The Reference Image Count field should be set to the number of reference images.\n\n The zip can also contain a text file for each image pair. The text file should be named:\n ROOT.txt\n For example:\n photo.txt\n\n This text file can be used to specify the edit instructions for the image pair.\n\n If no text file is provided, the default_caption will be used.\n\n If no default_caption is provided, the training will fail.\n " + }, + "learning_rate": { + "title": "Learning Rate", + "type": "number", + "description": "Learning rate for LoRA parameters.", + "default": 0.0001 + }, + "default_caption": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Default caption to use when caption files are missing. If None, missing captions will cause an error.", + "title": "Default Caption" + } + }, + "x-fal-order-properties": [ + "image_data_url", + "learning_rate", + "steps", + "default_caption" + ], + "required": [ + "image_data_url" + ] + }, + "QwenImageEdit2509TrainerOutput": { + "title": "Output", + "type": "object", + "properties": { + "config_file": { + "description": "URL to the configuration file for the trained model.", + "$ref": "#/components/schemas/File" + }, + "diffusers_lora_file": { + "description": "URL to the trained diffusers lora weights.", + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "diffusers_lora_file", + "config_file" + ], + "required": [ + "diffusers_lora_file", + "config_file" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-edit-2509-trainer/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2509-trainer/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2509-trainer": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEdit2509TrainerInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-2509-trainer/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEdit2509TrainerOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/z-image-trainer", + "metadata": { + "display_name": "Z Image Trainer", + "category": "training", + "description": "Train LoRAs on Z-Image Turbo, a super fast text-to-image model of 6B parameters developed by Tongyi-MAI.", + "status": "active", + "tags": [ + "turbo", + "z-image", + "fast", + "trainer" + ], + "updated_at": "2026-01-26T21:42:06.044Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a84daae/ubfu5r0bOZVRBN1_V5AJY_fb0dc5421f4e413ba4a8c94c2e043225.jpg", + "model_url": "https://fal.run/fal-ai/z-image-trainer", + "license_type": "commercial", + "date": "2025-12-03T19:09:57.722Z", + "highlighted": false, + "kind": "training", + "pinned": false, + "inference_endpoint_ids": [ + "fal-ai/z-image/turbo/lora" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/z-image-trainer", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/z-image-trainer queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/z-image-trainer", + "category": "training", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a84daae/ubfu5r0bOZVRBN1_V5AJY_fb0dc5421f4e413ba4a8c94c2e043225.jpg", + "playgroundUrl": 
"https://fal.ai/models/fal-ai/z-image-trainer", + "documentationUrl": "https://fal.ai/models/fal-ai/z-image-trainer/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ZImageTrainerInput": { + "title": "Input", + "type": "object", + "properties": { + "steps": { + "description": "Total number of training steps.", + "type": "integer", + "minimum": 100, + "title": "Steps", + "maximum": 10000, + "multipleOf": 100, + "default": 1000 + }, + "image_data_url": { + "title": "Image Data Url", + "type": "string", + "description": "\n URL to zip archive with images of a consistent style. Try to use at least 10 images, although more is better.\n\n The zip can also contain a text file for each image. The text file should be named:\n ROOT.txt\n For example:\n photo.txt\n\n This text file can be used to specify the edit instructions for the image pair.\n\n If no text file is provided, the default_caption will be used.\n\n If no default_caption is provided, the training will fail.\n " + }, + "training_type": { + "enum": [ + "content", + "style", + "balanced" + ], + "title": "Training Type", + "type": "string", + "description": "Type of training to perform. Use 'content' to focus on the content of the images, 'style' to focus on the style of the images, and 'balanced' to focus on a combination of both.", + "default": "balanced" + }, + "learning_rate": { + "title": "Learning Rate", + "type": "number", + "description": "Learning rate applied to trainable parameters.", + "default": 0.0001 + }, + "default_caption": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Default Caption", + "description": "Default caption to use when caption files are missing. If None, missing captions will cause an error." 
+ } + }, + "x-fal-order-properties": [ + "image_data_url", + "steps", + "learning_rate", + "default_caption", + "training_type" + ], + "required": [ + "image_data_url" + ] + }, + "ZImageTrainerOutput": { + "title": "Output", + "type": "object", + "properties": { + "config_file": { + "description": "URL to the configuration file for the trained model.", + "$ref": "#/components/schemas/File" + }, + "diffusers_lora_file": { + "description": "URL to the trained diffusers lora weights.", + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "diffusers_lora_file", + "config_file" + ], + "required": [ + "diffusers_lora_file", + "config_file" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "description": "The mime type of the file.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/z-image-trainer/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/z-image-trainer/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
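As with the other trainers, only image_data_url is required by ZImageTrainerInput; training_type is the distinctive knob here, steering the LoRA toward content, style, or a balance of both. A hypothetical payload (placeholder URL and caption):

// Hypothetical ZImageTrainerInput payload; the dataset URL is a placeholder.
const zImageInput = {
  image_data_url: 'https://example.com/style-images.zip', // placeholder
  steps: 1000, // 100..10000, multiples of 100
  learning_rate: 0.0001,
  training_type: 'style', // 'content' | 'style' | 'balanced'
  default_caption: 'in the style of XYZ', // used when ROOT.txt is missing
}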
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/z-image-trainer": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ZImageTrainerInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/z-image-trainer/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ZImageTrainerOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2-trainer/edit", + "metadata": { + "display_name": "Flux 2 Trainer", + "category": "training", + "description": "Fine-tune FLUX.2 [dev] from Black Forest Labs with custom datasets. Create specialized LoRA adaptations for specific editing tasks.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:13.657Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/rabbit/QQxycBXjY75hch-HBAQKZ_4af8ba3ddb9d457ba5fc51fcd428e720.jpg", + "model_url": "https://fal.run/fal-ai/flux-2-trainer/edit", + "license_type": "commercial", + "date": "2025-11-25T04:36:01.280Z", + "group": { + "key": "flux2-trainer", + "label": "Edit" + }, + "highlighted": false, + "kind": "training", + "pinned": false, + "inference_endpoint_ids": [ + "fal-ai/flux-2/lora/edit" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2-trainer/edit", + "version": "1.0.0", + "description": "Train image editing LoRAs for FLUX.2 [dev], BFL's latest image model. FLUX.2 [dev] offers enhanced realism, crisper and more accurate text generation, and native image editing capabilities.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2-trainer/edit", + "category": "training", + "thumbnailUrl": "https://v3b.fal.media/files/b/rabbit/QQxycBXjY75hch-HBAQKZ_4af8ba3ddb9d457ba5fc51fcd428e720.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2-trainer/edit", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2-trainer/edit/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2TrainerEditInput": { + "title": "InputEdit", + "type": "object", + "properties": { + "steps": { + "description": "Total number of training steps.", + "type": "integer", + "minimum": 100, + "title": "Steps", + "maximum": 10000, + "multipleOf": 100, + "default": 1000 + }, + "image_data_url": { + "title": "Image Data Url", + "type": "string", + "description": "\n URL to the input data zip archive.\n\n The zip should contain pairs of images. The images should be named:\n\n ROOT_start.EXT and ROOT_end.EXT\n For example:\n photo_start.jpg and photo_end.jpg\n\n The zip can also contain up to four reference image for each image pair. The reference images should be named:\n ROOT_start.EXT, ROOT_start2.EXT, ROOT_start3.EXT, ROOT_start4.EXT, ROOT_end.EXT\n For example:\n photo_start.jpg, photo_start2.jpg, photo_end.jpg\n\n The zip can also contain a text file for each image pair. The text file should be named:\n ROOT.txt\n For example:\n photo.txt\n\n This text file can be used to specify the edit instructions for the image pair.\n\n If no text file is provided, the default_caption will be used.\n\n If no default_caption is provided, the training will fail.\n " + }, + "learning_rate": { + "title": "Learning Rate", + "type": "number", + "description": "Learning rate applied to trainable parameters.", + "default": 0.00005 + }, + "default_caption": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Default Caption", + "description": "Default caption to use when caption files are missing. If None, missing captions will cause an error." + }, + "output_lora_format": { + "enum": [ + "fal", + "comfy" + ], + "title": "Output Lora Format", + "type": "string", + "description": "Dictates the naming scheme for the output weights", + "default": "fal" + } + }, + "x-fal-order-properties": [ + "image_data_url", + "steps", + "learning_rate", + "default_caption", + "output_lora_format" + ], + "required": [ + "image_data_url" + ] + }, + "Flux2TrainerEditOutput": { + "title": "Output", + "type": "object", + "properties": { + "config_file": { + "description": "URL to the configuration file for the trained model.", + "$ref": "#/components/schemas/File" + }, + "diffusers_lora_file": { + "description": "URL to the trained diffusers lora weights.", + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "diffusers_lora_file", + "config_file" + ], + "required": [ + "diffusers_lora_file", + "config_file" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
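Flux2TrainerEditInput above differs from the Qwen edit trainers mainly in its lower default learning rate (0.00005) and the output_lora_format switch, which dictates the naming scheme of the output weights. A hypothetical payload (placeholder URL):

// Hypothetical Flux2TrainerEditInput payload for edit-pair training.
const flux2EditInput = {
  image_data_url: 'https://example.com/edit-pairs.zip', // placeholder
  steps: 1000, // 100..10000, multiples of 100
  learning_rate: 0.00005, // documented default
  output_lora_format: 'comfy', // or 'fal' (the default)
}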
+ } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2-trainer/edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-trainer/edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2-trainer/edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2TrainerEditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-trainer/edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2TrainerEditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flux-2-trainer", + "metadata": { + "display_name": "Flux 2 Trainer", + "category": "training", + "description": "Fine-tune FLUX.2 [dev] from Black Forest Labs with custom datasets. Create specialized LoRA adaptations for specific styles and domains.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:13.787Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/tiger/nYv87OHdt503yjlNUk1P3_2551388f5f4e4537b67e8ed436333bca.jpg", + "model_url": "https://fal.run/fal-ai/flux-2-trainer", + "license_type": "commercial", + "date": "2025-11-25T04:33:24.920Z", + "group": { + "key": "flux2-trainer", + "label": "Text to Image" + }, + "highlighted": false, + "kind": "training", + "pinned": false, + "inference_endpoint_ids": [ + "fal-ai/flux-2/lora" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flux-2-trainer", + "version": "1.0.0", + "description": "Train text-to-image LoRAs for Flux.2, BFL's latest image model. 
Flux.2 offers enhanced realism, crisper and more accurate text generation, and native image editing capabilities.", + "x-fal-metadata": { + "endpointId": "fal-ai/flux-2-trainer", + "category": "training", + "thumbnailUrl": "https://v3b.fal.media/files/b/tiger/nYv87OHdt503yjlNUk1P3_2551388f5f4e4537b67e8ed436333bca.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flux-2-trainer", + "documentationUrl": "https://fal.ai/models/fal-ai/flux-2-trainer/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Flux2TrainerInput": { + "title": "InputT2I", + "type": "object", + "properties": { + "steps": { + "description": "Total number of training steps.", + "type": "integer", + "minimum": 100, + "title": "Steps", + "maximum": 10000, + "multipleOf": 100, + "default": 1000 + }, + "image_data_url": { + "title": "Image Data Url", + "type": "string", + "description": "\n URL to zip archive with images of a consistent style. Try to use at least 10 images, although more is better.\n\n The zip can also contain a text file for each image. The text file should be named:\n ROOT.txt\n For example:\n photo.txt\n\n This text file can be used to specify the caption for the image.\n\n If no text file is provided, the default_caption will be used.\n\n If no default_caption is provided, the training will fail.\n " + }, + "learning_rate": { + "title": "Learning Rate", + "type": "number", + "description": "Learning rate applied to trainable parameters.", + "default": 0.00005 + }, + "default_caption": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Default Caption", + "description": "Default caption to use when caption files are missing. If None, missing captions will cause an error."
+ }, + "output_lora_format": { + "enum": [ + "fal", + "comfy" + ], + "title": "Output Lora Format", + "type": "string", + "description": "Dictates the naming scheme for the output weights", + "default": "fal" + } + }, + "x-fal-order-properties": [ + "image_data_url", + "steps", + "learning_rate", + "default_caption", + "output_lora_format" + ], + "required": [ + "image_data_url" + ] + }, + "Flux2TrainerOutput": { + "title": "Output", + "type": "object", + "properties": { + "config_file": { + "description": "URL to the configuration file for the trained model.", + "$ref": "#/components/schemas/File" + }, + "diffusers_lora_file": { + "description": "URL to the trained diffusers lora weights.", + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "diffusers_lora_file", + "config_file" + ], + "required": [ + "diffusers_lora_file", + "config_file" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flux-2-trainer/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-trainer/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flux-2-trainer": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2TrainerInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flux-2-trainer/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flux2TrainerOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-edit-plus-trainer", + "metadata": { + "display_name": "Qwen Image Edit Plus Trainer", + "category": "training", + "description": "LoRA trainer for Qwen Image Edit Plus", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:28.817Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/kangaroo/JUf-sx4jdYBPYceTS7zZL_f65366a94795483fad050e560a86513a.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-edit-plus-trainer", + "license_type": "commercial", + "date": "2025-10-30T18:26:29.952Z", + "highlighted": false, + "kind": "training", + "pinned": false, + "inference_endpoint_ids": [ + "fal-ai/qwen-image-edit-plus-lora" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image-edit-plus-trainer", + "version": "1.0.0", + "description": "LoRA trainer for Qwen Image Edit Plus. Train custom LoRAs to extend the image editing functionality of Qwen Image Edit Plus", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-image-edit-plus-trainer", + "category": "training", + "thumbnailUrl": "https://v3b.fal.media/files/b/kangaroo/JUf-sx4jdYBPYceTS7zZL_f65366a94795483fad050e560a86513a.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-plus-trainer", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-plus-trainer/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImageEditPlusTrainerInput": { + "title": "InputPlus", + "type": "object", + "properties": { + "steps": { + "description": "Number of steps to train for", + "type": "integer", + "minimum": 100, + "title": "Steps", + "maximum": 30000, + "multipleOf": 100, + "default": 1000 + }, + "image_data_url": { + "title": "Image Data Url", + "type": "string", + "description": "\n URL to the input data zip archive.\n\n The zip should contain pairs of images. The images should be named:\n\n ROOT_start.EXT and ROOT_end.EXT\n For example:\n photo_start.jpg and photo_end.jpg\n\n The zip can also contain more than one reference image for each image pair. The reference images should be named:\n ROOT_start.EXT, ROOT_start2.EXT, ROOT_start3.EXT, ..., ROOT_end.EXT\n For example:\n photo_start.jpg, photo_start2.jpg, photo_end.jpg\n\n The Reference Image Count field should be set to the number of reference images.\n\n The zip can also contain a text file for each image pair. The text file should be named:\n ROOT.txt\n For example:\n photo.txt\n\n This text file can be used to specify the edit instructions for the image pair.\n\n If no text file is provided, the default_caption will be used.\n\n If no default_caption is provided, the training will fail.\n " + }, + "learning_rate": { + "title": "Learning Rate", + "type": "number", + "description": "Learning rate for LoRA parameters.", + "default": 0.0001 + }, + "default_caption": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Default caption to use when caption files are missing. If None, missing captions will cause an error.", + "title": "Default Caption" + } + }, + "x-fal-order-properties": [ + "image_data_url", + "learning_rate", + "steps", + "default_caption" + ], + "required": [ + "image_data_url" + ] + }, + "QwenImageEditPlusTrainerOutput": { + "title": "Output", + "type": "object", + "properties": { + "config_file": { + "description": "URL to the configuration file for the trained model.", + "$ref": "#/components/schemas/File" + }, + "diffusers_lora_file": { + "description": "URL to the trained diffusers lora weights.", + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "diffusers_lora_file", + "config_file" + ], + "required": [ + "diffusers_lora_file", + "config_file" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-edit-plus-trainer/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-plus-trainer/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-plus-trainer": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEditPlusTrainerInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-plus-trainer/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEditPlusTrainerOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-edit-trainer", + "metadata": { + "display_name": "Qwen Image Edit Trainer", + "category": "training", + "description": "LoRA trainer for Qwen Image Edit", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:28.946Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/rabbit/pB3FhNP-IEVkffftFpArb_54830ef85194496cbf11cf7f061cd2ba.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-edit-trainer", + "license_type": "commercial", + "date": "2025-10-30T18:17:15.299Z", + "highlighted": false, + "kind": "training", + "pinned": false, + "inference_endpoint_ids": [ + "fal-ai/qwen-image-edit-lora" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image-edit-trainer", + "version": "1.0.0", + "description": "LoRA trainer for Qwen Image Edit. 
Train custom LoRAs to extend the image editing functionality of Qwen Image Edit", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-image-edit-trainer", + "category": "training", + "thumbnailUrl": "https://v3b.fal.media/files/b/rabbit/pB3FhNP-IEVkffftFpArb_54830ef85194496cbf11cf7f061cd2ba.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-trainer", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-edit-trainer/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImageEditTrainerInput": { + "x-fal-order-properties": [ + "image_data_url", + "learning_rate", + "steps", + "default_caption" + ], + "type": "object", + "properties": { + "steps": { + "description": "Number of steps to train for", + "type": "integer", + "minimum": 100, + "maximum": 30000, + "title": "Steps", + "multipleOf": 100, + "default": 1000 + }, + "image_data_url": { + "title": "Image Data Url", + "type": "string", + "description": "\n URL to the input data zip archive.\n\n The zip should contain pairs of images. The images should be named:\n\n ROOT_start.EXT and ROOT_end.EXT\n For example:\n photo_start.jpg and photo_end.jpg\n\n The zip can also contain a text file for each image pair. The text file should be named:\n ROOT.txt\n For example:\n photo.txt\n\n This text file can be used to specify the edit instructions for the image pair.\n\n If no text file is provided, the default_caption will be used.\n\n If no default_caption is provided, the training will fail.\n " + }, + "learning_rate": { + "title": "Learning Rate", + "type": "number", + "description": "Learning rate for LoRA parameters.", + "default": 0.0001 + }, + "default_caption": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Default Caption", + "description": "Default caption to use when caption files are missing. If None, missing captions will cause an error." 
+ } + }, + "title": "InputEdit", + "required": [ + "image_data_url" + ] + }, + "QwenImageEditTrainerOutput": { + "x-fal-order-properties": [ + "diffusers_lora_file", + "config_file" + ], + "type": "object", + "properties": { + "config_file": { + "description": "URL to the configuration file for the trained model.", + "$ref": "#/components/schemas/File" + }, + "diffusers_lora_file": { + "description": "URL to the trained diffusers lora weights.", + "$ref": "#/components/schemas/File" + } + }, + "title": "Output", + "required": [ + "diffusers_lora_file", + "config_file" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-edit-trainer/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-trainer/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-trainer": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEditTrainerInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-edit-trainer/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageEditTrainerOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-image-trainer", + "metadata": { + "display_name": "Qwen Image Trainer", + "category": "training", + "description": "Qwen Image LoRA training", + "status": "active", + "tags": [ + "lora", + "personalization" + ], + "updated_at": "2026-01-26T21:43:04.890Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/kangaroo/Ezdr45OqUN2jaBP8tBuqL_d7bb3b0fadd54124b9884975b7e2f626.jpg", + "model_url": "https://fal.run/fal-ai/qwen-image-trainer", + "license_type": "commercial", + "date": "2025-08-14T01:23:07.232Z", + "highlighted": false, + "kind": "training", + "duration_estimate": 35, + "pinned": false, + "inference_endpoint_ids": [ + "fal-ai/qwen-image" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-image-trainer", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-image-trainer queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-image-trainer", + "category": "training", + "thumbnailUrl": "https://fal.media/files/kangaroo/Ezdr45OqUN2jaBP8tBuqL_d7bb3b0fadd54124b9884975b7e2f626.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-image-trainer", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-image-trainer/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "QwenImageTrainerInput": { + "title": "PublicInput", + "type": "object", + "properties": { + "steps": { + "description": "Total number of training steps to perform. 
Default is 4000.", + "type": "integer", + "minimum": 1, + "maximum": 8000, + "title": "Steps", + "default": 1000 + }, + "image_data_url": { + "title": "Image Data Url", + "type": "string", + "description": "\n URL to zip archive with images for training. The archive should contain images and corresponding text files with captions.\n Each text file should have the same name as the image file it corresponds to (e.g., image1.jpg and image1.txt).\n If text files are missing for some images, you can provide a trigger_phrase to automatically create them.\n Supported image formats: PNG, JPG, JPEG, WEBP.\n Try to use at least 10 images, although more is better.\n " + }, + "learning_rate": { + "minimum": 0.000001, + "maximum": 0.01, + "type": "number", + "title": "Learning Rate", + "description": "Learning rate for training. Default is 5e-4", + "default": 0.0005 + }, + "trigger_phrase": { + "title": "Trigger Phrase", + "type": "string", + "description": "Default caption to use for images that don't have corresponding text files. If provided, missing .txt files will be created automatically.", + "default": "" + } + }, + "x-fal-order-properties": [ + "image_data_url", + "steps", + "learning_rate", + "trigger_phrase" + ], + "required": [ + "image_data_url" + ] + }, + "QwenImageTrainerOutput": { + "title": "Output", + "type": "object", + "properties": { + "lora_file": { + "title": "Lora File", + "description": "URL to the trained LoRA weights file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "config_file": { + "title": "Config File", + "description": "URL to the training configuration file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "lora_file", + "config_file" + ], + "required": [ + "lora_file", + "config_file" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-image-trainer/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-trainer/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-trainer": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageTrainerInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-image-trainer/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QwenImageTrainerOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan-22-image-trainer", + "metadata": { + "display_name": "Wan 2.2 14B Image Trainer", + "category": "training", + "description": "Wan 2.2 text to image LoRA trainer. Fine-tune Wan 2.2 for subjects and styles with unprecedented detail.", + "status": "active", + "tags": [ + "lora", + "personalization" + ], + "updated_at": "2026-01-26T21:43:05.770Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/koala/1VUig6knsJ9-DAf8eRzBQ_5a0e3e33bbbf4d869ec368fa31f8700e.jpg", + "model_url": "https://fal.run/fal-ai/wan-22-image-trainer", + "license_type": "commercial", + "date": "2025-08-11T05:09:18.685Z", + "highlighted": false, + "kind": "training", + "pinned": false, + "inference_endpoint_ids": [ + "fal-ai/wan/v2.2-a14b/text-to-image/lora" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan-22-image-trainer", + "version": "1.0.0", + "description": "Wan 2.2 text to image LoRA trainer. Fine-tune Wan 2.2 for subjects and styles with unprecedented detail.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan-22-image-trainer", + "category": "training", + "thumbnailUrl": "https://fal.media/files/koala/1VUig6knsJ9-DAf8eRzBQ_5a0e3e33bbbf4d869ec368fa31f8700e.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan-22-image-trainer", + "documentationUrl": "https://fal.ai/models/fal-ai/wan-22-image-trainer/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Wan22ImageTrainerInput": { + "x-fal-order-properties": [ + "training_data_url", + "trigger_phrase", + "include_synthetic_captions", + "use_face_detection", + "use_face_cropping", + "use_masks", + "steps", + "learning_rate", + "is_style" + ], + "type": "object", + "properties": { + "trigger_phrase": { + "description": "Trigger phrase for the model.", + "type": "string", + "title": "Trigger Phrase" + }, + "use_masks": { + "examples": [ + true + ], + "description": "Whether to use masks for the training data.", + "type": "boolean", + "title": "Use Masks", + "default": true + }, + "learning_rate": { + "description": "Learning rate for training.", + "type": "number", + "examples": [ + 0.0007 + ], + "title": "Learning Rate", + "minimum": 0.000001, + "maximum": 0.1, + "multipleOf": 0.000001, + "default": 0.0007 + }, + "use_face_cropping": { + "examples": [ + false + ], + "description": "Whether to use face cropping for the training data. When enabled, images will be cropped to the face before resizing.", + "type": "boolean", + "title": "Use Face Cropping", + "default": false + }, + "training_data_url": { + "description": "URL to the training data.", + "type": "string", + "title": "Training Data URL" + }, + "steps": { + "description": "Number of training steps.", + "type": "integer", + "minimum": 10, + "title": "Number of Steps", + "examples": [ + 1000 + ], + "maximum": 6000, + "default": 1000 + }, + "include_synthetic_captions": { + "description": "Whether to include synthetic captions.", + "type": "boolean", + "title": "Include Synthetic Captions", + "default": false + }, + "is_style": { + "examples": [ + false + ], + "description": "Whether the training data is style data. If true, face specific options like masking and face detection will be disabled.", + "type": "boolean", + "title": "Is Style", + "default": false + }, + "use_face_detection": { + "examples": [ + true + ], + "description": "Whether to use face detection for the training data. 
When enabled, images will use the center of the face as the center of the image when resizing.", + "type": "boolean", + "title": "Use Face Detection", + "default": true + } + }, + "title": "BasicInput", + "required": [ + "training_data_url", + "trigger_phrase" + ] + }, + "Wan22ImageTrainerOutput": { + "x-fal-order-properties": [ + "diffusers_lora_file", + "high_noise_lora", + "config_file" + ], + "type": "object", + "properties": { + "config_file": { + "description": "Config file helping inference endpoints after training.", + "title": "Config File", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "high_noise_lora": { + "description": "High noise LoRA file.", + "title": "High Noise LoRA", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "diffusers_lora_file": { + "description": "Low noise LoRA file.", + "title": "Low Noise LoRA", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "WanTrainerResponse", + "required": [ + "diffusers_lora_file", + "high_noise_lora", + "config_file" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan-22-image-trainer/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-22-image-trainer/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/wan-22-image-trainer": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Wan22ImageTrainerInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-22-image-trainer/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Wan22ImageTrainerOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan-trainer/t2v", + "metadata": { + "display_name": "Wan-2.1 LoRA Trainer", + "category": "training", + "description": "Train custom LoRAs for Wan-2.1 T2V 1.3B", + "status": "active", + "tags": [ + "lora", + "training" + ], + "updated_at": "2026-01-26T21:43:28.626Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/monkey/ZQ0YvNuW1FVoX7wvAU5uE_0590d28948af442dbb20bd581085f8c3.jpg", + "model_url": "https://fal.run/fal-ai/wan-trainer/t2v", + "license_type": "commercial", + "date": "2025-06-11T00:35:31.207Z", + "group": { + "key": "wan-trainer", + "label": "T2V 1.3B" + }, + "highlighted": false, + "kind": "training", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan-trainer/t2v", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan-trainer/t2v queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan-trainer/t2v", + "category": "training", + "thumbnailUrl": "https://fal.media/files/monkey/ZQ0YvNuW1FVoX7wvAU5uE_0590d28948af442dbb20bd581085f8c3.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan-trainer/t2v", + "documentationUrl": "https://fal.ai/models/fal-ai/wan-trainer/t2v/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanTrainerT2vInput": { + "title": "Input", + "type": "object", + "properties": { + "number_of_steps": { + "minimum": 1, + "maximum": 20000, + "type": "integer", + "title": "Number Of Steps", + "description": "The number of steps to train for.", + "default": 400 + }, + "training_data_url": { + "title": "Training Data URL", + "type": "string", + "description": "URL to zip archive with images of a consistent style. Try to use at least 10 images and/or videos, although more is better.\n\n In addition to images the archive can contain text files with captions. Each text file should have the same name as the image/video file it corresponds to." + }, + "trigger_phrase": { + "title": "Trigger Phrase", + "type": "string", + "description": "The phrase that will trigger the model to generate an image.", + "default": "" + }, + "learning_rate": { + "minimum": 0.000001, + "maximum": 1, + "type": "number", + "title": "Learning Rate", + "description": "The rate at which the model learns. Higher values can lead to faster training but may cause over-fitting.", + "default": 0.0002 + }, + "auto_scale_input": { + "examples": [ + true + ], + "title": "Auto-Scale Input", + "type": "boolean", + "description": "If true, the input video will be automatically scaled to 81 frames at 16fps.", + "default": false + } + }, + "x-fal-order-properties": [ + "training_data_url", + "number_of_steps", + "learning_rate", + "trigger_phrase", + "auto_scale_input" + ], + "required": [ + "training_data_url" + ] + }, + "WanTrainerT2vOutput": { + "title": "Output", + "type": "object", + "properties": { + "lora_file": { + "title": "Lora File", + "description": "URL to the trained LoRA weights.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "config_file": { + "title": "Config File", + "description": "Configuration used for setting up the inference endpoints.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "lora_file", + "config_file" + ], + "required": [ + "lora_file", + "config_file" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan-trainer/t2v/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)."
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-trainer/t2v/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/wan-trainer/t2v": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanTrainerT2vInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-trainer/t2v/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanTrainerT2vOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan-trainer/t2v-14b", + "metadata": { + "display_name": "Wan-2.1 LoRA Trainer", + "category": "training", + "description": "Train custom LoRAs for Wan-2.1 T2V 14B", + "status": "active", + "tags": [ + "lora", + "training" + ], + "updated_at": "2026-01-26T21:43:28.756Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/penguin/BtZ-j-cWvM_WqOtQGdjLj_a10682239ded4738a4e23ffc2a4ddeb4.jpg", + "model_url": "https://fal.run/fal-ai/wan-trainer/t2v-14b", + "license_type": "commercial", + "date": "2025-06-11T00:34:42.291Z", + "group": { + "key": "wan-trainer", + "label": "T2V 14B" + }, + "highlighted": false, + "kind": "training", + "pinned": false, + "inference_endpoint_ids": [ + "fal-ai/wan-t2v-lora" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan-trainer/t2v-14b", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan-trainer/t2v-14b queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan-trainer/t2v-14b", + "category": "training", + "thumbnailUrl": "https://fal.media/files/penguin/BtZ-j-cWvM_WqOtQGdjLj_a10682239ded4738a4e23ffc2a4ddeb4.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan-trainer/t2v-14b", + "documentationUrl": "https://fal.ai/models/fal-ai/wan-trainer/t2v-14b/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanTrainerT2v14bInput": { + "title": "Input", + "type": "object", + "properties": { + "number_of_steps": { + "minimum": 1, + "maximum": 20000, + "type": "integer", + "title": "Number Of Steps", + "description": "The number of steps to train for.", + "default": 400 + }, + "training_data_url": { + "title": "Training Data URL", + "type": "string", + "description": "URL to zip archive with images of a consistent style. Try to use at least 10 images and/or videos, although more is better.\n\n In addition to images the archive can contain text files with captions. Each text file should have the same name as the image/video file it corresponds to." + }, + "trigger_phrase": { + "title": "Trigger Phrase", + "type": "string", + "description": "The phrase that will trigger the model to generate an image.", + "default": "" + }, + "learning_rate": { + "minimum": 0.000001, + "maximum": 1, + "type": "number", + "title": "Learning Rate", + "description": "The rate at which the model learns. Higher values can lead to faster training but may cause over-fitting.", + "default": 0.0002 + }, + "auto_scale_input": { + "examples": [ + true + ], + "title": "Auto-Scale Input", + "type": "boolean", + "description": "If true, the input video will be automatically scaled to 81 frames at 16fps.", + "default": false + } + }, + "x-fal-order-properties": [ + "training_data_url", + "number_of_steps", + "learning_rate", + "trigger_phrase", + "auto_scale_input" + ], + "required": [ + "training_data_url" + ] + }, + "WanTrainerT2v14bOutput": { + "title": "Output", + "type": "object", + "properties": { + "lora_file": { + "title": "Lora File", + "description": "URL to the trained LoRA weights.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "config_file": { + "title": "Config File", + "description": "Configuration used for setting up the inference endpoints.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "lora_file", + "config_file" + ], + "required": [ + "lora_file", + "config_file" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from."
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan-trainer/t2v-14b/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-trainer/t2v-14b/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/wan-trainer/t2v-14b": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanTrainerT2v14bInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-trainer/t2v-14b/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanTrainerT2v14bOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan-trainer/i2v-720p", + "metadata": { + "display_name": "Wan-2.1 LoRA Trainer", + "category": "training", + "description": "Train custom LoRAs for Wan-2.1 I2V 720P", + "status": "active", + "tags": [ + "lora", + "training" + ], + "updated_at": "2026-01-26T21:43:28.884Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/panda/R1H5KbqWR_DIyysIpr271_e73fd3a6acde48209bc152f277959385.jpg", + "model_url": "https://fal.run/fal-ai/wan-trainer/i2v-720p", + "license_type": "commercial", + "date": "2025-06-11T00:33:08.714Z", + "group": { + "key": "wan-trainer", + "label": "I2V 14B 720P" + }, + "highlighted": false, + "kind": "training", + "pinned": false, + "inference_endpoint_ids": [ + "fal-ai/wan-i2v-lora" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan-trainer/i2v-720p", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan-trainer/i2v-720p queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan-trainer/i2v-720p", + "category": "training", + "thumbnailUrl": 
"https://fal.media/files/panda/R1H5KbqWR_DIyysIpr271_e73fd3a6acde48209bc152f277959385.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan-trainer/i2v-720p", + "documentationUrl": "https://fal.ai/models/fal-ai/wan-trainer/i2v-720p/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanTrainerI2v720pInput": { + "title": "Input", + "type": "object", + "properties": { + "number_of_steps": { + "minimum": 1, + "maximum": 20000, + "type": "integer", + "title": "Number Of Steps", + "description": "The number of steps to train for.", + "default": 400 + }, + "training_data_url": { + "title": "Training Data URL", + "type": "string", + "description": "URL to zip archive with images of a consistent style. Try to use at least 10 images and/or videos, although more is better.\n\n In addition to images the archive can contain text files with captions. Each text file should have the same name as the image/video file it corresponds to." + }, + "trigger_phrase": { + "title": "Trigger Phrase", + "type": "string", + "description": "The phrase that will trigger the model to generate an image.", + "default": "" + }, + "learning_rate": { + "minimum": 0.000001, + "maximum": 1, + "type": "number", + "title": "Learning Rate", + "description": "The rate at which the model learns. Higher values can lead to faster training, but over-fitting.", + "default": 0.0002 + }, + "auto_scale_input": { + "examples": [ + true + ], + "title": "Auto-Scale Input", + "type": "boolean", + "description": "If true, the input will be automatically scale the video to 81 frames at 16fps.", + "default": false + } + }, + "x-fal-order-properties": [ + "training_data_url", + "number_of_steps", + "learning_rate", + "trigger_phrase", + "auto_scale_input" + ], + "required": [ + "training_data_url" + ] + }, + "WanTrainerI2v720pOutput": { + "title": "Output", + "type": "object", + "properties": { + "lora_file": { + "title": "Lora File", + "description": "URL to the trained LoRA weights.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "config_file": { + "title": "Config File", + "description": "Configuration used for setting up the inference endpoints.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "lora_file", + "config_file" + ], + "required": [ + "lora_file", + "config_file" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." 
+ }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan-trainer/i2v-720p/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-trainer/i2v-720p/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/wan-trainer/i2v-720p": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanTrainerI2v720pInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-trainer/i2v-720p/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanTrainerI2v720pOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan-trainer/flf2v-720p", + "metadata": { + "display_name": "Wan-2.1 LoRA Trainer", + "category": "training", + "description": "Train custom LoRAs for Wan-2.1 FLF2V 720P", + "status": "active", + "tags": [ + "lora", + "training" + ], + "updated_at": "2026-01-26T21:43:29.096Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/elephant/GRWFeDBLFXvbTF9b0lmJf_e269b3bf7ba147d3b56b0ee7b6e36439.jpg", + "model_url": "https://fal.run/fal-ai/wan-trainer/flf2v-720p", + "license_type": "commercial", + "date": "2025-06-11T00:29:37.017Z", + "group": { + "key": "wan-trainer", + "label": "FLF2V 14B 720P" + }, + "highlighted": false, + "kind": "training", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan-trainer/flf2v-720p", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan-trainer/flf2v-720p queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan-trainer/flf2v-720p", + "category": "training", + "thumbnailUrl": "https://fal.media/files/elephant/GRWFeDBLFXvbTF9b0lmJf_e269b3bf7ba147d3b56b0ee7b6e36439.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan-trainer/flf2v-720p", + "documentationUrl": "https://fal.ai/models/fal-ai/wan-trainer/flf2v-720p/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
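Since the i2v-720p paths mirror the other trainers, driving a queued request to completion is just a loop over GET .../requests/{request_id}/status with logs=1 until the status enum reaches COMPLETED. A sketch under the same auth assumptions as above; the five-second poll interval is an arbitrary choice, not something the schema prescribes:

// Sketch: poll a queued fal-ai/wan-trainer/i2v-720p request until it completes.
async function waitForCompletion(requestId: string): Promise<void> {
  const base = 'https://queue.fal.run/fal-ai/wan-trainer/i2v-720p/requests'
  for (;;) {
    const res = await fetch(`${base}/${requestId}/status?logs=1`, {
      headers: { Authorization: `Key ${process.env.FAL_KEY}` },
    })
    const status = (await res.json()) as {
      status: 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED'
      queue_position?: number
    }
    if (status.status === 'COMPLETED') return
    // Hypothetical pacing between polls; tune to taste.
    await new Promise((resolve) => setTimeout(resolve, 5_000))
  }
}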
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanTrainerFlf2v720pInput": { + "title": "Input", + "type": "object", + "properties": { + "number_of_steps": { + "minimum": 1, + "maximum": 20000, + "type": "integer", + "title": "Number Of Steps", + "description": "The number of steps to train for.", + "default": 400 + }, + "training_data_url": { + "title": "Training Data URL", + "type": "string", + "description": "URL to zip archive with images of a consistent style. Try to use at least 10 images and/or videos, although more is better.\n\n In addition to images the archive can contain text files with captions. Each text file should have the same name as the image/video file it corresponds to." + }, + "trigger_phrase": { + "title": "Trigger Phrase", + "type": "string", + "description": "The phrase that will trigger the model to generate an image.", + "default": "" + }, + "learning_rate": { + "minimum": 0.000001, + "maximum": 1, + "type": "number", + "title": "Learning Rate", + "description": "The rate at which the model learns. Higher values can lead to faster training, but over-fitting.", + "default": 0.0002 + }, + "auto_scale_input": { + "examples": [ + true + ], + "title": "Auto-Scale Input", + "type": "boolean", + "description": "If true, the input will be automatically scale the video to 81 frames at 16fps.", + "default": false + } + }, + "x-fal-order-properties": [ + "training_data_url", + "number_of_steps", + "learning_rate", + "trigger_phrase", + "auto_scale_input" + ], + "required": [ + "training_data_url" + ] + }, + "WanTrainerFlf2v720pOutput": { + "title": "Output", + "type": "object", + "properties": { + "lora_file": { + "title": "Lora File", + "description": "URL to the trained LoRA weights.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "config_file": { + "title": "Config File", + "description": "Configuration used for setting up the inference endpoints.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "lora_file", + "config_file" + ], + "required": [ + "lora_file", + "config_file" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan-trainer/flf2v-720p/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-trainer/flf2v-720p/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/wan-trainer/flf2v-720p": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanTrainerFlf2v720pInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-trainer/flf2v-720p/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanTrainerFlf2v720pOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx-video-trainer", + "metadata": { + "display_name": "LTX Video Trainer", + "category": "training", + "description": "Train LTX Video 0.9.7 for custom styles and effects.", + "status": "active", + "tags": [ + "ltx-video", + "fine-tuning" + ], + "updated_at": "2026-01-26T21:43:46.136Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Training-3.jpg", + "model_url": "https://fal.run/fal-ai/ltx-video-trainer", + "license_type": "commercial", + "date": "2025-05-08T19:29:59.104Z", + "highlighted": false, + "kind": "training", + "duration_estimate": 20, + "pinned": false, + "inference_endpoint_ids": [ + "fal-ai/ltx-video-lora/image-to-video" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-video-trainer", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx-video-trainer queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-video-trainer", + "category": "training", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Training-3.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx-video-trainer", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx-video-trainer/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." 
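Once a request reports COMPLETED, GET /fal-ai/wan-trainer/flf2v-720p/requests/{request_id} returns the Output schema, where lora_file and config_file are both required File objects and only url is guaranteed on each. A sketch; FalFile here is a hand-rolled mirror of the File schema, not an import from a published package:

// Sketch: fetch a finished flf2v-720p training result (lora_file + config_file).
interface FalFile {
  url: string // the only required File property
  content_type?: string
  file_name?: string
  file_size?: number
}

async function fetchFlf2vResult(requestId: string): Promise<string> {
  const res = await fetch(
    `https://queue.fal.run/fal-ai/wan-trainer/flf2v-720p/requests/${requestId}`,
    { headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
  )
  const out = (await res.json()) as { lora_file: FalFile; config_file: FalFile }
  return out.lora_file.url // URL to the trained LoRA weights
}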
+ }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LtxVideoTrainerInput": { + "x-fal-order-properties": [ + "training_data_url", + "rank", + "number_of_steps", + "number_of_frames", + "frame_rate", + "resolution", + "aspect_ratio", + "learning_rate", + "trigger_phrase", + "auto_scale_input", + "split_input_into_scenes", + "split_input_duration_threshold", + "validation", + "validation_negative_prompt", + "validation_number_of_frames", + "validation_resolution", + "validation_aspect_ratio", + "validation_reverse" + ], + "type": "object", + "properties": { + "number_of_steps": { + "description": "The number of steps to train for.", + "type": "integer", + "minimum": 100, + "maximum": 20000, + "title": "Number Of Steps", + "examples": [ + 1000 + ], + "step": 100, + "default": 1000 + }, + "frame_rate": { + "description": "The target frames per second for the video.", + "type": "integer", + "minimum": 8, + "maximum": 60, + "title": "Frame Rate", + "examples": [ + 25 + ], + "default": 25 + }, + "learning_rate": { + "description": "The rate at which the model learns. Higher values can lead to faster training, but over-fitting.", + "type": "number", + "minimum": 0.000001, + "maximum": 1, + "title": "Learning Rate", + "examples": [ + 0.0002 + ], + "step": 0.0001, + "default": 0.0002 + }, + "validation": { + "description": "A list of validation prompts to use during training. When providing an image, _all_ validation inputs must have an image.", + "type": "array", + "maxItems": 2, + "title": "Validation", + "items": { + "$ref": "#/components/schemas/Validation" + }, + "default": [] + }, + "number_of_frames": { + "description": "The number of frames to use for training. This is the number of frames per second multiplied by the number of seconds.", + "type": "integer", + "minimum": 25, + "maximum": 121, + "title": "Number Of Frames", + "examples": [ + 81 + ], + "default": 81 + }, + "validation_reverse": { + "description": "If true, the validation videos will be reversed. This is useful for effects that are learned in reverse and then applied in reverse.", + "type": "boolean", + "title": "Validation Reverse", + "default": false + }, + "training_data_url": { + "description": "URL to zip archive with videos or images. Try to use at least 10 files, although more is better.\n\n **Supported video formats:** .mp4, .mov, .avi, .mkv\n **Supported image formats:** .png, .jpg, .jpeg\n\n Note: The dataset must contain ONLY videos OR ONLY images - mixed datasets are not supported.\n\n The archive can also contain text files with captions. Each text file should have the same name as the media file it corresponds to.", + "type": "string", + "title": "Training Data Url" + }, + "split_input_duration_threshold": { + "description": "The duration threshold in seconds. If a video is longer than this, it will be split into scenes. If you provide captions for a split video, the caption will be applied to each scene. 
If you do not provide captions, scenes will be auto-captioned.", + "type": "number", + "minimum": 1, + "maximum": 60, + "title": "Split Input Duration Threshold", + "examples": [ + 30 + ], + "default": 30 + }, + "rank": { + "enum": [ + 8, + 16, + 32, + 64, + 128 + ], + "description": "The rank of the LoRA.", + "type": "integer", + "title": "Rank", + "examples": [ + 128 + ], + "default": 128 + }, + "aspect_ratio": { + "enum": [ + "16:9", + "1:1", + "9:16" + ], + "description": "The aspect ratio to use for training. This is the aspect ratio of the video.", + "type": "string", + "title": "Aspect Ratio", + "examples": [ + "1:1" + ], + "default": "1:1" + }, + "trigger_phrase": { + "examples": [ + "" + ], + "description": "The phrase that will trigger the model to generate an image.", + "type": "string", + "title": "Trigger Phrase", + "default": "" + }, + "resolution": { + "enum": [ + "low", + "medium", + "high" + ], + "description": "The resolution to use for training. This is the resolution of the video.", + "type": "string", + "title": "Resolution", + "examples": [ + "medium" + ], + "default": "medium" + }, + "split_input_into_scenes": { + "examples": [ + true + ], + "description": "If true, videos above a certain duration threshold will be split into scenes. If you provide captions for a split video, the caption will be applied to each scene. If you do not provide captions, scenes will be auto-captioned. This option has no effect on image datasets.", + "type": "boolean", + "title": "Split Input Into Scenes", + "default": true + }, + "validation_resolution": { + "enum": [ + "low", + "medium", + "high" + ], + "description": "The resolution to use for validation.", + "type": "string", + "title": "Validation Resolution", + "examples": [ + "high" + ], + "default": "high" + }, + "validation_number_of_frames": { + "description": "The number of frames to use for validation.", + "type": "integer", + "minimum": 8, + "maximum": 121, + "title": "Validation Number Of Frames", + "examples": [ + 81 + ], + "default": 81 + }, + "validation_aspect_ratio": { + "enum": [ + "16:9", + "1:1", + "9:16" + ], + "description": "The aspect ratio to use for validation.", + "type": "string", + "title": "Validation Aspect Ratio", + "examples": [ + "1:1" + ], + "default": "1:1" + }, + "validation_negative_prompt": { + "description": "A negative prompt to use for validation.", + "type": "string", + "title": "Validation Negative Prompt", + "default": "blurry, low quality, bad quality, out of focus" + }, + "auto_scale_input": { + "examples": [ + false + ], + "description": "If true, videos will be automatically scaled to the target frame count and fps. This option has no effect on image datasets.", + "type": "boolean", + "title": "Auto Scale Input", + "default": false + } + }, + "title": "Input", + "required": [ + "training_data_url" + ] + }, + "LtxVideoTrainerOutput": { + "x-fal-order-properties": [ + "video", + "lora_file", + "config_file" + ], + "type": "object", + "properties": { + "lora_file": { + "description": "URL to the trained LoRA weights.", + "$ref": "#/components/schemas/File" + }, + "config_file": { + "description": "Configuration used for setting up the inference endpoints.", + "$ref": "#/components/schemas/File" + }, + "video": { + "anyOf": [ + { + "$ref": "#/components/schemas/File" + }, + { + "type": "null" + } + ], + "description": "The URL to the validations video." 
+ } + }, + "title": "TrainingOutput", + "required": [ + "video", + "lora_file", + "config_file" + ] + }, + "Validation": { + "x-fal-order-properties": [ + "prompt", + "image_url" + ], + "type": "object", + "properties": { + "prompt": { + "description": "The prompt to use for validation.", + "type": "string", + "title": "Prompt" + }, + "image_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "An image to use for image-to-video validation. If provided for one validation, _all_ validation inputs must have an image.", + "title": "Image Url" + } + }, + "title": "Validation", + "required": [ + "prompt" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-video-trainer/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-trainer/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-trainer": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LtxVideoTrainerInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-trainer/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LtxVideoTrainerOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/recraft/v3/create-style", + "metadata": { + "display_name": "Recraft V3 Create Style", + "category": "training", + "description": "Recraft V3 Create Style is capable of creating unique styles for Recraft V3 based on your images.", + "status": "active", + "tags": [ + "style", + "vector", + "personalization" + ], + "updated_at": "2026-01-26T21:43:46.636Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/recraft-v3-create-style.webp", + "model_url": "https://fal.run/fal-ai/recraft/v3/create-style", + "license_type": "commercial", + "date": "2025-05-07T12:54:56.466Z", + "group": { + "key": "fal-ai/recraft/v3", + "label": "Create Style" + }, + "highlighted": false, + "kind": "training", + "pinned": false, + "inference_endpoint_ids": [ + "fal-ai/recraft/v3/text-to-image", + "fal-ai/recraft/v3/image-to-image" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/recraft/v3/create-style", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/recraft/v3/create-style queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/recraft/v3/create-style", + "category": "training", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/recraft-v3-create-style.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/recraft/v3/create-style", + "documentationUrl": "https://fal.ai/models/fal-ai/recraft/v3/create-style/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
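The LTX trainer input is the richest of the batch, and two of its constraints are easy to trip over: validation accepts at most two entries, and if any entry carries an image_url then all of them must. A typed sketch of a request body that respects both; every value is illustrative, including the placeholder dataset URL:

// Sketch: a typed fal-ai/ltx-video-trainer request body (values illustrative).
type LtxValidation = { prompt: string; image_url?: string | null }

const ltxInput = {
  training_data_url: 'https://example.com/dataset.zip', // placeholder URL
  rank: 128,                     // one of 8 | 16 | 32 | 64 | 128
  number_of_steps: 1000,         // 100..20000, in steps of 100
  number_of_frames: 81,          // 25..121
  frame_rate: 25,                // 8..60
  resolution: 'medium' as const, // 'low' | 'medium' | 'high'
  aspect_ratio: '1:1' as const,  // '16:9' | '1:1' | '9:16'
  // Max two entries; if one had an image_url, all would need one.
  validation: [
    { prompt: 'a cat surfing a wave' },
    { prompt: 'a dog skating downhill' },
  ] satisfies Array<LtxValidation>,
}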
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "RecraftV3CreateStyleInput": { + "title": "StyleReferenceInput", + "type": "object", + "properties": { + "images_data_url": { + "title": "Images Data Url", + "type": "string", + "description": "URL to zip archive with images, use PNG format. Maximum 5 images are allowed." + }, + "base_style": { + "enum": [ + "any", + "realistic_image", + "digital_illustration", + "vector_illustration", + "realistic_image/b_and_w", + "realistic_image/hard_flash", + "realistic_image/hdr", + "realistic_image/natural_light", + "realistic_image/studio_portrait", + "realistic_image/enterprise", + "realistic_image/motion_blur", + "realistic_image/evening_light", + "realistic_image/faded_nostalgia", + "realistic_image/forest_life", + "realistic_image/mystic_naturalism", + "realistic_image/natural_tones", + "realistic_image/organic_calm", + "realistic_image/real_life_glow", + "realistic_image/retro_realism", + "realistic_image/retro_snapshot", + "realistic_image/urban_drama", + "realistic_image/village_realism", + "realistic_image/warm_folk", + "digital_illustration/pixel_art", + "digital_illustration/hand_drawn", + "digital_illustration/grain", + "digital_illustration/infantile_sketch", + "digital_illustration/2d_art_poster", + "digital_illustration/handmade_3d", + "digital_illustration/hand_drawn_outline", + "digital_illustration/engraving_color", + "digital_illustration/2d_art_poster_2", + "digital_illustration/antiquarian", + "digital_illustration/bold_fantasy", + "digital_illustration/child_book", + "digital_illustration/child_books", + "digital_illustration/cover", + "digital_illustration/crosshatch", + "digital_illustration/digital_engraving", + "digital_illustration/expressionism", + "digital_illustration/freehand_details", + "digital_illustration/grain_20", + "digital_illustration/graphic_intensity", + "digital_illustration/hard_comics", + "digital_illustration/long_shadow", + "digital_illustration/modern_folk", + "digital_illustration/multicolor", + "digital_illustration/neon_calm", + "digital_illustration/noir", + "digital_illustration/nostalgic_pastel", + "digital_illustration/outline_details", + "digital_illustration/pastel_gradient", + "digital_illustration/pastel_sketch", + "digital_illustration/pop_art", + "digital_illustration/pop_renaissance", + "digital_illustration/street_art", + "digital_illustration/tablet_sketch", + "digital_illustration/urban_glow", + "digital_illustration/urban_sketching", + "digital_illustration/vanilla_dreams", + "digital_illustration/young_adult_book", + "digital_illustration/young_adult_book_2", + "vector_illustration/bold_stroke", + "vector_illustration/chemistry", + "vector_illustration/colored_stencil", + "vector_illustration/contour_pop_art", + "vector_illustration/cosmics", + "vector_illustration/cutout", + "vector_illustration/depressive", + "vector_illustration/editorial", + "vector_illustration/emotional_flat", + "vector_illustration/infographical", + "vector_illustration/marker_outline", + "vector_illustration/mosaic", + "vector_illustration/naivector", + "vector_illustration/roundish_flat", + "vector_illustration/segmented_colors", + "vector_illustration/sharp_contrast", + "vector_illustration/thin", + "vector_illustration/vector_photo", + "vector_illustration/vivid_shapes", + "vector_illustration/engraving", + "vector_illustration/line_art", + "vector_illustration/line_circuit", + "vector_illustration/linocut" + ], + "title": "Base Style", + "type": "string", + "description": "The base style of 
the generated images, this topic is covered above.", + "default": "digital_illustration" + } + }, + "x-fal-order-properties": [ + "images_data_url", + "base_style" + ], + "required": [ + "images_data_url" + ] + }, + "RecraftV3CreateStyleOutput": { + "title": "StyleReferenceOutput", + "type": "object", + "properties": { + "style_id": { + "format": "uuid4", + "title": "Style Id", + "type": "string", + "description": "The ID of the created style, this ID can be used to reference the style in the future." + } + }, + "x-fal-order-properties": [ + "style_id" + ], + "required": [ + "style_id" + ] + } + } + }, + "paths": { + "/fal-ai/recraft/v3/create-style/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/recraft/v3/create-style/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/recraft/v3/create-style": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RecraftV3CreateStyleInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/recraft/v3/create-style/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RecraftV3CreateStyleOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/turbo-flux-trainer", + "metadata": { + "display_name": "Turbo Flux Trainer", + "category": "training", + "description": "A blazing fast FLUX dev LoRA trainer for subjects and styles.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:55.386Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Training-4.jpg", + "model_url": "https://fal.run/fal-ai/turbo-flux-trainer", + "license_type": "commercial", + "date": "2025-04-17T17:40:19.122Z", + "highlighted": false, + "kind": "training", + "pinned": false, + "inference_endpoint_ids": [ + "fal-ai/flux-lora" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/turbo-flux-trainer", + "version": "1.0.0", + 
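Recraft's create-style flow is two-step: submit the zip of reference images, then read style_id out of the result and hand it to the inference endpoints listed under inference_endpoint_ids (the exact parameter name on those endpoints lives outside this schema). A sketch that elides the polling loop shown earlier; the auth scheme and FAL_KEY remain assumptions:

// Sketch: create a Recraft V3 style and capture the reusable style_id.
async function createRecraftStyle(imagesZipUrl: string): Promise<string> {
  const submit = await fetch('https://queue.fal.run/fal-ai/recraft/v3/create-style', {
    method: 'POST',
    headers: {
      Authorization: `Key ${process.env.FAL_KEY}`,
      'Content-Type': 'application/json',
    },
    body: JSON.stringify({
      images_data_url: imagesZipUrl,      // zip of up to 5 PNG images
      base_style: 'digital_illustration', // schema default
    }),
  })
  const { request_id } = (await submit.json()) as { request_id: string }
  // Poll .../requests/{request_id}/status here (omitted; see the loop sketched
  // earlier), then read the StyleReferenceOutput:
  const result = await fetch(
    `https://queue.fal.run/fal-ai/recraft/v3/create-style/requests/${request_id}`,
    { headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
  )
  const { style_id } = (await result.json()) as { style_id: string }
  return style_id // pass to the Recraft V3 inference endpoints
}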
"description": "A blazing fast FLUX dev LoRA trainer for subjects and styles.", + "x-fal-metadata": { + "endpointId": "fal-ai/turbo-flux-trainer", + "category": "training", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Training-4.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/turbo-flux-trainer", + "documentationUrl": "https://fal.ai/models/fal-ai/turbo-flux-trainer/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "TurboFluxTrainerInput": { + "title": "Input", + "type": "object", + "properties": { + "images_data_url": { + "title": "Images Data Url", + "type": "string", + "description": "\n URL to zip archive with images of a consistent style. Try to use at least 10 images, although more is better.\n " + }, + "trigger_phrase": { + "title": "Trigger Phrase", + "type": "string", + "description": "Trigger phrase to be used in the captions. If None, a trigger word will not be used.\n If no captions are provide the trigger_work will be used instead of captions. 
If captions are provided, the trigger word will replace the `[trigger]` string in the captions.\n ", + "default": "ohwx" + }, + "steps": { + "description": "Number of steps to train the LoRA on.", + "type": "integer", + "minimum": 1, + "maximum": 10000, + "title": "Steps", + "examples": [ + 1000 + ], + "default": 1000 + }, + "learning_rate": { + "description": "Learning rate for the training.", + "type": "number", + "minimum": 1e-7, + "maximum": 0.01, + "title": "Learning Rate", + "default": 0.00115 + }, + "training_style": { + "enum": [ + "subject", + "style" + ], + "title": "Training Style", + "type": "string", + "description": "Training style to use.", + "default": "subject" + }, + "face_crop": { + "title": "Face Crop", + "type": "boolean", + "description": "Whether to try to detect the face and crop the images to the face.", + "default": true + } + }, + "x-fal-order-properties": [ + "images_data_url", + "trigger_phrase", + "steps", + "learning_rate", + "training_style", + "face_crop" + ], + "required": [ + "images_data_url" + ] + }, + "TurboFluxTrainerOutput": { + "title": "Output", + "type": "object", + "properties": { + "config_file": { + "title": "Config File", + "description": "URL to the trained diffusers config file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "diffusers_lora_file": { + "title": "Diffusers Lora File", + "description": "URL to the trained diffusers lora weights.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "diffusers_lora_file", + "config_file" + ], + "required": [ + "diffusers_lora_file", + "config_file" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/turbo-flux-trainer/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/turbo-flux-trainer/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/turbo-flux-trainer": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TurboFluxTrainerInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/turbo-flux-trainer/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TurboFluxTrainerOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan-trainer", + "metadata": { + "display_name": "Wan-2.1 LoRA Trainer", + "category": "training", + "description": "Train custom LoRAs for Wan-2.1 I2V 480P", + "status": "active", + "tags": [ + "lora", + "training" + ], + "updated_at": "2026-01-26T21:44:00.452Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/wan-trainer.png", + "model_url": "https://fal.run/fal-ai/wan-trainer", + "license_type": "commercial", + "date": "2025-03-24T17:02:37.214Z", + "group": { + "key": "wan-trainer", + "label": "I2V 14B 480P" + }, + "highlighted": false, + "kind": "training", + "pinned": false, + "inference_endpoint_ids": [ + "fal-ai/wan-i2v-lora" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan-trainer", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan-trainer queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan-trainer", + "category": "training", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/wan-trainer.png", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan-trainer", + "documentationUrl": "https://fal.ai/models/fal-ai/wan-trainer/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." 
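Every trainer in this file also exposes a cancel route, PUT .../requests/{request_id}/cancel, whose 200 body carries an optional success boolean. A sketch against the turbo-flux-trainer paths above:

// Sketch: cancel a queued fal-ai/turbo-flux-trainer request.
async function cancelTurboFluxTraining(requestId: string): Promise<boolean> {
  const res = await fetch(
    `https://queue.fal.run/fal-ai/turbo-flux-trainer/requests/${requestId}/cancel`,
    { method: 'PUT', headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
  )
  const body = (await res.json()) as { success?: boolean }
  return body.success === true
}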
+ }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanTrainerInput": { + "title": "Input", + "type": "object", + "properties": { + "number_of_steps": { + "minimum": 1, + "maximum": 20000, + "type": "integer", + "title": "Number Of Steps", + "description": "The number of steps to train for.", + "default": 400 + }, + "training_data_url": { + "title": "Training Data URL", + "type": "string", + "description": "URL to zip archive with images of a consistent style. Try to use at least 10 images and/or videos, although more is better.\n\n In addition to images the archive can contain text files with captions. Each text file should have the same name as the image/video file it corresponds to." + }, + "trigger_phrase": { + "title": "Trigger Phrase", + "type": "string", + "description": "The phrase that will trigger the model to generate an image.", + "default": "" + }, + "learning_rate": { + "minimum": 0.000001, + "maximum": 1, + "type": "number", + "title": "Learning Rate", + "description": "The rate at which the model learns. Higher values can lead to faster training, but over-fitting.", + "default": 0.0002 + }, + "auto_scale_input": { + "examples": [ + true + ], + "title": "Auto-Scale Input", + "type": "boolean", + "description": "If true, the input will be automatically scale the video to 81 frames at 16fps.", + "default": false + } + }, + "x-fal-order-properties": [ + "training_data_url", + "number_of_steps", + "learning_rate", + "trigger_phrase", + "auto_scale_input" + ], + "required": [ + "training_data_url" + ] + }, + "WanTrainerOutput": { + "title": "Output", + "type": "object", + "properties": { + "lora_file": { + "title": "Lora File", + "description": "URL to the trained LoRA weights.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "config_file": { + "title": "Config File", + "description": "Configuration used for setting up the inference endpoints.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "lora_file", + "config_file" + ], + "required": [ + "lora_file", + "config_file" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
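This 480p base endpoint and the three other Wan-2.1 trainers captured in this file (t2v-14b, i2v-720p, flf2v-720p) declare identical Input schemas, so one client-side type can front all four. A sketch; the interface is hand-written from the schema rather than generated:

// Sketch: one input type covers all four Wan-2.1 trainer variants in this file.
interface WanTrainerInput {
  training_data_url: string  // required: zip of images/videos (+ optional captions)
  number_of_steps?: number   // 1..20000, default 400
  learning_rate?: number     // 1e-6..1, default 2e-4
  trigger_phrase?: string    // default ''
  auto_scale_input?: boolean // scale input video to 81 frames at 16fps
}

const WAN_TRAINER_ENDPOINTS = [
  'fal-ai/wan-trainer',           // I2V 14B 480P (this entry)
  'fal-ai/wan-trainer/i2v-720p',
  'fal-ai/wan-trainer/flf2v-720p',
  'fal-ai/wan-trainer/t2v-14b',
] as const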
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan-trainer/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-trainer/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/wan-trainer": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanTrainerInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-trainer/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanTrainerOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/hunyuan-video-lora-training", + "metadata": { + "display_name": "Train Hunyuan LoRA", + "category": "training", + "description": "Train Hunyuan Video lora on people, objects, characters and more!", + "status": "active", + "tags": [ + "lora", + "personalization" + ], + "updated_at": "2026-01-26T21:44:31.230Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/Fal_Visuals_V1_014.jpg", + "model_url": "https://fal.run/fal-ai/hunyuan-video-lora-training", + "license_type": "commercial", + "date": "2025-01-14T00:00:00.000Z", + "highlighted": false, + "kind": "training", + "pinned": false, + "inference_endpoint_ids": [ + "fal-ai/hunyuan-video-lora" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/hunyuan-video-lora-training", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/hunyuan-video-lora-training queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/hunyuan-video-lora-training", + "category": "training", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/Fal_Visuals_V1_014.jpg", + "playgroundUrl": 
"https://fal.ai/models/fal-ai/hunyuan-video-lora-training", + "documentationUrl": "https://fal.ai/models/fal-ai/hunyuan-video-lora-training/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "HunyuanVideoLoraTrainingInput": { + "title": "PublicInput", + "type": "object", + "properties": { + "trigger_word": { + "title": "Trigger Word", + "type": "string", + "description": "The trigger word to use.", + "default": "" + }, + "images_data_url": { + "title": "Images Data Url", + "type": "string", + "description": "\n URL to zip archive with images. Try to use at least 4 images in general the more the better.\n\n In addition to images the archive can contain text files with captions. Each text file should have the same name as the image file it corresponds to.\n " + }, + "steps": { + "description": "Number of steps to train the LoRA on.", + "type": "integer", + "minimum": 1, + "maximum": 5000, + "examples": [ + 1000 + ], + "title": "Steps" + }, + "data_archive_format": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Data Archive Format", + "description": "The format of the archive. 
If not specified, the format will be inferred from the URL.", + "nullable": true + }, + "learning_rate": { + "title": "Learning Rate", + "type": "number", + "description": "Learning rate to use for training.", + "default": 0.0001 + }, + "do_caption": { + "title": "Do Caption", + "type": "boolean", + "description": "Whether to generate captions for the images.", + "default": true + } + }, + "x-fal-order-properties": [ + "images_data_url", + "steps", + "trigger_word", + "learning_rate", + "do_caption", + "data_archive_format" + ], + "required": [ + "images_data_url", + "steps" + ] + }, + "HunyuanVideoLoraTrainingOutput": { + "title": "Output", + "type": "object", + "properties": { + "config_file": { + "description": "URL to the lora configuration file.", + "$ref": "#/components/schemas/File" + }, + "diffusers_lora_file": { + "description": "URL to the trained diffusers lora weights.", + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "diffusers_lora_file", + "config_file" + ], + "required": [ + "diffusers_lora_file", + "config_file" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/hunyuan-video-lora-training/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-video-lora-training/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-video-lora-training": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HunyuanVideoLoraTrainingInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-video-lora-training/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HunyuanVideoLoraTrainingOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/packages/typescript/ai-fal/json/fal.models.unknown.json b/packages/typescript/ai-fal/json/fal.models.unknown.json new file mode 100644 index 00000000..692c3216 --- /dev/null +++ b/packages/typescript/ai-fal/json/fal.models.unknown.json @@ -0,0 +1,1409 @@ +{ + "generated_at": "2026-01-28T02:51:51.866Z", + "total_models": 4, + "category": "unknown", + "models": [ + { + "endpoint_id": "fal-ai/workflow-utilities/interleave-video", + "metadata": { + "display_name": "Workflow Utilities", + "category": "unknown", + "description": "ffmpeg utility to interleave videos", + "status": "active", + "tags": [], + "updated_at": "2026-01-27T17:40:26.521Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a87987b/4FBs4s-6F9ng4C6B3e-4i_d97b1dd479a045cfa4b0e8d3a1ed39e1.jpg", + "model_url": "https://fal.run/fal-ai/workflow-utilities/interleave-video", + "license_type": "private", + "date": "2026-01-27T17:40:26.521Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/workflow-utilities/interleave-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/workflow-utilities/interleave-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/workflow-utilities/interleave-video", + "category": "unknown", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a87987b/4FBs4s-6F9ng4C6B3e-4i_d97b1dd479a045cfa4b0e8d3a1ed39e1.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/workflow-utilities/interleave-video", + "documentationUrl": "https://fal.ai/models/fal-ai/workflow-utilities/interleave-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
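Hunyuan's trainer breaks the pattern slightly: steps is required alongside images_data_url (the Wan trainers default it), and data_archive_format is explicitly nullable. A sketch of a conforming body; the zip URL and trigger word are placeholders:

// Sketch: fal-ai/hunyuan-video-lora-training requires `steps` as well.
const hunyuanInput = {
  images_data_url: 'https://example.com/faces.zip', // placeholder URL
  steps: 1000,                 // required, 1..5000
  trigger_word: 'ohwx_person', // illustrative trigger word
  learning_rate: 0.0001,       // schema default
  do_caption: true,            // auto-caption images lacking .txt captions
  data_archive_format: null,   // nullable: format inferred from the URL when null
}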
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "WorkflowUtilitiesInterleaveVideoInput": { + "title": "InterleaveVideoInput", + "type": "object", + "properties": { + "video_urls": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/example_outputs/wan-25-i2v-output.mp4", + "https://storage.googleapis.com/falserverless/model_tests/kling/kling-v2.5-turbo-pro-image-to-video-output.mp4", + "https://storage.googleapis.com/falserverless/example_inputs/seedance_pro_i2v.mp4", + "https://storage.googleapis.com/falserverless/example_outputs/wan-25-i2v-output.mp4", + "https://storage.googleapis.com/falserverless/model_tests/kling/kling-v2.5-turbo-pro-image-to-video-output.mp4", + "https://storage.googleapis.com/falserverless/example_inputs/seedance_pro_i2v.mp4" + ] + ], + "title": "Video Urls", + "type": "array", + "description": "List of video URLs to interleave in order", + "items": { + "type": "string" + }, + "max_file_size": 100000000 + } + }, + "description": "Input model for interleaving multiple videos", + "x-fal-order-properties": [ + "video_urls" + ], + "required": [ + "video_urls" + ] + }, + "WorkflowUtilitiesInterleaveVideoOutput": { + "title": "InterleaveVideoOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 3886177, + "file_name": "output.mp4", + "content_type": "application/octet-stream", + "url": "https://v3b.fal.media/files/b/monkey/xVp56BqDLgb39NONPs1Gb_output.mp4" + } + ], + "title": "Video", + "description": "The interleaved video output", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "description": "Output model for interleaved video", + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/workflow-utilities/interleave-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/workflow-utilities/interleave-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/workflow-utilities/interleave-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WorkflowUtilitiesInterleaveVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/workflow-utilities/interleave-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WorkflowUtilitiesInterleaveVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-3-tts/clone-voice/1.7b", + "metadata": { + "display_name": "Qwen 3 TTS - Clone Voice [1.7B]", + "category": "unknown", + "description": "Clone your voices using Qwen3-TTS Clone-Voice model with zero shot cloning capabilities and use it on text-to-speech models to create speeches of yours!", + "status": "active", + "tags": [ + "clone-voice", + "voice-clone" + ], + "updated_at": "2026-01-26T21:41:26.670Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8bf538/p0L3C6l3FW2tyXCmwR0Fs_f6585c02243149fcadb77b0d4c6122b9.jpg", + "model_url": "https://fal.run/fal-ai/qwen-3-tts/clone-voice/1.7b", + "license_type": "commercial", + "date": "2026-01-26T16:20:48.076Z", + "group": { + "key": "qwen-3-tts", + "label": "Clone-Voice [1.7B]" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-3-tts/clone-voice/1.7b", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-3-tts/clone-voice/1.7b queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-3-tts/clone-voice/1.7b", + "category": "unknown", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8bf538/p0L3C6l3FW2tyXCmwR0Fs_f6585c02243149fcadb77b0d4c6122b9.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-3-tts/clone-voice/1.7b", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-3-tts/clone-voice/1.7b/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + 
"enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Qwen3TtsCloneVoice17bInput": { + "title": "Qwen3CloneVoiceInput", + "type": "object", + "properties": { + "audio_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/qwen3-tts/clone_in.mp3" + ], + "description": "URL to the reference audio file used for voice cloning.", + "type": "string", + "title": "Audio Url" + }, + "reference_text": { + "examples": [ + "Okay. Yeah. I resent you. I love you. I respect you. But you know what? You blew it! And it is all thanks to you." + ], + "description": "Optional reference text that was used when creating the speaker embedding. Providing this can improve synthesis quality when using a cloned voice.", + "type": "string", + "title": "Reference Text" + } + }, + "x-fal-order-properties": [ + "audio_url", + "reference_text" + ], + "required": [ + "audio_url" + ] + }, + "Qwen3TtsCloneVoice17bOutput": { + "title": "Qwen3CloneVoiceOutput", + "type": "object", + "properties": { + "speaker_embedding": { + "examples": [ + { + "file_size": 16288, + "file_name": "tmpe71u7t4j.safetensors", + "content_type": "application/octet-stream", + "url": "https://storage.googleapis.com/falserverless/example_outputs/qwen3-tts/clone_out.safetensors" + } + ], + "description": "The generated speaker embedding file in safetensors format.", + "title": "Speaker Embedding", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "speaker_embedding" + ], + "required": [ + "speaker_embedding" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-3-tts/clone-voice/1.7b/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-3-tts/clone-voice/1.7b/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-3-tts/clone-voice/1.7b": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Qwen3TtsCloneVoice17bInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-3-tts/clone-voice/1.7b/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Qwen3TtsCloneVoice17bOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/qwen-3-tts/clone-voice/0.6b", + "metadata": { + "display_name": "Qwen 3 TTS - Clone Voice [0.6B]", + "category": "unknown", + "description": "Clone your voices using Qwen3-TTS Clone-Voice model with zero shot cloning capabilities and use it on text-to-speech models to create speeches of yours!", + "status": "active", + "tags": [ + "clone-voice", + "voice-clone" + ], + "updated_at": "2026-01-26T21:41:26.808Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8bf531/Rd0WefO2k5vCoxNgTSyzn_97cc41f0cf54490d8780612f0691f62f.jpg", + "model_url": "https://fal.run/fal-ai/qwen-3-tts/clone-voice/0.6b", + "license_type": "commercial", + "date": "2026-01-26T16:19:27.641Z", + "group": { + "key": 
"qwen-3-tts", + "label": "Clone-Voice [0.6B]" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/qwen-3-tts/clone-voice/0.6b", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/qwen-3-tts/clone-voice/0.6b queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/qwen-3-tts/clone-voice/0.6b", + "category": "unknown", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8bf531/Rd0WefO2k5vCoxNgTSyzn_97cc41f0cf54490d8780612f0691f62f.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/qwen-3-tts/clone-voice/0.6b", + "documentationUrl": "https://fal.ai/models/fal-ai/qwen-3-tts/clone-voice/0.6b/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Qwen3TtsCloneVoice06bInput": { + "title": "Qwen3CloneVoiceInput", + "type": "object", + "properties": { + "audio_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/qwen3-tts/clone_in.mp3" + ], + "description": "URL to the reference audio file used for voice cloning.", + "type": "string", + "title": "Audio Url" + }, + "reference_text": { + "examples": [ + "Okay. Yeah. I resent you. I love you. I respect you. But you know what? You blew it! And it is all thanks to you." + ], + "description": "Optional reference text that was used when creating the speaker embedding. 
Providing this can improve synthesis quality when using a cloned voice.", + "type": "string", + "title": "Reference Text" + } + }, + "x-fal-order-properties": [ + "audio_url", + "reference_text" + ], + "required": [ + "audio_url" + ] + }, + "Qwen3TtsCloneVoice06bOutput": { + "title": "Qwen3CloneVoiceOutput", + "type": "object", + "properties": { + "speaker_embedding": { + "examples": [ + { + "file_size": 16288, + "file_name": "tmpe71u7t4j.safetensors", + "content_type": "application/octet-stream", + "url": "https://storage.googleapis.com/falserverless/example_outputs/qwen3-tts/clone_out.safetensors" + } + ], + "description": "The generated speaker embedding file in safetensors format.", + "title": "Speaker Embedding", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "speaker_embedding" + ], + "required": [ + "speaker_embedding" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/qwen-3-tts/clone-voice/0.6b/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-3-tts/clone-voice/0.6b/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/qwen-3-tts/clone-voice/0.6b": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Qwen3TtsCloneVoice06bInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/qwen-3-tts/clone-voice/0.6b/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Qwen3TtsCloneVoice06bOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "openrouter/router/audio", + "metadata": { + "display_name": "OpenRouter [Audio]", + "category": "unknown", + "description": "Run any ALM (Audio Language Model) with fal, powered by OpenRouter.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:49.406Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a87593d/pDGPXXb694Rjgst08FkUa_60fe8689185944889b162dc64de6cffc.jpg", + "model_url": "https://fal.run/openrouter/router/audio", + "license_type": "commercial", + "date": "2025-12-22T17:15:42.947Z", + "group": { + "key": "openrouter/router", + "label": "Any ALM" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for openrouter/router/audio", + "version": "1.0.0", + "description": "The OpenAPI schema for the openrouter/router/audio queue.", + "x-fal-metadata": { + "endpointId": "openrouter/router/audio", + "category": "unknown", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a87593d/pDGPXXb694Rjgst08FkUa_60fe8689185944889b162dc64de6cffc.jpg", + "playgroundUrl": "https://fal.ai/models/openrouter/router/audio", + "documentationUrl": "https://fal.ai/models/openrouter/router/audio/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "RouterAudioInput": { + "x-fal-order-properties": [ + "audio_url", + "prompt", + "system_prompt", + "model", + "reasoning", + "temperature", + "max_tokens" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Please transcribe this audio file.", + "What is being said in this audio?" + ], + "description": "Prompt to be used for the audio processing", + "type": "string", + "title": "Prompt" + }, + "system_prompt": { + "examples": [ + "Transcribe the audio accurately, including speaker identification if multiple speakers are present." + ], + "description": "System prompt to provide context or instructions to the model", + "type": "string", + "title": "System Prompt" + }, + "reasoning": { + "description": "Should reasoning be the part of the final answer.", + "type": "boolean", + "title": "Reasoning", + "default": false + }, + "model": { + "examples": [ + "google/gemini-3-flash-preview", + "google/gemini-2.5-flash", + "google/gemini-3-pro-preview" + ], + "description": "Name of the model to use. Charged based on actual token usage.", + "type": "string", + "title": "Model" + }, + "max_tokens": { + "minimum": 1, + "description": "This sets the upper limit for the number of tokens the model can generate in response. It won't produce more than this limit. The maximum value is the context length minus the prompt length.", + "type": "integer", + "title": "Max Tokens" + }, + "temperature": { + "minimum": 0, + "maximum": 2, + "type": "number", + "description": "This setting influences the variety in the model's responses. Lower values lead to more predictable and typical responses, while higher values encourage more diverse and less common responses. At 0, the model always gives the same response for a given input.", + "title": "Temperature", + "default": 1 + }, + "audio_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/audio-understanding/Title_%20Running%20on%20Fal.mp3" + ], + "description": "URL or data URI of the audio file to process. Supported formats: wav, mp3, aiff, aac, ogg, flac, m4a.", + "type": "string", + "title": "Audio Url" + } + }, + "title": "AudioInput", + "required": [ + "prompt", + "audio_url", + "model" + ] + }, + "RouterAudioOutput": { + "x-fal-order-properties": [ + "output", + "usage" + ], + "type": "object", + "properties": { + "usage": { + "examples": [ + { + "prompt_tokens": 500, + "total_tokens": 550, + "completion_tokens": 50, + "cost": 0.0003 + } + ], + "description": "Token usage information", + "title": "Usage", + "allOf": [ + { + "$ref": "#/components/schemas/UsageInfo" + } + ] + }, + "output": { + "examples": [ + "The audio contains a conversation between two people discussing the weather forecast for the upcoming week." 
+ ], + "description": "Generated output from audio processing", + "type": "string", + "title": "Output" + } + }, + "title": "AudioOutput", + "required": [ + "output" + ] + }, + "UsageInfo": { + "x-fal-order-properties": [ + "prompt_tokens", + "completion_tokens", + "total_tokens", + "cost" + ], + "type": "object", + "properties": { + "prompt_tokens": { + "title": "Prompt Tokens", + "type": "integer" + }, + "total_tokens": { + "title": "Total Tokens", + "type": "integer", + "default": 0 + }, + "completion_tokens": { + "title": "Completion Tokens", + "type": "integer" + }, + "cost": { + "title": "Cost", + "type": "number" + } + }, + "title": "UsageInfo", + "required": [ + "cost" + ] + } + } + }, + "paths": { + "/openrouter/router/audio/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/openrouter/router/audio/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/openrouter/router/audio": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RouterAudioInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/openrouter/router/audio/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RouterAudioOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/packages/typescript/ai-fal/json/fal.models.video-to-audio.json b/packages/typescript/ai-fal/json/fal.models.video-to-audio.json new file mode 100644 index 00000000..52369a96 --- /dev/null +++ b/packages/typescript/ai-fal/json/fal.models.video-to-audio.json @@ -0,0 +1,1638 @@ +{ + "generated_at": "2026-01-28T02:51:51.872Z", + "total_models": 4, + "category": "video-to-audio", + "models": [ + { + "endpoint_id": "fal-ai/sam-audio/visual-separate", + "metadata": { + "display_name": "Sam Audio", + "category": "video-to-audio", + "description": "Audio separation with SAM Audio. 
Isolate any sound using natural language—professional-grade audio editing made simple for creators, researchers, and accessibility applications.", + "status": "active", + "tags": [ + "video-to-audio", + "sam-audio" + ], + "updated_at": "2026-01-26T21:41:44.984Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8865ba/oZ7GwnFaLuxD6NEEWYkcM_84e3d234884b41bea323b1bcb5a168e9.jpg", + "model_url": "https://fal.run/fal-ai/sam-audio/visual-separate", + "license_type": "commercial", + "date": "2025-12-30T16:16:20.141Z", + "group": { + "key": "sam-audio", + "label": "Visual-guided" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/sam-audio/visual-separate", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/sam-audio/visual-separate queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/sam-audio/visual-separate", + "category": "video-to-audio", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8865ba/oZ7GwnFaLuxD6NEEWYkcM_84e3d234884b41bea323b1bcb5a168e9.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/sam-audio/visual-separate", + "documentationUrl": "https://fal.ai/models/fal-ai/sam-audio/visual-separate/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "SamAudioVisualSeparateInput": { + "description": "Input for visual-prompted audio separation.", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "man on the left" + ], + "description": "Text prompt to assist with separation. Use natural language to describe the target sound.", + "type": "string", + "title": "Prompt", + "default": "" + }, + "video_url": { + "examples": [ + "https://v3b.fal.media/files/b/0a8850d1/gff7zKI-6XwIbBBip4946_office.mp4" + ], + "description": "URL of the video file to process (MP4, MOV, etc.)", + "type": "string", + "title": "Video Url" + }, + "acceleration": { + "enum": [ + "fast", + "balanced", + "quality" + ], + "description": "The acceleration level to use.", + "type": "string", + "title": "Acceleration", + "default": "balanced" + }, + "mask_video_url": { + "description": "URL of the mask video (binary mask indicating target object). 
Black=target, White=background.", + "type": "string", + "title": "Mask Video Url" + }, + "output_format": { + "enum": [ + "wav", + "mp3" + ], + "description": "Output audio format.", + "type": "string", + "title": "Output Format", + "default": "wav" + }, + "reranking_candidates": { + "minimum": 1, + "maximum": 4, + "type": "integer", + "description": "Number of candidates to generate and rank. Higher improves quality but increases latency and cost.", + "title": "Reranking Candidates", + "default": 1 + } + }, + "title": "SAMAudioVisualInput", + "x-fal-order-properties": [ + "video_url", + "mask_video_url", + "prompt", + "reranking_candidates", + "acceleration", + "output_format" + ], + "required": [ + "video_url" + ] + }, + "SamAudioVisualSeparateOutput": { + "description": "Output for visual-prompted audio separation.", + "type": "object", + "properties": { + "target": { + "examples": [ + { + "content_type": "audio/wav", + "url": "https://v3b.fal.media/files/b/0a88550c/CVyBZ1Cxka1vLxVwOfUcc_tmpzzftm934.wav" + } + ], + "description": "The isolated target sound.", + "title": "Target", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "duration": { + "examples": [ + 15 + ], + "description": "Duration of the output audio in seconds.", + "type": "number", + "title": "Duration" + }, + "sample_rate": { + "description": "Sample rate of the output audio in Hz.", + "type": "integer", + "title": "Sample Rate", + "default": 48000 + }, + "residual": { + "examples": [ + { + "content_type": "audio/wav", + "url": "https://v3b.fal.media/files/b/0a88550c/pdOH_J84S-197LRjMQDrz_tmprx375uix.wav" + } + ], + "description": "Everything else in the audio.", + "title": "Residual", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "SAMAudioVisualSeparateOutput", + "x-fal-order-properties": [ + "target", + "residual", + "duration", + "sample_rate" + ], + "required": [ + "target", + "residual", + "duration" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/sam-audio/visual-separate/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sam-audio/visual-separate/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/sam-audio/visual-separate": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SamAudioVisualSeparateInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sam-audio/visual-separate/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SamAudioVisualSeparateOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "mirelo-ai/sfx-v1.5/video-to-audio", + "metadata": { + "display_name": "Mirelo SFX V1.5", + "category": "video-to-audio", + "description": "Generate synced sounds for any video, and return the new sound track (like MMAudio)", + "status": "active", + "tags": [ + "video-to-audio", + "sfx" + ], + "updated_at": "2026-01-26T21:42:39.683Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/lion/wwhXRNDWSVEdS9jIG6BAA_b1d337ecdccb441aa80caab321487fb7.jpg", + "model_url": "https://fal.run/mirelo-ai/sfx-v1.5/video-to-audio", + "license_type": "commercial", + "date": "2025-10-15T11:13:45.451Z", + "group": { + "key": "mirelo-sfx-v1.5", + "label": "Video To Audio" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for mirelo-ai/sfx-v1.5/video-to-audio", + "version": "1.0.0", + "description": "The OpenAPI schema for the mirelo-ai/sfx-v1.5/video-to-audio queue.", + "x-fal-metadata": { + "endpointId": "mirelo-ai/sfx-v1.5/video-to-audio", + "category": "video-to-audio", + "thumbnailUrl": "https://v3b.fal.media/files/b/lion/wwhXRNDWSVEdS9jIG6BAA_b1d337ecdccb441aa80caab321487fb7.jpg", + "playgroundUrl": "https://fal.ai/models/mirelo-ai/sfx-v1.5/video-to-audio", + "documentationUrl": "https://fal.ai/models/mirelo-ai/sfx-v1.5/video-to-audio/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "SfxV15VideoToAudioInput": { + "title": "Input", + "type": "object", + "properties": { + "num_samples": { + "anyOf": [ + { + "minimum": 2, + "maximum": 8, + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Num Samples", + "description": "The number of samples to generate from the model", + "default": 2 + }, + "duration": { + "anyOf": [ + { + "minimum": 1, + "maximum": 10, + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The duration of the generated audio in seconds", + "title": "Duration", + "default": 10 + }, + "start_offset": { + "anyOf": [ + { + "minimum": 0, + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The start offset in seconds to start the audio generation from", + "title": "Start Offset", + "default": 0 + }, + "video_url": { + "format": "uri", + "description": "A video url that can accessed from the API to process and add sound effects", + "type": "string", + "examples": [ + "https://di3otfzjg1gxa.cloudfront.net/battlefield_scene_silent.mp4" + ], + "title": "Video Url", + "minLength": 1, + "maxLength": 2083 + }, + "seed": { + "anyOf": [ + { + "minimum": 1, + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The seed to use for the generation. If not provided, a random seed will be used", + "title": "Seed", + "default": 8069 + }, + "text_prompt": { + "examples": [ + "" + ], + "title": "Text Prompt", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Additional description to guide the model" + } + }, + "x-fal-order-properties": [ + "video_url", + "text_prompt", + "num_samples", + "seed", + "duration", + "start_offset" + ], + "required": [ + "video_url" + ] + }, + "SfxV15VideoToAudioOutput": { + "title": "AudioOutput", + "type": "object", + "properties": { + "audio": { + "examples": [ + [ + { + "file_name": "generated_audio_1.wav", + "content_type": "audio/wav", + "url": "https://v3b.fal.media/files/b/kangaroo/Cv3NHGnjVq3fz_sCddQh8_generated_audio.wav" + }, + { + "file_name": "generated_audio_2.wav", + "content_type": "audio/wav", + "url": "https://v3b.fal.media/files/b/zebra/EnTHS-6lClxK-H3i07YGP_generated_audio.wav" + } + ] + ], + "description": "The generated sound effects audio", + "type": "array", + "title": "Audio", + "items": { + "$ref": "#/components/schemas/Audio-Output" + } + } + }, + "x-fal-order-properties": [ + "audio" + ], + "required": [ + "audio" + ] + }, + "Audio-Output": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "description": "The name of the file. 
It will be auto-generated if not provided.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "description": "The mime type of the file.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "title": "Audio", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/mirelo-ai/sfx-v1.5/video-to-audio/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/mirelo-ai/sfx-v1.5/video-to-audio/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/mirelo-ai/sfx-v1.5/video-to-audio": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SfxV15VideoToAudioInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/mirelo-ai/sfx-v1.5/video-to-audio/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SfxV15VideoToAudioOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/video-to-audio", + "metadata": { + "display_name": "Kling Video", + "category": "video-to-audio", + "description": "Generate audio from input videos using Kling", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:41.297Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/rabbit/UcIrV8sk1rLDIZAkPoW75_32e8d2d3f8de429aa32f3f35b595d5ac.jpg", + "model_url": "https://fal.run/fal-ai/kling-video/video-to-audio", + "license_type": "commercial", + "date": "2025-10-09T17:25:52.059Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling-video/video-to-audio", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-video/video-to-audio queue.", + "x-fal-metadata": { + 
"endpointId": "fal-ai/kling-video/video-to-audio", + "category": "video-to-audio", + "thumbnailUrl": "https://v3b.fal.media/files/b/rabbit/UcIrV8sk1rLDIZAkPoW75_32e8d2d3f8de429aa32f3f35b595d5ac.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/video-to-audio", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/video-to-audio/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoVideoToAudioInput": { + "title": "VideoToAudioInput", + "type": "object", + "properties": { + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/kling/kling-v2.5-turbo-pro-image-to-video-output.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "The video URL to extract audio from. Only .mp4/.mov formats are supported. File size does not exceed 100MB. Video duration between 3.0s and 20.0s." + }, + "asmr_mode": { + "title": "Asmr Mode", + "type": "boolean", + "description": "Enable ASMR mode. This mode enhances detailed sound effects and is suitable for highly immersive content scenarios.", + "default": false + }, + "background_music_prompt": { + "title": "Background Music Prompt", + "type": "string", + "maxLength": 200, + "description": "Background music prompt. Cannot exceed 200 characters.", + "default": "intense car race" + }, + "sound_effect_prompt": { + "title": "Sound Effect Prompt", + "type": "string", + "maxLength": 200, + "description": "Sound effect prompt. 
Cannot exceed 200 characters.", + "default": "Car tires screech as they accelerate in a drag race" + } + }, + "x-fal-order-properties": [ + "video_url", + "sound_effect_prompt", + "background_music_prompt", + "asmr_mode" + ], + "required": [ + "video_url" + ] + }, + "KlingVideoVideoToAudioOutput": { + "title": "VideoToAudioOutput", + "type": "object", + "properties": { + "audio": { + "examples": [ + { + "url": "https://v3.fal.media/files/monkey/O-ekVTtYqeDblD1oSf2uv_extracted_audio.mp3" + } + ], + "title": "Audio", + "description": "The extracted/generated audio from the video in MP3 format", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/monkey/O-ekVTtYqeDblD1oSf2uv_dubbed_video.mp4" + } + ], + "title": "Video", + "description": "The original video with dubbed audio applied", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "audio" + ], + "required": [ + "video", + "audio" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/video-to-audio/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/video-to-audio/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/video-to-audio": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoVideoToAudioInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/video-to-audio/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoVideoToAudioOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "mirelo-ai/sfx-v1/video-to-audio", + "metadata": { + "display_name": "Mirelo SFX", + "category": "video-to-audio", + "description": "Generate synced sounds for any video, and return the new sound track (like MMAudio)", + "status": "active", + "tags": [ + "sfx" + ], + "updated_at": "2026-01-26T21:43:03.895Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/rabbit/tU7ZHxRInmbvp8xAHwl1b_0cce21ee98f14735941825a2ed8df979.jpg", + "model_url": "https://fal.run/mirelo-ai/sfx-v1/video-to-audio", + "license_type": "commercial", + "date": "2025-08-15T12:24:51.311Z", + "group": { + "key": "mirelo-sfx-v1", + "label": "Video To Audio" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for mirelo-ai/sfx-v1/video-to-audio", + "version": "1.0.0", + "description": "The OpenAPI schema for the mirelo-ai/sfx-v1/video-to-audio queue.", + "x-fal-metadata": { + "endpointId": "mirelo-ai/sfx-v1/video-to-audio", + "category": "video-to-audio", + "thumbnailUrl": "https://fal.media/files/rabbit/tU7ZHxRInmbvp8xAHwl1b_0cce21ee98f14735941825a2ed8df979.jpg", + "playgroundUrl": "https://fal.ai/models/mirelo-ai/sfx-v1/video-to-audio", + "documentationUrl": "https://fal.ai/models/mirelo-ai/sfx-v1/video-to-audio/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "SfxV1VideoToAudioInput": { + "x-fal-order-properties": [ + "video_url", + "text_prompt", + "num_samples", + "seed", + "duration" + ], + "type": "object", + "properties": { + "num_samples": { + "anyOf": [ + { + "minimum": 2, + "maximum": 8, + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Num Samples", + "description": "The number of samples to generate from the model", + "default": 2 + }, + "video_url": { + "format": "uri", + "description": "A video url that can accessed from the API to process and add sound effects", + "type": "string", + "examples": [ + "https://di3otfzjg1gxa.cloudfront.net/input_example.mp4" + ], + "maxLength": 2083, + "minLength": 1, + "title": "Video Url" + }, + "duration": { + "anyOf": [ + { + "minimum": 1, + "maximum": 10, + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Duration", + "description": "The duration of the generated audio in seconds", + "default": 10 + }, + "seed": { + "anyOf": [ + { + "minimum": 1, + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The seed to use for the generation. If not provided, a random seed will be used", + "title": "Seed", + "default": 2105 + }, + "text_prompt": { + "examples": [ + "" + ], + "title": "Text Prompt", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Additional description to guide the model" + } + }, + "title": "Input", + "required": [ + "video_url" + ] + }, + "SfxV1VideoToAudioOutput": { + "title": "AudioOutput", + "type": "object", + "properties": { + "audio": { + "examples": [ + [ + { + "file_name": "generated_audio_1.wav", + "content_type": "audio/wav", + "url": "https://v3.fal.media/files/panda/d5WcvFtu93KpmlVvPg1oh_generated_audio.wav" + }, + { + "file_name": "generated_audio_2.wav", + "content_type": "audio/wav", + "url": "https://v3.fal.media/files/penguin/f51pS8iF5ZMVRni-O2F3S_generated_audio.wav" + } + ] + ], + "description": "The generated sound effects audio", + "type": "array", + "title": "Audio", + "items": { + "$ref": "#/components/schemas/Audio" + } + } + }, + "x-fal-order-properties": [ + "audio" + ], + "required": [ + "audio" + ] + }, + "Audio": { + "title": "Audio", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name" + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/mirelo-ai/sfx-v1/video-to-audio/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/mirelo-ai/sfx-v1/video-to-audio/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/mirelo-ai/sfx-v1/video-to-audio": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SfxV1VideoToAudioInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/mirelo-ai/sfx-v1/video-to-audio/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SfxV1VideoToAudioOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/packages/typescript/ai-fal/json/fal.models.video-to-text.json b/packages/typescript/ai-fal/json/fal.models.video-to-text.json new file mode 100644 index 00000000..59e3642b --- /dev/null +++ b/packages/typescript/ai-fal/json/fal.models.video-to-text.json @@ -0,0 +1,778 @@ +{ + "generated_at": "2026-01-28T02:51:51.871Z", + "total_models": 2, + "category": "video-to-text", + "models": [ + { + "endpoint_id": "openrouter/router/video/enterprise", + "metadata": { + "display_name": "OpenRouter [Video][Enterprise]", + "category": "video-to-text", + "description": "Run any VLM (Video Language Model) with fal, powered by OpenRouter.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:37.283Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8a491c/S5EO4nTWCnQl8LBj3j-pb_d972f9018895443e8d8f7a4bb0f08050.jpg", + "model_url": "https://fal.run/openrouter/router/video/enterprise", + "license_type": "commercial", + "date": "2026-01-13T23:57:11.907Z", + "group": { + "key": "openrouter/router", + "label": "openrouter/router" + }, + "highlighted": false, + "kind": "inference", + "stream_url": "https://fal.run/openrouter/router/video/enterprise/stream", + "pinned": false 
+ }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for openrouter/router/video/enterprise", + "version": "1.0.0", + "description": "The OpenAPI schema for the openrouter/router/video/enterprise queue.", + "x-fal-metadata": { + "endpointId": "openrouter/router/video/enterprise", + "category": "video-to-text", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8a491c/S5EO4nTWCnQl8LBj3j-pb_d972f9018895443e8d8f7a4bb0f08050.jpg", + "playgroundUrl": "https://fal.ai/models/openrouter/router/video/enterprise", + "documentationUrl": "https://fal.ai/models/openrouter/router/video/enterprise/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "RouterVideoEnterpriseInput": { + "x-fal-order-properties": [ + "video_urls", + "prompt", + "system_prompt", + "model", + "reasoning", + "temperature", + "max_tokens" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Please transcribe the videos respectively." + ], + "description": "Prompt to be used for the video processing", + "type": "string", + "title": "Prompt" + }, + "video_urls": { + "examples": [ + [ + "https://v3b.fal.media/files/b/0a8b3081/t4Jsy53x-Q8iQqg78_Vj__vid01.mp4", + "https://v3b.fal.media/files/b/0a8b3085/xWtbpb6pf4i-BSvR2oWbi_vid06.mp4" + ] + ], + "description": "List of URLs or data URIs of video files to process. Supported formats: mp4, mpeg, mov, webm. For Google Gemini on AI Studio, YouTube links are also supported. Mutually exclusive with video_url.", + "type": "array", + "title": "Video Urls", + "items": { + "type": "string" + } + }, + "system_prompt": { + "examples": [ + "Please look at the videos in order and answer the question." + ], + "description": "System prompt to provide context or instructions to the model", + "type": "string", + "title": "System Prompt" + }, + "reasoning": { + "description": "Should reasoning be part of the final answer.", + "type": "boolean", + "title": "Reasoning", + "default": false + }, + "model": { + "examples": [ + "google/gemini-2.5-flash", + "google/gemini-2.5-flash-lite", + "google/gemini-2.5-flash-preview-09-2025", + "google/gemini-2.5-flash-lite-preview-09-2025", + "google/gemini-2.5-pro", + "google/gemini-2.5-pro-preview", + "google/gemini-2.5-pro-preview-05-06", + "google/gemini-2.0-flash-001", + "google/gemini-2.0-flash-lite-001", + "google/gemini-3-flash-preview", + "google/gemini-3-pro-preview" + ], + "description": "Name of the model to use. 
Charged based on actual token usage.", + "type": "string", + "title": "Model" + }, + "max_tokens": { + "minimum": 1, + "description": "This sets the upper limit for the number of tokens the model can generate in response. It won't produce more than this limit. The maximum value is the context length minus the prompt length.", + "type": "integer", + "title": "Max Tokens" + }, + "temperature": { + "minimum": 0, + "maximum": 2, + "type": "number", + "description": "This setting influences the variety in the model's responses. Lower values lead to more predictable and typical responses, while higher values encourage more diverse and less common responses. At 0, the model always gives the same response for a given input.", + "title": "Temperature", + "default": 1 + } + }, + "title": "VideoEnterpriseInput", + "required": [ + "prompt", + "model" + ] + }, + "RouterVideoEnterpriseOutput": { + "x-fal-order-properties": [ + "output", + "usage" + ], + "type": "object", + "properties": { + "usage": { + "examples": [ + { + "prompt_tokens": 1000, + "total_tokens": 1100, + "completion_tokens": 100, + "cost": 0.0005 + } + ], + "description": "Token usage information", + "title": "Usage", + "allOf": [ + { + "$ref": "#/components/schemas/UsageInfo" + } + ] + }, + "output": { + "examples": [ + "that's the way I look at it and I don't know what you would say. Sooner or later the child gets run over.\nThey seem to be too local, too provincial." + ], + "description": "Generated output from video processing", + "type": "string", + "title": "Output" + } + }, + "title": "VideoOutput", + "required": [ + "output" + ] + }, + "UsageInfo": { + "x-fal-order-properties": [ + "prompt_tokens", + "completion_tokens", + "total_tokens", + "cost" + ], + "type": "object", + "properties": { + "prompt_tokens": { + "title": "Prompt Tokens", + "type": "integer" + }, + "total_tokens": { + "title": "Total Tokens", + "type": "integer", + "default": 0 + }, + "completion_tokens": { + "title": "Completion Tokens", + "type": "integer" + }, + "cost": { + "title": "Cost", + "type": "number" + } + }, + "title": "UsageInfo", + "required": [ + "cost" + ] + } + } + }, + "paths": { + "/openrouter/router/video/enterprise/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/openrouter/router/video/enterprise/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/openrouter/router/video/enterprise": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RouterVideoEnterpriseInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/openrouter/router/video/enterprise/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RouterVideoEnterpriseOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "openrouter/router/video", + "metadata": { + "display_name": "OpenRouter [Video]", + "category": "video-to-text", + "description": "Run any VLM (Video Language Model) with fal, powered by OpenRouter.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:37.408Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8a4887/4urYAt4UGIT4KfbwEdL6R_f0b95609b9e241468dbaf34bf3229f91.jpg", + "model_url": "https://fal.run/openrouter/router/video", + "license_type": "commercial", + "date": "2026-01-13T23:31:16.485Z", + "group": { + "key": "openrouter/router", + "label": "Any Video LM" + }, + "highlighted": false, + "kind": "inference", + "stream_url": "https://fal.run/openrouter/router/video/stream", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for openrouter/router/video", + "version": "1.0.0", + "description": "The OpenAPI schema for the openrouter/router/video queue.", + "x-fal-metadata": { + "endpointId": "openrouter/router/video", + "category": "video-to-text", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8a4887/4urYAt4UGIT4KfbwEdL6R_f0b95609b9e241468dbaf34bf3229f91.jpg", + "playgroundUrl": "https://fal.ai/models/openrouter/router/video", + "documentationUrl": "https://fal.ai/models/openrouter/router/video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "RouterVideoInput": { + "x-fal-order-properties": [ + "video_urls", + "prompt", + "system_prompt", + "model", + "reasoning", + "temperature", + "max_tokens" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Please transcribe the videos respectively." + ], + "description": "Prompt to be used for the video processing", + "type": "string", + "title": "Prompt" + }, + "video_urls": { + "examples": [ + [ + "https://v3b.fal.media/files/b/0a8b3081/t4Jsy53x-Q8iQqg78_Vj__vid01.mp4", + "https://v3b.fal.media/files/b/0a8b3085/xWtbpb6pf4i-BSvR2oWbi_vid06.mp4" + ] + ], + "description": "List of URLs or data URIs of video files to process. Supported formats: mp4, mpeg, mov, webm. For Google Gemini on AI Studio, YouTube links are also supported. Mutually exclusive with video_url.", + "type": "array", + "title": "Video Urls", + "items": { + "type": "string" + } + }, + "system_prompt": { + "examples": [ + "Please look at the videos in order and answer the question." + ], + "description": "System prompt to provide context or instructions to the model", + "type": "string", + "title": "System Prompt" + }, + "reasoning": { + "description": "Should reasoning be part of the final answer.", + "type": "boolean", + "title": "Reasoning", + "default": false + }, + "model": { + "examples": [ + "google/gemini-2.5-flash", + "google/gemini-2.5-pro", + "google/gemini-3-flash-preview", + "google/gemini-3-pro-preview" + ], + "description": "Name of the model to use. Charged based on actual token usage.", + "type": "string", + "title": "Model" + }, + "max_tokens": { + "minimum": 1, + "description": "This sets the upper limit for the number of tokens the model can generate in response. It won't produce more than this limit. The maximum value is the context length minus the prompt length.", + "type": "integer", + "title": "Max Tokens" + }, + "temperature": { + "minimum": 0, + "maximum": 2, + "type": "number", + "description": "This setting influences the variety in the model's responses. Lower values lead to more predictable and typical responses, while higher values encourage more diverse and less common responses. At 0, the model always gives the same response for a given input.", + "title": "Temperature", + "default": 1 + } + }, + "title": "VideoInput", + "required": [ + "prompt", + "model" + ] + }, + "RouterVideoOutput": { + "x-fal-order-properties": [ + "output", + "usage" + ], + "type": "object", + "properties": { + "usage": { + "examples": [ + { + "prompt_tokens": 1000, + "total_tokens": 1100, + "completion_tokens": 100, + "cost": 0.0005 + } + ], + "description": "Token usage information", + "title": "Usage", + "allOf": [ + { + "$ref": "#/components/schemas/UsageInfo" + } + ] + }, + "output": { + "examples": [ + "that's the way I look at it and I don't know what you would say. Sooner or later the child gets run over.\nThey seem to be too local, too provincial."
+ ], + "description": "Generated output from video processing", + "type": "string", + "title": "Output" + } + }, + "title": "VideoOutput", + "required": [ + "output" + ] + }, + "UsageInfo": { + "x-fal-order-properties": [ + "prompt_tokens", + "completion_tokens", + "total_tokens", + "cost" + ], + "type": "object", + "properties": { + "prompt_tokens": { + "title": "Prompt Tokens", + "type": "integer" + }, + "total_tokens": { + "title": "Total Tokens", + "type": "integer", + "default": 0 + }, + "completion_tokens": { + "title": "Completion Tokens", + "type": "integer" + }, + "cost": { + "title": "Cost", + "type": "number" + } + }, + "title": "UsageInfo", + "required": [ + "cost" + ] + } + } + }, + "paths": { + "/openrouter/router/video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/openrouter/router/video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/openrouter/router/video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RouterVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/openrouter/router/video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RouterVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/packages/typescript/ai-fal/json/fal.models.video-to-video.json b/packages/typescript/ai-fal/json/fal.models.video-to-video.json new file mode 100644 index 00000000..55e815d3 --- /dev/null +++ b/packages/typescript/ai-fal/json/fal.models.video-to-video.json @@ -0,0 +1,63893 @@ +{ + "generated_at": "2026-01-28T02:51:51.858Z", + "total_models": 129, + "category": "video-to-video", + "models": [ + { + "endpoint_id": "bria/video/background-removal", + "metadata": { + "display_name": "Video", + "category": "video-to-video", + "description": "Automatically remove backgrounds from videos - perfect for creating clean, professional content without a green screen.", + "status": "active", + "tags": [ + "background-removal" + ], + "updated_at": "2026-01-26T21:43:22.444Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-2.jpg", + "model_url": "https://fal.run/bria/video/background-removal", + "license_type": "commercial", + "date": "2025-06-30T06:13:15.906Z", + "group": { + "key": "bria", + "label": "Video Background Removal" + }, + "highlighted": true, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for bria/video/background-removal", + "version": "1.0.0", + "description": "The OpenAPI schema for the bria/video/background-removal queue.", + "x-fal-metadata": { + "endpointId": "bria/video/background-removal", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-2.jpg", + "playgroundUrl": "https://fal.ai/models/bria/video/background-removal", + "documentationUrl": "https://fal.ai/models/bria/video/background-removal/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url."
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "VideoBackgroundRemovalInput": { + "title": "InputRemoveBackgroundModel", + "type": "object", + "properties": { + "video_url": { + "examples": [ + "https://bria-datasets.s3.us-east-1.amazonaws.com/rmbg_tests/videos/5586521-uhd_3840_2160_25fps_original.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "Input video to remove background from. Size should be less than 14142x14142 and duration less than 30s." + }, + "output_container_and_codec": { + "enum": [ + "mp4_h265", + "mp4_h264", + "webm_vp9", + "mov_h265", + "mov_proresks", + "mkv_h265", + "mkv_h264", + "mkv_vp9", + "gif" + ], + "title": "Output Container And Codec", + "type": "string", + "description": "Output container and codec. Options: mp4_h265, mp4_h264, webm_vp9, mov_h265, mov_proresks, mkv_h265, mkv_h264, mkv_vp9, gif.", + "default": "webm_vp9" + }, + "background_color": { + "enum": [ + "Transparent", + "Black", + "White", + "Gray", + "Red", + "Green", + "Blue", + "Yellow", + "Cyan", + "Magenta", + "Orange" + ], + "title": "Background Color", + "type": "string", + "description": "Background color. Options: Transparent, Black, White, Gray, Red, Green, Blue, Yellow, Cyan, Magenta, Orange.", + "default": "Black" + } + }, + "x-fal-order-properties": [ + "video_url", + "background_color", + "output_container_and_codec" + ], + "required": [ + "video_url" + ] + }, + "VideoBackgroundRemovalOutput": { + "title": "OutputRemoveBackgroundModel", + "type": "object", + "properties": { + "video": { + "anyOf": [ + { + "$ref": "#/components/schemas/Video" + }, + { + "$ref": "#/components/schemas/File" + } + ], + "title": "Video", + "description": "Video with removed background and audio." + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "Video": { + "title": "Video", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. 
It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/bria/video/background-removal/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/video/background-removal/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/bria/video/background-removal": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VideoBackgroundRemovalInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/video/background-removal/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VideoBackgroundRemovalOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/mmaudio-v2", + "metadata": { + "display_name": "MMAudio V2", + "category": "video-to-video", + "description": "MMAudio generates synchronized audio given video and/or text inputs. 
It can be combined with video models to get videos with audio.", + "status": "active", + "tags": [ + "ai video", + "fast" + ], + "updated_at": "2026-01-26T21:44:10.412Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/mmaudio-v2.webp", + "model_url": "https://fal.run/fal-ai/mmaudio-v2", + "license_type": "commercial", + "date": "2024-12-12T00:00:00.000Z", + "group": { + "key": "mmaudio-v2", + "label": "Video to Video" + }, + "highlighted": true, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/mmaudio-v2", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/mmaudio-v2 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/mmaudio-v2", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/mmaudio-v2.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/mmaudio-v2", + "documentationUrl": "https://fal.ai/models/fal-ai/mmaudio-v2/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MmaudioV2Input": { + "title": "BaseInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Indian holy music" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the audio for." + }, + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/video_models/mmaudio_input.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "The URL of the video to generate the audio for." 
+ }, + "num_steps": { + "minimum": 4, + "title": "Num Steps", + "type": "integer", + "maximum": 50, + "description": "The number of steps to generate the audio for.", + "default": 25 + }, + "duration": { + "minimum": 1, + "title": "Duration", + "type": "number", + "maximum": 30, + "description": "The duration of the audio to generate.", + "default": 8 + }, + "cfg_strength": { + "minimum": 0, + "title": "Cfg Strength", + "type": "number", + "maximum": 20, + "description": "The strength of Classifier Free Guidance.", + "default": 4.5 + }, + "seed": { + "minimum": 0, + "title": "Seed", + "type": "integer", + "description": "The seed for the random number generator", + "maximum": 65535 + }, + "mask_away_clip": { + "title": "Mask Away Clip", + "type": "boolean", + "description": "Whether to mask away the clip.", + "default": false + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to generate the audio for.", + "default": "" + } + }, + "x-fal-order-properties": [ + "video_url", + "prompt", + "negative_prompt", + "seed", + "num_steps", + "duration", + "cfg_strength", + "mask_away_clip" + ], + "required": [ + "video_url", + "prompt" + ] + }, + "MmaudioV2Output": { + "title": "Output", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 1001342, + "file_name": "mmaudio_input.mp4", + "content_type": "application/octet-stream", + "url": "https://storage.googleapis.com/falserverless/model_tests/video_models/mmaudio_output.mp4" + } + ], + "title": "Video", + "description": "The generated video with the lip sync.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/mmaudio-v2/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/mmaudio-v2/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/mmaudio-v2": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MmaudioV2Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/mmaudio-v2/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MmaudioV2Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "half-moon-ai/ai-face-swap/faceswapvideo", + "metadata": { + "display_name": "Ai Face Swap", + "category": "video-to-video", + "description": "AI-FaceSwap-Video is a service that can replace a person's face throughout a video clip while keeping their movements natural.", + "status": "active", + "tags": [ + "faceswap", + "utility", + "transformation" + ], + "updated_at": "2026-01-26T21:41:27.908Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8b8d9a/fwVfTZ3jr-zefPB8e_EU5_f8d62bb80d3c4224ae681cc7533b07ee.jpg", + "model_url": "https://fal.run/half-moon-ai/ai-face-swap/faceswapvideo", + "license_type": "commercial", + "date": "2026-01-23T14:44:04.723Z", + "group": { + "key": "Half-Moon-Faceswap", + "label": "Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for half-moon-ai/ai-face-swap/faceswapvideo", + "version": "1.0.0", + "description": "The OpenAPI schema for the half-moon-ai/ai-face-swap/faceswapvideo queue.", + "x-fal-metadata": { + "endpointId": "half-moon-ai/ai-face-swap/faceswapvideo", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8b8d9a/fwVfTZ3jr-zefPB8e_EU5_f8d62bb80d3c4224ae681cc7533b07ee.jpg", + "playgroundUrl": "https://fal.ai/models/half-moon-ai/ai-face-swap/faceswapvideo", + "documentationUrl": "https://fal.ai/models/half-moon-ai/ai-face-swap/faceswapvideo/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + 
"description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "AiFaceSwapFaceswapvideoInput": { + "description": "Input schema for image ↔ video face swap", + "type": "object", + "properties": { + "source_face_url": { + "examples": [ + "https://images.pexels.com/photos/1642228/pexels-photo-1642228.jpeg" + ], + "description": "Source face image", + "type": "string", + "title": "Source Face Url" + }, + "target_video_url": { + "examples": [ + "https://videos.pexels.com/video-files/3201691/3201691-hd_1920_1080_25fps.mp4" + ], + "description": "Target video URL", + "type": "string", + "title": "Target Video Url" + } + }, + "x-fal-order-properties": [ + "source_face_url", + "target_video_url" + ], + "title": "FaceSwapInputVideo", + "required": [ + "source_face_url", + "target_video_url" + ] + }, + "AiFaceSwapFaceswapvideoOutput": { + "description": "FaceFusion output payload when video content is generated", + "type": "object", + "properties": { + "processing_time_ms": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "Optional processing duration in milliseconds", + "title": "Processing Time Ms" + }, + "video": { + "examples": [ + { + "content_type": "video/mp4", + "url": "https://ai-tests.angeneraltest.com/test-files/faceswapvideo.mp4" + } + ], + "description": "Generated video result", + "$ref": "#/components/schemas/Video" + } + }, + "x-fal-order-properties": [ + "video", + "processing_time_ms" + ], + "title": "FaceFusionVideoOutput", + "required": [ + "video" + ] + }, + "Video": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "title": "Video", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/half-moon-ai/ai-face-swap/faceswapvideo/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/half-moon-ai/ai-face-swap/faceswapvideo/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/half-moon-ai/ai-face-swap/faceswapvideo": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AiFaceSwapFaceswapvideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/half-moon-ai/ai-face-swap/faceswapvideo/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AiFaceSwapFaceswapvideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx-2-19b/distilled/video-to-video/lora", + "metadata": { + "display_name": "LTX-2 19B Distilled", + "category": "video-to-video", + "description": "Generate video with audio from videos using LTX-2 Distilled and custom LoRA", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:41.153Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a896226/KeZEHWnM6_hr12pAHKWyQ_2ece2ad4736345abb7cbe3e0e404729d.jpg", + "model_url": "https://fal.run/fal-ai/ltx-2-19b/distilled/video-to-video/lora", + "license_type": "commercial", + "date": "2026-01-07T03:40:29.284Z", + "group": { + "key": "ltx-2-19b-distilled", + "label": "Video to Video with LoRA" + }, + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/ltx2-video-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/ltx2-video-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-2-19b/distilled/video-to-video/lora", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx-2-19b/distilled/video-to-video/lora queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-2-19b/distilled/video-to-video/lora", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a896226/KeZEHWnM6_hr12pAHKWyQ_2ece2ad4736345abb7cbe3e0e404729d.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx-2-19b/distilled/video-to-video/lora", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx-2-19b/distilled/video-to-video/lora/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + 
"QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Ltx219bDistilledVideoToVideoLoraInput": { + "title": "LTX2LoRADistilledVideoToVideoInput", + "type": "object", + "properties": { + "use_multiscale": { + "title": "Use Multi-Scale", + "type": "boolean", + "description": "Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details.", + "default": true + }, + "video_url": { + "examples": [ + "https://v3b.fal.media/files/b/0a8824b1/sdm0KfmenrlywesfzY1Y1_if6euPp1.mp4" + ], + "title": "Video URL", + "type": "string", + "description": "The URL of the video to generate the video from." + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high", + "full" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use.", + "examples": [ + "none" + ], + "default": "none" + }, + "generate_audio": { + "title": "Generate Audio", + "type": "boolean", + "description": "Whether to generate audio for the video.", + "default": true + }, + "prompt": { + "examples": [ + "black-and-white video, a cowboy walks through a dusty town, film grain" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the video from." + }, + "ic_lora_scale": { + "minimum": 0, + "title": "IC-LoRA Scale", + "type": "number", + "description": "The scale of the IC-LoRA to use. This allows you to control the strength of the IC-LoRA.", + "maximum": 1, + "default": 1 + }, + "fps": { + "minimum": 1, + "title": "FPS", + "type": "number", + "description": "The frames per second of the generated video.", + "maximum": 60, + "default": 25 + }, + "loras": { + "title": "LoRAs", + "type": "array", + "description": "The LoRAs to use for the generation.", + "items": { + "$ref": "#/components/schemas/LoRAInput" + } + }, + "camera_lora": { + "enum": [ + "dolly_in", + "dolly_out", + "dolly_left", + "dolly_right", + "jib_up", + "jib_down", + "static", + "none" + ], + "title": "Camera LoRA", + "type": "string", + "description": "The camera LoRA to use. 
This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.", + "examples": [ + "none" + ], + "default": "none" + }, + "video_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "auto", + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated video.", + "title": "Video Size", + "default": "auto" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "camera_lora_scale": { + "minimum": 0, + "title": "Camera LoRA Scale", + "type": "number", + "description": "The scale of the camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.", + "maximum": 1, + "default": 1 + }, + "image_strength": { + "minimum": 0, + "title": "Image Strength", + "type": "number", + "description": "The strength of the image to use for the video generation.", + "maximum": 1, + "default": 1 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to generate the video from.", + "default": "blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, off-sync audio,incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts." + }, + "preprocessor": { + "enum": [ + "depth", + "canny", + "pose", + "none" + ], + "description": "The preprocessor to use for the video. When a preprocessor is used and `ic_lora_type` is set to `match_preprocessor`, the IC-LoRA will be loaded based on the preprocessor type.", + "type": "string", + "examples": [ + "none" + ], + "title": "Preprocessor", + "default": "none" + }, + "video_strength": { + "minimum": 0, + "title": "Video Strength", + "type": "number", + "description": "Video conditioning strength. Lower values represent more freedom given to the model to change the video content.", + "maximum": 1, + "default": 1 + }, + "video_output_type": { + "enum": [ + "X264 (.mp4)", + "VP9 (.webm)", + "PRORES4444 (.mov)", + "GIF (.gif)" + ], + "title": "Video Output Type", + "type": "string", + "description": "The output type of the generated video.", + "default": "X264 (.mp4)" + }, + "ic_lora": { + "enum": [ + "match_preprocessor", + "canny", + "depth", + "pose", + "detailer", + "none" + ], + "description": "The type of IC-LoRA to load. 
In-Context LoRA weights are used to condition the video based on edge, depth, or pose videos. Only change this from `match_preprocessor` if your videos are already preprocessed (or you are using the detailer.)", + "type": "string", + "examples": [ + "match_preprocessor" + ], + "title": "IC-LoRA", + "default": "match_preprocessor" + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "description": "The write mode of the generated video.", + "default": "balanced" + }, + "num_frames": { + "description": "The number of frames to generate.", + "type": "integer", + "minimum": 9, + "title": "Number of Frames", + "maximum": 481, + "default": 121 + }, + "image_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "An optional URL of an image to use as the first frame of the video.", + "title": "Image URL" + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "description": "The quality of the generated video.", + "default": "high" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_prompt_expansion": { + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The seed for the random number generator.", + "title": "Seed" + }, + "match_input_fps": { + "title": "Match Input FPS", + "type": "boolean", + "description": "When true, match the output FPS to the input video's FPS instead of using the default target FPS.", + "default": true + }, + "match_video_length": { + "title": "Match Video Length", + "type": "boolean", + "description": "When enabled, the number of frames will be calculated based on the video duration and FPS. When disabled, use the specified num_frames.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "video_url", + "image_url", + "match_video_length", + "num_frames", + "video_size", + "generate_audio", + "use_multiscale", + "match_input_fps", + "fps", + "acceleration", + "camera_lora", + "camera_lora_scale", + "negative_prompt", + "seed", + "enable_prompt_expansion", + "enable_safety_checker", + "video_output_type", + "video_quality", + "video_write_mode", + "sync_mode", + "loras", + "preprocessor", + "ic_lora", + "ic_lora_scale", + "image_strength", + "video_strength" + ], + "required": [ + "loras", + "prompt", + "video_url" + ] + }, + "Ltx219bDistilledVideoToVideoLoraOutput": { + "title": "LTX2VideoToVideoOutput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "black-and-white video, a cowboy walks through a dusty town, film grain" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for the generation." + }, + "seed": { + "examples": [ + 1490631192028410600 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used for the random number generator." 
+ }, + "video": { + "examples": [ + { + "height": 704, + "duration": 6.44, + "url": "https://v3b.fal.media/files/b/0a895ed5/SaTGe87IpMUMiSq33w5Qb_RoCJFZhc.mp4", + "width": 1248, + "fps": 25, + "file_name": "SaTGe87IpMUMiSq33w5Qb_RoCJFZhc.mp4", + "content_type": "video/mp4", + "num_frames": 161 + } + ], + "description": "The generated video.", + "$ref": "#/components/schemas/VideoFile" + } + }, + "x-fal-order-properties": [ + "video", + "seed", + "prompt" + ], + "required": [ + "video", + "seed", + "prompt" + ] + }, + "LoRAInput": { + "title": "LoRAInput", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL, HuggingFace repo ID (owner/repo) to lora weights." + }, + "scale": { + "minimum": 0, + "title": "Scale", + "type": "number", + "description": "Scale factor for LoRA application (0.0 to 4.0).", + "maximum": 4, + "default": 1 + }, + "weight_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Name of the LoRA weight. Only used if `path` is a HuggingFace repository, and is only required when the repository contains multiple LoRA weights.", + "title": "Weight Name" + } + }, + "x-fal-order-properties": [ + "path", + "weight_name", + "scale" + ], + "description": "LoRA weight configuration.", + "required": [ + "path" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "duration": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The duration of the video", + "title": "Duration" + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The height of the video", + "title": "Height" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The width of the video", + "title": "Width" + }, + "fps": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The FPS of the video", + "title": "Fps" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "num_frames": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The number of frames in the video", + "title": "Num Frames" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-2-19b/distilled/video-to-video/lora/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/distilled/video-to-video/lora/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/distilled/video-to-video/lora": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx219bDistilledVideoToVideoLoraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/distilled/video-to-video/lora/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx219bDistilledVideoToVideoLoraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx-2-19b/distilled/video-to-video", + "metadata": { + "display_name": "LTX-2 19B Distilled", + "category": "video-to-video", + "description": "Generate video with audio from videos using LTX-2 Distilled", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:41.285Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a896215/eDZD0zp44dBtoK1K3YPd7_258cf5dc4d47429fa90ca3d625da76cc.jpg", + "model_url": "https://fal.run/fal-ai/ltx-2-19b/distilled/video-to-video", + "license_type": "commercial", + "date": "2026-01-07T03:38:57.274Z", + "group": { + "key": "ltx-2-19b-distilled", + "label": "Video to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/ltx2-video-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/ltx2-video-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-2-19b/distilled/video-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx-2-19b/distilled/video-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-2-19b/distilled/video-to-video", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a896215/eDZD0zp44dBtoK1K3YPd7_258cf5dc4d47429fa90ca3d625da76cc.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx-2-19b/distilled/video-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx-2-19b/distilled/video-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Ltx219bDistilledVideoToVideoInput": { + "title": "LTX2DistilledVideoToVideoInput", + "type": "object", + "properties": { + "use_multiscale": { + "title": "Use Multi-Scale", + "type": "boolean", + "description": "Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details.", + "default": true + }, + "video_url": { + "examples": [ + "https://v3b.fal.media/files/b/0a8824b1/sdm0KfmenrlywesfzY1Y1_if6euPp1.mp4" + ], + "title": "Video URL", + "type": "string", + "description": "The URL of the video to generate the video from." + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high", + "full" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use.", + "examples": [ + "none" + ], + "default": "none" + }, + "generate_audio": { + "title": "Generate Audio", + "type": "boolean", + "description": "Whether to generate audio for the video.", + "default": true + }, + "prompt": { + "examples": [ + "black-and-white video, a cowboy walks through a dusty town, film grain" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the video from." + }, + "ic_lora_scale": { + "minimum": 0, + "title": "IC-LoRA Scale", + "type": "number", + "description": "The scale of the IC-LoRA to use. This allows you to control the strength of the IC-LoRA.", + "maximum": 1, + "default": 1 + }, + "fps": { + "minimum": 1, + "title": "FPS", + "type": "number", + "description": "The frames per second of the generated video.", + "maximum": 60, + "default": 25 + }, + "camera_lora": { + "enum": [ + "dolly_in", + "dolly_out", + "dolly_left", + "dolly_right", + "jib_up", + "jib_down", + "static", + "none" + ], + "title": "Camera LoRA", + "type": "string", + "description": "The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.", + "examples": [ + "none" + ], + "default": "none" + }, + "video_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "auto", + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated video.", + "title": "Video Size", + "default": "auto" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "camera_lora_scale": { + "minimum": 0, + "title": "Camera LoRA Scale", + "type": "number", + "description": "The scale of the camera LoRA to use. 
This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.", + "maximum": 1, + "default": 1 + }, + "image_strength": { + "minimum": 0, + "title": "Image Strength", + "type": "number", + "description": "The strength of the image to use for the video generation.", + "maximum": 1, + "default": 1 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to generate the video from.", + "default": "blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, off-sync audio,incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts." + }, + "preprocessor": { + "enum": [ + "depth", + "canny", + "pose", + "none" + ], + "description": "The preprocessor to use for the video. When a preprocessor is used and `ic_lora_type` is set to `match_preprocessor`, the IC-LoRA will be loaded based on the preprocessor type.", + "type": "string", + "examples": [ + "none" + ], + "title": "Preprocessor", + "default": "none" + }, + "video_strength": { + "minimum": 0, + "title": "Video Strength", + "type": "number", + "description": "Video conditioning strength. Lower values represent more freedom given to the model to change the video content.", + "maximum": 1, + "default": 1 + }, + "video_output_type": { + "enum": [ + "X264 (.mp4)", + "VP9 (.webm)", + "PRORES4444 (.mov)", + "GIF (.gif)" + ], + "title": "Video Output Type", + "type": "string", + "description": "The output type of the generated video.", + "default": "X264 (.mp4)" + }, + "ic_lora": { + "enum": [ + "match_preprocessor", + "canny", + "depth", + "pose", + "detailer", + "none" + ], + "description": "The type of IC-LoRA to load. In-Context LoRA weights are used to condition the video based on edge, depth, or pose videos. 
Only change this from `match_preprocessor` if your videos are already preprocessed (or you are using the detailer.)", + "type": "string", + "examples": [ + "match_preprocessor" + ], + "title": "IC-LoRA", + "default": "match_preprocessor" + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "description": "The write mode of the generated video.", + "default": "balanced" + }, + "num_frames": { + "description": "The number of frames to generate.", + "type": "integer", + "minimum": 9, + "title": "Number of Frames", + "maximum": 481, + "default": 121 + }, + "image_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "An optional URL of an image to use as the first frame of the video.", + "title": "Image URL" + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "description": "The quality of the generated video.", + "default": "high" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_prompt_expansion": { + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The seed for the random number generator.", + "title": "Seed" + }, + "match_input_fps": { + "title": "Match Input FPS", + "type": "boolean", + "description": "When true, match the output FPS to the input video's FPS instead of using the default target FPS.", + "default": true + }, + "match_video_length": { + "title": "Match Video Length", + "type": "boolean", + "description": "When enabled, the number of frames will be calculated based on the video duration and FPS. When disabled, use the specified num_frames.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "video_url", + "image_url", + "match_video_length", + "num_frames", + "video_size", + "generate_audio", + "use_multiscale", + "match_input_fps", + "fps", + "acceleration", + "camera_lora", + "camera_lora_scale", + "negative_prompt", + "seed", + "enable_prompt_expansion", + "enable_safety_checker", + "video_output_type", + "video_quality", + "video_write_mode", + "sync_mode", + "preprocessor", + "ic_lora", + "ic_lora_scale", + "image_strength", + "video_strength" + ], + "required": [ + "prompt", + "video_url" + ] + }, + "Ltx219bDistilledVideoToVideoOutput": { + "title": "LTX2VideoToVideoOutput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "black-and-white video, a cowboy walks through a dusty town, film grain" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for the generation." + }, + "seed": { + "examples": [ + 1490631192028410600 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used for the random number generator." 
+ }, + "video": { + "examples": [ + { + "height": 704, + "duration": 6.44, + "url": "https://v3b.fal.media/files/b/0a895ed5/SaTGe87IpMUMiSq33w5Qb_RoCJFZhc.mp4", + "width": 1248, + "fps": 25, + "file_name": "SaTGe87IpMUMiSq33w5Qb_RoCJFZhc.mp4", + "content_type": "video/mp4", + "num_frames": 161 + } + ], + "description": "The generated video.", + "$ref": "#/components/schemas/VideoFile" + } + }, + "x-fal-order-properties": [ + "video", + "seed", + "prompt" + ], + "required": [ + "video", + "seed", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "duration": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The duration of the video", + "title": "Duration" + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The height of the video", + "title": "Height" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The width of the video", + "title": "Width" + }, + "fps": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The FPS of the video", + "title": "Fps" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "num_frames": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The number of frames in the video", + "title": "Num Frames" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-2-19b/distilled/video-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/distilled/video-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/distilled/video-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx219bDistilledVideoToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/distilled/video-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx219bDistilledVideoToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx-2-19b/video-to-video/lora", + "metadata": { + "display_name": "LTX-2 19B", + "category": "video-to-video", + "description": "Generate video with audio from videos using LTX-2 and custom LoRA", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:41.435Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a896206/sXN9s121dM9wB94qRWi55_782e4ad62c7b4df680bdc590ca736c94.jpg", + "model_url": "https://fal.run/fal-ai/ltx-2-19b/video-to-video/lora", + "license_type": "commercial", + "date": "2026-01-07T03:35:30.708Z", + "group": { + "key": "ltx-2-19b", + "label": "Video to Video with LoRA" + }, + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/ltx2-v2v-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/ltx2-v2v-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-2-19b/video-to-video/lora", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx-2-19b/video-to-video/lora queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-2-19b/video-to-video/lora", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a896206/sXN9s121dM9wB94qRWi55_782e4ad62c7b4df680bdc590ca736c94.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx-2-19b/video-to-video/lora", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx-2-19b/video-to-video/lora/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + 
"enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Ltx219bVideoToVideoLoraInput": { + "title": "LTX2LoRAVideoToVideoInput", + "type": "object", + "properties": { + "use_multiscale": { + "title": "Use Multi-Scale", + "type": "boolean", + "description": "Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details.", + "default": true + }, + "video_url": { + "examples": [ + "https://v3b.fal.media/files/b/0a8824b1/sdm0KfmenrlywesfzY1Y1_if6euPp1.mp4" + ], + "title": "Video URL", + "type": "string", + "description": "The URL of the video to generate the video from." + }, + "prompt": { + "examples": [ + "black-and-white video, a cowboy walks through a dusty town, film grain" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the video from." + }, + "ic_lora_scale": { + "minimum": 0, + "title": "IC-LoRA Scale", + "type": "number", + "description": "The scale of the IC-LoRA to use. This allows you to control the strength of the IC-LoRA.", + "maximum": 1, + "default": 1 + }, + "generate_audio": { + "title": "Generate Audio", + "type": "boolean", + "description": "Whether to generate audio for the video.", + "default": true + }, + "loras": { + "title": "LoRAs", + "type": "array", + "description": "The LoRAs to use for the generation.", + "items": { + "$ref": "#/components/schemas/LoRAInput" + } + }, + "video_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "auto", + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated video.", + "title": "Video Size", + "default": "auto" + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance Scale", + "type": "number", + "description": "The guidance scale to use.", + "maximum": 10, + "default": 3 + }, + "num_frames": { + "description": "The number of frames to generate.", + "type": "integer", + "minimum": 9, + "title": "Number of Frames", + "maximum": 481, + "default": 121 + }, + "camera_lora_scale": { + "minimum": 0, + "title": "Camera LoRA Scale", + "type": "number", + "description": "The scale of the camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.", + "maximum": 1, + "default": 1 + }, + "video_strength": { + "minimum": 0, + "title": "Video Strength", + "type": "number", + "description": "Video conditioning strength. 
Lower values represent more freedom given to the model to change the video content.", + "maximum": 1, + "default": 1 + }, + "video_output_type": { + "enum": [ + "X264 (.mp4)", + "VP9 (.webm)", + "PRORES4444 (.mov)", + "GIF (.gif)" + ], + "title": "Video Output Type", + "type": "string", + "description": "The output type of the generated video.", + "default": "X264 (.mp4)" + }, + "image_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "An optional URL of an image to use as the first frame of the video.", + "title": "Image URL" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "description": "The quality of the generated video.", + "default": "high" + }, + "enable_prompt_expansion": { + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The seed for the random number generator.", + "title": "Seed" + }, + "match_video_length": { + "title": "Match Video Length", + "type": "boolean", + "description": "When enabled, the number of frames will be calculated based on the video duration and FPS. When disabled, use the specified num_frames.", + "default": true + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high", + "full" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use.", + "examples": [ + "regular" + ], + "default": "regular" + }, + "fps": { + "minimum": 1, + "title": "FPS", + "type": "number", + "description": "The frames per second of the generated video.", + "maximum": 60, + "default": 25 + }, + "camera_lora": { + "enum": [ + "dolly_in", + "dolly_out", + "dolly_left", + "dolly_right", + "jib_up", + "jib_down", + "static", + "none" + ], + "title": "Camera LoRA", + "type": "string", + "description": "The camera LoRA to use. 
This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.", + "examples": [ + "none" + ], + "default": "none" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "image_strength": { + "minimum": 0, + "title": "Image Strength", + "type": "number", + "description": "The strength of the image to use for the video generation.", + "maximum": 1, + "default": 1 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to generate the video from.", + "default": "blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, off-sync audio,incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts." + }, + "preprocessor": { + "enum": [ + "depth", + "canny", + "pose", + "none" + ], + "description": "The preprocessor to use for the video. When a preprocessor is used and `ic_lora_type` is set to `match_preprocessor`, the IC-LoRA will be loaded based on the preprocessor type.", + "type": "string", + "examples": [ + "none" + ], + "title": "Preprocessor", + "default": "none" + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "description": "The write mode of the generated video.", + "default": "balanced" + }, + "ic_lora": { + "enum": [ + "match_preprocessor", + "canny", + "depth", + "pose", + "detailer", + "none" + ], + "description": "The type of IC-LoRA to load. In-Context LoRA weights are used to condition the video based on edge, depth, or pose videos. 
Only change this from `match_preprocessor` if your videos are already preprocessed (or you are using the detailer.)", + "type": "string", + "examples": [ + "match_preprocessor" + ], + "title": "IC-LoRA", + "default": "match_preprocessor" + }, + "match_input_fps": { + "title": "Match Input FPS", + "type": "boolean", + "description": "When true, match the output FPS to the input video's FPS instead of using the default target FPS.", + "default": true + }, + "num_inference_steps": { + "minimum": 8, + "title": "Number of Inference Steps", + "type": "integer", + "description": "The number of inference steps to use.", + "maximum": 50, + "default": 40 + } + }, + "x-fal-order-properties": [ + "prompt", + "video_url", + "image_url", + "match_video_length", + "num_frames", + "video_size", + "generate_audio", + "use_multiscale", + "match_input_fps", + "fps", + "guidance_scale", + "num_inference_steps", + "acceleration", + "camera_lora", + "camera_lora_scale", + "negative_prompt", + "seed", + "enable_prompt_expansion", + "enable_safety_checker", + "video_output_type", + "video_quality", + "video_write_mode", + "sync_mode", + "loras", + "preprocessor", + "ic_lora", + "ic_lora_scale", + "image_strength", + "video_strength" + ], + "required": [ + "loras", + "prompt", + "video_url" + ] + }, + "Ltx219bVideoToVideoLoraOutput": { + "title": "LTX2VideoToVideoOutput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "black-and-white video, a cowboy walks through a dusty town, film grain" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for the generation." + }, + "seed": { + "examples": [ + 1490631192028410600 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used for the random number generator." + }, + "video": { + "examples": [ + { + "height": 704, + "duration": 6.44, + "url": "https://v3b.fal.media/files/b/0a895ed5/SaTGe87IpMUMiSq33w5Qb_RoCJFZhc.mp4", + "width": 1248, + "fps": 25, + "file_name": "SaTGe87IpMUMiSq33w5Qb_RoCJFZhc.mp4", + "content_type": "video/mp4", + "num_frames": 161 + } + ], + "description": "The generated video.", + "$ref": "#/components/schemas/VideoFile" + } + }, + "x-fal-order-properties": [ + "video", + "seed", + "prompt" + ], + "required": [ + "video", + "seed", + "prompt" + ] + }, + "LoRAInput": { + "title": "LoRAInput", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL, HuggingFace repo ID (owner/repo) to lora weights." + }, + "scale": { + "minimum": 0, + "title": "Scale", + "type": "number", + "description": "Scale factor for LoRA application (0.0 to 4.0).", + "maximum": 4, + "default": 1 + }, + "weight_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Name of the LoRA weight. 
Only used if `path` is a HuggingFace repository, and is only required when the repository contains multiple LoRA weights.", + "title": "Weight Name" + } + }, + "x-fal-order-properties": [ + "path", + "weight_name", + "scale" + ], + "description": "LoRA weight configuration.", + "required": [ + "path" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "duration": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The duration of the video", + "title": "Duration" + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The height of the video", + "title": "Height" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The width of the video", + "title": "Width" + }, + "fps": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The FPS of the video", + "title": "Fps" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "num_frames": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The number of frames in the video", + "title": "Num Frames" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-2-19b/video-to-video/lora/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/video-to-video/lora/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/video-to-video/lora": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx219bVideoToVideoLoraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/video-to-video/lora/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx219bVideoToVideoLoraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx-2-19b/video-to-video", + "metadata": { + "display_name": "LTX-2 19B", + "category": "video-to-video", + "description": "Generate video with audio from videos using LTX-2", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:41.563Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8961f3/1XEMGDaYbwvev5g0UU3DG_e6cfe8e5c1ee4472af89f76d0309eb8c.jpg", + "model_url": "https://fal.run/fal-ai/ltx-2-19b/video-to-video", + "license_type": "commercial", + "date": "2026-01-07T03:32:57.344Z", + "group": { + "key": "ltx-2-19b", + "label": "Video to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/ltx2-video-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/ltx2-video-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-2-19b/video-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx-2-19b/video-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-2-19b/video-to-video", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8961f3/1XEMGDaYbwvev5g0UU3DG_e6cfe8e5c1ee4472af89f76d0309eb8c.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx-2-19b/video-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx-2-19b/video-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + 
"type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Ltx219bVideoToVideoInput": { + "title": "LTX2VideoToVideoInput", + "type": "object", + "properties": { + "use_multiscale": { + "title": "Use Multi-Scale", + "type": "boolean", + "description": "Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details.", + "default": true + }, + "video_url": { + "examples": [ + "https://v3b.fal.media/files/b/0a8824b1/sdm0KfmenrlywesfzY1Y1_if6euPp1.mp4" + ], + "title": "Video URL", + "type": "string", + "description": "The URL of the video to generate the video from." + }, + "prompt": { + "examples": [ + "black-and-white video, a cowboy walks through a dusty town, film grain" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the video from." + }, + "generate_audio": { + "title": "Generate Audio", + "type": "boolean", + "description": "Whether to generate audio for the video.", + "default": true + }, + "ic_lora_scale": { + "minimum": 0, + "title": "IC-LoRA Scale", + "type": "number", + "description": "The scale of the IC-LoRA to use. This allows you to control the strength of the IC-LoRA.", + "maximum": 1, + "default": 1 + }, + "video_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "auto", + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated video.", + "title": "Video Size", + "default": "auto" + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance Scale", + "type": "number", + "description": "The guidance scale to use.", + "maximum": 10, + "default": 3 + }, + "camera_lora_scale": { + "minimum": 0, + "title": "Camera LoRA Scale", + "type": "number", + "description": "The scale of the camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.", + "maximum": 1, + "default": 1 + }, + "num_frames": { + "description": "The number of frames to generate.", + "type": "integer", + "minimum": 9, + "title": "Number of Frames", + "maximum": 481, + "default": 121 + }, + "video_strength": { + "minimum": 0, + "title": "Video Strength", + "type": "number", + "description": "Video conditioning strength. 
Lower values represent more freedom given to the model to change the video content.", + "maximum": 1, + "default": 1 + }, + "video_output_type": { + "enum": [ + "X264 (.mp4)", + "VP9 (.webm)", + "PRORES4444 (.mov)", + "GIF (.gif)" + ], + "title": "Video Output Type", + "type": "string", + "description": "The output type of the generated video.", + "default": "X264 (.mp4)" + }, + "image_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "An optional URL of an image to use as the first frame of the video.", + "title": "Image URL" + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "description": "The quality of the generated video.", + "default": "high" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_prompt_expansion": { + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The seed for the random number generator.", + "title": "Seed" + }, + "match_video_length": { + "title": "Match Video Length", + "type": "boolean", + "description": "When enabled, the number of frames will be calculated based on the video duration and FPS. When disabled, use the specified num_frames.", + "default": true + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high", + "full" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use.", + "examples": [ + "regular" + ], + "default": "regular" + }, + "fps": { + "minimum": 1, + "title": "FPS", + "type": "number", + "description": "The frames per second of the generated video.", + "maximum": 60, + "default": 25 + }, + "camera_lora": { + "enum": [ + "dolly_in", + "dolly_out", + "dolly_left", + "dolly_right", + "jib_up", + "jib_down", + "static", + "none" + ], + "title": "Camera LoRA", + "type": "string", + "description": "The camera LoRA to use. 
This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.", + "examples": [ + "none" + ], + "default": "none" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "image_strength": { + "minimum": 0, + "title": "Image Strength", + "type": "number", + "description": "The strength of the image to use for the video generation.", + "maximum": 1, + "default": 1 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to generate the video from.", + "default": "blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, off-sync audio,incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts." + }, + "preprocessor": { + "enum": [ + "depth", + "canny", + "pose", + "none" + ], + "description": "The preprocessor to use for the video. When a preprocessor is used and `ic_lora_type` is set to `match_preprocessor`, the IC-LoRA will be loaded based on the preprocessor type.", + "type": "string", + "examples": [ + "none" + ], + "title": "Preprocessor", + "default": "none" + }, + "ic_lora": { + "enum": [ + "match_preprocessor", + "canny", + "depth", + "pose", + "detailer", + "none" + ], + "description": "The type of IC-LoRA to load. In-Context LoRA weights are used to condition the video based on edge, depth, or pose videos. 
Only change this from `match_preprocessor` if your videos are already preprocessed (or you are using the detailer.)", + "type": "string", + "examples": [ + "match_preprocessor" + ], + "title": "IC-LoRA", + "default": "match_preprocessor" + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "description": "The write mode of the generated video.", + "default": "balanced" + }, + "num_inference_steps": { + "minimum": 8, + "title": "Number of Inference Steps", + "type": "integer", + "description": "The number of inference steps to use.", + "maximum": 50, + "default": 40 + }, + "match_input_fps": { + "title": "Match Input FPS", + "type": "boolean", + "description": "When true, match the output FPS to the input video's FPS instead of using the default target FPS.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "video_url", + "image_url", + "match_video_length", + "num_frames", + "video_size", + "generate_audio", + "use_multiscale", + "match_input_fps", + "fps", + "guidance_scale", + "num_inference_steps", + "acceleration", + "camera_lora", + "camera_lora_scale", + "negative_prompt", + "seed", + "enable_prompt_expansion", + "enable_safety_checker", + "video_output_type", + "video_quality", + "video_write_mode", + "sync_mode", + "preprocessor", + "ic_lora", + "ic_lora_scale", + "image_strength", + "video_strength" + ], + "required": [ + "prompt", + "video_url" + ] + }, + "Ltx219bVideoToVideoOutput": { + "title": "LTX2VideoToVideoOutput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "black-and-white video, a cowboy walks through a dusty town, film grain" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for the generation." + }, + "seed": { + "examples": [ + 1490631192028410600 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used for the random number generator." 
+ }, + "video": { + "examples": [ + { + "height": 704, + "duration": 6.44, + "url": "https://v3b.fal.media/files/b/0a895ed5/SaTGe87IpMUMiSq33w5Qb_RoCJFZhc.mp4", + "width": 1248, + "fps": 25, + "file_name": "SaTGe87IpMUMiSq33w5Qb_RoCJFZhc.mp4", + "content_type": "video/mp4", + "num_frames": 161 + } + ], + "description": "The generated video.", + "$ref": "#/components/schemas/VideoFile" + } + }, + "x-fal-order-properties": [ + "video", + "seed", + "prompt" + ], + "required": [ + "video", + "seed", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "duration": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The duration of the video", + "title": "Duration" + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The height of the video", + "title": "Height" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The width of the video", + "title": "Width" + }, + "fps": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The FPS of the video", + "title": "Fps" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "num_frames": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The number of frames in the video", + "title": "Num Frames" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-2-19b/video-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/video-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/video-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx219bVideoToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/video-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx219bVideoToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx-2-19b/distilled/extend-video/lora", + "metadata": { + "display_name": "LTX-2 19B Distilled", + "category": "video-to-video", + "description": "Extend videos with audio using LTX-2 Distilled and custom LoRA", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:41.818Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a893747/pnWfH_gpzQ7JkJ97o1roZ_a1b45e9a99364b168af93a34728a9464.jpg", + "model_url": "https://fal.run/fal-ai/ltx-2-19b/distilled/extend-video/lora", + "license_type": "commercial", + "date": "2026-01-05T21:12:22.324Z", + "group": { + "key": "ltx-2-19b-distilled", + "label": "Extend Video with LoRA" + }, + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/ltx2-video-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/ltx2-video-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-2-19b/distilled/extend-video/lora", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx-2-19b/distilled/extend-video/lora queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-2-19b/distilled/extend-video/lora", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a893747/pnWfH_gpzQ7JkJ97o1roZ_a1b45e9a99364b168af93a34728a9464.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx-2-19b/distilled/extend-video/lora", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx-2-19b/distilled/extend-video/lora/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { 
+ "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Ltx219bDistilledExtendVideoLoraInput": { + "title": "LTX2LoRADistilledExtendVideoInput", + "type": "object", + "properties": { + "use_multiscale": { + "title": "Use Multi-Scale", + "type": "boolean", + "description": "Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details.", + "default": true + }, + "video_url": { + "examples": [ + "https://v3b.fal.media/files/b/0a8824b1/sdm0KfmenrlywesfzY1Y1_if6euPp1.mp4" + ], + "title": "Video URL", + "type": "string", + "description": "The URL of the video to extend." + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high", + "full" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use.", + "examples": [ + "none" + ], + "default": "none" + }, + "generate_audio": { + "title": "Generate Audio", + "type": "boolean", + "description": "Whether to generate audio for the video.", + "default": true + }, + "prompt": { + "examples": [ + "Continue the scene naturally, maintaining the same style and motion." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the video from." + }, + "fps": { + "minimum": 1, + "title": "FPS", + "type": "number", + "description": "The frames per second of the generated video.", + "maximum": 60, + "default": 25 + }, + "loras": { + "title": "LoRAs", + "type": "array", + "description": "The LoRAs to use for the generation.", + "items": { + "$ref": "#/components/schemas/LoRAInput" + } + }, + "camera_lora": { + "enum": [ + "dolly_in", + "dolly_out", + "dolly_left", + "dolly_right", + "jib_up", + "jib_down", + "static", + "none" + ], + "title": "Camera LoRA", + "type": "string", + "description": "The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.", + "examples": [ + "none" + ], + "default": "none" + }, + "video_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "auto", + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated video.", + "title": "Video Size", + "default": "auto" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "camera_lora_scale": { + "minimum": 0, + "title": "Camera LoRA Scale", + "type": "number", + "description": "The scale of the camera LoRA to use. 
This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.", + "maximum": 1, + "default": 1 + }, + "num_frames": { + "description": "The number of frames to generate.", + "type": "integer", + "minimum": 9, + "title": "Number of Frames", + "maximum": 481, + "default": 121 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to generate the video from.", + "default": "blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, off-sync audio,incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts." + }, + "video_strength": { + "minimum": 0, + "title": "Video Strength", + "type": "number", + "description": "Video conditioning strength. Lower values represent more freedom given to the model to change the video content.", + "maximum": 1, + "default": 1 + }, + "video_output_type": { + "enum": [ + "X264 (.mp4)", + "VP9 (.webm)", + "PRORES4444 (.mov)", + "GIF (.gif)" + ], + "title": "Video Output Type", + "type": "string", + "description": "The output type of the generated video.", + "default": "X264 (.mp4)" + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "description": "The write mode of the generated video.", + "default": "balanced" + }, + "num_context_frames": { + "description": "The number of frames to use as context for the extension.", + "type": "integer", + "minimum": 0, + "title": "Number of Context Frames", + "maximum": 121, + "default": 25 + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "description": "The quality of the generated video.", + "default": "high" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_prompt_expansion": { + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + }, + "audio_strength": { + "minimum": 0, + "title": "Audio Strength", + "type": "number", + "description": "Audio conditioning strength. 
Lower values represent more freedom given to the model to change the audio content.", + "maximum": 1, + "default": 1 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The seed for the random number generator.", + "title": "Seed" + }, + "match_input_fps": { + "title": "Match Input FPS", + "type": "boolean", + "description": "When true, match the output FPS to the input video's FPS instead of using the default target FPS.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "video_url", + "num_frames", + "video_size", + "generate_audio", + "use_multiscale", + "match_input_fps", + "fps", + "acceleration", + "camera_lora", + "camera_lora_scale", + "negative_prompt", + "seed", + "enable_prompt_expansion", + "enable_safety_checker", + "video_output_type", + "video_quality", + "video_write_mode", + "sync_mode", + "loras", + "num_context_frames", + "video_strength", + "audio_strength" + ], + "required": [ + "loras", + "prompt", + "video_url" + ] + }, + "Ltx219bDistilledExtendVideoLoraOutput": { + "title": "LTX2ExtendVideoOutput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A woman stands still amid a busy neon-lit street at night. The camera slowly dollies in toward her face as people blur past, their motion emphasizing her calm presence. City lights flicker and reflections shift across her denim jacket." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for the generation." + }, + "seed": { + "examples": [ + 2078003885 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used for the random number generator." + }, + "video": { + "examples": [ + { + "height": 704, + "duration": 6.44, + "url": "https://v3b.fal.media/files/b/0a894013/N9lnMTq7W3uMC0lOQg845_BknRPV8I.mp4", + "width": 1248, + "fps": 25, + "file_name": "CJcQGDrxOSRg2YFl5GNDt_glXPMoji.mp4", + "content_type": "video/mp4", + "num_frames": 161 + } + ], + "description": "The generated video.", + "$ref": "#/components/schemas/VideoFile" + } + }, + "x-fal-order-properties": [ + "video", + "seed", + "prompt" + ], + "required": [ + "video", + "seed", + "prompt" + ] + }, + "LoRAInput": { + "title": "LoRAInput", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL, HuggingFace repo ID (owner/repo) to lora weights." + }, + "scale": { + "minimum": 0, + "title": "Scale", + "type": "number", + "description": "Scale factor for LoRA application (0.0 to 4.0).", + "maximum": 4, + "default": 1 + }, + "weight_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Name of the LoRA weight. 
Only used if `path` is a HuggingFace repository, and is only required when the repository contains multiple LoRA weights.", + "title": "Weight Name" + } + }, + "x-fal-order-properties": [ + "path", + "weight_name", + "scale" + ], + "description": "LoRA weight configuration.", + "required": [ + "path" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "duration": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The duration of the video", + "title": "Duration" + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The height of the video", + "title": "Height" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The width of the video", + "title": "Width" + }, + "fps": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The FPS of the video", + "title": "Fps" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "num_frames": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The number of frames in the video", + "title": "Num Frames" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-2-19b/distilled/extend-video/lora/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/distilled/extend-video/lora/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/distilled/extend-video/lora": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx219bDistilledExtendVideoLoraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/distilled/extend-video/lora/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx219bDistilledExtendVideoLoraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx-2-19b/distilled/extend-video", + "metadata": { + "display_name": "LTX-2 19B Distilled", + "category": "video-to-video", + "description": "Extend videos with audio using LTX-2 Distilled", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:41.972Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a893735/sSRmRNUM6b01I-BJwDDcj_791bb2fc54584ea1a1240085da9060f5.jpg", + "model_url": "https://fal.run/fal-ai/ltx-2-19b/distilled/extend-video", + "license_type": "commercial", + "date": "2026-01-05T21:09:00.536Z", + "group": { + "key": "ltx-2-19b-distilled", + "label": "Extend Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/ltx2-video-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/ltx2-video-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-2-19b/distilled/extend-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx-2-19b/distilled/extend-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-2-19b/distilled/extend-video", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a893735/sSRmRNUM6b01I-BJwDDcj_791bb2fc54584ea1a1240085da9060f5.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx-2-19b/distilled/extend-video", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx-2-19b/distilled/extend-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": 
{ + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Ltx219bDistilledExtendVideoInput": { + "title": "LTX2DistilledExtendVideoInput", + "type": "object", + "properties": { + "use_multiscale": { + "title": "Use Multi-Scale", + "type": "boolean", + "description": "Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details.", + "default": true + }, + "video_url": { + "examples": [ + "https://v3b.fal.media/files/b/0a8824b1/sdm0KfmenrlywesfzY1Y1_if6euPp1.mp4" + ], + "title": "Video URL", + "type": "string", + "description": "The URL of the video to extend." + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high", + "full" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use.", + "examples": [ + "none" + ], + "default": "none" + }, + "generate_audio": { + "title": "Generate Audio", + "type": "boolean", + "description": "Whether to generate audio for the video.", + "default": true + }, + "prompt": { + "examples": [ + "Continue the scene naturally, maintaining the same style and motion." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the video from." + }, + "fps": { + "minimum": 1, + "title": "FPS", + "type": "number", + "description": "The frames per second of the generated video.", + "maximum": 60, + "default": 25 + }, + "camera_lora": { + "enum": [ + "dolly_in", + "dolly_out", + "dolly_left", + "dolly_right", + "jib_up", + "jib_down", + "static", + "none" + ], + "title": "Camera LoRA", + "type": "string", + "description": "The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.", + "examples": [ + "none" + ], + "default": "none" + }, + "video_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "auto", + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated video.", + "title": "Video Size", + "default": "auto" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "camera_lora_scale": { + "minimum": 0, + "title": "Camera LoRA Scale", + "type": "number", + "description": "The scale of the camera LoRA to use. 
This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.", + "maximum": 1, + "default": 1 + }, + "num_frames": { + "description": "The number of frames to generate.", + "type": "integer", + "minimum": 9, + "title": "Number of Frames", + "maximum": 481, + "default": 121 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to generate the video from.", + "default": "blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, off-sync audio,incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts." + }, + "video_strength": { + "minimum": 0, + "title": "Video Strength", + "type": "number", + "description": "Video conditioning strength. Lower values represent more freedom given to the model to change the video content.", + "maximum": 1, + "default": 1 + }, + "video_output_type": { + "enum": [ + "X264 (.mp4)", + "VP9 (.webm)", + "PRORES4444 (.mov)", + "GIF (.gif)" + ], + "title": "Video Output Type", + "type": "string", + "description": "The output type of the generated video.", + "default": "X264 (.mp4)" + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "description": "The write mode of the generated video.", + "default": "balanced" + }, + "num_context_frames": { + "description": "The number of frames to use as context for the extension.", + "type": "integer", + "minimum": 0, + "title": "Number of Context Frames", + "maximum": 121, + "default": 25 + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "description": "The quality of the generated video.", + "default": "high" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_prompt_expansion": { + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + }, + "audio_strength": { + "minimum": 0, + "title": "Audio Strength", + "type": "number", + "description": "Audio conditioning strength. 
Lower values represent more freedom given to the model to change the audio content.", + "maximum": 1, + "default": 1 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The seed for the random number generator.", + "title": "Seed" + }, + "match_input_fps": { + "title": "Match Input FPS", + "type": "boolean", + "description": "When true, match the output FPS to the input video's FPS instead of using the default target FPS.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "video_url", + "num_frames", + "video_size", + "generate_audio", + "use_multiscale", + "match_input_fps", + "fps", + "acceleration", + "camera_lora", + "camera_lora_scale", + "negative_prompt", + "seed", + "enable_prompt_expansion", + "enable_safety_checker", + "video_output_type", + "video_quality", + "video_write_mode", + "sync_mode", + "num_context_frames", + "video_strength", + "audio_strength" + ], + "required": [ + "prompt", + "video_url" + ] + }, + "Ltx219bDistilledExtendVideoOutput": { + "title": "LTX2ExtendVideoOutput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A woman stands still amid a busy neon-lit street at night. The camera slowly dollies in toward her face as people blur past, their motion emphasizing her calm presence. City lights flicker and reflections shift across her denim jacket." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for the generation." + }, + "seed": { + "examples": [ + 2078003885 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used for the random number generator." + }, + "video": { + "examples": [ + { + "height": 704, + "duration": 6.44, + "url": "https://v3b.fal.media/files/b/0a894013/N9lnMTq7W3uMC0lOQg845_BknRPV8I.mp4", + "width": 1248, + "fps": 25, + "file_name": "CJcQGDrxOSRg2YFl5GNDt_glXPMoji.mp4", + "content_type": "video/mp4", + "num_frames": 161 + } + ], + "description": "The generated video.", + "$ref": "#/components/schemas/VideoFile" + } + }, + "x-fal-order-properties": [ + "video", + "seed", + "prompt" + ], + "required": [ + "video", + "seed", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "duration": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The duration of the video", + "title": "Duration" + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The height of the video", + "title": "Height" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The width of the video", + "title": "Width" + }, + "fps": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The FPS of the video", + "title": "Fps" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "num_frames": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The number of frames in the video", + "title": "Num Frames" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-2-19b/distilled/extend-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/distilled/extend-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/distilled/extend-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx219bDistilledExtendVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/distilled/extend-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx219bDistilledExtendVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx-2-19b/extend-video/lora", + "metadata": { + "display_name": "LTX-2 19B", + "category": "video-to-video", + "description": "Extend video with audio using LTX-2 and custom LoRA", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:42.615Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a89367e/r31DU9uN4dWdHIBJvD-gC_ed11b6e0ebc649688c7ed39cb6f74b6e.jpg", + "model_url": "https://fal.run/fal-ai/ltx-2-19b/extend-video/lora", + "license_type": "commercial", + "date": "2026-01-05T20:38:54.203Z", + "group": { + "key": "ltx-2-19b", + "label": "Extend Video with LoRA" + }, + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/ltx2-video-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/ltx2-video-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-2-19b/extend-video/lora", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx-2-19b/extend-video/lora queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-2-19b/extend-video/lora", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a89367e/r31DU9uN4dWdHIBJvD-gC_ed11b6e0ebc649688c7ed39cb6f74b6e.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx-2-19b/extend-video/lora", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx-2-19b/extend-video/lora/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Ltx219bExtendVideoLoraInput": { + "title": "LTX2LoRAExtendVideoInput", + "type": "object", + "properties": { + "use_multiscale": { + "title": "Use Multi-Scale", + "type": "boolean", + "description": "Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details.", + "default": true + }, + "video_url": { + "examples": [ + "https://v3b.fal.media/files/b/0a8824b1/sdm0KfmenrlywesfzY1Y1_if6euPp1.mp4" + ], + "title": "Video URL", + "type": "string", + "description": "The URL of the video to extend." + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high", + "full" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use.", + "examples": [ + "regular" + ], + "default": "regular" + }, + "generate_audio": { + "title": "Generate Audio", + "type": "boolean", + "description": "Whether to generate audio for the video.", + "default": true + }, + "prompt": { + "examples": [ + "Continue the scene naturally, maintaining the same style and motion." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the video from." + }, + "fps": { + "minimum": 1, + "title": "FPS", + "type": "number", + "description": "The frames per second of the generated video.", + "maximum": 60, + "default": 25 + }, + "loras": { + "title": "LoRAs", + "type": "array", + "description": "The LoRAs to use for the generation.", + "items": { + "$ref": "#/components/schemas/LoRAInput" + } + }, + "camera_lora": { + "enum": [ + "dolly_in", + "dolly_out", + "dolly_left", + "dolly_right", + "jib_up", + "jib_down", + "static", + "none" + ], + "title": "Camera LoRA", + "type": "string", + "description": "The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.", + "examples": [ + "none" + ], + "default": "none" + }, + "video_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "auto", + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated video.", + "title": "Video Size", + "default": "auto" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "camera_lora_scale": { + "minimum": 0, + "title": "Camera LoRA Scale", + "type": "number", + "description": "The scale of the camera LoRA to use. 
This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.", + "maximum": 1, + "default": 1 + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance Scale", + "type": "number", + "description": "The guidance scale to use.", + "maximum": 10, + "default": 3 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to generate the video from.", + "default": "blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, off-sync audio,incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts." + }, + "num_frames": { + "description": "The number of frames to generate.", + "type": "integer", + "minimum": 9, + "title": "Number of Frames", + "maximum": 481, + "default": 121 + }, + "video_strength": { + "minimum": 0, + "title": "Video Strength", + "type": "number", + "description": "Video conditioning strength. 
Lower values represent more freedom given to the model to change the video content.", + "maximum": 1, + "default": 1 + }, + "video_output_type": { + "enum": [ + "X264 (.mp4)", + "VP9 (.webm)", + "PRORES4444 (.mov)", + "GIF (.gif)" + ], + "title": "Video Output Type", + "type": "string", + "description": "The output type of the generated video.", + "default": "X264 (.mp4)" + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "description": "The write mode of the generated video.", + "default": "balanced" + }, + "num_context_frames": { + "description": "The number of frames to use as context for the extension.", + "type": "integer", + "minimum": 0, + "title": "Number of Context Frames", + "maximum": 121, + "default": 25 + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "description": "The quality of the generated video.", + "default": "high" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_prompt_expansion": { + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + }, + "num_inference_steps": { + "minimum": 8, + "title": "Number of Inference Steps", + "type": "integer", + "description": "The number of inference steps to use.", + "maximum": 50, + "default": 40 + }, + "audio_strength": { + "minimum": 0, + "title": "Audio Strength", + "type": "number", + "description": "Audio conditioning strength. Lower values represent more freedom given to the model to change the audio content.", + "maximum": 1, + "default": 1 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The seed for the random number generator.", + "title": "Seed" + }, + "match_input_fps": { + "title": "Match Input FPS", + "type": "boolean", + "description": "When true, match the output FPS to the input video's FPS instead of using the default target FPS.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "video_url", + "num_frames", + "video_size", + "generate_audio", + "use_multiscale", + "match_input_fps", + "fps", + "guidance_scale", + "num_inference_steps", + "acceleration", + "camera_lora", + "camera_lora_scale", + "negative_prompt", + "seed", + "enable_prompt_expansion", + "enable_safety_checker", + "video_output_type", + "video_quality", + "video_write_mode", + "sync_mode", + "loras", + "num_context_frames", + "video_strength", + "audio_strength" + ], + "required": [ + "loras", + "prompt", + "video_url" + ] + }, + "Ltx219bExtendVideoLoraOutput": { + "title": "LTX2ExtendVideoOutput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A woman stands still amid a busy neon-lit street at night. The camera slowly dollies in toward her face as people blur past, their motion emphasizing her calm presence. City lights flicker and reflections shift across her denim jacket." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for the generation." + }, + "seed": { + "examples": [ + 2078003885 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used for the random number generator." 
+ }, + "video": { + "examples": [ + { + "height": 704, + "duration": 6.44, + "url": "https://v3b.fal.media/files/b/0a894013/N9lnMTq7W3uMC0lOQg845_BknRPV8I.mp4", + "width": 1248, + "fps": 25, + "file_name": "CJcQGDrxOSRg2YFl5GNDt_glXPMoji.mp4", + "content_type": "video/mp4", + "num_frames": 161 + } + ], + "description": "The generated video.", + "$ref": "#/components/schemas/VideoFile" + } + }, + "x-fal-order-properties": [ + "video", + "seed", + "prompt" + ], + "required": [ + "video", + "seed", + "prompt" + ] + }, + "LoRAInput": { + "title": "LoRAInput", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL, HuggingFace repo ID (owner/repo) to lora weights." + }, + "scale": { + "minimum": 0, + "title": "Scale", + "type": "number", + "description": "Scale factor for LoRA application (0.0 to 4.0).", + "maximum": 4, + "default": 1 + }, + "weight_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Name of the LoRA weight. Only used if `path` is a HuggingFace repository, and is only required when the repository contains multiple LoRA weights.", + "title": "Weight Name" + } + }, + "x-fal-order-properties": [ + "path", + "weight_name", + "scale" + ], + "description": "LoRA weight configuration.", + "required": [ + "path" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "duration": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The duration of the video", + "title": "Duration" + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The height of the video", + "title": "Height" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The width of the video", + "title": "Width" + }, + "fps": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The FPS of the video", + "title": "Fps" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "num_frames": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The number of frames in the video", + "title": "Num Frames" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-2-19b/extend-video/lora/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/extend-video/lora/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/extend-video/lora": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx219bExtendVideoLoraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/extend-video/lora/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx219bExtendVideoLoraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx-2-19b/extend-video", + "metadata": { + "display_name": "LTX-2 19B", + "category": "video-to-video", + "description": "Extend video with audio using LTX-2", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:43.000Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a893642/HexMym-GfrSQ1RbH2DZjj_f150a074b33f45b6b95a2dc39624dd69.jpg", + "model_url": "https://fal.run/fal-ai/ltx-2-19b/extend-video", + "license_type": "commercial", + "date": "2026-01-05T20:27:27.462Z", + "group": { + "key": "ltx-2-19b", + "label": "Extend Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/ltx2-video-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/ltx2-video-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-2-19b/extend-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx-2-19b/extend-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-2-19b/extend-video", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a893642/HexMym-GfrSQ1RbH2DZjj_f150a074b33f45b6b95a2dc39624dd69.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx-2-19b/extend-video", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx-2-19b/extend-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Ltx219bExtendVideoInput": { + "title": "LTX2ExtendVideoInput", + "type": "object", + "properties": { + "use_multiscale": { + "title": "Use Multi-Scale", + "type": "boolean", + "description": "Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details.", + "default": true + }, + "video_url": { + "examples": [ + "https://v3b.fal.media/files/b/0a8824b1/sdm0KfmenrlywesfzY1Y1_if6euPp1.mp4" + ], + "title": "Video URL", + "type": "string", + "description": "The URL of the video to extend." + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high", + "full" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use.", + "examples": [ + "regular" + ], + "default": "regular" + }, + "generate_audio": { + "title": "Generate Audio", + "type": "boolean", + "description": "Whether to generate audio for the video.", + "default": true + }, + "prompt": { + "examples": [ + "Continue the scene naturally, maintaining the same style and motion." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the video from." + }, + "fps": { + "minimum": 1, + "title": "FPS", + "type": "number", + "description": "The frames per second of the generated video.", + "maximum": 60, + "default": 25 + }, + "camera_lora": { + "enum": [ + "dolly_in", + "dolly_out", + "dolly_left", + "dolly_right", + "jib_up", + "jib_down", + "static", + "none" + ], + "title": "Camera LoRA", + "type": "string", + "description": "The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.", + "examples": [ + "none" + ], + "default": "none" + }, + "video_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "auto", + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "description": "The size of the generated video.", + "title": "Video Size", + "default": "auto" + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "camera_lora_scale": { + "minimum": 0, + "title": "Camera LoRA Scale", + "type": "number", + "description": "The scale of the camera LoRA to use. 
This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.", + "maximum": 1, + "default": 1 + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance Scale", + "type": "number", + "description": "The guidance scale to use.", + "maximum": 10, + "default": 3 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to generate the video from.", + "default": "blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, off-sync audio,incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts." + }, + "num_frames": { + "description": "The number of frames to generate.", + "type": "integer", + "minimum": 9, + "title": "Number of Frames", + "maximum": 481, + "default": 121 + }, + "video_strength": { + "minimum": 0, + "title": "Video Strength", + "type": "number", + "description": "Video conditioning strength. 
Lower values represent more freedom given to the model to change the video content.", + "maximum": 1, + "default": 1 + }, + "video_output_type": { + "enum": [ + "X264 (.mp4)", + "VP9 (.webm)", + "PRORES4444 (.mov)", + "GIF (.gif)" + ], + "title": "Video Output Type", + "type": "string", + "description": "The output type of the generated video.", + "default": "X264 (.mp4)" + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "description": "The write mode of the generated video.", + "default": "balanced" + }, + "num_context_frames": { + "description": "The number of frames to use as context for the extension.", + "type": "integer", + "minimum": 0, + "title": "Number of Context Frames", + "maximum": 121, + "default": 25 + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "description": "The quality of the generated video.", + "default": "high" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_prompt_expansion": { + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + }, + "num_inference_steps": { + "minimum": 8, + "title": "Number of Inference Steps", + "type": "integer", + "description": "The number of inference steps to use.", + "maximum": 50, + "default": 40 + }, + "audio_strength": { + "minimum": 0, + "title": "Audio Strength", + "type": "number", + "description": "Audio conditioning strength. Lower values represent more freedom given to the model to change the audio content.", + "maximum": 1, + "default": 1 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The seed for the random number generator.", + "title": "Seed" + }, + "match_input_fps": { + "title": "Match Input FPS", + "type": "boolean", + "description": "When true, match the output FPS to the input video's FPS instead of using the default target FPS.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "video_url", + "num_frames", + "video_size", + "generate_audio", + "use_multiscale", + "match_input_fps", + "fps", + "guidance_scale", + "num_inference_steps", + "acceleration", + "camera_lora", + "camera_lora_scale", + "negative_prompt", + "seed", + "enable_prompt_expansion", + "enable_safety_checker", + "video_output_type", + "video_quality", + "video_write_mode", + "sync_mode", + "num_context_frames", + "video_strength", + "audio_strength" + ], + "description": "Direction to extend the video. 'forward' extends from the end of the video, 'backward' extends from the beginning.", + "required": [ + "prompt", + "video_url" + ] + }, + "Ltx219bExtendVideoOutput": { + "title": "LTX2ExtendVideoOutput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A woman stands still amid a busy neon-lit street at night. The camera slowly dollies in toward her face as people blur past, their motion emphasizing her calm presence. City lights flicker and reflections shift across her denim jacket."
+ ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for the generation." + }, + "seed": { + "examples": [ + 2078003885 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used for the random number generator." + }, + "video": { + "examples": [ + { + "height": 704, + "duration": 6.44, + "url": "https://v3b.fal.media/files/b/0a894013/N9lnMTq7W3uMC0lOQg845_BknRPV8I.mp4", + "width": 1248, + "fps": 25, + "file_name": "CJcQGDrxOSRg2YFl5GNDt_glXPMoji.mp4", + "content_type": "video/mp4", + "num_frames": 161 + } + ], + "description": "The generated video.", + "$ref": "#/components/schemas/VideoFile" + } + }, + "x-fal-order-properties": [ + "video", + "seed", + "prompt" + ], + "required": [ + "video", + "seed", + "prompt" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the generated image.", + "maximum": 14142, + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "title": "File Size", + "examples": [ + 4404019 + ] + }, + "duration": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The duration of the video", + "title": "Duration" + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The height of the video", + "title": "Height" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The width of the video", + "title": "Width" + }, + "fps": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The FPS of the video", + "title": "Fps" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "num_frames": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The number of frames in the video", + "title": "Num Frames" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-2-19b/extend-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/extend-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/extend-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx219bExtendVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2-19b/extend-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx219bExtendVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "bria/video/erase/keypoints", + "metadata": { + "display_name": "Video", + "category": "video-to-video", + "description": "A high-fidelity capability for erasing unwanted objects, people, or visual elements from videos while maintaining aesthetic quality and temporal consistency.", + "status": "active", + "tags": [ + "bria", + "video", + "erase", + "keypoints" + ], + "updated_at": "2026-01-26T21:41:47.444Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a875807/hufu-WhXnSbIHPs2JUqFE_f40049919fd148f293c7e9aeb04c71ce.jpg", + "model_url": "https://fal.run/bria/video/erase/keypoints", + "license_type": "commercial", + "date": "2025-12-23T19:47:59.068Z", + "group": { + "key": "bria", + "label": "Video Eraser (Keypoints)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for bria/video/erase/keypoints", + "version": "1.0.0", + "description": "The OpenAPI schema for the bria/video/erase/keypoints queue.", + "x-fal-metadata": { + "endpointId": "bria/video/erase/keypoints", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a875807/hufu-WhXnSbIHPs2JUqFE_f40049919fd148f293c7e9aeb04c71ce.jpg", + "playgroundUrl": "https://fal.ai/models/bria/video/erase/keypoints", + "documentationUrl": "https://fal.ai/models/bria/video/erase/keypoints/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The 
request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "VideoEraseKeypointsInput": { + "title": "EraseByKeyPointsInputModel", + "type": "object", + "properties": { + "preserve_audio": { + "description": "If true, audio will be preserved in the output video.", + "type": "boolean", + "title": "Preserve Audio", + "default": true + }, + "video_url": { + "examples": [ + "https://bria-test-images.s3.us-east-1.amazonaws.com/videos/eraser_mask/woman_right_side.mov" + ], + "description": "Input video to erase object from. duration must be less than 5s.", + "type": "string", + "title": "Video Url" + }, + "output_container_and_codec": { + "enum": [ + "mp4_h265", + "mp4_h264", + "webm_vp9", + "gif", + "mov_h264", + "mov_h265", + "mov_proresks", + "mkv_h264", + "mkv_h265", + "mkv_vp9", + "mkv_mpeg4" + ], + "description": "Output container and codec. Options: mp4_h265, mp4_h264, webm_vp9, gif, mov_h264, mov_h265, mov_proresks, mkv_h264, mkv_h265, mkv_vp9, mkv_mpeg4.", + "type": "string", + "title": "Output Container And Codec", + "default": "mp4_h264" + }, + "keypoints": { + "examples": [ + [ + "{'x': 765, 'y': 344, 'type': 'positive'}", + "{'x': 200, 'y': 200, 'type': 'negative'}" + ] + ], + "description": "Input keypoints [x,y] to erase or keep from the video. Format like so: {'x':100, 'y':100, 'type':'positive/negative'}", + "type": "array", + "title": "Keypoints", + "items": { + "type": "string" + } + }, + "auto_trim": { + "description": "auto trim the video, to working duration ( 5s )", + "type": "boolean", + "title": "Auto Trim", + "default": true + } + }, + "x-fal-order-properties": [ + "output_container_and_codec", + "auto_trim", + "preserve_audio", + "keypoints", + "video_url" + ], + "required": [ + "keypoints", + "video_url" + ] + }, + "VideoEraseKeypointsOutput": { + "title": "VideoOutput", + "type": "object", + "properties": { + "video": { + "anyOf": [ + { + "$ref": "#/components/schemas/Video" + }, + { + "$ref": "#/components/schemas/File" + } + ], + "description": "Final video.", + "title": "Video" + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "Video": { + "title": "Video", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/bria/video/erase/keypoints/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/video/erase/keypoints/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/bria/video/erase/keypoints": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VideoEraseKeypointsInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/video/erase/keypoints/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VideoEraseKeypointsOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "bria/video/erase/prompt", + "metadata": { + "display_name": "Video", + "category": "video-to-video", + "description": "A high-fidelity capability for erasing unwanted objects, people, or visual elements from videos while maintaining aesthetic quality and temporal consistency", + "status": "active", + "tags": [ + "bria", + "video", + "erase" + ], + "updated_at": "2026-01-26T21:41:47.588Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a875a33/-FGyIBxFrcWVZh-vsPpsC_f673e5d46d864e53becd4f71174b73ec.jpg", + "model_url": "https://fal.run/bria/video/erase/prompt", + "license_type": "commercial", + "date": "2025-12-23T19:39:44.048Z", + "group": { + "key": "bria", + "label": "Video Eraser (Prompt)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for bria/video/erase/prompt", + "version": "1.0.0", + "description": "The OpenAPI schema for the bria/video/erase/prompt queue.", + "x-fal-metadata": { + "endpointId": "bria/video/erase/prompt", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a875a33/-FGyIBxFrcWVZh-vsPpsC_f673e5d46d864e53becd4f71174b73ec.jpg", + "playgroundUrl": "https://fal.ai/models/bria/video/erase/prompt", + "documentationUrl": "https://fal.ai/models/bria/video/erase/prompt/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "VideoErasePromptInput": { + "title": "EraseByPromptInputModel", + "type": "object", + "properties": { + "preserve_audio": { + "description": "If true, audio will be preserved in the output video.", + "type": "boolean", + "title": "Preserve Audio", + "default": true + }, + "video_url": { + "examples": [ + "https://bria-test-images.s3.us-east-1.amazonaws.com/videos/eraser_mask/woman_right_side.mov" + ], + "description": "Input video to erase object from. duration must be less than 5s.", + "type": "string", + "title": "Video Url" + }, + "prompt": { + "examples": [ + "women" + ], + "description": "Input prompt to detect object to erase", + "type": "string", + "title": "Prompt" + }, + "output_container_and_codec": { + "enum": [ + "mp4_h265", + "mp4_h264", + "webm_vp9", + "gif", + "mov_h264", + "mov_h265", + "mov_proresks", + "mkv_h264", + "mkv_h265", + "mkv_vp9", + "mkv_mpeg4" + ], + "description": "Output container and codec. Options: mp4_h265, mp4_h264, webm_vp9, gif, mov_h264, mov_h265, mov_proresks, mkv_h264, mkv_h265, mkv_vp9, mkv_mpeg4.", + "type": "string", + "title": "Output Container And Codec", + "default": "mp4_h264" + }, + "auto_trim": { + "description": "auto trim the video, to working duration ( 5s )", + "type": "boolean", + "title": "Auto Trim", + "default": true + } + }, + "x-fal-order-properties": [ + "output_container_and_codec", + "auto_trim", + "preserve_audio", + "prompt", + "video_url" + ], + "required": [ + "prompt", + "video_url" + ] + }, + "VideoErasePromptOutput": { + "title": "VideoOutput", + "type": "object", + "properties": { + "video": { + "anyOf": [ + { + "$ref": "#/components/schemas/Video" + }, + { + "$ref": "#/components/schemas/File" + } + ], + "description": "Final video.", + "title": "Video" + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "Video": { + "title": "Video", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided." 
+ }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/bria/video/erase/prompt/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/video/erase/prompt/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/bria/video/erase/prompt": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VideoErasePromptInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/video/erase/prompt/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VideoErasePromptOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "bria/video/erase/mask", + "metadata": { + "display_name": "Video", + "category": "video-to-video", + "description": "A high-fidelity capability for erasing unwanted objects, people, or visual elements from videos while maintaining aesthetic quality and temporal consistency.\n", + "status": "active", + "tags": [ + "bria", + "video", + "erase" + ], + "updated_at": "2026-01-26T21:41:47.727Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a875792/fiFX5LR2jtecUmE7yLyY8_340c579c0b1e408ab0d7920d3a2d3e32.jpg", + "model_url": "https://fal.run/bria/video/erase/mask", + "license_type": "commercial", + "date": "2025-12-23T19:20:38.011Z", + "group": { + "key": "bria", + "label": "Video Eraser (Mask)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for bria/video/erase/mask", + "version": "1.0.0", + "description": "The 
OpenAPI schema for the bria/video/erase/mask queue.", + "x-fal-metadata": { + "endpointId": "bria/video/erase/mask", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a875792/fiFX5LR2jtecUmE7yLyY8_340c579c0b1e408ab0d7920d3a2d3e32.jpg", + "playgroundUrl": "https://fal.ai/models/bria/video/erase/mask", + "documentationUrl": "https://fal.ai/models/bria/video/erase/mask/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "VideoEraseMaskInput": { + "title": "EraseInputModel", + "type": "object", + "properties": { + "preserve_audio": { + "description": "If true, audio will be preserved in the output video.", + "type": "boolean", + "title": "Preserve Audio", + "default": true + }, + "video_url": { + "examples": [ + "https://bria-test-images.s3.us-east-1.amazonaws.com/videos/eraser/video1_video.mp4" + ], + "description": "Input video to erase object from. duration must be less than 5s.", + "type": "string", + "title": "Video Url" + }, + "output_container_and_codec": { + "enum": [ + "mp4_h265", + "mp4_h264", + "webm_vp9", + "gif", + "mov_h264", + "mov_h265", + "mov_proresks", + "mkv_h264", + "mkv_h265", + "mkv_vp9", + "mkv_mpeg4" + ], + "description": "Output container and codec. Options: mp4_h265, mp4_h264, webm_vp9, gif, mov_h264, mov_h265, mov_proresks, mkv_h264, mkv_h265, mkv_vp9, mkv_mpeg4.", + "type": "string", + "title": "Output Container And Codec", + "default": "mp4_h264" + }, + "mask_video_url": { + "examples": [ + "https://bria-test-images.s3.us-east-1.amazonaws.com/videos/eraser/video1_mask.mp4" + ], + "description": "Input video to mask erase object from. 
duration must be less than 5s.", + "type": "string", + "title": "Mask Video Url" + }, + "auto_trim": { + "description": "auto trim the video, to working duration ( 5s )", + "type": "boolean", + "title": "Auto Trim", + "default": true + } + }, + "x-fal-order-properties": [ + "output_container_and_codec", + "auto_trim", + "preserve_audio", + "video_url", + "mask_video_url" + ], + "required": [ + "video_url", + "mask_video_url" + ] + }, + "VideoEraseMaskOutput": { + "title": "VideoOutput", + "type": "object", + "properties": { + "video": { + "anyOf": [ + { + "$ref": "#/components/schemas/Video" + }, + { + "$ref": "#/components/schemas/File" + } + ], + "description": "Final video.", + "title": "Video" + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "Video": { + "title": "Video", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/bria/video/erase/mask/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/video/erase/mask/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/bria/video/erase/mask": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VideoEraseMaskInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/video/erase/mask/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VideoEraseMaskOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/lightx/relight", + "metadata": { + "display_name": "Lightx", + "category": "video-to-video", + "description": "Use tlightx capabilities to relight and recamera your videos.", + "status": "active", + "tags": [ + "video-to-video" + ], + "updated_at": "2026-01-26T21:41:49.668Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a875717/5px7bRzEoq58JtxruJDKL_dd06f08a34c6477cbac929a55b2825ad.jpg", + "model_url": "https://fal.run/fal-ai/lightx/relight", + "license_type": "commercial", + "date": "2025-12-22T08:41:53.484Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/lightx/relight", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/lightx/relight queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/lightx/relight", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a875717/5px7bRzEoq58JtxruJDKL_dd06f08a34c6477cbac929a55b2825ad.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/lightx/relight", + "documentationUrl": "https://fal.ai/models/fal-ai/lightx/relight/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LightxRelightInput": { + "title": "LightXRelightRequest", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "Optional text prompt. If omitted, Light-X will auto-caption the video." + }, + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/lightx_video.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "URL of the input video." + }, + "relight_parameters": { + "examples": [ + { + "relight_prompt": "Sunlight", + "bg_source": "Right", + "use_sky_mask": false, + "cfg": 2 + } + ], + "title": "Relight Parameters", + "description": "Relighting parameters (required for relight_condition_type='ic'). Not used for 'bg' (which expects a background image URL instead).", + "allOf": [ + { + "$ref": "#/components/schemas/RelightParameters" + } + ] + }, + "ref_id": { + "minimum": 0, + "title": "Ref Id", + "type": "integer", + "description": "Frame index to use as referencen to relight the video with reference.", + "default": 0 + }, + "relit_cond_img_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/lightx_image.png" + ], + "title": "Relit Cond Img Url", + "type": "string", + "description": "URL of conditioning image. Required for relight_condition_type='ref'/'hdr'. Also required for relight_condition_type='bg' (background image)." + }, + "relit_cond_type": { + "enum": [ + "ic", + "ref", + "hdr", + "bg" + ], + "title": "Relit Cond Type", + "type": "string", + "description": "Relight condition type.", + "default": "ic" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility. If None, a random seed is chosen." + } + }, + "description": "Relighting-only request (minimal schema).", + "x-fal-order-properties": [ + "video_url", + "prompt", + "seed", + "relit_cond_type", + "relight_parameters", + "relit_cond_img_url", + "ref_id" + ], + "required": [ + "video_url" + ] + }, + "LightxRelightOutput": { + "title": "LightXOutput", + "type": "object", + "properties": { + "viz_video": { + "title": "Viz Video", + "description": "Optional: visualization/debug video (if produced by the pipeline).", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." 
+ }, + "input_video": { + "title": "Input Video", + "description": "Optional: normalized/processed input video (if produced by the pipeline).", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "video": { + "examples": [ + "https://v3b.fal.media/files/b/0a8715c9/x378fHboeiGD6j_0nbWlJ_gen.mp4" + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed", + "input_video", + "viz_video" + ], + "required": [ + "video", + "seed" + ] + }, + "RelightParameters": { + "title": "RelightParameters", + "type": "object", + "properties": { + "relight_prompt": { + "examples": [ + "Sunlight", + "Red and Blue Neon Light", + "Warm indoor lighting" + ], + "title": "Relight Prompt", + "type": "string", + "description": "Text prompt describing the desired lighting condition." + }, + "bg_source": { + "enum": [ + "Left", + "Right", + "Top", + "Bottom" + ], + "title": "Bg Source", + "type": "string", + "description": "Direction of the light source (used for IC-light).", + "default": "Left" + }, + "use_sky_mask": { + "title": "Use Sky Mask", + "type": "boolean", + "description": "Whether to use sky masking for outdoor scenes.", + "default": false + }, + "cfg": { + "minimum": 1, + "title": "Cfg", + "type": "number", + "maximum": 10, + "description": "Classifier-free guidance scale for relighting.", + "default": 2 + } + }, + "description": "Relighting parameters for video relighting operations.\n\nUsed with relight_condition_type 'ic' (intrinsic conditioning).", + "x-fal-order-properties": [ + "relight_prompt", + "bg_source", + "cfg", + "use_sky_mask" + ], + "required": [ + "relight_prompt" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/lightx/relight/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/lightx/relight/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/lightx/relight": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LightxRelightInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/lightx/relight/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LightxRelightOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/lightx/recamera", + "metadata": { + "display_name": "Lightx", + "category": "video-to-video", + "description": "Use the capabilities of lightx to relight and recamera your videos.", + "status": "active", + "tags": [ + "video-to-video", + "recamera", + "relight" + ], + "updated_at": "2026-01-26T21:41:49.792Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a875717/n43lZ8MEY6tF-23JFWuT7_21bfd358f8d747d2bffa37ac97e4c781.jpg", + "model_url": "https://fal.run/fal-ai/lightx/recamera", + "license_type": "commercial", + "date": "2025-12-22T08:40:57.122Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/lightx/recamera", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/lightx/recamera queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/lightx/recamera", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a875717/n43lZ8MEY6tF-23JFWuT7_21bfd358f8d747d2bffa37ac97e4c781.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/lightx/recamera", + "documentationUrl": "https://fal.ai/models/fal-ai/lightx/recamera/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." 
+ }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LightxRecameraInput": { + "title": "LightXRecameraRequest", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "Optional text prompt. If omitted, Light-X will auto-caption the video." + }, + "trajectory": { + "examples": [ + { + "theta": [ + 0, + 2, + 8, + 10, + 5, + 3, + 0, + -2, + -5, + -8, + -5, + -3, + 0 + ], + "radius": [ + 0, + 0.02, + 0.09, + 0.16, + 0.25, + 0.2, + 0.09, + 0 + ], + "phi": [ + 0, + -3, + -8, + -15, + -20, + -15, + -10, + -5, + 0 + ] + } + ], + "title": "Trajectory", + "description": "Camera trajectory parameters (required for recamera mode).", + "allOf": [ + { + "$ref": "#/components/schemas/TrajectoryParameters" + } + ] + }, + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/lightx_video.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "URL of the input video." + }, + "camera": { + "enum": [ + "traj", + "target" + ], + "title": "Camera", + "type": "string", + "description": "Camera control mode.", + "default": "traj" + }, + "target_pose": { + "examples": [ + [ + 10, + -15, + 0.2, + 0, + 0 + ] + ], + "title": "Target Pose", + "type": "array", + "description": "Target camera pose [theta, phi, radius, x, y] (required when camera='target').", + "items": { + "type": "number" + } + }, + "mode": { + "enum": [ + "gradual", + "bullet", + "direct", + "dolly-zoom" + ], + "title": "Mode", + "type": "string", + "description": "Camera motion mode.", + "default": "gradual" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility. If None, a random seed is chosen." + } + }, + "description": "Re-camera-only request (minimal schema).", + "x-fal-order-properties": [ + "video_url", + "prompt", + "seed", + "camera", + "mode", + "trajectory", + "target_pose" + ], + "required": [ + "video_url" + ] + }, + "LightxRecameraOutput": { + "title": "LightXOutput", + "type": "object", + "properties": { + "viz_video": { + "title": "Viz Video", + "description": "Optional: visualization/debug video (if produced by the pipeline).", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." 
+ }, + "input_video": { + "title": "Input Video", + "description": "Optional: normalized/processed input video (if produced by the pipeline).", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "video": { + "examples": [ + "https://v3b.fal.media/files/b/0a8715c9/x378fHboeiGD6j_0nbWlJ_gen.mp4" + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed", + "input_video", + "viz_video" + ], + "required": [ + "video", + "seed" + ] + }, + "TrajectoryParameters": { + "title": "TrajectoryParameters", + "type": "object", + "properties": { + "theta": { + "examples": [ + [ + 0, + 2, + 8, + 10, + 5, + 3, + 0, + -2, + -5, + -8, + -5, + -3, + 0 + ] + ], + "title": "Theta", + "type": "array", + "description": "Horizontal rotation angles (degrees) for each keyframe.", + "items": { + "type": "number" + } + }, + "radius": { + "examples": [ + [ + 0, + 0.02, + 0.09, + 0.16, + 0.25, + 0.2, + 0.09, + 0 + ] + ], + "title": "Radius", + "type": "array", + "description": "Camera distance scaling factors for each keyframe.", + "items": { + "type": "number" + } + }, + "phi": { + "examples": [ + [ + 0, + -3, + -8, + -15, + -20, + -15, + -10, + -5, + 0 + ] + ], + "title": "Phi", + "type": "array", + "description": "Vertical rotation angles (degrees) for each keyframe.", + "items": { + "type": "number" + } + } + }, + "description": "Camera trajectory parameters for re-camera operations.\n\nEach list represents interpolation values across frames:\n- theta: Horizontal rotation angles (degrees)\n- phi: Vertical rotation angles (degrees)\n- radius: Camera distance scaling factors", + "x-fal-order-properties": [ + "theta", + "phi", + "radius" + ], + "required": [ + "theta", + "phi", + "radius" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/lightx/recamera/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/lightx/recamera/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/lightx/recamera": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LightxRecameraInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/lightx/recamera/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LightxRecameraOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/v2.6/standard/motion-control", + "metadata": { + "display_name": "Kling Video v2.6 Motion Control [Standard]", + "category": "video-to-video", + "description": "Transfer movements from a reference video to any character image. 
Cost-effective mode for motion transfer, perfect for portraits and simple animations.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:49.916Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a875717/JVmIsgjXggpbFC-Szsr7q_8e01139cd9a1445facec60b125ca32c9.jpg", + "model_url": "https://fal.run/fal-ai/kling-video/v2.6/standard/motion-control", + "license_type": "commercial", + "date": "2025-12-21T20:50:57.333Z", + "group": { + "key": "kling-video/v2.6", + "label": "Motion Control [Standard]" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling-video/v2.6/standard/motion-control", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-video/v2.6/standard/motion-control queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/v2.6/standard/motion-control", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a875717/JVmIsgjXggpbFC-Szsr7q_8e01139cd9a1445facec60b125ca32c9.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/v2.6/standard/motion-control", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/v2.6/standard/motion-control/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoV26StandardMotionControlInput": { + "title": "MotionControlRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "An african american woman dancing" + ], + "title": "Prompt", + "type": "string", + "maxLength": 2500 + }, + "video_url": { + "description": "Reference video URL. The character actions in the generated video will be consistent with this reference video. Should contain a realistic style character with entire body or upper body visible, including head, without obstruction. 
Duration limit depends on character_orientation: 10s max for 'image', 30s max for 'video'.", + "type": "string", + "x-fal": { + "max_file_size": 104857600, + "min_duration": 3, + "min_width": 340, + "min_height": 340, + "max_duration": 30.05, + "max_height": 3850, + "timeout": 30, + "max_width": 3850 + }, + "title": "Video Url", + "examples": [ + "https://v3b.fal.media/files/b/0a8752bc/2xrNS217ngQ3wzXqA7LXr_output.mp4" + ], + "limit_description": "Max file size: 100.0MB, Min width: 340px, Min height: 340px, Max width: 3850px, Max height: 3850px, Min duration: 3.0s, Max duration: 30.05s, Timeout: 30.0s" + }, + "character_orientation": { + "enum": [ + "image", + "video" + ], + "title": "Character Orientation", + "type": "string", + "examples": [ + "video" + ], + "description": "Controls whether the output character's orientation matches the reference image or video. 'video': orientation matches reference video - better for complex motions (max 30s). 'image': orientation matches reference image - better for following camera movements (max 10s)." + }, + "keep_original_sound": { + "title": "Keep Original Sound", + "type": "boolean", + "description": "Whether to keep the original sound from the reference video.", + "default": true + }, + "image_url": { + "description": "Reference image URL. The characters, backgrounds, and other elements in the generated video are based on this reference image. Characters should have clear body proportions, avoid occlusion, and occupy more than 5% of the image area.", + "type": "string", + "x-fal": { + "max_file_size": 10485760, + "max_aspect_ratio": 2.5, + "min_aspect_ratio": 0.4, + "min_height": 340, + "max_width": 3850, + "timeout": 20, + "min_width": 340, + "max_height": 3850 + }, + "title": "Image Url", + "examples": [ + "https://v3b.fal.media/files/b/0a875302/8NaxQrQxDNHppHtqcchMm.png" + ], + "limit_description": "Max file size: 10.0MB, Min width: 340px, Min height: 340px, Max width: 3850px, Max height: 3850px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s" + } + }, + "description": "Request model for motion control video generation.", + "x-fal-order-properties": [ + "prompt", + "image_url", + "video_url", + "keep_original_sound", + "character_orientation" + ], + "required": [ + "image_url", + "video_url", + "character_orientation" + ] + }, + "KlingVideoV26StandardMotionControlOutput": { + "title": "MotionControlOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 35299865, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://v3b.fal.media/files/b/0a875336/8p3rFiXtx3fE2TLoh59KP_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "description": "Output model for motion control video generation.", + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/v2.6/standard/motion-control/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v2.6/standard/motion-control/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v2.6/standard/motion-control": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV26StandardMotionControlInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v2.6/standard/motion-control/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV26StandardMotionControlOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/v2.6/pro/motion-control", + "metadata": { + "display_name": "Kling Video v2.6 Motion Control [Pro]", + "category": "video-to-video", + "description": "Transfer movements from a reference video to any character image. 
Pro mode delivers higher quality output, ideal for complex dance moves and gestures.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:50.040Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a875735/LCSvFM3dgniinh9_nqgJX_99d6d4ec0be146c1a1e0899474b1c2ce.jpg", + "model_url": "https://fal.run/fal-ai/kling-video/v2.6/pro/motion-control", + "license_type": "commercial", + "date": "2025-12-21T20:50:54.254Z", + "group": { + "key": "kling-video/v2.6", + "label": "Motion Control [Pro]" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling-video/v2.6/pro/motion-control", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-video/v2.6/pro/motion-control queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/v2.6/pro/motion-control", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a875735/LCSvFM3dgniinh9_nqgJX_99d6d4ec0be146c1a1e0899474b1c2ce.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/v2.6/pro/motion-control", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/v2.6/pro/motion-control/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoV26ProMotionControlInput": { + "title": "MotionControlRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "An african american woman dancing" + ], + "title": "Prompt", + "type": "string", + "maxLength": 2500 + }, + "video_url": { + "description": "Reference video URL. The character actions in the generated video will be consistent with this reference video. Should contain a realistic style character with entire body or upper body visible, including head, without obstruction. 
Duration limit depends on character_orientation: 10s max for 'image', 30s max for 'video'.", + "type": "string", + "x-fal": { + "max_file_size": 104857600, + "min_duration": 3, + "min_width": 340, + "min_height": 340, + "max_duration": 30.05, + "max_height": 3850, + "timeout": 30, + "max_width": 3850 + }, + "title": "Video Url", + "examples": [ + "https://v3b.fal.media/files/b/0a8752bc/2xrNS217ngQ3wzXqA7LXr_output.mp4" + ], + "limit_description": "Max file size: 100.0MB, Min width: 340px, Min height: 340px, Max width: 3850px, Max height: 3850px, Min duration: 3.0s, Max duration: 30.05s, Timeout: 30.0s" + }, + "character_orientation": { + "enum": [ + "image", + "video" + ], + "title": "Character Orientation", + "type": "string", + "examples": [ + "video" + ], + "description": "Controls whether the output character's orientation matches the reference image or video. 'video': orientation matches reference video - better for complex motions (max 30s). 'image': orientation matches reference image - better for following camera movements (max 10s)." + }, + "keep_original_sound": { + "title": "Keep Original Sound", + "type": "boolean", + "description": "Whether to keep the original sound from the reference video.", + "default": true + }, + "image_url": { + "description": "Reference image URL. The characters, backgrounds, and other elements in the generated video are based on this reference image. Characters should have clear body proportions, avoid occlusion, and occupy more than 5% of the image area.", + "type": "string", + "x-fal": { + "max_file_size": 10485760, + "max_aspect_ratio": 2.5, + "min_aspect_ratio": 0.4, + "min_height": 340, + "max_width": 3850, + "timeout": 20, + "min_width": 340, + "max_height": 3850 + }, + "title": "Image Url", + "examples": [ + "https://v3b.fal.media/files/b/0a875302/8NaxQrQxDNHppHtqcchMm.png" + ], + "limit_description": "Max file size: 10.0MB, Min width: 340px, Min height: 340px, Max width: 3850px, Max height: 3850px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s" + } + }, + "description": "Request model for motion control video generation.", + "x-fal-order-properties": [ + "prompt", + "image_url", + "video_url", + "keep_original_sound", + "character_orientation" + ], + "required": [ + "image_url", + "video_url", + "character_orientation" + ] + }, + "KlingVideoV26ProMotionControlOutput": { + "title": "MotionControlOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 35299865, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://v3b.fal.media/files/b/0a875336/8p3rFiXtx3fE2TLoh59KP_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "description": "Output model for motion control video generation.", + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/v2.6/pro/motion-control/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v2.6/pro/motion-control/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v2.6/pro/motion-control": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV26ProMotionControlInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/v2.6/pro/motion-control/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoV26ProMotionControlOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "decart/lucy-restyle", + "metadata": { + "display_name": "Lucy Restyle", + "category": "video-to-video", + "description": "Restyle videos up to 30 min long - maintaining maximum detail quality.", + "status": "active", + "tags": [ + "video-edit" + ], + "updated_at": "2026-01-26T21:41:50.415Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a875784/qqR_QkwlbbQ2ARTcqKcKT_2afde46d6ba14b16a21637286fc188aa.jpg", + "model_url": "https://fal.run/decart/lucy-restyle", + "license_type": "commercial", + "date": "2025-12-18T18:15:56.727Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for decart/lucy-restyle", + "version": "1.0.0", + "description": "The OpenAPI schema for the decart/lucy-restyle queue.", + "x-fal-metadata": { + "endpointId": "decart/lucy-restyle", + "category": "video-to-video", 
+ "thumbnailUrl": "https://v3b.fal.media/files/b/0a875784/qqR_QkwlbbQ2ARTcqKcKT_2afde46d6ba14b16a21637286fc188aa.jpg", + "playgroundUrl": "https://fal.ai/models/decart/lucy-restyle", + "documentationUrl": "https://fal.ai/models/decart/lucy-restyle/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LucyRestyleInput": { + "title": "LucyRestyleInput", + "type": "object", + "properties": { + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the video to be generated\n and uploaded before returning the response. This will increase the\n latency of the function but it allows you to get the video directly\n in the response without going through the CDN.\n ", + "default": false + }, + "video_url": { + "examples": [ + "https://v3b.fal.media/files/b/0a86729a/wIXfP8RYCtKBQAV9HPs7o_output.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "URL of the video to edit" + }, + "resolution": { + "enum": [ + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video", + "default": "720p" + }, + "prompt": { + "examples": [ + "Make it psychedelic art style with trippy colors and patterns" + ], + "title": "Prompt", + "type": "string", + "maxLength": 1500, + "description": "Text description of the desired video content" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Seed for video generation" + }, + "enhance_prompt": { + "title": "Enhance Prompt", + "type": "boolean", + "description": "Whether to enhance the prompt for better results.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "video_url", + "seed", + "resolution", + "enhance_prompt", + "sync_mode" + ], + "required": [ + "prompt", + "video_url" + ] + }, + "LucyRestyleOutput": { + "title": "LucyRestyleOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + "https://v3b.fal.media/files/b/0a86d478/2JM2_bD0iJOmfcKHUEudv_generated_video.mp4" + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. 
It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/decart/lucy-restyle/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/decart/lucy-restyle/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/decart/lucy-restyle": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LucyRestyleInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/decart/lucy-restyle/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LucyRestyleOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/scail", + "metadata": { + "display_name": "Scail", + "category": "video-to-video", + "description": "SCAIL is a character animation model that uses 3D consistent pose representations to animate reference images with coherent motion, supporting complex movements.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:50.920Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a86b22f/NPPzBWcZWAYis8rhRFxxE_e4854f525e2247a9847bd2a7ea6a5cd5.jpg", + "model_url": "https://fal.run/fal-ai/scail", + "license_type": "commercial", + "date": "2025-12-17T18:27:17.601Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/scail", + "version": "1.0.0", + "description": "The OpenAPI schema for the 
fal-ai/scail queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/scail", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a86b22f/NPPzBWcZWAYis8rhRFxxE_e4854f525e2247a9847bd2a7ea6a5cd5.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/scail", + "documentationUrl": "https://fal.ai/models/fal-ai/scail/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ScailInput": { + "title": "ScailRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A person dancing gracefully" + ], + "description": "The prompt to guide video generation.", + "type": "string", + "title": "Prompt" + }, + "video_url": { + "examples": [ + "https://v3b.fal.media/files/b/panda/a6SvJg96V8eoglMlYFShU_5385885-hd_1080_1920_25fps.mp4" + ], + "description": "The URL of the video to use as a reference for the video generation.", + "type": "string", + "title": "Video Url" + }, + "resolution": { + "enum": [ + "512p" + ], + "title": "Resolution", + "type": "string", + "description": "Output resolution. Outputs 896x512 (landscape) or 512x896 (portrait) based on the input image aspect ratio.", + "default": "512p" + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 30, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to use for the video generation.", + "default": 28 + }, + "multi_character": { + "title": "Multi Character", + "type": "boolean", + "description": "Enable multi-character mode. 
Use when driving video has multiple people.", + "default": false + }, + "image_url": { + "examples": [ + "https://v3b.fal.media/files/b/panda/-oMlZo9Yyj_Nzoza_tgds_GmLF86r5bOt50eMMKCszy_eacc949b3933443c9915a83c98fbe85e.png" + ], + "description": "The URL of the image to use as a reference for the video generation.", + "type": "string", + "title": "Image Url" + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url", + "video_url", + "multi_character", + "resolution", + "num_inference_steps" + ], + "required": [ + "prompt", + "image_url", + "video_url" + ] + }, + "ScailOutput": { + "title": "ScailResponse", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 464837, + "file_name": "output_000000.mp4", + "content_type": "application/octet-stream", + "url": "https://v3b.fal.media/files/b/0a86b0a5/i50rQdBAsyzGiqqDDQSsl_output_000000.mp4" + } + ], + "description": "The generated video file.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/scail/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/scail/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/scail": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ScailInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/scail/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ScailOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "clarityai/crystal-video-upscaler", + "metadata": { + "display_name": "Crystal Upscaler [Video]", + "category": "video-to-video", + "description": "Do high precision video upscaling that respects the original video perfectly using Crystal Upscaler's new video upscaling method!", + "status": "active", + "tags": [ + "upscale", + "video-to-video" + ], + "updated_at": "2026-01-26T21:41:51.045Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a86b01d/L1t7sXzCg1McwUfjIr5AO_747a16db692443928b48959e58c268e3.jpg", + "model_url": "https://fal.run/clarityai/crystal-video-upscaler", + "license_type": "commercial", + "date": "2025-12-17T18:13:38.786Z", + "group": { + "key": "clarityai-clarity-upscaler", + "label": "Upscale Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for clarityai/crystal-video-upscaler", + "version": "1.0.0", + "description": "The OpenAPI schema for the clarityai/crystal-video-upscaler queue.", + "x-fal-metadata": { + "endpointId": "clarityai/crystal-video-upscaler", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a86b01d/L1t7sXzCg1McwUfjIr5AO_747a16db692443928b48959e58c268e3.jpg", + "playgroundUrl": "https://fal.ai/models/clarityai/crystal-video-upscaler", + "documentationUrl": "https://fal.ai/models/clarityai/crystal-video-upscaler/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "CrystalVideoUpscalerInput": { + "title": "CrystalVideoUpscaleInput", + "type": "object", + "properties": { + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/crystal_upscaler/video_upscaling/video_in.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "URL to the input video." + }, + "scale_factor": { + "description": "Scale factor. The scale factor must be chosen such that the upscaled video does not exceed 5K resolution.", + "type": "number", + "minimum": 1, + "title": "Scale Factor", + "examples": [ + 2 + ], + "maximum": 200, + "default": 2 + } + }, + "x-fal-order-properties": [ + "video_url", + "scale_factor" + ], + "required": [ + "video_url" + ] + }, + "CrystalVideoUpscalerOutput": { + "title": "CrystalVideoUpscaleOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "height": 2160, + "duration": 13.056527, + "url": "https://storage.googleapis.com/falserverless/example_outputs/crystal_upscaler/video_upscaling/video_out.mp4", + "fps": 23.130193905817176, + "width": 4096, + "file_name": "w0VQQvPdwvV2GSCtRTMzh_hDH8SPrB.mp4", + "content_type": "video/mp4", + "num_frames": 302 + } + ], + "title": "Video", + "description": "URL to the upscaled video", + "allOf": [ + { + "$ref": "#/components/schemas/VideoFile" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the video" + }, + "duration": { + "title": "Duration", + "type": "number", + "description": "The duration of the video" + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "fps": { + "title": "Fps", + "type": "number", + "description": "The FPS of the video" + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the video" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "num_frames": { + "title": "Num Frames", + "type": "integer", + "description": "The number of frames in the video" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/clarityai/crystal-video-upscaler/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/clarityai/crystal-video-upscaler/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/clarityai/crystal-video-upscaler": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CrystalVideoUpscalerInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/clarityai/crystal-video-upscaler/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CrystalVideoUpscalerOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "bria/bria_video_eraser/erase/mask", + "metadata": { + "display_name": "Bria Video Eraser", + "category": "video-to-video", + "description": "A high-fidelity capability for erasing unwanted objects, people, or visual elements from videos while maintaining aesthetic quality and temporal consistency.", + "status": "active", + "tags": [ + "bria", + "erase" + ], + "updated_at": "2026-01-26T21:41:51.430Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a875792/fiFX5LR2jtecUmE7yLyY8_340c579c0b1e408ab0d7920d3a2d3e32.jpg", + "model_url": "https://fal.run/bria/bria_video_eraser/erase/mask", + "license_type": "commercial", + "date": "2025-12-17T16:58:00.177Z", + "group": { + "key": "bria-video-eraser", + "label": "Mask" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for bria/bria_video_eraser/erase/mask", + "version": "1.0.0", + "description": "The OpenAPI schema for the bria/bria_video_eraser/erase/mask queue.", + "x-fal-metadata": { + "endpointId": "bria/bria_video_eraser/erase/mask", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a875792/fiFX5LR2jtecUmE7yLyY8_340c579c0b1e408ab0d7920d3a2d3e32.jpg", + "playgroundUrl": "https://fal.ai/models/bria/bria_video_eraser/erase/mask", + "documentationUrl": "https://fal.ai/models/bria/bria_video_eraser/erase/mask/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { 
+ "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Bria_video_eraserEraseMaskInput": { + "title": "EraseInputModel", + "type": "object", + "properties": { + "preserve_audio": { + "description": "If true, audio will be preserved in the output video.", + "type": "boolean", + "title": "Preserve Audio", + "default": true + }, + "video_url": { + "examples": [ + "https://bria-test-images.s3.us-east-1.amazonaws.com/videos/eraser/video1_video.mp4" + ], + "description": "Input video to erase object from. duration must be less than 5s.", + "type": "string", + "title": "Video Url" + }, + "output_container_and_codec": { + "enum": [ + "mp4_h265", + "mp4_h264", + "webm_vp9", + "gif", + "mov_h264", + "mov_h265", + "mov_proresks", + "mkv_h264", + "mkv_h265", + "mkv_vp9", + "mkv_mpeg4" + ], + "description": "Output container and codec. Options: mp4_h265, mp4_h264, webm_vp9, gif, mov_h264, mov_h265, mov_proresks, mkv_h264, mkv_h265, mkv_vp9, mkv_mpeg4.", + "type": "string", + "title": "Output Container And Codec", + "default": "mp4_h264" + }, + "mask_video_url": { + "examples": [ + "https://bria-test-images.s3.us-east-1.amazonaws.com/videos/eraser/video1_mask.mp4" + ], + "description": "Input video to mask erase object from. duration must be less than 5s.", + "type": "string", + "title": "Mask Video Url" + }, + "auto_trim": { + "description": "auto trim the video, to working duration ( 5s )", + "type": "boolean", + "title": "Auto Trim", + "default": true + } + }, + "x-fal-order-properties": [ + "output_container_and_codec", + "auto_trim", + "preserve_audio", + "video_url", + "mask_video_url" + ], + "required": [ + "video_url", + "mask_video_url" + ] + }, + "Bria_video_eraserEraseMaskOutput": { + "title": "VideoOutput", + "type": "object", + "properties": { + "video": { + "anyOf": [ + { + "$ref": "#/components/schemas/Video" + }, + { + "$ref": "#/components/schemas/File" + } + ], + "description": "Final video.", + "title": "Video" + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "Video": { + "title": "Video", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ], + "title": "File Size" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name" + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "examples": [ + "image/png" + ], + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ], + "title": "File Size" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name" + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "examples": [ + "image/png" + ], + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/bria/bria_video_eraser/erase/mask/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/bria_video_eraser/erase/mask/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/bria/bria_video_eraser/erase/mask": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Bria_video_eraserEraseMaskInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/bria_video_eraser/erase/mask/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Bria_video_eraserEraseMaskOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "bria/bria_video_eraser/erase/keypoints", + "metadata": { + "display_name": "Bria Video Eraser", + "category": "video-to-video", + "description": "A high-fidelity capability for erasing unwanted objects, people, or visual elements from videos while maintaining aesthetic quality and temporal consistency.", + "status": "active", + "tags": [ + "bria", + "erase" + ], + "updated_at": "2026-01-26T21:41:51.583Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a875807/hufu-WhXnSbIHPs2JUqFE_f40049919fd148f293c7e9aeb04c71ce.jpg", + "model_url": "https://fal.run/bria/bria_video_eraser/erase/keypoints", + "license_type": "commercial", + "date": "2025-12-17T16:54:16.002Z", + "group": { + "key": "bria-video-eraser", + "label": "Keypoints" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for bria/bria_video_eraser/erase/keypoints", + "version": "1.0.0", + "description": "The OpenAPI schema for the bria/bria_video_eraser/erase/keypoints queue.", + "x-fal-metadata": { + "endpointId": "bria/bria_video_eraser/erase/keypoints", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a875807/hufu-WhXnSbIHPs2JUqFE_f40049919fd148f293c7e9aeb04c71ce.jpg", + "playgroundUrl": "https://fal.ai/models/bria/bria_video_eraser/erase/keypoints", + "documentationUrl": "https://fal.ai/models/bria/bria_video_eraser/erase/keypoints/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Bria_video_eraserEraseKeypointsInput": { + "title": "EraseByKeyPointsInputModel", + "type": "object", + "properties": { + "preserve_audio": { + "description": "If true, audio will be preserved in the output video.", + "type": "boolean", + "title": "Preserve Audio", + "default": true + }, + "video_url": { + "examples": [ + "https://bria-test-images.s3.us-east-1.amazonaws.com/videos/eraser_mask/woman_right_side.mov" + ], + "description": "Input video to erase object from. duration must be less than 5s.", + "type": "string", + "title": "Video Url" + }, + "output_container_and_codec": { + "enum": [ + "mp4_h265", + "mp4_h264", + "webm_vp9", + "gif", + "mov_h264", + "mov_h265", + "mov_proresks", + "mkv_h264", + "mkv_h265", + "mkv_vp9", + "mkv_mpeg4" + ], + "description": "Output container and codec. Options: mp4_h265, mp4_h264, webm_vp9, gif, mov_h264, mov_h265, mov_proresks, mkv_h264, mkv_h265, mkv_vp9, mkv_mpeg4.", + "type": "string", + "title": "Output Container And Codec", + "default": "mp4_h264" + }, + "keypoints": { + "examples": [ + [ + "{'x': 765, 'y': 344, 'type': 'positive'}", + "{'x': 200, 'y': 200, 'type': 'negative'}" + ] + ], + "description": "Input keypoints [x,y] to erase or keep from the video. Format like so: {'x':100, 'y':100, 'type':'positive/negative'}", + "type": "array", + "title": "Keypoints", + "items": { + "type": "string" + } + }, + "auto_trim": { + "description": "auto trim the video, to working duration ( 5s )", + "type": "boolean", + "title": "Auto Trim", + "default": true + } + }, + "x-fal-order-properties": [ + "output_container_and_codec", + "auto_trim", + "preserve_audio", + "keypoints", + "video_url" + ], + "required": [ + "keypoints", + "video_url" + ] + }, + "Bria_video_eraserEraseKeypointsOutput": { + "title": "VideoOutput", + "type": "object", + "properties": { + "video": { + "anyOf": [ + { + "$ref": "#/components/schemas/Video" + }, + { + "$ref": "#/components/schemas/File" + } + ], + "description": "Final video.", + "title": "Video" + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "Video": { + "title": "Video", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ], + "title": "File Size" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name" + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "examples": [ + "image/png" + ], + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ], + "title": "File Size" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name" + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "examples": [ + "image/png" + ], + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/bria/bria_video_eraser/erase/keypoints/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/bria_video_eraser/erase/keypoints/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/bria/bria_video_eraser/erase/keypoints": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Bria_video_eraserEraseKeypointsInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/bria_video_eraser/erase/keypoints/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Bria_video_eraserEraseKeypointsOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "bria/bria_video_eraser/erase/prompt", + "metadata": { + "display_name": "Bria Video Eraser", + "category": "video-to-video", + "description": "A high-fidelity capability for erasing unwanted objects, people, or visual elements from videos while maintaining aesthetic quality and temporal consistency", + "status": "active", + "tags": [ + "bria", + "erase" + ], + "updated_at": "2026-01-26T21:41:51.707Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a875a33/-FGyIBxFrcWVZh-vsPpsC_f673e5d46d864e53becd4f71174b73ec.jpg", + "model_url": "https://fal.run/bria/bria_video_eraser/erase/prompt", + "license_type": "commercial", + "date": "2025-12-17T16:53:08.608Z", + "group": { + "key": "bria-video-eraser", + "label": "Prompt" 
+ }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for bria/bria_video_eraser/erase/prompt", + "version": "1.0.0", + "description": "The OpenAPI schema for the bria/bria_video_eraser/erase/prompt queue.", + "x-fal-metadata": { + "endpointId": "bria/bria_video_eraser/erase/prompt", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a875a33/-FGyIBxFrcWVZh-vsPpsC_f673e5d46d864e53becd4f71174b73ec.jpg", + "playgroundUrl": "https://fal.ai/models/bria/bria_video_eraser/erase/prompt", + "documentationUrl": "https://fal.ai/models/bria/bria_video_eraser/erase/prompt/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Bria_video_eraserErasePromptInput": { + "title": "EraseByPromptInputModel", + "type": "object", + "properties": { + "preserve_audio": { + "description": "If true, audio will be preserved in the output video.", + "type": "boolean", + "title": "Preserve Audio", + "default": true + }, + "video_url": { + "examples": [ + "https://bria-test-images.s3.us-east-1.amazonaws.com/videos/eraser_mask/woman_right_side.mov" + ], + "description": "Input video to erase object from. duration must be less than 5s.", + "type": "string", + "title": "Video Url" + }, + "prompt": { + "examples": [ + "women" + ], + "description": "Input prompt to detect object to erase", + "type": "string", + "title": "Prompt" + }, + "output_container_and_codec": { + "enum": [ + "mp4_h265", + "mp4_h264", + "webm_vp9", + "gif", + "mov_h264", + "mov_h265", + "mov_proresks", + "mkv_h264", + "mkv_h265", + "mkv_vp9", + "mkv_mpeg4" + ], + "description": "Output container and codec. 
Options: mp4_h265, mp4_h264, webm_vp9, gif, mov_h264, mov_h265, mov_proresks, mkv_h264, mkv_h265, mkv_vp9, mkv_mpeg4.", + "type": "string", + "title": "Output Container And Codec", + "default": "mp4_h264" + }, + "auto_trim": { + "description": "auto trim the video, to working duration ( 5s )", + "type": "boolean", + "title": "Auto Trim", + "default": true + } + }, + "x-fal-order-properties": [ + "output_container_and_codec", + "auto_trim", + "preserve_audio", + "prompt", + "video_url" + ], + "required": [ + "prompt", + "video_url" + ] + }, + "Bria_video_eraserErasePromptOutput": { + "title": "VideoOutput", + "type": "object", + "properties": { + "video": { + "anyOf": [ + { + "$ref": "#/components/schemas/Video" + }, + { + "$ref": "#/components/schemas/File" + } + ], + "description": "Final video.", + "title": "Video" + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "Video": { + "title": "Video", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ], + "title": "File Size" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name" + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "examples": [ + "image/png" + ], + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ], + "title": "File Size" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name" + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "examples": [ + "image/png" + ], + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/bria/bria_video_eraser/erase/prompt/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/bria_video_eraser/erase/prompt/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/bria/bria_video_eraser/erase/prompt": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Bria_video_eraserErasePromptInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/bria_video_eraser/erase/prompt/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Bria_video_eraserErasePromptOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "wan/v2.6/reference-to-video", + "metadata": { + "display_name": "Wan v2.6 Reference to Video", + "category": "video-to-video", + "description": "Wan 2.6 reference-to-video model.", + "status": "active", + "tags": [ + "reference-to-video" + ], + "updated_at": "2026-01-26T21:41:54.779Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a86d2c4/3COND31RbAVcBEj0c7P-b.png", + "model_url": "https://fal.run/wan/v2.6/reference-to-video", + "license_type": "commercial", + "date": "2025-12-16T05:08:01.945Z", + "group": { + "key": "v2.6", + "label": "Reference to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for wan/v2.6/reference-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the wan/v2.6/reference-to-video queue.", + "x-fal-metadata": { + "endpointId": "wan/v2.6/reference-to-video", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a86d2c4/3COND31RbAVcBEj0c7P-b.png", + "playgroundUrl": "https://fal.ai/models/wan/v2.6/reference-to-video", + "documentationUrl": "https://fal.ai/models/wan/v2.6/reference-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "V26ReferenceToVideoInput": { + "description": "Input for Wan 2.6 reference-to-video generation (R2V)", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Dance battle between @Video1 and @Video2." + ], + "description": "Use @Video1, @Video2, @Video3 to reference subjects from your videos. Works for people, animals, or objects. For multi-shot prompts: '[0-3s] Shot 1. [3-6s] Shot 2.' Max 800 characters.", + "type": "string", + "minLength": 1, + "title": "Prompt" + }, + "resolution": { + "enum": [ + "720p", + "1080p" + ], + "description": "Video resolution tier. R2V only supports 720p and 1080p (no 480p).", + "type": "string", + "title": "Resolution", + "default": "1080p" + }, + "video_urls": { + "examples": [ + [ + "https://v3b.fal.media/files/b/0a86742f/9rVJtQ2ukp9cid8lheutF_output.mp4", + "https://v3b.fal.media/files/b/0a867424/30OqWXFgHWqOwcP2OUwRx_output.mp4" + ] + ], + "description": "Reference videos for subject consistency (1-3 videos). Videos' FPS must be at least 16 FPS.Reference in prompt as @Video1, @Video2, @Video3. Works for people, animals, or objects.", + "type": "array", + "title": "Video Urls", + "items": { + "x-fal": { + "max_file_size": 104857600, + "timeout": 30, + "max_duration": 30, + "min_duration": 2 + }, + "type": "string", + "limit_description": "Max file size: 100.0MB, Min duration: 2s, Max duration: 30s, Timeout: 30.0s" + } + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16", + "1:1", + "4:3", + "3:4" + ], + "description": "The aspect ratio of the generated video.", + "type": "string", + "title": "Aspect Ratio", + "default": "16:9" + }, + "duration": { + "examples": [ + "5", + "10" + ], + "description": "Duration of the generated video in seconds. R2V supports only 5 or 10 seconds (no 15s).", + "type": "string", + "title": "Duration", + "enum": [ + "5", + "10" + ], + "default": "5" + }, + "enable_prompt_expansion": { + "description": "Whether to enable prompt rewriting using LLM.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": true + }, + "seed": { + "description": "Random seed for reproducibility. If None, a random seed is chosen.", + "type": "integer", + "title": "Seed" + }, + "multi_shots": { + "description": "When true (default), enables intelligent multi-shot segmentation for coherent narrative videos with multiple shots. When false, generates single continuous shot. Only active when enable_prompt_expansion is True.", + "type": "boolean", + "title": "Multi Shots", + "default": true + }, + "negative_prompt": { + "examples": [ + "low resolution, error, worst quality, low quality, defects" + ], + "description": "Negative prompt to describe content to avoid. 
Max 500 characters.", + "type": "string", + "title": "Negative Prompt", + "default": "" + }, + "enable_safety_checker": { + "examples": [ + true + ], + "description": "If set to true, the safety checker will be enabled.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "video_urls", + "aspect_ratio", + "resolution", + "duration", + "negative_prompt", + "enable_prompt_expansion", + "multi_shots", + "seed", + "enable_safety_checker" + ], + "title": "ReferenceToVideoInput", + "required": [ + "prompt", + "video_urls" + ] + }, + "V26ReferenceToVideoOutput": { + "description": "Output for reference-to-video generation", + "type": "object", + "properties": { + "actual_prompt": { + "examples": [ + "Dance battle between Character1 and Character2, cinematic lighting, dynamic camera movement." + ], + "description": "The actual prompt used if prompt rewriting was enabled", + "type": "string", + "title": "Actual Prompt" + }, + "seed": { + "examples": [ + 175932751 + ], + "description": "The seed used for generation", + "type": "integer", + "title": "Seed" + }, + "video": { + "examples": [ + { + "content_type": "video/mp4", + "url": "https://v3b.fal.media/files/b/0a86762b/iDknfPkLFSFwWkyMgJi0U_QIzjwBDQ.mp4" + } + ], + "description": "The generated video file", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/VideoFile" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed", + "actual_prompt" + ], + "title": "ReferenceToVideoOutput", + "required": [ + "video", + "seed" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the video" + }, + "duration": { + "title": "Duration", + "type": "number", + "description": "The duration of the video" + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "fps": { + "title": "Fps", + "type": "number", + "description": "The FPS of the video" + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the video" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "num_frames": { + "title": "Num Frames", + "type": "integer", + "description": "The number of frames in the video" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/wan/v2.6/reference-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/wan/v2.6/reference-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/wan/v2.6/reference-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/V26ReferenceToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/wan/v2.6/reference-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/V26ReferenceToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/veo3.1/fast/extend-video", + "metadata": { + "display_name": "Veo 3.1 Fast", + "category": "video-to-video", + "description": "Extend Veo-Created Videos up to 30 seconds", + "status": "active", + "tags": [ + "extend-video", + "" + ], + "updated_at": "2026-01-26T21:41:56.899Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8674fa/XbhmACHhbyXpu2YHjob0-_cf9e946ef4df427cbf3111197d7a092a.jpg", + "model_url": "https://fal.run/fal-ai/veo3.1/fast/extend-video", + "license_type": "commercial", + "date": "2025-12-15T22:55:47.939Z", + "group": { + "key": "veo3.1", + "label": "Extend Video (Fast)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/veo3.1/fast/extend-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/veo3.1/fast/extend-video queue.", + 
"x-fal-metadata": { + "endpointId": "fal-ai/veo3.1/fast/extend-video", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8674fa/XbhmACHhbyXpu2YHjob0-_cf9e946ef4df427cbf3111197d7a092a.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/veo3.1/fast/extend-video", + "documentationUrl": "https://fal.ai/models/fal-ai/veo3.1/fast/extend-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Veo31FastExtendVideoInput": { + "description": "Input for video extension/video-to-video generation.", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Continue the scene naturally, maintaining the same style and motion." + ], + "description": "The text prompt describing how the video should be extended", + "type": "string", + "title": "Prompt", + "maxLength": 20000 + }, + "duration": { + "enum": [ + "7s" + ], + "description": "The duration of the generated video.", + "type": "string", + "title": "Duration", + "default": "7s" + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "9:16" + ], + "description": "The aspect ratio of the generated video.", + "type": "string", + "title": "Aspect Ratio", + "default": "auto" + }, + "generate_audio": { + "description": "Whether to generate audio for the video.", + "type": "boolean", + "title": "Generate Audio", + "default": true + }, + "auto_fix": { + "description": "Whether to automatically attempt to fix prompts that fail content policy or other validation checks by rewriting them.", + "type": "boolean", + "title": "Auto Fix", + "default": false + }, + "video_url": { + "examples": [ + "https://v3b.fal.media/files/b/0a8670fe/pY8UGl4_C452wOm9XUBYO_9ae04df8771c4f3f979fa5cabeca6ada.mp4" + ], + "description": "URL of the video to extend. 
The video should be 720p or 1080p resolution in 16:9 or 9:16 aspect ratio.", + "type": "string", + "title": "Video URL" + }, + "resolution": { + "enum": [ + "720p" + ], + "description": "The resolution of the generated video.", + "type": "string", + "title": "Resolution", + "default": "720p" + }, + "seed": { + "description": "The seed for the random number generator.", + "type": "integer", + "title": "Seed" + }, + "negative_prompt": { + "description": "A negative prompt to guide the video generation.", + "type": "string", + "title": "Negative Prompt" + } + }, + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "duration", + "negative_prompt", + "resolution", + "generate_audio", + "seed", + "auto_fix", + "video_url" + ], + "title": "Veo31VideoToVideoInput", + "required": [ + "prompt", + "video_url" + ] + }, + "Veo31FastExtendVideoOutput": { + "x-fal-order-properties": [ + "video" + ], + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://v3b.fal.media/files/b/0a86711b/B_Z96VS4X9Dfd4M5ArB4H_c666e63f729f4a8fa1145c6727cef97d.mp4" + } + ], + "description": "The extended video.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "Veo31VideoToVideoOutput", + "required": [ + "video" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/veo3.1/fast/extend-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/veo3.1/fast/extend-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/veo3.1/fast/extend-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Veo31FastExtendVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/veo3.1/fast/extend-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Veo31FastExtendVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/veo3.1/extend-video", + "metadata": { + "display_name": "Veo 3.1", + "category": "video-to-video", + "description": "Extend Veo-Created Videos up to 30 seconds", + "status": "active", + "tags": [ + "extend-video" + ], + "updated_at": "2026-01-26T21:41:57.027Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8674d8/bsNAkImO8tHGm28k4uMMZ_6c677c316f174eef8553ecdc67377ba6.jpg", + "model_url": "https://fal.run/fal-ai/veo3.1/extend-video", + "license_type": "commercial", + "date": "2025-12-15T22:50:52.774Z", + "group": { + "key": "veo3.1", + "label": "Extend Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/veo3.1/extend-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/veo3.1/extend-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/veo3.1/extend-video", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8674d8/bsNAkImO8tHGm28k4uMMZ_6c677c316f174eef8553ecdc67377ba6.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/veo3.1/extend-video", + "documentationUrl": "https://fal.ai/models/fal-ai/veo3.1/extend-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Veo31ExtendVideoInput": { + "description": "Input for video extension/video-to-video generation.", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Continue the scene naturally, maintaining the same style and motion." 
+ ], + "description": "The text prompt describing how the video should be extended", + "type": "string", + "title": "Prompt", + "maxLength": 20000 + }, + "duration": { + "enum": [ + "7s" + ], + "description": "The duration of the generated video.", + "type": "string", + "title": "Duration", + "default": "7s" + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "9:16" + ], + "description": "The aspect ratio of the generated video.", + "type": "string", + "title": "Aspect Ratio", + "default": "auto" + }, + "generate_audio": { + "description": "Whether to generate audio for the video.", + "type": "boolean", + "title": "Generate Audio", + "default": true + }, + "auto_fix": { + "description": "Whether to automatically attempt to fix prompts that fail content policy or other validation checks by rewriting them.", + "type": "boolean", + "title": "Auto Fix", + "default": false + }, + "video_url": { + "examples": [ + "https://v3b.fal.media/files/b/0a8670fe/pY8UGl4_C452wOm9XUBYO_9ae04df8771c4f3f979fa5cabeca6ada.mp4" + ], + "description": "URL of the video to extend. The video should be 720p or 1080p resolution in 16:9 or 9:16 aspect ratio.", + "type": "string", + "title": "Video URL" + }, + "resolution": { + "enum": [ + "720p" + ], + "description": "The resolution of the generated video.", + "type": "string", + "title": "Resolution", + "default": "720p" + }, + "seed": { + "description": "The seed for the random number generator.", + "type": "integer", + "title": "Seed" + }, + "negative_prompt": { + "description": "A negative prompt to guide the video generation.", + "type": "string", + "title": "Negative Prompt" + } + }, + "x-fal-order-properties": [ + "prompt", + "aspect_ratio", + "duration", + "negative_prompt", + "resolution", + "generate_audio", + "seed", + "auto_fix", + "video_url" + ], + "title": "Veo31VideoToVideoInput", + "required": [ + "prompt", + "video_url" + ] + }, + "Veo31ExtendVideoOutput": { + "x-fal-order-properties": [ + "video" + ], + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://v3b.fal.media/files/b/0a86711b/B_Z96VS4X9Dfd4M5ArB4H_c666e63f729f4a8fa1145c6727cef97d.mp4" + } + ], + "description": "The extended video.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "Veo31VideoToVideoOutput", + "required": [ + "video" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/veo3.1/extend-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/veo3.1/extend-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/veo3.1/extend-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Veo31ExtendVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/veo3.1/extend-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Veo31ExtendVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/o1/standard/video-to-video/reference", + "metadata": { + "display_name": "Kling O1 Reference Video to Video [Standard]", + "category": "video-to-video", + "description": "Kling O1 Omni generates new shots guided by an input reference video, preserving cinematic language such as motion and camera style to produce seamless scene continuity.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:58.103Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a86615a/KJdMDK3gyg1EI_R6Hw1S2_d70d13f9a3e74c18b228654ea81a7077.jpg", + "model_url": "https://fal.run/fal-ai/kling-video/o1/standard/video-to-video/reference", + "license_type": "commercial", + "date": "2025-12-15T09:36:27.690Z", + "group": { + "key": "kling-video/o1", + "label": "Reference V2V [Standard]" + }, + "highlighted": false, + 
"kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling-video/o1/standard/video-to-video/reference", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-video/o1/standard/video-to-video/reference queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/o1/standard/video-to-video/reference", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a86615a/KJdMDK3gyg1EI_R6Hw1S2_d70d13f9a3e74c18b228654ea81a7077.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/o1/standard/video-to-video/reference", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/o1/standard/video-to-video/reference/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoO1StandardVideoToVideoReferenceInput": { + "title": "OmniV2VReferenceInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Based on @Video1, generate the next shot. keep the style of the video" + ], + "title": "Prompt", + "type": "string", + "maxLength": 2500, + "description": "Use @Element1, @Element2 to reference elements and @Image1, @Image2 to reference images in order." + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "9:16", + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video frame. If 'auto', the aspect ratio will be determined automatically based on the input video, and the closest aspect ratio to the input video will be used.", + "default": "auto" + }, + "duration": { + "enum": [ + "3", + "4", + "5", + "6", + "7", + "8", + "9", + "10" + ], + "title": "Duration", + "type": "string", + "description": "Video duration in seconds.", + "default": "5" + }, + "video_url": { + "x-fal": { + "min_fps": 24, + "max_fps": 60, + "max_file_size": 209715200, + "min_duration": 3, + "min_width": 720, + "min_height": 720, + "max_duration": 10.05, + "max_height": 2160, + "max_width": 2160, + "timeout": 30 + }, + "title": "Video Url", + "type": "string", + "examples": [ + "https://v3b.fal.media/files/b/panda/oVdiICFXY03Vbam-08Aj8_output.mp4" + ], + "description": "Reference video URL. 
Only .mp4/.mov formats supported, 3-10 seconds duration, 720-2160px resolution, max 200MB.\n\nMax file size: 200.0MB, Min width: 720px, Min height: 720px, Max width: 2160px, Max height: 2160px, Min duration: 3.0s, Max duration: 10.05s, Min FPS: 24.0, Max FPS: 60.0, Timeout: 30.0s" + }, + "keep_audio": { + "title": "Keep Audio", + "type": "boolean", + "description": "Whether to keep the original audio from the video.", + "default": false + }, + "elements": { + "examples": [ + [ + { + "reference_image_urls": [ + "https://v3b.fal.media/files/b/kangaroo/YMpmQkYt9xugpOTQyZW0O.png", + "https://v3b.fal.media/files/b/zebra/d6ywajNyJ6bnpa_xBue-K.png" + ], + "frontal_image_url": "https://v3b.fal.media/files/b/panda/MQp-ghIqshvMZROKh9lW3.png" + } + ] + ], + "title": "Elements", + "type": "array", + "description": "Elements (characters/objects) to include. Reference in prompt as @Element1, @Element2, etc. Maximum 4 total (elements + reference images) when using video.", + "items": { + "$ref": "#/components/schemas/OmniVideoElementInput" + } + }, + "image_urls": { + "examples": [ + [] + ], + "title": "Image Urls", + "type": "array", + "description": "Reference images for style/appearance. Reference in prompt as @Image1, @Image2, etc. Maximum 4 total (elements + reference images) when using video.", + "items": { + "x-fal": { + "min_width": 300, + "min_aspect_ratio": 0.4, + "timeout": 20, + "min_height": 300, + "max_aspect_ratio": 2.5, + "max_file_size": 10485760 + }, + "type": "string", + "limit_description": "Max file size: 10.0MB, Min width: 300px, Min height: 300px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s" + } + } + }, + "description": "Input for video editing or video-as-reference generation.", + "x-fal-order-properties": [ + "prompt", + "video_url", + "keep_audio", + "image_urls", + "elements", + "aspect_ratio", + "duration" + ], + "required": [ + "prompt", + "video_url" + ] + }, + "KlingVideoO1StandardVideoToVideoReferenceOutput": { + "title": "OmniV2VReferenceOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 28472159, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://v3b.fal.media/files/b/kangaroo/3n_Lpxm_SjK5NYyBobRdS_output.mp4" + } + ], + "title": "Video", + "description": "The generated video.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "OmniVideoElementInput": { + "title": "OmniVideoElementInput", + "type": "object", + "properties": { + "reference_image_urls": { + "title": "Reference Image Urls", + "type": "array", + "description": "Additional reference images from different angles. 1-4 images supported. 
At least one image is required.", + "items": { + "x-fal": { + "min_width": 300, + "min_aspect_ratio": 0.4, + "timeout": 20, + "min_height": 300, + "max_aspect_ratio": 2.5, + "max_file_size": 10485760 + }, + "type": "string", + "limit_description": "Max file size: 10.0MB, Min width: 300px, Min height: 300px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s" + } + }, + "frontal_image_url": { + "x-fal": { + "min_width": 300, + "min_aspect_ratio": 0.4, + "timeout": 20, + "min_height": 300, + "max_aspect_ratio": 2.5, + "max_file_size": 10485760 + }, + "title": "Frontal Image Url", + "type": "string", + "description": "The frontal image of the element (main view).\n\nMax file size: 10.0MB, Min width: 300px, Min height: 300px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s" + } + }, + "x-fal-order-properties": [ + "frontal_image_url", + "reference_image_urls" + ], + "required": [ + "frontal_image_url" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/o1/standard/video-to-video/reference/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/o1/standard/video-to-video/reference/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/o1/standard/video-to-video/reference": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoO1StandardVideoToVideoReferenceInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/o1/standard/video-to-video/reference/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoO1StandardVideoToVideoReferenceOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/o1/standard/video-to-video/edit", + "metadata": { + "display_name": "Kling O1 Edit Video [Standard]", + "category": "video-to-video", + "description": "Edit an existing video using natural-language instructions, transforming subjects, settings, and style while retaining the original motion structure.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:41:58.230Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a866155/Hj1G7kFqvg9zFnWunTT3V_6ac8815e9a52404f82016084def66f34.jpg", + "model_url": "https://fal.run/fal-ai/kling-video/o1/standard/video-to-video/edit", + "license_type": "commercial", + "date": "2025-12-15T09:34:39.306Z", + "group": { + "key": "kling-video/o1", + "label": "Edit Video [Standard]" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling-video/o1/standard/video-to-video/edit", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-video/o1/standard/video-to-video/edit queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/o1/standard/video-to-video/edit", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a866155/Hj1G7kFqvg9zFnWunTT3V_6ac8815e9a52404f82016084def66f34.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/o1/standard/video-to-video/edit", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/o1/standard/video-to-video/edit/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoO1StandardVideoToVideoEditInput": { + "title": "OmniV2VEditInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Replace the character in the video with @Element1, maintaining the same movements and camera angles. Transform the landscape into @Image1" + ], + "title": "Prompt", + "type": "string", + "maxLength": 2500, + "description": "Use @Element1, @Element2 to reference elements and @Image1, @Image2 to reference images in order." + }, + "video_url": { + "x-fal": { + "min_fps": 24, + "max_fps": 60, + "max_file_size": 209715200, + "min_duration": 3, + "min_width": 720, + "min_height": 720, + "max_duration": 10.05, + "max_height": 2160, + "max_width": 2160, + "timeout": 30 + }, + "title": "Video Url", + "type": "string", + "examples": [ + "https://v3b.fal.media/files/b/rabbit/ku8_Wdpf-oTbGRq4lB5DU_output.mp4" + ], + "description": "Reference video URL. Only .mp4/.mov formats supported, 3-10 seconds duration, 720-2160px resolution, max 200MB.\n\nMax file size: 200.0MB, Min width: 720px, Min height: 720px, Max width: 2160px, Max height: 2160px, Min duration: 3.0s, Max duration: 10.05s, Min FPS: 24.0, Max FPS: 60.0, Timeout: 30.0s" + }, + "elements": { + "examples": [ + [ + { + "reference_image_urls": [ + "https://v3b.fal.media/files/b/kangaroo/YMpmQkYt9xugpOTQyZW0O.png", + "https://v3b.fal.media/files/b/zebra/d6ywajNyJ6bnpa_xBue-K.png" + ], + "frontal_image_url": "https://v3b.fal.media/files/b/panda/MQp-ghIqshvMZROKh9lW3.png" + } + ] + ], + "title": "Elements", + "type": "array", + "description": "Elements (characters/objects) to include. Reference in prompt as @Element1, @Element2, etc. Maximum 4 total (elements + reference images) when using video.", + "items": { + "$ref": "#/components/schemas/OmniVideoElementInput" + } + }, + "image_urls": { + "examples": [ + [ + "https://v3b.fal.media/files/b/lion/MKvhFko5_wYnfORYacNII_AgPt8v25Wt4oyKhjnhVK5.png" + ] + ], + "title": "Image Urls", + "type": "array", + "description": "Reference images for style/appearance. Reference in prompt as @Image1, @Image2, etc. 
Maximum 4 total (elements + reference images) when using video.", + "items": { + "x-fal": { + "min_width": 300, + "min_aspect_ratio": 0.4, + "timeout": 20, + "min_height": 300, + "max_aspect_ratio": 2.5, + "max_file_size": 10485760 + }, + "type": "string", + "limit_description": "Max file size: 10.0MB, Min width: 300px, Min height: 300px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s" + } + }, + "keep_audio": { + "title": "Keep Audio", + "type": "boolean", + "description": "Whether to keep the original audio from the video.", + "default": false + } + }, + "description": "Input for video editing or video-as-reference generation.", + "x-fal-order-properties": [ + "prompt", + "video_url", + "keep_audio", + "image_urls", + "elements" + ], + "required": [ + "prompt", + "video_url" + ] + }, + "KlingVideoO1StandardVideoToVideoEditOutput": { + "title": "OmniV2VEditOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 7533071, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://v3b.fal.media/files/b/0a86603b/YAlbB2535l07BTy1wpDeI_output.mp4" + } + ], + "title": "Video", + "description": "The generated video.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "OmniVideoElementInput": { + "title": "OmniVideoElementInput", + "type": "object", + "properties": { + "reference_image_urls": { + "title": "Reference Image Urls", + "type": "array", + "description": "Additional reference images from different angles. 1-4 images supported. At least one image is required.", + "items": { + "x-fal": { + "min_width": 300, + "min_aspect_ratio": 0.4, + "timeout": 20, + "min_height": 300, + "max_aspect_ratio": 2.5, + "max_file_size": 10485760 + }, + "type": "string", + "limit_description": "Max file size: 10.0MB, Min width: 300px, Min height: 300px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s" + } + }, + "frontal_image_url": { + "x-fal": { + "min_width": 300, + "min_aspect_ratio": 0.4, + "timeout": 20, + "min_height": 300, + "max_aspect_ratio": 2.5, + "max_file_size": 10485760 + }, + "title": "Frontal Image Url", + "type": "string", + "description": "The frontal image of the element (main view).\n\nMax file size: 10.0MB, Min width: 300px, Min height: 300px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s" + } + }, + "x-fal-order-properties": [ + "frontal_image_url", + "reference_image_urls" + ], + "required": [ + "frontal_image_url" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/o1/standard/video-to-video/edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/o1/standard/video-to-video/edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/o1/standard/video-to-video/edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoO1StandardVideoToVideoEditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/o1/standard/video-to-video/edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoO1StandardVideoToVideoEditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/steady-dancer", + "metadata": { + "display_name": "Steady Dancer", + "category": "video-to-video", + "description": "Create smooth, realistic videos from a single photo while keeping the original appearance intact—precise motion control without losing identity or visual quality.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:00.969Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a85ecb6/lnUB6S-XSaiMknSior-jV_246c1d7641484d49ac0f744fe1cdfe63.jpg", + "model_url": "https://fal.run/fal-ai/steady-dancer", + "license_type": "commercial", + "date": "2025-12-11T22:05:42.507Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/steady-dancer", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/steady-dancer queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/steady-dancer", + "category": 
"video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a85ecb6/lnUB6S-XSaiMknSior-jV_246c1d7641484d49ac0f744fe1cdfe63.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/steady-dancer", + "documentationUrl": "https://fal.ai/models/fal-ai/steady-dancer/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "SteadyDancerInput": { + "title": "SteadyDancerRequest", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "Text prompt describing the desired animation.", + "default": "A person dancing with smooth and natural movements." + }, + "video_url": { + "title": "Video Url", + "type": "string", + "description": "URL of the driving pose video. The motion from this video will be transferred to the reference image.", + "default": "https://v3b.fal.media/files/b/0a84de68/jXDWywjhagRfR-GuZjoRs_video.mp4" + }, + "acceleration": { + "enum": [ + "light", + "moderate", + "aggressive" + ], + "title": "Acceleration", + "type": "string", + "description": "Acceleration levels.", + "default": "aggressive" + }, + "pose_guidance_scale": { + "minimum": 0.5, + "maximum": 3, + "type": "number", + "title": "Pose Guidance Scale", + "description": "Pose guidance scale for pose control strength.", + "default": 1 + }, + "shift": { + "minimum": 1, + "maximum": 10, + "type": "number", + "title": "Shift", + "description": "Shift parameter for video generation.", + "default": 5 + }, + "pose_guidance_end": { + "minimum": 0.2, + "maximum": 1, + "type": "number", + "title": "Pose Guidance End", + "description": "End ratio for pose guidance. Controls when pose guidance ends.", + "default": 0.4 + }, + "frames_per_second": { + "minimum": 5, + "maximum": 24, + "type": "integer", + "title": "Frames Per Second", + "description": "Frames per second of the generated video. Must be between 5 to 24. If not specified, uses the FPS from the input video." + }, + "guidance_scale": { + "minimum": 1, + "maximum": 6, + "type": "number", + "title": "Guidance Scale", + "description": "Classifier-free guidance scale for prompt adherence.", + "default": 1 + }, + "num_frames": { + "minimum": 5, + "maximum": 241, + "type": "integer", + "title": "Num Frames", + "description": "Number of frames to generate. If not specified, uses the frame count from the input video (capped at 241). Will be adjusted to nearest valid value (must satisfy 4k+1 pattern)." 
+ }, + "use_turbo": { + "examples": [ + true + ], + "title": "Use Turbo", + "type": "boolean", + "description": "If true, applies quality enhancement for faster generation with improved quality. When enabled, parameters are automatically optimized (num_inference_steps=6, guidance_scale=1.0) and uses the LightX2V distillation LoRA.", + "default": false + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for video generation.", + "default": "blurred, distorted face, bad anatomy, extra limbs, poorly drawn hands, poorly drawn feet, disfigured, out of frame, duplicate, watermark, signature, text" + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": false + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "9:16", + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio of the generated video. If 'auto', will be determined from the reference image.", + "default": "auto" + }, + "pose_guidance_start": { + "minimum": 0, + "maximum": 0.5, + "type": "number", + "title": "Pose Guidance Start", + "description": "Start ratio for pose guidance. Controls when pose guidance begins.", + "default": 0.1 + }, + "resolution": { + "enum": [ + "480p", + "576p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video. 576p is default, 720p for higher quality. 480p is lower quality.", + "default": "576p" + }, + "image_url": { + "title": "Image Url", + "type": "string", + "description": "URL of the reference image to animate. This is the person/character whose appearance will be preserved.", + "default": "https://v3b.fal.media/files/b/0a85edaa/GDUCMPrdvOMcI5JpEcU7f.png" + }, + "preserve_audio": { + "title": "Preserve Audio", + "type": "boolean", + "description": "If enabled, copies audio from the input driving video to the output video.", + "default": true + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility. If None, a random seed is chosen." + }, + "num_inference_steps": { + "minimum": 4, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "default": 6 + } + }, + "description": "Request model for SteadyDancer human animation.", + "x-fal-order-properties": [ + "image_url", + "video_url", + "prompt", + "negative_prompt", + "frames_per_second", + "seed", + "resolution", + "num_inference_steps", + "guidance_scale", + "pose_guidance_scale", + "shift", + "pose_guidance_start", + "pose_guidance_end", + "acceleration", + "enable_safety_checker", + "aspect_ratio", + "num_frames", + "preserve_audio", + "use_turbo" + ] + }, + "SteadyDancerOutput": { + "title": "SteadyDancerResponse", + "type": "object", + "properties": { + "num_frames": { + "title": "Num Frames", + "type": "integer", + "description": "The actual number of frames generated (aligned to 4k+1 pattern)." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." 
+ }, + "video": { + "examples": [ + { + "file_size": 7772111, + "file_name": "ll5ps0ZyBgxBkuWz-fHcT_output_with_audio.mp4", + "content_type": "application/octet-stream", + "url": "https://v3b.fal.media/files/b/0a87871b/ll5ps0ZyBgxBkuWz-fHcT_output_with_audio.mp4" + } + ], + "title": "Video", + "description": "The generated dance animation video.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed", + "num_frames" + ], + "description": "Response model for SteadyDancer.", + "required": [ + "video", + "seed", + "num_frames" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/steady-dancer/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/steady-dancer/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/steady-dancer": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SteadyDancerInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/steady-dancer/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SteadyDancerOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/one-to-all-animation/1.3b", + "metadata": { + "display_name": "One To All Animation", + "category": "video-to-video", + "description": "One-to-All Animation is a pose driven video model that animates characters from a single reference image, enabling flexible, alignment-free motion transfer across diverse styles and scenes", + "status": "active", + "tags": [ + "video to video", + "motion" + ], + "updated_at": "2026-01-26T21:42:01.415Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a85e86c/_YM6CaRHSSPdK06dwfle4_5ca446af223a4a7db272e9db359fada1.jpg", + "model_url": "https://fal.run/fal-ai/one-to-all-animation/1.3b", + "license_type": "commercial", + "date": "2025-12-11T18:59:16.434Z", + "group": { + "key": "one-to-all-Animation", + "label": "1.3B" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/one-to-all-animation/1.3b", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/one-to-all-animation/1.3b queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/one-to-all-animation/1.3b", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a85e86c/_YM6CaRHSSPdK06dwfle4_5ca446af223a4a7db272e9db359fada1.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/one-to-all-animation/1.3b", + "documentationUrl": "https://fal.ai/models/fal-ai/one-to-all-animation/1.3b/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "OneToAllAnimation13bInput": { + "title": "OneToALLAnimationRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A robot figure dancing" + ], + "description": "The prompt to generate the video from.", + "type": "string", + "title": "Prompt" + }, + "resolution": { + "enum": [ + "480p", + "580p", + "720p" + ], + "description": "The resolution of the video to generate.", + "type": "string", + "title": "Resolution", + "default": "480p" + }, + "image_guidance_scale": { + "minimum": 1, + "maximum": 10, + "type": "number", + "description": "The image guidance scale to use for the video generation.", + "title": "Image Guidance Scale", + "default": 2 + }, + "pose_guidance_scale": { + "minimum": 1, + "maximum": 10, + "type": "number", + "description": "The pose guidance scale to use for the video generation.", + "title": "Pose Guidance Scale", + "default": 1.5 + }, + "video_url": { + "examples": [ + "https://v3b.fal.media/files/b/panda/a6SvJg96V8eoglMlYFShU_5385885-hd_1080_1920_25fps.mp4" + ], + "description": "The URL of the video to use as a reference for the video generation.", + "type": "string", + "title": "Video Url" + }, + "image_url": { + "examples": [ + "https://v3b.fal.media/files/b/panda/-oMlZo9Yyj_Nzoza_tgds_GmLF86r5bOt50eMMKCszy_eacc949b3933443c9915a83c98fbe85e.png" + ], + "description": "The URL of the image to use as a reference for the video generation.", + "type": "string", + "title": "Image Url" + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 30, + "type": "integer", + "description": "The number of inference steps to use for the video generation.", + "title": "Num Inference Steps", + "default": 30 + }, + "negative_prompt": { + "examples": [ + "black background, Aerial view, aerial view, overexposed, low quality, deformation, a poor composition, bad hands, bad teeth, bad eyes, bad limbs, distortion" + ], + "description": "The negative prompt to generate the video from.", + "type": "string", + "title": "Negative Prompt" + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "image_url", + "video_url", + "resolution", + "num_inference_steps", + "image_guidance_scale", + "pose_guidance_scale" + ], + "required": [ + "prompt", + "negative_prompt", + "image_url", + "video_url" + ] + }, + "OneToAllAnimation13bOutput": { + "title": "OneToALLAnimationResponse", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 225893, + "file_name": "output.mp4", + "content_type": "application/octet-stream", + "url": "https://v3b.fal.media/files/b/0a85e79d/KOuXylETzdzUzMFFWLa4h_output.mp4" + } + ], + "description": "The generated video file.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/one-to-all-animation/1.3b/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/one-to-all-animation/1.3b/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/one-to-all-animation/1.3b": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OneToAllAnimation13bInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/one-to-all-animation/1.3b/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OneToAllAnimation13bOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/one-to-all-animation/14b", + "metadata": { + "display_name": "One To All Animation", + "category": "video-to-video", + "description": "One-to-All Animation is a pose-driven video model that animates characters from a single reference image, enabling flexible, alignment-free motion transfer across diverse styles and scenes.", + "status": "active", + "tags": [ + "video to video", + "motion" + ], + "updated_at": "2026-01-26T21:42:01.540Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a85e85f/dT7NUI11TBLXU-CGY9SMO_fbfc70e4d74544f1820ec29e914c1724.jpg", + "model_url": "https://fal.run/fal-ai/one-to-all-animation/14b", + "license_type": "commercial", + "date": "2025-12-11T18:56:35.518Z", + "group":
{ + "key": "one-to-all-Animation", + "label": "14B" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/one-to-all-animation/14b", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/one-to-all-animation/14b queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/one-to-all-animation/14b", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a85e85f/dT7NUI11TBLXU-CGY9SMO_fbfc70e4d74544f1820ec29e914c1724.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/one-to-all-animation/14b", + "documentationUrl": "https://fal.ai/models/fal-ai/one-to-all-animation/14b/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "OneToAllAnimation14bInput": { + "title": "OneToALLAnimationRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A robot figure dancing" + ], + "description": "The prompt to generate the video from.", + "type": "string", + "title": "Prompt" + }, + "resolution": { + "enum": [ + "480p", + "580p", + "720p" + ], + "description": "The resolution of the video to generate.", + "type": "string", + "title": "Resolution", + "default": "480p" + }, + "image_guidance_scale": { + "minimum": 1, + "maximum": 10, + "type": "number", + "description": "The image guidance scale to use for the video generation.", + "title": "Image Guidance Scale", + "default": 2 + }, + "pose_guidance_scale": { + "minimum": 1, + "maximum": 10, + "type": "number", + "description": "The pose guidance scale to use for the video generation.", + "title": "Pose Guidance Scale", + "default": 1.5 + }, + "video_url": { + "examples": [ + "https://v3b.fal.media/files/b/panda/a6SvJg96V8eoglMlYFShU_5385885-hd_1080_1920_25fps.mp4" + ], + "description": "The URL of the video to use as a reference for the video generation.", + "type": "string", + "title": "Video Url" + }, + "image_url": { + "examples": [ + "https://v3b.fal.media/files/b/panda/-oMlZo9Yyj_Nzoza_tgds_GmLF86r5bOt50eMMKCszy_eacc949b3933443c9915a83c98fbe85e.png" + ], + "description": "The URL of the image to use as a reference for the video generation.", + "type": "string", + "title": "Image Url" + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 30, + "type": "integer", + "description": "The number of inference steps to use for the video generation.", + "title": "Num Inference Steps", + "default": 30 + }, + "negative_prompt": { + "examples": [ + "black background, Aerial view, aerial view, overexposed, low quality, deformation, a poor 
composition, bad hands, bad teeth, bad eyes, bad limbs, distortion" + ], + "description": "The negative prompt to generate the video from.", + "type": "string", + "title": "Negative Prompt" + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "image_url", + "video_url", + "resolution", + "num_inference_steps", + "image_guidance_scale", + "pose_guidance_scale" + ], + "required": [ + "prompt", + "negative_prompt", + "image_url", + "video_url" + ] + }, + "OneToAllAnimation14bOutput": { + "title": "OneToALLAnimationResponse", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 225893, + "file_name": "output.mp4", + "content_type": "application/octet-stream", + "url": "https://v3b.fal.media/files/b/0a85e79d/KOuXylETzdzUzMFFWLa4h_output.mp4" + } + ], + "description": "The generated video file.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/one-to-all-animation/14b/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/one-to-all-animation/14b/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/one-to-all-animation/14b": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OneToAllAnimation14bInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/one-to-all-animation/14b/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OneToAllAnimation14bOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan-vision-enhancer", + "metadata": { + "display_name": "Wan Vision Enhancer", + "category": "video-to-video", + "description": "Wan Vision Enhancer magnifies and enhances video with high fidelity and creativity.", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:42:01.930Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a85cd7b/McCh3yClnF2hddALPEfL5_b81d681652074c6d8a8cd026e50a94d0.jpg", + "model_url": "https://fal.run/fal-ai/wan-vision-enhancer", + "license_type": "commercial", + "date": "2025-12-10T23:51:05.247Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan-vision-enhancer", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan-vision-enhancer queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan-vision-enhancer", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a85cd7b/McCh3yClnF2hddALPEfL5_b81d681652074c6d8a8cd026e50a94d0.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan-vision-enhancer", + "documentationUrl": "https://fal.ai/models/fal-ai/wan-vision-enhancer/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position."
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanVisionEnhancerInput": { + "x-fal-order-properties": [ + "video_url", + "target_resolution", + "creativity", + "prompt", + "negative_prompt", + "seed" + ], + "type": "object", + "properties": { + "prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Prompt", + "description": "Optional prompt to prepend to the VLM-generated description. Leave empty to use only the auto-generated description from the video." + }, + "video_url": { + "examples": [ + "https://v3b.fal.media/files/b/0a85cc7d/zBFvZyJ7iX-7AZoQ1NaMN_wan_animate_output.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "The URL of the video to enhance with Wan Video. Maximum 200MB file size. Videos longer than 500 frames will have only the first 500 frames processed (~8-21 seconds depending on fps)." + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. If not provided, a random seed will be used." + }, + "target_resolution": { + "enum": [ + "720p", + "1080p" + ], + "description": "Target output resolution for the enhanced video. 720p (native, fast) or 1080p (upscaled, slower). Processing is always done at 720p, then upscaled if 1080p selected.", + "type": "string", + "examples": [ + "720p" + ], + "title": "Output Resolution", + "default": "720p" + }, + "negative_prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Negative Prompt", + "description": "Negative prompt to avoid unwanted features.", + "default": "oversaturated, overexposed, static, blurry details, subtitles, stylized, artwork, painting, still frame, overall gray, worst quality, low quality, JPEG artifacts, ugly, mutated, extra fingers, poorly drawn hands, poorly drawn face, deformed, disfigured, malformed limbs, fused fingers, static motion, cluttered background, three legs, crowded background, walking backwards" + }, + "creativity": { + "description": "Controls how much the model enhances/changes the video. 0 = Minimal change (preserves original), 1 = Subtle enhancement (default), 2 = Medium enhancement, 3 = Strong enhancement, 4 = Maximum enhancement.", + "type": "integer", + "minimum": 0, + "maximum": 4, + "title": "Creativity", + "examples": [ + 1 + ], + "default": 1 + } + }, + "description": "Input parameters for Wan Vision Enhancer (Video-to-Video)", + "title": "Input", + "required": [ + "video_url" + ] + }, + "WanVisionEnhancerOutput": { + "x-fal-order-properties": [ + "video", + "seed", + "timings" + ], + "type": "object", + "properties": { + "seed": { + "examples": [ + 42 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "timings": { + "examples": [ + { + "video_processing": 15.2, + "inference": 125.4 + } + ], + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + }, + "description": "The timings of the different steps in the workflow." 
+ }, + "video": { + "examples": [ + { + "url": "https://v3b.fal.media/files/b/0a85cc9a/TBr1WXFaFb2zJ2htEWDdm_combined_2baa6f34a84a4d0caf23896580810ee1.mp4" + } + ], + "description": "The enhanced video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "description": "Output from Wan Vision Enhancer", + "title": "Output", + "required": [ + "video", + "seed", + "timings" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan-vision-enhancer/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-vision-enhancer/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/wan-vision-enhancer": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanVisionEnhancerInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-vision-enhancer/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanVisionEnhancerOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/sync-lipsync/react-1", + "metadata": { + "display_name": "Sync React-1", + "category": "video-to-video", + "description": "Use React-1 from SyncLabs to refine human emotions and do realistic lip-sync without losing details!", + "status": "active", + "tags": [ + "lipsync", + "video-to-video" + ], + "updated_at": "2026-01-26T21:42:02.561Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a85c033/t_KdQQe8bt53TwIbJwiHX_63eac44271e847bbbe24a93b1a859bfd.jpg", + "model_url": "https://fal.run/fal-ai/sync-lipsync/react-1", + "license_type": "commercial", + "date": "2025-12-10T14:23:05.777Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/sync-lipsync/react-1", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/sync-lipsync/react-1 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/sync-lipsync/react-1", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a85c033/t_KdQQe8bt53TwIbJwiHX_63eac44271e847bbbe24a93b1a859bfd.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/sync-lipsync/react-1", + "documentationUrl": "https://fal.ai/models/fal-ai/sync-lipsync/react-1/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "SyncLipsyncReact1Input": { + "title": "React1Input", + "type": "object", + "properties": { + "emotion": { + "enum": [ + "happy", + "angry", + "sad", + "neutral", + "disgusted", + "surprised" + ], + "title": "Emotion", + "type": "string", + "description": "Emotion prompt for the generation. Currently supports single-word emotions only.", + "examples": [ + "neutral" + ] + }, + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/react_1/input.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "URL to the input video. Must be **15 seconds or shorter**." + }, + "lipsync_mode": { + "enum": [ + "cut_off", + "loop", + "bounce", + "silence", + "remap" + ], + "title": "Lipsync Mode", + "type": "string", + "description": "Lipsync mode when audio and video durations are out of sync.", + "default": "bounce" + }, + "audio_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/react_1/input.mp3" + ], + "title": "Audio Url", + "type": "string", + "description": "URL to the input audio. Must be **15 seconds or shorter**." + }, + "temperature": { + "minimum": 0, + "title": "Temperature", + "type": "number", + "description": "Controls the expressiveness of the lipsync.", + "maximum": 1, + "default": 0.5 + }, + "model_mode": { + "enum": [ + "lips", + "face", + "head" + ], + "title": "Model Mode", + "type": "string", + "description": "Controls the edit region and movement scope for the model. Available options:\n- `lips`: Only lipsync using react-1 (minimal facial changes).\n- `face`: Lipsync + facial expressions without head movements.\n- `head`: Lipsync + facial expressions + natural talking head movements.", + "default": "face" + } + }, + "x-fal-order-properties": [ + "video_url", + "audio_url", + "emotion", + "model_mode", + "lipsync_mode", + "temperature" + ], + "required": [ + "video_url", + "audio_url", + "emotion" + ] + }, + "SyncLipsyncReact1Output": { + "title": "React1Output", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "height": 1088, + "duration": 7.041667, + "url": "https://storage.googleapis.com/falserverless/example_outputs/react_1/output.mp4", + "width": 1920, + "fps": 24, + "file_name": "output.mp4", + "num_frames": 169, + "content_type": "video/mp4" + } + ], + "title": "Video", + "description": "The generated video with synchronized lip and facial movements.", + "allOf": [ + { + "$ref": "#/components/schemas/VideoFile" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "duration": { + "title": "Duration", + "type": "number", + "description": "The duration of the video" + }, + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the video" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the video" + }, + "fps": { + "title": "Fps", + "type": "number", + "description": "The FPS of the video" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. 
It will be auto-generated if not provided." + }, + "num_frames": { + "title": "Num Frames", + "type": "integer", + "description": "The number of frames in the video" + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/sync-lipsync/react-1/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sync-lipsync/react-1/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/sync-lipsync/react-1": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SyncLipsyncReact1Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sync-lipsync/react-1/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SyncLipsyncReact1Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "veed/video-background-removal/fast", + "metadata": { + "display_name": "Video Background Removal", + "category": "video-to-video", + "description": "Remove background from any video with people and objects. 
No green screen needed.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:08.627Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a848f46/fPWWIeh4BRaiUufOGPa1i_2e9706df51e1437d867dfe9eb5b4f854.jpg", + "model_url": "https://fal.run/veed/video-background-removal/fast", + "license_type": "commercial", + "date": "2025-12-01T13:33:43.631Z", + "group": { + "key": "video-background-removal", + "label": "Video To Video [fast]" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for veed/video-background-removal/fast", + "version": "1.0.0", + "description": "The OpenAPI schema for the veed/video-background-removal/fast queue.", + "x-fal-metadata": { + "endpointId": "veed/video-background-removal/fast", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a848f46/fPWWIeh4BRaiUufOGPa1i_2e9706df51e1437d867dfe9eb5b4f854.jpg", + "playgroundUrl": "https://fal.ai/models/veed/video-background-removal/fast", + "documentationUrl": "https://fal.ai/models/veed/video-background-removal/fast/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "VideoBackgroundRemovalFastInput": { + "title": "FastGeneralRembgInput", + "type": "object", + "properties": { + "video_url": { + "format": "uri", + "title": "Video Url", + "type": "string", + "examples": [ + "https://v3b.fal.media/files/b/0a8479a0/3ngPszofrNpEvPaL9P5xR_generated.mp4" + ], + "maxLength": 2083, + "minLength": 1 + }, + "subject_is_person": { + "title": "Subject Is Person", + "type": "boolean", + "description": "Set to False if the subject is not a person.", + "default": true + }, + "output_codec": { + "enum": [ + "vp9", + "h264" + ], + "title": "Output Codec", + "type": "string", + "description": "Single VP9 video with alpha channel or two videos (rgb and alpha) in H264 format. 
H264 is recommended for better RGB quality.", + "default": "vp9" + }, + "refine_foreground_edges": { + "title": "Refine Foreground Edges", + "type": "boolean", + "description": "Improves the quality of the extracted object's edges.", + "default": true + } + }, + "x-fal-order-properties": [ + "video_url", + "output_codec", + "refine_foreground_edges", + "subject_is_person" + ], + "required": [ + "video_url" + ] + }, + "VideoBackgroundRemovalFastOutput": { + "title": "FastGeneralRembgOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + [ + { + "content_type": "video/webm", + "url": "https://v3b.fal.media/files/b/0a8479a7/mdr5_b7CqeDmZROCLkp7i_output.webm" + } + ] + ], + "title": "Video", + "type": "array", + "items": { + "$ref": "#/components/schemas/File" + } + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/veed/video-background-removal/fast/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/veed/video-background-removal/fast/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/veed/video-background-removal/fast": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VideoBackgroundRemovalFastInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/veed/video-background-removal/fast/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VideoBackgroundRemovalFastOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/o1/video-to-video/edit", + "metadata": { + "display_name": "Kling O1 Edit Video [Pro]", + "category": "video-to-video", + "description": "Edit an existing video using natural-language instructions, transforming subjects, settings, and style while retaining the original motion structure.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:09.017Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/penguin/KSlUBiWApJEK5TS53lmB1_a9ac938f625f4d5789681706620b9868.jpg", + "model_url": "https://fal.run/fal-ai/kling-video/o1/video-to-video/edit", + "license_type": "commercial", + "date": "2025-12-01T11:29:19.531Z", + "group": { + "key": "kling-video/o1", + "label": "Edit Video [Pro]" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling-video/o1/video-to-video/edit", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/kling-video/o1/video-to-video/edit queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/o1/video-to-video/edit", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/penguin/KSlUBiWApJEK5TS53lmB1_a9ac938f625f4d5789681706620b9868.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/o1/video-to-video/edit", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/o1/video-to-video/edit/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoO1VideoToVideoEditInput": { + "title": "OmniV2VEditInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Replace the character in the video with @Element1, maintaining the same movements and camera angles. Transform the landscape into @Image1" + ], + "title": "Prompt", + "type": "string", + "maxLength": 2500, + "description": "Use @Element1, @Element2 to reference elements and @Image1, @Image2 to reference images in order." + }, + "video_url": { + "x-fal": { + "min_fps": 24, + "max_fps": 60, + "max_file_size": 209715200, + "min_duration": 3, + "min_width": 720, + "min_height": 720, + "max_duration": 10.05, + "max_height": 2160, + "max_width": 2160, + "timeout": 30 + }, + "title": "Video Url", + "type": "string", + "examples": [ + "https://v3b.fal.media/files/b/rabbit/ku8_Wdpf-oTbGRq4lB5DU_output.mp4" + ], + "description": "Reference video URL. Only .mp4/.mov formats supported, 3-10 seconds duration, 720-2160px resolution, max 200MB.\n\nMax file size: 200.0MB, Min width: 720px, Min height: 720px, Max width: 2160px, Max height: 2160px, Min duration: 3.0s, Max duration: 10.05s, Min FPS: 24.0, Max FPS: 60.0, Timeout: 30.0s" + }, + "elements": { + "examples": [ + [ + { + "reference_image_urls": [ + "https://v3b.fal.media/files/b/kangaroo/YMpmQkYt9xugpOTQyZW0O.png", + "https://v3b.fal.media/files/b/zebra/d6ywajNyJ6bnpa_xBue-K.png" + ], + "frontal_image_url": "https://v3b.fal.media/files/b/panda/MQp-ghIqshvMZROKh9lW3.png" + } + ] + ], + "title": "Elements", + "type": "array", + "description": "Elements (characters/objects) to include. Reference in prompt as @Element1, @Element2, etc. Maximum 4 total (elements + reference images) when using video.", + "items": { + "$ref": "#/components/schemas/OmniVideoElementInput" + } + }, + "image_urls": { + "examples": [ + [ + "https://v3b.fal.media/files/b/lion/MKvhFko5_wYnfORYacNII_AgPt8v25Wt4oyKhjnhVK5.png" + ] + ], + "title": "Image Urls", + "type": "array", + "description": "Reference images for style/appearance. Reference in prompt as @Image1, @Image2, etc. 
Maximum 4 total (elements + reference images) when using video.", + "items": { + "x-fal": { + "min_width": 300, + "min_aspect_ratio": 0.4, + "timeout": 20, + "min_height": 300, + "max_aspect_ratio": 2.5, + "max_file_size": 10485760 + }, + "type": "string", + "limit_description": "Max file size: 10.0MB, Min width: 300px, Min height: 300px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s" + } + }, + "keep_audio": { + "title": "Keep Audio", + "type": "boolean", + "description": "Whether to keep the original audio from the video.", + "default": false + } + }, + "description": "Input for video editing or video-as-reference generation.", + "x-fal-order-properties": [ + "prompt", + "video_url", + "keep_audio", + "image_urls", + "elements" + ], + "required": [ + "prompt", + "video_url" + ] + }, + "KlingVideoO1VideoToVideoEditOutput": { + "title": "OmniV2VEditOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 7533071, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://v3b.fal.media/files/b/0a86603b/YAlbB2535l07BTy1wpDeI_output.mp4" + } + ], + "title": "Video", + "description": "The generated video.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "OmniVideoElementInput": { + "title": "OmniVideoElementInput", + "type": "object", + "properties": { + "reference_image_urls": { + "title": "Reference Image Urls", + "type": "array", + "description": "Additional reference images from different angles. 1-4 images supported. At least one image is required.", + "items": { + "x-fal": { + "min_width": 300, + "min_aspect_ratio": 0.4, + "timeout": 20, + "min_height": 300, + "max_aspect_ratio": 2.5, + "max_file_size": 10485760 + }, + "type": "string", + "limit_description": "Max file size: 10.0MB, Min width: 300px, Min height: 300px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s" + } + }, + "frontal_image_url": { + "x-fal": { + "min_width": 300, + "min_aspect_ratio": 0.4, + "timeout": 20, + "min_height": 300, + "max_aspect_ratio": 2.5, + "max_file_size": 10485760 + }, + "title": "Frontal Image Url", + "type": "string", + "description": "The frontal image of the element (main view).\n\nMax file size: 10.0MB, Min width: 300px, Min height: 300px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s" + } + }, + "x-fal-order-properties": [ + "frontal_image_url", + "reference_image_urls" + ], + "required": [ + "frontal_image_url" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/o1/video-to-video/edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/o1/video-to-video/edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/o1/video-to-video/edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoO1VideoToVideoEditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/o1/video-to-video/edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoO1VideoToVideoEditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/kling-video/o1/video-to-video/reference", + "metadata": { + "display_name": "Kling O1 Reference Video to Video [Pro]", + "category": "video-to-video", + "description": "Kling O1 Omni generates new shots guided by an input reference video, preserving cinematic language such as motion, and camera style to produce seamless scene continuity.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:09.145Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/koala/W2rN6g_nAeMqlL1W3IdOA_3cf3388f038944deabb311d5ccd584ba.jpg", + "model_url": "https://fal.run/fal-ai/kling-video/o1/video-to-video/reference", + "license_type": "commercial", + "date": "2025-12-01T11:25:09.487Z", + "group": { + "key": "kling-video/o1", + "label": "Reference V2V [Pro]" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/kling-video/o1/video-to-video/reference", + "version": "1.0.0", + "description": "The 
OpenAPI schema for the fal-ai/kling-video/o1/video-to-video/reference queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/kling-video/o1/video-to-video/reference", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/koala/W2rN6g_nAeMqlL1W3IdOA_3cf3388f038944deabb311d5ccd584ba.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/kling-video/o1/video-to-video/reference", + "documentationUrl": "https://fal.ai/models/fal-ai/kling-video/o1/video-to-video/reference/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KlingVideoO1VideoToVideoReferenceInput": { + "title": "OmniV2VReferenceInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Based on @Video1, generate the next shot. keep the style of the video" + ], + "title": "Prompt", + "type": "string", + "maxLength": 2500, + "description": "Use @Element1, @Element2 to reference elements and @Image1, @Image2 to reference images in order." + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "9:16", + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the generated video frame. If 'auto', the aspect ratio will be determined automatically based on the input video, and the closest aspect ratio to the input video will be used.", + "default": "auto" + }, + "duration": { + "enum": [ + "3", + "4", + "5", + "6", + "7", + "8", + "9", + "10" + ], + "title": "Duration", + "type": "string", + "description": "Video duration in seconds.", + "default": "5" + }, + "video_url": { + "x-fal": { + "min_fps": 24, + "max_fps": 60, + "max_file_size": 209715200, + "min_duration": 3, + "min_width": 720, + "min_height": 720, + "max_duration": 10.05, + "max_height": 2160, + "max_width": 2160, + "timeout": 30 + }, + "title": "Video Url", + "type": "string", + "examples": [ + "https://v3b.fal.media/files/b/panda/oVdiICFXY03Vbam-08Aj8_output.mp4" + ], + "description": "Reference video URL. 
Only .mp4/.mov formats supported, 3-10 seconds duration, 720-2160px resolution, max 200MB.\n\nMax file size: 200.0MB, Min width: 720px, Min height: 720px, Max width: 2160px, Max height: 2160px, Min duration: 3.0s, Max duration: 10.05s, Min FPS: 24.0, Max FPS: 60.0, Timeout: 30.0s" + }, + "keep_audio": { + "title": "Keep Audio", + "type": "boolean", + "description": "Whether to keep the original audio from the video.", + "default": false + }, + "elements": { + "examples": [ + [ + { + "reference_image_urls": [ + "https://v3b.fal.media/files/b/kangaroo/YMpmQkYt9xugpOTQyZW0O.png", + "https://v3b.fal.media/files/b/zebra/d6ywajNyJ6bnpa_xBue-K.png" + ], + "frontal_image_url": "https://v3b.fal.media/files/b/panda/MQp-ghIqshvMZROKh9lW3.png" + } + ] + ], + "title": "Elements", + "type": "array", + "description": "Elements (characters/objects) to include. Reference in prompt as @Element1, @Element2, etc. Maximum 4 total (elements + reference images) when using video.", + "items": { + "$ref": "#/components/schemas/OmniVideoElementInput" + } + }, + "image_urls": { + "examples": [ + [] + ], + "title": "Image Urls", + "type": "array", + "description": "Reference images for style/appearance. Reference in prompt as @Image1, @Image2, etc. Maximum 4 total (elements + reference images) when using video.", + "items": { + "x-fal": { + "min_width": 300, + "min_aspect_ratio": 0.4, + "timeout": 20, + "min_height": 300, + "max_aspect_ratio": 2.5, + "max_file_size": 10485760 + }, + "type": "string", + "limit_description": "Max file size: 10.0MB, Min width: 300px, Min height: 300px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s" + } + } + }, + "description": "Input for video editing or video-as-reference generation.", + "x-fal-order-properties": [ + "prompt", + "video_url", + "keep_audio", + "image_urls", + "elements", + "aspect_ratio", + "duration" + ], + "required": [ + "prompt", + "video_url" + ] + }, + "KlingVideoO1VideoToVideoReferenceOutput": { + "title": "OmniV2VReferenceOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 28472159, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://v3b.fal.media/files/b/kangaroo/3n_Lpxm_SjK5NYyBobRdS_output.mp4" + } + ], + "title": "Video", + "description": "The generated video.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "OmniVideoElementInput": { + "title": "OmniVideoElementInput", + "type": "object", + "properties": { + "reference_image_urls": { + "title": "Reference Image Urls", + "type": "array", + "description": "Additional reference images from different angles. 1-4 images supported. 
At least one image is required.", + "items": { + "x-fal": { + "min_width": 300, + "min_aspect_ratio": 0.4, + "timeout": 20, + "min_height": 300, + "max_aspect_ratio": 2.5, + "max_file_size": 10485760 + }, + "type": "string", + "limit_description": "Max file size: 10.0MB, Min width: 300px, Min height: 300px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s" + } + }, + "frontal_image_url": { + "x-fal": { + "min_width": 300, + "min_aspect_ratio": 0.4, + "timeout": 20, + "min_height": 300, + "max_aspect_ratio": 2.5, + "max_file_size": 10485760 + }, + "title": "Frontal Image Url", + "type": "string", + "description": "The frontal image of the element (main view).\n\nMax file size: 10.0MB, Min width: 300px, Min height: 300px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s" + } + }, + "x-fal-order-properties": [ + "frontal_image_url", + "reference_image_urls" + ], + "required": [ + "frontal_image_url" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/kling-video/o1/video-to-video/reference/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/o1/video-to-video/reference/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/kling-video/o1/video-to-video/reference": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoO1VideoToVideoReferenceInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/kling-video/o1/video-to-video/reference/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KlingVideoO1VideoToVideoReferenceOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "veed/video-background-removal", + "metadata": { + "display_name": "Video Background Removal", + "category": "video-to-video", + "description": "Remove background from any video with people and objects. No green screen needed.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:09.678Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8493a9/Oi0ou1tpONWDl5MQg1Ior_04bb265985a24a558bc3adae79407610.jpg", + "model_url": "https://fal.run/veed/video-background-removal", + "license_type": "commercial", + "date": "2025-11-28T16:15:50.575Z", + "group": { + "key": "video-background-removal", + "label": "Video To Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for veed/video-background-removal", + "version": "1.0.0", + "description": "The OpenAPI schema for the veed/video-background-removal queue.", + "x-fal-metadata": { + "endpointId": "veed/video-background-removal", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8493a9/Oi0ou1tpONWDl5MQg1Ior_04bb265985a24a558bc3adae79407610.jpg", + "playgroundUrl": "https://fal.ai/models/veed/video-background-removal", + "documentationUrl": "https://fal.ai/models/veed/video-background-removal/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "VideoBackgroundRemovalInput": { + "title": "GeneralRembgInput", + "type": "object", + "properties": { + "video_url": { + "format": "uri", + "title": "Video Url", + "type": "string", + "examples": [ + "https://v3b.fal.media/files/b/0a847700/NYnvFeP13ehgSgqMDV_PL_stock.mp4" + ], + "maxLength": 2083, + "minLength": 1 + }, + "subject_is_person": { + "title": "Subject Is Person", + "type": "boolean", + "description": "Set to False if the subject is not a person.", + "default": true + }, + "output_codec": { + "enum": [ + "vp9", + "h264" + ], + "title": "Output Codec", + "type": "string", + "description": "Single VP9 video with alpha channel or two videos (rgb and alpha) in H264 format. H264 is recommended for better RGB quality.", + "default": "vp9" + }, + "refine_foreground_edges": { + "title": "Refine Foreground Edges", + "type": "boolean", + "description": "Improves the quality of the extracted object's edges.", + "default": true + } + }, + "x-fal-order-properties": [ + "video_url", + "output_codec", + "refine_foreground_edges", + "subject_is_person" + ], + "required": [ + "video_url" + ] + }, + "VideoBackgroundRemovalOutput": { + "title": "GeneralRembgOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + [ + { + "content_type": "video/webm", + "url": "https://v3b.fal.media/files/b/0a847713/C7g1UaT46yKhOPt6KRrgg_output.webm" + } + ] + ], + "title": "Video", + "type": "array", + "items": { + "$ref": "#/components/schemas/File" + } + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/veed/video-background-removal/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/veed/video-background-removal/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/veed/video-background-removal": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VideoBackgroundRemovalInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/veed/video-background-removal/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VideoBackgroundRemovalOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "veed/video-background-removal/green-screen", + "metadata": { + "display_name": "Video Background Removal", + "category": "video-to-video", + "description": "Remove background from videos filmed using chromakey, with automatic green spill suppression for clean, professional edges.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:09.806Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/kangaroo/2dnJmbIsYHIB4aBpIgI0L_0044788dd750470589b08c44e7dc1087.jpg", + "model_url": "https://fal.run/veed/video-background-removal/green-screen", + "license_type": "commercial", + "date": "2025-11-28T16:14:11.401Z", + "group": { + "key": "video-background-removal", + "label": "Video To Video [green-screen]" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for veed/video-background-removal/green-screen", + "version": "1.0.0", + "description": "The OpenAPI schema for the veed/video-background-removal/green-screen queue.", + "x-fal-metadata": { + "endpointId": "veed/video-background-removal/green-screen", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/kangaroo/2dnJmbIsYHIB4aBpIgI0L_0044788dd750470589b08c44e7dc1087.jpg", + "playgroundUrl": "https://fal.ai/models/veed/video-background-removal/green-screen", + "documentationUrl": "https://fal.ai/models/veed/video-background-removal/green-screen/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + 
"COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "VideoBackgroundRemovalGreenScreenInput": { + "title": "GreenScreenRembgInput", + "type": "object", + "properties": { + "video_url": { + "format": "uri", + "title": "Video Url", + "type": "string", + "examples": [ + "https://v3b.fal.media/files/b/0a849c38/zoA0frujpZUkj07NtXytv_jessica.mp4" + ], + "maxLength": 2083, + "minLength": 1 + }, + "output_codec": { + "enum": [ + "vp9", + "h264" + ], + "title": "Output Codec", + "type": "string", + "description": "Single VP9 video with alpha channel or two videos (rgb and alpha) in H264 format. H264 is recommended for better RGB quality.", + "default": "vp9" + }, + "spill_suppression_strength": { + "anyOf": [ + { + "minimum": 0, + "maximum": 1, + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Spill Suppression Strength", + "description": "Increase the value if green spots remain in the video, decrease if color changes are noticed on the extracted subject.", + "default": 0.8 + } + }, + "x-fal-order-properties": [ + "video_url", + "output_codec", + "spill_suppression_strength" + ], + "required": [ + "video_url" + ] + }, + "VideoBackgroundRemovalGreenScreenOutput": { + "title": "GreenScreenRembgOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + [ + { + "content_type": "video/webm", + "url": "https://v3b.fal.media/files/b/0a849c48/MFOmvAhK4vvUsFsMVmw0P_output.webm" + } + ] + ], + "title": "Video", + "type": "array", + "items": { + "$ref": "#/components/schemas/File" + } + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/veed/video-background-removal/green-screen/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/veed/video-background-removal/green-screen/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/veed/video-background-removal/green-screen": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VideoBackgroundRemovalGreenScreenInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/veed/video-background-removal/green-screen/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VideoBackgroundRemovalGreenScreenOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx-2/retake-video", + "metadata": { + "display_name": "LTX Video 2.0 Retake", + "category": "video-to-video", + "description": "Change sections of a video using LTX-2", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:11.148Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/tiger/xvJ-7NU2kAKJBpgXtwGfL_746be952110b4b5db200f410da23e7f8.jpg", + "model_url": "https://fal.run/fal-ai/ltx-2/retake-video", + "license_type": "commercial", + "date": "2025-11-26T17:22:47.621Z", + "group": { + "key": "ltx-2", + "label": "Retake Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-2/retake-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx-2/retake-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-2/retake-video", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/tiger/xvJ-7NU2kAKJBpgXtwGfL_746be952110b4b5db200f410da23e7f8.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx-2/retake-video", + 
"documentationUrl": "https://fal.ai/models/fal-ai/ltx-2/retake-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Ltx2RetakeVideoInput": { + "title": "LTXRetakeVideoRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Remove the man" + ], + "maxLength": 5000, + "type": "string", + "minLength": 1, + "title": "Prompt", + "description": "The prompt to retake the video with" + }, + "duration": { + "description": "The duration of the video to retake in seconds", + "type": "number", + "minimum": 2, + "maximum": 20, + "title": "Duration", + "default": 5 + }, + "video_url": { + "examples": [ + "https://v3b.fal.media/files/b/kangaroo/409mvS2FMwb6S7WcwkSW7_output.mp4" + ], + "title": "Video URL", + "type": "string", + "description": "The URL of the video to retake" + }, + "start_time": { + "description": "The start time of the video to retake in seconds", + "type": "number", + "minimum": 0, + "maximum": 20, + "title": "Start Time", + "default": 0 + }, + "retake_mode": { + "enum": [ + "replace_audio", + "replace_video", + "replace_audio_and_video" + ], + "title": "Retake Mode", + "type": "string", + "description": "The retake mode to use for the retake", + "default": "replace_audio_and_video" + } + }, + "x-fal-order-properties": [ + "video_url", + "prompt", + "start_time", + "duration", + "retake_mode" + ], + "required": [ + "video_url", + "prompt" + ] + }, + "Ltx2RetakeVideoOutput": { + "title": "LTXRetakeVideoResponse", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_name": "ltxv-2-retake-output.mp4", + "content_type": "video/mp4", + "url": "https://v3b.fal.media/files/b/zebra/qM8Ve4OM8BcYnX23hoxd8_zvgLuC4m.mp4" + } + ], + "title": "Video", + "description": "The generated video file", + "allOf": [ + { + "$ref": "#/components/schemas/VideoFile" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the video" + }, + "duration": { + "title": "Duration", + "type": "number", + "description": "The duration of the video" + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "fps": { + "title": "Fps", + "type": "number", + "description": "The FPS of the video" + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the video" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "num_frames": { + "title": "Num Frames", + "type": "integer", + "description": "The number of frames in the video" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-2/retake-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2/retake-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-2/retake-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx2RetakeVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-2/retake-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltx2RetakeVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "decart/lucy-edit/fast", + "metadata": { + "display_name": "Lucy Edit [Fast]", + "category": "video-to-video", + "description": "Lucy Edit Fast is a rapid, localized video editing model that lets you modify specific elements like objects, or backgrounds in just 10 seconds.", + "status": "active", + "tags": [ + "edit", + "video-edit" + ], + "updated_at": "2026-01-26T21:42:11.718Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/tiger/QBsQaXfxeb6psCcp1Hy8S_648bd2a1522d4d32980a07f678afaaa3.jpg", + "model_url": "https://fal.run/decart/lucy-edit/fast", + "license_type": "commercial", + "date": "2025-11-25T20:09:33.023Z", + "group": { + "key": "lucy-edit", + "label": "Fast" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for decart/lucy-edit/fast", + "version": "1.0.0", + "description": "Lucy Edit Fast is a high-speed, localized video editing model designed for rapid creative exploration. Built on Decart’s Lucy Edit architecture, it enables precise, object-level modifications - such as adding or removing elements, altering clothing, or replacing backgrounds - while preserving lighting, motion, and realism. This variant delivers edits in just 10 seconds, offering faster iteration at lower cost with only minimal quality trade-offs.", + "x-fal-metadata": { + "endpointId": "decart/lucy-edit/fast", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/tiger/QBsQaXfxeb6psCcp1Hy8S_648bd2a1522d4d32980a07f678afaaa3.jpg", + "playgroundUrl": "https://fal.ai/models/decart/lucy-edit/fast", + "documentationUrl": "https://fal.ai/models/decart/lucy-edit/fast/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LucyEditFastInput": { + "title": "LucyEditFastInput", + "type": "object", + "properties": { + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the video to be generated\n and uploaded before returning the response. This will increase the\n latency of the function but it allows you to get the video directly\n in the response without going through the CDN.\n ", + "default": false + }, + "video_url": { + "examples": [ + "https://v3b.fal.media/files/b/elephant/p8aXM3bMaj_Nzbk5RfEoa_original-lucy-fast-coffee.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "URL of the video to edit" + }, + "prompt": { + "examples": [ + "Change her jacket to formal brown jacket" + ], + "maxLength": 1500, + "type": "string", + "title": "Prompt", + "description": "Text description of the desired video content" + }, + "enhance_prompt": { + "title": "Enhance Prompt", + "type": "boolean", + "description": "Whether to enhance the prompt for better results.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "video_url", + "sync_mode", + "enhance_prompt" + ], + "required": [ + "prompt", + "video_url" + ] + }, + "LucyEditFastOutput": { + "title": "LucyEditFastOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 819791, + "file_name": "generated_video.mp4", + "content_type": "video/mp4", + "url": "https://v3b.fal.media/files/b/koala/Q24io3IIZNRvszEBUdb6Z_generated_video.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/decart/lucy-edit/fast/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/decart/lucy-edit/fast/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/decart/lucy-edit/fast": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LucyEditFastInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/decart/lucy-edit/fast/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LucyEditFastOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/sam-3/video-rle", + "metadata": { + "display_name": "Sam 3", + "category": "video-to-video", + "description": "SAM 3 is a unified foundation model for promptable segmentation in images and videos. It can detect, segment, and track objects using text or visual prompts such as points, boxes, and masks. ", + "status": "active", + "tags": [ + "segmentation", + "mask", + "real-time", + "rle" + ], + "updated_at": "2026-01-26T21:42:19.287Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/penguin/OOePxI1jvWaABuigKMkrl_c5195735531a4c9887a415bfc7cf0b74.jpg", + "model_url": "https://fal.run/fal-ai/sam-3/video-rle", + "license_type": "commercial", + "date": "2025-11-20T20:18:27.813Z", + "group": { + "key": "sam3", + "label": "Video to RLE" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/sam-3/video-rle", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/sam-3/video-rle queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/sam-3/video-rle", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/penguin/OOePxI1jvWaABuigKMkrl_c5195735531a4c9887a415bfc7cf0b74.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/sam-3/video-rle", + "documentationUrl": "https://fal.ai/models/fal-ai/sam-3/video-rle/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Sam3VideoRleInput": { + "x-fal-order-properties": [ + "video_url", + "mask_url", + "prompt", + "point_prompts", + "box_prompts", + "apply_mask", + "boundingbox_zip", + "detection_threshold", + "frame_index" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "person", + "person, cloth" + ], + "title": "Prompt", + "type": "string", + "description": "Text prompt for segmentation. Use commas to track multiple objects (e.g., 'person, cloth').", + "default": "" + }, + "video_url": { + "examples": [ + "https://v3b.fal.media/files/b/elephant/NQdDxB0Ddfo82SPLbhYDp_bedroom.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "The URL of the video to be segmented." + }, + "detection_threshold": { + "minimum": 0.01, + "title": "Detection Threshold", + "type": "number", + "description": "Detection confidence threshold (0.0-1.0). Lower = more detections but less precise. Defaults: 0.5 for existing, 0.7 for new objects. Try 0.2-0.3 if text prompts fail.", + "maximum": 1, + "default": 0.5 + }, + "box_prompts": { + "title": "Box Prompts", + "type": "array", + "description": "List of box prompts with optional frame_index.", + "items": { + "$ref": "#/components/schemas/BoxPrompt" + }, + "default": [] + }, + "boundingbox_zip": { + "title": "Boundingbox Zip", + "type": "boolean", + "description": "Return per-frame bounding box overlays as a zip archive.", + "default": false + }, + "point_prompts": { + "title": "Point Prompts", + "type": "array", + "description": "List of point prompts with frame indices.", + "items": { + "$ref": "#/components/schemas/PointPrompt" + }, + "default": [] + }, + "frame_index": { + "title": "Frame Index", + "type": "integer", + "description": "Frame index used for initial interaction when mask_url is provided.", + "default": 0 + }, + "mask_url": { + "title": "Mask Url", + "type": "string", + "description": "The URL of the mask to be applied initially." 
+ }, + "apply_mask": { + "title": "Apply Mask", + "type": "boolean", + "description": "Apply the mask on the video.", + "default": false + } + }, + "title": "SAM3VideoRLEInput", + "required": [ + "video_url" + ] + }, + "Sam3VideoRleOutput": { + "x-fal-order-properties": [ + "video", + "boundingbox_frames_zip" + ], + "type": "object", + "properties": { + "boundingbox_frames_zip": { + "title": "Boundingbox Frames Zip", + "description": "Zip file containing per-frame bounding box overlays.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "video": { + "examples": [ + "https://fal.media/files/monkey/5BLHmbX3qxu5cD5gQzTqw_output.mp4" + ], + "title": "Video", + "description": "The segmented video.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "SAM3VideoOutput", + "required": [ + "video" + ] + }, + "BoxPrompt": { + "x-fal-order-properties": [ + "x_min", + "y_min", + "x_max", + "y_max", + "object_id", + "frame_index" + ], + "type": "object", + "properties": { + "y_min": { + "title": "Y Min", + "type": "integer", + "description": "Y Min Coordinate of the box" + }, + "object_id": { + "title": "Object Id", + "type": "integer", + "description": "Optional object identifier. Boxes sharing an object id refine the same object." + }, + "frame_index": { + "title": "Frame Index", + "type": "integer", + "description": "The frame index to interact with." + }, + "x_max": { + "title": "X Max", + "type": "integer", + "description": "X Max Coordinate of the box" + }, + "x_min": { + "title": "X Min", + "type": "integer", + "description": "X Min Coordinate of the box" + }, + "y_max": { + "title": "Y Max", + "type": "integer", + "description": "Y Max Coordinate of the box" + } + }, + "title": "BoxPrompt" + }, + "PointPrompt": { + "x-fal-order-properties": [ + "x", + "y", + "label", + "object_id", + "frame_index" + ], + "type": "object", + "properties": { + "y": { + "title": "Y", + "type": "integer", + "description": "Y Coordinate of the prompt" + }, + "x": { + "title": "X", + "type": "integer", + "description": "X Coordinate of the prompt" + }, + "object_id": { + "title": "Object Id", + "type": "integer", + "description": "Optional object identifier. Prompts sharing an object id refine the same object." + }, + "frame_index": { + "title": "Frame Index", + "type": "integer", + "description": "The frame index to interact with." + }, + "label": { + "enum": [ + 0, + 1 + ], + "title": "Label", + "type": "integer", + "description": "1 for foreground, 0 for background" + } + }, + "title": "PointPrompt" + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/sam-3/video-rle/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sam-3/video-rle/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/sam-3/video-rle": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sam3VideoRleInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sam-3/video-rle/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sam3VideoRleOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/sam-3/video", + "metadata": { + "display_name": "Sam 3", + "category": "video-to-video", + "description": "SAM 3 is a unified foundation model for promptable segmentation in images and videos. It can detect, segment, and track objects using text or visual prompts such as points, boxes, and masks. 
", + "status": "active", + "tags": [ + "segmentation", + "mask", + "real-time" + ], + "updated_at": "2026-01-26T21:42:19.414Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/panda/sg8Fv6dzqdXAAgbAvxTta_814b4d079f5942d1b6cfb0f90ece9f36.jpg", + "model_url": "https://fal.run/fal-ai/sam-3/video", + "license_type": "commercial", + "date": "2025-11-20T20:08:20.592Z", + "group": { + "key": "sam3", + "label": "Video to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/sam-3/video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/sam-3/video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/sam-3/video", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/panda/sg8Fv6dzqdXAAgbAvxTta_814b4d079f5942d1b6cfb0f90ece9f36.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/sam-3/video", + "documentationUrl": "https://fal.ai/models/fal-ai/sam-3/video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Sam3VideoInput": { + "x-fal-order-properties": [ + "video_url", + "prompt", + "text_prompt", + "point_prompts", + "box_prompts", + "apply_mask", + "detection_threshold" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "person", + "person, cloth" + ], + "title": "Prompt", + "type": "string", + "description": "Text prompt for segmentation. Use commas to track multiple objects (e.g., 'person, cloth').", + "default": "" + }, + "video_url": { + "title": "Video Url", + "type": "string", + "description": "The URL of the video to be segmented." + }, + "detection_threshold": { + "minimum": 0.1, + "title": "Detection Threshold", + "type": "number", + "description": "Detection confidence threshold (0.0-1.0). Lower = more detections but less precise. 
", + "maximum": 1, + "default": 0.5 + }, + "box_prompts": { + "title": "Box Prompts", + "type": "array", + "description": "List of box prompt coordinates (x_min, y_min, x_max, y_max).", + "items": { + "$ref": "#/components/schemas/BoxPromptBase" + }, + "default": [] + }, + "point_prompts": { + "title": "Point Prompts", + "type": "array", + "description": "List of point prompts", + "items": { + "$ref": "#/components/schemas/PointPromptBase" + }, + "default": [] + }, + "apply_mask": { + "title": "Apply Mask", + "type": "boolean", + "description": "Apply the mask on the video.", + "default": true + }, + "text_prompt": { + "title": "Text Prompt", + "type": "string", + "description": "[DEPRECATED] Use 'prompt' instead. Kept for backward compatibility.", + "deprecated": true + } + }, + "title": "SAM3VideoInput", + "required": [ + "video_url" + ] + }, + "Sam3VideoOutput": { + "x-fal-order-properties": [ + "video", + "boundingbox_frames_zip" + ], + "type": "object", + "properties": { + "boundingbox_frames_zip": { + "title": "Boundingbox Frames Zip", + "description": "Zip file containing per-frame bounding box overlays.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "video": { + "examples": [ + "https://fal.media/files/monkey/5BLHmbX3qxu5cD5gQzTqw_output.mp4" + ], + "title": "Video", + "description": "The segmented video.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "SAM3VideoOutput", + "required": [ + "video" + ] + }, + "BoxPromptBase": { + "x-fal-order-properties": [ + "x_min", + "y_min", + "x_max", + "y_max", + "object_id" + ], + "type": "object", + "properties": { + "y_min": { + "title": "Y Min", + "type": "integer", + "description": "Y Min Coordinate of the box" + }, + "object_id": { + "title": "Object Id", + "type": "integer", + "description": "Optional object identifier. Boxes sharing an object id refine the same object." + }, + "x_max": { + "title": "X Max", + "type": "integer", + "description": "X Max Coordinate of the box" + }, + "x_min": { + "title": "X Min", + "type": "integer", + "description": "X Min Coordinate of the box" + }, + "y_max": { + "title": "Y Max", + "type": "integer", + "description": "Y Max Coordinate of the box" + } + }, + "title": "BoxPromptBase" + }, + "PointPromptBase": { + "x-fal-order-properties": [ + "x", + "y", + "label", + "object_id" + ], + "type": "object", + "properties": { + "y": { + "title": "Y", + "type": "integer", + "description": "Y Coordinate of the prompt" + }, + "x": { + "title": "X", + "type": "integer", + "description": "X Coordinate of the prompt" + }, + "object_id": { + "title": "Object Id", + "type": "integer", + "description": "Optional object identifier. Prompts sharing an object id refine the same object." + }, + "label": { + "enum": [ + 0, + 1 + ], + "title": "Label", + "type": "integer", + "description": "1 for foreground, 0 for background" + } + }, + "title": "PointPromptBase" + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." 
+ }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/sam-3/video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sam-3/video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/sam-3/video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sam3VideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sam-3/video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sam3VideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/editto", + "metadata": { + "display_name": "Editto", + "category": "video-to-video", + "description": "Edit videos using instruction-based prompting using Editto model!", + "status": "active", + "tags": [ + "video-edit", + "wan-vace" + ], + "updated_at": "2026-01-26T21:42:24.083Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/panda/W64UaJMRRXDKIQKOyArBO_27c8587eded040778cd5c777b9f71035.jpg", + "model_url": "https://fal.run/fal-ai/editto", + "license_type": "commercial", + "date": "2025-11-12T11:40:08.233Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/editto", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/editto queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/editto", + "category": "video-to-video", + "thumbnailUrl": 
"https://v3b.fal.media/files/b/panda/W64UaJMRRXDKIQKOyArBO_27c8587eded040778cd5c777b9f71035.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/editto", + "documentationUrl": "https://fal.ai/models/fal-ai/editto/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "EdittoInput": { + "title": "EdittoInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Make it a Pixel Art video." + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt to guide video generation." + }, + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/editto/example_in.mp4" + ], + "title": "Video URL", + "type": "string", + "description": "URL to the source video file. Required for inpainting." + }, + "acceleration": { + "anyOf": [ + { + "enum": [ + "none", + "low", + "regular" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Acceleration", + "description": "Acceleration to use for inference. Options are 'none' or 'regular'. Accelerated inference will very slightly affect output, but will be significantly faster.", + "examples": [ + "regular" + ], + "default": "regular" + }, + "num_interpolated_frames": { + "description": "Number of frames to interpolate between the original frames. A value of 0 means no interpolation.", + "type": "integer", + "minimum": 0, + "maximum": 5, + "examples": [ + 0 + ], + "title": "Number of Interpolated Frames", + "default": 0 + }, + "temporal_downsample_factor": { + "description": "Temporal downsample factor for the video. This is an integer value that determines how many frames to skip in the video. A value of 0 means no downsampling. For each downsample factor, one upsample factor will automatically be applied.", + "type": "integer", + "minimum": 0, + "maximum": 5, + "examples": [ + 0 + ], + "title": "Temporal Downsample Factor", + "default": 0 + }, + "shift": { + "minimum": 1, + "maximum": 15, + "type": "number", + "title": "Shift", + "description": "Shift parameter for video generation.", + "default": 5 + }, + "frames_per_second": { + "anyOf": [ + { + "minimum": 5, + "maximum": 30, + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Frames per Second", + "description": "Frames per second of the generated video. Must be between 5 to 30. 
Ignored if match_input_frames_per_second is true.", + "default": 16 + }, + "match_input_num_frames": { + "examples": [ + true + ], + "title": "Match Input Number of Frames", + "type": "boolean", + "description": "If true, the number of frames in the generated video will match the number of frames in the input video. If false, the number of frames will be determined by the num_frames parameter.", + "default": false + }, + "guidance_scale": { + "description": "Guidance scale for classifier-free guidance. Higher values encourage the model to generate images closely related to the text prompt.", + "type": "number", + "minimum": 1, + "maximum": 10, + "examples": [ + 5 + ], + "title": "Guidance Scale", + "default": 5 + }, + "num_frames": { + "minimum": 17, + "maximum": 241, + "type": "integer", + "title": "Number of Frames", + "description": "Number of frames to generate. Must be between 81 to 241 (inclusive).", + "default": 81 + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": false + }, + "negative_prompt": { + "examples": [ + "色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for video generation.", + "default": "letterboxing, borders, black bars, bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + }, + "sampler": { + "enum": [ + "unipc", + "dpm++", + "euler" + ], + "title": "Sampler", + "type": "string", + "description": "Sampler to use for video generation.", + "examples": [ + "unipc" + ], + "default": "unipc" + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "description": "The write mode of the generated video.", + "examples": [ + "balanced" + ], + "default": "balanced" + }, + "resolution": { + "enum": [ + "auto", + "240p", + "360p", + "480p", + "580p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video.", + "default": "auto" + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "1:1", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio of the generated video.", + "default": "auto" + }, + "return_frames_zip": { + "examples": [ + false + ], + "title": "Return Frames Zip", + "type": "boolean", + "description": "If true, also return a ZIP file containing all generated frames.", + "default": false + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "description": "The quality of the generated video.", + "examples": [ + "high" + ], + "default": "high" + }, + "match_input_frames_per_second": { + "examples": [ + true + ], + "title": "Match Input Frames Per Second", + "type": "boolean", + "description": "If true, the frames per second of the generated video will match the input video. 
If false, the frames per second will be determined by the frames_per_second parameter.", + "default": false + }, + "sync_mode": { + "examples": [ + false + ], + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "num_inference_steps": { + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "type": "integer", + "minimum": 2, + "maximum": 50, + "examples": [ + 50 + ], + "title": "Number of Inference Steps", + "default": 30 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. If None, a random seed is chosen." + }, + "enable_auto_downsample": { + "examples": [ + false + ], + "title": "Enable Auto Downsample", + "type": "boolean", + "description": "If true, the model will automatically temporally downsample the video to an appropriate frame length for the model, then will interpolate it back to the original frame length.", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "match_input_num_frames", + "num_frames", + "match_input_frames_per_second", + "frames_per_second", + "seed", + "resolution", + "aspect_ratio", + "num_inference_steps", + "guidance_scale", + "sampler", + "shift", + "video_url", + "enable_safety_checker", + "acceleration", + "video_quality", + "video_write_mode", + "num_interpolated_frames", + "temporal_downsample_factor", + "enable_auto_downsample", + "sync_mode", + "return_frames_zip" + ], + "required": [ + "prompt", + "video_url" + ] + }, + "EdittoOutput": { + "title": "EdittoOutput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Make it a Pixel Art video." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation." + }, + "frames_zip": { + "anyOf": [ + { + "$ref": "#/components/schemas/File" + }, + { + "type": "null" + } + ], + "description": "ZIP archive of all video frames if requested." + }, + "seed": { + "examples": [ + 1096772785 + ], + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "height": 480, + "duration": 4.86, + "url": "https://storage.googleapis.com/falserverless/example_outputs/editto/example_out.mp4", + "fps": 15, + "width": 832, + "file_name": "example_out.mp4", + "content_type": "video/mp4", + "num_frames": 73 + } + ], + "description": "The generated image to video file.", + "$ref": "#/components/schemas/VideoFile" + } + }, + "x-fal-order-properties": [ + "video", + "prompt", + "seed", + "frames_zip" + ], + "required": [ + "video", + "prompt", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. 
It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "duration": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Duration", + "description": "The duration of the video" + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the video" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the video" + }, + "fps": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Fps", + "description": "The FPS of the video" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "num_frames": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Num Frames", + "description": "The number of frames in the video" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/editto/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/editto/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/editto": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/EdittoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/editto/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/EdittoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/flashvsr/upscale/video", + "metadata": { + "display_name": "Flashvsr", + "category": "video-to-video", + "description": "Upscale your videos using FlashVSR with the fastest speeds!", + "status": "active", + "tags": [ + "upscale", + "video-to-video" + ], + "updated_at": "2026-01-26T21:42:25.770Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/zebra/bj0FLRnM-ZgEQvTLXgax5_2c719e57677a4a05992eb71173b5d28f.jpg", + "model_url": "https://fal.run/fal-ai/flashvsr/upscale/video", + "license_type": "commercial", + "date": "2025-11-11T01:52:01.139Z", + "group": { + "key": "flashvsr", + "label": "Upscale Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/flashvsr/upscale/video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/flashvsr/upscale/video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/flashvsr/upscale/video", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/zebra/bj0FLRnM-ZgEQvTLXgax5_2c719e57677a4a05992eb71173b5d28f.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/flashvsr/upscale/video", + "documentationUrl": "https://fal.ai/models/fal-ai/flashvsr/upscale/video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
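The `editto` spec above follows the queue contract that every endpoint in this catalog repeats: POST the input to `https://queue.fal.run/<endpoint>`, receive a `QueueStatus` carrying a `request_id`, poll the `/status` path until `COMPLETED`, then GET the result (the `/cancel` path takes a PUT). A minimal TypeScript sketch of that loop against the editto paths, assuming a `FAL_KEY` environment variable and fal's `Key <token>` authorization format (the schema only names the `Authorization` header):

```ts
// Minimal queue client for the paths documented above.
// Assumptions beyond the schema: FAL_KEY env var, "Key <token>" auth
// scheme, Node 18+ for the global fetch.
type QueueStatus = {
  status: 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED'
  request_id: string
  response_url?: string
  status_url?: string
  cancel_url?: string
  queue_position?: number
}

const BASE = 'https://queue.fal.run'
const headers = {
  Authorization: `Key ${process.env.FAL_KEY}`,
  'Content-Type': 'application/json',
}

async function runEditto(input: { prompt: string; video_url: string }) {
  // Submit: POST /fal-ai/editto returns a QueueStatus, not the result.
  const submit = await fetch(`${BASE}/fal-ai/editto`, {
    method: 'POST',
    headers,
    body: JSON.stringify(input),
  })
  let status: QueueStatus = await submit.json()

  // Poll GET /fal-ai/editto/requests/{id}/status until COMPLETED.
  // (A PUT to the .../cancel path would abort the request instead.)
  while (status.status !== 'COMPLETED') {
    await new Promise((r) => setTimeout(r, 2000))
    const res = await fetch(
      `${BASE}/fal-ai/editto/requests/${status.request_id}/status?logs=1`,
      { headers },
    )
    status = await res.json()
  }

  // Fetch the EdittoOutput from GET /fal-ai/editto/requests/{id}.
  const result = await fetch(
    `${BASE}/fal-ai/editto/requests/${status.request_id}`,
    { headers },
  )
  return result.json() // { video, prompt, seed, frames_zip? }
}
```

Only `prompt` and `video_url` are required by `EdittoInput`; every other field has a documented default.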
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FlashvsrUpscaleVideoInput": { + "title": "FlashVSRPlusVideoInput", + "type": "object", + "properties": { + "video_url": { + "examples": [ + "https://public-bucket-20251031-222058.s3.us-west-2.amazonaws.com/example_nighttime.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "The input video to be upscaled" + }, + "acceleration": { + "enum": [ + "regular", + "high", + "full" + ], + "title": "Acceleration", + "type": "string", + "description": "Acceleration mode for VAE decoding. Options: regular (best quality), high (balanced), full (fastest). More acceleration means longer duration videos can be processed too.", + "default": "regular" + }, + "quality": { + "minimum": 0, + "maximum": 100, + "type": "integer", + "title": "Quality", + "description": "Quality level for tile blending (0-100). Controls overlap between tiles to prevent grid artifacts. Higher values provide better quality with more overlap. Recommended: 70-85 for high-res videos, 50-70 for faster processing.", + "default": 70 + }, + "output_format": { + "enum": [ + "X264 (.mp4)", + "VP9 (.webm)", + "PRORES4444 (.mov)", + "GIF (.gif)" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output video.", + "default": "X264 (.mp4)" + }, + "color_fix": { + "title": "Color Fix", + "type": "boolean", + "description": "Color correction enabled.", + "default": true + }, + "output_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Output Write Mode", + "type": "string", + "description": "The write mode of the output video.", + "default": "balanced" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned inline and not stored in history.", + "default": false + }, + "output_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Output Quality", + "type": "string", + "description": "The quality of the output video.", + "default": "high" + }, + "upscale_factor": { + "minimum": 1, + "maximum": 4, + "type": "number", + "title": "Upscale Factor", + "description": "Upscaling factor to be used.", + "default": 2 + }, + "preserve_audio": { + "title": "Preserve Audio", + "type": "boolean", + "description": "Copy the original audio tracks into the upscaled video using FFmpeg when possible.", + "default": false + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The random seed used for the generation process." + } + }, + "x-fal-order-properties": [ + "upscale_factor", + "seed", + "sync_mode", + "video_url", + "acceleration", + "color_fix", + "quality", + "preserve_audio", + "output_format", + "output_quality", + "output_write_mode" + ], + "description": "Input fields common to FlashVSR+ image/video endpoints.", + "required": [ + "video_url" + ] + }, + "FlashvsrUpscaleVideoOutput": { + "title": "FlashVSRPlusVideoOutput", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "The random seed used for the generation process."
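Since these schemas ship as JSON, consumers will usually want a typed mirror. A hand-derived interface for the `FlashvsrUpscaleVideoInput` schema above (property names, enums, and defaults are the schema's own; the interface itself is illustrative, not generated by this repo):

```ts
// Hand-derived from the FlashvsrUpscaleVideoInput schema above. Only
// video_url is required; everything else falls back to the documented
// defaults noted in the comments.
interface FlashvsrUpscaleVideoInput {
  video_url: string
  upscale_factor?: number // 1–4, default 2
  acceleration?: 'regular' | 'high' | 'full' // default 'regular'
  quality?: number // tile-blend quality 0–100, default 70
  color_fix?: boolean // default true
  preserve_audio?: boolean // default false
  output_format?:
    | 'X264 (.mp4)'
    | 'VP9 (.webm)'
    | 'PRORES4444 (.mov)'
    | 'GIF (.gif)' // default 'X264 (.mp4)'
  output_quality?: 'low' | 'medium' | 'high' | 'maximum' // default 'high'
  output_write_mode?: 'fast' | 'balanced' | 'small' // default 'balanced'
  sync_mode?: boolean // default false
  seed?: number
}
```

Per the `quality` description, 70–85 suits high-resolution sources and 50–70 trades quality for speed.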
+ }, + "video": { + "title": "Video", + "description": "Upscaled video file after processing", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/flashvsr/upscale/video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flashvsr/upscale/video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/flashvsr/upscale/video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FlashvsrUpscaleVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/flashvsr/upscale/video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FlashvsrUpscaleVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/workflow-utilities/auto-subtitle", + "metadata": { + "display_name": "Workflow Utilities", + "category": "video-to-video", + "description": "Add automatic subtitles to videos", + "status": "active", + "tags": [ + "auto-subtitle", + "captioning" + ], + "updated_at": "2026-01-26T21:42:26.769Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/zebra/-R1ZjOAerDNnOUrhWNeUv_22f1d53a808040bd99ec01459e4f9547.jpg", + "model_url": "https://fal.run/fal-ai/workflow-utilities/auto-subtitle", + "license_type": "commercial", + "date": "2025-11-04T14:48:45.210Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/workflow-utilities/auto-subtitle", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/workflow-utilities/auto-subtitle queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/workflow-utilities/auto-subtitle", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/zebra/-R1ZjOAerDNnOUrhWNeUv_22f1d53a808040bd99ec01459e4f9547.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/workflow-utilities/auto-subtitle", + "documentationUrl": "https://fal.ai/models/fal-ai/workflow-utilities/auto-subtitle/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
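Rather than hand-rolling the queue loop, the official fal JavaScript client wraps the same submit/poll/result paths. A sketch calling the FlashVSR endpoint above with `@fal-ai/client` (that package is outside this diff, so treat the exact API surface as an assumption):

```ts
import { fal } from '@fal-ai/client'

// Assumes the official fal JS client; its subscribe() helper wraps the
// same queue endpoints shown above (submit, poll status, fetch result).
fal.config({ credentials: process.env.FAL_KEY })

const result = await fal.subscribe('fal-ai/flashvsr/upscale/video', {
  input: {
    video_url: 'https://example.com/input.mp4', // placeholder URL
    upscale_factor: 2,
    acceleration: 'regular',
  },
  logs: true,
  onQueueUpdate: (update) => {
    if (update.status === 'IN_PROGRESS') console.log('processing…')
  },
})

// FlashvsrUpscaleVideoOutput: { video, seed }
console.log(result.data.video.url)
```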
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "WorkflowUtilitiesAutoSubtitleInput": { + "title": "AutoSubtitleInput", + "type": "object", + "properties": { + "font_weight": { + "enum": [ + "normal", + "bold", + "black" + ], + "title": "Font Weight", + "type": "string", + "description": "Font weight (TikTok style typically uses bold or black)", + "default": "bold" + }, + "video_url": { + "x-fal": { + "timeout": 30, + "max_file_size": 100000000 + }, + "title": "Video Url", + "type": "string", + "examples": [ + "https://v3b.fal.media/files/b/kangaroo/oUCiZjQwEy6bIQdPUSLDF_output.mp4" + ], + "description": "URL of the video file to add automatic subtitles to\n\nMax file size: 95.4MB, Timeout: 30.0s" + }, + "stroke_width": { + "minimum": 0, + "title": "Stroke Width", + "type": "integer", + "maximum": 10, + "description": "Text stroke/outline width in pixels (0 for no stroke)", + "default": 3 + }, + "font_color": { + "enum": [ + "white", + "black", + "red", + "green", + "blue", + "yellow", + "orange", + "purple", + "pink", + "brown", + "gray", + "cyan", + "magenta" + ], + "title": "Font Color", + "type": "string", + "description": "Subtitle text color for non-active words", + "default": "white" + }, + "font_size": { + "minimum": 20, + "title": "Font Size", + "type": "integer", + "maximum": 150, + "description": "Font size for subtitles (TikTok style uses larger text)", + "default": 100 + }, + "language": { + "examples": [ + "en", + "es", + "fr", + "de", + "it", + "eng", + "spa", + "fra" + ], + "title": "Language", + "type": "string", + "description": "Language code for transcription (e.g., 'en', 'es', 'fr', 'de', 'it', 'pt', 'nl', 'ja', 'zh', 'ko') or 3-letter ISO code (e.g., 'eng', 'spa', 'fra')", + "default": "en" + }, + "y_offset": { + "minimum": -200, + "title": "Y Offset", + "type": "integer", + "maximum": 200, + "description": "Vertical offset in pixels (positive = move down, negative = move up)", + "default": 75 + }, + "background_opacity": { + "minimum": 0, + "title": "Background Opacity", + "type": "number", + "maximum": 1, + "description": "Background opacity (0.0 = fully transparent, 1.0 = fully opaque)", + "default": 0 + }, + "stroke_color": { + "enum": [ + "black", + "white", + "red", + "green", + "blue", + "yellow", + "orange", + "purple", + "pink", + "brown", + "gray", + "cyan", + "magenta" + ], + "title": "Stroke Color", + "type": "string", + "description": "Text stroke/outline color", + "default": "black" + }, + "highlight_color": { + "enum": [ + "white", + "black", + "red", + "green", + "blue", + "yellow", + "orange", + "purple", + "pink", + "brown", + "gray", + "cyan", + "magenta" + ], + "title": "Highlight Color", + "type": "string", + "description": "Color for the currently speaking word (karaoke-style highlight)", + "default": "purple" + }, + "enable_animation": { + "title": "Enable Animation", + "type": "boolean", + "description": "Enable animation effects for subtitles (bounce style entrance)", + "default": true + }, + "font_name": { + "examples": [ + "Montserrat", + "Poppins", + "Bebas Neue", + "Oswald", + "Inter", + "Roboto", + "BBH Sans Hegarty" + ], + "title": "Font Name", + "type": "string", + "description": "Any Google Font name from fonts.google.com (e.g., 'Montserrat', 'Poppins', 'BBH Sans Hegarty')", + "default": "Montserrat" + }, + "position": { + "enum": [ + "top", + "center", + "bottom" + ], + "title": "Position", + "type": "string", + "description": "Vertical position of subtitles", + "default": "bottom" + }, + "words_per_subtitle": { + 
"description": "Maximum number of words per subtitle segment. Use 1 for single-word display, 2-3 for short phrases, or 8-12 for full sentences.", + "type": "integer", + "minimum": 1, + "title": "Words Per Subtitle", + "examples": [ + 1, + 3, + 6, + 12 + ], + "maximum": 12, + "default": 3 + }, + "background_color": { + "enum": [ + "black", + "white", + "red", + "green", + "blue", + "yellow", + "orange", + "purple", + "pink", + "brown", + "gray", + "cyan", + "magenta", + "none", + "transparent" + ], + "title": "Background Color", + "type": "string", + "description": "Background color behind text ('none' or 'transparent' for no background)", + "default": "none" + } + }, + "description": "Input model for automatic subtitle generation and styling", + "x-fal-order-properties": [ + "video_url", + "language", + "font_name", + "font_size", + "font_weight", + "font_color", + "highlight_color", + "stroke_width", + "stroke_color", + "background_color", + "background_opacity", + "position", + "y_offset", + "words_per_subtitle", + "enable_animation" + ], + "required": [ + "video_url" + ] + }, + "WorkflowUtilitiesAutoSubtitleOutput": { + "title": "AutoSubtitleOutput", + "type": "object", + "properties": { + "transcription": { + "title": "Transcription", + "type": "string", + "description": "Full transcription text" + }, + "subtitle_count": { + "title": "Subtitle Count", + "type": "integer", + "description": "Number of subtitle segments generated" + }, + "transcription_metadata": { + "title": "Transcription Metadata", + "type": "object", + "description": "Additional transcription metadata from ElevenLabs (language, segments, etc.)" + }, + "words": { + "title": "Words", + "type": "array", + "description": "Word-level timing information from transcription service", + "items": { + "type": "object" + } + }, + "video": { + "examples": [ + { + "file_size": 16789234, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://v3b.fal.media/files/b/monkey/HPBSoe-QsAxSIkDh7Zn76_output.mp4" + } + ], + "title": "Video", + "description": "The video with automatic subtitles", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "description": "Output model for video with automatic subtitles", + "x-fal-order-properties": [ + "video", + "transcription", + "subtitle_count", + "words", + "transcription_metadata" + ], + "required": [ + "video", + "transcription", + "subtitle_count" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/workflow-utilities/auto-subtitle/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/workflow-utilities/auto-subtitle/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/workflow-utilities/auto-subtitle": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WorkflowUtilitiesAutoSubtitleInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/workflow-utilities/auto-subtitle/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WorkflowUtilitiesAutoSubtitleOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/bytedance-upscaler/upscale/video", + "metadata": { + "display_name": "Bytedance Upscaler", + "category": "video-to-video", + "description": "Upscale videos with Bytedance's video upscaler.", + "status": "active", + "tags": [ + "upscaler", + "video", + "bytedance" + ], + "updated_at": "2026-01-26T21:42:28.182Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/kangaroo/LorVpq4fAORdnn1YHRM-A_d801feeaa1764cefb7de0c3381ecaf84.jpg", + "model_url": "https://fal.run/fal-ai/bytedance-upscaler/upscale/video", + "license_type": "commercial", + "date": "2025-10-31T16:15:01.762Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/bytedance-upscaler/upscale/video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/bytedance-upscaler/upscale/video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/bytedance-upscaler/upscale/video", + "category": "video-to-video", + "thumbnailUrl": 
"https://v3b.fal.media/files/b/kangaroo/LorVpq4fAORdnn1YHRM-A_d801feeaa1764cefb7de0c3381ecaf84.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/bytedance-upscaler/upscale/video", + "documentationUrl": "https://fal.ai/models/fal-ai/bytedance-upscaler/upscale/video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "BytedanceUpscalerUpscaleVideoInput": { + "title": "UpscaleInput", + "type": "object", + "properties": { + "target_fps": { + "enum": [ + "30fps", + "60fps" + ], + "title": "Target Fps", + "type": "string", + "description": "The target FPS of the video to upscale.", + "default": "30fps" + }, + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/bytedance_video_upscaler_input.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "The URL of the video to upscale." + }, + "target_resolution": { + "enum": [ + "1080p", + "2k", + "4k" + ], + "title": "Target Resolution", + "type": "string", + "description": "The target resolution of the video to upscale.", + "default": "1080p" + } + }, + "x-fal-order-properties": [ + "video_url", + "target_resolution", + "target_fps" + ], + "required": [ + "video_url" + ] + }, + "BytedanceUpscalerUpscaleVideoOutput": { + "title": "UpscaleOutput", + "type": "object", + "properties": { + "duration": { + "title": "Duration", + "type": "number", + "description": "Duration of audio input/video output as used for billing." + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/bytedance_video_upscaler_output.mp4" + } + ], + "title": "Video", + "description": "Generated video file", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "duration" + ], + "required": [ + "video", + "duration" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/bytedance-upscaler/upscale/video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bytedance-upscaler/upscale/video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/bytedance-upscaler/upscale/video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BytedanceUpscalerUpscaleVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/bytedance-upscaler/upscale/video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BytedanceUpscalerUpscaleVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/video-as-prompt", + "metadata": { + "display_name": "Video As Prompt", + "category": "video-to-video", + "description": "A model for unified semantic control in video generation. 
It animates a static reference image using the motion and semantics from a reference video as a prompt.", + "status": "active", + "tags": [ + "video-as-prompt", + "semantic control", + "" + ], + "updated_at": "2026-01-26T21:42:30.688Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/lion/E7C3iMDaxEJjuRqOAuKva_6067278d4ea244d6b1932dffb51b3913.jpg", + "model_url": "https://fal.run/fal-ai/video-as-prompt", + "license_type": "commercial", + "date": "2025-10-29T14:24:16.699Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/video-as-prompt", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/video-as-prompt queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/video-as-prompt", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/lion/E7C3iMDaxEJjuRqOAuKva_6067278d4ea244d6b1932dffb51b3913.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/video-as-prompt", + "documentationUrl": "https://fal.ai/models/fal-ai/video-as-prompt/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "VideoAsPromptInput": { + "title": "VideoEffectInputWan", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A chestnut-colored horse stands on a grassy hill against a backdrop of distant, snow-dusted mountains. The horse begins to inflate, its defined, muscular body swelling and rounding into a smooth, balloon-like form while retaining its rich, brown hide color. Without changing its orientation, the now-buoyant horse lifts silently from the ground. It begins a steady vertical ascent, rising straight up and eventually floating out of the top of the frame. The camera remains completely static throughout the entire sequence, holding a fixed shot on the landscape as the horse transforms and departs, ensuring the verdant hill and mountain range in the background stay perfectly still." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate an image from." 
+ }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio of the generated video.", + "default": "9:16" + }, + "resolution": { + "enum": [ + "480p", + "580p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video.", + "default": "480p" + }, + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/bytedance-vid2pro/object-725.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "reference video to generate effect video from." + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/bytedance-vid2pro/animal-2.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "Input image to generate the effect video for." + }, + "fps": { + "minimum": 1, + "title": "Frames Per Second", + "type": "integer", + "maximum": 60, + "description": "Frames per second for the output video. Only applicable if output_type is 'video'.", + "default": 16 + }, + "video_description": { + "examples": [ + "A hand holds up a single beige sneaker decorated with gold calligraphy and floral illustrations, with small green plants tucked inside. The sneaker immediately begins to inflate like a balloon, its shape distorting as the decorative details stretch and warp across the expanding surface. It rapidly transforms into a perfectly smooth, matte beige sphere, inheriting the primary color from the original shoe. Once the transformation is complete, the new balloon-like object quickly ascends, moving straight up and exiting the top of the frame. The camera remains completely static and the plain white background is unchanged throughout the entire sequence." + ], + "title": "Video Description", + "type": "string", + "description": "A brief description of the input video content." + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducible generation. If set none, a random seed will be used." + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance Scale", + "type": "number", + "maximum": 20, + "description": "Guidance scale for generation.", + "default": 5 + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "num_frames": { + "minimum": 1, + "title": "Num Frames", + "type": "integer", + "maximum": 100, + "description": "The number of frames to generate.", + "default": 49 + } + }, + "x-fal-order-properties": [ + "prompt", + "video_url", + "image_url", + "seed", + "num_frames", + "fps", + "video_description", + "enable_safety_checker", + "aspect_ratio", + "resolution", + "guidance_scale" + ], + "required": [ + "prompt", + "video_url", + "image_url", + "video_description" + ] + }, + "VideoAsPromptOutput": { + "title": "VideoEffectOutput", + "type": "object", + "properties": { + "video": { + "description": "The URLs of the generated video.", + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." 
+ }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/video-as-prompt/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/video-as-prompt/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/video-as-prompt": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VideoAsPromptInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/video-as-prompt/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VideoAsPromptOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/birefnet/v2/video", + "metadata": { + "display_name": "Birefnet", + "category": "video-to-video", + "description": "Video background removal version of bilateral reference framework (BiRefNet) for high-resolution dichotomous image segmentation (DIS)\n", + "status": "active", + "tags": [ + "utility", + "editing" + ], + "updated_at": "2026-01-26T21:42:32.399Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/monkey/SKrLsOM3RMOk-QuNj8-io_6356be1b301e454394d2e0a853b98c76.jpg", + "model_url": "https://fal.run/fal-ai/birefnet/v2/video", + "license_type": "commercial", + "date": "2025-10-26T18:13:23.036Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/birefnet/v2/video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/birefnet/v2/video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/birefnet/v2/video", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/monkey/SKrLsOM3RMOk-QuNj8-io_6356be1b301e454394d2e0a853b98c76.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/birefnet/v2/video", + "documentationUrl": "https://fal.ai/models/fal-ai/birefnet/v2/video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "BirefnetV2VideoInput": { + "title": "VideoInputV2", + "type": "object", + "properties": { + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "description": "The write mode of the generated video.", + "default": "balanced" + }, + "video_output_type": { + "enum": [ + "X264 (.mp4)", + "VP9 (.webm)", + "PRORES4444 (.mov)", + "GIF (.gif)" + ], + "title": "Video Output Type", + "type": "string", + "description": "The output type of the generated video.", + "default": "X264 (.mp4)" + }, + "operating_resolution": { + "enum": [ + "1024x1024", + "2048x2048", + "2304x2304" + ], + "title": "Operating Resolution", + "type": "string", + "description": "The resolution to operate on. The higher the resolution, the more accurate the output will be for high res input images. The '2304x2304' option is only available for the 'General Use (Dynamic)' model.", + "default": "1024x1024" + }, + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/birefnet-video-input.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "URL of the video to remove background from" + }, + "model": { + "enum": [ + "General Use (Light)", + "General Use (Light 2K)", + "General Use (Heavy)", + "Matting", + "Portrait", + "General Use (Dynamic)" + ], + "title": "Model", + "type": "string", + "description": "\n Model to use for background removal.\n The 'General Use (Light)' model is the original model used in the BiRefNet repository.\n The 'General Use (Light 2K)' model is the original model used in the BiRefNet repository but trained with 2K images.\n The 'General Use (Heavy)' model is a slower but more accurate model.\n The 'Matting' model is a model trained specifically for matting images.\n The 'Portrait' model is a model trained specifically for portrait images.\n The 'General Use (Dynamic)' model supports dynamic resolutions from 256x256 to 2304x2304.\n The 'General Use (Light)' model is recommended for most use cases.\n\n The corresponding models are as follows:\n - 'General Use (Light)': BiRefNet\n - 'General Use (Light 2K)': BiRefNet_lite-2K\n - 'General Use (Heavy)': BiRefNet_lite\n - 'Matting': BiRefNet-matting\n - 'Portrait': BiRefNet-portrait\n - 'General Use (Dynamic)': BiRefNet_dynamic\n ", + "default": "General Use (Light)" + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "description": "The quality of the generated video.", + "default": "high" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "output_mask": { + "title": "Output Mask", + "type": "boolean", + "description": "Whether to output the mask used to remove the background", + "default": false + }, + "refine_foreground": { + "title": "Refine Foreground", + "type": "boolean", + "description": "Whether to refine the foreground using the estimated mask", + "default": true + } + }, + "x-fal-order-properties": [ + "model", + "operating_resolution", + "output_mask", + "refine_foreground", + "sync_mode", + "video_url", + "video_output_type", + "video_quality", + "video_write_mode" + ], + "required": [ + "video_url" + ] + }, + "BirefnetV2VideoOutput": { + "title": "VideoOutput", + "type": "object", + "properties": { + 
"video": { + "examples": [ + { + "height": 1080, + "duration": 8, + "url": "https://storage.googleapis.com/falserverless/example_outputs/birefnet-video-output.webm", + "fps": 24, + "width": 1920, + "file_name": "birefnet-video-output.webm", + "content_type": "video/webm", + "num_frames": 192 + } + ], + "title": "Video", + "description": "Video with background removed", + "allOf": [ + { + "$ref": "#/components/schemas/VideoFile" + } + ] + }, + "mask_video": { + "title": "Mask Video", + "description": "Mask used to remove the background", + "allOf": [ + { + "$ref": "#/components/schemas/VideoFile" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "mask_video" + ], + "required": [ + "video" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the video" + }, + "duration": { + "title": "Duration", + "type": "number", + "description": "The duration of the video" + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "fps": { + "title": "Fps", + "type": "number", + "description": "The FPS of the video" + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the video" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "num_frames": { + "title": "Num Frames", + "type": "integer", + "description": "The number of frames in the video" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/birefnet/v2/video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/birefnet/v2/video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/birefnet/v2/video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BirefnetV2VideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/birefnet/v2/video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BirefnetV2VideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/vidu/q2/video-extension/pro", + "metadata": { + "display_name": "Vidu", + "category": "video-to-video", + "description": "Use the latest Vidu Q2 models, which offer much better quality and control over your videos.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:32.915Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/penguin/w0OmEqIT3ttJ87CyIv4YE_9fef0096f9f44fa7af8012a25eae3fa7.jpg", + "model_url": "https://fal.run/fal-ai/vidu/q2/video-extension/pro", + "license_type": "commercial", + "date": "2025-10-24T10:03:48.146Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/vidu/q2/video-extension/pro", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/vidu/q2/video-extension/pro queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/vidu/q2/video-extension/pro", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/penguin/w0OmEqIT3ttJ87CyIv4YE_9fef0096f9f44fa7af8012a25eae3fa7.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/vidu/q2/video-extension/pro", + "documentationUrl": "https://fal.ai/models/fal-ai/vidu/q2/video-extension/pro/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position."
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ViduQ2VideoExtensionProInput": { + "title": "Q2VideoExtensionRequest", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "maxLength": 3000, + "description": "text prompt to guide the video extension" + }, + "duration": { + "enum": [ + 2, + 3, + 4, + 5, + 6, + 7 + ], + "title": "Duration", + "type": "integer", + "description": "Duration of the extension in seconds", + "default": 4 + }, + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/video_models/output-3.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "URL of the video to extend" + }, + "resolution": { + "enum": [ + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "Output video resolution", + "default": "720p" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility. If None, a random seed is chosen." + } + }, + "x-fal-order-properties": [ + "video_url", + "prompt", + "seed", + "duration", + "resolution" + ], + "required": [ + "video_url" + ] + }, + "ViduQ2VideoExtensionProOutput": { + "title": "Q2VideoExtensionOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://v3b.fal.media/files/b/zebra/wSaP_lTetcD47ErjwsVGE_output.mp4" + } + ], + "title": "Video", + "description": "The extended video using the Q2 model", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/vidu/q2/video-extension/pro/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
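The Vidu Q2 extension input is tightly constrained: `duration` is an enum of whole seconds from 2 to 7, `prompt` is capped at 3000 characters, and `resolution` is `720p` or `1080p`. A small guarded builder (illustrative, not part of the diff):

```ts
// Extension payload builder for fal-ai/vidu/q2/video-extension/pro.
// duration must be one of the schema's enum values (whole seconds, 2–7)
// and prompt is capped at 3000 characters (maxLength in the schema).
const EXTENSION_DURATIONS = [2, 3, 4, 5, 6, 7] as const
type ExtensionDuration = (typeof EXTENSION_DURATIONS)[number]

function buildExtensionInput(
  videoUrl: string,
  prompt: string,
  duration: ExtensionDuration = 4, // schema default
) {
  if (prompt.length > 3000) {
    throw new Error('prompt exceeds the 3000-character limit')
  }
  return {
    video_url: videoUrl,
    prompt,
    duration,
    resolution: '720p' as const, // or '1080p'
  }
}
```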
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/vidu/q2/video-extension/pro/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/vidu/q2/video-extension/pro": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ViduQ2VideoExtensionProInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/vidu/q2/video-extension/pro/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ViduQ2VideoExtensionProOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "mirelo-ai/sfx-v1.5/video-to-video", + "metadata": { + "display_name": "Mirelo SFX V1.5", + "category": "video-to-video", + "description": "Generate synced sounds for any video, and return it with its new sound track (like MMAudio)", + "status": "active", + "tags": [ + "video-to-video", + "sfx" + ], + "updated_at": "2026-01-26T21:42:39.808Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/penguin/ySuE0GYF98p7-856X4mvc_9371de55f14448e08b76871b7d77c059.jpg", + "model_url": "https://fal.run/mirelo-ai/sfx-v1.5/video-to-video", + "license_type": "commercial", + "date": "2025-10-15T11:11:27.394Z", + "group": { + "key": "mirelo-sfx-v1.5", + "label": "Video To Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for mirelo-ai/sfx-v1.5/video-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the mirelo-ai/sfx-v1.5/video-to-video queue.", + "x-fal-metadata": { + "endpointId": "mirelo-ai/sfx-v1.5/video-to-video", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/penguin/ySuE0GYF98p7-856X4mvc_9371de55f14448e08b76871b7d77c059.jpg", + "playgroundUrl": "https://fal.ai/models/mirelo-ai/sfx-v1.5/video-to-video", + "documentationUrl": "https://fal.ai/models/mirelo-ai/sfx-v1.5/video-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The 
request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "SfxV15VideoToVideoInput": { + "title": "Input", + "type": "object", + "properties": { + "num_samples": { + "anyOf": [ + { + "minimum": 2, + "maximum": 8, + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Num Samples", + "description": "The number of samples to generate from the model", + "default": 2 + }, + "duration": { + "anyOf": [ + { + "minimum": 1, + "maximum": 10, + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The duration of the generated audio in seconds", + "title": "Duration", + "default": 10 + }, + "start_offset": { + "anyOf": [ + { + "minimum": 0, + "type": "number" + }, + { + "type": "null" + } + ], + "description": "The start offset in seconds to start the audio generation from", + "title": "Start Offset", + "default": 0 + }, + "video_url": { + "format": "uri", + "description": "A video url that can be accessed from the API to process and add sound effects", + "type": "string", + "examples": [ + "https://di3otfzjg1gxa.cloudfront.net/battlefield_scene_silent.mp4" + ], + "title": "Video Url", + "minLength": 1, + "maxLength": 2083 + }, + "seed": { + "anyOf": [ + { + "minimum": 1, + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The seed to use for the generation. If not provided, a random seed will be used", + "title": "Seed", + "default": 8069 + }, + "text_prompt": { + "examples": [ + "" + ], + "title": "Text Prompt", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Additional description to guide the model" + } + }, + "x-fal-order-properties": [ + "video_url", + "text_prompt", + "num_samples", + "seed", + "duration", + "start_offset" + ], + "required": [ + "video_url" + ] + }, + "SfxV15VideoToVideoOutput": { + "x-fal-order-properties": [ + "video" + ], + "type": "object", + "properties": { + "video": { + "examples": [ + [ + { + "file_name": "generated_output_1.mp4", + "content_type": "video/mp4", + "url": "https://di3otfzjg1gxa.cloudfront.net/battlefield_scene_output_1.mp4" + }, + { + "file_name": "generated_output_2.mp4", + "content_type": "video/mp4", + "url": "https://di3otfzjg1gxa.cloudfront.net/battlefield_scene_output_2.mp4" + } + ] + ], + "description": "The processed video with sound effects", + "type": "array", + "title": "Video", + "items": { + "$ref": "#/components/schemas/Video-Output" + } + } + }, + "title": "VideoOutput", + "required": [ + "video" + ] + }, + "Video-Output": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "description": "The name of the file. 
It will be auto-generated if not provided.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ] + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "title": "Video", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/mirelo-ai/sfx-v1.5/video-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/mirelo-ai/sfx-v1.5/video-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/mirelo-ai/sfx-v1.5/video-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SfxV15VideoToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/mirelo-ai/sfx-v1.5/video-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SfxV15VideoToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/krea-wan-14b/video-to-video", + "metadata": { + "display_name": "Krea Wan 14B", + "category": "video-to-video", + "description": "Superfast video model based on Wan 2.1 14b by Krea, excelling at real-time video-editing.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:39.932Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/panda/3KnxV95bno9CU_SVwV0t8_aff74947261f49b4bf8a51a050881c86.jpg", + "model_url": "https://fal.run/fal-ai/krea-wan-14b/video-to-video", + "license_type": "commercial", + "date": "2025-10-14T16:06:51.280Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/krea-wan-14b/video-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the 
fal-ai/krea-wan-14b/video-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/krea-wan-14b/video-to-video", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/panda/3KnxV95bno9CU_SVwV0t8_aff74947261f49b4bf8a51a050881c86.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/krea-wan-14b/video-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/krea-wan-14b/video-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "KreaWan14bVideoToVideoInput": { + "title": "VideoToVideoInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A powerful, matte black jeep, its robust frame contrasting with the lush green surroundings, navigates a winding jungle road, kicking up small clouds of dust and loose earth from its tires." + ], + "title": "Prompt", + "type": "string", + "description": "Prompt for the video-to-video generation." + }, + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/krea_wan_14b_v2v_input.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "URL of the input video. Currently, only outputs of 16:9 aspect ratio and 480p resolution are supported. Video duration should be less than 1000 frames at 16fps, and output frames will be 6 plus a multiple of 12, for example 18, 30, 42, etc." + }, + "strength": { + "minimum": 0.01, + "maximum": 1, + "type": "number", + "title": "Strength", + "description": "Denoising strength for the video-to-video generation. 0.0 preserves the original, 1.0 completely remakes the video.", + "default": 0.85 + }, + "enable_prompt_expansion": { + "examples": [ + true + ], + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning.", + "default": false + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Seed for the video-to-video generation." 
+ } + }, + "x-fal-order-properties": [ + "prompt", + "strength", + "enable_prompt_expansion", + "video_url", + "seed" + ], + "required": [ + "prompt", + "video_url" + ] + }, + "KreaWan14bVideoToVideoOutput": { + "title": "VideoToVideoOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/krea_wan_14b_v2v_output.mp4" + } + ], + "description": "The generated video file.", + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/krea-wan-14b/video-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/krea-wan-14b/video-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/krea-wan-14b/video-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KreaWan14bVideoToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/krea-wan-14b/video-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/KreaWan14bVideoToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/sora-2/video-to-video/remix", + "metadata": { + "display_name": "Sora 2", + "category": "video-to-video", + "description": "Video-to-video remix endpoint for Sora 2, OpenAI’s advanced model that transforms existing videos based on new text or image prompts, allowing rich edits, style changes, and creative reinterpretations while preserving motion and structure", + "status": "active", + "tags": [ + "video to video", + "audio", + "sora" + ], + "updated_at": "2026-01-26T21:42:42.579Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/tiger/Wz6FyBTYFs2Lf8e9AoXED_966c4e27910e4979b2628d5666dd2ea0.jpg", + "model_url": "https://fal.run/fal-ai/sora-2/video-to-video/remix", + "license_type": "commercial", + "date": "2025-10-08T23:13:08.227Z", + "group": { + "key": "sora-2", + "label": "Video to Video (Remix)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/sora-2/video-to-video/remix", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/sora-2/video-to-video/remix queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/sora-2/video-to-video/remix", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/tiger/Wz6FyBTYFs2Lf8e9AoXED_966c4e27910e4979b2628d5666dd2ea0.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/sora-2/video-to-video/remix", + "documentationUrl": "https://fal.ai/models/fal-ai/sora-2/video-to-video/remix/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Sora2VideoToVideoRemixInput": { + "x-fal-order-properties": [ + "video_id", + "prompt", + "delete_video" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Change the cat's fur color to purple." + ], + "maxLength": 5000, + "type": "string", + "minLength": 1, + "title": "Prompt", + "description": "Updated text prompt that directs the remix generation" + }, + "video_id": { + "examples": [ + "video_123" + ], + "title": "Video ID", + "type": "string", + "description": "The video_id from a previous Sora 2 generation. Note: You can only remix videos that were generated by Sora (via text-to-video or image-to-video endpoints), not arbitrary uploaded videos." + }, + "delete_video": { + "description": "Whether to delete the video after generation for privacy reasons. If True, the video cannot be used for remixing and will be permanently deleted.", + "type": "boolean", + "title": "Delete Video", + "default": true + } + }, + "title": "RemixInput", + "required": [ + "video_id", + "prompt" + ] + }, + "Sora2VideoToVideoRemixOutput": { + "x-fal-order-properties": [ + "video", + "video_id", + "thumbnail", + "spritesheet" + ], + "type": "object", + "properties": { + "spritesheet": { + "title": "Spritesheet", + "description": "Spritesheet image for the video", + "allOf": [ + { + "$ref": "#/components/schemas/ImageFile" + } + ] + }, + "thumbnail": { + "title": "Thumbnail", + "description": "Thumbnail image for the video", + "allOf": [ + { + "$ref": "#/components/schemas/ImageFile" + } + ] + }, + "video_id": { + "examples": [ + "video_123" + ], + "title": "Video ID", + "type": "string", + "description": "The ID of the generated video" + }, + "video": { + "examples": [ + { + "content_type": "video/mp4", + "url": "https://v3b.fal.media/files/b/rabbit/nk1MK6LY90QqScvI4_Yn8.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/VideoFile" + } + ] + } + }, + "title": "RemixOutput", + "required": [ + "video", + "video_id" + ] + }, + "ImageFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the image" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the image" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "ImageFile", + "required": [ + "url" + ] + }, + "VideoFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "duration": { + "title": "Duration", + "type": "number", + "description": "The duration of the video" + }, + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the video" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "fps": { + "title": "Fps", + "type": "number", + "description": "The FPS of the video" + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the video" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "num_frames": { + "title": "Num Frames", + "type": "integer", + "description": "The number of frames in the video" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "VideoFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/sora-2/video-to-video/remix/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sora-2/video-to-video/remix/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/sora-2/video-to-video/remix": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sora2VideoToVideoRemixInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sora-2/video-to-video/remix/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sora2VideoToVideoRemixOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan-vace-apps/long-reframe", + "metadata": { + "display_name": "Wan 2.1 VACE Long Reframe", + "category": "video-to-video", + "description": "Reframe entire videos scene-by-scene using Wan VACE 2.1", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:44.359Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/monkey/djppEVsT608o2Uk-qVVnF_db844977aa314ad4aed7f765afde7a2a.jpg", + "model_url": "https://fal.run/fal-ai/wan-vace-apps/long-reframe", + "license_type": "commercial", + "date": "2025-10-07T23:30:40.266Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan-vace-apps/long-reframe", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan-vace-apps/long-reframe queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan-vace-apps/long-reframe", + "category": "video-to-video", + "thumbnailUrl": "https://v3b.fal.media/files/b/monkey/djppEVsT608o2Uk-qVVnF_db844977aa314ad4aed7f765afde7a2a.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan-vace-apps/long-reframe", + "documentationUrl": "https://fal.ai/models/fal-ai/wan-vace-apps/long-reframe/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanVaceAppsLongReframeInput": { + "title": "LongWanVACEReframeRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "" + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt to guide video generation. Optional for reframing.", + "default": "" + }, + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/web-examples/wan/t2v.mp4" + ], + "title": "Video URL", + "type": "string", + "description": "URL to the source video file. This video will be used as a reference for the reframe task." + }, + "acceleration": { + "enum": [ + "none", + "low", + "regular" + ], + "title": "Acceleration", + "type": "string", + "description": "Acceleration to use for inference. Options are 'none' or 'regular'. Accelerated inference will very slightly affect output, but will be significantly faster.", + "examples": [ + "regular" + ], + "default": "regular" + }, + "paste_back": { + "title": "Paste Back", + "type": "boolean", + "description": "Whether to paste back the reframed scene to the original video.", + "default": true + }, + "zoom_factor": { + "description": "Zoom factor for the video. When this value is greater than 0, the video will be zoomed in by this factor (in relation to the canvas size,) cutting off the edges of the video. A value of 0 means no zoom.", + "type": "number", + "minimum": 0, + "maximum": 0.9, + "examples": [ + 0 + ], + "title": "Zoom Factor", + "default": 0 + }, + "shift": { + "minimum": 1, + "maximum": 15, + "type": "number", + "title": "Shift", + "description": "Shift parameter for video generation.", + "default": 5 + }, + "scene_threshold": { + "minimum": 0, + "maximum": 100, + "type": "number", + "title": "Scene Threshold", + "description": "Threshold for scene detection sensitivity (0-100). Lower values detect more scenes.", + "default": 30 + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": false + }, + "guidance_scale": { + "description": "Guidance scale for classifier-free guidance. 
Higher values encourage the model to generate images closely related to the text prompt.", + "type": "number", + "minimum": 1, + "maximum": 10, + "examples": [ + 5 + ], + "title": "Guidance Scale", + "default": 5 + }, + "auto_downsample_min_fps": { + "description": "Minimum FPS for auto downsample.", + "type": "number", + "minimum": 1, + "maximum": 60, + "examples": [ + 6 + ], + "title": "Auto Downsample Min Fps", + "default": 6 + }, + "negative_prompt": { + "examples": [ + "letterboxing, borders, black bars, bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for video generation.", + "default": "letterboxing, borders, black bars, bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + }, + "sampler": { + "enum": [ + "unipc", + "dpm++", + "euler" + ], + "title": "Sampler", + "type": "string", + "description": "Sampler to use for video generation.", + "examples": [ + "unipc" + ], + "default": "unipc" + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "description": "The write mode of the generated video.", + "examples": [ + "balanced" + ], + "default": "balanced" + }, + "resolution": { + "enum": [ + "auto", + "240p", + "360p", + "480p", + "580p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video.", + "default": "auto" + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "1:1", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio of the generated video.", + "default": "auto" + }, + "return_frames_zip": { + "examples": [ + false + ], + "title": "Return Frames Zip", + "type": "boolean", + "description": "If true, also return a ZIP file containing all generated frames.", + "default": false + }, + "trim_borders": { + "examples": [ + true + ], + "title": "Trim Borders", + "type": "boolean", + "description": "Whether to trim borders from the video.", + "default": true + }, + "transparency_mode": { + "enum": [ + "content_aware", + "white", + "black" + ], + "title": "Transparency Mode", + "type": "string", + "description": "The transparency mode to apply to the first and last frames. 
This controls how the transparent areas of the first and last frames are filled.", + "examples": [ + "content_aware" + ], + "default": "content_aware" + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "description": "The quality of the generated video.", + "examples": [ + "high" + ], + "default": "high" + }, + "sync_mode": { + "examples": [ + false + ], + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility. If None, a random seed is chosen." + }, + "interpolator_model": { + "enum": [ + "rife", + "film" + ], + "title": "Interpolator Model", + "type": "string", + "description": "The model to use for frame interpolation. Options are 'rife' or 'film'.", + "examples": [ + "film" + ], + "default": "film" + }, + "enable_auto_downsample": { + "examples": [ + true + ], + "title": "Enable Auto Downsample", + "type": "boolean", + "description": "Whether to enable auto downsample.", + "default": true + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 50, + "type": "integer", + "title": "Number of Inference Steps", + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "default": 30 + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "seed", + "resolution", + "aspect_ratio", + "num_inference_steps", + "guidance_scale", + "sampler", + "shift", + "video_url", + "enable_safety_checker", + "acceleration", + "video_quality", + "video_write_mode", + "enable_auto_downsample", + "auto_downsample_min_fps", + "interpolator_model", + "sync_mode", + "transparency_mode", + "return_frames_zip", + "zoom_factor", + "trim_borders", + "scene_threshold", + "paste_back" + ], + "required": [ + "video_url" + ] + }, + "WanVaceAppsLongReframeOutput": { + "title": "LongWanVACEReframeResponse", + "type": "object", + "properties": { + "video": { + "title": "Video", + "description": "The output video file.", + "allOf": [ + { + "$ref": "#/components/schemas/VideoFile" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "duration": { + "title": "Duration", + "type": "number", + "description": "The duration of the video" + }, + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the video" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the video" + }, + "fps": { + "title": "Fps", + "type": "number", + "description": "The FPS of the video" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "num_frames": { + "title": "Num Frames", + "type": "integer", + "description": "The number of frames in the video" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan-vace-apps/long-reframe/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-vace-apps/long-reframe/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/wan-vace-apps/long-reframe": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanVaceAppsLongReframeInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-vace-apps/long-reframe/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanVaceAppsLongReframeOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/infinitalk/video-to-video", + "metadata": { + "display_name": "Infinitalk", + "category": "video-to-video", + "description": "Infinitalk model generates a talking avatar video from an image and audio file. 
The avatar lip-syncs to the provided audio with natural facial expressions.", + "status": "active", + "tags": [ + "video-to-video" + ], + "updated_at": "2026-01-26T21:42:49.806Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/lion/CKSKBDtMz0kao3Ege3oAj_0b61adf1742146f2b743efc7921003f3.jpg", + "model_url": "https://fal.run/fal-ai/infinitalk/video-to-video", + "license_type": "commercial", + "date": "2025-09-22T20:17:35.243Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/infinitalk/video-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/infinitalk/video-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/infinitalk/video-to-video", + "category": "video-to-video", + "thumbnailUrl": "https://v3.fal.media/files/lion/CKSKBDtMz0kao3Ege3oAj_0b61adf1742146f2b743efc7921003f3.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/infinitalk/video-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/infinitalk/video-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "InfinitalkVideoToVideoInput": { + "title": "InfiniTalkVid2VidAudioRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A woman with colorful hair talking on a podcast." + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt to guide video generation." + }, + "resolution": { + "enum": [ + "480p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the video to generate. Must be either 480p or 720p.", + "default": "480p" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use for generation.", + "default": "regular" + }, + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/video_models/ref_video.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "URL of the input video." + }, + "audio_url": { + "examples": [ + "https://v3.fal.media/files/penguin/PtiCYda53E9Dav25QmQYI_output.mp3" + ], + "title": "Audio URL", + "type": "string", + "description": "The URL of the audio file." + }, + "num_frames": { + "minimum": 41, + "maximum": 241, + "type": "integer", + "title": "Number of Frames", + "description": "Number of frames to generate. Must be between 81 and 129 (inclusive). 
If the number of frames is greater than 81, the video will be generated with 1.25x more billing units.", + "default": 145 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility. If None, a random seed is chosen.", + "default": 42 + } + }, + "x-fal-order-properties": [ + "video_url", + "audio_url", + "prompt", + "num_frames", + "resolution", + "seed", + "acceleration" + ], + "required": [ + "video_url", + "audio_url", + "prompt" + ] + }, + "InfinitalkVideoToVideoOutput": { + "title": "InfinitalkVid2VidResponse", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/model_tests/video_models/mk7Ar5IvTtyNjWLRMb-re_dbe605004b664258b38528615afd7e0f.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/infinitalk/video-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/infinitalk/video-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/infinitalk/video-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InfinitalkVideoToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/infinitalk/video-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InfinitalkVideoToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/seedvr/upscale/video", + "metadata": { + "display_name": "SeedVR2", + "category": "video-to-video", + "description": "Upscale your videos using SeedVR2 with temporal consistency!", + "status": "active", + "tags": [ + "upscale", + "video-to-video" + ], + "updated_at": "2026-01-26T21:42:49.960Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/koala/cAs3xUC9Lx-jlQh-27-60_4dddbb5a9bac42da86ddf20a5b9d30b3.jpg", + "model_url": "https://fal.run/fal-ai/seedvr/upscale/video", + "license_type": "commercial", + "date": "2025-09-22T18:29:49.293Z", + "group": { + "key": "seedvr2", + "label": "Upscale Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/seedvr/upscale/video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/seedvr/upscale/video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/seedvr/upscale/video", + "category": "video-to-video", + "thumbnailUrl": "https://v3.fal.media/files/koala/cAs3xUC9Lx-jlQh-27-60_4dddbb5a9bac42da86ddf20a5b9d30b3.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/seedvr/upscale/video", + "documentationUrl": "https://fal.ai/models/fal-ai/seedvr/upscale/video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "SeedvrUpscaleVideoInput": { + "title": "SeedVRVideoInput", + "type": "object", + "properties": { + "upscale_mode": { + "enum": [ + "target", + "factor" + ], + "title": "Upscale Mode", + "type": "string", + "description": "The mode to use for the upscale. If 'target', the upscale factor will be calculated based on the target resolution. If 'factor', the upscale factor will be used directly.", + "default": "factor" + }, + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/seedvr-input.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "The input video to be processed" + }, + "noise_scale": { + "description": "The noise scale to use for the generation process.", + "type": "number", + "minimum": 0, + "maximum": 1, + "title": "Noise Scale", + "multipleOf": 0.001, + "default": 0.1 + }, + "output_format": { + "enum": [ + "X264 (.mp4)", + "VP9 (.webm)", + "PRORES4444 (.mov)", + "GIF (.gif)" + ], + "title": "Output Format", + "type": "string", + "description": "The format of the output video.", + "default": "X264 (.mp4)" + }, + "output_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Output Write Mode", + "type": "string", + "description": "The write mode of the output video.", + "default": "balanced" + }, + "target_resolution": { + "enum": [ + "720p", + "1080p", + "1440p", + "2160p" + ], + "title": "Target Resolution", + "type": "string", + "description": "The target resolution to upscale to when `upscale_mode` is `target`.", + "default": "1080p" + }, + "output_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Output Quality", + "type": "string", + "description": "The quality of the output video.", + "default": "high" + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "upscale_factor": { + "minimum": 1, + "maximum": 10, + "type": "number", + "title": "Upscale Factor", + "description": "Upscaling factor to be used. Will multiply the dimensions with this factor when `upscale_mode` is `factor`.", + "default": 2 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "The random seed used for the generation process." + } + }, + "x-fal-order-properties": [ + "video_url", + "upscale_mode", + "upscale_factor", + "target_resolution", + "seed", + "noise_scale", + "output_format", + "output_quality", + "output_write_mode", + "sync_mode" + ], + "required": [ + "video_url" + ] + }, + "SeedvrUpscaleVideoOutput": { + "title": "SeedVRVideoOutput", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "The random seed used for the generation process." 
+ }, + "video": { + "examples": [ + { + "content_type": "video/mp4", + "url": "https://storage.googleapis.com/falserverless/example_outputs/seedvr-output.mp4" + } + ], + "description": "Upscaled video file after processing", + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/seedvr/upscale/video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/seedvr/upscale/video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/seedvr/upscale/video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SeedvrUpscaleVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/seedvr/upscale/video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SeedvrUpscaleVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan-vace-apps/video-edit", + "metadata": { + "display_name": "Wan VACE Video Edit", + "category": "video-to-video", + "description": "Edit videos using plain language and Wan VACE", + "status": "active", + "tags": [ + "video-edit", + "wan-vace" + ], + "updated_at": "2026-01-26T21:42:50.216Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/elephant/STesgR1tPPvf0dornjZou_377f4dd48d49425198c3396e86328a8b.jpg", + "model_url": "https://fal.run/fal-ai/wan-vace-apps/video-edit", + "license_type": "commercial", + "date": "2025-09-22T03:14:28.550Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan-vace-apps/video-edit", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan-vace-apps/video-edit queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan-vace-apps/video-edit", + "category": "video-to-video", + "thumbnailUrl": "https://v3.fal.media/files/elephant/STesgR1tPPvf0dornjZou_377f4dd48d49425198c3396e86328a8b.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan-vace-apps/video-edit", + "documentationUrl": "https://fal.ai/models/fal-ai/wan-vace-apps/video-edit/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanVaceAppsVideoEditInput": { + "title": "WanVACEVideoEditRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "replace him with a large anthropomorphic polar bear" + ], + "title": "Prompt", + "type": "string", + "description": "Prompt to edit the video." + }, + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/vace-video-edit-input.mp4" + ], + "title": "Video URL", + "type": "string", + "description": "URL of the input video." + }, + "acceleration": { + "enum": [ + "none", + "low", + "regular" + ], + "title": "Acceleration", + "type": "string", + "description": "Acceleration to use for inference. Options are 'none' or 'regular'. Accelerated inference will very slightly affect output, but will be significantly faster.", + "examples": [ + "regular" + ], + "default": "regular" + }, + "resolution": { + "enum": [ + "auto", + "240p", + "360p", + "480p", + "580p", + "720p" + ], + "description": "Resolution of the edited video.", + "type": "string", + "examples": [ + "auto" + ], + "title": "Resolution", + "default": "auto" + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "9:16", + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio of the edited video.", + "examples": [ + "auto" + ], + "default": "auto" + }, + "return_frames_zip": { + "title": "Return Frames ZIP", + "type": "boolean", + "description": "Whether to include a ZIP archive containing all generated frames.", + "default": false + }, + "enable_safety_checker": { + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "video_type": { + "enum": [ + "auto", + "general", + "human" + ], + "title": "Video Type", + "type": "string", + "description": "The type of video you're editing. Use 'general' for most videos, and 'human' for videos emphasizing human subjects and motions. The default value 'auto' means the model will guess based on the first frame of the video.", + "examples": [ + "auto" + ], + "default": "auto" + }, + "image_urls": { + "title": "Image URLs", + "type": "array", + "description": "URLs of the input images to use as a reference for the generation.", + "items": { + "type": "string" + }, + "default": [] + }, + "enable_auto_downsample": { + "title": "Enable Auto Downsampling", + "type": "boolean", + "description": "Whether to enable automatic downsampling. If your video has a high frame rate or is long, enabling longer sequences to be generated. 
The video will be interpolated back to the original frame rate after generation.", + "default": true + }, + "auto_downsample_min_fps": { + "minimum": 1, + "maximum": 60, + "type": "number", + "title": "Auto Downsample Min FPS", + "description": "The minimum frames per second to downsample the video to.", + "default": 15 + } + }, + "x-fal-order-properties": [ + "prompt", + "video_url", + "video_type", + "image_urls", + "resolution", + "acceleration", + "enable_auto_downsample", + "aspect_ratio", + "auto_downsample_min_fps", + "enable_safety_checker", + "return_frames_zip" + ], + "required": [ + "prompt", + "video_url" + ] + }, + "WanVaceAppsVideoEditOutput": { + "title": "WanVACEVideoEditResponse", + "type": "object", + "properties": { + "frames_zip": { + "title": "Frames Zip", + "description": "ZIP archive of generated frames if requested.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/vace-video-edit-output.mp4" + } + ], + "title": "Video", + "description": "The edited video.", + "allOf": [ + { + "$ref": "#/components/schemas/VideoFile" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "frames_zip" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "duration": { + "title": "Duration", + "type": "number", + "description": "The duration of the video" + }, + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the video" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the video" + }, + "fps": { + "title": "Fps", + "type": "number", + "description": "The FPS of the video" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "num_frames": { + "title": "Num Frames", + "type": "integer", + "description": "The number of frames in the video" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan-vace-apps/video-edit/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-vace-apps/video-edit/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/wan-vace-apps/video-edit": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanVaceAppsVideoEditInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-vace-apps/video-edit/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanVaceAppsVideoEditOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan/v2.2-14b/animate/replace", + "metadata": { + "display_name": "Wan-2.2 Animate Replace", + "category": "video-to-video", + "description": "Wan-Animate Replace is a model that can integrate animated characters into reference videos, replacing the original character while preserving the scene’s lighting and color tone for seamless environmental integration.", + "status": "active", + "tags": [ + "video to video", + "motion" + ], + "updated_at": "2026-01-26T21:42:50.348Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/penguin/YKoL7e0b41wIerWU6PA6x_7fcafa06db85466e9864c8f006cb13a7.jpg", + "model_url": "https://fal.run/fal-ai/wan/v2.2-14b/animate/replace", + "license_type": "commercial", + "date": "2025-09-21T22:34:12.372Z", + "group": { + "key": "wan-v22-animate", + "label": "Replace" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + 
"openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan/v2.2-14b/animate/replace", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan/v2.2-14b/animate/replace queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan/v2.2-14b/animate/replace", + "category": "video-to-video", + "thumbnailUrl": "https://v3.fal.media/files/penguin/YKoL7e0b41wIerWU6PA6x_7fcafa06db85466e9864c8f006cb13a7.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan/v2.2-14b/animate/replace", + "documentationUrl": "https://fal.ai/models/fal-ai/wan/v2.2-14b/animate/replace/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanV2214bAnimateReplaceInput": { + "title": "WanAnimateMoveRequest", + "type": "object", + "properties": { + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "description": "The write mode of the output video. Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size.", + "type": "string", + "title": "Video Write Mode", + "examples": [ + "balanced" + ], + "default": "balanced" + }, + "video_url": { + "examples": [ + "https://v3b.fal.media/files/b/panda/a6SvJg96V8eoglMlYFShU_5385885-hd_1080_1920_25fps.mp4" + ], + "description": "URL of the input video.", + "type": "string", + "title": "Video URL" + }, + "resolution": { + "enum": [ + "480p", + "580p", + "720p" + ], + "description": "Resolution of the generated video (480p, 580p, or 720p).", + "type": "string", + "title": "Resolution", + "examples": [ + "480p" + ], + "default": "480p" + }, + "return_frames_zip": { + "description": "If true, also return a ZIP archive containing per-frame images generated on GPU (lossless).", + "type": "boolean", + "title": "Return Frames ZIP", + "default": false + }, + "shift": { + "description": "Shift value for the video. Must be between 1.0 and 10.0.", + "type": "number", + "minimum": 1, + "maximum": 10, + "title": "Shift", + "examples": [ + 8 + ], + "default": 5 + }, + "enable_output_safety_checker": { + "examples": [ + false + ], + "description": "If set to true, output video will be checked for safety after generation.", + "type": "boolean", + "title": "Enable Output Safety Checker", + "default": false + }, + "image_url": { + "examples": [ + "https://v3b.fal.media/files/b/panda/-oMlZo9Yyj_Nzoza_tgds_GmLF86r5bOt50eMMKCszy_eacc949b3933443c9915a83c98fbe85e.png" + ], + "description": "URL of the input image. 
If the input image does not match the chosen aspect ratio, it is resized and center cropped.", + "type": "string", + "title": "Image URL" + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "description": "The quality of the output video. Higher quality means better visual quality but larger file size.", + "type": "string", + "title": "Video Quality", + "examples": [ + "high" + ], + "default": "high" + }, + "enable_safety_checker": { + "examples": [ + true + ], + "description": "If set to true, input data will be checked for safety before processing.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": false + }, + "num_inference_steps": { + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "type": "integer", + "minimum": 2, + "maximum": 40, + "title": "Number of Inference Steps", + "examples": [ + 6 + ], + "default": 20 + }, + "use_turbo": { + "examples": [ + true + ], + "description": "If true, applies quality enhancement for faster generation with improved quality. When enabled, parameters are automatically optimized for best results.", + "type": "boolean", + "title": "Use Turbo", + "default": false + }, + "guidance_scale": { + "description": "Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.", + "type": "number", + "minimum": 1, + "maximum": 10, + "title": "Guidance Scale", + "examples": [ + 1 + ], + "default": 1 + }, + "seed": { + "description": "Random seed for reproducibility. If None, a random seed is chosen.", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "video_url", + "image_url", + "guidance_scale", + "resolution", + "seed", + "num_inference_steps", + "enable_safety_checker", + "enable_output_safety_checker", + "shift", + "video_quality", + "video_write_mode", + "return_frames_zip", + "use_turbo" + ], + "required": [ + "video_url", + "image_url" + ] + }, + "WanV2214bAnimateReplaceOutput": { + "title": "WanAnimateReplaceResponse", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "" + ], + "description": "The prompt used for generation (auto-generated by the model)", + "type": "string", + "title": "Prompt" + }, + "frames_zip": { + "description": "ZIP archive of generated frames (if requested).", + "title": "Frames Zip", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "seed": { + "examples": [ + 1416721728 + ], + "description": "The seed used for generation", + "type": "integer", + "title": "Seed" + }, + "video": { + "examples": [ + { + "url": "https://v3b.fal.media/files/b/elephant/9Ofgiju3Peb3b5hriTuBH_wan_animate_output.mp4" + } + ], + "description": "The generated video file.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "frames_zip", + "prompt", + "seed" + ], + "required": [ + "video", + "prompt", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan/v2.2-14b/animate/replace/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-14b/animate/replace/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-14b/animate/replace": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanV2214bAnimateReplaceInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-14b/animate/replace/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanV2214bAnimateReplaceOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan/v2.2-14b/animate/move", + "metadata": { + "display_name": "Wan-2.2 Animate Move", + "category": "video-to-video", + "description": "Wan-Animate is a video model that generates high-fidelity character videos by replicating the expressions and movements of characters from reference videos.", + "status": "active", + "tags": [ + "video to video", + "motion" + ], + "updated_at": "2026-01-26T21:42:50.477Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/lion/uXR8uNNcwPze8WMhqrlWO_f4e668e0be7a4a57bf6b45c93bf75199.jpg", + "model_url": "https://fal.run/fal-ai/wan/v2.2-14b/animate/move", + "license_type": "commercial", + "date": "2025-09-21T22:31:45.413Z", + "group": { + "key": 
"wan-v22-animate", + "label": "Move" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan/v2.2-14b/animate/move", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan/v2.2-14b/animate/move queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan/v2.2-14b/animate/move", + "category": "video-to-video", + "thumbnailUrl": "https://v3.fal.media/files/lion/uXR8uNNcwPze8WMhqrlWO_f4e668e0be7a4a57bf6b45c93bf75199.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan/v2.2-14b/animate/move", + "documentationUrl": "https://fal.ai/models/fal-ai/wan/v2.2-14b/animate/move/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanV2214bAnimateMoveInput": { + "title": "WanAnimateMoveRequest", + "type": "object", + "properties": { + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "description": "The write mode of the output video. Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size.", + "type": "string", + "title": "Video Write Mode", + "examples": [ + "balanced" + ], + "default": "balanced" + }, + "video_url": { + "examples": [ + "https://v3b.fal.media/files/b/panda/a6SvJg96V8eoglMlYFShU_5385885-hd_1080_1920_25fps.mp4" + ], + "description": "URL of the input video.", + "type": "string", + "title": "Video URL" + }, + "resolution": { + "enum": [ + "480p", + "580p", + "720p" + ], + "description": "Resolution of the generated video (480p, 580p, or 720p).", + "type": "string", + "title": "Resolution", + "examples": [ + "480p" + ], + "default": "480p" + }, + "return_frames_zip": { + "description": "If true, also return a ZIP archive containing per-frame images generated on GPU (lossless).", + "type": "boolean", + "title": "Return Frames ZIP", + "default": false + }, + "shift": { + "description": "Shift value for the video. 
Must be between 1.0 and 10.0.", + "type": "number", + "minimum": 1, + "maximum": 10, + "title": "Shift", + "examples": [ + 8 + ], + "default": 5 + }, + "enable_output_safety_checker": { + "examples": [ + false + ], + "description": "If set to true, output video will be checked for safety after generation.", + "type": "boolean", + "title": "Enable Output Safety Checker", + "default": false + }, + "image_url": { + "examples": [ + "https://v3b.fal.media/files/b/panda/-oMlZo9Yyj_Nzoza_tgds_GmLF86r5bOt50eMMKCszy_eacc949b3933443c9915a83c98fbe85e.png" + ], + "description": "URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped.", + "type": "string", + "title": "Image URL" + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "description": "The quality of the output video. Higher quality means better visual quality but larger file size.", + "type": "string", + "title": "Video Quality", + "examples": [ + "high" + ], + "default": "high" + }, + "enable_safety_checker": { + "examples": [ + true + ], + "description": "If set to true, input data will be checked for safety before processing.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": false + }, + "num_inference_steps": { + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "type": "integer", + "minimum": 2, + "maximum": 40, + "title": "Number of Inference Steps", + "examples": [ + 6 + ], + "default": 20 + }, + "use_turbo": { + "examples": [ + true + ], + "description": "If true, applies quality enhancement for faster generation with improved quality. When enabled, parameters are automatically optimized for best results.", + "type": "boolean", + "title": "Use Turbo", + "default": false + }, + "guidance_scale": { + "description": "Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.", + "type": "number", + "minimum": 1, + "maximum": 10, + "title": "Guidance Scale", + "examples": [ + 1 + ], + "default": 1 + }, + "seed": { + "description": "Random seed for reproducibility. 
If None, a random seed is chosen.", + "type": "integer", + "title": "Seed" + } + }, + "x-fal-order-properties": [ + "video_url", + "image_url", + "guidance_scale", + "resolution", + "seed", + "num_inference_steps", + "enable_safety_checker", + "enable_output_safety_checker", + "shift", + "video_quality", + "video_write_mode", + "return_frames_zip", + "use_turbo" + ], + "required": [ + "video_url", + "image_url" + ] + }, + "WanV2214bAnimateMoveOutput": { + "title": "WanAnimateMoveResponse", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "" + ], + "description": "The prompt used for generation (auto-generated by the model)", + "type": "string", + "title": "Prompt" + }, + "frames_zip": { + "description": "ZIP archive of generated frames (if requested).", + "title": "Frames Zip", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "seed": { + "examples": [ + 1416721728 + ], + "description": "The seed used for generation", + "type": "integer", + "title": "Seed" + }, + "video": { + "examples": [ + { + "url": "https://v3b.fal.media/files/b/monkey/xjJYzO0jqMi7MxufJe5tx_wan_animate_output.mp4" + } + ], + "description": "The generated video file.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "frames_zip", + "prompt", + "seed" + ], + "required": [ + "video", + "prompt", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan/v2.2-14b/animate/move/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-14b/animate/move/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-14b/animate/move": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanV2214bAnimateMoveInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-14b/animate/move/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanV2214bAnimateMoveOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "decart/lucy-edit/pro", + "metadata": { + "display_name": "Lucy Edit [Pro]", + "category": "video-to-video", + "description": "Edit outfits, objects, faces, or restyle your video - all with maximum detail retention.", + "status": "active", + "tags": [ + "video-edit" + ], + "updated_at": "2026-01-26T21:42:51.133Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/kangaroo/_g9nFjAdwfSx3rZckDkCG_7076290e770c43a094a751a42799f16b.jpg", + "model_url": "https://fal.run/decart/lucy-edit/pro", + "license_type": "commercial", + "date": "2025-09-18T16:11:33.328Z", + "group": { + "key": "lucy-edit", + "label": "Pro" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for decart/lucy-edit/pro", + "version": "1.0.0", + "description": "The OpenAPI schema for the decart/lucy-edit/pro queue.", + "x-fal-metadata": { + "endpointId": "decart/lucy-edit/pro", + "category": "video-to-video", + "thumbnailUrl": "https://v3.fal.media/files/kangaroo/_g9nFjAdwfSx3rZckDkCG_7076290e770c43a094a751a42799f16b.jpg", + "playgroundUrl": "https://fal.ai/models/decart/lucy-edit/pro", + "documentationUrl": "https://fal.ai/models/decart/lucy-edit/pro/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "LucyEditProInput": { + "title": "LucyEditProInput", + "type": "object", + "properties": { + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the video to be generated\n and uploaded before returning the response. This will increase the\n latency of the function but it allows you to get the video directly\n in the response without going through the CDN.\n ", + "default": true + }, + "video_url": { + "examples": [ + "https://v3.fal.media/files/monkey/GI7ArkqpQVk3M6V1C_epr_original.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "URL of the video to edit" + }, + "prompt": { + "examples": [ + "Transform the woman's outfit into a regal medieval gown with flowing velvet fabric, intricate gold embroidery, and a jeweled crown, giving her the appearance of a queen." + ], + "maxLength": 1500, + "type": "string", + "title": "Prompt", + "description": "Text description of the desired video content" + }, + "resolution": { + "enum": [ + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video", + "default": "720p" + }, + "enhance_prompt": { + "title": "Enhance Prompt", + "type": "boolean", + "description": "Whether to enhance the prompt for better results.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "video_url", + "resolution", + "sync_mode", + "enhance_prompt" + ], + "required": [ + "prompt", + "video_url" + ] + }, + "LucyEditProOutput": { + "title": "LucyEditProOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 687298, + "file_name": "generated_video.mp4", + "content_type": "video/mp4", + "url": "https://v3b.fal.media/files/b/lion/j1BSX8UnGbBZeJXqSWg2E_generated_video.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/decart/lucy-edit/pro/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/decart/lucy-edit/pro/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/decart/lucy-edit/pro": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LucyEditProInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/decart/lucy-edit/pro/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LucyEditProOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "decart/lucy-edit/dev", + "metadata": { + "display_name": "Lucy Edit [Dev]", + "category": "video-to-video", + "description": "Edit outfits, objects, faces, or restyle your video - all with maximum detail retention.", + "status": "active", + "tags": [ + "video-edit" + ], + "updated_at": "2026-01-26T21:42:51.375Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/tiger/oUaiOy3Nx-SgCqJxX0JaH_e780c641fbd840a6b75d8ba411b9a32d.jpg", + "model_url": "https://fal.run/decart/lucy-edit/dev", + "license_type": "commercial", + "date": "2025-09-18T16:11:20.299Z", + "group": { + "key": "lucy-edit", + "label": "Dev" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for decart/lucy-edit/dev", + "version": "1.0.0", + "description": "The OpenAPI schema for the decart/lucy-edit/dev queue.", + "x-fal-metadata": { + "endpointId": "decart/lucy-edit/dev", + "category": "video-to-video", + "thumbnailUrl": "https://v3.fal.media/files/tiger/oUaiOy3Nx-SgCqJxX0JaH_e780c641fbd840a6b75d8ba411b9a32d.jpg", + "playgroundUrl": "https://fal.ai/models/decart/lucy-edit/dev", + "documentationUrl": "https://fal.ai/models/decart/lucy-edit/dev/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." 
+ }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LucyEditDevInput": { + "title": "LucyEditDevInput", + "type": "object", + "properties": { + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "\n If set to true, the function will wait for the video to be generated\n and uploaded before returning the response. This will increase the\n latency of the function but it allows you to get the video directly\n in the response without going through the CDN.\n ", + "default": true + }, + "video_url": { + "examples": [ + "https://v3.fal.media/files/monkey/GI7ArkqpQVk3M6V1C_epr_original.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "URL of the video to edit" + }, + "prompt": { + "examples": [ + "Transform the woman's outfit into a regal medieval gown with flowing velvet fabric, intricate gold embroidery, and a jeweled crown, giving her the appearance of a queen." + ], + "maxLength": 1500, + "type": "string", + "title": "Prompt", + "description": "Text description of the desired video content" + }, + "enhance_prompt": { + "title": "Enhance Prompt", + "type": "boolean", + "description": "Whether to enhance the prompt for better results.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "video_url", + "sync_mode", + "enhance_prompt" + ], + "required": [ + "prompt", + "video_url" + ] + }, + "LucyEditDevOutput": { + "title": "LucyEditDevOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 687298, + "file_name": "generated_video.mp4", + "content_type": "video/mp4", + "url": "https://v3b.fal.media/files/b/lion/j1BSX8UnGbBZeJXqSWg2E_generated_video.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/decart/lucy-edit/dev/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/decart/lucy-edit/dev/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/decart/lucy-edit/dev": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LucyEditDevInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/decart/lucy-edit/dev/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LucyEditDevOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan-22-vace-fun-a14b/reframe", + "metadata": { + "display_name": "Wan 2.2 VACE Fun A14B", + "category": "video-to-video", + "description": "VACE Fun for Wan 2.2 A14B from Alibaba-PAI", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:53.744Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/kangaroo/H7NXqbD7sGQs5qxcoZfVI_43b0ac4da3e24c9eadcfe61361b379af.jpg", + "model_url": "https://fal.run/fal-ai/wan-22-vace-fun-a14b/reframe", + "license_type": "commercial", + "date": "2025-09-17T21:20:15.056Z", + "group": { + "key": "wan-22-vace-fun", + "label": "Reframe" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan-22-vace-fun-a14b/reframe", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan-22-vace-fun-a14b/reframe queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan-22-vace-fun-a14b/reframe", + "category": "video-to-video", + "thumbnailUrl": "https://fal.media/files/kangaroo/H7NXqbD7sGQs5qxcoZfVI_43b0ac4da3e24c9eadcfe61361b379af.jpg", + "playgroundUrl": 
"https://fal.ai/models/fal-ai/wan-22-vace-fun-a14b/reframe", + "documentationUrl": "https://fal.ai/models/fal-ai/wan-22-vace-fun-a14b/reframe/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Wan22VaceFunA14bReframeInput": { + "title": "WanVACEReframeRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "" + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt to guide video generation. Optional for reframing.", + "default": "" + }, + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/web-examples/wan/t2v.mp4" + ], + "title": "Video URL", + "type": "string", + "description": "URL to the source video file. This video will be used as a reference for the reframe task." + }, + "num_interpolated_frames": { + "description": "Number of frames to interpolate between the original frames. A value of 0 means no interpolation.", + "type": "integer", + "minimum": 0, + "title": "Number of Interpolated Frames", + "examples": [ + 0 + ], + "maximum": 5, + "default": 0 + }, + "temporal_downsample_factor": { + "description": "Temporal downsample factor for the video. This is an integer value that determines how many frames to skip in the video. A value of 0 means no downsampling. For each downsample factor, one upsample factor will automatically be applied.", + "type": "integer", + "minimum": 0, + "title": "Temporal Downsample Factor", + "examples": [ + 0 + ], + "maximum": 5, + "default": 0 + }, + "first_frame_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "First Frame URL", + "description": "URL to the first frame of the video. If provided, the model will use this frame as a reference." + }, + "guidance_scale": { + "description": "Guidance scale for classifier-free guidance. Higher values encourage the model to generate images closely related to the text prompt.", + "type": "number", + "minimum": 1, + "title": "Guidance Scale", + "examples": [ + 5 + ], + "maximum": 10, + "default": 5 + }, + "num_frames": { + "minimum": 17, + "title": "Number of Frames", + "type": "integer", + "maximum": 241, + "description": "Number of frames to generate. Must be between 81 to 241 (inclusive).", + "default": 81 + }, + "auto_downsample_min_fps": { + "description": "The minimum frames per second to downsample the video to. This is used to help determine the auto downsample factor to try and find the lowest detail-preserving downsample factor. 
The default value is appropriate for most videos; if you are using a video with very fast motion, you may need to increase this value. If your video has a very low amount of motion, you could decrease this value to allow for higher downsampling and thus longer sequences.", + "type": "number", + "minimum": 1, + "title": "Auto Downsample Min FPS", + "examples": [ + 15 + ], + "maximum": 60, + "default": 15 + }, + "trim_borders": { + "examples": [ + true + ], + "title": "Trim Borders", + "type": "boolean", + "description": "Whether to trim borders from the video.", + "default": true + }, + "sampler": { + "enum": [ + "unipc", + "dpm++", + "euler" + ], + "title": "Sampler", + "type": "string", + "examples": [ + "unipc" + ], + "description": "Sampler to use for video generation.", + "default": "unipc" + }, + "transparency_mode": { + "enum": [ + "content_aware", + "white", + "black" + ], + "title": "Transparency Mode", + "type": "string", + "examples": [ + "content_aware" + ], + "description": "The transparency mode to apply to the first and last frames. This controls how the transparent areas of the first and last frames are filled.", + "default": "content_aware" + }, + "sync_mode": { + "examples": [ + false + ], + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "examples": [ + "high" + ], + "description": "The quality of the generated video.", + "default": "high" + }, + "enable_prompt_expansion": { + "examples": [ + false + ], + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. If None, a random seed is chosen." + }, + "interpolator_model": { + "enum": [ + "rife", + "film" + ], + "title": "Interpolator Model", + "type": "string", + "examples": [ + "film" + ], + "description": "The model to use for frame interpolation. Options are 'rife' or 'film'.", + "default": "film" + }, + "enable_auto_downsample": { + "examples": [ + false + ], + "title": "Enable Auto Downsample", + "type": "boolean", + "description": "If true, the model will automatically temporally downsample the video to an appropriate frame length for the model, then will interpolate it back to the original frame length.", + "default": false + }, + "shift": { + "minimum": 1, + "title": "Shift", + "type": "number", + "maximum": 15, + "description": "Shift parameter for video generation.", + "default": 5 + }, + "acceleration": { + "anyOf": [ + { + "enum": [ + "none", + "low", + "regular" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Acceleration", + "examples": [ + "regular" + ], + "description": "Acceleration to use for inference. Options are 'none', 'low', or 'regular'. Accelerated inference will very slightly affect output, but will be significantly faster.", + "default": "regular" + }, + "zoom_factor": { + "description": "Zoom factor for the video. When this value is greater than 0, the video will be zoomed in by this factor (in relation to the canvas size), cutting off the edges of the video. 
A value of 0 means no zoom.", + "type": "number", + "minimum": 0, + "title": "Zoom Factor", + "examples": [ + 0 + ], + "maximum": 0.9, + "default": 0 + }, + "frames_per_second": { + "anyOf": [ + { + "minimum": 5, + "maximum": 30, + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Frames per Second", + "description": "Frames per second of the generated video. Must be between 5 to 30. Ignored if match_input_frames_per_second is true.", + "default": 16 + }, + "match_input_num_frames": { + "examples": [ + true + ], + "title": "Match Input Number of Frames", + "type": "boolean", + "description": "If true, the number of frames in the generated video will match the number of frames in the input video. If false, the number of frames will be determined by the num_frames parameter.", + "default": true + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": false + }, + "negative_prompt": { + "examples": [ + "letterboxing, borders, black bars, bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for video generation.", + "default": "letterboxing, borders, black bars, bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "examples": [ + "balanced" + ], + "description": "The write mode of the generated video.", + "default": "balanced" + }, + "resolution": { + "enum": [ + "auto", + "240p", + "360p", + "480p", + "580p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video.", + "default": "auto" + }, + "return_frames_zip": { + "examples": [ + false + ], + "title": "Return Frames Zip", + "type": "boolean", + "description": "If true, also return a ZIP file containing all generated frames.", + "default": false + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "1:1", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio of the generated video.", + "default": "auto" + }, + "match_input_frames_per_second": { + "examples": [ + true + ], + "title": "Match Input Frames Per Second", + "type": "boolean", + "description": "If true, the frames per second of the generated video will match the input video. If false, the frames per second will be determined by the frames_per_second parameter.", + "default": true + }, + "num_inference_steps": { + "minimum": 2, + "title": "Number of Inference Steps", + "type": "integer", + "maximum": 50, + "description": "Number of inference steps for sampling. 
Higher values give better quality but take longer.", + "default": 30 + }, + "last_frame_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Last Frame URL", + "description": "URL to the last frame of the video. If provided, the model will use this frame as a reference." + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "match_input_num_frames", + "num_frames", + "match_input_frames_per_second", + "frames_per_second", + "seed", + "resolution", + "aspect_ratio", + "num_inference_steps", + "guidance_scale", + "sampler", + "shift", + "video_url", + "first_frame_url", + "last_frame_url", + "enable_safety_checker", + "enable_prompt_expansion", + "acceleration", + "video_quality", + "video_write_mode", + "num_interpolated_frames", + "temporal_downsample_factor", + "enable_auto_downsample", + "auto_downsample_min_fps", + "interpolator_model", + "sync_mode", + "transparency_mode", + "return_frames_zip", + "zoom_factor", + "trim_borders" + ], + "required": [ + "video_url" + ] + }, + "Wan22VaceFunA14bReframeOutput": { + "title": "WanVACEReframeResponse", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation." + }, + "frames_zip": { + "anyOf": [ + { + "$ref": "#/components/schemas/File" + }, + { + "type": "null" + } + ], + "description": "ZIP archive of all video frames if requested." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/wan-vace-reframe-output.mp4" + } + ], + "description": "The generated reframe video file.", + "$ref": "#/components/schemas/VideoFile" + } + }, + "x-fal-order-properties": [ + "video", + "prompt", + "seed", + "frames_zip" + ], + "required": [ + "video", + "prompt", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the video" + }, + "duration": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Duration", + "description": "The duration of the video" + }, + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." 
+ }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "fps": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Fps", + "description": "The FPS of the video" + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the video" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "num_frames": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Num Frames", + "description": "The number of frames in the video" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan-22-vace-fun-a14b/reframe/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-22-vace-fun-a14b/reframe/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/wan-22-vace-fun-a14b/reframe": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Wan22VaceFunA14bReframeInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-22-vace-fun-a14b/reframe/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Wan22VaceFunA14bReframeOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan-22-vace-fun-a14b/outpainting", + "metadata": { + "display_name": "Wan 2.2 VACE Fun A14B", + "category": "video-to-video", + "description": "VACE Fun for Wan 2.2 A14B from Alibaba-PAI", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:53.888Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/tiger/qrBLSWxRlek4yKeAtyMaI_8d518c3fb3624b3ea62186e239c046c2.jpg", + "model_url": "https://fal.run/fal-ai/wan-22-vace-fun-a14b/outpainting", + "license_type": "commercial", + "date": "2025-09-17T21:03:36.437Z", + "group": { + "key": "wan-22-vace-fun", + "label": "Outpainting" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan-22-vace-fun-a14b/outpainting", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan-22-vace-fun-a14b/outpainting queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan-22-vace-fun-a14b/outpainting", + "category": "video-to-video", + "thumbnailUrl": "https://fal.media/files/tiger/qrBLSWxRlek4yKeAtyMaI_8d518c3fb3624b3ea62186e239c046c2.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan-22-vace-fun-a14b/outpainting", + "documentationUrl": "https://fal.ai/models/fal-ai/wan-22-vace-fun-a14b/outpainting/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Wan22VaceFunA14bOutpaintingInput": { + "title": "WanVACEOutpaintingRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A lone woman strides through the neon-drenched streets of Tokyo at night. Her crimson dress, a vibrant splash of color against the deep blues and blacks of the cityscape, flows slightly with each step. A tailored black jacket, crisp and elegant, contrasts sharply with the dress's rich texture. Medium shot: The city hums around her, blurred lights creating streaks of color in the background. Close-up: The fabric of her dress catches the streetlight's glow, revealing a subtle silk sheen and the intricate stitching at the hem. Her black jacket’s subtle texture is visible – a fine wool perhaps, with a matte finish. The overall mood is one of quiet confidence and mystery, a vibrant woman navigating a bustling, nocturnal landscape. High resolution 4k." + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt to guide video generation." + }, + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/web-examples/wan/t2v.mp4" + ], + "title": "Video URL", + "type": "string", + "description": "URL to the source video file. Required for outpainting." + }, + "num_interpolated_frames": { + "description": "Number of frames to interpolate between the original frames. A value of 0 means no interpolation.", + "type": "integer", + "minimum": 0, + "title": "Number of Interpolated Frames", + "examples": [ + 0 + ], + "maximum": 5, + "default": 0 + }, + "temporal_downsample_factor": { + "description": "Temporal downsample factor for the video. This is an integer value that determines how many frames to skip in the video. A value of 0 means no downsampling. For each downsample factor, one upsample factor will automatically be applied.", + "type": "integer", + "minimum": 0, + "title": "Temporal Downsample Factor", + "examples": [ + 0 + ], + "maximum": 5, + "default": 0 + }, + "first_frame_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "First Frame URL", + "description": "URL to the first frame of the video. If provided, the model will use this frame as a reference." + }, + "ref_image_urls": { + "title": "Reference Image URLs", + "type": "array", + "description": "URLs to source reference image. If provided, the model will use this image as reference.", + "items": { + "type": "string" + } + }, + "expand_ratio": { + "description": "Amount of expansion. This is a float value between 0 and 1, where 0.25 adds 25% to the original video size on the specified sides.", + "type": "number", + "minimum": 0, + "title": "Expand Ratio", + "examples": [ + 0.25 + ], + "maximum": 1, + "default": 0.25 + }, + "transparency_mode": { + "enum": [ + "content_aware", + "white", + "black" + ], + "title": "Transparency Mode", + "type": "string", + "examples": [ + "content_aware" + ], + "description": "The transparency mode to apply to the first and last frames. This controls how the transparent areas of the first and last frames are filled.", + "default": "content_aware" + }, + "num_frames": { + "minimum": 17, + "title": "Number of Frames", + "type": "integer", + "maximum": 241, + "description": "Number of frames to generate. Must be between 81 to 241 (inclusive).", + "default": 81 + }, + "auto_downsample_min_fps": { + "description": "The minimum frames per second to downsample the video to. 
This is used to help determine the auto downsample factor to try and find the lowest detail-preserving downsample factor. The default value is appropriate for most videos, if you are using a video with very fast motion, you may need to increase this value. If your video has a very low amount of motion, you could decrease this value to allow for higher downsampling and thus longer sequences.", + "type": "number", + "minimum": 1, + "title": "Auto Downsample Min FPS", + "examples": [ + 15 + ], + "maximum": 60, + "default": 15 + }, + "guidance_scale": { + "description": "Guidance scale for classifier-free guidance. Higher values encourage the model to generate images closely related to the text prompt.", + "type": "number", + "minimum": 1, + "title": "Guidance Scale", + "examples": [ + 5 + ], + "maximum": 10, + "default": 5 + }, + "sampler": { + "enum": [ + "unipc", + "dpm++", + "euler" + ], + "title": "Sampler", + "type": "string", + "examples": [ + "unipc" + ], + "description": "Sampler to use for video generation.", + "default": "unipc" + }, + "expand_bottom": { + "examples": [ + true + ], + "title": "Expand Bottom", + "type": "boolean", + "description": "Whether to expand the video to the bottom.", + "default": false + }, + "sync_mode": { + "examples": [ + false + ], + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "examples": [ + "high" + ], + "description": "The quality of the generated video.", + "default": "high" + }, + "enable_prompt_expansion": { + "examples": [ + false + ], + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + }, + "expand_left": { + "examples": [ + true + ], + "title": "Expand Left", + "type": "boolean", + "description": "Whether to expand the video to the left.", + "default": false + }, + "interpolator_model": { + "enum": [ + "rife", + "film" + ], + "title": "Interpolator Model", + "type": "string", + "examples": [ + "film" + ], + "description": "The model to use for frame interpolation. Options are 'rife' or 'film'.", + "default": "film" + }, + "enable_auto_downsample": { + "examples": [ + false + ], + "title": "Enable Auto Downsample", + "type": "boolean", + "description": "If true, the model will automatically temporally downsample the video to an appropriate frame length for the model, then will interpolate it back to the original frame length.", + "default": false + }, + "expand_top": { + "examples": [ + true + ], + "title": "Expand Top", + "type": "boolean", + "description": "Whether to expand the video to the top.", + "default": false + }, + "shift": { + "minimum": 1, + "title": "Shift", + "type": "number", + "maximum": 15, + "description": "Shift parameter for video generation.", + "default": 5 + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. If None, a random seed is chosen." + }, + "acceleration": { + "anyOf": [ + { + "enum": [ + "none", + "low", + "regular" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Acceleration", + "examples": [ + "regular" + ], + "description": "Acceleration to use for inference. Options are 'none' or 'regular'. 
Accelerated inference will very slightly affect output, but will be significantly faster.", + "default": "regular" + }, + "frames_per_second": { + "anyOf": [ + { + "minimum": 5, + "maximum": 30, + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Frames per Second", + "description": "Frames per second of the generated video. Must be between 5 to 30. Ignored if match_input_frames_per_second is true.", + "default": 16 + }, + "match_input_num_frames": { + "examples": [ + false + ], + "title": "Match Input Number of Frames", + "type": "boolean", + "description": "If true, the number of frames in the generated video will match the number of frames in the input video. If false, the number of frames will be determined by the num_frames parameter.", + "default": false + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": false + }, + "negative_prompt": { + "examples": [ + "letterboxing, borders, black bars, bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for video generation.", + "default": "letterboxing, borders, black bars, bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "examples": [ + "balanced" + ], + "description": "The write mode of the generated video.", + "default": "balanced" + }, + "expand_right": { + "examples": [ + true + ], + "title": "Expand Right", + "type": "boolean", + "description": "Whether to expand the video to the right.", + "default": false + }, + "resolution": { + "enum": [ + "auto", + "240p", + "360p", + "480p", + "580p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video.", + "default": "auto" + }, + "return_frames_zip": { + "examples": [ + false + ], + "title": "Return Frames Zip", + "type": "boolean", + "description": "If true, also return a ZIP file containing all generated frames.", + "default": false + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "1:1", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio of the generated video.", + "default": "auto" + }, + "match_input_frames_per_second": { + "examples": [ + false + ], + "title": "Match Input Frames Per Second", + "type": "boolean", + "description": "If true, the frames per second of the generated video will match the input video. 
If false, the frames per second will be determined by the frames_per_second parameter.", + "default": false + }, + "num_inference_steps": { + "minimum": 2, + "title": "Number of Inference Steps", + "type": "integer", + "maximum": 50, + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "default": 30 + }, + "last_frame_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Last Frame URL", + "description": "URL to the last frame of the video. If provided, the model will use this frame as a reference." + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "match_input_num_frames", + "num_frames", + "match_input_frames_per_second", + "frames_per_second", + "seed", + "resolution", + "aspect_ratio", + "num_inference_steps", + "guidance_scale", + "sampler", + "shift", + "video_url", + "ref_image_urls", + "first_frame_url", + "last_frame_url", + "enable_safety_checker", + "enable_prompt_expansion", + "acceleration", + "video_quality", + "video_write_mode", + "num_interpolated_frames", + "temporal_downsample_factor", + "enable_auto_downsample", + "auto_downsample_min_fps", + "interpolator_model", + "sync_mode", + "transparency_mode", + "return_frames_zip", + "expand_left", + "expand_right", + "expand_top", + "expand_bottom", + "expand_ratio" + ], + "required": [ + "prompt", + "video_url" + ] + }, + "Wan22VaceFunA14bOutpaintingOutput": { + "title": "WanVACEOutpaintingResponse", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation." + }, + "frames_zip": { + "anyOf": [ + { + "$ref": "#/components/schemas/File" + }, + { + "type": "null" + } + ], + "description": "ZIP archive of all video frames if requested." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/wan-vace-outpainting-output.mp4" + } + ], + "description": "The generated outpainting video file.", + "$ref": "#/components/schemas/VideoFile" + } + }, + "x-fal-order-properties": [ + "video", + "prompt", + "seed", + "frames_zip" + ], + "required": [ + "video", + "prompt", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
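Condensed, the Wan22VaceFunA14bOutpaintingInput schema above means a valid request only has to carry `prompt` and `video_url`; the expand_* booleans choose which sides grow and `expand_ratio` sets how much canvas is added. An illustrative body (the video URL is the schema's own example value; the other fields show defaults being overridden):

// Illustrative outpainting request body; `prompt` and `video_url` are the only
// required fields. An expand_ratio of 0.25 adds 25% on each selected side.
const outpaintingInput = {
  prompt: 'A lone woman strides through the neon-drenched streets of Tokyo at night.',
  video_url: 'https://storage.googleapis.com/falserverless/web-examples/wan/t2v.mp4',
  expand_left: true,
  expand_right: true,
  expand_ratio: 0.25, // 0..1; defaults to 0.25
  resolution: 'auto', // 'auto' | '240p' | '360p' | '480p' | '580p' | '720p'
}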
+ } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the video" + }, + "duration": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Duration", + "description": "The duration of the video" + }, + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "fps": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Fps", + "description": "The FPS of the video" + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the video" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "num_frames": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Num Frames", + "description": "The number of frames in the video" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan-22-vace-fun-a14b/outpainting/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-22-vace-fun-a14b/outpainting/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/wan-22-vace-fun-a14b/outpainting": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Wan22VaceFunA14bOutpaintingInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-22-vace-fun-a14b/outpainting/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Wan22VaceFunA14bOutpaintingOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan-22-vace-fun-a14b/inpainting", + "metadata": { + "display_name": "Wan 2.2 VACE Fun A14B", + "category": "video-to-video", + "description": "VACE Fun for Wan 2.2 A14B from Alibaba-PAI", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:54.073Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/elephant/qkFGshBowZhFpgeUj-_UY_012ebaf8328540c5afb610e56c58d1c4.jpg", + "model_url": "https://fal.run/fal-ai/wan-22-vace-fun-a14b/inpainting", + "license_type": "commercial", + "date": "2025-09-17T21:00:02.580Z", + "group": { + "key": "wan-22-vace-fun", + "label": "Inpainting" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan-22-vace-fun-a14b/inpainting", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan-22-vace-fun-a14b/inpainting queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan-22-vace-fun-a14b/inpainting", + "category": "video-to-video", + "thumbnailUrl": "https://fal.media/files/elephant/qkFGshBowZhFpgeUj-_UY_012ebaf8328540c5afb610e56c58d1c4.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan-22-vace-fun-a14b/inpainting", + "documentationUrl": "https://fal.ai/models/fal-ai/wan-22-vace-fun-a14b/inpainting/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Wan22VaceFunA14bInpaintingInput": { + "title": "WanVACEInpaintingRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "The video shows a man riding a horse on a vast grassland. He has long lavender hair and wears a traditional dress of a white top and black pants. The animation style makes him look like he is doing some kind of outdoor activity or performing. The background is a spectacular mountain range and cloud sky, giving a sense of tranquility and vastness. The entire video is shot from a fixed angle, focusing on the rider and his horse." + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt to guide video generation." + }, + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/vace/src_video.mp4" + ], + "title": "Video URL", + "type": "string", + "description": "URL to the source video file. Required for inpainting." + }, + "num_interpolated_frames": { + "description": "Number of frames to interpolate between the original frames. A value of 0 means no interpolation.", + "type": "integer", + "minimum": 0, + "title": "Number of Interpolated Frames", + "examples": [ + 0 + ], + "maximum": 5, + "default": 0 + }, + "temporal_downsample_factor": { + "description": "Temporal downsample factor for the video. This is an integer value that determines how many frames to skip in the video. A value of 0 means no downsampling. For each downsample factor, one upsample factor will automatically be applied.", + "type": "integer", + "minimum": 0, + "title": "Temporal Downsample Factor", + "examples": [ + 0 + ], + "maximum": 5, + "default": 0 + }, + "first_frame_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "First Frame URL", + "description": "URL to the first frame of the video. If provided, the model will use this frame as a reference." + }, + "ref_image_urls": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/vace/src_ref_image_1.png" + ] + ], + "title": "Reference Image URLs", + "type": "array", + "description": "Urls to source reference image. If provided, the model will use this image as reference.", + "items": { + "type": "string" + } + }, + "guidance_scale": { + "description": "Guidance scale for classifier-free guidance. Higher values encourage the model to generate images closely related to the text prompt.", + "type": "number", + "minimum": 1, + "title": "Guidance Scale", + "examples": [ + 5 + ], + "maximum": 10, + "default": 5 + }, + "num_frames": { + "minimum": 17, + "title": "Number of Frames", + "type": "integer", + "maximum": 241, + "description": "Number of frames to generate. Must be between 81 to 241 (inclusive).", + "default": 81 + }, + "auto_downsample_min_fps": { + "description": "The minimum frames per second to downsample the video to. This is used to help determine the auto downsample factor to try and find the lowest detail-preserving downsample factor. The default value is appropriate for most videos, if you are using a video with very fast motion, you may need to increase this value. 
If your video has a very low amount of motion, you could decrease this value to allow for higher downsampling and thus longer sequences.", + "type": "number", + "minimum": 1, + "title": "Auto Downsample Min FPS", + "examples": [ + 15 + ], + "maximum": 60, + "default": 15 + }, + "transparency_mode": { + "enum": [ + "content_aware", + "white", + "black" + ], + "title": "Transparency Mode", + "type": "string", + "examples": [ + "content_aware" + ], + "description": "The transparency mode to apply to the first and last frames. This controls how the transparent areas of the first and last frames are filled.", + "default": "content_aware" + }, + "sampler": { + "enum": [ + "unipc", + "dpm++", + "euler" + ], + "title": "Sampler", + "type": "string", + "examples": [ + "unipc" + ], + "description": "Sampler to use for video generation.", + "default": "unipc" + }, + "sync_mode": { + "examples": [ + false + ], + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "examples": [ + "high" + ], + "description": "The quality of the generated video.", + "default": "high" + }, + "mask_video_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Mask Video URL", + "examples": [ + "https://storage.googleapis.com/falserverless/vace/src_mask.mp4" + ], + "description": "URL to the source mask file. Required for inpainting." + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. If None, a random seed is chosen." + }, + "interpolator_model": { + "enum": [ + "rife", + "film" + ], + "title": "Interpolator Model", + "type": "string", + "examples": [ + "film" + ], + "description": "The model to use for frame interpolation. Options are 'rife' or 'film'.", + "default": "film" + }, + "enable_auto_downsample": { + "examples": [ + false + ], + "title": "Enable Auto Downsample", + "type": "boolean", + "description": "If true, the model will automatically temporally downsample the video to an appropriate frame length for the model, then will interpolate it back to the original frame length.", + "default": false + }, + "enable_prompt_expansion": { + "examples": [ + false + ], + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + }, + "shift": { + "minimum": 1, + "title": "Shift", + "type": "number", + "maximum": 15, + "description": "Shift parameter for video generation.", + "default": 5 + }, + "preprocess": { + "examples": [ + false + ], + "title": "Preprocess", + "type": "boolean", + "description": "Whether to preprocess the input video.", + "default": false + }, + "acceleration": { + "anyOf": [ + { + "enum": [ + "none", + "low", + "regular" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Acceleration", + "examples": [ + "regular" + ], + "description": "Acceleration to use for inference. Options are 'none' or 'regular'. Accelerated inference will very slightly affect output, but will be significantly faster.", + "default": "regular" + }, + "mask_image_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Mask Image URL", + "description": "URL to the guiding mask file. 
If provided, the model will use this mask as a reference to create masked video using salient mask tracking. Will be ignored if mask_video_url is provided." + }, + "frames_per_second": { + "anyOf": [ + { + "minimum": 5, + "maximum": 30, + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Frames per Second", + "description": "Frames per second of the generated video. Must be between 5 to 30. Ignored if match_input_frames_per_second is true.", + "default": 16 + }, + "match_input_num_frames": { + "examples": [ + false + ], + "title": "Match Input Number of Frames", + "type": "boolean", + "description": "If true, the number of frames in the generated video will match the number of frames in the input video. If false, the number of frames will be determined by the num_frames parameter.", + "default": false + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": false + }, + "negative_prompt": { + "examples": [ + "letterboxing, borders, black bars, bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for video generation.", + "default": "letterboxing, borders, black bars, bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "examples": [ + "balanced" + ], + "description": "The write mode of the generated video.", + "default": "balanced" + }, + "resolution": { + "enum": [ + "auto", + "240p", + "360p", + "480p", + "580p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video.", + "default": "auto" + }, + "return_frames_zip": { + "examples": [ + false + ], + "title": "Return Frames Zip", + "type": "boolean", + "description": "If true, also return a ZIP file containing all generated frames.", + "default": false + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "1:1", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio of the generated video.", + "default": "auto" + }, + "match_input_frames_per_second": { + "examples": [ + false + ], + "title": "Match Input Frames Per Second", + "type": "boolean", + "description": "If true, the frames per second of the generated video will match the input video. If false, the frames per second will be determined by the frames_per_second parameter.", + "default": false + }, + "num_inference_steps": { + "minimum": 2, + "title": "Number of Inference Steps", + "type": "integer", + "maximum": 50, + "description": "Number of inference steps for sampling. 
Higher values give better quality but take longer.", + "default": 30 + }, + "last_frame_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Last Frame URL", + "description": "URL to the last frame of the video. If provided, the model will use this frame as a reference." + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "match_input_num_frames", + "num_frames", + "match_input_frames_per_second", + "frames_per_second", + "seed", + "resolution", + "aspect_ratio", + "num_inference_steps", + "guidance_scale", + "sampler", + "shift", + "video_url", + "mask_video_url", + "mask_image_url", + "ref_image_urls", + "first_frame_url", + "last_frame_url", + "enable_safety_checker", + "enable_prompt_expansion", + "preprocess", + "acceleration", + "video_quality", + "video_write_mode", + "num_interpolated_frames", + "temporal_downsample_factor", + "enable_auto_downsample", + "auto_downsample_min_fps", + "interpolator_model", + "sync_mode", + "transparency_mode", + "return_frames_zip" + ], + "required": [ + "prompt", + "video_url", + "mask_video_url" + ] + }, + "Wan22VaceFunA14bInpaintingOutput": { + "title": "WanVACEInpaintingResponse", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation." + }, + "frames_zip": { + "anyOf": [ + { + "$ref": "#/components/schemas/File" + }, + { + "type": "null" + } + ], + "description": "ZIP archive of all video frames if requested." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/wan-vace-inpainting-output.mp4" + } + ], + "description": "The generated inpainting video file.", + "$ref": "#/components/schemas/VideoFile" + } + }, + "x-fal-order-properties": [ + "video", + "prompt", + "seed", + "frames_zip" + ], + "required": [ + "video", + "prompt", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
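One subtlety in the inpainting schema above deserves a note: `mask_video_url` appears in the required list yet is nullable, while `mask_image_url` is an alternative that is ignored whenever a mask video is given (the mask image instead drives salient mask tracking). A hedged sketch of how a caller might resolve the two; the helper name and the null-for-absent convention are assumptions, not part of the schema:

// Sketch: pick the mask inputs for Wan22VaceFunA14bInpaintingInput.
// Assumption: sending `mask_video_url: null` satisfies the required key while
// letting `mask_image_url` drive salient mask tracking; verify against the API.
function resolveInpaintingMask(opts: {
  maskVideoUrl?: string
  maskImageUrl?: string
}): { mask_video_url: string | null; mask_image_url?: string } {
  if (opts.maskVideoUrl) {
    // Per the description, a mask video always wins over a mask image.
    return { mask_video_url: opts.maskVideoUrl }
  }
  if (opts.maskImageUrl) {
    return { mask_video_url: null, mask_image_url: opts.maskImageUrl }
  }
  throw new Error('inpainting needs mask_video_url or mask_image_url')
}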
+ } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the video" + }, + "duration": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Duration", + "description": "The duration of the video" + }, + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "fps": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Fps", + "description": "The FPS of the video" + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the video" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "num_frames": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Num Frames", + "description": "The number of frames in the video" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan-22-vace-fun-a14b/inpainting/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-22-vace-fun-a14b/inpainting/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/wan-22-vace-fun-a14b/inpainting": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Wan22VaceFunA14bInpaintingInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-22-vace-fun-a14b/inpainting/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Wan22VaceFunA14bInpaintingOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan-22-vace-fun-a14b/depth", + "metadata": { + "display_name": "Wan 2.2 VACE Fun A14B", + "category": "video-to-video", + "description": "VACE Fun for Wan 2.2 A14B from Alibaba-PAI", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:54.254Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/kangaroo/GWwAzZDqhJ2AT1d6fVimr_a2c342bb17504782a5788e46c1796644.jpg", + "model_url": "https://fal.run/fal-ai/wan-22-vace-fun-a14b/depth", + "license_type": "commercial", + "date": "2025-09-17T20:52:28.123Z", + "group": { + "key": "wan-22-vace-fun", + "label": "Depth" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan-22-vace-fun-a14b/depth", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan-22-vace-fun-a14b/depth queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan-22-vace-fun-a14b/depth", + "category": "video-to-video", + "thumbnailUrl": "https://fal.media/files/kangaroo/GWwAzZDqhJ2AT1d6fVimr_a2c342bb17504782a5788e46c1796644.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan-22-vace-fun-a14b/depth", + "documentationUrl": "https://fal.ai/models/fal-ai/wan-22-vace-fun-a14b/depth/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Wan22VaceFunA14bDepthInput": { + "title": "WanVACEDepthRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A confident woman strides toward the camera down a sun-drenched, empty street. Her vibrant summer dress, a flowing emerald green with delicate white floral embroidery, billows slightly in the gentle breeze. She carries a stylish, woven straw bag, its natural tan contrasting beautifully with the dress. The dress's fabric shimmers subtly, catching the light. The white embroidery is intricate, each tiny flower meticulously detailed. Her expression is focused, yet relaxed, radiating self-assuredness. Her auburn hair, partially pulled back in a loose braid, catches the sunlight, creating warm highlights. The street itself is paved with warm, grey cobblestones, reflecting the bright sun. The mood is optimistic and serene, emphasizing the woman's independence and carefree spirit. High resolution 4k" + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt to guide video generation." + }, + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/wan-vace-depth-video.mp4" + ], + "title": "Video URL", + "type": "string", + "description": "URL to the source video file. Required for depth task." + }, + "num_interpolated_frames": { + "description": "Number of frames to interpolate between the original frames. A value of 0 means no interpolation.", + "type": "integer", + "minimum": 0, + "title": "Number of Interpolated Frames", + "examples": [ + 0 + ], + "maximum": 5, + "default": 0 + }, + "temporal_downsample_factor": { + "description": "Temporal downsample factor for the video. This is an integer value that determines how many frames to skip in the video. A value of 0 means no downsampling. For each downsample factor, one upsample factor will automatically be applied.", + "type": "integer", + "minimum": 0, + "title": "Temporal Downsample Factor", + "examples": [ + 0 + ], + "maximum": 5, + "default": 0 + }, + "first_frame_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "First Frame URL", + "description": "URL to the first frame of the video. If provided, the model will use this frame as a reference." + }, + "ref_image_urls": { + "title": "Reference Image URLs", + "type": "array", + "description": "URLs to source reference image. If provided, the model will use this image as reference.", + "items": { + "type": "string" + } + }, + "guidance_scale": { + "description": "Guidance scale for classifier-free guidance. Higher values encourage the model to generate images closely related to the text prompt.", + "type": "number", + "minimum": 1, + "title": "Guidance Scale", + "examples": [ + 5 + ], + "maximum": 10, + "default": 5 + }, + "num_frames": { + "minimum": 17, + "title": "Number of Frames", + "type": "integer", + "maximum": 241, + "description": "Number of frames to generate. Must be between 81 to 241 (inclusive).", + "default": 81 + }, + "auto_downsample_min_fps": { + "description": "The minimum frames per second to downsample the video to. This is used to help determine the auto downsample factor to try and find the lowest detail-preserving downsample factor. The default value is appropriate for most videos, if you are using a video with very fast motion, you may need to increase this value. 
If your video has a very low amount of motion, you could decrease this value to allow for higher downsampling and thus longer sequences.", + "type": "number", + "minimum": 1, + "title": "Auto Downsample Min FPS", + "examples": [ + 15 + ], + "maximum": 60, + "default": 15 + }, + "transparency_mode": { + "enum": [ + "content_aware", + "white", + "black" + ], + "title": "Transparency Mode", + "type": "string", + "examples": [ + "content_aware" + ], + "description": "The transparency mode to apply to the first and last frames. This controls how the transparent areas of the first and last frames are filled.", + "default": "content_aware" + }, + "sampler": { + "enum": [ + "unipc", + "dpm++", + "euler" + ], + "title": "Sampler", + "type": "string", + "examples": [ + "unipc" + ], + "description": "Sampler to use for video generation.", + "default": "unipc" + }, + "sync_mode": { + "examples": [ + false + ], + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "examples": [ + "high" + ], + "description": "The quality of the generated video.", + "default": "high" + }, + "enable_prompt_expansion": { + "examples": [ + false + ], + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. If None, a random seed is chosen." + }, + "interpolator_model": { + "enum": [ + "rife", + "film" + ], + "title": "Interpolator Model", + "type": "string", + "examples": [ + "film" + ], + "description": "The model to use for frame interpolation. Options are 'rife' or 'film'.", + "default": "film" + }, + "enable_auto_downsample": { + "examples": [ + false + ], + "title": "Enable Auto Downsample", + "type": "boolean", + "description": "If true, the model will automatically temporally downsample the video to an appropriate frame length for the model, then will interpolate it back to the original frame length.", + "default": false + }, + "preprocess": { + "examples": [ + false + ], + "title": "Preprocess", + "type": "boolean", + "description": "Whether to preprocess the input video.", + "default": false + }, + "shift": { + "minimum": 1, + "title": "Shift", + "type": "number", + "maximum": 15, + "description": "Shift parameter for video generation.", + "default": 5 + }, + "acceleration": { + "anyOf": [ + { + "enum": [ + "none", + "low", + "regular" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Acceleration", + "examples": [ + "regular" + ], + "description": "Acceleration to use for inference. Options are 'none' or 'regular'. Accelerated inference will very slightly affect output, but will be significantly faster.", + "default": "regular" + }, + "frames_per_second": { + "anyOf": [ + { + "minimum": 5, + "maximum": 30, + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Frames per Second", + "description": "Frames per second of the generated video. Must be between 5 to 30. 
Ignored if match_input_frames_per_second is true.", + "default": 16 + }, + "match_input_num_frames": { + "examples": [ + false + ], + "title": "Match Input Number of Frames", + "type": "boolean", + "description": "If true, the number of frames in the generated video will match the number of frames in the input video. If false, the number of frames will be determined by the num_frames parameter.", + "default": false + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": false + }, + "negative_prompt": { + "examples": [ + "letterboxing, borders, black bars, bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for video generation.", + "default": "letterboxing, borders, black bars, bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "examples": [ + "balanced" + ], + "description": "The write mode of the generated video.", + "default": "balanced" + }, + "resolution": { + "enum": [ + "auto", + "240p", + "360p", + "480p", + "580p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video.", + "default": "auto" + }, + "return_frames_zip": { + "examples": [ + false + ], + "title": "Return Frames Zip", + "type": "boolean", + "description": "If true, also return a ZIP file containing all generated frames.", + "default": false + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "1:1", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio of the generated video.", + "default": "auto" + }, + "match_input_frames_per_second": { + "examples": [ + false + ], + "title": "Match Input Frames Per Second", + "type": "boolean", + "description": "If true, the frames per second of the generated video will match the input video. If false, the frames per second will be determined by the frames_per_second parameter.", + "default": false + }, + "num_inference_steps": { + "minimum": 2, + "title": "Number of Inference Steps", + "type": "integer", + "maximum": 50, + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "default": 30 + }, + "last_frame_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Last Frame URL", + "description": "URL to the last frame of the video. If provided, the model will use this frame as a reference." 
+ } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "match_input_num_frames", + "num_frames", + "match_input_frames_per_second", + "frames_per_second", + "seed", + "resolution", + "aspect_ratio", + "num_inference_steps", + "guidance_scale", + "sampler", + "shift", + "video_url", + "ref_image_urls", + "first_frame_url", + "last_frame_url", + "enable_safety_checker", + "enable_prompt_expansion", + "preprocess", + "acceleration", + "video_quality", + "video_write_mode", + "num_interpolated_frames", + "temporal_downsample_factor", + "enable_auto_downsample", + "auto_downsample_min_fps", + "interpolator_model", + "sync_mode", + "transparency_mode", + "return_frames_zip" + ], + "required": [ + "prompt", + "video_url" + ] + }, + "Wan22VaceFunA14bDepthOutput": { + "title": "WanVACEDepthResponse", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation." + }, + "frames_zip": { + "anyOf": [ + { + "$ref": "#/components/schemas/File" + }, + { + "type": "null" + } + ], + "description": "ZIP archive of all video frames if requested." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/wan-vace-depth-output.mp4" + } + ], + "description": "The generated depth video file.", + "$ref": "#/components/schemas/VideoFile" + } + }, + "x-fal-order-properties": [ + "video", + "prompt", + "seed", + "frames_zip" + ], + "required": [ + "video", + "prompt", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the video" + }, + "duration": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Duration", + "description": "The duration of the video" + }, + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "fps": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Fps", + "description": "The FPS of the video" + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the video" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "num_frames": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Num Frames", + "description": "The number of frames in the video" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan-22-vace-fun-a14b/depth/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-22-vace-fun-a14b/depth/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/wan-22-vace-fun-a14b/depth": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Wan22VaceFunA14bDepthInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-22-vace-fun-a14b/depth/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Wan22VaceFunA14bDepthOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan-22-vace-fun-a14b/pose", + "metadata": { + "display_name": "Wan 2.2 VACE Fun A14B", + "category": "video-to-video", + "description": "VACE Fun for Wan 2.2 A14B from Alibaba-PAI", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:54.378Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/lion/FjcKhg-ktT1dCP-F9RtJP_87d1ab4d3f3e4a8c82d53e6cc6d23616.jpg", + "model_url": "https://fal.run/fal-ai/wan-22-vace-fun-a14b/pose", + "license_type": "commercial", + "date": "2025-09-17T20:46:39.571Z", + "group": { + "key": "wan-22-vace-fun", + "label": "Pose" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan-22-vace-fun-a14b/pose", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan-22-vace-fun-a14b/pose queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan-22-vace-fun-a14b/pose", + "category": "video-to-video", + "thumbnailUrl": "https://fal.media/files/lion/FjcKhg-ktT1dCP-F9RtJP_87d1ab4d3f3e4a8c82d53e6cc6d23616.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan-22-vace-fun-a14b/pose", + "documentationUrl": "https://fal.ai/models/fal-ai/wan-22-vace-fun-a14b/pose/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Wan22VaceFunA14bPoseInput": { + "title": "WanVACEPoseRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A sharply dressed man walks toward the camera down a sun-drenched hallway. 
Medium shot: He's framed from the knees up, his confident stride filling the frame. His navy blue business suit is impeccably tailored, the fabric subtly shimmering under the light streaming through the tall, arched windows lining the hallway. Close-up: The rich texture of the suit's wool is visible, each thread reflecting the light. His crisp white shirt contrasts beautifully with the deep crimson of his silk tie, the knot perfectly formed. The sunlight highlights the subtle sheen of his polished shoes. The windows cast long shadows, highlighting the architectural detail of the hallway, creating a sense of both elegance and movement. High resolution 4k."
+ ],
+ "title": "Prompt",
+ "type": "string",
+ "description": "The text prompt to guide video generation. For the pose task, the prompt should describe the desired pose and action of the subject in the video."
+ },
+ "video_url": {
+ "examples": [
+ "https://storage.googleapis.com/falserverless/example_inputs/wan-vace-pose-video.mp4"
+ ],
+ "title": "Video URL",
+ "type": "string",
+ "description": "URL to the source video file. Required for the pose task."
+ },
+ "num_interpolated_frames": {
+ "description": "Number of frames to interpolate between the original frames. A value of 0 means no interpolation.",
+ "type": "integer",
+ "minimum": 0,
+ "title": "Number of Interpolated Frames",
+ "examples": [
+ 0
+ ],
+ "maximum": 5,
+ "default": 0
+ },
+ "temporal_downsample_factor": {
+ "description": "Temporal downsample factor for the video. This is an integer value that determines how many frames to skip in the video. A value of 0 means no downsampling. For each downsample factor, one upsample factor will automatically be applied.",
+ "type": "integer",
+ "minimum": 0,
+ "title": "Temporal Downsample Factor",
+ "examples": [
+ 0
+ ],
+ "maximum": 5,
+ "default": 0
+ },
+ "first_frame_url": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "title": "First Frame URL",
+ "description": "URL to the first frame of the video. If provided, the model will use this frame as a reference."
+ },
+ "ref_image_urls": {
+ "title": "Reference Image URLs",
+ "type": "array",
+ "description": "URLs to source reference images. If provided, the model will use these images as references.",
+ "items": {
+ "type": "string"
+ }
+ },
+ "guidance_scale": {
+ "description": "Guidance scale for classifier-free guidance. Higher values encourage the model to generate videos closely related to the text prompt.",
+ "type": "number",
+ "minimum": 1,
+ "title": "Guidance Scale",
+ "examples": [
+ 5
+ ],
+ "maximum": 10,
+ "default": 5
+ },
+ "num_frames": {
+ "minimum": 17,
+ "title": "Number of Frames",
+ "type": "integer",
+ "maximum": 241,
+ "description": "Number of frames to generate. Must be between 81 and 241 (inclusive).",
+ "default": 81
+ },
+ "auto_downsample_min_fps": {
+ "description": "The minimum frames per second to downsample the video to. This is used to help determine the auto downsample factor to try and find the lowest detail-preserving downsample factor. The default value is appropriate for most videos; if you are using a video with very fast motion, you may need to increase this value. 
If your video has a very low amount of motion, you could decrease this value to allow for higher downsampling and thus longer sequences.",
+ "type": "number",
+ "minimum": 1,
+ "title": "Auto Downsample Min FPS",
+ "examples": [
+ 15
+ ],
+ "maximum": 60,
+ "default": 15
+ },
+ "transparency_mode": {
+ "enum": [
+ "content_aware",
+ "white",
+ "black"
+ ],
+ "title": "Transparency Mode",
+ "type": "string",
+ "examples": [
+ "content_aware"
+ ],
+ "description": "The transparency mode to apply to the first and last frames. This controls how the transparent areas of the first and last frames are filled.",
+ "default": "content_aware"
+ },
+ "sampler": {
+ "enum": [
+ "unipc",
+ "dpm++",
+ "euler"
+ ],
+ "title": "Sampler",
+ "type": "string",
+ "examples": [
+ "unipc"
+ ],
+ "description": "Sampler to use for video generation.",
+ "default": "unipc"
+ },
+ "sync_mode": {
+ "examples": [
+ false
+ ],
+ "title": "Sync Mode",
+ "type": "boolean",
+ "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.",
+ "default": false
+ },
+ "video_quality": {
+ "enum": [
+ "low",
+ "medium",
+ "high",
+ "maximum"
+ ],
+ "title": "Video Quality",
+ "type": "string",
+ "examples": [
+ "high"
+ ],
+ "description": "The quality of the generated video.",
+ "default": "high"
+ },
+ "enable_prompt_expansion": {
+ "examples": [
+ false
+ ],
+ "title": "Enable Prompt Expansion",
+ "type": "boolean",
+ "description": "Whether to enable prompt expansion.",
+ "default": false
+ },
+ "seed": {
+ "anyOf": [
+ {
+ "type": "integer"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "title": "Seed",
+ "description": "Random seed for reproducibility. If None, a random seed is chosen."
+ },
+ "interpolator_model": {
+ "enum": [
+ "rife",
+ "film"
+ ],
+ "title": "Interpolator Model",
+ "type": "string",
+ "examples": [
+ "film"
+ ],
+ "description": "The model to use for frame interpolation. Options are 'rife' or 'film'.",
+ "default": "film"
+ },
+ "enable_auto_downsample": {
+ "examples": [
+ false
+ ],
+ "title": "Enable Auto Downsample",
+ "type": "boolean",
+ "description": "If true, the model will automatically temporally downsample the video to an appropriate frame length for the model, then will interpolate it back to the original frame length.",
+ "default": false
+ },
+ "preprocess": {
+ "examples": [
+ false
+ ],
+ "title": "Preprocess",
+ "type": "boolean",
+ "description": "Whether to preprocess the input video.",
+ "default": false
+ },
+ "shift": {
+ "minimum": 1,
+ "title": "Shift",
+ "type": "number",
+ "maximum": 15,
+ "description": "Shift parameter for video generation.",
+ "default": 5
+ },
+ "acceleration": {
+ "anyOf": [
+ {
+ "enum": [
+ "none",
+ "low",
+ "regular"
+ ],
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "title": "Acceleration",
+ "examples": [
+ "regular"
+ ],
+ "description": "Acceleration to use for inference. Options are 'none', 'low', or 'regular'. Accelerated inference will very slightly affect output, but will be significantly faster.",
+ "default": "regular"
+ },
+ "frames_per_second": {
+ "anyOf": [
+ {
+ "minimum": 5,
+ "maximum": 30,
+ "type": "integer"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "title": "Frames per Second",
+ "description": "Frames per second of the generated video. Must be between 5 and 30. 
Ignored if match_input_frames_per_second is true.", + "default": 16 + }, + "match_input_num_frames": { + "examples": [ + false + ], + "title": "Match Input Number of Frames", + "type": "boolean", + "description": "If true, the number of frames in the generated video will match the number of frames in the input video. If false, the number of frames will be determined by the num_frames parameter.", + "default": false + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": false + }, + "negative_prompt": { + "examples": [ + "letterboxing, borders, black bars, bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for video generation.", + "default": "letterboxing, borders, black bars, bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "examples": [ + "balanced" + ], + "description": "The write mode of the generated video.", + "default": "balanced" + }, + "resolution": { + "enum": [ + "auto", + "240p", + "360p", + "480p", + "580p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video.", + "default": "auto" + }, + "return_frames_zip": { + "examples": [ + false + ], + "title": "Return Frames Zip", + "type": "boolean", + "description": "If true, also return a ZIP file containing all generated frames.", + "default": false + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "1:1", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio of the generated video.", + "default": "auto" + }, + "match_input_frames_per_second": { + "examples": [ + false + ], + "title": "Match Input Frames Per Second", + "type": "boolean", + "description": "If true, the frames per second of the generated video will match the input video. If false, the frames per second will be determined by the frames_per_second parameter.", + "default": false + }, + "num_inference_steps": { + "minimum": 2, + "title": "Number of Inference Steps", + "type": "integer", + "maximum": 50, + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "default": 30 + }, + "last_frame_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Last Frame URL", + "description": "URL to the last frame of the video. If provided, the model will use this frame as a reference." 
+ } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "match_input_num_frames", + "num_frames", + "match_input_frames_per_second", + "frames_per_second", + "seed", + "resolution", + "aspect_ratio", + "num_inference_steps", + "guidance_scale", + "sampler", + "shift", + "video_url", + "ref_image_urls", + "first_frame_url", + "last_frame_url", + "enable_safety_checker", + "enable_prompt_expansion", + "preprocess", + "acceleration", + "video_quality", + "video_write_mode", + "num_interpolated_frames", + "temporal_downsample_factor", + "enable_auto_downsample", + "auto_downsample_min_fps", + "interpolator_model", + "sync_mode", + "transparency_mode", + "return_frames_zip" + ], + "required": [ + "prompt", + "video_url" + ] + }, + "Wan22VaceFunA14bPoseOutput": { + "title": "WanVACEPoseResponse", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation." + }, + "frames_zip": { + "anyOf": [ + { + "$ref": "#/components/schemas/File" + }, + { + "type": "null" + } + ], + "description": "ZIP archive of all video frames if requested." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/wan-vace-pose-output.mp4" + } + ], + "description": "The generated pose video file.", + "$ref": "#/components/schemas/VideoFile" + } + }, + "x-fal-order-properties": [ + "video", + "prompt", + "seed", + "frames_zip" + ], + "required": [ + "video", + "prompt", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the video" + }, + "duration": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Duration", + "description": "The duration of the video" + }, + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "fps": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Fps", + "description": "The FPS of the video" + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the video" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "num_frames": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Num Frames", + "description": "The number of frames in the video" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan-22-vace-fun-a14b/pose/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-22-vace-fun-a14b/pose/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/wan-22-vace-fun-a14b/pose": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Wan22VaceFunA14bPoseInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-22-vace-fun-a14b/pose/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Wan22VaceFunA14bPoseOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/hunyuan-video-foley", + "metadata": { + "display_name": "Hunyuan Video Foley", + "category": "video-to-video", + "description": "Use the capabilities of the hunyuan foley model to bring life to your videos by adding sound effect to them.", + "status": "active", + "tags": [ + "video-to-video", + "add-sound" + ], + "updated_at": "2026-01-26T21:42:58.320Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/elephant/3ZFQ2t2qtiioCi2l04ALY_33a1818a6de949c0801ab825dfae6245.jpg", + "model_url": "https://fal.run/fal-ai/hunyuan-video-foley", + "license_type": "commercial", + "date": "2025-09-08T16:50:54.317Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/hunyuan-video-foley", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/hunyuan-video-foley queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/hunyuan-video-foley", + "category": "video-to-video", + "thumbnailUrl": "https://fal.media/files/elephant/3ZFQ2t2qtiioCi2l04ALY_33a1818a6de949c0801ab825dfae6245.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/hunyuan-video-foley", + "documentationUrl": "https://fal.ai/models/fal-ai/hunyuan-video-foley/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ }
+ },
+ "required": [
+ "status",
+ "request_id"
+ ]
+ },
+ "HunyuanVideoFoleyInput": {
+ "title": "HunyuanFoleyRequest",
+ "type": "object",
+ "properties": {
+ "video_url": {
+ "examples": [
+ "https://storage.googleapis.com/falserverless/model_tests/video_models/1_video.mp4"
+ ],
+ "title": "Video Url",
+ "type": "string",
+ "description": "The URL of the video to generate audio for."
+ },
+ "guidance_scale": {
+ "minimum": 1,
+ "title": "Guidance Scale",
+ "type": "number",
+ "maximum": 10,
+ "description": "Guidance scale for audio generation.",
+ "default": 4.5
+ },
+ "num_inference_steps": {
+ "minimum": 10,
+ "title": "Num Inference Steps",
+ "type": "integer",
+ "maximum": 100,
+ "description": "Number of inference steps for generation.",
+ "default": 50
+ },
+ "seed": {
+ "title": "Seed",
+ "type": "integer",
+ "description": "Random seed for reproducible generation."
+ },
+ "negative_prompt": {
+ "title": "Negative Prompt",
+ "type": "string",
+ "description": "Negative prompt to avoid certain audio characteristics.",
+ "default": "noisy, harsh"
+ },
+ "text_prompt": {
+ "examples": [
+ "A person walks on frozen ice",
+ "The crackling of fire and whooshing of flames",
+ "Gentle footsteps on wooden floor"
+ ],
+ "title": "Text Prompt",
+ "type": "string",
+ "description": "Text description of the desired audio."
+ }
+ },
+ "x-fal-order-properties": [
+ "video_url",
+ "text_prompt",
+ "negative_prompt",
+ "guidance_scale",
+ "num_inference_steps",
+ "seed"
+ ],
+ "required": [
+ "video_url",
+ "text_prompt"
+ ]
+ },
+ "HunyuanVideoFoleyOutput": {
+ "title": "HunyuanFoleyResponse",
+ "type": "object",
+ "properties": {
+ "video": {
+ "title": "Video",
+ "description": "The generated video file with audio.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/File"
+ }
+ ]
+ }
+ },
+ "x-fal-order-properties": [
+ "video"
+ ],
+ "required": [
+ "video"
+ ]
+ },
+ "File": {
+ "title": "File",
+ "type": "object",
+ "properties": {
+ "file_size": {
+ "examples": [
+ 4404019
+ ],
+ "title": "File Size",
+ "type": "integer",
+ "description": "The size of the file in bytes."
+ },
+ "file_name": {
+ "examples": [
+ "z9RV14K95DvU.png"
+ ],
+ "title": "File Name",
+ "type": "string",
+ "description": "The name of the file. It will be auto-generated if not provided."
+ },
+ "content_type": {
+ "examples": [
+ "image/png"
+ ],
+ "title": "Content Type",
+ "type": "string",
+ "description": "The mime type of the file."
+ },
+ "url": {
+ "title": "Url",
+ "type": "string",
+ "description": "The URL where the file can be downloaded from."
+ },
+ "file_data": {
+ "format": "binary",
+ "title": "File Data",
+ "type": "string",
+ "description": "File data"
+ }
+ },
+ "x-fal-order-properties": [
+ "url",
+ "content_type",
+ "file_name",
+ "file_size",
+ "file_data"
+ ],
+ "required": [
+ "url"
+ ]
+ }
+ }
+ },
+ "paths": {
+ "/fal-ai/hunyuan-video-foley/requests/{request_id}/status": {
+ "get": {
+ "parameters": [
+ {
+ "name": "request_id",
+ "in": "path",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "description": "Request ID"
+ }
+ },
+ {
+ "name": "logs",
+ "in": "query",
+ "required": false,
+ "schema": {
+ "type": "number",
+ "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "The request status.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/QueueStatus"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "/fal-ai/hunyuan-video-foley/requests/{request_id}/cancel": {
+ "put": {
+ "parameters": [
+ {
+ "name": "request_id",
+ "in": "path",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "description": "Request ID"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "The request was cancelled.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "type": "object",
+ "properties": {
+ "success": {
+ "type": "boolean",
+ "description": "Whether the request was cancelled successfully."
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "/fal-ai/hunyuan-video-foley": {
+ "post": {
+ "requestBody": {
+ "required": true,
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/HunyuanVideoFoleyInput"
+ }
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "description": "The request status.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/QueueStatus"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "/fal-ai/hunyuan-video-foley/requests/{request_id}": {
+ "get": {
+ "parameters": [
+ {
+ "name": "request_id",
+ "in": "path",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "description": "Request ID"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "Result of the request.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/HunyuanVideoFoleyOutput"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "servers": [
+ {
+ "url": "https://queue.fal.run"
+ }
+ ],
+ "security": [
+ {
+ "apiKeyAuth": []
+ }
+ ]
+ }
+ },
+ {
+ "endpoint_id": "fal-ai/sync-lipsync/v2/pro",
+ "metadata": {
+ "display_name": "Sync Lipsync",
+ "category": "video-to-video",
+ "description": "Generate high-quality realistic lipsync animations from audio while preserving unique details like natural teeth and unique facial features using the state-of-the-art Sync Lipsync 2 Pro model.",
+ "status": "active",
+ "tags": [
+ "animation",
+ "lip sync",
+ "high-quality"
+ ],
+ "updated_at": "2026-01-26T21:42:58.817Z",
+ "is_favorited": false,
+ "thumbnail_url": "https://fal.media/files/zebra/LWtPGHOS-rwf3YakNhv1k_cbaee3dc5fa14f7a9b6fc63f048de8d4.jpg",
+ "model_url": "https://fal.run/fal-ai/sync-lipsync/v2/pro",
+ "license_type": "commercial",
+ "date": "2025-09-02T16:39:37.207Z",
+ "highlighted": false,
+ "kind": "inference",
+ "pinned": false
+ },
+ "openapi": {
+ "openapi": "3.0.4",
+ "info": {
+ "title": "Queue OpenAPI for fal-ai/sync-lipsync/v2/pro",
+ "version": "1.0.0",
+ "description": "The OpenAPI schema for the fal-ai/sync-lipsync/v2/pro queue.",
+ "x-fal-metadata": {
+ "endpointId": "fal-ai/sync-lipsync/v2/pro",
+ "category": "video-to-video",
+ "thumbnailUrl": "https://fal.media/files/zebra/LWtPGHOS-rwf3YakNhv1k_cbaee3dc5fa14f7a9b6fc63f048de8d4.jpg",
+ "playgroundUrl": "https://fal.ai/models/fal-ai/sync-lipsync/v2/pro",
+ "documentationUrl": "https://fal.ai/models/fal-ai/sync-lipsync/v2/pro/api"
+ }
+ },
+ "components": {
+ "securitySchemes": {
+ "apiKeyAuth": {
+ "type": "apiKey",
+ "in": "header",
+ "name": "Authorization",
+ "description": "Fal Key"
+ }
+ },
+ "schemas": {
+ "QueueStatus": {
+ "type": "object",
+ "properties": {
+ "status": {
+ "type": "string",
+ "enum": [
+ "IN_QUEUE",
+ "IN_PROGRESS",
+ "COMPLETED"
+ ]
+ },
+ "request_id": {
+ "type": "string",
+ "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "SyncLipsyncV2ProInput": { + "title": "LipSyncV2ProInput", + "type": "object", + "properties": { + "sync_mode": { + "enum": [ + "cut_off", + "loop", + "bounce", + "silence", + "remap" + ], + "title": "Sync Mode", + "type": "string", + "description": "Lipsync mode when audio and video durations are out of sync.", + "default": "cut_off" + }, + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/sync_v2_pro_video_input.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "URL of the input video" + }, + "audio_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/sync_v2_pro_audio_input.mp3" + ], + "title": "Audio Url", + "type": "string", + "description": "URL of the input audio" + } + }, + "x-fal-order-properties": [ + "video_url", + "audio_url", + "sync_mode" + ], + "required": [ + "video_url", + "audio_url" + ] + }, + "SyncLipsyncV2ProOutput": { + "title": "LipSyncV2ProOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/sync_v2_pro_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/sync-lipsync/v2/pro/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sync-lipsync/v2/pro/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/sync-lipsync/v2/pro": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SyncLipsyncV2ProInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sync-lipsync/v2/pro/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SyncLipsyncV2ProOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan-fun-control", + "metadata": { + "display_name": "Wan 2.2 Fun Control", + "category": "video-to-video", + "description": "Generate pose or depth controlled video using Alibaba-PAI's Wan 2.2 Fun", + "status": "active", + "tags": [ + "wan", + "pose", + "depth" + ], + "updated_at": "2026-01-26T21:42:59.692Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/lion/fucmWwAIt1kX7fdfTHkxo_a344226d24a74d4e805d4ad606fdc00d.jpg", + "model_url": "https://fal.run/fal-ai/wan-fun-control", + "license_type": "commercial", + "date": "2025-08-28T15:51:00.277Z", + "highlighted": false, + "kind": "inference", + "duration_estimate": 10, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan-fun-control", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan-fun-control queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan-fun-control", + "category": "video-to-video", + "thumbnailUrl": "https://fal.media/files/lion/fucmWwAIt1kX7fdfTHkxo_a344226d24a74d4e805d4ad606fdc00d.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan-fun-control", + "documentationUrl": "https://fal.ai/models/fal-ai/wan-fun-control/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." 
+ }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanFunControlInput": { + "title": "WanFunControlRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A woman wearing a lavender floral dress spins around in a circle." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the video." + }, + "shift": { + "minimum": 1, + "title": "Shift", + "type": "number", + "maximum": 10, + "description": "The shift for the scheduler.", + "default": 5 + }, + "preprocess_video": { + "title": "Preprocess Video", + "type": "boolean", + "description": "Whether to preprocess the video. If True, the video will be preprocessed to depth or pose.", + "default": false + }, + "reference_image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/wan-fun-input-reference-image.webp" + ], + "title": "Reference Image URL", + "type": "string", + "description": "The URL of the reference image to use as a reference for the video generation." + }, + "fps": { + "minimum": 4, + "title": "FPS", + "type": "integer", + "maximum": 60, + "description": "The fps to generate. Only used when match_input_fps is False.", + "default": 16 + }, + "match_input_num_frames": { + "title": "Match Input Number of Frames", + "type": "boolean", + "description": "Whether to match the number of frames in the input video.", + "default": true + }, + "guidance_scale": { + "minimum": 1, + "title": "Guidance Scale", + "type": "number", + "maximum": 10, + "description": "The guidance scale.", + "default": 6 + }, + "preprocess_type": { + "enum": [ + "depth", + "pose" + ], + "title": "Preprocess Type", + "type": "string", + "description": "The type of preprocess to apply to the video. Only used when preprocess_video is True.", + "default": "depth" + }, + "control_video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/wan-fun-control-video-input.mp4" + ], + "title": "Control Video URL", + "type": "string", + "description": "The URL of the control video to use as a reference for the video generation." + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to generate the video.", + "default": "" + }, + "num_frames": { + "minimum": 49, + "title": "Number of Frames", + "type": "integer", + "maximum": 121, + "description": "The number of frames to generate. Only used when match_input_num_frames is False.", + "default": 81 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed for the random number generator." 
+ }, + "num_inference_steps": { + "minimum": 4, + "title": "Number of Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps.", + "default": 27 + }, + "match_input_fps": { + "title": "Match Input FPS", + "type": "boolean", + "description": "Whether to match the fps in the input video.", + "default": true + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "num_inference_steps", + "guidance_scale", + "shift", + "seed", + "match_input_num_frames", + "num_frames", + "match_input_fps", + "fps", + "control_video_url", + "preprocess_video", + "preprocess_type", + "reference_image_url" + ], + "required": [ + "prompt", + "control_video_url" + ] + }, + "WanFunControlOutput": { + "title": "WanFunControlResponse", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/wan-fun-example-output.mp4" + } + ], + "title": "Video", + "description": "The video generated by the model.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan-fun-control/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-fun-control/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/wan-fun-control": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanFunControlInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-fun-control/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanFunControlOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "bria/video/increase-resolution", + "metadata": { + "display_name": "Video", + "category": "video-to-video", + "description": "Upscale videos up to 8K output resolution. Trained on fully licensed and commercially safe data.", + "status": "active", + "tags": [ + "video-upscaling", + "upscale" + ], + "updated_at": "2026-01-26T21:43:00.328Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/panda/omvoJT1pplSUhjUPCyluq_3379d19b520046aa88fbc60e473629e3.jpg", + "model_url": "https://fal.run/bria/video/increase-resolution", + "license_type": "commercial", + "date": "2025-08-26T07:20:19.877Z", + "group": { + "key": "bria", + "label": "Video Upscaling" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for bria/video/increase-resolution", + "version": "1.0.0", + "description": "The OpenAPI schema for the bria/video/increase-resolution queue.", + "x-fal-metadata": { + "endpointId": "bria/video/increase-resolution", + "category": "video-to-video", + "thumbnailUrl": "https://fal.media/files/panda/omvoJT1pplSUhjUPCyluq_3379d19b520046aa88fbc60e473629e3.jpg", + "playgroundUrl": "https://fal.ai/models/bria/video/increase-resolution", + "documentationUrl": "https://fal.ai/models/bria/video/increase-resolution/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ }
+ },
+ "required": [
+ "status",
+ "request_id"
+ ]
+ },
+ "VideoIncreaseResolutionInput": {
+ "title": "InputIncreaseResolutionModel",
+ "type": "object",
+ "properties": {
+ "video_url": {
+ "examples": [
+ "https://bria-datasets.s3.us-east-1.amazonaws.com/video_increase_res/3446608-sd_426_240_25fps.mp4"
+ ],
+ "title": "Video Url",
+ "type": "string",
+ "description": "Input video to increase resolution. Size should be less than 14142x14142 and duration less than 30s."
+ },
+ "output_container_and_codec": {
+ "enum": [
+ "mp4_h265",
+ "mp4_h264",
+ "webm_vp9",
+ "mov_h265",
+ "mov_proresks",
+ "mkv_h265",
+ "mkv_h264",
+ "mkv_vp9",
+ "gif"
+ ],
+ "title": "Output Container And Codec",
+ "type": "string",
+ "description": "Output container and codec. Options: mp4_h265, mp4_h264, webm_vp9, mov_h265, mov_proresks, mkv_h265, mkv_h264, mkv_vp9, gif.",
+ "default": "webm_vp9"
+ },
+ "desired_increase": {
+ "enum": [
+ "2",
+ "4"
+ ],
+ "title": "Desired Increase",
+ "type": "string",
+ "description": "Desired increase factor. Options: 2 (2x) or 4 (4x).",
+ "default": "2"
+ }
+ },
+ "x-fal-order-properties": [
+ "video_url",
+ "desired_increase",
+ "output_container_and_codec"
+ ],
+ "required": [
+ "video_url"
+ ]
+ },
+ "VideoIncreaseResolutionOutput": {
+ "title": "OutputIncreaseResolutionModel",
+ "type": "object",
+ "properties": {
+ "video": {
+ "anyOf": [
+ {
+ "$ref": "#/components/schemas/Video"
+ },
+ {
+ "$ref": "#/components/schemas/File"
+ }
+ ],
+ "title": "Video",
+ "description": "The video with increased resolution."
+ }
+ },
+ "x-fal-order-properties": [
+ "video"
+ ],
+ "required": [
+ "video"
+ ]
+ },
+ "Video": {
+ "title": "Video",
+ "type": "object",
+ "properties": {
+ "file_size": {
+ "examples": [
+ 4404019
+ ],
+ "title": "File Size",
+ "anyOf": [
+ {
+ "type": "integer"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "description": "The size of the file in bytes."
+ },
+ "file_name": {
+ "examples": [
+ "z9RV14K95DvU.png"
+ ],
+ "title": "File Name",
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "description": "The name of the file. It will be auto-generated if not provided."
+ },
+ "content_type": {
+ "examples": [
+ "image/png"
+ ],
+ "title": "Content Type",
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "description": "The mime type of the file."
+ },
+ "url": {
+ "title": "Url",
+ "type": "string",
+ "description": "The URL where the file can be downloaded from."
+ }
+ },
+ "x-fal-order-properties": [
+ "url",
+ "content_type",
+ "file_name",
+ "file_size"
+ ],
+ "required": [
+ "url"
+ ]
+ },
+ "File": {
+ "title": "File",
+ "type": "object",
+ "properties": {
+ "file_size": {
+ "examples": [
+ 4404019
+ ],
+ "title": "File Size",
+ "anyOf": [
+ {
+ "type": "integer"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "description": "The size of the file in bytes."
+ },
+ "file_name": {
+ "examples": [
+ "z9RV14K95DvU.png"
+ ],
+ "title": "File Name",
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "description": "The name of the file. It will be auto-generated if not provided."
+ },
+ "content_type": {
+ "examples": [
+ "image/png"
+ ],
+ "title": "Content Type",
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "description": "The mime type of the file."
+ },
+ "url": {
+ "title": "Url",
+ "type": "string",
+ "description": "The URL where the file can be downloaded from." 
+ } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/bria/video/increase-resolution/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/video/increase-resolution/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/bria/video/increase-resolution": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VideoIncreaseResolutionInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/bria/video/increase-resolution/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VideoIncreaseResolutionOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/infinitalk", + "metadata": { + "display_name": "Infinitalk", + "category": "video-to-video", + "description": "Infinitalk model generates a talking avatar video from an image and audio file. 
The avatar lip-syncs to the provided audio with natural facial expressions.", + "status": "active", + "tags": [ + "stylized", + "transform" + ], + "updated_at": "2026-01-26T21:43:02.040Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/elephant/tgT82rlusP7Mfw1gs8CsE_0cd1e1a621dd484b8c84973e3a1292bf.jpg", + "model_url": "https://fal.run/fal-ai/infinitalk", + "license_type": "commercial", + "date": "2025-08-21T13:55:42.331Z", + "group": { + "key": "Infinitalk", + "label": "Audio" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/infinitalk", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/infinitalk queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/infinitalk", + "category": "video-to-video", + "thumbnailUrl": "https://fal.media/files/elephant/tgT82rlusP7Mfw1gs8CsE_0cd1e1a621dd484b8c84973e3a1292bf.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/infinitalk", + "documentationUrl": "https://fal.ai/models/fal-ai/infinitalk/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "InfinitalkInput": { + "title": "InfiniTalkSingleAudioRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A woman with colorful hair talking on a podcast." + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt to guide video generation." + }, + "resolution": { + "enum": [ + "480p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the video to generate. Must be either 480p or 720p.", + "default": "480p" + }, + "acceleration": { + "enum": [ + "none", + "regular", + "high" + ], + "title": "Acceleration", + "type": "string", + "description": "The acceleration level to use for generation.", + "default": "regular" + }, + "image_url": { + "examples": [ + "https://v3.fal.media/files/koala/gmpc0QevDF9bBsL1EAYVF_1c637094161147559f0910a68275dc34.png" + ], + "title": "Image URL", + "type": "string", + "description": "URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped." + }, + "audio_url": { + "examples": [ + "https://v3.fal.media/files/penguin/PtiCYda53E9Dav25QmQYI_output.mp3" + ], + "title": "Audio URL", + "type": "string", + "description": "The URL of the audio file." + }, + "num_frames": { + "minimum": 41, + "maximum": 721, + "type": "integer", + "title": "Number of Frames", + "description": "Number of frames to generate. 
Must be between 41 and 721.",
+ "default": 145
+ },
+ "seed": {
+ "title": "Seed",
+ "type": "integer",
+ "description": "Random seed for reproducibility. If None, a random seed is chosen.",
+ "default": 42
+ }
+ },
+ "x-fal-order-properties": [
+ "image_url",
+ "audio_url",
+ "prompt",
+ "num_frames",
+ "resolution",
+ "seed",
+ "acceleration"
+ ],
+ "required": [
+ "image_url",
+ "audio_url",
+ "prompt"
+ ]
+ },
+ "InfinitalkOutput": {
+ "title": "AvatarSingleAudioResponse",
+ "type": "object",
+ "properties": {
+ "seed": {
+ "title": "Seed",
+ "type": "integer",
+ "description": "The seed used for generation."
+ },
+ "video": {
+ "examples": [
+ {
+ "file_size": 515275,
+ "file_name": "74af6c0bdd6041c3b1130d54885e3eee.mp4",
+ "content_type": "application/octet-stream",
+ "url": "https://v3.fal.media/files/kangaroo/z6VqUwNTwzuWa6YE1g7In_74af6c0bdd6041c3b1130d54885e3eee.mp4"
+ }
+ ],
+ "title": "Video",
+ "description": "The generated video file.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/File"
+ }
+ ]
+ }
+ },
+ "x-fal-order-properties": [
+ "video",
+ "seed"
+ ],
+ "required": [
+ "video",
+ "seed"
+ ]
+ },
+ "File": {
+ "title": "File",
+ "type": "object",
+ "properties": {
+ "file_size": {
+ "examples": [
+ 4404019
+ ],
+ "title": "File Size",
+ "type": "integer",
+ "description": "The size of the file in bytes."
+ },
+ "file_name": {
+ "examples": [
+ "z9RV14K95DvU.png"
+ ],
+ "title": "File Name",
+ "type": "string",
+ "description": "The name of the file. It will be auto-generated if not provided."
+ },
+ "content_type": {
+ "examples": [
+ "image/png"
+ ],
+ "title": "Content Type",
+ "type": "string",
+ "description": "The mime type of the file."
+ },
+ "url": {
+ "title": "Url",
+ "type": "string",
+ "description": "The URL where the file can be downloaded from."
+ },
+ "file_data": {
+ "format": "binary",
+ "title": "File Data",
+ "type": "string",
+ "description": "File data"
+ }
+ },
+ "x-fal-order-properties": [
+ "url",
+ "content_type",
+ "file_name",
+ "file_size",
+ "file_data"
+ ],
+ "required": [
+ "url"
+ ]
+ }
+ }
+ },
+ "paths": {
+ "/fal-ai/infinitalk/requests/{request_id}/status": {
+ "get": {
+ "parameters": [
+ {
+ "name": "request_id",
+ "in": "path",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "description": "Request ID"
+ }
+ },
+ {
+ "name": "logs",
+ "in": "query",
+ "required": false,
+ "schema": {
+ "type": "number",
+ "description": "Whether to include logs (`1`) in the response or not (`0`)."
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "The request status.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/QueueStatus"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "/fal-ai/infinitalk/requests/{request_id}/cancel": {
+ "put": {
+ "parameters": [
+ {
+ "name": "request_id",
+ "in": "path",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "description": "Request ID"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "The request was cancelled.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "type": "object",
+ "properties": {
+ "success": {
+ "type": "boolean",
+ "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/infinitalk": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InfinitalkInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/infinitalk/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InfinitalkOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "mirelo-ai/sfx-v1/video-to-video", + "metadata": { + "display_name": "Mirelo SFX", + "category": "video-to-video", + "description": "Generate synced sounds for any video, and return it with its new sound track (like MMAudio)\n", + "status": "active", + "tags": [ + "video-to-video", + "sfx" + ], + "updated_at": "2026-01-26T21:43:04.202Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/koala/ZgJTjdDKaW4OFVZqUmepA_4f542f98a3fc4edbb9043ff22131adde.jpg", + "model_url": "https://fal.run/mirelo-ai/sfx-v1/video-to-video", + "license_type": "commercial", + "date": "2025-08-14T15:28:42.130Z", + "group": { + "key": "mirelo-sfx-v1", + "label": "Video To Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for mirelo-ai/sfx-v1/video-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the mirelo-ai/sfx-v1/video-to-video queue.", + "x-fal-metadata": { + "endpointId": "mirelo-ai/sfx-v1/video-to-video", + "category": "video-to-video", + "thumbnailUrl": "https://fal.media/files/koala/ZgJTjdDKaW4OFVZqUmepA_4f542f98a3fc4edbb9043ff22131adde.jpg", + "playgroundUrl": "https://fal.ai/models/mirelo-ai/sfx-v1/video-to-video", + "documentationUrl": "https://fal.ai/models/mirelo-ai/sfx-v1/video-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "SfxV1VideoToVideoInput": { + "x-fal-order-properties": [ + "video_url", + "text_prompt", + "num_samples", + "seed", + "duration" + ], + "type": "object", + "properties": { + "num_samples": { + "anyOf": [ + { + "minimum": 2, + "maximum": 8, + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Num Samples", + "description": "The number of samples to generate from the model", + "default": 2 + }, + "video_url": { + "format": "uri", + "description": "A video URL that can be accessed from the API to process and add sound effects", + "type": "string", + "examples": [ + "https://di3otfzjg1gxa.cloudfront.net/input_example.mp4" + ], + "maxLength": 2083, + "minLength": 1, + "title": "Video Url" + }, + "duration": { + "anyOf": [ + { + "minimum": 1, + "maximum": 10, + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Duration", + "description": "The duration of the generated audio in seconds", + "default": 10 + }, + "seed": { + "anyOf": [ + { + "minimum": 1, + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The seed to use for the generation. If not provided, a random seed will be used", + "title": "Seed", + "default": 2105 + }, + "text_prompt": { + "examples": [ + "" + ], + "title": "Text Prompt", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Additional description to guide the model" + } + }, + "title": "Input", + "required": [ + "video_url" + ] + }, + "SfxV1VideoToVideoOutput": { + "title": "VideoOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + [ + { + "file_name": "generated_output_1.mp4", + "content_type": "video/mp4", + "url": "https://di3otfzjg1gxa.cloudfront.net/generated_output_1.mp4" + }, + { + "file_name": "generated_output_2.mp4", + "content_type": "video/mp4", + "url": "https://di3otfzjg1gxa.cloudfront.net/generated_output_2.mp4" + } + ] + ], + "title": "Video", + "type": "array", + "description": "The processed video with sound effects", + "items": { + "$ref": "#/components/schemas/Video" + } + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "Video": { + "title": "Video", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "title": "Content Type", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from."
+ } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/mirelo-ai/sfx-v1/video-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/mirelo-ai/sfx-v1/video-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/mirelo-ai/sfx-v1/video-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SfxV1VideoToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/mirelo-ai/sfx-v1/video-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SfxV1VideoToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "moonvalley/marey/pose-transfer", + "metadata": { + "display_name": "Marey Realism V1.5", + "category": "video-to-video", + "description": "Ideal for matching human movement. 
Your input video determines human poses, gestures, and body movements that will appear in the generated video.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:04.518Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/rabbit/wFiWRdymluA_qihYl76wE_13a2425a72a8455190e54c46b99546e4.jpg", + "model_url": "https://fal.run/moonvalley/marey/pose-transfer", + "license_type": "commercial", + "date": "2025-08-14T01:35:00.575Z", + "group": { + "key": "marey", + "label": "Pose Transfer" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for moonvalley/marey/pose-transfer", + "version": "1.0.0", + "description": "The OpenAPI schema for the moonvalley/marey/pose-transfer queue.", + "x-fal-metadata": { + "endpointId": "moonvalley/marey/pose-transfer", + "category": "video-to-video", + "thumbnailUrl": "https://fal.media/files/rabbit/wFiWRdymluA_qihYl76wE_13a2425a72a8455190e54c46b99546e4.jpg", + "playgroundUrl": "https://fal.ai/models/moonvalley/marey/pose-transfer", + "documentationUrl": "https://fal.ai/models/moonvalley/marey/pose-transfer/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MareyPoseTransferInput": { + "title": "MareyInputPoseTransfer", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Detailed Description: A venerable tribal chief, his weathered face marked with dark, ritualistic paint, stands proudly against a jungle backdrop. His elaborate headdress, a magnificent creation of numerous feathers, beads, and a central polished stone, sways gently with his movements. His initial stern expression softens into a confident smile as he begins to speak, his lips moving with unspoken words of wisdom or command. In a single, fluid motion, he raises his hand and gives a decisive wave, a gesture of both greeting and authority that underscores his leadership role within the tribe.\n\nBackground: A dense tropical rainforest is blurred into a soft, verdant backdrop, with muted greens and browns suggesting a lush, humid environment.\n\nMiddleground: The chief is the central focus, his head crowned by the large, intricate feathered headdress that moves subtly as he speaks. His shoulders and torso are visible, adorned with traditional necklaces.\n\nForeground: His hand lifts into the frame, palm open, executing a single, confident wave toward the viewer before lowering again. The closest feathers of his headdress rustle with the motion." 
+ ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate a video from" + }, + "video_url": { + "examples": [ + "https://d1kaxrqq3vfrw5.cloudfront.net/fal-launch-assets/guide-assets/fal-pose-transfer-input.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "The URL of the video to use as the control video." + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Seed for random number generation. Use -1 for random seed each run.", + "default": -1 + }, + "reference_image_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Reference Image Url", + "description": "Optional reference image URL to use for pose control or as a starting frame" + }, + "negative_prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Negative Prompt", + "description": "Negative prompt used to guide the model away from undesirable features.", + "default": " low-poly, flat shader, bad rigging, stiff animation, uncanny eyes, low-quality textures, looping glitch, cheap effect, overbloom, bloom spam, default lighting, game asset, stiff face, ugly specular, AI artifacts" + }, + "first_frame_image_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "First Frame Image Url", + "description": "Optional first frame image URL to use as the first frame of the generated video" + } + }, + "x-fal-order-properties": [ + "prompt", + "video_url", + "first_frame_image_url", + "reference_image_url", + "negative_prompt", + "seed" + ], + "required": [ + "prompt", + "video_url" + ] + }, + "MareyPoseTransferOutput": { + "title": "MareyOutput", + "type": "object", + "properties": { + "video": { + "description": "The generated video.", + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/moonvalley/marey/pose-transfer/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/moonvalley/marey/pose-transfer/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/moonvalley/marey/pose-transfer": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MareyPoseTransferInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/moonvalley/marey/pose-transfer/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MareyPoseTransferOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "moonvalley/marey/motion-transfer", + "metadata": { + "display_name": "Marey Realism V1.5", + "category": "video-to-video", + "description": "Pull motion from a reference video and apply it to new subjects or scenes.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:04.643Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/koala/109Ropx2Ejswcp0dNuahi_5a78312f31ce4aa4b33f7be5953c2d95.jpg", + "model_url": "https://fal.run/moonvalley/marey/motion-transfer", + "license_type": "commercial", + "date": "2025-08-14T01:30:37.686Z", + "group": { + "key": "marey", + "label": "Motion Transfer" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for moonvalley/marey/motion-transfer", + "version": "1.0.0", + "description": "The OpenAPI schema for the moonvalley/marey/motion-transfer queue.", + "x-fal-metadata": { + "endpointId": "moonvalley/marey/motion-transfer", + "category": "video-to-video", + "thumbnailUrl": "https://fal.media/files/koala/109Ropx2Ejswcp0dNuahi_5a78312f31ce4aa4b33f7be5953c2d95.jpg", + "playgroundUrl": "https://fal.ai/models/moonvalley/marey/motion-transfer", + "documentationUrl": "https://fal.ai/models/moonvalley/marey/motion-transfer/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MareyMotionTransferInput": { + "title": "MareyInputMotionTransfer", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Detailed Description: A fast, smooth dolly shot glides forward at water level through a monumental, minimalist colonnade. The imposing, symmetrical rows of brutalist marble columns rush past on either side, their strong vertical lines creating a sense of powerful, constant motion. The dark, glassy water of a central pool perfectly reflects the towering structures, with gentle ripples disturbing the mirror image as the camera advances. The scene is cinematic and moody, with the light-colored stone contrasting against the dark water and the pale sky visible at the far end of the architectural tunnel. shot on 35mm, film, organic, analog, motion blur\n\nBackground: A pale, overcast sky and a distant treeline are framed by the opening at the end of the colonnade, growing larger as the camera moves forward.\n\nMiddleground: The two rows of massive, geometric columns recede into the distance, their uniform shapes creating a hypnotic, rhythmic pattern that rushes past the lens.\n\nForeground: The camera skims just above the surface of the dark, rippling water, which reflects the blurred motion of the columns passing on the left and right." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate a video from" + }, + "video_url": { + "examples": [ + "https://d1kaxrqq3vfrw5.cloudfront.net/fal-launch-assets/guide-assets/fal-motion-transfer-input-5s.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "The URL of the video to use as the control video." + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Seed for random number generation. 
Use -1 for random seed each run.", + "default": -1 + }, + "reference_image_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Reference Image Url", + "description": "Optional reference image URL to use for pose control or as a starting frame" + }, + "negative_prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Negative Prompt", + "description": "Negative prompt used to guide the model away from undesirable features.", + "default": " low-poly, flat shader, bad rigging, stiff animation, uncanny eyes, low-quality textures, looping glitch, cheap effect, overbloom, bloom spam, default lighting, game asset, stiff face, ugly specular, AI artifacts" + }, + "first_frame_image_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "First Frame Image Url", + "description": "Optional first frame image URL to use as the first frame of the generated video", + "default": "https://video-editor-files-prod.s3.us-east-2.amazonaws.com/users/1e4d46df-0702-4491-95ce-763592f33f34/uploaded-images/9b9dce1c-abd0-46c0-bac9-9454f8893b06/original" + } + }, + "x-fal-order-properties": [ + "prompt", + "video_url", + "first_frame_image_url", + "reference_image_url", + "negative_prompt", + "seed" + ], + "required": [ + "prompt", + "video_url" + ] + }, + "MareyMotionTransferOutput": { + "title": "MareyOutput", + "type": "object", + "properties": { + "video": { + "description": "The generated video.", + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/moonvalley/marey/motion-transfer/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/moonvalley/marey/motion-transfer/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/moonvalley/marey/motion-transfer": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MareyMotionTransferInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/moonvalley/marey/motion-transfer/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MareyMotionTransferOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ffmpeg-api/merge-videos", + "metadata": { + "display_name": "Ffmpeg Api", + "category": "video-to-video", + "description": "Use ffmpeg capabilities to merge 2 or more videos.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:05.388Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/zebra/FIx0cgDim0YGWogS3xc0x_c4b30ad7fb8a4747814f7d80c99adaaf.jpg", + "model_url": "https://fal.run/fal-ai/ffmpeg-api/merge-videos", + "license_type": "commercial", + "date": "2025-08-12T14:10:10.402Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ffmpeg-api/merge-videos", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ffmpeg-api/merge-videos queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ffmpeg-api/merge-videos", + "category": "video-to-video", + "thumbnailUrl": "https://fal.media/files/zebra/FIx0cgDim0YGWogS3xc0x_c4b30ad7fb8a4747814f7d80c99adaaf.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ffmpeg-api/merge-videos", + "documentationUrl": "https://fal.ai/models/fal-ai/ffmpeg-api/merge-videos/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." 
+ }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FfmpegApiMergeVideosInput": { + "title": "MergeVideosInput", + "type": "object", + "properties": { + "target_fps": { + "anyOf": [ + { + "minimum": 1, + "maximum": 60, + "type": "number" + }, + { + "type": "null" + } + ], + "description": "Target FPS for the output video. If not provided, uses the lowest FPS from input videos.", + "title": "Target Fps" + }, + "video_urls": { + "description": "List of video URLs to merge in order", + "type": "array", + "minItems": 2, + "title": "Video Urls", + "items": { + "type": "string" + } + }, + "resolution": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Resolution of the final video. Width and height must be between 512 and 2048.", + "title": "Resolution" + } + }, + "x-fal-order-properties": [ + "video_urls", + "target_fps", + "resolution" + ], + "required": [ + "video_urls" + ] + }, + "FfmpegApiMergeVideosOutput": { + "title": "MergeVideosOutput", + "type": "object", + "properties": { + "metadata": { + "description": "Metadata about the merged video including original video info", + "type": "object", + "title": "Metadata" + }, + "video": { + "description": "Merged video file", + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "video", + "metadata" + ], + "required": [ + "video", + "metadata" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "description": "The height of the generated image.", + "title": "Height", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "description": "The width of the generated image.", + "title": "Width", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ffmpeg-api/merge-videos/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ffmpeg-api/merge-videos/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ffmpeg-api/merge-videos": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FfmpegApiMergeVideosInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ffmpeg-api/merge-videos/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FfmpegApiMergeVideosOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan/v2.2-a14b/video-to-video", + "metadata": { + "display_name": "Wan", + "category": "video-to-video", + "description": "Wan-2.2 video-to-video is a video model that generates videos with high visual quality and motion diversity from text prompts and source videos.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:43:08.296Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/kangaroo/fRLY4F3IQ1P3DwmVR8WUO_Mmzbg7vdKVmIAsxNycTbw_4a43f046e5be4a97b45a98320ff47bf3.jpg", + "model_url": "https://fal.run/fal-ai/wan/v2.2-a14b/video-to-video", + "license_type": "commercial", + "date": "2025-08-02T02:30:21.079Z", + "group": { + "key": "wan-v22-large", + "label": "Video to Video" + }, + "highlighted": false, +
"kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan/v2.2-a14b/video-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan/v2.2-a14b/video-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan/v2.2-a14b/video-to-video", + "category": "video-to-video", + "thumbnailUrl": "https://v3.fal.media/files/kangaroo/fRLY4F3IQ1P3DwmVR8WUO_Mmzbg7vdKVmIAsxNycTbw_4a43f046e5be4a97b45a98320ff47bf3.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan/v2.2-a14b/video-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/wan/v2.2-a14b/video-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanV22A14bVideoToVideoInput": { + "title": "WanV2VRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A low-angle medium shot captures a domestic white cat with brown and black patches and a blue bandana sitting on a light-colored tiled floor indoors, meticulously grooming itself by licking its paw and then rubbing its face with it, against a soft-focused background of wooden kitchen cabinets and a reflective metallic appliance." + ], + "description": "The text prompt to guide video generation.", + "type": "string", + "title": "Prompt" + }, + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/wan-2.2-v2v-input.mp4" + ], + "description": "URL of the input video.", + "type": "string", + "title": "Video URL" + }, + "acceleration": { + "enum": [ + "none", + "regular" + ], + "description": "Acceleration level to use. The more acceleration, the faster the generation, but with lower quality. The recommended value is 'regular'.", + "type": "string", + "title": "Acceleration", + "examples": [ + "regular" + ], + "default": "regular" + }, + "num_interpolated_frames": { + "description": "Number of frames to interpolate between each pair of generated frames. Must be between 0 and 4.", + "type": "integer", + "minimum": 0, + "maximum": 4, + "title": "Number of Interpolated Frames", + "examples": [ + 1 + ], + "default": 1 + }, + "shift": { + "description": "Shift value for the video. Must be between 1.0 and 10.0.", + "type": "number", + "minimum": 1, + "maximum": 10, + "title": "Shift", + "examples": [ + 5 + ], + "default": 5 + }, + "resample_fps": { + "examples": [ + false + ], + "description": "If true, the video will be resampled to the passed frames per second. 
If false, the video will not be resampled.", + "type": "boolean", + "title": "Resample Video Frame Rate", + "default": false + }, + "frames_per_second": { + "description": "Frames per second of the generated video. Must be between 4 and 60. When using interpolation and `adjust_fps_for_interpolation` is set to true (default true), the final FPS will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If `adjust_fps_for_interpolation` is set to false, this value will be used as-is.", + "type": "integer", + "minimum": 4, + "maximum": 60, + "title": "Frames per Second", + "examples": [ + 16 + ], + "default": 16 + }, + "enable_safety_checker": { + "examples": [ + true + ], + "description": "If set to true, input data will be checked for safety before processing.", + "type": "boolean", + "title": "Enable Safety Checker", + "default": false + }, + "num_frames": { + "description": "Number of frames to generate. Must be between 17 and 161 (inclusive).", + "type": "integer", + "minimum": 17, + "maximum": 161, + "title": "Number of Frames", + "examples": [ + 81 + ], + "default": 81 + }, + "guidance_scale": { + "description": "Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.", + "type": "number", + "minimum": 1, + "maximum": 10, + "title": "Guidance Scale (1st Stage)", + "examples": [ + 3.5 + ], + "default": 3.5 + }, + "negative_prompt": { + "description": "Negative prompt for video generation.", + "type": "string", + "title": "Negative Prompt", + "default": "" + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "description": "The write mode of the output video. Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size.", + "type": "string", + "title": "Video Write Mode", + "examples": [ + "balanced" + ], + "default": "balanced" + }, + "resolution": { + "enum": [ + "480p", + "580p", + "720p" + ], + "description": "Resolution of the generated video (480p, 580p, or 720p).", + "type": "string", + "title": "Resolution", + "examples": [ + "720p" + ], + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "9:16", + "1:1" + ], + "description": "Aspect ratio of the generated video. If 'auto', the aspect ratio will be determined automatically based on the input video.", + "type": "string", + "title": "Aspect Ratio", + "default": "auto" + }, + "enable_output_safety_checker": { + "examples": [ + false + ], + "description": "If set to true, output video will be checked for safety after generation.", + "type": "boolean", + "title": "Enable Output Safety Checker", + "default": false + }, + "guidance_scale_2": { + "description": "Guidance scale for the second stage of the model. This is used to control the adherence to the prompt in the second stage of the model.", + "type": "number", + "minimum": 1, + "maximum": 10, + "title": "Guidance Scale (2nd Stage)", + "examples": [ + 4 + ], + "default": 4 + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "description": "The quality of the output video. 
Higher quality means better visual quality but larger file size.", + "type": "string", + "title": "Video Quality", + "examples": [ + "high" + ], + "default": "high" + }, + "strength": { + "description": "Strength of the video transformation. A value of 1.0 means the output will be completely based on the prompt, while a value of 0.0 means the output will be identical to the input video.", + "type": "number", + "minimum": 0, + "maximum": 1, + "title": "Strength", + "examples": [ + 0.9 + ], + "default": 0.9 + }, + "enable_prompt_expansion": { + "examples": [ + false + ], + "description": "Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning.", + "type": "boolean", + "title": "Enable Prompt Expansion", + "default": false + }, + "num_inference_steps": { + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "type": "integer", + "minimum": 2, + "maximum": 40, + "title": "Number of Inference Steps", + "examples": [ + 27 + ], + "default": 27 + }, + "interpolator_model": { + "enum": [ + "none", + "film", + "rife" + ], + "description": "The model to use for frame interpolation. If None, no interpolation is applied.", + "type": "string", + "title": "Interpolator Model", + "examples": [ + "film" + ], + "default": "film" + }, + "seed": { + "description": "Random seed for reproducibility. If None, a random seed is chosen.", + "type": "integer", + "title": "Seed" + }, + "adjust_fps_for_interpolation": { + "examples": [ + true + ], + "description": "If true, the number of frames per second will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If false, the passed frames per second will be used as-is.", + "type": "boolean", + "title": "Adjust FPS for Interpolation", + "default": true + } + }, + "x-fal-order-properties": [ + "video_url", + "prompt", + "strength", + "num_frames", + "frames_per_second", + "negative_prompt", + "seed", + "resolution", + "aspect_ratio", + "num_inference_steps", + "enable_safety_checker", + "enable_output_safety_checker", + "enable_prompt_expansion", + "acceleration", + "guidance_scale", + "guidance_scale_2", + "shift", + "interpolator_model", + "num_interpolated_frames", + "adjust_fps_for_interpolation", + "video_quality", + "video_write_mode", + "resample_fps" + ], + "required": [ + "prompt", + "video_url" + ] + }, + "WanV22A14bVideoToVideoOutput": { + "title": "WanV2VResponse", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A low-angle medium shot captures a domestic white cat with brown and black patches and a blue bandana sitting on a light-colored tiled floor indoors, meticulously grooming itself by licking its paw and then rubbing its face with it, against a soft-focused background of wooden kitchen cabinets and a reflective metallic appliance." 
+ ], + "description": "The text prompt used for video generation.", + "type": "string", + "title": "Prompt", + "default": "" + }, + "seed": { + "description": "The seed used for generation.", + "type": "integer", + "title": "Seed" + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/wan-2.2-v2v-output.mp4" + } + ], + "description": "The generated video file.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "prompt", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan/v2.2-a14b/video-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-a14b/video-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-a14b/video-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanV22A14bVideoToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan/v2.2-a14b/video-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanV22A14bVideoToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltxv-13b-098-distilled/extend", + "metadata": { + "display_name": "LTX-Video 13B 0.9.8 Distilled", + "category": "video-to-video", + "description": "Extend videos using LTX Video-0.9.8 13B Distilled and custom LoRA", + "status": "active", + "tags": [ + "ltx-video", + "extend" + ], + "updated_at": "2026-01-26T21:43:13.174Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-4.jpg", + "model_url": "https://fal.run/fal-ai/ltxv-13b-098-distilled/extend", + "license_type": "commercial", + "date": "2025-07-23T16:11:07.081Z", + "group": { + "key": "ltx-video-13b-098", + "label": "Extend Video" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/ltx-video-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/ltx-video-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltxv-13b-098-distilled/extend", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltxv-13b-098-distilled/extend queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltxv-13b-098-distilled/extend", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-4.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltxv-13b-098-distilled/extend", + "documentationUrl": "https://fal.ai/models/fal-ai/ltxv-13b-098-distilled/extend/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Ltxv13b098DistilledExtendInput": { + "description": "Distilled model input", + "type": "object", + "properties": { + "second_pass_skip_initial_steps": { + "description": "The number of inference steps to skip in the initial steps of the second pass. By skipping some steps at the beginning, the second pass can focus on smaller details instead of larger changes.", + "type": "integer", + "minimum": 1, + "maximum": 11, + "title": "Second Pass Skip Initial Steps", + "examples": [ + 5 + ], + "default": 5 + }, + "first_pass_num_inference_steps": { + "description": "Number of inference steps during the first pass.", + "type": "integer", + "minimum": 2, + "maximum": 12, + "title": "Number of Inference Steps", + "examples": [ + 8 + ], + "default": 8 + }, + "frame_rate": { + "description": "The frame rate of the video.", + "type": "integer", + "minimum": 1, + "maximum": 60, + "title": "Frame Rate", + "examples": [ + 24 + ], + "default": 24 + }, + "reverse_video": { + "examples": [ + false + ], + "title": "Reverse Video", + "type": "boolean", + "description": "Whether to reverse the video.", + "default": false + }, + "prompt": { + "examples": [ + "Woman walking on a street in Tokyo" + ], + "title": "Prompt", + "type": "string", + "description": "Text prompt to guide generation" + }, + "expand_prompt": { + "examples": [ + false + ], + "title": "Expand Prompt", + "type": "boolean", + "description": "Whether to expand the prompt using a language model.", + "default": false + }, + "temporal_adain_factor": { + "description": "The factor for adaptive instance normalization (AdaIN) applied to generated video chunks after the first. This can help deal with a gradual increase in saturation/contrast in the generated video by normalizing the color distribution across the video. 
A high value will ensure the color distribution is more consistent across the video, while a low value will allow for more variation in color distribution.", + "type": "number", + "examples": [ + 0.5 + ], + "maximum": 1, + "title": "Temporal AdaIN Factor", + "minimum": 0, + "multipleOf": 0.05, + "default": 0.5 + }, + "loras": { + "description": "LoRA weights to use for generation", + "type": "array", + "title": "Loras", + "items": { + "$ref": "#/components/schemas/LoRAWeight" + }, + "default": [] + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "num_frames": { + "description": "The number of frames in the video.", + "type": "integer", + "minimum": 9, + "maximum": 1441, + "title": "Number of Frames", + "examples": [ + 121 + ], + "default": 121 + }, + "second_pass_num_inference_steps": { + "description": "Number of inference steps during the second pass.", + "type": "integer", + "minimum": 2, + "maximum": 12, + "title": "Second Pass Number of Inference Steps", + "examples": [ + 8 + ], + "default": 8 + }, + "negative_prompt": { + "description": "Negative prompt for generation", + "type": "string", + "title": "Negative Prompt", + "default": "worst quality, inconsistent motion, blurry, jittery, distorted" + }, + "video": { + "examples": [ + { + "video_url": "https://storage.googleapis.com/falserverless/web-examples/wan/t2v.mp4", + "start_frame_num": 0, + "reverse_video": false, + "limit_num_frames": false, + "resample_fps": false, + "strength": 1, + "target_fps": 24, + "max_num_frames": 1441, + "conditioning_type": "rgb", + "preprocess": false + } + ], + "title": "Video", + "description": "Video to be extended.", + "allOf": [ + { + "$ref": "#/components/schemas/ExtendVideoConditioningInput" + } + ] + }, + "enable_detail_pass": { + "examples": [ + false + ], + "title": "Enable Detail Pass", + "type": "boolean", + "description": "Whether to use a detail pass. If True, the model will perform a second pass to refine the video and enhance details. This incurs a 2.0x cost multiplier on the base price.", + "default": false + }, + "resolution": { + "examples": [ + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video.", + "enum": [ + "480p", + "720p" + ], + "default": "720p" + }, + "aspect_ratio": { + "examples": [ + "auto" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the video.", + "enum": [ + "9:16", + "1:1", + "16:9", + "auto" + ], + "default": "auto" + }, + "tone_map_compression_ratio": { + "description": "The compression ratio for tone mapping. This is used to compress the dynamic range of the video to improve visual quality. A value of 0.0 means no compression, while a value of 1.0 means maximum compression.", + "type": "number", + "examples": [ + 0 + ], + "maximum": 1, + "title": "Tone Map Compression Ratio", + "minimum": 0, + "multipleOf": 0.05, + "default": 0 + }, + "constant_rate_factor": { + "description": "The constant rate factor (CRF) to compress input media with. Compressed input media more closely matches the model's training data, which can improve motion quality.", + "type": "integer", + "minimum": 0, + "maximum": 51, + "title": "Constant Rate Factor", + "examples": [ + 29 + ], + "default": 29 + },
+ "seed": { + "description": "Random seed for generation", + "type": "integer", + "title": "Seed" + } + }, + "title": "DistilledExtendVideoInput", + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "loras", + "resolution", + "aspect_ratio", + "seed", + "num_frames", + "first_pass_num_inference_steps", + "second_pass_num_inference_steps", + "second_pass_skip_initial_steps", + "frame_rate", + "expand_prompt", + "reverse_video", + "enable_safety_checker", + "enable_detail_pass", + "temporal_adain_factor", + "tone_map_compression_ratio", + "constant_rate_factor", + "video" + ], + "required": [ + "prompt", + "video" + ] + }, + "Ltxv13b098DistilledExtendOutput": { + "title": "ExtendVideoOutput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Woman walking on a street in Tokyo" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation." + }, + "seed": { + "description": "The seed used for generation.", + "type": "integer", + "title": "Seed" + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/ltx-v095_extend.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "prompt", + "seed" + ], + "required": [ + "video", + "prompt", + "seed" + ] + }, + "LoRAWeight": { + "title": "LoRAWeight", + "type": "object", + "properties": { + "path": { + "description": "URL or path to the LoRA weights.", + "type": "string", + "title": "Path" + }, + "scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "description": "Scale of the LoRA weight. This is a multiplier applied to the LoRA weight when loading it.", + "title": "Scale", + "default": 1 + }, + "weight_name": { + "description": "Name of the LoRA weight. Only used if `path` is a HuggingFace repository, and is only required when the repository contains multiple LoRA weights.", + "type": "string", + "title": "Weight Name" + } + }, + "x-fal-order-properties": [ + "path", + "weight_name", + "scale" + ], + "required": [ + "path" + ] + }, + "ExtendVideoConditioningInput": { + "title": "ExtendVideoConditioningInput", + "type": "object", + "properties": { + "video_url": { + "description": "URL of video to use as conditioning", + "type": "string", + "title": "Video URL" + }, + "start_frame_num": { + "description": "Frame number of the video from which the conditioning starts. Must be a multiple of 8.", + "type": "integer", + "minimum": 0, + "maximum": 1440, + "title": "Start Frame Number", + "multipleOf": 8, + "default": 0 + }, + "reverse_video": { + "description": "Whether to reverse the video. This is useful for tasks where the video conditioning should be applied in reverse order.", + "type": "boolean", + "title": "Reverse Video", + "default": false + }, + "limit_num_frames": { + "description": "Whether to limit the number of frames used from the video. If True, the `max_num_frames` parameter will be used to limit the number of frames.", + "type": "boolean", + "title": "Limit Number of Frames", + "default": false + }, + "resample_fps": { + "description": "Whether to resample the video to a specific FPS. If True, the `target_fps` parameter will be used to resample the video.", + "type": "boolean", + "title": "Resample FPS", + "default": false + },
+ "strength": { + "minimum": 0, + "maximum": 1, + "type": "number", + "description": "Strength of the conditioning. 0.0 means no conditioning, 1.0 means full conditioning.", + "title": "Strength", + "default": 1 + }, + "target_fps": { + "description": "Target FPS to resample the video to. Only relevant if `resample_fps` is True.", + "type": "integer", + "minimum": 1, + "maximum": 60, + "title": "Target FPS", + "examples": [ + 24 + ], + "default": 24 + }, + "max_num_frames": { + "description": "Maximum number of frames to use from the video. If None, all frames will be used.", + "type": "integer", + "minimum": 1, + "maximum": 1441, + "title": "Maximum Number of Frames", + "examples": [ + 1441 + ], + "default": 1441 + } + }, + "x-fal-order-properties": [ + "video_url", + "start_frame_num", + "strength", + "limit_num_frames", + "max_num_frames", + "resample_fps", + "target_fps", + "reverse_video" + ], + "required": [ + "video_url" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltxv-13b-098-distilled/extend/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltxv-13b-098-distilled/extend/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully."
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/ltxv-13b-098-distilled/extend": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltxv13b098DistilledExtendInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltxv-13b-098-distilled/extend/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltxv13b098DistilledExtendOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/rife/video", + "metadata": { + "display_name": "RIFE", + "category": "video-to-video", + "description": "Interpolate videos with RIFE - Real-Time Intermediate Flow Estimation", + "status": "active", + "tags": [ + "interpolation" + ], + "updated_at": "2026-01-26T21:43:13.301Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "model_url": "https://fal.run/fal-ai/rife/video", + "license_type": "commercial", + "date": "2025-07-22T20:23:08.948Z", + "group": { + "key": "rife", + "label": "Video Interpolation" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/rife/video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/rife/video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/rife/video", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-5.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/rife/video", + "documentationUrl": "https://fal.ai/models/fal-ai/rife/video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "RifeVideoInput": { + "x-fal-order-properties": [ + "video_url", + "num_frames", + "use_scene_detection", + "use_calculated_fps", + "fps", + "loop" + ], + "type": "object", + "properties": { + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/interpolation-video-input.mp4" + ], + "description": "The URL of the video to use for interpolation.", + "type": "string", + "title": "Video URL" + }, + "use_scene_detection": { + "description": "If True, the input video will be split into scenes before interpolation. This removes smear frames between scenes, but can result in false positives if the scene detection is not accurate. If False, the entire video will be treated as a single scene.", + "type": "boolean", + "title": "Use Scene Detection", + "default": false + }, + "loop": { + "description": "If True, the final frame will be looped back to the first frame to create a seamless loop. If False, the final frame will not loop back.", + "type": "boolean", + "title": "Loop", + "default": false + }, + "num_frames": { + "description": "The number of frames to generate between the input video frames.", + "type": "integer", + "minimum": 1, + "maximum": 4, + "title": "Number of Frames", + "default": 1 + }, + "use_calculated_fps": { + "description": "If True, the function will use the calculated FPS of the input video multiplied by the number of frames to determine the output FPS. If False, the passed FPS will be used.", + "type": "boolean", + "title": "Use Calculated FPS", + "default": true + }, + "fps": { + "minimum": 1, + "maximum": 60, + "type": "integer", + "title": "Frames Per Second", + "description": "Frames per second for the output video. Only applicable if use_calculated_fps is False.", + "default": 8 + } + }, + "title": "RIFEVideoInput", + "required": [ + "video_url" + ] + }, + "RifeVideoOutput": { + "x-fal-order-properties": [ + "video" + ], + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/rife-video-output.mp4" + } + ], + "description": "The generated video file with interpolated frames.", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "RIFEVideoOutput", + "required": [ + "video" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "type": "integer", + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "type": "string", + "title": "File Name" + },
+ "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "type": "string", + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/rife/video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/rife/video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/rife/video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RifeVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/rife/video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RifeVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/film/video", + "metadata": { + "display_name": "FILM", + "category": "video-to-video", + "description": "Interpolate videos with FILM - Frame Interpolation for Large Motion", + "status": "active", + "tags": [ + "interpolation" + ], + "updated_at": "2026-01-26T21:43:13.676Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos.jpg", + "model_url": "https://fal.run/fal-ai/film/video", + "license_type": "commercial", + "date": "2025-07-22T20:08:28.407Z", + "group": { + "key": "film", + "label": "Video Interpolation" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/film/video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/film/video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/film/video",
"category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/film/video", + "documentationUrl": "https://fal.ai/models/fal-ai/film/video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FilmVideoInput": { + "title": "FILMVideoInput", + "type": "object", + "properties": { + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "description": "The write mode of the output video. Only applicable if output_type is 'video'.", + "examples": [ + "balanced" + ], + "default": "balanced" + }, + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/interpolation-video-input.mp4" + ], + "title": "Video URL", + "type": "string", + "description": "The URL of the video to use for interpolation." + }, + "use_calculated_fps": { + "title": "Use Calculated FPS", + "type": "boolean", + "description": "If True, the function will use the calculated FPS of the input video multiplied by the number of frames to determine the output FPS. If False, the passed FPS will be used.", + "default": true + }, + "loop": { + "title": "Loop", + "type": "boolean", + "description": "If True, the final frame will be looped back to the first frame to create a seamless loop. If False, the final frame will not loop back.", + "default": false + }, + "fps": { + "minimum": 1, + "maximum": 60, + "type": "integer", + "title": "Frames Per Second", + "description": "Frames per second for the output video. Only applicable if use_calculated_fps is False.", + "default": 8 + }, + "sync_mode": { + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "description": "The quality of the output video. Only applicable if output_type is 'video'.", + "examples": [ + "high" + ], + "default": "high" + }, + "use_scene_detection": { + "title": "Use Scene Detection", + "type": "boolean", + "description": "If True, the input video will be split into scenes before interpolation. This removes smear frames between scenes, but can result in false positives if the scene detection is not accurate. 
If False, the entire video will be treated as a single scene.", + "default": false + }, + "num_frames": { + "description": "The number of frames to generate between the input video frames.", + "type": "integer", + "minimum": 1, + "maximum": 4, + "title": "Number of Frames", + "default": 1 + } + }, + "x-fal-order-properties": [ + "video_url", + "num_frames", + "use_scene_detection", + "use_calculated_fps", + "fps", + "loop", + "sync_mode", + "video_quality", + "video_write_mode" + ], + "required": [ + "video_url" + ] + }, + "FilmVideoOutput": { + "title": "FILMVideoOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/film-video-output.mp4" + } + ], + "title": "Video", + "description": "The generated video file with interpolated frames.", + "allOf": [ + { + "$ref": "#/components/schemas/VideoFile" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "VideoFile": { + "title": "VideoFile", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "duration": { + "title": "Duration", + "type": "number", + "description": "The duration of the video" + }, + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the video" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the video" + }, + "fps": { + "title": "Fps", + "type": "number", + "description": "The FPS of the video" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "num_frames": { + "title": "Num Frames", + "type": "integer", + "description": "The number of frames in the video" + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/film/video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/film/video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/film/video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FilmVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/film/video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FilmVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/luma-dream-machine/ray-2-flash/modify", + "metadata": { + "display_name": "Luma Ray 2 Flash Modify", + "category": "video-to-video", + "description": "Ray2 Flash Modify is a video generative model capable of restyling or retexturing the entire shot, from turning live-action into CG or stylized animation, to changing wardrobe, props, or the overall aesthetic and swap environments or time periods, giving you control over background, location, or even weather.", + "status": "active", + "tags": [ + "modify", + "restyle" + ], + "updated_at": "2026-01-26T21:43:14.956Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/koala/y1-LTiotjfV3d_9Hy1lMU_3db5709293314d53abe575e9327c0fe4.jpg", + "model_url": "https://fal.run/fal-ai/luma-dream-machine/ray-2-flash/modify", + "license_type": "commercial", + "date": "2025-07-17T14:03:02.298Z", + "group": { + "key": "luma-dream-machine", + "label": "Ray-2 Flash (Modify)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/luma-dream-machine/ray-2-flash/modify", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/luma-dream-machine/ray-2-flash/modify queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/luma-dream-machine/ray-2-flash/modify", + "category": "video-to-video", + "thumbnailUrl": "https://fal.media/files/koala/y1-LTiotjfV3d_9Hy1lMU_3db5709293314d53abe575e9327c0fe4.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/luma-dream-machine/ray-2-flash/modify", + "documentationUrl": "https://fal.ai/models/fal-ai/luma-dream-machine/ray-2-flash/modify/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + 
"QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LumaDreamMachineRay2FlashModifyInput": { + "title": "ModifyVideoRequest", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "maxLength": 5000, + "minLength": 3, + "description": "Instruction for modifying the video" + }, + "video_url": { + "examples": [ + "https://v3.fal.media/files/zebra/9aDde3Te2kuJYHdR0Kz8R_output.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "URL of the input video to modify" + }, + "mode": { + "enum": [ + "adhere_1", + "adhere_2", + "adhere_3", + "flex_1", + "flex_2", + "flex_3", + "reimagine_1", + "reimagine_2", + "reimagine_3" + ], + "title": "Mode", + "type": "string", + "description": "Amount of modification to apply to the video, adhere_1 is the least amount of modification, reimagine_3 is the most", + "default": "flex_1" + }, + "image_url": { + "examples": [ + "https://fal.media/files/koala/Kv2821G03ggpKK2AiZX71_d5fa7bacf06049cfaeb9588f6003b6d5.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "Optional URL of the first frame image for modification" + } + }, + "x-fal-order-properties": [ + "video_url", + "image_url", + "prompt", + "mode" + ], + "required": [ + "video_url" + ] + }, + "LumaDreamMachineRay2FlashModifyOutput": { + "title": "ModifyOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/lion/_2UO2QC26T_R8vKeVGAdX_output.mp4" + } + ], + "title": "Video", + "description": "URL of the modified video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/luma-dream-machine/ray-2-flash/modify/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/luma-dream-machine/ray-2-flash/modify/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/luma-dream-machine/ray-2-flash/modify": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LumaDreamMachineRay2FlashModifyInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/luma-dream-machine/ray-2-flash/modify/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LumaDreamMachineRay2FlashModifyOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltxv-13b-098-distilled/multiconditioning", + "metadata": { + "display_name": "LTX-Video 13B 0.9.8 Distilled", + "category": "video-to-video", + "description": "Generate long videos from prompts, images, and videos using LTX Video-0.9.8 13B Distilled and custom LoRA", + "status": "active", + "tags": [ + "video", + "ltx-video", + "video-to-video", + "multicondition-to-video", + "image-to-video" + ], + "updated_at": "2026-01-26T21:43:15.337Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Training.jpg", + "model_url": "https://fal.run/fal-ai/ltxv-13b-098-distilled/multiconditioning", + "license_type": "commercial", + "date": "2025-07-17T02:59:51.470Z", + "group": { + "key": "ltx-video-13b-098", + "label": "Multicondition to Video" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/ltx-video-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/ltx-video-trainer" + ] + }, + "openapi": { 
+ "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltxv-13b-098-distilled/multiconditioning", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltxv-13b-098-distilled/multiconditioning queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltxv-13b-098-distilled/multiconditioning", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Training.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltxv-13b-098-distilled/multiconditioning", + "documentationUrl": "https://fal.ai/models/fal-ai/ltxv-13b-098-distilled/multiconditioning/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Ltxv13b098DistilledMulticonditioningInput": { + "description": "Distilled model input", + "type": "object", + "properties": { + "second_pass_skip_initial_steps": { + "description": "The number of inference steps to skip in the initial steps of the second pass. By skipping some steps at the beginning, the second pass can focus on smaller details instead of larger changes.", + "type": "integer", + "minimum": 1, + "maximum": 11, + "title": "Second Pass Skip Initial Steps", + "examples": [ + 5 + ], + "default": 5 + }, + "first_pass_num_inference_steps": { + "description": "Number of inference steps during the first pass.", + "type": "integer", + "minimum": 2, + "maximum": 12, + "title": "Number of Inference Steps", + "examples": [ + 8 + ], + "default": 8 + }, + "frame_rate": { + "description": "The frame rate of the video.", + "type": "integer", + "minimum": 1, + "maximum": 60, + "title": "Frame Rate", + "examples": [ + 24 + ], + "default": 24 + }, + "reverse_video": { + "examples": [ + false + ], + "title": "Reverse Video", + "type": "boolean", + "description": "Whether to reverse the video.", + "default": false + }, + "prompt": { + "examples": [ + "A vibrant, abstract composition featuring a person with outstretched arms, rendered in a kaleidoscope of colors against a deep, dark background. The figure is composed of intricate, swirling patterns reminiscent of a mosaic, with hues of orange, yellow, blue, and green that evoke the style of artists such as Wassily Kandinsky or Bridget Riley. The camera zooms into the face striking portrait of a man, reimagined through the lens of old-school video-game graphics. The subject's face is rendered in a kaleidoscope of colors, with bold blues and reds set against a vibrant yellow backdrop. His dark hair is pulled back, framing his profile in a dramatic pose." 
+ ], + "title": "Prompt", + "type": "string", + "description": "Text prompt to guide generation" + }, + "expand_prompt": { + "examples": [ + false + ], + "title": "Expand Prompt", + "type": "boolean", + "description": "Whether to expand the prompt using a language model.", + "default": false + }, + "temporal_adain_factor": { + "description": "The factor for adaptive instance normalization (AdaIN) applied to generated video chunks after the first. This can help deal with a gradual increase in saturation/contrast in the generated video by normalizing the color distribution across the video. A high value will ensure the color distribution is more consistent across the video, while a low value will allow for more variation in color distribution.", + "type": "number", + "examples": [ + 0.5 + ], + "maximum": 1, + "title": "Temporal AdaIN Factor", + "minimum": 0, + "multipleOf": 0.05, + "default": 0.5 + }, + "loras": { + "description": "LoRA weights to use for generation", + "type": "array", + "title": "Loras", + "items": { + "$ref": "#/components/schemas/LoRAWeight" + }, + "default": [] + }, + "images": { + "description": "URL of images to use as conditioning", + "type": "array", + "items": { + "$ref": "#/components/schemas/ImageConditioningInput" + }, + "examples": [ + [ + { + "strength": 1, + "start_frame_num": 0, + "image_url": "https://storage.googleapis.com/falserverless/model_tests/ltx/NswO1P8sCLzrh1WefqQFK_9a6bdbfa54b944c9a770338159a113fd.jpg" + }, + { + "strength": 1, + "start_frame_num": 120, + "image_url": "https://storage.googleapis.com/falserverless/model_tests/ltx/YAPOGvmS2tM_Krdp7q6-d_267c97e017c34f679844a4477dfcec38.jpg" + } + ] + ], + "title": "Images", + "default": [] + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "num_frames": { + "description": "The number of frames in the video.", + "type": "integer", + "minimum": 9, + "maximum": 1441, + "title": "Number of Frames", + "examples": [ + 121 + ], + "default": 121 + }, + "second_pass_num_inference_steps": { + "description": "Number of inference steps during the second pass.", + "type": "integer", + "minimum": 2, + "maximum": 12, + "title": "Second Pass Number of Inference Steps", + "examples": [ + 8 + ], + "default": 8 + }, + "negative_prompt": { + "description": "Negative prompt for generation", + "type": "string", + "title": "Negative Prompt", + "default": "worst quality, inconsistent motion, blurry, jittery, distorted" + }, + "enable_detail_pass": { + "examples": [ + false + ], + "title": "Enable Detail Pass", + "type": "boolean", + "description": "Whether to use a detail pass. If True, the model will perform a second pass to refine the video and enhance details. This incurs a 2.0x cost multiplier on the base price.", + "default": false + }, + "resolution": { + "examples": [ + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video.", + "enum": [ + "480p", + "720p" + ], + "default": "720p" + }, + "aspect_ratio": { + "examples": [ + "auto" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "The aspect ratio of the video.", + "enum": [ + "9:16", + "1:1", + "16:9", + "auto" + ], + "default": "auto" + }, + "tone_map_compression_ratio": { + "description": "The compression ratio for tone mapping. This is used to compress the dynamic range of the video to improve visual quality. 
A value of 0.0 means no compression, while a value of 1.0 means maximum compression.", + "type": "number", + "examples": [ + 0 + ], + "maximum": 1, + "title": "Tone Map Compression Ratio", + "minimum": 0, + "multipleOf": 0.05, + "default": 0 + }, + "videos": { + "description": "Videos to use as conditioning", + "type": "array", + "title": "Videos", + "items": { + "$ref": "#/components/schemas/VideoConditioningInput" + }, + "default": [] + }, + "constant_rate_factor": { + "description": "The constant rate factor (CRF) to compress input media with. Compressed input media more closely matches the model's training data, which can improve motion quality.", + "type": "integer", + "minimum": 0, + "maximum": 51, + "title": "Constant Rate Factor", + "examples": [ + 29 + ], + "default": 29 + }, + "seed": { + "description": "Random seed for generation", + "type": "integer", + "title": "Seed" + } + }, + "title": "DistilledMultiConditioningVideoInput", + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "loras", + "resolution", + "aspect_ratio", + "seed", + "num_frames", + "first_pass_num_inference_steps", + "second_pass_num_inference_steps", + "second_pass_skip_initial_steps", + "frame_rate", + "expand_prompt", + "reverse_video", + "enable_safety_checker", + "enable_detail_pass", + "temporal_adain_factor", + "tone_map_compression_ratio", + "constant_rate_factor", + "images", + "videos" + ], + "required": [ + "prompt" + ] + }, + "Ltxv13b098DistilledMulticonditioningOutput": { + "title": "MultiConditioningVideoOutput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A vibrant, abstract composition featuring a person with outstretched arms, rendered in a kaleidoscope of colors against a deep, dark background. The figure is composed of intricate, swirling patterns reminiscent of a mosaic, with hues of orange, yellow, blue, and green that evoke the style of artists such as Wassily Kandinsky or Bridget Riley. The camera zooms into the face striking portrait of a man, reimagined through the lens of old-school video-game graphics. The subject's face is rendered in a kaleidoscope of colors, with bold blues and reds set against a vibrant yellow backdrop. His dark hair is pulled back, framing his profile in a dramatic pose." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation." + }, + "seed": { + "description": "The seed used for generation.", + "type": "integer", + "title": "Seed" + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/ltxv-multiconditioning-output.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "prompt", + "seed" + ], + "required": [ + "video", + "prompt", + "seed" + ] + }, + "LoRAWeight": { + "title": "LoRAWeight", + "type": "object", + "properties": { + "path": { + "description": "URL or path to the LoRA weights.", + "type": "string", + "title": "Path" + }, + "scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "description": "Scale of the LoRA weight. This is a multiplier applied to the LoRA weight when loading it.", + "title": "Scale", + "default": 1 + }, + "weight_name": { + "description": "Name of the LoRA weight. 
Only used if `path` is a HuggingFace repository, and is only required when the repository contains multiple LoRA weights.", + "type": "string", + "title": "Weight Name" + } + }, + "x-fal-order-properties": [ + "path", + "weight_name", + "scale" + ], + "required": [ + "path" + ] + }, + "ImageConditioningInput": { + "title": "ImageConditioningInput", + "type": "object", + "properties": { + "strength": { + "minimum": 0, + "maximum": 1, + "type": "number", + "description": "Strength of the conditioning. 0.0 means no conditioning, 1.0 means full conditioning.", + "title": "Strength", + "default": 1 + }, + "start_frame_num": { + "description": "Frame number of the image from which the conditioning starts. Must be a multiple of 8.", + "type": "integer", + "minimum": 0, + "maximum": 1440, + "title": "Start Frame Number", + "multipleOf": 8, + "default": 0 + }, + "image_url": { + "description": "URL of image to use as conditioning", + "type": "string", + "title": "Image URL" + } + }, + "x-fal-order-properties": [ + "image_url", + "start_frame_num", + "strength" + ], + "required": [ + "image_url" + ] + }, + "VideoConditioningInput": { + "title": "VideoConditioningInput", + "type": "object", + "properties": { + "video_url": { + "description": "URL of video to use as conditioning", + "type": "string", + "title": "Video URL" + }, + "start_frame_num": { + "description": "Frame number of the video from which the conditioning starts. Must be a multiple of 8.", + "type": "integer", + "minimum": 0, + "maximum": 1440, + "title": "Start Frame Number", + "multipleOf": 8, + "default": 0 + }, + "reverse_video": { + "description": "Whether to reverse the video. This is useful for tasks where the video conditioning should be applied in reverse order.", + "type": "boolean", + "title": "Reverse Video", + "default": false + }, + "limit_num_frames": { + "description": "Whether to limit the number of frames used from the video. If True, the `max_num_frames` parameter will be used to limit the number of frames.", + "type": "boolean", + "title": "Limit Number of Frames", + "default": false + }, + "resample_fps": { + "description": "Whether to resample the video to a specific FPS. If True, the `target_fps` parameter will be used to resample the video.", + "type": "boolean", + "title": "Resample FPS", + "default": false + }, + "strength": { + "minimum": 0, + "maximum": 1, + "type": "number", + "description": "Strength of the conditioning. 0.0 means no conditioning, 1.0 means full conditioning.", + "title": "Strength", + "default": 1 + }, + "target_fps": { + "description": "Target FPS to resample the video to. Only relevant if `resample_fps` is True.", + "type": "integer", + "minimum": 1, + "maximum": 60, + "title": "Target FPS", + "examples": [ + 24 + ], + "default": 24 + }, + "max_num_frames": { + "description": "Maximum number of frames to use from the video. If None, all frames will be used.", + "type": "integer", + "minimum": 1, + "maximum": 1441, + "title": "Maximum Number of Frames", + "examples": [ + 1441 + ], + "default": 1441 + }, + "conditioning_type": { + "examples": [ + "rgb" + ], + "title": "Conditioning Type", + "type": "string", + "description": "Type of conditioning this video provides. This is relevant to ensure in-context LoRA weights are applied correctly, as well as selecting the correct preprocessing pipeline, when enabled.", + "enum": [ + "rgb", + "depth", + "pose", + "canny" + ], + "default": "rgb" + }, + "preprocess": { + "description": "Whether to preprocess the video. 
If True, the video will be preprocessed to match the conditioning type. This is a no-op for RGB conditioning.", + "type": "boolean", + "title": "Preprocess", + "default": false + } + }, + "x-fal-order-properties": [ + "video_url", + "conditioning_type", + "preprocess", + "start_frame_num", + "strength", + "limit_num_frames", + "max_num_frames", + "resample_fps", + "target_fps", + "reverse_video" + ], + "required": [ + "video_url" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "file_data": { + "format": "binary", + "description": "File data", + "type": "string", + "title": "File Data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltxv-13b-098-distilled/multiconditioning/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltxv-13b-098-distilled/multiconditioning/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/ltxv-13b-098-distilled/multiconditioning": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltxv13b098DistilledMulticonditioningInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltxv-13b-098-distilled/multiconditioning/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Ltxv13b098DistilledMulticonditioningOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pixverse/sound-effects", + "metadata": { + "display_name": "Pixverse", + "category": "video-to-video", + "description": "Add immersive sound effects and background music to your videos using PixVerse sound effects generation", + "status": "active", + "tags": [ + "audio", + "utility" + ], + "updated_at": "2026-01-26T21:43:18.081Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "model_url": "https://fal.run/fal-ai/pixverse/sound-effects", + "license_type": "commercial", + "date": "2025-07-07T20:56:55.765Z", + "group": { + "key": "pixverse-45", + "label": "Sound Effects" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pixverse/sound-effects", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pixverse/sound-effects queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pixverse/sound-effects", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/pixverse/sound-effects", + "documentationUrl": "https://fal.ai/models/fal-ai/pixverse/sound-effects/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "PixverseSoundEffectsInput": { + "title": "SoundEffectRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "sea waves", + "thunder storm", + "birds chirping" + ], + "title": "Prompt", + "type": "string", + "description": "Description of the sound effect to generate. If empty, a random sound effect will be generated", + "default": "" + }, + "video_url": { + "examples": [ + "https://v3.fal.media/files/tiger/QfpJmEBkR75KpB6yfNLDM_video.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "URL of the input video to add sound effects to" + }, + "original_sound_switch": { + "title": "Original Sound Switch", + "type": "boolean", + "description": "Whether to keep the original audio from the video", + "default": false + } + }, + "x-fal-order-properties": [ + "video_url", + "original_sound_switch", + "prompt" + ], + "required": [ + "video_url" + ] + }, + "PixverseSoundEffectsOutput": { + "title": "SoundEffectOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 1534052, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://v3.fal.media/files/kangaroo/bBQr_DUeICo6_Ty_b_Y0I_output.mp4" + } + ], + "title": "Video", + "description": "The video with added sound effects", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pixverse/sound-effects/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/sound-effects/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/pixverse/sound-effects": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseSoundEffectsInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/sound-effects/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseSoundEffectsOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/thinksound/audio", + "metadata": { + "display_name": "ThinkSound", + "category": "video-to-video", + "description": "Generate realistic audio from a video with an optional text prompt", + "status": "active", + "tags": [ + "audio-generation", + "video-to-audio" + ], + "updated_at": "2026-01-26T21:43:18.581Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-3.jpg", + "model_url": "https://fal.run/fal-ai/thinksound/audio", + "license_type": "commercial", + "date": "2025-07-02T22:58:54.729Z", + "group": { + "key": "thinksound", + "label": "Video-to-Audio" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/thinksound/audio", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/thinksound/audio queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/thinksound/audio", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-3.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/thinksound/audio", + "documentationUrl": "https://fal.ai/models/fal-ai/thinksound/audio/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." 
+ }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ThinksoundAudioInput": { + "x-fal-order-properties": [ + "video_url", + "prompt", + "seed", + "num_inference_steps", + "cfg_scale" + ], + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "A prompt to guide the audio generation. If not provided, it will be extracted from the video.", + "default": "" + }, + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/thinksound-input.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "The URL of the video to generate the audio for." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed for the random number generator" + }, + "num_inference_steps": { + "description": "The number of inference steps for audio generation.", + "type": "integer", + "minimum": 2, + "title": "Number of Inference Steps", + "maximum": 100, + "examples": [ + 24 + ], + "default": 24 + }, + "cfg_scale": { + "description": "The classifier-free guidance scale for audio generation.", + "type": "number", + "minimum": 1, + "title": "CFG Scale", + "maximum": 20, + "examples": [ + 5 + ], + "default": 5 + } + }, + "title": "Input", + "required": [ + "video_url" + ] + }, + "ThinksoundAudioOutput": { + "x-fal-order-properties": [ + "audio", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "An acoustic guitar being played indoors." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used to generate the audio." + }, + "audio": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/thinksound-audio.wav" + } + ], + "title": "Audio", + "description": "The generated audio file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "AudioOutput", + "required": [ + "audio", + "prompt" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/thinksound/audio/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/thinksound/audio/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/thinksound/audio": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ThinksoundAudioInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/thinksound/audio/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ThinksoundAudioOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/thinksound", + "metadata": { + "display_name": "ThinkSound", + "category": "video-to-video", + "description": "Generate realistic audio for a video with an optional text prompt and combine it with the video", + "status": "active", + "tags": [ + "audio-generation", + "video-to-audio" + ], + "updated_at": "2026-01-26T21:43:18.704Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-3.jpg", + "model_url": "https://fal.run/fal-ai/thinksound", + "license_type": "commercial", + "date": "2025-07-01T20:56:42.665Z", + "group": { + "key": "thinksound", + "label": "Video-to-Video" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/thinksound", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/thinksound queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/thinksound", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-3.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/thinksound", + "documentationUrl": "https://fal.ai/models/fal-ai/thinksound/api" + } + }, + "components": { + 
"securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ThinksoundInput": { + "x-fal-order-properties": [ + "video_url", + "prompt", + "seed", + "num_inference_steps", + "cfg_scale" + ], + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "A prompt to guide the audio generation. If not provided, it will be extracted from the video.", + "default": "" + }, + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/thinksound-input.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "The URL of the video to generate the audio for." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed for the random number generator" + }, + "num_inference_steps": { + "description": "The number of inference steps for audio generation.", + "type": "integer", + "minimum": 2, + "title": "Number of Inference Steps", + "maximum": 100, + "examples": [ + 24 + ], + "default": 24 + }, + "cfg_scale": { + "description": "The classifier-free guidance scale for audio generation.", + "type": "number", + "minimum": 1, + "title": "CFG Scale", + "maximum": 20, + "examples": [ + 5 + ], + "default": 5 + } + }, + "title": "Input", + "required": [ + "video_url" + ] + }, + "ThinksoundOutput": { + "x-fal-order-properties": [ + "video", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "An acoustic guitar being played indoors." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used to generate the audio." + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/thinksound-output.mp4" + } + ], + "title": "Video", + "description": "The generated video with audio.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "Output", + "required": [ + "video", + "prompt" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/thinksound/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/thinksound/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/thinksound": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ThinksoundInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/thinksound/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ThinksoundOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pixverse/extend/fast", + "metadata": { + "display_name": "Pixverse", + "category": "video-to-video", + "description": "PixVerse Extend is a model for extending your videos using high-quality video extension techniques.", + "status": "active", + "tags": [ + "utility", + "editing" + ], + "updated_at": "2026-01-26T21:43:21.641Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "model_url": "https://fal.run/fal-ai/pixverse/extend/fast", + "license_type": "commercial", + "date": "2025-06-30T20:58:06.204Z", + "group": { + "key": "pixverse-45", + "label": "Extend Fast" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pixverse/extend/fast", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pixverse/extend/fast queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pixverse/extend/fast", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp",
+ "playgroundUrl": "https://fal.ai/models/fal-ai/pixverse/extend/fast", + "documentationUrl": "https://fal.ai/models/fal-ai/pixverse/extend/fast/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PixverseExtendFastInput": { + "title": "FastExtendRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A kid is talking into camera" + ], + "title": "Prompt", + "type": "string", + "description": "Prompt describing how to extend the video" + }, + "resolution": { + "enum": [ + "360p", + "540p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video. Fast mode doesn't support 1080p", + "default": "720p" + }, + "video_url": { + "examples": [ + "https://v3.fal.media/files/rabbit/88-jI3VWXU4Q8kSNrWo3c_output.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "URL of the input video to extend" + }, + "style": { + "enum": [ + "anime", + "3d_animation", + "clay", + "comic", + "cyberpunk" + ], + "title": "Style", + "type": "string", + "description": "The style of the extended video" + }, + "model": { + "enum": [ + "v3.5", + "v4", + "v4.5", + "v5", + "v5.5", + "v5.6" + ], + "title": "Model", + "type": "string", + "description": "The model version to use for generation", + "default": "v4.5" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for generation" + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt to be used for the generation", + "default": "" + } + }, + "x-fal-order-properties": [ + "video_url", + "prompt", + "negative_prompt", + "style", + "resolution", + "model", + "seed" + ], + "required": [ + "video_url", + "prompt" + ] + }, + "PixverseExtendFastOutput": { + "title": "ExtendOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 1163040, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://v3.fal.media/files/rabbit/88-jI3VWXU4Q8kSNrWo3c_output.mp4" + } + ], + "title": "Video", + "description": "The extended video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." 
+ }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pixverse/extend/fast/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/extend/fast/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/pixverse/extend/fast": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseExtendFastInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/extend/fast/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseExtendFastOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pixverse/extend", + "metadata": { + "display_name": "Pixverse", + "category": "video-to-video", + "description": "PixVerse Extend is a model for extending your videos using high-quality video extension techniques.", + "status": "active", + "tags": [ + "utility", + "editing" + ], + "updated_at": "2026-01-26T21:43:21.765Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "model_url": "https://fal.run/fal-ai/pixverse/extend", + "license_type": "commercial", + "date": "2025-06-30T20:57:59.382Z", + "group": { + "key": "pixverse-45", + "label": "Extend" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pixverse/extend", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pixverse/extend queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pixverse/extend", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/pixverse-v3.5.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/pixverse/extend", + "documentationUrl": "https://fal.ai/models/fal-ai/pixverse/extend/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position."
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "PixverseExtendInput": { + "title": "ExtendRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A kid is talking into camera" + ], + "title": "Prompt", + "type": "string", + "description": "Prompt describing how to extend the video" + }, + "resolution": { + "enum": [ + "360p", + "540p", + "720p", + "1080p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the generated video", + "default": "720p" + }, + "duration": { + "enum": [ + "5", + "8" + ], + "title": "Duration", + "type": "string", + "description": "The duration of the generated video in seconds. 1080p videos are limited to 5 seconds", + "default": "5" + }, + "style": { + "enum": [ + "anime", + "3d_animation", + "clay", + "comic", + "cyberpunk" + ], + "title": "Style", + "type": "string", + "description": "The style of the extended video" + }, + "video_url": { + "examples": [ + "https://v3.fal.media/files/rabbit/88-jI3VWXU4Q8kSNrWo3c_output.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "URL of the input video to extend" + }, + "model": { + "enum": [ + "v3.5", + "v4", + "v4.5", + "v5", + "v5.5", + "v5.6" + ], + "title": "Model", + "type": "string", + "description": "The model version to use for generation", + "default": "v4.5" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for generation" + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt to be used for the generation", + "default": "" + } + }, + "x-fal-order-properties": [ + "video_url", + "prompt", + "negative_prompt", + "style", + "resolution", + "duration", + "model", + "seed" + ], + "required": [ + "video_url", + "prompt" + ] + }, + "PixverseExtendOutput": { + "title": "ExtendOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 1163040, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://v3.fal.media/files/rabbit/88-jI3VWXU4Q8kSNrWo3c_output.mp4" + } + ], + "title": "Video", + "description": "The extended video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pixverse/extend/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/extend/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/pixverse/extend": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseExtendInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/extend/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseExtendOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pixverse/lipsync", + "metadata": { + "display_name": "Pixverse", + "category": "video-to-video", + "description": "Generate realistic lipsync animations from audio using advanced algorithms for high-quality synchronization with PixVerse Lipsync model", + "status": "active", + "tags": [ + "animation", + "lip sync" + ], + "updated_at": "2026-01-26T21:43:22.056Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/rabbit/dSjrqmujPFztMyFSqTtny_c6457768d98c4537aa1516df53388e79.jpg", + "model_url": "https://fal.run/fal-ai/pixverse/lipsync", + "license_type": "commercial", + "date": "2025-06-30T20:57:46.375Z", + "group": { + "key": "pixverse-45", + "label": "Lipsync" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pixverse/lipsync", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pixverse/lipsync queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pixverse/lipsync", + "category": "video-to-video", + "thumbnailUrl": 
"https://fal.media/files/rabbit/dSjrqmujPFztMyFSqTtny_c6457768d98c4537aa1516df53388e79.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/pixverse/lipsync", + "documentationUrl": "https://fal.ai/models/fal-ai/pixverse/lipsync/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PixverseLipsyncInput": { + "title": "LipsyncRequest", + "type": "object", + "properties": { + "text": { + "examples": [ + "Hello, this is a test message." + ], + "title": "Text", + "type": "string", + "description": "Text content for TTS when audio_url is not provided" + }, + "video_url": { + "examples": [ + "https://v3.fal.media/files/penguin/T-ONORYMYLoEOB9lXryA2_IKEy3yAyi1evJGBAkXGZx_output.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "URL of the input video" + }, + "audio_url": { + "examples": [ + "https://v3.fal.media/files/monkey/k4iyN8bJZWwJXMKH-pO9r_speech.mp3" + ], + "title": "Audio Url", + "type": "string", + "description": "URL of the input audio. If not provided, TTS will be used." + }, + "voice_id": { + "enum": [ + "Emily", + "James", + "Isabella", + "Liam", + "Chloe", + "Adrian", + "Harper", + "Ava", + "Sophia", + "Julia", + "Mason", + "Jack", + "Oliver", + "Ethan", + "Auto" + ], + "title": "Voice Id", + "type": "string", + "description": "Voice to use for TTS when audio_url is not provided", + "default": "Auto" + } + }, + "x-fal-order-properties": [ + "video_url", + "audio_url", + "voice_id", + "text" + ], + "required": [ + "video_url" + ] + }, + "PixverseLipsyncOutput": { + "title": "LipsyncOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 1732359, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://v3.fal.media/files/penguin/hsR_KXBJjuF3IIVYIIDA2_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pixverse/lipsync/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/lipsync/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/pixverse/lipsync": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseLipsyncInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pixverse/lipsync/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PixverseLipsyncOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/luma-dream-machine/ray-2/modify", + "metadata": { + "display_name": "Luma Ray 2 Modify", + "category": "video-to-video", + "description": "Ray2 Modify is a video generative model capable of restyling or retexturing the entire shot, from turning live-action into CG or stylized animation, to changing wardrobe, props, or the overall aesthetic, and swapping environments or time periods, giving you control over background, location, or even weather.", + "status": "active", + "tags": [ + "modify", + "restyle" + ], + "updated_at": "2026-01-26T21:43:22.571Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/luma-dream-machine-ray-2.webp", + "model_url": "https://fal.run/fal-ai/luma-dream-machine/ray-2/modify", + "license_type": "commercial", + "date": "2025-06-28T19:42:05.110Z", + "group": { + "key": "luma-dream-machine", + "label": "Ray-2 (Modify)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": 
{ + "title": "Queue OpenAPI for fal-ai/luma-dream-machine/ray-2/modify", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/luma-dream-machine/ray-2/modify queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/luma-dream-machine/ray-2/modify", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/luma-dream-machine-ray-2.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/luma-dream-machine/ray-2/modify", + "documentationUrl": "https://fal.ai/models/fal-ai/luma-dream-machine/ray-2/modify/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LumaDreamMachineRay2ModifyInput": { + "title": "ModifyVideoRequest", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "maxLength": 5000, + "minLength": 3, + "description": "Instruction for modifying the video" + }, + "video_url": { + "examples": [ + "https://v3.fal.media/files/zebra/9aDde3Te2kuJYHdR0Kz8R_output.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "URL of the input video to modify" + }, + "mode": { + "enum": [ + "adhere_1", + "adhere_2", + "adhere_3", + "flex_1", + "flex_2", + "flex_3", + "reimagine_1", + "reimagine_2", + "reimagine_3" + ], + "title": "Mode", + "type": "string", + "description": "Amount of modification to apply to the video, adhere_1 is the least amount of modification, reimagine_3 is the most", + "default": "flex_1" + }, + "image_url": { + "examples": [ + "https://fal.media/files/koala/Kv2821G03ggpKK2AiZX71_d5fa7bacf06049cfaeb9588f6003b6d5.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "Optional URL of the first frame image for modification" + } + }, + "x-fal-order-properties": [ + "video_url", + "image_url", + "prompt", + "mode" + ], + "required": [ + "video_url" + ] + }, + "LumaDreamMachineRay2ModifyOutput": { + "title": "ModifyOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/lion/_2UO2QC26T_R8vKeVGAdX_output.mp4" + } + ], + "title": "Video", + "description": "URL of the modified video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." 
+ }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/luma-dream-machine/ray-2/modify/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/luma-dream-machine/ray-2/modify/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/luma-dream-machine/ray-2/modify": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LumaDreamMachineRay2ModifyInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/luma-dream-machine/ray-2/modify/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LumaDreamMachineRay2ModifyOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan-vace-14b/reframe", + "metadata": { + "display_name": "Wan VACE 14B", + "category": "video-to-video", + "description": "VACE is a video generation model that uses a source image, mask, and video to create prompted videos with controllable sources.", + "status": "active", + "tags": [ + "reframe" + ], + "updated_at": "2026-01-26T21:43:25.148Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-1.jpg", + "model_url": "https://fal.run/fal-ai/wan-vace-14b/reframe", + "license_type": "commercial", + "date": "2025-06-18T19:22:46.388Z", + "group": { + "key": "wan-vace-14b", + "label": "Reframe" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan-vace-14b/reframe", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan-vace-14b/reframe queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan-vace-14b/reframe", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-1.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan-vace-14b/reframe", + "documentationUrl": "https://fal.ai/models/fal-ai/wan-vace-14b/reframe/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanVace14bReframeInput": { + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "match_input_num_frames", + "num_frames", + "match_input_frames_per_second", + "frames_per_second", + "seed", + "resolution", + "aspect_ratio", + "num_inference_steps", + "guidance_scale", + "sampler", + "shift", + "video_url", + "first_frame_url", + "last_frame_url", + "enable_safety_checker", + "enable_prompt_expansion", + "acceleration", + "video_quality", + "video_write_mode", + "num_interpolated_frames", + "temporal_downsample_factor", + "enable_auto_downsample", + "auto_downsample_min_fps", + "interpolator_model", + "sync_mode", + "transparency_mode", + "return_frames_zip", + "zoom_factor", + "trim_borders" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "" + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt to guide video generation. Optional for reframing.", + "default": "" + }, + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/web-examples/wan/t2v.mp4" + ], + "title": "Video URL", + "type": "string", + "description": "URL to the source video file. This video will be used as a reference for the reframe task." + }, + "num_interpolated_frames": { + "description": "Number of frames to interpolate between the original frames. A value of 0 means no interpolation.", + "type": "integer", + "minimum": 0, + "title": "Number of Interpolated Frames", + "examples": [ + 0 + ], + "maximum": 5, + "default": 0 + }, + "temporal_downsample_factor": { + "description": "Temporal downsample factor for the video. This is an integer value that determines how many frames to skip in the video. A value of 0 means no downsampling. For each downsample factor, one upsample factor will automatically be applied.", + "type": "integer", + "minimum": 0, + "title": "Temporal Downsample Factor", + "examples": [ + 0 + ], + "maximum": 5, + "default": 0 + }, + "first_frame_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "First Frame URL", + "description": "URL to the first frame of the video. If provided, the model will use this frame as a reference." + }, + "transparency_mode": { + "enum": [ + "content_aware", + "white", + "black" + ], + "title": "Transparency Mode", + "type": "string", + "description": "The transparency mode to apply to the first and last frames. This controls how the transparent areas of the first and last frames are filled.", + "examples": [ + "content_aware" + ], + "default": "content_aware" + }, + "num_frames": { + "minimum": 17, + "title": "Number of Frames", + "type": "integer", + "maximum": 241, + "description": "Number of frames to generate. Must be between 81 to 241 (inclusive).", + "default": 81 + }, + "trim_borders": { + "examples": [ + true + ], + "title": "Trim Borders", + "type": "boolean", + "description": "Whether to trim borders from the video.", + "default": true + }, + "auto_downsample_min_fps": { + "description": "The minimum frames per second to downsample the video to. This is used to help determine the auto downsample factor to try and find the lowest detail-preserving downsample factor. The default value is appropriate for most videos, if you are using a video with very fast motion, you may need to increase this value. 
If your video has a very low amount of motion, you could decrease this value to allow for higher downsampling and thus longer sequences.", + "type": "number", + "minimum": 1, + "title": "Auto Downsample Min FPS", + "examples": [ + 15 + ], + "maximum": 60, + "default": 15 + }, + "sampler": { + "enum": [ + "unipc", + "dpm++", + "euler" + ], + "title": "Sampler", + "type": "string", + "description": "Sampler to use for video generation.", + "examples": [ + "unipc" + ], + "default": "unipc" + }, + "guidance_scale": { + "description": "Guidance scale for classifier-free guidance. Higher values encourage the model to generate images closely related to the text prompt.", + "type": "number", + "minimum": 1, + "title": "Guidance Scale", + "examples": [ + 5 + ], + "maximum": 10, + "default": 5 + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "description": "The quality of the generated video.", + "examples": [ + "high" + ], + "default": "high" + }, + "sync_mode": { + "examples": [ + false + ], + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_prompt_expansion": { + "examples": [ + false + ], + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. If None, a random seed is chosen." + }, + "interpolator_model": { + "enum": [ + "rife", + "film" + ], + "title": "Interpolator Model", + "type": "string", + "description": "The model to use for frame interpolation. Options are 'rife' or 'film'.", + "examples": [ + "film" + ], + "default": "film" + }, + "enable_auto_downsample": { + "examples": [ + false + ], + "title": "Enable Auto Downsample", + "type": "boolean", + "description": "If true, the model will automatically temporally downsample the video to an appropriate frame length for the model, then will interpolate it back to the original frame length.", + "default": false + }, + "shift": { + "minimum": 1, + "title": "Shift", + "type": "number", + "maximum": 15, + "description": "Shift parameter for video generation.", + "default": 5 + }, + "acceleration": { + "anyOf": [ + { + "enum": [ + "none", + "low", + "regular" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Acceleration", + "description": "Acceleration to use for inference. Options are 'none' or 'regular'. Accelerated inference will very slightly affect output, but will be significantly faster.", + "examples": [ + "regular" + ], + "default": "regular" + }, + "zoom_factor": { + "description": "Zoom factor for the video. When this value is greater than 0, the video will be zoomed in by this factor (in relation to the canvas size,) cutting off the edges of the video. A value of 0 means no zoom.", + "type": "number", + "minimum": 0, + "title": "Zoom Factor", + "examples": [ + 0 + ], + "maximum": 0.9, + "default": 0 + }, + "match_input_num_frames": { + "examples": [ + true + ], + "title": "Match Input Number of Frames", + "type": "boolean", + "description": "If true, the number of frames in the generated video will match the number of frames in the input video. 
If false, the number of frames will be determined by the num_frames parameter.", + "default": true + }, + "frames_per_second": { + "anyOf": [ + { + "minimum": 5, + "maximum": 30, + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Frames per Second", + "description": "Frames per second of the generated video. Must be between 5 to 30. Ignored if match_input_frames_per_second is true.", + "default": 16 + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": false + }, + "negative_prompt": { + "examples": [ + "letterboxing, borders, black bars, bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for video generation.", + "default": "letterboxing, borders, black bars, bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "description": "The write mode of the generated video.", + "examples": [ + "balanced" + ], + "default": "balanced" + }, + "return_frames_zip": { + "examples": [ + false + ], + "title": "Return Frames Zip", + "type": "boolean", + "description": "If true, also return a ZIP file containing all generated frames.", + "default": false + }, + "resolution": { + "enum": [ + "auto", + "240p", + "360p", + "480p", + "580p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video.", + "default": "auto" + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "1:1", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio of the generated video.", + "default": "auto" + }, + "match_input_frames_per_second": { + "examples": [ + true + ], + "title": "Match Input Frames Per Second", + "type": "boolean", + "description": "If true, the frames per second of the generated video will match the input video. If false, the frames per second will be determined by the frames_per_second parameter.", + "default": true + }, + "num_inference_steps": { + "minimum": 2, + "title": "Number of Inference Steps", + "type": "integer", + "maximum": 50, + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "default": 30 + }, + "last_frame_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Last Frame URL", + "description": "URL to the last frame of the video. If provided, the model will use this frame as a reference." 
+ } + }, + "title": "WanVACEReframeRequest", + "required": [ + "video_url" + ] + }, + "WanVace14bReframeOutput": { + "x-fal-order-properties": [ + "video", + "prompt", + "seed", + "frames_zip" + ], + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation." + }, + "frames_zip": { + "anyOf": [ + { + "$ref": "#/components/schemas/File" + }, + { + "type": "null" + } + ], + "description": "ZIP archive of all video frames if requested." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/wan-vace-reframe-output.mp4" + } + ], + "description": "The generated reframe video file.", + "$ref": "#/components/schemas/VideoFile" + } + }, + "title": "WanVACEReframeResponse", + "required": [ + "video", + "prompt", + "seed" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "title": "File", + "required": [ + "url" + ] + }, + "VideoFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "duration": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Duration", + "description": "The duration of the video" + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the video" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the video" + }, + "fps": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Fps", + "description": "The FPS of the video" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. 
It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "num_frames": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Num Frames", + "description": "The number of frames in the video" + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + } + }, + "title": "VideoFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan-vace-14b/reframe/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-vace-14b/reframe/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/wan-vace-14b/reframe": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanVace14bReframeInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-vace-14b/reframe/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanVace14bReframeOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan-vace-14b/outpainting", + "metadata": { + "display_name": "Wan VACE 14B", + "category": "video-to-video", + "description": "VACE is a video generation model that uses a source image, mask, and video to create prompted videos with controllable sources.", + "status": "active", + "tags": [ + "image-to-video", + "video-to-video", + "text-to-video" + ], + "updated_at": "2026-01-26T21:43:25.276Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-1.jpg", + "model_url": "https://fal.run/fal-ai/wan-vace-14b/outpainting", + "license_type": "commercial", + "date": "2025-06-18T19:21:35.546Z", + "group": { + "key": "wan-vace-14b", + "label": "Outpainting" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + 
"title": "Queue OpenAPI for fal-ai/wan-vace-14b/outpainting", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan-vace-14b/outpainting queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan-vace-14b/outpainting", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-1.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan-vace-14b/outpainting", + "documentationUrl": "https://fal.ai/models/fal-ai/wan-vace-14b/outpainting/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanVace14bOutpaintingInput": { + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "match_input_num_frames", + "num_frames", + "match_input_frames_per_second", + "frames_per_second", + "seed", + "resolution", + "aspect_ratio", + "num_inference_steps", + "guidance_scale", + "sampler", + "shift", + "video_url", + "ref_image_urls", + "first_frame_url", + "last_frame_url", + "enable_safety_checker", + "enable_prompt_expansion", + "acceleration", + "video_quality", + "video_write_mode", + "num_interpolated_frames", + "temporal_downsample_factor", + "enable_auto_downsample", + "auto_downsample_min_fps", + "interpolator_model", + "sync_mode", + "transparency_mode", + "return_frames_zip", + "expand_left", + "expand_right", + "expand_top", + "expand_bottom", + "expand_ratio" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A lone woman strides through the neon-drenched streets of Tokyo at night. Her crimson dress, a vibrant splash of color against the deep blues and blacks of the cityscape, flows slightly with each step. A tailored black jacket, crisp and elegant, contrasts sharply with the dress's rich texture. Medium shot: The city hums around her, blurred lights creating streaks of color in the background. Close-up: The fabric of her dress catches the streetlight's glow, revealing a subtle silk sheen and the intricate stitching at the hem. Her black jacket’s subtle texture is visible – a fine wool perhaps, with a matte finish. The overall mood is one of quiet confidence and mystery, a vibrant woman navigating a bustling, nocturnal landscape. High resolution 4k." + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt to guide video generation." + }, + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/web-examples/wan/t2v.mp4" + ], + "title": "Video URL", + "type": "string", + "description": "URL to the source video file. Required for outpainting." 
+ }, + "num_interpolated_frames": { + "description": "Number of frames to interpolate between the original frames. A value of 0 means no interpolation.", + "type": "integer", + "minimum": 0, + "title": "Number of Interpolated Frames", + "examples": [ + 0 + ], + "maximum": 5, + "default": 0 + }, + "temporal_downsample_factor": { + "description": "Temporal downsample factor for the video. This is an integer value that determines how many frames to skip in the video. A value of 0 means no downsampling. For each downsample factor, one upsample factor will automatically be applied.", + "type": "integer", + "minimum": 0, + "title": "Temporal Downsample Factor", + "examples": [ + 0 + ], + "maximum": 5, + "default": 0 + }, + "first_frame_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "First Frame URL", + "description": "URL to the first frame of the video. If provided, the model will use this frame as a reference." + }, + "ref_image_urls": { + "title": "Reference Image URLs", + "type": "array", + "description": "URLs to source reference image. If provided, the model will use this image as reference.", + "items": { + "type": "string" + } + }, + "expand_ratio": { + "description": "Amount of expansion. This is a float value between 0 and 1, where 0.25 adds 25% to the original video size on the specified sides.", + "type": "number", + "minimum": 0, + "maximum": 1, + "title": "Expand Ratio", + "examples": [ + 0.25 + ], + "default": 0.25 + }, + "transparency_mode": { + "enum": [ + "content_aware", + "white", + "black" + ], + "title": "Transparency Mode", + "type": "string", + "description": "The transparency mode to apply to the first and last frames. This controls how the transparent areas of the first and last frames are filled.", + "examples": [ + "content_aware" + ], + "default": "content_aware" + }, + "num_frames": { + "minimum": 17, + "title": "Number of Frames", + "type": "integer", + "maximum": 241, + "description": "Number of frames to generate. Must be between 81 to 241 (inclusive).", + "default": 81 + }, + "auto_downsample_min_fps": { + "description": "The minimum frames per second to downsample the video to. This is used to help determine the auto downsample factor to try and find the lowest detail-preserving downsample factor. The default value is appropriate for most videos, if you are using a video with very fast motion, you may need to increase this value. If your video has a very low amount of motion, you could decrease this value to allow for higher downsampling and thus longer sequences.", + "type": "number", + "minimum": 1, + "title": "Auto Downsample Min FPS", + "examples": [ + 15 + ], + "maximum": 60, + "default": 15 + }, + "expand_bottom": { + "examples": [ + true + ], + "title": "Expand Bottom", + "type": "boolean", + "description": "Whether to expand the video to the bottom.", + "default": false + }, + "sampler": { + "enum": [ + "unipc", + "dpm++", + "euler" + ], + "title": "Sampler", + "type": "string", + "description": "Sampler to use for video generation.", + "examples": [ + "unipc" + ], + "default": "unipc" + }, + "guidance_scale": { + "description": "Guidance scale for classifier-free guidance. 
Higher values encourage the model to generate images closely related to the text prompt.", + "type": "number", + "minimum": 1, + "title": "Guidance Scale", + "examples": [ + 5 + ], + "maximum": 10, + "default": 5 + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "description": "The quality of the generated video.", + "examples": [ + "high" + ], + "default": "high" + }, + "sync_mode": { + "examples": [ + false + ], + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_prompt_expansion": { + "examples": [ + false + ], + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. If None, a random seed is chosen." + }, + "interpolator_model": { + "enum": [ + "rife", + "film" + ], + "title": "Interpolator Model", + "type": "string", + "description": "The model to use for frame interpolation. Options are 'rife' or 'film'.", + "examples": [ + "film" + ], + "default": "film" + }, + "enable_auto_downsample": { + "examples": [ + false + ], + "title": "Enable Auto Downsample", + "type": "boolean", + "description": "If true, the model will automatically temporally downsample the video to an appropriate frame length for the model, then will interpolate it back to the original frame length.", + "default": false + }, + "expand_top": { + "examples": [ + true + ], + "title": "Expand Top", + "type": "boolean", + "description": "Whether to expand the video to the top.", + "default": false + }, + "shift": { + "minimum": 1, + "title": "Shift", + "type": "number", + "maximum": 15, + "description": "Shift parameter for video generation.", + "default": 5 + }, + "expand_left": { + "examples": [ + true + ], + "title": "Expand Left", + "type": "boolean", + "description": "Whether to expand the video to the left.", + "default": false + }, + "acceleration": { + "anyOf": [ + { + "enum": [ + "none", + "low", + "regular" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Acceleration", + "description": "Acceleration to use for inference. Options are 'none' or 'regular'. Accelerated inference will very slightly affect output, but will be significantly faster.", + "examples": [ + "regular" + ], + "default": "regular" + }, + "match_input_num_frames": { + "examples": [ + false + ], + "title": "Match Input Number of Frames", + "type": "boolean", + "description": "If true, the number of frames in the generated video will match the number of frames in the input video. If false, the number of frames will be determined by the num_frames parameter.", + "default": false + }, + "frames_per_second": { + "anyOf": [ + { + "minimum": 5, + "maximum": 30, + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Frames per Second", + "description": "Frames per second of the generated video. Must be between 5 to 30. 
Ignored if match_input_frames_per_second is true.", + "default": 16 + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": false + }, + "negative_prompt": { + "examples": [ + "letterboxing, borders, black bars, bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for video generation.", + "default": "letterboxing, borders, black bars, bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "description": "The write mode of the generated video.", + "examples": [ + "balanced" + ], + "default": "balanced" + }, + "return_frames_zip": { + "examples": [ + false + ], + "title": "Return Frames Zip", + "type": "boolean", + "description": "If true, also return a ZIP file containing all generated frames.", + "default": false + }, + "resolution": { + "enum": [ + "auto", + "240p", + "360p", + "480p", + "580p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video.", + "default": "auto" + }, + "expand_right": { + "examples": [ + true + ], + "title": "Expand Right", + "type": "boolean", + "description": "Whether to expand the video to the right.", + "default": false + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "1:1", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio of the generated video.", + "default": "auto" + }, + "match_input_frames_per_second": { + "examples": [ + false + ], + "title": "Match Input Frames Per Second", + "type": "boolean", + "description": "If true, the frames per second of the generated video will match the input video. If false, the frames per second will be determined by the frames_per_second parameter.", + "default": false + }, + "num_inference_steps": { + "minimum": 2, + "title": "Number of Inference Steps", + "type": "integer", + "maximum": 50, + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "default": 30 + }, + "last_frame_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Last Frame URL", + "description": "URL to the last frame of the video. If provided, the model will use this frame as a reference." 
+ } + }, + "title": "WanVACEOutpaintingRequest", + "required": [ + "prompt", + "video_url" + ] + }, + "WanVace14bOutpaintingOutput": { + "x-fal-order-properties": [ + "video", + "prompt", + "seed", + "frames_zip" + ], + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation." + }, + "frames_zip": { + "anyOf": [ + { + "$ref": "#/components/schemas/File" + }, + { + "type": "null" + } + ], + "description": "ZIP archive of all video frames if requested." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/wan-vace-outpainting-output.mp4" + } + ], + "description": "The generated outpainting video file.", + "$ref": "#/components/schemas/VideoFile" + } + }, + "title": "WanVACEOutpaintingResponse", + "required": [ + "video", + "prompt", + "seed" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "title": "File", + "required": [ + "url" + ] + }, + "VideoFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "duration": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Duration", + "description": "The duration of the video" + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the video" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the video" + }, + "fps": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Fps", + "description": "The FPS of the video" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. 
It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "num_frames": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Num Frames", + "description": "The number of frames in the video" + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + } + }, + "title": "VideoFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan-vace-14b/outpainting/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-vace-14b/outpainting/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/wan-vace-14b/outpainting": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanVace14bOutpaintingInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-vace-14b/outpainting/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanVace14bOutpaintingOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan-vace-14b/inpainting", + "metadata": { + "display_name": "Wan VACE 14B", + "category": "video-to-video", + "description": "VACE is a video generation model that uses a source image, mask, and video to create prompted videos with controllable sources.", + "status": "active", + "tags": [ + "image-to-video", + "video-to-video", + "text-to-video" + ], + "updated_at": "2026-01-26T21:43:25.403Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-1.jpg", + "model_url": "https://fal.run/fal-ai/wan-vace-14b/inpainting", + "license_type": "commercial", + "date": "2025-06-18T19:20:48.150Z", + "group": { + "key": "wan-vace-14b", + "label": "Inpainting" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false + }, + "openapi": { + "openapi": 
"3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan-vace-14b/inpainting", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan-vace-14b/inpainting queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan-vace-14b/inpainting", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-1.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan-vace-14b/inpainting", + "documentationUrl": "https://fal.ai/models/fal-ai/wan-vace-14b/inpainting/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanVace14bInpaintingInput": { + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "match_input_num_frames", + "num_frames", + "match_input_frames_per_second", + "frames_per_second", + "seed", + "resolution", + "aspect_ratio", + "num_inference_steps", + "guidance_scale", + "sampler", + "shift", + "video_url", + "mask_video_url", + "mask_image_url", + "ref_image_urls", + "first_frame_url", + "last_frame_url", + "enable_safety_checker", + "enable_prompt_expansion", + "preprocess", + "acceleration", + "video_quality", + "video_write_mode", + "num_interpolated_frames", + "temporal_downsample_factor", + "enable_auto_downsample", + "auto_downsample_min_fps", + "interpolator_model", + "sync_mode", + "transparency_mode", + "return_frames_zip" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "The video shows a man riding a horse on a vast grassland. He has long lavender hair and wears a traditional dress of a white top and black pants. The animation style makes him look like he is doing some kind of outdoor activity or performing. The background is a spectacular mountain range and cloud sky, giving a sense of tranquility and vastness. The entire video is shot from a fixed angle, focusing on the rider and his horse." + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt to guide video generation." + }, + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/vace/src_video.mp4" + ], + "title": "Video URL", + "type": "string", + "description": "URL to the source video file. Required for inpainting." + }, + "num_interpolated_frames": { + "description": "Number of frames to interpolate between the original frames. A value of 0 means no interpolation.", + "type": "integer", + "minimum": 0, + "title": "Number of Interpolated Frames", + "examples": [ + 0 + ], + "maximum": 5, + "default": 0 + }, + "temporal_downsample_factor": { + "description": "Temporal downsample factor for the video. 
This is an integer value that determines how many frames to skip in the video. A value of 0 means no downsampling. For each downsample factor, one upsample factor will automatically be applied.", + "type": "integer", + "minimum": 0, + "title": "Temporal Downsample Factor", + "examples": [ + 0 + ], + "maximum": 5, + "default": 0 + }, + "first_frame_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "First Frame URL", + "description": "URL to the first frame of the video. If provided, the model will use this frame as a reference." + }, + "ref_image_urls": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/vace/src_ref_image_1.png" + ] + ], + "title": "Reference Image URLs", + "type": "array", + "description": "Urls to source reference image. If provided, the model will use this image as reference.", + "items": { + "type": "string" + } + }, + "transparency_mode": { + "enum": [ + "content_aware", + "white", + "black" + ], + "title": "Transparency Mode", + "type": "string", + "description": "The transparency mode to apply to the first and last frames. This controls how the transparent areas of the first and last frames are filled.", + "examples": [ + "content_aware" + ], + "default": "content_aware" + }, + "num_frames": { + "minimum": 17, + "title": "Number of Frames", + "type": "integer", + "maximum": 241, + "description": "Number of frames to generate. Must be between 81 to 241 (inclusive).", + "default": 81 + }, + "auto_downsample_min_fps": { + "description": "The minimum frames per second to downsample the video to. This is used to help determine the auto downsample factor to try and find the lowest detail-preserving downsample factor. The default value is appropriate for most videos, if you are using a video with very fast motion, you may need to increase this value. If your video has a very low amount of motion, you could decrease this value to allow for higher downsampling and thus longer sequences.", + "type": "number", + "minimum": 1, + "title": "Auto Downsample Min FPS", + "examples": [ + 15 + ], + "maximum": 60, + "default": 15 + }, + "guidance_scale": { + "description": "Guidance scale for classifier-free guidance. Higher values encourage the model to generate images closely related to the text prompt.", + "type": "number", + "minimum": 1, + "title": "Guidance Scale", + "examples": [ + 5 + ], + "maximum": 10, + "default": 5 + }, + "sampler": { + "enum": [ + "unipc", + "dpm++", + "euler" + ], + "title": "Sampler", + "type": "string", + "description": "Sampler to use for video generation.", + "examples": [ + "unipc" + ], + "default": "unipc" + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "description": "The quality of the generated video.", + "examples": [ + "high" + ], + "default": "high" + }, + "sync_mode": { + "examples": [ + false + ], + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "mask_video_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Mask Video URL", + "description": "URL to the source mask file. 
Required for inpainting.", + "examples": [ + "https://storage.googleapis.com/falserverless/vace/src_mask.mp4" + ] + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. If None, a random seed is chosen." + }, + "interpolator_model": { + "enum": [ + "rife", + "film" + ], + "title": "Interpolator Model", + "type": "string", + "description": "The model to use for frame interpolation. Options are 'rife' or 'film'.", + "examples": [ + "film" + ], + "default": "film" + }, + "enable_auto_downsample": { + "examples": [ + false + ], + "title": "Enable Auto Downsample", + "type": "boolean", + "description": "If true, the model will automatically temporally downsample the video to an appropriate frame length for the model, then will interpolate it back to the original frame length.", + "default": false + }, + "enable_prompt_expansion": { + "examples": [ + false + ], + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + }, + "shift": { + "minimum": 1, + "title": "Shift", + "type": "number", + "maximum": 15, + "description": "Shift parameter for video generation.", + "default": 5 + }, + "preprocess": { + "examples": [ + false + ], + "title": "Preprocess", + "type": "boolean", + "description": "Whether to preprocess the input video.", + "default": false + }, + "acceleration": { + "anyOf": [ + { + "enum": [ + "none", + "low", + "regular" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Acceleration", + "description": "Acceleration to use for inference. Options are 'none' or 'regular'. Accelerated inference will very slightly affect output, but will be significantly faster.", + "examples": [ + "regular" + ], + "default": "regular" + }, + "mask_image_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Mask Image URL", + "description": "URL to the guiding mask file. If provided, the model will use this mask as a reference to create masked video using salient mask tracking. Will be ignored if mask_video_url is provided." + }, + "match_input_num_frames": { + "examples": [ + false + ], + "title": "Match Input Number of Frames", + "type": "boolean", + "description": "If true, the number of frames in the generated video will match the number of frames in the input video. If false, the number of frames will be determined by the num_frames parameter.", + "default": false + }, + "frames_per_second": { + "anyOf": [ + { + "minimum": 5, + "maximum": 30, + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Frames per Second", + "description": "Frames per second of the generated video. Must be between 5 to 30. 
Ignored if match_input_frames_per_second is true.", + "default": 16 + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": false + }, + "negative_prompt": { + "examples": [ + "letterboxing, borders, black bars, bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for video generation.", + "default": "letterboxing, borders, black bars, bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "description": "The write mode of the generated video.", + "examples": [ + "balanced" + ], + "default": "balanced" + }, + "return_frames_zip": { + "examples": [ + false + ], + "title": "Return Frames Zip", + "type": "boolean", + "description": "If true, also return a ZIP file containing all generated frames.", + "default": false + }, + "resolution": { + "enum": [ + "auto", + "240p", + "360p", + "480p", + "580p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video.", + "default": "auto" + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "1:1", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio of the generated video.", + "default": "auto" + }, + "match_input_frames_per_second": { + "examples": [ + false + ], + "title": "Match Input Frames Per Second", + "type": "boolean", + "description": "If true, the frames per second of the generated video will match the input video. If false, the frames per second will be determined by the frames_per_second parameter.", + "default": false + }, + "num_inference_steps": { + "minimum": 2, + "title": "Number of Inference Steps", + "type": "integer", + "maximum": 50, + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "default": 30 + }, + "last_frame_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Last Frame URL", + "description": "URL to the last frame of the video. If provided, the model will use this frame as a reference." + } + }, + "title": "WanVACEInpaintingRequest", + "required": [ + "prompt", + "video_url", + "mask_video_url" + ] + }, + "WanVace14bInpaintingOutput": { + "x-fal-order-properties": [ + "video", + "prompt", + "seed", + "frames_zip" + ], + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation." 
+ }, + "frames_zip": { + "anyOf": [ + { + "$ref": "#/components/schemas/File" + }, + { + "type": "null" + } + ], + "description": "ZIP archive of all video frames if requested." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/wan-vace-inpainting-output.mp4" + } + ], + "description": "The generated inpainting video file.", + "$ref": "#/components/schemas/VideoFile" + } + }, + "title": "WanVACEInpaintingResponse", + "required": [ + "video", + "prompt", + "seed" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "title": "File", + "required": [ + "url" + ] + }, + "VideoFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "duration": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Duration", + "description": "The duration of the video" + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the video" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the video" + }, + "fps": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Fps", + "description": "The FPS of the video" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. 
It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "num_frames": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Num Frames", + "description": "The number of frames in the video" + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + } + }, + "title": "VideoFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan-vace-14b/inpainting/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-vace-14b/inpainting/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/wan-vace-14b/inpainting": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanVace14bInpaintingInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-vace-14b/inpainting/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanVace14bInpaintingOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan-vace-14b/pose", + "metadata": { + "display_name": "Wan VACE 14B", + "category": "video-to-video", + "description": "VACE is a video generation model that uses a source image, mask, and video to create prompted videos with controllable sources.", + "status": "active", + "tags": [ + "image-to-video", + "video-to-video", + "text-to-video" + ], + "updated_at": "2026-01-26T21:43:25.537Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-1.jpg", + "model_url": "https://fal.run/fal-ai/wan-vace-14b/pose", + "license_type": "commercial", + "date": "2025-06-18T19:19:07.684Z", + "group": { + "key": "wan-vace-14b", + "label": "Pose" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + 
"title": "Queue OpenAPI for fal-ai/wan-vace-14b/pose", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan-vace-14b/pose queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan-vace-14b/pose", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-1.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan-vace-14b/pose", + "documentationUrl": "https://fal.ai/models/fal-ai/wan-vace-14b/pose/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanVace14bPoseInput": { + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "match_input_num_frames", + "num_frames", + "match_input_frames_per_second", + "frames_per_second", + "seed", + "resolution", + "aspect_ratio", + "num_inference_steps", + "guidance_scale", + "sampler", + "shift", + "video_url", + "ref_image_urls", + "first_frame_url", + "last_frame_url", + "enable_safety_checker", + "enable_prompt_expansion", + "preprocess", + "acceleration", + "video_quality", + "video_write_mode", + "num_interpolated_frames", + "temporal_downsample_factor", + "enable_auto_downsample", + "auto_downsample_min_fps", + "interpolator_model", + "sync_mode", + "transparency_mode", + "return_frames_zip" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A sharply dressed man walks toward the camera down a sun-drenched hallway. Medium shot: He's framed from the knees up, his confident stride filling the frame. His navy blue business suit is impeccably tailored, the fabric subtly shimmering under the light streaming through the tall, arched windows lining the hallway. Close-up: The rich texture of the suit's wool is visible, each thread reflecting the light. His crisp white shirt contrasts beautifully with the deep crimson of his silk tie, the knot perfectly formed. The sunlight highlights the subtle sheen of his polished shoes. The windows cast long shadows, highlighting the architectural detail of the hallway, creating a sense of both elegance and movement. High resolution 4k." + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt to guide video generation. For pose task, the prompt should describe the desired pose and action of the subject in the video." + }, + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/wan-vace-pose-video.mp4" + ], + "title": "Video URL", + "type": "string", + "description": "URL to the source video file. Required for pose task." 
+ }, + "num_interpolated_frames": { + "description": "Number of frames to interpolate between the original frames. A value of 0 means no interpolation.", + "type": "integer", + "minimum": 0, + "title": "Number of Interpolated Frames", + "examples": [ + 0 + ], + "maximum": 5, + "default": 0 + }, + "temporal_downsample_factor": { + "description": "Temporal downsample factor for the video. This is an integer value that determines how many frames to skip in the video. A value of 0 means no downsampling. For each downsample factor, one upsample factor will automatically be applied.", + "type": "integer", + "minimum": 0, + "title": "Temporal Downsample Factor", + "examples": [ + 0 + ], + "maximum": 5, + "default": 0 + }, + "first_frame_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "First Frame URL", + "description": "URL to the first frame of the video. If provided, the model will use this frame as a reference." + }, + "ref_image_urls": { + "title": "Reference Image URLs", + "type": "array", + "description": "URLs to source reference image. If provided, the model will use this image as reference.", + "items": { + "type": "string" + } + }, + "transparency_mode": { + "enum": [ + "content_aware", + "white", + "black" + ], + "title": "Transparency Mode", + "type": "string", + "description": "The transparency mode to apply to the first and last frames. This controls how the transparent areas of the first and last frames are filled.", + "examples": [ + "content_aware" + ], + "default": "content_aware" + }, + "num_frames": { + "minimum": 17, + "title": "Number of Frames", + "type": "integer", + "maximum": 241, + "description": "Number of frames to generate. Must be between 81 to 241 (inclusive).", + "default": 81 + }, + "auto_downsample_min_fps": { + "description": "The minimum frames per second to downsample the video to. This is used to help determine the auto downsample factor to try and find the lowest detail-preserving downsample factor. The default value is appropriate for most videos, if you are using a video with very fast motion, you may need to increase this value. If your video has a very low amount of motion, you could decrease this value to allow for higher downsampling and thus longer sequences.", + "type": "number", + "minimum": 1, + "title": "Auto Downsample Min FPS", + "examples": [ + 15 + ], + "maximum": 60, + "default": 15 + }, + "guidance_scale": { + "description": "Guidance scale for classifier-free guidance. 
Higher values encourage the model to generate images closely related to the text prompt.", + "type": "number", + "minimum": 1, + "title": "Guidance Scale", + "examples": [ + 5 + ], + "maximum": 10, + "default": 5 + }, + "sampler": { + "enum": [ + "unipc", + "dpm++", + "euler" + ], + "title": "Sampler", + "type": "string", + "description": "Sampler to use for video generation.", + "examples": [ + "unipc" + ], + "default": "unipc" + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "description": "The quality of the generated video.", + "examples": [ + "high" + ], + "default": "high" + }, + "sync_mode": { + "examples": [ + false + ], + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_prompt_expansion": { + "examples": [ + false + ], + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. If None, a random seed is chosen." + }, + "interpolator_model": { + "enum": [ + "rife", + "film" + ], + "title": "Interpolator Model", + "type": "string", + "description": "The model to use for frame interpolation. Options are 'rife' or 'film'.", + "examples": [ + "film" + ], + "default": "film" + }, + "enable_auto_downsample": { + "examples": [ + false + ], + "title": "Enable Auto Downsample", + "type": "boolean", + "description": "If true, the model will automatically temporally downsample the video to an appropriate frame length for the model, then will interpolate it back to the original frame length.", + "default": false + }, + "preprocess": { + "examples": [ + false + ], + "title": "Preprocess", + "type": "boolean", + "description": "Whether to preprocess the input video.", + "default": false + }, + "shift": { + "minimum": 1, + "title": "Shift", + "type": "number", + "maximum": 15, + "description": "Shift parameter for video generation.", + "default": 5 + }, + "acceleration": { + "anyOf": [ + { + "enum": [ + "none", + "low", + "regular" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Acceleration", + "description": "Acceleration to use for inference. Options are 'none' or 'regular'. Accelerated inference will very slightly affect output, but will be significantly faster.", + "examples": [ + "regular" + ], + "default": "regular" + }, + "match_input_num_frames": { + "examples": [ + false + ], + "title": "Match Input Number of Frames", + "type": "boolean", + "description": "If true, the number of frames in the generated video will match the number of frames in the input video. If false, the number of frames will be determined by the num_frames parameter.", + "default": false + }, + "frames_per_second": { + "anyOf": [ + { + "minimum": 5, + "maximum": 30, + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Frames per Second", + "description": "Frames per second of the generated video. Must be between 5 to 30. 
Ignored if match_input_frames_per_second is true.", + "default": 16 + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": false + }, + "negative_prompt": { + "examples": [ + "letterboxing, borders, black bars, bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for video generation.", + "default": "letterboxing, borders, black bars, bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "description": "The write mode of the generated video.", + "examples": [ + "balanced" + ], + "default": "balanced" + }, + "return_frames_zip": { + "examples": [ + false + ], + "title": "Return Frames Zip", + "type": "boolean", + "description": "If true, also return a ZIP file containing all generated frames.", + "default": false + }, + "resolution": { + "enum": [ + "auto", + "240p", + "360p", + "480p", + "580p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video.", + "default": "auto" + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "1:1", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio of the generated video.", + "default": "auto" + }, + "match_input_frames_per_second": { + "examples": [ + false + ], + "title": "Match Input Frames Per Second", + "type": "boolean", + "description": "If true, the frames per second of the generated video will match the input video. If false, the frames per second will be determined by the frames_per_second parameter.", + "default": false + }, + "num_inference_steps": { + "minimum": 2, + "title": "Number of Inference Steps", + "type": "integer", + "maximum": 50, + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "default": 30 + }, + "last_frame_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Last Frame URL", + "description": "URL to the last frame of the video. If provided, the model will use this frame as a reference." + } + }, + "title": "WanVACEPoseRequest", + "required": [ + "prompt", + "video_url" + ] + }, + "WanVace14bPoseOutput": { + "x-fal-order-properties": [ + "video", + "prompt", + "seed", + "frames_zip" + ], + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation." 
+ }, + "frames_zip": { + "anyOf": [ + { + "$ref": "#/components/schemas/File" + }, + { + "type": "null" + } + ], + "description": "ZIP archive of all video frames if requested." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/wan-vace-pose-output.mp4" + } + ], + "description": "The generated pose video file.", + "$ref": "#/components/schemas/VideoFile" + } + }, + "title": "WanVACEPoseResponse", + "required": [ + "video", + "prompt", + "seed" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "title": "File", + "required": [ + "url" + ] + }, + "VideoFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "duration": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Duration", + "description": "The duration of the video" + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the video" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the video" + }, + "fps": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Fps", + "description": "The FPS of the video" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. 
It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "num_frames": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Num Frames", + "description": "The number of frames in the video" + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + } + }, + "title": "VideoFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan-vace-14b/pose/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-vace-14b/pose/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/wan-vace-14b/pose": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanVace14bPoseInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-vace-14b/pose/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanVace14bPoseOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan-vace-14b/depth", + "metadata": { + "display_name": "Wan VACE 14B", + "category": "video-to-video", + "description": "VACE is a video generation model that uses a source image, mask, and video to create prompted videos with controllable sources.", + "status": "active", + "tags": [ + "image-to-video", + "video-to-video", + "text-to-video" + ], + "updated_at": "2026-01-26T21:43:25.664Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-1.jpg", + "model_url": "https://fal.run/fal-ai/wan-vace-14b/depth", + "license_type": "commercial", + "date": "2025-06-18T19:17:12.449Z", + "group": { + "key": "wan-vace-14b", + "label": "Depth" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for 
fal-ai/wan-vace-14b/depth", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan-vace-14b/depth queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan-vace-14b/depth", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-1.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan-vace-14b/depth", + "documentationUrl": "https://fal.ai/models/fal-ai/wan-vace-14b/depth/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanVace14bDepthInput": { + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "match_input_num_frames", + "num_frames", + "match_input_frames_per_second", + "frames_per_second", + "seed", + "resolution", + "aspect_ratio", + "num_inference_steps", + "guidance_scale", + "sampler", + "shift", + "video_url", + "ref_image_urls", + "first_frame_url", + "last_frame_url", + "enable_safety_checker", + "enable_prompt_expansion", + "preprocess", + "acceleration", + "video_quality", + "video_write_mode", + "num_interpolated_frames", + "temporal_downsample_factor", + "enable_auto_downsample", + "auto_downsample_min_fps", + "interpolator_model", + "sync_mode", + "transparency_mode", + "return_frames_zip" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A confident woman strides toward the camera down a sun-drenched, empty street. Her vibrant summer dress, a flowing emerald green with delicate white floral embroidery, billows slightly in the gentle breeze. She carries a stylish, woven straw bag, its natural tan contrasting beautifully with the dress. The dress's fabric shimmers subtly, catching the light. The white embroidery is intricate, each tiny flower meticulously detailed. Her expression is focused, yet relaxed, radiating self-assuredness. Her auburn hair, partially pulled back in a loose braid, catches the sunlight, creating warm highlights. The street itself is paved with warm, grey cobblestones, reflecting the bright sun. The mood is optimistic and serene, emphasizing the woman's independence and carefree spirit. High resolution 4k" + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt to guide video generation." + }, + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/wan-vace-depth-video.mp4" + ], + "title": "Video URL", + "type": "string", + "description": "URL to the source video file. Required for depth task." + }, + "num_interpolated_frames": { + "description": "Number of frames to interpolate between the original frames. 
A value of 0 means no interpolation.", + "type": "integer", + "minimum": 0, + "title": "Number of Interpolated Frames", + "examples": [ + 0 + ], + "maximum": 5, + "default": 0 + }, + "temporal_downsample_factor": { + "description": "Temporal downsample factor for the video. This is an integer value that determines how many frames to skip in the video. A value of 0 means no downsampling. For each downsample factor, one upsample factor will automatically be applied.", + "type": "integer", + "minimum": 0, + "title": "Temporal Downsample Factor", + "examples": [ + 0 + ], + "maximum": 5, + "default": 0 + }, + "first_frame_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "First Frame URL", + "description": "URL to the first frame of the video. If provided, the model will use this frame as a reference." + }, + "ref_image_urls": { + "title": "Reference Image URLs", + "type": "array", + "description": "URLs to source reference image. If provided, the model will use this image as reference.", + "items": { + "type": "string" + } + }, + "transparency_mode": { + "enum": [ + "content_aware", + "white", + "black" + ], + "title": "Transparency Mode", + "type": "string", + "description": "The transparency mode to apply to the first and last frames. This controls how the transparent areas of the first and last frames are filled.", + "examples": [ + "content_aware" + ], + "default": "content_aware" + }, + "num_frames": { + "minimum": 17, + "title": "Number of Frames", + "type": "integer", + "maximum": 241, + "description": "Number of frames to generate. Must be between 81 to 241 (inclusive).", + "default": 81 + }, + "auto_downsample_min_fps": { + "description": "The minimum frames per second to downsample the video to. This is used to help determine the auto downsample factor to try and find the lowest detail-preserving downsample factor. The default value is appropriate for most videos, if you are using a video with very fast motion, you may need to increase this value. If your video has a very low amount of motion, you could decrease this value to allow for higher downsampling and thus longer sequences.", + "type": "number", + "minimum": 1, + "title": "Auto Downsample Min FPS", + "examples": [ + 15 + ], + "maximum": 60, + "default": 15 + }, + "guidance_scale": { + "description": "Guidance scale for classifier-free guidance. 
Higher values encourage the model to generate images closely related to the text prompt.", + "type": "number", + "minimum": 1, + "title": "Guidance Scale", + "examples": [ + 5 + ], + "maximum": 10, + "default": 5 + }, + "sampler": { + "enum": [ + "unipc", + "dpm++", + "euler" + ], + "title": "Sampler", + "type": "string", + "description": "Sampler to use for video generation.", + "examples": [ + "unipc" + ], + "default": "unipc" + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "description": "The quality of the generated video.", + "examples": [ + "high" + ], + "default": "high" + }, + "sync_mode": { + "examples": [ + false + ], + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "enable_prompt_expansion": { + "examples": [ + false + ], + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. If None, a random seed is chosen." + }, + "interpolator_model": { + "enum": [ + "rife", + "film" + ], + "title": "Interpolator Model", + "type": "string", + "description": "The model to use for frame interpolation. Options are 'rife' or 'film'.", + "examples": [ + "film" + ], + "default": "film" + }, + "enable_auto_downsample": { + "examples": [ + false + ], + "title": "Enable Auto Downsample", + "type": "boolean", + "description": "If true, the model will automatically temporally downsample the video to an appropriate frame length for the model, then will interpolate it back to the original frame length.", + "default": false + }, + "preprocess": { + "examples": [ + false + ], + "title": "Preprocess", + "type": "boolean", + "description": "Whether to preprocess the input video.", + "default": false + }, + "shift": { + "minimum": 1, + "title": "Shift", + "type": "number", + "maximum": 15, + "description": "Shift parameter for video generation.", + "default": 5 + }, + "acceleration": { + "anyOf": [ + { + "enum": [ + "none", + "low", + "regular" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Acceleration", + "description": "Acceleration to use for inference. Options are 'none' or 'regular'. Accelerated inference will very slightly affect output, but will be significantly faster.", + "examples": [ + "regular" + ], + "default": "regular" + }, + "match_input_num_frames": { + "examples": [ + false + ], + "title": "Match Input Number of Frames", + "type": "boolean", + "description": "If true, the number of frames in the generated video will match the number of frames in the input video. If false, the number of frames will be determined by the num_frames parameter.", + "default": false + }, + "frames_per_second": { + "anyOf": [ + { + "minimum": 5, + "maximum": 30, + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Frames per Second", + "description": "Frames per second of the generated video. Must be between 5 to 30. 
Ignored if match_input_frames_per_second is true.", + "default": 16 + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": false + }, + "negative_prompt": { + "examples": [ + "letterboxing, borders, black bars, bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for video generation.", + "default": "letterboxing, borders, black bars, bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "description": "The write mode of the generated video.", + "examples": [ + "balanced" + ], + "default": "balanced" + }, + "return_frames_zip": { + "examples": [ + false + ], + "title": "Return Frames Zip", + "type": "boolean", + "description": "If true, also return a ZIP file containing all generated frames.", + "default": false + }, + "resolution": { + "enum": [ + "auto", + "240p", + "360p", + "480p", + "580p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video.", + "default": "auto" + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "1:1", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio of the generated video.", + "default": "auto" + }, + "match_input_frames_per_second": { + "examples": [ + false + ], + "title": "Match Input Frames Per Second", + "type": "boolean", + "description": "If true, the frames per second of the generated video will match the input video. If false, the frames per second will be determined by the frames_per_second parameter.", + "default": false + }, + "num_inference_steps": { + "minimum": 2, + "title": "Number of Inference Steps", + "type": "integer", + "maximum": 50, + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "default": 30 + }, + "last_frame_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Last Frame URL", + "description": "URL to the last frame of the video. If provided, the model will use this frame as a reference." + } + }, + "title": "WanVACEDepthRequest", + "required": [ + "prompt", + "video_url" + ] + }, + "WanVace14bDepthOutput": { + "x-fal-order-properties": [ + "video", + "prompt", + "seed", + "frames_zip" + ], + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation." 
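Read together, the WanVace14bDepthInput schema above boils down to a single JSON POST against the queue URL, with only prompt and video_url required. Below is a minimal TypeScript sketch of a submission; it assumes Node 18+ for the global fetch, a FAL_KEY environment variable, and fal's `Key` prefix for the Authorization header, none of which are spelled out in the schema itself.

// Minimal sketch: submit a depth-guided generation request. Only `prompt`
// and `video_url` are required; everything else falls back to the schema
// defaults (30 steps, 81 frames, resolution "auto", and so on).
async function submitDepthRequest(): Promise<string> {
  const res = await fetch('https://queue.fal.run/fal-ai/wan-vace-14b/depth', {
    method: 'POST',
    headers: {
      Authorization: `Key ${process.env.FAL_KEY}`, // assumed auth format
      'Content-Type': 'application/json',
    },
    body: JSON.stringify({
      prompt: 'A confident woman strides toward the camera down a sun-drenched street.',
      video_url:
        'https://storage.googleapis.com/falserverless/example_inputs/wan-vace-depth-video.mp4',
      resolution: '480p',
    }),
  })
  // The immediate response is a QueueStatus envelope, not the finished video.
  const queued = (await res.json()) as { request_id: string; status: string }
  return queued.request_id
}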
+ }, + "frames_zip": { + "anyOf": [ + { + "$ref": "#/components/schemas/File" + }, + { + "type": "null" + } + ], + "description": "ZIP archive of all video frames if requested." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/wan-vace-depth-output.mp4" + } + ], + "description": "The generated depth video file.", + "$ref": "#/components/schemas/VideoFile" + } + }, + "title": "WanVACEDepthResponse", + "required": [ + "video", + "prompt", + "seed" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "title": "File", + "required": [ + "url" + ] + }, + "VideoFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "duration": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Duration", + "description": "The duration of the video" + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the video" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the video" + }, + "fps": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Fps", + "description": "The FPS of the video" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. 
It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "num_frames": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Num Frames", + "description": "The number of frames in the video" + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + } + }, + "title": "VideoFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan-vace-14b/depth/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-vace-14b/depth/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/wan-vace-14b/depth": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanVace14bDepthInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-vace-14b/depth/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanVace14bDepthOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/dwpose/video", + "metadata": { + "display_name": "DWPose Pose Prediction", + "category": "video-to-video", + "description": "Predict poses from videos.", + "status": "active", + "tags": [ + "pose", + "utility" + ], + "updated_at": "2026-01-26T21:43:27.729Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/dwpose.jpeg", + "model_url": "https://fal.run/fal-ai/dwpose/video", + "license_type": "commercial", + "date": "2025-06-15T05:20:30.597Z", + "group": { + "key": "dwpose", + "label": "Video to Pose" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/dwpose/video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/dwpose/video queue.", + "x-fal-metadata": { + "endpointId": 
"fal-ai/dwpose/video", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/dwpose.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/dwpose/video", + "documentationUrl": "https://fal.ai/models/fal-ai/dwpose/video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "DwposeVideoInput": { + "title": "DWPoseVideoInput", + "type": "object", + "properties": { + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/gallery/Ben2/100063-video-2160.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "URL of video to be used for pose estimation" + }, + "draw_mode": { + "enum": [ + "full-pose", + "body-pose", + "face-pose", + "hand-pose", + "face-hand-mask", + "face-mask", + "hand-mask" + ], + "title": "Draw Mode", + "type": "string", + "description": "Mode of drawing the pose on the video. Options are: 'full-pose', 'body-pose', 'face-pose', 'hand-pose', 'face-hand-mask', 'face-mask', 'hand-mask'.", + "examples": [ + "body-pose" + ], + "default": "body-pose" + } + }, + "x-fal-order-properties": [ + "video_url", + "draw_mode" + ], + "required": [ + "video_url" + ] + }, + "DwposeVideoOutput": { + "title": "DWPoseVideoOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "content_type": "video/mp4", + "url": "https://storage.googleapis.com/falserverless/example_outputs/dwpose-video-output.mp4" + } + ], + "title": "Video", + "description": "The output video with pose estimation.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/dwpose/video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/dwpose/video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/dwpose/video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DwposeVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/dwpose/video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DwposeVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ffmpeg-api/merge-audio-video", + "metadata": { + "display_name": "Ffmpeg Api Merge Audio-Video", + "category": "video-to-video", + "description": "Merge videos with standalone audio files or audio from video files.", + "status": "active", + "tags": [ + "ffmpeg" + ], + "updated_at": "2026-01-26T21:43:29.593Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-5.jpg", + "model_url": "https://fal.run/fal-ai/ffmpeg-api/merge-audio-video", + "license_type": "commercial", + "date": "2025-06-09T06:04:14.299Z", + "group": { + "key": "ffmpeg", + "label": "Merge Audio Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ffmpeg-api/merge-audio-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ffmpeg-api/merge-audio-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ffmpeg-api/merge-audio-video", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-5.jpg", + "playgroundUrl": 
"https://fal.ai/models/fal-ai/ffmpeg-api/merge-audio-video", + "documentationUrl": "https://fal.ai/models/fal-ai/ffmpeg-api/merge-audio-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FfmpegApiMergeAudioVideoInput": { + "title": "CombineInput", + "type": "object", + "properties": { + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/ffmpeg-video.mp4" + ], + "description": "URL of the video file to use as the video track", + "type": "string", + "title": "Video Url" + }, + "start_offset": { + "minimum": 0, + "description": "Offset in seconds for when the audio should start relative to the video", + "type": "number", + "title": "Start Offset", + "default": 0 + }, + "audio_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/ffmpeg-audio.wav" + ], + "description": "URL of the audio file to use as the audio track", + "type": "string", + "title": "Audio Url" + } + }, + "x-fal-order-properties": [ + "video_url", + "audio_url", + "start_offset" + ], + "required": [ + "video_url", + "audio_url" + ] + }, + "FfmpegApiMergeAudioVideoOutput": { + "title": "CombineOutput", + "type": "object", + "properties": { + "video": { + "description": "Output video with merged audio.", + "$ref": "#/components/schemas/File" + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. 
It will be auto-generated if not provided.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ffmpeg-api/merge-audio-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ffmpeg-api/merge-audio-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ffmpeg-api/merge-audio-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FfmpegApiMergeAudioVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ffmpeg-api/merge-audio-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FfmpegApiMergeAudioVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan-vace-1-3b", + "metadata": { + "display_name": "Wan Vace 1 3b", + "category": "video-to-video", + "description": "VACE is a video generation model that uses a source image, mask, and video to create prompted videos with controllable sources.", + "status": "active", + "tags": [ + "video-to-video" + ], + "updated_at": "2026-01-26T21:43:31.026Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-3.jpg", + "model_url": "https://fal.run/fal-ai/wan-vace-1-3b", + "license_type": "commercial", + "date": "2025-06-04T17:57:31.687Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for 
fal-ai/wan-vace-1-3b", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan-vace-1-3b queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan-vace-1-3b", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-3.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan-vace-1-3b", + "documentationUrl": "https://fal.ai/models/fal-ai/wan-vace-1-3b/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanVace13bInput": { + "title": "WanT2VRequest", + "type": "object", + "properties": { + "shift": { + "minimum": 1, + "title": "Shift", + "type": "number", + "maximum": 10, + "description": "Shift parameter for video generation.", + "default": 5 + }, + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/vace/src_video.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "URL to the source video file. If provided, the model will use this video as a reference." + }, + "prompt": { + "examples": [ + "The video shows a man riding a horse on a vast grassland. He has long lavender hair and wears a traditional dress of a white top and black pants. The animation style makes him look like he is doing some kind of outdoor activity or performing. The background is a spectacular mountain range and cloud sky, giving a sense of tranquility and vastness. The entire video is shot from a fixed angle, focusing on the rider and his horse." + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt to guide video generation." + }, + "mask_image_url": { + "title": "Mask Image Url", + "type": "string", + "description": "URL to the guiding mask file. If provided, the model will use this mask as a reference to create masked video. If provided mask video url will be ignored." + }, + "task": { + "enum": [ + "depth", + "inpainting", + "pose" + ], + "title": "Task", + "type": "string", + "description": "Task type for the model.", + "default": "depth" + }, + "frames_per_second": { + "minimum": 5, + "title": "Frames Per Second", + "type": "integer", + "maximum": 24, + "description": "Frames per second of the generated video. Must be between 5 to 24.", + "default": 16 + }, + "ref_image_urls": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/vace/src_ref_image_1.png" + ] + ], + "title": "Ref Image Urls", + "type": "array", + "description": "Urls to source reference image. 
If provided, the model will use these images as reference.", + "items": { + "type": "string" + } + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": false + }, + "num_frames": { + "minimum": 81, + "title": "Num Frames", + "type": "integer", + "maximum": 240, + "description": "Number of frames to generate. Must be between 81 and 240 (inclusive). Applies only when reference images are the sole input; if a source video or mask video is provided, the output length will match the source, up to 241 frames.", + "default": 81 + }, + "negative_prompt": { + "examples": [ + "bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for video generation.", + "default": "bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + }, + "resolution": { + "enum": [ + "480p", + "580p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video (480p, 580p, or 720p).", + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "auto", + "9:16", + "16:9" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio of the generated video (auto, 16:9, or 9:16).", + "default": "16:9" + }, + "mask_video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/vace/src_mask.mp4" + ], + "title": "Mask Video Url", + "type": "string", + "description": "URL to the source mask file. If provided, the model will use this mask as a reference." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility. If None, a random seed is chosen." + }, + "num_inference_steps": { + "minimum": 2, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 40, + "description": "Number of inference steps for sampling. 
Higher values give better quality but take longer.", + "default": 30 + }, + "preprocess": { + "title": "Preprocess", + "type": "boolean", + "description": "Whether to preprocess the input video.", + "default": false + }, + "enable_prompt_expansion": { + "examples": [ + false + ], + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "num_frames", + "frames_per_second", + "task", + "shift", + "seed", + "resolution", + "aspect_ratio", + "num_inference_steps", + "video_url", + "mask_video_url", + "mask_image_url", + "ref_image_urls", + "enable_safety_checker", + "enable_prompt_expansion", + "preprocess" + ], + "required": [ + "prompt" + ] + }, + "WanVace13bOutput": { + "title": "WanT2VResponse", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/vace/out_video_vace.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan-vace-1-3b/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-vace-1-3b/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
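For readers who want a typed request body rather than raw JSON, the sketch below pairs the WanVace13bInput schema above with a hand-written TypeScript subset. The interface is illustrative only; it is not generated from this schema and omits most optional fields.

// Hand-written, partial typing of WanVace13bInput for illustration.
interface WanVace13bInput {
  prompt: string // the only required field
  task?: 'depth' | 'inpainting' | 'pose'
  video_url?: string
  mask_video_url?: string
  ref_image_urls?: string[]
  resolution?: '480p' | '580p' | '720p'
  num_frames?: number // 81 to 240 per the schema
  seed?: number
}

// Example body: pose-guided generation from a source video.
const body: WanVace13bInput = {
  prompt: 'The video shows a man riding a horse on a vast grassland.',
  task: 'pose',
  video_url: 'https://storage.googleapis.com/falserverless/vace/src_video.mp4',
  resolution: '480p',
}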
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/wan-vace-1-3b": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanVace13bInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-vace-1-3b/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanVace13bOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/luma-dream-machine/ray-2-flash/reframe", + "metadata": { + "display_name": "Luma Ray 2 Flash Reframe", + "category": "video-to-video", + "description": "Adjust and enhance videos with Ray-2 Reframe. This advanced tool seamlessly reframes videos to your desired aspect ratio, intelligently inpainting missing regions to ensure realistic visuals and coherent motion, delivering exceptional quality and creative flexibility.", + "status": "active", + "tags": [ + "reframe", + "outpaint", + "flash" + ], + "updated_at": "2026-01-26T21:43:31.320Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/luma-dream-machine-ray-2.webp", + "model_url": "https://fal.run/fal-ai/luma-dream-machine/ray-2-flash/reframe", + "license_type": "commercial", + "date": "2025-06-03T15:52:49.571Z", + "group": { + "key": "luma-dream-machine", + "label": "Reframe (Ray 2 Flash)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/luma-dream-machine/ray-2-flash/reframe", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/luma-dream-machine/ray-2-flash/reframe queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/luma-dream-machine/ray-2-flash/reframe", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/luma-dream-machine-ray-2.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/luma-dream-machine/ray-2-flash/reframe", + "documentationUrl": "https://fal.ai/models/fal-ai/luma-dream-machine/ray-2-flash/reframe/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LumaDreamMachineRay2FlashReframeInput": { + "title": "ReframeVideoRequest", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "maxLength": 5000, + "minLength": 1, + "description": "Optional prompt for reframing" + }, + "aspect_ratio": { + "examples": [ + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "enum": [ + "1:1", + "16:9", + "9:16", + "4:3", + "3:4", + "21:9", + "9:21" + ], + "description": "The aspect ratio of the reframed video" + }, + "y_start": { + "title": "Y Start", + "type": "integer", + "description": "Start Y coordinate for reframing" + }, + "x_end": { + "title": "X End", + "type": "integer", + "description": "End X coordinate for reframing" + }, + "video_url": { + "examples": [ + "https://v3.fal.media/files/zebra/9aDde3Te2kuJYHdR0Kz8R_output.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "URL of the input video to reframe" + }, + "y_end": { + "title": "Y End", + "type": "integer", + "description": "End Y coordinate for reframing" + }, + "x_start": { + "title": "X Start", + "type": "integer", + "description": "Start X coordinate for reframing" + }, + "grid_position_y": { + "title": "Grid Position Y", + "type": "integer", + "description": "Y position of the grid for reframing" + }, + "grid_position_x": { + "title": "Grid Position X", + "type": "integer", + "description": "X position of the grid for reframing" + }, + "image_url": { + "title": "Image Url", + "type": "string", + "description": "Optional URL of the first frame image for reframing" + } + }, + "x-fal-order-properties": [ + "video_url", + "aspect_ratio", + "image_url", + "grid_position_x", + "grid_position_y", + "prompt", + "x_end", + "x_start", + "y_end", + "y_start" + ], + "required": [ + "video_url", + "aspect_ratio" + ] + }, + "LumaDreamMachineRay2FlashReframeOutput": { + "title": "ReframeOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/lion/L9nkXSW1MCj2oDimeJ4w5_output.mp4" + } + ], + "title": "Video", + "description": "URL of the reframed video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/luma-dream-machine/ray-2-flash/reframe/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/luma-dream-machine/ray-2-flash/reframe/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/luma-dream-machine/ray-2-flash/reframe": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LumaDreamMachineRay2FlashReframeInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/luma-dream-machine/ray-2-flash/reframe/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LumaDreamMachineRay2FlashReframeOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/luma-dream-machine/ray-2/reframe", + "metadata": { + "display_name": "Luma Ray 2 Reframe", + "category": "video-to-video", + "description": "Adjust and enhance videos with Ray-2 Reframe. 
This advanced tool seamlessly reframes videos to your desired aspect ratio, intelligently inpainting missing regions to ensure realistic visuals and coherent motion, delivering exceptional quality and creative flexibility.", + "status": "active", + "tags": [ + "reframe", + "outpaint" + ], + "updated_at": "2026-01-26T21:43:31.446Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/luma-dream-machine-ray-2.webp", + "model_url": "https://fal.run/fal-ai/luma-dream-machine/ray-2/reframe", + "license_type": "commercial", + "date": "2025-06-03T15:50:57.754Z", + "group": { + "key": "luma-dream-machine", + "label": "Reframe (Ray 2)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/luma-dream-machine/ray-2/reframe", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/luma-dream-machine/ray-2/reframe queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/luma-dream-machine/ray-2/reframe", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/luma-dream-machine-ray-2.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/luma-dream-machine/ray-2/reframe", + "documentationUrl": "https://fal.ai/models/fal-ai/luma-dream-machine/ray-2/reframe/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "LumaDreamMachineRay2ReframeInput": { + "title": "ReframeVideoRequest", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "maxLength": 5000, + "minLength": 1, + "description": "Optional prompt for reframing" + }, + "aspect_ratio": { + "examples": [ + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "enum": [ + "1:1", + "16:9", + "9:16", + "4:3", + "3:4", + "21:9", + "9:21" + ], + "description": "The aspect ratio of the reframed video" + }, + "y_start": { + "title": "Y Start", + "type": "integer", + "description": "Start Y coordinate for reframing" + }, + "x_end": { + "title": "X End", + "type": "integer", + "description": "End X coordinate for reframing" + }, + "video_url": { + "examples": [ + "https://v3.fal.media/files/zebra/9aDde3Te2kuJYHdR0Kz8R_output.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "URL of the input video to reframe" + }, + "y_end": { + "title": "Y End", + "type": "integer", + "description": "End Y coordinate for reframing" + }, + "x_start": { + "title": "X Start", + "type": "integer", + "description": "Start X coordinate for reframing" + }, + "grid_position_y": { + "title": "Grid Position Y", + "type": "integer", + "description": "Y position of the grid for reframing" + }, + "grid_position_x": { + "title": "Grid Position X", + "type": "integer", + "description": "X position of the grid for reframing" + }, + "image_url": { + "title": "Image Url", + "type": "string", + "description": "Optional URL of the first frame image for reframing" + } + }, + "x-fal-order-properties": [ + "video_url", + "aspect_ratio", + "image_url", + "grid_position_x", + "grid_position_y", + "prompt", + "x_end", + "x_start", + "y_end", + "y_start" + ], + "required": [ + "video_url", + "aspect_ratio" + ] + }, + "LumaDreamMachineRay2ReframeOutput": { + "title": "ReframeOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/lion/L9nkXSW1MCj2oDimeJ4w5_output.mp4" + } + ], + "title": "Video", + "description": "URL of the reframed video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/luma-dream-machine/ray-2/reframe/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/luma-dream-machine/ray-2/reframe/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/luma-dream-machine/ray-2/reframe": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LumaDreamMachineRay2ReframeInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/luma-dream-machine/ray-2/reframe/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LumaDreamMachineRay2ReframeOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "veed/lipsync", + "metadata": { + "display_name": "Lipsync", + "category": "video-to-video", + "description": "Generate realistic lipsync from any audio using VEED's latest model", + "status": "active", + "tags": [ + "lipsync", + "video-to-video", + "avatar" + ], + "updated_at": "2026-01-26T21:43:37.995Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/veed_logo.webp", + "model_url": "https://fal.run/veed/lipsync", + "license_type": "commercial", + "date": "2025-05-28T16:17:01.408Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for veed/lipsync", + "version": "1.0.0", + "description": "The OpenAPI schema for the veed/lipsync queue.", + "x-fal-metadata": { + "endpointId": "veed/lipsync", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/veed_logo.webp", + "playgroundUrl": "https://fal.ai/models/veed/lipsync", + "documentationUrl": 
"https://fal.ai/models/veed/lipsync/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LipsyncInput": { + "x-fal-order-properties": [ + "video_url", + "audio_url" + ], + "type": "object", + "properties": { + "video_url": { + "format": "uri", + "maxLength": 2083, + "type": "string", + "minLength": 1, + "examples": [ + "https://v3.fal.media/files/monkey/q1fDPhrpfjfsaRmbhTed4_influencer.mp4" + ], + "title": "Video Url" + }, + "audio_url": { + "format": "uri", + "maxLength": 2083, + "type": "string", + "minLength": 1, + "examples": [ + "https://v3.fal.media/files/rabbit/Ql3ade3wEKlZXRQLRbhxm_tts.mp3" + ], + "title": "Audio Url" + } + }, + "title": "LipsyncInput", + "required": [ + "video_url", + "audio_url" + ] + }, + "LipsyncOutput": { + "x-fal-order-properties": [ + "video" + ], + "type": "object", + "properties": { + "video": { + "examples": [ + { + "content_type": "video/mp4", + "url": "https://v3.fal.media/files/penguin/PsA4BJPGAojXKW2QGztm4_tmpe_e1cgbq.mp4" + } + ], + "$ref": "#/components/schemas/File" + } + }, + "title": "LipsyncAppOutput", + "required": [ + "video" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ], + "title": "File Size" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name" + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "The mime type of the file.", + "examples": [ + "image/png" + ], + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/veed/lipsync/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/veed/lipsync/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/veed/lipsync": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LipsyncInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/veed/lipsync/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LipsyncOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan-vace-14b", + "metadata": { + "display_name": "Wan VACE 14B", + "category": "video-to-video", + "description": "VACE is a video generation model that uses a source image, mask, and video to create prompted videos with controllable sources.", + "status": "active", + "tags": [ + "image-to-video", + "video-to-video", + "text-to-video" + ], + "updated_at": "2026-01-26T21:43:38.661Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-1.jpg", + "model_url": "https://fal.run/fal-ai/wan-vace-14b", + "license_type": "commercial", + "date": "2025-05-27T07:26:55.366Z", + "group": { + "key": "wan-vace-14b", + "label": "Freeform" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan-vace-14b", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan-vace-14b queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan-vace-14b", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-1.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan-vace-14b", + "documentationUrl": "https://fal.ai/models/fal-ai/wan-vace-14b/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." 
+ }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanVace14bInput": { + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "match_input_num_frames", + "num_frames", + "match_input_frames_per_second", + "frames_per_second", + "task", + "seed", + "resolution", + "aspect_ratio", + "num_inference_steps", + "guidance_scale", + "sampler", + "shift", + "video_url", + "mask_video_url", + "mask_image_url", + "ref_image_urls", + "first_frame_url", + "last_frame_url", + "enable_safety_checker", + "enable_prompt_expansion", + "preprocess", + "acceleration", + "video_quality", + "video_write_mode", + "num_interpolated_frames", + "temporal_downsample_factor", + "enable_auto_downsample", + "auto_downsample_min_fps", + "interpolator_model", + "sync_mode", + "transparency_mode", + "return_frames_zip" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "The video shows a man riding a horse on a vast grassland. He has long lavender hair and wears a traditional dress of a white top and black pants. The animation style makes him look like he is doing some kind of outdoor activity or performing. The background is a spectacular mountain range and cloud sky, giving a sense of tranquility and vastness. The entire video is shot from a fixed angle, focusing on the rider and his horse." + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt to guide video generation." + }, + "video_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Video URL", + "description": "URL to the source video file. If provided, the model will use this video as a reference." + }, + "num_interpolated_frames": { + "description": "Number of frames to interpolate between the original frames. A value of 0 means no interpolation.", + "type": "integer", + "minimum": 0, + "title": "Number of Interpolated Frames", + "examples": [ + 0 + ], + "maximum": 5, + "default": 0 + }, + "temporal_downsample_factor": { + "description": "Temporal downsample factor for the video. This is an integer value that determines how many frames to skip in the video. A value of 0 means no downsampling. For each downsample factor, one upsample factor will automatically be applied.", + "type": "integer", + "minimum": 0, + "title": "Temporal Downsample Factor", + "examples": [ + 0 + ], + "maximum": 5, + "default": 0 + }, + "first_frame_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "First Frame URL", + "description": "URL to the first frame of the video. If provided, the model will use this frame as a reference." + }, + "ref_image_urls": { + "title": "Reference Image URLs", + "type": "array", + "description": "URLs to source reference image. If provided, the model will use this image as reference.", + "items": { + "type": "string" + } + }, + "transparency_mode": { + "enum": [ + "content_aware", + "white", + "black" + ], + "title": "Transparency Mode", + "type": "string", + "description": "The transparency mode to apply to the first and last frames. 
This controls how the transparent areas of the first and last frames are filled.", + "examples": [ + "content_aware" + ], + "default": "content_aware" + }, + "num_frames": { + "minimum": 17, + "title": "Number of Frames", + "type": "integer", + "maximum": 241, + "description": "Number of frames to generate. Must be between 81 to 241 (inclusive).", + "default": 81 + }, + "auto_downsample_min_fps": { + "description": "The minimum frames per second to downsample the video to. This is used to help determine the auto downsample factor to try and find the lowest detail-preserving downsample factor. The default value is appropriate for most videos, if you are using a video with very fast motion, you may need to increase this value. If your video has a very low amount of motion, you could decrease this value to allow for higher downsampling and thus longer sequences.", + "type": "number", + "minimum": 1, + "title": "Auto Downsample Min FPS", + "examples": [ + 15 + ], + "maximum": 60, + "default": 15 + }, + "guidance_scale": { + "description": "Guidance scale for classifier-free guidance. Higher values encourage the model to generate images closely related to the text prompt.", + "type": "number", + "minimum": 1, + "title": "Guidance Scale", + "examples": [ + 5 + ], + "maximum": 10, + "default": 5 + }, + "sampler": { + "enum": [ + "unipc", + "dpm++", + "euler" + ], + "title": "Sampler", + "type": "string", + "description": "Sampler to use for video generation.", + "examples": [ + "unipc" + ], + "default": "unipc" + }, + "video_quality": { + "enum": [ + "low", + "medium", + "high", + "maximum" + ], + "title": "Video Quality", + "type": "string", + "description": "The quality of the generated video.", + "examples": [ + "high" + ], + "default": "high" + }, + "sync_mode": { + "examples": [ + false + ], + "title": "Sync Mode", + "type": "boolean", + "description": "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + "default": false + }, + "mask_video_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Mask Video URL", + "description": "URL to the source mask file. If provided, the model will use this mask as a reference." + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Seed", + "description": "Random seed for reproducibility. If None, a random seed is chosen." + }, + "interpolator_model": { + "enum": [ + "rife", + "film" + ], + "title": "Interpolator Model", + "type": "string", + "description": "The model to use for frame interpolation. 
Options are 'rife' or 'film'.", + "examples": [ + "film" + ], + "default": "film" + }, + "enable_auto_downsample": { + "examples": [ + false + ], + "title": "Enable Auto Downsample", + "type": "boolean", + "description": "If true, the model will automatically temporally downsample the video to an appropriate frame length for the model, then will interpolate it back to the original frame length.", + "default": false + }, + "preprocess": { + "examples": [ + false + ], + "title": "Preprocess", + "type": "boolean", + "description": "Whether to preprocess the input video.", + "default": false + }, + "shift": { + "minimum": 1, + "title": "Shift", + "type": "number", + "maximum": 15, + "description": "Shift parameter for video generation.", + "default": 5 + }, + "enable_prompt_expansion": { + "examples": [ + false + ], + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + }, + "acceleration": { + "anyOf": [ + { + "enum": [ + "none", + "low", + "regular" + ], + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Acceleration", + "description": "Acceleration to use for inference. Options are 'none' or 'regular'. Accelerated inference will very slightly affect output, but will be significantly faster.", + "examples": [ + "regular" + ], + "default": "regular" + }, + "mask_image_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Mask Image URL", + "description": "URL to the guiding mask file. If provided, the model will use this mask as a reference to create masked video. If provided mask video url will be ignored." + }, + "task": { + "enum": [ + "depth", + "pose", + "inpainting", + "outpainting", + "reframe" + ], + "title": "Task", + "type": "string", + "description": "Task type for the model.", + "default": "depth" + }, + "match_input_num_frames": { + "examples": [ + false + ], + "title": "Match Input Number of Frames", + "type": "boolean", + "description": "If true, the number of frames in the generated video will match the number of frames in the input video. If false, the number of frames will be determined by the num_frames parameter.", + "default": false + }, + "frames_per_second": { + "anyOf": [ + { + "minimum": 5, + "maximum": 30, + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Frames per Second", + "description": "Frames per second of the generated video. Must be between 5 to 30. 
Ignored if match_input_frames_per_second is true.", + "default": 16 + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": false + }, + "negative_prompt": { + "examples": [ + "letterboxing, borders, black bars, bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for video generation.", + "default": "letterboxing, borders, black bars, bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + }, + "video_write_mode": { + "enum": [ + "fast", + "balanced", + "small" + ], + "title": "Video Write Mode", + "type": "string", + "description": "The write mode of the generated video.", + "examples": [ + "balanced" + ], + "default": "balanced" + }, + "return_frames_zip": { + "examples": [ + false + ], + "title": "Return Frames Zip", + "type": "boolean", + "description": "If true, also return a ZIP file containing all generated frames.", + "default": false + }, + "resolution": { + "enum": [ + "auto", + "240p", + "360p", + "480p", + "580p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video.", + "default": "auto" + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "1:1", + "9:16" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio of the generated video.", + "default": "auto" + }, + "match_input_frames_per_second": { + "examples": [ + false + ], + "title": "Match Input Frames Per Second", + "type": "boolean", + "description": "If true, the frames per second of the generated video will match the input video. If false, the frames per second will be determined by the frames_per_second parameter.", + "default": false + }, + "num_inference_steps": { + "minimum": 2, + "title": "Number of Inference Steps", + "type": "integer", + "maximum": 50, + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "default": 30 + }, + "last_frame_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Last Frame URL", + "description": "URL to the last frame of the video. If provided, the model will use this frame as a reference." + } + }, + "title": "WanVACERequest", + "required": [ + "prompt" + ] + }, + "WanVace14bOutput": { + "x-fal-order-properties": [ + "video", + "prompt", + "seed", + "frames_zip" + ], + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation." + }, + "frames_zip": { + "anyOf": [ + { + "$ref": "#/components/schemas/File" + }, + { + "type": "null" + } + ], + "description": "ZIP archive of all video frames if requested." 
+ }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "description": "The generated video file.", + "$ref": "#/components/schemas/VideoFile" + } + }, + "title": "WanVACEResponse", + "required": [ + "video", + "prompt", + "seed" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "title": "File", + "required": [ + "url" + ] + }, + "VideoFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height", + "fps", + "duration", + "num_frames" + ], + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "duration": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Duration", + "description": "The duration of the video" + }, + "height": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height", + "description": "The height of the video" + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width", + "description": "The width of the video" + }, + "fps": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Fps", + "description": "The FPS of the video" + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "num_frames": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Num Frames", + "description": "The number of frames in the video" + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + } + }, + "title": "VideoFile", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan-vace-14b/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-vace-14b/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/wan-vace-14b": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanVace14bInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-vace-14b/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanVace14bOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx-video-13b-distilled/extend", + "metadata": { + "display_name": "LTX Video-0.9.7 13B Distilled", + "category": "video-to-video", + "description": "Extend videos using LTX Video-0.9.7 13B Distilled and custom LoRA", + "status": "active", + "tags": [ + "video", + "ltx-video", + "video-to-video", + "extend-video" + ], + "updated_at": "2026-01-26T21:43:41.229Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Training-2.jpg", + "model_url": "https://fal.run/fal-ai/ltx-video-13b-distilled/extend", + "license_type": "commercial", + "date": "2025-05-17T02:04:17.474Z", + "group": { + "key": "ltx-video-13b-distilled", + "label": "Extend Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/ltx-video-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/ltx-video-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-video-13b-distilled/extend", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx-video-13b-distilled/extend queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-video-13b-distilled/extend", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Training-2.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx-video-13b-distilled/extend", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx-video-13b-distilled/extend/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + 
"type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LtxVideo13bDistilledExtendInput": { + "title": "DistilledExtendVideoInput", + "type": "object", + "properties": { + "second_pass_skip_initial_steps": { + "description": "The number of inference steps to skip in the initial steps of the second pass. By skipping some steps at the beginning, the second pass can focus on smaller details instead of larger changes.", + "type": "integer", + "minimum": 1, + "title": "Second Pass Skip Initial Steps", + "examples": [ + 5 + ], + "maximum": 20, + "default": 5 + }, + "first_pass_num_inference_steps": { + "description": "Number of inference steps during the first pass.", + "type": "integer", + "minimum": 2, + "title": "First Pass Num Inference Steps", + "examples": [ + 8 + ], + "maximum": 20, + "default": 8 + }, + "frame_rate": { + "description": "The frame rate of the video.", + "type": "integer", + "minimum": 1, + "title": "Frame Rate", + "examples": [ + 30 + ], + "maximum": 60, + "default": 30 + }, + "reverse_video": { + "examples": [ + false + ], + "title": "Reverse Video", + "type": "boolean", + "description": "Whether to reverse the video.", + "default": false + }, + "prompt": { + "examples": [ + "Woman walking on a street in Tokyo" + ], + "title": "Prompt", + "type": "string", + "description": "Text prompt to guide generation" + }, + "expand_prompt": { + "examples": [ + false + ], + "title": "Expand Prompt", + "type": "boolean", + "description": "Whether to expand the prompt using a language model.", + "default": false + }, + "loras": { + "title": "Loras", + "type": "array", + "description": "LoRA weights to use for generation", + "items": { + "$ref": "#/components/schemas/LoRAWeight" + }, + "default": [] + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "num_frames": { + "description": "The number of frames in the video.", + "type": "integer", + "minimum": 9, + "title": "Num Frames", + "examples": [ + 121 + ], + "maximum": 161, + "default": 121 + }, + "second_pass_num_inference_steps": { + "description": "Number of inference steps during the second pass.", + "type": "integer", + "minimum": 2, + "title": "Second Pass Num Inference Steps", + "examples": [ + 8 + ], + "maximum": 20, + "default": 8 + }, + "video": { + "examples": [ + { + "video_url": "https://storage.googleapis.com/falserverless/web-examples/wan/t2v.mp4", + "reverse_video": false, + "start_frame_num": 24, + "limit_num_frames": false, + "resample_fps": false, + "strength": 1, + "target_fps": 30, + "max_num_frames": 121, + "conditioning_type": "rgb", + "preprocess": false + } + ], + "title": "Video", + "description": "Video to be extended.", + "allOf": [ + { + "$ref": "#/components/schemas/VideoConditioningInput" + } + ] + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + 
"description": "Negative prompt for generation", + "default": "worst quality, inconsistent motion, blurry, jittery, distorted" + }, + "resolution": { + "enum": [ + "480p", + "720p" + ], + "title": "Resolution", + "type": "string", + "examples": [ + "720p" + ], + "description": "Resolution of the generated video (480p or 720p).", + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "9:16", + "1:1", + "16:9", + "auto" + ], + "title": "Aspect Ratio", + "type": "string", + "examples": [ + "auto" + ], + "description": "The aspect ratio of the video.", + "default": "auto" + }, + "constant_rate_factor": { + "description": "The constant rate factor (CRF) to compress input media with. Compressed input media more closely matches the model's training data, which can improve motion quality.", + "type": "integer", + "minimum": 20, + "title": "Constant Rate Factor", + "examples": [ + 35 + ], + "maximum": 60, + "default": 35 + }, + "first_pass_skip_final_steps": { + "minimum": 0, + "title": "First Pass Skip Final Steps", + "type": "integer", + "maximum": 20, + "description": "Number of inference steps to skip in the final steps of the first pass. By skipping some steps at the end, the first pass can focus on larger changes instead of smaller details.", + "default": 1 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for generation" + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "loras", + "resolution", + "aspect_ratio", + "seed", + "num_frames", + "first_pass_num_inference_steps", + "first_pass_skip_final_steps", + "second_pass_num_inference_steps", + "second_pass_skip_initial_steps", + "frame_rate", + "expand_prompt", + "reverse_video", + "enable_safety_checker", + "constant_rate_factor", + "video" + ], + "description": "Distilled model input", + "required": [ + "prompt", + "video" + ] + }, + "LtxVideo13bDistilledExtendOutput": { + "title": "ExtendVideoOutput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Woman walking on a street in Tokyo" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/ltx-v095_extend.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "prompt", + "seed" + ], + "required": [ + "video", + "prompt", + "seed" + ] + }, + "LoRAWeight": { + "title": "LoRAWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "title": "Scale", + "type": "number", + "maximum": 4, + "description": "Scale of the LoRA weight. This is a multiplier applied to the LoRA weight when loading it.", + "default": 1 + }, + "weight_name": { + "title": "Weight Name", + "type": "string", + "description": "Name of the LoRA weight. Only used if `path` is a HuggingFace repository, and is only required when the repository contains multiple LoRA weights." 
+ } + }, + "x-fal-order-properties": [ + "path", + "weight_name", + "scale" + ], + "required": [ + "path" + ] + }, + "VideoConditioningInput": { + "title": "VideoConditioningInput", + "type": "object", + "properties": { + "video_url": { + "title": "Video URL", + "type": "string", + "description": "URL of video to use as conditioning" + }, + "reverse_video": { + "title": "Reverse Video", + "type": "boolean", + "description": "Whether to reverse the video. This is useful for tasks where the video conditioning should be applied in reverse order.", + "default": false + }, + "start_frame_num": { + "description": "Frame number of the video from which the conditioning starts. Must be a multiple of 8.", + "type": "integer", + "minimum": 0, + "title": "Start Frame Number", + "maximum": 160, + "multipleOf": 8, + "default": 0 + }, + "limit_num_frames": { + "title": "Limit Number of Frames", + "type": "boolean", + "description": "Whether to limit the number of frames used from the video. If True, the `max_num_frames` parameter will be used to limit the number of frames.", + "default": false + }, + "resample_fps": { + "title": "Resample FPS", + "type": "boolean", + "description": "Whether to resample the video to a specific FPS. If True, the `target_fps` parameter will be used to resample the video.", + "default": false + }, + "strength": { + "minimum": 0, + "title": "Strength", + "type": "number", + "maximum": 1, + "description": "Strength of the conditioning. 0.0 means no conditioning, 1.0 means full conditioning.", + "default": 1 + }, + "target_fps": { + "description": "Target FPS to resample the video to. Only relevant if `resample_fps` is True.", + "type": "integer", + "minimum": 1, + "title": "Target FPS", + "examples": [ + 30 + ], + "maximum": 60, + "default": 30 + }, + "max_num_frames": { + "description": "Maximum number of frames to use from the video. If None, all frames will be used.", + "type": "integer", + "minimum": 1, + "title": "Maximum Number of Frames", + "examples": [ + 121 + ], + "maximum": 161, + "default": 121 + }, + "conditioning_type": { + "enum": [ + "rgb", + "depth", + "pose", + "canny" + ], + "title": "Conditioning Type", + "type": "string", + "examples": [ + "rgb" + ], + "description": "Type of conditioning this video provides. This is relevant to ensure in-context LoRA weights are applied correctly, as well as selecting the correct preprocessing pipeline, when enabled.", + "default": "rgb" + }, + "preprocess": { + "title": "Preprocess", + "type": "boolean", + "description": "Whether to preprocess the video. If True, the video will be preprocessed to match the conditioning type. This is a no-op for RGB conditioning.", + "default": false + } + }, + "x-fal-order-properties": [ + "video_url", + "conditioning_type", + "preprocess", + "start_frame_num", + "strength", + "limit_num_frames", + "max_num_frames", + "resample_fps", + "target_fps", + "reverse_video" + ], + "required": [ + "video_url" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." 
+ }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-video-13b-distilled/extend/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-13b-distilled/extend/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-13b-distilled/extend": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LtxVideo13bDistilledExtendInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-13b-distilled/extend/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LtxVideo13bDistilledExtendOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx-video-13b-distilled/multiconditioning", + "metadata": { + "display_name": "LTX Video-0.9.7 13B Distilled", + "category": "video-to-video", + "description": "Generate videos from prompts, images, and videos using LTX Video-0.9.7 13B Distilled and custom LoRA", + "status": "active", + "tags": [ + "video", + "ltx-video", + "video-to-video", + "multicondition-to-video", + "image-to-video" + ], + "updated_at": "2026-01-26T21:43:41.353Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Training.jpg", + "model_url": "https://fal.run/fal-ai/ltx-video-13b-distilled/multiconditioning", + "license_type": "commercial", + "date": "2025-05-17T02:02:53.741Z", + "group": { + "key": "ltx-video-13b-distilled", + "label": "Multicondition to Video" + }, + "highlighted": false, + "kind": "inference", + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/ltx-video-trainer" + ], + 
"inference_endpoint_ids": [ + "fal-ai/ltx-video-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-video-13b-distilled/multiconditioning", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx-video-13b-distilled/multiconditioning queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-video-13b-distilled/multiconditioning", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Training.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx-video-13b-distilled/multiconditioning", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx-video-13b-distilled/multiconditioning/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LtxVideo13bDistilledMulticonditioningInput": { + "title": "DistilledMultiConditioningVideoInput", + "type": "object", + "properties": { + "second_pass_skip_initial_steps": { + "description": "The number of inference steps to skip in the initial steps of the second pass. By skipping some steps at the beginning, the second pass can focus on smaller details instead of larger changes.", + "type": "integer", + "minimum": 1, + "title": "Second Pass Skip Initial Steps", + "examples": [ + 5 + ], + "maximum": 20, + "default": 5 + }, + "first_pass_num_inference_steps": { + "description": "Number of inference steps during the first pass.", + "type": "integer", + "minimum": 2, + "title": "First Pass Num Inference Steps", + "examples": [ + 8 + ], + "maximum": 20, + "default": 8 + }, + "frame_rate": { + "description": "The frame rate of the video.", + "type": "integer", + "minimum": 1, + "title": "Frame Rate", + "examples": [ + 30 + ], + "maximum": 60, + "default": 30 + }, + "reverse_video": { + "examples": [ + false + ], + "title": "Reverse Video", + "type": "boolean", + "description": "Whether to reverse the video.", + "default": false + }, + "prompt": { + "examples": [ + "A vibrant, abstract composition featuring a person with outstretched arms, rendered in a kaleidoscope of colors against a deep, dark background. The figure is composed of intricate, swirling patterns reminiscent of a mosaic, with hues of orange, yellow, blue, and green that evoke the style of artists such as Wassily Kandinsky or Bridget Riley. The camera zooms into the face striking portrait of a man, reimagined through the lens of old-school video-game graphics. The subject's face is rendered in a kaleidoscope of colors, with bold blues and reds set against a vibrant yellow backdrop. 
His dark hair is pulled back, framing his profile in a dramatic pose." + ], + "title": "Prompt", + "type": "string", + "description": "Text prompt to guide generation" + }, + "expand_prompt": { + "examples": [ + false + ], + "title": "Expand Prompt", + "type": "boolean", + "description": "Whether to expand the prompt using a language model.", + "default": false + }, + "loras": { + "title": "Loras", + "type": "array", + "description": "LoRA weights to use for generation", + "items": { + "$ref": "#/components/schemas/LoRAWeight" + }, + "default": [] + }, + "images": { + "description": "URLs of images to use as conditioning", + "type": "array", + "items": { + "$ref": "#/components/schemas/ImageConditioningInput" + }, + "examples": [ + [ + { + "strength": 1, + "start_frame_num": 0, + "image_url": "https://storage.googleapis.com/falserverless/model_tests/ltx/NswO1P8sCLzrh1WefqQFK_9a6bdbfa54b944c9a770338159a113fd.jpg" + }, + { + "strength": 1, + "start_frame_num": 120, + "image_url": "https://storage.googleapis.com/falserverless/model_tests/ltx/YAPOGvmS2tM_Krdp7q6-d_267c97e017c34f679844a4477dfcec38.jpg" + } + ] + ], + "title": "Images", + "default": [] + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "num_frames": { + "description": "The number of frames in the video.", + "type": "integer", + "minimum": 9, + "title": "Num Frames", + "examples": [ + 121 + ], + "maximum": 161, + "default": 121 + }, + "second_pass_num_inference_steps": { + "description": "Number of inference steps during the second pass.", + "type": "integer", + "minimum": 2, + "title": "Second Pass Num Inference Steps", + "examples": [ + 8 + ], + "maximum": 20, + "default": 8 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for generation", + "default": "worst quality, inconsistent motion, blurry, jittery, distorted" + }, + "resolution": { + "enum": [ + "480p", + "720p" + ], + "title": "Resolution", + "type": "string", + "examples": [ + "720p" + ], + "description": "Resolution of the generated video (480p or 720p).", + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "9:16", + "1:1", + "16:9", + "auto" + ], + "title": "Aspect Ratio", + "type": "string", + "examples": [ + "auto" + ], + "description": "The aspect ratio of the video.", + "default": "auto" + }, + "constant_rate_factor": { + "description": "The constant rate factor (CRF) to compress input media with. Compressed input media more closely matches the model's training data, which can improve motion quality.", + "type": "integer", + "minimum": 20, + "title": "Constant Rate Factor", + "examples": [ + 35 + ], + "maximum": 60, + "default": 35 + }, + "videos": { + "title": "Videos", + "type": "array", + "description": "Videos to use as conditioning", + "items": { + "$ref": "#/components/schemas/VideoConditioningInput" + }, + "default": [] + }, + "first_pass_skip_final_steps": { + "minimum": 0, + "title": "First Pass Skip Final Steps", + "type": "integer", + "maximum": 20, + "description": "Number of inference steps to skip in the final steps of the first pass.
By skipping some steps at the end, the first pass can focus on larger changes instead of smaller details.", + "default": 1 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for generation" + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "loras", + "resolution", + "aspect_ratio", + "seed", + "num_frames", + "first_pass_num_inference_steps", + "first_pass_skip_final_steps", + "second_pass_num_inference_steps", + "second_pass_skip_initial_steps", + "frame_rate", + "expand_prompt", + "reverse_video", + "enable_safety_checker", + "constant_rate_factor", + "images", + "videos" + ], + "description": "Distilled model input", + "required": [ + "prompt" + ] + }, + "LtxVideo13bDistilledMulticonditioningOutput": { + "title": "MultiConditioningVideoOutput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A vibrant, abstract composition featuring a person with outstretched arms, rendered in a kaleidoscope of colors against a deep, dark background. The figure is composed of intricate, swirling patterns reminiscent of a mosaic, with hues of orange, yellow, blue, and green that evoke the style of artists such as Wassily Kandinsky or Bridget Riley. The camera zooms into the face striking portrait of a man, reimagined through the lens of old-school video-game graphics. The subject's face is rendered in a kaleidoscope of colors, with bold blues and reds set against a vibrant yellow backdrop. His dark hair is pulled back, framing his profile in a dramatic pose." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/ltxv-multiconditioning-output.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "prompt", + "seed" + ], + "required": [ + "video", + "prompt", + "seed" + ] + }, + "LoRAWeight": { + "title": "LoRAWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "title": "Scale", + "type": "number", + "maximum": 4, + "description": "Scale of the LoRA weight. This is a multiplier applied to the LoRA weight when loading it.", + "default": 1 + }, + "weight_name": { + "title": "Weight Name", + "type": "string", + "description": "Name of the LoRA weight. Only used if `path` is a HuggingFace repository, and is only required when the repository contains multiple LoRA weights." + } + }, + "x-fal-order-properties": [ + "path", + "weight_name", + "scale" + ], + "required": [ + "path" + ] + }, + "ImageConditioningInput": { + "title": "ImageConditioningInput", + "type": "object", + "properties": { + "strength": { + "minimum": 0, + "title": "Strength", + "type": "number", + "maximum": 1, + "description": "Strength of the conditioning. 0.0 means no conditioning, 1.0 means full conditioning.", + "default": 1 + }, + "start_frame_num": { + "description": "Frame number of the image from which the conditioning starts. 
Must be a multiple of 8.", + "type": "integer", + "minimum": 0, + "title": "Start Frame Number", + "maximum": 160, + "multipleOf": 8, + "default": 0 + }, + "image_url": { + "title": "Image URL", + "type": "string", + "description": "URL of image to use as conditioning" + } + }, + "x-fal-order-properties": [ + "image_url", + "start_frame_num", + "strength" + ], + "required": [ + "image_url" + ] + }, + "VideoConditioningInput": { + "title": "VideoConditioningInput", + "type": "object", + "properties": { + "video_url": { + "title": "Video URL", + "type": "string", + "description": "URL of video to use as conditioning" + }, + "reverse_video": { + "title": "Reverse Video", + "type": "boolean", + "description": "Whether to reverse the video. This is useful for tasks where the video conditioning should be applied in reverse order.", + "default": false + }, + "start_frame_num": { + "description": "Frame number of the video from which the conditioning starts. Must be a multiple of 8.", + "type": "integer", + "minimum": 0, + "title": "Start Frame Number", + "maximum": 160, + "multipleOf": 8, + "default": 0 + }, + "limit_num_frames": { + "title": "Limit Number of Frames", + "type": "boolean", + "description": "Whether to limit the number of frames used from the video. If True, the `max_num_frames` parameter will be used to limit the number of frames.", + "default": false + }, + "resample_fps": { + "title": "Resample FPS", + "type": "boolean", + "description": "Whether to resample the video to a specific FPS. If True, the `target_fps` parameter will be used to resample the video.", + "default": false + }, + "strength": { + "minimum": 0, + "title": "Strength", + "type": "number", + "maximum": 1, + "description": "Strength of the conditioning. 0.0 means no conditioning, 1.0 means full conditioning.", + "default": 1 + }, + "target_fps": { + "description": "Target FPS to resample the video to. Only relevant if `resample_fps` is True.", + "type": "integer", + "minimum": 1, + "title": "Target FPS", + "examples": [ + 30 + ], + "maximum": 60, + "default": 30 + }, + "max_num_frames": { + "description": "Maximum number of frames to use from the video. If None, all frames will be used.", + "type": "integer", + "minimum": 1, + "title": "Maximum Number of Frames", + "examples": [ + 121 + ], + "maximum": 161, + "default": 121 + }, + "conditioning_type": { + "enum": [ + "rgb", + "depth", + "pose", + "canny" + ], + "title": "Conditioning Type", + "type": "string", + "examples": [ + "rgb" + ], + "description": "Type of conditioning this video provides. This is relevant to ensure in-context LoRA weights are applied correctly, as well as selecting the correct preprocessing pipeline, when enabled.", + "default": "rgb" + }, + "preprocess": { + "title": "Preprocess", + "type": "boolean", + "description": "Whether to preprocess the video. If True, the video will be preprocessed to match the conditioning type. This is a no-op for RGB conditioning.", + "default": false + } + }, + "x-fal-order-properties": [ + "video_url", + "conditioning_type", + "preprocess", + "start_frame_num", + "strength", + "limit_num_frames", + "max_num_frames", + "resample_fps", + "target_fps", + "reverse_video" + ], + "required": [ + "video_url" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." 
+ }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-video-13b-distilled/multiconditioning/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-13b-distilled/multiconditioning/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-13b-distilled/multiconditioning": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LtxVideo13bDistilledMulticonditioningInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-13b-distilled/multiconditioning/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LtxVideo13bDistilledMulticonditioningOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx-video-13b-dev/multiconditioning", + "metadata": { + "display_name": "LTX Video-0.9.7 13B", + "category": "video-to-video", + "description": "Generate videos from prompts, images, and videos using LTX Video-0.9.7 13B and custom LoRA", + "status": "active", + "tags": [ + "video", + "ltx-video", + "video-to-video", + "multicondition-to-video", + "image-to-video" + ], + "updated_at": "2026-01-26T21:43:41.621Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Training.jpg", + "model_url": "https://fal.run/fal-ai/ltx-video-13b-dev/multiconditioning", + "license_type": "commercial", + "date": "2025-05-17T01:56:16.027Z", + "group": { + "key": "ltx-video-13b", + "label": "Multicondition to Video" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/ltx-video-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/ltx-video-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-video-13b-dev/multiconditioning", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx-video-13b-dev/multiconditioning queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-video-13b-dev/multiconditioning", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Training.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx-video-13b-dev/multiconditioning", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx-video-13b-dev/multiconditioning/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LtxVideo13bDevMulticonditioningInput": { + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "loras", + "resolution", + "aspect_ratio", + "seed", + "num_frames", + "first_pass_num_inference_steps", + "first_pass_skip_final_steps", + "second_pass_num_inference_steps", + "second_pass_skip_initial_steps", + "frame_rate", + "expand_prompt", + "reverse_video", + "enable_safety_checker", + "constant_rate_factor", + "images", + "videos" + ], + "type": "object", + "properties": { + "second_pass_skip_initial_steps": { + "description": "The number of inference steps to skip in the initial steps of the second pass. By skipping some steps at the beginning, the second pass can focus on smaller details instead of larger changes.", + "type": "integer", + "minimum": 1, + "maximum": 50, + "title": "Second Pass Skip Initial Steps", + "examples": [ + 17 + ], + "default": 17 + }, + "first_pass_num_inference_steps": { + "description": "Number of inference steps during the first pass.", + "type": "integer", + "minimum": 2, + "maximum": 50, + "title": "First Pass Num Inference Steps", + "examples": [ + 30 + ], + "default": 30 + }, + "frame_rate": { + "description": "The frame rate of the video.", + "type": "integer", + "minimum": 1, + "maximum": 60, + "title": "Frame Rate", + "examples": [ + 30 + ], + "default": 30 + }, + "prompt": { + "examples": [ + "A vibrant, abstract composition featuring a person with outstretched arms, rendered in a kaleidoscope of colors against a deep, dark background. The figure is composed of intricate, swirling patterns reminiscent of a mosaic, with hues of orange, yellow, blue, and green that evoke the style of artists such as Wassily Kandinsky or Bridget Riley. The camera zooms into the face striking portrait of a man, reimagined through the lens of old-school video-game graphics. The subject's face is rendered in a kaleidoscope of colors, with bold blues and reds set against a vibrant yellow backdrop. His dark hair is pulled back, framing his profile in a dramatic pose." 
+ ], + "title": "Prompt", + "type": "string", + "description": "Text prompt to guide generation" + }, + "reverse_video": { + "examples": [ + false + ], + "title": "Reverse Video", + "type": "boolean", + "description": "Whether to reverse the video.", + "default": false + }, + "expand_prompt": { + "examples": [ + false + ], + "title": "Expand Prompt", + "type": "boolean", + "description": "Whether to expand the prompt using a language model.", + "default": false + }, + "loras": { + "title": "Loras", + "type": "array", + "description": "LoRA weights to use for generation", + "items": { + "$ref": "#/components/schemas/LoRAWeight" + }, + "default": [] + }, + "images": { + "description": "URL of images to use as conditioning", + "type": "array", + "items": { + "$ref": "#/components/schemas/ImageConditioningInput" + }, + "examples": [ + [ + { + "strength": 1, + "start_frame_num": 0, + "image_url": "https://storage.googleapis.com/falserverless/model_tests/ltx/NswO1P8sCLzrh1WefqQFK_9a6bdbfa54b944c9a770338159a113fd.jpg" + }, + { + "strength": 1, + "start_frame_num": 88, + "image_url": "https://storage.googleapis.com/falserverless/model_tests/ltx/YAPOGvmS2tM_Krdp7q6-d_267c97e017c34f679844a4477dfcec38.jpg" + } + ] + ], + "title": "Images", + "default": [] + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "num_frames": { + "description": "The number of frames in the video.", + "type": "integer", + "minimum": 9, + "maximum": 161, + "title": "Num Frames", + "examples": [ + 121 + ], + "default": 121 + }, + "second_pass_num_inference_steps": { + "description": "Number of inference steps during the second pass.", + "type": "integer", + "minimum": 2, + "maximum": 50, + "title": "Second Pass Num Inference Steps", + "examples": [ + 30 + ], + "default": 30 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for generation", + "default": "worst quality, inconsistent motion, blurry, jittery, distorted" + }, + "resolution": { + "enum": [ + "480p", + "720p" + ], + "title": "Resolution", + "type": "string", + "examples": [ + "720p" + ], + "description": "Resolution of the generated video (480p or 720p).", + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "9:16", + "1:1", + "16:9", + "auto" + ], + "title": "Aspect Ratio", + "type": "string", + "examples": [ + "auto" + ], + "description": "The aspect ratio of the video.", + "default": "auto" + }, + "videos": { + "title": "Videos", + "type": "array", + "description": "Videos to use as conditioning", + "items": { + "$ref": "#/components/schemas/VideoConditioningInput" + }, + "default": [] + }, + "constant_rate_factor": { + "description": "The constant rate factor (CRF) to compress input media with. Compressed input media more closely matches the model's training data, which can improve motion quality.", + "type": "integer", + "minimum": 20, + "maximum": 60, + "title": "Constant Rate Factor", + "examples": [ + 35 + ], + "default": 35 + }, + "first_pass_skip_final_steps": { + "minimum": 0, + "maximum": 50, + "type": "integer", + "title": "First Pass Skip Final Steps", + "description": "Number of inference steps to skip in the final steps of the first pass. 
By skipping some steps at the end, the first pass can focus on larger changes instead of smaller details.", + "default": 3 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for generation" + } + }, + "title": "MultiConditioningVideoInput", + "required": [ + "prompt" + ] + }, + "LtxVideo13bDevMulticonditioningOutput": { + "x-fal-order-properties": [ + "video", + "prompt", + "seed" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A vibrant, abstract composition featuring a person with outstretched arms, rendered in a kaleidoscope of colors against a deep, dark background. The figure is composed of intricate, swirling patterns reminiscent of a mosaic, with hues of orange, yellow, blue, and green that evoke the style of artists such as Wassily Kandinsky or Bridget Riley. The camera zooms into the face striking portrait of a man, reimagined through the lens of old-school video-game graphics. The subject's face is rendered in a kaleidoscope of colors, with bold blues and reds set against a vibrant yellow backdrop. His dark hair is pulled back, framing his profile in a dramatic pose." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/ltxv-multiconditioning-output.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "MultiConditioningVideoOutput", + "required": [ + "video", + "prompt", + "seed" + ] + }, + "LoRAWeight": { + "x-fal-order-properties": [ + "path", + "weight_name", + "scale" + ], + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Scale", + "description": "Scale of the LoRA weight. This is a multiplier applied to the LoRA weight when loading it.", + "default": 1 + }, + "weight_name": { + "title": "Weight Name", + "type": "string", + "description": "Name of the LoRA weight. Only used if `path` is a HuggingFace repository, and is only required when the repository contains multiple LoRA weights." + } + }, + "title": "LoRAWeight", + "required": [ + "path" + ] + }, + "ImageConditioningInput": { + "x-fal-order-properties": [ + "image_url", + "start_frame_num", + "strength" + ], + "type": "object", + "properties": { + "strength": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Strength", + "description": "Strength of the conditioning. 0.0 means no conditioning, 1.0 means full conditioning.", + "default": 1 + }, + "start_frame_num": { + "description": "Frame number of the image from which the conditioning starts. 
Must be a multiple of 8.", + "type": "integer", + "minimum": 0, + "maximum": 160, + "title": "Start Frame Number", + "multipleOf": 8, + "default": 0 + }, + "image_url": { + "title": "Image URL", + "type": "string", + "description": "URL of image to use as conditioning" + } + }, + "title": "ImageConditioningInput", + "required": [ + "image_url" + ] + }, + "VideoConditioningInput": { + "x-fal-order-properties": [ + "video_url", + "conditioning_type", + "preprocess", + "start_frame_num", + "strength", + "limit_num_frames", + "max_num_frames", + "resample_fps", + "target_fps", + "reverse_video" + ], + "type": "object", + "properties": { + "video_url": { + "title": "Video URL", + "type": "string", + "description": "URL of video to use as conditioning" + }, + "start_frame_num": { + "description": "Frame number of the video from which the conditioning starts. Must be a multiple of 8.", + "type": "integer", + "minimum": 0, + "maximum": 160, + "title": "Start Frame Number", + "multipleOf": 8, + "default": 0 + }, + "reverse_video": { + "title": "Reverse Video", + "type": "boolean", + "description": "Whether to reverse the video. This is useful for tasks where the video conditioning should be applied in reverse order.", + "default": false + }, + "limit_num_frames": { + "title": "Limit Number of Frames", + "type": "boolean", + "description": "Whether to limit the number of frames used from the video. If True, the `max_num_frames` parameter will be used to limit the number of frames.", + "default": false + }, + "resample_fps": { + "title": "Resample FPS", + "type": "boolean", + "description": "Whether to resample the video to a specific FPS. If True, the `target_fps` parameter will be used to resample the video.", + "default": false + }, + "strength": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Strength", + "description": "Strength of the conditioning. 0.0 means no conditioning, 1.0 means full conditioning.", + "default": 1 + }, + "target_fps": { + "description": "Target FPS to resample the video to. Only relevant if `resample_fps` is True.", + "type": "integer", + "minimum": 1, + "maximum": 60, + "title": "Target FPS", + "examples": [ + 30 + ], + "default": 30 + }, + "max_num_frames": { + "description": "Maximum number of frames to use from the video. If None, all frames will be used.", + "type": "integer", + "minimum": 1, + "maximum": 161, + "title": "Maximum Number of Frames", + "examples": [ + 121 + ], + "default": 121 + }, + "conditioning_type": { + "enum": [ + "rgb", + "depth", + "pose", + "canny" + ], + "title": "Conditioning Type", + "type": "string", + "examples": [ + "rgb" + ], + "description": "Type of conditioning this video provides. This is relevant to ensure in-context LoRA weights are applied correctly, as well as selecting the correct preprocessing pipeline, when enabled.", + "default": "rgb" + }, + "preprocess": { + "title": "Preprocess", + "type": "boolean", + "description": "Whether to preprocess the video. If True, the video will be preprocessed to match the conditioning type. This is a no-op for RGB conditioning.", + "default": false + } + }, + "title": "VideoConditioningInput", + "required": [ + "video_url" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." 
+ }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-video-13b-dev/multiconditioning/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-13b-dev/multiconditioning/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-13b-dev/multiconditioning": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LtxVideo13bDevMulticonditioningInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-13b-dev/multiconditioning/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LtxVideo13bDevMulticonditioningOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx-video-13b-dev/extend", + "metadata": { + "display_name": "LTX Video-0.9.7 13B", + "category": "video-to-video", + "description": "Extend videos using LTX Video-0.9.7 13B and custom LoRA", + "status": "active", + "tags": [ + "video", + "ltx-video", + "video-to-video", + "extend-video" + ], + "updated_at": "2026-01-26T21:43:41.745Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Training-2.jpg", + "model_url": "https://fal.run/fal-ai/ltx-video-13b-dev/extend", + "license_type": "commercial", + "date": "2025-05-17T01:55:01.045Z", + "group": { + "key": "ltx-video-13b", + "label": "Extend Video" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false, + "training_endpoint_ids": [ + "fal-ai/ltx-video-trainer" + ], + "inference_endpoint_ids": [ + "fal-ai/ltx-video-trainer" + ] + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-video-13b-dev/extend", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx-video-13b-dev/extend queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-video-13b-dev/extend", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Training-2.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx-video-13b-dev/extend", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx-video-13b-dev/extend/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "LtxVideo13bDevExtendInput": { + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "loras", + "resolution", + "aspect_ratio", + "seed", + "num_frames", + "first_pass_num_inference_steps", + "first_pass_skip_final_steps", + "second_pass_num_inference_steps", + "second_pass_skip_initial_steps", + "frame_rate", + "expand_prompt", + "reverse_video", + "enable_safety_checker", + "constant_rate_factor", + "video" + ], + "type": "object", + "properties": { + "second_pass_skip_initial_steps": { + "description": "The number of inference steps to skip in the initial steps of the second pass. By skipping some steps at the beginning, the second pass can focus on smaller details instead of larger changes.", + "type": "integer", + "minimum": 1, + "maximum": 50, + "title": "Second Pass Skip Initial Steps", + "examples": [ + 17 + ], + "default": 17 + }, + "first_pass_num_inference_steps": { + "description": "Number of inference steps during the first pass.", + "type": "integer", + "minimum": 2, + "maximum": 50, + "title": "First Pass Num Inference Steps", + "examples": [ + 30 + ], + "default": 30 + }, + "frame_rate": { + "description": "The frame rate of the video.", + "type": "integer", + "minimum": 1, + "maximum": 60, + "title": "Frame Rate", + "examples": [ + 30 + ], + "default": 30 + }, + "prompt": { + "examples": [ + "Woman walking on a street in Tokyo" + ], + "title": "Prompt", + "type": "string", + "description": "Text prompt to guide generation" + }, + "reverse_video": { + "examples": [ + false + ], + "title": "Reverse Video", + "type": "boolean", + "description": "Whether to reverse the video.", + "default": false + }, + "expand_prompt": { + "examples": [ + false + ], + "title": "Expand Prompt", + "type": "boolean", + "description": "Whether to expand the prompt using a language model.", + "default": false + }, + "loras": { + "title": "Loras", + "type": "array", + "description": "LoRA weights to use for generation", + "items": { + "$ref": "#/components/schemas/LoRAWeight" + }, + "default": [] + }, + "second_pass_num_inference_steps": { + "description": "Number of inference steps during the second pass.", + "type": "integer", + "minimum": 2, + "maximum": 50, + "title": "Second Pass Num Inference Steps", + "examples": [ + 30 + ], + "default": 30 + }, + "num_frames": { + "description": "The number of frames in the video.", + "type": "integer", + "minimum": 9, + "maximum": 161, + "title": "Num Frames", + "examples": [ + 121 + ], + "default": 121 + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "video": { + "examples": [ + { + "video_url": "https://storage.googleapis.com/falserverless/web-examples/wan/t2v.mp4", + "start_frame_num": 24, + "reverse_video": false, + "limit_num_frames": false, + "resample_fps": false, + "strength": 1, + "target_fps": 30, + "max_num_frames": 121, + "conditioning_type": "rgb", + "preprocess": false + } + ], + "title": "Video", + "description": "Video to be extended.", + "allOf": [ + { + "$ref": "#/components/schemas/VideoConditioningInput" + } + ] + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for generation", + "default": "worst quality, inconsistent motion, blurry, jittery, distorted" + }, + "resolution": { + "enum": [ + "480p", + "720p" + ], + "title": "Resolution", + "type": 
"string", + "examples": [ + "720p" + ], + "description": "Resolution of the generated video (480p or 720p).", + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "9:16", + "1:1", + "16:9", + "auto" + ], + "title": "Aspect Ratio", + "type": "string", + "examples": [ + "auto" + ], + "description": "The aspect ratio of the video.", + "default": "auto" + }, + "constant_rate_factor": { + "description": "The constant rate factor (CRF) to compress input media with. Compressed input media more closely matches the model's training data, which can improve motion quality.", + "type": "integer", + "minimum": 20, + "maximum": 60, + "title": "Constant Rate Factor", + "examples": [ + 35 + ], + "default": 35 + }, + "first_pass_skip_final_steps": { + "minimum": 0, + "maximum": 50, + "type": "integer", + "title": "First Pass Skip Final Steps", + "description": "Number of inference steps to skip in the final steps of the first pass. By skipping some steps at the end, the first pass can focus on larger changes instead of smaller details.", + "default": 3 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for generation" + } + }, + "title": "ExtendVideoInput", + "required": [ + "prompt", + "video" + ] + }, + "LtxVideo13bDevExtendOutput": { + "x-fal-order-properties": [ + "video", + "prompt", + "seed" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Woman walking on a street in Tokyo" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/ltx-v095_extend.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "ExtendVideoOutput", + "required": [ + "video", + "prompt", + "seed" + ] + }, + "LoRAWeight": { + "x-fal-order-properties": [ + "path", + "weight_name", + "scale" + ], + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Scale", + "description": "Scale of the LoRA weight. This is a multiplier applied to the LoRA weight when loading it.", + "default": 1 + }, + "weight_name": { + "title": "Weight Name", + "type": "string", + "description": "Name of the LoRA weight. Only used if `path` is a HuggingFace repository, and is only required when the repository contains multiple LoRA weights." + } + }, + "title": "LoRAWeight", + "required": [ + "path" + ] + }, + "VideoConditioningInput": { + "x-fal-order-properties": [ + "video_url", + "conditioning_type", + "preprocess", + "start_frame_num", + "strength", + "limit_num_frames", + "max_num_frames", + "resample_fps", + "target_fps", + "reverse_video" + ], + "type": "object", + "properties": { + "video_url": { + "title": "Video URL", + "type": "string", + "description": "URL of video to use as conditioning" + }, + "start_frame_num": { + "description": "Frame number of the video from which the conditioning starts. 
Must be a multiple of 8.", + "type": "integer", + "minimum": 0, + "maximum": 160, + "title": "Start Frame Number", + "multipleOf": 8, + "default": 0 + }, + "reverse_video": { + "title": "Reverse Video", + "type": "boolean", + "description": "Whether to reverse the video. This is useful for tasks where the video conditioning should be applied in reverse order.", + "default": false + }, + "limit_num_frames": { + "title": "Limit Number of Frames", + "type": "boolean", + "description": "Whether to limit the number of frames used from the video. If True, the `max_num_frames` parameter will be used to limit the number of frames.", + "default": false + }, + "resample_fps": { + "title": "Resample FPS", + "type": "boolean", + "description": "Whether to resample the video to a specific FPS. If True, the `target_fps` parameter will be used to resample the video.", + "default": false + }, + "strength": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Strength", + "description": "Strength of the conditioning. 0.0 means no conditioning, 1.0 means full conditioning.", + "default": 1 + }, + "target_fps": { + "description": "Target FPS to resample the video to. Only relevant if `resample_fps` is True.", + "type": "integer", + "minimum": 1, + "maximum": 60, + "title": "Target FPS", + "examples": [ + 30 + ], + "default": 30 + }, + "max_num_frames": { + "description": "Maximum number of frames to use from the video. If None, all frames will be used.", + "type": "integer", + "minimum": 1, + "maximum": 161, + "title": "Maximum Number of Frames", + "examples": [ + 121 + ], + "default": 121 + }, + "conditioning_type": { + "enum": [ + "rgb", + "depth", + "pose", + "canny" + ], + "title": "Conditioning Type", + "type": "string", + "examples": [ + "rgb" + ], + "description": "Type of conditioning this video provides. This is relevant to ensure in-context LoRA weights are applied correctly, as well as selecting the correct preprocessing pipeline, when enabled.", + "default": "rgb" + }, + "preprocess": { + "title": "Preprocess", + "type": "boolean", + "description": "Whether to preprocess the video. If True, the video will be preprocessed to match the conditioning type. This is a no-op for RGB conditioning.", + "default": false + } + }, + "title": "VideoConditioningInput", + "required": [ + "video_url" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-video-13b-dev/extend/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-13b-dev/extend/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-13b-dev/extend": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LtxVideo13bDevExtendInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-13b-dev/extend/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LtxVideo13bDevExtendOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx-video-lora/multiconditioning", + "metadata": { + "display_name": "LTX Video-0.9.7 LoRA", + "category": "video-to-video", + "description": "Generate videos from prompts, images, and videos using LTX Video-0.9.7 and custom LoRA", + "status": "active", + "tags": [ + "video", + "ltx-video", + "video-to-video", + "multicondition-to-video", + "image-to-video" + ], + "updated_at": "2026-01-26T21:43:42.642Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Training.jpg", + "model_url": "https://fal.run/fal-ai/ltx-video-lora/multiconditioning", + "license_type": "commercial", + "date": "2025-05-15T19:38:04.992Z", + "group": { + "key": "ltx-video-lora", + "label": "Multicondition to Video" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 2, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-video-lora/multiconditioning", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx-video-lora/multiconditioning queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-video-lora/multiconditioning", + "category": "video-to-video", 
+ "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Training.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx-video-lora/multiconditioning", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx-video-lora/multiconditioning/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LtxVideoLoraMulticonditioningInput": { + "title": "MulticonditioningVideoInput", + "type": "object", + "properties": { + "number_of_steps": { + "description": "The number of inference steps to use.", + "type": "integer", + "minimum": 1, + "title": "Number Of Steps", + "examples": [ + 30 + ], + "maximum": 50, + "default": 30 + }, + "prompt": { + "examples": [ + "A vibrant, abstract composition featuring a person with outstretched arms, rendered in a kaleidoscope of colors against a deep, dark background. The figure is composed of intricate, swirling patterns reminiscent of a mosaic, with hues of orange, yellow, blue, and green that evoke the style of artists such as Wassily Kandinsky or Bridget Riley. The camera zooms into the face striking portrait of a man, reimagined through the lens of old-school video-game graphics. The subject's face is rendered in a kaleidoscope of colors, with bold blues and reds set against a vibrant yellow backdrop. His dark hair is pulled back, framing his profile in a dramatic pose" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the video from." 
+ }, + "reverse_video": { + "examples": [ + false + ], + "title": "Reverse Video", + "type": "boolean", + "description": "Whether to reverse the video.", + "default": false + }, + "frame_rate": { + "description": "The frame rate of the video.", + "type": "integer", + "minimum": 1, + "title": "Frame Rate", + "examples": [ + 25 + ], + "maximum": 60, + "default": 25 + }, + "expand_prompt": { + "examples": [ + false + ], + "title": "Expand Prompt", + "type": "boolean", + "description": "Whether to expand the prompt using the LLM.", + "default": false + }, + "number_of_frames": { + "description": "The number of frames in the video.", + "type": "integer", + "minimum": 9, + "title": "Number Of Frames", + "examples": [ + 89 + ], + "maximum": 161, + "default": 89 + }, + "loras": { + "title": "Loras", + "type": "array", + "description": "The LoRA weights to use for generation.", + "items": { + "$ref": "#/components/schemas/LoRAWeight" + }, + "default": [] + }, + "images": { + "description": "The image conditions to use for generation.", + "type": "array", + "items": { + "$ref": "#/components/schemas/ImageCondition" + }, + "examples": [ + [ + { + "strength": 1, + "start_frame_number": 0, + "image_url": "https://storage.googleapis.com/falserverless/model_tests/ltx/NswO1P8sCLzrh1WefqQFK_9a6bdbfa54b944c9a770338159a113fd.jpg" + }, + { + "strength": 1, + "start_frame_number": 80, + "image_url": "https://storage.googleapis.com/falserverless/model_tests/ltx/YAPOGvmS2tM_Krdp7q6-d_267c97e017c34f679844a4477dfcec38.jpg" + } + ] + ], + "title": "Images", + "default": [] + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "Whether to enable the safety checker.", + "default": true + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to use.", + "default": "blurry, low quality, low resolution, inconsistent motion, jittery, distorted" + }, + "aspect_ratio": { + "enum": [ + "16:9", + "1:1", + "9:16", + "auto" + ], + "title": "Aspect Ratio", + "type": "string", + "examples": [ + "auto" + ], + "description": "The aspect ratio of the video.", + "default": "auto" + }, + "resolution": { + "enum": [ + "480p", + "720p" + ], + "title": "Resolution", + "type": "string", + "examples": [ + "720p" + ], + "description": "The resolution of the video.", + "default": "720p" + }, + "videos": { + "title": "Videos", + "type": "array", + "description": "The video conditions to use for generation.", + "items": { + "$ref": "#/components/schemas/VideoCondition" + }, + "default": [] + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed to use for generation." + } + }, + "description": "Request model for text-to-video generation with multiple conditions.", + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "loras", + "resolution", + "aspect_ratio", + "number_of_frames", + "number_of_steps", + "frame_rate", + "seed", + "expand_prompt", + "reverse_video", + "enable_safety_checker", + "images", + "videos" + ], + "required": [ + "prompt" + ] + }, + "LtxVideoLoraMulticonditioningOutput": { + "title": "MulticonditioningVideoOutput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A vibrant, abstract composition featuring a person with outstretched arms, rendered in a kaleidoscope of colors against a deep, dark background. 
The figure is composed of intricate, swirling patterns reminiscent of a mosaic, with hues of orange, yellow, blue, and green that evoke the style of artists such as Wassily Kandinsky or Bridget Riley. The camera zooms into the face striking portrait of a man, reimagined through the lens of old-school video-game graphics. The subject's face is rendered in a kaleidoscope of colors, with bold blues and reds set against a vibrant yellow backdrop. His dark hair is pulled back, framing his profile in a dramatic pose" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt used for generation." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/gallery/ltx-multicondition.mp4" + } + ], + "title": "Video", + "description": "The generated video.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "prompt", + "seed", + "video" + ], + "required": [ + "prompt", + "seed", + "video" + ] + }, + "LoRAWeight": { + "title": "LoRAWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "title": "Scale", + "type": "number", + "maximum": 4, + "description": "Scale of the LoRA weight. This is a multiplier applied to the LoRA weight when loading it.", + "default": 1 + }, + "weight_name": { + "title": "Weight Name", + "type": "string", + "description": "Name of the LoRA weight. Only used if `path` is a HuggingFace repository, and is only required when the repository contains multiple LoRA weights." + } + }, + "description": "LoRA weight to use for generation.", + "x-fal-order-properties": [ + "path", + "weight_name", + "scale" + ], + "required": [ + "path" + ] + }, + "ImageCondition": { + "title": "ImageCondition", + "type": "object", + "properties": { + "strength": { + "minimum": 0, + "title": "Strength", + "type": "number", + "maximum": 1, + "description": "The strength of the condition.", + "default": 1 + }, + "start_frame_number": { + "minimum": 0, + "title": "Start Frame Number", + "type": "integer", + "maximum": 160, + "description": "The frame number to start the condition on.", + "default": 0 + }, + "image_url": { + "title": "Image Url", + "type": "string", + "description": "The URL of the image to use as input." + } + }, + "description": "Image condition to use for generation.", + "x-fal-order-properties": [ + "image_url", + "start_frame_number", + "strength" + ], + "required": [ + "image_url" + ] + }, + "VideoCondition": { + "title": "VideoCondition", + "type": "object", + "properties": { + "strength": { + "minimum": 0, + "title": "Strength", + "type": "number", + "maximum": 1, + "description": "The strength of the condition.", + "default": 1 + }, + "start_frame_number": { + "minimum": 0, + "title": "Start Frame Number", + "type": "integer", + "maximum": 160, + "description": "The frame number to start the condition on.", + "default": 0 + }, + "video_url": { + "title": "Video Url", + "type": "string", + "description": "The URL of the video to use as input." 
+ } + }, + "description": "Video condition to use for generation.", + "x-fal-order-properties": [ + "video_url", + "start_frame_number", + "strength" + ], + "required": [ + "video_url" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-video-lora/multiconditioning/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-lora/multiconditioning/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-lora/multiconditioning": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LtxVideoLoraMulticonditioningInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-lora/multiconditioning/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LtxVideoLoraMulticonditioningOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/magi/extend-video", + "metadata": { + "display_name": "MAGI-1", + "category": "video-to-video", + "description": "MAGI-1 extends videos with an exceptional understanding of physical interactions and prompts", + "status": "active", + "tags": [ + "video-to-video" + ], + "updated_at": "2026-01-26T21:43:52.576Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-4.jpg", + "model_url": "https://fal.run/fal-ai/magi/extend-video", + "license_type": "commercial", + "date": "2025-04-23T22:39:28.842Z", + "group": { + "key": "magi", + "label": "Extend Video" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 9, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/magi/extend-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/magi/extend-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/magi/extend-video", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-4.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/magi/extend-video", + "documentationUrl": "https://fal.ai/models/fal-ai/magi/extend-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "MagiExtendVideoInput": { + "title": "MagiVideoExtensionRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "" + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt to guide video generation." + }, + "resolution": { + "enum": [ + "480p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video (480p or 720p). 480p is 0.5 billing units, and 720p is 1 billing unit.", + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "9:16", + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio of the generated video. If 'auto', the aspect ratio will be determined automatically based on the input image.", + "default": "auto" + }, + "video_url": { + "examples": [ + "https://v3.fal.media/files/zebra/w4T087gvzG5LMGipMpPCO_pour-2s.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "URL of the input video to represent the beginning of the video. If the input video does not match the chosen aspect ratio, it is resized and center cropped." + }, + "start_frame": { + "minimum": 0, + "title": "Start Frame", + "type": "integer", + "description": "The frame to begin the generation from, with the remaining frames will be treated as the prefix video. The final video will contain the frames up until this number unchanged, followed by the generated frames. The default start frame is 32 frames before the end of the video, which gives optimal results." + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "num_inference_steps": { + "enum": [ + 4, + 8, + 16, + 32, + 64 + ], + "title": "Num Inference Steps", + "type": "integer", + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "default": 16 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility. If None, a random seed is chosen." + }, + "num_frames": { + "minimum": 96, + "title": "Num Frames", + "type": "integer", + "maximum": 192, + "description": "Number of frames to generate. Must be between 96 and 192 (inclusive). Each additional 24 frames beyond 96 incurs an additional billing unit.", + "default": 96 + } + }, + "x-fal-order-properties": [ + "prompt", + "video_url", + "num_frames", + "start_frame", + "seed", + "resolution", + "num_inference_steps", + "enable_safety_checker", + "aspect_ratio" + ], + "required": [ + "prompt", + "video_url" + ] + }, + "MagiExtendVideoOutput": { + "title": "MagiVideoExtensionResponse", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/zebra/yVrs367uHeCqrBGY-VICa_3b064421-fe96-4ccb-a3ea-4f37b54e682e.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." 
+ }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/magi/extend-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/magi/extend-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/magi/extend-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MagiExtendVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/magi/extend-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MagiExtendVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/magi-distilled/extend-video", + "metadata": { + "display_name": "MAGI-1 (Distilled)", + "category": "video-to-video", + "description": "MAGI-1 distilled extends videos faster with an exceptional understanding of physical interactions and prompts", + "status": "active", + "tags": [ + "video-to-video", + "video-extend" + ], + "updated_at": "2026-01-26T21:43:53.409Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-4.jpg", + "model_url": "https://fal.run/fal-ai/magi-distilled/extend-video", + "license_type": "commercial", + "date": "2025-04-23T02:31:47.410Z", + "group": { + "key": "magi", + "label": "Extend Video (Distilled)" + }, + "highlighted": false, + "kind": "inference", + "duration_estimate": 1, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/magi-distilled/extend-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/magi-distilled/extend-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/magi-distilled/extend-video", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-4.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/magi-distilled/extend-video", + "documentationUrl": "https://fal.ai/models/fal-ai/magi-distilled/extend-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "MagiDistilledExtendVideoInput": { + "title": "MagiVideoExtensionRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "" + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt to guide video generation." + }, + "resolution": { + "enum": [ + "480p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video (480p or 720p). 480p is 0.5 billing units, and 720p is 1 billing unit.", + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "auto", + "16:9", + "9:16", + "1:1" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio of the generated video. If 'auto', the aspect ratio will be determined automatically based on the input image.", + "default": "auto" + }, + "video_url": { + "examples": [ + "https://v3.fal.media/files/rabbit/lTH9PY_LQG0FjueBxMfDN_0395dec3-0c4a-4c25-8399-ebb198b73a30.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "URL of the input video to represent the beginning of the video. If the input video does not match the chosen aspect ratio, it is resized and center cropped." + }, + "start_frame": { + "minimum": 0, + "title": "Start Frame", + "type": "integer", + "description": "The frame to begin the generation from, with the remaining frames will be treated as the prefix video. The final video will contain the frames up until this number unchanged, followed by the generated frames. The default start frame is 32 frames before the end of the video, which gives optimal results." + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": true + }, + "num_inference_steps": { + "enum": [ + 4, + 8, + 16, + 32 + ], + "title": "Num Inference Steps", + "type": "integer", + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "default": 16 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility. If None, a random seed is chosen." + }, + "num_frames": { + "minimum": 96, + "title": "Num Frames", + "type": "integer", + "maximum": 192, + "description": "Number of frames to generate. Must be between 96 and 192 (inclusive). Each additional 24 frames beyond 96 incurs an additional billing unit.", + "default": 96 + } + }, + "x-fal-order-properties": [ + "prompt", + "video_url", + "num_frames", + "start_frame", + "seed", + "resolution", + "num_inference_steps", + "enable_safety_checker", + "aspect_ratio" + ], + "required": [ + "prompt", + "video_url" + ] + }, + "MagiDistilledExtendVideoOutput": { + "title": "MagiVideoExtensionResponse", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." 
+ }, + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/zebra/2UjT7u_8oF2gGfxBiT_gL_91a8f175-fd57-4ed6-aedc-1957aa558363.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/magi-distilled/extend-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/magi-distilled/extend-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/magi-distilled/extend-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MagiDistilledExtendVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/magi-distilled/extend-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MagiDistilledExtendVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/wan-vace", + "metadata": { + "display_name": "Vace", + "category": "video-to-video", + "description": "Vace is a video generation model that uses a source image, mask, and video to create prompted videos with controllable sources.", + "status": "active", + "tags": [ + "video-to-video", + "image-to-video", + "text-to-video" + ], + "updated_at": "2026-01-26T21:43:56.519Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/hunyuan-video-image-to-video.webp", + "model_url": "https://fal.run/fal-ai/wan-vace", + "license_type": "commercial", + "date": "2025-04-11T20:48:26.011Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/wan-vace", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/wan-vace queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/wan-vace", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/hunyuan-video-image-to-video.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/wan-vace", + "documentationUrl": "https://fal.ai/models/fal-ai/wan-vace/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position."
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "WanVaceInput": { + "title": "WanT2VRequest", + "type": "object", + "properties": { + "shift": { + "minimum": 1, + "title": "Shift", + "type": "number", + "maximum": 10, + "description": "Shift parameter for video generation.", + "default": 5 + }, + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/vace/src_video.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "URL to the source video file. If provided, the model will use this video as a reference." + }, + "prompt": { + "examples": [ + "The video shows a man riding a horse on a vast grassland. He has long lavender hair and wears a traditional dress of a white top and black pants. The animation style makes him look like he is doing some kind of outdoor activity or performing. The background is a spectacular mountain range and cloud sky, giving a sense of tranquility and vastness. The entire video is shot from a fixed angle, focusing on the rider and his horse." + ], + "title": "Prompt", + "type": "string", + "description": "The text prompt to guide video generation." + }, + "ref_image_urls": { + "examples": [ + [ + "https://storage.googleapis.com/falserverless/vace/src_ref_image_1.png" + ] + ], + "title": "Ref Image Urls", + "type": "array", + "description": "URLs of source reference images. If provided, the model will use these images as references.", + "items": { + "type": "string" + } + }, + "task": { + "enum": [ + "depth", + "inpainting" + ], + "title": "Task", + "type": "string", + "description": "Task type for the model.", + "default": "depth" + }, + "frames_per_second": { + "minimum": 5, + "title": "Frames Per Second", + "type": "integer", + "maximum": 24, + "description": "Frames per second of the generated video. Must be between 5 and 24.", + "default": 16 + }, + "mask_image_url": { + "title": "Mask Image Url", + "type": "string", + "description": "URL to the guiding mask file. If provided, the model will use this mask as a reference to create the masked video, and the mask video URL will be ignored." + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": false + }, + "num_frames": { + "minimum": 81, + "title": "Num Frames", + "type": "integer", + "maximum": 240, + "description": "Number of frames to generate. Must be between 81 and 240 (inclusive). This applies only when reference images are the sole input; if a source video or mask video is provided, the output length matches the source, up to 241 frames.", + "default": 81 + }, + "negative_prompt": { + "examples": [ + "bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + ], + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for video generation.", + "default": "bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards" + }, + "aspect_ratio": { + "enum": [ + "auto", + "9:16", + "16:9" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio of the generated video (auto, 16:9, or 9:16).", + "default": "16:9" + }, + "resolution": { + "enum": [ + "480p", + "580p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video (480p, 580p, or 720p).", + "default": "720p" + }, + "mask_video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/vace/src_mask.mp4" + ], + "title": "Mask Video Url", + "type": "string", + "description": "URL to the source mask file. If provided, the model will use this mask as a reference." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducibility. If None, a random seed is chosen." + }, + "num_inference_steps": { + "minimum": 2, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 40, + "description": "Number of inference steps for sampling. Higher values give better quality but take longer.", + "default": 30 + }, + "preprocess": { + "title": "Preprocess", + "type": "boolean", + "description": "Whether to preprocess the input video.", + "default": false + }, + "enable_prompt_expansion": { + "examples": [ + true + ], + "title": "Enable Prompt Expansion", + "type": "boolean", + "description": "Whether to enable prompt expansion.", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "num_frames", + "frames_per_second", + "task", + "shift", + "seed", + "resolution", + "aspect_ratio", + "num_inference_steps", + "video_url", + "mask_video_url", + "mask_image_url", + "ref_image_urls", + "enable_safety_checker", + "enable_prompt_expansion", + "preprocess" + ], + "required": [ + "prompt" + ] + }, + "WanVaceOutput": { + "title": "WanT2VResponse", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation."
+ }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/vace/out_video_vace.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/wan-vace/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-vace/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/wan-vace": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanVaceInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/wan-vace/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WanVaceOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "cassetteai/video-sound-effects-generator", + "metadata": { + "display_name": "Video Sound Effects Generator", + "category": "video-to-video", + "description": "Add sound effects to your videos", + "status": "active", + "tags": [ + "sound-effects", + "sfx", + "cassetteai" + ], + "updated_at": "2026-01-26T21:43:57.551Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/cassetteai-video-sound-effects-generator.webp", + "model_url": "https://fal.run/cassetteai/video-sound-effects-generator", + "license_type": "commercial", + "date": "2025-04-07T21:35:38.015Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for cassetteai/video-sound-effects-generator", + "version": "1.0.0", + "description": "The OpenAPI schema for the cassetteai/video-sound-effects-generator queue.", + "x-fal-metadata": { + "endpointId": "cassetteai/video-sound-effects-generator", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/cassetteai-video-sound-effects-generator.webp", + "playgroundUrl": "https://fal.ai/models/cassetteai/video-sound-effects-generator", + "documentationUrl": "https://fal.ai/models/cassetteai/video-sound-effects-generator/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "VideoSoundEffectsGeneratorInput": { + "title": "VideoInput", + "type": "object", + "properties": { + "video_url": { + "examples": [ + "https://v3.fal.media/files/tiger/3NOa3BqrJfr3jJBMqGexs_final_with_sfx.mp4", + "https://v3.fal.media/files/rabbit/vkNtbcJ3x7KmzjJZeVWQe_final_with_sfx.mp4" + ], + "description": "A video file to analyze & re-sound with generated SFX.", + "$ref": "#/components/schemas/Video" + } + }, + "description": "Pydantic model for receiving a video file to analyze and re-sound.", + "x-fal-order-properties": [ + "video_url" + ], + "required": [ + "video_url" + ] + }, + "VideoSoundEffectsGeneratorOutput": { + "title": "VideoOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/tiger/3NOa3BqrJfr3jJBMqGexs_final_with_sfx.mp4" + } + ], + "title": "Please ensure to unmute the video after playing manually to hear the SFX.", + "description": "The final video with the newly generated SFX track.", + "$ref": "#/components/schemas/File" + } + }, + "description": "Pydantic model for returning the re-sounded video back to the client.", + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "Video": { + "title": "Video", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "anyOf": [ + { + "format": "binary", + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Data", + "description": "File data" + } + }, + "description": "Represents a video file.", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes." + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "examples": [ + "image/png" + ], + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/cassetteai/video-sound-effects-generator/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/cassetteai/video-sound-effects-generator/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/cassetteai/video-sound-effects-generator": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VideoSoundEffectsGeneratorInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/cassetteai/video-sound-effects-generator/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VideoSoundEffectsGeneratorOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/sync-lipsync/v2", + "metadata": { + "display_name": "Sync Lipsync 2.0", + "category": "video-to-video", + "description": "Generate realistic lipsync animations from audio using advanced algorithms for high-quality synchronization with Sync Lipsync 2.0 model", + "status": "active", + "tags": [ + "animation", + "lip sync" + ], + "updated_at": "2026-01-26T21:43:58.327Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/kangaroo/LXWSAzq0Snzf2xmt-qPQL_62439375ce7745769a977f3989d035d7.jpg", + "model_url": "https://fal.run/fal-ai/sync-lipsync/v2", + "license_type": "commercial", + "date": "2025-04-01T18:57:06.699Z", + "group": { + "key": "sync-lipsync", + "label": "Lipsync 2.0" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/sync-lipsync/v2", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/sync-lipsync/v2 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/sync-lipsync/v2", + "category": "video-to-video", + "thumbnailUrl": 
"https://fal.media/files/kangaroo/LXWSAzq0Snzf2xmt-qPQL_62439375ce7745769a977f3989d035d7.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/sync-lipsync/v2", + "documentationUrl": "https://fal.ai/models/fal-ai/sync-lipsync/v2/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "SyncLipsyncV2Input": { + "title": "LipSyncV2Input", + "type": "object", + "properties": { + "model": { + "enum": [ + "lipsync-2", + "lipsync-2-pro" + ], + "title": "Model", + "type": "string", + "description": "The model to use for lipsyncing. `lipsync-2-pro` will cost roughly 1.67 times as much as `lipsync-2` for the same duration.", + "default": "lipsync-2" + }, + "video_url": { + "examples": [ + "https://v3.fal.media/files/tiger/IugLCDJRIoGqvqTa-EJTr_3wg74vCqyNuQ-IiBd77MM_output.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "URL of the input video" + }, + "sync_mode": { + "enum": [ + "cut_off", + "loop", + "bounce", + "silence", + "remap" + ], + "title": "Sync Mode", + "type": "string", + "description": "Lipsync mode when audio and video durations are out of sync.", + "default": "cut_off" + }, + "audio_url": { + "examples": [ + "https://fal.media/files/lion/vyFWygmZsIZlUO4s0nr2n.wav" + ], + "title": "Audio Url", + "type": "string", + "description": "URL of the input audio" + } + }, + "x-fal-order-properties": [ + "model", + "video_url", + "audio_url", + "sync_mode" + ], + "required": [ + "video_url", + "audio_url" + ] + }, + "SyncLipsyncV2Output": { + "title": "LipSyncV2Output", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/kangaroo/WIhlgDEJbccwGwAsvL3vz_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/sync-lipsync/v2/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sync-lipsync/v2/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/sync-lipsync/v2": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SyncLipsyncV2Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sync-lipsync/v2/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SyncLipsyncV2Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/latentsync", + "metadata": { + "display_name": "LatentSync", + "category": "video-to-video", + "description": "LatentSync is a video-to-video model that generates lip sync animations from audio using advanced algorithms for high-quality synchronization.", + "status": "active", + "tags": [ + "animation", + "lip sync" + ], + "updated_at": "2026-01-26T21:44:00.196Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/web-examples/latentsync/latentsync-3.jpg", + "model_url": "https://fal.run/fal-ai/latentsync", + "license_type": "commercial", + "date": "2025-03-25T10:24:14.332Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/latentsync", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/latentsync queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/latentsync", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/web-examples/latentsync/latentsync-3.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/latentsync", + 
"documentationUrl": "https://fal.ai/models/fal-ai/latentsync/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LatentsyncInput": { + "x-fal-order-properties": [ + "video_url", + "audio_url", + "guidance_scale", + "seed", + "loop_mode" + ], + "type": "object", + "properties": { + "video_url": { + "examples": [ + "https://fal.media/files/koala/8teUPbRRMtAUTORDvqy0l.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "The URL of the video to generate the lip sync for." + }, + "guidance_scale": { + "description": "Guidance scale for the model inference", + "type": "number", + "minimum": 1, + "title": "Guidance Scale", + "maximum": 2, + "step": 0.1, + "default": 1 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for generation. If None, a random seed will be used." + }, + "audio_url": { + "examples": [ + "https://fal.media/files/lion/vyFWygmZsIZlUO4s0nr2n.wav" + ], + "title": "Audio Url", + "type": "string", + "description": "The URL of the audio to generate the lip sync for." + }, + "loop_mode": { + "enum": [ + "pingpong", + "loop" + ], + "title": "Loop Mode", + "type": "string", + "description": "Video loop mode when audio is longer than video. Options: pingpong, loop" + } + }, + "title": "Input", + "required": [ + "video_url", + "audio_url" + ] + }, + "LatentsyncOutput": { + "x-fal-order-properties": [ + "video" + ], + "type": "object", + "properties": { + "video": { + "title": "Video", + "description": "The generated video with the lip sync.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "Output", + "required": [ + "video" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/latentsync/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/latentsync/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/latentsync": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LatentsyncInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/latentsync/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LatentsyncOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/pika/v2/pikadditions", + "metadata": { + "display_name": "Pikadditions (v2)", + "category": "video-to-video", + "description": "Pikadditions is a powerful video-to-video AI model that allows you to add anyone or anything to any video with seamless integration.", + "status": "active", + "tags": [ + "editing", + "effects", + "animation" + ], + "updated_at": "2026-01-26T21:44:00.961Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/1wavesunset.webp", + "model_url": "https://fal.run/fal-ai/pika/v2/pikadditions", + "license_type": "commercial", + "date": "2025-03-14T00:00:00.000Z", + "group": { + "key": "pika", + "label": "Pikadditions (v2)" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/pika/v2/pikadditions", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/pika/v2/pikadditions queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/pika/v2/pikadditions", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/1wavesunset.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/pika/v2/pikadditions", + 
"documentationUrl": "https://fal.ai/models/fal-ai/pika/v2/pikadditions/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "PikaV2PikadditionsInput": { + "x-fal-order-properties": [ + "video_url", + "image_url", + "prompt", + "negative_prompt", + "seed" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A parrot in the shoulder of the person picking up cookies" + ], + "title": "Prompt", + "type": "string", + "description": "Text prompt describing what to add" + }, + "video_url": { + "examples": [ + "https://v3.fal.media/files/monkey/vXi5n_oq0Qpnbs7Eb2k-b_output.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "URL of the input video" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed for the random number generator" + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt to guide the model" + }, + "image_url": { + "examples": [ + "https://fal.media/files/zebra/V3_Kpw_eqbVoOAIpNKb3Z_c0f2425a9d224d8b9b8d9b800612b782.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to add" + } + }, + "title": "PikadditionsRequest", + "description": "Request model for Pikadditions endpoint", + "required": [ + "video_url", + "image_url" + ] + }, + "PikaV2PikadditionsOutput": { + "x-fal-order-properties": [ + "video" + ], + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/lion/sbM48rVVi7y0yh5EuMtoC_output.mp4" + } + ], + "title": "Video", + "description": "The generated video with added objects/images", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "title": "PikadditionsOutput", + "description": "Output from Pikadditions generation", + "required": [ + "video" + ] + }, + "File": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "File", + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/pika/v2/pikadditions/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pika/v2/pikadditions/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/pika/v2/pikadditions": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PikaV2PikadditionsInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/pika/v2/pikadditions/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PikaV2PikadditionsOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx-video-v095/multiconditioning", + "metadata": { + "display_name": "LTX Video-0.9.5", + "category": "video-to-video", + "description": "Generate videos from prompts, images, and videos using LTX Video-0.9.5", + "status": "active", + "tags": [ + "video", + "image-to-video", + "text-to-video" + ], + "updated_at": "2026-01-26T21:44:21.374Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/ltx-0.9.5.webp", + "model_url": "https://fal.run/fal-ai/ltx-video-v095/multiconditioning", + "date": "2025-03-05T00:00:00.000Z", + "group": { + "key": "ltx-video-v0.9.5", + "label": "Multicondition to Video" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-video-v095/multiconditioning", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx-video-v095/multiconditioning queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-video-v095/multiconditioning", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/ltx-0.9.5.webp", + "playgroundUrl":
"https://fal.ai/models/fal-ai/ltx-video-v095/multiconditioning", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx-video-v095/multiconditioning/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LtxVideoV095MulticonditioningInput": { + "title": "MultiConditioningVideoInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "\n A vibrant, abstract composition featuring a person with outstretched arms, rendered in a kaleidoscope of colors against a deep, dark background. The figure is composed of intricate, swirling patterns reminiscent of a mosaic, with hues of orange, yellow, blue, and green that evoke the style of artists such as Wassily Kandinsky or Bridget Riley. \n\nThe camera zooms into the face striking portrait of a man, reimagined through the lens of old-school video-game graphics. The subject's face is rendered in a kaleidoscope of colors, with bold blues and reds set against a vibrant yellow backdrop. 
His dark hair is pulled back, framing his profile in a dramatic pose\n " + ], + "title": "Prompt", + "type": "string", + "description": "Text prompt to guide generation" + }, + "resolution": { + "enum": [ + "480p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video (480p or 720p).", + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "9:16", + "16:9" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio of the generated video (16:9 or 9:16).", + "default": "16:9" + }, + "expand_prompt": { + "title": "Expand Prompt", + "type": "boolean", + "description": "Whether to expand the prompt using the model's own capabilities.", + "default": true + }, + "images": { + "examples": [ + [ + { + "start_frame_num": 0, + "image_url": "https://storage.googleapis.com/falserverless/model_tests/ltx/NswO1P8sCLzrh1WefqQFK_9a6bdbfa54b944c9a770338159a113fd.jpg" + }, + { + "start_frame_num": 120, + "image_url": "https://storage.googleapis.com/falserverless/model_tests/ltx/YAPOGvmS2tM_Krdp7q6-d_267c97e017c34f679844a4477dfcec38.jpg" + } + ] + ], + "title": "Images", + "type": "array", + "description": "URL of images to use as conditioning", + "items": { + "$ref": "#/components/schemas/ImageConditioningInput" + }, + "default": [] + }, + "videos": { + "title": "Videos", + "type": "array", + "description": "Videos to use as conditioning", + "items": { + "$ref": "#/components/schemas/VideoConditioningInput" + }, + "default": [] + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for generation" + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of inference steps", + "default": 40 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for generation", + "default": "worst quality, inconsistent motion, blurry, jittery, distorted" + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "resolution", + "aspect_ratio", + "seed", + "num_inference_steps", + "expand_prompt", + "images", + "videos" + ], + "required": [ + "prompt" + ] + }, + "LtxVideoV095MulticonditioningOutput": { + "title": "MulticonditioningVideoOutput", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/gallery/ltx-multicondition.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "ImageConditioningInput": { + "title": "ImageConditioningInput", + "type": "object", + "properties": { + "start_frame_num": { + "minimum": 0, + "maximum": 120, + "type": "integer", + "title": "Start Frame Num", + "multipleOf": 8, + "description": "Frame number of the image from which the conditioning starts. Must be a multiple of 8." 
+ }, + "image_url": { + "title": "Image Url", + "type": "string", + "description": "URL of image to use as conditioning" + } + }, + "x-fal-order-properties": [ + "image_url", + "start_frame_num" + ], + "required": [ + "image_url", + "start_frame_num" + ] + }, + "VideoConditioningInput": { + "title": "VideoConditioningInput", + "type": "object", + "properties": { + "video_url": { + "title": "Video Url", + "type": "string", + "description": "URL of video to be extended" + }, + "start_frame_num": { + "minimum": 0, + "maximum": 120, + "type": "integer", + "title": "Start Frame Num", + "multipleOf": 8, + "description": "Frame number of the video from which the conditioning starts. Must be a multiple of 8." + } + }, + "x-fal-order-properties": [ + "video_url", + "start_frame_num" + ], + "required": [ + "video_url", + "start_frame_num" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-video-v095/multiconditioning/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-v095/multiconditioning/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-v095/multiconditioning": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LtxVideoV095MulticonditioningInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-v095/multiconditioning/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LtxVideoV095MulticonditioningOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ltx-video-v095/extend", + "metadata": { + "display_name": "LTX Video-0.9.5", + "category": "video-to-video", + "description": "Generate videos from prompts and videos using LTX Video-0.9.5", + "status": "active", + "tags": [ + "video", + "video-to-video" + ], + "updated_at": "2026-01-26T21:44:22.795Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/ltx-0.9.5.webp", + "model_url": "https://fal.run/fal-ai/ltx-video-v095/extend", + "date": "2025-03-05T00:00:00.000Z", + "group": { + "key": "ltx-video-v0.9.5", + "label": "Extend Video" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ltx-video-v095/extend", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ltx-video-v095/extend queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ltx-video-v095/extend", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/ltx-0.9.5.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/ltx-video-v095/extend", + "documentationUrl": "https://fal.ai/models/fal-ai/ltx-video-v095/extend/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "LtxVideoV095ExtendInput": { + "title": "ExtendVideoInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Woman walking on a street in Tokyo" + ], + "title": "Prompt", + "type": "string", + "description": "Text prompt to guide generation" + }, + "resolution": { + "enum": [ + "480p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "Resolution of the generated video (480p or 720p).", + "default": "720p" + }, + "aspect_ratio": { + "enum": [ + "9:16", + "16:9" + ], + "title": "Aspect Ratio", + "type": "string", + "description": "Aspect ratio of the generated video (16:9 or 9:16).", + "default": "16:9" + }, + "expand_prompt": { + "title": "Expand Prompt", + "type": "boolean", + "description": "Whether to expand the prompt using the model's own capabilities.", + "default": true + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for generation" + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of inference steps", + "default": 40 + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "Negative prompt for generation", + "default": "worst quality, inconsistent motion, blurry, jittery, distorted" + }, + "video": { + "examples": [ + { + "video_url": "https://storage.googleapis.com/falserverless/web-examples/wan/t2v.mp4", + "start_frame_num": 24 + } + ], + "title": "Video", + "description": "Video to be extended.", + "allOf": [ + { + "$ref": "#/components/schemas/VideoConditioningInput" + } + ] + } + }, + "x-fal-order-properties": [ + "prompt", + "negative_prompt", + "resolution", + "aspect_ratio", + "seed", + "num_inference_steps", + "expand_prompt", + "video" + ], + "required": [ + "prompt", + "video" + ] + }, + "LtxVideoV095ExtendOutput": { + "title": "ExtendVideoOutput", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generation." + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/ltx-v095_extend.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "VideoConditioningInput": { + "title": "VideoConditioningInput", + "type": "object", + "properties": { + "video_url": { + "title": "Video Url", + "type": "string", + "description": "URL of video to be extended" + }, + "start_frame_num": { + "minimum": 0, + "maximum": 120, + "type": "integer", + "title": "Start Frame Num", + "multipleOf": 8, + "description": "Frame number of the video from which the conditioning starts. Must be a multiple of 8." + } + }, + "x-fal-order-properties": [ + "video_url", + "start_frame_num" + ], + "required": [ + "video_url", + "start_frame_num" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." 
+ }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ltx-video-v095/extend/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-v095/extend/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-v095/extend": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LtxVideoV095ExtendInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ltx-video-v095/extend/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LtxVideoV095ExtendOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/topaz/upscale/video", + "metadata": { + "display_name": "Topaz Video Upscale", + "category": "video-to-video", + "description": "Professional-grade video upscaling using Topaz technology. 
Enhance your videos with high-quality upscaling.", + "status": "active", + "tags": [ + "upscaling", + "high-res" + ], + "updated_at": "2026-01-26T21:44:04.006Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/topaz-video-upscale.png", + "model_url": "https://fal.run/fal-ai/topaz/upscale/video", + "license_type": "commercial", + "date": "2025-03-04T00:00:00.000Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/topaz/upscale/video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/topaz/upscale/video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/topaz/upscale/video", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/topaz-video-upscale.png", + "playgroundUrl": "https://fal.ai/models/fal-ai/topaz/upscale/video", + "documentationUrl": "https://fal.ai/models/fal-ai/topaz/upscale/video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "TopazUpscaleVideoInput": { + "title": "VideoUpscaleRequest", + "type": "object", + "properties": { + "H264_output": { + "title": "H264 Output", + "type": "boolean", + "description": "Whether to use H264 codec for output video. Default is H265.", + "default": false + }, + "video_url": { + "examples": [ + "https://v3.fal.media/files/kangaroo/y5-1YTGpun17eSeggZMzX_video-1733468228.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "URL of the video to upscale" + }, + "upscale_factor": { + "minimum": 1, + "maximum": 4, + "type": "number", + "title": "Upscale Factor", + "description": "Factor to upscale the video by (e.g. 2.0 doubles width and height)", + "default": 2 + }, + "target_fps": { + "minimum": 16, + "maximum": 60, + "type": "integer", + "title": "Target Fps", + "description": "Target FPS for frame interpolation. If set, frame interpolation will be enabled." 
+ } + }, + "x-fal-order-properties": [ + "video_url", + "upscale_factor", + "target_fps", + "H264_output" + ], + "required": [ + "video_url" + ] + }, + "TopazUpscaleVideoOutput": { + "title": "VideoUpscaleOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/penguin/ztj_LB4gQlW6HIfVs8zX4_upscaled.mp4" + } + ], + "title": "Video", + "description": "The upscaled video file", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/topaz/upscale/video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/topaz/upscale/video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/topaz/upscale/video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TopazUpscaleVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/topaz/upscale/video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TopazUpscaleVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ben/v2/video", + "metadata": { + "display_name": "Ben-Video-Bg-Rm", + "category": "video-to-video", + "description": "A model for high quality and smooth background removal for videos.", + "status": "active", + "tags": [ + "segmentation", + "background removal" + ], + "updated_at": "2026-01-26T21:44:28.427Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/Ben2/Vi9PBzFF8BfuKGsLCvoH-_773fe2c0efc744af900eaac2047b9b5f.webp", + "model_url": "https://fal.run/fal-ai/ben/v2/video", + "date": "2025-02-11T00:00:00.000Z", + "group": { + "key": "ben-v2", + "label": "Background Remover (video)" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ben/v2/video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ben/v2/video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ben/v2/video", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/Ben2/Vi9PBzFF8BfuKGsLCvoH-_773fe2c0efc744af900eaac2047b9b5f.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/ben/v2/video", + "documentationUrl": "https://fal.ai/models/fal-ai/ben/v2/video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "BenV2VideoInput": { + "title": "Ben2InputVideo", + "type": "object", + "properties": { + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/gallery/Ben2/100063-video-2160.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "URL of video to be used for background removal." + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "Random seed for reproducible generation." + }, + "background_color": { + "title": "Background Color", + "type": "array", + "minItems": 3, + "description": "Optional RGB values (0-255) for the background color. If not provided, the background will be transparent. For ex: [0, 0, 0]", + "maxItems": 3, + "items": { + "0": { + "type": "integer" + }, + "1": { + "type": "integer" + }, + "2": { + "type": "integer" + } + } + } + }, + "x-fal-order-properties": [ + "video_url", + "background_color", + "seed" + ], + "required": [ + "video_url" + ] + }, + "BenV2VideoOutput": { + "title": "Ben2OutputVideo", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + }, + "video": { + "examples": [ + { + "content_type": "video/mp4", + "url": "https://storage.googleapis.com/falserverless/gallery/Ben2/foreground.mp4" + } + ], + "title": "Video", + "description": "The generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ben/v2/video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
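`BenV2VideoInput` above takes a `video_url` plus an optional three-element RGB `background_color` (0-255 per channel); omitting it yields a transparent background. A sketch, again assuming the hypothetical `runQueued` helper:

```ts
// fal-ai/ben/v2/video: omit background_color for a transparent result,
// or pass an [R, G, B] triple of 0-255 integers per the schema above.
type Rgb = [number, number, number]

const matted = await runQueued<
  { video_url: string; background_color?: Rgb; seed?: number },
  { video: { url: string }; seed: number } // Ben2OutputVideo
>('fal-ai/ben/v2/video', {
  video_url: 'https://storage.googleapis.com/falserverless/gallery/Ben2/100063-video-2160.mp4',
  background_color: [0, 255, 0], // e.g. a green matte; drop for transparency
})
```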
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ben/v2/video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ben/v2/video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BenV2VideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ben/v2/video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BenV2VideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/hunyuan-video/video-to-video", + "metadata": { + "display_name": "Hunyuan Video (Video-to-Video)", + "category": "video-to-video", + "description": "Hunyuan Video is an Open video generation model with high visual quality, motion diversity, text-video alignment, and generation stability. 
Use this endpoint to generate videos from videos.", + "status": "active", + "tags": [ + "video to video", + "motion" + ], + "updated_at": "2026-01-26T21:44:29.329Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/hunyuan-video.webp?v=1", + "model_url": "https://fal.run/fal-ai/hunyuan-video/video-to-video", + "date": "2025-01-30T00:00:00.000Z", + "group": { + "key": "hunyuan-text-to-video", + "label": "Video-to-Video" + }, + "highlighted": false, + "duration_estimate": 4, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/hunyuan-video/video-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/hunyuan-video/video-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/hunyuan-video/video-to-video", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/hunyuan-video.webp?v=1", + "playgroundUrl": "https://fal.ai/models/fal-ai/hunyuan-video/video-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/hunyuan-video/video-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "HunyuanVideoVideoToVideoInput": { + "title": "HunyuanV2VRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A stylish woman walks down a Tokyo street filled with warm glowing neon and animated city signage. She wears a dark blue leather jacket, a long pink dress, and bright yellow boots, and carries a black purse." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the video from." + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16" + ], + "title": "Aspect Ratio (W:H)", + "type": "string", + "description": "The aspect ratio of the video to generate.", + "default": "16:9" + }, + "resolution": { + "enum": [ + "480p", + "580p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the video to generate.", + "default": "720p" + }, + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/hunyuan_video/hunyuan_v2v_input.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "URL of the video input." 
+ }, + "strength": { + "minimum": 0.01, + "maximum": 1, + "type": "number", + "title": "Strength", + "description": "Strength for Video-to-Video", + "default": 0.85 + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": false + }, + "num_inference_steps": { + "minimum": 2, + "maximum": 30, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to run. Lower gets faster results, higher gets better results.", + "default": 30 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed to use for generating the video." + }, + "num_frames": { + "enum": [ + "129", + "85" + ], + "title": "Number of Frames", + "type": "string", + "description": "The number of frames to generate.", + "default": 129 + }, + "pro_mode": { + "title": "Pro Mode", + "type": "boolean", + "description": "By default, generations are done with 35 steps. Pro mode does 55 steps which results in higher quality videos but will take more time and cost 2x more billing units.", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "num_inference_steps", + "seed", + "pro_mode", + "aspect_ratio", + "resolution", + "num_frames", + "enable_safety_checker", + "video_url", + "strength" + ], + "required": [ + "prompt", + "video_url" + ] + }, + "HunyuanVideoVideoToVideoOutput": { + "title": "HunyuanT2VResponse", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generating the video." + }, + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/kangaroo/y5-1YTGpun17eSeggZMzX_video-1733468228.mp4" + } + ], + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/hunyuan-video/video-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-video/video-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-video/video-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HunyuanVideoVideoToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-video/video-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HunyuanVideoVideoToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/hunyuan-video-lora/video-to-video", + "metadata": { + "display_name": "Hunyuan Video LoRA Inference (Video-to-Video)", + "category": "video-to-video", + "description": "Hunyuan Video is an Open video generation model with high visual quality, motion diversity, text-video alignment, and generation stability. 
Use this endpoint to generate videos from videos.", + "status": "active", + "tags": [ + "video to video", + "motion", + "lora" + ], + "updated_at": "2026-01-26T21:44:29.200Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/hunyuan-video.webp?v=1", + "model_url": "https://fal.run/fal-ai/hunyuan-video-lora/video-to-video", + "date": "2025-01-30T00:00:00.000Z", + "group": { + "key": "hunyuan-text-to-video-lora", + "label": "Video-to-Video" + }, + "highlighted": false, + "duration_estimate": 4, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/hunyuan-video-lora/video-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/hunyuan-video-lora/video-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/hunyuan-video-lora/video-to-video", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/hunyuan-video.webp?v=1", + "playgroundUrl": "https://fal.ai/models/fal-ai/hunyuan-video-lora/video-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/hunyuan-video-lora/video-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "HunyuanVideoLoraVideoToVideoInput": { + "title": "HunyuanV2VRequest", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "A stylish woman walks down a Tokyo street filled with warm glowing neon and animated city signage. She wears a dark blue leather jacket, a long pink dress, and bright yellow boots, and carries a black purse." + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the video from." + }, + "aspect_ratio": { + "enum": [ + "16:9", + "9:16" + ], + "title": "Aspect Ratio (W:H)", + "type": "string", + "description": "The aspect ratio of the video to generate.", + "default": "16:9" + }, + "resolution": { + "enum": [ + "480p", + "580p", + "720p" + ], + "title": "Resolution", + "type": "string", + "description": "The resolution of the video to generate.", + "default": "720p" + }, + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/hunyuan_video/hunyuan_v2v_input.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "URL of the video" + }, + "loras": { + "description": "\n The LoRAs to use for the image generation. 
You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "examples": [], + "title": "Loras", + "default": [] + }, + "strength": { + "minimum": 0.01, + "maximum": 1, + "type": "number", + "title": "Strength", + "description": "Strength of video-to-video", + "default": 0.75 + }, + "enable_safety_checker": { + "examples": [ + true + ], + "title": "Enable Safety Checker", + "type": "boolean", + "description": "If set to true, the safety checker will be enabled.", + "default": false + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed to use for generating the video." + }, + "num_frames": { + "enum": [ + "129", + "85" + ], + "title": "Number of Frames", + "type": "string", + "description": "The number of frames to generate.", + "default": 129 + }, + "pro_mode": { + "title": "Pro Mode", + "type": "boolean", + "description": "By default, generations are done with 35 steps. Pro mode does 55 steps which results in higher quality videos but will take more time and cost 2x more billing units.", + "default": false + } + }, + "x-fal-order-properties": [ + "prompt", + "seed", + "pro_mode", + "aspect_ratio", + "resolution", + "num_frames", + "enable_safety_checker", + "loras", + "video_url", + "strength" + ], + "required": [ + "prompt", + "video_url" + ] + }, + "HunyuanVideoLoraVideoToVideoOutput": { + "title": "HunyuanV2VResponse", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "The seed used for generating the video." + }, + "video": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/hunyuan_video/hunyuan_v2v_output.mp4" + } + ], + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Scale", + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + } + }, + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." 
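The LoRA variant adds a `loras` array of `LoraWeight` objects (`path` required; `scale` 0-4, default 1) that are merged into the base model before inference. A sketch follows; the weights URL is a placeholder, not a real artifact.

```ts
// fal-ai/hunyuan-video-lora/video-to-video: each LoraWeight is merged into
// the base model; scale weights the merge. The .safetensors URL below is a
// placeholder for illustration only.
interface LoraWeight { path: string; scale?: number } // scale: 0-4, default 1

const loraV2v = await runQueued<
  { prompt: string; video_url: string; loras?: Array<LoraWeight>; strength?: number },
  { video: { url: string }; seed: number } // HunyuanV2VResponse
>('fal-ai/hunyuan-video-lora/video-to-video', {
  prompt: 'A stylish woman walks down a Tokyo street.',
  video_url: 'https://storage.googleapis.com/falserverless/hunyuan_video/hunyuan_v2v_input.mp4',
  loras: [{ path: 'https://example.com/my-style-lora.safetensors', scale: 0.8 }],
})
```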
+ }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/hunyuan-video-lora/video-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-video-lora/video-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-video-lora/video-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HunyuanVideoLoraVideoToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/hunyuan-video-lora/video-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HunyuanVideoLoraVideoToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/ffmpeg-api/compose", + "metadata": { + "display_name": "FFmpeg API Compose", + "category": "video-to-video", + "description": "Compose videos from multiple media sources using FFmpeg API.", + "status": "active", + "tags": [ + "ffmpeg" + ], + "updated_at": "2026-01-26T21:44:07.023Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/ffmpeg-api-compose.webp", + "model_url": "https://fal.run/fal-ai/ffmpeg-api/compose", + "license_type": "commercial", + "date": "2025-01-22T00:00:00.000Z", + "group": { + "key": "ffmpeg", + "label": "Compose" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/ffmpeg-api/compose", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/ffmpeg-api/compose queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/ffmpeg-api/compose", + "category": "video-to-video", + "thumbnailUrl": 
"https://storage.googleapis.com/falserverless/gallery/ffmpeg-api-compose.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/ffmpeg-api/compose", + "documentationUrl": "https://fal.ai/models/fal-ai/ffmpeg-api/compose/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "FfmpegApiComposeInput": { + "title": "Input", + "type": "object", + "properties": { + "tracks": { + "description": "List of tracks to be combined into the final media", + "type": "array", + "title": "Tracks", + "items": { + "$ref": "#/components/schemas/Track" + } + } + }, + "x-fal-order-properties": [ + "tracks" + ], + "required": [ + "tracks" + ] + }, + "FfmpegApiComposeOutput": { + "title": "ComposeOutput", + "type": "object", + "properties": { + "video_url": { + "description": "URL of the processed video file", + "type": "string", + "title": "Video Url" + }, + "thumbnail_url": { + "description": "URL of the video's thumbnail image", + "type": "string", + "title": "Thumbnail Url" + } + }, + "x-fal-order-properties": [ + "video_url", + "thumbnail_url" + ], + "required": [ + "video_url", + "thumbnail_url" + ] + }, + "Track": { + "title": "Track", + "type": "object", + "properties": { + "type": { + "description": "Type of track ('video' or 'audio')", + "type": "string", + "title": "Type" + }, + "id": { + "description": "Unique identifier for the track", + "type": "string", + "title": "Id" + }, + "keyframes": { + "description": "List of keyframes that make up this track", + "type": "array", + "title": "Keyframes", + "items": { + "$ref": "#/components/schemas/Keyframe" + } + } + }, + "x-fal-order-properties": [ + "id", + "type", + "keyframes" + ], + "required": [ + "id", + "type", + "keyframes" + ] + }, + "Keyframe": { + "title": "Keyframe", + "type": "object", + "properties": { + "duration": { + "description": "The duration in milliseconds of this keyframe", + "type": "number", + "title": "Duration" + }, + "timestamp": { + "description": "The timestamp in milliseconds where this keyframe starts", + "type": "number", + "title": "Timestamp" + }, + "url": { + "description": "The URL where this keyframe's media file can be accessed", + "type": "string", + "title": "Url" + } + }, + "x-fal-order-properties": [ + "timestamp", + "duration", + "url" + ], + "required": [ + "timestamp", + "duration", + "url" + ] + } + } + }, + "paths": { + "/fal-ai/ffmpeg-api/compose/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": 
"query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ffmpeg-api/compose/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/ffmpeg-api/compose": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FfmpegApiComposeInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/ffmpeg-api/compose/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FfmpegApiComposeOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/sync-lipsync", + "metadata": { + "display_name": "sync.so -- lipsync 1.9.0-beta", + "category": "video-to-video", + "description": "Generate realistic lipsync animations from audio using advanced algorithms for high-quality synchronization.", + "status": "active", + "tags": [ + "animation", + "lip sync" + ], + "updated_at": "2026-01-26T21:44:32.135Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/rabbit/0FiW23RmTuPYSqUv6RSCc.png", + "model_url": "https://fal.run/fal-ai/sync-lipsync", + "license_type": "commercial", + "date": "2025-01-13T00:00:00.000Z", + "group": { + "key": "sync-lipsync", + "label": "Lipsync 1.9" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/sync-lipsync", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/sync-lipsync queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/sync-lipsync", + "category": "video-to-video", + "thumbnailUrl": "https://fal.media/files/rabbit/0FiW23RmTuPYSqUv6RSCc.png", + "playgroundUrl": "https://fal.ai/models/fal-ai/sync-lipsync", + "documentationUrl": "https://fal.ai/models/fal-ai/sync-lipsync/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "SyncLipsyncInput": { + "title": "LipSyncInput", + "type": "object", + "properties": { + "model": { + "enum": [ + "lipsync-1.8.0", + "lipsync-1.7.1", + "lipsync-1.9.0-beta" + ], + "title": "Model", + "type": "string", + "description": "The model to use for lipsyncing", + "default": "lipsync-1.9.0-beta" + }, + "video_url": { + "examples": [ + "https://fal.media/files/koala/8teUPbRRMtAUTORDvqy0l.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "URL of the input video" + }, + "sync_mode": { + "enum": [ + "cut_off", + "loop", + "bounce", + "silence", + "remap" + ], + "title": "Sync Mode", + "type": "string", + "description": "Lipsync mode when audio and video durations are out of sync.", + "default": "cut_off" + }, + "audio_url": { + "examples": [ + "https://fal.media/files/lion/vyFWygmZsIZlUO4s0nr2n.wav" + ], + "title": "Audio Url", + "type": "string", + "description": "URL of the input audio" + } + }, + "x-fal-order-properties": [ + "model", + "video_url", + "audio_url", + "sync_mode" + ], + "required": [ + "video_url", + "audio_url" + ] + }, + "SyncLipsyncOutput": { + "title": "LipSyncOutput", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "url": "https://v3.fal.media/files/rabbit/6gJV-z7RJsF0AxkZHkdgJ_output.mp4" + } + ], + "title": "Video", + "description": "The generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/sync-lipsync/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sync-lipsync/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/sync-lipsync": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SyncLipsyncInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sync-lipsync/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SyncLipsyncOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/auto-caption", + "metadata": { + "display_name": "Auto-Captioner", + "category": "video-to-video", + "description": "Automatically generates text captions for your videos from the audio as per text colour/font specifications", + "status": "active", + "tags": [ + "captioning", + "video" + ], + "updated_at": "2026-01-26T21:44:32.527Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/caption_thumbnail.webp", + "model_url": "https://fal.run/fal-ai/auto-caption", + "date": "2025-01-03T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/auto-caption", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/auto-caption queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/auto-caption", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/caption_thumbnail.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/auto-caption", + "documentationUrl": "https://fal.ai/models/fal-ai/auto-caption/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "AutoCaptionInput": { + "title": "CaptionInput", + "type": "object", + "properties": { + "txt_font": { + "title": "Txt Font", + "type": "string", + "description": "Font for generated captions. Choose one in 'Arial','Standard','Garamond', 'Times New Roman','Georgia', or pass a url to a .ttf file", + "default": "Standard" + }, + "video_url": { + "title": "Video Url", + "type": "string", + "description": "URL to the .mp4 video with audio. Only videos of size <100MB are allowed." + }, + "top_align": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "title": "Top Align", + "description": "Top-to-bottom alignment of the text. Can be a string ('top', 'center', 'bottom') or a float (0.0-1.0)", + "default": "center" + }, + "txt_color": { + "title": "Txt Color", + "type": "string", + "description": "Colour of the text. Can be a RGB tuple, a color name, or an hexadecimal notation.", + "default": "white" + }, + "stroke_width": { + "title": "Stroke Width", + "type": "integer", + "description": "Width of the text strokes in pixels", + "default": 1 + }, + "refresh_interval": { + "minimum": 0.5, + "maximum": 3, + "type": "number", + "title": "Refresh Interval", + "description": "Number of seconds the captions should stay on screen. A higher number will also result in more text being displayed at once.", + "default": 1.5 + }, + "font_size": { + "title": "Font Size", + "type": "integer", + "description": "Size of text in generated captions.", + "default": 24 + }, + "left_align": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "title": "Left Align", + "description": "Left-to-right alignment of the text. Can be a string ('left', 'center', 'right') or a float (0.0-1.0)", + "default": "center" + } + }, + "x-fal-order-properties": [ + "video_url", + "txt_color", + "txt_font", + "font_size", + "stroke_width", + "left_align", + "top_align", + "refresh_interval" + ], + "required": [ + "video_url" + ] + }, + "AutoCaptionOutput": { + "title": "Output", + "type": "object", + "properties": { + "video_url": { + "title": "Video Url", + "type": "string", + "description": "URL to the caption .mp4 video." + } + }, + "x-fal-order-properties": [ + "video_url" + ], + "required": [ + "video_url" + ] + } + } + }, + "paths": { + "/fal-ai/auto-caption/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/auto-caption/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/auto-caption": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AutoCaptionInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/auto-caption/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AutoCaptionOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/dubbing", + "metadata": { + "display_name": "Dubbing", + "category": "video-to-video", + "description": "This endpoint delivers seamlessly localized videos by generating lip-synced dubs in multiple languages, ensuring natural and immersive multilingual experiences", + "status": "active", + "tags": [ + "animation", + "lip sync", + "dubbing" + ], + "updated_at": "2026-01-26T21:44:32.912Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/dubbing.webp", + "model_url": "https://fal.run/fal-ai/dubbing", + "license_type": "commercial", + "date": "2024-12-20T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/dubbing", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/dubbing queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/dubbing", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/dubbing.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/dubbing", + "documentationUrl": "https://fal.ai/models/fal-ai/dubbing/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "DubbingInput": { + "title": "InputModel", + "type": "object", + "properties": { + "do_lipsync": { + "title": "Do Lipsync", + "type": "boolean", + "description": "Whether to lip sync the audio to the video", + "default": true + }, + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/dubbing/swapjokes_clip_cropped.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "Input video URL to be dubbed." + }, + "target_language": { + "enum": [ + "hindi", + "turkish", + "english" + ], + "title": "Target Language", + "type": "string", + "description": "Target language to dub the video to", + "default": "hindi" + } + }, + "x-fal-order-properties": [ + "video_url", + "target_language", + "do_lipsync" + ], + "required": [ + "video_url" + ] + }, + "DubbingOutput": { + "title": "OutputModel", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "file_size": 120000, + "file_name": "output.mp4", + "content_type": "video/mp4", + "url": "https://v3.fal.media/files/koala/7BzEwUucbr6yuFjpcJipl_output.mp4" + } + ], + "title": "Video", + "description": "The generated video with the lip sync.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/dubbing/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/dubbing/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/dubbing": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DubbingInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/dubbing/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DubbingOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/video-upscaler", + "metadata": { + "display_name": "Video Upscaler", + "category": "video-to-video", + "description": "The video upscaler endpoint uses RealESRGAN on each frame of the input video to upscale the video to a higher resolution.", + "status": "active", + "tags": [ + "video generation", + "video to video", + "ai video", + "high fidelity motion" + ], + "updated_at": "2026-01-26T21:44:36.021Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/video-upscaler-thumbnail.jpg", + "model_url": "https://fal.run/fal-ai/video-upscaler", + "date": "2024-12-04T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/video-upscaler", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/video-upscaler queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/video-upscaler", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/video-upscaler-thumbnail.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/video-upscaler", + "documentationUrl": "https://fal.ai/models/fal-ai/video-upscaler/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "VideoUpscalerInput": { + "title": "Input", + "type": "object", + "properties": { + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/videos/_o3VmzjOytBwRjCVPFX6i_output.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "The URL of the video to upscale" + }, + "scale": { + "description": "The scale factor", + "type": "number", + "minimum": 1, + "maximum": 8, + "title": "Scale", + "default": 2 + } + }, + "x-fal-order-properties": [ + "video_url", + "scale" + ], + "required": [ + "video_url" + ] + }, + "VideoUpscalerOutput": { + "title": "Output", + "type": "object", + "properties": { + "video": { + "examples": [ + { + "content_type": "video/mp4", + "url": "https://storage.googleapis.com/falserverless/videos/h0jgPaO6AJAbyrsNYNbGl_upscaled_video.mp4" + } + ], + "title": "Video", + "description": "The stitched video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/video-upscaler/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/video-upscaler/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/video-upscaler": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VideoUpscalerInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/video-upscaler/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VideoUpscalerOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/cogvideox-5b/video-to-video", + "metadata": { + "display_name": "CogVideoX-5B", + "category": "video-to-video", + "description": "Generate videos from videos and prompts using CogVideoX-5B", + "status": "active", + "tags": [ + "editing" + ], + "updated_at": "2026-01-26T21:44:38.983Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/model_tests/cogvideox/panda.gif.gif", + "model_url": "https://fal.run/fal-ai/cogvideox-5b/video-to-video", + "github_url": "https://huggingface.co/THUDM/CogVideoX-5b/blob/main/LICENSE", + "date": "2024-10-17T00:00:00.000Z", + "group": { + "key": "cogvideox-5b", + "label": "Video to Video" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/cogvideox-5b/video-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/cogvideox-5b/video-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/cogvideox-5b/video-to-video", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/model_tests/cogvideox/panda.gif.gif", + "playgroundUrl": "https://fal.ai/models/fal-ai/cogvideox-5b/video-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/cogvideox-5b/video-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Cogvideox5bVideoToVideoInput": { + "title": "VideoToVideoInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "An astronaut stands triumphantly at the peak of a towering mountain. 
Panorama of rugged peaks and valleys. Very futuristic vibe and animated aesthetic. Highlights of purple and golden colors in the scene. The sky looks like an animated/cartoonish dream of galaxies, nebulae, stars, planets, moons, but the remainder of the scene is mostly realistic. " + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to generate the video from." + }, + "video_url": { + "examples": [ + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/hiker.mp4" + ], + "title": "Input Video Url", + "type": "string", + "description": "The video to generate the video from." + }, + "use_rife": { + "title": "Use Rife", + "type": "boolean", + "description": "Use RIFE for video interpolation", + "default": true + }, + "loras": { + "description": "\n The LoRAs to use for the image generation. We currently support one lora.\n ", + "type": "array", + "items": { + "$ref": "#/components/schemas/LoraWeight" + }, + "examples": [], + "title": "Loras", + "default": [] + }, + "video_size": { + "anyOf": [ + { + "$ref": "#/components/schemas/ImageSize" + }, + { + "enum": [ + "square_hd", + "square", + "portrait_4_3", + "portrait_16_9", + "landscape_4_3", + "landscape_16_9" + ], + "type": "string" + } + ], + "title": "Video Size", + "description": "The size of the generated video.", + "default": { + "height": 480, + "width": 720 + } + }, + "strength": { + "minimum": 0.05, + "maximum": 1, + "type": "number", + "title": "Strength", + "description": "The strength to use for Video to Video. 1.0 completely remakes the video while 0.0 preserves the original.", + "default": 0.8 + }, + "guidance_scale": { + "minimum": 0, + "maximum": 20, + "type": "number", + "title": "Guidance scale (CFG)", + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related video to show you.\n ", + "default": 7 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Num Inference Steps", + "description": "The number of inference steps to perform.", + "default": 50 + }, + "export_fps": { + "minimum": 4, + "maximum": 32, + "type": "integer", + "title": "Export Fps", + "description": "The target FPS of the video", + "default": 16 + }, + "negative_prompt": { + "examples": [ + "Distorted, discontinuous, Ugly, blurry, low resolution, motionless, static, disfigured, disconnected limbs, Ugly faces, incomplete arms" + ], + "title": "Negative Prompt", + "type": "string", + "description": "The negative prompt to generate video from", + "default": "" + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n " + } + }, + "x-fal-order-properties": [ + "prompt", + "video_size", + "negative_prompt", + "loras", + "num_inference_steps", + "seed", + "guidance_scale", + "use_rife", + "export_fps", + "video_url", + "strength" + ], + "required": [ + "prompt", + "video_url" + ] + }, + "Cogvideox5bVideoToVideoOutput": { + "title": "Output", + "type": "object", + "properties": { + "prompt": { + "title": "Prompt", + "type": "string", + "description": "The prompt used for generating the video." + }, + "timings": { + "title": "Timings", + "type": "object", + "additionalProperties": { + "type": "number" + } + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n Seed of the generated video. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n " + }, + "video": { + "description": "The URL to the generated video", + "title": "Video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "timings", + "seed", + "prompt" + ], + "required": [ + "video", + "timings", + "seed", + "prompt" + ] + }, + "LoraWeight": { + "title": "LoraWeight", + "type": "object", + "properties": { + "path": { + "title": "Path", + "type": "string", + "description": "URL or the path to the LoRA weights." + }, + "scale": { + "minimum": 0, + "maximum": 4, + "type": "number", + "title": "Scale", + "description": "\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ", + "default": 1 + } + }, + "x-fal-order-properties": [ + "path", + "scale" + ], + "required": [ + "path" + ] + }, + "ImageSize": { + "title": "ImageSize", + "type": "object", + "properties": { + "height": { + "maximum": 14142, + "type": "integer", + "title": "Height", + "description": "The height of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + }, + "width": { + "maximum": 14142, + "type": "integer", + "title": "Width", + "description": "The width of the generated image.", + "exclusiveMinimum": 0, + "default": 512 + } + }, + "x-fal-order-properties": [ + "width", + "height" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/cogvideox-5b/video-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/cogvideox-5b/video-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/cogvideox-5b/video-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Cogvideox5bVideoToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/cogvideox-5b/video-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Cogvideox5bVideoToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/controlnext", + "metadata": { + "display_name": "ControlNeXt SVD", + "category": "video-to-video", + "description": "Animate a reference image with a driving video using ControlNeXt.", + "status": "active", + "tags": [ + "animation", + "stylized" + ], + "updated_at": "2026-01-26T21:44:41.371Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/controlnext.JPG", + "model_url": "https://fal.run/fal-ai/controlnext", + "github_url": "https://github.com/dvlab-research/ControlNeXt/blob/main/LICENSE", + "license_type": "commercial", + "date": "2024-09-05T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/controlnext", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/controlnext queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/controlnext", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/controlnext.JPG", + "playgroundUrl": "https://fal.ai/models/fal-ai/controlnext", + "documentationUrl": "https://fal.ai/models/fal-ai/controlnext/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ControlnextInput": { + "title": "ControlNeXtInput", + "type": "object", + "properties": { + "controlnext_cond_scale": { + "minimum": 0.1, + "maximum": 10, + "type": "number", + "title": "Controlnext Cond Scale", + "description": "Condition scale for ControlNeXt.", + "default": 1 + }, + "video_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/musepose/dance.mp4" + ], + "title": "Video Url", + "type": "string", + "description": "URL of the input video." + }, + "fps": { + "minimum": 1, + "maximum": 60, + "type": "integer", + "title": "Fps", + "description": "Frames per second for the output video.", + "default": 7 + }, + "max_frame_num": { + "minimum": 1, + "maximum": 1000, + "type": "integer", + "title": "Max Frame Num", + "description": "Maximum number of frames to process.", + "default": 240 + }, + "width": { + "minimum": 64, + "maximum": 1024, + "type": "integer", + "title": "Width", + "description": "Width of the output video.", + "default": 576 + }, + "overlap": { + "minimum": 0, + "maximum": 20, + "type": "integer", + "title": "Overlap", + "description": "Number of overlapping frames between batches.", + "default": 6 + }, + "guidance_scale": { + "minimum": 0.1, + "maximum": 10, + "type": "number", + "title": "Guidance Scale", + "description": "Guidance scale for the diffusion process.", + "default": 3 + }, + "batch_frames": { + "minimum": 1, + "maximum": 50, + "type": "integer", + "title": "Batch Frames", + "description": "Number of frames to process in each batch.", + "default": 24 + }, + "height": { + "minimum": 64, + "maximum": 1024, + "type": "integer", + "title": "Height", + "description": "Height of the output video.", + "default": 1024 + }, + "sample_stride": { + "minimum": 1, + "maximum": 10, + "type": "integer", + "title": "Sample Stride", + "description": "Stride for sampling frames from the input video.", + "default": 2 + }, + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/musepose/ref.png" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the reference image." 
+ }, + "decode_chunk_size": { + "minimum": 1, + "maximum": 10, + "type": "integer", + "title": "Decode Chunk Size", + "description": "Chunk size for decoding frames.", + "default": 2 + }, + "motion_bucket_id": { + "minimum": 0, + "maximum": 255, + "type": "number", + "title": "Motion Bucket Id", + "description": "Motion bucket ID for the pipeline.", + "default": 127 + }, + "num_inference_steps": { + "minimum": 1, + "maximum": 100, + "type": "integer", + "title": "Num Inference Steps", + "description": "Number of inference steps.", + "default": 25 + } + }, + "x-fal-order-properties": [ + "image_url", + "video_url", + "height", + "width", + "guidance_scale", + "num_inference_steps", + "max_frame_num", + "batch_frames", + "overlap", + "sample_stride", + "decode_chunk_size", + "motion_bucket_id", + "fps", + "controlnext_cond_scale" + ], + "required": [ + "image_url", + "video_url" + ] + }, + "ControlnextOutput": { + "title": "ControlNeXtOutput", + "type": "object", + "properties": { + "video": { + "description": "The generated video.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size", + "description": "The size of the file in bytes.", + "examples": [ + 4404019 + ] + }, + "file_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name", + "description": "The name of the file. It will be auto-generated if not provided.", + "examples": [ + "z9RV14K95DvU.png" + ] + }, + "content_type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type", + "description": "The mime type of the file.", + "examples": [ + "image/png" + ] + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/controlnext/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/controlnext/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/controlnext": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ControlnextInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/controlnext/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ControlnextOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/sam2/video", + "metadata": { + "display_name": "Segment Anything Model 2", + "category": "video-to-video", + "description": "SAM 2 is a model for segmenting images and videos in real-time.", + "status": "active", + "tags": [ + "segmentation", + "mask", + "real-time" + ], + "updated_at": "2026-01-26T21:44:41.746Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/sam2.gif", + "model_url": "https://fal.run/fal-ai/sam2/video", + "date": "2024-08-15T00:00:00.000Z", + "group": { + "key": "sam2", + "label": "Video to Video" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/sam2/video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/sam2/video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/sam2/video", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/sam2.gif", + "playgroundUrl": "https://fal.ai/models/fal-ai/sam2/video", + "documentationUrl": "https://fal.ai/models/fal-ai/sam2/video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Sam2VideoInput": { + "title": "SAM2VideoRLEInput", + "type": "object", + "properties": { + "video_url": { + "examples": [ + "https://drive.google.com/uc?id=1iOFYbNITYwrebBBp9kaEGhBndFSRLz8k" + ], + "title": "Video Url", + "type": "string", + "description": "The URL of the video to be segmented." 
+ }, + "prompts": { + "examples": [ + [ + { + "y": 350, + "label": 1, + "frame_index": 0, + "x": 210 + }, + { + "y": 220, + "label": 1, + "frame_index": 0, + "x": 250 + } + ] + ], + "title": "Prompts", + "type": "array", + "description": "List of prompts to segment the video", + "items": { + "$ref": "#/components/schemas/PointPrompt" + }, + "default": [] + }, + "boundingbox_zip": { + "title": "Boundingbox Zip", + "type": "boolean", + "description": "Return per-frame bounding box overlays as a zip archive.", + "default": false + }, + "mask_url": { + "title": "Mask Url", + "type": "string", + "description": "The URL of the mask to be applied initially." + }, + "apply_mask": { + "title": "Apply Mask", + "type": "boolean", + "description": "Apply the mask on the video.", + "default": false + }, + "box_prompts": { + "examples": [ + [ + { + "y_min": 0, + "frame_index": 0, + "x_max": 500, + "x_min": 300, + "y_max": 400 + } + ] + ], + "title": "Box Prompts", + "type": "array", + "description": "Coordinates for boxes", + "items": { + "$ref": "#/components/schemas/BoxPrompt" + }, + "default": [] + } + }, + "x-fal-order-properties": [ + "video_url", + "mask_url", + "prompts", + "box_prompts", + "apply_mask", + "boundingbox_zip" + ], + "required": [ + "video_url" + ] + }, + "Sam2VideoOutput": { + "title": "SAM2VideoOutput", + "type": "object", + "properties": { + "boundingbox_frames_zip": { + "title": "Boundingbox Frames Zip", + "description": "Zip file containing per-frame bounding box overlays.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + }, + "video": { + "title": "Video", + "description": "The segmented video.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "boundingbox_frames_zip" + ], + "required": [ + "video" + ] + }, + "PointPrompt": { + "title": "PointPrompt", + "type": "object", + "properties": { + "y": { + "title": "Y", + "type": "integer", + "description": "Y Coordinate of the prompt", + "default": 350 + }, + "label": { + "enum": [ + 0, + 1 + ], + "title": "Label", + "type": "integer", + "description": "Label of the prompt. 
1 for foreground, 0 for background", + "default": 1 + }, + "frame_index": { + "title": "Frame Index", + "type": "integer", + "description": "The frame index to interact with.", + "default": 0 + }, + "x": { + "title": "X", + "type": "integer", + "description": "X Coordinate of the prompt", + "default": 305 + } + }, + "x-fal-order-properties": [ + "x", + "y", + "label", + "frame_index" + ] + }, + "BoxPrompt": { + "title": "BoxPrompt", + "type": "object", + "properties": { + "y_min": { + "title": "Y Min", + "type": "integer", + "description": "Y Min Coordinate of the box", + "default": 0 + }, + "frame_index": { + "title": "Frame Index", + "type": "integer", + "description": "The frame index to interact with.", + "default": 0 + }, + "x_max": { + "title": "X Max", + "type": "integer", + "description": "X Max Coordinate of the prompt", + "default": 0 + }, + "x_min": { + "title": "X Min", + "type": "integer", + "description": "X Min Coordinate of the box", + "default": 0 + }, + "y_max": { + "title": "Y Max", + "type": "integer", + "description": "Y Max Coordinate of the prompt", + "default": 0 + } + }, + "x-fal-order-properties": [ + "x_min", + "y_min", + "x_max", + "y_max", + "frame_index" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/sam2/video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sam2/video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/sam2/video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sam2VideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sam2/video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sam2VideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/amt-interpolation", + "metadata": { + "display_name": "AMT Interpolation", + "category": "video-to-video", + "description": "Interpolate between video frames", + "status": "active", + "tags": [ + "interpolation", + "editing" + ], + "updated_at": "2026-01-26T21:44:54.486Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/amt-interpolation.webp", + "thumbnail_animated_url": "https://storage.googleapis.com/falserverless/gallery/amt-interpolation-animated.webp", + "model_url": "https://fal.run/fal-ai/amt-interpolation", + "date": "2024-02-21T00:00:00.000Z", + "group": { + "key": "amt-interpolation", + "label": "Video Interpolation" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/amt-interpolation", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/amt-interpolation queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/amt-interpolation", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/amt-interpolation.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/amt-interpolation", + "documentationUrl": "https://fal.ai/models/fal-ai/amt-interpolation/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "AmtInterpolationInput": { + "title": "AMTInterpolationInput", + "type": "object", + "properties": { + "video_url": { + "examples": [ + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff-vid2vid-input-2.gif" + ], + "title": "Video URL", + "type": "string", + "description": "URL of the video to be processed" + }, + "recursive_interpolation_passes": { + "min": 1, + "title": "Recursive Interpolation Passes", + "type": "integer", + "description": "Number of recursive interpolation passes", + "max": 10, + "default": 2 + }, + "output_fps": { + "min": 1, + "title": "Output FPS", + "type": "integer", + "description": "Output frames per second", + "max": 60, + "default": 24 + } + }, + "x-fal-order-properties": [ + "video_url", + "output_fps", + "recursive_interpolation_passes" + ], + "required": [ + "video_url" + ] + }, + "AmtInterpolationOutput": { + "title": "AMTInterpolationOutput", + "type": "object", + "properties": { + "video": { + "title": "Video", + "description": "Generated video", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video" + ], + "required": [ + "video" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/amt-interpolation/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/amt-interpolation/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/amt-interpolation": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AmtInterpolationInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/amt-interpolation/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AmtInterpolationOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/fast-animatediff/turbo/video-to-video", + "metadata": { + "display_name": "AnimateDiff Turbo", + "category": "video-to-video", + "description": "Re-animate your videos in lightning speed!", + "status": "active", + "tags": [ + "animation", + "stylized", + "turbo" + ], + "updated_at": "2026-01-26T21:44:56.497Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/animatediff-v2v.gif", + "model_url": "https://fal.run/fal-ai/fast-animatediff/turbo/video-to-video", + "github_url": "https://github.com/guoyww/AnimateDiff/blob/main/LICENSE.txt", + "date": "2024-02-13T00:00:00.000Z", + "group": { + "key": "fast-animatediff-turbo", + "label": "Video to Video" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/fast-animatediff/turbo/video-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/fast-animatediff/turbo/video-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/fast-animatediff/turbo/video-to-video", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/animatediff-v2v.gif", + "playgroundUrl": "https://fal.ai/models/fal-ai/fast-animatediff/turbo/video-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/fast-animatediff/turbo/video-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FastAnimatediffTurboVideoToVideoInput": { + "title": "AnimateDiffV2VTurboInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "closeup of tony stark, robert downey jr, fireworks, high quality, ultra HD", + "panda playing a guitar, on a boat, in the ocean, high quality, high quality, ultra HD, realistic" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results." + }, + "video_url": { + "examples": [ + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff-vid2vid-input-2.gif", + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff-vid2vid-input-1.gif" + ], + "title": "Video Url", + "type": "string", + "description": "URL of the video." + }, + "first_n_seconds": { + "minimum": 2, + "title": "First N Seconds", + "type": "integer", + "maximum": 12, + "description": "The first N number of seconds of video to animate.", + "default": 3 + }, + "fps": { + "minimum": 1, + "title": "Fps", + "type": "integer", + "maximum": 16, + "description": "Number of frames per second to extract from the video.", + "default": 8 + }, + "strength": { + "minimum": 0.1, + "title": "Strength", + "type": "number", + "maximum": 1, + "description": "The strength of the input video in the final output.", + "default": 0.7 + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance Scale", + "type": "number", + "maximum": 20, + "description": "The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.", + "default": 1 + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 32, + "description": "The number of inference steps to perform. 4-12 is recommended for turbo mode.", + "default": 8 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + "default": "(bad quality, worst quality:1.2), ugly faces, bad anime" + }, + "motions": { + "title": "Motions", + "type": "array", + "description": "The motions to apply to the video.", + "uniqueItems": true, + "items": { + "enum": [ + "zoom-out", + "zoom-in", + "pan-left", + "pan-right", + "tilt-up", + "tilt-down" + ], + "type": "string" + } + } + }, + "x-fal-order-properties": [ + "video_url", + "first_n_seconds", + "prompt", + "negative_prompt", + "num_inference_steps", + "strength", + "guidance_scale", + "seed", + "fps", + "motions" + ], + "required": [ + "prompt", + "video_url" + ] + }, + "FastAnimatediffTurboVideoToVideoOutput": { + "title": "AnimateDiffV2VOutput", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "Seed used for generating the video." 
+ }, + "video": { + "examples": [ + { + "url": "https://fal-cdn.batuhan-941.workers.dev/files/koala/5Cb_6P_s9wW8f8-g9c4yj.mp4" + } + ], + "title": "Video", + "description": "Generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/fast-animatediff/turbo/video-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-animatediff/turbo/video-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/fast-animatediff/turbo/video-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastAnimatediffTurboVideoToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-animatediff/turbo/video-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastAnimatediffTurboVideoToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/fast-animatediff/video-to-video", + "metadata": { + "display_name": "AnimateDiff", + "category": "video-to-video", + "description": "Re-animate your videos!", + "status": "active", + "tags": [ + "animation", + "stylized" + ], + "updated_at": "2026-01-26T21:44:57.339Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/animatediff-v2v.gif", + "model_url": "https://fal.run/fal-ai/fast-animatediff/video-to-video", + "github_url": "https://github.com/guoyww/AnimateDiff/blob/main/LICENSE.txt", + "date": "2024-02-13T00:00:00.000Z", + "group": { + "key": "fast-animatediff", + "label": "Video to Video" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/fast-animatediff/video-to-video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/fast-animatediff/video-to-video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/fast-animatediff/video-to-video", + "category": "video-to-video", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/animatediff-v2v.gif", + "playgroundUrl": "https://fal.ai/models/fal-ai/fast-animatediff/video-to-video", + "documentationUrl": "https://fal.ai/models/fal-ai/fast-animatediff/video-to-video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "FastAnimatediffVideoToVideoInput": { + "title": "AnimateDiffV2VInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "closeup of tony stark, robert downey jr, fireworks, high quality, ultra HD", + "panda playing a guitar, on a boat, in the ocean, high quality, high quality, ultra HD, realistic" + ], + "title": "Prompt", + "type": "string", + "description": "The prompt to use for generating the image. Be as descriptive as possible for best results." + }, + "video_url": { + "examples": [ + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff-vid2vid-input-2.gif", + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff-vid2vid-input-1.gif" + ], + "title": "Video Url", + "type": "string", + "description": "URL of the video." + }, + "first_n_seconds": { + "minimum": 2, + "title": "First N Seconds", + "type": "integer", + "maximum": 4, + "description": "The first N number of seconds of video to animate.", + "default": 3 + }, + "fps": { + "minimum": 1, + "title": "Fps", + "type": "integer", + "maximum": 16, + "description": "Number of frames per second to extract from the video.", + "default": 8 + }, + "strength": { + "minimum": 0.1, + "title": "Strength", + "type": "number", + "maximum": 1, + "description": "The strength of the input video in the final output.", + "default": 0.7 + }, + "guidance_scale": { + "minimum": 0, + "title": "Guidance scale (CFG)", + "type": "number", + "maximum": 20, + "description": "\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ", + "default": 7.5 + }, + "num_inference_steps": { + "minimum": 1, + "title": "Num Inference Steps", + "type": "integer", + "maximum": 50, + "description": "The number of inference steps to perform.", + "default": 25 + }, + "seed": { + "title": "Seed", + "type": "integer", + "description": "\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n " + }, + "negative_prompt": { + "title": "Negative Prompt", + "type": "string", + "description": "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + "default": "(bad quality, worst quality:1.2), ugly faces, bad anime" + }, + "motions": { + "title": "Motions", + "type": "array", + "description": "The motions to apply to the video.", + "uniqueItems": true, + "items": { + "enum": [ + "zoom-out", + "zoom-in", + "pan-left", + "pan-right", + "tilt-up", + "tilt-down" + ], + "type": "string" + } + } + }, + "x-fal-order-properties": [ + "video_url", + "first_n_seconds", + "prompt", + "negative_prompt", + "num_inference_steps", + "strength", + "guidance_scale", + "seed", + "fps", + "motions" + ], + "required": [ + "prompt", + "video_url" + ] + }, + "FastAnimatediffVideoToVideoOutput": { + "title": "AnimateDiffV2VOutput", + "type": "object", + "properties": { + "seed": { + "title": "Seed", + "type": "integer", + "description": "Seed used for generating the video." 
+ }, + "video": { + "examples": [ + { + "url": "https://fal-cdn.batuhan-941.workers.dev/files/koala/5Cb_6P_s9wW8f8-g9c4yj.mp4" + } + ], + "title": "Video", + "description": "Generated video file.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "video", + "seed" + ], + "required": [ + "video", + "seed" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/fast-animatediff/video-to-video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-animatediff/video-to-video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
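Taken together, the paths above spell out the standard fal queue lifecycle for this endpoint: POST the input to submit, poll the status route until it reports COMPLETED, then GET the request result (a PUT to the cancel route aborts a queued job). Below is a minimal TypeScript sketch of that flow, assuming FAL_KEY holds an API key and that the conventional `Key <token>` value works for the Authorization header; the schema only names the header, not its format.

const BASE = 'https://queue.fal.run';
const ENDPOINT = 'fal-ai/fast-animatediff/video-to-video';
const headers = {
  // Assumption: `Key` prefix; the securityScheme only says the header is "Authorization".
  Authorization: `Key ${process.env.FAL_KEY}`,
  'Content-Type': 'application/json',
};

async function runAnimateDiffV2V(video_url: string, prompt: string) {
  // POST submits the input and returns a QueueStatus carrying the request_id.
  const submit = await fetch(`${BASE}/${ENDPOINT}`, {
    method: 'POST',
    headers,
    body: JSON.stringify({ video_url, prompt }), // only the two required fields
  });
  const { request_id } = (await submit.json()) as { request_id: string };

  // Poll /requests/{id}/status until the job leaves the queue.
  for (;;) {
    const res = await fetch(`${BASE}/${ENDPOINT}/requests/${request_id}/status`, { headers });
    const { status } = (await res.json()) as { status: string };
    if (status === 'COMPLETED') break;
    await new Promise((r) => setTimeout(r, 2000));
  }

  // GET /requests/{id} returns FastAnimatediffVideoToVideoOutput.
  const res = await fetch(`${BASE}/${ENDPOINT}/requests/${request_id}`, { headers });
  return (await res.json()) as { video: { url: string }; seed: number };
}

The same four routes (submit, status, cancel, result) repeat verbatim for every endpoint in these files, so the sketches that follow only show the endpoint-specific payloads and output handling.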
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/fast-animatediff/video-to-video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastAnimatediffVideoToVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/fast-animatediff/video-to-video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FastAnimatediffVideoToVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/packages/typescript/ai-fal/json/fal.models.vision.json b/packages/typescript/ai-fal/json/fal.models.vision.json new file mode 100644 index 00000000..192b5de4 --- /dev/null +++ b/packages/typescript/ai-fal/json/fal.models.vision.json @@ -0,0 +1,11381 @@ +{ + "generated_at": "2026-01-28T02:51:51.873Z", + "total_models": 34, + "category": "vision", + "models": [ + { + "endpoint_id": "fal-ai/arbiter/image/text", + "metadata": { + "display_name": "Arbiter", + "category": "vision", + "description": "Semantic image alignment measurements", + "status": "active", + "tags": [ + "clip-score" + ], + "updated_at": "2026-01-26T21:41:45.944Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a87e625/krpZAPx6sz5DAYiSA5GIm_d1a51979fee3446a8622a9f4eafeea53.jpg", + "model_url": "https://fal.run/fal-ai/arbiter/image/text", + "license_type": "commercial", + "date": "2025-12-26T21:27:11.482Z", + "group": { + "key": "arbiter", + "label": "Semantic Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/arbiter/image/text", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/arbiter/image/text queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/arbiter/image/text", + "category": "vision", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a87e625/krpZAPx6sz5DAYiSA5GIm_d1a51979fee3446a8622a9f4eafeea53.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/arbiter/image/text", + "documentationUrl": "https://fal.ai/models/fal-ai/arbiter/image/text/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "ArbiterImageTextInput": { + "x-fal-order-properties": [ + "measurements", + "inputs" + ], + "type": "object", + "properties": { + "measurements": { + "description": "The measurements to use for the measurement.", + "type": "array", + "title": "Measurements", + "items": { + "enum": [ + "clip_score" + ], + "type": "string" + } + }, + "inputs": { + "description": "The inputs to use for the measurement.", + "type": "array", + "title": "Inputs", + "items": { + "$ref": "#/components/schemas/SemanticImageInput" + } + } + }, + "title": "SemanticImageMeasurementInput", + "required": [ + "measurements", + "inputs" + ] + }, + "ArbiterImageTextOutput": { + "x-fal-order-properties": [ + "values" + ], + "type": "object", + "properties": { + "values": { + "description": "The values of the measurements.", + "type": "array", + "title": "Values", + "items": { + "additionalProperties": { + "anyOf": [ + { + "type": "number" + }, + { + "additionalProperties": { + "type": "number" + }, + "type": "object" + } + ] + }, + "type": "object" + } + } + }, + "title": "MultiMeasurementOutput" + }, + "SemanticImageInput": { + "x-fal-order-properties": [ + "hypothesis", + "reference" + ], + "type": "object", + "properties": { + "hypothesis": { + "description": "The hypothesis image to use for the measurement.", + "type": "string", + "title": "Hypothesis" + }, + "reference": { + "description": "The text reference to use for the measurement.", + "type": "string", + "title": "Reference" + } + }, + "title": "SemanticImageInput", + "required": [ + "hypothesis", + "reference" + ] + } + } + }, + "paths": { + "/fal-ai/arbiter/image/text/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/arbiter/image/text/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/arbiter/image/text": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ArbiterImageTextInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/arbiter/image/text/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ArbiterImageTextOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/arbiter/image/image", + "metadata": { + "display_name": "Arbiter", + "category": "vision", + "description": "Image reference comparison measurements", + "status": "active", + "tags": [ + "dists", + "sdi", + "mse", + "ssim", + "lpips" + ], + "updated_at": "2026-01-26T21:41:46.068Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a87e5fc/jeGlIl8xYi2_8bNuQNG0M_1a59b66653474448b2c40e47b5fa26f0.jpg", + "model_url": "https://fal.run/fal-ai/arbiter/image/image", + "license_type": "commercial", + "date": "2025-12-26T21:21:39.208Z", + "group": { + "key": "arbiter", + "label": "Reference Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/arbiter/image/image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/arbiter/image/image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/arbiter/image/image", + "category": "vision", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a87e5fc/jeGlIl8xYi2_8bNuQNG0M_1a59b66653474448b2c40e47b5fa26f0.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/arbiter/image/image", + "documentationUrl": "https://fal.ai/models/fal-ai/arbiter/image/image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ArbiterImageImageInput": { + "x-fal-order-properties": [ + "measurements", + "inputs" + ], + "type": "object", + "properties": { + "measurements": { + "description": "The measurements to use for the measurement.", + "type": "array", + "title": "Measurements", + "items": { + "enum": [ + "dists", + "mse", + "lpips", + "sdi", + "ssim" + ], + "type": "string" + } + }, + "inputs": { + "description": "The inputs to use for the measurement.", + "type": "array", + "title": "Inputs", + "items": { + "$ref": "#/components/schemas/ReferenceImageInput" + } + } + }, + "title": "ImageReferenceMeasurementInput", + "required": [ + "measurements", + "inputs" + ] + }, + "ArbiterImageImageOutput": { + "x-fal-order-properties": [ + "values" + ], + "type": "object", + "properties": { + "values": { + "description": "The values of the measurements.", + "type": "array", + "title": "Values", + "items": { + "additionalProperties": { + "anyOf": [ + { + "type": "number" + }, + { + "additionalProperties": { + "type": "number" + }, + "type": "object" + } + ] + }, + "type": "object" + } + } + }, + "title": "MultiMeasurementOutput" + }, + "ReferenceImageInput": { + "x-fal-order-properties": [ + "reference", + "hypothesis" + ], + "type": "object", + "properties": { + "hypothesis": { + "description": "The hypothesis image to use for the measurement.", + "type": "string", + "title": "Hypothesis" + }, + "reference": { + "description": "The image to use for the measurement.", + "type": "string", + "title": "Reference" + } + }, + "title": "ReferenceImageInput", + "required": [ + "reference", + "hypothesis" + ] + } + } + }, + "paths": { + "/fal-ai/arbiter/image/image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/arbiter/image/image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/arbiter/image/image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ArbiterImageImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/arbiter/image/image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ArbiterImageImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/arbiter/image", + "metadata": { + "display_name": "Arbiter", + "category": "vision", + "description": "Reference-free image measurements", + "status": "active", + "tags": [ + "arniqa", + "nima", + "iqa", + "musiq" + ], + "updated_at": "2026-01-26T21:41:46.198Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a87e5ca/ZaCjvwux8DGFjjTamngMy_d770c80051204269911a228ced51b682.jpg", + "model_url": "https://fal.run/fal-ai/arbiter/image", + "license_type": "commercial", + "date": "2025-12-26T21:15:05.536Z", + "group": { + "key": "arbiter", + "label": "Reference-Free Image" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/arbiter/image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/arbiter/image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/arbiter/image", + "category": "vision", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a87e5ca/ZaCjvwux8DGFjjTamngMy_d770c80051204269911a228ced51b682.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/arbiter/image", + "documentationUrl": "https://fal.ai/models/fal-ai/arbiter/image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ArbiterImageInput": { + "x-fal-order-properties": [ + "measurements", + "inputs" + ], + "type": "object", + "properties": { + "measurements": { + "description": "The measurements to use for the measurement.", + "type": "array", + "title": "Measurements", + "items": { + "enum": [ + "arniqa", + "clip_iqa", + "musiq", + "nima", + "lapvar" + ], + "type": "string" + } + }, + "inputs": { + "description": "The inputs to use for the measurement.", + "type": "array", + "title": "Inputs", + "items": { + "$ref": "#/components/schemas/ImageInput" + } + } + }, + "title": "ImageMultiMeasurementInput", + "required": [ + "measurements", + "inputs" + ] + }, + "ArbiterImageOutput": { + "x-fal-order-properties": [ + "values" + ], + "type": "object", + "properties": { + "values": { + "description": "The values of the measurements.", + "type": "array", + "title": "Values", + "items": { + "additionalProperties": { + "anyOf": [ + { + "type": "number" + }, + { + "additionalProperties": { + "type": "number" + }, + "type": "object" + } + ] + }, + "type": "object" + } + } + }, + "title": "MultiMeasurementOutput" + }, + "ImageInput": { + "x-fal-order-properties": [ + "hypothesis" + ], + "type": "object", + "properties": { + "hypothesis": { + "description": "The image to use for the measurement.", + "type": "string", + "title": "Hypothesis" + } + }, + "title": "ImageInput", + "required": [ + "hypothesis" + ] + } + } + }, + "paths": { + "/fal-ai/arbiter/image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/arbiter/image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
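The three Arbiter endpoints above (image/text, image/image, and plain image) share one request/response shape: a list of measurement names plus a list of inputs, answered by one value map per input (MultiMeasurementOutput). A sketch of the payloads and result types, with hypothetical example URLs; it assumes the queue flow shown earlier.

// Matches the anyOf in the schema: a score may be a number or a nested number map.
type MeasurementValue = number | Record<string, number>;
type MultiMeasurementOutput = { values: Array<Record<string, MeasurementValue>> };

// Reference-based comparison (fal-ai/arbiter/image/image):
// every input pairs a reference image with a hypothesis image.
const imageImagePayload = {
  measurements: ['ssim', 'lpips'],
  inputs: [
    {
      reference: 'https://example.com/original.png',   // hypothetical URL
      hypothesis: 'https://example.com/generated.png', // hypothetical URL
    },
  ],
};

// Reference-free quality (fal-ai/arbiter/image): inputs carry only a hypothesis.
const imagePayload = {
  measurements: ['musiq', 'nima'],
  inputs: [{ hypothesis: 'https://example.com/generated.png' }],
};

// Semantic alignment (fal-ai/arbiter/image/text): the reference is a caption, not an image.
const imageTextPayload = {
  measurements: ['clip_score'],
  inputs: [
    { hypothesis: 'https://example.com/generated.png', reference: 'a red bicycle on a beach' },
  ],
};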
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/arbiter/image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ArbiterImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/arbiter/image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ArbiterImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "half-moon-ai/ai-detector/detect-image", + "metadata": { + "display_name": "Ai Detector", + "category": "vision", + "description": "AI Detector (Image) is an advanced service that analyzes a single picture and returns a verdict on whether it was likely created by AI.", + "status": "active", + "tags": [ + "utility", + "" + ], + "updated_at": "2026-01-26T21:41:53.919Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/0a8687b8/3X6GqiXrTw3msn-lSf4It_00c1009711474f82a5d2e295224b3ee5.jpg", + "model_url": "https://fal.run/half-moon-ai/ai-detector/detect-image", + "license_type": "commercial", + "date": "2025-12-16T12:16:06.007Z", + "group": { + "key": "Half-Moon-Detection", + "label": "Image Detection" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for half-moon-ai/ai-detector/detect-image", + "version": "1.0.0", + "description": "The OpenAPI schema for the half-moon-ai/ai-detector/detect-image queue.", + "x-fal-metadata": { + "endpointId": "half-moon-ai/ai-detector/detect-image", + "category": "vision", + "thumbnailUrl": "https://v3b.fal.media/files/b/0a8687b8/3X6GqiXrTw3msn-lSf4It_00c1009711474f82a5d2e295224b3ee5.jpg", + "playgroundUrl": "https://fal.ai/models/half-moon-ai/ai-detector/detect-image", + "documentationUrl": "https://fal.ai/models/half-moon-ai/ai-detector/detect-image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "AiDetectorDetectImageInput": { + "x-fal-order-properties": [ + "image_url" + ], + "type": "object", + "properties": { + "image_url": { + "examples": [ + "https://v3b.fal.media/files/b/zebra/3E1W5H2yzHnq4ivo8kdW7_beach_google.png" + ], + "description": "URL pointing to an image to analyze for AI generation.(Max: 3000 characters)", + "type": "string", + "title": "Image Url" + } + }, + "title": "ImageDetectionInput", + "required": [ + "image_url" + ] + }, + "AiDetectorDetectImageOutput": { + "x-fal-order-properties": [ + "verdict", + "confidence", + "is_ai_generated", + "latency" + ], + "type": "object", + "properties": { + "latency": { + "examples": [ + 14.015489339828491 + ], + "title": "Latency", + "type": "number" + }, + "verdict": { + "examples": [ + "ai" + ], + "title": "Verdict", + "type": "string" + }, + "is_ai_generated": { + "examples": [ + true + ], + "title": "Is Ai Generated", + "type": "boolean" + }, + "confidence": { + "examples": [ + 0.92 + ], + "title": "Confidence", + "type": "number" + } + }, + "title": "AIImageDetectionOutput", + "required": [ + "verdict", + "confidence", + "is_ai_generated", + "latency" + ] + } + } + }, + "paths": { + "/half-moon-ai/ai-detector/detect-image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/half-moon-ai/ai-detector/detect-image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
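The detector takes a single image_url and returns a flat verdict object. A typed sketch of handling that response; note the schema does not state the units of `latency` (the example value of roughly 14.0 suggests seconds).

type AiDetectorResult = {
  verdict: string;           // e.g. "ai"
  confidence: number;        // e.g. 0.92
  is_ai_generated: boolean;
  latency: number;           // units unspecified in the schema; example suggests seconds
};

function summarize(r: AiDetectorResult): string {
  const pct = (r.confidence * 100).toFixed(1);
  return r.is_ai_generated
    ? `Likely AI-generated (${pct}% confidence)`
    : `Likely not AI-generated (${pct}% confidence)`;
}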
+ } + } + } + } + } + } + } + } + }, + "/half-moon-ai/ai-detector/detect-image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AiDetectorDetectImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/half-moon-ai/ai-detector/detect-image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AiDetectorDetectImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/sam-3/image/embed", + "metadata": { + "display_name": "Sam 3", + "category": "vision", + "description": "SAM 3 is a unified foundation model for promptable segmentation in images and videos. It can detect, segment, and track objects using text or visual prompts such as points, boxes, and masks. ", + "status": "active", + "tags": [ + "embeddings", + "mask", + "real-time" + ], + "updated_at": "2026-01-26T21:42:19.159Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/zebra/nV3tuTldL1rK_c-slZGQ7_a5ad14e94b304162be3202f54a1671f7.jpg", + "model_url": "https://fal.run/fal-ai/sam-3/image/embed", + "license_type": "commercial", + "date": "2025-11-20T20:21:00.592Z", + "group": { + "key": "sam3", + "label": "Image Embeddings" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/sam-3/image/embed", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/sam-3/image/embed queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/sam-3/image/embed", + "category": "vision", + "thumbnailUrl": "https://v3b.fal.media/files/b/zebra/nV3tuTldL1rK_c-slZGQ7_a5ad14e94b304162be3202f54a1671f7.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/sam-3/image/embed", + "documentationUrl": "https://fal.ai/models/fal-ai/sam-3/image/embed/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Sam3ImageEmbedInput": { + "x-fal-order-properties": [ + "image_url" + ], + "type": "object", + "properties": { + "image_url": { + "examples": [ + "https://raw.githubusercontent.com/facebookresearch/segment-anything-2/main/notebooks/images/truck.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "URL of the image to embed." + } + }, + "title": "SAM3EmbeddingInput", + "required": [ + "image_url" + ] + }, + "Sam3ImageEmbedOutput": { + "x-fal-order-properties": [ + "embedding_b64" + ], + "type": "object", + "properties": { + "embedding_b64": { + "title": "Embedding B64", + "type": "string", + "description": "Embedding of the image" + } + }, + "title": "SAM3EmbeddingOutput", + "required": [ + "embedding_b64" + ] + } + } + }, + "paths": { + "/fal-ai/sam-3/image/embed/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sam-3/image/embed/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
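SAM 3's embed endpoint returns the image embedding as a single base64 string (`embedding_b64`). The schema does not say how the bytes are laid out; the sketch below assumes a raw little-endian float32 buffer, which is a common but unverified choice.

// Assumption: embedding_b64 decodes to raw float32 data. If the real payload
// is e.g. an .npy file or float16, this decoder would need to change.
function decodeEmbedding(embeddingB64: string): Float32Array {
  const bytes = Buffer.from(embeddingB64, 'base64'); // Node.js base64 decode
  return new Float32Array(bytes.buffer, bytes.byteOffset, bytes.byteLength / 4);
}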
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/sam-3/image/embed": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sam3ImageEmbedInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sam-3/image/embed/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sam3ImageEmbedOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "openrouter/router/vision", + "metadata": { + "display_name": "OpenRouter [Vision]", + "category": "vision", + "description": "Run any VLM (Vision Language Model) with fal, powered by OpenRouter.", + "status": "active", + "tags": [], + "updated_at": "2026-01-26T21:42:23.644Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/penguin/v-wl5CGbHxNVatcGXntIY_e14c7922d88348769a90469d1c206501.jpg", + "model_url": "https://fal.run/openrouter/router/vision", + "license_type": "commercial", + "date": "2025-11-12T20:20:52.668Z", + "group": { + "key": "openrouter/router", + "label": "Any VLM" + }, + "highlighted": false, + "kind": "inference", + "stream_url": "https://fal.run/openrouter/router/vision/stream", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for openrouter/router/vision", + "version": "1.0.0", + "description": "The OpenAPI schema for the openrouter/router/vision queue.", + "x-fal-metadata": { + "endpointId": "openrouter/router/vision", + "category": "vision", + "thumbnailUrl": "https://v3b.fal.media/files/b/penguin/v-wl5CGbHxNVatcGXntIY_e14c7922d88348769a90469d1c206501.jpg", + "playgroundUrl": "https://fal.ai/models/openrouter/router/vision", + "documentationUrl": "https://fal.ai/models/openrouter/router/vision/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "RouterVisionInput": { + "x-fal-order-properties": [ + "image_urls", + "prompt", + "system_prompt", + "model", + "reasoning", + "temperature", + "max_tokens" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Caption this image for a text-to-image model with as much detail as possible." + ], + "description": "Prompt to be used for the image", + "type": "string", + "title": "Prompt" + }, + "system_prompt": { + "examples": [ + "Only answer the question, do not provide any additional information or add any prefix/suffix other than the answer of the original question. Don't use markdown." + ], + "description": "System prompt to provide context or instructions to the model", + "type": "string", + "title": "System Prompt" + }, + "reasoning": { + "description": "Should reasoning be the part of the final answer.", + "type": "boolean", + "title": "Reasoning", + "default": false + }, + "model": { + "examples": [ + "google/gemini-2.5-flash", + "anthropic/claude-sonnet-4.5", + "openai/gpt-4o", + "qwen/qwen3-vl-235b-a22b-instruct", + "x-ai/grok-4-fast" + ], + "description": "Name of the model to use. Charged based on actual token usage.", + "type": "string", + "title": "Model" + }, + "max_tokens": { + "minimum": 1, + "description": "This sets the upper limit for the number of tokens the model can generate in response. It won't produce more than this limit. The maximum value is the context length minus the prompt length.", + "type": "integer", + "title": "Max Tokens" + }, + "temperature": { + "minimum": 0, + "maximum": 2, + "type": "number", + "description": "This setting influences the variety in the model's responses. Lower values lead to more predictable and typical responses, while higher values encourage more diverse and less common responses. At 0, the model always gives the same response for a given input.", + "title": "Temperature", + "default": 1 + }, + "image_urls": { + "examples": [ + [ + "https://fal.media/files/tiger/4Ew1xYW6oZCs6STQVC7V8_86440216d0fe42e4b826d03a2121468e.jpg" + ] + ], + "description": "List of image URLs to be processed", + "type": "array", + "title": "Image Urls", + "items": { + "type": "string" + } + } + }, + "title": "VisionInput", + "required": [ + "prompt", + "image_urls", + "model" + ] + }, + "RouterVisionOutput": { + "x-fal-order-properties": [ + "output", + "usage" + ], + "type": "object", + "properties": { + "usage": { + "examples": [ + { + "prompt_tokens": 1340, + "total_tokens": 1403, + "completion_tokens": 63, + "cost": 0.0005595 + } + ], + "description": "Token usage information", + "title": "Usage", + "allOf": [ + { + "$ref": "#/components/schemas/UsageInfo" + } + ] + }, + "output": { + "examples": [ + "A close-up of a tiger's face focusing on its bright orange iris and the area around its eye, with white fur eyebrows and a contrasting black and rich orange striped fur pattern. The word \"FLUX\" is overlaid in bold, white, brush-stroke styled text across the tiger's face." 
+ ], + "description": "Generated output", + "type": "string", + "title": "Output" + } + }, + "title": "VisionOutput", + "required": [ + "output" + ] + }, + "UsageInfo": { + "x-fal-order-properties": [ + "prompt_tokens", + "completion_tokens", + "total_tokens", + "cost" + ], + "type": "object", + "properties": { + "prompt_tokens": { + "title": "Prompt Tokens", + "type": "integer" + }, + "total_tokens": { + "title": "Total Tokens", + "type": "integer", + "default": 0 + }, + "completion_tokens": { + "title": "Completion Tokens", + "type": "integer" + }, + "cost": { + "title": "Cost", + "type": "number" + } + }, + "title": "UsageInfo", + "required": [ + "cost" + ] + } + } + }, + "paths": { + "/openrouter/router/vision/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/openrouter/router/vision/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
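RouterVisionInput makes the model id part of the payload, so this single endpoint fronts any VLM that OpenRouter routes to, billed by actual token usage. A sketch of a request payload and of reading the usage block; since `cost` is the only required UsageInfo field, the token counts are guarded.

const visionPayload = {
  model: 'google/gemini-2.5-flash', // any routed model id; see the examples in the schema
  prompt: 'Caption this image for a text-to-image model with as much detail as possible.',
  image_urls: ['https://example.com/photo.jpg'], // hypothetical image URL
  temperature: 0.2, // lower values give more deterministic output, per the schema
  max_tokens: 256,
};

function describeUsage(u: { prompt_tokens?: number; completion_tokens?: number; cost: number }) {
  return `$${u.cost.toFixed(6)} (${u.prompt_tokens ?? '?'} in / ${u.completion_tokens ?? '?'} out tokens)`;
}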
+ } + } + } + } + } + } + } + } + }, + "/openrouter/router/vision": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RouterVisionInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/openrouter/router/vision/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RouterVisionOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/moondream3-preview/detect", + "metadata": { + "display_name": "Moondream3 Preview [Detect]", + "category": "vision", + "description": "Moondream 3 is a vision language model that brings frontier-level visual reasoning with native object detection, pointing, and OCR capabilities to real-world applications requiring fast, inexpensive inference at scale.", + "status": "active", + "tags": [ + "Vision" + ], + "updated_at": "2026-01-26T21:42:40.720Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/penguin/Ma6NXbAm7Gi18CcciKraL_c5133f964896437ca8fdb88652c1a3b9.jpg", + "model_url": "https://fal.run/fal-ai/moondream3-preview/detect", + "license_type": "commercial", + "date": "2025-10-09T20:57:37.622Z", + "group": { + "key": "moondream3-preview", + "label": "Detect" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/moondream3-preview/detect", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/moondream3-preview/detect queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/moondream3-preview/detect", + "category": "vision", + "thumbnailUrl": "https://v3b.fal.media/files/b/penguin/Ma6NXbAm7Gi18CcciKraL_c5133f964896437ca8fdb88652c1a3b9.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/moondream3-preview/detect", + "documentationUrl": "https://fal.ai/models/fal-ai/moondream3-preview/detect/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Moondream3PreviewDetectInput": { + "x-fal-order-properties": [ + "image_url", + "prompt", + "preview" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Speed limit" + ], + "title": "Prompt", + "type": "string", + "description": "Object to be detected in the image", + "minLength": 1 + }, + "preview": { + "examples": [ + true + ], + "title": "Preview", + "type": "boolean", + "description": "Whether to preview the output", + "default": false + }, + "image_url": { + "x-fal": { + "timeout": 20, + "max_height": 7000, + "max_width": 7000 + }, + "title": "Image URL", + "type": "string", + "description": "URL of the image to be processed\n\nMax width: 7000px, Max height: 7000px, Timeout: 20.0s", + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/moondream-3-preview/detect_in.jpg" + ] + } + }, + "title": "MoondreamDetectInput", + "required": [ + "image_url", + "prompt" + ] + }, + "Moondream3PreviewDetectOutput": { + "x-fal-order-properties": [ + "finish_reason", + "usage_info", + "objects", + "image" + ], + "type": "object", + "properties": { + "finish_reason": { + "examples": [ + "stop" + ], + "title": "Finish Reason", + "type": "string", + "description": "Reason for finishing the output generation" + }, + "image": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/moondream-3-preview/detect_out.png" + } + ], + "title": "Image", + "description": "Image with bounding boxes drawn around detected objects", + "allOf": [ + { + "$ref": "#/components/schemas/ImageFile" + } + ] + }, + "objects": { + "examples": [ + [ + { + "y_min": 0.16308235274382246, + "x_max": 0.8755747037932524, + "x_min": 0.8174849247502471, + "y_max": 0.3061258583998726 + }, + { + "y_min": 0.0987853935125991, + "x_max": 0.7155113776357592, + "x_min": 0.6706078794512399, + "y_max": 0.21011001215700012 + } + ] + ], + "title": "Objects", + "type": "array", + "description": "List of detected objects with their bounding boxes", + "items": { + "$ref": "#/components/schemas/Object" + } + }, + "usage_info": { + "examples": [ + { + "output_tokens": 23, + "decode_time_ms": 811.5944429300725, + "input_tokens": 737, + "ttft_ms": 91.87838807702065, + "prefill_time_ms": 54.45315001998097 + } + ], + "title": "Usage Info", + "description": "Usage information for the request", + "allOf": [ + { + "$ref": "#/components/schemas/UsageInfo" + } + ] + } + }, + "title": "MoondreamDetectOutput", + "required": [ + "finish_reason", + "usage_info", + "objects" + ] + }, + "ImageFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the image" + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the image" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." 
+ }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "ImageFile", + "required": [ + "url" + ] + }, + "Object": { + "x-fal-order-properties": [ + "x_min", + "y_min", + "x_max", + "y_max" + ], + "type": "object", + "properties": { + "y_min": { + "title": "Y Min", + "type": "number", + "description": "Top boundary of detection box in normalized format (0 to 1)" + }, + "x_max": { + "title": "X Max", + "type": "number", + "description": "Right boundary of detection box in normalized format (0 to 1)" + }, + "x_min": { + "title": "X Min", + "type": "number", + "description": "Left boundary of detection box in normalized format (0 to 1)" + }, + "y_max": { + "title": "Y Max", + "type": "number", + "description": "Bottom boundary of detection box in normalized format (0 to 1)" + } + }, + "title": "Object", + "required": [ + "x_min", + "y_min", + "x_max", + "y_max" + ] + }, + "UsageInfo": { + "x-fal-order-properties": [ + "input_tokens", + "output_tokens", + "prefill_time_ms", + "decode_time_ms", + "ttft_ms" + ], + "type": "object", + "properties": { + "output_tokens": { + "title": "Output Tokens", + "type": "integer", + "description": "Number of output tokens generated" + }, + "decode_time_ms": { + "title": "Decode Time Ms", + "type": "number", + "description": "Time taken for decoding in milliseconds" + }, + "input_tokens": { + "title": "Input Tokens", + "type": "integer", + "description": "Number of input tokens processed" + }, + "ttft_ms": { + "title": "Ttft Ms", + "type": "number", + "description": "Time to first token in milliseconds" + }, + "prefill_time_ms": { + "title": "Prefill Time Ms", + "type": "number", + "description": "Time taken for prefill in milliseconds" + } + }, + "title": "UsageInfo", + "required": [ + "input_tokens", + "output_tokens", + "prefill_time_ms", + "decode_time_ms", + "ttft_ms" + ] + } + } + }, + "paths": { + "/fal-ai/moondream3-preview/detect/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/moondream3-preview/detect/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
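Moondream's detect output reports bounding boxes in normalized [0, 1] coordinates (the Object schema above), so rendering them over the source image is a simple scale by its pixel size:

type Box = { x_min: number; y_min: number; x_max: number; y_max: number };

// Scale a normalized detection box to pixel coordinates for drawing.
function toPixels(box: Box, width: number, height: number) {
  return {
    left: Math.round(box.x_min * width),
    top: Math.round(box.y_min * height),
    right: Math.round(box.x_max * width),
    bottom: Math.round(box.y_max * height),
  };
}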
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/moondream3-preview/detect": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Moondream3PreviewDetectInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/moondream3-preview/detect/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Moondream3PreviewDetectOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/moondream3-preview/point", + "metadata": { + "display_name": "Moondream3 Preview [Point]", + "category": "vision", + "description": "Moondream 3 is a vision language model that brings frontier-level visual reasoning with native object detection, pointing, and OCR capabilities to real-world applications requiring fast, inexpensive inference at scale.", + "status": "active", + "tags": [ + "Vision" + ], + "updated_at": "2026-01-26T21:42:40.848Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/koala/r903D3raEamwM13Bmn1Lp_fa12f25ab96c41008fc48492bbdbbea2.jpg", + "model_url": "https://fal.run/fal-ai/moondream3-preview/point", + "license_type": "commercial", + "date": "2025-10-09T20:55:39.296Z", + "group": { + "key": "moondream3-preview", + "label": "Point" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/moondream3-preview/point", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/moondream3-preview/point queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/moondream3-preview/point", + "category": "vision", + "thumbnailUrl": "https://v3b.fal.media/files/b/koala/r903D3raEamwM13Bmn1Lp_fa12f25ab96c41008fc48492bbdbbea2.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/moondream3-preview/point", + "documentationUrl": "https://fal.ai/models/fal-ai/moondream3-preview/point/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Moondream3PreviewPointInput": { + "x-fal-order-properties": [ + "image_url", + "prompt", + "preview" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "bottle caps" + ], + "title": "Prompt", + "type": "string", + "description": "Object to be located in the image", + "minLength": 1 + }, + "preview": { + "examples": [ + true + ], + "title": "Preview", + "type": "boolean", + "description": "Whether to preview the output", + "default": false + }, + "image_url": { + "x-fal": { + "timeout": 20, + "max_height": 7000, + "max_width": 7000 + }, + "title": "Image URL", + "type": "string", + "description": "URL of the image to be processed\n\nMax width: 7000px, Max height: 7000px, Timeout: 20.0s", + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/moondream-3-preview/point_in.jpg" + ] + } + }, + "title": "MoondreamPointInput", + "required": [ + "image_url", + "prompt" + ] + }, + "Moondream3PreviewPointOutput": { + "x-fal-order-properties": [ + "finish_reason", + "usage_info", + "points", + "image" + ], + "type": "object", + "properties": { + "points": { + "examples": [ + [ + { + "y": 0.8660801564027371, + "x": 0.11827956989247312 + }, + { + "y": 0.8660801564027371, + "x": 0.3118279569892473 + }, + { + "y": 0.8660801564027371, + "x": 0.5953079178885631 + }, + { + "y": 0.8758553274682307, + "x": 0.7888563049853372 + }, + { + "y": 0.5796676441837733, + "x": 0.9423264907135875 + }, + { + "y": 0.5796676441837733, + "x": 0.6324535679374389 + }, + { + "y": 0.6021505376344086, + "x": 0.44281524926686217 + }, + { + "y": 0.5982404692082112, + "x": 0.3010752688172043 + }, + { + "y": 0.4701857282502444, + "x": 0.20332355816226785 + }, + { + "y": 0.4506353861192571, + "x": 0.053763440860215055 + }, + { + "y": 0.6021505376344086, + "x": 0.053763440860215055 + } + ] + ], + "title": "Points", + "type": "array", + "description": "List of points marking the detected objects", + "items": { + "$ref": "#/components/schemas/Point" + } + }, + "finish_reason": { + "examples": [ + "stop" + ], + "title": "Finish Reason", + "type": "string", + "description": "Reason for finishing the output generation" + }, + "image": { + "examples": [ + { + "url": "https://storage.googleapis.com/falserverless/example_outputs/moondream-3-preview/point_out.png" + } + ], + "title": "Image", + "description": "Image with points drawn on detected objects", + "allOf": [ + { + "$ref": "#/components/schemas/ImageFile" + } + ] + }, + "usage_info": { + "examples": [ + { + "output_tokens": 23, + "decode_time_ms": 811.5944429300725, + "input_tokens": 737, + "ttft_ms": 91.87838807702065, + "prefill_time_ms": 54.45315001998097 + } + ], + "title": "Usage Info", + "description": "Usage information for the request", + "allOf": [ + { + "$ref": "#/components/schemas/UsageInfo" + } + ] + } + }, + "title": "MoondreamPointOutput", + "required": [ + "finish_reason", + "usage_info", + "points" + ] + }, + "Point": { + "x-fal-order-properties": [ + "x", + "y" + ], + "type": "object", + "properties": { + "y": { + "title": "Y", + "type": "number", + "description": "Y coordinate of the point in normalized format (0 to 1)" + }, + "x": { + "title": "X", + "type": "number", + "description": "X coordinate of the point in normalized format (0 to 1)" + } + }, + "title": "Point", + "required": [ + "x", + "y" + ] + }, + "ImageFile": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + 
"type": "object", + "properties": { + "height": { + "title": "Height", + "type": "integer", + "description": "The height of the image" + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "title": "Width", + "type": "integer", + "description": "The width of the image" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "title": "ImageFile", + "required": [ + "url" + ] + }, + "UsageInfo": { + "x-fal-order-properties": [ + "input_tokens", + "output_tokens", + "prefill_time_ms", + "decode_time_ms", + "ttft_ms" + ], + "type": "object", + "properties": { + "output_tokens": { + "title": "Output Tokens", + "type": "integer", + "description": "Number of output tokens generated" + }, + "decode_time_ms": { + "title": "Decode Time Ms", + "type": "number", + "description": "Time taken for decoding in milliseconds" + }, + "input_tokens": { + "title": "Input Tokens", + "type": "integer", + "description": "Number of input tokens processed" + }, + "ttft_ms": { + "title": "Ttft Ms", + "type": "number", + "description": "Time to first token in milliseconds" + }, + "prefill_time_ms": { + "title": "Prefill Time Ms", + "type": "number", + "description": "Time taken for prefill in milliseconds" + } + }, + "title": "UsageInfo", + "required": [ + "input_tokens", + "output_tokens", + "prefill_time_ms", + "decode_time_ms", + "ttft_ms" + ] + } + } + }, + "paths": { + "/fal-ai/moondream3-preview/point/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/moondream3-preview/point/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/moondream3-preview/point": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Moondream3PreviewPointInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/moondream3-preview/point/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Moondream3PreviewPointOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/moondream3-preview/query", + "metadata": { + "display_name": "Moondream 3 Preview [Query]", + "category": "vision", + "description": "Moondream 3 is a vision language model that brings frontier-level visual reasoning with native object detection, pointing, and OCR capabilities to real-world applications requiring fast, inexpensive inference at scale.", + "status": "active", + "tags": [ + "Vision" + ], + "updated_at": "2026-01-26T21:42:41.040Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/monkey/nWpJhvShCa7NcOUhC0JJ-_7dec4ab5fb2c49869d74af99dfa66bf4.jpg", + "model_url": "https://fal.run/fal-ai/moondream3-preview/query", + "license_type": "commercial", + "date": "2025-10-09T20:54:23.184Z", + "group": { + "key": "moondream3-preview", + "label": "Query" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/moondream3-preview/query", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/moondream3-preview/query queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/moondream3-preview/query", + "category": "vision", + "thumbnailUrl": "https://v3b.fal.media/files/b/monkey/nWpJhvShCa7NcOUhC0JJ-_7dec4ab5fb2c49869d74af99dfa66bf4.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/moondream3-preview/query", + "documentationUrl": "https://fal.ai/models/fal-ai/moondream3-preview/query/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
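Note for readers of this fixture: every endpoint captured in this file follows the same queue contract spelled out by the QueueStatus schema and paths above (POST the input to queue.fal.run, poll the returned status_url, then fetch response_url). Below is a minimal TypeScript sketch of that flow against the point endpoint, not this package's actual client: the `falQueue` helper name is hypothetical, and the `Key <FAL_KEY>` Authorization format is an assumption, since the schema only states that the header carries a "Fal Key".

```ts
// Sketch only, not the package's real client. Assumptions: Node 18+ (global
// fetch) and fal's `Key <FAL_KEY>` Authorization format; the schema above
// only says the `Authorization` header carries a "Fal Key".
const FAL_KEY = process.env.FAL_KEY ?? ''

interface QueueStatus {
  status: 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED'
  request_id: string
  status_url?: string // returned in practice, but not required by the schema
  response_url?: string
}

export async function falQueue<TInput, TOutput>(
  endpointId: string,
  input: TInput,
): Promise<TOutput> {
  const headers = {
    Authorization: `Key ${FAL_KEY}`,
    'Content-Type': 'application/json',
  }
  // 1. Submit: POST /{endpointId} answers with a QueueStatus envelope.
  const submitRes = await fetch(`https://queue.fal.run/${endpointId}`, {
    method: 'POST',
    headers,
    body: JSON.stringify(input),
  })
  const queued = (await submitRes.json()) as QueueStatus
  if (!queued.status_url || !queued.response_url) {
    throw new Error(`queue gave no status/response URLs for ${queued.request_id}`)
  }
  // 2. Poll: GET {status_url} until the request reports COMPLETED.
  let status = queued.status
  while (status !== 'COMPLETED') {
    await new Promise((resolve) => setTimeout(resolve, 1000))
    const poll = await fetch(queued.status_url, { headers })
    status = ((await poll.json()) as QueueStatus).status
  }
  // 3. Fetch the result: GET {response_url} returns the endpoint's output schema.
  const result = await fetch(queued.response_url, { headers })
  return (await result.json()) as TOutput
}

// Usage against the point endpoint documented above (the image URL is made up).
const pointed = await falQueue<
  { image_url: string; prompt: string; preview?: boolean },
  { points: Array<{ x: number; y: number }>; finish_reason: string }
>('fal-ai/moondream3-preview/point', {
  image_url: 'https://example.com/bottle-caps.jpg',
  prompt: 'bottle caps',
})
console.log(`${pointed.points.length} points found`)
```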
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Moondream3PreviewQueryInput": { + "x-fal-order-properties": [ + "image_url", + "prompt", + "reasoning", + "temperature", + "top_p" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "List the safety measures taken by this worker in a JSON array under `safety_measures` key" + ], + "title": "Prompt", + "type": "string", + "description": "Query to be asked in the image", + "minLength": 1 + }, + "top_p": { + "minimum": 0, + "title": "Top P", + "type": "number", + "description": "Nucleus sampling probability mass to use, between 0 and 1.", + "maximum": 1 + }, + "temperature": { + "minimum": 0, + "title": "Temperature", + "type": "number", + "description": "Sampling temperature to use, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If not set, defaults to 0.", + "maximum": 1 + }, + "reasoning": { + "title": "Reasoning", + "type": "boolean", + "description": "Whether to include detailed reasoning behind the answer", + "default": true + }, + "image_url": { + "x-fal": { + "timeout": 20, + "max_height": 7000, + "max_width": 7000 + }, + "title": "Image URL", + "type": "string", + "description": "URL of the image to be processed\n\nMax width: 7000px, Max height: 7000px, Timeout: 20.0s", + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/moondream-3-preview/query_in.jpg" + ] + } + }, + "title": "MoondreamQueryInput", + "required": [ + "image_url", + "prompt" + ] + }, + "Moondream3PreviewQueryOutput": { + "x-fal-order-properties": [ + "finish_reason", + "usage_info", + "output", + "reasoning" + ], + "type": "object", + "properties": { + "finish_reason": { + "examples": [ + "stop" + ], + "title": "Finish Reason", + "type": "string", + "description": "Reason for finishing the output generation" + }, + "reasoning": { + "examples": [ + "The worker is wearing a red hard hat for head protection and safety glasses for eye protection." 
+ ], + "title": "Reasoning", + "type": "string", + "description": "Detailed reasoning behind the answer, if enabled" + }, + "output": { + "examples": [ + "{\n \"safety_measures\": [\n \"Red hard hat\",\n \"Safety glasses\"\n ]\n}" + ], + "title": "Output", + "type": "string", + "description": "Answer to the query about the image" + }, + "usage_info": { + "examples": [ + { + "output_tokens": 23, + "decode_time_ms": 811.5944429300725, + "input_tokens": 737, + "ttft_ms": 91.87838807702065, + "prefill_time_ms": 54.45315001998097 + } + ], + "title": "Usage Info", + "description": "Usage information for the request", + "allOf": [ + { + "$ref": "#/components/schemas/UsageInfo" + } + ] + } + }, + "title": "MoondreamQueryOutput", + "required": [ + "finish_reason", + "usage_info", + "output" + ] + }, + "UsageInfo": { + "x-fal-order-properties": [ + "input_tokens", + "output_tokens", + "prefill_time_ms", + "decode_time_ms", + "ttft_ms" + ], + "type": "object", + "properties": { + "output_tokens": { + "title": "Output Tokens", + "type": "integer", + "description": "Number of output tokens generated" + }, + "decode_time_ms": { + "title": "Decode Time Ms", + "type": "number", + "description": "Time taken for decoding in milliseconds" + }, + "input_tokens": { + "title": "Input Tokens", + "type": "integer", + "description": "Number of input tokens processed" + }, + "ttft_ms": { + "title": "Ttft Ms", + "type": "number", + "description": "Time to first token in milliseconds" + }, + "prefill_time_ms": { + "title": "Prefill Time Ms", + "type": "number", + "description": "Time taken for prefill in milliseconds" + } + }, + "title": "UsageInfo", + "required": [ + "input_tokens", + "output_tokens", + "prefill_time_ms", + "decode_time_ms", + "ttft_ms" + ] + } + } + }, + "paths": { + "/fal-ai/moondream3-preview/query/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/moondream3-preview/query/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/moondream3-preview/query": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Moondream3PreviewQueryInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/moondream3-preview/query/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Moondream3PreviewQueryOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/moondream3-preview/caption", + "metadata": { + "display_name": "Moondream3 Preview [Caption]", + "category": "vision", + "description": "Moondream 3 is a vision language model that brings frontier-level visual reasoning with native object detection, pointing, and OCR capabilities to real-world applications requiring fast, inexpensive inference at scale.", + "status": "active", + "tags": [ + "Vision" + ], + "updated_at": "2026-01-26T21:42:41.169Z", + "is_favorited": false, + "thumbnail_url": "https://v3b.fal.media/files/b/elephant/MThU-RSBqLWpvP60bOvk__d2902b4bea314d49bb01fe92584a0922.jpg", + "model_url": "https://fal.run/fal-ai/moondream3-preview/caption", + "license_type": "commercial", + "date": "2025-10-09T20:50:44.363Z", + "group": { + "key": "moondream3-preview", + "label": "Caption" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/moondream3-preview/caption", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/moondream3-preview/caption queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/moondream3-preview/caption", + "category": "vision", + "thumbnailUrl": "https://v3b.fal.media/files/b/elephant/MThU-RSBqLWpvP60bOvk__d2902b4bea314d49bb01fe92584a0922.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/moondream3-preview/caption", + "documentationUrl": "https://fal.ai/models/fal-ai/moondream3-preview/caption/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
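The query endpoint above layers reasoning, temperature, and top_p controls onto the same queue flow. A sketch reusing the hypothetical `falQueue` helper from the earlier note (the image URL is again a placeholder):

```ts
// Sketch: visual question answering via fal-ai/moondream3-preview/query.
// Reuses the hypothetical `falQueue` helper from the earlier aside.
const answer = await falQueue<
  {
    image_url: string
    prompt: string
    reasoning?: boolean // schema default: true
    temperature?: number // 0..1; the schema says unset behaves like 0
    top_p?: number // 0..1
  },
  { output: string; reasoning?: string; finish_reason: string }
>('fal-ai/moondream3-preview/query', {
  image_url: 'https://example.com/worksite.jpg', // hypothetical image
  prompt: 'List the safety measures taken by this worker',
  reasoning: true,
})
console.log(answer.reasoning, answer.output)
```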
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Moondream3PreviewCaptionInput": { + "x-fal-order-properties": [ + "image_url", + "length", + "temperature", + "top_p" + ], + "type": "object", + "properties": { + "top_p": { + "minimum": 0, + "title": "Top P", + "type": "number", + "description": "Nucleus sampling probability mass to use, between 0 and 1.", + "maximum": 1 + }, + "length": { + "enum": [ + "short", + "normal", + "long" + ], + "title": "Length", + "type": "string", + "description": "Length of the caption to generate", + "default": "normal" + }, + "temperature": { + "minimum": 0, + "title": "Temperature", + "type": "number", + "description": "Sampling temperature to use, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If not set, defaults to 0.", + "maximum": 1 + }, + "image_url": { + "x-fal": { + "timeout": 20, + "max_height": 7000, + "max_width": 7000 + }, + "title": "Image URL", + "type": "string", + "description": "URL of the image to be processed\n\nMax width: 7000px, Max height: 7000px, Timeout: 20.0s", + "examples": [ + "https://storage.googleapis.com/falserverless/example_inputs/moondream-3-preview/caption_in.jpg" + ] + } + }, + "title": "MoondreamCaptionInput", + "required": [ + "image_url" + ] + }, + "Moondream3PreviewCaptionOutput": { + "x-fal-order-properties": [ + "finish_reason", + "usage_info", + "output" + ], + "type": "object", + "properties": { + "finish_reason": { + "examples": [ + "stop" + ], + "title": "Finish Reason", + "type": "string", + "description": "Reason for finishing the output generation" + }, + "output": { + "examples": [ + "A hedgehog is captured in a close-up shot, focusing on its face and nose. The hedgehog's spines are visible along its back, and its nose is dark and wet-looking. A gold ring with a small diamond is positioned on the grass in front of the hedgehog, partially obscured by its nose. The background is a blurred green grassy field, with small white flowers scattered throughout the grass." 
+ ], + "title": "Output", + "type": "string", + "description": "Generated caption for the image" + }, + "usage_info": { + "examples": [ + { + "output_tokens": 23, + "decode_time_ms": 811.5944429300725, + "input_tokens": 737, + "ttft_ms": 91.87838807702065, + "prefill_time_ms": 54.45315001998097 + } + ], + "title": "Usage Info", + "description": "Usage information for the request", + "allOf": [ + { + "$ref": "#/components/schemas/UsageInfo" + } + ] + } + }, + "title": "MoondreamCaptionOutput", + "required": [ + "finish_reason", + "usage_info", + "output" + ] + }, + "UsageInfo": { + "x-fal-order-properties": [ + "input_tokens", + "output_tokens", + "prefill_time_ms", + "decode_time_ms", + "ttft_ms" + ], + "type": "object", + "properties": { + "output_tokens": { + "title": "Output Tokens", + "type": "integer", + "description": "Number of output tokens generated" + }, + "decode_time_ms": { + "title": "Decode Time Ms", + "type": "number", + "description": "Time taken for decoding in milliseconds" + }, + "input_tokens": { + "title": "Input Tokens", + "type": "integer", + "description": "Number of input tokens processed" + }, + "ttft_ms": { + "title": "Ttft Ms", + "type": "number", + "description": "Time to first token in milliseconds" + }, + "prefill_time_ms": { + "title": "Prefill Time Ms", + "type": "number", + "description": "Time taken for prefill in milliseconds" + } + }, + "title": "UsageInfo", + "required": [ + "input_tokens", + "output_tokens", + "prefill_time_ms", + "decode_time_ms", + "ttft_ms" + ] + } + } + }, + "paths": { + "/fal-ai/moondream3-preview/caption/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/moondream3-preview/caption/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/moondream3-preview/caption": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Moondream3PreviewCaptionInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/moondream3-preview/caption/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Moondream3PreviewCaptionOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "perceptron/isaac-01/openai/v1/chat/completions", + "metadata": { + "display_name": "Isaac 0.1 [OpenAI Compatible Endpoint]", + "category": "vision", + "description": "OpenAI spec compatible endpoint of Isaac-01 which is a multimodal vision-language model from Perceptron for various vision language tasks.", + "status": "active", + "tags": [ + "multimodal", + "vision" + ], + "updated_at": "2026-01-26T21:42:54.874Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/monkey/720yUJX8pFwlDjjJKAuWT_f7c3757b6d9c4c63b808704c78ff0758.jpg", + "model_url": "https://fal.run/perceptron/isaac-01/openai/v1/chat/completions", + "license_type": "commercial", + "date": "2025-09-17T18:11:25.154Z", + "group": { + "key": "perceptron/isaac-01", + "label": "Isaac 0.1 [OpenAI Compatible Endpoint]" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for perceptron/isaac-01/openai/v1/chat/completions", + "version": "1.0.0", + "description": "The OpenAPI schema for the perceptron/isaac-01/openai/v1/chat/completions queue.", + "x-fal-metadata": { + "endpointId": "perceptron/isaac-01/openai/v1/chat/completions", + "category": "vision", + "thumbnailUrl": "https://fal.media/files/monkey/720yUJX8pFwlDjjJKAuWT_f7c3757b6d9c4c63b808704c78ff0758.jpg", + "playgroundUrl": "https://fal.ai/models/perceptron/isaac-01/openai/v1/chat/completions", + "documentationUrl": "https://fal.ai/models/perceptron/isaac-01/openai/v1/chat/completions/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
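The caption endpoint above differs only in its input knobs, chiefly the `length` enum. Another sketch with the hypothetical `falQueue` helper:

```ts
// Sketch: image captioning via fal-ai/moondream3-preview/caption. The only
// extra knob is `length` ("short" | "normal" | "long", default "normal").
const caption = await falQueue<
  { image_url: string; length?: 'short' | 'normal' | 'long'; temperature?: number; top_p?: number },
  { output: string; finish_reason: string }
>('fal-ai/moondream3-preview/caption', {
  image_url: 'https://example.com/hedgehog.jpg', // hypothetical image
  length: 'short',
})
console.log(caption.output)
```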
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Isaac01OpenaiV1ChatCompletionsInput": {}, + "Isaac01OpenaiV1ChatCompletionsOutput": {} + } + }, + "paths": { + "/perceptron/isaac-01/openai/v1/chat/completions/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/perceptron/isaac-01/openai/v1/chat/completions/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/perceptron/isaac-01/openai/v1/chat/completions": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Isaac01OpenaiV1ChatCompletionsInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/perceptron/isaac-01/openai/v1/chat/completions/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Isaac01OpenaiV1ChatCompletionsOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "perceptron/isaac-01", + "metadata": { + "display_name": "Isaac 0.1", + "category": "vision", + "description": "Isaac-01 is a multimodal vision-language model from Perceptron for various vision language tasks.", + "status": "active", + "tags": [ + "multimodal", + "vision" + ], + "updated_at": "2026-01-26T21:42:55.189Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/panda/nuuSn6Wf3q72FHqD8uppE_86098dbea31a439bb14ee16b29277afe.jpg", + "model_url": "https://fal.run/perceptron/isaac-01", + "license_type": "commercial", + "date": "2025-09-17T15:12:22.823Z", + "group": { + "key": "perceptron/isaac-01", + "label": "Isaac 0.1" + }, + "highlighted": false, + "kind": "inference", + "stream_url": "https://fal.run/perceptron/isaac-01/stream", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for perceptron/isaac-01", + "version": "1.0.0", + "description": "The OpenAPI schema for the perceptron/isaac-01 queue.", + "x-fal-metadata": { + "endpointId": "perceptron/isaac-01", + "category": "vision", + "thumbnailUrl": "https://v3.fal.media/files/panda/nuuSn6Wf3q72FHqD8uppE_86098dbea31a439bb14ee16b29277afe.jpg", + "playgroundUrl": "https://fal.ai/models/perceptron/isaac-01", + "documentationUrl": "https://fal.ai/models/perceptron/isaac-01/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Isaac01Input": { + "title": "VisionInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Which car is trying to park into garage?" + ], + "description": "Prompt to be used for the image", + "type": "string", + "title": "Prompt" + }, + "response_style": { + "enum": [ + "text", + "box", + "point", + "polygon" + ], + "description": "\nResponse style to be used for the image.\n\n- text: Model will output text. Good for descriptions and captioning.\n- box: Model will output a combination of text and bounding boxes. Good for\nlocalization.\n- point: Model will output a combination of text and points. Good for counting many\nobjects.\n- polygon: Model will output a combination of text and polygons. Good for granular\nsegmentation.\n", + "type": "string", + "title": "Response Style", + "default": "text" + }, + "image_url": { + "examples": [ + "https://v3b.fal.media/files/b/penguin/BxDPafViqMBGfNyvcmG-C_image-1d100e9%20(4).jpg" + ], + "description": "Image URL to be processed", + "type": "string", + "title": "Image Url" + } + }, + "x-fal-order-properties": [ + "image_url", + "prompt", + "response_style" + ], + "required": [ + "image_url", + "prompt" + ] + }, + "Isaac01Output": { + "title": "ChatOutput", + "type": "object", + "properties": { + "usage": { + "anyOf": [ + { + "$ref": "#/components/schemas/CompletionUsage" + }, + { + "type": "null" + } + ], + "description": "Usage information" + }, + "error": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Error message if an error occurred", + "title": "Error" + }, + "partial": { + "description": "Whether the output is partial", + "type": "boolean", + "title": "Partial", + "default": false + }, + "output": { + "examples": [ + "To determine which car is trying to park into the garage, we need to carefully observe the positions and movements of the vehicles in the image.\n\n1. **Identify the Vehicles**: \n - There are three vehicles visible: a green truck, an orange car, and a brown car.\n - The green truck is positioned in front of the garage entrance.\n - The orange car is parked to the left of the green truck.\n - The brown car is parked to the right of the green truck.\n\n2. **Analyze the Positions**:\n - The green truck is directly in front of the garage entrance, suggesting it is in the process of moving towards the entrance.\n - The orange car is parked parallel to the garage but is not in the path of the green truck.\n - The brown car is also parked parallel to the garage but is not in the path of the green truck.\n\n3. **Determine the Action**:\n - Given the position of the green truck directly in front of the garage entrance, it is most likely that the green truck is trying to park into the garage.\n - The orange and brown cars are already parked and do not appear to be in motion or attempting to park.\n\nTherefore, the car that is trying to park into the garage is the green truck." + ], + "description": "Generated output", + "type": "string", + "title": "Output" + } + }, + "x-fal-order-properties": [ + "output", + "partial", + "error", + "usage" + ], + "required": [ + "output" + ] + }, + "CompletionUsage": { + "title": "CompletionUsage", + "type": "object", + "properties": { + "completion_tokens": { + "description": "Number of tokens in the completion", + "type": "integer", + "title": "Completion Tokens" + }, + "total_tokens": { + "description": "Total tokens used", + "type": "integer", + "title": "Total Tokens" + }, + "prompt_tokens": { + "description": "Number of tokens in the prompt", + "type": "integer", + "title": "Prompt Tokens" + } + }, + "x-fal-order-properties": [ + "completion_tokens", + "prompt_tokens", + "total_tokens" + ], + "required": [ + "completion_tokens", + "prompt_tokens", + "total_tokens" + ] + } + } + }, + "paths": { + "/perceptron/isaac-01/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/perceptron/isaac-01/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully."
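The Isaac01Input and Isaac01Output schemas above suggest a call like the following; again a sketch with the hypothetical `falQueue` helper, where `response_style` selects plain text or grounded (box, point, polygon) output:

```ts
// Sketch: grounded VQA via perceptron/isaac-01. `response_style` picks the
// output grounding: "text", "box", "point", or "polygon" (default "text").
const grounded = await falQueue<
  { image_url: string; prompt: string; response_style?: 'text' | 'box' | 'point' | 'polygon' },
  { output: string; partial?: boolean; error?: string | null }
>('perceptron/isaac-01', {
  image_url: 'https://example.com/driveway.jpg', // hypothetical image
  prompt: 'Which car is trying to park into the garage?',
  response_style: 'box',
})
console.log(grounded.output)
```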
+ } + } + } + } + } + } + } + } + }, + "/perceptron/isaac-01": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Isaac01Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/perceptron/isaac-01/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Isaac01Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/x-ailab/nsfw", + "metadata": { + "display_name": "NSFW Checker", + "category": "vision", + "description": "Predict whether an image is NSFW or SFW.", + "status": "active", + "tags": [ + "filter", + "safety", + "utility" + ], + "updated_at": "2026-01-26T21:43:12.406Z", + "is_favorited": false, + "thumbnail_url": "https://v3.fal.media/files/monkey/Tja6xudNPajR7Wiv3CI2z_6fa01860226940cbab39842db772f7da.jpg", + "model_url": "https://fal.run/fal-ai/x-ailab/nsfw", + "license_type": "commercial", + "date": "2025-07-28T09:37:20.844Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/x-ailab/nsfw", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/x-ailab/nsfw queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/x-ailab/nsfw", + "category": "vision", + "thumbnailUrl": "https://v3.fal.media/files/monkey/Tja6xudNPajR7Wiv3CI2z_6fa01860226940cbab39842db772f7da.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/x-ailab/nsfw", + "documentationUrl": "https://fal.ai/models/fal-ai/x-ailab/nsfw/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "XAilabNsfwInput": { + "title": "NSFWInput", + "type": "object", + "properties": { + "image_urls": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/remove_background/elephant.jpg" + ], + "description": "List of image URLs to check. If more than 10 images are provided, only the first 10 will be checked.", + "type": "array", + "title": "Image Urls", + "items": { + "type": "string" + } + } + }, + "x-fal-order-properties": [ + "image_urls" + ], + "required": [ + "image_urls" + ] + }, + "XAilabNsfwOutput": { + "title": "NSFWOutput", + "type": "object", + "properties": { + "has_nsfw_concepts": { + "examples": [ + [ + true + ] + ], + "description": "List of booleans indicating if the image has an NSFW concept", + "type": "array", + "title": "Has Nsfw Concepts", + "items": { + "type": "boolean" + } + } + }, + "x-fal-order-properties": [ + "has_nsfw_concepts" + ], + "required": [ + "has_nsfw_concepts" + ] + } + } + }, + "paths": { + "/fal-ai/x-ailab/nsfw/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/x-ailab/nsfw/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully."
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/x-ailab/nsfw": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/XAilabNsfwInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/x-ailab/nsfw/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/XAilabNsfwOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/video-understanding", + "metadata": { + "display_name": "Video Understanding", + "category": "vision", + "description": "A video understanding model to analyze video content and answer questions about what's happening in the video based on user prompts.", + "status": "active", + "tags": [ + "utility", + "vision" + ], + "updated_at": "2026-01-26T21:43:24.893Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-4.jpg", + "model_url": "https://fal.run/fal-ai/video-understanding", + "license_type": "commercial", + "date": "2025-06-20T22:35:38.016Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/video-understanding", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/video-understanding queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/video-understanding", + "category": "vision", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/for%20videos-4.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/video-understanding", + "documentationUrl": "https://fal.ai/models/fal-ai/video-understanding/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
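The NSFW checker above takes a batch of URLs and returns a parallel array of booleans; the input description notes only the first 10 URLs are checked. A sketch with the hypothetical `falQueue` helper (image URLs are placeholders):

```ts
// Sketch: batch NSFW screening via fal-ai/x-ailab/nsfw. Each boolean in
// has_nsfw_concepts corresponds positionally to an input URL.
const screened = await falQueue<
  { image_urls: string[] },
  { has_nsfw_concepts: boolean[] }
>('fal-ai/x-ailab/nsfw', {
  image_urls: [
    'https://example.com/upload-1.jpg',
    'https://example.com/upload-2.jpg',
  ],
})
const anyFlagged = screened.has_nsfw_concepts.some(Boolean)
console.log({ anyFlagged })
```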
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "VideoUnderstandingInput": { + "title": "VideoUnderstandingInput", + "type": "object", + "properties": { + "detailed_analysis": { + "description": "Whether to request a more detailed analysis of the video", + "type": "boolean", + "title": "Detailed Analysis", + "default": false + }, + "video_url": { + "examples": [ + "https://v3.fal.media/files/elephant/mLAMkUTxFMbe2xF0qpLdA_Ll9mDE8webFA6GAu3vD_M_71ee7217db1d4aa4af1d2f1ae060389b.mp4" + ], + "description": "URL of the video to analyze", + "type": "string", + "title": "Video Url" + }, + "prompt": { + "examples": [ + "What is happening in this video?" + ], + "maxLength": 5000, + "type": "string", + "minLength": 1, + "description": "The question or prompt about the video content.", + "title": "Prompt" + } + }, + "x-fal-order-properties": [ + "video_url", + "prompt", + "detailed_analysis" + ], + "required": [ + "video_url", + "prompt" + ] + }, + "VideoUnderstandingOutput": { + "title": "VideoUnderstandingOutput", + "type": "object", + "properties": { + "output": { + "examples": [ + "Based on the video, a woman is singing passionately into a microphone in what appears to be a professional recording studio. She is wearing headphones, and behind her, there are sound-dampening foam panels, a mixing board, and other studio equipment." + ], + "description": "The analysis of the video content based on the prompt", + "type": "string", + "title": "Output" + } + }, + "x-fal-order-properties": [ + "output" + ], + "required": [ + "output" + ] + } + } + }, + "paths": { + "/fal-ai/video-understanding/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/video-understanding/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/video-understanding": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VideoUnderstandingInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/video-understanding/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VideoUnderstandingOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/moondream2/visual-query", + "metadata": { + "display_name": "Moondream2", + "category": "vision", + "description": "Moondream2 is a highly efficient open-source vision language model that combines powerful image understanding capabilities with a remarkably small footprint.", + "status": "active", + "tags": [ + "Vision" + ], + "updated_at": "2026-01-26T21:43:50.997Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-2.jpg", + "model_url": "https://fal.run/fal-ai/moondream2/visual-query", + "license_type": "commercial", + "date": "2025-04-26T07:12:07.007Z", + "group": { + "key": "Moondream2", + "label": "Visual Query" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/moondream2/visual-query", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/moondream2/visual-query queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/moondream2/visual-query", + "category": "vision", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-2.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/moondream2/visual-query", + "documentationUrl": "https://fal.ai/models/fal-ai/moondream2/visual-query/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
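Video understanding follows the same queue pattern with a video URL instead of an image; the schema bounds `prompt` at 1 to 5000 characters and defaults `detailed_analysis` to false. A final sketch with the hypothetical `falQueue` helper:

```ts
// Sketch: video Q&A via fal-ai/video-understanding (video URL is a placeholder).
const analysis = await falQueue<
  { video_url: string; prompt: string; detailed_analysis?: boolean },
  { output: string }
>('fal-ai/video-understanding', {
  video_url: 'https://example.com/clip.mp4',
  prompt: 'What is happening in this video?',
  detailed_analysis: true,
})
console.log(analysis.output)
```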
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Moondream2VisualQueryInput": { + "x-fal-order-properties": [ + "image_url", + "prompt" + ], + "type": "object", + "properties": { + "prompt": { + "description": "Query to be asked in the image", + "type": "string", + "title": "Query" + }, + "image_url": { + "examples": [ + "https://llava-vl.github.io/static/images/monalisa.jpg" + ], + "description": "URL of the image to be processed", + "type": "string", + "title": "Image URL" + } + }, + "title": "MoondreamQueryInput", + "required": [ + "image_url", + "prompt" + ] + }, + "Moondream2VisualQueryOutput": { + "x-fal-order-properties": [ + "output" + ], + "type": "object", + "properties": { + "output": { + "description": "Output for the given query", + "type": "string", + "title": "Output" + } + }, + "title": "MoondreamOutput", + "required": [ + "output" + ] + } + } + }, + "paths": { + "/fal-ai/moondream2/visual-query/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/moondream2/visual-query/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/moondream2/visual-query": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Moondream2VisualQueryInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/moondream2/visual-query/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Moondream2VisualQueryOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/moondream2", + "metadata": { + "display_name": "Moondream2", + "category": "vision", + "description": "Moondream2 is a highly efficient open-source vision language model that combines powerful image understanding capabilities with a remarkably small footprint.", + "status": "active", + "tags": [ + "Vision" + ], + "updated_at": "2026-01-26T21:43:51.122Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-2.jpg", + "model_url": "https://fal.run/fal-ai/moondream2", + "license_type": "commercial", + "date": "2025-04-26T07:09:30.111Z", + "group": { + "key": "Moondream2", + "label": "Caption" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/moondream2", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/moondream2 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/moondream2", + "category": "vision", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-2.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/moondream2", + "documentationUrl": "https://fal.ai/models/fal-ai/moondream2/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Moondream2Input": { + "x-fal-order-properties": [ + "image_url" + ], + "type": "object", + "properties": { + "image_url": { + "examples": [ + "https://llava-vl.github.io/static/images/monalisa.jpg" + ], + "description": "URL of the image to be processed", + "type": "string", + "title": "Image URL" + } + }, + "title": "MoondreamInput", + "required": [ + "image_url" + ] + }, + "Moondream2Output": { + "x-fal-order-properties": [ + "output" + ], + "type": "object", + "properties": { + "output": { + "description": "Output for the given query", + "type": "string", + "title": "Output" + } + }, + "title": "MoondreamOutput", + "required": [ + "output" + ] + } + } + }, + "paths": { + "/fal-ai/moondream2/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)."
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/moondream2/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/moondream2": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Moondream2Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/moondream2/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Moondream2Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/moondream2/point-object-detection", + "metadata": { + "display_name": "Moondream2", + "category": "vision", + "description": "Moondream2 is a highly efficient open-source vision language model that combines powerful image understanding capabilities with a remarkably small footprint.", + "status": "active", + "tags": [ + "image-to-image" + ], + "updated_at": "2026-01-26T21:43:51.246Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-2.jpg", + "model_url": "https://fal.run/fal-ai/moondream2/point-object-detection", + "license_type": "commercial", + "date": "2025-04-26T07:09:10.417Z", + "group": { + "key": "Moondream2", + "label": "Point Object Detection" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/moondream2/point-object-detection", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/moondream2/point-object-detection queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/moondream2/point-object-detection", + "category": "vision", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-2.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/moondream2/point-object-detection", + "documentationUrl": "https://fal.ai/models/fal-ai/moondream2/point-object-detection/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Moondream2PointObjectDetectionInput": { + "x-fal-order-properties": [ + "image_url", + "object" + ], + "type": "object", + "properties": { + "object": { + "description": "Object to be detected in the image", + "type": "string", + "title": "Object" + }, + "image_url": { + "examples": [ + "https://llava-vl.github.io/static/images/monalisa.jpg" + ], + "description": "URL of the image to be processed", + "type": "string", + "title": "Image URL" + } + }, + "title": "MoondreamObjectInput", + "required": [ + "image_url", + "object" + ] + }, + "Moondream2PointObjectDetectionOutput": { + "x-fal-order-properties": [ + "objects", + "image" + ], + "type": "object", + "properties": { + "image": { + "description": "Image with detected objects", + "$ref": "#/components/schemas/Image" + }, + "objects": { + "description": "Objects detected in the image", + "type": "array", + "title": "Objects", + "items": { + "additionalProperties": true, + "type": "object" + } + } + }, + "title": "MoondreamObjectOutput", + "required": [ + "objects", + "image" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height" + }, + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/moondream2/point-object-detection/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/moondream2/point-object-detection/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/moondream2/point-object-detection": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Moondream2PointObjectDetectionInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/moondream2/point-object-detection/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Moondream2PointObjectDetectionOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/moondream2/object-detection", + "metadata": { + "display_name": "Moondream2", + "category": "vision", + "description": "Moondream2 is a highly efficient open-source vision language model that combines powerful image understanding capabilities with a remarkably small footprint.", + "status": "active", + "tags": [ + "image-to-image" + ], + "updated_at": "2026-01-26T21:43:51.371Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/fal_cdn/fal/Sound-2.jpg", + "model_url": "https://fal.run/fal-ai/moondream2/object-detection", + "license_type": "commercial", + "date": "2025-04-26T07:09:05.554Z", + "group": { + "key": "Moondream2", + "label": "Object-Detection" + }, + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/moondream2/object-detection", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/moondream2/object-detection queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/moondream2/object-detection", + "category": "vision", + "thumbnailUrl": "https://storage.googleapis.com/fal_cdn/fal/Sound-2.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/moondream2/object-detection", + "documentationUrl": "https://fal.ai/models/fal-ai/moondream2/object-detection/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Moondream2ObjectDetectionInput": { + "x-fal-order-properties": [ + "image_url", + "object" + ], + "type": "object", + "properties": { + "object": { + "description": "Object to be detected in the image", + "type": "string", + "title": "Object" + }, + "image_url": { + "examples": [ + "https://llava-vl.github.io/static/images/monalisa.jpg" + ], + "description": "URL of the image to be processed", + "type": "string", + "title": "Image URL" + } + }, + "title": "MoondreamObjectInput", + "required": [ + "image_url", + "object" + ] + }, + "Moondream2ObjectDetectionOutput": { + "x-fal-order-properties": [ + "objects", + "image" + ], + "type": "object", + "properties": { + "image": { + "description": "Image with detected objects", + "$ref": "#/components/schemas/Image" + }, + "objects": { + "description": "Objects detected in the image", + "type": "array", + "title": "Objects", + "items": { + "additionalProperties": true, + "type": "object" + } + } + }, + "title": "MoondreamObjectOutput", + "required": [ + "objects", + "image" + ] + }, + "Image": { + "description": "Represents an image file.", + "type": "object", + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "width", + "height" + ], + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "description": "The height of the image in pixels.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Height" + }, + "file_size": { + "examples": [ + 4404019 + ], + "description": "The size of the file in bytes.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "File Size" + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "description": "The name of the file. It will be auto-generated if not provided.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "File Name" + }, + "content_type": { + "examples": [ + "image/png" + ], + "description": "The mime type of the file.", + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Content Type" + }, + "url": { + "description": "The URL where the file can be downloaded from.", + "type": "string", + "title": "Url" + }, + "width": { + "examples": [ + 1024 + ], + "description": "The width of the image in pixels.", + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Width" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/moondream2/object-detection/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/moondream2/object-detection/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/moondream2/object-detection": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Moondream2ObjectDetectionInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/moondream2/object-detection/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Moondream2ObjectDetectionOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/got-ocr/v2", + "metadata": { + "display_name": "GOT OCR 2.0", + "category": "vision", + "description": "GOT-OCR2 works on a wide range of tasks, including plain document OCR, scene text OCR, formatted document OCR, and even OCR for tables, charts, mathematical formulas, geometric shapes, molecular formulas and sheet music.", + "status": "active", + "tags": [ + "optical character recognition", + "high-res", + "utility" + ], + "updated_at": "2026-01-26T21:44:05.690Z", + "is_favorited": false, + "thumbnail_url": "https://fal.media/files/lion/roeSKm7MGJpsQpuFfpd5S_05ed784bd82b4519bd3b0dbd41c0e946.jpg", + "model_url": "https://fal.run/fal-ai/got-ocr/v2", + "license_type": "commercial", + "date": "2025-02-12T00:00:00.000Z", + "highlighted": false, + "kind": "inference", + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/got-ocr/v2", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/got-ocr/v2 queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/got-ocr/v2", + "category": "vision", + "thumbnailUrl": "https://fal.media/files/lion/roeSKm7MGJpsQpuFfpd5S_05ed784bd82b4519bd3b0dbd41c0e946.jpg", + "playgroundUrl": "https://fal.ai/models/fal-ai/got-ocr/v2", + "documentationUrl": "https://fal.ai/models/fal-ai/got-ocr/v2/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." 
+ }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "GotOcrV2Input": { + "title": "ImageInput", + "type": "object", + "properties": { + "do_format": { + "title": "Do Format", + "type": "boolean", + "description": "Generate the output in formatted mode.", + "default": false + }, + "multi_page": { + "title": "Multi Page", + "type": "boolean", + "description": "Use provided images to generate a single output.", + "default": false + }, + "input_image_urls": { + "description": "URL of images.", + "type": "array", + "items": { + "type": "string" + }, + "examples": [], + "title": "Input Image Urls", + "default": [] + } + }, + "x-fal-order-properties": [ + "input_image_urls", + "do_format", + "multi_page" + ] + }, + "GotOcrV2Output": { + "title": "ImageChatOutput", + "type": "object", + "properties": { + "outputs": { + "title": "Output", + "type": "array", + "description": "Generated output", + "items": { + "type": "string" + } + } + }, + "x-fal-order-properties": [ + "outputs" + ], + "required": [ + "outputs" + ] + } + } + }, + "paths": { + "/fal-ai/got-ocr/v2/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/got-ocr/v2/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/got-ocr/v2": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GotOcrV2Input" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/got-ocr/v2/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GotOcrV2Output" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/moondream-next/batch", + "metadata": { + "display_name": "MoonDreamNext Batch", + "category": "vision", + "description": "MoonDreamNext Batch is a multimodal vision-language model for batch captioning.", + "status": "active", + "tags": [ + "multimodal" + ], + "updated_at": "2026-01-26T21:44:30.589Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/web-examples/moondreamnext/moondream-next.webp", + "model_url": "https://fal.run/fal-ai/moondream-next/batch", + "license_type": "commercial", + "date": "2025-01-17T00:00:00.000Z", + "group": { + "key": "moondreamnext", + "label": "Batch" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/moondream-next/batch", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/moondream-next/batch queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/moondream-next/batch", + "category": "vision", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/web-examples/moondreamnext/moondream-next.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/moondream-next/batch", + "documentationUrl": "https://fal.ai/models/fal-ai/moondream-next/batch/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MoondreamNextBatchInput": { + "title": "BatchQueryInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Describe this image in detail." 
+ ], + "title": "Prompt", + "type": "string", + "description": "Single prompt to apply to all images" + }, + "images_data_url": { + "title": "Image URLs", + "type": "string", + "description": "List of image URLs to be processed (maximum 32 images)" + }, + "max_tokens": { + "minimum": 1, + "title": "Max Tokens", + "type": "integer", + "maximum": 512, + "description": "Maximum number of tokens to generate", + "default": 64 + } + }, + "x-fal-order-properties": [ + "images_data_url", + "prompt", + "max_tokens" + ], + "required": [ + "images_data_url", + "prompt" + ] + }, + "MoondreamNextBatchOutput": { + "title": "BatchMoonDreamOutput", + "type": "object", + "properties": { + "outputs": { + "title": "Outputs", + "type": "array", + "description": "List of generated captions", + "items": { + "type": "string" + } + }, + "captions_file": { + "title": "Captions File", + "description": "URL to the generated captions JSON file containing filename-caption pairs.", + "allOf": [ + { + "$ref": "#/components/schemas/File" + } + ] + } + }, + "x-fal-order-properties": [ + "captions_file", + "outputs" + ], + "required": [ + "captions_file", + "outputs" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/moondream-next/batch/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/moondream-next/batch/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/moondream-next/batch": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MoondreamNextBatchInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/moondream-next/batch/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MoondreamNextBatchOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/sa2va/4b/video", + "metadata": { + "display_name": "Sa2VA 4B Video", + "category": "vision", + "description": "Sa2VA is an MLLM capable of question answering, visual prompt understanding, and dense object segmentation at both image and video levels", + "status": "active", + "tags": [ + "multimodal", + "vision" + ], + "updated_at": "2026-01-26T21:44:31.621Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/sa2va.webp", + "model_url": "https://fal.run/fal-ai/sa2va/4b/video", + "license_type": "commercial", + "date": "2025-01-13T00:00:00.000Z", + "group": { + "key": "sa2va", + "label": "4B (Video)" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/sa2va/4b/video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/sa2va/4b/video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/sa2va/4b/video", + "category": "vision", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/sa2va.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/sa2va/4b/video", + "documentationUrl": "https://fal.ai/models/fal-ai/sa2va/4b/video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Sa2va4bVideoInput": { + "title": "VideoInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Could you please give me a brief description of the video? Please respond with interleaved segmentation masks for the corresponding parts of the answer." 
+ ], + "title": "Prompt", + "type": "string", + "description": "Prompt to be used for the chat completion" + }, + "video_url": { + "examples": [ + "https://drive.google.com/uc?id=1iOFYbNITYwrebBBp9kaEGhBndFSRLz8k" + ], + "title": "Video Url", + "type": "string", + "description": "The URL of the input video." + }, + "num_frames_to_sample": { + "minimum": 1, + "title": "Num Frames To Sample", + "type": "integer", + "maximum": 100, + "description": "Number of frames to sample from the video. If not provided, all frames are sampled." + } + }, + "x-fal-order-properties": [ + "prompt", + "video_url", + "num_frames_to_sample" + ], + "required": [ + "prompt", + "video_url" + ] + }, + "Sa2va4bVideoOutput": { + "title": "VideoChatOutput", + "type": "object", + "properties": { + "masks": { + "examples": [ + [ + { + "file_size": 3259012, + "file_name": "output_0.mp4", + "content_type": "application/octet-stream", + "url": "https://v3.fal.media/files/kangaroo/KSuUWm24leGew4jTouuTM_output_0.mp4" + }, + { + "file_size": 1241471, + "file_name": "output_1.mp4", + "content_type": "application/octet-stream", + "url": "https://v3.fal.media/files/monkey/0jHCYm2lZM6FjDmtXw1Kt_output_1.mp4" + } + ] + ], + "title": "Masks", + "type": "array", + "description": "Dictionary of label: mask video", + "items": { + "$ref": "#/components/schemas/File" + } + }, + "output": { + "examples": [ + "
Two children [SEG] are jumping on a bed
[SEG] .<|im_end|>" + ], + "title": "Output", + "type": "string", + "description": "Generated output" + } + }, + "x-fal-order-properties": [ + "output", + "masks" + ], + "required": [ + "output", + "masks" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/sa2va/4b/video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sa2va/4b/video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/sa2va/4b/video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sa2va4bVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sa2va/4b/video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sa2va4bVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/sa2va/8b/video", + "metadata": { + "display_name": "Sa2VA 8B Video", + "category": "vision", + "description": "Sa2VA is an MLLM capable of question answering, visual prompt understanding, and dense object segmentation at both image and video levels", + "status": "active", + "tags": [ + "multimodal", + "vision" + ], + "updated_at": "2026-01-26T21:44:32.007Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/sa2va.webp", + "model_url": "https://fal.run/fal-ai/sa2va/8b/video", + "license_type": "commercial", + "date": "2025-01-13T00:00:00.000Z", + "group": { + "key": "sa2va", + "label": "8B (Video)" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/sa2va/8b/video", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/sa2va/8b/video queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/sa2va/8b/video", + "category": "vision", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/sa2va.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/sa2va/8b/video", + "documentationUrl": "https://fal.ai/models/fal-ai/sa2va/8b/video/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Sa2va8bVideoInput": { + "title": "VideoInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Could you please give me a brief description of the video? Please respond with interleaved segmentation masks for the corresponding parts of the answer." 
+ ], + "title": "Prompt", + "type": "string", + "description": "Prompt to be used for the chat completion" + }, + "video_url": { + "examples": [ + "https://drive.google.com/uc?id=1iOFYbNITYwrebBBp9kaEGhBndFSRLz8k" + ], + "title": "Video Url", + "type": "string", + "description": "The URL of the input video." + }, + "num_frames_to_sample": { + "minimum": 1, + "title": "Num Frames To Sample", + "type": "integer", + "maximum": 100, + "description": "Number of frames to sample from the video. If not provided, all frames are sampled." + } + }, + "x-fal-order-properties": [ + "prompt", + "video_url", + "num_frames_to_sample" + ], + "required": [ + "prompt", + "video_url" + ] + }, + "Sa2va8bVideoOutput": { + "title": "VideoChatOutput", + "type": "object", + "properties": { + "masks": { + "examples": [ + [ + { + "file_size": 3259012, + "file_name": "output_0.mp4", + "content_type": "application/octet-stream", + "url": "https://v3.fal.media/files/kangaroo/KSuUWm24leGew4jTouuTM_output_0.mp4" + }, + { + "file_size": 1241471, + "file_name": "output_1.mp4", + "content_type": "application/octet-stream", + "url": "https://v3.fal.media/files/monkey/0jHCYm2lZM6FjDmtXw1Kt_output_1.mp4" + } + ] + ], + "title": "Masks", + "type": "array", + "description": "Dictionary of label: mask video", + "items": { + "$ref": "#/components/schemas/File" + } + }, + "output": { + "examples": [ + "
Two children [SEG] are jumping on a bed
[SEG] .<|im_end|>" + ], + "title": "Output", + "type": "string", + "description": "Generated output" + } + }, + "x-fal-order-properties": [ + "output", + "masks" + ], + "required": [ + "output", + "masks" + ] + }, + "File": { + "title": "File", + "type": "object", + "properties": { + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data" + ], + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/sa2va/8b/video/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sa2va/8b/video/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/sa2va/8b/video": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sa2va8bVideoInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sa2va/8b/video/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sa2va8bVideoOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/sa2va/4b/image", + "metadata": { + "display_name": "Sa2VA 4B Image", + "category": "vision", + "description": "Sa2VA is an MLLM capable of question answering, visual prompt understanding, and dense object segmentation at both image and video levels", + "status": "active", + "tags": [ + "multimodal", + "vision" + ], + "updated_at": "2026-01-26T21:44:31.878Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/sa2va.webp", + "model_url": "https://fal.run/fal-ai/sa2va/4b/image", + "license_type": "commercial", + "date": "2025-01-13T00:00:00.000Z", + "group": { + "key": "sa2va", + "label": "4B (Image)" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/sa2va/4b/image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/sa2va/4b/image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/sa2va/4b/image", + "category": "vision", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/sa2va.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/sa2va/4b/image", + "documentationUrl": "https://fal.ai/models/fal-ai/sa2va/4b/image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Sa2va4bImageInput": { + "title": "ImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Could you please give me a brief description of the image? Please respond with interleaved segmentation masks for the corresponding parts of the answer." 
+ ], + "title": "Prompt", + "type": "string", + "description": "Prompt to be used for the chat completion" + }, + "image_url": { + "examples": [ + "https://raw.githubusercontent.com/facebookresearch/segment-anything-2/main/notebooks/images/truck.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "Url for the Input image." + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "Sa2va4bImageOutput": { + "title": "ImageChatOutput", + "type": "object", + "properties": { + "masks": { + "examples": [ + [ + { + "height": 1200, + "file_size": 15724, + "file_name": "019c3c1e3c50446e9996f709d36debb4.png", + "content_type": "image/png", + "url": "https://v3.fal.media/files/monkey/6ITmhHQJ-69s-UxajrY5T_019c3c1e3c50446e9996f709d36debb4.png", + "width": 1800 + }, + { + "height": 1200, + "file_size": 14905, + "file_name": "0a1522ca410942c7ad6c73efa15b3549.png", + "content_type": "image/png", + "url": "https://v3.fal.media/files/monkey/IljtMxahoo9-7SUpx0fth_0a1522ca410942c7ad6c73efa15b3549.png", + "width": 1800 + } + ] + ], + "title": "Masks", + "type": "array", + "description": "Dictionary of label: mask image", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "output": { + "examples": [ + "
A white pickup truck [SEG] is parked on the side of the red building
[SEG] , creating a unique and eye-catching contrast.<|im_end|>" + ], + "title": "Output", + "type": "string", + "description": "Generated output" + } + }, + "x-fal-order-properties": [ + "output", + "masks" + ], + "required": [ + "output", + "masks" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/sa2va/4b/image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sa2va/4b/image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/sa2va/4b/image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sa2va4bImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sa2va/4b/image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sa2va4bImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/sa2va/8b/image", + "metadata": { + "display_name": "Sa2VA 8B Image", + "category": "vision", + "description": "Sa2VA is an MLLM capable of question answering, visual prompt understanding, and dense object segmentation at both image and video levels", + "status": "active", + "tags": [ + "multimodal", + "vision" + ], + "updated_at": "2026-01-26T21:44:31.749Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/sa2va.webp", + "model_url": "https://fal.run/fal-ai/sa2va/8b/image", + "license_type": "commercial", + "date": "2025-01-13T00:00:00.000Z", + "group": { + "key": "sa2va", + "label": "8B (Image)" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/sa2va/8b/image", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/sa2va/8b/image queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/sa2va/8b/image", + "category": "vision", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/sa2va.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/sa2va/8b/image", + "documentationUrl": "https://fal.ai/models/fal-ai/sa2va/8b/image/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "Sa2va8bImageInput": { + "title": "ImageInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Could you please give me a brief description of the image? Please respond with interleaved segmentation masks for the corresponding parts of the answer." 
+ ], + "title": "Prompt", + "type": "string", + "description": "Prompt to be used for the chat completion" + }, + "image_url": { + "examples": [ + "https://raw.githubusercontent.com/facebookresearch/segment-anything-2/main/notebooks/images/truck.jpg" + ], + "title": "Image Url", + "type": "string", + "description": "Url for the Input image." + } + }, + "x-fal-order-properties": [ + "prompt", + "image_url" + ], + "required": [ + "prompt", + "image_url" + ] + }, + "Sa2va8bImageOutput": { + "title": "ImageChatOutput", + "type": "object", + "properties": { + "masks": { + "examples": [ + [ + { + "height": 1200, + "file_size": 15724, + "file_name": "019c3c1e3c50446e9996f709d36debb4.png", + "content_type": "image/png", + "url": "https://v3.fal.media/files/monkey/6ITmhHQJ-69s-UxajrY5T_019c3c1e3c50446e9996f709d36debb4.png", + "width": 1800 + }, + { + "height": 1200, + "file_size": 14905, + "file_name": "0a1522ca410942c7ad6c73efa15b3549.png", + "content_type": "image/png", + "url": "https://v3.fal.media/files/monkey/IljtMxahoo9-7SUpx0fth_0a1522ca410942c7ad6c73efa15b3549.png", + "width": 1800 + } + ] + ], + "title": "Masks", + "type": "array", + "description": "Dictionary of label: mask image", + "items": { + "$ref": "#/components/schemas/Image" + } + }, + "output": { + "examples": [ + "
A white pickup truck [SEG] is parked on the side of the red building
[SEG] , creating a unique and eye-catching contrast.<|im_end|>" + ], + "title": "Output", + "type": "string", + "description": "Generated output" + } + }, + "x-fal-order-properties": [ + "output", + "masks" + ], + "required": [ + "output", + "masks" + ] + }, + "Image": { + "x-fal-order-properties": [ + "url", + "content_type", + "file_name", + "file_size", + "file_data", + "width", + "height" + ], + "type": "object", + "description": "Represents an image file.", + "title": "Image", + "properties": { + "height": { + "examples": [ + 1024 + ], + "title": "Height", + "type": "integer", + "description": "The height of the image in pixels." + }, + "file_size": { + "examples": [ + 4404019 + ], + "title": "File Size", + "type": "integer", + "description": "The size of the file in bytes." + }, + "url": { + "title": "Url", + "type": "string", + "description": "The URL where the file can be downloaded from." + }, + "width": { + "examples": [ + 1024 + ], + "title": "Width", + "type": "integer", + "description": "The width of the image in pixels." + }, + "file_name": { + "examples": [ + "z9RV14K95DvU.png" + ], + "title": "File Name", + "type": "string", + "description": "The name of the file. It will be auto-generated if not provided." + }, + "content_type": { + "examples": [ + "image/png" + ], + "title": "Content Type", + "type": "string", + "description": "The mime type of the file." + }, + "file_data": { + "format": "binary", + "title": "File Data", + "type": "string", + "description": "File data" + } + }, + "required": [ + "url" + ] + } + } + }, + "paths": { + "/fal-ai/sa2va/8b/image/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sa2va/8b/image/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/sa2va/8b/image": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sa2va8bImageInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/sa2va/8b/image/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Sa2va8bImageOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/moondream-next", + "metadata": { + "display_name": "MoonDreamNext", + "category": "vision", + "description": "MoonDreamNext is a multimodal vision-language model for captioning, gaze detection, bbox detection, point detection, and more.", + "status": "active", + "tags": [ + "multimodal", + "vision" + ], + "updated_at": "2026-01-26T21:44:32.399Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/web-examples/moondreamnext/moondream-next.webp", + "model_url": "https://fal.run/fal-ai/moondream-next", + "license_type": "commercial", + "date": "2025-01-09T00:00:00.000Z", + "group": { + "key": "moondreamnext", + "label": "Caption" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/moondream-next", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/moondream-next queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/moondream-next", + "category": "vision", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/web-examples/moondreamnext/moondream-next.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/moondream-next", + "documentationUrl": "https://fal.ai/models/fal-ai/moondream-next/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "MoondreamNextInput": { + "title": "QueryInput", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Describe this image in detail." 
+ ], + "title": "Prompt", + "type": "string", + "description": "Prompt for query task" + }, + "task_type": { + "enum": [ + "caption", + "query" + ], + "title": "Task Type", + "type": "string", + "description": "Type of task to perform", + "default": "caption" + }, + "max_tokens": { + "minimum": 1, + "title": "Max Tokens", + "type": "integer", + "maximum": 512, + "description": "Maximum number of tokens to generate", + "default": 64 + }, + "image_url": { + "examples": [ + "https://llava-vl.github.io/static/images/monalisa.jpg" + ], + "title": "Image URL", + "type": "string", + "description": "Image URL to be processed" + } + }, + "x-fal-order-properties": [ + "image_url", + "task_type", + "prompt", + "max_tokens" + ], + "required": [ + "image_url", + "prompt" + ] + }, + "MoondreamNextOutput": { + "title": "MoonDreamOutput", + "type": "object", + "properties": { + "output": { + "title": "Output", + "type": "string", + "description": "Response from the model" + } + }, + "x-fal-order-properties": [ + "output" + ], + "required": [ + "output" + ] + } + } + }, + "paths": { + "/fal-ai/moondream-next/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/moondream-next/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/moondream-next": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MoondreamNextInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/moondream-next/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MoondreamNextOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/florence-2-large/region-to-description", + "metadata": { + "display_name": "Florence-2 Large", + "category": "vision", + "description": "Florence-2 is an advanced vision foundation model that uses a prompt-based approach to handle a wide range of vision and vision-language tasks", + "status": "active", + "tags": [ + "multimodal", + "vision" + ], + "updated_at": "2026-01-26T21:44:45.848Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/florence-2-large.jpeg", + "model_url": "https://fal.run/fal-ai/florence-2-large/region-to-description", + "github_url": "https://huggingface.co/microsoft/Florence-2-large/blob/main/LICENSE", + "license_type": "commercial", + "date": "2024-06-22T00:00:00.000Z", + "group": { + "key": "florence-2-large", + "label": "Region to Description" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/florence-2-large/region-to-description", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/florence-2-large/region-to-description queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/florence-2-large/region-to-description", + "category": "vision", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/florence-2-large.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/florence-2-large/region-to-description", + "documentationUrl": "https://fal.ai/models/fal-ai/florence-2-large/region-to-description/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Florence2LargeRegionToDescriptionInput": { + "title": "ImageWithUserCoordinatesInput", + "type": "object", + "properties": { + "region": { + "examples": [ + { + "y1": 100, + "x2": 200, + "x1": 100, + "y2": 200 + } + ], + "description": "The user input coordinates", + "title": "Region", + "allOf": [ + { + "$ref": "#/components/schemas/Region" + } + ] + }, + "image_url": { + "examples": [ + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg", + "http://ecx.images-amazon.com/images/I/51UUzBDAMsL.jpg" + ], + "description": "The URL of the image to be processed.", + "type": "string", + "title": "Image Url" + } + }, + "x-fal-order-properties": [ + "image_url", + "region" + ], + "required": [ + "image_url", + "region" + ] + }, + "Florence2LargeRegionToDescriptionOutput": { + "title": "TextOutput", + "type": "object", + "properties": { + "results": { + "description": "Results from the model", + "type": "string", + "title": "Results" + } + }, + "x-fal-order-properties": [ + "results" + ], + "required": [ + "results" + ] + }, + "Region": { + "title": "Region", + "type": "object", + "properties": { + "y1": { + "minimum": 0, + "maximum": 999, + "type": "integer", + "title": "Y1", + "description": "Y-coordinate of the top-left corner" + }, + "x2": { + "minimum": 0, + "maximum": 999, + "type": "integer", + "title": "X2", + "description": "X-coordinate of the bottom-right corner" + }, + "x1": { + "minimum": 0, + "maximum": 999, + "type": "integer", + "title": "X1", + "description": "X-coordinate of the top-left corner" + }, + "y2": { + "minimum": 0, + "maximum": 999, + "type": "integer", + "title": "Y2", + "description": "Y-coordinate of the bottom-right corner" + } + }, + "x-fal-order-properties": [ + "x1", + "y1", + "x2", + "y2" + ], + "required": [ + "x1", + "y1", + "x2", + "y2" + ] + } + } + }, + "paths": { + "/fal-ai/florence-2-large/region-to-description/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/region-to-description/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/region-to-description": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Florence2LargeRegionToDescriptionInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/region-to-description/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Florence2LargeRegionToDescriptionOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/florence-2-large/ocr", + "metadata": { + "display_name": "Florence-2 Large", + "category": "vision", + "description": "Florence-2 is an advanced vision foundation model that uses a prompt-based approach to handle a wide range of vision and vision-language tasks", + "status": "active", + "tags": [ + "ocr", + "multimodal", + "vision" + ], + "updated_at": "2026-01-26T21:44:45.974Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/florence-2-large.jpeg", + "model_url": "https://fal.run/fal-ai/florence-2-large/ocr", + "github_url": "https://huggingface.co/microsoft/Florence-2-large/blob/main/LICENSE", + "license_type": "commercial", + "date": "2024-06-22T00:00:00.000Z", + "group": { + "key": "florence-2-large", + "label": "OCR" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/florence-2-large/ocr", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/florence-2-large/ocr queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/florence-2-large/ocr", + "category": "vision", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/florence-2-large.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/florence-2-large/ocr", + "documentationUrl": "https://fal.ai/models/fal-ai/florence-2-large/ocr/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Florence2LargeOcrInput": { + "title": "ImageInput", + "type": "object", + "properties": { + "image_url": { + "examples": [ + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg", + "http://ecx.images-amazon.com/images/I/51UUzBDAMsL.jpg" + ], + "description": "The URL of the image to be processed.", + "type": "string", + "title": "Image Url" + } + }, + "x-fal-order-properties": [ + "image_url" + ], + "required": [ + "image_url" + ] + }, + "Florence2LargeOcrOutput": { + "title": "TextOutput", + "type": "object", + "properties": { + "results": { + "description": "Results from the model", + "type": "string", + "title": "Results" + } + }, + "x-fal-order-properties": [ + "results" + ], + "required": [ + "results" + ] + } + } + }, + "paths": { + "/fal-ai/florence-2-large/ocr/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/ocr/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/ocr": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Florence2LargeOcrInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/ocr/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Florence2LargeOcrOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/florence-2-large/more-detailed-caption", + "metadata": { + "display_name": "Florence-2 Large", + "category": "vision", + "description": "Florence-2 is an advanced vision foundation model that uses a prompt-based approach to handle a wide range of vision and vision-language tasks", + "status": "active", + "tags": [ + "captioning", + "multimodal", + "vision" + ], + "updated_at": "2026-01-26T21:44:46.400Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/florence-2-large.jpeg", + "model_url": "https://fal.run/fal-ai/florence-2-large/more-detailed-caption", + "github_url": "https://huggingface.co/microsoft/Florence-2-large/blob/main/LICENSE", + "license_type": "commercial", + "date": "2024-06-22T00:00:00.000Z", + "group": { + "key": "florence-2-large", + "label": "More Detailed Caption" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/florence-2-large/more-detailed-caption", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/florence-2-large/more-detailed-caption queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/florence-2-large/more-detailed-caption", + "category": "vision", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/florence-2-large.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/florence-2-large/more-detailed-caption", + "documentationUrl": "https://fal.ai/models/fal-ai/florence-2-large/more-detailed-caption/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Florence2LargeMoreDetailedCaptionInput": { + "title": "ImageInput", + "type": "object", + "properties": { + "image_url": { + "examples": [ + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg", + "http://ecx.images-amazon.com/images/I/51UUzBDAMsL.jpg" + ], + "description": "The URL of the image to be processed.", + "type": "string", + "title": "Image Url" + } + }, + "x-fal-order-properties": [ + "image_url" + ], + "required": [ + "image_url" + ] + }, + "Florence2LargeMoreDetailedCaptionOutput": { + "title": "TextOutput", + "type": "object", + "properties": { + "results": { + "description": "Results from the model", + "type": "string", + "title": "Results" + } + }, + "x-fal-order-properties": [ + "results" + ], + "required": [ + "results" + ] + } + } + }, + "paths": { + "/fal-ai/florence-2-large/more-detailed-caption/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/more-detailed-caption/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/more-detailed-caption": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Florence2LargeMoreDetailedCaptionInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/more-detailed-caption/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Florence2LargeMoreDetailedCaptionOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/florence-2-large/region-to-category", + "metadata": { + "display_name": "Florence-2 Large", + "category": "vision", + "description": "Florence-2 is an advanced vision foundation model that uses a prompt-based approach to handle a wide range of vision and vision-language tasks", + "status": "active", + "tags": [ + "multimodal", + "vision" + ], + "updated_at": "2026-01-26T21:44:46.104Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/florence-2-large.jpeg", + "model_url": "https://fal.run/fal-ai/florence-2-large/region-to-category", + "github_url": "https://huggingface.co/microsoft/Florence-2-large/blob/main/LICENSE", + "license_type": "commercial", + "date": "2024-06-22T00:00:00.000Z", + "group": { + "key": "florence-2-large", + "label": "Region to Category" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/florence-2-large/region-to-category", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/florence-2-large/region-to-category queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/florence-2-large/region-to-category", + "category": "vision", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/florence-2-large.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/florence-2-large/region-to-category", + "documentationUrl": "https://fal.ai/models/fal-ai/florence-2-large/region-to-category/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Florence2LargeRegionToCategoryInput": { + "title": "ImageWithUserCoordinatesInput", + "type": "object", + "properties": { + "region": { + "examples": [ + { + "y1": 100, + "x2": 200, + "x1": 100, + "y2": 200 + } + ], + "description": "The user input coordinates", + "title": "Region", + "allOf": [ + { + "$ref": "#/components/schemas/Region" + } + ] + }, + "image_url": { + "examples": [ + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg", + "http://ecx.images-amazon.com/images/I/51UUzBDAMsL.jpg" + ], + "description": "The URL of the image to be processed.", + "type": "string", + "title": "Image Url" + } + }, + "x-fal-order-properties": [ + "image_url", + "region" + ], + "required": [ + "image_url", + "region" + ] + }, + "Florence2LargeRegionToCategoryOutput": { + "title": "TextOutput", + "type": "object", + "properties": { + "results": { + "description": "Results from the model", + "type": "string", + "title": "Results" + } + }, + "x-fal-order-properties": [ + "results" + ], + "required": [ + "results" + ] + }, + "Region": { + "title": "Region", + "type": "object", + "properties": { + "y1": { + "minimum": 0, + "maximum": 999, + "type": "integer", + "title": "Y1", + "description": "Y-coordinate of the top-left corner" + }, + "x2": { + "minimum": 0, + "maximum": 999, + "type": "integer", + "title": "X2", + "description": "X-coordinate of the bottom-right corner" + }, + "x1": { + "minimum": 0, + "maximum": 999, + "type": "integer", + "title": "X1", + "description": "X-coordinate of the top-left corner" + }, + "y2": { + "minimum": 0, + "maximum": 999, + "type": "integer", + "title": "Y2", + "description": "Y-coordinate of the bottom-right corner" + } + }, + "x-fal-order-properties": [ + "x1", + "y1", + "x2", + "y2" + ], + "required": [ + "x1", + "y1", + "x2", + "y2" + ] + } + } + }, + "paths": { + "/fal-ai/florence-2-large/region-to-category/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/region-to-category/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/region-to-category": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Florence2LargeRegionToCategoryInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/region-to-category/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Florence2LargeRegionToCategoryOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/florence-2-large/caption", + "metadata": { + "display_name": "Florence-2 Large", + "category": "vision", + "description": "Florence-2 is an advanced vision foundation model that uses a prompt-based approach to handle a wide range of vision and vision-language tasks", + "status": "active", + "tags": [ + "captioning", + "multimodal", + "vision" + ], + "updated_at": "2026-01-26T21:44:45.223Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/florence-2-large.jpeg", + "model_url": "https://fal.run/fal-ai/florence-2-large/caption", + "github_url": "https://huggingface.co/microsoft/Florence-2-large/blob/main/LICENSE", + "license_type": "commercial", + "date": "2024-06-22T00:00:00.000Z", + "group": { + "key": "florence-2-large", + "label": "Caption" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/florence-2-large/caption", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/florence-2-large/caption queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/florence-2-large/caption", + "category": "vision", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/florence-2-large.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/florence-2-large/caption", + "documentationUrl": "https://fal.ai/models/fal-ai/florence-2-large/caption/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Florence2LargeCaptionInput": { + "title": "ImageInput", + "type": "object", + "properties": { + "image_url": { + "examples": [ + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg", + "http://ecx.images-amazon.com/images/I/51UUzBDAMsL.jpg" + ], + "description": "The URL of the image to be processed.", + "type": "string", + "title": "Image Url" + } + }, + "x-fal-order-properties": [ + "image_url" + ], + "required": [ + "image_url" + ] + }, + "Florence2LargeCaptionOutput": { + "title": "TextOutput", + "type": "object", + "properties": { + "results": { + "description": "Results from the model", + "type": "string", + "title": "Results" + } + }, + "x-fal-order-properties": [ + "results" + ], + "required": [ + "results" + ] + } + } + }, + "paths": { + "/fal-ai/florence-2-large/caption/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/caption/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/caption": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Florence2LargeCaptionInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/caption/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Florence2LargeCaptionOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/florence-2-large/detailed-caption", + "metadata": { + "display_name": "Florence-2 Large", + "category": "vision", + "description": "Florence-2 is an advanced vision foundation model that uses a prompt-based approach to handle a wide range of vision and vision-language tasks", + "status": "active", + "tags": [ + "captioning", + "multimodal", + "vision" + ], + "updated_at": "2026-01-26T21:44:44.657Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/florence-2-large.jpeg", + "model_url": "https://fal.run/fal-ai/florence-2-large/detailed-caption", + "github_url": "https://huggingface.co/microsoft/Florence-2-large/blob/main/LICENSE", + "license_type": "commercial", + "date": "2024-06-22T00:00:00.000Z", + "group": { + "key": "florence-2-large", + "label": "Detailed Caption" + }, + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/florence-2-large/detailed-caption", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/florence-2-large/detailed-caption queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/florence-2-large/detailed-caption", + "category": "vision", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/florence-2-large.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/florence-2-large/detailed-caption", + "documentationUrl": "https://fal.ai/models/fal-ai/florence-2-large/detailed-caption/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "Florence2LargeDetailedCaptionInput": { + "title": "ImageInput", + "type": "object", + "properties": { + "image_url": { + "examples": [ + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg", + "http://ecx.images-amazon.com/images/I/51UUzBDAMsL.jpg" + ], + "description": "The URL of the image to be processed.", + "type": "string", + "title": "Image Url" + } + }, + "x-fal-order-properties": [ + "image_url" + ], + "required": [ + "image_url" + ] + }, + "Florence2LargeDetailedCaptionOutput": { + "title": "TextOutput", + "type": "object", + "properties": { + "results": { + "description": "Results from the model", + "type": "string", + "title": "Results" + } + }, + "x-fal-order-properties": [ + "results" + ], + "required": [ + "results" + ] + } + } + }, + "paths": { + "/fal-ai/florence-2-large/detailed-caption/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/detailed-caption/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/detailed-caption": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Florence2LargeDetailedCaptionInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/florence-2-large/detailed-caption/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Florence2LargeDetailedCaptionOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/imageutils/nsfw", + "metadata": { + "display_name": "NSFW Filter", + "category": "vision", + "description": "Predict the probability of an image being NSFW.", + "status": "active", + "tags": [ + "filter", + "safety", + "utility" + ], + "updated_at": "2026-01-26T21:44:52.720Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/nsfw-filter.webp", + "model_url": "https://fal.run/fal-ai/imageutils/nsfw", + "github_url": "https://huggingface.co/Falconsai/nsfw_image_detection", + "date": "2024-03-22T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/imageutils/nsfw", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/imageutils/nsfw queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/imageutils/nsfw", + "category": "vision", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/nsfw-filter.webp", + "playgroundUrl": "https://fal.ai/models/fal-ai/imageutils/nsfw", + "documentationUrl": "https://fal.ai/models/fal-ai/imageutils/nsfw/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "ImageutilsNsfwInput": { + "title": "NSFWImageDetectionInput", + "type": "object", + "properties": { + "image_url": { + "examples": [ + "https://storage.googleapis.com/falserverless/model_tests/remove_background/elephant.jpg" + ], + "description": "Input image url.", + "type": "string", + "title": "Image Url" + } + }, + "x-fal-order-properties": [ + "image_url" + ], + "required": [ + "image_url" + ] + }, + "ImageutilsNsfwOutput": { + "title": "NSFWImageDetectionOutput", + "type": "object", + "properties": { + "nsfw_probability": { + "description": "The probability of the image being NSFW.", + "type": "number", + "title": "Nsfw Probability" + } + }, + "x-fal-order-properties": [ + "nsfw_probability" + ], + "required": [ + "nsfw_probability" + ] + } + } + }, + "paths": { + "/fal-ai/imageutils/nsfw/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/imageutils/nsfw/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ } + } + } + } + } + } + } + } + }, + "/fal-ai/imageutils/nsfw": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageutilsNsfwInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/imageutils/nsfw/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ImageutilsNsfwOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/moondream/batched", + "metadata": { + "display_name": "Moondream", + "category": "vision", + "description": "Answer questions from the images.", + "status": "active", + "tags": [ + "multimodal", + "vision" + ], + "updated_at": "2026-01-26T21:44:52.844Z", + "is_favorited": false, + "thumbnail_url": "https://storage.googleapis.com/falserverless/gallery/moondream.jpeg", + "model_url": "https://fal.run/fal-ai/moondream/batched", + "github_url": "https://github.com/vikhyat/moondream/blob/main/LICENSE", + "date": "2024-03-20T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/moondream/batched", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/moondream/batched queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/moondream/batched", + "category": "vision", + "thumbnailUrl": "https://storage.googleapis.com/falserverless/gallery/moondream.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/moondream/batched", + "documentationUrl": "https://fal.ai/models/fal-ai/moondream/batched/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." + }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." 
+ } + }, + "required": [ + "status", + "request_id" + ] + }, + "MoondreamBatchedInput": { + "title": "BatchedMoondreamInput", + "type": "object", + "properties": { + "model_id": { + "enum": [ + "vikhyatk/moondream2", + "fal-ai/moondream2-docci" + ], + "title": "Model ID", + "type": "string", + "description": "Model ID to use for inference", + "default": "vikhyatk/moondream2" + }, + "repetition_penalty": { + "minimum": 1, + "maximum": 2, + "type": "number", + "title": "Repetition Penalty", + "description": "Repetition penalty for sampling", + "default": 1 + }, + "inputs": { + "examples": [ + [ + { + "prompt": "What is the girl doing?", + "image_url": "https://github.com/vikhyat/moondream/raw/main/assets/demo-1.jpg" + } + ] + ], + "title": "Input prompt & image pairs", + "type": "array", + "description": "List of input prompts and image URLs", + "items": { + "$ref": "#/components/schemas/MoondreamInputParam" + } + }, + "max_tokens": { + "minimum": 32, + "maximum": 1024, + "type": "integer", + "title": "Max Tokens", + "description": "Maximum number of new tokens to generate", + "default": 64 + }, + "temperature": { + "maximum": 1, + "type": "number", + "title": "Temperature", + "description": "Temperature for sampling", + "exclusiveMinimum": 0, + "default": 0.2 + }, + "top_p": { + "minimum": 0, + "maximum": 1, + "type": "number", + "title": "Top P", + "description": "Top P for sampling", + "default": 1 + } + }, + "x-fal-order-properties": [ + "model_id", + "inputs", + "max_tokens", + "temperature", + "top_p", + "repetition_penalty" + ], + "required": [ + "inputs" + ] + }, + "MoondreamBatchedOutput": { + "title": "BatchedMoondreamOutput", + "type": "object", + "properties": { + "filenames": { + "title": "Filenames", + "type": "array", + "description": "Filenames of the images processed", + "items": { + "type": "string" + }, + "nullable": true + }, + "outputs": { + "title": "Outputs", + "type": "array", + "description": "List of generated outputs", + "items": { + "type": "string" + } + }, + "partial": { + "title": "Partial", + "type": "boolean", + "description": "Whether the output is partial", + "default": false + }, + "timings": { + "additionalProperties": { + "type": "number" + }, + "type": "object", + "title": "Timings", + "description": "Timings for different parts of the process" + } + }, + "x-fal-order-properties": [ + "outputs", + "partial", + "timings", + "filenames" + ], + "required": [ + "outputs", + "timings" + ] + }, + "MoondreamInputParam": { + "title": "MoondreamInputParam", + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Do you know who drew this painting?" + ], + "title": "Prompt", + "type": "string", + "description": "Prompt to be used for the image", + "default": "Describe this image." + }, + "image_url": { + "examples": [ + "https://llava-vl.github.io/static/images/monalisa.jpg" + ], + "title": "Image URL", + "type": "string", + "description": "URL of the image to be processed" + } + }, + "x-fal-order-properties": [ + "image_url", + "prompt" + ], + "required": [ + "image_url" + ] + } + } + }, + "paths": { + "/fal-ai/moondream/batched/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." 
+ } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/moondream/batched/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." + } + } + } + } + } + } + } + } + }, + "/fal-ai/moondream/batched": { + "post": { + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MoondreamBatchedInput" + } + } + } + }, + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/moondream/batched/requests/{request_id}": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "Result of the request.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/MoondreamBatchedOutput" + } + } + } + } + } + } + } + }, + "servers": [ + { + "url": "https://queue.fal.run" + } + ], + "security": [ + { + "apiKeyAuth": [] + } + ] + } + }, + { + "endpoint_id": "fal-ai/llava-next", + "metadata": { + "display_name": "LLaVA v1.6 34B", + "category": "vision", + "description": "Vision", + "status": "active", + "tags": [ + "multimodal", + "vision" + ], + "updated_at": "2026-01-26T21:44:56.370Z", + "is_favorited": false, + "thumbnail_url": "https://fal-cdn.batuhan-941.workers.dev/files/rabbit/GkS_M-jVZnM3ioJCCkwhh.jpeg", + "model_url": "https://fal.run/fal-ai/llava-next", + "license_type": "research", + "date": "2024-02-14T00:00:00.000Z", + "highlighted": false, + "pinned": false + }, + "openapi": { + "openapi": "3.0.4", + "info": { + "title": "Queue OpenAPI for fal-ai/llava-next", + "version": "1.0.0", + "description": "The OpenAPI schema for the fal-ai/llava-next queue.", + "x-fal-metadata": { + "endpointId": "fal-ai/llava-next", + "category": "vision", + "thumbnailUrl": "https://fal-cdn.batuhan-941.workers.dev/files/rabbit/GkS_M-jVZnM3ioJCCkwhh.jpeg", + "playgroundUrl": "https://fal.ai/models/fal-ai/llava-next", + "documentationUrl": "https://fal.ai/models/fal-ai/llava-next/api" + } + }, + "components": { + "securitySchemes": { + "apiKeyAuth": { + "type": "apiKey", + "in": "header", + "name": "Authorization", + "description": "Fal Key" + } + }, + "schemas": { + "QueueStatus": { + "type": "object", + "properties": { + "status": { + "type": "string", + "enum": [ + "IN_QUEUE", + "IN_PROGRESS", + "COMPLETED" + ] + }, + "request_id": { + "type": "string", + "description": "The request id." + }, + "response_url": { + "type": "string", + "description": "The response url." + }, + "status_url": { + "type": "string", + "description": "The status url." + }, + "cancel_url": { + "type": "string", + "description": "The cancel url." 
+ }, + "logs": { + "type": "object", + "description": "The logs.", + "additionalProperties": true + }, + "metrics": { + "type": "object", + "description": "The metrics.", + "additionalProperties": true + }, + "queue_position": { + "type": "integer", + "description": "The queue position." + } + }, + "required": [ + "status", + "request_id" + ] + }, + "LlavaNextInput": { + "x-fal-order-properties": [ + "image_url", + "prompt", + "max_tokens", + "temperature", + "top_p" + ], + "type": "object", + "properties": { + "prompt": { + "examples": [ + "Do you know who drew this painting?" + ], + "title": "Prompt", + "type": "string", + "description": "Prompt to be used for the image" + }, + "top_p": { + "minimum": 0, + "maximum": 1, + "type": "number", + "description": "Top P for sampling", + "title": "Top P", + "default": 1 + }, + "max_tokens": { + "min": 32, + "description": "Maximum number of tokens to generate", + "type": "integer", + "title": "Max Tokens", + "max": 1024, + "default": 64 + }, + "temperature": { + "maximum": 1, + "type": "number", + "description": "Temperature for sampling", + "title": "Temperature", + "exclusiveMinimum": 0, + "default": 0.2 + }, + "image_url": { + "examples": [ + "https://llava-vl.github.io/static/images/monalisa.jpg" + ], + "title": "Image URL", + "type": "string", + "description": "URL of the image to be processed" + } + }, + "title": "LLavaInput", + "required": [ + "image_url", + "prompt" + ] + }, + "LlavaNextOutput": { + "x-fal-order-properties": [ + "output", + "partial" + ], + "type": "object", + "properties": { + "partial": { + "description": "Whether the output is partial", + "type": "boolean", + "title": "Partial", + "default": false + }, + "output": { + "examples": [ + "Leonardo da Vinci" + ], + "title": "Output", + "type": "string", + "description": "Generated output" + } + }, + "title": "LLavaOutput", + "required": [ + "output" + ] + } + } + }, + "paths": { + "/fal-ai/llava-next/requests/{request_id}/status": { + "get": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + }, + { + "name": "logs", + "in": "query", + "required": false, + "schema": { + "type": "number", + "description": "Whether to include logs (`1`) in the response or not (`0`)." + } + } + ], + "responses": { + "200": { + "description": "The request status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueueStatus" + } + } + } + } + } + } + }, + "/fal-ai/llava-next/requests/{request_id}/cancel": { + "put": { + "parameters": [ + { + "name": "request_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "Request ID" + } + } + ], + "responses": { + "200": { + "description": "The request was cancelled.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "success": { + "type": "boolean", + "description": "Whether the request was cancelled successfully." 
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "/fal-ai/llava-next": {
+ "post": {
+ "requestBody": {
+ "required": true,
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/LlavaNextInput"
+ }
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "description": "The request status.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/QueueStatus"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "/fal-ai/llava-next/requests/{request_id}": {
+ "get": {
+ "parameters": [
+ {
+ "name": "request_id",
+ "in": "path",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "description": "Request ID"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "Result of the request.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/LlavaNextOutput"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "servers": [
+ {
+ "url": "https://queue.fal.run"
+ }
+ ],
+ "security": [
+ {
+ "apiKeyAuth": []
+ }
+ ]
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/packages/typescript/ai-fal/openapi-ts.config.ts b/packages/typescript/ai-fal/openapi-ts.config.ts
new file mode 100644
index 00000000..b67ce450
--- /dev/null
+++ b/packages/typescript/ai-fal/openapi-ts.config.ts
@@ -0,0 +1,162 @@
+// This file is manually maintained (not auto-generated)
+import { readFileSync, readdirSync } from 'node:fs'
+import { dirname, join } from 'node:path'
+import { fileURLToPath } from 'node:url'
+
+const __filename = fileURLToPath(import.meta.url)
+const __dirname = dirname(__filename)
+
+/**
+ * Registry of known missing schemas that fal.ai references but doesn't define.
+ *
+ * fal.ai's OpenAPI specs sometimes contain $ref pointers to schemas that don't exist
+ * in the components.schemas section. This is a data quality issue from their API.
+ *
+ * We resolve these missing $refs by injecting proper schema definitions BEFORE
+ * @hey-api/openapi-ts sees the specs (since the parser fails on missing $refs).
+ *
+ * When console warnings show unknown placeholders, research the schema structure
+ * and add proper definitions here to get correct TypeScript types.
+ */
+const KNOWN_MISSING_SCHEMAS: Record<string, any> = {
+ TrackPoint: {
+ type: 'object',
+ description: 'A coordinate point with x and y values for motion tracking',
+ properties: {
+ x: { type: 'number', description: 'X coordinate' },
+ y: { type: 'number', description: 'Y coordinate' },
+ },
+ required: ['x', 'y'],
+ },
+ // Add more known missing schemas here as they're discovered
+}
+
+/**
+ * Recursively find all $ref pointers in an OpenAPI spec object.
+ * Extracts schema names from references like "#/components/schemas/SchemaName".
+ */
+function findAllRefs(obj: any, refs: Set<string> = new Set()): Set<string> {
+ if (!obj || typeof obj !== 'object') return refs
+
+ if (Array.isArray(obj)) {
+ obj.forEach((item) => findAllRefs(item, refs))
+ } else {
+ for (const [key, value] of Object.entries(obj)) {
+ if (key === '$ref' && typeof value === 'string') {
+ // Extract schema name from "#/components/schemas/SchemaName"
+ const match = value.match(/#\/components\/schemas\/(.+)/)
+ if (match?.[1]) refs.add(match[1])
+ }
+ if (typeof value === 'object') {
+ findAllRefs(value, refs)
+ }
+ }
+ }
+
+ return refs
+}
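+
+// Example (illustrative only; 'BrokenInput' is a made-up schema name): given a
+// spec whose request body points at a schema fal.ai never defined, findAllRefs
+// surfaces the dangling name so resolveMissingRefs below can patch it:
+//
+//   const toySpec = {
+//     paths: { '/x': { post: { requestBody: { $ref: '#/components/schemas/BrokenInput' } } } },
+//     components: { schemas: {} },
+//   }
+//   findAllRefs(toySpec) // => Set { 'BrokenInput' }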
+
+/**
+ * Resolve missing $refs by injecting schema definitions.
+ *
+ * For known missing schemas (in KNOWN_MISSING_SCHEMAS), uses proper definitions.
+ * For unknown missing schemas, creates generic placeholders to prevent parser failures.
+ *
+ * This runs during config evaluation, before @hey-api/openapi-ts parses the specs.
+ */
+function resolveMissingRefs(spec: any): {
+ fixed: number
+ unknown: Array<string>
+} {
+ if (!spec.components?.schemas) return { fixed: 0, unknown: [] }
+
+ const allRefs = findAllRefs(spec)
+ const existingSchemas = new Set(Object.keys(spec.components.schemas))
+ const missingRefs = [...allRefs].filter((ref) => !existingSchemas.has(ref))
+
+ let fixed = 0
+ const unknown: Array<string> = []
+
+ for (const missingRef of missingRefs) {
+ if (!spec.components.schemas) spec.components.schemas = {}
+
+ if (KNOWN_MISSING_SCHEMAS[missingRef]) {
+ // Use known schema definition for proper TypeScript types
+ spec.components.schemas[missingRef] = KNOWN_MISSING_SCHEMAS[missingRef]
+ fixed++
+ } else {
+ // Create generic placeholder to prevent parser failure
+ // This will generate { [key: string]: unknown } TypeScript types
+ spec.components.schemas[missingRef] = {
+ type: 'object',
+ description: `Schema referenced but not defined by fal.ai (missing from source OpenAPI spec)`,
+ additionalProperties: true,
+ }
+ unknown.push(missingRef)
+ }
+ }
+
+ return { fixed, unknown }
+}
+
+function getFalCategoryFiles(): Array<string> {
+ const categoryDir = join(__dirname, 'json')
+ const files = readdirSync(categoryDir)
+ .filter((file) => file.endsWith('.json'))
+ .sort()
+ return files
+}
+
+function getFalModelOpenApiObjects(filename: string): Array<any> {
+ const fileContents = readFileSync(join(__dirname, 'json', filename), 'utf8')
+ const json = JSON.parse(fileContents)
+
+ let totalFixed = 0
+ const allUnknown = new Set<string>()
+
+ const specs = json.models.map((model: any) => {
+ const spec = model.openapi
+ const { fixed, unknown } = resolveMissingRefs(spec)
+
+ totalFixed += fixed
+ unknown.forEach((u) => allUnknown.add(u))
+
+ return spec
+ })
+
+ // Log summary if any refs were fixed
+ if (totalFixed > 0 || allUnknown.size > 0) {
+ console.log(`[${filename}] Resolved ${totalFixed} known missing refs`)
+ if (allUnknown.size > 0) {
+ console.warn(
+ `[${filename}] Created placeholders for unknown refs: ${[...allUnknown].join(', ')}`,
+ )
+ }
+ }
+
+ return specs
+}
+
+export default [
+ ...getFalCategoryFiles().map((file) => ({
+ input: getFalModelOpenApiObjects(file),
+ output: {
+ path: `./src/generated/${file.replace(/fal\.models\.([^.]+)\.json/, '$1')}`,
+ indexFile: false,
+ postProcess: ['prettier'],
+ },
+ plugins: ['@hey-api/typescript', { name: 'zod', metadata: true }],
+ parser: {
+ filters: {
+ schemas: {
+ include: '/Input$|Output$|^Post.*Data$/',
+ },
+ operations: {
+ include: ['/post .*/'],
+ exclude: ['/get .*/'],
+ },
+ orphans: false,
+ },
+ },
+ })),
+]
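+
+// Example (illustrative): the output path above is derived from each category
+// file name, e.g.
+//   'fal.models.vision.json'.replace(/fal\.models\.([^.]+)\.json/, '$1') // => 'vision'
+// so that category's generated types would land in ./src/generated/vision.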
diff --git a/packages/typescript/ai-fal/package.json b/packages/typescript/ai-fal/package.json
new file mode 100644
index 00000000..1dd7723a
--- /dev/null
+++ b/packages/typescript/ai-fal/package.json
@@ -0,0 +1,62 @@
+{
+ "name": "@tanstack/ai-fal",
+ "version": "0.0.1",
+ "description": "fal.ai adapter for TanStack AI",
+ "author": "",
+ "license": "MIT",
+ "repository": {
+ "type": "git",
+ "url": "git+https://github.com/TanStack/ai.git",
+ "directory": "packages/typescript/ai-fal"
+ },
+ "type": "module",
+ "module": "./dist/esm/index.js",
+ "types": "./dist/esm/index.d.ts",
+ "exports": {
+ ".": {
+ "types": "./dist/esm/index.d.ts",
+ "import": "./dist/esm/index.js"
+ }
+ },
+ "files": [
+ "dist",
+ "src"
+ ],
+ "scripts": {
+ "build": "vite build",
+ "clean": "premove ./build ./dist",
+ "generate:types": "pnpx @hey-api/openapi-ts",
+ "generate:maps": "tsx scripts/generate-fal-endpoint-maps.ts",
+ "generate:fal": "pnpm fetch:fal-all && pnpm generate:types && pnpm generate:maps",
+ "fetch:fal-all": "tsx scripts/fetch-fal-models.ts",
+ "fetch:fal-image": "tsx scripts/fetch-fal-models.ts --categories=image-to-image,text-to-image",
+ "fetch:fal-video": "tsx scripts/fetch-fal-models.ts --categories=video-to-video,text-to-video,image-to-video",
+ "fetch:fal-audio": "tsx scripts/fetch-fal-models.ts --categories=text-to-audio,audio-to-audio,text-to-speech",
+ "lint:fix": "eslint ./src --fix",
+ "test:build": "publint --strict",
+ "test:eslint": "eslint ./src",
+ "test:lib": "vitest run",
+ "test:lib:dev": "pnpm test:lib --watch",
+ "test:types": "tsc"
+ },
+ "keywords": [
+ "ai",
+ "fal",
+ "tanstack",
+ "adapter",
+ "image-generation",
+ "video-generation"
+ ],
+ "dependencies": {
+ "@fal-ai/client": "^1.8.3"
+ },
+ "devDependencies": {
+ "@hey-api/openapi-ts": "^0.90.10",
+ "@vitest/coverage-v8": "4.0.14",
+ "vite": "^7.2.7"
+ },
+ "peerDependencies": {
+ "@tanstack/ai": "workspace:*",
+ "zod": "^4"
+ }
+}
diff --git a/packages/typescript/ai-fal/scripts/compare-fal-models.ts b/packages/typescript/ai-fal/scripts/compare-fal-models.ts
new file mode 100644
index 00000000..9b113a67
--- /dev/null
+++ b/packages/typescript/ai-fal/scripts/compare-fal-models.ts
@@ -0,0 +1,360 @@
+/**
+ * Script to compare Fal API models with @fal-ai/client EndpointTypeMap
+ *
+ * This script identifies models that exist in the Fal API but are missing
+ * from the TypeScript SDK, helping track when Fal introduces new models.
+ *
+ * Usage:
+ * pnpm exec tsx scripts/compare-fal-models.ts
+ * pnpm exec tsx scripts/compare-fal-models.ts --csv
+ * pnpm exec tsx scripts/compare-fal-models.ts --csv=output.csv
+ *
+ * Note: Run 'pnpm fetch:fal-models' first to download the latest models
+ */
+
+import { readFileSync, writeFileSync } from 'node:fs'
+import { dirname, join } from 'node:path'
+import { fileURLToPath } from 'node:url'
+
+const __dirname = dirname(fileURLToPath(import.meta.url))
+
+// ============================================================
+// Type Definitions
+// ============================================================
+
+interface FalApiModel {
+ endpoint_id: string
+ metadata: {
+ display_name: string
+ category: string
+ description: string
+ status: 'active' | 'inactive' | 'deprecated'
+ tags: Array<string>
+ updated_at: string
+ [key: string]: any
+ }
+}
+
+interface ComparisonResults {
+ missingFromSDK: Array<FalApiModel> // In API but not in EndpointTypeMap
+ deprecatedInAPI: Array<string> // In EndpointTypeMap but not in API
+ totalApiModels: number
+ totalSDKModels: number
+ activeApiModels: number
+}
+
+// ============================================================
+// Core Functions
+// ============================================================
+
+/**
+ * Load models from JSON file
+ */
+function loadFalModels(): Array<FalApiModel> {
+ try {
+ const modelsPath = join(__dirname, 'fal.models.json')
+ const content = readFileSync(modelsPath, 'utf-8')
+ const data = JSON.parse(content) as {
+ generated_at: string
+ total_models: number
+ models: Array<FalApiModel>
+ }
+ return data.models
+ } catch (error) {
+ if (
+ error instanceof Error &&
+ (error.message.includes('no such file') ||
+ error.message.includes('ENOENT'))
+ ) {
+ throw new Error(
+ 'Models file not found. Run "pnpm fetch:fal-models" first to download the models.',
+ )
+ }
+ throw error
+ }
+}
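+
+// For orientation (illustrative values only): loadFalModels expects the envelope
+// written by the fetch script, e.g.
+//   {
+//     "generated_at": "2026-01-28T02:51:51.871Z",
+//     "total_models": 1,
+//     "models": [{ "endpoint_id": "fal-ai/llava-next", "metadata": { ... } }]
+//   }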
Run "pnpm fetch:fal-models" first to download the models.', + ) + } + throw error + } +} + +/** + * Parse EndpointTypeMap from the @fal-ai/client type definitions + */ +function getEndpointTypeMapKeys(): Set { + try { + const typesPath = join( + __dirname, + '../node_modules/@fal-ai/client/src/types/endpoints.d.ts', + ) + + const content = readFileSync(typesPath, 'utf-8') + + // Find the EndpointTypeMap section + const endpointMapMatch = content.match( + /export type EndpointTypeMap = \{([\s\S]*?)\n\};/, + ) + + if (!endpointMapMatch) { + throw new Error('Could not find EndpointTypeMap in endpoints.d.ts') + } + + const mapContent = endpointMapMatch[1] + if (!mapContent) { + throw new Error('Could not find EndpointTypeMap in endpoints.d.ts') + } + // Extract model IDs using regex: "model-id": { + const modelIdRegex = /"([^"]+)":\s*\{/g + const modelIds = new Set() + + let match + while ((match = modelIdRegex.exec(mapContent)) !== null) { + modelIds.add(match[1]!) + } + + return modelIds + } catch (error) { + if (error instanceof Error) { + throw new Error(`Error reading EndpointTypeMap: ${error.message}`) + } + throw error + } +} + +/** + * Compare API models with SDK type definitions + */ +function compareModelSets( + apiModels: Array, + sdkKeys: Set, +): ComparisonResults { + const apiModelIds = new Set(apiModels.map((m) => m.endpoint_id)) + + // Find models in API but not in SDK + const missingFromSDK = apiModels.filter( + (model) => !sdkKeys.has(model.endpoint_id), + ) + + // Find models in SDK but not in API (potentially deprecated) + const deprecatedInAPI = Array.from(sdkKeys).filter( + (id) => !apiModelIds.has(id), + ) + + // Calculate statistics + const activeApiModels = apiModels.filter( + (m) => m.metadata.status === 'active', + ).length + + return { + missingFromSDK, + deprecatedInAPI, + totalApiModels: apiModels.length, + totalSDKModels: sdkKeys.size, + activeApiModels, + } +} + +/** + * Print comparison results to console + */ +function printResults(results: ComparisonResults): void { + console.log('\n' + '='.repeat(80)) + console.log('Fal Model Comparison Results') + console.log('='.repeat(80) + '\n') + + // Summary Statistics + console.log('📊 Summary Statistics') + console.log('-'.repeat(80)) + console.log(`Total API Models: ${results.totalApiModels}`) + console.log(`Active API Models: ${results.activeApiModels}`) + console.log(`Total SDK Models: ${results.totalSDKModels}`) + console.log(`Missing from SDK: ${results.missingFromSDK.length}`) + console.log(`Deprecated in API: ${results.deprecatedInAPI.length}`) + console.log('') + + // Missing from SDK (grouped by category and status) + if (results.missingFromSDK.length > 0) { + console.log('🔍 Models in API but Missing from SDK') + console.log('-'.repeat(80)) + + // Group by category + const byCategory = results.missingFromSDK.reduce( + (acc, model) => { + const category = model.metadata.category || 'uncategorized' + if (!acc[category]) { + acc[category] = [] + } + acc[category].push(model) + return acc + }, + {} as Record>, + ) + + // Print each category + for (const [category, models] of Object.entries(byCategory)) { + console.log(`\n📁 ${category.toUpperCase()} (${models.length})`) + console.log('') + + for (const model of models) { + const status = model.metadata.status === 'active' ? 
'✅' : '⚠️' + console.log(` ${status} ${model.endpoint_id}`) + console.log(` Name: ${model.metadata.display_name}`) + console.log(` Status: ${model.metadata.status}`) + console.log( + ` Updated: ${new Date(model.metadata.updated_at).toLocaleDateString()}`, + ) + if (model.metadata.description) { + console.log( + ` Description: ${model.metadata.description.slice(0, 80)}${model.metadata.description.length > 80 ? '...' : ''}`, + ) + } + console.log(` URL: https://fal.ai/models/${model.endpoint_id}`) + console.log('') + } + } + } else { + console.log('✅ All API models are present in the SDK!') + console.log('') + } + + // Deprecated in API + if (results.deprecatedInAPI.length > 0) { + console.log('⚠️ Models in SDK but Not in API (Potentially Deprecated)') + console.log('-'.repeat(80)) + for (const modelId of results.deprecatedInAPI.slice(0, 10)) { + console.log(` - ${modelId}`) + } + if (results.deprecatedInAPI.length > 10) { + console.log(` ... and ${results.deprecatedInAPI.length - 10} more`) + } + console.log('') + } + + console.log('='.repeat(80)) +} + +/** + * Escape CSV field value + */ +function escapeCsvField(value: string): string { + if (value.includes(',') || value.includes('"') || value.includes('\n')) { + return `"${value.replace(/"/g, '""')}"` + } + return value +} + +/** + * Generate CSV content from comparison results + */ +function generateCSV(results: ComparisonResults): string { + const lines: Array = [] + + // CSV Header + lines.push( + 'Type,Endpoint ID,Display Name,Category,Status,Updated At,Description,URL,Tags', + ) + + // Missing from SDK models + for (const model of results.missingFromSDK) { + const row = [ + 'MISSING_FROM_SDK', + escapeCsvField(model.endpoint_id), + escapeCsvField(model.metadata.display_name), + escapeCsvField(model.metadata.category || ''), + escapeCsvField(model.metadata.status), + escapeCsvField(model.metadata.updated_at), + escapeCsvField(model.metadata.description || ''), + escapeCsvField(`https://fal.ai/models/${model.endpoint_id}`), + escapeCsvField(model.metadata.tags.join('; ') || ''), + ] + lines.push(row.join(',')) + } + + // Deprecated in API models + for (const modelId of results.deprecatedInAPI) { + const row = [ + 'DEPRECATED_IN_API', + escapeCsvField(modelId), + '', // No display name + '', // No category + '', // No status + '', // No updated_at + '', // No description + '', // No URL + '', // No tags + ] + lines.push(row.join(',')) + } + + return lines.join('\n') +} + +/** + * Write CSV file + */ +function writeCsvFile( + results: ComparisonResults, + filename: string = 'fal-models-comparison.csv', +): void { + const csv = generateCSV(results) + const outputPath = join(process.cwd(), filename) + + writeFileSync(outputPath, csv, 'utf-8') + console.log(`\n✅ CSV exported to: ${outputPath}`) + console.log( + ` Total rows: ${results.missingFromSDK.length + results.deprecatedInAPI.length + 1}`, + ) + console.log(` - Missing from SDK: ${results.missingFromSDK.length}`) + console.log(` - Deprecated in API: ${results.deprecatedInAPI.length}\n`) +} + +/** + * Parse command-line arguments + */ +function parseArgs(): { csv: boolean; csvFilename: string } { + const args = process.argv.slice(2) + let csv = false + let csvFilename = 'fal-models-comparison.csv' + + for (const arg of args) { + if (arg === '--csv') { + csv = true + } else if (arg.startsWith('--csv=')) { + csv = true + const csvFilenameArg = arg.split('=')[1] + if (!csvFilenameArg) { + throw new Error('csv filename not specified') + } + csvFilename = csvFilenameArg + } + } + + return { 
csv, csvFilename }
+}
+
+// ============================================================
+// Main Execution
+// ============================================================
+
+async function main() {
+  try {
+    const { csv, csvFilename } = parseArgs()
+
+    console.log('Loading Fal models...')
+    const apiModels = await loadFalModels()
+    console.log(`Loaded ${apiModels.length} models`)
+
+    console.log('Reading EndpointTypeMap from SDK...')
+    const sdkKeys = getEndpointTypeMapKeys()
+
+    console.log('Comparing model sets...')
+    const results = compareModelSets(apiModels, sdkKeys)
+
+    if (csv) {
+      writeCsvFile(results, csvFilename)
+    } else {
+      printResults(results)
+    }
+  } catch (error) {
+    console.error('\n❌ Error:', error instanceof Error ? error.message : error)
+    process.exit(1)
+  }
+}
+
+main()
diff --git a/packages/typescript/ai-fal/scripts/fetch-fal-models.ts b/packages/typescript/ai-fal/scripts/fetch-fal-models.ts
new file mode 100644
index 00000000..60781188
--- /dev/null
+++ b/packages/typescript/ai-fal/scripts/fetch-fal-models.ts
@@ -0,0 +1,385 @@
+/**
+ * Script to fetch all models from Fal API and save as JSON files per category
+ *
+ * This script downloads models from the Fal API and saves them to separate
+ * JSON files, one per category, for use by other scripts. Supports filtering
+ * by category to reduce file size and improve targeted workflows.
+ *
+ * Usage:
+ *   # Fetch all models (default) - saves all categories
+ *   pnpm exec tsx scripts/fetch-fal-models.ts
+ *
+ *   # Fetch specific categories (server-side filtering)
+ *   pnpm exec tsx scripts/fetch-fal-models.ts --categories=image-to-image,text-to-video
+ *
+ *   # Fetch single category
+ *   pnpm exec tsx scripts/fetch-fal-models.ts --category=image-to-image
+ *
+ * Environment Variables:
+ *   FAL_KEY - Required API key for Fal API authentication
+ */
+
+import { mkdirSync, writeFileSync } from 'node:fs'
+import { dirname, join } from 'node:path'
+import { fileURLToPath } from 'node:url'
+
+// ============================================================
+// Type Definitions
+// ============================================================
+
+interface FalApiModel {
+  endpoint_id: string
+  metadata: {
+    display_name: string
+    category: string
+    description: string
+    status: 'active' | 'inactive' | 'deprecated'
+    tags: Array<string>
+    updated_at: string
+    [key: string]: any
+  }
+}
+
+interface FalApiResponse {
+  models: Array<FalApiModel>
+  has_more: boolean
+  next_cursor: string | null
+}
+
+interface FilterOptions {
+  categories: Array<string> | null // null = no filtering
+}
+
+interface ParsedArgs {
+  categories: Array<string> | null
+}
+
+// ============================================================
+// Core Functions
+// ============================================================
+
+/**
+ * Sleep for a specified number of milliseconds
+ */
+function sleep(ms: number): Promise<void> {
+  return new Promise((resolve) => setTimeout(resolve, ms))
+}
+
+/**
+ * Parse command-line arguments
+ */
+function parseCliArguments(): ParsedArgs {
+  const args = process.argv.slice(2)
+  let categories: Array<string> | null = null
+
+  for (const arg of args) {
+    if (arg.startsWith('--categories=')) {
+      const categoriesArg = arg.split('=')[1]
+      if (!categoriesArg) {
+        throw new Error('Categories not specified')
+      }
+      categories = categoriesArg
+        .split(',')
+        .map((c) => c.trim())
+        .filter((c) => c.length > 0)
+    } else if (arg.startsWith('--category=')) {
+      const categoryArg = arg.split('=')[1]
+      if (!categoryArg) {
+        throw new Error('Category not specified')
+      }
+      categories =
[categoryArg.trim()]
+    } else {
+      throw new Error(`Unknown argument: ${arg}`)
+    }
+  }
+
+  return { categories }
+}
+
+/**
+ * Extract unique categories from models with counts
+ */
+function extractCategories(models: Array<FalApiModel>): Map<string, number> {
+  const categoryMap = new Map<string, number>()
+
+  for (const model of models) {
+    const category = model.metadata.category || 'uncategorized'
+    categoryMap.set(category, (categoryMap.get(category) || 0) + 1)
+  }
+
+  return categoryMap
+}
+
+/**
+ * Sanitize category name for filesystem-safe filename
+ */
+function sanitizeCategoryName(category: string): string {
+  // Replace any characters that aren't alphanumeric, dash, or underscore
+  return category.replace(/[^a-zA-Z0-9_-]/g, '-')
+}
+
+/**
+ * Group models by category
+ */
+function groupModelsByCategory(
+  models: Array<FalApiModel>,
+): Map<string, Array<FalApiModel>> {
+  const categoryMap = new Map<string, Array<FalApiModel>>()
+
+  for (const model of models) {
+    const category = model.metadata.category || 'uncategorized'
+    if (!categoryMap.has(category)) {
+      categoryMap.set(category, [])
+    }
+    categoryMap.get(category)!.push(model)
+  }
+
+  return categoryMap
+}
+
+/**
+ * Fetch a single page with retry logic
+ */
+async function fetchPageWithRetry(
+  url: string,
+  apiKey: string,
+  retries: number = 3,
+): Promise<FalApiResponse> {
+  for (let attempt = 1; attempt <= retries; attempt++) {
+    try {
+      const response = await fetch(url, {
+        headers: {
+          Authorization: `Key ${apiKey}`,
+        },
+      })
+
+      if (response.status === 429) {
+        // Rate limited - wait and retry with exponential backoff
+        const waitTime = Math.min(2000 * Math.pow(2, attempt), 10000)
+        console.log(`  Rate limited. Waiting ${waitTime}ms before retry...`)
+        await sleep(waitTime)
+        continue
+      }
+
+      if (!response.ok) {
+        throw new Error(
+          `Failed to fetch Fal models: ${response.status} ${response.statusText}`,
+        )
+      }
+
+      return (await response.json()) as FalApiResponse
+    } catch (error) {
+      if (attempt === retries) {
+        throw error
+      }
+      console.log(`  Attempt ${attempt} failed, retrying...`)
+      await sleep(1000 * attempt)
+    }
+  }
+
+  throw new Error('Max retries exceeded')
+}
+
+/**
+ * Fetch models from the Fal API with pagination and optional category filter
+ */
+async function fetchFalModels(category?: string): Promise<Array<FalApiModel>> {
+  // Validate API key exists
+  const apiKey = process.env.FAL_KEY
+  if (!apiKey) {
+    throw new Error('FAL_KEY environment variable is required')
+  }
+
+  const allModels: Array<FalApiModel> = []
+  let cursor: string | null = null
+  let pageNumber = 1
+
+  const categoryLabel = category ? ` (category: ${category})` : ''
+  console.log(`Fetching models from Fal API${categoryLabel}...`)
+
+  do {
+    // Build URL with category filter if specified
+    const params = new URLSearchParams({
+      status: 'active',
+      expand: 'openapi-3.0',
+    })
+
+    if (category) {
+      params.set('category', category)
+    }
+
+    if (cursor) {
+      params.set('cursor', cursor)
+    }
+
+    const url = `https://api.fal.ai/v1/models?${params.toString()}`
+
+    console.log(`  Fetching page ${pageNumber}...`)
+
+    const data = await fetchPageWithRetry(url, apiKey)
+    allModels.push(...data.models)
+
+    console.log(
+      `  Retrieved ${data.models.length} models (total: ${allModels.length})`,
+    )
+
+    cursor = data.has_more ?
data.next_cursor : null
+    pageNumber++
+  } while (cursor)
+
+  return allModels
+}
+
+/**
+ * Fetch models for multiple categories in parallel
+ */
+async function fetchModelsByCategories(
+  categories: Array<string>,
+): Promise<Array<FalApiModel>> {
+  console.log(`\nFetching ${categories.length} categories in parallel...\n`)
+
+  // Fetch each category in parallel
+  const results = await Promise.all(
+    categories.map((category) => fetchFalModels(category)),
+  )
+
+  // Combine all results
+  const allModels = results.flat()
+
+  // Deduplicate models by endpoint_id (in case a model appears in multiple categories)
+  const uniqueModels = Array.from(
+    new Map(allModels.map((model) => [model.endpoint_id, model])).values(),
+  )
+
+  console.log(
+    `\nCombined ${uniqueModels.length} unique models from ${categories.length} categories`,
+  )
+
+  return uniqueModels
+}
+
+/**
+ * Generate JSON file content with metadata for a category
+ */
+function generateCategoryJsonFile(
+  category: string,
+  models: Array<FalApiModel>,
+  filterOptions: FilterOptions | null,
+): string {
+  const data: any = {
+    generated_at: new Date().toISOString(),
+    total_models: models.length,
+    category: category,
+  }
+
+  // Add filter metadata if filtering was applied
+  if (filterOptions?.categories && filterOptions.categories.length > 0) {
+    data.filter = {
+      categories: filterOptions.categories,
+      filtered_at: new Date().toISOString(),
+    }
+  }
+
+  data.models = models
+
+  return JSON.stringify(data, null, 2)
+}
+
+/**
+ * Save models grouped by category to separate JSON files
+ */
+function saveModelsByCategory(
+  models: Array<FalApiModel>,
+  filterOptions: FilterOptions | null,
+): void {
+  console.log('\nGrouping models by category...')
+
+  // Group models by category
+  const categoryMap = groupModelsByCategory(models)
+
+  console.log(`Found ${categoryMap.size} categories:`)
+  for (const [category, categoryModels] of categoryMap.entries()) {
+    console.log(`  - ${category}: ${categoryModels.length} models`)
+  }
+
+  console.log('\nSaving category files...')
+
+  // Compute script directory (works for both ESM and CommonJS)
+  const scriptDir = dirname(fileURLToPath(import.meta.url))
+  const scriptsDir = join(scriptDir, 'scripts')
+
+  // Ensure the target directory exists
+  mkdirSync(scriptsDir, { recursive: true })
+
+  let savedCount = 0
+
+  // Save each category to its own file
+  for (const [category, categoryModels] of categoryMap.entries()) {
+    const sanitizedCategory = sanitizeCategoryName(category)
+    const filename = `fal.models.${sanitizedCategory}.json`
+    const outputPath = join(scriptsDir, filename)
+
+    console.log(
+      `  Saving category "${category}" (${categoryModels.length} models) to ${filename}...`,
+    )
+
+    const content = generateCategoryJsonFile(
+      category,
+      categoryModels,
+      filterOptions,
+    )
+    writeFileSync(outputPath, content, 'utf-8')
+
+    savedCount++
+  }
+
+  console.log(`\n✅ Successfully saved ${savedCount} category files`)
+  console.log(`✅ Total models saved: ${models.length}`)
+}
+
+// ============================================================
+// Main Execution
+// ============================================================
+
+async function main() {
+  try {
+    // Parse CLI arguments
+    const args = parseCliArguments()
+
+    // Fetch models based on arguments
+    let models: Array<FalApiModel>
+
+    if (args.categories && args.categories.length > 0) {
+      // Use server-side filtering for categories
+      models = await fetchModelsByCategories(args.categories)
+
+      // Show category breakdown
+      const categoryCounts = extractCategories(models)
+      console.log('\nFetched categories:')
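+      // Flag any requested category that came back empty so typos in
+      // --categories values are easy to spot in the breakdown below.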
+      for (const cat of args.categories) {
+        const count = categoryCounts.get(cat)
+        if (count) {
+          console.log(`  ✓ ${cat} (${count} models)`)
+        } else {
+          console.log(`  ⚠️ ${cat} (0 models)`)
+        }
+      }
+    } else {
+      // No filter - fetch all models
+      models = await fetchFalModels()
+    }
+
+    // Prepare filter options for metadata
+    const filterOptions: FilterOptions = {
+      categories: args.categories,
+    }
+
+    // Save results grouped by category
+    saveModelsByCategory(models, filterOptions)
+  } catch (error) {
+    console.error('\n❌ Error:', error instanceof Error ? error.message : error)
+    process.exit(1)
+  }
+}
+
+main()
diff --git a/packages/typescript/ai-fal/scripts/generate-fal-endpoint-maps.ts b/packages/typescript/ai-fal/scripts/generate-fal-endpoint-maps.ts
new file mode 100644
index 00000000..d2271f58
--- /dev/null
+++ b/packages/typescript/ai-fal/scripts/generate-fal-endpoint-maps.ts
@@ -0,0 +1,505 @@
+#!/usr/bin/env tsx
+/**
+ * Generate category-specific EndpointTypeMap files from heyapi-generated types
+ *
+ * This script:
+ * 1. Scans each category directory for types.gen.ts
+ * 2. Extracts endpoint information from Post*Data and Get*Responses types
+ * 3. For each category, generates {category}/endpoint-map.ts with:
+ *    - TypeScript type imports from types.gen.ts
+ *    - Zod schema imports from zod.gen.ts
+ *    - CategoryEndpointMap type
+ *    - CategorySchemaMap constant (Zod schemas)
+ *    - CategoryModel utility type
+ *    - CategoryInput utility type
+ *    - CategoryOutput utility type
+ * 4. Generates unified index.ts that re-exports all categories
+ */
+
+import { existsSync, readFileSync, readdirSync, writeFileSync } from 'node:fs'
+import { dirname, join } from 'node:path'
+import { fileURLToPath } from 'node:url'
+import * as prettier from 'prettier'
+
+const __dirname = dirname(fileURLToPath(import.meta.url))
+
+interface EndpointInfo {
+  endpointId: string
+  inputType: string
+  outputType: string
+}
+
+/**
+ * Extract endpoints from types.gen.ts file
+ */
+function extractEndpointsFromTypes(categoryPath: string): Array<EndpointInfo> {
+  const typesPath = join(categoryPath, 'types.gen.ts')
+  if (!existsSync(typesPath)) {
+    return []
+  }
+
+  const content = readFileSync(typesPath, 'utf-8')
+  const endpoints: Array<EndpointInfo> = []
+
+  // Match: export type Post*Data = {
+  //   body: SchemaXxxInput
+  //   ...
+  //   url: '/endpoint-path'
+  // }
+  const postTypeRegex =
+    /export type (Post\w+)Data = \{[\s\S]*?body: (\w+)[\s\S]*?url: '([^']+)'/g
+
+  let match
+  while ((match = postTypeRegex.exec(content)) !== null) {
+    const inputType = match[2]!
+    const urlPath = match[3]!
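+
+    // NOTE: this extraction assumes @hey-api/openapi-ts always emits `body:`
+    // before `url:` inside each Post*Data type; if that ordering ever changes,
+    // endpoints would be silently skipped by the regex above.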
+
+    // Remove leading slash from URL to get endpoint ID
+    const endpointId = urlPath.replace(/^\//, '')
+
+    // Derive output type from input type by replacing "Input" with "Output"
+    const outputType = inputType.replace(/Input$/, 'Output')
+
+    // Verify the output type exists in the content
+    if (!content.includes(`export type ${outputType}`)) {
+      console.warn(
+        `  Warning: Could not find output type ${outputType} for ${endpointId}`,
+      )
+      continue
+    }
+
+    endpoints.push({
+      endpointId,
+      inputType,
+      outputType,
+    })
+  }
+
+  return endpoints
+}
+
+/**
+ * Get Zod schema name from TypeScript type name
+ * SchemaWanEffectsInput -> zSchemaWanEffectsInput
+ */
+function getZodSchemaName(typeName: string): string {
+  return 'z' + typeName
+}
+
+/**
+ * Convert category name to PascalCase
+ * Prefix with "Gen" if starts with a digit
+ */
+function toPascalCase(str: string): string {
+  const pascalCase = str
+    .split(/[-_]/)
+    .map((word) => word.charAt(0).toUpperCase() + word.slice(1))
+    .join('')
+
+  // TypeScript identifiers cannot start with a number
+  // Prefix with "Gen" if it starts with a digit
+  if (/^\d/.test(pascalCase)) {
+    return 'Gen' + pascalCase
+  }
+
+  return pascalCase
+}
+
+/**
+ * Mapping of output types to their source categories
+ */
+const outputTypeMapping: Record<string, Array<string>> = {
+  image: ['text-to-image', 'image-to-image'],
+  video: [
+    'text-to-video',
+    'image-to-video',
+    'video-to-video',
+    'audio-to-video',
+  ],
+  audio: [
+    'text-to-audio',
+    'audio-to-audio',
+    'speech-to-speech',
+    'text-to-speech',
+  ],
+  text: [
+    'text-to-text',
+    'audio-to-text',
+    'video-to-text',
+    'vision',
+    'speech-to-text',
+  ],
+  '3d': ['text-to-3d', 'image-to-3d', '3d-to-3d'],
+  json: ['text-to-json', 'image-to-json', 'json'],
+}
+
+/**
+ * Generate output-type-based unions (FalImageModel, FalVideoModel, etc.)
+ */
+function generateOutputTypeUnions(
+  processedCategories: Array<string>,
+): Array<string> {
+  const lines: Array<string> = []
+
+  for (const [outputType, categories] of Object.entries(outputTypeMapping)) {
+    // Filter to only categories that were actually processed
+    const availableCategories = categories.filter((cat) =>
+      processedCategories.includes(cat),
+    )
+
+    if (availableCategories.length === 0) {
+      continue
+    }
+
+    // Convert output type to PascalCase (e.g., 'image' -> 'Image', '3d' -> '3d')
+    const outputTypePascal =
+      outputType.charAt(0).toUpperCase() + outputType.slice(1)
+
+    // Generate Model union type
+    lines.push(`/** Union of all ${outputType} generation models */`)
+    lines.push(`export type Fal${outputTypePascal}Model =`)
+    for (const category of availableCategories) {
+      lines.push(`  | ${toPascalCase(category)}Model`)
+    }
+    lines.push(``)
+
+    // Generate Input type
+    lines.push(`/**`)
+    lines.push(` * Get the input type for a specific ${outputType} model.`)
+    lines.push(
+      ` * Checks official fal.ai EndpointTypeMap first, then falls back to category-specific types.`,
+    )
+    lines.push(` */`)
+    lines.push(
+      `export type Fal${outputTypePascal}Input<T extends Fal${outputTypePascal}Model> =`,
+    )
+    lines.push(
+      `  T extends keyof EndpointTypeMap ? EndpointTypeMap[T]['input'] :`,
+    )
+    for (let i = 0; i < availableCategories.length; i++) {
+      const category = availableCategories[i]!
+      const typeName = toPascalCase(category)
+      const isLast = i === availableCategories.length - 1
+      lines.push(`  T extends ${typeName}Model ? ${typeName}ModelInput<T> :`)
+      if (isLast) {
+        lines.push(`  never`)
+      }
+    }
+    lines.push(``)
+
+    // Generate Output type
+    lines.push(`/**`)
+    lines.push(` * Get the output type for a specific ${outputType} model.`)
+    lines.push(
+      ` * Checks official fal.ai EndpointTypeMap first, then falls back to category-specific types.`,
+    )
+    lines.push(` */`)
+    lines.push(
+      `export type Fal${outputTypePascal}Output<T extends Fal${outputTypePascal}Model> =`,
+    )
+    lines.push(
+      `  T extends keyof EndpointTypeMap ? EndpointTypeMap[T]['output'] :`,
+    )
+    for (let i = 0; i < availableCategories.length; i++) {
+      const category = availableCategories[i]!
+      const typeName = toPascalCase(category)
+      const isLast = i === availableCategories.length - 1
+      lines.push(`  T extends ${typeName}Model ? ${typeName}ModelOutput<T> :`)
+      if (isLast) {
+        lines.push(`  never`)
+      }
+    }
+    lines.push(``)
+
+    // Generate combined SchemaMap
+    lines.push(`/** Combined schema map for all ${outputType} models */`)
+    lines.push(
+      `export const Fal${outputTypePascal}SchemaMap: Record<Fal${outputTypePascal}Model, { input: z.ZodSchema; output: z.ZodSchema }> = {`,
+    )
+    for (const category of availableCategories) {
+      const typeName = toPascalCase(category)
+      lines.push(`  ...${typeName}SchemaMap,`)
+    }
+    lines.push(`} as const`)
+    lines.push(``)
+  }
+
+  return lines
+}
+
+/**
+ * Format TypeScript code using prettier
+ */
+async function formatTypeScript(content: string): Promise<string> {
+  return prettier.format(content, {
+    parser: 'typescript',
+    semi: false,
+    singleQuote: true,
+    trailingComma: 'all',
+  })
+}
+
+/**
+ * Generate endpoint-map.ts for a category
+ */
+async function generateEndpointMap(
+  category: string,
+  categoryPath: string,
+  endpoints: Array<EndpointInfo>,
+): Promise<void> {
+  const typeName = toPascalCase(category)
+
+  // Collect unique type and schema names
+  const inputTypes = new Set<string>()
+  const outputTypes = new Set<string>()
+  const inputSchemas = new Set<string>()
+  const outputSchemas = new Set<string>()
+
+  for (const { inputType, outputType } of endpoints) {
+    inputTypes.add(inputType)
+    outputTypes.add(outputType)
+    inputSchemas.add(getZodSchemaName(inputType))
+    outputSchemas.add(getZodSchemaName(outputType))
+  }
+
+  // Generate imports
+  const typeImports = Array.from(
+    new Set([...inputTypes, ...outputTypes]),
+  ).sort()
+  const schemaImports = Array.from(
+    new Set([...inputSchemas, ...outputSchemas]),
+  ).sort()
+
+  const imports = [
+    `// AUTO-GENERATED - Do not edit manually`,
+    `// Generated from types.gen.ts via scripts/generate-fal-endpoint-maps.ts`,
+    ``,
+    `import {`,
+    ...schemaImports.map((t) => `  ${t},`),
+    `} from './zod.gen'`,
+    ``,
+    `import type {`,
+    ...typeImports.map((t) => `  ${t},`),
+    `} from './types.gen'`,
+    ``,
+    `import type { z } from 'zod'`,
+    ``,
+  ]
+
+  // Generate TypeScript EndpointMap type
+  const typeMapLines = [`export type ${typeName}EndpointMap = {`]
+
+  for (const { endpointId, inputType, outputType } of endpoints) {
+    typeMapLines.push(`  '${endpointId}': {`)
+    typeMapLines.push(`    input: ${inputType}`)
+    typeMapLines.push(`    output: ${outputType}`)
+    typeMapLines.push(`  }`)
+  }
+
+  typeMapLines.push(`}`)
+
+  // Generate Zod SchemaMap constant
+  const schemaMapLines = [
+    ``,
+    `export const ${typeName}SchemaMap: Record<`,
+    `  ${typeName}Model,`,
+    `  { input: z.ZodSchema; output: z.ZodSchema }`,
+    `> = {`,
+  ]
+
+  for (const { endpointId, inputType, outputType } of endpoints) {
+    const inputSchema = getZodSchemaName(inputType)
+    const outputSchema = getZodSchemaName(outputType)
+    schemaMapLines.push(`  ['${endpointId}']: {`)
+    schemaMapLines.push(`    input: ${inputSchema},`)
+    schemaMapLines.push(`    output: ${outputSchema},`)
+    schemaMapLines.push(`  },`)
+  }
+
+  schemaMapLines.push(`} as const`)
+
+  // Generate Model type (must come before SchemaMap which references it)
+  const modelType = [
+    ``,
+    `/** Union type of all ${category} model endpoint IDs */`,
+    `export type ${typeName}Model = keyof ${typeName}EndpointMap`,
+  ]
+
+  // Generate utility types
+  const utilityTypes = [
+    ``,
+    `/** Get the input type for a specific ${category} model */`,
+    `export type ${typeName}ModelInput<T extends ${typeName}Model> = ${typeName}EndpointMap[T]['input']`,
+    ``,
+    `/** Get the output type for a specific ${category} model */`,
+    `export type ${typeName}ModelOutput<T extends ${typeName}Model> = ${typeName}EndpointMap[T]['output']`,
+    ``,
+  ]
+
+  // Combine all parts
+  const content = [
+    ...imports,
+    ...typeMapLines,
+    ...modelType,
+    ...schemaMapLines,
+    ...utilityTypes,
+  ].join('\n')
+
+  // Format and write to file
+  const outputPath = join(categoryPath, 'endpoint-map.ts')
+  const formattedContent = await formatTypeScript(content)
+  writeFileSync(outputPath, formattedContent)
+  console.log(
+    `  ✓ Generated ${category}/endpoint-map.ts (${endpoints.length} endpoints)`,
+  )
+}
+
+async function main() {
+  const generatedDir = join(__dirname, '..', 'src', 'generated')
+
+  if (!existsSync(generatedDir)) {
+    console.error('Error: src/generated/ directory not found.')
+    process.exit(1)
+  }
+
+  console.log('Scanning generated/ directory for categories...')
+
+  // Get all category directories
+  const categories = readdirSync(generatedDir, { withFileTypes: true })
+    .filter((dirent) => dirent.isDirectory())
+    .map((dirent) => dirent.name)
+    .sort()
+
+  console.log(`Found ${categories.length} categories:`)
+  for (const category of categories) {
+    console.log(`  - ${category}`)
+  }
+
+  console.log('\nGenerating endpoint maps...')
+
+  const processedCategories: Array<string> = []
+
+  for (const category of categories) {
+    const categoryPath = join(generatedDir, category)
+
+    // Extract endpoints from types.gen.ts
+    const endpoints = extractEndpointsFromTypes(categoryPath)
+
+    if (endpoints.length === 0) {
+      console.warn(`  Warning: No endpoints found for ${category}, skipping`)
+      continue
+    }
+
+    // Generate endpoint-map.ts
+    await generateEndpointMap(category, categoryPath, endpoints)
+    processedCategories.push(category)
+  }
+
+  // Generate unified index.ts
+  console.log('\nGenerating unified index.ts...')
+  const indexLines = [
+    `// AUTO-GENERATED - Do not edit manually`,
+    `// Generated from types.gen.ts via scripts/generate-fal-endpoint-maps.ts`,
+    ``,
+  ]
+
+  // Collect categories that are used in output-type unions
+  const usedCategories = new Set<string>()
+  for (const categories of Object.values(outputTypeMapping)) {
+    for (const cat of categories) {
+      if (processedCategories.includes(cat)) {
+        usedCategories.add(cat)
+      }
+    }
+  }
+  const usedCategoriesList = Array.from(usedCategories).sort()
+
+  const pascalCaseCategories = processedCategories
+    .map((category) => toPascalCase(category))
+    .sort()
+
+  // Generate imports first (before exports) to satisfy import/first rule
+  indexLines.push(
+    `// Import value exports (SchemaMap constants) from category endpoint maps`,
+  )
+  for (const category of usedCategoriesList) {
+    const pascalCaseCategory = toPascalCase(category)
+    indexLines.push(
+      `import { ${pascalCaseCategory}SchemaMap } from './${category}/endpoint-map'`,
+    )
+  }
+  indexLines.push(``)
+
+  // Generate type imports grouped by category
+  indexLines.push(`// Import type exports from category endpoint maps`)
+  for (const category of processedCategories) {
+    const pascalCaseCategory =
toPascalCase(category)
+    const isUsedCategory = usedCategoriesList.includes(category)
+    const typeImports: Array<string> = [`${pascalCaseCategory}Model`]
+    if (isUsedCategory) {
+      typeImports.push(`${pascalCaseCategory}ModelInput`)
+      typeImports.push(`${pascalCaseCategory}ModelOutput`)
+    }
+    indexLines.push(
+      `import type { ${typeImports.join(', ')} } from './${category}/endpoint-map'`,
+    )
+  }
+  indexLines.push(``)
+
+  // Import external zod type after local imports
+  indexLines.push(`import type { z } from 'zod'`)
+  indexLines.push(``)
+
+  // Import fal.ai EndpointTypeMap for type checking
+  indexLines.push(`// Import official fal.ai endpoint types`)
+  indexLines.push(
+    `import type { EndpointTypeMap } from '@fal-ai/client/endpoints'`,
+  )
+  indexLines.push(``)
+
+  // Now add the re-exports
+  indexLines.push(`// Re-export all category endpoint maps`)
+  for (const category of processedCategories) {
+    indexLines.push(`export * from './${category}/endpoint-map'`)
+  }
+  indexLines.push(``)
+  indexLines.push(`/**`)
+  indexLines.push(
+    ` * Union type of all Fal.ai model endpoint IDs across all categories.`,
+  )
+  indexLines.push(` * `)
+  indexLines.push(
+    ` * Note: Using this union type loses some type precision. For better type safety,`,
+  )
+  indexLines.push(
+    ` * import category-specific types like ImageToImageModel, TextToImageModel, etc.`,
+  )
+  indexLines.push(` */`)
+  indexLines.push(`export type FalModel =`)
+  for (const pascalCaseCategory of pascalCaseCategories) {
+    indexLines.push(`  | ${pascalCaseCategory}Model`)
+  }
+  indexLines.push(``)
+
+  // Generate output-type-based unions
+  const outputTypeLines = generateOutputTypeUnions(processedCategories)
+  indexLines.push(...outputTypeLines)
+
+  const indexPath = join(generatedDir, 'index.ts')
+  const formattedIndex = await formatTypeScript(indexLines.join('\n'))
+  writeFileSync(indexPath, formattedIndex)
+  console.log(`  ✓ Generated index.ts`)
+
+  console.log(`\n✓ Done!
Generated endpoint maps in src/generated/`)
+  console.log(`\nCategories generated:`)
+  for (const category of processedCategories) {
+    console.log(`  - ${category} (${toPascalCase(category)}Model)`)
+  }
+}
+
+main().catch((error) => {
+  console.error('Error:', error)
+  process.exit(1)
+})
diff --git a/packages/typescript/ai-fal/src/adapters/image.ts b/packages/typescript/ai-fal/src/adapters/image.ts
new file mode 100644
index 00000000..f37356fe
--- /dev/null
+++ b/packages/typescript/ai-fal/src/adapters/image.ts
@@ -0,0 +1,249 @@
+import { fal } from '@fal-ai/client'
+import { BaseImageAdapter } from '@tanstack/ai/adapters'
+
+import { FalImageSchemaMap } from '../generated'
+import { configureFalClient, generateId as utilGenerateId } from '../utils'
+
+import type { OutputType, Result } from '@fal-ai/client'
+import type {
+  GeneratedImage,
+  ImageGenerationOptions,
+  ImageGenerationResult,
+} from '@tanstack/ai'
+import type { FalImageProviderOptions } from '../model-meta'
+import type { FalImageInput, FalImageModel, FalImageOutput } from '../generated'
+
+import type { FalClientConfig } from '../utils'
+import type { z } from 'zod'
+
+/** Map common size strings to fal.ai image_size presets */
+const SIZE_TO_PRESET: Record<string, string> = {
+  '1024x1024': 'square_hd',
+  '512x512': 'square',
+  '1024x768': 'landscape_4_3',
+  '768x1024': 'portrait_4_3',
+  '1280x720': 'landscape_16_9',
+  '720x1280': 'portrait_16_9',
+  '1920x1080': 'landscape_16_9',
+  '1080x1920': 'portrait_16_9',
+  '2560x1440': 'landscape_16_9',
+  '1440x2560': 'portrait_16_9',
+  '3840x2160': 'landscape_16_9',
+  '2160x3840': 'portrait_16_9',
+  '4096x2160': 'landscape_16_9',
+  '2160x4096': 'portrait_16_9',
+  '4320x2160': 'landscape_16_9',
+  '2160x4320': 'portrait_16_9',
+}
+
+/**
+ * fal.ai image generation adapter with full type inference.
+ *
+ * Uses fal.ai's comprehensive type system to provide autocomplete
+ * and type safety for all 600+ supported models.
+ *
+ * @example
+ * ```typescript
+ * const adapter = falImage('fal-ai/flux/dev')
+ * const result = await adapter.generateImages({
+ *   model: 'fal-ai/flux/dev',
+ *   prompt: 'a cat',
+ *   modelOptions: {
+ *     num_inference_steps: 28, // Type-safe! Autocomplete works
+ *     guidance_scale: 3.5,
+ *   },
+ * })
+ * ```
+ */
+export class FalImageAdapter<
+  TModel extends FalImageModel,
+> extends BaseImageAdapter<
+  TModel,
+  FalImageProviderOptions<TModel>,
+  Record<string, FalImageInput<TModel>>,
+  Record<string, unknown>
+> {
+  readonly kind = 'image' as const
+  readonly name = 'fal' as const
+  readonly model: TModel
+  readonly inputSchema: z.ZodSchema<FalImageInput<TModel>>
+  readonly outputSchema: z.ZodSchema<FalImageOutput<TModel>>
+
+  constructor(model: TModel, config?: FalClientConfig) {
+    super({}, model)
+    this.model = model
+    // We cast here because the number of image models is so large that
+    // TypeScript struggles to infer the input and output schema types,
+    // so they are typed as generic Zod schemas.
+    this.inputSchema = FalImageSchemaMap[model].input as z.ZodSchema<
+      FalImageInput<TModel>
+    >
+    this.outputSchema = FalImageSchemaMap[model].output as z.ZodSchema<
+      FalImageOutput<TModel>
+    >
+
+    configureFalClient(config)
+  }
+
+  async generateImages(
+    options: ImageGenerationOptions<FalImageProviderOptions<TModel>>,
+  ): Promise<ImageGenerationResult> {
+    const { prompt, numberOfImages, size, modelOptions } = options
+    const { width, height } = this.parseSize(size ?? '0x0')
+
+    // Build the input object - spread modelOptions first, then override with standard options
+    const input = this.inputSchema.parse({
+      ...modelOptions,
+      prompt,
+      image_size: this.mapSizeToImageSize(size ?? '0x0', width, height),
+      aspect_ratio: this.calculateAspectRatio(width, height),
+      resolution: this.determineResolution(width, height),
+      num_images: numberOfImages,
+    })
+
+    const result = await fal.subscribe(this.model, { input: input })
+
+    return this.transformResponse(this.model, result)
+  }
+
+  protected override generateId(): string {
+    return utilGenerateId(this.name)
+  }
+
+  /** Parse size string (WIDTHxHEIGHT) into width and height */
+  private parseSize(size: string): {
+    width: number | null
+    height: number | null
+  } {
+    const match = size.match(/^(\d+)x(\d+)$/)
+    return {
+      width: match?.[1] ? parseInt(match[1], 10) : null,
+      height: match?.[2] ? parseInt(match[2], 10) : null,
+    }
+  }
+
+  /** Maps size to image_size field (preset or {width, height}) */
+  private mapSizeToImageSize(
+    size: string,
+    width: number | null,
+    height: number | null,
+  ): string | { width: number; height: number } {
+    const preset = SIZE_TO_PRESET[size]
+    if (preset) return preset
+    if (width && height) return { width, height }
+    return size
+  }
+
+  /** Calculate aspect ratio from width and height */
+  private calculateAspectRatio(
+    width: number | null,
+    height: number | null,
+  ): string | null {
+    if (!width || !height) return null
+
+    const gcd = (a: number, b: number): number => (b === 0 ? a : gcd(b, a % b))
+    const divisor = gcd(width, height)
+    return `${width / divisor}:${height / divisor}`
+  }
+
+  /** Determine resolution string from dimensions */
+  private determineResolution(
+    width: number | null,
+    height: number | null,
+  ): string | null {
+    if (!width || !height) return null
+
+    const maxDimension = Math.max(width, height)
+
+    if (maxDimension >= 3840) return '4k'
+    if (maxDimension >= 2560) return '2k'
+    if (maxDimension >= 1920) return '1k'
+    if (maxDimension >= 1080) return '1080p'
+    if (maxDimension >= 720) return '720p'
+    if (maxDimension >= 580) return '580p'
+    if (maxDimension >= 540) return '540p'
+    if (maxDimension >= 480) return '480p'
+    if (maxDimension >= 360) return '360p'
+
+    return null
+  }
+
+  private transformResponse(
+    model: string,
+    response: Result<OutputType<TModel>>,
+  ): ImageGenerationResult {
+    const data = response.data
+    let images: Array<GeneratedImage> = []
+
+    if ('images' in data && Array.isArray(data.images)) {
+      images = data.images.map((img: any) => this.parseImage(img))
+    } else if (
+      'image' in data &&
+      data.image &&
+      typeof data.image === 'object'
+    ) {
+      images = [this.parseImage(data.image)]
+    }
+
+    return {
+      id: response.requestId || this.generateId(),
+      model,
+      images,
+    }
+  }
+
+  private parseImage(img: { url: string }): GeneratedImage {
+    const { url } = img
+    const base64Match = url.match(/^data:image\/[^;]+;base64,(.+)$/)
+    if (base64Match) {
+      return { b64Json: base64Match[1], url }
+    }
+    return { url }
+  }
+}
+
+/**
+ * Create a fal.ai image adapter with an explicit API key.
+ *
+ * @example
+ * ```typescript
+ * const adapter = createFalImage('fal-ai/flux-pro/v1.1-ultra', {
+ *   apiKey: process.env.FAL_KEY!,
+ * })
+ * ```
+ */
+export function createFalImage<TModel extends FalImageModel>(
+  model: TModel,
+  config?: FalClientConfig,
+): FalImageAdapter<TModel> {
+  return new FalImageAdapter(model, config)
+}
+
+/**
+ * Create a fal.ai image adapter using config.apiKey or the FAL_KEY environment variable.
+ *
+ * The model parameter accepts any fal.ai model ID with full type inference.
+ * As you type, you'll get autocomplete for all 600+ supported models.
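+ *
+ * Standard `size` strings such as '1024x1024' map to fal.ai `image_size`
+ * presets (see SIZE_TO_PRESET above); other WIDTHxHEIGHT values fall back
+ * to an explicit { width, height } object.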
+ *
+ * @example
+ * ```typescript
+ * // Full autocomplete as you type the model name
+ * const adapter = falImage('fal-ai/flux/dev')
+ *
+ * // modelOptions are type-safe based on the model
+ * const result = await adapter.generateImages({
+ *   model: 'fal-ai/flux/dev',
+ *   prompt: 'a cat',
+ *   modelOptions: {
+ *     num_inference_steps: 28,
+ *     guidance_scale: 3.5,
+ *     seed: 12345,
+ *   },
+ * })
+ * ```
+ */
+export function falImage<TModel extends FalImageModel>(
+  model: TModel,
+  config?: FalClientConfig,
+): FalImageAdapter<TModel> {
+  return createFalImage(model, config)
+}
diff --git a/packages/typescript/ai-fal/src/adapters/video.ts b/packages/typescript/ai-fal/src/adapters/video.ts
new file mode 100644
index 00000000..2b6f1648
--- /dev/null
+++ b/packages/typescript/ai-fal/src/adapters/video.ts
@@ -0,0 +1,183 @@
+import { fal } from '@fal-ai/client'
+import { BaseVideoAdapter } from '@tanstack/ai/adapters'
+import { configureFalClient, generateId as utilGenerateId } from '../utils'
+import { FalVideoSchemaMap } from '../generated'
+import type { FalVideoInput, FalVideoModel, FalVideoOutput } from '../generated'
+import type {
+  VideoGenerationOptions,
+  VideoJobResult,
+  VideoStatusResult,
+  VideoUrlResult,
+} from '@tanstack/ai'
+import type { FalVideoProviderOptions } from '../model-meta'
+import type { FalClientConfig } from '../utils'
+import type { z } from 'zod'
+
+type FalQueueStatus = 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED'
+
+interface FalStatusResponse {
+  status: FalQueueStatus
+  queue_position?: number
+  logs?: Array<{ message: string }>
+}
+
+interface FalVideoResultData {
+  video?: { url: string }
+  video_url?: string
+}
+
+/**
+ * Maps fal.ai queue status to TanStack AI video status.
+ */
+function mapFalStatusToVideoStatus(
+  falStatus: FalQueueStatus,
+): VideoStatusResult['status'] {
+  switch (falStatus) {
+    case 'IN_QUEUE':
+      return 'pending'
+    case 'IN_PROGRESS':
+      return 'processing'
+    case 'COMPLETED':
+      return 'completed'
+    default:
+      return 'processing'
+  }
+}
+
+/**
+ * fal.ai video generation adapter.
+ * Supports MiniMax, Luma, Kling, Hunyuan, and other fal.ai video models.
+ *
+ * Uses fal.ai's comprehensive type system to provide autocomplete
+ * and type safety for all supported video models.
+ *
+ * @experimental Video generation is an experimental feature and may change.
+ */
+export class FalVideoAdapter<
+  TModel extends FalVideoModel,
+> extends BaseVideoAdapter<TModel, FalVideoProviderOptions<TModel>> {
+  readonly kind = 'video' as const
+  readonly name = 'fal' as const
+  readonly model: TModel
+  readonly inputSchema: z.ZodSchema<FalVideoInput<TModel>>
+  readonly outputSchema: z.ZodSchema<FalVideoOutput<TModel>>
+
+  constructor(model: TModel, config?: FalClientConfig) {
+    super({}, model)
+    this.model = model
+    // We cast here because the number of video models is so large that
+    // TypeScript struggles to infer the input and output schema types,
+    // so they are typed as generic Zod schemas.
+    this.inputSchema = FalVideoSchemaMap[model].input as z.ZodSchema<
+      FalVideoInput<TModel>
+    >
+    this.outputSchema = FalVideoSchemaMap[model].output as z.ZodSchema<
+      FalVideoOutput<TModel>
+    >
+    configureFalClient(config)
+  }
+
+  async createVideoJob(
+    options: VideoGenerationOptions<FalVideoProviderOptions<TModel>>,
+  ): Promise<VideoJobResult> {
+    const { model, prompt, size, duration, modelOptions } = options
+
+    // Build the input object for fal.ai
+    const input = this.inputSchema.parse({
+      ...modelOptions,
+      prompt,
+      ...(duration ? { duration } : {}),
+      ...(size ? { aspect_ratio: this.sizeToAspectRatio(size) } : {}),
+    })
+
+    // Submit to queue and get request ID
+    const { request_id } = await fal.queue.submit(model, {
+      input,
+    })
+
+    return {
+      jobId: request_id,
+      model,
+    }
+  }
+
+  async getVideoStatus(jobId: string): Promise<VideoStatusResult> {
+    const statusResponse = (await fal.queue.status(this.model, {
+      requestId: jobId,
+      logs: true,
+    })) as FalStatusResponse
+
+    return {
+      jobId,
+      status: mapFalStatusToVideoStatus(statusResponse.status),
+      progress:
+        statusResponse.queue_position != null
+          ? Math.max(0, 100 - statusResponse.queue_position * 10)
+          : undefined,
+    }
+  }
+
+  async getVideoUrl(jobId: string): Promise<VideoUrlResult> {
+    const result = await fal.queue.result(this.model, {
+      requestId: jobId,
+    })
+
+    const data = result.data as FalVideoResultData
+
+    // Different models return video URL in different formats
+    const url = data.video?.url || data.video_url
+    if (!url) {
+      throw new Error('Video URL not found in response')
+    }
+
+    return {
+      jobId,
+      url,
+    }
+  }
+
+  protected override generateId(): string {
+    return utilGenerateId(this.name)
+  }
+
+  /**
+   * Convert WIDTHxHEIGHT size format to aspect ratio.
+   */
+  private sizeToAspectRatio(size: string): string | undefined {
+    const match = size.match(/^(\d+)x(\d+)$/)
+    if (!match || !match[1] || !match[2]) return undefined
+
+    const width = parseInt(match[1], 10)
+    const height = parseInt(match[2], 10)
+
+    // Calculate GCD for simplest ratio
+    const gcd = (a: number, b: number): number => (b === 0 ? a : gcd(b, a % b))
+    const divisor = gcd(width, height)
+
+    return `${width / divisor}:${height / divisor}`
+  }
+}
+
+/**
+ * Create a fal.ai video adapter with an explicit API key.
+ *
+ * @experimental Video generation is an experimental feature and may change.
+ */
+export function createFalVideo<TModel extends FalVideoModel>(
+  model: TModel,
+  config?: FalClientConfig,
+): FalVideoAdapter<TModel> {
+  return new FalVideoAdapter(model, config)
+}
+
+/**
+ * Create a fal.ai video adapter using config.apiKey or the FAL_KEY environment variable.
+ *
+ * @experimental Video generation is an experimental feature and may change.
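+ *
+ * A minimal sketch of the queue-based flow (the model ID below is
+ * illustrative; any FalVideoModel endpoint ID works):
+ *
+ * @example
+ * ```typescript
+ * const adapter = falVideo('fal-ai/minimax-video')
+ * const job = await adapter.createVideoJob({
+ *   model: 'fal-ai/minimax-video',
+ *   prompt: 'a timelapse of clouds over mountains',
+ * })
+ * // Poll until the queue reports completion, then fetch the result URL
+ * const status = await adapter.getVideoStatus(job.jobId)
+ * if (status.status === 'completed') {
+ *   const { url } = await adapter.getVideoUrl(job.jobId)
+ * }
+ * ```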
+ */
+export function falVideo<TModel extends FalVideoModel>(
+  model: TModel,
+  config?: FalClientConfig,
+): FalVideoAdapter<TModel> {
+  return createFalVideo(model, config)
+}
diff --git a/packages/typescript/ai-fal/src/generated/3d-to-3d/endpoint-map.ts b/packages/typescript/ai-fal/src/generated/3d-to-3d/endpoint-map.ts
new file mode 100644
index 00000000..1ad18e67
--- /dev/null
+++ b/packages/typescript/ai-fal/src/generated/3d-to-3d/endpoint-map.ts
@@ -0,0 +1,90 @@
+// AUTO-GENERATED - Do not edit manually
+// Generated from types.gen.ts via scripts/generate-fal-endpoint-maps.ts
+
+import {
+  zSchemaHunyuanPartInput,
+  zSchemaHunyuanPartOutput,
+  zSchemaMeshyV5RemeshInput,
+  zSchemaMeshyV5RemeshOutput,
+  zSchemaMeshyV5RetextureInput,
+  zSchemaMeshyV5RetextureOutput,
+  zSchemaSam33dAlignInput,
+  zSchemaSam33dAlignOutput,
+  zSchemaUltrashapeInput,
+  zSchemaUltrashapeOutput,
+} from './zod.gen'
+
+import type {
+  SchemaHunyuanPartInput,
+  SchemaHunyuanPartOutput,
+  SchemaMeshyV5RemeshInput,
+  SchemaMeshyV5RemeshOutput,
+  SchemaMeshyV5RetextureInput,
+  SchemaMeshyV5RetextureOutput,
+  SchemaSam33dAlignInput,
+  SchemaSam33dAlignOutput,
+  SchemaUltrashapeInput,
+  SchemaUltrashapeOutput,
+} from './types.gen'
+
+import type { z } from 'zod'
+
+export type Gen3dTo3dEndpointMap = {
+  'fal-ai/ultrashape': {
+    input: SchemaUltrashapeInput
+    output: SchemaUltrashapeOutput
+  }
+  'fal-ai/sam-3/3d-align': {
+    input: SchemaSam33dAlignInput
+    output: SchemaSam33dAlignOutput
+  }
+  'fal-ai/meshy/v5/retexture': {
+    input: SchemaMeshyV5RetextureInput
+    output: SchemaMeshyV5RetextureOutput
+  }
+  'fal-ai/meshy/v5/remesh': {
+    input: SchemaMeshyV5RemeshInput
+    output: SchemaMeshyV5RemeshOutput
+  }
+  'fal-ai/hunyuan-part': {
+    input: SchemaHunyuanPartInput
+    output: SchemaHunyuanPartOutput
+  }
+}
+
+/** Union type of all 3d-to-3d model endpoint IDs */
+export type Gen3dTo3dModel = keyof Gen3dTo3dEndpointMap
+
+export const Gen3dTo3dSchemaMap: Record<
+  Gen3dTo3dModel,
+  { input: z.ZodSchema; output: z.ZodSchema }
+> = {
+  ['fal-ai/ultrashape']: {
+    input: zSchemaUltrashapeInput,
+    output: zSchemaUltrashapeOutput,
+  },
+  ['fal-ai/sam-3/3d-align']: {
+    input: zSchemaSam33dAlignInput,
+    output: zSchemaSam33dAlignOutput,
+  },
+  ['fal-ai/meshy/v5/retexture']: {
+    input: zSchemaMeshyV5RetextureInput,
+    output: zSchemaMeshyV5RetextureOutput,
+  },
+  ['fal-ai/meshy/v5/remesh']: {
+    input: zSchemaMeshyV5RemeshInput,
+    output: zSchemaMeshyV5RemeshOutput,
+  },
+  ['fal-ai/hunyuan-part']: {
+    input: zSchemaHunyuanPartInput,
+    output: zSchemaHunyuanPartOutput,
+  },
+} as const
+
+/** Get the input type for a specific 3d-to-3d model */
+export type Gen3dTo3dModelInput<T extends Gen3dTo3dModel> =
+  Gen3dTo3dEndpointMap[T]['input']
+
+/** Get the output type for a specific 3d-to-3d model */
+export type Gen3dTo3dModelOutput<T extends Gen3dTo3dModel> =
+  Gen3dTo3dEndpointMap[T]['output']
diff --git a/packages/typescript/ai-fal/src/generated/3d-to-3d/types.gen.ts b/packages/typescript/ai-fal/src/generated/3d-to-3d/types.gen.ts
new file mode 100644
index 00000000..95c9a378
--- /dev/null
+++ b/packages/typescript/ai-fal/src/generated/3d-to-3d/types.gen.ts
@@ -0,0 +1,1044 @@
+// This file is auto-generated by @hey-api/openapi-ts
+
+export type ClientOptions = {
+  baseUrl: 'https://queue.fal.run' | (string & {})
+}
+
+/**
+ * HunyuanPartOutput
+ */
+export type SchemaHunyuanPartOutput = {
+  /**
+   * Iou Scores
+   *
+   * IoU scores for each of the three masks.
+   */
+  iou_scores: Array<number>
+  /**
+   * Best Mask Index
+   *
+   * Index of the best mask (1, 2, or 3) based on IoU score.
+ */ + best_mask_index: number + /** + * Mask 2 Mesh + * + * Mesh showing segmentation mask 2. + */ + mask_2_mesh: SchemaFile + /** + * Mask 1 Mesh + * + * Mesh showing segmentation mask 1. + */ + mask_1_mesh: SchemaFile + /** + * Segmented Mesh + * + * Segmented 3D mesh with mask applied. + */ + segmented_mesh: SchemaFile + /** + * Seed + * + * Seed value used for generation. + */ + seed: number + /** + * Mask 3 Mesh + * + * Mesh showing segmentation mask 3. + */ + mask_3_mesh: SchemaFile +} + +/** + * File + */ +export type SchemaFile = { + /** + * File Size + * + * The size of the file in bytes. + */ + file_size?: number + /** + * File Name + * + * The name of the file. It will be auto-generated if not provided. + */ + file_name?: string + /** + * Content Type + * + * The mime type of the file. + */ + content_type?: string + /** + * Url + * + * The URL where the file can be downloaded from. + */ + url: string + /** + * File Data + * + * File data + */ + file_data?: Blob | File +} + +/** + * HunyuanPartInput + */ +export type SchemaHunyuanPartInput = { + /** + * Point Prompt X + * + * X coordinate of the point prompt for segmentation (normalized space -1 to 1). + */ + point_prompt_x?: number + /** + * Point Prompt Z + * + * Z coordinate of the point prompt for segmentation (normalized space -1 to 1). + */ + point_prompt_z?: number + /** + * Use Normal + * + * Whether to use normal information for segmentation. + */ + use_normal?: boolean + /** + * Noise Std + * + * Standard deviation of noise to add to sampled points. + */ + noise_std?: number + /** + * Point Num + * + * Number of points to sample from the mesh. + */ + point_num?: number + /** + * Model File Url + * + * URL of the 3D model file (.glb or .obj) to process for segmentation. + */ + model_file_url: string + /** + * Point Prompt Y + * + * Y coordinate of the point prompt for segmentation (normalized space -1 to 1). + */ + point_prompt_y?: number + /** + * Seed + * + * + * The same seed and input will produce the same segmentation results. + * + */ + seed?: number +} + +/** + * RemeshOutput + * + * Output for 3D Model Remeshing + */ +export type SchemaMeshyV5RemeshOutput = { + /** + * Model Urls + * + * URLs for different 3D model formats + */ + model_urls: SchemaModelUrls + /** + * Model Glb + * + * Remeshed 3D object in GLB format (if GLB was requested). + */ + model_glb?: SchemaFile +} + +/** + * ModelUrls + * + * 3D model files in various formats + */ +export type SchemaModelUrls = { + /** + * Usdz + * + * USDZ format 3D model + */ + usdz?: SchemaFile + /** + * Fbx + * + * FBX format 3D model + */ + fbx?: SchemaFile + /** + * Blend + * + * Blender format 3D model + */ + blend?: SchemaFile + /** + * Stl + * + * STL format 3D model + */ + stl?: SchemaFile + /** + * Glb + * + * GLB format 3D model + */ + glb?: SchemaFile + /** + * Obj + * + * OBJ format 3D model + */ + obj?: SchemaFile +} + +/** + * RemeshInput + * + * Input for 3D Model Remeshing + */ +export type SchemaMeshyV5RemeshInput = { + /** + * Resize Height + * + * Resize the model to a certain height measured in meters. Set to 0 for no resizing. + */ + resize_height?: number + /** + * Topology + * + * Specify the topology of the generated model. Quad for smooth surfaces, Triangle for detailed geometry. + */ + topology?: 'quad' | 'triangle' + /** + * Target Polycount + * + * Target number of polygons in the generated model. Actual count may vary based on geometry complexity. 
+ */ + target_polycount?: number + /** + * Model Url + * + * URL or base64 data URI of a 3D model to remesh. Supports .glb, .gltf, .obj, .fbx, .stl formats. Can be a publicly accessible URL or data URI with MIME type application/octet-stream. + */ + model_url: string + /** + * Origin At + * + * Position of the origin. None means no effect. + */ + origin_at?: 'bottom' | 'center' + /** + * Target Formats + * + * List of target formats for the remeshed model. + */ + target_formats?: Array<'glb' | 'fbx' | 'obj' | 'usdz' | 'blend' | 'stl'> +} + +/** + * TextureFiles + * + * Texture files downloaded and uploaded to CDN + */ +export type SchemaTextureFiles = { + /** + * Base Color + * + * Base color texture + */ + base_color: SchemaFile + /** + * Normal + * + * Normal texture (PBR) + */ + normal?: SchemaFile + /** + * Roughness + * + * Roughness texture (PBR) + */ + roughness?: SchemaFile + /** + * Metallic + * + * Metallic texture (PBR) + */ + metallic?: SchemaFile +} + +/** + * RetextureOutput + * + * Output for 3D Model Retexturing + */ +export type SchemaMeshyV5RetextureOutput = { + /** + * Model Urls + * + * URLs for different 3D model formats + */ + model_urls: SchemaModelUrls + /** + * Text Style Prompt + * + * The text prompt used for texturing (if provided) + */ + text_style_prompt?: string + /** + * Texture Urls + * + * Array of texture file objects + */ + texture_urls?: Array + /** + * Thumbnail + * + * Preview thumbnail of the retextured model + */ + thumbnail?: SchemaFile + /** + * Image Style Url + * + * The image URL used for texturing (if provided) + */ + image_style_url?: string + /** + * Model Glb + * + * Retextured 3D object in GLB format. + */ + model_glb: SchemaFile +} + +/** + * RetextureInput + * + * Input for 3D Model Retexturing + */ +export type SchemaMeshyV5RetextureInput = { + /** + * Enable Pbr + * + * Generate PBR Maps (metallic, roughness, normal) in addition to base color. + */ + enable_pbr?: boolean + /** + * Text Style Prompt + * + * Describe your desired texture style using text. Maximum 600 characters. Required if image_style_url is not provided. + */ + text_style_prompt?: string + /** + * Enable Safety Checker + * + * If set to true, input data will be checked for safety before processing. + */ + enable_safety_checker?: boolean + /** + * Enable Original Uv + * + * Use the original UV mapping of the model instead of generating new UVs. If the model has no original UV, output quality may be reduced. + */ + enable_original_uv?: boolean + /** + * Model Url + * + * URL or base64 data URI of a 3D model to texture. Supports .glb, .gltf, .obj, .fbx, .stl formats. Can be a publicly accessible URL or data URI with MIME type application/octet-stream. + */ + model_url: string + /** + * Image Style Url + * + * 2D image to guide the texturing process. Supports .jpg, .jpeg, and .png formats. Required if text_style_prompt is not provided. If both are provided, image_style_url takes precedence. + */ + image_style_url?: string +} + +/** + * SAM3DBodyAlignmentInfo + * + * Per-person alignment metadata. 
+ */ +export type SchemaSam3dBodyAlignmentInfo = { + /** + * Translation + * + * Translation [tx, ty, tz] + */ + translation: Array + /** + * Cropped Vertices Count + * + * Number of cropped vertices + */ + cropped_vertices_count: number + /** + * Person Id + * + * Index of the person + */ + person_id: number + /** + * Target Points Count + * + * Number of target points for alignment + */ + target_points_count: number + /** + * Scale Factor + * + * Scale factor applied for alignment + */ + scale_factor: number + /** + * Focal Length + * + * Focal length used + */ + focal_length: number +} + +/** + * SAM3DAlignmentOutput + */ +export type SchemaSam33dAlignOutput = { + /** + * Scene Glb + * + * Combined scene with body + object meshes in GLB format (only when object_mesh_url provided) + */ + scene_glb?: SchemaFile + /** + * Visualization + * + * Visualization of aligned mesh overlaid on input image + */ + visualization: SchemaFile + /** + * Metadata + * + * Alignment info (scale, translation, etc.) + */ + metadata: SchemaSam3dBodyAlignmentInfo + /** + * Body Mesh Ply + * + * Aligned body mesh in PLY format + */ + body_mesh_ply: SchemaFile + /** + * Model Glb + * + * Aligned body mesh in GLB format (for 3D preview) + */ + model_glb: SchemaFile +} + +/** + * SAM3DAlignmentInput + */ +export type SchemaSam33dAlignInput = { + /** + * Image Url + * + * URL of the original image used for MoGe depth estimation + */ + image_url: string + /** + * Body Mesh Url + * + * URL of the SAM-3D Body mesh file (.ply or .glb) to align + */ + body_mesh_url: string + /** + * Object Mesh Url + * + * Optional URL of SAM-3D Object mesh (.glb) to create combined scene + */ + object_mesh_url?: string + /** + * Focal Length + * + * Focal length from SAM-3D Body metadata. If not provided, estimated from MoGe. + */ + focal_length?: number + /** + * Body Mask Url + * + * URL of the human mask image. If not provided, uses full image. + */ + body_mask_url?: string +} + +/** + * UltraShapeResponse + */ +export type SchemaUltrashapeOutput = { + /** + * Model Glb + * + * Generated 3D object. + */ + model_glb: SchemaFile +} + +/** + * UltraShapeRequest + */ +export type SchemaUltrashapeInput = { + /** + * Octree Resolution + * + * Marching cubes resolution. + */ + octree_resolution?: number + /** + * Remove Background + * + * Remove image background. + */ + remove_background?: boolean + /** + * Num Inference Steps + * + * Diffusion steps. + */ + num_inference_steps?: number + /** + * Model Url + * + * URL of the coarse mesh (.glb or .obj) to refine. + */ + model_url: string + /** + * Seed + * + * Random seed. + */ + seed?: number + /** + * Image Url + * + * URL of the reference image for mesh refinement. + */ + image_url: string +} + +export type SchemaQueueStatus = { + status: 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED' + /** + * The request id. + */ + request_id: string + /** + * The response url. + */ + response_url?: string + /** + * The status url. + */ + status_url?: string + /** + * The cancel url. + */ + cancel_url?: string + /** + * The logs. + */ + logs?: { + [key: string]: unknown + } + /** + * The metrics. + */ + metrics?: { + [key: string]: unknown + } + /** + * The queue position. + */ + queue_position?: number +} + +export type GetFalAiUltrashapeRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/ultrashape/requests/{request_id}/status' +} + +export type GetFalAiUltrashapeRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiUltrashapeRequestsByRequestIdStatusResponse = + GetFalAiUltrashapeRequestsByRequestIdStatusResponses[keyof GetFalAiUltrashapeRequestsByRequestIdStatusResponses] + +export type PutFalAiUltrashapeRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ultrashape/requests/{request_id}/cancel' +} + +export type PutFalAiUltrashapeRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiUltrashapeRequestsByRequestIdCancelResponse = + PutFalAiUltrashapeRequestsByRequestIdCancelResponses[keyof PutFalAiUltrashapeRequestsByRequestIdCancelResponses] + +export type PostFalAiUltrashapeData = { + body: SchemaUltrashapeInput + path?: never + query?: never + url: '/fal-ai/ultrashape' +} + +export type PostFalAiUltrashapeResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiUltrashapeResponse = + PostFalAiUltrashapeResponses[keyof PostFalAiUltrashapeResponses] + +export type GetFalAiUltrashapeRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ultrashape/requests/{request_id}' +} + +export type GetFalAiUltrashapeRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaUltrashapeOutput +} + +export type GetFalAiUltrashapeRequestsByRequestIdResponse = + GetFalAiUltrashapeRequestsByRequestIdResponses[keyof GetFalAiUltrashapeRequestsByRequestIdResponses] + +export type GetFalAiSam33dAlignRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/sam-3/3d-align/requests/{request_id}/status' +} + +export type GetFalAiSam33dAlignRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSam33dAlignRequestsByRequestIdStatusResponse = + GetFalAiSam33dAlignRequestsByRequestIdStatusResponses[keyof GetFalAiSam33dAlignRequestsByRequestIdStatusResponses] + +export type PutFalAiSam33dAlignRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sam-3/3d-align/requests/{request_id}/cancel' +} + +export type PutFalAiSam33dAlignRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiSam33dAlignRequestsByRequestIdCancelResponse = + PutFalAiSam33dAlignRequestsByRequestIdCancelResponses[keyof PutFalAiSam33dAlignRequestsByRequestIdCancelResponses] + +export type PostFalAiSam33dAlignData = { + body: SchemaSam33dAlignInput + path?: never + query?: never + url: '/fal-ai/sam-3/3d-align' +} + +export type PostFalAiSam33dAlignResponses = { + /** + * The request status. 
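+ * + * The queue returns this status object rather than the final result; a typical client polls the status endpoint (or follows `response_url`) until `status` is `COMPLETED`, then fetches the output from the requests/{request_id} endpoint. Sketch below is illustrative: + * + * @example + * // POST /fal-ai/sam-3/3d-align  ->  { status: 'IN_QUEUE', request_id: '...', queue_position: 0 }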
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiSam33dAlignResponse = + PostFalAiSam33dAlignResponses[keyof PostFalAiSam33dAlignResponses] + +export type GetFalAiSam33dAlignRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sam-3/3d-align/requests/{request_id}' +} + +export type GetFalAiSam33dAlignRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSam33dAlignOutput +} + +export type GetFalAiSam33dAlignRequestsByRequestIdResponse = + GetFalAiSam33dAlignRequestsByRequestIdResponses[keyof GetFalAiSam33dAlignRequestsByRequestIdResponses] + +export type GetFalAiMeshyV5RetextureRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/meshy/v5/retexture/requests/{request_id}/status' +} + +export type GetFalAiMeshyV5RetextureRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiMeshyV5RetextureRequestsByRequestIdStatusResponse = + GetFalAiMeshyV5RetextureRequestsByRequestIdStatusResponses[keyof GetFalAiMeshyV5RetextureRequestsByRequestIdStatusResponses] + +export type PutFalAiMeshyV5RetextureRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/meshy/v5/retexture/requests/{request_id}/cancel' +} + +export type PutFalAiMeshyV5RetextureRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiMeshyV5RetextureRequestsByRequestIdCancelResponse = + PutFalAiMeshyV5RetextureRequestsByRequestIdCancelResponses[keyof PutFalAiMeshyV5RetextureRequestsByRequestIdCancelResponses] + +export type PostFalAiMeshyV5RetextureData = { + body: SchemaMeshyV5RetextureInput + path?: never + query?: never + url: '/fal-ai/meshy/v5/retexture' +} + +export type PostFalAiMeshyV5RetextureResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMeshyV5RetextureResponse = + PostFalAiMeshyV5RetextureResponses[keyof PostFalAiMeshyV5RetextureResponses] + +export type GetFalAiMeshyV5RetextureRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/meshy/v5/retexture/requests/{request_id}' +} + +export type GetFalAiMeshyV5RetextureRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMeshyV5RetextureOutput +} + +export type GetFalAiMeshyV5RetextureRequestsByRequestIdResponse = + GetFalAiMeshyV5RetextureRequestsByRequestIdResponses[keyof GetFalAiMeshyV5RetextureRequestsByRequestIdResponses] + +export type GetFalAiMeshyV5RemeshRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/meshy/v5/remesh/requests/{request_id}/status' +} + +export type GetFalAiMeshyV5RemeshRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiMeshyV5RemeshRequestsByRequestIdStatusResponse = + GetFalAiMeshyV5RemeshRequestsByRequestIdStatusResponses[keyof GetFalAiMeshyV5RemeshRequestsByRequestIdStatusResponses] + +export type PutFalAiMeshyV5RemeshRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/meshy/v5/remesh/requests/{request_id}/cancel' +} + +export type PutFalAiMeshyV5RemeshRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiMeshyV5RemeshRequestsByRequestIdCancelResponse = + PutFalAiMeshyV5RemeshRequestsByRequestIdCancelResponses[keyof PutFalAiMeshyV5RemeshRequestsByRequestIdCancelResponses] + +export type PostFalAiMeshyV5RemeshData = { + body: SchemaMeshyV5RemeshInput + path?: never + query?: never + url: '/fal-ai/meshy/v5/remesh' +} + +export type PostFalAiMeshyV5RemeshResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMeshyV5RemeshResponse = + PostFalAiMeshyV5RemeshResponses[keyof PostFalAiMeshyV5RemeshResponses] + +export type GetFalAiMeshyV5RemeshRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/meshy/v5/remesh/requests/{request_id}' +} + +export type GetFalAiMeshyV5RemeshRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMeshyV5RemeshOutput +} + +export type GetFalAiMeshyV5RemeshRequestsByRequestIdResponse = + GetFalAiMeshyV5RemeshRequestsByRequestIdResponses[keyof GetFalAiMeshyV5RemeshRequestsByRequestIdResponses] + +export type GetFalAiHunyuanPartRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/hunyuan-part/requests/{request_id}/status' +} + +export type GetFalAiHunyuanPartRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiHunyuanPartRequestsByRequestIdStatusResponse = + GetFalAiHunyuanPartRequestsByRequestIdStatusResponses[keyof GetFalAiHunyuanPartRequestsByRequestIdStatusResponses] + +export type PutFalAiHunyuanPartRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan-part/requests/{request_id}/cancel' +} + +export type PutFalAiHunyuanPartRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiHunyuanPartRequestsByRequestIdCancelResponse = + PutFalAiHunyuanPartRequestsByRequestIdCancelResponses[keyof PutFalAiHunyuanPartRequestsByRequestIdCancelResponses] + +export type PostFalAiHunyuanPartData = { + body: SchemaHunyuanPartInput + path?: never + query?: never + url: '/fal-ai/hunyuan-part' +} + +export type PostFalAiHunyuanPartResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiHunyuanPartResponse = + PostFalAiHunyuanPartResponses[keyof PostFalAiHunyuanPartResponses] + +export type GetFalAiHunyuanPartRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan-part/requests/{request_id}' +} + +export type GetFalAiHunyuanPartRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaHunyuanPartOutput +} + +export type GetFalAiHunyuanPartRequestsByRequestIdResponse = + GetFalAiHunyuanPartRequestsByRequestIdResponses[keyof GetFalAiHunyuanPartRequestsByRequestIdResponses] diff --git a/packages/typescript/ai-fal/src/generated/3d-to-3d/zod.gen.ts b/packages/typescript/ai-fal/src/generated/3d-to-3d/zod.gen.ts new file mode 100644 index 00000000..9bc5ca42 --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/3d-to-3d/zod.gen.ts @@ -0,0 +1,835 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { z } from 'zod' + +/** + * File + */ +export const zSchemaFile = z.object({ + file_size: z.optional( + z.int().register(z.globalRegistry, { + description: 'The size of the file in bytes.', + }), + ), + file_name: z.optional( + z.string().register(z.globalRegistry, { + description: + 'The name of the file. It will be auto-generated if not provided.', + }), + ), + content_type: z.optional( + z.string().register(z.globalRegistry, { + description: 'The mime type of the file.', + }), + ), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), + file_data: z.optional( + z.string().register(z.globalRegistry, { + description: 'File data', + }), + ), +}) + +/** + * HunyuanPartOutput + */ +export const zSchemaHunyuanPartOutput = z.object({ + iou_scores: z.array(z.number()).register(z.globalRegistry, { + description: 'IoU scores for each of the three masks.', + }), + best_mask_index: z.int().register(z.globalRegistry, { + description: 'Index of the best mask (1, 2, or 3) based on IoU score.', + }), + mask_2_mesh: zSchemaFile, + mask_1_mesh: zSchemaFile, + segmented_mesh: zSchemaFile, + seed: z.int().register(z.globalRegistry, { + description: 'Seed value used for generation.', + }), + mask_3_mesh: zSchemaFile, +}) + +/** + * HunyuanPartInput + */ +export const zSchemaHunyuanPartInput = z.object({ + point_prompt_x: z + .optional( + z.number().gte(-1).lte(1).register(z.globalRegistry, { + description: + 'X coordinate of the point prompt for segmentation (normalized space -1 to 1).', + }), + ) + .default(0), + point_prompt_z: z + .optional( + z.number().gte(-1).lte(1).register(z.globalRegistry, { + description: + 'Z coordinate of the point prompt for segmentation (normalized space -1 to 1).', + }), + ) + .default(0), + use_normal: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to use normal information for segmentation.', + }), + ) + .default(true), + noise_std: z + .optional( + z.number().gte(0).lte(0.02).register(z.globalRegistry, { + description: 'Standard deviation of noise to add to sampled points.', + }), + ) + .default(0), + point_num: z + .optional( + z.int().gte(10000).lte(500000).register(z.globalRegistry, { + description: 'Number of points to sample from the mesh.', + }), + ) + .default(100000), + model_file_url: z.string().register(z.globalRegistry, { + description: + 'URL of the 3D model file (.glb or .obj) to process for segmentation.', + }), + point_prompt_y: z + .optional( + 
z.number().gte(-1).lte(1).register(z.globalRegistry, { + description: + 'Y coordinate of the point prompt for segmentation (normalized space -1 to 1).', + }), + ) + .default(0), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and input will produce the same segmentation results.\n ', + }), + ), +}) + +/** + * ModelUrls + * + * 3D model files in various formats + */ +export const zSchemaModelUrls = z + .object({ + usdz: z.optional(zSchemaFile), + fbx: z.optional(zSchemaFile), + blend: z.optional(zSchemaFile), + stl: z.optional(zSchemaFile), + glb: z.optional(zSchemaFile), + obj: z.optional(zSchemaFile), + }) + .register(z.globalRegistry, { + description: '3D model files in various formats', + }) + +/** + * RemeshOutput + * + * Output for 3D Model Remeshing + */ +export const zSchemaMeshyV5RemeshOutput = z + .object({ + model_urls: zSchemaModelUrls, + model_glb: z.optional(zSchemaFile), + }) + .register(z.globalRegistry, { + description: 'Output for 3D Model Remeshing', + }) + +/** + * RemeshInput + * + * Input for 3D Model Remeshing + */ +export const zSchemaMeshyV5RemeshInput = z + .object({ + resize_height: z + .optional( + z.number().gte(0).register(z.globalRegistry, { + description: + 'Resize the model to a certain height measured in meters. Set to 0 for no resizing.', + }), + ) + .default(0), + topology: z.optional( + z.enum(['quad', 'triangle']).register(z.globalRegistry, { + description: + 'Specify the topology of the generated model. Quad for smooth surfaces, Triangle for detailed geometry.', + }), + ), + target_polycount: z + .optional( + z.int().gte(100).lte(300000).register(z.globalRegistry, { + description: + 'Target number of polygons in the generated model. Actual count may vary based on geometry complexity.', + }), + ) + .default(30000), + model_url: z.string().register(z.globalRegistry, { + description: + 'URL or base64 data URI of a 3D model to remesh. Supports .glb, .gltf, .obj, .fbx, .stl formats. Can be a publicly accessible URL or data URI with MIME type application/octet-stream.', + }), + origin_at: z.optional( + z.enum(['bottom', 'center']).register(z.globalRegistry, { + description: 'Position of the origin. 
None means no effect.', + }), + ), + target_formats: z + .optional( + z + .array(z.enum(['glb', 'fbx', 'obj', 'usdz', 'blend', 'stl'])) + .register(z.globalRegistry, { + description: 'List of target formats for the remeshed model.', + }), + ) + .default(['glb']), + }) + .register(z.globalRegistry, { + description: 'Input for 3D Model Remeshing', + }) + +/** + * TextureFiles + * + * Texture files downloaded and uploaded to CDN + */ +export const zSchemaTextureFiles = z + .object({ + base_color: zSchemaFile, + normal: z.optional(zSchemaFile), + roughness: z.optional(zSchemaFile), + metallic: z.optional(zSchemaFile), + }) + .register(z.globalRegistry, { + description: 'Texture files downloaded and uploaded to CDN', + }) + +/** + * RetextureOutput + * + * Output for 3D Model Retexturing + */ +export const zSchemaMeshyV5RetextureOutput = z + .object({ + model_urls: zSchemaModelUrls, + text_style_prompt: z.optional( + z.string().register(z.globalRegistry, { + description: 'The text prompt used for texturing (if provided)', + }), + ), + texture_urls: z.optional( + z.array(zSchemaTextureFiles).register(z.globalRegistry, { + description: 'Array of texture file objects', + }), + ), + thumbnail: z.optional(zSchemaFile), + image_style_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The image URL used for texturing (if provided)', + }), + ), + model_glb: zSchemaFile, + }) + .register(z.globalRegistry, { + description: 'Output for 3D Model Retexturing', + }) + +/** + * RetextureInput + * + * Input for 3D Model Retexturing + */ +export const zSchemaMeshyV5RetextureInput = z + .object({ + enable_pbr: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Generate PBR Maps (metallic, roughness, normal) in addition to base color.', + }), + ) + .default(false), + text_style_prompt: z.optional( + z.string().max(600).register(z.globalRegistry, { + description: + 'Describe your desired texture style using text. Maximum 600 characters. Required if image_style_url is not provided.', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, input data will be checked for safety before processing.', + }), + ) + .default(true), + enable_original_uv: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Use the original UV mapping of the model instead of generating new UVs. If the model has no original UV, output quality may be reduced.', + }), + ) + .default(true), + model_url: z.string().register(z.globalRegistry, { + description: + 'URL or base64 data URI of a 3D model to texture. Supports .glb, .gltf, .obj, .fbx, .stl formats. Can be a publicly accessible URL or data URI with MIME type application/octet-stream.', + }), + image_style_url: z.optional( + z.string().register(z.globalRegistry, { + description: + '2D image to guide the texturing process. Supports .jpg, .jpeg, and .png formats. Required if text_style_prompt is not provided. If both are provided, image_style_url takes precedence.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'Input for 3D Model Retexturing', + }) + +/** + * SAM3DBodyAlignmentInfo + * + * Per-person alignment metadata. 
+ */ +export const zSchemaSam3dBodyAlignmentInfo = z + .object({ + translation: z.array(z.number()).register(z.globalRegistry, { + description: 'Translation [tx, ty, tz]', + }), + cropped_vertices_count: z.int().register(z.globalRegistry, { + description: 'Number of cropped vertices', + }), + person_id: z.int().register(z.globalRegistry, { + description: 'Index of the person', + }), + target_points_count: z.int().register(z.globalRegistry, { + description: 'Number of target points for alignment', + }), + scale_factor: z.number().register(z.globalRegistry, { + description: 'Scale factor applied for alignment', + }), + focal_length: z.number().register(z.globalRegistry, { + description: 'Focal length used', + }), + }) + .register(z.globalRegistry, { + description: 'Per-person alignment metadata.', + }) + +/** + * SAM3DAlignmentOutput + */ +export const zSchemaSam33dAlignOutput = z.object({ + scene_glb: z.optional(zSchemaFile), + visualization: zSchemaFile, + metadata: zSchemaSam3dBodyAlignmentInfo, + body_mesh_ply: zSchemaFile, + model_glb: zSchemaFile, +}) + +/** + * SAM3DAlignmentInput + */ +export const zSchemaSam33dAlignInput = z.object({ + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the original image used for MoGe depth estimation', + }), + body_mesh_url: z.string().register(z.globalRegistry, { + description: 'URL of the SAM-3D Body mesh file (.ply or .glb) to align', + }), + object_mesh_url: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Optional URL of SAM-3D Object mesh (.glb) to create combined scene', + }), + ), + focal_length: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Focal length from SAM-3D Body metadata. If not provided, estimated from MoGe.', + }), + ), + body_mask_url: z.optional( + z.string().register(z.globalRegistry, { + description: + 'URL of the human mask image. 
If not provided, uses full image.', + }), + ), +}) + +/** + * UltraShapeResponse + */ +export const zSchemaUltrashapeOutput = z.object({ + model_glb: zSchemaFile, +}) + +/** + * UltraShapeRequest + */ +export const zSchemaUltrashapeInput = z.object({ + octree_resolution: z + .optional( + z.int().gte(128).lte(1024).register(z.globalRegistry, { + description: 'Marching cubes resolution.', + }), + ) + .default(1024), + remove_background: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Remove image background.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'Diffusion steps.', + }), + ) + .default(50), + model_url: z.string().register(z.globalRegistry, { + description: 'URL of the coarse mesh (.glb or .obj) to refine.', + }), + seed: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Random seed.', + }), + ) + .default(42), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the reference image for mesh refinement.', + }), +}) + +export const zSchemaQueueStatus = z.object({ + status: z.enum(['IN_QUEUE', 'IN_PROGRESS', 'COMPLETED']), + request_id: z.string().register(z.globalRegistry, { + description: 'The request id.', + }), + response_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The response url.', + }), + ), + status_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The status url.', + }), + ), + cancel_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The cancel url.', + }), + ), + logs: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The logs.', + }), + ), + metrics: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The metrics.', + }), + ), + queue_position: z.optional( + z.int().register(z.globalRegistry, { + description: 'The queue position.', + }), + ), +}) + +export const zGetFalAiUltrashapeRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiUltrashapeRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiUltrashapeRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiUltrashapeRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiUltrashapeData = z.object({ + body: zSchemaUltrashapeInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
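+ * + * A hedged runtime-validation sketch (the fetch/JSON plumbing is assumed, not part of this file): + * + * @example + * // const status = zPostFalAiUltrashapeResponse.parse(await res.json()) + * // status.status  // 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED'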
+ */ +export const zPostFalAiUltrashapeResponse = zSchemaQueueStatus + +export const zGetFalAiUltrashapeRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiUltrashapeRequestsByRequestIdResponse = + zSchemaUltrashapeOutput + +export const zGetFalAiSam33dAlignRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiSam33dAlignRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSam33dAlignRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiSam33dAlignRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSam33dAlignData = z.object({ + body: zSchemaSam33dAlignInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiSam33dAlignResponse = zSchemaQueueStatus + +export const zGetFalAiSam33dAlignRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSam33dAlignRequestsByRequestIdResponse = + zSchemaSam33dAlignOutput + +export const zGetFalAiMeshyV5RetextureRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiMeshyV5RetextureRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMeshyV5RetextureRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
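+ * + * Tolerant-parsing sketch (assumes `body` holds the JSON returned by the cancel endpoint): + * + * @example + * // const r = zPutFalAiMeshyV5RetextureRequestsByRequestIdCancelResponse.safeParse(body) + * // if (r.success && r.data.success) console.log('request cancelled')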
+ */ +export const zPutFalAiMeshyV5RetextureRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMeshyV5RetextureData = z.object({ + body: zSchemaMeshyV5RetextureInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMeshyV5RetextureResponse = zSchemaQueueStatus + +export const zGetFalAiMeshyV5RetextureRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiMeshyV5RetextureRequestsByRequestIdResponse = + zSchemaMeshyV5RetextureOutput + +export const zGetFalAiMeshyV5RemeshRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiMeshyV5RemeshRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMeshyV5RemeshRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiMeshyV5RemeshRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMeshyV5RemeshData = z.object({ + body: zSchemaMeshyV5RemeshInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMeshyV5RemeshResponse = zSchemaQueueStatus + +export const zGetFalAiMeshyV5RemeshRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiMeshyV5RemeshRequestsByRequestIdResponse = + zSchemaMeshyV5RemeshOutput + +export const zGetFalAiHunyuanPartRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiHunyuanPartRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiHunyuanPartRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiHunyuanPartRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiHunyuanPartData = z.object({ + body: zSchemaHunyuanPartInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiHunyuanPartResponse = zSchemaQueueStatus + +export const zGetFalAiHunyuanPartRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiHunyuanPartRequestsByRequestIdResponse = + zSchemaHunyuanPartOutput diff --git a/packages/typescript/ai-fal/src/generated/audio-to-audio/endpoint-map.ts b/packages/typescript/ai-fal/src/generated/audio-to-audio/endpoint-map.ts new file mode 100644 index 00000000..07e005bb --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/audio-to-audio/endpoint-map.ts @@ -0,0 +1,234 @@ +// AUTO-GENERATED - Do not edit manually +// Generated from types.gen.ts via scripts/generate-fal-endpoint-maps.ts + +import { + zSchemaAceStepAudioInpaintInput, + zSchemaAceStepAudioInpaintOutput, + zSchemaAceStepAudioOutpaintInput, + zSchemaAceStepAudioOutpaintOutput, + zSchemaAceStepAudioToAudioInput, + zSchemaAceStepAudioToAudioOutput, + zSchemaAudioUnderstandingInput, + zSchemaAudioUnderstandingOutput, + zSchemaDeepfilternet3Input, + zSchemaDeepfilternet3Output, + zSchemaDemucsInput, + zSchemaDemucsOutput, + zSchemaDiaTtsVoiceCloneInput, + zSchemaDiaTtsVoiceCloneOutput, + zSchemaElevenlabsAudioIsolationInput, + zSchemaElevenlabsAudioIsolationOutput, + zSchemaElevenlabsVoiceChangerInput, + zSchemaElevenlabsVoiceChangerOutput, + zSchemaFfmpegApiMergeAudiosInput, + zSchemaFfmpegApiMergeAudiosOutput, + zSchemaKlingVideoCreateVoiceInput, + zSchemaKlingVideoCreateVoiceOutput, + zSchemaNovaSrInput, + zSchemaNovaSrOutput, + zSchemaSamAudioSeparateInput, + zSchemaSamAudioSeparateOutput, + zSchemaSamAudioSpanSeparateInput, + zSchemaSamAudioSpanSeparateOutput, + zSchemaStableAudio25AudioToAudioInput, + zSchemaStableAudio25AudioToAudioOutput, + zSchemaStableAudio25InpaintInput, + zSchemaStableAudio25InpaintOutput, + zSchemaV2ExtendInput, + zSchemaV2ExtendOutput, +} from './zod.gen' + +import type { + SchemaAceStepAudioInpaintInput, + SchemaAceStepAudioInpaintOutput, + SchemaAceStepAudioOutpaintInput, + SchemaAceStepAudioOutpaintOutput, + SchemaAceStepAudioToAudioInput, + SchemaAceStepAudioToAudioOutput, + SchemaAudioUnderstandingInput, + SchemaAudioUnderstandingOutput, + SchemaDeepfilternet3Input, + SchemaDeepfilternet3Output, + SchemaDemucsInput, + SchemaDemucsOutput, + SchemaDiaTtsVoiceCloneInput, + SchemaDiaTtsVoiceCloneOutput, + SchemaElevenlabsAudioIsolationInput, + SchemaElevenlabsAudioIsolationOutput, + SchemaElevenlabsVoiceChangerInput, + 
SchemaElevenlabsVoiceChangerOutput, + SchemaFfmpegApiMergeAudiosInput, + SchemaFfmpegApiMergeAudiosOutput, + SchemaKlingVideoCreateVoiceInput, + SchemaKlingVideoCreateVoiceOutput, + SchemaNovaSrInput, + SchemaNovaSrOutput, + SchemaSamAudioSeparateInput, + SchemaSamAudioSeparateOutput, + SchemaSamAudioSpanSeparateInput, + SchemaSamAudioSpanSeparateOutput, + SchemaStableAudio25AudioToAudioInput, + SchemaStableAudio25AudioToAudioOutput, + SchemaStableAudio25InpaintInput, + SchemaStableAudio25InpaintOutput, + SchemaV2ExtendInput, + SchemaV2ExtendOutput, +} from './types.gen' + +import type { z } from 'zod' + +export type AudioToAudioEndpointMap = { + 'fal-ai/elevenlabs/voice-changer': { + input: SchemaElevenlabsVoiceChangerInput + output: SchemaElevenlabsVoiceChangerOutput + } + 'fal-ai/nova-sr': { + input: SchemaNovaSrInput + output: SchemaNovaSrOutput + } + 'fal-ai/deepfilternet3': { + input: SchemaDeepfilternet3Input + output: SchemaDeepfilternet3Output + } + 'fal-ai/sam-audio/separate': { + input: SchemaSamAudioSeparateInput + output: SchemaSamAudioSeparateOutput + } + 'fal-ai/sam-audio/span-separate': { + input: SchemaSamAudioSpanSeparateInput + output: SchemaSamAudioSpanSeparateOutput + } + 'fal-ai/ffmpeg-api/merge-audios': { + input: SchemaFfmpegApiMergeAudiosInput + output: SchemaFfmpegApiMergeAudiosOutput + } + 'fal-ai/kling-video/create-voice': { + input: SchemaKlingVideoCreateVoiceInput + output: SchemaKlingVideoCreateVoiceOutput + } + 'fal-ai/demucs': { + input: SchemaDemucsInput + output: SchemaDemucsOutput + } + 'fal-ai/audio-understanding': { + input: SchemaAudioUnderstandingInput + output: SchemaAudioUnderstandingOutput + } + 'fal-ai/stable-audio-25/audio-to-audio': { + input: SchemaStableAudio25AudioToAudioInput + output: SchemaStableAudio25AudioToAudioOutput + } + 'fal-ai/stable-audio-25/inpaint': { + input: SchemaStableAudio25InpaintInput + output: SchemaStableAudio25InpaintOutput + } + 'sonauto/v2/extend': { + input: SchemaV2ExtendInput + output: SchemaV2ExtendOutput + } + 'fal-ai/ace-step/audio-outpaint': { + input: SchemaAceStepAudioOutpaintInput + output: SchemaAceStepAudioOutpaintOutput + } + 'fal-ai/ace-step/audio-inpaint': { + input: SchemaAceStepAudioInpaintInput + output: SchemaAceStepAudioInpaintOutput + } + 'fal-ai/ace-step/audio-to-audio': { + input: SchemaAceStepAudioToAudioInput + output: SchemaAceStepAudioToAudioOutput + } + 'fal-ai/dia-tts/voice-clone': { + input: SchemaDiaTtsVoiceCloneInput + output: SchemaDiaTtsVoiceCloneOutput + } + 'fal-ai/elevenlabs/audio-isolation': { + input: SchemaElevenlabsAudioIsolationInput + output: SchemaElevenlabsAudioIsolationOutput + } +} + +/** Union type of all audio-to-audio model endpoint IDs */ +export type AudioToAudioModel = keyof AudioToAudioEndpointMap + +export const AudioToAudioSchemaMap: Record< + AudioToAudioModel, + { input: z.ZodSchema; output: z.ZodSchema } +> = { + ['fal-ai/elevenlabs/voice-changer']: { + input: zSchemaElevenlabsVoiceChangerInput, + output: zSchemaElevenlabsVoiceChangerOutput, + }, + ['fal-ai/nova-sr']: { + input: zSchemaNovaSrInput, + output: zSchemaNovaSrOutput, + }, + ['fal-ai/deepfilternet3']: { + input: zSchemaDeepfilternet3Input, + output: zSchemaDeepfilternet3Output, + }, + ['fal-ai/sam-audio/separate']: { + input: zSchemaSamAudioSeparateInput, + output: zSchemaSamAudioSeparateOutput, + }, + ['fal-ai/sam-audio/span-separate']: { + input: zSchemaSamAudioSpanSeparateInput, + output: zSchemaSamAudioSpanSeparateOutput, + }, + ['fal-ai/ffmpeg-api/merge-audios']: { + input: 
zSchemaFfmpegApiMergeAudiosInput, + output: zSchemaFfmpegApiMergeAudiosOutput, + }, + ['fal-ai/kling-video/create-voice']: { + input: zSchemaKlingVideoCreateVoiceInput, + output: zSchemaKlingVideoCreateVoiceOutput, + }, + ['fal-ai/demucs']: { + input: zSchemaDemucsInput, + output: zSchemaDemucsOutput, + }, + ['fal-ai/audio-understanding']: { + input: zSchemaAudioUnderstandingInput, + output: zSchemaAudioUnderstandingOutput, + }, + ['fal-ai/stable-audio-25/audio-to-audio']: { + input: zSchemaStableAudio25AudioToAudioInput, + output: zSchemaStableAudio25AudioToAudioOutput, + }, + ['fal-ai/stable-audio-25/inpaint']: { + input: zSchemaStableAudio25InpaintInput, + output: zSchemaStableAudio25InpaintOutput, + }, + ['sonauto/v2/extend']: { + input: zSchemaV2ExtendInput, + output: zSchemaV2ExtendOutput, + }, + ['fal-ai/ace-step/audio-outpaint']: { + input: zSchemaAceStepAudioOutpaintInput, + output: zSchemaAceStepAudioOutpaintOutput, + }, + ['fal-ai/ace-step/audio-inpaint']: { + input: zSchemaAceStepAudioInpaintInput, + output: zSchemaAceStepAudioInpaintOutput, + }, + ['fal-ai/ace-step/audio-to-audio']: { + input: zSchemaAceStepAudioToAudioInput, + output: zSchemaAceStepAudioToAudioOutput, + }, + ['fal-ai/dia-tts/voice-clone']: { + input: zSchemaDiaTtsVoiceCloneInput, + output: zSchemaDiaTtsVoiceCloneOutput, + }, + ['fal-ai/elevenlabs/audio-isolation']: { + input: zSchemaElevenlabsAudioIsolationInput, + output: zSchemaElevenlabsAudioIsolationOutput, + }, +} as const + +/** Get the input type for a specific audio-to-audio model */ +export type AudioToAudioModelInput<T extends AudioToAudioModel> = + AudioToAudioEndpointMap[T]['input'] + +/** Get the output type for a specific audio-to-audio model */ +export type AudioToAudioModelOutput<T extends AudioToAudioModel> = + AudioToAudioEndpointMap[T]['output'] diff --git a/packages/typescript/ai-fal/src/generated/audio-to-audio/types.gen.ts b/packages/typescript/ai-fal/src/generated/audio-to-audio/types.gen.ts new file mode 100644 index 00000000..478ec33a --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/audio-to-audio/types.gen.ts @@ -0,0 +1,3064 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: 'https://queue.fal.run' | (string & {}) +} + +/** + * TTSOutput + */ +export type SchemaElevenlabsAudioIsolationOutput = { + audio: SchemaFile + /** + * Timestamps + * + * Timestamps for each word in the generated speech. Only returned if `timestamps` is set to True in the request. + */ + timestamps?: Array<unknown> | unknown +} + +/** + * File + */ +export type SchemaFile = { + /** + * File Size + * + * The size of the file in bytes. + */ + file_size?: number | unknown + /** + * File Name + * + * The name of the file. It will be auto-generated if not provided. + */ + file_name?: string | unknown + /** + * Content Type + * + * The mime type of the file. + */ + content_type?: string | unknown + /** + * Url + * + * The URL where the file can be downloaded from. + */ + url: string +} + +/** + * AudioIsolationRequest + */ +export type SchemaElevenlabsAudioIsolationInput = { + /** + * Video Url + * + * Video file to use for audio isolation. Either `audio_url` or `video_url` must be provided.
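+ * + * Illustrative inputs (placeholder URLs): + * + * @example + * // { audio_url: 'https://example.com/speech.mp3' } + * // { video_url: 'https://example.com/clip.mp4' }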
+ */ + video_url?: string | unknown + /** + * Audio Url + * + * URL of the audio file to isolate voice from + */ + audio_url?: string | unknown +} + +/** + * DiaCloneOutput + */ +export type SchemaDiaTtsVoiceCloneOutput = { + /** + * The generated speech audio + */ + audio: SchemaFile +} + +/** + * CloneRequest + */ +export type SchemaDiaTtsVoiceCloneInput = { + /** + * Text + * + * The text to be converted to speech. + */ + text: string + /** + * Reference Text for the Reference Audio + * + * The reference text to be used for TTS. + */ + ref_text: string + /** + * Reference Audio URL + * + * The URL of the reference audio file. + */ + ref_audio_url: string +} + +/** + * ACEStepAudioToAudioResponse + */ +export type SchemaAceStepAudioToAudioOutput = { + /** + * Tags + * + * The genre tags used in the generation process. + */ + tags: string + /** + * Lyrics + * + * The lyrics used in the generation process. + */ + lyrics: string + /** + * Seed + * + * The random seed used for the generation process. + */ + seed: number + /** + * Audio + * + * The generated audio file. + */ + audio: SchemaFile +} + +/** + * ACEStepAudioToAudioRequest + */ +export type SchemaAceStepAudioToAudioInput = { + /** + * Number Of Steps + * + * Number of steps to generate the audio. + */ + number_of_steps?: number + /** + * Tags + * + * Comma-separated list of genre tags to control the style of the generated audio. + */ + tags: string + /** + * Minimum Guidance Scale + * + * Minimum guidance scale for the generation after the decay. + */ + minimum_guidance_scale?: number + /** + * Lyrics + * + * Lyrics to be sung in the audio. If not provided or if [inst] or [instrumental] is the content of this field, no lyrics will be sung. Use control structures like [verse], [chorus] and [bridge] to control the structure of the song. + */ + lyrics?: string + /** + * Tag Guidance Scale + * + * Tag guidance scale for the generation. + */ + tag_guidance_scale?: number + /** + * Original Lyrics + * + * Original lyrics of the audio file. + */ + original_lyrics?: string + /** + * Scheduler + * + * Scheduler to use for the generation process. + */ + scheduler?: 'euler' | 'heun' + /** + * Guidance Scale + * + * Guidance scale for the generation. + */ + guidance_scale?: number + /** + * Guidance Type + * + * Type of CFG to use for the generation process. + */ + guidance_type?: 'cfg' | 'apg' | 'cfg_star' + /** + * Lyric Guidance Scale + * + * Lyric guidance scale for the generation. + */ + lyric_guidance_scale?: number + /** + * Guidance Interval + * + * Guidance interval for the generation. 0.5 means only apply guidance in the middle steps (0.25 * infer_steps to 0.75 * infer_steps) + */ + guidance_interval?: number + /** + * Edit Mode + * + * Whether to edit the lyrics only or remix the audio. + */ + edit_mode?: 'lyrics' | 'remix' + /** + * Guidance Interval Decay + * + * Guidance interval decay for the generation. Guidance scale will decay from guidance_scale to min_guidance_scale in the interval. 0.0 means no decay. + */ + guidance_interval_decay?: number + /** + * Audio Url + * + * URL of the audio file to be outpainted. + */ + audio_url: string + /** + * Seed + * + * Random seed for reproducibility. If not provided, a random seed will be used. + */ + seed?: number + /** + * Granularity Scale + * + * Granularity scale for the generation process. Higher values can reduce artifacts. + */ + granularity_scale?: number + /** + * Original Tags + * + * Original tags of the audio file. 
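+ * + * Illustrative value (a comma-separated string, mirroring the Tags field above): + * + * @example + * // original_tags: 'lofi, chill, downtempo'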
+ */ + original_tags: string + /** + * Original Seed + * + * Original seed of the audio file. + */ + original_seed?: number +} + +/** + * ACEStepAudioInpaintResponse + */ +export type SchemaAceStepAudioInpaintOutput = { + /** + * Tags + * + * The genre tags used in the generation process. + */ + tags: string + /** + * Lyrics + * + * The lyrics used in the generation process. + */ + lyrics: string + /** + * Seed + * + * The random seed used for the generation process. + */ + seed: number + /** + * Audio + * + * The generated audio file. + */ + audio: SchemaFile +} + +/** + * ACEStepAudioInpaintRequest + */ +export type SchemaAceStepAudioInpaintInput = { + /** + * Number Of Steps + * + * Number of steps to generate the audio. + */ + number_of_steps?: number + /** + * Start Time + * + * start time in seconds for the inpainting process. + */ + start_time?: number + /** + * Tags + * + * Comma-separated list of genre tags to control the style of the generated audio. + */ + tags: string + /** + * Minimum Guidance Scale + * + * Minimum guidance scale for the generation after the decay. + */ + minimum_guidance_scale?: number + /** + * Lyrics + * + * Lyrics to be sung in the audio. If not provided or if [inst] or [instrumental] is the content of this field, no lyrics will be sung. Use control structures like [verse], [chorus] and [bridge] to control the structure of the song. + */ + lyrics?: string + /** + * End Time Relative To + * + * Whether the end time is relative to the start or end of the audio. + */ + end_time_relative_to?: 'start' | 'end' + /** + * Tag Guidance Scale + * + * Tag guidance scale for the generation. + */ + tag_guidance_scale?: number + /** + * Scheduler + * + * Scheduler to use for the generation process. + */ + scheduler?: 'euler' | 'heun' + /** + * End Time + * + * end time in seconds for the inpainting process. + */ + end_time?: number + /** + * Guidance Type + * + * Type of CFG to use for the generation process. + */ + guidance_type?: 'cfg' | 'apg' | 'cfg_star' + /** + * Guidance Scale + * + * Guidance scale for the generation. + */ + guidance_scale?: number + /** + * Lyric Guidance Scale + * + * Lyric guidance scale for the generation. + */ + lyric_guidance_scale?: number + /** + * Guidance Interval + * + * Guidance interval for the generation. 0.5 means only apply guidance in the middle steps (0.25 * infer_steps to 0.75 * infer_steps) + */ + guidance_interval?: number + /** + * Variance + * + * Variance for the inpainting process. Higher values can lead to more diverse results. + */ + variance?: number + /** + * Guidance Interval Decay + * + * Guidance interval decay for the generation. Guidance scale will decay from guidance_scale to min_guidance_scale in the interval. 0.0 means no decay. + */ + guidance_interval_decay?: number + /** + * Start Time Relative To + * + * Whether the start time is relative to the start or end of the audio. + */ + start_time_relative_to?: 'start' | 'end' + /** + * Audio Url + * + * URL of the audio file to be inpainted. + */ + audio_url: string + /** + * Seed + * + * Random seed for reproducibility. If not provided, a random seed will be used. + */ + seed?: number + /** + * Granularity Scale + * + * Granularity scale for the generation process. Higher values can reduce artifacts. + */ + granularity_scale?: number +} + +/** + * ACEStepResponse + */ +export type SchemaAceStepAudioOutpaintOutput = { + /** + * Tags + * + * The genre tags used in the generation process. 
+ */ + tags: string + /** + * Lyrics + * + * The lyrics used in the generation process. + */ + lyrics: string + /** + * Seed + * + * The random seed used for the generation process. + */ + seed: number + /** + * Audio + * + * The generated audio file. + */ + audio: SchemaFile +} + +/** + * ACEStepAudioOutpaintRequest + */ +export type SchemaAceStepAudioOutpaintInput = { + /** + * Number Of Steps + * + * Number of steps to generate the audio. + */ + number_of_steps?: number + /** + * Tags + * + * Comma-separated list of genre tags to control the style of the generated audio. + */ + tags: string + /** + * Minimum Guidance Scale + * + * Minimum guidance scale for the generation after the decay. + */ + minimum_guidance_scale?: number + /** + * Extend After Duration + * + * Duration in seconds to extend the audio from the end. + */ + extend_after_duration?: number + /** + * Lyrics + * + * Lyrics to be sung in the audio. If not provided or if [inst] or [instrumental] is the content of this field, no lyrics will be sung. Use control structures like [verse], [chorus] and [bridge] to control the structure of the song. + */ + lyrics?: string + /** + * Tag Guidance Scale + * + * Tag guidance scale for the generation. + */ + tag_guidance_scale?: number + /** + * Scheduler + * + * Scheduler to use for the generation process. + */ + scheduler?: 'euler' | 'heun' + /** + * Extend Before Duration + * + * Duration in seconds to extend the audio from the start. + */ + extend_before_duration?: number + /** + * Guidance Type + * + * Type of CFG to use for the generation process. + */ + guidance_type?: 'cfg' | 'apg' | 'cfg_star' + /** + * Guidance Scale + * + * Guidance scale for the generation. + */ + guidance_scale?: number + /** + * Lyric Guidance Scale + * + * Lyric guidance scale for the generation. + */ + lyric_guidance_scale?: number + /** + * Guidance Interval + * + * Guidance interval for the generation. 0.5 means only apply guidance in the middle steps (0.25 * infer_steps to 0.75 * infer_steps) + */ + guidance_interval?: number + /** + * Guidance Interval Decay + * + * Guidance interval decay for the generation. Guidance scale will decay from guidance_scale to min_guidance_scale in the interval. 0.0 means no decay. + */ + guidance_interval_decay?: number + /** + * Audio Url + * + * URL of the audio file to be outpainted. + */ + audio_url: string + /** + * Seed + * + * Random seed for reproducibility. If not provided, a random seed will be used. + */ + seed?: number + /** + * Granularity Scale + * + * Granularity scale for the generation process. Higher values can reduce artifacts. + */ + granularity_scale?: number +} + +/** + * ExtendOutput + */ +export type SchemaV2ExtendOutput = { + /** + * Tags + * + * The style tags used for generation. + */ + tags?: Array<string> | unknown + /** + * Seed + * + * The seed used for generation. This can be used to generate an identical song by passing the same parameters with this seed in a future request. + */ + seed: number + /** + * Extend Duration + * + * The duration in seconds that the song was extended by. + */ + extend_duration: number + /** + * Audio + * + * The generated audio files. + */ + audio: Array<SchemaFile> + /** + * Lyrics + * + * The lyrics used for generation. + */ + lyrics?: string | unknown +} + +/** + * ExtendInput + */ +export type SchemaV2ExtendInput = { + /** + * Prompt + * + * A description of the track you want to generate. This prompt will be used to automatically generate the tags and lyrics unless you manually set them.
For example, if you set prompt and tags, then the prompt will be used to generate only the lyrics. + */ + prompt?: string | unknown + /** + * Lyrics Prompt + * + * The lyrics sung in the generated song. An empty string will generate an instrumental track. + */ + lyrics_prompt?: string | unknown + /** + * Tags + * + * Tags/styles of the music to generate. You can view a list of all available tags at https://sonauto.ai/tag-explorer. + */ + tags?: Array<string> | unknown + /** + * Prompt Strength + * + * Controls how strongly your prompt influences the output. Greater values adhere more to the prompt but sound less natural. (This is CFG.) + */ + prompt_strength?: number + /** + * Output Bit Rate + * + * The bit rate to use for mp3 and m4a formats. Not available for other formats. + */ + output_bit_rate?: 128 | 192 | 256 | 320 | unknown + /** + * Num Songs + * + * Generating 2 songs costs 1.5x the price of generating 1 song. Also, note that using the same seed may not result in identical songs if the number of songs generated is changed. + */ + num_songs?: number + /** + * Output Format + */ + output_format?: 'flac' | 'mp3' | 'wav' | 'ogg' | 'm4a' + /** + * Side + * + * Add more to the beginning (left) or end (right) of the song + */ + side: 'left' | 'right' + /** + * Balance Strength + * + * Greater means more natural vocals. Lower means sharper instrumentals. We recommend 0.7. + */ + balance_strength?: number + /** + * Crop Duration + * + * Duration in seconds to crop from the selected side before extending from that side. + */ + crop_duration?: number + /** + * Audio Url + * + * The URL of the audio file to alter. Must be a valid publicly accessible URL. + */ + audio_url: string + /** + * Seed + * + * The seed to use for generation. Will pick a random seed if not provided. Repeating a request with identical parameters (must use lyrics and tags, not prompt) and the same seed will generate the same song. + */ + seed?: number | unknown + /** + * Extend Duration + * + * Duration in seconds to extend the song. If not provided, will attempt to automatically determine. + */ + extend_duration?: number | unknown +} + +/** + * InpaintOutput + */ +export type SchemaStableAudio25InpaintOutput = { + /** + * Seed + * + * The random seed used for generation + */ + seed: number + /** + * Audio + * + * The generated audio clip + */ + audio: SchemaFile +} + +/** + * InpaintInput + */ +export type SchemaStableAudio25InpaintInput = { + /** + * Prompt + * + * The prompt to guide the audio generation + */ + prompt: string + /** + * Guidance Scale + * + * How strictly the diffusion process adheres to the prompt text (higher values make your audio closer to your prompt). + */ + guidance_scale?: number + /** + * Mask End + * + * The end point of the audio mask + */ + mask_end?: number + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Audio Url + * + * The audio clip to inpaint + */ + audio_url: string + /** + * Seed + */ + seed?: number + /** + * Seconds Total + * + * The duration of the audio clip to generate. If not provided, it will be set to the duration of the input audio.
+ */ + seconds_total?: number + /** + * Num Inference Steps + * + * The number of steps to denoise the audio for + */ + num_inference_steps?: number + /** + * Mask Start + * + * The start point of the audio mask + */ + mask_start?: number +} + +/** + * AudioToAudioOutput + */ +export type SchemaStableAudio25AudioToAudioOutput = { + /** + * Seed + * + * The random seed used for generation + */ + seed: number + /** + * Audio + * + * The generated audio clip + */ + audio: SchemaFile +} + +/** + * AudioToAudioInput + */ +export type SchemaStableAudio25AudioToAudioInput = { + /** + * Prompt + * + * The prompt to guide the audio generation + */ + prompt: string + /** + * Strength + * + * Sometimes referred to as denoising, this parameter controls how much influence the `audio_url` parameter has on the generated audio. A value of 0 would yield audio that is identical to the input. A value of 1 would be as if you passed in no audio at all. + */ + strength?: number + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Audio Url + * + * The audio clip to transform + */ + audio_url: string + /** + * Num Inference Steps + * + * The number of steps to denoise the audio for + */ + num_inference_steps?: number + /** + * Guidance Scale + * + * How strictly the diffusion process adheres to the prompt text (higher values make your audio closer to your prompt). + */ + guidance_scale?: number + /** + * Seed + */ + seed?: number + /** + * Total Seconds + * + * The duration of the audio clip to generate. If not provided, it will be set to the duration of the input audio. + */ + total_seconds?: number +} + +/** + * AudioUnderstandingOutput + */ +export type SchemaAudioUnderstandingOutput = { + /** + * Output + * + * The analysis of the audio content based on the prompt + */ + output: string +} + +/** + * AudioUnderstandingInput + */ +export type SchemaAudioUnderstandingInput = { + /** + * Prompt + * + * The question or prompt about the audio content. + */ + prompt: string + /** + * Detailed Analysis + * + * Whether to request a more detailed analysis of the audio + */ + detailed_analysis?: boolean + /** + * Audio Url + * + * URL of the audio file to analyze + */ + audio_url: string +} + +/** + * DemucsOutput + */ +export type SchemaDemucsOutput = { + /** + * Separated vocals audio file + */ + vocals?: SchemaFile | unknown + /** + * Separated guitar audio file (only available for 6s models) + */ + guitar?: SchemaFile | unknown + /** + * Separated bass audio file + */ + bass?: SchemaFile | unknown + /** + * Separated piano audio file (only available for 6s models) + */ + piano?: SchemaFile | unknown + /** + * Separated other instruments audio file + */ + other?: SchemaFile | unknown + /** + * Separated drums audio file + */ + drums?: SchemaFile | unknown +} + +/** + * DemucsInput + */ +export type SchemaDemucsInput = { + /** + * Segment Length + * + * Length in seconds of each segment for processing. Smaller values use less memory but may reduce quality. Default is model-specific. + */ + segment_length?: number | unknown + /** + * Output Format + * + * Output audio format for the separated stems + */ + output_format?: 'wav' | 'mp3' + /** + * Stems + * + * Specific stems to extract. If None, extracts all available stems. 
Available stems depend on model: vocals, drums, bass, other, guitar, piano (for 6s model)
+   */
+  stems?:
+    | Array<'vocals' | 'drums' | 'bass' | 'other' | 'guitar' | 'piano'>
+    | unknown
+  /**
+   * Overlap
+   *
+   * Overlap between segments (0.0 to 1.0). Higher values may improve quality but increase processing time.
+   */
+  overlap?: number
+  /**
+   * Model
+   *
+   * Demucs model to use for separation
+   */
+  model?:
+    | 'htdemucs'
+    | 'htdemucs_ft'
+    | 'htdemucs_6s'
+    | 'hdemucs_mmi'
+    | 'mdx'
+    | 'mdx_extra'
+    | 'mdx_q'
+    | 'mdx_extra_q'
+  /**
+   * Audio Url
+   *
+   * URL of the audio file to separate into stems
+   */
+  audio_url: string
+  /**
+   * Shifts
+   *
+   * Number of random shifts for equivariant stabilization. Higher values improve quality but increase processing time.
+   */
+  shifts?: number
+}
+
+/**
+ * CreateVoiceOutput
+ *
+ * Response model for creating a custom voice.
+ */
+export type SchemaKlingVideoCreateVoiceOutput = {
+  /**
+   * Voice Id
+   *
+   * Unique identifier for the created voice
+   */
+  voice_id: string
+}
+
+/**
+ * CreateVoiceInput
+ *
+ * Request model for creating a custom voice.
+ */
+export type SchemaKlingVideoCreateVoiceInput = {
+  /**
+   * Voice Url
+   *
+   * URL of the voice audio file. Supports .mp3/.wav audio or .mp4/.mov video. Duration must be 5-30 seconds with clean, single-voice audio.
+   */
+  voice_url: string
+}
+
+/**
+ * MergeAudiosOutput
+ */
+export type SchemaFfmpegApiMergeAudiosOutput = {
+  audio: SchemaFile
+}
+
+/**
+ * MergeAudiosInput
+ */
+export type SchemaFfmpegApiMergeAudiosInput = {
+  /**
+   * Audio Urls
+   *
+   * List of audio URLs to merge in order. The 0th stream of the audio will be considered as the merge candidate.
+   */
+  audio_urls: Array<string>
+  /**
+   * Output Format
+   *
+   * Output format of the combined audio. If not used, will be determined automatically using FFMPEG. Formatted as codec_sample_rate_bitrate.
+   */
+  output_format?:
+    | 'mp3_22050_32'
+    | 'mp3_44100_32'
+    | 'mp3_44100_64'
+    | 'mp3_44100_96'
+    | 'mp3_44100_128'
+    | 'mp3_44100_192'
+    | 'pcm_8000'
+    | 'pcm_16000'
+    | 'pcm_22050'
+    | 'pcm_24000'
+    | 'pcm_44100'
+    | 'pcm_48000'
+    | 'ulaw_8000'
+    | 'alaw_8000'
+    | 'opus_48000_32'
+    | 'opus_48000_64'
+    | 'opus_48000_96'
+    | 'opus_48000_128'
+    | 'opus_48000_192'
+    | unknown
+}
+
+/**
+ * AudioTimeSpan
+ *
+ * A time span indicating where the target sound occurs.
+ */
+export type SchemaAudioTimeSpan = {
+  /**
+   * End
+   *
+   * End time of the span in seconds
+   */
+  end: number
+  /**
+   * Start
+   *
+   * Start time of the span in seconds
+   */
+  start: number
+  /**
+   * Include
+   *
+   * Whether to include (True) or exclude (False) sounds in this span
+   */
+  include?: boolean
+}
+
+/**
+ * SAMAudioSpanSeparateOutput
+ *
+ * Output for span-based audio separation.
+ */
+export type SchemaSamAudioSpanSeparateOutput = {
+  /**
+   * Target
+   *
+   * The isolated target sound.
+   */
+  target: SchemaFile
+  /**
+   * Duration
+   *
+   * Duration of the output audio in seconds.
+   */
+  duration: number
+  /**
+   * Sample Rate
+   *
+   * Sample rate of the output audio in Hz.
+   */
+  sample_rate?: number
+  /**
+   * Residual
+   *
+   * Everything else in the audio.
+   */
+  residual: SchemaFile
+}
+
+/**
+ * SAMAudioSpanInput
+ *
+ * Input for temporal span-based audio separation.
+ */
+export type SchemaSamAudioSpanSeparateInput = {
+  /**
+   * Prompt
+   *
+   * Text prompt describing the sound to isolate. Optional but recommended - helps the model identify what type of sound to extract from the span.
+   */
+  prompt?: string
+  /**
+   * Acceleration
+   *
+   * The acceleration level to use.
+   */
+  acceleration?: 'fast' | 'balanced' | 'quality'
+  /**
+   * Spans
+   *
+   * Time spans where the target sound occurs which should be isolated.
+   */
+  spans: Array<SchemaAudioTimeSpan>
+  /**
+   * Output Format
+   *
+   * Output audio format.
+   */
+  output_format?: 'wav' | 'mp3'
+  /**
+   * Trim To Span
+   *
+   * Trim output audio to only include the specified span time range. If False, returns the full audio length with the target sound isolated throughout.
+   */
+  trim_to_span?: boolean
+  /**
+   * Audio Url
+   *
+   * URL of the audio file to process.
+   */
+  audio_url: string
+  /**
+   * Reranking Candidates
+   *
+   * Number of candidates to generate and rank. Higher improves quality but increases latency and cost. Requires text prompt; ignored for span-only separation.
+   */
+  reranking_candidates?: number
+}
+
+/**
+ * SAMAudioSeparateOutput
+ *
+ * Output for text-based audio separation.
+ */
+export type SchemaSamAudioSeparateOutput = {
+  /**
+   * Target
+   *
+   * The isolated target sound.
+   */
+  target: SchemaFile
+  /**
+   * Duration
+   *
+   * Duration of the output audio in seconds.
+   */
+  duration: number
+  /**
+   * Sample Rate
+   *
+   * Sample rate of the output audio in Hz.
+   */
+  sample_rate?: number
+  /**
+   * Residual
+   *
+   * Everything else in the audio.
+   */
+  residual: SchemaFile
+}
+
+/**
+ * SAMAudioInput
+ *
+ * Input for text-based audio separation.
+ */
+export type SchemaSamAudioSeparateInput = {
+  /**
+   * Prompt
+   *
+   * Text prompt describing the sound to isolate.
+   */
+  prompt: string
+  /**
+   * Acceleration
+   *
+   * The acceleration level to use.
+   */
+  acceleration?: 'fast' | 'balanced' | 'quality'
+  /**
+   * Audio Url
+   *
+   * URL of the audio file to process (WAV, MP3, FLAC supported)
+   */
+  audio_url: string
+  /**
+   * Predict Spans
+   *
+   * Automatically predict temporal spans where the target sound occurs.
+   */
+  predict_spans?: boolean
+  /**
+   * Output Format
+   *
+   * Output audio format.
+   */
+  output_format?: 'wav' | 'mp3'
+  /**
+   * Reranking Candidates
+   *
+   * Number of candidates to generate and rank. Higher improves quality but increases latency and cost.
+   */
+  reranking_candidates?: number
+}
+
+/**
+ * DeepFilterNetTimings
+ */
+export type SchemaDeepFilterNetTimings = {
+  /**
+   * Postprocess
+   *
+   * Postprocessing time.
+   */
+  postprocess: number
+  /**
+   * Inference
+   *
+   * Inference time.
+   */
+  inference: number
+  /**
+   * Preprocess
+   *
+   * Preprocessing time.
+   */
+  preprocess: number
+}
+
+/**
+ * DeepFilterNet3Output
+ */
+export type SchemaDeepfilternet3Output = {
+  /**
+   * Timings
+   *
+   * Timings for each step in the pipeline.
+   */
+  timings: SchemaDeepFilterNetTimings
+  /**
+   * Audio File
+   *
+   * The audio file that was enhanced.
+   */
+  audio_file: SchemaAudioFile
+}
+
+/**
+ * AudioFile
+ */
+export type SchemaAudioFile = {
+  /**
+   * File Size
+   *
+   * The size of the file in bytes.
+   */
+  file_size?: number
+  /**
+   * Duration
+   *
+   * The duration of the audio
+   */
+  duration?: number
+  /**
+   * File Data
+   *
+   * File data
+   */
+  file_data?: Blob | File
+  /**
+   * Bitrate
+   *
+   * The bitrate of the audio
+   */
+  bitrate?: string
+  /**
+   * Url
+   *
+   * The URL where the file can be downloaded from.
+   */
+  url: string
+  /**
+   * File Name
+   *
+   * The name of the file. It will be auto-generated if not provided.
+   */
+  file_name?: string
+  /**
+   * Sample Rate
+   *
+   * The sample rate of the audio
+   */
+  sample_rate?: number
+  /**
+   * Content Type
+   *
+   * The mime type of the file.
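+   *
+   * @example
+   * // Illustrative sketch only (not part of the generated schema): reading
+   * // metadata from an enhanced file returned by the deepfilternet3 endpoint.
+   * const describe = (out: SchemaDeepfilternet3Output) =>
+   *   `${out.audio_file.url} (${out.audio_file.sample_rate ?? '?'} Hz)`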
+ */ + content_type?: string + /** + * Channels + * + * The number of channels in the audio + */ + channels?: number +} + +/** + * DeepFilterNet3Input + */ +export type SchemaDeepfilternet3Input = { + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Audio Format + * + * The format for the output audio. + */ + audio_format?: 'mp3' | 'aac' | 'm4a' | 'ogg' | 'opus' | 'flac' | 'wav' + /** + * Audio URL + * + * The URL of the audio to enhance. + */ + audio_url: string + /** + * Bitrate + * + * The bitrate of the output audio. + */ + bitrate?: string +} + +/** + * NovaSRTimings + */ +export type SchemaNovaSrTimings = { + /** + * Postprocess + * + * Time taken to postprocess the audio in seconds. + */ + postprocess: number + /** + * Inference + * + * Time taken to run the inference in seconds. + */ + inference: number + /** + * Preprocess + * + * Time taken to preprocess the audio in seconds. + */ + preprocess: number +} + +/** + * NovaSROutput + */ +export type SchemaNovaSrOutput = { + /** + * Timings + * + * Timings for each step in the pipeline. + */ + timings: SchemaNovaSrTimings + /** + * Audio + * + * The enhanced audio file. + */ + audio: SchemaAudioFile +} + +/** + * NovaSRInput + */ +export type SchemaNovaSrInput = { + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Bitrate + * + * The bitrate of the output audio. + */ + bitrate?: string + /** + * Audio URL + * + * The URL of the audio file to enhance. + */ + audio_url: string + /** + * Audio Format + * + * The format for the output audio. + */ + audio_format?: 'mp3' | 'aac' | 'm4a' | 'ogg' | 'opus' | 'flac' | 'wav' +} + +/** + * VoiceChangerOutput + */ +export type SchemaElevenlabsVoiceChangerOutput = { + /** + * Seed + * + * Random seed for reproducibility. + */ + seed: number + audio: SchemaFile +} + +/** + * VoiceChangerRequest + */ +export type SchemaElevenlabsVoiceChangerInput = { + /** + * Voice + * + * The voice to use for speech generation + */ + voice?: string + /** + * Audio Url + * + * The input audio file + */ + audio_url: string + /** + * Seed + * + * Random seed for reproducibility. + */ + seed?: number + /** + * Output Format + * + * Output format of the generated audio. Formatted as codec_sample_rate_bitrate. + */ + output_format?: + | 'mp3_22050_32' + | 'mp3_44100_32' + | 'mp3_44100_64' + | 'mp3_44100_96' + | 'mp3_44100_128' + | 'mp3_44100_192' + | 'pcm_8000' + | 'pcm_16000' + | 'pcm_22050' + | 'pcm_24000' + | 'pcm_44100' + | 'pcm_48000' + | 'ulaw_8000' + | 'alaw_8000' + | 'opus_48000_32' + | 'opus_48000_64' + | 'opus_48000_96' + | 'opus_48000_128' + | 'opus_48000_192' + /** + * Remove Background Noise + * + * If set, will remove the background noise from your audio input using our audio isolation model. + */ + remove_background_noise?: boolean +} + +export type SchemaQueueStatus = { + status: 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED' + /** + * The request id. + */ + request_id: string + /** + * The response url. + */ + response_url?: string + /** + * The status url. + */ + status_url?: string + /** + * The cancel url. + */ + cancel_url?: string + /** + * The logs. + */ + logs?: { + [key: string]: unknown + } + /** + * The metrics. + */ + metrics?: { + [key: string]: unknown + } + /** + * The queue position. 
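+   *
+   * @example
+   * // Illustrative sketch only (not part of the generated file): polling a
+   * // queued request with plain fetch. `statusUrl` comes from the submit
+   * // response; the `Key` Authorization scheme follows the schema's "Fal Key"
+   * // header, and the FAL_KEY env var name is an assumption.
+   * const res = await fetch(statusUrl, {
+   *   headers: { Authorization: `Key ${process.env.FAL_KEY}` },
+   * })
+   * const status: SchemaQueueStatus = await res.json()
+   * if (status.status === 'COMPLETED') {
+   *   // fetch status.response_url for the final result
+   * }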
+ */ + queue_position?: number +} + +export type GetFalAiElevenlabsVoiceChangerRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/elevenlabs/voice-changer/requests/{request_id}/status' +} + +export type GetFalAiElevenlabsVoiceChangerRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiElevenlabsVoiceChangerRequestsByRequestIdStatusResponse = + GetFalAiElevenlabsVoiceChangerRequestsByRequestIdStatusResponses[keyof GetFalAiElevenlabsVoiceChangerRequestsByRequestIdStatusResponses] + +export type PutFalAiElevenlabsVoiceChangerRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/elevenlabs/voice-changer/requests/{request_id}/cancel' +} + +export type PutFalAiElevenlabsVoiceChangerRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiElevenlabsVoiceChangerRequestsByRequestIdCancelResponse = + PutFalAiElevenlabsVoiceChangerRequestsByRequestIdCancelResponses[keyof PutFalAiElevenlabsVoiceChangerRequestsByRequestIdCancelResponses] + +export type PostFalAiElevenlabsVoiceChangerData = { + body: SchemaElevenlabsVoiceChangerInput + path?: never + query?: never + url: '/fal-ai/elevenlabs/voice-changer' +} + +export type PostFalAiElevenlabsVoiceChangerResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiElevenlabsVoiceChangerResponse = + PostFalAiElevenlabsVoiceChangerResponses[keyof PostFalAiElevenlabsVoiceChangerResponses] + +export type GetFalAiElevenlabsVoiceChangerRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/elevenlabs/voice-changer/requests/{request_id}' +} + +export type GetFalAiElevenlabsVoiceChangerRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaElevenlabsVoiceChangerOutput +} + +export type GetFalAiElevenlabsVoiceChangerRequestsByRequestIdResponse = + GetFalAiElevenlabsVoiceChangerRequestsByRequestIdResponses[keyof GetFalAiElevenlabsVoiceChangerRequestsByRequestIdResponses] + +export type GetFalAiNovaSrRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/nova-sr/requests/{request_id}/status' +} + +export type GetFalAiNovaSrRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiNovaSrRequestsByRequestIdStatusResponse = + GetFalAiNovaSrRequestsByRequestIdStatusResponses[keyof GetFalAiNovaSrRequestsByRequestIdStatusResponses] + +export type PutFalAiNovaSrRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/nova-sr/requests/{request_id}/cancel' +} + +export type PutFalAiNovaSrRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
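+   *
+   * @example
+   * // Illustrative sketch only: cancelling a queued nova-sr request. The
+   * // queue.fal.run base URL and `requestId` value are assumptions, not part
+   * // of this generated file.
+   * await fetch(
+   *   `https://queue.fal.run/fal-ai/nova-sr/requests/${requestId}/cancel`,
+   *   { method: 'PUT', headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+   * )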
+ */ + success?: boolean + } +} + +export type PutFalAiNovaSrRequestsByRequestIdCancelResponse = + PutFalAiNovaSrRequestsByRequestIdCancelResponses[keyof PutFalAiNovaSrRequestsByRequestIdCancelResponses] + +export type PostFalAiNovaSrData = { + body: SchemaNovaSrInput + path?: never + query?: never + url: '/fal-ai/nova-sr' +} + +export type PostFalAiNovaSrResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiNovaSrResponse = + PostFalAiNovaSrResponses[keyof PostFalAiNovaSrResponses] + +export type GetFalAiNovaSrRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/nova-sr/requests/{request_id}' +} + +export type GetFalAiNovaSrRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaNovaSrOutput +} + +export type GetFalAiNovaSrRequestsByRequestIdResponse = + GetFalAiNovaSrRequestsByRequestIdResponses[keyof GetFalAiNovaSrRequestsByRequestIdResponses] + +export type GetFalAiDeepfilternet3RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/deepfilternet3/requests/{request_id}/status' +} + +export type GetFalAiDeepfilternet3RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiDeepfilternet3RequestsByRequestIdStatusResponse = + GetFalAiDeepfilternet3RequestsByRequestIdStatusResponses[keyof GetFalAiDeepfilternet3RequestsByRequestIdStatusResponses] + +export type PutFalAiDeepfilternet3RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/deepfilternet3/requests/{request_id}/cancel' +} + +export type PutFalAiDeepfilternet3RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiDeepfilternet3RequestsByRequestIdCancelResponse = + PutFalAiDeepfilternet3RequestsByRequestIdCancelResponses[keyof PutFalAiDeepfilternet3RequestsByRequestIdCancelResponses] + +export type PostFalAiDeepfilternet3Data = { + body: SchemaDeepfilternet3Input + path?: never + query?: never + url: '/fal-ai/deepfilternet3' +} + +export type PostFalAiDeepfilternet3Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiDeepfilternet3Response = + PostFalAiDeepfilternet3Responses[keyof PostFalAiDeepfilternet3Responses] + +export type GetFalAiDeepfilternet3RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/deepfilternet3/requests/{request_id}' +} + +export type GetFalAiDeepfilternet3RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaDeepfilternet3Output +} + +export type GetFalAiDeepfilternet3RequestsByRequestIdResponse = + GetFalAiDeepfilternet3RequestsByRequestIdResponses[keyof GetFalAiDeepfilternet3RequestsByRequestIdResponses] + +export type GetFalAiSamAudioSeparateRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/sam-audio/separate/requests/{request_id}/status' +} + +export type GetFalAiSamAudioSeparateRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSamAudioSeparateRequestsByRequestIdStatusResponse = + GetFalAiSamAudioSeparateRequestsByRequestIdStatusResponses[keyof GetFalAiSamAudioSeparateRequestsByRequestIdStatusResponses] + +export type PutFalAiSamAudioSeparateRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sam-audio/separate/requests/{request_id}/cancel' +} + +export type PutFalAiSamAudioSeparateRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiSamAudioSeparateRequestsByRequestIdCancelResponse = + PutFalAiSamAudioSeparateRequestsByRequestIdCancelResponses[keyof PutFalAiSamAudioSeparateRequestsByRequestIdCancelResponses] + +export type PostFalAiSamAudioSeparateData = { + body: SchemaSamAudioSeparateInput + path?: never + query?: never + url: '/fal-ai/sam-audio/separate' +} + +export type PostFalAiSamAudioSeparateResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSamAudioSeparateResponse = + PostFalAiSamAudioSeparateResponses[keyof PostFalAiSamAudioSeparateResponses] + +export type GetFalAiSamAudioSeparateRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sam-audio/separate/requests/{request_id}' +} + +export type GetFalAiSamAudioSeparateRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSamAudioSeparateOutput +} + +export type GetFalAiSamAudioSeparateRequestsByRequestIdResponse = + GetFalAiSamAudioSeparateRequestsByRequestIdResponses[keyof GetFalAiSamAudioSeparateRequestsByRequestIdResponses] + +export type GetFalAiSamAudioSpanSeparateRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/sam-audio/span-separate/requests/{request_id}/status' +} + +export type GetFalAiSamAudioSpanSeparateRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSamAudioSpanSeparateRequestsByRequestIdStatusResponse = + GetFalAiSamAudioSpanSeparateRequestsByRequestIdStatusResponses[keyof GetFalAiSamAudioSpanSeparateRequestsByRequestIdStatusResponses] + +export type PutFalAiSamAudioSpanSeparateRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sam-audio/span-separate/requests/{request_id}/cancel' +} + +export type PutFalAiSamAudioSpanSeparateRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiSamAudioSpanSeparateRequestsByRequestIdCancelResponse = + PutFalAiSamAudioSpanSeparateRequestsByRequestIdCancelResponses[keyof PutFalAiSamAudioSpanSeparateRequestsByRequestIdCancelResponses] + +export type PostFalAiSamAudioSpanSeparateData = { + body: SchemaSamAudioSpanSeparateInput + path?: never + query?: never + url: '/fal-ai/sam-audio/span-separate' +} + +export type PostFalAiSamAudioSpanSeparateResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSamAudioSpanSeparateResponse = + PostFalAiSamAudioSpanSeparateResponses[keyof PostFalAiSamAudioSpanSeparateResponses] + +export type GetFalAiSamAudioSpanSeparateRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sam-audio/span-separate/requests/{request_id}' +} + +export type GetFalAiSamAudioSpanSeparateRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSamAudioSpanSeparateOutput +} + +export type GetFalAiSamAudioSpanSeparateRequestsByRequestIdResponse = + GetFalAiSamAudioSpanSeparateRequestsByRequestIdResponses[keyof GetFalAiSamAudioSpanSeparateRequestsByRequestIdResponses] + +export type GetFalAiFfmpegApiMergeAudiosRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ffmpeg-api/merge-audios/requests/{request_id}/status' +} + +export type GetFalAiFfmpegApiMergeAudiosRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFfmpegApiMergeAudiosRequestsByRequestIdStatusResponse = + GetFalAiFfmpegApiMergeAudiosRequestsByRequestIdStatusResponses[keyof GetFalAiFfmpegApiMergeAudiosRequestsByRequestIdStatusResponses] + +export type PutFalAiFfmpegApiMergeAudiosRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ffmpeg-api/merge-audios/requests/{request_id}/cancel' +} + +export type PutFalAiFfmpegApiMergeAudiosRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFfmpegApiMergeAudiosRequestsByRequestIdCancelResponse = + PutFalAiFfmpegApiMergeAudiosRequestsByRequestIdCancelResponses[keyof PutFalAiFfmpegApiMergeAudiosRequestsByRequestIdCancelResponses] + +export type PostFalAiFfmpegApiMergeAudiosData = { + body: SchemaFfmpegApiMergeAudiosInput + path?: never + query?: never + url: '/fal-ai/ffmpeg-api/merge-audios' +} + +export type PostFalAiFfmpegApiMergeAudiosResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFfmpegApiMergeAudiosResponse = + PostFalAiFfmpegApiMergeAudiosResponses[keyof PostFalAiFfmpegApiMergeAudiosResponses] + +export type GetFalAiFfmpegApiMergeAudiosRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ffmpeg-api/merge-audios/requests/{request_id}' +} + +export type GetFalAiFfmpegApiMergeAudiosRequestsByRequestIdResponses = { + /** + * Result of the request. 
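+   *
+   * @example
+   * // Illustrative sketch only: the kind of merge-audios body this cancel
+   * // endpoint would abort (URLs hypothetical). Files are merged in array
+   * // order per the schema description.
+   * const body: SchemaFfmpegApiMergeAudiosInput = {
+   *   audio_urls: ['https://example.com/a.mp3', 'https://example.com/b.mp3'],
+   *   output_format: 'mp3_44100_128',
+   * }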
+ */ + 200: SchemaFfmpegApiMergeAudiosOutput +} + +export type GetFalAiFfmpegApiMergeAudiosRequestsByRequestIdResponse = + GetFalAiFfmpegApiMergeAudiosRequestsByRequestIdResponses[keyof GetFalAiFfmpegApiMergeAudiosRequestsByRequestIdResponses] + +export type GetFalAiKlingVideoCreateVoiceRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-video/create-voice/requests/{request_id}/status' +} + +export type GetFalAiKlingVideoCreateVoiceRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiKlingVideoCreateVoiceRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoCreateVoiceRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoCreateVoiceRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoCreateVoiceRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/create-voice/requests/{request_id}/cancel' +} + +export type PutFalAiKlingVideoCreateVoiceRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiKlingVideoCreateVoiceRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoCreateVoiceRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoCreateVoiceRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoCreateVoiceData = { + body: SchemaKlingVideoCreateVoiceInput + path?: never + query?: never + url: '/fal-ai/kling-video/create-voice' +} + +export type PostFalAiKlingVideoCreateVoiceResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoCreateVoiceResponse = + PostFalAiKlingVideoCreateVoiceResponses[keyof PostFalAiKlingVideoCreateVoiceResponses] + +export type GetFalAiKlingVideoCreateVoiceRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/create-voice/requests/{request_id}' +} + +export type GetFalAiKlingVideoCreateVoiceRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaKlingVideoCreateVoiceOutput +} + +export type GetFalAiKlingVideoCreateVoiceRequestsByRequestIdResponse = + GetFalAiKlingVideoCreateVoiceRequestsByRequestIdResponses[keyof GetFalAiKlingVideoCreateVoiceRequestsByRequestIdResponses] + +export type GetFalAiDemucsRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/demucs/requests/{request_id}/status' +} + +export type GetFalAiDemucsRequestsByRequestIdStatusResponses = { + /** + * The request status. 
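+   *
+   * @example
+   * // Illustrative sketch only: the kind of body submitted to POST
+   * // /fal-ai/demucs before polling this status endpoint. Guitar and piano
+   * // stems require one of the 6s models; the URL is hypothetical.
+   * const body: SchemaDemucsInput = {
+   *   audio_url: 'https://example.com/track.mp3',
+   *   model: 'htdemucs_6s',
+   *   stems: ['vocals', 'guitar', 'piano'],
+   * }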
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiDemucsRequestsByRequestIdStatusResponse = + GetFalAiDemucsRequestsByRequestIdStatusResponses[keyof GetFalAiDemucsRequestsByRequestIdStatusResponses] + +export type PutFalAiDemucsRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/demucs/requests/{request_id}/cancel' +} + +export type PutFalAiDemucsRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiDemucsRequestsByRequestIdCancelResponse = + PutFalAiDemucsRequestsByRequestIdCancelResponses[keyof PutFalAiDemucsRequestsByRequestIdCancelResponses] + +export type PostFalAiDemucsData = { + body: SchemaDemucsInput + path?: never + query?: never + url: '/fal-ai/demucs' +} + +export type PostFalAiDemucsResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiDemucsResponse = + PostFalAiDemucsResponses[keyof PostFalAiDemucsResponses] + +export type GetFalAiDemucsRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/demucs/requests/{request_id}' +} + +export type GetFalAiDemucsRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaDemucsOutput +} + +export type GetFalAiDemucsRequestsByRequestIdResponse = + GetFalAiDemucsRequestsByRequestIdResponses[keyof GetFalAiDemucsRequestsByRequestIdResponses] + +export type GetFalAiAudioUnderstandingRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/audio-understanding/requests/{request_id}/status' +} + +export type GetFalAiAudioUnderstandingRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiAudioUnderstandingRequestsByRequestIdStatusResponse = + GetFalAiAudioUnderstandingRequestsByRequestIdStatusResponses[keyof GetFalAiAudioUnderstandingRequestsByRequestIdStatusResponses] + +export type PutFalAiAudioUnderstandingRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/audio-understanding/requests/{request_id}/cancel' +} + +export type PutFalAiAudioUnderstandingRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiAudioUnderstandingRequestsByRequestIdCancelResponse = + PutFalAiAudioUnderstandingRequestsByRequestIdCancelResponses[keyof PutFalAiAudioUnderstandingRequestsByRequestIdCancelResponses] + +export type PostFalAiAudioUnderstandingData = { + body: SchemaAudioUnderstandingInput + path?: never + query?: never + url: '/fal-ai/audio-understanding' +} + +export type PostFalAiAudioUnderstandingResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiAudioUnderstandingResponse = + PostFalAiAudioUnderstandingResponses[keyof PostFalAiAudioUnderstandingResponses] + +export type GetFalAiAudioUnderstandingRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/audio-understanding/requests/{request_id}' +} + +export type GetFalAiAudioUnderstandingRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaAudioUnderstandingOutput +} + +export type GetFalAiAudioUnderstandingRequestsByRequestIdResponse = + GetFalAiAudioUnderstandingRequestsByRequestIdResponses[keyof GetFalAiAudioUnderstandingRequestsByRequestIdResponses] + +export type GetFalAiStableAudio25AudioToAudioRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/stable-audio-25/audio-to-audio/requests/{request_id}/status' +} + +export type GetFalAiStableAudio25AudioToAudioRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiStableAudio25AudioToAudioRequestsByRequestIdStatusResponse = + GetFalAiStableAudio25AudioToAudioRequestsByRequestIdStatusResponses[keyof GetFalAiStableAudio25AudioToAudioRequestsByRequestIdStatusResponses] + +export type PutFalAiStableAudio25AudioToAudioRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/stable-audio-25/audio-to-audio/requests/{request_id}/cancel' +} + +export type PutFalAiStableAudio25AudioToAudioRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiStableAudio25AudioToAudioRequestsByRequestIdCancelResponse = + PutFalAiStableAudio25AudioToAudioRequestsByRequestIdCancelResponses[keyof PutFalAiStableAudio25AudioToAudioRequestsByRequestIdCancelResponses] + +export type PostFalAiStableAudio25AudioToAudioData = { + body: SchemaStableAudio25AudioToAudioInput + path?: never + query?: never + url: '/fal-ai/stable-audio-25/audio-to-audio' +} + +export type PostFalAiStableAudio25AudioToAudioResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiStableAudio25AudioToAudioResponse = + PostFalAiStableAudio25AudioToAudioResponses[keyof PostFalAiStableAudio25AudioToAudioResponses] + +export type GetFalAiStableAudio25AudioToAudioRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/stable-audio-25/audio-to-audio/requests/{request_id}' +} + +export type GetFalAiStableAudio25AudioToAudioRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaStableAudio25AudioToAudioOutput +} + +export type GetFalAiStableAudio25AudioToAudioRequestsByRequestIdResponse = + GetFalAiStableAudio25AudioToAudioRequestsByRequestIdResponses[keyof GetFalAiStableAudio25AudioToAudioRequestsByRequestIdResponses] + +export type GetFalAiStableAudio25InpaintRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/stable-audio-25/inpaint/requests/{request_id}/status' +} + +export type GetFalAiStableAudio25InpaintRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiStableAudio25InpaintRequestsByRequestIdStatusResponse = + GetFalAiStableAudio25InpaintRequestsByRequestIdStatusResponses[keyof GetFalAiStableAudio25InpaintRequestsByRequestIdStatusResponses] + +export type PutFalAiStableAudio25InpaintRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/stable-audio-25/inpaint/requests/{request_id}/cancel' +} + +export type PutFalAiStableAudio25InpaintRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiStableAudio25InpaintRequestsByRequestIdCancelResponse = + PutFalAiStableAudio25InpaintRequestsByRequestIdCancelResponses[keyof PutFalAiStableAudio25InpaintRequestsByRequestIdCancelResponses] + +export type PostFalAiStableAudio25InpaintData = { + body: SchemaStableAudio25InpaintInput + path?: never + query?: never + url: '/fal-ai/stable-audio-25/inpaint' +} + +export type PostFalAiStableAudio25InpaintResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiStableAudio25InpaintResponse = + PostFalAiStableAudio25InpaintResponses[keyof PostFalAiStableAudio25InpaintResponses] + +export type GetFalAiStableAudio25InpaintRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/stable-audio-25/inpaint/requests/{request_id}' +} + +export type GetFalAiStableAudio25InpaintRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaStableAudio25InpaintOutput +} + +export type GetFalAiStableAudio25InpaintRequestsByRequestIdResponse = + GetFalAiStableAudio25InpaintRequestsByRequestIdResponses[keyof GetFalAiStableAudio25InpaintRequestsByRequestIdResponses] + +export type GetSonautoV2ExtendRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/sonauto/v2/extend/requests/{request_id}/status' +} + +export type GetSonautoV2ExtendRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetSonautoV2ExtendRequestsByRequestIdStatusResponse = + GetSonautoV2ExtendRequestsByRequestIdStatusResponses[keyof GetSonautoV2ExtendRequestsByRequestIdStatusResponses] + +export type PutSonautoV2ExtendRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/sonauto/v2/extend/requests/{request_id}/cancel' +} + +export type PutSonautoV2ExtendRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
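+   *
+   * @example
+   * // Illustrative sketch only: a minimal body for POST /sonauto/v2/extend.
+   * // Only `audio_url` and `side` are required by the input type; the URL
+   * // and tags are hypothetical.
+   * const body: SchemaV2ExtendInput = {
+   *   audio_url: 'https://example.com/song.mp3',
+   *   side: 'right',
+   *   tags: ['pop', 'upbeat'],
+   * }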
+ */ + success?: boolean + } +} + +export type PutSonautoV2ExtendRequestsByRequestIdCancelResponse = + PutSonautoV2ExtendRequestsByRequestIdCancelResponses[keyof PutSonautoV2ExtendRequestsByRequestIdCancelResponses] + +export type PostSonautoV2ExtendData = { + body: SchemaV2ExtendInput + path?: never + query?: never + url: '/sonauto/v2/extend' +} + +export type PostSonautoV2ExtendResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostSonautoV2ExtendResponse = + PostSonautoV2ExtendResponses[keyof PostSonautoV2ExtendResponses] + +export type GetSonautoV2ExtendRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/sonauto/v2/extend/requests/{request_id}' +} + +export type GetSonautoV2ExtendRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaV2ExtendOutput +} + +export type GetSonautoV2ExtendRequestsByRequestIdResponse = + GetSonautoV2ExtendRequestsByRequestIdResponses[keyof GetSonautoV2ExtendRequestsByRequestIdResponses] + +export type GetFalAiAceStepAudioOutpaintRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ace-step/audio-outpaint/requests/{request_id}/status' +} + +export type GetFalAiAceStepAudioOutpaintRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiAceStepAudioOutpaintRequestsByRequestIdStatusResponse = + GetFalAiAceStepAudioOutpaintRequestsByRequestIdStatusResponses[keyof GetFalAiAceStepAudioOutpaintRequestsByRequestIdStatusResponses] + +export type PutFalAiAceStepAudioOutpaintRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ace-step/audio-outpaint/requests/{request_id}/cancel' +} + +export type PutFalAiAceStepAudioOutpaintRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiAceStepAudioOutpaintRequestsByRequestIdCancelResponse = + PutFalAiAceStepAudioOutpaintRequestsByRequestIdCancelResponses[keyof PutFalAiAceStepAudioOutpaintRequestsByRequestIdCancelResponses] + +export type PostFalAiAceStepAudioOutpaintData = { + body: SchemaAceStepAudioOutpaintInput + path?: never + query?: never + url: '/fal-ai/ace-step/audio-outpaint' +} + +export type PostFalAiAceStepAudioOutpaintResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiAceStepAudioOutpaintResponse = + PostFalAiAceStepAudioOutpaintResponses[keyof PostFalAiAceStepAudioOutpaintResponses] + +export type GetFalAiAceStepAudioOutpaintRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ace-step/audio-outpaint/requests/{request_id}' +} + +export type GetFalAiAceStepAudioOutpaintRequestsByRequestIdResponses = { + /** + * Result of the request. 
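+   *
+   * @example
+   * // Illustrative sketch only: extending a track by 30s at the end via
+   * // POST /fal-ai/ace-step/audio-outpaint. Only `tags` and `audio_url` are
+   * // required; the URL and tag values are hypothetical.
+   * const body: SchemaAceStepAudioOutpaintInput = {
+   *   audio_url: 'https://example.com/track.wav',
+   *   tags: 'lofi, chill, ambient',
+   *   extend_after_duration: 30,
+   * }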
+ */ + 200: SchemaAceStepAudioOutpaintOutput +} + +export type GetFalAiAceStepAudioOutpaintRequestsByRequestIdResponse = + GetFalAiAceStepAudioOutpaintRequestsByRequestIdResponses[keyof GetFalAiAceStepAudioOutpaintRequestsByRequestIdResponses] + +export type GetFalAiAceStepAudioInpaintRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ace-step/audio-inpaint/requests/{request_id}/status' +} + +export type GetFalAiAceStepAudioInpaintRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiAceStepAudioInpaintRequestsByRequestIdStatusResponse = + GetFalAiAceStepAudioInpaintRequestsByRequestIdStatusResponses[keyof GetFalAiAceStepAudioInpaintRequestsByRequestIdStatusResponses] + +export type PutFalAiAceStepAudioInpaintRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ace-step/audio-inpaint/requests/{request_id}/cancel' +} + +export type PutFalAiAceStepAudioInpaintRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiAceStepAudioInpaintRequestsByRequestIdCancelResponse = + PutFalAiAceStepAudioInpaintRequestsByRequestIdCancelResponses[keyof PutFalAiAceStepAudioInpaintRequestsByRequestIdCancelResponses] + +export type PostFalAiAceStepAudioInpaintData = { + body: SchemaAceStepAudioInpaintInput + path?: never + query?: never + url: '/fal-ai/ace-step/audio-inpaint' +} + +export type PostFalAiAceStepAudioInpaintResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiAceStepAudioInpaintResponse = + PostFalAiAceStepAudioInpaintResponses[keyof PostFalAiAceStepAudioInpaintResponses] + +export type GetFalAiAceStepAudioInpaintRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ace-step/audio-inpaint/requests/{request_id}' +} + +export type GetFalAiAceStepAudioInpaintRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaAceStepAudioInpaintOutput +} + +export type GetFalAiAceStepAudioInpaintRequestsByRequestIdResponse = + GetFalAiAceStepAudioInpaintRequestsByRequestIdResponses[keyof GetFalAiAceStepAudioInpaintRequestsByRequestIdResponses] + +export type GetFalAiAceStepAudioToAudioRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ace-step/audio-to-audio/requests/{request_id}/status' +} + +export type GetFalAiAceStepAudioToAudioRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiAceStepAudioToAudioRequestsByRequestIdStatusResponse = + GetFalAiAceStepAudioToAudioRequestsByRequestIdStatusResponses[keyof GetFalAiAceStepAudioToAudioRequestsByRequestIdStatusResponses] + +export type PutFalAiAceStepAudioToAudioRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ace-step/audio-to-audio/requests/{request_id}/cancel' +} + +export type PutFalAiAceStepAudioToAudioRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiAceStepAudioToAudioRequestsByRequestIdCancelResponse = + PutFalAiAceStepAudioToAudioRequestsByRequestIdCancelResponses[keyof PutFalAiAceStepAudioToAudioRequestsByRequestIdCancelResponses] + +export type PostFalAiAceStepAudioToAudioData = { + body: SchemaAceStepAudioToAudioInput + path?: never + query?: never + url: '/fal-ai/ace-step/audio-to-audio' +} + +export type PostFalAiAceStepAudioToAudioResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiAceStepAudioToAudioResponse = + PostFalAiAceStepAudioToAudioResponses[keyof PostFalAiAceStepAudioToAudioResponses] + +export type GetFalAiAceStepAudioToAudioRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ace-step/audio-to-audio/requests/{request_id}' +} + +export type GetFalAiAceStepAudioToAudioRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaAceStepAudioToAudioOutput +} + +export type GetFalAiAceStepAudioToAudioRequestsByRequestIdResponse = + GetFalAiAceStepAudioToAudioRequestsByRequestIdResponses[keyof GetFalAiAceStepAudioToAudioRequestsByRequestIdResponses] + +export type GetFalAiDiaTtsVoiceCloneRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/dia-tts/voice-clone/requests/{request_id}/status' +} + +export type GetFalAiDiaTtsVoiceCloneRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiDiaTtsVoiceCloneRequestsByRequestIdStatusResponse = + GetFalAiDiaTtsVoiceCloneRequestsByRequestIdStatusResponses[keyof GetFalAiDiaTtsVoiceCloneRequestsByRequestIdStatusResponses] + +export type PutFalAiDiaTtsVoiceCloneRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/dia-tts/voice-clone/requests/{request_id}/cancel' +} + +export type PutFalAiDiaTtsVoiceCloneRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiDiaTtsVoiceCloneRequestsByRequestIdCancelResponse = + PutFalAiDiaTtsVoiceCloneRequestsByRequestIdCancelResponses[keyof PutFalAiDiaTtsVoiceCloneRequestsByRequestIdCancelResponses] + +export type PostFalAiDiaTtsVoiceCloneData = { + body: SchemaDiaTtsVoiceCloneInput + path?: never + query?: never + url: '/fal-ai/dia-tts/voice-clone' +} + +export type PostFalAiDiaTtsVoiceCloneResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiDiaTtsVoiceCloneResponse = + PostFalAiDiaTtsVoiceCloneResponses[keyof PostFalAiDiaTtsVoiceCloneResponses] + +export type GetFalAiDiaTtsVoiceCloneRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/dia-tts/voice-clone/requests/{request_id}' +} + +export type GetFalAiDiaTtsVoiceCloneRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaDiaTtsVoiceCloneOutput +} + +export type GetFalAiDiaTtsVoiceCloneRequestsByRequestIdResponse = + GetFalAiDiaTtsVoiceCloneRequestsByRequestIdResponses[keyof GetFalAiDiaTtsVoiceCloneRequestsByRequestIdResponses] + +export type GetFalAiElevenlabsAudioIsolationRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/elevenlabs/audio-isolation/requests/{request_id}/status' +} + +export type GetFalAiElevenlabsAudioIsolationRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiElevenlabsAudioIsolationRequestsByRequestIdStatusResponse = + GetFalAiElevenlabsAudioIsolationRequestsByRequestIdStatusResponses[keyof GetFalAiElevenlabsAudioIsolationRequestsByRequestIdStatusResponses] + +export type PutFalAiElevenlabsAudioIsolationRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/elevenlabs/audio-isolation/requests/{request_id}/cancel' +} + +export type PutFalAiElevenlabsAudioIsolationRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiElevenlabsAudioIsolationRequestsByRequestIdCancelResponse = + PutFalAiElevenlabsAudioIsolationRequestsByRequestIdCancelResponses[keyof PutFalAiElevenlabsAudioIsolationRequestsByRequestIdCancelResponses] + +export type PostFalAiElevenlabsAudioIsolationData = { + body: SchemaElevenlabsAudioIsolationInput + path?: never + query?: never + url: '/fal-ai/elevenlabs/audio-isolation' +} + +export type PostFalAiElevenlabsAudioIsolationResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiElevenlabsAudioIsolationResponse = + PostFalAiElevenlabsAudioIsolationResponses[keyof PostFalAiElevenlabsAudioIsolationResponses] + +export type GetFalAiElevenlabsAudioIsolationRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/elevenlabs/audio-isolation/requests/{request_id}' +} + +export type GetFalAiElevenlabsAudioIsolationRequestsByRequestIdResponses = { + /** + * Result of the request. 
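+   *
+   * @example
+   * // Illustrative sketch only: the isolated voice track is returned as a
+   * // SchemaFile, whose `url` field points at the downloadable audio.
+   * const isolatedUrl = (out: SchemaElevenlabsAudioIsolationOutput) =>
+   *   out.audio.url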
+ */ + 200: SchemaElevenlabsAudioIsolationOutput +} + +export type GetFalAiElevenlabsAudioIsolationRequestsByRequestIdResponse = + GetFalAiElevenlabsAudioIsolationRequestsByRequestIdResponses[keyof GetFalAiElevenlabsAudioIsolationRequestsByRequestIdResponses] diff --git a/packages/typescript/ai-fal/src/generated/audio-to-audio/zod.gen.ts b/packages/typescript/ai-fal/src/generated/audio-to-audio/zod.gen.ts new file mode 100644 index 00000000..e9e515a2 --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/audio-to-audio/zod.gen.ts @@ -0,0 +1,2601 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { z } from 'zod' + +/** + * File + */ +export const zSchemaFile = z.object({ + file_size: z.optional(z.union([z.int(), z.unknown()])), + file_name: z.optional(z.union([z.string(), z.unknown()])), + content_type: z.optional(z.union([z.string(), z.unknown()])), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), +}) + +/** + * TTSOutput + */ +export const zSchemaElevenlabsAudioIsolationOutput = z.object({ + audio: zSchemaFile, + timestamps: z.optional(z.union([z.array(z.unknown()), z.unknown()])), +}) + +/** + * AudioIsolationRequest + */ +export const zSchemaElevenlabsAudioIsolationInput = z.object({ + video_url: z.optional(z.union([z.string(), z.unknown()])), + audio_url: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * DiaCloneOutput + */ +export const zSchemaDiaTtsVoiceCloneOutput = z.object({ + audio: zSchemaFile, +}) + +/** + * CloneRequest + */ +export const zSchemaDiaTtsVoiceCloneInput = z.object({ + text: z.string().register(z.globalRegistry, { + description: 'The text to be converted to speech.', + }), + ref_text: z.string().register(z.globalRegistry, { + description: 'The reference text to be used for TTS.', + }), + ref_audio_url: z.string().register(z.globalRegistry, { + description: 'The URL of the reference audio file.', + }), +}) + +/** + * ACEStepAudioToAudioResponse + */ +export const zSchemaAceStepAudioToAudioOutput = z.object({ + tags: z.string().register(z.globalRegistry, { + description: 'The genre tags used in the generation process.', + }), + lyrics: z.string().register(z.globalRegistry, { + description: 'The lyrics used in the generation process.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The random seed used for the generation process.', + }), + audio: zSchemaFile, +}) + +/** + * ACEStepAudioToAudioRequest + */ +export const zSchemaAceStepAudioToAudioInput = z.object({ + number_of_steps: z + .optional( + z.int().gte(3).lte(60).register(z.globalRegistry, { + description: 'Number of steps to generate the audio.', + }), + ) + .default(27), + tags: z.string().register(z.globalRegistry, { + description: + 'Comma-separated list of genre tags to control the style of the generated audio.', + }), + minimum_guidance_scale: z + .optional( + z.number().gte(0).lte(200).register(z.globalRegistry, { + description: + 'Minimum guidance scale for the generation after the decay.', + }), + ) + .default(3), + lyrics: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Lyrics to be sung in the audio. If not provided or if [inst] or [instrumental] is the content of this field, no lyrics will be sung. 
Use control structures like [verse], [chorus] and [bridge] to control the structure of the song.', + }), + ) + .default(''), + tag_guidance_scale: z + .optional( + z.number().gte(0).lte(10).register(z.globalRegistry, { + description: 'Tag guidance scale for the generation.', + }), + ) + .default(5), + original_lyrics: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Original lyrics of the audio file.', + }), + ) + .default(''), + scheduler: z.optional( + z.enum(['euler', 'heun']).register(z.globalRegistry, { + description: 'Scheduler to use for the generation process.', + }), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(200).register(z.globalRegistry, { + description: 'Guidance scale for the generation.', + }), + ) + .default(15), + guidance_type: z.optional( + z.enum(['cfg', 'apg', 'cfg_star']).register(z.globalRegistry, { + description: 'Type of CFG to use for the generation process.', + }), + ), + lyric_guidance_scale: z + .optional( + z.number().gte(0).lte(10).register(z.globalRegistry, { + description: 'Lyric guidance scale for the generation.', + }), + ) + .default(1.5), + guidance_interval: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Guidance interval for the generation. 0.5 means only apply guidance in the middle steps (0.25 * infer_steps to 0.75 * infer_steps)', + }), + ) + .default(0.5), + edit_mode: z.optional( + z.enum(['lyrics', 'remix']).register(z.globalRegistry, { + description: 'Whether to edit the lyrics only or remix the audio.', + }), + ), + guidance_interval_decay: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Guidance interval decay for the generation. Guidance scale will decay from guidance_scale to min_guidance_scale in the interval. 0.0 means no decay.', + }), + ) + .default(0), + audio_url: z.string().register(z.globalRegistry, { + description: 'URL of the audio file to be outpainted.', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If not provided, a random seed will be used.', + }), + ), + granularity_scale: z + .optional( + z.int().gte(-100).lte(100).register(z.globalRegistry, { + description: + 'Granularity scale for the generation process. 
Higher values can reduce artifacts.', + }), + ) + .default(10), + original_tags: z.string().register(z.globalRegistry, { + description: 'Original tags of the audio file.', + }), + original_seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Original seed of the audio file.', + }), + ), +}) + +/** + * ACEStepAudioInpaintResponse + */ +export const zSchemaAceStepAudioInpaintOutput = z.object({ + tags: z.string().register(z.globalRegistry, { + description: 'The genre tags used in the generation process.', + }), + lyrics: z.string().register(z.globalRegistry, { + description: 'The lyrics used in the generation process.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The random seed used for the generation process.', + }), + audio: zSchemaFile, +}) + +/** + * ACEStepAudioInpaintRequest + */ +export const zSchemaAceStepAudioInpaintInput = z.object({ + number_of_steps: z + .optional( + z.int().gte(3).lte(60).register(z.globalRegistry, { + description: 'Number of steps to generate the audio.', + }), + ) + .default(27), + start_time: z + .optional( + z.number().gte(0).lte(240).register(z.globalRegistry, { + description: 'start time in seconds for the inpainting process.', + }), + ) + .default(0), + tags: z.string().register(z.globalRegistry, { + description: + 'Comma-separated list of genre tags to control the style of the generated audio.', + }), + minimum_guidance_scale: z + .optional( + z.number().gte(0).lte(200).register(z.globalRegistry, { + description: + 'Minimum guidance scale for the generation after the decay.', + }), + ) + .default(3), + lyrics: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Lyrics to be sung in the audio. If not provided or if [inst] or [instrumental] is the content of this field, no lyrics will be sung. Use control structures like [verse], [chorus] and [bridge] to control the structure of the song.', + }), + ) + .default(''), + end_time_relative_to: z.optional( + z.enum(['start', 'end']).register(z.globalRegistry, { + description: + 'Whether the end time is relative to the start or end of the audio.', + }), + ), + tag_guidance_scale: z + .optional( + z.number().gte(0).lte(10).register(z.globalRegistry, { + description: 'Tag guidance scale for the generation.', + }), + ) + .default(5), + scheduler: z.optional( + z.enum(['euler', 'heun']).register(z.globalRegistry, { + description: 'Scheduler to use for the generation process.', + }), + ), + end_time: z + .optional( + z.number().gte(0).lte(240).register(z.globalRegistry, { + description: 'end time in seconds for the inpainting process.', + }), + ) + .default(30), + guidance_type: z.optional( + z.enum(['cfg', 'apg', 'cfg_star']).register(z.globalRegistry, { + description: 'Type of CFG to use for the generation process.', + }), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(200).register(z.globalRegistry, { + description: 'Guidance scale for the generation.', + }), + ) + .default(15), + lyric_guidance_scale: z + .optional( + z.number().gte(0).lte(10).register(z.globalRegistry, { + description: 'Lyric guidance scale for the generation.', + }), + ) + .default(1.5), + guidance_interval: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Guidance interval for the generation. 
0.5 means only apply guidance in the middle steps (0.25 * infer_steps to 0.75 * infer_steps)', + }), + ) + .default(0.5), + variance: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Variance for the inpainting process. Higher values can lead to more diverse results.', + }), + ) + .default(0.5), + guidance_interval_decay: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Guidance interval decay for the generation. Guidance scale will decay from guidance_scale to min_guidance_scale in the interval. 0.0 means no decay.', + }), + ) + .default(0), + start_time_relative_to: z.optional( + z.enum(['start', 'end']).register(z.globalRegistry, { + description: + 'Whether the start time is relative to the start or end of the audio.', + }), + ), + audio_url: z.string().register(z.globalRegistry, { + description: 'URL of the audio file to be inpainted.', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If not provided, a random seed will be used.', + }), + ), + granularity_scale: z + .optional( + z.int().gte(-100).lte(100).register(z.globalRegistry, { + description: + 'Granularity scale for the generation process. Higher values can reduce artifacts.', + }), + ) + .default(10), +}) + +/** + * ACEStepResponse + */ +export const zSchemaAceStepAudioOutpaintOutput = z.object({ + tags: z.string().register(z.globalRegistry, { + description: 'The genre tags used in the generation process.', + }), + lyrics: z.string().register(z.globalRegistry, { + description: 'The lyrics used in the generation process.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The random seed used for the generation process.', + }), + audio: zSchemaFile, +}) + +/** + * ACEStepAudioOutpaintRequest + */ +export const zSchemaAceStepAudioOutpaintInput = z.object({ + number_of_steps: z + .optional( + z.int().gte(3).lte(60).register(z.globalRegistry, { + description: 'Number of steps to generate the audio.', + }), + ) + .default(27), + tags: z.string().register(z.globalRegistry, { + description: + 'Comma-separated list of genre tags to control the style of the generated audio.', + }), + minimum_guidance_scale: z + .optional( + z.number().gte(0).lte(200).register(z.globalRegistry, { + description: + 'Minimum guidance scale for the generation after the decay.', + }), + ) + .default(3), + extend_after_duration: z + .optional( + z.number().gte(0).lte(240).register(z.globalRegistry, { + description: 'Duration in seconds to extend the audio from the end.', + }), + ) + .default(30), + lyrics: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Lyrics to be sung in the audio. If not provided or if [inst] or [instrumental] is the content of this field, no lyrics will be sung. 
Use control structures like [verse], [chorus] and [bridge] to control the structure of the song.', + }), + ) + .default(''), + tag_guidance_scale: z + .optional( + z.number().gte(0).lte(10).register(z.globalRegistry, { + description: 'Tag guidance scale for the generation.', + }), + ) + .default(5), + scheduler: z.optional( + z.enum(['euler', 'heun']).register(z.globalRegistry, { + description: 'Scheduler to use for the generation process.', + }), + ), + extend_before_duration: z + .optional( + z.number().gte(0).lte(240).register(z.globalRegistry, { + description: 'Duration in seconds to extend the audio from the start.', + }), + ) + .default(0), + guidance_type: z.optional( + z.enum(['cfg', 'apg', 'cfg_star']).register(z.globalRegistry, { + description: 'Type of CFG to use for the generation process.', + }), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(200).register(z.globalRegistry, { + description: 'Guidance scale for the generation.', + }), + ) + .default(15), + lyric_guidance_scale: z + .optional( + z.number().gte(0).lte(10).register(z.globalRegistry, { + description: 'Lyric guidance scale for the generation.', + }), + ) + .default(1.5), + guidance_interval: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Guidance interval for the generation. 0.5 means only apply guidance in the middle steps (0.25 * infer_steps to 0.75 * infer_steps)', + }), + ) + .default(0.5), + guidance_interval_decay: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Guidance interval decay for the generation. Guidance scale will decay from guidance_scale to min_guidance_scale in the interval. 0.0 means no decay.', + }), + ) + .default(0), + audio_url: z.string().register(z.globalRegistry, { + description: 'URL of the audio file to be outpainted.', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If not provided, a random seed will be used.', + }), + ), + granularity_scale: z + .optional( + z.int().gte(-100).lte(100).register(z.globalRegistry, { + description: + 'Granularity scale for the generation process. Higher values can reduce artifacts.', + }), + ) + .default(10), +}) + +/** + * ExtendOutput + */ +export const zSchemaV2ExtendOutput = z.object({ + tags: z.optional(z.union([z.array(z.string()), z.unknown()])), + seed: z.int().register(z.globalRegistry, { + description: + 'The seed used for generation. This can be used to generate an identical song by passing the same parameters with this seed in a future request.', + }), + extend_duration: z.number().register(z.globalRegistry, { + description: 'The duration in seconds that the song was extended by.', + }), + audio: z.array(zSchemaFile).register(z.globalRegistry, { + description: 'The generated audio files.', + }), + lyrics: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * ExtendInput + */ +export const zSchemaV2ExtendInput = z.object({ + prompt: z.optional(z.union([z.string(), z.unknown()])), + lyrics_prompt: z.optional(z.union([z.string(), z.unknown()])), + tags: z.optional(z.union([z.array(z.string()), z.unknown()])), + prompt_strength: z + .optional( + z.number().gte(1.4).lte(3.1).register(z.globalRegistry, { + description: + 'Controls how strongly your prompt influences the output. Greater values adhere more to the prompt but sound less natural. 
(This is CFG.)', + }), + ) + .default(1.8), + output_bit_rate: z.optional( + z.union([ + z.union([z.literal(128), z.literal(192), z.literal(256), z.literal(320)]), + z.unknown(), + ]), + ), + num_songs: z + .optional( + z.int().gte(1).lte(2).register(z.globalRegistry, { + description: + 'Generating 2 songs costs 1.5x the price of generating 1 song. Also, note that using the same seed may not result in identical songs if the number of songs generated is changed.', + }), + ) + .default(1), + output_format: z.optional(z.enum(['flac', 'mp3', 'wav', 'ogg', 'm4a'])), + side: z.enum(['left', 'right']).register(z.globalRegistry, { + description: 'Add more to the beginning (left) or end (right) of the song', + }), + balance_strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Greater means more natural vocals. Lower means sharper instrumentals. We recommend 0.7.', + }), + ) + .default(0.7), + crop_duration: z + .optional( + z.number().register(z.globalRegistry, { + description: + 'Duration in seconds to crop from the selected side before extending from that side.', + }), + ) + .default(0), + audio_url: z.url().min(1).max(2083).register(z.globalRegistry, { + description: + 'The URL of the audio file to alter. Must be a valid publicly accessible URL.', + }), + seed: z.optional( + z.union([ + z.int().gte(-9223372036854776000).lte(9223372036854776000), + z.unknown(), + ]), + ), + extend_duration: z.optional(z.union([z.number().lte(85), z.unknown()])), +}) + +/** + * InpaintOutput + */ +export const zSchemaStableAudio25InpaintOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The random seed used for generation', + }), + audio: zSchemaFile, +}) + +/** + * InpaintInput + */ +export const zSchemaStableAudio25InpaintInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to guide the audio generation', + }), + guidance_scale: z + .optional( + z.int().gte(1).lte(25).register(z.globalRegistry, { + description: + 'How strictly the diffusion process adheres to the prompt text (higher values make your audio closer to your prompt). ', + }), + ) + .default(1), + mask_end: z + .optional( + z.int().gte(0).lte(190).register(z.globalRegistry, { + description: 'The end point of the audio mask', + }), + ) + .default(190), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + audio_url: z.string().register(z.globalRegistry, { + description: 'The audio clip to inpaint', + }), + seed: z.optional(z.int()), + seconds_total: z + .optional( + z.int().gte(1).lte(190).register(z.globalRegistry, { + description: + 'The duration of the audio clip to generate. 
If not provided, it will be set to the duration of the input audio.', + }), + ) + .default(190), + num_inference_steps: z + .optional( + z.int().gte(4).lte(8).register(z.globalRegistry, { + description: 'The number of steps to denoise the audio for', + }), + ) + .default(8), + mask_start: z + .optional( + z.int().gte(0).lte(190).register(z.globalRegistry, { + description: 'The start point of the audio mask', + }), + ) + .default(30), +}) + +/** + * AudioToAudioOutput + */ +export const zSchemaStableAudio25AudioToAudioOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The random seed used for generation', + }), + audio: zSchemaFile, +}) + +/** + * AudioToAudioInput + */ +export const zSchemaStableAudio25AudioToAudioInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to guide the audio generation', + }), + strength: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: + 'Sometimes referred to as denoising, this parameter controls how much influence the `audio_url` parameter has on the generated audio. A value of 0 would yield audio that is identical to the input. A value of 1 would be as if you passed in no audio at all.', + }), + ) + .default(0.8), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + audio_url: z.string().register(z.globalRegistry, { + description: 'The audio clip to transform', + }), + num_inference_steps: z + .optional( + z.int().gte(4).lte(8).register(z.globalRegistry, { + description: 'The number of steps to denoise the audio for', + }), + ) + .default(8), + guidance_scale: z + .optional( + z.int().gte(1).lte(25).register(z.globalRegistry, { + description: + 'How strictly the diffusion process adheres to the prompt text (higher values make your audio closer to your prompt). ', + }), + ) + .default(1), + seed: z.optional(z.int()), + total_seconds: z.optional( + z.int().gte(1).lte(190).register(z.globalRegistry, { + description: + 'The duration of the audio clip to generate. 
If not provided, it will be set to the duration of the input audio.', + }), + ), +}) + +/** + * AudioUnderstandingOutput + */ +export const zSchemaAudioUnderstandingOutput = z.object({ + output: z.string().register(z.globalRegistry, { + description: 'The analysis of the audio content based on the prompt', + }), +}) + +/** + * AudioUnderstandingInput + */ +export const zSchemaAudioUnderstandingInput = z.object({ + prompt: z.string().min(1).max(10000).register(z.globalRegistry, { + description: 'The question or prompt about the audio content.', + }), + detailed_analysis: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to request a more detailed analysis of the audio', + }), + ) + .default(false), + audio_url: z.string().register(z.globalRegistry, { + description: 'URL of the audio file to analyze', + }), +}) + +/** + * DemucsOutput + */ +export const zSchemaDemucsOutput = z.object({ + vocals: z.optional(z.union([zSchemaFile, z.unknown()])), + guitar: z.optional(z.union([zSchemaFile, z.unknown()])), + bass: z.optional(z.union([zSchemaFile, z.unknown()])), + piano: z.optional(z.union([zSchemaFile, z.unknown()])), + other: z.optional(z.union([zSchemaFile, z.unknown()])), + drums: z.optional(z.union([zSchemaFile, z.unknown()])), +}) + +/** + * DemucsInput + */ +export const zSchemaDemucsInput = z.object({ + segment_length: z.optional(z.union([z.int(), z.unknown()])), + output_format: z.optional( + z.enum(['wav', 'mp3']).register(z.globalRegistry, { + description: 'Output audio format for the separated stems', + }), + ), + stems: z.optional( + z.union([ + z.array(z.enum(['vocals', 'drums', 'bass', 'other', 'guitar', 'piano'])), + z.unknown(), + ]), + ), + overlap: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Overlap between segments (0.0 to 1.0). Higher values may improve quality but increase processing time.', + }), + ) + .default(0.25), + model: z.optional( + z + .enum([ + 'htdemucs', + 'htdemucs_ft', + 'htdemucs_6s', + 'hdemucs_mmi', + 'mdx', + 'mdx_extra', + 'mdx_q', + 'mdx_extra_q', + ]) + .register(z.globalRegistry, { + description: 'Demucs model to use for separation', + }), + ), + audio_url: z.string().register(z.globalRegistry, { + description: 'URL of the audio file to separate into stems', + }), + shifts: z + .optional( + z.int().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Number of random shifts for equivariant stabilization. Higher values improve quality but increase processing time.', + }), + ) + .default(1), +}) + +/** + * CreateVoiceOutput + * + * Response model for creating a custom voice. + */ +export const zSchemaKlingVideoCreateVoiceOutput = z + .object({ + voice_id: z.string().register(z.globalRegistry, { + description: 'Unique identifier for the created voice', + }), + }) + .register(z.globalRegistry, { + description: 'Response model for creating a custom voice.', + }) + +/** + * CreateVoiceInput + * + * Request model for creating a custom voice. + */ +export const zSchemaKlingVideoCreateVoiceInput = z + .object({ + voice_url: z.string().register(z.globalRegistry, { + description: + 'URL of the voice audio file. Supports .mp3/.wav audio or .mp4/.mov video. 
Duration must be 5-30 seconds with clean, single-voice audio.', + }), + }) + .register(z.globalRegistry, { + description: 'Request model for creating a custom voice.', + }) + +/** + * MergeAudiosOutput + */ +export const zSchemaFfmpegApiMergeAudiosOutput = z.object({ + audio: zSchemaFile, +}) + +/** + * MergeAudiosInput + */ +export const zSchemaFfmpegApiMergeAudiosInput = z.object({ + audio_urls: z.array(z.string()).min(2).max(5).register(z.globalRegistry, { + description: + 'List of audio URLs to merge in order. The 0th stream of the audio will be considered as the merge candidate.', + }), + output_format: z.optional( + z.union([ + z.enum([ + 'mp3_22050_32', + 'mp3_44100_32', + 'mp3_44100_64', + 'mp3_44100_96', + 'mp3_44100_128', + 'mp3_44100_192', + 'pcm_8000', + 'pcm_16000', + 'pcm_22050', + 'pcm_24000', + 'pcm_44100', + 'pcm_48000', + 'ulaw_8000', + 'alaw_8000', + 'opus_48000_32', + 'opus_48000_64', + 'opus_48000_96', + 'opus_48000_128', + 'opus_48000_192', + ]), + z.unknown(), + ]), + ), +}) + +/** + * AudioTimeSpan + * + * A time span indicating where the target sound occurs. + */ +export const zSchemaAudioTimeSpan = z + .object({ + end: z.number().gte(0).register(z.globalRegistry, { + description: 'End time of the span in seconds', + }), + start: z.number().gte(0).register(z.globalRegistry, { + description: 'Start time of the span in seconds', + }), + include: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to include (True) or exclude (False) sounds in this span', + }), + ) + .default(true), + }) + .register(z.globalRegistry, { + description: 'A time span indicating where the target sound occurs.', + }) + +/** + * SAMAudioSpanSeparateOutput + * + * Output for span-based audio separation. + */ +export const zSchemaSamAudioSpanSeparateOutput = z + .object({ + target: zSchemaFile, + duration: z.number().register(z.globalRegistry, { + description: 'Duration of the output audio in seconds.', + }), + sample_rate: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Sample rate of the output audio in Hz.', + }), + ) + .default(48000), + residual: zSchemaFile, + }) + .register(z.globalRegistry, { + description: 'Output for span-based audio separation.', + }) + +/** + * SAMAudioSpanInput + * + * Input for temporal span-based audio separation. + */ +export const zSchemaSamAudioSpanSeparateInput = z + .object({ + prompt: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Text prompt describing the sound to isolate. Optional but recommended - helps the model identify what type of sound to extract from the span.', + }), + ), + acceleration: z.optional( + z.enum(['fast', 'balanced', 'quality']).register(z.globalRegistry, { + description: 'The acceleration level to use.', + }), + ), + spans: z.array(zSchemaAudioTimeSpan).register(z.globalRegistry, { + description: + 'Time spans where the target sound occurs which should be isolated.', + }), + output_format: z.optional( + z.enum(['wav', 'mp3']).register(z.globalRegistry, { + description: 'Output audio format.', + }), + ), + trim_to_span: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Trim output audio to only include the specified span time range. 
If False, returns the full audio length with the target sound isolated throughout.', + }), + ) + .default(false), + audio_url: z.string().register(z.globalRegistry, { + description: 'URL of the audio file to process.', + }), + reranking_candidates: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: + 'Number of candidates to generate and rank. Higher improves quality but increases latency and cost. Requires text prompt; ignored for span-only separation.', + }), + ) + .default(1), + }) + .register(z.globalRegistry, { + description: 'Input for temporal span-based audio separation.', + }) + +/** + * SAMAudioSeparateOutput + * + * Output for text-based audio separation. + */ +export const zSchemaSamAudioSeparateOutput = z + .object({ + target: zSchemaFile, + duration: z.number().register(z.globalRegistry, { + description: 'Duration of the output audio in seconds.', + }), + sample_rate: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Sample rate of the output audio in Hz.', + }), + ) + .default(48000), + residual: zSchemaFile, + }) + .register(z.globalRegistry, { + description: 'Output for text-based audio separation.', + }) + +/** + * SAMAudioInput + * + * Input for text-based audio separation. + */ +export const zSchemaSamAudioSeparateInput = z + .object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Text prompt describing the sound to isolate.', + }), + acceleration: z.optional( + z.enum(['fast', 'balanced', 'quality']).register(z.globalRegistry, { + description: 'The acceleration level to use.', + }), + ), + audio_url: z.string().register(z.globalRegistry, { + description: + 'URL of the audio file to process (WAV, MP3, FLAC supported)', + }), + predict_spans: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Automatically predict temporal spans where the target sound occurs.', + }), + ) + .default(false), + output_format: z.optional( + z.enum(['wav', 'mp3']).register(z.globalRegistry, { + description: 'Output audio format.', + }), + ), + reranking_candidates: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: + 'Number of candidates to generate and rank. 
Higher improves quality but increases latency and cost.', + }), + ) + .default(1), + }) + .register(z.globalRegistry, { + description: 'Input for text-based audio separation.', + }) + +/** + * DeepFilterNetTimings + */ +export const zSchemaDeepFilterNetTimings = z.object({ + postprocess: z.number().register(z.globalRegistry, { + description: 'Postprocessing time.', + }), + inference: z.number().register(z.globalRegistry, { + description: 'Inference time.', + }), + preprocess: z.number().register(z.globalRegistry, { + description: 'Preprocessing time.', + }), +}) + +/** + * AudioFile + */ +export const zSchemaAudioFile = z.object({ + file_size: z.optional( + z.int().register(z.globalRegistry, { + description: 'The size of the file in bytes.', + }), + ), + duration: z.optional( + z.number().register(z.globalRegistry, { + description: 'The duration of the audio', + }), + ), + file_data: z.optional( + z.string().register(z.globalRegistry, { + description: 'File data', + }), + ), + bitrate: z.optional( + z.string().register(z.globalRegistry, { + description: 'The bitrate of the audio', + }), + ), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), + file_name: z.optional( + z.string().register(z.globalRegistry, { + description: + 'The name of the file. It will be auto-generated if not provided.', + }), + ), + sample_rate: z.optional( + z.int().register(z.globalRegistry, { + description: 'The sample rate of the audio', + }), + ), + content_type: z.optional( + z.string().register(z.globalRegistry, { + description: 'The mime type of the file.', + }), + ), + channels: z.optional( + z.int().register(z.globalRegistry, { + description: 'The number of channels in the audio', + }), + ), +}) + +/** + * DeepFilterNet3Output + */ +export const zSchemaDeepfilternet3Output = z.object({ + timings: zSchemaDeepFilterNetTimings, + audio_file: zSchemaAudioFile, +}) + +/** + * DeepFilterNet3Input + */ +export const zSchemaDeepfilternet3Input = z.object({ + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + audio_format: z.optional( + z + .enum(['mp3', 'aac', 'm4a', 'ogg', 'opus', 'flac', 'wav']) + .register(z.globalRegistry, { + description: 'The format for the output audio.', + }), + ), + audio_url: z.string().register(z.globalRegistry, { + description: 'The URL of the audio to enhance.', + }), + bitrate: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The bitrate of the output audio.', + }), + ) + .default('192k'), +}) + +/** + * NovaSRTimings + */ +export const zSchemaNovaSrTimings = z.object({ + postprocess: z.number().register(z.globalRegistry, { + description: 'Time taken to postprocess the audio in seconds.', + }), + inference: z.number().register(z.globalRegistry, { + description: 'Time taken to run the inference in seconds.', + }), + preprocess: z.number().register(z.globalRegistry, { + description: 'Time taken to preprocess the audio in seconds.', + }), +}) + +/** + * NovaSROutput + */ +export const zSchemaNovaSrOutput = z.object({ + timings: zSchemaNovaSrTimings, + audio: zSchemaAudioFile, +}) + +/** + * NovaSRInput + */ +export const zSchemaNovaSrInput = z.object({ + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be 
available in the request history.", + }), + ) + .default(false), + bitrate: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The bitrate of the output audio.', + }), + ) + .default('192k'), + audio_url: z.string().register(z.globalRegistry, { + description: 'The URL of the audio file to enhance.', + }), + audio_format: z.optional( + z + .enum(['mp3', 'aac', 'm4a', 'ogg', 'opus', 'flac', 'wav']) + .register(z.globalRegistry, { + description: 'The format for the output audio.', + }), + ), +}) + +/** + * VoiceChangerOutput + */ +export const zSchemaElevenlabsVoiceChangerOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'Random seed for reproducibility.', + }), + audio: zSchemaFile, +}) + +/** + * VoiceChangerRequest + */ +export const zSchemaElevenlabsVoiceChangerInput = z.object({ + voice: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The voice to use for speech generation', + }), + ) + .default('Rachel'), + audio_url: z.string().register(z.globalRegistry, { + description: 'The input audio file', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for reproducibility.', + }), + ), + output_format: z.optional( + z + .enum([ + 'mp3_22050_32', + 'mp3_44100_32', + 'mp3_44100_64', + 'mp3_44100_96', + 'mp3_44100_128', + 'mp3_44100_192', + 'pcm_8000', + 'pcm_16000', + 'pcm_22050', + 'pcm_24000', + 'pcm_44100', + 'pcm_48000', + 'ulaw_8000', + 'alaw_8000', + 'opus_48000_32', + 'opus_48000_64', + 'opus_48000_96', + 'opus_48000_128', + 'opus_48000_192', + ]) + .register(z.globalRegistry, { + description: + 'Output format of the generated audio. Formatted as codec_sample_rate_bitrate.', + }), + ), + remove_background_noise: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set, will remove the background noise from your audio input using our audio isolation model.', + }), + ) + .default(false), +}) + +export const zSchemaQueueStatus = z.object({ + status: z.enum(['IN_QUEUE', 'IN_PROGRESS', 'COMPLETED']), + request_id: z.string().register(z.globalRegistry, { + description: 'The request id.', + }), + response_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The response url.', + }), + ), + status_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The status url.', + }), + ), + cancel_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The cancel url.', + }), + ), + logs: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The logs.', + }), + ), + metrics: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The metrics.', + }), + ), + queue_position: z.optional( + z.int().register(z.globalRegistry, { + description: 'The queue position.', + }), + ), +}) + +export const zGetFalAiElevenlabsVoiceChangerRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
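+ *
+ * @example
+ * A minimal polling sketch (illustrative only: `statusUrl` stands in for the
+ * `status_url` returned when the request was queued, and auth/transport are
+ * assumed to be configured elsewhere).
+ * ```ts
+ * const status = zSchemaQueueStatus.parse(await (await fetch(statusUrl)).json())
+ * if (status.status === 'COMPLETED' && status.response_url) {
+ *   const result = zSchemaElevenlabsVoiceChangerOutput.parse(
+ *     await (await fetch(status.response_url)).json(),
+ *   )
+ * }
+ * ```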
+ */ +export const zGetFalAiElevenlabsVoiceChangerRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiElevenlabsVoiceChangerRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiElevenlabsVoiceChangerRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiElevenlabsVoiceChangerData = z.object({ + body: zSchemaElevenlabsVoiceChangerInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiElevenlabsVoiceChangerResponse = zSchemaQueueStatus + +export const zGetFalAiElevenlabsVoiceChangerRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiElevenlabsVoiceChangerRequestsByRequestIdResponse = + zSchemaElevenlabsVoiceChangerOutput + +export const zGetFalAiNovaSrRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiNovaSrRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiNovaSrRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiNovaSrRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiNovaSrData = z.object({ + body: zSchemaNovaSrInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiNovaSrResponse = zSchemaQueueStatus + +export const zGetFalAiNovaSrRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiNovaSrRequestsByRequestIdResponse = zSchemaNovaSrOutput + +export const zGetFalAiDeepfilternet3RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiDeepfilternet3RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiDeepfilternet3RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiDeepfilternet3RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiDeepfilternet3Data = z.object({ + body: zSchemaDeepfilternet3Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiDeepfilternet3Response = zSchemaQueueStatus + +export const zGetFalAiDeepfilternet3RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiDeepfilternet3RequestsByRequestIdResponse = + zSchemaDeepfilternet3Output + +export const zGetFalAiSamAudioSeparateRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiSamAudioSeparateRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSamAudioSeparateRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiSamAudioSeparateRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSamAudioSeparateData = z.object({ + body: zSchemaSamAudioSeparateInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
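+ *
+ * @example
+ * A minimal request-body sketch for this endpoint (illustrative values; the URL
+ * is a placeholder). Optional fields such as `predict_spans` and
+ * `reranking_candidates` are filled with their documented defaults by Zod.
+ * ```ts
+ * const body = zSchemaSamAudioSeparateInput.parse({
+ *   audio_url: 'https://example.com/mix.wav',
+ *   prompt: 'dog barking',
+ * })
+ * ```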
+ */ +export const zPostFalAiSamAudioSeparateResponse = zSchemaQueueStatus + +export const zGetFalAiSamAudioSeparateRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSamAudioSeparateRequestsByRequestIdResponse = + zSchemaSamAudioSeparateOutput + +export const zGetFalAiSamAudioSpanSeparateRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiSamAudioSpanSeparateRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSamAudioSpanSeparateRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiSamAudioSpanSeparateRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSamAudioSpanSeparateData = z.object({ + body: zSchemaSamAudioSpanSeparateInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiSamAudioSpanSeparateResponse = zSchemaQueueStatus + +export const zGetFalAiSamAudioSpanSeparateRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSamAudioSpanSeparateRequestsByRequestIdResponse = + zSchemaSamAudioSpanSeparateOutput + +export const zGetFalAiFfmpegApiMergeAudiosRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFfmpegApiMergeAudiosRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFfmpegApiMergeAudiosRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiFfmpegApiMergeAudiosRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFfmpegApiMergeAudiosData = z.object({ + body: zSchemaFfmpegApiMergeAudiosInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFfmpegApiMergeAudiosResponse = zSchemaQueueStatus + +export const zGetFalAiFfmpegApiMergeAudiosRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFfmpegApiMergeAudiosRequestsByRequestIdResponse = + zSchemaFfmpegApiMergeAudiosOutput + +export const zGetFalAiKlingVideoCreateVoiceRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoCreateVoiceRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoCreateVoiceRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKlingVideoCreateVoiceRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoCreateVoiceData = z.object({ + body: zSchemaKlingVideoCreateVoiceInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKlingVideoCreateVoiceResponse = zSchemaQueueStatus + +export const zGetFalAiKlingVideoCreateVoiceRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiKlingVideoCreateVoiceRequestsByRequestIdResponse = + zSchemaKlingVideoCreateVoiceOutput + +export const zGetFalAiDemucsRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
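+ *
+ * @example
+ * A minimal status-request sketch (the request id is an illustrative placeholder;
+ * `logs: 1` asks the queue to include logs in the status response).
+ * ```ts
+ * const req = zGetFalAiDemucsRequestsByRequestIdStatusData.parse({
+ *   path: { request_id: '00000000-0000-0000-0000-000000000000' },
+ *   query: { logs: 1 },
+ * })
+ * ```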
+ */ +export const zGetFalAiDemucsRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiDemucsRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiDemucsRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiDemucsData = z.object({ + body: zSchemaDemucsInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiDemucsResponse = zSchemaQueueStatus + +export const zGetFalAiDemucsRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiDemucsRequestsByRequestIdResponse = zSchemaDemucsOutput + +export const zGetFalAiAudioUnderstandingRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiAudioUnderstandingRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiAudioUnderstandingRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiAudioUnderstandingRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiAudioUnderstandingData = z.object({ + body: zSchemaAudioUnderstandingInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiAudioUnderstandingResponse = zSchemaQueueStatus + +export const zGetFalAiAudioUnderstandingRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiAudioUnderstandingRequestsByRequestIdResponse = + zSchemaAudioUnderstandingOutput + +export const zGetFalAiStableAudio25AudioToAudioRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiStableAudio25AudioToAudioRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiStableAudio25AudioToAudioRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiStableAudio25AudioToAudioRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiStableAudio25AudioToAudioData = z.object({ + body: zSchemaStableAudio25AudioToAudioInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiStableAudio25AudioToAudioResponse = zSchemaQueueStatus + +export const zGetFalAiStableAudio25AudioToAudioRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiStableAudio25AudioToAudioRequestsByRequestIdResponse = + zSchemaStableAudio25AudioToAudioOutput + +export const zGetFalAiStableAudio25InpaintRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiStableAudio25InpaintRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiStableAudio25InpaintRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiStableAudio25InpaintRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiStableAudio25InpaintData = z.object({ + body: zSchemaStableAudio25InpaintInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
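+ *
+ * @example
+ * A minimal request-body sketch (illustrative values; the URL is a placeholder).
+ * Zod fills the documented defaults on parse, e.g. `mask_start`, `mask_end` and
+ * `num_inference_steps`.
+ * ```ts
+ * const body = zSchemaStableAudio25InpaintInput.parse({
+ *   prompt: 'soft piano outro',
+ *   audio_url: 'https://example.com/clip.wav',
+ * })
+ * // body.mask_start === 30, body.mask_end === 190
+ * ```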
+ */ +export const zPostFalAiStableAudio25InpaintResponse = zSchemaQueueStatus + +export const zGetFalAiStableAudio25InpaintRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiStableAudio25InpaintRequestsByRequestIdResponse = + zSchemaStableAudio25InpaintOutput + +export const zGetSonautoV2ExtendRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetSonautoV2ExtendRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutSonautoV2ExtendRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutSonautoV2ExtendRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostSonautoV2ExtendData = z.object({ + body: zSchemaV2ExtendInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostSonautoV2ExtendResponse = zSchemaQueueStatus + +export const zGetSonautoV2ExtendRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetSonautoV2ExtendRequestsByRequestIdResponse = + zSchemaV2ExtendOutput + +export const zGetFalAiAceStepAudioOutpaintRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiAceStepAudioOutpaintRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiAceStepAudioOutpaintRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiAceStepAudioOutpaintRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiAceStepAudioOutpaintData = z.object({ + body: zSchemaAceStepAudioOutpaintInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiAceStepAudioOutpaintResponse = zSchemaQueueStatus + +export const zGetFalAiAceStepAudioOutpaintRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiAceStepAudioOutpaintRequestsByRequestIdResponse = + zSchemaAceStepAudioOutpaintOutput + +export const zGetFalAiAceStepAudioInpaintRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiAceStepAudioInpaintRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiAceStepAudioInpaintRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiAceStepAudioInpaintRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiAceStepAudioInpaintData = z.object({ + body: zSchemaAceStepAudioInpaintInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiAceStepAudioInpaintResponse = zSchemaQueueStatus + +export const zGetFalAiAceStepAudioInpaintRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiAceStepAudioInpaintRequestsByRequestIdResponse = + zSchemaAceStepAudioInpaintOutput + +export const zGetFalAiAceStepAudioToAudioRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiAceStepAudioToAudioRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiAceStepAudioToAudioRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiAceStepAudioToAudioRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiAceStepAudioToAudioData = z.object({ + body: zSchemaAceStepAudioToAudioInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiAceStepAudioToAudioResponse = zSchemaQueueStatus + +export const zGetFalAiAceStepAudioToAudioRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiAceStepAudioToAudioRequestsByRequestIdResponse = + zSchemaAceStepAudioToAudioOutput + +export const zGetFalAiDiaTtsVoiceCloneRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiDiaTtsVoiceCloneRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiDiaTtsVoiceCloneRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiDiaTtsVoiceCloneRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiDiaTtsVoiceCloneData = z.object({ + body: zSchemaDiaTtsVoiceCloneInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiDiaTtsVoiceCloneResponse = zSchemaQueueStatus + +export const zGetFalAiDiaTtsVoiceCloneRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiDiaTtsVoiceCloneRequestsByRequestIdResponse = + zSchemaDiaTtsVoiceCloneOutput + +export const zGetFalAiElevenlabsAudioIsolationRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiElevenlabsAudioIsolationRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiElevenlabsAudioIsolationRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiElevenlabsAudioIsolationRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiElevenlabsAudioIsolationData = z.object({ + body: zSchemaElevenlabsAudioIsolationInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiElevenlabsAudioIsolationResponse = zSchemaQueueStatus + +export const zGetFalAiElevenlabsAudioIsolationRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiElevenlabsAudioIsolationRequestsByRequestIdResponse = + zSchemaElevenlabsAudioIsolationOutput diff --git a/packages/typescript/ai-fal/src/generated/audio-to-text/endpoint-map.ts b/packages/typescript/ai-fal/src/generated/audio-to-text/endpoint-map.ts new file mode 100644 index 00000000..afcee76d --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/audio-to-text/endpoint-map.ts @@ -0,0 +1,66 @@ +// AUTO-GENERATED - Do not edit manually +// Generated from types.gen.ts via scripts/generate-fal-endpoint-maps.ts + +import { + zSchemaNemotronAsrInput, + zSchemaNemotronAsrOutput, + zSchemaNemotronAsrStreamInput, + zSchemaNemotronAsrStreamOutput, + zSchemaSileroVadInput, + zSchemaSileroVadOutput, +} from './zod.gen' + +import type { + SchemaNemotronAsrInput, + SchemaNemotronAsrOutput, + SchemaNemotronAsrStreamInput, + SchemaNemotronAsrStreamOutput, + SchemaSileroVadInput, + SchemaSileroVadOutput, +} from './types.gen' + +import type { z } from 'zod' + +export type AudioToTextEndpointMap = { + 'fal-ai/nemotron/asr/stream': { + input: SchemaNemotronAsrStreamInput + output: SchemaNemotronAsrStreamOutput + } + 'fal-ai/nemotron/asr': { + input: SchemaNemotronAsrInput + output: SchemaNemotronAsrOutput + } + 'fal-ai/silero-vad': { + input: SchemaSileroVadInput + output: SchemaSileroVadOutput + } +} + +/** Union type of all audio-to-text model endpoint IDs */ +export type AudioToTextModel = keyof AudioToTextEndpointMap + +export const AudioToTextSchemaMap: Record< + AudioToTextModel, + { input: z.ZodSchema; output: z.ZodSchema } +> = { + ['fal-ai/nemotron/asr/stream']: { + input: zSchemaNemotronAsrStreamInput, + output: zSchemaNemotronAsrStreamOutput, + }, + ['fal-ai/nemotron/asr']: { + input: zSchemaNemotronAsrInput, + output: zSchemaNemotronAsrOutput, + }, + ['fal-ai/silero-vad']: { + input: zSchemaSileroVadInput, + output: zSchemaSileroVadOutput, + }, +} as const + +/** Get the input type for a specific audio-to-text model */ +export type AudioToTextModelInput = + AudioToTextEndpointMap[T]['input'] + +/** Get the output type for a specific audio-to-text model */ +export type AudioToTextModelOutput = + AudioToTextEndpointMap[T]['output'] diff --git a/packages/typescript/ai-fal/src/generated/audio-to-text/types.gen.ts b/packages/typescript/ai-fal/src/generated/audio-to-text/types.gen.ts new file mode 100644 index 00000000..979390d6 --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/audio-to-text/types.gen.ts @@ -0,0 +1,424 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: 'https://queue.fal.run' | (string & {}) +} + +/** + * SpeechTimestamp + */ +export type SchemaSpeechTimestamp = { + /** + * End Time + * + * The end time of the speech in seconds. + */ + end: number + /** + * Start Time + * + * The start time of the speech in seconds. + */ + start: number +} + +/** + * SileroVADOutput + */ +export type SchemaSileroVadOutput = { + /** + * Has Speech + * + * Whether the audio has speech. + */ + has_speech: boolean + /** + * Speech Timestamps + * + * The speech timestamps. + */ + timestamps: Array +} + +/** + * SileroVADInput + */ +export type SchemaSileroVadInput = { + /** + * Audio URL + * + * The URL of the audio to get speech timestamps from. + */ + audio_url: string +} + +/** + * SpeechOutput + */ +export type SchemaNemotronAsrOutput = { + /** + * Partial Result + * + * True if this is an intermediate result during streaming. 
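// --- Illustrative usage sketch (not part of the generated file) ---
// AudioToTextSchemaMap above pairs each endpoint ID with its zod input and
// output schemas, so a payload can be validated at runtime while
// AudioToTextModelInput keeps the static type. A minimal sketch assuming only
// the exports shown above; the audio URL below is a hypothetical placeholder.
import { AudioToTextSchemaMap } from './endpoint-map'
import type { AudioToTextModel, AudioToTextModelInput } from './endpoint-map'

function parseAudioToTextInput<T extends AudioToTextModel>(
  model: T,
  payload: unknown,
): AudioToTextModelInput<T> {
  // Throws if the payload does not match the generated schema for this model.
  return AudioToTextSchemaMap[model].input.parse(
    payload,
  ) as AudioToTextModelInput<T>
}

// Usage: parseAudioToTextInput('fal-ai/silero-vad', {
//   audio_url: 'https://example.com/audio.wav',
// })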
+ */ + partial?: boolean + /** + * Transcribed Text + * + * The transcribed text from the audio. + */ + output: string +} + +/** + * SpeechInput + */ +export type SchemaNemotronAsrInput = { + /** + * Acceleration + * + * Controls the speed/accuracy trade-off. 'none' = best accuracy (1.12s chunks, ~7.16% WER), 'low' = balanced (0.56s chunks, ~7.22% WER), 'medium' = faster (0.16s chunks, ~7.84% WER), 'high' = fastest (0.08s chunks, ~8.53% WER). + */ + acceleration?: 'none' | 'low' | 'medium' | 'high' + /** + * Audio URL + * + * URL of the audio file. + */ + audio_url: string +} + +export type SchemaNemotronAsrStreamOutput = unknown + +/** + * SpeechInput + */ +export type SchemaNemotronAsrStreamInput = { + /** + * Acceleration + * + * Controls the speed/accuracy trade-off. 'none' = best accuracy (1.12s chunks, ~7.16% WER), 'low' = balanced (0.56s chunks, ~7.22% WER), 'medium' = faster (0.16s chunks, ~7.84% WER), 'high' = fastest (0.08s chunks, ~8.53% WER). + */ + acceleration?: 'none' | 'low' | 'medium' | 'high' + /** + * Audio URL + * + * URL of the audio file. + */ + audio_url: string +} + +export type SchemaQueueStatus = { + status: 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED' + /** + * The request id. + */ + request_id: string + /** + * The response url. + */ + response_url?: string + /** + * The status url. + */ + status_url?: string + /** + * The cancel url. + */ + cancel_url?: string + /** + * The logs. + */ + logs?: { + [key: string]: unknown + } + /** + * The metrics. + */ + metrics?: { + [key: string]: unknown + } + /** + * The queue position. + */ + queue_position?: number +} + +export type GetFalAiNemotronAsrStreamRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/nemotron/asr/stream/requests/{request_id}/status' +} + +export type GetFalAiNemotronAsrStreamRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiNemotronAsrStreamRequestsByRequestIdStatusResponse = + GetFalAiNemotronAsrStreamRequestsByRequestIdStatusResponses[keyof GetFalAiNemotronAsrStreamRequestsByRequestIdStatusResponses] + +export type PutFalAiNemotronAsrStreamRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/nemotron/asr/stream/requests/{request_id}/cancel' +} + +export type PutFalAiNemotronAsrStreamRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiNemotronAsrStreamRequestsByRequestIdCancelResponse = + PutFalAiNemotronAsrStreamRequestsByRequestIdCancelResponses[keyof PutFalAiNemotronAsrStreamRequestsByRequestIdCancelResponses] + +export type PostFalAiNemotronAsrStreamData = { + body: SchemaNemotronAsrStreamInput + path?: never + query?: never + url: '/fal-ai/nemotron/asr/stream' +} + +export type PostFalAiNemotronAsrStreamResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiNemotronAsrStreamResponse = + PostFalAiNemotronAsrStreamResponses[keyof PostFalAiNemotronAsrStreamResponses] + +export type GetFalAiNemotronAsrStreamRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/nemotron/asr/stream/requests/{request_id}' +} + +export type GetFalAiNemotronAsrStreamRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaNemotronAsrStreamOutput +} + +export type GetFalAiNemotronAsrStreamRequestsByRequestIdResponse = + GetFalAiNemotronAsrStreamRequestsByRequestIdResponses[keyof GetFalAiNemotronAsrStreamRequestsByRequestIdResponses] + +export type GetFalAiNemotronAsrRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/nemotron/asr/requests/{request_id}/status' +} + +export type GetFalAiNemotronAsrRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiNemotronAsrRequestsByRequestIdStatusResponse = + GetFalAiNemotronAsrRequestsByRequestIdStatusResponses[keyof GetFalAiNemotronAsrRequestsByRequestIdStatusResponses] + +export type PutFalAiNemotronAsrRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/nemotron/asr/requests/{request_id}/cancel' +} + +export type PutFalAiNemotronAsrRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiNemotronAsrRequestsByRequestIdCancelResponse = + PutFalAiNemotronAsrRequestsByRequestIdCancelResponses[keyof PutFalAiNemotronAsrRequestsByRequestIdCancelResponses] + +export type PostFalAiNemotronAsrData = { + body: SchemaNemotronAsrInput + path?: never + query?: never + url: '/fal-ai/nemotron/asr' +} + +export type PostFalAiNemotronAsrResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiNemotronAsrResponse = + PostFalAiNemotronAsrResponses[keyof PostFalAiNemotronAsrResponses] + +export type GetFalAiNemotronAsrRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/nemotron/asr/requests/{request_id}' +} + +export type GetFalAiNemotronAsrRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaNemotronAsrOutput +} + +export type GetFalAiNemotronAsrRequestsByRequestIdResponse = + GetFalAiNemotronAsrRequestsByRequestIdResponses[keyof GetFalAiNemotronAsrRequestsByRequestIdResponses] + +export type GetFalAiSileroVadRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/silero-vad/requests/{request_id}/status' +} + +export type GetFalAiSileroVadRequestsByRequestIdStatusResponses = { + /** + * The request status. 
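// --- Illustrative queue lifecycle sketch (not part of the generated file) ---
// The Post*/Get* types above encode the queue flow in their `url` fields:
// POST the input to submit, poll the status endpoint until COMPLETED, then
// GET the request URL for the result. A minimal fetch-based sketch assuming
// the types above; the `Authorization: Key <FAL_KEY>` header format is an
// assumption based on fal's conventions, not something this file specifies.
import type {
  SchemaNemotronAsrInput,
  SchemaNemotronAsrOutput,
  SchemaQueueStatus,
} from './types.gen'

async function runNemotronAsr(
  input: SchemaNemotronAsrInput,
  falKey: string,
): Promise<SchemaNemotronAsrOutput> {
  const base = 'https://queue.fal.run/fal-ai/nemotron/asr'
  const headers = {
    Authorization: `Key ${falKey}`, // assumed header format
    'Content-Type': 'application/json',
  }
  // 1. Submit the request; the queue answers with a SchemaQueueStatus.
  const res = await fetch(base, {
    method: 'POST',
    headers,
    body: JSON.stringify(input),
  })
  const submitted = (await res.json()) as SchemaQueueStatus
  // 2. Poll .../requests/{request_id}/status until the job completes.
  let status = submitted
  while (status.status !== 'COMPLETED') {
    await new Promise((resolve) => setTimeout(resolve, 1000))
    const poll = await fetch(
      `${base}/requests/${submitted.request_id}/status`,
      { headers },
    )
    status = (await poll.json()) as SchemaQueueStatus
  }
  // 3. Fetch the result from .../requests/{request_id}.
  const result = await fetch(`${base}/requests/${submitted.request_id}`, {
    headers,
  })
  return (await result.json()) as SchemaNemotronAsrOutput
}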
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiSileroVadRequestsByRequestIdStatusResponse = + GetFalAiSileroVadRequestsByRequestIdStatusResponses[keyof GetFalAiSileroVadRequestsByRequestIdStatusResponses] + +export type PutFalAiSileroVadRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/silero-vad/requests/{request_id}/cancel' +} + +export type PutFalAiSileroVadRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiSileroVadRequestsByRequestIdCancelResponse = + PutFalAiSileroVadRequestsByRequestIdCancelResponses[keyof PutFalAiSileroVadRequestsByRequestIdCancelResponses] + +export type PostFalAiSileroVadData = { + body: SchemaSileroVadInput + path?: never + query?: never + url: '/fal-ai/silero-vad' +} + +export type PostFalAiSileroVadResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSileroVadResponse = + PostFalAiSileroVadResponses[keyof PostFalAiSileroVadResponses] + +export type GetFalAiSileroVadRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/silero-vad/requests/{request_id}' +} + +export type GetFalAiSileroVadRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSileroVadOutput +} + +export type GetFalAiSileroVadRequestsByRequestIdResponse = + GetFalAiSileroVadRequestsByRequestIdResponses[keyof GetFalAiSileroVadRequestsByRequestIdResponses] diff --git a/packages/typescript/ai-fal/src/generated/audio-to-text/zod.gen.ts b/packages/typescript/ai-fal/src/generated/audio-to-text/zod.gen.ts new file mode 100644 index 00000000..bf739173 --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/audio-to-text/zod.gen.ts @@ -0,0 +1,356 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { z } from 'zod' + +/** + * SpeechTimestamp + */ +export const zSchemaSpeechTimestamp = z.object({ + end: z.number().register(z.globalRegistry, { + description: 'The end time of the speech in seconds.', + }), + start: z.number().register(z.globalRegistry, { + description: 'The start time of the speech in seconds.', + }), +}) + +/** + * SileroVADOutput + */ +export const zSchemaSileroVadOutput = z.object({ + has_speech: z.boolean().register(z.globalRegistry, { + description: 'Whether the audio has speech.', + }), + timestamps: z.array(zSchemaSpeechTimestamp).register(z.globalRegistry, { + description: 'The speech timestamps.', + }), +}) + +/** + * SileroVADInput + */ +export const zSchemaSileroVadInput = z.object({ + audio_url: z.string().register(z.globalRegistry, { + description: 'The URL of the audio to get speech timestamps from.', + }), +}) + +/** + * SpeechOutput + */ +export const zSchemaNemotronAsrOutput = z.object({ + partial: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'True if this is an intermediate result during streaming.', + }), + ) + .default(false), + output: z.string().register(z.globalRegistry, { + description: 'The transcribed text from the audio.', + }), +}) + +/** + * SpeechInput + */ +export const zSchemaNemotronAsrInput = z.object({ + acceleration: z.optional( + z.enum(['none', 'low', 'medium', 'high']).register(z.globalRegistry, { + description: + "Controls the speed/accuracy trade-off. 
'none' = best accuracy (1.12s chunks, ~7.16% WER), 'low' = balanced (0.56s chunks, ~7.22% WER), 'medium' = faster (0.16s chunks, ~7.84% WER), 'high' = fastest (0.08s chunks, ~8.53% WER).", + }), + ), + audio_url: z.string().register(z.globalRegistry, { + description: 'URL of the audio file.', + }), +}) + +export const zSchemaNemotronAsrStreamOutput = z.unknown() + +/** + * SpeechInput + */ +export const zSchemaNemotronAsrStreamInput = z.object({ + acceleration: z.optional( + z.enum(['none', 'low', 'medium', 'high']).register(z.globalRegistry, { + description: + "Controls the speed/accuracy trade-off. 'none' = best accuracy (1.12s chunks, ~7.16% WER), 'low' = balanced (0.56s chunks, ~7.22% WER), 'medium' = faster (0.16s chunks, ~7.84% WER), 'high' = fastest (0.08s chunks, ~8.53% WER).", + }), + ), + audio_url: z.string().register(z.globalRegistry, { + description: 'URL of the audio file.', + }), +}) + +export const zSchemaQueueStatus = z.object({ + status: z.enum(['IN_QUEUE', 'IN_PROGRESS', 'COMPLETED']), + request_id: z.string().register(z.globalRegistry, { + description: 'The request id.', + }), + response_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The response url.', + }), + ), + status_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The status url.', + }), + ), + cancel_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The cancel url.', + }), + ), + logs: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The logs.', + }), + ), + metrics: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The metrics.', + }), + ), + queue_position: z.optional( + z.int().register(z.globalRegistry, { + description: 'The queue position.', + }), + ), +}) + +export const zGetFalAiNemotronAsrStreamRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiNemotronAsrStreamRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiNemotronAsrStreamRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiNemotronAsrStreamRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiNemotronAsrStreamData = z.object({ + body: zSchemaNemotronAsrStreamInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiNemotronAsrStreamResponse = zSchemaQueueStatus + +export const zGetFalAiNemotronAsrStreamRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiNemotronAsrStreamRequestsByRequestIdResponse = + zSchemaNemotronAsrStreamOutput + +export const zGetFalAiNemotronAsrRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiNemotronAsrRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiNemotronAsrRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiNemotronAsrRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiNemotronAsrData = z.object({ + body: zSchemaNemotronAsrInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiNemotronAsrResponse = zSchemaQueueStatus + +export const zGetFalAiNemotronAsrRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiNemotronAsrRequestsByRequestIdResponse = + zSchemaNemotronAsrOutput + +export const zGetFalAiSileroVadRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiSileroVadRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSileroVadRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
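// --- Illustrative validation sketch (not part of the generated file) ---
// The z* response constants above are ordinary zod schemas, so raw JSON from
// the queue can be checked before it is trusted. A minimal sketch assuming
// only zSchemaQueueStatus from this file; `raw` is a hypothetical payload.
import { zSchemaQueueStatus } from './zod.gen'

function toQueueStatus(raw: unknown) {
  const parsed = zSchemaQueueStatus.safeParse(raw)
  if (!parsed.success) {
    // Surfaces drift between the generated schemas and the live API.
    throw new Error(`Unexpected queue payload: ${parsed.error.message}`)
  }
  return parsed.data // narrowed to { status, request_id, ... }
}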
+ */ +export const zPutFalAiSileroVadRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSileroVadData = z.object({ + body: zSchemaSileroVadInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiSileroVadResponse = zSchemaQueueStatus + +export const zGetFalAiSileroVadRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSileroVadRequestsByRequestIdResponse = + zSchemaSileroVadOutput diff --git a/packages/typescript/ai-fal/src/generated/audio-to-video/endpoint-map.ts b/packages/typescript/ai-fal/src/generated/audio-to-video/endpoint-map.ts new file mode 100644 index 00000000..ccd04201 --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/audio-to-video/endpoint-map.ts @@ -0,0 +1,182 @@ +// AUTO-GENERATED - Do not edit manually +// Generated from types.gen.ts via scripts/generate-fal-endpoint-maps.ts + +import { + zSchemaAvatarsAudioToVideoInput, + zSchemaAvatarsAudioToVideoOutput, + zSchemaEchomimicV3Input, + zSchemaEchomimicV3Output, + zSchemaElevenlabsDubbingInput, + zSchemaElevenlabsDubbingOutput, + zSchemaLongcatMultiAvatarImageAudioToVideoInput, + zSchemaLongcatMultiAvatarImageAudioToVideoOutput, + zSchemaLongcatSingleAvatarAudioToVideoInput, + zSchemaLongcatSingleAvatarAudioToVideoOutput, + zSchemaLongcatSingleAvatarImageAudioToVideoInput, + zSchemaLongcatSingleAvatarImageAudioToVideoOutput, + zSchemaLtx219bAudioToVideoInput, + zSchemaLtx219bAudioToVideoLoraInput, + zSchemaLtx219bAudioToVideoLoraOutput, + zSchemaLtx219bAudioToVideoOutput, + zSchemaLtx219bDistilledAudioToVideoInput, + zSchemaLtx219bDistilledAudioToVideoLoraInput, + zSchemaLtx219bDistilledAudioToVideoLoraOutput, + zSchemaLtx219bDistilledAudioToVideoOutput, + zSchemaStableAvatarInput, + zSchemaStableAvatarOutput, + zSchemaWanV2214bSpeechToVideoInput, + zSchemaWanV2214bSpeechToVideoOutput, +} from './zod.gen' + +import type { + SchemaAvatarsAudioToVideoInput, + SchemaAvatarsAudioToVideoOutput, + SchemaEchomimicV3Input, + SchemaEchomimicV3Output, + SchemaElevenlabsDubbingInput, + SchemaElevenlabsDubbingOutput, + SchemaLongcatMultiAvatarImageAudioToVideoInput, + SchemaLongcatMultiAvatarImageAudioToVideoOutput, + SchemaLongcatSingleAvatarAudioToVideoInput, + SchemaLongcatSingleAvatarAudioToVideoOutput, + SchemaLongcatSingleAvatarImageAudioToVideoInput, + SchemaLongcatSingleAvatarImageAudioToVideoOutput, + SchemaLtx219bAudioToVideoInput, + SchemaLtx219bAudioToVideoLoraInput, + SchemaLtx219bAudioToVideoLoraOutput, + SchemaLtx219bAudioToVideoOutput, + SchemaLtx219bDistilledAudioToVideoInput, + SchemaLtx219bDistilledAudioToVideoLoraInput, + SchemaLtx219bDistilledAudioToVideoLoraOutput, + SchemaLtx219bDistilledAudioToVideoOutput, + SchemaStableAvatarInput, + SchemaStableAvatarOutput, + SchemaWanV2214bSpeechToVideoInput, + SchemaWanV2214bSpeechToVideoOutput, +} from './types.gen' + +import type { z } from 'zod' + +export type AudioToVideoEndpointMap = { + 'fal-ai/ltx-2-19b/distilled/audio-to-video/lora': { + input: SchemaLtx219bDistilledAudioToVideoLoraInput + output: 
SchemaLtx219bDistilledAudioToVideoLoraOutput + } + 'fal-ai/ltx-2-19b/audio-to-video/lora': { + input: SchemaLtx219bAudioToVideoLoraInput + output: SchemaLtx219bAudioToVideoLoraOutput + } + 'fal-ai/ltx-2-19b/distilled/audio-to-video': { + input: SchemaLtx219bDistilledAudioToVideoInput + output: SchemaLtx219bDistilledAudioToVideoOutput + } + 'fal-ai/ltx-2-19b/audio-to-video': { + input: SchemaLtx219bAudioToVideoInput + output: SchemaLtx219bAudioToVideoOutput + } + 'fal-ai/elevenlabs/dubbing': { + input: SchemaElevenlabsDubbingInput + output: SchemaElevenlabsDubbingOutput + } + 'fal-ai/longcat-multi-avatar/image-audio-to-video': { + input: SchemaLongcatMultiAvatarImageAudioToVideoInput + output: SchemaLongcatMultiAvatarImageAudioToVideoOutput + } + 'fal-ai/longcat-single-avatar/image-audio-to-video': { + input: SchemaLongcatSingleAvatarImageAudioToVideoInput + output: SchemaLongcatSingleAvatarImageAudioToVideoOutput + } + 'fal-ai/longcat-single-avatar/audio-to-video': { + input: SchemaLongcatSingleAvatarAudioToVideoInput + output: SchemaLongcatSingleAvatarAudioToVideoOutput + } + 'argil/avatars/audio-to-video': { + input: SchemaAvatarsAudioToVideoInput + output: SchemaAvatarsAudioToVideoOutput + } + 'fal-ai/wan/v2.2-14b/speech-to-video': { + input: SchemaWanV2214bSpeechToVideoInput + output: SchemaWanV2214bSpeechToVideoOutput + } + 'fal-ai/stable-avatar': { + input: SchemaStableAvatarInput + output: SchemaStableAvatarOutput + } + 'fal-ai/echomimic-v3': { + input: SchemaEchomimicV3Input + output: SchemaEchomimicV3Output + } + 'veed/avatars/audio-to-video': { + input: SchemaAvatarsAudioToVideoInput + output: SchemaAvatarsAudioToVideoOutput + } +} + +/** Union type of all audio-to-video model endpoint IDs */ +export type AudioToVideoModel = keyof AudioToVideoEndpointMap + +export const AudioToVideoSchemaMap: Record< + AudioToVideoModel, + { input: z.ZodSchema; output: z.ZodSchema } +> = { + ['fal-ai/ltx-2-19b/distilled/audio-to-video/lora']: { + input: zSchemaLtx219bDistilledAudioToVideoLoraInput, + output: zSchemaLtx219bDistilledAudioToVideoLoraOutput, + }, + ['fal-ai/ltx-2-19b/audio-to-video/lora']: { + input: zSchemaLtx219bAudioToVideoLoraInput, + output: zSchemaLtx219bAudioToVideoLoraOutput, + }, + ['fal-ai/ltx-2-19b/distilled/audio-to-video']: { + input: zSchemaLtx219bDistilledAudioToVideoInput, + output: zSchemaLtx219bDistilledAudioToVideoOutput, + }, + ['fal-ai/ltx-2-19b/audio-to-video']: { + input: zSchemaLtx219bAudioToVideoInput, + output: zSchemaLtx219bAudioToVideoOutput, + }, + ['fal-ai/elevenlabs/dubbing']: { + input: zSchemaElevenlabsDubbingInput, + output: zSchemaElevenlabsDubbingOutput, + }, + ['fal-ai/longcat-multi-avatar/image-audio-to-video']: { + input: zSchemaLongcatMultiAvatarImageAudioToVideoInput, + output: zSchemaLongcatMultiAvatarImageAudioToVideoOutput, + }, + ['fal-ai/longcat-single-avatar/image-audio-to-video']: { + input: zSchemaLongcatSingleAvatarImageAudioToVideoInput, + output: zSchemaLongcatSingleAvatarImageAudioToVideoOutput, + }, + ['fal-ai/longcat-single-avatar/audio-to-video']: { + input: zSchemaLongcatSingleAvatarAudioToVideoInput, + output: zSchemaLongcatSingleAvatarAudioToVideoOutput, + }, + ['argil/avatars/audio-to-video']: { + input: zSchemaAvatarsAudioToVideoInput, + output: zSchemaAvatarsAudioToVideoOutput, + }, + ['fal-ai/wan/v2.2-14b/speech-to-video']: { + input: zSchemaWanV2214bSpeechToVideoInput, + output: zSchemaWanV2214bSpeechToVideoOutput, + }, + ['fal-ai/stable-avatar']: { + input: zSchemaStableAvatarInput, + output: zSchemaStableAvatarOutput, 
+ }, + ['fal-ai/echomimic-v3']: { + input: zSchemaEchomimicV3Input, + output: zSchemaEchomimicV3Output, + }, + ['veed/avatars/audio-to-video']: { + input: zSchemaAvatarsAudioToVideoInput, + output: zSchemaAvatarsAudioToVideoOutput, + }, +} as const + +/** Get the input type for a specific audio-to-video model */ +export type AudioToVideoModelInput = + AudioToVideoEndpointMap[T]['input'] + +/** Get the output type for a specific audio-to-video model */ +export type AudioToVideoModelOutput = + AudioToVideoEndpointMap[T]['output'] diff --git a/packages/typescript/ai-fal/src/generated/audio-to-video/types.gen.ts b/packages/typescript/ai-fal/src/generated/audio-to-video/types.gen.ts new file mode 100644 index 00000000..1e171a80 --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/audio-to-video/types.gen.ts @@ -0,0 +1,2844 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: 'https://queue.fal.run' | (string & {}) +} + +/** + * EchoMimicResponse + */ +export type SchemaEchomimicV3Output = { + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * File + */ +export type SchemaFile = { + /** + * File Size + * + * The size of the file in bytes. + */ + file_size?: number | unknown + /** + * File Name + * + * The name of the file. It will be auto-generated if not provided. + */ + file_name?: string | unknown + /** + * Content Type + * + * The mime type of the file. + */ + content_type?: string | unknown + /** + * Url + * + * The URL where the file can be downloaded from. + */ + url: string +} + +/** + * EchoMimicRequest + */ +export type SchemaEchomimicV3Input = { + /** + * Prompt + * + * The prompt to use for the video generation. + */ + prompt: string + /** + * Audio URL + * + * The URL of the audio to use as a reference for the video generation. + */ + audio_url: string + /** + * Image URL + * + * The URL of the image to use as a reference for the video generation. + */ + image_url: string + /** + * Guidance Scale + * + * The guidance scale to use for the video generation. + */ + guidance_scale?: number + /** + * Audio Guidance Scale + * + * The audio guidance scale to use for the video generation. + */ + audio_guidance_scale?: number + /** + * Number of frames per generation + * + * The number of frames to generate at once. + */ + num_frames_per_generation?: number + /** + * Negative Prompt + * + * The negative prompt to use for the video generation. + */ + negative_prompt?: string + /** + * Seed + * + * The seed to use for the video generation. + */ + seed?: number +} + +/** + * StableAvatarResponse + */ +export type SchemaStableAvatarOutput = { + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * StableAvatarRequest + */ +export type SchemaStableAvatarInput = { + /** + * Prompt + * + * The prompt to use for the video generation. + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the video to generate. If 'auto', the aspect ratio will be determined by the reference image. + */ + aspect_ratio?: '16:9' | '1:1' | '9:16' | 'auto' + /** + * Perturbation + * + * The amount of perturbation to use for the video generation. 0.0 means no perturbation, 1.0 means full perturbation. + */ + perturbation?: number + /** + * Image URL + * + * The URL of the image to use as a reference for the video generation. + */ + image_url: string + /** + * Guidance Scale + * + * The guidance scale to use for the video generation. 
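// --- Illustrative typing sketch (not part of the generated file) ---
// AudioToVideoModelInput/-Output from the endpoint map above resolve
// per-endpoint types, so one generic wrapper stays precisely typed across all
// thirteen endpoints. A sketch assuming the exports above; `submitToQueue` is
// a hypothetical transport function, not part of this package.
import type {
  AudioToVideoModel,
  AudioToVideoModelInput,
  AudioToVideoModelOutput,
} from './endpoint-map'

declare function submitToQueue(
  endpoint: string,
  body: unknown,
): Promise<unknown>

async function generateVideo<T extends AudioToVideoModel>(
  model: T,
  input: AudioToVideoModelInput<T>,
): Promise<AudioToVideoModelOutput<T>> {
  // The single type parameter ties the payload and the awaited result to the
  // same endpoint ID at compile time.
  return (await submitToQueue(model, input)) as AudioToVideoModelOutput<T>
}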
+ */ + guidance_scale?: number + /** + * Seed + * + * The seed to use for the video generation. + */ + seed?: number + /** + * Number of Inference Steps + * + * The number of inference steps to use for the video generation. + */ + num_inference_steps?: number + /** + * Audio URL + * + * The URL of the audio to use as a reference for the video generation. + */ + audio_url: string + /** + * Audio Guidance Scale + * + * The audio guidance scale to use for the video generation. + */ + audio_guidance_scale?: number +} + +/** + * WanS2VResponse + */ +export type SchemaWanV2214bSpeechToVideoOutput = { + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * WanS2VRequest + */ +export type SchemaWanV2214bSpeechToVideoInput = { + /** + * Prompt + * + * The text prompt used for video generation. + */ + prompt: string + /** + * Shift + * + * Shift value for the video. Must be between 1.0 and 10.0. + */ + shift?: number + /** + * Frames per Second + * + * Frames per second of the generated video. Must be between 4 to 60. When using interpolation and `adjust_fps_for_interpolation` is set to true (default true,) the final FPS will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If `adjust_fps_for_interpolation` is set to false, this value will be used as-is. + */ + frames_per_second?: number + /** + * Guidance Scale + * + * Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality. + */ + guidance_scale?: number + /** + * Number of Frames + * + * Number of frames to generate. Must be between 40 to 120, (must be multiple of 4). + */ + num_frames?: number + /** + * Enable Safety Checker + * + * If set to true, input data will be checked for safety before processing. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * Negative prompt for video generation. + */ + negative_prompt?: string + /** + * Video Write Mode + * + * The write mode of the output video. Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Resolution + * + * Resolution of the generated video (480p, 580p, or 720p). + */ + resolution?: '480p' | '580p' | '720p' + /** + * Enable Output Safety Checker + * + * If set to true, output video will be checked for safety after generation. + */ + enable_output_safety_checker?: boolean + /** + * Image URL + * + * URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped. + */ + image_url: string + /** + * Video Quality + * + * The quality of the output video. Higher quality means better visual quality but larger file size. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Audio URL + * + * The URL of the audio file. + */ + audio_url: string + /** + * Number of Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: number + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. 
+ */ + seed?: number +} + +/** + * AvatarsAppOutput + */ +export type SchemaAvatarsAudioToVideoOutput = { + video: SchemaFile +} + +/** + * Audio2VideoInput + */ +export type SchemaAvatarsAudioToVideoInput = { + /** + * Audio Url + */ + audio_url: string + /** + * Avatar Id + * + * The avatar to use for the video + */ + avatar_id: + | 'emily_vertical_primary' + | 'emily_vertical_secondary' + | 'marcus_vertical_primary' + | 'marcus_vertical_secondary' + | 'mira_vertical_primary' + | 'mira_vertical_secondary' + | 'jasmine_vertical_primary' + | 'jasmine_vertical_secondary' + | 'jasmine_vertical_walking' + | 'aisha_vertical_walking' + | 'elena_vertical_primary' + | 'elena_vertical_secondary' + | 'any_male_vertical_primary' + | 'any_female_vertical_primary' + | 'any_male_vertical_secondary' + | 'any_female_vertical_secondary' + | 'any_female_vertical_walking' + | 'emily_primary' + | 'emily_side' + | 'marcus_primary' + | 'marcus_side' + | 'aisha_walking' + | 'elena_primary' + | 'elena_side' + | 'any_male_primary' + | 'any_female_primary' + | 'any_male_side' + | 'any_female_side' +} + +/** + * AudioToVideoResponse + * + * Response model for audio-to-video generation (no reference image). + */ +export type SchemaLongcatSingleAvatarAudioToVideoOutput = { + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * AudioToVideoRequest + * + * Request model for audio-to-video generation. + */ +export type SchemaLongcatSingleAvatarAudioToVideoInput = { + /** + * Prompt + * + * The prompt to guide the video generation. + */ + prompt?: string + /** + * Resolution + * + * Resolution of the generated video (480p or 720p). Billing is per video-second (16 frames): 480p is 1 unit per second and 720p is 4 units per second. + */ + resolution?: '480p' | '720p' + /** + * Enable Safety Checker + * + * Whether to enable safety checker. + */ + enable_safety_checker?: boolean + /** + * Audio Guidance Scale + * + * The audio guidance scale. Higher values may lead to exaggerated mouth movements. + */ + audio_guidance_scale?: number + /** + * Number of Segments + * + * Number of video segments to generate. Each segment adds ~5 seconds of video. First segment is ~5.8s, additional segments are 5s each. + */ + num_segments?: number + /** + * Audio URL + * + * The URL of the audio file to drive the avatar. + */ + audio_url: string + /** + * Number of Inference Steps + * + * The number of inference steps to use. + */ + num_inference_steps?: number + /** + * Seed + * + * The seed for the random number generator. + */ + seed?: number + /** + * Negative Prompt + * + * The negative prompt to avoid in the video generation. + */ + negative_prompt?: string + /** + * Text Guidance Scale + * + * The text guidance scale for classifier-free guidance. + */ + text_guidance_scale?: number +} + +/** + * ImageAudioToVideoResponse + * + * Response model for image+audio to video generation. + */ +export type SchemaLongcatSingleAvatarImageAudioToVideoOutput = { + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * ImageAudioToVideoRequest + * + * Request model for image+audio to video generation. + */ +export type SchemaLongcatSingleAvatarImageAudioToVideoInput = { + /** + * Prompt + * + * The prompt to guide the video generation. + */ + prompt: string + /** + * Resolution + * + * Resolution of the generated video (480p or 720p). 
Billing is per video-second (16 frames): 480p is 1 unit per second and 720p is 4 units per second. + */ + resolution?: '480p' | '720p' + /** + * Enable Safety Checker + * + * Whether to enable safety checker. + */ + enable_safety_checker?: boolean + /** + * Audio Guidance Scale + * + * The audio guidance scale. Higher values may lead to exaggerated mouth movements. + */ + audio_guidance_scale?: number + /** + * Number of Segments + * + * Number of video segments to generate. Each segment adds ~5 seconds of video. First segment is ~5.8s, additional segments are 5s each. + */ + num_segments?: number + /** + * Image URL + * + * The URL of the image to animate. + */ + image_url: string + /** + * Audio URL + * + * The URL of the audio file to drive the avatar. + */ + audio_url: string + /** + * Number of Inference Steps + * + * The number of inference steps to use. + */ + num_inference_steps?: number + /** + * Seed + * + * The seed for the random number generator. + */ + seed?: number + /** + * Negative Prompt + * + * The negative prompt to avoid in the video generation. + */ + negative_prompt?: string + /** + * Text Guidance Scale + * + * The text guidance scale for classifier-free guidance. + */ + text_guidance_scale?: number +} + +/** + * BoundingBox + */ +export type SchemaBoundingBox = { + /** + * Y + * + * Y-coordinate of the top-left corner + */ + y: number + /** + * X + * + * X-coordinate of the top-left corner + */ + x: number + /** + * H + * + * Height of the bounding box + */ + h: number + /** + * W + * + * Width of the bounding box + */ + w: number + /** + * Label + * + * Label of the bounding box + */ + label: string +} + +/** + * MultiSpeakerImageAudioToVideoResponse + * + * Response model for multi-speaker image+audio to video generation. + */ +export type SchemaLongcatMultiAvatarImageAudioToVideoOutput = { + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * MultiSpeakerImageAudioToVideoRequest + * + * Request model for multi-speaker image+audio to video generation. + */ +export type SchemaLongcatMultiAvatarImageAudioToVideoInput = { + /** + * Prompt + * + * The prompt to guide the video generation. + */ + prompt?: string + /** + * Number of Inference Steps + * + * The number of inference steps to use. + */ + num_inference_steps?: number + /** + * Audio URL Person 2 + * + * The URL of the audio file for person 2 (right side). + */ + audio_url_person2?: string + /** + * Enable Safety Checker + * + * Whether to enable safety checker. + */ + enable_safety_checker?: boolean + /** + * Bbox Person1 + * + * Bounding box for person 1. If not provided, defaults to left half of image. + */ + bbox_person1?: SchemaBoundingBox + /** + * Negative Prompt + * + * The negative prompt to avoid in the video generation. + */ + negative_prompt?: string + /** + * Text Guidance Scale + * + * The text guidance scale for classifier-free guidance. + */ + text_guidance_scale?: number + /** + * Resolution + * + * Resolution of the generated video (480p or 720p). Billing is per video-second (16 frames): 480p is 1 unit per second and 720p is 4 units per second. + */ + resolution?: '480p' | '720p' + /** + * Audio Type + * + * How to combine the two audio tracks. 'para' (parallel) plays both simultaneously, 'add' (sequential) plays person 1 first then person 2. + */ + audio_type?: 'para' | 'add' + /** + * Image URL + * + * The URL of the image containing two speakers. 
+ */ + image_url: string + /** + * Audio URL Person 1 + * + * The URL of the audio file for person 1 (left side). + */ + audio_url_person1?: string + /** + * Seed + * + * The seed for the random number generator. + */ + seed?: number + /** + * Audio Guidance Scale + * + * The audio guidance scale. Higher values may lead to exaggerated mouth movements. + */ + audio_guidance_scale?: number + /** + * Bbox Person2 + * + * Bounding box for person 2. If not provided, defaults to right half of image. + */ + bbox_person2?: SchemaBoundingBox + /** + * Number of Segments + * + * Number of video segments to generate. Each segment adds ~5 seconds of video. First segment is ~5.8s, additional segments are 5s each. + */ + num_segments?: number +} + +/** + * DubbingVideoOutput + */ +export type SchemaElevenlabsDubbingOutput = { + /** + * Target Lang + * + * The target language of the dubbed content + */ + target_lang: string + video: SchemaFile +} + +/** + * DubbingRequest + */ +export type SchemaElevenlabsDubbingInput = { + /** + * Video Url + * + * URL of the video file to dub. Either audio_url or video_url must be provided. If both are provided, video_url takes priority. + */ + video_url?: string | unknown + /** + * Audio Url + * + * URL of the audio file to dub. Either audio_url or video_url must be provided. + */ + audio_url?: string | unknown + /** + * Highest Resolution + * + * Whether to use the highest resolution for dubbing. + */ + highest_resolution?: boolean + /** + * Target Lang + * + * Target language code for dubbing (ISO 639-1) + */ + target_lang: string + /** + * Source Lang + * + * Source language code. If not provided, will be auto-detected. + */ + source_lang?: string | unknown + /** + * Num Speakers + * + * Number of speakers in the audio. If not provided, will be auto-detected. + */ + num_speakers?: number | unknown +} + +/** + * LTX2AudioToVideoOutput + */ +export type SchemaLtx219bAudioToVideoOutput = { + /** + * Prompt + * + * The prompt used for the generation. + */ + prompt: string + /** + * Seed + * + * The seed used for the random number generator. + */ + seed: number + video: SchemaVideoFile +} + +/** + * VideoFile + */ +export type SchemaVideoFile = { + /** + * File Size + * + * The size of the file in bytes. + */ + file_size?: number | unknown + /** + * Duration + * + * The duration of the video + */ + duration?: number | unknown + /** + * Height + * + * The height of the video + */ + height?: number | unknown + /** + * Url + * + * The URL where the file can be downloaded from. + */ + url: string + /** + * Width + * + * The width of the video + */ + width?: number | unknown + /** + * Fps + * + * The FPS of the video + */ + fps?: number | unknown + /** + * File Name + * + * The name of the file. It will be auto-generated if not provided. + */ + file_name?: string | unknown + /** + * Content Type + * + * The mime type of the file. + */ + content_type?: string | unknown + /** + * Num Frames + * + * The number of frames in the video + */ + num_frames?: number | unknown +} + +/** + * LTX2AudioToVideoInput + */ +export type SchemaLtx219bAudioToVideoInput = { + /** + * Match Audio Length + * + * When enabled, the number of frames will be calculated based on the audio duration and FPS. When disabled, use the specified num_frames. + */ + match_audio_length?: boolean + /** + * Use Multi-Scale + * + * Whether to use multi-scale generation. 
If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details. + */ + use_multiscale?: boolean + /** + * Acceleration + * + * The acceleration level to use. + */ + acceleration?: 'none' | 'regular' | 'high' | 'full' + /** + * Prompt + * + * The prompt to generate the video from. + */ + prompt: string + /** + * FPS + * + * The frames per second of the generated video. + */ + fps?: number + /** + * Camera LoRA + * + * The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera. + */ + camera_lora?: + | 'dolly_in' + | 'dolly_out' + | 'dolly_left' + | 'dolly_right' + | 'jib_up' + | 'jib_down' + | 'static' + | 'none' + /** + * Video Size + * + * The size of the generated video. Use 'auto' to match the input image dimensions if provided. + */ + video_size?: + | SchemaImageSize + | 'auto' + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Number of Frames + * + * The number of frames to generate. + */ + num_frames?: number + /** + * Image Strength + * + * The strength of the image to use for the video generation. + */ + image_strength?: number + /** + * Negative Prompt + * + * The negative prompt to generate the video from. + */ + negative_prompt?: string + /** + * Guidance Scale + * + * The guidance scale to use. + */ + guidance_scale?: number + /** + * Video Write Mode + * + * The write mode of the generated video. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Video Output Type + * + * The output type of the generated video. + */ + video_output_type?: + | 'X264 (.mp4)' + | 'VP9 (.webm)' + | 'PRORES4444 (.mov)' + | 'GIF (.gif)' + /** + * Camera LoRA Scale + * + * The scale of the camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera. + */ + camera_lora_scale?: number + /** + * Preprocess Audio + * + * Whether to preprocess the audio before using it as conditioning. + */ + preprocess_audio?: boolean + /** + * Image URL + * + * Optional URL of an image to use as the first frame of the video. + */ + image_url?: string | unknown + /** + * Video Quality + * + * The quality of the generated video. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Audio URL + * + * The URL of the audio to generate the video from. + */ + audio_url: string + /** + * Audio Strength + * + * Audio conditioning strength. Values below 1.0 will allow the model to change the audio, while a value of exactly 1.0 will use the input audio without modification. + */ + audio_strength?: number + /** + * Number of Inference Steps + * + * The number of inference steps to use. + */ + num_inference_steps?: number + /** + * Seed + * + * The seed for the random number generator. + */ + seed?: number | unknown + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. 
+ */ + enable_prompt_expansion?: boolean +} + +/** + * ImageSize + */ +export type SchemaImageSize = { + /** + * Height + * + * The height of the generated image. + */ + height?: number + /** + * Width + * + * The width of the generated image. + */ + width?: number +} + +/** + * LTX2AudioToVideoOutput + */ +export type SchemaLtx219bDistilledAudioToVideoOutput = { + /** + * Prompt + * + * The prompt used for the generation. + */ + prompt: string + /** + * Seed + * + * The seed used for the random number generator. + */ + seed: number + video: SchemaVideoFile +} + +/** + * LTX2DistilledAudioToVideoInput + */ +export type SchemaLtx219bDistilledAudioToVideoInput = { + /** + * Match Audio Length + * + * When enabled, the number of frames will be calculated based on the audio duration and FPS. When disabled, use the specified num_frames. + */ + match_audio_length?: boolean + /** + * Use Multi-Scale + * + * Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details. + */ + use_multiscale?: boolean + /** + * Acceleration + * + * The acceleration level to use. + */ + acceleration?: 'none' | 'regular' | 'high' | 'full' + /** + * Prompt + * + * The prompt to generate the video from. + */ + prompt: string + /** + * FPS + * + * The frames per second of the generated video. + */ + fps?: number + /** + * Camera LoRA + * + * The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera. + */ + camera_lora?: + | 'dolly_in' + | 'dolly_out' + | 'dolly_left' + | 'dolly_right' + | 'jib_up' + | 'jib_down' + | 'static' + | 'none' + /** + * Video Size + * + * The size of the generated video. Use 'auto' to match the input image dimensions if provided. + */ + video_size?: + | SchemaImageSize + | 'auto' + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Number of Frames + * + * The number of frames to generate. + */ + num_frames?: number + /** + * Image Strength + * + * The strength of the image to use for the video generation. + */ + image_strength?: number + /** + * Negative Prompt + * + * The negative prompt to generate the video from. + */ + negative_prompt?: string + /** + * Camera LoRA Scale + * + * The scale of the camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera. + */ + camera_lora_scale?: number + /** + * Video Write Mode + * + * The write mode of the generated video. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Video Output Type + * + * The output type of the generated video. + */ + video_output_type?: + | 'X264 (.mp4)' + | 'VP9 (.webm)' + | 'PRORES4444 (.mov)' + | 'GIF (.gif)' + /** + * Preprocess Audio + * + * Whether to preprocess the audio before using it as conditioning. + */ + preprocess_audio?: boolean + /** + * Image URL + * + * Optional URL of an image to use as the first frame of the video. + */ + image_url?: string | unknown + /** + * Video Quality + * + * The quality of the generated video. 
+ */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Audio URL + * + * The URL of the audio to generate the video from. + */ + audio_url: string + /** + * Audio Strength + * + * Audio conditioning strength. Values below 1.0 will allow the model to change the audio, while a value of exactly 1.0 will use the input audio without modification. + */ + audio_strength?: number + /** + * Seed + * + * The seed for the random number generator. + */ + seed?: number | unknown + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean +} + +/** + * LTX2AudioToVideoOutput + */ +export type SchemaLtx219bAudioToVideoLoraOutput = { + /** + * Prompt + * + * The prompt used for the generation. + */ + prompt: string + /** + * Seed + * + * The seed used for the random number generator. + */ + seed: number + video: SchemaVideoFile +} + +/** + * LTX2LoRAAudioToVideoInput + */ +export type SchemaLtx219bAudioToVideoLoraInput = { + /** + * Match Audio Length + * + * When enabled, the number of frames will be calculated based on the audio duration and FPS. When disabled, use the specified num_frames. + */ + match_audio_length?: boolean + /** + * Use Multi-Scale + * + * Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details. + */ + use_multiscale?: boolean + /** + * Acceleration + * + * The acceleration level to use. + */ + acceleration?: 'none' | 'regular' | 'high' | 'full' + /** + * Prompt + * + * The prompt to generate the video from. + */ + prompt: string + /** + * FPS + * + * The frames per second of the generated video. + */ + fps?: number + /** + * LoRAs + * + * The LoRAs to use for the generation. + */ + loras: Array + /** + * Camera LoRA + * + * The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera. + */ + camera_lora?: + | 'dolly_in' + | 'dolly_out' + | 'dolly_left' + | 'dolly_right' + | 'jib_up' + | 'jib_down' + | 'static' + | 'none' + /** + * Video Size + * + * The size of the generated video. Use 'auto' to match the input image dimensions if provided. + */ + video_size?: + | SchemaImageSize + | 'auto' + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Number of Frames + * + * The number of frames to generate. + */ + num_frames?: number + /** + * Image Strength + * + * The strength of the image to use for the video generation. + */ + image_strength?: number + /** + * Negative Prompt + * + * The negative prompt to generate the video from. + */ + negative_prompt?: string + /** + * Guidance Scale + * + * The guidance scale to use. + */ + guidance_scale?: number + /** + * Video Write Mode + * + * The write mode of the generated video. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Video Output Type + * + * The output type of the generated video. 
+ */ + video_output_type?: + | 'X264 (.mp4)' + | 'VP9 (.webm)' + | 'PRORES4444 (.mov)' + | 'GIF (.gif)' + /** + * Camera LoRA Scale + * + * The scale of the camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera. + */ + camera_lora_scale?: number + /** + * Preprocess Audio + * + * Whether to preprocess the audio before using it as conditioning. + */ + preprocess_audio?: boolean + /** + * Image URL + * + * Optional URL of an image to use as the first frame of the video. + */ + image_url?: string | unknown + /** + * Video Quality + * + * The quality of the generated video. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Audio URL + * + * The URL of the audio to generate the video from. + */ + audio_url: string + /** + * Audio Strength + * + * Audio conditioning strength. Values below 1.0 will allow the model to change the audio, while a value of exactly 1.0 will use the input audio without modification. + */ + audio_strength?: number + /** + * Number of Inference Steps + * + * The number of inference steps to use. + */ + num_inference_steps?: number + /** + * Seed + * + * The seed for the random number generator. + */ + seed?: number | unknown + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean +} + +/** + * LoRAInput + * + * LoRA weight configuration. + */ +export type SchemaLoRaInput = { + /** + * Path + * + * URL, HuggingFace repo ID (owner/repo) to lora weights. + */ + path: string + /** + * Scale + * + * Scale factor for LoRA application (0.0 to 4.0). + */ + scale?: number + /** + * Weight Name + * + * Name of the LoRA weight. Only used if `path` is a HuggingFace repository, and is only required when the repository contains multiple LoRA weights. + */ + weight_name?: string | unknown +} + +/** + * LTX2AudioToVideoOutput + */ +export type SchemaLtx219bDistilledAudioToVideoLoraOutput = { + /** + * Prompt + * + * The prompt used for the generation. + */ + prompt: string + /** + * Seed + * + * The seed used for the random number generator. + */ + seed: number + video: SchemaVideoFile +} + +/** + * LTX2LoRADistilledAudioToVideoInput + */ +export type SchemaLtx219bDistilledAudioToVideoLoraInput = { + /** + * Match Audio Length + * + * When enabled, the number of frames will be calculated based on the audio duration and FPS. When disabled, use the specified num_frames. + */ + match_audio_length?: boolean + /** + * Use Multi-Scale + * + * Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details. + */ + use_multiscale?: boolean + /** + * Acceleration + * + * The acceleration level to use. + */ + acceleration?: 'none' | 'regular' | 'high' | 'full' + /** + * Prompt + * + * The prompt to generate the video from. + */ + prompt: string + /** + * FPS + * + * The frames per second of the generated video. + */ + fps?: number + /** + * LoRAs + * + * The LoRAs to use for the generation. + */ + loras: Array + /** + * Camera LoRA + * + * The camera LoRA to use. 
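// --- Illustrative payload sketch (not part of the generated file) ---
// SchemaLoRaInput above takes either a direct URL or a HuggingFace repo ID as
// `path`; `weight_name` is only needed when a repo contains several weights,
// and `scale` ranges from 0.0 to 4.0. The identifiers below are hypothetical.
import type { SchemaLoRaInput } from './types.gen'

const loras: Array<SchemaLoRaInput> = [
  { path: 'https://example.com/my-style.safetensors', scale: 0.8 },
  { path: 'some-owner/some-repo', weight_name: 'style.safetensors' },
]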
This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera. + */ + camera_lora?: + | 'dolly_in' + | 'dolly_out' + | 'dolly_left' + | 'dolly_right' + | 'jib_up' + | 'jib_down' + | 'static' + | 'none' + /** + * Video Size + * + * The size of the generated video. Use 'auto' to match the input image dimensions if provided. + */ + video_size?: + | SchemaImageSize + | 'auto' + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Number of Frames + * + * The number of frames to generate. + */ + num_frames?: number + /** + * Image Strength + * + * The strength of the image to use for the video generation. + */ + image_strength?: number + /** + * Negative Prompt + * + * The negative prompt to generate the video from. + */ + negative_prompt?: string + /** + * Camera LoRA Scale + * + * The scale of the camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera. + */ + camera_lora_scale?: number + /** + * Video Write Mode + * + * The write mode of the generated video. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Video Output Type + * + * The output type of the generated video. + */ + video_output_type?: + | 'X264 (.mp4)' + | 'VP9 (.webm)' + | 'PRORES4444 (.mov)' + | 'GIF (.gif)' + /** + * Preprocess Audio + * + * Whether to preprocess the audio before using it as conditioning. + */ + preprocess_audio?: boolean + /** + * Image URL + * + * Optional URL of an image to use as the first frame of the video. + */ + image_url?: string | unknown + /** + * Video Quality + * + * The quality of the generated video. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Audio URL + * + * The URL of the audio to generate the video from. + */ + audio_url: string + /** + * Audio Strength + * + * Audio conditioning strength. Values below 1.0 will allow the model to change the audio, while a value of exactly 1.0 will use the input audio without modification. + */ + audio_strength?: number + /** + * Seed + * + * The seed for the random number generator. + */ + seed?: number | unknown + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean +} + +export type SchemaQueueStatus = { + status: 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED' + /** + * The request id. + */ + request_id: string + /** + * The response url. + */ + response_url?: string + /** + * The status url. + */ + status_url?: string + /** + * The cancel url. + */ + cancel_url?: string + /** + * The logs. + */ + logs?: { + [key: string]: unknown + } + /** + * The metrics. + */ + metrics?: { + [key: string]: unknown + } + /** + * The queue position. + */ + queue_position?: number +} + +export type GetFalAiLtx219bDistilledAudioToVideoLoraRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/ltx-2-19b/distilled/audio-to-video/lora/requests/{request_id}/status' + } + +export type GetFalAiLtx219bDistilledAudioToVideoLoraRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiLtx219bDistilledAudioToVideoLoraRequestsByRequestIdStatusResponse = + GetFalAiLtx219bDistilledAudioToVideoLoraRequestsByRequestIdStatusResponses[keyof GetFalAiLtx219bDistilledAudioToVideoLoraRequestsByRequestIdStatusResponses] + +export type PutFalAiLtx219bDistilledAudioToVideoLoraRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2-19b/distilled/audio-to-video/lora/requests/{request_id}/cancel' + } + +export type PutFalAiLtx219bDistilledAudioToVideoLoraRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiLtx219bDistilledAudioToVideoLoraRequestsByRequestIdCancelResponse = + PutFalAiLtx219bDistilledAudioToVideoLoraRequestsByRequestIdCancelResponses[keyof PutFalAiLtx219bDistilledAudioToVideoLoraRequestsByRequestIdCancelResponses] + +export type PostFalAiLtx219bDistilledAudioToVideoLoraData = { + body: SchemaLtx219bDistilledAudioToVideoLoraInput + path?: never + query?: never + url: '/fal-ai/ltx-2-19b/distilled/audio-to-video/lora' +} + +export type PostFalAiLtx219bDistilledAudioToVideoLoraResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtx219bDistilledAudioToVideoLoraResponse = + PostFalAiLtx219bDistilledAudioToVideoLoraResponses[keyof PostFalAiLtx219bDistilledAudioToVideoLoraResponses] + +export type GetFalAiLtx219bDistilledAudioToVideoLoraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2-19b/distilled/audio-to-video/lora/requests/{request_id}' +} + +export type GetFalAiLtx219bDistilledAudioToVideoLoraRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaLtx219bDistilledAudioToVideoLoraOutput + } + +export type GetFalAiLtx219bDistilledAudioToVideoLoraRequestsByRequestIdResponse = + GetFalAiLtx219bDistilledAudioToVideoLoraRequestsByRequestIdResponses[keyof GetFalAiLtx219bDistilledAudioToVideoLoraRequestsByRequestIdResponses] + +export type GetFalAiLtx219bAudioToVideoLoraRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ltx-2-19b/audio-to-video/lora/requests/{request_id}/status' +} + +export type GetFalAiLtx219bAudioToVideoLoraRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
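Every endpoint in this file follows the same queue lifecycle: POST the input (which returns a `SchemaQueueStatus`), poll the status route until `COMPLETED`, then GET the result. A hedged sketch against fal's raw queue HTTP API using the distilled LoRA routes above; the `queue.fal.run` host and `Key` auth scheme follow fal's queue documentation, and the helper itself is hypothetical, not part of this package:

```ts
// Hypothetical end-to-end runner for the distilled audio-to-video LoRA queue.
// Assumes FAL_KEY is set in the environment; paths mirror the generated
// `url` fields above.
import type {
  SchemaLtx219bDistilledAudioToVideoLoraInput,
  SchemaLtx219bDistilledAudioToVideoLoraOutput,
  SchemaQueueStatus,
} from './types.gen'

const QUEUE = 'https://queue.fal.run'
const headers = {
  Authorization: `Key ${process.env.FAL_KEY}`,
  'Content-Type': 'application/json',
}

async function runDistilledLora(
  input: SchemaLtx219bDistilledAudioToVideoLoraInput,
): Promise<SchemaLtx219bDistilledAudioToVideoLoraOutput> {
  // 1. Submit: POST /fal-ai/ltx-2-19b/distilled/audio-to-video/lora
  const submitted: SchemaQueueStatus = await fetch(
    `${QUEUE}/fal-ai/ltx-2-19b/distilled/audio-to-video/lora`,
    { method: 'POST', headers, body: JSON.stringify(input) },
  ).then((r) => r.json())

  // 2. Poll: GET .../requests/{request_id}/status (logs=1 includes logs)
  const base = `${QUEUE}/fal-ai/ltx-2-19b/distilled/audio-to-video/lora/requests/${submitted.request_id}`
  let status: SchemaQueueStatus = submitted
  while (status.status !== 'COMPLETED') {
    await new Promise((resolve) => setTimeout(resolve, 1000))
    status = await fetch(`${base}/status?logs=1`, { headers }).then((r) =>
      r.json(),
    )
  }

  // 3. Result: GET .../requests/{request_id}
  return fetch(base, { headers }).then((r) => r.json())
}
```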
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiLtx219bAudioToVideoLoraRequestsByRequestIdStatusResponse = + GetFalAiLtx219bAudioToVideoLoraRequestsByRequestIdStatusResponses[keyof GetFalAiLtx219bAudioToVideoLoraRequestsByRequestIdStatusResponses] + +export type PutFalAiLtx219bAudioToVideoLoraRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2-19b/audio-to-video/lora/requests/{request_id}/cancel' +} + +export type PutFalAiLtx219bAudioToVideoLoraRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiLtx219bAudioToVideoLoraRequestsByRequestIdCancelResponse = + PutFalAiLtx219bAudioToVideoLoraRequestsByRequestIdCancelResponses[keyof PutFalAiLtx219bAudioToVideoLoraRequestsByRequestIdCancelResponses] + +export type PostFalAiLtx219bAudioToVideoLoraData = { + body: SchemaLtx219bAudioToVideoLoraInput + path?: never + query?: never + url: '/fal-ai/ltx-2-19b/audio-to-video/lora' +} + +export type PostFalAiLtx219bAudioToVideoLoraResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtx219bAudioToVideoLoraResponse = + PostFalAiLtx219bAudioToVideoLoraResponses[keyof PostFalAiLtx219bAudioToVideoLoraResponses] + +export type GetFalAiLtx219bAudioToVideoLoraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2-19b/audio-to-video/lora/requests/{request_id}' +} + +export type GetFalAiLtx219bAudioToVideoLoraRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLtx219bAudioToVideoLoraOutput +} + +export type GetFalAiLtx219bAudioToVideoLoraRequestsByRequestIdResponse = + GetFalAiLtx219bAudioToVideoLoraRequestsByRequestIdResponses[keyof GetFalAiLtx219bAudioToVideoLoraRequestsByRequestIdResponses] + +export type GetFalAiLtx219bDistilledAudioToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ltx-2-19b/distilled/audio-to-video/requests/{request_id}/status' + } + +export type GetFalAiLtx219bDistilledAudioToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiLtx219bDistilledAudioToVideoRequestsByRequestIdStatusResponse = + GetFalAiLtx219bDistilledAudioToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiLtx219bDistilledAudioToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiLtx219bDistilledAudioToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2-19b/distilled/audio-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiLtx219bDistilledAudioToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiLtx219bDistilledAudioToVideoRequestsByRequestIdCancelResponse = + PutFalAiLtx219bDistilledAudioToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiLtx219bDistilledAudioToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiLtx219bDistilledAudioToVideoData = { + body: SchemaLtx219bDistilledAudioToVideoInput + path?: never + query?: never + url: '/fal-ai/ltx-2-19b/distilled/audio-to-video' +} + +export type PostFalAiLtx219bDistilledAudioToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtx219bDistilledAudioToVideoResponse = + PostFalAiLtx219bDistilledAudioToVideoResponses[keyof PostFalAiLtx219bDistilledAudioToVideoResponses] + +export type GetFalAiLtx219bDistilledAudioToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2-19b/distilled/audio-to-video/requests/{request_id}' +} + +export type GetFalAiLtx219bDistilledAudioToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLtx219bDistilledAudioToVideoOutput +} + +export type GetFalAiLtx219bDistilledAudioToVideoRequestsByRequestIdResponse = + GetFalAiLtx219bDistilledAudioToVideoRequestsByRequestIdResponses[keyof GetFalAiLtx219bDistilledAudioToVideoRequestsByRequestIdResponses] + +export type GetFalAiLtx219bAudioToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ltx-2-19b/audio-to-video/requests/{request_id}/status' +} + +export type GetFalAiLtx219bAudioToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLtx219bAudioToVideoRequestsByRequestIdStatusResponse = + GetFalAiLtx219bAudioToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiLtx219bAudioToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiLtx219bAudioToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2-19b/audio-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiLtx219bAudioToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLtx219bAudioToVideoRequestsByRequestIdCancelResponse = + PutFalAiLtx219bAudioToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiLtx219bAudioToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiLtx219bAudioToVideoData = { + body: SchemaLtx219bAudioToVideoInput + path?: never + query?: never + url: '/fal-ai/ltx-2-19b/audio-to-video' +} + +export type PostFalAiLtx219bAudioToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtx219bAudioToVideoResponse = + PostFalAiLtx219bAudioToVideoResponses[keyof PostFalAiLtx219bAudioToVideoResponses] + +export type GetFalAiLtx219bAudioToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2-19b/audio-to-video/requests/{request_id}' +} + +export type GetFalAiLtx219bAudioToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaLtx219bAudioToVideoOutput +} + +export type GetFalAiLtx219bAudioToVideoRequestsByRequestIdResponse = + GetFalAiLtx219bAudioToVideoRequestsByRequestIdResponses[keyof GetFalAiLtx219bAudioToVideoRequestsByRequestIdResponses] + +export type GetFalAiElevenlabsDubbingRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/elevenlabs/dubbing/requests/{request_id}/status' +} + +export type GetFalAiElevenlabsDubbingRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiElevenlabsDubbingRequestsByRequestIdStatusResponse = + GetFalAiElevenlabsDubbingRequestsByRequestIdStatusResponses[keyof GetFalAiElevenlabsDubbingRequestsByRequestIdStatusResponses] + +export type PutFalAiElevenlabsDubbingRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/elevenlabs/dubbing/requests/{request_id}/cancel' +} + +export type PutFalAiElevenlabsDubbingRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiElevenlabsDubbingRequestsByRequestIdCancelResponse = + PutFalAiElevenlabsDubbingRequestsByRequestIdCancelResponses[keyof PutFalAiElevenlabsDubbingRequestsByRequestIdCancelResponses] + +export type PostFalAiElevenlabsDubbingData = { + body: SchemaElevenlabsDubbingInput + path?: never + query?: never + url: '/fal-ai/elevenlabs/dubbing' +} + +export type PostFalAiElevenlabsDubbingResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiElevenlabsDubbingResponse = + PostFalAiElevenlabsDubbingResponses[keyof PostFalAiElevenlabsDubbingResponses] + +export type GetFalAiElevenlabsDubbingRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/elevenlabs/dubbing/requests/{request_id}' +} + +export type GetFalAiElevenlabsDubbingRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaElevenlabsDubbingOutput +} + +export type GetFalAiElevenlabsDubbingRequestsByRequestIdResponse = + GetFalAiElevenlabsDubbingRequestsByRequestIdResponses[keyof GetFalAiElevenlabsDubbingRequestsByRequestIdResponses] + +export type GetFalAiLongcatMultiAvatarImageAudioToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/longcat-multi-avatar/image-audio-to-video/requests/{request_id}/status' + } + +export type GetFalAiLongcatMultiAvatarImageAudioToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
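The dubbing input takes the source media as either a `video_url` or an `audio_url`, plus a required ISO 639-1 `target_lang`. A small sketch of a body, with placeholder URLs, assuming the `SchemaElevenlabsDubbingInput` type declared earlier in this file:

```ts
// Sketch: dub a clip into Spanish. URLs are placeholders.
import type { SchemaElevenlabsDubbingInput } from './types.gen'

const dubBody: SchemaElevenlabsDubbingInput = {
  video_url: 'https://example.com/clip.mp4',
  target_lang: 'es', // ISO 639-1 target language code
  source_lang: 'en', // optional source language hint
  num_speakers: 2, // optional, 1 to 50
  highest_resolution: true, // the documented default
}
```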
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiLongcatMultiAvatarImageAudioToVideoRequestsByRequestIdStatusResponse = + GetFalAiLongcatMultiAvatarImageAudioToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiLongcatMultiAvatarImageAudioToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiLongcatMultiAvatarImageAudioToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/longcat-multi-avatar/image-audio-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiLongcatMultiAvatarImageAudioToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiLongcatMultiAvatarImageAudioToVideoRequestsByRequestIdCancelResponse = + PutFalAiLongcatMultiAvatarImageAudioToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiLongcatMultiAvatarImageAudioToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiLongcatMultiAvatarImageAudioToVideoData = { + body: SchemaLongcatMultiAvatarImageAudioToVideoInput + path?: never + query?: never + url: '/fal-ai/longcat-multi-avatar/image-audio-to-video' +} + +export type PostFalAiLongcatMultiAvatarImageAudioToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLongcatMultiAvatarImageAudioToVideoResponse = + PostFalAiLongcatMultiAvatarImageAudioToVideoResponses[keyof PostFalAiLongcatMultiAvatarImageAudioToVideoResponses] + +export type GetFalAiLongcatMultiAvatarImageAudioToVideoRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/longcat-multi-avatar/image-audio-to-video/requests/{request_id}' + } + +export type GetFalAiLongcatMultiAvatarImageAudioToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaLongcatMultiAvatarImageAudioToVideoOutput + } + +export type GetFalAiLongcatMultiAvatarImageAudioToVideoRequestsByRequestIdResponse = + GetFalAiLongcatMultiAvatarImageAudioToVideoRequestsByRequestIdResponses[keyof GetFalAiLongcatMultiAvatarImageAudioToVideoRequestsByRequestIdResponses] + +export type GetFalAiLongcatSingleAvatarImageAudioToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/longcat-single-avatar/image-audio-to-video/requests/{request_id}/status' + } + +export type GetFalAiLongcatSingleAvatarImageAudioToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiLongcatSingleAvatarImageAudioToVideoRequestsByRequestIdStatusResponse = + GetFalAiLongcatSingleAvatarImageAudioToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiLongcatSingleAvatarImageAudioToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiLongcatSingleAvatarImageAudioToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/longcat-single-avatar/image-audio-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiLongcatSingleAvatarImageAudioToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. 
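Cancellation is a bare PUT with no body; a 200 response carries an optional `success` flag. A hypothetical helper for the multi-avatar route above, with host and auth as in the earlier polling sketch:

```ts
// Hypothetical cancel helper; returns the queue's success flag, falling
// back to the HTTP status when the flag is omitted from the response.
async function cancelMultiAvatarRequest(requestId: string): Promise<boolean> {
  const res = await fetch(
    `https://queue.fal.run/fal-ai/longcat-multi-avatar/image-audio-to-video/requests/${requestId}/cancel`,
    {
      method: 'PUT',
      headers: { Authorization: `Key ${process.env.FAL_KEY}` },
    },
  )
  const json: { success?: boolean } = await res.json()
  return json.success ?? res.ok
}
```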
+ */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiLongcatSingleAvatarImageAudioToVideoRequestsByRequestIdCancelResponse = + PutFalAiLongcatSingleAvatarImageAudioToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiLongcatSingleAvatarImageAudioToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiLongcatSingleAvatarImageAudioToVideoData = { + body: SchemaLongcatSingleAvatarImageAudioToVideoInput + path?: never + query?: never + url: '/fal-ai/longcat-single-avatar/image-audio-to-video' +} + +export type PostFalAiLongcatSingleAvatarImageAudioToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLongcatSingleAvatarImageAudioToVideoResponse = + PostFalAiLongcatSingleAvatarImageAudioToVideoResponses[keyof PostFalAiLongcatSingleAvatarImageAudioToVideoResponses] + +export type GetFalAiLongcatSingleAvatarImageAudioToVideoRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/longcat-single-avatar/image-audio-to-video/requests/{request_id}' + } + +export type GetFalAiLongcatSingleAvatarImageAudioToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaLongcatSingleAvatarImageAudioToVideoOutput + } + +export type GetFalAiLongcatSingleAvatarImageAudioToVideoRequestsByRequestIdResponse = + GetFalAiLongcatSingleAvatarImageAudioToVideoRequestsByRequestIdResponses[keyof GetFalAiLongcatSingleAvatarImageAudioToVideoRequestsByRequestIdResponses] + +export type GetFalAiLongcatSingleAvatarAudioToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/longcat-single-avatar/audio-to-video/requests/{request_id}/status' + } + +export type GetFalAiLongcatSingleAvatarAudioToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiLongcatSingleAvatarAudioToVideoRequestsByRequestIdStatusResponse = + GetFalAiLongcatSingleAvatarAudioToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiLongcatSingleAvatarAudioToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiLongcatSingleAvatarAudioToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/longcat-single-avatar/audio-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiLongcatSingleAvatarAudioToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiLongcatSingleAvatarAudioToVideoRequestsByRequestIdCancelResponse = + PutFalAiLongcatSingleAvatarAudioToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiLongcatSingleAvatarAudioToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiLongcatSingleAvatarAudioToVideoData = { + body: SchemaLongcatSingleAvatarAudioToVideoInput + path?: never + query?: never + url: '/fal-ai/longcat-single-avatar/audio-to-video' +} + +export type PostFalAiLongcatSingleAvatarAudioToVideoResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiLongcatSingleAvatarAudioToVideoResponse = + PostFalAiLongcatSingleAvatarAudioToVideoResponses[keyof PostFalAiLongcatSingleAvatarAudioToVideoResponses] + +export type GetFalAiLongcatSingleAvatarAudioToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/longcat-single-avatar/audio-to-video/requests/{request_id}' +} + +export type GetFalAiLongcatSingleAvatarAudioToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaLongcatSingleAvatarAudioToVideoOutput + } + +export type GetFalAiLongcatSingleAvatarAudioToVideoRequestsByRequestIdResponse = + GetFalAiLongcatSingleAvatarAudioToVideoRequestsByRequestIdResponses[keyof GetFalAiLongcatSingleAvatarAudioToVideoRequestsByRequestIdResponses] + +export type GetArgilAvatarsAudioToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/argil/avatars/audio-to-video/requests/{request_id}/status' +} + +export type GetArgilAvatarsAudioToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetArgilAvatarsAudioToVideoRequestsByRequestIdStatusResponse = + GetArgilAvatarsAudioToVideoRequestsByRequestIdStatusResponses[keyof GetArgilAvatarsAudioToVideoRequestsByRequestIdStatusResponses] + +export type PutArgilAvatarsAudioToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/argil/avatars/audio-to-video/requests/{request_id}/cancel' +} + +export type PutArgilAvatarsAudioToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutArgilAvatarsAudioToVideoRequestsByRequestIdCancelResponse = + PutArgilAvatarsAudioToVideoRequestsByRequestIdCancelResponses[keyof PutArgilAvatarsAudioToVideoRequestsByRequestIdCancelResponses] + +export type PostArgilAvatarsAudioToVideoData = { + body: SchemaAvatarsAudioToVideoInput + path?: never + query?: never + url: '/argil/avatars/audio-to-video' +} + +export type PostArgilAvatarsAudioToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostArgilAvatarsAudioToVideoResponse = + PostArgilAvatarsAudioToVideoResponses[keyof PostArgilAvatarsAudioToVideoResponses] + +export type GetArgilAvatarsAudioToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/argil/avatars/audio-to-video/requests/{request_id}' +} + +export type GetArgilAvatarsAudioToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaAvatarsAudioToVideoOutput +} + +export type GetArgilAvatarsAudioToVideoRequestsByRequestIdResponse = + GetArgilAvatarsAudioToVideoRequestsByRequestIdResponses[keyof GetArgilAvatarsAudioToVideoRequestsByRequestIdResponses] + +export type GetFalAiWanV2214bSpeechToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
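Both the `/argil/avatars/audio-to-video` routes above and the `/veed/avatars/audio-to-video` routes at the end of this file share the same `SchemaAvatarsAudioToVideoInput` body: just an audio URL plus a preset `avatar_id`. A sketch with a placeholder URL:

```ts
// Sketch: the shared avatars input; the same body type posts to either
// the argil or the veed route.
import type { SchemaAvatarsAudioToVideoInput } from './types.gen'

const avatarBody: SchemaAvatarsAudioToVideoInput = {
  audio_url: 'https://example.com/speech.mp3', // placeholder
  avatar_id: 'emily_vertical_primary', // one of the preset enum values
}
```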
+ */ + logs?: number + } + url: '/fal-ai/wan/v2.2-14b/speech-to-video/requests/{request_id}/status' +} + +export type GetFalAiWanV2214bSpeechToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiWanV2214bSpeechToVideoRequestsByRequestIdStatusResponse = + GetFalAiWanV2214bSpeechToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiWanV2214bSpeechToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiWanV2214bSpeechToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan/v2.2-14b/speech-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiWanV2214bSpeechToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiWanV2214bSpeechToVideoRequestsByRequestIdCancelResponse = + PutFalAiWanV2214bSpeechToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiWanV2214bSpeechToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiWanV2214bSpeechToVideoData = { + body: SchemaWanV2214bSpeechToVideoInput + path?: never + query?: never + url: '/fal-ai/wan/v2.2-14b/speech-to-video' +} + +export type PostFalAiWanV2214bSpeechToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanV2214bSpeechToVideoResponse = + PostFalAiWanV2214bSpeechToVideoResponses[keyof PostFalAiWanV2214bSpeechToVideoResponses] + +export type GetFalAiWanV2214bSpeechToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan/v2.2-14b/speech-to-video/requests/{request_id}' +} + +export type GetFalAiWanV2214bSpeechToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWanV2214bSpeechToVideoOutput +} + +export type GetFalAiWanV2214bSpeechToVideoRequestsByRequestIdResponse = + GetFalAiWanV2214bSpeechToVideoRequestsByRequestIdResponses[keyof GetFalAiWanV2214bSpeechToVideoRequestsByRequestIdResponses] + +export type GetFalAiStableAvatarRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/stable-avatar/requests/{request_id}/status' +} + +export type GetFalAiStableAvatarRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiStableAvatarRequestsByRequestIdStatusResponse = + GetFalAiStableAvatarRequestsByRequestIdStatusResponses[keyof GetFalAiStableAvatarRequestsByRequestIdStatusResponses] + +export type PutFalAiStableAvatarRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/stable-avatar/requests/{request_id}/cancel' +} + +export type PutFalAiStableAvatarRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
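For the WAN v2.2-14B speech-to-video route above, only `prompt`, `image_url`, and `audio_url` are required (per the Zod schema later in this diff); everything else has a documented default. A sketch with placeholder URLs:

```ts
// Sketch: minimal WAN v2.2-14B speech-to-video body plus two common knobs.
import type { SchemaWanV2214bSpeechToVideoInput } from './types.gen'

const wanBody: SchemaWanV2214bSpeechToVideoInput = {
  prompt: 'A newscaster delivering headlines at a desk',
  image_url: 'https://example.com/anchor.png', // resized/center-cropped to fit
  audio_url: 'https://example.com/headlines.wav',
  resolution: '480p',
  num_frames: 80, // 40 to 120, and a multiple of 4
}
```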
+ */ + success?: boolean + } +} + +export type PutFalAiStableAvatarRequestsByRequestIdCancelResponse = + PutFalAiStableAvatarRequestsByRequestIdCancelResponses[keyof PutFalAiStableAvatarRequestsByRequestIdCancelResponses] + +export type PostFalAiStableAvatarData = { + body: SchemaStableAvatarInput + path?: never + query?: never + url: '/fal-ai/stable-avatar' +} + +export type PostFalAiStableAvatarResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiStableAvatarResponse = + PostFalAiStableAvatarResponses[keyof PostFalAiStableAvatarResponses] + +export type GetFalAiStableAvatarRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/stable-avatar/requests/{request_id}' +} + +export type GetFalAiStableAvatarRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaStableAvatarOutput +} + +export type GetFalAiStableAvatarRequestsByRequestIdResponse = + GetFalAiStableAvatarRequestsByRequestIdResponses[keyof GetFalAiStableAvatarRequestsByRequestIdResponses] + +export type GetFalAiEchomimicV3RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/echomimic-v3/requests/{request_id}/status' +} + +export type GetFalAiEchomimicV3RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiEchomimicV3RequestsByRequestIdStatusResponse = + GetFalAiEchomimicV3RequestsByRequestIdStatusResponses[keyof GetFalAiEchomimicV3RequestsByRequestIdStatusResponses] + +export type PutFalAiEchomimicV3RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/echomimic-v3/requests/{request_id}/cancel' +} + +export type PutFalAiEchomimicV3RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiEchomimicV3RequestsByRequestIdCancelResponse = + PutFalAiEchomimicV3RequestsByRequestIdCancelResponses[keyof PutFalAiEchomimicV3RequestsByRequestIdCancelResponses] + +export type PostFalAiEchomimicV3Data = { + body: SchemaEchomimicV3Input + path?: never + query?: never + url: '/fal-ai/echomimic-v3' +} + +export type PostFalAiEchomimicV3Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiEchomimicV3Response = + PostFalAiEchomimicV3Responses[keyof PostFalAiEchomimicV3Responses] + +export type GetFalAiEchomimicV3RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/echomimic-v3/requests/{request_id}' +} + +export type GetFalAiEchomimicV3RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaEchomimicV3Output +} + +export type GetFalAiEchomimicV3RequestsByRequestIdResponse = + GetFalAiEchomimicV3RequestsByRequestIdResponses[keyof GetFalAiEchomimicV3RequestsByRequestIdResponses] + +export type GetVeedAvatarsAudioToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/veed/avatars/audio-to-video/requests/{request_id}/status' +} + +export type GetVeedAvatarsAudioToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetVeedAvatarsAudioToVideoRequestsByRequestIdStatusResponse = + GetVeedAvatarsAudioToVideoRequestsByRequestIdStatusResponses[keyof GetVeedAvatarsAudioToVideoRequestsByRequestIdStatusResponses] + +export type PutVeedAvatarsAudioToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/veed/avatars/audio-to-video/requests/{request_id}/cancel' +} + +export type PutVeedAvatarsAudioToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutVeedAvatarsAudioToVideoRequestsByRequestIdCancelResponse = + PutVeedAvatarsAudioToVideoRequestsByRequestIdCancelResponses[keyof PutVeedAvatarsAudioToVideoRequestsByRequestIdCancelResponses] + +export type PostVeedAvatarsAudioToVideoData = { + body: SchemaAvatarsAudioToVideoInput + path?: never + query?: never + url: '/veed/avatars/audio-to-video' +} + +export type PostVeedAvatarsAudioToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostVeedAvatarsAudioToVideoResponse = + PostVeedAvatarsAudioToVideoResponses[keyof PostVeedAvatarsAudioToVideoResponses] + +export type GetVeedAvatarsAudioToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/veed/avatars/audio-to-video/requests/{request_id}' +} + +export type GetVeedAvatarsAudioToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaAvatarsAudioToVideoOutput +} + +export type GetVeedAvatarsAudioToVideoRequestsByRequestIdResponse = + GetVeedAvatarsAudioToVideoRequestsByRequestIdResponses[keyof GetVeedAvatarsAudioToVideoRequestsByRequestIdResponses] diff --git a/packages/typescript/ai-fal/src/generated/audio-to-video/zod.gen.ts b/packages/typescript/ai-fal/src/generated/audio-to-video/zod.gen.ts new file mode 100644 index 00000000..283e7f97 --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/audio-to-video/zod.gen.ts @@ -0,0 +1,2529 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { z } from 'zod' + +/** + * File + */ +export const zSchemaFile = z.object({ + file_size: z.optional(z.union([z.int(), z.unknown()])), + file_name: z.optional(z.union([z.string(), z.unknown()])), + content_type: z.optional(z.union([z.string(), z.unknown()])), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), +}) + +/** + * EchoMimicResponse + */ +export const zSchemaEchomimicV3Output = z.object({ + video: zSchemaFile, +}) + +/** + * EchoMimicRequest + */ +export const zSchemaEchomimicV3Input = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to use for the video generation.', + }), + audio_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the audio to use as a reference for the video generation.', + }), + image_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the image to use as a reference for the video generation.', + }), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'The guidance scale to use for the video generation.', + }), + ) + .default(4.5), + audio_guidance_scale: z + .optional( + z.number().gte(0).lte(10).register(z.globalRegistry, { + description: + 'The audio guidance scale to use for the video generation.', + }), + ) + .default(2.5), + num_frames_per_generation: z + .optional( + z.int().gte(49).lte(161).register(z.globalRegistry, { + description: 'The number of frames to generate at once.', + }), + ) + .default(121), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to use for the video generation.', + }), + ) + .default(''), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed to use for the video generation.', + }), + ), +}) + +/** + * StableAvatarResponse + */ +export const zSchemaStableAvatarOutput = z.object({ + video: zSchemaFile, +}) + +/** + * StableAvatarRequest + */ +export const zSchemaStableAvatarInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to use for the video generation.', + }), + aspect_ratio: z.optional( + z.enum(['16:9', '1:1', '9:16', 'auto']).register(z.globalRegistry, { + description: + "The aspect ratio of the video to generate. If 'auto', the aspect ratio will be determined by the reference image.", + }), + ), + perturbation: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The amount of perturbation to use for the video generation. 
0.0 means no perturbation, 1.0 means full perturbation.', + }), + ) + .default(0.1), + image_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the image to use as a reference for the video generation.', + }), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'The guidance scale to use for the video generation.', + }), + ) + .default(5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed to use for the video generation.', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(10).lte(50).register(z.globalRegistry, { + description: + 'The number of inference steps to use for the video generation.', + }), + ) + .default(50), + audio_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the audio to use as a reference for the video generation.', + }), + audio_guidance_scale: z + .optional( + z.number().gte(0).lte(10).register(z.globalRegistry, { + description: + 'The audio guidance scale to use for the video generation.', + }), + ) + .default(4), +}) + +/** + * WanS2VResponse + */ +export const zSchemaWanV2214bSpeechToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * WanS2VRequest + */ +export const zSchemaWanV2214bSpeechToVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt used for video generation.', + }), + shift: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'Shift value for the video. Must be between 1.0 and 10.0.', + }), + ) + .default(5), + frames_per_second: z + .optional( + z.int().gte(4).lte(60).register(z.globalRegistry, { + description: + 'Frames per second of the generated video. Must be between 4 to 60. When using interpolation and `adjust_fps_for_interpolation` is set to true (default true,) the final FPS will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If `adjust_fps_for_interpolation` is set to false, this value will be used as-is.', + }), + ) + .default(16), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.', + }), + ) + .default(3.5), + num_frames: z + .optional( + z.int().gte(40).lte(120).register(z.globalRegistry, { + description: + 'Number of frames to generate. Must be between 40 to 120, (must be multiple of 4).', + }), + ) + .default(80), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, input data will be checked for safety before processing.', + }), + ) + .default(false), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for video generation.', + }), + ) + .default(''), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: + 'The write mode of the output video. 
Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size.', + }), + ), + resolution: z.optional( + z.enum(['480p', '580p', '720p']).register(z.globalRegistry, { + description: 'Resolution of the generated video (480p, 580p, or 720p).', + }), + ), + enable_output_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, output video will be checked for safety after generation.', + }), + ) + .default(false), + image_url: z.string().register(z.globalRegistry, { + description: + 'URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped.', + }), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: + 'The quality of the output video. Higher quality means better visual quality but larger file size.', + }), + ), + audio_url: z.string().register(z.globalRegistry, { + description: 'The URL of the audio file.', + }), + num_inference_steps: z + .optional( + z.int().gte(2).lte(40).register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. Higher values give better quality but take longer.', + }), + ) + .default(27), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), +}) + +/** + * AvatarsAppOutput + */ +export const zSchemaAvatarsAudioToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * Audio2VideoInput + */ +export const zSchemaAvatarsAudioToVideoInput = z.object({ + audio_url: z.url().min(1).max(2083), + avatar_id: z + .enum([ + 'emily_vertical_primary', + 'emily_vertical_secondary', + 'marcus_vertical_primary', + 'marcus_vertical_secondary', + 'mira_vertical_primary', + 'mira_vertical_secondary', + 'jasmine_vertical_primary', + 'jasmine_vertical_secondary', + 'jasmine_vertical_walking', + 'aisha_vertical_walking', + 'elena_vertical_primary', + 'elena_vertical_secondary', + 'any_male_vertical_primary', + 'any_female_vertical_primary', + 'any_male_vertical_secondary', + 'any_female_vertical_secondary', + 'any_female_vertical_walking', + 'emily_primary', + 'emily_side', + 'marcus_primary', + 'marcus_side', + 'aisha_walking', + 'elena_primary', + 'elena_side', + 'any_male_primary', + 'any_female_primary', + 'any_male_side', + 'any_female_side', + ]) + .register(z.globalRegistry, { + description: 'The avatar to use for the video', + }), +}) + +/** + * AudioToVideoResponse + * + * Response model for audio-to-video generation (no reference image). + */ +export const zSchemaLongcatSingleAvatarAudioToVideoOutput = z + .object({ + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, + }) + .register(z.globalRegistry, { + description: + 'Response model for audio-to-video generation (no reference image).', + }) + +/** + * AudioToVideoRequest + * + * Request model for audio-to-video generation. 
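The Zod counterparts are useful for validating a body before it is queued: `.parse` enforces the documented ranges and fills in defaults. A sketch against the EchoMimic schema defined above, assuming the generated `zod.gen.ts` module path; the URLs are placeholders:

```ts
// Sketch: validate an EchoMimic request and observe defaults being applied.
import { zSchemaEchomimicV3Input } from './zod.gen'

const parsed = zSchemaEchomimicV3Input.parse({
  prompt: 'A person singing along to the track',
  audio_url: 'https://example.com/song.wav',
  image_url: 'https://example.com/face.png',
})

console.log(parsed.guidance_scale) // 4.5, the documented default
// Out-of-range values throw: guidance_scale: 20 fails the lte(10) check.
```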
+ */ +export const zSchemaLongcatSingleAvatarAudioToVideoInput = z + .object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The prompt to guide the video generation.', + }), + ) + .default( + 'A person is talking naturally with natural expressions and movements.', + ), + resolution: z.optional( + z.enum(['480p', '720p']).register(z.globalRegistry, { + description: + 'Resolution of the generated video (480p or 720p). Billing is per video-second (16 frames): 480p is 1 unit per second and 720p is 4 units per second.', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable safety checker.', + }), + ) + .default(true), + audio_guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'The audio guidance scale. Higher values may lead to exaggerated mouth movements.', + }), + ) + .default(4), + num_segments: z + .optional( + z.int().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Number of video segments to generate. Each segment adds ~5 seconds of video. First segment is ~5.8s, additional segments are 5s each.', + }), + ) + .default(1), + audio_url: z.string().register(z.globalRegistry, { + description: 'The URL of the audio file to drive the avatar.', + }), + num_inference_steps: z + .optional( + z.int().gte(10).lte(100).register(z.globalRegistry, { + description: 'The number of inference steps to use.', + }), + ) + .default(30), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the random number generator.', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to avoid in the video generation.', + }), + ) + .default( + 'Close-up, Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards', + ), + text_guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'The text guidance scale for classifier-free guidance.', + }), + ) + .default(4), + }) + .register(z.globalRegistry, { + description: 'Request model for audio-to-video generation.', + }) + +/** + * ImageAudioToVideoResponse + * + * Response model for image+audio to video generation. + */ +export const zSchemaLongcatSingleAvatarImageAudioToVideoOutput = z + .object({ + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, + }) + .register(z.globalRegistry, { + description: 'Response model for image+audio to video generation.', + }) + +/** + * ImageAudioToVideoRequest + * + * Request model for image+audio to video generation. + */ +export const zSchemaLongcatSingleAvatarImageAudioToVideoInput = z + .object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to guide the video generation.', + }), + resolution: z.optional( + z.enum(['480p', '720p']).register(z.globalRegistry, { + description: + 'Resolution of the generated video (480p or 720p). 
Billing is per video-second (16 frames): 480p is 1 unit per second and 720p is 4 units per second.', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable safety checker.', + }), + ) + .default(true), + audio_guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'The audio guidance scale. Higher values may lead to exaggerated mouth movements.', + }), + ) + .default(4), + num_segments: z + .optional( + z.int().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Number of video segments to generate. Each segment adds ~5 seconds of video. First segment is ~5.8s, additional segments are 5s each.', + }), + ) + .default(1), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to animate.', + }), + audio_url: z.string().register(z.globalRegistry, { + description: 'The URL of the audio file to drive the avatar.', + }), + num_inference_steps: z + .optional( + z.int().gte(10).lte(100).register(z.globalRegistry, { + description: 'The number of inference steps to use.', + }), + ) + .default(30), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the random number generator.', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to avoid in the video generation.', + }), + ) + .default( + 'Close-up, Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards', + ), + text_guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'The text guidance scale for classifier-free guidance.', + }), + ) + .default(4), + }) + .register(z.globalRegistry, { + description: 'Request model for image+audio to video generation.', + }) + +/** + * BoundingBox + */ +export const zSchemaBoundingBox = z.object({ + y: z.number().register(z.globalRegistry, { + description: 'Y-coordinate of the top-left corner', + }), + x: z.number().register(z.globalRegistry, { + description: 'X-coordinate of the top-left corner', + }), + h: z.number().register(z.globalRegistry, { + description: 'Height of the bounding box', + }), + w: z.number().register(z.globalRegistry, { + description: 'Width of the bounding box', + }), + label: z.string().register(z.globalRegistry, { + description: 'Label of the bounding box', + }), +}) + +/** + * MultiSpeakerImageAudioToVideoResponse + * + * Response model for multi-speaker image+audio to video generation. + */ +export const zSchemaLongcatMultiAvatarImageAudioToVideoOutput = z + .object({ + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, + }) + .register(z.globalRegistry, { + description: + 'Response model for multi-speaker image+audio to video generation.', + }) + +/** + * MultiSpeakerImageAudioToVideoRequest + * + * Request model for multi-speaker image+audio to video generation. 
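`zSchemaBoundingBox` locates each speaker in the source image for the multi-avatar input defined next (`bbox_person1` / `bbox_person2`). A sketch with made-up coordinates; real values depend on the actual image:

```ts
// Sketch: bounding boxes for the two speakers, validated up front.
import { zSchemaBoundingBox } from './zod.gen'

const bboxPerson1 = zSchemaBoundingBox.parse({
  x: 40, // top-left corner x
  y: 80, // top-left corner y
  w: 300, // box width
  h: 420, // box height
  label: 'left speaker',
})

const bboxPerson2 = zSchemaBoundingBox.parse({
  x: 420,
  y: 90,
  w: 310,
  h: 415,
  label: 'right speaker',
})
```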
+ */ +export const zSchemaLongcatMultiAvatarImageAudioToVideoInput = z + .object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The prompt to guide the video generation.', + }), + ) + .default( + 'Two people are having a conversation with natural expressions and movements.', + ), + num_inference_steps: z + .optional( + z.int().gte(10).lte(100).register(z.globalRegistry, { + description: 'The number of inference steps to use.', + }), + ) + .default(30), + audio_url_person2: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The URL of the audio file for person 2 (right side).', + }), + ) + .default( + 'https://raw.githubusercontent.com/meituan-longcat/LongCat-Video/refs/heads/main/assets/avatar/multi/sing_woman.WAV', + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable safety checker.', + }), + ) + .default(true), + bbox_person1: z.optional(zSchemaBoundingBox), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to avoid in the video generation.', + }), + ) + .default( + 'Close-up, Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards', + ), + text_guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'The text guidance scale for classifier-free guidance.', + }), + ) + .default(4), + resolution: z.optional( + z.enum(['480p', '720p']).register(z.globalRegistry, { + description: + 'Resolution of the generated video (480p or 720p). Billing is per video-second (16 frames): 480p is 1 unit per second and 720p is 4 units per second.', + }), + ), + audio_type: z.optional( + z.enum(['para', 'add']).register(z.globalRegistry, { + description: + "How to combine the two audio tracks. 'para' (parallel) plays both simultaneously, 'add' (sequential) plays person 1 first then person 2.", + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image containing two speakers.', + }), + audio_url_person1: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The URL of the audio file for person 1 (left side).', + }), + ) + .default( + 'https://raw.githubusercontent.com/meituan-longcat/LongCat-Video/refs/heads/main/assets/avatar/multi/sing_man.WAV', + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the random number generator.', + }), + ), + audio_guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'The audio guidance scale. Higher values may lead to exaggerated mouth movements.', + }), + ) + .default(4), + bbox_person2: z.optional(zSchemaBoundingBox), + num_segments: z + .optional( + z.int().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Number of video segments to generate. Each segment adds ~5 seconds of video. 
First segment is ~5.8s, additional segments are 5s each.', + }), + ) + .default(1), + }) + .register(z.globalRegistry, { + description: + 'Request model for multi-speaker image+audio to video generation.', + }) + +/** + * DubbingVideoOutput + */ +export const zSchemaElevenlabsDubbingOutput = z.object({ + target_lang: z.string().register(z.globalRegistry, { + description: 'The target language of the dubbed content', + }), + video: zSchemaFile, +}) + +/** + * DubbingRequest + */ +export const zSchemaElevenlabsDubbingInput = z.object({ + video_url: z.optional(z.union([z.string(), z.unknown()])), + audio_url: z.optional(z.union([z.string(), z.unknown()])), + highest_resolution: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to use the highest resolution for dubbing.', + }), + ) + .default(true), + target_lang: z.string().register(z.globalRegistry, { + description: 'Target language code for dubbing (ISO 639-1)', + }), + source_lang: z.optional(z.union([z.string(), z.unknown()])), + num_speakers: z.optional(z.union([z.int().gte(1).lte(50), z.unknown()])), +}) + +/** + * VideoFile + */ +export const zSchemaVideoFile = z.object({ + file_size: z.optional(z.union([z.int(), z.unknown()])), + duration: z.optional(z.union([z.number(), z.unknown()])), + height: z.optional(z.union([z.int(), z.unknown()])), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), + width: z.optional(z.union([z.int(), z.unknown()])), + fps: z.optional(z.union([z.number(), z.unknown()])), + file_name: z.optional(z.union([z.string(), z.unknown()])), + content_type: z.optional(z.union([z.string(), z.unknown()])), + num_frames: z.optional(z.union([z.int(), z.unknown()])), +}) + +/** + * LTX2AudioToVideoOutput + */ +export const zSchemaLtx219bAudioToVideoOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for the generation.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for the random number generator.', + }), + video: zSchemaVideoFile, +}) + +/** + * ImageSize + */ +export const zSchemaImageSize = z.object({ + height: z + .optional( + z.int().lte(14142).register(z.globalRegistry, { + description: 'The height of the generated image.', + }), + ) + .default(512), + width: z + .optional( + z.int().lte(14142).register(z.globalRegistry, { + description: 'The width of the generated image.', + }), + ) + .default(512), +}) + +/** + * LTX2AudioToVideoInput + */ +export const zSchemaLtx219bAudioToVideoInput = z.object({ + match_audio_length: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'When enabled, the number of frames will be calculated based on the audio duration and FPS. When disabled, use the specified num_frames.', + }), + ) + .default(true), + use_multiscale: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. 
This results in better coherence and details.', + }), + ) + .default(true), + acceleration: z.optional( + z.enum(['none', 'regular', 'high', 'full']).register(z.globalRegistry, { + description: 'The acceleration level to use.', + }), + ), + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the video from.', + }), + fps: z + .optional( + z.number().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frames per second of the generated video.', + }), + ) + .default(25), + camera_lora: z.optional( + z + .enum([ + 'dolly_in', + 'dolly_out', + 'dolly_left', + 'dolly_right', + 'jib_up', + 'jib_down', + 'static', + 'none', + ]) + .register(z.globalRegistry, { + description: + 'The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.', + }), + ), + video_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'auto', + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + num_frames: z + .optional( + z.int().gte(9).lte(481).register(z.globalRegistry, { + description: 'The number of frames to generate.', + }), + ) + .default(121), + image_strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The strength of the image to use for the video generation.', + }), + ) + .default(1), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to generate the video from.', + }), + ) + .default( + 'blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, off-sync audio,incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts.', + ), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'The guidance scale to use.', + }), + ) + .default(3), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + video_output_type: z.optional( + z + .enum(['X264 (.mp4)', 'VP9 (.webm)', 'PRORES4444 (.mov)', 'GIF (.gif)']) + .register(z.globalRegistry, { + description: 'The output type of the generated video.', + }), + ), + camera_lora_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The scale of the camera LoRA 
to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.', + }), + ) + .default(1), + preprocess_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to preprocess the audio before using it as conditioning.', + }), + ) + .default(true), + image_url: z.optional(z.union([z.string(), z.unknown()])), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + audio_url: z.string().register(z.globalRegistry, { + description: 'The URL of the audio to generate the video from.', + }), + audio_strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Audio conditioning strength. Values below 1.0 will allow the model to change the audio, while a value of exactly 1.0 will use the input audio without modification.', + }), + ) + .default(1), + num_inference_steps: z + .optional( + z.int().gte(8).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to use.', + }), + ) + .default(40), + seed: z.optional(z.union([z.int(), z.unknown()])), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), +}) + +/** + * LTX2AudioToVideoOutput + */ +export const zSchemaLtx219bDistilledAudioToVideoOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for the generation.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for the random number generator.', + }), + video: zSchemaVideoFile, +}) + +/** + * LTX2DistilledAudioToVideoInput + */ +export const zSchemaLtx219bDistilledAudioToVideoInput = z.object({ + match_audio_length: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'When enabled, the number of frames will be calculated based on the audio duration and FPS. When disabled, use the specified num_frames.', + }), + ) + .default(true), + use_multiscale: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details.', + }), + ) + .default(true), + acceleration: z.optional( + z.enum(['none', 'regular', 'high', 'full']).register(z.globalRegistry, { + description: 'The acceleration level to use.', + }), + ), + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the video from.', + }), + fps: z + .optional( + z.number().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frames per second of the generated video.', + }), + ) + .default(25), + camera_lora: z.optional( + z + .enum([ + 'dolly_in', + 'dolly_out', + 'dolly_left', + 'dolly_right', + 'jib_up', + 'jib_down', + 'static', + 'none', + ]) + .register(z.globalRegistry, { + description: + 'The camera LoRA to use. 
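A minimal payload sketch for the audio-to-video input completed above, showing how the camera controls combine (import path illustrative; unset fields fall back to the schema defaults such as fps 25, num_frames 121, guidance_scale 3):

import { zSchemaLtx219bAudioToVideoInput } from './zod.gen'

// `camera_lora` picks the movement, `camera_lora_scale` (0-1) controls
// how strongly it is applied; `prompt` and `audio_url` are the only
// required fields.
const ltx2Request = zSchemaLtx219bAudioToVideoInput.parse({
  prompt: 'A slow push-in on a speaker at a podium',
  audio_url: 'https://example.com/speech.wav',
  camera_lora: 'dolly_in',
  camera_lora_scale: 0.8,
})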
This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.', + }), + ), + video_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'auto', + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + num_frames: z + .optional( + z.int().gte(9).lte(481).register(z.globalRegistry, { + description: 'The number of frames to generate.', + }), + ) + .default(121), + image_strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The strength of the image to use for the video generation.', + }), + ) + .default(1), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to generate the video from.', + }), + ) + .default( + 'blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, off-sync audio,incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts.', + ), + camera_lora_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The scale of the camera LoRA to use. 
This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.', + }), + ) + .default(1), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + video_output_type: z.optional( + z + .enum(['X264 (.mp4)', 'VP9 (.webm)', 'PRORES4444 (.mov)', 'GIF (.gif)']) + .register(z.globalRegistry, { + description: 'The output type of the generated video.', + }), + ), + preprocess_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to preprocess the audio before using it as conditioning.', + }), + ) + .default(true), + image_url: z.optional(z.union([z.string(), z.unknown()])), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + audio_url: z.string().register(z.globalRegistry, { + description: 'The URL of the audio to generate the video from.', + }), + audio_strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Audio conditioning strength. Values below 1.0 will allow the model to change the audio, while a value of exactly 1.0 will use the input audio without modification.', + }), + ) + .default(1), + seed: z.optional(z.union([z.int(), z.unknown()])), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), +}) + +/** + * LTX2AudioToVideoOutput + */ +export const zSchemaLtx219bAudioToVideoLoraOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for the generation.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for the random number generator.', + }), + video: zSchemaVideoFile, +}) + +/** + * LoRAInput + * + * LoRA weight configuration. + */ +export const zSchemaLoRaInput = z + .object({ + path: z.string().register(z.globalRegistry, { + description: 'URL, HuggingFace repo ID (owner/repo) to lora weights.', + }), + scale: z + .optional( + z.number().gte(0).lte(4).register(z.globalRegistry, { + description: 'Scale factor for LoRA application (0.0 to 4.0).', + }), + ) + .default(1), + weight_name: z.optional(z.union([z.string(), z.unknown()])), + }) + .register(z.globalRegistry, { + description: 'LoRA weight configuration.', + }) + +/** + * LTX2LoRAAudioToVideoInput + */ +export const zSchemaLtx219bAudioToVideoLoraInput = z.object({ + match_audio_length: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'When enabled, the number of frames will be calculated based on the audio duration and FPS. When disabled, use the specified num_frames.', + }), + ) + .default(true), + use_multiscale: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. 
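A small sketch of the LoRA entry shape defined just above (import path illustrative): `path` accepts either a direct URL or a HuggingFace `owner/repo` ID, and `scale` defaults to 1 when omitted.

import { zSchemaLoRaInput } from './zod.gen'

// Two valid entries: a repo ID with an explicit scale, and a direct
// weights URL relying on the default scale of 1.
const loras = [
  zSchemaLoRaInput.parse({ path: 'owner/watercolor-style-lora', scale: 1.2 }),
  zSchemaLoRaInput.parse({ path: 'https://example.com/weights.safetensors' }),
]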
This results in better coherence and details.', + }), + ) + .default(true), + acceleration: z.optional( + z.enum(['none', 'regular', 'high', 'full']).register(z.globalRegistry, { + description: 'The acceleration level to use.', + }), + ), + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the video from.', + }), + fps: z + .optional( + z.number().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frames per second of the generated video.', + }), + ) + .default(25), + loras: z.array(zSchemaLoRaInput).register(z.globalRegistry, { + description: 'The LoRAs to use for the generation.', + }), + camera_lora: z.optional( + z + .enum([ + 'dolly_in', + 'dolly_out', + 'dolly_left', + 'dolly_right', + 'jib_up', + 'jib_down', + 'static', + 'none', + ]) + .register(z.globalRegistry, { + description: + 'The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.', + }), + ), + video_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'auto', + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + num_frames: z + .optional( + z.int().gte(9).lte(481).register(z.globalRegistry, { + description: 'The number of frames to generate.', + }), + ) + .default(121), + image_strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The strength of the image to use for the video generation.', + }), + ) + .default(1), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to generate the video from.', + }), + ) + .default( + 'blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, off-sync audio,incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts.', + ), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'The guidance scale to use.', + }), + ) + .default(3), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + video_output_type: z.optional( + z + .enum(['X264 (.mp4)', 'VP9 (.webm)', 'PRORES4444 (.mov)', 'GIF (.gif)']) + .register(z.globalRegistry, { + description: 'The output type of the generated video.', + }), + ), + 
camera_lora_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The scale of the camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.', + }), + ) + .default(1), + preprocess_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to preprocess the audio before using it as conditioning.', + }), + ) + .default(true), + image_url: z.optional(z.union([z.string(), z.unknown()])), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + audio_url: z.string().register(z.globalRegistry, { + description: 'The URL of the audio to generate the video from.', + }), + audio_strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Audio conditioning strength. Values below 1.0 will allow the model to change the audio, while a value of exactly 1.0 will use the input audio without modification.', + }), + ) + .default(1), + num_inference_steps: z + .optional( + z.int().gte(8).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to use.', + }), + ) + .default(40), + seed: z.optional(z.union([z.int(), z.unknown()])), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), +}) + +/** + * LTX2AudioToVideoOutput + */ +export const zSchemaLtx219bDistilledAudioToVideoLoraOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for the generation.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for the random number generator.', + }), + video: zSchemaVideoFile, +}) + +/** + * LTX2LoRADistilledAudioToVideoInput + */ +export const zSchemaLtx219bDistilledAudioToVideoLoraInput = z.object({ + match_audio_length: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'When enabled, the number of frames will be calculated based on the audio duration and FPS. When disabled, use the specified num_frames.', + }), + ) + .default(true), + use_multiscale: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. 
This results in better coherence and details.', + }), + ) + .default(true), + acceleration: z.optional( + z.enum(['none', 'regular', 'high', 'full']).register(z.globalRegistry, { + description: 'The acceleration level to use.', + }), + ), + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the video from.', + }), + fps: z + .optional( + z.number().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frames per second of the generated video.', + }), + ) + .default(25), + loras: z.array(zSchemaLoRaInput).register(z.globalRegistry, { + description: 'The LoRAs to use for the generation.', + }), + camera_lora: z.optional( + z + .enum([ + 'dolly_in', + 'dolly_out', + 'dolly_left', + 'dolly_right', + 'jib_up', + 'jib_down', + 'static', + 'none', + ]) + .register(z.globalRegistry, { + description: + 'The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.', + }), + ), + video_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'auto', + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + num_frames: z + .optional( + z.int().gte(9).lte(481).register(z.globalRegistry, { + description: 'The number of frames to generate.', + }), + ) + .default(121), + image_strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The strength of the image to use for the video generation.', + }), + ) + .default(1), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to generate the video from.', + }), + ) + .default( + 'blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, off-sync audio,incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts.', + ), + camera_lora_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The scale of the camera LoRA to use. 
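One difference worth noting between the generated variants: the distilled inputs omit `guidance_scale` and `num_inference_steps`, which the non-distilled inputs expose (defaults 3 and 40). A hedged sketch of the practical consequence, assuming the schemas are importable from this module:

import { zSchemaLtx219bDistilledAudioToVideoInput } from './zod.gen'

// Zod object schemas strip unknown keys by default, so a payload that
// also carries `guidance_scale` / `num_inference_steps` still validates
// against the distilled schema -- the extra keys are silently dropped.
const forDistilled = zSchemaLtx219bDistilledAudioToVideoInput.parse({
  prompt: 'A calm narration over ocean footage',
  audio_url: 'https://example.com/narration.mp3',
  guidance_scale: 5, // not part of the distilled schema; stripped
})

// 'guidance_scale' in forDistilled === false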
This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.', + }), + ) + .default(1), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + video_output_type: z.optional( + z + .enum(['X264 (.mp4)', 'VP9 (.webm)', 'PRORES4444 (.mov)', 'GIF (.gif)']) + .register(z.globalRegistry, { + description: 'The output type of the generated video.', + }), + ), + preprocess_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to preprocess the audio before using it as conditioning.', + }), + ) + .default(true), + image_url: z.optional(z.union([z.string(), z.unknown()])), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + audio_url: z.string().register(z.globalRegistry, { + description: 'The URL of the audio to generate the video from.', + }), + audio_strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Audio conditioning strength. Values below 1.0 will allow the model to change the audio, while a value of exactly 1.0 will use the input audio without modification.', + }), + ) + .default(1), + seed: z.optional(z.union([z.int(), z.unknown()])), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), +}) + +export const zSchemaQueueStatus = z.object({ + status: z.enum(['IN_QUEUE', 'IN_PROGRESS', 'COMPLETED']), + request_id: z.string().register(z.globalRegistry, { + description: 'The request id.', + }), + response_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The response url.', + }), + ), + status_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The status url.', + }), + ), + cancel_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The cancel url.', + }), + ), + logs: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The logs.', + }), + ), + metrics: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The metrics.', + }), + ), + queue_position: z.optional( + z.int().register(z.globalRegistry, { + description: 'The queue position.', + }), + ), +}) + +export const zGetFalAiLtx219bDistilledAudioToVideoLoraRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
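With `zSchemaQueueStatus` and the status-request schemas above in hand, a hedged polling sketch (the `https://queue.fal.run` base comes from the generated client options later in this diff; the `/requests/{request_id}/status` path and `Authorization: Key ...` header follow fal's queue conventions but should be treated as illustrative here):

import { zSchemaQueueStatus } from './zod.gen'

// Poll a queued request and validate the payload against the generated
// QueueStatus schema, which guarantees `status` is one of
// IN_QUEUE / IN_PROGRESS / COMPLETED and that `request_id` is present.
async function pollStatus(endpointId: string, requestId: string, falKey: string) {
  const res = await fetch(
    `https://queue.fal.run/${endpointId}/requests/${requestId}/status?logs=1`,
    { headers: { Authorization: `Key ${falKey}` } },
  )
  return zSchemaQueueStatus.parse(await res.json())
}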
+ */ +export const zGetFalAiLtx219bDistilledAudioToVideoLoraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtx219bDistilledAudioToVideoLoraRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLtx219bDistilledAudioToVideoLoraRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtx219bDistilledAudioToVideoLoraData = z.object({ + body: zSchemaLtx219bDistilledAudioToVideoLoraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtx219bDistilledAudioToVideoLoraResponse = + zSchemaQueueStatus + +export const zGetFalAiLtx219bDistilledAudioToVideoLoraRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiLtx219bDistilledAudioToVideoLoraRequestsByRequestIdResponse = + zSchemaLtx219bDistilledAudioToVideoLoraOutput + +export const zGetFalAiLtx219bAudioToVideoLoraRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLtx219bAudioToVideoLoraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtx219bAudioToVideoLoraRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLtx219bAudioToVideoLoraRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtx219bAudioToVideoLoraData = z.object({ + body: zSchemaLtx219bAudioToVideoLoraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtx219bAudioToVideoLoraResponse = zSchemaQueueStatus + +export const zGetFalAiLtx219bAudioToVideoLoraRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. 
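And the matching result fetch once a poll reports COMPLETED, validated with the output schema defined above (same caveats on URL shape and auth as the polling sketch):

import { zSchemaLtx219bDistilledAudioToVideoLoraOutput } from './zod.gen'

// Fetch the final payload and check it against the generated output
// schema (prompt, seed, and the video file descriptor).
async function fetchResult(endpointId: string, requestId: string, falKey: string) {
  const res = await fetch(
    `https://queue.fal.run/${endpointId}/requests/${requestId}`,
    { headers: { Authorization: `Key ${falKey}` } },
  )
  return zSchemaLtx219bDistilledAudioToVideoLoraOutput.parse(await res.json())
}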
+ */ +export const zGetFalAiLtx219bAudioToVideoLoraRequestsByRequestIdResponse = + zSchemaLtx219bAudioToVideoLoraOutput + +export const zGetFalAiLtx219bDistilledAudioToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLtx219bDistilledAudioToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtx219bDistilledAudioToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLtx219bDistilledAudioToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtx219bDistilledAudioToVideoData = z.object({ + body: zSchemaLtx219bDistilledAudioToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtx219bDistilledAudioToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiLtx219bDistilledAudioToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiLtx219bDistilledAudioToVideoRequestsByRequestIdResponse = + zSchemaLtx219bDistilledAudioToVideoOutput + +export const zGetFalAiLtx219bAudioToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLtx219bAudioToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtx219bAudioToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLtx219bAudioToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtx219bAudioToVideoData = z.object({ + body: zSchemaLtx219bAudioToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
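For cancellation responses, `safeParse` avoids throwing on a malformed body; note that the generated payload's optional `success` flag is distinct from Zod's own `success` discriminant:

import { zPutFalAiLtx219bAudioToVideoRequestsByRequestIdCancelResponse } from './zod.gen'

const parsed =
  zPutFalAiLtx219bAudioToVideoRequestsByRequestIdCancelResponse.safeParse({
    success: true,
  })
if (parsed.success) {
  // `parsed.data.success` is the API's flag; `parsed.success` is Zod's.
  console.log('cancelled:', parsed.data.success ?? 'unknown')
}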
+ */ +export const zPostFalAiLtx219bAudioToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiLtx219bAudioToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLtx219bAudioToVideoRequestsByRequestIdResponse = + zSchemaLtx219bAudioToVideoOutput + +export const zGetFalAiElevenlabsDubbingRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiElevenlabsDubbingRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiElevenlabsDubbingRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiElevenlabsDubbingRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiElevenlabsDubbingData = z.object({ + body: zSchemaElevenlabsDubbingInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiElevenlabsDubbingResponse = zSchemaQueueStatus + +export const zGetFalAiElevenlabsDubbingRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiElevenlabsDubbingRequestsByRequestIdResponse = + zSchemaElevenlabsDubbingOutput + +export const zGetFalAiLongcatMultiAvatarImageAudioToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLongcatMultiAvatarImageAudioToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLongcatMultiAvatarImageAudioToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
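The generated `*Data` schemas model the whole request envelope rather than just the body: `path` parameters, an optional `query` (the `logs` flag), and `z.never()` for bodies that must be absent. For example, with the dubbing status schema above (import path illustrative):

import { zGetFalAiElevenlabsDubbingRequestsByRequestIdStatusData } from './zod.gen'

const envelope = zGetFalAiElevenlabsDubbingRequestsByRequestIdStatusData.parse({
  path: { request_id: '123e4567-e89b-12d3-a456-426614174000' }, // sample ID
  query: { logs: 1 }, // include logs (`1`) in the status response
})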
+ */ +export const zPutFalAiLongcatMultiAvatarImageAudioToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLongcatMultiAvatarImageAudioToVideoData = z.object({ + body: zSchemaLongcatMultiAvatarImageAudioToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLongcatMultiAvatarImageAudioToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiLongcatMultiAvatarImageAudioToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiLongcatMultiAvatarImageAudioToVideoRequestsByRequestIdResponse = + zSchemaLongcatMultiAvatarImageAudioToVideoOutput + +export const zGetFalAiLongcatSingleAvatarImageAudioToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLongcatSingleAvatarImageAudioToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLongcatSingleAvatarImageAudioToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLongcatSingleAvatarImageAudioToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLongcatSingleAvatarImageAudioToVideoData = z.object({ + body: zSchemaLongcatSingleAvatarImageAudioToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLongcatSingleAvatarImageAudioToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiLongcatSingleAvatarImageAudioToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiLongcatSingleAvatarImageAudioToVideoRequestsByRequestIdResponse = + zSchemaLongcatSingleAvatarImageAudioToVideoOutput + +export const zGetFalAiLongcatSingleAvatarAudioToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLongcatSingleAvatarAudioToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLongcatSingleAvatarAudioToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLongcatSingleAvatarAudioToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLongcatSingleAvatarAudioToVideoData = z.object({ + body: zSchemaLongcatSingleAvatarAudioToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLongcatSingleAvatarAudioToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiLongcatSingleAvatarAudioToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiLongcatSingleAvatarAudioToVideoRequestsByRequestIdResponse = + zSchemaLongcatSingleAvatarAudioToVideoOutput + +export const zGetArgilAvatarsAudioToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetArgilAvatarsAudioToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutArgilAvatarsAudioToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutArgilAvatarsAudioToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostArgilAvatarsAudioToVideoData = z.object({ + body: zSchemaAvatarsAudioToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostArgilAvatarsAudioToVideoResponse = zSchemaQueueStatus + +export const zGetArgilAvatarsAudioToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetArgilAvatarsAudioToVideoRequestsByRequestIdResponse = + zSchemaAvatarsAudioToVideoOutput + +export const zGetFalAiWanV2214bSpeechToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiWanV2214bSpeechToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanV2214bSpeechToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanV2214bSpeechToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanV2214bSpeechToVideoData = z.object({ + body: zSchemaWanV2214bSpeechToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanV2214bSpeechToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiWanV2214bSpeechToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiWanV2214bSpeechToVideoRequestsByRequestIdResponse = + zSchemaWanV2214bSpeechToVideoOutput + +export const zGetFalAiStableAvatarRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiStableAvatarRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiStableAvatarRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiStableAvatarRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiStableAvatarData = z.object({ + body: zSchemaStableAvatarInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiStableAvatarResponse = zSchemaQueueStatus + +export const zGetFalAiStableAvatarRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiStableAvatarRequestsByRequestIdResponse = + zSchemaStableAvatarOutput + +export const zGetFalAiEchomimicV3RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiEchomimicV3RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiEchomimicV3RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiEchomimicV3RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiEchomimicV3Data = z.object({ + body: zSchemaEchomimicV3Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiEchomimicV3Response = zSchemaQueueStatus + +export const zGetFalAiEchomimicV3RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiEchomimicV3RequestsByRequestIdResponse = + zSchemaEchomimicV3Output + +export const zGetVeedAvatarsAudioToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetVeedAvatarsAudioToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutVeedAvatarsAudioToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutVeedAvatarsAudioToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostVeedAvatarsAudioToVideoData = z.object({ + body: zSchemaAvatarsAudioToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostVeedAvatarsAudioToVideoResponse = zSchemaQueueStatus + +export const zGetVeedAvatarsAudioToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetVeedAvatarsAudioToVideoRequestsByRequestIdResponse = + zSchemaAvatarsAudioToVideoOutput diff --git a/packages/typescript/ai-fal/src/generated/image-to-3d/endpoint-map.ts b/packages/typescript/ai-fal/src/generated/image-to-3d/endpoint-map.ts new file mode 100644 index 00000000..e49d178d --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/image-to-3d/endpoint-map.ts @@ -0,0 +1,330 @@ +// AUTO-GENERATED - Do not edit manually +// Generated from types.gen.ts via scripts/generate-fal-endpoint-maps.ts + +import { + zSchemaBytedanceSeed3dImageTo3dInput, + zSchemaBytedanceSeed3dImageTo3dOutput, + zSchemaHunyuan3dV21Input, + zSchemaHunyuan3dV21Output, + zSchemaHunyuan3dV2Input, + zSchemaHunyuan3dV2MiniInput, + zSchemaHunyuan3dV2MiniOutput, + zSchemaHunyuan3dV2MiniTurboInput, + zSchemaHunyuan3dV2MiniTurboOutput, + zSchemaHunyuan3dV2MultiViewInput, + zSchemaHunyuan3dV2MultiViewOutput, + zSchemaHunyuan3dV2MultiViewTurboInput, + zSchemaHunyuan3dV2MultiViewTurboOutput, + zSchemaHunyuan3dV2Output, + zSchemaHunyuan3dV2TurboInput, + zSchemaHunyuan3dV2TurboOutput, + zSchemaHunyuan3dV3ImageTo3dInput, + zSchemaHunyuan3dV3ImageTo3dOutput, + zSchemaHunyuan3dV3SketchTo3dInput, + zSchemaHunyuan3dV3SketchTo3dOutput, + zSchemaHunyuanWorldImageToWorldInput, + zSchemaHunyuanWorldImageToWorldOutput, + zSchemaHyper3dRodinInput, + zSchemaHyper3dRodinOutput, + zSchemaHyper3dRodinV2Input, + zSchemaHyper3dRodinV2Output, + zSchemaMeshyV5MultiImageTo3dInput, + zSchemaMeshyV5MultiImageTo3dOutput, + zSchemaMeshyV6PreviewImageTo3dInput, + zSchemaMeshyV6PreviewImageTo3dOutput, + zSchemaOmnipartInput, + zSchemaOmnipartOutput, + zSchemaPshumanInput, + zSchemaPshumanOutput, + zSchemaSam33dBodyInput, + zSchemaSam33dBodyOutput, + zSchemaSam33dObjectsInput, + zSchemaSam33dObjectsOutput, + zSchemaTrellis2Input, + zSchemaTrellis2Output, + zSchemaTrellisInput, + zSchemaTrellisMultiInput, + zSchemaTrellisMultiOutput, + zSchemaTrellisOutput, + zSchemaTripoV25ImageTo3dInput, + zSchemaTripoV25ImageTo3dOutput, + zSchemaTripoV25MultiviewTo3dInput, + zSchemaTripoV25MultiviewTo3dOutput, + zSchemaTriposrInput, + zSchemaTriposrOutput, +} from './zod.gen' + +import type { + SchemaBytedanceSeed3dImageTo3dInput, + SchemaBytedanceSeed3dImageTo3dOutput, + SchemaHunyuan3dV21Input, + SchemaHunyuan3dV21Output, + SchemaHunyuan3dV2Input, + SchemaHunyuan3dV2MiniInput, + SchemaHunyuan3dV2MiniOutput, + SchemaHunyuan3dV2MiniTurboInput, + SchemaHunyuan3dV2MiniTurboOutput, + SchemaHunyuan3dV2MultiViewInput, + SchemaHunyuan3dV2MultiViewOutput, + SchemaHunyuan3dV2MultiViewTurboInput, + SchemaHunyuan3dV2MultiViewTurboOutput, + SchemaHunyuan3dV2Output, + SchemaHunyuan3dV2TurboInput, + SchemaHunyuan3dV2TurboOutput, + SchemaHunyuan3dV3ImageTo3dInput, + SchemaHunyuan3dV3ImageTo3dOutput, + SchemaHunyuan3dV3SketchTo3dInput, + SchemaHunyuan3dV3SketchTo3dOutput, + SchemaHunyuanWorldImageToWorldInput, + SchemaHunyuanWorldImageToWorldOutput, + SchemaHyper3dRodinInput, + SchemaHyper3dRodinOutput, + SchemaHyper3dRodinV2Input, + SchemaHyper3dRodinV2Output, + SchemaMeshyV5MultiImageTo3dInput, + SchemaMeshyV5MultiImageTo3dOutput, + SchemaMeshyV6PreviewImageTo3dInput, + SchemaMeshyV6PreviewImageTo3dOutput, + SchemaOmnipartInput, + SchemaOmnipartOutput, + SchemaPshumanInput, + SchemaPshumanOutput, + SchemaSam33dBodyInput, + SchemaSam33dBodyOutput, + SchemaSam33dObjectsInput, + SchemaSam33dObjectsOutput, + SchemaTrellis2Input, + SchemaTrellis2Output, + SchemaTrellisInput, + SchemaTrellisMultiInput, + SchemaTrellisMultiOutput, + SchemaTrellisOutput, + 
SchemaTripoV25ImageTo3dInput, + SchemaTripoV25ImageTo3dOutput, + SchemaTripoV25MultiviewTo3dInput, + SchemaTripoV25MultiviewTo3dOutput, + SchemaTriposrInput, + SchemaTriposrOutput, +} from './types.gen' + +import type { z } from 'zod' + +export type ImageTo3dEndpointMap = { + 'fal-ai/trellis-2': { + input: SchemaTrellis2Input + output: SchemaTrellis2Output + } + 'fal-ai/hunyuan3d-v3/sketch-to-3d': { + input: SchemaHunyuan3dV3SketchTo3dInput + output: SchemaHunyuan3dV3SketchTo3dOutput + } + 'fal-ai/hunyuan3d-v3/image-to-3d': { + input: SchemaHunyuan3dV3ImageTo3dInput + output: SchemaHunyuan3dV3ImageTo3dOutput + } + 'fal-ai/sam-3/3d-body': { + input: SchemaSam33dBodyInput + output: SchemaSam33dBodyOutput + } + 'fal-ai/sam-3/3d-objects': { + input: SchemaSam33dObjectsInput + output: SchemaSam33dObjectsOutput + } + 'fal-ai/omnipart': { + input: SchemaOmnipartInput + output: SchemaOmnipartOutput + } + 'fal-ai/bytedance/seed3d/image-to-3d': { + input: SchemaBytedanceSeed3dImageTo3dInput + output: SchemaBytedanceSeed3dImageTo3dOutput + } + 'fal-ai/meshy/v5/multi-image-to-3d': { + input: SchemaMeshyV5MultiImageTo3dInput + output: SchemaMeshyV5MultiImageTo3dOutput + } + 'fal-ai/meshy/v6-preview/image-to-3d': { + input: SchemaMeshyV6PreviewImageTo3dInput + output: SchemaMeshyV6PreviewImageTo3dOutput + } + 'fal-ai/hyper3d/rodin/v2': { + input: SchemaHyper3dRodinV2Input + output: SchemaHyper3dRodinV2Output + } + 'fal-ai/pshuman': { + input: SchemaPshumanInput + output: SchemaPshumanOutput + } + 'fal-ai/hunyuan_world/image-to-world': { + input: SchemaHunyuanWorldImageToWorldInput + output: SchemaHunyuanWorldImageToWorldOutput + } + 'tripo3d/tripo/v2.5/multiview-to-3d': { + input: SchemaTripoV25MultiviewTo3dInput + output: SchemaTripoV25MultiviewTo3dOutput + } + 'fal-ai/hunyuan3d-v21': { + input: SchemaHunyuan3dV21Input + output: SchemaHunyuan3dV21Output + } + 'fal-ai/trellis/multi': { + input: SchemaTrellisMultiInput + output: SchemaTrellisMultiOutput + } + 'tripo3d/tripo/v2.5/image-to-3d': { + input: SchemaTripoV25ImageTo3dInput + output: SchemaTripoV25ImageTo3dOutput + } + 'fal-ai/hunyuan3d/v2/multi-view/turbo': { + input: SchemaHunyuan3dV2MultiViewTurboInput + output: SchemaHunyuan3dV2MultiViewTurboOutput + } + 'fal-ai/hunyuan3d/v2': { + input: SchemaHunyuan3dV2Input + output: SchemaHunyuan3dV2Output + } + 'fal-ai/hunyuan3d/v2/mini': { + input: SchemaHunyuan3dV2MiniInput + output: SchemaHunyuan3dV2MiniOutput + } + 'fal-ai/hunyuan3d/v2/multi-view': { + input: SchemaHunyuan3dV2MultiViewInput + output: SchemaHunyuan3dV2MultiViewOutput + } + 'fal-ai/hunyuan3d/v2/turbo': { + input: SchemaHunyuan3dV2TurboInput + output: SchemaHunyuan3dV2TurboOutput + } + 'fal-ai/hunyuan3d/v2/mini/turbo': { + input: SchemaHunyuan3dV2MiniTurboInput + output: SchemaHunyuan3dV2MiniTurboOutput + } + 'fal-ai/hyper3d/rodin': { + input: SchemaHyper3dRodinInput + output: SchemaHyper3dRodinOutput + } + 'fal-ai/trellis': { + input: SchemaTrellisInput + output: SchemaTrellisOutput + } + 'fal-ai/triposr': { + input: SchemaTriposrInput + output: SchemaTriposrOutput + } +} + +/** Union type of all image-to-3d model endpoint IDs */ +export type ImageTo3dModel = keyof ImageTo3dEndpointMap + +export const ImageTo3dSchemaMap: Record< + ImageTo3dModel, + { input: z.ZodSchema; output: z.ZodSchema } +> = { + ['fal-ai/trellis-2']: { + input: zSchemaTrellis2Input, + output: zSchemaTrellis2Output, + }, + ['fal-ai/hunyuan3d-v3/sketch-to-3d']: { + input: zSchemaHunyuan3dV3SketchTo3dInput, + output: zSchemaHunyuan3dV3SketchTo3dOutput, + }, + 
+  ['fal-ai/hunyuan3d-v3/image-to-3d']: {
+    input: zSchemaHunyuan3dV3ImageTo3dInput,
+    output: zSchemaHunyuan3dV3ImageTo3dOutput,
+  },
+  ['fal-ai/sam-3/3d-body']: {
+    input: zSchemaSam33dBodyInput,
+    output: zSchemaSam33dBodyOutput,
+  },
+  ['fal-ai/sam-3/3d-objects']: {
+    input: zSchemaSam33dObjectsInput,
+    output: zSchemaSam33dObjectsOutput,
+  },
+  ['fal-ai/omnipart']: {
+    input: zSchemaOmnipartInput,
+    output: zSchemaOmnipartOutput,
+  },
+  ['fal-ai/bytedance/seed3d/image-to-3d']: {
+    input: zSchemaBytedanceSeed3dImageTo3dInput,
+    output: zSchemaBytedanceSeed3dImageTo3dOutput,
+  },
+  ['fal-ai/meshy/v5/multi-image-to-3d']: {
+    input: zSchemaMeshyV5MultiImageTo3dInput,
+    output: zSchemaMeshyV5MultiImageTo3dOutput,
+  },
+  ['fal-ai/meshy/v6-preview/image-to-3d']: {
+    input: zSchemaMeshyV6PreviewImageTo3dInput,
+    output: zSchemaMeshyV6PreviewImageTo3dOutput,
+  },
+  ['fal-ai/hyper3d/rodin/v2']: {
+    input: zSchemaHyper3dRodinV2Input,
+    output: zSchemaHyper3dRodinV2Output,
+  },
+  ['fal-ai/pshuman']: {
+    input: zSchemaPshumanInput,
+    output: zSchemaPshumanOutput,
+  },
+  ['fal-ai/hunyuan_world/image-to-world']: {
+    input: zSchemaHunyuanWorldImageToWorldInput,
+    output: zSchemaHunyuanWorldImageToWorldOutput,
+  },
+  ['tripo3d/tripo/v2.5/multiview-to-3d']: {
+    input: zSchemaTripoV25MultiviewTo3dInput,
+    output: zSchemaTripoV25MultiviewTo3dOutput,
+  },
+  ['fal-ai/hunyuan3d-v21']: {
+    input: zSchemaHunyuan3dV21Input,
+    output: zSchemaHunyuan3dV21Output,
+  },
+  ['fal-ai/trellis/multi']: {
+    input: zSchemaTrellisMultiInput,
+    output: zSchemaTrellisMultiOutput,
+  },
+  ['tripo3d/tripo/v2.5/image-to-3d']: {
+    input: zSchemaTripoV25ImageTo3dInput,
+    output: zSchemaTripoV25ImageTo3dOutput,
+  },
+  ['fal-ai/hunyuan3d/v2/multi-view/turbo']: {
+    input: zSchemaHunyuan3dV2MultiViewTurboInput,
+    output: zSchemaHunyuan3dV2MultiViewTurboOutput,
+  },
+  ['fal-ai/hunyuan3d/v2']: {
+    input: zSchemaHunyuan3dV2Input,
+    output: zSchemaHunyuan3dV2Output,
+  },
+  ['fal-ai/hunyuan3d/v2/mini']: {
+    input: zSchemaHunyuan3dV2MiniInput,
+    output: zSchemaHunyuan3dV2MiniOutput,
+  },
+  ['fal-ai/hunyuan3d/v2/multi-view']: {
+    input: zSchemaHunyuan3dV2MultiViewInput,
+    output: zSchemaHunyuan3dV2MultiViewOutput,
+  },
+  ['fal-ai/hunyuan3d/v2/turbo']: {
+    input: zSchemaHunyuan3dV2TurboInput,
+    output: zSchemaHunyuan3dV2TurboOutput,
+  },
+  ['fal-ai/hunyuan3d/v2/mini/turbo']: {
+    input: zSchemaHunyuan3dV2MiniTurboInput,
+    output: zSchemaHunyuan3dV2MiniTurboOutput,
+  },
+  ['fal-ai/hyper3d/rodin']: {
+    input: zSchemaHyper3dRodinInput,
+    output: zSchemaHyper3dRodinOutput,
+  },
+  ['fal-ai/trellis']: {
+    input: zSchemaTrellisInput,
+    output: zSchemaTrellisOutput,
+  },
+  ['fal-ai/triposr']: {
+    input: zSchemaTriposrInput,
+    output: zSchemaTriposrOutput,
+  },
+} as const
+
+/** Get the input type for a specific image-to-3d model */
+export type ImageTo3dModelInput<T extends ImageTo3dModel> =
+  ImageTo3dEndpointMap[T]['input']
+
+/** Get the output type for a specific image-to-3d model */
+export type ImageTo3dModelOutput<T extends ImageTo3dModel> =
+  ImageTo3dEndpointMap[T]['output']
diff --git a/packages/typescript/ai-fal/src/generated/image-to-3d/types.gen.ts b/packages/typescript/ai-fal/src/generated/image-to-3d/types.gen.ts
new file mode 100644
index 00000000..31b715b3
--- /dev/null
+++ b/packages/typescript/ai-fal/src/generated/image-to-3d/types.gen.ts
@@ -0,0 +1,4657 @@
+// This file is auto-generated by @hey-api/openapi-ts
+
+export type ClientOptions = {
+  baseUrl: 'https://queue.fal.run' | (string & {})
+}
+
+/**
+ * ObjectOutput
+ */
+export type SchemaTriposrOutput = {
+  /**
+   * Remeshing Dir
+   *
+   * Directory containing textures for the remeshed model.
+   */
+  remeshing_dir?: SchemaFile
+  /**
+   * Model Mesh
+   *
+   * Generated 3D object file.
+   */
+  model_mesh: SchemaFile
+  /**
+   * Timings
+   *
+   * Inference timings.
+   */
+  timings: {
+    [key: string]: number
+  }
+}
+
+/**
+ * File
+ */
+export type SchemaFile = {
+  /**
+   * File Size
+   *
+   * The size of the file in bytes.
+   */
+  file_size?: number
+  /**
+   * File Name
+   *
+   * The name of the file. It will be auto-generated if not provided.
+   */
+  file_name?: string
+  /**
+   * Content Type
+   *
+   * The mime type of the file.
+   */
+  content_type?: string
+  /**
+   * Url
+   *
+   * The URL where the file can be downloaded from.
+   */
+  url: string
+  /**
+   * File Data
+   *
+   * File data
+   */
+  file_data?: Blob | File
+}
+
+/**
+ * TripoSRInput
+ */
+export type SchemaTriposrInput = {
+  /**
+   * Mc Resolution
+   *
+   * Resolution of the marching cubes. Above 512 is not recommended.
+   */
+  mc_resolution?: number
+  /**
+   * Do Remove Background
+   *
+   * Whether to remove the background from the input image.
+   */
+  do_remove_background?: boolean
+  /**
+   * Foreground Ratio
+   *
+   * Ratio of the foreground image to the original image.
+   */
+  foreground_ratio?: number
+  /**
+   * Output Format
+   *
+   * Output format for the 3D model.
+   */
+  output_format?: 'glb' | 'obj'
+  /**
+   * Image Url
+   *
+   * Path for the image file to be processed.
+   */
+  image_url: string
+}
+
+/**
+ * ObjectOutput
+ */
+export type SchemaTrellisOutput = {
+  model_mesh: SchemaFile
+  /**
+   * Timings
+   *
+   * Processing timings
+   */
+  timings: {
+    [key: string]: number
+  }
+}
+
+/**
+ * InputModel
+ */
+export type SchemaTrellisInput = {
+  /**
+   * Slat Sampling Steps
+   *
+   * Sampling steps for structured latent generation
+   */
+  slat_sampling_steps?: number
+  /**
+   * Ss Sampling Steps
+   *
+   * Sampling steps for sparse structure generation
+   */
+  ss_sampling_steps?: number
+  /**
+   * Image Url
+   *
+   * URL of the input image to convert to 3D
+   */
+  image_url: string
+  /**
+   * Slat Guidance Strength
+   *
+   * Guidance strength for structured latent generation
+   */
+  slat_guidance_strength?: number
+  /**
+   * Ss Guidance Strength
+   *
+   * Guidance strength for sparse structure generation
+   */
+  ss_guidance_strength?: number
+  /**
+   * Mesh Simplify
+   *
+   * Mesh simplification factor
+   */
+  mesh_simplify?: number
+  /**
+   * Seed
+   *
+   * Random seed for reproducibility
+   */
+  seed?: number | unknown
+  /**
+   * Texture Size
+   *
+   * Texture resolution
+   */
+  texture_size?: 512 | 1024 | 2048
+}
+
+/**
+ * ObjectOutput
+ */
+export type SchemaHyper3dRodinOutput = {
+  /**
+   * Model Mesh
+   *
+   * Generated 3D object file.
+   */
+  model_mesh: SchemaFile
+  /**
+   * Seed
+   *
+   * Seed value used for generation.
+   */
+  seed: number
+  /**
+   * Textures
+   *
+   * Generated textures for the 3D object.
+   */
+  textures: Array<SchemaImage>
+}
+
+/**
+ * Image
+ *
+ * Represents an image file.
+ */
+export type SchemaImage = {
+  /**
+   * Height
+   *
+   * The height of the image in pixels.
+   */
+  height?: number
+  /**
+   * File Size
+   *
+   * The size of the file in bytes.
+   */
+  file_size?: number
+  /**
+   * Url
+   *
+   * The URL where the file can be downloaded from.
+   */
+  url: string
+  /**
+   * Width
+   *
+   * The width of the image in pixels.
+   */
+  width?: number
+  /**
+   * File Name
+   *
+   * The name of the file. It will be auto-generated if not provided.
+   */
+  file_name?: string
+  /**
+   * Content Type
+   *
+   * The mime type of the file.
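+   *
+   * @example
+   * // Editor's illustrative value:
+   * // 'image/png'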
+   */
+  content_type?: string
+  /**
+   * File Data
+   *
+   * File data
+   */
+  file_data?: Blob | File
+}
+
+/**
+ * Rodin3DInput
+ */
+export type SchemaHyper3dRodinInput = {
+  /**
+   * Prompt
+   *
+   * A textual prompt to guide model generation. Required for Text-to-3D mode. Optional for Image-to-3D mode.
+   */
+  prompt?: string
+  /**
+   * Condition Mode
+   *
+   * For fuse mode, one or more images are required. It will generate a model by extracting and fusing features of objects from multiple images. For concat mode, upload multiple multi-view images of the same object to generate the model. (You can upload multi-view images in any order, regardless of the order of views.)
+   */
+  condition_mode?: 'fuse' | 'concat'
+  /**
+   * Bbox Condition
+   *
+   * An array that specifies the dimensions and scaling factor of the bounding box. Typically, this array contains 3 elements: Length (X-axis), Width (Y-axis) and Height (Z-axis).
+   */
+  bbox_condition?: Array<number>
+  /**
+   * Tier
+   *
+   * Tier of generation. For Rodin Sketch, set to Sketch. For Rodin Regular, set to Regular.
+   */
+  tier?: 'Regular' | 'Sketch'
+  /**
+   * Quality
+   *
+   * Generation quality. Possible values: high, medium, low, extra-low. Default is medium.
+   */
+  quality?: 'high' | 'medium' | 'low' | 'extra-low'
+  /**
+   * T/A Pose
+   *
+   * When generating a human-like model, this parameter controls whether the result is generated in a T/A pose.
+   */
+  TAPose?: boolean
+  /**
+   * Input Image Urls
+   *
+   * URLs of images to use while generating the 3D model. Required for Image-to-3D mode. Optional for Text-to-3D mode.
+   */
+  input_image_urls?: Array<string>
+  /**
+   * Geometry File Format
+   *
+   * Format of the geometry file. Possible values: glb, usdz, fbx, obj, stl. Default is glb.
+   */
+  geometry_file_format?: 'glb' | 'usdz' | 'fbx' | 'obj' | 'stl'
+  /**
+   * Use Hyper
+   *
+   * Whether to export the model using hyper mode. Default is false.
+   */
+  use_hyper?: boolean
+  /**
+   * Addons
+   *
+   * Generation add-on features. Default is []. Possible values are HighPack. The HighPack option will provide 4K resolution textures instead of the default 1K, as well as high-poly models. It will cost triple the billable units.
+   */
+  addons?: 'HighPack'
+  /**
+   * Seed
+   *
+   * Seed value for randomization, ranging from 0 to 65535. Optional.
+   */
+  seed?: number
+  /**
+   * Material
+   *
+   * Material type. Possible values: PBR, Shaded. Default is PBR.
+   */
+  material?: 'PBR' | 'Shaded'
+}
+
+/**
+ * ObjectOutput
+ */
+export type SchemaHunyuan3dV2MiniTurboOutput = {
+  /**
+   * Model Mesh
+   *
+   * Generated 3D object file.
+   */
+  model_mesh: SchemaFile
+  /**
+   * Seed
+   *
+   * Seed value used for generation.
+   */
+  seed: number
+}
+
+/**
+ * Hunyuan3DInput
+ */
+export type SchemaHunyuan3dV2MiniTurboInput = {
+  /**
+   * Input Image Url
+   *
+   * URL of image to use while generating the 3D model.
+   */
+  input_image_url: string
+  /**
+   * Octree Resolution
+   *
+   * Octree resolution for the model.
+   */
+  octree_resolution?: number
+  /**
+   * Guidance Scale
+   *
+   * Guidance scale for the model.
+   */
+  guidance_scale?: number
+  /**
+   * Seed
+   *
+   * The same seed and the same prompt given to the same version of the model
+   * will output the same image every time.
+   */
+  seed?: number
+  /**
+   * Num Inference Steps
+   *
+   * Number of inference steps to perform.
+   */
+  num_inference_steps?: number
+  /**
+   * Textured Mesh
+   *
+   * If set true, a textured mesh will be generated and the price charged will be 3 times that of a white mesh.
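+   *
+   * @example
+   * // Editor's sketch (hypothetical URL): textured output at 3x the white-mesh cost.
+   * const input: SchemaHunyuan3dV2MiniTurboInput = {
+   *   input_image_url: 'https://example.com/chair.png',
+   *   textured_mesh: true,
+   * }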
+   */
+  textured_mesh?: boolean
+}
+
+/**
+ * ObjectOutput
+ */
+export type SchemaHunyuan3dV2TurboOutput = {
+  /**
+   * Model Mesh
+   *
+   * Generated 3D object file.
+   */
+  model_mesh: SchemaFile
+  /**
+   * Seed
+   *
+   * Seed value used for generation.
+   */
+  seed: number
+}
+
+/**
+ * Hunyuan3DInput
+ */
+export type SchemaHunyuan3dV2TurboInput = {
+  /**
+   * Input Image Url
+   *
+   * URL of image to use while generating the 3D model.
+   */
+  input_image_url: string
+  /**
+   * Octree Resolution
+   *
+   * Octree resolution for the model.
+   */
+  octree_resolution?: number
+  /**
+   * Guidance Scale
+   *
+   * Guidance scale for the model.
+   */
+  guidance_scale?: number
+  /**
+   * Seed
+   *
+   * The same seed and the same prompt given to the same version of the model
+   * will output the same image every time.
+   */
+  seed?: number
+  /**
+   * Num Inference Steps
+   *
+   * Number of inference steps to perform.
+   */
+  num_inference_steps?: number
+  /**
+   * Textured Mesh
+   *
+   * If set true, a textured mesh will be generated and the price charged will be 3 times that of a white mesh.
+   */
+  textured_mesh?: boolean
+}
+
+/**
+ * MultiViewObjectOutput
+ */
+export type SchemaHunyuan3dV2MultiViewOutput = {
+  /**
+   * Model Mesh
+   *
+   * Generated 3D object file.
+   */
+  model_mesh: SchemaFile
+  /**
+   * Seed
+   *
+   * Seed value used for generation.
+   */
+  seed: number
+}
+
+/**
+ * Hunyuan3DInputMultiView
+ */
+export type SchemaHunyuan3dV2MultiViewInput = {
+  /**
+   * Front Image Url
+   *
+   * URL of image to use while generating the 3D model.
+   */
+  front_image_url: string
+  /**
+   * Octree Resolution
+   *
+   * Octree resolution for the model.
+   */
+  octree_resolution?: number
+  /**
+   * Back Image Url
+   *
+   * URL of image to use while generating the 3D model.
+   */
+  back_image_url: string
+  /**
+   * Guidance Scale
+   *
+   * Guidance scale for the model.
+   */
+  guidance_scale?: number
+  /**
+   * Num Inference Steps
+   *
+   * Number of inference steps to perform.
+   */
+  num_inference_steps?: number
+  /**
+   * Textured Mesh
+   *
+   * If set true, a textured mesh will be generated and the price charged will be 3 times that of a white mesh.
+   */
+  textured_mesh?: boolean
+  /**
+   * Seed
+   *
+   * The same seed and the same prompt given to the same version of the model
+   * will output the same image every time.
+   */
+  seed?: number
+  /**
+   * Left Image Url
+   *
+   * URL of image to use while generating the 3D model.
+   */
+  left_image_url: string
+}
+
+/**
+ * ObjectOutput
+ */
+export type SchemaHunyuan3dV2MiniOutput = {
+  /**
+   * Model Mesh
+   *
+   * Generated 3D object file.
+   */
+  model_mesh: SchemaFile
+  /**
+   * Seed
+   *
+   * Seed value used for generation.
+   */
+  seed: number
+}
+
+/**
+ * Hunyuan3DInput
+ */
+export type SchemaHunyuan3dV2MiniInput = {
+  /**
+   * Input Image Url
+   *
+   * URL of image to use while generating the 3D model.
+   */
+  input_image_url: string
+  /**
+   * Octree Resolution
+   *
+   * Octree resolution for the model.
+   */
+  octree_resolution?: number
+  /**
+   * Guidance Scale
+   *
+   * Guidance scale for the model.
+   */
+  guidance_scale?: number
+  /**
+   * Seed
+   *
+   * The same seed and the same prompt given to the same version of the model
+   * will output the same image every time.
+   */
+  seed?: number
+  /**
+   * Num Inference Steps
+   *
+   * Number of inference steps to perform.
+   */
+  num_inference_steps?: number
+  /**
+   * Textured Mesh
+   *
+   * If set true, a textured mesh will be generated and the price charged will be 3 times that of a white mesh.
+   */
+  textured_mesh?: boolean
+}
+
+/**
+ * ObjectOutput
+ */
+export type SchemaHunyuan3dV2Output = {
+  /**
+   * Model Mesh
+   *
+   * Generated 3D object file.
+   */
+  model_mesh: SchemaFile
+  /**
+   * Seed
+   *
+   * Seed value used for generation.
+   */
+  seed: number
+}
+
+/**
+ * Hunyuan3DInput
+ */
+export type SchemaHunyuan3dV2Input = {
+  /**
+   * Input Image Url
+   *
+   * URL of image to use while generating the 3D model.
+   */
+  input_image_url: string
+  /**
+   * Octree Resolution
+   *
+   * Octree resolution for the model.
+   */
+  octree_resolution?: number
+  /**
+   * Guidance Scale
+   *
+   * Guidance scale for the model.
+   */
+  guidance_scale?: number
+  /**
+   * Seed
+   *
+   * The same seed and the same prompt given to the same version of the model
+   * will output the same image every time.
+   */
+  seed?: number
+  /**
+   * Num Inference Steps
+   *
+   * Number of inference steps to perform.
+   */
+  num_inference_steps?: number
+  /**
+   * Textured Mesh
+   *
+   * If set true, a textured mesh will be generated and the price charged will be 3 times that of a white mesh.
+   */
+  textured_mesh?: boolean
+}
+
+/**
+ * MultiViewObjectOutput
+ */
+export type SchemaHunyuan3dV2MultiViewTurboOutput = {
+  /**
+   * Model Mesh
+   *
+   * Generated 3D object file.
+   */
+  model_mesh: SchemaFile
+  /**
+   * Seed
+   *
+   * Seed value used for generation.
+   */
+  seed: number
+}
+
+/**
+ * Hunyuan3DInputMultiView
+ */
+export type SchemaHunyuan3dV2MultiViewTurboInput = {
+  /**
+   * Front Image Url
+   *
+   * URL of image to use while generating the 3D model.
+   */
+  front_image_url: string
+  /**
+   * Octree Resolution
+   *
+   * Octree resolution for the model.
+   */
+  octree_resolution?: number
+  /**
+   * Back Image Url
+   *
+   * URL of image to use while generating the 3D model.
+   */
+  back_image_url: string
+  /**
+   * Guidance Scale
+   *
+   * Guidance scale for the model.
+   */
+  guidance_scale?: number
+  /**
+   * Num Inference Steps
+   *
+   * Number of inference steps to perform.
+   */
+  num_inference_steps?: number
+  /**
+   * Textured Mesh
+   *
+   * If set true, a textured mesh will be generated and the price charged will be 3 times that of a white mesh.
+   */
+  textured_mesh?: boolean
+  /**
+   * Seed
+   *
+   * The same seed and the same prompt given to the same version of the model
+   * will output the same image every time.
+   */
+  seed?: number
+  /**
+   * Left Image Url
+   *
+   * URL of image to use while generating the 3D model.
+   */
+  left_image_url: string
+}
+
+/**
+ * Tripo3dOutput
+ */
+export type SchemaTripoV25ImageTo3dOutput = {
+  /**
+   * Base Model
+   *
+   * Base model
+   */
+  base_model?: SchemaFile
+  /**
+   * Task Id
+   *
+   * The task id of the 3D model generation.
+   */
+  task_id: string
+  /**
+   * Rendered Image
+   *
+   * A preview image of the model
+   */
+  rendered_image?: SchemaFile
+  /**
+   * Model Mesh
+   *
+   * Model
+   */
+  model_mesh?: SchemaFile
+  /**
+   * Pbr Model
+   *
+   * Pbr model
+   */
+  pbr_model?: SchemaFile
+}
+
+/**
+ * ImageTo3dInput
+ */
+export type SchemaTripoV25ImageTo3dInput = {
+  /**
+   * Face Limit
+   *
+   * Limits the number of faces on the output model. If this option is not set, the face limit will be adaptively determined.
+   */
+  face_limit?: number
+  /**
+   * Style
+   *
+   * [DEPRECATED] Defines the artistic style or transformation to be applied to the 3D model, altering its appearance according to preset options (extra $0.05 per generation). Omit this option to keep the original style and appearance.
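+   *
+   * @example
+   * // Editor's note: deprecated — omit `style` and control appearance via
+   * // `texture`/`pbr` instead.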
+   *
+   * @deprecated
+   */
+  style?:
+    | 'person:person2cartoon'
+    | 'object:clay'
+    | 'object:steampunk'
+    | 'animal:venom'
+    | 'object:barbie'
+    | 'object:christmas'
+    | 'gold'
+    | 'ancient_bronze'
+  /**
+   * Pbr
+   *
+   * A boolean option to enable pbr. The default value is True; set False to get a model without pbr. If this option is set to True, texture will be ignored and treated as True.
+   */
+  pbr?: boolean
+  /**
+   * Texture Alignment
+   *
+   * Determines the prioritization of texture alignment in the 3D model. The default value is original_image.
+   */
+  texture_alignment?: 'original_image' | 'geometry'
+  /**
+   * Image Url
+   *
+   * URL of the image to use for model generation.
+   */
+  image_url: string
+  /**
+   * Texture
+   *
+   * An option to enable texturing. Default is 'standard'; set 'no' to get a model without any textures, and set 'HD' to get a model with HD-quality textures.
+   */
+  texture?: 'no' | 'standard' | 'HD'
+  /**
+   * Auto Size
+   *
+   * Automatically scale the model to real-world dimensions, with the unit in meters. The default value is False.
+   */
+  auto_size?: boolean
+  /**
+   * Seed
+   *
+   * This is the random seed for model generation. The seed controls the geometry generation process, ensuring identical models when the same seed is used. This parameter is an integer and is randomly chosen if not set.
+   */
+  seed?: number
+  /**
+   * Quad
+   *
+   * Set True to enable quad mesh output (extra $0.05 per generation). If quad=True and face_limit is not set, the default face_limit will be 10000. Note: Enabling this option will force the output to be an FBX model.
+   */
+  quad?: boolean
+  /**
+   * Orientation
+   *
+   * Set orientation=align_image to automatically rotate the model to align with the original image. The default value is default.
+   */
+  orientation?: 'default' | 'align_image'
+  /**
+   * Texture Seed
+   *
+   * This is the random seed for texture generation. Using the same seed will produce identical textures. This parameter is an integer and is randomly chosen if not set. If you want a model with different textures, use the same seed and a different texture_seed.
+   */
+  texture_seed?: number
+}
+
+/**
+ * ObjectOutput
+ */
+export type SchemaTrellisMultiOutput = {
+  model_mesh: SchemaFile
+  /**
+   * Timings
+   *
+   * Processing timings
+   */
+  timings: {
+    [key: string]: number
+  }
+}
+
+/**
+ * MultiImageInputModel
+ */
+export type SchemaTrellisMultiInput = {
+  /**
+   * Multiimage Algo
+   *
+   * Algorithm for multi-image generation
+   */
+  multiimage_algo?: 'stochastic' | 'multidiffusion'
+  /**
+   * Slat Sampling Steps
+   *
+   * Sampling steps for structured latent generation
+   */
+  slat_sampling_steps?: number
+  /**
+   * Ss Sampling Steps
+   *
+   * Sampling steps for sparse structure generation
+   */
+  ss_sampling_steps?: number
+  /**
+   * Ss Guidance Strength
+   *
+   * Guidance strength for sparse structure generation
+   */
+  ss_guidance_strength?: number
+  /**
+   * Slat Guidance Strength
+   *
+   * Guidance strength for structured latent generation
+   */
+  slat_guidance_strength?: number
+  /**
+   * Mesh Simplify
+   *
+   * Mesh simplification factor
+   */
+  mesh_simplify?: number
+  /**
+   * Seed
+   *
+   * Random seed for reproducibility
+   */
+  seed?: number | unknown
+  /**
+   * Texture Size
+   *
+   * Texture resolution
+   */
+  texture_size?: 512 | 1024 | 2048
+  /**
+   * Image Urls
+   *
+   * List of URLs of input images to convert to 3D
+   */
+  image_urls: Array<string>
+}
+
+/**
+ * ObjectOutput
+ */
+export type SchemaHunyuan3dV21Output = {
+  /**
+   * Model Glb Pbr
+   *
+   * Generated 3D object with PBR materials.
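+   *
+   * @example
+   * // Editor's sketch: prefer the PBR asset when present, else the plain GLB.
+   * // (`output` is a hypothetical SchemaHunyuan3dV21Output value.)
+   * const url = (output.model_glb_pbr ?? output.model_glb).url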
+   */
+  model_glb_pbr?: SchemaFile
+  /**
+   * Seed
+   *
+   * Seed value used for generation.
+   */
+  seed: number
+  /**
+   * Model Mesh
+   *
+   * Generated 3D object assets zip.
+   */
+  model_mesh: SchemaFile
+  /**
+   * Model Glb
+   *
+   * Generated 3D object.
+   */
+  model_glb: SchemaFile
+}
+
+/**
+ * Hunyuan3DInput
+ */
+export type SchemaHunyuan3dV21Input = {
+  /**
+   * Input Image Url
+   *
+   * URL of image to use while generating the 3D model.
+   */
+  input_image_url: string
+  /**
+   * Octree Resolution
+   *
+   * Octree resolution for the model.
+   */
+  octree_resolution?: number
+  /**
+   * Guidance Scale
+   *
+   * Guidance scale for the model.
+   */
+  guidance_scale?: number
+  /**
+   * Seed
+   *
+   * The same seed and the same prompt given to the same version of the model
+   * will output the same image every time.
+   */
+  seed?: number
+  /**
+   * Num Inference Steps
+   *
+   * Number of inference steps to perform.
+   */
+  num_inference_steps?: number
+  /**
+   * Textured Mesh
+   *
+   * If set true, a textured mesh will be generated and the price charged will be 3 times that of a white mesh.
+   */
+  textured_mesh?: boolean
+}
+
+/**
+ * Tripo3dOutput
+ */
+export type SchemaTripoV25MultiviewTo3dOutput = {
+  /**
+   * Base Model
+   *
+   * Base model
+   */
+  base_model?: SchemaFile
+  /**
+   * Task Id
+   *
+   * The task id of the 3D model generation.
+   */
+  task_id: string
+  /**
+   * Rendered Image
+   *
+   * A preview image of the model
+   */
+  rendered_image?: SchemaFile
+  /**
+   * Model Mesh
+   *
+   * Model
+   */
+  model_mesh?: SchemaFile
+  /**
+   * Pbr Model
+   *
+   * Pbr model
+   */
+  pbr_model?: SchemaFile
+}
+
+/**
+ * MultiviewTo3dInput
+ */
+export type SchemaTripoV25MultiviewTo3dInput = {
+  /**
+   * Face Limit
+   *
+   * Limits the number of faces on the output model. If this option is not set, the face limit will be adaptively determined.
+   */
+  face_limit?: number
+  /**
+   * Right Image Url
+   *
+   * Right view image of the object.
+   */
+  right_image_url?: string
+  /**
+   * Style
+   *
+   * [DEPRECATED] Defines the artistic style or transformation to be applied to the 3D model, altering its appearance according to preset options (extra $0.05 per generation). Omit this option to keep the original style and appearance.
+   *
+   * @deprecated
+   */
+  style?:
+    | 'person:person2cartoon'
+    | 'object:clay'
+    | 'object:steampunk'
+    | 'animal:venom'
+    | 'object:barbie'
+    | 'object:christmas'
+    | 'gold'
+    | 'ancient_bronze'
+  /**
+   * Quad
+   *
+   * Set True to enable quad mesh output (extra $0.05 per generation). If quad=True and face_limit is not set, the default face_limit will be 10000. Note: Enabling this option will force the output to be an FBX model.
+   */
+  quad?: boolean
+  /**
+   * Front Image Url
+   *
+   * Front view image of the object.
+   */
+  front_image_url: string
+  /**
+   * Texture Seed
+   *
+   * This is the random seed for texture generation. Using the same seed will produce identical textures. This parameter is an integer and is randomly chosen if not set. If you want a model with different textures, use the same seed and a different texture_seed.
+   */
+  texture_seed?: number
+  /**
+   * Back Image Url
+   *
+   * Back view image of the object.
+   */
+  back_image_url?: string
+  /**
+   * Pbr
+   *
+   * A boolean option to enable pbr. The default value is True; set False to get a model without pbr. If this option is set to True, texture will be ignored and treated as True.
+   */
+  pbr?: boolean
+  /**
+   * Texture Alignment
+   *
+   * Determines the prioritization of texture alignment in the 3D model. The default value is original_image.
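+   *
+   * @example
+   * // Editor's sketch (hypothetical URLs): only the front view is required.
+   * const input: SchemaTripoV25MultiviewTo3dInput = {
+   *   front_image_url: 'https://example.com/front.png',
+   *   back_image_url: 'https://example.com/back.png',
+   *   texture_alignment: 'geometry',
+   * }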
+   */
+  texture_alignment?: 'original_image' | 'geometry'
+  /**
+   * Texture
+   *
+   * An option to enable texturing. Default is 'standard'; set 'no' to get a model without any textures, and set 'HD' to get a model with HD-quality textures.
+   */
+  texture?: 'no' | 'standard' | 'HD'
+  /**
+   * Auto Size
+   *
+   * Automatically scale the model to real-world dimensions, with the unit in meters. The default value is False.
+   */
+  auto_size?: boolean
+  /**
+   * Seed
+   *
+   * This is the random seed for model generation. The seed controls the geometry generation process, ensuring identical models when the same seed is used. This parameter is an integer and is randomly chosen if not set.
+   */
+  seed?: number
+  /**
+   * Orientation
+   *
+   * Set orientation=align_image to automatically rotate the model to align with the original image. The default value is default.
+   */
+  orientation?: 'default' | 'align_image'
+  /**
+   * Left Image Url
+   *
+   * Left view image of the object.
+   */
+  left_image_url?: string
+}
+
+/**
+ * ImageToWorldResponse
+ */
+export type SchemaHunyuanWorldImageToWorldOutput = {
+  /**
+   * World File
+   *
+   * The generated world.
+   */
+  world_file: SchemaFile
+}
+
+/**
+ * ImageToWorldRequest
+ */
+export type SchemaHunyuanWorldImageToWorldInput = {
+  /**
+   * Classes
+   *
+   * Classes to use for the world generation.
+   */
+  classes: string
+  /**
+   * Export Drc
+   *
+   * Whether to export DRC (Dynamic Resource Configuration).
+   */
+  export_drc?: boolean
+  /**
+   * Labels Fg1
+   *
+   * Labels for the first foreground object.
+   */
+  labels_fg1: string
+  /**
+   * Labels Fg2
+   *
+   * Labels for the second foreground object.
+   */
+  labels_fg2: string
+  /**
+   * Image Url
+   *
+   * The URL of the image to convert to a world.
+   */
+  image_url: string
+}
+
+/**
+ * PSHumanResponse
+ */
+export type SchemaPshumanOutput = {
+  /**
+   * Model Obj
+   *
+   * The generated 3D model in OBJ format.
+   */
+  model_obj: SchemaFile
+  /**
+   * Preview Image
+   *
+   * A preview image showing the input and the generated multi-view outputs.
+   */
+  preview_image: SchemaFile
+}
+
+/**
+ * PSHumanRequest
+ */
+export type SchemaPshumanInput = {
+  /**
+   * Guidance Scale
+   *
+   * Guidance scale for the diffusion process. Controls how much the output adheres to the generated views.
+   */
+  guidance_scale?: number
+  /**
+   * Seed
+   *
+   * Seed for reproducibility. If None, a random seed will be used.
+   */
+  seed?: number
+  /**
+   * Image Url
+   *
+   * A direct URL to the input image of a person.
+   */
+  image_url: string
+}
+
+/**
+ * ObjectOutputv2
+ */
+export type SchemaHyper3dRodinV2Output = {
+  /**
+   * Model Mesh
+   *
+   * Generated 3D object file.
+   */
+  model_mesh: SchemaFile
+  /**
+   * Seed
+   *
+   * Seed value used for generation.
+   */
+  seed: number
+  /**
+   * Textures
+   *
+   * Generated textures for the 3D object.
+   */
+  textures: Array<SchemaImage>
+}
+
+/**
+ * RodinGen2Input
+ */
+export type SchemaHyper3dRodinV2Input = {
+  /**
+   * Quality Mesh Option
+   *
+   * Combined quality and mesh type selection. Quad = smooth surfaces, Triangle = detailed geometry. These correspond to the `mesh_mode` (if the option contains 'Triangle', mesh_mode is 'Raw', otherwise 'Quad') and `quality_override` (the numeric part of the option) parameters in the Hyper3D API.
+   */
+  quality_mesh_option?:
+    | '4K Quad'
+    | '8K Quad'
+    | '18K Quad'
+    | '50K Quad'
+    | '2K Triangle'
+    | '20K Triangle'
+    | '150K Triangle'
+    | '500K Triangle'
+  /**
+   * Prompt
+   *
+   * A textual prompt to guide model generation. Optional for Image-to-3D mode - if empty, AI will generate a prompt based on your images.
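+   *
+   * @example
+   * // Editor's sketch (hypothetical URL): Image-to-3D with the prompt left
+   * // for the model to infer from the images.
+   * const input: SchemaHyper3dRodinV2Input = {
+   *   input_image_urls: ['https://example.com/toy.png'],
+   *   quality_mesh_option: '18K Quad',
+   * }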
+   */
+  prompt?: string
+  /**
+   * Preview Render
+   *
+   * Generate a preview render image of the 3D model along with the model files.
+   */
+  preview_render?: boolean
+  /**
+   * Bbox Condition
+   *
+   * An array that specifies the bounding box dimensions [width, height, length].
+   */
+  bbox_condition?: Array<number>
+  /**
+   * T/A Pose
+   *
+   * Generate characters in T-pose or A-pose format, making them easier to rig and animate in 3D software.
+   */
+  TAPose?: boolean
+  /**
+   * Input Image Urls
+   *
+   * URLs of images to use while generating the 3D model. Required for Image-to-3D mode. Up to 5 images allowed.
+   */
+  input_image_urls?: Array<string>
+  /**
+   * Use Original Alpha
+   *
+   * When enabled, preserves the transparency channel from input images during 3D generation.
+   */
+  use_original_alpha?: boolean
+  /**
+   * Geometry File Format
+   *
+   * Format of the geometry file. Possible values: glb, usdz, fbx, obj, stl. Default is glb.
+   */
+  geometry_file_format?: 'glb' | 'usdz' | 'fbx' | 'obj' | 'stl'
+  /**
+   * Addons
+   *
+   * The HighPack option will provide 4K resolution textures instead of the default 1K, as well as high-poly models. It will cost **triple the billable units**.
+   */
+  addons?: 'HighPack'
+  /**
+   * Seed
+   *
+   * Seed value for randomization, ranging from 0 to 65535. Optional.
+   */
+  seed?: number
+  /**
+   * Material
+   *
+   * Material type. PBR: Physically-based materials with realistic lighting. Shaded: Simple materials with baked lighting. All: Both types included.
+   */
+  material?: 'PBR' | 'Shaded' | 'All'
+}
+
+/**
+ * ImageTo3DOutput
+ *
+ * Output for Image to 3D conversion
+ */
+export type SchemaMeshyV6PreviewImageTo3dOutput = {
+  /**
+   * Model Urls
+   *
+   * URLs for different 3D model formats
+   */
+  model_urls: SchemaModelUrls
+  /**
+   * Texture Urls
+   *
+   * Array of texture file objects, matching the Meshy API structure
+   */
+  texture_urls?: Array<SchemaTextureFiles>
+  /**
+   * Thumbnail
+   *
+   * Preview thumbnail of the generated model
+   */
+  thumbnail?: SchemaFile
+  /**
+   * Seed
+   *
+   * The seed used for generation (if available)
+   */
+  seed?: number
+  /**
+   * Model Glb
+   *
+   * Generated 3D object in GLB format.
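+   *
+   * @example
+   * // Editor's sketch: fall back to the always-present GLB when FBX is absent.
+   * // (`output` is a hypothetical SchemaMeshyV6PreviewImageTo3dOutput value.)
+   * const file = output.model_urls.fbx ?? output.model_glb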
+   */
+  model_glb: SchemaFile
+}
+
+/**
+ * TextureFiles
+ *
+ * Texture files downloaded and uploaded to CDN
+ */
+export type SchemaTextureFiles = {
+  /**
+   * Base Color
+   *
+   * Base color texture
+   */
+  base_color: SchemaFile
+  /**
+   * Normal
+   *
+   * Normal texture (PBR)
+   */
+  normal?: SchemaFile
+  /**
+   * Roughness
+   *
+   * Roughness texture (PBR)
+   */
+  roughness?: SchemaFile
+  /**
+   * Metallic
+   *
+   * Metallic texture (PBR)
+   */
+  metallic?: SchemaFile
+}
+
+/**
+ * ModelUrls
+ *
+ * 3D model files in various formats
+ */
+export type SchemaModelUrls = {
+  /**
+   * Usdz
+   *
+   * USDZ format 3D model
+   */
+  usdz?: SchemaFile
+  /**
+   * Fbx
+   *
+   * FBX format 3D model
+   */
+  fbx?: SchemaFile
+  /**
+   * Blend
+   *
+   * Blender format 3D model
+   */
+  blend?: SchemaFile
+  /**
+   * Stl
+   *
+   * STL format 3D model
+   */
+  stl?: SchemaFile
+  /**
+   * Glb
+   *
+   * GLB format 3D model
+   */
+  glb?: SchemaFile
+  /**
+   * Obj
+   *
+   * OBJ format 3D model
+   */
+  obj?: SchemaFile
+}
+
+/**
+ * ImageTo3DInput
+ *
+ * Input for Image to 3D conversion
+ */
+export type SchemaMeshyV6PreviewImageTo3dInput = {
+  /**
+   * Enable Pbr
+   *
+   * Generate PBR Maps (metallic, roughness, normal) in addition to base color
+   */
+  enable_pbr?: boolean
+  /**
+   * Is A T Pose
+   *
+   * Whether to generate the model in an A/T pose
+   */
+  is_a_t_pose?: boolean
+  /**
+   * Target Polycount
+   *
+   * Target number of polygons in the generated model
+   */
+  target_polycount?: number
+  /**
+   * Should Texture
+   *
+   * Whether to generate textures
+   */
+  should_texture?: boolean
+  /**
+   * Texture Image Url
+   *
+   * 2D image to guide the texturing process
+   */
+  texture_image_url?: string
+  /**
+   * Topology
+   *
+   * Specify the topology of the generated model. Quad for smooth surfaces, Triangle for detailed geometry.
+   */
+  topology?: 'quad' | 'triangle'
+  /**
+   * Image Url
+   *
+   * Image URL or base64 data URI for 3D model creation. Supports .jpg, .jpeg, and .png formats. Also supports AVIF and HEIF formats, which will be automatically converted.
+   */
+  image_url: string
+  /**
+   * Enable Safety Checker
+   *
+   * If set to true, input data will be checked for safety before processing.
+   */
+  enable_safety_checker?: boolean
+  /**
+   * Symmetry Mode
+   *
+   * Controls symmetry behavior during model generation. Off disables symmetry, Auto determines it automatically, On enforces symmetry.
+   */
+  symmetry_mode?: 'off' | 'auto' | 'on'
+  /**
+   * Texture Prompt
+   *
+   * Text prompt to guide the texturing process
+   */
+  texture_prompt?: string
+  /**
+   * Should Remesh
+   *
+   * Whether to enable the remesh phase
+   */
+  should_remesh?: boolean
+}
+
+/**
+ * MultiImageTo3DOutput
+ *
+ * Output for Multi-Image to 3D conversion
+ */
+export type SchemaMeshyV5MultiImageTo3dOutput = {
+  /**
+   * Model Urls
+   *
+   * URLs for different 3D model formats
+   */
+  model_urls: SchemaModelUrls
+  /**
+   * Texture Urls
+   *
+   * Array of texture file objects
+   */
+  texture_urls?: Array<SchemaTextureFiles>
+  /**
+   * Thumbnail
+   *
+   * Preview thumbnail of the generated model
+   */
+  thumbnail?: SchemaFile
+  /**
+   * Seed
+   *
+   * The seed used for generation (if available)
+   */
+  seed?: number
+  /**
+   * Model Glb
+   *
+   * Generated 3D object in GLB format.
+   */
+  model_glb: SchemaFile
+}
+
+/**
+ * MultiImageTo3DInput
+ *
+ * Input for Multi-Image to 3D conversion
+ */
+export type SchemaMeshyV5MultiImageTo3dInput = {
+  /**
+   * Enable Pbr
+   *
+   * Generate PBR Maps (metallic, roughness, normal) in addition to base color. Requires should_texture to be true.
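+   *
+   * @example
+   * // Editor's sketch (hypothetical URLs): PBR maps require texturing on.
+   * const input: SchemaMeshyV5MultiImageTo3dInput = {
+   *   image_urls: ['https://example.com/a.png', 'https://example.com/b.png'],
+   *   should_texture: true,
+   *   enable_pbr: true,
+   * }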
+   */
+  enable_pbr?: boolean
+  /**
+   * Should Texture
+   *
+   * Whether to generate textures. False provides a mesh without textures for 5 credits; True adds texture generation for an additional 10 credits.
+   */
+  should_texture?: boolean
+  /**
+   * Target Polycount
+   *
+   * Target number of polygons in the generated model
+   */
+  target_polycount?: number
+  /**
+   * Is A T Pose
+   *
+   * Whether to generate the model in an A/T pose
+   */
+  is_a_t_pose?: boolean
+  /**
+   * Texture Image Url
+   *
+   * 2D image to guide the texturing process. Requires should_texture to be true.
+   */
+  texture_image_url?: string
+  /**
+   * Topology
+   *
+   * Specify the topology of the generated model. Quad for smooth surfaces, Triangle for detailed geometry.
+   */
+  topology?: 'quad' | 'triangle'
+  /**
+   * Enable Safety Checker
+   *
+   * If set to true, input data will be checked for safety before processing.
+   */
+  enable_safety_checker?: boolean
+  /**
+   * Symmetry Mode
+   *
+   * Controls symmetry behavior during model generation.
+   */
+  symmetry_mode?: 'off' | 'auto' | 'on'
+  /**
+   * Image Urls
+   *
+   * 1 to 4 images for 3D model creation. All images should depict the same object from different angles. Supports .jpg, .jpeg, .png formats, and AVIF/HEIF, which will be automatically converted. If more than 4 images are provided, only the first 4 will be used.
+   */
+  image_urls: Array<string>
+  /**
+   * Texture Prompt
+   *
+   * Text prompt to guide the texturing process. Requires should_texture to be true.
+   */
+  texture_prompt?: string
+  /**
+   * Should Remesh
+   *
+   * Whether to enable the remesh phase. When false, returns a triangular mesh, ignoring topology and target_polycount.
+   */
+  should_remesh?: boolean
+}
+
+/**
+ * Seed3DImageTo3DOutput
+ */
+export type SchemaBytedanceSeed3dImageTo3dOutput = {
+  /**
+   * Model
+   *
+   * The generated 3D model files
+   */
+  model: SchemaFile
+  /**
+   * Usage Tokens
+   *
+   * The number of tokens used for the 3D model generation
+   */
+  usage_tokens: number
+}
+
+/**
+ * Seed3DImageTo3DInput
+ */
+export type SchemaBytedanceSeed3dImageTo3dInput = {
+  /**
+   * Image Url
+   *
+   * URL of the image for the 3D asset generation.
+   */
+  image_url: string
+}
+
+/**
+ * MultiViewObjectOutput
+ */
+export type SchemaOmnipartOutput = {
+  full_model_mesh: SchemaFile
+  output_zip: SchemaFile
+  /**
+   * Seed
+   *
+   * Seed value used for generation.
+   */
+  seed: number
+  model_mesh: SchemaFile
+}
+
+/**
+ * OmnipartInput
+ */
+export type SchemaOmnipartInput = {
+  /**
+   * Input Image Url
+   *
+   * URL of image to use while generating the 3D model.
+   */
+  input_image_url: string
+  /**
+   * Parts
+   *
+   * Specify which segments to merge (e.g., '0,1;3,4' merges segments 0&1 together and 3&4 together)
+   */
+  parts?: string
+  /**
+   * Seed
+   *
+   * The same seed and the same prompt given to the same version of the model
+   * will output the same image every time.
+   */
+  seed?: number
+  /**
+   * Minimum Segment Size
+   *
+   * Minimum segment size (pixels) for the model.
+   */
+  minimum_segment_size?: number
+  /**
+   * Guidance Scale
+   *
+   * Guidance scale for the model.
+   */
+  guidance_scale?: number
+}
+
+/**
+ * SAM3DObjectMetadata
+ *
+ * Per-object metadata for 3D reconstruction.
+ */
+export type SchemaSam3dObjectMetadata = {
+  /**
+   * Rotation
+   *
+   * Rotation quaternion [x, y, z, w]
+   */
+  rotation?: Array<Array<number>>
+  /**
+   * Translation
+   *
+   * Translation [tx, ty, tz]
+   */
+  translation?: Array<Array<number>>
+  /**
+   * Object Index
+   *
+   * Index of the object in the scene
+   */
+  object_index: number
+  /**
+   * Scale
+   *
+   * Scale factors [sx, sy, sz]
+   */
+  scale?: Array<Array<number>>
+  /**
+   * Camera Pose
+   *
+   * Camera pose matrix
+   */
+  camera_pose?: Array<Array<number>>
+}
+
+/**
+ * PointPromptBase
+ */
+export type SchemaPointPromptBase = {
+  /**
+   * Y
+   *
+   * Y Coordinate of the prompt
+   */
+  y?: number
+  /**
+   * X
+   *
+   * X Coordinate of the prompt
+   */
+  x?: number
+  /**
+   * Object Id
+   *
+   * Optional object identifier. Prompts sharing an object id refine the same object.
+   */
+  object_id?: number
+  /**
+   * Label
+   *
+   * 1 for foreground, 0 for background
+   */
+  label?: 0 | 1
+}
+
+/**
+ * BoxPromptBase
+ */
+export type SchemaBoxPromptBase = {
+  /**
+   * Y Min
+   *
+   * Y Min Coordinate of the box
+   */
+  y_min?: number
+  /**
+   * Object Id
+   *
+   * Optional object identifier. Boxes sharing an object id refine the same object.
+   */
+  object_id?: number
+  /**
+   * X Max
+   *
+   * X Max Coordinate of the box
+   */
+  x_max?: number
+  /**
+   * X Min
+   *
+   * X Min Coordinate of the box
+   */
+  x_min?: number
+  /**
+   * Y Max
+   *
+   * Y Max Coordinate of the box
+   */
+  y_max?: number
+}
+
+/**
+ * SAM3DObjectOutput
+ */
+export type SchemaSam33dObjectsOutput = {
+  /**
+   * Model Glb
+   *
+   * 3D mesh in GLB format - combined scene for multi-object, single mesh otherwise
+   */
+  model_glb?: SchemaFile
+  /**
+   * Metadata
+   *
+   * Per-object metadata (rotation/translation/scale)
+   */
+  metadata: Array<SchemaSam3dObjectMetadata>
+  /**
+   * Gaussian Splat
+   *
+   * Gaussian splat file (.ply) - combined scene splat for multi-object, single splat otherwise
+   */
+  gaussian_splat: SchemaFile
+  /**
+   * Artifacts Zip
+   *
+   * Zip bundle containing all artifacts and metadata
+   */
+  artifacts_zip?: SchemaFile
+  /**
+   * Individual Glbs
+   *
+   * Individual GLB mesh files per object (only for multi-object scenes)
+   */
+  individual_glbs?: Array<SchemaFile>
+  /**
+   * Individual Splats
+   *
+   * Individual Gaussian splat files per object (only for multi-object scenes)
+   */
+  individual_splats?: Array<SchemaFile>
+}
+
+/**
+ * SAM3DObjectInput
+ */
+export type SchemaSam33dObjectsInput = {
+  /**
+   * Pointmap Url
+   *
+   * Optional URL to external pointmap/depth data (NPY or NPZ format) for improved 3D reconstruction depth estimation
+   */
+  pointmap_url?: string
+  /**
+   * Export Textured Glb
+   *
+   * If True, exports GLB with baked texture and UVs instead of vertex colors.
+   */
+  export_textured_glb?: boolean
+  /**
+   * Prompt
+   *
+   * Text prompt for auto-segmentation when no masks provided (e.g., 'chair', 'lamp')
+   */
+  prompt?: string
+  /**
+   * Box Prompts
+   *
+   * Box prompts for auto-segmentation when no masks provided. Multiple boxes supported - each produces a separate object mask for 3D reconstruction.
+   */
+  box_prompts?: Array<SchemaBoxPromptBase>
+  /**
+   * Image Url
+   *
+   * URL of the image to reconstruct in 3D
+   */
+  image_url: string
+  /**
+   * Mask Urls
+   *
+   * Optional list of mask URLs (one per object). If not provided, use prompt/point_prompts/box_prompts to auto-segment, or the entire image will be used.
+   */
+  mask_urls?: Array<string>
+  /**
+   * Point Prompts
+   *
+   * Point prompts for auto-segmentation when no masks provided
+   */
+  point_prompts?: Array<SchemaPointPromptBase>
+  /**
+   * Seed
+   *
+   * Random seed for reproducibility
+   */
+  seed?: number
+}
+
+/**
+ * SAM3DBodyPersonMetadata
+ *
+ * Per-person metadata for body reconstruction.
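+ *
+ * @example
+ * // Editor's sketch: read the first detected person's 2D keypoints.
+ * // (`output` is a hypothetical SchemaSam33dBodyOutput value.)
+ * const pts = output.metadata.people[0].keypoints_2d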
+ */
+export type SchemaSam3dBodyPersonMetadata = {
+  /**
+   * Pred Cam T
+   *
+   * Predicted camera translation [tx, ty, tz]
+   */
+  pred_cam_t: Array<number>
+  /**
+   * Person Id
+   *
+   * Index of the person in the scene
+   */
+  person_id: number
+  /**
+   * Focal Length
+   *
+   * Estimated focal length
+   */
+  focal_length: number
+  /**
+   * Keypoints 3D
+   *
+   * 3D keypoints [[x, y, z], ...] - 70 body keypoints in camera space
+   */
+  keypoints_3d?: Array<Array<number>>
+  /**
+   * Keypoints 2D
+   *
+   * 2D keypoints [[x, y], ...] - 70 body keypoints
+   */
+  keypoints_2d: Array<Array<number>>
+  /**
+   * Bbox
+   *
+   * Bounding box [x_min, y_min, x_max, y_max]
+   */
+  bbox: Array<number>
+}
+
+/**
+ * SAM3DBodyMetadata
+ *
+ * Metadata for body reconstruction output.
+ */
+export type SchemaSam3dBodyMetadata = {
+  /**
+   * People
+   *
+   * Per-person metadata
+   */
+  people: Array<SchemaSam3dBodyPersonMetadata>
+  /**
+   * Num People
+   *
+   * Number of people detected
+   */
+  num_people: number
+}
+
+/**
+ * SAM3DBodyOutput
+ */
+export type SchemaSam33dBodyOutput = {
+  /**
+   * Visualization
+   *
+   * Combined visualization image (original + keypoints + mesh + side view)
+   */
+  visualization: SchemaFile
+  /**
+   * Metadata
+   *
+   * Structured metadata including keypoints and camera parameters
+   */
+  metadata: SchemaSam3dBodyMetadata
+  /**
+   * Meshes
+   *
+   * Individual mesh files (.ply), one per detected person (when export_meshes=True)
+   */
+  meshes?: Array<SchemaFile>
+  /**
+   * Model Glb
+   *
+   * 3D body mesh in GLB format with optional 3D keypoint markers
+   */
+  model_glb: SchemaFile
+}
+
+/**
+ * SAM3DBodyInput
+ */
+export type SchemaSam33dBodyInput = {
+  /**
+   * Include 3D Keypoints
+   *
+   * Include 3D keypoint markers (spheres) in the GLB mesh for visualization
+   */
+  include_3d_keypoints?: boolean
+  /**
+   * Export Meshes
+   *
+   * Export individual mesh files (.ply) per person
+   */
+  export_meshes?: boolean
+  /**
+   * Mask Url
+   *
+   * Optional URL of a binary mask image (white=person, black=background). When provided, skips auto human detection and uses this mask instead. Bbox is auto-computed from the mask.
+   */
+  mask_url?: string
+  /**
+   * Image Url
+   *
+   * URL of the image containing humans
+   */
+  image_url: string
+}
+
+/**
+ * ImageTo3DOutput
+ */
+export type SchemaHunyuan3dV3ImageTo3dOutput = {
+  /**
+   * Model Urls
+   *
+   * URLs for different 3D model formats
+   */
+  model_urls: SchemaModelUrls
+  /**
+   * Thumbnail
+   *
+   * Preview thumbnail of the generated model
+   */
+  thumbnail?: SchemaFile
+  /**
+   * Seed
+   *
+   * The seed used for generation
+   */
+  seed?: number
+  /**
+   * Model Glb
+   *
+   * Generated 3D object in GLB format.
+   */
+  model_glb: SchemaFile
+}
+
+/**
+ * ImageTo3DInput
+ */
+export type SchemaHunyuan3dV3ImageTo3dInput = {
+  /**
+   * Input Image Url
+   *
+   * URL of image to use while generating the 3D model.
+   */
+  input_image_url: string
+  /**
+   * Polygon Type
+   *
+   * Polygon type. Only takes effect when GenerateType is LowPoly.
+   */
+  polygon_type?: 'triangle' | 'quadrilateral'
+  /**
+   * Face Count
+   *
+   * Target face count. Range: 40000-1500000
+   */
+  face_count?: number
+  /**
+   * Right Image Url
+   *
+   * Optional right view image URL for better 3D reconstruction.
+   */
+  right_image_url?: string
+  /**
+   * Back Image Url
+   *
+   * Optional back view image URL for better 3D reconstruction.
+   */
+  back_image_url?: string
+  /**
+   * Enable Pbr
+   *
+   * Whether to enable PBR material generation. Does not take effect when generate_type is Geometry.
+   */
+  enable_pbr?: boolean
+  /**
+   * Generate Type
+   *
+   * Generation type. Normal: textured model. LowPoly: polygon reduction.
Geometry: white model without texture. + */ + generate_type?: 'Normal' | 'LowPoly' | 'Geometry' + /** + * Left Image Url + * + * Optional left view image URL for better 3D reconstruction. + */ + left_image_url?: string +} + +/** + * SketchTo3DOutput + */ +export type SchemaHunyuan3dV3SketchTo3dOutput = { + /** + * Model Urls + * + * URLs for different 3D model formats + */ + model_urls: SchemaModelUrls + /** + * Thumbnail + * + * Preview thumbnail of the generated model + */ + thumbnail?: SchemaFile + /** + * Seed + * + * The seed used for generation + */ + seed?: number + /** + * Model Glb + * + * Generated 3D object in GLB format. + */ + model_glb: SchemaFile +} + +/** + * SketchTo3DInput + */ +export type SchemaHunyuan3dV3SketchTo3dInput = { + /** + * Input Image Url + * + * URL of sketch or line art image to transform into a 3D model. Image resolution must be between 128x128 and 5000x5000 pixels. + */ + input_image_url: string + /** + * Prompt + * + * Text prompt describing the 3D content attributes such as color, category, and material. + */ + prompt: string + /** + * Face Count + * + * Target face count. Range: 40000-1500000 + */ + face_count?: number + /** + * Enable Pbr + * + * Whether to enable PBR material generation. + */ + enable_pbr?: boolean +} + +/** + * ObjectOutput + */ +export type SchemaTrellis2Output = { + /** + * Model Glb + * + * Generated 3D GLB file + */ + model_glb: SchemaFile +} + +/** + * SingleImageInputModel + */ +export type SchemaTrellis2Input = { + /** + * Remesh Band + */ + remesh_band?: number + /** + * Ss Guidance Rescale + */ + ss_guidance_rescale?: number + /** + * Ss Rescale T + */ + ss_rescale_t?: number + /** + * Shape Slat Sampling Steps + */ + shape_slat_sampling_steps?: number + /** + * Tex Slat Rescale T + */ + tex_slat_rescale_t?: number + /** + * Ss Guidance Strength + */ + ss_guidance_strength?: number + /** + * Ss Sampling Steps + */ + ss_sampling_steps?: number + /** + * Tex Slat Sampling Steps + */ + tex_slat_sampling_steps?: number + /** + * Remesh Project + */ + remesh_project?: number + /** + * Texture Size + * + * Texture resolution + */ + texture_size?: 1024 | 2048 | 4096 + /** + * Shape Slat Rescale T + */ + shape_slat_rescale_t?: number + /** + * Resolution + * + * Output resolution; higher is slower but more detailed + */ + resolution?: 512 | 1024 | 1536 + /** + * Remesh + * + * Run remeshing (slower; often improves topology) + */ + remesh?: boolean + /** + * Tex Slat Guidance Rescale + */ + tex_slat_guidance_rescale?: number + /** + * Shape Slat Guidance Rescale + */ + shape_slat_guidance_rescale?: number + /** + * Image Url + * + * URL of the input image to convert to 3D + */ + image_url: string + /** + * Seed + * + * Random seed for reproducibility + */ + seed?: number + /** + * Shape Slat Guidance Strength + */ + shape_slat_guidance_strength?: number + /** + * Tex Slat Guidance Strength + */ + tex_slat_guidance_strength?: number + /** + * Decimation Target + * + * Target vertex count for mesh simplification during export + */ + decimation_target?: number +} + +export type SchemaQueueStatus = { + status: 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED' + /** + * The request id. + */ + request_id: string + /** + * The response url. + */ + response_url?: string + /** + * The status url. + */ + status_url?: string + /** + * The cancel url. + */ + cancel_url?: string + /** + * The logs. + */ + logs?: { + [key: string]: unknown + } + /** + * The metrics. + */ + metrics?: { + [key: string]: unknown + } + /** + * The queue position. 
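+   *
+   * @example
+   * // Editor's sketch of the queue flow; `submitted` is a hypothetical
+   * // SchemaQueueStatus returned by POSTing to a model endpoint.
+   * let s: SchemaQueueStatus = submitted
+   * while (s.status !== 'COMPLETED') {
+   *   s = await (await fetch(s.status_url!)).json()
+   * }
+   * const result = await (await fetch(s.response_url!)).json()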
+ */ + queue_position?: number +} + +export type GetFalAiTrellis2RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/trellis-2/requests/{request_id}/status' +} + +export type GetFalAiTrellis2RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiTrellis2RequestsByRequestIdStatusResponse = + GetFalAiTrellis2RequestsByRequestIdStatusResponses[keyof GetFalAiTrellis2RequestsByRequestIdStatusResponses] + +export type PutFalAiTrellis2RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/trellis-2/requests/{request_id}/cancel' +} + +export type PutFalAiTrellis2RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiTrellis2RequestsByRequestIdCancelResponse = + PutFalAiTrellis2RequestsByRequestIdCancelResponses[keyof PutFalAiTrellis2RequestsByRequestIdCancelResponses] + +export type PostFalAiTrellis2Data = { + body: SchemaTrellis2Input + path?: never + query?: never + url: '/fal-ai/trellis-2' +} + +export type PostFalAiTrellis2Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiTrellis2Response = + PostFalAiTrellis2Responses[keyof PostFalAiTrellis2Responses] + +export type GetFalAiTrellis2RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/trellis-2/requests/{request_id}' +} + +export type GetFalAiTrellis2RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaTrellis2Output +} + +export type GetFalAiTrellis2RequestsByRequestIdResponse = + GetFalAiTrellis2RequestsByRequestIdResponses[keyof GetFalAiTrellis2RequestsByRequestIdResponses] + +export type GetFalAiHunyuan3dV3SketchTo3dRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/hunyuan3d-v3/sketch-to-3d/requests/{request_id}/status' +} + +export type GetFalAiHunyuan3dV3SketchTo3dRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiHunyuan3dV3SketchTo3dRequestsByRequestIdStatusResponse = + GetFalAiHunyuan3dV3SketchTo3dRequestsByRequestIdStatusResponses[keyof GetFalAiHunyuan3dV3SketchTo3dRequestsByRequestIdStatusResponses] + +export type PutFalAiHunyuan3dV3SketchTo3dRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan3d-v3/sketch-to-3d/requests/{request_id}/cancel' +} + +export type PutFalAiHunyuan3dV3SketchTo3dRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
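+     *
+     * @example
+     * // Editor's sketch (hypothetical `requestId`/`FAL_KEY`; auth header per the
+     * // schema's `apiKeyAuth` scheme):
+     * await fetch(
+     *   `https://queue.fal.run/fal-ai/hunyuan3d-v3/sketch-to-3d/requests/${requestId}/cancel`,
+     *   { method: 'PUT', headers: { Authorization: `Key ${FAL_KEY}` } },
+     * )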
+ */ + success?: boolean + } +} + +export type PutFalAiHunyuan3dV3SketchTo3dRequestsByRequestIdCancelResponse = + PutFalAiHunyuan3dV3SketchTo3dRequestsByRequestIdCancelResponses[keyof PutFalAiHunyuan3dV3SketchTo3dRequestsByRequestIdCancelResponses] + +export type PostFalAiHunyuan3dV3SketchTo3dData = { + body: SchemaHunyuan3dV3SketchTo3dInput + path?: never + query?: never + url: '/fal-ai/hunyuan3d-v3/sketch-to-3d' +} + +export type PostFalAiHunyuan3dV3SketchTo3dResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiHunyuan3dV3SketchTo3dResponse = + PostFalAiHunyuan3dV3SketchTo3dResponses[keyof PostFalAiHunyuan3dV3SketchTo3dResponses] + +export type GetFalAiHunyuan3dV3SketchTo3dRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan3d-v3/sketch-to-3d/requests/{request_id}' +} + +export type GetFalAiHunyuan3dV3SketchTo3dRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaHunyuan3dV3SketchTo3dOutput +} + +export type GetFalAiHunyuan3dV3SketchTo3dRequestsByRequestIdResponse = + GetFalAiHunyuan3dV3SketchTo3dRequestsByRequestIdResponses[keyof GetFalAiHunyuan3dV3SketchTo3dRequestsByRequestIdResponses] + +export type GetFalAiHunyuan3dV3ImageTo3dRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/hunyuan3d-v3/image-to-3d/requests/{request_id}/status' +} + +export type GetFalAiHunyuan3dV3ImageTo3dRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiHunyuan3dV3ImageTo3dRequestsByRequestIdStatusResponse = + GetFalAiHunyuan3dV3ImageTo3dRequestsByRequestIdStatusResponses[keyof GetFalAiHunyuan3dV3ImageTo3dRequestsByRequestIdStatusResponses] + +export type PutFalAiHunyuan3dV3ImageTo3dRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan3d-v3/image-to-3d/requests/{request_id}/cancel' +} + +export type PutFalAiHunyuan3dV3ImageTo3dRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiHunyuan3dV3ImageTo3dRequestsByRequestIdCancelResponse = + PutFalAiHunyuan3dV3ImageTo3dRequestsByRequestIdCancelResponses[keyof PutFalAiHunyuan3dV3ImageTo3dRequestsByRequestIdCancelResponses] + +export type PostFalAiHunyuan3dV3ImageTo3dData = { + body: SchemaHunyuan3dV3ImageTo3dInput + path?: never + query?: never + url: '/fal-ai/hunyuan3d-v3/image-to-3d' +} + +export type PostFalAiHunyuan3dV3ImageTo3dResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiHunyuan3dV3ImageTo3dResponse = + PostFalAiHunyuan3dV3ImageTo3dResponses[keyof PostFalAiHunyuan3dV3ImageTo3dResponses] + +export type GetFalAiHunyuan3dV3ImageTo3dRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan3d-v3/image-to-3d/requests/{request_id}' +} + +export type GetFalAiHunyuan3dV3ImageTo3dRequestsByRequestIdResponses = { + /** + * Result of the request. 
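+   *
+   * @example
+   * // Editor's sketch (hypothetical `requestId`/`FAL_KEY`): fetch the completed result.
+   * const res = await fetch(
+   *   `https://queue.fal.run/fal-ai/hunyuan3d-v3/image-to-3d/requests/${requestId}`,
+   *   { headers: { Authorization: `Key ${FAL_KEY}` } },
+   * )
+   * const out: SchemaHunyuan3dV3ImageTo3dOutput = await res.json()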
+ */ + 200: SchemaHunyuan3dV3ImageTo3dOutput +} + +export type GetFalAiHunyuan3dV3ImageTo3dRequestsByRequestIdResponse = + GetFalAiHunyuan3dV3ImageTo3dRequestsByRequestIdResponses[keyof GetFalAiHunyuan3dV3ImageTo3dRequestsByRequestIdResponses] + +export type GetFalAiSam33dBodyRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/sam-3/3d-body/requests/{request_id}/status' +} + +export type GetFalAiSam33dBodyRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSam33dBodyRequestsByRequestIdStatusResponse = + GetFalAiSam33dBodyRequestsByRequestIdStatusResponses[keyof GetFalAiSam33dBodyRequestsByRequestIdStatusResponses] + +export type PutFalAiSam33dBodyRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sam-3/3d-body/requests/{request_id}/cancel' +} + +export type PutFalAiSam33dBodyRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiSam33dBodyRequestsByRequestIdCancelResponse = + PutFalAiSam33dBodyRequestsByRequestIdCancelResponses[keyof PutFalAiSam33dBodyRequestsByRequestIdCancelResponses] + +export type PostFalAiSam33dBodyData = { + body: SchemaSam33dBodyInput + path?: never + query?: never + url: '/fal-ai/sam-3/3d-body' +} + +export type PostFalAiSam33dBodyResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSam33dBodyResponse = + PostFalAiSam33dBodyResponses[keyof PostFalAiSam33dBodyResponses] + +export type GetFalAiSam33dBodyRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sam-3/3d-body/requests/{request_id}' +} + +export type GetFalAiSam33dBodyRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSam33dBodyOutput +} + +export type GetFalAiSam33dBodyRequestsByRequestIdResponse = + GetFalAiSam33dBodyRequestsByRequestIdResponses[keyof GetFalAiSam33dBodyRequestsByRequestIdResponses] + +export type GetFalAiSam33dObjectsRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/sam-3/3d-objects/requests/{request_id}/status' +} + +export type GetFalAiSam33dObjectsRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSam33dObjectsRequestsByRequestIdStatusResponse = + GetFalAiSam33dObjectsRequestsByRequestIdStatusResponses[keyof GetFalAiSam33dObjectsRequestsByRequestIdStatusResponses] + +export type PutFalAiSam33dObjectsRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sam-3/3d-objects/requests/{request_id}/cancel' +} + +export type PutFalAiSam33dObjectsRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiSam33dObjectsRequestsByRequestIdCancelResponse = + PutFalAiSam33dObjectsRequestsByRequestIdCancelResponses[keyof PutFalAiSam33dObjectsRequestsByRequestIdCancelResponses] + +export type PostFalAiSam33dObjectsData = { + body: SchemaSam33dObjectsInput + path?: never + query?: never + url: '/fal-ai/sam-3/3d-objects' +} + +export type PostFalAiSam33dObjectsResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSam33dObjectsResponse = + PostFalAiSam33dObjectsResponses[keyof PostFalAiSam33dObjectsResponses] + +export type GetFalAiSam33dObjectsRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sam-3/3d-objects/requests/{request_id}' +} + +export type GetFalAiSam33dObjectsRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSam33dObjectsOutput +} + +export type GetFalAiSam33dObjectsRequestsByRequestIdResponse = + GetFalAiSam33dObjectsRequestsByRequestIdResponses[keyof GetFalAiSam33dObjectsRequestsByRequestIdResponses] + +export type GetFalAiOmnipartRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/omnipart/requests/{request_id}/status' +} + +export type GetFalAiOmnipartRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiOmnipartRequestsByRequestIdStatusResponse = + GetFalAiOmnipartRequestsByRequestIdStatusResponses[keyof GetFalAiOmnipartRequestsByRequestIdStatusResponses] + +export type PutFalAiOmnipartRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/omnipart/requests/{request_id}/cancel' +} + +export type PutFalAiOmnipartRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiOmnipartRequestsByRequestIdCancelResponse = + PutFalAiOmnipartRequestsByRequestIdCancelResponses[keyof PutFalAiOmnipartRequestsByRequestIdCancelResponses] + +export type PostFalAiOmnipartData = { + body: SchemaOmnipartInput + path?: never + query?: never + url: '/fal-ai/omnipart' +} + +export type PostFalAiOmnipartResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiOmnipartResponse = + PostFalAiOmnipartResponses[keyof PostFalAiOmnipartResponses] + +export type GetFalAiOmnipartRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/omnipart/requests/{request_id}' +} + +export type GetFalAiOmnipartRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaOmnipartOutput +} + +export type GetFalAiOmnipartRequestsByRequestIdResponse = + GetFalAiOmnipartRequestsByRequestIdResponses[keyof GetFalAiOmnipartRequestsByRequestIdResponses] + +export type GetFalAiBytedanceSeed3dImageTo3dRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
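+     *
+     * Hypothetical status poll that opts into logs with `?logs=1` (the
+     * host, `requestId`, and auth header are assumptions, not generated
+     * code):
+     *
+     * @example
+     * const res = await fetch(
+     *   `https://queue.fal.run/fal-ai/omnipart/requests/${requestId}/status?logs=1`,
+     *   { headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+     * )
+     * const status: SchemaQueueStatus = await res.json()
+     * if (status.status === 'IN_QUEUE') console.log(status.queue_position)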
+ */ + logs?: number + } + url: '/fal-ai/bytedance/seed3d/image-to-3d/requests/{request_id}/status' +} + +export type GetFalAiBytedanceSeed3dImageTo3dRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiBytedanceSeed3dImageTo3dRequestsByRequestIdStatusResponse = + GetFalAiBytedanceSeed3dImageTo3dRequestsByRequestIdStatusResponses[keyof GetFalAiBytedanceSeed3dImageTo3dRequestsByRequestIdStatusResponses] + +export type PutFalAiBytedanceSeed3dImageTo3dRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bytedance/seed3d/image-to-3d/requests/{request_id}/cancel' +} + +export type PutFalAiBytedanceSeed3dImageTo3dRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiBytedanceSeed3dImageTo3dRequestsByRequestIdCancelResponse = + PutFalAiBytedanceSeed3dImageTo3dRequestsByRequestIdCancelResponses[keyof PutFalAiBytedanceSeed3dImageTo3dRequestsByRequestIdCancelResponses] + +export type PostFalAiBytedanceSeed3dImageTo3dData = { + body: SchemaBytedanceSeed3dImageTo3dInput + path?: never + query?: never + url: '/fal-ai/bytedance/seed3d/image-to-3d' +} + +export type PostFalAiBytedanceSeed3dImageTo3dResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiBytedanceSeed3dImageTo3dResponse = + PostFalAiBytedanceSeed3dImageTo3dResponses[keyof PostFalAiBytedanceSeed3dImageTo3dResponses] + +export type GetFalAiBytedanceSeed3dImageTo3dRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bytedance/seed3d/image-to-3d/requests/{request_id}' +} + +export type GetFalAiBytedanceSeed3dImageTo3dRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaBytedanceSeed3dImageTo3dOutput +} + +export type GetFalAiBytedanceSeed3dImageTo3dRequestsByRequestIdResponse = + GetFalAiBytedanceSeed3dImageTo3dRequestsByRequestIdResponses[keyof GetFalAiBytedanceSeed3dImageTo3dRequestsByRequestIdResponses] + +export type GetFalAiMeshyV5MultiImageTo3dRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/meshy/v5/multi-image-to-3d/requests/{request_id}/status' +} + +export type GetFalAiMeshyV5MultiImageTo3dRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiMeshyV5MultiImageTo3dRequestsByRequestIdStatusResponse = + GetFalAiMeshyV5MultiImageTo3dRequestsByRequestIdStatusResponses[keyof GetFalAiMeshyV5MultiImageTo3dRequestsByRequestIdStatusResponses] + +export type PutFalAiMeshyV5MultiImageTo3dRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/meshy/v5/multi-image-to-3d/requests/{request_id}/cancel' +} + +export type PutFalAiMeshyV5MultiImageTo3dRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiMeshyV5MultiImageTo3dRequestsByRequestIdCancelResponse = + PutFalAiMeshyV5MultiImageTo3dRequestsByRequestIdCancelResponses[keyof PutFalAiMeshyV5MultiImageTo3dRequestsByRequestIdCancelResponses] + +export type PostFalAiMeshyV5MultiImageTo3dData = { + body: SchemaMeshyV5MultiImageTo3dInput + path?: never + query?: never + url: '/fal-ai/meshy/v5/multi-image-to-3d' +} + +export type PostFalAiMeshyV5MultiImageTo3dResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMeshyV5MultiImageTo3dResponse = + PostFalAiMeshyV5MultiImageTo3dResponses[keyof PostFalAiMeshyV5MultiImageTo3dResponses] + +export type GetFalAiMeshyV5MultiImageTo3dRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/meshy/v5/multi-image-to-3d/requests/{request_id}' +} + +export type GetFalAiMeshyV5MultiImageTo3dRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMeshyV5MultiImageTo3dOutput +} + +export type GetFalAiMeshyV5MultiImageTo3dRequestsByRequestIdResponse = + GetFalAiMeshyV5MultiImageTo3dRequestsByRequestIdResponses[keyof GetFalAiMeshyV5MultiImageTo3dRequestsByRequestIdResponses] + +export type GetFalAiMeshyV6PreviewImageTo3dRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/meshy/v6-preview/image-to-3d/requests/{request_id}/status' +} + +export type GetFalAiMeshyV6PreviewImageTo3dRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiMeshyV6PreviewImageTo3dRequestsByRequestIdStatusResponse = + GetFalAiMeshyV6PreviewImageTo3dRequestsByRequestIdStatusResponses[keyof GetFalAiMeshyV6PreviewImageTo3dRequestsByRequestIdStatusResponses] + +export type PutFalAiMeshyV6PreviewImageTo3dRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/meshy/v6-preview/image-to-3d/requests/{request_id}/cancel' +} + +export type PutFalAiMeshyV6PreviewImageTo3dRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiMeshyV6PreviewImageTo3dRequestsByRequestIdCancelResponse = + PutFalAiMeshyV6PreviewImageTo3dRequestsByRequestIdCancelResponses[keyof PutFalAiMeshyV6PreviewImageTo3dRequestsByRequestIdCancelResponses] + +export type PostFalAiMeshyV6PreviewImageTo3dData = { + body: SchemaMeshyV6PreviewImageTo3dInput + path?: never + query?: never + url: '/fal-ai/meshy/v6-preview/image-to-3d' +} + +export type PostFalAiMeshyV6PreviewImageTo3dResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMeshyV6PreviewImageTo3dResponse = + PostFalAiMeshyV6PreviewImageTo3dResponses[keyof PostFalAiMeshyV6PreviewImageTo3dResponses] + +export type GetFalAiMeshyV6PreviewImageTo3dRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/meshy/v6-preview/image-to-3d/requests/{request_id}' +} + +export type GetFalAiMeshyV6PreviewImageTo3dRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaMeshyV6PreviewImageTo3dOutput +} + +export type GetFalAiMeshyV6PreviewImageTo3dRequestsByRequestIdResponse = + GetFalAiMeshyV6PreviewImageTo3dRequestsByRequestIdResponses[keyof GetFalAiMeshyV6PreviewImageTo3dRequestsByRequestIdResponses] + +export type GetFalAiHyper3dRodinV2RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/hyper3d/rodin/v2/requests/{request_id}/status' +} + +export type GetFalAiHyper3dRodinV2RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiHyper3dRodinV2RequestsByRequestIdStatusResponse = + GetFalAiHyper3dRodinV2RequestsByRequestIdStatusResponses[keyof GetFalAiHyper3dRodinV2RequestsByRequestIdStatusResponses] + +export type PutFalAiHyper3dRodinV2RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hyper3d/rodin/v2/requests/{request_id}/cancel' +} + +export type PutFalAiHyper3dRodinV2RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiHyper3dRodinV2RequestsByRequestIdCancelResponse = + PutFalAiHyper3dRodinV2RequestsByRequestIdCancelResponses[keyof PutFalAiHyper3dRodinV2RequestsByRequestIdCancelResponses] + +export type PostFalAiHyper3dRodinV2Data = { + body: SchemaHyper3dRodinV2Input + path?: never + query?: never + url: '/fal-ai/hyper3d/rodin/v2' +} + +export type PostFalAiHyper3dRodinV2Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiHyper3dRodinV2Response = + PostFalAiHyper3dRodinV2Responses[keyof PostFalAiHyper3dRodinV2Responses] + +export type GetFalAiHyper3dRodinV2RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hyper3d/rodin/v2/requests/{request_id}' +} + +export type GetFalAiHyper3dRodinV2RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaHyper3dRodinV2Output +} + +export type GetFalAiHyper3dRodinV2RequestsByRequestIdResponse = + GetFalAiHyper3dRodinV2RequestsByRequestIdResponses[keyof GetFalAiHyper3dRodinV2RequestsByRequestIdResponses] + +export type GetFalAiPshumanRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pshuman/requests/{request_id}/status' +} + +export type GetFalAiPshumanRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiPshumanRequestsByRequestIdStatusResponse = + GetFalAiPshumanRequestsByRequestIdStatusResponses[keyof GetFalAiPshumanRequestsByRequestIdStatusResponses] + +export type PutFalAiPshumanRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pshuman/requests/{request_id}/cancel' +} + +export type PutFalAiPshumanRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
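+       *
+       * Hypothetical cancellation sketch (host, `requestId`, and auth are
+       * assumptions; the method, path, and response shape come from this
+       * file):
+       *
+       * @example
+       * const res = await fetch(
+       *   `https://queue.fal.run/fal-ai/pshuman/requests/${requestId}/cancel`,
+       *   { method: 'PUT', headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+       * )
+       * const body: PutFalAiPshumanRequestsByRequestIdCancelResponse = await res.json()
+       * if (body.success) console.log('request cancelled')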
+ */ + success?: boolean + } +} + +export type PutFalAiPshumanRequestsByRequestIdCancelResponse = + PutFalAiPshumanRequestsByRequestIdCancelResponses[keyof PutFalAiPshumanRequestsByRequestIdCancelResponses] + +export type PostFalAiPshumanData = { + body: SchemaPshumanInput + path?: never + query?: never + url: '/fal-ai/pshuman' +} + +export type PostFalAiPshumanResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPshumanResponse = + PostFalAiPshumanResponses[keyof PostFalAiPshumanResponses] + +export type GetFalAiPshumanRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pshuman/requests/{request_id}' +} + +export type GetFalAiPshumanRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPshumanOutput +} + +export type GetFalAiPshumanRequestsByRequestIdResponse = + GetFalAiPshumanRequestsByRequestIdResponses[keyof GetFalAiPshumanRequestsByRequestIdResponses] + +export type GetFalAiHunyuanWorldImageToWorldRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/hunyuan_world/image-to-world/requests/{request_id}/status' +} + +export type GetFalAiHunyuanWorldImageToWorldRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiHunyuanWorldImageToWorldRequestsByRequestIdStatusResponse = + GetFalAiHunyuanWorldImageToWorldRequestsByRequestIdStatusResponses[keyof GetFalAiHunyuanWorldImageToWorldRequestsByRequestIdStatusResponses] + +export type PutFalAiHunyuanWorldImageToWorldRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan_world/image-to-world/requests/{request_id}/cancel' +} + +export type PutFalAiHunyuanWorldImageToWorldRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiHunyuanWorldImageToWorldRequestsByRequestIdCancelResponse = + PutFalAiHunyuanWorldImageToWorldRequestsByRequestIdCancelResponses[keyof PutFalAiHunyuanWorldImageToWorldRequestsByRequestIdCancelResponses] + +export type PostFalAiHunyuanWorldImageToWorldData = { + body: SchemaHunyuanWorldImageToWorldInput + path?: never + query?: never + url: '/fal-ai/hunyuan_world/image-to-world' +} + +export type PostFalAiHunyuanWorldImageToWorldResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiHunyuanWorldImageToWorldResponse = + PostFalAiHunyuanWorldImageToWorldResponses[keyof PostFalAiHunyuanWorldImageToWorldResponses] + +export type GetFalAiHunyuanWorldImageToWorldRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan_world/image-to-world/requests/{request_id}' +} + +export type GetFalAiHunyuanWorldImageToWorldRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaHunyuanWorldImageToWorldOutput +} + +export type GetFalAiHunyuanWorldImageToWorldRequestsByRequestIdResponse = + GetFalAiHunyuanWorldImageToWorldRequestsByRequestIdResponses[keyof GetFalAiHunyuanWorldImageToWorldRequestsByRequestIdResponses] + +export type GetTripo3dTripoV25MultiviewTo3dRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/tripo3d/tripo/v2.5/multiview-to-3d/requests/{request_id}/status' +} + +export type GetTripo3dTripoV25MultiviewTo3dRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetTripo3dTripoV25MultiviewTo3dRequestsByRequestIdStatusResponse = + GetTripo3dTripoV25MultiviewTo3dRequestsByRequestIdStatusResponses[keyof GetTripo3dTripoV25MultiviewTo3dRequestsByRequestIdStatusResponses] + +export type PutTripo3dTripoV25MultiviewTo3dRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/tripo3d/tripo/v2.5/multiview-to-3d/requests/{request_id}/cancel' +} + +export type PutTripo3dTripoV25MultiviewTo3dRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutTripo3dTripoV25MultiviewTo3dRequestsByRequestIdCancelResponse = + PutTripo3dTripoV25MultiviewTo3dRequestsByRequestIdCancelResponses[keyof PutTripo3dTripoV25MultiviewTo3dRequestsByRequestIdCancelResponses] + +export type PostTripo3dTripoV25MultiviewTo3dData = { + body: SchemaTripoV25MultiviewTo3dInput + path?: never + query?: never + url: '/tripo3d/tripo/v2.5/multiview-to-3d' +} + +export type PostTripo3dTripoV25MultiviewTo3dResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostTripo3dTripoV25MultiviewTo3dResponse = + PostTripo3dTripoV25MultiviewTo3dResponses[keyof PostTripo3dTripoV25MultiviewTo3dResponses] + +export type GetTripo3dTripoV25MultiviewTo3dRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/tripo3d/tripo/v2.5/multiview-to-3d/requests/{request_id}' +} + +export type GetTripo3dTripoV25MultiviewTo3dRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaTripoV25MultiviewTo3dOutput +} + +export type GetTripo3dTripoV25MultiviewTo3dRequestsByRequestIdResponse = + GetTripo3dTripoV25MultiviewTo3dRequestsByRequestIdResponses[keyof GetTripo3dTripoV25MultiviewTo3dRequestsByRequestIdResponses] + +export type GetFalAiHunyuan3dV21RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/hunyuan3d-v21/requests/{request_id}/status' +} + +export type GetFalAiHunyuan3dV21RequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiHunyuan3dV21RequestsByRequestIdStatusResponse = + GetFalAiHunyuan3dV21RequestsByRequestIdStatusResponses[keyof GetFalAiHunyuan3dV21RequestsByRequestIdStatusResponses] + +export type PutFalAiHunyuan3dV21RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan3d-v21/requests/{request_id}/cancel' +} + +export type PutFalAiHunyuan3dV21RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiHunyuan3dV21RequestsByRequestIdCancelResponse = + PutFalAiHunyuan3dV21RequestsByRequestIdCancelResponses[keyof PutFalAiHunyuan3dV21RequestsByRequestIdCancelResponses] + +export type PostFalAiHunyuan3dV21Data = { + body: SchemaHunyuan3dV21Input + path?: never + query?: never + url: '/fal-ai/hunyuan3d-v21' +} + +export type PostFalAiHunyuan3dV21Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiHunyuan3dV21Response = + PostFalAiHunyuan3dV21Responses[keyof PostFalAiHunyuan3dV21Responses] + +export type GetFalAiHunyuan3dV21RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan3d-v21/requests/{request_id}' +} + +export type GetFalAiHunyuan3dV21RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaHunyuan3dV21Output +} + +export type GetFalAiHunyuan3dV21RequestsByRequestIdResponse = + GetFalAiHunyuan3dV21RequestsByRequestIdResponses[keyof GetFalAiHunyuan3dV21RequestsByRequestIdResponses] + +export type GetFalAiTrellisMultiRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/trellis/multi/requests/{request_id}/status' +} + +export type GetFalAiTrellisMultiRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiTrellisMultiRequestsByRequestIdStatusResponse = + GetFalAiTrellisMultiRequestsByRequestIdStatusResponses[keyof GetFalAiTrellisMultiRequestsByRequestIdStatusResponses] + +export type PutFalAiTrellisMultiRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/trellis/multi/requests/{request_id}/cancel' +} + +export type PutFalAiTrellisMultiRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiTrellisMultiRequestsByRequestIdCancelResponse = + PutFalAiTrellisMultiRequestsByRequestIdCancelResponses[keyof PutFalAiTrellisMultiRequestsByRequestIdCancelResponses] + +export type PostFalAiTrellisMultiData = { + body: SchemaTrellisMultiInput + path?: never + query?: never + url: '/fal-ai/trellis/multi' +} + +export type PostFalAiTrellisMultiResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiTrellisMultiResponse = + PostFalAiTrellisMultiResponses[keyof PostFalAiTrellisMultiResponses] + +export type GetFalAiTrellisMultiRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/trellis/multi/requests/{request_id}' +} + +export type GetFalAiTrellisMultiRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaTrellisMultiOutput +} + +export type GetFalAiTrellisMultiRequestsByRequestIdResponse = + GetFalAiTrellisMultiRequestsByRequestIdResponses[keyof GetFalAiTrellisMultiRequestsByRequestIdResponses] + +export type GetTripo3dTripoV25ImageTo3dRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/tripo3d/tripo/v2.5/image-to-3d/requests/{request_id}/status' +} + +export type GetTripo3dTripoV25ImageTo3dRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetTripo3dTripoV25ImageTo3dRequestsByRequestIdStatusResponse = + GetTripo3dTripoV25ImageTo3dRequestsByRequestIdStatusResponses[keyof GetTripo3dTripoV25ImageTo3dRequestsByRequestIdStatusResponses] + +export type PutTripo3dTripoV25ImageTo3dRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/tripo3d/tripo/v2.5/image-to-3d/requests/{request_id}/cancel' +} + +export type PutTripo3dTripoV25ImageTo3dRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutTripo3dTripoV25ImageTo3dRequestsByRequestIdCancelResponse = + PutTripo3dTripoV25ImageTo3dRequestsByRequestIdCancelResponses[keyof PutTripo3dTripoV25ImageTo3dRequestsByRequestIdCancelResponses] + +export type PostTripo3dTripoV25ImageTo3dData = { + body: SchemaTripoV25ImageTo3dInput + path?: never + query?: never + url: '/tripo3d/tripo/v2.5/image-to-3d' +} + +export type PostTripo3dTripoV25ImageTo3dResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostTripo3dTripoV25ImageTo3dResponse = + PostTripo3dTripoV25ImageTo3dResponses[keyof PostTripo3dTripoV25ImageTo3dResponses] + +export type GetTripo3dTripoV25ImageTo3dRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/tripo3d/tripo/v2.5/image-to-3d/requests/{request_id}' +} + +export type GetTripo3dTripoV25ImageTo3dRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaTripoV25ImageTo3dOutput +} + +export type GetTripo3dTripoV25ImageTo3dRequestsByRequestIdResponse = + GetTripo3dTripoV25ImageTo3dRequestsByRequestIdResponses[keyof GetTripo3dTripoV25ImageTo3dRequestsByRequestIdResponses] + +export type GetFalAiHunyuan3dV2MultiViewTurboRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/hunyuan3d/v2/multi-view/turbo/requests/{request_id}/status' +} + +export type GetFalAiHunyuan3dV2MultiViewTurboRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
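+     *
+     * Hypothetical polling loop over this status shape; `getStatus` is an
+     * assumed helper that GETs the status URL from the Data type above:
+     *
+     * @example
+     * let s: SchemaQueueStatus = await getStatus(requestId)
+     * while (s.status !== 'COMPLETED') {
+     *   await new Promise((resolve) => setTimeout(resolve, 1000))
+     *   s = await getStatus(requestId)
+     * }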
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiHunyuan3dV2MultiViewTurboRequestsByRequestIdStatusResponse = + GetFalAiHunyuan3dV2MultiViewTurboRequestsByRequestIdStatusResponses[keyof GetFalAiHunyuan3dV2MultiViewTurboRequestsByRequestIdStatusResponses] + +export type PutFalAiHunyuan3dV2MultiViewTurboRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan3d/v2/multi-view/turbo/requests/{request_id}/cancel' +} + +export type PutFalAiHunyuan3dV2MultiViewTurboRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiHunyuan3dV2MultiViewTurboRequestsByRequestIdCancelResponse = + PutFalAiHunyuan3dV2MultiViewTurboRequestsByRequestIdCancelResponses[keyof PutFalAiHunyuan3dV2MultiViewTurboRequestsByRequestIdCancelResponses] + +export type PostFalAiHunyuan3dV2MultiViewTurboData = { + body: SchemaHunyuan3dV2MultiViewTurboInput + path?: never + query?: never + url: '/fal-ai/hunyuan3d/v2/multi-view/turbo' +} + +export type PostFalAiHunyuan3dV2MultiViewTurboResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiHunyuan3dV2MultiViewTurboResponse = + PostFalAiHunyuan3dV2MultiViewTurboResponses[keyof PostFalAiHunyuan3dV2MultiViewTurboResponses] + +export type GetFalAiHunyuan3dV2MultiViewTurboRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan3d/v2/multi-view/turbo/requests/{request_id}' +} + +export type GetFalAiHunyuan3dV2MultiViewTurboRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaHunyuan3dV2MultiViewTurboOutput +} + +export type GetFalAiHunyuan3dV2MultiViewTurboRequestsByRequestIdResponse = + GetFalAiHunyuan3dV2MultiViewTurboRequestsByRequestIdResponses[keyof GetFalAiHunyuan3dV2MultiViewTurboRequestsByRequestIdResponses] + +export type GetFalAiHunyuan3dV2RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/hunyuan3d/v2/requests/{request_id}/status' +} + +export type GetFalAiHunyuan3dV2RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiHunyuan3dV2RequestsByRequestIdStatusResponse = + GetFalAiHunyuan3dV2RequestsByRequestIdStatusResponses[keyof GetFalAiHunyuan3dV2RequestsByRequestIdStatusResponses] + +export type PutFalAiHunyuan3dV2RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan3d/v2/requests/{request_id}/cancel' +} + +export type PutFalAiHunyuan3dV2RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiHunyuan3dV2RequestsByRequestIdCancelResponse = + PutFalAiHunyuan3dV2RequestsByRequestIdCancelResponses[keyof PutFalAiHunyuan3dV2RequestsByRequestIdCancelResponses] + +export type PostFalAiHunyuan3dV2Data = { + body: SchemaHunyuan3dV2Input + path?: never + query?: never + url: '/fal-ai/hunyuan3d/v2' +} + +export type PostFalAiHunyuan3dV2Responses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiHunyuan3dV2Response = + PostFalAiHunyuan3dV2Responses[keyof PostFalAiHunyuan3dV2Responses] + +export type GetFalAiHunyuan3dV2RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan3d/v2/requests/{request_id}' +} + +export type GetFalAiHunyuan3dV2RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaHunyuan3dV2Output +} + +export type GetFalAiHunyuan3dV2RequestsByRequestIdResponse = + GetFalAiHunyuan3dV2RequestsByRequestIdResponses[keyof GetFalAiHunyuan3dV2RequestsByRequestIdResponses] + +export type GetFalAiHunyuan3dV2MiniRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/hunyuan3d/v2/mini/requests/{request_id}/status' +} + +export type GetFalAiHunyuan3dV2MiniRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiHunyuan3dV2MiniRequestsByRequestIdStatusResponse = + GetFalAiHunyuan3dV2MiniRequestsByRequestIdStatusResponses[keyof GetFalAiHunyuan3dV2MiniRequestsByRequestIdStatusResponses] + +export type PutFalAiHunyuan3dV2MiniRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan3d/v2/mini/requests/{request_id}/cancel' +} + +export type PutFalAiHunyuan3dV2MiniRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiHunyuan3dV2MiniRequestsByRequestIdCancelResponse = + PutFalAiHunyuan3dV2MiniRequestsByRequestIdCancelResponses[keyof PutFalAiHunyuan3dV2MiniRequestsByRequestIdCancelResponses] + +export type PostFalAiHunyuan3dV2MiniData = { + body: SchemaHunyuan3dV2MiniInput + path?: never + query?: never + url: '/fal-ai/hunyuan3d/v2/mini' +} + +export type PostFalAiHunyuan3dV2MiniResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiHunyuan3dV2MiniResponse = + PostFalAiHunyuan3dV2MiniResponses[keyof PostFalAiHunyuan3dV2MiniResponses] + +export type GetFalAiHunyuan3dV2MiniRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan3d/v2/mini/requests/{request_id}' +} + +export type GetFalAiHunyuan3dV2MiniRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaHunyuan3dV2MiniOutput +} + +export type GetFalAiHunyuan3dV2MiniRequestsByRequestIdResponse = + GetFalAiHunyuan3dV2MiniRequestsByRequestIdResponses[keyof GetFalAiHunyuan3dV2MiniRequestsByRequestIdResponses] + +export type GetFalAiHunyuan3dV2MultiViewRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/hunyuan3d/v2/multi-view/requests/{request_id}/status' +} + +export type GetFalAiHunyuan3dV2MultiViewRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiHunyuan3dV2MultiViewRequestsByRequestIdStatusResponse = + GetFalAiHunyuan3dV2MultiViewRequestsByRequestIdStatusResponses[keyof GetFalAiHunyuan3dV2MultiViewRequestsByRequestIdStatusResponses] + +export type PutFalAiHunyuan3dV2MultiViewRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan3d/v2/multi-view/requests/{request_id}/cancel' +} + +export type PutFalAiHunyuan3dV2MultiViewRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiHunyuan3dV2MultiViewRequestsByRequestIdCancelResponse = + PutFalAiHunyuan3dV2MultiViewRequestsByRequestIdCancelResponses[keyof PutFalAiHunyuan3dV2MultiViewRequestsByRequestIdCancelResponses] + +export type PostFalAiHunyuan3dV2MultiViewData = { + body: SchemaHunyuan3dV2MultiViewInput + path?: never + query?: never + url: '/fal-ai/hunyuan3d/v2/multi-view' +} + +export type PostFalAiHunyuan3dV2MultiViewResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiHunyuan3dV2MultiViewResponse = + PostFalAiHunyuan3dV2MultiViewResponses[keyof PostFalAiHunyuan3dV2MultiViewResponses] + +export type GetFalAiHunyuan3dV2MultiViewRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan3d/v2/multi-view/requests/{request_id}' +} + +export type GetFalAiHunyuan3dV2MultiViewRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaHunyuan3dV2MultiViewOutput +} + +export type GetFalAiHunyuan3dV2MultiViewRequestsByRequestIdResponse = + GetFalAiHunyuan3dV2MultiViewRequestsByRequestIdResponses[keyof GetFalAiHunyuan3dV2MultiViewRequestsByRequestIdResponses] + +export type GetFalAiHunyuan3dV2TurboRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/hunyuan3d/v2/turbo/requests/{request_id}/status' +} + +export type GetFalAiHunyuan3dV2TurboRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiHunyuan3dV2TurboRequestsByRequestIdStatusResponse = + GetFalAiHunyuan3dV2TurboRequestsByRequestIdStatusResponses[keyof GetFalAiHunyuan3dV2TurboRequestsByRequestIdStatusResponses] + +export type PutFalAiHunyuan3dV2TurboRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan3d/v2/turbo/requests/{request_id}/cancel' +} + +export type PutFalAiHunyuan3dV2TurboRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiHunyuan3dV2TurboRequestsByRequestIdCancelResponse = + PutFalAiHunyuan3dV2TurboRequestsByRequestIdCancelResponses[keyof PutFalAiHunyuan3dV2TurboRequestsByRequestIdCancelResponses] + +export type PostFalAiHunyuan3dV2TurboData = { + body: SchemaHunyuan3dV2TurboInput + path?: never + query?: never + url: '/fal-ai/hunyuan3d/v2/turbo' +} + +export type PostFalAiHunyuan3dV2TurboResponses = { + /** + * The request status. 
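+   *
+   * Hypothetical submission sketch; the request body is typed by the
+   * generated input schema, while the host, API key, and image URL are
+   * assumptions:
+   *
+   * @example
+   * const input: SchemaHunyuan3dV2TurboInput = {
+   *   input_image_url: 'https://example.com/source.png',
+   * }
+   * const res = await fetch('https://queue.fal.run/fal-ai/hunyuan3d/v2/turbo', {
+   *   method: 'POST',
+   *   headers: {
+   *     'Content-Type': 'application/json',
+   *     Authorization: `Key ${process.env.FAL_KEY}`,
+   *   },
+   *   body: JSON.stringify(input),
+   * })
+   * const queued: SchemaQueueStatus = await res.json()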
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiHunyuan3dV2TurboResponse = + PostFalAiHunyuan3dV2TurboResponses[keyof PostFalAiHunyuan3dV2TurboResponses] + +export type GetFalAiHunyuan3dV2TurboRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan3d/v2/turbo/requests/{request_id}' +} + +export type GetFalAiHunyuan3dV2TurboRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaHunyuan3dV2TurboOutput +} + +export type GetFalAiHunyuan3dV2TurboRequestsByRequestIdResponse = + GetFalAiHunyuan3dV2TurboRequestsByRequestIdResponses[keyof GetFalAiHunyuan3dV2TurboRequestsByRequestIdResponses] + +export type GetFalAiHunyuan3dV2MiniTurboRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/hunyuan3d/v2/mini/turbo/requests/{request_id}/status' +} + +export type GetFalAiHunyuan3dV2MiniTurboRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiHunyuan3dV2MiniTurboRequestsByRequestIdStatusResponse = + GetFalAiHunyuan3dV2MiniTurboRequestsByRequestIdStatusResponses[keyof GetFalAiHunyuan3dV2MiniTurboRequestsByRequestIdStatusResponses] + +export type PutFalAiHunyuan3dV2MiniTurboRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan3d/v2/mini/turbo/requests/{request_id}/cancel' +} + +export type PutFalAiHunyuan3dV2MiniTurboRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiHunyuan3dV2MiniTurboRequestsByRequestIdCancelResponse = + PutFalAiHunyuan3dV2MiniTurboRequestsByRequestIdCancelResponses[keyof PutFalAiHunyuan3dV2MiniTurboRequestsByRequestIdCancelResponses] + +export type PostFalAiHunyuan3dV2MiniTurboData = { + body: SchemaHunyuan3dV2MiniTurboInput + path?: never + query?: never + url: '/fal-ai/hunyuan3d/v2/mini/turbo' +} + +export type PostFalAiHunyuan3dV2MiniTurboResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiHunyuan3dV2MiniTurboResponse = + PostFalAiHunyuan3dV2MiniTurboResponses[keyof PostFalAiHunyuan3dV2MiniTurboResponses] + +export type GetFalAiHunyuan3dV2MiniTurboRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan3d/v2/mini/turbo/requests/{request_id}' +} + +export type GetFalAiHunyuan3dV2MiniTurboRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaHunyuan3dV2MiniTurboOutput +} + +export type GetFalAiHunyuan3dV2MiniTurboRequestsByRequestIdResponse = + GetFalAiHunyuan3dV2MiniTurboRequestsByRequestIdResponses[keyof GetFalAiHunyuan3dV2MiniTurboRequestsByRequestIdResponses] + +export type GetFalAiHyper3dRodinRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/hyper3d/rodin/requests/{request_id}/status' +} + +export type GetFalAiHyper3dRodinRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiHyper3dRodinRequestsByRequestIdStatusResponse = + GetFalAiHyper3dRodinRequestsByRequestIdStatusResponses[keyof GetFalAiHyper3dRodinRequestsByRequestIdStatusResponses] + +export type PutFalAiHyper3dRodinRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hyper3d/rodin/requests/{request_id}/cancel' +} + +export type PutFalAiHyper3dRodinRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiHyper3dRodinRequestsByRequestIdCancelResponse = + PutFalAiHyper3dRodinRequestsByRequestIdCancelResponses[keyof PutFalAiHyper3dRodinRequestsByRequestIdCancelResponses] + +export type PostFalAiHyper3dRodinData = { + body: SchemaHyper3dRodinInput + path?: never + query?: never + url: '/fal-ai/hyper3d/rodin' +} + +export type PostFalAiHyper3dRodinResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiHyper3dRodinResponse = + PostFalAiHyper3dRodinResponses[keyof PostFalAiHyper3dRodinResponses] + +export type GetFalAiHyper3dRodinRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hyper3d/rodin/requests/{request_id}' +} + +export type GetFalAiHyper3dRodinRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaHyper3dRodinOutput +} + +export type GetFalAiHyper3dRodinRequestsByRequestIdResponse = + GetFalAiHyper3dRodinRequestsByRequestIdResponses[keyof GetFalAiHyper3dRodinRequestsByRequestIdResponses] + +export type GetFalAiTrellisRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/trellis/requests/{request_id}/status' +} + +export type GetFalAiTrellisRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiTrellisRequestsByRequestIdStatusResponse = + GetFalAiTrellisRequestsByRequestIdStatusResponses[keyof GetFalAiTrellisRequestsByRequestIdStatusResponses] + +export type PutFalAiTrellisRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/trellis/requests/{request_id}/cancel' +} + +export type PutFalAiTrellisRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiTrellisRequestsByRequestIdCancelResponse = + PutFalAiTrellisRequestsByRequestIdCancelResponses[keyof PutFalAiTrellisRequestsByRequestIdCancelResponses] + +export type PostFalAiTrellisData = { + body: SchemaTrellisInput + path?: never + query?: never + url: '/fal-ai/trellis' +} + +export type PostFalAiTrellisResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiTrellisResponse = + PostFalAiTrellisResponses[keyof PostFalAiTrellisResponses] + +export type GetFalAiTrellisRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/trellis/requests/{request_id}' +} + +export type GetFalAiTrellisRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaTrellisOutput +} + +export type GetFalAiTrellisRequestsByRequestIdResponse = + GetFalAiTrellisRequestsByRequestIdResponses[keyof GetFalAiTrellisRequestsByRequestIdResponses] + +export type GetFalAiTriposrRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/triposr/requests/{request_id}/status' +} + +export type GetFalAiTriposrRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiTriposrRequestsByRequestIdStatusResponse = + GetFalAiTriposrRequestsByRequestIdStatusResponses[keyof GetFalAiTriposrRequestsByRequestIdStatusResponses] + +export type PutFalAiTriposrRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/triposr/requests/{request_id}/cancel' +} + +export type PutFalAiTriposrRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiTriposrRequestsByRequestIdCancelResponse = + PutFalAiTriposrRequestsByRequestIdCancelResponses[keyof PutFalAiTriposrRequestsByRequestIdCancelResponses] + +export type PostFalAiTriposrData = { + body: SchemaTriposrInput + path?: never + query?: never + url: '/fal-ai/triposr' +} + +export type PostFalAiTriposrResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiTriposrResponse = + PostFalAiTriposrResponses[keyof PostFalAiTriposrResponses] + +export type GetFalAiTriposrRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/triposr/requests/{request_id}' +} + +export type GetFalAiTriposrRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaTriposrOutput +} + +export type GetFalAiTriposrRequestsByRequestIdResponse = + GetFalAiTriposrRequestsByRequestIdResponses[keyof GetFalAiTriposrRequestsByRequestIdResponses] diff --git a/packages/typescript/ai-fal/src/generated/image-to-3d/zod.gen.ts b/packages/typescript/ai-fal/src/generated/image-to-3d/zod.gen.ts new file mode 100644 index 00000000..3eeeb0c0 --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/image-to-3d/zod.gen.ts @@ -0,0 +1,3887 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { z } from 'zod' + +/** + * File + */ +export const zSchemaFile = z.object({ + file_size: z.optional( + z.int().register(z.globalRegistry, { + description: 'The size of the file in bytes.', + }), + ), + file_name: z.optional( + z.string().register(z.globalRegistry, { + description: + 'The name of the file. 
It will be auto-generated if not provided.', + }), + ), + content_type: z.optional( + z.string().register(z.globalRegistry, { + description: 'The mime type of the file.', + }), + ), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), + file_data: z.optional( + z.string().register(z.globalRegistry, { + description: 'File data', + }), + ), +}) + +/** + * ObjectOutput + */ +export const zSchemaTriposrOutput = z.object({ + remeshing_dir: z.optional(zSchemaFile), + model_mesh: zSchemaFile, + timings: z.record(z.string(), z.number()).register(z.globalRegistry, { + description: 'Inference timings.', + }), +}) + +/** + * TripoSRInput + */ +export const zSchemaTriposrInput = z.object({ + mc_resolution: z + .optional( + z.int().gte(32).lte(1024).register(z.globalRegistry, { + description: + 'Resolution of the marching cubes. Above 512 is not recommended.', + }), + ) + .default(256), + do_remove_background: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to remove the background from the input image.', + }), + ) + .default(true), + foreground_ratio: z + .optional( + z.number().gte(0.5).lte(1).register(z.globalRegistry, { + description: 'Ratio of the foreground image to the original image.', + }), + ) + .default(0.9), + output_format: z.optional( + z.enum(['glb', 'obj']).register(z.globalRegistry, { + description: 'Output format for the 3D model.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'Path for the image file to be processed.', + }), +}) + +/** + * ObjectOutput + */ +export const zSchemaTrellisOutput = z.object({ + model_mesh: zSchemaFile, + timings: z.record(z.string(), z.number()).register(z.globalRegistry, { + description: 'Processing timings', + }), +}) + +/** + * InputModel + */ +export const zSchemaTrellisInput = z.object({ + slat_sampling_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'Sampling steps for structured latent generation', + }), + ) + .default(12), + ss_sampling_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'Sampling steps for sparse structure generation', + }), + ) + .default(12), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the input image to convert to 3D', + }), + slat_guidance_strength: z + .optional( + z.number().gte(0).lte(10).register(z.globalRegistry, { + description: 'Guidance strength for structured latent generation', + }), + ) + .default(3), + ss_guidance_strength: z + .optional( + z.number().gte(0).lte(10).register(z.globalRegistry, { + description: 'Guidance strength for sparse structure generation', + }), + ) + .default(7.5), + mesh_simplify: z + .optional( + z.number().gte(0.9).lte(0.98).register(z.globalRegistry, { + description: 'Mesh simplification factor', + }), + ) + .default(0.95), + seed: z.optional(z.union([z.int(), z.unknown()])), + texture_size: z.optional( + z + .union([z.literal(512), z.literal(1024), z.literal(2048)]) + .register(z.globalRegistry, { + description: 'Texture resolution', + }), + ), +}) + +/** + * Image + * + * Represents an image file. 
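+ *
+ * Hypothetical validation sketch: because this is a Zod schema, untrusted
+ * JSON can be checked at runtime (`payload` stands in for any unknown
+ * value):
+ *
+ * @example
+ * const image = zSchemaImage.parse(payload) // throws ZodError on mismatch
+ * const checked = zSchemaImage.safeParse(payload)
+ * if (checked.success) console.log(checked.data.url)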
+ */ +export const zSchemaImage = z +  .object({ +    height: z.optional( +      z.int().register(z.globalRegistry, { +        description: 'The height of the image in pixels.', +      }), +    ), +    file_size: z.optional( +      z.int().register(z.globalRegistry, { +        description: 'The size of the file in bytes.', +      }), +    ), +    url: z.string().register(z.globalRegistry, { +      description: 'The URL where the file can be downloaded from.', +    }), +    width: z.optional( +      z.int().register(z.globalRegistry, { +        description: 'The width of the image in pixels.', +      }), +    ), +    file_name: z.optional( +      z.string().register(z.globalRegistry, { +        description: +          'The name of the file. It will be auto-generated if not provided.', +      }), +    ), +    content_type: z.optional( +      z.string().register(z.globalRegistry, { +        description: 'The mime type of the file.', +      }), +    ), +    file_data: z.optional( +      z.string().register(z.globalRegistry, { +        description: 'File data', +      }), +    ), +  }) +  .register(z.globalRegistry, { +    description: 'Represents an image file.', +  }) + +/** + * ObjectOutput + */ +export const zSchemaHyper3dRodinOutput = z.object({ +  model_mesh: zSchemaFile, +  seed: z.int().register(z.globalRegistry, { +    description: 'Seed value used for generation.', +  }), +  textures: z.array(zSchemaImage).register(z.globalRegistry, { +    description: 'Generated textures for the 3D object.', +  }), +}) + +/** + * Rodin3DInput + */ +export const zSchemaHyper3dRodinInput = z.object({ +  prompt: z +    .optional( +      z.string().register(z.globalRegistry, { +        description: +          'A textual prompt to guide model generation. Required for Text-to-3D mode. Optional for Image-to-3D mode.', +      }), +    ) +    .default(''), +  condition_mode: z.optional( +    z.enum(['fuse', 'concat']).register(z.globalRegistry, { +      description: +        'For fuse mode, one or more images are required. It will generate a model by extracting and fusing features of objects from multiple images. For concat mode, you need to upload multiple multi-view images of the same object to generate the model. (You can upload multi-view images in any order, regardless of the order of view.)', +    }), +  ), +  bbox_condition: z.optional( +    z.array(z.int()).register(z.globalRegistry, { +      description: +        'An array that specifies the dimensions and scaling factor of the bounding box. Typically, this array contains 3 elements: Length (X-axis), Width (Y-axis) and Height (Z-axis).', +    }), +  ), +  tier: z.optional( +    z.enum(['Regular', 'Sketch']).register(z.globalRegistry, { +      description: +        'Tier of generation. For Rodin Sketch, set to Sketch. For Rodin Regular, set to Regular.', +    }), +  ), +  quality: z.optional( +    z.enum(['high', 'medium', 'low', 'extra-low']).register(z.globalRegistry, { +      description: +        'Generation quality. Possible values: high, medium, low, extra-low. Default is medium.', +    }), +  ), +  TAPose: z +    .optional( +      z.boolean().register(z.globalRegistry, { +        description: +          'When generating a human-like model, this parameter constrains the generation result to a T/A pose.', +      }), +    ) +    .default(false), +  input_image_urls: z.optional( +    z.array(z.string()).register(z.globalRegistry, { +      description: +        'URLs of images to use while generating the 3D model. Required for Image-to-3D mode. Optional for Text-to-3D mode.', +    }), +  ), +  geometry_file_format: z.optional( +    z.enum(['glb', 'usdz', 'fbx', 'obj', 'stl']).register(z.globalRegistry, { +      description: +        'Format of the geometry file. Possible values: glb, usdz, fbx, obj, stl. 
Default is glb.', + }), + ), + use_hyper: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to export the model using hyper mode. Default is false.', + }), + ) + .default(false), + addons: z.optional( + z.enum(['HighPack']).register(z.globalRegistry, { + description: + 'Generation add-on features. Default is []. Possible values are HighPack. The HighPack option will provide 4K resolution textures instead of the default 1K, as well as models with high-poly. It will cost triple the billable units.', + }), + ), + seed: z.optional( + z.int().gte(0).lte(65535).register(z.globalRegistry, { + description: + 'Seed value for randomization, ranging from 0 to 65535. Optional.', + }), + ), + material: z.optional( + z.enum(['PBR', 'Shaded']).register(z.globalRegistry, { + description: + 'Material type. Possible values: PBR, Shaded. Default is PBR.', + }), + ), +}) + +/** + * ObjectOutput + */ +export const zSchemaHunyuan3dV2MiniTurboOutput = z.object({ + model_mesh: zSchemaFile, + seed: z.int().register(z.globalRegistry, { + description: 'Seed value used for generation.', + }), +}) + +/** + * Hunyuan3DInput + */ +export const zSchemaHunyuan3dV2MiniTurboInput = z.object({ + input_image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to use while generating the 3D model.', + }), + octree_resolution: z + .optional( + z.int().gte(1).lte(1024).register(z.globalRegistry, { + description: 'Octree resolution for the model.', + }), + ) + .default(256), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: 'Guidance scale for the model.', + }), + ) + .default(7.5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps to perform.', + }), + ) + .default(50), + textured_mesh: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set true, textured mesh will be generated and the price charged would be 3 times that of white mesh.', + }), + ) + .default(false), +}) + +/** + * ObjectOutput + */ +export const zSchemaHunyuan3dV2TurboOutput = z.object({ + model_mesh: zSchemaFile, + seed: z.int().register(z.globalRegistry, { + description: 'Seed value used for generation.', + }), +}) + +/** + * Hunyuan3DInput + */ +export const zSchemaHunyuan3dV2TurboInput = z.object({ + input_image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to use while generating the 3D model.', + }), + octree_resolution: z + .optional( + z.int().gte(1).lte(1024).register(z.globalRegistry, { + description: 'Octree resolution for the model.', + }), + ) + .default(256), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: 'Guidance scale for the model.', + }), + ) + .default(7.5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps to perform.', + }), + ) + .default(50), + textured_mesh: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set 
true, textured mesh will be generated and the price charged would be 3 times that of white mesh.', + }), + ) + .default(false), +}) + +/** + * MultiViewObjectOutput + */ +export const zSchemaHunyuan3dV2MultiViewOutput = z.object({ + model_mesh: zSchemaFile, + seed: z.int().register(z.globalRegistry, { + description: 'Seed value used for generation.', + }), +}) + +/** + * Hunyuan3DInputMultiView + */ +export const zSchemaHunyuan3dV2MultiViewInput = z.object({ + front_image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to use while generating the 3D model.', + }), + octree_resolution: z + .optional( + z.int().gte(1).lte(1024).register(z.globalRegistry, { + description: 'Octree resolution for the model.', + }), + ) + .default(256), + back_image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to use while generating the 3D model.', + }), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: 'Guidance scale for the model.', + }), + ) + .default(7.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps to perform.', + }), + ) + .default(50), + textured_mesh: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set true, textured mesh will be generated and the price charged would be 3 times that of white mesh.', + }), + ) + .default(false), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + left_image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to use while generating the 3D model.', + }), +}) + +/** + * ObjectOutput + */ +export const zSchemaHunyuan3dV2MiniOutput = z.object({ + model_mesh: zSchemaFile, + seed: z.int().register(z.globalRegistry, { + description: 'Seed value used for generation.', + }), +}) + +/** + * Hunyuan3DInput + */ +export const zSchemaHunyuan3dV2MiniInput = z.object({ + input_image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to use while generating the 3D model.', + }), + octree_resolution: z + .optional( + z.int().gte(1).lte(1024).register(z.globalRegistry, { + description: 'Octree resolution for the model.', + }), + ) + .default(256), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: 'Guidance scale for the model.', + }), + ) + .default(7.5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps to perform.', + }), + ) + .default(50), + textured_mesh: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set true, textured mesh will be generated and the price charged would be 3 times that of white mesh.', + }), + ) + .default(false), +}) + +/** + * ObjectOutput + */ +export const zSchemaHunyuan3dV2Output = z.object({ + model_mesh: zSchemaFile, + seed: z.int().register(z.globalRegistry, { + description: 'Seed value used for generation.', + }), +}) + +/** + * Hunyuan3DInput + */ +export const zSchemaHunyuan3dV2Input = z.object({ + input_image_url: z.string().register(z.globalRegistry, 
{ + description: 'URL of image to use while generating the 3D model.', + }), + octree_resolution: z + .optional( + z.int().gte(1).lte(1024).register(z.globalRegistry, { + description: 'Octree resolution for the model.', + }), + ) + .default(256), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: 'Guidance scale for the model.', + }), + ) + .default(7.5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps to perform.', + }), + ) + .default(50), + textured_mesh: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set true, textured mesh will be generated and the price charged would be 3 times that of white mesh.', + }), + ) + .default(false), +}) + +/** + * MultiViewObjectOutput + */ +export const zSchemaHunyuan3dV2MultiViewTurboOutput = z.object({ + model_mesh: zSchemaFile, + seed: z.int().register(z.globalRegistry, { + description: 'Seed value used for generation.', + }), +}) + +/** + * Hunyuan3DInputMultiView + */ +export const zSchemaHunyuan3dV2MultiViewTurboInput = z.object({ + front_image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to use while generating the 3D model.', + }), + octree_resolution: z + .optional( + z.int().gte(1).lte(1024).register(z.globalRegistry, { + description: 'Octree resolution for the model.', + }), + ) + .default(256), + back_image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to use while generating the 3D model.', + }), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: 'Guidance scale for the model.', + }), + ) + .default(7.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps to perform.', + }), + ) + .default(50), + textured_mesh: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set true, textured mesh will be generated and the price charged would be 3 times that of white mesh.', + }), + ) + .default(false), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + left_image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to use while generating the 3D model.', + }), +}) + +/** + * Tripo3dOutput + */ +export const zSchemaTripoV25ImageTo3dOutput = z.object({ + base_model: z.optional(zSchemaFile), + task_id: z.string().register(z.globalRegistry, { + description: 'The task id of the 3D model generation.', + }), + rendered_image: z.optional(zSchemaFile), + model_mesh: z.optional(zSchemaFile), + pbr_model: z.optional(zSchemaFile), +}) + +/** + * ImageTo3dInput + */ +export const zSchemaTripoV25ImageTo3dInput = z.object({ + face_limit: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Limits the number of faces on the output model. 
If this option is not set, the face limit will be adaptively determined.', + }), + ), + style: z.optional( + z + .enum([ + 'person:person2cartoon', + 'object:clay', + 'object:steampunk', + 'animal:venom', + 'object:barbie', + 'object:christmas', + 'gold', + 'ancient_bronze', + ]) + .register(z.globalRegistry, { + description: + '[DEPRECATED] Defines the artistic style or transformation to be applied to the 3D model, altering its appearance according to preset options (extra $0.05 per generation). Omit this option to keep the original style and appearance.', + }), + ), + pbr: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'A boolean option to enable pbr. The default value is True; set False to get a model without pbr. If this option is set to True, texture will be ignored and used as True.', + }), + ) + .default(false), + texture_alignment: z.optional( + z.enum(['original_image', 'geometry']).register(z.globalRegistry, { + description: + 'Determines the prioritization of texture alignment in the 3D model. The default value is original_image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to use for model generation.', + }), + texture: z.optional( + z.enum(['no', 'standard', 'HD']).register(z.globalRegistry, { + description: + "An option to enable texturing. Default is 'standard', set 'no' to get a model without any textures, and set 'HD' to get a model with HD-quality textures.", + }), + ), + auto_size: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Automatically scale the model to real-world dimensions, with the unit in meters. The default value is False.', + }), + ) + .default(false), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'This is the random seed for model generation. The seed controls the geometry generation process, ensuring identical models when the same seed is used. This parameter is an integer and is randomly chosen if not set.', + }), + ), + quad: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Set True to enable quad mesh output (extra $0.05 per generation). If quad=True and face_limit is not set, the default face_limit will be 10000. Note: Enabling this option will force the output to be an FBX model.', + }), + ) + .default(false), + orientation: z.optional( + z.enum(['default', 'align_image']).register(z.globalRegistry, { + description: + 'Set orientation=align_image to automatically rotate the model to align the original image. The default value is default.', + }), + ), + texture_seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'This is the random seed for texture generation. Using the same seed will produce identical textures. This parameter is an integer and is randomly chosen if not set.
If you want a model with different textures, please use the same seed and a different texture_seed.', + }), + ), +}) + +/** + * ObjectOutput + */ +export const zSchemaTrellisMultiOutput = z.object({ + model_mesh: zSchemaFile, + timings: z.record(z.string(), z.number()).register(z.globalRegistry, { + description: 'Processing timings', + }), +}) + +/** + * MultiImageInputModel + */ +export const zSchemaTrellisMultiInput = z.object({ + multiimage_algo: z.optional( + z.enum(['stochastic', 'multidiffusion']).register(z.globalRegistry, { + description: 'Algorithm for multi-image generation', + }), + ), + slat_sampling_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'Sampling steps for structured latent generation', + }), + ) + .default(12), + ss_sampling_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'Sampling steps for sparse structure generation', + }), + ) + .default(12), + ss_guidance_strength: z + .optional( + z.number().gte(0).lte(10).register(z.globalRegistry, { + description: 'Guidance strength for sparse structure generation', + }), + ) + .default(7.5), + slat_guidance_strength: z + .optional( + z.number().gte(0).lte(10).register(z.globalRegistry, { + description: 'Guidance strength for structured latent generation', + }), + ) + .default(3), + mesh_simplify: z + .optional( + z.number().gte(0.9).lte(0.98).register(z.globalRegistry, { + description: 'Mesh simplification factor', + }), + ) + .default(0.95), + seed: z.optional(z.union([z.int(), z.unknown()])), + texture_size: z.optional( + z + .union([z.literal(512), z.literal(1024), z.literal(2048)]) + .register(z.globalRegistry, { + description: 'Texture resolution', + }), + ), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: 'List of URLs of input images to convert to 3D', + }), +}) + +/** + * ObjectOutput + */ +export const zSchemaHunyuan3dV21Output = z.object({ + model_glb_pbr: z.optional(zSchemaFile), + seed: z.int().register(z.globalRegistry, { + description: 'Seed value used for generation.', + }), + model_mesh: zSchemaFile, + model_glb: zSchemaFile, +}) + +/** + * Hunyuan3DInput + */ +export const zSchemaHunyuan3dV21Input = z.object({ + input_image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to use while generating the 3D model.', + }), + octree_resolution: z + .optional( + z.int().gte(1).lte(1024).register(z.globalRegistry, { + description: 'Octree resolution for the model.', + }), + ) + .default(256), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: 'Guidance scale for the model.', + }), + ) + .default(7.5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps to perform.', + }), + ) + .default(50), + textured_mesh: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set true, textured mesh will be generated and the price charged would be 3 times that of white mesh.', + }), + ) + .default(false), +}) + +/** + * Tripo3dOutput + */ +export const zSchemaTripoV25MultiviewTo3dOutput = z.object({ + base_model: z.optional(zSchemaFile), + task_id: z.string().register(z.globalRegistry, { + description: 'The task id of the 3D model
generation.', + }), + rendered_image: z.optional(zSchemaFile), + model_mesh: z.optional(zSchemaFile), + pbr_model: z.optional(zSchemaFile), +}) + +/** + * MultiviewTo3dInput + */ +export const zSchemaTripoV25MultiviewTo3dInput = z.object({ + face_limit: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Limits the number of faces on the output model. If this option is not set, the face limit will be adaptively determined.', + }), + ), + right_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'Right view image of the object.', + }), + ), + style: z.optional( + z + .enum([ + 'person:person2cartoon', + 'object:clay', + 'object:steampunk', + 'animal:venom', + 'object:barbie', + 'object:christmas', + 'gold', + 'ancient_bronze', + ]) + .register(z.globalRegistry, { + description: + '[DEPRECATED] Defines the artistic style or transformation to be applied to the 3D model, altering its appearance according to preset options (extra $0.05 per generation). Omit this option to keep the original style and appearance.', + }), + ), + quad: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Set True to enable quad mesh output (extra $0.05 per generation). If quad=True and face_limit is not set, the default face_limit will be 10000. Note: Enabling this option will force the output to be an FBX model.', + }), + ) + .default(false), + front_image_url: z.string().register(z.globalRegistry, { + description: 'Front view image of the object.', + }), + texture_seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'This is the random seed for texture generation. Using the same seed will produce identical textures. This parameter is an integer and is randomly chosen if not set. If you want a model with different textures, please use the same seed and a different texture_seed.', + }), + ), + back_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'Back view image of the object.', + }), + ), + pbr: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'A boolean option to enable pbr. The default value is True; set False to get a model without pbr. If this option is set to True, texture will be ignored and used as True.', + }), + ) + .default(false), + texture_alignment: z.optional( + z.enum(['original_image', 'geometry']).register(z.globalRegistry, { + description: + 'Determines the prioritization of texture alignment in the 3D model. The default value is original_image.', + }), + ), + texture: z.optional( + z.enum(['no', 'standard', 'HD']).register(z.globalRegistry, { + description: + "An option to enable texturing. Default is 'standard', set 'no' to get a model without any textures, and set 'HD' to get a model with HD-quality textures.", + }), + ), + auto_size: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Automatically scale the model to real-world dimensions, with the unit in meters. The default value is False.', + }), + ) + .default(false), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'This is the random seed for model generation. The seed controls the geometry generation process, ensuring identical models when the same seed is used. This parameter is an integer and is randomly chosen if not set.', + }), + ), + orientation: z.optional( + z.enum(['default', 'align_image']).register(z.globalRegistry, { + description: + 'Set orientation=align_image to automatically rotate the model to align the original image.
The default value is default.', + }), + ), + left_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'Left view image of the object.', + }), + ), +}) + +/** + * ImageToWorldResponse + */ +export const zSchemaHunyuanWorldImageToWorldOutput = z.object({ + world_file: zSchemaFile, +}) + +/** + * ImageToWorldRequest + */ +export const zSchemaHunyuanWorldImageToWorldInput = z.object({ + classes: z.string().register(z.globalRegistry, { + description: 'Classes to use for the world generation.', + }), + export_drc: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to export DRC (Dynamic Resource Configuration).', + }), + ) + .default(false), + labels_fg1: z.string().register(z.globalRegistry, { + description: 'Labels for the first foreground object.', + }), + labels_fg2: z.string().register(z.globalRegistry, { + description: 'Labels for the second foreground object.', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to convert to a world.', + }), +}) + +/** + * PSHumanResponse + */ +export const zSchemaPshumanOutput = z.object({ + model_obj: zSchemaFile, + preview_image: zSchemaFile, +}) + +/** + * PSHumanRequest + */ +export const zSchemaPshumanInput = z.object({ + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Guidance scale for the diffusion process. Controls how much the output adheres to the generated views.', + }), + ) + .default(4), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Seed for reproducibility. If None, a random seed will be used.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'A direct URL to the input image of a person.', + }), +}) + +/** + * ObjectOutputv2 + */ +export const zSchemaHyper3dRodinV2Output = z.object({ + model_mesh: zSchemaFile, + seed: z.int().register(z.globalRegistry, { + description: 'Seed value used for generation.', + }), + textures: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Generated textures for the 3D object.', + }), +}) + +/** + * RodinGen2Input + */ +export const zSchemaHyper3dRodinV2Input = z.object({ + quality_mesh_option: z.optional( + z + .enum([ + '4K Quad', + '8K Quad', + '18K Quad', + '50K Quad', + '2K Triangle', + '20K Triangle', + '150K Triangle', + '500K Triangle', + ]) + .register(z.globalRegistry, { + description: + "Combined quality and mesh type selection. Quad = smooth surfaces, Triangle = detailed geometry. These correspond to `mesh_mode` (if the option contains 'Triangle', mesh_mode is 'Raw', otherwise 'Quad') and `quality_override` (the numeric part of the option) parameters in the Hyper3D API.", + }), + ), + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'A textual prompt to guide model generation.
Optional for Image-to-3D mode - if empty, AI will generate a prompt based on your images.', + }), + ) + .default(''), + preview_render: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Generate a preview render image of the 3D model along with the model files.', + }), + ) + .default(false), + bbox_condition: z.optional( + z.array(z.int()).register(z.globalRegistry, { + description: + 'An array that specifies the bounding box dimensions [width, height, length].', + }), + ), + TAPose: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Generate characters in T-pose or A-pose format, making them easier to rig and animate in 3D software.', + }), + ) + .default(false), + input_image_urls: z.optional( + z.array(z.string()).register(z.globalRegistry, { + description: + 'URLs of images to use while generating the 3D model. Required for Image-to-3D mode. Up to 5 images allowed.', + }), + ), + use_original_alpha: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'When enabled, preserves the transparency channel from input images during 3D generation.', + }), + ) + .default(false), + geometry_file_format: z.optional( + z.enum(['glb', 'usdz', 'fbx', 'obj', 'stl']).register(z.globalRegistry, { + description: + 'Format of the geometry file. Possible values: glb, usdz, fbx, obj, stl. Default is glb.', + }), + ), + addons: z.optional( + z.enum(['HighPack']).register(z.globalRegistry, { + description: + 'The HighPack option will provide 4K resolution textures instead of the default 1K, as well as high-poly models. It will cost **triple the billable units**.', + }), + ), + seed: z.optional( + z.int().gte(0).lte(65535).register(z.globalRegistry, { + description: + 'Seed value for randomization, ranging from 0 to 65535. Optional.', + }), + ), + material: z.optional( + z.enum(['PBR', 'Shaded', 'All']).register(z.globalRegistry, { + description: + 'Material type. PBR: Physically-based materials with realistic lighting. Shaded: Simple materials with baked lighting.
All: Both types included.', + }), + ), +}) + +/** + * TextureFiles + * + * Texture files downloaded and uploaded to CDN + */ +export const zSchemaTextureFiles = z + .object({ + base_color: zSchemaFile, + normal: z.optional(zSchemaFile), + roughness: z.optional(zSchemaFile), + metallic: z.optional(zSchemaFile), + }) + .register(z.globalRegistry, { + description: 'Texture files downloaded and uploaded to CDN', + }) + +/** + * ModelUrls + * + * 3D model files in various formats + */ +export const zSchemaModelUrls = z + .object({ + usdz: z.optional(zSchemaFile), + fbx: z.optional(zSchemaFile), + blend: z.optional(zSchemaFile), + stl: z.optional(zSchemaFile), + glb: z.optional(zSchemaFile), + obj: z.optional(zSchemaFile), + }) + .register(z.globalRegistry, { + description: '3D model files in various formats', + }) + +/** + * ImageTo3DOutput + * + * Output for Image to 3D conversion + */ +export const zSchemaMeshyV6PreviewImageTo3dOutput = z + .object({ + model_urls: zSchemaModelUrls, + texture_urls: z.optional( + z.array(zSchemaTextureFiles).register(z.globalRegistry, { + description: + 'Array of texture file objects, matching Meshy API structure', + }), + ), + thumbnail: z.optional(zSchemaFile), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed used for generation (if available)', + }), + ), + model_glb: zSchemaFile, + }) + .register(z.globalRegistry, { + description: 'Output for Image to 3D conversion', + }) + +/** + * ImageTo3DInput + * + * Input for Image to 3D conversion + */ +export const zSchemaMeshyV6PreviewImageTo3dInput = z + .object({ + enable_pbr: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Generate PBR Maps (metallic, roughness, normal) in addition to base color', + }), + ) + .default(false), + is_a_t_pose: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to generate the model in an A/T pose', + }), + ) + .default(false), + target_polycount: z + .optional( + z.int().gte(100).lte(300000).register(z.globalRegistry, { + description: 'Target number of polygons in the generated model', + }), + ) + .default(30000), + should_texture: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to generate textures', + }), + ) + .default(true), + texture_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: '2D image to guide the texturing process', + }), + ), + topology: z.optional( + z.enum(['quad', 'triangle']).register(z.globalRegistry, { + description: + 'Specify the topology of the generated model. Quad for smooth surfaces, Triangle for detailed geometry.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: + 'Image URL or base64 data URI for 3D model creation. Supports .jpg, .jpeg, and .png formats. Also supports AVIF and HEIF formats which will be automatically converted.', + }), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, input data will be checked for safety before processing.', + }), + ) + .default(true), + symmetry_mode: z.optional( + z.enum(['off', 'auto', 'on']).register(z.globalRegistry, { + description: + 'Controls symmetry behavior during model generation. 
Off disables symmetry, Auto determines it automatically, On enforces symmetry.', + }), + ), + texture_prompt: z.optional( + z.string().max(600).register(z.globalRegistry, { + description: 'Text prompt to guide the texturing process', + }), + ), + should_remesh: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the remesh phase', + }), + ) + .default(true), + }) + .register(z.globalRegistry, { + description: 'Input for Image to 3D conversion', + }) + +/** + * MultiImageTo3DOutput + * + * Output for Multi-Image to 3D conversion + */ +export const zSchemaMeshyV5MultiImageTo3dOutput = z + .object({ + model_urls: zSchemaModelUrls, + texture_urls: z.optional( + z.array(zSchemaTextureFiles).register(z.globalRegistry, { + description: 'Array of texture file objects', + }), + ), + thumbnail: z.optional(zSchemaFile), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed used for generation (if available)', + }), + ), + model_glb: zSchemaFile, + }) + .register(z.globalRegistry, { + description: 'Output for Multi-Image to 3D conversion', + }) + +/** + * MultiImageTo3DInput + * + * Input for Multi-Image to 3D conversion + */ +export const zSchemaMeshyV5MultiImageTo3dInput = z + .object({ + enable_pbr: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Generate PBR Maps (metallic, roughness, normal) in addition to base color. Requires should_texture to be true.', + }), + ) + .default(false), + should_texture: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to generate textures. False provides mesh without textures for 5 credits, True adds texture generation for additional 10 credits.', + }), + ) + .default(true), + target_polycount: z + .optional( + z.int().gte(100).lte(300000).register(z.globalRegistry, { + description: 'Target number of polygons in the generated model', + }), + ) + .default(30000), + is_a_t_pose: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to generate the model in an A/T pose', + }), + ) + .default(false), + texture_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: + '2D image to guide the texturing process. Requires should_texture to be true.', + }), + ), + topology: z.optional( + z.enum(['quad', 'triangle']).register(z.globalRegistry, { + description: + 'Specify the topology of the generated model. Quad for smooth surfaces, Triangle for detailed geometry.', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, input data will be checked for safety before processing.', + }), + ) + .default(true), + symmetry_mode: z.optional( + z.enum(['off', 'auto', 'on']).register(z.globalRegistry, { + description: 'Controls symmetry behavior during model generation.', + }), + ), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + '1 to 4 images for 3D model creation. All images should depict the same object from different angles. Supports .jpg, .jpeg, .png formats, and AVIF/HEIF which will be automatically converted. If more than 4 images are provided, only the first 4 will be used.', + }), + texture_prompt: z.optional( + z.string().max(600).register(z.globalRegistry, { + description: + 'Text prompt to guide the texturing process. 
Requires should_texture to be true.', + }), + ), + should_remesh: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable the remesh phase. When false, returns triangular mesh ignoring topology and target_polycount.', + }), + ) + .default(true), + }) + .register(z.globalRegistry, { + description: 'Input for Multi-Image to 3D conversion', + }) + +/** + * Seed3DImageTo3DOutput + */ +export const zSchemaBytedanceSeed3dImageTo3dOutput = z.object({ + model: zSchemaFile, + usage_tokens: z.int().register(z.globalRegistry, { + description: 'The number of tokens used for the 3D model generation', + }), +}) + +/** + * Seed3DImageTo3DInput + */ +export const zSchemaBytedanceSeed3dImageTo3dInput = z.object({ + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image for the 3D asset generation.', + }), +}) + +/** + * MultiViewObjectOutput + */ +export const zSchemaOmnipartOutput = z.object({ + full_model_mesh: zSchemaFile, + output_zip: zSchemaFile, + seed: z.int().register(z.globalRegistry, { + description: 'Seed value used for generation.', + }), + model_mesh: zSchemaFile, +}) + +/** + * OmnipartInput + */ +export const zSchemaOmnipartInput = z.object({ + input_image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to use while generating the 3D model.', + }), + parts: z + .optional( + z.string().register(z.globalRegistry, { + description: + "Specify which segments to merge (e.g., '0,1;3,4' merges segments 0&1 together and 3&4 together)", + }), + ) + .default(''), + seed: z + .optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ) + .default(765464), + minimum_segment_size: z + .optional( + z.int().gte(1).lte(10000).register(z.globalRegistry, { + description: 'Minimum segment size (pixels) for the model.', + }), + ) + .default(2000), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: 'Guidance scale for the model.', + }), + ) + .default(7.5), +}) + +/** + * SAM3DObjectMetadata + * + * Per-object metadata for 3D reconstruction. + */ +export const zSchemaSam3dObjectMetadata = z + .object({ + rotation: z.optional( + z.array(z.array(z.number())).register(z.globalRegistry, { + description: 'Rotation quaternion [x, y, z, w]', + }), + ), + translation: z.optional( + z.array(z.array(z.number())).register(z.globalRegistry, { + description: 'Translation [tx, ty, tz]', + }), + ), + object_index: z.int().register(z.globalRegistry, { + description: 'Index of the object in the scene', + }), + scale: z.optional( + z.array(z.array(z.number())).register(z.globalRegistry, { + description: 'Scale factors [sx, sy, sz]', + }), + ), + camera_pose: z.optional( + z.array(z.array(z.number())).register(z.globalRegistry, { + description: 'Camera pose matrix', + }), + ), + }) + .register(z.globalRegistry, { + description: 'Per-object metadata for 3D reconstruction.', + }) + +/** + * PointPromptBase + */ +export const zSchemaPointPromptBase = z.object({ + y: z.optional( + z.int().register(z.globalRegistry, { + description: 'Y Coordinate of the prompt', + }), + ), + x: z.optional( + z.int().register(z.globalRegistry, { + description: 'X Coordinate of the prompt', + }), + ), + object_id: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Optional object identifier. 
Prompts sharing an object id refine the same object.', + }), + ), + label: z.optional( + z.union([z.literal(0), z.literal(1)]).register(z.globalRegistry, { + description: '1 for foreground, 0 for background', + }), + ), +}) + +/** + * BoxPromptBase + */ +export const zSchemaBoxPromptBase = z.object({ + y_min: z.optional( + z.int().register(z.globalRegistry, { + description: 'Y Min Coordinate of the box', + }), + ), + object_id: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Optional object identifier. Boxes sharing an object id refine the same object.', + }), + ), + x_max: z.optional( + z.int().register(z.globalRegistry, { + description: 'X Max Coordinate of the box', + }), + ), + x_min: z.optional( + z.int().register(z.globalRegistry, { + description: 'X Min Coordinate of the box', + }), + ), + y_max: z.optional( + z.int().register(z.globalRegistry, { + description: 'Y Max Coordinate of the box', + }), + ), +}) + +/** + * SAM3DObjectOutput + */ +export const zSchemaSam33dObjectsOutput = z.object({ + model_glb: z.optional(zSchemaFile), + metadata: z.array(zSchemaSam3dObjectMetadata).register(z.globalRegistry, { + description: 'Per-object metadata (rotation/translation/scale)', + }), + gaussian_splat: zSchemaFile, + artifacts_zip: z.optional(zSchemaFile), + individual_glbs: z.optional( + z.array(zSchemaFile).register(z.globalRegistry, { + description: + 'Individual GLB mesh files per object (only for multi-object scenes)', + }), + ), + individual_splats: z.optional( + z.array(zSchemaFile).register(z.globalRegistry, { + description: + 'Individual Gaussian splat files per object (only for multi-object scenes)', + }), + ), +}) + +/** + * SAM3DObjectInput + */ +export const zSchemaSam33dObjectsInput = z.object({ + pointmap_url: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Optional URL to external pointmap/depth data (NPY or NPZ format) for improved 3D reconstruction depth estimation', + }), + ), + export_textured_glb: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If True, exports GLB with baked texture and UVs instead of vertex colors.', + }), + ) + .default(false), + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "Text prompt for auto-segmentation when no masks provided (e.g., 'chair', 'lamp')", + }), + ) + .default('car'), + box_prompts: z + .optional( + z.array(zSchemaBoxPromptBase).register(z.globalRegistry, { + description: + 'Box prompts for auto-segmentation when no masks provided. Multiple boxes supported - each produces a separate object mask for 3D reconstruction.', + }), + ) + .default([]), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to reconstruct in 3D', + }), + mask_urls: z.optional( + z.array(z.string()).register(z.globalRegistry, { + description: + 'Optional list of mask URLs (one per object). If not provided, use prompt/point_prompts/box_prompts to auto-segment, or entire image will be used.', + }), + ), + point_prompts: z + .optional( + z.array(zSchemaPointPromptBase).register(z.globalRegistry, { + description: + 'Point prompts for auto-segmentation when no masks provided', + }), + ) + .default([]), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for reproducibility', + }), + ), +}) + +/** + * SAM3DBodyPersonMetadata + * + * Per-person metadata for body reconstruction. 
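+ *
+ * A hedged usage sketch (added for illustration; the payload values below are
+ * hypothetical, not taken from a real response):
+ * @example
+ * ```ts
+ * // Validate one person's metadata from a SAM 3 body result. A real
+ * // payload carries 70 keypoints; one is shown here for brevity.
+ * const person = zSchemaSam3dBodyPersonMetadata.parse({
+ *   person_id: 0,
+ *   pred_cam_t: [0.1, -0.2, 2.5],
+ *   focal_length: 1200,
+ *   keypoints_2d: [[312, 480]],
+ *   bbox: [120, 64, 540, 910],
+ * })
+ * console.log(person.bbox) // [x_min, y_min, x_max, y_max]
+ * ```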
+ */ +export const zSchemaSam3dBodyPersonMetadata = z + .object({ + pred_cam_t: z.array(z.number()).register(z.globalRegistry, { + description: 'Predicted camera translation [tx, ty, tz]', + }), + person_id: z.int().register(z.globalRegistry, { + description: 'Index of the person in the scene', + }), + focal_length: z.number().register(z.globalRegistry, { + description: 'Estimated focal length', + }), + keypoints_3d: z.optional( + z.array(z.array(z.number())).register(z.globalRegistry, { + description: + '3D keypoints [[x, y, z], ...] - 70 body keypoints in camera space', + }), + ), + keypoints_2d: z.array(z.array(z.number())).register(z.globalRegistry, { + description: '2D keypoints [[x, y], ...] - 70 body keypoints', + }), + bbox: z.array(z.number()).register(z.globalRegistry, { + description: 'Bounding box [x_min, y_min, x_max, y_max]', + }), + }) + .register(z.globalRegistry, { + description: 'Per-person metadata for body reconstruction.', + }) + +/** + * SAM3DBodyMetadata + * + * Metadata for body reconstruction output. + */ +export const zSchemaSam3dBodyMetadata = z + .object({ + people: z.array(zSchemaSam3dBodyPersonMetadata).register(z.globalRegistry, { + description: 'Per-person metadata', + }), + num_people: z.int().register(z.globalRegistry, { + description: 'Number of people detected', + }), + }) + .register(z.globalRegistry, { + description: 'Metadata for body reconstruction output.', + }) + +/** + * SAM3DBodyOutput + */ +export const zSchemaSam33dBodyOutput = z.object({ + visualization: zSchemaFile, + metadata: zSchemaSam3dBodyMetadata, + meshes: z.optional( + z.array(zSchemaFile).register(z.globalRegistry, { + description: + 'Individual mesh files (.ply), one per detected person (when export_meshes=True)', + }), + ), + model_glb: zSchemaFile, +}) + +/** + * SAM3DBodyInput + */ +export const zSchemaSam33dBodyInput = z.object({ + include_3d_keypoints: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Include 3D keypoint markers (spheres) in the GLB mesh for visualization', + }), + ) + .default(true), + export_meshes: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Export individual mesh files (.ply) per person', + }), + ) + .default(true), + mask_url: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Optional URL of a binary mask image (white=person, black=background). When provided, skips auto human detection and uses this mask instead. Bbox is auto-computed from the mask.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image containing humans', + }), +}) + +/** + * ImageTo3DOutput + */ +export const zSchemaHunyuan3dV3ImageTo3dOutput = z.object({ + model_urls: zSchemaModelUrls, + thumbnail: z.optional(zSchemaFile), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), + ), + model_glb: zSchemaFile, +}) + +/** + * ImageTo3DInput + */ +export const zSchemaHunyuan3dV3ImageTo3dInput = z.object({ + input_image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to use while generating the 3D model.', + }), + polygon_type: z.optional( + z.enum(['triangle', 'quadrilateral']).register(z.globalRegistry, { + description: + 'Polygon type. Only takes effect when GenerateType is LowPoly.', + }), + ), + face_count: z + .optional( + z.int().gte(40000).lte(1500000).register(z.globalRegistry, { + description: 'Target face count. 
Range: 40000-1500000', + }), + ) + .default(500000), + right_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Optional right view image URL for better 3D reconstruction.', + }), + ), + back_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'Optional back view image URL for better 3D reconstruction.', + }), + ), + enable_pbr: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable PBR material generation. Does not take effect when generate_type is Geometry.', + }), + ) + .default(false), + generate_type: z.optional( + z.enum(['Normal', 'LowPoly', 'Geometry']).register(z.globalRegistry, { + description: + 'Generation type. Normal: textured model. LowPoly: polygon reduction. Geometry: white model without texture.', + }), + ), + left_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'Optional left view image URL for better 3D reconstruction.', + }), + ), +}) + +/** + * SketchTo3DOutput + */ +export const zSchemaHunyuan3dV3SketchTo3dOutput = z.object({ + model_urls: zSchemaModelUrls, + thumbnail: z.optional(zSchemaFile), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), + ), + model_glb: zSchemaFile, +}) + +/** + * SketchTo3DInput + */ +export const zSchemaHunyuan3dV3SketchTo3dInput = z.object({ + input_image_url: z.string().register(z.globalRegistry, { + description: + 'URL of sketch or line art image to transform into a 3D model. Image resolution must be between 128x128 and 5000x5000 pixels.', + }), + prompt: z.string().max(1024).register(z.globalRegistry, { + description: + 'Text prompt describing the 3D content attributes such as color, category, and material.', + }), + face_count: z + .optional( + z.int().gte(40000).lte(1500000).register(z.globalRegistry, { + description: 'Target face count. 
Range: 40000-1500000', + }), + ) + .default(500000), + enable_pbr: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable PBR material generation.', + }), + ) + .default(false), +}) + +/** + * ObjectOutput + */ +export const zSchemaTrellis2Output = z.object({ + model_glb: zSchemaFile, +}) + +/** + * SingleImageInputModel + */ +export const zSchemaTrellis2Input = z.object({ + remesh_band: z.optional(z.number().gte(0).lte(4)).default(1), + ss_guidance_rescale: z.optional(z.number().gte(0).lte(1)).default(0.7), + ss_rescale_t: z.optional(z.number().gte(1).lte(6)).default(5), + shape_slat_sampling_steps: z.optional(z.int().gte(1).lte(50)).default(12), + tex_slat_rescale_t: z.optional(z.number().gte(1).lte(6)).default(3), + ss_guidance_strength: z.optional(z.number().gte(0).lte(10)).default(7.5), + ss_sampling_steps: z.optional(z.int().gte(1).lte(50)).default(12), + tex_slat_sampling_steps: z.optional(z.int().gte(1).lte(50)).default(12), + remesh_project: z.optional(z.number().gte(0).lte(1)).default(0), + texture_size: z.optional( + z + .union([z.literal(1024), z.literal(2048), z.literal(4096)]) + .register(z.globalRegistry, { + description: 'Texture resolution', + }), + ), + shape_slat_rescale_t: z.optional(z.number().gte(1).lte(6)).default(3), + resolution: z.optional( + z + .union([z.literal(512), z.literal(1024), z.literal(1536)]) + .register(z.globalRegistry, { + description: 'Output resolution; higher is slower but more detailed', + }), + ), + remesh: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Run remeshing (slower; often improves topology)', + }), + ) + .default(true), + tex_slat_guidance_rescale: z.optional(z.number().gte(0).lte(1)).default(0), + shape_slat_guidance_rescale: z + .optional(z.number().gte(0).lte(1)) + .default(0.5), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the input image to convert to 3D', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for reproducibility', + }), + ), + shape_slat_guidance_strength: z + .optional(z.number().gte(0).lte(10)) + .default(7.5), + tex_slat_guidance_strength: z.optional(z.number().gte(0).lte(10)).default(1), + decimation_target: z + .optional( + z.int().gte(100000).lte(2000000).register(z.globalRegistry, { + description: + 'Target vertex count for mesh simplification during export', + }), + ) + .default(500000), +}) + +export const zSchemaQueueStatus = z.object({ + status: z.enum(['IN_QUEUE', 'IN_PROGRESS', 'COMPLETED']), + request_id: z.string().register(z.globalRegistry, { + description: 'The request id.', + }), + response_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The response url.', + }), + ), + status_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The status url.', + }), + ), + cancel_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The cancel url.', + }), + ), + logs: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The logs.', + }), + ), + metrics: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The metrics.', + }), + ), + queue_position: z.optional( + z.int().register(z.globalRegistry, { + description: 'The queue position.', + }), + ), +}) + +export const zGetFalAiTrellis2RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + 
description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiTrellis2RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiTrellis2RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiTrellis2RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiTrellis2Data = z.object({ + body: zSchemaTrellis2Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiTrellis2Response = zSchemaQueueStatus + +export const zGetFalAiTrellis2RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiTrellis2RequestsByRequestIdResponse = + zSchemaTrellis2Output + +export const zGetFalAiHunyuan3dV3SketchTo3dRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiHunyuan3dV3SketchTo3dRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiHunyuan3dV3SketchTo3dRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiHunyuan3dV3SketchTo3dRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiHunyuan3dV3SketchTo3dData = z.object({ + body: zSchemaHunyuan3dV3SketchTo3dInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiHunyuan3dV3SketchTo3dResponse = zSchemaQueueStatus + +export const zGetFalAiHunyuan3dV3SketchTo3dRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
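+ *
+ * A minimal sketch of fetching a finished result with the official
+ * `@fal-ai/client` package and validating it against this schema. The
+ * endpoint id is inferred from the schema name, and the exact shape the
+ * client returns is an assumption:
+ * @example
+ * ```ts
+ * import { fal } from '@fal-ai/client'
+ *
+ * const requestId = 'your-request-id' // hypothetical; from a prior submit
+ * const { data } = await fal.queue.result('fal-ai/hunyuan3d/v3/sketch-to-3d', {
+ *   requestId,
+ * })
+ * const output =
+ *   zGetFalAiHunyuan3dV3SketchTo3dRequestsByRequestIdResponse.parse(data)
+ * console.log(output.model_glb) // the generated mesh file object
+ * ```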
+ */ +export const zGetFalAiHunyuan3dV3SketchTo3dRequestsByRequestIdResponse = + zSchemaHunyuan3dV3SketchTo3dOutput + +export const zGetFalAiHunyuan3dV3ImageTo3dRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiHunyuan3dV3ImageTo3dRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiHunyuan3dV3ImageTo3dRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiHunyuan3dV3ImageTo3dRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiHunyuan3dV3ImageTo3dData = z.object({ + body: zSchemaHunyuan3dV3ImageTo3dInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiHunyuan3dV3ImageTo3dResponse = zSchemaQueueStatus + +export const zGetFalAiHunyuan3dV3ImageTo3dRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiHunyuan3dV3ImageTo3dRequestsByRequestIdResponse = + zSchemaHunyuan3dV3ImageTo3dOutput + +export const zGetFalAiSam33dBodyRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiSam33dBodyRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSam33dBodyRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiSam33dBodyRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSam33dBodyData = z.object({ + body: zSchemaSam33dBodyInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
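+ *
+ * A sketch of submitting a request and validating the returned queue status.
+ * The endpoint id and the client's snake_case passthrough of the queue
+ * response are assumptions:
+ * @example
+ * ```ts
+ * import { fal } from '@fal-ai/client'
+ *
+ * // Parsing the input first applies the schema defaults
+ * // (include_3d_keypoints, export_meshes, ...).
+ * const input = zSchemaSam33dBodyInput.parse({
+ *   image_url: 'https://example.com/person.jpg', // hypothetical URL
+ * })
+ * const submitted = await fal.queue.submit('fal-ai/sam-3/3d-body', { input })
+ * const status = zPostFalAiSam33dBodyResponse.parse(submitted)
+ * console.log(status.request_id) // poll this id for progress
+ * ```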
+ */ +export const zPostFalAiSam33dBodyResponse = zSchemaQueueStatus + +export const zGetFalAiSam33dBodyRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSam33dBodyRequestsByRequestIdResponse = + zSchemaSam33dBodyOutput + +export const zGetFalAiSam33dObjectsRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiSam33dObjectsRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSam33dObjectsRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiSam33dObjectsRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSam33dObjectsData = z.object({ + body: zSchemaSam33dObjectsInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiSam33dObjectsResponse = zSchemaQueueStatus + +export const zGetFalAiSam33dObjectsRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSam33dObjectsRequestsByRequestIdResponse = + zSchemaSam33dObjectsOutput + +export const zGetFalAiOmnipartRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiOmnipartRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiOmnipartRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
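+ *
+ * A sketch of the cancel route this schema validates (PUT
+ * /requests/{request_id}/cancel, per the schema name). The `queue.fal.run`
+ * base URL and `Key` auth header follow fal's queue conventions and are
+ * assumptions here:
+ * @example
+ * ```ts
+ * const requestId = 'your-request-id' // hypothetical
+ * const res = await fetch(
+ *   `https://queue.fal.run/fal-ai/omnipart/requests/${requestId}/cancel`,
+ *   { method: 'PUT', headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+ * )
+ * const { success } =
+ *   zPutFalAiOmnipartRequestsByRequestIdCancelResponse.parse(await res.json())
+ * ```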
+ */ +export const zPutFalAiOmnipartRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiOmnipartData = z.object({ + body: zSchemaOmnipartInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiOmnipartResponse = zSchemaQueueStatus + +export const zGetFalAiOmnipartRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiOmnipartRequestsByRequestIdResponse = + zSchemaOmnipartOutput + +export const zGetFalAiBytedanceSeed3dImageTo3dRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiBytedanceSeed3dImageTo3dRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiBytedanceSeed3dImageTo3dRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiBytedanceSeed3dImageTo3dRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiBytedanceSeed3dImageTo3dData = z.object({ + body: zSchemaBytedanceSeed3dImageTo3dInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiBytedanceSeed3dImageTo3dResponse = zSchemaQueueStatus + +export const zGetFalAiBytedanceSeed3dImageTo3dRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiBytedanceSeed3dImageTo3dRequestsByRequestIdResponse = + zSchemaBytedanceSeed3dImageTo3dOutput + +export const zGetFalAiMeshyV5MultiImageTo3dRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
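+ *
+ * A polling sketch using `@fal-ai/client`; the endpoint id is inferred from
+ * the schema name and is an assumption:
+ * @example
+ * ```ts
+ * import { fal } from '@fal-ai/client'
+ *
+ * const requestId = 'your-request-id' // hypothetical
+ * const raw = await fal.queue.status('fal-ai/meshy/v5/multi-image-to-3d', {
+ *   requestId,
+ *   logs: true, // maps to the `?logs=1` query parameter validated above
+ * })
+ * const status =
+ *   zGetFalAiMeshyV5MultiImageTo3dRequestsByRequestIdStatusResponse.parse(raw)
+ * if (status.status === 'COMPLETED') {
+ *   // fetch the result, e.g. from status.response_url
+ * }
+ * ```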
+ */ +export const zGetFalAiMeshyV5MultiImageTo3dRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMeshyV5MultiImageTo3dRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiMeshyV5MultiImageTo3dRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMeshyV5MultiImageTo3dData = z.object({ + body: zSchemaMeshyV5MultiImageTo3dInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMeshyV5MultiImageTo3dResponse = zSchemaQueueStatus + +export const zGetFalAiMeshyV5MultiImageTo3dRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiMeshyV5MultiImageTo3dRequestsByRequestIdResponse = + zSchemaMeshyV5MultiImageTo3dOutput + +export const zGetFalAiMeshyV6PreviewImageTo3dRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiMeshyV6PreviewImageTo3dRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMeshyV6PreviewImageTo3dRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiMeshyV6PreviewImageTo3dRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMeshyV6PreviewImageTo3dData = z.object({ + body: zSchemaMeshyV6PreviewImageTo3dInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMeshyV6PreviewImageTo3dResponse = zSchemaQueueStatus + +export const zGetFalAiMeshyV6PreviewImageTo3dRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. 
+ */ +export const zGetFalAiMeshyV6PreviewImageTo3dRequestsByRequestIdResponse = + zSchemaMeshyV6PreviewImageTo3dOutput + +export const zGetFalAiHyper3dRodinV2RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiHyper3dRodinV2RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiHyper3dRodinV2RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiHyper3dRodinV2RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiHyper3dRodinV2Data = z.object({ + body: zSchemaHyper3dRodinV2Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiHyper3dRodinV2Response = zSchemaQueueStatus + +export const zGetFalAiHyper3dRodinV2RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiHyper3dRodinV2RequestsByRequestIdResponse = + zSchemaHyper3dRodinV2Output + +export const zGetFalAiPshumanRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiPshumanRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPshumanRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiPshumanRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPshumanData = z.object({ + body: zSchemaPshumanInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiPshumanResponse = zSchemaQueueStatus + +export const zGetFalAiPshumanRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPshumanRequestsByRequestIdResponse = zSchemaPshumanOutput + +export const zGetFalAiHunyuanWorldImageToWorldRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiHunyuanWorldImageToWorldRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiHunyuanWorldImageToWorldRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiHunyuanWorldImageToWorldRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiHunyuanWorldImageToWorldData = z.object({ + body: zSchemaHunyuanWorldImageToWorldInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiHunyuanWorldImageToWorldResponse = zSchemaQueueStatus + +export const zGetFalAiHunyuanWorldImageToWorldRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiHunyuanWorldImageToWorldRequestsByRequestIdResponse = + zSchemaHunyuanWorldImageToWorldOutput + +export const zGetTripo3dTripoV25MultiviewTo3dRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetTripo3dTripoV25MultiviewTo3dRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutTripo3dTripoV25MultiviewTo3dRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutTripo3dTripoV25MultiviewTo3dRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostTripo3dTripoV25MultiviewTo3dData = z.object({ + body: zSchemaTripoV25MultiviewTo3dInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostTripo3dTripoV25MultiviewTo3dResponse = zSchemaQueueStatus + +export const zGetTripo3dTripoV25MultiviewTo3dRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. + */ +export const zGetTripo3dTripoV25MultiviewTo3dRequestsByRequestIdResponse = + zSchemaTripoV25MultiviewTo3dOutput + +export const zGetFalAiHunyuan3dV21RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiHunyuan3dV21RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiHunyuan3dV21RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiHunyuan3dV21RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiHunyuan3dV21Data = z.object({ + body: zSchemaHunyuan3dV21Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiHunyuan3dV21Response = zSchemaQueueStatus + +export const zGetFalAiHunyuan3dV21RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiHunyuan3dV21RequestsByRequestIdResponse = + zSchemaHunyuan3dV21Output + +export const zGetFalAiTrellisMultiRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiTrellisMultiRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiTrellisMultiRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiTrellisMultiRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiTrellisMultiData = z.object({ + body: zSchemaTrellisMultiInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiTrellisMultiResponse = zSchemaQueueStatus + +export const zGetFalAiTrellisMultiRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiTrellisMultiRequestsByRequestIdResponse = + zSchemaTrellisMultiOutput + +export const zGetTripo3dTripoV25ImageTo3dRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetTripo3dTripoV25ImageTo3dRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutTripo3dTripoV25ImageTo3dRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutTripo3dTripoV25ImageTo3dRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostTripo3dTripoV25ImageTo3dData = z.object({ + body: zSchemaTripoV25ImageTo3dInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostTripo3dTripoV25ImageTo3dResponse = zSchemaQueueStatus + +export const zGetTripo3dTripoV25ImageTo3dRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetTripo3dTripoV25ImageTo3dRequestsByRequestIdResponse = + zSchemaTripoV25ImageTo3dOutput + +export const zGetFalAiHunyuan3dV2MultiViewTurboRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiHunyuan3dV2MultiViewTurboRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiHunyuan3dV2MultiViewTurboRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiHunyuan3dV2MultiViewTurboRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiHunyuan3dV2MultiViewTurboData = z.object({ + body: zSchemaHunyuan3dV2MultiViewTurboInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiHunyuan3dV2MultiViewTurboResponse = zSchemaQueueStatus + +export const zGetFalAiHunyuan3dV2MultiViewTurboRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiHunyuan3dV2MultiViewTurboRequestsByRequestIdResponse = + zSchemaHunyuan3dV2MultiViewTurboOutput + +export const zGetFalAiHunyuan3dV2RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiHunyuan3dV2RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiHunyuan3dV2RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiHunyuan3dV2RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiHunyuan3dV2Data = z.object({ + body: zSchemaHunyuan3dV2Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiHunyuan3dV2Response = zSchemaQueueStatus + +export const zGetFalAiHunyuan3dV2RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiHunyuan3dV2RequestsByRequestIdResponse = + zSchemaHunyuan3dV2Output + +export const zGetFalAiHunyuan3dV2MiniRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiHunyuan3dV2MiniRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiHunyuan3dV2MiniRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiHunyuan3dV2MiniRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiHunyuan3dV2MiniData = z.object({ + body: zSchemaHunyuan3dV2MiniInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiHunyuan3dV2MiniResponse = zSchemaQueueStatus + +export const zGetFalAiHunyuan3dV2MiniRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiHunyuan3dV2MiniRequestsByRequestIdResponse = + zSchemaHunyuan3dV2MiniOutput + +export const zGetFalAiHunyuan3dV2MultiViewRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiHunyuan3dV2MultiViewRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiHunyuan3dV2MultiViewRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiHunyuan3dV2MultiViewRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiHunyuan3dV2MultiViewData = z.object({ + body: zSchemaHunyuan3dV2MultiViewInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiHunyuan3dV2MultiViewResponse = zSchemaQueueStatus + +export const zGetFalAiHunyuan3dV2MultiViewRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiHunyuan3dV2MultiViewRequestsByRequestIdResponse = + zSchemaHunyuan3dV2MultiViewOutput + +export const zGetFalAiHunyuan3dV2TurboRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiHunyuan3dV2TurboRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiHunyuan3dV2TurboRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiHunyuan3dV2TurboRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiHunyuan3dV2TurboData = z.object({ + body: zSchemaHunyuan3dV2TurboInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiHunyuan3dV2TurboResponse = zSchemaQueueStatus + +export const zGetFalAiHunyuan3dV2TurboRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiHunyuan3dV2TurboRequestsByRequestIdResponse = + zSchemaHunyuan3dV2TurboOutput + +export const zGetFalAiHunyuan3dV2MiniTurboRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiHunyuan3dV2MiniTurboRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiHunyuan3dV2MiniTurboRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiHunyuan3dV2MiniTurboRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiHunyuan3dV2MiniTurboData = z.object({ + body: zSchemaHunyuan3dV2MiniTurboInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiHunyuan3dV2MiniTurboResponse = zSchemaQueueStatus + +export const zGetFalAiHunyuan3dV2MiniTurboRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiHunyuan3dV2MiniTurboRequestsByRequestIdResponse = + zSchemaHunyuan3dV2MiniTurboOutput + +export const zGetFalAiHyper3dRodinRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiHyper3dRodinRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiHyper3dRodinRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiHyper3dRodinRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiHyper3dRodinData = z.object({ + body: zSchemaHyper3dRodinInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiHyper3dRodinResponse = zSchemaQueueStatus + +export const zGetFalAiHyper3dRodinRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiHyper3dRodinRequestsByRequestIdResponse = + zSchemaHyper3dRodinOutput + +export const zGetFalAiTrellisRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiTrellisRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiTrellisRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiTrellisRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiTrellisData = z.object({ + body: zSchemaTrellisInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiTrellisResponse = zSchemaQueueStatus + +export const zGetFalAiTrellisRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiTrellisRequestsByRequestIdResponse = zSchemaTrellisOutput + +export const zGetFalAiTriposrRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiTriposrRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiTriposrRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiTriposrRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiTriposrData = z.object({ + body: zSchemaTriposrInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiTriposrResponse = zSchemaQueueStatus + +export const zGetFalAiTriposrRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiTriposrRequestsByRequestIdResponse = zSchemaTriposrOutput diff --git a/packages/typescript/ai-fal/src/generated/image-to-image/endpoint-map.ts b/packages/typescript/ai-fal/src/generated/image-to-image/endpoint-map.ts new file mode 100644 index 00000000..4cd7708b --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/image-to-image/endpoint-map.ts @@ -0,0 +1,4194 @@ +// AUTO-GENERATED - Do not edit manually +// Generated from types.gen.ts via scripts/generate-fal-endpoint-maps.ts + +import { + zSchemaAiBabyAndAgingGeneratorMultiInput, + zSchemaAiBabyAndAgingGeneratorMultiOutput, + zSchemaAiBabyAndAgingGeneratorSingleInput, + zSchemaAiBabyAndAgingGeneratorSingleOutput, + zSchemaAiFaceSwapFaceswapimageInput, + zSchemaAiFaceSwapFaceswapimageOutput, + zSchemaAiHomeEditInput, + zSchemaAiHomeEditOutput, + zSchemaAiHomeStyleInput, + zSchemaAiHomeStyleOutput, + zSchemaAuraSrInput, + zSchemaAuraSrOutput, + zSchemaBagelEditInput, + zSchemaBagelEditOutput, + zSchemaBenV2ImageInput, + zSchemaBenV2ImageOutput, + zSchemaBirefnetInput, + zSchemaBirefnetOutput, + zSchemaBirefnetV2Input, + zSchemaBirefnetV2Output, + zSchemaBriaBackgroundRemoveInput, + zSchemaBriaBackgroundRemoveOutput, + zSchemaBriaBackgroundReplaceInput, + zSchemaBriaBackgroundReplaceOutput, + zSchemaBriaEraserInput, + zSchemaBriaEraserOutput, + zSchemaBriaExpandInput, + zSchemaBriaExpandOutput, + zSchemaBriaGenfillInput, + zSchemaBriaGenfillOutput, + zSchemaBriaProductShotInput, + zSchemaBriaProductShotOutput, + zSchemaBriaReimagineInput, + zSchemaBriaReimagineOutput, + zSchemaBytedanceSeedreamV45EditInput, + zSchemaBytedanceSeedreamV45EditOutput, + zSchemaBytedanceSeedreamV4EditInput, + zSchemaBytedanceSeedreamV4EditOutput, + zSchemaCalligrapherInput, + zSchemaCalligrapherOutput, + zSchemaCartoonifyInput, + zSchemaCartoonifyOutput, + zSchemaCatVtonInput, + zSchemaCatVtonOutput, + zSchemaCcsrInput, + zSchemaCcsrOutput, + zSchemaChainOfZoomInput, + zSchemaChainOfZoomOutput, + zSchemaChronoEditInput, + zSchemaChronoEditLoraGalleryPaintbrushInput, + zSchemaChronoEditLoraGalleryPaintbrushOutput, + zSchemaChronoEditLoraGalleryUpscalerInput, + zSchemaChronoEditLoraGalleryUpscalerOutput, + zSchemaChronoEditLoraInput, + zSchemaChronoEditLoraOutput, + zSchemaChronoEditOutput, + zSchemaClarityUpscalerInput, + zSchemaClarityUpscalerOutput, + zSchemaCodeformerInput, + zSchemaCodeformerOutput, + zSchemaCreativeUpscalerInput, + zSchemaCreativeUpscalerOutput, + zSchemaCrystalUpscalerInput, + zSchemaCrystalUpscalerOutput, + zSchemaDdcolorInput, + zSchemaDdcolorOutput, + zSchemaDocresDewarpInput, + zSchemaDocresDewarpOutput, + zSchemaDocresInput, + zSchemaDocresOutput, + zSchemaDrctSuperResolutionInput, + zSchemaDrctSuperResolutionOutput, + zSchemaDreamomni2EditInput, + zSchemaDreamomni2EditOutput, + zSchemaDwposeInput, + zSchemaDwposeOutput, + zSchemaEmu35ImageEditImageInput, + zSchemaEmu35ImageEditImageOutput, + zSchemaEra3dInput, + zSchemaEra3dOutput, + zSchemaEsrganInput, + zSchemaEsrganOutput, + zSchemaEvfSamInput, + zSchemaEvfSamOutput, + zSchemaFaceToStickerInput, + zSchemaFaceToStickerOutput, + zSchemaFashnTryonV15Input, + zSchemaFashnTryonV15Output, + zSchemaFashnTryonV16Input, + zSchemaFashnTryonV16Output, + zSchemaFastLcmDiffusionImageToImageInput, + zSchemaFastLcmDiffusionImageToImageOutput, + zSchemaFastLcmDiffusionInpaintingInput, + zSchemaFastLcmDiffusionInpaintingOutput, + zSchemaFastLightningSdxlImageToImageInput, + zSchemaFastLightningSdxlImageToImageOutput, + zSchemaFastLightningSdxlInpaintingInput, + 
zSchemaFastLightningSdxlInpaintingOutput, + zSchemaFastSdxlControlnetCannyImageToImageInput, + zSchemaFastSdxlControlnetCannyImageToImageOutput, + zSchemaFastSdxlControlnetCannyInpaintingInput, + zSchemaFastSdxlControlnetCannyInpaintingOutput, + zSchemaFastSdxlImageToImageInput, + zSchemaFastSdxlImageToImageOutput, + zSchemaFastSdxlInpaintingInput, + zSchemaFastSdxlInpaintingOutput, + zSchemaFfmpegApiExtractFrameInput, + zSchemaFfmpegApiExtractFrameOutput, + zSchemaFiboEditAddObjectByTextInput, + zSchemaFiboEditAddObjectByTextOutput, + zSchemaFiboEditBlendInput, + zSchemaFiboEditBlendOutput, + zSchemaFiboEditColorizeInput, + zSchemaFiboEditColorizeOutput, + zSchemaFiboEditEditInput, + zSchemaFiboEditEditOutput, + zSchemaFiboEditEraseByTextInput, + zSchemaFiboEditEraseByTextOutput, + zSchemaFiboEditRelightInput, + zSchemaFiboEditRelightOutput, + zSchemaFiboEditReplaceObjectByTextInput, + zSchemaFiboEditReplaceObjectByTextOutput, + zSchemaFiboEditReseasonInput, + zSchemaFiboEditReseasonOutput, + zSchemaFiboEditRestoreInput, + zSchemaFiboEditRestoreOutput, + zSchemaFiboEditRestyleInput, + zSchemaFiboEditRestyleOutput, + zSchemaFiboEditRewriteTextInput, + zSchemaFiboEditRewriteTextOutput, + zSchemaFiboEditSketchToColoredImageInput, + zSchemaFiboEditSketchToColoredImageOutput, + zSchemaFilmInput, + zSchemaFilmOutput, + zSchemaFinegrainEraserBboxInput, + zSchemaFinegrainEraserBboxOutput, + zSchemaFinegrainEraserInput, + zSchemaFinegrainEraserMaskInput, + zSchemaFinegrainEraserMaskOutput, + zSchemaFinegrainEraserOutput, + zSchemaFlorence2LargeCaptionToPhraseGroundingInput, + zSchemaFlorence2LargeCaptionToPhraseGroundingOutput, + zSchemaFlorence2LargeDenseRegionCaptionInput, + zSchemaFlorence2LargeDenseRegionCaptionOutput, + zSchemaFlorence2LargeObjectDetectionInput, + zSchemaFlorence2LargeObjectDetectionOutput, + zSchemaFlorence2LargeOcrWithRegionInput, + zSchemaFlorence2LargeOcrWithRegionOutput, + zSchemaFlorence2LargeOpenVocabularyDetectionInput, + zSchemaFlorence2LargeOpenVocabularyDetectionOutput, + zSchemaFlorence2LargeReferringExpressionSegmentationInput, + zSchemaFlorence2LargeReferringExpressionSegmentationOutput, + zSchemaFlorence2LargeRegionProposalInput, + zSchemaFlorence2LargeRegionProposalOutput, + zSchemaFlorence2LargeRegionToSegmentationInput, + zSchemaFlorence2LargeRegionToSegmentationOutput, + zSchemaFloweditInput, + zSchemaFloweditOutput, + zSchemaFlux1DevImageToImageInput, + zSchemaFlux1DevImageToImageOutput, + zSchemaFlux1DevReduxInput, + zSchemaFlux1DevReduxOutput, + zSchemaFlux1KreaImageToImageInput, + zSchemaFlux1KreaImageToImageOutput, + zSchemaFlux1KreaReduxInput, + zSchemaFlux1KreaReduxOutput, + zSchemaFlux1SchnellReduxInput, + zSchemaFlux1SchnellReduxOutput, + zSchemaFlux1SrpoImageToImageInput, + zSchemaFlux1SrpoImageToImageOutput, + zSchemaFlux2EditInput, + zSchemaFlux2EditOutput, + zSchemaFlux2FlashEditInput, + zSchemaFlux2FlashEditOutput, + zSchemaFlux2FlexEditInput, + zSchemaFlux2FlexEditOutput, + zSchemaFlux2Klein4bBaseEditInput, + zSchemaFlux2Klein4bBaseEditLoraInput, + zSchemaFlux2Klein4bBaseEditLoraOutput, + zSchemaFlux2Klein4bBaseEditOutput, + zSchemaFlux2Klein4bEditInput, + zSchemaFlux2Klein4bEditOutput, + zSchemaFlux2Klein9bBaseEditInput, + zSchemaFlux2Klein9bBaseEditLoraInput, + zSchemaFlux2Klein9bBaseEditLoraOutput, + zSchemaFlux2Klein9bBaseEditOutput, + zSchemaFlux2Klein9bEditInput, + zSchemaFlux2Klein9bEditOutput, + zSchemaFlux2LoraEditInput, + zSchemaFlux2LoraEditOutput, + zSchemaFlux2LoraGalleryAddBackgroundInput, + 
zSchemaFlux2LoraGalleryAddBackgroundOutput, + zSchemaFlux2LoraGalleryApartmentStagingInput, + zSchemaFlux2LoraGalleryApartmentStagingOutput, + zSchemaFlux2LoraGalleryFaceToFullPortraitInput, + zSchemaFlux2LoraGalleryFaceToFullPortraitOutput, + zSchemaFlux2LoraGalleryMultipleAnglesInput, + zSchemaFlux2LoraGalleryMultipleAnglesOutput, + zSchemaFlux2LoraGalleryVirtualTryonInput, + zSchemaFlux2LoraGalleryVirtualTryonOutput, + zSchemaFlux2MaxEditInput, + zSchemaFlux2MaxEditOutput, + zSchemaFlux2ProEditInput, + zSchemaFlux2ProEditOutput, + zSchemaFlux2TurboEditInput, + zSchemaFlux2TurboEditOutput, + zSchemaFluxControlLoraCannyImageToImageInput, + zSchemaFluxControlLoraCannyImageToImageOutput, + zSchemaFluxControlLoraDepthImageToImageInput, + zSchemaFluxControlLoraDepthImageToImageOutput, + zSchemaFluxDevImageToImageInput, + zSchemaFluxDevImageToImageOutput, + zSchemaFluxDevReduxInput, + zSchemaFluxDevReduxOutput, + zSchemaFluxDifferentialDiffusionInput, + zSchemaFluxDifferentialDiffusionOutput, + zSchemaFluxGeneralDifferentialDiffusionInput, + zSchemaFluxGeneralDifferentialDiffusionOutput, + zSchemaFluxGeneralImageToImageInput, + zSchemaFluxGeneralImageToImageOutput, + zSchemaFluxGeneralInpaintingInput, + zSchemaFluxGeneralInpaintingOutput, + zSchemaFluxGeneralRfInversionInput, + zSchemaFluxGeneralRfInversionOutput, + zSchemaFluxKontextDevInput, + zSchemaFluxKontextDevOutput, + zSchemaFluxKontextLoraInpaintInput, + zSchemaFluxKontextLoraInpaintOutput, + zSchemaFluxKontextLoraInput, + zSchemaFluxKontextLoraOutput, + zSchemaFluxKreaImageToImageInput, + zSchemaFluxKreaImageToImageOutput, + zSchemaFluxKreaLoraImageToImageInput, + zSchemaFluxKreaLoraImageToImageOutput, + zSchemaFluxKreaLoraInpaintingInput, + zSchemaFluxKreaLoraInpaintingOutput, + zSchemaFluxKreaReduxInput, + zSchemaFluxKreaReduxOutput, + zSchemaFluxLoraCannyInput, + zSchemaFluxLoraCannyOutput, + zSchemaFluxLoraDepthInput, + zSchemaFluxLoraDepthOutput, + zSchemaFluxLoraFillInput, + zSchemaFluxLoraFillOutput, + zSchemaFluxLoraImageToImageInput, + zSchemaFluxLoraImageToImageOutput, + zSchemaFluxProKontextInput, + zSchemaFluxProKontextMaxInput, + zSchemaFluxProKontextMaxMultiInput, + zSchemaFluxProKontextMaxMultiOutput, + zSchemaFluxProKontextMaxOutput, + zSchemaFluxProKontextMultiInput, + zSchemaFluxProKontextMultiOutput, + zSchemaFluxProKontextOutput, + zSchemaFluxProV11ReduxInput, + zSchemaFluxProV11ReduxOutput, + zSchemaFluxProV11UltraReduxInput, + zSchemaFluxProV11UltraReduxOutput, + zSchemaFluxProV1FillFinetunedInput, + zSchemaFluxProV1FillFinetunedOutput, + zSchemaFluxProV1FillInput, + zSchemaFluxProV1FillOutput, + zSchemaFluxPulidInput, + zSchemaFluxPulidOutput, + zSchemaFluxSchnellReduxInput, + zSchemaFluxSchnellReduxOutput, + zSchemaFluxSrpoImageToImageInput, + zSchemaFluxSrpoImageToImageOutput, + zSchemaFluxVisionUpscalerInput, + zSchemaFluxVisionUpscalerOutput, + zSchemaGemini25FlashImageEditInput, + zSchemaGemini25FlashImageEditOutput, + zSchemaGemini3ProImagePreviewEditInput, + zSchemaGemini3ProImagePreviewEditOutput, + zSchemaGeminiFlashEditInput, + zSchemaGeminiFlashEditMultiInput, + zSchemaGeminiFlashEditMultiOutput, + zSchemaGeminiFlashEditOutput, + zSchemaGhiblifyInput, + zSchemaGhiblifyOutput, + zSchemaGlmImageImageToImageInput, + zSchemaGlmImageImageToImageOutput, + zSchemaGptImage15EditInput, + zSchemaGptImage15EditOutput, + zSchemaGptImage1EditImageInput, + zSchemaGptImage1EditImageOutput, + zSchemaGptImage1MiniEditInput, + zSchemaGptImage1MiniEditOutput, + zSchemaHidreamE11Input, + zSchemaHidreamE11Output, + 
zSchemaHidreamI1FullImageToImageInput, + zSchemaHidreamI1FullImageToImageOutput, + zSchemaHunyuanWorldInput, + zSchemaHunyuanWorldOutput, + zSchemaIclightV2Input, + zSchemaIclightV2Output, + zSchemaIdeogramCharacterEditInput, + zSchemaIdeogramCharacterEditOutput, + zSchemaIdeogramCharacterInput, + zSchemaIdeogramCharacterOutput, + zSchemaIdeogramCharacterRemixInput, + zSchemaIdeogramCharacterRemixOutput, + zSchemaIdeogramUpscaleInput, + zSchemaIdeogramUpscaleOutput, + zSchemaIdeogramV2EditInput, + zSchemaIdeogramV2EditOutput, + zSchemaIdeogramV2RemixInput, + zSchemaIdeogramV2RemixOutput, + zSchemaIdeogramV2TurboEditInput, + zSchemaIdeogramV2TurboEditOutput, + zSchemaIdeogramV2TurboRemixInput, + zSchemaIdeogramV2TurboRemixOutput, + zSchemaIdeogramV2aRemixInput, + zSchemaIdeogramV2aRemixOutput, + zSchemaIdeogramV2aTurboRemixInput, + zSchemaIdeogramV2aTurboRemixOutput, + zSchemaIdeogramV3EditInput, + zSchemaIdeogramV3EditOutput, + zSchemaIdeogramV3ReframeInput, + zSchemaIdeogramV3ReframeOutput, + zSchemaIdeogramV3RemixInput, + zSchemaIdeogramV3RemixOutput, + zSchemaIdeogramV3ReplaceBackgroundInput, + zSchemaIdeogramV3ReplaceBackgroundOutput, + zSchemaImage2PixelInput, + zSchemaImage2PixelOutput, + zSchemaImage2SvgInput, + zSchemaImage2SvgOutput, + zSchemaImageAppsV2AgeModifyInput, + zSchemaImageAppsV2AgeModifyOutput, + zSchemaImageAppsV2CityTeleportInput, + zSchemaImageAppsV2CityTeleportOutput, + zSchemaImageAppsV2ExpressionChangeInput, + zSchemaImageAppsV2ExpressionChangeOutput, + zSchemaImageAppsV2HairChangeInput, + zSchemaImageAppsV2HairChangeOutput, + zSchemaImageAppsV2HeadshotPhotoInput, + zSchemaImageAppsV2HeadshotPhotoOutput, + zSchemaImageAppsV2MakeupApplicationInput, + zSchemaImageAppsV2MakeupApplicationOutput, + zSchemaImageAppsV2ObjectRemovalInput, + zSchemaImageAppsV2ObjectRemovalOutput, + zSchemaImageAppsV2OutpaintInput, + zSchemaImageAppsV2OutpaintOutput, + zSchemaImageAppsV2PerspectiveInput, + zSchemaImageAppsV2PerspectiveOutput, + zSchemaImageAppsV2PhotoRestorationInput, + zSchemaImageAppsV2PhotoRestorationOutput, + zSchemaImageAppsV2PhotographyEffectsInput, + zSchemaImageAppsV2PhotographyEffectsOutput, + zSchemaImageAppsV2PortraitEnhanceInput, + zSchemaImageAppsV2PortraitEnhanceOutput, + zSchemaImageAppsV2ProductHoldingInput, + zSchemaImageAppsV2ProductHoldingOutput, + zSchemaImageAppsV2ProductPhotographyInput, + zSchemaImageAppsV2ProductPhotographyOutput, + zSchemaImageAppsV2RelightingInput, + zSchemaImageAppsV2RelightingOutput, + zSchemaImageAppsV2StyleTransferInput, + zSchemaImageAppsV2StyleTransferOutput, + zSchemaImageAppsV2TextureTransformInput, + zSchemaImageAppsV2TextureTransformOutput, + zSchemaImageAppsV2VirtualTryOnInput, + zSchemaImageAppsV2VirtualTryOnOutput, + zSchemaImageEditingAgeProgressionInput, + zSchemaImageEditingAgeProgressionOutput, + zSchemaImageEditingBabyVersionInput, + zSchemaImageEditingBabyVersionOutput, + zSchemaImageEditingBackgroundChangeInput, + zSchemaImageEditingBackgroundChangeOutput, + zSchemaImageEditingBroccoliHaircutInput, + zSchemaImageEditingBroccoliHaircutOutput, + zSchemaImageEditingCartoonifyInput, + zSchemaImageEditingCartoonifyOutput, + zSchemaImageEditingColorCorrectionInput, + zSchemaImageEditingColorCorrectionOutput, + zSchemaImageEditingExpressionChangeInput, + zSchemaImageEditingExpressionChangeOutput, + zSchemaImageEditingFaceEnhancementInput, + zSchemaImageEditingFaceEnhancementOutput, + zSchemaImageEditingHairChangeInput, + zSchemaImageEditingHairChangeOutput, + zSchemaImageEditingObjectRemovalInput, + 
zSchemaImageEditingObjectRemovalOutput, + zSchemaImageEditingPhotoRestorationInput, + zSchemaImageEditingPhotoRestorationOutput, + zSchemaImageEditingPlushieStyleInput, + zSchemaImageEditingPlushieStyleOutput, + zSchemaImageEditingProfessionalPhotoInput, + zSchemaImageEditingProfessionalPhotoOutput, + zSchemaImageEditingRealismInput, + zSchemaImageEditingRealismOutput, + zSchemaImageEditingReframeInput, + zSchemaImageEditingReframeOutput, + zSchemaImageEditingRetouchInput, + zSchemaImageEditingRetouchOutput, + zSchemaImageEditingSceneCompositionInput, + zSchemaImageEditingSceneCompositionOutput, + zSchemaImageEditingStyleTransferInput, + zSchemaImageEditingStyleTransferOutput, + zSchemaImageEditingTextRemovalInput, + zSchemaImageEditingTextRemovalOutput, + zSchemaImageEditingTimeOfDayInput, + zSchemaImageEditingTimeOfDayOutput, + zSchemaImageEditingWeatherEffectInput, + zSchemaImageEditingWeatherEffectOutput, + zSchemaImageEditingWojakStyleInput, + zSchemaImageEditingWojakStyleOutput, + zSchemaImageEditingYoutubeThumbnailsInput, + zSchemaImageEditingYoutubeThumbnailsOutput, + zSchemaImagePreprocessorsDepthAnythingV2Input, + zSchemaImagePreprocessorsDepthAnythingV2Output, + zSchemaImagePreprocessorsHedInput, + zSchemaImagePreprocessorsHedOutput, + zSchemaImagePreprocessorsLineartInput, + zSchemaImagePreprocessorsLineartOutput, + zSchemaImagePreprocessorsMidasInput, + zSchemaImagePreprocessorsMidasOutput, + zSchemaImagePreprocessorsMlsdInput, + zSchemaImagePreprocessorsMlsdOutput, + zSchemaImagePreprocessorsPidiInput, + zSchemaImagePreprocessorsPidiOutput, + zSchemaImagePreprocessorsSamInput, + zSchemaImagePreprocessorsSamOutput, + zSchemaImagePreprocessorsScribbleInput, + zSchemaImagePreprocessorsScribbleOutput, + zSchemaImagePreprocessorsTeedInput, + zSchemaImagePreprocessorsTeedOutput, + zSchemaImagePreprocessorsZoeInput, + zSchemaImagePreprocessorsZoeOutput, + zSchemaImageutilsDepthInput, + zSchemaImageutilsDepthOutput, + zSchemaImageutilsMarigoldDepthInput, + zSchemaImageutilsMarigoldDepthOutput, + zSchemaImageutilsRembgInput, + zSchemaImageutilsRembgOutput, + zSchemaInpaintInput, + zSchemaInpaintOutput, + zSchemaInstantCharacterInput, + zSchemaInstantCharacterOutput, + zSchemaInvisibleWatermarkInput, + zSchemaInvisibleWatermarkOutput, + zSchemaIpAdapterFaceIdInput, + zSchemaIpAdapterFaceIdOutput, + zSchemaJuggernautFluxBaseImageToImageInput, + zSchemaJuggernautFluxBaseImageToImageOutput, + zSchemaJuggernautFluxLoraInpaintingInput, + zSchemaJuggernautFluxLoraInpaintingOutput, + zSchemaJuggernautFluxProImageToImageInput, + zSchemaJuggernautFluxProImageToImageOutput, + zSchemaKlingImageO1Input, + zSchemaKlingImageO1Output, + zSchemaKlingV15KolorsVirtualTryOnInput, + zSchemaKlingV15KolorsVirtualTryOnOutput, + zSchemaKolorsImageToImageInput, + zSchemaKolorsImageToImageOutput, + zSchemaLcmSd15I2iInput, + zSchemaLcmSd15I2iOutput, + zSchemaLeffaPoseTransferInput, + zSchemaLeffaPoseTransferOutput, + zSchemaLeffaVirtualTryonInput, + zSchemaLeffaVirtualTryonOutput, + zSchemaLivePortraitImageInput, + zSchemaLivePortraitImageOutput, + zSchemaLongcatImageEditInput, + zSchemaLongcatImageEditOutput, + zSchemaLoraImageToImageInput, + zSchemaLoraImageToImageOutput, + zSchemaLoraInpaintInput, + zSchemaLoraInpaintOutput, + zSchemaLucidfluxInput, + zSchemaLucidfluxOutput, + zSchemaLumaPhotonFlashModifyInput, + zSchemaLumaPhotonFlashModifyOutput, + zSchemaLumaPhotonFlashReframeInput, + zSchemaLumaPhotonFlashReframeOutput, + zSchemaLumaPhotonModifyInput, + zSchemaLumaPhotonModifyOutput, + 
zSchemaLumaPhotonReframeInput, + zSchemaLumaPhotonReframeOutput, + zSchemaMinimaxImage01SubjectReferenceInput, + zSchemaMinimaxImage01SubjectReferenceOutput, + zSchemaMixDehazeNetInput, + zSchemaMixDehazeNetOutput, + zSchemaMoondream3PreviewSegmentInput, + zSchemaMoondream3PreviewSegmentOutput, + zSchemaMoondreamNextDetectionInput, + zSchemaMoondreamNextDetectionOutput, + zSchemaNafnetDeblurInput, + zSchemaNafnetDeblurOutput, + zSchemaNafnetDenoiseInput, + zSchemaNafnetDenoiseOutput, + zSchemaNanoBananaEditInput, + zSchemaNanoBananaEditOutput, + zSchemaNanoBananaProEditInput, + zSchemaNanoBananaProEditOutput, + zSchemaNextstep1Input, + zSchemaNextstep1Output, + zSchemaObjectRemovalBboxInput, + zSchemaObjectRemovalBboxOutput, + zSchemaObjectRemovalInput, + zSchemaObjectRemovalMaskInput, + zSchemaObjectRemovalMaskOutput, + zSchemaObjectRemovalOutput, + zSchemaOmniZeroInput, + zSchemaOmniZeroOutput, + zSchemaPasdInput, + zSchemaPasdOutput, + zSchemaPhotomakerInput, + zSchemaPhotomakerOutput, + zSchemaPlaygroundV25ImageToImageInput, + zSchemaPlaygroundV25ImageToImageOutput, + zSchemaPlaygroundV25InpaintingInput, + zSchemaPlaygroundV25InpaintingOutput, + zSchemaPlushifyInput, + zSchemaPlushifyOutput, + zSchemaPostProcessingBlurInput, + zSchemaPostProcessingBlurOutput, + zSchemaPostProcessingChromaticAberrationInput, + zSchemaPostProcessingChromaticAberrationOutput, + zSchemaPostProcessingColorCorrectionInput, + zSchemaPostProcessingColorCorrectionOutput, + zSchemaPostProcessingColorTintInput, + zSchemaPostProcessingColorTintOutput, + zSchemaPostProcessingDesaturateInput, + zSchemaPostProcessingDesaturateOutput, + zSchemaPostProcessingDissolveInput, + zSchemaPostProcessingDissolveOutput, + zSchemaPostProcessingDodgeBurnInput, + zSchemaPostProcessingDodgeBurnOutput, + zSchemaPostProcessingGrainInput, + zSchemaPostProcessingGrainOutput, + zSchemaPostProcessingInput, + zSchemaPostProcessingOutput, + zSchemaPostProcessingParabolizeInput, + zSchemaPostProcessingParabolizeOutput, + zSchemaPostProcessingSharpenInput, + zSchemaPostProcessingSharpenOutput, + zSchemaPostProcessingSolarizeInput, + zSchemaPostProcessingSolarizeOutput, + zSchemaPostProcessingVignetteInput, + zSchemaPostProcessingVignetteOutput, + zSchemaPulidInput, + zSchemaPulidOutput, + zSchemaQwenImageEdit2509Input, + zSchemaQwenImageEdit2509LoraGalleryAddBackgroundInput, + zSchemaQwenImageEdit2509LoraGalleryAddBackgroundOutput, + zSchemaQwenImageEdit2509LoraGalleryFaceToFullPortraitInput, + zSchemaQwenImageEdit2509LoraGalleryFaceToFullPortraitOutput, + zSchemaQwenImageEdit2509LoraGalleryGroupPhotoInput, + zSchemaQwenImageEdit2509LoraGalleryGroupPhotoOutput, + zSchemaQwenImageEdit2509LoraGalleryIntegrateProductInput, + zSchemaQwenImageEdit2509LoraGalleryIntegrateProductOutput, + zSchemaQwenImageEdit2509LoraGalleryLightingRestorationInput, + zSchemaQwenImageEdit2509LoraGalleryLightingRestorationOutput, + zSchemaQwenImageEdit2509LoraGalleryMultipleAnglesInput, + zSchemaQwenImageEdit2509LoraGalleryMultipleAnglesOutput, + zSchemaQwenImageEdit2509LoraGalleryNextSceneInput, + zSchemaQwenImageEdit2509LoraGalleryNextSceneOutput, + zSchemaQwenImageEdit2509LoraGalleryRemoveElementInput, + zSchemaQwenImageEdit2509LoraGalleryRemoveElementOutput, + zSchemaQwenImageEdit2509LoraGalleryRemoveLightingInput, + zSchemaQwenImageEdit2509LoraGalleryRemoveLightingOutput, + zSchemaQwenImageEdit2509LoraGalleryShirtDesignInput, + zSchemaQwenImageEdit2509LoraGalleryShirtDesignOutput, + zSchemaQwenImageEdit2509LoraInput, + zSchemaQwenImageEdit2509LoraOutput, + 
zSchemaQwenImageEdit2509Output, + zSchemaQwenImageEdit2511Input, + zSchemaQwenImageEdit2511LoraInput, + zSchemaQwenImageEdit2511LoraOutput, + zSchemaQwenImageEdit2511MultipleAnglesInput, + zSchemaQwenImageEdit2511MultipleAnglesOutput, + zSchemaQwenImageEdit2511Output, + zSchemaQwenImageEditImageToImageInput, + zSchemaQwenImageEditImageToImageOutput, + zSchemaQwenImageEditInpaintInput, + zSchemaQwenImageEditInpaintOutput, + zSchemaQwenImageEditInput, + zSchemaQwenImageEditLoraInput, + zSchemaQwenImageEditLoraOutput, + zSchemaQwenImageEditOutput, + zSchemaQwenImageEditPlusInput, + zSchemaQwenImageEditPlusLoraGalleryAddBackgroundInput, + zSchemaQwenImageEditPlusLoraGalleryAddBackgroundOutput, + zSchemaQwenImageEditPlusLoraGalleryFaceToFullPortraitInput, + zSchemaQwenImageEditPlusLoraGalleryFaceToFullPortraitOutput, + zSchemaQwenImageEditPlusLoraGalleryGroupPhotoInput, + zSchemaQwenImageEditPlusLoraGalleryGroupPhotoOutput, + zSchemaQwenImageEditPlusLoraGalleryIntegrateProductInput, + zSchemaQwenImageEditPlusLoraGalleryIntegrateProductOutput, + zSchemaQwenImageEditPlusLoraGalleryLightingRestorationInput, + zSchemaQwenImageEditPlusLoraGalleryLightingRestorationOutput, + zSchemaQwenImageEditPlusLoraGalleryMultipleAnglesInput, + zSchemaQwenImageEditPlusLoraGalleryMultipleAnglesOutput, + zSchemaQwenImageEditPlusLoraGalleryNextSceneInput, + zSchemaQwenImageEditPlusLoraGalleryNextSceneOutput, + zSchemaQwenImageEditPlusLoraGalleryRemoveElementInput, + zSchemaQwenImageEditPlusLoraGalleryRemoveElementOutput, + zSchemaQwenImageEditPlusLoraGalleryRemoveLightingInput, + zSchemaQwenImageEditPlusLoraGalleryRemoveLightingOutput, + zSchemaQwenImageEditPlusLoraGalleryShirtDesignInput, + zSchemaQwenImageEditPlusLoraGalleryShirtDesignOutput, + zSchemaQwenImageEditPlusLoraInput, + zSchemaQwenImageEditPlusLoraOutput, + zSchemaQwenImageEditPlusOutput, + zSchemaQwenImageImageToImageInput, + zSchemaQwenImageImageToImageOutput, + zSchemaQwenImageLayeredInput, + zSchemaQwenImageLayeredLoraInput, + zSchemaQwenImageLayeredLoraOutput, + zSchemaQwenImageLayeredOutput, + zSchemaRecraftUpscaleCreativeInput, + zSchemaRecraftUpscaleCreativeOutput, + zSchemaRecraftUpscaleCrispInput, + zSchemaRecraftUpscaleCrispOutput, + zSchemaRecraftV3ImageToImageInput, + zSchemaRecraftV3ImageToImageOutput, + zSchemaRecraftVectorizeInput, + zSchemaRecraftVectorizeOutput, + zSchemaReimagine32Input, + zSchemaReimagine32Output, + zSchemaRembgEnhanceInput, + zSchemaRembgEnhanceOutput, + zSchemaReplaceBackgroundInput, + zSchemaReplaceBackgroundOutput, + zSchemaRetoucherInput, + zSchemaRetoucherOutput, + zSchemaReveEditInput, + zSchemaReveEditOutput, + zSchemaReveFastEditInput, + zSchemaReveFastEditOutput, + zSchemaReveFastRemixInput, + zSchemaReveFastRemixOutput, + zSchemaReveRemixInput, + zSchemaReveRemixOutput, + zSchemaRifeInput, + zSchemaRifeOutput, + zSchemaSam2AutoSegmentInput, + zSchemaSam2AutoSegmentOutput, + zSchemaSam2ImageInput, + zSchemaSam2ImageOutput, + zSchemaSam3ImageInput, + zSchemaSam3ImageOutput, + zSchemaSam3ImageRleInput, + zSchemaSam3ImageRleOutput, + zSchemaSd15DepthControlnetInput, + zSchemaSd15DepthControlnetOutput, + zSchemaSdxlControlnetUnionImageToImageInput, + zSchemaSdxlControlnetUnionImageToImageOutput, + zSchemaSdxlControlnetUnionInpaintingInput, + zSchemaSdxlControlnetUnionInpaintingOutput, + zSchemaSeedvrUpscaleImageInput, + zSchemaSeedvrUpscaleImageOutput, + zSchemaStableDiffusionV3MediumImageToImageInput, + zSchemaStableDiffusionV3MediumImageToImageOutput, + zSchemaStarVectorInput, + zSchemaStarVectorOutput, + 
zSchemaStep1xEditInput, + zSchemaStep1xEditOutput, + zSchemaStepxEdit2Input, + zSchemaStepxEdit2Output, + zSchemaSwin2SrInput, + zSchemaSwin2SrOutput, + zSchemaTheraInput, + zSchemaTheraOutput, + zSchemaTopazUpscaleImageInput, + zSchemaTopazUpscaleImageOutput, + zSchemaUnoInput, + zSchemaUnoOutput, + zSchemaUsoInput, + zSchemaUsoOutput, + zSchemaV26ImageToImageInput, + zSchemaV26ImageToImageOutput, + zSchemaViduQ2ReferenceToImageInput, + zSchemaViduQ2ReferenceToImageOutput, + zSchemaViduReferenceToImageInput, + zSchemaViduReferenceToImageOutput, + zSchemaWan25PreviewImageToImageInput, + zSchemaWan25PreviewImageToImageOutput, + zSchemaWanV22A14bImageToImageInput, + zSchemaWanV22A14bImageToImageOutput, + zSchemaZImageTurboControlnetInput, + zSchemaZImageTurboControlnetLoraInput, + zSchemaZImageTurboControlnetLoraOutput, + zSchemaZImageTurboControlnetOutput, + zSchemaZImageTurboImageToImageInput, + zSchemaZImageTurboImageToImageLoraInput, + zSchemaZImageTurboImageToImageLoraOutput, + zSchemaZImageTurboImageToImageOutput, + zSchemaZImageTurboInpaintInput, + zSchemaZImageTurboInpaintLoraInput, + zSchemaZImageTurboInpaintLoraOutput, + zSchemaZImageTurboInpaintOutput, +} from './zod.gen' + +import type { + SchemaAiBabyAndAgingGeneratorMultiInput, + SchemaAiBabyAndAgingGeneratorMultiOutput, + SchemaAiBabyAndAgingGeneratorSingleInput, + SchemaAiBabyAndAgingGeneratorSingleOutput, + SchemaAiFaceSwapFaceswapimageInput, + SchemaAiFaceSwapFaceswapimageOutput, + SchemaAiHomeEditInput, + SchemaAiHomeEditOutput, + SchemaAiHomeStyleInput, + SchemaAiHomeStyleOutput, + SchemaAuraSrInput, + SchemaAuraSrOutput, + SchemaBagelEditInput, + SchemaBagelEditOutput, + SchemaBenV2ImageInput, + SchemaBenV2ImageOutput, + SchemaBirefnetInput, + SchemaBirefnetOutput, + SchemaBirefnetV2Input, + SchemaBirefnetV2Output, + SchemaBriaBackgroundRemoveInput, + SchemaBriaBackgroundRemoveOutput, + SchemaBriaBackgroundReplaceInput, + SchemaBriaBackgroundReplaceOutput, + SchemaBriaEraserInput, + SchemaBriaEraserOutput, + SchemaBriaExpandInput, + SchemaBriaExpandOutput, + SchemaBriaGenfillInput, + SchemaBriaGenfillOutput, + SchemaBriaProductShotInput, + SchemaBriaProductShotOutput, + SchemaBriaReimagineInput, + SchemaBriaReimagineOutput, + SchemaBytedanceSeedreamV45EditInput, + SchemaBytedanceSeedreamV45EditOutput, + SchemaBytedanceSeedreamV4EditInput, + SchemaBytedanceSeedreamV4EditOutput, + SchemaCalligrapherInput, + SchemaCalligrapherOutput, + SchemaCartoonifyInput, + SchemaCartoonifyOutput, + SchemaCatVtonInput, + SchemaCatVtonOutput, + SchemaCcsrInput, + SchemaCcsrOutput, + SchemaChainOfZoomInput, + SchemaChainOfZoomOutput, + SchemaChronoEditInput, + SchemaChronoEditLoraGalleryPaintbrushInput, + SchemaChronoEditLoraGalleryPaintbrushOutput, + SchemaChronoEditLoraGalleryUpscalerInput, + SchemaChronoEditLoraGalleryUpscalerOutput, + SchemaChronoEditLoraInput, + SchemaChronoEditLoraOutput, + SchemaChronoEditOutput, + SchemaClarityUpscalerInput, + SchemaClarityUpscalerOutput, + SchemaCodeformerInput, + SchemaCodeformerOutput, + SchemaCreativeUpscalerInput, + SchemaCreativeUpscalerOutput, + SchemaCrystalUpscalerInput, + SchemaCrystalUpscalerOutput, + SchemaDdcolorInput, + SchemaDdcolorOutput, + SchemaDocresDewarpInput, + SchemaDocresDewarpOutput, + SchemaDocresInput, + SchemaDocresOutput, + SchemaDrctSuperResolutionInput, + SchemaDrctSuperResolutionOutput, + SchemaDreamomni2EditInput, + SchemaDreamomni2EditOutput, + SchemaDwposeInput, + SchemaDwposeOutput, + SchemaEmu35ImageEditImageInput, + SchemaEmu35ImageEditImageOutput, + 
SchemaEra3dInput, + SchemaEra3dOutput, + SchemaEsrganInput, + SchemaEsrganOutput, + SchemaEvfSamInput, + SchemaEvfSamOutput, + SchemaFaceToStickerInput, + SchemaFaceToStickerOutput, + SchemaFashnTryonV15Input, + SchemaFashnTryonV15Output, + SchemaFashnTryonV16Input, + SchemaFashnTryonV16Output, + SchemaFastLcmDiffusionImageToImageInput, + SchemaFastLcmDiffusionImageToImageOutput, + SchemaFastLcmDiffusionInpaintingInput, + SchemaFastLcmDiffusionInpaintingOutput, + SchemaFastLightningSdxlImageToImageInput, + SchemaFastLightningSdxlImageToImageOutput, + SchemaFastLightningSdxlInpaintingInput, + SchemaFastLightningSdxlInpaintingOutput, + SchemaFastSdxlControlnetCannyImageToImageInput, + SchemaFastSdxlControlnetCannyImageToImageOutput, + SchemaFastSdxlControlnetCannyInpaintingInput, + SchemaFastSdxlControlnetCannyInpaintingOutput, + SchemaFastSdxlImageToImageInput, + SchemaFastSdxlImageToImageOutput, + SchemaFastSdxlInpaintingInput, + SchemaFastSdxlInpaintingOutput, + SchemaFfmpegApiExtractFrameInput, + SchemaFfmpegApiExtractFrameOutput, + SchemaFiboEditAddObjectByTextInput, + SchemaFiboEditAddObjectByTextOutput, + SchemaFiboEditBlendInput, + SchemaFiboEditBlendOutput, + SchemaFiboEditColorizeInput, + SchemaFiboEditColorizeOutput, + SchemaFiboEditEditInput, + SchemaFiboEditEditOutput, + SchemaFiboEditEraseByTextInput, + SchemaFiboEditEraseByTextOutput, + SchemaFiboEditRelightInput, + SchemaFiboEditRelightOutput, + SchemaFiboEditReplaceObjectByTextInput, + SchemaFiboEditReplaceObjectByTextOutput, + SchemaFiboEditReseasonInput, + SchemaFiboEditReseasonOutput, + SchemaFiboEditRestoreInput, + SchemaFiboEditRestoreOutput, + SchemaFiboEditRestyleInput, + SchemaFiboEditRestyleOutput, + SchemaFiboEditRewriteTextInput, + SchemaFiboEditRewriteTextOutput, + SchemaFiboEditSketchToColoredImageInput, + SchemaFiboEditSketchToColoredImageOutput, + SchemaFilmInput, + SchemaFilmOutput, + SchemaFinegrainEraserBboxInput, + SchemaFinegrainEraserBboxOutput, + SchemaFinegrainEraserInput, + SchemaFinegrainEraserMaskInput, + SchemaFinegrainEraserMaskOutput, + SchemaFinegrainEraserOutput, + SchemaFlorence2LargeCaptionToPhraseGroundingInput, + SchemaFlorence2LargeCaptionToPhraseGroundingOutput, + SchemaFlorence2LargeDenseRegionCaptionInput, + SchemaFlorence2LargeDenseRegionCaptionOutput, + SchemaFlorence2LargeObjectDetectionInput, + SchemaFlorence2LargeObjectDetectionOutput, + SchemaFlorence2LargeOcrWithRegionInput, + SchemaFlorence2LargeOcrWithRegionOutput, + SchemaFlorence2LargeOpenVocabularyDetectionInput, + SchemaFlorence2LargeOpenVocabularyDetectionOutput, + SchemaFlorence2LargeReferringExpressionSegmentationInput, + SchemaFlorence2LargeReferringExpressionSegmentationOutput, + SchemaFlorence2LargeRegionProposalInput, + SchemaFlorence2LargeRegionProposalOutput, + SchemaFlorence2LargeRegionToSegmentationInput, + SchemaFlorence2LargeRegionToSegmentationOutput, + SchemaFloweditInput, + SchemaFloweditOutput, + SchemaFlux1DevImageToImageInput, + SchemaFlux1DevImageToImageOutput, + SchemaFlux1DevReduxInput, + SchemaFlux1DevReduxOutput, + SchemaFlux1KreaImageToImageInput, + SchemaFlux1KreaImageToImageOutput, + SchemaFlux1KreaReduxInput, + SchemaFlux1KreaReduxOutput, + SchemaFlux1SchnellReduxInput, + SchemaFlux1SchnellReduxOutput, + SchemaFlux1SrpoImageToImageInput, + SchemaFlux1SrpoImageToImageOutput, + SchemaFlux2EditInput, + SchemaFlux2EditOutput, + SchemaFlux2FlashEditInput, + SchemaFlux2FlashEditOutput, + SchemaFlux2FlexEditInput, + SchemaFlux2FlexEditOutput, + SchemaFlux2Klein4bBaseEditInput, + 
SchemaFlux2Klein4bBaseEditLoraInput, + SchemaFlux2Klein4bBaseEditLoraOutput, + SchemaFlux2Klein4bBaseEditOutput, + SchemaFlux2Klein4bEditInput, + SchemaFlux2Klein4bEditOutput, + SchemaFlux2Klein9bBaseEditInput, + SchemaFlux2Klein9bBaseEditLoraInput, + SchemaFlux2Klein9bBaseEditLoraOutput, + SchemaFlux2Klein9bBaseEditOutput, + SchemaFlux2Klein9bEditInput, + SchemaFlux2Klein9bEditOutput, + SchemaFlux2LoraEditInput, + SchemaFlux2LoraEditOutput, + SchemaFlux2LoraGalleryAddBackgroundInput, + SchemaFlux2LoraGalleryAddBackgroundOutput, + SchemaFlux2LoraGalleryApartmentStagingInput, + SchemaFlux2LoraGalleryApartmentStagingOutput, + SchemaFlux2LoraGalleryFaceToFullPortraitInput, + SchemaFlux2LoraGalleryFaceToFullPortraitOutput, + SchemaFlux2LoraGalleryMultipleAnglesInput, + SchemaFlux2LoraGalleryMultipleAnglesOutput, + SchemaFlux2LoraGalleryVirtualTryonInput, + SchemaFlux2LoraGalleryVirtualTryonOutput, + SchemaFlux2MaxEditInput, + SchemaFlux2MaxEditOutput, + SchemaFlux2ProEditInput, + SchemaFlux2ProEditOutput, + SchemaFlux2TurboEditInput, + SchemaFlux2TurboEditOutput, + SchemaFluxControlLoraCannyImageToImageInput, + SchemaFluxControlLoraCannyImageToImageOutput, + SchemaFluxControlLoraDepthImageToImageInput, + SchemaFluxControlLoraDepthImageToImageOutput, + SchemaFluxDevImageToImageInput, + SchemaFluxDevImageToImageOutput, + SchemaFluxDevReduxInput, + SchemaFluxDevReduxOutput, + SchemaFluxDifferentialDiffusionInput, + SchemaFluxDifferentialDiffusionOutput, + SchemaFluxGeneralDifferentialDiffusionInput, + SchemaFluxGeneralDifferentialDiffusionOutput, + SchemaFluxGeneralImageToImageInput, + SchemaFluxGeneralImageToImageOutput, + SchemaFluxGeneralInpaintingInput, + SchemaFluxGeneralInpaintingOutput, + SchemaFluxGeneralRfInversionInput, + SchemaFluxGeneralRfInversionOutput, + SchemaFluxKontextDevInput, + SchemaFluxKontextDevOutput, + SchemaFluxKontextLoraInpaintInput, + SchemaFluxKontextLoraInpaintOutput, + SchemaFluxKontextLoraInput, + SchemaFluxKontextLoraOutput, + SchemaFluxKreaImageToImageInput, + SchemaFluxKreaImageToImageOutput, + SchemaFluxKreaLoraImageToImageInput, + SchemaFluxKreaLoraImageToImageOutput, + SchemaFluxKreaLoraInpaintingInput, + SchemaFluxKreaLoraInpaintingOutput, + SchemaFluxKreaReduxInput, + SchemaFluxKreaReduxOutput, + SchemaFluxLoraCannyInput, + SchemaFluxLoraCannyOutput, + SchemaFluxLoraDepthInput, + SchemaFluxLoraDepthOutput, + SchemaFluxLoraFillInput, + SchemaFluxLoraFillOutput, + SchemaFluxLoraImageToImageInput, + SchemaFluxLoraImageToImageOutput, + SchemaFluxProKontextInput, + SchemaFluxProKontextMaxInput, + SchemaFluxProKontextMaxMultiInput, + SchemaFluxProKontextMaxMultiOutput, + SchemaFluxProKontextMaxOutput, + SchemaFluxProKontextMultiInput, + SchemaFluxProKontextMultiOutput, + SchemaFluxProKontextOutput, + SchemaFluxProV11ReduxInput, + SchemaFluxProV11ReduxOutput, + SchemaFluxProV11UltraReduxInput, + SchemaFluxProV11UltraReduxOutput, + SchemaFluxProV1FillFinetunedInput, + SchemaFluxProV1FillFinetunedOutput, + SchemaFluxProV1FillInput, + SchemaFluxProV1FillOutput, + SchemaFluxPulidInput, + SchemaFluxPulidOutput, + SchemaFluxSchnellReduxInput, + SchemaFluxSchnellReduxOutput, + SchemaFluxSrpoImageToImageInput, + SchemaFluxSrpoImageToImageOutput, + SchemaFluxVisionUpscalerInput, + SchemaFluxVisionUpscalerOutput, + SchemaGemini25FlashImageEditInput, + SchemaGemini25FlashImageEditOutput, + SchemaGemini3ProImagePreviewEditInput, + SchemaGemini3ProImagePreviewEditOutput, + SchemaGeminiFlashEditInput, + SchemaGeminiFlashEditMultiInput, + SchemaGeminiFlashEditMultiOutput, + 
SchemaGeminiFlashEditOutput, + SchemaGhiblifyInput, + SchemaGhiblifyOutput, + SchemaGlmImageImageToImageInput, + SchemaGlmImageImageToImageOutput, + SchemaGptImage15EditInput, + SchemaGptImage15EditOutput, + SchemaGptImage1EditImageInput, + SchemaGptImage1EditImageOutput, + SchemaGptImage1MiniEditInput, + SchemaGptImage1MiniEditOutput, + SchemaHidreamE11Input, + SchemaHidreamE11Output, + SchemaHidreamI1FullImageToImageInput, + SchemaHidreamI1FullImageToImageOutput, + SchemaHunyuanWorldInput, + SchemaHunyuanWorldOutput, + SchemaIclightV2Input, + SchemaIclightV2Output, + SchemaIdeogramCharacterEditInput, + SchemaIdeogramCharacterEditOutput, + SchemaIdeogramCharacterInput, + SchemaIdeogramCharacterOutput, + SchemaIdeogramCharacterRemixInput, + SchemaIdeogramCharacterRemixOutput, + SchemaIdeogramUpscaleInput, + SchemaIdeogramUpscaleOutput, + SchemaIdeogramV2EditInput, + SchemaIdeogramV2EditOutput, + SchemaIdeogramV2RemixInput, + SchemaIdeogramV2RemixOutput, + SchemaIdeogramV2TurboEditInput, + SchemaIdeogramV2TurboEditOutput, + SchemaIdeogramV2TurboRemixInput, + SchemaIdeogramV2TurboRemixOutput, + SchemaIdeogramV2aRemixInput, + SchemaIdeogramV2aRemixOutput, + SchemaIdeogramV2aTurboRemixInput, + SchemaIdeogramV2aTurboRemixOutput, + SchemaIdeogramV3EditInput, + SchemaIdeogramV3EditOutput, + SchemaIdeogramV3ReframeInput, + SchemaIdeogramV3ReframeOutput, + SchemaIdeogramV3RemixInput, + SchemaIdeogramV3RemixOutput, + SchemaIdeogramV3ReplaceBackgroundInput, + SchemaIdeogramV3ReplaceBackgroundOutput, + SchemaImage2PixelInput, + SchemaImage2PixelOutput, + SchemaImage2SvgInput, + SchemaImage2SvgOutput, + SchemaImageAppsV2AgeModifyInput, + SchemaImageAppsV2AgeModifyOutput, + SchemaImageAppsV2CityTeleportInput, + SchemaImageAppsV2CityTeleportOutput, + SchemaImageAppsV2ExpressionChangeInput, + SchemaImageAppsV2ExpressionChangeOutput, + SchemaImageAppsV2HairChangeInput, + SchemaImageAppsV2HairChangeOutput, + SchemaImageAppsV2HeadshotPhotoInput, + SchemaImageAppsV2HeadshotPhotoOutput, + SchemaImageAppsV2MakeupApplicationInput, + SchemaImageAppsV2MakeupApplicationOutput, + SchemaImageAppsV2ObjectRemovalInput, + SchemaImageAppsV2ObjectRemovalOutput, + SchemaImageAppsV2OutpaintInput, + SchemaImageAppsV2OutpaintOutput, + SchemaImageAppsV2PerspectiveInput, + SchemaImageAppsV2PerspectiveOutput, + SchemaImageAppsV2PhotoRestorationInput, + SchemaImageAppsV2PhotoRestorationOutput, + SchemaImageAppsV2PhotographyEffectsInput, + SchemaImageAppsV2PhotographyEffectsOutput, + SchemaImageAppsV2PortraitEnhanceInput, + SchemaImageAppsV2PortraitEnhanceOutput, + SchemaImageAppsV2ProductHoldingInput, + SchemaImageAppsV2ProductHoldingOutput, + SchemaImageAppsV2ProductPhotographyInput, + SchemaImageAppsV2ProductPhotographyOutput, + SchemaImageAppsV2RelightingInput, + SchemaImageAppsV2RelightingOutput, + SchemaImageAppsV2StyleTransferInput, + SchemaImageAppsV2StyleTransferOutput, + SchemaImageAppsV2TextureTransformInput, + SchemaImageAppsV2TextureTransformOutput, + SchemaImageAppsV2VirtualTryOnInput, + SchemaImageAppsV2VirtualTryOnOutput, + SchemaImageEditingAgeProgressionInput, + SchemaImageEditingAgeProgressionOutput, + SchemaImageEditingBabyVersionInput, + SchemaImageEditingBabyVersionOutput, + SchemaImageEditingBackgroundChangeInput, + SchemaImageEditingBackgroundChangeOutput, + SchemaImageEditingBroccoliHaircutInput, + SchemaImageEditingBroccoliHaircutOutput, + SchemaImageEditingCartoonifyInput, + SchemaImageEditingCartoonifyOutput, + SchemaImageEditingColorCorrectionInput, + SchemaImageEditingColorCorrectionOutput, + 
SchemaImageEditingExpressionChangeInput, + SchemaImageEditingExpressionChangeOutput, + SchemaImageEditingFaceEnhancementInput, + SchemaImageEditingFaceEnhancementOutput, + SchemaImageEditingHairChangeInput, + SchemaImageEditingHairChangeOutput, + SchemaImageEditingObjectRemovalInput, + SchemaImageEditingObjectRemovalOutput, + SchemaImageEditingPhotoRestorationInput, + SchemaImageEditingPhotoRestorationOutput, + SchemaImageEditingPlushieStyleInput, + SchemaImageEditingPlushieStyleOutput, + SchemaImageEditingProfessionalPhotoInput, + SchemaImageEditingProfessionalPhotoOutput, + SchemaImageEditingRealismInput, + SchemaImageEditingRealismOutput, + SchemaImageEditingReframeInput, + SchemaImageEditingReframeOutput, + SchemaImageEditingRetouchInput, + SchemaImageEditingRetouchOutput, + SchemaImageEditingSceneCompositionInput, + SchemaImageEditingSceneCompositionOutput, + SchemaImageEditingStyleTransferInput, + SchemaImageEditingStyleTransferOutput, + SchemaImageEditingTextRemovalInput, + SchemaImageEditingTextRemovalOutput, + SchemaImageEditingTimeOfDayInput, + SchemaImageEditingTimeOfDayOutput, + SchemaImageEditingWeatherEffectInput, + SchemaImageEditingWeatherEffectOutput, + SchemaImageEditingWojakStyleInput, + SchemaImageEditingWojakStyleOutput, + SchemaImageEditingYoutubeThumbnailsInput, + SchemaImageEditingYoutubeThumbnailsOutput, + SchemaImagePreprocessorsDepthAnythingV2Input, + SchemaImagePreprocessorsDepthAnythingV2Output, + SchemaImagePreprocessorsHedInput, + SchemaImagePreprocessorsHedOutput, + SchemaImagePreprocessorsLineartInput, + SchemaImagePreprocessorsLineartOutput, + SchemaImagePreprocessorsMidasInput, + SchemaImagePreprocessorsMidasOutput, + SchemaImagePreprocessorsMlsdInput, + SchemaImagePreprocessorsMlsdOutput, + SchemaImagePreprocessorsPidiInput, + SchemaImagePreprocessorsPidiOutput, + SchemaImagePreprocessorsSamInput, + SchemaImagePreprocessorsSamOutput, + SchemaImagePreprocessorsScribbleInput, + SchemaImagePreprocessorsScribbleOutput, + SchemaImagePreprocessorsTeedInput, + SchemaImagePreprocessorsTeedOutput, + SchemaImagePreprocessorsZoeInput, + SchemaImagePreprocessorsZoeOutput, + SchemaImageutilsDepthInput, + SchemaImageutilsDepthOutput, + SchemaImageutilsMarigoldDepthInput, + SchemaImageutilsMarigoldDepthOutput, + SchemaImageutilsRembgInput, + SchemaImageutilsRembgOutput, + SchemaInpaintInput, + SchemaInpaintOutput, + SchemaInstantCharacterInput, + SchemaInstantCharacterOutput, + SchemaInvisibleWatermarkInput, + SchemaInvisibleWatermarkOutput, + SchemaIpAdapterFaceIdInput, + SchemaIpAdapterFaceIdOutput, + SchemaJuggernautFluxBaseImageToImageInput, + SchemaJuggernautFluxBaseImageToImageOutput, + SchemaJuggernautFluxLoraInpaintingInput, + SchemaJuggernautFluxLoraInpaintingOutput, + SchemaJuggernautFluxProImageToImageInput, + SchemaJuggernautFluxProImageToImageOutput, + SchemaKlingImageO1Input, + SchemaKlingImageO1Output, + SchemaKlingV15KolorsVirtualTryOnInput, + SchemaKlingV15KolorsVirtualTryOnOutput, + SchemaKolorsImageToImageInput, + SchemaKolorsImageToImageOutput, + SchemaLcmSd15I2iInput, + SchemaLcmSd15I2iOutput, + SchemaLeffaPoseTransferInput, + SchemaLeffaPoseTransferOutput, + SchemaLeffaVirtualTryonInput, + SchemaLeffaVirtualTryonOutput, + SchemaLivePortraitImageInput, + SchemaLivePortraitImageOutput, + SchemaLongcatImageEditInput, + SchemaLongcatImageEditOutput, + SchemaLoraImageToImageInput, + SchemaLoraImageToImageOutput, + SchemaLoraInpaintInput, + SchemaLoraInpaintOutput, + SchemaLucidfluxInput, + SchemaLucidfluxOutput, + SchemaLumaPhotonFlashModifyInput, + 
SchemaLumaPhotonFlashModifyOutput, + SchemaLumaPhotonFlashReframeInput, + SchemaLumaPhotonFlashReframeOutput, + SchemaLumaPhotonModifyInput, + SchemaLumaPhotonModifyOutput, + SchemaLumaPhotonReframeInput, + SchemaLumaPhotonReframeOutput, + SchemaMinimaxImage01SubjectReferenceInput, + SchemaMinimaxImage01SubjectReferenceOutput, + SchemaMixDehazeNetInput, + SchemaMixDehazeNetOutput, + SchemaMoondream3PreviewSegmentInput, + SchemaMoondream3PreviewSegmentOutput, + SchemaMoondreamNextDetectionInput, + SchemaMoondreamNextDetectionOutput, + SchemaNafnetDeblurInput, + SchemaNafnetDeblurOutput, + SchemaNafnetDenoiseInput, + SchemaNafnetDenoiseOutput, + SchemaNanoBananaEditInput, + SchemaNanoBananaEditOutput, + SchemaNanoBananaProEditInput, + SchemaNanoBananaProEditOutput, + SchemaNextstep1Input, + SchemaNextstep1Output, + SchemaObjectRemovalBboxInput, + SchemaObjectRemovalBboxOutput, + SchemaObjectRemovalInput, + SchemaObjectRemovalMaskInput, + SchemaObjectRemovalMaskOutput, + SchemaObjectRemovalOutput, + SchemaOmniZeroInput, + SchemaOmniZeroOutput, + SchemaPasdInput, + SchemaPasdOutput, + SchemaPhotomakerInput, + SchemaPhotomakerOutput, + SchemaPlaygroundV25ImageToImageInput, + SchemaPlaygroundV25ImageToImageOutput, + SchemaPlaygroundV25InpaintingInput, + SchemaPlaygroundV25InpaintingOutput, + SchemaPlushifyInput, + SchemaPlushifyOutput, + SchemaPostProcessingBlurInput, + SchemaPostProcessingBlurOutput, + SchemaPostProcessingChromaticAberrationInput, + SchemaPostProcessingChromaticAberrationOutput, + SchemaPostProcessingColorCorrectionInput, + SchemaPostProcessingColorCorrectionOutput, + SchemaPostProcessingColorTintInput, + SchemaPostProcessingColorTintOutput, + SchemaPostProcessingDesaturateInput, + SchemaPostProcessingDesaturateOutput, + SchemaPostProcessingDissolveInput, + SchemaPostProcessingDissolveOutput, + SchemaPostProcessingDodgeBurnInput, + SchemaPostProcessingDodgeBurnOutput, + SchemaPostProcessingGrainInput, + SchemaPostProcessingGrainOutput, + SchemaPostProcessingInput, + SchemaPostProcessingOutput, + SchemaPostProcessingParabolizeInput, + SchemaPostProcessingParabolizeOutput, + SchemaPostProcessingSharpenInput, + SchemaPostProcessingSharpenOutput, + SchemaPostProcessingSolarizeInput, + SchemaPostProcessingSolarizeOutput, + SchemaPostProcessingVignetteInput, + SchemaPostProcessingVignetteOutput, + SchemaPulidInput, + SchemaPulidOutput, + SchemaQwenImageEdit2509Input, + SchemaQwenImageEdit2509LoraGalleryAddBackgroundInput, + SchemaQwenImageEdit2509LoraGalleryAddBackgroundOutput, + SchemaQwenImageEdit2509LoraGalleryFaceToFullPortraitInput, + SchemaQwenImageEdit2509LoraGalleryFaceToFullPortraitOutput, + SchemaQwenImageEdit2509LoraGalleryGroupPhotoInput, + SchemaQwenImageEdit2509LoraGalleryGroupPhotoOutput, + SchemaQwenImageEdit2509LoraGalleryIntegrateProductInput, + SchemaQwenImageEdit2509LoraGalleryIntegrateProductOutput, + SchemaQwenImageEdit2509LoraGalleryLightingRestorationInput, + SchemaQwenImageEdit2509LoraGalleryLightingRestorationOutput, + SchemaQwenImageEdit2509LoraGalleryMultipleAnglesInput, + SchemaQwenImageEdit2509LoraGalleryMultipleAnglesOutput, + SchemaQwenImageEdit2509LoraGalleryNextSceneInput, + SchemaQwenImageEdit2509LoraGalleryNextSceneOutput, + SchemaQwenImageEdit2509LoraGalleryRemoveElementInput, + SchemaQwenImageEdit2509LoraGalleryRemoveElementOutput, + SchemaQwenImageEdit2509LoraGalleryRemoveLightingInput, + SchemaQwenImageEdit2509LoraGalleryRemoveLightingOutput, + SchemaQwenImageEdit2509LoraGalleryShirtDesignInput, + 
SchemaQwenImageEdit2509LoraGalleryShirtDesignOutput, + SchemaQwenImageEdit2509LoraInput, + SchemaQwenImageEdit2509LoraOutput, + SchemaQwenImageEdit2509Output, + SchemaQwenImageEdit2511Input, + SchemaQwenImageEdit2511LoraInput, + SchemaQwenImageEdit2511LoraOutput, + SchemaQwenImageEdit2511MultipleAnglesInput, + SchemaQwenImageEdit2511MultipleAnglesOutput, + SchemaQwenImageEdit2511Output, + SchemaQwenImageEditImageToImageInput, + SchemaQwenImageEditImageToImageOutput, + SchemaQwenImageEditInpaintInput, + SchemaQwenImageEditInpaintOutput, + SchemaQwenImageEditInput, + SchemaQwenImageEditLoraInput, + SchemaQwenImageEditLoraOutput, + SchemaQwenImageEditOutput, + SchemaQwenImageEditPlusInput, + SchemaQwenImageEditPlusLoraGalleryAddBackgroundInput, + SchemaQwenImageEditPlusLoraGalleryAddBackgroundOutput, + SchemaQwenImageEditPlusLoraGalleryFaceToFullPortraitInput, + SchemaQwenImageEditPlusLoraGalleryFaceToFullPortraitOutput, + SchemaQwenImageEditPlusLoraGalleryGroupPhotoInput, + SchemaQwenImageEditPlusLoraGalleryGroupPhotoOutput, + SchemaQwenImageEditPlusLoraGalleryIntegrateProductInput, + SchemaQwenImageEditPlusLoraGalleryIntegrateProductOutput, + SchemaQwenImageEditPlusLoraGalleryLightingRestorationInput, + SchemaQwenImageEditPlusLoraGalleryLightingRestorationOutput, + SchemaQwenImageEditPlusLoraGalleryMultipleAnglesInput, + SchemaQwenImageEditPlusLoraGalleryMultipleAnglesOutput, + SchemaQwenImageEditPlusLoraGalleryNextSceneInput, + SchemaQwenImageEditPlusLoraGalleryNextSceneOutput, + SchemaQwenImageEditPlusLoraGalleryRemoveElementInput, + SchemaQwenImageEditPlusLoraGalleryRemoveElementOutput, + SchemaQwenImageEditPlusLoraGalleryRemoveLightingInput, + SchemaQwenImageEditPlusLoraGalleryRemoveLightingOutput, + SchemaQwenImageEditPlusLoraGalleryShirtDesignInput, + SchemaQwenImageEditPlusLoraGalleryShirtDesignOutput, + SchemaQwenImageEditPlusLoraInput, + SchemaQwenImageEditPlusLoraOutput, + SchemaQwenImageEditPlusOutput, + SchemaQwenImageImageToImageInput, + SchemaQwenImageImageToImageOutput, + SchemaQwenImageLayeredInput, + SchemaQwenImageLayeredLoraInput, + SchemaQwenImageLayeredLoraOutput, + SchemaQwenImageLayeredOutput, + SchemaRecraftUpscaleCreativeInput, + SchemaRecraftUpscaleCreativeOutput, + SchemaRecraftUpscaleCrispInput, + SchemaRecraftUpscaleCrispOutput, + SchemaRecraftV3ImageToImageInput, + SchemaRecraftV3ImageToImageOutput, + SchemaRecraftVectorizeInput, + SchemaRecraftVectorizeOutput, + SchemaReimagine32Input, + SchemaReimagine32Output, + SchemaRembgEnhanceInput, + SchemaRembgEnhanceOutput, + SchemaReplaceBackgroundInput, + SchemaReplaceBackgroundOutput, + SchemaRetoucherInput, + SchemaRetoucherOutput, + SchemaReveEditInput, + SchemaReveEditOutput, + SchemaReveFastEditInput, + SchemaReveFastEditOutput, + SchemaReveFastRemixInput, + SchemaReveFastRemixOutput, + SchemaReveRemixInput, + SchemaReveRemixOutput, + SchemaRifeInput, + SchemaRifeOutput, + SchemaSam2AutoSegmentInput, + SchemaSam2AutoSegmentOutput, + SchemaSam2ImageInput, + SchemaSam2ImageOutput, + SchemaSam3ImageInput, + SchemaSam3ImageOutput, + SchemaSam3ImageRleInput, + SchemaSam3ImageRleOutput, + SchemaSd15DepthControlnetInput, + SchemaSd15DepthControlnetOutput, + SchemaSdxlControlnetUnionImageToImageInput, + SchemaSdxlControlnetUnionImageToImageOutput, + SchemaSdxlControlnetUnionInpaintingInput, + SchemaSdxlControlnetUnionInpaintingOutput, + SchemaSeedvrUpscaleImageInput, + SchemaSeedvrUpscaleImageOutput, + SchemaStableDiffusionV3MediumImageToImageInput, + SchemaStableDiffusionV3MediumImageToImageOutput, + 
SchemaStarVectorInput, + SchemaStarVectorOutput, + SchemaStep1xEditInput, + SchemaStep1xEditOutput, + SchemaStepxEdit2Input, + SchemaStepxEdit2Output, + SchemaSwin2SrInput, + SchemaSwin2SrOutput, + SchemaTheraInput, + SchemaTheraOutput, + SchemaTopazUpscaleImageInput, + SchemaTopazUpscaleImageOutput, + SchemaUnoInput, + SchemaUnoOutput, + SchemaUsoInput, + SchemaUsoOutput, + SchemaV26ImageToImageInput, + SchemaV26ImageToImageOutput, + SchemaViduQ2ReferenceToImageInput, + SchemaViduQ2ReferenceToImageOutput, + SchemaViduReferenceToImageInput, + SchemaViduReferenceToImageOutput, + SchemaWan25PreviewImageToImageInput, + SchemaWan25PreviewImageToImageOutput, + SchemaWanV22A14bImageToImageInput, + SchemaWanV22A14bImageToImageOutput, + SchemaZImageTurboControlnetInput, + SchemaZImageTurboControlnetLoraInput, + SchemaZImageTurboControlnetLoraOutput, + SchemaZImageTurboControlnetOutput, + SchemaZImageTurboImageToImageInput, + SchemaZImageTurboImageToImageLoraInput, + SchemaZImageTurboImageToImageLoraOutput, + SchemaZImageTurboImageToImageOutput, + SchemaZImageTurboInpaintInput, + SchemaZImageTurboInpaintLoraInput, + SchemaZImageTurboInpaintLoraOutput, + SchemaZImageTurboInpaintOutput, +} from './types.gen' + +import type { z } from 'zod' + +export type ImageToImageEndpointMap = { + 'fal-ai/flux-pro/kontext': { + input: SchemaFluxProKontextInput + output: SchemaFluxProKontextOutput + } + 'fal-ai/flux-2/lora/edit': { + input: SchemaFlux2LoraEditInput + output: SchemaFlux2LoraEditOutput + } + 'fal-ai/flux-2/edit': { + input: SchemaFlux2EditInput + output: SchemaFlux2EditOutput + } + 'fal-ai/flux-2-pro/edit': { + input: SchemaFlux2ProEditInput + output: SchemaFlux2ProEditOutput + } + 'fal-ai/flux/dev/image-to-image': { + input: SchemaFluxDevImageToImageInput + output: SchemaFluxDevImageToImageOutput + } + 'fal-ai/aura-sr': { + input: SchemaAuraSrInput + output: SchemaAuraSrOutput + } + 'fal-ai/clarity-upscaler': { + input: SchemaClarityUpscalerInput + output: SchemaClarityUpscalerOutput + } + 'bria/replace-background': { + input: SchemaReplaceBackgroundInput + output: SchemaReplaceBackgroundOutput + } + 'half-moon-ai/ai-face-swap/faceswapimage': { + input: SchemaAiFaceSwapFaceswapimageInput + output: SchemaAiFaceSwapFaceswapimageOutput + } + 'bria/fibo-edit/replace_object_by_text': { + input: SchemaFiboEditReplaceObjectByTextInput + output: SchemaFiboEditReplaceObjectByTextOutput + } + 'bria/fibo-edit/sketch_to_colored_image': { + input: SchemaFiboEditSketchToColoredImageInput + output: SchemaFiboEditSketchToColoredImageOutput + } + 'bria/fibo-edit/restore': { + input: SchemaFiboEditRestoreInput + output: SchemaFiboEditRestoreOutput + } + 'bria/fibo-edit/reseason': { + input: SchemaFiboEditReseasonInput + output: SchemaFiboEditReseasonOutput + } + 'bria/fibo-edit/relight': { + input: SchemaFiboEditRelightInput + output: SchemaFiboEditRelightOutput + } + 'bria/fibo-edit/restyle': { + input: SchemaFiboEditRestyleInput + output: SchemaFiboEditRestyleOutput + } + 'bria/fibo-edit/rewrite_text': { + input: SchemaFiboEditRewriteTextInput + output: SchemaFiboEditRewriteTextOutput + } + 'bria/fibo-edit/erase_by_text': { + input: SchemaFiboEditEraseByTextInput + output: SchemaFiboEditEraseByTextOutput + } + 'bria/fibo-edit/edit': { + input: SchemaFiboEditEditInput + output: SchemaFiboEditEditOutput + } + 'bria/fibo-edit/add_object_by_text': { + input: SchemaFiboEditAddObjectByTextInput + output: SchemaFiboEditAddObjectByTextOutput + } + 'bria/fibo-edit/blend': { + input: SchemaFiboEditBlendInput + output: 
SchemaFiboEditBlendOutput + } + 'bria/fibo-edit/colorize': { + input: SchemaFiboEditColorizeInput + output: SchemaFiboEditColorizeOutput + } + 'fal-ai/flux-2/klein/9b/base/edit/lora': { + input: SchemaFlux2Klein9bBaseEditLoraInput + output: SchemaFlux2Klein9bBaseEditLoraOutput + } + 'fal-ai/flux-2/klein/4b/base/edit/lora': { + input: SchemaFlux2Klein4bBaseEditLoraInput + output: SchemaFlux2Klein4bBaseEditLoraOutput + } + 'fal-ai/flux-2/klein/4b/base/edit': { + input: SchemaFlux2Klein4bBaseEditInput + output: SchemaFlux2Klein4bBaseEditOutput + } + 'fal-ai/flux-2/klein/9b/base/edit': { + input: SchemaFlux2Klein9bBaseEditInput + output: SchemaFlux2Klein9bBaseEditOutput + } + 'fal-ai/flux-2/klein/4b/edit': { + input: SchemaFlux2Klein4bEditInput + output: SchemaFlux2Klein4bEditOutput + } + 'fal-ai/flux-2/klein/9b/edit': { + input: SchemaFlux2Klein9bEditInput + output: SchemaFlux2Klein9bEditOutput + } + 'fal-ai/glm-image/image-to-image': { + input: SchemaGlmImageImageToImageInput + output: SchemaGlmImageImageToImageOutput + } + 'fal-ai/qwen-image-edit-2511-multiple-angles': { + input: SchemaQwenImageEdit2511MultipleAnglesInput + output: SchemaQwenImageEdit2511MultipleAnglesOutput + } + 'fal-ai/qwen-image-edit-2511/lora': { + input: SchemaQwenImageEdit2511LoraInput + output: SchemaQwenImageEdit2511LoraOutput + } + 'half-moon-ai/ai-home/style': { + input: SchemaAiHomeStyleInput + output: SchemaAiHomeStyleOutput + } + 'half-moon-ai/ai-home/edit': { + input: SchemaAiHomeEditInput + output: SchemaAiHomeEditOutput + } + 'fal-ai/qwen-image-layered/lora': { + input: SchemaQwenImageLayeredLoraInput + output: SchemaQwenImageLayeredLoraOutput + } + 'wan/v2.6/image-to-image': { + input: SchemaV26ImageToImageInput + output: SchemaV26ImageToImageOutput + } + 'fal-ai/qwen-image-edit-2511': { + input: SchemaQwenImageEdit2511Input + output: SchemaQwenImageEdit2511Output + } + 'fal-ai/qwen-image-layered': { + input: SchemaQwenImageLayeredInput + output: SchemaQwenImageLayeredOutput + } + 'fal-ai/z-image/turbo/inpaint/lora': { + input: SchemaZImageTurboInpaintLoraInput + output: SchemaZImageTurboInpaintLoraOutput + } + 'fal-ai/z-image/turbo/inpaint': { + input: SchemaZImageTurboInpaintInput + output: SchemaZImageTurboInpaintOutput + } + 'fal-ai/flux-2/flash/edit': { + input: SchemaFlux2FlashEditInput + output: SchemaFlux2FlashEditOutput + } + 'fal-ai/gpt-image-1.5/edit': { + input: SchemaGptImage15EditInput + output: SchemaGptImage15EditOutput + } + 'fal-ai/flux-2/turbo/edit': { + input: SchemaFlux2TurboEditInput + output: SchemaFlux2TurboEditOutput + } + 'fal-ai/flux-2-max/edit': { + input: SchemaFlux2MaxEditInput + output: SchemaFlux2MaxEditOutput + } + 'half-moon-ai/ai-baby-and-aging-generator/multi': { + input: SchemaAiBabyAndAgingGeneratorMultiInput + output: SchemaAiBabyAndAgingGeneratorMultiOutput + } + 'half-moon-ai/ai-baby-and-aging-generator/single': { + input: SchemaAiBabyAndAgingGeneratorSingleInput + output: SchemaAiBabyAndAgingGeneratorSingleOutput + } + 'fal-ai/qwen-image-edit-2509-lora-gallery/shirt-design': { + input: SchemaQwenImageEdit2509LoraGalleryShirtDesignInput + output: SchemaQwenImageEdit2509LoraGalleryShirtDesignOutput + } + 'fal-ai/qwen-image-edit-2509-lora-gallery/remove-lighting': { + input: SchemaQwenImageEdit2509LoraGalleryRemoveLightingInput + output: SchemaQwenImageEdit2509LoraGalleryRemoveLightingOutput + } + 'fal-ai/qwen-image-edit-2509-lora-gallery/remove-element': { + input: SchemaQwenImageEdit2509LoraGalleryRemoveElementInput + output: 
SchemaQwenImageEdit2509LoraGalleryRemoveElementOutput + } + 'fal-ai/qwen-image-edit-2509-lora-gallery/lighting-restoration': { + input: SchemaQwenImageEdit2509LoraGalleryLightingRestorationInput + output: SchemaQwenImageEdit2509LoraGalleryLightingRestorationOutput + } + 'fal-ai/qwen-image-edit-2509-lora-gallery/integrate-product': { + input: SchemaQwenImageEdit2509LoraGalleryIntegrateProductInput + output: SchemaQwenImageEdit2509LoraGalleryIntegrateProductOutput + } + 'fal-ai/qwen-image-edit-2509-lora-gallery/group-photo': { + input: SchemaQwenImageEdit2509LoraGalleryGroupPhotoInput + output: SchemaQwenImageEdit2509LoraGalleryGroupPhotoOutput + } + 'fal-ai/qwen-image-edit-2509-lora-gallery/face-to-full-portrait': { + input: SchemaQwenImageEdit2509LoraGalleryFaceToFullPortraitInput + output: SchemaQwenImageEdit2509LoraGalleryFaceToFullPortraitOutput + } + 'fal-ai/qwen-image-edit-2509-lora-gallery/add-background': { + input: SchemaQwenImageEdit2509LoraGalleryAddBackgroundInput + output: SchemaQwenImageEdit2509LoraGalleryAddBackgroundOutput + } + 'fal-ai/qwen-image-edit-2509-lora-gallery/next-scene': { + input: SchemaQwenImageEdit2509LoraGalleryNextSceneInput + output: SchemaQwenImageEdit2509LoraGalleryNextSceneOutput + } + 'fal-ai/qwen-image-edit-2509-lora-gallery/multiple-angles': { + input: SchemaQwenImageEdit2509LoraGalleryMultipleAnglesInput + output: SchemaQwenImageEdit2509LoraGalleryMultipleAnglesOutput + } + 'fal-ai/qwen-image-edit-2509-lora': { + input: SchemaQwenImageEdit2509LoraInput + output: SchemaQwenImageEdit2509LoraOutput + } + 'fal-ai/qwen-image-edit-2509': { + input: SchemaQwenImageEdit2509Input + output: SchemaQwenImageEdit2509Output + } + 'fal-ai/qwen-image-edit-plus-lora-gallery/lighting-restoration': { + input: SchemaQwenImageEditPlusLoraGalleryLightingRestorationInput + output: SchemaQwenImageEditPlusLoraGalleryLightingRestorationOutput + } + 'fal-ai/moondream3-preview/segment': { + input: SchemaMoondream3PreviewSegmentInput + output: SchemaMoondream3PreviewSegmentOutput + } + 'fal-ai/stepx-edit2': { + input: SchemaStepxEdit2Input + output: SchemaStepxEdit2Output + } + 'fal-ai/z-image/turbo/controlnet/lora': { + input: SchemaZImageTurboControlnetLoraInput + output: SchemaZImageTurboControlnetLoraOutput + } + 'fal-ai/z-image/turbo/controlnet': { + input: SchemaZImageTurboControlnetInput + output: SchemaZImageTurboControlnetOutput + } + 'fal-ai/z-image/turbo/image-to-image/lora': { + input: SchemaZImageTurboImageToImageLoraInput + output: SchemaZImageTurboImageToImageLoraOutput + } + 'fal-ai/z-image/turbo/image-to-image': { + input: SchemaZImageTurboImageToImageInput + output: SchemaZImageTurboImageToImageOutput + } + 'fal-ai/longcat-image/edit': { + input: SchemaLongcatImageEditInput + output: SchemaLongcatImageEditOutput + } + 'fal-ai/bytedance/seedream/v4.5/edit': { + input: SchemaBytedanceSeedreamV45EditInput + output: SchemaBytedanceSeedreamV45EditOutput + } + 'fal-ai/vidu/q2/reference-to-image': { + input: SchemaViduQ2ReferenceToImageInput + output: SchemaViduQ2ReferenceToImageOutput + } + 'fal-ai/kling-image/o1': { + input: SchemaKlingImageO1Input + output: SchemaKlingImageO1Output + } + 'fal-ai/flux-2-lora-gallery/virtual-tryon': { + input: SchemaFlux2LoraGalleryVirtualTryonInput + output: SchemaFlux2LoraGalleryVirtualTryonOutput + } + 'fal-ai/flux-2-lora-gallery/multiple-angles': { + input: SchemaFlux2LoraGalleryMultipleAnglesInput + output: SchemaFlux2LoraGalleryMultipleAnglesOutput + } + 'fal-ai/flux-2-lora-gallery/face-to-full-portrait': { + input: 
SchemaFlux2LoraGalleryFaceToFullPortraitInput + output: SchemaFlux2LoraGalleryFaceToFullPortraitOutput + } + 'fal-ai/flux-2-lora-gallery/apartment-staging': { + input: SchemaFlux2LoraGalleryApartmentStagingInput + output: SchemaFlux2LoraGalleryApartmentStagingOutput + } + 'fal-ai/flux-2-lora-gallery/add-background': { + input: SchemaFlux2LoraGalleryAddBackgroundInput + output: SchemaFlux2LoraGalleryAddBackgroundOutput + } + 'clarityai/crystal-upscaler': { + input: SchemaCrystalUpscalerInput + output: SchemaCrystalUpscalerOutput + } + 'fal-ai/flux-2-flex/edit': { + input: SchemaFlux2FlexEditInput + output: SchemaFlux2FlexEditOutput + } + 'fal-ai/chrono-edit-lora': { + input: SchemaChronoEditLoraInput + output: SchemaChronoEditLoraOutput + } + 'fal-ai/chrono-edit-lora-gallery/paintbrush': { + input: SchemaChronoEditLoraGalleryPaintbrushInput + output: SchemaChronoEditLoraGalleryPaintbrushOutput + } + 'fal-ai/chrono-edit-lora-gallery/upscaler': { + input: SchemaChronoEditLoraGalleryUpscalerInput + output: SchemaChronoEditLoraGalleryUpscalerOutput + } + 'fal-ai/sam-3/image-rle': { + input: SchemaSam3ImageRleInput + output: SchemaSam3ImageRleOutput + } + 'fal-ai/sam-3/image': { + input: SchemaSam3ImageInput + output: SchemaSam3ImageOutput + } + 'fal-ai/gemini-3-pro-image-preview/edit': { + input: SchemaGemini3ProImagePreviewEditInput + output: SchemaGemini3ProImagePreviewEditOutput + } + 'fal-ai/nano-banana-pro/edit': { + input: SchemaNanoBananaProEditInput + output: SchemaNanoBananaProEditOutput + } + 'fal-ai/qwen-image-edit-plus-lora-gallery/multiple-angles': { + input: SchemaQwenImageEditPlusLoraGalleryMultipleAnglesInput + output: SchemaQwenImageEditPlusLoraGalleryMultipleAnglesOutput + } + 'fal-ai/qwen-image-edit-plus-lora-gallery/shirt-design': { + input: SchemaQwenImageEditPlusLoraGalleryShirtDesignInput + output: SchemaQwenImageEditPlusLoraGalleryShirtDesignOutput + } + 'fal-ai/qwen-image-edit-plus-lora-gallery/remove-lighting': { + input: SchemaQwenImageEditPlusLoraGalleryRemoveLightingInput + output: SchemaQwenImageEditPlusLoraGalleryRemoveLightingOutput + } + 'fal-ai/qwen-image-edit-plus-lora-gallery/remove-element': { + input: SchemaQwenImageEditPlusLoraGalleryRemoveElementInput + output: SchemaQwenImageEditPlusLoraGalleryRemoveElementOutput + } + 'fal-ai/qwen-image-edit-plus-lora-gallery/next-scene': { + input: SchemaQwenImageEditPlusLoraGalleryNextSceneInput + output: SchemaQwenImageEditPlusLoraGalleryNextSceneOutput + } + 'fal-ai/qwen-image-edit-plus-lora-gallery/integrate-product': { + input: SchemaQwenImageEditPlusLoraGalleryIntegrateProductInput + output: SchemaQwenImageEditPlusLoraGalleryIntegrateProductOutput + } + 'fal-ai/qwen-image-edit-plus-lora-gallery/group-photo': { + input: SchemaQwenImageEditPlusLoraGalleryGroupPhotoInput + output: SchemaQwenImageEditPlusLoraGalleryGroupPhotoOutput + } + 'fal-ai/qwen-image-edit-plus-lora-gallery/face-to-full-portrait': { + input: SchemaQwenImageEditPlusLoraGalleryFaceToFullPortraitInput + output: SchemaQwenImageEditPlusLoraGalleryFaceToFullPortraitOutput + } + 'fal-ai/qwen-image-edit-plus-lora-gallery/add-background': { + input: SchemaQwenImageEditPlusLoraGalleryAddBackgroundInput + output: SchemaQwenImageEditPlusLoraGalleryAddBackgroundOutput + } + 'fal-ai/reve/fast/remix': { + input: SchemaReveFastRemixInput + output: SchemaReveFastRemixOutput + } + 'fal-ai/reve/fast/edit': { + input: SchemaReveFastEditInput + output: SchemaReveFastEditOutput + } + 'fal-ai/image-apps-v2/outpaint': { + input: SchemaImageAppsV2OutpaintInput + output: 
SchemaImageAppsV2OutpaintOutput + } + 'fal-ai/flux-vision-upscaler': { + input: SchemaFluxVisionUpscalerInput + output: SchemaFluxVisionUpscalerOutput + } + 'fal-ai/emu-3.5-image/edit-image': { + input: SchemaEmu35ImageEditImageInput + output: SchemaEmu35ImageEditImageOutput + } + 'fal-ai/chrono-edit': { + input: SchemaChronoEditInput + output: SchemaChronoEditOutput + } + 'fal-ai/gpt-image-1-mini/edit': { + input: SchemaGptImage1MiniEditInput + output: SchemaGptImage1MiniEditOutput + } + 'fal-ai/reve/remix': { + input: SchemaReveRemixInput + output: SchemaReveRemixOutput + } + 'fal-ai/reve/edit': { + input: SchemaReveEditInput + output: SchemaReveEditOutput + } + 'fal-ai/image2pixel': { + input: SchemaImage2PixelInput + output: SchemaImage2PixelOutput + } + 'fal-ai/dreamomni2/edit': { + input: SchemaDreamomni2EditInput + output: SchemaDreamomni2EditOutput + } + 'fal-ai/qwen-image-edit-plus-lora': { + input: SchemaQwenImageEditPlusLoraInput + output: SchemaQwenImageEditPlusLoraOutput + } + 'fal-ai/lucidflux': { + input: SchemaLucidfluxInput + output: SchemaLucidfluxOutput + } + 'fal-ai/qwen-image-edit/image-to-image': { + input: SchemaQwenImageEditImageToImageInput + output: SchemaQwenImageEditImageToImageOutput + } + 'fal-ai/wan-25-preview/image-to-image': { + input: SchemaWan25PreviewImageToImageInput + output: SchemaWan25PreviewImageToImageOutput + } + 'fal-ai/qwen-image-edit-plus': { + input: SchemaQwenImageEditPlusInput + output: SchemaQwenImageEditPlusOutput + } + 'fal-ai/seedvr/upscale/image': { + input: SchemaSeedvrUpscaleImageInput + output: SchemaSeedvrUpscaleImageOutput + } + 'fal-ai/image-apps-v2/product-holding': { + input: SchemaImageAppsV2ProductHoldingInput + output: SchemaImageAppsV2ProductHoldingOutput + } + 'fal-ai/image-apps-v2/product-photography': { + input: SchemaImageAppsV2ProductPhotographyInput + output: SchemaImageAppsV2ProductPhotographyOutput + } + 'fal-ai/image-apps-v2/virtual-try-on': { + input: SchemaImageAppsV2VirtualTryOnInput + output: SchemaImageAppsV2VirtualTryOnOutput + } + 'fal-ai/image-apps-v2/texture-transform': { + input: SchemaImageAppsV2TextureTransformInput + output: SchemaImageAppsV2TextureTransformOutput + } + 'fal-ai/image-apps-v2/relighting': { + input: SchemaImageAppsV2RelightingInput + output: SchemaImageAppsV2RelightingOutput + } + 'fal-ai/image-apps-v2/style-transfer': { + input: SchemaImageAppsV2StyleTransferInput + output: SchemaImageAppsV2StyleTransferOutput + } + 'fal-ai/image-apps-v2/photo-restoration': { + input: SchemaImageAppsV2PhotoRestorationInput + output: SchemaImageAppsV2PhotoRestorationOutput + } + 'fal-ai/image-apps-v2/portrait-enhance': { + input: SchemaImageAppsV2PortraitEnhanceInput + output: SchemaImageAppsV2PortraitEnhanceOutput + } + 'fal-ai/image-apps-v2/photography-effects': { + input: SchemaImageAppsV2PhotographyEffectsInput + output: SchemaImageAppsV2PhotographyEffectsOutput + } + 'fal-ai/image-apps-v2/perspective': { + input: SchemaImageAppsV2PerspectiveInput + output: SchemaImageAppsV2PerspectiveOutput + } + 'fal-ai/image-apps-v2/object-removal': { + input: SchemaImageAppsV2ObjectRemovalInput + output: SchemaImageAppsV2ObjectRemovalOutput + } + 'fal-ai/image-apps-v2/headshot-photo': { + input: SchemaImageAppsV2HeadshotPhotoInput + output: SchemaImageAppsV2HeadshotPhotoOutput + } + 'fal-ai/image-apps-v2/hair-change': { + input: SchemaImageAppsV2HairChangeInput + output: SchemaImageAppsV2HairChangeOutput + } + 'fal-ai/image-apps-v2/expression-change': { + input: SchemaImageAppsV2ExpressionChangeInput + output: 
SchemaImageAppsV2ExpressionChangeOutput + } + 'fal-ai/image-apps-v2/city-teleport': { + input: SchemaImageAppsV2CityTeleportInput + output: SchemaImageAppsV2CityTeleportOutput + } + 'fal-ai/image-apps-v2/age-modify': { + input: SchemaImageAppsV2AgeModifyInput + output: SchemaImageAppsV2AgeModifyOutput + } + 'fal-ai/image-apps-v2/makeup-application': { + input: SchemaImageAppsV2MakeupApplicationInput + output: SchemaImageAppsV2MakeupApplicationOutput + } + 'fal-ai/qwen-image-edit/inpaint': { + input: SchemaQwenImageEditInpaintInput + output: SchemaQwenImageEditInpaintOutput + } + 'fal-ai/flux/srpo/image-to-image': { + input: SchemaFluxSrpoImageToImageInput + output: SchemaFluxSrpoImageToImageOutput + } + 'fal-ai/flux-1/srpo/image-to-image': { + input: SchemaFlux1SrpoImageToImageInput + output: SchemaFlux1SrpoImageToImageOutput + } + 'fal-ai/qwen-image-edit-lora': { + input: SchemaQwenImageEditLoraInput + output: SchemaQwenImageEditLoraOutput + } + 'fal-ai/vidu/reference-to-image': { + input: SchemaViduReferenceToImageInput + output: SchemaViduReferenceToImageOutput + } + 'fal-ai/bytedance/seedream/v4/edit': { + input: SchemaBytedanceSeedreamV4EditInput + output: SchemaBytedanceSeedreamV4EditOutput + } + 'fal-ai/wan/v2.2-a14b/image-to-image': { + input: SchemaWanV22A14bImageToImageInput + output: SchemaWanV22A14bImageToImageOutput + } + 'fal-ai/uso': { + input: SchemaUsoInput + output: SchemaUsoOutput + } + 'fal-ai/gemini-25-flash-image/edit': { + input: SchemaGemini25FlashImageEditInput + output: SchemaGemini25FlashImageEditOutput + } + 'fal-ai/qwen-image/image-to-image': { + input: SchemaQwenImageImageToImageInput + output: SchemaQwenImageImageToImageOutput + } + 'bria/reimagine/3.2': { + input: SchemaReimagine32Input + output: SchemaReimagine32Output + } + 'fal-ai/nano-banana/edit': { + input: SchemaNanoBananaEditInput + output: SchemaNanoBananaEditOutput + } + 'fal-ai/nextstep-1': { + input: SchemaNextstep1Input + output: SchemaNextstep1Output + } + 'fal-ai/qwen-image-edit': { + input: SchemaQwenImageEditInput + output: SchemaQwenImageEditOutput + } + 'fal-ai/ideogram/character/edit': { + input: SchemaIdeogramCharacterEditInput + output: SchemaIdeogramCharacterEditOutput + } + 'fal-ai/ideogram/character': { + input: SchemaIdeogramCharacterInput + output: SchemaIdeogramCharacterOutput + } + 'fal-ai/ideogram/character/remix': { + input: SchemaIdeogramCharacterRemixInput + output: SchemaIdeogramCharacterRemixOutput + } + 'fal-ai/flux-krea-lora/inpainting': { + input: SchemaFluxKreaLoraInpaintingInput + output: SchemaFluxKreaLoraInpaintingOutput + } + 'fal-ai/flux-krea-lora/image-to-image': { + input: SchemaFluxKreaLoraImageToImageInput + output: SchemaFluxKreaLoraImageToImageOutput + } + 'fal-ai/flux/krea/image-to-image': { + input: SchemaFluxKreaImageToImageInput + output: SchemaFluxKreaImageToImageOutput + } + 'fal-ai/flux/krea/redux': { + input: SchemaFluxKreaReduxInput + output: SchemaFluxKreaReduxOutput + } + 'fal-ai/flux-1/krea/image-to-image': { + input: SchemaFlux1KreaImageToImageInput + output: SchemaFlux1KreaImageToImageOutput + } + 'fal-ai/flux-1/krea/redux': { + input: SchemaFlux1KreaReduxInput + output: SchemaFlux1KreaReduxOutput + } + 'fal-ai/flux-kontext-lora/inpaint': { + input: SchemaFluxKontextLoraInpaintInput + output: SchemaFluxKontextLoraInpaintOutput + } + 'fal-ai/hunyuan_world': { + input: SchemaHunyuanWorldInput + output: SchemaHunyuanWorldOutput + } + 'fal-ai/image-editing/retouch': { + input: SchemaImageEditingRetouchInput + output: SchemaImageEditingRetouchOutput 
+ } + 'fal-ai/hidream-e1-1': { + input: SchemaHidreamE11Input + output: SchemaHidreamE11Output + } + 'fal-ai/rife': { + input: SchemaRifeInput + output: SchemaRifeOutput + } + 'fal-ai/film': { + input: SchemaFilmInput + output: SchemaFilmOutput + } + 'fal-ai/calligrapher': { + input: SchemaCalligrapherInput + output: SchemaCalligrapherOutput + } + 'fal-ai/bria/reimagine': { + input: SchemaBriaReimagineInput + output: SchemaBriaReimagineOutput + } + 'fal-ai/image-editing/realism': { + input: SchemaImageEditingRealismInput + output: SchemaImageEditingRealismOutput + } + 'fal-ai/post-processing/vignette': { + input: SchemaPostProcessingVignetteInput + output: SchemaPostProcessingVignetteOutput + } + 'fal-ai/post-processing/solarize': { + input: SchemaPostProcessingSolarizeInput + output: SchemaPostProcessingSolarizeOutput + } + 'fal-ai/post-processing/sharpen': { + input: SchemaPostProcessingSharpenInput + output: SchemaPostProcessingSharpenOutput + } + 'fal-ai/post-processing/parabolize': { + input: SchemaPostProcessingParabolizeInput + output: SchemaPostProcessingParabolizeOutput + } + 'fal-ai/post-processing/grain': { + input: SchemaPostProcessingGrainInput + output: SchemaPostProcessingGrainOutput + } + 'fal-ai/post-processing/dodge-burn': { + input: SchemaPostProcessingDodgeBurnInput + output: SchemaPostProcessingDodgeBurnOutput + } + 'fal-ai/post-processing/dissolve': { + input: SchemaPostProcessingDissolveInput + output: SchemaPostProcessingDissolveOutput + } + 'fal-ai/post-processing/desaturate': { + input: SchemaPostProcessingDesaturateInput + output: SchemaPostProcessingDesaturateOutput + } + 'fal-ai/post-processing/color-tint': { + input: SchemaPostProcessingColorTintInput + output: SchemaPostProcessingColorTintOutput + } + 'fal-ai/post-processing/color-correction': { + input: SchemaPostProcessingColorCorrectionInput + output: SchemaPostProcessingColorCorrectionOutput + } + 'fal-ai/post-processing/chromatic-aberration': { + input: SchemaPostProcessingChromaticAberrationInput + output: SchemaPostProcessingChromaticAberrationOutput + } + 'fal-ai/post-processing/blur': { + input: SchemaPostProcessingBlurInput + output: SchemaPostProcessingBlurOutput + } + 'fal-ai/image-editing/youtube-thumbnails': { + input: SchemaImageEditingYoutubeThumbnailsInput + output: SchemaImageEditingYoutubeThumbnailsOutput + } + 'fal-ai/topaz/upscale/image': { + input: SchemaTopazUpscaleImageInput + output: SchemaTopazUpscaleImageOutput + } + 'fal-ai/image-editing/broccoli-haircut': { + input: SchemaImageEditingBroccoliHaircutInput + output: SchemaImageEditingBroccoliHaircutOutput + } + 'fal-ai/image-editing/wojak-style': { + input: SchemaImageEditingWojakStyleInput + output: SchemaImageEditingWojakStyleOutput + } + 'fal-ai/image-editing/plushie-style': { + input: SchemaImageEditingPlushieStyleInput + output: SchemaImageEditingPlushieStyleOutput + } + 'fal-ai/flux-kontext-lora': { + input: SchemaFluxKontextLoraInput + output: SchemaFluxKontextLoraOutput + } + 'fal-ai/fashn/tryon/v1.6': { + input: SchemaFashnTryonV16Input + output: SchemaFashnTryonV16Output + } + 'fal-ai/chain-of-zoom': { + input: SchemaChainOfZoomInput + output: SchemaChainOfZoomOutput + } + 'fal-ai/pasd': { + input: SchemaPasdInput + output: SchemaPasdOutput + } + 'fal-ai/object-removal/bbox': { + input: SchemaObjectRemovalBboxInput + output: SchemaObjectRemovalBboxOutput + } + 'fal-ai/object-removal/mask': { + input: SchemaObjectRemovalMaskInput + output: SchemaObjectRemovalMaskOutput + } + 'fal-ai/object-removal': { + input: 
SchemaObjectRemovalInput + output: SchemaObjectRemovalOutput + } + 'fal-ai/recraft/vectorize': { + input: SchemaRecraftVectorizeInput + output: SchemaRecraftVectorizeOutput + } + 'fal-ai/ffmpeg-api/extract-frame': { + input: SchemaFfmpegApiExtractFrameInput + output: SchemaFfmpegApiExtractFrameOutput + } + 'fal-ai/luma-photon/flash/modify': { + input: SchemaLumaPhotonFlashModifyInput + output: SchemaLumaPhotonFlashModifyOutput + } + 'fal-ai/luma-photon/modify': { + input: SchemaLumaPhotonModifyInput + output: SchemaLumaPhotonModifyOutput + } + 'fal-ai/image-editing/reframe': { + input: SchemaImageEditingReframeInput + output: SchemaImageEditingReframeOutput + } + 'fal-ai/image-editing/baby-version': { + input: SchemaImageEditingBabyVersionInput + output: SchemaImageEditingBabyVersionOutput + } + 'fal-ai/luma-photon/flash/reframe': { + input: SchemaLumaPhotonFlashReframeInput + output: SchemaLumaPhotonFlashReframeOutput + } + 'fal-ai/luma-photon/reframe': { + input: SchemaLumaPhotonReframeInput + output: SchemaLumaPhotonReframeOutput + } + 'fal-ai/flux-1/schnell/redux': { + input: SchemaFlux1SchnellReduxInput + output: SchemaFlux1SchnellReduxOutput + } + 'fal-ai/flux-1/dev/redux': { + input: SchemaFlux1DevReduxInput + output: SchemaFlux1DevReduxOutput + } + 'fal-ai/flux-1/dev/image-to-image': { + input: SchemaFlux1DevImageToImageInput + output: SchemaFlux1DevImageToImageOutput + } + 'fal-ai/image-editing/text-removal': { + input: SchemaImageEditingTextRemovalInput + output: SchemaImageEditingTextRemovalOutput + } + 'fal-ai/image-editing/photo-restoration': { + input: SchemaImageEditingPhotoRestorationInput + output: SchemaImageEditingPhotoRestorationOutput + } + 'fal-ai/image-editing/weather-effect': { + input: SchemaImageEditingWeatherEffectInput + output: SchemaImageEditingWeatherEffectOutput + } + 'fal-ai/image-editing/time-of-day': { + input: SchemaImageEditingTimeOfDayInput + output: SchemaImageEditingTimeOfDayOutput + } + 'fal-ai/image-editing/style-transfer': { + input: SchemaImageEditingStyleTransferInput + output: SchemaImageEditingStyleTransferOutput + } + 'fal-ai/image-editing/scene-composition': { + input: SchemaImageEditingSceneCompositionInput + output: SchemaImageEditingSceneCompositionOutput + } + 'fal-ai/image-editing/professional-photo': { + input: SchemaImageEditingProfessionalPhotoInput + output: SchemaImageEditingProfessionalPhotoOutput + } + 'fal-ai/image-editing/object-removal': { + input: SchemaImageEditingObjectRemovalInput + output: SchemaImageEditingObjectRemovalOutput + } + 'fal-ai/image-editing/hair-change': { + input: SchemaImageEditingHairChangeInput + output: SchemaImageEditingHairChangeOutput + } + 'fal-ai/image-editing/face-enhancement': { + input: SchemaImageEditingFaceEnhancementInput + output: SchemaImageEditingFaceEnhancementOutput + } + 'fal-ai/image-editing/expression-change': { + input: SchemaImageEditingExpressionChangeInput + output: SchemaImageEditingExpressionChangeOutput + } + 'fal-ai/image-editing/color-correction': { + input: SchemaImageEditingColorCorrectionInput + output: SchemaImageEditingColorCorrectionOutput + } + 'fal-ai/image-editing/cartoonify': { + input: SchemaImageEditingCartoonifyInput + output: SchemaImageEditingCartoonifyOutput + } + 'fal-ai/image-editing/background-change': { + input: SchemaImageEditingBackgroundChangeInput + output: SchemaImageEditingBackgroundChangeOutput + } + 'fal-ai/image-editing/age-progression': { + input: SchemaImageEditingAgeProgressionInput + output: SchemaImageEditingAgeProgressionOutput + } + 
'fal-ai/flux-pro/kontext/max/multi': { + input: SchemaFluxProKontextMaxMultiInput + output: SchemaFluxProKontextMaxMultiOutput + } + 'fal-ai/flux-pro/kontext/multi': { + input: SchemaFluxProKontextMultiInput + output: SchemaFluxProKontextMultiOutput + } + 'fal-ai/flux-pro/kontext/max': { + input: SchemaFluxProKontextMaxInput + output: SchemaFluxProKontextMaxOutput + } + 'fal-ai/flux-kontext/dev': { + input: SchemaFluxKontextDevInput + output: SchemaFluxKontextDevOutput + } + 'fal-ai/bagel/edit': { + input: SchemaBagelEditInput + output: SchemaBagelEditOutput + } + 'smoretalk-ai/rembg-enhance': { + input: SchemaRembgEnhanceInput + output: SchemaRembgEnhanceOutput + } + 'fal-ai/recraft/upscale/creative': { + input: SchemaRecraftUpscaleCreativeInput + output: SchemaRecraftUpscaleCreativeOutput + } + 'fal-ai/recraft/upscale/crisp': { + input: SchemaRecraftUpscaleCrispInput + output: SchemaRecraftUpscaleCrispOutput + } + 'fal-ai/recraft/v3/image-to-image': { + input: SchemaRecraftV3ImageToImageInput + output: SchemaRecraftV3ImageToImageOutput + } + 'fal-ai/minimax/image-01/subject-reference': { + input: SchemaMinimaxImage01SubjectReferenceInput + output: SchemaMinimaxImage01SubjectReferenceOutput + } + 'fal-ai/hidream-i1-full/image-to-image': { + input: SchemaHidreamI1FullImageToImageInput + output: SchemaHidreamI1FullImageToImageOutput + } + 'fal-ai/ideogram/v3/reframe': { + input: SchemaIdeogramV3ReframeInput + output: SchemaIdeogramV3ReframeOutput + } + 'fal-ai/ideogram/v3/replace-background': { + input: SchemaIdeogramV3ReplaceBackgroundInput + output: SchemaIdeogramV3ReplaceBackgroundOutput + } + 'fal-ai/ideogram/v3/remix': { + input: SchemaIdeogramV3RemixInput + output: SchemaIdeogramV3RemixOutput + } + 'fal-ai/ideogram/v3/edit': { + input: SchemaIdeogramV3EditInput + output: SchemaIdeogramV3EditOutput + } + 'fal-ai/step1x-edit': { + input: SchemaStep1xEditInput + output: SchemaStep1xEditOutput + } + 'fal-ai/image2svg': { + input: SchemaImage2SvgInput + output: SchemaImage2SvgOutput + } + 'fal-ai/uno': { + input: SchemaUnoInput + output: SchemaUnoOutput + } + 'fal-ai/gpt-image-1/edit-image': { + input: SchemaGptImage1EditImageInput + output: SchemaGptImage1EditImageOutput + } + 'rundiffusion-fal/juggernaut-flux-lora/inpainting': { + input: SchemaJuggernautFluxLoraInpaintingInput + output: SchemaJuggernautFluxLoraInpaintingOutput + } + 'fal-ai/fashn/tryon/v1.5': { + input: SchemaFashnTryonV15Input + output: SchemaFashnTryonV15Output + } + 'fal-ai/plushify': { + input: SchemaPlushifyInput + output: SchemaPlushifyOutput + } + 'fal-ai/instant-character': { + input: SchemaInstantCharacterInput + output: SchemaInstantCharacterOutput + } + 'fal-ai/cartoonify': { + input: SchemaCartoonifyInput + output: SchemaCartoonifyOutput + } + 'fal-ai/finegrain-eraser/mask': { + input: SchemaFinegrainEraserMaskInput + output: SchemaFinegrainEraserMaskOutput + } + 'fal-ai/finegrain-eraser/bbox': { + input: SchemaFinegrainEraserBboxInput + output: SchemaFinegrainEraserBboxOutput + } + 'fal-ai/finegrain-eraser': { + input: SchemaFinegrainEraserInput + output: SchemaFinegrainEraserOutput + } + 'fal-ai/star-vector': { + input: SchemaStarVectorInput + output: SchemaStarVectorOutput + } + 'fal-ai/ghiblify': { + input: SchemaGhiblifyInput + output: SchemaGhiblifyOutput + } + 'fal-ai/thera': { + input: SchemaTheraInput + output: SchemaTheraOutput + } + 'fal-ai/mix-dehaze-net': { + input: SchemaMixDehazeNetInput + output: SchemaMixDehazeNetOutput + } + 'fal-ai/gemini-flash-edit/multi': { + input: 
SchemaGeminiFlashEditMultiInput + output: SchemaGeminiFlashEditMultiOutput + } + 'fal-ai/gemini-flash-edit': { + input: SchemaGeminiFlashEditInput + output: SchemaGeminiFlashEditOutput + } + 'fal-ai/invisible-watermark': { + input: SchemaInvisibleWatermarkInput + output: SchemaInvisibleWatermarkOutput + } + 'rundiffusion-fal/juggernaut-flux/pro/image-to-image': { + input: SchemaJuggernautFluxProImageToImageInput + output: SchemaJuggernautFluxProImageToImageOutput + } + 'rundiffusion-fal/juggernaut-flux/base/image-to-image': { + input: SchemaJuggernautFluxBaseImageToImageInput + output: SchemaJuggernautFluxBaseImageToImageOutput + } + 'fal-ai/docres/dewarp': { + input: SchemaDocresDewarpInput + output: SchemaDocresDewarpOutput + } + 'fal-ai/docres': { + input: SchemaDocresInput + output: SchemaDocresOutput + } + 'fal-ai/swin2sr': { + input: SchemaSwin2SrInput + output: SchemaSwin2SrOutput + } + 'fal-ai/ideogram/v2a/remix': { + input: SchemaIdeogramV2aRemixInput + output: SchemaIdeogramV2aRemixOutput + } + 'fal-ai/ideogram/v2a/turbo/remix': { + input: SchemaIdeogramV2aTurboRemixInput + output: SchemaIdeogramV2aTurboRemixOutput + } + 'fal-ai/evf-sam': { + input: SchemaEvfSamInput + output: SchemaEvfSamOutput + } + 'fal-ai/ddcolor': { + input: SchemaDdcolorInput + output: SchemaDdcolorOutput + } + 'fal-ai/sam2/auto-segment': { + input: SchemaSam2AutoSegmentInput + output: SchemaSam2AutoSegmentOutput + } + 'fal-ai/drct-super-resolution': { + input: SchemaDrctSuperResolutionInput + output: SchemaDrctSuperResolutionOutput + } + 'fal-ai/nafnet/deblur': { + input: SchemaNafnetDeblurInput + output: SchemaNafnetDeblurOutput + } + 'fal-ai/nafnet/denoise': { + input: SchemaNafnetDenoiseInput + output: SchemaNafnetDenoiseOutput + } + 'fal-ai/post-processing': { + input: SchemaPostProcessingInput + output: SchemaPostProcessingOutput + } + 'fal-ai/flowedit': { + input: SchemaFloweditInput + output: SchemaFloweditOutput + } + 'fal-ai/flux-control-lora-depth/image-to-image': { + input: SchemaFluxControlLoraDepthImageToImageInput + output: SchemaFluxControlLoraDepthImageToImageOutput + } + 'fal-ai/ben/v2/image': { + input: SchemaBenV2ImageInput + output: SchemaBenV2ImageOutput + } + 'fal-ai/flux-control-lora-canny/image-to-image': { + input: SchemaFluxControlLoraCannyImageToImageInput + output: SchemaFluxControlLoraCannyImageToImageOutput + } + 'fal-ai/ideogram/upscale': { + input: SchemaIdeogramUpscaleInput + output: SchemaIdeogramUpscaleOutput + } + 'fal-ai/codeformer': { + input: SchemaCodeformerInput + output: SchemaCodeformerOutput + } + 'fal-ai/kling/v1-5/kolors-virtual-try-on': { + input: SchemaKlingV15KolorsVirtualTryOnInput + output: SchemaKlingV15KolorsVirtualTryOnOutput + } + 'fal-ai/flux-lora-canny': { + input: SchemaFluxLoraCannyInput + output: SchemaFluxLoraCannyOutput + } + 'fal-ai/flux-pro/v1/fill-finetuned': { + input: SchemaFluxProV1FillFinetunedInput + output: SchemaFluxProV1FillFinetunedOutput + } + 'fal-ai/moondream-next/detection': { + input: SchemaMoondreamNextDetectionInput + output: SchemaMoondreamNextDetectionOutput + } + 'fal-ai/bria/expand': { + input: SchemaBriaExpandInput + output: SchemaBriaExpandOutput + } + 'fal-ai/bria/genfill': { + input: SchemaBriaGenfillInput + output: SchemaBriaGenfillOutput + } + 'fal-ai/flux-lora-fill': { + input: SchemaFluxLoraFillInput + output: SchemaFluxLoraFillOutput + } + 'fal-ai/bria/background/replace': { + input: SchemaBriaBackgroundReplaceInput + output: SchemaBriaBackgroundReplaceOutput + } + 'fal-ai/bria/eraser': { + input: 
SchemaBriaEraserInput + output: SchemaBriaEraserOutput + } + 'fal-ai/bria/product-shot': { + input: SchemaBriaProductShotInput + output: SchemaBriaProductShotOutput + } + 'fal-ai/bria/background/remove': { + input: SchemaBriaBackgroundRemoveInput + output: SchemaBriaBackgroundRemoveOutput + } + 'fal-ai/cat-vton': { + input: SchemaCatVtonInput + output: SchemaCatVtonOutput + } + 'fal-ai/leffa/pose-transfer': { + input: SchemaLeffaPoseTransferInput + output: SchemaLeffaPoseTransferOutput + } + 'fal-ai/leffa/virtual-tryon': { + input: SchemaLeffaVirtualTryonInput + output: SchemaLeffaVirtualTryonOutput + } + 'fal-ai/ideogram/v2/edit': { + input: SchemaIdeogramV2EditInput + output: SchemaIdeogramV2EditOutput + } + 'fal-ai/ideogram/v2/turbo/edit': { + input: SchemaIdeogramV2TurboEditInput + output: SchemaIdeogramV2TurboEditOutput + } + 'fal-ai/ideogram/v2/turbo/remix': { + input: SchemaIdeogramV2TurboRemixInput + output: SchemaIdeogramV2TurboRemixOutput + } + 'fal-ai/ideogram/v2/remix': { + input: SchemaIdeogramV2RemixInput + output: SchemaIdeogramV2RemixOutput + } + 'fal-ai/flux/schnell/redux': { + input: SchemaFluxSchnellReduxInput + output: SchemaFluxSchnellReduxOutput + } + 'fal-ai/flux-pro/v1.1/redux': { + input: SchemaFluxProV11ReduxInput + output: SchemaFluxProV11ReduxOutput + } + 'fal-ai/flux/dev/redux': { + input: SchemaFluxDevReduxInput + output: SchemaFluxDevReduxOutput + } + 'fal-ai/flux-pro/v1.1-ultra/redux': { + input: SchemaFluxProV11UltraReduxInput + output: SchemaFluxProV11UltraReduxOutput + } + 'fal-ai/flux-lora-depth': { + input: SchemaFluxLoraDepthInput + output: SchemaFluxLoraDepthOutput + } + 'fal-ai/flux-pro/v1/fill': { + input: SchemaFluxProV1FillInput + output: SchemaFluxProV1FillOutput + } + 'fal-ai/kolors/image-to-image': { + input: SchemaKolorsImageToImageInput + output: SchemaKolorsImageToImageOutput + } + 'fal-ai/iclight-v2': { + input: SchemaIclightV2Input + output: SchemaIclightV2Output + } + 'fal-ai/flux-differential-diffusion': { + input: SchemaFluxDifferentialDiffusionInput + output: SchemaFluxDifferentialDiffusionOutput + } + 'fal-ai/flux-pulid': { + input: SchemaFluxPulidInput + output: SchemaFluxPulidOutput + } + 'fal-ai/birefnet/v2': { + input: SchemaBirefnetV2Input + output: SchemaBirefnetV2Output + } + 'fal-ai/live-portrait/image': { + input: SchemaLivePortraitImageInput + output: SchemaLivePortraitImageOutput + } + 'fal-ai/flux-general/rf-inversion': { + input: SchemaFluxGeneralRfInversionInput + output: SchemaFluxGeneralRfInversionOutput + } + 'fal-ai/image-preprocessors/hed': { + input: SchemaImagePreprocessorsHedInput + output: SchemaImagePreprocessorsHedOutput + } + 'fal-ai/image-preprocessors/depth-anything/v2': { + input: SchemaImagePreprocessorsDepthAnythingV2Input + output: SchemaImagePreprocessorsDepthAnythingV2Output + } + 'fal-ai/image-preprocessors/scribble': { + input: SchemaImagePreprocessorsScribbleInput + output: SchemaImagePreprocessorsScribbleOutput + } + 'fal-ai/image-preprocessors/mlsd': { + input: SchemaImagePreprocessorsMlsdInput + output: SchemaImagePreprocessorsMlsdOutput + } + 'fal-ai/image-preprocessors/sam': { + input: SchemaImagePreprocessorsSamInput + output: SchemaImagePreprocessorsSamOutput + } + 'fal-ai/image-preprocessors/midas': { + input: SchemaImagePreprocessorsMidasInput + output: SchemaImagePreprocessorsMidasOutput + } + 'fal-ai/image-preprocessors/teed': { + input: SchemaImagePreprocessorsTeedInput + output: SchemaImagePreprocessorsTeedOutput + } + 'fal-ai/image-preprocessors/lineart': { + input: 
SchemaImagePreprocessorsLineartInput + output: SchemaImagePreprocessorsLineartOutput + } + 'fal-ai/image-preprocessors/zoe': { + input: SchemaImagePreprocessorsZoeInput + output: SchemaImagePreprocessorsZoeOutput + } + 'fal-ai/image-preprocessors/pidi': { + input: SchemaImagePreprocessorsPidiInput + output: SchemaImagePreprocessorsPidiOutput + } + 'fal-ai/sam2/image': { + input: SchemaSam2ImageInput + output: SchemaSam2ImageOutput + } + 'fal-ai/flux-general/image-to-image': { + input: SchemaFluxGeneralImageToImageInput + output: SchemaFluxGeneralImageToImageOutput + } + 'fal-ai/flux-general/inpainting': { + input: SchemaFluxGeneralInpaintingInput + output: SchemaFluxGeneralInpaintingOutput + } + 'fal-ai/flux-general/differential-diffusion': { + input: SchemaFluxGeneralDifferentialDiffusionInput + output: SchemaFluxGeneralDifferentialDiffusionOutput + } + 'fal-ai/flux-lora/image-to-image': { + input: SchemaFluxLoraImageToImageInput + output: SchemaFluxLoraImageToImageOutput + } + 'fal-ai/sdxl-controlnet-union/inpainting': { + input: SchemaSdxlControlnetUnionInpaintingInput + output: SchemaSdxlControlnetUnionInpaintingOutput + } + 'fal-ai/sdxl-controlnet-union/image-to-image': { + input: SchemaSdxlControlnetUnionImageToImageInput + output: SchemaSdxlControlnetUnionImageToImageOutput + } + 'fal-ai/era-3d': { + input: SchemaEra3dInput + output: SchemaEra3dOutput + } + 'fal-ai/florence-2-large/dense-region-caption': { + input: SchemaFlorence2LargeDenseRegionCaptionInput + output: SchemaFlorence2LargeDenseRegionCaptionOutput + } + 'fal-ai/florence-2-large/referring-expression-segmentation': { + input: SchemaFlorence2LargeReferringExpressionSegmentationInput + output: SchemaFlorence2LargeReferringExpressionSegmentationOutput + } + 'fal-ai/florence-2-large/object-detection': { + input: SchemaFlorence2LargeObjectDetectionInput + output: SchemaFlorence2LargeObjectDetectionOutput + } + 'fal-ai/florence-2-large/open-vocabulary-detection': { + input: SchemaFlorence2LargeOpenVocabularyDetectionInput + output: SchemaFlorence2LargeOpenVocabularyDetectionOutput + } + 'fal-ai/florence-2-large/caption-to-phrase-grounding': { + input: SchemaFlorence2LargeCaptionToPhraseGroundingInput + output: SchemaFlorence2LargeCaptionToPhraseGroundingOutput + } + 'fal-ai/florence-2-large/region-proposal': { + input: SchemaFlorence2LargeRegionProposalInput + output: SchemaFlorence2LargeRegionProposalOutput + } + 'fal-ai/florence-2-large/ocr-with-region': { + input: SchemaFlorence2LargeOcrWithRegionInput + output: SchemaFlorence2LargeOcrWithRegionOutput + } + 'fal-ai/florence-2-large/region-to-segmentation': { + input: SchemaFlorence2LargeRegionToSegmentationInput + output: SchemaFlorence2LargeRegionToSegmentationOutput + } + 'fal-ai/stable-diffusion-v3-medium/image-to-image': { + input: SchemaStableDiffusionV3MediumImageToImageInput + output: SchemaStableDiffusionV3MediumImageToImageOutput + } + 'fal-ai/dwpose': { + input: SchemaDwposeInput + output: SchemaDwposeOutput + } + 'fal-ai/sd15-depth-controlnet': { + input: SchemaSd15DepthControlnetInput + output: SchemaSd15DepthControlnetOutput + } + 'fal-ai/ccsr': { + input: SchemaCcsrInput + output: SchemaCcsrOutput + } + 'fal-ai/omni-zero': { + input: SchemaOmniZeroInput + output: SchemaOmniZeroOutput + } + 'fal-ai/ip-adapter-face-id': { + input: SchemaIpAdapterFaceIdInput + output: SchemaIpAdapterFaceIdOutput + } + 'fal-ai/lora/inpaint': { + input: SchemaLoraInpaintInput + output: SchemaLoraInpaintOutput + } + 'fal-ai/lora/image-to-image': { + input: 
SchemaLoraImageToImageInput + output: SchemaLoraImageToImageOutput + } + 'fal-ai/fast-sdxl/image-to-image': { + input: SchemaFastSdxlImageToImageInput + output: SchemaFastSdxlImageToImageOutput + } + 'fal-ai/fast-sdxl/inpainting': { + input: SchemaFastSdxlInpaintingInput + output: SchemaFastSdxlInpaintingOutput + } + 'fal-ai/face-to-sticker': { + input: SchemaFaceToStickerInput + output: SchemaFaceToStickerOutput + } + 'fal-ai/photomaker': { + input: SchemaPhotomakerInput + output: SchemaPhotomakerOutput + } + 'fal-ai/creative-upscaler': { + input: SchemaCreativeUpscalerInput + output: SchemaCreativeUpscalerOutput + } + 'fal-ai/birefnet': { + input: SchemaBirefnetInput + output: SchemaBirefnetOutput + } + 'fal-ai/playground-v25/image-to-image': { + input: SchemaPlaygroundV25ImageToImageInput + output: SchemaPlaygroundV25ImageToImageOutput + } + 'fal-ai/fast-lightning-sdxl/image-to-image': { + input: SchemaFastLightningSdxlImageToImageInput + output: SchemaFastLightningSdxlImageToImageOutput + } + 'fal-ai/fast-lightning-sdxl/inpainting': { + input: SchemaFastLightningSdxlInpaintingInput + output: SchemaFastLightningSdxlInpaintingOutput + } + 'fal-ai/playground-v25/inpainting': { + input: SchemaPlaygroundV25InpaintingInput + output: SchemaPlaygroundV25InpaintingOutput + } + 'fal-ai/fast-lcm-diffusion/inpainting': { + input: SchemaFastLcmDiffusionInpaintingInput + output: SchemaFastLcmDiffusionInpaintingOutput + } + 'fal-ai/fast-lcm-diffusion/image-to-image': { + input: SchemaFastLcmDiffusionImageToImageInput + output: SchemaFastLcmDiffusionImageToImageOutput + } + 'fal-ai/retoucher': { + input: SchemaRetoucherInput + output: SchemaRetoucherOutput + } + 'fal-ai/imageutils/depth': { + input: SchemaImageutilsDepthInput + output: SchemaImageutilsDepthOutput + } + 'fal-ai/imageutils/marigold-depth': { + input: SchemaImageutilsMarigoldDepthInput + output: SchemaImageutilsMarigoldDepthOutput + } + 'fal-ai/pulid': { + input: SchemaPulidInput + output: SchemaPulidOutput + } + 'fal-ai/fast-sdxl-controlnet-canny/image-to-image': { + input: SchemaFastSdxlControlnetCannyImageToImageInput + output: SchemaFastSdxlControlnetCannyImageToImageOutput + } + 'fal-ai/fast-sdxl-controlnet-canny/inpainting': { + input: SchemaFastSdxlControlnetCannyInpaintingInput + output: SchemaFastSdxlControlnetCannyInpaintingOutput + } + 'fal-ai/lcm-sd15-i2i': { + input: SchemaLcmSd15I2iInput + output: SchemaLcmSd15I2iOutput + } + 'fal-ai/inpaint': { + input: SchemaInpaintInput + output: SchemaInpaintOutput + } + 'fal-ai/esrgan': { + input: SchemaEsrganInput + output: SchemaEsrganOutput + } + 'fal-ai/imageutils/rembg': { + input: SchemaImageutilsRembgInput + output: SchemaImageutilsRembgOutput + } +} + +/** Union type of all image-to-image model endpoint IDs */ +export type ImageToImageModel = keyof ImageToImageEndpointMap + +export const ImageToImageSchemaMap: Record< + ImageToImageModel, + { input: z.ZodSchema; output: z.ZodSchema } +> = { + ['fal-ai/flux-pro/kontext']: { + input: zSchemaFluxProKontextInput, + output: zSchemaFluxProKontextOutput, + }, + ['fal-ai/flux-2/lora/edit']: { + input: zSchemaFlux2LoraEditInput, + output: zSchemaFlux2LoraEditOutput, + }, + ['fal-ai/flux-2/edit']: { + input: zSchemaFlux2EditInput, + output: zSchemaFlux2EditOutput, + }, + ['fal-ai/flux-2-pro/edit']: { + input: zSchemaFlux2ProEditInput, + output: zSchemaFlux2ProEditOutput, + }, + ['fal-ai/flux/dev/image-to-image']: { + input: zSchemaFluxDevImageToImageInput, + output: zSchemaFluxDevImageToImageOutput, + }, + ['fal-ai/aura-sr']: { + input: 
zSchemaAuraSrInput, + output: zSchemaAuraSrOutput, + }, + ['fal-ai/clarity-upscaler']: { + input: zSchemaClarityUpscalerInput, + output: zSchemaClarityUpscalerOutput, + }, + ['bria/replace-background']: { + input: zSchemaReplaceBackgroundInput, + output: zSchemaReplaceBackgroundOutput, + }, + ['half-moon-ai/ai-face-swap/faceswapimage']: { + input: zSchemaAiFaceSwapFaceswapimageInput, + output: zSchemaAiFaceSwapFaceswapimageOutput, + }, + ['bria/fibo-edit/replace_object_by_text']: { + input: zSchemaFiboEditReplaceObjectByTextInput, + output: zSchemaFiboEditReplaceObjectByTextOutput, + }, + ['bria/fibo-edit/sketch_to_colored_image']: { + input: zSchemaFiboEditSketchToColoredImageInput, + output: zSchemaFiboEditSketchToColoredImageOutput, + }, + ['bria/fibo-edit/restore']: { + input: zSchemaFiboEditRestoreInput, + output: zSchemaFiboEditRestoreOutput, + }, + ['bria/fibo-edit/reseason']: { + input: zSchemaFiboEditReseasonInput, + output: zSchemaFiboEditReseasonOutput, + }, + ['bria/fibo-edit/relight']: { + input: zSchemaFiboEditRelightInput, + output: zSchemaFiboEditRelightOutput, + }, + ['bria/fibo-edit/restyle']: { + input: zSchemaFiboEditRestyleInput, + output: zSchemaFiboEditRestyleOutput, + }, + ['bria/fibo-edit/rewrite_text']: { + input: zSchemaFiboEditRewriteTextInput, + output: zSchemaFiboEditRewriteTextOutput, + }, + ['bria/fibo-edit/erase_by_text']: { + input: zSchemaFiboEditEraseByTextInput, + output: zSchemaFiboEditEraseByTextOutput, + }, + ['bria/fibo-edit/edit']: { + input: zSchemaFiboEditEditInput, + output: zSchemaFiboEditEditOutput, + }, + ['bria/fibo-edit/add_object_by_text']: { + input: zSchemaFiboEditAddObjectByTextInput, + output: zSchemaFiboEditAddObjectByTextOutput, + }, + ['bria/fibo-edit/blend']: { + input: zSchemaFiboEditBlendInput, + output: zSchemaFiboEditBlendOutput, + }, + ['bria/fibo-edit/colorize']: { + input: zSchemaFiboEditColorizeInput, + output: zSchemaFiboEditColorizeOutput, + }, + ['fal-ai/flux-2/klein/9b/base/edit/lora']: { + input: zSchemaFlux2Klein9bBaseEditLoraInput, + output: zSchemaFlux2Klein9bBaseEditLoraOutput, + }, + ['fal-ai/flux-2/klein/4b/base/edit/lora']: { + input: zSchemaFlux2Klein4bBaseEditLoraInput, + output: zSchemaFlux2Klein4bBaseEditLoraOutput, + }, + ['fal-ai/flux-2/klein/4b/base/edit']: { + input: zSchemaFlux2Klein4bBaseEditInput, + output: zSchemaFlux2Klein4bBaseEditOutput, + }, + ['fal-ai/flux-2/klein/9b/base/edit']: { + input: zSchemaFlux2Klein9bBaseEditInput, + output: zSchemaFlux2Klein9bBaseEditOutput, + }, + ['fal-ai/flux-2/klein/4b/edit']: { + input: zSchemaFlux2Klein4bEditInput, + output: zSchemaFlux2Klein4bEditOutput, + }, + ['fal-ai/flux-2/klein/9b/edit']: { + input: zSchemaFlux2Klein9bEditInput, + output: zSchemaFlux2Klein9bEditOutput, + }, + ['fal-ai/glm-image/image-to-image']: { + input: zSchemaGlmImageImageToImageInput, + output: zSchemaGlmImageImageToImageOutput, + }, + ['fal-ai/qwen-image-edit-2511-multiple-angles']: { + input: zSchemaQwenImageEdit2511MultipleAnglesInput, + output: zSchemaQwenImageEdit2511MultipleAnglesOutput, + }, + ['fal-ai/qwen-image-edit-2511/lora']: { + input: zSchemaQwenImageEdit2511LoraInput, + output: zSchemaQwenImageEdit2511LoraOutput, + }, + ['half-moon-ai/ai-home/style']: { + input: zSchemaAiHomeStyleInput, + output: zSchemaAiHomeStyleOutput, + }, + ['half-moon-ai/ai-home/edit']: { + input: zSchemaAiHomeEditInput, + output: zSchemaAiHomeEditOutput, + }, + ['fal-ai/qwen-image-layered/lora']: { + input: zSchemaQwenImageLayeredLoraInput, + output: zSchemaQwenImageLayeredLoraOutput, + }, + 
['wan/v2.6/image-to-image']: { + input: zSchemaV26ImageToImageInput, + output: zSchemaV26ImageToImageOutput, + }, + ['fal-ai/qwen-image-edit-2511']: { + input: zSchemaQwenImageEdit2511Input, + output: zSchemaQwenImageEdit2511Output, + }, + ['fal-ai/qwen-image-layered']: { + input: zSchemaQwenImageLayeredInput, + output: zSchemaQwenImageLayeredOutput, + }, + ['fal-ai/z-image/turbo/inpaint/lora']: { + input: zSchemaZImageTurboInpaintLoraInput, + output: zSchemaZImageTurboInpaintLoraOutput, + }, + ['fal-ai/z-image/turbo/inpaint']: { + input: zSchemaZImageTurboInpaintInput, + output: zSchemaZImageTurboInpaintOutput, + }, + ['fal-ai/flux-2/flash/edit']: { + input: zSchemaFlux2FlashEditInput, + output: zSchemaFlux2FlashEditOutput, + }, + ['fal-ai/gpt-image-1.5/edit']: { + input: zSchemaGptImage15EditInput, + output: zSchemaGptImage15EditOutput, + }, + ['fal-ai/flux-2/turbo/edit']: { + input: zSchemaFlux2TurboEditInput, + output: zSchemaFlux2TurboEditOutput, + }, + ['fal-ai/flux-2-max/edit']: { + input: zSchemaFlux2MaxEditInput, + output: zSchemaFlux2MaxEditOutput, + }, + ['half-moon-ai/ai-baby-and-aging-generator/multi']: { + input: zSchemaAiBabyAndAgingGeneratorMultiInput, + output: zSchemaAiBabyAndAgingGeneratorMultiOutput, + }, + ['half-moon-ai/ai-baby-and-aging-generator/single']: { + input: zSchemaAiBabyAndAgingGeneratorSingleInput, + output: zSchemaAiBabyAndAgingGeneratorSingleOutput, + }, + ['fal-ai/qwen-image-edit-2509-lora-gallery/shirt-design']: { + input: zSchemaQwenImageEdit2509LoraGalleryShirtDesignInput, + output: zSchemaQwenImageEdit2509LoraGalleryShirtDesignOutput, + }, + ['fal-ai/qwen-image-edit-2509-lora-gallery/remove-lighting']: { + input: zSchemaQwenImageEdit2509LoraGalleryRemoveLightingInput, + output: zSchemaQwenImageEdit2509LoraGalleryRemoveLightingOutput, + }, + ['fal-ai/qwen-image-edit-2509-lora-gallery/remove-element']: { + input: zSchemaQwenImageEdit2509LoraGalleryRemoveElementInput, + output: zSchemaQwenImageEdit2509LoraGalleryRemoveElementOutput, + }, + ['fal-ai/qwen-image-edit-2509-lora-gallery/lighting-restoration']: { + input: zSchemaQwenImageEdit2509LoraGalleryLightingRestorationInput, + output: zSchemaQwenImageEdit2509LoraGalleryLightingRestorationOutput, + }, + ['fal-ai/qwen-image-edit-2509-lora-gallery/integrate-product']: { + input: zSchemaQwenImageEdit2509LoraGalleryIntegrateProductInput, + output: zSchemaQwenImageEdit2509LoraGalleryIntegrateProductOutput, + }, + ['fal-ai/qwen-image-edit-2509-lora-gallery/group-photo']: { + input: zSchemaQwenImageEdit2509LoraGalleryGroupPhotoInput, + output: zSchemaQwenImageEdit2509LoraGalleryGroupPhotoOutput, + }, + ['fal-ai/qwen-image-edit-2509-lora-gallery/face-to-full-portrait']: { + input: zSchemaQwenImageEdit2509LoraGalleryFaceToFullPortraitInput, + output: zSchemaQwenImageEdit2509LoraGalleryFaceToFullPortraitOutput, + }, + ['fal-ai/qwen-image-edit-2509-lora-gallery/add-background']: { + input: zSchemaQwenImageEdit2509LoraGalleryAddBackgroundInput, + output: zSchemaQwenImageEdit2509LoraGalleryAddBackgroundOutput, + }, + ['fal-ai/qwen-image-edit-2509-lora-gallery/next-scene']: { + input: zSchemaQwenImageEdit2509LoraGalleryNextSceneInput, + output: zSchemaQwenImageEdit2509LoraGalleryNextSceneOutput, + }, + ['fal-ai/qwen-image-edit-2509-lora-gallery/multiple-angles']: { + input: zSchemaQwenImageEdit2509LoraGalleryMultipleAnglesInput, + output: zSchemaQwenImageEdit2509LoraGalleryMultipleAnglesOutput, + }, + ['fal-ai/qwen-image-edit-2509-lora']: { + input: zSchemaQwenImageEdit2509LoraInput, + output: 
zSchemaQwenImageEdit2509LoraOutput, + }, + ['fal-ai/qwen-image-edit-2509']: { + input: zSchemaQwenImageEdit2509Input, + output: zSchemaQwenImageEdit2509Output, + }, + ['fal-ai/qwen-image-edit-plus-lora-gallery/lighting-restoration']: { + input: zSchemaQwenImageEditPlusLoraGalleryLightingRestorationInput, + output: zSchemaQwenImageEditPlusLoraGalleryLightingRestorationOutput, + }, + ['fal-ai/moondream3-preview/segment']: { + input: zSchemaMoondream3PreviewSegmentInput, + output: zSchemaMoondream3PreviewSegmentOutput, + }, + ['fal-ai/stepx-edit2']: { + input: zSchemaStepxEdit2Input, + output: zSchemaStepxEdit2Output, + }, + ['fal-ai/z-image/turbo/controlnet/lora']: { + input: zSchemaZImageTurboControlnetLoraInput, + output: zSchemaZImageTurboControlnetLoraOutput, + }, + ['fal-ai/z-image/turbo/controlnet']: { + input: zSchemaZImageTurboControlnetInput, + output: zSchemaZImageTurboControlnetOutput, + }, + ['fal-ai/z-image/turbo/image-to-image/lora']: { + input: zSchemaZImageTurboImageToImageLoraInput, + output: zSchemaZImageTurboImageToImageLoraOutput, + }, + ['fal-ai/z-image/turbo/image-to-image']: { + input: zSchemaZImageTurboImageToImageInput, + output: zSchemaZImageTurboImageToImageOutput, + }, + ['fal-ai/longcat-image/edit']: { + input: zSchemaLongcatImageEditInput, + output: zSchemaLongcatImageEditOutput, + }, + ['fal-ai/bytedance/seedream/v4.5/edit']: { + input: zSchemaBytedanceSeedreamV45EditInput, + output: zSchemaBytedanceSeedreamV45EditOutput, + }, + ['fal-ai/vidu/q2/reference-to-image']: { + input: zSchemaViduQ2ReferenceToImageInput, + output: zSchemaViduQ2ReferenceToImageOutput, + }, + ['fal-ai/kling-image/o1']: { + input: zSchemaKlingImageO1Input, + output: zSchemaKlingImageO1Output, + }, + ['fal-ai/flux-2-lora-gallery/virtual-tryon']: { + input: zSchemaFlux2LoraGalleryVirtualTryonInput, + output: zSchemaFlux2LoraGalleryVirtualTryonOutput, + }, + ['fal-ai/flux-2-lora-gallery/multiple-angles']: { + input: zSchemaFlux2LoraGalleryMultipleAnglesInput, + output: zSchemaFlux2LoraGalleryMultipleAnglesOutput, + }, + ['fal-ai/flux-2-lora-gallery/face-to-full-portrait']: { + input: zSchemaFlux2LoraGalleryFaceToFullPortraitInput, + output: zSchemaFlux2LoraGalleryFaceToFullPortraitOutput, + }, + ['fal-ai/flux-2-lora-gallery/apartment-staging']: { + input: zSchemaFlux2LoraGalleryApartmentStagingInput, + output: zSchemaFlux2LoraGalleryApartmentStagingOutput, + }, + ['fal-ai/flux-2-lora-gallery/add-background']: { + input: zSchemaFlux2LoraGalleryAddBackgroundInput, + output: zSchemaFlux2LoraGalleryAddBackgroundOutput, + }, + ['clarityai/crystal-upscaler']: { + input: zSchemaCrystalUpscalerInput, + output: zSchemaCrystalUpscalerOutput, + }, + ['fal-ai/flux-2-flex/edit']: { + input: zSchemaFlux2FlexEditInput, + output: zSchemaFlux2FlexEditOutput, + }, + ['fal-ai/chrono-edit-lora']: { + input: zSchemaChronoEditLoraInput, + output: zSchemaChronoEditLoraOutput, + }, + ['fal-ai/chrono-edit-lora-gallery/paintbrush']: { + input: zSchemaChronoEditLoraGalleryPaintbrushInput, + output: zSchemaChronoEditLoraGalleryPaintbrushOutput, + }, + ['fal-ai/chrono-edit-lora-gallery/upscaler']: { + input: zSchemaChronoEditLoraGalleryUpscalerInput, + output: zSchemaChronoEditLoraGalleryUpscalerOutput, + }, + ['fal-ai/sam-3/image-rle']: { + input: zSchemaSam3ImageRleInput, + output: zSchemaSam3ImageRleOutput, + }, + ['fal-ai/sam-3/image']: { + input: zSchemaSam3ImageInput, + output: zSchemaSam3ImageOutput, + }, + ['fal-ai/gemini-3-pro-image-preview/edit']: { + input: zSchemaGemini3ProImagePreviewEditInput, + output: 
zSchemaGemini3ProImagePreviewEditOutput, + }, + ['fal-ai/nano-banana-pro/edit']: { + input: zSchemaNanoBananaProEditInput, + output: zSchemaNanoBananaProEditOutput, + }, + ['fal-ai/qwen-image-edit-plus-lora-gallery/multiple-angles']: { + input: zSchemaQwenImageEditPlusLoraGalleryMultipleAnglesInput, + output: zSchemaQwenImageEditPlusLoraGalleryMultipleAnglesOutput, + }, + ['fal-ai/qwen-image-edit-plus-lora-gallery/shirt-design']: { + input: zSchemaQwenImageEditPlusLoraGalleryShirtDesignInput, + output: zSchemaQwenImageEditPlusLoraGalleryShirtDesignOutput, + }, + ['fal-ai/qwen-image-edit-plus-lora-gallery/remove-lighting']: { + input: zSchemaQwenImageEditPlusLoraGalleryRemoveLightingInput, + output: zSchemaQwenImageEditPlusLoraGalleryRemoveLightingOutput, + }, + ['fal-ai/qwen-image-edit-plus-lora-gallery/remove-element']: { + input: zSchemaQwenImageEditPlusLoraGalleryRemoveElementInput, + output: zSchemaQwenImageEditPlusLoraGalleryRemoveElementOutput, + }, + ['fal-ai/qwen-image-edit-plus-lora-gallery/next-scene']: { + input: zSchemaQwenImageEditPlusLoraGalleryNextSceneInput, + output: zSchemaQwenImageEditPlusLoraGalleryNextSceneOutput, + }, + ['fal-ai/qwen-image-edit-plus-lora-gallery/integrate-product']: { + input: zSchemaQwenImageEditPlusLoraGalleryIntegrateProductInput, + output: zSchemaQwenImageEditPlusLoraGalleryIntegrateProductOutput, + }, + ['fal-ai/qwen-image-edit-plus-lora-gallery/group-photo']: { + input: zSchemaQwenImageEditPlusLoraGalleryGroupPhotoInput, + output: zSchemaQwenImageEditPlusLoraGalleryGroupPhotoOutput, + }, + ['fal-ai/qwen-image-edit-plus-lora-gallery/face-to-full-portrait']: { + input: zSchemaQwenImageEditPlusLoraGalleryFaceToFullPortraitInput, + output: zSchemaQwenImageEditPlusLoraGalleryFaceToFullPortraitOutput, + }, + ['fal-ai/qwen-image-edit-plus-lora-gallery/add-background']: { + input: zSchemaQwenImageEditPlusLoraGalleryAddBackgroundInput, + output: zSchemaQwenImageEditPlusLoraGalleryAddBackgroundOutput, + }, + ['fal-ai/reve/fast/remix']: { + input: zSchemaReveFastRemixInput, + output: zSchemaReveFastRemixOutput, + }, + ['fal-ai/reve/fast/edit']: { + input: zSchemaReveFastEditInput, + output: zSchemaReveFastEditOutput, + }, + ['fal-ai/image-apps-v2/outpaint']: { + input: zSchemaImageAppsV2OutpaintInput, + output: zSchemaImageAppsV2OutpaintOutput, + }, + ['fal-ai/flux-vision-upscaler']: { + input: zSchemaFluxVisionUpscalerInput, + output: zSchemaFluxVisionUpscalerOutput, + }, + ['fal-ai/emu-3.5-image/edit-image']: { + input: zSchemaEmu35ImageEditImageInput, + output: zSchemaEmu35ImageEditImageOutput, + }, + ['fal-ai/chrono-edit']: { + input: zSchemaChronoEditInput, + output: zSchemaChronoEditOutput, + }, + ['fal-ai/gpt-image-1-mini/edit']: { + input: zSchemaGptImage1MiniEditInput, + output: zSchemaGptImage1MiniEditOutput, + }, + ['fal-ai/reve/remix']: { + input: zSchemaReveRemixInput, + output: zSchemaReveRemixOutput, + }, + ['fal-ai/reve/edit']: { + input: zSchemaReveEditInput, + output: zSchemaReveEditOutput, + }, + ['fal-ai/image2pixel']: { + input: zSchemaImage2PixelInput, + output: zSchemaImage2PixelOutput, + }, + ['fal-ai/dreamomni2/edit']: { + input: zSchemaDreamomni2EditInput, + output: zSchemaDreamomni2EditOutput, + }, + ['fal-ai/qwen-image-edit-plus-lora']: { + input: zSchemaQwenImageEditPlusLoraInput, + output: zSchemaQwenImageEditPlusLoraOutput, + }, + ['fal-ai/lucidflux']: { + input: zSchemaLucidfluxInput, + output: zSchemaLucidfluxOutput, + }, + ['fal-ai/qwen-image-edit/image-to-image']: { + input: zSchemaQwenImageEditImageToImageInput, + 
output: zSchemaQwenImageEditImageToImageOutput, + }, + ['fal-ai/wan-25-preview/image-to-image']: { + input: zSchemaWan25PreviewImageToImageInput, + output: zSchemaWan25PreviewImageToImageOutput, + }, + ['fal-ai/qwen-image-edit-plus']: { + input: zSchemaQwenImageEditPlusInput, + output: zSchemaQwenImageEditPlusOutput, + }, + ['fal-ai/seedvr/upscale/image']: { + input: zSchemaSeedvrUpscaleImageInput, + output: zSchemaSeedvrUpscaleImageOutput, + }, + ['fal-ai/image-apps-v2/product-holding']: { + input: zSchemaImageAppsV2ProductHoldingInput, + output: zSchemaImageAppsV2ProductHoldingOutput, + }, + ['fal-ai/image-apps-v2/product-photography']: { + input: zSchemaImageAppsV2ProductPhotographyInput, + output: zSchemaImageAppsV2ProductPhotographyOutput, + }, + ['fal-ai/image-apps-v2/virtual-try-on']: { + input: zSchemaImageAppsV2VirtualTryOnInput, + output: zSchemaImageAppsV2VirtualTryOnOutput, + }, + ['fal-ai/image-apps-v2/texture-transform']: { + input: zSchemaImageAppsV2TextureTransformInput, + output: zSchemaImageAppsV2TextureTransformOutput, + }, + ['fal-ai/image-apps-v2/relighting']: { + input: zSchemaImageAppsV2RelightingInput, + output: zSchemaImageAppsV2RelightingOutput, + }, + ['fal-ai/image-apps-v2/style-transfer']: { + input: zSchemaImageAppsV2StyleTransferInput, + output: zSchemaImageAppsV2StyleTransferOutput, + }, + ['fal-ai/image-apps-v2/photo-restoration']: { + input: zSchemaImageAppsV2PhotoRestorationInput, + output: zSchemaImageAppsV2PhotoRestorationOutput, + }, + ['fal-ai/image-apps-v2/portrait-enhance']: { + input: zSchemaImageAppsV2PortraitEnhanceInput, + output: zSchemaImageAppsV2PortraitEnhanceOutput, + }, + ['fal-ai/image-apps-v2/photography-effects']: { + input: zSchemaImageAppsV2PhotographyEffectsInput, + output: zSchemaImageAppsV2PhotographyEffectsOutput, + }, + ['fal-ai/image-apps-v2/perspective']: { + input: zSchemaImageAppsV2PerspectiveInput, + output: zSchemaImageAppsV2PerspectiveOutput, + }, + ['fal-ai/image-apps-v2/object-removal']: { + input: zSchemaImageAppsV2ObjectRemovalInput, + output: zSchemaImageAppsV2ObjectRemovalOutput, + }, + ['fal-ai/image-apps-v2/headshot-photo']: { + input: zSchemaImageAppsV2HeadshotPhotoInput, + output: zSchemaImageAppsV2HeadshotPhotoOutput, + }, + ['fal-ai/image-apps-v2/hair-change']: { + input: zSchemaImageAppsV2HairChangeInput, + output: zSchemaImageAppsV2HairChangeOutput, + }, + ['fal-ai/image-apps-v2/expression-change']: { + input: zSchemaImageAppsV2ExpressionChangeInput, + output: zSchemaImageAppsV2ExpressionChangeOutput, + }, + ['fal-ai/image-apps-v2/city-teleport']: { + input: zSchemaImageAppsV2CityTeleportInput, + output: zSchemaImageAppsV2CityTeleportOutput, + }, + ['fal-ai/image-apps-v2/age-modify']: { + input: zSchemaImageAppsV2AgeModifyInput, + output: zSchemaImageAppsV2AgeModifyOutput, + }, + ['fal-ai/image-apps-v2/makeup-application']: { + input: zSchemaImageAppsV2MakeupApplicationInput, + output: zSchemaImageAppsV2MakeupApplicationOutput, + }, + ['fal-ai/qwen-image-edit/inpaint']: { + input: zSchemaQwenImageEditInpaintInput, + output: zSchemaQwenImageEditInpaintOutput, + }, + ['fal-ai/flux/srpo/image-to-image']: { + input: zSchemaFluxSrpoImageToImageInput, + output: zSchemaFluxSrpoImageToImageOutput, + }, + ['fal-ai/flux-1/srpo/image-to-image']: { + input: zSchemaFlux1SrpoImageToImageInput, + output: zSchemaFlux1SrpoImageToImageOutput, + }, + ['fal-ai/qwen-image-edit-lora']: { + input: zSchemaQwenImageEditLoraInput, + output: zSchemaQwenImageEditLoraOutput, + }, + ['fal-ai/vidu/reference-to-image']: { + input: 
zSchemaViduReferenceToImageInput, + output: zSchemaViduReferenceToImageOutput, + }, + ['fal-ai/bytedance/seedream/v4/edit']: { + input: zSchemaBytedanceSeedreamV4EditInput, + output: zSchemaBytedanceSeedreamV4EditOutput, + }, + ['fal-ai/wan/v2.2-a14b/image-to-image']: { + input: zSchemaWanV22A14bImageToImageInput, + output: zSchemaWanV22A14bImageToImageOutput, + }, + ['fal-ai/uso']: { + input: zSchemaUsoInput, + output: zSchemaUsoOutput, + }, + ['fal-ai/gemini-25-flash-image/edit']: { + input: zSchemaGemini25FlashImageEditInput, + output: zSchemaGemini25FlashImageEditOutput, + }, + ['fal-ai/qwen-image/image-to-image']: { + input: zSchemaQwenImageImageToImageInput, + output: zSchemaQwenImageImageToImageOutput, + }, + ['bria/reimagine/3.2']: { + input: zSchemaReimagine32Input, + output: zSchemaReimagine32Output, + }, + ['fal-ai/nano-banana/edit']: { + input: zSchemaNanoBananaEditInput, + output: zSchemaNanoBananaEditOutput, + }, + ['fal-ai/nextstep-1']: { + input: zSchemaNextstep1Input, + output: zSchemaNextstep1Output, + }, + ['fal-ai/qwen-image-edit']: { + input: zSchemaQwenImageEditInput, + output: zSchemaQwenImageEditOutput, + }, + ['fal-ai/ideogram/character/edit']: { + input: zSchemaIdeogramCharacterEditInput, + output: zSchemaIdeogramCharacterEditOutput, + }, + ['fal-ai/ideogram/character']: { + input: zSchemaIdeogramCharacterInput, + output: zSchemaIdeogramCharacterOutput, + }, + ['fal-ai/ideogram/character/remix']: { + input: zSchemaIdeogramCharacterRemixInput, + output: zSchemaIdeogramCharacterRemixOutput, + }, + ['fal-ai/flux-krea-lora/inpainting']: { + input: zSchemaFluxKreaLoraInpaintingInput, + output: zSchemaFluxKreaLoraInpaintingOutput, + }, + ['fal-ai/flux-krea-lora/image-to-image']: { + input: zSchemaFluxKreaLoraImageToImageInput, + output: zSchemaFluxKreaLoraImageToImageOutput, + }, + ['fal-ai/flux/krea/image-to-image']: { + input: zSchemaFluxKreaImageToImageInput, + output: zSchemaFluxKreaImageToImageOutput, + }, + ['fal-ai/flux/krea/redux']: { + input: zSchemaFluxKreaReduxInput, + output: zSchemaFluxKreaReduxOutput, + }, + ['fal-ai/flux-1/krea/image-to-image']: { + input: zSchemaFlux1KreaImageToImageInput, + output: zSchemaFlux1KreaImageToImageOutput, + }, + ['fal-ai/flux-1/krea/redux']: { + input: zSchemaFlux1KreaReduxInput, + output: zSchemaFlux1KreaReduxOutput, + }, + ['fal-ai/flux-kontext-lora/inpaint']: { + input: zSchemaFluxKontextLoraInpaintInput, + output: zSchemaFluxKontextLoraInpaintOutput, + }, + ['fal-ai/hunyuan_world']: { + input: zSchemaHunyuanWorldInput, + output: zSchemaHunyuanWorldOutput, + }, + ['fal-ai/image-editing/retouch']: { + input: zSchemaImageEditingRetouchInput, + output: zSchemaImageEditingRetouchOutput, + }, + ['fal-ai/hidream-e1-1']: { + input: zSchemaHidreamE11Input, + output: zSchemaHidreamE11Output, + }, + ['fal-ai/rife']: { + input: zSchemaRifeInput, + output: zSchemaRifeOutput, + }, + ['fal-ai/film']: { + input: zSchemaFilmInput, + output: zSchemaFilmOutput, + }, + ['fal-ai/calligrapher']: { + input: zSchemaCalligrapherInput, + output: zSchemaCalligrapherOutput, + }, + ['fal-ai/bria/reimagine']: { + input: zSchemaBriaReimagineInput, + output: zSchemaBriaReimagineOutput, + }, + ['fal-ai/image-editing/realism']: { + input: zSchemaImageEditingRealismInput, + output: zSchemaImageEditingRealismOutput, + }, + ['fal-ai/post-processing/vignette']: { + input: zSchemaPostProcessingVignetteInput, + output: zSchemaPostProcessingVignetteOutput, + }, + ['fal-ai/post-processing/solarize']: { + input: zSchemaPostProcessingSolarizeInput, + output: 
zSchemaPostProcessingSolarizeOutput, + }, + ['fal-ai/post-processing/sharpen']: { + input: zSchemaPostProcessingSharpenInput, + output: zSchemaPostProcessingSharpenOutput, + }, + ['fal-ai/post-processing/parabolize']: { + input: zSchemaPostProcessingParabolizeInput, + output: zSchemaPostProcessingParabolizeOutput, + }, + ['fal-ai/post-processing/grain']: { + input: zSchemaPostProcessingGrainInput, + output: zSchemaPostProcessingGrainOutput, + }, + ['fal-ai/post-processing/dodge-burn']: { + input: zSchemaPostProcessingDodgeBurnInput, + output: zSchemaPostProcessingDodgeBurnOutput, + }, + ['fal-ai/post-processing/dissolve']: { + input: zSchemaPostProcessingDissolveInput, + output: zSchemaPostProcessingDissolveOutput, + }, + ['fal-ai/post-processing/desaturate']: { + input: zSchemaPostProcessingDesaturateInput, + output: zSchemaPostProcessingDesaturateOutput, + }, + ['fal-ai/post-processing/color-tint']: { + input: zSchemaPostProcessingColorTintInput, + output: zSchemaPostProcessingColorTintOutput, + }, + ['fal-ai/post-processing/color-correction']: { + input: zSchemaPostProcessingColorCorrectionInput, + output: zSchemaPostProcessingColorCorrectionOutput, + }, + ['fal-ai/post-processing/chromatic-aberration']: { + input: zSchemaPostProcessingChromaticAberrationInput, + output: zSchemaPostProcessingChromaticAberrationOutput, + }, + ['fal-ai/post-processing/blur']: { + input: zSchemaPostProcessingBlurInput, + output: zSchemaPostProcessingBlurOutput, + }, + ['fal-ai/image-editing/youtube-thumbnails']: { + input: zSchemaImageEditingYoutubeThumbnailsInput, + output: zSchemaImageEditingYoutubeThumbnailsOutput, + }, + ['fal-ai/topaz/upscale/image']: { + input: zSchemaTopazUpscaleImageInput, + output: zSchemaTopazUpscaleImageOutput, + }, + ['fal-ai/image-editing/broccoli-haircut']: { + input: zSchemaImageEditingBroccoliHaircutInput, + output: zSchemaImageEditingBroccoliHaircutOutput, + }, + ['fal-ai/image-editing/wojak-style']: { + input: zSchemaImageEditingWojakStyleInput, + output: zSchemaImageEditingWojakStyleOutput, + }, + ['fal-ai/image-editing/plushie-style']: { + input: zSchemaImageEditingPlushieStyleInput, + output: zSchemaImageEditingPlushieStyleOutput, + }, + ['fal-ai/flux-kontext-lora']: { + input: zSchemaFluxKontextLoraInput, + output: zSchemaFluxKontextLoraOutput, + }, + ['fal-ai/fashn/tryon/v1.6']: { + input: zSchemaFashnTryonV16Input, + output: zSchemaFashnTryonV16Output, + }, + ['fal-ai/chain-of-zoom']: { + input: zSchemaChainOfZoomInput, + output: zSchemaChainOfZoomOutput, + }, + ['fal-ai/pasd']: { + input: zSchemaPasdInput, + output: zSchemaPasdOutput, + }, + ['fal-ai/object-removal/bbox']: { + input: zSchemaObjectRemovalBboxInput, + output: zSchemaObjectRemovalBboxOutput, + }, + ['fal-ai/object-removal/mask']: { + input: zSchemaObjectRemovalMaskInput, + output: zSchemaObjectRemovalMaskOutput, + }, + ['fal-ai/object-removal']: { + input: zSchemaObjectRemovalInput, + output: zSchemaObjectRemovalOutput, + }, + ['fal-ai/recraft/vectorize']: { + input: zSchemaRecraftVectorizeInput, + output: zSchemaRecraftVectorizeOutput, + }, + ['fal-ai/ffmpeg-api/extract-frame']: { + input: zSchemaFfmpegApiExtractFrameInput, + output: zSchemaFfmpegApiExtractFrameOutput, + }, + ['fal-ai/luma-photon/flash/modify']: { + input: zSchemaLumaPhotonFlashModifyInput, + output: zSchemaLumaPhotonFlashModifyOutput, + }, + ['fal-ai/luma-photon/modify']: { + input: zSchemaLumaPhotonModifyInput, + output: zSchemaLumaPhotonModifyOutput, + }, + ['fal-ai/image-editing/reframe']: { + input: 
zSchemaImageEditingReframeInput, + output: zSchemaImageEditingReframeOutput, + }, + ['fal-ai/image-editing/baby-version']: { + input: zSchemaImageEditingBabyVersionInput, + output: zSchemaImageEditingBabyVersionOutput, + }, + ['fal-ai/luma-photon/flash/reframe']: { + input: zSchemaLumaPhotonFlashReframeInput, + output: zSchemaLumaPhotonFlashReframeOutput, + }, + ['fal-ai/luma-photon/reframe']: { + input: zSchemaLumaPhotonReframeInput, + output: zSchemaLumaPhotonReframeOutput, + }, + ['fal-ai/flux-1/schnell/redux']: { + input: zSchemaFlux1SchnellReduxInput, + output: zSchemaFlux1SchnellReduxOutput, + }, + ['fal-ai/flux-1/dev/redux']: { + input: zSchemaFlux1DevReduxInput, + output: zSchemaFlux1DevReduxOutput, + }, + ['fal-ai/flux-1/dev/image-to-image']: { + input: zSchemaFlux1DevImageToImageInput, + output: zSchemaFlux1DevImageToImageOutput, + }, + ['fal-ai/image-editing/text-removal']: { + input: zSchemaImageEditingTextRemovalInput, + output: zSchemaImageEditingTextRemovalOutput, + }, + ['fal-ai/image-editing/photo-restoration']: { + input: zSchemaImageEditingPhotoRestorationInput, + output: zSchemaImageEditingPhotoRestorationOutput, + }, + ['fal-ai/image-editing/weather-effect']: { + input: zSchemaImageEditingWeatherEffectInput, + output: zSchemaImageEditingWeatherEffectOutput, + }, + ['fal-ai/image-editing/time-of-day']: { + input: zSchemaImageEditingTimeOfDayInput, + output: zSchemaImageEditingTimeOfDayOutput, + }, + ['fal-ai/image-editing/style-transfer']: { + input: zSchemaImageEditingStyleTransferInput, + output: zSchemaImageEditingStyleTransferOutput, + }, + ['fal-ai/image-editing/scene-composition']: { + input: zSchemaImageEditingSceneCompositionInput, + output: zSchemaImageEditingSceneCompositionOutput, + }, + ['fal-ai/image-editing/professional-photo']: { + input: zSchemaImageEditingProfessionalPhotoInput, + output: zSchemaImageEditingProfessionalPhotoOutput, + }, + ['fal-ai/image-editing/object-removal']: { + input: zSchemaImageEditingObjectRemovalInput, + output: zSchemaImageEditingObjectRemovalOutput, + }, + ['fal-ai/image-editing/hair-change']: { + input: zSchemaImageEditingHairChangeInput, + output: zSchemaImageEditingHairChangeOutput, + }, + ['fal-ai/image-editing/face-enhancement']: { + input: zSchemaImageEditingFaceEnhancementInput, + output: zSchemaImageEditingFaceEnhancementOutput, + }, + ['fal-ai/image-editing/expression-change']: { + input: zSchemaImageEditingExpressionChangeInput, + output: zSchemaImageEditingExpressionChangeOutput, + }, + ['fal-ai/image-editing/color-correction']: { + input: zSchemaImageEditingColorCorrectionInput, + output: zSchemaImageEditingColorCorrectionOutput, + }, + ['fal-ai/image-editing/cartoonify']: { + input: zSchemaImageEditingCartoonifyInput, + output: zSchemaImageEditingCartoonifyOutput, + }, + ['fal-ai/image-editing/background-change']: { + input: zSchemaImageEditingBackgroundChangeInput, + output: zSchemaImageEditingBackgroundChangeOutput, + }, + ['fal-ai/image-editing/age-progression']: { + input: zSchemaImageEditingAgeProgressionInput, + output: zSchemaImageEditingAgeProgressionOutput, + }, + ['fal-ai/flux-pro/kontext/max/multi']: { + input: zSchemaFluxProKontextMaxMultiInput, + output: zSchemaFluxProKontextMaxMultiOutput, + }, + ['fal-ai/flux-pro/kontext/multi']: { + input: zSchemaFluxProKontextMultiInput, + output: zSchemaFluxProKontextMultiOutput, + }, + ['fal-ai/flux-pro/kontext/max']: { + input: zSchemaFluxProKontextMaxInput, + output: zSchemaFluxProKontextMaxOutput, + }, + ['fal-ai/flux-kontext/dev']: { + input: 
zSchemaFluxKontextDevInput, + output: zSchemaFluxKontextDevOutput, + }, + ['fal-ai/bagel/edit']: { + input: zSchemaBagelEditInput, + output: zSchemaBagelEditOutput, + }, + ['smoretalk-ai/rembg-enhance']: { + input: zSchemaRembgEnhanceInput, + output: zSchemaRembgEnhanceOutput, + }, + ['fal-ai/recraft/upscale/creative']: { + input: zSchemaRecraftUpscaleCreativeInput, + output: zSchemaRecraftUpscaleCreativeOutput, + }, + ['fal-ai/recraft/upscale/crisp']: { + input: zSchemaRecraftUpscaleCrispInput, + output: zSchemaRecraftUpscaleCrispOutput, + }, + ['fal-ai/recraft/v3/image-to-image']: { + input: zSchemaRecraftV3ImageToImageInput, + output: zSchemaRecraftV3ImageToImageOutput, + }, + ['fal-ai/minimax/image-01/subject-reference']: { + input: zSchemaMinimaxImage01SubjectReferenceInput, + output: zSchemaMinimaxImage01SubjectReferenceOutput, + }, + ['fal-ai/hidream-i1-full/image-to-image']: { + input: zSchemaHidreamI1FullImageToImageInput, + output: zSchemaHidreamI1FullImageToImageOutput, + }, + ['fal-ai/ideogram/v3/reframe']: { + input: zSchemaIdeogramV3ReframeInput, + output: zSchemaIdeogramV3ReframeOutput, + }, + ['fal-ai/ideogram/v3/replace-background']: { + input: zSchemaIdeogramV3ReplaceBackgroundInput, + output: zSchemaIdeogramV3ReplaceBackgroundOutput, + }, + ['fal-ai/ideogram/v3/remix']: { + input: zSchemaIdeogramV3RemixInput, + output: zSchemaIdeogramV3RemixOutput, + }, + ['fal-ai/ideogram/v3/edit']: { + input: zSchemaIdeogramV3EditInput, + output: zSchemaIdeogramV3EditOutput, + }, + ['fal-ai/step1x-edit']: { + input: zSchemaStep1xEditInput, + output: zSchemaStep1xEditOutput, + }, + ['fal-ai/image2svg']: { + input: zSchemaImage2SvgInput, + output: zSchemaImage2SvgOutput, + }, + ['fal-ai/uno']: { + input: zSchemaUnoInput, + output: zSchemaUnoOutput, + }, + ['fal-ai/gpt-image-1/edit-image']: { + input: zSchemaGptImage1EditImageInput, + output: zSchemaGptImage1EditImageOutput, + }, + ['rundiffusion-fal/juggernaut-flux-lora/inpainting']: { + input: zSchemaJuggernautFluxLoraInpaintingInput, + output: zSchemaJuggernautFluxLoraInpaintingOutput, + }, + ['fal-ai/fashn/tryon/v1.5']: { + input: zSchemaFashnTryonV15Input, + output: zSchemaFashnTryonV15Output, + }, + ['fal-ai/plushify']: { + input: zSchemaPlushifyInput, + output: zSchemaPlushifyOutput, + }, + ['fal-ai/instant-character']: { + input: zSchemaInstantCharacterInput, + output: zSchemaInstantCharacterOutput, + }, + ['fal-ai/cartoonify']: { + input: zSchemaCartoonifyInput, + output: zSchemaCartoonifyOutput, + }, + ['fal-ai/finegrain-eraser/mask']: { + input: zSchemaFinegrainEraserMaskInput, + output: zSchemaFinegrainEraserMaskOutput, + }, + ['fal-ai/finegrain-eraser/bbox']: { + input: zSchemaFinegrainEraserBboxInput, + output: zSchemaFinegrainEraserBboxOutput, + }, + ['fal-ai/finegrain-eraser']: { + input: zSchemaFinegrainEraserInput, + output: zSchemaFinegrainEraserOutput, + }, + ['fal-ai/star-vector']: { + input: zSchemaStarVectorInput, + output: zSchemaStarVectorOutput, + }, + ['fal-ai/ghiblify']: { + input: zSchemaGhiblifyInput, + output: zSchemaGhiblifyOutput, + }, + ['fal-ai/thera']: { + input: zSchemaTheraInput, + output: zSchemaTheraOutput, + }, + ['fal-ai/mix-dehaze-net']: { + input: zSchemaMixDehazeNetInput, + output: zSchemaMixDehazeNetOutput, + }, + ['fal-ai/gemini-flash-edit/multi']: { + input: zSchemaGeminiFlashEditMultiInput, + output: zSchemaGeminiFlashEditMultiOutput, + }, + ['fal-ai/gemini-flash-edit']: { + input: zSchemaGeminiFlashEditInput, + output: zSchemaGeminiFlashEditOutput, + }, + ['fal-ai/invisible-watermark']: { 
+ input: zSchemaInvisibleWatermarkInput, + output: zSchemaInvisibleWatermarkOutput, + }, + ['rundiffusion-fal/juggernaut-flux/pro/image-to-image']: { + input: zSchemaJuggernautFluxProImageToImageInput, + output: zSchemaJuggernautFluxProImageToImageOutput, + }, + ['rundiffusion-fal/juggernaut-flux/base/image-to-image']: { + input: zSchemaJuggernautFluxBaseImageToImageInput, + output: zSchemaJuggernautFluxBaseImageToImageOutput, + }, + ['fal-ai/docres/dewarp']: { + input: zSchemaDocresDewarpInput, + output: zSchemaDocresDewarpOutput, + }, + ['fal-ai/docres']: { + input: zSchemaDocresInput, + output: zSchemaDocresOutput, + }, + ['fal-ai/swin2sr']: { + input: zSchemaSwin2SrInput, + output: zSchemaSwin2SrOutput, + }, + ['fal-ai/ideogram/v2a/remix']: { + input: zSchemaIdeogramV2aRemixInput, + output: zSchemaIdeogramV2aRemixOutput, + }, + ['fal-ai/ideogram/v2a/turbo/remix']: { + input: zSchemaIdeogramV2aTurboRemixInput, + output: zSchemaIdeogramV2aTurboRemixOutput, + }, + ['fal-ai/evf-sam']: { + input: zSchemaEvfSamInput, + output: zSchemaEvfSamOutput, + }, + ['fal-ai/ddcolor']: { + input: zSchemaDdcolorInput, + output: zSchemaDdcolorOutput, + }, + ['fal-ai/sam2/auto-segment']: { + input: zSchemaSam2AutoSegmentInput, + output: zSchemaSam2AutoSegmentOutput, + }, + ['fal-ai/drct-super-resolution']: { + input: zSchemaDrctSuperResolutionInput, + output: zSchemaDrctSuperResolutionOutput, + }, + ['fal-ai/nafnet/deblur']: { + input: zSchemaNafnetDeblurInput, + output: zSchemaNafnetDeblurOutput, + }, + ['fal-ai/nafnet/denoise']: { + input: zSchemaNafnetDenoiseInput, + output: zSchemaNafnetDenoiseOutput, + }, + ['fal-ai/post-processing']: { + input: zSchemaPostProcessingInput, + output: zSchemaPostProcessingOutput, + }, + ['fal-ai/flowedit']: { + input: zSchemaFloweditInput, + output: zSchemaFloweditOutput, + }, + ['fal-ai/flux-control-lora-depth/image-to-image']: { + input: zSchemaFluxControlLoraDepthImageToImageInput, + output: zSchemaFluxControlLoraDepthImageToImageOutput, + }, + ['fal-ai/ben/v2/image']: { + input: zSchemaBenV2ImageInput, + output: zSchemaBenV2ImageOutput, + }, + ['fal-ai/flux-control-lora-canny/image-to-image']: { + input: zSchemaFluxControlLoraCannyImageToImageInput, + output: zSchemaFluxControlLoraCannyImageToImageOutput, + }, + ['fal-ai/ideogram/upscale']: { + input: zSchemaIdeogramUpscaleInput, + output: zSchemaIdeogramUpscaleOutput, + }, + ['fal-ai/codeformer']: { + input: zSchemaCodeformerInput, + output: zSchemaCodeformerOutput, + }, + ['fal-ai/kling/v1-5/kolors-virtual-try-on']: { + input: zSchemaKlingV15KolorsVirtualTryOnInput, + output: zSchemaKlingV15KolorsVirtualTryOnOutput, + }, + ['fal-ai/flux-lora-canny']: { + input: zSchemaFluxLoraCannyInput, + output: zSchemaFluxLoraCannyOutput, + }, + ['fal-ai/flux-pro/v1/fill-finetuned']: { + input: zSchemaFluxProV1FillFinetunedInput, + output: zSchemaFluxProV1FillFinetunedOutput, + }, + ['fal-ai/moondream-next/detection']: { + input: zSchemaMoondreamNextDetectionInput, + output: zSchemaMoondreamNextDetectionOutput, + }, + ['fal-ai/bria/expand']: { + input: zSchemaBriaExpandInput, + output: zSchemaBriaExpandOutput, + }, + ['fal-ai/bria/genfill']: { + input: zSchemaBriaGenfillInput, + output: zSchemaBriaGenfillOutput, + }, + ['fal-ai/flux-lora-fill']: { + input: zSchemaFluxLoraFillInput, + output: zSchemaFluxLoraFillOutput, + }, + ['fal-ai/bria/background/replace']: { + input: zSchemaBriaBackgroundReplaceInput, + output: zSchemaBriaBackgroundReplaceOutput, + }, + ['fal-ai/bria/eraser']: { + input: zSchemaBriaEraserInput, + output: 
zSchemaBriaEraserOutput, + }, + ['fal-ai/bria/product-shot']: { + input: zSchemaBriaProductShotInput, + output: zSchemaBriaProductShotOutput, + }, + ['fal-ai/bria/background/remove']: { + input: zSchemaBriaBackgroundRemoveInput, + output: zSchemaBriaBackgroundRemoveOutput, + }, + ['fal-ai/cat-vton']: { + input: zSchemaCatVtonInput, + output: zSchemaCatVtonOutput, + }, + ['fal-ai/leffa/pose-transfer']: { + input: zSchemaLeffaPoseTransferInput, + output: zSchemaLeffaPoseTransferOutput, + }, + ['fal-ai/leffa/virtual-tryon']: { + input: zSchemaLeffaVirtualTryonInput, + output: zSchemaLeffaVirtualTryonOutput, + }, + ['fal-ai/ideogram/v2/edit']: { + input: zSchemaIdeogramV2EditInput, + output: zSchemaIdeogramV2EditOutput, + }, + ['fal-ai/ideogram/v2/turbo/edit']: { + input: zSchemaIdeogramV2TurboEditInput, + output: zSchemaIdeogramV2TurboEditOutput, + }, + ['fal-ai/ideogram/v2/turbo/remix']: { + input: zSchemaIdeogramV2TurboRemixInput, + output: zSchemaIdeogramV2TurboRemixOutput, + }, + ['fal-ai/ideogram/v2/remix']: { + input: zSchemaIdeogramV2RemixInput, + output: zSchemaIdeogramV2RemixOutput, + }, + ['fal-ai/flux/schnell/redux']: { + input: zSchemaFluxSchnellReduxInput, + output: zSchemaFluxSchnellReduxOutput, + }, + ['fal-ai/flux-pro/v1.1/redux']: { + input: zSchemaFluxProV11ReduxInput, + output: zSchemaFluxProV11ReduxOutput, + }, + ['fal-ai/flux/dev/redux']: { + input: zSchemaFluxDevReduxInput, + output: zSchemaFluxDevReduxOutput, + }, + ['fal-ai/flux-pro/v1.1-ultra/redux']: { + input: zSchemaFluxProV11UltraReduxInput, + output: zSchemaFluxProV11UltraReduxOutput, + }, + ['fal-ai/flux-lora-depth']: { + input: zSchemaFluxLoraDepthInput, + output: zSchemaFluxLoraDepthOutput, + }, + ['fal-ai/flux-pro/v1/fill']: { + input: zSchemaFluxProV1FillInput, + output: zSchemaFluxProV1FillOutput, + }, + ['fal-ai/kolors/image-to-image']: { + input: zSchemaKolorsImageToImageInput, + output: zSchemaKolorsImageToImageOutput, + }, + ['fal-ai/iclight-v2']: { + input: zSchemaIclightV2Input, + output: zSchemaIclightV2Output, + }, + ['fal-ai/flux-differential-diffusion']: { + input: zSchemaFluxDifferentialDiffusionInput, + output: zSchemaFluxDifferentialDiffusionOutput, + }, + ['fal-ai/flux-pulid']: { + input: zSchemaFluxPulidInput, + output: zSchemaFluxPulidOutput, + }, + ['fal-ai/birefnet/v2']: { + input: zSchemaBirefnetV2Input, + output: zSchemaBirefnetV2Output, + }, + ['fal-ai/live-portrait/image']: { + input: zSchemaLivePortraitImageInput, + output: zSchemaLivePortraitImageOutput, + }, + ['fal-ai/flux-general/rf-inversion']: { + input: zSchemaFluxGeneralRfInversionInput, + output: zSchemaFluxGeneralRfInversionOutput, + }, + ['fal-ai/image-preprocessors/hed']: { + input: zSchemaImagePreprocessorsHedInput, + output: zSchemaImagePreprocessorsHedOutput, + }, + ['fal-ai/image-preprocessors/depth-anything/v2']: { + input: zSchemaImagePreprocessorsDepthAnythingV2Input, + output: zSchemaImagePreprocessorsDepthAnythingV2Output, + }, + ['fal-ai/image-preprocessors/scribble']: { + input: zSchemaImagePreprocessorsScribbleInput, + output: zSchemaImagePreprocessorsScribbleOutput, + }, + ['fal-ai/image-preprocessors/mlsd']: { + input: zSchemaImagePreprocessorsMlsdInput, + output: zSchemaImagePreprocessorsMlsdOutput, + }, + ['fal-ai/image-preprocessors/sam']: { + input: zSchemaImagePreprocessorsSamInput, + output: zSchemaImagePreprocessorsSamOutput, + }, + ['fal-ai/image-preprocessors/midas']: { + input: zSchemaImagePreprocessorsMidasInput, + output: zSchemaImagePreprocessorsMidasOutput, + }, + 
['fal-ai/image-preprocessors/teed']: { + input: zSchemaImagePreprocessorsTeedInput, + output: zSchemaImagePreprocessorsTeedOutput, + }, + ['fal-ai/image-preprocessors/lineart']: { + input: zSchemaImagePreprocessorsLineartInput, + output: zSchemaImagePreprocessorsLineartOutput, + }, + ['fal-ai/image-preprocessors/zoe']: { + input: zSchemaImagePreprocessorsZoeInput, + output: zSchemaImagePreprocessorsZoeOutput, + }, + ['fal-ai/image-preprocessors/pidi']: { + input: zSchemaImagePreprocessorsPidiInput, + output: zSchemaImagePreprocessorsPidiOutput, + }, + ['fal-ai/sam2/image']: { + input: zSchemaSam2ImageInput, + output: zSchemaSam2ImageOutput, + }, + ['fal-ai/flux-general/image-to-image']: { + input: zSchemaFluxGeneralImageToImageInput, + output: zSchemaFluxGeneralImageToImageOutput, + }, + ['fal-ai/flux-general/inpainting']: { + input: zSchemaFluxGeneralInpaintingInput, + output: zSchemaFluxGeneralInpaintingOutput, + }, + ['fal-ai/flux-general/differential-diffusion']: { + input: zSchemaFluxGeneralDifferentialDiffusionInput, + output: zSchemaFluxGeneralDifferentialDiffusionOutput, + }, + ['fal-ai/flux-lora/image-to-image']: { + input: zSchemaFluxLoraImageToImageInput, + output: zSchemaFluxLoraImageToImageOutput, + }, + ['fal-ai/sdxl-controlnet-union/inpainting']: { + input: zSchemaSdxlControlnetUnionInpaintingInput, + output: zSchemaSdxlControlnetUnionInpaintingOutput, + }, + ['fal-ai/sdxl-controlnet-union/image-to-image']: { + input: zSchemaSdxlControlnetUnionImageToImageInput, + output: zSchemaSdxlControlnetUnionImageToImageOutput, + }, + ['fal-ai/era-3d']: { + input: zSchemaEra3dInput, + output: zSchemaEra3dOutput, + }, + ['fal-ai/florence-2-large/dense-region-caption']: { + input: zSchemaFlorence2LargeDenseRegionCaptionInput, + output: zSchemaFlorence2LargeDenseRegionCaptionOutput, + }, + ['fal-ai/florence-2-large/referring-expression-segmentation']: { + input: zSchemaFlorence2LargeReferringExpressionSegmentationInput, + output: zSchemaFlorence2LargeReferringExpressionSegmentationOutput, + }, + ['fal-ai/florence-2-large/object-detection']: { + input: zSchemaFlorence2LargeObjectDetectionInput, + output: zSchemaFlorence2LargeObjectDetectionOutput, + }, + ['fal-ai/florence-2-large/open-vocabulary-detection']: { + input: zSchemaFlorence2LargeOpenVocabularyDetectionInput, + output: zSchemaFlorence2LargeOpenVocabularyDetectionOutput, + }, + ['fal-ai/florence-2-large/caption-to-phrase-grounding']: { + input: zSchemaFlorence2LargeCaptionToPhraseGroundingInput, + output: zSchemaFlorence2LargeCaptionToPhraseGroundingOutput, + }, + ['fal-ai/florence-2-large/region-proposal']: { + input: zSchemaFlorence2LargeRegionProposalInput, + output: zSchemaFlorence2LargeRegionProposalOutput, + }, + ['fal-ai/florence-2-large/ocr-with-region']: { + input: zSchemaFlorence2LargeOcrWithRegionInput, + output: zSchemaFlorence2LargeOcrWithRegionOutput, + }, + ['fal-ai/florence-2-large/region-to-segmentation']: { + input: zSchemaFlorence2LargeRegionToSegmentationInput, + output: zSchemaFlorence2LargeRegionToSegmentationOutput, + }, + ['fal-ai/stable-diffusion-v3-medium/image-to-image']: { + input: zSchemaStableDiffusionV3MediumImageToImageInput, + output: zSchemaStableDiffusionV3MediumImageToImageOutput, + }, + ['fal-ai/dwpose']: { + input: zSchemaDwposeInput, + output: zSchemaDwposeOutput, + }, + ['fal-ai/sd15-depth-controlnet']: { + input: zSchemaSd15DepthControlnetInput, + output: zSchemaSd15DepthControlnetOutput, + }, + ['fal-ai/ccsr']: { + input: zSchemaCcsrInput, + output: zSchemaCcsrOutput, + }, + 
['fal-ai/omni-zero']: {
+    input: zSchemaOmniZeroInput,
+    output: zSchemaOmniZeroOutput,
+  },
+  ['fal-ai/ip-adapter-face-id']: {
+    input: zSchemaIpAdapterFaceIdInput,
+    output: zSchemaIpAdapterFaceIdOutput,
+  },
+  ['fal-ai/lora/inpaint']: {
+    input: zSchemaLoraInpaintInput,
+    output: zSchemaLoraInpaintOutput,
+  },
+  ['fal-ai/lora/image-to-image']: {
+    input: zSchemaLoraImageToImageInput,
+    output: zSchemaLoraImageToImageOutput,
+  },
+  ['fal-ai/fast-sdxl/image-to-image']: {
+    input: zSchemaFastSdxlImageToImageInput,
+    output: zSchemaFastSdxlImageToImageOutput,
+  },
+  ['fal-ai/fast-sdxl/inpainting']: {
+    input: zSchemaFastSdxlInpaintingInput,
+    output: zSchemaFastSdxlInpaintingOutput,
+  },
+  ['fal-ai/face-to-sticker']: {
+    input: zSchemaFaceToStickerInput,
+    output: zSchemaFaceToStickerOutput,
+  },
+  ['fal-ai/photomaker']: {
+    input: zSchemaPhotomakerInput,
+    output: zSchemaPhotomakerOutput,
+  },
+  ['fal-ai/creative-upscaler']: {
+    input: zSchemaCreativeUpscalerInput,
+    output: zSchemaCreativeUpscalerOutput,
+  },
+  ['fal-ai/birefnet']: {
+    input: zSchemaBirefnetInput,
+    output: zSchemaBirefnetOutput,
+  },
+  ['fal-ai/playground-v25/image-to-image']: {
+    input: zSchemaPlaygroundV25ImageToImageInput,
+    output: zSchemaPlaygroundV25ImageToImageOutput,
+  },
+  ['fal-ai/fast-lightning-sdxl/image-to-image']: {
+    input: zSchemaFastLightningSdxlImageToImageInput,
+    output: zSchemaFastLightningSdxlImageToImageOutput,
+  },
+  ['fal-ai/fast-lightning-sdxl/inpainting']: {
+    input: zSchemaFastLightningSdxlInpaintingInput,
+    output: zSchemaFastLightningSdxlInpaintingOutput,
+  },
+  ['fal-ai/playground-v25/inpainting']: {
+    input: zSchemaPlaygroundV25InpaintingInput,
+    output: zSchemaPlaygroundV25InpaintingOutput,
+  },
+  ['fal-ai/fast-lcm-diffusion/inpainting']: {
+    input: zSchemaFastLcmDiffusionInpaintingInput,
+    output: zSchemaFastLcmDiffusionInpaintingOutput,
+  },
+  ['fal-ai/fast-lcm-diffusion/image-to-image']: {
+    input: zSchemaFastLcmDiffusionImageToImageInput,
+    output: zSchemaFastLcmDiffusionImageToImageOutput,
+  },
+  ['fal-ai/retoucher']: {
+    input: zSchemaRetoucherInput,
+    output: zSchemaRetoucherOutput,
+  },
+  ['fal-ai/imageutils/depth']: {
+    input: zSchemaImageutilsDepthInput,
+    output: zSchemaImageutilsDepthOutput,
+  },
+  ['fal-ai/imageutils/marigold-depth']: {
+    input: zSchemaImageutilsMarigoldDepthInput,
+    output: zSchemaImageutilsMarigoldDepthOutput,
+  },
+  ['fal-ai/pulid']: {
+    input: zSchemaPulidInput,
+    output: zSchemaPulidOutput,
+  },
+  ['fal-ai/fast-sdxl-controlnet-canny/image-to-image']: {
+    input: zSchemaFastSdxlControlnetCannyImageToImageInput,
+    output: zSchemaFastSdxlControlnetCannyImageToImageOutput,
+  },
+  ['fal-ai/fast-sdxl-controlnet-canny/inpainting']: {
+    input: zSchemaFastSdxlControlnetCannyInpaintingInput,
+    output: zSchemaFastSdxlControlnetCannyInpaintingOutput,
+  },
+  ['fal-ai/lcm-sd15-i2i']: {
+    input: zSchemaLcmSd15I2iInput,
+    output: zSchemaLcmSd15I2iOutput,
+  },
+  ['fal-ai/inpaint']: {
+    input: zSchemaInpaintInput,
+    output: zSchemaInpaintOutput,
+  },
+  ['fal-ai/esrgan']: {
+    input: zSchemaEsrganInput,
+    output: zSchemaEsrganOutput,
+  },
+  ['fal-ai/imageutils/rembg']: {
+    input: zSchemaImageutilsRembgInput,
+    output: zSchemaImageutilsRembgOutput,
+  },
+} as const
+
+/** Get the input type for a specific image-to-image model */
+export type ImageToImageModelInput<T extends ImageToImageModel> =
+  ImageToImageEndpointMap[T]['input']
+
+/** Get the output type for a specific image-to-image model */
+export type ImageToImageModelOutput<T extends ImageToImageModel> =
+  ImageToImageEndpointMap[T]['output']
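+
+// Illustrative usage sketch (editorial addition, not generator output): the
+// endpoint map fixes each endpoint's input/output types at compile time, while
+// ImageToImageSchemaMap supplies the matching zod validators at runtime. A
+// hypothetical wrapper — assuming fal's synchronous `https://fal.run/<id>`
+// execution URL, the `Key` Authorization scheme, and a FAL_KEY environment
+// variable — could tie the two together for end-to-end type-safe calls:
+//
+// async function runImageToImage<T extends ImageToImageModel>(
+//   model: T,
+//   input: ImageToImageModelInput<T>,
+// ): Promise<ImageToImageModelOutput<T>> {
+//   const schemas = ImageToImageSchemaMap[model]
+//   // Validate the payload against the endpoint's input schema before sending.
+//   const payload = schemas.input.parse(input)
+//   const res = await fetch(`https://fal.run/${model}`, {
+//     method: 'POST',
+//     headers: {
+//       Authorization: `Key ${process.env.FAL_KEY}`,
+//       'Content-Type': 'application/json',
+//     },
+//     body: JSON.stringify(payload),
+//   })
+//   // The schema map is typed with plain z.ZodSchema, so narrow the parsed
+//   // response back to the endpoint-specific output type.
+//   return schemas.output.parse(await res.json()) as ImageToImageModelOutput<T>
+// }
+//
+// e.g. const out = await runImageToImage('fal-ai/esrgan', { image_url: '...' })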
diff --git a/packages/typescript/ai-fal/src/generated/image-to-image/types.gen.ts b/packages/typescript/ai-fal/src/generated/image-to-image/types.gen.ts
new file mode 100644
index 00000000..7d4f2fb5
--- /dev/null
+++ b/packages/typescript/ai-fal/src/generated/image-to-image/types.gen.ts
@@ -0,0 +1,66902 @@
+// This file is auto-generated by @hey-api/openapi-ts
+
+export type ClientOptions = {
+  baseUrl: 'https://queue.fal.run' | (string & {})
+}
+
+/**
+ * RemoveBackgroundOutput
+ */
+export type SchemaImageutilsRembgOutput = {
+  /**
+   * Image
+   *
+   * Background removed image.
+   */
+  image: SchemaImage
+}
+
+/**
+ * Image
+ *
+ * Represents an image file.
+ */
+export type SchemaImage = {
+  /**
+   * Height
+   *
+   * The height of the image in pixels.
+   */
+  height?: number
+  /**
+   * File Size
+   *
+   * The size of the file in bytes.
+   */
+  file_size?: number
+  /**
+   * Url
+   *
+   * The URL where the file can be downloaded from.
+   */
+  url: string
+  /**
+   * Width
+   *
+   * The width of the image in pixels.
+   */
+  width?: number
+  /**
+   * File Name
+   *
+   * The name of the file. It will be auto-generated if not provided.
+   */
+  file_name?: string
+  /**
+   * Content Type
+   *
+   * The mime type of the file.
+   */
+  content_type?: string
+  /**
+   * File Data
+   *
+   * File data
+   */
+  file_data?: Blob | File
+}
+
+/**
+ * RemoveBackgroundInput
+ */
+export type SchemaImageutilsRembgInput = {
+  /**
+   * Sync Mode
+   *
+   * If `True`, the media will be returned as a data URI and the output data won't be available in the request history.
+   */
+  sync_mode?: boolean
+  /**
+   * Crop To Bbox
+   *
+   * If set to true, the resulting image will be cropped to a bounding box around the subject
+   */
+  crop_to_bbox?: boolean
+  /**
+   * Image Url
+   *
+   * Input image url.
+   */
+  image_url: string
+}
+
+/**
+ * UpscaleOutput
+ */
+export type SchemaEsrganOutput = {
+  /**
+   * Image
+   *
+   * Upscaled image
+   */
+  image: SchemaImage
+}
+
+/**
+ * UpscaleInput
+ */
+export type SchemaEsrganInput = {
+  /**
+   * Model
+   *
+   * Model to use for upscaling
+   */
+  model?:
+    | 'RealESRGAN_x4plus'
+    | 'RealESRGAN_x2plus'
+    | 'RealESRGAN_x4plus_anime_6B'
+    | 'RealESRGAN_x4_v3'
+    | 'RealESRGAN_x4_wdn_v3'
+    | 'RealESRGAN_x4_anime_v3'
+  /**
+   * Face
+   *
+   * Upscaling a face
+   */
+  face?: boolean
+  /**
+   * Scale
+   *
+   * Rescaling factor
+   */
+  scale?: number
+  /**
+   * Tile
+   *
+   * Tile size. Default is 0, that is no tile. When encountering the out-of-GPU-memory issue, please specify it, e.g., 400 or 200
+   */
+  tile?: number
+  /**
+   * Output Format
+   *
+   * Output image format (png or jpeg)
+   */
+  output_format?: 'png' | 'jpeg'
+  /**
+   * Image Url
+   *
+   * Url to input image
+   */
+  image_url: string
+}
+
+/**
+ * InpaintOutput
+ */
+export type SchemaInpaintOutput = {
+  /**
+   * Image
+   *
+   * The generated image files info.
+   */
+  image: SchemaImage
+  /**
+   * Seed
+   *
+   * Seed of the generated Image. It will be the same value of the one passed in the
+   * input or the randomly generated that was used in case none was passed.
+   */
+  seed: number
+}
+
+/**
+ * InpaintInput
+ */
+export type SchemaInpaintInput = {
+  /**
+   * Prompt
+   *
+   * The prompt to use for generating the image. Be as descriptive as possible for best results.
+   */
+  prompt: string
+  /**
+   * Image Url
+   *
+   * Input image for img2img or inpaint mode
+   */
+  image_url: string
+  /**
+   * Model Name
+   *
+   * URL or HuggingFace ID of the base model to generate the image.
+ */ + model_name: string + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Number of inference steps + * + * + * Increasing the amount of steps tells Stable Diffusion that it should take more steps + * to generate your final result which can increase the amount of detail in your image. + * + */ + num_inference_steps?: number + /** + * Mask Url + * + * Input mask for inpaint mode. Black areas will be preserved, white areas will be inpainted. + */ + mask_url: string + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number +} + +/** + * LCMOutput + */ +export type SchemaLcmSd15I2iOutput = { + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Request Id + * + * + * An id bound to a request, can be used with response to identify the request + * itself. + * + */ + request_id?: string + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number + /** + * Num Inference Steps + * + * + * Number of inference steps used to generate the image. It will be the same value of the one passed in the + * input or the default one in case none was passed. + * + */ + num_inference_steps?: number + /** + * Nsfw Content Detected + * + * + * A list of booleans indicating whether the generated image contains any + * potentially unsafe content. If the safety check is disabled, this field + * will have a false for each generated image. + * + */ + nsfw_content_detected: Array +} + +/** + * LCMI2IInput + */ +export type SchemaLcmSd15I2iInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Num Images + * + * + * The number of images to generate. The function will return a list of images + * with the same prompt and negative prompt but different seeds. + * + */ + num_images?: number + /** + * Image Url + * + * The image to use as a base. + */ + image_url: string + /** + * Strength + * + * The strength of the image. + */ + strength?: number + /** + * Sync Mode + * + * + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + * + */ + sync_mode?: boolean + /** + * Enable Safety Checks + * + * + * If set to true, the resulting image will be checked whether it includes any + * potentially unsafe content. If it does, it will be replaced with a black + * image. 
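+ *
+ * Hedged sketch of how this flag shows up in the output (fetching the result
+ * is assumed and `getResultSomehow` is a hypothetical helper): each entry of
+ * `nsfw_content_detected` lines up with one entry of `images`, and flagged
+ * images arrive as black images.
+ *
+ * @example
+ * const result: SchemaLcmSd15I2iOutput = await getResultSomehow() // hypothetical
+ * result.images.forEach((img, i) => {
+ *   if (result.nsfw_content_detected[i]) {
+ *     console.warn(`image ${i} was flagged and replaced with a black image`)
+ *   } else {
+ *     console.log(img.url)
+ *   }
+ * })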
+ * + */ + enable_safety_checks?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number + /** + * Request Id + * + * + * An id bound to a request, can be used with response to identify the request + * itself. + * + */ + request_id?: string + /** + * Negative Prompt + * + * + * The negative prompt to use.Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Num Inference Steps + * + * + * The number of inference steps to use for generating the image. The more steps + * the better the image will be but it will also take longer to generate. + * + */ + num_inference_steps?: number +} + +/** + * Output + */ +export type SchemaFastSdxlControlnetCannyInpaintingOutput = { + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * InpaintingControlNetInput + */ +export type SchemaFastSdxlControlnetCannyInpaintingInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Image Size + * + * The size of the generated image. Leave it none to automatically infer from the control image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + | null + /** + * Expand Prompt + * + * If set to true, the prompt will be expanded with additional prompts. + */ + expand_prompt?: boolean + /** + * Loras + * + * The list of LoRA weights to use. + */ + loras?: Array + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * + * The negative prompt to use.Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Controlnet Conditioning Scale + * + * The scale of the controlnet conditioning. + */ + controlnet_conditioning_scale?: number + /** + * Image Url + * + * The URL of the image to use as a starting point for the generation. 
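+ *
+ * A sketch of a complete controlnet-canny inpainting input (all URLs are
+ * illustrative placeholders): three images are involved, the base image here,
+ * the canny edge map in `control_image_url`, and the inpainting mask in
+ * `mask_url`.
+ *
+ * @example
+ * const input: SchemaFastSdxlControlnetCannyInpaintingInput = {
+ *   prompt: 'a marble statue in a sunlit garden',
+ *   image_url: 'https://example.com/base.png',
+ *   control_image_url: 'https://example.com/canny-edges.png',
+ *   mask_url: 'https://example.com/mask.png',
+ *   controlnet_conditioning_scale: 0.8,
+ *   strength: 0.85,
+ * }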
+ */ + image_url: string + /** + * Strength + * + * determines how much the generated image resembles the initial image + */ + strength?: number + /** + * Sync Mode + * + * + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + * + */ + sync_mode?: boolean + /** + * Control Image Url + * + * The URL of the control image. + */ + control_image_url: string + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number + /** + * Mask Url + * + * The URL of the mask to use for inpainting. + */ + mask_url: string + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * LoraWeight + */ +export type SchemaLoraWeight = { + /** + * Path + * + * URL or the path to the LoRA weights. Or HF model name. + */ + path: string + /** + * Scale + * + * + * The scale of the LoRA weight. This is used to scale the LoRA weight + * before merging it with the base model. + * + */ + scale?: number +} + +/** + * ImageSize + */ +export type SchemaImageSize = { + /** + * Height + * + * The height of the generated image. + */ + height?: number + /** + * Width + * + * The width of the generated image. + */ + width?: number +} + +/** + * Output + */ +export type SchemaFastSdxlControlnetCannyImageToImageOutput = { + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * ImageToImageControlNetInput + */ +export type SchemaFastSdxlControlnetCannyImageToImageInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Image Size + * + * The size of the generated image. Leave it none to automatically infer from the control image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + | null + /** + * Expand Prompt + * + * If set to true, the prompt will be expanded with additional prompts. + */ + expand_prompt?: boolean + /** + * Loras + * + * The list of LoRA weights to use. + */ + loras?: Array + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * + * The negative prompt to use.Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Num Images + * + * The number of images to generate. 
+ */ + num_images?: number + /** + * Controlnet Conditioning Scale + * + * The scale of the controlnet conditioning. + */ + controlnet_conditioning_scale?: number + /** + * Image Url + * + * The URL of the image to use as a starting point for the generation. + */ + image_url: string + /** + * Strength + * + * determines how much the generated image resembles the initial image + */ + strength?: number + /** + * Sync Mode + * + * + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + * + */ + sync_mode?: boolean + /** + * Control Image Url + * + * The URL of the control image. + */ + control_image_url: string + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number +} + +/** + * ReferenceFace + */ +export type SchemaReferenceFace = { + /** + * Image Url + * + * URL of the reference face image + */ + image_url: string +} + +/** + * OutputModel + */ +export type SchemaPulidOutput = { + /** + * Images + * + * List of generated images + */ + images: Array<SchemaImage> + /** + * Seed + * + * Random seed used for reproducibility + */ + seed: number +} + +/** + * InputModel + */ +export type SchemaPulidInput = { + /** + * Prompt + * + * Prompt to generate the face from + */ + prompt: string + /** + * Num Images + * + * Number of images to generate + */ + num_images?: number + /** + * Image Size + * + * Size of the generated image + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Id Scale + * + * ID scale + */ + id_scale?: number + /** + * Mode + * + * Mode of generation + */ + mode?: 'fidelity' | 'extreme style' + /** + * Id Mix + * + * If you want to mix two ID images, turn this on; otherwise, turn it off. + */ + id_mix?: boolean + /** + * Guidance Scale + * + * Guidance scale + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * Number of steps to take + */ + num_inference_steps?: number + /** + * Reference Images + * + * List of reference faces, ideally 4 images. + */ + reference_images: Array<SchemaReferenceFace> + /** + * Negative Prompt + * + * Negative prompt to generate the face from + */ + negative_prompt?: string + /** + * Seed + * + * Random seed for reproducibility + */ + seed?: number +} + +/** + * MarigoldDepthMapOutput + */ +export type SchemaImageutilsMarigoldDepthOutput = { + /** + * Image + * + * The depth map. + */ + image: SchemaImage +} + +/** + * MarigoldDepthMapInput + */ +export type SchemaImageutilsMarigoldDepthInput = { + /** + * Ensemble Size + * + * Number of predictions to average over. Defaults to `10`. The higher the number, the more accurate the result, but the slower the inference. + */ + ensemble_size?: number + /** + * Num Inference Steps + * + * Number of denoising steps. Defaults to `10`. The higher the number, the more accurate the result, but the slower the inference. + */ + num_inference_steps?: number + /** + * Processing Res + * + * Maximum processing resolution. Defaults to `0`, which means it uses the size of the input image. + */ + processing_res?: number + /** + * Image Url + * + * Input image url. 
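+ *
+ * Sketch of the accuracy/latency trade-off described above (URL and values
+ * are illustrative): higher `ensemble_size` and `num_inference_steps` average
+ * more predictions for a cleaner depth map, at the cost of slower inference.
+ *
+ * @example
+ * const fast: SchemaImageutilsMarigoldDepthInput = {
+ *   image_url: 'https://example.com/room.jpg',
+ *   ensemble_size: 5,
+ *   num_inference_steps: 5,
+ * }
+ * const accurate: SchemaImageutilsMarigoldDepthInput = {
+ *   image_url: 'https://example.com/room.jpg',
+ *   ensemble_size: 20,
+ *   num_inference_steps: 20,
+ * }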
+ */ + image_url: string +} + +/** + * DepthMapOutput + */ +export type SchemaImageutilsDepthOutput = { + /** + * Image + * + * The depth map. + */ + image: SchemaImage +} + +/** + * DepthMapInput + */ +export type SchemaImageutilsDepthInput = { + /** + * Bg Th + * + * bg_th + */ + bg_th?: number + /** + * A + * + * a + */ + a?: number + /** + * Depth And Normal + * + * depth_and_normal + */ + depth_and_normal?: boolean + /** + * Image Url + * + * Input image url. + */ + image_url: string +} + +/** + * RetoucherOutput + */ +export type SchemaRetoucherOutput = { + /** + * Image + * + * The generated image file info. + */ + image: SchemaImage + /** + * Seed + * + * The seed used for the generation. + */ + seed: number +} + +/** + * RetoucherInput + */ +export type SchemaRetoucherInput = { + /** + * Seed + * + * Seed for reproducibility. Different seeds will make slightly different results. + */ + seed?: number + /** + * Image Url + * + * The URL of the image to be retouched. + */ + image_url: string +} + +/** + * Output + */ +export type SchemaFastLcmDiffusionImageToImageOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * ImageToImageLCMInput + */ +export type SchemaFastLcmDiffusionImageToImageInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Expand Prompt + * + * If set to true, the prompt will be expanded with additional prompts. + */ + expand_prompt?: boolean + /** + * Guidance Rescale + * + * The rescale factor for the CFG. + */ + guidance_rescale?: number + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Preserve Aspect Ratio + * + * + * If set to true, the aspect ratio of the generated image will be preserved even + * if the image size is too large. However, if the image is not a multiple of 32 + * in width or height, it will be resized to the nearest multiple of 32. By default, + * this snapping to the nearest multiple of 32 will not preserve the aspect ratio. + * Set crop_output to True, to crop the output to the proper aspect ratio + * after generating. + * + */ + preserve_aspect_ratio?: boolean + /** + * Negative Prompt + * + * + * The negative prompt to use.Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). 
+ * + */ + negative_prompt?: string + /** + * Crop Output + * + * + * If set to true, the output cropped to the proper aspect ratio after generating. + * + */ + crop_output?: boolean + /** + * Format + * + * The format of the generated image. + */ + format?: 'jpeg' | 'png' + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Url + * + * The URL of the image to use as a starting point for the generation. + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Model Name + * + * The name of the model to use. + */ + model_name?: + | 'stabilityai/stable-diffusion-xl-base-1.0' + | 'runwayml/stable-diffusion-v1-5' + /** + * Safety Checker Version + * + * The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model. + */ + safety_checker_version?: 'v1' | 'v2' + /** + * Request Id + * + * + * An id bound to a request, can be used with response to identify the request + * itself. + * + */ + request_id?: string + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Strength + * + * determines how much the generated image resembles the initial image + */ + strength?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number +} + +/** + * Output + */ +export type SchemaFastLcmDiffusionInpaintingOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * InpaintingLCMInput + */ +export type SchemaFastLcmDiffusionInpaintingInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Expand Prompt + * + * If set to true, the prompt will be expanded with additional prompts. + */ + expand_prompt?: boolean + /** + * Guidance Rescale + * + * The rescale factor for the CFG. + */ + guidance_rescale?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Negative Prompt + * + * + * The negative prompt to use.Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). 
+ * + */ + negative_prompt?: string + /** + * Format + * + * The format of the generated image. + */ + format?: 'jpeg' | 'png' + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Url + * + * The URL of the image to use as a starting point for the generation. + */ + image_url: string + /** + * Strength + * + * determines how much the generated image resembles the initial image + */ + strength?: number + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Safety Checker Version + * + * The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model. + */ + safety_checker_version?: 'v1' | 'v2' + /** + * Request Id + * + * + * An id bound to a request, can be used with response to identify the request + * itself. + * + */ + request_id?: string + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Mask Url + * + * The URL of the mask to use for inpainting. + */ + mask_url: string + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number + /** + * Model Name + * + * The name of the model to use. + */ + model_name?: + | 'stabilityai/stable-diffusion-xl-base-1.0' + | 'runwayml/stable-diffusion-v1-5' +} + +/** + * Output + */ +export type SchemaPlaygroundV25InpaintingOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * InpaintingPlaygroundv25Input + */ +export type SchemaPlaygroundV25InpaintingInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Embeddings + * + * The list of embeddings to use. + */ + embeddings?: Array + /** + * Expand Prompt + * + * If set to true, the prompt will be expanded with additional prompts. + */ + expand_prompt?: boolean + /** + * Guidance Rescale + * + * The rescale factor for the CFG. + */ + guidance_rescale?: number + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * + * The negative prompt to use.Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). 
+ * + */ + negative_prompt?: string + /** + * Format + * + * The format of the generated image. + */ + format?: 'jpeg' | 'png' + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Url + * + * The URL of the image to use as a starting point for the generation. + */ + image_url: string + /** + * Strength + * + * determines how much the generated image resembles the initial image + */ + strength?: number + /** + * Safety Checker Version + * + * The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model. + */ + safety_checker_version?: 'v1' | 'v2' + /** + * Request Id + * + * + * An id bound to a request, can be used with response to identify the request + * itself. + * + */ + request_id?: string + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number + /** + * Mask Url + * + * The URL of the mask to use for inpainting. + */ + mask_url: string + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * Embedding + */ +export type SchemaEmbedding = { + /** + * Tokens + * + * The list of tokens to use for the embedding. + */ + tokens?: Array + /** + * Path + * + * URL or the path to the embedding weights. + */ + path: string +} + +/** + * Output + */ +export type SchemaFastLightningSdxlInpaintingOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * InpaintingLightningInput + */ +export type SchemaFastLightningSdxlInpaintingInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Embeddings + * + * The list of embeddings to use. + */ + embeddings?: Array + /** + * Expand Prompt + * + * If set to true, the prompt will be expanded with additional prompts. + */ + expand_prompt?: boolean + /** + * Guidance Rescale + * + * The rescale factor for the CFG. + */ + guidance_rescale?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Format + * + * The format of the generated image. + */ + format?: 'jpeg' | 'png' + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Url + * + * The URL of the image to use as a starting point for the generation. + */ + image_url: string + /** + * Strength + * + * determines how much the generated image resembles the initial image + */ + strength?: number + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. 
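+ *
+ * Hedged sketch of what `sync_mode` changes for callers (fetching the result
+ * is assumed and `getResultSomehow` is a hypothetical helper): with sync_mode
+ * enabled the image `url` is a `data:` URI rather than a CDN link, and the
+ * request does not appear in the request history.
+ *
+ * @example
+ * const result: SchemaFastLightningSdxlInpaintingOutput = await getResultSomehow()
+ * const first = result.images[0]
+ * if (first.url.startsWith('data:')) {
+ *   // sync_mode was on; the image is inline, decode it instead of downloading
+ * }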
+ */ + sync_mode?: boolean + /** + * Safety Checker Version + * + * The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model. + */ + safety_checker_version?: 'v1' | 'v2' + /** + * Request Id + * + * + * An id bound to a request, can be used with response to identify the request + * itself. + * + */ + request_id?: string + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: '1' | '2' | '4' | '8' + /** + * Mask Url + * + * The URL of the mask to use for inpainting. + */ + mask_url: string + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number +} + +/** + * Output + */ +export type SchemaFastLightningSdxlImageToImageOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * ImageToImageLightningInput + */ +export type SchemaFastLightningSdxlImageToImageInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Embeddings + * + * The list of embeddings to use. + */ + embeddings?: Array + /** + * Expand Prompt + * + * If set to true, the prompt will be expanded with additional prompts. + */ + expand_prompt?: boolean + /** + * Guidance Rescale + * + * The rescale factor for the CFG. + */ + guidance_rescale?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Preserve Aspect Ratio + * + * + * If set to true, the aspect ratio of the generated image will be preserved even + * if the image size is too large. However, if the image is not a multiple of 32 + * in width or height, it will be resized to the nearest multiple of 32. By default, + * this snapping to the nearest multiple of 32 will not preserve the aspect ratio. + * Set crop_output to True, to crop the output to the proper aspect ratio + * after generating. + * + */ + preserve_aspect_ratio?: boolean + /** + * Crop Output + * + * + * If set to true, the output cropped to the proper aspect ratio after generating. + * + */ + crop_output?: boolean + /** + * Format + * + * The format of the generated image. + */ + format?: 'jpeg' | 'png' + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Url + * + * The URL of the image to use as a starting point for the generation. + */ + image_url: string + /** + * Strength + * + * determines how much the generated image resembles the initial image + */ + strength?: number + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. 
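+ *
+ * Editor's note, as a sketch: unlike most endpoints in this file, the
+ * Lightning variants type `num_inference_steps` as the string literals
+ * '1' | '2' | '4' | '8' rather than a number, so pass a string.
+ *
+ * @example
+ * const input: SchemaFastLightningSdxlImageToImageInput = {
+ *   prompt: 'studio portrait, dramatic lighting',
+ *   image_url: 'https://example.com/selfie.jpg',
+ *   num_inference_steps: '4', // note: '4', not 4
+ *   strength: 0.6,
+ *   sync_mode: true, // return the result inline as a data URI
+ * }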
+ */ + sync_mode?: boolean + /** + * Safety Checker Version + * + * The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model. + */ + safety_checker_version?: 'v1' | 'v2' + /** + * Request Id + * + * + * An id bound to a request, can be used with response to identify the request + * itself. + * + */ + request_id?: string + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: '1' | '2' | '4' | '8' + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number +} + +/** + * Output + */ +export type SchemaPlaygroundV25ImageToImageOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * ImageToImagePlaygroundv25Input + */ +export type SchemaPlaygroundV25ImageToImageInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Embeddings + * + * The list of embeddings to use. + */ + embeddings?: Array + /** + * Expand Prompt + * + * If set to true, the prompt will be expanded with additional prompts. + */ + expand_prompt?: boolean + /** + * Guidance Rescale + * + * The rescale factor for the CFG. + */ + guidance_rescale?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Preserve Aspect Ratio + * + * + * If set to true, the aspect ratio of the generated image will be preserved even + * if the image size is too large. However, if the image is not a multiple of 32 + * in width or height, it will be resized to the nearest multiple of 32. By default, + * this snapping to the nearest multiple of 32 will not preserve the aspect ratio. + * Set crop_output to True, to crop the output to the proper aspect ratio + * after generating. + * + */ + preserve_aspect_ratio?: boolean + /** + * Negative Prompt + * + * + * The negative prompt to use.Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Crop Output + * + * + * If set to true, the output cropped to the proper aspect ratio after generating. + * + */ + crop_output?: boolean + /** + * Format + * + * The format of the generated image. + */ + format?: 'jpeg' | 'png' + /** + * Num Images + * + * The number of images to generate. 
+ */ + num_images?: number + /** + * Image Url + * + * The URL of the image to use as a starting point for the generation. + */ + image_url: string + /** + * Strength + * + * determines how much the generated image resembles the initial image + */ + strength?: number + /** + * Safety Checker Version + * + * The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model. + */ + safety_checker_version?: 'v1' | 'v2' + /** + * Request Id + * + * + * An id bound to a request, can be used with response to identify the request + * itself. + * + */ + request_id?: string + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * Output + */ +export type SchemaBirefnetOutput = { + /** + * Image + * + * Image with background removed + */ + image: SchemaImageFile + /** + * Mask Image + * + * Mask used to remove the background + */ + mask_image?: SchemaImageFile +} + +/** + * ImageFile + */ +export type SchemaImageFile = { + /** + * Height + * + * The height of the image + */ + height?: number + /** + * File Size + * + * The size of the file in bytes. + */ + file_size?: number + /** + * Url + * + * The URL where the file can be downloaded from. + */ + url: string + /** + * Width + * + * The width of the image + */ + width?: number + /** + * File Name + * + * The name of the file. It will be auto-generated if not provided. + */ + file_name?: string + /** + * Content Type + * + * The mime type of the file. + */ + content_type?: string + /** + * File Data + * + * File data + */ + file_data?: Blob | File +} + +/** + * Input + */ +export type SchemaBirefnetInput = { + /** + * Operating Resolution + * + * The resolution to operate on. The higher the resolution, the more accurate the output will be for high res input images. + */ + operating_resolution?: '1024x1024' | '2048x2048' + /** + * Output Format + * + * The format of the output image + */ + output_format?: 'webp' | 'png' | 'gif' + /** + * Image Url + * + * URL of the image to remove background from + */ + image_url: string + /** + * Model + * + * + * Model to use for background removal. + * The 'General Use (Light)' model is the original model used in the BiRefNet repository. + * The 'General Use (Heavy)' model is a slower but more accurate model. + * The 'Portrait' model is a model trained specifically for portrait images. + * The 'General Use (Light)' model is recommended for most use cases. + * + * The corresponding models are as follows: + * - 'General Use (Light)': BiRefNet-DIS_ep580.pth + * - 'General Use (Heavy)': BiRefNet-massive-epoch_240.pth + * - 'Portrait': BiRefNet-portrait-TR_P3M_10k-epoch_120.pth + * + */ + model?: 'General Use (Light)' | 'General Use (Heavy)' | 'Portrait' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Output Mask + * + * Whether to output the mask used to remove the background + */ + output_mask?: boolean + /** + * Refine Foreground + * + * Whether to refine the foreground using the estimated mask + */ + refine_foreground?: boolean +} + +/** + * CreativeUpscalerOutput + */ +export type SchemaCreativeUpscalerOutput = { + /** + * Image + * + * The generated image file info. 
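+ *
+ * Hedged end-to-end sketch (result retrieval is assumed and `getResultSomehow`
+ * is hypothetical): the upscaler returns a single `image` whose `url`,
+ * `width` and `height` follow the SchemaImage shape defined earlier in this
+ * file.
+ *
+ * @example
+ * const out: SchemaCreativeUpscalerOutput = await getResultSomehow()
+ * console.log(`${out.image.width}x${out.image.height}: ${out.image.url}`)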
+ */ + image: SchemaImage + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * CreativeUpscalerInput + */ +export type SchemaCreativeUpscalerInput = { + /** + * Shape Preservation + * + * How much to preserve the shape of the original image + */ + shape_preservation?: number + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. If no prompt is provide BLIP2 will be used to generate a prompt. + */ + prompt?: string | null + /** + * Additional Embedding Url + * + * The URL to the additional embeddings to use for the upscaling. Default is None + */ + additional_embedding_url?: string + /** + * Enable Safety Checks + * + * + * If set to true, the resulting image will be checked whether it includes any + * potentially unsafe content. If it does, it will be replaced with a black + * image. + * + */ + enable_safety_checks?: boolean + /** + * Additional Lora Url + * + * The URL to the additional LORA model to use for the upscaling. Default is None + */ + additional_lora_url?: string + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Scale + * + * The scale of the output image. The higher the scale, the bigger the output image will be. + */ + scale?: number + /** + * Negative Prompt + * + * + * The negative prompt to use.Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Skip Ccsr + * + * + * If set to true, the image will not be processed by the CCSR model before + * being processed by the creativity model. + * + */ + skip_ccsr?: boolean + /** + * Additional Lora Scale + * + * The scale of the additional LORA model to use for the upscaling. Default is 1.0 + */ + additional_lora_scale?: number + /** + * Detail + * + * How much detail to add + */ + detail?: number + /** + * Base Model Url + * + * The URL to the base model to use for the upscaling + */ + base_model_url?: string + /** + * Image Url + * + * The image to upscale. + */ + image_url: string + /** + * Creativity + * + * How much the output can deviate from the original + */ + creativity?: number + /** + * Override Size Limits + * + * + * Allow for large uploads that could take a very long time. + * + */ + override_size_limits?: boolean + /** + * Prompt Suffix + * + * The suffix to add to the prompt. This is useful to add a common ending to all prompts such as 'high quality' etc or embedding tokens. + */ + prompt_suffix?: string + /** + * Num Inference Steps + * + * + * The number of inference steps to use for generating the image. The more steps + * the better the image will be but it will also take longer to generate. + * + */ + num_inference_steps?: number + /** + * Model Type + * + * The type of model to use for the upscaling. Default is SD_1_5 + */ + model_type?: 'SD_1_5' | 'SDXL' + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. 
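+ *
+ * Reproducibility sketch per the description above (URL and values are
+ * illustrative): reusing the `seed` echoed back in a previous output
+ * reproduces that result.
+ *
+ * @example
+ * const rerun: SchemaCreativeUpscalerInput = {
+ *   image_url: 'https://example.com/photo.jpg',
+ *   creativity: 0.5,
+ *   seed: 42, // e.g. taken from a previous SchemaCreativeUpscalerOutput
+ * }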
+ * + */ + seed?: number +} + +/** + * PhotoMakerOutput + */ +export type SchemaPhotomakerOutput = { + /** + * Images + */ + images: Array + /** + * Seed + */ + seed: number +} + +/** + * PhotoMakerInput + */ +export type SchemaPhotomakerInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Number of images + * + * + * Number of images to generate in one request. Note that the higher the batch size, + * the longer it will take to generate the images. + * + */ + num_images?: number + /** + * Style strength (in %) + */ + style_strength?: number + /** + * Style + */ + style?: + | '(No style)' + | 'Cinematic' + | 'Disney Character' + | 'Digital Art' + | 'Photographic' + | 'Fantasy art' + | 'Neonpunk' + | 'Enhance' + | 'Comic book' + | 'Lowpoly' + | 'Line art' + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number + /** + * Image Archive Url + * + * The URL of the image archive containing the images you want to use. + */ + image_archive_url: string + /** + * Initial Image Url + * + * Optional initial image for img2img + */ + initial_image_url?: string + /** + * Number of inference steps + * + * + * Increasing the amount of steps tells Stable Diffusion that it should take more steps + * to generate your final result which can increase the amount of detail in your image. + * + */ + num_inference_steps?: number + /** + * Initial Image Strength + * + * How much noise to add to the latent image. O for no noise, 1 for maximum noise. + */ + initial_image_strength?: number + /** + * Base Pipeline + * + * The base pipeline to use for generating the image. + */ + base_pipeline?: 'photomaker' | 'photomaker-style' + /** + * Negative Prompt + * + * + * The negative prompt to use.Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string +} + +/** + * FaceToStickerOutput + */ +export type SchemaFaceToStickerOutput = { + /** + * Images + * + * The generated images. + */ + images: Array + /** + * Sticker Image + * + * The generated face sticker image. + */ + sticker_image: SchemaImage + /** + * Sticker Image Background Removed + * + * The generated face sticker image with the background removed. + */ + sticker_image_background_removed: SchemaImage + /** + * Seed + * + * Seed used during the inference. + */ + seed: number + /** + * Has Nsfw Concepts + * + * + * Whether the generated images contain NSFW concepts. + * The key is the image type and the value is a boolean. + * + */ + has_nsfw_concepts: { + [key: string]: boolean + } +} + +/** + * FaceToStickerInput + */ +export type SchemaFaceToStickerInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Enable Safety Checker + * + * If set to false, the safety checker will be disabled. + */ + enable_safety_checker?: boolean + /** + * Image Size + * + * The size of the generated image. 
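+ *
+ * Sketch of the `image_size` union used throughout this file (URLs are
+ * illustrative): either one of the preset names or an explicit
+ * SchemaImageSize object.
+ *
+ * @example
+ * const preset: SchemaFaceToStickerInput = {
+ *   prompt: 'cute cartoon sticker',
+ *   image_url: 'https://example.com/face.jpg',
+ *   image_size: 'square_hd',
+ * }
+ * const explicit: SchemaFaceToStickerInput = {
+ *   prompt: 'cute cartoon sticker',
+ *   image_url: 'https://example.com/face.jpg',
+ *   image_size: { width: 768, height: 1024 },
+ * }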
+ */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * IP adapter weight + * + * The weight of the IP adapter. + */ + ip_adapter_weight?: number + /** + * Image Url + * + * URL of the video. + */ + image_url: string + /** + * Upscale steps + * + * The number of steps to use for upscaling. Only used if `upscale` is `true`. + */ + upscale_steps?: number + /** + * Instant ID strength + * + * The strength of the instant ID. + */ + instant_id_strength?: number + /** + * Upscale + * + * Whether to upscale the image 2x. + */ + upscale?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Number of inference steps + * + * + * Increasing the amount of steps tells Stable Diffusion that it should take more steps + * to generate your final result which can increase the amount of detail in your image. + * + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * IP adapter noise + * + * The amount of noise to add to the IP adapter. + */ + ip_adapter_noise?: number +} + +/** + * Output + */ +export type SchemaFastSdxlInpaintingOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Timings + */ + timings: { + [key: string]: number + } +} + +/** + * InpaintingInput + */ +export type SchemaFastSdxlInpaintingInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Embeddings + * + * The list of embeddings to use. + */ + embeddings?: Array + /** + * Expand Prompt + * + * If set to true, the prompt will be expanded with additional prompts. + */ + expand_prompt?: boolean + /** + * Loras + * + * The list of LoRA weights to use. + */ + loras?: Array + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. 
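+ *
+ * Hedged sketch pairing this flag with the `safety_checker_version` option
+ * declared below (URLs are illustrative placeholders); `has_nsfw_concepts` on
+ * the output then reports the per-image verdicts.
+ *
+ * @example
+ * const input: SchemaFastSdxlInpaintingInput = {
+ *   prompt: 'restore the damaged corner of the painting',
+ *   image_url: 'https://example.com/painting.png',
+ *   mask_url: 'https://example.com/damage-mask.png',
+ *   enable_safety_checker: true,
+ *   safety_checker_version: 'v2', // custom ViT checker
+ * }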
+ */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * + * The negative prompt to use.Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Format + * + * The format of the generated image. + */ + format?: 'jpeg' | 'png' + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Url + * + * The URL of the image to use as a starting point for the generation. + */ + image_url: string + /** + * Strength + * + * determines how much the generated image resembles the initial image + */ + strength?: number + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Safety Checker Version + * + * The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model. + */ + safety_checker_version?: 'v1' | 'v2' + /** + * Request Id + * + * + * An id bound to a request, can be used with response to identify the request + * itself. + * + */ + request_id?: string + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Mask Url + * + * The URL of the mask to use for inpainting. + */ + mask_url: string + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number +} + +/** + * Output + */ +export type SchemaFastSdxlImageToImageOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Timings + */ + timings: { + [key: string]: number + } +} + +/** + * ImageToImageInput + */ +export type SchemaFastSdxlImageToImageInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Embeddings + * + * The list of embeddings to use. + */ + embeddings?: Array + /** + * Expand Prompt + * + * If set to true, the prompt will be expanded with additional prompts. + */ + expand_prompt?: boolean + /** + * Loras + * + * The list of LoRA weights to use. + */ + loras?: Array + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Preserve Aspect Ratio + * + * + * If set to true, the aspect ratio of the generated image will be preserved even + * if the image size is too large. 
However, if the image is not a multiple of 32 + * in width or height, it will be resized to the nearest multiple of 32. By default, + * this snapping to the nearest multiple of 32 will not preserve the aspect ratio. + * Set crop_output to True, to crop the output to the proper aspect ratio + * after generating. + * + */ + preserve_aspect_ratio?: boolean + /** + * Negative Prompt + * + * + * The negative prompt to use.Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Crop Output + * + * + * If set to true, the output cropped to the proper aspect ratio after generating. + * + */ + crop_output?: boolean + /** + * Format + * + * The format of the generated image. + */ + format?: 'jpeg' | 'png' + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Url + * + * The URL of the image to use as a starting point for the generation. + */ + image_url: string + /** + * Strength + * + * determines how much the generated image resembles the initial image + */ + strength?: number + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Safety Checker Version + * + * The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model. + */ + safety_checker_version?: 'v1' | 'v2' + /** + * Request Id + * + * + * An id bound to a request, can be used with response to identify the request + * itself. + * + */ + request_id?: string + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * OutputParameters + */ +export type SchemaLoraImageToImageOutput = { + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Debug Latents + * + * The latents saved for debugging. + */ + debug_latents?: SchemaFile + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Debug Per Pass Latents + * + * The latents saved for debugging per pass. + */ + debug_per_pass_latents?: SchemaFile +} + +/** + * File + */ +export type SchemaFile = { + /** + * File Size + * + * The size of the file in bytes. + */ + file_size?: number + /** + * File Name + * + * The name of the file. It will be auto-generated if not provided. + */ + file_name?: string + /** + * Content Type + * + * The mime type of the file. + */ + content_type?: string + /** + * Url + * + * The URL where the file can be downloaded from. + */ + url: string + /** + * File Data + * + * File data + */ + file_data?: Blob | File +} + +/** + * ImageToImageInput + */ +export type SchemaLoraImageToImageInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Noise Strength + * + * The amount of noise to add to noise image for image. 
Only used if the image_url is provided. 1.0 is complete noise and 0 is no noise. + */ + noise_strength?: number + /** + * Tile Height + * + * The size of the tiles to be used for the image generation. + */ + tile_height?: number + /** + * Embeddings + * + * + * The embeddings to use for the image generation. Only a single embedding is supported at the moment. + * The embeddings will be used to map the tokens in the prompt to the embedding weights. + * + */ + embeddings?: Array + /** + * Ic Light Model Url + * + * + * The URL of the IC Light model to use for the image generation. + * + */ + ic_light_model_url?: string + /** + * Image Encoder Weight Name + * + * + * The weight name of the image encoder model to use for the image generation. + * + */ + image_encoder_weight_name?: string + /** + * Ip Adapter + * + * + * The IP adapter to use for the image generation. + * + */ + ip_adapter?: Array + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use any number of LoRAs + * and they will be merged together to generate the final image. + * + */ + loras?: Array + /** + * Scheduler + * + * Scheduler / sampler to use for the image denoising process. + */ + scheduler?: + | 'DPM++ 2M' + | 'DPM++ 2M Karras' + | 'DPM++ 2M SDE' + | 'DPM++ 2M SDE Karras' + | 'Euler' + | 'Euler A' + | 'Euler (trailing timesteps)' + | 'LCM' + | 'LCM (trailing timesteps)' + | 'DDIM' + | 'TCD' + /** + * Sigmas + * + * + * Optionally override the sigmas to use for the denoising process. Only works with schedulers which support the `sigmas` argument in their `set_sigmas` method. + * Defaults to not overriding, in which case the scheduler automatically sets the sigmas based on the `num_inference_steps` parameter. + * If set to a custom sigma schedule, the `num_inference_steps` parameter will be ignored. Cannot be set if `timesteps` is set. + * + */ + sigmas?: SchemaSigmasInput + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Tile Stride Width + * + * The stride of the tiles to be used for the image generation. + */ + tile_stride_width?: number + /** + * Debug Per Pass Latents + * + * If set to true, the latents will be saved for debugging per pass. + */ + debug_per_pass_latents?: boolean + /** + * Timesteps + * + * + * Optionally override the timesteps to use for the denoising process. Only works with schedulers which support the `timesteps` argument in their `set_timesteps` method. + * Defaults to not overriding, in which case the scheduler automatically sets the timesteps based on the `num_inference_steps` parameter. + * If set to a custom timestep schedule, the `num_inference_steps` parameter will be ignored. Cannot be set if `sigmas` is set. + * + */ + timesteps?: SchemaTimestepsInput + /** + * Model Name + * + * URL or HuggingFace ID of the base model to generate the image. + */ + model_name: string + /** + * Prompt Weighting + * + * + * If set to true, the prompt weighting syntax will be used. + * Additionally, this will lift the 77 token limit by averaging embeddings. + * + */ + prompt_weighting?: boolean + /** + * Variant + * + * The variant of the model to use for huggingface models, e.g. 'fp16'. + */ + variant?: string + /** + * Image Url + * + * URL of image to use for image to image/inpainting. 
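+ *
+ * Sketch of a typical LoRA image-to-image request (paths, URLs and scales are
+ * illustrative): multiple LoRAs can be merged into the base model, and the
+ * scheduler is picked from the enum above.
+ *
+ * @example
+ * const input: SchemaLoraImageToImageInput = {
+ *   model_name: 'stabilityai/stable-diffusion-xl-base-1.0',
+ *   prompt: 'watercolor city skyline',
+ *   image_url: 'https://example.com/sketch.png',
+ *   noise_strength: 0.5,
+ *   scheduler: 'DPM++ 2M Karras',
+ *   loras: [
+ *     { path: 'https://example.com/watercolor-lora.safetensors', scale: 0.8 },
+ *   ],
+ * }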
+ */ + image_url?: string + /** + * Controlnet Guess Mode + * + * + * If set to true, the controlnet will be applied to only the conditional predictions. + * + */ + controlnet_guess_mode?: boolean + /** + * Image Encoder Subfolder + * + * + * The subfolder of the image encoder model to use for the image generation. + * + */ + image_encoder_subfolder?: string + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number + /** + * Ic Light Model Background Image Url + * + * + * The URL of the IC Light model background image to use for the image generation. + * Make sure to use a background compatible with the model. + * + */ + ic_light_model_background_image_url?: string + /** + * Rescale Betas Snr Zero + * + * + * Whether to set the rescale_betas_snr_zero option or not for the sampler. + * + */ + rescale_betas_snr_zero?: boolean + /** + * Tile Width + * + * The size of the tiles to be used for the image generation. + */ + tile_width?: number + /** + * Prediction Type + * + * + * The type of prediction to use for the image generation. + * `epsilon` is the default. + * + */ + prediction_type?: 'v_prediction' | 'epsilon' + /** + * Eta + * + * The eta value to be used for the image generation. + */ + eta?: number + /** + * Image Encoder Path + * + * + * The path to the image encoder model to use for the image generation. + * + */ + image_encoder_path?: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Image Format + * + * The format of the generated image. + */ + image_format?: 'jpeg' | 'png' + /** + * Number of images + * + * + * Number of images to generate in one request. Note that the higher the batch size, + * the longer it will take to generate the images. + * + */ + num_images?: number + /** + * Debug Latents + * + * If set to true, the latents will be saved for debugging. + */ + debug_latents?: boolean + /** + * Ic Light Image Url + * + * + * The URL of the IC Light model image to use for the image generation. + * + */ + ic_light_image_url?: string + /** + * Unet Name + * + * URL or HuggingFace ID of the custom U-Net model to use for the image generation. + */ + unet_name?: string + /** + * Clip Skip + * + * + * Skips part of the image generation process, leading to slightly different results. + * This means the image renders faster, too. + * + */ + clip_skip?: number + /** + * Tile Stride Height + * + * The stride of the tiles to be used for the image generation. + */ + tile_stride_height?: number + /** + * Controlnets + * + * + * The control nets to use for the image generation. You can use any number of control nets + * and they will be applied to the image at the specified timesteps. + * + */ + controlnets?: Array<SchemaControlNet> + /** + * Number of inference steps + * + * + * Increasing the amount of steps tells Stable Diffusion that it should take more steps + * to generate your final result which can increase the amount of detail in your image. + * + */ + num_inference_steps?: number +}
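// Editor's note: an illustrative SchemaLoraImageToImageInput value (all URLs
// and the model ID are placeholders, not defaults from the schema).
export const loraImg2ImgExample: SchemaLoraImageToImageInput = {
  model_name: 'stabilityai/stable-diffusion-xl-base-1.0', // any HF ID or URL
  prompt: 'a watercolor painting of a lighthouse at dusk',
  image_url: 'https://example.com/source.png',
  strength: 0.65, // values closer to 1.0 remake more of the source image
  num_inference_steps: 30,
}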
+ +/** + * ControlNet + */ +export type SchemaControlNet = { + /** + * Conditioning Scale + * + * + * The scale of the control net weight. This is used to scale the control net weight + * before merging it with the base model. + * + */ + conditioning_scale?: number + /** + * Path + * + * URL or the path to the control net weights. + */ + path: string + /** + * Ip Adapter Index + * + * + * The index of the IP adapter to be applied to the controlnet. This is only needed for InstantID ControlNets. + * + */ + ip_adapter_index?: number + /** + * End Percentage + * + * + * The percentage of the image to end applying the controlnet in terms of the total timesteps. + * + */ + end_percentage?: number + /** + * Config Url + * + * Optional URL to the controlnet config.json file. + */ + config_url?: string + /** + * Image Url + * + * URL of the image to be used as the control net. + */ + image_url: string + /** + * Variant + * + * The optional variant if a Hugging Face repo key is used. + */ + variant?: string + /** + * Mask Url + * + * + * The mask to use for the controlnet. When using a mask, the control image size and the mask size must be the same and divisible by 32. + * + */ + mask_url?: string + /** + * Start Percentage + * + * + * The percentage of the image to start applying the controlnet in terms of the total timesteps. + * + */ + start_percentage?: number +} + +/** + * TimestepsInput + */ +export type SchemaTimestepsInput = { + /** + * Method + * + * + * The method to use for the timesteps. If set to 'array', the timesteps will be set based + * on the provided timesteps schedule in the `array` field. + * Defaults to 'default' which means the scheduler will use the `num_inference_steps` parameter. + * + */ + method?: 'default' | 'array' + /** + * Array + * + * + * Timesteps schedule to be used if the 'array' method is selected. + * + */ + array?: Array<number> +} + +/** + * SigmasInput + */ +export type SchemaSigmasInput = { + /** + * Method + * + * + * The method to use for the sigmas. If set to 'array', the sigmas will be set based + * on the provided sigmas schedule in the `array` field. + * Defaults to 'default' which means the scheduler will use the sigmas of the scheduler. + * + */ + method?: 'default' | 'array' + /** + * Array + * + * + * Sigmas schedule to be used if the 'array' method is selected. + * + */ + array?: Array<number> +}
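// Editor's note: a sketch of the override semantics documented above. With
// method 'array', the supplied schedule replaces the one derived from
// `num_inference_steps`; `timesteps` and `sigmas` are mutually exclusive.
export const customTimestepsExample: SchemaTimestepsInput = {
  method: 'array',
  array: [999, 850, 700, 550, 400, 250, 100], // illustrative schedule only
}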
+ +/** + * IPAdapter + */ +export type SchemaIpAdapter = { + /** + * Unconditional Noising Factor + * + * The factor to apply to the unconditional noising of the IP adapter. + */ + unconditional_noising_factor?: number + /** + * Ip Adapter Image Url + * + * URL of the image to be used as the IP adapter. + */ + ip_adapter_image_url: string | Array<string> + /** + * Path + * + * URL or the path to the IP adapter weights. + */ + path: string + /** + * Image Projection Shortcut + * + * + * The value to set the image projection shortcut to. For FaceID plus V1 models, + * this should be set to False. For FaceID plus V2 models, this should be set to True. + * Default is True. + * + */ + image_projection_shortcut?: boolean + /** + * Scale Json + * + * + * The scale of the IP adapter weight. This is used to scale the IP adapter weight + * before merging it with the base model. + * + */ + scale_json?: { + [key: string]: unknown + } + /** + * Ip Adapter Mask Url + * + * + * The mask to use for the IP adapter. When using a mask, the ip-adapter image size and the mask size must be the same. + * + */ + ip_adapter_mask_url?: string + /** + * Model Subfolder + * + * Subfolder in the model directory where the IP adapter weights are stored. + */ + model_subfolder?: string + /** + * Scale + * + * + * The scale of the IP adapter weight. This is used to scale the IP adapter weight + * before merging it with the base model. + * + */ + scale?: number + /** + * Insight Face Model Path + * + * URL or the path to the InsightFace model weights. + */ + insight_face_model_path?: string + /** + * Weight Name + * + * Name of the weight file. + */ + weight_name?: string +} + +/** + * OutputParameters + */ +export type SchemaLoraInpaintOutput = { + /** + * Images + * + * The generated image files info. + */ + images: Array<SchemaImage> + /** + * Debug Latents + * + * The latents saved for debugging. + */ + debug_latents?: SchemaFile + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value as the one passed in the + * input or the randomly generated seed that was used in case none was passed. + * + */ + seed: number + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array<boolean> + /** + * Debug Per Pass Latents + * + * The latents saved for debugging per pass. + */ + debug_per_pass_latents?: SchemaFile +}
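// Editor's note: an illustrative SchemaIpAdapter value (the weight path and
// reference image URL are placeholders).
export const ipAdapterExample: SchemaIpAdapter = {
  path: 'h94/IP-Adapter', // repo key or URL of the IP adapter weights
  ip_adapter_image_url: 'https://example.com/reference.png',
  scale: 0.8, // scales the adapter weight before merging with the base model
}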
+ +/** + * InpaintInput + */ +export type SchemaLoraInpaintInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Noise Strength + * + * The amount of noise to add to the input image. Only used if the image_url is provided. 1.0 is complete noise and 0 is no noise. + */ + noise_strength?: number + /** + * Tile Height + * + * The size of the tiles to be used for the image generation. + */ + tile_height?: number + /** + * Embeddings + * + * + * The embeddings to use for the image generation. Only a single embedding is supported at the moment. + * The embeddings will be used to map the tokens in the prompt to the embedding weights. + * + */ + embeddings?: Array + /** + * Ic Light Model Url + * + * + * The URL of the IC Light model to use for the image generation. + * + */ + ic_light_model_url?: string + /** + * Image Encoder Weight Name + * + * + * The weight name of the image encoder model to use for the image generation. + * + */ + image_encoder_weight_name?: string + /** + * Ip Adapter + * + * + * The IP adapter to use for the image generation. + * + */ + ip_adapter?: Array<SchemaIpAdapter> + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use any number of LoRAs + * and they will be merged together to generate the final image. + * + */ + loras?: Array + /** + * Scheduler + * + * Scheduler / sampler to use for the image denoising process. + */ + scheduler?: + | 'DPM++ 2M' + | 'DPM++ 2M Karras' + | 'DPM++ 2M SDE' + | 'DPM++ 2M SDE Karras' + | 'Euler' + | 'Euler A' + | 'Euler (trailing timesteps)' + | 'LCM' + | 'LCM (trailing timesteps)' + | 'DDIM' + | 'TCD' + /** + * Sigmas + * + * + * Optionally override the sigmas to use for the denoising process. Only works with schedulers which support the `sigmas` argument in their `set_sigmas` method. + * Defaults to not overriding, in which case the scheduler automatically sets the sigmas based on the `num_inference_steps` parameter. + * If set to a custom sigma schedule, the `num_inference_steps` parameter will be ignored. Cannot be set if `timesteps` is set. + * + */ + sigmas?: SchemaSigmasInput + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Tile Stride Width + * + * The stride of the tiles to be used for the image generation. + */ + tile_stride_width?: number + /** + * Debug Per Pass Latents + * + * If set to true, the latents will be saved for debugging per pass. + */ + debug_per_pass_latents?: boolean + /** + * Timesteps + * + * + * Optionally override the timesteps to use for the denoising process. Only works with schedulers which support the `timesteps` argument in their `set_timesteps` method. + * Defaults to not overriding, in which case the scheduler automatically sets the timesteps based on the `num_inference_steps` parameter. + * If set to a custom timestep schedule, the `num_inference_steps` parameter will be ignored. Cannot be set if `sigmas` is set. + * + */ + timesteps?: SchemaTimestepsInput + /** + * Model Name + * + * URL or HuggingFace ID of the base model to generate the image. + */ + model_name: string + /** + * Prompt Weighting + * + * + * If set to true, the prompt weighting syntax will be used. + * Additionally, this will lift the 77 token limit by averaging embeddings. + * + */ + prompt_weighting?: boolean + /** + * Variant + * + * The variant of the model to use for huggingface models, e.g. 'fp16'. + */ + variant?: string + /** + * Image Url + * + * URL of the image to use for image-to-image or inpainting. + */ + image_url?: string + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number + /** + * Mask Url + * + * URL of black-and-white image to use as mask during inpainting. + */ + mask_url?: string + /** + * Image Encoder Subfolder + * + * + * The subfolder of the image encoder model to use for the image generation. + * + */ + image_encoder_subfolder?: string + /** + * Ic Light Model Background Image Url + * + * + * The URL of the IC Light model background image to use for the image generation. + * Make sure to use a background compatible with the model. + * + */ + ic_light_model_background_image_url?: string + /** + * Rescale Betas Snr Zero + * + * + * Whether to set the rescale_betas_snr_zero option or not for the sampler. + * + */ + rescale_betas_snr_zero?: boolean + /** + * Tile Width + * + * The size of the tiles to be used for the image generation. + */ + tile_width?: number + /** + * Controlnet Guess Mode + * + * + * If set to true, the controlnet will be applied to only the conditional predictions. + * + */ + controlnet_guess_mode?: boolean + /** + * Prediction Type + * + * + * The type of prediction to use for the image generation. + * `epsilon` is the default. + * + */ + prediction_type?: 'v_prediction' | 'epsilon' + /** + * Eta + * + * The eta value to be used for the image generation. + */ + eta?: number + /** + * Image Encoder Path + * + * + * The path to the image encoder model to use for the image generation. + * + */ + image_encoder_path?: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Image Format + * + * The format of the generated image. + */ + image_format?: 'jpeg' | 'png' + /** + * Number of images + * + * + * Number of images to generate in one request. Note that the higher the batch size, + * the longer it will take to generate the images. + * + */ + num_images?: number + /** + * Debug Latents + * + * If set to true, the latents will be saved for debugging. + */ + debug_latents?: boolean + /** + * Ic Light Image Url + * + * + * The URL of the IC Light model image to use for the image generation. + * + */ + ic_light_image_url?: string + /** + * Unet Name + * + * URL or HuggingFace ID of the custom U-Net model to use for the image generation. + */ + unet_name?: string + /** + * Clip Skip + * + * + * Skips part of the image generation process, leading to slightly different results. + * This means the image renders faster, too. + * + */ + clip_skip?: number + /** + * Tile Stride Height + * + * The stride of the tiles to be used for the image generation. + */ + tile_stride_height?: number + /** + * Controlnets + * + * + * The control nets to use for the image generation. You can use any number of control nets + * and they will be applied to the image at the specified timesteps. + * + */ + controlnets?: Array<SchemaControlNet> + /** + * Number of inference steps + * + * + * Increasing the amount of steps tells Stable Diffusion that it should take more steps + * to generate your final result which can increase the amount of detail in your image. + * + */ + num_inference_steps?: number +}
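// Editor's note: an illustrative inpainting request for the type above. Per
// the field docs, the mask is a black-and-white image; URLs and the base
// model are placeholders.
export const loraInpaintExample: SchemaLoraInpaintInput = {
  model_name: 'runwayml/stable-diffusion-v1-5', // placeholder base model
  prompt: 'a red brick wall',
  image_url: 'https://example.com/photo.png',
  mask_url: 'https://example.com/mask.png',
}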
+ +/** + * IpAdapterFaceIdOutput + */ +export type SchemaIpAdapterFaceIdOutput = { + /** + * Image + * + * The generated image file info. + */ + image: SchemaImage + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value as the one passed in the + * input or the randomly generated seed that was used in case none was passed. + * + */ + seed: number +} + +/** + * IpAdapterFaceIdInput + */ +export type SchemaIpAdapterFaceIdInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Face Image Url + * + * An image of a face to match. If an image with a size of 640x640 is not provided, it will be scaled and cropped to that size. + */ + face_image_url?: string + /** + * Width + * + * + * The width of the generated image. + * + */ + width?: number + /** + * Face Id Det Size + * + * + * The size of the face detection model. The higher the number the more accurate + * the detection will be but it will also take longer to run. The higher the number the more + * likely it will fail to find a face as well. Lower it if you are having trouble + * finding a face in the image. + * + */ + face_id_det_size?: number + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Height + * + * + * The height of the generated image. + * + */ + height?: number + /** + * Num Samples + * + * + * The number of samples for face id.
The more samples the better the image will + * be but it will also take longer to generate. Default is 4. + * + */ + num_samples?: number + /** + * Base Sdxl Model Repo + * + * The URL to the base SDXL model. Default is SG161222/RealVisXL_V3.0 + */ + base_sdxl_model_repo?: string + /** + * Base 1 5 Model Repo + * + * The URL to the base 1.5 model. Default is SG161222/Realistic_Vision_V4.0_noVAE + */ + base_1_5_model_repo?: string + /** + * Num Inference Steps + * + * + * The number of inference steps to use for generating the image. The more steps + * the better the image will be but it will also take longer to generate. + * + */ + num_inference_steps?: number + /** + * Model Type + * + * The model type to use. 1_5 is the default and is recommended for most use cases. + */ + model_type?: + | '1_5-v1' + | '1_5-v1-plus' + | '1_5-v2-plus' + | 'SDXL-v1' + | 'SDXL-v2-plus' + | '1_5-auraface-v1' + /** + * Face Images Data Url + * + * + * URL to zip archive with images of faces. The images embedding will be averaged to + * create a more accurate face id. + * + */ + face_images_data_url?: string + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number +} + +/** + * OmniZeroOutput + */ +export type SchemaOmniZeroOutput = { + /** + * Image + * + * The generated image. + */ + image: SchemaImage +} + +/** + * OmniZeroInput + */ +export type SchemaOmniZeroInput = { + /** + * Prompt + * + * Prompt to guide the image generation. + */ + prompt: string + /** + * Identity Image Url + * + * Identity image url. + */ + identity_image_url: string + /** + * Identity Strength + * + * Identity strength. + */ + identity_strength?: number + /** + * Number Of Images + * + * Number of images. + */ + number_of_images?: number + /** + * Guidance Scale + * + * Guidance scale. + */ + guidance_scale?: number + /** + * Image Strength + * + * Image strength. + */ + image_strength?: number + /** + * Negative Prompt + * + * Negative prompt to guide the image generation. + */ + negative_prompt?: string + /** + * Composition Image Url + * + * Composition image url. + */ + composition_image_url: string + /** + * Depth Strength + * + * Depth strength. + */ + depth_strength?: number + /** + * Composition Strength + * + * Composition strength. + */ + composition_strength?: number + /** + * Image Url + * + * Input image url. + */ + image_url: string + /** + * Style Image Url + * + * Style image url. + */ + style_image_url: string + /** + * Face Strength + * + * Face strength. + */ + face_strength?: number + /** + * Style Strength + * + * Style strength. + */ + style_strength?: number + /** + * Seed + * + * Seed. + */ + seed?: number +} + +/** + * CCSROutput + */ +export type SchemaCcsrOutput = { + /** + * Image + * + * The generated image file info. + */ + image: SchemaImage + /** + * Seed + * + * The seed used for the generation. + */ + seed: number +} + +/** + * CCSRInput + */ +export type SchemaCcsrInput = { + /** + * Color Fix Type + * + * Type of color correction for samples. + */ + color_fix_type?: 'none' | 'wavelet' | 'adain' + /** + * Tile Diffusion Size + * + * Size of patch. + */ + tile_diffusion_size?: number + /** + * Tile Vae Decoder Size + * + * Size of VAE patch. + */ + tile_vae_decoder_size?: number + /** + * Tile Vae Encoder Size + * + * Size of latent image + */ + tile_vae_encoder_size?: number + /** + * T Min + * + * The starting point of uniform sampling strategy. 
+ */ + t_min?: number + /** + * Image Url + * + * The URL or data URI of the image to upscale. + */ + image_url: string + /** + * Tile Diffusion Stride + * + * Stride of sliding patch. + */ + tile_diffusion_stride?: number + /** + * Tile Vae + * + * If specified, a patch-based sampling strategy will be used for VAE decoding. + */ + tile_vae?: boolean + /** + * Scale + * + * The scale of the output image. The higher the scale, the bigger the output image will be. + */ + scale?: number + /** + * Seed + * + * Seed for reproducibility. Different seeds will produce slightly different results. + */ + seed?: number + /** + * T Max + * + * The ending point of uniform sampling strategy. + */ + t_max?: number + /** + * Steps + * + * The number of steps to run the model for. The higher the number the better the quality and longer it will take to generate. + */ + steps?: number + /** + * Tile Diffusion + * + * If specified, a patch-based sampling strategy will be used for sampling. + */ + tile_diffusion?: 'none' | 'mix' | 'gaussian' +} + +/** + * Output + */ +export type SchemaSd15DepthControlnetOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array<SchemaImage> + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array<boolean> + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value as the one passed in the + * input or the randomly generated seed that was used in case none was passed. + * + */ + seed: number +} + +/** + * TextToImageControlNetInput + */ +export type SchemaSd15DepthControlnetInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Image Size + * + * The size of the generated image. Leave it as none to automatically infer from the control image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + | null + /** + * Expand Prompt + * + * If set to true, the prompt will be expanded with additional prompts. + */ + expand_prompt?: boolean + /** + * Loras + * + * The list of LoRA weights to use. + */ + loras?: Array + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Controlnet Conditioning Scale + * + * The scale of the controlnet conditioning. + */ + controlnet_conditioning_scale?: number + /** + * Sync Mode + * + * + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN.
+ * + */ + sync_mode?: boolean + /** + * Control Image Url + * + * The URL of the control image. + */ + control_image_url: string + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number + /** + * Enable Deep Cache + * + * + * If set to true, DeepCache will be enabled. TBD + * + */ + enable_deep_cache?: boolean +} + +/** + * DWPoseOutput + */ +export type SchemaDwposeOutput = { + /** + * Image + * + * The predicted pose image + */ + image: SchemaImage +} + +/** + * DWPoseInput + */ +export type SchemaDwposeInput = { + /** + * Draw Mode + * + * Mode of drawing the pose on the image. Options are: 'full-pose', 'body-pose', 'face-pose', 'hand-pose', 'face-hand-mask', 'face-mask', 'hand-mask'. + */ + draw_mode?: + | 'full-pose' + | 'body-pose' + | 'face-pose' + | 'hand-pose' + | 'face-hand-mask' + | 'face-mask' + | 'hand-mask' + /** + * Image Url + * + * URL of the image to be processed. + */ + image_url: string +} + +/** + * SD3Output + */ +export type SchemaStableDiffusionV3MediumImageToImageOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array<SchemaImage> + /** + * Number of Images + * + * The number of images generated. + */ + num_images: number + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array<boolean> + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value as the one passed in the + * input or the randomly generated seed that was used in case none was passed. + * + */ + seed: number +} + +/** + * ImageToImageInput + */ +export type SchemaStableDiffusionV3MediumImageToImageInput = { + /** + * Enhance Prompt + * + * If set to true, the prompt will be upsampled with more details. + */ + prompt_expansion?: boolean + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. Defaults to the conditioning image's size. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + | null + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Image URL + * + * The image URL to generate an image from. + */ + image_url: string + /** + * Strength + * + * The strength of the image-to-image transformation. + */ + strength?: number + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Negative Prompt + * + * The negative prompt to generate an image from.
+ */ + negative_prompt?: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean +} + +/** + * Region + */ +export type SchemaRegion = { + /** + * Y1 + * + * Y-coordinate of the top-left corner + */ + y1: number + /** + * X2 + * + * X-coordinate of the bottom-right corner + */ + x2: number + /** + * X1 + * + * X-coordinate of the top-left corner + */ + x1: number + /** + * Y2 + * + * Y-coordinate of the bottom-right corner + */ + y2: number +} + +/** + * PolygonOutputWithLabels + */ +export type SchemaFlorence2LargeRegionToSegmentationOutput = { + /** + * Image + * + * Processed image + */ + image?: SchemaImage + /** + * Results + * + * Results from the model + */ + results: SchemaPolygonOutput +} + +/** + * Polygon + */ +export type SchemaPolygon = { + /** + * Points + * + * List of points + */ + points: Array<{ + [key: string]: number + }> + /** + * Label + * + * Label of the polygon + */ + label: string +} + +/** + * PolygonOutput + */ +export type SchemaPolygonOutput = { + /** + * Polygons + * + * List of polygons + */ + polygons: Array<SchemaPolygon> +} + +/** + * ImageWithUserCoordinatesInput + */ +export type SchemaFlorence2LargeRegionToSegmentationInput = { + /** + * Region + * + * The user input coordinates + */ + region: SchemaRegion + /** + * Image Url + * + * The URL of the image to be processed. + */ + image_url: string +} + +/** + * OCRBoundingBoxSingle + */ +export type SchemaOcrBoundingBoxSingle = { + /** + * Y + * + * Y-coordinate of the top-left corner + */ + y: number + /** + * Label + * + * Label of the bounding box + */ + label: string + /** + * H + * + * Height of the bounding box + */ + h: number + /** + * W + * + * Width of the bounding box + */ + w: number + /** + * X + * + * X-coordinate of the top-left corner + */ + x: number +} + +/** + * OCRBoundingBox + */ +export type SchemaOcrBoundingBox = { + /** + * Quad Boxes + * + * List of quadrilateral boxes + */ + quad_boxes: Array<SchemaOcrBoundingBoxSingle> +} + +/** + * OCRBoundingBoxOutputWithLabels + */ +export type SchemaFlorence2LargeOcrWithRegionOutput = { + /** + * Image + * + * Processed image + */ + image?: SchemaImage + /** + * Results + * + * Results from the model + */ + results: SchemaOcrBoundingBox +} + +/** + * ImageInput + */ +export type SchemaFlorence2LargeOcrWithRegionInput = { + /** + * Image Url + * + * The URL of the image to be processed. + */ + image_url: string +} + +/** + * BoundingBoxOutputWithLabels + */ +export type SchemaFlorence2LargeRegionProposalOutput = { + /** + * Image + * + * Processed image + */ + image?: SchemaImage + /** + * Results + * + * Results from the model + */ + results: SchemaBoundingBoxes +} + +/** + * BoundingBox + */ +export type SchemaBoundingBox = { + /** + * Y + * + * Y-coordinate of the top-left corner + */ + y: number + /** + * Label + * + * Label of the bounding box + */ + label: string + /** + * H + * + * Height of the bounding box + */ + h: number + /** + * W + * + * Width of the bounding box + */ + w: number + /** + * X + * + * X-coordinate of the top-left corner + */ + x: number +} + +/** + * BoundingBoxes + */ +export type SchemaBoundingBoxes = { + /** + * Bboxes + * + * List of bounding boxes + */ + bboxes: Array<SchemaBoundingBox> +} + +/** + * ImageInput + */ +export type SchemaFlorence2LargeRegionProposalInput = { + /** + * Image Url + * + * The URL of the image to be processed.
+ */ + image_url: string +} + +/** + * BoundingBoxOutputWithLabels + */ +export type SchemaFlorence2LargeCaptionToPhraseGroundingOutput = { + /** + * Image + * + * Processed image + */ + image?: SchemaImage + /** + * Results + * + * Results from the model + */ + results: SchemaBoundingBoxes +} + +/** + * ImageWithTextInput + */ +export type SchemaFlorence2LargeCaptionToPhraseGroundingInput = { + /** + * Text Input + * + * Text input for the task + */ + text_input: string + /** + * Image Url + * + * The URL of the image to be processed. + */ + image_url: string +} + +/** + * BoundingBoxOutputWithLabels + */ +export type SchemaFlorence2LargeOpenVocabularyDetectionOutput = { + /** + * Image + * + * Processed image + */ + image?: SchemaImage + /** + * Results + * + * Results from the model + */ + results: SchemaBoundingBoxes +} + +/** + * ImageWithTextInput + */ +export type SchemaFlorence2LargeOpenVocabularyDetectionInput = { + /** + * Text Input + * + * Text input for the task + */ + text_input: string + /** + * Image Url + * + * The URL of the image to be processed. + */ + image_url: string +} + +/** + * BoundingBoxOutputWithLabels + */ +export type SchemaFlorence2LargeObjectDetectionOutput = { + /** + * Image + * + * Processed image + */ + image?: SchemaImage + /** + * Results + * + * Results from the model + */ + results: SchemaBoundingBoxes +} + +/** + * ImageInput + */ +export type SchemaFlorence2LargeObjectDetectionInput = { + /** + * Image Url + * + * The URL of the image to be processed. + */ + image_url: string +} + +/** + * PolygonOutputWithLabels + */ +export type SchemaFlorence2LargeReferringExpressionSegmentationOutput = { + /** + * Image + * + * Processed image + */ + image?: SchemaImage + /** + * Results + * + * Results from the model + */ + results: SchemaPolygonOutput +} + +/** + * ImageWithTextInput + */ +export type SchemaFlorence2LargeReferringExpressionSegmentationInput = { + /** + * Text Input + * + * Text input for the task + */ + text_input: string + /** + * Image Url + * + * The URL of the image to be processed. + */ + image_url: string +} + +/** + * BoundingBoxOutputWithLabels + */ +export type SchemaFlorence2LargeDenseRegionCaptionOutput = { + /** + * Image + * + * Processed image + */ + image?: SchemaImage + /** + * Results + * + * Results from the model + */ + results: SchemaBoundingBoxes +} + +/** + * ImageInput + */ +export type SchemaFlorence2LargeDenseRegionCaptionInput = { + /** + * Image Url + * + * The URL of the image to be processed. + */ + image_url: string +} + +/** + * Era3DOutput + */ +export type SchemaEra3dOutput = { + /** + * Images + * + * Images with background removed + */ + images: Array<SchemaImage> + /** + * Seed + * + * Seed used for random number generation + */ + seed: number + /** + * Normal Images + * + * Normal images with background removed + */ + normal_images: Array<SchemaImage> +} + +/** + * Era3DInput + */ +export type SchemaEra3dInput = { + /** + * Cfg + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.
+ */ + cfg?: number + /** + * Background Removal + * + * Background removal + */ + background_removal?: boolean + /** + * Steps + * + * Number of steps to run the model for + */ + steps?: number + /** + * Crop Size + * + * Size of the image to crop to + */ + crop_size?: number + /** + * Seed + * + * Seed for random number generation + */ + seed?: number + /** + * Image Url + * + * URL of the image to remove background from. + */ + image_url: string +} + +/** + * Output + */ +export type SchemaSdxlControlnetUnionImageToImageOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array<SchemaImage> + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array<boolean> + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value as the one passed in the + * input or the randomly generated seed that was used in case none was passed. + * + */ + seed: number +} + +/** + * ImageToImageControlNetUnionInput + */ +export type SchemaSdxlControlnetUnionImageToImageInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Depth Preprocess + * + * Whether to preprocess the depth image. + */ + depth_preprocess?: boolean + /** + * Image Size + * + * The size of the generated image. Leave it as none to automatically infer from the control image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + | null + /** + * Normal Image Url + * + * The URL of the control image. + */ + normal_image_url?: string + /** + * Embeddings + * + * The list of embeddings to use. + */ + embeddings?: Array + /** + * Teed Image Url + * + * The URL of the control image. + */ + teed_image_url?: string + /** + * Loras + * + * The list of LoRA weights to use. + */ + loras?: Array + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Canny Image Url + * + * The URL of the control image. + */ + canny_image_url?: string + /** + * Segmentation Preprocess + * + * Whether to preprocess the segmentation image. + */ + segmentation_preprocess?: boolean + /** + * Format + * + * The format of the generated image. + */ + format?: 'jpeg' | 'png' + /** + * Image Url + * + * The URL of the image to use as a starting point for the generation. + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Request Id + * + * + * An id bound to a request; it can be used with the response to identify the request + * itself. + * + */ + request_id?: string + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number + /** + * Segmentation Image Url + * + * The URL of the control image. + */ + segmentation_image_url?: string + /** + * Openpose Image Url + * + * The URL of the control image. + */ + openpose_image_url?: string + /** + * Canny Preprocess + * + * Whether to preprocess the canny image.
+ */ + canny_preprocess?: boolean + /** + * Expand Prompt + * + * If set to true, the prompt will be expanded with additional prompts. + */ + expand_prompt?: boolean + /** + * Depth Image Url + * + * The URL of the control image. + */ + depth_image_url?: string + /** + * Normal Preprocess + * + * Whether to preprocess the normal image. + */ + normal_preprocess?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Preserve Aspect Ratio + * + * + * If set to true, the aspect ratio of the generated image will be preserved even + * if the image size is too large. However, if the image is not a multiple of 32 + * in width or height, it will be resized to the nearest multiple of 32. By default, + * this snapping to the nearest multiple of 32 will not preserve the aspect ratio. + * Set crop_output to true to crop the output to the proper aspect ratio + * after generating. + * + */ + preserve_aspect_ratio?: boolean + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Crop Output + * + * + * If set to true, the output is cropped to the proper aspect ratio after generating. + * + */ + crop_output?: boolean + /** + * Teed Preprocess + * + * Whether to preprocess the teed image. + */ + teed_preprocess?: boolean + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Controlnet Conditioning Scale + * + * The scale of the controlnet conditioning. + */ + controlnet_conditioning_scale?: number + /** + * Strength + * + * Determines how much the generated image resembles the initial image. + */ + strength?: number + /** + * Safety Checker Version + * + * The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model. + */ + safety_checker_version?: 'v1' | 'v2' + /** + * Openpose Preprocess + * + * Whether to preprocess the openpose image. + */ + openpose_preprocess?: boolean + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * Output + */ +export type SchemaSdxlControlnetUnionInpaintingOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array<SchemaImage> + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array<boolean> + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value as the one passed in the + * input or the randomly generated seed that was used in case none was passed. + * + */ + seed: number +}
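// Editor's note: an illustrative controlnet-union image-to-image request for
// the input type defined earlier (URLs are placeholders; only the depth
// control is set here).
export const controlnetUnionImg2ImgExample: SchemaSdxlControlnetUnionImageToImageInput =
  {
    prompt: 'an isometric voxel city at night',
    image_url: 'https://example.com/base.png',
    depth_image_url: 'https://example.com/depth.png',
    depth_preprocess: true, // derive the depth map from the control image
    strength: 0.7,
  }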
+ +/** + * InpaintingControlNetUnionInput + */ +export type SchemaSdxlControlnetUnionInpaintingInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Depth Preprocess + * + * Whether to preprocess the depth image. + */ + depth_preprocess?: boolean + /** + * Image Size + * + * The size of the generated image. Leave it as none to automatically infer from the control image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + | null + /** + * Normal Image Url + * + * The URL of the control image. + */ + normal_image_url?: string + /** + * Embeddings + * + * The list of embeddings to use. + */ + embeddings?: Array + /** + * Teed Image Url + * + * The URL of the control image. + */ + teed_image_url?: string + /** + * Loras + * + * The list of LoRA weights to use. + */ + loras?: Array + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Canny Image Url + * + * The URL of the control image. + */ + canny_image_url?: string + /** + * Segmentation Preprocess + * + * Whether to preprocess the segmentation image. + */ + segmentation_preprocess?: boolean + /** + * Format + * + * The format of the generated image. + */ + format?: 'jpeg' | 'png' + /** + * Image Url + * + * The URL of the image to use as a starting point for the generation. + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Request Id + * + * + * An id bound to a request; it can be used with the response to identify the request + * itself. + * + */ + request_id?: string + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number + /** + * Mask Url + * + * The URL of the mask to use for inpainting. + */ + mask_url: string + /** + * Segmentation Image Url + * + * The URL of the control image. + */ + segmentation_image_url?: string + /** + * Openpose Image Url + * + * The URL of the control image. + */ + openpose_image_url?: string + /** + * Canny Preprocess + * + * Whether to preprocess the canny image. + */ + canny_preprocess?: boolean + /** + * Expand Prompt + * + * If set to true, the prompt will be expanded with additional prompts. + */ + expand_prompt?: boolean + /** + * Depth Image Url + * + * The URL of the control image. + */ + depth_image_url?: string + /** + * Normal Preprocess + * + * Whether to preprocess the normal image. + */ + normal_preprocess?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Teed Preprocess + * + * Whether to preprocess the teed image. + */ + teed_preprocess?: boolean + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Controlnet Conditioning Scale + * + * The scale of the controlnet conditioning. + */ + controlnet_conditioning_scale?: number + /** + * Strength + * + * Determines how much the generated image resembles the initial image. + */ + strength?: number + /** + * Safety Checker Version + * + * The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model. + */ + safety_checker_version?: 'v1' | 'v2' + /** + * Openpose Preprocess + * + * Whether to preprocess the openpose image. + */ + openpose_preprocess?: boolean + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +}
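// Editor's note: the inpainting variant additionally requires `mask_url`;
// everything else mirrors the image-to-image input (placeholder URLs).
export const controlnetUnionInpaintExample: SchemaSdxlControlnetUnionInpaintingInput =
  {
    prompt: 'replace the sky with a dramatic sunset',
    image_url: 'https://example.com/base.png',
    mask_url: 'https://example.com/sky-mask.png',
  }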
+ +/** + * Output + */ +export type SchemaFluxLoraImageToImageOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array<SchemaImage> + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array<boolean> + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value as the one passed in the + * input or the randomly generated seed that was used in case none was passed. + * + */ + seed: number +} + +/** + * ImageToImageInput + */ +export type SchemaFluxLoraImageToImageInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. This is always set to 1 for streaming output. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image Url + * + * URL of the image to use for image-to-image or inpainting. + */ + image_url: string + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use any number of LoRAs + * and they will be merged together to generate the final image. + * + */ + loras?: Array + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Strength + * + * The strength to use for inpainting/image-to-image. Only used if the image_url is provided. 1.0 completely remakes the image, while 0.0 preserves the original. + */ + strength?: number + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean +}
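// Editor's note: a hedged end-to-end sketch, not part of the generated file.
// It assumes the official @fal-ai/client package and the
// 'fal-ai/flux-lora/image-to-image' endpoint id; adjust both to your setup.
import { fal } from '@fal-ai/client'

export async function runFluxLoraImg2Img(
  input: SchemaFluxLoraImageToImageInput,
): Promise<SchemaFluxLoraImageToImageOutput> {
  const result = await fal.subscribe('fal-ai/flux-lora/image-to-image', {
    input,
  })
  return result.data as SchemaFluxLoraImageToImageOutput
}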
+ +/** + * Output + */ +export type SchemaFluxGeneralDifferentialDiffusionOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array<SchemaImage> + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array<boolean> + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value as the one passed in the + * input or the randomly generated seed that was used in case none was passed. + * + */ + seed: number +}
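// Editor's note: an illustrative differential-diffusion request for the input
// type that follows. The change map is a grayscale image where brighter
// regions receive stronger edits (placeholder URLs; values are examples).
export const diffDiffExample: SchemaFluxGeneralDifferentialDiffusionInput = {
  prompt: 'turn the meadow into a snowy field',
  image_url: 'https://example.com/meadow.png',
  change_map_image_url: 'https://example.com/change-map.png',
  strength: 0.85, // 1.0 fully remakes the image, 0.0 preserves it
}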
+ +/** + * DifferentialDiffusionInput + */ +export type SchemaFluxGeneralDifferentialDiffusionInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Proportion of steps to apply NAG + * + * + * The proportion of steps to apply NAG. After the specified proportion + * of steps has been iterated, the remaining steps will use original + * attention processors in FLUX. + * + */ + nag_end?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Control Loras + * + * + * The LoRAs to use for the image generation which use a control image. You can use any number of LoRAs + * and they will be merged together to generate the final image. + * + */ + control_loras?: Array<SchemaControlLoraWeight> + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use any number of LoRAs + * and they will be merged together to generate the final image. + * + */ + loras?: Array + /** + * Scheduler + * + * Scheduler for the denoising process. + */ + scheduler?: 'euler' | 'dpmpp_2m' + /** + * Easycontrols + * + * + * EasyControl Inputs to use for image generation. + * + */ + easycontrols?: Array<SchemaEasyControlWeight> + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Real CFG scale + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + real_cfg_scale?: number + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Use CFG-Zero-Init + * + * + * Uses CFG-zero init sampling as in https://arxiv.org/abs/2503.18886. + * + */ + use_cfg_zero?: boolean + /** + * Fill Image + * + * Use an image input to influence the generation. Can be used to fill images in masked areas. + */ + fill_image?: SchemaImageFillInput + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Sigma Schedule + * + * Sigmas schedule for the denoising process. + */ + sigma_schedule?: 'sgm_uniform' + /** + * Reference End + * + * + * The percentage of the total timesteps when the reference guidance is to be ended. + * + */ + reference_end?: number + /** + * Reference Strength + * + * Strength of reference_only generation. Only used if a reference image is provided. + */ + reference_strength?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Image URL + * + * URL of image to use as initial image. + */ + image_url: string + /** + * NAG scale + * + * + * The scale for NAG. Higher values will result in an image that is more distant + * from the negative prompt. + * + */ + nag_scale?: number + /** + * Reference Image Url + * + * URL of Image for Reference-Only. + */ + reference_image_url?: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Controlnet Unions + * + * + * The controlnet unions to use for the image generation. Only one controlnet is supported at the moment. + * + */ + controlnet_unions?: Array<SchemaControlNetUnion> + /** + * Negative Prompt + * + * + * Negative prompt to steer the image generation away from unwanted features. + * By default, we will be using NAG for processing the negative prompt. + * + */ + negative_prompt?: string + /** + * NAG Tau + * + * + * The tau for NAG. Controls the normalization of the hidden state. + * Higher values will result in a less aggressive normalization, + * but may also lead to unexpected changes with respect to the original image. + * Not recommended to change this value. + * + */ + nag_tau?: number + /** + * Change Map URL + * + * URL of change map. + */ + change_map_image_url: string + /** + * Num Images + * + * The number of images to generate. This is always set to 1 for streaming output. + */ + num_images?: number + /** + * Use Beta Schedule + * + * Specifies whether beta sigmas ought to be used. + */ + use_beta_schedule?: boolean + /** + * Ip Adapters + * + * + * IP-Adapter to use for image generation. + * + */ + ip_adapters?: Array + /** + * Base Shift + * + * Base shift for the scheduled timesteps. + */ + base_shift?: number + /** + * NAG alpha + * + * + * The alpha value for NAG. This value is used as a final weighting + * factor for steering the normalized guidance (positive and negative prompts) + * in the direction of the positive prompt. Higher values will result in less + * steering on the normalized guidance where lower values will result in + * considering the positive prompt guidance more. + * + */ + nag_alpha?: number + /** + * Strength + * + * The strength to use for differential diffusion. 1.0 completely remakes the image, while 0.0 preserves the original. + */ + strength?: number + /** + * Max Shift + * + * Max shift for the scheduled timesteps. + */ + max_shift?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Controlnets + * + * + * The controlnets to use for the image generation. Only one controlnet is supported at the moment. + * + */ + controlnets?: Array + /** + * Reference Start + * + * + * The percentage of the total timesteps when the reference guidance is to be started. + * + */ + reference_start?: number + /** + * Use Real CFG + * + * + * Uses classical CFG as in SD1.5, SDXL, etc. Increases generation times and price when set to true. + * If using XLabs IP-Adapter v1, this will be turned on automatically. + * + */ + use_real_cfg?: boolean +} + +/** + * ControlNetUnionInput + */ +export type SchemaControlNetUnionInput = { + /** + * Conditioning Scale + * + * + * The scale of the control net weight. This is used to scale the control net weight + * before merging it with the base model. + * + */ + conditioning_scale?: number + /** + * Mask Threshold + * + * Threshold for mask. + */ + mask_threshold?: number + /** + * End Percentage + * + * + * The percentage of the image to end applying the controlnet in terms of the total timesteps. + * + */ + end_percentage?: number + /** + * Mask Image Url + * + * URL of the mask for the control image. + */ + mask_image_url?: string | null + /** + * Control Image Url + * + * URL of the image to be used as the control image. + */ + control_image_url: string + /** + * Control Mode + * + * Control Mode for Flux Controlnet Union. Supported values are: + * - canny: Uses the edges for guided generation.
+ * - tile: Uses the tiles for guided generation. + * - depth: Utilizes a grayscale depth map for guided generation. + * - blur: Adds a blur to the image. + * - pose: Uses the pose of the image for guided generation. + * - gray: Converts the image to grayscale. + * - low-quality: Converts the image to a low-quality image. + */ + control_mode: + | 'canny' + | 'tile' + | 'depth' + | 'blur' + | 'pose' + | 'gray' + | 'low-quality' + /** + * Start Percentage + * + * + * The percentage of the image to start applying the controlnet in terms of the total timesteps. + * + */ + start_percentage?: number +} + +/** + * ControlNetUnion + */ +export type SchemaControlNetUnion = { + /** + * Controls + * + * The control images and modes to use for the control net. + */ + controls: Array<SchemaControlNetUnionInput> + /** + * Path + * + * URL or the path to the control net weights. + */ + path: string + /** + * Variant + * + * The optional variant if a Hugging Face repo key is used. + */ + variant?: string + /** + * Config Url + * + * Optional URL to the controlnet config.json file. + */ + config_url?: string +} + +/** + * ImageFillInput + */ +export type SchemaImageFillInput = { + /** + * Fill Image Url + * + * URLs of images to be filled for redux prompting. + */ + fill_image_url?: string | Array<string> +} + +/** + * EasyControlWeight + */ +export type SchemaEasyControlWeight = { + /** + * Scale + * + * Scale for the control method. + */ + scale?: number + /** + * Image Control Type + * + * Control type of the image. Must be one of `spatial` or `subject`. + */ + image_control_type: 'subject' | 'spatial' + /** + * Control Method Url + * + * URL to safetensor weights of control method to be applied. Can also be one of `canny`, `depth`, `hedsketch`, `inpainting`, `pose`, `seg`, `subject`, `ghibli`. + */ + control_method_url: string + /** + * Image Url + * + * URL of an image to use as a control. + */ + image_url: string +} + +/** + * ControlLoraWeight + */ +export type SchemaControlLoraWeight = { + /** + * Path + * + * URL or the path to the LoRA weights. + */ + path: string + /** + * Scale + * + * + * The scale of the LoRA weight. This is used to scale the LoRA weight + * before merging it with the base model. Providing a dictionary as {"layer_name":layer_scale} allows per-layer lora scale settings. Layers with no scale provided will have scale 1.0. + * + */ + scale?: + | { + [key: string]: unknown + } + | number + /** + * Control Image Url + * + * URL of the image to be used as the control image. + */ + control_image_url: string + /** + * Preprocess + * + * Type of preprocessing to apply to the input image. + */ + preprocess?: 'canny' | 'depth' | 'None' +} + +/** + * Output + */ +export type SchemaFluxGeneralInpaintingOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array<SchemaImage> + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array<boolean> + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value as the one passed in the + * input or the randomly generated seed that was used in case none was passed. + * + */ + seed: number +} + +/** + * InpaintInput + */ +export type SchemaFluxGeneralInpaintingInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Proportion of steps to apply NAG + * + * + * The proportion of steps to apply NAG.
After the specified proportion + * of steps has been iterated, the remaining steps will use original + * attention processors in FLUX. + * + */ + nag_end?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Control Loras + * + * + * The LoRAs to use for the image generation which use a control image. You can use any number of LoRAs + * and they will be merged together to generate the final image. + * + */ + control_loras?: Array<SchemaControlLoraWeight> + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use any number of LoRAs + * and they will be merged together to generate the final image. + * + */ + loras?: Array + /** + * Scheduler + * + * Scheduler for the denoising process. + */ + scheduler?: 'euler' | 'dpmpp_2m' + /** + * Easycontrols + * + * + * EasyControl Inputs to use for image generation. + * + */ + easycontrols?: Array<SchemaEasyControlWeight> + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Real CFG scale + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + real_cfg_scale?: number + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Use CFG-Zero-Init + * + * + * Uses CFG-zero init sampling as in https://arxiv.org/abs/2503.18886. + * + */ + use_cfg_zero?: boolean + /** + * Fill Image + * + * Use an image input to influence the generation. Can be used to fill images in masked areas. + */ + fill_image?: SchemaImageFillInput + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Sigma Schedule + * + * Sigma schedule for the denoising process. + */ + sigma_schedule?: 'sgm_uniform' + /** + * Reference End + * + * + * The percentage of the total timesteps when the reference guidance is to be ended. + * + */ + reference_end?: number + /** + * Reference Strength + * + * Strength of reference_only generation. Only used if a reference image is provided. + */ + reference_strength?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Mask Url + * + * + * The mask of the area to inpaint. + * + */ + mask_url: string + /** + * Image Url + * + * URL of the image to use for inpainting or img2img. + */ + image_url: string + /** + * NAG scale + * + * + * The scale for NAG. Higher values will result in an image that is more distant + * from the negative prompt. + * + */ + nag_scale?: number + /** + * Reference Image Url + * + * URL of Image for Reference-Only + */ + reference_image_url?: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Controlnet Unions + * + * + * The controlnet unions to use for the image generation. Only one controlnet is supported at the moment. + * + */ + controlnet_unions?: Array<SchemaControlNetUnion> + /** + * Negative Prompt + * + * + * Negative prompt to steer the image generation away from unwanted features.
+ * By default, we will be using NAG for processing the negative prompt. + * + */ + negative_prompt?: string + /** + * NAG Tau + * + * + * The tau for NAG. Controls the normalization of the hidden state. + * Higher values will result in a less aggressive normalization, + * but may also lead to unexpected changes with respect to the original image. + * Not recommended to change this value. + * + */ + nag_tau?: number + /** + * Num Images + * + * The number of images to generate. This is always set to 1 for streaming output. + */ + num_images?: number + /** + * Use Beta Schedule + * + * Specifies whether beta sigmas ought to be used. + */ + use_beta_schedule?: boolean + /** + * Ip Adapters + * + * + * IP-Adapter to use for image generation. + * + */ + ip_adapters?: Array + /** + * Base Shift + * + * Base shift for the scheduled timesteps + */ + base_shift?: number + /** + * NAG alpha + * + * + * The alpha value for NAG. This value is used as a final weighting + * factor for steering the normalized guidance (positive and negative prompts) + * in the direction of the positive prompt. Higher values will result in less + * steering on the normalized guidance where lower values will result in + * considering the positive prompt guidance more. + * + */ + nag_alpha?: number + /** + * Strength + * + * The strength to use for inpainting/image-to-image. Only used if the image_url is provided. 1.0 completely remakes the image, while 0.0 preserves the original. + */ + strength?: number + /** + * Max Shift + * + * Max shift for the scheduled timesteps + */ + max_shift?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Controlnets + * + * + * The controlnets to use for the image generation. Only one controlnet is supported at the moment. + * + */ + controlnets?: Array + /** + * Reference Start + * + * + * The percentage of the total timesteps when the reference guidance is to be started. + * + */ + reference_start?: number + /** + * Use Real CFG + * + * + * Uses classical CFG as in SD1.5, SDXL, etc. Increases generation times and price when set to true. + * If using XLabs IP-Adapter v1, this will be turned on. + * + */ + use_real_cfg?: boolean +} + +/** + * Output + */ +export type SchemaFluxGeneralImageToImageOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * ImageToImageInput + */ +export type SchemaFluxGeneralImageToImageInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Proportion of steps to apply NAG + * + * + * The proportion of steps to apply NAG. After the specified proportion + * of steps has been iterated, the remaining steps will use original + * attention processors in FLUX. + * + */ + nag_end?: number + /** + * Image Size + * + * The size of the generated image.
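// --- Illustrative example (editor's addition) ---
// A minimal typed inpainting request built against SchemaFluxGeneralInpaintingInput
// above. Only field names from the schema are used; the URLs are placeholders and
// the mask convention (white = repaint) is an assumption, not stated by the schema.
const inpaintExample: SchemaFluxGeneralInpaintingInput = {
  prompt: 'a red brick wall with ivy',
  image_url: 'https://example.com/source.png',
  mask_url: 'https://example.com/mask.png', // assumed: white = repaint, black = keep
  strength: 0.85, // values near 1.0 repaint the masked area more aggressively
  num_inference_steps: 28,
}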
+ */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Control Loras + * + * + * The LoRAs to use for the image generation which use a control image. You can use any number of LoRAs + * and they will be merged together to generate the final image. + * + */ + control_loras?: Array<SchemaControlLoraWeight> + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use any number of LoRAs + * and they will be merged together to generate the final image. + * + */ + loras?: Array + /** + * Scheduler + * + * Scheduler for the denoising process. + */ + scheduler?: 'euler' | 'dpmpp_2m' + /** + * Easycontrols + * + * + * EasyControl Inputs to use for image generation. + * + */ + easycontrols?: Array<SchemaEasyControlWeight> + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Real CFG scale + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + real_cfg_scale?: number + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Use CFG-Zero-Init + * + * + * Uses CFG-zero init sampling as in https://arxiv.org/abs/2503.18886. + * + */ + use_cfg_zero?: boolean + /** + * Fill Image + * + * Use an image input to influence the generation. Can be used to fill images in masked areas. + */ + fill_image?: SchemaImageFillInput + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Sigma Schedule + * + * Sigma schedule for the denoising process. + */ + sigma_schedule?: 'sgm_uniform' + /** + * Reference End + * + * + * The percentage of the total timesteps when the reference guidance is to be ended. + * + */ + reference_end?: number + /** + * Reference Strength + * + * Strength of reference_only generation. Only used if a reference image is provided. + */ + reference_strength?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Image Url + * + * URL of the image to use for inpainting or img2img. + */ + image_url: string + /** + * NAG scale + * + * + * The scale for NAG. Higher values will result in an image that is more distant + * from the negative prompt. + * + */ + nag_scale?: number + /** + * Reference Image Url + * + * URL of Image for Reference-Only + */ + reference_image_url?: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Controlnet Unions + * + * + * The controlnet unions to use for the image generation. Only one controlnet is supported at the moment. + * + */ + controlnet_unions?: Array<SchemaControlNetUnion> + /** + * Negative Prompt + * + * + * Negative prompt to steer the image generation away from unwanted features. + * By default, we will be using NAG for processing the negative prompt. + * + */ + negative_prompt?: string + /** + * NAG Tau + * + * + * The tau for NAG. Controls the normalization of the hidden state.
+ * Higher values will result in a less aggressive normalization, + * but may also lead to unexpected changes with respect to the original image. + * Not recommended to change this value. + * + */ + nag_tau?: number + /** + * Num Images + * + * The number of images to generate. This is always set to 1 for streaming output. + */ + num_images?: number + /** + * Use Beta Schedule + * + * Specifies whether beta sigmas ought to be used. + */ + use_beta_schedule?: boolean + /** + * Ip Adapters + * + * + * IP-Adapter to use for image generation. + * + */ + ip_adapters?: Array + /** + * Base Shift + * + * Base shift for the scheduled timesteps + */ + base_shift?: number + /** + * NAG alpha + * + * + * The alpha value for NAG. This value is used as a final weighting + * factor for steering the normalized guidance (positive and negative prompts) + * in the direction of the positive prompt. Higher values will result in less + * steering on the normalized guidance where lower values will result in + * considering the positive prompt guidance more. + * + */ + nag_alpha?: number + /** + * Strength + * + * The strength to use for inpainting/image-to-image. Only used if the image_url is provided. 1.0 completely remakes the image, while 0.0 preserves the original. + */ + strength?: number + /** + * Max Shift + * + * Max shift for the scheduled timesteps + */ + max_shift?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Controlnets + * + * + * The controlnets to use for the image generation. Only one controlnet is supported at the moment. + * + */ + controlnets?: Array + /** + * Reference Start + * + * + * The percentage of the total timesteps when the reference guidance is to be started. + * + */ + reference_start?: number + /** + * Use Real CFG + * + * + * Uses classical CFG as in SD1.5, SDXL, etc. Increases generation times and price when set to true. + * If using XLabs IP-Adapter v1, this will be turned on. + * + */ + use_real_cfg?: boolean +} + +/** + * SAM2ImageOutput + */ +export type SchemaSam2ImageOutput = { + /** + * Image + * + * Segmented image. + */ + image: SchemaImage +} + +/** + * SAM2ImageInput + */ +export type SchemaSam2ImageInput = { + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Prompts + * + * List of prompts to segment the image + */ + prompts?: Array<SchemaPointPrompt> + /** + * Box Prompts + * + * Coordinates for boxes + */ + box_prompts?: Array<SchemaBoxPrompt> + /** + * Apply Mask + * + * Apply the mask on the image. + */ + apply_mask?: boolean + /** + * Image Url + * + * URL of the image to be segmented + */ + image_url: string +} + +/** + * BoxPrompt + */ +export type SchemaBoxPrompt = { + /** + * Y Min + * + * Y Min Coordinate of the box + */ + y_min?: number + /** + * Frame Index + * + * The frame index to interact with. + */ + frame_index?: number + /** + * X Max + * + * X Max Coordinate of the prompt + */ + x_max?: number + /** + * X Min + * + * X Min Coordinate of the box + */ + x_min?: number + /** + * Y Max + * + * Y Max Coordinate of the prompt + */ + y_max?: number +} + +/** + * PointPrompt + */ +export type SchemaPointPrompt = { + /** + * Y + * + * Y Coordinate of the prompt + */ + y?: number + /** + * Label + * + * Label of the prompt.
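// --- Illustrative example (editor's addition) ---
// Image-to-image with SchemaFluxGeneralImageToImageInput above. `strength`
// interpolates between preserving the input (0.0) and fully repainting it (1.0);
// the guidance value is a typical-looking placeholder, not a schema default.
const img2imgExample: SchemaFluxGeneralImageToImageInput = {
  prompt: 'the same scene as an oil painting',
  image_url: 'https://example.com/photo.jpg',
  strength: 0.6,
  guidance_scale: 3.5, // assumption: low single-digit guidance, as is common for FLUX
}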
1 for foreground, 0 for background + */ + label?: 0 | 1 + /** + * Frame Index + * + * The frame index to interact with. + */ + frame_index?: number + /** + * X + * + * X Coordinate of the prompt + */ + x?: number +} + +/** + * PiDiOutput + */ +export type SchemaImagePreprocessorsPidiOutput = { + image: SchemaImage +} + +/** + * PiDiInput + */ +export type SchemaImagePreprocessorsPidiInput = { + /** + * Safe + * + * Whether to use the safe version of the Pidi detector + */ + safe?: boolean + /** + * Apply Filter + * + * Whether to apply the filter to the image. + */ + apply_filter?: boolean + /** + * Scribble + * + * Whether to use the scribble version of the Pidi detector + */ + scribble?: boolean + /** + * Image Url + * + * URL of the image to process + */ + image_url: string +} + +/** + * ZoeOutput + */ +export type SchemaImagePreprocessorsZoeOutput = { + image: SchemaImage +} + +/** + * ZoeInput + */ +export type SchemaImagePreprocessorsZoeInput = { + /** + * Image Url + * + * URL of the image to process + */ + image_url: string +} + +/** + * LineartOutput + */ +export type SchemaImagePreprocessorsLineartOutput = { + image: SchemaImage +} + +/** + * LineartInput + */ +export type SchemaImagePreprocessorsLineartInput = { + /** + * Coarse + * + * Whether to use the coarse model + */ + coarse?: boolean + /** + * Image Url + * + * URL of the image to process + */ + image_url: string +} + +/** + * TeeDOutput + */ +export type SchemaImagePreprocessorsTeedOutput = { + image: SchemaImage +} + +/** + * TeeDInput + */ +export type SchemaImagePreprocessorsTeedInput = { + /** + * Image Url + * + * URL of the image to process + */ + image_url: string +} + +/** + * MiDaSOutput + */ +export type SchemaImagePreprocessorsMidasOutput = { + normal_map: SchemaImage + depth_map: SchemaImage +} + +/** + * MiDaSInput + */ +export type SchemaImagePreprocessorsMidasInput = { + /** + * A + * + * A parameter for the MiDaS detector + */ + a?: number + /** + * Background Threshold + * + * Background threshold for the MiDaS detector + */ + background_threshold?: number + /** + * Image Url + * + * URL of the image to process + */ + image_url: string +} + +/** + * SamOutput + */ +export type SchemaImagePreprocessorsSamOutput = { + image: SchemaImage +} + +/** + * SamInput + */ +export type SchemaImagePreprocessorsSamInput = { + /** + * Image Url + * + * URL of the image to process + */ + image_url: string +} + +/** + * MLSDOutput + */ +export type SchemaImagePreprocessorsMlsdOutput = { + image: SchemaImage +} + +/** + * MLSDInput + */ +export type SchemaImagePreprocessorsMlsdInput = { + /** + * Distance Threshold + * + * Distance threshold for the MLSD detector + */ + distance_threshold?: number + /** + * Score Threshold + * + * Score threshold for the MLSD detector + */ + score_threshold?: number + /** + * Image Url + * + * URL of the image to process + */ + image_url: string +} + +/** + * ScribbleOutput + */ +export type SchemaImagePreprocessorsScribbleOutput = { + image: SchemaImage +} + +/** + * ScribbleInput + */ +export type SchemaImagePreprocessorsScribbleInput = { + /** + * Model + * + * The model to use for the Scribble detector + */ + model?: 'HED' | 'PiDi' + /** + * Safe + * + * Whether to use the safe version of the Scribble detector + */ + safe?: boolean + /** + * Image Url + * + * URL of the image to process + */ + image_url: string +} + +/** + * DepthAnythingV2Output + */ +export type SchemaImagePreprocessorsDepthAnythingV2Output = { + image: SchemaImage +} + +/** + * DepthAnythingV2Input + */ +export 
type SchemaImagePreprocessorsDepthAnythingV2Input = { + /** + * Image Url + * + * URL of the image to process + */ + image_url: string +} + +/** + * HEDOutput + */ +export type SchemaImagePreprocessorsHedOutput = { + image: SchemaImage +} + +/** + * HEDInput + */ +export type SchemaImagePreprocessorsHedInput = { + /** + * Safe + * + * Whether to use the safe version of the HED detector + */ + safe?: boolean + /** + * Scribble + * + * Whether to use the scribble version of the HED detector + */ + scribble?: boolean + /** + * Image Url + * + * URL of the image to process + */ + image_url: string +} + +/** + * Output + */ +export type SchemaFluxGeneralRfInversionOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * RFInversionInput + */ +export type SchemaFluxGeneralRfInversionInput = { + /** + * Prompt + * + * The prompt to edit the image with + */ + prompt: string + /** + * Proportion of steps to apply NAG + * + * + * The proportion of steps to apply NAG. After the specified proportion + * of steps has been iterated, the remaining steps will use original + * attention processors in FLUX. + * + */ + nag_end?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + | null + /** + * Control Loras + * + * + * The LoRAs to use for the image generation which use a control image. You can use any number of LoRAs + * and they will be merged together to generate the final image. + * + */ + control_loras?: Array<SchemaControlLoraWeight> + /** + * Controller Guidance Reverse + * + * The controller guidance (eta) used in the denoising process. Using values closer to 1 will result in an image closer to the input. + */ + controller_guidance_reverse?: number + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use any number of LoRAs + * and they will be merged together to generate the final image. + * + */ + loras?: Array + /** + * Reverse Guidance Start + * + * Timestep to start guidance during reverse process. + */ + reverse_guidance_start?: number + /** + * Easycontrols + * + * + * EasyControl Inputs to use for image generation. + * + */ + easycontrols?: Array<SchemaEasyControlWeight> + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Scheduler + * + * Scheduler for the denoising process. + */ + scheduler?: 'euler' | 'dpmpp_2m' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Use CFG-Zero-Init + * + * + * Uses CFG-zero init sampling as in https://arxiv.org/abs/2503.18886. + * + */ + use_cfg_zero?: boolean + /** + * Reference Strength + * + * Strength of reference_only generation. Only used if a reference image is provided.
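// --- Illustrative example (editor's addition) ---
// Segmenting with the SAM2 point/box prompt types defined earlier. The endpoint id
// is not shown here because it is not stated in this file; this only exercises the
// input type. Coordinates are arbitrary placeholders.
const sam2Example: SchemaSam2ImageInput = {
  image_url: 'https://example.com/street.jpg',
  box_prompts: [{ x_min: 120, y_min: 80, x_max: 420, y_max: 360 }],
  prompts: [{ x: 270, y: 220, label: 1 }], // label 1 = foreground, 0 = background
  apply_mask: true, // return the image with the mask applied
}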
+ */ + reference_strength?: number + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Sigma Schedule + * + * Sigma schedule for the denoising process. + */ + sigma_schedule?: 'sgm_uniform' + /** + * Reference End + * + * + * The percentage of the total timesteps when the reference guidance is to be ended. + * + */ + reference_end?: number + /** + * Controller Guidance Forward + * + * The controller guidance (gamma) used in the creation of structured noise. + */ + controller_guidance_forward?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Image Url + * + * URL of image to be edited + */ + image_url: string + /** + * Fill Image + * + * Use an image input to influence the generation. Can be used to fill images in masked areas. + */ + fill_image?: SchemaImageFillInput + /** + * NAG scale + * + * + * The scale for NAG. Higher values will result in an image that is more distant + * from the negative prompt. + * + */ + nag_scale?: number + /** + * Reverse Guidance Schedule + * + * Scheduler for applying reverse guidance. + */ + reverse_guidance_schedule?: 'constant' | 'linear_increase' | 'linear_decrease' + /** + * Reference Image Url + * + * URL of Image for Reference-Only + */ + reference_image_url?: string + /** + * Reverse Guidance End + * + * Timestep to stop guidance during reverse process. + */ + reverse_guidance_end?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Controlnet Unions + * + * + * The controlnet unions to use for the image generation. Only one controlnet is supported at the moment. + * + */ + controlnet_unions?: Array<SchemaControlNetUnion> + /** + * Negative Prompt + * + * + * Negative prompt to steer the image generation away from unwanted features. + * By default, we will be using NAG for processing the negative prompt. + * + */ + negative_prompt?: string + /** + * NAG Tau + * + * + * The tau for NAG. Controls the normalization of the hidden state. + * Higher values will result in a less aggressive normalization, + * but may also lead to unexpected changes with respect to the original image. + * Not recommended to change this value. + * + */ + nag_tau?: number + /** + * Num Images + * + * The number of images to generate. This is always set to 1 for streaming output. + */ + num_images?: number + /** + * Use Beta Schedule + * + * Specifies whether beta sigmas ought to be used. + */ + use_beta_schedule?: boolean + /** + * NAG alpha + * + * + * The alpha value for NAG. This value is used as a final weighting + * factor for steering the normalized guidance (positive and negative prompts) + * in the direction of the positive prompt. Higher values will result in less + * steering on the normalized guidance where lower values will result in + * considering the positive prompt guidance more. + * + */ + nag_alpha?: number + /** + * Base Shift + * + * Base shift for the scheduled timesteps + */ + base_shift?: number + /** + * Max Shift + * + * Max shift for the scheduled timesteps + */ + max_shift?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Reference Start + * + * + * The percentage of the total timesteps when the reference guidance is to be started.
+ * + */ + reference_start?: number + /** + * Controlnets + * + * + * The controlnets to use for the image generation. Only one controlnet is supported at the moment. + * + */ + controlnets?: Array +} + +/** + * LivePortraitImageOutput + */ +export type SchemaLivePortraitImageOutput = { + /** + * Image + * + * The generated image file. + */ + image: SchemaImage +} + +/** + * LivePortraitImageInput + */ +export type SchemaLivePortraitImageInput = { + /** + * Smile + * + * Amount to smile + */ + smile?: number + /** + * Eyebrow + * + * Amount to raise or lower eyebrows + */ + eyebrow?: number + /** + * Rotate Roll + * + * Amount to rotate the face in roll + */ + rotate_roll?: number + /** + * Wink + * + * Amount to wink + */ + wink?: number + /** + * Rotate Pitch + * + * Amount to rotate the face in pitch + */ + rotate_pitch?: number + /** + * Blink + * + * Amount to blink the eyes + */ + blink?: number + /** + * Dsize + * + * Size of the output image. + */ + dsize?: number + /** + * Vy Ratio + * + * Vertical offset ratio for face crop. Positive values move up, negative values move down. + */ + vy_ratio?: number + /** + * Scale + * + * Scaling factor for the face crop. + */ + scale?: number + /** + * Pupil X + * + * Amount to move pupils horizontally + */ + pupil_x?: number + /** + * Flag Pasteback + * + * Whether to paste-back/stitch the animated face cropping from the face-cropping space to the original image space. + */ + flag_pasteback?: boolean + /** + * Eee + * + * Amount to shape mouth in 'eee' position + */ + eee?: number + /** + * Enable Safety Checker + * + * + * Whether to enable the safety checker. If enabled, the model will check if the input image contains a face before processing it. + * The safety checker will process the input image + * + */ + enable_safety_checker?: boolean + /** + * Vx Ratio + * + * Horizontal offset ratio for face crop. + */ + vx_ratio?: number + /** + * Pupil Y + * + * Amount to move pupils vertically + */ + pupil_y?: number + /** + * Output Format + * + * Output format + */ + output_format?: 'jpeg' | 'png' + /** + * Rotate Yaw + * + * Amount to rotate the face in yaw + */ + rotate_yaw?: number + /** + * Flag Do Rot + * + * Whether to conduct the rotation when flag_do_crop is True. + */ + flag_do_rot?: boolean + /** + * Woo + * + * Amount to shape mouth in 'woo' position + */ + woo?: number + /** + * Aaa + * + * Amount to open mouth in 'aaa' shape + */ + aaa?: number + /** + * Image Url + * + * URL of the image to be animated + */ + image_url: string + /** + * Flag Do Crop + * + * Whether to crop the source portrait to the face-cropping space. + */ + flag_do_crop?: boolean + /** + * Flag Lip Zero + * + * Whether to set the lip to closed state before animation. Only takes effect when flag_eye_retargeting and flag_lip_retargeting are False. + */ + flag_lip_zero?: boolean +} + +/** + * Output + */ +export type SchemaBirefnetV2Output = { + /** + * Image + * + * Image with background removed + */ + image: SchemaImageFile + /** + * Mask Image + * + * Mask used to remove the background + */ + mask_image?: SchemaImageFile +} + +/** + * InputV2 + */ +export type SchemaBirefnetV2Input = { + /** + * Operating Resolution + * + * The resolution to operate on. The higher the resolution, the more accurate the output will be for high res input images. The '2304x2304' option is only available for the 'General Use (Dynamic)' model. 
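// --- Illustrative example (editor's addition) ---
// RF-inversion edits an existing image by inverting it toward structured noise and
// re-denoising under a new prompt; the forward (gamma) and reverse (eta) controller
// guidances govern that round trip, per the field docs above. A sketch:
const rfInversionExample: SchemaFluxGeneralRfInversionInput = {
  prompt: 'the same portrait, but smiling',
  image_url: 'https://example.com/portrait.png',
  controller_guidance_reverse: 0.9, // eta: closer to 1 stays closer to the input
  controller_guidance_forward: 0.5, // gamma: shapes the structured-noise step
  reverse_guidance_schedule: 'linear_decrease',
}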
+ */ + operating_resolution?: '1024x1024' | '2048x2048' | '2304x2304' + /** + * Output Format + * + * The format of the output image + */ + output_format?: 'webp' | 'png' | 'gif' + /** + * Image Url + * + * URL of the image to remove background from + */ + image_url: string + /** + * Model + * + * + * Model to use for background removal. + * The 'General Use (Light)' model is the original model used in the BiRefNet repository. + * The 'General Use (Light 2K)' model is the original model used in the BiRefNet repository but trained with 2K images. + * The 'General Use (Heavy)' model is a slower but more accurate model. + * The 'Matting' model is a model trained specifically for matting images. + * The 'Portrait' model is a model trained specifically for portrait images. + * The 'General Use (Dynamic)' model supports dynamic resolutions from 256x256 to 2304x2304. + * The 'General Use (Light)' model is recommended for most use cases. + * + * The corresponding models are as follows: + * - 'General Use (Light)': BiRefNet + * - 'General Use (Light 2K)': BiRefNet_lite-2K + * - 'General Use (Heavy)': BiRefNet_lite + * - 'Matting': BiRefNet-matting + * - 'Portrait': BiRefNet-portrait + * - 'General Use (Dynamic)': BiRefNet_dynamic + * + */ + model?: + | 'General Use (Light)' + | 'General Use (Light 2K)' + | 'General Use (Heavy)' + | 'Matting' + | 'Portrait' + | 'General Use (Dynamic)' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Output Mask + * + * Whether to output the mask used to remove the background + */ + output_mask?: boolean + /** + * Refine Foreground + * + * Whether to refine the foreground using the estimated mask + */ + refine_foreground?: boolean +} + +/** + * Output + */ +export type SchemaFluxPulidOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * FluxPulidInput + */ +export type SchemaFluxPulidInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Id Weight + * + * The weight of the ID loss. + */ + id_weight?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Start Step + * + * The number of steps to start the CFG from. + */ + start_step?: number + /** + * Reference Image URL + * + * URL of image to use for inpainting. + */ + reference_image_url: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Max Sequence Length + * + * The maximum sequence length for the model. + */ + max_sequence_length?: '128' | '256' | '512' + /** + * Sync Mode + * + * + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. 
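// --- Illustrative example (editor's addition) ---
// Background removal with the BiRefNet v2 input type above; the model and
// resolution literals are taken verbatim from the schema.
const birefnetExample: SchemaBirefnetV2Input = {
  image_url: 'https://example.com/product.jpg',
  model: 'General Use (Light)', // the schema recommends this for most use cases
  operating_resolution: '1024x1024',
  output_mask: true, // also return the mask used to cut out the foreground
  refine_foreground: true,
}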
This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + * + */ + sync_mode?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Negative Prompt + * + * The negative prompt to use. + */ + negative_prompt?: string + /** + * True Cfg + * + * The weight of the CFG loss. + */ + true_cfg?: number +} + +/** + * Output + */ +export type SchemaFluxDifferentialDiffusionOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Timings + */ + timings: { + [key: string]: number + } +} + +/** + * DiffInput + */ +export type SchemaFluxDifferentialDiffusionInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image URL + * + * URL of image to use as initial image. + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Strength + * + * The strength to use for image-to-image. 1.0 completely remakes the image, while 0.0 preserves the original. + */ + strength?: number + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Change Map URL + * + * URL of change map. + */ + change_map_image_url: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * Output + */ +export type SchemaIclightV2Output = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed.
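// --- Illustrative example (editor's addition) ---
// Differential diffusion repaints each region in proportion to a change map, so a
// single request can mix untouched and fully regenerated areas. The map polarity
// (brighter = more change) is an assumption, not specified by the schema.
const diffExample: SchemaFluxDifferentialDiffusionInput = {
  prompt: 'the same forest in autumn at golden hour',
  image_url: 'https://example.com/forest.png',
  change_map_image_url: 'https://example.com/change-map.png', // assumed: brighter regions change more
  strength: 0.8,
}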
+ * + */ + seed: number +} + +/** + * BaseInput + */ +export type SchemaIclightV2Input = { + /** + * Initial Latent + * + * + * Provide lighting conditions for the model + * + */ + initial_latent?: 'None' | 'Left' | 'Right' | 'Top' | 'Bottom' + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Background Threshold + * + * Threshold for the background removal algorithm. A high threshold will produce sharper masks. Note: This parameter is currently deprecated and has no effect on the output. + */ + background_threshold?: number + /** + * Mask Image Url + * + * URL of mask to be used for ic-light conditioning image + */ + mask_image_url?: string + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Lowres Denoise + * + * Strength for low-resolution pass. + */ + lowres_denoise?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * Negative Prompt for the image + */ + negative_prompt?: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Hr Downscale + */ + hr_downscale?: number + /** + * Image Url + * + * URL of image to be used for relighting + */ + image_url: string + /** + * Sync Mode + * + * + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + * + */ + sync_mode?: boolean + /** + * Highres Denoise + * + * Strength for high-resolution pass. Only used if enable_hr_fix is True. + */ + highres_denoise?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Enable Hr Fix + * + * Use HR fix + */ + enable_hr_fix?: boolean + /** + * Cfg + * + * The real classifier-free-guidance scale for the generation. + */ + cfg?: number +} + +/** + * Output + */ +export type SchemaKolorsImageToImageOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * KolorsImg2ImgInput + */ +export type SchemaKolorsImageToImageInput = { + /** + * Prompt + * + * The prompt to generate an image from. 
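// --- Illustrative example (editor's addition) ---
// Relighting with IC-Light v2: `initial_latent` seeds the lighting direction and the
// optional HR-fix pass re-denoises at higher resolution, per the field docs above.
const iclightExample: SchemaIclightV2Input = {
  prompt: 'soft window light from the left, warm tones',
  image_url: 'https://example.com/subject.png',
  initial_latent: 'Left',
  enable_hr_fix: true,
  highres_denoise: 0.5, // only used when enable_hr_fix is true
}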
+ */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image Url + * + * URL of image to use for image to image + */ + image_url: string + /** + * Sync Mode + * + * + * If set to true, the function will wait for the image to be generated and + * uploaded before returning the response. This will increase the latency of + * the function but it allows you to get the image directly in the response + * without going through the CDN. + * + */ + sync_mode?: boolean + /** + * Scheduler + * + * The scheduler to use for the model. + */ + scheduler?: + | 'EulerDiscreteScheduler' + | 'EulerAncestralDiscreteScheduler' + | 'DPMSolverMultistepScheduler' + | 'DPMSolverMultistepScheduler_SDE_karras' + | 'UniPCMultistepScheduler' + | 'DEISMultistepScheduler' + /** + * Strength + * + * The strength to use for image-to-image. 1.0 completely remakes the image, while 0.0 preserves the original. + */ + strength?: number + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show + * you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * Seed + */ + seed?: number + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small + * details (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Enable Safety Checker + * + * Enable safety checker. + */ + enable_safety_checker?: boolean +} + +/** + * Output + */ +export type SchemaFluxProV1FillOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * Image + */ +export type SchemaRegistryImageFastSdxlModelsImage = { + /** + * Height + */ + height: number + /** + * Content Type + */ + content_type?: string + /** + * Url + */ + url: string + /** + * Width + */ + width: number +} + +/** + * FluxProFillInput + */ +export type SchemaFluxProV1FillInput = { + /** + * Prompt + * + * The prompt to fill the masked part of the image. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * The image URL to generate an image from. Needs to match the dimensions of the mask.
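// --- Illustrative example (editor's addition) ---
// Kolors image-to-image with an explicit scheduler choice; the literals come from
// the union above and the URLs are placeholders.
const kolorsExample: SchemaKolorsImageToImageInput = {
  prompt: 'watercolor illustration of the same scene',
  image_url: 'https://example.com/photo.jpg',
  strength: 0.6, // 0.0 preserves the input, 1.0 repaints it entirely
  scheduler: 'EulerDiscreteScheduler',
  negative_prompt: 'blurry, low resolution',
}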
+ */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Safety Tolerance + * + * The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive. + */ + safety_tolerance?: '1' | '2' | '3' | '4' | '5' | '6' + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Mask URL + * + * The mask URL to inpaint the image. Needs to match the dimensions of the input image. + */ + mask_url: string + /** + * Enhance Prompt + * + * Whether to enhance the prompt for better results. + */ + enhance_prompt?: boolean +} + +/** + * Output + */ +export type SchemaFluxLoraDepthOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * DepthInput + */ +export type SchemaFluxLoraDepthInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. This is always set to 1 for streaming output. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image Url + * + * URL of image to use for depth input + */ + image_url: string + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use any number of LoRAs + * and they will be merged together to generate the final image. + * + */ + loras?: Array + /** + * Sync Mode + * + * + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + * + */ + sync_mode?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean +} + +/** + * Output + */ +export type SchemaFluxProV11UltraReduxOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. 
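// --- Illustrative example (editor's addition) ---
// FLUX Pro fill (inpainting): per the field docs above, the image and mask must
// share the same dimensions.
const fillExample: SchemaFluxProV1FillInput = {
  prompt: 'a bouquet of tulips on the table',
  image_url: 'https://example.com/room.png',
  mask_url: 'https://example.com/room-mask.png', // same dimensions as image_url
  safety_tolerance: '2',
}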
+ */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * FluxProUltraTextToImageInputRedux + */ +export type SchemaFluxProV11UltraReduxInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt?: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Aspect Ratio + * + * The aspect ratio of the generated image. + */ + aspect_ratio?: + | '21:9' + | '16:9' + | '4:3' + | '3:2' + | '1:1' + | '2:3' + | '3:4' + | '9:16' + | '9:21' + | string + /** + * Enhance Prompt + * + * Whether to enhance the prompt for better results. + */ + enhance_prompt?: boolean + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * The image URL to generate an image from. Needs to match the dimensions of the mask. + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Safety Tolerance + * + * The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive. + */ + safety_tolerance?: '1' | '2' | '3' | '4' | '5' | '6' + /** + * Image Prompt Strength + * + * The strength of the image prompt, between 0 and 1. + */ + image_prompt_strength?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Raw + * + * Generate less processed, more natural-looking images. + */ + raw?: boolean +} + +/** + * Output + */ +export type SchemaFluxDevReduxOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * BaseReduxInput + */ +export type SchemaFluxDevReduxInput = { + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * The speed of the generation. The higher the speed, the faster the generation. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * The URL of the image to generate an image from. 
+ */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number | unknown + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * Output + */ +export type SchemaFluxProV11ReduxOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * FluxProRedux + */ +export type SchemaFluxProV11ReduxInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt?: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * The image URL to generate an image from. Needs to match the dimensions of the mask. + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Safety Tolerance + * + * The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive. + */ + safety_tolerance?: '1' | '2' | '3' | '4' | '5' | '6' + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Enhance Prompt + * + * Whether to enhance the prompt for better results. + */ + enhance_prompt?: boolean +} + +/** + * Output + */ +export type SchemaFluxSchnellReduxOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. 
+ */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * SchnellReduxInput + */ +export type SchemaFluxSchnellReduxInput = { + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * The speed of the generation. The higher the speed, the faster the generation. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * The URL of the image to generate an image from. + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number | unknown + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * Output + */ +export type SchemaIdeogramV2RemixOutput = { + /** + * Images + */ + images: Array + /** + * Seed + * + * Seed used for the random number generator + */ + seed: number +} + +/** + * RemixImageInput + */ +export type SchemaIdeogramV2RemixInput = { + /** + * Prompt + * + * The prompt to remix the image with + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated image + */ + aspect_ratio?: + | '10:16' + | '16:10' + | '9:16' + | '16:9' + | '4:3' + | '3:4' + | '1:1' + | '1:3' + | '3:1' + | '3:2' + | '2:3' + /** + * Style + * + * The style of the generated image + */ + style?: 'auto' | 'general' | 'realistic' | 'design' | 'render_3D' | 'anime' + /** + * Expand Prompt + * + * Whether to expand the prompt with MagicPrompt functionality. + */ + expand_prompt?: boolean + /** + * Image URL + * + * The image URL to remix + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. 
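// --- Illustrative example (editor's addition) ---
// The Redux variants take a reference image (plus an optional prompt on the Pro
// versions) and generate variations of it. Schnell is the few-step member of the
// family, so a low step count is typical; that is an assumption about the model,
// not something this schema enforces.
const reduxExample: SchemaFluxSchnellReduxInput = {
  image_url: 'https://example.com/reference.png',
  image_size: 'landscape_16_9',
  num_inference_steps: 4,
}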
+ */ + sync_mode?: boolean + /** + * Strength + * + * Strength of the input image in the remix + */ + strength?: number + /** + * Seed + * + * Seed for the random number generator + */ + seed?: number | unknown +} + +/** + * Output + */ +export type SchemaIdeogramV2TurboRemixOutput = { + /** + * Images + */ + images: Array + /** + * Seed + * + * Seed used for the random number generator + */ + seed: number +} + +/** + * RemixImageInput + */ +export type SchemaIdeogramV2TurboRemixInput = { + /** + * Prompt + * + * The prompt to remix the image with + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated image + */ + aspect_ratio?: + | '10:16' + | '16:10' + | '9:16' + | '16:9' + | '4:3' + | '3:4' + | '1:1' + | '1:3' + | '3:1' + | '3:2' + | '2:3' + /** + * Style + * + * The style of the generated image + */ + style?: 'auto' | 'general' | 'realistic' | 'design' | 'render_3D' | 'anime' + /** + * Expand Prompt + * + * Whether to expand the prompt with MagicPrompt functionality. + */ + expand_prompt?: boolean + /** + * Image URL + * + * The image URL to remix + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Strength + * + * Strength of the input image in the remix + */ + strength?: number + /** + * Seed + * + * Seed for the random number generator + */ + seed?: number | unknown +} + +/** + * Output + */ +export type SchemaIdeogramV2TurboEditOutput = { + /** + * Images + */ + images: Array + /** + * Seed + * + * Seed used for the random number generator + */ + seed: number +} + +/** + * EditImageInput + */ +export type SchemaIdeogramV2TurboEditInput = { + /** + * Prompt + * + * The prompt to fill the masked part of the image. + */ + prompt: string + /** + * Style + * + * The style of the generated image + */ + style?: 'auto' | 'general' | 'realistic' | 'design' | 'render_3D' | 'anime' + /** + * Expand Prompt + * + * Whether to expand the prompt with MagicPrompt functionality. + */ + expand_prompt?: boolean + /** + * Image URL + * + * The image URL to generate an image from. Needs to match the dimensions of the mask. + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Seed + * + * Seed for the random number generator + */ + seed?: number | unknown + /** + * Mask URL + * + * The mask URL to inpaint the image. Needs to match the dimensions of the input image. + */ + mask_url: string +} + +/** + * Output + */ +export type SchemaIdeogramV2EditOutput = { + /** + * Images + */ + images: Array + /** + * Seed + * + * Seed used for the random number generator + */ + seed: number +} + +/** + * EditImageInput + */ +export type SchemaIdeogramV2EditInput = { + /** + * Prompt + * + * The prompt to fill the masked part of the image. + */ + prompt: string + /** + * Style + * + * The style of the generated image + */ + style?: 'auto' | 'general' | 'realistic' | 'design' | 'render_3D' | 'anime' + /** + * Expand Prompt + * + * Whether to expand the prompt with MagicPrompt functionality. + */ + expand_prompt?: boolean + /** + * Image URL + * + * The image URL to generate an image from. Needs to match the dimensions of the mask. 
+ */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Seed + * + * Seed for the random number generator + */ + seed?: number | unknown + /** + * Mask URL + * + * The mask URL to inpaint the image. Needs to match the dimensions of the input image. + */ + mask_url: string +} + +/** + * VTONOutput + */ +export type SchemaLeffaVirtualTryonOutput = { + /** + * Image + * + * The output image. + */ + image: SchemaImage + /** + * Seed + * + * The seed for the inference. + */ + seed: number + /** + * Has Nsfw Concepts + * + * Whether the image contains NSFW concepts. + */ + has_nsfw_concepts: boolean +} + +/** + * VTONInput + */ +export type SchemaLeffaVirtualTryonInput = { + /** + * Garment Image Url + * + * Url to the garment image. + */ + garment_image_url: string + /** + * Human Image Url + * + * Url for the human image. + */ + human_image_url: string + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + * + */ + sync_mode?: boolean + /** + * Garment Type + * + * The type of the garment used for virtual try-on. + */ + garment_type: 'upper_body' | 'lower_body' | 'dresses' + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your input when generating the image. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same input given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean +} + +/** + * PoseTransferOutput + */ +export type SchemaLeffaPoseTransferOutput = { + /** + * Image + * + * The output image. + */ + image: SchemaImage + /** + * Seed + * + * The seed for the inference. + */ + seed: number + /** + * Has Nsfw Concepts + * + * Whether the image contains NSFW concepts. + */ + has_nsfw_concepts: boolean +} + +/** + * PoseTransferInput + */ +export type SchemaLeffaPoseTransferInput = { + /** + * Pose Image Url + * + * Url for the human image. + */ + pose_image_url: string + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + * + */ + sync_mode?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your input when generating the image. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. 
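+ *
+ * @example
+ * // Editor's sketch of a minimal pose-transfer request; the URLs are
+ * // placeholders, not real assets.
+ * const input: SchemaLeffaPoseTransferInput = {
+ *   pose_image_url: 'https://example.com/pose.png',
+ *   person_image_url: 'https://example.com/person.png',
+ *   num_inference_steps: 30, // fewer steps trade quality for latency
+ * }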
+ */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same input given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Person Image Url + * + * Url to the person image. + */ + person_image_url: string +} + +/** + * CATVTONOutput + */ +export type SchemaCatVtonOutput = { + /** + * Image + * + * The output image. + */ + image: SchemaImage +} + +/** + * CATVTONInput + */ +export type SchemaCatVtonInput = { + /** + * Garment Image Url + * + * Url to the garment image. + */ + garment_image_url: string + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Human Image Url + * + * Url for the human image. + */ + human_image_url: string + /** + * Cloth Type + * + * + * Type of the Cloth to be tried on. + * + * Options: + * upper: Upper body cloth + * lower: Lower body cloth + * overall: Full body cloth + * inner: Inner cloth, like T-shirt inside a jacket + * outer: Outer cloth, like a jacket over a T-shirt + * + */ + cloth_type: 'upper' | 'lower' | 'overall' | 'inner' | 'outer' + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same input given to the same version of the model + * will output the same image every time. + * + */ + seed?: number +} + +/** + * BGRemoveOutput + */ +export type SchemaBriaBackgroundRemoveOutput = { + /** + * Image + * + * The generated image + */ + image: SchemaImage +} + +/** + * BGRemoveInput + */ +export type SchemaBriaBackgroundRemoveInput = { + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Image Url + * + * Input image to remove the background from + */ + image_url: string +} + +/** + * ProductShotOutput + */ +export type SchemaBriaProductShotOutput = { + /** + * Images + * + * The generated images + */ + images: Array +} + +/** + * ProductShotInput + */ +export type SchemaBriaProductShotInput = { + /** + * Ref Image Url + * + * The URL of the reference image to be used for generating the new scene or background for the product shot. Use "" to leave empty. Either ref_image_url or scene_description has to be provided but not both. If both ref_image_url and ref_image_file are provided, ref_image_url will be used. Accepted formats are jpeg, jpg, png, webp. + */ + ref_image_url?: string + /** + * Manual Placement Selection + * + * If you've selected placement_type=manual_placement, you should use this parameter to specify which placements/positions you would like to use from the list. You can select more than one placement in one request. 
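+ *
+ * @example
+ * // Editor's sketch showing how this field pairs with placement_type; the
+ * // values are placeholders for illustration only.
+ * const input: SchemaBriaProductShotInput = {
+ *   image_url: 'https://example.com/product-cutout.png',
+ *   scene_description: 'on a marble kitchen counter, soft morning light',
+ *   placement_type: 'manual_placement',
+ *   manual_placement_selection: 'bottom_center', // only read for manual_placement
+ * }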
+ */ + manual_placement_selection?: + | 'upper_left' + | 'upper_right' + | 'bottom_left' + | 'bottom_right' + | 'right_center' + | 'left_center' + | 'upper_center' + | 'bottom_center' + | 'center_vertical' + | 'center_horizontal' + /** + * Num Results + * + * The number of lifestyle product shots you would like to generate. You will get num_results x 10 results when placement_type=automatic and according to the number of required placements x num_results if placement_type=manual_placement. + */ + num_results?: number + /** + * Padding Values + * + * The desired padding in pixels around the product, when using placement_type=manual_padding. The order of the values is [left, right, top, bottom]. For optimal results, the total number of pixels, including padding, should be around 1,000,000. It is recommended to first use the product cutout API, get the cutout and understand the size of the result, and then define the required padding and use the cutout as an input for this API. + */ + padding_values?: Array + /** + * Shot Size + * + * The desired size of the final product shot. For optimal results, the total number of pixels should be around 1,000,000. This parameter is only relevant when placement_type=automatic or placement_type=manual_placement. + */ + shot_size?: Array + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Placement Type + * + * This parameter allows you to control the positioning of the product in the image. Choosing 'original' will preserve the original position of the product in the image. Choosing 'automatic' will generate results with the 10 recommended positions for the product. Choosing 'manual_placement' will allow you to select predefined positions (using the parameter 'manual_placement_selection'). Selecting 'manual_padding' will allow you to control the position and size of the image by defining the desired padding in pixels around the product. + */ + placement_type?: + | 'original' + | 'automatic' + | 'manual_placement' + | 'manual_padding' + /** + * Original Quality + * + * This flag is only relevant when placement_type=original. If true, the output image retains the original input image's size; otherwise, the image is scaled to 1 megapixel (1MP) while preserving its aspect ratio. + */ + original_quality?: boolean + /** + * Fast + * + * Whether to use the fast model + */ + fast?: boolean + /** + * Optimize Description + * + * Whether to optimize the scene description + */ + optimize_description?: boolean + /** + * Scene Description + * + * Text description of the new scene or background for the provided product shot. Bria currently supports prompts in English only, excluding special characters. + */ + scene_description?: string + /** + * Image Url + * + * The URL of the product shot to be placed in a lifestyle shot. If both image_url and image_file are provided, image_url will be used. Accepted formats are jpeg, jpg, png, webp. Maximum file size 12MB. + */ + image_url: string +} + +/** + * EraserOutput + */ +export type SchemaBriaEraserOutput = { + /** + * Image + * + * The generated image + */ + image: SchemaImage +} + +/** + * EraserInput + */ +export type SchemaBriaEraserInput = { + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. 
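+ *
+ * @example
+ * // Editor's sketch of an erase request: white pixels in the binary mask mark
+ * // the region to remove. URLs are placeholders.
+ * const input: SchemaBriaEraserInput = {
+ *   image_url: 'https://example.com/photo.png',
+ *   mask_url: 'https://example.com/mask.png',
+ *   mask_type: 'manual', // mask painted by a user rather than produced by e.g. SAM
+ * }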
+ */ + sync_mode?: boolean + /** + * Preserve Alpha + * + * + * If set to true, attempts to preserve the alpha channel of the input image. + * + */ + preserve_alpha?: boolean + /** + * Mask Url + * + * The URL of the binary mask image that represents the area that will be cleaned. + */ + mask_url: string + /** + * Mask Type + * + * You can use this parameter to specify the type of the input mask from the list. The 'manual' option should be used when the mask has been generated by a user (e.g. with a brush tool), and the 'automatic' option when the mask has been generated by an algorithm like 'SAM'. + */ + mask_type?: 'manual' | 'automatic' + /** + * Image Url + * + * Input Image to erase from + */ + image_url: string +} + +/** + * BGReplaceOutput + */ +export type SchemaBriaBackgroundReplaceOutput = { + /** + * Images + * + * The generated images + */ + images: Array + /** + * Seed + * + * Seed value used for generation. + */ + seed: number +} + +/** + * BGReplaceInput + */ +export type SchemaBriaBackgroundReplaceInput = { + /** + * Prompt + * + * The prompt you would like to use to generate images. + */ + prompt?: string + /** + * Num Images + * + * Number of Images to generate. + */ + num_images?: number + /** + * Ref Image Url + * + * The URL of the reference image to be used for generating the new background. Use "" to leave empty. Either ref_image_url or bg_prompt has to be provided but not both. If both ref_image_url and ref_image_file are provided, ref_image_url will be used. Accepted formats are jpeg, jpg, png, webp. + */ + ref_image_url?: string + /** + * Refine Prompt + * + * Whether to refine prompt + */ + refine_prompt?: boolean + /** + * Image Url + * + * Input image whose background will be replaced + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Fast + * + * Whether to use the fast model + */ + fast?: boolean + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Negative Prompt + * + * The negative prompt you would like to use to generate images. + */ + negative_prompt?: string +} + +/** + * Output + */ +export type SchemaFluxLoraFillOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Timings + */ + timings: { + [key: string]: number + } +} + +/** + * FillInput + */ +export type SchemaFluxLoraFillInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt?: string + /** + * Resize To Original + * + * Resizes the image back to the original size. Use when you wish to preserve the exact image size of the originally provided image. + */ + resize_to_original?: boolean + /** + * Paste Back + * + * Specifies whether to paste the original image back onto the non-inpainted areas of the output + */ + paste_back?: boolean + /** + * Image Size + * + * The size of the generated image. 
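+ *
+ * @example
+ * // Editor's sketch of the two accepted shapes of image_size: a named preset,
+ * // or an explicit SchemaImageSize object (assumed here to carry width/height;
+ * // check the SchemaImageSize definition in this file before relying on it).
+ * const preset: SchemaFluxLoraFillInput['image_size'] = 'square_hd'
+ * const custom: SchemaFluxLoraFillInput['image_size'] = { width: 1024, height: 768 }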
+ */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use any number of LoRAs + * and they will be merged together to generate the final image. + * + */ + loras?: Array + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Images + * + * The number of images to generate. This is always set to 1 for streaming output. + */ + num_images?: number + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image Url + * + * URL of image to use for fill operation + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Fill Image + * + * Use an image fill input to fill in particular images into the masked area. + */ + fill_image?: SchemaImageFillInput + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Mask Url + * + * + * The mask of the area to inpaint. + * + */ + mask_url: string + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number +} + +/** + * GenFillOutput + */ +export type SchemaBriaGenfillOutput = { + /** + * Images + * + * Generated Images + */ + images: Array +} + +/** + * GenFillInput + */ +export type SchemaBriaGenfillInput = { + /** + * Prompt + * + * The prompt you would like to use to generate images. + */ + prompt: string + /** + * Num Images + * + * Number of Images to generate. + */ + num_images?: number + /** + * Image Url + * + * Input image to apply generative fill to + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Mask Url + * + * The URL of the binary mask image that represents the area that will be filled. + */ + mask_url: string + /** + * Negative Prompt + * + * The negative prompt you would like to use to generate images. + */ + negative_prompt?: string +} + +/** + * ImageExpansionOutput + */ +export type SchemaBriaExpandOutput = { + /** + * Image + * + * The generated image + */ + image: SchemaImage + /** + * Seed + * + * Seed value used for generation. + */ + seed: number +} + +/** + * ImageExpansionInput + */ +export type SchemaBriaExpandInput = { + /** + * Prompt + * + * Text on which you wish to base the image expansion. This parameter is optional. Bria currently supports prompts in English only, excluding special characters. + */ + prompt?: string + /** + * Aspect Ratio + * + * The desired aspect ratio of the final image. Will be used over original_image_size and original_image_location if provided. 
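+ *
+ * @example
+ * // Editor's sketch of an expansion request placing a 1024x1024 original at
+ * // the left edge of a 2048x1024 canvas. Supplying aspect_ratio instead would
+ * // override original_image_size and original_image_location.
+ * const input: SchemaBriaExpandInput = {
+ *   image_url: 'https://example.com/photo.png',
+ *   canvas_size: [2048, 1024],
+ *   original_image_size: [1024, 1024],
+ *   original_image_location: [0, 0], // upper-left corner of the original
+ * }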
+ */ + aspect_ratio?: + | '1:1' + | '2:3' + | '3:2' + | '3:4' + | '4:3' + | '4:5' + | '5:4' + | '9:16' + | '16:9' + /** + * Original Image Location + * + * The desired location of the original image, inside the full canvas. Provide the location of the upper left corner of the original image. The location can also be outside the canvas (the original image will be cropped). Will be ignored if aspect_ratio is provided. + */ + original_image_location?: Array + /** + * Image Url + * + * The URL of the input image. + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Original Image Size + * + * The desired size of the original image, inside the full canvas. Ensure that the ratio of input image foreground or main subject to the canvas area is greater than 15% to achieve optimal results. Will be ignored if aspect_ratio is provided. + */ + original_image_size?: Array + /** + * Canvas Size + * + * The desired size of the final image, after the expansion. Should have an area of less than 5000x5000 pixels. + */ + canvas_size: Array + /** + * Seed + * + * You can choose whether you want your generated expansion to be random or predictable. You can recreate the same result in the future by using the seed value of a result from the response. You can exclude this parameter if you are not interested in recreating your results. This parameter is optional. + */ + seed?: number + /** + * Negative Prompt + * + * The negative prompt you would like to use to generate images. + */ + negative_prompt?: string +} + +/** + * DetectionOutput + */ +export type SchemaMoondreamNextDetectionOutput = { + /** + * Output Image + * + * Output image with detection visualization + */ + image?: SchemaImage + /** + * Text Output + * + * Detection results as text + */ + text_output: string +} + +/** + * DetectionInput + */ +export type SchemaMoondreamNextDetectionInput = { + /** + * Detection Prompt + * + * Text description of what to detect + */ + detection_prompt: string + /** + * Use Ensemble + * + * Whether to use ensemble for gaze detection + */ + use_ensemble?: boolean + /** + * Task Type + * + * Type of detection to perform + */ + task_type: 'bbox_detection' | 'point_detection' | 'gaze_detection' + /** + * Show Visualization + * + * Whether to show visualization for detection + */ + show_visualization?: boolean + /** + * Combine Points + * + * Whether to combine points into a single point for point detection. This has no effect for bbox detection or gaze detection. + */ + combine_points?: boolean + /** + * Image URL + * + * Image URL to be processed + */ + image_url: string +} + +/** + * Output + */ +export type SchemaFluxProV1FillFinetunedOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * FluxProFillFinetunedInput + */ +export type SchemaFluxProV1FillFinetunedInput = { + /** + * Prompt + * + * The prompt to fill the masked part of the image. 
+ */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Fine-tune Strength + * + * + * Controls finetune influence. + * Increase this value if your target concept isn't showing up strongly enough. + * The optimal setting depends on your finetune and prompt. + * + */ + finetune_strength: number + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Fine-tune ID + * + * References your specific model + */ + finetune_id: string + /** + * Image URL + * + * The image URL to generate an image from. Needs to match the dimensions of the mask. + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Safety Tolerance + * + * The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive. + */ + safety_tolerance?: '1' | '2' | '3' | '4' | '5' | '6' + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Mask URL + * + * The mask URL to inpaint the image. Needs to match the dimensions of the input image. + */ + mask_url: string + /** + * Enhance Prompt + * + * Whether to enhance the prompt for better results. + */ + enhance_prompt?: boolean +} + +/** + * Output + */ +export type SchemaFluxLoraCannyOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * CannyInput + */ +export type SchemaFluxLoraCannyInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image Url + * + * URL of image to use for canny input + */ + image_url: string + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use any number of LoRAs + * and they will be merged together to generate the final image. + * + */ + loras?: Array + /** + * Sync Mode + * + * + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + * + */ + sync_mode?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. 
+ * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean +} + +/** + * TryOnOutput + */ +export type SchemaKlingV15KolorsVirtualTryOnOutput = { + /** + * Image + * + * The output image. + */ + image: SchemaImage +} + +/** + * TryOnRequest + */ +export type SchemaKlingV15KolorsVirtualTryOnInput = { + /** + * Garment Image Url + * + * Url to the garment image. + */ + garment_image_url: string + /** + * Sync Mode + * + * If true, the function will return the image in the response. + */ + sync_mode?: boolean + /** + * Human Image Url + * + * Url for the human image. + */ + human_image_url: string +} + +/** + * CodeformerOutput + */ +export type SchemaCodeformerOutput = { + /** + * Image + * + * The generated image file info. + */ + image: SchemaImage + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * CodeformerInput + */ +export type SchemaCodeformerInput = { + /** + * Aligned + * + * Whether faces etc. should be aligned. + */ + aligned?: boolean + /** + * Image Url + * + * URL of image to be used for face restoration + */ + image_url: string + /** + * Upscale Factor + * + * Upscaling factor + */ + upscale_factor?: number + /** + * Fidelity + * + * Weight of the fidelity factor. + */ + fidelity?: number + /** + * Face Upscale + * + * Should faces be upscaled + */ + face_upscale?: boolean + /** + * Only Center Face + * + * Should only center face be restored + */ + only_center_face?: boolean + /** + * Seed + * + * Random seed for reproducible generation. + */ + seed?: number +} + +/** + * UpscaleOutput + */ +export type SchemaIdeogramUpscaleOutput = { + /** + * Images + */ + images: Array + /** + * Seed + * + * Seed used for the random number generator + */ + seed: number +} + +/** + * UpscaleImageInput + */ +export type SchemaIdeogramUpscaleInput = { + /** + * Prompt + * + * The prompt to upscale the image with + */ + prompt?: string | unknown + /** + * Detail + * + * The detail of the upscaled image + */ + detail?: number + /** + * Resemblance + * + * The resemblance of the upscaled image to the original image + */ + resemblance?: number + /** + * Expand Prompt + * + * Whether to expand the prompt with MagicPrompt functionality. + */ + expand_prompt?: boolean + /** + * Image URL + * + * The image URL to upscale + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Seed + * + * Seed for the random number generator + */ + seed?: number | unknown +} + +/** + * Output + */ +export type SchemaFluxControlLoraCannyImageToImageOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. 
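+ *
+ * @example
+ * // Editor's sketch: this array is index-aligned with `images`, so results can
+ * // be filtered like this (assuming boolean entries, as in the other outputs
+ * // in this file).
+ * declare const output: SchemaFluxControlLoraCannyImageToImageOutput
+ * const safe = output.images.filter((_, i) => !output.has_nsfw_concepts[i])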
+ */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * ImageToImageInput + */ +export type SchemaFluxControlLoraCannyImageToImageInput = { + /** + * Control Lora Strength + * + * The strength of the control lora. + */ + control_lora_strength?: number + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use any number of LoRAs + * and they will be merged together to generate the final image. + * + */ + loras?: Array + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image Url + * + * URL of image to use for inpainting or img2img. + */ + image_url: string + /** + * Strength + * + * The strength to use for inpainting/image-to-image. Only used if the image_url is provided. 1.0 completely remakes the image while 0.0 preserves the original. + */ + strength?: number + /** + * Sync Mode + * + * + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + * + */ + sync_mode?: boolean + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Control Lora Image Url + * + * + * The image to use for control lora. This is used to control the style of the generated image. + * + */ + control_lora_image_url?: string +} + +/** + * Ben2OutputImage + */ +export type SchemaBenV2ImageOutput = { + /** + * Image + * + * The output image after background removal. + */ + image: SchemaImage + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * Ben2InputImage + */ +export type SchemaBenV2ImageInput = { + /** + * Seed + * + * Random seed for reproducible generation. + */ + seed?: number + /** + * Image Url + * + * URL of image to be used for background removal + */ + image_url: string +} + +/** + * Output + */ +export type SchemaFluxControlLoraDepthImageToImageOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. 
+ */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * ImageToImageInput + */ +export type SchemaFluxControlLoraDepthImageToImageInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Control Lora Strength + * + * The strength of the control lora. + */ + control_lora_strength?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use any number of LoRAs + * and they will be merged together to generate the final image. + * + */ + loras?: Array + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image Url + * + * URL of image to use for inpainting or img2img. + */ + image_url: string + /** + * Strength + * + * The strength to use for inpainting/image-to-image. Only used if the image_url is provided. 1.0 completely remakes the image while 0.0 preserves the original. + */ + strength?: number + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Control Lora Image Url + * + * + * The image to use for control lora. This is used to control the style of the generated image. + * + */ + control_lora_image_url: string + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number +} + +/** + * FlowEditOutput + */ +export type SchemaFloweditOutput = { + /** + * Image + * + * The generated image file info. + */ + image: SchemaImage + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * FlowEditInput + */ +export type SchemaFloweditInput = { + /** + * Source Guidance scale (CFG) + * + * Guidance scale for the source. + */ + src_guidance_scale?: number + /** + * N Min + * + * Minimum step for improved style edits + */ + n_min?: number + /** + * N Max + * + * Control the strength of the edit + */ + n_max?: number + /** + * Image Url + * + * URL of image to be edited + */ + image_url: string + /** + * Source Prompt + * + * Prompt of the image to be used. 
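+ *
+ * @example
+ * // Editor's sketch: FlowEdit takes a caption of the image as it currently is
+ * // (source_prompt) and a caption of the desired result (target_prompt); the
+ * // values below are placeholders.
+ * const input: SchemaFloweditInput = {
+ *   image_url: 'https://example.com/cat.png',
+ *   source_prompt: 'a photo of a cat sitting on a sofa',
+ *   target_prompt: 'a photo of a dog sitting on a sofa',
+ * }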
+ */ + source_prompt: string + /** + * Target Guidance scale (CFG) + * + * Guidance scale for target. + */ + tar_guidance_scale?: number + /** + * Target Prompt + * + * Prompt of the image to be made. + */ + target_prompt: string + /** + * Seed + * + * Random seed for reproducible generation. If set none, a random seed will be used. + */ + seed?: number + /** + * Steps + * + * Steps for which the model should run. + */ + num_inference_steps?: number + /** + * N Avg + * + * Average step count + */ + n_avg?: number +} + +/** + * ProcessedOutput + */ +export type SchemaPostProcessingOutput = { + /** + * Images + * + * The processed images + */ + images: Array +} + +/** + * ImageProcessingInput + */ +export type SchemaPostProcessingInput = { + /** + * Blue Shift + * + * Blue channel shift amount + */ + blue_shift?: number + /** + * Vertex Y + * + * Vertex Y position + */ + vertex_y?: number + /** + * Green Direction + * + * Green channel shift direction + */ + green_direction?: 'horizontal' | 'vertical' + /** + * Enable Glow + * + * Enable glow effect + */ + enable_glow?: boolean + /** + * Dodge Burn Mode + * + * Dodge and burn mode + */ + dodge_burn_mode?: + | 'dodge' + | 'burn' + | 'dodge_and_burn' + | 'burn_and_dodge' + | 'color_dodge' + | 'color_burn' + | 'linear_dodge' + | 'linear_burn' + /** + * Glow Intensity + * + * Glow intensity + */ + glow_intensity?: number + /** + * Blur Sigma + * + * Sigma for Gaussian blur + */ + blur_sigma?: number + /** + * Desaturate Method + * + * Desaturation method + */ + desaturate_method?: + | 'luminance (Rec.709)' + | 'luminance (Rec.601)' + | 'average' + | 'lightness' + /** + * Enable Blur + * + * Enable blur effect + */ + enable_blur?: boolean + /** + * Blur Radius + * + * Blur radius + */ + blur_radius?: number + /** + * Grain Style + * + * Style of film grain to apply + */ + grain_style?: + | 'modern' + | 'analog' + | 'kodak' + | 'fuji' + | 'cinematic' + | 'newspaper' + /** + * Cas Amount + * + * CAS sharpening amount + */ + cas_amount?: number + /** + * Gamma + * + * Gamma adjustment + */ + gamma?: number + /** + * Tint Mode + * + * Tint color mode + */ + tint_mode?: + | 'sepia' + | 'red' + | 'green' + | 'blue' + | 'cyan' + | 'magenta' + | 'yellow' + | 'purple' + | 'orange' + | 'warm' + | 'cool' + | 'lime' + | 'navy' + | 'vintage' + | 'rose' + | 'teal' + | 'maroon' + | 'peach' + | 'lavender' + | 'olive' + /** + * Blur Type + * + * Type of blur to apply + */ + blur_type?: 'gaussian' | 'kuwahara' + /** + * Enable Vignette + * + * Enable vignette effect + */ + enable_vignette?: boolean + /** + * Dissolve Image Url + * + * URL of second image for dissolve + */ + dissolve_image_url?: string + /** + * Red Shift + * + * Red channel shift amount + */ + red_shift?: number + /** + * Enable Desaturate + * + * Enable desaturation effect + */ + enable_desaturate?: boolean + /** + * Grain Intensity + * + * Film grain intensity (when enabled) + */ + grain_intensity?: number + /** + * Dodge Burn Intensity + * + * Dodge and burn intensity + */ + dodge_burn_intensity?: number + /** + * Smart Sharpen Strength + * + * Smart sharpen strength + */ + smart_sharpen_strength?: number + /** + * Red Direction + * + * Red channel shift direction + */ + red_direction?: 'horizontal' | 'vertical' + /** + * Image Url + * + * URL of image to process + */ + image_url: string + /** + * Vertex X + * + * Vertex X position + */ + vertex_x?: number + /** + * Tint Strength + * + * Tint strength + */ + tint_strength?: number + /** + * Enable Dissolve + * + * Enable dissolve effect + */ + 
enable_dissolve?: boolean + /** + * Enable Parabolize + * + * Enable parabolize effect + */ + enable_parabolize?: boolean + /** + * Enable Grain + * + * Enable film grain effect + */ + enable_grain?: boolean + /** + * Solarize Threshold + * + * Solarize threshold + */ + solarize_threshold?: number + /** + * Enable Sharpen + * + * Enable sharpen effect + */ + enable_sharpen?: boolean + /** + * Enable Dodge Burn + * + * Enable dodge and burn effect + */ + enable_dodge_burn?: boolean + /** + * Glow Radius + * + * Glow blur radius + */ + glow_radius?: number + /** + * Sharpen Alpha + * + * Sharpen strength (for basic mode) + */ + sharpen_alpha?: number + /** + * Enable Color Correction + * + * Enable color correction + */ + enable_color_correction?: boolean + /** + * Contrast + * + * Contrast adjustment + */ + contrast?: number + /** + * Enable Solarize + * + * Enable solarize effect + */ + enable_solarize?: boolean + /** + * Noise Radius + * + * Noise radius for smart sharpen + */ + noise_radius?: number + /** + * Grain Scale + * + * Film grain scale (when enabled) + */ + grain_scale?: number + /** + * Temperature + * + * Color temperature adjustment + */ + temperature?: number + /** + * Brightness + * + * Brightness adjustment + */ + brightness?: number + /** + * Blue Direction + * + * Blue channel shift direction + */ + blue_direction?: 'horizontal' | 'vertical' + /** + * Dissolve Factor + * + * Dissolve blend factor + */ + dissolve_factor?: number + /** + * Sharpen Mode + * + * Type of sharpening to apply + */ + sharpen_mode?: 'basic' | 'smart' | 'cas' + /** + * Vignette Strength + * + * Vignette strength (when enabled) + */ + vignette_strength?: number + /** + * Sharpen Radius + * + * Sharpen radius (for basic mode) + */ + sharpen_radius?: number + /** + * Parabolize Coeff + * + * Parabolize coefficient + */ + parabolize_coeff?: number + /** + * Saturation + * + * Saturation adjustment + */ + saturation?: number + /** + * Enable Tint + * + * Enable color tint effect + */ + enable_tint?: boolean + /** + * Green Shift + * + * Green channel shift amount + */ + green_shift?: number + /** + * Preserve Edges + * + * Edge preservation factor + */ + preserve_edges?: number + /** + * Desaturate Factor + * + * Desaturation factor + */ + desaturate_factor?: number + /** + * Smart Sharpen Ratio + * + * Smart sharpen blend ratio + */ + smart_sharpen_ratio?: number + /** + * Enable Chromatic + * + * Enable chromatic aberration + */ + enable_chromatic?: boolean +} + +/** + * NafnetOutputDenoise + */ +export type SchemaNafnetDenoiseOutput = { + /** + * Image + * + * The generated image file info. + */ + image: SchemaImage +} + +/** + * NafnetInputDenoise + */ +export type SchemaNafnetDenoiseInput = { + /** + * Seed + * + * seed to be used for generation + */ + seed?: number + /** + * Image Url + * + * URL of image to be used for relighting + */ + image_url: string +} + +/** + * NafnetOutput + */ +export type SchemaNafnetDeblurOutput = { + /** + * Image + * + * The generated image file info. 
+ */ + image: SchemaImage +} + +/** + * NafnetInput + */ +export type SchemaNafnetDeblurInput = { + /** + * Seed + * + * seed to be used for generation + */ + seed?: number + /** + * Image Url + * + * URL of image to be used for relighting + */ + image_url: string +} + +/** + * Output + */ +export type SchemaDrctSuperResolutionOutput = { + /** + * Image + * + * Upscaled image + */ + image: SchemaImage +} + +/** + * Input + */ +export type SchemaDrctSuperResolutionInput = { + /** + * Upscaling Factor (Xs) + * + * Upscaling factor. + */ + upscale_factor?: 4 + /** + * Image URL + * + * URL of the image to upscale. + */ + image_url: string +} + +/** + * SAM2AutomaticSegmentationOutput + */ +export type SchemaSam2AutoSegmentOutput = { + /** + * Combined Mask + * + * Combined segmentation mask. + */ + combined_mask: SchemaImage + /** + * Individual Masks + * + * Individual segmentation masks. + */ + individual_masks: Array +} + +/** + * SAM2AutomaticSegmentationInput + */ +export type SchemaSam2AutoSegmentInput = { + /** + * Points Per Side + * + * Number of points to sample along each side of the image. + */ + points_per_side?: number + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Min Mask Region Area + * + * Minimum area of a mask region. + */ + min_mask_region_area?: number + /** + * Image Url + * + * URL of the image to be automatically segmented + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Pred Iou Thresh + * + * Threshold for predicted IOU score. + */ + pred_iou_thresh?: number + /** + * Stability Score Thresh + * + * Threshold for stability score. + */ + stability_score_thresh?: number +} + +/** + * DDColorOutput + */ +export type SchemaDdcolorOutput = { + /** + * Image + * + * The generated image file info. + */ + image: SchemaImage +} + +/** + * DDColorInput + */ +export type SchemaDdcolorInput = { + /** + * Seed + * + * seed to be used for generation + */ + seed?: number + /** + * Image Url + * + * URL of image to be used for relighting + */ + image_url: string +} + +/** + * ImageOutput + */ +export type SchemaEvfSamOutput = { + /** + * Image + * + * The segmented output image + */ + image: SchemaFile +} + +/** + * ImageInput + */ +export type SchemaEvfSamInput = { + /** + * Prompt + * + * The prompt to generate segmentation from. + */ + prompt: string + /** + * Use Grounding Dino + * + * Use GroundingDINO instead of SAM for segmentation + */ + use_grounding_dino?: boolean + /** + * Semantic Type + * + * Enable semantic level segmentation for body parts, background or multi objects + */ + semantic_type?: boolean + /** + * Fill Holes + * + * Fill holes in the mask using morphological operations + */ + fill_holes?: boolean + /** + * Expand Mask + * + * Expand/dilate the mask by specified pixels + */ + expand_mask?: number + /** + * Mask Only + * + * Output only the binary mask instead of masked image + */ + mask_only?: boolean + /** + * Revert Mask + * + * Invert the mask (background becomes foreground and vice versa) + */ + revert_mask?: boolean + /** + * Blur Mask + * + * Apply Gaussian blur to the mask. 
Value determines kernel size (must be odd number) + */ + blur_mask?: number + /** + * Negative Prompt + * + * Areas to exclude from segmentation (will be subtracted from prompt results) + */ + negative_prompt?: string + /** + * Image Url + * + * URL of the input image + */ + image_url: string +} + +/** + * Output + */ +export type SchemaIdeogramV2aTurboRemixOutput = { + /** + * Images + */ + images: Array + /** + * Seed + * + * Seed used for the random number generator + */ + seed: number +} + +/** + * RemixImageInput + */ +export type SchemaIdeogramV2aTurboRemixInput = { + /** + * Prompt + * + * The prompt to remix the image with + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated image + */ + aspect_ratio?: + | '10:16' + | '16:10' + | '9:16' + | '16:9' + | '4:3' + | '3:4' + | '1:1' + | '1:3' + | '3:1' + | '3:2' + | '2:3' + /** + * Style + * + * The style of the generated image + */ + style?: 'auto' | 'general' | 'realistic' | 'design' | 'render_3D' | 'anime' + /** + * Expand Prompt + * + * Whether to expand the prompt with MagicPrompt functionality. + */ + expand_prompt?: boolean + /** + * Image URL + * + * The image URL to remix + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Strength + * + * Strength of the input image in the remix + */ + strength?: number + /** + * Seed + * + * Seed for the random number generator + */ + seed?: number | unknown +} + +/** + * Output + */ +export type SchemaIdeogramV2aRemixOutput = { + /** + * Images + */ + images: Array + /** + * Seed + * + * Seed used for the random number generator + */ + seed: number +} + +/** + * RemixImageInput + */ +export type SchemaIdeogramV2aRemixInput = { + /** + * Prompt + * + * The prompt to remix the image with + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated image + */ + aspect_ratio?: + | '10:16' + | '16:10' + | '9:16' + | '16:9' + | '4:3' + | '3:4' + | '1:1' + | '1:3' + | '3:1' + | '3:2' + | '2:3' + /** + * Style + * + * The style of the generated image + */ + style?: 'auto' | 'general' | 'realistic' | 'design' | 'render_3D' | 'anime' + /** + * Expand Prompt + * + * Whether to expand the prompt with MagicPrompt functionality. + */ + expand_prompt?: boolean + /** + * Image URL + * + * The image URL to remix + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Strength + * + * Strength of the input image in the remix + */ + strength?: number + /** + * Seed + * + * Seed for the random number generator + */ + seed?: number | unknown +} + +/** + * SwinSrOutput + */ +export type SchemaSwin2SrOutput = { + /** + * Image + * + * The generated image file info. + */ + image: SchemaImage +} + +/** + * SwinSrInput + */ +export type SchemaSwin2SrInput = { + /** + * Task + * + * Task to perform + */ + task?: 'classical_sr' | 'compressed_sr' | 'real_sr' + /** + * Seed + * + * seed to be used for generation + */ + seed?: number + /** + * Image Url + * + * URL of image to be used for image enhancement + */ + image_url: string +} + +/** + * DocResOutput + */ +export type SchemaDocresOutput = { + /** + * Image + * + * The generated image file info. 
+ */ + image: SchemaImage +} + +/** + * DocResInput + */ +export type SchemaDocresInput = { + /** + * Task + * + * Task to perform + */ + task: 'deshadowing' | 'appearance' | 'deblurring' | 'binarization' + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Image Url + * + * URL of image to be used for relighting + */ + image_url: string +} + +/** + * DocResOutput + */ +export type SchemaDocresDewarpOutput = { + /** + * Image + * + * The generated image file info. + */ + image: SchemaImage +} + +/** + * DocResInputDewarp + */ +export type SchemaDocresDewarpInput = { + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Image Url + * + * URL of image to be used for relighting + */ + image_url: string +} + +/** + * Output + */ +export type SchemaJuggernautFluxBaseImageToImageOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * DevImageToImageInput + */ +export type SchemaJuggernautFluxBaseImageToImageInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * The URL of the image to generate an image from. + */ + image_url: string + /** + * Strength + * + * The strength of the initial image. Higher strength values are better for this model. + */ + strength?: number + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean +} + +/** + * Output + */ +export type SchemaJuggernautFluxProImageToImageOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. 
It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * DevImageToImageInput + */ +export type SchemaJuggernautFluxProImageToImageInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * The URL of the image to generate an image from. + */ + image_url: string + /** + * Strength + * + * The strength of the initial image. Higher strength values are better for this model. + */ + strength?: number + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean +} + +/** + * WatermarkOutput + */ +export type SchemaInvisibleWatermarkOutput = { + /** + * Image + * + * The watermarked image file info (when encoding) + */ + image?: SchemaImage + /** + * Extracted Watermark + * + * The extracted watermark text (when decoding) + */ + extracted_watermark?: string + /** + * Length + * + * Length of the watermark bits used (helpful for future decoding) + */ + length?: number +} + +/** + * WatermarkInput + */ +export type SchemaInvisibleWatermarkInput = { + /** + * Decode + * + * Whether to decode a watermark from the image instead of encoding + */ + decode?: boolean + /** + * Watermark + * + * Text to use as watermark (for encoding only) + */ + watermark?: string + /** + * Length + * + * Length of watermark bits to decode (required when decode=True) + */ + length?: number + /** + * Image Url + * + * URL of image to be watermarked or decoded + */ + image_url: string +} + +/** + * GeminiImageOutput + */ +export type SchemaGeminiFlashEditOutput = { + /** + * Description + * + * Text description or response from Gemini + */ + description: string + image: SchemaImage +} + +/** + * GeminiImageRequest + */ +export type SchemaGeminiFlashEditInput = { + /** + * Prompt + * + * The prompt for image generation or editing + */ + prompt: string + /** + * Image Url + * + * Optional URL of an input image for editing. If not provided, generates a new image. 
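+ *
+ * @example
+ * // Editor's sketch of an edit request. Note that the generated type marks
+ * // image_url as required even though this description calls it optional; the
+ * // type wins at compile time. The URL is a placeholder.
+ * const input: SchemaGeminiFlashEditInput = {
+ *   prompt: 'make the sky look like sunset',
+ *   image_url: 'https://example.com/landscape.png',
+ * }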
+ */ + image_url: string +} + +/** + * GeminiImageOutput + */ +export type SchemaGeminiFlashEditMultiOutput = { + /** + * Description + * + * Text description or response from Gemini + */ + description: string + image: SchemaImage +} + +/** + * GeminiMultiImageRequest + */ +export type SchemaGeminiFlashEditMultiInput = { + /** + * Prompt + * + * The prompt for image generation or editing + */ + prompt: string + /** + * Input Image Urls + * + * List of URLs of input images for editing + */ + input_image_urls: Array +} + +/** + * MixDehazeNetOutput + */ +export type SchemaMixDehazeNetOutput = { + /** + * Image + * + * The generated image file info. + */ + image: SchemaImage +} + +/** + * MixDehazeNetInput + */ +export type SchemaMixDehazeNetInput = { + /** + * Model + * + * Model to be used for dehazing + */ + model?: 'indoor' | 'outdoor' + /** + * Seed + * + * seed to be used for generation + */ + seed?: number + /** + * Image Url + * + * URL of image to be used for image enhancement + */ + image_url: string +} + +/** + * TheraOutput + */ +export type SchemaTheraOutput = { + /** + * Image + * + * The generated image file info. + */ + image: SchemaImage + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * TheraInput + */ +export type SchemaTheraInput = { + /** + * Upscale Factor + * + * The upscaling factor for the image. + */ + upscale_factor?: number + /** + * Seed + * + * Random seed for reproducible generation. + */ + seed?: number + /** + * Backbone + * + * Backbone to use for upscaling + */ + backbone: 'edsr' | 'rdn' + /** + * Image Url + * + * URL of image to be used for upscaling + */ + image_url: string +} + +/** + * Output + */ +export type SchemaGhiblifyOutput = { + /** + * The URL of the generated image. + */ + image: SchemaImage +} + +/** + * Input + */ +export type SchemaGhiblifyInput = { + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * The seed to use for the upscale. If not provided, a random seed will be used. + */ + seed?: number | unknown + /** + * Image Url + * + * The URL of the image to upscale. + */ + image_url: string +} + +/** + * StarVectorOutput + */ +export type SchemaStarVectorOutput = { + image: SchemaFile + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. 
+ * + */ + seed: number +} + +/** + * StarVectorInput + */ +export type SchemaStarVectorInput = { + /** + * Seed + * + * seed to be used for generation + */ + seed?: number + /** + * Image Url + * + * URL of image to be used for vectorization + */ + image_url: string +} + +/** + * EraseOutput + */ +export type SchemaFinegrainEraserOutput = { + /** + * Image + * + * The edited image with content erased + */ + image: SchemaFile + /** + * Used Seed + * + * Seed used for generation + */ + used_seed: number +} + +/** + * PromptEraseRequest + */ +export type SchemaFinegrainEraserInput = { + /** + * Prompt + * + * Text description of what to erase + */ + prompt: string + /** + * Mode + * + * Erase quality mode + */ + mode?: 'express' | 'standard' | 'premium' + /** + * Seed + * + * Random seed for reproducible generation + */ + seed?: number + /** + * Image Url + * + * URL of the image to edit + */ + image_url: string +} + +/** + * BoxPromptBase + */ +export type SchemaBoxPromptBase = { + /** + * Y Min + * + * Y Min Coordinate of the box + */ + y_min?: number + /** + * X Max + * + * X Max Coordinate of the box + */ + x_max?: number + /** + * X Min + * + * X Min Coordinate of the box + */ + x_min?: number + /** + * Y Max + * + * Y Max Coordinate of the box + */ + y_max?: number +} + +/** + * EraseOutput + */ +export type SchemaFinegrainEraserBboxOutput = { + /** + * Image + * + * The edited image with content erased + */ + image: SchemaFile + /** + * Used Seed + * + * Seed used for generation + */ + used_seed: number +} + +/** + * BBoxEraseRequest + */ +export type SchemaFinegrainEraserBboxInput = { + /** + * Mode + * + * Erase quality mode + */ + mode?: 'express' | 'standard' | 'premium' + /** + * Seed + * + * Random seed for reproducible generation + */ + seed?: number + /** + * Box Prompts + * + * List of bounding box coordinates to erase (only one box prompt is supported) + */ + box_prompts: Array + /** + * Image Url + * + * URL of the image to edit + */ + image_url: string +} + +/** + * EraseOutput + */ +export type SchemaFinegrainEraserMaskOutput = { + /** + * Image + * + * The edited image with content erased + */ + image: SchemaFile + /** + * Used Seed + * + * Seed used for generation + */ + used_seed: number +} + +/** + * MaskEraseRequest + */ +export type SchemaFinegrainEraserMaskInput = { + /** + * Mode + * + * Erase quality mode + */ + mode?: 'express' | 'standard' | 'premium' + /** + * Seed + * + * Random seed for reproducible generation + */ + seed?: number + /** + * Mask Url + * + * URL of the mask image. Should be a binary mask where white (255) indicates areas to erase + */ + mask_url: string + /** + * Image Url + * + * URL of the image to edit + */ + image_url: string +} + +/** + * Output + */ +export type SchemaCartoonifyOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed.
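// The three Finegrain eraser variants above share `mode`, `seed`, and `image_url`
// and differ only in how the erase target is selected: text prompt, bounding box,
// or binary mask. A sketch of the bounding-box variant; the element shape follows
// SchemaBoxPromptBase (the Array generic parameter was lost in this rendering),
// the coordinate values are illustrative, and per the description only one box is
// honored. Same assumptions as the earlier sketches; the endpoint id is assumed.
import { fal } from '@fal-ai/client'
import type { SchemaFinegrainEraserBboxInput } from './types'

const eraseInput: SchemaFinegrainEraserBboxInput = {
  image_url: 'https://example.com/product-shot.jpg',
  box_prompts: [{ x_min: 120, y_min: 80, x_max: 340, y_max: 260 }],
  mode: 'standard',
}
const erased = await fal.subscribe('fal-ai/finegrain-eraser/bbox', { input: eraseInput })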
+ * + */ + seed: number +} + +/** + * CartoonifyInput + */ +export type SchemaCartoonifyInput = { + /** + * Use Cfg Zero + * + * Whether to use CFG zero + */ + use_cfg_zero?: boolean + /** + * Image Url + * + * URL of the image to apply Pixar style to + */ + image_url: string + /** + * Guidance Scale + * + * Guidance scale for the generation + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * Number of inference steps + */ + num_inference_steps?: number + /** + * Scale + * + * Scale factor for the Pixar effect + */ + scale?: number + /** + * Enable Safety Checker + * + * Whether to enable the safety checker + */ + enable_safety_checker?: boolean + /** + * Seed + * + * The seed for image generation. Same seed with same parameters will generate same image. + */ + seed?: number +} + +/** + * ImageOutput + */ +export type SchemaInstantCharacterOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated images + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * TextToImageInput + */ +export type SchemaInstantCharacterInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Scale + * + * The scale of the subject image. Higher values will make the subject image more prominent in the generated image. + */ + scale?: number + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * The image URL to generate an image from. Needs to match the dimensions of the mask. + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean +} + +/** + * Output + */ +export type SchemaPlushifyOutput = { + /** + * Prompt + * + * The prompt used for generating the image. 
+ */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * PlushifyInput + */ +export type SchemaPlushifyInput = { + /** + * Prompt + * + * Prompt for the generation. Default is empty which is usually best, but sometimes it can help to add a description of the subject. + */ + prompt?: string + /** + * Num Images + * + * Number of images to generate + */ + num_images?: number + /** + * Use Cfg Zero + * + * Whether to use CFG zero + */ + use_cfg_zero?: boolean + /** + * Image Url + * + * URL of the image to apply cartoon style to + */ + image_url: string + /** + * Scale + * + * Scale factor for the Cartoon effect + */ + scale?: number + /** + * Num Inference Steps + * + * Number of inference steps + */ + num_inference_steps?: number + /** + * Guidance Scale + * + * Guidance scale for the generation + */ + guidance_scale?: number + /** + * Enable Safety Checker + * + * Whether to enable the safety checker + */ + enable_safety_checker?: boolean + /** + * Seed + * + * The seed for image generation. Same seed with same parameters will generate same image. + */ + seed?: number +} + +/** + * Output + */ +export type SchemaFashnTryonV15Output = { + /** + * Images + */ + images: Array +} + +/** + * Input + */ +export type SchemaFashnTryonV15Input = { + /** + * Model Image + * + * URL or base64 of the model image + */ + model_image: string + /** + * Moderation Level + * + * Content moderation level for garment images. 'none' disables moderation, 'permissive' blocks only explicit content, 'conservative' also blocks underwear and swimwear. + */ + moderation_level?: 'none' | 'permissive' | 'conservative' + /** + * Garment Photo Type + * + * Specifies the type of garment photo to optimize internal parameters for better performance. 'model' is for photos of garments on a model, 'flat-lay' is for flat-lay or ghost mannequin images, and 'auto' attempts to automatically detect the photo type. + */ + garment_photo_type?: 'auto' | 'model' | 'flat-lay' + /** + * Garment Image + * + * URL or base64 of the garment image + */ + garment_image: string + /** + * Category + * + * Category of the garment to try-on. 'auto' will attempt to automatically detect the category of the garment. + */ + category?: 'tops' | 'bottoms' | 'one-pieces' | 'auto' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Segmentation Free + * + * Disables human parsing on the model image. + */ + segmentation_free?: boolean + /** + * Num Samples + * + * Number of images to generate in a single run. Image generation has a random element in it, so trying multiple images at once increases the chances of getting a good result. + */ + num_samples?: number + /** + * Mode + * + * Specifies the mode of operation. 'performance' mode is faster but may sacrifice quality, 'balanced' mode is a balance between speed and quality, and 'quality' mode is slower but produces higher quality results. 
+ */ + mode?: 'performance' | 'balanced' | 'quality' + /** + * Seed + * + * Sets random operations to a fixed state. Use the same seed to reproduce results with the same inputs, or different seed to force different results. + */ + seed?: number + /** + * Output Format + * + * Output format of the generated images. 'png' is highest quality, while 'jpeg' is faster. + */ + output_format?: 'png' | 'jpeg' +} + +/** + * Output + */ +export type SchemaJuggernautFluxLoraInpaintingOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * InpaintInput + */ +export type SchemaJuggernautFluxLoraInpaintingInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image Url + * + * URL of image to use for inpainting or img2img. + */ + image_url: string + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use any number of LoRAs + * and they will be merged together to generate the final image. + * + */ + loras?: Array + /** + * Sync Mode + * + * + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + * + */ + sync_mode?: boolean + /** + * Strength + * + * The strength to use for inpainting/image-to-image. Only used if the image_url is provided. 1.0 completely remakes the image while 0.0 preserves the original. + */ + strength?: number + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Mask Url + * + * + * The mask of the area to inpaint. + * + */ + mask_url: string + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean +} + +/** + * EditImageResponse + */ +export type SchemaGptImage1EditImageOutput = { + /** + * Images + * + * The generated images.
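// Inpainting sketch for the Juggernaut Flux LoRA schema above: an `image_url`
// plus a `mask_url` marking the region to repaint, with `strength` controlling
// how much of the original survives (1.0 remakes, 0.0 preserves). The LoRA entry
// shape (`path`, `scale`) follows the convention described elsewhere in this file
// (see the HiDream loras field); the URLs are placeholders.
import type { SchemaJuggernautFluxLoraInpaintingInput } from './types'

const inpaintInput: SchemaJuggernautFluxLoraInpaintingInput = {
  prompt: 'a red leather armchair',
  image_url: 'https://example.com/room.png',
  mask_url: 'https://example.com/room-mask.png',
  strength: 0.85,
  loras: [{ path: 'https://example.com/style-lora.safetensors', scale: 1 }],
}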
+ */ + images: Array +} + +/** + * EditImageRequest + */ +export type SchemaGptImage1EditImageInput = { + /** + * Prompt + * + * The prompt for image generation + */ + prompt: string + /** + * Number of Images + * + * Number of images to generate + */ + num_images?: number + /** + * Image Size + * + * Aspect ratio for the generated image + */ + image_size?: 'auto' | '1024x1024' | '1536x1024' | '1024x1536' + /** + * Background + * + * Background for the generated image + */ + background?: 'auto' | 'transparent' | 'opaque' + /** + * Quality + * + * Quality for the generated image + */ + quality?: 'auto' | 'low' | 'medium' | 'high' + /** + * Output Format + * + * Output format for the images + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Input Fidelity + * + * Input fidelity for the generated image + */ + input_fidelity?: 'low' | 'high' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Image URLs + * + * The URLs of the images to use as a reference for the generation. + */ + image_urls: Array +} + +/** + * UNOOutput + */ +export type SchemaUnoOutput = { + /** + * Prompt + * + * The prompt used to generate the image. + */ + prompt: string + /** + * Images + * + * The URLs of the generated images. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * UNOInput + */ +export type SchemaUnoInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * + * The size of the generated image. You can choose between some presets or custom height and width + * that **must be multiples of 8**. + * + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Input Image Urls + * + * URL of images to use while generating the image. + */ + input_image_urls: Array + /** + * Sync Mode + * + * + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + * + */ + sync_mode?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * Random seed for reproducible generation. If set none, a random seed will be used. + */ + seed?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. 
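// The gpt-image-1 edit schema above takes a list of reference images rather than
// a single `image_url`, plus quality/background/fidelity knobs. Minimal sketch
// under the same assumptions as the earlier sketches; the endpoint id is an
// assumption for illustration.
import { fal } from '@fal-ai/client'
import type { SchemaGptImage1EditImageInput } from './types'

const gptEditInput: SchemaGptImage1EditImageInput = {
  prompt: 'place the product into the lifestyle scene',
  image_urls: ['https://example.com/product.png', 'https://example.com/scene.png'],
  image_size: '1536x1024',
  quality: 'high',
  input_fidelity: 'high',
}
const gptEdit = await fal.subscribe('fal-ai/gpt-image-1/edit-image', { input: gptEditInput })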
+ */ + enable_safety_checker?: boolean +} + +/** + * Image2SVGOutput + */ +export type SchemaImage2SvgOutput = { + /** + * Images + * + * The converted SVG file + */ + images: Array +} + +/** + * Image2SVGInput + */ +export type SchemaImage2SvgInput = { + /** + * Splice Threshold + * + * Splice threshold for joining paths + */ + splice_threshold?: number + /** + * Hierarchical + * + * Hierarchical mode: stacked or cutout + */ + hierarchical?: 'stacked' | 'cutout' + /** + * Color Precision + * + * Color quantization level + */ + color_precision?: number + /** + * Colormode + * + * Choose between color or binary (black and white) output + */ + colormode?: 'color' | 'binary' + /** + * Max Iterations + * + * Maximum number of iterations for optimization + */ + max_iterations?: number + /** + * Length Threshold + * + * Length threshold for curves/lines + */ + length_threshold?: number + /** + * Image Url + * + * The image to convert to SVG + */ + image_url: string + /** + * Mode + * + * Mode: spline (curved) or polygon (straight lines) + */ + mode?: 'spline' | 'polygon' + /** + * Corner Threshold + * + * Corner detection threshold in degrees + */ + corner_threshold?: number + /** + * Path Precision + * + * Decimal precision for path coordinates + */ + path_precision?: number + /** + * Filter Speckle + * + * Filter out small speckles and noise + */ + filter_speckle?: number + /** + * Layer Difference + * + * Layer difference threshold for hierarchical mode + */ + layer_difference?: number +} + +/** + * ImageOutput + */ +export type SchemaStep1xEditOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated images + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * TextToImageInput + */ +export type SchemaStep1xEditInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * The image URL to generate an image from. Needs to match the dimensions of the mask. + */ + image_url: string + /** + * Sync Mode + * + * + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + * + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. 
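// The image-to-SVG input above exposes classic raster-tracer options: color
// quantization, speckle filtering, corner detection, and spline vs polygon
// paths. A configuration sketch leaning toward flat logo artwork; the values are
// illustrative choices, not recommendations from the schema.
import type { SchemaImage2SvgInput } from './types'

const svgInput: SchemaImage2SvgInput = {
  image_url: 'https://example.com/logo.png',
  colormode: 'color',
  mode: 'spline',
  color_precision: 6,
  filter_speckle: 4,
  corner_threshold: 60,
}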
This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * EditOutputV3 + */ +export type SchemaIdeogramV3EditOutput = { + /** + * Images + */ + images: Array + /** + * Seed + * + * Seed used for the random number generator + */ + seed: number +} + +/** + * EditImageInputV3 + */ +export type SchemaIdeogramV3EditInput = { + /** + * Prompt + * + * The prompt to fill the masked part of the image. + */ + prompt: string + /** + * Num Images + * + * Number of images to generate. + */ + num_images?: number + /** + * Style Preset + * + * Style preset for generation. The chosen style preset will guide the generation. + */ + style_preset?: + | '80S_ILLUSTRATION' + | '90S_NOSTALGIA' + | 'ABSTRACT_ORGANIC' + | 'ANALOG_NOSTALGIA' + | 'ART_BRUT' + | 'ART_DECO' + | 'ART_POSTER' + | 'AURA' + | 'AVANT_GARDE' + | 'BAUHAUS' + | 'BLUEPRINT' + | 'BLURRY_MOTION' + | 'BRIGHT_ART' + | 'C4D_CARTOON' + | 'CHILDRENS_BOOK' + | 'COLLAGE' + | 'COLORING_BOOK_I' + | 'COLORING_BOOK_II' + | 'CUBISM' + | 'DARK_AURA' + | 'DOODLE' + | 'DOUBLE_EXPOSURE' + | 'DRAMATIC_CINEMA' + | 'EDITORIAL' + | 'EMOTIONAL_MINIMAL' + | 'ETHEREAL_PARTY' + | 'EXPIRED_FILM' + | 'FLAT_ART' + | 'FLAT_VECTOR' + | 'FOREST_REVERIE' + | 'GEO_MINIMALIST' + | 'GLASS_PRISM' + | 'GOLDEN_HOUR' + | 'GRAFFITI_I' + | 'GRAFFITI_II' + | 'HALFTONE_PRINT' + | 'HIGH_CONTRAST' + | 'HIPPIE_ERA' + | 'ICONIC' + | 'JAPANDI_FUSION' + | 'JAZZY' + | 'LONG_EXPOSURE' + | 'MAGAZINE_EDITORIAL' + | 'MINIMAL_ILLUSTRATION' + | 'MIXED_MEDIA' + | 'MONOCHROME' + | 'NIGHTLIFE' + | 'OIL_PAINTING' + | 'OLD_CARTOONS' + | 'PAINT_GESTURE' + | 'POP_ART' + | 'RETRO_ETCHING' + | 'RIVIERA_POP' + | 'SPOTLIGHT_80S' + | 'STYLIZED_RED' + | 'SURREAL_COLLAGE' + | 'TRAVEL_POSTER' + | 'VINTAGE_GEO' + | 'VINTAGE_POSTER' + | 'WATERCOLOR' + | 'WEIRD' + | 'WOODBLOCK_PRINT' + | unknown + /** + * Expand Prompt + * + * Determine if MagicPrompt should be used in generating the request or not. + */ + expand_prompt?: boolean + /** + * Rendering Speed + * + * The rendering speed to use. + */ + rendering_speed?: 'TURBO' | 'BALANCED' | 'QUALITY' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * A color palette for generation, must EITHER be specified via one of the presets (name) or explicitly via hexadecimal representations of the color with optional weights (members) + */ + color_palette?: SchemaColorPalette | unknown + /** + * Style Codes + * + * A list of 8 character hexadecimal codes representing the style of the image. Cannot be used in conjunction with style_reference_images or style + */ + style_codes?: Array | unknown + /** + * Image URL + * + * The image URL to generate an image from. MUST have the exact same dimensions (width and height) as the mask image. + */ + image_url: string + /** + * Seed + * + * Seed for the random number generator + */ + seed?: number | unknown + /** + * Image Urls + * + * A set of images to use as style references (maximum total size 10MB across all style references). The images should be in JPEG, PNG or WebP format + */ + image_urls?: Array | unknown + /** + * Mask URL + * + * The mask URL to inpaint the image. MUST have the exact same dimensions (width and height) as the input image. 
+ */ + mask_url: string +} + +/** + * RGBColor + */ +export type SchemaRgbColor = { + /** + * R + * + * Red color value + */ + r?: number + /** + * B + * + * Blue color value + */ + b?: number + /** + * G + * + * Green color value + */ + g?: number +} + +/** + * ColorPaletteMember + */ +export type SchemaColorPaletteMember = { + /** + * Color Weight + * + * The weight of the color in the color palette + */ + color_weight?: number | unknown + rgb: SchemaRgbColor +} + +/** + * ColorPalette + */ +export type SchemaColorPalette = { + /** + * Members + * + * A list of color palette members that define the color palette + */ + members?: Array | unknown + /** + * Name + * + * A color palette preset value + */ + name?: + | 'EMBER' + | 'FRESH' + | 'JUNGLE' + | 'MAGIC' + | 'MELON' + | 'MOSAIC' + | 'PASTEL' + | 'ULTRAMARINE' + | unknown +} + +/** + * RemixOutputV3 + */ +export type SchemaIdeogramV3RemixOutput = { + /** + * Images + */ + images: Array + /** + * Seed + * + * Seed used for the random number generator + */ + seed: number +} + +/** + * RemixImageInputV3 + */ +export type SchemaIdeogramV3RemixInput = { + /** + * Prompt + * + * The prompt to remix the image with + */ + prompt: string + /** + * Image Size + * + * The resolution of the generated image + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + | unknown + /** + * Style + * + * The style type to generate with. Cannot be used with style_codes. + */ + style?: 'AUTO' | 'GENERAL' | 'REALISTIC' | 'DESIGN' | unknown + /** + * Expand Prompt + * + * Determine if MagicPrompt should be used in generating the request or not. + */ + expand_prompt?: boolean + /** + * Rendering Speed + * + * The rendering speed to use. + */ + rendering_speed?: 'TURBO' | 'BALANCED' | 'QUALITY' + /** + * Image Urls + * + * A set of images to use as style references (maximum total size 10MB across all style references). The images should be in JPEG, PNG or WebP format + */ + image_urls?: Array | unknown + /** + * Negative Prompt + * + * Description of what to exclude from an image. Descriptions in the prompt take precedence to descriptions in the negative prompt. + */ + negative_prompt?: string + /** + * Num Images + * + * Number of images to generate. + */ + num_images?: number + /** + * Image URL + * + * The image URL to remix + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * A color palette for generation, must EITHER be specified via one of the presets (name) or explicitly via hexadecimal representations of the color with optional weights (members) + */ + color_palette?: SchemaColorPalette | unknown + /** + * Strength + * + * Strength of the input image in the remix + */ + strength?: number + /** + * Style Codes + * + * A list of 8 character hexadecimal codes representing the style of the image. 
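// SchemaColorPalette above encodes an either/or contract in prose that the type
// cannot enforce: a preset `name` OR explicit weighted `members`, not both. Two
// valid shapes, assuming the members array carries SchemaColorPaletteMember
// values (the Array generic parameter was lost in this rendering):
import type { SchemaColorPalette } from './types'

const presetPalette: SchemaColorPalette = { name: 'EMBER' }
const explicitPalette: SchemaColorPalette = {
  members: [
    { rgb: { r: 255, g: 110, b: 64 }, color_weight: 0.7 },
    { rgb: { r: 20, g: 24, b: 48 }, color_weight: 0.3 },
  ],
}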
Cannot be used in conjunction with style_reference_images or style + */ + style_codes?: Array | unknown + /** + * Seed + * + * Seed for the random number generator + */ + seed?: number | unknown +} + +/** + * ReplaceBackgroundOutputV3 + */ +export type SchemaIdeogramV3ReplaceBackgroundOutput = { + /** + * Images + */ + images: Array + /** + * Seed + * + * Seed used for the random number generator + */ + seed: number +} + +/** + * ReplaceBackgroundInputV3 + */ +export type SchemaIdeogramV3ReplaceBackgroundInput = { + /** + * Prompt + * + * Cyber punk city with neon lights and skyscrappers + */ + prompt: string + /** + * Num Images + * + * Number of images to generate. + */ + num_images?: number + /** + * Style + * + * The style type to generate with. Cannot be used with style_codes. + */ + style?: 'AUTO' | 'GENERAL' | 'REALISTIC' | 'DESIGN' | unknown + /** + * Style Preset + * + * Style preset for generation. The chosen style preset will guide the generation. + */ + style_preset?: + | '80S_ILLUSTRATION' + | '90S_NOSTALGIA' + | 'ABSTRACT_ORGANIC' + | 'ANALOG_NOSTALGIA' + | 'ART_BRUT' + | 'ART_DECO' + | 'ART_POSTER' + | 'AURA' + | 'AVANT_GARDE' + | 'BAUHAUS' + | 'BLUEPRINT' + | 'BLURRY_MOTION' + | 'BRIGHT_ART' + | 'C4D_CARTOON' + | 'CHILDRENS_BOOK' + | 'COLLAGE' + | 'COLORING_BOOK_I' + | 'COLORING_BOOK_II' + | 'CUBISM' + | 'DARK_AURA' + | 'DOODLE' + | 'DOUBLE_EXPOSURE' + | 'DRAMATIC_CINEMA' + | 'EDITORIAL' + | 'EMOTIONAL_MINIMAL' + | 'ETHEREAL_PARTY' + | 'EXPIRED_FILM' + | 'FLAT_ART' + | 'FLAT_VECTOR' + | 'FOREST_REVERIE' + | 'GEO_MINIMALIST' + | 'GLASS_PRISM' + | 'GOLDEN_HOUR' + | 'GRAFFITI_I' + | 'GRAFFITI_II' + | 'HALFTONE_PRINT' + | 'HIGH_CONTRAST' + | 'HIPPIE_ERA' + | 'ICONIC' + | 'JAPANDI_FUSION' + | 'JAZZY' + | 'LONG_EXPOSURE' + | 'MAGAZINE_EDITORIAL' + | 'MINIMAL_ILLUSTRATION' + | 'MIXED_MEDIA' + | 'MONOCHROME' + | 'NIGHTLIFE' + | 'OIL_PAINTING' + | 'OLD_CARTOONS' + | 'PAINT_GESTURE' + | 'POP_ART' + | 'RETRO_ETCHING' + | 'RIVIERA_POP' + | 'SPOTLIGHT_80S' + | 'STYLIZED_RED' + | 'SURREAL_COLLAGE' + | 'TRAVEL_POSTER' + | 'VINTAGE_GEO' + | 'VINTAGE_POSTER' + | 'WATERCOLOR' + | 'WEIRD' + | 'WOODBLOCK_PRINT' + | unknown + /** + * Expand Prompt + * + * Determine if MagicPrompt should be used in generating the request or not. + */ + expand_prompt?: boolean + /** + * Rendering Speed + * + * The rendering speed to use. + */ + rendering_speed?: 'TURBO' | 'BALANCED' | 'QUALITY' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * A color palette for generation, must EITHER be specified via one of the presets (name) or explicitly via hexadecimal representations of the color with optional weights (members) + */ + color_palette?: SchemaColorPalette | unknown + /** + * Style Codes + * + * A list of 8 character hexadecimal codes representing the style of the image. Cannot be used in conjunction with style_reference_images or style + */ + style_codes?: Array | unknown + /** + * Image URL + * + * The image URL whose background needs to be replaced + */ + image_url: string + /** + * Seed + * + * Seed for the random number generator + */ + seed?: number | unknown + /** + * Image Urls + * + * A set of images to use as style references (maximum total size 10MB across all style references). 
The images should be in JPEG, PNG or WebP format + */ + image_urls?: Array | unknown +} + +/** + * ReframeOutputV3 + */ +export type SchemaIdeogramV3ReframeOutput = { + /** + * Images + */ + images: Array + /** + * Seed + * + * Seed used for the random number generator + */ + seed: number +} + +/** + * ReframeImageInputV3 + */ +export type SchemaIdeogramV3ReframeInput = { + /** + * Num Images + * + * Number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The resolution for the reframed output image + */ + image_size: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Style + * + * The style type to generate with. Cannot be used with style_codes. + */ + style?: 'AUTO' | 'GENERAL' | 'REALISTIC' | 'DESIGN' | unknown + /** + * Style Preset + * + * Style preset for generation. The chosen style preset will guide the generation. + */ + style_preset?: + | '80S_ILLUSTRATION' + | '90S_NOSTALGIA' + | 'ABSTRACT_ORGANIC' + | 'ANALOG_NOSTALGIA' + | 'ART_BRUT' + | 'ART_DECO' + | 'ART_POSTER' + | 'AURA' + | 'AVANT_GARDE' + | 'BAUHAUS' + | 'BLUEPRINT' + | 'BLURRY_MOTION' + | 'BRIGHT_ART' + | 'C4D_CARTOON' + | 'CHILDRENS_BOOK' + | 'COLLAGE' + | 'COLORING_BOOK_I' + | 'COLORING_BOOK_II' + | 'CUBISM' + | 'DARK_AURA' + | 'DOODLE' + | 'DOUBLE_EXPOSURE' + | 'DRAMATIC_CINEMA' + | 'EDITORIAL' + | 'EMOTIONAL_MINIMAL' + | 'ETHEREAL_PARTY' + | 'EXPIRED_FILM' + | 'FLAT_ART' + | 'FLAT_VECTOR' + | 'FOREST_REVERIE' + | 'GEO_MINIMALIST' + | 'GLASS_PRISM' + | 'GOLDEN_HOUR' + | 'GRAFFITI_I' + | 'GRAFFITI_II' + | 'HALFTONE_PRINT' + | 'HIGH_CONTRAST' + | 'HIPPIE_ERA' + | 'ICONIC' + | 'JAPANDI_FUSION' + | 'JAZZY' + | 'LONG_EXPOSURE' + | 'MAGAZINE_EDITORIAL' + | 'MINIMAL_ILLUSTRATION' + | 'MIXED_MEDIA' + | 'MONOCHROME' + | 'NIGHTLIFE' + | 'OIL_PAINTING' + | 'OLD_CARTOONS' + | 'PAINT_GESTURE' + | 'POP_ART' + | 'RETRO_ETCHING' + | 'RIVIERA_POP' + | 'SPOTLIGHT_80S' + | 'STYLIZED_RED' + | 'SURREAL_COLLAGE' + | 'TRAVEL_POSTER' + | 'VINTAGE_GEO' + | 'VINTAGE_POSTER' + | 'WATERCOLOR' + | 'WEIRD' + | 'WOODBLOCK_PRINT' + | unknown + /** + * Rendering Speed + * + * The rendering speed to use. + */ + rendering_speed?: 'TURBO' | 'BALANCED' | 'QUALITY' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * A color palette for generation, must EITHER be specified via one of the presets (name) or explicitly via hexadecimal representations of the color with optional weights (members) + */ + color_palette?: SchemaColorPalette | unknown + /** + * Style Codes + * + * A list of 8 character hexadecimal codes representing the style of the image. Cannot be used in conjunction with style_reference_images or style + */ + style_codes?: Array | unknown + /** + * Image URL + * + * The image URL to reframe + */ + image_url: string + /** + * Seed + * + * Seed for the random number generator + */ + seed?: number | unknown + /** + * Image Urls + * + * A set of images to use as style references (maximum total size 10MB across all style references). The images should be in JPEG, PNG or WebP format + */ + image_urls?: Array | unknown +} + +/** + * Img2ImgOutput + */ +export type SchemaHidreamI1FullImageToImageOutput = { + /** + * Prompt + * + * The prompt used for generating the image. 
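// Unlike its sibling Ideogram v3 inputs, the reframe schema above makes
// `image_size` required: reframing is precisely "extend this image to a target
// resolution". Minimal sketch, same assumptions as the earlier sketches; the
// endpoint id is assumed.
import { fal } from '@fal-ai/client'
import type { SchemaIdeogramV3ReframeInput } from './types'

const reframeInput: SchemaIdeogramV3ReframeInput = {
  image_url: 'https://example.com/square-crop.png',
  image_size: 'landscape_16_9',
  rendering_speed: 'QUALITY',
}
const reframed = await fal.subscribe('fal-ai/ideogram/v3/reframe', { input: reframeInput })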
+ */ + prompt: string + /** + * Images + * + * The generated images + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * ImageToImageInput + */ +export type SchemaHidreamI1FullImageToImageInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. Setting to None uses the input image's size. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * The image URL to generate an image from. + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Loras + * + * A list of LoRAs to apply to the model. Each LoRA specifies its path, scale, and optional weight name. + */ + loras?: Array + /** + * Strength + * + * Denoising strength for image-to-image generation. + */ + strength?: number + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. 
+ */ + enable_safety_checker?: boolean +} + +/** + * MiniMaxTextToImageWithReferenceOutput + */ +export type SchemaMinimaxImage01SubjectReferenceOutput = { + /** + * Images + * + * Generated images + */ + images: Array +} + +/** + * MiniMaxTextToImageWithReferenceRequest + */ +export type SchemaMinimaxImage01SubjectReferenceInput = { + /** + * Prompt Optimizer + * + * Whether to enable automatic prompt optimization + */ + prompt_optimizer?: boolean + /** + * Aspect Ratio + * + * Aspect ratio of the generated image + */ + aspect_ratio?: + | '1:1' + | '16:9' + | '4:3' + | '3:2' + | '2:3' + | '3:4' + | '9:16' + | '21:9' + /** + * Num Images + * + * Number of images to generate (1-9) + */ + num_images?: number + /** + * Prompt + * + * Text prompt for image generation (max 1500 characters) + */ + prompt: string + /** + * Image Url + * + * URL of the subject reference image to use for consistent character appearance + */ + image_url: string +} + +/** + * ImageToImageOutput + */ +export type SchemaRecraftV3ImageToImageOutput = { + /** + * Images + * + * The generated images + */ + images: Array +} + +/** + * ImageToImageInput + */ +export type SchemaRecraftV3ImageToImageInput = { + /** + * Prompt + * + * A text description of areas to change. + */ + prompt: string + /** + * Style + * + * The style of the generated images. Vector images cost 2X as much. + */ + style?: + | 'any' + | 'realistic_image' + | 'digital_illustration' + | 'vector_illustration' + | 'realistic_image/b_and_w' + | 'realistic_image/hard_flash' + | 'realistic_image/hdr' + | 'realistic_image/natural_light' + | 'realistic_image/studio_portrait' + | 'realistic_image/enterprise' + | 'realistic_image/motion_blur' + | 'realistic_image/evening_light' + | 'realistic_image/faded_nostalgia' + | 'realistic_image/forest_life' + | 'realistic_image/mystic_naturalism' + | 'realistic_image/natural_tones' + | 'realistic_image/organic_calm' + | 'realistic_image/real_life_glow' + | 'realistic_image/retro_realism' + | 'realistic_image/retro_snapshot' + | 'realistic_image/urban_drama' + | 'realistic_image/village_realism' + | 'realistic_image/warm_folk' + | 'digital_illustration/pixel_art' + | 'digital_illustration/hand_drawn' + | 'digital_illustration/grain' + | 'digital_illustration/infantile_sketch' + | 'digital_illustration/2d_art_poster' + | 'digital_illustration/handmade_3d' + | 'digital_illustration/hand_drawn_outline' + | 'digital_illustration/engraving_color' + | 'digital_illustration/2d_art_poster_2' + | 'digital_illustration/antiquarian' + | 'digital_illustration/bold_fantasy' + | 'digital_illustration/child_book' + | 'digital_illustration/child_books' + | 'digital_illustration/cover' + | 'digital_illustration/crosshatch' + | 'digital_illustration/digital_engraving' + | 'digital_illustration/expressionism' + | 'digital_illustration/freehand_details' + | 'digital_illustration/grain_20' + | 'digital_illustration/graphic_intensity' + | 'digital_illustration/hard_comics' + | 'digital_illustration/long_shadow' + | 'digital_illustration/modern_folk' + | 'digital_illustration/multicolor' + | 'digital_illustration/neon_calm' + | 'digital_illustration/noir' + | 'digital_illustration/nostalgic_pastel' + | 'digital_illustration/outline_details' + | 'digital_illustration/pastel_gradient' + | 'digital_illustration/pastel_sketch' + | 'digital_illustration/pop_art' + | 'digital_illustration/pop_renaissance' + | 'digital_illustration/street_art' + | 'digital_illustration/tablet_sketch' + | 'digital_illustration/urban_glow' + | 
'digital_illustration/urban_sketching' + | 'digital_illustration/vanilla_dreams' + | 'digital_illustration/young_adult_book' + | 'digital_illustration/young_adult_book_2' + | 'vector_illustration/bold_stroke' + | 'vector_illustration/chemistry' + | 'vector_illustration/colored_stencil' + | 'vector_illustration/contour_pop_art' + | 'vector_illustration/cosmics' + | 'vector_illustration/cutout' + | 'vector_illustration/depressive' + | 'vector_illustration/editorial' + | 'vector_illustration/emotional_flat' + | 'vector_illustration/infographical' + | 'vector_illustration/marker_outline' + | 'vector_illustration/mosaic' + | 'vector_illustration/naivector' + | 'vector_illustration/roundish_flat' + | 'vector_illustration/segmented_colors' + | 'vector_illustration/sharp_contrast' + | 'vector_illustration/thin' + | 'vector_illustration/vector_photo' + | 'vector_illustration/vivid_shapes' + | 'vector_illustration/engraving' + | 'vector_illustration/line_art' + | 'vector_illustration/line_circuit' + | 'vector_illustration/linocut' + /** + * Style Id + * + * The ID of the custom style reference (optional) + */ + style_id?: string + /** + * Image Url + * + * The URL of the image to modify. Must be less than 5 MB in size, have resolution less than 16 MP and max dimension less than 4096 pixels. + */ + image_url: string + /** + * Strength + * + * Defines the difference with the original image, should lie in [0, 1], where 0 means almost identical, and 1 means miserable similarity + */ + strength?: number + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Colors + * + * An array of preferable colors + */ + colors?: Array + /** + * Negative Prompt + * + * A text description of undesired elements on an image + */ + negative_prompt?: string +} + +/** + * UpscaleOutput + */ +export type SchemaRecraftUpscaleCrispOutput = { + /** + * Image + * + * The upscaled image. + */ + image: SchemaFile +} + +/** + * UpscaleInput + */ +export type SchemaRecraftUpscaleCrispInput = { + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Image Url + * + * The URL of the image to be upscaled. Must be in PNG format. + */ + image_url: string +} + +/** + * UpscaleOutput + */ +export type SchemaRecraftUpscaleCreativeOutput = { + /** + * Image + * + * The upscaled image. + */ + image: SchemaFile +} + +/** + * UpscaleInput + */ +export type SchemaRecraftUpscaleCreativeInput = { + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Image Url + * + * The URL of the image to be upscaled. Must be in PNG format. 
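// Both Recraft upscalers above accept only a PNG `image_url` and otherwise share
// the same input shape. A small guard keeps that constraint visible at the call
// site; this helper is hypothetical, and the extension check is only a heuristic
// (the real requirement is the file format, not the URL suffix).
import type { SchemaRecraftUpscaleCrispInput } from './types'

function toRecraftUpscaleInput(imageUrl: string): SchemaRecraftUpscaleCrispInput {
  if (!new URL(imageUrl).pathname.toLowerCase().endsWith('.png')) {
    throw new Error('Recraft upscalers require a PNG input image')
  }
  return { image_url: imageUrl }
}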
+ */ + image_url: string +} + +/** + * ImageOutput + */ +export type SchemaRembgEnhanceOutput = { + image: SchemaFile +} + +/** + * ImageInput + */ +export type SchemaRembgEnhanceInput = { + /** + * Image Url + * + * URL of the input image + */ + image_url: string +} + +/** + * ImageEditOutput + */ +export type SchemaBagelEditOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The edited images. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * ImageEditInput + */ +export type SchemaBagelEditInput = { + /** + * Prompt + * + * The prompt to edit the image with. + */ + prompt: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * The seed to use for the generation. + */ + seed?: number + /** + * Use Thought + * + * Whether to use thought tokens for generation. If set to true, the model will "think" to potentially improve generation quality. Increases generation time and increases the cost by 20%. + */ + use_thought?: boolean + /** + * Image Url + * + * The image to edit. + */ + image_url: string +} + +/** + * KontextEditOutput + */ +export type SchemaFluxKontextDevOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Timings + */ + timings: { + [key: string]: number + } +} + +/** + * BaseKontextEditInput + */ +export type SchemaFluxKontextDevInput = { + /** + * Prompt + * + * The prompt to edit the image. + */ + prompt: string + /** + * Resolution Mode + * + * + * Determines how the output resolution is set for image editing. + * - `auto`: The model selects an optimal resolution from a predefined set that best matches the input image's aspect ratio. This is the recommended setting for most use cases as it's what the model was trained on. + * - `match_input`: The model will attempt to use the same resolution as the input image. The resolution will be adjusted to be compatible with the model's requirements (e.g. dimensions must be multiples of 16 and within supported limits). + * Apart from these, a few aspect ratios are also supported. + * + */ + resolution_mode?: + | 'auto' + | 'match_input' + | '1:1' + | '16:9' + | '21:9' + | '3:2' + | '2:3' + | '4:5' + | '5:4' + | '3:4' + | '4:3' + | '9:16' + | '9:21' + /** + * Acceleration + * + * The speed of the generation. The higher the speed, the faster the generation. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Output Format + * + * Output format + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * The URL of the image to edit. 
+ */ + image_url: string + /** + * Sync Mode + * + * + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + * + */ + sync_mode?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number | unknown + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * FluxKontextOutput + */ +export type SchemaFluxProKontextMaxOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * Image + * + * Represents an image file. + */ +export type SchemaFalToolkitImageImageImage = { + /** + * File Size + * + * The size of the file in bytes. + */ + file_size?: number + /** + * Height + * + * The height of the image in pixels. + */ + height?: number + /** + * Url + * + * The URL where the file can be downloaded from. + */ + url: string + /** + * Width + * + * The width of the image in pixels. + */ + width?: number + /** + * File Name + * + * The name of the file. It will be auto-generated if not provided. + */ + file_name?: string + /** + * Content Type + * + * The mime type of the file. + */ + content_type?: string + /** + * File Data + * + * File data + */ + file_data?: Blob | File +} + +/** + * FluxKontextInput + */ +export type SchemaFluxProKontextMaxInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Aspect Ratio + * + * The aspect ratio of the generated image. + */ + aspect_ratio?: + | '21:9' + | '16:9' + | '4:3' + | '3:2' + | '1:1' + | '2:3' + | '3:4' + | '9:16' + | '9:21' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * Image prompt for the omni model. + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Safety Tolerance + * + * The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive. 
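// Editing sketch for the FLUX Kontext dev schema above: `resolution_mode: 'auto'`
// is what the model was trained on per the description, and `acceleration`
// trades quality for speed. Same assumptions as the earlier sketches; the
// endpoint id is assumed.
import { fal } from '@fal-ai/client'
import type { SchemaFluxKontextDevInput } from './types'

const kontextInput: SchemaFluxKontextDevInput = {
  prompt: 'replace the sky with a dramatic sunset',
  image_url: 'https://example.com/landscape.jpg',
  resolution_mode: 'auto',
  acceleration: 'regular',
}
const kontextEdit = await fal.subscribe('fal-ai/flux-kontext/dev', { input: kontextInput })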
+ */ + safety_tolerance?: '1' | '2' | '3' | '4' | '5' | '6' + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Enhance Prompt + * + * Whether to enhance the prompt for better results. + */ + enhance_prompt?: boolean +} + +/** + * Output + */ +export type SchemaFluxProKontextMultiOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * FluxKontextMultiInput + */ +export type SchemaFluxProKontextMultiInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Aspect Ratio + * + * The aspect ratio of the generated image. + */ + aspect_ratio?: + | '21:9' + | '16:9' + | '4:3' + | '3:2' + | '1:1' + | '2:3' + | '3:4' + | '9:16' + | '9:21' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Safety Tolerance + * + * The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive. + */ + safety_tolerance?: '1' | '2' | '3' | '4' | '5' | '6' + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Image URL + * + * Image prompt for the omni model. + */ + image_urls: Array + /** + * Enhance Prompt + * + * Whether to enhance the prompt for better results. + */ + enhance_prompt?: boolean +} + +/** + * Output + */ +export type SchemaFluxProKontextMaxMultiOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * FluxKontextMultiInput + */ +export type SchemaFluxProKontextMaxMultiInput = { + /** + * Prompt + * + * The prompt to generate an image from. 
+ */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Aspect Ratio + * + * The aspect ratio of the generated image. + */ + aspect_ratio?: + | '21:9' + | '16:9' + | '4:3' + | '3:2' + | '1:1' + | '2:3' + | '3:4' + | '9:16' + | '9:21' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Safety Tolerance + * + * The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive. + */ + safety_tolerance?: '1' | '2' | '3' | '4' | '5' | '6' + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Image URL + * + * Image prompt for the omni model. + */ + image_urls: Array + /** + * Enhance Prompt + * + * Whether to enhance the prompt for better results. + */ + enhance_prompt?: boolean +} + +/** + * AgeProgressionOutput + */ +export type SchemaImageEditingAgeProgressionOutput = { + /** + * Images + */ + images: Array + /** + * Seed + */ + seed: number +} + +/** + * AgeProgressionInput + */ +export type SchemaImageEditingAgeProgressionInput = { + /** + * Age Change + * + * The age change to apply. + */ + prompt?: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated image. + */ + aspect_ratio?: + | '21:9' + | '16:9' + | '4:3' + | '3:2' + | '1:1' + | '2:3' + | '3:4' + | '9:16' + | '9:21' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * Image prompt for the omni model. + */ + image_url: string + /** + * Sync Mode + * + * If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN. + */ + sync_mode?: boolean + /** + * Safety Tolerance + * + * The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive. + */ + safety_tolerance?: '1' | '2' | '3' | '4' | '5' | '6' + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you. + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * Number of inference steps for sampling. + */ + num_inference_steps?: number + /** + * Seed + * + * The same seed and the same prompt given to the same version of the model will output the same image every time. + */ + seed?: number +} + +/** + * BackgroundChangeOutput + */ +export type SchemaImageEditingBackgroundChangeOutput = { + /** + * Images + */ + images: Array + /** + * Seed + */ + seed: number +} + +/** + * BackgroundChangeInput + */ +export type SchemaImageEditingBackgroundChangeInput = { + /** + * Background Prompt + * + * The desired background to apply. + */ + prompt?: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated image. 
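// The image-editing family (age progression above; background change, cartoonify,
// color correction, and so on below) all share the same base fields and differ
// only in the optional `prompt`. A thin wrapper keeps call sites uniform; both
// the helper and the endpoint-id mapping are assumptions for illustration.
import { fal } from '@fal-ai/client'
import type { SchemaImageEditingAgeProgressionInput } from './types'

async function editImage(endpointId: string, input: SchemaImageEditingAgeProgressionInput) {
  const { data } = await fal.subscribe(endpointId, { input })
  return data
}

const aged = await editImage('fal-ai/image-editing/age-progression', {
  image_url: 'https://example.com/portrait.jpg',
  prompt: 'age the subject by thirty years',
})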
+ */ + aspect_ratio?: + | '21:9' + | '16:9' + | '4:3' + | '3:2' + | '1:1' + | '2:3' + | '3:4' + | '9:16' + | '9:21' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * Image prompt for the omni model. + */ + image_url: string + /** + * Sync Mode + * + * If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN. + */ + sync_mode?: boolean + /** + * Safety Tolerance + * + * The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive. + */ + safety_tolerance?: '1' | '2' | '3' | '4' | '5' | '6' + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you. + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * Number of inference steps for sampling. + */ + num_inference_steps?: number + /** + * Seed + * + * The same seed and the same prompt given to the same version of the model will output the same image every time. + */ + seed?: number +} + +/** + * CartoonifyOutput + */ +export type SchemaImageEditingCartoonifyOutput = { + /** + * Images + */ + images: Array + /** + * Seed + */ + seed: number +} + +/** + * BaseInput + */ +export type SchemaImageEditingCartoonifyInput = { + /** + * Aspect Ratio + * + * The aspect ratio of the generated image. + */ + aspect_ratio?: + | '21:9' + | '16:9' + | '4:3' + | '3:2' + | '1:1' + | '2:3' + | '3:4' + | '9:16' + | '9:21' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * Image prompt for the omni model. + */ + image_url: string + /** + * Sync Mode + * + * If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN. + */ + sync_mode?: boolean + /** + * Safety Tolerance + * + * The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive. + */ + safety_tolerance?: '1' | '2' | '3' | '4' | '5' | '6' + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you. + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * Number of inference steps for sampling. + */ + num_inference_steps?: number + /** + * Seed + * + * The same seed and the same prompt given to the same version of the model will output the same image every time. + */ + seed?: number +} + +/** + * ColorCorrectionOutput + */ +export type SchemaImageEditingColorCorrectionOutput = { + /** + * Images + */ + images: Array + /** + * Seed + */ + seed: number +} + +/** + * BaseInput + */ +export type SchemaImageEditingColorCorrectionInput = { + /** + * Aspect Ratio + * + * The aspect ratio of the generated image. + */ + aspect_ratio?: + | '21:9' + | '16:9' + | '4:3' + | '3:2' + | '1:1' + | '2:3' + | '3:4' + | '9:16' + | '9:21' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * Image prompt for the omni model. 
+ */ + image_url: string + /** + * Sync Mode + * + * If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN. + */ + sync_mode?: boolean + /** + * Safety Tolerance + * + * The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive. + */ + safety_tolerance?: '1' | '2' | '3' | '4' | '5' | '6' + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you. + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * Number of inference steps for sampling. + */ + num_inference_steps?: number + /** + * Seed + * + * The same seed and the same prompt given to the same version of the model will output the same image every time. + */ + seed?: number +} + +/** + * ExpressionChangeOutput + */ +export type SchemaImageEditingExpressionChangeOutput = { + /** + * Images + */ + images: Array + /** + * Seed + */ + seed: number +} + +/** + * ExpressionChangeInput + */ +export type SchemaImageEditingExpressionChangeInput = { + /** + * Expression Prompt + * + * The desired facial expression to apply. + */ + prompt?: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated image. + */ + aspect_ratio?: + | '21:9' + | '16:9' + | '4:3' + | '3:2' + | '1:1' + | '2:3' + | '3:4' + | '9:16' + | '9:21' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * Image prompt for the omni model. + */ + image_url: string + /** + * Sync Mode + * + * If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN. + */ + sync_mode?: boolean + /** + * Safety Tolerance + * + * The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive. + */ + safety_tolerance?: '1' | '2' | '3' | '4' | '5' | '6' + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you. + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * Number of inference steps for sampling. + */ + num_inference_steps?: number + /** + * Seed + * + * The same seed and the same prompt given to the same version of the model will output the same image every time. + */ + seed?: number +} + +/** + * FaceEnhancementOutput + */ +export type SchemaImageEditingFaceEnhancementOutput = { + /** + * Images + */ + images: Array + /** + * Seed + */ + seed: number +} + +/** + * BaseInput + */ +export type SchemaImageEditingFaceEnhancementInput = { + /** + * Aspect Ratio + * + * The aspect ratio of the generated image. + */ + aspect_ratio?: + | '21:9' + | '16:9' + | '4:3' + | '3:2' + | '1:1' + | '2:3' + | '3:4' + | '9:16' + | '9:21' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * Image prompt for the omni model. + */ + image_url: string + /** + * Sync Mode + * + * If set to true, the function will wait for the image to be generated and uploaded before returning the response. 
This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN. + */ + sync_mode?: boolean + /** + * Safety Tolerance + * + * The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive. + */ + safety_tolerance?: '1' | '2' | '3' | '4' | '5' | '6' + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you. + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * Number of inference steps for sampling. + */ + num_inference_steps?: number + /** + * Seed + * + * The same seed and the same prompt given to the same version of the model will output the same image every time. + */ + seed?: number +} + +/** + * HairChangeOutput + */ +export type SchemaImageEditingHairChangeOutput = { + /** + * Images + */ + images: Array + /** + * Seed + */ + seed: number +} + +/** + * HairChangeInput + */ +export type SchemaImageEditingHairChangeInput = { + /** + * Hair Style Prompt + * + * The desired hair style to apply. + */ + prompt?: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated image. + */ + aspect_ratio?: + | '21:9' + | '16:9' + | '4:3' + | '3:2' + | '1:1' + | '2:3' + | '3:4' + | '9:16' + | '9:21' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * Image prompt for the omni model. + */ + image_url: string + /** + * Sync Mode + * + * If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN. + */ + sync_mode?: boolean + /** + * Safety Tolerance + * + * The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive. + */ + safety_tolerance?: '1' | '2' | '3' | '4' | '5' | '6' + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you. + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * Number of inference steps for sampling. + */ + num_inference_steps?: number + /** + * Seed + * + * The same seed and the same prompt given to the same version of the model will output the same image every time. + */ + seed?: number +} + +/** + * ObjectRemovalOutput + */ +export type SchemaImageEditingObjectRemovalOutput = { + /** + * Images + */ + images: Array + /** + * Seed + */ + seed: number +} + +/** + * ObjectRemovalInput + */ +export type SchemaImageEditingObjectRemovalInput = { + /** + * Objects to Remove + * + * Specify which objects to remove from the image. + */ + prompt?: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated image. + */ + aspect_ratio?: + | '21:9' + | '16:9' + | '4:3' + | '3:2' + | '1:1' + | '2:3' + | '3:4' + | '9:16' + | '9:21' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * Image prompt for the omni model. + */ + image_url: string + /** + * Sync Mode + * + * If set to true, the function will wait for the image to be generated and uploaded before returning the response. 
This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN. + */ + sync_mode?: boolean + /** + * Safety Tolerance + * + * The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive. + */ + safety_tolerance?: '1' | '2' | '3' | '4' | '5' | '6' + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you. + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * Number of inference steps for sampling. + */ + num_inference_steps?: number + /** + * Seed + * + * The same seed and the same prompt given to the same version of the model will output the same image every time. + */ + seed?: number +} + +/** + * ProfessionalPhotoOutput + */ +export type SchemaImageEditingProfessionalPhotoOutput = { + /** + * Images + */ + images: Array + /** + * Seed + */ + seed: number +} + +/** + * BaseInput + */ +export type SchemaImageEditingProfessionalPhotoInput = { + /** + * Aspect Ratio + * + * The aspect ratio of the generated image. + */ + aspect_ratio?: + | '21:9' + | '16:9' + | '4:3' + | '3:2' + | '1:1' + | '2:3' + | '3:4' + | '9:16' + | '9:21' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * Image prompt for the omni model. + */ + image_url: string + /** + * Sync Mode + * + * If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN. + */ + sync_mode?: boolean + /** + * Safety Tolerance + * + * The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive. + */ + safety_tolerance?: '1' | '2' | '3' | '4' | '5' | '6' + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you. + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * Number of inference steps for sampling. + */ + num_inference_steps?: number + /** + * Seed + * + * The same seed and the same prompt given to the same version of the model will output the same image every time. + */ + seed?: number +} + +/** + * SceneCompositionOutput + */ +export type SchemaImageEditingSceneCompositionOutput = { + /** + * Images + */ + images: Array + /** + * Seed + */ + seed: number +} + +/** + * SceneCompositionInput + */ +export type SchemaImageEditingSceneCompositionInput = { + /** + * Scene Description + * + * Describe the scene where you want to place the subject. + */ + prompt?: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated image. + */ + aspect_ratio?: + | '21:9' + | '16:9' + | '4:3' + | '3:2' + | '1:1' + | '2:3' + | '3:4' + | '9:16' + | '9:21' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * Image prompt for the omni model. + */ + image_url: string + /** + * Sync Mode + * + * If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN. 
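+ *
+ * Editor's note: the image-editing inputs in this file (age progression,
+ * background change, cartoonify, scene composition, and so on) share the same
+ * base fields, so one hypothetical helper can target any of them:
+ * @example
+ *   const composeInput = (
+ *     image_url: string,
+ *     prompt: string,
+ *   ): SchemaImageEditingSceneCompositionInput => ({
+ *     image_url,
+ *     prompt,
+ *     output_format: 'png',
+ *     safety_tolerance: '2',
+ *   })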
+ */ + sync_mode?: boolean + /** + * Safety Tolerance + * + * The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive. + */ + safety_tolerance?: '1' | '2' | '3' | '4' | '5' | '6' + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you. + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * Number of inference steps for sampling. + */ + num_inference_steps?: number + /** + * Seed + * + * The same seed and the same prompt given to the same version of the model will output the same image every time. + */ + seed?: number +} + +/** + * StyleTransferOutput + */ +export type SchemaImageEditingStyleTransferOutput = { + /** + * Images + */ + images: Array + /** + * Seed + */ + seed: number +} + +/** + * StyleTransferInput + */ +export type SchemaImageEditingStyleTransferInput = { + /** + * Style Prompt + * + * The artistic style to apply. + */ + prompt?: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated image. + */ + aspect_ratio?: + | '21:9' + | '16:9' + | '4:3' + | '3:2' + | '1:1' + | '2:3' + | '3:4' + | '9:16' + | '9:21' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * Image prompt for the omni model. + */ + image_url: string + /** + * Sync Mode + * + * If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN. + */ + sync_mode?: boolean + /** + * Safety Tolerance + * + * The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive. + */ + safety_tolerance?: '1' | '2' | '3' | '4' | '5' | '6' + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you. + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * Number of inference steps for sampling. + */ + num_inference_steps?: number + /** + * Seed + * + * The same seed and the same prompt given to the same version of the model will output the same image every time. + */ + seed?: number +} + +/** + * TimeOfDayOutput + */ +export type SchemaImageEditingTimeOfDayOutput = { + /** + * Images + */ + images: Array + /** + * Seed + */ + seed: number +} + +/** + * TimeOfDayInput + */ +export type SchemaImageEditingTimeOfDayInput = { + /** + * Time of Day + * + * The time of day to transform the scene to. + */ + prompt?: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated image. + */ + aspect_ratio?: + | '21:9' + | '16:9' + | '4:3' + | '3:2' + | '1:1' + | '2:3' + | '3:4' + | '9:16' + | '9:21' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * Image prompt for the omni model. + */ + image_url: string + /** + * Sync Mode + * + * If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN. + */ + sync_mode?: boolean + /** + * Safety Tolerance + * + * The safety tolerance level for the generated image. 
1 being the most strict and 6 being the most permissive. + */ + safety_tolerance?: '1' | '2' | '3' | '4' | '5' | '6' + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you. + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * Number of inference steps for sampling. + */ + num_inference_steps?: number + /** + * Seed + * + * The same seed and the same prompt given to the same version of the model will output the same image every time. + */ + seed?: number +} + +/** + * WeatherEffectOutput + */ +export type SchemaImageEditingWeatherEffectOutput = { + /** + * Images + */ + images: Array + /** + * Seed + */ + seed: number +} + +/** + * WeatherEffectInput + */ +export type SchemaImageEditingWeatherEffectInput = { + /** + * Weather Effect + * + * The weather effect to apply. + */ + prompt?: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated image. + */ + aspect_ratio?: + | '21:9' + | '16:9' + | '4:3' + | '3:2' + | '1:1' + | '2:3' + | '3:4' + | '9:16' + | '9:21' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * Image prompt for the omni model. + */ + image_url: string + /** + * Sync Mode + * + * If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN. + */ + sync_mode?: boolean + /** + * Safety Tolerance + * + * The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive. + */ + safety_tolerance?: '1' | '2' | '3' | '4' | '5' | '6' + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you. + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * Number of inference steps for sampling. + */ + num_inference_steps?: number + /** + * Seed + * + * The same seed and the same prompt given to the same version of the model will output the same image every time. + */ + seed?: number +} + +/** + * PhotoRestorationOutput + */ +export type SchemaImageEditingPhotoRestorationOutput = { + /** + * Images + */ + images: Array + /** + * Seed + */ + seed: number +} + +/** + * PhotoRestorationInput + * + * Input model for photo restoration endpoint. + */ +export type SchemaImageEditingPhotoRestorationInput = { + /** + * Aspect Ratio + * + * The aspect ratio of the generated image. + */ + aspect_ratio?: + | '21:9' + | '16:9' + | '4:3' + | '3:2' + | '1:1' + | '2:3' + | '3:4' + | '9:16' + | '9:21' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * URL of the old or damaged photo to restore. + */ + image_url: string + /** + * Sync Mode + * + * If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN. + */ + sync_mode?: boolean + /** + * Safety Tolerance + * + * The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive. 
+ */ + safety_tolerance?: '1' | '2' | '3' | '4' | '5' | '6' + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you. + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * Number of inference steps for sampling. + */ + num_inference_steps?: number + /** + * Seed + * + * The same seed and the same prompt given to the same version of the model will output the same image every time. + */ + seed?: number +} + +/** + * TextRemovalOutput + */ +export type SchemaImageEditingTextRemovalOutput = { + /** + * Images + */ + images: Array + /** + * Seed + */ + seed: number +} + +/** + * TextRemovalInput + * + * Input model for text removal endpoint. + */ +export type SchemaImageEditingTextRemovalInput = { + /** + * Aspect Ratio + * + * The aspect ratio of the generated image. + */ + aspect_ratio?: + | '21:9' + | '16:9' + | '4:3' + | '3:2' + | '1:1' + | '2:3' + | '3:4' + | '9:16' + | '9:21' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * URL of the image containing text to be removed. + */ + image_url: string + /** + * Sync Mode + * + * If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN. + */ + sync_mode?: boolean + /** + * Safety Tolerance + * + * The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive. + */ + safety_tolerance?: '1' | '2' | '3' | '4' | '5' | '6' + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you. + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * Number of inference steps for sampling. + */ + num_inference_steps?: number + /** + * Seed + * + * The same seed and the same prompt given to the same version of the model will output the same image every time. + */ + seed?: number +} + +/** + * Output + */ +export type SchemaFlux1DevImageToImageOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * BaseFlux1ImageToInput + */ +export type SchemaFlux1DevImageToImageInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Acceleration + * + * The speed of the generation. The higher the speed, the faster the generation. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * The URL of the image to generate an image from. 
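+ *
+ * Editor's sketch of an image-to-image payload; values are illustrative and
+ * the URL is a placeholder.
+ * @example
+ *   const img2img: SchemaFlux1DevImageToImageInput = {
+ *     prompt: 'turn the sketch into a watercolor painting',
+ *     image_url: 'https://example.com/sketch.png', // placeholder
+ *     strength: 0.9, // per the field docs, higher values suit this model
+ *     acceleration: 'regular',
+ *   }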
+ */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Strength + * + * The strength of the initial image. Higher strength values are better for this model. + */ + strength?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number | unknown + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number +} + +/** + * Output + */ +export type SchemaFlux1DevReduxOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * BaseFlux1ReduxInput + */ +export type SchemaFlux1DevReduxInput = { + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * The speed of the generation. The higher the speed, the faster the generation. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * The URL of the image to generate an image from. + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number | unknown + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number +} + +/** + * Output + */ +export type SchemaFlux1SchnellReduxOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. 
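+ *
+ * Editor's note: unlike the image-to-image input above, the Redux input types
+ * here take no `prompt` field; the endpoint generates variations of the source
+ * image alone. Illustrative payload (placeholder URL):
+ * @example
+ *   const redux: SchemaFlux1DevReduxInput = {
+ *     image_url: 'https://example.com/source.png', // placeholder
+ *     image_size: 'landscape_16_9',
+ *     num_images: 2,
+ *   }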
+ */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * SchnellFlux1ReduxInput + */ +export type SchemaFlux1SchnellReduxInput = { + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * The speed of the generation. The higher the speed, the faster the generation. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * The URL of the image to generate an image from. + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number | unknown +} + +/** + * T2IOutput + */ +export type SchemaLumaPhotonReframeOutput = { + /** + * Images + * + * The generated image + */ + images: Array +} + +/** + * ReframeImageRequest + */ +export type SchemaLumaPhotonReframeInput = { + /** + * Prompt + * + * Optional prompt for reframing + */ + prompt?: string + /** + * Aspect Ratio + * + * The aspect ratio of the reframed image + */ + aspect_ratio: '1:1' | '16:9' | '9:16' | '4:3' | '3:4' | '21:9' | '9:21' + /** + * Y Start + * + * Start Y coordinate for reframing + */ + y_start?: number + /** + * X End + * + * End X coordinate for reframing + */ + x_end?: number + /** + * Y End + * + * End Y coordinate for reframing + */ + y_end?: number + /** + * Grid Position Y + * + * Y position of the grid for reframing + */ + grid_position_y?: number + /** + * Image Url + * + * URL of the input image to reframe + */ + image_url: string + /** + * Grid Position X + * + * X position of the grid for reframing + */ + grid_position_x?: number + /** + * X Start + * + * Start X coordinate for reframing + */ + x_start?: number +} + +/** + * T2IOutput + */ +export type SchemaLumaPhotonFlashReframeOutput = { + /** + * Images + * + * The generated image + */ + images: Array +} + +/** + * ReframeImageRequest + */ +export type SchemaLumaPhotonFlashReframeInput = { + /** + * Prompt + * + * Optional prompt for reframing + */ + prompt?: string + /** + * Aspect Ratio + * + * The aspect ratio of the reframed image + */ + aspect_ratio: '1:1' | '16:9' | '9:16' | '4:3' | '3:4' | '21:9' | '9:21' + /** + * Y Start + * + * Start Y coordinate for reframing + */ + y_start?: number + /** + * X End + * + * End X coordinate for reframing + */ + x_end?: number + /** + * Y End + * + * End Y coordinate for reframing + */ + y_end?: number + /** + * Grid Position Y 
+ *
+ * Y position of the grid for reframing
+ */
+ grid_position_y?: number
+ /**
+ * Image Url
+ *
+ * URL of the input image to reframe
+ */
+ image_url: string
+ /**
+ * Grid Position X
+ *
+ * X position of the grid for reframing
+ */
+ grid_position_x?: number
+ /**
+ * X Start
+ *
+ * Start X coordinate for reframing
+ */
+ x_start?: number
+}
+
+/**
+ * BabyVersionOutput
+ */
+export type SchemaImageEditingBabyVersionOutput = {
+ /**
+ * Images
+ */
+ images: Array
+ /**
+ * Seed
+ */
+ seed: number
+}
+
+/**
+ * BabyVersionInput
+ *
+ * Input model for baby version endpoint.
+ */
+export type SchemaImageEditingBabyVersionInput = {
+ /**
+ * Aspect Ratio
+ *
+ * The aspect ratio of the generated image.
+ */
+ aspect_ratio?:
+ | '21:9'
+ | '16:9'
+ | '4:3'
+ | '3:2'
+ | '1:1'
+ | '2:3'
+ | '3:4'
+ | '9:16'
+ | '9:21'
+ /**
+ * Output Format
+ *
+ * The format of the generated image.
+ */
+ output_format?: 'jpeg' | 'png'
+ /**
+ * Image URL
+ *
+ * URL of the image to transform into a baby version.
+ */
+ image_url: string
+ /**
+ * Sync Mode
+ *
+ * If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.
+ */
+ sync_mode?: boolean
+ /**
+ * Safety Tolerance
+ *
+ * The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive.
+ */
+ safety_tolerance?: '1' | '2' | '3' | '4' | '5' | '6'
+ /**
+ * Guidance Scale
+ *
+ * The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.
+ */
+ guidance_scale?: number
+ /**
+ * Num Inference Steps
+ *
+ * Number of inference steps for sampling.
+ */
+ num_inference_steps?: number
+ /**
+ * Seed
+ *
+ * The same seed and the same prompt given to the same version of the model will output the same image every time.
+ */
+ seed?: number
+}
+
+/**
+ * ReframeOutput
+ */
+export type SchemaImageEditingReframeOutput = {
+ /**
+ * Images
+ */
+ images: Array
+ /**
+ * Seed
+ */
+ seed: number
+}
+
+/**
+ * ReframeInput
+ */
+export type SchemaImageEditingReframeInput = {
+ /**
+ * Aspect Ratio
+ *
+ * The desired aspect ratio for the reframed image.
+ */
+ aspect_ratio?:
+ | '21:9'
+ | '16:9'
+ | '4:3'
+ | '3:2'
+ | '1:1'
+ | '2:3'
+ | '3:4'
+ | '9:16'
+ | '9:21'
+ /**
+ * Output Format
+ *
+ * The format of the generated image.
+ */
+ output_format?: 'jpeg' | 'png'
+ /**
+ * Image URL
+ *
+ * URL of the image to reframe.
+ */
+ image_url: string
+ /**
+ * Sync Mode
+ *
+ * If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.
+ */
+ sync_mode?: boolean
+ /**
+ * Safety Tolerance
+ *
+ * The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive.
+ */
+ safety_tolerance?: '1' | '2' | '3' | '4' | '5' | '6'
+ /**
+ * Guidance Scale
+ *
+ * The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.
+ */
+ guidance_scale?: number
+ /**
+ * Num Inference Steps
+ *
+ * Number of inference steps for sampling.
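+ *
+ * Editor's sketch: reframing with the Luma Photon request type defined above.
+ * Only `image_url` and `aspect_ratio` are required; the URL is a placeholder.
+ * @example
+ *   const reframe: SchemaLumaPhotonReframeInput = {
+ *     image_url: 'https://example.com/portrait.png', // placeholder
+ *     aspect_ratio: '16:9',
+ *     prompt: 'keep the subject centered', // optional guidance
+ *   }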
+ */
+ num_inference_steps?: number
+ /**
+ * Seed
+ *
+ * The same seed and the same prompt given to the same version of the model will output the same image every time.
+ */
+ seed?: number
+}
+
+/**
+ * T2IOutput
+ */
+export type SchemaLumaPhotonModifyOutput = {
+ /**
+ * Images
+ *
+ * The generated image
+ */
+ images: Array
+}
+
+/**
+ * ModifyImageRequest
+ */
+export type SchemaLumaPhotonModifyInput = {
+ /**
+ * Prompt
+ *
+ * Instruction for modifying the image
+ */
+ prompt?: string
+ /**
+ * Aspect Ratio
+ *
+ * The aspect ratio of the modified image
+ */
+ aspect_ratio: '1:1' | '16:9' | '9:16' | '4:3' | '3:4' | '21:9' | '9:21'
+ /**
+ * Strength
+ *
+ * The strength of the initial image. Higher strength values correspond to more influence of the initial image on the output.
+ */
+ strength: number
+ /**
+ * Image Url
+ *
+ * URL of the input image to modify
+ */
+ image_url: string
+}
+
+/**
+ * T2IOutput
+ */
+export type SchemaLumaPhotonFlashModifyOutput = {
+ /**
+ * Images
+ *
+ * The generated image
+ */
+ images: Array
+}
+
+/**
+ * ModifyImageRequest
+ */
+export type SchemaLumaPhotonFlashModifyInput = {
+ /**
+ * Prompt
+ *
+ * Instruction for modifying the image
+ */
+ prompt?: string
+ /**
+ * Aspect Ratio
+ *
+ * The aspect ratio of the modified image
+ */
+ aspect_ratio: '1:1' | '16:9' | '9:16' | '4:3' | '3:4' | '21:9' | '9:21'
+ /**
+ * Strength
+ *
+ * The strength of the initial image. Higher strength values correspond to more influence of the initial image on the output.
+ */
+ strength: number
+ /**
+ * Image Url
+ *
+ * URL of the input image to modify
+ */
+ image_url: string
+}
+
+/**
+ * FrameOutput
+ */
+export type SchemaFfmpegApiExtractFrameOutput = {
+ /**
+ * Images
+ */
+ images: Array
+}
+
+/**
+ * FrameInput
+ */
+export type SchemaFfmpegApiExtractFrameInput = {
+ /**
+ * Video Url
+ *
+ * URL of the video file to use as the video track
+ */
+ video_url: string
+ /**
+ * Frame Type
+ *
+ * Type of frame to extract: first, middle, or last frame of the video
+ */
+ frame_type?: 'first' | 'middle' | 'last'
+}
+
+/**
+ * VectorizeOutput
+ */
+export type SchemaRecraftVectorizeOutput = {
+ /**
+ * Image
+ *
+ * The vectorized image.
+ */
+ image: SchemaFile
+}
+
+/**
+ * VectorizeInput
+ */
+export type SchemaRecraftVectorizeInput = {
+ /**
+ * Image Url
+ *
+ * The URL of the image to be vectorized. Must be in PNG, JPG or WEBP format, less than 5 MB in size, have resolution less than 16 MP, max dimension less than 4096 pixels, and min dimension more than 256 pixels.
+ */
+ image_url: string
+}
+
+/**
+ * Output
+ */
+export type SchemaObjectRemovalOutput = {
+ /**
+ * Images
+ *
+ * The generated images with objects removed.
+ */
+ images: Array
+}
+
+/**
+ * PromptInput
+ */
+export type SchemaObjectRemovalInput = {
+ /**
+ * Prompt
+ *
+ * Text description of the object to remove.
+ */
+ prompt: string
+ /**
+ * Mask Expansion
+ *
+ * Amount of pixels to expand the mask by. Range: 0-50
+ */
+ mask_expansion?: number
+ /**
+ * Model
+ */
+ model?: 'low_quality' | 'medium_quality' | 'high_quality' | 'best_quality'
+ /**
+ * Image Url
+ *
+ * The URL of the image to remove objects from.
+ */
+ image_url: string
+}
+
+/**
+ * Output
+ */
+export type SchemaObjectRemovalMaskOutput = {
+ /**
+ * Images
+ *
+ * The generated images with objects removed.
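+ *
+ * Editor's note: object removal comes in three variants that share `model` and
+ * `mask_expansion`: by text prompt (above), by mask, and by bounding box
+ * (below). Illustrative prompt-variant payload (placeholder URL):
+ * @example
+ *   const removal: SchemaObjectRemovalInput = {
+ *     image_url: 'https://example.com/street.png', // placeholder
+ *     prompt: 'the parked car on the left',
+ *     model: 'high_quality',
+ *     mask_expansion: 10, // pixels, within the documented 0-50 range
+ *   }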
+ */ + images: Array +} + +/** + * MaskInput + */ +export type SchemaObjectRemovalMaskInput = { + /** + * Model + */ + model?: 'low_quality' | 'medium_quality' | 'high_quality' | 'best_quality' + /** + * Mask Expansion + * + * Amount of pixels to expand the mask by. Range: 0-50 + */ + mask_expansion?: number + /** + * Mask Url + * + * The URL of the mask image. White pixels (255) indicate areas to remove. + */ + mask_url: string + /** + * Image Url + * + * The URL of the image to remove objects from. + */ + image_url: string +} + +/** + * BBoxPromptBase + */ +export type SchemaBBoxPromptBase = { + /** + * Y Min + * + * Y Min Coordinate of the box (0-1) + */ + y_min?: number + /** + * X Max + * + * X Max Coordinate of the prompt (0-1) + */ + x_max?: number + /** + * X Min + * + * X Min Coordinate of the box (0-1) + */ + x_min?: number + /** + * Y Max + * + * Y Max Coordinate of the prompt (0-1) + */ + y_max?: number +} + +/** + * Output + */ +export type SchemaObjectRemovalBboxOutput = { + /** + * Images + * + * The generated images with objects removed. + */ + images: Array +} + +/** + * BboxInput + */ +export type SchemaObjectRemovalBboxInput = { + /** + * Model + */ + model?: 'low_quality' | 'medium_quality' | 'high_quality' | 'best_quality' + /** + * Mask Expansion + * + * Amount of pixels to expand the mask by. Range: 0-50 + */ + mask_expansion?: number + /** + * Box Prompts + * + * List of bounding box coordinates to erase (only one box prompt is supported) + */ + box_prompts?: Array + /** + * Image Url + * + * The URL of the image to remove objects from. + */ + image_url: string +} + +/** + * Output + */ +export type SchemaPasdOutput = { + /** + * Images + * + * The generated super-resolved images + */ + images: Array + /** + * Timings + * + * Timing information for different processing stages + */ + timings?: { + [key: string]: number + } +} + +/** + * Input + */ +export type SchemaPasdInput = { + /** + * Conditioning Scale + * + * ControlNet conditioning scale (0.1-1.0) + */ + conditioning_scale?: number + /** + * Prompt + * + * Additional prompt to guide super-resolution + */ + prompt?: string + /** + * Image Url + * + * Input image to super-resolve + */ + image_url: string + /** + * Steps + * + * Number of inference steps (10-50) + */ + steps?: number + /** + * Scale + * + * Upscaling factor (1-4x) + */ + scale?: number + /** + * Guidance Scale + * + * Guidance scale for diffusion (1.0-20.0) + */ + guidance_scale?: number + /** + * Negative Prompt + * + * Negative prompt to avoid unwanted artifacts + */ + negative_prompt?: string +} + +/** + * Output + */ +export type SchemaChainOfZoomOutput = { + /** + * Images + * + * List of intermediate images + */ + images: Array + /** + * Zoom Center + * + * Center coordinates used for zoom + */ + zoom_center: Array + /** + * Scale + * + * Actual linear zoom scale applied + */ + scale: number +} + +/** + * Input + */ +export type SchemaChainOfZoomInput = { + /** + * Sync Mode + * + * + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. 
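+ *
+ * Editor's sketch: bounding-box removal using the normalized (0-1) coordinates
+ * of `SchemaBBoxPromptBase` defined above; values are illustrative.
+ * @example
+ *   const box: SchemaBBoxPromptBase = { x_min: 0.1, y_min: 0.2, x_max: 0.4, y_max: 0.6 }
+ *   const bboxRemoval: SchemaObjectRemovalBboxInput = {
+ *     image_url: 'https://example.com/room.png', // placeholder
+ *     box_prompts: [box], // only one box prompt is supported, per the docs
+ *   }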
+ * + */ + sync_mode?: boolean + /** + * Center Y + * + * Y coordinate of zoom center (0-1) + */ + center_y?: number + /** + * Scale + * + * Zoom scale in powers of 2 + */ + scale?: number + /** + * Center X + * + * X coordinate of zoom center (0-1) + */ + center_x?: number + /** + * User Prompt + * + * Additional prompt text to guide the zoom enhancement + */ + user_prompt?: string + /** + * Image Url + * + * Input image to zoom into + */ + image_url: string +} + +/** + * V16Output + */ +export type SchemaFashnTryonV16Output = { + /** + * Images + */ + images: Array +} + +/** + * V16Input + */ +export type SchemaFashnTryonV16Input = { + /** + * Model Image + * + * URL or base64 of the model image + */ + model_image: string + /** + * Moderation Level + * + * Content moderation level for garment images. 'none' disables moderation, 'permissive' blocks only explicit content, 'conservative' also blocks underwear and swimwear. + */ + moderation_level?: 'none' | 'permissive' | 'conservative' + /** + * Garment Photo Type + * + * Specifies the type of garment photo to optimize internal parameters for better performance. 'model' is for photos of garments on a model, 'flat-lay' is for flat-lay or ghost mannequin images, and 'auto' attempts to automatically detect the photo type. + */ + garment_photo_type?: 'auto' | 'model' | 'flat-lay' + /** + * Garment Image + * + * URL or base64 of the garment image + */ + garment_image: string + /** + * Category + * + * Category of the garment to try-on. 'auto' will attempt to automatically detect the category of the garment. + */ + category?: 'tops' | 'bottoms' | 'one-pieces' | 'auto' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Segmentation Free + * + * Disables human parsing on the model image. + */ + segmentation_free?: boolean + /** + * Num Samples + * + * Number of images to generate in a single run. Image generation has a random element in it, so trying multiple images at once increases the chances of getting a good result. + */ + num_samples?: number + /** + * Mode + * + * Specifies the mode of operation. 'performance' mode is faster but may sacrifice quality, 'balanced' mode is a balance between speed and quality, and 'quality' mode is slower but produces higher quality results. + */ + mode?: 'performance' | 'balanced' | 'quality' + /** + * Seed + * + * Sets random operations to a fixed state. Use the same seed to reproduce results with the same inputs, or different seed to force different results. + */ + seed?: number + /** + * Output Format + * + * Output format of the generated images. 'png' is highest quality, while 'jpeg' is faster + */ + output_format?: 'png' | 'jpeg' +} + +/** + * KontextEditOutput + */ +export type SchemaFluxKontextLoraOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. 
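+ *
+ * Editor's sketch: a virtual try-on request with the FASHN v1.6 input defined
+ * above; both image URLs are placeholders.
+ * @example
+ *   const tryon: SchemaFashnTryonV16Input = {
+ *     model_image: 'https://example.com/model.jpg', // placeholder
+ *     garment_image: 'https://example.com/shirt.jpg', // placeholder
+ *     category: 'tops',
+ *     mode: 'balanced', // middle ground between speed and quality
+ *     num_samples: 2, // more samples raise the odds of a good result
+ *   }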
+ * + */ + seed: number +} + +/** + * BaseKontextEditInput + */ +export type SchemaFluxKontextLoraInput = { + /** + * Prompt + * + * The prompt to edit the image. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Acceleration + * + * The speed of the generation. The higher the speed, the faster the generation. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Resolution Mode + * + * + * Determines how the output resolution is set for image editing. + * - `auto`: The model selects an optimal resolution from a predefined set that best matches the input image's aspect ratio. This is the recommended setting for most use cases as it's what the model was trained on. + * - `match_input`: The model will attempt to use the same resolution as the input image. The resolution will be adjusted to be compatible with the model's requirements (e.g. dimensions must be multiples of 16 and within supported limits). + * Apart from these, a few aspect ratios are also supported. + * + */ + resolution_mode?: + | 'auto' + | 'match_input' + | '1:1' + | '16:9' + | '21:9' + | '3:2' + | '2:3' + | '4:5' + | '5:4' + | '3:4' + | '4:3' + | '9:16' + | '9:21' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * The URL of the image to edit. + * + * Max width: 14142px, Max height: 14142px, Timeout: 20s + */ + image_url: string + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use any number of LoRAs + * and they will be merged together to generate the final image. + * + */ + loras?: Array + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number +} + +/** + * PlushieStyleOutput + */ +export type SchemaImageEditingPlushieStyleOutput = { + /** + * Images + */ + images: Array + /** + * Seed + */ + seed: number +} + +/** + * PlushieStyleInput + * + * Input model for plushie style endpoint. + */ +export type SchemaImageEditingPlushieStyleInput = { + /** + * Lora Scale + * + * The scale factor for the LoRA model. Controls the strength of the LoRA effect. + */ + lora_scale?: number + /** + * Image URL + * + * URL of the image to convert to plushie style. + */ + image_url: string + /** + * Sync Mode + * + * If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN. + */ + sync_mode?: boolean + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you. 
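+ *
+ * Editor's sketch: a Kontext LoRA edit. The element type of `loras` is not
+ * visible in this excerpt, so the `{ path, scale }` shape below is an
+ * assumption, not the generated type.
+ * @example
+ *   const edit: SchemaFluxKontextLoraInput = {
+ *     prompt: 'make it look hand-painted',
+ *     image_url: 'https://example.com/photo.png', // placeholder
+ *     resolution_mode: 'match_input',
+ *     loras: [{ path: 'https://example.com/style.safetensors', scale: 1 }], // assumed element shape
+ *   }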
+ */ + guidance_scale?: number + /** + * Seed + * + * The same seed and the same prompt given to the same version of the model will output the same image every time. + */ + seed?: number + /** + * Num Inference Steps + * + * Number of inference steps for sampling. + */ + num_inference_steps?: number + /** + * Enable Safety Checker + * + * Whether to enable the safety checker for the generated image. + */ + enable_safety_checker?: boolean +} + +/** + * WojakStyleOutput + */ +export type SchemaImageEditingWojakStyleOutput = { + /** + * Images + */ + images: Array + /** + * Seed + */ + seed: number +} + +/** + * WojakStyleInput + * + * Input model for wojak style endpoint. + */ +export type SchemaImageEditingWojakStyleInput = { + /** + * Lora Scale + * + * The scale factor for the LoRA model. Controls the strength of the LoRA effect. + */ + lora_scale?: number + /** + * Image URL + * + * URL of the image to convert to wojak style. + */ + image_url: string + /** + * Sync Mode + * + * If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN. + */ + sync_mode?: boolean + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you. + */ + guidance_scale?: number + /** + * Seed + * + * The same seed and the same prompt given to the same version of the model will output the same image every time. + */ + seed?: number + /** + * Num Inference Steps + * + * Number of inference steps for sampling. + */ + num_inference_steps?: number + /** + * Enable Safety Checker + * + * Whether to enable the safety checker for the generated image. + */ + enable_safety_checker?: boolean +} + +/** + * BroccoliHaircutOutput + */ +export type SchemaImageEditingBroccoliHaircutOutput = { + /** + * Images + */ + images: Array + /** + * Seed + */ + seed: number +} + +/** + * BroccoliHaircutInput + * + * Input model for broccoli haircut endpoint. + */ +export type SchemaImageEditingBroccoliHaircutInput = { + /** + * Lora Scale + * + * The scale factor for the LoRA model. Controls the strength of the LoRA effect. + */ + lora_scale?: number + /** + * Image URL + * + * URL of the image to apply broccoli haircut style. + */ + image_url: string + /** + * Sync Mode + * + * If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN. + */ + sync_mode?: boolean + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you. + */ + guidance_scale?: number + /** + * Seed + * + * The same seed and the same prompt given to the same version of the model will output the same image every time. + */ + seed?: number + /** + * Num Inference Steps + * + * Number of inference steps for sampling. + */ + num_inference_steps?: number + /** + * Enable Safety Checker + * + * Whether to enable the safety checker for the generated image. + */ + enable_safety_checker?: boolean +} + +/** + * ImageUpscaleOutput + */ +export type SchemaTopazUpscaleImageOutput = { + /** + * Image + * + * The upscaled image. 
+ */
+ image: SchemaFile
+}
+
+/**
+ * ImageUpscaleRequest
+ */
+export type SchemaTopazUpscaleImageInput = {
+ /**
+ * Face Enhancement Creativity
+ *
+ * Creativity level for face enhancement. 0.0 means no creativity, 1.0 means maximum creativity. Ignored if face enhancement is disabled.
+ */
+ face_enhancement_creativity?: number
+ /**
+ * Face Enhancement Strength
+ *
+ * Strength of the face enhancement. 0.0 means no enhancement, 1.0 means maximum enhancement. Ignored if face enhancement is disabled.
+ */
+ face_enhancement_strength?: number
+ /**
+ * Output Format
+ *
+ * Output format of the upscaled image.
+ */
+ output_format?: 'jpeg' | 'png'
+ /**
+ * Face Enhancement
+ *
+ * Whether to apply face enhancement to the image.
+ */
+ face_enhancement?: boolean
+ /**
+ * Subject Detection
+ *
+ * Subject detection mode for the image enhancement.
+ */
+ subject_detection?: 'All' | 'Foreground' | 'Background'
+ /**
+ * Model
+ *
+ * Model to use for image enhancement.
+ */
+ model?:
+ | 'Low Resolution V2'
+ | 'Standard V2'
+ | 'CGI'
+ | 'High Fidelity V2'
+ | 'Text Refine'
+ | 'Recovery'
+ | 'Redefine'
+ | 'Recovery V2'
+ /**
+ * Image Url
+ *
+ * URL of the image to be upscaled
+ */
+ image_url: string
+ /**
+ * Upscale Factor
+ *
+ * Factor to upscale the image by (e.g. 2.0 doubles width and height)
+ */
+ upscale_factor?: number
+ /**
+ * Crop To Fill
+ */
+ crop_to_fill?: boolean
+}
+
+/**
+ * YouTubeThumbnailsOutput
+ */
+export type SchemaImageEditingYoutubeThumbnailsOutput = {
+ /**
+ * Images
+ */
+ images: Array
+ /**
+ * Seed
+ */
+ seed: number
+}
+
+/**
+ * YouTubeThumbnailsInput
+ *
+ * Input model for YouTube thumbnails endpoint.
+ */
+export type SchemaImageEditingYoutubeThumbnailsInput = {
+ /**
+ * Thumbnail Text
+ *
+ * The text to include in the YouTube thumbnail.
+ */
+ prompt?: string
+ /**
+ * Lora Scale
+ *
+ * The scale factor for the LoRA model. Controls the strength of the LoRA effect.
+ */
+ lora_scale?: number
+ /**
+ * Image URL
+ *
+ * URL of the image to convert to YouTube thumbnail style.
+ */
+ image_url: string
+ /**
+ * Sync Mode
+ *
+ * If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.
+ */
+ sync_mode?: boolean
+ /**
+ * Guidance Scale
+ *
+ * The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.
+ */
+ guidance_scale?: number
+ /**
+ * Seed
+ *
+ * The same seed and the same prompt given to the same version of the model will output the same image every time.
+ */
+ seed?: number
+ /**
+ * Num Inference Steps
+ *
+ * Number of inference steps for sampling.
+ */
+ num_inference_steps?: number
+ /**
+ * Enable Safety Checker
+ *
+ * Whether to enable the safety checker for the generated image.
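+ *
+ * Editor's sketch: a 2x Topaz upscale with face enhancement, using the request
+ * type defined above; the URL is a placeholder.
+ * @example
+ *   const upscale: SchemaTopazUpscaleImageInput = {
+ *     image_url: 'https://example.com/low-res.jpg', // placeholder
+ *     upscale_factor: 2, // doubles width and height
+ *     model: 'Standard V2',
+ *     face_enhancement: true,
+ *     face_enhancement_strength: 0.8,
+ *   }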
+ */ + enable_safety_checker?: boolean +} + +/** + * BlurOutput + */ +export type SchemaPostProcessingBlurOutput = { + /** + * Images + * + * The processed images with blur effect + */ + images: Array +} + +/** + * BlurInput + */ +export type SchemaPostProcessingBlurInput = { + /** + * Blur Sigma + * + * Sigma for Gaussian blur + */ + blur_sigma?: number + /** + * Blur Radius + * + * Blur radius + */ + blur_radius?: number + /** + * Blur Type + * + * Type of blur to apply + */ + blur_type?: 'gaussian' | 'kuwahara' + /** + * Image Url + * + * URL of image to process + */ + image_url: string +} + +/** + * ChromaticAberrationOutput + */ +export type SchemaPostProcessingChromaticAberrationOutput = { + /** + * Images + * + * The processed images with chromatic aberration effect + */ + images: Array +} + +/** + * ChromaticAberrationInput + */ +export type SchemaPostProcessingChromaticAberrationInput = { + /** + * Blue Shift + * + * Blue channel shift amount + */ + blue_shift?: number + /** + * Red Shift + * + * Red channel shift amount + */ + red_shift?: number + /** + * Green Direction + * + * Green channel shift direction + */ + green_direction?: 'horizontal' | 'vertical' + /** + * Blue Direction + * + * Blue channel shift direction + */ + blue_direction?: 'horizontal' | 'vertical' + /** + * Red Direction + * + * Red channel shift direction + */ + red_direction?: 'horizontal' | 'vertical' + /** + * Image Url + * + * URL of image to process + */ + image_url: string + /** + * Green Shift + * + * Green channel shift amount + */ + green_shift?: number +} + +/** + * ColorCorrectionOutput + */ +export type SchemaPostProcessingColorCorrectionOutput = { + /** + * Images + * + * The processed images with color correction + */ + images: Array +} + +/** + * ColorCorrectionInput + */ +export type SchemaPostProcessingColorCorrectionInput = { + /** + * Gamma + * + * Gamma adjustment + */ + gamma?: number + /** + * Saturation + * + * Saturation adjustment + */ + saturation?: number + /** + * Temperature + * + * Color temperature adjustment + */ + temperature?: number + /** + * Brightness + * + * Brightness adjustment + */ + brightness?: number + /** + * Contrast + * + * Contrast adjustment + */ + contrast?: number + /** + * Image Url + * + * URL of image to process + */ + image_url: string +} + +/** + * ColorTintOutput + */ +export type SchemaPostProcessingColorTintOutput = { + /** + * Images + * + * The processed images with color tint effect + */ + images: Array +} + +/** + * ColorTintInput + */ +export type SchemaPostProcessingColorTintInput = { + /** + * Tint Strength + * + * Tint strength + */ + tint_strength?: number + /** + * Tint Mode + * + * Tint color mode + */ + tint_mode?: + | 'sepia' + | 'red' + | 'green' + | 'blue' + | 'cyan' + | 'magenta' + | 'yellow' + | 'purple' + | 'orange' + | 'warm' + | 'cool' + | 'lime' + | 'navy' + | 'vintage' + | 'rose' + | 'teal' + | 'maroon' + | 'peach' + | 'lavender' + | 'olive' + /** + * Image Url + * + * URL of image to process + */ + image_url: string +} + +/** + * DesaturateOutput + */ +export type SchemaPostProcessingDesaturateOutput = { + /** + * Images + * + * The processed images with desaturation effect + */ + images: Array +} + +/** + * DesaturateInput + */ +export type SchemaPostProcessingDesaturateInput = { + /** + * Desaturate Method + * + * Desaturation method + */ + desaturate_method?: + | 'luminance (Rec.709)' + | 'luminance (Rec.601)' + | 'average' + | 'lightness' + /** + * Desaturate Factor + * + * Desaturation factor + */ + desaturate_factor?: 
number + /** + * Image Url + * + * URL of image to process + */ + image_url: string +} + +/** + * DissolveOutput + */ +export type SchemaPostProcessingDissolveOutput = { + /** + * Images + * + * The processed images with dissolve effect + */ + images: Array +} + +/** + * DissolveInput + */ +export type SchemaPostProcessingDissolveInput = { + /** + * Dissolve Factor + * + * Dissolve blend factor + */ + dissolve_factor?: number + /** + * Dissolve Image Url + * + * URL of second image for dissolve + */ + dissolve_image_url: string + /** + * Image Url + * + * URL of image to process + */ + image_url: string +} + +/** + * DodgeBurnOutput + */ +export type SchemaPostProcessingDodgeBurnOutput = { + /** + * Images + * + * The processed images with dodge and burn effect + */ + images: Array +} + +/** + * DodgeBurnInput + */ +export type SchemaPostProcessingDodgeBurnInput = { + /** + * Dodge Burn Mode + * + * Dodge and burn mode + */ + dodge_burn_mode?: + | 'dodge' + | 'burn' + | 'dodge_and_burn' + | 'burn_and_dodge' + | 'color_dodge' + | 'color_burn' + | 'linear_dodge' + | 'linear_burn' + /** + * Dodge Burn Intensity + * + * Dodge and burn intensity + */ + dodge_burn_intensity?: number + /** + * Image Url + * + * URL of image to process + */ + image_url: string +} + +/** + * GrainOutput + */ +export type SchemaPostProcessingGrainOutput = { + /** + * Images + * + * The processed images with grain effect + */ + images: Array +} + +/** + * GrainInput + */ +export type SchemaPostProcessingGrainInput = { + /** + * Grain Style + * + * Style of film grain to apply + */ + grain_style?: + | 'modern' + | 'analog' + | 'kodak' + | 'fuji' + | 'cinematic' + | 'newspaper' + /** + * Grain Intensity + * + * Film grain intensity + */ + grain_intensity?: number + /** + * Grain Scale + * + * Film grain scale + */ + grain_scale?: number + /** + * Image Url + * + * URL of image to process + */ + image_url: string +} + +/** + * ParabolizeOutput + */ +export type SchemaPostProcessingParabolizeOutput = { + /** + * Images + * + * The processed images with parabolize effect + */ + images: Array +} + +/** + * ParabolizeInput + */ +export type SchemaPostProcessingParabolizeInput = { + /** + * Parabolize Coeff + * + * Parabolize coefficient + */ + parabolize_coeff?: number + /** + * Vertex Y + * + * Vertex Y position + */ + vertex_y?: number + /** + * Vertex X + * + * Vertex X position + */ + vertex_x?: number + /** + * Image Url + * + * URL of image to process + */ + image_url: string +} + +/** + * SharpenOutput + */ +export type SchemaPostProcessingSharpenOutput = { + /** + * Images + * + * The processed images with sharpen effect + */ + images: Array +} + +/** + * SharpenInput + */ +export type SchemaPostProcessingSharpenInput = { + /** + * Sharpen Mode + * + * Type of sharpening to apply + */ + sharpen_mode?: 'basic' | 'smart' | 'cas' + /** + * Sharpen Alpha + * + * Sharpen strength (for basic mode) + */ + sharpen_alpha?: number + /** + * Noise Radius + * + * Noise radius for smart sharpen + */ + noise_radius?: number + /** + * Sharpen Radius + * + * Sharpen radius (for basic mode) + */ + sharpen_radius?: number + /** + * Image Url + * + * URL of image to process + */ + image_url: string + /** + * Smart Sharpen Strength + * + * Smart sharpen strength + */ + smart_sharpen_strength?: number + /** + * Cas Amount + * + * CAS sharpening amount + */ + cas_amount?: number + /** + * Preserve Edges + * + * Edge preservation factor + */ + preserve_edges?: number + /** + * Smart Sharpen Ratio + * + * Smart sharpen blend ratio + */ + 
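+/*
+ * Illustrative note (not generated): a film-grain payload follows the same
+ * pattern; the field values here are arbitrary examples:
+ *
+ *   const grain: SchemaPostProcessingGrainInput = {
+ *     image_url: 'https://example.com/photo.png',
+ *     grain_style: 'kodak',
+ *     grain_intensity: 0.4,
+ *   }
+ */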
smart_sharpen_ratio?: number +} + +/** + * SolarizeOutput + */ +export type SchemaPostProcessingSolarizeOutput = { + /** + * Images + * + * The processed images with solarize effect + */ + images: Array +} + +/** + * SolarizeInput + */ +export type SchemaPostProcessingSolarizeInput = { + /** + * Solarize Threshold + * + * Solarize threshold + */ + solarize_threshold?: number + /** + * Image Url + * + * URL of image to process + */ + image_url: string +} + +/** + * VignetteOutput + */ +export type SchemaPostProcessingVignetteOutput = { + /** + * Images + * + * The processed images with vignette effect + */ + images: Array +} + +/** + * VignetteInput + */ +export type SchemaPostProcessingVignetteInput = { + /** + * Vignette Strength + * + * Vignette strength + */ + vignette_strength?: number + /** + * Image Url + * + * URL of image to process + */ + image_url: string +} + +/** + * RealismOutput + */ +export type SchemaImageEditingRealismOutput = { + /** + * Images + */ + images: Array + /** + * Seed + */ + seed: number +} + +/** + * RealismInput + * + * Input model for realism enhancement endpoint. + */ +export type SchemaImageEditingRealismInput = { + /** + * Lora Scale + * + * The scale factor for the LoRA model. Controls the strength of the LoRA effect. + */ + lora_scale?: number + /** + * Image URL + * + * URL of the image to enhance with realism details. + */ + image_url: string + /** + * Sync Mode + * + * If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN. + */ + sync_mode?: boolean + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you. + */ + guidance_scale?: number + /** + * Seed + * + * The same seed and the same prompt given to the same version of the model will output the same image every time. + */ + seed?: number + /** + * Num Inference Steps + * + * Number of inference steps for sampling. + */ + num_inference_steps?: number + /** + * Enable Safety Checker + * + * Whether to enable the safety checker for the generated image. + */ + enable_safety_checker?: boolean +} + +/** + * ReimagineOutput + */ +export type SchemaBriaReimagineOutput = { + /** + * Images + * + * The generated images + */ + images: Array + /** + * Seed + * + * Seed value used for generation. + */ + seed: number +} + +/** + * ReimagineInput + */ +export type SchemaBriaReimagineInput = { + /** + * Prompt + * + * The prompt you would like to use to generate images. + */ + prompt: string + /** + * Num Results + * + * How many images you would like to generate. When using any Guidance Method, Value is set to 1. + */ + num_results?: number + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Structure Ref Influence + * + * The influence of the structure reference on the generated image. + */ + structure_ref_influence?: number + /** + * Fast + * + * Whether to use the fast model + */ + fast?: boolean + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. 
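+ *
+ * Usage sketch (illustrative, not generated): a structure-guided reimagine
+ * call, assuming `@fal-ai/client`; the endpoint id is inferred from the type
+ * name and should be verified:
+ *
+ *   const input: SchemaBriaReimagineInput = {
+ *     prompt: 'a watercolor living room',
+ *     structure_image_url: 'https://example.com/room.jpg',
+ *     structure_ref_influence: 0.75,
+ *   }
+ *   const { data } = await fal.subscribe('fal-ai/bria/reimagine', { input })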
+ * + */ + seed?: number + /** + * Num Inference Steps + * + * The number of iterations the model goes through to refine the generated image. This parameter is optional. + */ + num_inference_steps?: number + /** + * Structure Image Url + * + * The URL of the structure reference image. Use "" to leave empty. Accepted formats are jpeg, jpg, png, webp. + */ + structure_image_url?: string +} + +/** + * Output + */ +export type SchemaCalligrapherOutput = { + /** + * Images + */ + images: Array +} + +/** + * Input + */ +export type SchemaCalligrapherInput = { + /** + * Use Context + * + * Whether to prepend context reference to the input + */ + use_context?: boolean + /** + * Num Images + * + * How many images to generate + */ + num_images?: number + /** + * Image Size + * + * Target image size for generation + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Auto Mask Generation + * + * Whether to automatically generate mask from detected text + */ + auto_mask_generation?: boolean + /** + * Reference Image Url + * + * Optional base64 reference image for style + */ + reference_image_url?: string + /** + * Source Image Url + * + * Base64-encoded source image with drawn mask layers + */ + source_image_url: string + /** + * Prompt + * + * Text prompt to inpaint or customize + */ + prompt: string + /** + * Mask Image Url + * + * Base64-encoded mask image (optional if using auto_mask_generation) + */ + mask_image_url?: string + /** + * Source Text + * + * Source text to replace (if empty, masks all detected text) + */ + source_text?: string + /** + * Num Inference Steps + * + * Number of inference steps (1-100) + */ + num_inference_steps?: number + /** + * Seed + * + * Random seed for reproducibility + */ + seed?: number + /** + * Cfg Scale + * + * Guidance or strength scale for the model + */ + cfg_scale?: number +} + +/** + * VideoFile + */ +export type SchemaVideoFile = { + /** + * File Size + * + * The size of the file in bytes. + */ + file_size?: number + /** + * Duration + * + * The duration of the video + */ + duration?: number + /** + * Height + * + * The height of the video + */ + height?: number + /** + * Url + * + * The URL where the file can be downloaded from. + */ + url: string + /** + * Width + * + * The width of the video + */ + width?: number + /** + * Fps + * + * The FPS of the video + */ + fps?: number + /** + * File Name + * + * The name of the file. It will be auto-generated if not provided. + */ + file_name?: string + /** + * Content Type + * + * The mime type of the file. + */ + content_type?: string + /** + * Num Frames + * + * The number of frames in the video + */ + num_frames?: number + /** + * File Data + * + * File data + */ + file_data?: Blob | File +} + +/** + * FILMImageOutput + */ +export type SchemaFilmOutput = { + /** + * Images + * + * The generated frames as individual images. + */ + images?: Array + /** + * Video + * + * The generated video file, if output_type is 'video'. + */ + video?: SchemaVideoFile +} + +/** + * FILMImageInput + */ +export type SchemaFilmInput = { + /** + * Video Write Mode + * + * The write mode of the output video. Only applicable if output_type is 'video'. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Number of Frames + * + * The number of frames to generate between the input images. + */ + num_frames?: number + /** + * Include Start + * + * Whether to include the start image in the output. 
+ */ + include_start?: boolean + /** + * Video Quality + * + * The quality of the output video. Only applicable if output_type is 'video'. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Include End + * + * Whether to include the end image in the output. + */ + include_end?: boolean + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Frames Per Second + * + * Frames per second for the output video. Only applicable if output_type is 'video'. + */ + fps?: number + /** + * Start Image URL + * + * The URL of the first image to use as the starting point for interpolation. + */ + start_image_url: string + /** + * End Image URL + * + * The URL of the second image to use as the ending point for interpolation. + */ + end_image_url: string + /** + * Image Format + * + * The format of the output images. Only applicable if output_type is 'images'. + */ + image_format?: 'png' | 'jpeg' + /** + * Output Type + * + * The type of output to generate; either individual images or a video. + */ + output_type?: 'images' | 'video' +} + +/** + * RIFEImageOutput + */ +export type SchemaRifeOutput = { + /** + * Images + * + * The generated frames as individual images. + */ + images?: Array + /** + * Video + * + * The generated video file, if output_type is 'video'. + */ + video?: SchemaFile +} + +/** + * RIFEImageInput + */ +export type SchemaRifeInput = { + /** + * Output Format + * + * The format of the output images. Only applicable if output_type is 'images'. + */ + output_format?: 'png' | 'jpeg' + /** + * Frames Per Second + * + * Frames per second for the output video. Only applicable if output_type is 'video'. + */ + fps?: number + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Include End + * + * Whether to include the end image in the output. + */ + include_end?: boolean + /** + * Include Start + * + * Whether to include the start image in the output. + */ + include_start?: boolean + /** + * Number of Frames + * + * The number of frames to generate between the input images. + */ + num_frames?: number + /** + * End Image URL + * + * The URL of the second image to use as the ending point for interpolation. + */ + end_image_url: string + /** + * Output Type + * + * The type of output to generate; either individual images or a video. + */ + output_type?: 'images' | 'video' + /** + * Start Image URL + * + * The URL of the first image to use as the starting point for interpolation. + */ + start_image_url: string +} + +/** + * Output + */ +export type SchemaHidreamE11Output = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * BaseInput + */ +export type SchemaHidreamE11Input = { + /** + * Prompt + * + * The prompt to generate an image from. 
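+ *
+ * Usage sketch (illustrative, not generated): the FILM interpolation input
+ * above pairs two frames and returns either frames or a video. Assuming
+ * `@fal-ai/client`; the endpoint id is inferred from the type name and should
+ * be verified:
+ *
+ *   const input: SchemaFilmInput = {
+ *     start_image_url: 'https://example.com/a.png',
+ *     end_image_url: 'https://example.com/b.png',
+ *     num_frames: 24,
+ *     output_type: 'video',
+ *     fps: 24,
+ *   }
+ *   const { data } = await fal.subscribe('fal-ai/film', { input })
+ *   const video = (data as SchemaFilmOutput).video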
+ */ + prompt?: string + /** + * Number of Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Guidance Scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your initial image when looking for a related image to show you. + * + */ + image_guidance_scale?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * URL of an input image to edit. + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Guidance Scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Number of Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Target Image Description + * + * The description of the target image after your edits have been made. Leave this blank to allow the model to use its own imagination. + */ + target_image_description?: string + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number +} + +/** + * RetouchOutput + */ +export type SchemaImageEditingRetouchOutput = { + /** + * Images + */ + images: Array + /** + * Seed + */ + seed: number +} + +/** + * RetouchInput + * + * Input model for retouch endpoint. + */ +export type SchemaImageEditingRetouchInput = { + /** + * Lora Scale + * + * The scale factor for the LoRA model. Controls the strength of the LoRA effect. + */ + lora_scale?: number + /** + * Image URL + * + * URL of the image to retouch. + */ + image_url: string + /** + * Sync Mode + * + * If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN. + */ + sync_mode?: boolean + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you. + */ + guidance_scale?: number + /** + * Seed + * + * The same seed and the same prompt given to the same version of the model will output the same image every time. + */ + seed?: number + /** + * Num Inference Steps + * + * Number of inference steps for sampling. + */ + num_inference_steps?: number + /** + * Enable Safety Checker + * + * Whether to enable the safety checker for the generated image. + */ + enable_safety_checker?: boolean +} + +/** + * ImageToPanoramaResponse + */ +export type SchemaHunyuanWorldOutput = { + /** + * Image + * + * The generated panorama image. 
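+ *
+ * Illustrative note (not generated): `SchemaHunyuanWorldInput` below takes
+ * only a prompt and an image URL, so a panorama call reduces to a single
+ * request (the endpoint id is an assumption inferred from the type name):
+ *
+ *   const { data } = await fal.subscribe('fal-ai/hunyuan-world', {
+ *     input: {
+ *       prompt: 'a mountain lake at dawn',
+ *       image_url: 'https://example.com/lake.jpg',
+ *     },
+ *   })
+ *   // data.image.url then holds the generated panorama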
+ */ + image: SchemaImage +} + +/** + * ImageToPanoramaRequest + */ +export type SchemaHunyuanWorldInput = { + /** + * Prompt + * + * The prompt to use for the panorama generation. + */ + prompt: string + /** + * Image Url + * + * The URL of the image to convert to a panorama. + */ + image_url: string +} + +/** + * KontextInpaintOutput + */ +export type SchemaFluxKontextLoraInpaintOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * BaseKontextInpaintInput + */ +export type SchemaFluxKontextLoraInpaintInput = { + /** + * Prompt + * + * The prompt for the image to image task. + */ + prompt: string + /** + * Acceleration + * + * The speed of the generation. The higher the speed, the faster the generation. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Reference Image URL + * + * The URL of the reference image for inpainting. + */ + reference_image_url: string + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use any number of LoRAs + * and they will be merged together to generate the final image. + * + */ + loras?: Array + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * The URL of the image to be inpainted. + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Strength + * + * The strength of the initial image. Higher strength values are better for this model. + */ + strength?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Mask URL + * + * The URL of the mask for inpainting. + */ + mask_url: string + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number +} + +/** + * KreaReduxOutput + */ +export type SchemaFlux1KreaReduxOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated images. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed.
+ * + */ + seed: number +} + +/** + * BaseKreaFlux1ReduxInput + */ +export type SchemaFlux1KreaReduxInput = { + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * The speed of the generation. The higher the speed, the faster the generation. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * The URL of the image to generate an image from. + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number | unknown + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number +} + +/** + * KreaOutput + */ +export type SchemaFlux1KreaImageToImageOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated images. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * BaseKreaFlux1ImageToInput + */ +export type SchemaFlux1KreaImageToImageInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Acceleration + * + * The speed of the generation. The higher the speed, the faster the generation. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * The URL of the image to generate an image from. + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Strength + * + * The strength of the initial image. Higher strength values are better for this model. + */ + strength?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Num Inference Steps + * + * The number of inference steps to perform. 
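+ *
+ * Usage sketch (illustrative, not generated): image-to-image, where `strength`
+ * controls how much of the input image is remade (higher values remake more).
+ * Assuming `@fal-ai/client`; the endpoint id is inferred from the type name
+ * and should be verified:
+ *
+ *   const input: SchemaFlux1KreaImageToImageInput = {
+ *     prompt: 'turn the sketch into a photograph',
+ *     image_url: 'https://example.com/sketch.png',
+ *     strength: 0.85,
+ *   }
+ *   await fal.subscribe('fal-ai/flux-1/krea/image-to-image', { input })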
+ */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number | unknown + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number +} + +/** + * KreaReduxOutput + */ +export type SchemaFluxKreaReduxOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated images. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * BaseKreaReduxInput + */ +export type SchemaFluxKreaReduxInput = { + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * The speed of the generation. The higher the speed, the faster the generation. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * The URL of the image to generate an image from. + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number | unknown + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * Output + */ +export type SchemaFluxKreaImageToImageOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * BaseKreaImageToInput + */ +export type SchemaFluxKreaImageToImageInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. 
+ */ + num_images?: number + /** + * Acceleration + * + * The speed of the generation. The higher the speed, the faster the generation. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * The URL of the image to generate an image from. + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Strength + * + * The strength of the initial image. Higher strength values are better for this model. + */ + strength?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number | unknown + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * Output + */ +export type SchemaFluxKreaLoraImageToImageOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Timings + */ + timings: { + [key: string]: number + } +} + +/** + * ImageToImageInput + */ +export type SchemaFluxKreaLoraImageToImageInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. This is always set to 1 for streaming output. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image Url + * + * URL of the image to use for inpainting or img2img. + */ + image_url: string + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use any number of LoRAs + * and they will be merged together to generate the final image. + * + */ + loras?: Array + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Strength + * + * The strength to use for inpainting/image-to-image. Only used if the image_url is provided. 1.0 completely remakes the image while 0.0 preserves the original. + */ + strength?: number + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you.
+ * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number +} + +/** + * Output + */ +export type SchemaFluxKreaLoraInpaintingOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Timings + */ + timings: { + [key: string]: number + } +} + +/** + * InpaintInput + */ +export type SchemaFluxKreaLoraInpaintingInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. This is always set to 1 for streaming output. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image Url + * + * URL of the image to use for inpainting or img2img. + */ + image_url: string + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use any number of LoRAs + * and they will be merged together to generate the final image. + * + */ + loras?: Array + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Strength + * + * The strength to use for inpainting/image-to-image. Only used if the image_url is provided. 1.0 completely remakes the image while 0.0 preserves the original. + */ + strength?: number + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Mask Url + * + * + * The mask of the area to inpaint. + * + */ + mask_url: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time.
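+ *
+ * Usage sketch (illustrative, not generated): inpainting with LoRA weights.
+ * The `loras` entries are assumed to follow fal's usual LoRA-weight shape
+ * (a `path` plus an optional `scale`); check the element type of `loras`
+ * before relying on this:
+ *
+ *   const input: SchemaFluxKreaLoraInpaintingInput = {
+ *     prompt: 'replace the sofa with a leather armchair',
+ *     image_url: 'https://example.com/room.png',
+ *     mask_url: 'https://example.com/sofa-mask.png',
+ *     loras: [{ path: 'https://example.com/interior.safetensors', scale: 1 }],
+ *   }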
+ * + */ + seed?: number +} + +/** + * CharacterRemixOutputV3 + */ +export type SchemaIdeogramCharacterRemixOutput = { + /** + * Images + */ + images: Array + /** + * Seed + * + * Seed used for the random number generator + */ + seed: number +} + +/** + * CharacterRemixInputV3 + */ +export type SchemaIdeogramCharacterRemixInput = { + /** + * Prompt + * + * The prompt to remix the image with + */ + prompt: string + /** + * Image Size + * + * The resolution of the generated image + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + | unknown + /** + * Style + * + * The style type to generate with. Cannot be used with style_codes. + */ + style?: 'AUTO' | 'REALISTIC' | 'FICTION' + /** + * Expand Prompt + * + * Determine if MagicPrompt should be used in generating the request or not. + */ + expand_prompt?: boolean + /** + * Rendering Speed + * + * The rendering speed to use. + */ + rendering_speed?: 'TURBO' | 'BALANCED' | 'QUALITY' + /** + * Reference Mask Urls + * + * A set of masks to apply to the character references. Currently only 1 mask is supported, rest will be ignored. (maximum total size 10MB across all character references). The masks should be in JPEG, PNG or WebP format + */ + reference_mask_urls?: Array + /** + * Reference Image Urls + * + * A set of images to use as character references. Currently only 1 image is supported, rest will be ignored. (maximum total size 10MB across all character references). The images should be in JPEG, PNG or WebP format + */ + reference_image_urls: Array + /** + * Image Urls + * + * A set of images to use as style references (maximum total size 10MB across all style references). The images should be in JPEG, PNG or WebP format + */ + image_urls?: Array | unknown + /** + * Negative Prompt + * + * Description of what to exclude from an image. Descriptions in the prompt take precedence to descriptions in the negative prompt. + */ + negative_prompt?: string + /** + * Num Images + * + * Number of images to generate. + */ + num_images?: number + /** + * Image URL + * + * The image URL to remix + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * A color palette for generation, must EITHER be specified via one of the presets (name) or explicitly via hexadecimal representations of the color with optional weights (members) + */ + color_palette?: SchemaColorPalette | unknown + /** + * Style Codes + * + * A list of 8 character hexadecimal codes representing the style of the image. Cannot be used in conjunction with style_reference_images or style + */ + style_codes?: Array | unknown + /** + * Strength + * + * Strength of the input image in the remix + */ + strength?: number + /** + * Seed + * + * Seed for the random number generator + */ + seed?: number | unknown +} + +/** + * CharacterOutputV3 + */ +export type SchemaIdeogramCharacterOutput = { + /** + * Images + */ + images: Array + /** + * Seed + * + * Seed used for the random number generator + */ + seed: number +} + +/** + * BaseCharacterInputV3 + */ +export type SchemaIdeogramCharacterInput = { + /** + * Prompt + * + * The prompt to fill the masked part of the image. 
+ */ + prompt: string + /** + * Image Size + * + * The resolution of the generated image + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + | unknown + /** + * Style + * + * The style type to generate with. Cannot be used with style_codes. + */ + style?: 'AUTO' | 'REALISTIC' | 'FICTION' + /** + * Expand Prompt + * + * Determine if MagicPrompt should be used in generating the request or not. + */ + expand_prompt?: boolean + /** + * Rendering Speed + * + * The rendering speed to use. + */ + rendering_speed?: 'TURBO' | 'BALANCED' | 'QUALITY' + /** + * Reference Mask Urls + * + * A set of masks to apply to the character references. Currently only 1 mask is supported, rest will be ignored. (maximum total size 10MB across all character references). The masks should be in JPEG, PNG or WebP format + */ + reference_mask_urls?: Array + /** + * Reference Image Urls + * + * A set of images to use as character references. Currently only 1 image is supported, rest will be ignored. (maximum total size 10MB across all character references). The images should be in JPEG, PNG or WebP format + */ + reference_image_urls: Array + /** + * Image Urls + * + * A set of images to use as style references (maximum total size 10MB across all style references). The images should be in JPEG, PNG or WebP format + */ + image_urls?: Array | unknown + /** + * Negative Prompt + * + * Description of what to exclude from an image. Descriptions in the prompt take precedence to descriptions in the negative prompt. + */ + negative_prompt?: string + /** + * Num Images + * + * Number of images to generate. + */ + num_images?: number + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * A color palette for generation, must EITHER be specified via one of the presets (name) or explicitly via hexadecimal representations of the color with optional weights (members) + */ + color_palette?: SchemaColorPalette | unknown + /** + * Style Codes + * + * A list of 8 character hexadecimal codes representing the style of the image. Cannot be used in conjunction with style_reference_images or style + */ + style_codes?: Array | unknown + /** + * Seed + * + * Seed for the random number generator + */ + seed?: number | unknown +} + +/** + * CharacterEditOutputV3 + */ +export type SchemaIdeogramCharacterEditOutput = { + /** + * Images + */ + images: Array + /** + * Seed + * + * Seed used for the random number generator + */ + seed: number +} + +/** + * CharacterEditInputV3 + */ +export type SchemaIdeogramCharacterEditInput = { + /** + * Prompt + * + * The prompt to fill the masked part of the image. + */ + prompt: string + /** + * Style + * + * The style type to generate with. Cannot be used with style_codes. + */ + style?: 'AUTO' | 'REALISTIC' | 'FICTION' + /** + * Expand Prompt + * + * Determine if MagicPrompt should be used in generating the request or not. + */ + expand_prompt?: boolean + /** + * Rendering Speed + * + * The rendering speed to use. + */ + rendering_speed?: 'TURBO' | 'BALANCED' | 'QUALITY' + /** + * Reference Mask Urls + * + * A set of masks to apply to the character references. Currently only 1 mask is supported, rest will be ignored. (maximum total size 10MB across all character references). 
The masks should be in JPEG, PNG or WebP format + */ + reference_mask_urls?: Array + /** + * Reference Image Urls + * + * A set of images to use as character references. Currently only 1 image is supported, rest will be ignored. (maximum total size 10MB across all character references). The images should be in JPEG, PNG or WebP format + */ + reference_image_urls: Array + /** + * Image Urls + * + * A set of images to use as style references (maximum total size 10MB across all style references). The images should be in JPEG, PNG or WebP format + */ + image_urls?: Array | unknown + /** + * Num Images + * + * Number of images to generate. + */ + num_images?: number + /** + * Image URL + * + * The image URL to generate an image from. MUST have the exact same dimensions (width and height) as the mask image. + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * A color palette for generation, must EITHER be specified via one of the presets (name) or explicitly via hexadecimal representations of the color with optional weights (members) + */ + color_palette?: SchemaColorPalette | unknown + /** + * Style Codes + * + * A list of 8 character hexadecimal codes representing the style of the image. Cannot be used in conjunction with style_reference_images or style + */ + style_codes?: Array | unknown + /** + * Seed + * + * Seed for the random number generator + */ + seed?: number | unknown + /** + * Mask URL + * + * The mask URL to inpaint the image. MUST have the exact same dimensions (width and height) as the input image. + */ + mask_url: string +} + +/** + * QwenImageOutput + */ +export type SchemaQwenImageEditOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * BaseQwenEditImageInput + */ +export type SchemaQwenImageEditInput = { + /** + * Prompt + * + * The prompt to generate the image with + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * Acceleration level for image generation. Options: 'none', 'regular'. Higher acceleration increases speed. 'regular' balances speed and quality. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * The URL of the image to edit. + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. 
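+ *
+ * Usage sketch (illustrative, not generated): a minimal Qwen image edit,
+ * assuming `@fal-ai/client` and the endpoint id the type name suggests
+ * (verify against the fal.ai docs):
+ *
+ *   const { data } = await fal.subscribe('fal-ai/qwen-image-edit', {
+ *     input: {
+ *       prompt: 'make the jacket red',
+ *       image_url: 'https://example.com/jacket.png',
+ *     } satisfies SchemaQwenImageEditInput,
+ *   })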
+ */ + sync_mode?: boolean + /** + * Guidance scale + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Negative Prompt + * + * The negative prompt for the generation + */ + negative_prompt?: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean +} + +/** + * NextStepResponse + */ +export type SchemaNextstep1Output = { + /** + * Image + * + * Generated image + */ + image: { + /** + * File Size + * + * The size of the file in bytes. + */ + file_size?: number | unknown + /** + * Height + * + * The height of the image in pixels. + */ + height?: number | unknown + /** + * File Name + * + * The name of the file. It will be auto-generated if not provided. + */ + file_name?: string | unknown + /** + * Content Type + * + * The mime type of the file. + */ + content_type?: string | unknown + /** + * Url + * + * The URL where the file can be downloaded from. + */ + url: string + /** + * Width + * + * The width of the image in pixels. + */ + width?: number | unknown + } + /** + * Seed + * + * Seed used for random number generation + */ + seed: number +} + +/** + * NextStepEditRequest + */ +export type SchemaNextstep1Input = { + /** + * Prompt + * + * The prompt to edit the image. + */ + prompt: string + /** + * Negative Prompt + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * + */ + negative_prompt: string + /** + * Image URL + * + * The URL of the image to edit. + */ + image_url: string +} + +/** + * NanoBananaImageToImageOutput + */ +export type SchemaNanoBananaEditOutput = { + /** + * Images + * + * The edited images. + */ + images: Array + /** + * Description + * + * The description of the generated images. + */ + description: string +} + +/** + * NanoBananaImageToImageInput + */ +export type SchemaNanoBananaEditInput = { + /** + * Prompt + * + * The prompt for image editing. + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated image. + */ + aspect_ratio?: + | 'auto' + | '21:9' + | '16:9' + | '3:2' + | '4:3' + | '5:4' + | '1:1' + | '4:5' + | '3:4' + | '2:3' + | '9:16' + /** + * Number of Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Image URLs + * + * The URLs of the images to use for image-to-image generation or image editing. + */ + image_urls: Array + /** + * Limit Generations + * + * Experimental parameter to limit the number of generations from each round of prompting to 1. Set to `True` to disregard any instructions in the prompt regarding the number of images to generate.
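+ *
+ * Usage sketch (illustrative, not generated): nano-banana editing takes a
+ * list of input images rather than a single `image_url`. Assuming
+ * `@fal-ai/client`; the endpoint id is inferred from the type name and should
+ * be verified:
+ *
+ *   const input: SchemaNanoBananaEditInput = {
+ *     prompt: 'blend the subject of the first image into the second scene',
+ *     image_urls: [
+ *       'https://example.com/subject.png',
+ *       'https://example.com/scene.png',
+ *     ],
+ *   }
+ *   await fal.subscribe('fal-ai/nano-banana/edit', { input })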
+ */ + limit_generations?: boolean +} + +/** + * OutputModel + */ +export type SchemaReimagine32Output = { + image: SchemaImage +} + +/** + * InputModel + */ +export type SchemaReimagine32Input = { + /** + * Prompt + * + * Prompt for image generation. + */ + prompt: string + /** + * Depth Preprocess + * + * Depth image preprocess. + */ + depth_preprocess?: boolean + /** + * Canny Preprocess + * + * Canny image preprocess. + */ + canny_preprocess?: boolean + /** + * Depth Image Url + * + * Depth control image (file or URL). + */ + depth_image_url?: string | unknown + /** + * Guidance Scale + * + * Guidance scale for text. + */ + guidance_scale?: number + /** + * Canny Image Url + * + * Canny edge control image (file or URL). + */ + canny_image_url?: string | unknown + /** + * Negative Prompt + * + * Negative prompt for image generation. + */ + negative_prompt?: string + /** + * Depth Scale + * + * Depth control strength (0.0 to 1.0). + */ + depth_scale?: number + /** + * Aspect Ratio + * + * Aspect ratio. Options: 1:1, 2:3, 3:2, 3:4, 4:3, 4:5, 5:4, 9:16, 16:9 + */ + aspect_ratio?: + | '1:1' + | '2:3' + | '3:2' + | '3:4' + | '4:3' + | '4:5' + | '5:4' + | '9:16' + | '16:9' + /** + * Sync Mode + * + * If true, returns the image directly in the response (increases latency). + */ + sync_mode?: boolean + /** + * Prompt Enhancer + * + * Whether to improve the prompt. + */ + prompt_enhancer?: boolean + /** + * Truncate Prompt + * + * Whether to truncate the prompt. + */ + truncate_prompt?: boolean + /** + * Seed + * + * Random seed for reproducibility. + */ + seed?: number + /** + * Canny Scale + * + * Canny edge control strength (0.0 to 1.0). + */ + canny_scale?: number + /** + * Num Inference Steps + * + * Number of inference steps. + */ + num_inference_steps?: number +} + +/** + * QwenImageI2IOutput + */ +export type SchemaQwenImageImageToImageOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Timings + */ + timings: { + [key: string]: number + } +} + +/** + * QwenImageI2IInput + */ +export type SchemaQwenImageImageToImageInput = { + /** + * Prompt + * + * The prompt to generate the image with + */ + prompt: string + /** + * Acceleration + * + * Acceleration level for image generation. Options: 'none', 'regular', 'high'. Higher acceleration increases speed. 'regular' balances speed and quality. 'high' is recommended for images without text. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Image Size + * + * The size of the generated image. By default, we will use the provided image for determining the image_size. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use up to 3 LoRAs + * and they will be merged together to generate the final image. + * + */ + loras?: Array + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. 
+ */ + enable_safety_checker?: boolean + /** + * Guidance scale + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Use Turbo + * + * Enable turbo mode for faster generation with high quality. When enabled, uses optimized settings (10 steps, CFG=1.2). + */ + use_turbo?: boolean + /** + * Negative Prompt + * + * The negative prompt for the generation + */ + negative_prompt?: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image Url + * + * The reference image to guide the generation. + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Strength + * + * Denoising strength. 1.0 = fully remake; 0.0 = preserve original. + */ + strength?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number +} + +/** + * NanoBananaImageToImageOutput + */ +export type SchemaGemini25FlashImageEditOutput = { + /** + * Images + * + * The edited images. + */ + images: Array + /** + * Description + * + * The description of the generated images. + */ + description: string +} + +/** + * NanoBananaImageToImageInput + */ +export type SchemaGemini25FlashImageEditInput = { + /** + * Prompt + * + * The prompt for image editing. + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated image. + */ + aspect_ratio?: + | 'auto' + | '21:9' + | '16:9' + | '3:2' + | '4:3' + | '5:4' + | '1:1' + | '4:5' + | '3:4' + | '2:3' + | '9:16' + /** + * Number of Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Image URLs + * + * The URLs of the images to use for image-to-image generation or image editing. + */ + image_urls: Array + /** + * Limit Generations + * + * Experimental parameter to limit the number of generations from each round of prompting to 1. Set to `True` to disregard any instructions in the prompt regarding the number of images to generate. + */ + limit_generations?: boolean +} + +/** + * USOOutputImage + */ +export type SchemaUsoOutput = { + /** + * Prompt + * + * The prompt used for generation + */ + prompt: string + /** + * Images + * + * The generated images with applied style and/or subject customization + */ + images: Array + /** + * Timings + * + * Performance timings for different stages + */ + timings: { + [key: string]: unknown + } + /** + * Has Nsfw Concepts + * + * NSFW detection results for each generated image + */ + has_nsfw_concepts: Array + /** + * Seed + * + * Seed used for generation + */ + seed: number +} + +/** + * USOInputImage + */ +export type SchemaUsoInput = { + /** + * Prompt + * + * Text prompt for generation. Can be empty for pure style transfer.
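+ *
+ * Usage sketch (illustrative, not generated): USO expects its references in a
+ * fixed order, [content, style, extra style], and a pure style transfer can
+ * leave the prompt empty. The endpoint id is an assumption inferred from the
+ * type name:
+ *
+ *   const input: SchemaUsoInput = {
+ *     prompt: '',
+ *     input_image_urls: [
+ *       'https://example.com/content.png',
+ *       'https://example.com/style.png',
+ *     ],
+ *     keep_size: true,
+ *   }
+ *   await fal.subscribe('fal-ai/uso', { input })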
+ */ + prompt?: string + /** + * Number of Images + * + * Number of images to generate in parallel. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * Output image format. PNG preserves transparency, JPEG is smaller. + */ + output_format?: 'jpeg' | 'png' + /** + * Keep Input Size + * + * Preserve the layout and dimensions of the input content image. Useful for style transfer. + */ + keep_size?: boolean + /** + * Reference Images + * + * List of image URLs in order: [content_image, style_image, extra_style_image]. + */ + input_image_urls: Array + /** + * Sync Mode + * + * If true, wait for generation and upload before returning. Increases latency but provides immediate access to images. + */ + sync_mode?: boolean + /** + * Guidance Scale (CFG) + * + * How closely to follow the prompt. Higher values stick closer to the prompt. + */ + guidance_scale?: number + /** + * Inference Steps + * + * Number of denoising steps. More steps can improve quality but increase generation time. + */ + num_inference_steps?: number + /** + * Seed + * + * Random seed for reproducible generation. Use same seed for consistent results. + */ + seed?: number + /** + * Negative Prompt + * + * What you don't want in the image. Use it to exclude unwanted elements, styles, or artifacts. + */ + negative_prompt?: string + /** + * Safety Checker + * + * Enable NSFW content detection and filtering. + */ + enable_safety_checker?: boolean +} + +/** + * WanI2IResponse + */ +export type SchemaWanV22A14bImageToImageOutput = { + /** + * Prompt + * + * The text prompt used for image generation. + */ + prompt?: string + /** + * Image + * + * The generated image file. + */ + image: SchemaFile + /** + * Seed + * + * The seed used for generation. + */ + seed: number +} + +/** + * WanI2IRequest + */ +export type SchemaWanV22A14bImageToImageInput = { + /** + * Prompt + * + * The text prompt to guide image generation. + */ + prompt: string + /** + * Shift + */ + shift?: number + /** + * Image Size + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * Acceleration level to use. The more acceleration, the faster the generation, but with lower quality. The recommended value is 'regular'. + */ + acceleration?: 'none' | 'regular' + /** + * Enable Safety Checker + * + * If set to true, input data will be checked for safety before processing. + */ + enable_safety_checker?: boolean + /** + * Guidance Scale + * + * Classifier-free guidance scale. + */ + guidance_scale?: number + /** + * Image Format + * + * The format of the output image. + */ + image_format?: 'png' | 'jpeg' + /** + * Negative Prompt + * + * Negative prompt for image generation. + */ + negative_prompt?: string + /** + * Aspect Ratio + * + * Aspect ratio of the generated image. If 'auto', the aspect ratio will be determined automatically based on the input image. + */ + aspect_ratio?: 'auto' | '16:9' | '9:16' | '1:1' + /** + * Enable Output Safety Checker + * + * If set to true, the output image will be checked for safety after generation. + */ + enable_output_safety_checker?: boolean + /** + * Image URL + * + * URL of the input image. + */ + image_url: string + /** + * Strength + * + * Denoising strength.
1.0 = fully remake; 0.0 = preserve original. + */ + strength?: number + /** + * Guidance Scale (2nd Stage) + * + * Guidance scale for the second stage of the model. This is used to control the adherence to the prompt in the second stage of the model. + */ + guidance_scale_2?: number + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning. + */ + enable_prompt_expansion?: boolean + /** + * Number of Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: number + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number +} + +/** + * SeedDream4EditOutput + */ +export type SchemaBytedanceSeedreamV4EditOutput = { + /** + * Images + * + * Generated images + */ + images: Array + /** + * Seed + * + * Seed used for generation + */ + seed: number +} + +/** + * SeedDream4EditInput + */ +export type SchemaBytedanceSeedreamV4EditInput = { + /** + * Prompt + * + * The text prompt used to edit the image + */ + prompt: string + /** + * Num Images + * + * Number of separate model generations to be run with the prompt. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. The minimum total image area is 921600 pixels. Failing this, the image size will be adjusted by scaling it up, while maintaining the aspect ratio. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + | 'auto' + | 'auto_2K' + | 'auto_4K' + /** + * Enhance Prompt Mode + * + * The mode to use for prompt enhancement. Standard mode provides higher quality results but takes longer to generate. Fast mode provides average quality results but takes less time to generate. + */ + enhance_prompt_mode?: 'standard' | 'fast' + /** + * Max Images + * + * If set to a number greater than one, enables multi-image generation. The model will potentially return up to `max_images` images every generation, and in total, `num_images` generations will be carried out. Overall, the number of images generated will be between `num_images` and `max_images*num_images`. The total number of images (image inputs + image outputs) must not exceed 15. + */ + max_images?: number + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * Random seed to control the stochasticity of image generation. + */ + seed?: number + /** + * Image URLs + * + * List of URLs of input images for editing. Presently, up to 10 image inputs are allowed. If over 10 images are sent, only the last 10 will be used.
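+ * + * A hypothetical multi-reference edit; the URLs are placeholders, and `max_images` bounds the per-generation output as described above: + * + * @example + * const seedreamInput: SchemaBytedanceSeedreamV4EditInput = { + *   prompt: 'Blend both products into a single studio scene', + *   image_urls: ['https://example.com/a.png', 'https://example.com/b.png'], + *   max_images: 2, // up to 2 images may come back per generation + * }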
+ */ + image_urls: Array +} + +/** + * ReferenceToImageOutput + */ +export type SchemaViduReferenceToImageOutput = { + /** + * Image + * + * The edited image + */ + image: SchemaImage +} + +/** + * ReferenceToImageRequest + */ +export type SchemaViduReferenceToImageInput = { + /** + * Prompt + * + * Text prompt for image generation, max 1500 characters + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the output image + */ + aspect_ratio?: '16:9' | '9:16' | '1:1' + /** + * Reference Image Urls + * + * URLs of the reference images to use for consistent subject appearance + */ + reference_image_urls: Array + /** + * Seed + * + * Random seed for generation + */ + seed?: number +} + +/** + * QwenImageOutput + */ +export type SchemaQwenImageEditLoraOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * BaseQwenEditImageLoRAInput + */ +export type SchemaQwenImageEditLoraInput = { + /** + * Prompt + * + * The prompt to generate the image with + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * Acceleration level for image generation. Options: 'none', 'regular', 'high'. Higher acceleration increases speed. 'regular' balances speed and quality. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * The URL of the image to edit. + */ + image_url: string + /** + * Sync Mode + * + * + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + * + */ + sync_mode?: boolean + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use up to 3 LoRAs + * and they will be merged together to generate the final image. + * + */ + loras?: Array + /** + * Guidance scale + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Negative Prompt + * + * The negative prompt for the generation + */ + negative_prompt?: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled.
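+ * + * A sketch of a LoRA-guided edit; the LoRA URL is a placeholder, and the element shape of `loras` is assumed since the item type is not spelled out here: + * + * @example + * const loraEditInput: SchemaQwenImageEditLoraInput = { + *   prompt: 'Turn the photo into a watercolor painting', + *   image_url: 'https://example.com/photo.png', + *   loras: [{ path: 'https://example.com/watercolor.safetensors', scale: 1 }], // assumed shape + * }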
+ */ + enable_safety_checker?: boolean +} + +/** + * Output + */ +export type SchemaFlux1SrpoImageToImageOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * BaseSRPOFlux1ImageToInput + */ +export type SchemaFlux1SrpoImageToImageInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Acceleration + * + * The speed of the generation. The higher the speed, the faster the generation. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * The URL of the image to generate an image from. + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Strength + * + * The strength of the initial image. Higher strength values are better for this model. + */ + strength?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number | unknown + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number +} + +/** + * Output + */ +export type SchemaFluxSrpoImageToImageOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * BaseSRPOImageToInput + */ +export type SchemaFluxSrpoImageToImageInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Acceleration + * + * The speed of the generation. The higher the speed, the faster the generation. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * The URL of the image to generate an image from. 
+ */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Strength + * + * The strength of the initial image. Higher strength values are better for this model. + */ + strength?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number | unknown + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * QwenImageInpaintOutput + */ +export type SchemaQwenImageEditInpaintOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * BaseQwenEditInpaintImageInput + */ +export type SchemaQwenImageEditInpaintInput = { + /** + * Prompt + * + * The prompt to generate the image with + */ + prompt: string + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * Acceleration level for image generation. Options: 'none', 'regular'. Higher acceleration increases speed. 'regular' balances speed and quality. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Guidance scale + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * The negative prompt for the generation + */ + negative_prompt?: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * The URL of the image to edit. + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Strength + * + * Strength of noising process for inpainting + */ + strength?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. 
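+ * + * A minimal inpainting sketch; the URLs are placeholders, and `mask_url` and `num_inference_steps` are defined below: + * + * @example + * const inpaintInput: SchemaQwenImageEditInpaintInput = { + *   prompt: 'Replace the masked area with a red sofa', + *   image_url: 'https://example.com/room.png', + *   mask_url: 'https://example.com/mask.png', + *   strength: 0.8, // how strongly the masked region is re-noised + *   seed: 42, // fixed for reproducibility + * }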
+ * + */ + seed?: number + /** + * Mask URL + * + * The URL of the mask for inpainting + */ + mask_url: string + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * MakeupApplicationOutput + */ +export type SchemaImageAppsV2MakeupApplicationOutput = { + /** + * Images + * + * Portrait with applied makeup + */ + images: Array + /** + * Inference Time Ms + * + * Total inference time in milliseconds + */ + inference_time_ms: number +} + +/** + * MakeupApplicationInput + */ +export type SchemaImageAppsV2MakeupApplicationInput = { + aspect_ratio?: SchemaAspectRatio + /** + * Intensity + */ + intensity?: 'light' | 'medium' | 'heavy' | 'dramatic' + /** + * Makeup Style + */ + makeup_style?: + | 'natural' + | 'glamorous' + | 'smoky_eyes' + | 'bold_lips' + | 'no_makeup' + | 'remove_makeup' + | 'dramatic' + | 'bridal' + | 'professional' + | 'korean_style' + | 'artistic' + /** + * Image Url + * + * Portrait image URL for makeup application + */ + image_url: string +} + +/** + * AspectRatio + * + * Aspect ratio model that calculates 4K resolution dimensions + */ +export type SchemaAspectRatio = { + /** + * Ratio + * + * Aspect ratio for 4K resolution output + */ + ratio?: '1:1' | '16:9' | '9:16' | '4:3' | '3:4' +} + +/** + * AgeModifyOutput + */ +export type SchemaImageAppsV2AgeModifyOutput = { + /** + * Images + * + * Portrait with modified age + */ + images: Array + /** + * Inference Time Ms + * + * Total inference time in milliseconds + */ + inference_time_ms: number +} + +/** + * AgeModifyInput + */ +export type SchemaImageAppsV2AgeModifyInput = { + /** + * Image Url + * + * Portrait image URL for age modification + */ + image_url: string + aspect_ratio?: SchemaAspectRatio + /** + * Preserve Identity + */ + preserve_identity?: boolean + /** + * Target Age + */ + target_age?: number +} + +/** + * CityTeleportOutput + */ +export type SchemaImageAppsV2CityTeleportOutput = { + /** + * Images + * + * Person teleported to city location + */ + images: Array + /** + * Inference Time Ms + * + * Total inference time in milliseconds + */ + inference_time_ms: number +} + +/** + * CityTeleportInput + */ +export type SchemaImageAppsV2CityTeleportInput = { + /** + * City Image Url + * + * Optional city background image URL. When provided, the person will be blended into this custom scene. 
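+ * + * A sketch of teleporting by city name, with `city_image_url` omitted so the named city is used (placeholder URL): + * + * @example + * const teleportInput: SchemaImageAppsV2CityTeleportInput = { + *   person_image_url: 'https://example.com/person.png', + *   city_name: 'Tokyo', // used because no custom background is provided + *   photo_shot: 'medium_shot', + *   camera_angle: 'eye_level', + * }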
+ */ + city_image_url?: string | unknown + aspect_ratio?: SchemaAspectRatio + /** + * City Name + * + * City name (used when city_image_url is not provided) + */ + city_name: string + /** + * Photo Shot + * + * Type of photo shot + */ + photo_shot?: + | 'extreme_close_up' + | 'close_up' + | 'medium_close_up' + | 'medium_shot' + | 'medium_long_shot' + | 'long_shot' + | 'extreme_long_shot' + | 'full_body' + /** + * Camera Angle + * + * Camera angle for the shot + */ + camera_angle?: + | 'eye_level' + | 'low_angle' + | 'high_angle' + | 'dutch_angle' + | 'birds_eye_view' + | 'worms_eye_view' + | 'overhead' + | 'side_angle' + /** + * Person Image Url + * + * Person photo URL + */ + person_image_url: string +} + +/** + * ExpressionChangeOutput + */ +export type SchemaImageAppsV2ExpressionChangeOutput = { + /** + * Images + * + * Portrait with changed expression + */ + images: Array + /** + * Inference Time Ms + * + * Total inference time in milliseconds + */ + inference_time_ms: number +} + +/** + * ExpressionChangeInput + */ +export type SchemaImageAppsV2ExpressionChangeInput = { + aspect_ratio?: SchemaAspectRatio + /** + * Target Expression + */ + target_expression?: + | 'smile' + | 'surprise' + | 'glare' + | 'panic' + | 'shyness' + | 'laugh' + | 'cry' + | 'angry' + | 'sad' + | 'happy' + | 'excited' + | 'shocked' + | 'confused' + | 'focused' + | 'dreamy' + | 'serious' + | 'playful' + | 'mysterious' + | 'confident' + | 'thoughtful' + /** + * Image Url + * + * Portrait image URL for expression change + */ + image_url: string +} + +/** + * HairChangeOutput + */ +export type SchemaImageAppsV2HairChangeOutput = { + /** + * Images + * + * Portrait with changed hair + */ + images: Array + /** + * Inference Time Ms + * + * Total inference time in milliseconds + */ + inference_time_ms: number +} + +/** + * HairChangeInput + */ +export type SchemaImageAppsV2HairChangeInput = { + /** + * Target Hairstyle + */ + target_hairstyle?: + | 'short_hair' + | 'medium_long_hair' + | 'long_hair' + | 'curly_hair' + | 'wavy_hair' + | 'high_ponytail' + | 'bun' + | 'bob_cut' + | 'pixie_cut' + | 'braids' + | 'straight_hair' + | 'afro' + | 'dreadlocks' + | 'buzz_cut' + | 'mohawk' + | 'bangs' + | 'side_part' + | 'middle_part' + aspect_ratio?: SchemaAspectRatio + /** + * Hair Color + */ + hair_color?: + | 'black' + | 'dark_brown' + | 'light_brown' + | 'blonde' + | 'platinum_blonde' + | 'red' + | 'auburn' + | 'gray' + | 'silver' + | 'blue' + | 'green' + | 'purple' + | 'pink' + | 'rainbow' + | 'natural' + | 'highlights' + | 'ombre' + | 'balayage' + /** + * Image Url + * + * Portrait image URL for hair change + */ + image_url: string +} + +/** + * HeadshotOutput + */ +export type SchemaImageAppsV2HeadshotPhotoOutput = { + /** + * Images + * + * Professional headshot image + */ + images: Array + /** + * Inference Time Ms + * + * Total inference time in milliseconds + */ + inference_time_ms: number +} + +/** + * HeadshotInput + */ +export type SchemaImageAppsV2HeadshotPhotoInput = { + aspect_ratio?: SchemaAspectRatio + /** + * Background Style + */ + background_style?: 'professional' | 'corporate' | 'clean' | 'gradient' + /** + * Image Url + * + * Portrait image URL to convert to professional headshot + */ + image_url: string +} + +/** + * ObjectRemovalOutput + */ +export type SchemaImageAppsV2ObjectRemovalOutput = { + /** + * Images + * + * Image with object removed + */ + images: Array + /** + * Inference Time Ms + * + * Total inference time in milliseconds + */ + inference_time_ms: number +} + +/** + * ObjectRemovalInput + 
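* + * A minimal sketch (placeholder URL): + * + * @example + * const removalInput: SchemaImageAppsV2ObjectRemovalInput = { + *   image_url: 'https://example.com/street.png', + *   object_to_remove: 'the parked car on the left', + * } +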
*/ +export type SchemaImageAppsV2ObjectRemovalInput = { + aspect_ratio?: SchemaAspectRatio + /** + * Object To Remove + * + * Object to remove + */ + object_to_remove: string + /** + * Image Url + * + * Image URL containing object to remove + */ + image_url: string +} + +/** + * PerspectiveOutput + */ +export type SchemaImageAppsV2PerspectiveOutput = { + /** + * Images + * + * Image with changed perspective + */ + images: Array + /** + * Inference Time Ms + * + * Total inference time in milliseconds + */ + inference_time_ms: number +} + +/** + * PerspectiveInput + */ +export type SchemaImageAppsV2PerspectiveInput = { + aspect_ratio?: SchemaAspectRatio + /** + * Target Perspective + */ + target_perspective?: + | 'front' + | 'left_side' + | 'right_side' + | 'back' + | 'top_down' + | 'bottom_up' + | 'birds_eye' + | 'three_quarter_left' + | 'three_quarter_right' + /** + * Image Url + * + * Image URL for perspective change + */ + image_url: string +} + +/** + * PhotographyEffectsOutput + */ +export type SchemaImageAppsV2PhotographyEffectsOutput = { + /** + * Images + * + * Image with photography effects + */ + images: Array + /** + * Inference Time Ms + * + * Total inference time in milliseconds + */ + inference_time_ms: number +} + +/** + * PhotographyEffectsInput + */ +export type SchemaImageAppsV2PhotographyEffectsInput = { + /** + * Effect Type + */ + effect_type?: + | 'film' + | 'vintage_film' + | 'portrait_photography' + | 'fashion_photography' + | 'street_photography' + | 'sepia_tone' + | 'film_grain' + | 'light_leaks' + | 'vignette_effect' + | 'instant_camera' + | 'golden_hour' + | 'dramatic_lighting' + | 'soft_focus' + | 'bokeh_effect' + | 'high_contrast' + | 'double_exposure' + aspect_ratio?: SchemaAspectRatio + /** + * Image Url + * + * Image URL for photography effects + */ + image_url: string +} + +/** + * PortraitOutput + */ +export type SchemaImageAppsV2PortraitEnhanceOutput = { + /** + * Images + * + * Enhanced portrait + */ + images: Array + /** + * Inference Time Ms + * + * Total inference time in milliseconds + */ + inference_time_ms: number +} + +/** + * PortraitInput + */ +export type SchemaImageAppsV2PortraitEnhanceInput = { + aspect_ratio?: SchemaAspectRatio + /** + * Image Url + * + * Portrait image URL to enhance + */ + image_url: string +} + +/** + * PhotoRestorationOutput + */ +export type SchemaImageAppsV2PhotoRestorationOutput = { + /** + * Images + * + * Restored photo + */ + images: Array + /** + * Inference Time Ms + * + * Total inference time in milliseconds + */ + inference_time_ms: number +} + +/** + * PhotoRestorationInput + */ +export type SchemaImageAppsV2PhotoRestorationInput = { + /** + * Enhance Resolution + */ + enhance_resolution?: boolean + aspect_ratio?: SchemaAspectRatio + /** + * Remove Scratches + */ + remove_scratches?: boolean + /** + * Fix Colors + */ + fix_colors?: boolean + /** + * Image Url + * + * Old or damaged photo URL to restore + */ + image_url: string +} + +/** + * StyleTransferOutput + */ +export type SchemaImageAppsV2StyleTransferOutput = { + /** + * Images + * + * Image with transferred style + */ + images: Array + /** + * Inference Time Ms + * + * Total inference time in milliseconds + */ + inference_time_ms: number +} + +/** + * StyleTransferInput + */ +export type SchemaImageAppsV2StyleTransferInput = { + /** + * Target Style + */ + target_style?: + | 'anime_character' + | 'cartoon_3d' + | 'hand_drawn_animation' + | 'cyberpunk_future' + | 'anime_game_style' + | 'comic_book_animation' + | 'animated_series' + | 'cartoon_animation' 
+ | 'lofi_aesthetic' + | 'cottagecore' + | 'dark_academia' + | 'y2k' + | 'vaporwave' + | 'liminal_space' + | 'weirdcore' + | 'dreamcore' + | 'synthwave' + | 'outrun' + | 'photorealistic' + | 'hyperrealistic' + | 'digital_art' + | 'concept_art' + | 'impressionist' + | 'anime' + | 'pixel_art' + | 'claymation' + aspect_ratio?: SchemaAspectRatio + /** + * Style Reference Image Url + * + * Optional reference image URL. When provided, the style will be inferred from this image instead of the selected preset style. + */ + style_reference_image_url?: string | unknown + /** + * Image Url + * + * Image URL for style transfer + */ + image_url: string +} + +/** + * RelightingOutput + */ +export type SchemaImageAppsV2RelightingOutput = { + /** + * Images + * + * Image with new lighting + */ + images: Array + /** + * Inference Time Ms + * + * Total inference time in milliseconds + */ + inference_time_ms: number +} + +/** + * RelightingInput + */ +export type SchemaImageAppsV2RelightingInput = { + aspect_ratio?: SchemaAspectRatio + /** + * Lighting Style + */ + lighting_style?: + | 'natural' + | 'studio' + | 'golden_hour' + | 'blue_hour' + | 'dramatic' + | 'soft' + | 'hard' + | 'backlight' + | 'side_light' + | 'front_light' + | 'rim_light' + | 'sunset' + | 'sunrise' + | 'neon' + | 'candlelight' + | 'moonlight' + | 'spotlight' + | 'ambient' + /** + * Image Url + * + * Image URL for relighting + */ + image_url: string +} + +/** + * TextureTransformOutput + */ +export type SchemaImageAppsV2TextureTransformOutput = { + /** + * Images + * + * Image with transformed texture + */ + images: Array + /** + * Inference Time Ms + * + * Total inference time in milliseconds + */ + inference_time_ms: number +} + +/** + * TextureTransformInput + */ +export type SchemaImageAppsV2TextureTransformInput = { + /** + * Target Texture + */ + target_texture?: + | 'cotton' + | 'denim' + | 'wool' + | 'felt' + | 'wood' + | 'leather' + | 'velvet' + | 'stone' + | 'marble' + | 'ceramic' + | 'concrete' + | 'brick' + | 'clay' + | 'foam' + | 'glass' + | 'metal' + | 'silk' + | 'fabric' + | 'crystal' + | 'rubber' + | 'plastic' + | 'lace' + aspect_ratio?: SchemaAspectRatio + /** + * Image Url + * + * Image URL for texture transformation + */ + image_url: string +} + +/** + * VirtualTryOnOutput + */ +export type SchemaImageAppsV2VirtualTryOnOutput = { + /** + * Images + * + * Person wearing the virtual clothing + */ + images: Array + /** + * Inference Time Ms + * + * Total inference time in milliseconds + */ + inference_time_ms: number +} + +/** + * VirtualTryOnInput + */ +export type SchemaImageAppsV2VirtualTryOnInput = { + /** + * Preserve Pose + */ + preserve_pose?: boolean + aspect_ratio?: SchemaAspectRatio + /** + * Clothing Image Url + * + * Clothing photo URL + */ + clothing_image_url: string + /** + * Person Image Url + * + * Person photo URL + */ + person_image_url: string +} + +/** + * ProductPhotographyOutput + */ +export type SchemaImageAppsV2ProductPhotographyOutput = { + /** + * Images + * + * Professional studio product photography + */ + images: Array + /** + * Inference Time Ms + * + * Total inference time in milliseconds + */ + inference_time_ms: number +} + +/** + * ProductPhotographyInput + */ +export type SchemaImageAppsV2ProductPhotographyInput = { + aspect_ratio?: SchemaAspectRatio + /** + * Product Image Url + * + * Image URL of the product to create professional studio photography + */ + product_image_url: string +} + +/** + * ProductHoldingOutput + */ +export type SchemaImageAppsV2ProductHoldingOutput = { + /** + * 
Images + * + * Person holding the product naturally + */ + images: Array + /** + * Inference Time Ms + * + * Total inference time in milliseconds + */ + inference_time_ms: number +} + +/** + * ProductHoldingInput + */ +export type SchemaImageAppsV2ProductHoldingInput = { + aspect_ratio?: SchemaAspectRatio + /** + * Product Image Url + * + * Image URL of the product to be held by the person + */ + product_image_url: string + /** + * Person Image Url + * + * Image URL of the person who will hold the product + */ + person_image_url: string +} + +/** + * SeedVRImageOutput + */ +export type SchemaSeedvrUpscaleImageOutput = { + image: SchemaImageFile + /** + * Seed + * + * The random seed used for the generation process. + */ + seed: number +} + +/** + * SeedVRImageInput + */ +export type SchemaSeedvrUpscaleImageInput = { + /** + * Upscale Mode + * + * The mode to use for the upscale. If 'target', the upscale factor will be calculated based on the target resolution. If 'factor', the upscale factor will be used directly. + */ + upscale_mode?: 'target' | 'factor' + /** + * Noise Scale + * + * The noise scale to use for the generation process. + */ + noise_scale?: number + /** + * Output Format + * + * The format of the output image. + */ + output_format?: 'png' | 'jpg' | 'webp' + /** + * Target Resolution + * + * The target resolution to upscale to when `upscale_mode` is `target`. + */ + target_resolution?: '720p' | '1080p' | '1440p' | '2160p' + /** + * Image Url + * + * The input image to be processed + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Upscale Factor + * + * Upscaling factor to be used. Will multiply the dimensions with this factor when `upscale_mode` is `factor`. + */ + upscale_factor?: number + /** + * Seed + * + * The random seed used for the generation process. + */ + seed?: number | unknown +} + +/** + * QwenImageOutput + */ +export type SchemaQwenImageEditPlusOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Timings + */ + timings: { + [key: string]: number + } +} + +/** + * BaseQwenEditImagePlusInput + */ +export type SchemaQwenImageEditPlusInput = { + /** + * Prompt + * + * The prompt to generate the image with + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * Acceleration level for image generation. Options: 'none', 'regular'. Higher acceleration increases speed. 'regular' balances speed and quality. + */ + acceleration?: 'none' | 'regular' + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Output Format + * + * The format of the generated image. 
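+ * + * A hypothetical multi-image edit; the URLs are placeholders and `image_urls` is defined later in this type: + * + * @example + * const plusInput: SchemaQwenImageEditPlusInput = { + *   prompt: 'Place the subject from the first image into the scene from the second', + *   image_urls: ['https://example.com/subject.png', 'https://example.com/scene.png'], + *   output_format: 'png', + * }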
+ */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Guidance scale + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Image URLs + * + * The URLs of the images to edit. + */ + image_urls: Array + /** + * Negative Prompt + * + * The negative prompt for the generation + */ + negative_prompt?: string + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * ImageToImageOutput + * + * Output for image editing + */ +export type SchemaWan25PreviewImageToImageOutput = { + /** + * Images + * + * The edited images + */ + images: Array + /** + * Seeds + * + * The seeds used for each generated image + */ + seeds: Array + /** + * Actual Prompt + * + * The original prompt (prompt expansion is not available for image editing) + */ + actual_prompt?: string +} + +/** + * ImageToImageInput + * + * Input for image editing + */ +export type SchemaWan25PreviewImageToImageInput = { + /** + * Prompt + * + * The text prompt describing how to edit the image. Max 2000 characters. + */ + prompt: string + /** + * Num Images + * + * Number of images to generate. Values from 1 to 4. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. Width and height must be between 384 and 1440 pixels. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Image Urls + * + * URLs of images to edit. For single-image editing, provide 1 URL. For multi-reference generation, provide up to 2 URLs. If more than 2 URLs are provided, only the first 2 will be used. + */ + image_urls: Array + /** + * Negative Prompt + * + * Negative prompt to describe content to avoid. Max 500 characters. + */ + negative_prompt?: string +} + +/** + * QwenImageOutput + */ +export type SchemaQwenImageEditImageToImageOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * BaseQwenEditImg2ImgInput + */ +export type SchemaQwenImageEditImageToImageInput = { + /** + * Prompt + * + * The prompt to generate the image with + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. 
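+ * + * A sketch of an image-to-image edit (placeholder URL); `strength`, defined below, trades fidelity to the input against adherence to the prompt: + * + * @example + * const img2imgInput: SchemaQwenImageEditImageToImageInput = { + *   prompt: 'Make it look like an oil painting', + *   image_url: 'https://example.com/photo.png', + *   strength: 0.6, // lower values preserve more of the original + *   image_size: 'landscape_4_3', + * }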
+ */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * Acceleration level for image generation. Options: 'none', 'regular', 'high'. Higher acceleration increases speed. 'regular' balances speed and quality. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * The URL of the image to edit. + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Strength + * + * Strength of the image-to-image transformation. Lower values preserve more of the original image. + */ + strength?: number + /** + * Guidance scale + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Negative Prompt + * + * The negative prompt for the generation + */ + negative_prompt?: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean +} + +/** + * LucidFluxResponse + */ +export type SchemaLucidfluxOutput = { + /** + * Image + * + * Generated image + */ + image: SchemaImage + /** + * Seed + * + * Seed used for random number generation + */ + seed: number +} + +/** + * LucidFluxRequest + */ +export type SchemaLucidfluxInput = { + /** + * Prompt + * + * The prompt to edit the image. + */ + prompt: string + /** + * Guidance + * + * The guidance to use for the diffusion process. + */ + guidance?: number + /** + * Target Height + * + * The height of the output image. + */ + target_height?: number + /** + * Image URL + * + * The URL of the image to edit. + */ + image_url: string + /** + * Target Width + * + * The width of the output image. + */ + target_width?: number + /** + * Num Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: number + /** + * Seed + * + * Seed used for random number generation + */ + seed?: number +} + +/** + * QwenImageOutput + */ +export type SchemaQwenImageEditPlusLoraOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Timings + */ + timings: { + [key: string]: number + } +} + +/** + * BaseQwenEditImagePlusLoRAInput + */ +export type SchemaQwenImageEditPlusLoraInput = { + /** + * Prompt + * + * The prompt to generate the image with + */ + prompt: string + /** + * Num Images + * + * The number of images to generate.
+ */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. If not provided, the size of the final input image will be used to calculate the size of the output image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * Acceleration level for image generation. Options: 'none', 'regular'. Higher acceleration increases speed. 'regular' balances speed and quality. + */ + acceleration?: 'none' | 'regular' + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use up to 3 LoRAs + * and they will be merged together to generate the final image. + * + */ + loras?: Array + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Guidance scale + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Image URLs + * + * The URLs of the images to edit. + */ + image_urls: Array + /** + * Negative Prompt + * + * The negative prompt for the generation + */ + negative_prompt?: string + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number +} + +/** + * DreamOmni2Response + */ +export type SchemaDreamomni2EditOutput = { + /** + * Image + * + * Generated image + */ + image: SchemaImage +} + +/** + * DreamOmni2Request + */ +export type SchemaDreamomni2EditInput = { + /** + * Prompt + * + * The prompt to edit the image. + */ + prompt: string + /** + * You can use up to 2 images. + * + * List of URLs of input images for editing. + */ + image_urls: Array +} + +/** + * Image2PixelOutput + */ +export type SchemaImage2PixelOutput = { + /** + * Images + * + * The processed pixel-art image (PNG) and the scaled image (PNG). + */ + images: Array + /** + * Num Colors + * + * The number of colors in the processed media. + */ + num_colors: number + /** + * Palette + * + * The palette of the processed media. + */ + palette: Array + /** + * Pixel Scale + * + * The detected pixel scale of the input. + */ + pixel_scale: number +} + +/** + * Image2PixelInput + */ +export type SchemaImage2PixelInput = { + /** + * Cleanup Morph + * + * Apply morphological operations to remove noise. + */ + cleanup_morph?: boolean + /** + * Auto Color Detect + * + * Enable automatic detection of optimal number of colors. + */ + auto_color_detect?: boolean + /** + * Alpha Threshold + * + * Alpha binarization threshold (0-255). + */ + alpha_threshold?: number + /** + * Snap Grid + * + * Align output to the pixel grid. + */ + snap_grid?: boolean + /** + * Fixed Palette + * + * Optional fixed color palette as hex strings (e.g., ['#000000', '#ffffff']). + */ + fixed_palette?: Array + /** + * Scale + * + * Force a specific pixel scale. If None, auto-detect.
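+ * + * A sketch of a pixel-art conversion with a fixed palette; the URL is a placeholder and the other fields appear in this schema: + * + * @example + * const pixelInput: SchemaImage2PixelInput = { + *   image_url: 'https://example.com/sprite.png', + *   fixed_palette: ['#000000', '#ffffff', '#ff0044'], + *   snap_grid: true, // align output to the detected pixel grid + *   transparent_background: true, + * }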
+ */ + scale?: number + /** + * Cleanup Jaggy + * + * Remove isolated diagonal pixels (jaggy edge cleanup). + */ + cleanup_jaggy?: boolean + /** + * Trim Borders + * + * Trim borders of the image. + */ + trim_borders?: boolean + /** + * Background Tolerance + * + * Background tolerance (0-255). + */ + background_tolerance?: number + /** + * Detect Method + * + * Scale detection method to use. + */ + detect_method?: 'auto' | 'runs' | 'edge' + /** + * Transparent Background + * + * Remove background of the image. This will check for contiguous color regions from the edges after correction and make them transparent. + */ + transparent_background?: boolean + /** + * Downscale Method + * + * Downscaling method to produce the pixel-art output. + */ + downscale_method?: + | 'dominant' + | 'median' + | 'mode' + | 'mean' + | 'content-adaptive' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Image Url + * + * The image URL to process into improved pixel art + */ + image_url: string + /** + * Background Mode + * + * Controls where to flood-fill from when removing the background. + */ + background_mode?: 'edges' | 'corners' | 'midpoints' + /** + * Max Colors + * + * Maximum number of colors in the output palette. Set None to disable limit. + */ + max_colors?: number + /** + * Dominant Color Threshold + * + * Dominant color threshold (0.0-1.0). + */ + dominant_color_threshold?: number +} + +/** + * ReveEditOutput + * + * Output for Reve image editing + */ +export type SchemaReveEditOutput = { + /** + * Images + * + * The edited images + */ + images: Array +} + +/** + * ReveEditInput + * + * Input for Reve image editing + */ +export type SchemaReveEditInput = { + /** + * Prompt + * + * The text description of how to edit the provided image. + */ + prompt: string + /** + * Number of Images + * + * Number of images to generate + */ + num_images?: number + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Output Format + * + * Output format for the generated image. + */ + output_format?: 'png' | 'jpeg' | 'webp' + /** + * Reference Image URL + * + * URL of the reference image to edit. Must be publicly accessible or base64 data URI. Supports PNG, JPEG, WebP, AVIF, and HEIF formats. + */ + image_url: string +} + +/** + * ReveRemixOutput + * + * Output for Reve image remixing + */ +export type SchemaReveRemixOutput = { + /** + * Images + * + * The remixed images + */ + images: Array +} + +/** + * ReveRemixInput + * + * Input for Reve image remixing + */ +export type SchemaReveRemixInput = { + /** + * Prompt + * + * The text description of the desired image. May include XML img tags like 0 to refer to specific images by their index in the image_urls list. + */ + prompt: string + /** + * Number of Images + * + * Number of images to generate + */ + num_images?: number + /** + * Aspect Ratio + * + * The desired aspect ratio of the generated image. If not provided, will be smartly chosen by the model. + */ + aspect_ratio?: '16:9' | '9:16' | '3:2' | '2:3' | '4:3' | '3:4' | '1:1' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Reference Image URLs + * + * List of URLs of reference images. 
Must provide between 1 and 6 images (inclusive). Each image must be less than 10 MB. Supports PNG, JPEG, WebP, AVIF, and HEIF formats. + */ + image_urls: Array + /** + * Output Format + * + * Output format for the generated image. + */ + output_format?: 'png' | 'jpeg' | 'webp' +} + +/** + * EditImageResponseMini + */ +export type SchemaGptImage1MiniEditOutput = { + /** + * Images + * + * The generated images. + */ + images: Array +} + +/** + * EditImageRequestMini + */ +export type SchemaGptImage1MiniEditInput = { + /** + * Prompt + * + * The prompt for image generation + */ + prompt: string + /** + * Number of Images + * + * Number of images to generate + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image + */ + image_size?: 'auto' | '1024x1024' | '1536x1024' | '1024x1536' + /** + * Background + * + * Background for the generated image + */ + background?: 'auto' | 'transparent' | 'opaque' + /** + * Quality + * + * Quality for the generated image + */ + quality?: 'auto' | 'low' | 'medium' | 'high' + /** + * Output Format + * + * Output format for the images + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Image URLs + * + * The URLs of the images to use as a reference for the generation. + */ + image_urls: Array +} + +/** + * ChronoEditOutput + * + * Unified output model for all ChronoEdit operations + */ +export type SchemaChronoEditOutput = { + /** + * Prompt + * + * The prompt used for the inference. + */ + prompt: string + /** + * Images + * + * The edited image. + */ + images: Array + /** + * Seed + * + * The seed for the inference. + */ + seed: number +} + +/** + * ChronoEditInput + * + * Input model for ChronoEdit standard editing operations + */ +export type SchemaChronoEditInput = { + /** + * Prompt + * + * The prompt to edit the image. + */ + prompt: string + /** + * Resolution + * + * The resolution of the output image. + */ + resolution?: '480p' | '720p' + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Output Format + * + * The format of the output image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Image URL + * + * The image to edit. + */ + image_url: string + /** + * Turbo Mode + * + * Enable turbo mode for faster inference. + */ + turbo_mode?: boolean + /** + * Number of Temporal Reasoning Steps + * + * The number of temporal reasoning steps to perform. + */ + num_temporal_reasoning_steps?: number + /** + * Sync Mode + * + * Whether to return the image in sync mode. + */ + sync_mode?: boolean + /** + * Guidance Scale + * + * The guidance scale for the inference. + */ + guidance_scale?: number + /** + * Number of Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Enable Temporal Reasoning + * + * Whether to enable temporal reasoning. + */ + enable_temporal_reasoning?: boolean + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean + /** + * Seed + * + * The seed for the inference. + */ + seed?: number +} + +/** + * Emu35EditOutput + */ +export type SchemaEmu35ImageEditImageOutput = { + /** + * Images + * + * The edited image. + */ + images: Array + /** + * Seed + * + * The seed for the inference.
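+ * + * For the ChronoEdit input defined above, a minimal sketch with temporal reasoning enabled (placeholder URL; step counts chosen for illustration): + * + * @example + * const chronoInput: SchemaChronoEditInput = { + *   prompt: 'Open the laptop on the desk', + *   image_url: 'https://example.com/desk.png', + *   enable_temporal_reasoning: true, + *   num_temporal_reasoning_steps: 2, + *   turbo_mode: true, // faster inference + * }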
+ */ + seed: number +} + +/** + * Emu35ImageEditInput + */ +export type SchemaEmu35ImageEditImageInput = { + /** + * Prompt + * + * The prompt to edit the image. + */ + prompt: string + /** + * Resolution + * + * The resolution of the output image. + */ + resolution?: '480p' | '720p' + /** + * Aspect Ratio + * + * The aspect ratio of the output image. + */ + aspect_ratio?: + | 'auto' + | '21:9' + | '16:9' + | '4:3' + | '3:2' + | '1:1' + | '2:3' + | '3:4' + | '9:16' + | '9:21' + /** + * Output Format + * + * The format of the output image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Image URL + * + * The image to edit. + */ + image_url: string + /** + * Sync Mode + * + * Whether to return the image in sync mode. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * The seed for the inference. + */ + seed?: number +} + +/** + * Output + */ +export type SchemaFluxVisionUpscalerOutput = { + /** + * The URL of the generated image. + */ + image: SchemaImage + /** + * Caption + * + * The VLM-generated caption describing the upscaled image. + */ + caption: string + /** + * Seed + * + * The seed used to generate the image. + */ + seed: number + /** + * Timings + * + * The timings of the different steps in the workflow. + */ + timings: { + [key: string]: number + } +} + +/** + * Input + */ +export type SchemaFluxVisionUpscalerInput = { + /** + * Guidance + * + * CFG/guidance scale (1-4). Controls how closely the model follows the prompt. + */ + guidance?: number + /** + * Creativity + * + * The creativity of the model. The higher the creativity, the more the model will deviate from the original. Refers to the denoise strength of the sampling. + */ + creativity?: number + /** + * Image Url + * + * The URL of the image to upscale. + */ + image_url: string + /** + * Upscale Factor + * + * The upscale factor (1-4x). + */ + upscale_factor?: number + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * The seed to use for the upscale. If not provided, a random seed will be used. + */ + seed?: number | unknown + /** + * Steps + * + * Number of inference steps (4-50). + */ + steps?: number +} + +/** + * OutpaintOutput + */ +export type SchemaImageAppsV2OutpaintOutput = { + /** + * Images + * + * Outpainted image with extended scene + */ + images: Array +} + +/** + * OutpaintInput + */ +export type SchemaImageAppsV2OutpaintInput = { + /** + * Prompt + * + * Optional prompt to guide the outpainting. If provided, it will be appended to the base outpaint instruction. Example: 'with a beautiful sunset in the background' + */ + prompt?: string + /** + * Expand Right + * + * Number of pixels to add as black margin on the right side (0-700). + */ + expand_right?: number + /** + * Num Images + * + * Number of images to generate. + */ + num_images?: number + /** + * Zoom Out Percentage + * + * Percentage to zoom out the image. If set, the image will be scaled down by this percentage and black margins will be added to maintain original size. Example: 50 means the image will be 50% of original size with black margins filling the rest. + */ + zoom_out_percentage?: number + /** + * Output Format + * + * The format of the output image. 
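+ * + * A sketch of extending a scene to the right (placeholder URL; margins are limited to 0-700 px per the field docs above): + * + * @example + * const outpaintInput: SchemaImageAppsV2OutpaintInput = { + *   image_url: 'https://example.com/landscape.png', + *   prompt: 'with a beautiful sunset in the background', + *   expand_right: 400, // black margin to be filled by the model + * }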
+ */ + output_format?: 'png' | 'jpeg' | 'jpg' | 'webp' + /** + * Image Url + * + * Image URL to outpaint + */ + image_url: string + /** + * Sync Mode + * + * If True, the function will wait for the image to be generated and uploaded before returning the response. If False, the function will return immediately and the image will be generated asynchronously. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Expand Left + * + * Number of pixels to add as black margin on the left side (0-700). + */ + expand_left?: number + /** + * Expand Bottom + * + * Number of pixels to add as black margin on the bottom side (0-700). + */ + expand_bottom?: number + /** + * Expand Top + * + * Number of pixels to add as black margin on the top side (0-700). + */ + expand_top?: number +} + +/** + * ReveFastEditOutput + * + * Output for Reve fast image editing + */ +export type SchemaReveFastEditOutput = { + /** + * Images + * + * The edited images + */ + images: Array +} + +/** + * ReveFastEditInput + * + * Input for Reve fast image editing + */ +export type SchemaReveFastEditInput = { + /** + * Prompt + * + * The text description of how to edit the provided image. + */ + prompt: string + /** + * Number of Images + * + * Number of images to generate + */ + num_images?: number + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Output Format + * + * Output format for the generated image. + */ + output_format?: 'png' | 'jpeg' | 'webp' + /** + * Reference Image URL + * + * URL of the reference image to edit. Must be publicly accessible or base64 data URI. Supports PNG, JPEG, WebP, AVIF, and HEIF formats. + */ + image_url: string +} + +/** + * ReveRemixOutput + * + * Output for Reve image remixing + */ +export type SchemaReveFastRemixOutput = { + /** + * Images + * + * The remixed images + */ + images: Array +} + +/** + * ReveRemixInput + * + * Input for Reve image remixing + */ +export type SchemaReveFastRemixInput = { + /** + * Prompt + * + * The text description of the desired image. May include XML img tags like 0 to refer to specific images by their index in the image_urls list. + */ + prompt: string + /** + * Number of Images + * + * Number of images to generate + */ + num_images?: number + /** + * Aspect Ratio + * + * The desired aspect ratio of the generated image. If not provided, will be smartly chosen by the model. + */ + aspect_ratio?: '16:9' | '9:16' | '3:2' | '2:3' | '4:3' | '3:4' | '1:1' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Reference Image URLs + * + * List of URLs of reference images. Must provide between 1 and 6 images (inclusive). Each image must be less than 10 MB. Supports PNG, JPEG, WebP, AVIF, and HEIF formats. + */ + image_urls: Array + /** + * Output Format + * + * Output format for the generated image. 
+ */ + output_format?: 'png' | 'jpeg' | 'webp' +} + +/** + * AddBackgroundOutput + */ +export type SchemaQwenImageEditPlusLoraGalleryAddBackgroundOutput = { + /** + * Images + * + * The generated/edited images + */ + images: Array + /** + * Seed + * + * The seed used for generation + */ + seed: number +} + +/** + * AddBackgroundInput + * + * Input model for Add Background endpoint - Remove white background and add a realistic scene + */ +export type SchemaQwenImageEditPlusLoraGalleryAddBackgroundInput = { + /** + * Prompt + * + * Describe the background/scene you want to add behind the object. The model will remove the white background and add the specified environment. + */ + prompt?: string + /** + * Num Images + * + * Number of images to generate + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. If not provided, the size of the final input image will be used. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + | unknown + /** + * Acceleration + * + * Acceleration level for image generation. 'regular' balances speed and quality. + */ + acceleration?: 'none' | 'regular' + /** + * Lora Scale + * + * The scale factor for the LoRA model. Controls the strength of the LoRA effect. + */ + lora_scale?: number + /** + * Output Format + * + * The format of the output image + */ + output_format?: 'png' | 'jpeg' | 'webp' + /** + * Enable Safety Checker + * + * Whether to enable the safety checker for the generated image. + */ + enable_safety_checker?: boolean + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and won't be saved in history. + */ + sync_mode?: boolean + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt. + */ + guidance_scale?: number + /** + * Seed + * + * Random seed for reproducibility. Same seed with same prompt will produce same result. + */ + seed?: number | unknown + /** + * Image URLs + * + * The URLs of the images to edit. Provide an image with a white or clean background. + */ + image_urls: Array + /** + * Negative Prompt + * + * The negative prompt for the generation + */ + negative_prompt?: string + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * FaceToFullPortraitOutput + */ +export type SchemaQwenImageEditPlusLoraGalleryFaceToFullPortraitOutput = { + /** + * Images + * + * The generated/edited images + */ + images: Array + /** + * Seed + * + * The seed used for generation + */ + seed: number +} + +/** + * FaceToFullPortraitInput + * + * Input model for Face to Full Portrait endpoint - Generate full portrait from a cropped face image + */ +export type SchemaQwenImageEditPlusLoraGalleryFaceToFullPortraitInput = { + /** + * Prompt + * + * Describe the full portrait you want to generate from the face. Include clothing, setting, pose, and style details. + */ + prompt?: string + /** + * Num Images + * + * Number of images to generate + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. If not provided, the size of the final input image will be used. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + | unknown + /** + * Acceleration + * + * Acceleration level for image generation. 'regular' balances speed and quality. 
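+ * + * A minimal face-to-full-portrait sketch for this schema (placeholder URL; `image_urls` is defined below): + * + * @example + * const portraitInput: SchemaQwenImageEditPlusLoraGalleryFaceToFullPortraitInput = { + *   prompt: 'Full-body portrait in a tailored navy suit, studio backdrop', + *   image_urls: ['https://example.com/face-crop.png'], + * }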
+ */ + acceleration?: 'none' | 'regular' + /** + * Lora Scale + * + * The scale factor for the LoRA model. Controls the strength of the LoRA effect. + */ + lora_scale?: number + /** + * Output Format + * + * The format of the output image + */ + output_format?: 'png' | 'jpeg' | 'webp' + /** + * Enable Safety Checker + * + * Whether to enable the safety checker for the generated image. + */ + enable_safety_checker?: boolean + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and won't be saved in history. + */ + sync_mode?: boolean + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt. + */ + guidance_scale?: number + /** + * Seed + * + * Random seed for reproducibility. Same seed with same prompt will produce same result. + */ + seed?: number | unknown + /** + * Image URLs + * + * The URL of the cropped face image. Provide a close-up face photo. + */ + image_urls: Array + /** + * Negative Prompt + * + * The negative prompt for the generation + */ + negative_prompt?: string + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * GroupPhotoOutput + */ +export type SchemaQwenImageEditPlusLoraGalleryGroupPhotoOutput = { + /** + * Images + * + * The generated/edited images + */ + images: Array + /** + * Seed + * + * The seed used for generation + */ + seed: number +} + +/** + * GroupPhotoInput + * + * Input model for Group Photo endpoint - Create composite group photos with vintage/retro style + */ +export type SchemaQwenImageEditPlusLoraGalleryGroupPhotoInput = { + /** + * Prompt + * + * Describe the group photo scene, setting, and style. The model will maintain character consistency and add vintage effects like grain, blur, and retro filters. + */ + prompt?: string + /** + * Num Images + * + * Number of images to generate + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. If not provided, the size of the final input image will be used. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + | unknown + /** + * Acceleration + * + * Acceleration level for image generation. 'regular' balances speed and quality. + */ + acceleration?: 'none' | 'regular' + /** + * Lora Scale + * + * The scale factor for the LoRA model. Controls the strength of the LoRA effect. + */ + lora_scale?: number + /** + * Output Format + * + * The format of the output image + */ + output_format?: 'png' | 'jpeg' | 'webp' + /** + * Enable Safety Checker + * + * Whether to enable the safety checker for the generated image. + */ + enable_safety_checker?: boolean + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and won't be saved in history. + */ + sync_mode?: boolean + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt. + */ + guidance_scale?: number + /** + * Seed + * + * Random seed for reproducibility. Same seed with same prompt will produce same result. + */ + seed?: number | unknown + /** + * Image URLs + * + * The URLs of the images to combine into a group photo. Provide 2 or more individual portrait images. + */ + image_urls: Array + /** + * Negative Prompt + * + * The negative prompt for the generation + */ + negative_prompt?: string + /** + * Num Inference Steps + * + * The number of inference steps to perform. 
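+ * + * Illustrative request (editor's sketch, not generated): only fields defined on this schema are used; URLs are placeholders. + * + * @example + * const input: SchemaQwenImageEditPlusLoraGalleryGroupPhotoInput = { prompt: 'vintage 1970s group portrait with film grain', image_urls: ['https://example.com/person-a.png', 'https://example.com/person-b.png'] } // two or more portraits, per the image_urls description above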
+ */ + num_inference_steps?: number +} + +/** + * IntegrateProductOutput + */ +export type SchemaQwenImageEditPlusLoraGalleryIntegrateProductOutput = { + /** + * Images + * + * The generated/edited images + */ + images: Array + /** + * Seed + * + * The seed used for generation + */ + seed: number +} + +/** + * IntegrateProductInput + * + * Input model for Integrate Product endpoint - Blend and integrate products/elements into backgrounds + */ +export type SchemaQwenImageEditPlusLoraGalleryIntegrateProductInput = { + /** + * Prompt + * + * Describe how to blend and integrate the product/element into the background. The model will automatically correct perspective, lighting and shadows for natural integration. + */ + prompt?: string + /** + * Num Images + * + * Number of images to generate + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. If not provided, the size of the final input image will be used. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + | unknown + /** + * Acceleration + * + * Acceleration level for image generation. 'regular' balances speed and quality. + */ + acceleration?: 'none' | 'regular' + /** + * Lora Scale + * + * The scale factor for the LoRA model. Controls the strength of the LoRA effect. + */ + lora_scale?: number + /** + * Output Format + * + * The format of the output image + */ + output_format?: 'png' | 'jpeg' | 'webp' + /** + * Enable Safety Checker + * + * Whether to enable the safety checker for the generated image. + */ + enable_safety_checker?: boolean + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and won't be saved in history. + */ + sync_mode?: boolean + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt. + */ + guidance_scale?: number + /** + * Seed + * + * Random seed for reproducibility. Same seed with same prompt will produce same result. + */ + seed?: number | unknown + /** + * Image URLs + * + * The URL of the image with product to integrate into background. + */ + image_urls: Array + /** + * Negative Prompt + * + * The negative prompt for the generation + */ + negative_prompt?: string + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * NextSceneOutput + */ +export type SchemaQwenImageEditPlusLoraGalleryNextSceneOutput = { + /** + * Images + * + * The generated/edited images + */ + images: Array + /** + * Seed + * + * The seed used for generation + */ + seed: number +} + +/** + * NextSceneInput + * + * Input model for Next Scene endpoint - Create cinematic shot progressions and scene transitions + */ +export type SchemaQwenImageEditPlusLoraGalleryNextSceneInput = { + /** + * Prompt + * + * Describe the camera movement, framing change, or scene transition. Start with 'Next Scene:' for best results. Examples: camera movements (dolly, push-in, pull-back), framing changes (wide to close-up), new elements entering frame. + */ + prompt?: string + /** + * Num Images + * + * Number of images to generate + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. If not provided, the size of the final input image will be used. 
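+ * + * Illustrative values (editor's addition): either a preset name from this union or, assuming SchemaImageSize is a width/height object, explicit dimensions. + * + * @example + * image_size: 'landscape_16_9' + * @example + * image_size: { width: 1280, height: 720 } // assumes SchemaImageSize = { width, height }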
+ */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + | unknown + /** + * Acceleration + * + * Acceleration level for image generation. 'regular' balances speed and quality. + */ + acceleration?: 'none' | 'regular' + /** + * Lora Scale + * + * The scale factor for the LoRA model. Controls the strength of the LoRA effect. + */ + lora_scale?: number + /** + * Output Format + * + * The format of the output image + */ + output_format?: 'png' | 'jpeg' | 'webp' + /** + * Enable Safety Checker + * + * Whether to enable the safety checker for the generated image. + */ + enable_safety_checker?: boolean + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and won't be saved in history. + */ + sync_mode?: boolean + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt. + */ + guidance_scale?: number + /** + * Seed + * + * Random seed for reproducibility. Same seed with same prompt will produce same result. + */ + seed?: number | unknown + /** + * Image URLs + * + * The URL of the image to create the next scene from. + */ + image_urls: Array + /** + * Negative Prompt + * + * The negative prompt for the generation + */ + negative_prompt?: string + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * RemoveElementOutput + */ +export type SchemaQwenImageEditPlusLoraGalleryRemoveElementOutput = { + /** + * Images + * + * The generated/edited images + */ + images: Array + /** + * Seed + * + * The seed used for generation + */ + seed: number +} + +/** + * RemoveElementInput + * + * Input model for Remove Element endpoint - Remove/delete elements (objects, people, text) from the image + */ +export type SchemaQwenImageEditPlusLoraGalleryRemoveElementInput = { + /** + * Prompt + * + * Specify what element(s) to remove from the image (objects, people, text, etc.). The model will cleanly remove the element while maintaining consistency of the rest of the image. + */ + prompt?: string + /** + * Num Images + * + * Number of images to generate + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. If not provided, the size of the final input image will be used. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + | unknown + /** + * Acceleration + * + * Acceleration level for image generation. 'regular' balances speed and quality. + */ + acceleration?: 'none' | 'regular' + /** + * Lora Scale + * + * The scale factor for the LoRA model. Controls the strength of the LoRA effect. + */ + lora_scale?: number + /** + * Output Format + * + * The format of the output image + */ + output_format?: 'png' | 'jpeg' | 'webp' + /** + * Enable Safety Checker + * + * Whether to enable the safety checker for the generated image. + */ + enable_safety_checker?: boolean + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and won't be saved in history. + */ + sync_mode?: boolean + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt. + */ + guidance_scale?: number + /** + * Seed + * + * Random seed for reproducibility. Same seed with same prompt will produce same result. 
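+ * + * Illustrative value (editor's addition): + * + * @example + * seed: 42 // pin the seed so the same removal can be reproduced later, per the description above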
+ */ + seed?: number | unknown + /** + * Image URLs + * + * The URL of the image containing elements to remove. + */ + image_urls: Array + /** + * Negative Prompt + * + * The negative prompt for the generation + */ + negative_prompt?: string + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * RemoveLightingOutput + */ +export type SchemaQwenImageEditPlusLoraGalleryRemoveLightingOutput = { + /** + * Images + * + * The generated/edited images + */ + images: Array + /** + * Seed + * + * The seed used for generation + */ + seed: number +} + +/** + * RemoveLightingInput + * + * Input model for Remove Lighting endpoint - Remove existing lighting and apply soft even lighting + */ +export type SchemaQwenImageEditPlusLoraGalleryRemoveLightingInput = { + /** + * Enable Safety Checker + * + * Whether to enable the safety checker for the generated image. + */ + enable_safety_checker?: boolean + /** + * Num Images + * + * Number of images to generate + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. If not provided, the size of the final input image will be used. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + | unknown + /** + * Acceleration + * + * Acceleration level for image generation. 'regular' balances speed and quality. + */ + acceleration?: 'none' | 'regular' + /** + * Output Format + * + * The format of the output image + */ + output_format?: 'png' | 'jpeg' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and won't be saved in history. + */ + sync_mode?: boolean + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt. + */ + guidance_scale?: number + /** + * Seed + * + * Random seed for reproducibility. Same seed with same prompt will produce same result. + */ + seed?: number | unknown + /** + * Image URLs + * + * The URL of the image with lighting/shadows to remove. + */ + image_urls: Array + /** + * Negative Prompt + * + * The negative prompt for the generation + */ + negative_prompt?: string + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * ShirtDesignOutput + */ +export type SchemaQwenImageEditPlusLoraGalleryShirtDesignOutput = { + /** + * Images + * + * The generated/edited images + */ + images: Array + /** + * Seed + * + * The seed used for generation + */ + seed: number +} + +/** + * ShirtDesignInput + * + * Input model for Shirt Design endpoint - Put designs/graphics on people's shirts + */ +export type SchemaQwenImageEditPlusLoraGalleryShirtDesignInput = { + /** + * Prompt + * + * Describe what design to put on the shirt. The model will apply the design from your input image onto the person's shirt. + */ + prompt?: string + /** + * Num Images + * + * Number of images to generate + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. If not provided, the size of the final input image will be used. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + | unknown + /** + * Acceleration + * + * Acceleration level for image generation. 'regular' balances speed and quality. 
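+ * + * Illustrative request (editor's sketch, not generated); the image order follows the image_urls description below: person first, then the design. URLs are placeholders. + * + * @example + * const input: SchemaQwenImageEditPlusLoraGalleryShirtDesignInput = { prompt: 'put this logo on the shirt', image_urls: ['https://example.com/person.png', 'https://example.com/logo.png'] }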
+ */ + acceleration?: 'none' | 'regular' + /** + * Lora Scale + * + * The scale factor for the LoRA model. Controls the strength of the LoRA effect. + */ + lora_scale?: number + /** + * Output Format + * + * The format of the output image + */ + output_format?: 'png' | 'jpeg' | 'webp' + /** + * Enable Safety Checker + * + * Whether to enable the safety checker for the generated image. + */ + enable_safety_checker?: boolean + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and won't be saved in history. + */ + sync_mode?: boolean + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt. + */ + guidance_scale?: number + /** + * Seed + * + * Random seed for reproducibility. Same seed with same prompt will produce same result. + */ + seed?: number | unknown + /** + * Image URLs + * + * The URLs of the images: first image is the person wearing a shirt, second image is the design/logo to put on the shirt. + */ + image_urls: Array + /** + * Negative Prompt + * + * The negative prompt for the generation + */ + negative_prompt?: string + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * MultipleAnglesOutput + */ +export type SchemaQwenImageEditPlusLoraGalleryMultipleAnglesOutput = { + /** + * Images + * + * The generated/edited images + */ + images: Array + /** + * Seed + * + * The seed used for generation + */ + seed: number +} + +/** + * MultipleAnglesInput + * + * Input model for Multiple Angles endpoint - Camera control with precise adjustments + */ +export type SchemaQwenImageEditPlusLoraGalleryMultipleAnglesInput = { + /** + * Image Size + * + * The size of the generated image. If not provided, the size of the final input image will be used. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + | unknown + /** + * Wide-Angle Lens + * + * Enable wide-angle lens effect + */ + wide_angle_lens?: boolean + /** + * Acceleration + * + * Acceleration level for image generation. 'regular' balances speed and quality. + */ + acceleration?: 'none' | 'regular' + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt. + */ + guidance_scale?: number + /** + * Enable Safety Checker + * + * Whether to enable the safety checker for the generated image. + */ + enable_safety_checker?: boolean + /** + * Image URLs + * + * The URL of the image to adjust camera angle for. + */ + image_urls: Array + /** + * Negative Prompt + * + * The negative prompt for the generation + */ + negative_prompt?: string + /** + * Vertical Angle (Bird ⬄ Worm) + * + * Adjust vertical camera angle (-1=bird's-eye view/looking down, 0=neutral, 1=worm's-eye view/looking up) + */ + vertical_angle?: number + /** + * Num Images + * + * Number of images to generate + */ + num_images?: number + /** + * Move Forward → Close-Up + * + * Move camera forward (0=no movement, 10=close-up) + */ + move_forward?: number + /** + * Output Format + * + * The format of the output image + */ + output_format?: 'png' | 'jpeg' | 'webp' + /** + * Rotate Right-Left (degrees °) + * + * Rotate camera left (positive) or right (negative) in degrees. Positive values rotate left, negative values rotate right. + */ + rotate_right_left?: number + /** + * Lora Scale + * + * The scale factor for the LoRA model. 
Controls the strength of the camera control effect. + */ + lora_scale?: number + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and won't be saved in history. + */ + sync_mode?: boolean + /** + * Seed + * + * Random seed for reproducibility. Same seed with same prompt will produce same result. + */ + seed?: number | unknown + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * NanoBananaImageToImageOutput + */ +export type SchemaNanoBananaProEditOutput = { + /** + * Images + * + * The edited images. + */ + images: Array + /** + * Description + * + * The description of the generated images. + */ + description: string +} + +/** + * NanoBananaImageToImageInput + */ +export type SchemaNanoBananaProEditInput = { + /** + * Prompt + * + * The prompt for image editing. + */ + prompt: string + /** + * Number of Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Enable Web Search + * + * Enable web search for the image generation task. This will allow the model to use the latest information from the web to generate the image. + */ + enable_web_search?: boolean + /** + * Aspect Ratio + * + * The aspect ratio of the generated image. + */ + aspect_ratio?: + | 'auto' + | '21:9' + | '16:9' + | '3:2' + | '4:3' + | '5:4' + | '1:1' + | '4:5' + | '3:4' + | '2:3' + | '9:16' + /** + * Resolution + * + * The resolution of the image to generate. + */ + resolution?: '1K' | '2K' | '4K' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Seed + * + * The seed for the random number generator. + */ + seed?: number + /** + * Image URLs + * + * The URLs of the images to use for image-to-image generation or image editing. + */ + image_urls: Array + /** + * Limit Generations + * + * Experimental parameter to limit the number of generations from each round of prompting to 1. Set to `True` to disregard any instructions in the prompt regarding the number of images to generate. + */ + limit_generations?: boolean +} + +/** + * NanoBananaImageToImageOutput + */ +export type SchemaGemini3ProImagePreviewEditOutput = { + /** + * Images + * + * The edited images. + */ + images: Array + /** + * Description + * + * The description of the generated images. + */ + description: string +} + +/** + * NanoBananaImageToImageInput + */ +export type SchemaGemini3ProImagePreviewEditInput = { + /** + * Prompt + * + * The prompt for image editing. + */ + prompt: string + /** + * Number of Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Enable Web Search + * + * Enable web search for the image generation task. This will allow the model to use the latest information from the web to generate the image. + */ + enable_web_search?: boolean + /** + * Resolution + * + * The resolution of the image to generate. + */ + resolution?: '1K' | '2K' | '4K' + /** + * Aspect Ratio + * + * The aspect ratio of the generated image. + */ + aspect_ratio?: + | 'auto' + | '21:9' + | '16:9' + | '3:2' + | '4:3' + | '5:4' + | '1:1' + | '4:5' + | '3:4' + | '2:3' + | '9:16' + /** + * Output Format + * + * The format of the generated image.
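+ * + * Editor's sketch (not generated): sending this input through the standard @fal-ai/client API; the endpoint id below is an assumption derived from the schema name, not confirmed by this file. + * + * @example + * import { fal } from '@fal-ai/client' + * const { data } = await fal.subscribe('fal-ai/gemini-3-pro-image-preview/edit', { input: { prompt: 'make it night time', image_urls: ['https://example.com/day.png'] } }) // endpoint id assumed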
+ */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Seed + * + * The seed for the random number generator. + */ + seed?: number + /** + * Image URLs + * + * The URLs of the images to use for image-to-image generation or image editing. + */ + image_urls: Array + /** + * Limit Generations + * + * Experimental parameter to limit the number of generations from each round of prompting to 1. Set to `True` to disregard any instructions in the prompt regarding the number of images to generate. + */ + limit_generations?: boolean +} + +/** + * SAM3ImageOutput + */ +export type SchemaSam3ImageOutput = { + /** + * Image + * + * Primary segmented mask preview. + */ + image?: SchemaImage + /** + * Metadata + * + * Per-mask metadata including scores and boxes. + */ + metadata?: Array + /** + * Masks + * + * Segmented mask images. + */ + masks: Array + /** + * Scores + * + * Per-mask confidence scores when requested. + */ + scores?: Array + /** + * Boxes + * + * Per-mask normalized bounding boxes [cx, cy, w, h] when requested. + */ + boxes?: Array> +} + +/** + * MaskMetadata + */ +export type SchemaMaskMetadata = { + /** + * Box + * + * Bounding box for the mask in normalized cxcywh coordinates. + */ + box?: Array + /** + * Score + * + * Score for this mask. + */ + score?: number + /** + * Index + * + * Index of the mask inside the model output. + */ + index: number +} + +/** + * SAM3ImageInput + */ +export type SchemaSam3ImageInput = { + /** + * Prompt + * + * Text prompt for segmentation + */ + prompt?: string + /** + * Include Boxes + * + * Whether to include bounding boxes for each mask (when available). + */ + include_boxes?: boolean + /** + * Box Prompts + * + * Box prompt coordinates (x_min, y_min, x_max, y_max). Multiple boxes supported - use object_id to group boxes for the same object or leave empty for separate objects. + */ + box_prompts?: Array + /** + * Return Multiple Masks + * + * If True, upload and return multiple generated masks as defined by `max_masks`. + */ + return_multiple_masks?: boolean + /** + * Image Url + * + * URL of the image to be segmented + */ + image_url: string + /** + * Sync Mode + * + * If True, the media will be returned as a data URI. + */ + sync_mode?: boolean + /** + * Point Prompts + * + * List of point prompts + */ + point_prompts?: Array + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Max Masks + * + * Maximum number of masks to return when `return_multiple_masks` is enabled. + */ + max_masks?: number + /** + * Include Scores + * + * Whether to include mask confidence scores. + */ + include_scores?: boolean + /** + * Apply Mask + * + * Apply the mask on the image. + */ + apply_mask?: boolean + /** + * Text Prompt + * + * [DEPRECATED] Use 'prompt' instead. Kept for backward compatibility. + * + * @deprecated + */ + text_prompt?: string +} + +/** + * SAM3RLEOutput + */ +export type SchemaSam3ImageRleOutput = { + /** + * Rle + * + * Run Length Encoding of the mask. + */ + rle: string | Array + /** + * Metadata + * + * Per-mask metadata when multiple RLEs are returned. + */ + metadata?: Array + /** + * Scores + * + * Per-mask confidence scores when requested. + */ + scores?: Array + /** + * Boundingbox Frames Zip + * + * Zip file containing per-frame bounding box overlays.
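+ * + * Editor's note (illustrative): `rle` above is declared as a union of a single string and an array, so normalize before iterating. + * + * @example + * const toList = (out: SchemaSam3ImageRleOutput) => typeof out.rle === 'string' ? [out.rle] : out.rle // handles both union arms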
+ */ + boundingbox_frames_zip?: SchemaFile + /** + * Boxes + * + * Per-mask normalized bounding boxes [cx, cy, w, h] when requested. + */ + boxes?: Array> +} + +/** + * SAM3ImageInput + */ +export type SchemaSam3ImageRleInput = { + /** + * Prompt + * + * Text prompt for segmentation + */ + prompt?: string + /** + * Include Boxes + * + * Whether to include bounding boxes for each mask (when available). + */ + include_boxes?: boolean + /** + * Box Prompts + * + * Box prompt coordinates (x_min, y_min, x_max, y_max). Multiple boxes supported - use object_id to group boxes for the same object or leave empty for separate objects. + */ + box_prompts?: Array + /** + * Return Multiple Masks + * + * If True, upload and return multiple generated masks as defined by `max_masks`. + */ + return_multiple_masks?: boolean + /** + * Image Url + * + * URL of the image to be segmented + */ + image_url: string + /** + * Sync Mode + * + * If True, the media will be returned as a data URI. + */ + sync_mode?: boolean + /** + * Point Prompts + * + * List of point prompts + */ + point_prompts?: Array + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Max Masks + * + * Maximum number of masks to return when `return_multiple_masks` is enabled. + */ + max_masks?: number + /** + * Include Scores + * + * Whether to include mask confidence scores. + */ + include_scores?: boolean + /** + * Apply Mask + * + * Apply the mask on the image. + */ + apply_mask?: boolean + /** + * Text Prompt + * + * [DEPRECATED] Use 'prompt' instead. Kept for backward compatibility. + * + * @deprecated + */ + text_prompt?: string +} + +/** + * ChronoEditOutput + * + * Unified output model for all ChronoEdit operations + */ +export type SchemaChronoEditLoraGalleryUpscalerOutput = { + /** + * Prompt + * + * The prompt used for the inference. + */ + prompt: string + /** + * Images + * + * The edited image. + */ + images: Array + /** + * Seed + * + * The seed for the inference. + */ + seed: number +} + +/** + * ChronoEditUpscalerInput + * + * Input for upscaler mode + */ +export type SchemaChronoEditLoraGalleryUpscalerInput = { + /** + * Lora Scale + * + * The scale factor for the LoRA adapter. + */ + lora_scale?: number + /** + * Output Format + * + * The format of the output image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Image Url + * + * The image to upscale. + */ + image_url: string + /** + * Sync Mode + * + * Whether to return the image in sync mode. + */ + sync_mode?: boolean + /** + * Loras + * + * Optional additional LoRAs to merge (max 3). + */ + loras?: Array + /** + * Upscale Factor + * + * Target scale factor for the output resolution. + */ + upscale_factor?: number + /** + * Guidance Scale + * + * The guidance scale for the inference. + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * Number of inference steps for the upscaling pass. + */ + num_inference_steps?: number + /** + * Seed + * + * The seed for the inference. + */ + seed?: number + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean +} + +/** + * ChronoLoraWeight + */ +export type SchemaChronoLoraWeight = { + /** + * Path + * + * URL or path to the LoRA weights (Safetensors). + */ + path: string + /** + * Scale + * + * Scale factor controlling LoRA strength. 
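+ * + * Illustrative entry (editor's addition); the URL is a placeholder. + * + * @example + * const lora: SchemaChronoLoraWeight = { path: 'https://example.com/style-lora.safetensors', scale: 0.8 }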
+ */ + scale?: number +} + +/** + * ChronoEditOutput + * + * Unified output model for all ChronoEdit operations + */ +export type SchemaChronoEditLoraGalleryPaintbrushOutput = { + /** + * Prompt + * + * The prompt used for the inference. + */ + prompt: string + /** + * Images + * + * The edited image. + */ + images: Array + /** + * Seed + * + * The seed for the inference. + */ + seed: number +} + +/** + * ChronoEditPaintBrushInput + * + * Input for paintbrush mode + */ +export type SchemaChronoEditLoraGalleryPaintbrushInput = { + /** + * Prompt + * + * Describe how to transform the sketched regions. + */ + prompt: string + /** + * Resolution + * + * The resolution of the output image. + */ + resolution?: '480p' | '720p' + /** + * Lora Scale + * + * The scale factor for the LoRA adapter. + */ + lora_scale?: number + /** + * Output Format + * + * The format of the output image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Image Url + * + * The image to edit. + */ + image_url: string + /** + * Sync Mode + * + * Whether to return the image in sync mode. + */ + sync_mode?: boolean + /** + * Turbo Mode + * + * Enable turbo mode for faster inference. + */ + turbo_mode?: boolean + /** + * Loras + * + * Optional additional LoRAs to merge (max 3). + */ + loras?: Array + /** + * Guidance Scale + * + * Classifier-free guidance scale. + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * Number of denoising steps to run. + */ + num_inference_steps?: number + /** + * Mask Url + * + * Optional mask image where black areas indicate regions to sketch/paint. + */ + mask_url?: string + /** + * Seed + * + * The seed for the inference. + */ + seed?: number + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean +} + +/** + * ChronoEditOutput + * + * Unified output model for all ChronoEdit operations + */ +export type SchemaChronoEditLoraOutput = { + /** + * Prompt + * + * The prompt used for the inference. + */ + prompt: string + /** + * Images + * + * The edited image. + */ + images: Array + /** + * Seed + * + * The seed for the inference. + */ + seed: number +} + +/** + * ChronoEditLoRAInput + * + * ChronoEdit input with optional custom LoRAs. + */ +export type SchemaChronoEditLoraInput = { + /** + * Prompt + * + * The prompt to edit the image. + */ + prompt: string + /** + * Loras + * + * Optional additional LoRAs to merge for this request (max 3). + */ + loras?: Array + /** + * Turbo Mode + * + * Enable turbo mode for faster inference. + */ + turbo_mode?: boolean + /** + * Enable Temporal Reasoning + * + * Whether to enable temporal reasoning. + */ + enable_temporal_reasoning?: boolean + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Guidance Scale + * + * The guidance scale for the inference. + */ + guidance_scale?: number + /** + * Resolution + * + * The resolution of the output image. + */ + resolution?: '480p' | '720p' + /** + * Output Format + * + * The format of the output image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Number of Temporal Reasoning Steps + * + * The number of temporal reasoning steps to perform. + */ + num_temporal_reasoning_steps?: number + /** + * Sync Mode + * + * Whether to return the image in sync mode. + */ + sync_mode?: boolean + /** + * Image URL + * + * The image to edit. + */ + image_url: string + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion.
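+ * + * Editor's sketch (not generated): a ChronoEdit LoRA request using only fields from this schema; the values and URLs are placeholders. + * + * @example + * const input: SchemaChronoEditLoraInput = { prompt: 'turn the pencil sketch into a photo', image_url: 'https://example.com/sketch.png', loras: [{ path: 'https://example.com/style.safetensors', scale: 0.7 }], turbo_mode: true }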
+ */ + enable_prompt_expansion?: boolean + /** + * Number of Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * The seed for the inference. + */ + seed?: number +} + +/** + * Flux2FlexEditOutput + */ +export type SchemaFlux2FlexEditOutput = { + /** + * Images + * + * The generated images. + */ + images: Array + /** + * Seed + * + * The seed used for the generation. + */ + seed: number +} + +/** + * Flux2FlexImageEditInput + */ +export type SchemaFlux2FlexEditInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Guidance Scale + * + * The guidance scale to use for the generation. + */ + guidance_scale?: number + /** + * Image Size + * + * The size of the generated image. If `auto`, the size will be determined by the model. + */ + image_size?: + | SchemaImageSize + | 'auto' + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Safety Tolerance + * + * The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive. + */ + safety_tolerance?: '1' | '2' | '3' | '4' | '5' + /** + * Enable Prompt Expansion + * + * Whether to expand the prompt using the model's own knowledge. + */ + enable_prompt_expansion?: boolean + /** + * Number of Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Image URLs + * + * List of URLs of input images for editing + */ + image_urls: Array + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * The seed to use for the generation. + */ + seed?: number +} + +/** + * CrystalUpscaleOutput + */ +export type SchemaCrystalUpscalerOutput = { + /** + * Images + * + * List of upscaled images + */ + images: Array +} + +/** + * CrystalUpscaleInput + */ +export type SchemaCrystalUpscalerInput = { + /** + * Creativity + * + * Creativity level for upscaling + */ + creativity?: number + /** + * Scale Factor + * + * Scale factor + */ + scale_factor?: number + /** + * Image Url + * + * URL to the input image + */ + image_url: string +} + +/** + * AddBackgroundOutput + */ +export type SchemaFlux2LoraGalleryAddBackgroundOutput = { + /** + * Prompt + * + * The prompt used for generation + */ + prompt: string + /** + * Images + * + * The generated images with added background + */ + images: Array + /** + * Seed + * + * The seed used for generation + */ + seed: number +} + +/** + * AddBackgroundInput + * + * Input model for Add Background endpoint - Add background to images + */ +export type SchemaFlux2LoraGalleryAddBackgroundInput = { + /** + * Prompt + * + * The prompt describing the background to add. Must start with 'Add Background' followed by your description. + */ + prompt?: string + /** + * Num Images + * + * Number of images to generate + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. If not provided, the size of the input image will be used. 
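+ * + * Illustrative request (editor's sketch, not generated): per the prompt description above, the prompt must start with 'Add Background'; the URL is a placeholder. + * + * @example + * const input: SchemaFlux2LoraGalleryAddBackgroundInput = { prompt: 'Add Background a sunlit marble kitchen counter', image_urls: ['https://example.com/product.png'] }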
+ */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + | unknown + /** + * Acceleration + * + * Acceleration level for image generation. 'regular' balances speed and quality. + */ + acceleration?: 'none' | 'regular' + /** + * Lora Scale + * + * The strength of the add background effect. + */ + lora_scale?: number + /** + * Output Format + * + * The format of the output image + */ + output_format?: 'png' | 'jpeg' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and won't be saved in history. + */ + sync_mode?: boolean + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt. + */ + guidance_scale?: number + /** + * Seed + * + * Random seed for reproducibility. Same seed with same prompt will produce same result. + */ + seed?: number | unknown + /** + * Image URLs + * + * The URLs of the images. Provide an image with a white or clean background. + */ + image_urls: Array + /** + * Enable Safety Checker + * + * Whether to enable the safety checker for the generated image. + */ + enable_safety_checker?: boolean + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * ApartmentStagingOutput + */ +export type SchemaFlux2LoraGalleryApartmentStagingOutput = { + /** + * Prompt + * + * The prompt used for generation + */ + prompt: string + /** + * Images + * + * The generated furnished room images + */ + images: Array + /** + * Seed + * + * The seed used for generation + */ + seed: number +} + +/** + * ApartmentStagingInput + * + * Input model for Apartment Staging endpoint - Furnish rooms + */ +export type SchemaFlux2LoraGalleryApartmentStagingInput = { + /** + * Prompt + * + * The prompt to generate a furnished room. Use 'furnish this room' for best results. + */ + prompt: string + /** + * Num Images + * + * Number of images to generate + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. If not provided, the size of the input image will be used. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + | unknown + /** + * Acceleration + * + * Acceleration level for image generation. 'regular' balances speed and quality. + */ + acceleration?: 'none' | 'regular' + /** + * Lora Scale + * + * The strength of the apartment staging effect. + */ + lora_scale?: number + /** + * Output Format + * + * The format of the output image + */ + output_format?: 'png' | 'jpeg' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and won't be saved in history. + */ + sync_mode?: boolean + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt. + */ + guidance_scale?: number + /** + * Seed + * + * Random seed for reproducibility. Same seed with same prompt will produce same result. + */ + seed?: number | unknown + /** + * Image URLs + * + * The URL of the empty room image to furnish. + */ + image_urls: Array + /** + * Enable Safety Checker + * + * Whether to enable the safety checker for the generated image. + */ + enable_safety_checker?: boolean + /** + * Num Inference Steps + * + * The number of inference steps to perform. 
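+ * + * Illustrative request (editor's sketch): the prompt description above recommends the literal phrase 'furnish this room'; the URL is a placeholder. + * + * @example + * const input: SchemaFlux2LoraGalleryApartmentStagingInput = { prompt: 'furnish this room', image_urls: ['https://example.com/empty-room.jpg'] }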
+ */ + num_inference_steps?: number +} + +/** + * FaceToFullPortraitOutput + */ +export type SchemaFlux2LoraGalleryFaceToFullPortraitOutput = { + /** + * Prompt + * + * The prompt used for generation + */ + prompt: string + /** + * Images + * + * The generated full portrait images from face + */ + images: Array + /** + * Seed + * + * The seed used for generation + */ + seed: number +} + +/** + * FaceToFullPortraitInput + * + * Input model for Face to Full Portrait endpoint - Generate full portrait from face + */ +export type SchemaFlux2LoraGalleryFaceToFullPortraitInput = { + /** + * Prompt + * + * The prompt describing the full portrait to generate from the face. + */ + prompt?: string + /** + * Num Images + * + * Number of images to generate + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. If not provided, the size of the input image will be used. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + | unknown + /** + * Acceleration + * + * Acceleration level for image generation. 'regular' balances speed and quality. + */ + acceleration?: 'none' | 'regular' + /** + * Lora Scale + * + * The strength of the face to full portrait effect. + */ + lora_scale?: number + /** + * Output Format + * + * The format of the output image + */ + output_format?: 'png' | 'jpeg' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and won't be saved in history. + */ + sync_mode?: boolean + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt. + */ + guidance_scale?: number + /** + * Seed + * + * Random seed for reproducibility. Same seed with same prompt will produce same result. + */ + seed?: number | unknown + /** + * Image URLs + * + * The URL of the cropped face image. + */ + image_urls: Array + /** + * Enable Safety Checker + * + * Whether to enable the safety checker for the generated image. + */ + enable_safety_checker?: boolean + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * MultipleAnglesOutput + */ +export type SchemaFlux2LoraGalleryMultipleAnglesOutput = { + /** + * Prompt + * + * The prompt used for generation + */ + prompt: string + /** + * Images + * + * The generated images with multiple camera angles + */ + images: Array + /** + * Seed + * + * The seed used for generation + */ + seed: number +} + +/** + * MultipleAnglesInput + * + * Input model for Multiple Angles endpoint - Camera control with precise adjustments using trigger word. Prompt is built automatically from slider values. + */ +export type SchemaFlux2LoraGalleryMultipleAnglesInput = { + /** + * Image Size + * + * The size of the generated image. If not provided, the size of the input image will be used. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + | unknown + /** + * Acceleration + * + * Acceleration level for image generation. + */ + acceleration?: 'none' | 'regular' + /** + * Horizontal Angle (Azimuth °) + * + * Horizontal rotation angle around the object in degrees. 0°=front view, 90°=right side, 180°=back view, 270°=left side, 360°=front view again. + */ + horizontal_angle?: number + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale. 
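+ * + * Illustrative slider values (editor's addition), following the angle/zoom descriptions in this schema: right-side view, elevated shot, fairly close framing. + * + * @example + * horizontal_angle: 90, vertical_angle: 30, zoom: 7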
+ */ + guidance_scale?: number + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Image URLs + * + * The URL of the image to adjust camera angle for. + */ + image_urls: Array + /** + * Zoom (Distance) + * + * Camera zoom/distance. 0=wide shot (far away), 5=medium shot (normal), 10=close-up (very close). + */ + zoom?: number + /** + * Vertical Angle (Elevation °) + * + * Vertical camera angle in degrees. 0°=eye-level shot, 30°=elevated shot, 60°=high-angle shot (looking down from above). + */ + vertical_angle?: number + /** + * Num Images + * + * Number of images to generate. + */ + num_images?: number + /** + * Lora Scale + * + * The strength of the multiple angles effect. + */ + lora_scale?: number + /** + * Output Format + * + * The format of the output image. + */ + output_format?: 'png' | 'jpeg' | 'webp' + /** + * Sync Mode + * + * If True, the media will be returned as a data URI. + */ + sync_mode?: boolean + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * Random seed for reproducibility. + */ + seed?: number | unknown +} + +/** + * VirtualTryonOutput + */ +export type SchemaFlux2LoraGalleryVirtualTryonOutput = { + /** + * Prompt + * + * The prompt used for generation + */ + prompt: string + /** + * Images + * + * The generated virtual try-on images + */ + images: Array + /** + * Seed + * + * The seed used for generation + */ + seed: number +} + +/** + * VirtualTryonInput + * + * Input model for Virtual Try-on endpoint - Generate virtual try-on images + */ +export type SchemaFlux2LoraGalleryVirtualTryonInput = { + /** + * Prompt + * + * The prompt to generate a virtual try-on image. + */ + prompt: string + /** + * Num Images + * + * Number of images to generate + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. If not provided, the size of the input image will be used. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + | unknown + /** + * Acceleration + * + * Acceleration level for image generation. 'regular' balances speed and quality. + */ + acceleration?: 'none' | 'regular' + /** + * Lora Scale + * + * The strength of the virtual try-on effect. + */ + lora_scale?: number + /** + * Output Format + * + * The format of the output image + */ + output_format?: 'png' | 'jpeg' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and won't be saved in history. + */ + sync_mode?: boolean + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt. + */ + guidance_scale?: number + /** + * Seed + * + * Random seed for reproducibility. Same seed with same prompt will produce same result. + */ + seed?: number | unknown + /** + * Image URLs + * + * The URLs of the images for virtual try-on. Provide person image and clothing image. + */ + image_urls: Array + /** + * Enable Safety Checker + * + * Whether to enable the safety checker for the generated image. + */ + enable_safety_checker?: boolean + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * OmniImageElementInput + */ +export type SchemaOmniImageElementInput = { + /** + * Reference Image Urls + * + * Additional reference images from different angles. 
1-3 images supported. At least one image is required. + */ + reference_image_urls?: Array + /** + * Frontal Image Url + * + * The frontal image of the element (main view). + */ + frontal_image_url: string +} + +/** + * OmniImageOutput + */ +export type SchemaKlingImageO1Output = { + /** + * Images + * + * Generated images + */ + images: Array +} + +/** + * OmniImageRequest + */ +export type SchemaKlingImageO1Input = { + /** + * Prompt + * + * Text prompt for image generation. Reference images using @Image1, @Image2, etc. (or @Image if only one image). Max 2500 characters. + */ + prompt: string + /** + * Aspect Ratio + * + * Aspect ratio of generated images. 'auto' intelligently determines based on input content. + */ + aspect_ratio?: + | 'auto' + | '16:9' + | '9:16' + | '1:1' + | '4:3' + | '3:4' + | '3:2' + | '2:3' + | '21:9' + /** + * Num Images + * + * Number of images to generate (1-9). + */ + num_images?: number + /** + * Resolution + * + * Image generation resolution. 1K: standard, 2K: high-res. + */ + resolution?: '1K' | '2K' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Elements + * + * Elements (characters/objects) to include in the image. Reference in prompt as @Element1, @Element2, etc. Maximum 10 total (elements + reference images). + */ + elements?: Array + /** + * Image Urls + * + * List of reference images. Reference images in prompt using @Image1, @Image2, etc. (1-indexed). Max 10 images. + */ + image_urls: Array +} + +/** + * ReferenceToImageOutput + */ +export type SchemaViduQ2ReferenceToImageOutput = { + /** + * Image + * + * The edited image + */ + image: SchemaImage +} + +/** + * ReferenceToImageRequest + */ +export type SchemaViduQ2ReferenceToImageInput = { + /** + * Prompt + * + * Text prompt for image generation, max 1500 characters + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the output image + */ + aspect_ratio?: '16:9' | '9:16' | '1:1' + /** + * Reference Image Urls + * + * URLs of the reference images to use for consistent subject appearance + */ + reference_image_urls: Array + /** + * Seed + * + * Random seed for generation + */ + seed?: number +} + +/** + * SeedDream45EditOutput + */ +export type SchemaBytedanceSeedreamV45EditOutput = { + /** + * Images + * + * Generated images + */ + images: Array +} + +/** + * SeedDream45EditInput + */ +export type SchemaBytedanceSeedreamV45EditInput = { + /** + * Prompt + * + * The text prompt used to edit the image + */ + prompt: string + /** + * Num Images + * + * Number of separate model generations to be run with the prompt. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. Width and height must be between 1920 and 4096, or total number of pixels must be between 2560*1440 and 4096*4096. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + | 'auto_2K' + | 'auto_4K' + /** + * Max Images + * + * If set to a number greater than one, enables multi-image generation. The model will potentially return up to `max_images` images every generation, and in total, `num_images` generations will be carried out. In total, the number of images generated will be between `num_images` and `max_images*num_images`.
The total number of images (image inputs + image outputs) must not exceed 15 + */ + max_images?: number + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * Random seed to control the stochasticity of image generation. + */ + seed?: number + /** + * Image URLs + * + * List of URLs of input images for editing. Presently, up to 10 image inputs are allowed. If over 10 images are sent, only the last 10 will be used. + */ + image_urls: Array +} + +/** + * ImageToImageOutput + */ +export type SchemaLongcatImageEditOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * EditImageInput + */ +export type SchemaLongcatImageEditInput = { + /** + * Prompt + * + * The prompt to edit the image with. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Acceleration + * + * The acceleration level to use. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Image URL + * + * The URL of the image to edit. + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Guidance Scale + * + * The guidance scale to use for the image generation. + */ + guidance_scale?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number +} + +/** + * ZImageTurboImageToImageOutput + */ +export type SchemaZImageTurboImageToImageOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Seed + * + * Seed of the generated Image. It will be the same value of the one passed in the input or the randomly generated that was used in case none was passed. + */ + seed: number + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Timings + * + * The timings of the generation process. + */ + timings: { + [key: string]: number + } +} + +/** + * ZImageTurboImageToImageInput + */ +export type SchemaZImageTurboImageToImageInput = { + /** + * Prompt + * + * The prompt to generate an image from. 
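+ * + * Editor's sketch (not generated): an image-to-image request built from this schema; how strongly the source image conditions the result is set via `strength` below. Values and URL are placeholders. + * + * @example + * const input: SchemaZImageTurboImageToImageInput = { prompt: 'an oil painting of the same scene', image_url: 'https://example.com/photo.jpg', strength: 0.6 }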
+ */ + prompt: string + /** + * Number of Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + | 'auto' + /** + * Acceleration + * + * The acceleration level to use. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Image URL + * + * URL of Image for Image-to-Image generation. + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Strength + * + * The strength of the image-to-image conditioning. + */ + strength?: number + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. Note: this will increase the price by 0.0025 credits per request. + */ + enable_prompt_expansion?: boolean + /** + * Number of Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number +} + +/** + * ZImageTurboImageToImageOutput + */ +export type SchemaZImageTurboImageToImageLoraOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Seed + * + * Seed of the generated Image. It will be the same value of the one passed in the input or the randomly generated that was used in case none was passed. + */ + seed: number + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Timings + * + * The timings of the generation process. + */ + timings: { + [key: string]: number + } +} + +/** + * ZImageTurboImageToImageLoRAInput + */ +export type SchemaZImageTurboImageToImageLoraInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Number of Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + | 'auto' + /** + * Acceleration + * + * The acceleration level to use. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Image URL + * + * URL of Image for Image-to-Image generation. + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Loras + * + * List of LoRA weights to apply (maximum 3). + */ + loras?: Array + /** + * Strength + * + * The strength of the image-to-image conditioning. + */ + strength?: number + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. 
Note: this will increase the price by 0.0025 credits per request. + */ + enable_prompt_expansion?: boolean + /** + * Number of Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean +} + +/** + * LoRAInput + * + * LoRA weight configuration. + */ +export type SchemaLoRaInput = { + /** + * Path + * + * URL, HuggingFace repo ID (owner/repo) to lora weights. + */ + path: string + /** + * Scale + * + * Scale factor for LoRA application (0.0 to 4.0). + */ + scale?: number +} + +/** + * ZImageTurboControlNetOutput + */ +export type SchemaZImageTurboControlnetOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Seed + * + * Seed of the generated Image. It will be the same value of the one passed in the input or the randomly generated that was used in case none was passed. + */ + seed: number + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Timings + * + * The timings of the generation process. + */ + timings: { + [key: string]: number + } +} + +/** + * ZImageTurboControlNetInput + */ +export type SchemaZImageTurboControlnetInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Acceleration + * + * The acceleration level to use. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + | 'auto' + /** + * Control End + * + * The end of the controlnet conditioning. + */ + control_end?: number + /** + * Control Start + * + * The start of the controlnet conditioning. + */ + control_start?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Number of Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Image URL + * + * URL of Image for ControlNet generation. + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Control Scale + * + * The scale of the controlnet conditioning. + */ + control_scale?: number + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. Note: this will increase the price by 0.0025 credits per request. + */ + enable_prompt_expansion?: boolean + /** + * Number of Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Preprocess + * + * What kind of preprocessing to apply to the image, if any. 
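+ * + * Illustrative values (editor's addition); the enum members come from this schema, while the trailing comments are assumptions about their intent. + * + * @example + * preprocess: 'canny' // assumed: derive an edge map from image_url before conditioning + * @example + * preprocess: 'none' // assumed: image_url is already a usable control map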
+
+/**
+ * ZImageTurboControlNetOutput
+ */
+export type SchemaZImageTurboControlnetOutput = {
+  /**
+   * Prompt
+   *
+   * The prompt used for generating the image.
+   */
+  prompt: string
+  /**
+   * Images
+   *
+   * The generated image files info.
+   */
+  images: Array
+  /**
+   * Seed
+   *
+   * Seed of the generated Image. It will be the same value of the one passed in the input or the randomly generated that was used in case none was passed.
+   */
+  seed: number
+  /**
+   * Has Nsfw Concepts
+   *
+   * Whether the generated images contain NSFW concepts.
+   */
+  has_nsfw_concepts: Array<boolean>
+  /**
+   * Timings
+   *
+   * The timings of the generation process.
+   */
+  timings: {
+    [key: string]: number
+  }
+}
+
+/**
+ * ZImageTurboControlNetInput
+ */
+export type SchemaZImageTurboControlnetInput = {
+  /**
+   * Prompt
+   *
+   * The prompt to generate an image from.
+   */
+  prompt: string
+  /**
+   * Acceleration
+   *
+   * The acceleration level to use.
+   */
+  acceleration?: 'none' | 'regular' | 'high'
+  /**
+   * Image Size
+   *
+   * The size of the generated image.
+   */
+  image_size?:
+    | SchemaImageSize
+    | 'square_hd'
+    | 'square'
+    | 'portrait_4_3'
+    | 'portrait_16_9'
+    | 'landscape_4_3'
+    | 'landscape_16_9'
+    | 'auto'
+  /**
+   * Control End
+   *
+   * The end of the controlnet conditioning.
+   */
+  control_end?: number
+  /**
+   * Control Start
+   *
+   * The start of the controlnet conditioning.
+   */
+  control_start?: number
+  /**
+   * Enable Safety Checker
+   *
+   * If set to true, the safety checker will be enabled.
+   */
+  enable_safety_checker?: boolean
+  /**
+   * Number of Images
+   *
+   * The number of images to generate.
+   */
+  num_images?: number
+  /**
+   * Output Format
+   *
+   * The format of the generated image.
+   */
+  output_format?: 'jpeg' | 'png' | 'webp'
+  /**
+   * Image URL
+   *
+   * URL of Image for ControlNet generation.
+   */
+  image_url: string
+  /**
+   * Sync Mode
+   *
+   * If `True`, the media will be returned as a data URI and the output data won't be available in the request history.
+   */
+  sync_mode?: boolean
+  /**
+   * Control Scale
+   *
+   * The scale of the controlnet conditioning.
+   */
+  control_scale?: number
+  /**
+   * Enable Prompt Expansion
+   *
+   * Whether to enable prompt expansion. Note: this will increase the price by 0.0025 credits per request.
+   */
+  enable_prompt_expansion?: boolean
+  /**
+   * Number of Inference Steps
+   *
+   * The number of inference steps to perform.
+   */
+  num_inference_steps?: number
+  /**
+   * Seed
+   *
+   *
+   * The same seed and the same prompt given to the same version of the model
+   * will output the same image every time.
+   *
+   */
+  seed?: number
+  /**
+   * Preprocess
+   *
+   * What kind of preprocessing to apply to the image, if any.
+   */
+  preprocess?: 'none' | 'canny' | 'depth' | 'pose'
+}
+
+/**
+ * ZImageTurboControlNetLoRAOutput
+ */
+export type SchemaZImageTurboControlnetLoraOutput = {
+  /**
+   * Prompt
+   *
+   * The prompt used for generating the image.
+   */
+  prompt: string
+  /**
+   * Images
+   *
+   * The generated image files info.
+   */
+  images: Array
+  /**
+   * Seed
+   *
+   * Seed of the generated Image. It will be the same value of the one passed in the input or the randomly generated that was used in case none was passed.
+   */
+  seed: number
+  /**
+   * Has Nsfw Concepts
+   *
+   * Whether the generated images contain NSFW concepts.
+   */
+  has_nsfw_concepts: Array<boolean>
+  /**
+   * Timings
+   *
+   * The timings of the generation process.
+   */
+  timings: {
+    [key: string]: number
+  }
+}
+
+/**
+ * ZImageTurboControlNetLoRAInput
+ */
+export type SchemaZImageTurboControlnetLoraInput = {
+  /**
+   * Prompt
+   *
+   * The prompt to generate an image from.
+   */
+  prompt: string
+  /**
+   * Acceleration
+   *
+   * The acceleration level to use.
+   */
+  acceleration?: 'none' | 'regular' | 'high'
+  /**
+   * Image Size
+   *
+   * The size of the generated image.
+   */
+  image_size?:
+    | SchemaImageSize
+    | 'square_hd'
+    | 'square'
+    | 'portrait_4_3'
+    | 'portrait_16_9'
+    | 'landscape_4_3'
+    | 'landscape_16_9'
+    | 'auto'
+  /**
+   * Loras
+   *
+   * List of LoRA weights to apply (maximum 3).
+   */
+  loras?: Array<SchemaLoRaInput>
+  /**
+   * Control End
+   *
+   * The end of the controlnet conditioning.
+   */
+  control_end?: number
+  /**
+   * Control Start
+   *
+   * The start of the controlnet conditioning.
+   */
+  control_start?: number
+  /**
+   * Enable Safety Checker
+   *
+   * If set to true, the safety checker will be enabled.
+   */
+  enable_safety_checker?: boolean
+  /**
+   * Number of Images
+   *
+   * The number of images to generate.
+   */
+  num_images?: number
+  /**
+   * Output Format
+   *
+   * The format of the generated image.
+   */
+  output_format?: 'jpeg' | 'png' | 'webp'
+  /**
+   * Image URL
+   *
+   * URL of Image for ControlNet generation.
+   */
+  image_url: string
+  /**
+   * Sync Mode
+   *
+   * If `True`, the media will be returned as a data URI and the output data won't be available in the request history.
+   */
+  sync_mode?: boolean
+  /**
+   * Control Scale
+   *
+   * The scale of the controlnet conditioning.
+   */
+  control_scale?: number
+  /**
+   * Enable Prompt Expansion
+   *
+   * Whether to enable prompt expansion. Note: this will increase the price by 0.0025 credits per request.
+   */
+  enable_prompt_expansion?: boolean
+  /**
+   * Number of Inference Steps
+   *
+   * The number of inference steps to perform.
+   */
+  num_inference_steps?: number
+  /**
+   * Seed
+   *
+   *
+   * The same seed and the same prompt given to the same version of the model
+   * will output the same image every time.
+   *
+   */
+  seed?: number
+  /**
+   * Preprocess
+   *
+   * What kind of preprocessing to apply to the image, if any.
+   */
+  preprocess?: 'none' | 'canny' | 'depth' | 'pose'
+}
+
+/**
+ * ImageOutput
+ */
+export type SchemaStepxEdit2Output = {
+  /**
+   * Prompt
+   *
+   * The prompt used for generating the image.
+   */
+  prompt: string
+  /**
+   * Has Nsfw Concepts
+   *
+   * Whether the generated images contain NSFW concepts.
+   */
+  has_nsfw_concepts: Array<boolean>
+  /**
+   * Best Info
+   *
+   * Reflection analysis (only available when reflection mode is enabled).
+   */
+  best_info?: Array<{
+    [key: string]: unknown
+  }>
+  /**
+   * Images
+   *
+   * The generated images
+   */
+  images: Array
+  /**
+   * Timings
+   */
+  timings: {
+    [key: string]: number
+  }
+  /**
+   * Reformat Prompt
+   *
+   * The model's interpretation of your instruction (only available when thinking mode is enabled).
+   */
+  reformat_prompt?: string
+  /**
+   * Think Info
+   *
+   * Reasoning process details (only available when thinking mode is enabled).
+   */
+  think_info?: Array
+  /**
+   * Seed
+   *
+   *
+   * Seed of the generated Image. It will be the same value of the one passed in the
+   * input or the randomly generated that was used in case none was passed.
+   *
+   */
+  seed: number
+}
+
+/**
+ * ImageToImageInput
+ */
+export type SchemaStepxEdit2Input = {
+  /**
+   * Prompt
+   *
+   * The prompt to generate an image from.
+   */
+  prompt: string
+  /**
+   * Seed
+   *
+   *
+   * The same seed and the same prompt given to the same version of the model
+   * will output the same image every time.
+   *
+   */
+  seed?: number
+  /**
+   * Enable Reflection Mode
+   *
+   * Enable reflection mode. Reviews outputs, corrects unintended changes, and determines when editing is complete.
+   */
+  enable_reflection_mode?: boolean
+  /**
+   * Output Format
+   *
+   * The format of the generated image.
+   */
+  output_format?: 'jpeg' | 'png'
+  /**
+   * Image URL
+   *
+   * The image URL to generate an image from. Needs to match the dimensions of the mask.
+   */
+  image_url: string
+  /**
+   * Sync Mode
+   *
+   * If `True`, the media will be returned as a data URI and the output data won't be available in the request history.
+   */
+  sync_mode?: boolean
+  /**
+   * True CFG scale
+   *
+   *
+   * The true CFG scale. Controls how closely the model follows the prompt.
+   *
+   */
+  guidance_scale?: number
+  /**
+   * Num Inference Steps
+   *
+   * The number of inference steps to perform. Recommended: 50.
+   */
+  num_inference_steps?: number
+  /**
+   * Enable Safety Checker
+   *
+   * If set to true, the safety checker will be enabled.
+   */
+  enable_safety_checker?: boolean
+  /**
+   * Negative Prompt
+   *
+   *
+   * The negative prompt to use. Use it to address details that you don't want
+   * in the image. This could be colors, objects, scenery and even the small details
+   * (e.g. moustache, blurry, low resolution).
+   *
+   */
+  negative_prompt?: string
+  /**
+   * Enable Thinking Mode
+   *
+   * Enable thinking mode. Uses multimodal language model knowledge to interpret abstract editing instructions.
+   */
+  enable_thinking_mode?: boolean
+}
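StepX Edit 2's thinking and reflection modes gate several optional output fields, so consumers should guard before reading them. A sketch, again assuming the package name and that these types are exported:

```ts
import type { SchemaStepxEdit2Input, SchemaStepxEdit2Output } from '@tanstack/ai-fal' // package name assumed

const input: SchemaStepxEdit2Input = {
  prompt: 'replace the background with a rainy street at night',
  image_url: 'https://example.com/portrait.png',
  enable_thinking_mode: true,   // model interprets abstract instructions first
  enable_reflection_mode: true, // model reviews and corrects its own edits
  num_inference_steps: 50,      // the recommended value per the field docs
}

function logEditDetails(output: SchemaStepxEdit2Output): void {
  // reformat_prompt, think_info, and best_info are only present when the
  // corresponding mode was enabled on the input.
  if (output.reformat_prompt) console.log('interpreted as:', output.reformat_prompt)
  if (output.best_info) console.log('reflection passes:', output.best_info.length)
}
```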
+
+/**
+ * Schema referenced but not defined by fal.ai (missing from source OpenAPI spec)
+ */
+export type SchemaPoint = {
+  [key: string]: unknown
+}
+
+/**
+ * UsageInfo
+ */
+export type SchemaUsageInfo = {
+  /**
+   * Output Tokens
+   *
+   * Number of output tokens generated
+   */
+  output_tokens: number
+  /**
+   * Decode Time Ms
+   *
+   * Time taken for decoding in milliseconds
+   */
+  decode_time_ms: number
+  /**
+   * Input Tokens
+   *
+   * Number of input tokens processed
+   */
+  input_tokens: number
+  /**
+   * Ttft Ms
+   *
+   * Time to first token in milliseconds
+   */
+  ttft_ms: number
+  /**
+   * Prefill Time Ms
+   *
+   * Time taken for prefill in milliseconds
+   */
+  prefill_time_ms: number
+}
+
+/**
+ * Object
+ */
+export type SchemaObject = {
+  /**
+   * Y Min
+   *
+   * Top boundary of detection box in normalized format (0 to 1)
+   */
+  y_min: number
+  /**
+   * X Max
+   *
+   * Right boundary of detection box in normalized format (0 to 1)
+   */
+  x_max: number
+  /**
+   * X Min
+   *
+   * Left boundary of detection box in normalized format (0 to 1)
+   */
+  x_min: number
+  /**
+   * Y Max
+   *
+   * Bottom boundary of detection box in normalized format (0 to 1)
+   */
+  y_max: number
+}
+
+/**
+ * SegmentSamplingSettings
+ */
+export type SchemaSegmentSamplingSettings = {
+  /**
+   * Top P
+   *
+   * Nucleus sampling probability mass to use, between 0 and 1.
+   */
+  top_p?: number
+  /**
+   * Max Tokens
+   *
+   * Maximum number of tokens to generate.
+   */
+  max_tokens?: number
+  /**
+   * Temperature
+   *
+   * Sampling temperature to use. Higher values will make the output more random, while lower values will make it more focused and deterministic.
+   */
+  temperature?: number
+}
+
+/**
+ * MoondreamSegmentationOutput
+ */
+export type SchemaMoondream3PreviewSegmentOutput = {
+  /**
+   * Finish Reason
+   *
+   * Reason for finishing the output generation
+   */
+  finish_reason: string
+  /**
+   * Image
+   *
+   * Segmentation mask image. If no object detected or preview not requested, will be null.
+   */
+  image?: SchemaImageFile
+  /**
+   * Bbox
+   *
+   * Bounding box of the segmented object. If not detected, will be null.
+   */
+  bbox?: SchemaObject
+  /**
+   * Path
+   *
+   * SVG path data representing the segmentation mask. If not detected, will be null.
+   */
+  path?: string
+  /**
+   * Usage Info
+   *
+   * Usage information for the request
+   */
+  usage_info: SchemaUsageInfo
+}
+
+/**
+ * MoondreamSegmentationInput
+ */
+export type SchemaMoondream3PreviewSegmentInput = {
+  /**
+   * Spatial References
+   *
+   * Spatial references to guide the segmentation. By feeding in references you can help the segmentation process. Must be either list of Point object with x and y members, or list of arrays containing either 2 floats (x,y) or 4 floats (x1,y1,x2,y2).
+   * **NOTE**: You can also use the [**point endpoint**](https://fal.ai/models/fal-ai/moondream3-preview/point) to get points for the objects, and pass them in here.
+   */
+  spatial_references?: Array<SchemaPoint | Array<number>>
+  /**
+   * Settings
+   *
+   * Sampling settings for the segmentation model
+   */
+  settings?: SchemaSegmentSamplingSettings
+  /**
+   * Object
+   *
+   * Object to be segmented in the image
+   */
+  object: string
+  /**
+   * Preview
+   *
+   * Whether to preview the output and return a binary mask of the image
+   */
+  preview?: boolean
+  /**
+   * Image URL
+   *
+   * URL of the image to be processed
+   *
+   * Max width: 7000px, Max height: 7000px, Timeout: 20.0s
+   */
+  image_url: string
+}
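`SchemaPoint` is intentionally untyped (the upstream spec references it without defining it), but the `spatial_references` description pins down the accepted shapes. A sketch under those documented constraints, with the package name assumed as before:

```ts
import type { SchemaMoondream3PreviewSegmentInput } from '@tanstack/ai-fal' // package name assumed

const input: SchemaMoondream3PreviewSegmentInput = {
  image_url: 'https://example.com/street.jpg', // max 7000px per side
  object: 'bicycle',
  preview: true, // also return a binary mask image
  // Per the field docs: Point-like objects with x/y members, or arrays of
  // 2 floats (x, y) or 4 floats (x1, y1, x2, y2). The {x, y} shape below is
  // taken from that description, not from SchemaPoint itself.
  spatial_references: [{ x: 0.42, y: 0.61 }, [0.35, 0.5, 0.6, 0.85]],
  settings: { temperature: 0.2, max_tokens: 256 },
}
```

On the output side, `bbox` comes back in normalized 0-1 coordinates and `path` is SVG path data; both are absent when nothing is detected, so treat them as optional.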
+
+/**
+ * LightingRestorationOutput
+ */
+export type SchemaQwenImageEditPlusLoraGalleryLightingRestorationOutput = {
+  /**
+   * Images
+   *
+   * The generated/edited images
+   */
+  images: Array
+  /**
+   * Seed
+   *
+   * The seed used for generation
+   */
+  seed: number
+}
+
+/**
+ * LightingRestorationInput
+ *
+ * Input model for Lighting Restoration endpoint - Restore natural lighting by removing harsh shadows and light spots
+ */
+export type SchemaQwenImageEditPlusLoraGalleryLightingRestorationInput = {
+  /**
+   * Enable Safety Checker
+   *
+   * Whether to enable the safety checker for the generated image.
+   */
+  enable_safety_checker?: boolean
+  /**
+   * Num Images
+   *
+   * Number of images to generate
+   */
+  num_images?: number
+  /**
+   * Image Size
+   *
+   * The size of the generated image. If not provided, the size of the final input image will be used.
+   */
+  image_size?:
+    | SchemaImageSize
+    | 'square_hd'
+    | 'square'
+    | 'portrait_4_3'
+    | 'portrait_16_9'
+    | 'landscape_4_3'
+    | 'landscape_16_9'
+    | unknown
+  /**
+   * Acceleration
+   *
+   * Acceleration level for image generation. 'regular' balances speed and quality.
+   */
+  acceleration?: 'none' | 'regular'
+  /**
+   * Output Format
+   *
+   * The format of the output image
+   */
+  output_format?: 'png' | 'jpeg' | 'webp'
+  /**
+   * Sync Mode
+   *
+   * If `True`, the media will be returned as a data URI and won't be saved in history.
+   */
+  sync_mode?: boolean
+  /**
+   * Guidance Scale
+   *
+   * The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.
+   */
+  guidance_scale?: number
+  /**
+   * Seed
+   *
+   * Random seed for reproducibility. Same seed with same prompt will produce same result.
+   */
+  seed?: number | unknown
+  /**
+   * Image URLs
+   *
+   * The URL of the image to restore lighting for.
+   */
+  image_urls: Array<string>
+  /**
+   * Negative Prompt
+   *
+   * The negative prompt for the generation
+   */
+  negative_prompt?: string
+  /**
+   * Num Inference Steps
+   *
+   * The number of inference steps to perform.
+   */
+  num_inference_steps?: number
+}
+
+/**
+ * QwenImageOutput
+ */
+export type SchemaQwenImageEdit2509Output = {
+  /**
+   * Prompt
+   *
+   * The prompt used for generating the image.
+   */
+  prompt: string
+  /**
+   * Images
+   *
+   * The generated image files info.
+   */
+  images: Array
+  /**
+   * Seed
+   *
+   *
+   * Seed of the generated Image. It will be the same value of the one passed in the
+   * input or the randomly generated that was used in case none was passed.
+   *
+   */
+  seed: number
+  /**
+   * Has Nsfw Concepts
+   *
+   * Whether the generated images contain NSFW concepts.
+   */
+  has_nsfw_concepts: Array<boolean>
+  /**
+   * Timings
+   */
+  timings: {
+    [key: string]: number
+  }
+}
+
+/**
+ * BaseQwenEditImagePlusInput
+ */
+export type SchemaQwenImageEdit2509Input = {
+  /**
+   * Prompt
+   *
+   * The prompt to generate the image with
+   */
+  prompt: string
+  /**
+   * Num Images
+   *
+   * The number of images to generate.
+   */
+  num_images?: number
+  /**
+   * Image Size
+   *
+   * The size of the generated image.
+   */
+  image_size?:
+    | SchemaImageSize
+    | 'square_hd'
+    | 'square'
+    | 'portrait_4_3'
+    | 'portrait_16_9'
+    | 'landscape_4_3'
+    | 'landscape_16_9'
+  /**
+   * Acceleration
+   *
+   * Acceleration level for image generation. Options: 'none', 'regular'. Higher acceleration increases speed. 'regular' balances speed and quality.
+   */
+  acceleration?: 'none' | 'regular'
+  /**
+   * Enable Safety Checker
+   *
+   * If set to true, the safety checker will be enabled.
+   */
+  enable_safety_checker?: boolean
+  /**
+   * Output Format
+   *
+   * The format of the generated image.
+   */
+  output_format?: 'jpeg' | 'png'
+  /**
+   * Sync Mode
+   *
+   * If `True`, the media will be returned as a data URI and the output data won't be available in the request history.
+   */
+  sync_mode?: boolean
+  /**
+   * Guidance scale
+   *
+   *
+   * The CFG (Classifier Free Guidance) scale is a measure of how close you want
+   * the model to stick to your prompt when looking for a related image to show you.
+   *
+   */
+  guidance_scale?: number
+  /**
+   * Seed
+   *
+   *
+   * The same seed and the same prompt given to the same version of the model
+   * will output the same image every time.
+   *
+   */
+  seed?: number
+  /**
+   * Image URLs
+   *
+   * The URLs of the images to edit.
+   */
+  image_urls: Array<string>
+  /**
+   * Negative Prompt
+   *
+   * The negative prompt for the generation
+   */
+  negative_prompt?: string
+  /**
+   * Num Inference Steps
+   *
+   * The number of inference steps to perform.
+   */
+  num_inference_steps?: number
+}
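The base Qwen Image Edit 2509 input is the template all the gallery variants below build on: one or more source URLs plus prompt-guided editing knobs. A minimal payload sketch, package name assumed:

```ts
import type { SchemaQwenImageEdit2509Input } from '@tanstack/ai-fal' // package name assumed

const input: SchemaQwenImageEdit2509Input = {
  prompt: 'make the jacket red and add soft evening light',
  image_urls: ['https://example.com/photo-1.png'], // one or more images to edit
  guidance_scale: 4,       // higher = stick closer to the prompt
  acceleration: 'regular', // trades a little quality for speed
  negative_prompt: 'blurry, low resolution',
  seed: 42, // same seed + same prompt + same model version => same image
}
```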
+
+/**
+ * QwenImageOutput
+ */
+export type SchemaQwenImageEdit2509LoraOutput = {
+  /**
+   * Prompt
+   *
+   * The prompt used for generating the image.
+   */
+  prompt: string
+  /**
+   * Images
+   *
+   * The generated image files info.
+   */
+  images: Array
+  /**
+   * Seed
+   *
+   *
+   * Seed of the generated Image. It will be the same value of the one passed in the
+   * input or the randomly generated that was used in case none was passed.
+   *
+   */
+  seed: number
+  /**
+   * Has Nsfw Concepts
+   *
+   * Whether the generated images contain NSFW concepts.
+   */
+  has_nsfw_concepts: Array<boolean>
+  /**
+   * Timings
+   */
+  timings: {
+    [key: string]: number
+  }
+}
+
+/**
+ * BaseQwenEditImagePlusLoRAInput
+ */
+export type SchemaQwenImageEdit2509LoraInput = {
+  /**
+   * Prompt
+   *
+   * The prompt to generate the image with
+   */
+  prompt: string
+  /**
+   * Num Images
+   *
+   * The number of images to generate.
+   */
+  num_images?: number
+  /**
+   * Image Size
+   *
+   * The size of the generated image. If not provided, the size of the final input image will be used to calculate the size of the output image.
+   */
+  image_size?:
+    | SchemaImageSize
+    | 'square_hd'
+    | 'square'
+    | 'portrait_4_3'
+    | 'portrait_16_9'
+    | 'landscape_4_3'
+    | 'landscape_16_9'
+  /**
+   * Acceleration
+   *
+   * Acceleration level for image generation. Options: 'none', 'regular'. Higher acceleration increases speed. 'regular' balances speed and quality.
+   */
+  acceleration?: 'none' | 'regular'
+  /**
+   * Enable Safety Checker
+   *
+   * If set to true, the safety checker will be enabled.
+   */
+  enable_safety_checker?: boolean
+  /**
+   * Output Format
+   *
+   * The format of the generated image.
+   */
+  output_format?: 'jpeg' | 'png'
+  /**
+   * Loras
+   *
+   *
+   * The LoRAs to use for the image generation. You can use up to 3 LoRAs
+   * and they will be merged together to generate the final image.
+   *
+   */
+  loras?: Array<SchemaLoRaInput>
+  /**
+   * Sync Mode
+   *
+   * If `True`, the media will be returned as a data URI and the output data won't be available in the request history.
+   */
+  sync_mode?: boolean
+  /**
+   * Guidance scale
+   *
+   *
+   * The CFG (Classifier Free Guidance) scale is a measure of how close you want
+   * the model to stick to your prompt when looking for a related image to show you.
+   *
+   */
+  guidance_scale?: number
+  /**
+   * Num Inference Steps
+   *
+   * The number of inference steps to perform.
+   */
+  num_inference_steps?: number
+  /**
+   * Image URLs
+   *
+   * The URLs of the images to edit.
+   */
+  image_urls: Array<string>
+  /**
+   * Negative Prompt
+   *
+   * The negative prompt for the generation
+   */
+  negative_prompt?: string
+  /**
+   * Seed
+   *
+   *
+   * The same seed and the same prompt given to the same version of the model
+   * will output the same image every time.
+   *
+   */
+  seed?: number
+}
+
+/**
+ * MultipleAnglesOutput
+ */
+export type SchemaQwenImageEdit2509LoraGalleryMultipleAnglesOutput = {
+  /**
+   * Images
+   *
+   * The generated/edited images
+   */
+  images: Array
+  /**
+   * Seed
+   *
+   * The seed used for generation
+   */
+  seed: number
+}
+
+/**
+ * MultipleAnglesInput
+ *
+ * Input model for Multiple Angles endpoint - Camera control with precise adjustments
+ */
+export type SchemaQwenImageEdit2509LoraGalleryMultipleAnglesInput = {
+  /**
+   * Image Size
+   *
+   * The size of the generated image. If not provided, the size of the final input image will be used.
+   */
+  image_size?:
+    | SchemaImageSize
+    | 'square_hd'
+    | 'square'
+    | 'portrait_4_3'
+    | 'portrait_16_9'
+    | 'landscape_4_3'
+    | 'landscape_16_9'
+    | unknown
+  /**
+   * Wide-Angle Lens
+   *
+   * Enable wide-angle lens effect
+   */
+  wide_angle_lens?: boolean
+  /**
+   * Acceleration
+   *
+   * Acceleration level for image generation. 'regular' balances speed and quality.
+   */
+  acceleration?: 'none' | 'regular'
+  /**
+   * Guidance Scale
+   *
+   * The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.
+   */
+  guidance_scale?: number
+  /**
+   * Enable Safety Checker
+   *
+   * Whether to enable the safety checker for the generated image.
+   */
+  enable_safety_checker?: boolean
+  /**
+   * Image URLs
+   *
+   * The URL of the image to adjust camera angle for.
+   */
+  image_urls: Array<string>
+  /**
+   * Negative Prompt
+   *
+   * The negative prompt for the generation
+   */
+  negative_prompt?: string
+  /**
+   * Vertical Angle (Bird ⬄ Worm)
+   *
+   * Adjust vertical camera angle (-1=bird's-eye view/looking down, 0=neutral, 1=worm's-eye view/looking up)
+   */
+  vertical_angle?: number
+  /**
+   * Num Images
+   *
+   * Number of images to generate
+   */
+  num_images?: number
+  /**
+   * Move Forward → Close-Up
+   *
+   * Move camera forward (0=no movement, 10=close-up)
+   */
+  move_forward?: number
+  /**
+   * Output Format
+   *
+   * The format of the output image
+   */
+  output_format?: 'png' | 'jpeg' | 'webp'
+  /**
+   * Rotate Right-Left (degrees °)
+   *
+   * Rotate the camera in degrees. Positive values rotate left, negative values rotate right.
+   */
+  rotate_right_left?: number
+  /**
+   * Lora Scale
+   *
+   * The scale factor for the LoRA model. Controls the strength of the camera control effect.
+   */
+  lora_scale?: number
+  /**
+   * Sync Mode
+   *
+   * If `True`, the media will be returned as a data URI and won't be saved in history.
+   */
+  sync_mode?: boolean
+  /**
+   * Seed
+   *
+   * Random seed for reproducibility. Same seed with same prompt will produce same result.
+   */
+  seed?: number | unknown
+  /**
+   * Num Inference Steps
+   *
+   * The number of inference steps to perform.
+   */
+  num_inference_steps?: number
+}
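The Multiple Angles variant is unusual in that it has no prompt; the camera move is expressed entirely through numeric controls. A sketch of those controls with their documented ranges, package name assumed:

```ts
import type { SchemaQwenImageEdit2509LoraGalleryMultipleAnglesInput } from '@tanstack/ai-fal' // package name assumed

const input: SchemaQwenImageEdit2509LoraGalleryMultipleAnglesInput = {
  image_urls: ['https://example.com/product.png'],
  vertical_angle: -0.5,  // -1 = bird's-eye, 0 = neutral, 1 = worm's-eye
  rotate_right_left: 30, // degrees; positive rotates left, negative rotates right
  move_forward: 4,       // 0 = no movement, 10 = close-up
  wide_angle_lens: false,
  lora_scale: 1, // strength of the camera-control LoRA
}
```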
+
+/**
+ * NextSceneOutput
+ */
+export type SchemaQwenImageEdit2509LoraGalleryNextSceneOutput = {
+  /**
+   * Images
+   *
+   * The generated/edited images
+   */
+  images: Array
+  /**
+   * Seed
+   *
+   * The seed used for generation
+   */
+  seed: number
+}
+
+/**
+ * NextSceneInput
+ *
+ * Input model for Next Scene endpoint - Create cinematic shot progressions and scene transitions
+ */
+export type SchemaQwenImageEdit2509LoraGalleryNextSceneInput = {
+  /**
+   * Prompt
+   *
+   * Describe the camera movement, framing change, or scene transition. Start with 'Next Scene:' for best results. Examples: camera movements (dolly, push-in, pull-back), framing changes (wide to close-up), new elements entering frame.
+   */
+  prompt?: string
+  /**
+   * Num Images
+   *
+   * Number of images to generate
+   */
+  num_images?: number
+  /**
+   * Image Size
+   *
+   * The size of the generated image. If not provided, the size of the final input image will be used.
+   */
+  image_size?:
+    | SchemaImageSize
+    | 'square_hd'
+    | 'square'
+    | 'portrait_4_3'
+    | 'portrait_16_9'
+    | 'landscape_4_3'
+    | 'landscape_16_9'
+    | unknown
+  /**
+   * Acceleration
+   *
+   * Acceleration level for image generation. 'regular' balances speed and quality.
+   */
+  acceleration?: 'none' | 'regular'
+  /**
+   * Lora Scale
+   *
+   * The scale factor for the LoRA model. Controls the strength of the LoRA effect.
+   */
+  lora_scale?: number
+  /**
+   * Output Format
+   *
+   * The format of the output image
+   */
+  output_format?: 'png' | 'jpeg' | 'webp'
+  /**
+   * Enable Safety Checker
+   *
+   * Whether to enable the safety checker for the generated image.
+   */
+  enable_safety_checker?: boolean
+  /**
+   * Sync Mode
+   *
+   * If `True`, the media will be returned as a data URI and won't be saved in history.
+   */
+  sync_mode?: boolean
+  /**
+   * Guidance Scale
+   *
+   * The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.
+   */
+  guidance_scale?: number
+  /**
+   * Seed
+   *
+   * Random seed for reproducibility. Same seed with same prompt will produce same result.
+   */
+  seed?: number | unknown
+  /**
+   * Image URLs
+   *
+   * The URL of the image to create the next scene from.
+   */
+  image_urls: Array<string>
+  /**
+   * Negative Prompt
+   *
+   * The negative prompt for the generation
+   */
+  negative_prompt?: string
+  /**
+   * Num Inference Steps
+   *
+   * The number of inference steps to perform.
+   */
+  num_inference_steps?: number
+}
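Next Scene relies on a prompt convention rather than extra parameters: the field docs recommend prefixing the instruction with 'Next Scene:'. A minimal sketch, package name assumed:

```ts
import type { SchemaQwenImageEdit2509LoraGalleryNextSceneInput } from '@tanstack/ai-fal' // package name assumed

const input: SchemaQwenImageEdit2509LoraGalleryNextSceneInput = {
  // The 'Next Scene:' prefix is the documented convention for best results.
  prompt: 'Next Scene: the camera pulls back to reveal the full workshop',
  image_urls: ['https://example.com/frame-01.png'],
  num_images: 1,
}
```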
+
+/**
+ * AddBackgroundOutput
+ */
+export type SchemaQwenImageEdit2509LoraGalleryAddBackgroundOutput = {
+  /**
+   * Images
+   *
+   * The generated/edited images
+   */
+  images: Array
+  /**
+   * Seed
+   *
+   * The seed used for generation
+   */
+  seed: number
+}
+
+/**
+ * AddBackgroundInput
+ *
+ * Input model for Add Background endpoint - Remove white background and add a realistic scene
+ */
+export type SchemaQwenImageEdit2509LoraGalleryAddBackgroundInput = {
+  /**
+   * Prompt
+   *
+   * Describe the background/scene you want to add behind the object. The model will remove the white background and add the specified environment.
+   */
+  prompt?: string
+  /**
+   * Num Images
+   *
+   * Number of images to generate
+   */
+  num_images?: number
+  /**
+   * Image Size
+   *
+   * The size of the generated image. If not provided, the size of the final input image will be used.
+   */
+  image_size?:
+    | SchemaImageSize
+    | 'square_hd'
+    | 'square'
+    | 'portrait_4_3'
+    | 'portrait_16_9'
+    | 'landscape_4_3'
+    | 'landscape_16_9'
+    | unknown
+  /**
+   * Acceleration
+   *
+   * Acceleration level for image generation. 'regular' balances speed and quality.
+   */
+  acceleration?: 'none' | 'regular'
+  /**
+   * Lora Scale
+   *
+   * The scale factor for the LoRA model. Controls the strength of the LoRA effect.
+   */
+  lora_scale?: number
+  /**
+   * Output Format
+   *
+   * The format of the output image
+   */
+  output_format?: 'png' | 'jpeg' | 'webp'
+  /**
+   * Enable Safety Checker
+   *
+   * Whether to enable the safety checker for the generated image.
+   */
+  enable_safety_checker?: boolean
+  /**
+   * Sync Mode
+   *
+   * If `True`, the media will be returned as a data URI and won't be saved in history.
+   */
+  sync_mode?: boolean
+  /**
+   * Guidance Scale
+   *
+   * The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.
+   */
+  guidance_scale?: number
+  /**
+   * Seed
+   *
+   * Random seed for reproducibility. Same seed with same prompt will produce same result.
+   */
+  seed?: number | unknown
+  /**
+   * Image URLs
+   *
+   * The URLs of the images to edit. Provide an image with a white or clean background.
+   */
+  image_urls: Array<string>
+  /**
+   * Negative Prompt
+   *
+   * The negative prompt for the generation
+   */
+  negative_prompt?: string
+  /**
+   * Num Inference Steps
+   *
+   * The number of inference steps to perform.
+   */
+  num_inference_steps?: number
+}
+
+/**
+ * FaceToFullPortraitOutput
+ */
+export type SchemaQwenImageEdit2509LoraGalleryFaceToFullPortraitOutput = {
+  /**
+   * Images
+   *
+   * The generated/edited images
+   */
+  images: Array
+  /**
+   * Seed
+   *
+   * The seed used for generation
+   */
+  seed: number
+}
+
+/**
+ * FaceToFullPortraitInput
+ *
+ * Input model for Face to Full Portrait endpoint - Generate full portrait from a cropped face image
+ */
+export type SchemaQwenImageEdit2509LoraGalleryFaceToFullPortraitInput = {
+  /**
+   * Prompt
+   *
+   * Describe the full portrait you want to generate from the face. Include clothing, setting, pose, and style details.
+   */
+  prompt?: string
+  /**
+   * Num Images
+   *
+   * Number of images to generate
+   */
+  num_images?: number
+  /**
+   * Image Size
+   *
+   * The size of the generated image. If not provided, the size of the final input image will be used.
+   */
+  image_size?:
+    | SchemaImageSize
+    | 'square_hd'
+    | 'square'
+    | 'portrait_4_3'
+    | 'portrait_16_9'
+    | 'landscape_4_3'
+    | 'landscape_16_9'
+    | unknown
+  /**
+   * Acceleration
+   *
+   * Acceleration level for image generation. 'regular' balances speed and quality.
+   */
+  acceleration?: 'none' | 'regular'
+  /**
+   * Lora Scale
+   *
+   * The scale factor for the LoRA model. Controls the strength of the LoRA effect.
+   */
+  lora_scale?: number
+  /**
+   * Output Format
+   *
+   * The format of the output image
+   */
+  output_format?: 'png' | 'jpeg' | 'webp'
+  /**
+   * Enable Safety Checker
+   *
+   * Whether to enable the safety checker for the generated image.
+   */
+  enable_safety_checker?: boolean
+  /**
+   * Sync Mode
+   *
+   * If `True`, the media will be returned as a data URI and won't be saved in history.
+   */
+  sync_mode?: boolean
+  /**
+   * Guidance Scale
+   *
+   * The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.
+   */
+  guidance_scale?: number
+  /**
+   * Seed
+   *
+   * Random seed for reproducibility. Same seed with same prompt will produce same result.
+   */
+  seed?: number | unknown
+  /**
+   * Image URLs
+   *
+   * The URL of the cropped face image. Provide a close-up face photo.
+   */
+  image_urls: Array<string>
+  /**
+   * Negative Prompt
+   *
+   * The negative prompt for the generation
+   */
+  negative_prompt?: string
+  /**
+   * Num Inference Steps
+   *
+   * The number of inference steps to perform.
+   */
+  num_inference_steps?: number
+}
+
+/**
+ * GroupPhotoOutput
+ */
+export type SchemaQwenImageEdit2509LoraGalleryGroupPhotoOutput = {
+  /**
+   * Images
+   *
+   * The generated/edited images
+   */
+  images: Array
+  /**
+   * Seed
+   *
+   * The seed used for generation
+   */
+  seed: number
+}
+
+/**
+ * GroupPhotoInput
+ *
+ * Input model for Group Photo endpoint - Create composite group photos with vintage/retro style
+ */
+export type SchemaQwenImageEdit2509LoraGalleryGroupPhotoInput = {
+  /**
+   * Prompt
+   *
+   * Describe the group photo scene, setting, and style. The model will maintain character consistency and add vintage effects like grain, blur, and retro filters.
+   */
+  prompt?: string
+  /**
+   * Num Images
+   *
+   * Number of images to generate
+   */
+  num_images?: number
+  /**
+   * Image Size
+   *
+   * The size of the generated image. If not provided, the size of the final input image will be used.
+   */
+  image_size?:
+    | SchemaImageSize
+    | 'square_hd'
+    | 'square'
+    | 'portrait_4_3'
+    | 'portrait_16_9'
+    | 'landscape_4_3'
+    | 'landscape_16_9'
+    | unknown
+  /**
+   * Acceleration
+   *
+   * Acceleration level for image generation. 'regular' balances speed and quality.
+   */
+  acceleration?: 'none' | 'regular'
+  /**
+   * Lora Scale
+   *
+   * The scale factor for the LoRA model. Controls the strength of the LoRA effect.
+   */
+  lora_scale?: number
+  /**
+   * Output Format
+   *
+   * The format of the output image
+   */
+  output_format?: 'png' | 'jpeg' | 'webp'
+  /**
+   * Enable Safety Checker
+   *
+   * Whether to enable the safety checker for the generated image.
+   */
+  enable_safety_checker?: boolean
+  /**
+   * Sync Mode
+   *
+   * If `True`, the media will be returned as a data URI and won't be saved in history.
+   */
+  sync_mode?: boolean
+  /**
+   * Guidance Scale
+   *
+   * The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.
+   */
+  guidance_scale?: number
+  /**
+   * Seed
+   *
+   * Random seed for reproducibility. Same seed with same prompt will produce same result.
+   */
+  seed?: number | unknown
+  /**
+   * Image URLs
+   *
+   * The URLs of the images to combine into a group photo. Provide 2 or more individual portrait images.
+   */
+  image_urls: Array<string>
+  /**
+   * Negative Prompt
+   *
+   * The negative prompt for the generation
+   */
+  negative_prompt?: string
+  /**
+   * Num Inference Steps
+   *
+   * The number of inference steps to perform.
+   */
+  num_inference_steps?: number
+}
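Group Photo is the one gallery variant whose `image_urls` is genuinely plural: it composites two or more individual portraits into a single scene. A sketch, package name assumed:

```ts
import type { SchemaQwenImageEdit2509LoraGalleryGroupPhotoInput } from '@tanstack/ai-fal' // package name assumed

const input: SchemaQwenImageEdit2509LoraGalleryGroupPhotoInput = {
  // Two or more individual portraits, per the image_urls description.
  image_urls: [
    'https://example.com/person-a.png',
    'https://example.com/person-b.png',
    'https://example.com/person-c.png',
  ],
  prompt: 'three friends on a 1970s beach boardwalk, faded film grain',
  lora_scale: 1, // strength of the vintage group-photo LoRA effect
}
```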
+
+/**
+ * IntegrateProductOutput
+ */
+export type SchemaQwenImageEdit2509LoraGalleryIntegrateProductOutput = {
+  /**
+   * Images
+   *
+   * The generated/edited images
+   */
+  images: Array
+  /**
+   * Seed
+   *
+   * The seed used for generation
+   */
+  seed: number
+}
+
+/**
+ * IntegrateProductInput
+ *
+ * Input model for Integrate Product endpoint - Blend and integrate products/elements into backgrounds
+ */
+export type SchemaQwenImageEdit2509LoraGalleryIntegrateProductInput = {
+  /**
+   * Prompt
+   *
+   * Describe how to blend and integrate the product/element into the background. The model will automatically correct perspective, lighting and shadows for natural integration.
+   */
+  prompt?: string
+  /**
+   * Num Images
+   *
+   * Number of images to generate
+   */
+  num_images?: number
+  /**
+   * Image Size
+   *
+   * The size of the generated image. If not provided, the size of the final input image will be used.
+   */
+  image_size?:
+    | SchemaImageSize
+    | 'square_hd'
+    | 'square'
+    | 'portrait_4_3'
+    | 'portrait_16_9'
+    | 'landscape_4_3'
+    | 'landscape_16_9'
+    | unknown
+  /**
+   * Acceleration
+   *
+   * Acceleration level for image generation. 'regular' balances speed and quality.
+   */
+  acceleration?: 'none' | 'regular'
+  /**
+   * Lora Scale
+   *
+   * The scale factor for the LoRA model. Controls the strength of the LoRA effect.
+   */
+  lora_scale?: number
+  /**
+   * Output Format
+   *
+   * The format of the output image
+   */
+  output_format?: 'png' | 'jpeg' | 'webp'
+  /**
+   * Enable Safety Checker
+   *
+   * Whether to enable the safety checker for the generated image.
+   */
+  enable_safety_checker?: boolean
+  /**
+   * Sync Mode
+   *
+   * If `True`, the media will be returned as a data URI and won't be saved in history.
+   */
+  sync_mode?: boolean
+  /**
+   * Guidance Scale
+   *
+   * The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.
+   */
+  guidance_scale?: number
+  /**
+   * Seed
+   *
+   * Random seed for reproducibility. Same seed with same prompt will produce same result.
+   */
+  seed?: number | unknown
+  /**
+   * Image URLs
+   *
+   * The URL of the image with product to integrate into background.
+   */
+  image_urls: Array<string>
+  /**
+   * Negative Prompt
+   *
+   * The negative prompt for the generation
+   */
+  negative_prompt?: string
+  /**
+   * Num Inference Steps
+   *
+   * The number of inference steps to perform.
+   */
+  num_inference_steps?: number
+}
+
+/**
+ * LightingRestorationOutput
+ */
+export type SchemaQwenImageEdit2509LoraGalleryLightingRestorationOutput = {
+  /**
+   * Images
+   *
+   * The generated/edited images
+   */
+  images: Array
+  /**
+   * Seed
+   *
+   * The seed used for generation
+   */
+  seed: number
+}
+
+/**
+ * LightingRestorationInput
+ *
+ * Input model for Lighting Restoration endpoint - Restore natural lighting by removing harsh shadows and light spots
+ */
+export type SchemaQwenImageEdit2509LoraGalleryLightingRestorationInput = {
+  /**
+   * Enable Safety Checker
+   *
+   * Whether to enable the safety checker for the generated image.
+   */
+  enable_safety_checker?: boolean
+  /**
+   * Num Images
+   *
+   * Number of images to generate
+   */
+  num_images?: number
+  /**
+   * Image Size
+   *
+   * The size of the generated image. If not provided, the size of the final input image will be used.
+   */
+  image_size?:
+    | SchemaImageSize
+    | 'square_hd'
+    | 'square'
+    | 'portrait_4_3'
+    | 'portrait_16_9'
+    | 'landscape_4_3'
+    | 'landscape_16_9'
+    | unknown
+  /**
+   * Acceleration
+   *
+   * Acceleration level for image generation. 'regular' balances speed and quality.
+   */
+  acceleration?: 'none' | 'regular'
+  /**
+   * Output Format
+   *
+   * The format of the output image
+   */
+  output_format?: 'png' | 'jpeg' | 'webp'
+  /**
+   * Sync Mode
+   *
+   * If `True`, the media will be returned as a data URI and won't be saved in history.
+   */
+  sync_mode?: boolean
+  /**
+   * Guidance Scale
+   *
+   * The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.
+   */
+  guidance_scale?: number
+  /**
+   * Seed
+   *
+   * Random seed for reproducibility. Same seed with same prompt will produce same result.
+   */
+  seed?: number | unknown
+  /**
+   * Image URLs
+   *
+   * The URL of the image to restore lighting for.
+   */
+  image_urls: Array<string>
+  /**
+   * Negative Prompt
+   *
+   * The negative prompt for the generation
+   */
+  negative_prompt?: string
+  /**
+   * Num Inference Steps
+   *
+   * The number of inference steps to perform.
+   */
+  num_inference_steps?: number
+}
+
+/**
+ * RemoveElementOutput
+ */
+export type SchemaQwenImageEdit2509LoraGalleryRemoveElementOutput = {
+  /**
+   * Images
+   *
+   * The generated/edited images
+   */
+  images: Array
+  /**
+   * Seed
+   *
+   * The seed used for generation
+   */
+  seed: number
+}
+
+/**
+ * RemoveElementInput
+ *
+ * Input model for Remove Element endpoint - Remove/delete elements (objects, people, text) from the image
+ */
+export type SchemaQwenImageEdit2509LoraGalleryRemoveElementInput = {
+  /**
+   * Prompt
+   *
+   * Specify what element(s) to remove from the image (objects, people, text, etc.). The model will cleanly remove the element while maintaining consistency of the rest of the image.
+   */
+  prompt?: string
+  /**
+   * Num Images
+   *
+   * Number of images to generate
+   */
+  num_images?: number
+  /**
+   * Image Size
+   *
+   * The size of the generated image. If not provided, the size of the final input image will be used.
+   */
+  image_size?:
+    | SchemaImageSize
+    | 'square_hd'
+    | 'square'
+    | 'portrait_4_3'
+    | 'portrait_16_9'
+    | 'landscape_4_3'
+    | 'landscape_16_9'
+    | unknown
+  /**
+   * Acceleration
+   *
+   * Acceleration level for image generation. 'regular' balances speed and quality.
+   */
+  acceleration?: 'none' | 'regular'
+  /**
+   * Lora Scale
+   *
+   * The scale factor for the LoRA model. Controls the strength of the LoRA effect.
+   */
+  lora_scale?: number
+  /**
+   * Output Format
+   *
+   * The format of the output image
+   */
+  output_format?: 'png' | 'jpeg' | 'webp'
+  /**
+   * Enable Safety Checker
+   *
+   * Whether to enable the safety checker for the generated image.
+   */
+  enable_safety_checker?: boolean
+  /**
+   * Sync Mode
+   *
+   * If `True`, the media will be returned as a data URI and won't be saved in history.
+   */
+  sync_mode?: boolean
+  /**
+   * Guidance Scale
+   *
+   * The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.
+   */
+  guidance_scale?: number
+  /**
+   * Seed
+   *
+   * Random seed for reproducibility. Same seed with same prompt will produce same result.
+   */
+  seed?: number | unknown
+  /**
+   * Image URLs
+   *
+   * The URL of the image containing elements to remove.
+   */
+  image_urls: Array<string>
+  /**
+   * Negative Prompt
+   *
+   * The negative prompt for the generation
+   */
+  negative_prompt?: string
+  /**
+   * Num Inference Steps
+   *
+   * The number of inference steps to perform.
+   */
+  num_inference_steps?: number
+}
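Remove Element works inpainting-style but without a mask: the prompt alone names what to delete, and the model reconstructs the background behind it. A sketch, package name assumed:

```ts
import type { SchemaQwenImageEdit2509LoraGalleryRemoveElementInput } from '@tanstack/ai-fal' // package name assumed

const input: SchemaQwenImageEdit2509LoraGalleryRemoveElementInput = {
  image_urls: ['https://example.com/street.png'],
  // Name the elements to delete; the rest of the image is kept consistent.
  prompt: 'remove the parked van and the street sign',
  negative_prompt: 'artifacts, smudges',
}
```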
+
+/**
+ * RemoveLightingOutput
+ */
+export type SchemaQwenImageEdit2509LoraGalleryRemoveLightingOutput = {
+  /**
+   * Images
+   *
+   * The generated/edited images
+   */
+  images: Array
+  /**
+   * Seed
+   *
+   * The seed used for generation
+   */
+  seed: number
+}
+
+/**
+ * RemoveLightingInput
+ *
+ * Input model for Remove Lighting endpoint - Remove existing lighting and apply soft even lighting
+ */
+export type SchemaQwenImageEdit2509LoraGalleryRemoveLightingInput = {
+  /**
+   * Enable Safety Checker
+   *
+   * Whether to enable the safety checker for the generated image.
+   */
+  enable_safety_checker?: boolean
+  /**
+   * Num Images
+   *
+   * Number of images to generate
+   */
+  num_images?: number
+  /**
+   * Image Size
+   *
+   * The size of the generated image. If not provided, the size of the final input image will be used.
+   */
+  image_size?:
+    | SchemaImageSize
+    | 'square_hd'
+    | 'square'
+    | 'portrait_4_3'
+    | 'portrait_16_9'
+    | 'landscape_4_3'
+    | 'landscape_16_9'
+    | unknown
+  /**
+   * Acceleration
+   *
+   * Acceleration level for image generation. 'regular' balances speed and quality.
+   */
+  acceleration?: 'none' | 'regular'
+  /**
+   * Output Format
+   *
+   * The format of the output image
+   */
+  output_format?: 'png' | 'jpeg' | 'webp'
+  /**
+   * Sync Mode
+   *
+   * If `True`, the media will be returned as a data URI and won't be saved in history.
+   */
+  sync_mode?: boolean
+  /**
+   * Guidance Scale
+   *
+   * The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.
+   */
+  guidance_scale?: number
+  /**
+   * Seed
+   *
+   * Random seed for reproducibility. Same seed with same prompt will produce same result.
+   */
+  seed?: number | unknown
+  /**
+   * Image URLs
+   *
+   * The URL of the image with lighting/shadows to remove.
+   */
+  image_urls: Array<string>
+  /**
+   * Negative Prompt
+   *
+   * The negative prompt for the generation
+   */
+  negative_prompt?: string
+  /**
+   * Num Inference Steps
+   *
+   * The number of inference steps to perform.
+   */
+  num_inference_steps?: number
+}
+
+/**
+ * ShirtDesignOutput
+ */
+export type SchemaQwenImageEdit2509LoraGalleryShirtDesignOutput = {
+  /**
+   * Images
+   *
+   * The generated/edited images
+   */
+  images: Array
+  /**
+   * Seed
+   *
+   * The seed used for generation
+   */
+  seed: number
+}
+
+/**
+ * ShirtDesignInput
+ *
+ * Input model for Shirt Design endpoint - Put designs/graphics on people's shirts
+ */
+export type SchemaQwenImageEdit2509LoraGalleryShirtDesignInput = {
+  /**
+   * Prompt
+   *
+   * Describe what design to put on the shirt. The model will apply the design from your input image onto the person's shirt.
+   */
+  prompt?: string
+  /**
+   * Num Images
+   *
+   * Number of images to generate
+   */
+  num_images?: number
+  /**
+   * Image Size
+   *
+   * The size of the generated image. If not provided, the size of the final input image will be used.
+   */
+  image_size?:
+    | SchemaImageSize
+    | 'square_hd'
+    | 'square'
+    | 'portrait_4_3'
+    | 'portrait_16_9'
+    | 'landscape_4_3'
+    | 'landscape_16_9'
+    | unknown
+  /**
+   * Acceleration
+   *
+   * Acceleration level for image generation. 'regular' balances speed and quality.
+   */
+  acceleration?: 'none' | 'regular'
+  /**
+   * Lora Scale
+   *
+   * The scale factor for the LoRA model. Controls the strength of the LoRA effect.
+   */
+  lora_scale?: number
+  /**
+   * Output Format
+   *
+   * The format of the output image
+   */
+  output_format?: 'png' | 'jpeg' | 'webp'
+  /**
+   * Enable Safety Checker
+   *
+   * Whether to enable the safety checker for the generated image.
+   */
+  enable_safety_checker?: boolean
+  /**
+   * Sync Mode
+   *
+   * If `True`, the media will be returned as a data URI and won't be saved in history.
+   */
+  sync_mode?: boolean
+  /**
+   * Guidance Scale
+   *
+   * The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.
+   */
+  guidance_scale?: number
+  /**
+   * Seed
+   *
+   * Random seed for reproducibility. Same seed with same prompt will produce same result.
+   */
+  seed?: number | unknown
+  /**
+   * Image URLs
+   *
+   * The URLs of the images: first image is the person wearing a shirt, second image is the design/logo to put on the shirt.
+   */
+  image_urls: Array<string>
+  /**
+   * Negative Prompt
+   *
+   * The negative prompt for the generation
+   */
+  negative_prompt?: string
+  /**
+   * Num Inference Steps
+   *
+   * The number of inference steps to perform.
+   */
+  num_inference_steps?: number
+}
+
+/**
+ * FluxSingleIDOutput
+ */
+export type SchemaAiBabyAndAgingGeneratorSingleOutput = {
+  /**
+   * Prompt
+   *
+   * The final prompt used for generation
+   */
+  prompt: string
+  /**
+   * Images
+   *
+   * The generated image files info
+   */
+  images: Array
+  /**
+   * Seed
+   *
+   * The seed used for generation
+   */
+  seed: number
+}
+
+/**
+ * SingleFluxIDInput
+ *
+ * Input schema for single mode generation
+ */
+export type SchemaAiBabyAndAgingGeneratorSingleInput = {
+  /**
+   * Prompt
+   *
+   * Text prompt to guide the image generation
+   */
+  prompt?: string
+  /**
+   * Num Images
+   *
+   * Number of images to generate
+   */
+  num_images?: number
+  /**
+   * Image Size
+   *
+   * The size of the generated image
+   */
+  image_size?:
+    | SchemaImageSize
+    | 'square_hd'
+    | 'square'
+    | 'portrait_4_3'
+    | 'portrait_16_9'
+    | 'landscape_4_3'
+    | 'landscape_16_9'
+  /**
+   * Id Image Urls
+   *
+   * List of ID images for single mode (or general reference images)
+   */
+  id_image_urls: Array<string>
+  /**
+   * Output Format
+   *
+   * The format of the generated image. Choose from: 'jpeg' or 'png'.
+   */
+  output_format?: 'jpeg' | 'png'
+  /**
+   * Age Group
+   *
+   * Age group for the generated image. Choose from: 'baby' (0-12 months), 'toddler' (1-3 years), 'preschool' (3-5 years), 'gradeschooler' (6-12 years), 'teen' (13-19 years), 'adult' (20-40 years), 'mid' (40-60 years), 'senior' (60+ years).
+   */
+  age_group:
+    | 'baby'
+    | 'toddler'
+    | 'preschool'
+    | 'gradeschooler'
+    | 'teen'
+    | 'adult'
+    | 'mid'
+    | 'senior'
+  /**
+   * Gender
+   *
+   * Gender for the generated image. Choose from: 'male' or 'female'.
+   */
+  gender: 'male' | 'female'
+  /**
+   * Seed
+   *
+   * Random seed for reproducibility. If None, a random seed will be used
+   */
+  seed?: number | unknown
+}
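The aging generator's single mode takes one set of identity photos and two required categorical fields. A sketch, package name assumed; multi mode (below) instead takes separate `mother_image_urls`/`father_image_urls` lists and blends them via `father_weight`:

```ts
import type { SchemaAiBabyAndAgingGeneratorSingleInput } from '@tanstack/ai-fal' // package name assumed

const input: SchemaAiBabyAndAgingGeneratorSingleInput = {
  id_image_urls: ['https://example.com/face.png'], // identity reference photos
  age_group: 'senior', // one of the eight documented age brackets
  gender: 'female',
  prompt: 'studio portrait, warm lighting', // optional guidance on top of the ID images
}
```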
+
+/**
+ * FluxMultiIDOutput
+ */
+export type SchemaAiBabyAndAgingGeneratorMultiOutput = {
+  /**
+   * Prompt
+   *
+   * The final prompt used for generation
+   */
+  prompt: string
+  /**
+   * Images
+   *
+   * The generated image files info
+   */
+  images: Array
+  /**
+   * Seed
+   *
+   * The seed used for generation
+   */
+  seed: number
+}
+
+/**
+ * MultiFluxIDInput
+ *
+ * Input schema for multi mode generation
+ */
+export type SchemaAiBabyAndAgingGeneratorMultiInput = {
+  /**
+   * Prompt
+   *
+   * Text prompt to guide the image generation
+   */
+  prompt?: string
+  /**
+   * Num Images
+   *
+   * Number of images to generate
+   */
+  num_images?: number
+  /**
+   * Image Size
+   *
+   * The size of the generated image
+   */
+  image_size?:
+    | SchemaImageSize
+    | 'square_hd'
+    | 'square'
+    | 'portrait_4_3'
+    | 'portrait_16_9'
+    | 'landscape_4_3'
+    | 'landscape_16_9'
+  /**
+   * Father Weight
+   *
+   * Weight of the father's influence in multi mode generation
+   */
+  father_weight?: number
+  /**
+   * Mother Image Urls
+   *
+   * List of mother images for multi mode
+   */
+  mother_image_urls: Array<string>
+  /**
+   * Output Format
+   *
+   * The format of the generated image. Choose from: 'jpeg' or 'png'.
+   */
+  output_format?: 'jpeg' | 'png'
+  /**
+   * Age Group
+   *
+   * Age group for the generated image. Choose from: 'baby' (0-12 months), 'toddler' (1-3 years), 'preschool' (3-5 years), 'gradeschooler' (6-12 years), 'teen' (13-19 years), 'adult' (20-40 years), 'mid' (40-60 years), 'senior' (60+ years).
+   */
+  age_group:
+    | 'baby'
+    | 'toddler'
+    | 'preschool'
+    | 'gradeschooler'
+    | 'teen'
+    | 'adult'
+    | 'mid'
+    | 'senior'
+  /**
+   * Gender
+   *
+   * Gender for the generated image. Choose from: 'male' or 'female'.
+   */
+  gender: 'male' | 'female'
+  /**
+   * Father Image Urls
+   *
+   * List of father images for multi mode
+   */
+  father_image_urls: Array<string>
+  /**
+   * Seed
+   *
+   * Random seed for reproducibility. If None, a random seed will be used
+   */
+  seed?: number | unknown
+}
+
+/**
+ * Flux2MaxEditOutput
+ */
+export type SchemaFlux2MaxEditOutput = {
+  /**
+   * Images
+   *
+   * The generated images.
+   */
+  images: Array
+  /**
+   * Seed
+   *
+   * The seed used for the generation.
+   */
+  seed: number
+}
+
+/**
+ * Flux2MaxImageEditInput
+ */
+export type SchemaFlux2MaxEditInput = {
+  /**
+   * Prompt
+   *
+   * The prompt to generate an image from.
+   */
+  prompt: string
+  /**
+   * Image Size
+   *
+   * The size of the generated image. If `auto`, the size will be determined by the model.
+   */
+  image_size?:
+    | SchemaImageSize
+    | 'auto'
+    | 'square_hd'
+    | 'square'
+    | 'portrait_4_3'
+    | 'portrait_16_9'
+    | 'landscape_4_3'
+    | 'landscape_16_9'
+  /**
+   * Output Format
+   *
+   * The format of the generated image.
+   */
+  output_format?: 'jpeg' | 'png'
+  /**
+   * Sync Mode
+   *
+   * If `True`, the media will be returned as a data URI and the output data won't be available in the request history.
+   */
+  sync_mode?: boolean
+  /**
+   * Safety Tolerance
+   *
+   * The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive.
+   */
+  safety_tolerance?: '1' | '2' | '3' | '4' | '5'
+  /**
+   * Enable Safety Checker
+   *
+   * Whether to enable the safety checker.
+   */
+  enable_safety_checker?: boolean
+  /**
+   * Seed
+   *
+   * The seed to use for the generation.
+   */
+  seed?: number
+  /**
+   * Image URLs
+   *
+   * List of URLs of input images for editing
+   */
+  image_urls: Array<string>
+}
+
+/**
+ * Flux2TurboEditImageOutput
+ */
+export type SchemaFlux2TurboEditOutput = {
+  /**
+   * Prompt
+   *
+   * The prompt used for generating the image.
+   */
+  prompt: string
+  /**
+   * Images
+   *
+   * The edited images
+   */
+  images: Array
+  /**
+   * Seed
+   *
+   *
+   * Seed of the generated Image. It will be the same value of the one passed in the
+   * input or the randomly generated that was used in case none was passed.
+   *
+   */
+  seed: number
+  /**
+   * Has Nsfw Concepts
+   *
+   * Whether the generated images contain NSFW concepts.
+   */
+  has_nsfw_concepts: Array<boolean>
+  /**
+   * Timings
+   */
+  timings: {
+    [key: string]: number
+  }
+}
+
+/**
+ * Flux2TurboEditImageInput
+ */
+export type SchemaFlux2TurboEditInput = {
+  /**
+   * Prompt
+   *
+   * The prompt to edit the image.
+   */
+  prompt: string
+  /**
+   * Number of Images
+   *
+   * The number of images to generate.
+   */
+  num_images?: number
+  /**
+   * Image Size
+   *
+   * The size of the image to generate. The width and height must be between 512 and 2048 pixels.
+   */
+  image_size?:
+    | SchemaImageSize
+    | 'square_hd'
+    | 'square'
+    | 'portrait_4_3'
+    | 'portrait_16_9'
+    | 'landscape_4_3'
+    | 'landscape_16_9'
+  /**
+   * Output Format
+   *
+   * The format of the generated image.
+   */
+  output_format?: 'jpeg' | 'png' | 'webp'
+  /**
+   * Sync Mode
+   *
+   * If `True`, the media will be returned as a data URI and the output data won't be available in the request history.
+   */
+  sync_mode?: boolean
+  /**
+   * Guidance Scale
+   *
+   * Guidance Scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.
+   */
+  guidance_scale?: number
+  /**
+   * Seed
+   *
+   * The seed to use for the generation. If not provided, a random seed will be used.
+   */
+  seed?: number
+  /**
+   * Image URLs
+   *
+   * The URLs of the images for editing. A maximum of 4 images are allowed, if more are provided, only the first 4 will be used.
+   */
+  image_urls: Array<string>
+  /**
+   * Enable Prompt Expansion
+   *
+   * If set to true, the prompt will be expanded for better results.
+   */
+  enable_prompt_expansion?: boolean
+  /**
+   * Enable Safety Checker
+   *
+   * If set to true, the safety checker will be enabled.
+   */
+  enable_safety_checker?: boolean
+}
+
+/**
+ * EditImageResponse
+ */
+export type SchemaGptImage15EditOutput = {
+  /**
+   * Images
+   *
+   * The generated images.
+   */
+  images: Array
+}
+
+/**
+ * EditImageRequest
+ */
+export type SchemaGptImage15EditInput = {
+  /**
+   * Input Fidelity
+   *
+   * Input fidelity for the generated image
+   */
+  input_fidelity?: 'low' | 'high'
+  /**
+   * Number of Images
+   *
+   * Number of images to generate
+   */
+  num_images?: number
+  /**
+   * Image Size
+   *
+   * Aspect ratio for the generated image
+   */
+  image_size?: 'auto' | '1024x1024' | '1536x1024' | '1024x1536'
+  /**
+   * Prompt
+   *
+   * The prompt for image generation
+   */
+  prompt: string
+  /**
+   * Quality
+   *
+   * Quality for the generated image
+   */
+  quality?: 'low' | 'medium' | 'high'
+  /**
+   * Output Format
+   *
+   * Output format for the images
+   */
+  output_format?: 'jpeg' | 'png' | 'webp'
+  /**
+   * Background
+   *
+   * Background for the generated image
+   */
+  background?: 'auto' | 'transparent' | 'opaque'
+  /**
+   * Mask Image URL
+   *
+   * The URL of the mask image to use for the generation. This indicates what part of the image to edit.
+   */
+  mask_image_url?: string
+  /**
+   * Sync Mode
+   *
+   * If `True`, the media will be returned as a data URI and the output data won't be available in the request history.
+   */
+  sync_mode?: boolean
+  /**
+   * Image URLs
+   *
+   * The URLs of the images to use as a reference for the generation.
+   */
+  image_urls: Array<string>
+}
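Note that the GPT Image 1.5 edit schema diverges from the others: `image_size` is a fixed-dimension string enum rather than the shared `SchemaImageSize` union, and there is no seed. A sketch of a masked edit, package name assumed:

```ts
import type { SchemaGptImage15EditInput } from '@tanstack/ai-fal' // package name assumed

const input: SchemaGptImage15EditInput = {
  prompt: 'turn the mug into a glass teapot',
  image_urls: ['https://example.com/mug.png'],
  // The mask marks which part of the image to edit, per the field docs.
  mask_image_url: 'https://example.com/mug-mask.png',
  input_fidelity: 'high', // preserve more of the reference images
  quality: 'high',
  background: 'transparent',
  image_size: '1024x1024', // fixed presets here, not the shared ImageSize union
}
```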
+
+/**
+ * Flux2FlashEditImageOutput
+ */
+export type SchemaFlux2FlashEditOutput = {
+  /**
+   * Prompt
+   *
+   * The prompt used for generating the image.
+   */
+  prompt: string
+  /**
+   * Images
+   *
+   * The edited images
+   */
+  images: Array
+  /**
+   * Seed
+   *
+   *
+   * Seed of the generated Image. It will be the same value of the one passed in the
+   * input or the randomly generated that was used in case none was passed.
+   *
+   */
+  seed: number
+  /**
+   * Has Nsfw Concepts
+   *
+   * Whether the generated images contain NSFW concepts.
+   */
+  has_nsfw_concepts: Array<boolean>
+  /**
+   * Timings
+   */
+  timings: {
+    [key: string]: number
+  }
+}
+
+/**
+ * Flux2FlashEditImageInput
+ */
+export type SchemaFlux2FlashEditInput = {
+  /**
+   * Prompt
+   *
+   * The prompt to edit the image.
+   */
+  prompt: string
+  /**
+   * Number of Images
+   *
+   * The number of images to generate.
+   */
+  num_images?: number
+  /**
+   * Image Size
+   *
+   * The size of the image to generate. The width and height must be between 512 and 2048 pixels.
+   */
+  image_size?:
+    | SchemaImageSize
+    | 'square_hd'
+    | 'square'
+    | 'portrait_4_3'
+    | 'portrait_16_9'
+    | 'landscape_4_3'
+    | 'landscape_16_9'
+  /**
+   * Output Format
+   *
+   * The format of the generated image.
+   */
+  output_format?: 'jpeg' | 'png' | 'webp'
+  /**
+   * Sync Mode
+   *
+   * If `True`, the media will be returned as a data URI and the output data won't be available in the request history.
+   */
+  sync_mode?: boolean
+  /**
+   * Guidance Scale
+   *
+   * Guidance Scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.
+   */
+  guidance_scale?: number
+  /**
+   * Seed
+   *
+   * The seed to use for the generation. If not provided, a random seed will be used.
+   */
+  seed?: number
+  /**
+   * Image URLs
+   *
+   * The URLs of the images for editing. A maximum of 4 images are allowed, if more are provided, only the first 4 will be used.
+   */
+  image_urls: Array<string>
+  /**
+   * Enable Prompt Expansion
+   *
+   * If set to true, the prompt will be expanded for better results.
+   */
+  enable_prompt_expansion?: boolean
+  /**
+   * Enable Safety Checker
+   *
+   * If set to true, the safety checker will be enabled.
+   */
+  enable_safety_checker?: boolean
+}
+
+/**
+ * ZImageTurboInpaintOutput
+ */
+export type SchemaZImageTurboInpaintOutput = {
+  /**
+   * Prompt
+   *
+   * The prompt used for generating the image.
+   */
+  prompt: string
+  /**
+   * Images
+   *
+   * The generated image files info.
+   */
+  images: Array
+  /**
+   * Seed
+   *
+   * Seed of the generated Image. It will be the same value of the one passed in the input or the randomly generated that was used in case none was passed.
+   */
+  seed: number
+  /**
+   * Has Nsfw Concepts
+   *
+   * Whether the generated images contain NSFW concepts.
+   */
+  has_nsfw_concepts: Array<boolean>
+  /**
+   * Timings
+   *
+   * The timings of the generation process.
+   */
+  timings: {
+    [key: string]: number
+  }
+}
+
+/**
+ * ZImageTurboInpaintInput
+ */
+export type SchemaZImageTurboInpaintInput = {
+  /**
+   * Prompt
+   *
+   * The prompt to generate an image from.
+   */
+  prompt: string
+  /**
+   * Acceleration
+   *
+   * The acceleration level to use.
+   */
+  acceleration?: 'none' | 'regular' | 'high'
+  /**
+   * Image Size
+   *
+   * The size of the generated image.
+   */
+  image_size?:
+    | SchemaImageSize
+    | 'square_hd'
+    | 'square'
+    | 'portrait_4_3'
+    | 'portrait_16_9'
+    | 'landscape_4_3'
+    | 'landscape_16_9'
+    | 'auto'
+  /**
+   * Mask Image URL
+   *
+   * URL of Mask for Inpaint generation.
+   */
+  mask_image_url: string
+  /**
+   * Control End
+   *
+   * The end of the controlnet conditioning.
+   */
+  control_end?: number
+  /**
+   * Control Start
+   *
+   * The start of the controlnet conditioning.
+   */
+  control_start?: number
+  /**
+   * Enable Safety Checker
+   *
+   * If set to true, the safety checker will be enabled.
+   */
+  enable_safety_checker?: boolean
+  /**
+   * Number of Images
+   *
+   * The number of images to generate.
+   */
+  num_images?: number
+  /**
+   * Output Format
+   *
+   * The format of the generated image.
+   */
+  output_format?: 'jpeg' | 'png' | 'webp'
+  /**
+   * Image URL
+   *
+   * URL of Image for Inpaint generation.
+   */
+  image_url: string
+  /**
+   * Sync Mode
+   *
+   * If `True`, the media will be returned as a data URI and the output data won't be available in the request history.
+   */
+  sync_mode?: boolean
+  /**
+   * Strength
+   *
+   * The strength of the inpaint conditioning.
+   */
+  strength?: number
+  /**
+   * Control Scale
+   *
+   * The scale of the controlnet conditioning.
+   */
+  control_scale?: number
+  /**
+   * Enable Prompt Expansion
+   *
+   * Whether to enable prompt expansion. Note: this will increase the price by 0.0025 credits per request.
+   */
+  enable_prompt_expansion?: boolean
+  /**
+   * Number of Inference Steps
+   *
+   * The number of inference steps to perform.
+   */
+  num_inference_steps?: number
+  /**
+   * Seed
+   *
+   *
+   * The same seed and the same prompt given to the same version of the model
+   * will output the same image every time.
+   *
+   */
+  seed?: number
+}
+
+/**
+ * ZImageTurboInpaintLoRAOutput
+ */
+export type SchemaZImageTurboInpaintLoraOutput = {
+  /**
+   * Prompt
+   *
+   * The prompt used for generating the image.
+   */
+  prompt: string
+  /**
+   * Images
+   *
+   * The generated image files info.
+   */
+  images: Array
+  /**
+   * Seed
+   *
+   * Seed of the generated Image. It will be the same value of the one passed in the input or the randomly generated that was used in case none was passed.
+   */
+  seed: number
+  /**
+   * Has Nsfw Concepts
+   *
+   * Whether the generated images contain NSFW concepts.
+   */
+  has_nsfw_concepts: Array<boolean>
+  /**
+   * Timings
+   *
+   * The timings of the generation process.
+   */
+  timings: {
+    [key: string]: number
+  }
+}
+
+/**
+ * ZImageTurboInpaintLoRAInput
+ */
+export type SchemaZImageTurboInpaintLoraInput = {
+  /**
+   * Prompt
+   *
+   * The prompt to generate an image from.
+   */
+  prompt: string
+  /**
+   * Acceleration
+   *
+   * The acceleration level to use.
+   */
+  acceleration?: 'none' | 'regular' | 'high'
+  /**
+   * Image Size
+   *
+   * The size of the generated image.
+   */
+  image_size?:
+    | SchemaImageSize
+    | 'square_hd'
+    | 'square'
+    | 'portrait_4_3'
+    | 'portrait_16_9'
+    | 'landscape_4_3'
+    | 'landscape_16_9'
+    | 'auto'
+  /**
+   * Mask Image URL
+   *
+   * URL of Mask for Inpaint generation.
+   */
+  mask_image_url: string
+  /**
+   * Loras
+   *
+   * List of LoRA weights to apply (maximum 3).
+   */
+  loras?: Array<SchemaLoRaInput>
+  /**
+   * Control End
+   *
+   * The end of the controlnet conditioning.
+   */
+  control_end?: number
+  /**
+   * Control Start
+   *
+   * The start of the controlnet conditioning.
+   */
+  control_start?: number
+  /**
+   * Enable Safety Checker
+   *
+   * If set to true, the safety checker will be enabled.
+   */
+  enable_safety_checker?: boolean
+  /**
+   * Number of Images
+   *
+   * The number of images to generate.
+   */
+  num_images?: number
+  /**
+   * Output Format
+   *
+   * The format of the generated image.
+   */
+  output_format?: 'jpeg' | 'png' | 'webp'
+  /**
+   * Image URL
+   *
+   * URL of Image for Inpaint generation.
+   */
+  image_url: string
+  /**
+   * Sync Mode
+   *
+   * If `True`, the media will be returned as a data URI and the output data won't be available in the request history.
+   */
+  sync_mode?: boolean
+  /**
+   * Strength
+   *
+   * The strength of the inpaint conditioning.
+   */
+  strength?: number
+  /**
+   * Control Scale
+   *
+   * The scale of the controlnet conditioning.
+   */
+  control_scale?: number
+  /**
+   * Enable Prompt Expansion
+   *
+   * Whether to enable prompt expansion. Note: this will increase the price by 0.0025 credits per request.
+   */
+  enable_prompt_expansion?: boolean
+  /**
+   * Number of Inference Steps
+   *
+   * The number of inference steps to perform.
+   */
+  num_inference_steps?: number
+  /**
+   * Seed
+   *
+   *
+   * The same seed and the same prompt given to the same version of the model
+   * will output the same image every time.
+   *
+   */
+  seed?: number
+}
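The inpaint endpoints pair a source `image_url` with a required `mask_image_url`, and expose the same controlnet window (`control_start`/`control_end` as fractions of the run, plus `control_scale`) as the ControlNet variants. A sketch, package name assumed; the white-repaints/black-keeps mask convention is an assumption, since the schema does not state it:

```ts
import type { SchemaZImageTurboInpaintLoraInput } from '@tanstack/ai-fal' // package name assumed

const input: SchemaZImageTurboInpaintLoraInput = {
  prompt: 'a bouquet of tulips in the vase',
  image_url: 'https://example.com/room.png',
  mask_image_url: 'https://example.com/vase-mask.png', // mask polarity assumed: white = repaint
  strength: 0.9,    // how strongly the masked region is repainted
  control_start: 0, // conditioning window as fractions of the inference run
  control_end: 0.8,
  control_scale: 0.7,
  loras: [{ path: 'owner/flower-lora', scale: 1 }],
}
```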
+ */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Num Layers + * + * The number of layers to generate. + */ + num_layers?: number + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'png' | 'webp' + /** + * Image URL + * + * The URL of the input image. + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Guidance Scale + * + * The guidance scale to use for the image generation. + */ + guidance_scale?: number + /** + * Negative Prompt + * + * The negative prompt to generate an image from. + */ + negative_prompt?: string + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number +} + +/** + * ImageToImageOutput + */ +export type SchemaQwenImageEdit2511Output = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * EditImageInput + */ +export type SchemaQwenImageEdit2511Input = { + /** + * Prompt + * + * The prompt to edit the image with. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. If None, uses the input image dimensions. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * The acceleration level to use. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI. + */ + sync_mode?: boolean + /** + * Guidance Scale + * + * The guidance scale to use for the image generation. + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Image URLs + * + * The URLs of the images to edit. + */ + image_urls: Array + /** + * Negative Prompt + * + * The negative prompt to generate an image from. + */ + negative_prompt?: string + /** + * Seed + * + * The same seed and the same prompt given to the same version of the model will output the same image every time. 
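+ *
+ * @example
+ * // Sketch with placeholder values: pinning `seed` makes the edit
+ * // reproducible, since the same seed and prompt yield the same image.
+ * const input: SchemaQwenImageEdit2511Input = {
+ *   prompt: 'replace the sky with a sunset',
+ *   image_urls: ['https://example.com/source.png'],
+ *   seed: 42,
+ * }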
+ */ + seed?: number +} + +/** + * ImageEditOutput + * + * Output for Wan 2.6 image editing + */ +export type SchemaV26ImageToImageOutput = { + /** + * Images + * + * Generated images in PNG format + */ + images: Array + /** + * Seed + * + * The seed used for generation + */ + seed: number +} + +/** + * ImageEditInput + * + * Input for Wan 2.6 image editing with reference images (enable_interleave=false) + */ +export type SchemaV26ImageToImageInput = { + /** + * Prompt + * + * Text prompt describing the desired image. Supports Chinese and English. Max 2000 characters. Example: 'Generate an image using the style of image 1 and background of image 2'. + */ + prompt: string + /** + * Num Images + * + * Number of images to generate (1-4). Directly affects billing cost. + */ + num_images?: number + /** + * Image Size + * + * Output image size. Use presets like 'square_hd', 'landscape_16_9', 'portrait_9_16', or specify exact dimensions with ImageSize(width=1280, height=720). Total pixels must be between 768*768 and 1280*1280. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Enable Prompt Expansion + * + * Enable LLM prompt optimization. Significantly improves results for simple prompts but adds 3-4 seconds processing time. + */ + enable_prompt_expansion?: boolean + /** + * Seed + * + * Random seed for reproducibility (0-2147483647). Same seed produces more consistent results. + */ + seed?: number + /** + * Image Urls + * + * Reference images for editing (1-3 images required). Order matters: reference as 'image 1', 'image 2', 'image 3' in prompt. Resolution: 384-5000px each dimension. Max size: 10MB each. Formats: JPEG, JPG, PNG (no alpha), BMP, WEBP. + */ + image_urls: Array + /** + * Negative Prompt + * + * Content to avoid in the generated image. Max 500 characters. + */ + negative_prompt?: string + /** + * Enable Safety Checker + * + * Enable content moderation for input and output. + */ + enable_safety_checker?: boolean +} + +/** + * QwenImageLayeredOutput + */ +export type SchemaQwenImageLayeredLoraOutput = { + /** + * Prompt + * + * The prompt used to generate the image. + */ + prompt?: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * TextToImageLoRAInput + */ +export type SchemaQwenImageLayeredLoraInput = { + /** + * Prompt + * + * A caption for the input image. + */ + prompt?: string + /** + * Acceleration + * + * The acceleration level to use. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Num Layers + * + * The number of layers to generate. + */ + num_layers?: number + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'png' | 'webp' + /** + * Image URL + * + * The URL of the input image. + */ + image_url: string + /** + * Loras + * + * List of LoRA weights to apply (maximum 3). + */ + loras?: Array + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. 
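+ *
+ * @example
+ * // Sketch with a placeholder URL: with `sync_mode` enabled the result comes
+ * // back as a data URI and is not stored in the request history.
+ * const input: SchemaQwenImageLayeredLoraInput = {
+ *   image_url: 'https://example.com/layered-source.png',
+ *   sync_mode: true,
+ * }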
+ */ + sync_mode?: boolean + /** + * Guidance Scale + * + * The guidance scale to use for the image generation. + */ + guidance_scale?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Negative Prompt + * + * The negative prompt to generate an image from. + */ + negative_prompt?: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean +} + +/** + * ArchEditOutput + */ +export type SchemaAiHomeEditOutput = { + image: SchemaImage + /** + * Status + * + * Status message with processing details + */ + status: string +} + +/** + * ArchEditInput + */ +export type SchemaAiHomeEditInput = { + /** + * Input Image Url + * + * URL of the image to do architectural editing + */ + input_image_url: string + /** + * Editing Type + * + * Type of editing. Structural editing only edits structural elements such as windows, walls etc. Virtual staging edits your furniture. Both do full editing including structural and furniture + */ + editing_type: 'structural editing' | 'virtual staging' | 'both' + /** + * Style + * + * Style for furniture and decor + */ + style: + | 'minimalistic-interior' + | 'farmhouse-interior' + | 'luxury-interior' + | 'modern-interior' + | 'zen-interior' + | 'mid century-interior' + | 'airbnb-interior' + | 'cozy-interior' + | 'rustic-interior' + | 'christmas-interior' + | 'bohemian-interior' + | 'tropical-interior' + | 'industrial-interior' + | 'japanese-interior' + | 'vintage-interior' + | 'loft-interior' + | 'halloween-interior' + | 'soho-interior' + | 'baroque-interior' + | 'kids room-interior' + | 'girls room-interior' + | 'boys room-interior' + | 'scandinavian-interior' + | 'french country-interior' + | 'mediterranean-interior' + | 'cyberpunk-interior' + | 'hot pink-interior' + | 'biophilic-interior' + | 'ancient egypt-interior' + | 'pixel-interior' + | 'art deco-interior' + | 'modern-exterior' + | 'minimalistic-exterior' + | 'farmhouse-exterior' + | 'cozy-exterior' + | 'luxury-exterior' + | 'colonial-exterior' + | 'zen-exterior' + | 'asian-exterior' + | 'creepy-exterior' + | 'airstone-exterior' + | 'ancient greek-exterior' + | 'art deco-exterior' + | 'brutalist-exterior' + | 'christmas lights-exterior' + | 'contemporary-exterior' + | 'cottage-exterior' + | 'dutch colonial-exterior' + | 'federal colonial-exterior' + | 'fire-exterior' + | 'french provincial-exterior' + | 'full glass-exterior' + | 'georgian colonial-exterior' + | 'gothic-exterior' + | 'greek revival-exterior' + | 'ice-exterior' + | 'italianate-exterior' + | 'mediterranean-exterior' + | 'midcentury-exterior' + | 'middle eastern-exterior' + | 'minecraft-exterior' + | 'morocco-exterior' + | 'neoclassical-exterior' + | 'spanish-exterior' + | 'tudor-exterior' + | 'underwater-exterior' + | 'winter-exterior' + | 'yard lighting-exterior' + /** + * Additional Elements + * + * Additional elements to include in the options above (e.g., plants, lighting) + */ + additional_elements?: string | unknown + /** + * Output Format + * + * The format of the generated image. Choose from: 'jpeg' or 'png'. 
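+ *
+ * @example
+ * // Sketch with placeholder values; besides the image URL, the four enum
+ * // fields shown here are all required by this input type.
+ * const input: SchemaAiHomeEditInput = {
+ *   input_image_url: 'https://example.com/room.jpg',
+ *   editing_type: 'virtual staging',
+ *   style: 'scandinavian-interior',
+ *   architecture_type: 'living room-interior',
+ *   color_palette: 'earthy neutrals',
+ * }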
+ */ + output_format?: 'jpeg' | 'png' + /** + * Architecture Type + * + * Type of architecture for appropriate furniture selection + */ + architecture_type: + | 'living room-interior' + | 'bedroom-interior' + | 'kitchen-interior' + | 'dining room-interior' + | 'bathroom-interior' + | 'laundry room-interior' + | 'home office-interior' + | 'study room-interior' + | 'dorm room-interior' + | 'coffee shop-interior' + | 'gaming room-interior' + | 'restaurant-interior' + | 'office-interior' + | 'attic-interior' + | 'toilet-interior' + | 'other-interior' + | 'house-exterior' + | 'villa-exterior' + | 'backyard-exterior' + | 'courtyard-exterior' + | 'ranch-exterior' + | 'office-exterior' + | 'retail-exterior' + | 'tower-exterior' + | 'apartment-exterior' + | 'school-exterior' + | 'museum-exterior' + | 'commercial-exterior' + | 'residential-exterior' + | 'other-exterior' + /** + * Color Palette + * + * Color palette for furniture and decor + */ + color_palette: + | 'surprise me' + | 'golden beige' + | 'refined blues' + | 'dusky elegance' + | 'emerald charm' + | 'crimson luxury' + | 'golden sapphire' + | 'soft pastures' + | 'candy sky' + | 'peach meadow' + | 'muted sands' + | 'ocean breeze' + | 'frosted pastels' + | 'spring bloom' + | 'gentle horizon' + | 'seaside breeze' + | 'azure coast' + | 'golden shore' + | 'mediterranean gem' + | 'ocean serenity' + | 'serene blush' + | 'muted horizon' + | 'pastel shores' + | 'dusky calm' + | 'woodland retreat' + | 'meadow glow' + | 'forest canopy' + | 'riverbank calm' + | 'earthy tones' + | 'earthy neutrals' + | 'arctic mist' + | 'aqua drift' + | 'blush bloom' + | 'coral haze' + | 'retro rust' + | 'autumn glow' + | 'rustic charm' + | 'vintage sage' + | 'faded plum' + | 'electric lime' + | 'violet pulse' + | 'neon sorbet' + | 'aqua glow' + | 'fluorescent sunset' + | 'lavender bloom' + | 'petal fresh' + | 'meadow light' + | 'sunny pastures' + | 'frosted mauve' + | 'snowy hearth' + | 'icy blues' + | 'winter twilight' + | 'earthy hues' + | 'stone balance' + | 'neutral sands' + | 'slate shades' + /** + * Custom Prompt + * + * Custom prompt for architectural editing, it overrides above options when used + */ + custom_prompt?: string +} + +/** + * ArchStyleOutput + */ +export type SchemaAiHomeStyleOutput = { + image: SchemaImage + /** + * Status + * + * Status message with processing details + */ + status: string +} + +/** + * ArchStyleInput + */ +export type SchemaAiHomeStyleInput = { + /** + * Input Image Url + * + * URL of the image to do architectural styling + */ + input_image_url: string + /** + * Input Image Strength + * + * Strength of the input image + */ + input_image_strength?: number + /** + * Additional Elements + * + * Additional elements to include in the options above (e.g., plants, lighting) + */ + additional_elements?: string | unknown + /** + * Output Format + * + * The format of the generated image. Choose from: 'jpeg' or 'png'. 
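+ *
+ * @example
+ * // Sketch with placeholder URLs: if `style_image_url` is provided, the other
+ * // styling options are ignored in favor of the reference image.
+ * const input: SchemaAiHomeStyleInput = {
+ *   input_image_url: 'https://example.com/facade.jpg',
+ *   style: 'modern-exterior',
+ *   architecture_type: 'house-exterior',
+ *   color_palette: 'surprise me',
+ *   style_image_url: 'https://example.com/reference-style.jpg',
+ * }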
+ */ + output_format?: 'jpeg' | 'png' + /** + * Style + * + * Style for furniture and decor + */ + style: + | 'minimalistic-interior' + | 'farmhouse-interior' + | 'luxury-interior' + | 'modern-interior' + | 'zen-interior' + | 'mid century-interior' + | 'airbnb-interior' + | 'cozy-interior' + | 'rustic-interior' + | 'christmas-interior' + | 'bohemian-interior' + | 'tropical-interior' + | 'industrial-interior' + | 'japanese-interior' + | 'vintage-interior' + | 'loft-interior' + | 'halloween-interior' + | 'soho-interior' + | 'baroque-interior' + | 'kids room-interior' + | 'girls room-interior' + | 'boys room-interior' + | 'scandinavian-interior' + | 'french country-interior' + | 'mediterranean-interior' + | 'cyberpunk-interior' + | 'hot pink-interior' + | 'biophilic-interior' + | 'ancient egypt-interior' + | 'pixel-interior' + | 'art deco-interior' + | 'modern-exterior' + | 'minimalistic-exterior' + | 'farmhouse-exterior' + | 'cozy-exterior' + | 'luxury-exterior' + | 'colonial-exterior' + | 'zen-exterior' + | 'asian-exterior' + | 'creepy-exterior' + | 'airstone-exterior' + | 'ancient greek-exterior' + | 'art deco-exterior' + | 'brutalist-exterior' + | 'christmas lights-exterior' + | 'contemporary-exterior' + | 'cottage-exterior' + | 'dutch colonial-exterior' + | 'federal colonial-exterior' + | 'fire-exterior' + | 'french provincial-exterior' + | 'full glass-exterior' + | 'georgian colonial-exterior' + | 'gothic-exterior' + | 'greek revival-exterior' + | 'ice-exterior' + | 'italianate-exterior' + | 'mediterranean-exterior' + | 'midcentury-exterior' + | 'middle eastern-exterior' + | 'minecraft-exterior' + | 'morocco-exterior' + | 'neoclassical-exterior' + | 'spanish-exterior' + | 'tudor-exterior' + | 'underwater-exterior' + | 'winter-exterior' + | 'yard lighting-exterior' + /** + * Architecture Type + * + * Type of architecture for appropriate furniture selection + */ + architecture_type: + | 'living room-interior' + | 'bedroom-interior' + | 'kitchen-interior' + | 'dining room-interior' + | 'bathroom-interior' + | 'laundry room-interior' + | 'home office-interior' + | 'study room-interior' + | 'dorm room-interior' + | 'coffee shop-interior' + | 'gaming room-interior' + | 'restaurant-interior' + | 'office-interior' + | 'attic-interior' + | 'toilet-interior' + | 'other-interior' + | 'house-exterior' + | 'villa-exterior' + | 'backyard-exterior' + | 'courtyard-exterior' + | 'ranch-exterior' + | 'office-exterior' + | 'retail-exterior' + | 'tower-exterior' + | 'apartment-exterior' + | 'school-exterior' + | 'museum-exterior' + | 'commercial-exterior' + | 'residential-exterior' + | 'other-exterior' + /** + * Color Palette + * + * Color palette for furniture and decor + */ + color_palette: + | 'surprise me' + | 'golden beige' + | 'refined blues' + | 'dusky elegance' + | 'emerald charm' + | 'crimson luxury' + | 'golden sapphire' + | 'soft pastures' + | 'candy sky' + | 'peach meadow' + | 'muted sands' + | 'ocean breeze' + | 'frosted pastels' + | 'spring bloom' + | 'gentle horizon' + | 'seaside breeze' + | 'azure coast' + | 'golden shore' + | 'mediterranean gem' + | 'ocean serenity' + | 'serene blush' + | 'muted horizon' + | 'pastel shores' + | 'dusky calm' + | 'woodland retreat' + | 'meadow glow' + | 'forest canopy' + | 'riverbank calm' + | 'earthy tones' + | 'earthy neutrals' + | 'arctic mist' + | 'aqua drift' + | 'blush bloom' + | 'coral haze' + | 'retro rust' + | 'autumn glow' + | 'rustic charm' + | 'vintage sage' + | 'faded plum' + | 'electric lime' + | 'violet pulse' + | 'neon sorbet' + | 'aqua glow' 
+ | 'fluorescent sunset' + | 'lavender bloom' + | 'petal fresh' + | 'meadow light' + | 'sunny pastures' + | 'frosted mauve' + | 'snowy hearth' + | 'icy blues' + | 'winter twilight' + | 'earthy hues' + | 'stone balance' + | 'neutral sands' + | 'slate shades' + /** + * Style Image Url + * + * URL of the style image, optional. If given, other parameters are ignored + */ + style_image_url?: string | unknown + /** + * Custom Prompt + * + * Custom prompt for architectural editing, it overrides above options when used + */ + custom_prompt?: string + /** + * Enhanced Rendering + * + * It gives better rendering quality with more processing time, additional cost is 0.01 USD per image + */ + enhanced_rendering?: boolean | unknown +} + +/** + * ImageToImageOutput + */ +export type SchemaQwenImageEdit2511LoraOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * EditImageLoraInput + */ +export type SchemaQwenImageEdit2511LoraInput = { + /** + * Prompt + * + * The prompt to edit the image with. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. If None, uses the input image dimensions. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * The acceleration level to use. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI. + */ + sync_mode?: boolean + /** + * Loras + * + * The LoRAs to use for the image generation. You can use up to 3 LoRAs and they will be merged together to generate the final image. + */ + loras?: Array + /** + * Guidance Scale + * + * The guidance scale to use for the image generation. + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Image URLs + * + * The URLs of the images to edit. + */ + image_urls: Array + /** + * Negative Prompt + * + * The negative prompt to generate an image from. + */ + negative_prompt?: string + /** + * Seed + * + * The same seed and the same prompt given to the same version of the model will output the same image every time. 
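+ *
+ * @example
+ * // Sketch with placeholder values: up to 3 LoRAs are merged into the edit.
+ * // The { path, scale } element shape is assumed from the LoRA inputs defined
+ * // elsewhere in this file; this schema snapshot does not spell it out.
+ * const input: SchemaQwenImageEdit2511LoraInput = {
+ *   prompt: 'turn the photo into a watercolor painting',
+ *   image_urls: ['https://example.com/source.png'],
+ *   loras: [{ path: 'https://example.com/watercolor-lora.safetensors', scale: 1 }],
+ * }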
+ */ + seed?: number +} + +/** + * MultipleAnglesOutput + * + * Output model for Multiple Angles endpoint + */ +export type SchemaQwenImageEdit2511MultipleAnglesOutput = { + /** + * Prompt + * + * The constructed prompt used for generation + */ + prompt: string + /** + * Images + * + * The generated/edited images + */ + images: Array + /** + * Seed + * + * The seed used for generation + */ + seed: number +} + +/** + * MultipleAnglesInput + * + * Input model for Multiple Angles endpoint - Camera control with precise adjustments using trigger word. + * Prompt is built automatically from slider values. + */ +export type SchemaQwenImageEdit2511MultipleAnglesInput = { + /** + * Acceleration + * + * Acceleration level for image generation. + */ + acceleration?: 'none' | 'regular' + /** + * Image Size + * + * The size of the generated image. If not provided, the size of the input image will be used. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Horizontal Angle (Azimuth °) + * + * Horizontal rotation angle around the object in degrees. 0°=front view, 90°=right side, 180°=back view, 270°=left side, 360°=front view again. + */ + horizontal_angle?: number + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale. + */ + guidance_scale?: number + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Image URLs + * + * The URL of the image to adjust camera angle for. + */ + image_urls: Array + /** + * Negative Prompt + * + * The negative prompt for the generation + */ + negative_prompt?: string + /** + * Zoom (Distance) + * + * Camera zoom/distance. 0=wide shot (far away), 5=medium shot (normal), 10=close-up (very close). + */ + zoom?: number + /** + * Vertical Angle (Elevation °) + * + * Vertical camera angle in degrees. -30°=low-angle shot (looking up), 0°=eye-level, 30°=elevated, 60°=high-angle, 90°=bird's-eye view (looking down). + */ + vertical_angle?: number + /** + * Num Images + * + * Number of images to generate + */ + num_images?: number + /** + * Lora Scale + * + * The scale factor for the LoRA model. Controls the strength of the camera control effect. + */ + lora_scale?: number + /** + * Output Format + * + * The format of the output image + */ + output_format?: 'png' | 'jpeg' | 'webp' + /** + * Additional Prompt + * + * Additional text to append to the automatically generated prompt. + */ + additional_prompt?: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI. + */ + sync_mode?: boolean + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * Random seed for reproducibility. + */ + seed?: number +} + +/** + * GlmImageToImageOutput + */ +export type SchemaGlmImageImageToImageOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * List of URLs to the generated images. + */ + images: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. 
+ */ + has_nsfw_concepts: Array + /** + * Timings + */ + timings: { + [key: string]: number + } +} + +/** + * GlmImageToImageInput + */ +export type SchemaGlmImageImageToImageInput = { + /** + * Prompt + * + * Text prompt for image generation. + */ + prompt: string + /** + * Num Images + * + * Number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * Output image size. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + | 'portrait_3_2' + | 'landscape_3_2' + | 'portrait_hd' + | 'landscape_hd' + /** + * Enable Safety Checker + * + * Enable NSFW safety checking on the generated images. + */ + enable_safety_checker?: boolean + /** + * Output Format + * + * Output image format. + */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * If True, the image will be returned as a base64 data URI instead of a URL. + */ + sync_mode?: boolean + /** + * Guidance Scale + * + * Classifier-free guidance scale. Higher values make the model follow the prompt more closely. + */ + guidance_scale?: number + /** + * Seed + * + * Random seed for reproducibility. The same seed with the same prompt will produce the same image. + */ + seed?: number + /** + * Image Urls + * + * URL(s) of the condition image(s) for image-to-image generation. Supports up to 4 URLs for multi-image references. + */ + image_urls: Array + /** + * Enable Prompt Expansion + * + * If True, the prompt will be enhanced using an LLM for more detailed and higher quality results. + */ + enable_prompt_expansion?: boolean + /** + * Num Inference Steps + * + * Number of diffusion denoising steps. More steps generally produce higher quality images. + */ + num_inference_steps?: number +} + +/** + * Klein9BDistilledEditOutput + */ +export type SchemaFlux2Klein9bEditOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The edited images + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * Klein9BDistilledEditInput + */ +export type SchemaFlux2Klein9bEditInput = { + /** + * Prompt + * + * The prompt to edit the image. + */ + prompt: string + /** + * Number of Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. If not provided, uses the input image size. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI. Output is not stored when this is True. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Number of Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Image URLs + * + * The URLs of the images for editing. A maximum of 4 images are allowed. 
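+ *
+ * @example
+ * // Sketch with placeholder URLs: up to 4 reference images may be passed.
+ * const input: SchemaFlux2Klein9bEditInput = {
+ *   prompt: 'place the product on the desk from the second image',
+ *   image_urls: [
+ *     'https://example.com/product.png',
+ *     'https://example.com/desk.png',
+ *   ],
+ * }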
+ */ + image_urls: Array + /** + * Seed + * + * The seed to use for the generation. If not provided, a random seed will be used. + */ + seed?: number +} + +/** + * Klein4BDistilledEditOutput + */ +export type SchemaFlux2Klein4bEditOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The edited images + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * KleinDistilledEditInput + */ +export type SchemaFlux2Klein4bEditInput = { + /** + * Prompt + * + * The prompt to edit the image. + */ + prompt: string + /** + * Number of Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. If not provided, uses the input image size. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI. Output is not stored when this is True. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Number of Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Image URLs + * + * The URLs of the images for editing. A maximum of 4 images are allowed. + */ + image_urls: Array + /** + * Seed + * + * The seed to use for the generation. If not provided, a random seed will be used. + */ + seed?: number +} + +/** + * Klein9BBaseEditOutput + */ +export type SchemaFlux2Klein9bBaseEditOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The edited images + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * Klein9BEditImageInput + */ +export type SchemaFlux2Klein9bBaseEditInput = { + /** + * Prompt + * + * The prompt to edit the image. + */ + prompt: string + /** + * Number of Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. If not provided, uses the input image size. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * The acceleration level to use for image generation. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Guidance Scale + * + * Guidance scale for classifier-free guidance. + */ + guidance_scale?: number + /** + * Output Format + * + * The format of the generated image. 
+ */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI. Output is not stored when this is True. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Number of Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Image URLs + * + * The URLs of the images for editing. A maximum of 4 images are allowed. + */ + image_urls: Array + /** + * Negative Prompt + * + * Negative prompt for classifier-free guidance. Describes what to avoid in the image. + */ + negative_prompt?: string + /** + * Seed + * + * The seed to use for the generation. If not provided, a random seed will be used. + */ + seed?: number +} + +/** + * Klein4BBaseEditOutput + */ +export type SchemaFlux2Klein4bBaseEditOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The edited images + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * Klein4BBaseEditInput + */ +export type SchemaFlux2Klein4bBaseEditInput = { + /** + * Prompt + * + * The prompt to edit the image. + */ + prompt: string + /** + * Number of Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. If not provided, uses the input image size. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * The acceleration level to use for image generation. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Guidance Scale + * + * Guidance scale for classifier-free guidance. + */ + guidance_scale?: number + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI. Output is not stored when this is True. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Number of Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Image URLs + * + * The URLs of the images for editing. A maximum of 4 images are allowed. + */ + image_urls: Array + /** + * Negative Prompt + * + * Negative prompt for classifier-free guidance. Describes what to avoid in the image. + */ + negative_prompt?: string + /** + * Seed + * + * The seed to use for the generation. If not provided, a random seed will be used. + */ + seed?: number +} + +/** + * KleinT2IOutput + */ +export type SchemaFlux2Klein4bBaseEditLoraOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated images + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. 
+ */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * KleinBaseEditLoRAInput + */ +export type SchemaFlux2Klein4bBaseEditLoraInput = { + /** + * Prompt + * + * The prompt to edit the image. + */ + prompt: string + /** + * Number of Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. If not provided, uses the input image size. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * The acceleration level to use for image generation. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Guidance Scale + * + * Guidance scale for classifier-free guidance. + */ + guidance_scale?: number + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Loras + * + * List of LoRA weights to apply (maximum 3). + */ + loras?: Array + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI. Output is not stored when this is True. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Number of Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Image URLs + * + * The URLs of the images for editing. A maximum of 4 images are allowed. + */ + image_urls: Array + /** + * Negative Prompt + * + * Negative prompt for classifier-free guidance. Describes what to avoid in the image. + */ + negative_prompt?: string + /** + * Seed + * + * The seed to use for the generation. If not provided, a random seed will be used. + */ + seed?: number +} + +/** + * LoRAInput + */ +export type SchemaFalAiFlux2KleinLoRaInput = { + /** + * Path + * + * URL, HuggingFace repo ID (owner/repo), or local path to LoRA weights. + */ + path: string + /** + * Scale + * + * Scale factor for LoRA application (0.0 to 4.0). + */ + scale?: number +} + +/** + * KleinT2IOutput + */ +export type SchemaFlux2Klein9bBaseEditLoraOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated images + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * KleinBaseEditLoRAInput + */ +export type SchemaFlux2Klein9bBaseEditLoraInput = { + /** + * Prompt + * + * The prompt to edit the image. + */ + prompt: string + /** + * Number of Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. If not provided, uses the input image size. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * The acceleration level to use for image generation. 
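+ *
+ * @example
+ * // Sketch with a placeholder repo id: per the LoRAInput schema above, `path`
+ * // may be a URL, a HuggingFace repo id (owner/repo), or a local path, and
+ * // `scale` ranges from 0.0 to 4.0.
+ * const lora: SchemaFalAiFlux2KleinLoRaInput = {
+ *   path: 'some-owner/some-style-lora',
+ *   scale: 0.8,
+ * }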
+ */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Guidance Scale + * + * Guidance scale for classifier-free guidance. + */ + guidance_scale?: number + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Loras + * + * List of LoRA weights to apply (maximum 3). + */ + loras?: Array + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI. Output is not stored when this is True. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Number of Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Image URLs + * + * The URLs of the images for editing. A maximum of 4 images are allowed. + */ + image_urls: Array + /** + * Negative Prompt + * + * Negative prompt for classifier-free guidance. Describes what to avoid in the image. + */ + negative_prompt?: string + /** + * Seed + * + * The seed to use for the generation. If not provided, a random seed will be used. + */ + seed?: number +} + +/** + * FiboEditExtraEPOutputModel + */ +export type SchemaFiboEditColorizeOutput = { + /** + * Images + * + * Generated images. + */ + images?: Array + image: SchemaImage + /** + * Structured Instruction + * + * Current instruction. + */ + structured_instruction: { + [key: string]: unknown + } +} + +/** + * ColorizeInput + */ +export type SchemaFiboEditColorizeInput = { + /** + * Color + * + * Select the color palette or aesthetic for the output image + */ + color: + | 'contemporary color' + | 'vivid color' + | 'black and white colors' + | 'sepia vintage' + /** + * Image Url + * + * The source image. + */ + image_url: string +} + +/** + * FiboEditExtraEPOutputModel + */ +export type SchemaFiboEditBlendOutput = { + /** + * Images + * + * Generated images. + */ + images?: Array + image: SchemaImage + /** + * Structured Instruction + * + * Current instruction. + */ + structured_instruction: { + [key: string]: unknown + } +} + +/** + * BlendingInput + */ +export type SchemaFiboEditBlendInput = { + /** + * Instruction + * + * Instruct what elements you would like to blend in your image. + */ + instruction: string + /** + * Image Url + * + * The source image. + */ + image_url: string +} + +/** + * FiboEditExtraEPOutputModel + */ +export type SchemaFiboEditAddObjectByTextOutput = { + /** + * Images + * + * Generated images. + */ + images?: Array + image: SchemaImage + /** + * Structured Instruction + * + * Current instruction. + */ + structured_instruction: { + [key: string]: unknown + } +} + +/** + * AddObjectByTextInput + */ +export type SchemaFiboEditAddObjectByTextInput = { + /** + * Instruction + * + * The full natural language command describing what to add and where. + */ + instruction: string + /** + * Image Url + * + * The source image. + */ + image_url: string +} + +/** + * Lighting + */ +export type SchemaLighting = { + /** + * Shadows + * + * The shadows in the image to be generated. + */ + shadows?: string | unknown + /** + * Conditions + * + * The conditions of the lighting in the image to be generated. + */ + conditions?: string | unknown + /** + * Direction + * + * The direction of the lighting in the image to be generated. + */ + direction?: string | unknown +} + +/** + * Aesthetics + */ +export type SchemaAesthetics = { + /** + * Composition + * + * The composition of the image to be generated. 
+ */ + composition?: string | unknown + /** + * Mood Atmosphere + * + * The mood and atmosphere of the image to be generated. + */ + mood_atmosphere?: string | unknown + /** + * Color Scheme + * + * The color scheme of the image to be generated. + */ + color_scheme?: string | unknown +} + +/** + * PhotographicCharacteristics + */ +export type SchemaPhotographicCharacteristics = { + /** + * Focus + * + * The focus in the image to be generated. + */ + focus?: string | unknown + /** + * Lens Focal Length + * + * The focal length of the lens in the image to be generated. + */ + lens_focal_length?: string | unknown + /** + * Camera Angle + * + * The angle of the camera in the image to be generated. + */ + camera_angle?: string | unknown + /** + * Depth Of Field + * + * The depth of field in the image to be generated. + */ + depth_of_field?: string | unknown +} + +/** + * PromptObject + */ +export type SchemaPromptObject = { + /** + * Relative Size + * + * The relative size of the object in the image. + */ + relative_size?: string | unknown + /** + * Description + * + * A description of the object to be generated. + */ + description?: string | unknown + /** + * Skin Tone And Texture + * + * The skin tone and texture of the object in the image. + */ + skin_tone_and_texture?: string | unknown + /** + * Appearance Details + * + * The appearance details of the object. + */ + appearance_details?: string | unknown + /** + * Number Of Objects + * + * The number of objects in the image. + */ + number_of_objects?: number | unknown + /** + * Expression + * + * The expression of the object in the image. + */ + expression?: string | unknown + /** + * Pose + * + * The pose of the object in the image. + */ + pose?: string | unknown + /** + * Shape And Color + * + * The shape and color of the object. + */ + shape_and_color?: string | unknown + /** + * Relationship + * + * The relationship of the object to other objects in the image. + */ + relationship: string + /** + * Texture + * + * The texture of the object. + */ + texture?: string | unknown + /** + * Gender + * + * The gender of the object in the image. + */ + gender?: string | unknown + /** + * Clothing + * + * The clothing of the object in the image. + */ + clothing?: string | unknown + /** + * Location + * + * The location of the object in the image. + */ + location?: string | unknown + /** + * Orientation + * + * The orientation of the object in the image. + */ + orientation?: string | unknown + /** + * Action + * + * The action of the object in the image. + */ + action?: string | unknown +} + +/** + * StructuredInstruction + */ +export type SchemaStructuredInstruction = { + /** + * Background Setting + * + * The background setting of the image to be generated. + */ + background_setting?: string | unknown + /** + * Artistic Style + * + * The artistic style of the image to be generated. + */ + artistic_style?: string | unknown + /** + * Style Medium + * + * The style medium of the image to be generated. + */ + style_medium?: string | unknown + /** + * Text Render + * + * A list of text to be rendered in the image. + */ + text_render?: Array | unknown + /** + * Objects + * + * A list of objects in the image to be generated, along with their attributes and relationships to other objects in the image. + */ + objects?: Array | unknown + /** + * Context + * + * The context of the image to be generated. + */ + context?: string | unknown + /** + * The photographic characteristics of the image to be generated. 
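+ *
+ * @example
+ * // Sketch with illustrative values: a structured instruction nests the
+ * // blocks defined above (objects, photographic characteristics, lighting).
+ * const instruction: SchemaStructuredInstruction = {
+ *   short_description: 'a ceramic mug on a wooden desk',
+ *   photographic_characteristics: {
+ *     camera_angle: 'eye level',
+ *     depth_of_field: 'shallow',
+ *   },
+ *   lighting: { direction: 'soft window light from the left' },
+ * }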
+ */ + photographic_characteristics?: SchemaPhotographicCharacteristics | unknown + /** + * The aesthetics of the image to be generated. + */ + aesthetics?: SchemaAesthetics | unknown + /** + * The lighting of the image to be generated. + */ + lighting?: SchemaLighting | unknown + /** + * Short Description + * + * A short description of the image to be generated. + */ + short_description?: string | unknown + /** + * Edit Instruction + * + * The edit instruction for the image. + */ + edit_instruction?: string | unknown +} + +/** + * FiboEditOutputModel + */ +export type SchemaFiboEditEditOutput = { + /** + * Images + * + * Generated images. + */ + images?: Array + image: SchemaImage + /** + * Structured Instruction + * + * Current instruction. + */ + structured_instruction: { + [key: string]: unknown + } +} + +/** + * FiboEditInputModel + */ +export type SchemaFiboEditEditInput = { + /** + * Steps Num + * + * Number of inference steps. + */ + steps_num?: number + /** + * Instruction + * + * Instruction for image editing. + */ + instruction?: string | unknown + /** + * Image Url + * + * Reference image (file or URL). + */ + image_url?: string | unknown + /** + * Sync Mode + * + * If true, returns the image directly in the response (increases latency). + */ + sync_mode?: boolean + /** + * Guidance Scale + * + * Guidance scale for text. + */ + guidance_scale?: number + /** + * The structured prompt to generate an image from. + */ + structured_instruction?: SchemaStructuredInstruction | unknown + /** + * Mask Url + * + * Mask image (file or URL). Optional + */ + mask_url?: string | unknown + /** + * Negative Prompt + * + * Negative prompt for image generation. + */ + negative_prompt?: string + /** + * Seed + * + * Random seed for reproducibility. + */ + seed?: number +} + +/** + * FiboEditExtraEPOutputModel + */ +export type SchemaFiboEditEraseByTextOutput = { + /** + * Images + * + * Generated images. + */ + images?: Array + image: SchemaImage + /** + * Structured Instruction + * + * Current instruction. + */ + structured_instruction: { + [key: string]: unknown + } +} + +/** + * EraseByTextInput + */ +export type SchemaFiboEditEraseByTextInput = { + /** + * Object Name + * + * The name of the object to remove. + */ + object_name: string + /** + * Image Url + * + * The source image. + */ + image_url: string +} + +/** + * FiboEditExtraEPOutputModel + */ +export type SchemaFiboEditRewriteTextOutput = { + /** + * Images + * + * Generated images. + */ + images?: Array + image: SchemaImage + /** + * Structured Instruction + * + * Current instruction. + */ + structured_instruction: { + [key: string]: unknown + } +} + +/** + * RewriteTextInput + */ +export type SchemaFiboEditRewriteTextInput = { + /** + * New Text + * + * The new text string to appear in the image. + */ + new_text: string + /** + * Image Url + * + * The source image. + */ + image_url: string +} + +/** + * FiboEditExtraEPOutputModel + */ +export type SchemaFiboEditRestyleOutput = { + /** + * Images + * + * Generated images. + */ + images?: Array + image: SchemaImage + /** + * Structured Instruction + * + * Current instruction. + */ + structured_instruction: { + [key: string]: unknown + } +} + +/** + * RestyleInput + */ +export type SchemaFiboEditRestyleInput = { + /** + * Style + * + * Select the desired artistic style for the output image.
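+ *
+ * @example
+ * // Sketch with a placeholder URL: both fields of the restyle input are
+ * // required, and `style` must be one of the preset names listed below.
+ * const input: SchemaFiboEditRestyleInput = {
+ *   style: 'Oil Painting',
+ *   image_url: 'https://example.com/portrait.png',
+ * }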
+ */ + style: + | '3D Render' + | 'Cubism' + | 'Oil Painting' + | 'Anime' + | 'Cartoon' + | 'Coloring Book' + | 'Retro Ad' + | 'Pop Art Halftone' + | 'Vector Art' + | 'Story Board' + | 'Art Nouveau' + | 'Cross Etching' + | 'Wood Cut' + /** + * Image Url + * + * The source image. + */ + image_url: string +} + +/** + * FiboEditExtraEPOutputModel + */ +export type SchemaFiboEditRelightOutput = { + /** + * Images + * + * Generated images. + */ + images?: Array + image: SchemaImage + /** + * Structured Instruction + * + * Current instruction. + */ + structured_instruction: { + [key: string]: unknown + } +} + +/** + * RelightInput + */ +export type SchemaFiboEditRelightInput = { + /** + * Light Type + * + * The quality/style/time of day. + */ + light_type: + | 'midday' + | 'blue hour light' + | 'low-angle sunlight' + | 'sunrise light' + | 'spotlight on subject' + | 'overcast light' + | 'soft overcast daylight lighting' + | 'cloud-filtered lighting' + | 'fog-diffused lighting' + | 'moonlight lighting' + | 'starlight nighttime' + | 'soft bokeh lighting' + | 'harsh studio lighting' + /** + * Light Direction + * + * Where the light comes from. + */ + light_direction: 'front' | 'side' | 'bottom' | 'top-down' | unknown + /** + * Image Url + * + * The source image. + */ + image_url: string +} + +/** + * FiboEditExtraEPOutputModel + */ +export type SchemaFiboEditReseasonOutput = { + /** + * Images + * + * Generated images. + */ + images?: Array + image: SchemaImage + /** + * Structured Instruction + * + * Current instruction. + */ + structured_instruction: { + [key: string]: unknown + } +} + +/** + * ReseasonInput + */ +export type SchemaFiboEditReseasonInput = { + /** + * Season + * + * The desired season. + */ + season: 'spring' | 'summer' | 'autumn' | 'winter' + /** + * Image Url + * + * The source image. + */ + image_url: string +} + +/** + * FiboEditExtraEPOutputModel + */ +export type SchemaFiboEditRestoreOutput = { + /** + * Images + * + * Generated images. + */ + images?: Array + image: SchemaImage + /** + * Structured Instruction + * + * Current instruction. + */ + structured_instruction: { + [key: string]: unknown + } +} + +/** + * RestoreInput + */ +export type SchemaFiboEditRestoreInput = { + /** + * Image Url + * + * The source image. + */ + image_url: string +} + +/** + * FiboEditExtraEPOutputModel + */ +export type SchemaFiboEditSketchToColoredImageOutput = { + /** + * Images + * + * Generated images. + */ + images?: Array + image: SchemaImage + /** + * Structured Instruction + * + * Current instruction. + */ + structured_instruction: { + [key: string]: unknown + } +} + +/** + * SketchColoredImageInput + */ +export type SchemaFiboEditSketchToColoredImageInput = { + /** + * Image Url + * + * The source image. + */ + image_url: string +} + +/** + * FiboEditExtraEPOutputModel + */ +export type SchemaFiboEditReplaceObjectByTextOutput = { + /** + * Images + * + * Generated images. + */ + images?: Array + image: SchemaImage + /** + * Structured Instruction + * + * Current instruction. + */ + structured_instruction: { + [key: string]: unknown + } +} + +/** + * ReplaceObjectInput + */ +export type SchemaFiboEditReplaceObjectByTextInput = { + /** + * Instruction + * + * The full natural language command describing what to replace. + */ + instruction: string + /** + * Image Url + * + * The source image. 
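+ *
+ * @example
+ * // Sketch with placeholder values: the instruction names both the object to
+ * // replace and its replacement in plain language.
+ * const input: SchemaFiboEditReplaceObjectByTextInput = {
+ *   instruction: 'replace the bicycle with a red scooter',
+ *   image_url: 'https://example.com/street.png',
+ * }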
+ */ + image_url: string +} + +/** + * FaceFusionImageOutput + * + * FaceFusion output payload when image content is generated + */ +export type SchemaAiFaceSwapFaceswapimageOutput = { + image: SchemaImage + /** + * Processing Time Ms + * + * Optional processing duration in milliseconds + */ + processing_time_ms?: number | unknown +} + +/** + * FaceSwapInputImage + * + * Input schema for image ↔ image face swap + */ +export type SchemaAiFaceSwapFaceswapimageInput = { + /** + * Source Face Url + * + * Source face image + */ + source_face_url: string + /** + * Target Image Url + * + * Target image URL + */ + target_image_url: string +} + +/** + * ReplaceBackgroundOutputModel + */ +export type SchemaReplaceBackgroundOutput = { + /** + * Images + * + * Generated images. + */ + images?: Array<{ + [key: string]: unknown + }> + image: SchemaImage +} + +/** + * ReplaceBackgroundInputModel + */ +export type SchemaReplaceBackgroundInput = { + /** + * Prompt + * + * Prompt for background replacement. + */ + prompt?: string | unknown + /** + * Steps Num + * + * Number of inference steps. + */ + steps_num?: number + /** + * Sync Mode + * + * If true, returns the image directly in the response (increases latency). + */ + sync_mode?: boolean + /** + * Seed + * + * Random seed for reproducibility. + */ + seed?: number + /** + * Negative Prompt + * + * Negative prompt for background replacement. + */ + negative_prompt?: string + /** + * Image Url + * + * Reference image (file or URL). + */ + image_url?: string | unknown +} + +/** + * Output + */ +export type SchemaClarityUpscalerOutput = { + /** + * The URL of the generated image. + */ + image: SchemaImage + /** + * Seed + * + * The seed used to generate the image. + */ + seed: number + /** + * Timings + * + * The timings of the different steps in the workflow. + */ + timings: { + [key: string]: number + } +} + +/** + * Input + */ +export type SchemaClarityUpscalerInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt?: string + /** + * Resemblance + * + * + * The resemblance of the upscaled image to the original image. The higher the resemblance, the more the model will try to keep the original image. + * Refers to the strength of the ControlNet. + * + */ + resemblance?: number + /** + * Creativity + * + * + * The creativity of the model. The higher the creativity, the more the model will deviate from the prompt. + * Refers to the denoise strength of the sampling. + * + */ + creativity?: number + /** + * Image Url + * + * The URL of the image to upscale. + */ + image_url: string + /** + * Upscale Factor + * + * The upscale factor + */ + upscale_factor?: number + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number | unknown + /** + * Negative Prompt + * + * The negative prompt to use. Use it to address details that you don't want in the image. + */ + negative_prompt?: string + /** + * Enable Safety Checker + * + * If set to false, the safety checker will be disabled. 
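+ *
+ * @example
+ * // Sketch with placeholder values: per the field docs above, `resemblance`
+ * // is the ControlNet strength that keeps the result close to the original,
+ * // while `creativity` is the denoise strength that lets it deviate.
+ * const input: SchemaClarityUpscalerInput = {
+ *   image_url: 'https://example.com/low-res.png',
+ *   upscale_factor: 2,
+ *   creativity: 0.35,
+ *   resemblance: 0.6,
+ * }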
+ */ + enable_safety_checker?: boolean +} + +/** + * Output + */ +export type SchemaAuraSrOutput = { + /** + * Image + * + * Upscaled image + */ + image: SchemaImage + /** + * Timings + * + * Timings for each step in the pipeline. + */ + timings: { + [key: string]: number + } +} + +/** + * Input + */ +export type SchemaAuraSrInput = { + /** + * Overlapping Tiles + * + * Whether to use overlapping tiles for upscaling. Setting this to true helps remove seams but doubles the inference time. + */ + overlapping_tiles?: boolean + /** + * Checkpoint + * + * Checkpoint to use for upscaling. More coming soon. + */ + checkpoint?: 'v1' | 'v2' + /** + * Upscaling Factor (Xs) + * + * Upscaling factor. More coming soon. + */ + upscaling_factor?: 4 + /** + * Image URL + * + * URL of the image to upscale. + */ + image_url: string +} + +/** + * Output + */ +export type SchemaFluxDevImageToImageOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * BaseImageToInput + */ +export type SchemaFluxDevImageToImageInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Acceleration + * + * The speed of the generation. The higher the speed, the faster the generation. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * The URL of the image to generate an image from. + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Strength + * + * The strength of the initial image. Higher strength values are better for this model. + */ + strength?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number | unknown + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * Flux2ProEditOutput + */ +export type SchemaFlux2ProEditOutput = { + /** + * Images + * + * The generated images. + */ + images: Array + /** + * Seed + * + * The seed used for the generation. + */ + seed: number +} + +/** + * Flux2ProImageEditInput + */ +export type SchemaFlux2ProEditInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Image Size + * + * The size of the generated image. 
If `auto`, the size will be determined by the model. + */ + image_size?: + | SchemaImageSize + | 'auto' + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Safety Tolerance + * + * The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive. + */ + safety_tolerance?: '1' | '2' | '3' | '4' | '5' + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * The seed to use for the generation. + */ + seed?: number + /** + * Image URLs + * + * List of URLs of input images for editing + */ + image_urls: Array<string> +} + +/** + * Flux2EditImageOutput + */ +export type SchemaFlux2EditOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The edited images + */ + images: Array<SchemaImage> + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value as the one passed in the + * input, or the randomly generated one that was used in case none was passed. + * + */ + seed: number + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array<boolean> + /** + * Timings + */ + timings: { + [key: string]: number + } +} + +/** + * Flux2EditImageInput + */ +export type SchemaFlux2EditInput = { + /** + * Prompt + * + * The prompt to edit the image. + */ + prompt: string + /** + * Number of Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the image to generate. The width and height must be between 512 and 2048 pixels. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * The acceleration level to use for the image generation. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Guidance Scale + * + * Guidance Scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you. + */ + guidance_scale?: number + /** + * Seed + * + * The seed to use for the generation. If not provided, a random seed will be used. + */ + seed?: number + /** + * Image URLs + * + * The URLs of the images for editing. A maximum of 4 images is allowed; if more are provided, only the first 4 will be used. + */ + image_urls: Array<string> + /** + * Enable Prompt Expansion + * + * If set to true, the prompt will be expanded for better results. + */ + enable_prompt_expansion?: boolean + /** + * Number of Inference Steps + * + * The number of inference steps to perform.
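+ * + * Usage sketch (editor's note, not generated output): a minimal + * SchemaFlux2EditInput payload; the URLs and values are placeholders only. + * @example + * const body: SchemaFlux2EditInput = { + * prompt: 'blend both photos into a single scene', + * image_urls: ['https://example.com/a.png', 'https://example.com/b.png'], + * image_size: 'landscape_16_9', + * }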
+ */ + num_inference_steps?: number +} + +/** + * Flux2EditImageLoRAOutput + */ +export type SchemaFlux2LoraEditOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The edited images + */ + images: Array<SchemaImage> + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value as the one passed in the + * input, or the randomly generated one that was used in case none was passed. + * + */ + seed: number + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array<boolean> + /** + * Timings + */ + timings: { + [key: string]: number + } +} + +/** + * Flux2EditImageLoRAInput + */ +export type SchemaFlux2LoraEditInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Number of Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the image to generate. The width and height must be between 512 and 2048 pixels. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * The acceleration level to use for the image generation. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Loras + * + * List of LoRA weights to apply (maximum 3). Each LoRA can be a URL, HuggingFace repo ID, or local path. + */ + loras?: Array<SchemaLoraWeight> + /** + * Guidance Scale + * + * Guidance Scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you. + */ + guidance_scale?: number + /** + * Seed + * + * The seed to use for the generation. If not provided, a random seed will be used. + */ + seed?: number + /** + * Image URLs + * + * The URLs of the images for editing. A maximum of 3 images is allowed; if more are provided, only the first 3 will be used. + */ + image_urls: Array<string> + /** + * Enable Prompt Expansion + * + * If set to true, the prompt will be expanded for better results. + */ + enable_prompt_expansion?: boolean + /** + * Number of Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * FluxKontextOutput + */ +export type SchemaFluxProKontextOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array<SchemaImage> + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array<boolean> + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value as the one passed in the + * input, or the randomly generated one that was used in case none was passed. + * + */ + seed: number +} + +/** + * FluxKontextInput + */ +export type SchemaFluxProKontextInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate.
+ */ + num_images?: number + /** + * Aspect Ratio + * + * The aspect ratio of the generated image. + */ + aspect_ratio?: + | '21:9' + | '16:9' + | '4:3' + | '3:2' + | '1:1' + | '2:3' + | '3:4' + | '9:16' + | '9:21' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * Image prompt for the omni model. + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Safety Tolerance + * + * The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive. + */ + safety_tolerance?: '1' | '2' | '3' | '4' | '5' | '6' + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Enhance Prompt + * + * Whether to enhance the prompt for better results. + */ + enhance_prompt?: boolean +} + +export type SchemaQueueStatus = { + status: 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED' + /** + * The request id. + */ + request_id: string + /** + * The response url. + */ + response_url?: string + /** + * The status url. + */ + status_url?: string + /** + * The cancel url. + */ + cancel_url?: string + /** + * The logs. + */ + logs?: { + [key: string]: unknown + } + /** + * The metrics. + */ + metrics?: { + [key: string]: unknown + } + /** + * The queue position. + */ + queue_position?: number +} + +export type GetFalAiFluxProKontextRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-pro/kontext/requests/{request_id}/status' +} + +export type GetFalAiFluxProKontextRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxProKontextRequestsByRequestIdStatusResponse = + GetFalAiFluxProKontextRequestsByRequestIdStatusResponses[keyof GetFalAiFluxProKontextRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxProKontextRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-pro/kontext/requests/{request_id}/cancel' +} + +export type PutFalAiFluxProKontextRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFluxProKontextRequestsByRequestIdCancelResponse = + PutFalAiFluxProKontextRequestsByRequestIdCancelResponses[keyof PutFalAiFluxProKontextRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxProKontextData = { + body: SchemaFluxProKontextInput + path?: never + query?: never + url: '/fal-ai/flux-pro/kontext' +} + +export type PostFalAiFluxProKontextResponses = { + /** + * The request status.
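+ * + * Queue-flow sketch (editor's note, not generated output): submitting a request + * and reading the immediate SchemaQueueStatus reply. The base URL + * https://queue.fal.run and the `Key` authorization scheme are fal platform + * conventions, not declarations from this file, and the payload values are + * placeholders. + * @example + * const submitted = await fetch('https://queue.fal.run/fal-ai/flux-pro/kontext', { + * method: 'POST', + * headers: { + * Authorization: `Key ${process.env.FAL_KEY}`, + * 'Content-Type': 'application/json', + * }, + * body: JSON.stringify({ prompt: 'add falling snow', image_url: 'https://example.com/cat.png' }), + * }) + * const queued = (await submitted.json()) as SchemaQueueStatus + * // queued.response_url serves the final SchemaFluxProKontextOutput once COMPLETED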
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxProKontextResponse = + PostFalAiFluxProKontextResponses[keyof PostFalAiFluxProKontextResponses] + +export type GetFalAiFluxProKontextRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-pro/kontext/requests/{request_id}' +} + +export type GetFalAiFluxProKontextRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxProKontextOutput +} + +export type GetFalAiFluxProKontextRequestsByRequestIdResponse = + GetFalAiFluxProKontextRequestsByRequestIdResponses[keyof GetFalAiFluxProKontextRequestsByRequestIdResponses] + +export type GetFalAiFlux2LoraEditRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-2/lora/edit/requests/{request_id}/status' +} + +export type GetFalAiFlux2LoraEditRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlux2LoraEditRequestsByRequestIdStatusResponse = + GetFalAiFlux2LoraEditRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2LoraEditRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2LoraEditRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2/lora/edit/requests/{request_id}/cancel' +} + +export type PutFalAiFlux2LoraEditRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFlux2LoraEditRequestsByRequestIdCancelResponse = + PutFalAiFlux2LoraEditRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2LoraEditRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2LoraEditData = { + body: SchemaFlux2LoraEditInput + path?: never + query?: never + url: '/fal-ai/flux-2/lora/edit' +} + +export type PostFalAiFlux2LoraEditResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2LoraEditResponse = + PostFalAiFlux2LoraEditResponses[keyof PostFalAiFlux2LoraEditResponses] + +export type GetFalAiFlux2LoraEditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2/lora/edit/requests/{request_id}' +} + +export type GetFalAiFlux2LoraEditRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux2LoraEditOutput +} + +export type GetFalAiFlux2LoraEditRequestsByRequestIdResponse = + GetFalAiFlux2LoraEditRequestsByRequestIdResponses[keyof GetFalAiFlux2LoraEditRequestsByRequestIdResponses] + +export type GetFalAiFlux2EditRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-2/edit/requests/{request_id}/status' +} + +export type GetFalAiFlux2EditRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlux2EditRequestsByRequestIdStatusResponse = + GetFalAiFlux2EditRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2EditRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2EditRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2/edit/requests/{request_id}/cancel' +} + +export type PutFalAiFlux2EditRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFlux2EditRequestsByRequestIdCancelResponse = + PutFalAiFlux2EditRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2EditRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2EditData = { + body: SchemaFlux2EditInput + path?: never + query?: never + url: '/fal-ai/flux-2/edit' +} + +export type PostFalAiFlux2EditResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2EditResponse = + PostFalAiFlux2EditResponses[keyof PostFalAiFlux2EditResponses] + +export type GetFalAiFlux2EditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2/edit/requests/{request_id}' +} + +export type GetFalAiFlux2EditRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux2EditOutput +} + +export type GetFalAiFlux2EditRequestsByRequestIdResponse = + GetFalAiFlux2EditRequestsByRequestIdResponses[keyof GetFalAiFlux2EditRequestsByRequestIdResponses] + +export type GetFalAiFlux2ProEditRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-2-pro/edit/requests/{request_id}/status' +} + +export type GetFalAiFlux2ProEditRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlux2ProEditRequestsByRequestIdStatusResponse = + GetFalAiFlux2ProEditRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2ProEditRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2ProEditRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-pro/edit/requests/{request_id}/cancel' +} + +export type PutFalAiFlux2ProEditRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFlux2ProEditRequestsByRequestIdCancelResponse = + PutFalAiFlux2ProEditRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2ProEditRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2ProEditData = { + body: SchemaFlux2ProEditInput + path?: never + query?: never + url: '/fal-ai/flux-2-pro/edit' +} + +export type PostFalAiFlux2ProEditResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2ProEditResponse = + PostFalAiFlux2ProEditResponses[keyof PostFalAiFlux2ProEditResponses] + +export type GetFalAiFlux2ProEditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-pro/edit/requests/{request_id}' +} + +export type GetFalAiFlux2ProEditRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux2ProEditOutput +} + +export type GetFalAiFlux2ProEditRequestsByRequestIdResponse = + GetFalAiFlux2ProEditRequestsByRequestIdResponses[keyof GetFalAiFlux2ProEditRequestsByRequestIdResponses] + +export type GetFalAiFluxDevImageToImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux/dev/image-to-image/requests/{request_id}/status' +} + +export type GetFalAiFluxDevImageToImageRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxDevImageToImageRequestsByRequestIdStatusResponse = + GetFalAiFluxDevImageToImageRequestsByRequestIdStatusResponses[keyof GetFalAiFluxDevImageToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxDevImageToImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux/dev/image-to-image/requests/{request_id}/cancel' +} + +export type PutFalAiFluxDevImageToImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFluxDevImageToImageRequestsByRequestIdCancelResponse = + PutFalAiFluxDevImageToImageRequestsByRequestIdCancelResponses[keyof PutFalAiFluxDevImageToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxDevImageToImageData = { + body: SchemaFluxDevImageToImageInput + path?: never + query?: never + url: '/fal-ai/flux/dev/image-to-image' +} + +export type PostFalAiFluxDevImageToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxDevImageToImageResponse = + PostFalAiFluxDevImageToImageResponses[keyof PostFalAiFluxDevImageToImageResponses] + +export type GetFalAiFluxDevImageToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux/dev/image-to-image/requests/{request_id}' +} + +export type GetFalAiFluxDevImageToImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxDevImageToImageOutput +} + +export type GetFalAiFluxDevImageToImageRequestsByRequestIdResponse = + GetFalAiFluxDevImageToImageRequestsByRequestIdResponses[keyof GetFalAiFluxDevImageToImageRequestsByRequestIdResponses] + +export type GetFalAiAuraSrRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/aura-sr/requests/{request_id}/status' +} + +export type GetFalAiAuraSrRequestsByRequestIdStatusResponses = { + /** + * The request status. 
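+ * + * Polling sketch (editor's note, not generated output): checking an aura-sr + * request with logs enabled via the `logs` query parameter declared above; the + * base URL and FAL_KEY are fal platform conventions, and the request id is a + * placeholder. + * @example + * const request_id = '...your request id...' + * const status = (await fetch( + * `https://queue.fal.run/fal-ai/aura-sr/requests/${request_id}/status?logs=1`, + * { headers: { Authorization: `Key ${process.env.FAL_KEY}` } }, + * ).then((r) => r.json())) as SchemaQueueStatus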
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiAuraSrRequestsByRequestIdStatusResponse = + GetFalAiAuraSrRequestsByRequestIdStatusResponses[keyof GetFalAiAuraSrRequestsByRequestIdStatusResponses] + +export type PutFalAiAuraSrRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/aura-sr/requests/{request_id}/cancel' +} + +export type PutFalAiAuraSrRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiAuraSrRequestsByRequestIdCancelResponse = + PutFalAiAuraSrRequestsByRequestIdCancelResponses[keyof PutFalAiAuraSrRequestsByRequestIdCancelResponses] + +export type PostFalAiAuraSrData = { + body: SchemaAuraSrInput + path?: never + query?: never + url: '/fal-ai/aura-sr' +} + +export type PostFalAiAuraSrResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiAuraSrResponse = + PostFalAiAuraSrResponses[keyof PostFalAiAuraSrResponses] + +export type GetFalAiAuraSrRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/aura-sr/requests/{request_id}' +} + +export type GetFalAiAuraSrRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaAuraSrOutput +} + +export type GetFalAiAuraSrRequestsByRequestIdResponse = + GetFalAiAuraSrRequestsByRequestIdResponses[keyof GetFalAiAuraSrRequestsByRequestIdResponses] + +export type GetFalAiClarityUpscalerRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/clarity-upscaler/requests/{request_id}/status' +} + +export type GetFalAiClarityUpscalerRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiClarityUpscalerRequestsByRequestIdStatusResponse = + GetFalAiClarityUpscalerRequestsByRequestIdStatusResponses[keyof GetFalAiClarityUpscalerRequestsByRequestIdStatusResponses] + +export type PutFalAiClarityUpscalerRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/clarity-upscaler/requests/{request_id}/cancel' +} + +export type PutFalAiClarityUpscalerRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiClarityUpscalerRequestsByRequestIdCancelResponse = + PutFalAiClarityUpscalerRequestsByRequestIdCancelResponses[keyof PutFalAiClarityUpscalerRequestsByRequestIdCancelResponses] + +export type PostFalAiClarityUpscalerData = { + body: SchemaClarityUpscalerInput + path?: never + query?: never + url: '/fal-ai/clarity-upscaler' +} + +export type PostFalAiClarityUpscalerResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiClarityUpscalerResponse = + PostFalAiClarityUpscalerResponses[keyof PostFalAiClarityUpscalerResponses] + +export type GetFalAiClarityUpscalerRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/clarity-upscaler/requests/{request_id}' +} + +export type GetFalAiClarityUpscalerRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaClarityUpscalerOutput +} + +export type GetFalAiClarityUpscalerRequestsByRequestIdResponse = + GetFalAiClarityUpscalerRequestsByRequestIdResponses[keyof GetFalAiClarityUpscalerRequestsByRequestIdResponses] + +export type GetBriaReplaceBackgroundRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/bria/replace-background/requests/{request_id}/status' +} + +export type GetBriaReplaceBackgroundRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetBriaReplaceBackgroundRequestsByRequestIdStatusResponse = + GetBriaReplaceBackgroundRequestsByRequestIdStatusResponses[keyof GetBriaReplaceBackgroundRequestsByRequestIdStatusResponses] + +export type PutBriaReplaceBackgroundRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/replace-background/requests/{request_id}/cancel' +} + +export type PutBriaReplaceBackgroundRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutBriaReplaceBackgroundRequestsByRequestIdCancelResponse = + PutBriaReplaceBackgroundRequestsByRequestIdCancelResponses[keyof PutBriaReplaceBackgroundRequestsByRequestIdCancelResponses] + +export type PostBriaReplaceBackgroundData = { + body: SchemaReplaceBackgroundInput + path?: never + query?: never + url: '/bria/replace-background' +} + +export type PostBriaReplaceBackgroundResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostBriaReplaceBackgroundResponse = + PostBriaReplaceBackgroundResponses[keyof PostBriaReplaceBackgroundResponses] + +export type GetBriaReplaceBackgroundRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/replace-background/requests/{request_id}' +} + +export type GetBriaReplaceBackgroundRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaReplaceBackgroundOutput +} + +export type GetBriaReplaceBackgroundRequestsByRequestIdResponse = + GetBriaReplaceBackgroundRequestsByRequestIdResponses[keyof GetBriaReplaceBackgroundRequestsByRequestIdResponses] + +export type GetHalfMoonAiAiFaceSwapFaceswapimageRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/half-moon-ai/ai-face-swap/faceswapimage/requests/{request_id}/status' + } + +export type GetHalfMoonAiAiFaceSwapFaceswapimageRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetHalfMoonAiAiFaceSwapFaceswapimageRequestsByRequestIdStatusResponse = + GetHalfMoonAiAiFaceSwapFaceswapimageRequestsByRequestIdStatusResponses[keyof GetHalfMoonAiAiFaceSwapFaceswapimageRequestsByRequestIdStatusResponses] + +export type PutHalfMoonAiAiFaceSwapFaceswapimageRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/half-moon-ai/ai-face-swap/faceswapimage/requests/{request_id}/cancel' + } + +export type PutHalfMoonAiAiFaceSwapFaceswapimageRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutHalfMoonAiAiFaceSwapFaceswapimageRequestsByRequestIdCancelResponse = + PutHalfMoonAiAiFaceSwapFaceswapimageRequestsByRequestIdCancelResponses[keyof PutHalfMoonAiAiFaceSwapFaceswapimageRequestsByRequestIdCancelResponses] + +export type PostHalfMoonAiAiFaceSwapFaceswapimageData = { + body: SchemaAiFaceSwapFaceswapimageInput + path?: never + query?: never + url: '/half-moon-ai/ai-face-swap/faceswapimage' +} + +export type PostHalfMoonAiAiFaceSwapFaceswapimageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostHalfMoonAiAiFaceSwapFaceswapimageResponse = + PostHalfMoonAiAiFaceSwapFaceswapimageResponses[keyof PostHalfMoonAiAiFaceSwapFaceswapimageResponses] + +export type GetHalfMoonAiAiFaceSwapFaceswapimageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/half-moon-ai/ai-face-swap/faceswapimage/requests/{request_id}' +} + +export type GetHalfMoonAiAiFaceSwapFaceswapimageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaAiFaceSwapFaceswapimageOutput +} + +export type GetHalfMoonAiAiFaceSwapFaceswapimageRequestsByRequestIdResponse = + GetHalfMoonAiAiFaceSwapFaceswapimageRequestsByRequestIdResponses[keyof GetHalfMoonAiAiFaceSwapFaceswapimageRequestsByRequestIdResponses] + +export type GetBriaFiboEditReplaceObjectByTextRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/bria/fibo-edit/replace_object_by_text/requests/{request_id}/status' +} + +export type GetBriaFiboEditReplaceObjectByTextRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetBriaFiboEditReplaceObjectByTextRequestsByRequestIdStatusResponse = + GetBriaFiboEditReplaceObjectByTextRequestsByRequestIdStatusResponses[keyof GetBriaFiboEditReplaceObjectByTextRequestsByRequestIdStatusResponses] + +export type PutBriaFiboEditReplaceObjectByTextRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/fibo-edit/replace_object_by_text/requests/{request_id}/cancel' +} + +export type PutBriaFiboEditReplaceObjectByTextRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
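+ * + * Cancellation sketch (editor's note, not generated output): a PUT to the cancel + * route declared above; the base URL and FAL_KEY are fal platform conventions, + * and the request id is a placeholder. + * @example + * const request_id = '...your request id...' + * const { success } = await fetch( + * `https://queue.fal.run/bria/fibo-edit/replace_object_by_text/requests/${request_id}/cancel`, + * { method: 'PUT', headers: { Authorization: `Key ${process.env.FAL_KEY}` } }, + * ).then((r) => r.json() as Promise<{ success?: boolean }>)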
+ */ + success?: boolean + } + } + +export type PutBriaFiboEditReplaceObjectByTextRequestsByRequestIdCancelResponse = + PutBriaFiboEditReplaceObjectByTextRequestsByRequestIdCancelResponses[keyof PutBriaFiboEditReplaceObjectByTextRequestsByRequestIdCancelResponses] + +export type PostBriaFiboEditReplaceObjectByTextData = { + body: SchemaFiboEditReplaceObjectByTextInput + path?: never + query?: never + url: '/bria/fibo-edit/replace_object_by_text' +} + +export type PostBriaFiboEditReplaceObjectByTextResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostBriaFiboEditReplaceObjectByTextResponse = + PostBriaFiboEditReplaceObjectByTextResponses[keyof PostBriaFiboEditReplaceObjectByTextResponses] + +export type GetBriaFiboEditReplaceObjectByTextRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/fibo-edit/replace_object_by_text/requests/{request_id}' +} + +export type GetBriaFiboEditReplaceObjectByTextRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFiboEditReplaceObjectByTextOutput +} + +export type GetBriaFiboEditReplaceObjectByTextRequestsByRequestIdResponse = + GetBriaFiboEditReplaceObjectByTextRequestsByRequestIdResponses[keyof GetBriaFiboEditReplaceObjectByTextRequestsByRequestIdResponses] + +export type GetBriaFiboEditSketchToColoredImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/bria/fibo-edit/sketch_to_colored_image/requests/{request_id}/status' +} + +export type GetBriaFiboEditSketchToColoredImageRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetBriaFiboEditSketchToColoredImageRequestsByRequestIdStatusResponse = + GetBriaFiboEditSketchToColoredImageRequestsByRequestIdStatusResponses[keyof GetBriaFiboEditSketchToColoredImageRequestsByRequestIdStatusResponses] + +export type PutBriaFiboEditSketchToColoredImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/fibo-edit/sketch_to_colored_image/requests/{request_id}/cancel' +} + +export type PutBriaFiboEditSketchToColoredImageRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutBriaFiboEditSketchToColoredImageRequestsByRequestIdCancelResponse = + PutBriaFiboEditSketchToColoredImageRequestsByRequestIdCancelResponses[keyof PutBriaFiboEditSketchToColoredImageRequestsByRequestIdCancelResponses] + +export type PostBriaFiboEditSketchToColoredImageData = { + body: SchemaFiboEditSketchToColoredImageInput + path?: never + query?: never + url: '/bria/fibo-edit/sketch_to_colored_image' +} + +export type PostBriaFiboEditSketchToColoredImageResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostBriaFiboEditSketchToColoredImageResponse = + PostBriaFiboEditSketchToColoredImageResponses[keyof PostBriaFiboEditSketchToColoredImageResponses] + +export type GetBriaFiboEditSketchToColoredImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/fibo-edit/sketch_to_colored_image/requests/{request_id}' +} + +export type GetBriaFiboEditSketchToColoredImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFiboEditSketchToColoredImageOutput +} + +export type GetBriaFiboEditSketchToColoredImageRequestsByRequestIdResponse = + GetBriaFiboEditSketchToColoredImageRequestsByRequestIdResponses[keyof GetBriaFiboEditSketchToColoredImageRequestsByRequestIdResponses] + +export type GetBriaFiboEditRestoreRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/bria/fibo-edit/restore/requests/{request_id}/status' +} + +export type GetBriaFiboEditRestoreRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetBriaFiboEditRestoreRequestsByRequestIdStatusResponse = + GetBriaFiboEditRestoreRequestsByRequestIdStatusResponses[keyof GetBriaFiboEditRestoreRequestsByRequestIdStatusResponses] + +export type PutBriaFiboEditRestoreRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/fibo-edit/restore/requests/{request_id}/cancel' +} + +export type PutBriaFiboEditRestoreRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutBriaFiboEditRestoreRequestsByRequestIdCancelResponse = + PutBriaFiboEditRestoreRequestsByRequestIdCancelResponses[keyof PutBriaFiboEditRestoreRequestsByRequestIdCancelResponses] + +export type PostBriaFiboEditRestoreData = { + body: SchemaFiboEditRestoreInput + path?: never + query?: never + url: '/bria/fibo-edit/restore' +} + +export type PostBriaFiboEditRestoreResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostBriaFiboEditRestoreResponse = + PostBriaFiboEditRestoreResponses[keyof PostBriaFiboEditRestoreResponses] + +export type GetBriaFiboEditRestoreRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/fibo-edit/restore/requests/{request_id}' +} + +export type GetBriaFiboEditRestoreRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFiboEditRestoreOutput +} + +export type GetBriaFiboEditRestoreRequestsByRequestIdResponse = + GetBriaFiboEditRestoreRequestsByRequestIdResponses[keyof GetBriaFiboEditRestoreRequestsByRequestIdResponses] + +export type GetBriaFiboEditReseasonRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/bria/fibo-edit/reseason/requests/{request_id}/status' +} + +export type GetBriaFiboEditReseasonRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetBriaFiboEditReseasonRequestsByRequestIdStatusResponse = + GetBriaFiboEditReseasonRequestsByRequestIdStatusResponses[keyof GetBriaFiboEditReseasonRequestsByRequestIdStatusResponses] + +export type PutBriaFiboEditReseasonRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/fibo-edit/reseason/requests/{request_id}/cancel' +} + +export type PutBriaFiboEditReseasonRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutBriaFiboEditReseasonRequestsByRequestIdCancelResponse = + PutBriaFiboEditReseasonRequestsByRequestIdCancelResponses[keyof PutBriaFiboEditReseasonRequestsByRequestIdCancelResponses] + +export type PostBriaFiboEditReseasonData = { + body: SchemaFiboEditReseasonInput + path?: never + query?: never + url: '/bria/fibo-edit/reseason' +} + +export type PostBriaFiboEditReseasonResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostBriaFiboEditReseasonResponse = + PostBriaFiboEditReseasonResponses[keyof PostBriaFiboEditReseasonResponses] + +export type GetBriaFiboEditReseasonRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/fibo-edit/reseason/requests/{request_id}' +} + +export type GetBriaFiboEditReseasonRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFiboEditReseasonOutput +} + +export type GetBriaFiboEditReseasonRequestsByRequestIdResponse = + GetBriaFiboEditReseasonRequestsByRequestIdResponses[keyof GetBriaFiboEditReseasonRequestsByRequestIdResponses] + +export type GetBriaFiboEditRelightRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/bria/fibo-edit/relight/requests/{request_id}/status' +} + +export type GetBriaFiboEditRelightRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetBriaFiboEditRelightRequestsByRequestIdStatusResponse = + GetBriaFiboEditRelightRequestsByRequestIdStatusResponses[keyof GetBriaFiboEditRelightRequestsByRequestIdStatusResponses] + +export type PutBriaFiboEditRelightRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/fibo-edit/relight/requests/{request_id}/cancel' +} + +export type PutBriaFiboEditRelightRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutBriaFiboEditRelightRequestsByRequestIdCancelResponse = + PutBriaFiboEditRelightRequestsByRequestIdCancelResponses[keyof PutBriaFiboEditRelightRequestsByRequestIdCancelResponses] + +export type PostBriaFiboEditRelightData = { + body: SchemaFiboEditRelightInput + path?: never + query?: never + url: '/bria/fibo-edit/relight' +} + +export type PostBriaFiboEditRelightResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostBriaFiboEditRelightResponse = + PostBriaFiboEditRelightResponses[keyof PostBriaFiboEditRelightResponses] + +export type GetBriaFiboEditRelightRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/fibo-edit/relight/requests/{request_id}' +} + +export type GetBriaFiboEditRelightRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFiboEditRelightOutput +} + +export type GetBriaFiboEditRelightRequestsByRequestIdResponse = + GetBriaFiboEditRelightRequestsByRequestIdResponses[keyof GetBriaFiboEditRelightRequestsByRequestIdResponses] + +export type GetBriaFiboEditRestyleRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/bria/fibo-edit/restyle/requests/{request_id}/status' +} + +export type GetBriaFiboEditRestyleRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetBriaFiboEditRestyleRequestsByRequestIdStatusResponse = + GetBriaFiboEditRestyleRequestsByRequestIdStatusResponses[keyof GetBriaFiboEditRestyleRequestsByRequestIdStatusResponses] + +export type PutBriaFiboEditRestyleRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/fibo-edit/restyle/requests/{request_id}/cancel' +} + +export type PutBriaFiboEditRestyleRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutBriaFiboEditRestyleRequestsByRequestIdCancelResponse = + PutBriaFiboEditRestyleRequestsByRequestIdCancelResponses[keyof PutBriaFiboEditRestyleRequestsByRequestIdCancelResponses] + +export type PostBriaFiboEditRestyleData = { + body: SchemaFiboEditRestyleInput + path?: never + query?: never + url: '/bria/fibo-edit/restyle' +} + +export type PostBriaFiboEditRestyleResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostBriaFiboEditRestyleResponse = + PostBriaFiboEditRestyleResponses[keyof PostBriaFiboEditRestyleResponses] + +export type GetBriaFiboEditRestyleRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/fibo-edit/restyle/requests/{request_id}' +} + +export type GetBriaFiboEditRestyleRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFiboEditRestyleOutput +} + +export type GetBriaFiboEditRestyleRequestsByRequestIdResponse = + GetBriaFiboEditRestyleRequestsByRequestIdResponses[keyof GetBriaFiboEditRestyleRequestsByRequestIdResponses] + +export type GetBriaFiboEditRewriteTextRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/bria/fibo-edit/rewrite_text/requests/{request_id}/status' +} + +export type GetBriaFiboEditRewriteTextRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetBriaFiboEditRewriteTextRequestsByRequestIdStatusResponse = + GetBriaFiboEditRewriteTextRequestsByRequestIdStatusResponses[keyof GetBriaFiboEditRewriteTextRequestsByRequestIdStatusResponses] + +export type PutBriaFiboEditRewriteTextRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/fibo-edit/rewrite_text/requests/{request_id}/cancel' +} + +export type PutBriaFiboEditRewriteTextRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutBriaFiboEditRewriteTextRequestsByRequestIdCancelResponse = + PutBriaFiboEditRewriteTextRequestsByRequestIdCancelResponses[keyof PutBriaFiboEditRewriteTextRequestsByRequestIdCancelResponses] + +export type PostBriaFiboEditRewriteTextData = { + body: SchemaFiboEditRewriteTextInput + path?: never + query?: never + url: '/bria/fibo-edit/rewrite_text' +} + +export type PostBriaFiboEditRewriteTextResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostBriaFiboEditRewriteTextResponse = + PostBriaFiboEditRewriteTextResponses[keyof PostBriaFiboEditRewriteTextResponses] + +export type GetBriaFiboEditRewriteTextRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/fibo-edit/rewrite_text/requests/{request_id}' +} + +export type GetBriaFiboEditRewriteTextRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFiboEditRewriteTextOutput +} + +export type GetBriaFiboEditRewriteTextRequestsByRequestIdResponse = + GetBriaFiboEditRewriteTextRequestsByRequestIdResponses[keyof GetBriaFiboEditRewriteTextRequestsByRequestIdResponses] + +export type GetBriaFiboEditEraseByTextRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/bria/fibo-edit/erase_by_text/requests/{request_id}/status' +} + +export type GetBriaFiboEditEraseByTextRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetBriaFiboEditEraseByTextRequestsByRequestIdStatusResponse = + GetBriaFiboEditEraseByTextRequestsByRequestIdStatusResponses[keyof GetBriaFiboEditEraseByTextRequestsByRequestIdStatusResponses] + +export type PutBriaFiboEditEraseByTextRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/fibo-edit/erase_by_text/requests/{request_id}/cancel' +} + +export type PutBriaFiboEditEraseByTextRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutBriaFiboEditEraseByTextRequestsByRequestIdCancelResponse = + PutBriaFiboEditEraseByTextRequestsByRequestIdCancelResponses[keyof PutBriaFiboEditEraseByTextRequestsByRequestIdCancelResponses] + +export type PostBriaFiboEditEraseByTextData = { + body: SchemaFiboEditEraseByTextInput + path?: never + query?: never + url: '/bria/fibo-edit/erase_by_text' +} + +export type PostBriaFiboEditEraseByTextResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostBriaFiboEditEraseByTextResponse = + PostBriaFiboEditEraseByTextResponses[keyof PostBriaFiboEditEraseByTextResponses] + +export type GetBriaFiboEditEraseByTextRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/fibo-edit/erase_by_text/requests/{request_id}' +} + +export type GetBriaFiboEditEraseByTextRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFiboEditEraseByTextOutput +} + +export type GetBriaFiboEditEraseByTextRequestsByRequestIdResponse = + GetBriaFiboEditEraseByTextRequestsByRequestIdResponses[keyof GetBriaFiboEditEraseByTextRequestsByRequestIdResponses] + +export type GetBriaFiboEditEditRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/bria/fibo-edit/edit/requests/{request_id}/status' +} + +export type GetBriaFiboEditEditRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetBriaFiboEditEditRequestsByRequestIdStatusResponse = + GetBriaFiboEditEditRequestsByRequestIdStatusResponses[keyof GetBriaFiboEditEditRequestsByRequestIdStatusResponses] + +export type PutBriaFiboEditEditRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/fibo-edit/edit/requests/{request_id}/cancel' +} + +export type PutBriaFiboEditEditRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutBriaFiboEditEditRequestsByRequestIdCancelResponse = + PutBriaFiboEditEditRequestsByRequestIdCancelResponses[keyof PutBriaFiboEditEditRequestsByRequestIdCancelResponses] + +export type PostBriaFiboEditEditData = { + body: SchemaFiboEditEditInput + path?: never + query?: never + url: '/bria/fibo-edit/edit' +} + +export type PostBriaFiboEditEditResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostBriaFiboEditEditResponse = + PostBriaFiboEditEditResponses[keyof PostBriaFiboEditEditResponses] + +export type GetBriaFiboEditEditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/fibo-edit/edit/requests/{request_id}' +} + +export type GetBriaFiboEditEditRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFiboEditEditOutput +} + +export type GetBriaFiboEditEditRequestsByRequestIdResponse = + GetBriaFiboEditEditRequestsByRequestIdResponses[keyof GetBriaFiboEditEditRequestsByRequestIdResponses] + +export type GetBriaFiboEditAddObjectByTextRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/bria/fibo-edit/add_object_by_text/requests/{request_id}/status' +} + +export type GetBriaFiboEditAddObjectByTextRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetBriaFiboEditAddObjectByTextRequestsByRequestIdStatusResponse = + GetBriaFiboEditAddObjectByTextRequestsByRequestIdStatusResponses[keyof GetBriaFiboEditAddObjectByTextRequestsByRequestIdStatusResponses] + +export type PutBriaFiboEditAddObjectByTextRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/fibo-edit/add_object_by_text/requests/{request_id}/cancel' +} + +export type PutBriaFiboEditAddObjectByTextRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutBriaFiboEditAddObjectByTextRequestsByRequestIdCancelResponse = + PutBriaFiboEditAddObjectByTextRequestsByRequestIdCancelResponses[keyof PutBriaFiboEditAddObjectByTextRequestsByRequestIdCancelResponses] + +export type PostBriaFiboEditAddObjectByTextData = { + body: SchemaFiboEditAddObjectByTextInput + path?: never + query?: never + url: '/bria/fibo-edit/add_object_by_text' +} + +export type PostBriaFiboEditAddObjectByTextResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostBriaFiboEditAddObjectByTextResponse = + PostBriaFiboEditAddObjectByTextResponses[keyof PostBriaFiboEditAddObjectByTextResponses] + +export type GetBriaFiboEditAddObjectByTextRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/fibo-edit/add_object_by_text/requests/{request_id}' +} + +export type GetBriaFiboEditAddObjectByTextRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFiboEditAddObjectByTextOutput +} + +export type GetBriaFiboEditAddObjectByTextRequestsByRequestIdResponse = + GetBriaFiboEditAddObjectByTextRequestsByRequestIdResponses[keyof GetBriaFiboEditAddObjectByTextRequestsByRequestIdResponses] + +export type GetBriaFiboEditBlendRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/bria/fibo-edit/blend/requests/{request_id}/status' +} + +export type GetBriaFiboEditBlendRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetBriaFiboEditBlendRequestsByRequestIdStatusResponse = + GetBriaFiboEditBlendRequestsByRequestIdStatusResponses[keyof GetBriaFiboEditBlendRequestsByRequestIdStatusResponses] + +export type PutBriaFiboEditBlendRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/fibo-edit/blend/requests/{request_id}/cancel' +} + +export type PutBriaFiboEditBlendRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutBriaFiboEditBlendRequestsByRequestIdCancelResponse = + PutBriaFiboEditBlendRequestsByRequestIdCancelResponses[keyof PutBriaFiboEditBlendRequestsByRequestIdCancelResponses] + +export type PostBriaFiboEditBlendData = { + body: SchemaFiboEditBlendInput + path?: never + query?: never + url: '/bria/fibo-edit/blend' +} + +export type PostBriaFiboEditBlendResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostBriaFiboEditBlendResponse = + PostBriaFiboEditBlendResponses[keyof PostBriaFiboEditBlendResponses] + +export type GetBriaFiboEditBlendRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/fibo-edit/blend/requests/{request_id}' +} + +export type GetBriaFiboEditBlendRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFiboEditBlendOutput +} + +export type GetBriaFiboEditBlendRequestsByRequestIdResponse = + GetBriaFiboEditBlendRequestsByRequestIdResponses[keyof GetBriaFiboEditBlendRequestsByRequestIdResponses] + +export type GetBriaFiboEditColorizeRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/bria/fibo-edit/colorize/requests/{request_id}/status' +} + +export type GetBriaFiboEditColorizeRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetBriaFiboEditColorizeRequestsByRequestIdStatusResponse = + GetBriaFiboEditColorizeRequestsByRequestIdStatusResponses[keyof GetBriaFiboEditColorizeRequestsByRequestIdStatusResponses] + +export type PutBriaFiboEditColorizeRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/fibo-edit/colorize/requests/{request_id}/cancel' +} + +export type PutBriaFiboEditColorizeRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutBriaFiboEditColorizeRequestsByRequestIdCancelResponse = + PutBriaFiboEditColorizeRequestsByRequestIdCancelResponses[keyof PutBriaFiboEditColorizeRequestsByRequestIdCancelResponses] + +export type PostBriaFiboEditColorizeData = { + body: SchemaFiboEditColorizeInput + path?: never + query?: never + url: '/bria/fibo-edit/colorize' +} + +export type PostBriaFiboEditColorizeResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostBriaFiboEditColorizeResponse = + PostBriaFiboEditColorizeResponses[keyof PostBriaFiboEditColorizeResponses] + +export type GetBriaFiboEditColorizeRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/fibo-edit/colorize/requests/{request_id}' +} + +export type GetBriaFiboEditColorizeRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFiboEditColorizeOutput +} + +export type GetBriaFiboEditColorizeRequestsByRequestIdResponse = + GetBriaFiboEditColorizeRequestsByRequestIdResponses[keyof GetBriaFiboEditColorizeRequestsByRequestIdResponses] + +export type GetFalAiFlux2Klein9bBaseEditLoraRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-2/klein/9b/base/edit/lora/requests/{request_id}/status' +} + +export type GetFalAiFlux2Klein9bBaseEditLoraRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiFlux2Klein9bBaseEditLoraRequestsByRequestIdStatusResponse = + GetFalAiFlux2Klein9bBaseEditLoraRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2Klein9bBaseEditLoraRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2Klein9bBaseEditLoraRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2/klein/9b/base/edit/lora/requests/{request_id}/cancel' +} + +export type PutFalAiFlux2Klein9bBaseEditLoraRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFlux2Klein9bBaseEditLoraRequestsByRequestIdCancelResponse = + PutFalAiFlux2Klein9bBaseEditLoraRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2Klein9bBaseEditLoraRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2Klein9bBaseEditLoraData = { + body: SchemaFlux2Klein9bBaseEditLoraInput + path?: never + query?: never + url: '/fal-ai/flux-2/klein/9b/base/edit/lora' +} + +export type PostFalAiFlux2Klein9bBaseEditLoraResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2Klein9bBaseEditLoraResponse = + PostFalAiFlux2Klein9bBaseEditLoraResponses[keyof PostFalAiFlux2Klein9bBaseEditLoraResponses] + +export type GetFalAiFlux2Klein9bBaseEditLoraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2/klein/9b/base/edit/lora/requests/{request_id}' +} + +export type GetFalAiFlux2Klein9bBaseEditLoraRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux2Klein9bBaseEditLoraOutput +} + +export type GetFalAiFlux2Klein9bBaseEditLoraRequestsByRequestIdResponse = + GetFalAiFlux2Klein9bBaseEditLoraRequestsByRequestIdResponses[keyof GetFalAiFlux2Klein9bBaseEditLoraRequestsByRequestIdResponses] + +export type GetFalAiFlux2Klein4bBaseEditLoraRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-2/klein/4b/base/edit/lora/requests/{request_id}/status' +} + +export type GetFalAiFlux2Klein4bBaseEditLoraRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFlux2Klein4bBaseEditLoraRequestsByRequestIdStatusResponse = + GetFalAiFlux2Klein4bBaseEditLoraRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2Klein4bBaseEditLoraRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2Klein4bBaseEditLoraRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2/klein/4b/base/edit/lora/requests/{request_id}/cancel' +} + +export type PutFalAiFlux2Klein4bBaseEditLoraRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
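+ *
+ * Hedged note: `success` is optional in the generated shape, so a missing
+ * value is best read as "unknown" rather than as a failed cancellation.
+ *
+ * @example
+ * // Illustrative value only; not produced by this module.
+ * const cancelled: PutFalAiFlux2Klein4bBaseEditLoraRequestsByRequestIdCancelResponse =
+ *   { success: true }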
+ */ + success?: boolean + } + } + +export type PutFalAiFlux2Klein4bBaseEditLoraRequestsByRequestIdCancelResponse = + PutFalAiFlux2Klein4bBaseEditLoraRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2Klein4bBaseEditLoraRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2Klein4bBaseEditLoraData = { + body: SchemaFlux2Klein4bBaseEditLoraInput + path?: never + query?: never + url: '/fal-ai/flux-2/klein/4b/base/edit/lora' +} + +export type PostFalAiFlux2Klein4bBaseEditLoraResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2Klein4bBaseEditLoraResponse = + PostFalAiFlux2Klein4bBaseEditLoraResponses[keyof PostFalAiFlux2Klein4bBaseEditLoraResponses] + +export type GetFalAiFlux2Klein4bBaseEditLoraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2/klein/4b/base/edit/lora/requests/{request_id}' +} + +export type GetFalAiFlux2Klein4bBaseEditLoraRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux2Klein4bBaseEditLoraOutput +} + +export type GetFalAiFlux2Klein4bBaseEditLoraRequestsByRequestIdResponse = + GetFalAiFlux2Klein4bBaseEditLoraRequestsByRequestIdResponses[keyof GetFalAiFlux2Klein4bBaseEditLoraRequestsByRequestIdResponses] + +export type GetFalAiFlux2Klein4bBaseEditRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-2/klein/4b/base/edit/requests/{request_id}/status' +} + +export type GetFalAiFlux2Klein4bBaseEditRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlux2Klein4bBaseEditRequestsByRequestIdStatusResponse = + GetFalAiFlux2Klein4bBaseEditRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2Klein4bBaseEditRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2Klein4bBaseEditRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2/klein/4b/base/edit/requests/{request_id}/cancel' +} + +export type PutFalAiFlux2Klein4bBaseEditRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFlux2Klein4bBaseEditRequestsByRequestIdCancelResponse = + PutFalAiFlux2Klein4bBaseEditRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2Klein4bBaseEditRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2Klein4bBaseEditData = { + body: SchemaFlux2Klein4bBaseEditInput + path?: never + query?: never + url: '/fal-ai/flux-2/klein/4b/base/edit' +} + +export type PostFalAiFlux2Klein4bBaseEditResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2Klein4bBaseEditResponse = + PostFalAiFlux2Klein4bBaseEditResponses[keyof PostFalAiFlux2Klein4bBaseEditResponses] + +export type GetFalAiFlux2Klein4bBaseEditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2/klein/4b/base/edit/requests/{request_id}' +} + +export type GetFalAiFlux2Klein4bBaseEditRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaFlux2Klein4bBaseEditOutput +} + +export type GetFalAiFlux2Klein4bBaseEditRequestsByRequestIdResponse = + GetFalAiFlux2Klein4bBaseEditRequestsByRequestIdResponses[keyof GetFalAiFlux2Klein4bBaseEditRequestsByRequestIdResponses] + +export type GetFalAiFlux2Klein9bBaseEditRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-2/klein/9b/base/edit/requests/{request_id}/status' +} + +export type GetFalAiFlux2Klein9bBaseEditRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlux2Klein9bBaseEditRequestsByRequestIdStatusResponse = + GetFalAiFlux2Klein9bBaseEditRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2Klein9bBaseEditRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2Klein9bBaseEditRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2/klein/9b/base/edit/requests/{request_id}/cancel' +} + +export type PutFalAiFlux2Klein9bBaseEditRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFlux2Klein9bBaseEditRequestsByRequestIdCancelResponse = + PutFalAiFlux2Klein9bBaseEditRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2Klein9bBaseEditRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2Klein9bBaseEditData = { + body: SchemaFlux2Klein9bBaseEditInput + path?: never + query?: never + url: '/fal-ai/flux-2/klein/9b/base/edit' +} + +export type PostFalAiFlux2Klein9bBaseEditResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2Klein9bBaseEditResponse = + PostFalAiFlux2Klein9bBaseEditResponses[keyof PostFalAiFlux2Klein9bBaseEditResponses] + +export type GetFalAiFlux2Klein9bBaseEditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2/klein/9b/base/edit/requests/{request_id}' +} + +export type GetFalAiFlux2Klein9bBaseEditRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux2Klein9bBaseEditOutput +} + +export type GetFalAiFlux2Klein9bBaseEditRequestsByRequestIdResponse = + GetFalAiFlux2Klein9bBaseEditRequestsByRequestIdResponses[keyof GetFalAiFlux2Klein9bBaseEditRequestsByRequestIdResponses] + +export type GetFalAiFlux2Klein4bEditRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-2/klein/4b/edit/requests/{request_id}/status' +} + +export type GetFalAiFlux2Klein4bEditRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlux2Klein4bEditRequestsByRequestIdStatusResponse = + GetFalAiFlux2Klein4bEditRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2Klein4bEditRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2Klein4bEditRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2/klein/4b/edit/requests/{request_id}/cancel' +} + +export type PutFalAiFlux2Klein4bEditRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFlux2Klein4bEditRequestsByRequestIdCancelResponse = + PutFalAiFlux2Klein4bEditRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2Klein4bEditRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2Klein4bEditData = { + body: SchemaFlux2Klein4bEditInput + path?: never + query?: never + url: '/fal-ai/flux-2/klein/4b/edit' +} + +export type PostFalAiFlux2Klein4bEditResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2Klein4bEditResponse = + PostFalAiFlux2Klein4bEditResponses[keyof PostFalAiFlux2Klein4bEditResponses] + +export type GetFalAiFlux2Klein4bEditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2/klein/4b/edit/requests/{request_id}' +} + +export type GetFalAiFlux2Klein4bEditRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux2Klein4bEditOutput +} + +export type GetFalAiFlux2Klein4bEditRequestsByRequestIdResponse = + GetFalAiFlux2Klein4bEditRequestsByRequestIdResponses[keyof GetFalAiFlux2Klein4bEditRequestsByRequestIdResponses] + +export type GetFalAiFlux2Klein9bEditRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-2/klein/9b/edit/requests/{request_id}/status' +} + +export type GetFalAiFlux2Klein9bEditRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlux2Klein9bEditRequestsByRequestIdStatusResponse = + GetFalAiFlux2Klein9bEditRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2Klein9bEditRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2Klein9bEditRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2/klein/9b/edit/requests/{request_id}/cancel' +} + +export type PutFalAiFlux2Klein9bEditRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFlux2Klein9bEditRequestsByRequestIdCancelResponse = + PutFalAiFlux2Klein9bEditRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2Klein9bEditRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2Klein9bEditData = { + body: SchemaFlux2Klein9bEditInput + path?: never + query?: never + url: '/fal-ai/flux-2/klein/9b/edit' +} + +export type PostFalAiFlux2Klein9bEditResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2Klein9bEditResponse = + PostFalAiFlux2Klein9bEditResponses[keyof PostFalAiFlux2Klein9bEditResponses] + +export type GetFalAiFlux2Klein9bEditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2/klein/9b/edit/requests/{request_id}' +} + +export type GetFalAiFlux2Klein9bEditRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux2Klein9bEditOutput +} + +export type GetFalAiFlux2Klein9bEditRequestsByRequestIdResponse = + GetFalAiFlux2Klein9bEditRequestsByRequestIdResponses[keyof GetFalAiFlux2Klein9bEditRequestsByRequestIdResponses] + +export type GetFalAiGlmImageImageToImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/glm-image/image-to-image/requests/{request_id}/status' +} + +export type GetFalAiGlmImageImageToImageRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiGlmImageImageToImageRequestsByRequestIdStatusResponse = + GetFalAiGlmImageImageToImageRequestsByRequestIdStatusResponses[keyof GetFalAiGlmImageImageToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiGlmImageImageToImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/glm-image/image-to-image/requests/{request_id}/cancel' +} + +export type PutFalAiGlmImageImageToImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiGlmImageImageToImageRequestsByRequestIdCancelResponse = + PutFalAiGlmImageImageToImageRequestsByRequestIdCancelResponses[keyof PutFalAiGlmImageImageToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiGlmImageImageToImageData = { + body: SchemaGlmImageImageToImageInput + path?: never + query?: never + url: '/fal-ai/glm-image/image-to-image' +} + +export type PostFalAiGlmImageImageToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiGlmImageImageToImageResponse = + PostFalAiGlmImageImageToImageResponses[keyof PostFalAiGlmImageImageToImageResponses] + +export type GetFalAiGlmImageImageToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/glm-image/image-to-image/requests/{request_id}' +} + +export type GetFalAiGlmImageImageToImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaGlmImageImageToImageOutput +} + +export type GetFalAiGlmImageImageToImageRequestsByRequestIdResponse = + GetFalAiGlmImageImageToImageRequestsByRequestIdResponses[keyof GetFalAiGlmImageImageToImageRequestsByRequestIdResponses] + +export type GetFalAiQwenImageEdit2511MultipleAnglesRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/qwen-image-edit-2511-multiple-angles/requests/{request_id}/status' + } + +export type GetFalAiQwenImageEdit2511MultipleAnglesRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiQwenImageEdit2511MultipleAnglesRequestsByRequestIdStatusResponse = + GetFalAiQwenImageEdit2511MultipleAnglesRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageEdit2511MultipleAnglesRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageEdit2511MultipleAnglesRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-2511-multiple-angles/requests/{request_id}/cancel' + } + +export type PutFalAiQwenImageEdit2511MultipleAnglesRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiQwenImageEdit2511MultipleAnglesRequestsByRequestIdCancelResponse = + PutFalAiQwenImageEdit2511MultipleAnglesRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageEdit2511MultipleAnglesRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageEdit2511MultipleAnglesData = { + body: SchemaQwenImageEdit2511MultipleAnglesInput + path?: never + query?: never + url: '/fal-ai/qwen-image-edit-2511-multiple-angles' +} + +export type PostFalAiQwenImageEdit2511MultipleAnglesResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImageEdit2511MultipleAnglesResponse = + PostFalAiQwenImageEdit2511MultipleAnglesResponses[keyof PostFalAiQwenImageEdit2511MultipleAnglesResponses] + +export type GetFalAiQwenImageEdit2511MultipleAnglesRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-2511-multiple-angles/requests/{request_id}' +} + +export type GetFalAiQwenImageEdit2511MultipleAnglesRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaQwenImageEdit2511MultipleAnglesOutput + } + +export type GetFalAiQwenImageEdit2511MultipleAnglesRequestsByRequestIdResponse = + GetFalAiQwenImageEdit2511MultipleAnglesRequestsByRequestIdResponses[keyof GetFalAiQwenImageEdit2511MultipleAnglesRequestsByRequestIdResponses] + +export type GetFalAiQwenImageEdit2511LoraRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-edit-2511/lora/requests/{request_id}/status' +} + +export type GetFalAiQwenImageEdit2511LoraRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiQwenImageEdit2511LoraRequestsByRequestIdStatusResponse = + GetFalAiQwenImageEdit2511LoraRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageEdit2511LoraRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageEdit2511LoraRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-2511/lora/requests/{request_id}/cancel' +} + +export type PutFalAiQwenImageEdit2511LoraRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. 
+ */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiQwenImageEdit2511LoraRequestsByRequestIdCancelResponse = + PutFalAiQwenImageEdit2511LoraRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageEdit2511LoraRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageEdit2511LoraData = { + body: SchemaQwenImageEdit2511LoraInput + path?: never + query?: never + url: '/fal-ai/qwen-image-edit-2511/lora' +} + +export type PostFalAiQwenImageEdit2511LoraResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImageEdit2511LoraResponse = + PostFalAiQwenImageEdit2511LoraResponses[keyof PostFalAiQwenImageEdit2511LoraResponses] + +export type GetFalAiQwenImageEdit2511LoraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-2511/lora/requests/{request_id}' +} + +export type GetFalAiQwenImageEdit2511LoraRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaQwenImageEdit2511LoraOutput +} + +export type GetFalAiQwenImageEdit2511LoraRequestsByRequestIdResponse = + GetFalAiQwenImageEdit2511LoraRequestsByRequestIdResponses[keyof GetFalAiQwenImageEdit2511LoraRequestsByRequestIdResponses] + +export type GetHalfMoonAiAiHomeStyleRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/half-moon-ai/ai-home/style/requests/{request_id}/status' +} + +export type GetHalfMoonAiAiHomeStyleRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetHalfMoonAiAiHomeStyleRequestsByRequestIdStatusResponse = + GetHalfMoonAiAiHomeStyleRequestsByRequestIdStatusResponses[keyof GetHalfMoonAiAiHomeStyleRequestsByRequestIdStatusResponses] + +export type PutHalfMoonAiAiHomeStyleRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/half-moon-ai/ai-home/style/requests/{request_id}/cancel' +} + +export type PutHalfMoonAiAiHomeStyleRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutHalfMoonAiAiHomeStyleRequestsByRequestIdCancelResponse = + PutHalfMoonAiAiHomeStyleRequestsByRequestIdCancelResponses[keyof PutHalfMoonAiAiHomeStyleRequestsByRequestIdCancelResponses] + +export type PostHalfMoonAiAiHomeStyleData = { + body: SchemaAiHomeStyleInput + path?: never + query?: never + url: '/half-moon-ai/ai-home/style' +} + +export type PostHalfMoonAiAiHomeStyleResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostHalfMoonAiAiHomeStyleResponse = + PostHalfMoonAiAiHomeStyleResponses[keyof PostHalfMoonAiAiHomeStyleResponses] + +export type GetHalfMoonAiAiHomeStyleRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/half-moon-ai/ai-home/style/requests/{request_id}' +} + +export type GetHalfMoonAiAiHomeStyleRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaAiHomeStyleOutput +} + +export type GetHalfMoonAiAiHomeStyleRequestsByRequestIdResponse = + GetHalfMoonAiAiHomeStyleRequestsByRequestIdResponses[keyof GetHalfMoonAiAiHomeStyleRequestsByRequestIdResponses] + +export type GetHalfMoonAiAiHomeEditRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/half-moon-ai/ai-home/edit/requests/{request_id}/status' +} + +export type GetHalfMoonAiAiHomeEditRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetHalfMoonAiAiHomeEditRequestsByRequestIdStatusResponse = + GetHalfMoonAiAiHomeEditRequestsByRequestIdStatusResponses[keyof GetHalfMoonAiAiHomeEditRequestsByRequestIdStatusResponses] + +export type PutHalfMoonAiAiHomeEditRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/half-moon-ai/ai-home/edit/requests/{request_id}/cancel' +} + +export type PutHalfMoonAiAiHomeEditRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutHalfMoonAiAiHomeEditRequestsByRequestIdCancelResponse = + PutHalfMoonAiAiHomeEditRequestsByRequestIdCancelResponses[keyof PutHalfMoonAiAiHomeEditRequestsByRequestIdCancelResponses] + +export type PostHalfMoonAiAiHomeEditData = { + body: SchemaAiHomeEditInput + path?: never + query?: never + url: '/half-moon-ai/ai-home/edit' +} + +export type PostHalfMoonAiAiHomeEditResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostHalfMoonAiAiHomeEditResponse = + PostHalfMoonAiAiHomeEditResponses[keyof PostHalfMoonAiAiHomeEditResponses] + +export type GetHalfMoonAiAiHomeEditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/half-moon-ai/ai-home/edit/requests/{request_id}' +} + +export type GetHalfMoonAiAiHomeEditRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaAiHomeEditOutput +} + +export type GetHalfMoonAiAiHomeEditRequestsByRequestIdResponse = + GetHalfMoonAiAiHomeEditRequestsByRequestIdResponses[keyof GetHalfMoonAiAiHomeEditRequestsByRequestIdResponses] + +export type GetFalAiQwenImageLayeredLoraRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-layered/lora/requests/{request_id}/status' +} + +export type GetFalAiQwenImageLayeredLoraRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiQwenImageLayeredLoraRequestsByRequestIdStatusResponse = + GetFalAiQwenImageLayeredLoraRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageLayeredLoraRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageLayeredLoraRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-layered/lora/requests/{request_id}/cancel' +} + +export type PutFalAiQwenImageLayeredLoraRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. 
+ */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiQwenImageLayeredLoraRequestsByRequestIdCancelResponse = + PutFalAiQwenImageLayeredLoraRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageLayeredLoraRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageLayeredLoraData = { + body: SchemaQwenImageLayeredLoraInput + path?: never + query?: never + url: '/fal-ai/qwen-image-layered/lora' +} + +export type PostFalAiQwenImageLayeredLoraResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImageLayeredLoraResponse = + PostFalAiQwenImageLayeredLoraResponses[keyof PostFalAiQwenImageLayeredLoraResponses] + +export type GetFalAiQwenImageLayeredLoraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-layered/lora/requests/{request_id}' +} + +export type GetFalAiQwenImageLayeredLoraRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaQwenImageLayeredLoraOutput +} + +export type GetFalAiQwenImageLayeredLoraRequestsByRequestIdResponse = + GetFalAiQwenImageLayeredLoraRequestsByRequestIdResponses[keyof GetFalAiQwenImageLayeredLoraRequestsByRequestIdResponses] + +export type GetWanV26ImageToImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/wan/v2.6/image-to-image/requests/{request_id}/status' +} + +export type GetWanV26ImageToImageRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetWanV26ImageToImageRequestsByRequestIdStatusResponse = + GetWanV26ImageToImageRequestsByRequestIdStatusResponses[keyof GetWanV26ImageToImageRequestsByRequestIdStatusResponses] + +export type PutWanV26ImageToImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/wan/v2.6/image-to-image/requests/{request_id}/cancel' +} + +export type PutWanV26ImageToImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutWanV26ImageToImageRequestsByRequestIdCancelResponse = + PutWanV26ImageToImageRequestsByRequestIdCancelResponses[keyof PutWanV26ImageToImageRequestsByRequestIdCancelResponses] + +export type PostWanV26ImageToImageData = { + body: SchemaV26ImageToImageInput + path?: never + query?: never + url: '/wan/v2.6/image-to-image' +} + +export type PostWanV26ImageToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostWanV26ImageToImageResponse = + PostWanV26ImageToImageResponses[keyof PostWanV26ImageToImageResponses] + +export type GetWanV26ImageToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/wan/v2.6/image-to-image/requests/{request_id}' +} + +export type GetWanV26ImageToImageRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaV26ImageToImageOutput +} + +export type GetWanV26ImageToImageRequestsByRequestIdResponse = + GetWanV26ImageToImageRequestsByRequestIdResponses[keyof GetWanV26ImageToImageRequestsByRequestIdResponses] + +export type GetFalAiQwenImageEdit2511RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-edit-2511/requests/{request_id}/status' +} + +export type GetFalAiQwenImageEdit2511RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiQwenImageEdit2511RequestsByRequestIdStatusResponse = + GetFalAiQwenImageEdit2511RequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageEdit2511RequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageEdit2511RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-2511/requests/{request_id}/cancel' +} + +export type PutFalAiQwenImageEdit2511RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiQwenImageEdit2511RequestsByRequestIdCancelResponse = + PutFalAiQwenImageEdit2511RequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageEdit2511RequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageEdit2511Data = { + body: SchemaQwenImageEdit2511Input + path?: never + query?: never + url: '/fal-ai/qwen-image-edit-2511' +} + +export type PostFalAiQwenImageEdit2511Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImageEdit2511Response = + PostFalAiQwenImageEdit2511Responses[keyof PostFalAiQwenImageEdit2511Responses] + +export type GetFalAiQwenImageEdit2511RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-2511/requests/{request_id}' +} + +export type GetFalAiQwenImageEdit2511RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaQwenImageEdit2511Output +} + +export type GetFalAiQwenImageEdit2511RequestsByRequestIdResponse = + GetFalAiQwenImageEdit2511RequestsByRequestIdResponses[keyof GetFalAiQwenImageEdit2511RequestsByRequestIdResponses] + +export type GetFalAiQwenImageLayeredRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-layered/requests/{request_id}/status' +} + +export type GetFalAiQwenImageLayeredRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiQwenImageLayeredRequestsByRequestIdStatusResponse = + GetFalAiQwenImageLayeredRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageLayeredRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageLayeredRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-layered/requests/{request_id}/cancel' +} + +export type PutFalAiQwenImageLayeredRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. 
+ */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiQwenImageLayeredRequestsByRequestIdCancelResponse = + PutFalAiQwenImageLayeredRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageLayeredRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageLayeredData = { + body: SchemaQwenImageLayeredInput + path?: never + query?: never + url: '/fal-ai/qwen-image-layered' +} + +export type PostFalAiQwenImageLayeredResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImageLayeredResponse = + PostFalAiQwenImageLayeredResponses[keyof PostFalAiQwenImageLayeredResponses] + +export type GetFalAiQwenImageLayeredRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-layered/requests/{request_id}' +} + +export type GetFalAiQwenImageLayeredRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaQwenImageLayeredOutput +} + +export type GetFalAiQwenImageLayeredRequestsByRequestIdResponse = + GetFalAiQwenImageLayeredRequestsByRequestIdResponses[keyof GetFalAiQwenImageLayeredRequestsByRequestIdResponses] + +export type GetFalAiZImageTurboInpaintLoraRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/z-image/turbo/inpaint/lora/requests/{request_id}/status' +} + +export type GetFalAiZImageTurboInpaintLoraRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiZImageTurboInpaintLoraRequestsByRequestIdStatusResponse = + GetFalAiZImageTurboInpaintLoraRequestsByRequestIdStatusResponses[keyof GetFalAiZImageTurboInpaintLoraRequestsByRequestIdStatusResponses] + +export type PutFalAiZImageTurboInpaintLoraRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/z-image/turbo/inpaint/lora/requests/{request_id}/cancel' +} + +export type PutFalAiZImageTurboInpaintLoraRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiZImageTurboInpaintLoraRequestsByRequestIdCancelResponse = + PutFalAiZImageTurboInpaintLoraRequestsByRequestIdCancelResponses[keyof PutFalAiZImageTurboInpaintLoraRequestsByRequestIdCancelResponses] + +export type PostFalAiZImageTurboInpaintLoraData = { + body: SchemaZImageTurboInpaintLoraInput + path?: never + query?: never + url: '/fal-ai/z-image/turbo/inpaint/lora' +} + +export type PostFalAiZImageTurboInpaintLoraResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiZImageTurboInpaintLoraResponse = + PostFalAiZImageTurboInpaintLoraResponses[keyof PostFalAiZImageTurboInpaintLoraResponses] + +export type GetFalAiZImageTurboInpaintLoraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/z-image/turbo/inpaint/lora/requests/{request_id}' +} + +export type GetFalAiZImageTurboInpaintLoraRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaZImageTurboInpaintLoraOutput +} + +export type GetFalAiZImageTurboInpaintLoraRequestsByRequestIdResponse = + GetFalAiZImageTurboInpaintLoraRequestsByRequestIdResponses[keyof GetFalAiZImageTurboInpaintLoraRequestsByRequestIdResponses] + +export type GetFalAiZImageTurboInpaintRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/z-image/turbo/inpaint/requests/{request_id}/status' +} + +export type GetFalAiZImageTurboInpaintRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiZImageTurboInpaintRequestsByRequestIdStatusResponse = + GetFalAiZImageTurboInpaintRequestsByRequestIdStatusResponses[keyof GetFalAiZImageTurboInpaintRequestsByRequestIdStatusResponses] + +export type PutFalAiZImageTurboInpaintRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/z-image/turbo/inpaint/requests/{request_id}/cancel' +} + +export type PutFalAiZImageTurboInpaintRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiZImageTurboInpaintRequestsByRequestIdCancelResponse = + PutFalAiZImageTurboInpaintRequestsByRequestIdCancelResponses[keyof PutFalAiZImageTurboInpaintRequestsByRequestIdCancelResponses] + +export type PostFalAiZImageTurboInpaintData = { + body: SchemaZImageTurboInpaintInput + path?: never + query?: never + url: '/fal-ai/z-image/turbo/inpaint' +} + +export type PostFalAiZImageTurboInpaintResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiZImageTurboInpaintResponse = + PostFalAiZImageTurboInpaintResponses[keyof PostFalAiZImageTurboInpaintResponses] + +export type GetFalAiZImageTurboInpaintRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/z-image/turbo/inpaint/requests/{request_id}' +} + +export type GetFalAiZImageTurboInpaintRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaZImageTurboInpaintOutput +} + +export type GetFalAiZImageTurboInpaintRequestsByRequestIdResponse = + GetFalAiZImageTurboInpaintRequestsByRequestIdResponses[keyof GetFalAiZImageTurboInpaintRequestsByRequestIdResponses] + +export type GetFalAiFlux2FlashEditRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-2/flash/edit/requests/{request_id}/status' +} + +export type GetFalAiFlux2FlashEditRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlux2FlashEditRequestsByRequestIdStatusResponse = + GetFalAiFlux2FlashEditRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2FlashEditRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2FlashEditRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2/flash/edit/requests/{request_id}/cancel' +} + +export type PutFalAiFlux2FlashEditRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFlux2FlashEditRequestsByRequestIdCancelResponse = + PutFalAiFlux2FlashEditRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2FlashEditRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2FlashEditData = { + body: SchemaFlux2FlashEditInput + path?: never + query?: never + url: '/fal-ai/flux-2/flash/edit' +} + +export type PostFalAiFlux2FlashEditResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2FlashEditResponse = + PostFalAiFlux2FlashEditResponses[keyof PostFalAiFlux2FlashEditResponses] + +export type GetFalAiFlux2FlashEditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2/flash/edit/requests/{request_id}' +} + +export type GetFalAiFlux2FlashEditRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux2FlashEditOutput +} + +export type GetFalAiFlux2FlashEditRequestsByRequestIdResponse = + GetFalAiFlux2FlashEditRequestsByRequestIdResponses[keyof GetFalAiFlux2FlashEditRequestsByRequestIdResponses] + +export type GetFalAiGptImage15EditRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/gpt-image-1.5/edit/requests/{request_id}/status' +} + +export type GetFalAiGptImage15EditRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiGptImage15EditRequestsByRequestIdStatusResponse = + GetFalAiGptImage15EditRequestsByRequestIdStatusResponses[keyof GetFalAiGptImage15EditRequestsByRequestIdStatusResponses] + +export type PutFalAiGptImage15EditRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/gpt-image-1.5/edit/requests/{request_id}/cancel' +} + +export type PutFalAiGptImage15EditRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiGptImage15EditRequestsByRequestIdCancelResponse = + PutFalAiGptImage15EditRequestsByRequestIdCancelResponses[keyof PutFalAiGptImage15EditRequestsByRequestIdCancelResponses] + +export type PostFalAiGptImage15EditData = { + body: SchemaGptImage15EditInput + path?: never + query?: never + url: '/fal-ai/gpt-image-1.5/edit' +} + +export type PostFalAiGptImage15EditResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiGptImage15EditResponse = + PostFalAiGptImage15EditResponses[keyof PostFalAiGptImage15EditResponses] + +export type GetFalAiGptImage15EditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/gpt-image-1.5/edit/requests/{request_id}' +} + +export type GetFalAiGptImage15EditRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaGptImage15EditOutput +} + +export type GetFalAiGptImage15EditRequestsByRequestIdResponse = + GetFalAiGptImage15EditRequestsByRequestIdResponses[keyof GetFalAiGptImage15EditRequestsByRequestIdResponses] + +export type GetFalAiFlux2TurboEditRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-2/turbo/edit/requests/{request_id}/status' +} + +export type GetFalAiFlux2TurboEditRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlux2TurboEditRequestsByRequestIdStatusResponse = + GetFalAiFlux2TurboEditRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2TurboEditRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2TurboEditRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2/turbo/edit/requests/{request_id}/cancel' +} + +export type PutFalAiFlux2TurboEditRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFlux2TurboEditRequestsByRequestIdCancelResponse = + PutFalAiFlux2TurboEditRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2TurboEditRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2TurboEditData = { + body: SchemaFlux2TurboEditInput + path?: never + query?: never + url: '/fal-ai/flux-2/turbo/edit' +} + +export type PostFalAiFlux2TurboEditResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2TurboEditResponse = + PostFalAiFlux2TurboEditResponses[keyof PostFalAiFlux2TurboEditResponses] + +export type GetFalAiFlux2TurboEditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2/turbo/edit/requests/{request_id}' +} + +export type GetFalAiFlux2TurboEditRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux2TurboEditOutput +} + +export type GetFalAiFlux2TurboEditRequestsByRequestIdResponse = + GetFalAiFlux2TurboEditRequestsByRequestIdResponses[keyof GetFalAiFlux2TurboEditRequestsByRequestIdResponses] + +export type GetFalAiFlux2MaxEditRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-2-max/edit/requests/{request_id}/status' +} + +export type GetFalAiFlux2MaxEditRequestsByRequestIdStatusResponses = { + /** + * The request status. 
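+ *
+ * Hedged sketch: the `...StatusResponse` alias below indexes this map with
+ * `keyof`, so it resolves to SchemaQueueStatus; narrowing on `status` is one
+ * natural way to consume it.
+ *
+ * @example
+ * declare const s: GetFalAiFlux2MaxEditRequestsByRequestIdStatusResponse
+ * if (s.status === 'IN_QUEUE' && s.queue_position !== undefined) {
+ *   // still queued; queue_position reports the place in line
+ * }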
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlux2MaxEditRequestsByRequestIdStatusResponse = + GetFalAiFlux2MaxEditRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2MaxEditRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2MaxEditRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-max/edit/requests/{request_id}/cancel' +} + +export type PutFalAiFlux2MaxEditRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFlux2MaxEditRequestsByRequestIdCancelResponse = + PutFalAiFlux2MaxEditRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2MaxEditRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2MaxEditData = { + body: SchemaFlux2MaxEditInput + path?: never + query?: never + url: '/fal-ai/flux-2-max/edit' +} + +export type PostFalAiFlux2MaxEditResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2MaxEditResponse = + PostFalAiFlux2MaxEditResponses[keyof PostFalAiFlux2MaxEditResponses] + +export type GetFalAiFlux2MaxEditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-max/edit/requests/{request_id}' +} + +export type GetFalAiFlux2MaxEditRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux2MaxEditOutput +} + +export type GetFalAiFlux2MaxEditRequestsByRequestIdResponse = + GetFalAiFlux2MaxEditRequestsByRequestIdResponses[keyof GetFalAiFlux2MaxEditRequestsByRequestIdResponses] + +export type GetHalfMoonAiAiBabyAndAgingGeneratorMultiRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/half-moon-ai/ai-baby-and-aging-generator/multi/requests/{request_id}/status' + } + +export type GetHalfMoonAiAiBabyAndAgingGeneratorMultiRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetHalfMoonAiAiBabyAndAgingGeneratorMultiRequestsByRequestIdStatusResponse = + GetHalfMoonAiAiBabyAndAgingGeneratorMultiRequestsByRequestIdStatusResponses[keyof GetHalfMoonAiAiBabyAndAgingGeneratorMultiRequestsByRequestIdStatusResponses] + +export type PutHalfMoonAiAiBabyAndAgingGeneratorMultiRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/half-moon-ai/ai-baby-and-aging-generator/multi/requests/{request_id}/cancel' + } + +export type PutHalfMoonAiAiBabyAndAgingGeneratorMultiRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutHalfMoonAiAiBabyAndAgingGeneratorMultiRequestsByRequestIdCancelResponse = + PutHalfMoonAiAiBabyAndAgingGeneratorMultiRequestsByRequestIdCancelResponses[keyof PutHalfMoonAiAiBabyAndAgingGeneratorMultiRequestsByRequestIdCancelResponses] + +export type PostHalfMoonAiAiBabyAndAgingGeneratorMultiData = { + body: SchemaAiBabyAndAgingGeneratorMultiInput + path?: never + query?: never + url: '/half-moon-ai/ai-baby-and-aging-generator/multi' +} + +export type PostHalfMoonAiAiBabyAndAgingGeneratorMultiResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostHalfMoonAiAiBabyAndAgingGeneratorMultiResponse = + PostHalfMoonAiAiBabyAndAgingGeneratorMultiResponses[keyof PostHalfMoonAiAiBabyAndAgingGeneratorMultiResponses] + +export type GetHalfMoonAiAiBabyAndAgingGeneratorMultiRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/half-moon-ai/ai-baby-and-aging-generator/multi/requests/{request_id}' +} + +export type GetHalfMoonAiAiBabyAndAgingGeneratorMultiRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaAiBabyAndAgingGeneratorMultiOutput + } + +export type GetHalfMoonAiAiBabyAndAgingGeneratorMultiRequestsByRequestIdResponse = + GetHalfMoonAiAiBabyAndAgingGeneratorMultiRequestsByRequestIdResponses[keyof GetHalfMoonAiAiBabyAndAgingGeneratorMultiRequestsByRequestIdResponses] + +export type GetHalfMoonAiAiBabyAndAgingGeneratorSingleRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/half-moon-ai/ai-baby-and-aging-generator/single/requests/{request_id}/status' + } + +export type GetHalfMoonAiAiBabyAndAgingGeneratorSingleRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetHalfMoonAiAiBabyAndAgingGeneratorSingleRequestsByRequestIdStatusResponse = + GetHalfMoonAiAiBabyAndAgingGeneratorSingleRequestsByRequestIdStatusResponses[keyof GetHalfMoonAiAiBabyAndAgingGeneratorSingleRequestsByRequestIdStatusResponses] + +export type PutHalfMoonAiAiBabyAndAgingGeneratorSingleRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/half-moon-ai/ai-baby-and-aging-generator/single/requests/{request_id}/cancel' + } + +export type PutHalfMoonAiAiBabyAndAgingGeneratorSingleRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutHalfMoonAiAiBabyAndAgingGeneratorSingleRequestsByRequestIdCancelResponse = + PutHalfMoonAiAiBabyAndAgingGeneratorSingleRequestsByRequestIdCancelResponses[keyof PutHalfMoonAiAiBabyAndAgingGeneratorSingleRequestsByRequestIdCancelResponses] + +export type PostHalfMoonAiAiBabyAndAgingGeneratorSingleData = { + body: SchemaAiBabyAndAgingGeneratorSingleInput + path?: never + query?: never + url: '/half-moon-ai/ai-baby-and-aging-generator/single' +} + +export type PostHalfMoonAiAiBabyAndAgingGeneratorSingleResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostHalfMoonAiAiBabyAndAgingGeneratorSingleResponse = + PostHalfMoonAiAiBabyAndAgingGeneratorSingleResponses[keyof PostHalfMoonAiAiBabyAndAgingGeneratorSingleResponses] + +export type GetHalfMoonAiAiBabyAndAgingGeneratorSingleRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/half-moon-ai/ai-baby-and-aging-generator/single/requests/{request_id}' + } + +export type GetHalfMoonAiAiBabyAndAgingGeneratorSingleRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaAiBabyAndAgingGeneratorSingleOutput + } + +export type GetHalfMoonAiAiBabyAndAgingGeneratorSingleRequestsByRequestIdResponse = + GetHalfMoonAiAiBabyAndAgingGeneratorSingleRequestsByRequestIdResponses[keyof GetHalfMoonAiAiBabyAndAgingGeneratorSingleRequestsByRequestIdResponses] + +export type GetFalAiQwenImageEdit2509LoraGalleryShirtDesignRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-edit-2509-lora-gallery/shirt-design/requests/{request_id}/status' + } + +export type GetFalAiQwenImageEdit2509LoraGalleryShirtDesignRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiQwenImageEdit2509LoraGalleryShirtDesignRequestsByRequestIdStatusResponse = + GetFalAiQwenImageEdit2509LoraGalleryShirtDesignRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageEdit2509LoraGalleryShirtDesignRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageEdit2509LoraGalleryShirtDesignRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-2509-lora-gallery/shirt-design/requests/{request_id}/cancel' + } + +export type PutFalAiQwenImageEdit2509LoraGalleryShirtDesignRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiQwenImageEdit2509LoraGalleryShirtDesignRequestsByRequestIdCancelResponse = + PutFalAiQwenImageEdit2509LoraGalleryShirtDesignRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageEdit2509LoraGalleryShirtDesignRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageEdit2509LoraGalleryShirtDesignData = { + body: SchemaQwenImageEdit2509LoraGalleryShirtDesignInput + path?: never + query?: never + url: '/fal-ai/qwen-image-edit-2509-lora-gallery/shirt-design' +} + +export type PostFalAiQwenImageEdit2509LoraGalleryShirtDesignResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImageEdit2509LoraGalleryShirtDesignResponse = + PostFalAiQwenImageEdit2509LoraGalleryShirtDesignResponses[keyof PostFalAiQwenImageEdit2509LoraGalleryShirtDesignResponses] + +export type GetFalAiQwenImageEdit2509LoraGalleryShirtDesignRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-2509-lora-gallery/shirt-design/requests/{request_id}' + } + +export type GetFalAiQwenImageEdit2509LoraGalleryShirtDesignRequestsByRequestIdResponses = + { + /** + * Result of the request. 
+ */ + 200: SchemaQwenImageEdit2509LoraGalleryShirtDesignOutput + } + +export type GetFalAiQwenImageEdit2509LoraGalleryShirtDesignRequestsByRequestIdResponse = + GetFalAiQwenImageEdit2509LoraGalleryShirtDesignRequestsByRequestIdResponses[keyof GetFalAiQwenImageEdit2509LoraGalleryShirtDesignRequestsByRequestIdResponses] + +export type GetFalAiQwenImageEdit2509LoraGalleryRemoveLightingRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-edit-2509-lora-gallery/remove-lighting/requests/{request_id}/status' + } + +export type GetFalAiQwenImageEdit2509LoraGalleryRemoveLightingRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiQwenImageEdit2509LoraGalleryRemoveLightingRequestsByRequestIdStatusResponse = + GetFalAiQwenImageEdit2509LoraGalleryRemoveLightingRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageEdit2509LoraGalleryRemoveLightingRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageEdit2509LoraGalleryRemoveLightingRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-2509-lora-gallery/remove-lighting/requests/{request_id}/cancel' + } + +export type PutFalAiQwenImageEdit2509LoraGalleryRemoveLightingRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiQwenImageEdit2509LoraGalleryRemoveLightingRequestsByRequestIdCancelResponse = + PutFalAiQwenImageEdit2509LoraGalleryRemoveLightingRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageEdit2509LoraGalleryRemoveLightingRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageEdit2509LoraGalleryRemoveLightingData = { + body: SchemaQwenImageEdit2509LoraGalleryRemoveLightingInput + path?: never + query?: never + url: '/fal-ai/qwen-image-edit-2509-lora-gallery/remove-lighting' +} + +export type PostFalAiQwenImageEdit2509LoraGalleryRemoveLightingResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImageEdit2509LoraGalleryRemoveLightingResponse = + PostFalAiQwenImageEdit2509LoraGalleryRemoveLightingResponses[keyof PostFalAiQwenImageEdit2509LoraGalleryRemoveLightingResponses] + +export type GetFalAiQwenImageEdit2509LoraGalleryRemoveLightingRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-2509-lora-gallery/remove-lighting/requests/{request_id}' + } + +export type GetFalAiQwenImageEdit2509LoraGalleryRemoveLightingRequestsByRequestIdResponses = + { + /** + * Result of the request. 
+ */ + 200: SchemaQwenImageEdit2509LoraGalleryRemoveLightingOutput + } + +export type GetFalAiQwenImageEdit2509LoraGalleryRemoveLightingRequestsByRequestIdResponse = + GetFalAiQwenImageEdit2509LoraGalleryRemoveLightingRequestsByRequestIdResponses[keyof GetFalAiQwenImageEdit2509LoraGalleryRemoveLightingRequestsByRequestIdResponses] + +export type GetFalAiQwenImageEdit2509LoraGalleryRemoveElementRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-edit-2509-lora-gallery/remove-element/requests/{request_id}/status' + } + +export type GetFalAiQwenImageEdit2509LoraGalleryRemoveElementRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiQwenImageEdit2509LoraGalleryRemoveElementRequestsByRequestIdStatusResponse = + GetFalAiQwenImageEdit2509LoraGalleryRemoveElementRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageEdit2509LoraGalleryRemoveElementRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageEdit2509LoraGalleryRemoveElementRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-2509-lora-gallery/remove-element/requests/{request_id}/cancel' + } + +export type PutFalAiQwenImageEdit2509LoraGalleryRemoveElementRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiQwenImageEdit2509LoraGalleryRemoveElementRequestsByRequestIdCancelResponse = + PutFalAiQwenImageEdit2509LoraGalleryRemoveElementRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageEdit2509LoraGalleryRemoveElementRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageEdit2509LoraGalleryRemoveElementData = { + body: SchemaQwenImageEdit2509LoraGalleryRemoveElementInput + path?: never + query?: never + url: '/fal-ai/qwen-image-edit-2509-lora-gallery/remove-element' +} + +export type PostFalAiQwenImageEdit2509LoraGalleryRemoveElementResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImageEdit2509LoraGalleryRemoveElementResponse = + PostFalAiQwenImageEdit2509LoraGalleryRemoveElementResponses[keyof PostFalAiQwenImageEdit2509LoraGalleryRemoveElementResponses] + +export type GetFalAiQwenImageEdit2509LoraGalleryRemoveElementRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-2509-lora-gallery/remove-element/requests/{request_id}' + } + +export type GetFalAiQwenImageEdit2509LoraGalleryRemoveElementRequestsByRequestIdResponses = + { + /** + * Result of the request. 
+ */ + 200: SchemaQwenImageEdit2509LoraGalleryRemoveElementOutput + } + +export type GetFalAiQwenImageEdit2509LoraGalleryRemoveElementRequestsByRequestIdResponse = + GetFalAiQwenImageEdit2509LoraGalleryRemoveElementRequestsByRequestIdResponses[keyof GetFalAiQwenImageEdit2509LoraGalleryRemoveElementRequestsByRequestIdResponses] + +export type GetFalAiQwenImageEdit2509LoraGalleryLightingRestorationRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-edit-2509-lora-gallery/lighting-restoration/requests/{request_id}/status' + } + +export type GetFalAiQwenImageEdit2509LoraGalleryLightingRestorationRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiQwenImageEdit2509LoraGalleryLightingRestorationRequestsByRequestIdStatusResponse = + GetFalAiQwenImageEdit2509LoraGalleryLightingRestorationRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageEdit2509LoraGalleryLightingRestorationRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageEdit2509LoraGalleryLightingRestorationRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-2509-lora-gallery/lighting-restoration/requests/{request_id}/cancel' + } + +export type PutFalAiQwenImageEdit2509LoraGalleryLightingRestorationRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiQwenImageEdit2509LoraGalleryLightingRestorationRequestsByRequestIdCancelResponse = + PutFalAiQwenImageEdit2509LoraGalleryLightingRestorationRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageEdit2509LoraGalleryLightingRestorationRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageEdit2509LoraGalleryLightingRestorationData = { + body: SchemaQwenImageEdit2509LoraGalleryLightingRestorationInput + path?: never + query?: never + url: '/fal-ai/qwen-image-edit-2509-lora-gallery/lighting-restoration' +} + +export type PostFalAiQwenImageEdit2509LoraGalleryLightingRestorationResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type PostFalAiQwenImageEdit2509LoraGalleryLightingRestorationResponse = + PostFalAiQwenImageEdit2509LoraGalleryLightingRestorationResponses[keyof PostFalAiQwenImageEdit2509LoraGalleryLightingRestorationResponses] + +export type GetFalAiQwenImageEdit2509LoraGalleryLightingRestorationRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-2509-lora-gallery/lighting-restoration/requests/{request_id}' + } + +export type GetFalAiQwenImageEdit2509LoraGalleryLightingRestorationRequestsByRequestIdResponses = + { + /** + * Result of the request. 
+ */ + 200: SchemaQwenImageEdit2509LoraGalleryLightingRestorationOutput + } + +export type GetFalAiQwenImageEdit2509LoraGalleryLightingRestorationRequestsByRequestIdResponse = + GetFalAiQwenImageEdit2509LoraGalleryLightingRestorationRequestsByRequestIdResponses[keyof GetFalAiQwenImageEdit2509LoraGalleryLightingRestorationRequestsByRequestIdResponses] + +export type GetFalAiQwenImageEdit2509LoraGalleryIntegrateProductRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-edit-2509-lora-gallery/integrate-product/requests/{request_id}/status' + } + +export type GetFalAiQwenImageEdit2509LoraGalleryIntegrateProductRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiQwenImageEdit2509LoraGalleryIntegrateProductRequestsByRequestIdStatusResponse = + GetFalAiQwenImageEdit2509LoraGalleryIntegrateProductRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageEdit2509LoraGalleryIntegrateProductRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageEdit2509LoraGalleryIntegrateProductRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-2509-lora-gallery/integrate-product/requests/{request_id}/cancel' + } + +export type PutFalAiQwenImageEdit2509LoraGalleryIntegrateProductRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiQwenImageEdit2509LoraGalleryIntegrateProductRequestsByRequestIdCancelResponse = + PutFalAiQwenImageEdit2509LoraGalleryIntegrateProductRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageEdit2509LoraGalleryIntegrateProductRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageEdit2509LoraGalleryIntegrateProductData = { + body: SchemaQwenImageEdit2509LoraGalleryIntegrateProductInput + path?: never + query?: never + url: '/fal-ai/qwen-image-edit-2509-lora-gallery/integrate-product' +} + +export type PostFalAiQwenImageEdit2509LoraGalleryIntegrateProductResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImageEdit2509LoraGalleryIntegrateProductResponse = + PostFalAiQwenImageEdit2509LoraGalleryIntegrateProductResponses[keyof PostFalAiQwenImageEdit2509LoraGalleryIntegrateProductResponses] + +export type GetFalAiQwenImageEdit2509LoraGalleryIntegrateProductRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-2509-lora-gallery/integrate-product/requests/{request_id}' + } + +export type GetFalAiQwenImageEdit2509LoraGalleryIntegrateProductRequestsByRequestIdResponses = + { + /** + * Result of the request. 
+ */ + 200: SchemaQwenImageEdit2509LoraGalleryIntegrateProductOutput + } + +export type GetFalAiQwenImageEdit2509LoraGalleryIntegrateProductRequestsByRequestIdResponse = + GetFalAiQwenImageEdit2509LoraGalleryIntegrateProductRequestsByRequestIdResponses[keyof GetFalAiQwenImageEdit2509LoraGalleryIntegrateProductRequestsByRequestIdResponses] + +export type GetFalAiQwenImageEdit2509LoraGalleryGroupPhotoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-edit-2509-lora-gallery/group-photo/requests/{request_id}/status' + } + +export type GetFalAiQwenImageEdit2509LoraGalleryGroupPhotoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiQwenImageEdit2509LoraGalleryGroupPhotoRequestsByRequestIdStatusResponse = + GetFalAiQwenImageEdit2509LoraGalleryGroupPhotoRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageEdit2509LoraGalleryGroupPhotoRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageEdit2509LoraGalleryGroupPhotoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-2509-lora-gallery/group-photo/requests/{request_id}/cancel' + } + +export type PutFalAiQwenImageEdit2509LoraGalleryGroupPhotoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiQwenImageEdit2509LoraGalleryGroupPhotoRequestsByRequestIdCancelResponse = + PutFalAiQwenImageEdit2509LoraGalleryGroupPhotoRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageEdit2509LoraGalleryGroupPhotoRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageEdit2509LoraGalleryGroupPhotoData = { + body: SchemaQwenImageEdit2509LoraGalleryGroupPhotoInput + path?: never + query?: never + url: '/fal-ai/qwen-image-edit-2509-lora-gallery/group-photo' +} + +export type PostFalAiQwenImageEdit2509LoraGalleryGroupPhotoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImageEdit2509LoraGalleryGroupPhotoResponse = + PostFalAiQwenImageEdit2509LoraGalleryGroupPhotoResponses[keyof PostFalAiQwenImageEdit2509LoraGalleryGroupPhotoResponses] + +export type GetFalAiQwenImageEdit2509LoraGalleryGroupPhotoRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-2509-lora-gallery/group-photo/requests/{request_id}' + } + +export type GetFalAiQwenImageEdit2509LoraGalleryGroupPhotoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaQwenImageEdit2509LoraGalleryGroupPhotoOutput + } + +export type GetFalAiQwenImageEdit2509LoraGalleryGroupPhotoRequestsByRequestIdResponse = + GetFalAiQwenImageEdit2509LoraGalleryGroupPhotoRequestsByRequestIdResponses[keyof GetFalAiQwenImageEdit2509LoraGalleryGroupPhotoRequestsByRequestIdResponses] + +export type GetFalAiQwenImageEdit2509LoraGalleryFaceToFullPortraitRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
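+ * Note that this is a numeric flag rather than a boolean: pass `logs=1` (e.g. `GET .../status?logs=1`) to have the `logs` field of the returned SchemaQueueStatus populated.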
+ */ + logs?: number + } + url: '/fal-ai/qwen-image-edit-2509-lora-gallery/face-to-full-portrait/requests/{request_id}/status' + } + +export type GetFalAiQwenImageEdit2509LoraGalleryFaceToFullPortraitRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiQwenImageEdit2509LoraGalleryFaceToFullPortraitRequestsByRequestIdStatusResponse = + GetFalAiQwenImageEdit2509LoraGalleryFaceToFullPortraitRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageEdit2509LoraGalleryFaceToFullPortraitRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageEdit2509LoraGalleryFaceToFullPortraitRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-2509-lora-gallery/face-to-full-portrait/requests/{request_id}/cancel' + } + +export type PutFalAiQwenImageEdit2509LoraGalleryFaceToFullPortraitRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiQwenImageEdit2509LoraGalleryFaceToFullPortraitRequestsByRequestIdCancelResponse = + PutFalAiQwenImageEdit2509LoraGalleryFaceToFullPortraitRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageEdit2509LoraGalleryFaceToFullPortraitRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageEdit2509LoraGalleryFaceToFullPortraitData = { + body: SchemaQwenImageEdit2509LoraGalleryFaceToFullPortraitInput + path?: never + query?: never + url: '/fal-ai/qwen-image-edit-2509-lora-gallery/face-to-full-portrait' +} + +export type PostFalAiQwenImageEdit2509LoraGalleryFaceToFullPortraitResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImageEdit2509LoraGalleryFaceToFullPortraitResponse = + PostFalAiQwenImageEdit2509LoraGalleryFaceToFullPortraitResponses[keyof PostFalAiQwenImageEdit2509LoraGalleryFaceToFullPortraitResponses] + +export type GetFalAiQwenImageEdit2509LoraGalleryFaceToFullPortraitRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-2509-lora-gallery/face-to-full-portrait/requests/{request_id}' + } + +export type GetFalAiQwenImageEdit2509LoraGalleryFaceToFullPortraitRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaQwenImageEdit2509LoraGalleryFaceToFullPortraitOutput + } + +export type GetFalAiQwenImageEdit2509LoraGalleryFaceToFullPortraitRequestsByRequestIdResponse = + GetFalAiQwenImageEdit2509LoraGalleryFaceToFullPortraitRequestsByRequestIdResponses[keyof GetFalAiQwenImageEdit2509LoraGalleryFaceToFullPortraitRequestsByRequestIdResponses] + +export type GetFalAiQwenImageEdit2509LoraGalleryAddBackgroundRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-edit-2509-lora-gallery/add-background/requests/{request_id}/status' + } + +export type GetFalAiQwenImageEdit2509LoraGalleryAddBackgroundRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiQwenImageEdit2509LoraGalleryAddBackgroundRequestsByRequestIdStatusResponse = + GetFalAiQwenImageEdit2509LoraGalleryAddBackgroundRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageEdit2509LoraGalleryAddBackgroundRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageEdit2509LoraGalleryAddBackgroundRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-2509-lora-gallery/add-background/requests/{request_id}/cancel' + } + +export type PutFalAiQwenImageEdit2509LoraGalleryAddBackgroundRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiQwenImageEdit2509LoraGalleryAddBackgroundRequestsByRequestIdCancelResponse = + PutFalAiQwenImageEdit2509LoraGalleryAddBackgroundRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageEdit2509LoraGalleryAddBackgroundRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageEdit2509LoraGalleryAddBackgroundData = { + body: SchemaQwenImageEdit2509LoraGalleryAddBackgroundInput + path?: never + query?: never + url: '/fal-ai/qwen-image-edit-2509-lora-gallery/add-background' +} + +export type PostFalAiQwenImageEdit2509LoraGalleryAddBackgroundResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImageEdit2509LoraGalleryAddBackgroundResponse = + PostFalAiQwenImageEdit2509LoraGalleryAddBackgroundResponses[keyof PostFalAiQwenImageEdit2509LoraGalleryAddBackgroundResponses] + +export type GetFalAiQwenImageEdit2509LoraGalleryAddBackgroundRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-2509-lora-gallery/add-background/requests/{request_id}' + } + +export type GetFalAiQwenImageEdit2509LoraGalleryAddBackgroundRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaQwenImageEdit2509LoraGalleryAddBackgroundOutput + } + +export type GetFalAiQwenImageEdit2509LoraGalleryAddBackgroundRequestsByRequestIdResponse = + GetFalAiQwenImageEdit2509LoraGalleryAddBackgroundRequestsByRequestIdResponses[keyof GetFalAiQwenImageEdit2509LoraGalleryAddBackgroundRequestsByRequestIdResponses] + +export type GetFalAiQwenImageEdit2509LoraGalleryNextSceneRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-edit-2509-lora-gallery/next-scene/requests/{request_id}/status' + } + +export type GetFalAiQwenImageEdit2509LoraGalleryNextSceneRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
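+ * + * The *Response aliases throughout this file are derived by indexed access — Responses[keyof Responses] — so each one automatically widens to a union if additional status codes are ever added to its *Responses map.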
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiQwenImageEdit2509LoraGalleryNextSceneRequestsByRequestIdStatusResponse = + GetFalAiQwenImageEdit2509LoraGalleryNextSceneRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageEdit2509LoraGalleryNextSceneRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageEdit2509LoraGalleryNextSceneRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-2509-lora-gallery/next-scene/requests/{request_id}/cancel' + } + +export type PutFalAiQwenImageEdit2509LoraGalleryNextSceneRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiQwenImageEdit2509LoraGalleryNextSceneRequestsByRequestIdCancelResponse = + PutFalAiQwenImageEdit2509LoraGalleryNextSceneRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageEdit2509LoraGalleryNextSceneRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageEdit2509LoraGalleryNextSceneData = { + body: SchemaQwenImageEdit2509LoraGalleryNextSceneInput + path?: never + query?: never + url: '/fal-ai/qwen-image-edit-2509-lora-gallery/next-scene' +} + +export type PostFalAiQwenImageEdit2509LoraGalleryNextSceneResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImageEdit2509LoraGalleryNextSceneResponse = + PostFalAiQwenImageEdit2509LoraGalleryNextSceneResponses[keyof PostFalAiQwenImageEdit2509LoraGalleryNextSceneResponses] + +export type GetFalAiQwenImageEdit2509LoraGalleryNextSceneRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-2509-lora-gallery/next-scene/requests/{request_id}' + } + +export type GetFalAiQwenImageEdit2509LoraGalleryNextSceneRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaQwenImageEdit2509LoraGalleryNextSceneOutput + } + +export type GetFalAiQwenImageEdit2509LoraGalleryNextSceneRequestsByRequestIdResponse = + GetFalAiQwenImageEdit2509LoraGalleryNextSceneRequestsByRequestIdResponses[keyof GetFalAiQwenImageEdit2509LoraGalleryNextSceneRequestsByRequestIdResponses] + +export type GetFalAiQwenImageEdit2509LoraGalleryMultipleAnglesRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-edit-2509-lora-gallery/multiple-angles/requests/{request_id}/status' + } + +export type GetFalAiQwenImageEdit2509LoraGalleryMultipleAnglesRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiQwenImageEdit2509LoraGalleryMultipleAnglesRequestsByRequestIdStatusResponse = + GetFalAiQwenImageEdit2509LoraGalleryMultipleAnglesRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageEdit2509LoraGalleryMultipleAnglesRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageEdit2509LoraGalleryMultipleAnglesRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-2509-lora-gallery/multiple-angles/requests/{request_id}/cancel' + } + +export type PutFalAiQwenImageEdit2509LoraGalleryMultipleAnglesRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiQwenImageEdit2509LoraGalleryMultipleAnglesRequestsByRequestIdCancelResponse = + PutFalAiQwenImageEdit2509LoraGalleryMultipleAnglesRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageEdit2509LoraGalleryMultipleAnglesRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageEdit2509LoraGalleryMultipleAnglesData = { + body: SchemaQwenImageEdit2509LoraGalleryMultipleAnglesInput + path?: never + query?: never + url: '/fal-ai/qwen-image-edit-2509-lora-gallery/multiple-angles' +} + +export type PostFalAiQwenImageEdit2509LoraGalleryMultipleAnglesResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImageEdit2509LoraGalleryMultipleAnglesResponse = + PostFalAiQwenImageEdit2509LoraGalleryMultipleAnglesResponses[keyof PostFalAiQwenImageEdit2509LoraGalleryMultipleAnglesResponses] + +export type GetFalAiQwenImageEdit2509LoraGalleryMultipleAnglesRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-2509-lora-gallery/multiple-angles/requests/{request_id}' + } + +export type GetFalAiQwenImageEdit2509LoraGalleryMultipleAnglesRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaQwenImageEdit2509LoraGalleryMultipleAnglesOutput + } + +export type GetFalAiQwenImageEdit2509LoraGalleryMultipleAnglesRequestsByRequestIdResponse = + GetFalAiQwenImageEdit2509LoraGalleryMultipleAnglesRequestsByRequestIdResponses[keyof GetFalAiQwenImageEdit2509LoraGalleryMultipleAnglesRequestsByRequestIdResponses] + +export type GetFalAiQwenImageEdit2509LoraRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-edit-2509-lora/requests/{request_id}/status' +} + +export type GetFalAiQwenImageEdit2509LoraRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiQwenImageEdit2509LoraRequestsByRequestIdStatusResponse = + GetFalAiQwenImageEdit2509LoraRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageEdit2509LoraRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageEdit2509LoraRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-2509-lora/requests/{request_id}/cancel' +} + +export type PutFalAiQwenImageEdit2509LoraRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. 
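+ * Note that the 200 alone is not proof the run was stopped: the optional `success` flag on the body is the real signal, and a request that has already started or finished may not be cancellable.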
+ */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiQwenImageEdit2509LoraRequestsByRequestIdCancelResponse = + PutFalAiQwenImageEdit2509LoraRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageEdit2509LoraRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageEdit2509LoraData = { + body: SchemaQwenImageEdit2509LoraInput + path?: never + query?: never + url: '/fal-ai/qwen-image-edit-2509-lora' +} + +export type PostFalAiQwenImageEdit2509LoraResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImageEdit2509LoraResponse = + PostFalAiQwenImageEdit2509LoraResponses[keyof PostFalAiQwenImageEdit2509LoraResponses] + +export type GetFalAiQwenImageEdit2509LoraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-2509-lora/requests/{request_id}' +} + +export type GetFalAiQwenImageEdit2509LoraRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaQwenImageEdit2509LoraOutput +} + +export type GetFalAiQwenImageEdit2509LoraRequestsByRequestIdResponse = + GetFalAiQwenImageEdit2509LoraRequestsByRequestIdResponses[keyof GetFalAiQwenImageEdit2509LoraRequestsByRequestIdResponses] + +export type GetFalAiQwenImageEdit2509RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-edit-2509/requests/{request_id}/status' +} + +export type GetFalAiQwenImageEdit2509RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiQwenImageEdit2509RequestsByRequestIdStatusResponse = + GetFalAiQwenImageEdit2509RequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageEdit2509RequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageEdit2509RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-2509/requests/{request_id}/cancel' +} + +export type PutFalAiQwenImageEdit2509RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiQwenImageEdit2509RequestsByRequestIdCancelResponse = + PutFalAiQwenImageEdit2509RequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageEdit2509RequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageEdit2509Data = { + body: SchemaQwenImageEdit2509Input + path?: never + query?: never + url: '/fal-ai/qwen-image-edit-2509' +} + +export type PostFalAiQwenImageEdit2509Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImageEdit2509Response = + PostFalAiQwenImageEdit2509Responses[keyof PostFalAiQwenImageEdit2509Responses] + +export type GetFalAiQwenImageEdit2509RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-2509/requests/{request_id}' +} + +export type GetFalAiQwenImageEdit2509RequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaQwenImageEdit2509Output +} + +export type GetFalAiQwenImageEdit2509RequestsByRequestIdResponse = + GetFalAiQwenImageEdit2509RequestsByRequestIdResponses[keyof GetFalAiQwenImageEdit2509RequestsByRequestIdResponses] + +export type GetFalAiQwenImageEditPlusLoraGalleryLightingRestorationRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-edit-plus-lora-gallery/lighting-restoration/requests/{request_id}/status' + } + +export type GetFalAiQwenImageEditPlusLoraGalleryLightingRestorationRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiQwenImageEditPlusLoraGalleryLightingRestorationRequestsByRequestIdStatusResponse = + GetFalAiQwenImageEditPlusLoraGalleryLightingRestorationRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageEditPlusLoraGalleryLightingRestorationRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageEditPlusLoraGalleryLightingRestorationRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-plus-lora-gallery/lighting-restoration/requests/{request_id}/cancel' + } + +export type PutFalAiQwenImageEditPlusLoraGalleryLightingRestorationRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiQwenImageEditPlusLoraGalleryLightingRestorationRequestsByRequestIdCancelResponse = + PutFalAiQwenImageEditPlusLoraGalleryLightingRestorationRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageEditPlusLoraGalleryLightingRestorationRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageEditPlusLoraGalleryLightingRestorationData = { + body: SchemaQwenImageEditPlusLoraGalleryLightingRestorationInput + path?: never + query?: never + url: '/fal-ai/qwen-image-edit-plus-lora-gallery/lighting-restoration' +} + +export type PostFalAiQwenImageEditPlusLoraGalleryLightingRestorationResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type PostFalAiQwenImageEditPlusLoraGalleryLightingRestorationResponse = + PostFalAiQwenImageEditPlusLoraGalleryLightingRestorationResponses[keyof PostFalAiQwenImageEditPlusLoraGalleryLightingRestorationResponses] + +export type GetFalAiQwenImageEditPlusLoraGalleryLightingRestorationRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-plus-lora-gallery/lighting-restoration/requests/{request_id}' + } + +export type GetFalAiQwenImageEditPlusLoraGalleryLightingRestorationRequestsByRequestIdResponses = + { + /** + * Result of the request. 
+ */ + 200: SchemaQwenImageEditPlusLoraGalleryLightingRestorationOutput + } + +export type GetFalAiQwenImageEditPlusLoraGalleryLightingRestorationRequestsByRequestIdResponse = + GetFalAiQwenImageEditPlusLoraGalleryLightingRestorationRequestsByRequestIdResponses[keyof GetFalAiQwenImageEditPlusLoraGalleryLightingRestorationRequestsByRequestIdResponses] + +export type GetFalAiMoondream3PreviewSegmentRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/moondream3-preview/segment/requests/{request_id}/status' +} + +export type GetFalAiMoondream3PreviewSegmentRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiMoondream3PreviewSegmentRequestsByRequestIdStatusResponse = + GetFalAiMoondream3PreviewSegmentRequestsByRequestIdStatusResponses[keyof GetFalAiMoondream3PreviewSegmentRequestsByRequestIdStatusResponses] + +export type PutFalAiMoondream3PreviewSegmentRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/moondream3-preview/segment/requests/{request_id}/cancel' +} + +export type PutFalAiMoondream3PreviewSegmentRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiMoondream3PreviewSegmentRequestsByRequestIdCancelResponse = + PutFalAiMoondream3PreviewSegmentRequestsByRequestIdCancelResponses[keyof PutFalAiMoondream3PreviewSegmentRequestsByRequestIdCancelResponses] + +export type PostFalAiMoondream3PreviewSegmentData = { + body: SchemaMoondream3PreviewSegmentInput + path?: never + query?: never + url: '/fal-ai/moondream3-preview/segment' +} + +export type PostFalAiMoondream3PreviewSegmentResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMoondream3PreviewSegmentResponse = + PostFalAiMoondream3PreviewSegmentResponses[keyof PostFalAiMoondream3PreviewSegmentResponses] + +export type GetFalAiMoondream3PreviewSegmentRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/moondream3-preview/segment/requests/{request_id}' +} + +export type GetFalAiMoondream3PreviewSegmentRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMoondream3PreviewSegmentOutput +} + +export type GetFalAiMoondream3PreviewSegmentRequestsByRequestIdResponse = + GetFalAiMoondream3PreviewSegmentRequestsByRequestIdResponses[keyof GetFalAiMoondream3PreviewSegmentRequestsByRequestIdResponses] + +export type GetFalAiStepxEdit2RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/stepx-edit2/requests/{request_id}/status' +} + +export type GetFalAiStepxEdit2RequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiStepxEdit2RequestsByRequestIdStatusResponse = + GetFalAiStepxEdit2RequestsByRequestIdStatusResponses[keyof GetFalAiStepxEdit2RequestsByRequestIdStatusResponses] + +export type PutFalAiStepxEdit2RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/stepx-edit2/requests/{request_id}/cancel' +} + +export type PutFalAiStepxEdit2RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiStepxEdit2RequestsByRequestIdCancelResponse = + PutFalAiStepxEdit2RequestsByRequestIdCancelResponses[keyof PutFalAiStepxEdit2RequestsByRequestIdCancelResponses] + +export type PostFalAiStepxEdit2Data = { + body: SchemaStepxEdit2Input + path?: never + query?: never + url: '/fal-ai/stepx-edit2' +} + +export type PostFalAiStepxEdit2Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiStepxEdit2Response = + PostFalAiStepxEdit2Responses[keyof PostFalAiStepxEdit2Responses] + +export type GetFalAiStepxEdit2RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/stepx-edit2/requests/{request_id}' +} + +export type GetFalAiStepxEdit2RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaStepxEdit2Output +} + +export type GetFalAiStepxEdit2RequestsByRequestIdResponse = + GetFalAiStepxEdit2RequestsByRequestIdResponses[keyof GetFalAiStepxEdit2RequestsByRequestIdResponses] + +export type GetFalAiZImageTurboControlnetLoraRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/z-image/turbo/controlnet/lora/requests/{request_id}/status' +} + +export type GetFalAiZImageTurboControlnetLoraRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiZImageTurboControlnetLoraRequestsByRequestIdStatusResponse = + GetFalAiZImageTurboControlnetLoraRequestsByRequestIdStatusResponses[keyof GetFalAiZImageTurboControlnetLoraRequestsByRequestIdStatusResponses] + +export type PutFalAiZImageTurboControlnetLoraRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/z-image/turbo/controlnet/lora/requests/{request_id}/cancel' +} + +export type PutFalAiZImageTurboControlnetLoraRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiZImageTurboControlnetLoraRequestsByRequestIdCancelResponse = + PutFalAiZImageTurboControlnetLoraRequestsByRequestIdCancelResponses[keyof PutFalAiZImageTurboControlnetLoraRequestsByRequestIdCancelResponses] + +export type PostFalAiZImageTurboControlnetLoraData = { + body: SchemaZImageTurboControlnetLoraInput + path?: never + query?: never + url: '/fal-ai/z-image/turbo/controlnet/lora' +} + +export type PostFalAiZImageTurboControlnetLoraResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiZImageTurboControlnetLoraResponse = + PostFalAiZImageTurboControlnetLoraResponses[keyof PostFalAiZImageTurboControlnetLoraResponses] + +export type GetFalAiZImageTurboControlnetLoraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/z-image/turbo/controlnet/lora/requests/{request_id}' +} + +export type GetFalAiZImageTurboControlnetLoraRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaZImageTurboControlnetLoraOutput +} + +export type GetFalAiZImageTurboControlnetLoraRequestsByRequestIdResponse = + GetFalAiZImageTurboControlnetLoraRequestsByRequestIdResponses[keyof GetFalAiZImageTurboControlnetLoraRequestsByRequestIdResponses] + +export type GetFalAiZImageTurboControlnetRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/z-image/turbo/controlnet/requests/{request_id}/status' +} + +export type GetFalAiZImageTurboControlnetRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiZImageTurboControlnetRequestsByRequestIdStatusResponse = + GetFalAiZImageTurboControlnetRequestsByRequestIdStatusResponses[keyof GetFalAiZImageTurboControlnetRequestsByRequestIdStatusResponses] + +export type PutFalAiZImageTurboControlnetRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/z-image/turbo/controlnet/requests/{request_id}/cancel' +} + +export type PutFalAiZImageTurboControlnetRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiZImageTurboControlnetRequestsByRequestIdCancelResponse = + PutFalAiZImageTurboControlnetRequestsByRequestIdCancelResponses[keyof PutFalAiZImageTurboControlnetRequestsByRequestIdCancelResponses] + +export type PostFalAiZImageTurboControlnetData = { + body: SchemaZImageTurboControlnetInput + path?: never + query?: never + url: '/fal-ai/z-image/turbo/controlnet' +} + +export type PostFalAiZImageTurboControlnetResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiZImageTurboControlnetResponse = + PostFalAiZImageTurboControlnetResponses[keyof PostFalAiZImageTurboControlnetResponses] + +export type GetFalAiZImageTurboControlnetRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/z-image/turbo/controlnet/requests/{request_id}' +} + +export type GetFalAiZImageTurboControlnetRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaZImageTurboControlnetOutput +} + +export type GetFalAiZImageTurboControlnetRequestsByRequestIdResponse = + GetFalAiZImageTurboControlnetRequestsByRequestIdResponses[keyof GetFalAiZImageTurboControlnetRequestsByRequestIdResponses] + +export type GetFalAiZImageTurboImageToImageLoraRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/z-image/turbo/image-to-image/lora/requests/{request_id}/status' +} + +export type GetFalAiZImageTurboImageToImageLoraRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiZImageTurboImageToImageLoraRequestsByRequestIdStatusResponse = + GetFalAiZImageTurboImageToImageLoraRequestsByRequestIdStatusResponses[keyof GetFalAiZImageTurboImageToImageLoraRequestsByRequestIdStatusResponses] + +export type PutFalAiZImageTurboImageToImageLoraRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/z-image/turbo/image-to-image/lora/requests/{request_id}/cancel' +} + +export type PutFalAiZImageTurboImageToImageLoraRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiZImageTurboImageToImageLoraRequestsByRequestIdCancelResponse = + PutFalAiZImageTurboImageToImageLoraRequestsByRequestIdCancelResponses[keyof PutFalAiZImageTurboImageToImageLoraRequestsByRequestIdCancelResponses] + +export type PostFalAiZImageTurboImageToImageLoraData = { + body: SchemaZImageTurboImageToImageLoraInput + path?: never + query?: never + url: '/fal-ai/z-image/turbo/image-to-image/lora' +} + +export type PostFalAiZImageTurboImageToImageLoraResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiZImageTurboImageToImageLoraResponse = + PostFalAiZImageTurboImageToImageLoraResponses[keyof PostFalAiZImageTurboImageToImageLoraResponses] + +export type GetFalAiZImageTurboImageToImageLoraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/z-image/turbo/image-to-image/lora/requests/{request_id}' +} + +export type GetFalAiZImageTurboImageToImageLoraRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaZImageTurboImageToImageLoraOutput +} + +export type GetFalAiZImageTurboImageToImageLoraRequestsByRequestIdResponse = + GetFalAiZImageTurboImageToImageLoraRequestsByRequestIdResponses[keyof GetFalAiZImageTurboImageToImageLoraRequestsByRequestIdResponses] + +export type GetFalAiZImageTurboImageToImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/z-image/turbo/image-to-image/requests/{request_id}/status' +} + +export type GetFalAiZImageTurboImageToImageRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiZImageTurboImageToImageRequestsByRequestIdStatusResponse = + GetFalAiZImageTurboImageToImageRequestsByRequestIdStatusResponses[keyof GetFalAiZImageTurboImageToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiZImageTurboImageToImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/z-image/turbo/image-to-image/requests/{request_id}/cancel' +} + +export type PutFalAiZImageTurboImageToImageRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiZImageTurboImageToImageRequestsByRequestIdCancelResponse = + PutFalAiZImageTurboImageToImageRequestsByRequestIdCancelResponses[keyof PutFalAiZImageTurboImageToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiZImageTurboImageToImageData = { + body: SchemaZImageTurboImageToImageInput + path?: never + query?: never + url: '/fal-ai/z-image/turbo/image-to-image' +} + +export type PostFalAiZImageTurboImageToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiZImageTurboImageToImageResponse = + PostFalAiZImageTurboImageToImageResponses[keyof PostFalAiZImageTurboImageToImageResponses] + +export type GetFalAiZImageTurboImageToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/z-image/turbo/image-to-image/requests/{request_id}' +} + +export type GetFalAiZImageTurboImageToImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaZImageTurboImageToImageOutput +} + +export type GetFalAiZImageTurboImageToImageRequestsByRequestIdResponse = + GetFalAiZImageTurboImageToImageRequestsByRequestIdResponses[keyof GetFalAiZImageTurboImageToImageRequestsByRequestIdResponses] + +export type GetFalAiLongcatImageEditRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/longcat-image/edit/requests/{request_id}/status' +} + +export type GetFalAiLongcatImageEditRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLongcatImageEditRequestsByRequestIdStatusResponse = + GetFalAiLongcatImageEditRequestsByRequestIdStatusResponses[keyof GetFalAiLongcatImageEditRequestsByRequestIdStatusResponses] + +export type PutFalAiLongcatImageEditRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/longcat-image/edit/requests/{request_id}/cancel' +} + +export type PutFalAiLongcatImageEditRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLongcatImageEditRequestsByRequestIdCancelResponse = + PutFalAiLongcatImageEditRequestsByRequestIdCancelResponses[keyof PutFalAiLongcatImageEditRequestsByRequestIdCancelResponses] + +export type PostFalAiLongcatImageEditData = { + body: SchemaLongcatImageEditInput + path?: never + query?: never + url: '/fal-ai/longcat-image/edit' +} + +export type PostFalAiLongcatImageEditResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLongcatImageEditResponse = + PostFalAiLongcatImageEditResponses[keyof PostFalAiLongcatImageEditResponses] + +export type GetFalAiLongcatImageEditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/longcat-image/edit/requests/{request_id}' +} + +export type GetFalAiLongcatImageEditRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaLongcatImageEditOutput +} + +export type GetFalAiLongcatImageEditRequestsByRequestIdResponse = + GetFalAiLongcatImageEditRequestsByRequestIdResponses[keyof GetFalAiLongcatImageEditRequestsByRequestIdResponses] + +export type GetFalAiBytedanceSeedreamV45EditRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/bytedance/seedream/v4.5/edit/requests/{request_id}/status' +} + +export type GetFalAiBytedanceSeedreamV45EditRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiBytedanceSeedreamV45EditRequestsByRequestIdStatusResponse = + GetFalAiBytedanceSeedreamV45EditRequestsByRequestIdStatusResponses[keyof GetFalAiBytedanceSeedreamV45EditRequestsByRequestIdStatusResponses] + +export type PutFalAiBytedanceSeedreamV45EditRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bytedance/seedream/v4.5/edit/requests/{request_id}/cancel' +} + +export type PutFalAiBytedanceSeedreamV45EditRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiBytedanceSeedreamV45EditRequestsByRequestIdCancelResponse = + PutFalAiBytedanceSeedreamV45EditRequestsByRequestIdCancelResponses[keyof PutFalAiBytedanceSeedreamV45EditRequestsByRequestIdCancelResponses] + +export type PostFalAiBytedanceSeedreamV45EditData = { + body: SchemaBytedanceSeedreamV45EditInput + path?: never + query?: never + url: '/fal-ai/bytedance/seedream/v4.5/edit' +} + +export type PostFalAiBytedanceSeedreamV45EditResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiBytedanceSeedreamV45EditResponse = + PostFalAiBytedanceSeedreamV45EditResponses[keyof PostFalAiBytedanceSeedreamV45EditResponses] + +export type GetFalAiBytedanceSeedreamV45EditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bytedance/seedream/v4.5/edit/requests/{request_id}' +} + +export type GetFalAiBytedanceSeedreamV45EditRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaBytedanceSeedreamV45EditOutput +} + +export type GetFalAiBytedanceSeedreamV45EditRequestsByRequestIdResponse = + GetFalAiBytedanceSeedreamV45EditRequestsByRequestIdResponses[keyof GetFalAiBytedanceSeedreamV45EditRequestsByRequestIdResponses] + +export type GetFalAiViduQ2ReferenceToImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/vidu/q2/reference-to-image/requests/{request_id}/status' +} + +export type GetFalAiViduQ2ReferenceToImageRequestsByRequestIdStatusResponses = { + /** + * The request status. 
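+ * + * Every Post*Data shape in this file shares the { body, url } layout (path and query are typed `never` for submits), so one generic submit helper can cover any model here. A hypothetical sketch — callQueueSubmit is illustrative and not part of this file: + * + * @example + * async function callQueueSubmit<D extends { url: string; body: unknown }>(d: D, headers: HeadersInit): Promise<SchemaQueueStatus> { + *   // all submit operations are plain POSTs of the typed body to the endpoint's url + *   return fetch(`https://queue.fal.run${d.url}`, { method: 'POST', headers, body: JSON.stringify(d.body) }).then((r) => r.json()) + * } + * // e.g. callQueueSubmit({ url: '/fal-ai/vidu/q2/reference-to-image', body: input } satisfies PostFalAiViduQ2ReferenceToImageData, headers)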
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiViduQ2ReferenceToImageRequestsByRequestIdStatusResponse = + GetFalAiViduQ2ReferenceToImageRequestsByRequestIdStatusResponses[keyof GetFalAiViduQ2ReferenceToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiViduQ2ReferenceToImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/vidu/q2/reference-to-image/requests/{request_id}/cancel' +} + +export type PutFalAiViduQ2ReferenceToImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiViduQ2ReferenceToImageRequestsByRequestIdCancelResponse = + PutFalAiViduQ2ReferenceToImageRequestsByRequestIdCancelResponses[keyof PutFalAiViduQ2ReferenceToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiViduQ2ReferenceToImageData = { + body: SchemaViduQ2ReferenceToImageInput + path?: never + query?: never + url: '/fal-ai/vidu/q2/reference-to-image' +} + +export type PostFalAiViduQ2ReferenceToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiViduQ2ReferenceToImageResponse = + PostFalAiViduQ2ReferenceToImageResponses[keyof PostFalAiViduQ2ReferenceToImageResponses] + +export type GetFalAiViduQ2ReferenceToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/vidu/q2/reference-to-image/requests/{request_id}' +} + +export type GetFalAiViduQ2ReferenceToImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaViduQ2ReferenceToImageOutput +} + +export type GetFalAiViduQ2ReferenceToImageRequestsByRequestIdResponse = + GetFalAiViduQ2ReferenceToImageRequestsByRequestIdResponses[keyof GetFalAiViduQ2ReferenceToImageRequestsByRequestIdResponses] + +export type GetFalAiKlingImageO1RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-image/o1/requests/{request_id}/status' +} + +export type GetFalAiKlingImageO1RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiKlingImageO1RequestsByRequestIdStatusResponse = + GetFalAiKlingImageO1RequestsByRequestIdStatusResponses[keyof GetFalAiKlingImageO1RequestsByRequestIdStatusResponses] + +export type PutFalAiKlingImageO1RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-image/o1/requests/{request_id}/cancel' +} + +export type PutFalAiKlingImageO1RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiKlingImageO1RequestsByRequestIdCancelResponse = + PutFalAiKlingImageO1RequestsByRequestIdCancelResponses[keyof PutFalAiKlingImageO1RequestsByRequestIdCancelResponses] + +export type PostFalAiKlingImageO1Data = { + body: SchemaKlingImageO1Input + path?: never + query?: never + url: '/fal-ai/kling-image/o1' +} + +export type PostFalAiKlingImageO1Responses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingImageO1Response = + PostFalAiKlingImageO1Responses[keyof PostFalAiKlingImageO1Responses] + +export type GetFalAiKlingImageO1RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-image/o1/requests/{request_id}' +} + +export type GetFalAiKlingImageO1RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaKlingImageO1Output +} + +export type GetFalAiKlingImageO1RequestsByRequestIdResponse = + GetFalAiKlingImageO1RequestsByRequestIdResponses[keyof GetFalAiKlingImageO1RequestsByRequestIdResponses] + +export type GetFalAiFlux2LoraGalleryVirtualTryonRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-2-lora-gallery/virtual-tryon/requests/{request_id}/status' + } + +export type GetFalAiFlux2LoraGalleryVirtualTryonRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFlux2LoraGalleryVirtualTryonRequestsByRequestIdStatusResponse = + GetFalAiFlux2LoraGalleryVirtualTryonRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2LoraGalleryVirtualTryonRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2LoraGalleryVirtualTryonRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-lora-gallery/virtual-tryon/requests/{request_id}/cancel' + } + +export type PutFalAiFlux2LoraGalleryVirtualTryonRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFlux2LoraGalleryVirtualTryonRequestsByRequestIdCancelResponse = + PutFalAiFlux2LoraGalleryVirtualTryonRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2LoraGalleryVirtualTryonRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2LoraGalleryVirtualTryonData = { + body: SchemaFlux2LoraGalleryVirtualTryonInput + path?: never + query?: never + url: '/fal-ai/flux-2-lora-gallery/virtual-tryon' +} + +export type PostFalAiFlux2LoraGalleryVirtualTryonResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2LoraGalleryVirtualTryonResponse = + PostFalAiFlux2LoraGalleryVirtualTryonResponses[keyof PostFalAiFlux2LoraGalleryVirtualTryonResponses] + +export type GetFalAiFlux2LoraGalleryVirtualTryonRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-lora-gallery/virtual-tryon/requests/{request_id}' +} + +export type GetFalAiFlux2LoraGalleryVirtualTryonRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux2LoraGalleryVirtualTryonOutput +} + +export type GetFalAiFlux2LoraGalleryVirtualTryonRequestsByRequestIdResponse = + GetFalAiFlux2LoraGalleryVirtualTryonRequestsByRequestIdResponses[keyof GetFalAiFlux2LoraGalleryVirtualTryonRequestsByRequestIdResponses] + +export type GetFalAiFlux2LoraGalleryMultipleAnglesRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/flux-2-lora-gallery/multiple-angles/requests/{request_id}/status' + } + +export type GetFalAiFlux2LoraGalleryMultipleAnglesRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFlux2LoraGalleryMultipleAnglesRequestsByRequestIdStatusResponse = + GetFalAiFlux2LoraGalleryMultipleAnglesRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2LoraGalleryMultipleAnglesRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2LoraGalleryMultipleAnglesRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-lora-gallery/multiple-angles/requests/{request_id}/cancel' + } + +export type PutFalAiFlux2LoraGalleryMultipleAnglesRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFlux2LoraGalleryMultipleAnglesRequestsByRequestIdCancelResponse = + PutFalAiFlux2LoraGalleryMultipleAnglesRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2LoraGalleryMultipleAnglesRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2LoraGalleryMultipleAnglesData = { + body: SchemaFlux2LoraGalleryMultipleAnglesInput + path?: never + query?: never + url: '/fal-ai/flux-2-lora-gallery/multiple-angles' +} + +export type PostFalAiFlux2LoraGalleryMultipleAnglesResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2LoraGalleryMultipleAnglesResponse = + PostFalAiFlux2LoraGalleryMultipleAnglesResponses[keyof PostFalAiFlux2LoraGalleryMultipleAnglesResponses] + +export type GetFalAiFlux2LoraGalleryMultipleAnglesRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-lora-gallery/multiple-angles/requests/{request_id}' +} + +export type GetFalAiFlux2LoraGalleryMultipleAnglesRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaFlux2LoraGalleryMultipleAnglesOutput + } + +export type GetFalAiFlux2LoraGalleryMultipleAnglesRequestsByRequestIdResponse = + GetFalAiFlux2LoraGalleryMultipleAnglesRequestsByRequestIdResponses[keyof GetFalAiFlux2LoraGalleryMultipleAnglesRequestsByRequestIdResponses] + +export type GetFalAiFlux2LoraGalleryFaceToFullPortraitRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-2-lora-gallery/face-to-full-portrait/requests/{request_id}/status' + } + +export type GetFalAiFlux2LoraGalleryFaceToFullPortraitRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiFlux2LoraGalleryFaceToFullPortraitRequestsByRequestIdStatusResponse = + GetFalAiFlux2LoraGalleryFaceToFullPortraitRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2LoraGalleryFaceToFullPortraitRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2LoraGalleryFaceToFullPortraitRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-lora-gallery/face-to-full-portrait/requests/{request_id}/cancel' + } + +export type PutFalAiFlux2LoraGalleryFaceToFullPortraitRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFlux2LoraGalleryFaceToFullPortraitRequestsByRequestIdCancelResponse = + PutFalAiFlux2LoraGalleryFaceToFullPortraitRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2LoraGalleryFaceToFullPortraitRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2LoraGalleryFaceToFullPortraitData = { + body: SchemaFlux2LoraGalleryFaceToFullPortraitInput + path?: never + query?: never + url: '/fal-ai/flux-2-lora-gallery/face-to-full-portrait' +} + +export type PostFalAiFlux2LoraGalleryFaceToFullPortraitResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2LoraGalleryFaceToFullPortraitResponse = + PostFalAiFlux2LoraGalleryFaceToFullPortraitResponses[keyof PostFalAiFlux2LoraGalleryFaceToFullPortraitResponses] + +export type GetFalAiFlux2LoraGalleryFaceToFullPortraitRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-lora-gallery/face-to-full-portrait/requests/{request_id}' + } + +export type GetFalAiFlux2LoraGalleryFaceToFullPortraitRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaFlux2LoraGalleryFaceToFullPortraitOutput + } + +export type GetFalAiFlux2LoraGalleryFaceToFullPortraitRequestsByRequestIdResponse = + GetFalAiFlux2LoraGalleryFaceToFullPortraitRequestsByRequestIdResponses[keyof GetFalAiFlux2LoraGalleryFaceToFullPortraitRequestsByRequestIdResponses] + +export type GetFalAiFlux2LoraGalleryApartmentStagingRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-2-lora-gallery/apartment-staging/requests/{request_id}/status' + } + +export type GetFalAiFlux2LoraGalleryApartmentStagingRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFlux2LoraGalleryApartmentStagingRequestsByRequestIdStatusResponse = + GetFalAiFlux2LoraGalleryApartmentStagingRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2LoraGalleryApartmentStagingRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2LoraGalleryApartmentStagingRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-lora-gallery/apartment-staging/requests/{request_id}/cancel' + } + +export type PutFalAiFlux2LoraGalleryApartmentStagingRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
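+ * Note: cancellation is generally only possible while the request is still IN_QUEUE; a request that has already started or finished may report success as false.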
+ */ + success?: boolean + } + } + +export type PutFalAiFlux2LoraGalleryApartmentStagingRequestsByRequestIdCancelResponse = + PutFalAiFlux2LoraGalleryApartmentStagingRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2LoraGalleryApartmentStagingRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2LoraGalleryApartmentStagingData = { + body: SchemaFlux2LoraGalleryApartmentStagingInput + path?: never + query?: never + url: '/fal-ai/flux-2-lora-gallery/apartment-staging' +} + +export type PostFalAiFlux2LoraGalleryApartmentStagingResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2LoraGalleryApartmentStagingResponse = + PostFalAiFlux2LoraGalleryApartmentStagingResponses[keyof PostFalAiFlux2LoraGalleryApartmentStagingResponses] + +export type GetFalAiFlux2LoraGalleryApartmentStagingRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-lora-gallery/apartment-staging/requests/{request_id}' +} + +export type GetFalAiFlux2LoraGalleryApartmentStagingRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaFlux2LoraGalleryApartmentStagingOutput + } + +export type GetFalAiFlux2LoraGalleryApartmentStagingRequestsByRequestIdResponse = + GetFalAiFlux2LoraGalleryApartmentStagingRequestsByRequestIdResponses[keyof GetFalAiFlux2LoraGalleryApartmentStagingRequestsByRequestIdResponses] + +export type GetFalAiFlux2LoraGalleryAddBackgroundRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-2-lora-gallery/add-background/requests/{request_id}/status' + } + +export type GetFalAiFlux2LoraGalleryAddBackgroundRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFlux2LoraGalleryAddBackgroundRequestsByRequestIdStatusResponse = + GetFalAiFlux2LoraGalleryAddBackgroundRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2LoraGalleryAddBackgroundRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2LoraGalleryAddBackgroundRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-lora-gallery/add-background/requests/{request_id}/cancel' + } + +export type PutFalAiFlux2LoraGalleryAddBackgroundRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFlux2LoraGalleryAddBackgroundRequestsByRequestIdCancelResponse = + PutFalAiFlux2LoraGalleryAddBackgroundRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2LoraGalleryAddBackgroundRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2LoraGalleryAddBackgroundData = { + body: SchemaFlux2LoraGalleryAddBackgroundInput + path?: never + query?: never + url: '/fal-ai/flux-2-lora-gallery/add-background' +} + +export type PostFalAiFlux2LoraGalleryAddBackgroundResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2LoraGalleryAddBackgroundResponse = + PostFalAiFlux2LoraGalleryAddBackgroundResponses[keyof PostFalAiFlux2LoraGalleryAddBackgroundResponses] + +export type GetFalAiFlux2LoraGalleryAddBackgroundRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-lora-gallery/add-background/requests/{request_id}' +} + +export type GetFalAiFlux2LoraGalleryAddBackgroundRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaFlux2LoraGalleryAddBackgroundOutput + } + +export type GetFalAiFlux2LoraGalleryAddBackgroundRequestsByRequestIdResponse = + GetFalAiFlux2LoraGalleryAddBackgroundRequestsByRequestIdResponses[keyof GetFalAiFlux2LoraGalleryAddBackgroundRequestsByRequestIdResponses] + +export type GetClarityaiCrystalUpscalerRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/clarityai/crystal-upscaler/requests/{request_id}/status' +} + +export type GetClarityaiCrystalUpscalerRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetClarityaiCrystalUpscalerRequestsByRequestIdStatusResponse = + GetClarityaiCrystalUpscalerRequestsByRequestIdStatusResponses[keyof GetClarityaiCrystalUpscalerRequestsByRequestIdStatusResponses] + +export type PutClarityaiCrystalUpscalerRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/clarityai/crystal-upscaler/requests/{request_id}/cancel' +} + +export type PutClarityaiCrystalUpscalerRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutClarityaiCrystalUpscalerRequestsByRequestIdCancelResponse = + PutClarityaiCrystalUpscalerRequestsByRequestIdCancelResponses[keyof PutClarityaiCrystalUpscalerRequestsByRequestIdCancelResponses] + +export type PostClarityaiCrystalUpscalerData = { + body: SchemaCrystalUpscalerInput + path?: never + query?: never + url: '/clarityai/crystal-upscaler' +} + +export type PostClarityaiCrystalUpscalerResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostClarityaiCrystalUpscalerResponse = + PostClarityaiCrystalUpscalerResponses[keyof PostClarityaiCrystalUpscalerResponses] + +export type GetClarityaiCrystalUpscalerRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/clarityai/crystal-upscaler/requests/{request_id}' +} + +export type GetClarityaiCrystalUpscalerRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaCrystalUpscalerOutput +} + +export type GetClarityaiCrystalUpscalerRequestsByRequestIdResponse = + GetClarityaiCrystalUpscalerRequestsByRequestIdResponses[keyof GetClarityaiCrystalUpscalerRequestsByRequestIdResponses] + +export type GetFalAiFlux2FlexEditRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
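+ * Note: when logs are requested, they are returned on the logs field of the QueueStatus response.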
+ */ + logs?: number + } + url: '/fal-ai/flux-2-flex/edit/requests/{request_id}/status' +} + +export type GetFalAiFlux2FlexEditRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlux2FlexEditRequestsByRequestIdStatusResponse = + GetFalAiFlux2FlexEditRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2FlexEditRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2FlexEditRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-flex/edit/requests/{request_id}/cancel' +} + +export type PutFalAiFlux2FlexEditRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFlux2FlexEditRequestsByRequestIdCancelResponse = + PutFalAiFlux2FlexEditRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2FlexEditRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2FlexEditData = { + body: SchemaFlux2FlexEditInput + path?: never + query?: never + url: '/fal-ai/flux-2-flex/edit' +} + +export type PostFalAiFlux2FlexEditResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2FlexEditResponse = + PostFalAiFlux2FlexEditResponses[keyof PostFalAiFlux2FlexEditResponses] + +export type GetFalAiFlux2FlexEditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-flex/edit/requests/{request_id}' +} + +export type GetFalAiFlux2FlexEditRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux2FlexEditOutput +} + +export type GetFalAiFlux2FlexEditRequestsByRequestIdResponse = + GetFalAiFlux2FlexEditRequestsByRequestIdResponses[keyof GetFalAiFlux2FlexEditRequestsByRequestIdResponses] + +export type GetFalAiChronoEditLoraRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/chrono-edit-lora/requests/{request_id}/status' +} + +export type GetFalAiChronoEditLoraRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiChronoEditLoraRequestsByRequestIdStatusResponse = + GetFalAiChronoEditLoraRequestsByRequestIdStatusResponses[keyof GetFalAiChronoEditLoraRequestsByRequestIdStatusResponses] + +export type PutFalAiChronoEditLoraRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/chrono-edit-lora/requests/{request_id}/cancel' +} + +export type PutFalAiChronoEditLoraRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiChronoEditLoraRequestsByRequestIdCancelResponse = + PutFalAiChronoEditLoraRequestsByRequestIdCancelResponses[keyof PutFalAiChronoEditLoraRequestsByRequestIdCancelResponses] + +export type PostFalAiChronoEditLoraData = { + body: SchemaChronoEditLoraInput + path?: never + query?: never + url: '/fal-ai/chrono-edit-lora' +} + +export type PostFalAiChronoEditLoraResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiChronoEditLoraResponse = + PostFalAiChronoEditLoraResponses[keyof PostFalAiChronoEditLoraResponses] + +export type GetFalAiChronoEditLoraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/chrono-edit-lora/requests/{request_id}' +} + +export type GetFalAiChronoEditLoraRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaChronoEditLoraOutput +} + +export type GetFalAiChronoEditLoraRequestsByRequestIdResponse = + GetFalAiChronoEditLoraRequestsByRequestIdResponses[keyof GetFalAiChronoEditLoraRequestsByRequestIdResponses] + +export type GetFalAiChronoEditLoraGalleryPaintbrushRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/chrono-edit-lora-gallery/paintbrush/requests/{request_id}/status' + } + +export type GetFalAiChronoEditLoraGalleryPaintbrushRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiChronoEditLoraGalleryPaintbrushRequestsByRequestIdStatusResponse = + GetFalAiChronoEditLoraGalleryPaintbrushRequestsByRequestIdStatusResponses[keyof GetFalAiChronoEditLoraGalleryPaintbrushRequestsByRequestIdStatusResponses] + +export type PutFalAiChronoEditLoraGalleryPaintbrushRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/chrono-edit-lora-gallery/paintbrush/requests/{request_id}/cancel' + } + +export type PutFalAiChronoEditLoraGalleryPaintbrushRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiChronoEditLoraGalleryPaintbrushRequestsByRequestIdCancelResponse = + PutFalAiChronoEditLoraGalleryPaintbrushRequestsByRequestIdCancelResponses[keyof PutFalAiChronoEditLoraGalleryPaintbrushRequestsByRequestIdCancelResponses] + +export type PostFalAiChronoEditLoraGalleryPaintbrushData = { + body: SchemaChronoEditLoraGalleryPaintbrushInput + path?: never + query?: never + url: '/fal-ai/chrono-edit-lora-gallery/paintbrush' +} + +export type PostFalAiChronoEditLoraGalleryPaintbrushResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiChronoEditLoraGalleryPaintbrushResponse = + PostFalAiChronoEditLoraGalleryPaintbrushResponses[keyof PostFalAiChronoEditLoraGalleryPaintbrushResponses] + +export type GetFalAiChronoEditLoraGalleryPaintbrushRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/chrono-edit-lora-gallery/paintbrush/requests/{request_id}' +} + +export type GetFalAiChronoEditLoraGalleryPaintbrushRequestsByRequestIdResponses = + { + /** + * Result of the request. 
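+ * Note: available once the queue status reaches COMPLETED; polling the status endpoint before that returns IN_QUEUE or IN_PROGRESS.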
+ */ + 200: SchemaChronoEditLoraGalleryPaintbrushOutput + } + +export type GetFalAiChronoEditLoraGalleryPaintbrushRequestsByRequestIdResponse = + GetFalAiChronoEditLoraGalleryPaintbrushRequestsByRequestIdResponses[keyof GetFalAiChronoEditLoraGalleryPaintbrushRequestsByRequestIdResponses] + +export type GetFalAiChronoEditLoraGalleryUpscalerRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/chrono-edit-lora-gallery/upscaler/requests/{request_id}/status' + } + +export type GetFalAiChronoEditLoraGalleryUpscalerRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiChronoEditLoraGalleryUpscalerRequestsByRequestIdStatusResponse = + GetFalAiChronoEditLoraGalleryUpscalerRequestsByRequestIdStatusResponses[keyof GetFalAiChronoEditLoraGalleryUpscalerRequestsByRequestIdStatusResponses] + +export type PutFalAiChronoEditLoraGalleryUpscalerRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/chrono-edit-lora-gallery/upscaler/requests/{request_id}/cancel' + } + +export type PutFalAiChronoEditLoraGalleryUpscalerRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiChronoEditLoraGalleryUpscalerRequestsByRequestIdCancelResponse = + PutFalAiChronoEditLoraGalleryUpscalerRequestsByRequestIdCancelResponses[keyof PutFalAiChronoEditLoraGalleryUpscalerRequestsByRequestIdCancelResponses] + +export type PostFalAiChronoEditLoraGalleryUpscalerData = { + body: SchemaChronoEditLoraGalleryUpscalerInput + path?: never + query?: never + url: '/fal-ai/chrono-edit-lora-gallery/upscaler' +} + +export type PostFalAiChronoEditLoraGalleryUpscalerResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiChronoEditLoraGalleryUpscalerResponse = + PostFalAiChronoEditLoraGalleryUpscalerResponses[keyof PostFalAiChronoEditLoraGalleryUpscalerResponses] + +export type GetFalAiChronoEditLoraGalleryUpscalerRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/chrono-edit-lora-gallery/upscaler/requests/{request_id}' +} + +export type GetFalAiChronoEditLoraGalleryUpscalerRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaChronoEditLoraGalleryUpscalerOutput + } + +export type GetFalAiChronoEditLoraGalleryUpscalerRequestsByRequestIdResponse = + GetFalAiChronoEditLoraGalleryUpscalerRequestsByRequestIdResponses[keyof GetFalAiChronoEditLoraGalleryUpscalerRequestsByRequestIdResponses] + +export type GetFalAiSam3ImageRleRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/sam-3/image-rle/requests/{request_id}/status' +} + +export type GetFalAiSam3ImageRleRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiSam3ImageRleRequestsByRequestIdStatusResponse = + GetFalAiSam3ImageRleRequestsByRequestIdStatusResponses[keyof GetFalAiSam3ImageRleRequestsByRequestIdStatusResponses] + +export type PutFalAiSam3ImageRleRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sam-3/image-rle/requests/{request_id}/cancel' +} + +export type PutFalAiSam3ImageRleRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiSam3ImageRleRequestsByRequestIdCancelResponse = + PutFalAiSam3ImageRleRequestsByRequestIdCancelResponses[keyof PutFalAiSam3ImageRleRequestsByRequestIdCancelResponses] + +export type PostFalAiSam3ImageRleData = { + body: SchemaSam3ImageRleInput + path?: never + query?: never + url: '/fal-ai/sam-3/image-rle' +} + +export type PostFalAiSam3ImageRleResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSam3ImageRleResponse = + PostFalAiSam3ImageRleResponses[keyof PostFalAiSam3ImageRleResponses] + +export type GetFalAiSam3ImageRleRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sam-3/image-rle/requests/{request_id}' +} + +export type GetFalAiSam3ImageRleRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSam3ImageRleOutput +} + +export type GetFalAiSam3ImageRleRequestsByRequestIdResponse = + GetFalAiSam3ImageRleRequestsByRequestIdResponses[keyof GetFalAiSam3ImageRleRequestsByRequestIdResponses] + +export type GetFalAiSam3ImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/sam-3/image/requests/{request_id}/status' +} + +export type GetFalAiSam3ImageRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSam3ImageRequestsByRequestIdStatusResponse = + GetFalAiSam3ImageRequestsByRequestIdStatusResponses[keyof GetFalAiSam3ImageRequestsByRequestIdStatusResponses] + +export type PutFalAiSam3ImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sam-3/image/requests/{request_id}/cancel' +} + +export type PutFalAiSam3ImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiSam3ImageRequestsByRequestIdCancelResponse = + PutFalAiSam3ImageRequestsByRequestIdCancelResponses[keyof PutFalAiSam3ImageRequestsByRequestIdCancelResponses] + +export type PostFalAiSam3ImageData = { + body: SchemaSam3ImageInput + path?: never + query?: never + url: '/fal-ai/sam-3/image' +} + +export type PostFalAiSam3ImageResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiSam3ImageResponse = + PostFalAiSam3ImageResponses[keyof PostFalAiSam3ImageResponses] + +export type GetFalAiSam3ImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sam-3/image/requests/{request_id}' +} + +export type GetFalAiSam3ImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSam3ImageOutput +} + +export type GetFalAiSam3ImageRequestsByRequestIdResponse = + GetFalAiSam3ImageRequestsByRequestIdResponses[keyof GetFalAiSam3ImageRequestsByRequestIdResponses] + +export type GetFalAiGemini3ProImagePreviewEditRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/gemini-3-pro-image-preview/edit/requests/{request_id}/status' +} + +export type GetFalAiGemini3ProImagePreviewEditRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiGemini3ProImagePreviewEditRequestsByRequestIdStatusResponse = + GetFalAiGemini3ProImagePreviewEditRequestsByRequestIdStatusResponses[keyof GetFalAiGemini3ProImagePreviewEditRequestsByRequestIdStatusResponses] + +export type PutFalAiGemini3ProImagePreviewEditRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/gemini-3-pro-image-preview/edit/requests/{request_id}/cancel' +} + +export type PutFalAiGemini3ProImagePreviewEditRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiGemini3ProImagePreviewEditRequestsByRequestIdCancelResponse = + PutFalAiGemini3ProImagePreviewEditRequestsByRequestIdCancelResponses[keyof PutFalAiGemini3ProImagePreviewEditRequestsByRequestIdCancelResponses] + +export type PostFalAiGemini3ProImagePreviewEditData = { + body: SchemaGemini3ProImagePreviewEditInput + path?: never + query?: never + url: '/fal-ai/gemini-3-pro-image-preview/edit' +} + +export type PostFalAiGemini3ProImagePreviewEditResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiGemini3ProImagePreviewEditResponse = + PostFalAiGemini3ProImagePreviewEditResponses[keyof PostFalAiGemini3ProImagePreviewEditResponses] + +export type GetFalAiGemini3ProImagePreviewEditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/gemini-3-pro-image-preview/edit/requests/{request_id}' +} + +export type GetFalAiGemini3ProImagePreviewEditRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaGemini3ProImagePreviewEditOutput +} + +export type GetFalAiGemini3ProImagePreviewEditRequestsByRequestIdResponse = + GetFalAiGemini3ProImagePreviewEditRequestsByRequestIdResponses[keyof GetFalAiGemini3ProImagePreviewEditRequestsByRequestIdResponses] + +export type GetFalAiNanoBananaProEditRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/nano-banana-pro/edit/requests/{request_id}/status' +} + +export type GetFalAiNanoBananaProEditRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiNanoBananaProEditRequestsByRequestIdStatusResponse = + GetFalAiNanoBananaProEditRequestsByRequestIdStatusResponses[keyof GetFalAiNanoBananaProEditRequestsByRequestIdStatusResponses] + +export type PutFalAiNanoBananaProEditRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/nano-banana-pro/edit/requests/{request_id}/cancel' +} + +export type PutFalAiNanoBananaProEditRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiNanoBananaProEditRequestsByRequestIdCancelResponse = + PutFalAiNanoBananaProEditRequestsByRequestIdCancelResponses[keyof PutFalAiNanoBananaProEditRequestsByRequestIdCancelResponses] + +export type PostFalAiNanoBananaProEditData = { + body: SchemaNanoBananaProEditInput + path?: never + query?: never + url: '/fal-ai/nano-banana-pro/edit' +} + +export type PostFalAiNanoBananaProEditResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiNanoBananaProEditResponse = + PostFalAiNanoBananaProEditResponses[keyof PostFalAiNanoBananaProEditResponses] + +export type GetFalAiNanoBananaProEditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/nano-banana-pro/edit/requests/{request_id}' +} + +export type GetFalAiNanoBananaProEditRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaNanoBananaProEditOutput +} + +export type GetFalAiNanoBananaProEditRequestsByRequestIdResponse = + GetFalAiNanoBananaProEditRequestsByRequestIdResponses[keyof GetFalAiNanoBananaProEditRequestsByRequestIdResponses] + +export type GetFalAiQwenImageEditPlusLoraGalleryMultipleAnglesRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-edit-plus-lora-gallery/multiple-angles/requests/{request_id}/status' + } + +export type GetFalAiQwenImageEditPlusLoraGalleryMultipleAnglesRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiQwenImageEditPlusLoraGalleryMultipleAnglesRequestsByRequestIdStatusResponse = + GetFalAiQwenImageEditPlusLoraGalleryMultipleAnglesRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageEditPlusLoraGalleryMultipleAnglesRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageEditPlusLoraGalleryMultipleAnglesRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-plus-lora-gallery/multiple-angles/requests/{request_id}/cancel' + } + +export type PutFalAiQwenImageEditPlusLoraGalleryMultipleAnglesRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiQwenImageEditPlusLoraGalleryMultipleAnglesRequestsByRequestIdCancelResponse = + PutFalAiQwenImageEditPlusLoraGalleryMultipleAnglesRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageEditPlusLoraGalleryMultipleAnglesRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageEditPlusLoraGalleryMultipleAnglesData = { + body: SchemaQwenImageEditPlusLoraGalleryMultipleAnglesInput + path?: never + query?: never + url: '/fal-ai/qwen-image-edit-plus-lora-gallery/multiple-angles' +} + +export type PostFalAiQwenImageEditPlusLoraGalleryMultipleAnglesResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImageEditPlusLoraGalleryMultipleAnglesResponse = + PostFalAiQwenImageEditPlusLoraGalleryMultipleAnglesResponses[keyof PostFalAiQwenImageEditPlusLoraGalleryMultipleAnglesResponses] + +export type GetFalAiQwenImageEditPlusLoraGalleryMultipleAnglesRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-plus-lora-gallery/multiple-angles/requests/{request_id}' + } + +export type GetFalAiQwenImageEditPlusLoraGalleryMultipleAnglesRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaQwenImageEditPlusLoraGalleryMultipleAnglesOutput + } + +export type GetFalAiQwenImageEditPlusLoraGalleryMultipleAnglesRequestsByRequestIdResponse = + GetFalAiQwenImageEditPlusLoraGalleryMultipleAnglesRequestsByRequestIdResponses[keyof GetFalAiQwenImageEditPlusLoraGalleryMultipleAnglesRequestsByRequestIdResponses] + +export type GetFalAiQwenImageEditPlusLoraGalleryShirtDesignRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-edit-plus-lora-gallery/shirt-design/requests/{request_id}/status' + } + +export type GetFalAiQwenImageEditPlusLoraGalleryShirtDesignRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiQwenImageEditPlusLoraGalleryShirtDesignRequestsByRequestIdStatusResponse = + GetFalAiQwenImageEditPlusLoraGalleryShirtDesignRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageEditPlusLoraGalleryShirtDesignRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageEditPlusLoraGalleryShirtDesignRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-plus-lora-gallery/shirt-design/requests/{request_id}/cancel' + } + +export type PutFalAiQwenImageEditPlusLoraGalleryShirtDesignRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiQwenImageEditPlusLoraGalleryShirtDesignRequestsByRequestIdCancelResponse = + PutFalAiQwenImageEditPlusLoraGalleryShirtDesignRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageEditPlusLoraGalleryShirtDesignRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageEditPlusLoraGalleryShirtDesignData = { + body: SchemaQwenImageEditPlusLoraGalleryShirtDesignInput + path?: never + query?: never + url: '/fal-ai/qwen-image-edit-plus-lora-gallery/shirt-design' +} + +export type PostFalAiQwenImageEditPlusLoraGalleryShirtDesignResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImageEditPlusLoraGalleryShirtDesignResponse = + PostFalAiQwenImageEditPlusLoraGalleryShirtDesignResponses[keyof PostFalAiQwenImageEditPlusLoraGalleryShirtDesignResponses] + +export type GetFalAiQwenImageEditPlusLoraGalleryShirtDesignRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-plus-lora-gallery/shirt-design/requests/{request_id}' + } + +export type GetFalAiQwenImageEditPlusLoraGalleryShirtDesignRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaQwenImageEditPlusLoraGalleryShirtDesignOutput + } + +export type GetFalAiQwenImageEditPlusLoraGalleryShirtDesignRequestsByRequestIdResponse = + GetFalAiQwenImageEditPlusLoraGalleryShirtDesignRequestsByRequestIdResponses[keyof GetFalAiQwenImageEditPlusLoraGalleryShirtDesignRequestsByRequestIdResponses] + +export type GetFalAiQwenImageEditPlusLoraGalleryRemoveLightingRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-edit-plus-lora-gallery/remove-lighting/requests/{request_id}/status' + } + +export type GetFalAiQwenImageEditPlusLoraGalleryRemoveLightingRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiQwenImageEditPlusLoraGalleryRemoveLightingRequestsByRequestIdStatusResponse = + GetFalAiQwenImageEditPlusLoraGalleryRemoveLightingRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageEditPlusLoraGalleryRemoveLightingRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageEditPlusLoraGalleryRemoveLightingRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-plus-lora-gallery/remove-lighting/requests/{request_id}/cancel' + } + +export type PutFalAiQwenImageEditPlusLoraGalleryRemoveLightingRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiQwenImageEditPlusLoraGalleryRemoveLightingRequestsByRequestIdCancelResponse = + PutFalAiQwenImageEditPlusLoraGalleryRemoveLightingRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageEditPlusLoraGalleryRemoveLightingRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageEditPlusLoraGalleryRemoveLightingData = { + body: SchemaQwenImageEditPlusLoraGalleryRemoveLightingInput + path?: never + query?: never + url: '/fal-ai/qwen-image-edit-plus-lora-gallery/remove-lighting' +} + +export type PostFalAiQwenImageEditPlusLoraGalleryRemoveLightingResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImageEditPlusLoraGalleryRemoveLightingResponse = + PostFalAiQwenImageEditPlusLoraGalleryRemoveLightingResponses[keyof PostFalAiQwenImageEditPlusLoraGalleryRemoveLightingResponses] + +export type GetFalAiQwenImageEditPlusLoraGalleryRemoveLightingRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-plus-lora-gallery/remove-lighting/requests/{request_id}' + } + +export type GetFalAiQwenImageEditPlusLoraGalleryRemoveLightingRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaQwenImageEditPlusLoraGalleryRemoveLightingOutput + } + +export type GetFalAiQwenImageEditPlusLoraGalleryRemoveLightingRequestsByRequestIdResponse = + GetFalAiQwenImageEditPlusLoraGalleryRemoveLightingRequestsByRequestIdResponses[keyof GetFalAiQwenImageEditPlusLoraGalleryRemoveLightingRequestsByRequestIdResponses] + +export type GetFalAiQwenImageEditPlusLoraGalleryRemoveElementRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-edit-plus-lora-gallery/remove-element/requests/{request_id}/status' + } + +export type GetFalAiQwenImageEditPlusLoraGalleryRemoveElementRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiQwenImageEditPlusLoraGalleryRemoveElementRequestsByRequestIdStatusResponse = + GetFalAiQwenImageEditPlusLoraGalleryRemoveElementRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageEditPlusLoraGalleryRemoveElementRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageEditPlusLoraGalleryRemoveElementRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-plus-lora-gallery/remove-element/requests/{request_id}/cancel' + } + +export type PutFalAiQwenImageEditPlusLoraGalleryRemoveElementRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiQwenImageEditPlusLoraGalleryRemoveElementRequestsByRequestIdCancelResponse = + PutFalAiQwenImageEditPlusLoraGalleryRemoveElementRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageEditPlusLoraGalleryRemoveElementRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageEditPlusLoraGalleryRemoveElementData = { + body: SchemaQwenImageEditPlusLoraGalleryRemoveElementInput + path?: never + query?: never + url: '/fal-ai/qwen-image-edit-plus-lora-gallery/remove-element' +} + +export type PostFalAiQwenImageEditPlusLoraGalleryRemoveElementResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImageEditPlusLoraGalleryRemoveElementResponse = + PostFalAiQwenImageEditPlusLoraGalleryRemoveElementResponses[keyof PostFalAiQwenImageEditPlusLoraGalleryRemoveElementResponses] + +export type GetFalAiQwenImageEditPlusLoraGalleryRemoveElementRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-plus-lora-gallery/remove-element/requests/{request_id}' + } + +export type GetFalAiQwenImageEditPlusLoraGalleryRemoveElementRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaQwenImageEditPlusLoraGalleryRemoveElementOutput + } + +export type GetFalAiQwenImageEditPlusLoraGalleryRemoveElementRequestsByRequestIdResponse = + GetFalAiQwenImageEditPlusLoraGalleryRemoveElementRequestsByRequestIdResponses[keyof GetFalAiQwenImageEditPlusLoraGalleryRemoveElementRequestsByRequestIdResponses] + +export type GetFalAiQwenImageEditPlusLoraGalleryNextSceneRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-edit-plus-lora-gallery/next-scene/requests/{request_id}/status' + } + +export type GetFalAiQwenImageEditPlusLoraGalleryNextSceneRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiQwenImageEditPlusLoraGalleryNextSceneRequestsByRequestIdStatusResponse = + GetFalAiQwenImageEditPlusLoraGalleryNextSceneRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageEditPlusLoraGalleryNextSceneRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageEditPlusLoraGalleryNextSceneRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-plus-lora-gallery/next-scene/requests/{request_id}/cancel' + } + +export type PutFalAiQwenImageEditPlusLoraGalleryNextSceneRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiQwenImageEditPlusLoraGalleryNextSceneRequestsByRequestIdCancelResponse = + PutFalAiQwenImageEditPlusLoraGalleryNextSceneRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageEditPlusLoraGalleryNextSceneRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageEditPlusLoraGalleryNextSceneData = { + body: SchemaQwenImageEditPlusLoraGalleryNextSceneInput + path?: never + query?: never + url: '/fal-ai/qwen-image-edit-plus-lora-gallery/next-scene' +} + +export type PostFalAiQwenImageEditPlusLoraGalleryNextSceneResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImageEditPlusLoraGalleryNextSceneResponse = + PostFalAiQwenImageEditPlusLoraGalleryNextSceneResponses[keyof PostFalAiQwenImageEditPlusLoraGalleryNextSceneResponses] + +export type GetFalAiQwenImageEditPlusLoraGalleryNextSceneRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-plus-lora-gallery/next-scene/requests/{request_id}' + } + +export type GetFalAiQwenImageEditPlusLoraGalleryNextSceneRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaQwenImageEditPlusLoraGalleryNextSceneOutput + } + +export type GetFalAiQwenImageEditPlusLoraGalleryNextSceneRequestsByRequestIdResponse = + GetFalAiQwenImageEditPlusLoraGalleryNextSceneRequestsByRequestIdResponses[keyof GetFalAiQwenImageEditPlusLoraGalleryNextSceneRequestsByRequestIdResponses] + +export type GetFalAiQwenImageEditPlusLoraGalleryIntegrateProductRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-edit-plus-lora-gallery/integrate-product/requests/{request_id}/status' + } + +export type GetFalAiQwenImageEditPlusLoraGalleryIntegrateProductRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiQwenImageEditPlusLoraGalleryIntegrateProductRequestsByRequestIdStatusResponse = + GetFalAiQwenImageEditPlusLoraGalleryIntegrateProductRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageEditPlusLoraGalleryIntegrateProductRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageEditPlusLoraGalleryIntegrateProductRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-plus-lora-gallery/integrate-product/requests/{request_id}/cancel' + } + +export type PutFalAiQwenImageEditPlusLoraGalleryIntegrateProductRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiQwenImageEditPlusLoraGalleryIntegrateProductRequestsByRequestIdCancelResponse = + PutFalAiQwenImageEditPlusLoraGalleryIntegrateProductRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageEditPlusLoraGalleryIntegrateProductRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageEditPlusLoraGalleryIntegrateProductData = { + body: SchemaQwenImageEditPlusLoraGalleryIntegrateProductInput + path?: never + query?: never + url: '/fal-ai/qwen-image-edit-plus-lora-gallery/integrate-product' +} + +export type PostFalAiQwenImageEditPlusLoraGalleryIntegrateProductResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImageEditPlusLoraGalleryIntegrateProductResponse = + PostFalAiQwenImageEditPlusLoraGalleryIntegrateProductResponses[keyof PostFalAiQwenImageEditPlusLoraGalleryIntegrateProductResponses] + +export type GetFalAiQwenImageEditPlusLoraGalleryIntegrateProductRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-plus-lora-gallery/integrate-product/requests/{request_id}' + } + +export type GetFalAiQwenImageEditPlusLoraGalleryIntegrateProductRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaQwenImageEditPlusLoraGalleryIntegrateProductOutput + } + +export type GetFalAiQwenImageEditPlusLoraGalleryIntegrateProductRequestsByRequestIdResponse = + GetFalAiQwenImageEditPlusLoraGalleryIntegrateProductRequestsByRequestIdResponses[keyof GetFalAiQwenImageEditPlusLoraGalleryIntegrateProductRequestsByRequestIdResponses] + +export type GetFalAiQwenImageEditPlusLoraGalleryGroupPhotoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-edit-plus-lora-gallery/group-photo/requests/{request_id}/status' + } + +export type GetFalAiQwenImageEditPlusLoraGalleryGroupPhotoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiQwenImageEditPlusLoraGalleryGroupPhotoRequestsByRequestIdStatusResponse = + GetFalAiQwenImageEditPlusLoraGalleryGroupPhotoRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageEditPlusLoraGalleryGroupPhotoRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageEditPlusLoraGalleryGroupPhotoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-plus-lora-gallery/group-photo/requests/{request_id}/cancel' + } + +export type PutFalAiQwenImageEditPlusLoraGalleryGroupPhotoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiQwenImageEditPlusLoraGalleryGroupPhotoRequestsByRequestIdCancelResponse = + PutFalAiQwenImageEditPlusLoraGalleryGroupPhotoRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageEditPlusLoraGalleryGroupPhotoRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageEditPlusLoraGalleryGroupPhotoData = { + body: SchemaQwenImageEditPlusLoraGalleryGroupPhotoInput + path?: never + query?: never + url: '/fal-ai/qwen-image-edit-plus-lora-gallery/group-photo' +} + +export type PostFalAiQwenImageEditPlusLoraGalleryGroupPhotoResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImageEditPlusLoraGalleryGroupPhotoResponse = + PostFalAiQwenImageEditPlusLoraGalleryGroupPhotoResponses[keyof PostFalAiQwenImageEditPlusLoraGalleryGroupPhotoResponses] + +export type GetFalAiQwenImageEditPlusLoraGalleryGroupPhotoRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-plus-lora-gallery/group-photo/requests/{request_id}' + } + +export type GetFalAiQwenImageEditPlusLoraGalleryGroupPhotoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaQwenImageEditPlusLoraGalleryGroupPhotoOutput + } + +export type GetFalAiQwenImageEditPlusLoraGalleryGroupPhotoRequestsByRequestIdResponse = + GetFalAiQwenImageEditPlusLoraGalleryGroupPhotoRequestsByRequestIdResponses[keyof GetFalAiQwenImageEditPlusLoraGalleryGroupPhotoRequestsByRequestIdResponses] + +export type GetFalAiQwenImageEditPlusLoraGalleryFaceToFullPortraitRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-edit-plus-lora-gallery/face-to-full-portrait/requests/{request_id}/status' + } + +export type GetFalAiQwenImageEditPlusLoraGalleryFaceToFullPortraitRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiQwenImageEditPlusLoraGalleryFaceToFullPortraitRequestsByRequestIdStatusResponse = + GetFalAiQwenImageEditPlusLoraGalleryFaceToFullPortraitRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageEditPlusLoraGalleryFaceToFullPortraitRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageEditPlusLoraGalleryFaceToFullPortraitRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-plus-lora-gallery/face-to-full-portrait/requests/{request_id}/cancel' + } + +export type PutFalAiQwenImageEditPlusLoraGalleryFaceToFullPortraitRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiQwenImageEditPlusLoraGalleryFaceToFullPortraitRequestsByRequestIdCancelResponse = + PutFalAiQwenImageEditPlusLoraGalleryFaceToFullPortraitRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageEditPlusLoraGalleryFaceToFullPortraitRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageEditPlusLoraGalleryFaceToFullPortraitData = { + body: SchemaQwenImageEditPlusLoraGalleryFaceToFullPortraitInput + path?: never + query?: never + url: '/fal-ai/qwen-image-edit-plus-lora-gallery/face-to-full-portrait' +} + +export type PostFalAiQwenImageEditPlusLoraGalleryFaceToFullPortraitResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImageEditPlusLoraGalleryFaceToFullPortraitResponse = + PostFalAiQwenImageEditPlusLoraGalleryFaceToFullPortraitResponses[keyof PostFalAiQwenImageEditPlusLoraGalleryFaceToFullPortraitResponses] + +export type GetFalAiQwenImageEditPlusLoraGalleryFaceToFullPortraitRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-plus-lora-gallery/face-to-full-portrait/requests/{request_id}' + } + +export type GetFalAiQwenImageEditPlusLoraGalleryFaceToFullPortraitRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaQwenImageEditPlusLoraGalleryFaceToFullPortraitOutput + } + +export type GetFalAiQwenImageEditPlusLoraGalleryFaceToFullPortraitRequestsByRequestIdResponse = + GetFalAiQwenImageEditPlusLoraGalleryFaceToFullPortraitRequestsByRequestIdResponses[keyof GetFalAiQwenImageEditPlusLoraGalleryFaceToFullPortraitRequestsByRequestIdResponses] + +export type GetFalAiQwenImageEditPlusLoraGalleryAddBackgroundRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-edit-plus-lora-gallery/add-background/requests/{request_id}/status' + } + +export type GetFalAiQwenImageEditPlusLoraGalleryAddBackgroundRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiQwenImageEditPlusLoraGalleryAddBackgroundRequestsByRequestIdStatusResponse = + GetFalAiQwenImageEditPlusLoraGalleryAddBackgroundRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageEditPlusLoraGalleryAddBackgroundRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageEditPlusLoraGalleryAddBackgroundRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-plus-lora-gallery/add-background/requests/{request_id}/cancel' + } + +export type PutFalAiQwenImageEditPlusLoraGalleryAddBackgroundRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiQwenImageEditPlusLoraGalleryAddBackgroundRequestsByRequestIdCancelResponse = + PutFalAiQwenImageEditPlusLoraGalleryAddBackgroundRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageEditPlusLoraGalleryAddBackgroundRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageEditPlusLoraGalleryAddBackgroundData = { + body: SchemaQwenImageEditPlusLoraGalleryAddBackgroundInput + path?: never + query?: never + url: '/fal-ai/qwen-image-edit-plus-lora-gallery/add-background' +} + +export type PostFalAiQwenImageEditPlusLoraGalleryAddBackgroundResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImageEditPlusLoraGalleryAddBackgroundResponse = + PostFalAiQwenImageEditPlusLoraGalleryAddBackgroundResponses[keyof PostFalAiQwenImageEditPlusLoraGalleryAddBackgroundResponses] + +export type GetFalAiQwenImageEditPlusLoraGalleryAddBackgroundRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-plus-lora-gallery/add-background/requests/{request_id}' + } + +export type GetFalAiQwenImageEditPlusLoraGalleryAddBackgroundRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaQwenImageEditPlusLoraGalleryAddBackgroundOutput + } + +export type GetFalAiQwenImageEditPlusLoraGalleryAddBackgroundRequestsByRequestIdResponse = + GetFalAiQwenImageEditPlusLoraGalleryAddBackgroundRequestsByRequestIdResponses[keyof GetFalAiQwenImageEditPlusLoraGalleryAddBackgroundRequestsByRequestIdResponses] + +export type GetFalAiReveFastRemixRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/reve/fast/remix/requests/{request_id}/status' +} + +export type GetFalAiReveFastRemixRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiReveFastRemixRequestsByRequestIdStatusResponse = + GetFalAiReveFastRemixRequestsByRequestIdStatusResponses[keyof GetFalAiReveFastRemixRequestsByRequestIdStatusResponses] + +export type PutFalAiReveFastRemixRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/reve/fast/remix/requests/{request_id}/cancel' +} + +export type PutFalAiReveFastRemixRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiReveFastRemixRequestsByRequestIdCancelResponse = + PutFalAiReveFastRemixRequestsByRequestIdCancelResponses[keyof PutFalAiReveFastRemixRequestsByRequestIdCancelResponses] + +export type PostFalAiReveFastRemixData = { + body: SchemaReveFastRemixInput + path?: never + query?: never + url: '/fal-ai/reve/fast/remix' +} + +export type PostFalAiReveFastRemixResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiReveFastRemixResponse = + PostFalAiReveFastRemixResponses[keyof PostFalAiReveFastRemixResponses] + +export type GetFalAiReveFastRemixRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/reve/fast/remix/requests/{request_id}' +} + +export type GetFalAiReveFastRemixRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaReveFastRemixOutput +} + +export type GetFalAiReveFastRemixRequestsByRequestIdResponse = + GetFalAiReveFastRemixRequestsByRequestIdResponses[keyof GetFalAiReveFastRemixRequestsByRequestIdResponses] + +export type GetFalAiReveFastEditRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
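+ *
+ * Hedged sketch: requesting logs while polling status. The `logs=1` query
+ * value follows this schema's own documented convention; the base URL and
+ * auth header are assumptions.
+ *
+ * @example
+ * ```ts
+ * async function getEditStatus(requestId: string): Promise<SchemaQueueStatus> {
+ *   const url =
+ *     `https://queue.fal.run/fal-ai/reve/fast/edit/requests/${requestId}/status?logs=1`
+ *   const res = await fetch(url, {
+ *     headers: { Authorization: `Key ${process.env.FAL_KEY}` }, // assumed
+ *   })
+ *   return res.json() as Promise<SchemaQueueStatus>
+ * }
+ * ```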
+ */ + logs?: number + } + url: '/fal-ai/reve/fast/edit/requests/{request_id}/status' +} + +export type GetFalAiReveFastEditRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiReveFastEditRequestsByRequestIdStatusResponse = + GetFalAiReveFastEditRequestsByRequestIdStatusResponses[keyof GetFalAiReveFastEditRequestsByRequestIdStatusResponses] + +export type PutFalAiReveFastEditRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/reve/fast/edit/requests/{request_id}/cancel' +} + +export type PutFalAiReveFastEditRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiReveFastEditRequestsByRequestIdCancelResponse = + PutFalAiReveFastEditRequestsByRequestIdCancelResponses[keyof PutFalAiReveFastEditRequestsByRequestIdCancelResponses] + +export type PostFalAiReveFastEditData = { + body: SchemaReveFastEditInput + path?: never + query?: never + url: '/fal-ai/reve/fast/edit' +} + +export type PostFalAiReveFastEditResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiReveFastEditResponse = + PostFalAiReveFastEditResponses[keyof PostFalAiReveFastEditResponses] + +export type GetFalAiReveFastEditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/reve/fast/edit/requests/{request_id}' +} + +export type GetFalAiReveFastEditRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaReveFastEditOutput +} + +export type GetFalAiReveFastEditRequestsByRequestIdResponse = + GetFalAiReveFastEditRequestsByRequestIdResponses[keyof GetFalAiReveFastEditRequestsByRequestIdResponses] + +export type GetFalAiImageAppsV2OutpaintRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-apps-v2/outpaint/requests/{request_id}/status' +} + +export type GetFalAiImageAppsV2OutpaintRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiImageAppsV2OutpaintRequestsByRequestIdStatusResponse = + GetFalAiImageAppsV2OutpaintRequestsByRequestIdStatusResponses[keyof GetFalAiImageAppsV2OutpaintRequestsByRequestIdStatusResponses] + +export type PutFalAiImageAppsV2OutpaintRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-apps-v2/outpaint/requests/{request_id}/cancel' +} + +export type PutFalAiImageAppsV2OutpaintRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
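+ *
+ * Hedged sketch: cancelling a queued outpaint request. The endpoint path,
+ * PUT method, and response shape come from this type; the base URL and auth
+ * header are assumptions.
+ *
+ * @example
+ * ```ts
+ * async function cancelOutpaint(requestId: string): Promise<boolean> {
+ *   const res = await fetch(
+ *     `https://queue.fal.run/fal-ai/image-apps-v2/outpaint/requests/${requestId}/cancel`,
+ *     { method: 'PUT', headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+ *   )
+ *   const body: { success?: boolean } = await res.json()
+ *   return body.success ?? false // `success` is optional in this schema
+ * }
+ * ```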
+ */ + success?: boolean + } +} + +export type PutFalAiImageAppsV2OutpaintRequestsByRequestIdCancelResponse = + PutFalAiImageAppsV2OutpaintRequestsByRequestIdCancelResponses[keyof PutFalAiImageAppsV2OutpaintRequestsByRequestIdCancelResponses] + +export type PostFalAiImageAppsV2OutpaintData = { + body: SchemaImageAppsV2OutpaintInput + path?: never + query?: never + url: '/fal-ai/image-apps-v2/outpaint' +} + +export type PostFalAiImageAppsV2OutpaintResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageAppsV2OutpaintResponse = + PostFalAiImageAppsV2OutpaintResponses[keyof PostFalAiImageAppsV2OutpaintResponses] + +export type GetFalAiImageAppsV2OutpaintRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-apps-v2/outpaint/requests/{request_id}' +} + +export type GetFalAiImageAppsV2OutpaintRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImageAppsV2OutpaintOutput +} + +export type GetFalAiImageAppsV2OutpaintRequestsByRequestIdResponse = + GetFalAiImageAppsV2OutpaintRequestsByRequestIdResponses[keyof GetFalAiImageAppsV2OutpaintRequestsByRequestIdResponses] + +export type GetFalAiFluxVisionUpscalerRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-vision-upscaler/requests/{request_id}/status' +} + +export type GetFalAiFluxVisionUpscalerRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxVisionUpscalerRequestsByRequestIdStatusResponse = + GetFalAiFluxVisionUpscalerRequestsByRequestIdStatusResponses[keyof GetFalAiFluxVisionUpscalerRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxVisionUpscalerRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-vision-upscaler/requests/{request_id}/cancel' +} + +export type PutFalAiFluxVisionUpscalerRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFluxVisionUpscalerRequestsByRequestIdCancelResponse = + PutFalAiFluxVisionUpscalerRequestsByRequestIdCancelResponses[keyof PutFalAiFluxVisionUpscalerRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxVisionUpscalerData = { + body: SchemaFluxVisionUpscalerInput + path?: never + query?: never + url: '/fal-ai/flux-vision-upscaler' +} + +export type PostFalAiFluxVisionUpscalerResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxVisionUpscalerResponse = + PostFalAiFluxVisionUpscalerResponses[keyof PostFalAiFluxVisionUpscalerResponses] + +export type GetFalAiFluxVisionUpscalerRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-vision-upscaler/requests/{request_id}' +} + +export type GetFalAiFluxVisionUpscalerRequestsByRequestIdResponses = { + /** + * Result of the request. 
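+ *
+ * Hedged sketch: fetching the finished result, typed with the Output schema
+ * this field references. Base URL and auth header are assumptions; only the
+ * path and payload type are taken from this declaration.
+ *
+ * @example
+ * ```ts
+ * async function getUpscaleResult(
+ *   requestId: string,
+ * ): Promise<SchemaFluxVisionUpscalerOutput> {
+ *   const res = await fetch(
+ *     `https://queue.fal.run/fal-ai/flux-vision-upscaler/requests/${requestId}`,
+ *     { headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+ *   )
+ *   return res.json() as Promise<SchemaFluxVisionUpscalerOutput>
+ * }
+ * ```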
+ */ + 200: SchemaFluxVisionUpscalerOutput +} + +export type GetFalAiFluxVisionUpscalerRequestsByRequestIdResponse = + GetFalAiFluxVisionUpscalerRequestsByRequestIdResponses[keyof GetFalAiFluxVisionUpscalerRequestsByRequestIdResponses] + +export type GetFalAiEmu35ImageEditImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/emu-3.5-image/edit-image/requests/{request_id}/status' +} + +export type GetFalAiEmu35ImageEditImageRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiEmu35ImageEditImageRequestsByRequestIdStatusResponse = + GetFalAiEmu35ImageEditImageRequestsByRequestIdStatusResponses[keyof GetFalAiEmu35ImageEditImageRequestsByRequestIdStatusResponses] + +export type PutFalAiEmu35ImageEditImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/emu-3.5-image/edit-image/requests/{request_id}/cancel' +} + +export type PutFalAiEmu35ImageEditImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiEmu35ImageEditImageRequestsByRequestIdCancelResponse = + PutFalAiEmu35ImageEditImageRequestsByRequestIdCancelResponses[keyof PutFalAiEmu35ImageEditImageRequestsByRequestIdCancelResponses] + +export type PostFalAiEmu35ImageEditImageData = { + body: SchemaEmu35ImageEditImageInput + path?: never + query?: never + url: '/fal-ai/emu-3.5-image/edit-image' +} + +export type PostFalAiEmu35ImageEditImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiEmu35ImageEditImageResponse = + PostFalAiEmu35ImageEditImageResponses[keyof PostFalAiEmu35ImageEditImageResponses] + +export type GetFalAiEmu35ImageEditImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/emu-3.5-image/edit-image/requests/{request_id}' +} + +export type GetFalAiEmu35ImageEditImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaEmu35ImageEditImageOutput +} + +export type GetFalAiEmu35ImageEditImageRequestsByRequestIdResponse = + GetFalAiEmu35ImageEditImageRequestsByRequestIdResponses[keyof GetFalAiEmu35ImageEditImageRequestsByRequestIdResponses] + +export type GetFalAiChronoEditRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/chrono-edit/requests/{request_id}/status' +} + +export type GetFalAiChronoEditRequestsByRequestIdStatusResponses = { + /** + * The request status. 
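+ *
+ * Hedged sketch: a polling loop over this status endpoint. The `status`
+ * field narrows to the `IN_QUEUE | IN_PROGRESS | COMPLETED` enum declared in
+ * `SchemaQueueStatus`; the 1s interval, base URL, and auth header are
+ * assumptions.
+ *
+ * @example
+ * ```ts
+ * async function waitForChronoEdit(requestId: string): Promise<SchemaQueueStatus> {
+ *   for (;;) {
+ *     const res = await fetch(
+ *       `https://queue.fal.run/fal-ai/chrono-edit/requests/${requestId}/status`,
+ *       { headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+ *     )
+ *     const status: SchemaQueueStatus = await res.json()
+ *     if (status.status === 'COMPLETED') return status
+ *     await new Promise((r) => setTimeout(r, 1_000)) // assumed poll interval
+ *   }
+ * }
+ * ```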
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiChronoEditRequestsByRequestIdStatusResponse = + GetFalAiChronoEditRequestsByRequestIdStatusResponses[keyof GetFalAiChronoEditRequestsByRequestIdStatusResponses] + +export type PutFalAiChronoEditRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/chrono-edit/requests/{request_id}/cancel' +} + +export type PutFalAiChronoEditRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiChronoEditRequestsByRequestIdCancelResponse = + PutFalAiChronoEditRequestsByRequestIdCancelResponses[keyof PutFalAiChronoEditRequestsByRequestIdCancelResponses] + +export type PostFalAiChronoEditData = { + body: SchemaChronoEditInput + path?: never + query?: never + url: '/fal-ai/chrono-edit' +} + +export type PostFalAiChronoEditResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiChronoEditResponse = + PostFalAiChronoEditResponses[keyof PostFalAiChronoEditResponses] + +export type GetFalAiChronoEditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/chrono-edit/requests/{request_id}' +} + +export type GetFalAiChronoEditRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaChronoEditOutput +} + +export type GetFalAiChronoEditRequestsByRequestIdResponse = + GetFalAiChronoEditRequestsByRequestIdResponses[keyof GetFalAiChronoEditRequestsByRequestIdResponses] + +export type GetFalAiGptImage1MiniEditRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/gpt-image-1-mini/edit/requests/{request_id}/status' +} + +export type GetFalAiGptImage1MiniEditRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiGptImage1MiniEditRequestsByRequestIdStatusResponse = + GetFalAiGptImage1MiniEditRequestsByRequestIdStatusResponses[keyof GetFalAiGptImage1MiniEditRequestsByRequestIdStatusResponses] + +export type PutFalAiGptImage1MiniEditRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/gpt-image-1-mini/edit/requests/{request_id}/cancel' +} + +export type PutFalAiGptImage1MiniEditRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiGptImage1MiniEditRequestsByRequestIdCancelResponse = + PutFalAiGptImage1MiniEditRequestsByRequestIdCancelResponses[keyof PutFalAiGptImage1MiniEditRequestsByRequestIdCancelResponses] + +export type PostFalAiGptImage1MiniEditData = { + body: SchemaGptImage1MiniEditInput + path?: never + query?: never + url: '/fal-ai/gpt-image-1-mini/edit' +} + +export type PostFalAiGptImage1MiniEditResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiGptImage1MiniEditResponse = + PostFalAiGptImage1MiniEditResponses[keyof PostFalAiGptImage1MiniEditResponses] + +export type GetFalAiGptImage1MiniEditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/gpt-image-1-mini/edit/requests/{request_id}' +} + +export type GetFalAiGptImage1MiniEditRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaGptImage1MiniEditOutput +} + +export type GetFalAiGptImage1MiniEditRequestsByRequestIdResponse = + GetFalAiGptImage1MiniEditRequestsByRequestIdResponses[keyof GetFalAiGptImage1MiniEditRequestsByRequestIdResponses] + +export type GetFalAiReveRemixRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/reve/remix/requests/{request_id}/status' +} + +export type GetFalAiReveRemixRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiReveRemixRequestsByRequestIdStatusResponse = + GetFalAiReveRemixRequestsByRequestIdStatusResponses[keyof GetFalAiReveRemixRequestsByRequestIdStatusResponses] + +export type PutFalAiReveRemixRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/reve/remix/requests/{request_id}/cancel' +} + +export type PutFalAiReveRemixRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiReveRemixRequestsByRequestIdCancelResponse = + PutFalAiReveRemixRequestsByRequestIdCancelResponses[keyof PutFalAiReveRemixRequestsByRequestIdCancelResponses] + +export type PostFalAiReveRemixData = { + body: SchemaReveRemixInput + path?: never + query?: never + url: '/fal-ai/reve/remix' +} + +export type PostFalAiReveRemixResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiReveRemixResponse = + PostFalAiReveRemixResponses[keyof PostFalAiReveRemixResponses] + +export type GetFalAiReveRemixRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/reve/remix/requests/{request_id}' +} + +export type GetFalAiReveRemixRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaReveRemixOutput +} + +export type GetFalAiReveRemixRequestsByRequestIdResponse = + GetFalAiReveRemixRequestsByRequestIdResponses[keyof GetFalAiReveRemixRequestsByRequestIdResponses] + +export type GetFalAiReveEditRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/reve/edit/requests/{request_id}/status' +} + +export type GetFalAiReveEditRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiReveEditRequestsByRequestIdStatusResponse = + GetFalAiReveEditRequestsByRequestIdStatusResponses[keyof GetFalAiReveEditRequestsByRequestIdStatusResponses] + +export type PutFalAiReveEditRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/reve/edit/requests/{request_id}/cancel' +} + +export type PutFalAiReveEditRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiReveEditRequestsByRequestIdCancelResponse = + PutFalAiReveEditRequestsByRequestIdCancelResponses[keyof PutFalAiReveEditRequestsByRequestIdCancelResponses] + +export type PostFalAiReveEditData = { + body: SchemaReveEditInput + path?: never + query?: never + url: '/fal-ai/reve/edit' +} + +export type PostFalAiReveEditResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiReveEditResponse = + PostFalAiReveEditResponses[keyof PostFalAiReveEditResponses] + +export type GetFalAiReveEditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/reve/edit/requests/{request_id}' +} + +export type GetFalAiReveEditRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaReveEditOutput +} + +export type GetFalAiReveEditRequestsByRequestIdResponse = + GetFalAiReveEditRequestsByRequestIdResponses[keyof GetFalAiReveEditRequestsByRequestIdResponses] + +export type GetFalAiImage2PixelRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image2pixel/requests/{request_id}/status' +} + +export type GetFalAiImage2PixelRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiImage2PixelRequestsByRequestIdStatusResponse = + GetFalAiImage2PixelRequestsByRequestIdStatusResponses[keyof GetFalAiImage2PixelRequestsByRequestIdStatusResponses] + +export type PutFalAiImage2PixelRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image2pixel/requests/{request_id}/cancel' +} + +export type PutFalAiImage2PixelRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiImage2PixelRequestsByRequestIdCancelResponse = + PutFalAiImage2PixelRequestsByRequestIdCancelResponses[keyof PutFalAiImage2PixelRequestsByRequestIdCancelResponses] + +export type PostFalAiImage2PixelData = { + body: SchemaImage2PixelInput + path?: never + query?: never + url: '/fal-ai/image2pixel' +} + +export type PostFalAiImage2PixelResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiImage2PixelResponse = + PostFalAiImage2PixelResponses[keyof PostFalAiImage2PixelResponses] + +export type GetFalAiImage2PixelRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image2pixel/requests/{request_id}' +} + +export type GetFalAiImage2PixelRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImage2PixelOutput +} + +export type GetFalAiImage2PixelRequestsByRequestIdResponse = + GetFalAiImage2PixelRequestsByRequestIdResponses[keyof GetFalAiImage2PixelRequestsByRequestIdResponses] + +export type GetFalAiDreamomni2EditRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/dreamomni2/edit/requests/{request_id}/status' +} + +export type GetFalAiDreamomni2EditRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiDreamomni2EditRequestsByRequestIdStatusResponse = + GetFalAiDreamomni2EditRequestsByRequestIdStatusResponses[keyof GetFalAiDreamomni2EditRequestsByRequestIdStatusResponses] + +export type PutFalAiDreamomni2EditRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/dreamomni2/edit/requests/{request_id}/cancel' +} + +export type PutFalAiDreamomni2EditRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiDreamomni2EditRequestsByRequestIdCancelResponse = + PutFalAiDreamomni2EditRequestsByRequestIdCancelResponses[keyof PutFalAiDreamomni2EditRequestsByRequestIdCancelResponses] + +export type PostFalAiDreamomni2EditData = { + body: SchemaDreamomni2EditInput + path?: never + query?: never + url: '/fal-ai/dreamomni2/edit' +} + +export type PostFalAiDreamomni2EditResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiDreamomni2EditResponse = + PostFalAiDreamomni2EditResponses[keyof PostFalAiDreamomni2EditResponses] + +export type GetFalAiDreamomni2EditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/dreamomni2/edit/requests/{request_id}' +} + +export type GetFalAiDreamomni2EditRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaDreamomni2EditOutput +} + +export type GetFalAiDreamomni2EditRequestsByRequestIdResponse = + GetFalAiDreamomni2EditRequestsByRequestIdResponses[keyof GetFalAiDreamomni2EditRequestsByRequestIdResponses] + +export type GetFalAiQwenImageEditPlusLoraRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-edit-plus-lora/requests/{request_id}/status' +} + +export type GetFalAiQwenImageEditPlusLoraRequestsByRequestIdStatusResponses = { + /** + * The request status. 
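+ *
+ * Note on the generated `...Response` aliases that follow each of these
+ * `...Responses` maps: `T[keyof T]` is an indexed-access type that collapses
+ * the status-code record into a union of its value types.
+ *
+ * @example
+ * ```ts
+ * // With a single 200 entry, the resulting union is just that payload type:
+ * type ValuesOf<T> = T[keyof T]
+ * type Demo = ValuesOf<{ 200: SchemaQueueStatus }> // = SchemaQueueStatus
+ * ```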
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiQwenImageEditPlusLoraRequestsByRequestIdStatusResponse = + GetFalAiQwenImageEditPlusLoraRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageEditPlusLoraRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageEditPlusLoraRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-plus-lora/requests/{request_id}/cancel' +} + +export type PutFalAiQwenImageEditPlusLoraRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiQwenImageEditPlusLoraRequestsByRequestIdCancelResponse = + PutFalAiQwenImageEditPlusLoraRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageEditPlusLoraRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageEditPlusLoraData = { + body: SchemaQwenImageEditPlusLoraInput + path?: never + query?: never + url: '/fal-ai/qwen-image-edit-plus-lora' +} + +export type PostFalAiQwenImageEditPlusLoraResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImageEditPlusLoraResponse = + PostFalAiQwenImageEditPlusLoraResponses[keyof PostFalAiQwenImageEditPlusLoraResponses] + +export type GetFalAiQwenImageEditPlusLoraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-plus-lora/requests/{request_id}' +} + +export type GetFalAiQwenImageEditPlusLoraRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaQwenImageEditPlusLoraOutput +} + +export type GetFalAiQwenImageEditPlusLoraRequestsByRequestIdResponse = + GetFalAiQwenImageEditPlusLoraRequestsByRequestIdResponses[keyof GetFalAiQwenImageEditPlusLoraRequestsByRequestIdResponses] + +export type GetFalAiLucidfluxRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/lucidflux/requests/{request_id}/status' +} + +export type GetFalAiLucidfluxRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLucidfluxRequestsByRequestIdStatusResponse = + GetFalAiLucidfluxRequestsByRequestIdStatusResponses[keyof GetFalAiLucidfluxRequestsByRequestIdStatusResponses] + +export type PutFalAiLucidfluxRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/lucidflux/requests/{request_id}/cancel' +} + +export type PutFalAiLucidfluxRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLucidfluxRequestsByRequestIdCancelResponse = + PutFalAiLucidfluxRequestsByRequestIdCancelResponses[keyof PutFalAiLucidfluxRequestsByRequestIdCancelResponses] + +export type PostFalAiLucidfluxData = { + body: SchemaLucidfluxInput + path?: never + query?: never + url: '/fal-ai/lucidflux' +} + +export type PostFalAiLucidfluxResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiLucidfluxResponse = + PostFalAiLucidfluxResponses[keyof PostFalAiLucidfluxResponses] + +export type GetFalAiLucidfluxRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/lucidflux/requests/{request_id}' +} + +export type GetFalAiLucidfluxRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLucidfluxOutput +} + +export type GetFalAiLucidfluxRequestsByRequestIdResponse = + GetFalAiLucidfluxRequestsByRequestIdResponses[keyof GetFalAiLucidfluxRequestsByRequestIdResponses] + +export type GetFalAiQwenImageEditImageToImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-edit/image-to-image/requests/{request_id}/status' +} + +export type GetFalAiQwenImageEditImageToImageRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiQwenImageEditImageToImageRequestsByRequestIdStatusResponse = + GetFalAiQwenImageEditImageToImageRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageEditImageToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageEditImageToImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit/image-to-image/requests/{request_id}/cancel' +} + +export type PutFalAiQwenImageEditImageToImageRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiQwenImageEditImageToImageRequestsByRequestIdCancelResponse = + PutFalAiQwenImageEditImageToImageRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageEditImageToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageEditImageToImageData = { + body: SchemaQwenImageEditImageToImageInput + path?: never + query?: never + url: '/fal-ai/qwen-image-edit/image-to-image' +} + +export type PostFalAiQwenImageEditImageToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImageEditImageToImageResponse = + PostFalAiQwenImageEditImageToImageResponses[keyof PostFalAiQwenImageEditImageToImageResponses] + +export type GetFalAiQwenImageEditImageToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit/image-to-image/requests/{request_id}' +} + +export type GetFalAiQwenImageEditImageToImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaQwenImageEditImageToImageOutput +} + +export type GetFalAiQwenImageEditImageToImageRequestsByRequestIdResponse = + GetFalAiQwenImageEditImageToImageRequestsByRequestIdResponses[keyof GetFalAiQwenImageEditImageToImageRequestsByRequestIdResponses] + +export type GetFalAiWan25PreviewImageToImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
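+ *
+ * Hedged sketch: because every generated `...Data` shape carries its own
+ * relative `url` (plus an optional `body`), one generic helper can drive any
+ * endpoint in this file. Resolving against `https://queue.fal.run` is an
+ * assumption, as is the auth header; urls containing `{request_id}` would
+ * still need the path parameter substituted first.
+ *
+ * @example
+ * ```ts
+ * async function callQueue<TOut>(
+ *   data: { url: string; body?: unknown },
+ *   method: 'GET' | 'POST' | 'PUT' = 'GET',
+ * ): Promise<TOut> {
+ *   const res = await fetch('https://queue.fal.run' + data.url, {
+ *     method,
+ *     headers: {
+ *       Authorization: `Key ${process.env.FAL_KEY}`, // assumed
+ *       ...(data.body ? { 'Content-Type': 'application/json' } : {}),
+ *     },
+ *     body: data.body ? JSON.stringify(data.body) : undefined,
+ *   })
+ *   return res.json() as Promise<TOut>
+ * }
+ * ```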
+ */ + logs?: number + } + url: '/fal-ai/wan-25-preview/image-to-image/requests/{request_id}/status' +} + +export type GetFalAiWan25PreviewImageToImageRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiWan25PreviewImageToImageRequestsByRequestIdStatusResponse = + GetFalAiWan25PreviewImageToImageRequestsByRequestIdStatusResponses[keyof GetFalAiWan25PreviewImageToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiWan25PreviewImageToImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-25-preview/image-to-image/requests/{request_id}/cancel' +} + +export type PutFalAiWan25PreviewImageToImageRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiWan25PreviewImageToImageRequestsByRequestIdCancelResponse = + PutFalAiWan25PreviewImageToImageRequestsByRequestIdCancelResponses[keyof PutFalAiWan25PreviewImageToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiWan25PreviewImageToImageData = { + body: SchemaWan25PreviewImageToImageInput + path?: never + query?: never + url: '/fal-ai/wan-25-preview/image-to-image' +} + +export type PostFalAiWan25PreviewImageToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWan25PreviewImageToImageResponse = + PostFalAiWan25PreviewImageToImageResponses[keyof PostFalAiWan25PreviewImageToImageResponses] + +export type GetFalAiWan25PreviewImageToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-25-preview/image-to-image/requests/{request_id}' +} + +export type GetFalAiWan25PreviewImageToImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWan25PreviewImageToImageOutput +} + +export type GetFalAiWan25PreviewImageToImageRequestsByRequestIdResponse = + GetFalAiWan25PreviewImageToImageRequestsByRequestIdResponses[keyof GetFalAiWan25PreviewImageToImageRequestsByRequestIdResponses] + +export type GetFalAiQwenImageEditPlusRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-edit-plus/requests/{request_id}/status' +} + +export type GetFalAiQwenImageEditPlusRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiQwenImageEditPlusRequestsByRequestIdStatusResponse = + GetFalAiQwenImageEditPlusRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageEditPlusRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageEditPlusRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-plus/requests/{request_id}/cancel' +} + +export type PutFalAiQwenImageEditPlusRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiQwenImageEditPlusRequestsByRequestIdCancelResponse = + PutFalAiQwenImageEditPlusRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageEditPlusRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageEditPlusData = { + body: SchemaQwenImageEditPlusInput + path?: never + query?: never + url: '/fal-ai/qwen-image-edit-plus' +} + +export type PostFalAiQwenImageEditPlusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImageEditPlusResponse = + PostFalAiQwenImageEditPlusResponses[keyof PostFalAiQwenImageEditPlusResponses] + +export type GetFalAiQwenImageEditPlusRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-plus/requests/{request_id}' +} + +export type GetFalAiQwenImageEditPlusRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaQwenImageEditPlusOutput +} + +export type GetFalAiQwenImageEditPlusRequestsByRequestIdResponse = + GetFalAiQwenImageEditPlusRequestsByRequestIdResponses[keyof GetFalAiQwenImageEditPlusRequestsByRequestIdResponses] + +export type GetFalAiSeedvrUpscaleImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/seedvr/upscale/image/requests/{request_id}/status' +} + +export type GetFalAiSeedvrUpscaleImageRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSeedvrUpscaleImageRequestsByRequestIdStatusResponse = + GetFalAiSeedvrUpscaleImageRequestsByRequestIdStatusResponses[keyof GetFalAiSeedvrUpscaleImageRequestsByRequestIdStatusResponses] + +export type PutFalAiSeedvrUpscaleImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/seedvr/upscale/image/requests/{request_id}/cancel' +} + +export type PutFalAiSeedvrUpscaleImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiSeedvrUpscaleImageRequestsByRequestIdCancelResponse = + PutFalAiSeedvrUpscaleImageRequestsByRequestIdCancelResponses[keyof PutFalAiSeedvrUpscaleImageRequestsByRequestIdCancelResponses] + +export type PostFalAiSeedvrUpscaleImageData = { + body: SchemaSeedvrUpscaleImageInput + path?: never + query?: never + url: '/fal-ai/seedvr/upscale/image' +} + +export type PostFalAiSeedvrUpscaleImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSeedvrUpscaleImageResponse = + PostFalAiSeedvrUpscaleImageResponses[keyof PostFalAiSeedvrUpscaleImageResponses] + +export type GetFalAiSeedvrUpscaleImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/seedvr/upscale/image/requests/{request_id}' +} + +export type GetFalAiSeedvrUpscaleImageRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaSeedvrUpscaleImageOutput +} + +export type GetFalAiSeedvrUpscaleImageRequestsByRequestIdResponse = + GetFalAiSeedvrUpscaleImageRequestsByRequestIdResponses[keyof GetFalAiSeedvrUpscaleImageRequestsByRequestIdResponses] + +export type GetFalAiImageAppsV2ProductHoldingRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-apps-v2/product-holding/requests/{request_id}/status' +} + +export type GetFalAiImageAppsV2ProductHoldingRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiImageAppsV2ProductHoldingRequestsByRequestIdStatusResponse = + GetFalAiImageAppsV2ProductHoldingRequestsByRequestIdStatusResponses[keyof GetFalAiImageAppsV2ProductHoldingRequestsByRequestIdStatusResponses] + +export type PutFalAiImageAppsV2ProductHoldingRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-apps-v2/product-holding/requests/{request_id}/cancel' +} + +export type PutFalAiImageAppsV2ProductHoldingRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiImageAppsV2ProductHoldingRequestsByRequestIdCancelResponse = + PutFalAiImageAppsV2ProductHoldingRequestsByRequestIdCancelResponses[keyof PutFalAiImageAppsV2ProductHoldingRequestsByRequestIdCancelResponses] + +export type PostFalAiImageAppsV2ProductHoldingData = { + body: SchemaImageAppsV2ProductHoldingInput + path?: never + query?: never + url: '/fal-ai/image-apps-v2/product-holding' +} + +export type PostFalAiImageAppsV2ProductHoldingResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageAppsV2ProductHoldingResponse = + PostFalAiImageAppsV2ProductHoldingResponses[keyof PostFalAiImageAppsV2ProductHoldingResponses] + +export type GetFalAiImageAppsV2ProductHoldingRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-apps-v2/product-holding/requests/{request_id}' +} + +export type GetFalAiImageAppsV2ProductHoldingRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImageAppsV2ProductHoldingOutput +} + +export type GetFalAiImageAppsV2ProductHoldingRequestsByRequestIdResponse = + GetFalAiImageAppsV2ProductHoldingRequestsByRequestIdResponses[keyof GetFalAiImageAppsV2ProductHoldingRequestsByRequestIdResponses] + +export type GetFalAiImageAppsV2ProductPhotographyRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-apps-v2/product-photography/requests/{request_id}/status' + } + +export type GetFalAiImageAppsV2ProductPhotographyRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiImageAppsV2ProductPhotographyRequestsByRequestIdStatusResponse = + GetFalAiImageAppsV2ProductPhotographyRequestsByRequestIdStatusResponses[keyof GetFalAiImageAppsV2ProductPhotographyRequestsByRequestIdStatusResponses] + +export type PutFalAiImageAppsV2ProductPhotographyRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-apps-v2/product-photography/requests/{request_id}/cancel' + } + +export type PutFalAiImageAppsV2ProductPhotographyRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiImageAppsV2ProductPhotographyRequestsByRequestIdCancelResponse = + PutFalAiImageAppsV2ProductPhotographyRequestsByRequestIdCancelResponses[keyof PutFalAiImageAppsV2ProductPhotographyRequestsByRequestIdCancelResponses] + +export type PostFalAiImageAppsV2ProductPhotographyData = { + body: SchemaImageAppsV2ProductPhotographyInput + path?: never + query?: never + url: '/fal-ai/image-apps-v2/product-photography' +} + +export type PostFalAiImageAppsV2ProductPhotographyResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageAppsV2ProductPhotographyResponse = + PostFalAiImageAppsV2ProductPhotographyResponses[keyof PostFalAiImageAppsV2ProductPhotographyResponses] + +export type GetFalAiImageAppsV2ProductPhotographyRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-apps-v2/product-photography/requests/{request_id}' +} + +export type GetFalAiImageAppsV2ProductPhotographyRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaImageAppsV2ProductPhotographyOutput + } + +export type GetFalAiImageAppsV2ProductPhotographyRequestsByRequestIdResponse = + GetFalAiImageAppsV2ProductPhotographyRequestsByRequestIdResponses[keyof GetFalAiImageAppsV2ProductPhotographyRequestsByRequestIdResponses] + +export type GetFalAiImageAppsV2VirtualTryOnRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-apps-v2/virtual-try-on/requests/{request_id}/status' +} + +export type GetFalAiImageAppsV2VirtualTryOnRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiImageAppsV2VirtualTryOnRequestsByRequestIdStatusResponse = + GetFalAiImageAppsV2VirtualTryOnRequestsByRequestIdStatusResponses[keyof GetFalAiImageAppsV2VirtualTryOnRequestsByRequestIdStatusResponses] + +export type PutFalAiImageAppsV2VirtualTryOnRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-apps-v2/virtual-try-on/requests/{request_id}/cancel' +} + +export type PutFalAiImageAppsV2VirtualTryOnRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
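+ *
+ * (Hedged note, not stated by the schema: by typical queue semantics,
+ * cancellation is only expected to succeed while the request is still
+ * `IN_QUEUE`; once processing has started, this flag may come back `false`.)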
+ */ + success?: boolean + } + } + +export type PutFalAiImageAppsV2VirtualTryOnRequestsByRequestIdCancelResponse = + PutFalAiImageAppsV2VirtualTryOnRequestsByRequestIdCancelResponses[keyof PutFalAiImageAppsV2VirtualTryOnRequestsByRequestIdCancelResponses] + +export type PostFalAiImageAppsV2VirtualTryOnData = { + body: SchemaImageAppsV2VirtualTryOnInput + path?: never + query?: never + url: '/fal-ai/image-apps-v2/virtual-try-on' +} + +export type PostFalAiImageAppsV2VirtualTryOnResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageAppsV2VirtualTryOnResponse = + PostFalAiImageAppsV2VirtualTryOnResponses[keyof PostFalAiImageAppsV2VirtualTryOnResponses] + +export type GetFalAiImageAppsV2VirtualTryOnRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-apps-v2/virtual-try-on/requests/{request_id}' +} + +export type GetFalAiImageAppsV2VirtualTryOnRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImageAppsV2VirtualTryOnOutput +} + +export type GetFalAiImageAppsV2VirtualTryOnRequestsByRequestIdResponse = + GetFalAiImageAppsV2VirtualTryOnRequestsByRequestIdResponses[keyof GetFalAiImageAppsV2VirtualTryOnRequestsByRequestIdResponses] + +export type GetFalAiImageAppsV2TextureTransformRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-apps-v2/texture-transform/requests/{request_id}/status' +} + +export type GetFalAiImageAppsV2TextureTransformRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiImageAppsV2TextureTransformRequestsByRequestIdStatusResponse = + GetFalAiImageAppsV2TextureTransformRequestsByRequestIdStatusResponses[keyof GetFalAiImageAppsV2TextureTransformRequestsByRequestIdStatusResponses] + +export type PutFalAiImageAppsV2TextureTransformRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-apps-v2/texture-transform/requests/{request_id}/cancel' +} + +export type PutFalAiImageAppsV2TextureTransformRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiImageAppsV2TextureTransformRequestsByRequestIdCancelResponse = + PutFalAiImageAppsV2TextureTransformRequestsByRequestIdCancelResponses[keyof PutFalAiImageAppsV2TextureTransformRequestsByRequestIdCancelResponses] + +export type PostFalAiImageAppsV2TextureTransformData = { + body: SchemaImageAppsV2TextureTransformInput + path?: never + query?: never + url: '/fal-ai/image-apps-v2/texture-transform' +} + +export type PostFalAiImageAppsV2TextureTransformResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageAppsV2TextureTransformResponse = + PostFalAiImageAppsV2TextureTransformResponses[keyof PostFalAiImageAppsV2TextureTransformResponses] + +export type GetFalAiImageAppsV2TextureTransformRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-apps-v2/texture-transform/requests/{request_id}' +} + +export type GetFalAiImageAppsV2TextureTransformRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImageAppsV2TextureTransformOutput +} + +export type GetFalAiImageAppsV2TextureTransformRequestsByRequestIdResponse = + GetFalAiImageAppsV2TextureTransformRequestsByRequestIdResponses[keyof GetFalAiImageAppsV2TextureTransformRequestsByRequestIdResponses] + +export type GetFalAiImageAppsV2RelightingRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-apps-v2/relighting/requests/{request_id}/status' +} + +export type GetFalAiImageAppsV2RelightingRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiImageAppsV2RelightingRequestsByRequestIdStatusResponse = + GetFalAiImageAppsV2RelightingRequestsByRequestIdStatusResponses[keyof GetFalAiImageAppsV2RelightingRequestsByRequestIdStatusResponses] + +export type PutFalAiImageAppsV2RelightingRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-apps-v2/relighting/requests/{request_id}/cancel' +} + +export type PutFalAiImageAppsV2RelightingRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiImageAppsV2RelightingRequestsByRequestIdCancelResponse = + PutFalAiImageAppsV2RelightingRequestsByRequestIdCancelResponses[keyof PutFalAiImageAppsV2RelightingRequestsByRequestIdCancelResponses] + +export type PostFalAiImageAppsV2RelightingData = { + body: SchemaImageAppsV2RelightingInput + path?: never + query?: never + url: '/fal-ai/image-apps-v2/relighting' +} + +export type PostFalAiImageAppsV2RelightingResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageAppsV2RelightingResponse = + PostFalAiImageAppsV2RelightingResponses[keyof PostFalAiImageAppsV2RelightingResponses] + +export type GetFalAiImageAppsV2RelightingRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-apps-v2/relighting/requests/{request_id}' +} + +export type GetFalAiImageAppsV2RelightingRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImageAppsV2RelightingOutput +} + +export type GetFalAiImageAppsV2RelightingRequestsByRequestIdResponse = + GetFalAiImageAppsV2RelightingRequestsByRequestIdResponses[keyof GetFalAiImageAppsV2RelightingRequestsByRequestIdResponses] + +export type GetFalAiImageAppsV2StyleTransferRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/image-apps-v2/style-transfer/requests/{request_id}/status' +} + +export type GetFalAiImageAppsV2StyleTransferRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiImageAppsV2StyleTransferRequestsByRequestIdStatusResponse = + GetFalAiImageAppsV2StyleTransferRequestsByRequestIdStatusResponses[keyof GetFalAiImageAppsV2StyleTransferRequestsByRequestIdStatusResponses] + +export type PutFalAiImageAppsV2StyleTransferRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-apps-v2/style-transfer/requests/{request_id}/cancel' +} + +export type PutFalAiImageAppsV2StyleTransferRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiImageAppsV2StyleTransferRequestsByRequestIdCancelResponse = + PutFalAiImageAppsV2StyleTransferRequestsByRequestIdCancelResponses[keyof PutFalAiImageAppsV2StyleTransferRequestsByRequestIdCancelResponses] + +export type PostFalAiImageAppsV2StyleTransferData = { + body: SchemaImageAppsV2StyleTransferInput + path?: never + query?: never + url: '/fal-ai/image-apps-v2/style-transfer' +} + +export type PostFalAiImageAppsV2StyleTransferResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageAppsV2StyleTransferResponse = + PostFalAiImageAppsV2StyleTransferResponses[keyof PostFalAiImageAppsV2StyleTransferResponses] + +export type GetFalAiImageAppsV2StyleTransferRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-apps-v2/style-transfer/requests/{request_id}' +} + +export type GetFalAiImageAppsV2StyleTransferRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImageAppsV2StyleTransferOutput +} + +export type GetFalAiImageAppsV2StyleTransferRequestsByRequestIdResponse = + GetFalAiImageAppsV2StyleTransferRequestsByRequestIdResponses[keyof GetFalAiImageAppsV2StyleTransferRequestsByRequestIdResponses] + +export type GetFalAiImageAppsV2PhotoRestorationRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-apps-v2/photo-restoration/requests/{request_id}/status' +} + +export type GetFalAiImageAppsV2PhotoRestorationRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiImageAppsV2PhotoRestorationRequestsByRequestIdStatusResponse = + GetFalAiImageAppsV2PhotoRestorationRequestsByRequestIdStatusResponses[keyof GetFalAiImageAppsV2PhotoRestorationRequestsByRequestIdStatusResponses] + +export type PutFalAiImageAppsV2PhotoRestorationRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-apps-v2/photo-restoration/requests/{request_id}/cancel' +} + +export type PutFalAiImageAppsV2PhotoRestorationRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiImageAppsV2PhotoRestorationRequestsByRequestIdCancelResponse = + PutFalAiImageAppsV2PhotoRestorationRequestsByRequestIdCancelResponses[keyof PutFalAiImageAppsV2PhotoRestorationRequestsByRequestIdCancelResponses] + +export type PostFalAiImageAppsV2PhotoRestorationData = { + body: SchemaImageAppsV2PhotoRestorationInput + path?: never + query?: never + url: '/fal-ai/image-apps-v2/photo-restoration' +} + +export type PostFalAiImageAppsV2PhotoRestorationResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageAppsV2PhotoRestorationResponse = + PostFalAiImageAppsV2PhotoRestorationResponses[keyof PostFalAiImageAppsV2PhotoRestorationResponses] + +export type GetFalAiImageAppsV2PhotoRestorationRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-apps-v2/photo-restoration/requests/{request_id}' +} + +export type GetFalAiImageAppsV2PhotoRestorationRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImageAppsV2PhotoRestorationOutput +} + +export type GetFalAiImageAppsV2PhotoRestorationRequestsByRequestIdResponse = + GetFalAiImageAppsV2PhotoRestorationRequestsByRequestIdResponses[keyof GetFalAiImageAppsV2PhotoRestorationRequestsByRequestIdResponses] + +export type GetFalAiImageAppsV2PortraitEnhanceRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-apps-v2/portrait-enhance/requests/{request_id}/status' +} + +export type GetFalAiImageAppsV2PortraitEnhanceRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiImageAppsV2PortraitEnhanceRequestsByRequestIdStatusResponse = + GetFalAiImageAppsV2PortraitEnhanceRequestsByRequestIdStatusResponses[keyof GetFalAiImageAppsV2PortraitEnhanceRequestsByRequestIdStatusResponses] + +export type PutFalAiImageAppsV2PortraitEnhanceRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-apps-v2/portrait-enhance/requests/{request_id}/cancel' +} + +export type PutFalAiImageAppsV2PortraitEnhanceRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiImageAppsV2PortraitEnhanceRequestsByRequestIdCancelResponse = + PutFalAiImageAppsV2PortraitEnhanceRequestsByRequestIdCancelResponses[keyof PutFalAiImageAppsV2PortraitEnhanceRequestsByRequestIdCancelResponses] + +export type PostFalAiImageAppsV2PortraitEnhanceData = { + body: SchemaImageAppsV2PortraitEnhanceInput + path?: never + query?: never + url: '/fal-ai/image-apps-v2/portrait-enhance' +} + +export type PostFalAiImageAppsV2PortraitEnhanceResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageAppsV2PortraitEnhanceResponse = + PostFalAiImageAppsV2PortraitEnhanceResponses[keyof PostFalAiImageAppsV2PortraitEnhanceResponses] + +export type GetFalAiImageAppsV2PortraitEnhanceRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-apps-v2/portrait-enhance/requests/{request_id}' +} + +export type GetFalAiImageAppsV2PortraitEnhanceRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImageAppsV2PortraitEnhanceOutput +} + +export type GetFalAiImageAppsV2PortraitEnhanceRequestsByRequestIdResponse = + GetFalAiImageAppsV2PortraitEnhanceRequestsByRequestIdResponses[keyof GetFalAiImageAppsV2PortraitEnhanceRequestsByRequestIdResponses] + +export type GetFalAiImageAppsV2PhotographyEffectsRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-apps-v2/photography-effects/requests/{request_id}/status' + } + +export type GetFalAiImageAppsV2PhotographyEffectsRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiImageAppsV2PhotographyEffectsRequestsByRequestIdStatusResponse = + GetFalAiImageAppsV2PhotographyEffectsRequestsByRequestIdStatusResponses[keyof GetFalAiImageAppsV2PhotographyEffectsRequestsByRequestIdStatusResponses] + +export type PutFalAiImageAppsV2PhotographyEffectsRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-apps-v2/photography-effects/requests/{request_id}/cancel' + } + +export type PutFalAiImageAppsV2PhotographyEffectsRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiImageAppsV2PhotographyEffectsRequestsByRequestIdCancelResponse = + PutFalAiImageAppsV2PhotographyEffectsRequestsByRequestIdCancelResponses[keyof PutFalAiImageAppsV2PhotographyEffectsRequestsByRequestIdCancelResponses] + +export type PostFalAiImageAppsV2PhotographyEffectsData = { + body: SchemaImageAppsV2PhotographyEffectsInput + path?: never + query?: never + url: '/fal-ai/image-apps-v2/photography-effects' +} + +export type PostFalAiImageAppsV2PhotographyEffectsResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageAppsV2PhotographyEffectsResponse = + PostFalAiImageAppsV2PhotographyEffectsResponses[keyof PostFalAiImageAppsV2PhotographyEffectsResponses] + +export type GetFalAiImageAppsV2PhotographyEffectsRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-apps-v2/photography-effects/requests/{request_id}' +} + +export type GetFalAiImageAppsV2PhotographyEffectsRequestsByRequestIdResponses = + { + /** + * Result of the request. 
+ */ + 200: SchemaImageAppsV2PhotographyEffectsOutput + } + +export type GetFalAiImageAppsV2PhotographyEffectsRequestsByRequestIdResponse = + GetFalAiImageAppsV2PhotographyEffectsRequestsByRequestIdResponses[keyof GetFalAiImageAppsV2PhotographyEffectsRequestsByRequestIdResponses] + +export type GetFalAiImageAppsV2PerspectiveRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-apps-v2/perspective/requests/{request_id}/status' +} + +export type GetFalAiImageAppsV2PerspectiveRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiImageAppsV2PerspectiveRequestsByRequestIdStatusResponse = + GetFalAiImageAppsV2PerspectiveRequestsByRequestIdStatusResponses[keyof GetFalAiImageAppsV2PerspectiveRequestsByRequestIdStatusResponses] + +export type PutFalAiImageAppsV2PerspectiveRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-apps-v2/perspective/requests/{request_id}/cancel' +} + +export type PutFalAiImageAppsV2PerspectiveRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiImageAppsV2PerspectiveRequestsByRequestIdCancelResponse = + PutFalAiImageAppsV2PerspectiveRequestsByRequestIdCancelResponses[keyof PutFalAiImageAppsV2PerspectiveRequestsByRequestIdCancelResponses] + +export type PostFalAiImageAppsV2PerspectiveData = { + body: SchemaImageAppsV2PerspectiveInput + path?: never + query?: never + url: '/fal-ai/image-apps-v2/perspective' +} + +export type PostFalAiImageAppsV2PerspectiveResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageAppsV2PerspectiveResponse = + PostFalAiImageAppsV2PerspectiveResponses[keyof PostFalAiImageAppsV2PerspectiveResponses] + +export type GetFalAiImageAppsV2PerspectiveRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-apps-v2/perspective/requests/{request_id}' +} + +export type GetFalAiImageAppsV2PerspectiveRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImageAppsV2PerspectiveOutput +} + +export type GetFalAiImageAppsV2PerspectiveRequestsByRequestIdResponse = + GetFalAiImageAppsV2PerspectiveRequestsByRequestIdResponses[keyof GetFalAiImageAppsV2PerspectiveRequestsByRequestIdResponses] + +export type GetFalAiImageAppsV2ObjectRemovalRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-apps-v2/object-removal/requests/{request_id}/status' +} + +export type GetFalAiImageAppsV2ObjectRemovalRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiImageAppsV2ObjectRemovalRequestsByRequestIdStatusResponse = + GetFalAiImageAppsV2ObjectRemovalRequestsByRequestIdStatusResponses[keyof GetFalAiImageAppsV2ObjectRemovalRequestsByRequestIdStatusResponses] + +export type PutFalAiImageAppsV2ObjectRemovalRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-apps-v2/object-removal/requests/{request_id}/cancel' +} + +export type PutFalAiImageAppsV2ObjectRemovalRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiImageAppsV2ObjectRemovalRequestsByRequestIdCancelResponse = + PutFalAiImageAppsV2ObjectRemovalRequestsByRequestIdCancelResponses[keyof PutFalAiImageAppsV2ObjectRemovalRequestsByRequestIdCancelResponses] + +export type PostFalAiImageAppsV2ObjectRemovalData = { + body: SchemaImageAppsV2ObjectRemovalInput + path?: never + query?: never + url: '/fal-ai/image-apps-v2/object-removal' +} + +export type PostFalAiImageAppsV2ObjectRemovalResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageAppsV2ObjectRemovalResponse = + PostFalAiImageAppsV2ObjectRemovalResponses[keyof PostFalAiImageAppsV2ObjectRemovalResponses] + +export type GetFalAiImageAppsV2ObjectRemovalRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-apps-v2/object-removal/requests/{request_id}' +} + +export type GetFalAiImageAppsV2ObjectRemovalRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImageAppsV2ObjectRemovalOutput +} + +export type GetFalAiImageAppsV2ObjectRemovalRequestsByRequestIdResponse = + GetFalAiImageAppsV2ObjectRemovalRequestsByRequestIdResponses[keyof GetFalAiImageAppsV2ObjectRemovalRequestsByRequestIdResponses] + +export type GetFalAiImageAppsV2HeadshotPhotoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-apps-v2/headshot-photo/requests/{request_id}/status' +} + +export type GetFalAiImageAppsV2HeadshotPhotoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiImageAppsV2HeadshotPhotoRequestsByRequestIdStatusResponse = + GetFalAiImageAppsV2HeadshotPhotoRequestsByRequestIdStatusResponses[keyof GetFalAiImageAppsV2HeadshotPhotoRequestsByRequestIdStatusResponses] + +export type PutFalAiImageAppsV2HeadshotPhotoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-apps-v2/headshot-photo/requests/{request_id}/cancel' +} + +export type PutFalAiImageAppsV2HeadshotPhotoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiImageAppsV2HeadshotPhotoRequestsByRequestIdCancelResponse = + PutFalAiImageAppsV2HeadshotPhotoRequestsByRequestIdCancelResponses[keyof PutFalAiImageAppsV2HeadshotPhotoRequestsByRequestIdCancelResponses] + +export type PostFalAiImageAppsV2HeadshotPhotoData = { + body: SchemaImageAppsV2HeadshotPhotoInput + path?: never + query?: never + url: '/fal-ai/image-apps-v2/headshot-photo' +} + +export type PostFalAiImageAppsV2HeadshotPhotoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageAppsV2HeadshotPhotoResponse = + PostFalAiImageAppsV2HeadshotPhotoResponses[keyof PostFalAiImageAppsV2HeadshotPhotoResponses] + +export type GetFalAiImageAppsV2HeadshotPhotoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-apps-v2/headshot-photo/requests/{request_id}' +} + +export type GetFalAiImageAppsV2HeadshotPhotoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImageAppsV2HeadshotPhotoOutput +} + +export type GetFalAiImageAppsV2HeadshotPhotoRequestsByRequestIdResponse = + GetFalAiImageAppsV2HeadshotPhotoRequestsByRequestIdResponses[keyof GetFalAiImageAppsV2HeadshotPhotoRequestsByRequestIdResponses] + +export type GetFalAiImageAppsV2HairChangeRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-apps-v2/hair-change/requests/{request_id}/status' +} + +export type GetFalAiImageAppsV2HairChangeRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiImageAppsV2HairChangeRequestsByRequestIdStatusResponse = + GetFalAiImageAppsV2HairChangeRequestsByRequestIdStatusResponses[keyof GetFalAiImageAppsV2HairChangeRequestsByRequestIdStatusResponses] + +export type PutFalAiImageAppsV2HairChangeRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-apps-v2/hair-change/requests/{request_id}/cancel' +} + +export type PutFalAiImageAppsV2HairChangeRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiImageAppsV2HairChangeRequestsByRequestIdCancelResponse = + PutFalAiImageAppsV2HairChangeRequestsByRequestIdCancelResponses[keyof PutFalAiImageAppsV2HairChangeRequestsByRequestIdCancelResponses] + +export type PostFalAiImageAppsV2HairChangeData = { + body: SchemaImageAppsV2HairChangeInput + path?: never + query?: never + url: '/fal-ai/image-apps-v2/hair-change' +} + +export type PostFalAiImageAppsV2HairChangeResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageAppsV2HairChangeResponse = + PostFalAiImageAppsV2HairChangeResponses[keyof PostFalAiImageAppsV2HairChangeResponses] + +export type GetFalAiImageAppsV2HairChangeRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-apps-v2/hair-change/requests/{request_id}' +} + +export type GetFalAiImageAppsV2HairChangeRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaImageAppsV2HairChangeOutput +} + +export type GetFalAiImageAppsV2HairChangeRequestsByRequestIdResponse = + GetFalAiImageAppsV2HairChangeRequestsByRequestIdResponses[keyof GetFalAiImageAppsV2HairChangeRequestsByRequestIdResponses] + +export type GetFalAiImageAppsV2ExpressionChangeRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-apps-v2/expression-change/requests/{request_id}/status' +} + +export type GetFalAiImageAppsV2ExpressionChangeRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiImageAppsV2ExpressionChangeRequestsByRequestIdStatusResponse = + GetFalAiImageAppsV2ExpressionChangeRequestsByRequestIdStatusResponses[keyof GetFalAiImageAppsV2ExpressionChangeRequestsByRequestIdStatusResponses] + +export type PutFalAiImageAppsV2ExpressionChangeRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-apps-v2/expression-change/requests/{request_id}/cancel' +} + +export type PutFalAiImageAppsV2ExpressionChangeRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiImageAppsV2ExpressionChangeRequestsByRequestIdCancelResponse = + PutFalAiImageAppsV2ExpressionChangeRequestsByRequestIdCancelResponses[keyof PutFalAiImageAppsV2ExpressionChangeRequestsByRequestIdCancelResponses] + +export type PostFalAiImageAppsV2ExpressionChangeData = { + body: SchemaImageAppsV2ExpressionChangeInput + path?: never + query?: never + url: '/fal-ai/image-apps-v2/expression-change' +} + +export type PostFalAiImageAppsV2ExpressionChangeResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageAppsV2ExpressionChangeResponse = + PostFalAiImageAppsV2ExpressionChangeResponses[keyof PostFalAiImageAppsV2ExpressionChangeResponses] + +export type GetFalAiImageAppsV2ExpressionChangeRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-apps-v2/expression-change/requests/{request_id}' +} + +export type GetFalAiImageAppsV2ExpressionChangeRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImageAppsV2ExpressionChangeOutput +} + +export type GetFalAiImageAppsV2ExpressionChangeRequestsByRequestIdResponse = + GetFalAiImageAppsV2ExpressionChangeRequestsByRequestIdResponses[keyof GetFalAiImageAppsV2ExpressionChangeRequestsByRequestIdResponses] + +export type GetFalAiImageAppsV2CityTeleportRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-apps-v2/city-teleport/requests/{request_id}/status' +} + +export type GetFalAiImageAppsV2CityTeleportRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiImageAppsV2CityTeleportRequestsByRequestIdStatusResponse = + GetFalAiImageAppsV2CityTeleportRequestsByRequestIdStatusResponses[keyof GetFalAiImageAppsV2CityTeleportRequestsByRequestIdStatusResponses] + +export type PutFalAiImageAppsV2CityTeleportRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-apps-v2/city-teleport/requests/{request_id}/cancel' +} + +export type PutFalAiImageAppsV2CityTeleportRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiImageAppsV2CityTeleportRequestsByRequestIdCancelResponse = + PutFalAiImageAppsV2CityTeleportRequestsByRequestIdCancelResponses[keyof PutFalAiImageAppsV2CityTeleportRequestsByRequestIdCancelResponses] + +export type PostFalAiImageAppsV2CityTeleportData = { + body: SchemaImageAppsV2CityTeleportInput + path?: never + query?: never + url: '/fal-ai/image-apps-v2/city-teleport' +} + +export type PostFalAiImageAppsV2CityTeleportResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageAppsV2CityTeleportResponse = + PostFalAiImageAppsV2CityTeleportResponses[keyof PostFalAiImageAppsV2CityTeleportResponses] + +export type GetFalAiImageAppsV2CityTeleportRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-apps-v2/city-teleport/requests/{request_id}' +} + +export type GetFalAiImageAppsV2CityTeleportRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImageAppsV2CityTeleportOutput +} + +export type GetFalAiImageAppsV2CityTeleportRequestsByRequestIdResponse = + GetFalAiImageAppsV2CityTeleportRequestsByRequestIdResponses[keyof GetFalAiImageAppsV2CityTeleportRequestsByRequestIdResponses] + +export type GetFalAiImageAppsV2AgeModifyRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-apps-v2/age-modify/requests/{request_id}/status' +} + +export type GetFalAiImageAppsV2AgeModifyRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiImageAppsV2AgeModifyRequestsByRequestIdStatusResponse = + GetFalAiImageAppsV2AgeModifyRequestsByRequestIdStatusResponses[keyof GetFalAiImageAppsV2AgeModifyRequestsByRequestIdStatusResponses] + +export type PutFalAiImageAppsV2AgeModifyRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-apps-v2/age-modify/requests/{request_id}/cancel' +} + +export type PutFalAiImageAppsV2AgeModifyRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiImageAppsV2AgeModifyRequestsByRequestIdCancelResponse = + PutFalAiImageAppsV2AgeModifyRequestsByRequestIdCancelResponses[keyof PutFalAiImageAppsV2AgeModifyRequestsByRequestIdCancelResponses] + +export type PostFalAiImageAppsV2AgeModifyData = { + body: SchemaImageAppsV2AgeModifyInput + path?: never + query?: never + url: '/fal-ai/image-apps-v2/age-modify' +} + +export type PostFalAiImageAppsV2AgeModifyResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageAppsV2AgeModifyResponse = + PostFalAiImageAppsV2AgeModifyResponses[keyof PostFalAiImageAppsV2AgeModifyResponses] + +export type GetFalAiImageAppsV2AgeModifyRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-apps-v2/age-modify/requests/{request_id}' +} + +export type GetFalAiImageAppsV2AgeModifyRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImageAppsV2AgeModifyOutput +} + +export type GetFalAiImageAppsV2AgeModifyRequestsByRequestIdResponse = + GetFalAiImageAppsV2AgeModifyRequestsByRequestIdResponses[keyof GetFalAiImageAppsV2AgeModifyRequestsByRequestIdResponses] + +export type GetFalAiImageAppsV2MakeupApplicationRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-apps-v2/makeup-application/requests/{request_id}/status' + } + +export type GetFalAiImageAppsV2MakeupApplicationRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiImageAppsV2MakeupApplicationRequestsByRequestIdStatusResponse = + GetFalAiImageAppsV2MakeupApplicationRequestsByRequestIdStatusResponses[keyof GetFalAiImageAppsV2MakeupApplicationRequestsByRequestIdStatusResponses] + +export type PutFalAiImageAppsV2MakeupApplicationRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-apps-v2/makeup-application/requests/{request_id}/cancel' + } + +export type PutFalAiImageAppsV2MakeupApplicationRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiImageAppsV2MakeupApplicationRequestsByRequestIdCancelResponse = + PutFalAiImageAppsV2MakeupApplicationRequestsByRequestIdCancelResponses[keyof PutFalAiImageAppsV2MakeupApplicationRequestsByRequestIdCancelResponses] + +export type PostFalAiImageAppsV2MakeupApplicationData = { + body: SchemaImageAppsV2MakeupApplicationInput + path?: never + query?: never + url: '/fal-ai/image-apps-v2/makeup-application' +} + +export type PostFalAiImageAppsV2MakeupApplicationResponses = { + /** + * The request status. 
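+   *
+   * Note that submitting only enqueues the request; the payload is fetched
+   * later from the result endpoint. A minimal polling sketch (assumes fal's
+   * queue base URL `https://queue.fal.run` and the `Authorization: Key <FAL_KEY>`
+   * header scheme; the client wiring here is illustrative, not part of this
+   * generated schema):
+   *
+   * @example
+   * const submit = await fetch(
+   *   'https://queue.fal.run/fal-ai/image-apps-v2/makeup-application',
+   *   {
+   *     method: 'POST',
+   *     headers: {
+   *       'Authorization': `Key ${process.env.FAL_KEY}`,
+   *       'Content-Type': 'application/json',
+   *     },
+   *     body: JSON.stringify(input), // SchemaImageAppsV2MakeupApplicationInput
+   *   },
+   * )
+   * const status: SchemaQueueStatus = await submit.json()
+   * // Poll status.status_url until status.status === 'COMPLETED',
+   * // then GET status.response_url for the result payload.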
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageAppsV2MakeupApplicationResponse = + PostFalAiImageAppsV2MakeupApplicationResponses[keyof PostFalAiImageAppsV2MakeupApplicationResponses] + +export type GetFalAiImageAppsV2MakeupApplicationRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-apps-v2/makeup-application/requests/{request_id}' +} + +export type GetFalAiImageAppsV2MakeupApplicationRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImageAppsV2MakeupApplicationOutput +} + +export type GetFalAiImageAppsV2MakeupApplicationRequestsByRequestIdResponse = + GetFalAiImageAppsV2MakeupApplicationRequestsByRequestIdResponses[keyof GetFalAiImageAppsV2MakeupApplicationRequestsByRequestIdResponses] + +export type GetFalAiQwenImageEditInpaintRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-edit/inpaint/requests/{request_id}/status' +} + +export type GetFalAiQwenImageEditInpaintRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiQwenImageEditInpaintRequestsByRequestIdStatusResponse = + GetFalAiQwenImageEditInpaintRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageEditInpaintRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageEditInpaintRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit/inpaint/requests/{request_id}/cancel' +} + +export type PutFalAiQwenImageEditInpaintRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiQwenImageEditInpaintRequestsByRequestIdCancelResponse = + PutFalAiQwenImageEditInpaintRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageEditInpaintRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageEditInpaintData = { + body: SchemaQwenImageEditInpaintInput + path?: never + query?: never + url: '/fal-ai/qwen-image-edit/inpaint' +} + +export type PostFalAiQwenImageEditInpaintResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImageEditInpaintResponse = + PostFalAiQwenImageEditInpaintResponses[keyof PostFalAiQwenImageEditInpaintResponses] + +export type GetFalAiQwenImageEditInpaintRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit/inpaint/requests/{request_id}' +} + +export type GetFalAiQwenImageEditInpaintRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaQwenImageEditInpaintOutput +} + +export type GetFalAiQwenImageEditInpaintRequestsByRequestIdResponse = + GetFalAiQwenImageEditInpaintRequestsByRequestIdResponses[keyof GetFalAiQwenImageEditInpaintRequestsByRequestIdResponses] + +export type GetFalAiFluxSrpoImageToImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/flux/srpo/image-to-image/requests/{request_id}/status' +} + +export type GetFalAiFluxSrpoImageToImageRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxSrpoImageToImageRequestsByRequestIdStatusResponse = + GetFalAiFluxSrpoImageToImageRequestsByRequestIdStatusResponses[keyof GetFalAiFluxSrpoImageToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxSrpoImageToImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux/srpo/image-to-image/requests/{request_id}/cancel' +} + +export type PutFalAiFluxSrpoImageToImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFluxSrpoImageToImageRequestsByRequestIdCancelResponse = + PutFalAiFluxSrpoImageToImageRequestsByRequestIdCancelResponses[keyof PutFalAiFluxSrpoImageToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxSrpoImageToImageData = { + body: SchemaFluxSrpoImageToImageInput + path?: never + query?: never + url: '/fal-ai/flux/srpo/image-to-image' +} + +export type PostFalAiFluxSrpoImageToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxSrpoImageToImageResponse = + PostFalAiFluxSrpoImageToImageResponses[keyof PostFalAiFluxSrpoImageToImageResponses] + +export type GetFalAiFluxSrpoImageToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux/srpo/image-to-image/requests/{request_id}' +} + +export type GetFalAiFluxSrpoImageToImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxSrpoImageToImageOutput +} + +export type GetFalAiFluxSrpoImageToImageRequestsByRequestIdResponse = + GetFalAiFluxSrpoImageToImageRequestsByRequestIdResponses[keyof GetFalAiFluxSrpoImageToImageRequestsByRequestIdResponses] + +export type GetFalAiFlux1SrpoImageToImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-1/srpo/image-to-image/requests/{request_id}/status' +} + +export type GetFalAiFlux1SrpoImageToImageRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlux1SrpoImageToImageRequestsByRequestIdStatusResponse = + GetFalAiFlux1SrpoImageToImageRequestsByRequestIdStatusResponses[keyof GetFalAiFlux1SrpoImageToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux1SrpoImageToImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-1/srpo/image-to-image/requests/{request_id}/cancel' +} + +export type PutFalAiFlux1SrpoImageToImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiFlux1SrpoImageToImageRequestsByRequestIdCancelResponse = + PutFalAiFlux1SrpoImageToImageRequestsByRequestIdCancelResponses[keyof PutFalAiFlux1SrpoImageToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux1SrpoImageToImageData = { + body: SchemaFlux1SrpoImageToImageInput + path?: never + query?: never + url: '/fal-ai/flux-1/srpo/image-to-image' +} + +export type PostFalAiFlux1SrpoImageToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux1SrpoImageToImageResponse = + PostFalAiFlux1SrpoImageToImageResponses[keyof PostFalAiFlux1SrpoImageToImageResponses] + +export type GetFalAiFlux1SrpoImageToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-1/srpo/image-to-image/requests/{request_id}' +} + +export type GetFalAiFlux1SrpoImageToImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux1SrpoImageToImageOutput +} + +export type GetFalAiFlux1SrpoImageToImageRequestsByRequestIdResponse = + GetFalAiFlux1SrpoImageToImageRequestsByRequestIdResponses[keyof GetFalAiFlux1SrpoImageToImageRequestsByRequestIdResponses] + +export type GetFalAiQwenImageEditLoraRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-edit-lora/requests/{request_id}/status' +} + +export type GetFalAiQwenImageEditLoraRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiQwenImageEditLoraRequestsByRequestIdStatusResponse = + GetFalAiQwenImageEditLoraRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageEditLoraRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageEditLoraRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-lora/requests/{request_id}/cancel' +} + +export type PutFalAiQwenImageEditLoraRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiQwenImageEditLoraRequestsByRequestIdCancelResponse = + PutFalAiQwenImageEditLoraRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageEditLoraRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageEditLoraData = { + body: SchemaQwenImageEditLoraInput + path?: never + query?: never + url: '/fal-ai/qwen-image-edit-lora' +} + +export type PostFalAiQwenImageEditLoraResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImageEditLoraResponse = + PostFalAiQwenImageEditLoraResponses[keyof PostFalAiQwenImageEditLoraResponses] + +export type GetFalAiQwenImageEditLoraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-lora/requests/{request_id}' +} + +export type GetFalAiQwenImageEditLoraRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaQwenImageEditLoraOutput +} + +export type GetFalAiQwenImageEditLoraRequestsByRequestIdResponse = + GetFalAiQwenImageEditLoraRequestsByRequestIdResponses[keyof GetFalAiQwenImageEditLoraRequestsByRequestIdResponses] + +export type GetFalAiViduReferenceToImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/vidu/reference-to-image/requests/{request_id}/status' +} + +export type GetFalAiViduReferenceToImageRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiViduReferenceToImageRequestsByRequestIdStatusResponse = + GetFalAiViduReferenceToImageRequestsByRequestIdStatusResponses[keyof GetFalAiViduReferenceToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiViduReferenceToImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/vidu/reference-to-image/requests/{request_id}/cancel' +} + +export type PutFalAiViduReferenceToImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiViduReferenceToImageRequestsByRequestIdCancelResponse = + PutFalAiViduReferenceToImageRequestsByRequestIdCancelResponses[keyof PutFalAiViduReferenceToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiViduReferenceToImageData = { + body: SchemaViduReferenceToImageInput + path?: never + query?: never + url: '/fal-ai/vidu/reference-to-image' +} + +export type PostFalAiViduReferenceToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiViduReferenceToImageResponse = + PostFalAiViduReferenceToImageResponses[keyof PostFalAiViduReferenceToImageResponses] + +export type GetFalAiViduReferenceToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/vidu/reference-to-image/requests/{request_id}' +} + +export type GetFalAiViduReferenceToImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaViduReferenceToImageOutput +} + +export type GetFalAiViduReferenceToImageRequestsByRequestIdResponse = + GetFalAiViduReferenceToImageRequestsByRequestIdResponses[keyof GetFalAiViduReferenceToImageRequestsByRequestIdResponses] + +export type GetFalAiBytedanceSeedreamV4EditRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/bytedance/seedream/v4/edit/requests/{request_id}/status' +} + +export type GetFalAiBytedanceSeedreamV4EditRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiBytedanceSeedreamV4EditRequestsByRequestIdStatusResponse = + GetFalAiBytedanceSeedreamV4EditRequestsByRequestIdStatusResponses[keyof GetFalAiBytedanceSeedreamV4EditRequestsByRequestIdStatusResponses] + +export type PutFalAiBytedanceSeedreamV4EditRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bytedance/seedream/v4/edit/requests/{request_id}/cancel' +} + +export type PutFalAiBytedanceSeedreamV4EditRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiBytedanceSeedreamV4EditRequestsByRequestIdCancelResponse = + PutFalAiBytedanceSeedreamV4EditRequestsByRequestIdCancelResponses[keyof PutFalAiBytedanceSeedreamV4EditRequestsByRequestIdCancelResponses] + +export type PostFalAiBytedanceSeedreamV4EditData = { + body: SchemaBytedanceSeedreamV4EditInput + path?: never + query?: never + url: '/fal-ai/bytedance/seedream/v4/edit' +} + +export type PostFalAiBytedanceSeedreamV4EditResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiBytedanceSeedreamV4EditResponse = + PostFalAiBytedanceSeedreamV4EditResponses[keyof PostFalAiBytedanceSeedreamV4EditResponses] + +export type GetFalAiBytedanceSeedreamV4EditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bytedance/seedream/v4/edit/requests/{request_id}' +} + +export type GetFalAiBytedanceSeedreamV4EditRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaBytedanceSeedreamV4EditOutput +} + +export type GetFalAiBytedanceSeedreamV4EditRequestsByRequestIdResponse = + GetFalAiBytedanceSeedreamV4EditRequestsByRequestIdResponses[keyof GetFalAiBytedanceSeedreamV4EditRequestsByRequestIdResponses] + +export type GetFalAiWanV22A14bImageToImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan/v2.2-a14b/image-to-image/requests/{request_id}/status' +} + +export type GetFalAiWanV22A14bImageToImageRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiWanV22A14bImageToImageRequestsByRequestIdStatusResponse = + GetFalAiWanV22A14bImageToImageRequestsByRequestIdStatusResponses[keyof GetFalAiWanV22A14bImageToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiWanV22A14bImageToImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan/v2.2-a14b/image-to-image/requests/{request_id}/cancel' +} + +export type PutFalAiWanV22A14bImageToImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
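+   *
+   * A minimal cancel sketch (same assumed base URL and key scheme as noted
+   * on the submit types; whether cancellation can still succeed once a
+   * request is IN_PROGRESS is not specified by this schema):
+   *
+   * @example
+   * // requestId comes from the SchemaQueueStatus returned on submit.
+   * const res = await fetch(
+   *   `https://queue.fal.run/fal-ai/wan/v2.2-a14b/image-to-image/requests/${requestId}/cancel`,
+   *   { method: 'PUT', headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+   * )
+   * const { success } = await res.json()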
+ */ + success?: boolean + } +} + +export type PutFalAiWanV22A14bImageToImageRequestsByRequestIdCancelResponse = + PutFalAiWanV22A14bImageToImageRequestsByRequestIdCancelResponses[keyof PutFalAiWanV22A14bImageToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiWanV22A14bImageToImageData = { + body: SchemaWanV22A14bImageToImageInput + path?: never + query?: never + url: '/fal-ai/wan/v2.2-a14b/image-to-image' +} + +export type PostFalAiWanV22A14bImageToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanV22A14bImageToImageResponse = + PostFalAiWanV22A14bImageToImageResponses[keyof PostFalAiWanV22A14bImageToImageResponses] + +export type GetFalAiWanV22A14bImageToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan/v2.2-a14b/image-to-image/requests/{request_id}' +} + +export type GetFalAiWanV22A14bImageToImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWanV22A14bImageToImageOutput +} + +export type GetFalAiWanV22A14bImageToImageRequestsByRequestIdResponse = + GetFalAiWanV22A14bImageToImageRequestsByRequestIdResponses[keyof GetFalAiWanV22A14bImageToImageRequestsByRequestIdResponses] + +export type GetFalAiUsoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/uso/requests/{request_id}/status' +} + +export type GetFalAiUsoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiUsoRequestsByRequestIdStatusResponse = + GetFalAiUsoRequestsByRequestIdStatusResponses[keyof GetFalAiUsoRequestsByRequestIdStatusResponses] + +export type PutFalAiUsoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/uso/requests/{request_id}/cancel' +} + +export type PutFalAiUsoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiUsoRequestsByRequestIdCancelResponse = + PutFalAiUsoRequestsByRequestIdCancelResponses[keyof PutFalAiUsoRequestsByRequestIdCancelResponses] + +export type PostFalAiUsoData = { + body: SchemaUsoInput + path?: never + query?: never + url: '/fal-ai/uso' +} + +export type PostFalAiUsoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiUsoResponse = + PostFalAiUsoResponses[keyof PostFalAiUsoResponses] + +export type GetFalAiUsoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/uso/requests/{request_id}' +} + +export type GetFalAiUsoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaUsoOutput +} + +export type GetFalAiUsoRequestsByRequestIdResponse = + GetFalAiUsoRequestsByRequestIdResponses[keyof GetFalAiUsoRequestsByRequestIdResponses] + +export type GetFalAiGemini25FlashImageEditRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/gemini-25-flash-image/edit/requests/{request_id}/status' +} + +export type GetFalAiGemini25FlashImageEditRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiGemini25FlashImageEditRequestsByRequestIdStatusResponse = + GetFalAiGemini25FlashImageEditRequestsByRequestIdStatusResponses[keyof GetFalAiGemini25FlashImageEditRequestsByRequestIdStatusResponses] + +export type PutFalAiGemini25FlashImageEditRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/gemini-25-flash-image/edit/requests/{request_id}/cancel' +} + +export type PutFalAiGemini25FlashImageEditRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiGemini25FlashImageEditRequestsByRequestIdCancelResponse = + PutFalAiGemini25FlashImageEditRequestsByRequestIdCancelResponses[keyof PutFalAiGemini25FlashImageEditRequestsByRequestIdCancelResponses] + +export type PostFalAiGemini25FlashImageEditData = { + body: SchemaGemini25FlashImageEditInput + path?: never + query?: never + url: '/fal-ai/gemini-25-flash-image/edit' +} + +export type PostFalAiGemini25FlashImageEditResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiGemini25FlashImageEditResponse = + PostFalAiGemini25FlashImageEditResponses[keyof PostFalAiGemini25FlashImageEditResponses] + +export type GetFalAiGemini25FlashImageEditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/gemini-25-flash-image/edit/requests/{request_id}' +} + +export type GetFalAiGemini25FlashImageEditRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaGemini25FlashImageEditOutput +} + +export type GetFalAiGemini25FlashImageEditRequestsByRequestIdResponse = + GetFalAiGemini25FlashImageEditRequestsByRequestIdResponses[keyof GetFalAiGemini25FlashImageEditRequestsByRequestIdResponses] + +export type GetFalAiQwenImageImageToImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image/image-to-image/requests/{request_id}/status' +} + +export type GetFalAiQwenImageImageToImageRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiQwenImageImageToImageRequestsByRequestIdStatusResponse = + GetFalAiQwenImageImageToImageRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageImageToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageImageToImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image/image-to-image/requests/{request_id}/cancel' +} + +export type PutFalAiQwenImageImageToImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiQwenImageImageToImageRequestsByRequestIdCancelResponse = + PutFalAiQwenImageImageToImageRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageImageToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageImageToImageData = { + body: SchemaQwenImageImageToImageInput + path?: never + query?: never + url: '/fal-ai/qwen-image/image-to-image' +} + +export type PostFalAiQwenImageImageToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImageImageToImageResponse = + PostFalAiQwenImageImageToImageResponses[keyof PostFalAiQwenImageImageToImageResponses] + +export type GetFalAiQwenImageImageToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image/image-to-image/requests/{request_id}' +} + +export type GetFalAiQwenImageImageToImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaQwenImageImageToImageOutput +} + +export type GetFalAiQwenImageImageToImageRequestsByRequestIdResponse = + GetFalAiQwenImageImageToImageRequestsByRequestIdResponses[keyof GetFalAiQwenImageImageToImageRequestsByRequestIdResponses] + +export type GetBriaReimagine32RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/bria/reimagine/3.2/requests/{request_id}/status' +} + +export type GetBriaReimagine32RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetBriaReimagine32RequestsByRequestIdStatusResponse = + GetBriaReimagine32RequestsByRequestIdStatusResponses[keyof GetBriaReimagine32RequestsByRequestIdStatusResponses] + +export type PutBriaReimagine32RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/reimagine/3.2/requests/{request_id}/cancel' +} + +export type PutBriaReimagine32RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutBriaReimagine32RequestsByRequestIdCancelResponse = + PutBriaReimagine32RequestsByRequestIdCancelResponses[keyof PutBriaReimagine32RequestsByRequestIdCancelResponses] + +export type PostBriaReimagine32Data = { + body: SchemaReimagine32Input + path?: never + query?: never + url: '/bria/reimagine/3.2' +} + +export type PostBriaReimagine32Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostBriaReimagine32Response = + PostBriaReimagine32Responses[keyof PostBriaReimagine32Responses] + +export type GetBriaReimagine32RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/reimagine/3.2/requests/{request_id}' +} + +export type GetBriaReimagine32RequestsByRequestIdResponses = { + /** + * Result of the request. 
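+   *
+   * Available once the queue status reaches `COMPLETED`. The mapped alias
+   * defined just below extracts the payload type from this record, e.g.:
+   *
+   * @example
+   * // Resolves to SchemaReimagine32Output:
+   * type Reimagine32Result = GetBriaReimagine32RequestsByRequestIdResponse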
+ */ + 200: SchemaReimagine32Output +} + +export type GetBriaReimagine32RequestsByRequestIdResponse = + GetBriaReimagine32RequestsByRequestIdResponses[keyof GetBriaReimagine32RequestsByRequestIdResponses] + +export type GetFalAiNanoBananaEditRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/nano-banana/edit/requests/{request_id}/status' +} + +export type GetFalAiNanoBananaEditRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiNanoBananaEditRequestsByRequestIdStatusResponse = + GetFalAiNanoBananaEditRequestsByRequestIdStatusResponses[keyof GetFalAiNanoBananaEditRequestsByRequestIdStatusResponses] + +export type PutFalAiNanoBananaEditRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/nano-banana/edit/requests/{request_id}/cancel' +} + +export type PutFalAiNanoBananaEditRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiNanoBananaEditRequestsByRequestIdCancelResponse = + PutFalAiNanoBananaEditRequestsByRequestIdCancelResponses[keyof PutFalAiNanoBananaEditRequestsByRequestIdCancelResponses] + +export type PostFalAiNanoBananaEditData = { + body: SchemaNanoBananaEditInput + path?: never + query?: never + url: '/fal-ai/nano-banana/edit' +} + +export type PostFalAiNanoBananaEditResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiNanoBananaEditResponse = + PostFalAiNanoBananaEditResponses[keyof PostFalAiNanoBananaEditResponses] + +export type GetFalAiNanoBananaEditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/nano-banana/edit/requests/{request_id}' +} + +export type GetFalAiNanoBananaEditRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaNanoBananaEditOutput +} + +export type GetFalAiNanoBananaEditRequestsByRequestIdResponse = + GetFalAiNanoBananaEditRequestsByRequestIdResponses[keyof GetFalAiNanoBananaEditRequestsByRequestIdResponses] + +export type GetFalAiNextstep1RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/nextstep-1/requests/{request_id}/status' +} + +export type GetFalAiNextstep1RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiNextstep1RequestsByRequestIdStatusResponse = + GetFalAiNextstep1RequestsByRequestIdStatusResponses[keyof GetFalAiNextstep1RequestsByRequestIdStatusResponses] + +export type PutFalAiNextstep1RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/nextstep-1/requests/{request_id}/cancel' +} + +export type PutFalAiNextstep1RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiNextstep1RequestsByRequestIdCancelResponse = + PutFalAiNextstep1RequestsByRequestIdCancelResponses[keyof PutFalAiNextstep1RequestsByRequestIdCancelResponses] + +export type PostFalAiNextstep1Data = { + body: SchemaNextstep1Input + path?: never + query?: never + url: '/fal-ai/nextstep-1' +} + +export type PostFalAiNextstep1Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiNextstep1Response = + PostFalAiNextstep1Responses[keyof PostFalAiNextstep1Responses] + +export type GetFalAiNextstep1RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/nextstep-1/requests/{request_id}' +} + +export type GetFalAiNextstep1RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaNextstep1Output +} + +export type GetFalAiNextstep1RequestsByRequestIdResponse = + GetFalAiNextstep1RequestsByRequestIdResponses[keyof GetFalAiNextstep1RequestsByRequestIdResponses] + +export type GetFalAiQwenImageEditRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-edit/requests/{request_id}/status' +} + +export type GetFalAiQwenImageEditRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiQwenImageEditRequestsByRequestIdStatusResponse = + GetFalAiQwenImageEditRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageEditRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageEditRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit/requests/{request_id}/cancel' +} + +export type PutFalAiQwenImageEditRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiQwenImageEditRequestsByRequestIdCancelResponse = + PutFalAiQwenImageEditRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageEditRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageEditData = { + body: SchemaQwenImageEditInput + path?: never + query?: never + url: '/fal-ai/qwen-image-edit' +} + +export type PostFalAiQwenImageEditResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImageEditResponse = + PostFalAiQwenImageEditResponses[keyof PostFalAiQwenImageEditResponses] + +export type GetFalAiQwenImageEditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit/requests/{request_id}' +} + +export type GetFalAiQwenImageEditRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaQwenImageEditOutput +} + +export type GetFalAiQwenImageEditRequestsByRequestIdResponse = + GetFalAiQwenImageEditRequestsByRequestIdResponses[keyof GetFalAiQwenImageEditRequestsByRequestIdResponses] + +export type GetFalAiIdeogramCharacterEditRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
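+   * For example, `GET /fal-ai/ideogram/character/edit/requests/{request_id}/status?logs=1`
+   * asks the queue to include its `logs` entries in the returned status object.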
+ */ + logs?: number + } + url: '/fal-ai/ideogram/character/edit/requests/{request_id}/status' +} + +export type GetFalAiIdeogramCharacterEditRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiIdeogramCharacterEditRequestsByRequestIdStatusResponse = + GetFalAiIdeogramCharacterEditRequestsByRequestIdStatusResponses[keyof GetFalAiIdeogramCharacterEditRequestsByRequestIdStatusResponses] + +export type PutFalAiIdeogramCharacterEditRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ideogram/character/edit/requests/{request_id}/cancel' +} + +export type PutFalAiIdeogramCharacterEditRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiIdeogramCharacterEditRequestsByRequestIdCancelResponse = + PutFalAiIdeogramCharacterEditRequestsByRequestIdCancelResponses[keyof PutFalAiIdeogramCharacterEditRequestsByRequestIdCancelResponses] + +export type PostFalAiIdeogramCharacterEditData = { + body: SchemaIdeogramCharacterEditInput + path?: never + query?: never + url: '/fal-ai/ideogram/character/edit' +} + +export type PostFalAiIdeogramCharacterEditResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiIdeogramCharacterEditResponse = + PostFalAiIdeogramCharacterEditResponses[keyof PostFalAiIdeogramCharacterEditResponses] + +export type GetFalAiIdeogramCharacterEditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ideogram/character/edit/requests/{request_id}' +} + +export type GetFalAiIdeogramCharacterEditRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaIdeogramCharacterEditOutput +} + +export type GetFalAiIdeogramCharacterEditRequestsByRequestIdResponse = + GetFalAiIdeogramCharacterEditRequestsByRequestIdResponses[keyof GetFalAiIdeogramCharacterEditRequestsByRequestIdResponses] + +export type GetFalAiIdeogramCharacterRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ideogram/character/requests/{request_id}/status' +} + +export type GetFalAiIdeogramCharacterRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiIdeogramCharacterRequestsByRequestIdStatusResponse = + GetFalAiIdeogramCharacterRequestsByRequestIdStatusResponses[keyof GetFalAiIdeogramCharacterRequestsByRequestIdStatusResponses] + +export type PutFalAiIdeogramCharacterRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ideogram/character/requests/{request_id}/cancel' +} + +export type PutFalAiIdeogramCharacterRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiIdeogramCharacterRequestsByRequestIdCancelResponse = + PutFalAiIdeogramCharacterRequestsByRequestIdCancelResponses[keyof PutFalAiIdeogramCharacterRequestsByRequestIdCancelResponses] + +export type PostFalAiIdeogramCharacterData = { + body: SchemaIdeogramCharacterInput + path?: never + query?: never + url: '/fal-ai/ideogram/character' +} + +export type PostFalAiIdeogramCharacterResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiIdeogramCharacterResponse = + PostFalAiIdeogramCharacterResponses[keyof PostFalAiIdeogramCharacterResponses] + +export type GetFalAiIdeogramCharacterRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ideogram/character/requests/{request_id}' +} + +export type GetFalAiIdeogramCharacterRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaIdeogramCharacterOutput +} + +export type GetFalAiIdeogramCharacterRequestsByRequestIdResponse = + GetFalAiIdeogramCharacterRequestsByRequestIdResponses[keyof GetFalAiIdeogramCharacterRequestsByRequestIdResponses] + +export type GetFalAiIdeogramCharacterRemixRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ideogram/character/remix/requests/{request_id}/status' +} + +export type GetFalAiIdeogramCharacterRemixRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiIdeogramCharacterRemixRequestsByRequestIdStatusResponse = + GetFalAiIdeogramCharacterRemixRequestsByRequestIdStatusResponses[keyof GetFalAiIdeogramCharacterRemixRequestsByRequestIdStatusResponses] + +export type PutFalAiIdeogramCharacterRemixRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ideogram/character/remix/requests/{request_id}/cancel' +} + +export type PutFalAiIdeogramCharacterRemixRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiIdeogramCharacterRemixRequestsByRequestIdCancelResponse = + PutFalAiIdeogramCharacterRemixRequestsByRequestIdCancelResponses[keyof PutFalAiIdeogramCharacterRemixRequestsByRequestIdCancelResponses] + +export type PostFalAiIdeogramCharacterRemixData = { + body: SchemaIdeogramCharacterRemixInput + path?: never + query?: never + url: '/fal-ai/ideogram/character/remix' +} + +export type PostFalAiIdeogramCharacterRemixResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiIdeogramCharacterRemixResponse = + PostFalAiIdeogramCharacterRemixResponses[keyof PostFalAiIdeogramCharacterRemixResponses] + +export type GetFalAiIdeogramCharacterRemixRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ideogram/character/remix/requests/{request_id}' +} + +export type GetFalAiIdeogramCharacterRemixRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaIdeogramCharacterRemixOutput +} + +export type GetFalAiIdeogramCharacterRemixRequestsByRequestIdResponse = + GetFalAiIdeogramCharacterRemixRequestsByRequestIdResponses[keyof GetFalAiIdeogramCharacterRemixRequestsByRequestIdResponses] + +export type GetFalAiFluxKreaLoraInpaintingRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-krea-lora/inpainting/requests/{request_id}/status' +} + +export type GetFalAiFluxKreaLoraInpaintingRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxKreaLoraInpaintingRequestsByRequestIdStatusResponse = + GetFalAiFluxKreaLoraInpaintingRequestsByRequestIdStatusResponses[keyof GetFalAiFluxKreaLoraInpaintingRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxKreaLoraInpaintingRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-krea-lora/inpainting/requests/{request_id}/cancel' +} + +export type PutFalAiFluxKreaLoraInpaintingRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFluxKreaLoraInpaintingRequestsByRequestIdCancelResponse = + PutFalAiFluxKreaLoraInpaintingRequestsByRequestIdCancelResponses[keyof PutFalAiFluxKreaLoraInpaintingRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxKreaLoraInpaintingData = { + body: SchemaFluxKreaLoraInpaintingInput + path?: never + query?: never + url: '/fal-ai/flux-krea-lora/inpainting' +} + +export type PostFalAiFluxKreaLoraInpaintingResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxKreaLoraInpaintingResponse = + PostFalAiFluxKreaLoraInpaintingResponses[keyof PostFalAiFluxKreaLoraInpaintingResponses] + +export type GetFalAiFluxKreaLoraInpaintingRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-krea-lora/inpainting/requests/{request_id}' +} + +export type GetFalAiFluxKreaLoraInpaintingRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxKreaLoraInpaintingOutput +} + +export type GetFalAiFluxKreaLoraInpaintingRequestsByRequestIdResponse = + GetFalAiFluxKreaLoraInpaintingRequestsByRequestIdResponses[keyof GetFalAiFluxKreaLoraInpaintingRequestsByRequestIdResponses] + +export type GetFalAiFluxKreaLoraImageToImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-krea-lora/image-to-image/requests/{request_id}/status' +} + +export type GetFalAiFluxKreaLoraImageToImageRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiFluxKreaLoraImageToImageRequestsByRequestIdStatusResponse = + GetFalAiFluxKreaLoraImageToImageRequestsByRequestIdStatusResponses[keyof GetFalAiFluxKreaLoraImageToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxKreaLoraImageToImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-krea-lora/image-to-image/requests/{request_id}/cancel' +} + +export type PutFalAiFluxKreaLoraImageToImageRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFluxKreaLoraImageToImageRequestsByRequestIdCancelResponse = + PutFalAiFluxKreaLoraImageToImageRequestsByRequestIdCancelResponses[keyof PutFalAiFluxKreaLoraImageToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxKreaLoraImageToImageData = { + body: SchemaFluxKreaLoraImageToImageInput + path?: never + query?: never + url: '/fal-ai/flux-krea-lora/image-to-image' +} + +export type PostFalAiFluxKreaLoraImageToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxKreaLoraImageToImageResponse = + PostFalAiFluxKreaLoraImageToImageResponses[keyof PostFalAiFluxKreaLoraImageToImageResponses] + +export type GetFalAiFluxKreaLoraImageToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-krea-lora/image-to-image/requests/{request_id}' +} + +export type GetFalAiFluxKreaLoraImageToImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxKreaLoraImageToImageOutput +} + +export type GetFalAiFluxKreaLoraImageToImageRequestsByRequestIdResponse = + GetFalAiFluxKreaLoraImageToImageRequestsByRequestIdResponses[keyof GetFalAiFluxKreaLoraImageToImageRequestsByRequestIdResponses] + +export type GetFalAiFluxKreaImageToImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux/krea/image-to-image/requests/{request_id}/status' +} + +export type GetFalAiFluxKreaImageToImageRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxKreaImageToImageRequestsByRequestIdStatusResponse = + GetFalAiFluxKreaImageToImageRequestsByRequestIdStatusResponses[keyof GetFalAiFluxKreaImageToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxKreaImageToImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux/krea/image-to-image/requests/{request_id}/cancel' +} + +export type PutFalAiFluxKreaImageToImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiFluxKreaImageToImageRequestsByRequestIdCancelResponse = + PutFalAiFluxKreaImageToImageRequestsByRequestIdCancelResponses[keyof PutFalAiFluxKreaImageToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxKreaImageToImageData = { + body: SchemaFluxKreaImageToImageInput + path?: never + query?: never + url: '/fal-ai/flux/krea/image-to-image' +} + +export type PostFalAiFluxKreaImageToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxKreaImageToImageResponse = + PostFalAiFluxKreaImageToImageResponses[keyof PostFalAiFluxKreaImageToImageResponses] + +export type GetFalAiFluxKreaImageToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux/krea/image-to-image/requests/{request_id}' +} + +export type GetFalAiFluxKreaImageToImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxKreaImageToImageOutput +} + +export type GetFalAiFluxKreaImageToImageRequestsByRequestIdResponse = + GetFalAiFluxKreaImageToImageRequestsByRequestIdResponses[keyof GetFalAiFluxKreaImageToImageRequestsByRequestIdResponses] + +export type GetFalAiFluxKreaReduxRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux/krea/redux/requests/{request_id}/status' +} + +export type GetFalAiFluxKreaReduxRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxKreaReduxRequestsByRequestIdStatusResponse = + GetFalAiFluxKreaReduxRequestsByRequestIdStatusResponses[keyof GetFalAiFluxKreaReduxRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxKreaReduxRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux/krea/redux/requests/{request_id}/cancel' +} + +export type PutFalAiFluxKreaReduxRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFluxKreaReduxRequestsByRequestIdCancelResponse = + PutFalAiFluxKreaReduxRequestsByRequestIdCancelResponses[keyof PutFalAiFluxKreaReduxRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxKreaReduxData = { + body: SchemaFluxKreaReduxInput + path?: never + query?: never + url: '/fal-ai/flux/krea/redux' +} + +export type PostFalAiFluxKreaReduxResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxKreaReduxResponse = + PostFalAiFluxKreaReduxResponses[keyof PostFalAiFluxKreaReduxResponses] + +export type GetFalAiFluxKreaReduxRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux/krea/redux/requests/{request_id}' +} + +export type GetFalAiFluxKreaReduxRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaFluxKreaReduxOutput +} + +export type GetFalAiFluxKreaReduxRequestsByRequestIdResponse = + GetFalAiFluxKreaReduxRequestsByRequestIdResponses[keyof GetFalAiFluxKreaReduxRequestsByRequestIdResponses] + +export type GetFalAiFlux1KreaImageToImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-1/krea/image-to-image/requests/{request_id}/status' +} + +export type GetFalAiFlux1KreaImageToImageRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlux1KreaImageToImageRequestsByRequestIdStatusResponse = + GetFalAiFlux1KreaImageToImageRequestsByRequestIdStatusResponses[keyof GetFalAiFlux1KreaImageToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux1KreaImageToImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-1/krea/image-to-image/requests/{request_id}/cancel' +} + +export type PutFalAiFlux1KreaImageToImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFlux1KreaImageToImageRequestsByRequestIdCancelResponse = + PutFalAiFlux1KreaImageToImageRequestsByRequestIdCancelResponses[keyof PutFalAiFlux1KreaImageToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux1KreaImageToImageData = { + body: SchemaFlux1KreaImageToImageInput + path?: never + query?: never + url: '/fal-ai/flux-1/krea/image-to-image' +} + +export type PostFalAiFlux1KreaImageToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux1KreaImageToImageResponse = + PostFalAiFlux1KreaImageToImageResponses[keyof PostFalAiFlux1KreaImageToImageResponses] + +export type GetFalAiFlux1KreaImageToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-1/krea/image-to-image/requests/{request_id}' +} + +export type GetFalAiFlux1KreaImageToImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux1KreaImageToImageOutput +} + +export type GetFalAiFlux1KreaImageToImageRequestsByRequestIdResponse = + GetFalAiFlux1KreaImageToImageRequestsByRequestIdResponses[keyof GetFalAiFlux1KreaImageToImageRequestsByRequestIdResponses] + +export type GetFalAiFlux1KreaReduxRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-1/krea/redux/requests/{request_id}/status' +} + +export type GetFalAiFlux1KreaReduxRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlux1KreaReduxRequestsByRequestIdStatusResponse = + GetFalAiFlux1KreaReduxRequestsByRequestIdStatusResponses[keyof GetFalAiFlux1KreaReduxRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux1KreaReduxRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-1/krea/redux/requests/{request_id}/cancel' +} + +export type PutFalAiFlux1KreaReduxRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFlux1KreaReduxRequestsByRequestIdCancelResponse = + PutFalAiFlux1KreaReduxRequestsByRequestIdCancelResponses[keyof PutFalAiFlux1KreaReduxRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux1KreaReduxData = { + body: SchemaFlux1KreaReduxInput + path?: never + query?: never + url: '/fal-ai/flux-1/krea/redux' +} + +export type PostFalAiFlux1KreaReduxResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux1KreaReduxResponse = + PostFalAiFlux1KreaReduxResponses[keyof PostFalAiFlux1KreaReduxResponses] + +export type GetFalAiFlux1KreaReduxRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-1/krea/redux/requests/{request_id}' +} + +export type GetFalAiFlux1KreaReduxRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux1KreaReduxOutput +} + +export type GetFalAiFlux1KreaReduxRequestsByRequestIdResponse = + GetFalAiFlux1KreaReduxRequestsByRequestIdResponses[keyof GetFalAiFlux1KreaReduxRequestsByRequestIdResponses] + +export type GetFalAiFluxKontextLoraInpaintRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-kontext-lora/inpaint/requests/{request_id}/status' +} + +export type GetFalAiFluxKontextLoraInpaintRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxKontextLoraInpaintRequestsByRequestIdStatusResponse = + GetFalAiFluxKontextLoraInpaintRequestsByRequestIdStatusResponses[keyof GetFalAiFluxKontextLoraInpaintRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxKontextLoraInpaintRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-kontext-lora/inpaint/requests/{request_id}/cancel' +} + +export type PutFalAiFluxKontextLoraInpaintRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFluxKontextLoraInpaintRequestsByRequestIdCancelResponse = + PutFalAiFluxKontextLoraInpaintRequestsByRequestIdCancelResponses[keyof PutFalAiFluxKontextLoraInpaintRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxKontextLoraInpaintData = { + body: SchemaFluxKontextLoraInpaintInput + path?: never + query?: never + url: '/fal-ai/flux-kontext-lora/inpaint' +} + +export type PostFalAiFluxKontextLoraInpaintResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxKontextLoraInpaintResponse = + PostFalAiFluxKontextLoraInpaintResponses[keyof PostFalAiFluxKontextLoraInpaintResponses] + +export type GetFalAiFluxKontextLoraInpaintRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-kontext-lora/inpaint/requests/{request_id}' +} + +export type GetFalAiFluxKontextLoraInpaintRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxKontextLoraInpaintOutput +} + +export type GetFalAiFluxKontextLoraInpaintRequestsByRequestIdResponse = + GetFalAiFluxKontextLoraInpaintRequestsByRequestIdResponses[keyof GetFalAiFluxKontextLoraInpaintRequestsByRequestIdResponses] + +export type GetFalAiHunyuanWorldRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/hunyuan_world/requests/{request_id}/status' +} + +export type GetFalAiHunyuanWorldRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiHunyuanWorldRequestsByRequestIdStatusResponse = + GetFalAiHunyuanWorldRequestsByRequestIdStatusResponses[keyof GetFalAiHunyuanWorldRequestsByRequestIdStatusResponses] + +export type PutFalAiHunyuanWorldRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan_world/requests/{request_id}/cancel' +} + +export type PutFalAiHunyuanWorldRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiHunyuanWorldRequestsByRequestIdCancelResponse = + PutFalAiHunyuanWorldRequestsByRequestIdCancelResponses[keyof PutFalAiHunyuanWorldRequestsByRequestIdCancelResponses] + +export type PostFalAiHunyuanWorldData = { + body: SchemaHunyuanWorldInput + path?: never + query?: never + url: '/fal-ai/hunyuan_world' +} + +export type PostFalAiHunyuanWorldResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiHunyuanWorldResponse = + PostFalAiHunyuanWorldResponses[keyof PostFalAiHunyuanWorldResponses] + +export type GetFalAiHunyuanWorldRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan_world/requests/{request_id}' +} + +export type GetFalAiHunyuanWorldRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaHunyuanWorldOutput +} + +export type GetFalAiHunyuanWorldRequestsByRequestIdResponse = + GetFalAiHunyuanWorldRequestsByRequestIdResponses[keyof GetFalAiHunyuanWorldRequestsByRequestIdResponses] + +export type GetFalAiImageEditingRetouchRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-editing/retouch/requests/{request_id}/status' +} + +export type GetFalAiImageEditingRetouchRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiImageEditingRetouchRequestsByRequestIdStatusResponse = + GetFalAiImageEditingRetouchRequestsByRequestIdStatusResponses[keyof GetFalAiImageEditingRetouchRequestsByRequestIdStatusResponses] + +export type PutFalAiImageEditingRetouchRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/retouch/requests/{request_id}/cancel' +} + +export type PutFalAiImageEditingRetouchRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiImageEditingRetouchRequestsByRequestIdCancelResponse = + PutFalAiImageEditingRetouchRequestsByRequestIdCancelResponses[keyof PutFalAiImageEditingRetouchRequestsByRequestIdCancelResponses] + +export type PostFalAiImageEditingRetouchData = { + body: SchemaImageEditingRetouchInput + path?: never + query?: never + url: '/fal-ai/image-editing/retouch' +} + +export type PostFalAiImageEditingRetouchResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageEditingRetouchResponse = + PostFalAiImageEditingRetouchResponses[keyof PostFalAiImageEditingRetouchResponses] + +export type GetFalAiImageEditingRetouchRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/retouch/requests/{request_id}' +} + +export type GetFalAiImageEditingRetouchRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImageEditingRetouchOutput +} + +export type GetFalAiImageEditingRetouchRequestsByRequestIdResponse = + GetFalAiImageEditingRetouchRequestsByRequestIdResponses[keyof GetFalAiImageEditingRetouchRequestsByRequestIdResponses] + +export type GetFalAiHidreamE11RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/hidream-e1-1/requests/{request_id}/status' +} + +export type GetFalAiHidreamE11RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiHidreamE11RequestsByRequestIdStatusResponse = + GetFalAiHidreamE11RequestsByRequestIdStatusResponses[keyof GetFalAiHidreamE11RequestsByRequestIdStatusResponses] + +export type PutFalAiHidreamE11RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hidream-e1-1/requests/{request_id}/cancel' +} + +export type PutFalAiHidreamE11RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiHidreamE11RequestsByRequestIdCancelResponse = + PutFalAiHidreamE11RequestsByRequestIdCancelResponses[keyof PutFalAiHidreamE11RequestsByRequestIdCancelResponses] + +export type PostFalAiHidreamE11Data = { + body: SchemaHidreamE11Input + path?: never + query?: never + url: '/fal-ai/hidream-e1-1' +} + +export type PostFalAiHidreamE11Responses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiHidreamE11Response = + PostFalAiHidreamE11Responses[keyof PostFalAiHidreamE11Responses] + +export type GetFalAiHidreamE11RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hidream-e1-1/requests/{request_id}' +} + +export type GetFalAiHidreamE11RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaHidreamE11Output +} + +export type GetFalAiHidreamE11RequestsByRequestIdResponse = + GetFalAiHidreamE11RequestsByRequestIdResponses[keyof GetFalAiHidreamE11RequestsByRequestIdResponses] + +export type GetFalAiRifeRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/rife/requests/{request_id}/status' +} + +export type GetFalAiRifeRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiRifeRequestsByRequestIdStatusResponse = + GetFalAiRifeRequestsByRequestIdStatusResponses[keyof GetFalAiRifeRequestsByRequestIdStatusResponses] + +export type PutFalAiRifeRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/rife/requests/{request_id}/cancel' +} + +export type PutFalAiRifeRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiRifeRequestsByRequestIdCancelResponse = + PutFalAiRifeRequestsByRequestIdCancelResponses[keyof PutFalAiRifeRequestsByRequestIdCancelResponses] + +export type PostFalAiRifeData = { + body: SchemaRifeInput + path?: never + query?: never + url: '/fal-ai/rife' +} + +export type PostFalAiRifeResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiRifeResponse = + PostFalAiRifeResponses[keyof PostFalAiRifeResponses] + +export type GetFalAiRifeRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/rife/requests/{request_id}' +} + +export type GetFalAiRifeRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaRifeOutput +} + +export type GetFalAiRifeRequestsByRequestIdResponse = + GetFalAiRifeRequestsByRequestIdResponses[keyof GetFalAiRifeRequestsByRequestIdResponses] + +export type GetFalAiFilmRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/film/requests/{request_id}/status' +} + +export type GetFalAiFilmRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFilmRequestsByRequestIdStatusResponse = + GetFalAiFilmRequestsByRequestIdStatusResponses[keyof GetFalAiFilmRequestsByRequestIdStatusResponses] + +export type PutFalAiFilmRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/film/requests/{request_id}/cancel' +} + +export type PutFalAiFilmRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. 
+ */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFilmRequestsByRequestIdCancelResponse = + PutFalAiFilmRequestsByRequestIdCancelResponses[keyof PutFalAiFilmRequestsByRequestIdCancelResponses] + +export type PostFalAiFilmData = { + body: SchemaFilmInput + path?: never + query?: never + url: '/fal-ai/film' +} + +export type PostFalAiFilmResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFilmResponse = + PostFalAiFilmResponses[keyof PostFalAiFilmResponses] + +export type GetFalAiFilmRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/film/requests/{request_id}' +} + +export type GetFalAiFilmRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFilmOutput +} + +export type GetFalAiFilmRequestsByRequestIdResponse = + GetFalAiFilmRequestsByRequestIdResponses[keyof GetFalAiFilmRequestsByRequestIdResponses] + +export type GetFalAiCalligrapherRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/calligrapher/requests/{request_id}/status' +} + +export type GetFalAiCalligrapherRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiCalligrapherRequestsByRequestIdStatusResponse = + GetFalAiCalligrapherRequestsByRequestIdStatusResponses[keyof GetFalAiCalligrapherRequestsByRequestIdStatusResponses] + +export type PutFalAiCalligrapherRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/calligrapher/requests/{request_id}/cancel' +} + +export type PutFalAiCalligrapherRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiCalligrapherRequestsByRequestIdCancelResponse = + PutFalAiCalligrapherRequestsByRequestIdCancelResponses[keyof PutFalAiCalligrapherRequestsByRequestIdCancelResponses] + +export type PostFalAiCalligrapherData = { + body: SchemaCalligrapherInput + path?: never + query?: never + url: '/fal-ai/calligrapher' +} + +export type PostFalAiCalligrapherResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiCalligrapherResponse = + PostFalAiCalligrapherResponses[keyof PostFalAiCalligrapherResponses] + +export type GetFalAiCalligrapherRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/calligrapher/requests/{request_id}' +} + +export type GetFalAiCalligrapherRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaCalligrapherOutput +} + +export type GetFalAiCalligrapherRequestsByRequestIdResponse = + GetFalAiCalligrapherRequestsByRequestIdResponses[keyof GetFalAiCalligrapherRequestsByRequestIdResponses] + +export type GetFalAiBriaReimagineRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/bria/reimagine/requests/{request_id}/status' +} + +export type GetFalAiBriaReimagineRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiBriaReimagineRequestsByRequestIdStatusResponse = + GetFalAiBriaReimagineRequestsByRequestIdStatusResponses[keyof GetFalAiBriaReimagineRequestsByRequestIdStatusResponses] + +export type PutFalAiBriaReimagineRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bria/reimagine/requests/{request_id}/cancel' +} + +export type PutFalAiBriaReimagineRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiBriaReimagineRequestsByRequestIdCancelResponse = + PutFalAiBriaReimagineRequestsByRequestIdCancelResponses[keyof PutFalAiBriaReimagineRequestsByRequestIdCancelResponses] + +export type PostFalAiBriaReimagineData = { + body: SchemaBriaReimagineInput + path?: never + query?: never + url: '/fal-ai/bria/reimagine' +} + +export type PostFalAiBriaReimagineResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiBriaReimagineResponse = + PostFalAiBriaReimagineResponses[keyof PostFalAiBriaReimagineResponses] + +export type GetFalAiBriaReimagineRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bria/reimagine/requests/{request_id}' +} + +export type GetFalAiBriaReimagineRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaBriaReimagineOutput +} + +export type GetFalAiBriaReimagineRequestsByRequestIdResponse = + GetFalAiBriaReimagineRequestsByRequestIdResponses[keyof GetFalAiBriaReimagineRequestsByRequestIdResponses] + +export type GetFalAiImageEditingRealismRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-editing/realism/requests/{request_id}/status' +} + +export type GetFalAiImageEditingRealismRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiImageEditingRealismRequestsByRequestIdStatusResponse = + GetFalAiImageEditingRealismRequestsByRequestIdStatusResponses[keyof GetFalAiImageEditingRealismRequestsByRequestIdStatusResponses] + +export type PutFalAiImageEditingRealismRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/realism/requests/{request_id}/cancel' +} + +export type PutFalAiImageEditingRealismRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiImageEditingRealismRequestsByRequestIdCancelResponse = + PutFalAiImageEditingRealismRequestsByRequestIdCancelResponses[keyof PutFalAiImageEditingRealismRequestsByRequestIdCancelResponses] + +export type PostFalAiImageEditingRealismData = { + body: SchemaImageEditingRealismInput + path?: never + query?: never + url: '/fal-ai/image-editing/realism' +} + +export type PostFalAiImageEditingRealismResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageEditingRealismResponse = + PostFalAiImageEditingRealismResponses[keyof PostFalAiImageEditingRealismResponses] + +export type GetFalAiImageEditingRealismRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/realism/requests/{request_id}' +} + +export type GetFalAiImageEditingRealismRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImageEditingRealismOutput +} + +export type GetFalAiImageEditingRealismRequestsByRequestIdResponse = + GetFalAiImageEditingRealismRequestsByRequestIdResponses[keyof GetFalAiImageEditingRealismRequestsByRequestIdResponses] + +export type GetFalAiPostProcessingVignetteRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/post-processing/vignette/requests/{request_id}/status' +} + +export type GetFalAiPostProcessingVignetteRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiPostProcessingVignetteRequestsByRequestIdStatusResponse = + GetFalAiPostProcessingVignetteRequestsByRequestIdStatusResponses[keyof GetFalAiPostProcessingVignetteRequestsByRequestIdStatusResponses] + +export type PutFalAiPostProcessingVignetteRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/post-processing/vignette/requests/{request_id}/cancel' +} + +export type PutFalAiPostProcessingVignetteRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiPostProcessingVignetteRequestsByRequestIdCancelResponse = + PutFalAiPostProcessingVignetteRequestsByRequestIdCancelResponses[keyof PutFalAiPostProcessingVignetteRequestsByRequestIdCancelResponses] + +export type PostFalAiPostProcessingVignetteData = { + body: SchemaPostProcessingVignetteInput + path?: never + query?: never + url: '/fal-ai/post-processing/vignette' +} + +export type PostFalAiPostProcessingVignetteResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPostProcessingVignetteResponse = + PostFalAiPostProcessingVignetteResponses[keyof PostFalAiPostProcessingVignetteResponses] + +export type GetFalAiPostProcessingVignetteRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/post-processing/vignette/requests/{request_id}' +} + +export type GetFalAiPostProcessingVignetteRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaPostProcessingVignetteOutput +} + +export type GetFalAiPostProcessingVignetteRequestsByRequestIdResponse = + GetFalAiPostProcessingVignetteRequestsByRequestIdResponses[keyof GetFalAiPostProcessingVignetteRequestsByRequestIdResponses] + +export type GetFalAiPostProcessingSolarizeRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/post-processing/solarize/requests/{request_id}/status' +} + +export type GetFalAiPostProcessingSolarizeRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiPostProcessingSolarizeRequestsByRequestIdStatusResponse = + GetFalAiPostProcessingSolarizeRequestsByRequestIdStatusResponses[keyof GetFalAiPostProcessingSolarizeRequestsByRequestIdStatusResponses] + +export type PutFalAiPostProcessingSolarizeRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/post-processing/solarize/requests/{request_id}/cancel' +} + +export type PutFalAiPostProcessingSolarizeRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiPostProcessingSolarizeRequestsByRequestIdCancelResponse = + PutFalAiPostProcessingSolarizeRequestsByRequestIdCancelResponses[keyof PutFalAiPostProcessingSolarizeRequestsByRequestIdCancelResponses] + +export type PostFalAiPostProcessingSolarizeData = { + body: SchemaPostProcessingSolarizeInput + path?: never + query?: never + url: '/fal-ai/post-processing/solarize' +} + +export type PostFalAiPostProcessingSolarizeResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPostProcessingSolarizeResponse = + PostFalAiPostProcessingSolarizeResponses[keyof PostFalAiPostProcessingSolarizeResponses] + +export type GetFalAiPostProcessingSolarizeRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/post-processing/solarize/requests/{request_id}' +} + +export type GetFalAiPostProcessingSolarizeRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPostProcessingSolarizeOutput +} + +export type GetFalAiPostProcessingSolarizeRequestsByRequestIdResponse = + GetFalAiPostProcessingSolarizeRequestsByRequestIdResponses[keyof GetFalAiPostProcessingSolarizeRequestsByRequestIdResponses] + +export type GetFalAiPostProcessingSharpenRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/post-processing/sharpen/requests/{request_id}/status' +} + +export type GetFalAiPostProcessingSharpenRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiPostProcessingSharpenRequestsByRequestIdStatusResponse = + GetFalAiPostProcessingSharpenRequestsByRequestIdStatusResponses[keyof GetFalAiPostProcessingSharpenRequestsByRequestIdStatusResponses] + +export type PutFalAiPostProcessingSharpenRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/post-processing/sharpen/requests/{request_id}/cancel' +} + +export type PutFalAiPostProcessingSharpenRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiPostProcessingSharpenRequestsByRequestIdCancelResponse = + PutFalAiPostProcessingSharpenRequestsByRequestIdCancelResponses[keyof PutFalAiPostProcessingSharpenRequestsByRequestIdCancelResponses] + +export type PostFalAiPostProcessingSharpenData = { + body: SchemaPostProcessingSharpenInput + path?: never + query?: never + url: '/fal-ai/post-processing/sharpen' +} + +export type PostFalAiPostProcessingSharpenResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPostProcessingSharpenResponse = + PostFalAiPostProcessingSharpenResponses[keyof PostFalAiPostProcessingSharpenResponses] + +export type GetFalAiPostProcessingSharpenRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/post-processing/sharpen/requests/{request_id}' +} + +export type GetFalAiPostProcessingSharpenRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPostProcessingSharpenOutput +} + +export type GetFalAiPostProcessingSharpenRequestsByRequestIdResponse = + GetFalAiPostProcessingSharpenRequestsByRequestIdResponses[keyof GetFalAiPostProcessingSharpenRequestsByRequestIdResponses] + +export type GetFalAiPostProcessingParabolizeRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/post-processing/parabolize/requests/{request_id}/status' +} + +export type GetFalAiPostProcessingParabolizeRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiPostProcessingParabolizeRequestsByRequestIdStatusResponse = + GetFalAiPostProcessingParabolizeRequestsByRequestIdStatusResponses[keyof GetFalAiPostProcessingParabolizeRequestsByRequestIdStatusResponses] + +export type PutFalAiPostProcessingParabolizeRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/post-processing/parabolize/requests/{request_id}/cancel' +} + +export type PutFalAiPostProcessingParabolizeRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiPostProcessingParabolizeRequestsByRequestIdCancelResponse = + PutFalAiPostProcessingParabolizeRequestsByRequestIdCancelResponses[keyof PutFalAiPostProcessingParabolizeRequestsByRequestIdCancelResponses] + +export type PostFalAiPostProcessingParabolizeData = { + body: SchemaPostProcessingParabolizeInput + path?: never + query?: never + url: '/fal-ai/post-processing/parabolize' +} + +export type PostFalAiPostProcessingParabolizeResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPostProcessingParabolizeResponse = + PostFalAiPostProcessingParabolizeResponses[keyof PostFalAiPostProcessingParabolizeResponses] + +export type GetFalAiPostProcessingParabolizeRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/post-processing/parabolize/requests/{request_id}' +} + +export type GetFalAiPostProcessingParabolizeRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPostProcessingParabolizeOutput +} + +export type GetFalAiPostProcessingParabolizeRequestsByRequestIdResponse = + GetFalAiPostProcessingParabolizeRequestsByRequestIdResponses[keyof GetFalAiPostProcessingParabolizeRequestsByRequestIdResponses] + +export type GetFalAiPostProcessingGrainRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/post-processing/grain/requests/{request_id}/status' +} + +export type GetFalAiPostProcessingGrainRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiPostProcessingGrainRequestsByRequestIdStatusResponse = + GetFalAiPostProcessingGrainRequestsByRequestIdStatusResponses[keyof GetFalAiPostProcessingGrainRequestsByRequestIdStatusResponses] + +export type PutFalAiPostProcessingGrainRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/post-processing/grain/requests/{request_id}/cancel' +} + +export type PutFalAiPostProcessingGrainRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiPostProcessingGrainRequestsByRequestIdCancelResponse = + PutFalAiPostProcessingGrainRequestsByRequestIdCancelResponses[keyof PutFalAiPostProcessingGrainRequestsByRequestIdCancelResponses] + +export type PostFalAiPostProcessingGrainData = { + body: SchemaPostProcessingGrainInput + path?: never + query?: never + url: '/fal-ai/post-processing/grain' +} + +export type PostFalAiPostProcessingGrainResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPostProcessingGrainResponse = + PostFalAiPostProcessingGrainResponses[keyof PostFalAiPostProcessingGrainResponses] + +export type GetFalAiPostProcessingGrainRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/post-processing/grain/requests/{request_id}' +} + +export type GetFalAiPostProcessingGrainRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaPostProcessingGrainOutput +} + +export type GetFalAiPostProcessingGrainRequestsByRequestIdResponse = + GetFalAiPostProcessingGrainRequestsByRequestIdResponses[keyof GetFalAiPostProcessingGrainRequestsByRequestIdResponses] + +export type GetFalAiPostProcessingDodgeBurnRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/post-processing/dodge-burn/requests/{request_id}/status' +} + +export type GetFalAiPostProcessingDodgeBurnRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiPostProcessingDodgeBurnRequestsByRequestIdStatusResponse = + GetFalAiPostProcessingDodgeBurnRequestsByRequestIdStatusResponses[keyof GetFalAiPostProcessingDodgeBurnRequestsByRequestIdStatusResponses] + +export type PutFalAiPostProcessingDodgeBurnRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/post-processing/dodge-burn/requests/{request_id}/cancel' +} + +export type PutFalAiPostProcessingDodgeBurnRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiPostProcessingDodgeBurnRequestsByRequestIdCancelResponse = + PutFalAiPostProcessingDodgeBurnRequestsByRequestIdCancelResponses[keyof PutFalAiPostProcessingDodgeBurnRequestsByRequestIdCancelResponses] + +export type PostFalAiPostProcessingDodgeBurnData = { + body: SchemaPostProcessingDodgeBurnInput + path?: never + query?: never + url: '/fal-ai/post-processing/dodge-burn' +} + +export type PostFalAiPostProcessingDodgeBurnResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPostProcessingDodgeBurnResponse = + PostFalAiPostProcessingDodgeBurnResponses[keyof PostFalAiPostProcessingDodgeBurnResponses] + +export type GetFalAiPostProcessingDodgeBurnRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/post-processing/dodge-burn/requests/{request_id}' +} + +export type GetFalAiPostProcessingDodgeBurnRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPostProcessingDodgeBurnOutput +} + +export type GetFalAiPostProcessingDodgeBurnRequestsByRequestIdResponse = + GetFalAiPostProcessingDodgeBurnRequestsByRequestIdResponses[keyof GetFalAiPostProcessingDodgeBurnRequestsByRequestIdResponses] + +export type GetFalAiPostProcessingDissolveRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/post-processing/dissolve/requests/{request_id}/status' +} + +export type GetFalAiPostProcessingDissolveRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiPostProcessingDissolveRequestsByRequestIdStatusResponse = + GetFalAiPostProcessingDissolveRequestsByRequestIdStatusResponses[keyof GetFalAiPostProcessingDissolveRequestsByRequestIdStatusResponses] + +export type PutFalAiPostProcessingDissolveRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/post-processing/dissolve/requests/{request_id}/cancel' +} + +export type PutFalAiPostProcessingDissolveRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiPostProcessingDissolveRequestsByRequestIdCancelResponse = + PutFalAiPostProcessingDissolveRequestsByRequestIdCancelResponses[keyof PutFalAiPostProcessingDissolveRequestsByRequestIdCancelResponses] + +export type PostFalAiPostProcessingDissolveData = { + body: SchemaPostProcessingDissolveInput + path?: never + query?: never + url: '/fal-ai/post-processing/dissolve' +} + +export type PostFalAiPostProcessingDissolveResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPostProcessingDissolveResponse = + PostFalAiPostProcessingDissolveResponses[keyof PostFalAiPostProcessingDissolveResponses] + +export type GetFalAiPostProcessingDissolveRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/post-processing/dissolve/requests/{request_id}' +} + +export type GetFalAiPostProcessingDissolveRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPostProcessingDissolveOutput +} + +export type GetFalAiPostProcessingDissolveRequestsByRequestIdResponse = + GetFalAiPostProcessingDissolveRequestsByRequestIdResponses[keyof GetFalAiPostProcessingDissolveRequestsByRequestIdResponses] + +export type GetFalAiPostProcessingDesaturateRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/post-processing/desaturate/requests/{request_id}/status' +} + +export type GetFalAiPostProcessingDesaturateRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiPostProcessingDesaturateRequestsByRequestIdStatusResponse = + GetFalAiPostProcessingDesaturateRequestsByRequestIdStatusResponses[keyof GetFalAiPostProcessingDesaturateRequestsByRequestIdStatusResponses] + +export type PutFalAiPostProcessingDesaturateRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/post-processing/desaturate/requests/{request_id}/cancel' +} + +export type PutFalAiPostProcessingDesaturateRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiPostProcessingDesaturateRequestsByRequestIdCancelResponse = + PutFalAiPostProcessingDesaturateRequestsByRequestIdCancelResponses[keyof PutFalAiPostProcessingDesaturateRequestsByRequestIdCancelResponses] + +export type PostFalAiPostProcessingDesaturateData = { + body: SchemaPostProcessingDesaturateInput + path?: never + query?: never + url: '/fal-ai/post-processing/desaturate' +} + +export type PostFalAiPostProcessingDesaturateResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPostProcessingDesaturateResponse = + PostFalAiPostProcessingDesaturateResponses[keyof PostFalAiPostProcessingDesaturateResponses] + +export type GetFalAiPostProcessingDesaturateRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/post-processing/desaturate/requests/{request_id}' +} + +export type GetFalAiPostProcessingDesaturateRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPostProcessingDesaturateOutput +} + +export type GetFalAiPostProcessingDesaturateRequestsByRequestIdResponse = + GetFalAiPostProcessingDesaturateRequestsByRequestIdResponses[keyof GetFalAiPostProcessingDesaturateRequestsByRequestIdResponses] + +export type GetFalAiPostProcessingColorTintRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/post-processing/color-tint/requests/{request_id}/status' +} + +export type GetFalAiPostProcessingColorTintRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiPostProcessingColorTintRequestsByRequestIdStatusResponse = + GetFalAiPostProcessingColorTintRequestsByRequestIdStatusResponses[keyof GetFalAiPostProcessingColorTintRequestsByRequestIdStatusResponses] + +export type PutFalAiPostProcessingColorTintRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/post-processing/color-tint/requests/{request_id}/cancel' +} + +export type PutFalAiPostProcessingColorTintRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiPostProcessingColorTintRequestsByRequestIdCancelResponse = + PutFalAiPostProcessingColorTintRequestsByRequestIdCancelResponses[keyof PutFalAiPostProcessingColorTintRequestsByRequestIdCancelResponses] + +export type PostFalAiPostProcessingColorTintData = { + body: SchemaPostProcessingColorTintInput + path?: never + query?: never + url: '/fal-ai/post-processing/color-tint' +} + +export type PostFalAiPostProcessingColorTintResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPostProcessingColorTintResponse = + PostFalAiPostProcessingColorTintResponses[keyof PostFalAiPostProcessingColorTintResponses] + +export type GetFalAiPostProcessingColorTintRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/post-processing/color-tint/requests/{request_id}' +} + +export type GetFalAiPostProcessingColorTintRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaPostProcessingColorTintOutput +} + +export type GetFalAiPostProcessingColorTintRequestsByRequestIdResponse = + GetFalAiPostProcessingColorTintRequestsByRequestIdResponses[keyof GetFalAiPostProcessingColorTintRequestsByRequestIdResponses] + +export type GetFalAiPostProcessingColorCorrectionRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/post-processing/color-correction/requests/{request_id}/status' + } + +export type GetFalAiPostProcessingColorCorrectionRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiPostProcessingColorCorrectionRequestsByRequestIdStatusResponse = + GetFalAiPostProcessingColorCorrectionRequestsByRequestIdStatusResponses[keyof GetFalAiPostProcessingColorCorrectionRequestsByRequestIdStatusResponses] + +export type PutFalAiPostProcessingColorCorrectionRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/post-processing/color-correction/requests/{request_id}/cancel' + } + +export type PutFalAiPostProcessingColorCorrectionRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiPostProcessingColorCorrectionRequestsByRequestIdCancelResponse = + PutFalAiPostProcessingColorCorrectionRequestsByRequestIdCancelResponses[keyof PutFalAiPostProcessingColorCorrectionRequestsByRequestIdCancelResponses] + +export type PostFalAiPostProcessingColorCorrectionData = { + body: SchemaPostProcessingColorCorrectionInput + path?: never + query?: never + url: '/fal-ai/post-processing/color-correction' +} + +export type PostFalAiPostProcessingColorCorrectionResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPostProcessingColorCorrectionResponse = + PostFalAiPostProcessingColorCorrectionResponses[keyof PostFalAiPostProcessingColorCorrectionResponses] + +export type GetFalAiPostProcessingColorCorrectionRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/post-processing/color-correction/requests/{request_id}' +} + +export type GetFalAiPostProcessingColorCorrectionRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaPostProcessingColorCorrectionOutput + } + +export type GetFalAiPostProcessingColorCorrectionRequestsByRequestIdResponse = + GetFalAiPostProcessingColorCorrectionRequestsByRequestIdResponses[keyof GetFalAiPostProcessingColorCorrectionRequestsByRequestIdResponses] + +export type GetFalAiPostProcessingChromaticAberrationRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/post-processing/chromatic-aberration/requests/{request_id}/status' + } + +export type GetFalAiPostProcessingChromaticAberrationRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
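A `Post...Data` shape carries the endpoint's input schema as `body` and resolves to `SchemaQueueStatus` on 200. A minimal submit sketch against the color-correction route above, assuming the standard `fetch` API, a `queue.fal.run` host, and a `Key <token>` Authorization format (all assumptions beyond what this file declares):

```ts
// Sketch only: the host and the auth header format are assumptions.
async function submitColorCorrection(
  body: SchemaPostProcessingColorCorrectionInput,
  falKey: string,
): Promise<SchemaQueueStatus> {
  const res = await fetch(
    'https://queue.fal.run/fal-ai/post-processing/color-correction',
    {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        Authorization: `Key ${falKey}`, // assumed format
      },
      body: JSON.stringify(body),
    },
  )
  if (!res.ok) throw new Error(`submit failed: ${res.status}`)
  return (await res.json()) as SchemaQueueStatus
}
```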
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiPostProcessingChromaticAberrationRequestsByRequestIdStatusResponse = + GetFalAiPostProcessingChromaticAberrationRequestsByRequestIdStatusResponses[keyof GetFalAiPostProcessingChromaticAberrationRequestsByRequestIdStatusResponses] + +export type PutFalAiPostProcessingChromaticAberrationRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/post-processing/chromatic-aberration/requests/{request_id}/cancel' + } + +export type PutFalAiPostProcessingChromaticAberrationRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiPostProcessingChromaticAberrationRequestsByRequestIdCancelResponse = + PutFalAiPostProcessingChromaticAberrationRequestsByRequestIdCancelResponses[keyof PutFalAiPostProcessingChromaticAberrationRequestsByRequestIdCancelResponses] + +export type PostFalAiPostProcessingChromaticAberrationData = { + body: SchemaPostProcessingChromaticAberrationInput + path?: never + query?: never + url: '/fal-ai/post-processing/chromatic-aberration' +} + +export type PostFalAiPostProcessingChromaticAberrationResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPostProcessingChromaticAberrationResponse = + PostFalAiPostProcessingChromaticAberrationResponses[keyof PostFalAiPostProcessingChromaticAberrationResponses] + +export type GetFalAiPostProcessingChromaticAberrationRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/post-processing/chromatic-aberration/requests/{request_id}' +} + +export type GetFalAiPostProcessingChromaticAberrationRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaPostProcessingChromaticAberrationOutput + } + +export type GetFalAiPostProcessingChromaticAberrationRequestsByRequestIdResponse = + GetFalAiPostProcessingChromaticAberrationRequestsByRequestIdResponses[keyof GetFalAiPostProcessingChromaticAberrationRequestsByRequestIdResponses] + +export type GetFalAiPostProcessingBlurRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/post-processing/blur/requests/{request_id}/status' +} + +export type GetFalAiPostProcessingBlurRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiPostProcessingBlurRequestsByRequestIdStatusResponse = + GetFalAiPostProcessingBlurRequestsByRequestIdStatusResponses[keyof GetFalAiPostProcessingBlurRequestsByRequestIdStatusResponses] + +export type PutFalAiPostProcessingBlurRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/post-processing/blur/requests/{request_id}/cancel' +} + +export type PutFalAiPostProcessingBlurRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiPostProcessingBlurRequestsByRequestIdCancelResponse = + PutFalAiPostProcessingBlurRequestsByRequestIdCancelResponses[keyof PutFalAiPostProcessingBlurRequestsByRequestIdCancelResponses] + +export type PostFalAiPostProcessingBlurData = { + body: SchemaPostProcessingBlurInput + path?: never + query?: never + url: '/fal-ai/post-processing/blur' +} + +export type PostFalAiPostProcessingBlurResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPostProcessingBlurResponse = + PostFalAiPostProcessingBlurResponses[keyof PostFalAiPostProcessingBlurResponses] + +export type GetFalAiPostProcessingBlurRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/post-processing/blur/requests/{request_id}' +} + +export type GetFalAiPostProcessingBlurRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPostProcessingBlurOutput +} + +export type GetFalAiPostProcessingBlurRequestsByRequestIdResponse = + GetFalAiPostProcessingBlurRequestsByRequestIdResponses[keyof GetFalAiPostProcessingBlurRequestsByRequestIdResponses] + +export type GetFalAiImageEditingYoutubeThumbnailsRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-editing/youtube-thumbnails/requests/{request_id}/status' + } + +export type GetFalAiImageEditingYoutubeThumbnailsRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiImageEditingYoutubeThumbnailsRequestsByRequestIdStatusResponse = + GetFalAiImageEditingYoutubeThumbnailsRequestsByRequestIdStatusResponses[keyof GetFalAiImageEditingYoutubeThumbnailsRequestsByRequestIdStatusResponses] + +export type PutFalAiImageEditingYoutubeThumbnailsRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/youtube-thumbnails/requests/{request_id}/cancel' + } + +export type PutFalAiImageEditingYoutubeThumbnailsRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiImageEditingYoutubeThumbnailsRequestsByRequestIdCancelResponse = + PutFalAiImageEditingYoutubeThumbnailsRequestsByRequestIdCancelResponses[keyof PutFalAiImageEditingYoutubeThumbnailsRequestsByRequestIdCancelResponses] + +export type PostFalAiImageEditingYoutubeThumbnailsData = { + body: SchemaImageEditingYoutubeThumbnailsInput + path?: never + query?: never + url: '/fal-ai/image-editing/youtube-thumbnails' +} + +export type PostFalAiImageEditingYoutubeThumbnailsResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageEditingYoutubeThumbnailsResponse = + PostFalAiImageEditingYoutubeThumbnailsResponses[keyof PostFalAiImageEditingYoutubeThumbnailsResponses] + +export type GetFalAiImageEditingYoutubeThumbnailsRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/youtube-thumbnails/requests/{request_id}' +} + +export type GetFalAiImageEditingYoutubeThumbnailsRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaImageEditingYoutubeThumbnailsOutput + } + +export type GetFalAiImageEditingYoutubeThumbnailsRequestsByRequestIdResponse = + GetFalAiImageEditingYoutubeThumbnailsRequestsByRequestIdResponses[keyof GetFalAiImageEditingYoutubeThumbnailsRequestsByRequestIdResponses] + +export type GetFalAiTopazUpscaleImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/topaz/upscale/image/requests/{request_id}/status' +} + +export type GetFalAiTopazUpscaleImageRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiTopazUpscaleImageRequestsByRequestIdStatusResponse = + GetFalAiTopazUpscaleImageRequestsByRequestIdStatusResponses[keyof GetFalAiTopazUpscaleImageRequestsByRequestIdStatusResponses] + +export type PutFalAiTopazUpscaleImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/topaz/upscale/image/requests/{request_id}/cancel' +} + +export type PutFalAiTopazUpscaleImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiTopazUpscaleImageRequestsByRequestIdCancelResponse = + PutFalAiTopazUpscaleImageRequestsByRequestIdCancelResponses[keyof PutFalAiTopazUpscaleImageRequestsByRequestIdCancelResponses] + +export type PostFalAiTopazUpscaleImageData = { + body: SchemaTopazUpscaleImageInput + path?: never + query?: never + url: '/fal-ai/topaz/upscale/image' +} + +export type PostFalAiTopazUpscaleImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiTopazUpscaleImageResponse = + PostFalAiTopazUpscaleImageResponses[keyof PostFalAiTopazUpscaleImageResponses] + +export type GetFalAiTopazUpscaleImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/topaz/upscale/image/requests/{request_id}' +} + +export type GetFalAiTopazUpscaleImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaTopazUpscaleImageOutput +} + +export type GetFalAiTopazUpscaleImageRequestsByRequestIdResponse = + GetFalAiTopazUpscaleImageRequestsByRequestIdResponses[keyof GetFalAiTopazUpscaleImageRequestsByRequestIdResponses] + +export type GetFalAiImageEditingBroccoliHaircutRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
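The status routes accept `logs` as a `0`/`1` query flag, as the `Get...StatusData` shapes above declare. A polling sketch against the Topaz upscale status route; the `status` field name and its `'COMPLETED'` value on the queue payload are assumptions here, not guaranteed by this file:

```ts
// Polls a status route with logs enabled until the queue reports completion.
async function waitForTopazUpscale(
  requestId: string,
  falKey: string,
  intervalMs = 1000,
): Promise<SchemaQueueStatus> {
  const url = `https://queue.fal.run/fal-ai/topaz/upscale/image/requests/${encodeURIComponent(requestId)}/status?logs=1`
  for (;;) {
    const res = await fetch(url, { headers: { Authorization: `Key ${falKey}` } })
    if (!res.ok) throw new Error(`status failed: ${res.status}`)
    const payload = (await res.json()) as SchemaQueueStatus
    // assumed field name and terminal value on the queue payload:
    if ((payload as unknown as { status?: string }).status === 'COMPLETED') {
      return payload
    }
    await new Promise((resolve) => setTimeout(resolve, intervalMs))
  }
}
```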
+ */ + logs?: number + } + url: '/fal-ai/image-editing/broccoli-haircut/requests/{request_id}/status' +} + +export type GetFalAiImageEditingBroccoliHaircutRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiImageEditingBroccoliHaircutRequestsByRequestIdStatusResponse = + GetFalAiImageEditingBroccoliHaircutRequestsByRequestIdStatusResponses[keyof GetFalAiImageEditingBroccoliHaircutRequestsByRequestIdStatusResponses] + +export type PutFalAiImageEditingBroccoliHaircutRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/broccoli-haircut/requests/{request_id}/cancel' +} + +export type PutFalAiImageEditingBroccoliHaircutRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiImageEditingBroccoliHaircutRequestsByRequestIdCancelResponse = + PutFalAiImageEditingBroccoliHaircutRequestsByRequestIdCancelResponses[keyof PutFalAiImageEditingBroccoliHaircutRequestsByRequestIdCancelResponses] + +export type PostFalAiImageEditingBroccoliHaircutData = { + body: SchemaImageEditingBroccoliHaircutInput + path?: never + query?: never + url: '/fal-ai/image-editing/broccoli-haircut' +} + +export type PostFalAiImageEditingBroccoliHaircutResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageEditingBroccoliHaircutResponse = + PostFalAiImageEditingBroccoliHaircutResponses[keyof PostFalAiImageEditingBroccoliHaircutResponses] + +export type GetFalAiImageEditingBroccoliHaircutRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/broccoli-haircut/requests/{request_id}' +} + +export type GetFalAiImageEditingBroccoliHaircutRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImageEditingBroccoliHaircutOutput +} + +export type GetFalAiImageEditingBroccoliHaircutRequestsByRequestIdResponse = + GetFalAiImageEditingBroccoliHaircutRequestsByRequestIdResponses[keyof GetFalAiImageEditingBroccoliHaircutRequestsByRequestIdResponses] + +export type GetFalAiImageEditingWojakStyleRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-editing/wojak-style/requests/{request_id}/status' +} + +export type GetFalAiImageEditingWojakStyleRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiImageEditingWojakStyleRequestsByRequestIdStatusResponse = + GetFalAiImageEditingWojakStyleRequestsByRequestIdStatusResponses[keyof GetFalAiImageEditingWojakStyleRequestsByRequestIdStatusResponses] + +export type PutFalAiImageEditingWojakStyleRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/wojak-style/requests/{request_id}/cancel' +} + +export type PutFalAiImageEditingWojakStyleRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiImageEditingWojakStyleRequestsByRequestIdCancelResponse = + PutFalAiImageEditingWojakStyleRequestsByRequestIdCancelResponses[keyof PutFalAiImageEditingWojakStyleRequestsByRequestIdCancelResponses] + +export type PostFalAiImageEditingWojakStyleData = { + body: SchemaImageEditingWojakStyleInput + path?: never + query?: never + url: '/fal-ai/image-editing/wojak-style' +} + +export type PostFalAiImageEditingWojakStyleResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageEditingWojakStyleResponse = + PostFalAiImageEditingWojakStyleResponses[keyof PostFalAiImageEditingWojakStyleResponses] + +export type GetFalAiImageEditingWojakStyleRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/wojak-style/requests/{request_id}' +} + +export type GetFalAiImageEditingWojakStyleRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImageEditingWojakStyleOutput +} + +export type GetFalAiImageEditingWojakStyleRequestsByRequestIdResponse = + GetFalAiImageEditingWojakStyleRequestsByRequestIdResponses[keyof GetFalAiImageEditingWojakStyleRequestsByRequestIdResponses] + +export type GetFalAiImageEditingPlushieStyleRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-editing/plushie-style/requests/{request_id}/status' +} + +export type GetFalAiImageEditingPlushieStyleRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiImageEditingPlushieStyleRequestsByRequestIdStatusResponse = + GetFalAiImageEditingPlushieStyleRequestsByRequestIdStatusResponses[keyof GetFalAiImageEditingPlushieStyleRequestsByRequestIdStatusResponses] + +export type PutFalAiImageEditingPlushieStyleRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/plushie-style/requests/{request_id}/cancel' +} + +export type PutFalAiImageEditingPlushieStyleRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiImageEditingPlushieStyleRequestsByRequestIdCancelResponse = + PutFalAiImageEditingPlushieStyleRequestsByRequestIdCancelResponses[keyof PutFalAiImageEditingPlushieStyleRequestsByRequestIdCancelResponses] + +export type PostFalAiImageEditingPlushieStyleData = { + body: SchemaImageEditingPlushieStyleInput + path?: never + query?: never + url: '/fal-ai/image-editing/plushie-style' +} + +export type PostFalAiImageEditingPlushieStyleResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageEditingPlushieStyleResponse = + PostFalAiImageEditingPlushieStyleResponses[keyof PostFalAiImageEditingPlushieStyleResponses] + +export type GetFalAiImageEditingPlushieStyleRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/plushie-style/requests/{request_id}' +} + +export type GetFalAiImageEditingPlushieStyleRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaImageEditingPlushieStyleOutput +} + +export type GetFalAiImageEditingPlushieStyleRequestsByRequestIdResponse = + GetFalAiImageEditingPlushieStyleRequestsByRequestIdResponses[keyof GetFalAiImageEditingPlushieStyleRequestsByRequestIdResponses] + +export type GetFalAiFluxKontextLoraRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-kontext-lora/requests/{request_id}/status' +} + +export type GetFalAiFluxKontextLoraRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxKontextLoraRequestsByRequestIdStatusResponse = + GetFalAiFluxKontextLoraRequestsByRequestIdStatusResponses[keyof GetFalAiFluxKontextLoraRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxKontextLoraRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-kontext-lora/requests/{request_id}/cancel' +} + +export type PutFalAiFluxKontextLoraRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFluxKontextLoraRequestsByRequestIdCancelResponse = + PutFalAiFluxKontextLoraRequestsByRequestIdCancelResponses[keyof PutFalAiFluxKontextLoraRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxKontextLoraData = { + body: SchemaFluxKontextLoraInput + path?: never + query?: never + url: '/fal-ai/flux-kontext-lora' +} + +export type PostFalAiFluxKontextLoraResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxKontextLoraResponse = + PostFalAiFluxKontextLoraResponses[keyof PostFalAiFluxKontextLoraResponses] + +export type GetFalAiFluxKontextLoraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-kontext-lora/requests/{request_id}' +} + +export type GetFalAiFluxKontextLoraRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxKontextLoraOutput +} + +export type GetFalAiFluxKontextLoraRequestsByRequestIdResponse = + GetFalAiFluxKontextLoraRequestsByRequestIdResponses[keyof GetFalAiFluxKontextLoraRequestsByRequestIdResponses] + +export type GetFalAiFashnTryonV16RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/fashn/tryon/v1.6/requests/{request_id}/status' +} + +export type GetFalAiFashnTryonV16RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFashnTryonV16RequestsByRequestIdStatusResponse = + GetFalAiFashnTryonV16RequestsByRequestIdStatusResponses[keyof GetFalAiFashnTryonV16RequestsByRequestIdStatusResponses] + +export type PutFalAiFashnTryonV16RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fashn/tryon/v1.6/requests/{request_id}/cancel' +} + +export type PutFalAiFashnTryonV16RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. 
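Cancellation is a `PUT` with no body; the generated 200 shape is just `{ success?: boolean }`. A sketch against the flux-kontext-lora cancel route defined above, with the host and auth format again assumed:

```ts
// Sketch: cancels a queued flux-kontext-lora request.
async function cancelFluxKontextLora(
  requestId: string,
  falKey: string,
): Promise<PutFalAiFluxKontextLoraRequestsByRequestIdCancelResponse> {
  const url = `https://queue.fal.run/fal-ai/flux-kontext-lora/requests/${encodeURIComponent(requestId)}/cancel`
  const res = await fetch(url, {
    method: 'PUT',
    headers: { Authorization: `Key ${falKey}` }, // assumed auth format
  })
  if (!res.ok) throw new Error(`cancel failed: ${res.status}`)
  return (await res.json()) as PutFalAiFluxKontextLoraRequestsByRequestIdCancelResponse
}
```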
+ */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFashnTryonV16RequestsByRequestIdCancelResponse = + PutFalAiFashnTryonV16RequestsByRequestIdCancelResponses[keyof PutFalAiFashnTryonV16RequestsByRequestIdCancelResponses] + +export type PostFalAiFashnTryonV16Data = { + body: SchemaFashnTryonV16Input + path?: never + query?: never + url: '/fal-ai/fashn/tryon/v1.6' +} + +export type PostFalAiFashnTryonV16Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFashnTryonV16Response = + PostFalAiFashnTryonV16Responses[keyof PostFalAiFashnTryonV16Responses] + +export type GetFalAiFashnTryonV16RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fashn/tryon/v1.6/requests/{request_id}' +} + +export type GetFalAiFashnTryonV16RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFashnTryonV16Output +} + +export type GetFalAiFashnTryonV16RequestsByRequestIdResponse = + GetFalAiFashnTryonV16RequestsByRequestIdResponses[keyof GetFalAiFashnTryonV16RequestsByRequestIdResponses] + +export type GetFalAiChainOfZoomRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/chain-of-zoom/requests/{request_id}/status' +} + +export type GetFalAiChainOfZoomRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiChainOfZoomRequestsByRequestIdStatusResponse = + GetFalAiChainOfZoomRequestsByRequestIdStatusResponses[keyof GetFalAiChainOfZoomRequestsByRequestIdStatusResponses] + +export type PutFalAiChainOfZoomRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/chain-of-zoom/requests/{request_id}/cancel' +} + +export type PutFalAiChainOfZoomRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiChainOfZoomRequestsByRequestIdCancelResponse = + PutFalAiChainOfZoomRequestsByRequestIdCancelResponses[keyof PutFalAiChainOfZoomRequestsByRequestIdCancelResponses] + +export type PostFalAiChainOfZoomData = { + body: SchemaChainOfZoomInput + path?: never + query?: never + url: '/fal-ai/chain-of-zoom' +} + +export type PostFalAiChainOfZoomResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiChainOfZoomResponse = + PostFalAiChainOfZoomResponses[keyof PostFalAiChainOfZoomResponses] + +export type GetFalAiChainOfZoomRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/chain-of-zoom/requests/{request_id}' +} + +export type GetFalAiChainOfZoomRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaChainOfZoomOutput +} + +export type GetFalAiChainOfZoomRequestsByRequestIdResponse = + GetFalAiChainOfZoomRequestsByRequestIdResponses[keyof GetFalAiChainOfZoomRequestsByRequestIdResponses] + +export type GetFalAiPasdRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pasd/requests/{request_id}/status' +} + +export type GetFalAiPasdRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiPasdRequestsByRequestIdStatusResponse = + GetFalAiPasdRequestsByRequestIdStatusResponses[keyof GetFalAiPasdRequestsByRequestIdStatusResponses] + +export type PutFalAiPasdRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pasd/requests/{request_id}/cancel' +} + +export type PutFalAiPasdRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiPasdRequestsByRequestIdCancelResponse = + PutFalAiPasdRequestsByRequestIdCancelResponses[keyof PutFalAiPasdRequestsByRequestIdCancelResponses] + +export type PostFalAiPasdData = { + body: SchemaPasdInput + path?: never + query?: never + url: '/fal-ai/pasd' +} + +export type PostFalAiPasdResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPasdResponse = + PostFalAiPasdResponses[keyof PostFalAiPasdResponses] + +export type GetFalAiPasdRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pasd/requests/{request_id}' +} + +export type GetFalAiPasdRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPasdOutput +} + +export type GetFalAiPasdRequestsByRequestIdResponse = + GetFalAiPasdRequestsByRequestIdResponses[keyof GetFalAiPasdRequestsByRequestIdResponses] + +export type GetFalAiObjectRemovalBboxRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/object-removal/bbox/requests/{request_id}/status' +} + +export type GetFalAiObjectRemovalBboxRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiObjectRemovalBboxRequestsByRequestIdStatusResponse = + GetFalAiObjectRemovalBboxRequestsByRequestIdStatusResponses[keyof GetFalAiObjectRemovalBboxRequestsByRequestIdStatusResponses] + +export type PutFalAiObjectRemovalBboxRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/object-removal/bbox/requests/{request_id}/cancel' +} + +export type PutFalAiObjectRemovalBboxRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiObjectRemovalBboxRequestsByRequestIdCancelResponse = + PutFalAiObjectRemovalBboxRequestsByRequestIdCancelResponses[keyof PutFalAiObjectRemovalBboxRequestsByRequestIdCancelResponses] + +export type PostFalAiObjectRemovalBboxData = { + body: SchemaObjectRemovalBboxInput + path?: never + query?: never + url: '/fal-ai/object-removal/bbox' +} + +export type PostFalAiObjectRemovalBboxResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiObjectRemovalBboxResponse = + PostFalAiObjectRemovalBboxResponses[keyof PostFalAiObjectRemovalBboxResponses] + +export type GetFalAiObjectRemovalBboxRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/object-removal/bbox/requests/{request_id}' +} + +export type GetFalAiObjectRemovalBboxRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaObjectRemovalBboxOutput +} + +export type GetFalAiObjectRemovalBboxRequestsByRequestIdResponse = + GetFalAiObjectRemovalBboxRequestsByRequestIdResponses[keyof GetFalAiObjectRemovalBboxRequestsByRequestIdResponses] + +export type GetFalAiObjectRemovalMaskRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/object-removal/mask/requests/{request_id}/status' +} + +export type GetFalAiObjectRemovalMaskRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiObjectRemovalMaskRequestsByRequestIdStatusResponse = + GetFalAiObjectRemovalMaskRequestsByRequestIdStatusResponses[keyof GetFalAiObjectRemovalMaskRequestsByRequestIdStatusResponses] + +export type PutFalAiObjectRemovalMaskRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/object-removal/mask/requests/{request_id}/cancel' +} + +export type PutFalAiObjectRemovalMaskRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiObjectRemovalMaskRequestsByRequestIdCancelResponse = + PutFalAiObjectRemovalMaskRequestsByRequestIdCancelResponses[keyof PutFalAiObjectRemovalMaskRequestsByRequestIdCancelResponses] + +export type PostFalAiObjectRemovalMaskData = { + body: SchemaObjectRemovalMaskInput + path?: never + query?: never + url: '/fal-ai/object-removal/mask' +} + +export type PostFalAiObjectRemovalMaskResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiObjectRemovalMaskResponse = + PostFalAiObjectRemovalMaskResponses[keyof PostFalAiObjectRemovalMaskResponses] + +export type GetFalAiObjectRemovalMaskRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/object-removal/mask/requests/{request_id}' +} + +export type GetFalAiObjectRemovalMaskRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaObjectRemovalMaskOutput +} + +export type GetFalAiObjectRemovalMaskRequestsByRequestIdResponse = + GetFalAiObjectRemovalMaskRequestsByRequestIdResponses[keyof GetFalAiObjectRemovalMaskRequestsByRequestIdResponses] + +export type GetFalAiObjectRemovalRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/object-removal/requests/{request_id}/status' +} + +export type GetFalAiObjectRemovalRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiObjectRemovalRequestsByRequestIdStatusResponse = + GetFalAiObjectRemovalRequestsByRequestIdStatusResponses[keyof GetFalAiObjectRemovalRequestsByRequestIdStatusResponses] + +export type PutFalAiObjectRemovalRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/object-removal/requests/{request_id}/cancel' +} + +export type PutFalAiObjectRemovalRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiObjectRemovalRequestsByRequestIdCancelResponse = + PutFalAiObjectRemovalRequestsByRequestIdCancelResponses[keyof PutFalAiObjectRemovalRequestsByRequestIdCancelResponses] + +export type PostFalAiObjectRemovalData = { + body: SchemaObjectRemovalInput + path?: never + query?: never + url: '/fal-ai/object-removal' +} + +export type PostFalAiObjectRemovalResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiObjectRemovalResponse = + PostFalAiObjectRemovalResponses[keyof PostFalAiObjectRemovalResponses] + +export type GetFalAiObjectRemovalRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/object-removal/requests/{request_id}' +} + +export type GetFalAiObjectRemovalRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaObjectRemovalOutput +} + +export type GetFalAiObjectRemovalRequestsByRequestIdResponse = + GetFalAiObjectRemovalRequestsByRequestIdResponses[keyof GetFalAiObjectRemovalRequestsByRequestIdResponses] + +export type GetFalAiRecraftVectorizeRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/recraft/vectorize/requests/{request_id}/status' +} + +export type GetFalAiRecraftVectorizeRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiRecraftVectorizeRequestsByRequestIdStatusResponse = + GetFalAiRecraftVectorizeRequestsByRequestIdStatusResponses[keyof GetFalAiRecraftVectorizeRequestsByRequestIdStatusResponses] + +export type PutFalAiRecraftVectorizeRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/recraft/vectorize/requests/{request_id}/cancel' +} + +export type PutFalAiRecraftVectorizeRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
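Once a request completes, the plain `requests/{request_id}` route returns the endpoint's output schema. A generic fetch sketch, typed here with the object-removal output from above; the URL and key handling are illustrative assumptions:

```ts
// Generic result fetch; T is the endpoint's generated output schema.
async function getResult<T>(resultUrl: string, falKey: string): Promise<T> {
  const res = await fetch(resultUrl, {
    headers: { Authorization: `Key ${falKey}` }, // assumed auth format
  })
  if (!res.ok) throw new Error(`result failed: ${res.status}`)
  return (await res.json()) as T
}

// Usage against the object-removal result route defined above
// (request id and host are illustrative):
// const out = await getResult<SchemaObjectRemovalOutput>(
//   'https://queue.fal.run/fal-ai/object-removal/requests/abc-123',
//   myFalKey,
// )
```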
+ */ + success?: boolean + } +} + +export type PutFalAiRecraftVectorizeRequestsByRequestIdCancelResponse = + PutFalAiRecraftVectorizeRequestsByRequestIdCancelResponses[keyof PutFalAiRecraftVectorizeRequestsByRequestIdCancelResponses] + +export type PostFalAiRecraftVectorizeData = { + body: SchemaRecraftVectorizeInput + path?: never + query?: never + url: '/fal-ai/recraft/vectorize' +} + +export type PostFalAiRecraftVectorizeResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiRecraftVectorizeResponse = + PostFalAiRecraftVectorizeResponses[keyof PostFalAiRecraftVectorizeResponses] + +export type GetFalAiRecraftVectorizeRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/recraft/vectorize/requests/{request_id}' +} + +export type GetFalAiRecraftVectorizeRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaRecraftVectorizeOutput +} + +export type GetFalAiRecraftVectorizeRequestsByRequestIdResponse = + GetFalAiRecraftVectorizeRequestsByRequestIdResponses[keyof GetFalAiRecraftVectorizeRequestsByRequestIdResponses] + +export type GetFalAiFfmpegApiExtractFrameRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ffmpeg-api/extract-frame/requests/{request_id}/status' +} + +export type GetFalAiFfmpegApiExtractFrameRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFfmpegApiExtractFrameRequestsByRequestIdStatusResponse = + GetFalAiFfmpegApiExtractFrameRequestsByRequestIdStatusResponses[keyof GetFalAiFfmpegApiExtractFrameRequestsByRequestIdStatusResponses] + +export type PutFalAiFfmpegApiExtractFrameRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ffmpeg-api/extract-frame/requests/{request_id}/cancel' +} + +export type PutFalAiFfmpegApiExtractFrameRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFfmpegApiExtractFrameRequestsByRequestIdCancelResponse = + PutFalAiFfmpegApiExtractFrameRequestsByRequestIdCancelResponses[keyof PutFalAiFfmpegApiExtractFrameRequestsByRequestIdCancelResponses] + +export type PostFalAiFfmpegApiExtractFrameData = { + body: SchemaFfmpegApiExtractFrameInput + path?: never + query?: never + url: '/fal-ai/ffmpeg-api/extract-frame' +} + +export type PostFalAiFfmpegApiExtractFrameResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFfmpegApiExtractFrameResponse = + PostFalAiFfmpegApiExtractFrameResponses[keyof PostFalAiFfmpegApiExtractFrameResponses] + +export type GetFalAiFfmpegApiExtractFrameRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ffmpeg-api/extract-frame/requests/{request_id}' +} + +export type GetFalAiFfmpegApiExtractFrameRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaFfmpegApiExtractFrameOutput +} + +export type GetFalAiFfmpegApiExtractFrameRequestsByRequestIdResponse = + GetFalAiFfmpegApiExtractFrameRequestsByRequestIdResponses[keyof GetFalAiFfmpegApiExtractFrameRequestsByRequestIdResponses] + +export type GetFalAiLumaPhotonFlashModifyRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/luma-photon/flash/modify/requests/{request_id}/status' +} + +export type GetFalAiLumaPhotonFlashModifyRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLumaPhotonFlashModifyRequestsByRequestIdStatusResponse = + GetFalAiLumaPhotonFlashModifyRequestsByRequestIdStatusResponses[keyof GetFalAiLumaPhotonFlashModifyRequestsByRequestIdStatusResponses] + +export type PutFalAiLumaPhotonFlashModifyRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/luma-photon/flash/modify/requests/{request_id}/cancel' +} + +export type PutFalAiLumaPhotonFlashModifyRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLumaPhotonFlashModifyRequestsByRequestIdCancelResponse = + PutFalAiLumaPhotonFlashModifyRequestsByRequestIdCancelResponses[keyof PutFalAiLumaPhotonFlashModifyRequestsByRequestIdCancelResponses] + +export type PostFalAiLumaPhotonFlashModifyData = { + body: SchemaLumaPhotonFlashModifyInput + path?: never + query?: never + url: '/fal-ai/luma-photon/flash/modify' +} + +export type PostFalAiLumaPhotonFlashModifyResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLumaPhotonFlashModifyResponse = + PostFalAiLumaPhotonFlashModifyResponses[keyof PostFalAiLumaPhotonFlashModifyResponses] + +export type GetFalAiLumaPhotonFlashModifyRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/luma-photon/flash/modify/requests/{request_id}' +} + +export type GetFalAiLumaPhotonFlashModifyRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLumaPhotonFlashModifyOutput +} + +export type GetFalAiLumaPhotonFlashModifyRequestsByRequestIdResponse = + GetFalAiLumaPhotonFlashModifyRequestsByRequestIdResponses[keyof GetFalAiLumaPhotonFlashModifyRequestsByRequestIdResponses] + +export type GetFalAiLumaPhotonModifyRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/luma-photon/modify/requests/{request_id}/status' +} + +export type GetFalAiLumaPhotonModifyRequestsByRequestIdStatusResponses = { + /** + * The request status. 
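Every `...Response` alias in this file, e.g. `GetFalAiLumaPhotonFlashModifyRequestsByRequestIdResponse` above, is built by indexing the status-code map with `keyof` itself. A compact illustration of why that yields the union of all declared response bodies:

```ts
// The `Responses[keyof Responses]` idiom: indexing a mapped type with the
// union of its own keys produces the union of its value types.
type DemoResponses = {
  200: { ok: true }
  404: { error: string }
}
// keyof DemoResponses is 200 | 404, so:
type DemoResponse = DemoResponses[keyof DemoResponses]
// DemoResponse is { ok: true } | { error: string }
```

With a single 200 entry, as in the generated types here, the union degenerates to that one body type.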
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiLumaPhotonModifyRequestsByRequestIdStatusResponse = + GetFalAiLumaPhotonModifyRequestsByRequestIdStatusResponses[keyof GetFalAiLumaPhotonModifyRequestsByRequestIdStatusResponses] + +export type PutFalAiLumaPhotonModifyRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/luma-photon/modify/requests/{request_id}/cancel' +} + +export type PutFalAiLumaPhotonModifyRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLumaPhotonModifyRequestsByRequestIdCancelResponse = + PutFalAiLumaPhotonModifyRequestsByRequestIdCancelResponses[keyof PutFalAiLumaPhotonModifyRequestsByRequestIdCancelResponses] + +export type PostFalAiLumaPhotonModifyData = { + body: SchemaLumaPhotonModifyInput + path?: never + query?: never + url: '/fal-ai/luma-photon/modify' +} + +export type PostFalAiLumaPhotonModifyResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLumaPhotonModifyResponse = + PostFalAiLumaPhotonModifyResponses[keyof PostFalAiLumaPhotonModifyResponses] + +export type GetFalAiLumaPhotonModifyRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/luma-photon/modify/requests/{request_id}' +} + +export type GetFalAiLumaPhotonModifyRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLumaPhotonModifyOutput +} + +export type GetFalAiLumaPhotonModifyRequestsByRequestIdResponse = + GetFalAiLumaPhotonModifyRequestsByRequestIdResponses[keyof GetFalAiLumaPhotonModifyRequestsByRequestIdResponses] + +export type GetFalAiImageEditingReframeRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-editing/reframe/requests/{request_id}/status' +} + +export type GetFalAiImageEditingReframeRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiImageEditingReframeRequestsByRequestIdStatusResponse = + GetFalAiImageEditingReframeRequestsByRequestIdStatusResponses[keyof GetFalAiImageEditingReframeRequestsByRequestIdStatusResponses] + +export type PutFalAiImageEditingReframeRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/reframe/requests/{request_id}/cancel' +} + +export type PutFalAiImageEditingReframeRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiImageEditingReframeRequestsByRequestIdCancelResponse = + PutFalAiImageEditingReframeRequestsByRequestIdCancelResponses[keyof PutFalAiImageEditingReframeRequestsByRequestIdCancelResponses] + +export type PostFalAiImageEditingReframeData = { + body: SchemaImageEditingReframeInput + path?: never + query?: never + url: '/fal-ai/image-editing/reframe' +} + +export type PostFalAiImageEditingReframeResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageEditingReframeResponse = + PostFalAiImageEditingReframeResponses[keyof PostFalAiImageEditingReframeResponses] + +export type GetFalAiImageEditingReframeRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/reframe/requests/{request_id}' +} + +export type GetFalAiImageEditingReframeRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImageEditingReframeOutput +} + +export type GetFalAiImageEditingReframeRequestsByRequestIdResponse = + GetFalAiImageEditingReframeRequestsByRequestIdResponses[keyof GetFalAiImageEditingReframeRequestsByRequestIdResponses] + +export type GetFalAiImageEditingBabyVersionRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-editing/baby-version/requests/{request_id}/status' +} + +export type GetFalAiImageEditingBabyVersionRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiImageEditingBabyVersionRequestsByRequestIdStatusResponse = + GetFalAiImageEditingBabyVersionRequestsByRequestIdStatusResponses[keyof GetFalAiImageEditingBabyVersionRequestsByRequestIdStatusResponses] + +export type PutFalAiImageEditingBabyVersionRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/baby-version/requests/{request_id}/cancel' +} + +export type PutFalAiImageEditingBabyVersionRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiImageEditingBabyVersionRequestsByRequestIdCancelResponse = + PutFalAiImageEditingBabyVersionRequestsByRequestIdCancelResponses[keyof PutFalAiImageEditingBabyVersionRequestsByRequestIdCancelResponses] + +export type PostFalAiImageEditingBabyVersionData = { + body: SchemaImageEditingBabyVersionInput + path?: never + query?: never + url: '/fal-ai/image-editing/baby-version' +} + +export type PostFalAiImageEditingBabyVersionResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageEditingBabyVersionResponse = + PostFalAiImageEditingBabyVersionResponses[keyof PostFalAiImageEditingBabyVersionResponses] + +export type GetFalAiImageEditingBabyVersionRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/baby-version/requests/{request_id}' +} + +export type GetFalAiImageEditingBabyVersionRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImageEditingBabyVersionOutput +} + +export type GetFalAiImageEditingBabyVersionRequestsByRequestIdResponse = + GetFalAiImageEditingBabyVersionRequestsByRequestIdResponses[keyof GetFalAiImageEditingBabyVersionRequestsByRequestIdResponses] + +export type GetFalAiLumaPhotonFlashReframeRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/luma-photon/flash/reframe/requests/{request_id}/status' +} + +export type GetFalAiLumaPhotonFlashReframeRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLumaPhotonFlashReframeRequestsByRequestIdStatusResponse = + GetFalAiLumaPhotonFlashReframeRequestsByRequestIdStatusResponses[keyof GetFalAiLumaPhotonFlashReframeRequestsByRequestIdStatusResponses] + +export type PutFalAiLumaPhotonFlashReframeRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/luma-photon/flash/reframe/requests/{request_id}/cancel' +} + +export type PutFalAiLumaPhotonFlashReframeRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLumaPhotonFlashReframeRequestsByRequestIdCancelResponse = + PutFalAiLumaPhotonFlashReframeRequestsByRequestIdCancelResponses[keyof PutFalAiLumaPhotonFlashReframeRequestsByRequestIdCancelResponses] + +export type PostFalAiLumaPhotonFlashReframeData = { + body: SchemaLumaPhotonFlashReframeInput + path?: never + query?: never + url: '/fal-ai/luma-photon/flash/reframe' +} + +export type PostFalAiLumaPhotonFlashReframeResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLumaPhotonFlashReframeResponse = + PostFalAiLumaPhotonFlashReframeResponses[keyof PostFalAiLumaPhotonFlashReframeResponses] + +export type GetFalAiLumaPhotonFlashReframeRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/luma-photon/flash/reframe/requests/{request_id}' +} + +export type GetFalAiLumaPhotonFlashReframeRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLumaPhotonFlashReframeOutput +} + +export type GetFalAiLumaPhotonFlashReframeRequestsByRequestIdResponse = + GetFalAiLumaPhotonFlashReframeRequestsByRequestIdResponses[keyof GetFalAiLumaPhotonFlashReframeRequestsByRequestIdResponses] + +export type GetFalAiLumaPhotonReframeRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/luma-photon/reframe/requests/{request_id}/status' +} + +export type GetFalAiLumaPhotonReframeRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLumaPhotonReframeRequestsByRequestIdStatusResponse = + GetFalAiLumaPhotonReframeRequestsByRequestIdStatusResponses[keyof GetFalAiLumaPhotonReframeRequestsByRequestIdStatusResponses] + +export type PutFalAiLumaPhotonReframeRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/luma-photon/reframe/requests/{request_id}/cancel' +} + +export type PutFalAiLumaPhotonReframeRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiLumaPhotonReframeRequestsByRequestIdCancelResponse = + PutFalAiLumaPhotonReframeRequestsByRequestIdCancelResponses[keyof PutFalAiLumaPhotonReframeRequestsByRequestIdCancelResponses] + +export type PostFalAiLumaPhotonReframeData = { + body: SchemaLumaPhotonReframeInput + path?: never + query?: never + url: '/fal-ai/luma-photon/reframe' +} + +export type PostFalAiLumaPhotonReframeResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLumaPhotonReframeResponse = + PostFalAiLumaPhotonReframeResponses[keyof PostFalAiLumaPhotonReframeResponses] + +export type GetFalAiLumaPhotonReframeRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/luma-photon/reframe/requests/{request_id}' +} + +export type GetFalAiLumaPhotonReframeRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLumaPhotonReframeOutput +} + +export type GetFalAiLumaPhotonReframeRequestsByRequestIdResponse = + GetFalAiLumaPhotonReframeRequestsByRequestIdResponses[keyof GetFalAiLumaPhotonReframeRequestsByRequestIdResponses] + +export type GetFalAiFlux1SchnellReduxRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-1/schnell/redux/requests/{request_id}/status' +} + +export type GetFalAiFlux1SchnellReduxRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlux1SchnellReduxRequestsByRequestIdStatusResponse = + GetFalAiFlux1SchnellReduxRequestsByRequestIdStatusResponses[keyof GetFalAiFlux1SchnellReduxRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux1SchnellReduxRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-1/schnell/redux/requests/{request_id}/cancel' +} + +export type PutFalAiFlux1SchnellReduxRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFlux1SchnellReduxRequestsByRequestIdCancelResponse = + PutFalAiFlux1SchnellReduxRequestsByRequestIdCancelResponses[keyof PutFalAiFlux1SchnellReduxRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux1SchnellReduxData = { + body: SchemaFlux1SchnellReduxInput + path?: never + query?: never + url: '/fal-ai/flux-1/schnell/redux' +} + +export type PostFalAiFlux1SchnellReduxResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux1SchnellReduxResponse = + PostFalAiFlux1SchnellReduxResponses[keyof PostFalAiFlux1SchnellReduxResponses] + +export type GetFalAiFlux1SchnellReduxRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-1/schnell/redux/requests/{request_id}' +} + +export type GetFalAiFlux1SchnellReduxRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaFlux1SchnellReduxOutput +} + +export type GetFalAiFlux1SchnellReduxRequestsByRequestIdResponse = + GetFalAiFlux1SchnellReduxRequestsByRequestIdResponses[keyof GetFalAiFlux1SchnellReduxRequestsByRequestIdResponses] + +export type GetFalAiFlux1DevReduxRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-1/dev/redux/requests/{request_id}/status' +} + +export type GetFalAiFlux1DevReduxRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlux1DevReduxRequestsByRequestIdStatusResponse = + GetFalAiFlux1DevReduxRequestsByRequestIdStatusResponses[keyof GetFalAiFlux1DevReduxRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux1DevReduxRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-1/dev/redux/requests/{request_id}/cancel' +} + +export type PutFalAiFlux1DevReduxRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFlux1DevReduxRequestsByRequestIdCancelResponse = + PutFalAiFlux1DevReduxRequestsByRequestIdCancelResponses[keyof PutFalAiFlux1DevReduxRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux1DevReduxData = { + body: SchemaFlux1DevReduxInput + path?: never + query?: never + url: '/fal-ai/flux-1/dev/redux' +} + +export type PostFalAiFlux1DevReduxResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux1DevReduxResponse = + PostFalAiFlux1DevReduxResponses[keyof PostFalAiFlux1DevReduxResponses] + +export type GetFalAiFlux1DevReduxRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-1/dev/redux/requests/{request_id}' +} + +export type GetFalAiFlux1DevReduxRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux1DevReduxOutput +} + +export type GetFalAiFlux1DevReduxRequestsByRequestIdResponse = + GetFalAiFlux1DevReduxRequestsByRequestIdResponses[keyof GetFalAiFlux1DevReduxRequestsByRequestIdResponses] + +export type GetFalAiFlux1DevImageToImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-1/dev/image-to-image/requests/{request_id}/status' +} + +export type GetFalAiFlux1DevImageToImageRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlux1DevImageToImageRequestsByRequestIdStatusResponse = + GetFalAiFlux1DevImageToImageRequestsByRequestIdStatusResponses[keyof GetFalAiFlux1DevImageToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux1DevImageToImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-1/dev/image-to-image/requests/{request_id}/cancel' +} + +export type PutFalAiFlux1DevImageToImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. 
+ */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFlux1DevImageToImageRequestsByRequestIdCancelResponse = + PutFalAiFlux1DevImageToImageRequestsByRequestIdCancelResponses[keyof PutFalAiFlux1DevImageToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux1DevImageToImageData = { + body: SchemaFlux1DevImageToImageInput + path?: never + query?: never + url: '/fal-ai/flux-1/dev/image-to-image' +} + +export type PostFalAiFlux1DevImageToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux1DevImageToImageResponse = + PostFalAiFlux1DevImageToImageResponses[keyof PostFalAiFlux1DevImageToImageResponses] + +export type GetFalAiFlux1DevImageToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-1/dev/image-to-image/requests/{request_id}' +} + +export type GetFalAiFlux1DevImageToImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux1DevImageToImageOutput +} + +export type GetFalAiFlux1DevImageToImageRequestsByRequestIdResponse = + GetFalAiFlux1DevImageToImageRequestsByRequestIdResponses[keyof GetFalAiFlux1DevImageToImageRequestsByRequestIdResponses] + +export type GetFalAiImageEditingTextRemovalRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-editing/text-removal/requests/{request_id}/status' +} + +export type GetFalAiImageEditingTextRemovalRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiImageEditingTextRemovalRequestsByRequestIdStatusResponse = + GetFalAiImageEditingTextRemovalRequestsByRequestIdStatusResponses[keyof GetFalAiImageEditingTextRemovalRequestsByRequestIdStatusResponses] + +export type PutFalAiImageEditingTextRemovalRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/text-removal/requests/{request_id}/cancel' +} + +export type PutFalAiImageEditingTextRemovalRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiImageEditingTextRemovalRequestsByRequestIdCancelResponse = + PutFalAiImageEditingTextRemovalRequestsByRequestIdCancelResponses[keyof PutFalAiImageEditingTextRemovalRequestsByRequestIdCancelResponses] + +export type PostFalAiImageEditingTextRemovalData = { + body: SchemaImageEditingTextRemovalInput + path?: never + query?: never + url: '/fal-ai/image-editing/text-removal' +} + +export type PostFalAiImageEditingTextRemovalResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageEditingTextRemovalResponse = + PostFalAiImageEditingTextRemovalResponses[keyof PostFalAiImageEditingTextRemovalResponses] + +export type GetFalAiImageEditingTextRemovalRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/text-removal/requests/{request_id}' +} + +export type GetFalAiImageEditingTextRemovalRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaImageEditingTextRemovalOutput +} + +export type GetFalAiImageEditingTextRemovalRequestsByRequestIdResponse = + GetFalAiImageEditingTextRemovalRequestsByRequestIdResponses[keyof GetFalAiImageEditingTextRemovalRequestsByRequestIdResponses] + +export type GetFalAiImageEditingPhotoRestorationRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-editing/photo-restoration/requests/{request_id}/status' + } + +export type GetFalAiImageEditingPhotoRestorationRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiImageEditingPhotoRestorationRequestsByRequestIdStatusResponse = + GetFalAiImageEditingPhotoRestorationRequestsByRequestIdStatusResponses[keyof GetFalAiImageEditingPhotoRestorationRequestsByRequestIdStatusResponses] + +export type PutFalAiImageEditingPhotoRestorationRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/photo-restoration/requests/{request_id}/cancel' + } + +export type PutFalAiImageEditingPhotoRestorationRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiImageEditingPhotoRestorationRequestsByRequestIdCancelResponse = + PutFalAiImageEditingPhotoRestorationRequestsByRequestIdCancelResponses[keyof PutFalAiImageEditingPhotoRestorationRequestsByRequestIdCancelResponses] + +export type PostFalAiImageEditingPhotoRestorationData = { + body: SchemaImageEditingPhotoRestorationInput + path?: never + query?: never + url: '/fal-ai/image-editing/photo-restoration' +} + +export type PostFalAiImageEditingPhotoRestorationResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageEditingPhotoRestorationResponse = + PostFalAiImageEditingPhotoRestorationResponses[keyof PostFalAiImageEditingPhotoRestorationResponses] + +export type GetFalAiImageEditingPhotoRestorationRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/photo-restoration/requests/{request_id}' +} + +export type GetFalAiImageEditingPhotoRestorationRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImageEditingPhotoRestorationOutput +} + +export type GetFalAiImageEditingPhotoRestorationRequestsByRequestIdResponse = + GetFalAiImageEditingPhotoRestorationRequestsByRequestIdResponses[keyof GetFalAiImageEditingPhotoRestorationRequestsByRequestIdResponses] + +export type GetFalAiImageEditingWeatherEffectRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-editing/weather-effect/requests/{request_id}/status' +} + +export type GetFalAiImageEditingWeatherEffectRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
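+ *
+ * A small polling sketch for this status route; `pollStatus` is a
+ * hypothetical helper (not generated code), and the host and auth scheme
+ * are the same assumptions noted in the lifecycle example earlier in this
+ * file.
+ *
+ * @example
+ * async function pollStatus(requestId: string): Promise<SchemaQueueStatus> {
+ *   // `logs=1` asks the queue to include logs in the response (see the
+ *   // `logs` query parameter on the corresponding `...StatusData` type)
+ *   const res = await fetch(
+ *     'https://queue.fal.run/fal-ai/image-editing/weather-effect/requests/' +
+ *       `${encodeURIComponent(requestId)}/status?logs=1`,
+ *     { headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+ *   )
+ *   return res.json()
+ * }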
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiImageEditingWeatherEffectRequestsByRequestIdStatusResponse = + GetFalAiImageEditingWeatherEffectRequestsByRequestIdStatusResponses[keyof GetFalAiImageEditingWeatherEffectRequestsByRequestIdStatusResponses] + +export type PutFalAiImageEditingWeatherEffectRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/weather-effect/requests/{request_id}/cancel' +} + +export type PutFalAiImageEditingWeatherEffectRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiImageEditingWeatherEffectRequestsByRequestIdCancelResponse = + PutFalAiImageEditingWeatherEffectRequestsByRequestIdCancelResponses[keyof PutFalAiImageEditingWeatherEffectRequestsByRequestIdCancelResponses] + +export type PostFalAiImageEditingWeatherEffectData = { + body: SchemaImageEditingWeatherEffectInput + path?: never + query?: never + url: '/fal-ai/image-editing/weather-effect' +} + +export type PostFalAiImageEditingWeatherEffectResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageEditingWeatherEffectResponse = + PostFalAiImageEditingWeatherEffectResponses[keyof PostFalAiImageEditingWeatherEffectResponses] + +export type GetFalAiImageEditingWeatherEffectRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/weather-effect/requests/{request_id}' +} + +export type GetFalAiImageEditingWeatherEffectRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImageEditingWeatherEffectOutput +} + +export type GetFalAiImageEditingWeatherEffectRequestsByRequestIdResponse = + GetFalAiImageEditingWeatherEffectRequestsByRequestIdResponses[keyof GetFalAiImageEditingWeatherEffectRequestsByRequestIdResponses] + +export type GetFalAiImageEditingTimeOfDayRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-editing/time-of-day/requests/{request_id}/status' +} + +export type GetFalAiImageEditingTimeOfDayRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiImageEditingTimeOfDayRequestsByRequestIdStatusResponse = + GetFalAiImageEditingTimeOfDayRequestsByRequestIdStatusResponses[keyof GetFalAiImageEditingTimeOfDayRequestsByRequestIdStatusResponses] + +export type PutFalAiImageEditingTimeOfDayRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/time-of-day/requests/{request_id}/cancel' +} + +export type PutFalAiImageEditingTimeOfDayRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiImageEditingTimeOfDayRequestsByRequestIdCancelResponse = + PutFalAiImageEditingTimeOfDayRequestsByRequestIdCancelResponses[keyof PutFalAiImageEditingTimeOfDayRequestsByRequestIdCancelResponses] + +export type PostFalAiImageEditingTimeOfDayData = { + body: SchemaImageEditingTimeOfDayInput + path?: never + query?: never + url: '/fal-ai/image-editing/time-of-day' +} + +export type PostFalAiImageEditingTimeOfDayResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageEditingTimeOfDayResponse = + PostFalAiImageEditingTimeOfDayResponses[keyof PostFalAiImageEditingTimeOfDayResponses] + +export type GetFalAiImageEditingTimeOfDayRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/time-of-day/requests/{request_id}' +} + +export type GetFalAiImageEditingTimeOfDayRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImageEditingTimeOfDayOutput +} + +export type GetFalAiImageEditingTimeOfDayRequestsByRequestIdResponse = + GetFalAiImageEditingTimeOfDayRequestsByRequestIdResponses[keyof GetFalAiImageEditingTimeOfDayRequestsByRequestIdResponses] + +export type GetFalAiImageEditingStyleTransferRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-editing/style-transfer/requests/{request_id}/status' +} + +export type GetFalAiImageEditingStyleTransferRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiImageEditingStyleTransferRequestsByRequestIdStatusResponse = + GetFalAiImageEditingStyleTransferRequestsByRequestIdStatusResponses[keyof GetFalAiImageEditingStyleTransferRequestsByRequestIdStatusResponses] + +export type PutFalAiImageEditingStyleTransferRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/style-transfer/requests/{request_id}/cancel' +} + +export type PutFalAiImageEditingStyleTransferRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiImageEditingStyleTransferRequestsByRequestIdCancelResponse = + PutFalAiImageEditingStyleTransferRequestsByRequestIdCancelResponses[keyof PutFalAiImageEditingStyleTransferRequestsByRequestIdCancelResponses] + +export type PostFalAiImageEditingStyleTransferData = { + body: SchemaImageEditingStyleTransferInput + path?: never + query?: never + url: '/fal-ai/image-editing/style-transfer' +} + +export type PostFalAiImageEditingStyleTransferResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageEditingStyleTransferResponse = + PostFalAiImageEditingStyleTransferResponses[keyof PostFalAiImageEditingStyleTransferResponses] + +export type GetFalAiImageEditingStyleTransferRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/style-transfer/requests/{request_id}' +} + +export type GetFalAiImageEditingStyleTransferRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaImageEditingStyleTransferOutput +} + +export type GetFalAiImageEditingStyleTransferRequestsByRequestIdResponse = + GetFalAiImageEditingStyleTransferRequestsByRequestIdResponses[keyof GetFalAiImageEditingStyleTransferRequestsByRequestIdResponses] + +export type GetFalAiImageEditingSceneCompositionRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-editing/scene-composition/requests/{request_id}/status' + } + +export type GetFalAiImageEditingSceneCompositionRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiImageEditingSceneCompositionRequestsByRequestIdStatusResponse = + GetFalAiImageEditingSceneCompositionRequestsByRequestIdStatusResponses[keyof GetFalAiImageEditingSceneCompositionRequestsByRequestIdStatusResponses] + +export type PutFalAiImageEditingSceneCompositionRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/scene-composition/requests/{request_id}/cancel' + } + +export type PutFalAiImageEditingSceneCompositionRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiImageEditingSceneCompositionRequestsByRequestIdCancelResponse = + PutFalAiImageEditingSceneCompositionRequestsByRequestIdCancelResponses[keyof PutFalAiImageEditingSceneCompositionRequestsByRequestIdCancelResponses] + +export type PostFalAiImageEditingSceneCompositionData = { + body: SchemaImageEditingSceneCompositionInput + path?: never + query?: never + url: '/fal-ai/image-editing/scene-composition' +} + +export type PostFalAiImageEditingSceneCompositionResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageEditingSceneCompositionResponse = + PostFalAiImageEditingSceneCompositionResponses[keyof PostFalAiImageEditingSceneCompositionResponses] + +export type GetFalAiImageEditingSceneCompositionRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/scene-composition/requests/{request_id}' +} + +export type GetFalAiImageEditingSceneCompositionRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImageEditingSceneCompositionOutput +} + +export type GetFalAiImageEditingSceneCompositionRequestsByRequestIdResponse = + GetFalAiImageEditingSceneCompositionRequestsByRequestIdResponses[keyof GetFalAiImageEditingSceneCompositionRequestsByRequestIdResponses] + +export type GetFalAiImageEditingProfessionalPhotoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-editing/professional-photo/requests/{request_id}/status' + } + +export type GetFalAiImageEditingProfessionalPhotoRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiImageEditingProfessionalPhotoRequestsByRequestIdStatusResponse = + GetFalAiImageEditingProfessionalPhotoRequestsByRequestIdStatusResponses[keyof GetFalAiImageEditingProfessionalPhotoRequestsByRequestIdStatusResponses] + +export type PutFalAiImageEditingProfessionalPhotoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/professional-photo/requests/{request_id}/cancel' + } + +export type PutFalAiImageEditingProfessionalPhotoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiImageEditingProfessionalPhotoRequestsByRequestIdCancelResponse = + PutFalAiImageEditingProfessionalPhotoRequestsByRequestIdCancelResponses[keyof PutFalAiImageEditingProfessionalPhotoRequestsByRequestIdCancelResponses] + +export type PostFalAiImageEditingProfessionalPhotoData = { + body: SchemaImageEditingProfessionalPhotoInput + path?: never + query?: never + url: '/fal-ai/image-editing/professional-photo' +} + +export type PostFalAiImageEditingProfessionalPhotoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageEditingProfessionalPhotoResponse = + PostFalAiImageEditingProfessionalPhotoResponses[keyof PostFalAiImageEditingProfessionalPhotoResponses] + +export type GetFalAiImageEditingProfessionalPhotoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/professional-photo/requests/{request_id}' +} + +export type GetFalAiImageEditingProfessionalPhotoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaImageEditingProfessionalPhotoOutput + } + +export type GetFalAiImageEditingProfessionalPhotoRequestsByRequestIdResponse = + GetFalAiImageEditingProfessionalPhotoRequestsByRequestIdResponses[keyof GetFalAiImageEditingProfessionalPhotoRequestsByRequestIdResponses] + +export type GetFalAiImageEditingObjectRemovalRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-editing/object-removal/requests/{request_id}/status' +} + +export type GetFalAiImageEditingObjectRemovalRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiImageEditingObjectRemovalRequestsByRequestIdStatusResponse = + GetFalAiImageEditingObjectRemovalRequestsByRequestIdStatusResponses[keyof GetFalAiImageEditingObjectRemovalRequestsByRequestIdStatusResponses] + +export type PutFalAiImageEditingObjectRemovalRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/object-removal/requests/{request_id}/cancel' +} + +export type PutFalAiImageEditingObjectRemovalRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
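+ *
+ * The flag is optional, so a missing value is best read as "not
+ * confirmed" rather than as failure. Illustrative cancel call below; the
+ * host and auth scheme are assumptions, and `requestId` is hypothetical.
+ *
+ * @example
+ * declare const requestId: string
+ * const res = await fetch(
+ *   'https://queue.fal.run/fal-ai/image-editing/object-removal/requests/' +
+ *     `${encodeURIComponent(requestId)}/cancel`,
+ *   {
+ *     method: 'PUT',
+ *     headers: { Authorization: `Key ${process.env.FAL_KEY}` },
+ *   },
+ * )
+ * const cancelled: PutFalAiImageEditingObjectRemovalRequestsByRequestIdCancelResponse =
+ *   await res.json()
+ * if (cancelled.success !== true) {
+ *   // the request may already be running or finished; poll status to confirm
+ * }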
+ */ + success?: boolean + } + } + +export type PutFalAiImageEditingObjectRemovalRequestsByRequestIdCancelResponse = + PutFalAiImageEditingObjectRemovalRequestsByRequestIdCancelResponses[keyof PutFalAiImageEditingObjectRemovalRequestsByRequestIdCancelResponses] + +export type PostFalAiImageEditingObjectRemovalData = { + body: SchemaImageEditingObjectRemovalInput + path?: never + query?: never + url: '/fal-ai/image-editing/object-removal' +} + +export type PostFalAiImageEditingObjectRemovalResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageEditingObjectRemovalResponse = + PostFalAiImageEditingObjectRemovalResponses[keyof PostFalAiImageEditingObjectRemovalResponses] + +export type GetFalAiImageEditingObjectRemovalRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/object-removal/requests/{request_id}' +} + +export type GetFalAiImageEditingObjectRemovalRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImageEditingObjectRemovalOutput +} + +export type GetFalAiImageEditingObjectRemovalRequestsByRequestIdResponse = + GetFalAiImageEditingObjectRemovalRequestsByRequestIdResponses[keyof GetFalAiImageEditingObjectRemovalRequestsByRequestIdResponses] + +export type GetFalAiImageEditingHairChangeRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-editing/hair-change/requests/{request_id}/status' +} + +export type GetFalAiImageEditingHairChangeRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiImageEditingHairChangeRequestsByRequestIdStatusResponse = + GetFalAiImageEditingHairChangeRequestsByRequestIdStatusResponses[keyof GetFalAiImageEditingHairChangeRequestsByRequestIdStatusResponses] + +export type PutFalAiImageEditingHairChangeRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/hair-change/requests/{request_id}/cancel' +} + +export type PutFalAiImageEditingHairChangeRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiImageEditingHairChangeRequestsByRequestIdCancelResponse = + PutFalAiImageEditingHairChangeRequestsByRequestIdCancelResponses[keyof PutFalAiImageEditingHairChangeRequestsByRequestIdCancelResponses] + +export type PostFalAiImageEditingHairChangeData = { + body: SchemaImageEditingHairChangeInput + path?: never + query?: never + url: '/fal-ai/image-editing/hair-change' +} + +export type PostFalAiImageEditingHairChangeResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageEditingHairChangeResponse = + PostFalAiImageEditingHairChangeResponses[keyof PostFalAiImageEditingHairChangeResponses] + +export type GetFalAiImageEditingHairChangeRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/hair-change/requests/{request_id}' +} + +export type GetFalAiImageEditingHairChangeRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaImageEditingHairChangeOutput +} + +export type GetFalAiImageEditingHairChangeRequestsByRequestIdResponse = + GetFalAiImageEditingHairChangeRequestsByRequestIdResponses[keyof GetFalAiImageEditingHairChangeRequestsByRequestIdResponses] + +export type GetFalAiImageEditingFaceEnhancementRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-editing/face-enhancement/requests/{request_id}/status' +} + +export type GetFalAiImageEditingFaceEnhancementRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiImageEditingFaceEnhancementRequestsByRequestIdStatusResponse = + GetFalAiImageEditingFaceEnhancementRequestsByRequestIdStatusResponses[keyof GetFalAiImageEditingFaceEnhancementRequestsByRequestIdStatusResponses] + +export type PutFalAiImageEditingFaceEnhancementRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/face-enhancement/requests/{request_id}/cancel' +} + +export type PutFalAiImageEditingFaceEnhancementRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiImageEditingFaceEnhancementRequestsByRequestIdCancelResponse = + PutFalAiImageEditingFaceEnhancementRequestsByRequestIdCancelResponses[keyof PutFalAiImageEditingFaceEnhancementRequestsByRequestIdCancelResponses] + +export type PostFalAiImageEditingFaceEnhancementData = { + body: SchemaImageEditingFaceEnhancementInput + path?: never + query?: never + url: '/fal-ai/image-editing/face-enhancement' +} + +export type PostFalAiImageEditingFaceEnhancementResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageEditingFaceEnhancementResponse = + PostFalAiImageEditingFaceEnhancementResponses[keyof PostFalAiImageEditingFaceEnhancementResponses] + +export type GetFalAiImageEditingFaceEnhancementRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/face-enhancement/requests/{request_id}' +} + +export type GetFalAiImageEditingFaceEnhancementRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImageEditingFaceEnhancementOutput +} + +export type GetFalAiImageEditingFaceEnhancementRequestsByRequestIdResponse = + GetFalAiImageEditingFaceEnhancementRequestsByRequestIdResponses[keyof GetFalAiImageEditingFaceEnhancementRequestsByRequestIdResponses] + +export type GetFalAiImageEditingExpressionChangeRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-editing/expression-change/requests/{request_id}/status' + } + +export type GetFalAiImageEditingExpressionChangeRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiImageEditingExpressionChangeRequestsByRequestIdStatusResponse = + GetFalAiImageEditingExpressionChangeRequestsByRequestIdStatusResponses[keyof GetFalAiImageEditingExpressionChangeRequestsByRequestIdStatusResponses] + +export type PutFalAiImageEditingExpressionChangeRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/expression-change/requests/{request_id}/cancel' + } + +export type PutFalAiImageEditingExpressionChangeRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiImageEditingExpressionChangeRequestsByRequestIdCancelResponse = + PutFalAiImageEditingExpressionChangeRequestsByRequestIdCancelResponses[keyof PutFalAiImageEditingExpressionChangeRequestsByRequestIdCancelResponses] + +export type PostFalAiImageEditingExpressionChangeData = { + body: SchemaImageEditingExpressionChangeInput + path?: never + query?: never + url: '/fal-ai/image-editing/expression-change' +} + +export type PostFalAiImageEditingExpressionChangeResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageEditingExpressionChangeResponse = + PostFalAiImageEditingExpressionChangeResponses[keyof PostFalAiImageEditingExpressionChangeResponses] + +export type GetFalAiImageEditingExpressionChangeRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/expression-change/requests/{request_id}' +} + +export type GetFalAiImageEditingExpressionChangeRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImageEditingExpressionChangeOutput +} + +export type GetFalAiImageEditingExpressionChangeRequestsByRequestIdResponse = + GetFalAiImageEditingExpressionChangeRequestsByRequestIdResponses[keyof GetFalAiImageEditingExpressionChangeRequestsByRequestIdResponses] + +export type GetFalAiImageEditingColorCorrectionRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-editing/color-correction/requests/{request_id}/status' +} + +export type GetFalAiImageEditingColorCorrectionRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiImageEditingColorCorrectionRequestsByRequestIdStatusResponse = + GetFalAiImageEditingColorCorrectionRequestsByRequestIdStatusResponses[keyof GetFalAiImageEditingColorCorrectionRequestsByRequestIdStatusResponses] + +export type PutFalAiImageEditingColorCorrectionRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/color-correction/requests/{request_id}/cancel' +} + +export type PutFalAiImageEditingColorCorrectionRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiImageEditingColorCorrectionRequestsByRequestIdCancelResponse = + PutFalAiImageEditingColorCorrectionRequestsByRequestIdCancelResponses[keyof PutFalAiImageEditingColorCorrectionRequestsByRequestIdCancelResponses] + +export type PostFalAiImageEditingColorCorrectionData = { + body: SchemaImageEditingColorCorrectionInput + path?: never + query?: never + url: '/fal-ai/image-editing/color-correction' +} + +export type PostFalAiImageEditingColorCorrectionResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageEditingColorCorrectionResponse = + PostFalAiImageEditingColorCorrectionResponses[keyof PostFalAiImageEditingColorCorrectionResponses] + +export type GetFalAiImageEditingColorCorrectionRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/color-correction/requests/{request_id}' +} + +export type GetFalAiImageEditingColorCorrectionRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImageEditingColorCorrectionOutput +} + +export type GetFalAiImageEditingColorCorrectionRequestsByRequestIdResponse = + GetFalAiImageEditingColorCorrectionRequestsByRequestIdResponses[keyof GetFalAiImageEditingColorCorrectionRequestsByRequestIdResponses] + +export type GetFalAiImageEditingCartoonifyRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-editing/cartoonify/requests/{request_id}/status' +} + +export type GetFalAiImageEditingCartoonifyRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiImageEditingCartoonifyRequestsByRequestIdStatusResponse = + GetFalAiImageEditingCartoonifyRequestsByRequestIdStatusResponses[keyof GetFalAiImageEditingCartoonifyRequestsByRequestIdStatusResponses] + +export type PutFalAiImageEditingCartoonifyRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/cartoonify/requests/{request_id}/cancel' +} + +export type PutFalAiImageEditingCartoonifyRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiImageEditingCartoonifyRequestsByRequestIdCancelResponse = + PutFalAiImageEditingCartoonifyRequestsByRequestIdCancelResponses[keyof PutFalAiImageEditingCartoonifyRequestsByRequestIdCancelResponses] + +export type PostFalAiImageEditingCartoonifyData = { + body: SchemaImageEditingCartoonifyInput + path?: never + query?: never + url: '/fal-ai/image-editing/cartoonify' +} + +export type PostFalAiImageEditingCartoonifyResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageEditingCartoonifyResponse = + PostFalAiImageEditingCartoonifyResponses[keyof PostFalAiImageEditingCartoonifyResponses] + +export type GetFalAiImageEditingCartoonifyRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/cartoonify/requests/{request_id}' +} + +export type GetFalAiImageEditingCartoonifyRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImageEditingCartoonifyOutput +} + +export type GetFalAiImageEditingCartoonifyRequestsByRequestIdResponse = + GetFalAiImageEditingCartoonifyRequestsByRequestIdResponses[keyof GetFalAiImageEditingCartoonifyRequestsByRequestIdResponses] + +export type GetFalAiImageEditingBackgroundChangeRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-editing/background-change/requests/{request_id}/status' + } + +export type GetFalAiImageEditingBackgroundChangeRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiImageEditingBackgroundChangeRequestsByRequestIdStatusResponse = + GetFalAiImageEditingBackgroundChangeRequestsByRequestIdStatusResponses[keyof GetFalAiImageEditingBackgroundChangeRequestsByRequestIdStatusResponses] + +export type PutFalAiImageEditingBackgroundChangeRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/background-change/requests/{request_id}/cancel' + } + +export type PutFalAiImageEditingBackgroundChangeRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiImageEditingBackgroundChangeRequestsByRequestIdCancelResponse = + PutFalAiImageEditingBackgroundChangeRequestsByRequestIdCancelResponses[keyof PutFalAiImageEditingBackgroundChangeRequestsByRequestIdCancelResponses] + +export type PostFalAiImageEditingBackgroundChangeData = { + body: SchemaImageEditingBackgroundChangeInput + path?: never + query?: never + url: '/fal-ai/image-editing/background-change' +} + +export type PostFalAiImageEditingBackgroundChangeResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageEditingBackgroundChangeResponse = + PostFalAiImageEditingBackgroundChangeResponses[keyof PostFalAiImageEditingBackgroundChangeResponses] + +export type GetFalAiImageEditingBackgroundChangeRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/background-change/requests/{request_id}' +} + +export type GetFalAiImageEditingBackgroundChangeRequestsByRequestIdResponses = { + /** + * Result of the request. 
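+ *
+ * One way to consume the generated `*Data` shapes: a tiny helper that
+ * fills the `{request_id}` placeholder in the `url` template. The helper
+ * is a hypothetical sketch, not part of the generated client.
+ *
+ * @example
+ * function resolveUrl(template: string, path: { request_id: string }): string {
+ *   return template.replace(
+ *     '{request_id}',
+ *     encodeURIComponent(path.request_id),
+ *   )
+ * }
+ * const data: GetFalAiImageEditingBackgroundChangeRequestsByRequestIdData = {
+ *   path: { request_id: '123' },
+ *   url: '/fal-ai/image-editing/background-change/requests/{request_id}',
+ * }
+ * const href = resolveUrl(data.url, data.path) // relative to the queue host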
+ */ + 200: SchemaImageEditingBackgroundChangeOutput +} + +export type GetFalAiImageEditingBackgroundChangeRequestsByRequestIdResponse = + GetFalAiImageEditingBackgroundChangeRequestsByRequestIdResponses[keyof GetFalAiImageEditingBackgroundChangeRequestsByRequestIdResponses] + +export type GetFalAiImageEditingAgeProgressionRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-editing/age-progression/requests/{request_id}/status' +} + +export type GetFalAiImageEditingAgeProgressionRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiImageEditingAgeProgressionRequestsByRequestIdStatusResponse = + GetFalAiImageEditingAgeProgressionRequestsByRequestIdStatusResponses[keyof GetFalAiImageEditingAgeProgressionRequestsByRequestIdStatusResponses] + +export type PutFalAiImageEditingAgeProgressionRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/age-progression/requests/{request_id}/cancel' +} + +export type PutFalAiImageEditingAgeProgressionRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiImageEditingAgeProgressionRequestsByRequestIdCancelResponse = + PutFalAiImageEditingAgeProgressionRequestsByRequestIdCancelResponses[keyof PutFalAiImageEditingAgeProgressionRequestsByRequestIdCancelResponses] + +export type PostFalAiImageEditingAgeProgressionData = { + body: SchemaImageEditingAgeProgressionInput + path?: never + query?: never + url: '/fal-ai/image-editing/age-progression' +} + +export type PostFalAiImageEditingAgeProgressionResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageEditingAgeProgressionResponse = + PostFalAiImageEditingAgeProgressionResponses[keyof PostFalAiImageEditingAgeProgressionResponses] + +export type GetFalAiImageEditingAgeProgressionRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-editing/age-progression/requests/{request_id}' +} + +export type GetFalAiImageEditingAgeProgressionRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImageEditingAgeProgressionOutput +} + +export type GetFalAiImageEditingAgeProgressionRequestsByRequestIdResponse = + GetFalAiImageEditingAgeProgressionRequestsByRequestIdResponses[keyof GetFalAiImageEditingAgeProgressionRequestsByRequestIdResponses] + +export type GetFalAiFluxProKontextMaxMultiRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-pro/kontext/max/multi/requests/{request_id}/status' +} + +export type GetFalAiFluxProKontextMaxMultiRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxProKontextMaxMultiRequestsByRequestIdStatusResponse = + GetFalAiFluxProKontextMaxMultiRequestsByRequestIdStatusResponses[keyof GetFalAiFluxProKontextMaxMultiRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxProKontextMaxMultiRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-pro/kontext/max/multi/requests/{request_id}/cancel' +} + +export type PutFalAiFluxProKontextMaxMultiRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFluxProKontextMaxMultiRequestsByRequestIdCancelResponse = + PutFalAiFluxProKontextMaxMultiRequestsByRequestIdCancelResponses[keyof PutFalAiFluxProKontextMaxMultiRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxProKontextMaxMultiData = { + body: SchemaFluxProKontextMaxMultiInput + path?: never + query?: never + url: '/fal-ai/flux-pro/kontext/max/multi' +} + +export type PostFalAiFluxProKontextMaxMultiResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxProKontextMaxMultiResponse = + PostFalAiFluxProKontextMaxMultiResponses[keyof PostFalAiFluxProKontextMaxMultiResponses] + +export type GetFalAiFluxProKontextMaxMultiRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-pro/kontext/max/multi/requests/{request_id}' +} + +export type GetFalAiFluxProKontextMaxMultiRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxProKontextMaxMultiOutput +} + +export type GetFalAiFluxProKontextMaxMultiRequestsByRequestIdResponse = + GetFalAiFluxProKontextMaxMultiRequestsByRequestIdResponses[keyof GetFalAiFluxProKontextMaxMultiRequestsByRequestIdResponses] + +export type GetFalAiFluxProKontextMultiRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-pro/kontext/multi/requests/{request_id}/status' +} + +export type GetFalAiFluxProKontextMultiRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxProKontextMultiRequestsByRequestIdStatusResponse = + GetFalAiFluxProKontextMultiRequestsByRequestIdStatusResponses[keyof GetFalAiFluxProKontextMultiRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxProKontextMultiRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-pro/kontext/multi/requests/{request_id}/cancel' +} + +export type PutFalAiFluxProKontextMultiRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiFluxProKontextMultiRequestsByRequestIdCancelResponse = + PutFalAiFluxProKontextMultiRequestsByRequestIdCancelResponses[keyof PutFalAiFluxProKontextMultiRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxProKontextMultiData = { + body: SchemaFluxProKontextMultiInput + path?: never + query?: never + url: '/fal-ai/flux-pro/kontext/multi' +} + +export type PostFalAiFluxProKontextMultiResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxProKontextMultiResponse = + PostFalAiFluxProKontextMultiResponses[keyof PostFalAiFluxProKontextMultiResponses] + +export type GetFalAiFluxProKontextMultiRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-pro/kontext/multi/requests/{request_id}' +} + +export type GetFalAiFluxProKontextMultiRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxProKontextMultiOutput +} + +export type GetFalAiFluxProKontextMultiRequestsByRequestIdResponse = + GetFalAiFluxProKontextMultiRequestsByRequestIdResponses[keyof GetFalAiFluxProKontextMultiRequestsByRequestIdResponses] + +export type GetFalAiFluxProKontextMaxRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-pro/kontext/max/requests/{request_id}/status' +} + +export type GetFalAiFluxProKontextMaxRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxProKontextMaxRequestsByRequestIdStatusResponse = + GetFalAiFluxProKontextMaxRequestsByRequestIdStatusResponses[keyof GetFalAiFluxProKontextMaxRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxProKontextMaxRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-pro/kontext/max/requests/{request_id}/cancel' +} + +export type PutFalAiFluxProKontextMaxRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFluxProKontextMaxRequestsByRequestIdCancelResponse = + PutFalAiFluxProKontextMaxRequestsByRequestIdCancelResponses[keyof PutFalAiFluxProKontextMaxRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxProKontextMaxData = { + body: SchemaFluxProKontextMaxInput + path?: never + query?: never + url: '/fal-ai/flux-pro/kontext/max' +} + +export type PostFalAiFluxProKontextMaxResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxProKontextMaxResponse = + PostFalAiFluxProKontextMaxResponses[keyof PostFalAiFluxProKontextMaxResponses] + +export type GetFalAiFluxProKontextMaxRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-pro/kontext/max/requests/{request_id}' +} + +export type GetFalAiFluxProKontextMaxRequestsByRequestIdResponses = { + /** + * Result of the request. 
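+ *
+ * A note on the pattern repeated throughout this file: the companion
+ * `...Response` alias indexes this map with `keyof`, collapsing it to the
+ * union of all declared success bodies. With a single `200` entry the two
+ * are interchangeable, but the indexed form stays correct if the generator
+ * later emits additional status codes.
+ *
+ * @example
+ * type R = GetFalAiFluxProKontextMaxRequestsByRequestIdResponses[keyof
+ *   GetFalAiFluxProKontextMaxRequestsByRequestIdResponses]
+ * // R resolves to SchemaFluxProKontextMaxOutput, since 200 is the only key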
+ */ + 200: SchemaFluxProKontextMaxOutput +} + +export type GetFalAiFluxProKontextMaxRequestsByRequestIdResponse = + GetFalAiFluxProKontextMaxRequestsByRequestIdResponses[keyof GetFalAiFluxProKontextMaxRequestsByRequestIdResponses] + +export type GetFalAiFluxKontextDevRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-kontext/dev/requests/{request_id}/status' +} + +export type GetFalAiFluxKontextDevRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxKontextDevRequestsByRequestIdStatusResponse = + GetFalAiFluxKontextDevRequestsByRequestIdStatusResponses[keyof GetFalAiFluxKontextDevRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxKontextDevRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-kontext/dev/requests/{request_id}/cancel' +} + +export type PutFalAiFluxKontextDevRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFluxKontextDevRequestsByRequestIdCancelResponse = + PutFalAiFluxKontextDevRequestsByRequestIdCancelResponses[keyof PutFalAiFluxKontextDevRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxKontextDevData = { + body: SchemaFluxKontextDevInput + path?: never + query?: never + url: '/fal-ai/flux-kontext/dev' +} + +export type PostFalAiFluxKontextDevResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxKontextDevResponse = + PostFalAiFluxKontextDevResponses[keyof PostFalAiFluxKontextDevResponses] + +export type GetFalAiFluxKontextDevRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-kontext/dev/requests/{request_id}' +} + +export type GetFalAiFluxKontextDevRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxKontextDevOutput +} + +export type GetFalAiFluxKontextDevRequestsByRequestIdResponse = + GetFalAiFluxKontextDevRequestsByRequestIdResponses[keyof GetFalAiFluxKontextDevRequestsByRequestIdResponses] + +export type GetFalAiBagelEditRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/bagel/edit/requests/{request_id}/status' +} + +export type GetFalAiBagelEditRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiBagelEditRequestsByRequestIdStatusResponse = + GetFalAiBagelEditRequestsByRequestIdStatusResponses[keyof GetFalAiBagelEditRequestsByRequestIdStatusResponses] + +export type PutFalAiBagelEditRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bagel/edit/requests/{request_id}/cancel' +} + +export type PutFalAiBagelEditRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiBagelEditRequestsByRequestIdCancelResponse = + PutFalAiBagelEditRequestsByRequestIdCancelResponses[keyof PutFalAiBagelEditRequestsByRequestIdCancelResponses] + +export type PostFalAiBagelEditData = { + body: SchemaBagelEditInput + path?: never + query?: never + url: '/fal-ai/bagel/edit' +} + +export type PostFalAiBagelEditResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiBagelEditResponse = + PostFalAiBagelEditResponses[keyof PostFalAiBagelEditResponses] + +export type GetFalAiBagelEditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bagel/edit/requests/{request_id}' +} + +export type GetFalAiBagelEditRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaBagelEditOutput +} + +export type GetFalAiBagelEditRequestsByRequestIdResponse = + GetFalAiBagelEditRequestsByRequestIdResponses[keyof GetFalAiBagelEditRequestsByRequestIdResponses] + +export type GetSmoretalkAiRembgEnhanceRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/smoretalk-ai/rembg-enhance/requests/{request_id}/status' +} + +export type GetSmoretalkAiRembgEnhanceRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetSmoretalkAiRembgEnhanceRequestsByRequestIdStatusResponse = + GetSmoretalkAiRembgEnhanceRequestsByRequestIdStatusResponses[keyof GetSmoretalkAiRembgEnhanceRequestsByRequestIdStatusResponses] + +export type PutSmoretalkAiRembgEnhanceRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/smoretalk-ai/rembg-enhance/requests/{request_id}/cancel' +} + +export type PutSmoretalkAiRembgEnhanceRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutSmoretalkAiRembgEnhanceRequestsByRequestIdCancelResponse = + PutSmoretalkAiRembgEnhanceRequestsByRequestIdCancelResponses[keyof PutSmoretalkAiRembgEnhanceRequestsByRequestIdCancelResponses] + +export type PostSmoretalkAiRembgEnhanceData = { + body: SchemaRembgEnhanceInput + path?: never + query?: never + url: '/smoretalk-ai/rembg-enhance' +} + +export type PostSmoretalkAiRembgEnhanceResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostSmoretalkAiRembgEnhanceResponse = + PostSmoretalkAiRembgEnhanceResponses[keyof PostSmoretalkAiRembgEnhanceResponses] + +export type GetSmoretalkAiRembgEnhanceRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/smoretalk-ai/rembg-enhance/requests/{request_id}' +} + +export type GetSmoretalkAiRembgEnhanceRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaRembgEnhanceOutput +} + +export type GetSmoretalkAiRembgEnhanceRequestsByRequestIdResponse = + GetSmoretalkAiRembgEnhanceRequestsByRequestIdResponses[keyof GetSmoretalkAiRembgEnhanceRequestsByRequestIdResponses] + +export type GetFalAiRecraftUpscaleCreativeRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/recraft/upscale/creative/requests/{request_id}/status' +} + +export type GetFalAiRecraftUpscaleCreativeRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiRecraftUpscaleCreativeRequestsByRequestIdStatusResponse = + GetFalAiRecraftUpscaleCreativeRequestsByRequestIdStatusResponses[keyof GetFalAiRecraftUpscaleCreativeRequestsByRequestIdStatusResponses] + +export type PutFalAiRecraftUpscaleCreativeRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/recraft/upscale/creative/requests/{request_id}/cancel' +} + +export type PutFalAiRecraftUpscaleCreativeRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiRecraftUpscaleCreativeRequestsByRequestIdCancelResponse = + PutFalAiRecraftUpscaleCreativeRequestsByRequestIdCancelResponses[keyof PutFalAiRecraftUpscaleCreativeRequestsByRequestIdCancelResponses] + +export type PostFalAiRecraftUpscaleCreativeData = { + body: SchemaRecraftUpscaleCreativeInput + path?: never + query?: never + url: '/fal-ai/recraft/upscale/creative' +} + +export type PostFalAiRecraftUpscaleCreativeResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiRecraftUpscaleCreativeResponse = + PostFalAiRecraftUpscaleCreativeResponses[keyof PostFalAiRecraftUpscaleCreativeResponses] + +export type GetFalAiRecraftUpscaleCreativeRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/recraft/upscale/creative/requests/{request_id}' +} + +export type GetFalAiRecraftUpscaleCreativeRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaRecraftUpscaleCreativeOutput +} + +export type GetFalAiRecraftUpscaleCreativeRequestsByRequestIdResponse = + GetFalAiRecraftUpscaleCreativeRequestsByRequestIdResponses[keyof GetFalAiRecraftUpscaleCreativeRequestsByRequestIdResponses] + +export type GetFalAiRecraftUpscaleCrispRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/recraft/upscale/crisp/requests/{request_id}/status' +} + +export type GetFalAiRecraftUpscaleCrispRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiRecraftUpscaleCrispRequestsByRequestIdStatusResponse = + GetFalAiRecraftUpscaleCrispRequestsByRequestIdStatusResponses[keyof GetFalAiRecraftUpscaleCrispRequestsByRequestIdStatusResponses] + +export type PutFalAiRecraftUpscaleCrispRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/recraft/upscale/crisp/requests/{request_id}/cancel' +} + +export type PutFalAiRecraftUpscaleCrispRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiRecraftUpscaleCrispRequestsByRequestIdCancelResponse = + PutFalAiRecraftUpscaleCrispRequestsByRequestIdCancelResponses[keyof PutFalAiRecraftUpscaleCrispRequestsByRequestIdCancelResponses] + +export type PostFalAiRecraftUpscaleCrispData = { + body: SchemaRecraftUpscaleCrispInput + path?: never + query?: never + url: '/fal-ai/recraft/upscale/crisp' +} + +export type PostFalAiRecraftUpscaleCrispResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiRecraftUpscaleCrispResponse = + PostFalAiRecraftUpscaleCrispResponses[keyof PostFalAiRecraftUpscaleCrispResponses] + +export type GetFalAiRecraftUpscaleCrispRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/recraft/upscale/crisp/requests/{request_id}' +} + +export type GetFalAiRecraftUpscaleCrispRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaRecraftUpscaleCrispOutput +} + +export type GetFalAiRecraftUpscaleCrispRequestsByRequestIdResponse = + GetFalAiRecraftUpscaleCrispRequestsByRequestIdResponses[keyof GetFalAiRecraftUpscaleCrispRequestsByRequestIdResponses] + +export type GetFalAiRecraftV3ImageToImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/recraft/v3/image-to-image/requests/{request_id}/status' +} + +export type GetFalAiRecraftV3ImageToImageRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiRecraftV3ImageToImageRequestsByRequestIdStatusResponse = + GetFalAiRecraftV3ImageToImageRequestsByRequestIdStatusResponses[keyof GetFalAiRecraftV3ImageToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiRecraftV3ImageToImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/recraft/v3/image-to-image/requests/{request_id}/cancel' +} + +export type PutFalAiRecraftV3ImageToImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiRecraftV3ImageToImageRequestsByRequestIdCancelResponse = + PutFalAiRecraftV3ImageToImageRequestsByRequestIdCancelResponses[keyof PutFalAiRecraftV3ImageToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiRecraftV3ImageToImageData = { + body: SchemaRecraftV3ImageToImageInput + path?: never + query?: never + url: '/fal-ai/recraft/v3/image-to-image' +} + +export type PostFalAiRecraftV3ImageToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiRecraftV3ImageToImageResponse = + PostFalAiRecraftV3ImageToImageResponses[keyof PostFalAiRecraftV3ImageToImageResponses] + +export type GetFalAiRecraftV3ImageToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/recraft/v3/image-to-image/requests/{request_id}' +} + +export type GetFalAiRecraftV3ImageToImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaRecraftV3ImageToImageOutput +} + +export type GetFalAiRecraftV3ImageToImageRequestsByRequestIdResponse = + GetFalAiRecraftV3ImageToImageRequestsByRequestIdResponses[keyof GetFalAiRecraftV3ImageToImageRequestsByRequestIdResponses] + +export type GetFalAiMinimaxImage01SubjectReferenceRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/minimax/image-01/subject-reference/requests/{request_id}/status' + } + +export type GetFalAiMinimaxImage01SubjectReferenceRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiMinimaxImage01SubjectReferenceRequestsByRequestIdStatusResponse = + GetFalAiMinimaxImage01SubjectReferenceRequestsByRequestIdStatusResponses[keyof GetFalAiMinimaxImage01SubjectReferenceRequestsByRequestIdStatusResponses] + +export type PutFalAiMinimaxImage01SubjectReferenceRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/image-01/subject-reference/requests/{request_id}/cancel' + } + +export type PutFalAiMinimaxImage01SubjectReferenceRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiMinimaxImage01SubjectReferenceRequestsByRequestIdCancelResponse = + PutFalAiMinimaxImage01SubjectReferenceRequestsByRequestIdCancelResponses[keyof PutFalAiMinimaxImage01SubjectReferenceRequestsByRequestIdCancelResponses] + +export type PostFalAiMinimaxImage01SubjectReferenceData = { + body: SchemaMinimaxImage01SubjectReferenceInput + path?: never + query?: never + url: '/fal-ai/minimax/image-01/subject-reference' +} + +export type PostFalAiMinimaxImage01SubjectReferenceResponses = { + /** + * The request status. 
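// Usage sketch: polling the status route declared above. The {request_id}
// placeholder in the generated `url` template is filled from `path`, and
// `logs=1` opts into log output per the query schema. Base URL, auth header,
// and import path are the same assumptions as in the earlier sketch.
import type { GetFalAiMinimaxImage01SubjectReferenceRequestsByRequestIdStatusResponse } from './types.gen'

export async function pollSubjectReferenceStatus(
  requestId: string,
): Promise<GetFalAiMinimaxImage01SubjectReferenceRequestsByRequestIdStatusResponse> {
  const url = new URL(
    `https://queue.fal.run/fal-ai/minimax/image-01/subject-reference/requests/${requestId}/status`,
  )
  url.searchParams.set('logs', '1') // `1` includes logs, `0` omits them
  const res = await fetch(url, {
    headers: { Authorization: `Key ${process.env.FAL_KEY}` },
  })
  if (!res.ok) throw new Error(`status check failed: ${res.status}`)
  return (await res.json()) as GetFalAiMinimaxImage01SubjectReferenceRequestsByRequestIdStatusResponse
}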
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiMinimaxImage01SubjectReferenceResponse = + PostFalAiMinimaxImage01SubjectReferenceResponses[keyof PostFalAiMinimaxImage01SubjectReferenceResponses] + +export type GetFalAiMinimaxImage01SubjectReferenceRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/image-01/subject-reference/requests/{request_id}' +} + +export type GetFalAiMinimaxImage01SubjectReferenceRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaMinimaxImage01SubjectReferenceOutput + } + +export type GetFalAiMinimaxImage01SubjectReferenceRequestsByRequestIdResponse = + GetFalAiMinimaxImage01SubjectReferenceRequestsByRequestIdResponses[keyof GetFalAiMinimaxImage01SubjectReferenceRequestsByRequestIdResponses] + +export type GetFalAiHidreamI1FullImageToImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/hidream-i1-full/image-to-image/requests/{request_id}/status' +} + +export type GetFalAiHidreamI1FullImageToImageRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiHidreamI1FullImageToImageRequestsByRequestIdStatusResponse = + GetFalAiHidreamI1FullImageToImageRequestsByRequestIdStatusResponses[keyof GetFalAiHidreamI1FullImageToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiHidreamI1FullImageToImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hidream-i1-full/image-to-image/requests/{request_id}/cancel' +} + +export type PutFalAiHidreamI1FullImageToImageRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiHidreamI1FullImageToImageRequestsByRequestIdCancelResponse = + PutFalAiHidreamI1FullImageToImageRequestsByRequestIdCancelResponses[keyof PutFalAiHidreamI1FullImageToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiHidreamI1FullImageToImageData = { + body: SchemaHidreamI1FullImageToImageInput + path?: never + query?: never + url: '/fal-ai/hidream-i1-full/image-to-image' +} + +export type PostFalAiHidreamI1FullImageToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiHidreamI1FullImageToImageResponse = + PostFalAiHidreamI1FullImageToImageResponses[keyof PostFalAiHidreamI1FullImageToImageResponses] + +export type GetFalAiHidreamI1FullImageToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hidream-i1-full/image-to-image/requests/{request_id}' +} + +export type GetFalAiHidreamI1FullImageToImageRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaHidreamI1FullImageToImageOutput +} + +export type GetFalAiHidreamI1FullImageToImageRequestsByRequestIdResponse = + GetFalAiHidreamI1FullImageToImageRequestsByRequestIdResponses[keyof GetFalAiHidreamI1FullImageToImageRequestsByRequestIdResponses] + +export type GetFalAiIdeogramV3ReframeRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ideogram/v3/reframe/requests/{request_id}/status' +} + +export type GetFalAiIdeogramV3ReframeRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiIdeogramV3ReframeRequestsByRequestIdStatusResponse = + GetFalAiIdeogramV3ReframeRequestsByRequestIdStatusResponses[keyof GetFalAiIdeogramV3ReframeRequestsByRequestIdStatusResponses] + +export type PutFalAiIdeogramV3ReframeRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ideogram/v3/reframe/requests/{request_id}/cancel' +} + +export type PutFalAiIdeogramV3ReframeRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiIdeogramV3ReframeRequestsByRequestIdCancelResponse = + PutFalAiIdeogramV3ReframeRequestsByRequestIdCancelResponses[keyof PutFalAiIdeogramV3ReframeRequestsByRequestIdCancelResponses] + +export type PostFalAiIdeogramV3ReframeData = { + body: SchemaIdeogramV3ReframeInput + path?: never + query?: never + url: '/fal-ai/ideogram/v3/reframe' +} + +export type PostFalAiIdeogramV3ReframeResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiIdeogramV3ReframeResponse = + PostFalAiIdeogramV3ReframeResponses[keyof PostFalAiIdeogramV3ReframeResponses] + +export type GetFalAiIdeogramV3ReframeRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ideogram/v3/reframe/requests/{request_id}' +} + +export type GetFalAiIdeogramV3ReframeRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaIdeogramV3ReframeOutput +} + +export type GetFalAiIdeogramV3ReframeRequestsByRequestIdResponse = + GetFalAiIdeogramV3ReframeRequestsByRequestIdResponses[keyof GetFalAiIdeogramV3ReframeRequestsByRequestIdResponses] + +export type GetFalAiIdeogramV3ReplaceBackgroundRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ideogram/v3/replace-background/requests/{request_id}/status' +} + +export type GetFalAiIdeogramV3ReplaceBackgroundRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
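// Usage sketch: fetching a finished result. Because the Responses map above
// declares only a 200 entry, the `[keyof ...]` indexed access collapses the
// Response alias straight to SchemaIdeogramV3ReframeOutput, so the cast below
// carries the full output type. Same base-URL/auth/import assumptions apply.
import type { GetFalAiIdeogramV3ReframeRequestsByRequestIdResponse } from './types.gen'

export async function getReframeResult(
  requestId: string,
): Promise<GetFalAiIdeogramV3ReframeRequestsByRequestIdResponse> {
  const res = await fetch(
    `https://queue.fal.run/fal-ai/ideogram/v3/reframe/requests/${requestId}`,
    { headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
  )
  if (!res.ok) throw new Error(`result fetch failed: ${res.status}`)
  return (await res.json()) as GetFalAiIdeogramV3ReframeRequestsByRequestIdResponse
}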
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiIdeogramV3ReplaceBackgroundRequestsByRequestIdStatusResponse = + GetFalAiIdeogramV3ReplaceBackgroundRequestsByRequestIdStatusResponses[keyof GetFalAiIdeogramV3ReplaceBackgroundRequestsByRequestIdStatusResponses] + +export type PutFalAiIdeogramV3ReplaceBackgroundRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ideogram/v3/replace-background/requests/{request_id}/cancel' +} + +export type PutFalAiIdeogramV3ReplaceBackgroundRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiIdeogramV3ReplaceBackgroundRequestsByRequestIdCancelResponse = + PutFalAiIdeogramV3ReplaceBackgroundRequestsByRequestIdCancelResponses[keyof PutFalAiIdeogramV3ReplaceBackgroundRequestsByRequestIdCancelResponses] + +export type PostFalAiIdeogramV3ReplaceBackgroundData = { + body: SchemaIdeogramV3ReplaceBackgroundInput + path?: never + query?: never + url: '/fal-ai/ideogram/v3/replace-background' +} + +export type PostFalAiIdeogramV3ReplaceBackgroundResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiIdeogramV3ReplaceBackgroundResponse = + PostFalAiIdeogramV3ReplaceBackgroundResponses[keyof PostFalAiIdeogramV3ReplaceBackgroundResponses] + +export type GetFalAiIdeogramV3ReplaceBackgroundRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ideogram/v3/replace-background/requests/{request_id}' +} + +export type GetFalAiIdeogramV3ReplaceBackgroundRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaIdeogramV3ReplaceBackgroundOutput +} + +export type GetFalAiIdeogramV3ReplaceBackgroundRequestsByRequestIdResponse = + GetFalAiIdeogramV3ReplaceBackgroundRequestsByRequestIdResponses[keyof GetFalAiIdeogramV3ReplaceBackgroundRequestsByRequestIdResponses] + +export type GetFalAiIdeogramV3RemixRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ideogram/v3/remix/requests/{request_id}/status' +} + +export type GetFalAiIdeogramV3RemixRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiIdeogramV3RemixRequestsByRequestIdStatusResponse = + GetFalAiIdeogramV3RemixRequestsByRequestIdStatusResponses[keyof GetFalAiIdeogramV3RemixRequestsByRequestIdStatusResponses] + +export type PutFalAiIdeogramV3RemixRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ideogram/v3/remix/requests/{request_id}/cancel' +} + +export type PutFalAiIdeogramV3RemixRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiIdeogramV3RemixRequestsByRequestIdCancelResponse = + PutFalAiIdeogramV3RemixRequestsByRequestIdCancelResponses[keyof PutFalAiIdeogramV3RemixRequestsByRequestIdCancelResponses] + +export type PostFalAiIdeogramV3RemixData = { + body: SchemaIdeogramV3RemixInput + path?: never + query?: never + url: '/fal-ai/ideogram/v3/remix' +} + +export type PostFalAiIdeogramV3RemixResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiIdeogramV3RemixResponse = + PostFalAiIdeogramV3RemixResponses[keyof PostFalAiIdeogramV3RemixResponses] + +export type GetFalAiIdeogramV3RemixRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ideogram/v3/remix/requests/{request_id}' +} + +export type GetFalAiIdeogramV3RemixRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaIdeogramV3RemixOutput +} + +export type GetFalAiIdeogramV3RemixRequestsByRequestIdResponse = + GetFalAiIdeogramV3RemixRequestsByRequestIdResponses[keyof GetFalAiIdeogramV3RemixRequestsByRequestIdResponses] + +export type GetFalAiIdeogramV3EditRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ideogram/v3/edit/requests/{request_id}/status' +} + +export type GetFalAiIdeogramV3EditRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiIdeogramV3EditRequestsByRequestIdStatusResponse = + GetFalAiIdeogramV3EditRequestsByRequestIdStatusResponses[keyof GetFalAiIdeogramV3EditRequestsByRequestIdStatusResponses] + +export type PutFalAiIdeogramV3EditRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ideogram/v3/edit/requests/{request_id}/cancel' +} + +export type PutFalAiIdeogramV3EditRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiIdeogramV3EditRequestsByRequestIdCancelResponse = + PutFalAiIdeogramV3EditRequestsByRequestIdCancelResponses[keyof PutFalAiIdeogramV3EditRequestsByRequestIdCancelResponses] + +export type PostFalAiIdeogramV3EditData = { + body: SchemaIdeogramV3EditInput + path?: never + query?: never + url: '/fal-ai/ideogram/v3/edit' +} + +export type PostFalAiIdeogramV3EditResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiIdeogramV3EditResponse = + PostFalAiIdeogramV3EditResponses[keyof PostFalAiIdeogramV3EditResponses] + +export type GetFalAiIdeogramV3EditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ideogram/v3/edit/requests/{request_id}' +} + +export type GetFalAiIdeogramV3EditRequestsByRequestIdResponses = { + /** + * Result of the request. 
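// Usage sketch: cancelling a queued request. The cancel routes are PUTs whose
// typed 200 body only promises an optional `success` flag, so a missing field
// is treated here as "not confirmed" rather than as success.
import type { PutFalAiIdeogramV3EditRequestsByRequestIdCancelResponse } from './types.gen'

export async function cancelEdit(requestId: string): Promise<boolean> {
  const res = await fetch(
    `https://queue.fal.run/fal-ai/ideogram/v3/edit/requests/${requestId}/cancel`,
    { method: 'PUT', headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
  )
  const body =
    (await res.json()) as PutFalAiIdeogramV3EditRequestsByRequestIdCancelResponse
  return body.success ?? false
}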
+ */ + 200: SchemaIdeogramV3EditOutput +} + +export type GetFalAiIdeogramV3EditRequestsByRequestIdResponse = + GetFalAiIdeogramV3EditRequestsByRequestIdResponses[keyof GetFalAiIdeogramV3EditRequestsByRequestIdResponses] + +export type GetFalAiStep1xEditRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/step1x-edit/requests/{request_id}/status' +} + +export type GetFalAiStep1xEditRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiStep1xEditRequestsByRequestIdStatusResponse = + GetFalAiStep1xEditRequestsByRequestIdStatusResponses[keyof GetFalAiStep1xEditRequestsByRequestIdStatusResponses] + +export type PutFalAiStep1xEditRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/step1x-edit/requests/{request_id}/cancel' +} + +export type PutFalAiStep1xEditRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiStep1xEditRequestsByRequestIdCancelResponse = + PutFalAiStep1xEditRequestsByRequestIdCancelResponses[keyof PutFalAiStep1xEditRequestsByRequestIdCancelResponses] + +export type PostFalAiStep1xEditData = { + body: SchemaStep1xEditInput + path?: never + query?: never + url: '/fal-ai/step1x-edit' +} + +export type PostFalAiStep1xEditResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiStep1xEditResponse = + PostFalAiStep1xEditResponses[keyof PostFalAiStep1xEditResponses] + +export type GetFalAiStep1xEditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/step1x-edit/requests/{request_id}' +} + +export type GetFalAiStep1xEditRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaStep1xEditOutput +} + +export type GetFalAiStep1xEditRequestsByRequestIdResponse = + GetFalAiStep1xEditRequestsByRequestIdResponses[keyof GetFalAiStep1xEditRequestsByRequestIdResponses] + +export type GetFalAiImage2SvgRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image2svg/requests/{request_id}/status' +} + +export type GetFalAiImage2SvgRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiImage2SvgRequestsByRequestIdStatusResponse = + GetFalAiImage2SvgRequestsByRequestIdStatusResponses[keyof GetFalAiImage2SvgRequestsByRequestIdStatusResponses] + +export type PutFalAiImage2SvgRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image2svg/requests/{request_id}/cancel' +} + +export type PutFalAiImage2SvgRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiImage2SvgRequestsByRequestIdCancelResponse = + PutFalAiImage2SvgRequestsByRequestIdCancelResponses[keyof PutFalAiImage2SvgRequestsByRequestIdCancelResponses] + +export type PostFalAiImage2SvgData = { + body: SchemaImage2SvgInput + path?: never + query?: never + url: '/fal-ai/image2svg' +} + +export type PostFalAiImage2SvgResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImage2SvgResponse = + PostFalAiImage2SvgResponses[keyof PostFalAiImage2SvgResponses] + +export type GetFalAiImage2SvgRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image2svg/requests/{request_id}' +} + +export type GetFalAiImage2SvgRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImage2SvgOutput +} + +export type GetFalAiImage2SvgRequestsByRequestIdResponse = + GetFalAiImage2SvgRequestsByRequestIdResponses[keyof GetFalAiImage2SvgRequestsByRequestIdResponses] + +export type GetFalAiUnoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/uno/requests/{request_id}/status' +} + +export type GetFalAiUnoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiUnoRequestsByRequestIdStatusResponse = + GetFalAiUnoRequestsByRequestIdStatusResponses[keyof GetFalAiUnoRequestsByRequestIdStatusResponses] + +export type PutFalAiUnoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/uno/requests/{request_id}/cancel' +} + +export type PutFalAiUnoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiUnoRequestsByRequestIdCancelResponse = + PutFalAiUnoRequestsByRequestIdCancelResponses[keyof PutFalAiUnoRequestsByRequestIdCancelResponses] + +export type PostFalAiUnoData = { + body: SchemaUnoInput + path?: never + query?: never + url: '/fal-ai/uno' +} + +export type PostFalAiUnoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiUnoResponse = + PostFalAiUnoResponses[keyof PostFalAiUnoResponses] + +export type GetFalAiUnoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/uno/requests/{request_id}' +} + +export type GetFalAiUnoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaUnoOutput +} + +export type GetFalAiUnoRequestsByRequestIdResponse = + GetFalAiUnoRequestsByRequestIdResponses[keyof GetFalAiUnoRequestsByRequestIdResponses] + +export type GetFalAiGptImage1EditImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/gpt-image-1/edit-image/requests/{request_id}/status' +} + +export type GetFalAiGptImage1EditImageRequestsByRequestIdStatusResponses = { + /** + * The request status. 
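// Usage sketch: every *Data type in this file shares the { body, path, query,
// url } shape, so a single generic helper can serve all four operations of any
// endpoint. The QueueOperationData constraint below is hand-written to mirror
// that shape; it assumes {request_id} is the only path placeholder used here.
interface QueueOperationData {
  body?: unknown
  path?: { request_id: string }
  query?: Record<string, string | number | undefined>
  url: string
}

export async function falRequest<TResponse>(
  method: 'GET' | 'POST' | 'PUT',
  data: QueueOperationData,
): Promise<TResponse> {
  // Fill the single {request_id} placeholder, if the operation has one.
  const path = data.path
    ? data.url.replace('{request_id}', data.path.request_id)
    : data.url
  const url = new URL(`https://queue.fal.run${path}`)
  for (const [key, value] of Object.entries(data.query ?? {})) {
    if (value !== undefined) url.searchParams.set(key, String(value))
  }
  const res = await fetch(url, {
    method,
    headers: {
      Authorization: `Key ${process.env.FAL_KEY}`,
      ...(data.body !== undefined ? { 'Content-Type': 'application/json' } : {}),
    },
    body: data.body !== undefined ? JSON.stringify(data.body) : undefined,
  })
  if (!res.ok) throw new Error(`fal queue error: ${res.status}`)
  return (await res.json()) as TResponse
}

// e.g. falRequest<PostFalAiUnoResponse>('POST', { body: input, url: '/fal-ai/uno' })
// where `input` is a SchemaUnoInput value.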
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiGptImage1EditImageRequestsByRequestIdStatusResponse = + GetFalAiGptImage1EditImageRequestsByRequestIdStatusResponses[keyof GetFalAiGptImage1EditImageRequestsByRequestIdStatusResponses] + +export type PutFalAiGptImage1EditImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/gpt-image-1/edit-image/requests/{request_id}/cancel' +} + +export type PutFalAiGptImage1EditImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiGptImage1EditImageRequestsByRequestIdCancelResponse = + PutFalAiGptImage1EditImageRequestsByRequestIdCancelResponses[keyof PutFalAiGptImage1EditImageRequestsByRequestIdCancelResponses] + +export type PostFalAiGptImage1EditImageData = { + body: SchemaGptImage1EditImageInput + path?: never + query?: never + url: '/fal-ai/gpt-image-1/edit-image' +} + +export type PostFalAiGptImage1EditImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiGptImage1EditImageResponse = + PostFalAiGptImage1EditImageResponses[keyof PostFalAiGptImage1EditImageResponses] + +export type GetFalAiGptImage1EditImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/gpt-image-1/edit-image/requests/{request_id}' +} + +export type GetFalAiGptImage1EditImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaGptImage1EditImageOutput +} + +export type GetFalAiGptImage1EditImageRequestsByRequestIdResponse = + GetFalAiGptImage1EditImageRequestsByRequestIdResponses[keyof GetFalAiGptImage1EditImageRequestsByRequestIdResponses] + +export type GetRundiffusionFalJuggernautFluxLoraInpaintingRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/rundiffusion-fal/juggernaut-flux-lora/inpainting/requests/{request_id}/status' + } + +export type GetRundiffusionFalJuggernautFluxLoraInpaintingRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetRundiffusionFalJuggernautFluxLoraInpaintingRequestsByRequestIdStatusResponse = + GetRundiffusionFalJuggernautFluxLoraInpaintingRequestsByRequestIdStatusResponses[keyof GetRundiffusionFalJuggernautFluxLoraInpaintingRequestsByRequestIdStatusResponses] + +export type PutRundiffusionFalJuggernautFluxLoraInpaintingRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/rundiffusion-fal/juggernaut-flux-lora/inpainting/requests/{request_id}/cancel' + } + +export type PutRundiffusionFalJuggernautFluxLoraInpaintingRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutRundiffusionFalJuggernautFluxLoraInpaintingRequestsByRequestIdCancelResponse = + PutRundiffusionFalJuggernautFluxLoraInpaintingRequestsByRequestIdCancelResponses[keyof PutRundiffusionFalJuggernautFluxLoraInpaintingRequestsByRequestIdCancelResponses] + +export type PostRundiffusionFalJuggernautFluxLoraInpaintingData = { + body: SchemaJuggernautFluxLoraInpaintingInput + path?: never + query?: never + url: '/rundiffusion-fal/juggernaut-flux-lora/inpainting' +} + +export type PostRundiffusionFalJuggernautFluxLoraInpaintingResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostRundiffusionFalJuggernautFluxLoraInpaintingResponse = + PostRundiffusionFalJuggernautFluxLoraInpaintingResponses[keyof PostRundiffusionFalJuggernautFluxLoraInpaintingResponses] + +export type GetRundiffusionFalJuggernautFluxLoraInpaintingRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/rundiffusion-fal/juggernaut-flux-lora/inpainting/requests/{request_id}' + } + +export type GetRundiffusionFalJuggernautFluxLoraInpaintingRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaJuggernautFluxLoraInpaintingOutput + } + +export type GetRundiffusionFalJuggernautFluxLoraInpaintingRequestsByRequestIdResponse = + GetRundiffusionFalJuggernautFluxLoraInpaintingRequestsByRequestIdResponses[keyof GetRundiffusionFalJuggernautFluxLoraInpaintingRequestsByRequestIdResponses] + +export type GetFalAiFashnTryonV15RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/fashn/tryon/v1.5/requests/{request_id}/status' +} + +export type GetFalAiFashnTryonV15RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFashnTryonV15RequestsByRequestIdStatusResponse = + GetFalAiFashnTryonV15RequestsByRequestIdStatusResponses[keyof GetFalAiFashnTryonV15RequestsByRequestIdStatusResponses] + +export type PutFalAiFashnTryonV15RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fashn/tryon/v1.5/requests/{request_id}/cancel' +} + +export type PutFalAiFashnTryonV15RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFashnTryonV15RequestsByRequestIdCancelResponse = + PutFalAiFashnTryonV15RequestsByRequestIdCancelResponses[keyof PutFalAiFashnTryonV15RequestsByRequestIdCancelResponses] + +export type PostFalAiFashnTryonV15Data = { + body: SchemaFashnTryonV15Input + path?: never + query?: never + url: '/fal-ai/fashn/tryon/v1.5' +} + +export type PostFalAiFashnTryonV15Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFashnTryonV15Response = + PostFalAiFashnTryonV15Responses[keyof PostFalAiFashnTryonV15Responses] + +export type GetFalAiFashnTryonV15RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fashn/tryon/v1.5/requests/{request_id}' +} + +export type GetFalAiFashnTryonV15RequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaFashnTryonV15Output +} + +export type GetFalAiFashnTryonV15RequestsByRequestIdResponse = + GetFalAiFashnTryonV15RequestsByRequestIdResponses[keyof GetFalAiFashnTryonV15RequestsByRequestIdResponses] + +export type GetFalAiPlushifyRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/plushify/requests/{request_id}/status' +} + +export type GetFalAiPlushifyRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiPlushifyRequestsByRequestIdStatusResponse = + GetFalAiPlushifyRequestsByRequestIdStatusResponses[keyof GetFalAiPlushifyRequestsByRequestIdStatusResponses] + +export type PutFalAiPlushifyRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/plushify/requests/{request_id}/cancel' +} + +export type PutFalAiPlushifyRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiPlushifyRequestsByRequestIdCancelResponse = + PutFalAiPlushifyRequestsByRequestIdCancelResponses[keyof PutFalAiPlushifyRequestsByRequestIdCancelResponses] + +export type PostFalAiPlushifyData = { + body: SchemaPlushifyInput + path?: never + query?: never + url: '/fal-ai/plushify' +} + +export type PostFalAiPlushifyResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPlushifyResponse = + PostFalAiPlushifyResponses[keyof PostFalAiPlushifyResponses] + +export type GetFalAiPlushifyRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/plushify/requests/{request_id}' +} + +export type GetFalAiPlushifyRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPlushifyOutput +} + +export type GetFalAiPlushifyRequestsByRequestIdResponse = + GetFalAiPlushifyRequestsByRequestIdResponses[keyof GetFalAiPlushifyRequestsByRequestIdResponses] + +export type GetFalAiInstantCharacterRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/instant-character/requests/{request_id}/status' +} + +export type GetFalAiInstantCharacterRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiInstantCharacterRequestsByRequestIdStatusResponse = + GetFalAiInstantCharacterRequestsByRequestIdStatusResponses[keyof GetFalAiInstantCharacterRequestsByRequestIdStatusResponses] + +export type PutFalAiInstantCharacterRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/instant-character/requests/{request_id}/cancel' +} + +export type PutFalAiInstantCharacterRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
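// Usage sketch: a complete submit -> poll -> fetch cycle for the plushify
// endpoint, reusing the falRequest helper sketched earlier. The IN_QUEUE /
// IN_PROGRESS / COMPLETED states come from the QueueStatus schema; the fixed
// two-second poll interval is an arbitrary illustrative choice.
import type {
  PostFalAiPlushifyData,
  PostFalAiPlushifyResponse,
  GetFalAiPlushifyRequestsByRequestIdStatusResponse,
  GetFalAiPlushifyRequestsByRequestIdResponse,
} from './types.gen'

const sleep = (ms: number) => new Promise<void>((resolve) => setTimeout(resolve, ms))

export async function runPlushify(
  body: PostFalAiPlushifyData['body'],
): Promise<GetFalAiPlushifyRequestsByRequestIdResponse> {
  const submitted = await falRequest<PostFalAiPlushifyResponse>('POST', {
    body,
    url: '/fal-ai/plushify',
  })
  // Poll until the queue reports COMPLETED, then fetch the result payload.
  for (;;) {
    const status = await falRequest<GetFalAiPlushifyRequestsByRequestIdStatusResponse>(
      'GET',
      {
        path: { request_id: submitted.request_id },
        query: { logs: 0 },
        url: '/fal-ai/plushify/requests/{request_id}/status',
      },
    )
    if (status.status === 'COMPLETED') break
    await sleep(2000)
  }
  return falRequest<GetFalAiPlushifyRequestsByRequestIdResponse>('GET', {
    path: { request_id: submitted.request_id },
    url: '/fal-ai/plushify/requests/{request_id}',
  })
}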
+ */ + success?: boolean + } +} + +export type PutFalAiInstantCharacterRequestsByRequestIdCancelResponse = + PutFalAiInstantCharacterRequestsByRequestIdCancelResponses[keyof PutFalAiInstantCharacterRequestsByRequestIdCancelResponses] + +export type PostFalAiInstantCharacterData = { + body: SchemaInstantCharacterInput + path?: never + query?: never + url: '/fal-ai/instant-character' +} + +export type PostFalAiInstantCharacterResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiInstantCharacterResponse = + PostFalAiInstantCharacterResponses[keyof PostFalAiInstantCharacterResponses] + +export type GetFalAiInstantCharacterRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/instant-character/requests/{request_id}' +} + +export type GetFalAiInstantCharacterRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaInstantCharacterOutput +} + +export type GetFalAiInstantCharacterRequestsByRequestIdResponse = + GetFalAiInstantCharacterRequestsByRequestIdResponses[keyof GetFalAiInstantCharacterRequestsByRequestIdResponses] + +export type GetFalAiCartoonifyRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/cartoonify/requests/{request_id}/status' +} + +export type GetFalAiCartoonifyRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiCartoonifyRequestsByRequestIdStatusResponse = + GetFalAiCartoonifyRequestsByRequestIdStatusResponses[keyof GetFalAiCartoonifyRequestsByRequestIdStatusResponses] + +export type PutFalAiCartoonifyRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/cartoonify/requests/{request_id}/cancel' +} + +export type PutFalAiCartoonifyRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiCartoonifyRequestsByRequestIdCancelResponse = + PutFalAiCartoonifyRequestsByRequestIdCancelResponses[keyof PutFalAiCartoonifyRequestsByRequestIdCancelResponses] + +export type PostFalAiCartoonifyData = { + body: SchemaCartoonifyInput + path?: never + query?: never + url: '/fal-ai/cartoonify' +} + +export type PostFalAiCartoonifyResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiCartoonifyResponse = + PostFalAiCartoonifyResponses[keyof PostFalAiCartoonifyResponses] + +export type GetFalAiCartoonifyRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/cartoonify/requests/{request_id}' +} + +export type GetFalAiCartoonifyRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaCartoonifyOutput +} + +export type GetFalAiCartoonifyRequestsByRequestIdResponse = + GetFalAiCartoonifyRequestsByRequestIdResponses[keyof GetFalAiCartoonifyRequestsByRequestIdResponses] + +export type GetFalAiFinegrainEraserMaskRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/finegrain-eraser/mask/requests/{request_id}/status' +} + +export type GetFalAiFinegrainEraserMaskRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFinegrainEraserMaskRequestsByRequestIdStatusResponse = + GetFalAiFinegrainEraserMaskRequestsByRequestIdStatusResponses[keyof GetFalAiFinegrainEraserMaskRequestsByRequestIdStatusResponses] + +export type PutFalAiFinegrainEraserMaskRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/finegrain-eraser/mask/requests/{request_id}/cancel' +} + +export type PutFalAiFinegrainEraserMaskRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFinegrainEraserMaskRequestsByRequestIdCancelResponse = + PutFalAiFinegrainEraserMaskRequestsByRequestIdCancelResponses[keyof PutFalAiFinegrainEraserMaskRequestsByRequestIdCancelResponses] + +export type PostFalAiFinegrainEraserMaskData = { + body: SchemaFinegrainEraserMaskInput + path?: never + query?: never + url: '/fal-ai/finegrain-eraser/mask' +} + +export type PostFalAiFinegrainEraserMaskResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFinegrainEraserMaskResponse = + PostFalAiFinegrainEraserMaskResponses[keyof PostFalAiFinegrainEraserMaskResponses] + +export type GetFalAiFinegrainEraserMaskRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/finegrain-eraser/mask/requests/{request_id}' +} + +export type GetFalAiFinegrainEraserMaskRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFinegrainEraserMaskOutput +} + +export type GetFalAiFinegrainEraserMaskRequestsByRequestIdResponse = + GetFalAiFinegrainEraserMaskRequestsByRequestIdResponses[keyof GetFalAiFinegrainEraserMaskRequestsByRequestIdResponses] + +export type GetFalAiFinegrainEraserBboxRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/finegrain-eraser/bbox/requests/{request_id}/status' +} + +export type GetFalAiFinegrainEraserBboxRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFinegrainEraserBboxRequestsByRequestIdStatusResponse = + GetFalAiFinegrainEraserBboxRequestsByRequestIdStatusResponses[keyof GetFalAiFinegrainEraserBboxRequestsByRequestIdStatusResponses] + +export type PutFalAiFinegrainEraserBboxRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/finegrain-eraser/bbox/requests/{request_id}/cancel' +} + +export type PutFalAiFinegrainEraserBboxRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiFinegrainEraserBboxRequestsByRequestIdCancelResponse = + PutFalAiFinegrainEraserBboxRequestsByRequestIdCancelResponses[keyof PutFalAiFinegrainEraserBboxRequestsByRequestIdCancelResponses] + +export type PostFalAiFinegrainEraserBboxData = { + body: SchemaFinegrainEraserBboxInput + path?: never + query?: never + url: '/fal-ai/finegrain-eraser/bbox' +} + +export type PostFalAiFinegrainEraserBboxResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFinegrainEraserBboxResponse = + PostFalAiFinegrainEraserBboxResponses[keyof PostFalAiFinegrainEraserBboxResponses] + +export type GetFalAiFinegrainEraserBboxRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/finegrain-eraser/bbox/requests/{request_id}' +} + +export type GetFalAiFinegrainEraserBboxRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFinegrainEraserBboxOutput +} + +export type GetFalAiFinegrainEraserBboxRequestsByRequestIdResponse = + GetFalAiFinegrainEraserBboxRequestsByRequestIdResponses[keyof GetFalAiFinegrainEraserBboxRequestsByRequestIdResponses] + +export type GetFalAiFinegrainEraserRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/finegrain-eraser/requests/{request_id}/status' +} + +export type GetFalAiFinegrainEraserRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFinegrainEraserRequestsByRequestIdStatusResponse = + GetFalAiFinegrainEraserRequestsByRequestIdStatusResponses[keyof GetFalAiFinegrainEraserRequestsByRequestIdStatusResponses] + +export type PutFalAiFinegrainEraserRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/finegrain-eraser/requests/{request_id}/cancel' +} + +export type PutFalAiFinegrainEraserRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFinegrainEraserRequestsByRequestIdCancelResponse = + PutFalAiFinegrainEraserRequestsByRequestIdCancelResponses[keyof PutFalAiFinegrainEraserRequestsByRequestIdCancelResponses] + +export type PostFalAiFinegrainEraserData = { + body: SchemaFinegrainEraserInput + path?: never + query?: never + url: '/fal-ai/finegrain-eraser' +} + +export type PostFalAiFinegrainEraserResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFinegrainEraserResponse = + PostFalAiFinegrainEraserResponses[keyof PostFalAiFinegrainEraserResponses] + +export type GetFalAiFinegrainEraserRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/finegrain-eraser/requests/{request_id}' +} + +export type GetFalAiFinegrainEraserRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaFinegrainEraserOutput +} + +export type GetFalAiFinegrainEraserRequestsByRequestIdResponse = + GetFalAiFinegrainEraserRequestsByRequestIdResponses[keyof GetFalAiFinegrainEraserRequestsByRequestIdResponses] + +export type GetFalAiStarVectorRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/star-vector/requests/{request_id}/status' +} + +export type GetFalAiStarVectorRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiStarVectorRequestsByRequestIdStatusResponse = + GetFalAiStarVectorRequestsByRequestIdStatusResponses[keyof GetFalAiStarVectorRequestsByRequestIdStatusResponses] + +export type PutFalAiStarVectorRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/star-vector/requests/{request_id}/cancel' +} + +export type PutFalAiStarVectorRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiStarVectorRequestsByRequestIdCancelResponse = + PutFalAiStarVectorRequestsByRequestIdCancelResponses[keyof PutFalAiStarVectorRequestsByRequestIdCancelResponses] + +export type PostFalAiStarVectorData = { + body: SchemaStarVectorInput + path?: never + query?: never + url: '/fal-ai/star-vector' +} + +export type PostFalAiStarVectorResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiStarVectorResponse = + PostFalAiStarVectorResponses[keyof PostFalAiStarVectorResponses] + +export type GetFalAiStarVectorRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/star-vector/requests/{request_id}' +} + +export type GetFalAiStarVectorRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaStarVectorOutput +} + +export type GetFalAiStarVectorRequestsByRequestIdResponse = + GetFalAiStarVectorRequestsByRequestIdResponses[keyof GetFalAiStarVectorRequestsByRequestIdResponses] + +export type GetFalAiGhiblifyRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ghiblify/requests/{request_id}/status' +} + +export type GetFalAiGhiblifyRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiGhiblifyRequestsByRequestIdStatusResponse = + GetFalAiGhiblifyRequestsByRequestIdStatusResponses[keyof GetFalAiGhiblifyRequestsByRequestIdStatusResponses] + +export type PutFalAiGhiblifyRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ghiblify/requests/{request_id}/cancel' +} + +export type PutFalAiGhiblifyRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiGhiblifyRequestsByRequestIdCancelResponse = + PutFalAiGhiblifyRequestsByRequestIdCancelResponses[keyof PutFalAiGhiblifyRequestsByRequestIdCancelResponses] + +export type PostFalAiGhiblifyData = { + body: SchemaGhiblifyInput + path?: never + query?: never + url: '/fal-ai/ghiblify' +} + +export type PostFalAiGhiblifyResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiGhiblifyResponse = + PostFalAiGhiblifyResponses[keyof PostFalAiGhiblifyResponses] + +export type GetFalAiGhiblifyRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ghiblify/requests/{request_id}' +} + +export type GetFalAiGhiblifyRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaGhiblifyOutput +} + +export type GetFalAiGhiblifyRequestsByRequestIdResponse = + GetFalAiGhiblifyRequestsByRequestIdResponses[keyof GetFalAiGhiblifyRequestsByRequestIdResponses] + +export type GetFalAiTheraRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/thera/requests/{request_id}/status' +} + +export type GetFalAiTheraRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiTheraRequestsByRequestIdStatusResponse = + GetFalAiTheraRequestsByRequestIdStatusResponses[keyof GetFalAiTheraRequestsByRequestIdStatusResponses] + +export type PutFalAiTheraRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/thera/requests/{request_id}/cancel' +} + +export type PutFalAiTheraRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiTheraRequestsByRequestIdCancelResponse = + PutFalAiTheraRequestsByRequestIdCancelResponses[keyof PutFalAiTheraRequestsByRequestIdCancelResponses] + +export type PostFalAiTheraData = { + body: SchemaTheraInput + path?: never + query?: never + url: '/fal-ai/thera' +} + +export type PostFalAiTheraResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiTheraResponse = + PostFalAiTheraResponses[keyof PostFalAiTheraResponses] + +export type GetFalAiTheraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/thera/requests/{request_id}' +} + +export type GetFalAiTheraRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaTheraOutput +} + +export type GetFalAiTheraRequestsByRequestIdResponse = + GetFalAiTheraRequestsByRequestIdResponses[keyof GetFalAiTheraRequestsByRequestIdResponses] + +export type GetFalAiMixDehazeNetRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/mix-dehaze-net/requests/{request_id}/status' +} + +export type GetFalAiMixDehazeNetRequestsByRequestIdStatusResponses = { + /** + * The request status. 
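// Usage sketch: instead of templating URLs by hand, the submit response's
// status_url and response_url (optional fields on the QueueStatus schema) can
// be followed directly. They are not required fields, hence the guard; whether
// they are always populated is an assumption, not something this file states.
import type {
  PostFalAiGhiblifyResponse,
  GetFalAiGhiblifyRequestsByRequestIdStatusResponse,
  GetFalAiGhiblifyRequestsByRequestIdResponse,
} from './types.gen'

export async function awaitGhiblify(
  submitted: PostFalAiGhiblifyResponse,
): Promise<GetFalAiGhiblifyRequestsByRequestIdResponse> {
  if (!submitted.status_url || !submitted.response_url) {
    throw new Error('queue response did not include polling URLs')
  }
  const headers = { Authorization: `Key ${process.env.FAL_KEY}` }
  for (;;) {
    const statusRes = await fetch(submitted.status_url, { headers })
    const status =
      (await statusRes.json()) as GetFalAiGhiblifyRequestsByRequestIdStatusResponse
    if (status.status === 'COMPLETED') break
    await new Promise((resolve) => setTimeout(resolve, 2000))
  }
  const res = await fetch(submitted.response_url, { headers })
  return (await res.json()) as GetFalAiGhiblifyRequestsByRequestIdResponse
}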
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiMixDehazeNetRequestsByRequestIdStatusResponse = + GetFalAiMixDehazeNetRequestsByRequestIdStatusResponses[keyof GetFalAiMixDehazeNetRequestsByRequestIdStatusResponses] + +export type PutFalAiMixDehazeNetRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/mix-dehaze-net/requests/{request_id}/cancel' +} + +export type PutFalAiMixDehazeNetRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiMixDehazeNetRequestsByRequestIdCancelResponse = + PutFalAiMixDehazeNetRequestsByRequestIdCancelResponses[keyof PutFalAiMixDehazeNetRequestsByRequestIdCancelResponses] + +export type PostFalAiMixDehazeNetData = { + body: SchemaMixDehazeNetInput + path?: never + query?: never + url: '/fal-ai/mix-dehaze-net' +} + +export type PostFalAiMixDehazeNetResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMixDehazeNetResponse = + PostFalAiMixDehazeNetResponses[keyof PostFalAiMixDehazeNetResponses] + +export type GetFalAiMixDehazeNetRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/mix-dehaze-net/requests/{request_id}' +} + +export type GetFalAiMixDehazeNetRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMixDehazeNetOutput +} + +export type GetFalAiMixDehazeNetRequestsByRequestIdResponse = + GetFalAiMixDehazeNetRequestsByRequestIdResponses[keyof GetFalAiMixDehazeNetRequestsByRequestIdResponses] + +export type GetFalAiGeminiFlashEditMultiRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/gemini-flash-edit/multi/requests/{request_id}/status' +} + +export type GetFalAiGeminiFlashEditMultiRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiGeminiFlashEditMultiRequestsByRequestIdStatusResponse = + GetFalAiGeminiFlashEditMultiRequestsByRequestIdStatusResponses[keyof GetFalAiGeminiFlashEditMultiRequestsByRequestIdStatusResponses] + +export type PutFalAiGeminiFlashEditMultiRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/gemini-flash-edit/multi/requests/{request_id}/cancel' +} + +export type PutFalAiGeminiFlashEditMultiRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiGeminiFlashEditMultiRequestsByRequestIdCancelResponse = + PutFalAiGeminiFlashEditMultiRequestsByRequestIdCancelResponses[keyof PutFalAiGeminiFlashEditMultiRequestsByRequestIdCancelResponses] + +export type PostFalAiGeminiFlashEditMultiData = { + body: SchemaGeminiFlashEditMultiInput + path?: never + query?: never + url: '/fal-ai/gemini-flash-edit/multi' +} + +export type PostFalAiGeminiFlashEditMultiResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiGeminiFlashEditMultiResponse = + PostFalAiGeminiFlashEditMultiResponses[keyof PostFalAiGeminiFlashEditMultiResponses] + +export type GetFalAiGeminiFlashEditMultiRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/gemini-flash-edit/multi/requests/{request_id}' +} + +export type GetFalAiGeminiFlashEditMultiRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaGeminiFlashEditMultiOutput +} + +export type GetFalAiGeminiFlashEditMultiRequestsByRequestIdResponse = + GetFalAiGeminiFlashEditMultiRequestsByRequestIdResponses[keyof GetFalAiGeminiFlashEditMultiRequestsByRequestIdResponses] + +export type GetFalAiGeminiFlashEditRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/gemini-flash-edit/requests/{request_id}/status' +} + +export type GetFalAiGeminiFlashEditRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiGeminiFlashEditRequestsByRequestIdStatusResponse = + GetFalAiGeminiFlashEditRequestsByRequestIdStatusResponses[keyof GetFalAiGeminiFlashEditRequestsByRequestIdStatusResponses] + +export type PutFalAiGeminiFlashEditRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/gemini-flash-edit/requests/{request_id}/cancel' +} + +export type PutFalAiGeminiFlashEditRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiGeminiFlashEditRequestsByRequestIdCancelResponse = + PutFalAiGeminiFlashEditRequestsByRequestIdCancelResponses[keyof PutFalAiGeminiFlashEditRequestsByRequestIdCancelResponses] + +export type PostFalAiGeminiFlashEditData = { + body: SchemaGeminiFlashEditInput + path?: never + query?: never + url: '/fal-ai/gemini-flash-edit' +} + +export type PostFalAiGeminiFlashEditResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiGeminiFlashEditResponse = + PostFalAiGeminiFlashEditResponses[keyof PostFalAiGeminiFlashEditResponses] + +export type GetFalAiGeminiFlashEditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/gemini-flash-edit/requests/{request_id}' +} + +export type GetFalAiGeminiFlashEditRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaGeminiFlashEditOutput +} + +export type GetFalAiGeminiFlashEditRequestsByRequestIdResponse = + GetFalAiGeminiFlashEditRequestsByRequestIdResponses[keyof GetFalAiGeminiFlashEditRequestsByRequestIdResponses] + +export type GetFalAiInvisibleWatermarkRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/invisible-watermark/requests/{request_id}/status' +} + +export type GetFalAiInvisibleWatermarkRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiInvisibleWatermarkRequestsByRequestIdStatusResponse = + GetFalAiInvisibleWatermarkRequestsByRequestIdStatusResponses[keyof GetFalAiInvisibleWatermarkRequestsByRequestIdStatusResponses] + +export type PutFalAiInvisibleWatermarkRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/invisible-watermark/requests/{request_id}/cancel' +} + +export type PutFalAiInvisibleWatermarkRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiInvisibleWatermarkRequestsByRequestIdCancelResponse = + PutFalAiInvisibleWatermarkRequestsByRequestIdCancelResponses[keyof PutFalAiInvisibleWatermarkRequestsByRequestIdCancelResponses] + +export type PostFalAiInvisibleWatermarkData = { + body: SchemaInvisibleWatermarkInput + path?: never + query?: never + url: '/fal-ai/invisible-watermark' +} + +export type PostFalAiInvisibleWatermarkResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiInvisibleWatermarkResponse = + PostFalAiInvisibleWatermarkResponses[keyof PostFalAiInvisibleWatermarkResponses] + +export type GetFalAiInvisibleWatermarkRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/invisible-watermark/requests/{request_id}' +} + +export type GetFalAiInvisibleWatermarkRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaInvisibleWatermarkOutput +} + +export type GetFalAiInvisibleWatermarkRequestsByRequestIdResponse = + GetFalAiInvisibleWatermarkRequestsByRequestIdResponses[keyof GetFalAiInvisibleWatermarkRequestsByRequestIdResponses] + +export type GetRundiffusionFalJuggernautFluxProImageToImageRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/rundiffusion-fal/juggernaut-flux/pro/image-to-image/requests/{request_id}/status' + } + +export type GetRundiffusionFalJuggernautFluxProImageToImageRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetRundiffusionFalJuggernautFluxProImageToImageRequestsByRequestIdStatusResponse = + GetRundiffusionFalJuggernautFluxProImageToImageRequestsByRequestIdStatusResponses[keyof GetRundiffusionFalJuggernautFluxProImageToImageRequestsByRequestIdStatusResponses] + +export type PutRundiffusionFalJuggernautFluxProImageToImageRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/rundiffusion-fal/juggernaut-flux/pro/image-to-image/requests/{request_id}/cancel' + } + +export type PutRundiffusionFalJuggernautFluxProImageToImageRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
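// Usage sketch: the generated Responses maps describe only the 200 body, so
// any other status code is untyped at compile time. A small guard keeps the
// casts honest by refusing to decode anything but a 200.
export async function json200<T>(res: Response): Promise<T> {
  if (res.status !== 200) {
    throw new Error(`unexpected status ${res.status}: ${await res.text()}`)
  }
  return (await res.json()) as T
}

// e.g. const out = await json200<GetFalAiInvisibleWatermarkRequestsByRequestIdResponse>(
//   await fetch(resultUrl, { headers }),
// )
// where `resultUrl` and `headers` are built as in the earlier sketches.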
+ */ + success?: boolean + } + } + +export type PutRundiffusionFalJuggernautFluxProImageToImageRequestsByRequestIdCancelResponse = + PutRundiffusionFalJuggernautFluxProImageToImageRequestsByRequestIdCancelResponses[keyof PutRundiffusionFalJuggernautFluxProImageToImageRequestsByRequestIdCancelResponses] + +export type PostRundiffusionFalJuggernautFluxProImageToImageData = { + body: SchemaJuggernautFluxProImageToImageInput + path?: never + query?: never + url: '/rundiffusion-fal/juggernaut-flux/pro/image-to-image' +} + +export type PostRundiffusionFalJuggernautFluxProImageToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostRundiffusionFalJuggernautFluxProImageToImageResponse = + PostRundiffusionFalJuggernautFluxProImageToImageResponses[keyof PostRundiffusionFalJuggernautFluxProImageToImageResponses] + +export type GetRundiffusionFalJuggernautFluxProImageToImageRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/rundiffusion-fal/juggernaut-flux/pro/image-to-image/requests/{request_id}' + } + +export type GetRundiffusionFalJuggernautFluxProImageToImageRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaJuggernautFluxProImageToImageOutput + } + +export type GetRundiffusionFalJuggernautFluxProImageToImageRequestsByRequestIdResponse = + GetRundiffusionFalJuggernautFluxProImageToImageRequestsByRequestIdResponses[keyof GetRundiffusionFalJuggernautFluxProImageToImageRequestsByRequestIdResponses] + +export type GetRundiffusionFalJuggernautFluxBaseImageToImageRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/rundiffusion-fal/juggernaut-flux/base/image-to-image/requests/{request_id}/status' + } + +export type GetRundiffusionFalJuggernautFluxBaseImageToImageRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetRundiffusionFalJuggernautFluxBaseImageToImageRequestsByRequestIdStatusResponse = + GetRundiffusionFalJuggernautFluxBaseImageToImageRequestsByRequestIdStatusResponses[keyof GetRundiffusionFalJuggernautFluxBaseImageToImageRequestsByRequestIdStatusResponses] + +export type PutRundiffusionFalJuggernautFluxBaseImageToImageRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/rundiffusion-fal/juggernaut-flux/base/image-to-image/requests/{request_id}/cancel' + } + +export type PutRundiffusionFalJuggernautFluxBaseImageToImageRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutRundiffusionFalJuggernautFluxBaseImageToImageRequestsByRequestIdCancelResponse = + PutRundiffusionFalJuggernautFluxBaseImageToImageRequestsByRequestIdCancelResponses[keyof PutRundiffusionFalJuggernautFluxBaseImageToImageRequestsByRequestIdCancelResponses] + +export type PostRundiffusionFalJuggernautFluxBaseImageToImageData = { + body: SchemaJuggernautFluxBaseImageToImageInput + path?: never + query?: never + url: '/rundiffusion-fal/juggernaut-flux/base/image-to-image' +} + +export type PostRundiffusionFalJuggernautFluxBaseImageToImageResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostRundiffusionFalJuggernautFluxBaseImageToImageResponse = + PostRundiffusionFalJuggernautFluxBaseImageToImageResponses[keyof PostRundiffusionFalJuggernautFluxBaseImageToImageResponses] + +export type GetRundiffusionFalJuggernautFluxBaseImageToImageRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/rundiffusion-fal/juggernaut-flux/base/image-to-image/requests/{request_id}' + } + +export type GetRundiffusionFalJuggernautFluxBaseImageToImageRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaJuggernautFluxBaseImageToImageOutput + } + +export type GetRundiffusionFalJuggernautFluxBaseImageToImageRequestsByRequestIdResponse = + GetRundiffusionFalJuggernautFluxBaseImageToImageRequestsByRequestIdResponses[keyof GetRundiffusionFalJuggernautFluxBaseImageToImageRequestsByRequestIdResponses] + +export type GetFalAiDocresDewarpRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/docres/dewarp/requests/{request_id}/status' +} + +export type GetFalAiDocresDewarpRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiDocresDewarpRequestsByRequestIdStatusResponse = + GetFalAiDocresDewarpRequestsByRequestIdStatusResponses[keyof GetFalAiDocresDewarpRequestsByRequestIdStatusResponses] + +export type PutFalAiDocresDewarpRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/docres/dewarp/requests/{request_id}/cancel' +} + +export type PutFalAiDocresDewarpRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiDocresDewarpRequestsByRequestIdCancelResponse = + PutFalAiDocresDewarpRequestsByRequestIdCancelResponses[keyof PutFalAiDocresDewarpRequestsByRequestIdCancelResponses] + +export type PostFalAiDocresDewarpData = { + body: SchemaDocresDewarpInput + path?: never + query?: never + url: '/fal-ai/docres/dewarp' +} + +export type PostFalAiDocresDewarpResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiDocresDewarpResponse = + PostFalAiDocresDewarpResponses[keyof PostFalAiDocresDewarpResponses] + +export type GetFalAiDocresDewarpRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/docres/dewarp/requests/{request_id}' +} + +export type GetFalAiDocresDewarpRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaDocresDewarpOutput +} + +export type GetFalAiDocresDewarpRequestsByRequestIdResponse = + GetFalAiDocresDewarpRequestsByRequestIdResponses[keyof GetFalAiDocresDewarpRequestsByRequestIdResponses] + +export type GetFalAiDocresRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/docres/requests/{request_id}/status' +} + +export type GetFalAiDocresRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiDocresRequestsByRequestIdStatusResponse = + GetFalAiDocresRequestsByRequestIdStatusResponses[keyof GetFalAiDocresRequestsByRequestIdStatusResponses] + +export type PutFalAiDocresRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/docres/requests/{request_id}/cancel' +} + +export type PutFalAiDocresRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiDocresRequestsByRequestIdCancelResponse = + PutFalAiDocresRequestsByRequestIdCancelResponses[keyof PutFalAiDocresRequestsByRequestIdCancelResponses] + +export type PostFalAiDocresData = { + body: SchemaDocresInput + path?: never + query?: never + url: '/fal-ai/docres' +} + +export type PostFalAiDocresResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiDocresResponse = + PostFalAiDocresResponses[keyof PostFalAiDocresResponses] + +export type GetFalAiDocresRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/docres/requests/{request_id}' +} + +export type GetFalAiDocresRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaDocresOutput +} + +export type GetFalAiDocresRequestsByRequestIdResponse = + GetFalAiDocresRequestsByRequestIdResponses[keyof GetFalAiDocresRequestsByRequestIdResponses] + +export type GetFalAiSwin2SrRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/swin2sr/requests/{request_id}/status' +} + +export type GetFalAiSwin2SrRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSwin2SrRequestsByRequestIdStatusResponse = + GetFalAiSwin2SrRequestsByRequestIdStatusResponses[keyof GetFalAiSwin2SrRequestsByRequestIdStatusResponses] + +export type PutFalAiSwin2SrRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/swin2sr/requests/{request_id}/cancel' +} + +export type PutFalAiSwin2SrRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiSwin2SrRequestsByRequestIdCancelResponse = + PutFalAiSwin2SrRequestsByRequestIdCancelResponses[keyof PutFalAiSwin2SrRequestsByRequestIdCancelResponses] + +export type PostFalAiSwin2SrData = { + body: SchemaSwin2SrInput + path?: never + query?: never + url: '/fal-ai/swin2sr' +} + +export type PostFalAiSwin2SrResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSwin2SrResponse = + PostFalAiSwin2SrResponses[keyof PostFalAiSwin2SrResponses] + +export type GetFalAiSwin2SrRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/swin2sr/requests/{request_id}' +} + +export type GetFalAiSwin2SrRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaSwin2SrOutput +} + +export type GetFalAiSwin2SrRequestsByRequestIdResponse = + GetFalAiSwin2SrRequestsByRequestIdResponses[keyof GetFalAiSwin2SrRequestsByRequestIdResponses] + +export type GetFalAiIdeogramV2aRemixRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ideogram/v2a/remix/requests/{request_id}/status' +} + +export type GetFalAiIdeogramV2aRemixRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiIdeogramV2aRemixRequestsByRequestIdStatusResponse = + GetFalAiIdeogramV2aRemixRequestsByRequestIdStatusResponses[keyof GetFalAiIdeogramV2aRemixRequestsByRequestIdStatusResponses] + +export type PutFalAiIdeogramV2aRemixRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ideogram/v2a/remix/requests/{request_id}/cancel' +} + +export type PutFalAiIdeogramV2aRemixRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiIdeogramV2aRemixRequestsByRequestIdCancelResponse = + PutFalAiIdeogramV2aRemixRequestsByRequestIdCancelResponses[keyof PutFalAiIdeogramV2aRemixRequestsByRequestIdCancelResponses] + +export type PostFalAiIdeogramV2aRemixData = { + body: SchemaIdeogramV2aRemixInput + path?: never + query?: never + url: '/fal-ai/ideogram/v2a/remix' +} + +export type PostFalAiIdeogramV2aRemixResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiIdeogramV2aRemixResponse = + PostFalAiIdeogramV2aRemixResponses[keyof PostFalAiIdeogramV2aRemixResponses] + +export type GetFalAiIdeogramV2aRemixRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ideogram/v2a/remix/requests/{request_id}' +} + +export type GetFalAiIdeogramV2aRemixRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaIdeogramV2aRemixOutput +} + +export type GetFalAiIdeogramV2aRemixRequestsByRequestIdResponse = + GetFalAiIdeogramV2aRemixRequestsByRequestIdResponses[keyof GetFalAiIdeogramV2aRemixRequestsByRequestIdResponses] + +export type GetFalAiIdeogramV2aTurboRemixRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ideogram/v2a/turbo/remix/requests/{request_id}/status' +} + +export type GetFalAiIdeogramV2aTurboRemixRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiIdeogramV2aTurboRemixRequestsByRequestIdStatusResponse = + GetFalAiIdeogramV2aTurboRemixRequestsByRequestIdStatusResponses[keyof GetFalAiIdeogramV2aTurboRemixRequestsByRequestIdStatusResponses] + +export type PutFalAiIdeogramV2aTurboRemixRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ideogram/v2a/turbo/remix/requests/{request_id}/cancel' +} + +export type PutFalAiIdeogramV2aTurboRemixRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. 
+ */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiIdeogramV2aTurboRemixRequestsByRequestIdCancelResponse = + PutFalAiIdeogramV2aTurboRemixRequestsByRequestIdCancelResponses[keyof PutFalAiIdeogramV2aTurboRemixRequestsByRequestIdCancelResponses] + +export type PostFalAiIdeogramV2aTurboRemixData = { + body: SchemaIdeogramV2aTurboRemixInput + path?: never + query?: never + url: '/fal-ai/ideogram/v2a/turbo/remix' +} + +export type PostFalAiIdeogramV2aTurboRemixResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiIdeogramV2aTurboRemixResponse = + PostFalAiIdeogramV2aTurboRemixResponses[keyof PostFalAiIdeogramV2aTurboRemixResponses] + +export type GetFalAiIdeogramV2aTurboRemixRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ideogram/v2a/turbo/remix/requests/{request_id}' +} + +export type GetFalAiIdeogramV2aTurboRemixRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaIdeogramV2aTurboRemixOutput +} + +export type GetFalAiIdeogramV2aTurboRemixRequestsByRequestIdResponse = + GetFalAiIdeogramV2aTurboRemixRequestsByRequestIdResponses[keyof GetFalAiIdeogramV2aTurboRemixRequestsByRequestIdResponses] + +export type GetFalAiEvfSamRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/evf-sam/requests/{request_id}/status' +} + +export type GetFalAiEvfSamRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiEvfSamRequestsByRequestIdStatusResponse = + GetFalAiEvfSamRequestsByRequestIdStatusResponses[keyof GetFalAiEvfSamRequestsByRequestIdStatusResponses] + +export type PutFalAiEvfSamRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/evf-sam/requests/{request_id}/cancel' +} + +export type PutFalAiEvfSamRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiEvfSamRequestsByRequestIdCancelResponse = + PutFalAiEvfSamRequestsByRequestIdCancelResponses[keyof PutFalAiEvfSamRequestsByRequestIdCancelResponses] + +export type PostFalAiEvfSamData = { + body: SchemaEvfSamInput + path?: never + query?: never + url: '/fal-ai/evf-sam' +} + +export type PostFalAiEvfSamResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiEvfSamResponse = + PostFalAiEvfSamResponses[keyof PostFalAiEvfSamResponses] + +export type GetFalAiEvfSamRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/evf-sam/requests/{request_id}' +} + +export type GetFalAiEvfSamRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaEvfSamOutput +} + +export type GetFalAiEvfSamRequestsByRequestIdResponse = + GetFalAiEvfSamRequestsByRequestIdResponses[keyof GetFalAiEvfSamRequestsByRequestIdResponses] + +export type GetFalAiDdcolorRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ddcolor/requests/{request_id}/status' +} + +export type GetFalAiDdcolorRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiDdcolorRequestsByRequestIdStatusResponse = + GetFalAiDdcolorRequestsByRequestIdStatusResponses[keyof GetFalAiDdcolorRequestsByRequestIdStatusResponses] + +export type PutFalAiDdcolorRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ddcolor/requests/{request_id}/cancel' +} + +export type PutFalAiDdcolorRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiDdcolorRequestsByRequestIdCancelResponse = + PutFalAiDdcolorRequestsByRequestIdCancelResponses[keyof PutFalAiDdcolorRequestsByRequestIdCancelResponses] + +export type PostFalAiDdcolorData = { + body: SchemaDdcolorInput + path?: never + query?: never + url: '/fal-ai/ddcolor' +} + +export type PostFalAiDdcolorResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiDdcolorResponse = + PostFalAiDdcolorResponses[keyof PostFalAiDdcolorResponses] + +export type GetFalAiDdcolorRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ddcolor/requests/{request_id}' +} + +export type GetFalAiDdcolorRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaDdcolorOutput +} + +export type GetFalAiDdcolorRequestsByRequestIdResponse = + GetFalAiDdcolorRequestsByRequestIdResponses[keyof GetFalAiDdcolorRequestsByRequestIdResponses] + +export type GetFalAiSam2AutoSegmentRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/sam2/auto-segment/requests/{request_id}/status' +} + +export type GetFalAiSam2AutoSegmentRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSam2AutoSegmentRequestsByRequestIdStatusResponse = + GetFalAiSam2AutoSegmentRequestsByRequestIdStatusResponses[keyof GetFalAiSam2AutoSegmentRequestsByRequestIdStatusResponses] + +export type PutFalAiSam2AutoSegmentRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sam2/auto-segment/requests/{request_id}/cancel' +} + +export type PutFalAiSam2AutoSegmentRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiSam2AutoSegmentRequestsByRequestIdCancelResponse = + PutFalAiSam2AutoSegmentRequestsByRequestIdCancelResponses[keyof PutFalAiSam2AutoSegmentRequestsByRequestIdCancelResponses] + +export type PostFalAiSam2AutoSegmentData = { + body: SchemaSam2AutoSegmentInput + path?: never + query?: never + url: '/fal-ai/sam2/auto-segment' +} + +export type PostFalAiSam2AutoSegmentResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSam2AutoSegmentResponse = + PostFalAiSam2AutoSegmentResponses[keyof PostFalAiSam2AutoSegmentResponses] + +export type GetFalAiSam2AutoSegmentRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sam2/auto-segment/requests/{request_id}' +} + +export type GetFalAiSam2AutoSegmentRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSam2AutoSegmentOutput +} + +export type GetFalAiSam2AutoSegmentRequestsByRequestIdResponse = + GetFalAiSam2AutoSegmentRequestsByRequestIdResponses[keyof GetFalAiSam2AutoSegmentRequestsByRequestIdResponses] + +export type GetFalAiDrctSuperResolutionRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/drct-super-resolution/requests/{request_id}/status' +} + +export type GetFalAiDrctSuperResolutionRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiDrctSuperResolutionRequestsByRequestIdStatusResponse = + GetFalAiDrctSuperResolutionRequestsByRequestIdStatusResponses[keyof GetFalAiDrctSuperResolutionRequestsByRequestIdStatusResponses] + +export type PutFalAiDrctSuperResolutionRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/drct-super-resolution/requests/{request_id}/cancel' +} + +export type PutFalAiDrctSuperResolutionRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiDrctSuperResolutionRequestsByRequestIdCancelResponse = + PutFalAiDrctSuperResolutionRequestsByRequestIdCancelResponses[keyof PutFalAiDrctSuperResolutionRequestsByRequestIdCancelResponses] + +export type PostFalAiDrctSuperResolutionData = { + body: SchemaDrctSuperResolutionInput + path?: never + query?: never + url: '/fal-ai/drct-super-resolution' +} + +export type PostFalAiDrctSuperResolutionResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiDrctSuperResolutionResponse = + PostFalAiDrctSuperResolutionResponses[keyof PostFalAiDrctSuperResolutionResponses] + +export type GetFalAiDrctSuperResolutionRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/drct-super-resolution/requests/{request_id}' +} + +export type GetFalAiDrctSuperResolutionRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaDrctSuperResolutionOutput +} + +export type GetFalAiDrctSuperResolutionRequestsByRequestIdResponse = + GetFalAiDrctSuperResolutionRequestsByRequestIdResponses[keyof GetFalAiDrctSuperResolutionRequestsByRequestIdResponses] + +export type GetFalAiNafnetDeblurRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/nafnet/deblur/requests/{request_id}/status' +} + +export type GetFalAiNafnetDeblurRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiNafnetDeblurRequestsByRequestIdStatusResponse = + GetFalAiNafnetDeblurRequestsByRequestIdStatusResponses[keyof GetFalAiNafnetDeblurRequestsByRequestIdStatusResponses] + +export type PutFalAiNafnetDeblurRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/nafnet/deblur/requests/{request_id}/cancel' +} + +export type PutFalAiNafnetDeblurRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiNafnetDeblurRequestsByRequestIdCancelResponse = + PutFalAiNafnetDeblurRequestsByRequestIdCancelResponses[keyof PutFalAiNafnetDeblurRequestsByRequestIdCancelResponses] + +export type PostFalAiNafnetDeblurData = { + body: SchemaNafnetDeblurInput + path?: never + query?: never + url: '/fal-ai/nafnet/deblur' +} + +export type PostFalAiNafnetDeblurResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiNafnetDeblurResponse = + PostFalAiNafnetDeblurResponses[keyof PostFalAiNafnetDeblurResponses] + +export type GetFalAiNafnetDeblurRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/nafnet/deblur/requests/{request_id}' +} + +export type GetFalAiNafnetDeblurRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaNafnetDeblurOutput +} + +export type GetFalAiNafnetDeblurRequestsByRequestIdResponse = + GetFalAiNafnetDeblurRequestsByRequestIdResponses[keyof GetFalAiNafnetDeblurRequestsByRequestIdResponses] + +export type GetFalAiNafnetDenoiseRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/nafnet/denoise/requests/{request_id}/status' +} + +export type GetFalAiNafnetDenoiseRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiNafnetDenoiseRequestsByRequestIdStatusResponse = + GetFalAiNafnetDenoiseRequestsByRequestIdStatusResponses[keyof GetFalAiNafnetDenoiseRequestsByRequestIdStatusResponses] + +export type PutFalAiNafnetDenoiseRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/nafnet/denoise/requests/{request_id}/cancel' +} + +export type PutFalAiNafnetDenoiseRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiNafnetDenoiseRequestsByRequestIdCancelResponse = + PutFalAiNafnetDenoiseRequestsByRequestIdCancelResponses[keyof PutFalAiNafnetDenoiseRequestsByRequestIdCancelResponses] + +export type PostFalAiNafnetDenoiseData = { + body: SchemaNafnetDenoiseInput + path?: never + query?: never + url: '/fal-ai/nafnet/denoise' +} + +export type PostFalAiNafnetDenoiseResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiNafnetDenoiseResponse = + PostFalAiNafnetDenoiseResponses[keyof PostFalAiNafnetDenoiseResponses] + +export type GetFalAiNafnetDenoiseRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/nafnet/denoise/requests/{request_id}' +} + +export type GetFalAiNafnetDenoiseRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaNafnetDenoiseOutput +} + +export type GetFalAiNafnetDenoiseRequestsByRequestIdResponse = + GetFalAiNafnetDenoiseRequestsByRequestIdResponses[keyof GetFalAiNafnetDenoiseRequestsByRequestIdResponses] + +export type GetFalAiPostProcessingRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/post-processing/requests/{request_id}/status' +} + +export type GetFalAiPostProcessingRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiPostProcessingRequestsByRequestIdStatusResponse = + GetFalAiPostProcessingRequestsByRequestIdStatusResponses[keyof GetFalAiPostProcessingRequestsByRequestIdStatusResponses] + +export type PutFalAiPostProcessingRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/post-processing/requests/{request_id}/cancel' +} + +export type PutFalAiPostProcessingRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiPostProcessingRequestsByRequestIdCancelResponse = + PutFalAiPostProcessingRequestsByRequestIdCancelResponses[keyof PutFalAiPostProcessingRequestsByRequestIdCancelResponses] + +export type PostFalAiPostProcessingData = { + body: SchemaPostProcessingInput + path?: never + query?: never + url: '/fal-ai/post-processing' +} + +export type PostFalAiPostProcessingResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPostProcessingResponse = + PostFalAiPostProcessingResponses[keyof PostFalAiPostProcessingResponses] + +export type GetFalAiPostProcessingRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/post-processing/requests/{request_id}' +} + +export type GetFalAiPostProcessingRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaPostProcessingOutput +} + +export type GetFalAiPostProcessingRequestsByRequestIdResponse = + GetFalAiPostProcessingRequestsByRequestIdResponses[keyof GetFalAiPostProcessingRequestsByRequestIdResponses] + +export type GetFalAiFloweditRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flowedit/requests/{request_id}/status' +} + +export type GetFalAiFloweditRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFloweditRequestsByRequestIdStatusResponse = + GetFalAiFloweditRequestsByRequestIdStatusResponses[keyof GetFalAiFloweditRequestsByRequestIdStatusResponses] + +export type PutFalAiFloweditRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flowedit/requests/{request_id}/cancel' +} + +export type PutFalAiFloweditRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFloweditRequestsByRequestIdCancelResponse = + PutFalAiFloweditRequestsByRequestIdCancelResponses[keyof PutFalAiFloweditRequestsByRequestIdCancelResponses] + +export type PostFalAiFloweditData = { + body: SchemaFloweditInput + path?: never + query?: never + url: '/fal-ai/flowedit' +} + +export type PostFalAiFloweditResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFloweditResponse = + PostFalAiFloweditResponses[keyof PostFalAiFloweditResponses] + +export type GetFalAiFloweditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flowedit/requests/{request_id}' +} + +export type GetFalAiFloweditRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFloweditOutput +} + +export type GetFalAiFloweditRequestsByRequestIdResponse = + GetFalAiFloweditRequestsByRequestIdResponses[keyof GetFalAiFloweditRequestsByRequestIdResponses] + +export type GetFalAiFluxControlLoraDepthImageToImageRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-control-lora-depth/image-to-image/requests/{request_id}/status' + } + +export type GetFalAiFluxControlLoraDepthImageToImageRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFluxControlLoraDepthImageToImageRequestsByRequestIdStatusResponse = + GetFalAiFluxControlLoraDepthImageToImageRequestsByRequestIdStatusResponses[keyof GetFalAiFluxControlLoraDepthImageToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxControlLoraDepthImageToImageRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-control-lora-depth/image-to-image/requests/{request_id}/cancel' + } + +export type PutFalAiFluxControlLoraDepthImageToImageRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiFluxControlLoraDepthImageToImageRequestsByRequestIdCancelResponse = + PutFalAiFluxControlLoraDepthImageToImageRequestsByRequestIdCancelResponses[keyof PutFalAiFluxControlLoraDepthImageToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxControlLoraDepthImageToImageData = { + body: SchemaFluxControlLoraDepthImageToImageInput + path?: never + query?: never + url: '/fal-ai/flux-control-lora-depth/image-to-image' +} + +export type PostFalAiFluxControlLoraDepthImageToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxControlLoraDepthImageToImageResponse = + PostFalAiFluxControlLoraDepthImageToImageResponses[keyof PostFalAiFluxControlLoraDepthImageToImageResponses] + +export type GetFalAiFluxControlLoraDepthImageToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-control-lora-depth/image-to-image/requests/{request_id}' +} + +export type GetFalAiFluxControlLoraDepthImageToImageRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaFluxControlLoraDepthImageToImageOutput + } + +export type GetFalAiFluxControlLoraDepthImageToImageRequestsByRequestIdResponse = + GetFalAiFluxControlLoraDepthImageToImageRequestsByRequestIdResponses[keyof GetFalAiFluxControlLoraDepthImageToImageRequestsByRequestIdResponses] + +export type GetFalAiBenV2ImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ben/v2/image/requests/{request_id}/status' +} + +export type GetFalAiBenV2ImageRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiBenV2ImageRequestsByRequestIdStatusResponse = + GetFalAiBenV2ImageRequestsByRequestIdStatusResponses[keyof GetFalAiBenV2ImageRequestsByRequestIdStatusResponses] + +export type PutFalAiBenV2ImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ben/v2/image/requests/{request_id}/cancel' +} + +export type PutFalAiBenV2ImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiBenV2ImageRequestsByRequestIdCancelResponse = + PutFalAiBenV2ImageRequestsByRequestIdCancelResponses[keyof PutFalAiBenV2ImageRequestsByRequestIdCancelResponses] + +export type PostFalAiBenV2ImageData = { + body: SchemaBenV2ImageInput + path?: never + query?: never + url: '/fal-ai/ben/v2/image' +} + +export type PostFalAiBenV2ImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiBenV2ImageResponse = + PostFalAiBenV2ImageResponses[keyof PostFalAiBenV2ImageResponses] + +export type GetFalAiBenV2ImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ben/v2/image/requests/{request_id}' +} + +export type GetFalAiBenV2ImageRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaBenV2ImageOutput +} + +export type GetFalAiBenV2ImageRequestsByRequestIdResponse = + GetFalAiBenV2ImageRequestsByRequestIdResponses[keyof GetFalAiBenV2ImageRequestsByRequestIdResponses] + +export type GetFalAiFluxControlLoraCannyImageToImageRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-control-lora-canny/image-to-image/requests/{request_id}/status' + } + +export type GetFalAiFluxControlLoraCannyImageToImageRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFluxControlLoraCannyImageToImageRequestsByRequestIdStatusResponse = + GetFalAiFluxControlLoraCannyImageToImageRequestsByRequestIdStatusResponses[keyof GetFalAiFluxControlLoraCannyImageToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxControlLoraCannyImageToImageRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-control-lora-canny/image-to-image/requests/{request_id}/cancel' + } + +export type PutFalAiFluxControlLoraCannyImageToImageRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFluxControlLoraCannyImageToImageRequestsByRequestIdCancelResponse = + PutFalAiFluxControlLoraCannyImageToImageRequestsByRequestIdCancelResponses[keyof PutFalAiFluxControlLoraCannyImageToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxControlLoraCannyImageToImageData = { + body: SchemaFluxControlLoraCannyImageToImageInput + path?: never + query?: never + url: '/fal-ai/flux-control-lora-canny/image-to-image' +} + +export type PostFalAiFluxControlLoraCannyImageToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxControlLoraCannyImageToImageResponse = + PostFalAiFluxControlLoraCannyImageToImageResponses[keyof PostFalAiFluxControlLoraCannyImageToImageResponses] + +export type GetFalAiFluxControlLoraCannyImageToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-control-lora-canny/image-to-image/requests/{request_id}' +} + +export type GetFalAiFluxControlLoraCannyImageToImageRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaFluxControlLoraCannyImageToImageOutput + } + +export type GetFalAiFluxControlLoraCannyImageToImageRequestsByRequestIdResponse = + GetFalAiFluxControlLoraCannyImageToImageRequestsByRequestIdResponses[keyof GetFalAiFluxControlLoraCannyImageToImageRequestsByRequestIdResponses] + +export type GetFalAiIdeogramUpscaleRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ideogram/upscale/requests/{request_id}/status' +} + +export type GetFalAiIdeogramUpscaleRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiIdeogramUpscaleRequestsByRequestIdStatusResponse = + GetFalAiIdeogramUpscaleRequestsByRequestIdStatusResponses[keyof GetFalAiIdeogramUpscaleRequestsByRequestIdStatusResponses] + +export type PutFalAiIdeogramUpscaleRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ideogram/upscale/requests/{request_id}/cancel' +} + +export type PutFalAiIdeogramUpscaleRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiIdeogramUpscaleRequestsByRequestIdCancelResponse = + PutFalAiIdeogramUpscaleRequestsByRequestIdCancelResponses[keyof PutFalAiIdeogramUpscaleRequestsByRequestIdCancelResponses] + +export type PostFalAiIdeogramUpscaleData = { + body: SchemaIdeogramUpscaleInput + path?: never + query?: never + url: '/fal-ai/ideogram/upscale' +} + +export type PostFalAiIdeogramUpscaleResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiIdeogramUpscaleResponse = + PostFalAiIdeogramUpscaleResponses[keyof PostFalAiIdeogramUpscaleResponses] + +export type GetFalAiIdeogramUpscaleRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ideogram/upscale/requests/{request_id}' +} + +export type GetFalAiIdeogramUpscaleRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaIdeogramUpscaleOutput +} + +export type GetFalAiIdeogramUpscaleRequestsByRequestIdResponse = + GetFalAiIdeogramUpscaleRequestsByRequestIdResponses[keyof GetFalAiIdeogramUpscaleRequestsByRequestIdResponses] + +export type GetFalAiCodeformerRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/codeformer/requests/{request_id}/status' +} + +export type GetFalAiCodeformerRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiCodeformerRequestsByRequestIdStatusResponse = + GetFalAiCodeformerRequestsByRequestIdStatusResponses[keyof GetFalAiCodeformerRequestsByRequestIdStatusResponses] + +export type PutFalAiCodeformerRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/codeformer/requests/{request_id}/cancel' +} + +export type PutFalAiCodeformerRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiCodeformerRequestsByRequestIdCancelResponse = + PutFalAiCodeformerRequestsByRequestIdCancelResponses[keyof PutFalAiCodeformerRequestsByRequestIdCancelResponses] + +export type PostFalAiCodeformerData = { + body: SchemaCodeformerInput + path?: never + query?: never + url: '/fal-ai/codeformer' +} + +export type PostFalAiCodeformerResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiCodeformerResponse = + PostFalAiCodeformerResponses[keyof PostFalAiCodeformerResponses] + +export type GetFalAiCodeformerRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/codeformer/requests/{request_id}' +} + +export type GetFalAiCodeformerRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaCodeformerOutput +} + +export type GetFalAiCodeformerRequestsByRequestIdResponse = + GetFalAiCodeformerRequestsByRequestIdResponses[keyof GetFalAiCodeformerRequestsByRequestIdResponses] + +export type GetFalAiKlingV15KolorsVirtualTryOnRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling/v1-5/kolors-virtual-try-on/requests/{request_id}/status' +} + +export type GetFalAiKlingV15KolorsVirtualTryOnRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiKlingV15KolorsVirtualTryOnRequestsByRequestIdStatusResponse = + GetFalAiKlingV15KolorsVirtualTryOnRequestsByRequestIdStatusResponses[keyof GetFalAiKlingV15KolorsVirtualTryOnRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingV15KolorsVirtualTryOnRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling/v1-5/kolors-virtual-try-on/requests/{request_id}/cancel' +} + +export type PutFalAiKlingV15KolorsVirtualTryOnRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiKlingV15KolorsVirtualTryOnRequestsByRequestIdCancelResponse = + PutFalAiKlingV15KolorsVirtualTryOnRequestsByRequestIdCancelResponses[keyof PutFalAiKlingV15KolorsVirtualTryOnRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingV15KolorsVirtualTryOnData = { + body: SchemaKlingV15KolorsVirtualTryOnInput + path?: never + query?: never + url: '/fal-ai/kling/v1-5/kolors-virtual-try-on' +} + +export type PostFalAiKlingV15KolorsVirtualTryOnResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingV15KolorsVirtualTryOnResponse = + PostFalAiKlingV15KolorsVirtualTryOnResponses[keyof PostFalAiKlingV15KolorsVirtualTryOnResponses] + +export type GetFalAiKlingV15KolorsVirtualTryOnRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling/v1-5/kolors-virtual-try-on/requests/{request_id}' +} + +export type GetFalAiKlingV15KolorsVirtualTryOnRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaKlingV15KolorsVirtualTryOnOutput +} + +export type GetFalAiKlingV15KolorsVirtualTryOnRequestsByRequestIdResponse = + GetFalAiKlingV15KolorsVirtualTryOnRequestsByRequestIdResponses[keyof GetFalAiKlingV15KolorsVirtualTryOnRequestsByRequestIdResponses] + +export type GetFalAiFluxLoraCannyRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/flux-lora-canny/requests/{request_id}/status' +} + +export type GetFalAiFluxLoraCannyRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxLoraCannyRequestsByRequestIdStatusResponse = + GetFalAiFluxLoraCannyRequestsByRequestIdStatusResponses[keyof GetFalAiFluxLoraCannyRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxLoraCannyRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-lora-canny/requests/{request_id}/cancel' +} + +export type PutFalAiFluxLoraCannyRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFluxLoraCannyRequestsByRequestIdCancelResponse = + PutFalAiFluxLoraCannyRequestsByRequestIdCancelResponses[keyof PutFalAiFluxLoraCannyRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxLoraCannyData = { + body: SchemaFluxLoraCannyInput + path?: never + query?: never + url: '/fal-ai/flux-lora-canny' +} + +export type PostFalAiFluxLoraCannyResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxLoraCannyResponse = + PostFalAiFluxLoraCannyResponses[keyof PostFalAiFluxLoraCannyResponses] + +export type GetFalAiFluxLoraCannyRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-lora-canny/requests/{request_id}' +} + +export type GetFalAiFluxLoraCannyRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxLoraCannyOutput +} + +export type GetFalAiFluxLoraCannyRequestsByRequestIdResponse = + GetFalAiFluxLoraCannyRequestsByRequestIdResponses[keyof GetFalAiFluxLoraCannyRequestsByRequestIdResponses] + +export type GetFalAiFluxProV1FillFinetunedRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-pro/v1/fill-finetuned/requests/{request_id}/status' +} + +export type GetFalAiFluxProV1FillFinetunedRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxProV1FillFinetunedRequestsByRequestIdStatusResponse = + GetFalAiFluxProV1FillFinetunedRequestsByRequestIdStatusResponses[keyof GetFalAiFluxProV1FillFinetunedRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxProV1FillFinetunedRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-pro/v1/fill-finetuned/requests/{request_id}/cancel' +} + +export type PutFalAiFluxProV1FillFinetunedRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiFluxProV1FillFinetunedRequestsByRequestIdCancelResponse = + PutFalAiFluxProV1FillFinetunedRequestsByRequestIdCancelResponses[keyof PutFalAiFluxProV1FillFinetunedRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxProV1FillFinetunedData = { + body: SchemaFluxProV1FillFinetunedInput + path?: never + query?: never + url: '/fal-ai/flux-pro/v1/fill-finetuned' +} + +export type PostFalAiFluxProV1FillFinetunedResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxProV1FillFinetunedResponse = + PostFalAiFluxProV1FillFinetunedResponses[keyof PostFalAiFluxProV1FillFinetunedResponses] + +export type GetFalAiFluxProV1FillFinetunedRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-pro/v1/fill-finetuned/requests/{request_id}' +} + +export type GetFalAiFluxProV1FillFinetunedRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxProV1FillFinetunedOutput +} + +export type GetFalAiFluxProV1FillFinetunedRequestsByRequestIdResponse = + GetFalAiFluxProV1FillFinetunedRequestsByRequestIdResponses[keyof GetFalAiFluxProV1FillFinetunedRequestsByRequestIdResponses] + +export type GetFalAiMoondreamNextDetectionRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/moondream-next/detection/requests/{request_id}/status' +} + +export type GetFalAiMoondreamNextDetectionRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiMoondreamNextDetectionRequestsByRequestIdStatusResponse = + GetFalAiMoondreamNextDetectionRequestsByRequestIdStatusResponses[keyof GetFalAiMoondreamNextDetectionRequestsByRequestIdStatusResponses] + +export type PutFalAiMoondreamNextDetectionRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/moondream-next/detection/requests/{request_id}/cancel' +} + +export type PutFalAiMoondreamNextDetectionRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiMoondreamNextDetectionRequestsByRequestIdCancelResponse = + PutFalAiMoondreamNextDetectionRequestsByRequestIdCancelResponses[keyof PutFalAiMoondreamNextDetectionRequestsByRequestIdCancelResponses] + +export type PostFalAiMoondreamNextDetectionData = { + body: SchemaMoondreamNextDetectionInput + path?: never + query?: never + url: '/fal-ai/moondream-next/detection' +} + +export type PostFalAiMoondreamNextDetectionResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMoondreamNextDetectionResponse = + PostFalAiMoondreamNextDetectionResponses[keyof PostFalAiMoondreamNextDetectionResponses] + +export type GetFalAiMoondreamNextDetectionRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/moondream-next/detection/requests/{request_id}' +} + +export type GetFalAiMoondreamNextDetectionRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaMoondreamNextDetectionOutput +} + +export type GetFalAiMoondreamNextDetectionRequestsByRequestIdResponse = + GetFalAiMoondreamNextDetectionRequestsByRequestIdResponses[keyof GetFalAiMoondreamNextDetectionRequestsByRequestIdResponses] + +export type GetFalAiBriaExpandRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/bria/expand/requests/{request_id}/status' +} + +export type GetFalAiBriaExpandRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiBriaExpandRequestsByRequestIdStatusResponse = + GetFalAiBriaExpandRequestsByRequestIdStatusResponses[keyof GetFalAiBriaExpandRequestsByRequestIdStatusResponses] + +export type PutFalAiBriaExpandRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bria/expand/requests/{request_id}/cancel' +} + +export type PutFalAiBriaExpandRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiBriaExpandRequestsByRequestIdCancelResponse = + PutFalAiBriaExpandRequestsByRequestIdCancelResponses[keyof PutFalAiBriaExpandRequestsByRequestIdCancelResponses] + +export type PostFalAiBriaExpandData = { + body: SchemaBriaExpandInput + path?: never + query?: never + url: '/fal-ai/bria/expand' +} + +export type PostFalAiBriaExpandResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiBriaExpandResponse = + PostFalAiBriaExpandResponses[keyof PostFalAiBriaExpandResponses] + +export type GetFalAiBriaExpandRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bria/expand/requests/{request_id}' +} + +export type GetFalAiBriaExpandRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaBriaExpandOutput +} + +export type GetFalAiBriaExpandRequestsByRequestIdResponse = + GetFalAiBriaExpandRequestsByRequestIdResponses[keyof GetFalAiBriaExpandRequestsByRequestIdResponses] + +export type GetFalAiBriaGenfillRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/bria/genfill/requests/{request_id}/status' +} + +export type GetFalAiBriaGenfillRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiBriaGenfillRequestsByRequestIdStatusResponse = + GetFalAiBriaGenfillRequestsByRequestIdStatusResponses[keyof GetFalAiBriaGenfillRequestsByRequestIdStatusResponses] + +export type PutFalAiBriaGenfillRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bria/genfill/requests/{request_id}/cancel' +} + +export type PutFalAiBriaGenfillRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiBriaGenfillRequestsByRequestIdCancelResponse = + PutFalAiBriaGenfillRequestsByRequestIdCancelResponses[keyof PutFalAiBriaGenfillRequestsByRequestIdCancelResponses] + +export type PostFalAiBriaGenfillData = { + body: SchemaBriaGenfillInput + path?: never + query?: never + url: '/fal-ai/bria/genfill' +} + +export type PostFalAiBriaGenfillResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiBriaGenfillResponse = + PostFalAiBriaGenfillResponses[keyof PostFalAiBriaGenfillResponses] + +export type GetFalAiBriaGenfillRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bria/genfill/requests/{request_id}' +} + +export type GetFalAiBriaGenfillRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaBriaGenfillOutput +} + +export type GetFalAiBriaGenfillRequestsByRequestIdResponse = + GetFalAiBriaGenfillRequestsByRequestIdResponses[keyof GetFalAiBriaGenfillRequestsByRequestIdResponses] + +export type GetFalAiFluxLoraFillRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-lora-fill/requests/{request_id}/status' +} + +export type GetFalAiFluxLoraFillRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxLoraFillRequestsByRequestIdStatusResponse = + GetFalAiFluxLoraFillRequestsByRequestIdStatusResponses[keyof GetFalAiFluxLoraFillRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxLoraFillRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-lora-fill/requests/{request_id}/cancel' +} + +export type PutFalAiFluxLoraFillRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFluxLoraFillRequestsByRequestIdCancelResponse = + PutFalAiFluxLoraFillRequestsByRequestIdCancelResponses[keyof PutFalAiFluxLoraFillRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxLoraFillData = { + body: SchemaFluxLoraFillInput + path?: never + query?: never + url: '/fal-ai/flux-lora-fill' +} + +export type PostFalAiFluxLoraFillResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxLoraFillResponse = + PostFalAiFluxLoraFillResponses[keyof PostFalAiFluxLoraFillResponses] + +export type GetFalAiFluxLoraFillRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-lora-fill/requests/{request_id}' +} + +export type GetFalAiFluxLoraFillRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxLoraFillOutput +} + +export type GetFalAiFluxLoraFillRequestsByRequestIdResponse = + GetFalAiFluxLoraFillRequestsByRequestIdResponses[keyof GetFalAiFluxLoraFillRequestsByRequestIdResponses] + +export type GetFalAiBriaBackgroundReplaceRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
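+ *
+ * Illustrative sketch (editor's addition, not generated code): polling this
+ * status route with logs enabled via `?logs=1`. The host, auth header, and
+ * `requestId` are assumptions; only the path and query shape come from the
+ * Data type above.
+ * @example
+ * const status: SchemaQueueStatus = await (
+ *   await fetch(
+ *     `https://queue.fal.run/fal-ai/bria/background/replace/requests/${requestId}/status?logs=1`,
+ *     { headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+ *   )
+ * ).json()
+ * if (status.status === 'COMPLETED') console.log(status.response_url)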
+ */ + logs?: number + } + url: '/fal-ai/bria/background/replace/requests/{request_id}/status' +} + +export type GetFalAiBriaBackgroundReplaceRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiBriaBackgroundReplaceRequestsByRequestIdStatusResponse = + GetFalAiBriaBackgroundReplaceRequestsByRequestIdStatusResponses[keyof GetFalAiBriaBackgroundReplaceRequestsByRequestIdStatusResponses] + +export type PutFalAiBriaBackgroundReplaceRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bria/background/replace/requests/{request_id}/cancel' +} + +export type PutFalAiBriaBackgroundReplaceRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiBriaBackgroundReplaceRequestsByRequestIdCancelResponse = + PutFalAiBriaBackgroundReplaceRequestsByRequestIdCancelResponses[keyof PutFalAiBriaBackgroundReplaceRequestsByRequestIdCancelResponses] + +export type PostFalAiBriaBackgroundReplaceData = { + body: SchemaBriaBackgroundReplaceInput + path?: never + query?: never + url: '/fal-ai/bria/background/replace' +} + +export type PostFalAiBriaBackgroundReplaceResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiBriaBackgroundReplaceResponse = + PostFalAiBriaBackgroundReplaceResponses[keyof PostFalAiBriaBackgroundReplaceResponses] + +export type GetFalAiBriaBackgroundReplaceRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bria/background/replace/requests/{request_id}' +} + +export type GetFalAiBriaBackgroundReplaceRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaBriaBackgroundReplaceOutput +} + +export type GetFalAiBriaBackgroundReplaceRequestsByRequestIdResponse = + GetFalAiBriaBackgroundReplaceRequestsByRequestIdResponses[keyof GetFalAiBriaBackgroundReplaceRequestsByRequestIdResponses] + +export type GetFalAiBriaEraserRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/bria/eraser/requests/{request_id}/status' +} + +export type GetFalAiBriaEraserRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiBriaEraserRequestsByRequestIdStatusResponse = + GetFalAiBriaEraserRequestsByRequestIdStatusResponses[keyof GetFalAiBriaEraserRequestsByRequestIdStatusResponses] + +export type PutFalAiBriaEraserRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bria/eraser/requests/{request_id}/cancel' +} + +export type PutFalAiBriaEraserRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiBriaEraserRequestsByRequestIdCancelResponse = + PutFalAiBriaEraserRequestsByRequestIdCancelResponses[keyof PutFalAiBriaEraserRequestsByRequestIdCancelResponses] + +export type PostFalAiBriaEraserData = { + body: SchemaBriaEraserInput + path?: never + query?: never + url: '/fal-ai/bria/eraser' +} + +export type PostFalAiBriaEraserResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiBriaEraserResponse = + PostFalAiBriaEraserResponses[keyof PostFalAiBriaEraserResponses] + +export type GetFalAiBriaEraserRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bria/eraser/requests/{request_id}' +} + +export type GetFalAiBriaEraserRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaBriaEraserOutput +} + +export type GetFalAiBriaEraserRequestsByRequestIdResponse = + GetFalAiBriaEraserRequestsByRequestIdResponses[keyof GetFalAiBriaEraserRequestsByRequestIdResponses] + +export type GetFalAiBriaProductShotRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/bria/product-shot/requests/{request_id}/status' +} + +export type GetFalAiBriaProductShotRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiBriaProductShotRequestsByRequestIdStatusResponse = + GetFalAiBriaProductShotRequestsByRequestIdStatusResponses[keyof GetFalAiBriaProductShotRequestsByRequestIdStatusResponses] + +export type PutFalAiBriaProductShotRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bria/product-shot/requests/{request_id}/cancel' +} + +export type PutFalAiBriaProductShotRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiBriaProductShotRequestsByRequestIdCancelResponse = + PutFalAiBriaProductShotRequestsByRequestIdCancelResponses[keyof PutFalAiBriaProductShotRequestsByRequestIdCancelResponses] + +export type PostFalAiBriaProductShotData = { + body: SchemaBriaProductShotInput + path?: never + query?: never + url: '/fal-ai/bria/product-shot' +} + +export type PostFalAiBriaProductShotResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiBriaProductShotResponse = + PostFalAiBriaProductShotResponses[keyof PostFalAiBriaProductShotResponses] + +export type GetFalAiBriaProductShotRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bria/product-shot/requests/{request_id}' +} + +export type GetFalAiBriaProductShotRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaBriaProductShotOutput +} + +export type GetFalAiBriaProductShotRequestsByRequestIdResponse = + GetFalAiBriaProductShotRequestsByRequestIdResponses[keyof GetFalAiBriaProductShotRequestsByRequestIdResponses] + +export type GetFalAiBriaBackgroundRemoveRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/bria/background/remove/requests/{request_id}/status' +} + +export type GetFalAiBriaBackgroundRemoveRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiBriaBackgroundRemoveRequestsByRequestIdStatusResponse = + GetFalAiBriaBackgroundRemoveRequestsByRequestIdStatusResponses[keyof GetFalAiBriaBackgroundRemoveRequestsByRequestIdStatusResponses] + +export type PutFalAiBriaBackgroundRemoveRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bria/background/remove/requests/{request_id}/cancel' +} + +export type PutFalAiBriaBackgroundRemoveRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiBriaBackgroundRemoveRequestsByRequestIdCancelResponse = + PutFalAiBriaBackgroundRemoveRequestsByRequestIdCancelResponses[keyof PutFalAiBriaBackgroundRemoveRequestsByRequestIdCancelResponses] + +export type PostFalAiBriaBackgroundRemoveData = { + body: SchemaBriaBackgroundRemoveInput + path?: never + query?: never + url: '/fal-ai/bria/background/remove' +} + +export type PostFalAiBriaBackgroundRemoveResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiBriaBackgroundRemoveResponse = + PostFalAiBriaBackgroundRemoveResponses[keyof PostFalAiBriaBackgroundRemoveResponses] + +export type GetFalAiBriaBackgroundRemoveRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bria/background/remove/requests/{request_id}' +} + +export type GetFalAiBriaBackgroundRemoveRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaBriaBackgroundRemoveOutput +} + +export type GetFalAiBriaBackgroundRemoveRequestsByRequestIdResponse = + GetFalAiBriaBackgroundRemoveRequestsByRequestIdResponses[keyof GetFalAiBriaBackgroundRemoveRequestsByRequestIdResponses] + +export type GetFalAiCatVtonRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/cat-vton/requests/{request_id}/status' +} + +export type GetFalAiCatVtonRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiCatVtonRequestsByRequestIdStatusResponse = + GetFalAiCatVtonRequestsByRequestIdStatusResponses[keyof GetFalAiCatVtonRequestsByRequestIdStatusResponses] + +export type PutFalAiCatVtonRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/cat-vton/requests/{request_id}/cancel' +} + +export type PutFalAiCatVtonRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. 
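+ *
+ * Illustrative sketch (editor's addition, not generated code): cancelling a
+ * queued request. The PUT verb and path mirror the Data type above; the
+ * host, auth header, and `requestId` are assumptions.
+ * @example
+ * const res = await fetch(
+ *   `https://queue.fal.run/fal-ai/cat-vton/requests/${requestId}/cancel`,
+ *   { method: 'PUT', headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+ * )
+ * const { success }: PutFalAiCatVtonRequestsByRequestIdCancelResponse =
+ *   await res.json()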
+ */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiCatVtonRequestsByRequestIdCancelResponse = + PutFalAiCatVtonRequestsByRequestIdCancelResponses[keyof PutFalAiCatVtonRequestsByRequestIdCancelResponses] + +export type PostFalAiCatVtonData = { + body: SchemaCatVtonInput + path?: never + query?: never + url: '/fal-ai/cat-vton' +} + +export type PostFalAiCatVtonResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiCatVtonResponse = + PostFalAiCatVtonResponses[keyof PostFalAiCatVtonResponses] + +export type GetFalAiCatVtonRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/cat-vton/requests/{request_id}' +} + +export type GetFalAiCatVtonRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaCatVtonOutput +} + +export type GetFalAiCatVtonRequestsByRequestIdResponse = + GetFalAiCatVtonRequestsByRequestIdResponses[keyof GetFalAiCatVtonRequestsByRequestIdResponses] + +export type GetFalAiLeffaPoseTransferRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/leffa/pose-transfer/requests/{request_id}/status' +} + +export type GetFalAiLeffaPoseTransferRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLeffaPoseTransferRequestsByRequestIdStatusResponse = + GetFalAiLeffaPoseTransferRequestsByRequestIdStatusResponses[keyof GetFalAiLeffaPoseTransferRequestsByRequestIdStatusResponses] + +export type PutFalAiLeffaPoseTransferRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/leffa/pose-transfer/requests/{request_id}/cancel' +} + +export type PutFalAiLeffaPoseTransferRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLeffaPoseTransferRequestsByRequestIdCancelResponse = + PutFalAiLeffaPoseTransferRequestsByRequestIdCancelResponses[keyof PutFalAiLeffaPoseTransferRequestsByRequestIdCancelResponses] + +export type PostFalAiLeffaPoseTransferData = { + body: SchemaLeffaPoseTransferInput + path?: never + query?: never + url: '/fal-ai/leffa/pose-transfer' +} + +export type PostFalAiLeffaPoseTransferResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLeffaPoseTransferResponse = + PostFalAiLeffaPoseTransferResponses[keyof PostFalAiLeffaPoseTransferResponses] + +export type GetFalAiLeffaPoseTransferRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/leffa/pose-transfer/requests/{request_id}' +} + +export type GetFalAiLeffaPoseTransferRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaLeffaPoseTransferOutput +} + +export type GetFalAiLeffaPoseTransferRequestsByRequestIdResponse = + GetFalAiLeffaPoseTransferRequestsByRequestIdResponses[keyof GetFalAiLeffaPoseTransferRequestsByRequestIdResponses] + +export type GetFalAiLeffaVirtualTryonRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/leffa/virtual-tryon/requests/{request_id}/status' +} + +export type GetFalAiLeffaVirtualTryonRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLeffaVirtualTryonRequestsByRequestIdStatusResponse = + GetFalAiLeffaVirtualTryonRequestsByRequestIdStatusResponses[keyof GetFalAiLeffaVirtualTryonRequestsByRequestIdStatusResponses] + +export type PutFalAiLeffaVirtualTryonRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/leffa/virtual-tryon/requests/{request_id}/cancel' +} + +export type PutFalAiLeffaVirtualTryonRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLeffaVirtualTryonRequestsByRequestIdCancelResponse = + PutFalAiLeffaVirtualTryonRequestsByRequestIdCancelResponses[keyof PutFalAiLeffaVirtualTryonRequestsByRequestIdCancelResponses] + +export type PostFalAiLeffaVirtualTryonData = { + body: SchemaLeffaVirtualTryonInput + path?: never + query?: never + url: '/fal-ai/leffa/virtual-tryon' +} + +export type PostFalAiLeffaVirtualTryonResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLeffaVirtualTryonResponse = + PostFalAiLeffaVirtualTryonResponses[keyof PostFalAiLeffaVirtualTryonResponses] + +export type GetFalAiLeffaVirtualTryonRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/leffa/virtual-tryon/requests/{request_id}' +} + +export type GetFalAiLeffaVirtualTryonRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLeffaVirtualTryonOutput +} + +export type GetFalAiLeffaVirtualTryonRequestsByRequestIdResponse = + GetFalAiLeffaVirtualTryonRequestsByRequestIdResponses[keyof GetFalAiLeffaVirtualTryonRequestsByRequestIdResponses] + +export type GetFalAiIdeogramV2EditRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ideogram/v2/edit/requests/{request_id}/status' +} + +export type GetFalAiIdeogramV2EditRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiIdeogramV2EditRequestsByRequestIdStatusResponse = + GetFalAiIdeogramV2EditRequestsByRequestIdStatusResponses[keyof GetFalAiIdeogramV2EditRequestsByRequestIdStatusResponses] + +export type PutFalAiIdeogramV2EditRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ideogram/v2/edit/requests/{request_id}/cancel' +} + +export type PutFalAiIdeogramV2EditRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. 
+ */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiIdeogramV2EditRequestsByRequestIdCancelResponse = + PutFalAiIdeogramV2EditRequestsByRequestIdCancelResponses[keyof PutFalAiIdeogramV2EditRequestsByRequestIdCancelResponses] + +export type PostFalAiIdeogramV2EditData = { + body: SchemaIdeogramV2EditInput + path?: never + query?: never + url: '/fal-ai/ideogram/v2/edit' +} + +export type PostFalAiIdeogramV2EditResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiIdeogramV2EditResponse = + PostFalAiIdeogramV2EditResponses[keyof PostFalAiIdeogramV2EditResponses] + +export type GetFalAiIdeogramV2EditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ideogram/v2/edit/requests/{request_id}' +} + +export type GetFalAiIdeogramV2EditRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaIdeogramV2EditOutput +} + +export type GetFalAiIdeogramV2EditRequestsByRequestIdResponse = + GetFalAiIdeogramV2EditRequestsByRequestIdResponses[keyof GetFalAiIdeogramV2EditRequestsByRequestIdResponses] + +export type GetFalAiIdeogramV2TurboEditRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ideogram/v2/turbo/edit/requests/{request_id}/status' +} + +export type GetFalAiIdeogramV2TurboEditRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiIdeogramV2TurboEditRequestsByRequestIdStatusResponse = + GetFalAiIdeogramV2TurboEditRequestsByRequestIdStatusResponses[keyof GetFalAiIdeogramV2TurboEditRequestsByRequestIdStatusResponses] + +export type PutFalAiIdeogramV2TurboEditRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ideogram/v2/turbo/edit/requests/{request_id}/cancel' +} + +export type PutFalAiIdeogramV2TurboEditRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiIdeogramV2TurboEditRequestsByRequestIdCancelResponse = + PutFalAiIdeogramV2TurboEditRequestsByRequestIdCancelResponses[keyof PutFalAiIdeogramV2TurboEditRequestsByRequestIdCancelResponses] + +export type PostFalAiIdeogramV2TurboEditData = { + body: SchemaIdeogramV2TurboEditInput + path?: never + query?: never + url: '/fal-ai/ideogram/v2/turbo/edit' +} + +export type PostFalAiIdeogramV2TurboEditResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiIdeogramV2TurboEditResponse = + PostFalAiIdeogramV2TurboEditResponses[keyof PostFalAiIdeogramV2TurboEditResponses] + +export type GetFalAiIdeogramV2TurboEditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ideogram/v2/turbo/edit/requests/{request_id}' +} + +export type GetFalAiIdeogramV2TurboEditRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaIdeogramV2TurboEditOutput +} + +export type GetFalAiIdeogramV2TurboEditRequestsByRequestIdResponse = + GetFalAiIdeogramV2TurboEditRequestsByRequestIdResponses[keyof GetFalAiIdeogramV2TurboEditRequestsByRequestIdResponses] + +export type GetFalAiIdeogramV2TurboRemixRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ideogram/v2/turbo/remix/requests/{request_id}/status' +} + +export type GetFalAiIdeogramV2TurboRemixRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiIdeogramV2TurboRemixRequestsByRequestIdStatusResponse = + GetFalAiIdeogramV2TurboRemixRequestsByRequestIdStatusResponses[keyof GetFalAiIdeogramV2TurboRemixRequestsByRequestIdStatusResponses] + +export type PutFalAiIdeogramV2TurboRemixRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ideogram/v2/turbo/remix/requests/{request_id}/cancel' +} + +export type PutFalAiIdeogramV2TurboRemixRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiIdeogramV2TurboRemixRequestsByRequestIdCancelResponse = + PutFalAiIdeogramV2TurboRemixRequestsByRequestIdCancelResponses[keyof PutFalAiIdeogramV2TurboRemixRequestsByRequestIdCancelResponses] + +export type PostFalAiIdeogramV2TurboRemixData = { + body: SchemaIdeogramV2TurboRemixInput + path?: never + query?: never + url: '/fal-ai/ideogram/v2/turbo/remix' +} + +export type PostFalAiIdeogramV2TurboRemixResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiIdeogramV2TurboRemixResponse = + PostFalAiIdeogramV2TurboRemixResponses[keyof PostFalAiIdeogramV2TurboRemixResponses] + +export type GetFalAiIdeogramV2TurboRemixRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ideogram/v2/turbo/remix/requests/{request_id}' +} + +export type GetFalAiIdeogramV2TurboRemixRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaIdeogramV2TurboRemixOutput +} + +export type GetFalAiIdeogramV2TurboRemixRequestsByRequestIdResponse = + GetFalAiIdeogramV2TurboRemixRequestsByRequestIdResponses[keyof GetFalAiIdeogramV2TurboRemixRequestsByRequestIdResponses] + +export type GetFalAiIdeogramV2RemixRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ideogram/v2/remix/requests/{request_id}/status' +} + +export type GetFalAiIdeogramV2RemixRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiIdeogramV2RemixRequestsByRequestIdStatusResponse = + GetFalAiIdeogramV2RemixRequestsByRequestIdStatusResponses[keyof GetFalAiIdeogramV2RemixRequestsByRequestIdStatusResponses] + +export type PutFalAiIdeogramV2RemixRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ideogram/v2/remix/requests/{request_id}/cancel' +} + +export type PutFalAiIdeogramV2RemixRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiIdeogramV2RemixRequestsByRequestIdCancelResponse = + PutFalAiIdeogramV2RemixRequestsByRequestIdCancelResponses[keyof PutFalAiIdeogramV2RemixRequestsByRequestIdCancelResponses] + +export type PostFalAiIdeogramV2RemixData = { + body: SchemaIdeogramV2RemixInput + path?: never + query?: never + url: '/fal-ai/ideogram/v2/remix' +} + +export type PostFalAiIdeogramV2RemixResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiIdeogramV2RemixResponse = + PostFalAiIdeogramV2RemixResponses[keyof PostFalAiIdeogramV2RemixResponses] + +export type GetFalAiIdeogramV2RemixRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ideogram/v2/remix/requests/{request_id}' +} + +export type GetFalAiIdeogramV2RemixRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaIdeogramV2RemixOutput +} + +export type GetFalAiIdeogramV2RemixRequestsByRequestIdResponse = + GetFalAiIdeogramV2RemixRequestsByRequestIdResponses[keyof GetFalAiIdeogramV2RemixRequestsByRequestIdResponses] + +export type GetFalAiFluxSchnellReduxRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux/schnell/redux/requests/{request_id}/status' +} + +export type GetFalAiFluxSchnellReduxRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxSchnellReduxRequestsByRequestIdStatusResponse = + GetFalAiFluxSchnellReduxRequestsByRequestIdStatusResponses[keyof GetFalAiFluxSchnellReduxRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxSchnellReduxRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux/schnell/redux/requests/{request_id}/cancel' +} + +export type PutFalAiFluxSchnellReduxRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFluxSchnellReduxRequestsByRequestIdCancelResponse = + PutFalAiFluxSchnellReduxRequestsByRequestIdCancelResponses[keyof PutFalAiFluxSchnellReduxRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxSchnellReduxData = { + body: SchemaFluxSchnellReduxInput + path?: never + query?: never + url: '/fal-ai/flux/schnell/redux' +} + +export type PostFalAiFluxSchnellReduxResponses = { + /** + * The request status. 
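+ *
+ * Illustrative sketch (editor's addition, not generated code): enqueueing a
+ * request. The body must satisfy SchemaFluxSchnellReduxInput per the Data
+ * type above; the host, auth header, and the `input` variable are
+ * assumptions.
+ * @example
+ * const res = await fetch('https://queue.fal.run/fal-ai/flux/schnell/redux', {
+ *   method: 'POST',
+ *   headers: {
+ *     Authorization: `Key ${process.env.FAL_KEY}`,
+ *     'Content-Type': 'application/json',
+ *   },
+ *   body: JSON.stringify(input satisfies SchemaFluxSchnellReduxInput),
+ * })
+ * const queued: PostFalAiFluxSchnellReduxResponse = await res.json()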
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxSchnellReduxResponse = + PostFalAiFluxSchnellReduxResponses[keyof PostFalAiFluxSchnellReduxResponses] + +export type GetFalAiFluxSchnellReduxRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux/schnell/redux/requests/{request_id}' +} + +export type GetFalAiFluxSchnellReduxRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxSchnellReduxOutput +} + +export type GetFalAiFluxSchnellReduxRequestsByRequestIdResponse = + GetFalAiFluxSchnellReduxRequestsByRequestIdResponses[keyof GetFalAiFluxSchnellReduxRequestsByRequestIdResponses] + +export type GetFalAiFluxProV11ReduxRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-pro/v1.1/redux/requests/{request_id}/status' +} + +export type GetFalAiFluxProV11ReduxRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxProV11ReduxRequestsByRequestIdStatusResponse = + GetFalAiFluxProV11ReduxRequestsByRequestIdStatusResponses[keyof GetFalAiFluxProV11ReduxRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxProV11ReduxRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-pro/v1.1/redux/requests/{request_id}/cancel' +} + +export type PutFalAiFluxProV11ReduxRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFluxProV11ReduxRequestsByRequestIdCancelResponse = + PutFalAiFluxProV11ReduxRequestsByRequestIdCancelResponses[keyof PutFalAiFluxProV11ReduxRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxProV11ReduxData = { + body: SchemaFluxProV11ReduxInput + path?: never + query?: never + url: '/fal-ai/flux-pro/v1.1/redux' +} + +export type PostFalAiFluxProV11ReduxResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxProV11ReduxResponse = + PostFalAiFluxProV11ReduxResponses[keyof PostFalAiFluxProV11ReduxResponses] + +export type GetFalAiFluxProV11ReduxRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-pro/v1.1/redux/requests/{request_id}' +} + +export type GetFalAiFluxProV11ReduxRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxProV11ReduxOutput +} + +export type GetFalAiFluxProV11ReduxRequestsByRequestIdResponse = + GetFalAiFluxProV11ReduxRequestsByRequestIdResponses[keyof GetFalAiFluxProV11ReduxRequestsByRequestIdResponses] + +export type GetFalAiFluxDevReduxRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux/dev/redux/requests/{request_id}/status' +} + +export type GetFalAiFluxDevReduxRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxDevReduxRequestsByRequestIdStatusResponse = + GetFalAiFluxDevReduxRequestsByRequestIdStatusResponses[keyof GetFalAiFluxDevReduxRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxDevReduxRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux/dev/redux/requests/{request_id}/cancel' +} + +export type PutFalAiFluxDevReduxRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFluxDevReduxRequestsByRequestIdCancelResponse = + PutFalAiFluxDevReduxRequestsByRequestIdCancelResponses[keyof PutFalAiFluxDevReduxRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxDevReduxData = { + body: SchemaFluxDevReduxInput + path?: never + query?: never + url: '/fal-ai/flux/dev/redux' +} + +export type PostFalAiFluxDevReduxResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxDevReduxResponse = + PostFalAiFluxDevReduxResponses[keyof PostFalAiFluxDevReduxResponses] + +export type GetFalAiFluxDevReduxRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux/dev/redux/requests/{request_id}' +} + +export type GetFalAiFluxDevReduxRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxDevReduxOutput +} + +export type GetFalAiFluxDevReduxRequestsByRequestIdResponse = + GetFalAiFluxDevReduxRequestsByRequestIdResponses[keyof GetFalAiFluxDevReduxRequestsByRequestIdResponses] + +export type GetFalAiFluxProV11UltraReduxRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-pro/v1.1-ultra/redux/requests/{request_id}/status' +} + +export type GetFalAiFluxProV11UltraReduxRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxProV11UltraReduxRequestsByRequestIdStatusResponse = + GetFalAiFluxProV11UltraReduxRequestsByRequestIdStatusResponses[keyof GetFalAiFluxProV11UltraReduxRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxProV11UltraReduxRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-pro/v1.1-ultra/redux/requests/{request_id}/cancel' +} + +export type PutFalAiFluxProV11UltraReduxRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFluxProV11UltraReduxRequestsByRequestIdCancelResponse = + PutFalAiFluxProV11UltraReduxRequestsByRequestIdCancelResponses[keyof PutFalAiFluxProV11UltraReduxRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxProV11UltraReduxData = { + body: SchemaFluxProV11UltraReduxInput + path?: never + query?: never + url: '/fal-ai/flux-pro/v1.1-ultra/redux' +} + +export type PostFalAiFluxProV11UltraReduxResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxProV11UltraReduxResponse = + PostFalAiFluxProV11UltraReduxResponses[keyof PostFalAiFluxProV11UltraReduxResponses] + +export type GetFalAiFluxProV11UltraReduxRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-pro/v1.1-ultra/redux/requests/{request_id}' +} + +export type GetFalAiFluxProV11UltraReduxRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxProV11UltraReduxOutput +} + +export type GetFalAiFluxProV11UltraReduxRequestsByRequestIdResponse = + GetFalAiFluxProV11UltraReduxRequestsByRequestIdResponses[keyof GetFalAiFluxProV11UltraReduxRequestsByRequestIdResponses] + +export type GetFalAiFluxLoraDepthRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-lora-depth/requests/{request_id}/status' +} + +export type GetFalAiFluxLoraDepthRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxLoraDepthRequestsByRequestIdStatusResponse = + GetFalAiFluxLoraDepthRequestsByRequestIdStatusResponses[keyof GetFalAiFluxLoraDepthRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxLoraDepthRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-lora-depth/requests/{request_id}/cancel' +} + +export type PutFalAiFluxLoraDepthRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFluxLoraDepthRequestsByRequestIdCancelResponse = + PutFalAiFluxLoraDepthRequestsByRequestIdCancelResponses[keyof PutFalAiFluxLoraDepthRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxLoraDepthData = { + body: SchemaFluxLoraDepthInput + path?: never + query?: never + url: '/fal-ai/flux-lora-depth' +} + +export type PostFalAiFluxLoraDepthResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxLoraDepthResponse = + PostFalAiFluxLoraDepthResponses[keyof PostFalAiFluxLoraDepthResponses] + +export type GetFalAiFluxLoraDepthRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-lora-depth/requests/{request_id}' +} + +export type GetFalAiFluxLoraDepthRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxLoraDepthOutput +} + +export type GetFalAiFluxLoraDepthRequestsByRequestIdResponse = + GetFalAiFluxLoraDepthRequestsByRequestIdResponses[keyof GetFalAiFluxLoraDepthRequestsByRequestIdResponses] + +export type GetFalAiFluxProV1FillRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-pro/v1/fill/requests/{request_id}/status' +} + +export type GetFalAiFluxProV1FillRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxProV1FillRequestsByRequestIdStatusResponse = + GetFalAiFluxProV1FillRequestsByRequestIdStatusResponses[keyof GetFalAiFluxProV1FillRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxProV1FillRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-pro/v1/fill/requests/{request_id}/cancel' +} + +export type PutFalAiFluxProV1FillRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFluxProV1FillRequestsByRequestIdCancelResponse = + PutFalAiFluxProV1FillRequestsByRequestIdCancelResponses[keyof PutFalAiFluxProV1FillRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxProV1FillData = { + body: SchemaFluxProV1FillInput + path?: never + query?: never + url: '/fal-ai/flux-pro/v1/fill' +} + +export type PostFalAiFluxProV1FillResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxProV1FillResponse = + PostFalAiFluxProV1FillResponses[keyof PostFalAiFluxProV1FillResponses] + +export type GetFalAiFluxProV1FillRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-pro/v1/fill/requests/{request_id}' +} + +export type GetFalAiFluxProV1FillRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxProV1FillOutput +} + +export type GetFalAiFluxProV1FillRequestsByRequestIdResponse = + GetFalAiFluxProV1FillRequestsByRequestIdResponses[keyof GetFalAiFluxProV1FillRequestsByRequestIdResponses] + +export type GetFalAiKolorsImageToImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kolors/image-to-image/requests/{request_id}/status' +} + +export type GetFalAiKolorsImageToImageRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiKolorsImageToImageRequestsByRequestIdStatusResponse = + GetFalAiKolorsImageToImageRequestsByRequestIdStatusResponses[keyof GetFalAiKolorsImageToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiKolorsImageToImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kolors/image-to-image/requests/{request_id}/cancel' +} + +export type PutFalAiKolorsImageToImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiKolorsImageToImageRequestsByRequestIdCancelResponse = + PutFalAiKolorsImageToImageRequestsByRequestIdCancelResponses[keyof PutFalAiKolorsImageToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiKolorsImageToImageData = { + body: SchemaKolorsImageToImageInput + path?: never + query?: never + url: '/fal-ai/kolors/image-to-image' +} + +export type PostFalAiKolorsImageToImageResponses = { + /** + * The request status. 
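+ *
+ * Editor's note: the companion `...Response` alias below is an indexed
+ * access over `keyof`, so it collapses to SchemaQueueStatus while `200` is
+ * the only key and would widen to a union automatically if the generator
+ * ever emitted additional status codes.
+ * @example
+ * // Today these are the same type:
+ * type SameToday =
+ *   PostFalAiKolorsImageToImageResponse extends SchemaQueueStatus
+ *     ? true
+ *     : false // resolves to true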
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiKolorsImageToImageResponse = + PostFalAiKolorsImageToImageResponses[keyof PostFalAiKolorsImageToImageResponses] + +export type GetFalAiKolorsImageToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kolors/image-to-image/requests/{request_id}' +} + +export type GetFalAiKolorsImageToImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaKolorsImageToImageOutput +} + +export type GetFalAiKolorsImageToImageRequestsByRequestIdResponse = + GetFalAiKolorsImageToImageRequestsByRequestIdResponses[keyof GetFalAiKolorsImageToImageRequestsByRequestIdResponses] + +export type GetFalAiIclightV2RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/iclight-v2/requests/{request_id}/status' +} + +export type GetFalAiIclightV2RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiIclightV2RequestsByRequestIdStatusResponse = + GetFalAiIclightV2RequestsByRequestIdStatusResponses[keyof GetFalAiIclightV2RequestsByRequestIdStatusResponses] + +export type PutFalAiIclightV2RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/iclight-v2/requests/{request_id}/cancel' +} + +export type PutFalAiIclightV2RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiIclightV2RequestsByRequestIdCancelResponse = + PutFalAiIclightV2RequestsByRequestIdCancelResponses[keyof PutFalAiIclightV2RequestsByRequestIdCancelResponses] + +export type PostFalAiIclightV2Data = { + body: SchemaIclightV2Input + path?: never + query?: never + url: '/fal-ai/iclight-v2' +} + +export type PostFalAiIclightV2Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiIclightV2Response = + PostFalAiIclightV2Responses[keyof PostFalAiIclightV2Responses] + +export type GetFalAiIclightV2RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/iclight-v2/requests/{request_id}' +} + +export type GetFalAiIclightV2RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaIclightV2Output +} + +export type GetFalAiIclightV2RequestsByRequestIdResponse = + GetFalAiIclightV2RequestsByRequestIdResponses[keyof GetFalAiIclightV2RequestsByRequestIdResponses] + +export type GetFalAiFluxDifferentialDiffusionRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-differential-diffusion/requests/{request_id}/status' +} + +export type GetFalAiFluxDifferentialDiffusionRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiFluxDifferentialDiffusionRequestsByRequestIdStatusResponse = + GetFalAiFluxDifferentialDiffusionRequestsByRequestIdStatusResponses[keyof GetFalAiFluxDifferentialDiffusionRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxDifferentialDiffusionRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-differential-diffusion/requests/{request_id}/cancel' +} + +export type PutFalAiFluxDifferentialDiffusionRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFluxDifferentialDiffusionRequestsByRequestIdCancelResponse = + PutFalAiFluxDifferentialDiffusionRequestsByRequestIdCancelResponses[keyof PutFalAiFluxDifferentialDiffusionRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxDifferentialDiffusionData = { + body: SchemaFluxDifferentialDiffusionInput + path?: never + query?: never + url: '/fal-ai/flux-differential-diffusion' +} + +export type PostFalAiFluxDifferentialDiffusionResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxDifferentialDiffusionResponse = + PostFalAiFluxDifferentialDiffusionResponses[keyof PostFalAiFluxDifferentialDiffusionResponses] + +export type GetFalAiFluxDifferentialDiffusionRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-differential-diffusion/requests/{request_id}' +} + +export type GetFalAiFluxDifferentialDiffusionRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxDifferentialDiffusionOutput +} + +export type GetFalAiFluxDifferentialDiffusionRequestsByRequestIdResponse = + GetFalAiFluxDifferentialDiffusionRequestsByRequestIdResponses[keyof GetFalAiFluxDifferentialDiffusionRequestsByRequestIdResponses] + +export type GetFalAiFluxPulidRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-pulid/requests/{request_id}/status' +} + +export type GetFalAiFluxPulidRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxPulidRequestsByRequestIdStatusResponse = + GetFalAiFluxPulidRequestsByRequestIdStatusResponses[keyof GetFalAiFluxPulidRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxPulidRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-pulid/requests/{request_id}/cancel' +} + +export type PutFalAiFluxPulidRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFluxPulidRequestsByRequestIdCancelResponse = + PutFalAiFluxPulidRequestsByRequestIdCancelResponses[keyof PutFalAiFluxPulidRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxPulidData = { + body: SchemaFluxPulidInput + path?: never + query?: never + url: '/fal-ai/flux-pulid' +} + +export type PostFalAiFluxPulidResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxPulidResponse = + PostFalAiFluxPulidResponses[keyof PostFalAiFluxPulidResponses] + +export type GetFalAiFluxPulidRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-pulid/requests/{request_id}' +} + +export type GetFalAiFluxPulidRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxPulidOutput +} + +export type GetFalAiFluxPulidRequestsByRequestIdResponse = + GetFalAiFluxPulidRequestsByRequestIdResponses[keyof GetFalAiFluxPulidRequestsByRequestIdResponses] + +export type GetFalAiBirefnetV2RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/birefnet/v2/requests/{request_id}/status' +} + +export type GetFalAiBirefnetV2RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiBirefnetV2RequestsByRequestIdStatusResponse = + GetFalAiBirefnetV2RequestsByRequestIdStatusResponses[keyof GetFalAiBirefnetV2RequestsByRequestIdStatusResponses] + +export type PutFalAiBirefnetV2RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/birefnet/v2/requests/{request_id}/cancel' +} + +export type PutFalAiBirefnetV2RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiBirefnetV2RequestsByRequestIdCancelResponse = + PutFalAiBirefnetV2RequestsByRequestIdCancelResponses[keyof PutFalAiBirefnetV2RequestsByRequestIdCancelResponses] + +export type PostFalAiBirefnetV2Data = { + body: SchemaBirefnetV2Input + path?: never + query?: never + url: '/fal-ai/birefnet/v2' +} + +export type PostFalAiBirefnetV2Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiBirefnetV2Response = + PostFalAiBirefnetV2Responses[keyof PostFalAiBirefnetV2Responses] + +export type GetFalAiBirefnetV2RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/birefnet/v2/requests/{request_id}' +} + +export type GetFalAiBirefnetV2RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaBirefnetV2Output +} + +export type GetFalAiBirefnetV2RequestsByRequestIdResponse = + GetFalAiBirefnetV2RequestsByRequestIdResponses[keyof GetFalAiBirefnetV2RequestsByRequestIdResponses] + +export type GetFalAiLivePortraitImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/live-portrait/image/requests/{request_id}/status' +} + +export type GetFalAiLivePortraitImageRequestsByRequestIdStatusResponses = { + /** + * The request status. 
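+ *
+ * Illustrative sketch (editor's addition, not generated code): a minimal
+ * poll-until-done loop over this status route. The host, auth header,
+ * `requestId`, and the 1s interval are assumptions; real code would add a
+ * timeout and error handling.
+ * @example
+ * let status: SchemaQueueStatus
+ * do {
+ *   await new Promise((r) => setTimeout(r, 1000))
+ *   status = await (
+ *     await fetch(
+ *       `https://queue.fal.run/fal-ai/live-portrait/image/requests/${requestId}/status`,
+ *       { headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+ *     )
+ *   ).json()
+ * } while (status.status !== 'COMPLETED')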
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiLivePortraitImageRequestsByRequestIdStatusResponse = + GetFalAiLivePortraitImageRequestsByRequestIdStatusResponses[keyof GetFalAiLivePortraitImageRequestsByRequestIdStatusResponses] + +export type PutFalAiLivePortraitImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/live-portrait/image/requests/{request_id}/cancel' +} + +export type PutFalAiLivePortraitImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLivePortraitImageRequestsByRequestIdCancelResponse = + PutFalAiLivePortraitImageRequestsByRequestIdCancelResponses[keyof PutFalAiLivePortraitImageRequestsByRequestIdCancelResponses] + +export type PostFalAiLivePortraitImageData = { + body: SchemaLivePortraitImageInput + path?: never + query?: never + url: '/fal-ai/live-portrait/image' +} + +export type PostFalAiLivePortraitImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLivePortraitImageResponse = + PostFalAiLivePortraitImageResponses[keyof PostFalAiLivePortraitImageResponses] + +export type GetFalAiLivePortraitImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/live-portrait/image/requests/{request_id}' +} + +export type GetFalAiLivePortraitImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLivePortraitImageOutput +} + +export type GetFalAiLivePortraitImageRequestsByRequestIdResponse = + GetFalAiLivePortraitImageRequestsByRequestIdResponses[keyof GetFalAiLivePortraitImageRequestsByRequestIdResponses] + +export type GetFalAiFluxGeneralRfInversionRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-general/rf-inversion/requests/{request_id}/status' +} + +export type GetFalAiFluxGeneralRfInversionRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxGeneralRfInversionRequestsByRequestIdStatusResponse = + GetFalAiFluxGeneralRfInversionRequestsByRequestIdStatusResponses[keyof GetFalAiFluxGeneralRfInversionRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxGeneralRfInversionRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-general/rf-inversion/requests/{request_id}/cancel' +} + +export type PutFalAiFluxGeneralRfInversionRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFluxGeneralRfInversionRequestsByRequestIdCancelResponse = + PutFalAiFluxGeneralRfInversionRequestsByRequestIdCancelResponses[keyof PutFalAiFluxGeneralRfInversionRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxGeneralRfInversionData = { + body: SchemaFluxGeneralRfInversionInput + path?: never + query?: never + url: '/fal-ai/flux-general/rf-inversion' +} + +export type PostFalAiFluxGeneralRfInversionResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxGeneralRfInversionResponse = + PostFalAiFluxGeneralRfInversionResponses[keyof PostFalAiFluxGeneralRfInversionResponses] + +export type GetFalAiFluxGeneralRfInversionRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-general/rf-inversion/requests/{request_id}' +} + +export type GetFalAiFluxGeneralRfInversionRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxGeneralRfInversionOutput +} + +export type GetFalAiFluxGeneralRfInversionRequestsByRequestIdResponse = + GetFalAiFluxGeneralRfInversionRequestsByRequestIdResponses[keyof GetFalAiFluxGeneralRfInversionRequestsByRequestIdResponses] + +export type GetFalAiImagePreprocessorsHedRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-preprocessors/hed/requests/{request_id}/status' +} + +export type GetFalAiImagePreprocessorsHedRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiImagePreprocessorsHedRequestsByRequestIdStatusResponse = + GetFalAiImagePreprocessorsHedRequestsByRequestIdStatusResponses[keyof GetFalAiImagePreprocessorsHedRequestsByRequestIdStatusResponses] + +export type PutFalAiImagePreprocessorsHedRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-preprocessors/hed/requests/{request_id}/cancel' +} + +export type PutFalAiImagePreprocessorsHedRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiImagePreprocessorsHedRequestsByRequestIdCancelResponse = + PutFalAiImagePreprocessorsHedRequestsByRequestIdCancelResponses[keyof PutFalAiImagePreprocessorsHedRequestsByRequestIdCancelResponses] + +export type PostFalAiImagePreprocessorsHedData = { + body: SchemaImagePreprocessorsHedInput + path?: never + query?: never + url: '/fal-ai/image-preprocessors/hed' +} + +export type PostFalAiImagePreprocessorsHedResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImagePreprocessorsHedResponse = + PostFalAiImagePreprocessorsHedResponses[keyof PostFalAiImagePreprocessorsHedResponses] + +export type GetFalAiImagePreprocessorsHedRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-preprocessors/hed/requests/{request_id}' +} + +export type GetFalAiImagePreprocessorsHedRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImagePreprocessorsHedOutput +} + +export type GetFalAiImagePreprocessorsHedRequestsByRequestIdResponse = + GetFalAiImagePreprocessorsHedRequestsByRequestIdResponses[keyof GetFalAiImagePreprocessorsHedRequestsByRequestIdResponses] + +export type GetFalAiImagePreprocessorsDepthAnythingV2RequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
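+ * @example
+ * // Illustrative only: building the status query with logging enabled,
+ * // using the generated Data type and TypeScript's built-in NonNullable.
+ * const query: NonNullable<
+ *   GetFalAiImagePreprocessorsDepthAnythingV2RequestsByRequestIdStatusData['query']
+ * > = { logs: 1 }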
+ */ + logs?: number + } + url: '/fal-ai/image-preprocessors/depth-anything/v2/requests/{request_id}/status' + } + +export type GetFalAiImagePreprocessorsDepthAnythingV2RequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiImagePreprocessorsDepthAnythingV2RequestsByRequestIdStatusResponse = + GetFalAiImagePreprocessorsDepthAnythingV2RequestsByRequestIdStatusResponses[keyof GetFalAiImagePreprocessorsDepthAnythingV2RequestsByRequestIdStatusResponses] + +export type PutFalAiImagePreprocessorsDepthAnythingV2RequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-preprocessors/depth-anything/v2/requests/{request_id}/cancel' + } + +export type PutFalAiImagePreprocessorsDepthAnythingV2RequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiImagePreprocessorsDepthAnythingV2RequestsByRequestIdCancelResponse = + PutFalAiImagePreprocessorsDepthAnythingV2RequestsByRequestIdCancelResponses[keyof PutFalAiImagePreprocessorsDepthAnythingV2RequestsByRequestIdCancelResponses] + +export type PostFalAiImagePreprocessorsDepthAnythingV2Data = { + body: SchemaImagePreprocessorsDepthAnythingV2Input + path?: never + query?: never + url: '/fal-ai/image-preprocessors/depth-anything/v2' +} + +export type PostFalAiImagePreprocessorsDepthAnythingV2Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImagePreprocessorsDepthAnythingV2Response = + PostFalAiImagePreprocessorsDepthAnythingV2Responses[keyof PostFalAiImagePreprocessorsDepthAnythingV2Responses] + +export type GetFalAiImagePreprocessorsDepthAnythingV2RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-preprocessors/depth-anything/v2/requests/{request_id}' +} + +export type GetFalAiImagePreprocessorsDepthAnythingV2RequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaImagePreprocessorsDepthAnythingV2Output + } + +export type GetFalAiImagePreprocessorsDepthAnythingV2RequestsByRequestIdResponse = + GetFalAiImagePreprocessorsDepthAnythingV2RequestsByRequestIdResponses[keyof GetFalAiImagePreprocessorsDepthAnythingV2RequestsByRequestIdResponses] + +export type GetFalAiImagePreprocessorsScribbleRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-preprocessors/scribble/requests/{request_id}/status' +} + +export type GetFalAiImagePreprocessorsScribbleRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiImagePreprocessorsScribbleRequestsByRequestIdStatusResponse = + GetFalAiImagePreprocessorsScribbleRequestsByRequestIdStatusResponses[keyof GetFalAiImagePreprocessorsScribbleRequestsByRequestIdStatusResponses] + +export type PutFalAiImagePreprocessorsScribbleRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-preprocessors/scribble/requests/{request_id}/cancel' +} + +export type PutFalAiImagePreprocessorsScribbleRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiImagePreprocessorsScribbleRequestsByRequestIdCancelResponse = + PutFalAiImagePreprocessorsScribbleRequestsByRequestIdCancelResponses[keyof PutFalAiImagePreprocessorsScribbleRequestsByRequestIdCancelResponses] + +export type PostFalAiImagePreprocessorsScribbleData = { + body: SchemaImagePreprocessorsScribbleInput + path?: never + query?: never + url: '/fal-ai/image-preprocessors/scribble' +} + +export type PostFalAiImagePreprocessorsScribbleResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImagePreprocessorsScribbleResponse = + PostFalAiImagePreprocessorsScribbleResponses[keyof PostFalAiImagePreprocessorsScribbleResponses] + +export type GetFalAiImagePreprocessorsScribbleRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-preprocessors/scribble/requests/{request_id}' +} + +export type GetFalAiImagePreprocessorsScribbleRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImagePreprocessorsScribbleOutput +} + +export type GetFalAiImagePreprocessorsScribbleRequestsByRequestIdResponse = + GetFalAiImagePreprocessorsScribbleRequestsByRequestIdResponses[keyof GetFalAiImagePreprocessorsScribbleRequestsByRequestIdResponses] + +export type GetFalAiImagePreprocessorsMlsdRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-preprocessors/mlsd/requests/{request_id}/status' +} + +export type GetFalAiImagePreprocessorsMlsdRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiImagePreprocessorsMlsdRequestsByRequestIdStatusResponse = + GetFalAiImagePreprocessorsMlsdRequestsByRequestIdStatusResponses[keyof GetFalAiImagePreprocessorsMlsdRequestsByRequestIdStatusResponses] + +export type PutFalAiImagePreprocessorsMlsdRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-preprocessors/mlsd/requests/{request_id}/cancel' +} + +export type PutFalAiImagePreprocessorsMlsdRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiImagePreprocessorsMlsdRequestsByRequestIdCancelResponse = + PutFalAiImagePreprocessorsMlsdRequestsByRequestIdCancelResponses[keyof PutFalAiImagePreprocessorsMlsdRequestsByRequestIdCancelResponses] + +export type PostFalAiImagePreprocessorsMlsdData = { + body: SchemaImagePreprocessorsMlsdInput + path?: never + query?: never + url: '/fal-ai/image-preprocessors/mlsd' +} + +export type PostFalAiImagePreprocessorsMlsdResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImagePreprocessorsMlsdResponse = + PostFalAiImagePreprocessorsMlsdResponses[keyof PostFalAiImagePreprocessorsMlsdResponses] + +export type GetFalAiImagePreprocessorsMlsdRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-preprocessors/mlsd/requests/{request_id}' +} + +export type GetFalAiImagePreprocessorsMlsdRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImagePreprocessorsMlsdOutput +} + +export type GetFalAiImagePreprocessorsMlsdRequestsByRequestIdResponse = + GetFalAiImagePreprocessorsMlsdRequestsByRequestIdResponses[keyof GetFalAiImagePreprocessorsMlsdRequestsByRequestIdResponses] + +export type GetFalAiImagePreprocessorsSamRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-preprocessors/sam/requests/{request_id}/status' +} + +export type GetFalAiImagePreprocessorsSamRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiImagePreprocessorsSamRequestsByRequestIdStatusResponse = + GetFalAiImagePreprocessorsSamRequestsByRequestIdStatusResponses[keyof GetFalAiImagePreprocessorsSamRequestsByRequestIdStatusResponses] + +export type PutFalAiImagePreprocessorsSamRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-preprocessors/sam/requests/{request_id}/cancel' +} + +export type PutFalAiImagePreprocessorsSamRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiImagePreprocessorsSamRequestsByRequestIdCancelResponse = + PutFalAiImagePreprocessorsSamRequestsByRequestIdCancelResponses[keyof PutFalAiImagePreprocessorsSamRequestsByRequestIdCancelResponses] + +export type PostFalAiImagePreprocessorsSamData = { + body: SchemaImagePreprocessorsSamInput + path?: never + query?: never + url: '/fal-ai/image-preprocessors/sam' +} + +export type PostFalAiImagePreprocessorsSamResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImagePreprocessorsSamResponse = + PostFalAiImagePreprocessorsSamResponses[keyof PostFalAiImagePreprocessorsSamResponses] + +export type GetFalAiImagePreprocessorsSamRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-preprocessors/sam/requests/{request_id}' +} + +export type GetFalAiImagePreprocessorsSamRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaImagePreprocessorsSamOutput +} + +export type GetFalAiImagePreprocessorsSamRequestsByRequestIdResponse = + GetFalAiImagePreprocessorsSamRequestsByRequestIdResponses[keyof GetFalAiImagePreprocessorsSamRequestsByRequestIdResponses] + +export type GetFalAiImagePreprocessorsMidasRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-preprocessors/midas/requests/{request_id}/status' +} + +export type GetFalAiImagePreprocessorsMidasRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiImagePreprocessorsMidasRequestsByRequestIdStatusResponse = + GetFalAiImagePreprocessorsMidasRequestsByRequestIdStatusResponses[keyof GetFalAiImagePreprocessorsMidasRequestsByRequestIdStatusResponses] + +export type PutFalAiImagePreprocessorsMidasRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-preprocessors/midas/requests/{request_id}/cancel' +} + +export type PutFalAiImagePreprocessorsMidasRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiImagePreprocessorsMidasRequestsByRequestIdCancelResponse = + PutFalAiImagePreprocessorsMidasRequestsByRequestIdCancelResponses[keyof PutFalAiImagePreprocessorsMidasRequestsByRequestIdCancelResponses] + +export type PostFalAiImagePreprocessorsMidasData = { + body: SchemaImagePreprocessorsMidasInput + path?: never + query?: never + url: '/fal-ai/image-preprocessors/midas' +} + +export type PostFalAiImagePreprocessorsMidasResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImagePreprocessorsMidasResponse = + PostFalAiImagePreprocessorsMidasResponses[keyof PostFalAiImagePreprocessorsMidasResponses] + +export type GetFalAiImagePreprocessorsMidasRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-preprocessors/midas/requests/{request_id}' +} + +export type GetFalAiImagePreprocessorsMidasRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImagePreprocessorsMidasOutput +} + +export type GetFalAiImagePreprocessorsMidasRequestsByRequestIdResponse = + GetFalAiImagePreprocessorsMidasRequestsByRequestIdResponses[keyof GetFalAiImagePreprocessorsMidasRequestsByRequestIdResponses] + +export type GetFalAiImagePreprocessorsTeedRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-preprocessors/teed/requests/{request_id}/status' +} + +export type GetFalAiImagePreprocessorsTeedRequestsByRequestIdStatusResponses = { + /** + * The request status. 
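+ * @example
+ * // Minimal polling sketch. The queue host (`https://queue.fal.run`),
+ * // the `Authorization: Key <FAL_KEY>` scheme, and `requestId` (from the
+ * // submit response) are assumptions, not defined in this file.
+ * let status: SchemaQueueStatus
+ * do {
+ *   const res = await fetch(
+ *     `https://queue.fal.run/fal-ai/image-preprocessors/teed/requests/${requestId}/status`,
+ *     { headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+ *   )
+ *   status = (await res.json()) as SchemaQueueStatus
+ * } while (status.status !== 'COMPLETED')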
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiImagePreprocessorsTeedRequestsByRequestIdStatusResponse = + GetFalAiImagePreprocessorsTeedRequestsByRequestIdStatusResponses[keyof GetFalAiImagePreprocessorsTeedRequestsByRequestIdStatusResponses] + +export type PutFalAiImagePreprocessorsTeedRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-preprocessors/teed/requests/{request_id}/cancel' +} + +export type PutFalAiImagePreprocessorsTeedRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiImagePreprocessorsTeedRequestsByRequestIdCancelResponse = + PutFalAiImagePreprocessorsTeedRequestsByRequestIdCancelResponses[keyof PutFalAiImagePreprocessorsTeedRequestsByRequestIdCancelResponses] + +export type PostFalAiImagePreprocessorsTeedData = { + body: SchemaImagePreprocessorsTeedInput + path?: never + query?: never + url: '/fal-ai/image-preprocessors/teed' +} + +export type PostFalAiImagePreprocessorsTeedResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImagePreprocessorsTeedResponse = + PostFalAiImagePreprocessorsTeedResponses[keyof PostFalAiImagePreprocessorsTeedResponses] + +export type GetFalAiImagePreprocessorsTeedRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-preprocessors/teed/requests/{request_id}' +} + +export type GetFalAiImagePreprocessorsTeedRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImagePreprocessorsTeedOutput +} + +export type GetFalAiImagePreprocessorsTeedRequestsByRequestIdResponse = + GetFalAiImagePreprocessorsTeedRequestsByRequestIdResponses[keyof GetFalAiImagePreprocessorsTeedRequestsByRequestIdResponses] + +export type GetFalAiImagePreprocessorsLineartRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-preprocessors/lineart/requests/{request_id}/status' +} + +export type GetFalAiImagePreprocessorsLineartRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiImagePreprocessorsLineartRequestsByRequestIdStatusResponse = + GetFalAiImagePreprocessorsLineartRequestsByRequestIdStatusResponses[keyof GetFalAiImagePreprocessorsLineartRequestsByRequestIdStatusResponses] + +export type PutFalAiImagePreprocessorsLineartRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-preprocessors/lineart/requests/{request_id}/cancel' +} + +export type PutFalAiImagePreprocessorsLineartRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiImagePreprocessorsLineartRequestsByRequestIdCancelResponse = + PutFalAiImagePreprocessorsLineartRequestsByRequestIdCancelResponses[keyof PutFalAiImagePreprocessorsLineartRequestsByRequestIdCancelResponses] + +export type PostFalAiImagePreprocessorsLineartData = { + body: SchemaImagePreprocessorsLineartInput + path?: never + query?: never + url: '/fal-ai/image-preprocessors/lineart' +} + +export type PostFalAiImagePreprocessorsLineartResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImagePreprocessorsLineartResponse = + PostFalAiImagePreprocessorsLineartResponses[keyof PostFalAiImagePreprocessorsLineartResponses] + +export type GetFalAiImagePreprocessorsLineartRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-preprocessors/lineart/requests/{request_id}' +} + +export type GetFalAiImagePreprocessorsLineartRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImagePreprocessorsLineartOutput +} + +export type GetFalAiImagePreprocessorsLineartRequestsByRequestIdResponse = + GetFalAiImagePreprocessorsLineartRequestsByRequestIdResponses[keyof GetFalAiImagePreprocessorsLineartRequestsByRequestIdResponses] + +export type GetFalAiImagePreprocessorsZoeRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-preprocessors/zoe/requests/{request_id}/status' +} + +export type GetFalAiImagePreprocessorsZoeRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiImagePreprocessorsZoeRequestsByRequestIdStatusResponse = + GetFalAiImagePreprocessorsZoeRequestsByRequestIdStatusResponses[keyof GetFalAiImagePreprocessorsZoeRequestsByRequestIdStatusResponses] + +export type PutFalAiImagePreprocessorsZoeRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-preprocessors/zoe/requests/{request_id}/cancel' +} + +export type PutFalAiImagePreprocessorsZoeRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiImagePreprocessorsZoeRequestsByRequestIdCancelResponse = + PutFalAiImagePreprocessorsZoeRequestsByRequestIdCancelResponses[keyof PutFalAiImagePreprocessorsZoeRequestsByRequestIdCancelResponses] + +export type PostFalAiImagePreprocessorsZoeData = { + body: SchemaImagePreprocessorsZoeInput + path?: never + query?: never + url: '/fal-ai/image-preprocessors/zoe' +} + +export type PostFalAiImagePreprocessorsZoeResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImagePreprocessorsZoeResponse = + PostFalAiImagePreprocessorsZoeResponses[keyof PostFalAiImagePreprocessorsZoeResponses] + +export type GetFalAiImagePreprocessorsZoeRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-preprocessors/zoe/requests/{request_id}' +} + +export type GetFalAiImagePreprocessorsZoeRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaImagePreprocessorsZoeOutput +} + +export type GetFalAiImagePreprocessorsZoeRequestsByRequestIdResponse = + GetFalAiImagePreprocessorsZoeRequestsByRequestIdResponses[keyof GetFalAiImagePreprocessorsZoeRequestsByRequestIdResponses] + +export type GetFalAiImagePreprocessorsPidiRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/image-preprocessors/pidi/requests/{request_id}/status' +} + +export type GetFalAiImagePreprocessorsPidiRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiImagePreprocessorsPidiRequestsByRequestIdStatusResponse = + GetFalAiImagePreprocessorsPidiRequestsByRequestIdStatusResponses[keyof GetFalAiImagePreprocessorsPidiRequestsByRequestIdStatusResponses] + +export type PutFalAiImagePreprocessorsPidiRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-preprocessors/pidi/requests/{request_id}/cancel' +} + +export type PutFalAiImagePreprocessorsPidiRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiImagePreprocessorsPidiRequestsByRequestIdCancelResponse = + PutFalAiImagePreprocessorsPidiRequestsByRequestIdCancelResponses[keyof PutFalAiImagePreprocessorsPidiRequestsByRequestIdCancelResponses] + +export type PostFalAiImagePreprocessorsPidiData = { + body: SchemaImagePreprocessorsPidiInput + path?: never + query?: never + url: '/fal-ai/image-preprocessors/pidi' +} + +export type PostFalAiImagePreprocessorsPidiResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImagePreprocessorsPidiResponse = + PostFalAiImagePreprocessorsPidiResponses[keyof PostFalAiImagePreprocessorsPidiResponses] + +export type GetFalAiImagePreprocessorsPidiRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/image-preprocessors/pidi/requests/{request_id}' +} + +export type GetFalAiImagePreprocessorsPidiRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImagePreprocessorsPidiOutput +} + +export type GetFalAiImagePreprocessorsPidiRequestsByRequestIdResponse = + GetFalAiImagePreprocessorsPidiRequestsByRequestIdResponses[keyof GetFalAiImagePreprocessorsPidiRequestsByRequestIdResponses] + +export type GetFalAiSam2ImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/sam2/image/requests/{request_id}/status' +} + +export type GetFalAiSam2ImageRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiSam2ImageRequestsByRequestIdStatusResponse = + GetFalAiSam2ImageRequestsByRequestIdStatusResponses[keyof GetFalAiSam2ImageRequestsByRequestIdStatusResponses] + +export type PutFalAiSam2ImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sam2/image/requests/{request_id}/cancel' +} + +export type PutFalAiSam2ImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiSam2ImageRequestsByRequestIdCancelResponse = + PutFalAiSam2ImageRequestsByRequestIdCancelResponses[keyof PutFalAiSam2ImageRequestsByRequestIdCancelResponses] + +export type PostFalAiSam2ImageData = { + body: SchemaSam2ImageInput + path?: never + query?: never + url: '/fal-ai/sam2/image' +} + +export type PostFalAiSam2ImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSam2ImageResponse = + PostFalAiSam2ImageResponses[keyof PostFalAiSam2ImageResponses] + +export type GetFalAiSam2ImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sam2/image/requests/{request_id}' +} + +export type GetFalAiSam2ImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSam2ImageOutput +} + +export type GetFalAiSam2ImageRequestsByRequestIdResponse = + GetFalAiSam2ImageRequestsByRequestIdResponses[keyof GetFalAiSam2ImageRequestsByRequestIdResponses] + +export type GetFalAiFluxGeneralImageToImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-general/image-to-image/requests/{request_id}/status' +} + +export type GetFalAiFluxGeneralImageToImageRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFluxGeneralImageToImageRequestsByRequestIdStatusResponse = + GetFalAiFluxGeneralImageToImageRequestsByRequestIdStatusResponses[keyof GetFalAiFluxGeneralImageToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxGeneralImageToImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-general/image-to-image/requests/{request_id}/cancel' +} + +export type PutFalAiFluxGeneralImageToImageRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFluxGeneralImageToImageRequestsByRequestIdCancelResponse = + PutFalAiFluxGeneralImageToImageRequestsByRequestIdCancelResponses[keyof PutFalAiFluxGeneralImageToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxGeneralImageToImageData = { + body: SchemaFluxGeneralImageToImageInput + path?: never + query?: never + url: '/fal-ai/flux-general/image-to-image' +} + +export type PostFalAiFluxGeneralImageToImageResponses = { + /** + * The request status. 
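+ * @example
+ * // Submit sketch using the generated input schema; host and auth scheme
+ * // are the same assumptions as in the other examples in this file.
+ * declare const input: SchemaFluxGeneralImageToImageInput // caller-supplied
+ * const res = await fetch('https://queue.fal.run/fal-ai/flux-general/image-to-image', {
+ *   method: 'POST',
+ *   headers: {
+ *     Authorization: `Key ${process.env.FAL_KEY}`,
+ *     'Content-Type': 'application/json',
+ *   },
+ *   body: JSON.stringify(input),
+ * })
+ * const queued: SchemaQueueStatus = await res.json()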
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxGeneralImageToImageResponse = + PostFalAiFluxGeneralImageToImageResponses[keyof PostFalAiFluxGeneralImageToImageResponses] + +export type GetFalAiFluxGeneralImageToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-general/image-to-image/requests/{request_id}' +} + +export type GetFalAiFluxGeneralImageToImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxGeneralImageToImageOutput +} + +export type GetFalAiFluxGeneralImageToImageRequestsByRequestIdResponse = + GetFalAiFluxGeneralImageToImageRequestsByRequestIdResponses[keyof GetFalAiFluxGeneralImageToImageRequestsByRequestIdResponses] + +export type GetFalAiFluxGeneralInpaintingRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-general/inpainting/requests/{request_id}/status' +} + +export type GetFalAiFluxGeneralInpaintingRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxGeneralInpaintingRequestsByRequestIdStatusResponse = + GetFalAiFluxGeneralInpaintingRequestsByRequestIdStatusResponses[keyof GetFalAiFluxGeneralInpaintingRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxGeneralInpaintingRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-general/inpainting/requests/{request_id}/cancel' +} + +export type PutFalAiFluxGeneralInpaintingRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFluxGeneralInpaintingRequestsByRequestIdCancelResponse = + PutFalAiFluxGeneralInpaintingRequestsByRequestIdCancelResponses[keyof PutFalAiFluxGeneralInpaintingRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxGeneralInpaintingData = { + body: SchemaFluxGeneralInpaintingInput + path?: never + query?: never + url: '/fal-ai/flux-general/inpainting' +} + +export type PostFalAiFluxGeneralInpaintingResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxGeneralInpaintingResponse = + PostFalAiFluxGeneralInpaintingResponses[keyof PostFalAiFluxGeneralInpaintingResponses] + +export type GetFalAiFluxGeneralInpaintingRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-general/inpainting/requests/{request_id}' +} + +export type GetFalAiFluxGeneralInpaintingRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxGeneralInpaintingOutput +} + +export type GetFalAiFluxGeneralInpaintingRequestsByRequestIdResponse = + GetFalAiFluxGeneralInpaintingRequestsByRequestIdResponses[keyof GetFalAiFluxGeneralInpaintingRequestsByRequestIdResponses] + +export type GetFalAiFluxGeneralDifferentialDiffusionRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/flux-general/differential-diffusion/requests/{request_id}/status' + } + +export type GetFalAiFluxGeneralDifferentialDiffusionRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFluxGeneralDifferentialDiffusionRequestsByRequestIdStatusResponse = + GetFalAiFluxGeneralDifferentialDiffusionRequestsByRequestIdStatusResponses[keyof GetFalAiFluxGeneralDifferentialDiffusionRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxGeneralDifferentialDiffusionRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-general/differential-diffusion/requests/{request_id}/cancel' + } + +export type PutFalAiFluxGeneralDifferentialDiffusionRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFluxGeneralDifferentialDiffusionRequestsByRequestIdCancelResponse = + PutFalAiFluxGeneralDifferentialDiffusionRequestsByRequestIdCancelResponses[keyof PutFalAiFluxGeneralDifferentialDiffusionRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxGeneralDifferentialDiffusionData = { + body: SchemaFluxGeneralDifferentialDiffusionInput + path?: never + query?: never + url: '/fal-ai/flux-general/differential-diffusion' +} + +export type PostFalAiFluxGeneralDifferentialDiffusionResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxGeneralDifferentialDiffusionResponse = + PostFalAiFluxGeneralDifferentialDiffusionResponses[keyof PostFalAiFluxGeneralDifferentialDiffusionResponses] + +export type GetFalAiFluxGeneralDifferentialDiffusionRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-general/differential-diffusion/requests/{request_id}' +} + +export type GetFalAiFluxGeneralDifferentialDiffusionRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaFluxGeneralDifferentialDiffusionOutput + } + +export type GetFalAiFluxGeneralDifferentialDiffusionRequestsByRequestIdResponse = + GetFalAiFluxGeneralDifferentialDiffusionRequestsByRequestIdResponses[keyof GetFalAiFluxGeneralDifferentialDiffusionRequestsByRequestIdResponses] + +export type GetFalAiFluxLoraImageToImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-lora/image-to-image/requests/{request_id}/status' +} + +export type GetFalAiFluxLoraImageToImageRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxLoraImageToImageRequestsByRequestIdStatusResponse = + GetFalAiFluxLoraImageToImageRequestsByRequestIdStatusResponses[keyof GetFalAiFluxLoraImageToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxLoraImageToImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-lora/image-to-image/requests/{request_id}/cancel' +} + +export type PutFalAiFluxLoraImageToImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. 
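+ * @example
+ * // Cancel sketch (host, auth scheme, and `requestId` assumed, as in the
+ * // other examples); the response reports whether cancellation succeeded.
+ * const res = await fetch(
+ *   `https://queue.fal.run/fal-ai/flux-lora/image-to-image/requests/${requestId}/cancel`,
+ *   { method: 'PUT', headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+ * )
+ * const cancelled: PutFalAiFluxLoraImageToImageRequestsByRequestIdCancelResponse =
+ *   await res.json()
+ * if (cancelled.success) {
+ *   // request left the queue
+ * }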
+ */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFluxLoraImageToImageRequestsByRequestIdCancelResponse = + PutFalAiFluxLoraImageToImageRequestsByRequestIdCancelResponses[keyof PutFalAiFluxLoraImageToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxLoraImageToImageData = { + body: SchemaFluxLoraImageToImageInput + path?: never + query?: never + url: '/fal-ai/flux-lora/image-to-image' +} + +export type PostFalAiFluxLoraImageToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxLoraImageToImageResponse = + PostFalAiFluxLoraImageToImageResponses[keyof PostFalAiFluxLoraImageToImageResponses] + +export type GetFalAiFluxLoraImageToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-lora/image-to-image/requests/{request_id}' +} + +export type GetFalAiFluxLoraImageToImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxLoraImageToImageOutput +} + +export type GetFalAiFluxLoraImageToImageRequestsByRequestIdResponse = + GetFalAiFluxLoraImageToImageRequestsByRequestIdResponses[keyof GetFalAiFluxLoraImageToImageRequestsByRequestIdResponses] + +export type GetFalAiSdxlControlnetUnionInpaintingRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/sdxl-controlnet-union/inpainting/requests/{request_id}/status' + } + +export type GetFalAiSdxlControlnetUnionInpaintingRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiSdxlControlnetUnionInpaintingRequestsByRequestIdStatusResponse = + GetFalAiSdxlControlnetUnionInpaintingRequestsByRequestIdStatusResponses[keyof GetFalAiSdxlControlnetUnionInpaintingRequestsByRequestIdStatusResponses] + +export type PutFalAiSdxlControlnetUnionInpaintingRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sdxl-controlnet-union/inpainting/requests/{request_id}/cancel' + } + +export type PutFalAiSdxlControlnetUnionInpaintingRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiSdxlControlnetUnionInpaintingRequestsByRequestIdCancelResponse = + PutFalAiSdxlControlnetUnionInpaintingRequestsByRequestIdCancelResponses[keyof PutFalAiSdxlControlnetUnionInpaintingRequestsByRequestIdCancelResponses] + +export type PostFalAiSdxlControlnetUnionInpaintingData = { + body: SchemaSdxlControlnetUnionInpaintingInput + path?: never + query?: never + url: '/fal-ai/sdxl-controlnet-union/inpainting' +} + +export type PostFalAiSdxlControlnetUnionInpaintingResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiSdxlControlnetUnionInpaintingResponse = + PostFalAiSdxlControlnetUnionInpaintingResponses[keyof PostFalAiSdxlControlnetUnionInpaintingResponses] + +export type GetFalAiSdxlControlnetUnionInpaintingRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sdxl-controlnet-union/inpainting/requests/{request_id}' +} + +export type GetFalAiSdxlControlnetUnionInpaintingRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaSdxlControlnetUnionInpaintingOutput + } + +export type GetFalAiSdxlControlnetUnionInpaintingRequestsByRequestIdResponse = + GetFalAiSdxlControlnetUnionInpaintingRequestsByRequestIdResponses[keyof GetFalAiSdxlControlnetUnionInpaintingRequestsByRequestIdResponses] + +export type GetFalAiSdxlControlnetUnionImageToImageRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/sdxl-controlnet-union/image-to-image/requests/{request_id}/status' + } + +export type GetFalAiSdxlControlnetUnionImageToImageRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiSdxlControlnetUnionImageToImageRequestsByRequestIdStatusResponse = + GetFalAiSdxlControlnetUnionImageToImageRequestsByRequestIdStatusResponses[keyof GetFalAiSdxlControlnetUnionImageToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiSdxlControlnetUnionImageToImageRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sdxl-controlnet-union/image-to-image/requests/{request_id}/cancel' + } + +export type PutFalAiSdxlControlnetUnionImageToImageRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiSdxlControlnetUnionImageToImageRequestsByRequestIdCancelResponse = + PutFalAiSdxlControlnetUnionImageToImageRequestsByRequestIdCancelResponses[keyof PutFalAiSdxlControlnetUnionImageToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiSdxlControlnetUnionImageToImageData = { + body: SchemaSdxlControlnetUnionImageToImageInput + path?: never + query?: never + url: '/fal-ai/sdxl-controlnet-union/image-to-image' +} + +export type PostFalAiSdxlControlnetUnionImageToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSdxlControlnetUnionImageToImageResponse = + PostFalAiSdxlControlnetUnionImageToImageResponses[keyof PostFalAiSdxlControlnetUnionImageToImageResponses] + +export type GetFalAiSdxlControlnetUnionImageToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sdxl-controlnet-union/image-to-image/requests/{request_id}' +} + +export type GetFalAiSdxlControlnetUnionImageToImageRequestsByRequestIdResponses = + { + /** + * Result of the request. 
+ */ + 200: SchemaSdxlControlnetUnionImageToImageOutput + } + +export type GetFalAiSdxlControlnetUnionImageToImageRequestsByRequestIdResponse = + GetFalAiSdxlControlnetUnionImageToImageRequestsByRequestIdResponses[keyof GetFalAiSdxlControlnetUnionImageToImageRequestsByRequestIdResponses] + +export type GetFalAiEra3dRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/era-3d/requests/{request_id}/status' +} + +export type GetFalAiEra3dRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiEra3dRequestsByRequestIdStatusResponse = + GetFalAiEra3dRequestsByRequestIdStatusResponses[keyof GetFalAiEra3dRequestsByRequestIdStatusResponses] + +export type PutFalAiEra3dRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/era-3d/requests/{request_id}/cancel' +} + +export type PutFalAiEra3dRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiEra3dRequestsByRequestIdCancelResponse = + PutFalAiEra3dRequestsByRequestIdCancelResponses[keyof PutFalAiEra3dRequestsByRequestIdCancelResponses] + +export type PostFalAiEra3dData = { + body: SchemaEra3dInput + path?: never + query?: never + url: '/fal-ai/era-3d' +} + +export type PostFalAiEra3dResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiEra3dResponse = + PostFalAiEra3dResponses[keyof PostFalAiEra3dResponses] + +export type GetFalAiEra3dRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/era-3d/requests/{request_id}' +} + +export type GetFalAiEra3dRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaEra3dOutput +} + +export type GetFalAiEra3dRequestsByRequestIdResponse = + GetFalAiEra3dRequestsByRequestIdResponses[keyof GetFalAiEra3dRequestsByRequestIdResponses] + +export type GetFalAiFlorence2LargeDenseRegionCaptionRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/florence-2-large/dense-region-caption/requests/{request_id}/status' + } + +export type GetFalAiFlorence2LargeDenseRegionCaptionRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFlorence2LargeDenseRegionCaptionRequestsByRequestIdStatusResponse = + GetFalAiFlorence2LargeDenseRegionCaptionRequestsByRequestIdStatusResponses[keyof GetFalAiFlorence2LargeDenseRegionCaptionRequestsByRequestIdStatusResponses] + +export type PutFalAiFlorence2LargeDenseRegionCaptionRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/florence-2-large/dense-region-caption/requests/{request_id}/cancel' + } + +export type PutFalAiFlorence2LargeDenseRegionCaptionRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiFlorence2LargeDenseRegionCaptionRequestsByRequestIdCancelResponse = + PutFalAiFlorence2LargeDenseRegionCaptionRequestsByRequestIdCancelResponses[keyof PutFalAiFlorence2LargeDenseRegionCaptionRequestsByRequestIdCancelResponses] + +export type PostFalAiFlorence2LargeDenseRegionCaptionData = { + body: SchemaFlorence2LargeDenseRegionCaptionInput + path?: never + query?: never + url: '/fal-ai/florence-2-large/dense-region-caption' +} + +export type PostFalAiFlorence2LargeDenseRegionCaptionResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlorence2LargeDenseRegionCaptionResponse = + PostFalAiFlorence2LargeDenseRegionCaptionResponses[keyof PostFalAiFlorence2LargeDenseRegionCaptionResponses] + +export type GetFalAiFlorence2LargeDenseRegionCaptionRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/florence-2-large/dense-region-caption/requests/{request_id}' +} + +export type GetFalAiFlorence2LargeDenseRegionCaptionRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaFlorence2LargeDenseRegionCaptionOutput + } + +export type GetFalAiFlorence2LargeDenseRegionCaptionRequestsByRequestIdResponse = + GetFalAiFlorence2LargeDenseRegionCaptionRequestsByRequestIdResponses[keyof GetFalAiFlorence2LargeDenseRegionCaptionRequestsByRequestIdResponses] + +export type GetFalAiFlorence2LargeReferringExpressionSegmentationRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/florence-2-large/referring-expression-segmentation/requests/{request_id}/status' + } + +export type GetFalAiFlorence2LargeReferringExpressionSegmentationRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFlorence2LargeReferringExpressionSegmentationRequestsByRequestIdStatusResponse = + GetFalAiFlorence2LargeReferringExpressionSegmentationRequestsByRequestIdStatusResponses[keyof GetFalAiFlorence2LargeReferringExpressionSegmentationRequestsByRequestIdStatusResponses] + +export type PutFalAiFlorence2LargeReferringExpressionSegmentationRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/florence-2-large/referring-expression-segmentation/requests/{request_id}/cancel' + } + +export type PutFalAiFlorence2LargeReferringExpressionSegmentationRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFlorence2LargeReferringExpressionSegmentationRequestsByRequestIdCancelResponse = + PutFalAiFlorence2LargeReferringExpressionSegmentationRequestsByRequestIdCancelResponses[keyof PutFalAiFlorence2LargeReferringExpressionSegmentationRequestsByRequestIdCancelResponses] + +export type PostFalAiFlorence2LargeReferringExpressionSegmentationData = { + body: SchemaFlorence2LargeReferringExpressionSegmentationInput + path?: never + query?: never + url: '/fal-ai/florence-2-large/referring-expression-segmentation' +} + +export type PostFalAiFlorence2LargeReferringExpressionSegmentationResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlorence2LargeReferringExpressionSegmentationResponse = + PostFalAiFlorence2LargeReferringExpressionSegmentationResponses[keyof PostFalAiFlorence2LargeReferringExpressionSegmentationResponses] + +export type GetFalAiFlorence2LargeReferringExpressionSegmentationRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/florence-2-large/referring-expression-segmentation/requests/{request_id}' + } + +export type GetFalAiFlorence2LargeReferringExpressionSegmentationRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaFlorence2LargeReferringExpressionSegmentationOutput + } + +export type GetFalAiFlorence2LargeReferringExpressionSegmentationRequestsByRequestIdResponse = + GetFalAiFlorence2LargeReferringExpressionSegmentationRequestsByRequestIdResponses[keyof GetFalAiFlorence2LargeReferringExpressionSegmentationRequestsByRequestIdResponses] + +export type GetFalAiFlorence2LargeObjectDetectionRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/florence-2-large/object-detection/requests/{request_id}/status' + } + +export type GetFalAiFlorence2LargeObjectDetectionRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFlorence2LargeObjectDetectionRequestsByRequestIdStatusResponse = + GetFalAiFlorence2LargeObjectDetectionRequestsByRequestIdStatusResponses[keyof GetFalAiFlorence2LargeObjectDetectionRequestsByRequestIdStatusResponses] + +export type PutFalAiFlorence2LargeObjectDetectionRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/florence-2-large/object-detection/requests/{request_id}/cancel' + } + +export type PutFalAiFlorence2LargeObjectDetectionRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFlorence2LargeObjectDetectionRequestsByRequestIdCancelResponse = + PutFalAiFlorence2LargeObjectDetectionRequestsByRequestIdCancelResponses[keyof PutFalAiFlorence2LargeObjectDetectionRequestsByRequestIdCancelResponses] + +export type PostFalAiFlorence2LargeObjectDetectionData = { + body: SchemaFlorence2LargeObjectDetectionInput + path?: never + query?: never + url: '/fal-ai/florence-2-large/object-detection' +} + +export type PostFalAiFlorence2LargeObjectDetectionResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlorence2LargeObjectDetectionResponse = + PostFalAiFlorence2LargeObjectDetectionResponses[keyof PostFalAiFlorence2LargeObjectDetectionResponses] + +export type GetFalAiFlorence2LargeObjectDetectionRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/florence-2-large/object-detection/requests/{request_id}' +} + +export type GetFalAiFlorence2LargeObjectDetectionRequestsByRequestIdResponses = + { + /** + * Result of the request. 
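+ * @example
+ * // Result sketch: fetch the final payload once polling reports COMPLETED
+ * // (same assumed host/auth; `requestId` comes from the submit response).
+ * const res = await fetch(
+ *   `https://queue.fal.run/fal-ai/florence-2-large/object-detection/requests/${requestId}`,
+ *   { headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+ * )
+ * const detections: SchemaFlorence2LargeObjectDetectionOutput = await res.json()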
+ */ + 200: SchemaFlorence2LargeObjectDetectionOutput + } + +export type GetFalAiFlorence2LargeObjectDetectionRequestsByRequestIdResponse = + GetFalAiFlorence2LargeObjectDetectionRequestsByRequestIdResponses[keyof GetFalAiFlorence2LargeObjectDetectionRequestsByRequestIdResponses] + +export type GetFalAiFlorence2LargeOpenVocabularyDetectionRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/florence-2-large/open-vocabulary-detection/requests/{request_id}/status' + } + +export type GetFalAiFlorence2LargeOpenVocabularyDetectionRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFlorence2LargeOpenVocabularyDetectionRequestsByRequestIdStatusResponse = + GetFalAiFlorence2LargeOpenVocabularyDetectionRequestsByRequestIdStatusResponses[keyof GetFalAiFlorence2LargeOpenVocabularyDetectionRequestsByRequestIdStatusResponses] + +export type PutFalAiFlorence2LargeOpenVocabularyDetectionRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/florence-2-large/open-vocabulary-detection/requests/{request_id}/cancel' + } + +export type PutFalAiFlorence2LargeOpenVocabularyDetectionRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFlorence2LargeOpenVocabularyDetectionRequestsByRequestIdCancelResponse = + PutFalAiFlorence2LargeOpenVocabularyDetectionRequestsByRequestIdCancelResponses[keyof PutFalAiFlorence2LargeOpenVocabularyDetectionRequestsByRequestIdCancelResponses] + +export type PostFalAiFlorence2LargeOpenVocabularyDetectionData = { + body: SchemaFlorence2LargeOpenVocabularyDetectionInput + path?: never + query?: never + url: '/fal-ai/florence-2-large/open-vocabulary-detection' +} + +export type PostFalAiFlorence2LargeOpenVocabularyDetectionResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlorence2LargeOpenVocabularyDetectionResponse = + PostFalAiFlorence2LargeOpenVocabularyDetectionResponses[keyof PostFalAiFlorence2LargeOpenVocabularyDetectionResponses] + +export type GetFalAiFlorence2LargeOpenVocabularyDetectionRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/florence-2-large/open-vocabulary-detection/requests/{request_id}' + } + +export type GetFalAiFlorence2LargeOpenVocabularyDetectionRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaFlorence2LargeOpenVocabularyDetectionOutput + } + +export type GetFalAiFlorence2LargeOpenVocabularyDetectionRequestsByRequestIdResponse = + GetFalAiFlorence2LargeOpenVocabularyDetectionRequestsByRequestIdResponses[keyof GetFalAiFlorence2LargeOpenVocabularyDetectionRequestsByRequestIdResponses] + +export type GetFalAiFlorence2LargeCaptionToPhraseGroundingRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/florence-2-large/caption-to-phrase-grounding/requests/{request_id}/status' + } + +export type GetFalAiFlorence2LargeCaptionToPhraseGroundingRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFlorence2LargeCaptionToPhraseGroundingRequestsByRequestIdStatusResponse = + GetFalAiFlorence2LargeCaptionToPhraseGroundingRequestsByRequestIdStatusResponses[keyof GetFalAiFlorence2LargeCaptionToPhraseGroundingRequestsByRequestIdStatusResponses] + +export type PutFalAiFlorence2LargeCaptionToPhraseGroundingRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/florence-2-large/caption-to-phrase-grounding/requests/{request_id}/cancel' + } + +export type PutFalAiFlorence2LargeCaptionToPhraseGroundingRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFlorence2LargeCaptionToPhraseGroundingRequestsByRequestIdCancelResponse = + PutFalAiFlorence2LargeCaptionToPhraseGroundingRequestsByRequestIdCancelResponses[keyof PutFalAiFlorence2LargeCaptionToPhraseGroundingRequestsByRequestIdCancelResponses] + +export type PostFalAiFlorence2LargeCaptionToPhraseGroundingData = { + body: SchemaFlorence2LargeCaptionToPhraseGroundingInput + path?: never + query?: never + url: '/fal-ai/florence-2-large/caption-to-phrase-grounding' +} + +export type PostFalAiFlorence2LargeCaptionToPhraseGroundingResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlorence2LargeCaptionToPhraseGroundingResponse = + PostFalAiFlorence2LargeCaptionToPhraseGroundingResponses[keyof PostFalAiFlorence2LargeCaptionToPhraseGroundingResponses] + +export type GetFalAiFlorence2LargeCaptionToPhraseGroundingRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/florence-2-large/caption-to-phrase-grounding/requests/{request_id}' + } + +export type GetFalAiFlorence2LargeCaptionToPhraseGroundingRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaFlorence2LargeCaptionToPhraseGroundingOutput + } + +export type GetFalAiFlorence2LargeCaptionToPhraseGroundingRequestsByRequestIdResponse = + GetFalAiFlorence2LargeCaptionToPhraseGroundingRequestsByRequestIdResponses[keyof GetFalAiFlorence2LargeCaptionToPhraseGroundingRequestsByRequestIdResponses] + +export type GetFalAiFlorence2LargeRegionProposalRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/florence-2-large/region-proposal/requests/{request_id}/status' + } + +export type GetFalAiFlorence2LargeRegionProposalRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiFlorence2LargeRegionProposalRequestsByRequestIdStatusResponse = + GetFalAiFlorence2LargeRegionProposalRequestsByRequestIdStatusResponses[keyof GetFalAiFlorence2LargeRegionProposalRequestsByRequestIdStatusResponses] + +export type PutFalAiFlorence2LargeRegionProposalRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/florence-2-large/region-proposal/requests/{request_id}/cancel' + } + +export type PutFalAiFlorence2LargeRegionProposalRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFlorence2LargeRegionProposalRequestsByRequestIdCancelResponse = + PutFalAiFlorence2LargeRegionProposalRequestsByRequestIdCancelResponses[keyof PutFalAiFlorence2LargeRegionProposalRequestsByRequestIdCancelResponses] + +export type PostFalAiFlorence2LargeRegionProposalData = { + body: SchemaFlorence2LargeRegionProposalInput + path?: never + query?: never + url: '/fal-ai/florence-2-large/region-proposal' +} + +export type PostFalAiFlorence2LargeRegionProposalResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlorence2LargeRegionProposalResponse = + PostFalAiFlorence2LargeRegionProposalResponses[keyof PostFalAiFlorence2LargeRegionProposalResponses] + +export type GetFalAiFlorence2LargeRegionProposalRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/florence-2-large/region-proposal/requests/{request_id}' +} + +export type GetFalAiFlorence2LargeRegionProposalRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlorence2LargeRegionProposalOutput +} + +export type GetFalAiFlorence2LargeRegionProposalRequestsByRequestIdResponse = + GetFalAiFlorence2LargeRegionProposalRequestsByRequestIdResponses[keyof GetFalAiFlorence2LargeRegionProposalRequestsByRequestIdResponses] + +export type GetFalAiFlorence2LargeOcrWithRegionRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/florence-2-large/ocr-with-region/requests/{request_id}/status' +} + +export type GetFalAiFlorence2LargeOcrWithRegionRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFlorence2LargeOcrWithRegionRequestsByRequestIdStatusResponse = + GetFalAiFlorence2LargeOcrWithRegionRequestsByRequestIdStatusResponses[keyof GetFalAiFlorence2LargeOcrWithRegionRequestsByRequestIdStatusResponses] + +export type PutFalAiFlorence2LargeOcrWithRegionRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/florence-2-large/ocr-with-region/requests/{request_id}/cancel' +} + +export type PutFalAiFlorence2LargeOcrWithRegionRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
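+ *
+ * Cancellation is the last of the four routes generated per endpoint:
+ * `POST /{endpoint}` to submit, `GET .../requests/{id}/status` to poll,
+ * `GET .../requests/{id}` to fetch the result, and
+ * `PUT .../requests/{id}/cancel` to abort a queued request.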
+ */ + success?: boolean + } + } + +export type PutFalAiFlorence2LargeOcrWithRegionRequestsByRequestIdCancelResponse = + PutFalAiFlorence2LargeOcrWithRegionRequestsByRequestIdCancelResponses[keyof PutFalAiFlorence2LargeOcrWithRegionRequestsByRequestIdCancelResponses] + +export type PostFalAiFlorence2LargeOcrWithRegionData = { + body: SchemaFlorence2LargeOcrWithRegionInput + path?: never + query?: never + url: '/fal-ai/florence-2-large/ocr-with-region' +} + +export type PostFalAiFlorence2LargeOcrWithRegionResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlorence2LargeOcrWithRegionResponse = + PostFalAiFlorence2LargeOcrWithRegionResponses[keyof PostFalAiFlorence2LargeOcrWithRegionResponses] + +export type GetFalAiFlorence2LargeOcrWithRegionRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/florence-2-large/ocr-with-region/requests/{request_id}' +} + +export type GetFalAiFlorence2LargeOcrWithRegionRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlorence2LargeOcrWithRegionOutput +} + +export type GetFalAiFlorence2LargeOcrWithRegionRequestsByRequestIdResponse = + GetFalAiFlorence2LargeOcrWithRegionRequestsByRequestIdResponses[keyof GetFalAiFlorence2LargeOcrWithRegionRequestsByRequestIdResponses] + +export type GetFalAiFlorence2LargeRegionToSegmentationRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/florence-2-large/region-to-segmentation/requests/{request_id}/status' + } + +export type GetFalAiFlorence2LargeRegionToSegmentationRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFlorence2LargeRegionToSegmentationRequestsByRequestIdStatusResponse = + GetFalAiFlorence2LargeRegionToSegmentationRequestsByRequestIdStatusResponses[keyof GetFalAiFlorence2LargeRegionToSegmentationRequestsByRequestIdStatusResponses] + +export type PutFalAiFlorence2LargeRegionToSegmentationRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/florence-2-large/region-to-segmentation/requests/{request_id}/cancel' + } + +export type PutFalAiFlorence2LargeRegionToSegmentationRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFlorence2LargeRegionToSegmentationRequestsByRequestIdCancelResponse = + PutFalAiFlorence2LargeRegionToSegmentationRequestsByRequestIdCancelResponses[keyof PutFalAiFlorence2LargeRegionToSegmentationRequestsByRequestIdCancelResponses] + +export type PostFalAiFlorence2LargeRegionToSegmentationData = { + body: SchemaFlorence2LargeRegionToSegmentationInput + path?: never + query?: never + url: '/fal-ai/florence-2-large/region-to-segmentation' +} + +export type PostFalAiFlorence2LargeRegionToSegmentationResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlorence2LargeRegionToSegmentationResponse = + PostFalAiFlorence2LargeRegionToSegmentationResponses[keyof PostFalAiFlorence2LargeRegionToSegmentationResponses] + +export type GetFalAiFlorence2LargeRegionToSegmentationRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/florence-2-large/region-to-segmentation/requests/{request_id}' + } + +export type GetFalAiFlorence2LargeRegionToSegmentationRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaFlorence2LargeRegionToSegmentationOutput + } + +export type GetFalAiFlorence2LargeRegionToSegmentationRequestsByRequestIdResponse = + GetFalAiFlorence2LargeRegionToSegmentationRequestsByRequestIdResponses[keyof GetFalAiFlorence2LargeRegionToSegmentationRequestsByRequestIdResponses] + +export type GetFalAiStableDiffusionV3MediumImageToImageRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/stable-diffusion-v3-medium/image-to-image/requests/{request_id}/status' + } + +export type GetFalAiStableDiffusionV3MediumImageToImageRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiStableDiffusionV3MediumImageToImageRequestsByRequestIdStatusResponse = + GetFalAiStableDiffusionV3MediumImageToImageRequestsByRequestIdStatusResponses[keyof GetFalAiStableDiffusionV3MediumImageToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiStableDiffusionV3MediumImageToImageRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/stable-diffusion-v3-medium/image-to-image/requests/{request_id}/cancel' + } + +export type PutFalAiStableDiffusionV3MediumImageToImageRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiStableDiffusionV3MediumImageToImageRequestsByRequestIdCancelResponse = + PutFalAiStableDiffusionV3MediumImageToImageRequestsByRequestIdCancelResponses[keyof PutFalAiStableDiffusionV3MediumImageToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiStableDiffusionV3MediumImageToImageData = { + body: SchemaStableDiffusionV3MediumImageToImageInput + path?: never + query?: never + url: '/fal-ai/stable-diffusion-v3-medium/image-to-image' +} + +export type PostFalAiStableDiffusionV3MediumImageToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiStableDiffusionV3MediumImageToImageResponse = + PostFalAiStableDiffusionV3MediumImageToImageResponses[keyof PostFalAiStableDiffusionV3MediumImageToImageResponses] + +export type GetFalAiStableDiffusionV3MediumImageToImageRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/stable-diffusion-v3-medium/image-to-image/requests/{request_id}' + } + +export type GetFalAiStableDiffusionV3MediumImageToImageRequestsByRequestIdResponses = + { + /** + * Result of the request. 
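+ *
+ * The companion `...Response` alias collapses this status-code map via
+ * an indexed access, so a map with a single `200` entry resolves to the
+ * output schema directly. A minimal sketch of the same pattern with
+ * hypothetical names:
+ *
+ * @example
+ * // type Responses = { 200: Output }
+ * // type Response = Responses[keyof Responses] // resolves to Output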
+ */ + 200: SchemaStableDiffusionV3MediumImageToImageOutput + } + +export type GetFalAiStableDiffusionV3MediumImageToImageRequestsByRequestIdResponse = + GetFalAiStableDiffusionV3MediumImageToImageRequestsByRequestIdResponses[keyof GetFalAiStableDiffusionV3MediumImageToImageRequestsByRequestIdResponses] + +export type GetFalAiDwposeRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/dwpose/requests/{request_id}/status' +} + +export type GetFalAiDwposeRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiDwposeRequestsByRequestIdStatusResponse = + GetFalAiDwposeRequestsByRequestIdStatusResponses[keyof GetFalAiDwposeRequestsByRequestIdStatusResponses] + +export type PutFalAiDwposeRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/dwpose/requests/{request_id}/cancel' +} + +export type PutFalAiDwposeRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiDwposeRequestsByRequestIdCancelResponse = + PutFalAiDwposeRequestsByRequestIdCancelResponses[keyof PutFalAiDwposeRequestsByRequestIdCancelResponses] + +export type PostFalAiDwposeData = { + body: SchemaDwposeInput + path?: never + query?: never + url: '/fal-ai/dwpose' +} + +export type PostFalAiDwposeResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiDwposeResponse = + PostFalAiDwposeResponses[keyof PostFalAiDwposeResponses] + +export type GetFalAiDwposeRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/dwpose/requests/{request_id}' +} + +export type GetFalAiDwposeRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaDwposeOutput +} + +export type GetFalAiDwposeRequestsByRequestIdResponse = + GetFalAiDwposeRequestsByRequestIdResponses[keyof GetFalAiDwposeRequestsByRequestIdResponses] + +export type GetFalAiSd15DepthControlnetRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/sd15-depth-controlnet/requests/{request_id}/status' +} + +export type GetFalAiSd15DepthControlnetRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSd15DepthControlnetRequestsByRequestIdStatusResponse = + GetFalAiSd15DepthControlnetRequestsByRequestIdStatusResponses[keyof GetFalAiSd15DepthControlnetRequestsByRequestIdStatusResponses] + +export type PutFalAiSd15DepthControlnetRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sd15-depth-controlnet/requests/{request_id}/cancel' +} + +export type PutFalAiSd15DepthControlnetRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
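+ *
+ * Because the field is optional, a cautious caller should only treat an
+ * explicit `true` as a confirmed cancellation. A sketch, with `res`
+ * standing in for the parsed cancel response:
+ *
+ * @example
+ * // const cancelled = res.success === true // treat a missing value as unknown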
+ */ + success?: boolean + } +} + +export type PutFalAiSd15DepthControlnetRequestsByRequestIdCancelResponse = + PutFalAiSd15DepthControlnetRequestsByRequestIdCancelResponses[keyof PutFalAiSd15DepthControlnetRequestsByRequestIdCancelResponses] + +export type PostFalAiSd15DepthControlnetData = { + body: SchemaSd15DepthControlnetInput + path?: never + query?: never + url: '/fal-ai/sd15-depth-controlnet' +} + +export type PostFalAiSd15DepthControlnetResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSd15DepthControlnetResponse = + PostFalAiSd15DepthControlnetResponses[keyof PostFalAiSd15DepthControlnetResponses] + +export type GetFalAiSd15DepthControlnetRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sd15-depth-controlnet/requests/{request_id}' +} + +export type GetFalAiSd15DepthControlnetRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSd15DepthControlnetOutput +} + +export type GetFalAiSd15DepthControlnetRequestsByRequestIdResponse = + GetFalAiSd15DepthControlnetRequestsByRequestIdResponses[keyof GetFalAiSd15DepthControlnetRequestsByRequestIdResponses] + +export type GetFalAiCcsrRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ccsr/requests/{request_id}/status' +} + +export type GetFalAiCcsrRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiCcsrRequestsByRequestIdStatusResponse = + GetFalAiCcsrRequestsByRequestIdStatusResponses[keyof GetFalAiCcsrRequestsByRequestIdStatusResponses] + +export type PutFalAiCcsrRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ccsr/requests/{request_id}/cancel' +} + +export type PutFalAiCcsrRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiCcsrRequestsByRequestIdCancelResponse = + PutFalAiCcsrRequestsByRequestIdCancelResponses[keyof PutFalAiCcsrRequestsByRequestIdCancelResponses] + +export type PostFalAiCcsrData = { + body: SchemaCcsrInput + path?: never + query?: never + url: '/fal-ai/ccsr' +} + +export type PostFalAiCcsrResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiCcsrResponse = + PostFalAiCcsrResponses[keyof PostFalAiCcsrResponses] + +export type GetFalAiCcsrRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ccsr/requests/{request_id}' +} + +export type GetFalAiCcsrRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaCcsrOutput +} + +export type GetFalAiCcsrRequestsByRequestIdResponse = + GetFalAiCcsrRequestsByRequestIdResponses[keyof GetFalAiCcsrRequestsByRequestIdResponses] + +export type GetFalAiOmniZeroRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/omni-zero/requests/{request_id}/status' +} + +export type GetFalAiOmniZeroRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiOmniZeroRequestsByRequestIdStatusResponse = + GetFalAiOmniZeroRequestsByRequestIdStatusResponses[keyof GetFalAiOmniZeroRequestsByRequestIdStatusResponses] + +export type PutFalAiOmniZeroRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/omni-zero/requests/{request_id}/cancel' +} + +export type PutFalAiOmniZeroRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiOmniZeroRequestsByRequestIdCancelResponse = + PutFalAiOmniZeroRequestsByRequestIdCancelResponses[keyof PutFalAiOmniZeroRequestsByRequestIdCancelResponses] + +export type PostFalAiOmniZeroData = { + body: SchemaOmniZeroInput + path?: never + query?: never + url: '/fal-ai/omni-zero' +} + +export type PostFalAiOmniZeroResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiOmniZeroResponse = + PostFalAiOmniZeroResponses[keyof PostFalAiOmniZeroResponses] + +export type GetFalAiOmniZeroRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/omni-zero/requests/{request_id}' +} + +export type GetFalAiOmniZeroRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaOmniZeroOutput +} + +export type GetFalAiOmniZeroRequestsByRequestIdResponse = + GetFalAiOmniZeroRequestsByRequestIdResponses[keyof GetFalAiOmniZeroRequestsByRequestIdResponses] + +export type GetFalAiIpAdapterFaceIdRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ip-adapter-face-id/requests/{request_id}/status' +} + +export type GetFalAiIpAdapterFaceIdRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiIpAdapterFaceIdRequestsByRequestIdStatusResponse = + GetFalAiIpAdapterFaceIdRequestsByRequestIdStatusResponses[keyof GetFalAiIpAdapterFaceIdRequestsByRequestIdStatusResponses] + +export type PutFalAiIpAdapterFaceIdRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ip-adapter-face-id/requests/{request_id}/cancel' +} + +export type PutFalAiIpAdapterFaceIdRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiIpAdapterFaceIdRequestsByRequestIdCancelResponse = + PutFalAiIpAdapterFaceIdRequestsByRequestIdCancelResponses[keyof PutFalAiIpAdapterFaceIdRequestsByRequestIdCancelResponses] + +export type PostFalAiIpAdapterFaceIdData = { + body: SchemaIpAdapterFaceIdInput + path?: never + query?: never + url: '/fal-ai/ip-adapter-face-id' +} + +export type PostFalAiIpAdapterFaceIdResponses = { + /** + * The request status. 
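+ *
+ * A `200` here confirms enqueueing, not completion: submission returns a
+ * queue status, and the result must be fetched separately once the
+ * status reaches `COMPLETED`. A minimal polling sketch, assuming a
+ * hypothetical authenticated fetch wrapper `falFetch` that is not part
+ * of this generated file:
+ *
+ * @example
+ * // const { request_id } = await falFetch('/fal-ai/ip-adapter-face-id', { method: 'POST', body })
+ * // let status = await falFetch(`/fal-ai/ip-adapter-face-id/requests/${request_id}/status`)
+ * // while (status.status !== 'COMPLETED') status = await falFetch(`/fal-ai/ip-adapter-face-id/requests/${request_id}/status`)
+ * // const result = await falFetch(`/fal-ai/ip-adapter-face-id/requests/${request_id}`)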
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiIpAdapterFaceIdResponse = + PostFalAiIpAdapterFaceIdResponses[keyof PostFalAiIpAdapterFaceIdResponses] + +export type GetFalAiIpAdapterFaceIdRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ip-adapter-face-id/requests/{request_id}' +} + +export type GetFalAiIpAdapterFaceIdRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaIpAdapterFaceIdOutput +} + +export type GetFalAiIpAdapterFaceIdRequestsByRequestIdResponse = + GetFalAiIpAdapterFaceIdRequestsByRequestIdResponses[keyof GetFalAiIpAdapterFaceIdRequestsByRequestIdResponses] + +export type GetFalAiLoraInpaintRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/lora/inpaint/requests/{request_id}/status' +} + +export type GetFalAiLoraInpaintRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLoraInpaintRequestsByRequestIdStatusResponse = + GetFalAiLoraInpaintRequestsByRequestIdStatusResponses[keyof GetFalAiLoraInpaintRequestsByRequestIdStatusResponses] + +export type PutFalAiLoraInpaintRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/lora/inpaint/requests/{request_id}/cancel' +} + +export type PutFalAiLoraInpaintRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLoraInpaintRequestsByRequestIdCancelResponse = + PutFalAiLoraInpaintRequestsByRequestIdCancelResponses[keyof PutFalAiLoraInpaintRequestsByRequestIdCancelResponses] + +export type PostFalAiLoraInpaintData = { + body: SchemaLoraInpaintInput + path?: never + query?: never + url: '/fal-ai/lora/inpaint' +} + +export type PostFalAiLoraInpaintResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLoraInpaintResponse = + PostFalAiLoraInpaintResponses[keyof PostFalAiLoraInpaintResponses] + +export type GetFalAiLoraInpaintRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/lora/inpaint/requests/{request_id}' +} + +export type GetFalAiLoraInpaintRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLoraInpaintOutput +} + +export type GetFalAiLoraInpaintRequestsByRequestIdResponse = + GetFalAiLoraInpaintRequestsByRequestIdResponses[keyof GetFalAiLoraInpaintRequestsByRequestIdResponses] + +export type GetFalAiLoraImageToImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/lora/image-to-image/requests/{request_id}/status' +} + +export type GetFalAiLoraImageToImageRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiLoraImageToImageRequestsByRequestIdStatusResponse = + GetFalAiLoraImageToImageRequestsByRequestIdStatusResponses[keyof GetFalAiLoraImageToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiLoraImageToImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/lora/image-to-image/requests/{request_id}/cancel' +} + +export type PutFalAiLoraImageToImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLoraImageToImageRequestsByRequestIdCancelResponse = + PutFalAiLoraImageToImageRequestsByRequestIdCancelResponses[keyof PutFalAiLoraImageToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiLoraImageToImageData = { + body: SchemaLoraImageToImageInput + path?: never + query?: never + url: '/fal-ai/lora/image-to-image' +} + +export type PostFalAiLoraImageToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLoraImageToImageResponse = + PostFalAiLoraImageToImageResponses[keyof PostFalAiLoraImageToImageResponses] + +export type GetFalAiLoraImageToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/lora/image-to-image/requests/{request_id}' +} + +export type GetFalAiLoraImageToImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLoraImageToImageOutput +} + +export type GetFalAiLoraImageToImageRequestsByRequestIdResponse = + GetFalAiLoraImageToImageRequestsByRequestIdResponses[keyof GetFalAiLoraImageToImageRequestsByRequestIdResponses] + +export type GetFalAiFastSdxlImageToImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/fast-sdxl/image-to-image/requests/{request_id}/status' +} + +export type GetFalAiFastSdxlImageToImageRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFastSdxlImageToImageRequestsByRequestIdStatusResponse = + GetFalAiFastSdxlImageToImageRequestsByRequestIdStatusResponses[keyof GetFalAiFastSdxlImageToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiFastSdxlImageToImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-sdxl/image-to-image/requests/{request_id}/cancel' +} + +export type PutFalAiFastSdxlImageToImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFastSdxlImageToImageRequestsByRequestIdCancelResponse = + PutFalAiFastSdxlImageToImageRequestsByRequestIdCancelResponses[keyof PutFalAiFastSdxlImageToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiFastSdxlImageToImageData = { + body: SchemaFastSdxlImageToImageInput + path?: never + query?: never + url: '/fal-ai/fast-sdxl/image-to-image' +} + +export type PostFalAiFastSdxlImageToImageResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiFastSdxlImageToImageResponse = + PostFalAiFastSdxlImageToImageResponses[keyof PostFalAiFastSdxlImageToImageResponses] + +export type GetFalAiFastSdxlImageToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-sdxl/image-to-image/requests/{request_id}' +} + +export type GetFalAiFastSdxlImageToImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFastSdxlImageToImageOutput +} + +export type GetFalAiFastSdxlImageToImageRequestsByRequestIdResponse = + GetFalAiFastSdxlImageToImageRequestsByRequestIdResponses[keyof GetFalAiFastSdxlImageToImageRequestsByRequestIdResponses] + +export type GetFalAiFastSdxlInpaintingRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/fast-sdxl/inpainting/requests/{request_id}/status' +} + +export type GetFalAiFastSdxlInpaintingRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFastSdxlInpaintingRequestsByRequestIdStatusResponse = + GetFalAiFastSdxlInpaintingRequestsByRequestIdStatusResponses[keyof GetFalAiFastSdxlInpaintingRequestsByRequestIdStatusResponses] + +export type PutFalAiFastSdxlInpaintingRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-sdxl/inpainting/requests/{request_id}/cancel' +} + +export type PutFalAiFastSdxlInpaintingRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFastSdxlInpaintingRequestsByRequestIdCancelResponse = + PutFalAiFastSdxlInpaintingRequestsByRequestIdCancelResponses[keyof PutFalAiFastSdxlInpaintingRequestsByRequestIdCancelResponses] + +export type PostFalAiFastSdxlInpaintingData = { + body: SchemaFastSdxlInpaintingInput + path?: never + query?: never + url: '/fal-ai/fast-sdxl/inpainting' +} + +export type PostFalAiFastSdxlInpaintingResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFastSdxlInpaintingResponse = + PostFalAiFastSdxlInpaintingResponses[keyof PostFalAiFastSdxlInpaintingResponses] + +export type GetFalAiFastSdxlInpaintingRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-sdxl/inpainting/requests/{request_id}' +} + +export type GetFalAiFastSdxlInpaintingRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFastSdxlInpaintingOutput +} + +export type GetFalAiFastSdxlInpaintingRequestsByRequestIdResponse = + GetFalAiFastSdxlInpaintingRequestsByRequestIdResponses[keyof GetFalAiFastSdxlInpaintingRequestsByRequestIdResponses] + +export type GetFalAiFaceToStickerRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/face-to-sticker/requests/{request_id}/status' +} + +export type GetFalAiFaceToStickerRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiFaceToStickerRequestsByRequestIdStatusResponse = + GetFalAiFaceToStickerRequestsByRequestIdStatusResponses[keyof GetFalAiFaceToStickerRequestsByRequestIdStatusResponses] + +export type PutFalAiFaceToStickerRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/face-to-sticker/requests/{request_id}/cancel' +} + +export type PutFalAiFaceToStickerRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFaceToStickerRequestsByRequestIdCancelResponse = + PutFalAiFaceToStickerRequestsByRequestIdCancelResponses[keyof PutFalAiFaceToStickerRequestsByRequestIdCancelResponses] + +export type PostFalAiFaceToStickerData = { + body: SchemaFaceToStickerInput + path?: never + query?: never + url: '/fal-ai/face-to-sticker' +} + +export type PostFalAiFaceToStickerResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFaceToStickerResponse = + PostFalAiFaceToStickerResponses[keyof PostFalAiFaceToStickerResponses] + +export type GetFalAiFaceToStickerRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/face-to-sticker/requests/{request_id}' +} + +export type GetFalAiFaceToStickerRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFaceToStickerOutput +} + +export type GetFalAiFaceToStickerRequestsByRequestIdResponse = + GetFalAiFaceToStickerRequestsByRequestIdResponses[keyof GetFalAiFaceToStickerRequestsByRequestIdResponses] + +export type GetFalAiPhotomakerRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/photomaker/requests/{request_id}/status' +} + +export type GetFalAiPhotomakerRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiPhotomakerRequestsByRequestIdStatusResponse = + GetFalAiPhotomakerRequestsByRequestIdStatusResponses[keyof GetFalAiPhotomakerRequestsByRequestIdStatusResponses] + +export type PutFalAiPhotomakerRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/photomaker/requests/{request_id}/cancel' +} + +export type PutFalAiPhotomakerRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiPhotomakerRequestsByRequestIdCancelResponse = + PutFalAiPhotomakerRequestsByRequestIdCancelResponses[keyof PutFalAiPhotomakerRequestsByRequestIdCancelResponses] + +export type PostFalAiPhotomakerData = { + body: SchemaPhotomakerInput + path?: never + query?: never + url: '/fal-ai/photomaker' +} + +export type PostFalAiPhotomakerResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiPhotomakerResponse = + PostFalAiPhotomakerResponses[keyof PostFalAiPhotomakerResponses] + +export type GetFalAiPhotomakerRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/photomaker/requests/{request_id}' +} + +export type GetFalAiPhotomakerRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPhotomakerOutput +} + +export type GetFalAiPhotomakerRequestsByRequestIdResponse = + GetFalAiPhotomakerRequestsByRequestIdResponses[keyof GetFalAiPhotomakerRequestsByRequestIdResponses] + +export type GetFalAiCreativeUpscalerRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/creative-upscaler/requests/{request_id}/status' +} + +export type GetFalAiCreativeUpscalerRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiCreativeUpscalerRequestsByRequestIdStatusResponse = + GetFalAiCreativeUpscalerRequestsByRequestIdStatusResponses[keyof GetFalAiCreativeUpscalerRequestsByRequestIdStatusResponses] + +export type PutFalAiCreativeUpscalerRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/creative-upscaler/requests/{request_id}/cancel' +} + +export type PutFalAiCreativeUpscalerRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiCreativeUpscalerRequestsByRequestIdCancelResponse = + PutFalAiCreativeUpscalerRequestsByRequestIdCancelResponses[keyof PutFalAiCreativeUpscalerRequestsByRequestIdCancelResponses] + +export type PostFalAiCreativeUpscalerData = { + body: SchemaCreativeUpscalerInput + path?: never + query?: never + url: '/fal-ai/creative-upscaler' +} + +export type PostFalAiCreativeUpscalerResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiCreativeUpscalerResponse = + PostFalAiCreativeUpscalerResponses[keyof PostFalAiCreativeUpscalerResponses] + +export type GetFalAiCreativeUpscalerRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/creative-upscaler/requests/{request_id}' +} + +export type GetFalAiCreativeUpscalerRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaCreativeUpscalerOutput +} + +export type GetFalAiCreativeUpscalerRequestsByRequestIdResponse = + GetFalAiCreativeUpscalerRequestsByRequestIdResponses[keyof GetFalAiCreativeUpscalerRequestsByRequestIdResponses] + +export type GetFalAiBirefnetRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/birefnet/requests/{request_id}/status' +} + +export type GetFalAiBirefnetRequestsByRequestIdStatusResponses = { + /** + * The request status. 
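+ *
+ * A note on the sibling `...Data` shapes: request parts an endpoint does
+ * not accept are typed `?: never` (e.g. `body?: never` on GET routes),
+ * so the compiler rejects any attempt to supply them.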
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiBirefnetRequestsByRequestIdStatusResponse = + GetFalAiBirefnetRequestsByRequestIdStatusResponses[keyof GetFalAiBirefnetRequestsByRequestIdStatusResponses] + +export type PutFalAiBirefnetRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/birefnet/requests/{request_id}/cancel' +} + +export type PutFalAiBirefnetRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiBirefnetRequestsByRequestIdCancelResponse = + PutFalAiBirefnetRequestsByRequestIdCancelResponses[keyof PutFalAiBirefnetRequestsByRequestIdCancelResponses] + +export type PostFalAiBirefnetData = { + body: SchemaBirefnetInput + path?: never + query?: never + url: '/fal-ai/birefnet' +} + +export type PostFalAiBirefnetResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiBirefnetResponse = + PostFalAiBirefnetResponses[keyof PostFalAiBirefnetResponses] + +export type GetFalAiBirefnetRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/birefnet/requests/{request_id}' +} + +export type GetFalAiBirefnetRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaBirefnetOutput +} + +export type GetFalAiBirefnetRequestsByRequestIdResponse = + GetFalAiBirefnetRequestsByRequestIdResponses[keyof GetFalAiBirefnetRequestsByRequestIdResponses] + +export type GetFalAiPlaygroundV25ImageToImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/playground-v25/image-to-image/requests/{request_id}/status' +} + +export type GetFalAiPlaygroundV25ImageToImageRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiPlaygroundV25ImageToImageRequestsByRequestIdStatusResponse = + GetFalAiPlaygroundV25ImageToImageRequestsByRequestIdStatusResponses[keyof GetFalAiPlaygroundV25ImageToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiPlaygroundV25ImageToImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/playground-v25/image-to-image/requests/{request_id}/cancel' +} + +export type PutFalAiPlaygroundV25ImageToImageRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiPlaygroundV25ImageToImageRequestsByRequestIdCancelResponse = + PutFalAiPlaygroundV25ImageToImageRequestsByRequestIdCancelResponses[keyof PutFalAiPlaygroundV25ImageToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiPlaygroundV25ImageToImageData = { + body: SchemaPlaygroundV25ImageToImageInput + path?: never + query?: never + url: '/fal-ai/playground-v25/image-to-image' +} + +export type PostFalAiPlaygroundV25ImageToImageResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiPlaygroundV25ImageToImageResponse = + PostFalAiPlaygroundV25ImageToImageResponses[keyof PostFalAiPlaygroundV25ImageToImageResponses] + +export type GetFalAiPlaygroundV25ImageToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/playground-v25/image-to-image/requests/{request_id}' +} + +export type GetFalAiPlaygroundV25ImageToImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPlaygroundV25ImageToImageOutput +} + +export type GetFalAiPlaygroundV25ImageToImageRequestsByRequestIdResponse = + GetFalAiPlaygroundV25ImageToImageRequestsByRequestIdResponses[keyof GetFalAiPlaygroundV25ImageToImageRequestsByRequestIdResponses] + +export type GetFalAiFastLightningSdxlImageToImageRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/fast-lightning-sdxl/image-to-image/requests/{request_id}/status' + } + +export type GetFalAiFastLightningSdxlImageToImageRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFastLightningSdxlImageToImageRequestsByRequestIdStatusResponse = + GetFalAiFastLightningSdxlImageToImageRequestsByRequestIdStatusResponses[keyof GetFalAiFastLightningSdxlImageToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiFastLightningSdxlImageToImageRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-lightning-sdxl/image-to-image/requests/{request_id}/cancel' + } + +export type PutFalAiFastLightningSdxlImageToImageRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFastLightningSdxlImageToImageRequestsByRequestIdCancelResponse = + PutFalAiFastLightningSdxlImageToImageRequestsByRequestIdCancelResponses[keyof PutFalAiFastLightningSdxlImageToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiFastLightningSdxlImageToImageData = { + body: SchemaFastLightningSdxlImageToImageInput + path?: never + query?: never + url: '/fal-ai/fast-lightning-sdxl/image-to-image' +} + +export type PostFalAiFastLightningSdxlImageToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFastLightningSdxlImageToImageResponse = + PostFalAiFastLightningSdxlImageToImageResponses[keyof PostFalAiFastLightningSdxlImageToImageResponses] + +export type GetFalAiFastLightningSdxlImageToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-lightning-sdxl/image-to-image/requests/{request_id}' +} + +export type GetFalAiFastLightningSdxlImageToImageRequestsByRequestIdResponses = + { + /** + * Result of the request. 
+ */ + 200: SchemaFastLightningSdxlImageToImageOutput + } + +export type GetFalAiFastLightningSdxlImageToImageRequestsByRequestIdResponse = + GetFalAiFastLightningSdxlImageToImageRequestsByRequestIdResponses[keyof GetFalAiFastLightningSdxlImageToImageRequestsByRequestIdResponses] + +export type GetFalAiFastLightningSdxlInpaintingRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/fast-lightning-sdxl/inpainting/requests/{request_id}/status' +} + +export type GetFalAiFastLightningSdxlInpaintingRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFastLightningSdxlInpaintingRequestsByRequestIdStatusResponse = + GetFalAiFastLightningSdxlInpaintingRequestsByRequestIdStatusResponses[keyof GetFalAiFastLightningSdxlInpaintingRequestsByRequestIdStatusResponses] + +export type PutFalAiFastLightningSdxlInpaintingRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-lightning-sdxl/inpainting/requests/{request_id}/cancel' +} + +export type PutFalAiFastLightningSdxlInpaintingRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFastLightningSdxlInpaintingRequestsByRequestIdCancelResponse = + PutFalAiFastLightningSdxlInpaintingRequestsByRequestIdCancelResponses[keyof PutFalAiFastLightningSdxlInpaintingRequestsByRequestIdCancelResponses] + +export type PostFalAiFastLightningSdxlInpaintingData = { + body: SchemaFastLightningSdxlInpaintingInput + path?: never + query?: never + url: '/fal-ai/fast-lightning-sdxl/inpainting' +} + +export type PostFalAiFastLightningSdxlInpaintingResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFastLightningSdxlInpaintingResponse = + PostFalAiFastLightningSdxlInpaintingResponses[keyof PostFalAiFastLightningSdxlInpaintingResponses] + +export type GetFalAiFastLightningSdxlInpaintingRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-lightning-sdxl/inpainting/requests/{request_id}' +} + +export type GetFalAiFastLightningSdxlInpaintingRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFastLightningSdxlInpaintingOutput +} + +export type GetFalAiFastLightningSdxlInpaintingRequestsByRequestIdResponse = + GetFalAiFastLightningSdxlInpaintingRequestsByRequestIdResponses[keyof GetFalAiFastLightningSdxlInpaintingRequestsByRequestIdResponses] + +export type GetFalAiPlaygroundV25InpaintingRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/playground-v25/inpainting/requests/{request_id}/status' +} + +export type GetFalAiPlaygroundV25InpaintingRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiPlaygroundV25InpaintingRequestsByRequestIdStatusResponse = + GetFalAiPlaygroundV25InpaintingRequestsByRequestIdStatusResponses[keyof GetFalAiPlaygroundV25InpaintingRequestsByRequestIdStatusResponses] + +export type PutFalAiPlaygroundV25InpaintingRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/playground-v25/inpainting/requests/{request_id}/cancel' +} + +export type PutFalAiPlaygroundV25InpaintingRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiPlaygroundV25InpaintingRequestsByRequestIdCancelResponse = + PutFalAiPlaygroundV25InpaintingRequestsByRequestIdCancelResponses[keyof PutFalAiPlaygroundV25InpaintingRequestsByRequestIdCancelResponses] + +export type PostFalAiPlaygroundV25InpaintingData = { + body: SchemaPlaygroundV25InpaintingInput + path?: never + query?: never + url: '/fal-ai/playground-v25/inpainting' +} + +export type PostFalAiPlaygroundV25InpaintingResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPlaygroundV25InpaintingResponse = + PostFalAiPlaygroundV25InpaintingResponses[keyof PostFalAiPlaygroundV25InpaintingResponses] + +export type GetFalAiPlaygroundV25InpaintingRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/playground-v25/inpainting/requests/{request_id}' +} + +export type GetFalAiPlaygroundV25InpaintingRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPlaygroundV25InpaintingOutput +} + +export type GetFalAiPlaygroundV25InpaintingRequestsByRequestIdResponse = + GetFalAiPlaygroundV25InpaintingRequestsByRequestIdResponses[keyof GetFalAiPlaygroundV25InpaintingRequestsByRequestIdResponses] + +export type GetFalAiFastLcmDiffusionInpaintingRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/fast-lcm-diffusion/inpainting/requests/{request_id}/status' +} + +export type GetFalAiFastLcmDiffusionInpaintingRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFastLcmDiffusionInpaintingRequestsByRequestIdStatusResponse = + GetFalAiFastLcmDiffusionInpaintingRequestsByRequestIdStatusResponses[keyof GetFalAiFastLcmDiffusionInpaintingRequestsByRequestIdStatusResponses] + +export type PutFalAiFastLcmDiffusionInpaintingRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-lcm-diffusion/inpainting/requests/{request_id}/cancel' +} + +export type PutFalAiFastLcmDiffusionInpaintingRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiFastLcmDiffusionInpaintingRequestsByRequestIdCancelResponse = + PutFalAiFastLcmDiffusionInpaintingRequestsByRequestIdCancelResponses[keyof PutFalAiFastLcmDiffusionInpaintingRequestsByRequestIdCancelResponses] + +export type PostFalAiFastLcmDiffusionInpaintingData = { + body: SchemaFastLcmDiffusionInpaintingInput + path?: never + query?: never + url: '/fal-ai/fast-lcm-diffusion/inpainting' +} + +export type PostFalAiFastLcmDiffusionInpaintingResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFastLcmDiffusionInpaintingResponse = + PostFalAiFastLcmDiffusionInpaintingResponses[keyof PostFalAiFastLcmDiffusionInpaintingResponses] + +export type GetFalAiFastLcmDiffusionInpaintingRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-lcm-diffusion/inpainting/requests/{request_id}' +} + +export type GetFalAiFastLcmDiffusionInpaintingRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFastLcmDiffusionInpaintingOutput +} + +export type GetFalAiFastLcmDiffusionInpaintingRequestsByRequestIdResponse = + GetFalAiFastLcmDiffusionInpaintingRequestsByRequestIdResponses[keyof GetFalAiFastLcmDiffusionInpaintingRequestsByRequestIdResponses] + +export type GetFalAiFastLcmDiffusionImageToImageRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/fast-lcm-diffusion/image-to-image/requests/{request_id}/status' + } + +export type GetFalAiFastLcmDiffusionImageToImageRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFastLcmDiffusionImageToImageRequestsByRequestIdStatusResponse = + GetFalAiFastLcmDiffusionImageToImageRequestsByRequestIdStatusResponses[keyof GetFalAiFastLcmDiffusionImageToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiFastLcmDiffusionImageToImageRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-lcm-diffusion/image-to-image/requests/{request_id}/cancel' + } + +export type PutFalAiFastLcmDiffusionImageToImageRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFastLcmDiffusionImageToImageRequestsByRequestIdCancelResponse = + PutFalAiFastLcmDiffusionImageToImageRequestsByRequestIdCancelResponses[keyof PutFalAiFastLcmDiffusionImageToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiFastLcmDiffusionImageToImageData = { + body: SchemaFastLcmDiffusionImageToImageInput + path?: never + query?: never + url: '/fal-ai/fast-lcm-diffusion/image-to-image' +} + +export type PostFalAiFastLcmDiffusionImageToImageResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiFastLcmDiffusionImageToImageResponse = + PostFalAiFastLcmDiffusionImageToImageResponses[keyof PostFalAiFastLcmDiffusionImageToImageResponses] + +export type GetFalAiFastLcmDiffusionImageToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-lcm-diffusion/image-to-image/requests/{request_id}' +} + +export type GetFalAiFastLcmDiffusionImageToImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFastLcmDiffusionImageToImageOutput +} + +export type GetFalAiFastLcmDiffusionImageToImageRequestsByRequestIdResponse = + GetFalAiFastLcmDiffusionImageToImageRequestsByRequestIdResponses[keyof GetFalAiFastLcmDiffusionImageToImageRequestsByRequestIdResponses] + +export type GetFalAiRetoucherRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/retoucher/requests/{request_id}/status' +} + +export type GetFalAiRetoucherRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiRetoucherRequestsByRequestIdStatusResponse = + GetFalAiRetoucherRequestsByRequestIdStatusResponses[keyof GetFalAiRetoucherRequestsByRequestIdStatusResponses] + +export type PutFalAiRetoucherRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/retoucher/requests/{request_id}/cancel' +} + +export type PutFalAiRetoucherRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiRetoucherRequestsByRequestIdCancelResponse = + PutFalAiRetoucherRequestsByRequestIdCancelResponses[keyof PutFalAiRetoucherRequestsByRequestIdCancelResponses] + +export type PostFalAiRetoucherData = { + body: SchemaRetoucherInput + path?: never + query?: never + url: '/fal-ai/retoucher' +} + +export type PostFalAiRetoucherResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiRetoucherResponse = + PostFalAiRetoucherResponses[keyof PostFalAiRetoucherResponses] + +export type GetFalAiRetoucherRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/retoucher/requests/{request_id}' +} + +export type GetFalAiRetoucherRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaRetoucherOutput +} + +export type GetFalAiRetoucherRequestsByRequestIdResponse = + GetFalAiRetoucherRequestsByRequestIdResponses[keyof GetFalAiRetoucherRequestsByRequestIdResponses] + +export type GetFalAiImageutilsDepthRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/imageutils/depth/requests/{request_id}/status' +} + +export type GetFalAiImageutilsDepthRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiImageutilsDepthRequestsByRequestIdStatusResponse = + GetFalAiImageutilsDepthRequestsByRequestIdStatusResponses[keyof GetFalAiImageutilsDepthRequestsByRequestIdStatusResponses] + +export type PutFalAiImageutilsDepthRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/imageutils/depth/requests/{request_id}/cancel' +} + +export type PutFalAiImageutilsDepthRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiImageutilsDepthRequestsByRequestIdCancelResponse = + PutFalAiImageutilsDepthRequestsByRequestIdCancelResponses[keyof PutFalAiImageutilsDepthRequestsByRequestIdCancelResponses] + +export type PostFalAiImageutilsDepthData = { + body: SchemaImageutilsDepthInput + path?: never + query?: never + url: '/fal-ai/imageutils/depth' +} + +export type PostFalAiImageutilsDepthResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageutilsDepthResponse = + PostFalAiImageutilsDepthResponses[keyof PostFalAiImageutilsDepthResponses] + +export type GetFalAiImageutilsDepthRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/imageutils/depth/requests/{request_id}' +} + +export type GetFalAiImageutilsDepthRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImageutilsDepthOutput +} + +export type GetFalAiImageutilsDepthRequestsByRequestIdResponse = + GetFalAiImageutilsDepthRequestsByRequestIdResponses[keyof GetFalAiImageutilsDepthRequestsByRequestIdResponses] + +export type GetFalAiImageutilsMarigoldDepthRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/imageutils/marigold-depth/requests/{request_id}/status' +} + +export type GetFalAiImageutilsMarigoldDepthRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiImageutilsMarigoldDepthRequestsByRequestIdStatusResponse = + GetFalAiImageutilsMarigoldDepthRequestsByRequestIdStatusResponses[keyof GetFalAiImageutilsMarigoldDepthRequestsByRequestIdStatusResponses] + +export type PutFalAiImageutilsMarigoldDepthRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/imageutils/marigold-depth/requests/{request_id}/cancel' +} + +export type PutFalAiImageutilsMarigoldDepthRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiImageutilsMarigoldDepthRequestsByRequestIdCancelResponse = + PutFalAiImageutilsMarigoldDepthRequestsByRequestIdCancelResponses[keyof PutFalAiImageutilsMarigoldDepthRequestsByRequestIdCancelResponses] + +export type PostFalAiImageutilsMarigoldDepthData = { + body: SchemaImageutilsMarigoldDepthInput + path?: never + query?: never + url: '/fal-ai/imageutils/marigold-depth' +} + +export type PostFalAiImageutilsMarigoldDepthResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageutilsMarigoldDepthResponse = + PostFalAiImageutilsMarigoldDepthResponses[keyof PostFalAiImageutilsMarigoldDepthResponses] + +export type GetFalAiImageutilsMarigoldDepthRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/imageutils/marigold-depth/requests/{request_id}' +} + +export type GetFalAiImageutilsMarigoldDepthRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImageutilsMarigoldDepthOutput +} + +export type GetFalAiImageutilsMarigoldDepthRequestsByRequestIdResponse = + GetFalAiImageutilsMarigoldDepthRequestsByRequestIdResponses[keyof GetFalAiImageutilsMarigoldDepthRequestsByRequestIdResponses] + +export type GetFalAiPulidRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pulid/requests/{request_id}/status' +} + +export type GetFalAiPulidRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiPulidRequestsByRequestIdStatusResponse = + GetFalAiPulidRequestsByRequestIdStatusResponses[keyof GetFalAiPulidRequestsByRequestIdStatusResponses] + +export type PutFalAiPulidRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pulid/requests/{request_id}/cancel' +} + +export type PutFalAiPulidRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiPulidRequestsByRequestIdCancelResponse = + PutFalAiPulidRequestsByRequestIdCancelResponses[keyof PutFalAiPulidRequestsByRequestIdCancelResponses] + +export type PostFalAiPulidData = { + body: SchemaPulidInput + path?: never + query?: never + url: '/fal-ai/pulid' +} + +export type PostFalAiPulidResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPulidResponse = + PostFalAiPulidResponses[keyof PostFalAiPulidResponses] + +export type GetFalAiPulidRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pulid/requests/{request_id}' +} + +export type GetFalAiPulidRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPulidOutput +} + +export type GetFalAiPulidRequestsByRequestIdResponse = + GetFalAiPulidRequestsByRequestIdResponses[keyof GetFalAiPulidRequestsByRequestIdResponses] + +export type GetFalAiFastSdxlControlnetCannyImageToImageRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/fast-sdxl-controlnet-canny/image-to-image/requests/{request_id}/status' + } + +export type GetFalAiFastSdxlControlnetCannyImageToImageRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
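+// --- Type-level sketch (editorial): why each endpoint pairs a `...Responses`
+// object with a `...Response` alias. Indexing the per-status-code map by
+// `keyof` collapses it into one union a caller can handle; with only a 200
+// entry declared, the PuLID alias is exactly the 200 body. Import path is an
+// assumption.
+import type {
+  GetFalAiPulidRequestsByRequestIdResponse,
+  GetFalAiPulidRequestsByRequestIdResponses,
+} from './types.gen'
+
+// The alias and the indexed form name the same type, so this type-checks:
+const pulidResult: GetFalAiPulidRequestsByRequestIdResponse =
+  {} as GetFalAiPulidRequestsByRequestIdResponses[200]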
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiFastSdxlControlnetCannyImageToImageRequestsByRequestIdStatusResponse = + GetFalAiFastSdxlControlnetCannyImageToImageRequestsByRequestIdStatusResponses[keyof GetFalAiFastSdxlControlnetCannyImageToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiFastSdxlControlnetCannyImageToImageRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-sdxl-controlnet-canny/image-to-image/requests/{request_id}/cancel' + } + +export type PutFalAiFastSdxlControlnetCannyImageToImageRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFastSdxlControlnetCannyImageToImageRequestsByRequestIdCancelResponse = + PutFalAiFastSdxlControlnetCannyImageToImageRequestsByRequestIdCancelResponses[keyof PutFalAiFastSdxlControlnetCannyImageToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiFastSdxlControlnetCannyImageToImageData = { + body: SchemaFastSdxlControlnetCannyImageToImageInput + path?: never + query?: never + url: '/fal-ai/fast-sdxl-controlnet-canny/image-to-image' +} + +export type PostFalAiFastSdxlControlnetCannyImageToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFastSdxlControlnetCannyImageToImageResponse = + PostFalAiFastSdxlControlnetCannyImageToImageResponses[keyof PostFalAiFastSdxlControlnetCannyImageToImageResponses] + +export type GetFalAiFastSdxlControlnetCannyImageToImageRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-sdxl-controlnet-canny/image-to-image/requests/{request_id}' + } + +export type GetFalAiFastSdxlControlnetCannyImageToImageRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaFastSdxlControlnetCannyImageToImageOutput + } + +export type GetFalAiFastSdxlControlnetCannyImageToImageRequestsByRequestIdResponse = + GetFalAiFastSdxlControlnetCannyImageToImageRequestsByRequestIdResponses[keyof GetFalAiFastSdxlControlnetCannyImageToImageRequestsByRequestIdResponses] + +export type GetFalAiFastSdxlControlnetCannyInpaintingRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/fast-sdxl-controlnet-canny/inpainting/requests/{request_id}/status' + } + +export type GetFalAiFastSdxlControlnetCannyInpaintingRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFastSdxlControlnetCannyInpaintingRequestsByRequestIdStatusResponse = + GetFalAiFastSdxlControlnetCannyInpaintingRequestsByRequestIdStatusResponses[keyof GetFalAiFastSdxlControlnetCannyInpaintingRequestsByRequestIdStatusResponses] + +export type PutFalAiFastSdxlControlnetCannyInpaintingRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-sdxl-controlnet-canny/inpainting/requests/{request_id}/cancel' + } + +export type PutFalAiFastSdxlControlnetCannyInpaintingRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. 
+ */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFastSdxlControlnetCannyInpaintingRequestsByRequestIdCancelResponse = + PutFalAiFastSdxlControlnetCannyInpaintingRequestsByRequestIdCancelResponses[keyof PutFalAiFastSdxlControlnetCannyInpaintingRequestsByRequestIdCancelResponses] + +export type PostFalAiFastSdxlControlnetCannyInpaintingData = { + body: SchemaFastSdxlControlnetCannyInpaintingInput + path?: never + query?: never + url: '/fal-ai/fast-sdxl-controlnet-canny/inpainting' +} + +export type PostFalAiFastSdxlControlnetCannyInpaintingResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFastSdxlControlnetCannyInpaintingResponse = + PostFalAiFastSdxlControlnetCannyInpaintingResponses[keyof PostFalAiFastSdxlControlnetCannyInpaintingResponses] + +export type GetFalAiFastSdxlControlnetCannyInpaintingRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-sdxl-controlnet-canny/inpainting/requests/{request_id}' +} + +export type GetFalAiFastSdxlControlnetCannyInpaintingRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaFastSdxlControlnetCannyInpaintingOutput + } + +export type GetFalAiFastSdxlControlnetCannyInpaintingRequestsByRequestIdResponse = + GetFalAiFastSdxlControlnetCannyInpaintingRequestsByRequestIdResponses[keyof GetFalAiFastSdxlControlnetCannyInpaintingRequestsByRequestIdResponses] + +export type GetFalAiLcmSd15I2iRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/lcm-sd15-i2i/requests/{request_id}/status' +} + +export type GetFalAiLcmSd15I2iRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLcmSd15I2iRequestsByRequestIdStatusResponse = + GetFalAiLcmSd15I2iRequestsByRequestIdStatusResponses[keyof GetFalAiLcmSd15I2iRequestsByRequestIdStatusResponses] + +export type PutFalAiLcmSd15I2iRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/lcm-sd15-i2i/requests/{request_id}/cancel' +} + +export type PutFalAiLcmSd15I2iRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLcmSd15I2iRequestsByRequestIdCancelResponse = + PutFalAiLcmSd15I2iRequestsByRequestIdCancelResponses[keyof PutFalAiLcmSd15I2iRequestsByRequestIdCancelResponses] + +export type PostFalAiLcmSd15I2iData = { + body: SchemaLcmSd15I2iInput + path?: never + query?: never + url: '/fal-ai/lcm-sd15-i2i' +} + +export type PostFalAiLcmSd15I2iResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLcmSd15I2iResponse = + PostFalAiLcmSd15I2iResponses[keyof PostFalAiLcmSd15I2iResponses] + +export type GetFalAiLcmSd15I2iRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/lcm-sd15-i2i/requests/{request_id}' +} + +export type GetFalAiLcmSd15I2iRequestsByRequestIdResponses = { + /** + * Result of the request. 
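+// --- Usage sketch (editorial): submitting a job with the typed POST shape
+// above. Per the Zod schema later in this diff, the body requires `prompt`,
+// `image_url`, `control_image_url`, and `mask_url`; the remaining fields are
+// defaulted server-side. Host, auth format, URLs, and import path are
+// assumptions for illustration.
+import type { PostFalAiFastSdxlControlnetCannyInpaintingData } from './types.gen'
+
+async function submitCannyInpainting() {
+  const data: PostFalAiFastSdxlControlnetCannyInpaintingData = {
+    body: {
+      prompt: 'restore the damaged area to match the marble surface',
+      image_url: 'https://example.com/statue.png',
+      control_image_url: 'https://example.com/statue-canny.png',
+      mask_url: 'https://example.com/statue-mask.png',
+    },
+    url: '/fal-ai/fast-sdxl-controlnet-canny/inpainting',
+  }
+  return fetch(`https://queue.fal.run${data.url}`, {
+    method: 'POST',
+    headers: {
+      Authorization: `Key ${process.env.FAL_KEY}`, // assumed auth format
+      'Content-Type': 'application/json',
+    },
+    body: JSON.stringify(data.body),
+  })
+}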
+ */ + 200: SchemaLcmSd15I2iOutput +} + +export type GetFalAiLcmSd15I2iRequestsByRequestIdResponse = + GetFalAiLcmSd15I2iRequestsByRequestIdResponses[keyof GetFalAiLcmSd15I2iRequestsByRequestIdResponses] + +export type GetFalAiInpaintRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/inpaint/requests/{request_id}/status' +} + +export type GetFalAiInpaintRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiInpaintRequestsByRequestIdStatusResponse = + GetFalAiInpaintRequestsByRequestIdStatusResponses[keyof GetFalAiInpaintRequestsByRequestIdStatusResponses] + +export type PutFalAiInpaintRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/inpaint/requests/{request_id}/cancel' +} + +export type PutFalAiInpaintRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiInpaintRequestsByRequestIdCancelResponse = + PutFalAiInpaintRequestsByRequestIdCancelResponses[keyof PutFalAiInpaintRequestsByRequestIdCancelResponses] + +export type PostFalAiInpaintData = { + body: SchemaInpaintInput + path?: never + query?: never + url: '/fal-ai/inpaint' +} + +export type PostFalAiInpaintResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiInpaintResponse = + PostFalAiInpaintResponses[keyof PostFalAiInpaintResponses] + +export type GetFalAiInpaintRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/inpaint/requests/{request_id}' +} + +export type GetFalAiInpaintRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaInpaintOutput +} + +export type GetFalAiInpaintRequestsByRequestIdResponse = + GetFalAiInpaintRequestsByRequestIdResponses[keyof GetFalAiInpaintRequestsByRequestIdResponses] + +export type GetFalAiEsrganRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/esrgan/requests/{request_id}/status' +} + +export type GetFalAiEsrganRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiEsrganRequestsByRequestIdStatusResponse = + GetFalAiEsrganRequestsByRequestIdStatusResponses[keyof GetFalAiEsrganRequestsByRequestIdStatusResponses] + +export type PutFalAiEsrganRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/esrgan/requests/{request_id}/cancel' +} + +export type PutFalAiEsrganRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiEsrganRequestsByRequestIdCancelResponse = + PutFalAiEsrganRequestsByRequestIdCancelResponses[keyof PutFalAiEsrganRequestsByRequestIdCancelResponses] + +export type PostFalAiEsrganData = { + body: SchemaEsrganInput + path?: never + query?: never + url: '/fal-ai/esrgan' +} + +export type PostFalAiEsrganResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiEsrganResponse = + PostFalAiEsrganResponses[keyof PostFalAiEsrganResponses] + +export type GetFalAiEsrganRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/esrgan/requests/{request_id}' +} + +export type GetFalAiEsrganRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaEsrganOutput +} + +export type GetFalAiEsrganRequestsByRequestIdResponse = + GetFalAiEsrganRequestsByRequestIdResponses[keyof GetFalAiEsrganRequestsByRequestIdResponses] + +export type GetFalAiImageutilsRembgRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/imageutils/rembg/requests/{request_id}/status' +} + +export type GetFalAiImageutilsRembgRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiImageutilsRembgRequestsByRequestIdStatusResponse = + GetFalAiImageutilsRembgRequestsByRequestIdStatusResponses[keyof GetFalAiImageutilsRembgRequestsByRequestIdStatusResponses] + +export type PutFalAiImageutilsRembgRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/imageutils/rembg/requests/{request_id}/cancel' +} + +export type PutFalAiImageutilsRembgRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiImageutilsRembgRequestsByRequestIdCancelResponse = + PutFalAiImageutilsRembgRequestsByRequestIdCancelResponses[keyof PutFalAiImageutilsRembgRequestsByRequestIdCancelResponses] + +export type PostFalAiImageutilsRembgData = { + body: SchemaImageutilsRembgInput + path?: never + query?: never + url: '/fal-ai/imageutils/rembg' +} + +export type PostFalAiImageutilsRembgResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageutilsRembgResponse = + PostFalAiImageutilsRembgResponses[keyof PostFalAiImageutilsRembgResponses] + +export type GetFalAiImageutilsRembgRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/imageutils/rembg/requests/{request_id}' +} + +export type GetFalAiImageutilsRembgRequestsByRequestIdResponses = { + /** + * Result of the request. 
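+// --- Usage sketch (editorial): every endpoint in this file follows the same
+// queue contract (POST returns SchemaQueueStatus; GET /requests/{id} returns
+// the model-specific output; /status and /cancel are uniform), so one generic
+// helper can serve all of the generated Data shapes. Host and auth format are
+// assumptions.
+async function getQueueResult<TOut>(data: {
+  path: { request_id: string }
+  url: string
+}): Promise<TOut> {
+  const endpoint = `https://queue.fal.run${data.url.replace(
+    '{request_id}',
+    data.path.request_id,
+  )}`
+  const res = await fetch(endpoint, {
+    headers: { Authorization: `Key ${process.env.FAL_KEY}` }, // assumed format
+  })
+  if (!res.ok) throw new Error(`queue request failed: ${res.status}`)
+  return (await res.json()) as TOut
+}
+// e.g. getQueueResult<SchemaEsrganOutput>({
+//   path: { request_id: id },
+//   url: '/fal-ai/esrgan/requests/{request_id}',
+// })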
+ */ + 200: SchemaImageutilsRembgOutput +} + +export type GetFalAiImageutilsRembgRequestsByRequestIdResponse = + GetFalAiImageutilsRembgRequestsByRequestIdResponses[keyof GetFalAiImageutilsRembgRequestsByRequestIdResponses] diff --git a/packages/typescript/ai-fal/src/generated/image-to-image/zod.gen.ts b/packages/typescript/ai-fal/src/generated/image-to-image/zod.gen.ts new file mode 100644 index 00000000..3a09430a --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/image-to-image/zod.gen.ts @@ -0,0 +1,57026 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { z } from 'zod' + +/** + * Image + * + * Represents an image file. + */ +export const zSchemaImage = z + .object({ + height: z.optional( + z.int().register(z.globalRegistry, { + description: 'The height of the image in pixels.', + }), + ), + file_size: z.optional( + z.int().register(z.globalRegistry, { + description: 'The size of the file in bytes.', + }), + ), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), + width: z.optional( + z.int().register(z.globalRegistry, { + description: 'The width of the image in pixels.', + }), + ), + file_name: z.optional( + z.string().register(z.globalRegistry, { + description: + 'The name of the file. It will be auto-generated if not provided.', + }), + ), + content_type: z.optional( + z.string().register(z.globalRegistry, { + description: 'The mime type of the file.', + }), + ), + file_data: z.optional( + z.string().register(z.globalRegistry, { + description: 'File data', + }), + ), + }) + .register(z.globalRegistry, { + description: 'Represents an image file.', + }) + +/** + * RemoveBackgroundOutput + */ +export const zSchemaImageutilsRembgOutput = z.object({ + image: zSchemaImage, +}) + +/** + * RemoveBackgroundInput + */ +export const zSchemaImageutilsRembgInput = z.object({ + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + crop_to_bbox: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the resulting image be cropped to a bounding box around the subject\n ', + }), + ) + .default(false), + image_url: z.string().register(z.globalRegistry, { + description: 'Input image url.', + }), +}) + +/** + * UpscaleOutput + */ +export const zSchemaEsrganOutput = z.object({ + image: zSchemaImage, +}) + +/** + * UpscaleInput + */ +export const zSchemaEsrganInput = z.object({ + model: z.optional( + z + .enum([ + 'RealESRGAN_x4plus', + 'RealESRGAN_x2plus', + 'RealESRGAN_x4plus_anime_6B', + 'RealESRGAN_x4_v3', + 'RealESRGAN_x4_wdn_v3', + 'RealESRGAN_x4_anime_v3', + ]) + .register(z.globalRegistry, { + description: 'Model to use for upscaling', + }), + ), + face: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Upscaling a face', + }), + ) + .default(false), + scale: z + .optional( + z.number().gte(1).lte(8).register(z.globalRegistry, { + description: 'Rescaling factor', + }), + ) + .default(2), + tile: z + .optional( + z.int().register(z.globalRegistry, { + description: + 'Tile size. Default is 0, that is no tile. 
When encountering the out-of-GPU-memory issue, please specify it, e.g., 400 or 200', + }), + ) + .default(0), + output_format: z.optional( + z.enum(['png', 'jpeg']).register(z.globalRegistry, { + description: 'Output image format (png or jpeg)', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'Url to input image', + }), +}) + +/** + * InpaintOutput + */ +export const zSchemaInpaintOutput = z.object({ + image: zSchemaImage, + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * InpaintInput + */ +export const zSchemaInpaintInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. Be as descriptive as possible for best results.', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'Input image for img2img or inpaint mode', + }), + model_name: z.string().register(z.globalRegistry, { + description: + 'URL or HuggingFace ID of the base model to generate the image.', + }), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(7.5), + num_inference_steps: z + .optional( + z.int().gte(0).lte(150).register(z.globalRegistry, { + description: + '\n Increasing the amount of steps tells Stable Diffusion that it should take more steps\n to generate your final result which can increase the amount of detail in your image.\n ', + }), + ) + .default(30), + mask_url: z.string().register(z.globalRegistry, { + description: + 'Input mask for inpaint mode. Black areas will be preserved, white areas will be inpainted.', + }), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), +}) + +/** + * LCMOutput + */ +export const zSchemaLcmSd15I2iOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + request_id: z + .optional( + z.string().register(z.globalRegistry, { + description: + '\n An id bound to a request, can be used with response to identify the request\n itself.\n ', + }), + ) + .default(''), + timings: z.record(z.string(), z.number()), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), + num_inference_steps: z + .optional( + z.int().register(z.globalRegistry, { + description: + '\n Number of inference steps used to generate the image. 
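+// --- Usage sketch (editorial): validating an ESRGAN request body before
+// sending it. Fields declared `z.optional(...).default(...)` are filled in on
+// parse, so the parsed value always carries `face`, `scale`, and `tile`. The
+// import path and the example URL are assumptions.
+import { zSchemaEsrganInput } from './zod.gen'
+
+const parsed = zSchemaEsrganInput.safeParse({
+  image_url: 'https://example.com/low-res.png',
+  scale: 4,
+})
+if (parsed.success) {
+  // Defaults applied on parse: face === false, tile === 0.
+  console.log(parsed.data.scale, parsed.data.face, parsed.data.tile)
+} else {
+  console.error(parsed.error.issues)
+}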
It will be the same value of the one passed in the\n input or the default one in case none was passed.\n ', + }), + ) + .default(4), + nsfw_content_detected: z.array(z.boolean()).register(z.globalRegistry, { + description: + '\n A list of booleans indicating whether the generated image contains any\n potentially unsafe content. If the safety check is disabled, this field\n will have a false for each generated image.\n ', + }), +}) + +/** + * LCMI2IInput + */ +export const zSchemaLcmSd15I2iInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. Be as descriptive as possible for best results.', + }), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: + '\n The number of images to generate. The function will return a list of images\n with the same prompt and negative prompt but different seeds.\n ', + }), + ) + .default(1), + image_url: z.string().register(z.globalRegistry, { + description: 'The image to use as a base.', + }), + strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The strength of the image.', + }), + ) + .default(0.8), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ', + }), + ) + .default(false), + enable_safety_checks: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the resulting image will be checked whether it includes any\n potentially unsafe content. If it does, it will be replaced with a black\n image.\n ', + }), + ) + .default(true), + guidance_scale: z + .optional( + z.number().gte(0).lte(16).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(1), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), + request_id: z + .optional( + z.string().register(z.globalRegistry, { + description: + '\n An id bound to a request, can be used with response to identify the request\n itself.\n ', + }), + ) + .default(''), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use.Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + num_inference_steps: z + .optional( + z.int().gte(1).lte(12).register(z.globalRegistry, { + description: + '\n The number of inference steps to use for generating the image. 
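+// --- Type-level sketch (editorial): the schemas double as a source of static
+// types. `z.input` gives the pre-parse shape (defaulted fields optional) and
+// `z.output` the post-parse shape (defaulted fields present), so the
+// hand-written interfaces need not be kept in sync manually. Import path and
+// example values are assumptions.
+import { z } from 'zod'
+import { zSchemaInpaintInput } from './zod.gen'
+
+type InpaintIn = z.input<typeof zSchemaInpaintInput>   // defaults optional
+type InpaintOut = z.output<typeof zSchemaInpaintInput> // defaults filled in
+
+const raw: InpaintIn = {
+  prompt: 'a red brick wall',
+  image_url: 'https://example.com/room.png',
+  mask_url: 'https://example.com/mask.png',
+  model_name: 'runwayml/stable-diffusion-v1-5',
+}
+const full: InpaintOut = zSchemaInpaintInput.parse(raw)
+console.log(full.guidance_scale, full.num_inference_steps) // 7.5 30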
The more steps\n the better the image will be but it will also take longer to generate.\n ', + }), + ) + .default(4), +}) + +/** + * Output + */ +export const zSchemaFastSdxlControlnetCannyInpaintingOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * LoraWeight + */ +export const zSchemaLoraWeight = z.object({ + path: z.string().register(z.globalRegistry, { + description: 'URL or the path to the LoRA weights. Or HF model name.', + }), + scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ', + }), + ) + .default(1), +}) + +/** + * ImageSize + */ +export const zSchemaImageSize = z.object({ + height: z + .optional( + z.int().lte(14142).register(z.globalRegistry, { + description: 'The height of the generated image.', + }), + ) + .default(512), + width: z + .optional( + z.int().lte(14142).register(z.globalRegistry, { + description: 'The width of the generated image.', + }), + ) + .default(512), +}) + +/** + * InpaintingControlNetInput + */ +export const zSchemaFastSdxlControlnetCannyInpaintingInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. Be as descriptive as possible for best results.', + }), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + z.null(), + ]), + ), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the prompt will be expanded with additional prompts.', + }), + ) + .default(false), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: 'The list of LoRA weights to use.', + }), + ) + .default([]), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(7.5), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(false), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use.Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. 
moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + controlnet_conditioning_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The scale of the controlnet conditioning.', + }), + ) + .default(0.5), + image_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the image to use as a starting point for the generation.', + }), + strength: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: + 'determines how much the generated image resembles the initial image', + }), + ) + .default(0.95), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ', + }), + ) + .default(false), + control_image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the control image.', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), + mask_url: z.string().register(z.globalRegistry, { + description: 'The URL of the mask to use for inpainting.', + }), + num_inference_steps: z + .optional( + z.int().gte(1).lte(65).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(25), +}) + +/** + * Output + */ +export const zSchemaFastSdxlControlnetCannyImageToImageOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * ImageToImageControlNetInput + */ +export const zSchemaFastSdxlControlnetCannyImageToImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. 
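+// --- Usage sketch (editorial): `image_size` is a union that accepts either a
+// preset name or an explicit ImageSize object (and, in these ControlNet
+// inputs, null). Both forms validate against the same schema. Import path and
+// example URLs are assumptions.
+import { zSchemaFastSdxlControlnetCannyInpaintingInput } from './zod.gen'
+
+const base = {
+  prompt: 'fill the masked region',
+  image_url: 'https://example.com/in.png',
+  control_image_url: 'https://example.com/canny.png',
+  mask_url: 'https://example.com/mask.png',
+}
+// Preset form:
+const preset = zSchemaFastSdxlControlnetCannyInpaintingInput.parse({
+  ...base,
+  image_size: 'square_hd',
+})
+// Explicit form (width/height default to 512 and are capped at 14142):
+const explicit = zSchemaFastSdxlControlnetCannyInpaintingInput.parse({
+  ...base,
+  image_size: { width: 1024, height: 768 },
+})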
Be as descriptive as possible for best results.', + }), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + z.null(), + ]), + ), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the prompt will be expanded with additional prompts.', + }), + ) + .default(false), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: 'The list of LoRA weights to use.', + }), + ) + .default([]), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(7.5), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(false), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use.Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + controlnet_conditioning_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The scale of the controlnet conditioning.', + }), + ) + .default(0.5), + image_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the image to use as a starting point for the generation.', + }), + strength: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: + 'determines how much the generated image resembles the initial image', + }), + ) + .default(0.95), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. 
This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ', + }), + ) + .default(false), + control_image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the control image.', + }), + num_inference_steps: z + .optional( + z.int().gte(1).lte(65).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(25), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), +}) + +/** + * ReferenceFace + */ +export const zSchemaReferenceFace = z.object({ + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the reference face image', + }), +}) + +/** + * OutputModel + */ +export const zSchemaPulidOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'List of generated images', + }), + seed: z.int().register(z.globalRegistry, { + description: 'Random seed used for reproducibility', + }), +}) + +/** + * InputModel + */ +export const zSchemaPulidInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Prompt to generate the face from', + }), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + id_scale: z + .optional( + z.number().lte(5).register(z.globalRegistry, { + description: 'ID scale', + }), + ) + .default(0.8), + mode: z.optional( + z.enum(['fidelity', 'extreme style']).register(z.globalRegistry, { + description: 'Mode of generation', + }), + ), + id_mix: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'if you want to mix two ID image, please turn this on, otherwise, turn this off', + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(1).lte(1.5).register(z.globalRegistry, { + description: 'Guidance scale', + }), + ) + .default(1.2), + num_inference_steps: z + .optional( + z.int().gte(1).lte(12).register(z.globalRegistry, { + description: 'Number of steps to take', + }), + ) + .default(4), + reference_images: z.array(zSchemaReferenceFace).register(z.globalRegistry, { + description: 'List of reference faces, ideally 4 images.', + }), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt to generate the face from', + }), + ) + .default( + 'flaws in the eyes, flaws in the face, flaws, lowres, non-HDRi, low quality, worst quality,artifacts noise, text, watermark, glitch, deformed, mutated, ugly, disfigured, hands, low resolution, partially rendered objects, deformed or partially rendered eyes, deformed, deformed eyeballs, cross-eyed,blurry', + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for reproducibility', + }), + ), +}) + +/** + * MarigoldDepthMapOutput + */ +export const zSchemaImageutilsMarigoldDepthOutput = z.object({ + image: zSchemaImage, +}) + +/** + * MarigoldDepthMapInput + */ +export const zSchemaImageutilsMarigoldDepthInput = z.object({ + ensemble_size: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: + 'Number of 
predictions to average over. Defaults to `10`. The higher the number, the more accurate the result, but the slower the inference.', + }), + ) + .default(10), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: + 'Number of denoising steps. Defaults to `10`. The higher the number, the more accurate the result, but the slower the inference.', + }), + ) + .default(10), + processing_res: z + .optional( + z.int().gte(0).lte(2048).register(z.globalRegistry, { + description: + 'Maximum processing resolution. Defaults `0` which means it uses the size of the input image.', + }), + ) + .default(0), + image_url: z.string().register(z.globalRegistry, { + description: 'Input image url.', + }), +}) + +/** + * DepthMapOutput + */ +export const zSchemaImageutilsDepthOutput = z.object({ + image: zSchemaImage, +}) + +/** + * DepthMapInput + */ +export const zSchemaImageutilsDepthInput = z.object({ + bg_th: z + .optional( + z.number().register(z.globalRegistry, { + description: 'bg_th', + }), + ) + .default(0.1), + a: z + .optional( + z.number().register(z.globalRegistry, { + description: 'a', + }), + ) + .default(6.283185307179586), + depth_and_normal: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'depth_and_normal', + }), + ) + .default(false), + image_url: z.string().register(z.globalRegistry, { + description: 'Input image url.', + }), +}) + +/** + * RetoucherOutput + */ +export const zSchemaRetoucherOutput = z.object({ + image: zSchemaImage, + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for the generation.', + }), +}) + +/** + * RetoucherInput + */ +export const zSchemaRetoucherInput = z.object({ + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Seed for reproducibility. Different seeds will make slightly different results.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to be retouched.', + }), +}) + +/** + * Output + */ +export const zSchemaFastLcmDiffusionImageToImageOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * ImageToImageLCMInput + */ +export const zSchemaFastLcmDiffusionImageToImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. 
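+// --- Usage sketch (editorial): a minimal valid PuLID payload. The schema
+// above requires only `prompt` and `reference_images` (ideally four faces,
+// per its description); everything else is defaulted. Import path and URLs
+// are assumptions.
+import { zSchemaPulidInput } from './zod.gen'
+
+const pulid = zSchemaPulidInput.parse({
+  prompt: 'portrait photo, studio lighting',
+  reference_images: [
+    { image_url: 'https://example.com/face-1.png' },
+    { image_url: 'https://example.com/face-2.png' },
+  ],
+})
+// Defaults land on the parsed value: id_scale 0.8, guidance_scale 1.2,
+// num_inference_steps 4, and the long stock negative_prompt.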
Be as descriptive as possible for best results.', + }), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the prompt will be expanded with additional prompts.', + }), + ) + .default(false), + guidance_rescale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The rescale factor for the CFG.', + }), + ) + .default(0), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(1.5), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + preserve_aspect_ratio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the aspect ratio of the generated image will be preserved even\n if the image size is too large. However, if the image is not a multiple of 32\n in width or height, it will be resized to the nearest multiple of 32. By default,\n this snapping to the nearest multiple of 32 will not preserve the aspect ratio.\n Set crop_output to True, to crop the output to the proper aspect ratio\n after generating.\n ', + }), + ) + .default(false), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use.Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + crop_output: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the output cropped to the proper aspect ratio after generating.\n ', + }), + ) + .default(false), + format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the image to use as a starting point for the generation.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(true), + model_name: z.optional( + z + .enum([ + 'stabilityai/stable-diffusion-xl-base-1.0', + 'runwayml/stable-diffusion-v1-5', + ]) + .register(z.globalRegistry, { + description: 'The name of the model to use.', + }), + ), + safety_checker_version: z.optional( + z.enum(['v1', 'v2']).register(z.globalRegistry, { + description: + 'The version of the safety checker to use. v1 is the default CompVis safety checker. 
v2 uses a custom ViT model.', + }), + ), + request_id: z + .optional( + z.string().register(z.globalRegistry, { + description: + '\n An id bound to a request, can be used with response to identify the request\n itself.\n ', + }), + ) + .default(''), + num_inference_steps: z + .optional( + z.int().gte(1).lte(32).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(6), + strength: z + .optional( + z.number().gte(0.05).lte(1).register(z.globalRegistry, { + description: + 'determines how much the generated image resembles the initial image', + }), + ) + .default(0.95), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), +}) + +/** + * Output + */ +export const zSchemaFastLcmDiffusionInpaintingOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * InpaintingLCMInput + */ +export const zSchemaFastLcmDiffusionInpaintingInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. Be as descriptive as possible for best results.', + }), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the prompt will be expanded with additional prompts.', + }), + ) + .default(false), + guidance_rescale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The rescale factor for the CFG.', + }), + ) + .default(0), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(1.5), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use.Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. 
moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the image to use as a starting point for the generation.', + }), + strength: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: + 'determines how much the generated image resembles the initial image', + }), + ) + .default(0.95), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(true), + safety_checker_version: z.optional( + z.enum(['v1', 'v2']).register(z.globalRegistry, { + description: + 'The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model.', + }), + ), + request_id: z + .optional( + z.string().register(z.globalRegistry, { + description: + '\n An id bound to a request, can be used with response to identify the request\n itself.\n ', + }), + ) + .default(''), + num_inference_steps: z + .optional( + z.int().gte(1).lte(32).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(6), + mask_url: z.string().register(z.globalRegistry, { + description: 'The URL of the mask to use for inpainting.', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), + model_name: z.optional( + z + .enum([ + 'stabilityai/stable-diffusion-xl-base-1.0', + 'runwayml/stable-diffusion-v1-5', + ]) + .register(z.globalRegistry, { + description: 'The name of the model to use.', + }), + ), +}) + +/** + * Output + */ +export const zSchemaPlaygroundV25InpaintingOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * Embedding + */ +export const zSchemaEmbedding = z.object({ + tokens: z + .optional( + z.array(z.string()).register(z.globalRegistry, { + description: 'The list of tokens to use for the embedding.', + }), + ) + .default(['', '']), + path: z.string().register(z.globalRegistry, { + description: 'URL or the path to the embedding weights.', + }), +}) + +/** + * InpaintingPlaygroundv25Input + */ +export const zSchemaPlaygroundV25InpaintingInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. 
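+// --- Usage sketch (editorial): the two generated layers composed. Validate
+// with the Zod schema, then send the parsed value as the typed request body.
+// The Data type is assumed to be exported alongside its Response types
+// earlier in this diff; host, auth format, and import paths are assumptions.
+import { zSchemaFastLcmDiffusionImageToImageInput } from './zod.gen'
+import type { PostFalAiFastLcmDiffusionImageToImageData } from './types.gen'
+
+async function runFastLcm(input: unknown) {
+  const body = zSchemaFastLcmDiffusionImageToImageInput.parse(input)
+  const data: PostFalAiFastLcmDiffusionImageToImageData = {
+    body,
+    url: '/fal-ai/fast-lcm-diffusion/image-to-image',
+  }
+  return fetch(`https://queue.fal.run${data.url}`, {
+    method: 'POST',
+    headers: {
+      Authorization: `Key ${process.env.FAL_KEY}`, // assumed auth format
+      'Content-Type': 'application/json',
+    },
+    body: JSON.stringify(data.body),
+  })
+}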
Be as descriptive as possible for best results.', + }), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + embeddings: z + .optional( + z.array(zSchemaEmbedding).register(z.globalRegistry, { + description: 'The list of embeddings to use.', + }), + ) + .default([]), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the prompt will be expanded with additional prompts.', + }), + ) + .default(false), + guidance_rescale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The rescale factor for the CFG.', + }), + ) + .default(0), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use.Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the image to use as a starting point for the generation.', + }), + strength: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: + 'determines how much the generated image resembles the initial image', + }), + ) + .default(0.95), + safety_checker_version: z.optional( + z.enum(['v1', 'v2']).register(z.globalRegistry, { + description: + 'The version of the safety checker to use. v1 is the default CompVis safety checker. 
v2 uses a custom ViT model.', + }), + ), + request_id: z + .optional( + z.string().register(z.globalRegistry, { + description: + '\n An id bound to a request, can be used with response to identify the request\n itself.\n ', + }), + ) + .default(''), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), + mask_url: z.string().register(z.globalRegistry, { + description: 'The URL of the mask to use for inpainting.', + }), + num_inference_steps: z + .optional( + z.int().gte(1).lte(65).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(25), +}) + +/** + * Output + */ +export const zSchemaFastLightningSdxlInpaintingOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * InpaintingLightningInput + */ +export const zSchemaFastLightningSdxlInpaintingInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. Be as descriptive as possible for best results.', + }), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + embeddings: z + .optional( + z.array(zSchemaEmbedding).register(z.globalRegistry, { + description: 'The list of embeddings to use.', + }), + ) + .default([]), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the prompt will be expanded with additional prompts.', + }), + ) + .default(false), + guidance_rescale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The rescale factor for the CFG.', + }), + ) + .default(0), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the image to use as a starting point for the generation.', + }), + strength: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: + 'determines how much the generated image resembles the initial image', + }), + ) + .default(0.95), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + 
.default(false), + safety_checker_version: z.optional( + z.enum(['v1', 'v2']).register(z.globalRegistry, { + description: + 'The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model.', + }), + ), + request_id: z + .optional( + z.string().register(z.globalRegistry, { + description: + '\n An id bound to a request, can be used with response to identify the request\n itself.\n ', + }), + ) + .default(''), + num_inference_steps: z.optional( + z.enum(['1', '2', '4', '8']).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ), + mask_url: z.string().register(z.globalRegistry, { + description: 'The URL of the mask to use for inpainting.', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), +}) + +/** + * Output + */ +export const zSchemaFastLightningSdxlImageToImageOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * ImageToImageLightningInput + */ +export const zSchemaFastLightningSdxlImageToImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. Be as descriptive as possible for best results.', + }), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + embeddings: z + .optional( + z.array(zSchemaEmbedding).register(z.globalRegistry, { + description: 'The list of embeddings to use.', + }), + ) + .default([]), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the prompt will be expanded with additional prompts.', + }), + ) + .default(false), + guidance_rescale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The rescale factor for the CFG.', + }), + ) + .default(0), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + preserve_aspect_ratio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the aspect ratio of the generated image will be preserved even\n if the image size is too large. However, if the image is not a multiple of 32\n in width or height, it will be resized to the nearest multiple of 32. 
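+// --- Usage sketch (editorial): a quirk of the Lightning schemas worth
+// noting. `num_inference_steps` is a string enum ('1' | '2' | '4' | '8')
+// rather than an integer, matching the distilled checkpoints' fixed step
+// counts, so the number 4 fails validation where the string '4' passes.
+// Import path and URLs are assumptions.
+import { zSchemaFastLightningSdxlInpaintingInput } from './zod.gen'
+
+const ok = zSchemaFastLightningSdxlInpaintingInput.safeParse({
+  prompt: 'repair the scratch',
+  image_url: 'https://example.com/in.png',
+  mask_url: 'https://example.com/mask.png',
+  num_inference_steps: '4', // string, not the number 4
+})
+console.log(ok.success) // true; with the number 4 instead, success is false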
By default,\n this snapping to the nearest multiple of 32 will not preserve the aspect ratio.\n Set crop_output to True, to crop the output to the proper aspect ratio\n after generating.\n ', + }), + ) + .default(false), + crop_output: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the output cropped to the proper aspect ratio after generating.\n ', + }), + ) + .default(false), + format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the image to use as a starting point for the generation.', + }), + strength: z + .optional( + z.number().gte(0.05).lte(1).register(z.globalRegistry, { + description: + 'determines how much the generated image resembles the initial image', + }), + ) + .default(0.95), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + safety_checker_version: z.optional( + z.enum(['v1', 'v2']).register(z.globalRegistry, { + description: + 'The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model.', + }), + ), + request_id: z + .optional( + z.string().register(z.globalRegistry, { + description: + '\n An id bound to a request, can be used with response to identify the request\n itself.\n ', + }), + ) + .default(''), + num_inference_steps: z.optional( + z.enum(['1', '2', '4', '8']).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), +}) + +/** + * Output + */ +export const zSchemaPlaygroundV25ImageToImageOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * ImageToImagePlaygroundv25Input + */ +export const zSchemaPlaygroundV25ImageToImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. 
Be as descriptive as possible for best results.', + }), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + embeddings: z + .optional( + z.array(zSchemaEmbedding).register(z.globalRegistry, { + description: 'The list of embeddings to use.', + }), + ) + .default([]), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the prompt will be expanded with additional prompts.', + }), + ) + .default(false), + guidance_rescale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The rescale factor for the CFG.', + }), + ) + .default(0), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3), + preserve_aspect_ratio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the aspect ratio of the generated image will be preserved even\n if the image size is too large. However, if the image is not a multiple of 32\n in width or height, it will be resized to the nearest multiple of 32. By default,\n this snapping to the nearest multiple of 32 will not preserve the aspect ratio.\n Set crop_output to True, to crop the output to the proper aspect ratio\n after generating.\n ', + }), + ) + .default(false), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use.Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + crop_output: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the output cropped to the proper aspect ratio after generating.\n ', + }), + ) + .default(false), + format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the image to use as a starting point for the generation.', + }), + strength: z + .optional( + z.number().gte(0.05).lte(1).register(z.globalRegistry, { + description: + 'determines how much the generated image resembles the initial image', + }), + ) + .default(0.95), + safety_checker_version: z.optional( + z.enum(['v1', 'v2']).register(z.globalRegistry, { + description: + 'The version of the safety checker to use. v1 is the default CompVis safety checker. 
v2 uses a custom ViT model.', + }), + ), + request_id: z + .optional( + z.string().register(z.globalRegistry, { + description: + '\n An id bound to a request, can be used with response to identify the request\n itself.\n ', + }), + ) + .default(''), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(1).lte(65).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(25), +}) + +/** + * ImageFile + */ +export const zSchemaImageFile = z.object({ + height: z.optional( + z.int().register(z.globalRegistry, { + description: 'The height of the image', + }), + ), + file_size: z.optional( + z.int().register(z.globalRegistry, { + description: 'The size of the file in bytes.', + }), + ), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), + width: z.optional( + z.int().register(z.globalRegistry, { + description: 'The width of the image', + }), + ), + file_name: z.optional( + z.string().register(z.globalRegistry, { + description: + 'The name of the file. It will be auto-generated if not provided.', + }), + ), + content_type: z.optional( + z.string().register(z.globalRegistry, { + description: 'The mime type of the file.', + }), + ), + file_data: z.optional( + z.string().register(z.globalRegistry, { + description: 'File data', + }), + ), +}) + +/** + * Output + */ +export const zSchemaBirefnetOutput = z.object({ + image: zSchemaImageFile, + mask_image: z.optional(zSchemaImageFile), +}) + +/** + * Input + */ +export const zSchemaBirefnetInput = z.object({ + operating_resolution: z.optional( + z.enum(['1024x1024', '2048x2048']).register(z.globalRegistry, { + description: + 'The resolution to operate on. The higher the resolution, the more accurate the output will be for high res input images.', + }), + ), + output_format: z.optional( + z.enum(['webp', 'png', 'gif']).register(z.globalRegistry, { + description: 'The format of the output image', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to remove background from', + }), + model: z.optional( + z + .enum(['General Use (Light)', 'General Use (Heavy)', 'Portrait']) + .register(z.globalRegistry, { + description: + "\n Model to use for background removal.\n The 'General Use (Light)' model is the original model used in the BiRefNet repository.\n The 'General Use (Heavy)' model is a slower but more accurate model.\n The 'Portrait' model is a model trained specifically for portrait images.\n The 'General Use (Light)' model is recommended for most use cases.\n\n The corresponding models are as follows:\n - 'General Use (Light)': BiRefNet-DIS_ep580.pth\n - 'General Use (Heavy)': BiRefNet-massive-epoch_240.pth\n - 'Portrait': BiRefNet-portrait-TR_P3M_10k-epoch_120.pth\n ", + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + output_mask: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to output the mask used to remove the background', + }), + ) + .default(false), + refine_foreground: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to refine the foreground using the estimated mask', + }), + ) + .default(true), +})
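+
+// NOTE (editor's illustration, hypothetical; not generator output): a minimal sketch of
+// how a generated input/output schema pair might be used to validate an endpoint call.
+// The endpoint id 'fal-ai/birefnet' and the '@fal-ai/client' subscribe call are
+// assumptions for this example, not part of this file.
+//
+//   import { fal } from '@fal-ai/client'
+//
+//   const input = zSchemaBirefnetInput.parse({
+//     image_url: 'https://example.com/photo.png',
+//     output_mask: true,
+//   })
+//   const result = await fal.subscribe('fal-ai/birefnet', { input })
+//   const output = zSchemaBirefnetOutput.parse(result.data)
+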
+/** + * CreativeUpscalerOutput + */ +export const zSchemaCreativeUpscalerOutput = z.object({ + image: zSchemaImage, + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * CreativeUpscalerInput + */ +export const zSchemaCreativeUpscalerInput = z.object({ + shape_preservation: z + .optional( + z.number().gte(0).lte(3).register(z.globalRegistry, { + description: 'How much to preserve the shape of the original image', + }), + ) + .default(0.25), + prompt: z.optional(z.union([z.string(), z.null()])), + additional_embedding_url: z.optional( + z.string().register(z.globalRegistry, { + description: + 'The URL to the additional embeddings to use for the upscaling. Default is None', + }), + ), + enable_safety_checks: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the resulting image will be checked whether it includes any\n potentially unsafe content. If it does, it will be replaced with a black\n image.\n ', + }), + ) + .default(true), + additional_lora_url: z.optional( + z.string().register(z.globalRegistry, { + description: + 'The URL to the additional LORA model to use for the upscaling. Default is None', + }), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(16).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(7.5), + scale: z + .optional( + z.number().gte(1).lte(5).register(z.globalRegistry, { + description: + 'The scale of the output image. 
The higher the scale, the bigger the output image will be.', + }), + ) + .default(2), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use.Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default( + 'blurry, low resolution, bad, ugly, low quality, pixelated, interpolated, compression artifacts, noisey, grainy', + ), + skip_ccsr: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the image will not be processed by the CCSR model before\n being processed by the creativity model.\n ', + }), + ) + .default(false), + additional_lora_scale: z + .optional( + z.number().register(z.globalRegistry, { + description: + 'The scale of the additional LORA model to use for the upscaling. Default is 1.0', + }), + ) + .default(1), + detail: z + .optional( + z.number().gte(0).lte(5).register(z.globalRegistry, { + description: 'How much detail to add', + }), + ) + .default(1), + base_model_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The URL to the base model to use for the upscaling', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The image to upscale.', + }), + creativity: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'How much the output can deviate from the original', + }), + ) + .default(0.5), + override_size_limits: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n Allow for large uploads that could take a very long time.\n ', + }), + ) + .default(false), + prompt_suffix: z + .optional( + z.string().register(z.globalRegistry, { + description: + "The suffix to add to the prompt. This is useful to add a common ending to all prompts such as 'high quality' etc or embedding tokens.", + }), + ) + .default(' high quality, highly detailed, high resolution, sharp'), + num_inference_steps: z + .optional( + z.int().gte(1).lte(200).register(z.globalRegistry, { + description: + '\n The number of inference steps to use for generating the image. The more steps\n the better the image will be but it will also take longer to generate.\n ', + }), + ) + .default(20), + model_type: z.optional( + z.enum(['SD_1_5', 'SDXL']).register(z.globalRegistry, { + description: + 'The type of model to use for the upscaling. Default is SD_1_5', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), +}) + +/** + * PhotoMakerOutput + */ +export const zSchemaPhotomakerOutput = z.object({ + images: z.array(zSchemaImage), + seed: z.int(), +}) + +/** + * PhotoMakerInput + */ +export const zSchemaPhotomakerInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. Be as descriptive as possible for best results.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: + '\n Number of images to generate in one request. 
Note that the higher the batch size,\n the longer it will take to generate the images.\n ', + }), + ) + .default(1), + style_strength: z.optional(z.int().gte(15).lte(50)).default(20), + style: z.optional( + z.enum([ + '(No style)', + 'Cinematic', + 'Disney Character', + 'Digital Art', + 'Photographic', + 'Fantasy art', + 'Neonpunk', + 'Enhance', + 'Comic book', + 'Lowpoly', + 'Line art', + ]), + ), + guidance_scale: z + .optional( + z.number().gte(0.1).lte(10).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), + image_archive_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the image archive containing the images you want to use.', + }), + initial_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'Optional initial image for img2img', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(20).lte(100).register(z.globalRegistry, { + description: + '\n Increasing the amount of steps tells Stable Diffusion that it should take more steps\n to generate your final result which can increase the amount of detail in your image.\n ', + }), + ) + .default(50), + initial_image_strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'How much noise to add to the latent image. 0 for no noise, 1 for maximum noise.', + }), + ) + .default(0.5), + base_pipeline: z.optional( + z.enum(['photomaker', 'photomaker-style']).register(z.globalRegistry, { + description: 'The base pipeline to use for generating the image.', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), +}) + +/** + * FaceToStickerOutput + */ +export const zSchemaFaceToStickerOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated images.', + }), + sticker_image: zSchemaImage, + sticker_image_background_removed: zSchemaImage, + seed: z.int().register(z.globalRegistry, { + description: 'Seed used during the inference.', + }), + has_nsfw_concepts: z + .record(z.string(), z.boolean()) + .register(z.globalRegistry, { + description: + '\n Whether the generated images contain NSFW concepts.\n The key is the image type and the value is a boolean.\n ', + }), +}) + +/** + * FaceToStickerInput + */ +export const zSchemaFaceToStickerInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. 
Be as descriptive as possible for best results.', + }), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to false, the safety checker will be disabled.', + }), + ) + .default(true), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + ip_adapter_weight: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The weight of the IP adapter.', + }), + ) + .default(0.2), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the video.', + }), + upscale_steps: z + .optional( + z.int().gte(1).lte(20).register(z.globalRegistry, { + description: + 'The number of steps to use for upscaling. Only used if `upscale` is `true`.', + }), + ) + .default(10), + instant_id_strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The strength of the instant ID.', + }), + ) + .default(0.7), + upscale: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to upscale the image 2x.', + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(10).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(4.5), + num_inference_steps: z + .optional( + z.int().gte(10).lte(40).register(z.globalRegistry, { + description: + '\n Increasing the amount of steps tells Stable Diffusion that it should take more steps\n to generate your final result which can increase the amount of detail in your image.\n ', + }), + ) + .default(20), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + ip_adapter_noise: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The amount of noise to add to the IP adapter.', + }), + ) + .default(0.5), +}) + +/** + * Output + */ +export const zSchemaFastSdxlInpaintingOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + timings: z.record(z.string(), z.number()), +}) + +/** + * InpaintingInput + */ +export const zSchemaFastSdxlInpaintingInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. 
Be as descriptive as possible for best results.', + }), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + embeddings: z + .optional( + z.array(zSchemaEmbedding).register(z.globalRegistry, { + description: 'The list of embeddings to use.', + }), + ) + .default([]), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the prompt will be expanded with additional prompts.', + }), + ) + .default(false), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: 'The list of LoRA weights to use.', + }), + ) + .default([]), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(7.5), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use.Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the image to use as a starting point for the generation.', + }), + strength: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: + 'determines how much the generated image resembles the initial image', + }), + ) + .default(0.95), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + safety_checker_version: z.optional( + z.enum(['v1', 'v2']).register(z.globalRegistry, { + description: + 'The version of the safety checker to use. v1 is the default CompVis safety checker. 
v2 uses a custom ViT model.', + }), + ), + request_id: z + .optional( + z.string().register(z.globalRegistry, { + description: + '\n An id bound to a request, can be used with response to identify the request\n itself.\n ', + }), + ) + .default(''), + num_inference_steps: z + .optional( + z.int().gte(1).lte(65).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(25), + mask_url: z.string().register(z.globalRegistry, { + description: 'The URL of the mask to use for inpainting.', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), +}) + +/** + * Output + */ +export const zSchemaFastSdxlImageToImageOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + timings: z.record(z.string(), z.number()), +}) + +/** + * ImageToImageInput + */ +export const zSchemaFastSdxlImageToImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. Be as descriptive as possible for best results.', + }), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + embeddings: z + .optional( + z.array(zSchemaEmbedding).register(z.globalRegistry, { + description: 'The list of embeddings to use.', + }), + ) + .default([]), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the prompt will be expanded with additional prompts.', + }), + ) + .default(false), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: 'The list of LoRA weights to use.', + }), + ) + .default([]), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(7.5), + preserve_aspect_ratio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the aspect ratio of the generated image will be preserved even\n if the image size is too large. However, if the image is not a multiple of 32\n in width or height, it will be resized to the nearest multiple of 32. 
By default,\n this snapping to the nearest multiple of 32 will not preserve the aspect ratio.\n Set crop_output to True, to crop the output to the proper aspect ratio\n after generating.\n ', + }), + ) + .default(false), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use.Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + crop_output: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the output cropped to the proper aspect ratio after generating.\n ', + }), + ) + .default(false), + format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the image to use as a starting point for the generation.', + }), + strength: z + .optional( + z.number().gte(0.05).lte(1).register(z.globalRegistry, { + description: + 'determines how much the generated image resembles the initial image', + }), + ) + .default(0.95), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + safety_checker_version: z.optional( + z.enum(['v1', 'v2']).register(z.globalRegistry, { + description: + 'The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model.', + }), + ), + request_id: z + .optional( + z.string().register(z.globalRegistry, { + description: + '\n An id bound to a request, can be used with response to identify the request\n itself.\n ', + }), + ) + .default(''), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(1).lte(65).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(25), +}) + +/** + * File + */ +export const zSchemaFile = z.object({ + file_size: z.optional( + z.int().register(z.globalRegistry, { + description: 'The size of the file in bytes.', + }), + ), + file_name: z.optional( + z.string().register(z.globalRegistry, { + description: + 'The name of the file. It will be auto-generated if not provided.', + }), + ), + content_type: z.optional( + z.string().register(z.globalRegistry, { + description: 'The mime type of the file.', + }), + ), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), + file_data: z.optional( + z.string().register(z.globalRegistry, { + description: 'File data', + }), + ), +}) + +/** + * OutputParameters + */ +export const zSchemaLoraImageToImageOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + debug_latents: z.optional(zSchemaFile), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + debug_per_pass_latents: z.optional(zSchemaFile), +}) + +/** + * ControlNet + */ +export const zSchemaControlNet = z.object({ + conditioning_scale: z + .optional( + z.number().gte(0).lte(2).register(z.globalRegistry, { + description: + '\n The scale of the control net weight. This is used to scale the control net weight\n before merging it with the base model.\n ', + }), + ) + .default(1), + path: z.string().register(z.globalRegistry, { + description: 'URL or the path to the control net weights.', + }), + ip_adapter_index: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The index of the IP adapter to be applied to the controlnet. This is only needed for InstantID ControlNets.\n ', + }), + ), + end_percentage: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n The percentage of the image to end applying the controlnet in terms of the total timesteps.\n ', + }), + ) + .default(1), + config_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'optional URL to the controlnet config.json file.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to be used as the control net.', + }), + variant: z.optional( + z.string().register(z.globalRegistry, { + description: 'The optional variant if a Hugging Face repo key is used.', + }), + ), + mask_url: z.optional( + z.string().register(z.globalRegistry, { + description: + '\n The mask to use for the controlnet. When using a mask, the control image size and the mask size must be the same and divisible by 32.\n ', + }), + ), + start_percentage: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n The percentage of the image to start applying the controlnet in terms of the total timesteps.\n ', + }), + ) + .default(0), +}) + +/** + * TimestepsInput + */ +export const zSchemaTimestepsInput = z.object({ + method: z.optional( + z.enum(['default', 'array']).register(z.globalRegistry, { + description: + "\n The method to use for the timesteps. If set to 'array', the timesteps will be set based\n on the provided timesteps schedule in the `array` field.\n Defaults to 'default' which means the scheduler will use the `num_inference_steps` parameter.\n ", + }), + ), + array: z + .optional( + z.array(z.int()).register(z.globalRegistry, { + description: + "\n Timesteps schedule to be used if 'custom' method is selected.\n ", + }), + ) + .default([]), +}) + +/** + * SigmasInput + */ +export const zSchemaSigmasInput = z.object({ + method: z.optional( + z.enum(['default', 'array']).register(z.globalRegistry, { + description: + "\n The method to use for the sigmas. 
If set to 'custom', the sigmas will be set based\n on the provided sigmas schedule in the `array` field.\n Defaults to 'default' which means the scheduler will use the sigmas of the scheduler.\n ", + }), + ), + array: z + .optional( + z.array(z.number()).register(z.globalRegistry, { + description: + "\n Sigmas schedule to be used if 'custom' method is selected.\n ", + }), + ) + .default([]), +}) + +/** + * IPAdapter + */ +export const zSchemaIpAdapter = z.object({ + unconditional_noising_factor: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The factor to apply to the unconditional noising of the IP adapter.', + }), + ) + .default(0), + ip_adapter_image_url: z.union([z.string(), z.array(z.string())]), + path: z.string().register(z.globalRegistry, { + description: 'URL or the path to the IP adapter weights.', + }), + image_projection_shortcut: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n The value to set the image projection shortcut to. For FaceID plus V1 models,\n this should be set to False. For FaceID plus V2 models, this should be set to True.\n Default is True.\n ', + }), + ) + .default(true), + scale_json: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: + '\n The scale of the IP adapter weight. This is used to scale the IP adapter weight\n before merging it with the base model.\n ', + }), + ), + ip_adapter_mask_url: z.optional( + z.string().register(z.globalRegistry, { + description: + '\n The mask to use for the IP adapter. When using a mask, the ip-adapter image size and the mask size must be the same\n ', + }), + ), + model_subfolder: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Subfolder in the model directory where the IP adapter weights are stored.', + }), + ), + scale: z + .optional( + z.number().gte(0).register(z.globalRegistry, { + description: + '\n The scale of the IP adapter weight. This is used to scale the IP adapter weight\n before merging it with the base model.\n ', + }), + ) + .default(1), + insight_face_model_path: z.optional( + z.string().register(z.globalRegistry, { + description: 'URL or the path to the InsightFace model weights.', + }), + ), + weight_name: z.optional( + z.string().register(z.globalRegistry, { + description: 'Name of the weight file.', + }), + ), +}) + +/** + * ImageToImageInput + */ +export const zSchemaLoraImageToImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. Be as descriptive as possible for best results.', + }), + noise_strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The amount of noise to add to noise image for image. Only used if the image_url is provided. 1.0 is complete noise and 0 is no noise.', + }), + ) + .default(0.5), + tile_height: z + .optional( + z.int().gte(128).lte(4096).register(z.globalRegistry, { + description: + 'The size of the tiles to be used for the image generation.', + }), + ) + .default(4096), + embeddings: z + .optional( + z.array(zSchemaEmbedding).register(z.globalRegistry, { + description: + '\n The embeddings to use for the image generation. 
Only a single embedding is supported at the moment.\n The embeddings will be used to map the tokens in the prompt to the embedding weights.\n ', + }), + ) + .default([]), + ic_light_model_url: z.optional( + z.string().register(z.globalRegistry, { + description: + '\n The URL of the IC Light model to use for the image generation.\n ', + }), + ), + image_encoder_weight_name: z + .optional( + z.string().register(z.globalRegistry, { + description: + '\n The weight name of the image encoder model to use for the image generation.\n ', + }), + ) + .default('pytorch_model.bin'), + ip_adapter: z + .optional( + z.array(zSchemaIpAdapter).register(z.globalRegistry, { + description: + '\n The IP adapter to use for the image generation.\n ', + }), + ) + .default([]), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + scheduler: z.optional( + z + .enum([ + 'DPM++ 2M', + 'DPM++ 2M Karras', + 'DPM++ 2M SDE', + 'DPM++ 2M SDE Karras', + 'Euler', + 'Euler A', + 'Euler (trailing timesteps)', + 'LCM', + 'LCM (trailing timesteps)', + 'DDIM', + 'TCD', + ]) + .register(z.globalRegistry, { + description: + 'Scheduler / sampler to use for the image denoising process.', + }), + ), + sigmas: z.optional(zSchemaSigmasInput), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(7.5), + tile_stride_width: z + .optional( + z.int().gte(64).lte(2048).register(z.globalRegistry, { + description: + 'The stride of the tiles to be used for the image generation.', + }), + ) + .default(2048), + debug_per_pass_latents: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the latents will be saved for debugging per pass.', + }), + ) + .default(false), + timesteps: z.optional(zSchemaTimestepsInput), + model_name: z.string().register(z.globalRegistry, { + description: + 'URL or HuggingFace ID of the base model to generate the image.', + }), + prompt_weighting: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the prompt weighting syntax will be used.\n Additionally, this will lift the 77 token limit by averaging embeddings.\n ', + }), + ) + .default(false), + variant: z.optional( + z.string().register(z.globalRegistry, { + description: + "The variant of the model to use for huggingface models, e.g. 
'fp16'.", + }), + ), + image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'URL of image to use for image to image/inpainting.', + }), + ), + controlnet_guess_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the controlnet will be applied to only the conditional predictions.\n ', + }), + ) + .default(false), + image_encoder_subfolder: z.optional( + z.string().register(z.globalRegistry, { + description: + '\n The subfolder of the image encoder model to use for the image generation.\n ', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), + ic_light_model_background_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: + '\n The URL of the IC Light model background image to use for the image generation.\n Make sure to use a background compatible with the model.\n ', + }), + ), + rescale_betas_snr_zero: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n Whether to set the rescale_betas_snr_zero option or not for the sampler\n ', + }), + ) + .default(false), + tile_width: z + .optional( + z.int().gte(128).lte(4096).register(z.globalRegistry, { + description: + 'The size of the tiles to be used for the image generation.', + }), + ) + .default(4096), + prediction_type: z.optional( + z.enum(['v_prediction', 'epsilon']).register(z.globalRegistry, { + description: + '\n The type of prediction to use for the image generation.\n The `epsilon` is the default.\n ', + }), + ), + eta: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The eta value to be used for the image generation.', + }), + ) + .default(0), + image_encoder_path: z.optional( + z.string().register(z.globalRegistry, { + description: + '\n The path to the image encoder model to use for the image generation.\n ', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(false), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use.Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + image_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: + '\n Number of images to generate in one request. 
Note that the higher the batch size,\n the longer it will take to generate the images.\n ', + }), + ) + .default(1), + debug_latents: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the latents will be saved for debugging.', + }), + ) + .default(false), + ic_light_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: + '\n The URL of the IC Light model image to use for the image generation.\n ', + }), + ), + unet_name: z.optional( + z.string().register(z.globalRegistry, { + description: + 'URL or HuggingFace ID of the custom U-Net model to use for the image generation.', + }), + ), + clip_skip: z + .optional( + z.int().gte(0).lte(2).register(z.globalRegistry, { + description: + '\n Skips part of the image generation process, leading to slightly different results.\n This means the image renders faster, too.\n ', + }), + ) + .default(0), + tile_stride_height: z + .optional( + z.int().gte(64).lte(2048).register(z.globalRegistry, { + description: + 'The stride of the tiles to be used for the image generation.', + }), + ) + .default(2048), + controlnets: z + .optional( + z.array(zSchemaControlNet).register(z.globalRegistry, { + description: + '\n The control nets to use for the image generation. You can use any number of control nets\n and they will be applied to the image at the specified timesteps.\n ', + }), + ) + .default([]), + num_inference_steps: z + .optional( + z.int().gte(1).lte(150).register(z.globalRegistry, { + description: + '\n Increasing the amount of steps tells Stable Diffusion that it should take more steps\n to generate your final result which can increase the amount of detail in your image.\n ', + }), + ) + .default(30), +}) + +/** + * OutputParameters + */ +export const zSchemaLoraInpaintOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + debug_latents: z.optional(zSchemaFile), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + debug_per_pass_latents: z.optional(zSchemaFile), +}) + +/** + * InpaintInput + */ +export const zSchemaLoraInpaintInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. Be as descriptive as possible for best results.', + }), + noise_strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The amount of noise to add to noise image for image. Only used if the image_url is provided. 1.0 is complete noise and 0 is no noise.', + }), + ) + .default(0.5), + tile_height: z + .optional( + z.int().gte(128).lte(4096).register(z.globalRegistry, { + description: + 'The size of the tiles to be used for the image generation.', + }), + ) + .default(4096), + embeddings: z + .optional( + z.array(zSchemaEmbedding).register(z.globalRegistry, { + description: + '\n The embeddings to use for the image generation. 
Only a single embedding is supported at the moment.\n The embeddings will be used to map the tokens in the prompt to the embedding weights.\n ', + }), + ) + .default([]), + ic_light_model_url: z.optional( + z.string().register(z.globalRegistry, { + description: + '\n The URL of the IC Light model to use for the image generation.\n ', + }), + ), + image_encoder_weight_name: z + .optional( + z.string().register(z.globalRegistry, { + description: + '\n The weight name of the image encoder model to use for the image generation.\n ', + }), + ) + .default('pytorch_model.bin'), + ip_adapter: z + .optional( + z.array(zSchemaIpAdapter).register(z.globalRegistry, { + description: + '\n The IP adapter to use for the image generation.\n ', + }), + ) + .default([]), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + scheduler: z.optional( + z + .enum([ + 'DPM++ 2M', + 'DPM++ 2M Karras', + 'DPM++ 2M SDE', + 'DPM++ 2M SDE Karras', + 'Euler', + 'Euler A', + 'Euler (trailing timesteps)', + 'LCM', + 'LCM (trailing timesteps)', + 'DDIM', + 'TCD', + ]) + .register(z.globalRegistry, { + description: + 'Scheduler / sampler to use for the image denoising process.', + }), + ), + sigmas: z.optional(zSchemaSigmasInput), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(7.5), + tile_stride_width: z + .optional( + z.int().gte(64).lte(2048).register(z.globalRegistry, { + description: + 'The stride of the tiles to be used for the image generation.', + }), + ) + .default(2048), + debug_per_pass_latents: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the latents will be saved for debugging per pass.', + }), + ) + .default(false), + timesteps: z.optional(zSchemaTimestepsInput), + model_name: z.string().register(z.globalRegistry, { + description: + 'URL or HuggingFace ID of the base model to generate the image.', + }), + prompt_weighting: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the prompt weighting syntax will be used.\n Additionally, this will lift the 77 token limit by averaging embeddings.\n ', + }), + ) + .default(false), + variant: z.optional( + z.string().register(z.globalRegistry, { + description: + "The variant of the model to use for huggingface models, e.g. 
'fp16'.", + }), + ), + image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'URL of image to use for image to image/inpainting.', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), + mask_url: z.optional( + z.string().register(z.globalRegistry, { + description: + 'URL of black-and-white image to use as mask during inpainting.', + }), + ), + image_encoder_subfolder: z.optional( + z.string().register(z.globalRegistry, { + description: + '\n The subfolder of the image encoder model to use for the image generation.\n ', + }), + ), + ic_light_model_background_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: + '\n The URL of the IC Light model background image to use for the image generation.\n Make sure to use a background compatible with the model.\n ', + }), + ), + rescale_betas_snr_zero: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n Whether to set the rescale_betas_snr_zero option or not for the sampler\n ', + }), + ) + .default(false), + tile_width: z + .optional( + z.int().gte(128).lte(4096).register(z.globalRegistry, { + description: + 'The size of the tiles to be used for the image generation.', + }), + ) + .default(4096), + controlnet_guess_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the controlnet will be applied to only the conditional predictions.\n ', + }), + ) + .default(false), + prediction_type: z.optional( + z.enum(['v_prediction', 'epsilon']).register(z.globalRegistry, { + description: + '\n The type of prediction to use for the image generation.\n The `epsilon` is the default.\n ', + }), + ), + eta: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The eta value to be used for the image generation.', + }), + ) + .default(0), + image_encoder_path: z.optional( + z.string().register(z.globalRegistry, { + description: + '\n The path to the image encoder model to use for the image generation.\n ', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(false), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use.Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + image_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: + '\n Number of images to generate in one request. 
Note that the higher the batch size,\n the longer it will take to generate the images.\n ', + }), + ) + .default(1), + debug_latents: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the latents will be saved for debugging.', + }), + ) + .default(false), + ic_light_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: + '\n The URL of the IC Light model image to use for the image generation.\n ', + }), + ), + unet_name: z.optional( + z.string().register(z.globalRegistry, { + description: + 'URL or HuggingFace ID of the custom U-Net model to use for the image generation.', + }), + ), + clip_skip: z + .optional( + z.int().gte(0).lte(2).register(z.globalRegistry, { + description: + '\n Skips part of the image generation process, leading to slightly different results.\n This means the image renders faster, too.\n ', + }), + ) + .default(0), + tile_stride_height: z + .optional( + z.int().gte(64).lte(2048).register(z.globalRegistry, { + description: + 'The stride of the tiles to be used for the image generation.', + }), + ) + .default(2048), + controlnets: z + .optional( + z.array(zSchemaControlNet).register(z.globalRegistry, { + description: + '\n The control nets to use for the image generation. You can use any number of control nets\n and they will be applied to the image at the specified timesteps.\n ', + }), + ) + .default([]), + num_inference_steps: z + .optional( + z.int().gte(1).lte(150).register(z.globalRegistry, { + description: + '\n Increasing the amount of steps tells Stable Diffusion that it should take more steps\n to generate your final result which can increase the amount of detail in your image.\n ', + }), + ) + .default(30), +}) + +/** + * IpAdapterFaceIdOutput + */ +export const zSchemaIpAdapterFaceIdOutput = z.object({ + image: zSchemaImage, + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * IpAdapterFaceIdInput + */ +export const zSchemaIpAdapterFaceIdInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. Be as descriptive as possible for best results.', + }), + face_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: + 'An image of a face to match. If an image with a size of 640x640 is not provided, it will be scaled and cropped to that size.', + }), + ), + width: z + .optional( + z.int().gte(512).lte(1024).register(z.globalRegistry, { + description: + '\n The width of the generated image.\n ', + }), + ) + .default(512), + face_id_det_size: z + .optional( + z.int().gte(64).lte(640).register(z.globalRegistry, { + description: + '\n The size of the face detection model. The higher the number the more accurate\n the detection will be but it will also take longer to run. The higher the number the more\n likely it will fail to find a face as well. 
Lower it if you are having trouble\n finding a face in the image.\n ', + }), + ) + .default(640), + guidance_scale: z + .optional( + z.number().gte(0).lte(16).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(7.5), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use.Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default( + 'blurry, low resolution, bad, ugly, low quality, pixelated, interpolated, compression artifacts, noisey, grainy', + ), + height: z + .optional( + z.int().gte(512).lte(1024).register(z.globalRegistry, { + description: + '\n The height of the generated image.\n ', + }), + ) + .default(512), + num_samples: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: + '\n The number of samples for face id. The more samples the better the image will\n be but it will also take longer to generate. Default is 4.\n ', + }), + ) + .default(4), + base_sdxl_model_repo: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'The URL to the base SDXL model. Default is SG161222/RealVisXL_V3.0', + }), + ) + .default('SG161222/RealVisXL_V3.0'), + base_1_5_model_repo: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'The URL to the base 1.5 model. Default is SG161222/Realistic_Vision_V4.0_noVAE', + }), + ) + .default('SG161222/Realistic_Vision_V4.0_noVAE'), + num_inference_steps: z + .optional( + z.int().gte(1).lte(200).register(z.globalRegistry, { + description: + '\n The number of inference steps to use for generating the image. The more steps\n the better the image will be but it will also take longer to generate.\n ', + }), + ) + .default(50), + model_type: z.optional( + z + .enum([ + '1_5-v1', + '1_5-v1-plus', + '1_5-v2-plus', + 'SDXL-v1', + 'SDXL-v2-plus', + '1_5-auraface-v1', + ]) + .register(z.globalRegistry, { + description: + 'The model type to use. 1_5 is the default and is recommended for most use cases.', + }), + ), + face_images_data_url: z.optional( + z.string().register(z.globalRegistry, { + description: + '\n URL to zip archive with images of faces. 
+ +/** + * OmniZeroOutput + */ +export const zSchemaOmniZeroOutput = z.object({ + image: zSchemaImage, +}) + +/** + * OmniZeroInput + */ +export const zSchemaOmniZeroInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Prompt to guide the image generation.', + }), + identity_image_url: z.string().register(z.globalRegistry, { + description: 'Identity image url.', + }), + identity_strength: z + .optional( + z.number().register(z.globalRegistry, { + description: 'Identity strength.', + }), + ) + .default(1), + number_of_images: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Number of images.', + }), + ) + .default(1), + guidance_scale: z + .optional( + z.number().register(z.globalRegistry, { + description: 'Guidance scale.', + }), + ) + .default(5), + image_strength: z + .optional( + z.number().register(z.globalRegistry, { + description: 'Image strength.', + }), + ) + .default(0.75), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt to guide the image generation.', + }), + ) + .default(''), + composition_image_url: z.string().register(z.globalRegistry, { + description: 'Composition image url.', + }), + depth_strength: z + .optional( + z.number().register(z.globalRegistry, { + description: 'Depth strength.', + }), + ) + .default(0.5), + composition_strength: z + .optional( + z.number().register(z.globalRegistry, { + description: 'Composition strength.', + }), + ) + .default(1), + image_url: z.string().register(z.globalRegistry, { + description: 'Input image url.', + }), + style_image_url: z.string().register(z.globalRegistry, { + description: 'Style image url.', + }), + face_strength: z + .optional( + z.number().register(z.globalRegistry, { + description: 'Face strength.', + }), + ) + .default(1), + style_strength: z + .optional( + z.number().register(z.globalRegistry, { + description: 'Style strength.', + }), + ) + .default(1), + seed: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Seed.', + }), + ) + .default(42), +})
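+
+// Editor's illustration (a hedged sketch; the helper name is an assumption):
+// OmniZero requires a prompt plus four image URLs (identity, composition,
+// input, style); every other field above carries a default. `safeParse`
+// reports all failures at once instead of throwing.
+export function validateOmniZeroInput(raw: unknown) {
+  const result = zSchemaOmniZeroInput.safeParse(raw)
+  return result.success
+    ? { ok: true as const, input: result.data }
+    : { ok: false as const, issues: result.error.issues }
+}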
+ +/** + * CCSROutput + */ +export const zSchemaCcsrOutput = z.object({ + image: zSchemaImage, + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for the generation.', + }), +}) + +/** + * CCSRInput + */ +export const zSchemaCcsrInput = z.object({ + color_fix_type: z.optional( + z.enum(['none', 'wavelet', 'adain']).register(z.globalRegistry, { + description: 'Type of color correction for samples.', + }), + ), + tile_diffusion_size: z + .optional( + z.int().gte(256).lte(2048).register(z.globalRegistry, { + description: 'Size of patch.', + }), + ) + .default(1024), + tile_vae_decoder_size: z + .optional( + z.int().gte(64).lte(2048).register(z.globalRegistry, { + description: 'Size of VAE patch.', + }), + ) + .default(226), + tile_vae_encoder_size: z + .optional( + z.int().gte(128).lte(2048).register(z.globalRegistry, { + description: 'Size of latent image.', + }), + ) + .default(1024), + t_min: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The starting point of uniform sampling strategy.', + }), + ) + .default(0.3333), + image_url: z.string().register(z.globalRegistry, { + 
description: 'The URL or data URI of the image to upscale.', + }), + tile_diffusion_stride: z + .optional( + z.int().gte(128).lte(1024).register(z.globalRegistry, { + description: 'Stride of sliding patch.', + }), + ) + .default(512), + tile_vae: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If specified, a patch-based sampling strategy will be used for VAE decoding.', + }), + ) + .default(false), + scale: z + .optional( + z.number().gte(1).lte(4).register(z.globalRegistry, { + description: + 'The scale of the output image. The higher the scale, the bigger the output image will be.', + }), + ) + .default(2), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Seed for reproducibility. Different seeds will make slightly different results.', + }), + ), + t_max: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The ending point of uniform sampling strategy.', + }), + ) + .default(0.6667), + steps: z + .optional( + z.int().gte(10).lte(100).register(z.globalRegistry, { + description: + 'The number of steps to run the model for. The higher the number, the better the quality and the longer it will take to generate.', + }), + ) + .default(50), + tile_diffusion: z.optional( + z.enum(['none', 'mix', 'gaussian']).register(z.globalRegistry, { + description: + 'If specified, a patch-based sampling strategy will be used for sampling.', + }), + ), +}) + +/** + * Output + */ +export const zSchemaSd15DepthControlnetOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * TextToImageControlNetInput + */ +export const zSchemaSd15DepthControlnetInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. 
Be as descriptive as possible for best results.', + }), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + z.null(), + ]), + ), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the prompt will be expanded with additional prompts.', + }), + ) + .default(false), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: 'The list of LoRA weights to use.', + }), + ) + .default([]), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(7.5), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(false), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + controlnet_conditioning_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The scale of the controlnet conditioning.', + }), + ) + .default(0.5), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ', + }), + ) + .default(false), + control_image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the control image.', + }), + num_inference_steps: z + .optional( + z.int().gte(1).lte(70).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(35), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), + enable_deep_cache: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, DeepCache will be enabled. TBD\n ', + }), + ) + .default(false), +}) + +/** + * DWPoseOutput + */ +export const zSchemaDwposeOutput = z.object({ + image: zSchemaImage, +}) + +/** + * DWPoseInput + */ +export const zSchemaDwposeInput = z.object({ + draw_mode: z.optional( + z + .enum([ + 'full-pose', + 'body-pose', + 'face-pose', + 'hand-pose', + 'face-hand-mask', + 'face-mask', + 'hand-mask', + ]) + .register(z.globalRegistry, { + description: + "Mode of drawing the pose on the image. 
Options are: 'full-pose', 'body-pose', 'face-pose', 'hand-pose', 'face-hand-mask', 'face-mask', 'hand-mask'.", + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to be processed', + }), +}) + +/** + * SD3Output + */ +export const zSchemaStableDiffusionV3MediumImageToImageOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + num_images: z.int().register(z.globalRegistry, { + description: 'The number of images generated.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * ImageToImageInput + */ +export const zSchemaStableDiffusionV3MediumImageToImageInput = z.object({ + prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, prompt will be upsampled with more details.', + }), + ) + .default(false), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + z.null(), + ]), + ), + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'The image URL to generate an image from.', + }), + strength: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: 'The strength of the image-to-image transformation.', + }), + ) + .default(0.9), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to generate an image from.', + }), + ) + .default(''), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * Region + */ +export const zSchemaRegion = z.object({ + y1: 
z.int().gte(0).lte(999).register(z.globalRegistry, { + description: 'Y-coordinate of the top-left corner', + }), + x2: z.int().gte(0).lte(999).register(z.globalRegistry, { + description: 'X-coordinate of the bottom-right corner', + }), + x1: z.int().gte(0).lte(999).register(z.globalRegistry, { + description: 'X-coordinate of the top-left corner', + }), + y2: z.int().gte(0).lte(999).register(z.globalRegistry, { + description: 'Y-coordinate of the bottom-right corner', + }), +}) + +/** + * Polygon + */ +export const zSchemaPolygon = z.object({ + points: z.array(z.record(z.string(), z.number())).register(z.globalRegistry, { + description: 'List of points', + }), + label: z.string().register(z.globalRegistry, { + description: 'Label of the polygon', + }), +}) + +/** + * PolygonOutput + */ +export const zSchemaPolygonOutput = z.object({ + polygons: z.array(zSchemaPolygon).register(z.globalRegistry, { + description: 'List of polygons', + }), +}) + +/** + * PolygonOutputWithLabels + */ +export const zSchemaFlorence2LargeRegionToSegmentationOutput = z.object({ + image: z.optional(zSchemaImage), + results: zSchemaPolygonOutput, +}) + +/** + * ImageWithUserCoordinatesInput + */ +export const zSchemaFlorence2LargeRegionToSegmentationInput = z.object({ + region: zSchemaRegion, + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to be processed.', + }), +}) + +/** + * OCRBoundingBoxSingle + */ +export const zSchemaOcrBoundingBoxSingle = z.object({ + y: z.number().register(z.globalRegistry, { + description: 'Y-coordinate of the top-left corner', + }), + label: z.string().register(z.globalRegistry, { + description: 'Label of the bounding box', + }), + h: z.number().register(z.globalRegistry, { + description: 'Height of the bounding box', + }), + w: z.number().register(z.globalRegistry, { + description: 'Width of the bounding box', + }), + x: z.number().register(z.globalRegistry, { + description: 'X-coordinate of the top-left corner', + }), +}) + +/** + * OCRBoundingBox + */ +export const zSchemaOcrBoundingBox = z.object({ + quad_boxes: z.array(zSchemaOcrBoundingBoxSingle).register(z.globalRegistry, { + description: 'List of quadrilateral boxes', + }), +}) + +/** + * OCRBoundingBoxOutputWithLabels + */ +export const zSchemaFlorence2LargeOcrWithRegionOutput = z.object({ + image: z.optional(zSchemaImage), + results: zSchemaOcrBoundingBox, +}) + +/** + * ImageInput + */ +export const zSchemaFlorence2LargeOcrWithRegionInput = z.object({ + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to be processed.', + }), +}) + +/** + * BoundingBox + */ +export const zSchemaBoundingBox = z.object({ + y: z.number().register(z.globalRegistry, { + description: 'Y-coordinate of the top-left corner', + }), + label: z.string().register(z.globalRegistry, { + description: 'Label of the bounding box', + }), + h: z.number().register(z.globalRegistry, { + description: 'Height of the bounding box', + }), + w: z.number().register(z.globalRegistry, { + description: 'Width of the bounding box', + }), + x: z.number().register(z.globalRegistry, { + description: 'X-coordinate of the top-left corner', + }), +}) + +/** + * BoundingBoxes + */ +export const zSchemaBoundingBoxes = z.object({ + bboxes: z.array(zSchemaBoundingBox).register(z.globalRegistry, { + description: 'List of bounding boxes', + }), +}) + +/** + * BoundingBoxOutputWithLabels + */ +export const zSchemaFlorence2LargeRegionProposalOutput = z.object({ + image: z.optional(zSchemaImage), + results: 
zSchemaBoundingBoxes, +}) + +/** + * ImageInput + */ +export const zSchemaFlorence2LargeRegionProposalInput = z.object({ + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to be processed.', + }), +}) + +/** + * BoundingBoxOutputWithLabels + */ +export const zSchemaFlorence2LargeCaptionToPhraseGroundingOutput = z.object({ + image: z.optional(zSchemaImage), + results: zSchemaBoundingBoxes, +}) + +/** + * ImageWithTextInput + */ +export const zSchemaFlorence2LargeCaptionToPhraseGroundingInput = z.object({ + text_input: z.string().register(z.globalRegistry, { + description: 'Text input for the task', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to be processed.', + }), +}) + +/** + * BoundingBoxOutputWithLabels + */ +export const zSchemaFlorence2LargeOpenVocabularyDetectionOutput = z.object({ + image: z.optional(zSchemaImage), + results: zSchemaBoundingBoxes, +}) + +/** + * ImageWithTextInput + */ +export const zSchemaFlorence2LargeOpenVocabularyDetectionInput = z.object({ + text_input: z.string().register(z.globalRegistry, { + description: 'Text input for the task', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to be processed.', + }), +}) + +/** + * BoundingBoxOutputWithLabels + */ +export const zSchemaFlorence2LargeObjectDetectionOutput = z.object({ + image: z.optional(zSchemaImage), + results: zSchemaBoundingBoxes, +}) + +/** + * ImageInput + */ +export const zSchemaFlorence2LargeObjectDetectionInput = z.object({ + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to be processed.', + }), +}) + +/** + * PolygonOutputWithLabels + */ +export const zSchemaFlorence2LargeReferringExpressionSegmentationOutput = + z.object({ + image: z.optional(zSchemaImage), + results: zSchemaPolygonOutput, + }) + +/** + * ImageWithTextInput + */ +export const zSchemaFlorence2LargeReferringExpressionSegmentationInput = + z.object({ + text_input: z.string().register(z.globalRegistry, { + description: 'Text input for the task', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to be processed.', + }), + }) + +/** + * BoundingBoxOutputWithLabels + */ +export const zSchemaFlorence2LargeDenseRegionCaptionOutput = z.object({ + image: z.optional(zSchemaImage), + results: zSchemaBoundingBoxes, +}) + +/** + * ImageInput + */ +export const zSchemaFlorence2LargeDenseRegionCaptionInput = z.object({ + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to be processed.', + }), +}) + +/** + * Era3DOutput + */ +export const zSchemaEra3dOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Images with background removed', + }), + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for random number generation', + }), + normal_images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Normal images with background removed', + }), +}) + +/** + * Era3DInput + */ +export const zSchemaEra3dInput = z.object({ + cfg: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.', + }), + ) + .default(4), + background_removal: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Background removal', + 
}), + ) + .default(true), + steps: z + .optional( + z.int().gte(1).lte(200).register(z.globalRegistry, { + description: 'Number of steps to run the model for', + }), + ) + .default(40), + crop_size: z + .optional( + z.int().gte(256).lte(512).register(z.globalRegistry, { + description: 'Size of the image to crop to', + }), + ) + .default(400), + seed: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Seed for random number generation', + }), + ) + .default(-1), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to remove background from', + }), +}) + +/** + * Output + */ +export const zSchemaSdxlControlnetUnionImageToImageOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * ImageToImageControlNetUnionInput + */ +export const zSchemaSdxlControlnetUnionImageToImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. Be as descriptive as possible for best results.', + }), + depth_preprocess: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to preprocess the depth image.', + }), + ) + .default(true), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + z.null(), + ]), + ), + normal_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The URL of the control image.', + }), + ), + embeddings: z + .optional( + z.array(zSchemaEmbedding).register(z.globalRegistry, { + description: 'The list of embeddings to use.', + }), + ) + .default([]), + teed_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The URL of the control image.', + }), + ), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: 'The list of LoRA weights to use.', + }), + ) + .default([]), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(7.5), + canny_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The URL of the control image.', + }), + ), + segmentation_preprocess: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to preprocess the segmentation image.', + }), + ) + .default(true), + format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the image to use as a starting point for the generation.', + }), + sync_mode: z + .optional( + 
z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + request_id: z + .optional( + z.string().register(z.globalRegistry, { + description: + '\n An id bound to a request, can be used with response to identify the request\n itself.\n ', + }), + ) + .default(''), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), + segmentation_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The URL of the control image.', + }), + ), + openpose_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The URL of the control image.', + }), + ), + canny_preprocess: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to preprocess the canny image.', + }), + ) + .default(true), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the prompt will be expanded with additional prompts.', + }), + ) + .default(false), + depth_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The URL of the control image.', + }), + ), + normal_preprocess: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to preprocess the normal image.', + }), + ) + .default(true), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + preserve_aspect_ratio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the aspect ratio of the generated image will be preserved even\n if the image size is too large. However, if the image is not a multiple of 32\n in width or height, it will be resized to the nearest multiple of 32. By default,\n this snapping to the nearest multiple of 32 will not preserve the aspect ratio.\n Set crop_output to True, to crop the output to the proper aspect ratio\n after generating.\n ', + }), + ) + .default(false), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. 
moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + crop_output: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the output is cropped to the proper aspect ratio after generating.\n ', + }), + ) + .default(false), + teed_preprocess: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to preprocess the teed image.', + }), + ) + .default(true), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + controlnet_conditioning_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The scale of the controlnet conditioning.', + }), + ) + .default(0.5), + strength: z + .optional( + z.number().gte(0.05).lte(1).register(z.globalRegistry, { + description: + 'Determines how much the generated image resembles the initial image.', + }), + ) + .default(0.95), + safety_checker_version: z.optional( + z.enum(['v1', 'v2']).register(z.globalRegistry, { + description: + 'The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model.', + }), + ), + openpose_preprocess: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to preprocess the openpose image.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(1).lte(70).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(35), +}) + +/** + * Output + */ +export const zSchemaSdxlControlnetUnionInpaintingOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * InpaintingControlNetUnionInput + */ +export const zSchemaSdxlControlnetUnionInpaintingInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. 
Be as descriptive as possible for best results.', + }), + depth_preprocess: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to preprocess the depth image.', + }), + ) + .default(true), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + z.null(), + ]), + ), + normal_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The URL of the control image.', + }), + ), + embeddings: z + .optional( + z.array(zSchemaEmbedding).register(z.globalRegistry, { + description: 'The list of embeddings to use.', + }), + ) + .default([]), + teed_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The URL of the control image.', + }), + ), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: 'The list of LoRA weights to use.', + }), + ) + .default([]), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(7.5), + canny_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The URL of the control image.', + }), + ), + segmentation_preprocess: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to preprocess the segmentation image.', + }), + ) + .default(true), + format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the image to use as a starting point for the generation.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + request_id: z + .optional( + z.string().register(z.globalRegistry, { + description: + '\n An id bound to a request, can be used with response to identify the request\n itself.\n ', + }), + ) + .default(''), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), + mask_url: z.string().register(z.globalRegistry, { + description: 'The URL of the mask to use for inpainting.', + }), + segmentation_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The URL of the control image.', + }), + ), + openpose_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The URL of the control image.', + }), + ), + canny_preprocess: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to preprocess the canny image.', + }), + ) + .default(true), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the prompt will be expanded with additional prompts.', + }), + ) + .default(false), + depth_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The URL of the control image.', + }), + ), + normal_preprocess: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to 
preprocess the normal image.', + }), + ) + .default(true), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + teed_preprocess: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to preprocess the teed image.', + }), + ) + .default(true), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + controlnet_conditioning_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The scale of the controlnet conditioning.', + }), + ) + .default(0.5), + strength: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: + 'Determines how much the generated image resembles the initial image.', + }), + ) + .default(0.95), + safety_checker_version: z.optional( + z.enum(['v1', 'v2']).register(z.globalRegistry, { + description: + 'The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model.', + }), + ), + openpose_preprocess: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to preprocess the openpose image.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(1).lte(70).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(35), +}) + +/** + * Output + */ +export const zSchemaFluxLoraImageToImageOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +})
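+
+// Editor's illustration (a hedged sketch; the alias and helper names are
+// assumptions): the output schema above can give a queue response a static
+// type and a runtime check in one step. `safeParse` surfaces malformed
+// responses as a value rather than a throw.
+export type FluxLoraImageToImageOutput = z.infer<
+  typeof zSchemaFluxLoraImageToImageOutput
+>
+
+export function tryParseFluxLoraImageToImageOutput(body: unknown) {
+  // result.success narrows the result to data on success, error on failure.
+  return zSchemaFluxLoraImageToImageOutput.safeParse(body)
+}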
+ +/** + * ImageToImageInput + */ +export const zSchemaFluxLoraImageToImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: + 'The number of images to generate. This is always set to 1 for streaming output.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to use for inpainting or img2img.', + }), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + strength: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: + 'The strength to use for inpainting/image-to-image. Only used if the image_url is provided. 1.0 completely remakes the image, while 0.0 preserves the original.', + }), + ) + .default(0.85), + guidance_scale: z + .optional( + z.number().gte(0).lte(35).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * Output + */ +export const zSchemaFluxGeneralDifferentialDiffusionOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * ControlNetUnionInput + */ +export const zSchemaControlNetUnionInput = z.object({ + conditioning_scale: z + .optional( + z.number().gte(0).lte(2).register(z.globalRegistry, { + description: + '\n The scale of the control net weight. 
This is used to scale the control net weight\n before merging it with the base model.\n ', + }), + ) + .default(1), + mask_threshold: z + .optional( + z.number().gte(0.01).lte(0.99).register(z.globalRegistry, { + description: 'Threshold for mask.', + }), + ) + .default(0.5), + end_percentage: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n The percentage of the image to end applying the controlnet in terms of the total timesteps.\n ', + }), + ) + .default(1), + mask_image_url: z.optional(z.union([z.string(), z.null()])), + control_image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to be used as the control image.', + }), + control_mode: z + .enum(['canny', 'tile', 'depth', 'blur', 'pose', 'gray', 'low-quality']) + .register(z.globalRegistry, { + description: + 'Control Mode for Flux Controlnet Union. Supported values are:\n - canny: Uses the edges for guided generation.\n - tile: Uses the tiles for guided generation.\n - depth: Utilizes a grayscale depth map for guided generation.\n - blur: Adds a blur to the image.\n - pose: Uses the pose of the image for guided generation.\n - gray: Converts the image to grayscale.\n - low-quality: Converts the image to a low-quality image.', + }), + start_percentage: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n The percentage of the image to start applying the controlnet in terms of the total timesteps.\n ', + }), + ) + .default(0), +}) + +/** + * ControlNetUnion + */ +export const zSchemaControlNetUnion = z.object({ + controls: z.array(zSchemaControlNetUnionInput).register(z.globalRegistry, { + description: 'The control images and modes to use for the control net.', + }), + path: z.string().register(z.globalRegistry, { + description: 'URL or the path to the control net weights.', + }), + variant: z.optional( + z.string().register(z.globalRegistry, { + description: 'The optional variant if a Hugging Face repo key is used.', + }), + ), + config_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'optional URL to the controlnet config.json file.', + }), + ), +}) + +/** + * ImageFillInput + */ +export const zSchemaImageFillInput = z.object({ + fill_image_url: z.optional(z.union([z.string(), z.array(z.string())])), +}) + +/** + * EasyControlWeight + */ +export const zSchemaEasyControlWeight = z.object({ + scale: z + .optional( + z.number().gte(0).lte(2).register(z.globalRegistry, { + description: 'Scale for the control method.', + }), + ) + .default(1), + image_control_type: z + .enum(['subject', 'spatial']) + .register(z.globalRegistry, { + description: + 'Control type of the image. Must be one of `spatial` or `subject`.', + }), + control_method_url: z.string().register(z.globalRegistry, { + description: + 'URL to safetensor weights of control method to be applied. 
Can also be one of `canny`, `depth`, `hedsketch`, `inpainting`, `pose`, `seg`, `subject`, `ghibli` ', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of an image to use as a control', + }), +}) + +/** + * ControlLoraWeight + */ +export const zSchemaControlLoraWeight = z.object({ + path: z.string().register(z.globalRegistry, { + description: 'URL or the path to the LoRA weights.', + }), + scale: z.optional( + z.union([z.record(z.string(), z.unknown()), z.number().gte(-4).lte(4)]), + ), + control_image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to be used as the control image.', + }), + preprocess: z.optional( + z.enum(['canny', 'depth', 'None']).register(z.globalRegistry, { + description: 'Type of preprocessing to apply to the input image.', + }), + ), +}) + +/** + * DifferentialDiffusionInput + */ +export const zSchemaFluxGeneralDifferentialDiffusionInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + nag_end: z + .optional( + z.number().lte(1).register(z.globalRegistry, { + description: + '\n The proportion of steps to apply NAG. After the specified proportion\n of steps has been iterated, the remaining steps will use original\n attention processors in FLUX.\n ', + }), + ) + .default(0.25), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + control_loras: z + .optional( + z.array(zSchemaControlLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation which use a control image. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. 
You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + scheduler: z.optional( + z.enum(['euler', 'dpmpp_2m']).register(z.globalRegistry, { + description: 'Scheduler for the denoising process.', + }), + ), + easycontrols: z + .optional( + z.array(zSchemaEasyControlWeight).register(z.globalRegistry, { + description: + '\n EasyControl Inputs to use for image generation.\n ', + }), + ) + .default([]), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + real_cfg_scale: z + .optional( + z.number().gte(0).lte(5).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + use_cfg_zero: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n Uses CFG-zero init sampling as in https://arxiv.org/abs/2503.18886.\n ', + }), + ) + .default(false), + fill_image: z.optional(zSchemaImageFillInput), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + sigma_schedule: z.optional( + z.enum(['sgm_uniform']).register(z.globalRegistry, { + description: 'Sigmas schedule for the denoising process.', + }), + ), + reference_end: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n The percentage of the total timesteps when the reference guidance is to be ended.\n ', + }), + ) + .default(1), + reference_strength: z + .optional( + z.number().gte(-3).lte(3).register(z.globalRegistry, { + description: + 'Strength of reference_only generation. Only used if a reference image is provided.', + }), + ) + .default(0.65), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to use as initial image.', + }), + nag_scale: z + .optional( + z.number().lte(10).register(z.globalRegistry, { + description: + '\n The scale for NAG. Higher values will result in an image that is more distant\n from the negative prompt.\n ', + }), + ) + .default(3), + reference_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'URL of Image for Reference-Only', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + controlnet_unions: z + .optional( + z.array(zSchemaControlNetUnion).register(z.globalRegistry, { + description: + '\n The controlnet unions to use for the image generation. 
Only one controlnet is supported at the moment.\n ', + }), + ) + .default([]), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + '\n Negative prompt to steer the image generation away from unwanted features.\n By default, we will be using NAG for processing the negative prompt.\n ', + }), + ) + .default(''), + nag_tau: z + .optional( + z.number().register(z.globalRegistry, { + description: + '\n The tau for NAG. Controls the normalization of the hidden state.\n Higher values will result in a less aggressive normalization,\n but may also lead to unexpected changes with respect to the original image.\n Not recommended to change this value.\n ', + }), + ) + .default(2.5), + change_map_image_url: z.string().register(z.globalRegistry, { + description: 'URL of change map.', + }), + num_images: z + .optional( + z.int().gte(1).lte(10).register(z.globalRegistry, { + description: + 'The number of images to generate. This is always set to 1 for streaming output.', + }), + ) + .default(1), + use_beta_schedule: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Specifies whether beta sigmas ought to be used.', + }), + ) + .default(false), + ip_adapters: z + .optional( + z.array(zSchemaIpAdapter).register(z.globalRegistry, { + description: + '\n IP-Adapter to use for image generation.\n ', + }), + ) + .default([]), + base_shift: z + .optional( + z.number().gte(0.01).lte(5).register(z.globalRegistry, { + description: 'Base shift for the scheduled timesteps', + }), + ) + .default(0.5), + nag_alpha: z + .optional( + z.number().lte(1).register(z.globalRegistry, { + description: + '\n The alpha value for NAG. This value is used as a final weighting\n factor for steering the normalized guidance (positive and negative prompts)\n in the direction of the positive prompt. Higher values will result in less\n steering on the normalized guidance where lower values will result in\n considering the positive prompt guidance more.\n ', + }), + ) + .default(0.25), + strength: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: + 'The strength to use for differential diffusion. 1.0 completely remakes the image, while 0.0 preserves the original.', + }), + ) + .default(0.85), + max_shift: z + .optional( + z.number().gte(0.01).lte(5).register(z.globalRegistry, { + description: 'Max shift for the scheduled timesteps', + }), + ) + .default(1.15), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + controlnets: z + .optional( + z.array(zSchemaControlNet).register(z.globalRegistry, { + description: + '\n The controlnets to use for the image generation. Only one controlnet is supported at the moment.\n ', + }), + ) + .default([]), + reference_start: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n The percentage of the total timesteps when the reference guidance is to be started.\n ', + }), + ) + .default(0), + use_real_cfg: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n Uses classical CFG as in SD1.5, SDXL, etc. 
Increases generation times and price when set to true.\n If using XLabs IP-Adapter v1, this will be turned on!\n ', + }), + ) + .default(false), +})
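+
+// Editor's illustration (a hedged sketch; prompt and URLs are placeholders):
+// only prompt, image_url and change_map_image_url are required above, so a
+// minimal differential-diffusion payload can lean on the declared defaults.
+// z.input<> types the pre-parse shape, in which defaulted fields stay
+// optional.
+export const exampleDifferentialDiffusionInput: z.input<
+  typeof zSchemaFluxGeneralDifferentialDiffusionInput
+> = {
+  prompt: 'a watercolor landscape at dusk',
+  image_url: 'https://example.com/init.png',
+  change_map_image_url: 'https://example.com/change-map.png',
+}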
+ +/** + * Output + */ +export const zSchemaFluxGeneralInpaintingOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * InpaintInput + */ +export const zSchemaFluxGeneralInpaintingInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + nag_end: z + .optional( + z.number().lte(1).register(z.globalRegistry, { + description: + '\n The proportion of steps to apply NAG. After the specified proportion\n of steps has been iterated, the remaining steps will use original\n attention processors in FLUX.\n ', + }), + ) + .default(0.25), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + control_loras: z + .optional( + z.array(zSchemaControlLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation which use a control image. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. 
You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + scheduler: z.optional( + z.enum(['euler', 'dpmpp_2m']).register(z.globalRegistry, { + description: 'Scheduler for the denoising process.', + }), + ), + easycontrols: z + .optional( + z.array(zSchemaEasyControlWeight).register(z.globalRegistry, { + description: + '\n EasyControl Inputs to use for image generation.\n ', + }), + ) + .default([]), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + real_cfg_scale: z + .optional( + z.number().gte(0).lte(5).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + use_cfg_zero: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n Uses CFG-zero init sampling as in https://arxiv.org/abs/2503.18886.\n ', + }), + ) + .default(false), + fill_image: z.optional(zSchemaImageFillInput), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + sigma_schedule: z.optional( + z.enum(['sgm_uniform']).register(z.globalRegistry, { + description: 'Sigmas schedule for the denoising process.', + }), + ), + reference_end: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n The percentage of the total timesteps when the reference guidance is to be ended.\n ', + }), + ) + .default(1), + reference_strength: z + .optional( + z.number().gte(-3).lte(3).register(z.globalRegistry, { + description: + 'Strength of reference_only generation. Only used if a reference image is provided.', + }), + ) + .default(0.65), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + mask_url: z.string().register(z.globalRegistry, { + description: '\n The mask of the area to inpaint.\n ', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to use for inpainting or img2img.', + }), + nag_scale: z + .optional( + z.number().lte(10).register(z.globalRegistry, { + description: + '\n The scale for NAG. Higher values will result in an image that is more distant\n from the negative prompt.\n ', + }), + ) + .default(3), + reference_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'URL of Image for Reference-Only', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + controlnet_unions: z + .optional( + z.array(zSchemaControlNetUnion).register(z.globalRegistry, { + description: + '\n The controlnet unions to use for the image generation. 
Only one controlnet is supported at the moment.\n ', + }), + ) + .default([]), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + '\n Negative prompt to steer the image generation away from unwanted features.\n By default, we will be using NAG for processing the negative prompt.\n ', + }), + ) + .default(''), + nag_tau: z + .optional( + z.number().register(z.globalRegistry, { + description: + '\n The tau for NAG. Controls the normalization of the hidden state.\n Higher values will result in a less aggressive normalization,\n but may also lead to unexpected changes with respect to the original image.\n Not recommended to change this value.\n ', + }), + ) + .default(2.5), + num_images: z + .optional( + z.int().gte(1).lte(10).register(z.globalRegistry, { + description: + 'The number of images to generate. This is always set to 1 for streaming output.', + }), + ) + .default(1), + use_beta_schedule: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Specifies whether beta sigmas ought to be used.', + }), + ) + .default(false), + ip_adapters: z + .optional( + z.array(zSchemaIpAdapter).register(z.globalRegistry, { + description: + '\n IP-Adapter to use for image generation.\n ', + }), + ) + .default([]), + base_shift: z + .optional( + z.number().gte(0.01).lte(5).register(z.globalRegistry, { + description: 'Base shift for the scheduled timesteps', + }), + ) + .default(0.5), + nag_alpha: z + .optional( + z.number().lte(1).register(z.globalRegistry, { + description: + '\n The alpha value for NAG. This value is used as a final weighting\n factor for steering the normalized guidance (positive and negative prompts)\n in the direction of the positive prompt. Higher values will result in less\n steering on the normalized guidance where lower values will result in\n considering the positive prompt guidance more.\n ', + }), + ) + .default(0.25), + strength: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: + 'The strength to use for inpainting/image-to-image. Only used if the image_url is provided. 1.0 completely remakes the image, while 0.0 preserves the original.', + }), + ) + .default(0.85), + max_shift: z + .optional( + z.number().gte(0.01).lte(5).register(z.globalRegistry, { + description: 'Max shift for the scheduled timesteps', + }), + ) + .default(1.15), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + controlnets: z + .optional( + z.array(zSchemaControlNet).register(z.globalRegistry, { + description: + '\n The controlnets to use for the image generation. Only one controlnet is supported at the moment.\n ', + }), + ) + .default([]), + reference_start: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n The percentage of the total timesteps when the reference guidance is to be started.\n ', + }), + ) + .default(0), + use_real_cfg: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n Uses classical CFG as in SD1.5, SDXL, etc. 
Increases generation times and price when set to true.\n If using XLabs IP-Adapter v1, this will be turned on!\n ', + }), + ) + .default(false), +})
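+
+// Editor's illustration (a hedged sketch; prompt and URLs are placeholders):
+// inpainting additionally requires mask_url alongside prompt and image_url;
+// `parse` fills in the remaining defaults (e.g. strength = 0.85,
+// num_inference_steps = 28).
+export const exampleInpaintingInput = zSchemaFluxGeneralInpaintingInput.parse({
+  prompt: 'replace the masked region with a sunset sky',
+  image_url: 'https://example.com/photo.png',
+  mask_url: 'https://example.com/mask.png',
+})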
Increases generation times and price when set to true.\n If using XLabs IP-Adapter v1, this will be turned on.\n ', + }), + ) + .default(false), +}) + +/** + * Output + */ +export const zSchemaFluxGeneralImageToImageOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * ImageToImageInput + */ +export const zSchemaFluxGeneralImageToImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + nag_end: z + .optional( + z.number().lte(1).register(z.globalRegistry, { + description: + '\n The proportion of steps to apply NAG. After the specified proportion\n of steps has been iterated, the remaining steps will use original\n attention processors in FLUX.\n ', + }), + ) + .default(0.25), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + control_loras: z + .optional( + z.array(zSchemaControlLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation which use a control image. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation.
You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + scheduler: z.optional( + z.enum(['euler', 'dpmpp_2m']).register(z.globalRegistry, { + description: 'Scheduler for the denoising process.', + }), + ), + easycontrols: z + .optional( + z.array(zSchemaEasyControlWeight).register(z.globalRegistry, { + description: + '\n EasyControl Inputs to use for image generation.\n ', + }), + ) + .default([]), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + real_cfg_scale: z + .optional( + z.number().gte(0).lte(5).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + use_cfg_zero: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n Uses CFG-zero init sampling as in https://arxiv.org/abs/2503.18886.\n ', + }), + ) + .default(false), + fill_image: z.optional(zSchemaImageFillInput), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + sigma_schedule: z.optional( + z.enum(['sgm_uniform']).register(z.globalRegistry, { + description: 'Sigmas schedule for the denoising process.', + }), + ), + reference_end: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n The percentage of the total timesteps when the reference guidance is to be ended.\n ', + }), + ) + .default(1), + reference_strength: z + .optional( + z.number().gte(-3).lte(3).register(z.globalRegistry, { + description: + 'Strength of reference_only generation. Only used if a reference image is provided.', + }), + ) + .default(0.65), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to use for inpainting or image-to-image.', + }), + nag_scale: z + .optional( + z.number().lte(10).register(z.globalRegistry, { + description: + '\n The scale for NAG. Higher values will result in an image that is more distant\n to the negative prompt.\n ', + }), + ) + .default(3), + reference_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'URL of Image for Reference-Only', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + controlnet_unions: z + .optional( + z.array(zSchemaControlNetUnion).register(z.globalRegistry, { + description: + '\n The controlnet unions to use for the image generation.
Only one controlnet is supported at the moment.\n ', + }), + ) + .default([]), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + '\n Negative prompt to steer the image generation away from unwanted features.\n By default, we will be using NAG for processing the negative prompt.\n ', + }), + ) + .default(''), + nag_tau: z + .optional( + z.number().register(z.globalRegistry, { + description: + '\n The tau for NAG. Controls the normalization of the hidden state.\n Higher values will result in a less aggressive normalization,\n but may also lead to unexpected changes with respect to the original image.\n Not recommended to change this value.\n ', + }), + ) + .default(2.5), + num_images: z + .optional( + z.int().gte(1).lte(10).register(z.globalRegistry, { + description: + 'The number of images to generate. This is always set to 1 for streaming output.', + }), + ) + .default(1), + use_beta_schedule: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Specifies whether beta sigmas ought to be used.', + }), + ) + .default(false), + ip_adapters: z + .optional( + z.array(zSchemaIpAdapter).register(z.globalRegistry, { + description: + '\n IP-Adapter to use for image generation.\n ', + }), + ) + .default([]), + base_shift: z + .optional( + z.number().gte(0.01).lte(5).register(z.globalRegistry, { + description: 'Base shift for the scheduled timesteps', + }), + ) + .default(0.5), + nag_alpha: z + .optional( + z.number().lte(1).register(z.globalRegistry, { + description: + '\n The alpha value for NAG. This value is used as a final weighting\n factor for steering the normalized guidance (positive and negative prompts)\n in the direction of the positive prompt. Higher values will result in less\n steering on the normalized guidance where lower values will result in\n considering the positive prompt guidance more.\n ', + }), + ) + .default(0.25), + strength: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: + 'The strength to use for inpainting/image-to-image. Only used if the image_url is provided. 1.0 completely remakes the image, while 0.0 preserves the original.', + }), + ) + .default(0.85), + max_shift: z + .optional( + z.number().gte(0.01).lte(5).register(z.globalRegistry, { + description: 'Max shift for the scheduled timesteps', + }), + ) + .default(1.15), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + controlnets: z + .optional( + z.array(zSchemaControlNet).register(z.globalRegistry, { + description: + '\n The controlnets to use for the image generation. Only one controlnet is supported at the moment.\n ', + }), + ) + .default([]), + reference_start: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n The percentage of the total timesteps when the reference guidance is to be started.\n ', + }), + ) + .default(0), + use_real_cfg: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n Uses classical CFG as in SD1.5, SDXL, etc.
Increases generation times and price when set to true.\n If using XLabs IP-Adapter v1, this will be turned on.\n ', + }), + ) + .default(false), +}) + +/** + * SAM2ImageOutput + */ +export const zSchemaSam2ImageOutput = z.object({ + image: zSchemaImage, +}) + +/** + * BoxPrompt + */ +export const zSchemaBoxPrompt = z.object({ + y_min: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Y Min Coordinate of the box', + }), + ) + .default(0), + frame_index: z + .optional( + z.int().register(z.globalRegistry, { + description: 'The frame index to interact with.', + }), + ) + .default(0), + x_max: z + .optional( + z.int().register(z.globalRegistry, { + description: 'X Max Coordinate of the box', + }), + ) + .default(0), + x_min: z + .optional( + z.int().register(z.globalRegistry, { + description: 'X Min Coordinate of the box', + }), + ) + .default(0), + y_max: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Y Max Coordinate of the box', + }), + ) + .default(0), +}) + +/** + * PointPrompt + */ +export const zSchemaPointPrompt = z.object({ + y: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Y Coordinate of the prompt', + }), + ) + .default(350), + label: z.optional( + z.union([z.literal(0), z.literal(1)]).register(z.globalRegistry, { + description: 'Label of the prompt. 1 for foreground, 0 for background', + }), + ), + frame_index: z + .optional( + z.int().register(z.globalRegistry, { + description: 'The frame index to interact with.', + }), + ) + .default(0), + x: z + .optional( + z.int().register(z.globalRegistry, { + description: 'X Coordinate of the prompt', + }), + ) + .default(305), +}) + +/** + * SAM2ImageInput + */ +export const zSchemaSam2ImageInput = z.object({ + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + prompts: z + .optional( + z.array(zSchemaPointPrompt).register(z.globalRegistry, { + description: 'List of prompts to segment the image', + }), + ) + .default([]), + box_prompts: z + .optional( + z.array(zSchemaBoxPrompt).register(z.globalRegistry, { + description: 'Coordinates for boxes', + }), + ) + .default([]), + apply_mask: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Apply the mask on the image.', + }), + ) + .default(false), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to be segmented', + }), +}) + +/** + * PiDiOutput + */ +export const zSchemaImagePreprocessorsPidiOutput = z.object({ + image: zSchemaImage, +}) + +/** + * PiDiInput + */ +export const zSchemaImagePreprocessorsPidiInput = z.object({ + safe: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to use the safe version of the Pidi detector', + }), + ) + .default(false), + apply_filter: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to apply the filter to the image.', + }), + ) + .default(false), + scribble: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to use the scribble version of the Pidi detector', + }), + ) + .default(false), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to process', + }), +})
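+
+/*
+ * Hand-written usage sketch (illustrative only; not generated from the fal
+ * OpenAPI spec). Parsing with these Zod schemas applies the declared
+ * defaults, so a minimal payload round-trips to a fully-populated input.
+ * The image URL and point coordinates below are placeholder values.
+ *
+ *   const input = zSchemaSam2ImageInput.parse({
+ *     image_url: 'https://example.com/photo.png',
+ *     prompts: [{ x: 305, y: 350, label: 1 }],
+ *   })
+ *   // Defaults filled in by parse():
+ *   // input.sync_mode === false
+ *   // input.apply_mask === false
+ *   // input.box_prompts.length === 0
+ *   // input.prompts[0].frame_index === 0
+ */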
+ +/** + * ZoeOutput + */ +export const zSchemaImagePreprocessorsZoeOutput = z.object({ + image: zSchemaImage, +}) + +/** + * ZoeInput + */ +export const zSchemaImagePreprocessorsZoeInput = z.object({ + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to process', + }), +}) + +/** + * LineartOutput + */ +export const zSchemaImagePreprocessorsLineartOutput = z.object({ + image: zSchemaImage, +}) + +/** + * LineartInput + */ +export const zSchemaImagePreprocessorsLineartInput = z.object({ + coarse: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to use the coarse model', + }), + ) + .default(false), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to process', + }), +}) + +/** + * TeeDOutput + */ +export const zSchemaImagePreprocessorsTeedOutput = z.object({ + image: zSchemaImage, +}) + +/** + * TeeDInput + */ +export const zSchemaImagePreprocessorsTeedInput = z.object({ + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to process', + }), +}) + +/** + * MiDaSOutput + */ +export const zSchemaImagePreprocessorsMidasOutput = z.object({ + normal_map: zSchemaImage, + depth_map: zSchemaImage, +}) + +/** + * MiDaSInput + */ +export const zSchemaImagePreprocessorsMidasInput = z.object({ + a: z + .optional( + z.number().register(z.globalRegistry, { + description: 'A parameter for the MiDaS detector', + }), + ) + .default(6.283185307179586), + background_threshold: z + .optional( + z.number().register(z.globalRegistry, { + description: 'Background threshold for the MiDaS detector', + }), + ) + .default(0.1), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to process', + }), +}) + +/** + * SamOutput + */ +export const zSchemaImagePreprocessorsSamOutput = z.object({ + image: zSchemaImage, +}) + +/** + * SamInput + */ +export const zSchemaImagePreprocessorsSamInput = z.object({ + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to process', + }), +}) + +/** + * MLSDOutput + */ +export const zSchemaImagePreprocessorsMlsdOutput = z.object({ + image: zSchemaImage, +}) + +/** + * MLSDInput + */ +export const zSchemaImagePreprocessorsMlsdInput = z.object({ + distance_threshold: z + .optional( + z.number().register(z.globalRegistry, { + description: 'Distance threshold for the MLSD detector', + }), + ) + .default(0.1), + score_threshold: z + .optional( + z.number().register(z.globalRegistry, { + description: 'Score threshold for the MLSD detector', + }), + ) + .default(0.1), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to process', + }), +}) + +/** + * ScribbleOutput + */ +export const zSchemaImagePreprocessorsScribbleOutput = z.object({ + image: zSchemaImage, +}) + +/** + * ScribbleInput + */ +export const zSchemaImagePreprocessorsScribbleInput = z.object({ + model: z.optional( + z.enum(['HED', 'PiDi']).register(z.globalRegistry, { + description: 'The model to use for the Scribble detector', + }), + ), + safe: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to use the safe version of the Scribble detector', + }), + ) + .default(false), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to process', + }), +}) + +/** + * DepthAnythingV2Output + */ +export const zSchemaImagePreprocessorsDepthAnythingV2Output = z.object({ + image: zSchemaImage, +}) + +/** + * DepthAnythingV2Input + */ +export 
const zSchemaImagePreprocessorsDepthAnythingV2Input = z.object({ + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to process', + }), +}) + +/** + * HEDOutput + */ +export const zSchemaImagePreprocessorsHedOutput = z.object({ + image: zSchemaImage, +}) + +/** + * HEDInput + */ +export const zSchemaImagePreprocessorsHedInput = z.object({ + safe: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to use the safe version of the HED detector', + }), + ) + .default(false), + scribble: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to use the scribble version of the HED detector', + }), + ) + .default(false), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to process', + }), +}) + +/** + * Output + */ +export const zSchemaFluxGeneralRfInversionOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * RFInversionInput + */ +export const zSchemaFluxGeneralRfInversionInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to edit the image with', + }), + nag_end: z + .optional( + z.number().lte(1).register(z.globalRegistry, { + description: + '\n The proportion of steps to apply NAG. After the specified proportion\n of steps has been iterated, the remaining steps will use original\n attention processors in FLUX.\n ', + }), + ) + .default(0.25), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + z.null(), + ]), + ), + control_loras: z + .optional( + z.array(zSchemaControlLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation which use a control image. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + controller_guidance_reverse: z + .optional( + z.number().gte(0.01).lte(3).register(z.globalRegistry, { + description: + 'The controller guidance (eta) used in the denoising process.Using values closer to 1 will result in an image closer to input.', + }), + ) + .default(0.75), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. 
You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + reverse_guidance_start: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Timestep to start guidance during reverse process.', + }), + ) + .default(0), + easycontrols: z + .optional( + z.array(zSchemaEasyControlWeight).register(z.globalRegistry, { + description: + '\n EasyControl Inputs to use for image generation.\n ', + }), + ) + .default([]), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + scheduler: z.optional( + z.enum(['euler', 'dpmpp_2m']).register(z.globalRegistry, { + description: 'Scheduler for the denoising process.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + use_cfg_zero: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n Uses CFG-zero init sampling as in https://arxiv.org/abs/2503.18886.\n ', + }), + ) + .default(false), + reference_strength: z + .optional( + z.number().gte(-3).lte(3).register(z.globalRegistry, { + description: + 'Strength of reference_only generation. Only used if a reference image is provided.', + }), + ) + .default(0.65), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + sigma_schedule: z.optional( + z.enum(['sgm_uniform']).register(z.globalRegistry, { + description: 'Sigmas schedule for the denoising process.', + }), + ), + reference_end: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n The percentage of the total timesteps when the reference guidance is to be ended.\n ', + }), + ) + .default(1), + controller_guidance_forward: z + .optional( + z.number().gte(0.01).lte(3).register(z.globalRegistry, { + description: + 'The controller guidance (gamma) used in the creation of structured noise.', + }), + ) + .default(0.6), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to be edited', + }), + fill_image: z.optional(zSchemaImageFillInput), + nag_scale: z + .optional( + z.number().lte(10).register(z.globalRegistry, { + description: + '\n The scale for NAG. 
Higher values will result in an image that is more distant\n to the negative prompt.\n ', + }), + ) + .default(3), + reverse_guidance_schedule: z.optional( + z + .enum(['constant', 'linear_increase', 'linear_decrease']) + .register(z.globalRegistry, { + description: 'Scheduler for applying reverse guidance.', + }), + ), + reference_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'URL of Image for Reference-Only', + }), + ), + reverse_guidance_end: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Timestep to stop guidance during reverse process.', + }), + ) + .default(8), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + controlnet_unions: z + .optional( + z.array(zSchemaControlNetUnion).register(z.globalRegistry, { + description: + '\n The controlnet unions to use for the image generation. Only one controlnet is supported at the moment.\n ', + }), + ) + .default([]), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + '\n Negative prompt to steer the image generation away from unwanted features.\n By default, we will be using NAG for processing the negative prompt.\n ', + }), + ) + .default(''), + nag_tau: z + .optional( + z.number().register(z.globalRegistry, { + description: + '\n The tau for NAG. Controls the normalization of the hidden state.\n Higher values will result in a less aggressive normalization,\n but may also lead to unexpected changes with respect to the original image.\n Not recommended to change this value.\n ', + }), + ) + .default(2.5), + num_images: z + .optional( + z.int().gte(1).lte(10).register(z.globalRegistry, { + description: + 'The number of images to generate. This is always set to 1 for streaming output.', + }), + ) + .default(1), + use_beta_schedule: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Specifies whether beta sigmas ought to be used.', + }), + ) + .default(false), + nag_alpha: z + .optional( + z.number().lte(1).register(z.globalRegistry, { + description: + '\n The alpha value for NAG. This value is used as a final weighting\n factor for steering the normalized guidance (positive and negative prompts)\n in the direction of the positive prompt. Higher values will result in less\n steering on the normalized guidance where lower values will result in\n considering the positive prompt guidance more.\n ', + }), + ) + .default(0.25), + base_shift: z + .optional( + z.number().gte(0.01).lte(5).register(z.globalRegistry, { + description: 'Base shift for the scheduled timesteps', + }), + ) + .default(0.5), + max_shift: z + .optional( + z.number().gte(0.01).lte(5).register(z.globalRegistry, { + description: 'Max shift for the scheduled timesteps', + }), + ) + .default(1.15), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + reference_start: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n The percentage of the total timesteps when the reference guidance is to be started.\n ', + }), + ) + .default(0), + controlnets: z + .optional( + z.array(zSchemaControlNet).register(z.globalRegistry, { + description: + '\n The controlnets to use for the image generation.
Only one controlnet is supported at the moment.\n ', + }), + ) + .default([]), +}) + +/** + * LivePortraitImageOutput + */ +export const zSchemaLivePortraitImageOutput = z.object({ + image: zSchemaImage, +}) + +/** + * LivePortraitImageInput + */ +export const zSchemaLivePortraitImageInput = z.object({ + smile: z + .optional( + z.number().gte(-2).lte(2).register(z.globalRegistry, { + description: 'Amount to smile', + }), + ) + .default(0), + eyebrow: z + .optional( + z.number().gte(-30).lte(30).register(z.globalRegistry, { + description: 'Amount to raise or lower eyebrows', + }), + ) + .default(0), + rotate_roll: z + .optional( + z.number().gte(-45).lte(45).register(z.globalRegistry, { + description: 'Amount to rotate the face in roll', + }), + ) + .default(0), + wink: z + .optional( + z.number().gte(0).lte(25).register(z.globalRegistry, { + description: 'Amount to wink', + }), + ) + .default(0), + rotate_pitch: z + .optional( + z.number().gte(-45).lte(45).register(z.globalRegistry, { + description: 'Amount to rotate the face in pitch', + }), + ) + .default(0), + blink: z + .optional( + z.number().gte(-30).lte(30).register(z.globalRegistry, { + description: 'Amount to blink the eyes', + }), + ) + .default(0), + dsize: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Size of the output image.', + }), + ) + .default(512), + vy_ratio: z + .optional( + z.number().register(z.globalRegistry, { + description: + 'Vertical offset ratio for face crop. Positive values move up, negative values move down.', + }), + ) + .default(-0.125), + scale: z + .optional( + z.number().register(z.globalRegistry, { + description: 'Scaling factor for the face crop.', + }), + ) + .default(2.3), + pupil_x: z + .optional( + z.number().gte(-45).lte(45).register(z.globalRegistry, { + description: 'Amount to move pupils horizontally', + }), + ) + .default(0), + flag_pasteback: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to paste-back/stitch the animated face cropping from the face-cropping space to the original image space.', + }), + ) + .default(true), + eee: z + .optional( + z.number().gte(-40).lte(40).register(z.globalRegistry, { + description: "Amount to shape mouth in 'eee' position", + }), + ) + .default(0), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n Whether to enable the safety checker. 
If enabled, the model will check if the input image contains a face before processing it.\n The safety checker will process the input image\n ', + }), + ) + .default(false), + vx_ratio: z + .optional( + z.number().register(z.globalRegistry, { + description: 'Horizontal offset ratio for face crop.', + }), + ) + .default(0), + pupil_y: z + .optional( + z.number().gte(-45).lte(45).register(z.globalRegistry, { + description: 'Amount to move pupils vertically', + }), + ) + .default(0), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'Output format', + }), + ), + rotate_yaw: z + .optional( + z.number().gte(-45).lte(45).register(z.globalRegistry, { + description: 'Amount to rotate the face in yaw', + }), + ) + .default(0), + flag_do_rot: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to conduct the rotation when flag_do_crop is True.', + }), + ) + .default(true), + woo: z + .optional( + z.number().gte(-100).lte(100).register(z.globalRegistry, { + description: "Amount to shape mouth in 'woo' position", + }), + ) + .default(0), + aaa: z + .optional( + z.number().gte(-200).lte(200).register(z.globalRegistry, { + description: "Amount to open mouth in 'aaa' shape", + }), + ) + .default(0), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to be animated', + }), + flag_do_crop: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to crop the source portrait to the face-cropping space.', + }), + ) + .default(true), + flag_lip_zero: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to set the lip to closed state before animation. Only takes effect when flag_eye_retargeting and flag_lip_retargeting are False.', + }), + ) + .default(true), +}) + +/** + * Output + */ +export const zSchemaBirefnetV2Output = z.object({ + image: zSchemaImageFile, + mask_image: z.optional(zSchemaImageFile), +}) + +/** + * InputV2 + */ +export const zSchemaBirefnetV2Input = z.object({ + operating_resolution: z.optional( + z.enum(['1024x1024', '2048x2048', '2304x2304']).register(z.globalRegistry, { + description: + "The resolution to operate on. The higher the resolution, the more accurate the output will be for high res input images. 
The '2304x2304' option is only available for the 'General Use (Dynamic)' model.", + }), + ), + output_format: z.optional( + z.enum(['webp', 'png', 'gif']).register(z.globalRegistry, { + description: 'The format of the output image', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to remove background from', + }), + model: z.optional( + z + .enum([ + 'General Use (Light)', + 'General Use (Light 2K)', + 'General Use (Heavy)', + 'Matting', + 'Portrait', + 'General Use (Dynamic)', + ]) + .register(z.globalRegistry, { + description: + "\n Model to use for background removal.\n The 'General Use (Light)' model is the original model used in the BiRefNet repository.\n The 'General Use (Light 2K)' model is the original model used in the BiRefNet repository but trained with 2K images.\n The 'General Use (Heavy)' model is a slower but more accurate model.\n The 'Matting' model is a model trained specifically for matting images.\n The 'Portrait' model is a model trained specifically for portrait images.\n The 'General Use (Dynamic)' model supports dynamic resolutions from 256x256 to 2304x2304.\n The 'General Use (Light)' model is recommended for most use cases.\n\n The corresponding models are as follows:\n - 'General Use (Light)': BiRefNet\n - 'General Use (Light 2K)': BiRefNet_lite-2K\n - 'General Use (Heavy)': BiRefNet_lite\n - 'Matting': BiRefNet-matting\n - 'Portrait': BiRefNet-portrait\n - 'General Use (Dynamic)': BiRefNet_dynamic\n ", + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + output_mask: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to output the mask used to remove the background', + }), + ) + .default(false), + refine_foreground: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to refine the foreground using the estimated mask', + }), + ) + .default(true), +}) + +/** + * Output + */ +export const zSchemaFluxPulidOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * FluxPulidInput + */ +export const zSchemaFluxPulidInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + id_weight: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The weight of the ID loss.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + start_step: z + .optional( + z.int().gte(0).lte(50).register(z.globalRegistry, { + description: 'The number of steps to start the CFG from.', + }), + ) + .default(0), + reference_image_url: z.string().register(z.globalRegistry, { + description: 'URL of the face reference image for identity (ID) conditioning.', + }), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + max_sequence_length: z.optional( + z.enum(['128', '256', '512']).register(z.globalRegistry, { + description: 'The maximum sequence length for the model.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ', + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(4), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(20), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to steer the image generation away from unwanted features.', + }), + ) + .default(''), + true_cfg: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'The weight of the CFG loss.', + }), + ) + .default(1), +}) + +/** + * Output + */ +export const zSchemaFluxDifferentialDiffusionOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image.
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + timings: z.record(z.string(), z.number()), +}) + +/** + * DiffInput + */ +export const zSchemaFluxDifferentialDiffusionInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to use as initial image.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + strength: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: + 'The strength to use for image-to-image. 1.0 completely remakes the image, while 0.0 preserves the original.', + }), + ) + .default(0.85), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + change_map_image_url: z.string().register(z.globalRegistry, { + description: 'URL of change map.', + }), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), +}) + +/** + * Output + */ +export const zSchemaIclightV2Output = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image.
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * BaseInput + */ +export const zSchemaIclightV2Input = z.object({ + initial_latent: z.optional( + z + .enum(['None', 'Left', 'Right', 'Top', 'Bottom']) + .register(z.globalRegistry, { + description: + '\n Provide lighting conditions for the model\n ', + }), + ), + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + background_threshold: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: + 'Threshold for the background removal algorithm. A high threshold will produce sharper masks. Note: This parameter is currently deprecated and has no effect on the output.', + }), + ) + .default(0.67), + mask_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'URL of mask to be used for ic-light conditioning image', + }), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(5), + lowres_denoise: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: 'Strength for low-resolution pass.', + }), + ) + .default(0.98), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative Prompt for the image', + }), + ) + .default(''), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + hr_downscale: z.optional(z.number().gte(0.01).lte(1)).default(0.5), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to be used for relighting', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ', + }), + ) + .default(false), + highres_denoise: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: + 'Strength for high-resolution pass. 
Only used if enable_hr_fix is True.', + }), + ) + .default(0.95), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + enable_hr_fix: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Use HR fix', + }), + ) + .default(false), + cfg: z + .optional( + z.number().gte(0.01).lte(5).register(z.globalRegistry, { + description: + 'The real classifier-free-guidance scale for the generation.', + }), + ) + .default(1), +}) + +/** + * Output + */ +export const zSchemaKolorsImageToImageOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * KolorsImg2ImgInput + */ +export const zSchemaKolorsImageToImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to use for image to image', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and\n uploaded before returning the response. This will increase the latency of\n the function but it allows you to get the image directly in the response\n without going through the CDN.\n ', + }), + ) + .default(false), + scheduler: z.optional( + z + .enum([ + 'EulerDiscreteScheduler', + 'EulerAncestralDiscreteScheduler', + 'DPMSolverMultistepScheduler', + 'DPMSolverMultistepScheduler_SDE_karras', + 'UniPCMultistepScheduler', + 'DEISMultistepScheduler', + ]) + .register(z.globalRegistry, { + description: 'The scheduler to use for the model.', + }), + ), + strength: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: + 'The strength to use for image-to-image. 
1.0 completely remakes the image, while 0.0 preserves the original.', + }), + ) + .default(0.85), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show\n you.\n ', + }), + ) + .default(5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(150).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(50), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Seed', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small\n details (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Enable safety checker.', + }), + ) + .default(true), +}) + +/** + * Image + */ +export const zSchemaRegistryImageFastSdxlModelsImage = z.object({ + height: z.int(), + content_type: z.optional(z.string()).default('image/jpeg'), + url: z.string(), + width: z.int(), +}) + +/** + * Output + */ +export const zSchemaFluxProV1FillOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z + .array(zSchemaRegistryImageFastSdxlModelsImage) + .register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * FluxProFillInput + */ +export const zSchemaFluxProV1FillInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to fill the masked part of the image.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: + 'The image URL to generate an image from. Needs to match the dimensions of the mask.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + safety_tolerance: z.optional( + z.enum(['1', '2', '3', '4', '5', '6']).register(z.globalRegistry, { + description: + 'The safety tolerance level for the generated image.
1 being the most strict and 6 being the most permissive.', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + mask_url: z.string().register(z.globalRegistry, { + description: + 'The mask URL to inpaint the image. Needs to match the dimensions of the input image.', + }), + enhance_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enhance the prompt for better results.', + }), + ) + .default(false), +}) + +/** + * Output + */ +export const zSchemaFluxLoraDepthOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * DepthInput + */ +export const zSchemaFluxLoraDepthInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: + 'The number of images to generate. This is always set to 1 for streaming output.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to use for depth input', + }), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response.
This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ', + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(35).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * Output + */ +export const zSchemaFluxProV11UltraReduxOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z + .array(zSchemaRegistryImageFastSdxlModelsImage) + .register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * FluxProUltraTextToImageInputRedux + */ +export const zSchemaFluxProV11UltraReduxInput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + ) + .default(''), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + aspect_ratio: z.optional( + z.union([ + z.enum([ + '21:9', + '16:9', + '4:3', + '3:2', + '1:1', + '2:3', + '3:4', + '9:16', + '9:21', + ]), + z.string(), + ]), + ), + enhance_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enhance the prompt for better results.', + }), + ) + .default(false), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: + 'The image URL to generate an image from. Needs to match the dimensions of the mask.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + safety_tolerance: z.optional( + z.enum(['1', '2', '3', '4', '5', '6']).register(z.globalRegistry, { + description: + 'The safety tolerance level for the generated image. 
1 being the most strict and 6 being the most permissive.', + }), + ), + image_prompt_strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The strength of the image prompt, between 0 and 1.', + }), + ) + .default(0.1), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + raw: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Generate less processed, more natural-looking images.', + }), + ) + .default(false), +}) + +/** + * Output + */ +export const zSchemaFluxDevReduxOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * BaseReduxInput + */ +export const zSchemaFluxDevReduxInput = z.object({ + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: + 'The speed of the generation.
The higher the speed, the faster the generation.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to generate an image from.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + seed: z.optional(z.union([z.int(), z.unknown()])), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), +}) + +/** + * Output + */ +export const zSchemaFluxProV11ReduxOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z + .array(zSchemaRegistryImageFastSdxlModelsImage) + .register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * FluxProRedux + */ +export const zSchemaFluxProV11ReduxInput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + ) + .default(''), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: + 'The image URL to generate an image from. Needs to match the dimensions of the mask.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + safety_tolerance: z.optional( + z.enum(['1', '2', '3', '4', '5', '6']).register(z.globalRegistry, { + description: + 'The safety tolerance level for the generated image. 
1 being the most strict and 6 being the most permissive.', + }), + ), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + enhance_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enhance the prompt for better results.', + }), + ) + .default(false), +}) + +/** + * Output + */ +export const zSchemaFluxSchnellReduxOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * SchnellReduxInput + */ +export const zSchemaFluxSchnellReduxInput = z.object({ + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: + 'The speed of the generation. 
The higher the speed, the faster the generation.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to generate an image from.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + seed: z.optional(z.union([z.int(), z.unknown()])), + num_inference_steps: z + .optional( + z.int().gte(1).lte(12).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(4), +}) + +/** + * Output + */ +export const zSchemaIdeogramV2RemixOutput = z.object({ + images: z.array(zSchemaFile), + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for the random number generator', + }), +}) + +/** + * RemixImageInput + */ +export const zSchemaIdeogramV2RemixInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to remix the image with', + }), + aspect_ratio: z.optional( + z + .enum([ + '10:16', + '16:10', + '9:16', + '16:9', + '4:3', + '3:4', + '1:1', + '1:3', + '3:1', + '3:2', + '2:3', + ]) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated image', + }), + ), + style: z.optional( + z + .enum(['auto', 'general', 'realistic', 'design', 'render_3D', 'anime']) + .register(z.globalRegistry, { + description: 'The style of the generated image', + }), + ), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to expand the prompt with MagicPrompt functionality.', + }), + ) + .default(true), + image_url: z.string().register(z.globalRegistry, { + description: 'The image URL to remix', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + strength: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: 'Strength of the input image in the remix', + }), + ) + .default(0.8), + seed: z.optional(z.union([z.int(), z.unknown()])), +}) + +/** + * Output + */ +export const zSchemaIdeogramV2TurboRemixOutput = z.object({ + images: z.array(zSchemaFile), + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for the random number generator', + }), +}) + +/** + * RemixImageInput + */ +export const zSchemaIdeogramV2TurboRemixInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to remix the image with', + }), + aspect_ratio: z.optional( + z + .enum([ + '10:16', + '16:10', + '9:16', + '16:9', + '4:3', + '3:4', + '1:1', + '1:3', + '3:1', + '3:2', + '2:3', + ]) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated image', + }), + ), + style: z.optional( + z + .enum(['auto', 'general', 'realistic', 'design', 'render_3D', 'anime']) + .register(z.globalRegistry, { + description: 'The style of the generated image', + }), + ), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { 
+ description: + 'Whether to expand the prompt with MagicPrompt functionality.', + }), + ) + .default(true), + image_url: z.string().register(z.globalRegistry, { + description: 'The image URL to remix', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + strength: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: 'Strength of the input image in the remix', + }), + ) + .default(0.8), + seed: z.optional(z.union([z.int(), z.unknown()])), +}) + +/** + * Output + */ +export const zSchemaIdeogramV2TurboEditOutput = z.object({ + images: z.array(zSchemaFile), + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for the random number generator', + }), +}) + +/** + * EditImageInput + */ +export const zSchemaIdeogramV2TurboEditInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to fill the masked part of the image.', + }), + style: z.optional( + z + .enum(['auto', 'general', 'realistic', 'design', 'render_3D', 'anime']) + .register(z.globalRegistry, { + description: 'The style of the generated image', + }), + ), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to expand the prompt with MagicPrompt functionality.', + }), + ) + .default(true), + image_url: z.string().register(z.globalRegistry, { + description: + 'The image URL to generate an image from. Needs to match the dimensions of the mask.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + seed: z.optional(z.union([z.int(), z.unknown()])), + mask_url: z.string().register(z.globalRegistry, { + description: + 'The mask URL to inpaint the image. Needs to match the dimensions of the input image.', + }), +}) + +/** + * Output + */ +export const zSchemaIdeogramV2EditOutput = z.object({ + images: z.array(zSchemaFile), + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for the random number generator', + }), +}) + +/** + * EditImageInput + */ +export const zSchemaIdeogramV2EditInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to fill the masked part of the image.', + }), + style: z.optional( + z + .enum(['auto', 'general', 'realistic', 'design', 'render_3D', 'anime']) + .register(z.globalRegistry, { + description: 'The style of the generated image', + }), + ), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to expand the prompt with MagicPrompt functionality.', + }), + ) + .default(true), + image_url: z.string().register(z.globalRegistry, { + description: + 'The image URL to generate an image from. Needs to match the dimensions of the mask.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + seed: z.optional(z.union([z.int(), z.unknown()])), + mask_url: z.string().register(z.globalRegistry, { + description: + 'The mask URL to inpaint the image. 
Needs to match the dimensions of the input image.', + }), +}) + +/** + * VTONOutput + */ +export const zSchemaLeffaVirtualTryonOutput = z.object({ + image: zSchemaImage, + seed: z.int().register(z.globalRegistry, { + description: 'The seed for the inference.', + }), + has_nsfw_concepts: z.boolean().register(z.globalRegistry, { + description: 'Whether the image contains NSFW concepts.', + }), +}) + +/** + * VTONInput + */ +export const zSchemaLeffaVirtualTryonInput = z.object({ + garment_image_url: z.string().register(z.globalRegistry, { + description: 'Url to the garment image.', + }), + human_image_url: z.string().register(z.globalRegistry, { + description: 'Url for the human image.', + }), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ', + }), + ) + .default(false), + garment_type: z + .enum(['upper_body', 'lower_body', 'dresses']) + .register(z.globalRegistry, { + description: 'The type of the garment used for virtual try-on.', + }), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your input when generating the image.\n ', + }), + ) + .default(2.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(50), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same input given to the same version of the model\n will output the same image every time.\n ', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * PoseTransferOutput + */ +export const zSchemaLeffaPoseTransferOutput = z.object({ + image: zSchemaImage, + seed: z.int().register(z.globalRegistry, { + description: 'The seed for the inference.', + }), + has_nsfw_concepts: z.boolean().register(z.globalRegistry, { + description: 'Whether the image contains NSFW concepts.', + }), +}) + +/** + * PoseTransferInput + */ +export const zSchemaLeffaPoseTransferInput = z.object({ + pose_image_url: z.string().register(z.globalRegistry, { + description: 'Url for the pose image.', + }), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. 
This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ', + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your input when generating the image.\n ', + }), + ) + .default(2.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(50), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same input given to the same version of the model\n will output the same image every time.\n ', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + person_image_url: z.string().register(z.globalRegistry, { + description: 'Url to the person image.', + }), +}) + +/** + * CATVTONOutput + */ +export const zSchemaCatVtonOutput = z.object({ + image: zSchemaImage, +}) + +/** + * CATVTONInput + */ +export const zSchemaCatVtonInput = z.object({ + garment_image_url: z.string().register(z.globalRegistry, { + description: 'Url to the garment image.', + }), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + human_image_url: z.string().register(z.globalRegistry, { + description: 'Url for the human image.', + }), + cloth_type: z + .enum(['upper', 'lower', 'overall', 'inner', 'outer']) + .register(z.globalRegistry, { + description: + '\n Type of the Cloth to be tried on.\n\n Options:\n upper: Upper body cloth\n lower: Lower body cloth\n overall: Full body cloth\n inner: Inner cloth, like T-shirt inside a jacket\n outer: Outer cloth, like a jacket over a T-shirt\n ', + }), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(2.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(30), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same input given to the same version of the model\n will output the same image every time.\n ', + }), + ), +}) + +/** + * BGRemoveOutput + */ +export const zSchemaBriaBackgroundRemoveOutput = z.object({ + image: zSchemaImage, +}) + +/** + * BGRemoveInput + */ +export const zSchemaBriaBackgroundRemoveInput = z.object({ + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + image_url: z.string().register(z.globalRegistry, { + description: 'Input Image to erase from', + }), +}) + +/** + * ProductShotOutput + */ +export const zSchemaBriaProductShotOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated images', + }), +}) +
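+// Editor's note (illustrative, not emitted by the generator): each endpoint is
+// described by an `...Input`/`...Output` schema pair, so a payload can be
+// validated before it is submitted to the queue. Hypothetical usage:
+//
+//   const parsed = zSchemaCatVtonInput.safeParse({
+//     garment_image_url: 'https://example.com/shirt.png',
+//     human_image_url: 'https://example.com/person.png',
+//     cloth_type: 'upper',
+//   })
+//   if (!parsed.success) console.error(parsed.error.issues)
+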
+/** + * ProductShotInput + */ +export const zSchemaBriaProductShotInput = z.object({ + ref_image_url: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'The URL of the reference image to be used for generating the new scene or background for the product shot. Use "" to leave empty. Either ref_image_url or scene_description has to be provided but not both. If both ref_image_url and ref_image_file are provided, ref_image_url will be used. Accepted formats are jpeg, jpg, png, webp.', + }), + ) + .default(''), + manual_placement_selection: z.optional( + z + .enum([ + 'upper_left', + 'upper_right', + 'bottom_left', + 'bottom_right', + 'right_center', + 'left_center', + 'upper_center', + 'bottom_center', + 'center_vertical', + 'center_horizontal', + ]) + .register(z.globalRegistry, { + description: + "If you've selected placement_type=manual_placement, you should use this parameter to specify which placements/positions you would like to use from the list. You can select more than one placement in one request.", + }), + ), + num_results: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: + 'The number of lifestyle product shots you would like to generate. You will get num_results x 10 results when placement_type=automatic and according to the number of required placements x num_results if placement_type=manual_placement.', + }), + ) + .default(1), + padding_values: z.optional( + z.array(z.int()).register(z.globalRegistry, { + description: + 'The desired padding in pixels around the product, when using placement_type=manual_padding. The order of the values is [left, right, top, bottom]. For optimal results, the total number of pixels, including padding, should be around 1,000,000. It is recommended to first use the product cutout API, get the cutout and understand the size of the result, and then define the required padding and use the cutout as an input for this API.', + }), + ), + shot_size: z + .optional( + z.array(z.int()).register(z.globalRegistry, { + description: + 'The desired size of the final product shot. For optimal results, the total number of pixels should be around 1,000,000. This parameter is only relevant when placement_type=automatic or placement_type=manual_placement.', + }), + ) + .default([1000, 1000]), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + placement_type: z.optional( + z + .enum(['original', 'automatic', 'manual_placement', 'manual_padding']) + .register(z.globalRegistry, { + description: + "This parameter allows you to control the positioning of the product in the image. Choosing 'original' will preserve the original position of the product in the image. Choosing 'automatic' will generate results with the 10 recommended positions for the product. Choosing 'manual_placement' will allow you to select predefined positions (using the parameter 'manual_placement_selection'). Selecting 'manual_padding' will allow you to control the position and size of the image by defining the desired padding in pixels around the product.", + }), + ), + original_quality: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "This flag is only relevant when placement_type=original. 
If true, the output image retains the original input image's size; otherwise, the image is scaled to 1 megapixel (1MP) while preserving its aspect ratio.", + }), + ) + .default(false), + fast: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to use the fast model', + }), + ) + .default(true), + optimize_description: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to optimize the scene description', + }), + ) + .default(true), + scene_description: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Text description of the new scene or background for the provided product shot. Bria currently supports prompts in English only, excluding special characters.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the product shot to be placed in a lifestyle shot. If both image_url and image_file are provided, image_url will be used. Accepted formats are jpeg, jpg, png, webp. Maximum file size 12MB.', + }), +}) + +/** + * EraserOutput + */ +export const zSchemaBriaEraserOutput = z.object({ + image: zSchemaImage, +}) + +/** + * EraserInput + */ +export const zSchemaBriaEraserInput = z.object({ + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + preserve_alpha: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, attempts to preserve the alpha channel of the input image.\n ', + }), + ) + .default(false), + mask_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the binary mask image that represents the area that will be cleaned.', + }), + mask_type: z.optional( + z.enum(['manual', 'automatic']).register(z.globalRegistry, { + description: + "You can use this parameter to specify the type of the input mask from the list. 'manual' option should be used in cases in which the mask had been generated by a user (e.g. with a brush tool), and 'automatic' mask type should be used when mask had been generated by an algorithm like 'SAM'.", + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'Input Image to erase from', + }), +}) + +/** + * BGReplaceOutput + */ +export const zSchemaBriaBackgroundReplaceOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated images', + }), + seed: z.int().register(z.globalRegistry, { + description: 'Seed value used for generation.', + }), +}) + +/** + * BGReplaceInput + */ +export const zSchemaBriaBackgroundReplaceInput = z.object({ + prompt: z.optional( + z.string().min(1).register(z.globalRegistry, { + description: 'The prompt you would like to use to generate images.', + }), + ), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of Images to generate.', + }), + ) + .default(1), + ref_image_url: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'The URL of the reference image to be used for generating the new background. Use "" to leave empty. Either ref_image_url or bg_prompt has to be provided but not both. If both ref_image_url and ref_image_file are provided, ref_image_url will be used. 
Accepted formats are jpeg, jpg, png, webp.', + }), + ) + .default(''), + refine_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to refine prompt', + }), + ) + .default(true), + image_url: z.string().register(z.globalRegistry, { + description: 'Input Image to erase from', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + fast: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to use the fast model', + }), + ) + .default(true), + seed: z.optional( + z.int().gte(0).lte(2147483647).register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'The negative prompt you would like to use to generate images.', + }), + ) + .default(''), +}) + +/** + * Output + */ +export const zSchemaFluxLoraFillOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + timings: z.record(z.string(), z.number()), +}) + +/** + * FillInput + */ +export const zSchemaFluxLoraFillInput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + ) + .default(''), + resize_to_original: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Resizes the image back to the original size. Use when you wish to preserve the exact image size as the originally provided image.', + }), + ) + .default(false), + paste_back: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Specifies whether to paste-back the original image onto the non-inpainted areas of the output', + }), + ) + .default(true), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. 
You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + guidance_scale: z + .optional( + z.number().gte(28).lte(35).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(30), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: + 'The number of images to generate. This is always set to 1 for streaming output.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to use for fill operation', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + fill_image: z.optional(zSchemaImageFillInput), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + mask_url: z.string().register(z.globalRegistry, { + description: '\n The mask of the area to inpaint in.\n ', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), +}) + +/** + * GenFillOutput + */ +export const zSchemaBriaGenfillOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Generated Images', + }), +}) + +/** + * GenFillInput + */ +export const zSchemaBriaGenfillInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt you would like to use to generate images.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of Images to generate.', + }), + ) + .default(1), + image_url: z.string().register(z.globalRegistry, { + description: 'Input Image to erase from', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + seed: z.optional( + z.int().gte(0).lte(2147483647).register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + mask_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the binary mask image that represents the area that will be cleaned.', + }), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'The negative prompt you would like to use to generate images.', + }), + ) + .default(''), +}) + +/** + * ImageExpansionOutput + */ +export const zSchemaBriaExpandOutput = z.object({ + image: zSchemaImage, + seed: z.int().register(z.globalRegistry, { + description: 'Seed value used for generation.', + }), +}) + 
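+// Editor's note (illustrative, not part of the generated file): response types
+// can be derived from the output schemas with `z.infer`. Hypothetical usage:
+//
+//   type BriaExpandOutput = z.infer<typeof zSchemaBriaExpandOutput>
+//   // => { image: ...; seed: number }
+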
+/** + * ImageExpansionInput + */ +export const zSchemaBriaExpandInput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Text on which you wish to base the image expansion. This parameter is optional. Bria currently supports prompts in English only, excluding special characters.', + }), + ) + .default(''), + aspect_ratio: z.optional( + z + .enum(['1:1', '2:3', '3:2', '3:4', '4:3', '4:5', '5:4', '9:16', '16:9']) + .register(z.globalRegistry, { + description: + 'The desired aspect ratio of the final image. Will be used over original_image_size and original_image_location if provided.', + }), + ), + original_image_location: z.optional( + z.array(z.int()).register(z.globalRegistry, { + description: + 'The desired location of the original image, inside the full canvas. Provide the location of the upper left corner of the original image. The location can also be outside the canvas (the original image will be cropped). Will be ignored if aspect_ratio is provided.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the input image.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + original_image_size: z.optional( + z.array(z.int()).register(z.globalRegistry, { + description: + 'The desired size of the original image, inside the full canvas. Ensure that the ratio of input image foreground or main subject to the canvas area is greater than 15% to achieve optimal results. Will be ignored if aspect_ratio is provided.', + }), + ), + canvas_size: z.array(z.int()).register(z.globalRegistry, { + description: + 'The desired size of the final image, after the expansion. Should have an area of less than 5000x5000 pixels.', + }), + seed: z.optional( + z.int().gte(0).lte(2147483647).register(z.globalRegistry, { + description: + 'You can choose whether you want your generated expansion to be random or predictable. You can recreate the same result in the future by using the seed value of a result from the response. You can exclude this parameter if you are not interested in recreating your results. 
This parameter is optional.', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'The negative prompt you would like to use to generate images.', + }), + ) + .default(''), +}) + +/** + * DetectionOutput + */ +export const zSchemaMoondreamNextDetectionOutput = z.object({ + image: z.optional(zSchemaImage), + text_output: z.string().register(z.globalRegistry, { + description: 'Detection results as text', + }), +}) + +/** + * DetectionInput + */ +export const zSchemaMoondreamNextDetectionInput = z.object({ + detection_prompt: z.string().register(z.globalRegistry, { + description: 'Text description of what to detect', + }), + use_ensemble: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to use ensemble for gaze detection', + }), + ) + .default(false), + task_type: z + .enum(['bbox_detection', 'point_detection', 'gaze_detection']) + .register(z.globalRegistry, { + description: 'Type of detection to perform', + }), + show_visualization: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to show visualization for detection', + }), + ) + .default(true), + combine_points: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to combine points into a single point for point detection. This has no effect for bbox detection or gaze detection.', + }), + ) + .default(false), + image_url: z.string().register(z.globalRegistry, { + description: 'Image URL to be processed', + }), +}) + +/** + * Output + */ +export const zSchemaFluxProV1FillFinetunedOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z + .array(zSchemaRegistryImageFastSdxlModelsImage) + .register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * FluxProFillFinetunedInput + */ +export const zSchemaFluxProV1FillFinetunedInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to fill the masked part of the image.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + finetune_strength: z.number().gte(0).lte(2).register(z.globalRegistry, { + description: + "\n Controls finetune influence.\n Increase this value if your target concept isn't showing up strongly enough.\n The optimal setting depends on your finetune and prompt\n ", + }), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + finetune_id: z.string().register(z.globalRegistry, { + description: 'References your specific model', + }), + image_url: z.string().register(z.globalRegistry, { + description: + 'The image URL to generate an image from. 
Needs to match the dimensions of the mask.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + safety_tolerance: z.optional( + z.enum(['1', '2', '3', '4', '5', '6']).register(z.globalRegistry, { + description: + 'The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive.', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + mask_url: z.string().register(z.globalRegistry, { + description: + 'The mask URL to inpaint the image. Needs to match the dimensions of the input image.', + }), + enhance_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enhance the prompt for better results.', + }), + ) + .default(false), +}) + +/** + * Output + */ +export const zSchemaFluxLoraCannyOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * CannyInput + */ +export const zSchemaFluxLoraCannyInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to use for canny input', + }), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. 
This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ', + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(20).lte(40).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(30), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * TryOnOutput + */ +export const zSchemaKlingV15KolorsVirtualTryOnOutput = z.object({ + image: zSchemaImage, +}) + +/** + * TryOnRequest + */ +export const zSchemaKlingV15KolorsVirtualTryOnInput = z.object({ + garment_image_url: z.string().register(z.globalRegistry, { + description: 'Url to the garment image.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the function will return the image in the response.', + }), + ) + .default(false), + human_image_url: z.string().register(z.globalRegistry, { + description: 'Url for the human image.', + }), +}) + +/** + * ConformerOutput + */ +export const zSchemaCodeformerOutput = z.object({ + image: zSchemaImage, + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * CodeformerInput + */ +export const zSchemaCodeformerInput = z.object({ + aligned: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether faces etc. should be aligned.', + }), + ) + .default(false), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to be used for face restoration', + }), + upscale_factor: z + .optional( + z.number().register(z.globalRegistry, { + description: 'Upscaling factor', + }), + ) + .default(2), + fidelity: z + .optional( + z.number().register(z.globalRegistry, { + description: 'Weight of the fidelity factor.', + }), + ) + .default(0.5), + face_upscale: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Should faces be upscaled', + }), + ) + .default(true), + only_center_face: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Should only center face be restored', + }), + ) + .default(false), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for reproducible generation.', + }), + ), +}) + +/** + * UpscaleOutput + */ +export const zSchemaIdeogramUpscaleOutput = z.object({ + images: z.array(zSchemaFile), + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for the random number generator', + }), +}) + +/** + * UpscaleImageInput + */ +export const zSchemaIdeogramUpscaleInput = z.object({ + prompt: z.optional(z.union([z.string(), z.unknown()])), + detail: z + .optional( + z.int().gte(1).lte(100).register(z.globalRegistry, { + description: 'The detail of the upscaled image', + }), + ) + .default(50), + resemblance: z + .optional( + z.int().gte(1).lte(100).register(z.globalRegistry, { + description: + 'The resemblance of the upscaled image to the original image', + }), + ) + .default(50), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to expand the prompt with MagicPrompt functionality.', + }), + ) + .default(false), + image_url: z.string().register(z.globalRegistry, { + description: 'The image URL to upscale', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + seed: z.optional(z.union([z.int(), z.unknown()])), +}) +
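+// Editor's note (illustrative, not generated): fields declared as
+// `z.optional(...).default(...)` are filled in at parse time, so a minimal
+// payload is enough. Hypothetical usage:
+//
+//   const upscale = zSchemaIdeogramUpscaleInput.parse({
+//     image_url: 'https://example.com/photo.png',
+//   })
+//   // upscale.detail === 50, upscale.resemblance === 50,
+//   // upscale.sync_mode === false
+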
+/** + * Output + */ +export const zSchemaFluxControlLoraCannyImageToImageOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * ImageToImageInput + */ +export const zSchemaFluxControlLoraCannyImageToImageInput = z.object({ + control_lora_strength: z + .optional( + z.number().gte(0).lte(2).register(z.globalRegistry, { + description: 'The strength of the control lora.', + }), + ) + .default(1), + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + guidance_scale: z + .optional( + z.number().gte(0).lte(35).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to use for inpainting or img2img.', + }), + strength: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: + 'The strength to use for inpainting/image-to-image. Only used if the image_url is provided. 1.0 completely remakes the image while 0.0 preserves the original.', + }), + ) + .default(0.85), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + control_lora_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: + '\n The image to use for control lora. This is used to control the style of the generated image.\n ', + }), + ), +}) +
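+// Editor's note (illustrative, not generated): `image_size` is a union, so it
+// accepts either a named preset or a custom size object (shape defined by
+// zSchemaImageSize earlier in this file). Hypothetical usage:
+//
+//   zSchemaFluxControlLoraCannyImageToImageInput.parse({
+//     prompt: 'a watercolor house',
+//     image_url: 'https://example.com/sketch.png',
+//     image_size: 'landscape_4_3', // or a zSchemaImageSize object
+//   })
+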
+/** + * Ben2OutputImage + */ +export const zSchemaBenV2ImageOutput = z.object({ + image: zSchemaImage, + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * Ben2InputImage + */ +export const zSchemaBenV2ImageInput = z.object({ + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for reproducible generation.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to be used for background removal', + }), +}) + +/** + * Output + */ +export const zSchemaFluxControlLoraDepthImageToImageOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * ImageToImageInput + */ +export const zSchemaFluxControlLoraDepthImageToImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + control_lora_strength: z + .optional( + z.number().gte(0).lte(2).register(z.globalRegistry, { + description: 'The strength of the control lora.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + guidance_scale: z + .optional( + z.number().gte(0).lte(35).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to use for inpainting or img2img.', + }), + strength: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: + 'The strength to use for inpainting/image-to-image. Only used if the image_url is provided. 
1.0 completely remakes the image while 0.0 preserves the original.', + }), + ) + .default(0.85), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + control_lora_image_url: z.string().register(z.globalRegistry, { + description: + '\n The image to use for control lora. This is used to control the style of the generated image.\n ', + }), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), +}) + +/** + * FlowEditOutput + */ +export const zSchemaFloweditOutput = z.object({ + image: zSchemaImage, + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * FlowEditInput + */ +export const zSchemaFloweditInput = z.object({ + src_guidance_scale: z + .optional( + z.number().gte(0).lte(30).register(z.globalRegistry, { + description: 'Guidance scale for the source.', + }), + ) + .default(1.5), + n_min: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Minimum step for improved style edits', + }), + ) + .default(0), + n_max: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Control the strength of the edit', + }), + ) + .default(23), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to be edited', + }), + source_prompt: z.string().register(z.globalRegistry, { + description: 'Prompt of the image to be used.', + }), + tar_guidance_scale: z + .optional( + z.number().gte(0).lte(30).register(z.globalRegistry, { + description: 'Guidance scale for target.', + }), + ) + .default(5.5), + target_prompt: z.string().register(z.globalRegistry, { + description: 'Prompt of the image to be made.', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducible generation. 
If not set, a random seed will be used.', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'Steps for which the model should run.', + }), + ) + .default(28), + n_avg: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Average step count', + }), + ) + .default(1), +}) + +/** + * ProcessedOutput + */ +export const zSchemaPostProcessingOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The processed images', + }), +}) + +/** + * ImageProcessingInput + */ +export const zSchemaPostProcessingInput = z.object({ + blue_shift: z + .optional( + z.int().gte(-20).lte(20).register(z.globalRegistry, { + description: 'Blue channel shift amount', + }), + ) + .default(0), + vertex_y: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Vertex Y position', + }), + ) + .default(0.5), + green_direction: z.optional( + z.enum(['horizontal', 'vertical']).register(z.globalRegistry, { + description: 'Green channel shift direction', + }), + ), + enable_glow: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Enable glow effect', + }), + ) + .default(false), + dodge_burn_mode: z.optional( + z + .enum([ + 'dodge', + 'burn', + 'dodge_and_burn', + 'burn_and_dodge', + 'color_dodge', + 'color_burn', + 'linear_dodge', + 'linear_burn', + ]) + .register(z.globalRegistry, { + description: 'Dodge and burn mode', + }), + ), + glow_intensity: z + .optional( + z.number().gte(0).lte(5).register(z.globalRegistry, { + description: 'Glow intensity', + }), + ) + .default(1), + blur_sigma: z + .optional( + z.number().gte(0.1).lte(10).register(z.globalRegistry, { + description: 'Sigma for Gaussian blur', + }), + ) + .default(1), + desaturate_method: z.optional( + z + .enum([ + 'luminance (Rec.709)', + 'luminance (Rec.601)', + 'average', + 'lightness', + ]) + .register(z.globalRegistry, { + description: 'Desaturation method', + }), + ), + enable_blur: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Enable blur effect', + }), + ) + .default(false), + blur_radius: z + .optional( + z.int().gte(0).lte(31).register(z.globalRegistry, { + description: 'Blur radius', + }), + ) + .default(3), + grain_style: z.optional( + z + .enum(['modern', 'analog', 'kodak', 'fuji', 'cinematic', 'newspaper']) + .register(z.globalRegistry, { + description: 'Style of film grain to apply', + }), + ), + cas_amount: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'CAS sharpening amount', + }), + ) + .default(0.8), + gamma: z + .optional( + z.number().gte(0.2).lte(2.2).register(z.globalRegistry, { + description: 'Gamma adjustment', + }), + ) + .default(1), + tint_mode: z.optional( + z + .enum([ + 'sepia', + 'red', + 'green', + 'blue', + 'cyan', + 'magenta', + 'yellow', + 'purple', + 'orange', + 'warm', + 'cool', + 'lime', + 'navy', + 'vintage', + 'rose', + 'teal', + 'maroon', + 'peach', + 'lavender', + 'olive', + ]) + .register(z.globalRegistry, { + description: 'Tint color mode', + }), + ), + blur_type: z.optional( + z.enum(['gaussian', 'kuwahara']).register(z.globalRegistry, { + description: 'Type of blur to apply', + }), + ), + enable_vignette: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Enable vignette effect', + }), + ) + .default(false), + dissolve_image_url: z + .optional( + z.string().register(z.globalRegistry, { + description: 'URL of second image for dissolve', + }), + ) + 
.default(''), + red_shift: z + .optional( + z.int().gte(-20).lte(20).register(z.globalRegistry, { + description: 'Red channel shift amount', + }), + ) + .default(0), + enable_desaturate: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Enable desaturation effect', + }), + ) + .default(false), + grain_intensity: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Film grain intensity (when enabled)', + }), + ) + .default(0.4), + dodge_burn_intensity: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Dodge and burn intensity', + }), + ) + .default(0.5), + smart_sharpen_strength: z + .optional( + z.number().gte(0).lte(25).register(z.globalRegistry, { + description: 'Smart sharpen strength', + }), + ) + .default(5), + red_direction: z.optional( + z.enum(['horizontal', 'vertical']).register(z.globalRegistry, { + description: 'Red channel shift direction', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to process', + }), + vertex_x: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Vertex X position', + }), + ) + .default(0.5), + tint_strength: z + .optional( + z.number().gte(0.1).lte(1).register(z.globalRegistry, { + description: 'Tint strength', + }), + ) + .default(1), + enable_dissolve: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Enable dissolve effect', + }), + ) + .default(false), + enable_parabolize: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Enable parabolize effect', + }), + ) + .default(false), + enable_grain: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Enable film grain effect', + }), + ) + .default(false), + solarize_threshold: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Solarize threshold', + }), + ) + .default(0.5), + enable_sharpen: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Enable sharpen effect', + }), + ) + .default(false), + enable_dodge_burn: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Enable dodge and burn effect', + }), + ) + .default(false), + glow_radius: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'Glow blur radius', + }), + ) + .default(5), + sharpen_alpha: z + .optional( + z.number().gte(0.1).lte(5).register(z.globalRegistry, { + description: 'Sharpen strength (for basic mode)', + }), + ) + .default(1), + enable_color_correction: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Enable color correction', + }), + ) + .default(false), + contrast: z + .optional( + z.number().gte(-100).lte(100).register(z.globalRegistry, { + description: 'Contrast adjustment', + }), + ) + .default(0), + enable_solarize: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Enable solarize effect', + }), + ) + .default(false), + noise_radius: z + .optional( + z.int().gte(1).lte(25).register(z.globalRegistry, { + description: 'Noise radius for smart sharpen', + }), + ) + .default(7), + grain_scale: z + .optional( + z.number().gte(1).lte(100).register(z.globalRegistry, { + description: 'Film grain scale (when enabled)', + }), + ) + .default(10), + temperature: z + .optional( + z.number().gte(-100).lte(100).register(z.globalRegistry, { + description: 'Color temperature adjustment', + }), + ) + .default(0), + brightness: z + .optional( + 
z.number().gte(-100).lte(100).register(z.globalRegistry, { + description: 'Brightness adjustment', + }), + ) + .default(0), + blue_direction: z.optional( + z.enum(['horizontal', 'vertical']).register(z.globalRegistry, { + description: 'Blue channel shift direction', + }), + ), + dissolve_factor: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Dissolve blend factor', + }), + ) + .default(0.5), + sharpen_mode: z.optional( + z.enum(['basic', 'smart', 'cas']).register(z.globalRegistry, { + description: 'Type of sharpening to apply', + }), + ), + vignette_strength: z + .optional( + z.number().gte(0).lte(10).register(z.globalRegistry, { + description: 'Vignette strength (when enabled)', + }), + ) + .default(0.5), + sharpen_radius: z + .optional( + z.int().gte(1).lte(15).register(z.globalRegistry, { + description: 'Sharpen radius (for basic mode)', + }), + ) + .default(1), + parabolize_coeff: z + .optional( + z.number().gte(-10).lte(10).register(z.globalRegistry, { + description: 'Parabolize coefficient', + }), + ) + .default(1), + saturation: z + .optional( + z.number().gte(-100).lte(100).register(z.globalRegistry, { + description: 'Saturation adjustment', + }), + ) + .default(0), + enable_tint: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Enable color tint effect', + }), + ) + .default(false), + green_shift: z + .optional( + z.int().gte(-20).lte(20).register(z.globalRegistry, { + description: 'Green channel shift amount', + }), + ) + .default(0), + preserve_edges: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Edge preservation factor', + }), + ) + .default(0.75), + desaturate_factor: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Desaturation factor', + }), + ) + .default(1), + smart_sharpen_ratio: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Smart sharpen blend ratio', + }), + ) + .default(0.5), + enable_chromatic: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Enable chromatic aberration', + }), + ) + .default(false), +}) + +/** + * NafnetOutputDenoise + */ +export const zSchemaNafnetDenoiseOutput = z.object({ + image: zSchemaImage, +}) + +/** + * NafnetInputDenoise + */ +export const zSchemaNafnetDenoiseInput = z.object({ + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'seed to be used for generation', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to be used for denoising', + }), +}) + +/** + * NafnetOutput + */ +export const zSchemaNafnetDeblurOutput = z.object({ + image: zSchemaImage, +}) + +/** + * NafnetInput + */ +export const zSchemaNafnetDeblurInput = z.object({ + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'seed to be used for generation', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to be used for deblurring', + }), +}) + +/** + * Output + */ +export const zSchemaDrctSuperResolutionOutput = z.object({ + image: zSchemaImage, +}) + +/** + * Input + */ +export const zSchemaDrctSuperResolutionInput = z.object({ + upscale_factor: z.optional( + z.literal(4).register(z.globalRegistry, { + description: 'Upscaling factor.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to upscale.', + }), +}) + +/** + * SAM2AutomaticSegmentationOutput + */ +export const 
zSchemaSam2AutoSegmentOutput = z.object({ + combined_mask: zSchemaImage, + individual_masks: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Individual segmentation masks.', + }), +}) + +/** + * SAM2AutomaticSegmentationInput + */ +export const zSchemaSam2AutoSegmentInput = z.object({ + points_per_side: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Number of points to sample along each side of the image.', + }), + ) + .default(32), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + min_mask_region_area: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Minimum area of a mask region.', + }), + ) + .default(100), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to be automatically segmented', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + pred_iou_thresh: z + .optional( + z.number().register(z.globalRegistry, { + description: 'Threshold for predicted IOU score.', + }), + ) + .default(0.88), + stability_score_thresh: z + .optional( + z.number().register(z.globalRegistry, { + description: 'Threshold for stability score.', + }), + ) + .default(0.95), +}) + +/** + * DDColorOutput + */ +export const zSchemaDdcolorOutput = z.object({ + image: zSchemaImage, +}) + +/** + * DDColorInput + */ +export const zSchemaDdcolorInput = z.object({ + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'seed to be used for generation', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to be used for colorization', + }), +}) + +/** + * ImageOutput + */ +export const zSchemaEvfSamOutput = z.object({ + image: zSchemaFile, +}) + +/** + * ImageInput + */ +export const zSchemaEvfSamInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate segmentation from.', + }), + use_grounding_dino: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Use GroundingDINO instead of SAM for segmentation', + }), + ) + .default(false), + semantic_type: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Enable semantic level segmentation for body parts, background or multi objects', + }), + ) + .default(false), + fill_holes: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Fill holes in the mask using morphological operations', + }), + ) + .default(false), + expand_mask: z + .optional( + z.int().gte(0).lte(20).register(z.globalRegistry, { + description: 'Expand/dilate the mask by specified pixels', + }), + ) + .default(0), + mask_only: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Output only the binary mask instead of masked image', + }), + ) + .default(true), + revert_mask: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Invert the mask (background becomes foreground and vice versa)', + }), + ) + .default(false), + blur_mask: z + .optional( + z.int().gte(0).lte(50).register(z.globalRegistry, { + description: + 'Apply Gaussian blur to the mask. 
Value determines kernel size (must be odd number)', + }), + ) + .default(0), + negative_prompt: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Areas to exclude from segmentation (will be subtracted from prompt results)', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the input image', + }), +}) + +/** + * Output + */ +export const zSchemaIdeogramV2aTurboRemixOutput = z.object({ + images: z.array(zSchemaFile), + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for the random number generator', + }), +}) + +/** + * RemixImageInput + */ +export const zSchemaIdeogramV2aTurboRemixInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to remix the image with', + }), + aspect_ratio: z.optional( + z + .enum([ + '10:16', + '16:10', + '9:16', + '16:9', + '4:3', + '3:4', + '1:1', + '1:3', + '3:1', + '3:2', + '2:3', + ]) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated image', + }), + ), + style: z.optional( + z + .enum(['auto', 'general', 'realistic', 'design', 'render_3D', 'anime']) + .register(z.globalRegistry, { + description: 'The style of the generated image', + }), + ), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to expand the prompt with MagicPrompt functionality.', + }), + ) + .default(true), + image_url: z.string().register(z.globalRegistry, { + description: 'The image URL to remix', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + strength: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: 'Strength of the input image in the remix', + }), + ) + .default(0.8), + seed: z.optional(z.union([z.int(), z.unknown()])), +}) + +/** + * Output + */ +export const zSchemaIdeogramV2aRemixOutput = z.object({ + images: z.array(zSchemaFile), + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for the random number generator', + }), +}) + +/** + * RemixImageInput + */ +export const zSchemaIdeogramV2aRemixInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to remix the image with', + }), + aspect_ratio: z.optional( + z + .enum([ + '10:16', + '16:10', + '9:16', + '16:9', + '4:3', + '3:4', + '1:1', + '1:3', + '3:1', + '3:2', + '2:3', + ]) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated image', + }), + ), + style: z.optional( + z + .enum(['auto', 'general', 'realistic', 'design', 'render_3D', 'anime']) + .register(z.globalRegistry, { + description: 'The style of the generated image', + }), + ), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to expand the prompt with MagicPrompt functionality.', + }), + ) + .default(true), + image_url: z.string().register(z.globalRegistry, { + description: 'The image URL to remix', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + strength: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: 'Strength of the input image in the remix', + }), + ) + .default(0.8), + 
seed: z.optional(z.union([z.int(), z.unknown()])), +}) + +/** + * SwinSrOutput + */ +export const zSchemaSwin2SrOutput = z.object({ + image: zSchemaImage, +}) + +/** + * SwinSrInput + */ +export const zSchemaSwin2SrInput = z.object({ + task: z.optional( + z + .enum(['classical_sr', 'compressed_sr', 'real_sr']) + .register(z.globalRegistry, { + description: 'Task to perform', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'seed to be used for generation', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to be used for image enhancement', + }), +}) + +/** + * DocResOutput + */ +export const zSchemaDocresOutput = z.object({ + image: zSchemaImage, +}) + +/** + * DocResInput + */ +export const zSchemaDocresInput = z.object({ + task: z + .enum(['deshadowing', 'appearance', 'deblurring', 'binarization']) + .register(z.globalRegistry, { + description: 'Task to perform', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to be used for document restoration', + }), +}) + +/** + * DocResOutput + */ +export const zSchemaDocresDewarpOutput = z.object({ + image: zSchemaImage, +}) + +/** + * DocResInputDewarp + */ +export const zSchemaDocresDewarpInput = z.object({ + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to be used for dewarping', + }), +}) + +/** + * Output + */ +export const zSchemaJuggernautFluxBaseImageToImageOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * DevImageToImageInput + */ +export const zSchemaJuggernautFluxBaseImageToImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to generate an image from.', + }), + strength: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: + 'The strength of the initial image. 
Higher strength values are better for this model.', + }), + ) + .default(0.95), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(10).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(40), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * Output + */ +export const zSchemaJuggernautFluxProImageToImageOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * DevImageToImageInput + */ +export const zSchemaJuggernautFluxProImageToImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to generate an image from.', + }), + strength: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: + 'The strength of the initial image. 
Higher strength values are better for this model.', + }), + ) + .default(0.95), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(10).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(40), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * WatermarkOutput + */ +export const zSchemaInvisibleWatermarkOutput = z.object({ + image: z.optional(zSchemaImage), + extracted_watermark: z.optional( + z.string().register(z.globalRegistry, { + description: 'The extracted watermark text (when decoding)', + }), + ), + length: z + .optional( + z.int().register(z.globalRegistry, { + description: + 'Length of the watermark bits used (helpful for future decoding)', + }), + ) + .default(0), +}) + +/** + * WatermarkInput + */ +export const zSchemaInvisibleWatermarkInput = z.object({ + decode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to decode a watermark from the image instead of encoding', + }), + ) + .default(false), + watermark: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Text to use as watermark (for encoding only)', + }), + ) + .default('watermark'), + length: z + .optional( + z.int().register(z.globalRegistry, { + description: + 'Length of watermark bits to decode (required when decode=True)', + }), + ) + .default(0), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to be watermarked or decoded', + }), +}) + +/** + * GeminiImageOutput + */ +export const zSchemaGeminiFlashEditOutput = z.object({ + description: z.string().register(z.globalRegistry, { + description: 'Text description or response from Gemini', + }), + image: zSchemaImage, +}) + +/** + * GeminiImageRequest + */ +export const zSchemaGeminiFlashEditInput = z.object({ + prompt: z.string().min(3).max(5000).register(z.globalRegistry, { + description: 'The prompt for image generation or editing', + }), + image_url: z.string().register(z.globalRegistry, { + description: + 'Optional URL of an input image for editing. 
If not provided, generates a new image.', + }), +}) + +/** + * GeminiImageOutput + */ +export const zSchemaGeminiFlashEditMultiOutput = z.object({ + description: z.string().register(z.globalRegistry, { + description: 'Text description or response from Gemini', + }), + image: zSchemaImage, +}) + +/** + * GeminiMultiImageRequest + */ +export const zSchemaGeminiFlashEditMultiInput = z.object({ + prompt: z.string().min(3).max(5000).register(z.globalRegistry, { + description: 'The prompt for image generation or editing', + }), + input_image_urls: z + .array(z.string()) + .min(1) + .max(10) + .register(z.globalRegistry, { + description: 'List of URLs of input images for editing', + }), +}) + +/** + * MixDehazeNetOutput + */ +export const zSchemaMixDehazeNetOutput = z.object({ + image: zSchemaImage, +}) + +/** + * MixDehazeNetInput + */ +export const zSchemaMixDehazeNetInput = z.object({ + model: z.optional( + z.enum(['indoor', 'outdoor']).register(z.globalRegistry, { + description: 'Model to be used for dehazing', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'seed to be used for generation', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to be used for image enhancement', + }), +}) + +/** + * TheraOutput + */ +export const zSchemaTheraOutput = z.object({ + image: zSchemaImage, + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * TheraInput + */ +export const zSchemaTheraInput = z.object({ + upscale_factor: z + .optional( + z.number().gte(1).lte(6).register(z.globalRegistry, { + description: 'The upscaling factor for the image.', + }), + ) + .default(2), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for reproducible generation.', + }), + ), + backbone: z.enum(['edsr', 'rdn']).register(z.globalRegistry, { + description: 'Backbone to use for upscaling', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to be used for upscaling', + }), +}) + +/** + * Output + */ +export const zSchemaGhiblifyOutput = z.object({ + image: zSchemaImage, +}) + +/** + * Input + */ +export const zSchemaGhiblifyInput = z.object({ + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + seed: z.optional(z.union([z.int(), z.unknown()])), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to apply Ghibli style to.', + }), +}) + +/** + * StarVectorOutput + */ +export const zSchemaStarVectorOutput = z.object({ + image: zSchemaFile, + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * StarVectorInput + */ +export const zSchemaStarVectorInput = z.object({ + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'seed to be used for generation', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to be used for vectorization', + }), +}) + +/** + * EraseOutput + */ +export const zSchemaFinegrainEraserOutput = z.object({ + image: zSchemaFile, + used_seed: z.int().register(z.globalRegistry, { + description: 'Seed used for generation', + }), +}) + +/** + * PromptEraseRequest + */ +export const zSchemaFinegrainEraserInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Text description of what to erase', + }), + mode: z.optional( + z.enum(['express', 'standard', 'premium']).register(z.globalRegistry, { + description: 'Erase quality mode', + }), + ), + seed: z.optional( + z.int().gte(0).lte(999).register(z.globalRegistry, { + description: 'Random seed for reproducible generation', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to edit', + }), +}) + +/** + * BoxPromptBase + */ +export const zSchemaBoxPromptBase = z.object({ + y_min: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Y Min Coordinate of the box', + }), + ) + .default(0), + x_max: z + .optional( + z.int().register(z.globalRegistry, { + description: 'X Max Coordinate of the box', + }), + ) + .default(0), + x_min: z + .optional( + z.int().register(z.globalRegistry, { + description: 'X Min Coordinate of the box', + }), + ) + .default(0), + y_max: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Y Max Coordinate of the box', + }), + ) + .default(0), +}) + +/** + * EraseOutput + */ +export const zSchemaFinegrainEraserBboxOutput = z.object({ + image: zSchemaFile, + used_seed: z.int().register(z.globalRegistry, { + description: 'Seed used for generation', + }), +}) + +/** + * BBoxEraseRequest + */ +export const zSchemaFinegrainEraserBboxInput = z.object({ + mode: z.optional( + z.enum(['express', 'standard', 'premium']).register(z.globalRegistry, { + description: 'Erase quality mode', + }), + ), + seed: z.optional( + z.int().gte(0).lte(999).register(z.globalRegistry, { + description: 'Random seed for reproducible generation', + }), + ), + box_prompts: z.array(zSchemaBoxPromptBase).register(z.globalRegistry, { + description: + 'List of bounding box coordinates to erase (only one box prompt is supported)', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to edit', + }), +}) + +/** + * EraseOutput + */ +export const zSchemaFinegrainEraserMaskOutput = z.object({ + image: zSchemaFile, + used_seed: z.int().register(z.globalRegistry, { + description: 'Seed used for generation', + }), +}) + +/** + * MaskEraseRequest + */ +export const zSchemaFinegrainEraserMaskInput = z.object({ + mode: z.optional( + z.enum(['express', 'standard', 'premium']).register(z.globalRegistry, { + description: 'Erase quality mode', + }), + ), + seed: z.optional( + z.int().gte(0).lte(999).register(z.globalRegistry, { + description: 'Random seed for reproducible generation', + }), + ), + mask_url: z.string().register(z.globalRegistry, { + description: + 'URL of the mask image. 
Should be a binary mask where white (255) indicates areas to erase', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to edit', + }), +}) + +/** + * Output + */ +export const zSchemaCartoonifyOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * CartoonifyInput + */ +export const zSchemaCartoonifyInput = z.object({ + use_cfg_zero: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to use CFG zero', + }), + ) + .default(false), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to apply Pixar style to', + }), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: 'Guidance scale for the generation', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps', + }), + ) + .default(28), + scale: z + .optional( + z.number().gte(0.1).lte(2).register(z.globalRegistry, { + description: 'Scale factor for the Pixar effect', + }), + ) + .default(1), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The seed for image generation. Same seed with same parameters will generate same image.', + }), + ), +}) + +/** + * ImageOutput + */ +export const zSchemaInstantCharacterOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated images', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * TextToImageInput + */ +export const zSchemaInstantCharacterInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + scale: z + .optional( + z.number().gte(0).lte(2).register(z.globalRegistry, { + description: + 'The scale of the subject image. 
Higher values will make the subject image more prominent in the generated image.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: + 'The image URL to generate an image from. Needs to match the dimensions of the mask.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * Output + */ +export const zSchemaPlushifyOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * PlushifyInput + */ +export const zSchemaPlushifyInput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Prompt for the generation. 
Default is empty which is usually best, but sometimes it can help to add a description of the subject.', + }), + ) + .default(''), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + use_cfg_zero: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to use CFG zero', + }), + ) + .default(false), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to apply cartoon style to', + }), + scale: z + .optional( + z.number().gte(0.1).lte(2).register(z.globalRegistry, { + description: 'Scale factor for the Cartoon effect', + }), + ) + .default(1), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps', + }), + ) + .default(28), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: 'Guidance scale for the generation', + }), + ) + .default(3.5), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The seed for image generation. Same seed with same parameters will generate same image.', + }), + ), +}) + +/** + * Output + */ +export const zSchemaFashnTryonV15Output = z.object({ + images: z.array(zSchemaFile), +}) + +/** + * Input + */ +export const zSchemaFashnTryonV15Input = z.object({ + model_image: z.string().register(z.globalRegistry, { + description: 'URL or base64 of the model image', + }), + moderation_level: z.optional( + z.enum(['none', 'permissive', 'conservative']).register(z.globalRegistry, { + description: + "Content moderation level for garment images. 'none' disables moderation, 'permissive' blocks only explicit content, 'conservative' also blocks underwear and swimwear.", + }), + ), + garment_photo_type: z.optional( + z.enum(['auto', 'model', 'flat-lay']).register(z.globalRegistry, { + description: + "Specifies the type of garment photo to optimize internal parameters for better performance. 'model' is for photos of garments on a model, 'flat-lay' is for flat-lay or ghost mannequin images, and 'auto' attempts to automatically detect the photo type.", + }), + ), + garment_image: z.string().register(z.globalRegistry, { + description: 'URL or base64 of the garment image', + }), + category: z.optional( + z + .enum(['tops', 'bottoms', 'one-pieces', 'auto']) + .register(z.globalRegistry, { + description: + "Category of the garment to try-on. 'auto' will attempt to automatically detect the category of the garment.", + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + segmentation_free: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Disables human parsing on the model image.', + }), + ) + .default(true), + num_samples: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: + 'Number of images to generate in a single run. 
Image generation has a random element in it, so trying multiple images at once increases the chances of getting a good result.', + }), + ) + .default(1), + mode: z.optional( + z.enum(['performance', 'balanced', 'quality']).register(z.globalRegistry, { + description: + "Specifies the mode of operation. 'performance' mode is faster but may sacrifice quality, 'balanced' mode is a balance between speed and quality, and 'quality' mode is slower but produces higher quality results.", + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Sets random operations to a fixed state. Use the same seed to reproduce results with the same inputs, or different seed to force different results.', + }), + ), + output_format: z.optional( + z.enum(['png', 'jpeg']).register(z.globalRegistry, { + description: + "Output format of the generated images. 'png' is highest quality, while 'jpeg' is faster", + }), + ), +}) + +/** + * Output + */ +export const zSchemaJuggernautFluxLoraInpaintingOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * InpaintInput + */ +export const zSchemaJuggernautFluxLoraInpaintingInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to use for inpainting or img2img.', + }), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ', + }), + ) + .default(false), + strength: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: + 'The strength to use for inpainting/image-to-image. Only used if the image_url is provided. 
1.0 completely remakes the image while 0.0 preserves the original.', + }), + ) + .default(0.85), + guidance_scale: z + .optional( + z.number().gte(0).lte(35).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + mask_url: z.string().register(z.globalRegistry, { + description: '\n The mask of the area to inpaint.\n ', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * EditImageResponse + */ +export const zSchemaGptImage1EditImageOutput = z.object({ + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated images.', + }), +}) + +/** + * EditImageRequest + */ +export const zSchemaGptImage1EditImageInput = z.object({ + prompt: z.string().min(2).register(z.globalRegistry, { + description: 'The prompt for image generation', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + image_size: z.optional( + z + .enum(['auto', '1024x1024', '1536x1024', '1024x1536']) + .register(z.globalRegistry, { + description: 'Aspect ratio for the generated image', + }), + ), + background: z.optional( + z.enum(['auto', 'transparent', 'opaque']).register(z.globalRegistry, { + description: 'Background for the generated image', + }), + ), + quality: z.optional( + z.enum(['auto', 'low', 'medium', 'high']).register(z.globalRegistry, { + description: 'Quality for the generated image', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'Output format for the images', + }), + ), + input_fidelity: z.optional( + z.enum(['low', 'high']).register(z.globalRegistry, { + description: 'Input fidelity for the generated image', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'The URLs of the images to use as a reference for the generation.', + }), +}) + +/** + * UNOOutput + */ +export const zSchemaUnoOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used to generate the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The URLs of the generated images.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * UNOInput + */ +export const zSchemaUnoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + input_image_urls: z.array(z.string()).register(z.globalRegistry, { + description: 'URLs of images to use while generating the image.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ', + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducible generation. 
If set to none, a random seed will be used.', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * Image2SVGOutput + */ +export const zSchemaImage2SvgOutput = z.object({ + images: z.array(zSchemaFile).register(z.globalRegistry, { + description: 'The converted SVG file', + }), +}) + +/** + * Image2SVGInput + */ +export const zSchemaImage2SvgInput = z.object({ + splice_threshold: z + .optional( + z.int().gte(0).lte(90).register(z.globalRegistry, { + description: 'Splice threshold for joining paths', + }), + ) + .default(45), + hierarchical: z.optional( + z.enum(['stacked', 'cutout']).register(z.globalRegistry, { + description: 'Hierarchical mode: stacked or cutout', + }), + ), + color_precision: z + .optional( + z.int().gte(1).lte(10).register(z.globalRegistry, { + description: 'Color quantization level', + }), + ) + .default(6), + colormode: z.optional( + z.enum(['color', 'binary']).register(z.globalRegistry, { + description: 'Choose between color or binary (black and white) output', + }), + ), + max_iterations: z + .optional( + z.int().gte(1).lte(20).register(z.globalRegistry, { + description: 'Maximum number of iterations for optimization', + }), + ) + .default(10), + length_threshold: z + .optional( + z.number().gte(0).lte(10).register(z.globalRegistry, { + description: 'Length threshold for curves/lines', + }), + ) + .default(4), + image_url: z.string().register(z.globalRegistry, { + description: 'The image to convert to SVG', + }), + mode: z.optional( + z.enum(['spline', 'polygon']).register(z.globalRegistry, { + description: 'Mode: spline (curved) or polygon (straight lines)', + }), + ), + corner_threshold: z + .optional( + z.int().gte(0).lte(180).register(z.globalRegistry, { + description: 'Corner detection threshold in degrees', + }), + ) + .default(60), + path_precision: z + .optional( + z.int().gte(1).lte(10).register(z.globalRegistry, { + description: 'Decimal precision for path coordinates', + }), + ) + .default(3), + filter_speckle: z + .optional( + z.int().gte(0).lte(20).register(z.globalRegistry, { + description: 'Filter out small speckles and noise', + }), + ) + .default(4), + layer_difference: z + .optional( + z.int().gte(1).lte(32).register(z.globalRegistry, { + description: 'Layer difference threshold for hierarchical mode', + }), + ) + .default(16), +}) + +/** + * ImageOutput + */ +export const zSchemaStep1xEditOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated images', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * TextToImageInput + */ +export const zSchemaStep1xEditInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: + 'The image URL to generate an image from. Needs to match the dimensions of the mask.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ', + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(4), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. 
moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(30), +}) + +/** + * EditOutputV3 + */ +export const zSchemaIdeogramV3EditOutput = z.object({ + images: z.array(zSchemaFile), + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for the random number generator', + }), +}) + +/** + * RGBColor + */ +export const zSchemaRgbColor = z.object({ + r: z + .optional( + z.int().gte(0).lte(255).register(z.globalRegistry, { + description: 'Red color value', + }), + ) + .default(0), + b: z + .optional( + z.int().gte(0).lte(255).register(z.globalRegistry, { + description: 'Blue color value', + }), + ) + .default(0), + g: z + .optional( + z.int().gte(0).lte(255).register(z.globalRegistry, { + description: 'Green color value', + }), + ) + .default(0), +}) + +/** + * ColorPaletteMember + */ +export const zSchemaColorPaletteMember = z.object({ + color_weight: z.optional(z.union([z.number().gte(0.05).lte(1), z.unknown()])), + rgb: zSchemaRgbColor, +}) + +/** + * ColorPalette + */ +export const zSchemaColorPalette = z.object({ + members: z.optional( + z.union([z.array(zSchemaColorPaletteMember), z.unknown()]), + ), + name: z.optional( + z.union([ + z.enum([ + 'EMBER', + 'FRESH', + 'JUNGLE', + 'MAGIC', + 'MELON', + 'MOSAIC', + 'PASTEL', + 'ULTRAMARINE', + ]), + z.unknown(), + ]), + ), +}) + +/** + * EditImageInputV3 + */ +export const zSchemaIdeogramV3EditInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to fill the masked part of the image.', + }), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'Number of images to generate.', + }), + ) + .default(1), + style_preset: z.optional( + z.union([ + z.enum([ + '80S_ILLUSTRATION', + '90S_NOSTALGIA', + 'ABSTRACT_ORGANIC', + 'ANALOG_NOSTALGIA', + 'ART_BRUT', + 'ART_DECO', + 'ART_POSTER', + 'AURA', + 'AVANT_GARDE', + 'BAUHAUS', + 'BLUEPRINT', + 'BLURRY_MOTION', + 'BRIGHT_ART', + 'C4D_CARTOON', + 'CHILDRENS_BOOK', + 'COLLAGE', + 'COLORING_BOOK_I', + 'COLORING_BOOK_II', + 'CUBISM', + 'DARK_AURA', + 'DOODLE', + 'DOUBLE_EXPOSURE', + 'DRAMATIC_CINEMA', + 'EDITORIAL', + 'EMOTIONAL_MINIMAL', + 'ETHEREAL_PARTY', + 'EXPIRED_FILM', + 'FLAT_ART', + 'FLAT_VECTOR', + 'FOREST_REVERIE', + 'GEO_MINIMALIST', + 'GLASS_PRISM', + 'GOLDEN_HOUR', + 'GRAFFITI_I', + 'GRAFFITI_II', + 'HALFTONE_PRINT', + 'HIGH_CONTRAST', + 'HIPPIE_ERA', + 'ICONIC', + 'JAPANDI_FUSION', + 'JAZZY', + 'LONG_EXPOSURE', + 'MAGAZINE_EDITORIAL', + 'MINIMAL_ILLUSTRATION', + 'MIXED_MEDIA', + 'MONOCHROME', + 'NIGHTLIFE', + 'OIL_PAINTING', + 'OLD_CARTOONS', + 'PAINT_GESTURE', + 'POP_ART', + 'RETRO_ETCHING', + 'RIVIERA_POP', + 'SPOTLIGHT_80S', + 'STYLIZED_RED', + 'SURREAL_COLLAGE', + 'TRAVEL_POSTER', + 'VINTAGE_GEO', + 'VINTAGE_POSTER', + 'WATERCOLOR', + 'WEIRD', + 'WOODBLOCK_PRINT', + ]), + z.unknown(), + ]), + ), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Determine if MagicPrompt should be used in generating the request or not.', + }), + ) + .default(true), + rendering_speed: z.optional( + z.enum(['TURBO', 'BALANCED', 'QUALITY']).register(z.globalRegistry, { + description: 'The rendering speed to use.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data 
won't be available in the request history.", + }), + ) + .default(false), + color_palette: z.optional(z.union([zSchemaColorPalette, z.unknown()])), + style_codes: z.optional(z.union([z.array(z.string()), z.unknown()])), + image_url: z.string().register(z.globalRegistry, { + description: + 'The image URL to generate an image from. MUST have the exact same dimensions (width and height) as the mask image.', + }), + seed: z.optional(z.union([z.int(), z.unknown()])), + image_urls: z.optional(z.union([z.array(z.string()), z.unknown()])), + mask_url: z.string().register(z.globalRegistry, { + description: + 'The mask URL to inpaint the image. MUST have the exact same dimensions (width and height) as the input image.', + }), +}) + +/** + * RemixOutputV3 + */ +export const zSchemaIdeogramV3RemixOutput = z.object({ + images: z.array(zSchemaFile), + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for the random number generator', + }), +}) + +/** + * RemixImageInputV3 + */ +export const zSchemaIdeogramV3RemixInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to remix the image with', + }), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + z.unknown(), + ]), + ), + style: z.optional( + z.union([z.enum(['AUTO', 'GENERAL', 'REALISTIC', 'DESIGN']), z.unknown()]), + ), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Determine if MagicPrompt should be used in generating the request or not.', + }), + ) + .default(true), + rendering_speed: z.optional( + z.enum(['TURBO', 'BALANCED', 'QUALITY']).register(z.globalRegistry, { + description: 'The rendering speed to use.', + }), + ), + image_urls: z.optional(z.union([z.array(z.string()), z.unknown()])), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Description of what to exclude from an image. 
Descriptions in the prompt take precedence over descriptions in the negative prompt.', + }), + ) + .default(''), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'Number of images to generate.', + }), + ) + .default(1), + image_url: z.string().register(z.globalRegistry, { + description: 'The image URL to remix', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + color_palette: z.optional(z.union([zSchemaColorPalette, z.unknown()])), + strength: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: 'Strength of the input image in the remix', + }), + ) + .default(0.8), + style_codes: z.optional(z.union([z.array(z.string()), z.unknown()])), + seed: z.optional(z.union([z.int(), z.unknown()])), +}) + +/** + * ReplaceBackgroundOutputV3 + */ +export const zSchemaIdeogramV3ReplaceBackgroundOutput = z.object({ + images: z.array(zSchemaFile), + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for the random number generator', + }), +}) + +/** + * ReplaceBackgroundInputV3 + */ +export const zSchemaIdeogramV3ReplaceBackgroundInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Cyberpunk city with neon lights and skyscrapers', + }), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'Number of images to generate.', + }), + ) + .default(1), + style: z.optional( + z.union([z.enum(['AUTO', 'GENERAL', 'REALISTIC', 'DESIGN']), z.unknown()]), + ), + style_preset: z.optional( + z.union([ + z.enum([ + '80S_ILLUSTRATION', + '90S_NOSTALGIA', + 'ABSTRACT_ORGANIC', + 'ANALOG_NOSTALGIA', + 'ART_BRUT', + 'ART_DECO', + 'ART_POSTER', + 'AURA', + 'AVANT_GARDE', + 'BAUHAUS', + 'BLUEPRINT', + 'BLURRY_MOTION', + 'BRIGHT_ART', + 'C4D_CARTOON', + 'CHILDRENS_BOOK', + 'COLLAGE', + 'COLORING_BOOK_I', + 'COLORING_BOOK_II', + 'CUBISM', + 'DARK_AURA', + 'DOODLE', + 'DOUBLE_EXPOSURE', + 'DRAMATIC_CINEMA', + 'EDITORIAL', + 'EMOTIONAL_MINIMAL', + 'ETHEREAL_PARTY', + 'EXPIRED_FILM', + 'FLAT_ART', + 'FLAT_VECTOR', + 'FOREST_REVERIE', + 'GEO_MINIMALIST', + 'GLASS_PRISM', + 'GOLDEN_HOUR', + 'GRAFFITI_I', + 'GRAFFITI_II', + 'HALFTONE_PRINT', + 'HIGH_CONTRAST', + 'HIPPIE_ERA', + 'ICONIC', + 'JAPANDI_FUSION', + 'JAZZY', + 'LONG_EXPOSURE', + 'MAGAZINE_EDITORIAL', + 'MINIMAL_ILLUSTRATION', + 'MIXED_MEDIA', + 'MONOCHROME', + 'NIGHTLIFE', + 'OIL_PAINTING', + 'OLD_CARTOONS', + 'PAINT_GESTURE', + 'POP_ART', + 'RETRO_ETCHING', + 'RIVIERA_POP', + 'SPOTLIGHT_80S', + 'STYLIZED_RED', + 'SURREAL_COLLAGE', + 'TRAVEL_POSTER', + 'VINTAGE_GEO', + 'VINTAGE_POSTER', + 'WATERCOLOR', + 'WEIRD', + 'WOODBLOCK_PRINT', + ]), + z.unknown(), + ]), + ), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Determine if MagicPrompt should be used in generating the request or not.', + }), + ) + .default(true), + rendering_speed: z.optional( + z.enum(['TURBO', 'BALANCED', 'QUALITY']).register(z.globalRegistry, { + description: 'The rendering speed to use.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + color_palette: z.optional(z.union([zSchemaColorPalette, 
z.unknown()])), + style_codes: z.optional(z.union([z.array(z.string()), z.unknown()])), + image_url: z.string().register(z.globalRegistry, { + description: 'The image URL whose background needs to be replaced', + }), + seed: z.optional(z.union([z.int(), z.unknown()])), + image_urls: z.optional(z.union([z.array(z.string()), z.unknown()])), +}) + +/** + * ReframeOutputV3 + */ +export const zSchemaIdeogramV3ReframeOutput = z.object({ + images: z.array(zSchemaFile), + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for the random number generator', + }), +}) + +/** + * ReframeImageInputV3 + */ +export const zSchemaIdeogramV3ReframeInput = z.object({ + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'Number of images to generate.', + }), + ) + .default(1), + image_size: z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + style: z.optional( + z.union([z.enum(['AUTO', 'GENERAL', 'REALISTIC', 'DESIGN']), z.unknown()]), + ), + style_preset: z.optional( + z.union([ + z.enum([ + '80S_ILLUSTRATION', + '90S_NOSTALGIA', + 'ABSTRACT_ORGANIC', + 'ANALOG_NOSTALGIA', + 'ART_BRUT', + 'ART_DECO', + 'ART_POSTER', + 'AURA', + 'AVANT_GARDE', + 'BAUHAUS', + 'BLUEPRINT', + 'BLURRY_MOTION', + 'BRIGHT_ART', + 'C4D_CARTOON', + 'CHILDRENS_BOOK', + 'COLLAGE', + 'COLORING_BOOK_I', + 'COLORING_BOOK_II', + 'CUBISM', + 'DARK_AURA', + 'DOODLE', + 'DOUBLE_EXPOSURE', + 'DRAMATIC_CINEMA', + 'EDITORIAL', + 'EMOTIONAL_MINIMAL', + 'ETHEREAL_PARTY', + 'EXPIRED_FILM', + 'FLAT_ART', + 'FLAT_VECTOR', + 'FOREST_REVERIE', + 'GEO_MINIMALIST', + 'GLASS_PRISM', + 'GOLDEN_HOUR', + 'GRAFFITI_I', + 'GRAFFITI_II', + 'HALFTONE_PRINT', + 'HIGH_CONTRAST', + 'HIPPIE_ERA', + 'ICONIC', + 'JAPANDI_FUSION', + 'JAZZY', + 'LONG_EXPOSURE', + 'MAGAZINE_EDITORIAL', + 'MINIMAL_ILLUSTRATION', + 'MIXED_MEDIA', + 'MONOCHROME', + 'NIGHTLIFE', + 'OIL_PAINTING', + 'OLD_CARTOONS', + 'PAINT_GESTURE', + 'POP_ART', + 'RETRO_ETCHING', + 'RIVIERA_POP', + 'SPOTLIGHT_80S', + 'STYLIZED_RED', + 'SURREAL_COLLAGE', + 'TRAVEL_POSTER', + 'VINTAGE_GEO', + 'VINTAGE_POSTER', + 'WATERCOLOR', + 'WEIRD', + 'WOODBLOCK_PRINT', + ]), + z.unknown(), + ]), + ), + rendering_speed: z.optional( + z.enum(['TURBO', 'BALANCED', 'QUALITY']).register(z.globalRegistry, { + description: 'The rendering speed to use.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + color_palette: z.optional(z.union([zSchemaColorPalette, z.unknown()])), + style_codes: z.optional(z.union([z.array(z.string()), z.unknown()])), + image_url: z.string().register(z.globalRegistry, { + description: 'The image URL to reframe', + }), + seed: z.optional(z.union([z.int(), z.unknown()])), + image_urls: z.optional(z.union([z.array(z.string()), z.unknown()])), +}) + +/** + * Img2ImgOutput + */ +export const zSchemaHidreamI1FullImageToImageOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated images', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + 
}), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * ImageToImageInput + */ +export const zSchemaHidreamI1FullImageToImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The image URL to generate an image from.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + 'A list of LoRAs to apply to the model. Each LoRA specifies its path, scale, and optional weight name.', + }), + ) + .default([]), + strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Denoising strength for image-to-image generation.', + }), + ) + .default(0.75), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(50), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. 
moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * MiniMaxTextToImageWithReferenceOutput + */ +export const zSchemaMinimaxImage01SubjectReferenceOutput = z.object({ + images: z.array(zSchemaFile).register(z.globalRegistry, { + description: 'Generated images', + }), +}) + +/** + * MiniMaxTextToImageWithReferenceRequest + */ +export const zSchemaMinimaxImage01SubjectReferenceInput = z.object({ + prompt_optimizer: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable automatic prompt optimization', + }), + ) + .default(false), + aspect_ratio: z.optional( + z + .enum(['1:1', '16:9', '4:3', '3:2', '2:3', '3:4', '9:16', '21:9']) + .register(z.globalRegistry, { + description: 'Aspect ratio of the generated image', + }), + ), + num_images: z + .optional( + z.int().gte(1).lte(9).register(z.globalRegistry, { + description: 'Number of images to generate (1-9)', + }), + ) + .default(1), + prompt: z.string().min(1).max(1500).register(z.globalRegistry, { + description: 'Text prompt for image generation (max 1500 characters)', + }), + image_url: z.string().register(z.globalRegistry, { + description: + 'URL of the subject reference image to use for consistent character appearance', + }), +}) + +/** + * ImageToImageOutput + */ +export const zSchemaRecraftV3ImageToImageOutput = z.object({ + images: z.array(zSchemaFile).register(z.globalRegistry, { + description: 'The generated images', + }), +}) + +/** + * ImageToImageInput + */ +export const zSchemaRecraftV3ImageToImageInput = z.object({ + prompt: z.string().max(1000).register(z.globalRegistry, { + description: 'A text description of areas to change.', + }), + style: z.optional( + z + .enum([ + 'any', + 'realistic_image', + 'digital_illustration', + 'vector_illustration', + 'realistic_image/b_and_w', + 'realistic_image/hard_flash', + 'realistic_image/hdr', + 'realistic_image/natural_light', + 'realistic_image/studio_portrait', + 'realistic_image/enterprise', + 'realistic_image/motion_blur', + 'realistic_image/evening_light', + 'realistic_image/faded_nostalgia', + 'realistic_image/forest_life', + 'realistic_image/mystic_naturalism', + 'realistic_image/natural_tones', + 'realistic_image/organic_calm', + 'realistic_image/real_life_glow', + 'realistic_image/retro_realism', + 'realistic_image/retro_snapshot', + 'realistic_image/urban_drama', + 'realistic_image/village_realism', + 'realistic_image/warm_folk', + 'digital_illustration/pixel_art', + 'digital_illustration/hand_drawn', + 'digital_illustration/grain', + 'digital_illustration/infantile_sketch', + 'digital_illustration/2d_art_poster', + 'digital_illustration/handmade_3d', + 'digital_illustration/hand_drawn_outline', + 'digital_illustration/engraving_color', + 'digital_illustration/2d_art_poster_2', + 'digital_illustration/antiquarian', + 'digital_illustration/bold_fantasy', + 'digital_illustration/child_book', + 'digital_illustration/child_books', + 'digital_illustration/cover', + 'digital_illustration/crosshatch', + 'digital_illustration/digital_engraving', + 'digital_illustration/expressionism', + 'digital_illustration/freehand_details', + 'digital_illustration/grain_20', + 'digital_illustration/graphic_intensity', + 'digital_illustration/hard_comics', + 'digital_illustration/long_shadow', + 'digital_illustration/modern_folk', + 
'digital_illustration/multicolor', + 'digital_illustration/neon_calm', + 'digital_illustration/noir', + 'digital_illustration/nostalgic_pastel', + 'digital_illustration/outline_details', + 'digital_illustration/pastel_gradient', + 'digital_illustration/pastel_sketch', + 'digital_illustration/pop_art', + 'digital_illustration/pop_renaissance', + 'digital_illustration/street_art', + 'digital_illustration/tablet_sketch', + 'digital_illustration/urban_glow', + 'digital_illustration/urban_sketching', + 'digital_illustration/vanilla_dreams', + 'digital_illustration/young_adult_book', + 'digital_illustration/young_adult_book_2', + 'vector_illustration/bold_stroke', + 'vector_illustration/chemistry', + 'vector_illustration/colored_stencil', + 'vector_illustration/contour_pop_art', + 'vector_illustration/cosmics', + 'vector_illustration/cutout', + 'vector_illustration/depressive', + 'vector_illustration/editorial', + 'vector_illustration/emotional_flat', + 'vector_illustration/infographical', + 'vector_illustration/marker_outline', + 'vector_illustration/mosaic', + 'vector_illustration/naivector', + 'vector_illustration/roundish_flat', + 'vector_illustration/segmented_colors', + 'vector_illustration/sharp_contrast', + 'vector_illustration/thin', + 'vector_illustration/vector_photo', + 'vector_illustration/vivid_shapes', + 'vector_illustration/engraving', + 'vector_illustration/line_art', + 'vector_illustration/line_circuit', + 'vector_illustration/linocut', + ]) + .register(z.globalRegistry, { + description: + 'The style of the generated images. Vector images cost 2X as much.', + }), + ), + style_id: z.optional( + z.string().register(z.globalRegistry, { + description: 'The ID of the custom style reference (optional)', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the image to modify. Must be less than 5 MB in size, have resolution less than 16 MP and max dimension less than 4096 pixels.', + }), + strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Defines the difference with the original image, should lie in [0, 1], where 0 means almost identical, and 1 means miserable similarity', + }), + ) + .default(0.5), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + colors: z + .optional( + z.array(zSchemaRgbColor).register(z.globalRegistry, { + description: 'An array of preferable colors', + }), + ) + .default([]), + negative_prompt: z.optional( + z.string().max(1000).register(z.globalRegistry, { + description: 'A text description of undesired elements on an image', + }), + ), +}) + +/** + * UpscaleOutput + */ +export const zSchemaRecraftUpscaleCrispOutput = z.object({ + image: zSchemaFile, +}) + +/** + * UpscaleInput + */ +export const zSchemaRecraftUpscaleCrispInput = z.object({ + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(false), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to be upscaled. 
Must be in PNG format.', + }), +}) + +/** + * UpscaleOutput + */ +export const zSchemaRecraftUpscaleCreativeOutput = z.object({ + image: zSchemaFile, +}) + +/** + * UpscaleInput + */ +export const zSchemaRecraftUpscaleCreativeInput = z.object({ + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(false), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to be upscaled. Must be in PNG format.', + }), +}) + +/** + * ImageOutput + */ +export const zSchemaRembgEnhanceOutput = z.object({ + image: zSchemaFile, +}) + +/** + * ImageInput + */ +export const zSchemaRembgEnhanceInput = z.object({ + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the input image', + }), +}) + +/** + * ImageEditOutput + */ +export const zSchemaBagelEditOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The edited images.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * ImageEditInput + */ +export const zSchemaBagelEditInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to edit the image with.', + }), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed to use for the generation.', + }), + ), + use_thought: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to use thought tokens for generation. If set to true, the model will "think" to potentially improve generation quality. Increases generation time and increases the cost by 20%.', + }), + ) + .default(false), + image_url: z.string().register(z.globalRegistry, { + description: 'The image to edit.', + }), +}) + +/** + * KontextEditOutput + */ +export const zSchemaFluxKontextDevOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + timings: z.record(z.string(), z.number()), +}) + +/** + * BaseKontextEditInput + */ +export const zSchemaFluxKontextDevInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to edit the image.', + }), + resolution_mode: z.optional( + z + .enum([ + 'auto', + 'match_input', + '1:1', + '16:9', + '21:9', + '3:2', + '2:3', + '4:5', + '5:4', + '3:4', + '4:3', + '9:16', + '9:21', + ]) + .register(z.globalRegistry, { + description: + "\n Determines how the output resolution is set for image editing.\n - `auto`: The model selects an optimal resolution from a predefined set that best matches the input image's aspect ratio. This is the recommended setting for most use cases as it's what the model was trained on.\n - `match_input`: The model will attempt to use the same resolution as the input image. The resolution will be adjusted to be compatible with the model's requirements (e.g. dimensions must be multiples of 16 and within supported limits).\n Apart from these, a few aspect ratios are also supported.\n ", + }), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: + 'The speed of the generation. The higher the speed, the faster the generation.', + }), + ), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'Output format', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to edit.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ', + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(2.5), + seed: z.optional(z.union([z.int(), z.unknown()])), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(10).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), +}) + +/** + * Image + * + * Represents an image file. 
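+ *
+ * A minimal usage sketch (the URL is an illustrative placeholder, not
+ * part of the generated schema):
+ *
+ * @example
+ * type ToolkitImage = z.infer<typeof zSchemaFalToolkitImageImageImage>
+ * const img: ToolkitImage = zSchemaFalToolkitImageImageImage.parse({
+ *   url: 'https://example.com/output.png',
+ * })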
+ */ +export const zSchemaFalToolkitImageImageImage = z + .object({ + file_size: z.optional( + z.int().register(z.globalRegistry, { + description: 'The size of the file in bytes.', + }), + ), + height: z.optional( + z.int().register(z.globalRegistry, { + description: 'The height of the image in pixels.', + }), + ), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), + width: z.optional( + z.int().register(z.globalRegistry, { + description: 'The width of the image in pixels.', + }), + ), + file_name: z.optional( + z.string().register(z.globalRegistry, { + description: + 'The name of the file. It will be auto-generated if not provided.', + }), + ), + content_type: z.optional( + z.string().register(z.globalRegistry, { + description: 'The mime type of the file.', + }), + ), + file_data: z.optional( + z.string().register(z.globalRegistry, { + description: 'File data', + }), + ), + }) + .register(z.globalRegistry, { + description: 'Represents an image file.', + }) + +/** + * FluxKontextOutput + */ +export const zSchemaFluxProKontextMaxOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaFalToolkitImageImageImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * FluxKontextInput + */ +export const zSchemaFluxProKontextMaxInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + aspect_ratio: z.optional( + z + .enum(['21:9', '16:9', '4:3', '3:2', '1:1', '2:3', '3:4', '9:16', '9:21']) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated image.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'Image prompt for the omni model.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + safety_tolerance: z.optional( + z.enum(['1', '2', '3', '4', '5', '6']).register(z.globalRegistry, { + description: + 'The safety tolerance level for the generated image. 
1 being the most strict and 6 being the most permissive.',
+    }),
+  ),
+  guidance_scale: z
+    .optional(
+      z.number().gte(1).lte(20).register(z.globalRegistry, {
+        description:
+          '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ',
+      }),
+    )
+    .default(3.5),
+  seed: z.optional(
+    z.int().register(z.globalRegistry, {
+      description:
+        '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ',
+    }),
+  ),
+  enhance_prompt: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description: 'Whether to enhance the prompt for better results.',
+      }),
+    )
+    .default(false),
+})
+
+/**
+ * Output
+ */
+export const zSchemaFluxProKontextMultiOutput = z.object({
+  prompt: z.string().register(z.globalRegistry, {
+    description: 'The prompt used for generating the image.',
+  }),
+  images: z
+    .array(zSchemaRegistryImageFastSdxlModelsImage)
+    .register(z.globalRegistry, {
+      description: 'The generated image files info.',
+    }),
+  timings: z.record(z.string(), z.number()),
+  has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, {
+    description: 'Whether the generated images contain NSFW concepts.',
+  }),
+  seed: z.int().register(z.globalRegistry, {
+    description:
+      '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ',
+  }),
+})
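+
+// Illustrative only: output schemas such as zSchemaFluxProKontextMultiOutput
+// above can double as runtime guards on API responses (`response` is a
+// placeholder value, not an API from this package):
+//
+//   const result = zSchemaFluxProKontextMultiOutput.safeParse(response)
+//   if (result.success) console.log(result.data.seed)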
+
+/**
+ * FluxKontextMultiInput
+ */
+export const zSchemaFluxProKontextMultiInput = z.object({
+  prompt: z.string().register(z.globalRegistry, {
+    description: 'The prompt to generate an image from.',
+  }),
+  num_images: z
+    .optional(
+      z.int().gte(1).lte(4).register(z.globalRegistry, {
+        description: 'The number of images to generate.',
+      }),
+    )
+    .default(1),
+  aspect_ratio: z.optional(
+    z
+      .enum(['21:9', '16:9', '4:3', '3:2', '1:1', '2:3', '3:4', '9:16', '9:21'])
+      .register(z.globalRegistry, {
+        description: 'The aspect ratio of the generated image.',
+      }),
+  ),
+  output_format: z.optional(
+    z.enum(['jpeg', 'png']).register(z.globalRegistry, {
+      description: 'The format of the generated image.',
+    }),
+  ),
+  sync_mode: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description:
+          "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.",
+      }),
+    )
+    .default(false),
+  safety_tolerance: z.optional(
+    z.enum(['1', '2', '3', '4', '5', '6']).register(z.globalRegistry, {
+      description:
+        'The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive.',
+    }),
+  ),
+  guidance_scale: z
+    .optional(
+      z.number().gte(1).lte(20).register(z.globalRegistry, {
+        description:
+          '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ',
+      }),
+    )
+    .default(3.5),
+  seed: z.optional(
+    z.int().register(z.globalRegistry, {
+      description:
+        '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ',
+    }),
+  ),
+  image_urls: z.array(z.string()).register(z.globalRegistry, {
+    description: 'Image prompt for the omni model.',
+  }),
+  enhance_prompt: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description: 'Whether to enhance the prompt for better results.',
+      }),
+    )
+    .default(false),
+})
+
+/**
+ * Output
+ */
+export const zSchemaFluxProKontextMaxMultiOutput = z.object({
+  prompt: z.string().register(z.globalRegistry, {
+    description: 'The prompt used for generating the image.',
+  }),
+  images: z
+    .array(zSchemaRegistryImageFastSdxlModelsImage)
+    .register(z.globalRegistry, {
+      description: 'The generated image files info.',
+    }),
+  timings: z.record(z.string(), z.number()),
+  has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, {
+    description: 'Whether the generated images contain NSFW concepts.',
+  }),
+  seed: z.int().register(z.globalRegistry, {
+    description:
+      '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ',
+  }),
+})
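+
+// Illustrative only: the input schema defined just below requires `prompt`
+// and `image_urls`; the remaining fields are optional, and those declared
+// with .default() are filled in on parse (the URLs are placeholders):
+//
+//   const input = zSchemaFluxProKontextMaxMultiInput.parse({
+//     prompt: 'combine the subjects into one scene',
+//     image_urls: ['https://example.com/a.png', 'https://example.com/b.png'],
+//   })
+//   // input.num_images === 1, input.guidance_scale === 3.5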
+
+/**
+ * FluxKontextMultiInput
+ */
+export const zSchemaFluxProKontextMaxMultiInput = z.object({
+  prompt: z.string().register(z.globalRegistry, {
+    description: 'The prompt to generate an image from.',
+  }),
+  num_images: z
+    .optional(
+      z.int().gte(1).lte(4).register(z.globalRegistry, {
+        description: 'The number of images to generate.',
+      }),
+    )
+    .default(1),
+  aspect_ratio: z.optional(
+    z
+      .enum(['21:9', '16:9', '4:3', '3:2', '1:1', '2:3', '3:4', '9:16', '9:21'])
+      .register(z.globalRegistry, {
+        description: 'The aspect ratio of the generated image.',
+      }),
+  ),
+  output_format: z.optional(
+    z.enum(['jpeg', 'png']).register(z.globalRegistry, {
+      description: 'The format of the generated image.',
+    }),
+  ),
+  sync_mode: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description:
+          "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.",
+      }),
+    )
+    .default(false),
+  safety_tolerance: z.optional(
+    z.enum(['1', '2', '3', '4', '5', '6']).register(z.globalRegistry, {
+      description:
+        'The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive.',
+    }),
+  ),
+  guidance_scale: z
+    .optional(
+      z.number().gte(1).lte(20).register(z.globalRegistry, {
+        description:
+          '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ',
+      }),
+    )
+    .default(3.5),
+  seed: z.optional(
+    z.int().register(z.globalRegistry, {
+      description:
+        '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ',
+    }),
+  ),
+  image_urls: z.array(z.string()).register(z.globalRegistry, {
+    description: 'Image prompt for the omni model.',
+  }),
+  enhance_prompt: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description: 'Whether to enhance the prompt for better results.',
+      }),
+    )
+    .default(false),
+})
+
+/**
+ * AgeProgressionOutput
+ */
+export const zSchemaImageEditingAgeProgressionOutput = z.object({
+  images: z.array(zSchemaImage),
+  seed: z.int(),
+})
+
+/**
+ * AgeProgressionInput
+ */
+export const zSchemaImageEditingAgeProgressionInput = z.object({
+  prompt: z
+    .optional(
+      z.string().register(z.globalRegistry, {
+        description: 'The age change to apply.',
+      }),
+    )
+    .default('20 years older'),
+  aspect_ratio: z.optional(
+    z
+      .enum(['21:9', '16:9', '4:3', '3:2', '1:1', '2:3', '3:4', '9:16', '9:21'])
+      .register(z.globalRegistry, {
+        description: 'The aspect ratio of the generated image.',
+      }),
+  ),
+  output_format: z.optional(
+    z.enum(['jpeg', 'png']).register(z.globalRegistry, {
+      description: 'The format of the generated image.',
+    }),
+  ),
+  image_url: z.string().register(z.globalRegistry, {
+    description: 'Image prompt for the omni model.',
+  }),
+  sync_mode: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description:
+          'If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.',
+      }),
+    )
+    .default(false),
+  safety_tolerance: z.optional(
+    z.enum(['1', '2', '3', '4', '5', '6']).register(z.globalRegistry, {
+      description:
+        'The safety tolerance level for the generated image. 
1 being the most strict and 6 being the most permissive.', + }), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps for sampling.', + }), + ) + .default(30), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The same seed and the same prompt given to the same version of the model will output the same image every time.', + }), + ), +}) + +/** + * BackgroundChangeOutput + */ +export const zSchemaImageEditingBackgroundChangeOutput = z.object({ + images: z.array(zSchemaImage), + seed: z.int(), +}) + +/** + * BackgroundChangeInput + */ +export const zSchemaImageEditingBackgroundChangeInput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The desired background to apply.', + }), + ) + .default('beach sunset with palm trees'), + aspect_ratio: z.optional( + z + .enum(['21:9', '16:9', '4:3', '3:2', '1:1', '2:3', '3:4', '9:16', '9:21']) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated image.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'Image prompt for the omni model.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.', + }), + ) + .default(false), + safety_tolerance: z.optional( + z.enum(['1', '2', '3', '4', '5', '6']).register(z.globalRegistry, { + description: + 'The safety tolerance level for the generated image. 
1 being the most strict and 6 being the most permissive.', + }), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps for sampling.', + }), + ) + .default(30), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The same seed and the same prompt given to the same version of the model will output the same image every time.', + }), + ), +}) + +/** + * CartoonifyOutput + */ +export const zSchemaImageEditingCartoonifyOutput = z.object({ + images: z.array(zSchemaImage), + seed: z.int(), +}) + +/** + * BaseInput + */ +export const zSchemaImageEditingCartoonifyInput = z.object({ + aspect_ratio: z.optional( + z + .enum(['21:9', '16:9', '4:3', '3:2', '1:1', '2:3', '3:4', '9:16', '9:21']) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated image.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'Image prompt for the omni model.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.', + }), + ) + .default(false), + safety_tolerance: z.optional( + z.enum(['1', '2', '3', '4', '5', '6']).register(z.globalRegistry, { + description: + 'The safety tolerance level for the generated image. 
1 being the most strict and 6 being the most permissive.', + }), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps for sampling.', + }), + ) + .default(30), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The same seed and the same prompt given to the same version of the model will output the same image every time.', + }), + ), +}) + +/** + * ColorCorrectionOutput + */ +export const zSchemaImageEditingColorCorrectionOutput = z.object({ + images: z.array(zSchemaImage), + seed: z.int(), +}) + +/** + * BaseInput + */ +export const zSchemaImageEditingColorCorrectionInput = z.object({ + aspect_ratio: z.optional( + z + .enum(['21:9', '16:9', '4:3', '3:2', '1:1', '2:3', '3:4', '9:16', '9:21']) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated image.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'Image prompt for the omni model.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.', + }), + ) + .default(false), + safety_tolerance: z.optional( + z.enum(['1', '2', '3', '4', '5', '6']).register(z.globalRegistry, { + description: + 'The safety tolerance level for the generated image. 
1 being the most strict and 6 being the most permissive.', + }), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps for sampling.', + }), + ) + .default(30), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The same seed and the same prompt given to the same version of the model will output the same image every time.', + }), + ), +}) + +/** + * ExpressionChangeOutput + */ +export const zSchemaImageEditingExpressionChangeOutput = z.object({ + images: z.array(zSchemaImage), + seed: z.int(), +}) + +/** + * ExpressionChangeInput + */ +export const zSchemaImageEditingExpressionChangeInput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The desired facial expression to apply.', + }), + ) + .default('sad'), + aspect_ratio: z.optional( + z + .enum(['21:9', '16:9', '4:3', '3:2', '1:1', '2:3', '3:4', '9:16', '9:21']) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated image.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'Image prompt for the omni model.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.', + }), + ) + .default(false), + safety_tolerance: z.optional( + z.enum(['1', '2', '3', '4', '5', '6']).register(z.globalRegistry, { + description: + 'The safety tolerance level for the generated image. 
1 being the most strict and 6 being the most permissive.', + }), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps for sampling.', + }), + ) + .default(30), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The same seed and the same prompt given to the same version of the model will output the same image every time.', + }), + ), +}) + +/** + * FaceEnhancementOutput + */ +export const zSchemaImageEditingFaceEnhancementOutput = z.object({ + images: z.array(zSchemaImage), + seed: z.int(), +}) + +/** + * BaseInput + */ +export const zSchemaImageEditingFaceEnhancementInput = z.object({ + aspect_ratio: z.optional( + z + .enum(['21:9', '16:9', '4:3', '3:2', '1:1', '2:3', '3:4', '9:16', '9:21']) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated image.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'Image prompt for the omni model.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.', + }), + ) + .default(false), + safety_tolerance: z.optional( + z.enum(['1', '2', '3', '4', '5', '6']).register(z.globalRegistry, { + description: + 'The safety tolerance level for the generated image. 
1 being the most strict and 6 being the most permissive.', + }), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps for sampling.', + }), + ) + .default(30), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The same seed and the same prompt given to the same version of the model will output the same image every time.', + }), + ), +}) + +/** + * HairChangeOutput + */ +export const zSchemaImageEditingHairChangeOutput = z.object({ + images: z.array(zSchemaImage), + seed: z.int(), +}) + +/** + * HairChangeInput + */ +export const zSchemaImageEditingHairChangeInput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The desired hair style to apply.', + }), + ) + .default('bald'), + aspect_ratio: z.optional( + z + .enum(['21:9', '16:9', '4:3', '3:2', '1:1', '2:3', '3:4', '9:16', '9:21']) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated image.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'Image prompt for the omni model.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.', + }), + ) + .default(false), + safety_tolerance: z.optional( + z.enum(['1', '2', '3', '4', '5', '6']).register(z.globalRegistry, { + description: + 'The safety tolerance level for the generated image. 
1 being the most strict and 6 being the most permissive.', + }), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps for sampling.', + }), + ) + .default(30), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The same seed and the same prompt given to the same version of the model will output the same image every time.', + }), + ), +}) + +/** + * ObjectRemovalOutput + */ +export const zSchemaImageEditingObjectRemovalOutput = z.object({ + images: z.array(zSchemaImage), + seed: z.int(), +}) + +/** + * ObjectRemovalInput + */ +export const zSchemaImageEditingObjectRemovalInput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Specify which objects to remove from the image.', + }), + ) + .default('background people'), + aspect_ratio: z.optional( + z + .enum(['21:9', '16:9', '4:3', '3:2', '1:1', '2:3', '3:4', '9:16', '9:21']) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated image.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'Image prompt for the omni model.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.', + }), + ) + .default(false), + safety_tolerance: z.optional( + z.enum(['1', '2', '3', '4', '5', '6']).register(z.globalRegistry, { + description: + 'The safety tolerance level for the generated image. 
1 being the most strict and 6 being the most permissive.', + }), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps for sampling.', + }), + ) + .default(30), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The same seed and the same prompt given to the same version of the model will output the same image every time.', + }), + ), +}) + +/** + * ProfessionalPhotoOutput + */ +export const zSchemaImageEditingProfessionalPhotoOutput = z.object({ + images: z.array(zSchemaImage), + seed: z.int(), +}) + +/** + * BaseInput + */ +export const zSchemaImageEditingProfessionalPhotoInput = z.object({ + aspect_ratio: z.optional( + z + .enum(['21:9', '16:9', '4:3', '3:2', '1:1', '2:3', '3:4', '9:16', '9:21']) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated image.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'Image prompt for the omni model.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.', + }), + ) + .default(false), + safety_tolerance: z.optional( + z.enum(['1', '2', '3', '4', '5', '6']).register(z.globalRegistry, { + description: + 'The safety tolerance level for the generated image. 
1 being the most strict and 6 being the most permissive.', + }), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps for sampling.', + }), + ) + .default(30), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The same seed and the same prompt given to the same version of the model will output the same image every time.', + }), + ), +}) + +/** + * SceneCompositionOutput + */ +export const zSchemaImageEditingSceneCompositionOutput = z.object({ + images: z.array(zSchemaImage), + seed: z.int(), +}) + +/** + * SceneCompositionInput + */ +export const zSchemaImageEditingSceneCompositionInput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Describe the scene where you want to place the subject.', + }), + ) + .default('enchanted forest'), + aspect_ratio: z.optional( + z + .enum(['21:9', '16:9', '4:3', '3:2', '1:1', '2:3', '3:4', '9:16', '9:21']) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated image.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'Image prompt for the omni model.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.', + }), + ) + .default(false), + safety_tolerance: z.optional( + z.enum(['1', '2', '3', '4', '5', '6']).register(z.globalRegistry, { + description: + 'The safety tolerance level for the generated image. 
1 being the most strict and 6 being the most permissive.', + }), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps for sampling.', + }), + ) + .default(30), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The same seed and the same prompt given to the same version of the model will output the same image every time.', + }), + ), +}) + +/** + * StyleTransferOutput + */ +export const zSchemaImageEditingStyleTransferOutput = z.object({ + images: z.array(zSchemaImage), + seed: z.int(), +}) + +/** + * StyleTransferInput + */ +export const zSchemaImageEditingStyleTransferInput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The artistic style to apply.', + }), + ) + .default("Van Gogh's Starry Night"), + aspect_ratio: z.optional( + z + .enum(['21:9', '16:9', '4:3', '3:2', '1:1', '2:3', '3:4', '9:16', '9:21']) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated image.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'Image prompt for the omni model.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.', + }), + ) + .default(false), + safety_tolerance: z.optional( + z.enum(['1', '2', '3', '4', '5', '6']).register(z.globalRegistry, { + description: + 'The safety tolerance level for the generated image. 
1 being the most strict and 6 being the most permissive.', + }), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps for sampling.', + }), + ) + .default(30), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The same seed and the same prompt given to the same version of the model will output the same image every time.', + }), + ), +}) + +/** + * TimeOfDayOutput + */ +export const zSchemaImageEditingTimeOfDayOutput = z.object({ + images: z.array(zSchemaImage), + seed: z.int(), +}) + +/** + * TimeOfDayInput + */ +export const zSchemaImageEditingTimeOfDayInput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The time of day to transform the scene to.', + }), + ) + .default('golden hour'), + aspect_ratio: z.optional( + z + .enum(['21:9', '16:9', '4:3', '3:2', '1:1', '2:3', '3:4', '9:16', '9:21']) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated image.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'Image prompt for the omni model.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.', + }), + ) + .default(false), + safety_tolerance: z.optional( + z.enum(['1', '2', '3', '4', '5', '6']).register(z.globalRegistry, { + description: + 'The safety tolerance level for the generated image. 
1 being the most strict and 6 being the most permissive.', + }), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps for sampling.', + }), + ) + .default(30), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The same seed and the same prompt given to the same version of the model will output the same image every time.', + }), + ), +}) + +/** + * WeatherEffectOutput + */ +export const zSchemaImageEditingWeatherEffectOutput = z.object({ + images: z.array(zSchemaImage), + seed: z.int(), +}) + +/** + * WeatherEffectInput + */ +export const zSchemaImageEditingWeatherEffectInput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The weather effect to apply.', + }), + ) + .default('heavy snowfall'), + aspect_ratio: z.optional( + z + .enum(['21:9', '16:9', '4:3', '3:2', '1:1', '2:3', '3:4', '9:16', '9:21']) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated image.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'Image prompt for the omni model.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.', + }), + ) + .default(false), + safety_tolerance: z.optional( + z.enum(['1', '2', '3', '4', '5', '6']).register(z.globalRegistry, { + description: + 'The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive.', + }), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps for sampling.', + }), + ) + .default(30), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The same seed and the same prompt given to the same version of the model will output the same image every time.', + }), + ), +}) + +/** + * PhotoRestorationOutput + */ +export const zSchemaImageEditingPhotoRestorationOutput = z.object({ + images: z.array(zSchemaImage), + seed: z.int(), +}) + +/** + * PhotoRestorationInput + * + * Input model for photo restoration endpoint. 
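+ *
+ * A minimal validation sketch (the URL is an illustrative placeholder):
+ *
+ * @example
+ * const input = zSchemaImageEditingPhotoRestorationInput.parse({
+ *   image_url: 'https://example.com/damaged-photo.jpg',
+ * })
+ * // defaults are applied: input.guidance_scale === 3.5,
+ * // input.num_inference_steps === 30, input.sync_mode === false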
+ */ +export const zSchemaImageEditingPhotoRestorationInput = z + .object({ + aspect_ratio: z.optional( + z + .enum([ + '21:9', + '16:9', + '4:3', + '3:2', + '1:1', + '2:3', + '3:4', + '9:16', + '9:21', + ]) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated image.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the old or damaged photo to restore.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.', + }), + ) + .default(false), + safety_tolerance: z.optional( + z.enum(['1', '2', '3', '4', '5', '6']).register(z.globalRegistry, { + description: + 'The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive.', + }), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps for sampling.', + }), + ) + .default(30), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The same seed and the same prompt given to the same version of the model will output the same image every time.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'Input model for photo restoration endpoint.', + }) + +/** + * TextRemovalOutput + */ +export const zSchemaImageEditingTextRemovalOutput = z.object({ + images: z.array(zSchemaImage), + seed: z.int(), +}) + +/** + * TextRemovalInput + * + * Input model for text removal endpoint. + */ +export const zSchemaImageEditingTextRemovalInput = z + .object({ + aspect_ratio: z.optional( + z + .enum([ + '21:9', + '16:9', + '4:3', + '3:2', + '1:1', + '2:3', + '3:4', + '9:16', + '9:21', + ]) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated image.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image containing text to be removed.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.', + }), + ) + .default(false), + safety_tolerance: z.optional( + z.enum(['1', '2', '3', '4', '5', '6']).register(z.globalRegistry, { + description: + 'The safety tolerance level for the generated image. 
1 being the most strict and 6 being the most permissive.', + }), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps for sampling.', + }), + ) + .default(30), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The same seed and the same prompt given to the same version of the model will output the same image every time.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'Input model for text removal endpoint.', + }) + +/** + * Output + */ +export const zSchemaFlux1DevImageToImageOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * BaseFlux1ImageToInput + */ +export const zSchemaFlux1DevImageToImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: + 'The speed of the generation. The higher the speed, the faster the generation.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to generate an image from.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + strength: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: + 'The strength of the initial image. 
Higher strength values are better for this model.', + }), + ) + .default(0.95), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(10).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(40), + seed: z.optional(z.union([z.int(), z.unknown()])), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), +}) + +/** + * Output + */ +export const zSchemaFlux1DevReduxOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * BaseFlux1ReduxInput + */ +export const zSchemaFlux1DevReduxInput = z.object({ + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: + 'The speed of the generation. 
The higher the speed, the faster the generation.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to generate an image from.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + seed: z.optional(z.union([z.int(), z.unknown()])), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), +}) + +/** + * Output + */ +export const zSchemaFlux1SchnellReduxOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * SchnellFlux1ReduxInput + */ +export const zSchemaFlux1SchnellReduxInput = z.object({ + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: + 'The speed of the generation. 
The higher the speed, the faster the generation.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to generate an image from.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(1).lte(12).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(4), + seed: z.optional(z.union([z.int(), z.unknown()])), +}) + +/** + * T2IOutput + */ +export const zSchemaLumaPhotonReframeOutput = z.object({ + images: z.array(zSchemaFile).register(z.globalRegistry, { + description: 'The generated image', + }), +}) + +/** + * ReframeImageRequest + */ +export const zSchemaLumaPhotonReframeInput = z.object({ + prompt: z.optional( + z.string().min(3).max(5000).register(z.globalRegistry, { + description: 'Optional prompt for reframing', + }), + ), + aspect_ratio: z + .enum(['1:1', '16:9', '9:16', '4:3', '3:4', '21:9', '9:21']) + .register(z.globalRegistry, { + description: 'The aspect ratio of the reframed image', + }), + y_start: z.optional( + z.int().register(z.globalRegistry, { + description: 'Start Y coordinate for reframing', + }), + ), + x_end: z.optional( + z.int().register(z.globalRegistry, { + description: 'End X coordinate for reframing', + }), + ), + y_end: z.optional( + z.int().register(z.globalRegistry, { + description: 'End Y coordinate for reframing', + }), + ), + grid_position_y: z.optional( + z.int().register(z.globalRegistry, { + description: 'Y position of the grid for reframing', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the input image to reframe', + }), + grid_position_x: z.optional( + z.int().register(z.globalRegistry, { + description: 'X position of the grid for reframing', + }), + ), + x_start: z.optional( + z.int().register(z.globalRegistry, { + description: 'Start X coordinate for reframing', + }), + ), +}) + +/** + * T2IOutput + */ +export const zSchemaLumaPhotonFlashReframeOutput = z.object({ + images: z.array(zSchemaFile).register(z.globalRegistry, { + description: 'The generated image', + }), +}) + +/** + * ReframeImageRequest + */ +export const zSchemaLumaPhotonFlashReframeInput = z.object({ + prompt: z.optional( + z.string().min(3).max(5000).register(z.globalRegistry, { + description: 'Optional prompt for reframing', + }), + ), + aspect_ratio: z + .enum(['1:1', '16:9', '9:16', '4:3', '3:4', '21:9', '9:21']) + .register(z.globalRegistry, { + description: 'The aspect ratio of the reframed image', + }), + y_start: z.optional( + z.int().register(z.globalRegistry, { + description: 'Start Y coordinate for reframing', + }), + ), + x_end: z.optional( + z.int().register(z.globalRegistry, { + description: 'End X coordinate for reframing', + }), + ), + y_end: z.optional( + z.int().register(z.globalRegistry, { + description: 'End Y coordinate for reframing', + }), + ), + grid_position_y: z.optional( + z.int().register(z.globalRegistry, { + description: 'Y position of the grid for 
reframing',
+    }),
+  ),
+  image_url: z.string().register(z.globalRegistry, {
+    description: 'URL of the input image to reframe',
+  }),
+  grid_position_x: z.optional(
+    z.int().register(z.globalRegistry, {
+      description: 'X position of the grid for reframing',
+    }),
+  ),
+  x_start: z.optional(
+    z.int().register(z.globalRegistry, {
+      description: 'Start X coordinate for reframing',
+    }),
+  ),
+})
+
+/**
+ * BabyVersionOutput
+ */
+export const zSchemaImageEditingBabyVersionOutput = z.object({
+  images: z.array(zSchemaImage),
+  seed: z.int(),
+})
+
+/**
+ * BabyVersionInput
+ *
+ * Input model for baby version endpoint.
+ */
+export const zSchemaImageEditingBabyVersionInput = z
+  .object({
+    aspect_ratio: z.optional(
+      z
+        .enum([
+          '21:9',
+          '16:9',
+          '4:3',
+          '3:2',
+          '1:1',
+          '2:3',
+          '3:4',
+          '9:16',
+          '9:21',
+        ])
+        .register(z.globalRegistry, {
+          description: 'The aspect ratio of the generated image.',
+        }),
+    ),
+    output_format: z.optional(
+      z.enum(['jpeg', 'png']).register(z.globalRegistry, {
+        description: 'The format of the generated image.',
+      }),
+    ),
+    image_url: z.string().register(z.globalRegistry, {
+      description: 'URL of the image to transform into a baby version.',
+    }),
+    sync_mode: z
+      .optional(
+        z.boolean().register(z.globalRegistry, {
+          description:
+            'If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.',
+        }),
+      )
+      .default(false),
+    safety_tolerance: z.optional(
+      z.enum(['1', '2', '3', '4', '5', '6']).register(z.globalRegistry, {
+        description:
+          'The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive.',
+      }),
+    ),
+    guidance_scale: z
+      .optional(
+        z.number().gte(0).lte(20).register(z.globalRegistry, {
+          description:
+            'The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.',
+        }),
+      )
+      .default(3.5),
+    num_inference_steps: z
+      .optional(
+        z.int().gte(1).lte(50).register(z.globalRegistry, {
+          description: 'Number of inference steps for sampling.',
+        }),
+      )
+      .default(30),
+    seed: z.optional(
+      z.int().register(z.globalRegistry, {
+        description:
+          'The same seed and the same prompt given to the same version of the model will output the same image every time.',
+      }),
+    ),
+  })
+  .register(z.globalRegistry, {
+    description: 'Input model for baby version endpoint.',
+  })
+
+/**
+ * ReframeOutput
+ */
+export const zSchemaImageEditingReframeOutput = z.object({
+  images: z.array(zSchemaImage),
+  seed: z.int(),
+})
+
+/**
+ * ReframeInput
+ */
+export const zSchemaImageEditingReframeInput = z.object({
+  aspect_ratio: z.optional(
+    z
+      .enum(['21:9', '16:9', '4:3', '3:2', '1:1', '2:3', '3:4', '9:16', '9:21'])
+      .register(z.globalRegistry, {
+        description: 'The desired aspect ratio for the reframed image.',
+      }),
+  ),
+  output_format: z.optional(
+    z.enum(['jpeg', 'png']).register(z.globalRegistry, {
+      description: 'The format of the generated image.',
+    }),
+  ),
+  image_url: z.string().register(z.globalRegistry, {
+    description: 'URL of the image to reframe.',
+  }),
+  sync_mode: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description:
+          'If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.',
+      }),
+    )
+    .default(false),
+  safety_tolerance: z.optional(
+    z.enum(['1', '2', '3', '4', '5', '6']).register(z.globalRegistry, {
+      description:
+        'The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive.',
+    }),
+  ),
+  guidance_scale: z
+    .optional(
+      z.number().gte(0).lte(20).register(z.globalRegistry, {
+        description:
+          'The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.',
+      }),
+    )
+    .default(3.5),
+  num_inference_steps: z
+    .optional(
+      z.int().gte(1).lte(50).register(z.globalRegistry, {
+        description: 'Number of inference steps for sampling.',
+      }),
+    )
+    .default(30),
+  seed: z.optional(
+    z.int().register(z.globalRegistry, {
+      description:
+        'The same seed and the same prompt given to the same version of the model will output the same image every time.',
+    }),
+  ),
+})
+
+/**
+ * T2IOutput
+ */
+export const zSchemaLumaPhotonModifyOutput = z.object({
+  images: z.array(zSchemaFile).register(z.globalRegistry, {
+    description: 'The generated image',
+  }),
+})
+
+/**
+ * ModifyImageRequest
+ */
+export const zSchemaLumaPhotonModifyInput = z.object({
+  prompt: z.optional(
+    z.string().min(3).max(5000).register(z.globalRegistry, {
+      description: 'Instruction for modifying the image',
+    }),
+  ),
+  aspect_ratio: z
+    .enum(['1:1', '16:9', '9:16', '4:3', '3:4', '21:9', '9:21'])
+    .register(z.globalRegistry, {
+      description: 'The aspect ratio of the reframed image',
+    }),
+  strength: z.number().gte(0).lte(1).register(z.globalRegistry, {
+    description:
+      'The strength of the initial image. Higher strength values correspond to more influence of the initial image on the output.',
+  }),
+  image_url: z.string().register(z.globalRegistry, {
+    description: 'URL of the input image to reframe',
+  }),
+})
+
+/**
+ * T2IOutput
+ */
+export const zSchemaLumaPhotonFlashModifyOutput = z.object({
+  images: z.array(zSchemaFile).register(z.globalRegistry, {
+    description: 'The generated image',
+  }),
+})
+
+/**
+ * ModifyImageRequest
+ */
+export const zSchemaLumaPhotonFlashModifyInput = z.object({
+  prompt: z.optional(
+    z.string().min(3).max(5000).register(z.globalRegistry, {
+      description: 'Instruction for modifying the image',
+    }),
+  ),
+  aspect_ratio: z
+    .enum(['1:1', '16:9', '9:16', '4:3', '3:4', '21:9', '9:21'])
+    .register(z.globalRegistry, {
+      description: 'The aspect ratio of the reframed image',
+    }),
+  strength: z.number().gte(0).lte(1).register(z.globalRegistry, {
+    description:
+      'The strength of the initial image. Higher strength values correspond to more influence of the initial image on the output.',
+  }),
+  image_url: z.string().register(z.globalRegistry, {
+    description: 'URL of the input image to reframe',
+  }),
+})
+
+/**
+ * FrameOutput
+ */
+export const zSchemaFfmpegApiExtractFrameOutput = z.object({
+  images: z.array(zSchemaImage),
+})
+
+/**
+ * FrameInput
+ */
+export const zSchemaFfmpegApiExtractFrameInput = z.object({
+  video_url: z.string().register(z.globalRegistry, {
+    description: 'URL of the video file to use as the video track',
+  }),
+  frame_type: z.optional(
+    z.enum(['first', 'middle', 'last']).register(z.globalRegistry, {
+      description:
+        'Type of frame to extract: first, middle, or last frame of the video',
+    }),
+  ),
+})
+
+/**
+ * VectorizeOutput
+ */
+export const zSchemaRecraftVectorizeOutput = z.object({
+  image: zSchemaFile,
+})
+
+/**
+ * VectorizeInput
+ */
+export const zSchemaRecraftVectorizeInput = z.object({
+  image_url: z.string().register(z.globalRegistry, {
+    description:
+      'The URL of the image to be vectorized. Must be in PNG, JPG or WEBP format, less than 5 MB in size, have resolution less than 16 MP and max dimension less than 4096 pixels, min dimension more than 256 pixels.',
+  }),
+})
+
+/**
+ * Output
+ */
+export const zSchemaObjectRemovalOutput = z.object({
+  images: z.array(zSchemaImage).register(z.globalRegistry, {
+    description: 'The generated images with objects removed.',
+  }),
+})
+
+/**
+ * PromptInput
+ */
+export const zSchemaObjectRemovalInput = z.object({
+  prompt: z.string().register(z.globalRegistry, {
+    description: 'Text description of the object to remove.',
+  }),
+  mask_expansion: z
+    .optional(
+      z.int().gte(0).lte(50).register(z.globalRegistry, {
+        description: 'Amount of pixels to expand the mask by. Range: 0-50',
+      }),
+    )
+    .default(15),
+  model: z.optional(
+    z.enum(['low_quality', 'medium_quality', 'high_quality', 'best_quality']),
+  ),
+  image_url: z.string().register(z.globalRegistry, {
+    description: 'The URL of the image to remove objects from.',
+  }),
+})
+
+/**
+ * Output
+ */
+export const zSchemaObjectRemovalMaskOutput = z.object({
+  images: z.array(zSchemaImage).register(z.globalRegistry, {
+    description: 'The generated images with objects removed.',
+  }),
+})
+
+/**
+ * MaskInput
+ */
+export const zSchemaObjectRemovalMaskInput = z.object({
+  model: z.optional(
+    z.enum(['low_quality', 'medium_quality', 'high_quality', 'best_quality']),
+  ),
+  mask_expansion: z
+    .optional(
+      z.int().gte(0).lte(50).register(z.globalRegistry, {
+        description: 'Amount of pixels to expand the mask by. Range: 0-50',
+      }),
+    )
+    .default(15),
+  mask_url: z.string().register(z.globalRegistry, {
+    description:
+      'The URL of the mask image. 
White pixels (255) indicate areas to remove.', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to remove objects from.', + }), +}) + +/** + * BBoxPromptBase + */ +export const zSchemaBBoxPromptBase = z.object({ + y_min: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Y Min Coordinate of the box (0-1)', + }), + ) + .default(0), + x_max: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'X Max Coordinate of the prompt (0-1)', + }), + ) + .default(0), + x_min: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'X Min Coordinate of the box (0-1)', + }), + ) + .default(0), + y_max: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Y Max Coordinate of the prompt (0-1)', + }), + ) + .default(0), +}) + +/** + * Output + */ +export const zSchemaObjectRemovalBboxOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated images with objects removed.', + }), +}) + +/** + * BboxInput + */ +export const zSchemaObjectRemovalBboxInput = z.object({ + model: z.optional( + z.enum(['low_quality', 'medium_quality', 'high_quality', 'best_quality']), + ), + mask_expansion: z + .optional( + z.int().gte(0).lte(50).register(z.globalRegistry, { + description: 'Amount of pixels to expand the mask by. Range: 0-50', + }), + ) + .default(15), + box_prompts: z + .optional( + z.array(zSchemaBBoxPromptBase).register(z.globalRegistry, { + description: + 'List of bounding box coordinates to erase (only one box prompt is supported)', + }), + ) + .default([]), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to remove objects from.', + }), +}) + +/** + * Output + */ +export const zSchemaPasdOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated super-resolved images', + }), + timings: z.optional( + z.record(z.string(), z.number()).register(z.globalRegistry, { + description: 'Timing information for different processing stages', + }), + ), +}) + +/** + * Input + */ +export const zSchemaPasdInput = z.object({ + conditioning_scale: z + .optional( + z.number().gte(0.1).lte(1).register(z.globalRegistry, { + description: 'ControlNet conditioning scale (0.1-1.0)', + }), + ) + .default(0.8), + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Additional prompt to guide super-resolution', + }), + ) + .default(''), + image_url: z.string().register(z.globalRegistry, { + description: 'Input image to super-resolve', + }), + steps: z + .optional( + z.int().gte(10).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps (10-50)', + }), + ) + .default(25), + scale: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Upscaling factor (1-4x)', + }), + ) + .default(2), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: 'Guidance scale for diffusion (1.0-20.0)', + }), + ) + .default(7), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt to avoid unwanted artifacts', + }), + ) + .default( + 'blurry, dirty, messy, frames, deformed, dotted, noise, raster lines, unclear, lowres, over-smoothed, painting, ai generated', + ), +}) + +/** + * Output + */ +export const zSchemaChainOfZoomOutput = z.object({ + images: 
z.array(zSchemaImage).register(z.globalRegistry, { + description: 'List of intermediate images', + }), + zoom_center: z.array(z.number()).register(z.globalRegistry, { + description: 'Center coordinates used for zoom', + }), + scale: z.number().register(z.globalRegistry, { + description: 'Actual linear zoom scale applied', + }), +}) + +/** + * Input + */ +export const zSchemaChainOfZoomInput = z.object({ + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ', + }), + ) + .default(false), + center_y: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Y coordinate of zoom center (0-1)', + }), + ) + .default(0.5), + scale: z + .optional( + z.number().gte(1).lte(8).register(z.globalRegistry, { + description: 'Zoom scale in powers of 2', + }), + ) + .default(5), + center_x: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'X coordinate of zoom center (0-1)', + }), + ) + .default(0.5), + user_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Additional prompt text to guide the zoom enhancement', + }), + ) + .default(''), + image_url: z.string().register(z.globalRegistry, { + description: 'Input image to zoom into', + }), +}) + +/** + * V16Output + */ +export const zSchemaFashnTryonV16Output = z.object({ + images: z.array(zSchemaFile), +}) + +/** + * V16Input + */ +export const zSchemaFashnTryonV16Input = z.object({ + model_image: z.string().register(z.globalRegistry, { + description: 'URL or base64 of the model image', + }), + moderation_level: z.optional( + z.enum(['none', 'permissive', 'conservative']).register(z.globalRegistry, { + description: + "Content moderation level for garment images. 'none' disables moderation, 'permissive' blocks only explicit content, 'conservative' also blocks underwear and swimwear.", + }), + ), + garment_photo_type: z.optional( + z.enum(['auto', 'model', 'flat-lay']).register(z.globalRegistry, { + description: + "Specifies the type of garment photo to optimize internal parameters for better performance. 'model' is for photos of garments on a model, 'flat-lay' is for flat-lay or ghost mannequin images, and 'auto' attempts to automatically detect the photo type.", + }), + ), + garment_image: z.string().register(z.globalRegistry, { + description: 'URL or base64 of the garment image', + }), + category: z.optional( + z + .enum(['tops', 'bottoms', 'one-pieces', 'auto']) + .register(z.globalRegistry, { + description: + "Category of the garment to try-on. 'auto' will attempt to automatically detect the category of the garment.", + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + segmentation_free: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Disables human parsing on the model image.', + }), + ) + .default(true), + num_samples: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: + 'Number of images to generate in a single run. 
Image generation has a random element in it, so trying multiple images at once increases the chances of getting a good result.', + }), + ) + .default(1), + mode: z.optional( + z.enum(['performance', 'balanced', 'quality']).register(z.globalRegistry, { + description: + "Specifies the mode of operation. 'performance' mode is faster but may sacrifice quality, 'balanced' mode is a balance between speed and quality, and 'quality' mode is slower but produces higher quality results.", + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Sets random operations to a fixed state. Use the same seed to reproduce results with the same inputs, or different seed to force different results.', + }), + ), + output_format: z.optional( + z.enum(['png', 'jpeg']).register(z.globalRegistry, { + description: + "Output format of the generated images. 'png' is highest quality, while 'jpeg' is faster", + }), + ), +}) + +/** + * KontextEditOutput + */ +export const zSchemaFluxKontextLoraOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * BaseKontextEditInput + */ +export const zSchemaFluxKontextLoraInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to edit the image.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: + 'The speed of the generation. The higher the speed, the faster the generation.', + }), + ), + resolution_mode: z.optional( + z + .enum([ + 'auto', + 'match_input', + '1:1', + '16:9', + '21:9', + '3:2', + '2:3', + '4:5', + '5:4', + '3:4', + '4:3', + '9:16', + '9:21', + ]) + .register(z.globalRegistry, { + description: + "\n Determines how the output resolution is set for image editing.\n - `auto`: The model selects an optimal resolution from a predefined set that best matches the input image's aspect ratio. This is the recommended setting for most use cases as it's what the model was trained on.\n - `match_input`: The model will attempt to use the same resolution as the input image. The resolution will be adjusted to be compatible with the model's requirements (e.g. dimensions must be multiples of 16 and within supported limits).\n Apart from these, a few aspect ratios are also supported.\n ", + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the image to edit.\n\nMax width: 14142px, Max height: 14142px, Timeout: 20s', + }), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. 
You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(2.5), + num_inference_steps: z + .optional( + z.int().gte(10).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(30), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), +}) + +/** + * PlushieStyleOutput + */ +export const zSchemaImageEditingPlushieStyleOutput = z.object({ + images: z.array(zSchemaImage), + seed: z.int(), +}) + +/** + * PlushieStyleInput + * + * Input model for plushie style endpoint. + */ +export const zSchemaImageEditingPlushieStyleInput = z + .object({ + lora_scale: z + .optional( + z.number().gte(0).lte(4).register(z.globalRegistry, { + description: + 'The scale factor for the LoRA model. Controls the strength of the LoRA effect.', + }), + ) + .default(1), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to convert to plushie style.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.', + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.', + }), + ) + .default(3.5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The same seed and the same prompt given to the same version of the model will output the same image every time.', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps for sampling.', + }), + ) + .default(30), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable the safety checker for the generated image.', + }), + ) + .default(true), + }) + .register(z.globalRegistry, { + description: 'Input model for plushie style endpoint.', + }) + +/** + * WojakStyleOutput + */ +export const zSchemaImageEditingWojakStyleOutput = z.object({ + images: z.array(zSchemaImage), + seed: z.int(), +}) + +/** + * WojakStyleInput + * + * Input model for wojak style endpoint. 
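+ *
+ * A minimal parse sketch (hypothetical URL), illustrating a range-checked
+ * override alongside the schema defaults:
+ *
+ * @example
+ * const input = zSchemaImageEditingWojakStyleInput.parse({
+ *   image_url: 'https://example.com/portrait.png',
+ *   lora_scale: 1.5, // must stay within 0-4; the default is 1
+ * })
+ * // input.enable_safety_checker === true and input.guidance_scale === 3.5
+ * // come from the declared defaults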
+ */ +export const zSchemaImageEditingWojakStyleInput = z + .object({ + lora_scale: z + .optional( + z.number().gte(0).lte(4).register(z.globalRegistry, { + description: + 'The scale factor for the LoRA model. Controls the strength of the LoRA effect.', + }), + ) + .default(1), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to convert to wojak style.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.', + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.', + }), + ) + .default(3.5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The same seed and the same prompt given to the same version of the model will output the same image every time.', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps for sampling.', + }), + ) + .default(30), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable the safety checker for the generated image.', + }), + ) + .default(true), + }) + .register(z.globalRegistry, { + description: 'Input model for wojak style endpoint.', + }) + +/** + * BroccoliHaircutOutput + */ +export const zSchemaImageEditingBroccoliHaircutOutput = z.object({ + images: z.array(zSchemaImage), + seed: z.int(), +}) + +/** + * BroccoliHaircutInput + * + * Input model for broccoli haircut endpoint. + */ +export const zSchemaImageEditingBroccoliHaircutInput = z + .object({ + lora_scale: z + .optional( + z.number().gte(0).lte(4).register(z.globalRegistry, { + description: + 'The scale factor for the LoRA model. Controls the strength of the LoRA effect.', + }), + ) + .default(1), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to apply broccoli haircut style.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the function will wait for the image to be generated and uploaded before returning the response. 
This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.',
+        }),
+      )
+      .default(false),
+    guidance_scale: z
+      .optional(
+        z.number().gte(0).lte(20).register(z.globalRegistry, {
+          description:
+            'The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.',
+        }),
+      )
+      .default(3.5),
+    seed: z.optional(
+      z.int().register(z.globalRegistry, {
+        description:
+          'The same seed and the same prompt given to the same version of the model will output the same image every time.',
+      }),
+    ),
+    num_inference_steps: z
+      .optional(
+        z.int().gte(1).lte(50).register(z.globalRegistry, {
+          description: 'Number of inference steps for sampling.',
+        }),
+      )
+      .default(30),
+    enable_safety_checker: z
+      .optional(
+        z.boolean().register(z.globalRegistry, {
+          description:
+            'Whether to enable the safety checker for the generated image.',
+        }),
+      )
+      .default(true),
+  })
+  .register(z.globalRegistry, {
+    description: 'Input model for broccoli haircut endpoint.',
+  })
+
+/**
+ * ImageUpscaleOutput
+ */
+export const zSchemaTopazUpscaleImageOutput = z.object({
+  image: zSchemaFile,
+})
+
+/**
+ * ImageUpscaleRequest
+ */
+export const zSchemaTopazUpscaleImageInput = z.object({
+  face_enhancement_creativity: z
+    .optional(
+      z.number().gte(0).lte(1).register(z.globalRegistry, {
+        description:
+          'Creativity level for face enhancement. 0.0 means no creativity, 1.0 means maximum creativity. Ignored if face enhancement is disabled.',
+      }),
+    )
+    .default(0),
+  face_enhancement_strength: z
+    .optional(
+      z.number().gte(0).lte(1).register(z.globalRegistry, {
+        description:
+          'Strength of the face enhancement. 0.0 means no enhancement, 1.0 means maximum enhancement. Ignored if face enhancement is disabled.',
+      }),
+    )
+    .default(0.8),
+  output_format: z.optional(
+    z.enum(['jpeg', 'png']).register(z.globalRegistry, {
+      description: 'Output format of the upscaled image.',
+    }),
+  ),
+  face_enhancement: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description: 'Whether to apply face enhancement to the image.',
+      }),
+    )
+    .default(true),
+  subject_detection: z.optional(
+    z.enum(['All', 'Foreground', 'Background']).register(z.globalRegistry, {
+      description: 'Subject detection mode for the image enhancement.',
+    }),
+  ),
+  model: z.optional(
+    z
+      .enum([
+        'Low Resolution V2',
+        'Standard V2',
+        'CGI',
+        'High Fidelity V2',
+        'Text Refine',
+        'Recovery',
+        'Redefine',
+        'Recovery V2',
+      ])
+      .register(z.globalRegistry, {
+        description: 'Model to use for image enhancement.',
+      }),
+  ),
+  image_url: z.string().register(z.globalRegistry, {
+    description: 'URL of the image to be upscaled',
+  }),
+  upscale_factor: z
+    .optional(
+      z.number().gte(1).lte(4).register(z.globalRegistry, {
+        description:
+          'Factor to upscale the image by (e.g. 2.0 doubles width and height)',
+      }),
+    )
+    .default(2),
+  crop_to_fill: z.optional(z.boolean()).default(false),
+})
+
+/**
+ * YouTubeThumbnailsOutput
+ */
+export const zSchemaImageEditingYoutubeThumbnailsOutput = z.object({
+  images: z.array(zSchemaImage),
+  seed: z.int(),
+})
+
+/**
+ * YouTubeThumbnailsInput
+ *
+ * Input model for YouTube thumbnails endpoint.
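+ *
+ * A minimal typing-and-parse sketch (hypothetical URL), showing how the
+ * inferred type and the string default work together:
+ *
+ * @example
+ * type Input = z.infer<typeof zSchemaImageEditingYoutubeThumbnailsInput>
+ * const input: Input = zSchemaImageEditingYoutubeThumbnailsInput.parse({
+ *   image_url: 'https://example.com/frame.jpg',
+ * })
+ * // input.prompt === 'Generate youtube thumbnails' and
+ * // input.lora_scale === 0.5 come from the schema defaults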
+ */ +export const zSchemaImageEditingYoutubeThumbnailsInput = z + .object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The text to include in the YouTube thumbnail.', + }), + ) + .default('Generate youtube thumbnails'), + lora_scale: z + .optional( + z.number().gte(0).lte(4).register(z.globalRegistry, { + description: + 'The scale factor for the LoRA model. Controls the strength of the LoRA effect.', + }), + ) + .default(0.5), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to convert to YouTube thumbnail style.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.', + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.', + }), + ) + .default(3.5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The same seed and the same prompt given to the same version of the model will output the same image every time.', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps for sampling.', + }), + ) + .default(30), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable the safety checker for the generated image.', + }), + ) + .default(true), + }) + .register(z.globalRegistry, { + description: 'Input model for YouTube thumbnails endpoint.', + }) + +/** + * BlurOutput + */ +export const zSchemaPostProcessingBlurOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The processed images with blur effect', + }), +}) + +/** + * BlurInput + */ +export const zSchemaPostProcessingBlurInput = z.object({ + blur_sigma: z + .optional( + z.number().gte(0.1).lte(10).register(z.globalRegistry, { + description: 'Sigma for Gaussian blur', + }), + ) + .default(1), + blur_radius: z + .optional( + z.int().gte(0).lte(31).register(z.globalRegistry, { + description: 'Blur radius', + }), + ) + .default(3), + blur_type: z.optional( + z.enum(['gaussian', 'kuwahara']).register(z.globalRegistry, { + description: 'Type of blur to apply', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to process', + }), +}) + +/** + * ChromaticAberrationOutput + */ +export const zSchemaPostProcessingChromaticAberrationOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The processed images with chromatic aberration effect', + }), +}) + +/** + * ChromaticAberrationInput + */ +export const zSchemaPostProcessingChromaticAberrationInput = z.object({ + blue_shift: z + .optional( + z.int().gte(-20).lte(20).register(z.globalRegistry, { + description: 'Blue channel shift amount', + }), + ) + .default(0), + red_shift: z + .optional( + z.int().gte(-20).lte(20).register(z.globalRegistry, { + description: 'Red channel shift amount', + }), + ) + .default(0), + green_direction: z.optional( + z.enum(['horizontal', 'vertical']).register(z.globalRegistry, 
{ + description: 'Green channel shift direction', + }), + ), + blue_direction: z.optional( + z.enum(['horizontal', 'vertical']).register(z.globalRegistry, { + description: 'Blue channel shift direction', + }), + ), + red_direction: z.optional( + z.enum(['horizontal', 'vertical']).register(z.globalRegistry, { + description: 'Red channel shift direction', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to process', + }), + green_shift: z + .optional( + z.int().gte(-20).lte(20).register(z.globalRegistry, { + description: 'Green channel shift amount', + }), + ) + .default(0), +}) + +/** + * ColorCorrectionOutput + */ +export const zSchemaPostProcessingColorCorrectionOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The processed images with color correction', + }), +}) + +/** + * ColorCorrectionInput + */ +export const zSchemaPostProcessingColorCorrectionInput = z.object({ + gamma: z + .optional( + z.number().gte(0.2).lte(2.2).register(z.globalRegistry, { + description: 'Gamma adjustment', + }), + ) + .default(1), + saturation: z + .optional( + z.number().gte(-100).lte(100).register(z.globalRegistry, { + description: 'Saturation adjustment', + }), + ) + .default(0), + temperature: z + .optional( + z.number().gte(-100).lte(100).register(z.globalRegistry, { + description: 'Color temperature adjustment', + }), + ) + .default(0), + brightness: z + .optional( + z.number().gte(-100).lte(100).register(z.globalRegistry, { + description: 'Brightness adjustment', + }), + ) + .default(0), + contrast: z + .optional( + z.number().gte(-100).lte(100).register(z.globalRegistry, { + description: 'Contrast adjustment', + }), + ) + .default(0), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to process', + }), +}) + +/** + * ColorTintOutput + */ +export const zSchemaPostProcessingColorTintOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The processed images with color tint effect', + }), +}) + +/** + * ColorTintInput + */ +export const zSchemaPostProcessingColorTintInput = z.object({ + tint_strength: z + .optional( + z.number().gte(0.1).lte(1).register(z.globalRegistry, { + description: 'Tint strength', + }), + ) + .default(1), + tint_mode: z.optional( + z + .enum([ + 'sepia', + 'red', + 'green', + 'blue', + 'cyan', + 'magenta', + 'yellow', + 'purple', + 'orange', + 'warm', + 'cool', + 'lime', + 'navy', + 'vintage', + 'rose', + 'teal', + 'maroon', + 'peach', + 'lavender', + 'olive', + ]) + .register(z.globalRegistry, { + description: 'Tint color mode', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to process', + }), +}) + +/** + * DesaturateOutput + */ +export const zSchemaPostProcessingDesaturateOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The processed images with desaturation effect', + }), +}) + +/** + * DesaturateInput + */ +export const zSchemaPostProcessingDesaturateInput = z.object({ + desaturate_method: z.optional( + z + .enum([ + 'luminance (Rec.709)', + 'luminance (Rec.601)', + 'average', + 'lightness', + ]) + .register(z.globalRegistry, { + description: 'Desaturation method', + }), + ), + desaturate_factor: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Desaturation factor', + }), + ) + .default(1), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to process', 
+ }), +}) + +/** + * DissolveOutput + */ +export const zSchemaPostProcessingDissolveOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The processed images with dissolve effect', + }), +}) + +/** + * DissolveInput + */ +export const zSchemaPostProcessingDissolveInput = z.object({ + dissolve_factor: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Dissolve blend factor', + }), + ) + .default(0.5), + dissolve_image_url: z.string().register(z.globalRegistry, { + description: 'URL of second image for dissolve', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to process', + }), +}) + +/** + * DodgeBurnOutput + */ +export const zSchemaPostProcessingDodgeBurnOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The processed images with dodge and burn effect', + }), +}) + +/** + * DodgeBurnInput + */ +export const zSchemaPostProcessingDodgeBurnInput = z.object({ + dodge_burn_mode: z.optional( + z + .enum([ + 'dodge', + 'burn', + 'dodge_and_burn', + 'burn_and_dodge', + 'color_dodge', + 'color_burn', + 'linear_dodge', + 'linear_burn', + ]) + .register(z.globalRegistry, { + description: 'Dodge and burn mode', + }), + ), + dodge_burn_intensity: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Dodge and burn intensity', + }), + ) + .default(0.5), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to process', + }), +}) + +/** + * GrainOutput + */ +export const zSchemaPostProcessingGrainOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The processed images with grain effect', + }), +}) + +/** + * GrainInput + */ +export const zSchemaPostProcessingGrainInput = z.object({ + grain_style: z.optional( + z + .enum(['modern', 'analog', 'kodak', 'fuji', 'cinematic', 'newspaper']) + .register(z.globalRegistry, { + description: 'Style of film grain to apply', + }), + ), + grain_intensity: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Film grain intensity', + }), + ) + .default(0.4), + grain_scale: z + .optional( + z.number().gte(1).lte(100).register(z.globalRegistry, { + description: 'Film grain scale', + }), + ) + .default(10), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to process', + }), +}) + +/** + * ParabolizeOutput + */ +export const zSchemaPostProcessingParabolizeOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The processed images with parabolize effect', + }), +}) + +/** + * ParabolizeInput + */ +export const zSchemaPostProcessingParabolizeInput = z.object({ + parabolize_coeff: z + .optional( + z.number().gte(-10).lte(10).register(z.globalRegistry, { + description: 'Parabolize coefficient', + }), + ) + .default(1), + vertex_y: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Vertex Y position', + }), + ) + .default(0.5), + vertex_x: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Vertex X position', + }), + ) + .default(0.5), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to process', + }), +}) + +/** + * SharpenOutput + */ +export const zSchemaPostProcessingSharpenOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The processed images with 
sharpen effect', + }), +}) + +/** + * SharpenInput + */ +export const zSchemaPostProcessingSharpenInput = z.object({ + sharpen_mode: z.optional( + z.enum(['basic', 'smart', 'cas']).register(z.globalRegistry, { + description: 'Type of sharpening to apply', + }), + ), + sharpen_alpha: z + .optional( + z.number().gte(0.1).lte(5).register(z.globalRegistry, { + description: 'Sharpen strength (for basic mode)', + }), + ) + .default(1), + noise_radius: z + .optional( + z.int().gte(1).lte(25).register(z.globalRegistry, { + description: 'Noise radius for smart sharpen', + }), + ) + .default(7), + sharpen_radius: z + .optional( + z.int().gte(1).lte(15).register(z.globalRegistry, { + description: 'Sharpen radius (for basic mode)', + }), + ) + .default(1), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to process', + }), + smart_sharpen_strength: z + .optional( + z.number().gte(0).lte(25).register(z.globalRegistry, { + description: 'Smart sharpen strength', + }), + ) + .default(5), + cas_amount: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'CAS sharpening amount', + }), + ) + .default(0.8), + preserve_edges: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Edge preservation factor', + }), + ) + .default(0.75), + smart_sharpen_ratio: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Smart sharpen blend ratio', + }), + ) + .default(0.5), +}) + +/** + * SolarizeOutput + */ +export const zSchemaPostProcessingSolarizeOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The processed images with solarize effect', + }), +}) + +/** + * SolarizeInput + */ +export const zSchemaPostProcessingSolarizeInput = z.object({ + solarize_threshold: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Solarize threshold', + }), + ) + .default(0.5), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to process', + }), +}) + +/** + * VignetteOutput + */ +export const zSchemaPostProcessingVignetteOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The processed images with vignette effect', + }), +}) + +/** + * VignetteInput + */ +export const zSchemaPostProcessingVignetteInput = z.object({ + vignette_strength: z + .optional( + z.number().gte(0).lte(10).register(z.globalRegistry, { + description: 'Vignette strength', + }), + ) + .default(0.5), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to process', + }), +}) + +/** + * RealismOutput + */ +export const zSchemaImageEditingRealismOutput = z.object({ + images: z.array(zSchemaImage), + seed: z.int(), +}) + +/** + * RealismInput + * + * Input model for realism enhancement endpoint. + */ +export const zSchemaImageEditingRealismInput = z + .object({ + lora_scale: z + .optional( + z.number().gte(0).lte(2).register(z.globalRegistry, { + description: + 'The scale factor for the LoRA model. Controls the strength of the LoRA effect.', + }), + ) + .default(0.6), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to enhance with realism details.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the function will wait for the image to be generated and uploaded before returning the response. 
This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.', + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.', + }), + ) + .default(3.5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The same seed and the same prompt given to the same version of the model will output the same image every time.', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps for sampling.', + }), + ) + .default(30), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable the safety checker for the generated image.', + }), + ) + .default(true), + }) + .register(z.globalRegistry, { + description: 'Input model for realism enhancement endpoint.', + }) + +/** + * ReimagineOutput + */ +export const zSchemaBriaReimagineOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated images', + }), + seed: z.int().register(z.globalRegistry, { + description: 'Seed value used for generation.', + }), +}) + +/** + * ReimagineInput + */ +export const zSchemaBriaReimagineInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt you would like to use to generate images.', + }), + num_results: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: + 'How many images you would like to generate. When using any Guidance Method, Value is set to 1.', + }), + ) + .default(1), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + structure_ref_influence: z + .optional( + z.number().register(z.globalRegistry, { + description: + 'The influence of the structure reference on the generated image.', + }), + ) + .default(0.75), + fast: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to use the fast model', + }), + ) + .default(true), + seed: z.optional( + z.int().gte(0).lte(2147483647).register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(20).lte(50).register(z.globalRegistry, { + description: + 'The number of iterations the model goes through to refine the generated image. This parameter is optional.', + }), + ) + .default(30), + structure_image_url: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'The URL of the structure reference image. Use "" to leave empty. 
Accepted formats are jpeg, jpg, png, webp.', + }), + ) + .default(''), +}) + +/** + * Output + */ +export const zSchemaCalligrapherOutput = z.object({ + images: z.array(zSchemaImage), +}) + +/** + * Input + */ +export const zSchemaCalligrapherInput = z.object({ + use_context: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to prepend context reference to the input', + }), + ) + .default(true), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'How many images to generate', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + auto_mask_generation: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to automatically generate mask from detected text', + }), + ) + .default(false), + reference_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'Optional base64 reference image for style', + }), + ), + source_image_url: z.string().register(z.globalRegistry, { + description: 'Base64-encoded source image with drawn mask layers', + }), + prompt: z.string().register(z.globalRegistry, { + description: 'Text prompt to inpaint or customize', + }), + mask_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Base64-encoded mask image (optional if using auto_mask_generation)', + }), + ), + source_text: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Source text to replace (if empty, masks all detected text)', + }), + ) + .default(''), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps (1-100)', + }), + ) + .default(50), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for reproducibility', + }), + ), + cfg_scale: z + .optional( + z.number().gte(0).lte(5).register(z.globalRegistry, { + description: 'Guidance or strength scale for the model', + }), + ) + .default(1), +}) + +/** + * VideoFile + */ +export const zSchemaVideoFile = z.object({ + file_size: z.optional( + z.int().register(z.globalRegistry, { + description: 'The size of the file in bytes.', + }), + ), + duration: z.optional( + z.number().register(z.globalRegistry, { + description: 'The duration of the video', + }), + ), + height: z.optional( + z.int().register(z.globalRegistry, { + description: 'The height of the video', + }), + ), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), + width: z.optional( + z.int().register(z.globalRegistry, { + description: 'The width of the video', + }), + ), + fps: z.optional( + z.number().register(z.globalRegistry, { + description: 'The FPS of the video', + }), + ), + file_name: z.optional( + z.string().register(z.globalRegistry, { + description: + 'The name of the file. 
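+
+/**
+ * Usage sketch (illustrative values): Calligrapher expects base64-encoded
+ * images for the source and optional mask; auto_mask_generation can stand in
+ * for an explicit mask by detecting text regions, per the field descriptions.
+ *
+ *   const input = zSchemaCalligrapherInput.parse({
+ *     source_image_url: 'data:image/png;base64,<payload>', // placeholder
+ *     prompt: 'REPLACEMENT TEXT',
+ *     auto_mask_generation: true,
+ *   })
+ */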
It will be auto-generated if not provided.', + }), + ), + content_type: z.optional( + z.string().register(z.globalRegistry, { + description: 'The mime type of the file.', + }), + ), + num_frames: z.optional( + z.int().register(z.globalRegistry, { + description: 'The number of frames in the video', + }), + ), + file_data: z.optional( + z.string().register(z.globalRegistry, { + description: 'File data', + }), + ), +}) + +/** + * FILMImageOutput + */ +export const zSchemaFilmOutput = z.object({ + images: z + .optional( + z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated frames as individual images.', + }), + ) + .default([]), + video: z.optional(zSchemaVideoFile), +}) + +/** + * FILMImageInput + */ +export const zSchemaFilmInput = z.object({ + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: + "The write mode of the output video. Only applicable if output_type is 'video'.", + }), + ), + num_frames: z + .optional( + z.int().gte(1).lte(64).register(z.globalRegistry, { + description: + 'The number of frames to generate between the input images.', + }), + ) + .default(1), + include_start: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to include the start image in the output.', + }), + ) + .default(false), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: + "The quality of the output video. Only applicable if output_type is 'video'.", + }), + ), + include_end: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to include the end image in the output.', + }), + ) + .default(false), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + fps: z + .optional( + z.int().gte(1).lte(60).register(z.globalRegistry, { + description: + "Frames per second for the output video. Only applicable if output_type is 'video'.", + }), + ) + .default(8), + start_image_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the first image to use as the starting point for interpolation.', + }), + end_image_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the second image to use as the ending point for interpolation.', + }), + image_format: z.optional( + z.enum(['png', 'jpeg']).register(z.globalRegistry, { + description: + "The format of the output images. Only applicable if output_type is 'images'.", + }), + ), + output_type: z.optional( + z.enum(['images', 'video']).register(z.globalRegistry, { + description: + 'The type of output to generate; either individual images or a video.', + }), + ), +}) + +/** + * RIFEImageOutput + */ +export const zSchemaRifeOutput = z.object({ + images: z + .optional( + z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated frames as individual images.', + }), + ) + .default([]), + video: z.optional(zSchemaFile), +}) + +/** + * RIFEImageInput + */ +export const zSchemaRifeInput = z.object({ + output_format: z.optional( + z.enum(['png', 'jpeg']).register(z.globalRegistry, { + description: + "The format of the output images. Only applicable if output_type is 'images'.", + }), + ), + fps: z + .optional( + z.int().gte(1).lte(60).register(z.globalRegistry, { + description: + "Frames per second for the output video. 
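+
+/**
+ * Usage sketch (illustrative values): FILM frame interpolation between two
+ * stills, returned as a video. num_frames counts only the generated
+ * in-between frames; include_start / include_end add the endpoints.
+ *
+ *   const input = zSchemaFilmInput.parse({
+ *     start_image_url: 'https://example.com/frame-a.png', // placeholder URLs
+ *     end_image_url: 'https://example.com/frame-b.png',
+ *     num_frames: 16,
+ *     output_type: 'video',
+ *     fps: 24,
+ *   })
+ */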
Only applicable if output_type is 'video'.", + }), + ) + .default(8), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + include_end: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to include the end image in the output.', + }), + ) + .default(false), + include_start: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to include the start image in the output.', + }), + ) + .default(false), + num_frames: z + .optional( + z.int().gte(1).lte(64).register(z.globalRegistry, { + description: + 'The number of frames to generate between the input images.', + }), + ) + .default(1), + end_image_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the second image to use as the ending point for interpolation.', + }), + output_type: z.optional( + z.enum(['images', 'video']).register(z.globalRegistry, { + description: + 'The type of output to generate; either individual images or a video.', + }), + ), + start_image_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the first image to use as the starting point for interpolation.', + }), +}) + +/** + * Output + */ +export const zSchemaHidreamE11Output = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
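+
+/**
+ * Sketch: output schemas can guard queue responses the same way. safeParse
+ * avoids throwing on unexpected payloads. Assumes the zSchemaFile shape
+ * declared earlier in this file exposes a url field.
+ *
+ *   const parsed = zSchemaRifeOutput.safeParse(response) // response: unknown
+ *   if (parsed.success) {
+ *     const videoUrl = parsed.data.video?.url
+ *   }
+ */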
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * BaseInput + */ +export const zSchemaHidreamE11Input = z.object({ + prompt: z.optional( + z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + ), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your initial image when looking for a related image to show you.\n ', + }), + ) + .default(2), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of an input image to edit.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(50), + target_image_description: z.optional( + z.string().register(z.globalRegistry, { + description: + 'The description of the target image after your edits have been made. Leave this blank to allow the model to use its own imagination.', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default('low resolution, blur'), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), +}) + +/** + * RetouchOutput + */ +export const zSchemaImageEditingRetouchOutput = z.object({ + images: z.array(zSchemaImage), + seed: z.int(), +}) + +/** + * RetouchInput + * + * Input model for retouch endpoint. + */ +export const zSchemaImageEditingRetouchInput = z + .object({ + lora_scale: z + .optional( + z.number().gte(0).lte(4).register(z.globalRegistry, { + description: + 'The scale factor for the LoRA model. 
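+
+/**
+ * Usage sketch (illustrative values): a HiDream E1.1 edit request. Leaving
+ * target_image_description unset lets the model infer the edited result, and
+ * negative_prompt falls back to its default of 'low resolution, blur'.
+ *
+ *   const input = zSchemaHidreamE11Input.parse({
+ *     image_url: 'https://example.com/portrait.png', // placeholder URL
+ *     prompt: 'add a red scarf',
+ *   })
+ */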
Controls the strength of the LoRA effect.', + }), + ) + .default(1), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to retouch.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.', + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.', + }), + ) + .default(3.5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The same seed and the same prompt given to the same version of the model will output the same image every time.', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps for sampling.', + }), + ) + .default(30), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable the safety checker for the generated image.', + }), + ) + .default(true), + }) + .register(z.globalRegistry, { + description: 'Input model for retouch endpoint.', + }) + +/** + * ImageToPanoramaResponse + */ +export const zSchemaHunyuanWorldOutput = z.object({ + image: zSchemaImage, +}) + +/** + * ImageToPanoramaRequest + */ +export const zSchemaHunyuanWorldInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to use for the panorama generation.', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to convert to a panorama.', + }), +}) + +/** + * KontextInpaintOutput + */ +export const zSchemaFluxKontextLoraInpaintOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * BaseKontextInpaintInput + */ +export const zSchemaFluxKontextLoraInpaintInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt for the image to image task.', + }), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: + 'The speed of the generation. The higher the speed, the faster the generation.', + }), + ), + reference_image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the reference image for inpainting.', + }), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. 
You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(2.5), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to be inpainted.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + strength: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: + 'The strength of the initial image. Higher strength values are better for this model.', + }), + ) + .default(0.88), + num_inference_steps: z + .optional( + z.int().gte(10).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(30), + mask_url: z.string().register(z.globalRegistry, { + description: 'The URL of the mask for inpainting.', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), +}) + +/** + * KreaReduxOutput + */ +export const zSchemaFlux1KreaReduxOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated images.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * BaseKreaFlux1ReduxInput + */ +export const zSchemaFlux1KreaReduxInput = z.object({ + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: + 'The speed of the generation. 
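+
+/**
+ * Usage sketch: attaching LoRAs to a Kontext inpaint request. The exact
+ * zSchemaLoraWeight shape is declared earlier in this file; the
+ * { path, scale } form below is an assumption for illustration.
+ *
+ *   const input = zSchemaFluxKontextLoraInpaintInput.parse({
+ *     prompt: 'restore the damaged area',
+ *     image_url: 'https://example.com/photo.png', // placeholder URLs
+ *     mask_url: 'https://example.com/mask.png',
+ *     reference_image_url: 'https://example.com/reference.png',
+ *     loras: [{ path: 'https://example.com/my-lora.safetensors', scale: 1 }],
+ *   })
+ */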
The higher the speed, the faster the generation.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to generate an image from.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + seed: z.optional(z.union([z.int(), z.unknown()])), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(4.5), +}) + +/** + * KreaOutput + */ +export const zSchemaFlux1KreaImageToImageOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated images.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * BaseKreaFlux1ImageToInput + */ +export const zSchemaFlux1KreaImageToImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: + 'The speed of the generation. The higher the speed, the faster the generation.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to generate an image from.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + strength: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: + 'The strength of the initial image. 
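+
+/**
+ * Sketch: image_size accepts either a preset name or an explicit size object
+ * (zSchemaImageSize is declared earlier in this file; { width, height } is
+ * the assumed shape).
+ *
+ *   zSchemaFlux1KreaReduxInput.parse({
+ *     image_url: 'https://example.com/src.png', // placeholder URL
+ *     image_size: 'landscape_16_9',
+ *   })
+ *   zSchemaFlux1KreaReduxInput.parse({
+ *     image_url: 'https://example.com/src.png',
+ *     image_size: { width: 1024, height: 576 },
+ *   })
+ */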
Higher strength values are better for this model.', + }), + ) + .default(0.95), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(10).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(40), + seed: z.optional(z.union([z.int(), z.unknown()])), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(4.5), +}) + +/** + * KreaReduxOutput + */ +export const zSchemaFluxKreaReduxOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated images.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * BaseKreaReduxInput + */ +export const zSchemaFluxKreaReduxInput = z.object({ + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: + 'The speed of the generation. 
The higher the speed, the faster the generation.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to generate an image from.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + seed: z.optional(z.union([z.int(), z.unknown()])), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(4.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), +}) + +/** + * Output + */ +export const zSchemaFluxKreaImageToImageOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * BaseKreaImageToInput + */ +export const zSchemaFluxKreaImageToImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: + 'The speed of the generation. The higher the speed, the faster the generation.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to generate an image from.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + strength: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: + 'The strength of the initial image. 
Higher strength values are better for this model.', + }), + ) + .default(0.95), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + seed: z.optional(z.union([z.int(), z.unknown()])), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(4.5), + num_inference_steps: z + .optional( + z.int().gte(10).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(40), +}) + +/** + * Output + */ +export const zSchemaFluxKreaLoraImageToImageOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + timings: z.record(z.string(), z.number()), +}) + +/** + * ImageToImageInput + */ +export const zSchemaFluxKreaLoraImageToImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: + 'The number of images to generate. This is always set to 1 for streaming output.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to use for inpainting. or img2img', + }), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + strength: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: + 'The strength to use for inpainting/image-to-image. Only used if the image_url is provided. 
1.0 is completely remakes the image while 0.0 preserves the original.', + }), + ) + .default(0.85), + guidance_scale: z + .optional( + z.number().gte(0).lte(35).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), +}) + +/** + * Output + */ +export const zSchemaFluxKreaLoraInpaintingOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + timings: z.record(z.string(), z.number()), +}) + +/** + * InpaintInput + */ +export const zSchemaFluxKreaLoraInpaintingInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: + 'The number of images to generate. This is always set to 1 for streaming output.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to use for inpainting. or img2img', + }), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + strength: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: + 'The strength to use for inpainting/image-to-image. Only used if the image_url is provided. 
1.0 is completely remakes the image while 0.0 preserves the original.', + }), + ) + .default(0.85), + guidance_scale: z + .optional( + z.number().gte(0).lte(35).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + mask_url: z.string().register(z.globalRegistry, { + description: '\n The mask to area to Inpaint in.\n ', + }), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), +}) + +/** + * CharacterRemixOutputV3 + */ +export const zSchemaIdeogramCharacterRemixOutput = z.object({ + images: z.array(zSchemaFile), + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for the random number generator', + }), +}) + +/** + * CharacterRemixInputV3 + */ +export const zSchemaIdeogramCharacterRemixInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to remix the image with', + }), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + z.unknown(), + ]), + ), + style: z.optional( + z.enum(['AUTO', 'REALISTIC', 'FICTION']).register(z.globalRegistry, { + description: + 'The style type to generate with. Cannot be used with style_codes.', + }), + ), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Determine if MagicPrompt should be used in generating the request or not.', + }), + ) + .default(true), + rendering_speed: z.optional( + z.enum(['TURBO', 'BALANCED', 'QUALITY']).register(z.globalRegistry, { + description: 'The rendering speed to use.', + }), + ), + reference_mask_urls: z.optional( + z.array(z.string()).register(z.globalRegistry, { + description: + 'A set of masks to apply to the character references. Currently only 1 mask is supported, rest will be ignored. (maximum total size 10MB across all character references). The masks should be in JPEG, PNG or WebP format', + }), + ), + reference_image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'A set of images to use as character references. Currently only 1 image is supported, rest will be ignored. (maximum total size 10MB across all character references). The images should be in JPEG, PNG or WebP format', + }), + image_urls: z.optional(z.union([z.array(z.string()), z.unknown()])), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Description of what to exclude from an image. 
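+
+/**
+ * Usage sketch (illustrative values): Krea LoRA inpainting. Per the strength
+ * description above, values near 1.0 remake the masked region aggressively
+ * while low values preserve the original.
+ *
+ *   const input = zSchemaFluxKreaLoraInpaintingInput.parse({
+ *     prompt: 'a brick wall',
+ *     image_url: 'https://example.com/room.png', // placeholder URLs
+ *     mask_url: 'https://example.com/mask.png',
+ *     strength: 0.85,
+ *   })
+ */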
Descriptions in the prompt take precedence to descriptions in the negative prompt.', + }), + ) + .default(''), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'Number of images to generate.', + }), + ) + .default(1), + image_url: z.string().register(z.globalRegistry, { + description: 'The image URL to remix', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + color_palette: z.optional(z.union([zSchemaColorPalette, z.unknown()])), + style_codes: z.optional(z.union([z.array(z.string()), z.unknown()])), + strength: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: 'Strength of the input image in the remix', + }), + ) + .default(0.8), + seed: z.optional(z.union([z.int(), z.unknown()])), +}) + +/** + * CharacterOutputV3 + */ +export const zSchemaIdeogramCharacterOutput = z.object({ + images: z.array(zSchemaFile), + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for the random number generator', + }), +}) + +/** + * BaseCharacterInputV3 + */ +export const zSchemaIdeogramCharacterInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to fill the masked part of the image.', + }), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + z.unknown(), + ]), + ), + style: z.optional( + z.enum(['AUTO', 'REALISTIC', 'FICTION']).register(z.globalRegistry, { + description: + 'The style type to generate with. Cannot be used with style_codes.', + }), + ), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Determine if MagicPrompt should be used in generating the request or not.', + }), + ) + .default(true), + rendering_speed: z.optional( + z.enum(['TURBO', 'BALANCED', 'QUALITY']).register(z.globalRegistry, { + description: 'The rendering speed to use.', + }), + ), + reference_mask_urls: z.optional( + z.array(z.string()).register(z.globalRegistry, { + description: + 'A set of masks to apply to the character references. Currently only 1 mask is supported, rest will be ignored. (maximum total size 10MB across all character references). The masks should be in JPEG, PNG or WebP format', + }), + ), + reference_image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'A set of images to use as character references. Currently only 1 image is supported, rest will be ignored. (maximum total size 10MB across all character references). The images should be in JPEG, PNG or WebP format', + }), + image_urls: z.optional(z.union([z.array(z.string()), z.unknown()])), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Description of what to exclude from an image. 
Descriptions in the prompt take precedence to descriptions in the negative prompt.', + }), + ) + .default(''), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'Number of images to generate.', + }), + ) + .default(1), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + color_palette: z.optional(z.union([zSchemaColorPalette, z.unknown()])), + style_codes: z.optional(z.union([z.array(z.string()), z.unknown()])), + seed: z.optional(z.union([z.int(), z.unknown()])), +}) + +/** + * CharacterEditOutputV3 + */ +export const zSchemaIdeogramCharacterEditOutput = z.object({ + images: z.array(zSchemaFile), + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for the random number generator', + }), +}) + +/** + * CharacterEditInputV3 + */ +export const zSchemaIdeogramCharacterEditInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to fill the masked part of the image.', + }), + style: z.optional( + z.enum(['AUTO', 'REALISTIC', 'FICTION']).register(z.globalRegistry, { + description: + 'The style type to generate with. Cannot be used with style_codes.', + }), + ), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Determine if MagicPrompt should be used in generating the request or not.', + }), + ) + .default(true), + rendering_speed: z.optional( + z.enum(['TURBO', 'BALANCED', 'QUALITY']).register(z.globalRegistry, { + description: 'The rendering speed to use.', + }), + ), + reference_mask_urls: z.optional( + z.array(z.string()).register(z.globalRegistry, { + description: + 'A set of masks to apply to the character references. Currently only 1 mask is supported, rest will be ignored. (maximum total size 10MB across all character references). The masks should be in JPEG, PNG or WebP format', + }), + ), + reference_image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'A set of images to use as character references. Currently only 1 image is supported, rest will be ignored. (maximum total size 10MB across all character references). The images should be in JPEG, PNG or WebP format', + }), + image_urls: z.optional(z.union([z.array(z.string()), z.unknown()])), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'Number of images to generate.', + }), + ) + .default(1), + image_url: z.string().register(z.globalRegistry, { + description: + 'The image URL to generate an image from. MUST have the exact same dimensions (width and height) as the mask image.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + color_palette: z.optional(z.union([zSchemaColorPalette, z.unknown()])), + style_codes: z.optional(z.union([z.array(z.string()), z.unknown()])), + seed: z.optional(z.union([z.int(), z.unknown()])), + mask_url: z.string().register(z.globalRegistry, { + description: + 'The mask URL to inpaint the image. 
MUST have the exact same dimensions (width and height) as the input image.', + }), +}) + +/** + * QwenImageOutput + */ +export const zSchemaQwenImageEditOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * BaseQwenEditImageInput + */ +export const zSchemaQwenImageEditInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the image with', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: + "Acceleration level for image generation. Options: 'none', 'regular'. Higher acceleration increases speed. 'regular' balances speed and quality.", + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to edit.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(4), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(30), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt for the generation', + }), + ) + .default(' '), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * NextStepResponse + */ +export const zSchemaNextstep1Output = z.object({ + image: z + .object({ + file_size: z.optional(z.union([z.int(), z.unknown()])), + height: z.optional(z.union([z.int(), z.unknown()])), + file_name: z.optional(z.union([z.string(), z.unknown()])), + content_type: z.optional(z.union([z.string(), z.unknown()])), + url: 
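+
+/**
+ * Usage sketch (illustrative values): Ideogram character edit. Per the field
+ * descriptions above, image_url and mask_url must share exact dimensions,
+ * and only the first reference image is currently honored.
+ *
+ *   const input = zSchemaIdeogramCharacterEditInput.parse({
+ *     prompt: 'the character waves at the camera',
+ *     image_url: 'https://example.com/scene-1024x1024.png', // placeholders
+ *     mask_url: 'https://example.com/mask-1024x1024.png',
+ *     reference_image_urls: ['https://example.com/character.png'],
+ *   })
+ */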
z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), + width: z.optional(z.union([z.int(), z.unknown()])), + }) + .register(z.globalRegistry, { + description: 'Generated image', + }), + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for random number generation', + }), +}) + +/** + * NextStepEditRequest + */ +export const zSchemaNextstep1Input = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to edit the image.', + }), + negative_prompt: z.string().register(z.globalRegistry, { + description: + "The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n ", + }), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to edit.', + }), +}) + +/** + * NanoBananaImageToImageOutput + */ +export const zSchemaNanoBananaEditOutput = z.object({ + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The edited images.', + }), + description: z.string().register(z.globalRegistry, { + description: 'The description of the generated images.', + }), +}) + +/** + * NanoBananaImageToImageInput + */ +export const zSchemaNanoBananaEditInput = z.object({ + prompt: z.string().min(3).max(50000).register(z.globalRegistry, { + description: 'The prompt for image editing.', + }), + aspect_ratio: z.optional( + z + .enum([ + 'auto', + '21:9', + '16:9', + '3:2', + '4:3', + '5:4', + '1:1', + '4:5', + '3:4', + '2:3', + '9:16', + ]) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated image.', + }), + ), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'The URLs of the images to use for image-to-image generation or image editing.', + }), + limit_generations: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Experimental parameter to limit the number of generations from each round of prompting to 1. 
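+
+/**
+ * Usage sketch (illustrative values): a NextStep edit request. All three
+ * fields are required by the schema, including negative_prompt.
+ *
+ *   const input = zSchemaNextstep1Input.parse({
+ *     prompt: 'remove the car from the driveway',
+ *     negative_prompt: 'artifacts, blur',
+ *     image_url: 'https://example.com/house.png', // placeholder URL
+ *   })
+ */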
Set to `True` to to disregard any instructions in the prompt regarding the number of images to generate.', + }), + ) + .default(false), +}) + +/** + * OutputModel + */ +export const zSchemaReimagine32Output = z.object({ + image: zSchemaImage, +}) + +/** + * InputModel + */ +export const zSchemaReimagine32Input = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Prompt for image generation.', + }), + depth_preprocess: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Depth image preprocess.', + }), + ) + .default(true), + canny_preprocess: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Canny image preprocess.', + }), + ) + .default(true), + depth_image_url: z.optional(z.union([z.string(), z.unknown()])), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'Guidance scale for text.', + }), + ) + .default(5), + canny_image_url: z.optional(z.union([z.string(), z.unknown()])), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for image generation.', + }), + ) + .default( + 'Logo,Watermark,Ugly,Morbid,Extra fingers,Poorly drawn hands,Mutation,Blurry,Extra limbs,Gross proportions,Missing arms,Mutated hands,Long neck,Duplicate,Mutilated,Mutilated hands,Poorly drawn face,Deformed,Bad anatomy,Cloned face,Malformed limbs,Missing legs,Too many fingers', + ), + depth_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Depth control strength (0.0 to 1.0).', + }), + ) + .default(0.5), + aspect_ratio: z.optional( + z + .enum(['1:1', '2:3', '3:2', '3:4', '4:3', '4:5', '5:4', '9:16', '16:9']) + .register(z.globalRegistry, { + description: + 'Aspect ratio. Options: 1:1, 2:3, 3:2, 3:4, 4:3, 4:5, 5:4, 9:16, 16:9', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, returns the image directly in the response (increases latency).', + }), + ) + .default(false), + prompt_enhancer: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to improve the prompt.', + }), + ) + .default(true), + truncate_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to truncate the prompt.', + }), + ) + .default(true), + seed: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for reproducibility.', + }), + ) + .default(5555), + canny_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Canny edge control strength (0.0 to 1.0).', + }), + ) + .default(0.5), + num_inference_steps: z + .optional( + z.int().gte(20).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps.', + }), + ) + .default(30), +}) + +/** + * QwenImageI2IOutput + */ +export const zSchemaQwenImageImageToImageOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
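+
+/**
+ * Sketch: wiring a parsed input to the fal queue client. Assumes the
+ * @fal-ai/client package (fal.subscribe) is available; the endpoint id and
+ * payload below are illustrative, not taken from this file.
+ *
+ *   import { fal } from '@fal-ai/client'
+ *
+ *   const input = zSchemaNanoBananaEditInput.parse({
+ *     prompt: 'replace the background with a beach at sunset',
+ *     image_urls: ['https://example.com/photo.png'], // placeholder URL
+ *     aspect_ratio: '16:9',
+ *   })
+ *   const result = await fal.subscribe('fal-ai/nano-banana/edit', { input })
+ */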
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + timings: z.record(z.string(), z.number()), +}) + +/** + * QwenImageI2IInput + */ +export const zSchemaQwenImageImageToImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the image with', + }), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: + "Acceleration level for image generation. Options: 'none', 'regular', 'high'. Higher acceleration increases speed. 'regular' balances speed and quality. 'high' is recommended for images without text.", + }), + ), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. You can use up to 3 LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(2.5), + use_turbo: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Enable turbo mode for faster generation with high quality. When enabled, uses optimized settings (10 steps, CFG=1.2).', + }), + ) + .default(false), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt for the generation', + }), + ) + .default(' '), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The reference image to guide the generation.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Denoising strength. 
1.0 = fully remake; 0.0 = preserve original.', + }), + ) + .default(0.6), + num_inference_steps: z + .optional( + z.int().gte(2).lte(250).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(30), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), +}) + +/** + * NanoBananaImageToImageOutput + */ +export const zSchemaGemini25FlashImageEditOutput = z.object({ + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The edited images.', + }), + description: z.string().register(z.globalRegistry, { + description: 'The description of the generated images.', + }), +}) + +/** + * NanoBananaImageToImageInput + */ +export const zSchemaGemini25FlashImageEditInput = z.object({ + prompt: z.string().min(3).max(50000).register(z.globalRegistry, { + description: 'The prompt for image editing.', + }), + aspect_ratio: z.optional( + z + .enum([ + 'auto', + '21:9', + '16:9', + '3:2', + '4:3', + '5:4', + '1:1', + '4:5', + '3:4', + '2:3', + '9:16', + ]) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated image.', + }), + ), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'The URLs of the images to use for image-to-image generation or image editing.', + }), + limit_generations: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Experimental parameter to limit the number of generations from each round of prompting to 1. Set to `True` to to disregard any instructions in the prompt regarding the number of images to generate.', + }), + ) + .default(false), +}) + +/** + * USOOutputImage + */ +export const zSchemaUsoOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: + 'The generated images with applied style and/or subject customization', + }), + timings: z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'Performance timings for different stages', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'NSFW detection results for each generated image', + }), + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for generation', + }), +}) + +/** + * USOInputImage + */ +export const zSchemaUsoInput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Text prompt for generation. 
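+
+/**
+ * Usage sketch (illustrative values): Qwen image-to-image with turbo mode.
+ * Per the use_turbo description above, turbo overrides sampling with
+ * optimized settings (10 steps, CFG 1.2).
+ *
+ *   const input = zSchemaQwenImageImageToImageInput.parse({
+ *     prompt: 'same scene at night',
+ *     image_url: 'https://example.com/day.png', // placeholder URL
+ *     use_turbo: true,
+ *   })
+ */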
Can be empty for pure style transfer.', + }), + ) + .default(''), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate in parallel.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: + 'Output image format. PNG preserves transparency, JPEG is smaller.', + }), + ), + keep_size: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Preserve the layout and dimensions of the input content image. Useful for style transfer.', + }), + ) + .default(false), + input_image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'List of image URLs in order: [content_image, style_image, extra_style_image].', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, wait for generation and upload before returning. Increases latency but provides immediate access to images.', + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'How closely to follow the prompt. Higher values stick closer to the prompt.', + }), + ) + .default(4), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: + 'Number of denoising steps. More steps can improve quality but increase generation time.', + }), + ) + .default(28), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducible generation. Use same seed for consistent results.', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "What you don't want in the image. Use it to exclude unwanted elements, styles, or artifacts.", + }), + ) + .default(''), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Enable NSFW content detection and filtering.', + }), + ) + .default(true), +}) + +/** + * WanI2IResponse + */ +export const zSchemaWanV22A14bImageToImageOutput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The text prompt used for image generation.', + }), + ) + .default(''), + image: zSchemaFile, + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), +}) + +/** + * WanI2IRequest + */ +export const zSchemaWanV22A14bImageToImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide image generation.', + }), + shift: z.optional(z.number().gte(1).lte(10)).default(2), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level to use. The more acceleration, the faster the generation, but with lower quality. 
The recommended value is 'regular'.", + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, input data will be checked for safety before processing.', + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'Classifier-free guidance scale.', + }), + ) + .default(3.5), + image_format: z.optional( + z.enum(['png', 'jpeg']).register(z.globalRegistry, { + description: 'The format of the output image.', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for image generation.', + }), + ) + .default(''), + aspect_ratio: z.optional( + z.enum(['auto', '16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: + "Aspect ratio of the generated image. If 'auto', the aspect ratio will be determined automatically based on the input image.", + }), + ), + enable_output_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the output image will be checked for safety after generation.', + }), + ) + .default(false), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the input image.', + }), + strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Denoising strength. 1.0 = fully remake; 0.0 = preserve original.', + }), + ) + .default(0.5), + guidance_scale_2: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Guidance scale for the second stage of the model. This is used to control the adherence to the prompt in the second stage of the model.', + }), + ) + .default(4), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(2).lte(40).register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. Higher values give better quality but take longer.', + }), + ) + .default(27), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), +}) + +/** + * SeedDream4EditOutput + */ +export const zSchemaBytedanceSeedreamV4EditOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Generated images', + }), + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for generation', + }), +}) + +/** + * SeedDream4EditInput + */ +export const zSchemaBytedanceSeedreamV4EditInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt used to edit the image', + }), + num_images: z + .optional( + z.int().gte(1).lte(6).register(z.globalRegistry, { + description: + 'Number of separate model generations to be run with the prompt.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + 'auto', + 'auto_2K', + 'auto_4K', + ]), + ]), + ), + enhance_prompt_mode: z.optional( + z.enum(['standard', 'fast']).register(z.globalRegistry, { + description: + 'The mode to use for prompt enhancement. 
Standard mode provides higher quality results but takes longer to generate. Fast mode provides average quality results but takes less time to generate.', + }), + ), + max_images: z + .optional( + z.int().gte(1).lte(6).register(z.globalRegistry, { + description: + 'If set to a number greater than one, enables multi-image generation. The model will potentially return up to `max_images` images every generation, and in total, `num_images` generations will be carried out. In total, the number of images generated will be between `num_images` and `max_images*num_images`. The total number of images (image inputs + image outputs) must not exceed 15', + }), + ) + .default(1), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed to control the stochasticity of image generation.', + }), + ), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'List of URLs of input images for editing. Presently, up to 10 image inputs are allowed. If over 10 images are sent, only the last 10 will be used.', + }), +}) + +/** + * ReferenceToImageOutput + */ +export const zSchemaViduReferenceToImageOutput = z.object({ + image: zSchemaImage, +}) + +/** + * ReferenceToImageRequest + */ +export const zSchemaViduReferenceToImageInput = z.object({ + prompt: z.string().max(1500).register(z.globalRegistry, { + description: 'Text prompt for image generation, max 1500 characters', + }), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: 'The aspect ratio of the output image', + }), + ), + reference_image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'URLs of the reference images to use for consistent subject appearance', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for generation', + }), + ), +}) + +/** + * QwenImageOutput + */ +export const zSchemaQwenImageEditLoraOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * BaseQwenEditImageLoRAInput + */ +export const zSchemaQwenImageEditLoraInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the image with', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: + "Acceleration level for image generation. Options: 'none', 'regular', 'high'. Higher acceleration increases speed. 'regular' balances speed and quality.", + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to edit.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ', + }), + ) + .default(false), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. You can use up to 3 LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(4), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(30), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt for the generation', + }), + ) + .default(' '), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * Output + */ +export const zSchemaFlux1SrpoImageToImageOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * BaseSRPOFlux1ImageToInput + */ +export const zSchemaFlux1SrpoImageToImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: + 'The speed of the generation. The higher the speed, the faster the generation.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to generate an image from.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + strength: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: + 'The strength of the initial image. Higher strength values are better for this model.', + }), + ) + .default(0.95), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(10).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(40), + seed: z.optional(z.union([z.int(), z.unknown()])), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(4.5), +}) + +/** + * Output + */ +export const zSchemaFluxSrpoImageToImageOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * BaseSRPOImageToInput + */ +export const zSchemaFluxSrpoImageToImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: + 'The speed of the generation. 
The higher the speed, the faster the generation.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to generate an image from.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + strength: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: + 'The strength of the initial image. Higher strength values are better for this model.', + }), + ) + .default(0.95), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + seed: z.optional(z.union([z.int(), z.unknown()])), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(4.5), + num_inference_steps: z + .optional( + z.int().gte(10).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(40), +}) + +/** + * QwenImageInpaintOutput + */ +export const zSchemaQwenImageEditInpaintOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * BaseQwenEditInpaintImageInput + */ +export const zSchemaQwenImageEditInpaintInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the image with', + }), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: + "Acceleration level for image generation. Options: 'none', 'regular', 'high'. Higher acceleration increases speed. 
'regular' balances speed and quality.", + }), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(4), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt for the generation', + }), + ) + .default(' '), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to edit.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + strength: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: 'Strength of noising process for inpainting', + }), + ) + .default(0.93), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + mask_url: z.string().register(z.globalRegistry, { + description: 'The URL of the mask for inpainting', + }), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(30), +}) + +/** + * MakeupApplicationOutput + */ +export const zSchemaImageAppsV2MakeupApplicationOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Portrait with applied makeup', + }), + inference_time_ms: z.int().register(z.globalRegistry, { + description: 'Total inference time in milliseconds', + }), +}) + +/** + * AspectRatio + * + * Aspect ratio model that calculates 4K resolution dimensions + */ +export const zSchemaAspectRatio = z + .object({ + ratio: z.optional( + z.enum(['1:1', '16:9', '9:16', '4:3', '3:4']).register(z.globalRegistry, { + description: 'Aspect ratio for 4K resolution output', + }), + ), + }) + .register(z.globalRegistry, { + description: 'Aspect ratio model that calculates 4K resolution dimensions', + }) + +/** + * MakeupApplicationInput + */ +export const zSchemaImageAppsV2MakeupApplicationInput = z.object({ + aspect_ratio: z.optional(zSchemaAspectRatio), + intensity: z.optional(z.enum(['light', 'medium', 'heavy', 'dramatic'])), + makeup_style: z.optional( + z.enum([ + 'natural', + 'glamorous', + 'smoky_eyes', + 'bold_lips', + 'no_makeup', + 'remove_makeup', + 'dramatic', + 'bridal', + 'professional', + 'korean_style', + 'artistic', + ]), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'Portrait image URL for makeup application', + }), +}) + +/** + * AgeModifyOutput + */ +export const zSchemaImageAppsV2AgeModifyOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Portrait with 
modified age', + }), + inference_time_ms: z.int().register(z.globalRegistry, { + description: 'Total inference time in milliseconds', + }), +}) + +/** + * AgeModifyInput + */ +export const zSchemaImageAppsV2AgeModifyInput = z.object({ + image_url: z.string().register(z.globalRegistry, { + description: 'Portrait image URL for age modification', + }), + aspect_ratio: z.optional(zSchemaAspectRatio), + preserve_identity: z.optional(z.boolean()).default(true), + target_age: z.optional(z.int().gte(6).lte(100)).default(30), +}) + +/** + * CityTeleportOutput + */ +export const zSchemaImageAppsV2CityTeleportOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Person teleported to city location', + }), + inference_time_ms: z.int().register(z.globalRegistry, { + description: 'Total inference time in milliseconds', + }), +}) + +/** + * CityTeleportInput + */ +export const zSchemaImageAppsV2CityTeleportInput = z.object({ + city_image_url: z.optional(z.union([z.string(), z.unknown()])), + aspect_ratio: z.optional(zSchemaAspectRatio), + city_name: z.string().register(z.globalRegistry, { + description: 'City name (used when city_image_url is not provided)', + }), + photo_shot: z.optional( + z + .enum([ + 'extreme_close_up', + 'close_up', + 'medium_close_up', + 'medium_shot', + 'medium_long_shot', + 'long_shot', + 'extreme_long_shot', + 'full_body', + ]) + .register(z.globalRegistry, { + description: 'Type of photo shot', + }), + ), + camera_angle: z.optional( + z + .enum([ + 'eye_level', + 'low_angle', + 'high_angle', + 'dutch_angle', + 'birds_eye_view', + 'worms_eye_view', + 'overhead', + 'side_angle', + ]) + .register(z.globalRegistry, { + description: 'Camera angle for the shot', + }), + ), + person_image_url: z.string().register(z.globalRegistry, { + description: 'Person photo URL', + }), +}) + +/** + * ExpressionChangeOutput + */ +export const zSchemaImageAppsV2ExpressionChangeOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Portrait with changed expression', + }), + inference_time_ms: z.int().register(z.globalRegistry, { + description: 'Total inference time in milliseconds', + }), +}) + +/** + * ExpressionChangeInput + */ +export const zSchemaImageAppsV2ExpressionChangeInput = z.object({ + aspect_ratio: z.optional(zSchemaAspectRatio), + target_expression: z.optional( + z.enum([ + 'smile', + 'surprise', + 'glare', + 'panic', + 'shyness', + 'laugh', + 'cry', + 'angry', + 'sad', + 'happy', + 'excited', + 'shocked', + 'confused', + 'focused', + 'dreamy', + 'serious', + 'playful', + 'mysterious', + 'confident', + 'thoughtful', + ]), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'Portrait image URL for expression change', + }), +}) + +/** + * HairChangeOutput + */ +export const zSchemaImageAppsV2HairChangeOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Portrait with changed hair', + }), + inference_time_ms: z.int().register(z.globalRegistry, { + description: 'Total inference time in milliseconds', + }), +}) + +/** + * HairChangeInput + */ +export const zSchemaImageAppsV2HairChangeInput = z.object({ + target_hairstyle: z.optional( + z.enum([ + 'short_hair', + 'medium_long_hair', + 'long_hair', + 'curly_hair', + 'wavy_hair', + 'high_ponytail', + 'bun', + 'bob_cut', + 'pixie_cut', + 'braids', + 'straight_hair', + 'afro', + 'dreadlocks', + 'buzz_cut', + 'mohawk', + 'bangs', + 'side_part', + 'middle_part', + ]), + ), + aspect_ratio: 
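+ // Editor's note (hedged sketch, not part of the generated schemas): the ImageAppsV2 inputs in this block share one contract (a required image_url plus optional styling enums in, { images, inference_time_ms } out), so validation is uniform across the apps. For example, with a hypothetical URL: + // zSchemaImageAppsV2HairChangeInput.parse({ image_url: 'https://example.com/portrait.png', hair_color: 'auburn' })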
z.optional(zSchemaAspectRatio), + hair_color: z.optional( + z.enum([ + 'black', + 'dark_brown', + 'light_brown', + 'blonde', + 'platinum_blonde', + 'red', + 'auburn', + 'gray', + 'silver', + 'blue', + 'green', + 'purple', + 'pink', + 'rainbow', + 'natural', + 'highlights', + 'ombre', + 'balayage', + ]), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'Portrait image URL for hair change', + }), +}) + +/** + * HeadshotOutput + */ +export const zSchemaImageAppsV2HeadshotPhotoOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Professional headshot image', + }), + inference_time_ms: z.int().register(z.globalRegistry, { + description: 'Total inference time in milliseconds', + }), +}) + +/** + * HeadshotInput + */ +export const zSchemaImageAppsV2HeadshotPhotoInput = z.object({ + aspect_ratio: z.optional(zSchemaAspectRatio), + background_style: z.optional( + z.enum(['professional', 'corporate', 'clean', 'gradient']), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'Portrait image URL to convert to professional headshot', + }), +}) + +/** + * ObjectRemovalOutput + */ +export const zSchemaImageAppsV2ObjectRemovalOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Image with object removed', + }), + inference_time_ms: z.int().register(z.globalRegistry, { + description: 'Total inference time in milliseconds', + }), +}) + +/** + * ObjectRemovalInput + */ +export const zSchemaImageAppsV2ObjectRemovalInput = z.object({ + aspect_ratio: z.optional(zSchemaAspectRatio), + object_to_remove: z.string().register(z.globalRegistry, { + description: 'Object to remove', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'Image URL containing object to remove', + }), +}) + +/** + * PerspectiveOutput + */ +export const zSchemaImageAppsV2PerspectiveOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Image with changed perspective', + }), + inference_time_ms: z.int().register(z.globalRegistry, { + description: 'Total inference time in milliseconds', + }), +}) + +/** + * PerspectiveInput + */ +export const zSchemaImageAppsV2PerspectiveInput = z.object({ + aspect_ratio: z.optional(zSchemaAspectRatio), + target_perspective: z.optional( + z.enum([ + 'front', + 'left_side', + 'right_side', + 'back', + 'top_down', + 'bottom_up', + 'birds_eye', + 'three_quarter_left', + 'three_quarter_right', + ]), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'Image URL for perspective change', + }), +}) + +/** + * PhotographyEffectsOutput + */ +export const zSchemaImageAppsV2PhotographyEffectsOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Image with photography effects', + }), + inference_time_ms: z.int().register(z.globalRegistry, { + description: 'Total inference time in milliseconds', + }), +}) + +/** + * PhotographyEffectsInput + */ +export const zSchemaImageAppsV2PhotographyEffectsInput = z.object({ + effect_type: z.optional( + z.enum([ + 'film', + 'vintage_film', + 'portrait_photography', + 'fashion_photography', + 'street_photography', + 'sepia_tone', + 'film_grain', + 'light_leaks', + 'vignette_effect', + 'instant_camera', + 'golden_hour', + 'dramatic_lighting', + 'soft_focus', + 'bokeh_effect', + 'high_contrast', + 'double_exposure', + ]), + ), + aspect_ratio: z.optional(zSchemaAspectRatio), + image_url: z.string().register(z.globalRegistry, { + 
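+ // Editor's note: .register(z.globalRegistry, { description }) is Zod 4's registry-based metadata API (the form that .describe() wraps); it attaches the description without changing runtime validation, so converters such as z.toJSONSchema() can surface it later.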
description: 'Image URL for photography effects', + }), +}) + +/** + * PortraitOutput + */ +export const zSchemaImageAppsV2PortraitEnhanceOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Enhanced portrait', + }), + inference_time_ms: z.int().register(z.globalRegistry, { + description: 'Total inference time in milliseconds', + }), +}) + +/** + * PortraitInput + */ +export const zSchemaImageAppsV2PortraitEnhanceInput = z.object({ + aspect_ratio: z.optional(zSchemaAspectRatio), + image_url: z.string().register(z.globalRegistry, { + description: 'Portrait image URL to enhance', + }), +}) + +/** + * PhotoRestorationOutput + */ +export const zSchemaImageAppsV2PhotoRestorationOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Restored photo', + }), + inference_time_ms: z.int().register(z.globalRegistry, { + description: 'Total inference time in milliseconds', + }), +}) + +/** + * PhotoRestorationInput + */ +export const zSchemaImageAppsV2PhotoRestorationInput = z.object({ + enhance_resolution: z.optional(z.boolean()).default(true), + aspect_ratio: z.optional(zSchemaAspectRatio), + remove_scratches: z.optional(z.boolean()).default(true), + fix_colors: z.optional(z.boolean()).default(true), + image_url: z.string().register(z.globalRegistry, { + description: 'Old or damaged photo URL to restore', + }), +}) + +/** + * StyleTransferOutput + */ +export const zSchemaImageAppsV2StyleTransferOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Image with transferred style', + }), + inference_time_ms: z.int().register(z.globalRegistry, { + description: 'Total inference time in milliseconds', + }), +}) + +/** + * StyleTransferInput + */ +export const zSchemaImageAppsV2StyleTransferInput = z.object({ + target_style: z.optional( + z.enum([ + 'anime_character', + 'cartoon_3d', + 'hand_drawn_animation', + 'cyberpunk_future', + 'anime_game_style', + 'comic_book_animation', + 'animated_series', + 'cartoon_animation', + 'lofi_aesthetic', + 'cottagecore', + 'dark_academia', + 'y2k', + 'vaporwave', + 'liminal_space', + 'weirdcore', + 'dreamcore', + 'synthwave', + 'outrun', + 'photorealistic', + 'hyperrealistic', + 'digital_art', + 'concept_art', + 'impressionist', + 'anime', + 'pixel_art', + 'claymation', + ]), + ), + aspect_ratio: z.optional(zSchemaAspectRatio), + style_reference_image_url: z.optional(z.union([z.string(), z.unknown()])), + image_url: z.string().register(z.globalRegistry, { + description: 'Image URL for style transfer', + }), +}) + +/** + * RelightingOutput + */ +export const zSchemaImageAppsV2RelightingOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Image with new lighting', + }), + inference_time_ms: z.int().register(z.globalRegistry, { + description: 'Total inference time in milliseconds', + }), +}) + +/** + * RelightingInput + */ +export const zSchemaImageAppsV2RelightingInput = z.object({ + aspect_ratio: z.optional(zSchemaAspectRatio), + lighting_style: z.optional( + z.enum([ + 'natural', + 'studio', + 'golden_hour', + 'blue_hour', + 'dramatic', + 'soft', + 'hard', + 'backlight', + 'side_light', + 'front_light', + 'rim_light', + 'sunset', + 'sunrise', + 'neon', + 'candlelight', + 'moonlight', + 'spotlight', + 'ambient', + ]), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'Image URL for relighting', + }), +}) + +/** + * TextureTransformOutput + */ +export const 
zSchemaImageAppsV2TextureTransformOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Image with transformed texture', + }), + inference_time_ms: z.int().register(z.globalRegistry, { + description: 'Total inference time in milliseconds', + }), +}) + +/** + * TextureTransformInput + */ +export const zSchemaImageAppsV2TextureTransformInput = z.object({ + target_texture: z.optional( + z.enum([ + 'cotton', + 'denim', + 'wool', + 'felt', + 'wood', + 'leather', + 'velvet', + 'stone', + 'marble', + 'ceramic', + 'concrete', + 'brick', + 'clay', + 'foam', + 'glass', + 'metal', + 'silk', + 'fabric', + 'crystal', + 'rubber', + 'plastic', + 'lace', + ]), + ), + aspect_ratio: z.optional(zSchemaAspectRatio), + image_url: z.string().register(z.globalRegistry, { + description: 'Image URL for texture transformation', + }), +}) + +/** + * VirtualTryOnOutput + */ +export const zSchemaImageAppsV2VirtualTryOnOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Person wearing the virtual clothing', + }), + inference_time_ms: z.int().register(z.globalRegistry, { + description: 'Total inference time in milliseconds', + }), +}) + +/** + * VirtualTryOnInput + */ +export const zSchemaImageAppsV2VirtualTryOnInput = z.object({ + preserve_pose: z.optional(z.boolean()).default(true), + aspect_ratio: z.optional(zSchemaAspectRatio), + clothing_image_url: z.string().register(z.globalRegistry, { + description: 'Clothing photo URL', + }), + person_image_url: z.string().register(z.globalRegistry, { + description: 'Person photo URL', + }), +}) + +/** + * ProductPhotographyOutput + */ +export const zSchemaImageAppsV2ProductPhotographyOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Professional studio product photography', + }), + inference_time_ms: z.int().register(z.globalRegistry, { + description: 'Total inference time in milliseconds', + }), +}) + +/** + * ProductPhotographyInput + */ +export const zSchemaImageAppsV2ProductPhotographyInput = z.object({ + aspect_ratio: z.optional(zSchemaAspectRatio), + product_image_url: z.string().register(z.globalRegistry, { + description: + 'Image URL of the product to create professional studio photography', + }), +}) + +/** + * ProductHoldingOutput + */ +export const zSchemaImageAppsV2ProductHoldingOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Person holding the product naturally', + }), + inference_time_ms: z.int().register(z.globalRegistry, { + description: 'Total inference time in milliseconds', + }), +}) + +/** + * ProductHoldingInput + */ +export const zSchemaImageAppsV2ProductHoldingInput = z.object({ + aspect_ratio: z.optional(zSchemaAspectRatio), + product_image_url: z.string().register(z.globalRegistry, { + description: 'Image URL of the product to be held by the person', + }), + person_image_url: z.string().register(z.globalRegistry, { + description: 'Image URL of the person who will hold the product', + }), +}) + +/** + * SeedVRImageOutput + */ +export const zSchemaSeedvrUpscaleImageOutput = z.object({ + image: zSchemaImageFile, + seed: z.int().register(z.globalRegistry, { + description: 'The random seed used for the generation process.', + }), +}) + +/** + * SeedVRImageInput + */ +export const zSchemaSeedvrUpscaleImageInput = z.object({ + upscale_mode: z.optional( + z.enum(['target', 'factor']).register(z.globalRegistry, { + description: + "The mode to use for the upscale. 
If 'target', the upscale factor will be calculated based on the target resolution. If 'factor', the upscale factor will be used directly.", + }), + ), + noise_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The noise scale to use for the generation process.', + }), + ) + .default(0.1), + output_format: z.optional( + z.enum(['png', 'jpg', 'webp']).register(z.globalRegistry, { + description: 'The format of the output image.', + }), + ), + target_resolution: z.optional( + z.enum(['720p', '1080p', '1440p', '2160p']).register(z.globalRegistry, { + description: + 'The target resolution to upscale to when `upscale_mode` is `target`.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The input image to be processed', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + upscale_factor: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Upscaling factor to be used. Will multiply the dimensions with this factor when `upscale_mode` is `factor`.', + }), + ) + .default(2), + seed: z.optional(z.union([z.int(), z.unknown()])), +}) + +/** + * QwenImageOutput + */ +export const zSchemaQwenImageEditPlusOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + timings: z.record(z.string(), z.number()), +}) + +/** + * BaseQwenEditImagePlusInput + */ +export const zSchemaQwenImageEditPlusInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the image with', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level for image generation. Options: 'none', 'regular'. Higher acceleration increases speed. 
'regular' balances speed and quality.", + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(4), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: 'The URLs of the images to edit.', + }), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt for the generation', + }), + ) + .default(' '), + num_inference_steps: z + .optional( + z.int().gte(2).lte(100).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(50), +}) + +/** + * ImageToImageOutput + * + * Output for image editing + */ +export const zSchemaWan25PreviewImageToImageOutput = z + .object({ + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The edited images', + }), + seeds: z.array(z.int()).register(z.globalRegistry, { + description: 'The seeds used for each generated image', + }), + actual_prompt: z.optional( + z.string().register(z.globalRegistry, { + description: + 'The original prompt (prompt expansion is not available for image editing)', + }), + ), + }) + .register(z.globalRegistry, { + description: 'Output for image editing', + }) + +/** + * ImageToImageInput + * + * Input for image editing + */ +export const zSchemaWan25PreviewImageToImageInput = z + .object({ + prompt: z.string().min(1).register(z.globalRegistry, { + description: + 'The text prompt describing how to edit the image. Max 2000 characters.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate. Values from 1 to 4.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'URLs of images to edit. For single-image editing, provide 1 URL. For multi-reference generation, provide up to 2 URLs. 
If more than 2 URLs are provided, only the first 2 will be used.', + }), + negative_prompt: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Negative prompt to describe content to avoid. Max 500 characters.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'Input for image editing', + }) + +/** + * QwenImageOutput + */ +export const zSchemaQwenImageEditImageToImageOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * BaseQwenEditImg2ImgInput + */ +export const zSchemaQwenImageEditImageToImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the image with', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: + "Acceleration level for image generation. Options: 'none', 'regular', 'high'. Higher acceleration increases speed. 'regular' balances speed and quality.", + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to edit.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + strength: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: + 'Strength of the image-to-image transformation. 
Lower values preserve more of the original image.', + }), + ) + .default(0.94), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(4), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(30), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt for the generation', + }), + ) + .default(' '), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * LucidFluxResponse + */ +export const zSchemaLucidfluxOutput = z.object({ + image: zSchemaImage, + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for random number generation', + }), +}) + +/** + * LucidFluxRequest + */ +export const zSchemaLucidfluxInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to edit the image.', + }), + guidance: z + .optional( + z.number().gte(1).lte(30).register(z.globalRegistry, { + description: 'The guidance to use for the diffusion process.', + }), + ) + .default(4), + target_height: z + .optional( + z.int().gte(512).lte(1024).register(z.globalRegistry, { + description: 'The height of the output image.', + }), + ) + .default(1024), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to edit.', + }), + target_width: z + .optional( + z.int().gte(512).lte(1024).register(z.globalRegistry, { + description: 'The width of the output image.', + }), + ) + .default(1024), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. Higher values give better quality but take longer.', + }), + ) + .default(50), + seed: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Seed used for random number generation', + }), + ) + .default(42), +}) + +/** + * QwenImageOutput + */ +export const zSchemaQwenImageEditPlusLoraOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + timings: z.record(z.string(), z.number()), +}) + +/** + * BaseQwenEditImagePlusLoRAInput + */ +export const zSchemaQwenImageEditPlusLoraInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the image with', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level for image generation. Options: 'none', 'regular'. Higher acceleration increases speed. 'regular' balances speed and quality.", + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. You can use up to 3 LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(4), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: 'The URLs of the images to edit.', + }), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt for the generation', + }), + ) + .default(' '), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), +}) + +/** + * DreamOmni2Response + */ +export const zSchemaDreamomni2EditOutput = z.object({ + image: zSchemaImage, +}) + +/** + * DreamOmni2Request + */ +export const zSchemaDreamomni2EditInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to edit the image.', + }), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: 'List of URLs of input images for editing.', + }), +}) + +/** + * Image2PixelOutput + */ +export const zSchemaImage2PixelOutput = z.object({ + images: z.array(zSchemaImageFile).register(z.globalRegistry, { 
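+ // Editor's note (illustrative only): fields declared z.optional(...).default(x) are filled in at parse time, so passing just the required fields is enough. The two-field DreamOmni2 input above parses as zSchemaDreamomni2EditInput.parse({ prompt: 'swap the mug', image_urls: ['https://example.com/a.png'] }), while the Qwen edit inputs come back with num_images: 1, loras: [], guidance_scale: 4, and so on defaulted.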
+ description: + 'The processed pixel-art image (PNG) and the scaled image (PNG).', + }), + num_colors: z.int().register(z.globalRegistry, { + description: 'The number of colors in the processed media.', + }), + palette: z.array(z.string()).register(z.globalRegistry, { + description: 'The palette of the processed media.', + }), + pixel_scale: z.int().register(z.globalRegistry, { + description: 'The detected pixel scale of the input.', + }), +}) + +/** + * Image2PixelInput + */ +export const zSchemaImage2PixelInput = z.object({ + cleanup_morph: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Apply morphological operations to remove noise.', + }), + ) + .default(false), + auto_color_detect: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Enable automatic detection of optimal number of colors.', + }), + ) + .default(false), + alpha_threshold: z + .optional( + z.int().gte(0).lte(255).register(z.globalRegistry, { + description: 'Alpha binarization threshold (0-255).', + }), + ) + .default(128), + snap_grid: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Align output to the pixel grid.', + }), + ) + .default(true), + fixed_palette: z.optional( + z.array(z.string()).register(z.globalRegistry, { + description: + "Optional fixed color palette as hex strings (e.g., ['#000000', '#ffffff']).", + }), + ), + scale: z.optional( + z.int().gte(1).lte(64).register(z.globalRegistry, { + description: 'Force a specific pixel scale. If None, auto-detect.', + }), + ), + cleanup_jaggy: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Remove isolated diagonal pixels (jaggy edge cleanup).', + }), + ) + .default(false), + trim_borders: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Trim borders of the image.', + }), + ) + .default(false), + background_tolerance: z + .optional( + z.int().gte(0).lte(255).register(z.globalRegistry, { + description: 'Background tolerance (0-255).', + }), + ) + .default(0), + detect_method: z.optional( + z.enum(['auto', 'runs', 'edge']).register(z.globalRegistry, { + description: 'Scale detection method to use.', + }), + ), + transparent_background: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Remove background of the image. This will check for contiguous color regions from the edges after correction and make them transparent.', + }), + ) + .default(false), + downscale_method: z.optional( + z + .enum(['dominant', 'median', 'mode', 'mean', 'content-adaptive']) + .register(z.globalRegistry, { + description: 'Downscaling method to produce the pixel-art output.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + image_url: z.string().register(z.globalRegistry, { + description: 'The image URL to process into improved pixel art', + }), + background_mode: z.optional( + z.enum(['edges', 'corners', 'midpoints']).register(z.globalRegistry, { + description: + 'Controls where to flood-fill from when removing the background.', + }), + ), + max_colors: z + .optional( + z.int().gte(1).lte(256).register(z.globalRegistry, { + description: + 'Maximum number of colors in the output palette. 
Set None to disable limit.', + }), + ) + .default(32), + dominant_color_threshold: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Dominant color threshold (0.0-1.0).', + }), + ) + .default(0.05), +}) + +/** + * ReveEditOutput + * + * Output for Reve image editing + */ +export const zSchemaReveEditOutput = z + .object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The edited images', + }), + }) + .register(z.globalRegistry, { + description: 'Output for Reve image editing', + }) + +/** + * ReveEditInput + * + * Input for Reve image editing + */ +export const zSchemaReveEditInput = z + .object({ + prompt: z.string().min(1).max(2560).register(z.globalRegistry, { + description: 'The text description of how to edit the provided image.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'Output format for the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: + 'URL of the reference image to edit. Must be publicly accessible or base64 data URI. Supports PNG, JPEG, WebP, AVIF, and HEIF formats.', + }), + }) + .register(z.globalRegistry, { + description: 'Input for Reve image editing', + }) + +/** + * ReveRemixOutput + * + * Output for Reve image remixing + */ +export const zSchemaReveRemixOutput = z + .object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The remixed images', + }), + }) + .register(z.globalRegistry, { + description: 'Output for Reve image remixing', + }) + +/** + * ReveRemixInput + * + * Input for Reve image remixing + */ +export const zSchemaReveRemixInput = z + .object({ + prompt: z.string().min(1).max(2560).register(z.globalRegistry, { + description: + 'The text description of the desired image. May include XML img tags that refer to specific images by their index in the image_urls list (e.g. index 0).', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + aspect_ratio: z.optional( + z + .enum(['16:9', '9:16', '3:2', '2:3', '4:3', '3:4', '1:1']) + .register(z.globalRegistry, { + description: + 'The desired aspect ratio of the generated image. If not provided, will be smartly chosen by the model.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'List of URLs of reference images. Must provide between 1 and 6 images (inclusive). Each image must be less than 10 MB. 
Supports PNG, JPEG, WebP, AVIF, and HEIF formats.', + }), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'Output format for the generated image.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'Input for Reve image remixing', + }) + +/** + * EditImageResponseMini + */ +export const zSchemaGptImage1MiniEditOutput = z.object({ + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated images.', + }), +}) + +/** + * EditImageRequestMini + */ +export const zSchemaGptImage1MiniEditInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt for image generation', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + image_size: z.optional( + z + .enum(['auto', '1024x1024', '1536x1024', '1024x1536']) + .register(z.globalRegistry, { + description: 'Aspect ratio for the generated image', + }), + ), + background: z.optional( + z.enum(['auto', 'transparent', 'opaque']).register(z.globalRegistry, { + description: 'Background for the generated image', + }), + ), + quality: z.optional( + z.enum(['auto', 'low', 'medium', 'high']).register(z.globalRegistry, { + description: 'Quality for the generated image', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'Output format for the images', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'The URLs of the images to use as a reference for the generation.', + }), +}) + +/** + * ChronoEditOutput + * + * Unified output model for all ChronoEdit operations + */ +export const zSchemaChronoEditOutput = z + .object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for the inference.', + }), + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The edited image.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed for the inference.', + }), + }) + .register(z.globalRegistry, { + description: 'Unified output model for all ChronoEdit operations', + }) + +/** + * ChronoEditInput + * + * Input model for ChronoEdit standard editing operations + */ +export const zSchemaChronoEditInput = z + .object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to edit the image.', + }), + resolution: z.optional( + z.enum(['480p', '720p']).register(z.globalRegistry, { + description: 'The resolution of the output image.', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the output image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The image to edit.', + }), + turbo_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Enable turbo mode to use for faster inference.', + }), + ) + .default(true), + num_temporal_reasoning_steps: z + .optional( + 
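+
+/**
+ * Validation sketch (illustrative): `safeParse` surfaces the generated range
+ * constraints without throwing — here `num_images` must satisfy gte(1).lte(4).
+ *
+ * @example
+ * const check = zSchemaGptImage1MiniEditInput.safeParse({
+ *   prompt: 'Add a red scarf to the subject',
+ *   image_urls: ['https://example.com/ref.png'],
+ *   num_images: 9, // out of range, so check.success === false
+ * })
+ * if (!check.success) console.error(check.error.issues)
+ */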
z.int().gte(2).lte(12).register(z.globalRegistry, { + description: 'The number of temporal reasoning steps to perform.', + }), + ) + .default(8), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to return the image in sync mode.', + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(10).register(z.globalRegistry, { + description: 'The guidance scale for the inference.', + }), + ) + .default(1), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(8), + enable_temporal_reasoning: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable temporal reasoning.', + }), + ) + .default(false), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the inference.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'Input model for ChronoEdit standard editing operations', + }) + +/** + * Emu35EditOutput + */ +export const zSchemaEmu35ImageEditImageOutput = z.object({ + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The edited image.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed for the inference.', + }), +}) + +/** + * Emu35ImageEditInput + */ +export const zSchemaEmu35ImageEditImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to edit the image.', + }), + resolution: z.optional( + z.enum(['480p', '720p']).register(z.globalRegistry, { + description: 'The resolution of the output image.', + }), + ), + aspect_ratio: z.optional( + z + .enum([ + 'auto', + '21:9', + '16:9', + '4:3', + '3:2', + '1:1', + '2:3', + '3:4', + '9:16', + '9:21', + ]) + .register(z.globalRegistry, { + description: 'The aspect ratio of the output image.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the output image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The image to edit.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to return the image in sync mode.', + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the inference.', + }), + ), +}) + +/** + * Output + */ +export const zSchemaFluxVisionUpscalerOutput = z.object({ + image: zSchemaImage, + caption: z.string().register(z.globalRegistry, { + description: 'The VLM-generated caption describing the upscaled image.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used to generate the image.', + }), + timings: z.record(z.string(), z.number()).register(z.globalRegistry, { + description: 'The timings of the different steps in the workflow.', + }), +}) + +/** + * Input + */ +export const zSchemaFluxVisionUpscalerInput = z.object({ + guidance: z + .optional( + z.number().gte(1).lte(4).register(z.globalRegistry, { + description: + 'CFG/guidance scale (1-4). 
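+
+/**
+ * ChronoEdit sketch (illustrative): temporal reasoning is opt-in, and the
+ * step count presumably only takes effect when the flag is set; the image
+ * URL is a placeholder.
+ *
+ * @example
+ * const input = zSchemaChronoEditInput.parse({
+ *   prompt: 'Open the laptop lid halfway',
+ *   image_url: 'https://example.com/desk.jpg',
+ *   enable_temporal_reasoning: true,
+ *   num_temporal_reasoning_steps: 10,
+ * })
+ * // turbo_mode, guidance_scale, etc. fall back to the generated defaults
+ */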
Controls how closely the model follows the prompt.', + }), + ) + .default(1), + creativity: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The creativity of the model. The higher the creativity, the more the model will deviate from the original. Refers to the denoise strength of the sampling.', + }), + ) + .default(0.3), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to upscale.', + }), + upscale_factor: z + .optional( + z.number().gte(1).lte(4).register(z.globalRegistry, { + description: 'The upscale factor (1-4x).', + }), + ) + .default(2), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + seed: z.optional(z.union([z.int(), z.unknown()])), + steps: z + .optional( + z.int().gte(4).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps (4-50).', + }), + ) + .default(20), +}) + +/** + * OutpaintOutput + */ +export const zSchemaImageAppsV2OutpaintOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Outpainted image with extended scene', + }), +}) + +/** + * OutpaintInput + */ +export const zSchemaImageAppsV2OutpaintInput = z.object({ + prompt: z + .optional( + z.string().max(500).register(z.globalRegistry, { + description: + "Optional prompt to guide the outpainting. If provided, it will be appended to the base outpaint instruction. Example: 'with a beautiful sunset in the background'", + }), + ) + .default(''), + expand_right: z + .optional( + z.int().gte(0).lte(700).register(z.globalRegistry, { + description: + 'Number of pixels to add as black margin on the right side (0-700).', + }), + ) + .default(0), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate.', + }), + ) + .default(1), + zoom_out_percentage: z + .optional( + z.number().gte(0).lte(90).register(z.globalRegistry, { + description: + 'Percentage to zoom out the image. If set, the image will be scaled down by this percentage and black margins will be added to maintain original size. Example: 50 means the image will be 50% of original size with black margins filling the rest.', + }), + ) + .default(20), + output_format: z.optional( + z.enum(['png', 'jpeg', 'jpg', 'webp']).register(z.globalRegistry, { + description: 'The format of the output image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'Image URL to outpaint', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If True, the function will wait for the image to be generated and uploaded before returning the response. 
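+
+/**
+ * Upscaler sketch (illustrative): the generated bounds make bad requests fail
+ * locally — guidance is constrained to [1, 4] and upscale_factor to [1, 4].
+ *
+ * @example
+ * const input = zSchemaFluxVisionUpscalerInput.parse({
+ *   image_url: 'https://example.com/low-res.png',
+ *   upscale_factor: 4,
+ *   creativity: 0.5,
+ * })
+ * // input.steps === 20 and input.guidance === 1 via the generated defaults
+ */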
If False, the function will return immediately and the image will be generated asynchronously.', + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + expand_left: z + .optional( + z.int().gte(0).lte(700).register(z.globalRegistry, { + description: + 'Number of pixels to add as black margin on the left side (0-700).', + }), + ) + .default(0), + expand_bottom: z + .optional( + z.int().gte(0).lte(700).register(z.globalRegistry, { + description: + 'Number of pixels to add as black margin on the bottom side (0-700).', + }), + ) + .default(400), + expand_top: z + .optional( + z.int().gte(0).lte(700).register(z.globalRegistry, { + description: + 'Number of pixels to add as black margin on the top side (0-700).', + }), + ) + .default(0), +}) + +/** + * ReveFastEditOutput + * + * Output for Reve fast image editing + */ +export const zSchemaReveFastEditOutput = z + .object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The edited images', + }), + }) + .register(z.globalRegistry, { + description: 'Output for Reve fast image editing', + }) + +/** + * ReveFastEditInput + * + * Input for Reve fast image editing + */ +export const zSchemaReveFastEditInput = z + .object({ + prompt: z.string().min(1).max(2560).register(z.globalRegistry, { + description: 'The text description of how to edit the provided image.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'Output format for the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: + 'URL of the reference image to edit. Must be publicly accessible or base64 data URI. Supports PNG, JPEG, WebP, AVIF, and HEIF formats.', + }), + }) + .register(z.globalRegistry, { + description: 'Input for Reve fast image editing', + }) + +/** + * ReveRemixOutput + * + * Output for Reve image remixing + */ +export const zSchemaReveFastRemixOutput = z + .object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The remixed images', + }), + }) + .register(z.globalRegistry, { + description: 'Output for Reve image remixing', + }) + +/** + * ReveRemixInput + * + * Input for Reve image remixing + */ +export const zSchemaReveFastRemixInput = z + .object({ + prompt: z.string().min(1).max(2560).register(z.globalRegistry, { + description: + 'The text description of the desired image. May include XML img tags like 0 to refer to specific images by their index in the image_urls list.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + aspect_ratio: z.optional( + z + .enum(['16:9', '9:16', '3:2', '2:3', '4:3', '3:4', '1:1']) + .register(z.globalRegistry, { + description: + 'The desired aspect ratio of the generated image. 
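+
+/**
+ * Outpaint sketch (illustrative): margins are per-side pixel counts in
+ * [0, 700], and `zoom_out_percentage` shrinks the source before the margins
+ * are filled. Note the generated default of 400 for `expand_bottom`.
+ *
+ * @example
+ * const input = zSchemaImageAppsV2OutpaintInput.parse({
+ *   image_url: 'https://example.com/scene.png',
+ *   expand_left: 256,
+ *   expand_right: 256,
+ *   expand_bottom: 0, // override the 400px default
+ *   zoom_out_percentage: 0, // keep original scale, extend sideways only
+ * })
+ */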
If not provided, will be smartly chosen by the model.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'List of URLs of reference images. Must provide between 1 and 6 images (inclusive). Each image must be less than 10 MB. Supports PNG, JPEG, WebP, AVIF, and HEIF formats.', + }), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'Output format for the generated image.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'Input for Reve image remixing', + }) + +/** + * AddBackgroundOutput + */ +export const zSchemaQwenImageEditPlusLoraGalleryAddBackgroundOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated/edited images', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), +}) + +/** + * AddBackgroundInput + * + * Input model for Add Background endpoint - Remove white background and add a realistic scene + */ +export const zSchemaQwenImageEditPlusLoraGalleryAddBackgroundInput = z + .object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Describe the background/scene you want to add behind the object. The model will remove the white background and add the specified environment.', + }), + ) + .default( + 'Remove white background and add a realistic scene behind the object', + ), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + z.unknown(), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level for image generation. 'regular' balances speed and quality.", + }), + ), + lora_scale: z + .optional( + z.number().gte(0).lte(4).register(z.globalRegistry, { + description: + 'The scale factor for the LoRA model. Controls the strength of the LoRA effect.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'The format of the output image', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable the safety checker for the generated image.', + }), + ) + .default(true), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and won't be saved in history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.', + }), + ) + .default(1), + seed: z.optional(z.union([z.int(), z.unknown()])), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'The URLs of the images to edit. 
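+
+/**
+ * The Qwen image-edit LoRA gallery endpoints defined from here on (Add
+ * Background, Face to Full Portrait, Group Photo, Integrate Product, Next
+ * Scene, Remove Element, Remove Lighting, Shirt Design, Multiple Angles)
+ * share one input/output shape modulo their default prompt, so a generic
+ * helper typed over a schema pair covers them all. Illustrative sketch —
+ * `runFal` and the `@fal-ai/client` import are assumptions:
+ *
+ * @example
+ * async function runFal<I extends z.ZodType, O extends z.ZodType>(
+ *   endpointId: string,
+ *   inputSchema: I,
+ *   outputSchema: O,
+ *   input: z.input<I>,
+ * ): Promise<z.output<O>> {
+ *   const { fal } = await import('@fal-ai/client')
+ *   const result = await fal.subscribe(endpointId, {
+ *     input: inputSchema.parse(input) as Record<string, unknown>,
+ *   })
+ *   return outputSchema.parse(result.data)
+ * }
+ */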
Provide an image with a white or clean background.', + }), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt for the generation', + }), + ) + .default(' '), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(6), + }) + .register(z.globalRegistry, { + description: + 'Input model for Add Background endpoint - Remove white background and add a realistic scene', + }) + +/** + * FaceToFullPortraitOutput + */ +export const zSchemaQwenImageEditPlusLoraGalleryFaceToFullPortraitOutput = + z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated/edited images', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), + }) + +/** + * FaceToFullPortraitInput + * + * Input model for Face to Full Portrait endpoint - Generate full portrait from a cropped face image + */ +export const zSchemaQwenImageEditPlusLoraGalleryFaceToFullPortraitInput = z + .object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Describe the full portrait you want to generate from the face. Include clothing, setting, pose, and style details.', + }), + ) + .default( + 'Photography. A portrait of the person in professional attire with natural lighting', + ), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + z.unknown(), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level for image generation. 'regular' balances speed and quality.", + }), + ), + lora_scale: z + .optional( + z.number().gte(0).lte(4).register(z.globalRegistry, { + description: + 'The scale factor for the LoRA model. Controls the strength of the LoRA effect.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'The format of the output image', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable the safety checker for the generated image.', + }), + ) + .default(true), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and won't be saved in history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.', + }), + ) + .default(1), + seed: z.optional(z.union([z.int(), z.unknown()])), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'The URL of the cropped face image. 
Provide a close-up face photo.', + }), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt for the generation', + }), + ) + .default(' '), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(6), + }) + .register(z.globalRegistry, { + description: + 'Input model for Face to Full Portrait endpoint - Generate full portrait from a cropped face image', + }) + +/** + * GroupPhotoOutput + */ +export const zSchemaQwenImageEditPlusLoraGalleryGroupPhotoOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated/edited images', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), +}) + +/** + * GroupPhotoInput + * + * Input model for Group Photo endpoint - Create composite group photos with vintage/retro style + */ +export const zSchemaQwenImageEditPlusLoraGalleryGroupPhotoInput = z + .object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Describe the group photo scene, setting, and style. The model will maintain character consistency and add vintage effects like grain, blur, and retro filters.', + }), + ) + .default( + 'Two people standing next to each other outside with a landscape background', + ), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + z.unknown(), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level for image generation. 'regular' balances speed and quality.", + }), + ), + lora_scale: z + .optional( + z.number().gte(0).lte(4).register(z.globalRegistry, { + description: + 'The scale factor for the LoRA model. Controls the strength of the LoRA effect.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'The format of the output image', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable the safety checker for the generated image.', + }), + ) + .default(true), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and won't be saved in history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.', + }), + ) + .default(1), + seed: z.optional(z.union([z.int(), z.unknown()])), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'The URLs of the images to combine into a group photo. 
Provide 2 or more individual portrait images.', + }), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt for the generation', + }), + ) + .default(' '), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(6), + }) + .register(z.globalRegistry, { + description: + 'Input model for Group Photo endpoint - Create composite group photos with vintage/retro style', + }) + +/** + * IntegrateProductOutput + */ +export const zSchemaQwenImageEditPlusLoraGalleryIntegrateProductOutput = + z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated/edited images', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), + }) + +/** + * IntegrateProductInput + * + * Input model for Integrate Product endpoint - Blend and integrate products/elements into backgrounds + */ +export const zSchemaQwenImageEditPlusLoraGalleryIntegrateProductInput = z + .object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Describe how to blend and integrate the product/element into the background. The model will automatically correct perspective, lighting and shadows for natural integration.', + }), + ) + .default('Blend and integrate the product into the background'), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + z.unknown(), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level for image generation. 'regular' balances speed and quality.", + }), + ), + lora_scale: z + .optional( + z.number().gte(0).lte(4).register(z.globalRegistry, { + description: + 'The scale factor for the LoRA model. Controls the strength of the LoRA effect.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'The format of the output image', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable the safety checker for the generated image.', + }), + ) + .default(true), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and won't be saved in history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale. 
Controls how closely the model follows the prompt.', + }), + ) + .default(1), + seed: z.optional(z.union([z.int(), z.unknown()])), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'The URL of the image with product to integrate into background.', + }), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt for the generation', + }), + ) + .default(' '), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(6), + }) + .register(z.globalRegistry, { + description: + 'Input model for Integrate Product endpoint - Blend and integrate products/elements into backgrounds', + }) + +/** + * NextSceneOutput + */ +export const zSchemaQwenImageEditPlusLoraGalleryNextSceneOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated/edited images', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), +}) + +/** + * NextSceneInput + * + * Input model for Next Scene endpoint - Create cinematic shot progressions and scene transitions + */ +export const zSchemaQwenImageEditPlusLoraGalleryNextSceneInput = z + .object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "Describe the camera movement, framing change, or scene transition. Start with 'Next Scene:' for best results. Examples: camera movements (dolly, push-in, pull-back), framing changes (wide to close-up), new elements entering frame.", + }), + ) + .default( + 'Next Scene: The camera moves forward revealing more of the scene', + ), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + z.unknown(), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level for image generation. 'regular' balances speed and quality.", + }), + ), + lora_scale: z + .optional( + z.number().gte(0).lte(4).register(z.globalRegistry, { + description: + 'The scale factor for the LoRA model. Controls the strength of the LoRA effect.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'The format of the output image', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable the safety checker for the generated image.', + }), + ) + .default(true), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and won't be saved in history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale. 
Controls how closely the model follows the prompt.', + }), + ) + .default(1), + seed: z.optional(z.union([z.int(), z.unknown()])), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: 'The URL of the image to create the next scene from.', + }), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt for the generation', + }), + ) + .default(' '), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(6), + }) + .register(z.globalRegistry, { + description: + 'Input model for Next Scene endpoint - Create cinematic shot progressions and scene transitions', + }) + +/** + * RemoveElementOutput + */ +export const zSchemaQwenImageEditPlusLoraGalleryRemoveElementOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated/edited images', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), +}) + +/** + * RemoveElementInput + * + * Input model for Remove Element endpoint - Remove/delete elements (objects, people, text) from the image + */ +export const zSchemaQwenImageEditPlusLoraGalleryRemoveElementInput = z + .object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Specify what element(s) to remove from the image (objects, people, text, etc.). The model will cleanly remove the element while maintaining consistency of the rest of the image.', + }), + ) + .default('Remove the specified element from the scene'), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + z.unknown(), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level for image generation. 'regular' balances speed and quality.", + }), + ), + lora_scale: z + .optional( + z.number().gte(0).lte(4).register(z.globalRegistry, { + description: + 'The scale factor for the LoRA model. Controls the strength of the LoRA effect.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'The format of the output image', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable the safety checker for the generated image.', + }), + ) + .default(true), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and won't be saved in history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale. 
Controls how closely the model follows the prompt.', + }), + ) + .default(1), + seed: z.optional(z.union([z.int(), z.unknown()])), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: 'The URL of the image containing elements to remove.', + }), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt for the generation', + }), + ) + .default(' '), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(6), + }) + .register(z.globalRegistry, { + description: + 'Input model for Remove Element endpoint - Remove/delete elements (objects, people, text) from the image', + }) + +/** + * RemoveLightingOutput + */ +export const zSchemaQwenImageEditPlusLoraGalleryRemoveLightingOutput = z.object( + { + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated/edited images', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), + }, +) + +/** + * RemoveLightingInput + * + * Input model for Remove Lighting endpoint - Remove existing lighting and apply soft even lighting + */ +export const zSchemaQwenImageEditPlusLoraGalleryRemoveLightingInput = z + .object({ + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable the safety checker for the generated image.', + }), + ) + .default(true), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + z.unknown(), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level for image generation. 'regular' balances speed and quality.", + }), + ), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'The format of the output image', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and won't be saved in history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale. 
Controls how closely the model follows the prompt.', + }), + ) + .default(1), + seed: z.optional(z.union([z.int(), z.unknown()])), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: 'The URL of the image with lighting/shadows to remove.', + }), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt for the generation', + }), + ) + .default(' '), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(6), + }) + .register(z.globalRegistry, { + description: + 'Input model for Remove Lighting endpoint - Remove existing lighting and apply soft even lighting', + }) + +/** + * ShirtDesignOutput + */ +export const zSchemaQwenImageEditPlusLoraGalleryShirtDesignOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated/edited images', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), +}) + +/** + * ShirtDesignInput + * + * Input model for Shirt Design endpoint - Put designs/graphics on people's shirts + */ +export const zSchemaQwenImageEditPlusLoraGalleryShirtDesignInput = z + .object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "Describe what design to put on the shirt. The model will apply the design from your input image onto the person's shirt.", + }), + ) + .default('Put this design on their shirt'), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + z.unknown(), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level for image generation. 'regular' balances speed and quality.", + }), + ), + lora_scale: z + .optional( + z.number().gte(0).lte(4).register(z.globalRegistry, { + description: + 'The scale factor for the LoRA model. Controls the strength of the LoRA effect.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'The format of the output image', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable the safety checker for the generated image.', + }), + ) + .default(true), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and won't be saved in history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale. 
Controls how closely the model follows the prompt.', + }), + ) + .default(1), + seed: z.optional(z.union([z.int(), z.unknown()])), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'The URLs of the images: first image is the person wearing a shirt, second image is the design/logo to put on the shirt.', + }), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt for the generation', + }), + ) + .default(' '), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(6), + }) + .register(z.globalRegistry, { + description: + "Input model for Shirt Design endpoint - Put designs/graphics on people's shirts", + }) + +/** + * MultipleAnglesOutput + */ +export const zSchemaQwenImageEditPlusLoraGalleryMultipleAnglesOutput = z.object( + { + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated/edited images', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), + }, +) + +/** + * MultipleAnglesInput + * + * Input model for Multiple Angles endpoint - Camera control with precise adjustments + */ +export const zSchemaQwenImageEditPlusLoraGalleryMultipleAnglesInput = z + .object({ + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + z.unknown(), + ]), + ), + wide_angle_lens: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Enable wide-angle lens effect', + }), + ) + .default(false), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level for image generation. 'regular' balances speed and quality.", + }), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.', + }), + ) + .default(1), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable the safety checker for the generated image.', + }), + ) + .default(true), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: 'The URL of the image to adjust camera angle for.', + }), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt for the generation', + }), + ) + .default(' '), + vertical_angle: z + .optional( + z.number().gte(-1).lte(1).register(z.globalRegistry, { + description: + "Adjust vertical camera angle (-1=bird's-eye view/looking down, 0=neutral, 1=worm's-eye view/looking up)", + }), + ) + .default(0), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + move_forward: z + .optional( + z.number().gte(0).lte(10).register(z.globalRegistry, { + description: 'Move camera forward (0=no movement, 10=close-up)', + }), + ) + .default(0), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'The format of the output image', + }), + ), + rotate_right_left: z + .optional( + z.number().gte(-90).lte(90).register(z.globalRegistry, { + description: + 'Rotate camera left (positive) or right (negative) in degrees. 
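+
+/**
+ * Camera-control sketch for the Multiple Angles input defined here
+ * (illustrative): the controls are numeric scalars rather than prompt text —
+ * `vertical_angle` runs from -1 (bird's-eye) to 1 (worm's-eye) and rotation
+ * is given in degrees.
+ *
+ * @example
+ * const input = zSchemaQwenImageEditPlusLoraGalleryMultipleAnglesInput.parse({
+ *   image_urls: ['https://example.com/product.png'],
+ *   vertical_angle: -0.5, // look down at the subject
+ *   rotate_right_left: 30, // rotate the camera 30 degrees to the left
+ *   move_forward: 2,
+ * })
+ */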
Positive values rotate left, negative values rotate right.', + }), + ) + .default(0), + lora_scale: z + .optional( + z.number().gte(0).lte(4).register(z.globalRegistry, { + description: + 'The scale factor for the LoRA model. Controls the strength of the camera control effect.', + }), + ) + .default(1.25), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and won't be saved in history.", + }), + ) + .default(false), + seed: z.optional(z.union([z.int(), z.unknown()])), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(6), + }) + .register(z.globalRegistry, { + description: + 'Input model for Multiple Angles endpoint - Camera control with precise adjustments', + }) + +/** + * NanoBananaImageToImageOutput + */ +export const zSchemaNanoBananaProEditOutput = z.object({ + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The edited images.', + }), + description: z.string().register(z.globalRegistry, { + description: 'The description of the generated images.', + }), +}) + +/** + * NanoBananaImageToImageInput + */ +export const zSchemaNanoBananaProEditInput = z.object({ + prompt: z.string().min(3).max(50000).register(z.globalRegistry, { + description: 'The prompt for image editing.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + enable_web_search: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Enable web search for the image generation task. This will allow the model to use the latest information from the web to generate the image.', + }), + ) + .default(false), + aspect_ratio: z.optional( + z + .enum([ + 'auto', + '21:9', + '16:9', + '3:2', + '4:3', + '5:4', + '1:1', + '4:5', + '3:4', + '2:3', + '9:16', + ]) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated image.', + }), + ), + resolution: z.optional( + z.enum(['1K', '2K', '4K']).register(z.globalRegistry, { + description: 'The resolution of the image to generate.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the random number generator.', + }), + ), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'The URLs of the images to use for image-to-image generation or image editing.', + }), + limit_generations: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Experimental parameter to limit the number of generations from each round of prompting to 1. 
Set to `True` to disregard any instructions in the prompt regarding the number of images to generate.',
+      }),
+    )
+    .default(false),
+})
+
+/**
+ * NanoBananaImageToImageOutput
+ */
+export const zSchemaGemini3ProImagePreviewEditOutput = z.object({
+  images: z.array(zSchemaImageFile).register(z.globalRegistry, {
+    description: 'The edited images.',
+  }),
+  description: z.string().register(z.globalRegistry, {
+    description: 'The description of the generated images.',
+  }),
+})
+
+/**
+ * NanoBananaImageToImageInput
+ */
+export const zSchemaGemini3ProImagePreviewEditInput = z.object({
+  prompt: z.string().min(3).max(50000).register(z.globalRegistry, {
+    description: 'The prompt for image editing.',
+  }),
+  num_images: z
+    .optional(
+      z.int().gte(1).lte(4).register(z.globalRegistry, {
+        description: 'The number of images to generate.',
+      }),
+    )
+    .default(1),
+  enable_web_search: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description:
+          'Enable web search for the image generation task. This will allow the model to use the latest information from the web to generate the image.',
+      }),
+    )
+    .default(false),
+  resolution: z.optional(
+    z.enum(['1K', '2K', '4K']).register(z.globalRegistry, {
+      description: 'The resolution of the image to generate.',
+    }),
+  ),
+  aspect_ratio: z.optional(
+    z
+      .enum([
+        'auto',
+        '21:9',
+        '16:9',
+        '3:2',
+        '4:3',
+        '5:4',
+        '1:1',
+        '4:5',
+        '3:4',
+        '2:3',
+        '9:16',
+      ])
+      .register(z.globalRegistry, {
+        description: 'The aspect ratio of the generated image.',
+      }),
+  ),
+  output_format: z.optional(
+    z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, {
+      description: 'The format of the generated image.',
+    }),
+  ),
+  sync_mode: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description:
+          "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.",
+      }),
+    )
+    .default(false),
+  seed: z.optional(
+    z.int().register(z.globalRegistry, {
+      description: 'The seed for the random number generator.',
+    }),
+  ),
+  image_urls: z.array(z.string()).register(z.globalRegistry, {
+    description:
+      'The URLs of the images to use for image-to-image generation or image editing.',
+  }),
+  limit_generations: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description:
+          'Experimental parameter to limit the number of generations from each round of prompting to 1. Set to `True` to disregard any instructions in the prompt regarding the number of images to generate.',
+      }),
+    )
+    .default(false),
+})
+
+/**
+ * MaskMetadata
+ */
+export const zSchemaMaskMetadata = z.object({
+  box: z.optional(
+    z.array(z.number()).register(z.globalRegistry, {
+      description:
+        'Bounding box for the mask in normalized cxcywh coordinates.',
+    }),
+  ),
+  score: z.optional(
+    z.number().register(z.globalRegistry, {
+      description: 'Score for this mask.',
+    }),
+  ),
+  index: z.int().register(z.globalRegistry, {
+    description: 'Index of the mask inside the model output.',
+  }),
+})
+
+/**
+ * SAM3ImageOutput
+ */
+export const zSchemaSam3ImageOutput = z.object({
+  image: z.optional(zSchemaImage),
+  metadata: z.optional(
+    z.array(zSchemaMaskMetadata).register(z.globalRegistry, {
+      description: 'Per-mask metadata including scores and boxes.',
+    }),
+  ),
+  masks: z.array(zSchemaImage).register(z.globalRegistry, {
+    description: 'Segmented mask images.',
+  }),
+  scores: z.optional(
+    z.array(z.number()).register(z.globalRegistry, {
+      description: 'Per-mask confidence scores when requested.',
+    }),
+  ),
+  boxes: z.optional(
+    z.array(z.array(z.number())).register(z.globalRegistry, {
+      description:
+        'Per-mask normalized bounding boxes [cx, cy, w, h] when requested.',
+    }),
+  ),
+})
+
+/**
+ * SAM3ImageInput
+ */
+export const zSchemaSam3ImageInput = z.object({
+  prompt: z
+    .optional(
+      z.string().register(z.globalRegistry, {
+        description: 'Text prompt for segmentation',
+      }),
+    )
+    .default('wheel'),
+  include_boxes: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description:
+          'Whether to include bounding boxes for each mask (when available).',
+      }),
+    )
+    .default(false),
+  box_prompts: z
+    .optional(
+      z.array(zSchemaBoxPrompt).register(z.globalRegistry, {
+        description:
+          'Box prompt coordinates (x_min, y_min, x_max, y_max). Multiple boxes supported - use object_id to group boxes for the same object or leave empty for separate objects.',
+      }),
+    )
+    .default([]),
+  return_multiple_masks: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description:
+          'If True, upload and return multiple generated masks as defined by `max_masks`.',
+      }),
+    )
+    .default(false),
+  image_url: z.string().register(z.globalRegistry, {
+    description: 'URL of the image to be segmented',
+  }),
+  sync_mode: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description: 'If True, the media will be returned as a data URI.',
+      }),
+    )
+    .default(false),
+  point_prompts: z
+    .optional(
+      z.array(zSchemaPointPrompt).register(z.globalRegistry, {
+        description: 'List of point prompts',
+      }),
+    )
+    .default([]),
+  output_format: z.optional(
+    z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, {
+      description: 'The format of the generated image.',
+    }),
+  ),
+  max_masks: z
+    .optional(
+      z.int().gte(1).lte(32).register(z.globalRegistry, {
+        description:
+          'Maximum number of masks to return when `return_multiple_masks` is enabled.',
+      }),
+    )
+    .default(3),
+  include_scores: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description: 'Whether to include mask confidence scores.',
+      }),
+    )
+    .default(false),
+  apply_mask: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description: 'Apply the mask on the image.',
+      }),
+    )
+    .default(true),
+  text_prompt: z.optional(
+    z.string().register(z.globalRegistry, {
+      description:
+        "[DEPRECATED] Use 'prompt' instead. 
Kept for backward compatibility.", + }), + ), +}) + +/** + * SAM3RLEOutput + */ +export const zSchemaSam3ImageRleOutput = z.object({ + rle: z.union([z.string(), z.array(z.string())]), + metadata: z.optional( + z.array(zSchemaMaskMetadata).register(z.globalRegistry, { + description: 'Per-mask metadata when multiple RLEs are returned.', + }), + ), + scores: z.optional( + z.array(z.number()).register(z.globalRegistry, { + description: 'Per-mask confidence scores when requested.', + }), + ), + boundingbox_frames_zip: z.optional(zSchemaFile), + boxes: z.optional( + z.array(z.array(z.number())).register(z.globalRegistry, { + description: + 'Per-mask normalized bounding boxes [cx, cy, w, h] when requested.', + }), + ), +}) + +/** + * SAM3ImageInput + */ +export const zSchemaSam3ImageRleInput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Text prompt for segmentation', + }), + ) + .default('wheel'), + include_boxes: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to include bounding boxes for each mask (when available).', + }), + ) + .default(false), + box_prompts: z + .optional( + z.array(zSchemaBoxPrompt).register(z.globalRegistry, { + description: + 'Box prompt coordinates (x_min, y_min, x_max, y_max). Multiple boxes supported - use object_id to group boxes for the same object or leave empty for separate objects.', + }), + ) + .default([]), + return_multiple_masks: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If True, upload and return multiple generated masks as defined by `max_masks`.', + }), + ) + .default(false), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to be segmented', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If True, the media will be returned as a data URI.', + }), + ) + .default(false), + point_prompts: z + .optional( + z.array(zSchemaPointPrompt).register(z.globalRegistry, { + description: 'List of point prompts', + }), + ) + .default([]), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + max_masks: z + .optional( + z.int().gte(1).lte(32).register(z.globalRegistry, { + description: + 'Maximum number of masks to return when `return_multiple_masks` is enabled.', + }), + ) + .default(3), + include_scores: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to include mask confidence scores.', + }), + ) + .default(false), + apply_mask: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Apply the mask on the image.', + }), + ) + .default(true), + text_prompt: z.optional( + z.string().register(z.globalRegistry, { + description: + "[DEPRECATED] Use 'prompt' instead. 
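+
+/**
+ * Segmentation sketch covering both SAM3 inputs (the mask-image variant and
+ * the RLE variant take identical fields). Illustrative only; the image URL is
+ * a placeholder:
+ *
+ * @example
+ * const input = zSchemaSam3ImageInput.parse({
+ *   image_url: 'https://example.com/street.jpg',
+ *   prompt: 'bicycle', // preferred over the deprecated text_prompt
+ *   return_multiple_masks: true,
+ *   max_masks: 5,
+ *   include_scores: true,
+ * })
+ */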
Kept for backward compatibility.", + }), + ), +}) + +/** + * ChronoEditOutput + * + * Unified output model for all ChronoEdit operations + */ +export const zSchemaChronoEditLoraGalleryUpscalerOutput = z + .object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for the inference.', + }), + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The edited image.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed for the inference.', + }), + }) + .register(z.globalRegistry, { + description: 'Unified output model for all ChronoEdit operations', + }) + +/** + * ChronoLoraWeight + */ +export const zSchemaChronoLoraWeight = z.object({ + path: z.string().register(z.globalRegistry, { + description: 'URL or path to the LoRA weights (Safetensors).', + }), + scale: z + .optional( + z.number().gte(0).lte(4).register(z.globalRegistry, { + description: 'Scale factor controlling LoRA strength.', + }), + ) + .default(1), +}) + +/** + * ChronoEditUpscalerInput + * + * Input for upscaler mode + */ +export const zSchemaChronoEditLoraGalleryUpscalerInput = z + .object({ + lora_scale: z + .optional( + z.number().gte(0).lte(2).register(z.globalRegistry, { + description: 'The scale factor for the LoRA adapter.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the output image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The image to upscale.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to return the image in sync mode.', + }), + ) + .default(false), + loras: z + .optional( + z.array(zSchemaChronoLoraWeight).register(z.globalRegistry, { + description: 'Optional additional LoRAs to merge (max 3).', + }), + ) + .default([]), + upscale_factor: z + .optional( + z.number().gte(1).lte(4).register(z.globalRegistry, { + description: 'Target scale factor for the output resolution.', + }), + ) + .default(2), + guidance_scale: z + .optional( + z.number().gte(0).lte(10).register(z.globalRegistry, { + description: 'The guidance scale for the inference.', + }), + ) + .default(1), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps for the upscaling pass.', + }), + ) + .default(30), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the inference.', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + }) + .register(z.globalRegistry, { + description: 'Input for upscaler mode', + }) + +/** + * ChronoEditOutput + * + * Unified output model for all ChronoEdit operations + */ +export const zSchemaChronoEditLoraGalleryPaintbrushOutput = z + .object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for the inference.', + }), + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The edited image.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed for the inference.', + }), + }) + .register(z.globalRegistry, { + description: 'Unified output model for all ChronoEdit operations', + }) + +/** + * ChronoEditPaintBrushInput + * + * Input for paintbrush mode + */ +export const zSchemaChronoEditLoraGalleryPaintbrushInput = z + .object({ + 
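+
+/**
+ * LoRA-weights sketch for the ChronoEdit gallery upscaler above
+ * (illustrative; the safetensors URL is a placeholder):
+ *
+ * @example
+ * const input = zSchemaChronoEditLoraGalleryUpscalerInput.parse({
+ *   image_url: 'https://example.com/frame.png',
+ *   upscale_factor: 2,
+ *   loras: [
+ *     { path: 'https://example.com/detail-boost.safetensors', scale: 0.8 },
+ *   ],
+ * })
+ * // zSchemaChronoLoraWeight fills scale to 1 when omitted
+ */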
prompt: z.string().register(z.globalRegistry, { + description: 'Describe how to transform the sketched regions.', + }), + resolution: z.optional( + z.enum(['480p', '720p']).register(z.globalRegistry, { + description: 'The resolution of the output image.', + }), + ), + lora_scale: z + .optional( + z.number().gte(0).lte(2).register(z.globalRegistry, { + description: 'The scale factor for the LoRA adapter.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the output image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The image to edit.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to return the image in sync mode.', + }), + ) + .default(false), + turbo_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Enable turbo mode to use faster inference.', + }), + ) + .default(true), + loras: z + .optional( + z.array(zSchemaChronoLoraWeight).register(z.globalRegistry, { + description: 'Optional additional LoRAs to merge (max 3).', + }), + ) + .default([]), + guidance_scale: z + .optional( + z.number().gte(0).lte(10).register(z.globalRegistry, { + description: 'Classifier-free guidance scale.', + }), + ) + .default(1), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'Number of denoising steps to run.', + }), + ) + .default(8), + mask_url: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Optional mask image where black areas indicate regions to sketch/paint.', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the inference.', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + }) + .register(z.globalRegistry, { + description: 'Input for paintbrush mode', + }) + +/** + * ChronoEditOutput + * + * Unified output model for all ChronoEdit operations + */ +export const zSchemaChronoEditLoraOutput = z + .object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for the inference.', + }), + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The edited image.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed for the inference.', + }), + }) + .register(z.globalRegistry, { + description: 'Unified output model for all ChronoEdit operations', + }) + +/** + * ChronoEditLoRAInput + * + * ChronoEdit input with optional custom LoRAs. 
+ */ +export const zSchemaChronoEditLoraInput = z .object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to edit the image.', + }), + loras: z + .optional( + z.array(zSchemaChronoLoraWeight).register(z.globalRegistry, { + description: + 'Optional additional LoRAs to merge for this request (max 3).', + }), + ) + .default([]), + turbo_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Enable turbo mode for faster inference.', + }), + ) + .default(true), + enable_temporal_reasoning: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable temporal reasoning.', + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + guidance_scale: z + .optional( + z.number().gte(0).lte(10).register(z.globalRegistry, { + description: 'The guidance scale for the inference.', + }), + ) + .default(1), + resolution: z.optional( + z.enum(['480p', '720p']).register(z.globalRegistry, { + description: 'The resolution of the output image.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the output image.', + }), + ), + num_temporal_reasoning_steps: z + .optional( + z.int().gte(2).lte(12).register(z.globalRegistry, { + description: 'The number of temporal reasoning steps to perform.', + }), + ) + .default(8), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to return the image in sync mode.', + }), + ) + .default(false), + image_url: z.string().register(z.globalRegistry, { + description: 'The image to edit.', + }), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(8), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the inference.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'ChronoEdit input with optional custom LoRAs.', + }) + +/** + * Flux2FlexEditOutput + */ +export const zSchemaFlux2FlexEditOutput = z.object({ + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated images.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for the generation.', + }), +}) + +/** + * Flux2FlexImageEditInput + */ +export const zSchemaFlux2FlexEditInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + guidance_scale: z + .optional( + z.number().gte(1.5).lte(10).register(z.globalRegistry, { + description: 'The guidance scale to use for the generation.', + }), + ) + .default(3.5), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'auto', + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output
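+
+// Illustrative usage sketch (a minimal example assuming the Zod v4 `z`
+// import used throughout this file; the URL is a placeholder). `.parse`
+// validates the two required fields and fills in the declared defaults
+// (e.g. `turbo_mode: true`, `num_inference_steps: 8`).
+type ChronoEditLoraInput = z.infer<typeof zSchemaChronoEditLoraInput>
+
+const chronoEditRequest: ChronoEditLoraInput = zSchemaChronoEditLoraInput.parse(
+  {
+    prompt: 'Make the jacket red',
+    image_url: 'https://example.com/input.png', // placeholder
+  },
+)
+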
data won't be available in the request history.", + }), + ) + .default(false), + safety_tolerance: z.optional( + z.enum(['1', '2', '3', '4', '5']).register(z.globalRegistry, { + description: + 'The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive.', + }), + ), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "Whether to expand the prompt using the model's own knowledge.", + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: 'List of URLs of input images for editing', + }), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed to use for the generation.', + }), + ), +}) + +/** + * CrystalUpscaleOutput + */ +export const zSchemaCrystalUpscalerOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'List of upscaled images', + }), +}) + +/** + * CrystalUpscaleInput + */ +export const zSchemaCrystalUpscalerInput = z.object({ + creativity: z + .optional( + z.number().gte(0).lte(10).register(z.globalRegistry, { + description: 'Creativity level for upscaling', + }), + ) + .default(0), + scale_factor: z + .optional( + z.number().gte(1).lte(200).register(z.globalRegistry, { + description: 'Scale factor', + }), + ) + .default(2), + image_url: z.string().register(z.globalRegistry, { + description: 'URL to the input image', + }), +}) + +/** + * AddBackgroundOutput + */ +export const zSchemaFlux2LoraGalleryAddBackgroundOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated images with added background', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), +}) + +/** + * AddBackgroundInput + * + * Input model for Add Background endpoint - Add background to images + */ +export const zSchemaFlux2LoraGalleryAddBackgroundInput = z + .object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "The prompt describing the background to add. Must start with 'Add Background' followed by your description.", + }), + ) + .default('Add Background forest'), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + z.unknown(), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level for image generation. 
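+
+// Illustrative usage sketch (placeholder URL): `image_size` is a union, so it
+// accepts either a preset name or a custom `zSchemaImageSize` object defined
+// earlier in this file.
+const flexEditRequest = zSchemaFlux2FlexEditInput.parse({
+  prompt: 'Swap the background for a beach at sunset',
+  image_urls: ['https://example.com/portrait.png'], // placeholder
+  image_size: 'landscape_16_9',
+})
+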
'regular' balances speed and quality.", + }), + ), + lora_scale: z + .optional( + z.number().gte(0).lte(2).register(z.globalRegistry, { + description: 'The strength of the add background effect.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'The format of the output image', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and won't be saved in history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.', + }), + ) + .default(2.5), + seed: z.optional(z.union([z.int(), z.unknown()])), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'The URLs of the images. Provide an image with a white or clean background.', + }), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable the safety checker for the generated image.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(40), + }) + .register(z.globalRegistry, { + description: + 'Input model for Add Background endpoint - Add background to images', + }) + +/** + * ApartmentStagingOutput + */ +export const zSchemaFlux2LoraGalleryApartmentStagingOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated furnished room images', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), +}) + +/** + * ApartmentStagingInput + * + * Input model for Apartment Staging endpoint - Furnish rooms + */ +export const zSchemaFlux2LoraGalleryApartmentStagingInput = z + .object({ + prompt: z.string().register(z.globalRegistry, { + description: + "The prompt to generate a furnished room. Use 'furnish this room' for best results.", + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + z.unknown(), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level for image generation. 'regular' balances speed and quality.", + }), + ), + lora_scale: z + .optional( + z.number().gte(0).lte(2).register(z.globalRegistry, { + description: 'The strength of the apartment staging effect.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'The format of the output image', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and won't be saved in history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale. 
Controls how closely the model follows the prompt.', + }), + ) + .default(2.5), + seed: z.optional(z.union([z.int(), z.unknown()])), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: 'The URL of the empty room image to furnish.', + }), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable the safety checker for the generated image.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(40), + }) + .register(z.globalRegistry, { + description: 'Input model for Apartment Staging endpoint - Furnish rooms', + }) + +/** + * FaceToFullPortraitOutput + */ +export const zSchemaFlux2LoraGalleryFaceToFullPortraitOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated full portrait images from face', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), +}) + +/** + * FaceToFullPortraitInput + * + * Input model for Face to Full Portrait endpoint - Generate full portrait from face + */ +export const zSchemaFlux2LoraGalleryFaceToFullPortraitInput = z + .object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'The prompt describing the full portrait to generate from the face.', + }), + ) + .default('Face to full portrait'), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + z.unknown(), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level for image generation. 'regular' balances speed and quality.", + }), + ), + lora_scale: z + .optional( + z.number().gte(0).lte(2).register(z.globalRegistry, { + description: 'The strength of the face to full portrait effect.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'The format of the output image', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and won't be saved in history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale. 
Controls how closely the model follows the prompt.', + }), + ) + .default(2.5), + seed: z.optional(z.union([z.int(), z.unknown()])), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: 'The URL of the cropped face image.', + }), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable the safety checker for the generated image.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(40), + }) + .register(z.globalRegistry, { + description: + 'Input model for Face to Full Portrait endpoint - Generate full portrait from face', + }) + +/** + * MultipleAnglesOutput + */ +export const zSchemaFlux2LoraGalleryMultipleAnglesOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated images with multiple camera angles', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), +}) + +/** + * MultipleAnglesInput + * + * Input model for Multiple Angles endpoint - Camera control with precise adjustments using trigger word. Prompt is built automatically from slider values. + */ +export const zSchemaFlux2LoraGalleryMultipleAnglesInput = z + .object({ + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + z.unknown(), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: 'Acceleration level for image generation.', + }), + ), + horizontal_angle: z + .optional( + z.number().gte(0).lte(360).register(z.globalRegistry, { + description: + 'Horizontal rotation angle around the object in degrees. 0°=front view, 90°=right side, 180°=back view, 270°=left side, 360°=front view again.', + }), + ) + .default(0), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: 'The CFG (Classifier Free Guidance) scale.', + }), + ) + .default(2.5), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: 'The URL of the image to adjust camera angle for.', + }), + zoom: z + .optional( + z.number().gte(0).lte(10).register(z.globalRegistry, { + description: + 'Camera zoom/distance. 0=wide shot (far away), 5=medium shot (normal), 10=close-up (very close).', + }), + ) + .default(5), + vertical_angle: z + .optional( + z.number().gte(0).lte(60).register(z.globalRegistry, { + description: + 'Vertical camera angle in degrees. 
0°=eye-level shot, 30°=elevated shot, 60°=high-angle shot (looking down from above).', + }), + ) + .default(0), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate.', + }), + ) + .default(1), + lora_scale: z + .optional( + z.number().gte(0).lte(2).register(z.globalRegistry, { + description: 'The strength of the multiple angles effect.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'The format of the output image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If True, the media will be returned as a data URI.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(40), + seed: z.optional(z.union([z.int(), z.unknown()])), + }) + .register(z.globalRegistry, { + description: + 'Input model for Multiple Angles endpoint - Camera control with precise adjustments using trigger word. Prompt is built automatically from slider values.', + }) + +/** + * VirtualTryonOutput + */ +export const zSchemaFlux2LoraGalleryVirtualTryonOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated virtual try-on images', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), +}) + +/** + * VirtualTryonInput + * + * Input model for Virtual Try-on endpoint - Generate virtual try-on images + */ +export const zSchemaFlux2LoraGalleryVirtualTryonInput = z + .object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate a virtual try-on image.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + z.unknown(), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level for image generation. 'regular' balances speed and quality.", + }), + ), + lora_scale: z + .optional( + z.number().gte(0).lte(2).register(z.globalRegistry, { + description: 'The strength of the virtual try-on effect.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'The format of the output image', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and won't be saved in history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.', + }), + ) + .default(2.5), + seed: z.optional(z.union([z.int(), z.unknown()])), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'The URLs of the images for virtual try-on. 
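+
+// Illustrative usage sketch (placeholder URL and slider values): this schema
+// has no `prompt` field because the prompt is built automatically from the
+// slider values, so only `image_urls` is required.
+const angleRequest = zSchemaFlux2LoraGalleryMultipleAnglesInput.parse({
+  image_urls: ['https://example.com/product.png'], // placeholder
+  horizontal_angle: 90, // right-side view
+  vertical_angle: 30, // elevated shot
+  zoom: 7, // tighter than the default medium shot (5)
+})
+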
Provide person image and clothing image.', + }), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable the safety checker for the generated image.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(40), + }) + .register(z.globalRegistry, { + description: + 'Input model for Virtual Try-on endpoint - Generate virtual try-on images', + }) + +/** + * OmniImageElementInput + */ +export const zSchemaOmniImageElementInput = z.object({ + reference_image_urls: z.optional( + z.array(z.string()).register(z.globalRegistry, { + description: + 'Additional reference images from different angles. 1-3 images supported. At least one image is required.', + }), + ), + frontal_image_url: z.string().register(z.globalRegistry, { + description: 'The frontal image of the element (main view).', + }), +}) + +/** + * OmniImageOutput + */ +export const zSchemaKlingImageO1Output = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Generated images', + }), +}) + +/** + * OmniImageRequest + */ +export const zSchemaKlingImageO1Input = z.object({ + prompt: z.string().max(2500).register(z.globalRegistry, { + description: + 'Text prompt for image generation. Reference images using @Image1, @Image2, etc. (or @Image if only one image). Max 2500 characters.', + }), + aspect_ratio: z.optional( + z + .enum(['auto', '16:9', '9:16', '1:1', '4:3', '3:4', '3:2', '2:3', '21:9']) + .register(z.globalRegistry, { + description: + "Aspect ratio of generated images. 'auto' intelligently determines based on input content.", + }), + ), + num_images: z + .optional( + z.int().gte(1).lte(9).register(z.globalRegistry, { + description: 'Number of images to generate (1-9).', + }), + ) + .default(1), + resolution: z.optional( + z.enum(['1K', '2K']).register(z.globalRegistry, { + description: 'Image generation resolution. 1K: standard, 2K: high-res.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + elements: z.optional( + z.array(zSchemaOmniImageElementInput).register(z.globalRegistry, { + description: + 'Elements (characters/objects) to include in the image. Reference in prompt as @Element1, @Element2, etc. Maximum 10 total (elements + reference images).', + }), + ), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'List of reference images. Reference images in prompt using @Image1, @Image2, etc. (1-indexed). 
Max 10 images.', + }), +}) + +/** + * ReferenceToImageOutput + */ +export const zSchemaViduQ2ReferenceToImageOutput = z.object({ + image: zSchemaImage, +}) + +/** + * ReferenceToImageRequest + */ +export const zSchemaViduQ2ReferenceToImageInput = z.object({ + prompt: z.string().max(1500).register(z.globalRegistry, { + description: 'Text prompt for image generation, max 1500 characters', + }), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: 'The aspect ratio of the output image', + }), + ), + reference_image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'URLs of the reference images to use for consistent subject appearance', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for generation', + }), + ), +}) + +/** + * SeedDream45EditOutput + */ +export const zSchemaBytedanceSeedreamV45EditOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Generated images', + }), +}) + +/** + * SeedDream45EditInput + */ +export const zSchemaBytedanceSeedreamV45EditInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt used to edit the image', + }), + num_images: z + .optional( + z.int().gte(1).lte(6).register(z.globalRegistry, { + description: + 'Number of separate model generations to be run with the prompt.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + 'auto_2K', + 'auto_4K', + ]), + ]), + ), + max_images: z + .optional( + z.int().gte(1).lte(6).register(z.globalRegistry, { + description: + 'If set to a number greater than one, enables multi-image generation. The model will potentially return up to `max_images` images every generation, and in total, `num_images` generations will be carried out. In total, the number of images generated will be between `num_images` and `max_images*num_images`. The total number of images (image inputs + image outputs) must not exceed 15', + }), + ) + .default(1), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed to control the stochasticity of image generation.', + }), + ), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'List of URLs of input images for editing. Presently, up to 10 image inputs are allowed.
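+
+// Illustrative usage sketch (placeholder URL): `safeParse` surfaces
+// constraint violations, such as the 2500-character prompt cap or the 1-9
+// range on `num_images`, without throwing.
+const klingResult = zSchemaKlingImageO1Input.safeParse({
+  prompt: 'Place @Image1 on a rainy street at night',
+  image_urls: ['https://example.com/subject.png'], // placeholder
+  num_images: 2,
+})
+if (!klingResult.success) {
+  console.error(klingResult.error.issues)
+}
+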
If over 10 images are sent, only the last 10 will be used.', + }), +}) + +/** + * ImageToImageOutput + */ +export const zSchemaLongcatImageEditOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * EditImageInput + */ +export const zSchemaLongcatImageEditInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to edit the image with.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: 'The acceleration level to use.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to edit.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: 'The guidance scale to use for the image generation.', + }), + ) + .default(4.5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), +}) + +/** + * ZImageTurboImageToImageOutput + */ +export const zSchemaZImageTurboImageToImageOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + seed: z.int().register(z.globalRegistry, { + description: + 'Seed of the generated Image. 
It will be the same value of the one passed in the input or the randomly generated that was used in case none was passed.', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + timings: z.record(z.string(), z.number()).register(z.globalRegistry, { + description: 'The timings of the generation process.', + }), +}) + +/** + * ZImageTurboImageToImageInput + */ +export const zSchemaZImageTurboImageToImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + 'auto', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: 'The acceleration level to use.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of Image for Image-to-Image generation.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The strength of the image-to-image conditioning.', + }), + ) + .default(0.6), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable prompt expansion. Note: this will increase the price by 0.0025 credits per request.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(8), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), +}) + +/** + * ZImageTurboImageToImageOutput + */ +export const zSchemaZImageTurboImageToImageLoraOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + seed: z.int().register(z.globalRegistry, { + description: + 'Seed of the generated Image. 
It will be the same value of the one passed in the input or the randomly generated that was used in case none was passed.', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + timings: z.record(z.string(), z.number()).register(z.globalRegistry, { + description: 'The timings of the generation process.', + }), +}) + +/** + * LoRAInput + * + * LoRA weight configuration. + */ +export const zSchemaLoRaInput = z + .object({ + path: z.string().register(z.globalRegistry, { + description: 'URL, HuggingFace repo ID (owner/repo) to lora weights.', + }), + scale: z + .optional( + z.number().gte(0).lte(4).register(z.globalRegistry, { + description: 'Scale factor for LoRA application (0.0 to 4.0).', + }), + ) + .default(1), + }) + .register(z.globalRegistry, { + description: 'LoRA weight configuration.', + }) + +/** + * ZImageTurboImageToImageLoRAInput + */ +export const zSchemaZImageTurboImageToImageLoraInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + 'auto', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: 'The acceleration level to use.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of Image for Image-to-Image generation.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + loras: z + .optional( + z.array(zSchemaLoRaInput).register(z.globalRegistry, { + description: 'List of LoRA weights to apply (maximum 3).', + }), + ) + .default([]), + strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The strength of the image-to-image conditioning.', + }), + ) + .default(0.6), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable prompt expansion. 
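+
+// Illustrative sketch (placeholder path): the "maximum 3" LoRA limit above is
+// stated only in the description and not enforced by the schema itself, so
+// enforcement presumably happens server-side. A caller wanting to fail fast
+// could tighten the array locally:
+const zAtMostThreeLoras = z.array(zSchemaLoRaInput).max(3)
+
+const loraList = zAtMostThreeLoras.parse([
+  { path: 'https://example.com/style.safetensors', scale: 0.8 }, // placeholder
+])
+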
Note: this will increase the price by 0.0025 credits per request.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(8), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * ZImageTurboControlNetOutput + */ +export const zSchemaZImageTurboControlnetOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + seed: z.int().register(z.globalRegistry, { + description: + 'Seed of the generated Image. It will be the same value of the one passed in the input or the randomly generated that was used in case none was passed.', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + timings: z.record(z.string(), z.number()).register(z.globalRegistry, { + description: 'The timings of the generation process.', + }), +}) + +/** + * ZImageTurboControlNetInput + */ +export const zSchemaZImageTurboControlnetInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: 'The acceleration level to use.', + }), + ), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + 'auto', + ]), + ]), + ), + control_end: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The end of the controlnet conditioning.', + }), + ) + .default(0.8), + control_start: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The start of the controlnet conditioning.', + }), + ) + .default(0), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of Image for ControlNet generation.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + control_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The scale of the controlnet conditioning.', + }), + ) + .default(0.75), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + 
description: + 'Whether to enable prompt expansion. Note: this will increase the price by 0.0025 credits per request.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(8), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + preprocess: z.optional( + z.enum(['none', 'canny', 'depth', 'pose']).register(z.globalRegistry, { + description: 'What kind of preprocessing to apply to the image, if any.', + }), + ), +}) + +/** + * ZImageTurboControlNetOutput + */ +export const zSchemaZImageTurboControlnetLoraOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + seed: z.int().register(z.globalRegistry, { + description: + 'Seed of the generated Image. It will be the same value of the one passed in the input or the randomly generated that was used in case none was passed.', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + timings: z.record(z.string(), z.number()).register(z.globalRegistry, { + description: 'The timings of the generation process.', + }), +}) + +/** + * ZImageTurboControlNetLoRAInput + */ +export const zSchemaZImageTurboControlnetLoraInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: 'The acceleration level to use.', + }), + ), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + 'auto', + ]), + ]), + ), + loras: z + .optional( + z.array(zSchemaLoRaInput).register(z.globalRegistry, { + description: 'List of LoRA weights to apply (maximum 3).', + }), + ) + .default([]), + control_end: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The end of the controlnet conditioning.', + }), + ) + .default(0.8), + control_start: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The start of the controlnet conditioning.', + }), + ) + .default(0), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of Image for ControlNet generation.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + control_scale: z + .optional( + 
z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The scale of the controlnet conditioning.', + }), + ) + .default(0.75), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable prompt expansion. Note: this will increase the price by 0.0025 credits per request.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(8), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + preprocess: z.optional( + z.enum(['none', 'canny', 'depth', 'pose']).register(z.globalRegistry, { + description: 'What kind of preprocessing to apply to the image, if any.', + }), + ), +}) + +/** + * ImageOutput + */ +export const zSchemaStepxEdit2Output = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + best_info: z.optional( + z.array(z.record(z.string(), z.unknown())).register(z.globalRegistry, { + description: + 'Reflection analysis (only available when reflection mode is enabled).', + }), + ), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated images', + }), + timings: z.record(z.string(), z.number()), + reformat_prompt: z.optional( + z.string().register(z.globalRegistry, { + description: + "The model's interpretation of your instruction (only available when thinking mode is enabled).", + }), + ), + think_info: z.optional( + z.array(z.string()).register(z.globalRegistry, { + description: + 'Reasoning process details (only available when thinking mode is enabled).', + }), + ), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * ImageToImageInput + */ +export const zSchemaStepxEdit2Input = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + enable_reflection_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Enable reflection mode. Reviews outputs, corrects unintended changes, and determines when editing is complete.', + }), + ) + .default(true), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: + 'The image URL to generate an image from. 
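+
+// Illustrative sketch: because the fields above are annotated through
+// `z.globalRegistry`, Zod v4's `z.toJSONSchema` can (assuming the installed
+// Zod version provides it) convert these definitions back into documented
+// JSON Schema, e.g. for docs generation.
+const controlnetLoraJsonSchema = z.toJSONSchema(
+  zSchemaZImageTurboControlnetLoraInput,
+)
+console.log(JSON.stringify(controlnetLoraJsonSchema, null, 2))
+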
Needs to match the dimensions of the mask.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The true CFG scale. Controls how closely the model follows the prompt.\n ', + }), + ) + .default(6), + num_inference_steps: z + .optional( + z.int().gte(1).lte(100).register(z.globalRegistry, { + description: + 'The number of inference steps to perform. Recommended: 50.', + }), + ) + .default(50), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + enable_thinking_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Enable thinking mode. Uses multimodal language model knowledge to interpret abstract editing instructions.', + }), + ) + .default(true), +}) + +/** + * Schema referenced but not defined by fal.ai (missing from source OpenAPI spec) + */ +export const zSchemaPoint = z + .record(z.string(), z.unknown()) + .register(z.globalRegistry, { + description: + 'Schema referenced but not defined by fal.ai (missing from source OpenAPI spec)', + }) + +/** + * UsageInfo + */ +export const zSchemaUsageInfo = z.object({ + output_tokens: z.int().register(z.globalRegistry, { + description: 'Number of output tokens generated', + }), + decode_time_ms: z.number().register(z.globalRegistry, { + description: 'Time taken for decoding in milliseconds', + }), + input_tokens: z.int().register(z.globalRegistry, { + description: 'Number of input tokens processed', + }), + ttft_ms: z.number().register(z.globalRegistry, { + description: 'Time to first token in milliseconds', + }), + prefill_time_ms: z.number().register(z.globalRegistry, { + description: 'Time taken for prefill in milliseconds', + }), +}) + +/** + * Object + */ +export const zSchemaObject = z.object({ + y_min: z.number().register(z.globalRegistry, { + description: 'Top boundary of detection box in normalized format (0 to 1)', + }), + x_max: z.number().register(z.globalRegistry, { + description: + 'Right boundary of detection box in normalized format (0 to 1)', + }), + x_min: z.number().register(z.globalRegistry, { + description: 'Left boundary of detection box in normalized format (0 to 1)', + }), + y_max: z.number().register(z.globalRegistry, { + description: + 'Bottom boundary of detection box in normalized format (0 to 1)', + }), +}) + +/** + * SegmentSamplingSettings + */ +export const zSchemaSegmentSamplingSettings = z.object({ + top_p: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Nucleus sampling probability mass to use, between 0 and 1.', + }), + ) + .default(1), + max_tokens: z.optional( + z.int().gte(1).register(z.globalRegistry, { + description: 'Maximum number of tokens to generate.', + }), + ), + temperature: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Sampling temperature to 
use. Higher values will make the output more random, while lower values will make it more focused and deterministic.', + }), + ) + .default(1), +}) + +/** + * MoondreamSegmentationOutput + */ +export const zSchemaMoondream3PreviewSegmentOutput = z.object({ + finish_reason: z.string().register(z.globalRegistry, { + description: 'Reason for finishing the output generation', + }), + image: z.optional(zSchemaImageFile), + bbox: z.optional(zSchemaObject), + path: z.optional( + z.string().register(z.globalRegistry, { + description: + 'SVG path data representing the segmentation mask. If not detected, will be null.', + }), + ), + usage_info: zSchemaUsageInfo, +}) + +/** + * MoondreamSegmentationInput + */ +export const zSchemaMoondream3PreviewSegmentInput = z.object({ + spatial_references: z.optional( + z + .array(z.union([zSchemaPoint, z.array(z.number())])) + .register(z.globalRegistry, { + description: + 'Spatial references to guide the segmentation. By feeding in references you can help the segmentation process. Must be either a list of Point objects with x and y members, or a list of arrays containing either 2 floats (x,y) or 4 floats (x1,y1,x2,y2). \n**NOTE**: You can also use the [**point endpoint**](https://fal.ai/models/fal-ai/moondream3-preview/point) to get points for the objects, and pass them in here.', + }), + ), + settings: z.optional(zSchemaSegmentSamplingSettings), + object: z.string().register(z.globalRegistry, { + description: 'Object to be segmented in the image', + }), + preview: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to preview the output and return a binary mask of the image', + }), + ) + .default(false), + image_url: z.string().register(z.globalRegistry, { + description: + 'URL of the image to be processed\n\nMax width: 7000px, Max height: 7000px, Timeout: 20.0s', + }), +}) + +/** + * LightingRestorationOutput + */ +export const zSchemaQwenImageEditPlusLoraGalleryLightingRestorationOutput = + z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated/edited images', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), + }) + +/** + * LightingRestorationInput + * + * Input model for Lighting Restoration endpoint - Restore natural lighting by removing harsh shadows and light spots + */ +export const zSchemaQwenImageEditPlusLoraGalleryLightingRestorationInput = z + .object({ + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable the safety checker for the generated image.', + }), + ) + .default(true), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + z.unknown(), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level for image generation.
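+
+// Illustrative usage sketch (placeholder URL and coordinates):
+// `spatial_references` accepts Point-like objects or bare coordinate arrays
+// (2 floats for a point, 4 for a box), matching the union above.
+const segmentRequest = zSchemaMoondream3PreviewSegmentInput.parse({
+  object: 'coffee mug',
+  image_url: 'https://example.com/desk.jpg', // placeholder
+  spatial_references: [{ x: 0.42, y: 0.55 }, [0.1, 0.2, 0.6, 0.7]],
+})
+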
'regular' balances speed and quality.", + }), + ), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'The format of the output image', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and won't be saved in history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.', + }), + ) + .default(1), + seed: z.optional(z.union([z.int(), z.unknown()])), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: 'The URL of the image to restore lighting for.', + }), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt for the generation', + }), + ) + .default(' '), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(6), + }) + .register(z.globalRegistry, { + description: + 'Input model for Lighting Restoration endpoint - Restore natural lighting by removing harsh shadows and light spots', + }) + +/** + * QwenImageOutput + */ +export const zSchemaQwenImageEdit2509Output = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + timings: z.record(z.string(), z.number()), +}) + +/** + * BaseQwenEditImagePlusInput + */ +export const zSchemaQwenImageEdit2509Input = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the image with', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level for image generation. Options: 'none', 'regular'. Higher acceleration increases speed. 
'regular' balances speed and quality.", + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(4), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: 'The URLs of the images to edit.', + }), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt for the generation', + }), + ) + .default(' '), + num_inference_steps: z + .optional( + z.int().gte(2).lte(100).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(50), +}) + +/** + * QwenImageOutput + */ +export const zSchemaQwenImageEdit2509LoraOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + timings: z.record(z.string(), z.number()), +}) + +/** + * BaseQwenEditImagePlusLoRAInput + */ +export const zSchemaQwenImageEdit2509LoraInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the image with', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level for image generation. Options: 'none', 'regular'. Higher acceleration increases speed. 
'regular' balances speed and quality.", + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. You can use up to 3 LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(4), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: 'The URLs of the images to edit.', + }), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt for the generation', + }), + ) + .default(' '), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), +}) + +/** + * MultipleAnglesOutput + */ +export const zSchemaQwenImageEdit2509LoraGalleryMultipleAnglesOutput = z.object( + { + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated/edited images', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), + }, +) + +/** + * MultipleAnglesInput + * + * Input model for Multiple Angles endpoint - Camera control with precise adjustments + */ +export const zSchemaQwenImageEdit2509LoraGalleryMultipleAnglesInput = z + .object({ + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + z.unknown(), + ]), + ), + wide_angle_lens: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Enable wide-angle lens effect', + }), + ) + .default(false), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level for image generation. 'regular' balances speed and quality.", + }), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale. 
Controls how closely the model follows the prompt.', + }), + ) + .default(1), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable the safety checker for the generated image.', + }), + ) + .default(true), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: 'The URL of the image to adjust camera angle for.', + }), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt for the generation', + }), + ) + .default(' '), + vertical_angle: z + .optional( + z.number().gte(-1).lte(1).register(z.globalRegistry, { + description: + "Adjust vertical camera angle (-1=bird's-eye view/looking down, 0=neutral, 1=worm's-eye view/looking up)", + }), + ) + .default(0), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + move_forward: z + .optional( + z.number().gte(0).lte(10).register(z.globalRegistry, { + description: 'Move camera forward (0=no movement, 10=close-up)', + }), + ) + .default(0), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'The format of the output image', + }), + ), + rotate_right_left: z + .optional( + z.number().gte(-90).lte(90).register(z.globalRegistry, { + description: + 'Rotate camera left (positive) or right (negative) in degrees. Positive values rotate left, negative values rotate right.', + }), + ) + .default(0), + lora_scale: z + .optional( + z.number().gte(0).lte(4).register(z.globalRegistry, { + description: + 'The scale factor for the LoRA model. Controls the strength of the camera control effect.', + }), + ) + .default(1.25), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and won't be saved in history.", + }), + ) + .default(false), + seed: z.optional(z.union([z.int(), z.unknown()])), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(6), + }) + .register(z.globalRegistry, { + description: + 'Input model for Multiple Angles endpoint - Camera control with precise adjustments', + }) + +/** + * NextSceneOutput + */ +export const zSchemaQwenImageEdit2509LoraGalleryNextSceneOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated/edited images', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), +}) + +/** + * NextSceneInput + * + * Input model for Next Scene endpoint - Create cinematic shot progressions and scene transitions + */ +export const zSchemaQwenImageEdit2509LoraGalleryNextSceneInput = z + .object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "Describe the camera movement, framing change, or scene transition. Start with 'Next Scene:' for best results. 
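+// --- Illustrative usage (not part of the generated schemas) ---------------
+// Sketch of guarding user-supplied camera controls with safeParse: the
+// numeric bounds declared above (vertical_angle in [-1, 1],
+// rotate_right_left in [-90, 90], move_forward in [0, 10]) surface as a
+// ZodError instead of a failed request. Note that `seed` is
+// z.union([z.int(), z.unknown()]), which accepts any value because of the
+// z.unknown() branch. The URL is hypothetical.
+//
+//   const result =
+//     zSchemaQwenImageEdit2509LoraGalleryMultipleAnglesInput.safeParse({
+//       image_urls: ['https://example.com/room.png'],
+//       vertical_angle: 0.5,
+//       rotate_right_left: 45,
+//     })
+//   if (!result.success) console.error(z.prettifyError(result.error))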
Examples: camera movements (dolly, push-in, pull-back), framing changes (wide to close-up), new elements entering frame.", + }), + ) + .default( + 'Next Scene: The camera moves forward revealing more of the scene', + ), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + z.unknown(), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level for image generation. 'regular' balances speed and quality.", + }), + ), + lora_scale: z + .optional( + z.number().gte(0).lte(4).register(z.globalRegistry, { + description: + 'The scale factor for the LoRA model. Controls the strength of the LoRA effect.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'The format of the output image', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable the safety checker for the generated image.', + }), + ) + .default(true), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and won't be saved in history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.', + }), + ) + .default(1), + seed: z.optional(z.union([z.int(), z.unknown()])), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: 'The URL of the image to create the next scene from.', + }), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt for the generation', + }), + ) + .default(' '), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(6), + }) + .register(z.globalRegistry, { + description: + 'Input model for Next Scene endpoint - Create cinematic shot progressions and scene transitions', + }) + +/** + * AddBackgroundOutput + */ +export const zSchemaQwenImageEdit2509LoraGalleryAddBackgroundOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated/edited images', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), +}) + +/** + * AddBackgroundInput + * + * Input model for Add Background endpoint - Remove white background and add a realistic scene + */ +export const zSchemaQwenImageEdit2509LoraGalleryAddBackgroundInput = z + .object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Describe the background/scene you want to add behind the object. 
The model will remove the white background and add the specified environment.', + }), + ) + .default( + 'Remove white background and add a realistic scene behind the object', + ), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + z.unknown(), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level for image generation. 'regular' balances speed and quality.", + }), + ), + lora_scale: z + .optional( + z.number().gte(0).lte(4).register(z.globalRegistry, { + description: + 'The scale factor for the LoRA model. Controls the strength of the LoRA effect.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'The format of the output image', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable the safety checker for the generated image.', + }), + ) + .default(true), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and won't be saved in history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.', + }), + ) + .default(1), + seed: z.optional(z.union([z.int(), z.unknown()])), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'The URLs of the images to edit. Provide an image with a white or clean background.', + }), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt for the generation', + }), + ) + .default(' '), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(6), + }) + .register(z.globalRegistry, { + description: + 'Input model for Add Background endpoint - Remove white background and add a realistic scene', + }) + +/** + * FaceToFullPortraitOutput + */ +export const zSchemaQwenImageEdit2509LoraGalleryFaceToFullPortraitOutput = + z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated/edited images', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), + }) + +/** + * FaceToFullPortraitInput + * + * Input model for Face to Full Portrait endpoint - Generate full portrait from a cropped face image + */ +export const zSchemaQwenImageEdit2509LoraGalleryFaceToFullPortraitInput = z + .object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Describe the full portrait you want to generate from the face. Include clothing, setting, pose, and style details.', + }), + ) + .default( + 'Photography. 
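+// --- Illustrative note (not part of the generated schemas) ----------------
+// Throughout these inputs, `image_size` is a union of the named presets
+// and a zSchemaImageSize object; assuming zSchemaImageSize is a
+// { width, height } pair, both of the following hypothetical payloads
+// would validate:
+//
+//   zSchemaQwenImageEdit2509LoraGalleryAddBackgroundInput.parse({
+//     image_urls: ['https://example.com/product.png'],
+//     image_size: 'landscape_16_9',
+//   })
+//   zSchemaQwenImageEdit2509LoraGalleryAddBackgroundInput.parse({
+//     image_urls: ['https://example.com/product.png'],
+//     image_size: { width: 1280, height: 720 },
+//   })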
A portrait of the person in professional attire with natural lighting', + ), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + z.unknown(), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level for image generation. 'regular' balances speed and quality.", + }), + ), + lora_scale: z + .optional( + z.number().gte(0).lte(4).register(z.globalRegistry, { + description: + 'The scale factor for the LoRA model. Controls the strength of the LoRA effect.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'The format of the output image', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable the safety checker for the generated image.', + }), + ) + .default(true), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and won't be saved in history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.', + }), + ) + .default(1), + seed: z.optional(z.union([z.int(), z.unknown()])), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'The URL of the cropped face image. Provide a close-up face photo.', + }), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt for the generation', + }), + ) + .default(' '), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(6), + }) + .register(z.globalRegistry, { + description: + 'Input model for Face to Full Portrait endpoint - Generate full portrait from a cropped face image', + }) + +/** + * GroupPhotoOutput + */ +export const zSchemaQwenImageEdit2509LoraGalleryGroupPhotoOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated/edited images', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), +}) + +/** + * GroupPhotoInput + * + * Input model for Group Photo endpoint - Create composite group photos with vintage/retro style + */ +export const zSchemaQwenImageEdit2509LoraGalleryGroupPhotoInput = z + .object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Describe the group photo scene, setting, and style. 
The model will maintain character consistency and add vintage effects like grain, blur, and retro filters.', + }), + ) + .default( + 'Two people standing next to each other outside with a landscape background', + ), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + z.unknown(), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level for image generation. 'regular' balances speed and quality.", + }), + ), + lora_scale: z + .optional( + z.number().gte(0).lte(4).register(z.globalRegistry, { + description: + 'The scale factor for the LoRA model. Controls the strength of the LoRA effect.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'The format of the output image', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable the safety checker for the generated image.', + }), + ) + .default(true), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and won't be saved in history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.', + }), + ) + .default(1), + seed: z.optional(z.union([z.int(), z.unknown()])), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'The URLs of the images to combine into a group photo. Provide 2 or more individual portrait images.', + }), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt for the generation', + }), + ) + .default(' '), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(6), + }) + .register(z.globalRegistry, { + description: + 'Input model for Group Photo endpoint - Create composite group photos with vintage/retro style', + }) + +/** + * IntegrateProductOutput + */ +export const zSchemaQwenImageEdit2509LoraGalleryIntegrateProductOutput = + z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated/edited images', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), + }) + +/** + * IntegrateProductInput + * + * Input model for Integrate Product endpoint - Blend and integrate products/elements into backgrounds + */ +export const zSchemaQwenImageEdit2509LoraGalleryIntegrateProductInput = z + .object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Describe how to blend and integrate the product/element into the background. 
The model will automatically correct perspective, lighting and shadows for natural integration.', + }), + ) + .default('Blend and integrate the product into the background'), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + z.unknown(), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level for image generation. 'regular' balances speed and quality.", + }), + ), + lora_scale: z + .optional( + z.number().gte(0).lte(4).register(z.globalRegistry, { + description: + 'The scale factor for the LoRA model. Controls the strength of the LoRA effect.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'The format of the output image', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable the safety checker for the generated image.', + }), + ) + .default(true), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and won't be saved in history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.', + }), + ) + .default(1), + seed: z.optional(z.union([z.int(), z.unknown()])), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'The URL of the image with product to integrate into background.', + }), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt for the generation', + }), + ) + .default(' '), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(6), + }) + .register(z.globalRegistry, { + description: + 'Input model for Integrate Product endpoint - Blend and integrate products/elements into backgrounds', + }) + +/** + * LightingRestorationOutput + */ +export const zSchemaQwenImageEdit2509LoraGalleryLightingRestorationOutput = + z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated/edited images', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), + }) + +/** + * LightingRestorationInput + * + * Input model for Lighting Restoration endpoint - Restore natural lighting by removing harsh shadows and light spots + */ +export const zSchemaQwenImageEdit2509LoraGalleryLightingRestorationInput = z + .object({ + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable the safety checker for the generated image.', + }), + ) + .default(true), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + 
z.unknown(), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level for image generation. 'regular' balances speed and quality.", + }), + ), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'The format of the output image', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and won't be saved in history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.', + }), + ) + .default(1), + seed: z.optional(z.union([z.int(), z.unknown()])), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: 'The URL of the image to restore lighting for.', + }), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt for the generation', + }), + ) + .default(' '), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(6), + }) + .register(z.globalRegistry, { + description: + 'Input model for Lighting Restoration endpoint - Restore natural lighting by removing harsh shadows and light spots', + }) + +/** + * RemoveElementOutput + */ +export const zSchemaQwenImageEdit2509LoraGalleryRemoveElementOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated/edited images', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), +}) + +/** + * RemoveElementInput + * + * Input model for Remove Element endpoint - Remove/delete elements (objects, people, text) from the image + */ +export const zSchemaQwenImageEdit2509LoraGalleryRemoveElementInput = z + .object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Specify what element(s) to remove from the image (objects, people, text, etc.). The model will cleanly remove the element while maintaining consistency of the rest of the image.', + }), + ) + .default('Remove the specified element from the scene'), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + z.unknown(), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level for image generation. 'regular' balances speed and quality.", + }), + ), + lora_scale: z + .optional( + z.number().gte(0).lte(4).register(z.globalRegistry, { + description: + 'The scale factor for the LoRA model. 
Controls the strength of the LoRA effect.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'The format of the output image', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable the safety checker for the generated image.', + }), + ) + .default(true), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and won't be saved in history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.', + }), + ) + .default(1), + seed: z.optional(z.union([z.int(), z.unknown()])), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: 'The URL of the image containing elements to remove.', + }), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt for the generation', + }), + ) + .default(' '), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(6), + }) + .register(z.globalRegistry, { + description: + 'Input model for Remove Element endpoint - Remove/delete elements (objects, people, text) from the image', + }) + +/** + * RemoveLightingOutput + */ +export const zSchemaQwenImageEdit2509LoraGalleryRemoveLightingOutput = z.object( + { + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated/edited images', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), + }, +) + +/** + * RemoveLightingInput + * + * Input model for Remove Lighting endpoint - Remove existing lighting and apply soft even lighting + */ +export const zSchemaQwenImageEdit2509LoraGalleryRemoveLightingInput = z + .object({ + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable the safety checker for the generated image.', + }), + ) + .default(true), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + z.unknown(), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level for image generation. 'regular' balances speed and quality.", + }), + ), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'The format of the output image', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and won't be saved in history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale. 
Controls how closely the model follows the prompt.', + }), + ) + .default(1), + seed: z.optional(z.union([z.int(), z.unknown()])), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: 'The URL of the image with lighting/shadows to remove.', + }), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt for the generation', + }), + ) + .default(' '), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(6), + }) + .register(z.globalRegistry, { + description: + 'Input model for Remove Lighting endpoint - Remove existing lighting and apply soft even lighting', + }) + +/** + * ShirtDesignOutput + */ +export const zSchemaQwenImageEdit2509LoraGalleryShirtDesignOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated/edited images', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), +}) + +/** + * ShirtDesignInput + * + * Input model for Shirt Design endpoint - Put designs/graphics on people's shirts + */ +export const zSchemaQwenImageEdit2509LoraGalleryShirtDesignInput = z + .object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "Describe what design to put on the shirt. The model will apply the design from your input image onto the person's shirt.", + }), + ) + .default('Put this design on their shirt'), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + z.unknown(), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level for image generation. 'regular' balances speed and quality.", + }), + ), + lora_scale: z + .optional( + z.number().gte(0).lte(4).register(z.globalRegistry, { + description: + 'The scale factor for the LoRA model. Controls the strength of the LoRA effect.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'The format of the output image', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable the safety checker for the generated image.', + }), + ) + .default(true), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and won't be saved in history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale. 
Controls how closely the model follows the prompt.', + }), + ) + .default(1), + seed: z.optional(z.union([z.int(), z.unknown()])), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'The URLs of the images: first image is the person wearing a shirt, second image is the design/logo to put on the shirt.', + }), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt for the generation', + }), + ) + .default(' '), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(6), + }) + .register(z.globalRegistry, { + description: + "Input model for Shirt Design endpoint - Put designs/graphics on people's shirts", + }) + +/** + * FluxSingleIDOutput + */ +export const zSchemaAiBabyAndAgingGeneratorSingleOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The final prompt used for generation', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), +}) + +/** + * SingleFluxIDInput + * + * Input schema for single mode generation + */ +export const zSchemaAiBabyAndAgingGeneratorSingleInput = z + .object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Text prompt to guide the image generation', + }), + ) + .default('a newborn baby, well dressed'), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + id_image_urls: z.array(z.string()).min(1).register(z.globalRegistry, { + description: + 'List of ID images for single mode (or general reference images)', + }), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: + "The format of the generated image. Choose from: 'jpeg' or 'png'.", + }), + ), + age_group: z + .enum([ + 'baby', + 'toddler', + 'preschool', + 'gradeschooler', + 'teen', + 'adult', + 'mid', + 'senior', + ]) + .register(z.globalRegistry, { + description: + "Age group for the generated image. Choose from: 'baby' (0-12 months), 'toddler' (1-3 years), 'preschool' (3-5 years), 'gradeschooler' (6-12 years), 'teen' (13-19 years), 'adult' (20-40 years), 'mid' (40-60 years), 'senior' (60+ years).", + }), + gender: z.enum(['male', 'female']).register(z.globalRegistry, { + description: + "Gender for the generated image. 
Choose from: 'male' or 'female'.", + }), + seed: z.optional(z.union([z.int(), z.unknown()])), + }) + .register(z.globalRegistry, { + description: 'Input schema for single mode generation', + }) + +/** + * FluxMultiIDOutput + */ +export const zSchemaAiBabyAndAgingGeneratorMultiOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The final prompt used for generation', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), +}) + +/** + * MultiFluxIDInput + * + * Input schema for multi mode generation + */ +export const zSchemaAiBabyAndAgingGeneratorMultiInput = z + .object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Text prompt to guide the image generation', + }), + ) + .default('a newborn baby, well dressed'), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + father_weight: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + "Weight of the father's influence in multi mode generation", + }), + ) + .default(0.5), + mother_image_urls: z.array(z.string()).min(1).register(z.globalRegistry, { + description: 'List of mother images for multi mode', + }), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: + "The format of the generated image. Choose from: 'jpeg' or 'png'.", + }), + ), + age_group: z + .enum([ + 'baby', + 'toddler', + 'preschool', + 'gradeschooler', + 'teen', + 'adult', + 'mid', + 'senior', + ]) + .register(z.globalRegistry, { + description: + "Age group for the generated image. Choose from: 'baby' (0-12 months), 'toddler' (1-3 years), 'preschool' (3-5 years), 'gradeschooler' (6-12 years), 'teen' (13-19 years), 'adult' (20-40 years), 'mid' (40-60 years), 'senior' (60+ years).", + }), + gender: z.enum(['male', 'female']).register(z.globalRegistry, { + description: + "Gender for the generated image. 
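+// --- Illustrative usage (not part of the generated schemas) ---------------
+// Single-mode generation requires `id_image_urls`, `age_group`, and
+// `gender`; everything else is optional or defaulted. A hypothetical
+// payload, with the matching output type inferred from the schema:
+//
+//   const singleInput = zSchemaAiBabyAndAgingGeneratorSingleInput.parse({
+//     id_image_urls: ['https://example.com/parent.png'],
+//     age_group: 'toddler',
+//     gender: 'female',
+//   })
+//   type SingleResult =
+//     z.output<typeof zSchemaAiBabyAndAgingGeneratorSingleOutput>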
Choose from: 'male' or 'female'.", + }), + father_image_urls: z.array(z.string()).min(1).register(z.globalRegistry, { + description: 'List of father images for multi mode', + }), + seed: z.optional(z.union([z.int(), z.unknown()])), + }) + .register(z.globalRegistry, { + description: 'Input schema for multi mode generation', + }) + +/** + * Flux2MaxEditOutput + */ +export const zSchemaFlux2MaxEditOutput = z.object({ + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated images.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for the generation.', + }), +}) + +/** + * Flux2MaxImageEditInput + */ +export const zSchemaFlux2MaxEditInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'auto', + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + safety_tolerance: z.optional( + z.enum(['1', '2', '3', '4', '5']).register(z.globalRegistry, { + description: + 'The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive.', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed to use for the generation.', + }), + ), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: 'List of URLs of input images for editing', + }), +}) + +/** + * Flux2TurboEditImageOutput + */ +export const zSchemaFlux2TurboEditOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The edited images', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
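+// --- Illustrative usage (not part of the generated schemas) ---------------
+// Sketch of an end-to-end call inside an async function, assuming the
+// @fal-ai/client subscribe API and an endpoint id guessed from the schema
+// name -- both assumptions, not confirmed by this file:
+//
+//   import { fal } from '@fal-ai/client'
+//   const input = zSchemaFlux2MaxEditInput.parse({
+//     prompt: 'Turn the sketch into a watercolor painting',
+//     image_urls: ['https://example.com/sketch.png'],
+//     safety_tolerance: '2',
+//   })
+//   const { data } = await fal.subscribe('fal-ai/flux-2-max/edit', { input })
+//   const output = zSchemaFlux2MaxEditOutput.parse(data)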
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + timings: z.record(z.string(), z.number()), +}) + +/** + * Flux2TurboEditImageInput + */ +export const zSchemaFlux2TurboEditInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to edit the image.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'Guidance Scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.', + }), + ) + .default(2.5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The seed to use for the generation. If not provided, a random seed will be used.', + }), + ), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'The URLs of the images for editing. 
A maximum of 4 images are allowed, if more are provided, only the first 4 will be used.', + }), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the prompt will be expanded for better results.', + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * EditImageResponse + */ +export const zSchemaGptImage15EditOutput = z.object({ + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated images.', + }), +}) + +/** + * EditImageRequest + */ +export const zSchemaGptImage15EditInput = z.object({ + input_fidelity: z.optional( + z.enum(['low', 'high']).register(z.globalRegistry, { + description: 'Input fidelity for the generated image', + }), + ), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + image_size: z.optional( + z + .enum(['auto', '1024x1024', '1536x1024', '1024x1536']) + .register(z.globalRegistry, { + description: 'Aspect ratio for the generated image', + }), + ), + prompt: z.string().min(2).register(z.globalRegistry, { + description: 'The prompt for image generation', + }), + quality: z.optional( + z.enum(['low', 'medium', 'high']).register(z.globalRegistry, { + description: 'Quality for the generated image', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'Output format for the images', + }), + ), + background: z.optional( + z.enum(['auto', 'transparent', 'opaque']).register(z.globalRegistry, { + description: 'Background for the generated image', + }), + ), + mask_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: + 'The URL of the mask image to use for the generation. This indicates what part of the image to edit.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'The URLs of the images to use as a reference for the generation.', + }), +}) + +/** + * Flux2FlashEditImageOutput + */ +export const zSchemaFlux2FlashEditOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The edited images', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
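+// --- Illustrative usage (not part of the generated schemas) ---------------
+// The GPT Image edit request supports masked inpainting: the optional
+// `mask_image_url` marks the region to edit while `image_urls` carry the
+// reference images, and the prompt must be at least 2 characters
+// (z.string().min(2)). URLs below are hypothetical.
+//
+//   const editRequest = zSchemaGptImage15EditInput.parse({
+//     prompt: 'Replace the masked area with a bookshelf',
+//     image_urls: ['https://example.com/room.png'],
+//     mask_image_url: 'https://example.com/mask.png',
+//     quality: 'high',
+//   })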
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + timings: z.record(z.string(), z.number()), +}) + +/** + * Flux2FlashEditImageInput + */ +export const zSchemaFlux2FlashEditInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to edit the image.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'Guidance Scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.', + }), + ) + .default(2.5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The seed to use for the generation. If not provided, a random seed will be used.', + }), + ), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'The URLs of the images for editing. A maximum of 4 images are allowed, if more are provided, only the first 4 will be used.', + }), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the prompt will be expanded for better results.', + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * ZImageTurboInpaintOutput + */ +export const zSchemaZImageTurboInpaintOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + seed: z.int().register(z.globalRegistry, { + description: + 'Seed of the generated Image. 
It will be the same value of the one passed in the input or the randomly generated that was used in case none was passed.', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + timings: z.record(z.string(), z.number()).register(z.globalRegistry, { + description: 'The timings of the generation process.', + }), +}) + +/** + * ZImageTurboInpaintInput + */ +export const zSchemaZImageTurboInpaintInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: 'The acceleration level to use.', + }), + ), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + 'auto', + ]), + ]), + ), + mask_image_url: z.string().register(z.globalRegistry, { + description: 'URL of Mask for Inpaint generation.', + }), + control_end: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The end of the controlnet conditioning.', + }), + ) + .default(0.8), + control_start: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The start of the controlnet conditioning.', + }), + ) + .default(0), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of Image for Inpaint generation.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The strength of the inpaint conditioning.', + }), + ) + .default(1), + control_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The scale of the controlnet conditioning.', + }), + ) + .default(0.75), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable prompt expansion. 
Note: this will increase the price by 0.0025 credits per request.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(8), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), +}) + +/** + * ZImageTurboInpaintOutput + */ +export const zSchemaZImageTurboInpaintLoraOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + seed: z.int().register(z.globalRegistry, { + description: + 'Seed of the generated Image. It will be the same value of the one passed in the input or the randomly generated that was used in case none was passed.', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + timings: z.record(z.string(), z.number()).register(z.globalRegistry, { + description: 'The timings of the generation process.', + }), +}) + +/** + * ZImageTurboInpaintLoRAInput + */ +export const zSchemaZImageTurboInpaintLoraInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: 'The acceleration level to use.', + }), + ), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + 'auto', + ]), + ]), + ), + mask_image_url: z.string().register(z.globalRegistry, { + description: 'URL of Mask for Inpaint generation.', + }), + loras: z + .optional( + z.array(zSchemaLoRaInput).register(z.globalRegistry, { + description: 'List of LoRA weights to apply (maximum 3).', + }), + ) + .default([]), + control_end: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The end of the controlnet conditioning.', + }), + ) + .default(0.8), + control_start: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The start of the controlnet conditioning.', + }), + ) + .default(0), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of Image for Inpaint generation.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The strength of the inpaint conditioning.', + }), + ) + .default(1), + 
control_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The scale of the controlnet conditioning.', + }), + ) + .default(0.75), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable prompt expansion. Note: this will increase the price by 0.0025 credits per request.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(8), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), +}) + +/** + * QwenImageLayeredOutput + */ +export const zSchemaQwenImageLayeredOutput = z.object({ + prompt: z.optional( + z.string().register(z.globalRegistry, { + description: 'The prompt used to generate the image.', + }), + ), + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * TextToImageInput + */ +export const zSchemaQwenImageLayeredInput = z.object({ + prompt: z.optional( + z.string().register(z.globalRegistry, { + description: 'A caption for the input image.', + }), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: 'The acceleration level to use.', + }), + ), + num_layers: z + .optional( + z.int().gte(1).lte(10).register(z.globalRegistry, { + description: 'The number of layers to generate.', + }), + ) + .default(4), + output_format: z.optional( + z.enum(['png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the input image.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: 'The guidance scale to use for the image generation.', + }), + ) + .default(5), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to generate an image from.', + }), + ) + .default(''), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), +}) + +/** + * ImageToImageOutput + */ 
+export const zSchemaQwenImageEdit2511Output = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * EditImageInput + */ +export const zSchemaQwenImageEdit2511Input = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to edit the image with.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: 'The acceleration level to use.', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If `True`, the media will be returned as a data URI.', + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: 'The guidance scale to use for the image generation.', + }), + ) + .default(4.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: 'The URLs of the images to edit.', + }), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to generate an image from.', + }), + ) + .default(''), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The same seed and the same prompt given to the same version of the model will output the same image every time.', + }), + ), +}) + +/** + * ImageEditOutput + * + * Output for Wan 2.6 image editing + */ +export const zSchemaV26ImageToImageOutput = z + .object({ + images: z.array(zSchemaFile).register(z.globalRegistry, { + description: 'Generated images in PNG format', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), + }) + .register(z.globalRegistry, { + description: 'Output for Wan 2.6 image editing', + }) + +/** + * ImageEditInput + * + * Input for Wan 2.6 image editing with reference images (enable_interleave=false) + */ +export const zSchemaV26ImageToImageInput = z + .object({ + prompt: z.string().min(1).register(z.globalRegistry, { + description: + "Text prompt describing the desired image. Supports Chinese and English. 
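+// --- Illustrative usage (not part of the generated schemas) ---------------
+// For defaulted fields, z.input and z.output diverge: the input type may
+// omit guidance_scale and num_inference_steps, while the parsed value is
+// guaranteed to carry them (4.5 and 28 for this schema). A sketch:
+//
+//   type Edit2511In = z.input<typeof zSchemaQwenImageEdit2511Input>
+//   type Edit2511Out = z.output<typeof zSchemaQwenImageEdit2511Input>
+//   const parsed: Edit2511Out = zSchemaQwenImageEdit2511Input.parse({
+//     prompt: 'Remove the text from the sign',
+//     image_urls: ['https://example.com/sign.png'],
+//   } satisfies Edit2511In)
+//   // parsed.guidance_scale === 4.5, parsed.num_inference_steps === 28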
Max 2000 characters. Example: 'Generate an image using the style of image 1 and background of image 2'.", + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: + 'Number of images to generate (1-4). Directly affects billing cost.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Enable LLM prompt optimization. Significantly improves results for simple prompts but adds 3-4 seconds processing time.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility (0-2147483647). Same seed produces more consistent results.', + }), + ), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + "Reference images for editing (1-3 images required). Order matters: reference as 'image 1', 'image 2', 'image 3' in prompt. Resolution: 384-5000px each dimension. Max size: 10MB each. Formats: JPEG, JPG, PNG (no alpha), BMP, WEBP.", + }), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Content to avoid in the generated image. Max 500 characters.', + }), + ) + .default(''), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Enable content moderation for input and output.', + }), + ) + .default(true), + }) + .register(z.globalRegistry, { + description: + 'Input for Wan 2.6 image editing with reference images (enable_interleave=false)', + }) + +/** + * QwenImageLayeredOutput + */ +export const zSchemaQwenImageLayeredLoraOutput = z.object({ + prompt: z.optional( + z.string().register(z.globalRegistry, { + description: 'The prompt used to generate the image.', + }), + ), + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * TextToImageLoRAInput + */ +export const zSchemaQwenImageLayeredLoraInput = z.object({ + prompt: z.optional( + z.string().register(z.globalRegistry, { + description: 'A caption for the input image.', + }), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: 'The acceleration level to use.', + }), + ), + num_layers: z + .optional( + z.int().gte(1).lte(10).register(z.globalRegistry, { + description: 'The number of layers to generate.', + }), + ) + .default(4), + output_format: z.optional( + z.enum(['png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the input image.', + }), + loras: z + .optional( + z.array(zSchemaLoRaInput).register(z.globalRegistry, { + description: 'List of LoRA weights to apply (maximum 3).', + }), + ) + .default([]), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: 'The guidance scale to use for the image generation.', + }), + ) + .default(5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to generate an image from.', + }), + ) + .default(''), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * ArchEditOutput + */ +export const zSchemaAiHomeEditOutput = z.object({ + image: zSchemaImage, + status: z.string().register(z.globalRegistry, { + description: 'Status message with processing details', + }), +}) + +/** + * ArchEditInput + */ +export const zSchemaAiHomeEditInput = z.object({ + input_image_url: z.string().max(512).register(z.globalRegistry, { + description: 'URL of the image to do architectural editing', + }), + editing_type: z + .enum(['structural editing', 'virtual staging', 'both']) + .register(z.globalRegistry, { + description: + 'Type of editing. Structural editing only edits structural elements such as windows, walls etc. Virtual staging edits your furniture. 
Both performs full editing, covering structural elements and furniture', + }), + style: z + .enum([ + 'minimalistic-interior', + 'farmhouse-interior', + 'luxury-interior', + 'modern-interior', + 'zen-interior', + 'mid century-interior', + 'airbnb-interior', + 'cozy-interior', + 'rustic-interior', + 'christmas-interior', + 'bohemian-interior', + 'tropical-interior', + 'industrial-interior', + 'japanese-interior', + 'vintage-interior', + 'loft-interior', + 'halloween-interior', + 'soho-interior', + 'baroque-interior', + 'kids room-interior', + 'girls room-interior', + 'boys room-interior', + 'scandinavian-interior', + 'french country-interior', + 'mediterranean-interior', + 'cyberpunk-interior', + 'hot pink-interior', + 'biophilic-interior', + 'ancient egypt-interior', + 'pixel-interior', + 'art deco-interior', + 'modern-exterior', + 'minimalistic-exterior', + 'farmhouse-exterior', + 'cozy-exterior', + 'luxury-exterior', + 'colonial-exterior', + 'zen-exterior', + 'asian-exterior', + 'creepy-exterior', + 'airstone-exterior', + 'ancient greek-exterior', + 'art deco-exterior', + 'brutalist-exterior', + 'christmas lights-exterior', + 'contemporary-exterior', + 'cottage-exterior', + 'dutch colonial-exterior', + 'federal colonial-exterior', + 'fire-exterior', + 'french provincial-exterior', + 'full glass-exterior', + 'georgian colonial-exterior', + 'gothic-exterior', + 'greek revival-exterior', + 'ice-exterior', + 'italianate-exterior', + 'mediterranean-exterior', + 'midcentury-exterior', + 'middle eastern-exterior', + 'minecraft-exterior', + 'morocco-exterior', + 'neoclassical-exterior', + 'spanish-exterior', + 'tudor-exterior', + 'underwater-exterior', + 'winter-exterior', + 'yard lighting-exterior', + ]) + .register(z.globalRegistry, { + description: 'Style for furniture and decor', + }), + additional_elements: z.optional(z.union([z.string().max(200), z.unknown()])), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: + "The format of the generated image. 
Choose from: 'jpeg' or 'png'.", + }), + ), + architecture_type: z + .enum([ + 'living room-interior', + 'bedroom-interior', + 'kitchen-interior', + 'dining room-interior', + 'bathroom-interior', + 'laundry room-interior', + 'home office-interior', + 'study room-interior', + 'dorm room-interior', + 'coffee shop-interior', + 'gaming room-interior', + 'restaurant-interior', + 'office-interior', + 'attic-interior', + 'toilet-interior', + 'other-interior', + 'house-exterior', + 'villa-exterior', + 'backyard-exterior', + 'courtyard-exterior', + 'ranch-exterior', + 'office-exterior', + 'retail-exterior', + 'tower-exterior', + 'apartment-exterior', + 'school-exterior', + 'museum-exterior', + 'commercial-exterior', + 'residential-exterior', + 'other-exterior', + ]) + .register(z.globalRegistry, { + description: 'Type of architecture for appropriate furniture selection', + }), + color_palette: z + .enum([ + 'surprise me', + 'golden beige', + 'refined blues', + 'dusky elegance', + 'emerald charm', + 'crimson luxury', + 'golden sapphire', + 'soft pastures', + 'candy sky', + 'peach meadow', + 'muted sands', + 'ocean breeze', + 'frosted pastels', + 'spring bloom', + 'gentle horizon', + 'seaside breeze', + 'azure coast', + 'golden shore', + 'mediterranean gem', + 'ocean serenity', + 'serene blush', + 'muted horizon', + 'pastel shores', + 'dusky calm', + 'woodland retreat', + 'meadow glow', + 'forest canopy', + 'riverbank calm', + 'earthy tones', + 'earthy neutrals', + 'arctic mist', + 'aqua drift', + 'blush bloom', + 'coral haze', + 'retro rust', + 'autumn glow', + 'rustic charm', + 'vintage sage', + 'faded plum', + 'electric lime', + 'violet pulse', + 'neon sorbet', + 'aqua glow', + 'fluorescent sunset', + 'lavender bloom', + 'petal fresh', + 'meadow light', + 'sunny pastures', + 'frosted mauve', + 'snowy hearth', + 'icy blues', + 'winter twilight', + 'earthy hues', + 'stone balance', + 'neutral sands', + 'slate shades', + ]) + .register(z.globalRegistry, { + description: 'Color palette for furniture and decor', + }), + custom_prompt: z + .optional( + z.string().max(300).register(z.globalRegistry, { + description: + 'Custom prompt for architectural editing, it overrides above options when used', + }), + ) + .default(''), +}) + +/** + * ArchStyleOutput + */ +export const zSchemaAiHomeStyleOutput = z.object({ + image: zSchemaImage, + status: z.string().register(z.globalRegistry, { + description: 'Status message with processing details', + }), +}) + +/** + * ArchStyleInput + */ +export const zSchemaAiHomeStyleInput = z.object({ + input_image_url: z.string().max(512).register(z.globalRegistry, { + description: 'URL of the image to do architectural styling', + }), + input_image_strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Strength of the input image', + }), + ) + .default(0.85), + additional_elements: z.optional(z.union([z.string().max(200), z.unknown()])), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: + "The format of the generated image. 
Choose from: 'jpeg' or 'png'.", + }), + ), + style: z + .enum([ + 'minimalistic-interior', + 'farmhouse-interior', + 'luxury-interior', + 'modern-interior', + 'zen-interior', + 'mid century-interior', + 'airbnb-interior', + 'cozy-interior', + 'rustic-interior', + 'christmas-interior', + 'bohemian-interior', + 'tropical-interior', + 'industrial-interior', + 'japanese-interior', + 'vintage-interior', + 'loft-interior', + 'halloween-interior', + 'soho-interior', + 'baroque-interior', + 'kids room-interior', + 'girls room-interior', + 'boys room-interior', + 'scandinavian-interior', + 'french country-interior', + 'mediterranean-interior', + 'cyberpunk-interior', + 'hot pink-interior', + 'biophilic-interior', + 'ancient egypt-interior', + 'pixel-interior', + 'art deco-interior', + 'modern-exterior', + 'minimalistic-exterior', + 'farmhouse-exterior', + 'cozy-exterior', + 'luxury-exterior', + 'colonial-exterior', + 'zen-exterior', + 'asian-exterior', + 'creepy-exterior', + 'airstone-exterior', + 'ancient greek-exterior', + 'art deco-exterior', + 'brutalist-exterior', + 'christmas lights-exterior', + 'contemporary-exterior', + 'cottage-exterior', + 'dutch colonial-exterior', + 'federal colonial-exterior', + 'fire-exterior', + 'french provincial-exterior', + 'full glass-exterior', + 'georgian colonial-exterior', + 'gothic-exterior', + 'greek revival-exterior', + 'ice-exterior', + 'italianate-exterior', + 'mediterranean-exterior', + 'midcentury-exterior', + 'middle eastern-exterior', + 'minecraft-exterior', + 'morocco-exterior', + 'neoclassical-exterior', + 'spanish-exterior', + 'tudor-exterior', + 'underwater-exterior', + 'winter-exterior', + 'yard lighting-exterior', + ]) + .register(z.globalRegistry, { + description: 'Style for furniture and decor', + }), + architecture_type: z + .enum([ + 'living room-interior', + 'bedroom-interior', + 'kitchen-interior', + 'dining room-interior', + 'bathroom-interior', + 'laundry room-interior', + 'home office-interior', + 'study room-interior', + 'dorm room-interior', + 'coffee shop-interior', + 'gaming room-interior', + 'restaurant-interior', + 'office-interior', + 'attic-interior', + 'toilet-interior', + 'other-interior', + 'house-exterior', + 'villa-exterior', + 'backyard-exterior', + 'courtyard-exterior', + 'ranch-exterior', + 'office-exterior', + 'retail-exterior', + 'tower-exterior', + 'apartment-exterior', + 'school-exterior', + 'museum-exterior', + 'commercial-exterior', + 'residential-exterior', + 'other-exterior', + ]) + .register(z.globalRegistry, { + description: 'Type of architecture for appropriate furniture selection', + }), + color_palette: z + .enum([ + 'surprise me', + 'golden beige', + 'refined blues', + 'dusky elegance', + 'emerald charm', + 'crimson luxury', + 'golden sapphire', + 'soft pastures', + 'candy sky', + 'peach meadow', + 'muted sands', + 'ocean breeze', + 'frosted pastels', + 'spring bloom', + 'gentle horizon', + 'seaside breeze', + 'azure coast', + 'golden shore', + 'mediterranean gem', + 'ocean serenity', + 'serene blush', + 'muted horizon', + 'pastel shores', + 'dusky calm', + 'woodland retreat', + 'meadow glow', + 'forest canopy', + 'riverbank calm', + 'earthy tones', + 'earthy neutrals', + 'arctic mist', + 'aqua drift', + 'blush bloom', + 'coral haze', + 'retro rust', + 'autumn glow', + 'rustic charm', + 'vintage sage', + 'faded plum', + 'electric lime', + 'violet pulse', + 'neon sorbet', + 'aqua glow', + 'fluorescent sunset', + 'lavender bloom', + 'petal fresh', + 'meadow light', + 'sunny pastures', + 'frosted mauve', + 
'snowy hearth', + 'icy blues', + 'winter twilight', + 'earthy hues', + 'stone balance', + 'neutral sands', + 'slate shades', + ]) + .register(z.globalRegistry, { + description: 'Color palette for furniture and decor', + }), + style_image_url: z.optional(z.union([z.string().max(512), z.unknown()])), + custom_prompt: z + .optional( + z.string().max(300).register(z.globalRegistry, { + description: + 'Custom prompt for architectural editing, it overrides above options when used', + }), + ) + .default(''), + enhanced_rendering: z.optional(z.union([z.boolean(), z.unknown()])), +}) + +/** + * ImageToImageOutput + */ +export const zSchemaQwenImageEdit2511LoraOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * EditImageLoraInput + */ +export const zSchemaQwenImageEdit2511LoraInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to edit the image with.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: 'The acceleration level to use.', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If `True`, the media will be returned as a data URI.', + }), + ) + .default(false), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + 'The LoRAs to use for the image generation. 
You can use up to 3 LoRAs and they will be merged together to generate the final image.', + }), + ) + .default([]), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: 'The guidance scale to use for the image generation.', + }), + ) + .default(4.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: 'The URLs of the images to edit.', + }), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to generate an image from.', + }), + ) + .default(''), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The same seed and the same prompt given to the same version of the model will output the same image every time.', + }), + ), +}) + +/** + * MultipleAnglesOutput + * + * Output model for Multiple Angles endpoint + */ +export const zSchemaQwenImageEdit2511MultipleAnglesOutput = z + .object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The constructed prompt used for generation', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated/edited images', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), + }) + .register(z.globalRegistry, { + description: 'Output model for Multiple Angles endpoint', + }) + +/** + * MultipleAnglesInput + * + * Input model for Multiple Angles endpoint - Camera control with precise adjustments using trigger word. + * Prompt is built automatically from slider values. + */ +export const zSchemaQwenImageEdit2511MultipleAnglesInput = z + .object({ + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: 'Acceleration level for image generation.', + }), + ), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + horizontal_angle: z + .optional( + z.number().gte(0).lte(360).register(z.globalRegistry, { + description: + 'Horizontal rotation angle around the object in degrees. 0°=front view, 90°=right side, 180°=back view, 270°=left side, 360°=front view again.', + }), + ) + .default(0), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: 'The CFG (Classifier Free Guidance) scale.', + }), + ) + .default(4.5), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: 'The URL of the image to adjust camera angle for.', + }), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt for the generation', + }), + ) + .default(''), + zoom: z + .optional( + z.number().gte(0).lte(10).register(z.globalRegistry, { + description: + 'Camera zoom/distance. 0=wide shot (far away), 5=medium shot (normal), 10=close-up (very close).', + }), + ) + .default(5), + vertical_angle: z + .optional( + z.number().gte(-30).lte(90).register(z.globalRegistry, { + description: + "Vertical camera angle in degrees. 
-30°=low-angle shot (looking up), 0°=eye-level, 30°=elevated, 60°=high-angle, 90°=bird's-eye view (looking down).", + }), + ) + .default(0), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + lora_scale: z + .optional( + z.number().gte(0).lte(4).register(z.globalRegistry, { + description: + 'The scale factor for the LoRA model. Controls the strength of the camera control effect.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'The format of the output image', + }), + ), + additional_prompt: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Additional text to append to the automatically generated prompt.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If `True`, the media will be returned as a data URI.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for reproducibility.', + }), + ), + }) + .register(z.globalRegistry, { + description: + 'Input model for Multiple Angles endpoint - Camera control with precise adjustments using trigger word.\nPrompt is built automatically from slider values.', + }) + +/** + * GlmImageToImageOutput + */ +export const zSchemaGlmImageImageToImageOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'List of URLs to the generated images.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + timings: z.record(z.string(), z.number()), +}) + +/** + * GlmImageToImageInput + */ +export const zSchemaGlmImageImageToImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Text prompt for image generation.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + 'portrait_3_2', + 'landscape_3_2', + 'portrait_hd', + 'landscape_hd', + ]), + ]), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Enable NSFW safety checking on the generated images.', + }), + ) + .default(true), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'Output image format.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If True, the image will be returned as a base64 data URI instead of a URL.', + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Classifier-free guidance scale. Higher values make the model follow the prompt more closely.', + }), + ) + .default(1.5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. The same seed with the same prompt will produce the same image.', + }), + ), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'URL(s) of the condition image(s) for image-to-image generation. Supports up to 4 URLs for multi-image references.', + }), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If True, the prompt will be enhanced using an LLM for more detailed and higher quality results.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(10).lte(100).register(z.globalRegistry, { + description: + 'Number of diffusion denoising steps. More steps generally produce higher quality images.', + }), + ) + .default(30), +}) + +/** + * Klein9BDistilledEditOutput + */ +export const zSchemaFlux2Klein9bEditOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The edited images', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * Klein9BDistilledEditInput + */ +export const zSchemaFlux2Klein9bEditInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to edit the image.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If `True`, the media will be returned as a data URI. Output is not stored when this is True.', + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(4).lte(8).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(4), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'The URLs of the images for editing. A maximum of 4 images are allowed.', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The seed to use for the generation. If not provided, a random seed will be used.', + }), + ), +}) + +/** + * Klein4BDistilledEditOutput + */ +export const zSchemaFlux2Klein4bEditOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The edited images', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * KleinDistilledEditInput + */ +export const zSchemaFlux2Klein4bEditInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to edit the image.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If `True`, the media will be returned as a data URI. 
Output is not stored when this is True.', + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(4).lte(8).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(4), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'The URLs of the images for editing. A maximum of 4 images are allowed.', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The seed to use for the generation. If not provided, a random seed will be used.', + }), + ), +}) + +/** + * Klein9BBaseEditOutput + */ +export const zSchemaFlux2Klein9bBaseEditOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The edited images', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * Klein9BEditImageInput + */ +export const zSchemaFlux2Klein9bBaseEditInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to edit the image.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: 'The acceleration level to use for image generation.', + }), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: 'Guidance scale for classifier-free guidance.', + }), + ) + .default(5), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If `True`, the media will be returned as a data URI. Output is not stored when this is True.', + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(4).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'The URLs of the images for editing. A maximum of 4 images are allowed.', + }), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Negative prompt for classifier-free guidance. 
Describes what to avoid in the image.', + }), + ) + .default(''), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The seed to use for the generation. If not provided, a random seed will be used.', + }), + ), +}) + +/** + * Klein4BBaseEditOutput + */ +export const zSchemaFlux2Klein4bBaseEditOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The edited images', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * Klein4BBaseEditInput + */ +export const zSchemaFlux2Klein4bBaseEditInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to edit the image.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: 'The acceleration level to use for image generation.', + }), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: 'Guidance scale for classifier-free guidance.', + }), + ) + .default(5), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If `True`, the media will be returned as a data URI. Output is not stored when this is True.', + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(4).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'The URLs of the images for editing. A maximum of 4 images are allowed.', + }), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Negative prompt for classifier-free guidance. Describes what to avoid in the image.', + }), + ) + .default(''), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The seed to use for the generation. 
If not provided, a random seed will be used.', + }), + ), +}) + +/** + * KleinT2IOutput + */ +export const zSchemaFlux2Klein4bBaseEditLoraOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated images', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * LoRAInput + */ +export const zSchemaFalAiFlux2KleinLoRaInput = z.object({ + path: z.string().register(z.globalRegistry, { + description: + 'URL, HuggingFace repo ID (owner/repo), or local path to LoRA weights.', + }), + scale: z + .optional( + z.number().gte(0).lte(4).register(z.globalRegistry, { + description: 'Scale factor for LoRA application (0.0 to 4.0).', + }), + ) + .default(1), +}) + +/** + * KleinBaseEditLoRAInput + */ +export const zSchemaFlux2Klein4bBaseEditLoraInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to edit the image.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: 'The acceleration level to use for image generation.', + }), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: 'Guidance scale for classifier-free guidance.', + }), + ) + .default(5), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + loras: z + .optional( + z.array(zSchemaFalAiFlux2KleinLoRaInput).register(z.globalRegistry, { + description: 'List of LoRA weights to apply (maximum 3).', + }), + ) + .default([]), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If `True`, the media will be returned as a data URI. Output is not stored when this is True.', + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(4).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'The URLs of the images for editing. A maximum of 4 images are allowed.', + }), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Negative prompt for classifier-free guidance. Describes what to avoid in the image.', + }), + ) + .default(''), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The seed to use for the generation. 
If not provided, a random seed will be used.', + }), + ), +}) + +/** + * KleinT2IOutput + */ +export const zSchemaFlux2Klein9bBaseEditLoraOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated images', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * KleinBaseEditLoRAInput + */ +export const zSchemaFlux2Klein9bBaseEditLoraInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to edit the image.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: 'The acceleration level to use for image generation.', + }), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: 'Guidance scale for classifier-free guidance.', + }), + ) + .default(5), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + loras: z + .optional( + z.array(zSchemaFalAiFlux2KleinLoRaInput).register(z.globalRegistry, { + description: 'List of LoRA weights to apply (maximum 3).', + }), + ) + .default([]), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If `True`, the media will be returned as a data URI. Output is not stored when this is True.', + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(4).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'The URLs of the images for editing. A maximum of 4 images are allowed.', + }), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Negative prompt for classifier-free guidance. Describes what to avoid in the image.', + }), + ) + .default(''), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The seed to use for the generation. 
If not provided, a random seed will be used.', + }), + ), +}) + +/** + * FiboEditExtraEPOutputModel + */ +export const zSchemaFiboEditColorizeOutput = z.object({ + images: z + .optional( + z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Generated images.', + }), + ) + .default([]), + image: zSchemaImage, + structured_instruction: z + .record(z.string(), z.unknown()) + .register(z.globalRegistry, { + description: 'Current instruction.', + }), +}) + +/** + * ColorizeInput + */ +export const zSchemaFiboEditColorizeInput = z.object({ + color: z + .enum([ + 'contemporary color', + 'vivid color', + 'black and white colors', + 'sepia vintage', + ]) + .register(z.globalRegistry, { + description: 'Select the color palette or aesthetic for the output image', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'The source image.', + }), +}) + +/** + * FiboEditExtraEPOutputModel + */ +export const zSchemaFiboEditBlendOutput = z.object({ + images: z + .optional( + z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Generated images.', + }), + ) + .default([]), + image: zSchemaImage, + structured_instruction: z + .record(z.string(), z.unknown()) + .register(z.globalRegistry, { + description: 'Current instruction.', + }), +}) + +/** + * BlendingInput + */ +export const zSchemaFiboEditBlendInput = z.object({ + instruction: z.string().register(z.globalRegistry, { + description: + 'Instruct what elements you would like to blend in your image.', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'The source image.', + }), +}) + +/** + * FiboEditExtraEPOutputModel + */ +export const zSchemaFiboEditAddObjectByTextOutput = z.object({ + images: z + .optional( + z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Generated images.', + }), + ) + .default([]), + image: zSchemaImage, + structured_instruction: z + .record(z.string(), z.unknown()) + .register(z.globalRegistry, { + description: 'Current instruction.', + }), +}) + +/** + * AddObjectByTextInput + */ +export const zSchemaFiboEditAddObjectByTextInput = z.object({ + instruction: z.string().register(z.globalRegistry, { + description: + 'The full natural language command describing what to add and where.', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'The source image.', + }), +}) + +/** + * Lighting + */ +export const zSchemaLighting = z.object({ + shadows: z.optional(z.union([z.string(), z.unknown()])), + conditions: z.optional(z.union([z.string(), z.unknown()])), + direction: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * Aesthetics + */ +export const zSchemaAesthetics = z.object({ + composition: z.optional(z.union([z.string(), z.unknown()])), + mood_atmosphere: z.optional(z.union([z.string(), z.unknown()])), + color_scheme: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * PhotographicCharacteristics + */ +export const zSchemaPhotographicCharacteristics = z.object({ + focus: z.optional(z.union([z.string(), z.unknown()])), + lens_focal_length: z.optional(z.union([z.string(), z.unknown()])), + camera_angle: z.optional(z.union([z.string(), z.unknown()])), + depth_of_field: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * PromptObject + */ +export const zSchemaPromptObject = z.object({ + relative_size: z.optional(z.union([z.string(), z.unknown()])), + description: z.optional(z.union([z.string(), z.unknown()])), + skin_tone_and_texture: z.optional(z.union([z.string(), z.unknown()])), + 
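+    // Note (editorial): fields generated as z.union([z.X(), z.unknown()])
+    // presumably mirror anyOf/nullable shapes in the upstream OpenAPI spec;
+    // because z.unknown() matches any value, these fields are effectively
+    // unvalidated passthroughs.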
appearance_details: z.optional(z.union([z.string(), z.unknown()])), + number_of_objects: z.optional(z.union([z.int(), z.unknown()])), + expression: z.optional(z.union([z.string(), z.unknown()])), + pose: z.optional(z.union([z.string(), z.unknown()])), + shape_and_color: z.optional(z.union([z.string(), z.unknown()])), + relationship: z.string().register(z.globalRegistry, { + description: + 'The relationship of the object to other objects in the image.', + }), + texture: z.optional(z.union([z.string(), z.unknown()])), + gender: z.optional(z.union([z.string(), z.unknown()])), + clothing: z.optional(z.union([z.string(), z.unknown()])), + location: z.optional(z.union([z.string(), z.unknown()])), + orientation: z.optional(z.union([z.string(), z.unknown()])), + action: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * StructuredInstruction + */ +export const zSchemaStructuredInstruction = z.object({ + background_setting: z.optional(z.union([z.string(), z.unknown()])), + artistic_style: z.optional(z.union([z.string(), z.unknown()])), + style_medium: z.optional(z.union([z.string(), z.unknown()])), + text_render: z.optional(z.union([z.array(z.unknown()), z.unknown()])), + objects: z.optional(z.union([z.array(zSchemaPromptObject), z.unknown()])), + context: z.optional(z.union([z.string(), z.unknown()])), + photographic_characteristics: z.optional( + z.union([zSchemaPhotographicCharacteristics, z.unknown()]), + ), + aesthetics: z.optional(z.union([zSchemaAesthetics, z.unknown()])), + lighting: z.optional(z.union([zSchemaLighting, z.unknown()])), + short_description: z.optional(z.union([z.string(), z.unknown()])), + edit_instruction: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * FiboEditOutputModel + */ +export const zSchemaFiboEditEditOutput = z.object({ + images: z + .optional( + z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Generated images.', + }), + ) + .default([]), + image: zSchemaImage, + structured_instruction: z + .record(z.string(), z.unknown()) + .register(z.globalRegistry, { + description: 'Current instruction.', + }), +}) + +/** + * FiboEditInputModel + */ +export const zSchemaFiboEditEditInput = z.object({ + steps_num: z + .optional( + z.int().gte(20).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps.', + }), + ) + .default(50), + instruction: z.optional(z.union([z.string(), z.unknown()])), + image_url: z.optional(z.union([z.string(), z.unknown()])), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, returns the image directly in the response (increases latency).', + }), + ) + .default(false), + guidance_scale: z.optional(z.union([z.number(), z.int()])), + structured_instruction: z.optional( + z.union([zSchemaStructuredInstruction, z.unknown()]), + ), + mask_url: z.optional(z.union([z.string(), z.unknown()])), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for image generation.', + }), + ) + .default(''), + seed: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for reproducibility.', + }), + ) + .default(5555), +}) + +/** + * FiboEditExtraEPOutputModel + */ +export const zSchemaFiboEditEraseByTextOutput = z.object({ + images: z + .optional( + z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Generated images.', + }), + ) + .default([]), + image: zSchemaImage, + structured_instruction: z + .record(z.string(), z.unknown()) + .register(z.globalRegistry, { + 
description: 'Current instruction.', + }), +}) + +/** + * EraseByTextInput + */ +export const zSchemaFiboEditEraseByTextInput = z.object({ + object_name: z.string().register(z.globalRegistry, { + description: 'The name of the object to remove.', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'The source image.', + }), +}) + +/** + * FiboEditExtraEPOutputModel + */ +export const zSchemaFiboEditRewriteTextOutput = z.object({ + images: z + .optional( + z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Generated images.', + }), + ) + .default([]), + image: zSchemaImage, + structured_instruction: z + .record(z.string(), z.unknown()) + .register(z.globalRegistry, { + description: 'Current instruction.', + }), +}) + +/** + * RewriteTextInput + */ +export const zSchemaFiboEditRewriteTextInput = z.object({ + new_text: z.string().register(z.globalRegistry, { + description: 'The new text string to appear in the image.', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'The source image.', + }), +}) + +/** + * FiboEditExtraEPOutputModel + */ +export const zSchemaFiboEditRestyleOutput = z.object({ + images: z + .optional( + z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Generated images.', + }), + ) + .default([]), + image: zSchemaImage, + structured_instruction: z + .record(z.string(), z.unknown()) + .register(z.globalRegistry, { + description: 'Current instruction.', + }), +}) + +/** + * RestyleInput + */ +export const zSchemaFiboEditRestyleInput = z.object({ + style: z + .enum([ + '3D Render', + 'Cubism', + 'Oil Painting', + 'Anime', + 'Cartoon', + 'Coloring Book', + 'Retro Ad', + 'Pop Art Halftone', + 'Vector Art', + 'Story Board', + 'Art Nouveau', + 'Cross Etching', + 'Wood Cut', + ]) + .register(z.globalRegistry, { + description: 'Select the desired artistic style for the output image.', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'The source image.', + }), +}) + +/** + * FiboEditExtraEPOutputModel + */ +export const zSchemaFiboEditRelightOutput = z.object({ + images: z + .optional( + z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Generated images.', + }), + ) + .default([]), + image: zSchemaImage, + structured_instruction: z + .record(z.string(), z.unknown()) + .register(z.globalRegistry, { + description: 'Current instruction.', + }), +}) + +/** + * RelightInput + */ +export const zSchemaFiboEditRelightInput = z.object({ + light_type: z + .enum([ + 'midday', + 'blue hour light', + 'low-angle sunlight', + 'sunrise light', + 'spotlight on subject', + 'overcast light', + 'soft overcast daylight lighting', + 'cloud-filtered lighting', + 'fog-diffused lighting', + 'moonlight lighting', + 'starlight nighttime', + 'soft bokeh lighting', + 'harsh studio lighting', + ]) + .register(z.globalRegistry, { + description: 'The quality/style/time of day.', + }), + light_direction: z.union([ + z.enum(['front', 'side', 'bottom', 'top-down']), + z.unknown(), + ]), + image_url: z.string().register(z.globalRegistry, { + description: 'The source image.', + }), +}) + +/** + * FiboEditExtraEPOutputModel + */ +export const zSchemaFiboEditReseasonOutput = z.object({ + images: z + .optional( + z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Generated images.', + }), + ) + .default([]), + image: zSchemaImage, + structured_instruction: z + .record(z.string(), z.unknown()) + .register(z.globalRegistry, { + description: 'Current instruction.', + }), +}) + +/** + * 
ReseasonInput + */ +export const zSchemaFiboEditReseasonInput = z.object({ + season: z + .enum(['spring', 'summer', 'autumn', 'winter']) + .register(z.globalRegistry, { + description: 'The desired season.', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'The source image.', + }), +}) + +/** + * FiboEditExtraEPOutputModel + */ +export const zSchemaFiboEditRestoreOutput = z.object({ + images: z + .optional( + z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Generated images.', + }), + ) + .default([]), + image: zSchemaImage, + structured_instruction: z + .record(z.string(), z.unknown()) + .register(z.globalRegistry, { + description: 'Current instruction.', + }), +}) + +/** + * RestoreInput + */ +export const zSchemaFiboEditRestoreInput = z.object({ + image_url: z.string().register(z.globalRegistry, { + description: 'The source image.', + }), +}) + +/** + * FiboEditExtraEPOutputModel + */ +export const zSchemaFiboEditSketchToColoredImageOutput = z.object({ + images: z + .optional( + z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Generated images.', + }), + ) + .default([]), + image: zSchemaImage, + structured_instruction: z + .record(z.string(), z.unknown()) + .register(z.globalRegistry, { + description: 'Current instruction.', + }), +}) + +/** + * SketchColoredImageInput + */ +export const zSchemaFiboEditSketchToColoredImageInput = z.object({ + image_url: z.string().register(z.globalRegistry, { + description: 'The source image.', + }), +}) + +/** + * FiboEditExtraEPOutputModel + */ +export const zSchemaFiboEditReplaceObjectByTextOutput = z.object({ + images: z + .optional( + z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Generated images.', + }), + ) + .default([]), + image: zSchemaImage, + structured_instruction: z + .record(z.string(), z.unknown()) + .register(z.globalRegistry, { + description: 'Current instruction.', + }), +}) + +/** + * ReplaceObjectInput + */ +export const zSchemaFiboEditReplaceObjectByTextInput = z.object({ + instruction: z.string().register(z.globalRegistry, { + description: + 'The full natural language command describing what to replace.', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'The source image.', + }), +}) + +/** + * FaceFusionImageOutput + * + * FaceFusion output payload when image content is generated + */ +export const zSchemaAiFaceSwapFaceswapimageOutput = z + .object({ + image: zSchemaImage, + processing_time_ms: z.optional(z.union([z.int(), z.unknown()])), + }) + .register(z.globalRegistry, { + description: 'FaceFusion output payload when image content is generated', + }) + +/** + * FaceSwapInputImage + * + * Input schema for image ↔ image face swap + */ +export const zSchemaAiFaceSwapFaceswapimageInput = z + .object({ + source_face_url: z.string().register(z.globalRegistry, { + description: 'Source face image', + }), + target_image_url: z.string().register(z.globalRegistry, { + description: 'Target image URL', + }), + }) + .register(z.globalRegistry, { + description: 'Input schema for image ↔ image face swap', + }) + +/** + * ReplaceBackgroundOutputModel + */ +export const zSchemaReplaceBackgroundOutput = z.object({ + images: z.optional( + z.array(z.record(z.string(), z.unknown())).register(z.globalRegistry, { + description: 'Generated images.', + }), + ), + image: zSchemaImage, +}) + +/** + * ReplaceBackgroundInputModel + */ +export const zSchemaReplaceBackgroundInput = z.object({ + prompt: z.optional(z.union([z.string(), z.unknown()])), + 
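+  // Note (editorial): unlike most schemas in this file, `seed` below defaults
+  // to a fixed value (4925634) rather than being left undefined, so Zod fills
+  // in the same seed whenever the caller omits it and repeated runs with
+  // identical inputs should reproduce the same image.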
steps_num: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Number of inference steps.', + }), + ) + .default(30), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, returns the image directly in the response (increases latency).', + }), + ) + .default(false), + seed: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for reproducibility.', + }), + ) + .default(4925634), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for background replacement.', + }), + ) + .default(''), + image_url: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * Output + */ +export const zSchemaClarityUpscalerOutput = z.object({ + image: zSchemaImage, + seed: z.int().register(z.globalRegistry, { + description: 'The seed used to generate the image.', + }), + timings: z.record(z.string(), z.number()).register(z.globalRegistry, { + description: 'The timings of the different steps in the workflow.', + }), +}) + +/** + * Input + */ +export const zSchemaClarityUpscalerInput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. Be as descriptive as possible for best results.', + }), + ) + .default('masterpiece, best quality, highres'), + resemblance: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n The resemblance of the upscaled image to the original image. The higher the resemblance, the more the model will try to keep the original image.\n Refers to the strength of the ControlNet.\n ', + }), + ) + .default(0.6), + creativity: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n The creativity of the model. The higher the creativity, the more the model will deviate from the prompt.\n Refers to the denoise strength of the sampling.\n ', + }), + ) + .default(0.35), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to upscale.', + }), + upscale_factor: z + .optional( + z.number().gte(1).lte(4).register(z.globalRegistry, { + description: 'The upscale factor', + }), + ) + .default(2), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(4), + num_inference_steps: z + .optional( + z.int().gte(4).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(18), + seed: z.optional(z.union([z.int(), z.unknown()])), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "The negative prompt to use. 
Use it to address details that you don't want in the image.", + }), + ) + .default('(worst quality, low quality, normal quality:2)'), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to false, the safety checker will be disabled.', + }), + ) + .default(true), +}) + +/** + * Output + */ +export const zSchemaAuraSrOutput = z.object({ + image: zSchemaImage, + timings: z.record(z.string(), z.number()).register(z.globalRegistry, { + description: 'Timings for each step in the pipeline.', + }), +}) + +/** + * Input + */ +export const zSchemaAuraSrInput = z.object({ + overlapping_tiles: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to use overlapping tiles for upscaling. Setting this to true helps remove seams but doubles the inference time.', + }), + ) + .default(false), + checkpoint: z.optional( + z.enum(['v1', 'v2']).register(z.globalRegistry, { + description: 'Checkpoint to use for upscaling. More coming soon.', + }), + ), + upscaling_factor: z.optional( + z.literal(4).register(z.globalRegistry, { + description: 'Upscaling factor. More coming soon.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to upscale.', + }), +}) + +/** + * Output + */ +export const zSchemaFluxDevImageToImageOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * BaseImageToInput + */ +export const zSchemaFluxDevImageToImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: + 'The speed of the generation. The higher the speed, the faster the generation.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to generate an image from.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + strength: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: + 'The strength of the initial image. 
Higher strength values are better for this model.', + }), + ) + .default(0.95), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + seed: z.optional(z.union([z.int(), z.unknown()])), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(10).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(40), +}) + +/** + * Flux2ProEditOutput + */ +export const zSchemaFlux2ProEditOutput = z.object({ + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated images.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for the generation.', + }), +}) + +/** + * Flux2ProImageEditInput + */ +export const zSchemaFlux2ProEditInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'auto', + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + safety_tolerance: z.optional( + z.enum(['1', '2', '3', '4', '5']).register(z.globalRegistry, { + description: + 'The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive.', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed to use for the generation.', + }), + ), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: 'List of URLs of input images for editing', + }), +}) + +/** + * Flux2EditImageOutput + */ +export const zSchemaFlux2EditOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The edited images', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + timings: z.record(z.string(), z.number()), +}) + +/** + * Flux2EditImageInput + */ +export const zSchemaFlux2EditInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to edit the image.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: 'The acceleration level to use for the image generation.', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'Guidance Scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.', + }), + ) + .default(2.5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The seed to use for the generation. If not provided, a random seed will be used.', + }), + ), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'The URLs of the images for editing. A maximum of 4 images are allowed, if more are provided, only the first 4 will be used.', + }), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the prompt will be expanded for better results.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(4).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), +}) + +/** + * Flux2EditImageLoRAOutput + */ +export const zSchemaFlux2LoraEditOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The edited images', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ',
+  }),
+  has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, {
+    description: 'Whether the generated images contain NSFW concepts.',
+  }),
+  timings: z.record(z.string(), z.number()),
+})
+
+/**
+ * Flux2EditImageLoRAInput
+ */
+export const zSchemaFlux2LoraEditInput = z.object({
+  prompt: z.string().register(z.globalRegistry, {
+    description: 'The prompt to generate an image from.',
+  }),
+  num_images: z
+    .optional(
+      z.int().gte(1).lte(4).register(z.globalRegistry, {
+        description: 'The number of images to generate.',
+      }),
+    )
+    .default(1),
+  image_size: z.optional(
+    z.union([
+      zSchemaImageSize,
+      z.enum([
+        'square_hd',
+        'square',
+        'portrait_4_3',
+        'portrait_16_9',
+        'landscape_4_3',
+        'landscape_16_9',
+      ]),
+    ]),
+  ),
+  acceleration: z.optional(
+    z.enum(['none', 'regular', 'high']).register(z.globalRegistry, {
+      description: 'The acceleration level to use for the image generation.',
+    }),
+  ),
+  enable_safety_checker: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description: 'If set to true, the safety checker will be enabled.',
+      }),
+    )
+    .default(true),
+  output_format: z.optional(
+    z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, {
+      description: 'The format of the generated image.',
+    }),
+  ),
+  sync_mode: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description:
+          "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.",
+      }),
+    )
+    .default(false),
+  loras: z
+    .optional(
+      z.array(zSchemaLoRaInput).register(z.globalRegistry, {
+        description:
+          'List of LoRA weights to apply (maximum 3). Each LoRA can be a URL, HuggingFace repo ID, or local path.',
+      }),
+    )
+    .default([]),
+  guidance_scale: z
+    .optional(
+      z.number().gte(0).lte(20).register(z.globalRegistry, {
+        description:
+          'Guidance Scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.',
+      }),
+    )
+    .default(2.5),
+  seed: z.optional(
+    z.int().register(z.globalRegistry, {
+      description:
+        'The seed to use for the generation. If not provided, a random seed will be used.',
+    }),
+  ),
+  image_urls: z.array(z.string()).register(z.globalRegistry, {
+    description:
+      'The URLs of the images for editing. A maximum of 3 images are allowed, if more are provided, only the first 3 will be used.',
+  }),
+  enable_prompt_expansion: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description:
+          'If set to true, the prompt will be expanded for better results.',
+      }),
+    )
+    .default(false),
+  num_inference_steps: z
+    .optional(
+      z.int().gte(4).lte(50).register(z.globalRegistry, {
+        description: 'The number of inference steps to perform.',
+      }),
+    )
+    .default(28),
+})
+
+/**
+ * FluxKontextOutput
+ */
+export const zSchemaFluxProKontextOutput = z.object({
+  prompt: z.string().register(z.globalRegistry, {
+    description: 'The prompt used for generating the image.',
+  }),
+  images: z.array(zSchemaFalToolkitImageImageImage).register(z.globalRegistry, {
+    description: 'The generated image files info.',
+  }),
+  timings: z.record(z.string(), z.number()),
+  has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, {
+    description: 'Whether the generated images contain NSFW concepts.',
+  }),
+  seed: z.int().register(z.globalRegistry, {
+    description:
+      '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ',
+  }),
+})
+
+/**
+ * FluxKontextInput
+ */
+export const zSchemaFluxProKontextInput = z.object({
+  prompt: z.string().register(z.globalRegistry, {
+    description: 'The prompt to generate an image from.',
+  }),
+  num_images: z
+    .optional(
+      z.int().gte(1).lte(4).register(z.globalRegistry, {
+        description: 'The number of images to generate.',
+      }),
+    )
+    .default(1),
+  aspect_ratio: z.optional(
+    z
+      .enum(['21:9', '16:9', '4:3', '3:2', '1:1', '2:3', '3:4', '9:16', '9:21'])
+      .register(z.globalRegistry, {
+        description: 'The aspect ratio of the generated image.',
+      }),
+  ),
+  output_format: z.optional(
+    z.enum(['jpeg', 'png']).register(z.globalRegistry, {
+      description: 'The format of the generated image.',
+    }),
+  ),
+  image_url: z.string().register(z.globalRegistry, {
+    description: 'Image prompt for the omni model.',
+  }),
+  sync_mode: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description:
+          "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.",
+      }),
+    )
+    .default(false),
+  safety_tolerance: z.optional(
+    z.enum(['1', '2', '3', '4', '5', '6']).register(z.globalRegistry, {
+      description:
+        'The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive.',
+    }),
+  ),
+  guidance_scale: z
+    .optional(
+      z.number().gte(1).lte(20).register(z.globalRegistry, {
+        description:
+          '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ',
+      }),
+    )
+    .default(3.5),
+  seed: z.optional(
+    z.int().register(z.globalRegistry, {
+      description:
+        '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ',
+    }),
+  ),
+  enhance_prompt: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description: 'Whether to enhance the prompt for better results.',
+      }),
+    )
+    .default(false),
+})
+
+export const zSchemaQueueStatus = z.object({
+  status: z.enum(['IN_QUEUE', 'IN_PROGRESS', 'COMPLETED']),
+  request_id: z.string().register(z.globalRegistry, {
+    description: 'The request id.',
+  }),
+  response_url: z.optional(
+    z.string().register(z.globalRegistry, {
+      description: 'The response url.',
+    }),
+  ),
+  status_url: z.optional(
+    z.string().register(z.globalRegistry, {
+      description: 'The status url.',
+    }),
+  ),
+  cancel_url: z.optional(
+    z.string().register(z.globalRegistry, {
+      description: 'The cancel url.',
+    }),
+  ),
+  logs: z.optional(
+    z.record(z.string(), z.unknown()).register(z.globalRegistry, {
+      description: 'The logs.',
+    }),
+  ),
+  metrics: z.optional(
+    z.record(z.string(), z.unknown()).register(z.globalRegistry, {
+      description: 'The metrics.',
+    }),
+  ),
+  queue_position: z.optional(
+    z.int().register(z.globalRegistry, {
+      description: 'The queue position.',
+    }),
+  ),
+})
+
+export const zGetFalAiFluxProKontextRequestsByRequestIdStatusData = z.object({
+  body: z.optional(z.never()),
+  path: z.object({
+    request_id: z.string().register(z.globalRegistry, {
+      description: 'Request ID',
+    }),
+  }),
+  query: z.optional(
+    z.object({
+      logs: z.optional(
+        z.number().register(z.globalRegistry, {
+          description:
+            'Whether to include logs (`1`) in the response or not (`0`).',
+        }),
+      ),
+    }),
+  ),
+})
+
+/**
+ * The request status.
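+ *
+ * A minimal polling sketch, not part of the generated schemas: the
+ * `queue.fal.run` host and the `Key` authorization header format are
+ * assumptions, the path is inferred from the schema name, and `requestId`
+ * stands for an id returned by a previous submit.
+ *
+ * @example
+ * const res = await fetch(
+ *   `https://queue.fal.run/fal-ai/flux-pro/kontext/requests/${requestId}/status`,
+ *   { headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+ * )
+ * // Validate the payload before using it; parse() throws on schema mismatch.
+ * const status =
+ *   zGetFalAiFluxProKontextRequestsByRequestIdStatusResponse.parse(
+ *     await res.json(),
+ *   )
+ * if (status.status === 'COMPLETED') console.log(status.response_url)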
+ */ +export const zGetFalAiFluxProKontextRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxProKontextRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxProKontextRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxProKontextData = z.object({ + body: zSchemaFluxProKontextInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxProKontextResponse = zSchemaQueueStatus + +export const zGetFalAiFluxProKontextRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFluxProKontextRequestsByRequestIdResponse = + zSchemaFluxProKontextOutput + +export const zGetFalAiFlux2LoraEditRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFlux2LoraEditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2LoraEditRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux2LoraEditRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2LoraEditData = z.object({ + body: zSchemaFlux2LoraEditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux2LoraEditResponse = zSchemaQueueStatus + +export const zGetFalAiFlux2LoraEditRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiFlux2LoraEditRequestsByRequestIdResponse = + zSchemaFlux2LoraEditOutput + +export const zGetFalAiFlux2EditRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFlux2EditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2EditRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux2EditRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2EditData = z.object({ + body: zSchemaFlux2EditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux2EditResponse = zSchemaQueueStatus + +export const zGetFalAiFlux2EditRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2EditRequestsByRequestIdResponse = + zSchemaFlux2EditOutput + +export const zGetFalAiFlux2ProEditRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFlux2ProEditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2ProEditRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux2ProEditRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2ProEditData = z.object({ + body: zSchemaFlux2ProEditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
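+ *
+ * A hypothetical submit sketch: the endpoint path is inferred from the
+ * schema name and the host and auth header format are assumptions.
+ *
+ * @example
+ * // Required fields per zSchemaFlux2ProEditInput: prompt and image_urls;
+ * // parse() also fills in the declared defaults (sync_mode, etc.).
+ * const body = zSchemaFlux2ProEditInput.parse({
+ *   prompt: 'make the sky overcast',
+ *   image_urls: ['https://example.com/input.png'],
+ * })
+ * const res = await fetch('https://queue.fal.run/fal-ai/flux-2-pro/edit', {
+ *   method: 'POST',
+ *   headers: {
+ *     Authorization: `Key ${process.env.FAL_KEY}`,
+ *     'Content-Type': 'application/json',
+ *   },
+ *   body: JSON.stringify(body),
+ * })
+ * const queued = zPostFalAiFlux2ProEditResponse.parse(await res.json())
+ * console.log(queued.request_id, queued.status_url)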
+ */ +export const zPostFalAiFlux2ProEditResponse = zSchemaQueueStatus + +export const zGetFalAiFlux2ProEditRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2ProEditRequestsByRequestIdResponse = + zSchemaFlux2ProEditOutput + +export const zGetFalAiFluxDevImageToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFluxDevImageToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxDevImageToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxDevImageToImageRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxDevImageToImageData = z.object({ + body: zSchemaFluxDevImageToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxDevImageToImageResponse = zSchemaQueueStatus + +export const zGetFalAiFluxDevImageToImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFluxDevImageToImageRequestsByRequestIdResponse = + zSchemaFluxDevImageToImageOutput + +export const zGetFalAiAuraSrRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiAuraSrRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiAuraSrRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
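+ *
+ * A hypothetical cancel sketch; host, path, and auth header are assumptions
+ * inferred from the schema name and the QueueStatus `cancel_url` field.
+ *
+ * @example
+ * const res = await fetch(
+ *   `https://queue.fal.run/fal-ai/aura-sr/requests/${requestId}/cancel`,
+ *   {
+ *     method: 'PUT',
+ *     headers: { Authorization: `Key ${process.env.FAL_KEY}` },
+ *   },
+ * )
+ * const { success } = zPutFalAiAuraSrRequestsByRequestIdCancelResponse.parse(
+ *   await res.json(),
+ * )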
+ */ +export const zPutFalAiAuraSrRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiAuraSrData = z.object({ + body: zSchemaAuraSrInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiAuraSrResponse = zSchemaQueueStatus + +export const zGetFalAiAuraSrRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiAuraSrRequestsByRequestIdResponse = zSchemaAuraSrOutput + +export const zGetFalAiClarityUpscalerRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiClarityUpscalerRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiClarityUpscalerRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiClarityUpscalerRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiClarityUpscalerData = z.object({ + body: zSchemaClarityUpscalerInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiClarityUpscalerResponse = zSchemaQueueStatus + +export const zGetFalAiClarityUpscalerRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiClarityUpscalerRequestsByRequestIdResponse = + zSchemaClarityUpscalerOutput + +export const zGetBriaReplaceBackgroundRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetBriaReplaceBackgroundRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutBriaReplaceBackgroundRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutBriaReplaceBackgroundRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostBriaReplaceBackgroundData = z.object({ + body: zSchemaReplaceBackgroundInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostBriaReplaceBackgroundResponse = zSchemaQueueStatus + +export const zGetBriaReplaceBackgroundRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetBriaReplaceBackgroundRequestsByRequestIdResponse = + zSchemaReplaceBackgroundOutput + +export const zGetHalfMoonAiAiFaceSwapFaceswapimageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetHalfMoonAiAiFaceSwapFaceswapimageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutHalfMoonAiAiFaceSwapFaceswapimageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutHalfMoonAiAiFaceSwapFaceswapimageRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostHalfMoonAiAiFaceSwapFaceswapimageData = z.object({ + body: zSchemaAiFaceSwapFaceswapimageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostHalfMoonAiAiFaceSwapFaceswapimageResponse = zSchemaQueueStatus + +export const zGetHalfMoonAiAiFaceSwapFaceswapimageRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
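+ *
+ * A hypothetical result fetch once a status poll reports COMPLETED;
+ * `status.response_url` comes from the QueueStatus payload and the auth
+ * header format is an assumption.
+ *
+ * @example
+ * const res = await fetch(status.response_url!, {
+ *   headers: { Authorization: `Key ${process.env.FAL_KEY}` },
+ * })
+ * const result =
+ *   zGetHalfMoonAiAiFaceSwapFaceswapimageRequestsByRequestIdResponse.parse(
+ *     await res.json(),
+ *   )
+ * // `result.image` matches zSchemaImage; `processing_time_ms` is optional.
+ * console.log(result.image)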
+ */ +export const zGetHalfMoonAiAiFaceSwapFaceswapimageRequestsByRequestIdResponse = + zSchemaAiFaceSwapFaceswapimageOutput + +export const zGetBriaFiboEditReplaceObjectByTextRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetBriaFiboEditReplaceObjectByTextRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutBriaFiboEditReplaceObjectByTextRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutBriaFiboEditReplaceObjectByTextRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostBriaFiboEditReplaceObjectByTextData = z.object({ + body: zSchemaFiboEditReplaceObjectByTextInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostBriaFiboEditReplaceObjectByTextResponse = zSchemaQueueStatus + +export const zGetBriaFiboEditReplaceObjectByTextRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetBriaFiboEditReplaceObjectByTextRequestsByRequestIdResponse = + zSchemaFiboEditReplaceObjectByTextOutput + +export const zGetBriaFiboEditSketchToColoredImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetBriaFiboEditSketchToColoredImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutBriaFiboEditSketchToColoredImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutBriaFiboEditSketchToColoredImageRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostBriaFiboEditSketchToColoredImageData = z.object({ + body: zSchemaFiboEditSketchToColoredImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostBriaFiboEditSketchToColoredImageResponse = zSchemaQueueStatus + +export const zGetBriaFiboEditSketchToColoredImageRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetBriaFiboEditSketchToColoredImageRequestsByRequestIdResponse = + zSchemaFiboEditSketchToColoredImageOutput + +export const zGetBriaFiboEditRestoreRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetBriaFiboEditRestoreRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutBriaFiboEditRestoreRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutBriaFiboEditRestoreRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostBriaFiboEditRestoreData = z.object({ + body: zSchemaFiboEditRestoreInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostBriaFiboEditRestoreResponse = zSchemaQueueStatus + +export const zGetBriaFiboEditRestoreRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetBriaFiboEditRestoreRequestsByRequestIdResponse = + zSchemaFiboEditRestoreOutput + +export const zGetBriaFiboEditReseasonRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetBriaFiboEditReseasonRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutBriaFiboEditReseasonRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutBriaFiboEditReseasonRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostBriaFiboEditReseasonData = z.object({ + body: zSchemaFiboEditReseasonInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostBriaFiboEditReseasonResponse = zSchemaQueueStatus + +export const zGetBriaFiboEditReseasonRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetBriaFiboEditReseasonRequestsByRequestIdResponse = + zSchemaFiboEditReseasonOutput + +export const zGetBriaFiboEditRelightRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetBriaFiboEditRelightRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutBriaFiboEditRelightRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutBriaFiboEditRelightRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostBriaFiboEditRelightData = z.object({ + body: zSchemaFiboEditRelightInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostBriaFiboEditRelightResponse = zSchemaQueueStatus + +export const zGetBriaFiboEditRelightRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetBriaFiboEditRelightRequestsByRequestIdResponse = + zSchemaFiboEditRelightOutput + +export const zGetBriaFiboEditRestyleRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetBriaFiboEditRestyleRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutBriaFiboEditRestyleRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutBriaFiboEditRestyleRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostBriaFiboEditRestyleData = z.object({ + body: zSchemaFiboEditRestyleInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostBriaFiboEditRestyleResponse = zSchemaQueueStatus + +export const zGetBriaFiboEditRestyleRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetBriaFiboEditRestyleRequestsByRequestIdResponse = + zSchemaFiboEditRestyleOutput + +export const zGetBriaFiboEditRewriteTextRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetBriaFiboEditRewriteTextRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutBriaFiboEditRewriteTextRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutBriaFiboEditRewriteTextRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostBriaFiboEditRewriteTextData = z.object({ + body: zSchemaFiboEditRewriteTextInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
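+ *
+ * Static types can be derived from these schemas with plain Zod; nothing
+ * endpoint-specific is assumed here.
+ *
+ * @example
+ * type RewriteTextBody = z.infer<typeof zSchemaFiboEditRewriteTextInput>
+ * type QueueSubmitResponse = z.infer<
+ *   typeof zPostBriaFiboEditRewriteTextResponse
+ * >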
+ */ +export const zPostBriaFiboEditRewriteTextResponse = zSchemaQueueStatus + +export const zGetBriaFiboEditRewriteTextRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetBriaFiboEditRewriteTextRequestsByRequestIdResponse = + zSchemaFiboEditRewriteTextOutput + +export const zGetBriaFiboEditEraseByTextRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetBriaFiboEditEraseByTextRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutBriaFiboEditEraseByTextRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutBriaFiboEditEraseByTextRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostBriaFiboEditEraseByTextData = z.object({ + body: zSchemaFiboEditEraseByTextInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostBriaFiboEditEraseByTextResponse = zSchemaQueueStatus + +export const zGetBriaFiboEditEraseByTextRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetBriaFiboEditEraseByTextRequestsByRequestIdResponse = + zSchemaFiboEditEraseByTextOutput + +export const zGetBriaFiboEditEditRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetBriaFiboEditEditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutBriaFiboEditEditRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutBriaFiboEditEditRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostBriaFiboEditEditData = z.object({ + body: zSchemaFiboEditEditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostBriaFiboEditEditResponse = zSchemaQueueStatus + +export const zGetBriaFiboEditEditRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetBriaFiboEditEditRequestsByRequestIdResponse = + zSchemaFiboEditEditOutput + +export const zGetBriaFiboEditAddObjectByTextRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetBriaFiboEditAddObjectByTextRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutBriaFiboEditAddObjectByTextRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutBriaFiboEditAddObjectByTextRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostBriaFiboEditAddObjectByTextData = z.object({ + body: zSchemaFiboEditAddObjectByTextInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostBriaFiboEditAddObjectByTextResponse = zSchemaQueueStatus + +export const zGetBriaFiboEditAddObjectByTextRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetBriaFiboEditAddObjectByTextRequestsByRequestIdResponse = + zSchemaFiboEditAddObjectByTextOutput + +export const zGetBriaFiboEditBlendRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
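+ *
+ * When handling untrusted payloads, `safeParse` avoids throwing; this is
+ * plain Zod usage, with `payload` standing in for the raw JSON returned by
+ * a status poll.
+ *
+ * @example
+ * const parsed =
+ *   zGetBriaFiboEditBlendRequestsByRequestIdStatusResponse.safeParse(payload)
+ * if (!parsed.success) {
+ *   console.error(parsed.error.issues)
+ * } else if (parsed.data.status === 'IN_QUEUE') {
+ *   console.log('queue position:', parsed.data.queue_position)
+ * }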
+ */ +export const zGetBriaFiboEditBlendRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutBriaFiboEditBlendRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutBriaFiboEditBlendRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostBriaFiboEditBlendData = z.object({ + body: zSchemaFiboEditBlendInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostBriaFiboEditBlendResponse = zSchemaQueueStatus + +export const zGetBriaFiboEditBlendRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetBriaFiboEditBlendRequestsByRequestIdResponse = + zSchemaFiboEditBlendOutput + +export const zGetBriaFiboEditColorizeRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetBriaFiboEditColorizeRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutBriaFiboEditColorizeRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutBriaFiboEditColorizeRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostBriaFiboEditColorizeData = z.object({ + body: zSchemaFiboEditColorizeInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostBriaFiboEditColorizeResponse = zSchemaQueueStatus + +export const zGetBriaFiboEditColorizeRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetBriaFiboEditColorizeRequestsByRequestIdResponse = + zSchemaFiboEditColorizeOutput + +export const zGetFalAiFlux2Klein9bBaseEditLoraRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFlux2Klein9bBaseEditLoraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2Klein9bBaseEditLoraRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux2Klein9bBaseEditLoraRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2Klein9bBaseEditLoraData = z.object({ + body: zSchemaFlux2Klein9bBaseEditLoraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux2Klein9bBaseEditLoraResponse = zSchemaQueueStatus + +export const zGetFalAiFlux2Klein9bBaseEditLoraRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2Klein9bBaseEditLoraRequestsByRequestIdResponse = + zSchemaFlux2Klein9bBaseEditLoraOutput + +export const zGetFalAiFlux2Klein4bBaseEditLoraRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFlux2Klein4bBaseEditLoraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2Klein4bBaseEditLoraRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux2Klein4bBaseEditLoraRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2Klein4bBaseEditLoraData = z.object({ + body: zSchemaFlux2Klein4bBaseEditLoraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiFlux2Klein4bBaseEditLoraResponse = zSchemaQueueStatus + +export const zGetFalAiFlux2Klein4bBaseEditLoraRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2Klein4bBaseEditLoraRequestsByRequestIdResponse = + zSchemaFlux2Klein4bBaseEditLoraOutput + +export const zGetFalAiFlux2Klein4bBaseEditRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFlux2Klein4bBaseEditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2Klein4bBaseEditRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux2Klein4bBaseEditRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2Klein4bBaseEditData = z.object({ + body: zSchemaFlux2Klein4bBaseEditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux2Klein4bBaseEditResponse = zSchemaQueueStatus + +export const zGetFalAiFlux2Klein4bBaseEditRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2Klein4bBaseEditRequestsByRequestIdResponse = + zSchemaFlux2Klein4bBaseEditOutput + +export const zGetFalAiFlux2Klein9bBaseEditRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFlux2Klein9bBaseEditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2Klein9bBaseEditRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
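+ *
+ * Cancellation sketch; `cancelUrl` is assumed to be the `cancel_url` carried in
+ * a prior queue status payload for this request.
+ *
+ * @example
+ * const res = await fetch(cancelUrl, {
+ *   method: 'PUT',
+ *   headers: { Authorization: `Key ${process.env.FAL_KEY}` },
+ * })
+ * const { success } =
+ *   zPutFalAiFlux2Klein9bBaseEditRequestsByRequestIdCancelResponse.parse(
+ *     await res.json(),
+ *   )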
+ */ +export const zPutFalAiFlux2Klein9bBaseEditRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2Klein9bBaseEditData = z.object({ + body: zSchemaFlux2Klein9bBaseEditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux2Klein9bBaseEditResponse = zSchemaQueueStatus + +export const zGetFalAiFlux2Klein9bBaseEditRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2Klein9bBaseEditRequestsByRequestIdResponse = + zSchemaFlux2Klein9bBaseEditOutput + +export const zGetFalAiFlux2Klein4bEditRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFlux2Klein4bEditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2Klein4bEditRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux2Klein4bEditRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2Klein4bEditData = z.object({ + body: zSchemaFlux2Klein4bEditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux2Klein4bEditResponse = zSchemaQueueStatus + +export const zGetFalAiFlux2Klein4bEditRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2Klein4bEditRequestsByRequestIdResponse = + zSchemaFlux2Klein4bEditOutput + +export const zGetFalAiFlux2Klein9bEditRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
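+ *
+ * Polling sketch; `statusUrl` is assumed to be the `status_url` from the submit
+ * response, with `?logs=1` appended to request log lines alongside the status.
+ *
+ * @example
+ * const res = await fetch(`${statusUrl}?logs=1`, {
+ *   headers: { Authorization: `Key ${process.env.FAL_KEY}` },
+ * })
+ * const status =
+ *   zGetFalAiFlux2Klein9bEditRequestsByRequestIdStatusResponse.parse(
+ *     await res.json(),
+ *   )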
+ */ +export const zGetFalAiFlux2Klein9bEditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2Klein9bEditRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux2Klein9bEditRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2Klein9bEditData = z.object({ + body: zSchemaFlux2Klein9bEditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux2Klein9bEditResponse = zSchemaQueueStatus + +export const zGetFalAiFlux2Klein9bEditRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2Klein9bEditRequestsByRequestIdResponse = + zSchemaFlux2Klein9bEditOutput + +export const zGetFalAiGlmImageImageToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiGlmImageImageToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiGlmImageImageToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiGlmImageImageToImageRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiGlmImageImageToImageData = z.object({ + body: zSchemaGlmImageImageToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiGlmImageImageToImageResponse = zSchemaQueueStatus + +export const zGetFalAiGlmImageImageToImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
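+ *
+ * Defensive variant: `safeParse` reports a validation failure instead of
+ * throwing when the payload drifts from the generated schema; `rawJson` is a
+ * placeholder for the fetched result body.
+ *
+ * @example
+ * const parsed =
+ *   zGetFalAiGlmImageImageToImageRequestsByRequestIdResponse.safeParse(rawJson)
+ * if (!parsed.success) console.error(parsed.error.issues)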
+ */ +export const zGetFalAiGlmImageImageToImageRequestsByRequestIdResponse = + zSchemaGlmImageImageToImageOutput + +export const zGetFalAiQwenImageEdit2511MultipleAnglesRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiQwenImageEdit2511MultipleAnglesRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageEdit2511MultipleAnglesRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwenImageEdit2511MultipleAnglesRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageEdit2511MultipleAnglesData = z.object({ + body: zSchemaQwenImageEdit2511MultipleAnglesInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiQwenImageEdit2511MultipleAnglesResponse = + zSchemaQueueStatus + +export const zGetFalAiQwenImageEdit2511MultipleAnglesRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiQwenImageEdit2511MultipleAnglesRequestsByRequestIdResponse = + zSchemaQwenImageEdit2511MultipleAnglesOutput + +export const zGetFalAiQwenImageEdit2511LoraRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiQwenImageEdit2511LoraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageEdit2511LoraRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiQwenImageEdit2511LoraRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageEdit2511LoraData = z.object({ + body: zSchemaQwenImageEdit2511LoraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiQwenImageEdit2511LoraResponse = zSchemaQueueStatus + +export const zGetFalAiQwenImageEdit2511LoraRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiQwenImageEdit2511LoraRequestsByRequestIdResponse = + zSchemaQwenImageEdit2511LoraOutput + +export const zGetHalfMoonAiAiHomeStyleRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetHalfMoonAiAiHomeStyleRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutHalfMoonAiAiHomeStyleRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutHalfMoonAiAiHomeStyleRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostHalfMoonAiAiHomeStyleData = z.object({ + body: zSchemaAiHomeStyleInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostHalfMoonAiAiHomeStyleResponse = zSchemaQueueStatus + +export const zGetHalfMoonAiAiHomeStyleRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetHalfMoonAiAiHomeStyleRequestsByRequestIdResponse = + zSchemaAiHomeStyleOutput + +export const zGetHalfMoonAiAiHomeEditRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
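+ *
+ * Status-gating sketch; `rawStatus` stands in for a fetched status payload, and
+ * the `response_url` is only followed once the queue reports completion.
+ *
+ * @example
+ * const s =
+ *   zGetHalfMoonAiAiHomeEditRequestsByRequestIdStatusResponse.parse(rawStatus)
+ * if (s.status === 'COMPLETED' && s.response_url) {
+ *   // fetch(s.response_url) to retrieve the result payload
+ * }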
+ */ +export const zGetHalfMoonAiAiHomeEditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutHalfMoonAiAiHomeEditRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutHalfMoonAiAiHomeEditRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostHalfMoonAiAiHomeEditData = z.object({ + body: zSchemaAiHomeEditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostHalfMoonAiAiHomeEditResponse = zSchemaQueueStatus + +export const zGetHalfMoonAiAiHomeEditRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetHalfMoonAiAiHomeEditRequestsByRequestIdResponse = + zSchemaAiHomeEditOutput + +export const zGetFalAiQwenImageLayeredLoraRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiQwenImageLayeredLoraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageLayeredLoraRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwenImageLayeredLoraRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageLayeredLoraData = z.object({ + body: zSchemaQwenImageLayeredLoraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiQwenImageLayeredLoraResponse = zSchemaQueueStatus + +export const zGetFalAiQwenImageLayeredLoraRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiQwenImageLayeredLoraRequestsByRequestIdResponse = + zSchemaQwenImageLayeredLoraOutput + +export const zGetWanV26ImageToImageRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetWanV26ImageToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutWanV26ImageToImageRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutWanV26ImageToImageRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostWanV26ImageToImageData = z.object({ + body: zSchemaV26ImageToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostWanV26ImageToImageResponse = zSchemaQueueStatus + +export const zGetWanV26ImageToImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetWanV26ImageToImageRequestsByRequestIdResponse = + zSchemaV26ImageToImageOutput + +export const zGetFalAiQwenImageEdit2511RequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiQwenImageEdit2511RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageEdit2511RequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwenImageEdit2511RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageEdit2511Data = z.object({ + body: zSchemaQwenImageEdit2511Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
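+ *
+ * The schemas double as a source of static types; a sketch via `z.infer`:
+ *
+ * @example
+ * type QwenImageEdit2511Submit = z.infer<typeof zPostFalAiQwenImageEdit2511Data>
+ * type QwenImageEdit2511Status = z.infer<
+ *   typeof zPostFalAiQwenImageEdit2511Response
+ * >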
+ */ +export const zPostFalAiQwenImageEdit2511Response = zSchemaQueueStatus + +export const zGetFalAiQwenImageEdit2511RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiQwenImageEdit2511RequestsByRequestIdResponse = + zSchemaQwenImageEdit2511Output + +export const zGetFalAiQwenImageLayeredRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiQwenImageLayeredRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageLayeredRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwenImageLayeredRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageLayeredData = z.object({ + body: zSchemaQwenImageLayeredInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiQwenImageLayeredResponse = zSchemaQueueStatus + +export const zGetFalAiQwenImageLayeredRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiQwenImageLayeredRequestsByRequestIdResponse = + zSchemaQwenImageLayeredOutput + +export const zGetFalAiZImageTurboInpaintLoraRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiZImageTurboInpaintLoraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiZImageTurboInpaintLoraRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiZImageTurboInpaintLoraRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiZImageTurboInpaintLoraData = z.object({ + body: zSchemaZImageTurboInpaintLoraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiZImageTurboInpaintLoraResponse = zSchemaQueueStatus + +export const zGetFalAiZImageTurboInpaintLoraRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiZImageTurboInpaintLoraRequestsByRequestIdResponse = + zSchemaZImageTurboInpaintLoraOutput + +export const zGetFalAiZImageTurboInpaintRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiZImageTurboInpaintRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiZImageTurboInpaintRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiZImageTurboInpaintRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiZImageTurboInpaintData = z.object({ + body: zSchemaZImageTurboInpaintInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiZImageTurboInpaintResponse = zSchemaQueueStatus + +export const zGetFalAiZImageTurboInpaintRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiZImageTurboInpaintRequestsByRequestIdResponse = + zSchemaZImageTurboInpaintOutput + +export const zGetFalAiFlux2FlashEditRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiFlux2FlashEditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2FlashEditRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux2FlashEditRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2FlashEditData = z.object({ + body: zSchemaFlux2FlashEditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux2FlashEditResponse = zSchemaQueueStatus + +export const zGetFalAiFlux2FlashEditRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2FlashEditRequestsByRequestIdResponse = + zSchemaFlux2FlashEditOutput + +export const zGetFalAiGptImage15EditRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiGptImage15EditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiGptImage15EditRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiGptImage15EditRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiGptImage15EditData = z.object({ + body: zSchemaGptImage15EditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiGptImage15EditResponse = zSchemaQueueStatus + +export const zGetFalAiGptImage15EditRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiGptImage15EditRequestsByRequestIdResponse = + zSchemaGptImage15EditOutput + +export const zGetFalAiFlux2TurboEditRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFlux2TurboEditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2TurboEditRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux2TurboEditRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2TurboEditData = z.object({ + body: zSchemaFlux2TurboEditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux2TurboEditResponse = zSchemaQueueStatus + +export const zGetFalAiFlux2TurboEditRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2TurboEditRequestsByRequestIdResponse = + zSchemaFlux2TurboEditOutput + +export const zGetFalAiFlux2MaxEditRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFlux2MaxEditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2MaxEditRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux2MaxEditRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2MaxEditData = z.object({ + body: zSchemaFlux2MaxEditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
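+ *
+ * Submit sketch; `submitUrl` is a placeholder for this endpoint's queue submit
+ * URL and `input` for a value matching zSchemaFlux2MaxEditInput. Validating the
+ * request body before sending surfaces shape errors locally:
+ *
+ * @example
+ * const { body } = zPostFalAiFlux2MaxEditData.parse({ body: input })
+ * const res = await fetch(submitUrl, {
+ *   method: 'POST',
+ *   headers: {
+ *     Authorization: `Key ${process.env.FAL_KEY}`,
+ *     'Content-Type': 'application/json',
+ *   },
+ *   body: JSON.stringify(body),
+ * })
+ * const queued = zPostFalAiFlux2MaxEditResponse.parse(await res.json())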
+ */ +export const zPostFalAiFlux2MaxEditResponse = zSchemaQueueStatus + +export const zGetFalAiFlux2MaxEditRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2MaxEditRequestsByRequestIdResponse = + zSchemaFlux2MaxEditOutput + +export const zGetHalfMoonAiAiBabyAndAgingGeneratorMultiRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetHalfMoonAiAiBabyAndAgingGeneratorMultiRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutHalfMoonAiAiBabyAndAgingGeneratorMultiRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutHalfMoonAiAiBabyAndAgingGeneratorMultiRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostHalfMoonAiAiBabyAndAgingGeneratorMultiData = z.object({ + body: zSchemaAiBabyAndAgingGeneratorMultiInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostHalfMoonAiAiBabyAndAgingGeneratorMultiResponse = + zSchemaQueueStatus + +export const zGetHalfMoonAiAiBabyAndAgingGeneratorMultiRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetHalfMoonAiAiBabyAndAgingGeneratorMultiRequestsByRequestIdResponse = + zSchemaAiBabyAndAgingGeneratorMultiOutput + +export const zGetHalfMoonAiAiBabyAndAgingGeneratorSingleRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetHalfMoonAiAiBabyAndAgingGeneratorSingleRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutHalfMoonAiAiBabyAndAgingGeneratorSingleRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutHalfMoonAiAiBabyAndAgingGeneratorSingleRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostHalfMoonAiAiBabyAndAgingGeneratorSingleData = z.object({ + body: zSchemaAiBabyAndAgingGeneratorSingleInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostHalfMoonAiAiBabyAndAgingGeneratorSingleResponse = + zSchemaQueueStatus + +export const zGetHalfMoonAiAiBabyAndAgingGeneratorSingleRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetHalfMoonAiAiBabyAndAgingGeneratorSingleRequestsByRequestIdResponse = + zSchemaAiBabyAndAgingGeneratorSingleOutput + +export const zGetFalAiQwenImageEdit2509LoraGalleryShirtDesignRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiQwenImageEdit2509LoraGalleryShirtDesignRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageEdit2509LoraGalleryShirtDesignRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwenImageEdit2509LoraGalleryShirtDesignRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageEdit2509LoraGalleryShirtDesignData = z.object({ + body: zSchemaQwenImageEdit2509LoraGalleryShirtDesignInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiQwenImageEdit2509LoraGalleryShirtDesignResponse = + zSchemaQueueStatus + +export const zGetFalAiQwenImageEdit2509LoraGalleryShirtDesignRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiQwenImageEdit2509LoraGalleryShirtDesignRequestsByRequestIdResponse = + zSchemaQwenImageEdit2509LoraGalleryShirtDesignOutput + +export const zGetFalAiQwenImageEdit2509LoraGalleryRemoveLightingRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiQwenImageEdit2509LoraGalleryRemoveLightingRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageEdit2509LoraGalleryRemoveLightingRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwenImageEdit2509LoraGalleryRemoveLightingRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageEdit2509LoraGalleryRemoveLightingData = + z.object({ + body: zSchemaQwenImageEdit2509LoraGalleryRemoveLightingInput, + path: z.optional(z.never()), + query: z.optional(z.never()), + }) + +/** + * The request status. + */ +export const zPostFalAiQwenImageEdit2509LoraGalleryRemoveLightingResponse = + zSchemaQueueStatus + +export const zGetFalAiQwenImageEdit2509LoraGalleryRemoveLightingRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiQwenImageEdit2509LoraGalleryRemoveLightingRequestsByRequestIdResponse = + zSchemaQwenImageEdit2509LoraGalleryRemoveLightingOutput + +export const zGetFalAiQwenImageEdit2509LoraGalleryRemoveElementRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiQwenImageEdit2509LoraGalleryRemoveElementRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageEdit2509LoraGalleryRemoveElementRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiQwenImageEdit2509LoraGalleryRemoveElementRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageEdit2509LoraGalleryRemoveElementData = z.object( + { + body: zSchemaQwenImageEdit2509LoraGalleryRemoveElementInput, + path: z.optional(z.never()), + query: z.optional(z.never()), + }, +) + +/** + * The request status. + */ +export const zPostFalAiQwenImageEdit2509LoraGalleryRemoveElementResponse = + zSchemaQueueStatus + +export const zGetFalAiQwenImageEdit2509LoraGalleryRemoveElementRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiQwenImageEdit2509LoraGalleryRemoveElementRequestsByRequestIdResponse = + zSchemaQwenImageEdit2509LoraGalleryRemoveElementOutput + +export const zGetFalAiQwenImageEdit2509LoraGalleryLightingRestorationRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiQwenImageEdit2509LoraGalleryLightingRestorationRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageEdit2509LoraGalleryLightingRestorationRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwenImageEdit2509LoraGalleryLightingRestorationRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageEdit2509LoraGalleryLightingRestorationData = + z.object({ + body: zSchemaQwenImageEdit2509LoraGalleryLightingRestorationInput, + path: z.optional(z.never()), + query: z.optional(z.never()), + }) + +/** + * The request status. + */ +export const zPostFalAiQwenImageEdit2509LoraGalleryLightingRestorationResponse = + zSchemaQueueStatus + +export const zGetFalAiQwenImageEdit2509LoraGalleryLightingRestorationRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiQwenImageEdit2509LoraGalleryLightingRestorationRequestsByRequestIdResponse = + zSchemaQwenImageEdit2509LoraGalleryLightingRestorationOutput + +export const zGetFalAiQwenImageEdit2509LoraGalleryIntegrateProductRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiQwenImageEdit2509LoraGalleryIntegrateProductRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageEdit2509LoraGalleryIntegrateProductRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwenImageEdit2509LoraGalleryIntegrateProductRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageEdit2509LoraGalleryIntegrateProductData = + z.object({ + body: zSchemaQwenImageEdit2509LoraGalleryIntegrateProductInput, + path: z.optional(z.never()), + query: z.optional(z.never()), + }) + +/** + * The request status. + */ +export const zPostFalAiQwenImageEdit2509LoraGalleryIntegrateProductResponse = + zSchemaQueueStatus + +export const zGetFalAiQwenImageEdit2509LoraGalleryIntegrateProductRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiQwenImageEdit2509LoraGalleryIntegrateProductRequestsByRequestIdResponse = + zSchemaQwenImageEdit2509LoraGalleryIntegrateProductOutput + +export const zGetFalAiQwenImageEdit2509LoraGalleryGroupPhotoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiQwenImageEdit2509LoraGalleryGroupPhotoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageEdit2509LoraGalleryGroupPhotoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiQwenImageEdit2509LoraGalleryGroupPhotoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageEdit2509LoraGalleryGroupPhotoData = z.object({ + body: zSchemaQwenImageEdit2509LoraGalleryGroupPhotoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiQwenImageEdit2509LoraGalleryGroupPhotoResponse = + zSchemaQueueStatus + +export const zGetFalAiQwenImageEdit2509LoraGalleryGroupPhotoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiQwenImageEdit2509LoraGalleryGroupPhotoRequestsByRequestIdResponse = + zSchemaQwenImageEdit2509LoraGalleryGroupPhotoOutput + +export const zGetFalAiQwenImageEdit2509LoraGalleryFaceToFullPortraitRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiQwenImageEdit2509LoraGalleryFaceToFullPortraitRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageEdit2509LoraGalleryFaceToFullPortraitRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwenImageEdit2509LoraGalleryFaceToFullPortraitRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageEdit2509LoraGalleryFaceToFullPortraitData = + z.object({ + body: zSchemaQwenImageEdit2509LoraGalleryFaceToFullPortraitInput, + path: z.optional(z.never()), + query: z.optional(z.never()), + }) + +/** + * The request status. + */ +export const zPostFalAiQwenImageEdit2509LoraGalleryFaceToFullPortraitResponse = + zSchemaQueueStatus + +export const zGetFalAiQwenImageEdit2509LoraGalleryFaceToFullPortraitRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiQwenImageEdit2509LoraGalleryFaceToFullPortraitRequestsByRequestIdResponse = + zSchemaQwenImageEdit2509LoraGalleryFaceToFullPortraitOutput + +export const zGetFalAiQwenImageEdit2509LoraGalleryAddBackgroundRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiQwenImageEdit2509LoraGalleryAddBackgroundRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageEdit2509LoraGalleryAddBackgroundRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwenImageEdit2509LoraGalleryAddBackgroundRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageEdit2509LoraGalleryAddBackgroundData = z.object( + { + body: zSchemaQwenImageEdit2509LoraGalleryAddBackgroundInput, + path: z.optional(z.never()), + query: z.optional(z.never()), + }, +) + +/** + * The request status. + */ +export const zPostFalAiQwenImageEdit2509LoraGalleryAddBackgroundResponse = + zSchemaQueueStatus + +export const zGetFalAiQwenImageEdit2509LoraGalleryAddBackgroundRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiQwenImageEdit2509LoraGalleryAddBackgroundRequestsByRequestIdResponse = + zSchemaQwenImageEdit2509LoraGalleryAddBackgroundOutput + +export const zGetFalAiQwenImageEdit2509LoraGalleryNextSceneRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiQwenImageEdit2509LoraGalleryNextSceneRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageEdit2509LoraGalleryNextSceneRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiQwenImageEdit2509LoraGalleryNextSceneRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageEdit2509LoraGalleryNextSceneData = z.object({ + body: zSchemaQwenImageEdit2509LoraGalleryNextSceneInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiQwenImageEdit2509LoraGalleryNextSceneResponse = + zSchemaQueueStatus + +export const zGetFalAiQwenImageEdit2509LoraGalleryNextSceneRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiQwenImageEdit2509LoraGalleryNextSceneRequestsByRequestIdResponse = + zSchemaQwenImageEdit2509LoraGalleryNextSceneOutput + +export const zGetFalAiQwenImageEdit2509LoraGalleryMultipleAnglesRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiQwenImageEdit2509LoraGalleryMultipleAnglesRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageEdit2509LoraGalleryMultipleAnglesRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwenImageEdit2509LoraGalleryMultipleAnglesRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageEdit2509LoraGalleryMultipleAnglesData = + z.object({ + body: zSchemaQwenImageEdit2509LoraGalleryMultipleAnglesInput, + path: z.optional(z.never()), + query: z.optional(z.never()), + }) + +/** + * The request status. + */ +export const zPostFalAiQwenImageEdit2509LoraGalleryMultipleAnglesResponse = + zSchemaQueueStatus + +export const zGetFalAiQwenImageEdit2509LoraGalleryMultipleAnglesRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiQwenImageEdit2509LoraGalleryMultipleAnglesRequestsByRequestIdResponse = + zSchemaQwenImageEdit2509LoraGalleryMultipleAnglesOutput + +export const zGetFalAiQwenImageEdit2509LoraRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiQwenImageEdit2509LoraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageEdit2509LoraRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwenImageEdit2509LoraRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageEdit2509LoraData = z.object({ + body: zSchemaQwenImageEdit2509LoraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiQwenImageEdit2509LoraResponse = zSchemaQueueStatus + +export const zGetFalAiQwenImageEdit2509LoraRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiQwenImageEdit2509LoraRequestsByRequestIdResponse = + zSchemaQwenImageEdit2509LoraOutput + +export const zGetFalAiQwenImageEdit2509RequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiQwenImageEdit2509RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageEdit2509RequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwenImageEdit2509RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageEdit2509Data = z.object({ + body: zSchemaQwenImageEdit2509Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
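+ *
+ * Illustrative only (not part of the generated output): a minimal submit
+ * sketch. The queue URL `https://queue.fal.run/fal-ai/qwen-image-edit-2509`,
+ * the `FAL_KEY` environment variable, and the request-body fields are
+ * assumptions; only the schema documented here comes from this file.
+ *
+ * @example
+ * // inside an async function
+ * const res = await fetch('https://queue.fal.run/fal-ai/qwen-image-edit-2509', {
+ *   method: 'POST',
+ *   headers: {
+ *     Authorization: `Key ${process.env.FAL_KEY}`,
+ *     'Content-Type': 'application/json',
+ *   },
+ *   // placeholder payload; see the corresponding Input schema for real fields
+ *   body: JSON.stringify({ prompt: '...' }),
+ * })
+ * // narrows the raw JSON to the queue-status shape, or throws on mismatch
+ * const queued = zPostFalAiQwenImageEdit2509Response.parse(await res.json())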
+ */ +export const zPostFalAiQwenImageEdit2509Response = zSchemaQueueStatus + +export const zGetFalAiQwenImageEdit2509RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiQwenImageEdit2509RequestsByRequestIdResponse = + zSchemaQwenImageEdit2509Output + +export const zGetFalAiQwenImageEditPlusLoraGalleryLightingRestorationRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiQwenImageEditPlusLoraGalleryLightingRestorationRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageEditPlusLoraGalleryLightingRestorationRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwenImageEditPlusLoraGalleryLightingRestorationRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageEditPlusLoraGalleryLightingRestorationData = + z.object({ + body: zSchemaQwenImageEditPlusLoraGalleryLightingRestorationInput, + path: z.optional(z.never()), + query: z.optional(z.never()), + }) + +/** + * The request status. + */ +export const zPostFalAiQwenImageEditPlusLoraGalleryLightingRestorationResponse = + zSchemaQueueStatus + +export const zGetFalAiQwenImageEditPlusLoraGalleryLightingRestorationRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiQwenImageEditPlusLoraGalleryLightingRestorationRequestsByRequestIdResponse = + zSchemaQwenImageEditPlusLoraGalleryLightingRestorationOutput + +export const zGetFalAiMoondream3PreviewSegmentRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
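+ *
+ * Illustrative only: a polling sketch. `queued.status_url` is assumed to be
+ * the URL returned when the request entered the queue, and `?logs=1` opts
+ * into log lines per the query schema above.
+ *
+ * @example
+ * const res = await fetch(`${queued.status_url}?logs=1`, {
+ *   headers: { Authorization: `Key ${process.env.FAL_KEY}` },
+ * })
+ * const status = zGetFalAiMoondream3PreviewSegmentRequestsByRequestIdStatusResponse.parse(
+ *   await res.json(),
+ * )
+ * // status.status is one of IN_QUEUE, IN_PROGRESS, COMPLETED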
+ */ +export const zGetFalAiMoondream3PreviewSegmentRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMoondream3PreviewSegmentRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiMoondream3PreviewSegmentRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMoondream3PreviewSegmentData = z.object({ + body: zSchemaMoondream3PreviewSegmentInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMoondream3PreviewSegmentResponse = zSchemaQueueStatus + +export const zGetFalAiMoondream3PreviewSegmentRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiMoondream3PreviewSegmentRequestsByRequestIdResponse = + zSchemaMoondream3PreviewSegmentOutput + +export const zGetFalAiStepxEdit2RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiStepxEdit2RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiStepxEdit2RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiStepxEdit2RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiStepxEdit2Data = z.object({ + body: zSchemaStepxEdit2Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiStepxEdit2Response = zSchemaQueueStatus + +export const zGetFalAiStepxEdit2RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiStepxEdit2RequestsByRequestIdResponse = + zSchemaStepxEdit2Output + +export const zGetFalAiZImageTurboControlnetLoraRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiZImageTurboControlnetLoraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiZImageTurboControlnetLoraRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiZImageTurboControlnetLoraRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiZImageTurboControlnetLoraData = z.object({ + body: zSchemaZImageTurboControlnetLoraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiZImageTurboControlnetLoraResponse = zSchemaQueueStatus + +export const zGetFalAiZImageTurboControlnetLoraRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiZImageTurboControlnetLoraRequestsByRequestIdResponse = + zSchemaZImageTurboControlnetLoraOutput + +export const zGetFalAiZImageTurboControlnetRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiZImageTurboControlnetRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiZImageTurboControlnetRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiZImageTurboControlnetRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiZImageTurboControlnetData = z.object({ + body: zSchemaZImageTurboControlnetInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiZImageTurboControlnetResponse = zSchemaQueueStatus + +export const zGetFalAiZImageTurboControlnetRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiZImageTurboControlnetRequestsByRequestIdResponse = + zSchemaZImageTurboControlnetOutput + +export const zGetFalAiZImageTurboImageToImageLoraRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiZImageTurboImageToImageLoraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiZImageTurboImageToImageLoraRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiZImageTurboImageToImageLoraRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiZImageTurboImageToImageLoraData = z.object({ + body: zSchemaZImageTurboImageToImageLoraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiZImageTurboImageToImageLoraResponse = zSchemaQueueStatus + +export const zGetFalAiZImageTurboImageToImageLoraRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiZImageTurboImageToImageLoraRequestsByRequestIdResponse = + zSchemaZImageTurboImageToImageLoraOutput + +export const zGetFalAiZImageTurboImageToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiZImageTurboImageToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiZImageTurboImageToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
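+ *
+ * Illustrative only: a cancellation sketch. `queued.cancel_url` is an
+ * assumption (the cancel URL reported by the queue, when present); note
+ * the schema only exposes an optional `success` flag.
+ *
+ * @example
+ * const res = await fetch(queued.cancel_url, {
+ *   method: 'PUT',
+ *   headers: { Authorization: `Key ${process.env.FAL_KEY}` },
+ * })
+ * const { success } =
+ *   zPutFalAiZImageTurboImageToImageRequestsByRequestIdCancelResponse.parse(
+ *     await res.json(),
+ *   )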
+ */ +export const zPutFalAiZImageTurboImageToImageRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiZImageTurboImageToImageData = z.object({ + body: zSchemaZImageTurboImageToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiZImageTurboImageToImageResponse = zSchemaQueueStatus + +export const zGetFalAiZImageTurboImageToImageRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. + */ +export const zGetFalAiZImageTurboImageToImageRequestsByRequestIdResponse = + zSchemaZImageTurboImageToImageOutput + +export const zGetFalAiLongcatImageEditRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiLongcatImageEditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLongcatImageEditRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiLongcatImageEditRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLongcatImageEditData = z.object({ + body: zSchemaLongcatImageEditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLongcatImageEditResponse = zSchemaQueueStatus + +export const zGetFalAiLongcatImageEditRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLongcatImageEditRequestsByRequestIdResponse = + zSchemaLongcatImageEditOutput + +export const zGetFalAiBytedanceSeedreamV45EditRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiBytedanceSeedreamV45EditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiBytedanceSeedreamV45EditRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiBytedanceSeedreamV45EditRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiBytedanceSeedreamV45EditData = z.object({ + body: zSchemaBytedanceSeedreamV45EditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiBytedanceSeedreamV45EditResponse = zSchemaQueueStatus + +export const zGetFalAiBytedanceSeedreamV45EditRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiBytedanceSeedreamV45EditRequestsByRequestIdResponse = + zSchemaBytedanceSeedreamV45EditOutput + +export const zGetFalAiViduQ2ReferenceToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiViduQ2ReferenceToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiViduQ2ReferenceToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiViduQ2ReferenceToImageRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiViduQ2ReferenceToImageData = z.object({ + body: zSchemaViduQ2ReferenceToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiViduQ2ReferenceToImageResponse = zSchemaQueueStatus + +export const zGetFalAiViduQ2ReferenceToImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiViduQ2ReferenceToImageRequestsByRequestIdResponse = + zSchemaViduQ2ReferenceToImageOutput + +export const zGetFalAiKlingImageO1RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiKlingImageO1RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingImageO1RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiKlingImageO1RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingImageO1Data = z.object({ + body: zSchemaKlingImageO1Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKlingImageO1Response = zSchemaQueueStatus + +export const zGetFalAiKlingImageO1RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiKlingImageO1RequestsByRequestIdResponse = + zSchemaKlingImageO1Output + +export const zGetFalAiFlux2LoraGalleryVirtualTryonRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFlux2LoraGalleryVirtualTryonRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2LoraGalleryVirtualTryonRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux2LoraGalleryVirtualTryonRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2LoraGalleryVirtualTryonData = z.object({ + body: zSchemaFlux2LoraGalleryVirtualTryonInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiFlux2LoraGalleryVirtualTryonResponse = zSchemaQueueStatus + +export const zGetFalAiFlux2LoraGalleryVirtualTryonRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2LoraGalleryVirtualTryonRequestsByRequestIdResponse = + zSchemaFlux2LoraGalleryVirtualTryonOutput + +export const zGetFalAiFlux2LoraGalleryMultipleAnglesRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFlux2LoraGalleryMultipleAnglesRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2LoraGalleryMultipleAnglesRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux2LoraGalleryMultipleAnglesRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2LoraGalleryMultipleAnglesData = z.object({ + body: zSchemaFlux2LoraGalleryMultipleAnglesInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux2LoraGalleryMultipleAnglesResponse = + zSchemaQueueStatus + +export const zGetFalAiFlux2LoraGalleryMultipleAnglesRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2LoraGalleryMultipleAnglesRequestsByRequestIdResponse = + zSchemaFlux2LoraGalleryMultipleAnglesOutput + +export const zGetFalAiFlux2LoraGalleryFaceToFullPortraitRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFlux2LoraGalleryFaceToFullPortraitRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2LoraGalleryFaceToFullPortraitRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiFlux2LoraGalleryFaceToFullPortraitRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2LoraGalleryFaceToFullPortraitData = z.object({ + body: zSchemaFlux2LoraGalleryFaceToFullPortraitInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux2LoraGalleryFaceToFullPortraitResponse = + zSchemaQueueStatus + +export const zGetFalAiFlux2LoraGalleryFaceToFullPortraitRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2LoraGalleryFaceToFullPortraitRequestsByRequestIdResponse = + zSchemaFlux2LoraGalleryFaceToFullPortraitOutput + +export const zGetFalAiFlux2LoraGalleryApartmentStagingRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFlux2LoraGalleryApartmentStagingRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2LoraGalleryApartmentStagingRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux2LoraGalleryApartmentStagingRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2LoraGalleryApartmentStagingData = z.object({ + body: zSchemaFlux2LoraGalleryApartmentStagingInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux2LoraGalleryApartmentStagingResponse = + zSchemaQueueStatus + +export const zGetFalAiFlux2LoraGalleryApartmentStagingRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiFlux2LoraGalleryApartmentStagingRequestsByRequestIdResponse = + zSchemaFlux2LoraGalleryApartmentStagingOutput + +export const zGetFalAiFlux2LoraGalleryAddBackgroundRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFlux2LoraGalleryAddBackgroundRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2LoraGalleryAddBackgroundRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux2LoraGalleryAddBackgroundRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2LoraGalleryAddBackgroundData = z.object({ + body: zSchemaFlux2LoraGalleryAddBackgroundInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux2LoraGalleryAddBackgroundResponse = + zSchemaQueueStatus + +export const zGetFalAiFlux2LoraGalleryAddBackgroundRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2LoraGalleryAddBackgroundRequestsByRequestIdResponse = + zSchemaFlux2LoraGalleryAddBackgroundOutput + +export const zGetClarityaiCrystalUpscalerRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetClarityaiCrystalUpscalerRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutClarityaiCrystalUpscalerRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutClarityaiCrystalUpscalerRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostClarityaiCrystalUpscalerData = z.object({ + body: zSchemaCrystalUpscalerInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostClarityaiCrystalUpscalerResponse = zSchemaQueueStatus + +export const zGetClarityaiCrystalUpscalerRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetClarityaiCrystalUpscalerRequestsByRequestIdResponse = + zSchemaCrystalUpscalerOutput + +export const zGetFalAiFlux2FlexEditRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFlux2FlexEditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2FlexEditRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux2FlexEditRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2FlexEditData = z.object({ + body: zSchemaFlux2FlexEditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux2FlexEditResponse = zSchemaQueueStatus + +export const zGetFalAiFlux2FlexEditRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2FlexEditRequestsByRequestIdResponse = + zSchemaFlux2FlexEditOutput + +export const zGetFalAiChronoEditLoraRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiChronoEditLoraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiChronoEditLoraRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiChronoEditLoraRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiChronoEditLoraData = z.object({ + body: zSchemaChronoEditLoraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiChronoEditLoraResponse = zSchemaQueueStatus + +export const zGetFalAiChronoEditLoraRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiChronoEditLoraRequestsByRequestIdResponse = + zSchemaChronoEditLoraOutput + +export const zGetFalAiChronoEditLoraGalleryPaintbrushRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiChronoEditLoraGalleryPaintbrushRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiChronoEditLoraGalleryPaintbrushRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiChronoEditLoraGalleryPaintbrushRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiChronoEditLoraGalleryPaintbrushData = z.object({ + body: zSchemaChronoEditLoraGalleryPaintbrushInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiChronoEditLoraGalleryPaintbrushResponse = + zSchemaQueueStatus + +export const zGetFalAiChronoEditLoraGalleryPaintbrushRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiChronoEditLoraGalleryPaintbrushRequestsByRequestIdResponse = + zSchemaChronoEditLoraGalleryPaintbrushOutput + +export const zGetFalAiChronoEditLoraGalleryUpscalerRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiChronoEditLoraGalleryUpscalerRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiChronoEditLoraGalleryUpscalerRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiChronoEditLoraGalleryUpscalerRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiChronoEditLoraGalleryUpscalerData = z.object({ + body: zSchemaChronoEditLoraGalleryUpscalerInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiChronoEditLoraGalleryUpscalerResponse = + zSchemaQueueStatus + +export const zGetFalAiChronoEditLoraGalleryUpscalerRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiChronoEditLoraGalleryUpscalerRequestsByRequestIdResponse = + zSchemaChronoEditLoraGalleryUpscalerOutput + +export const zGetFalAiSam3ImageRleRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiSam3ImageRleRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSam3ImageRleRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiSam3ImageRleRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSam3ImageRleData = z.object({ + body: zSchemaSam3ImageRleInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiSam3ImageRleResponse = zSchemaQueueStatus + +export const zGetFalAiSam3ImageRleRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
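+ *
+ * Illustrative only: a result-retrieval sketch. `status.response_url` is an
+ * assumption (the URL a COMPLETED status points at); parsing narrows the
+ * payload to this endpoint's output type.
+ *
+ * @example
+ * if (status.status === 'COMPLETED') {
+ *   const res = await fetch(status.response_url, {
+ *     headers: { Authorization: `Key ${process.env.FAL_KEY}` },
+ *   })
+ *   const output = zGetFalAiSam3ImageRleRequestsByRequestIdResponse.parse(
+ *     await res.json(),
+ *   )
+ * }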
+ */ +export const zGetFalAiSam3ImageRleRequestsByRequestIdResponse = + zSchemaSam3ImageRleOutput + +export const zGetFalAiSam3ImageRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiSam3ImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSam3ImageRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiSam3ImageRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSam3ImageData = z.object({ + body: zSchemaSam3ImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiSam3ImageResponse = zSchemaQueueStatus + +export const zGetFalAiSam3ImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSam3ImageRequestsByRequestIdResponse = + zSchemaSam3ImageOutput + +export const zGetFalAiGemini3ProImagePreviewEditRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiGemini3ProImagePreviewEditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiGemini3ProImagePreviewEditRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiGemini3ProImagePreviewEditRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiGemini3ProImagePreviewEditData = z.object({ + body: zSchemaGemini3ProImagePreviewEditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiGemini3ProImagePreviewEditResponse = zSchemaQueueStatus + +export const zGetFalAiGemini3ProImagePreviewEditRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiGemini3ProImagePreviewEditRequestsByRequestIdResponse = + zSchemaGemini3ProImagePreviewEditOutput + +export const zGetFalAiNanoBananaProEditRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiNanoBananaProEditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiNanoBananaProEditRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiNanoBananaProEditRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiNanoBananaProEditData = z.object({ + body: zSchemaNanoBananaProEditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiNanoBananaProEditResponse = zSchemaQueueStatus + +export const zGetFalAiNanoBananaProEditRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiNanoBananaProEditRequestsByRequestIdResponse = + zSchemaNanoBananaProEditOutput + +export const zGetFalAiQwenImageEditPlusLoraGalleryMultipleAnglesRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiQwenImageEditPlusLoraGalleryMultipleAnglesRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageEditPlusLoraGalleryMultipleAnglesRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiQwenImageEditPlusLoraGalleryMultipleAnglesRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageEditPlusLoraGalleryMultipleAnglesData = + z.object({ + body: zSchemaQwenImageEditPlusLoraGalleryMultipleAnglesInput, + path: z.optional(z.never()), + query: z.optional(z.never()), + }) + +/** + * The request status. + */ +export const zPostFalAiQwenImageEditPlusLoraGalleryMultipleAnglesResponse = + zSchemaQueueStatus + +export const zGetFalAiQwenImageEditPlusLoraGalleryMultipleAnglesRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiQwenImageEditPlusLoraGalleryMultipleAnglesRequestsByRequestIdResponse = + zSchemaQwenImageEditPlusLoraGalleryMultipleAnglesOutput + +export const zGetFalAiQwenImageEditPlusLoraGalleryShirtDesignRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiQwenImageEditPlusLoraGalleryShirtDesignRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageEditPlusLoraGalleryShirtDesignRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwenImageEditPlusLoraGalleryShirtDesignRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageEditPlusLoraGalleryShirtDesignData = z.object({ + body: zSchemaQwenImageEditPlusLoraGalleryShirtDesignInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiQwenImageEditPlusLoraGalleryShirtDesignResponse = + zSchemaQueueStatus + +export const zGetFalAiQwenImageEditPlusLoraGalleryShirtDesignRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiQwenImageEditPlusLoraGalleryShirtDesignRequestsByRequestIdResponse = + zSchemaQwenImageEditPlusLoraGalleryShirtDesignOutput + +export const zGetFalAiQwenImageEditPlusLoraGalleryRemoveLightingRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiQwenImageEditPlusLoraGalleryRemoveLightingRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageEditPlusLoraGalleryRemoveLightingRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwenImageEditPlusLoraGalleryRemoveLightingRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageEditPlusLoraGalleryRemoveLightingData = + z.object({ + body: zSchemaQwenImageEditPlusLoraGalleryRemoveLightingInput, + path: z.optional(z.never()), + query: z.optional(z.never()), + }) + +/** + * The request status. + */ +export const zPostFalAiQwenImageEditPlusLoraGalleryRemoveLightingResponse = + zSchemaQueueStatus + +export const zGetFalAiQwenImageEditPlusLoraGalleryRemoveLightingRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiQwenImageEditPlusLoraGalleryRemoveLightingRequestsByRequestIdResponse = + zSchemaQwenImageEditPlusLoraGalleryRemoveLightingOutput + +export const zGetFalAiQwenImageEditPlusLoraGalleryRemoveElementRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiQwenImageEditPlusLoraGalleryRemoveElementRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageEditPlusLoraGalleryRemoveElementRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiQwenImageEditPlusLoraGalleryRemoveElementRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageEditPlusLoraGalleryRemoveElementData = z.object( + { + body: zSchemaQwenImageEditPlusLoraGalleryRemoveElementInput, + path: z.optional(z.never()), + query: z.optional(z.never()), + }, +) + +/** + * The request status. + */ +export const zPostFalAiQwenImageEditPlusLoraGalleryRemoveElementResponse = + zSchemaQueueStatus + +export const zGetFalAiQwenImageEditPlusLoraGalleryRemoveElementRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiQwenImageEditPlusLoraGalleryRemoveElementRequestsByRequestIdResponse = + zSchemaQwenImageEditPlusLoraGalleryRemoveElementOutput + +export const zGetFalAiQwenImageEditPlusLoraGalleryNextSceneRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiQwenImageEditPlusLoraGalleryNextSceneRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageEditPlusLoraGalleryNextSceneRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwenImageEditPlusLoraGalleryNextSceneRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageEditPlusLoraGalleryNextSceneData = z.object({ + body: zSchemaQwenImageEditPlusLoraGalleryNextSceneInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiQwenImageEditPlusLoraGalleryNextSceneResponse = + zSchemaQueueStatus + +export const zGetFalAiQwenImageEditPlusLoraGalleryNextSceneRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiQwenImageEditPlusLoraGalleryNextSceneRequestsByRequestIdResponse = + zSchemaQwenImageEditPlusLoraGalleryNextSceneOutput + +export const zGetFalAiQwenImageEditPlusLoraGalleryIntegrateProductRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiQwenImageEditPlusLoraGalleryIntegrateProductRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageEditPlusLoraGalleryIntegrateProductRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwenImageEditPlusLoraGalleryIntegrateProductRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageEditPlusLoraGalleryIntegrateProductData = + z.object({ + body: zSchemaQwenImageEditPlusLoraGalleryIntegrateProductInput, + path: z.optional(z.never()), + query: z.optional(z.never()), + }) + +/** + * The request status. + */ +export const zPostFalAiQwenImageEditPlusLoraGalleryIntegrateProductResponse = + zSchemaQueueStatus + +export const zGetFalAiQwenImageEditPlusLoraGalleryIntegrateProductRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiQwenImageEditPlusLoraGalleryIntegrateProductRequestsByRequestIdResponse = + zSchemaQwenImageEditPlusLoraGalleryIntegrateProductOutput + +export const zGetFalAiQwenImageEditPlusLoraGalleryGroupPhotoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiQwenImageEditPlusLoraGalleryGroupPhotoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageEditPlusLoraGalleryGroupPhotoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiQwenImageEditPlusLoraGalleryGroupPhotoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageEditPlusLoraGalleryGroupPhotoData = z.object({ + body: zSchemaQwenImageEditPlusLoraGalleryGroupPhotoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiQwenImageEditPlusLoraGalleryGroupPhotoResponse = + zSchemaQueueStatus + +export const zGetFalAiQwenImageEditPlusLoraGalleryGroupPhotoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiQwenImageEditPlusLoraGalleryGroupPhotoRequestsByRequestIdResponse = + zSchemaQwenImageEditPlusLoraGalleryGroupPhotoOutput + +export const zGetFalAiQwenImageEditPlusLoraGalleryFaceToFullPortraitRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiQwenImageEditPlusLoraGalleryFaceToFullPortraitRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageEditPlusLoraGalleryFaceToFullPortraitRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwenImageEditPlusLoraGalleryFaceToFullPortraitRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageEditPlusLoraGalleryFaceToFullPortraitData = + z.object({ + body: zSchemaQwenImageEditPlusLoraGalleryFaceToFullPortraitInput, + path: z.optional(z.never()), + query: z.optional(z.never()), + }) + +/** + * The request status. + */ +export const zPostFalAiQwenImageEditPlusLoraGalleryFaceToFullPortraitResponse = + zSchemaQueueStatus + +export const zGetFalAiQwenImageEditPlusLoraGalleryFaceToFullPortraitRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiQwenImageEditPlusLoraGalleryFaceToFullPortraitRequestsByRequestIdResponse = + zSchemaQwenImageEditPlusLoraGalleryFaceToFullPortraitOutput + +export const zGetFalAiQwenImageEditPlusLoraGalleryAddBackgroundRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiQwenImageEditPlusLoraGalleryAddBackgroundRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageEditPlusLoraGalleryAddBackgroundRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwenImageEditPlusLoraGalleryAddBackgroundRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageEditPlusLoraGalleryAddBackgroundData = z.object( + { + body: zSchemaQwenImageEditPlusLoraGalleryAddBackgroundInput, + path: z.optional(z.never()), + query: z.optional(z.never()), + }, +) + +/** + * The request status. + */ +export const zPostFalAiQwenImageEditPlusLoraGalleryAddBackgroundResponse = + zSchemaQueueStatus + +export const zGetFalAiQwenImageEditPlusLoraGalleryAddBackgroundRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiQwenImageEditPlusLoraGalleryAddBackgroundRequestsByRequestIdResponse = + zSchemaQwenImageEditPlusLoraGalleryAddBackgroundOutput + +export const zGetFalAiReveFastRemixRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiReveFastRemixRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiReveFastRemixRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiReveFastRemixRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiReveFastRemixData = z.object({ + body: zSchemaReveFastRemixInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiReveFastRemixResponse = zSchemaQueueStatus + +export const zGetFalAiReveFastRemixRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiReveFastRemixRequestsByRequestIdResponse = + zSchemaReveFastRemixOutput + +export const zGetFalAiReveFastEditRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiReveFastEditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiReveFastEditRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiReveFastEditRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiReveFastEditData = z.object({ + body: zSchemaReveFastEditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiReveFastEditResponse = zSchemaQueueStatus + +export const zGetFalAiReveFastEditRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiReveFastEditRequestsByRequestIdResponse = + zSchemaReveFastEditOutput + +export const zGetFalAiImageAppsV2OutpaintRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
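+ *
+ * A minimal hand-written sketch (not emitted by the generator) of how these
+ * schemas could validate a status poll; `requestId` and `json` are assumed
+ * variables (the path parameter and the decoded response body).
+ *
+ * @example
+ * const req = zGetFalAiImageAppsV2OutpaintRequestsByRequestIdStatusData.parse({
+ *   path: { request_id: requestId },
+ *   query: { logs: 1 },
+ * })
+ * const status = zGetFalAiImageAppsV2OutpaintRequestsByRequestIdStatusResponse.parse(json)
+ * if (status.status === 'COMPLETED') {
+ *   // the result can now be fetched from status.response_url
+ * }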
+ */ +export const zGetFalAiImageAppsV2OutpaintRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageAppsV2OutpaintRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImageAppsV2OutpaintRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageAppsV2OutpaintData = z.object({ + body: zSchemaImageAppsV2OutpaintInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImageAppsV2OutpaintResponse = zSchemaQueueStatus + +export const zGetFalAiImageAppsV2OutpaintRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiImageAppsV2OutpaintRequestsByRequestIdResponse = + zSchemaImageAppsV2OutpaintOutput + +export const zGetFalAiFluxVisionUpscalerRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFluxVisionUpscalerRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxVisionUpscalerRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxVisionUpscalerRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxVisionUpscalerData = z.object({ + body: zSchemaFluxVisionUpscalerInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxVisionUpscalerResponse = zSchemaQueueStatus + +export const zGetFalAiFluxVisionUpscalerRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiFluxVisionUpscalerRequestsByRequestIdResponse = + zSchemaFluxVisionUpscalerOutput + +export const zGetFalAiEmu35ImageEditImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiEmu35ImageEditImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiEmu35ImageEditImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiEmu35ImageEditImageRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiEmu35ImageEditImageData = z.object({ + body: zSchemaEmu35ImageEditImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiEmu35ImageEditImageResponse = zSchemaQueueStatus + +export const zGetFalAiEmu35ImageEditImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiEmu35ImageEditImageRequestsByRequestIdResponse = + zSchemaEmu35ImageEditImageOutput + +export const zGetFalAiChronoEditRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiChronoEditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiChronoEditRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiChronoEditRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiChronoEditData = z.object({ + body: zSchemaChronoEditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
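+ *
+ * A minimal hand-written sketch (not emitted by the generator) of a queue
+ * submission; `input` and `json` are assumed variables (the request payload
+ * and the decoded response body).
+ *
+ * @example
+ * const data = zPostFalAiChronoEditData.parse({ body: input })
+ * // ...POST data.body to the model's fal queue endpoint...
+ * const queued = zPostFalAiChronoEditResponse.parse(json)
+ * console.log(queued.request_id, queued.status) // e.g. 'IN_QUEUE'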
+ */ +export const zPostFalAiChronoEditResponse = zSchemaQueueStatus + +export const zGetFalAiChronoEditRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiChronoEditRequestsByRequestIdResponse = + zSchemaChronoEditOutput + +export const zGetFalAiGptImage1MiniEditRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiGptImage1MiniEditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiGptImage1MiniEditRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiGptImage1MiniEditRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiGptImage1MiniEditData = z.object({ + body: zSchemaGptImage1MiniEditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiGptImage1MiniEditResponse = zSchemaQueueStatus + +export const zGetFalAiGptImage1MiniEditRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiGptImage1MiniEditRequestsByRequestIdResponse = + zSchemaGptImage1MiniEditOutput + +export const zGetFalAiReveRemixRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiReveRemixRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiReveRemixRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
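+ *
+ * A minimal hand-written sketch (not emitted by the generator); `json` is an
+ * assumed variable holding the decoded body of the PUT .../cancel response.
+ *
+ * @example
+ * const cancelled = zPutFalAiReveRemixRequestsByRequestIdCancelResponse.parse(json)
+ * if (cancelled.success) {
+ *   // the queued request was cancelled before it ran
+ * }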
+ */ +export const zPutFalAiReveRemixRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiReveRemixData = z.object({ + body: zSchemaReveRemixInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiReveRemixResponse = zSchemaQueueStatus + +export const zGetFalAiReveRemixRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiReveRemixRequestsByRequestIdResponse = + zSchemaReveRemixOutput + +export const zGetFalAiReveEditRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiReveEditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiReveEditRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiReveEditRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiReveEditData = z.object({ + body: zSchemaReveEditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiReveEditResponse = zSchemaQueueStatus + +export const zGetFalAiReveEditRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiReveEditRequestsByRequestIdResponse = + zSchemaReveEditOutput + +export const zGetFalAiImage2PixelRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiImage2PixelRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImage2PixelRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiImage2PixelRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImage2PixelData = z.object({ + body: zSchemaImage2PixelInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImage2PixelResponse = zSchemaQueueStatus + +export const zGetFalAiImage2PixelRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiImage2PixelRequestsByRequestIdResponse = + zSchemaImage2PixelOutput + +export const zGetFalAiDreamomni2EditRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiDreamomni2EditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiDreamomni2EditRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiDreamomni2EditRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiDreamomni2EditData = z.object({ + body: zSchemaDreamomni2EditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiDreamomni2EditResponse = zSchemaQueueStatus + +export const zGetFalAiDreamomni2EditRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
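+ *
+ * A minimal hand-written sketch (not emitted by the generator); `json` is an
+ * assumed variable holding the decoded result body fetched after a status
+ * poll reports COMPLETED.
+ *
+ * @example
+ * const result = zGetFalAiDreamomni2EditRequestsByRequestIdResponse.parse(json)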
+ */ +export const zGetFalAiDreamomni2EditRequestsByRequestIdResponse = + zSchemaDreamomni2EditOutput + +export const zGetFalAiQwenImageEditPlusLoraRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiQwenImageEditPlusLoraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageEditPlusLoraRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwenImageEditPlusLoraRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageEditPlusLoraData = z.object({ + body: zSchemaQwenImageEditPlusLoraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiQwenImageEditPlusLoraResponse = zSchemaQueueStatus + +export const zGetFalAiQwenImageEditPlusLoraRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiQwenImageEditPlusLoraRequestsByRequestIdResponse = + zSchemaQwenImageEditPlusLoraOutput + +export const zGetFalAiLucidfluxRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiLucidfluxRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLucidfluxRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiLucidfluxRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLucidfluxData = z.object({ + body: zSchemaLucidfluxInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiLucidfluxResponse = zSchemaQueueStatus + +export const zGetFalAiLucidfluxRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLucidfluxRequestsByRequestIdResponse = + zSchemaLucidfluxOutput + +export const zGetFalAiQwenImageEditImageToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiQwenImageEditImageToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageEditImageToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwenImageEditImageToImageRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageEditImageToImageData = z.object({ + body: zSchemaQwenImageEditImageToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiQwenImageEditImageToImageResponse = zSchemaQueueStatus + +export const zGetFalAiQwenImageEditImageToImageRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiQwenImageEditImageToImageRequestsByRequestIdResponse = + zSchemaQwenImageEditImageToImageOutput + +export const zGetFalAiWan25PreviewImageToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiWan25PreviewImageToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWan25PreviewImageToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiWan25PreviewImageToImageRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWan25PreviewImageToImageData = z.object({ + body: zSchemaWan25PreviewImageToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWan25PreviewImageToImageResponse = zSchemaQueueStatus + +export const zGetFalAiWan25PreviewImageToImageRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiWan25PreviewImageToImageRequestsByRequestIdResponse = + zSchemaWan25PreviewImageToImageOutput + +export const zGetFalAiQwenImageEditPlusRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiQwenImageEditPlusRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageEditPlusRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwenImageEditPlusRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageEditPlusData = z.object({ + body: zSchemaQwenImageEditPlusInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiQwenImageEditPlusResponse = zSchemaQueueStatus + +export const zGetFalAiQwenImageEditPlusRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiQwenImageEditPlusRequestsByRequestIdResponse = + zSchemaQwenImageEditPlusOutput + +export const zGetFalAiSeedvrUpscaleImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiSeedvrUpscaleImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSeedvrUpscaleImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiSeedvrUpscaleImageRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSeedvrUpscaleImageData = z.object({ + body: zSchemaSeedvrUpscaleImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiSeedvrUpscaleImageResponse = zSchemaQueueStatus + +export const zGetFalAiSeedvrUpscaleImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSeedvrUpscaleImageRequestsByRequestIdResponse = + zSchemaSeedvrUpscaleImageOutput + +export const zGetFalAiImageAppsV2ProductHoldingRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImageAppsV2ProductHoldingRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageAppsV2ProductHoldingRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImageAppsV2ProductHoldingRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageAppsV2ProductHoldingData = z.object({ + body: zSchemaImageAppsV2ProductHoldingInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImageAppsV2ProductHoldingResponse = zSchemaQueueStatus + +export const zGetFalAiImageAppsV2ProductHoldingRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiImageAppsV2ProductHoldingRequestsByRequestIdResponse = + zSchemaImageAppsV2ProductHoldingOutput + +export const zGetFalAiImageAppsV2ProductPhotographyRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImageAppsV2ProductPhotographyRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageAppsV2ProductPhotographyRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImageAppsV2ProductPhotographyRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageAppsV2ProductPhotographyData = z.object({ + body: zSchemaImageAppsV2ProductPhotographyInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImageAppsV2ProductPhotographyResponse = + zSchemaQueueStatus + +export const zGetFalAiImageAppsV2ProductPhotographyRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiImageAppsV2ProductPhotographyRequestsByRequestIdResponse = + zSchemaImageAppsV2ProductPhotographyOutput + +export const zGetFalAiImageAppsV2VirtualTryOnRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImageAppsV2VirtualTryOnRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageAppsV2VirtualTryOnRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiImageAppsV2VirtualTryOnRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageAppsV2VirtualTryOnData = z.object({ + body: zSchemaImageAppsV2VirtualTryOnInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImageAppsV2VirtualTryOnResponse = zSchemaQueueStatus + +export const zGetFalAiImageAppsV2VirtualTryOnRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. + */ +export const zGetFalAiImageAppsV2VirtualTryOnRequestsByRequestIdResponse = + zSchemaImageAppsV2VirtualTryOnOutput + +export const zGetFalAiImageAppsV2TextureTransformRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImageAppsV2TextureTransformRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageAppsV2TextureTransformRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImageAppsV2TextureTransformRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageAppsV2TextureTransformData = z.object({ + body: zSchemaImageAppsV2TextureTransformInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImageAppsV2TextureTransformResponse = zSchemaQueueStatus + +export const zGetFalAiImageAppsV2TextureTransformRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiImageAppsV2TextureTransformRequestsByRequestIdResponse = + zSchemaImageAppsV2TextureTransformOutput + +export const zGetFalAiImageAppsV2RelightingRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiImageAppsV2RelightingRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageAppsV2RelightingRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImageAppsV2RelightingRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageAppsV2RelightingData = z.object({ + body: zSchemaImageAppsV2RelightingInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImageAppsV2RelightingResponse = zSchemaQueueStatus + +export const zGetFalAiImageAppsV2RelightingRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiImageAppsV2RelightingRequestsByRequestIdResponse = + zSchemaImageAppsV2RelightingOutput + +export const zGetFalAiImageAppsV2StyleTransferRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImageAppsV2StyleTransferRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageAppsV2StyleTransferRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImageAppsV2StyleTransferRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageAppsV2StyleTransferData = z.object({ + body: zSchemaImageAppsV2StyleTransferInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImageAppsV2StyleTransferResponse = zSchemaQueueStatus + +export const zGetFalAiImageAppsV2StyleTransferRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiImageAppsV2StyleTransferRequestsByRequestIdResponse = + zSchemaImageAppsV2StyleTransferOutput + +export const zGetFalAiImageAppsV2PhotoRestorationRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImageAppsV2PhotoRestorationRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageAppsV2PhotoRestorationRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImageAppsV2PhotoRestorationRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageAppsV2PhotoRestorationData = z.object({ + body: zSchemaImageAppsV2PhotoRestorationInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImageAppsV2PhotoRestorationResponse = zSchemaQueueStatus + +export const zGetFalAiImageAppsV2PhotoRestorationRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiImageAppsV2PhotoRestorationRequestsByRequestIdResponse = + zSchemaImageAppsV2PhotoRestorationOutput + +export const zGetFalAiImageAppsV2PortraitEnhanceRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImageAppsV2PortraitEnhanceRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageAppsV2PortraitEnhanceRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiImageAppsV2PortraitEnhanceRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageAppsV2PortraitEnhanceData = z.object({ + body: zSchemaImageAppsV2PortraitEnhanceInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImageAppsV2PortraitEnhanceResponse = zSchemaQueueStatus + +export const zGetFalAiImageAppsV2PortraitEnhanceRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiImageAppsV2PortraitEnhanceRequestsByRequestIdResponse = + zSchemaImageAppsV2PortraitEnhanceOutput + +export const zGetFalAiImageAppsV2PhotographyEffectsRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImageAppsV2PhotographyEffectsRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageAppsV2PhotographyEffectsRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImageAppsV2PhotographyEffectsRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageAppsV2PhotographyEffectsData = z.object({ + body: zSchemaImageAppsV2PhotographyEffectsInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImageAppsV2PhotographyEffectsResponse = + zSchemaQueueStatus + +export const zGetFalAiImageAppsV2PhotographyEffectsRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiImageAppsV2PhotographyEffectsRequestsByRequestIdResponse = + zSchemaImageAppsV2PhotographyEffectsOutput + +export const zGetFalAiImageAppsV2PerspectiveRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiImageAppsV2PerspectiveRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageAppsV2PerspectiveRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImageAppsV2PerspectiveRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageAppsV2PerspectiveData = z.object({ + body: zSchemaImageAppsV2PerspectiveInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImageAppsV2PerspectiveResponse = zSchemaQueueStatus + +export const zGetFalAiImageAppsV2PerspectiveRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiImageAppsV2PerspectiveRequestsByRequestIdResponse = + zSchemaImageAppsV2PerspectiveOutput + +export const zGetFalAiImageAppsV2ObjectRemovalRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImageAppsV2ObjectRemovalRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageAppsV2ObjectRemovalRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImageAppsV2ObjectRemovalRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageAppsV2ObjectRemovalData = z.object({ + body: zSchemaImageAppsV2ObjectRemovalInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImageAppsV2ObjectRemovalResponse = zSchemaQueueStatus + +export const zGetFalAiImageAppsV2ObjectRemovalRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiImageAppsV2ObjectRemovalRequestsByRequestIdResponse = + zSchemaImageAppsV2ObjectRemovalOutput + +export const zGetFalAiImageAppsV2HeadshotPhotoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImageAppsV2HeadshotPhotoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageAppsV2HeadshotPhotoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImageAppsV2HeadshotPhotoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageAppsV2HeadshotPhotoData = z.object({ + body: zSchemaImageAppsV2HeadshotPhotoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImageAppsV2HeadshotPhotoResponse = zSchemaQueueStatus + +export const zGetFalAiImageAppsV2HeadshotPhotoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiImageAppsV2HeadshotPhotoRequestsByRequestIdResponse = + zSchemaImageAppsV2HeadshotPhotoOutput + +export const zGetFalAiImageAppsV2HairChangeRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImageAppsV2HairChangeRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageAppsV2HairChangeRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImageAppsV2HairChangeRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageAppsV2HairChangeData = z.object({ + body: zSchemaImageAppsV2HairChangeInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiImageAppsV2HairChangeResponse = zSchemaQueueStatus + +export const zGetFalAiImageAppsV2HairChangeRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiImageAppsV2HairChangeRequestsByRequestIdResponse = + zSchemaImageAppsV2HairChangeOutput + +export const zGetFalAiImageAppsV2ExpressionChangeRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImageAppsV2ExpressionChangeRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageAppsV2ExpressionChangeRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImageAppsV2ExpressionChangeRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageAppsV2ExpressionChangeData = z.object({ + body: zSchemaImageAppsV2ExpressionChangeInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImageAppsV2ExpressionChangeResponse = zSchemaQueueStatus + +export const zGetFalAiImageAppsV2ExpressionChangeRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiImageAppsV2ExpressionChangeRequestsByRequestIdResponse = + zSchemaImageAppsV2ExpressionChangeOutput + +export const zGetFalAiImageAppsV2CityTeleportRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImageAppsV2CityTeleportRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageAppsV2CityTeleportRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
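+ */
+
+// Illustrative sketch (editorial): the cancel pair works the same way for
+// every endpoint -- `…CancelData` validates the path, `…CancelResponse`
+// validates the `{ success?: boolean }` body. `success` is optional in the
+// schema, so an absent field should be read as "unknown", not as failure.
+// The URL shape and auth header are assumptions, as above.
+export async function cancelCityTeleportExample(requestId: string) {
+  const data = zPutFalAiImageAppsV2CityTeleportRequestsByRequestIdCancelData.parse({
+    path: { request_id: requestId },
+  })
+  const res = await fetch(
+    `https://queue.fal.run/fal-ai/image-apps-v2/city-teleport/requests/${data.path.request_id}/cancel`,
+    { method: 'PUT', headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+  )
+  return zPutFalAiImageAppsV2CityTeleportRequestsByRequestIdCancelResponse.parse(
+    await res.json(),
+  )
+}
+
+/**
+ * The request was cancelled.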
+ */ +export const zPutFalAiImageAppsV2CityTeleportRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageAppsV2CityTeleportData = z.object({ + body: zSchemaImageAppsV2CityTeleportInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImageAppsV2CityTeleportResponse = zSchemaQueueStatus + +export const zGetFalAiImageAppsV2CityTeleportRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. + */ +export const zGetFalAiImageAppsV2CityTeleportRequestsByRequestIdResponse = + zSchemaImageAppsV2CityTeleportOutput + +export const zGetFalAiImageAppsV2AgeModifyRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImageAppsV2AgeModifyRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageAppsV2AgeModifyRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImageAppsV2AgeModifyRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageAppsV2AgeModifyData = z.object({ + body: zSchemaImageAppsV2AgeModifyInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImageAppsV2AgeModifyResponse = zSchemaQueueStatus + +export const zGetFalAiImageAppsV2AgeModifyRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiImageAppsV2AgeModifyRequestsByRequestIdResponse = + zSchemaImageAppsV2AgeModifyOutput + +export const zGetFalAiImageAppsV2MakeupApplicationRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiImageAppsV2MakeupApplicationRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageAppsV2MakeupApplicationRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImageAppsV2MakeupApplicationRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageAppsV2MakeupApplicationData = z.object({ + body: zSchemaImageAppsV2MakeupApplicationInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImageAppsV2MakeupApplicationResponse = zSchemaQueueStatus + +export const zGetFalAiImageAppsV2MakeupApplicationRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiImageAppsV2MakeupApplicationRequestsByRequestIdResponse = + zSchemaImageAppsV2MakeupApplicationOutput + +export const zGetFalAiQwenImageEditInpaintRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiQwenImageEditInpaintRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageEditInpaintRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwenImageEditInpaintRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageEditInpaintData = z.object({ + body: zSchemaQwenImageEditInpaintInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiQwenImageEditInpaintResponse = zSchemaQueueStatus + +export const zGetFalAiQwenImageEditInpaintRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiQwenImageEditInpaintRequestsByRequestIdResponse = + zSchemaQwenImageEditInpaintOutput + +export const zGetFalAiFluxSrpoImageToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFluxSrpoImageToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxSrpoImageToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxSrpoImageToImageRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxSrpoImageToImageData = z.object({ + body: zSchemaFluxSrpoImageToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxSrpoImageToImageResponse = zSchemaQueueStatus + +export const zGetFalAiFluxSrpoImageToImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFluxSrpoImageToImageRequestsByRequestIdResponse = + zSchemaFluxSrpoImageToImageOutput + +export const zGetFalAiFlux1SrpoImageToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFlux1SrpoImageToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux1SrpoImageToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux1SrpoImageToImageRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux1SrpoImageToImageData = z.object({ + body: zSchemaFlux1SrpoImageToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiFlux1SrpoImageToImageResponse = zSchemaQueueStatus + +export const zGetFalAiFlux1SrpoImageToImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFlux1SrpoImageToImageRequestsByRequestIdResponse = + zSchemaFlux1SrpoImageToImageOutput + +export const zGetFalAiQwenImageEditLoraRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiQwenImageEditLoraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageEditLoraRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwenImageEditLoraRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageEditLoraData = z.object({ + body: zSchemaQwenImageEditLoraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiQwenImageEditLoraResponse = zSchemaQueueStatus + +export const zGetFalAiQwenImageEditLoraRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiQwenImageEditLoraRequestsByRequestIdResponse = + zSchemaQwenImageEditLoraOutput + +export const zGetFalAiViduReferenceToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiViduReferenceToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiViduReferenceToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiViduReferenceToImageRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiViduReferenceToImageData = z.object({ + body: zSchemaViduReferenceToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiViduReferenceToImageResponse = zSchemaQueueStatus + +export const zGetFalAiViduReferenceToImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiViduReferenceToImageRequestsByRequestIdResponse = + zSchemaViduReferenceToImageOutput + +export const zGetFalAiBytedanceSeedreamV4EditRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiBytedanceSeedreamV4EditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiBytedanceSeedreamV4EditRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiBytedanceSeedreamV4EditRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiBytedanceSeedreamV4EditData = z.object({ + body: zSchemaBytedanceSeedreamV4EditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiBytedanceSeedreamV4EditResponse = zSchemaQueueStatus + +export const zGetFalAiBytedanceSeedreamV4EditRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. + */ +export const zGetFalAiBytedanceSeedreamV4EditRequestsByRequestIdResponse = + zSchemaBytedanceSeedreamV4EditOutput + +export const zGetFalAiWanV22A14bImageToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiWanV22A14bImageToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanV22A14bImageToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanV22A14bImageToImageRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanV22A14bImageToImageData = z.object({ + body: zSchemaWanV22A14bImageToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanV22A14bImageToImageResponse = zSchemaQueueStatus + +export const zGetFalAiWanV22A14bImageToImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiWanV22A14bImageToImageRequestsByRequestIdResponse = + zSchemaWanV22A14bImageToImageOutput + +export const zGetFalAiUsoRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiUsoRequestsByRequestIdStatusResponse = zSchemaQueueStatus + +export const zPutFalAiUsoRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiUsoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiUsoData = z.object({ + body: zSchemaUsoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiUsoResponse = zSchemaQueueStatus + +export const zGetFalAiUsoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiUsoRequestsByRequestIdResponse = zSchemaUsoOutput + +export const zGetFalAiGemini25FlashImageEditRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiGemini25FlashImageEditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiGemini25FlashImageEditRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiGemini25FlashImageEditRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiGemini25FlashImageEditData = z.object({ + body: zSchemaGemini25FlashImageEditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiGemini25FlashImageEditResponse = zSchemaQueueStatus + +export const zGetFalAiGemini25FlashImageEditRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiGemini25FlashImageEditRequestsByRequestIdResponse = + zSchemaGemini25FlashImageEditOutput + +export const zGetFalAiQwenImageImageToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiQwenImageImageToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageImageToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwenImageImageToImageRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageImageToImageData = z.object({ + body: zSchemaQwenImageImageToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
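+ */
+
+// Illustrative sketch (editorial): submitting a job. `zPost…Data` wraps the
+// model input as the request body (path and query are `never`), and the
+// queue answers with the shared status envelope rather than the result
+// itself. The endpoint URL is inferred from the schema name and is an
+// assumption.
+export async function submitQwenImageToImageExample(
+  input: z.input<typeof zSchemaQwenImageImageToImageInput>,
+) {
+  const { body } = zPostFalAiQwenImageImageToImageData.parse({ body: input })
+  const res = await fetch(
+    'https://queue.fal.run/fal-ai/qwen-image/image-to-image',
+    {
+      method: 'POST',
+      headers: {
+        Authorization: `Key ${process.env.FAL_KEY}`,
+        'Content-Type': 'application/json',
+      },
+      body: JSON.stringify(body),
+    },
+  )
+  return zSchemaQueueStatus.parse(await res.json())
+}
+
+/**
+ * The request status.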
+ */ +export const zPostFalAiQwenImageImageToImageResponse = zSchemaQueueStatus + +export const zGetFalAiQwenImageImageToImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiQwenImageImageToImageRequestsByRequestIdResponse = + zSchemaQwenImageImageToImageOutput + +export const zGetBriaReimagine32RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetBriaReimagine32RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutBriaReimagine32RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutBriaReimagine32RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostBriaReimagine32Data = z.object({ + body: zSchemaReimagine32Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostBriaReimagine32Response = zSchemaQueueStatus + +export const zGetBriaReimagine32RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetBriaReimagine32RequestsByRequestIdResponse = + zSchemaReimagine32Output + +export const zGetFalAiNanoBananaEditRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiNanoBananaEditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiNanoBananaEditRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiNanoBananaEditRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiNanoBananaEditData = z.object({ + body: zSchemaNanoBananaEditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiNanoBananaEditResponse = zSchemaQueueStatus + +export const zGetFalAiNanoBananaEditRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiNanoBananaEditRequestsByRequestIdResponse = + zSchemaNanoBananaEditOutput + +export const zGetFalAiNextstep1RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiNextstep1RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiNextstep1RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiNextstep1RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiNextstep1Data = z.object({ + body: zSchemaNextstep1Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiNextstep1Response = zSchemaQueueStatus + +export const zGetFalAiNextstep1RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiNextstep1RequestsByRequestIdResponse = + zSchemaNextstep1Output + +export const zGetFalAiQwenImageEditRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiQwenImageEditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageEditRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwenImageEditRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageEditData = z.object({ + body: zSchemaQwenImageEditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiQwenImageEditResponse = zSchemaQueueStatus + +export const zGetFalAiQwenImageEditRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiQwenImageEditRequestsByRequestIdResponse = + zSchemaQwenImageEditOutput + +export const zGetFalAiIdeogramCharacterEditRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiIdeogramCharacterEditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiIdeogramCharacterEditRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiIdeogramCharacterEditRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiIdeogramCharacterEditData = z.object({ + body: zSchemaIdeogramCharacterEditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiIdeogramCharacterEditResponse = zSchemaQueueStatus + +export const zGetFalAiIdeogramCharacterEditRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiIdeogramCharacterEditRequestsByRequestIdResponse = + zSchemaIdeogramCharacterEditOutput + +export const zGetFalAiIdeogramCharacterRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiIdeogramCharacterRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiIdeogramCharacterRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiIdeogramCharacterRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiIdeogramCharacterData = z.object({ + body: zSchemaIdeogramCharacterInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiIdeogramCharacterResponse = zSchemaQueueStatus + +export const zGetFalAiIdeogramCharacterRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiIdeogramCharacterRequestsByRequestIdResponse = + zSchemaIdeogramCharacterOutput + +export const zGetFalAiIdeogramCharacterRemixRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiIdeogramCharacterRemixRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiIdeogramCharacterRemixRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiIdeogramCharacterRemixRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiIdeogramCharacterRemixData = z.object({ + body: zSchemaIdeogramCharacterRemixInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
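+ */
+
+// Illustrative sketch (editorial): every endpoint in this file exports the
+// same quartet -- submit (POST), status (GET), cancel (PUT) and result (GET)
+// schemas. A consumer could group them generically; the type below is a
+// hypothetical shape for such a grouping, not something the generator emits.
+export type QueueEndpointSchemasExample<
+  TSubmitData extends z.ZodType,
+  TResult extends z.ZodType,
+> = {
+  submitData: TSubmitData // zPost…Data: body wrapper around the model input
+  status: typeof zSchemaQueueStatus // zGet…StatusResponse / zPost…Response
+  result: TResult // zGet…RequestsByRequestIdResponse: the model output
+}
+
+/**
+ * The request status.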
+ */ +export const zPostFalAiIdeogramCharacterRemixResponse = zSchemaQueueStatus + +export const zGetFalAiIdeogramCharacterRemixRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiIdeogramCharacterRemixRequestsByRequestIdResponse = + zSchemaIdeogramCharacterRemixOutput + +export const zGetFalAiFluxKreaLoraInpaintingRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFluxKreaLoraInpaintingRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxKreaLoraInpaintingRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxKreaLoraInpaintingRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxKreaLoraInpaintingData = z.object({ + body: zSchemaFluxKreaLoraInpaintingInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxKreaLoraInpaintingResponse = zSchemaQueueStatus + +export const zGetFalAiFluxKreaLoraInpaintingRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFluxKreaLoraInpaintingRequestsByRequestIdResponse = + zSchemaFluxKreaLoraInpaintingOutput + +export const zGetFalAiFluxKreaLoraImageToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFluxKreaLoraImageToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxKreaLoraImageToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiFluxKreaLoraImageToImageRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxKreaLoraImageToImageData = z.object({ + body: zSchemaFluxKreaLoraImageToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxKreaLoraImageToImageResponse = zSchemaQueueStatus + +export const zGetFalAiFluxKreaLoraImageToImageRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiFluxKreaLoraImageToImageRequestsByRequestIdResponse = + zSchemaFluxKreaLoraImageToImageOutput + +export const zGetFalAiFluxKreaImageToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFluxKreaImageToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxKreaImageToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxKreaImageToImageRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxKreaImageToImageData = z.object({ + body: zSchemaFluxKreaImageToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxKreaImageToImageResponse = zSchemaQueueStatus + +export const zGetFalAiFluxKreaImageToImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFluxKreaImageToImageRequestsByRequestIdResponse = + zSchemaFluxKreaImageToImageOutput + +export const zGetFalAiFluxKreaReduxRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiFluxKreaReduxRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxKreaReduxRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxKreaReduxRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxKreaReduxData = z.object({ + body: zSchemaFluxKreaReduxInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxKreaReduxResponse = zSchemaQueueStatus + +export const zGetFalAiFluxKreaReduxRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFluxKreaReduxRequestsByRequestIdResponse = + zSchemaFluxKreaReduxOutput + +export const zGetFalAiFlux1KreaImageToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFlux1KreaImageToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux1KreaImageToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux1KreaImageToImageRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux1KreaImageToImageData = z.object({ + body: zSchemaFlux1KreaImageToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux1KreaImageToImageResponse = zSchemaQueueStatus + +export const zGetFalAiFlux1KreaImageToImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiFlux1KreaImageToImageRequestsByRequestIdResponse = + zSchemaFlux1KreaImageToImageOutput + +export const zGetFalAiFlux1KreaReduxRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFlux1KreaReduxRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux1KreaReduxRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux1KreaReduxRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux1KreaReduxData = z.object({ + body: zSchemaFlux1KreaReduxInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux1KreaReduxResponse = zSchemaQueueStatus + +export const zGetFalAiFlux1KreaReduxRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFlux1KreaReduxRequestsByRequestIdResponse = + zSchemaFlux1KreaReduxOutput + +export const zGetFalAiFluxKontextLoraInpaintRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFluxKontextLoraInpaintRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxKontextLoraInpaintRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxKontextLoraInpaintRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxKontextLoraInpaintData = z.object({ + body: zSchemaFluxKontextLoraInpaintInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiFluxKontextLoraInpaintResponse = zSchemaQueueStatus + +export const zGetFalAiFluxKontextLoraInpaintRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFluxKontextLoraInpaintRequestsByRequestIdResponse = + zSchemaFluxKontextLoraInpaintOutput + +export const zGetFalAiHunyuanWorldRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiHunyuanWorldRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiHunyuanWorldRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiHunyuanWorldRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiHunyuanWorldData = z.object({ + body: zSchemaHunyuanWorldInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiHunyuanWorldResponse = zSchemaQueueStatus + +export const zGetFalAiHunyuanWorldRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiHunyuanWorldRequestsByRequestIdResponse = + zSchemaHunyuanWorldOutput + +export const zGetFalAiImageEditingRetouchRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImageEditingRetouchRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageEditingRetouchRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiImageEditingRetouchRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageEditingRetouchData = z.object({ + body: zSchemaImageEditingRetouchInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImageEditingRetouchResponse = zSchemaQueueStatus + +export const zGetFalAiImageEditingRetouchRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiImageEditingRetouchRequestsByRequestIdResponse = + zSchemaImageEditingRetouchOutput + +export const zGetFalAiHidreamE11RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiHidreamE11RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiHidreamE11RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiHidreamE11RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiHidreamE11Data = z.object({ + body: zSchemaHidreamE11Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiHidreamE11Response = zSchemaQueueStatus + +export const zGetFalAiHidreamE11RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiHidreamE11RequestsByRequestIdResponse = + zSchemaHidreamE11Output + +export const zGetFalAiRifeRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
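+ *
+ * Pass `logs=1` in the status request's query to include execution logs
+ * in the response.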
+ */ +export const zGetFalAiRifeRequestsByRequestIdStatusResponse = zSchemaQueueStatus + +export const zPutFalAiRifeRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiRifeRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiRifeData = z.object({ + body: zSchemaRifeInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiRifeResponse = zSchemaQueueStatus + +export const zGetFalAiRifeRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiRifeRequestsByRequestIdResponse = zSchemaRifeOutput + +export const zGetFalAiFilmRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFilmRequestsByRequestIdStatusResponse = zSchemaQueueStatus + +export const zPutFalAiFilmRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFilmRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFilmData = z.object({ + body: zSchemaFilmInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFilmResponse = zSchemaQueueStatus + +export const zGetFalAiFilmRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFilmRequestsByRequestIdResponse = zSchemaFilmOutput + +export const zGetFalAiCalligrapherRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiCalligrapherRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiCalligrapherRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiCalligrapherRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiCalligrapherData = z.object({ + body: zSchemaCalligrapherInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiCalligrapherResponse = zSchemaQueueStatus + +export const zGetFalAiCalligrapherRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiCalligrapherRequestsByRequestIdResponse = + zSchemaCalligrapherOutput + +export const zGetFalAiBriaReimagineRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiBriaReimagineRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiBriaReimagineRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiBriaReimagineRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiBriaReimagineData = z.object({ + body: zSchemaBriaReimagineInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiBriaReimagineResponse = zSchemaQueueStatus + +export const zGetFalAiBriaReimagineRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiBriaReimagineRequestsByRequestIdResponse = + zSchemaBriaReimagineOutput + +export const zGetFalAiImageEditingRealismRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImageEditingRealismRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageEditingRealismRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImageEditingRealismRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageEditingRealismData = z.object({ + body: zSchemaImageEditingRealismInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImageEditingRealismResponse = zSchemaQueueStatus + +export const zGetFalAiImageEditingRealismRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiImageEditingRealismRequestsByRequestIdResponse = + zSchemaImageEditingRealismOutput + +export const zGetFalAiPostProcessingVignetteRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPostProcessingVignetteRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPostProcessingVignetteRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPostProcessingVignetteRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPostProcessingVignetteData = z.object({ + body: zSchemaPostProcessingVignetteInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
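+ *
+ * Submitting a request returns a queue status rather than the final
+ * output; fetch the result from the corresponding requests-by-request-id
+ * endpoint once processing completes.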
+ */ +export const zPostFalAiPostProcessingVignetteResponse = zSchemaQueueStatus + +export const zGetFalAiPostProcessingVignetteRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPostProcessingVignetteRequestsByRequestIdResponse = + zSchemaPostProcessingVignetteOutput + +export const zGetFalAiPostProcessingSolarizeRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPostProcessingSolarizeRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPostProcessingSolarizeRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPostProcessingSolarizeRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPostProcessingSolarizeData = z.object({ + body: zSchemaPostProcessingSolarizeInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPostProcessingSolarizeResponse = zSchemaQueueStatus + +export const zGetFalAiPostProcessingSolarizeRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPostProcessingSolarizeRequestsByRequestIdResponse = + zSchemaPostProcessingSolarizeOutput + +export const zGetFalAiPostProcessingSharpenRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPostProcessingSharpenRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPostProcessingSharpenRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiPostProcessingSharpenRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPostProcessingSharpenData = z.object({ + body: zSchemaPostProcessingSharpenInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPostProcessingSharpenResponse = zSchemaQueueStatus + +export const zGetFalAiPostProcessingSharpenRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPostProcessingSharpenRequestsByRequestIdResponse = + zSchemaPostProcessingSharpenOutput + +export const zGetFalAiPostProcessingParabolizeRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPostProcessingParabolizeRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPostProcessingParabolizeRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPostProcessingParabolizeRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPostProcessingParabolizeData = z.object({ + body: zSchemaPostProcessingParabolizeInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPostProcessingParabolizeResponse = zSchemaQueueStatus + +export const zGetFalAiPostProcessingParabolizeRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiPostProcessingParabolizeRequestsByRequestIdResponse = + zSchemaPostProcessingParabolizeOutput + +export const zGetFalAiPostProcessingGrainRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiPostProcessingGrainRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPostProcessingGrainRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPostProcessingGrainRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPostProcessingGrainData = z.object({ + body: zSchemaPostProcessingGrainInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPostProcessingGrainResponse = zSchemaQueueStatus + +export const zGetFalAiPostProcessingGrainRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPostProcessingGrainRequestsByRequestIdResponse = + zSchemaPostProcessingGrainOutput + +export const zGetFalAiPostProcessingDodgeBurnRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPostProcessingDodgeBurnRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPostProcessingDodgeBurnRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPostProcessingDodgeBurnRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPostProcessingDodgeBurnData = z.object({ + body: zSchemaPostProcessingDodgeBurnInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPostProcessingDodgeBurnResponse = zSchemaQueueStatus + +export const zGetFalAiPostProcessingDodgeBurnRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. 
+ */ +export const zGetFalAiPostProcessingDodgeBurnRequestsByRequestIdResponse = + zSchemaPostProcessingDodgeBurnOutput + +export const zGetFalAiPostProcessingDissolveRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPostProcessingDissolveRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPostProcessingDissolveRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPostProcessingDissolveRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPostProcessingDissolveData = z.object({ + body: zSchemaPostProcessingDissolveInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPostProcessingDissolveResponse = zSchemaQueueStatus + +export const zGetFalAiPostProcessingDissolveRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPostProcessingDissolveRequestsByRequestIdResponse = + zSchemaPostProcessingDissolveOutput + +export const zGetFalAiPostProcessingDesaturateRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPostProcessingDesaturateRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPostProcessingDesaturateRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPostProcessingDesaturateRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPostProcessingDesaturateData = z.object({ + body: zSchemaPostProcessingDesaturateInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiPostProcessingDesaturateResponse = zSchemaQueueStatus + +export const zGetFalAiPostProcessingDesaturateRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiPostProcessingDesaturateRequestsByRequestIdResponse = + zSchemaPostProcessingDesaturateOutput + +export const zGetFalAiPostProcessingColorTintRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPostProcessingColorTintRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPostProcessingColorTintRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPostProcessingColorTintRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPostProcessingColorTintData = z.object({ + body: zSchemaPostProcessingColorTintInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPostProcessingColorTintResponse = zSchemaQueueStatus + +export const zGetFalAiPostProcessingColorTintRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. + */ +export const zGetFalAiPostProcessingColorTintRequestsByRequestIdResponse = + zSchemaPostProcessingColorTintOutput + +export const zGetFalAiPostProcessingColorCorrectionRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPostProcessingColorCorrectionRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPostProcessingColorCorrectionRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiPostProcessingColorCorrectionRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPostProcessingColorCorrectionData = z.object({ + body: zSchemaPostProcessingColorCorrectionInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPostProcessingColorCorrectionResponse = + zSchemaQueueStatus + +export const zGetFalAiPostProcessingColorCorrectionRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiPostProcessingColorCorrectionRequestsByRequestIdResponse = + zSchemaPostProcessingColorCorrectionOutput + +export const zGetFalAiPostProcessingChromaticAberrationRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPostProcessingChromaticAberrationRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPostProcessingChromaticAberrationRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPostProcessingChromaticAberrationRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPostProcessingChromaticAberrationData = z.object({ + body: zSchemaPostProcessingChromaticAberrationInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPostProcessingChromaticAberrationResponse = + zSchemaQueueStatus + +export const zGetFalAiPostProcessingChromaticAberrationRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiPostProcessingChromaticAberrationRequestsByRequestIdResponse = + zSchemaPostProcessingChromaticAberrationOutput + +export const zGetFalAiPostProcessingBlurRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPostProcessingBlurRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPostProcessingBlurRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPostProcessingBlurRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPostProcessingBlurData = z.object({ + body: zSchemaPostProcessingBlurInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPostProcessingBlurResponse = zSchemaQueueStatus + +export const zGetFalAiPostProcessingBlurRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPostProcessingBlurRequestsByRequestIdResponse = + zSchemaPostProcessingBlurOutput + +export const zGetFalAiImageEditingYoutubeThumbnailsRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImageEditingYoutubeThumbnailsRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageEditingYoutubeThumbnailsRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImageEditingYoutubeThumbnailsRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageEditingYoutubeThumbnailsData = z.object({ + body: zSchemaImageEditingYoutubeThumbnailsInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiImageEditingYoutubeThumbnailsResponse = + zSchemaQueueStatus + +export const zGetFalAiImageEditingYoutubeThumbnailsRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiImageEditingYoutubeThumbnailsRequestsByRequestIdResponse = + zSchemaImageEditingYoutubeThumbnailsOutput + +export const zGetFalAiTopazUpscaleImageRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiTopazUpscaleImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiTopazUpscaleImageRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiTopazUpscaleImageRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiTopazUpscaleImageData = z.object({ + body: zSchemaTopazUpscaleImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiTopazUpscaleImageResponse = zSchemaQueueStatus + +export const zGetFalAiTopazUpscaleImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiTopazUpscaleImageRequestsByRequestIdResponse = + zSchemaTopazUpscaleImageOutput + +export const zGetFalAiImageEditingBroccoliHaircutRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImageEditingBroccoliHaircutRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageEditingBroccoliHaircutRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiImageEditingBroccoliHaircutRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageEditingBroccoliHaircutData = z.object({ + body: zSchemaImageEditingBroccoliHaircutInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImageEditingBroccoliHaircutResponse = zSchemaQueueStatus + +export const zGetFalAiImageEditingBroccoliHaircutRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiImageEditingBroccoliHaircutRequestsByRequestIdResponse = + zSchemaImageEditingBroccoliHaircutOutput + +export const zGetFalAiImageEditingWojakStyleRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImageEditingWojakStyleRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageEditingWojakStyleRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImageEditingWojakStyleRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageEditingWojakStyleData = z.object({ + body: zSchemaImageEditingWojakStyleInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImageEditingWojakStyleResponse = zSchemaQueueStatus + +export const zGetFalAiImageEditingWojakStyleRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiImageEditingWojakStyleRequestsByRequestIdResponse = + zSchemaImageEditingWojakStyleOutput + +export const zGetFalAiImageEditingPlushieStyleRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiImageEditingPlushieStyleRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageEditingPlushieStyleRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImageEditingPlushieStyleRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageEditingPlushieStyleData = z.object({ + body: zSchemaImageEditingPlushieStyleInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImageEditingPlushieStyleResponse = zSchemaQueueStatus + +export const zGetFalAiImageEditingPlushieStyleRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiImageEditingPlushieStyleRequestsByRequestIdResponse = + zSchemaImageEditingPlushieStyleOutput + +export const zGetFalAiFluxKontextLoraRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFluxKontextLoraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxKontextLoraRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxKontextLoraRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxKontextLoraData = z.object({ + body: zSchemaFluxKontextLoraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxKontextLoraResponse = zSchemaQueueStatus + +export const zGetFalAiFluxKontextLoraRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiFluxKontextLoraRequestsByRequestIdResponse = + zSchemaFluxKontextLoraOutput + +export const zGetFalAiFashnTryonV16RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFashnTryonV16RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFashnTryonV16RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFashnTryonV16RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFashnTryonV16Data = z.object({ + body: zSchemaFashnTryonV16Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFashnTryonV16Response = zSchemaQueueStatus + +export const zGetFalAiFashnTryonV16RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFashnTryonV16RequestsByRequestIdResponse = + zSchemaFashnTryonV16Output + +export const zGetFalAiChainOfZoomRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiChainOfZoomRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiChainOfZoomRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiChainOfZoomRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiChainOfZoomData = z.object({ + body: zSchemaChainOfZoomInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiChainOfZoomResponse = zSchemaQueueStatus + +export const zGetFalAiChainOfZoomRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiChainOfZoomRequestsByRequestIdResponse = + zSchemaChainOfZoomOutput + +export const zGetFalAiPasdRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiPasdRequestsByRequestIdStatusResponse = zSchemaQueueStatus + +export const zPutFalAiPasdRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiPasdRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPasdData = z.object({ + body: zSchemaPasdInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPasdResponse = zSchemaQueueStatus + +export const zGetFalAiPasdRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPasdRequestsByRequestIdResponse = zSchemaPasdOutput + +export const zGetFalAiObjectRemovalBboxRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiObjectRemovalBboxRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiObjectRemovalBboxRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. 
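+ *
+ * @example
+ * // Sketch grounded in this schema: `success` is an optional boolean, so
+ * // both `{}` and `{ success: true }` parse.
+ * zPutFalAiObjectRemovalBboxRequestsByRequestIdCancelResponse.parse({
+ *   success: true,
+ * })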
+ */ +export const zPutFalAiObjectRemovalBboxRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiObjectRemovalBboxData = z.object({ + body: zSchemaObjectRemovalBboxInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiObjectRemovalBboxResponse = zSchemaQueueStatus + +export const zGetFalAiObjectRemovalBboxRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiObjectRemovalBboxRequestsByRequestIdResponse = + zSchemaObjectRemovalBboxOutput + +export const zGetFalAiObjectRemovalMaskRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiObjectRemovalMaskRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiObjectRemovalMaskRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiObjectRemovalMaskRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiObjectRemovalMaskData = z.object({ + body: zSchemaObjectRemovalMaskInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiObjectRemovalMaskResponse = zSchemaQueueStatus + +export const zGetFalAiObjectRemovalMaskRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiObjectRemovalMaskRequestsByRequestIdResponse = + zSchemaObjectRemovalMaskOutput + +export const zGetFalAiObjectRemovalRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
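+ *
+ * @example
+ * // Illustrative polling sketch: validate the request envelope (path param
+ * // plus the optional `logs` query flag), then the polled payload.
+ * // `statusJson` is a hypothetical fetched body.
+ * declare const statusJson: unknown
+ * zGetFalAiObjectRemovalRequestsByRequestIdStatusData.parse({
+ *   path: { request_id: 'abc-123' },
+ *   query: { logs: 1 },
+ * })
+ * const status =
+ *   zGetFalAiObjectRemovalRequestsByRequestIdStatusResponse.parse(statusJson)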
+ */ +export const zGetFalAiObjectRemovalRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiObjectRemovalRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiObjectRemovalRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiObjectRemovalData = z.object({ + body: zSchemaObjectRemovalInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiObjectRemovalResponse = zSchemaQueueStatus + +export const zGetFalAiObjectRemovalRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiObjectRemovalRequestsByRequestIdResponse = + zSchemaObjectRemovalOutput + +export const zGetFalAiRecraftVectorizeRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiRecraftVectorizeRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiRecraftVectorizeRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiRecraftVectorizeRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiRecraftVectorizeData = z.object({ + body: zSchemaRecraftVectorizeInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiRecraftVectorizeResponse = zSchemaQueueStatus + +export const zGetFalAiRecraftVectorizeRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiRecraftVectorizeRequestsByRequestIdResponse = + zSchemaRecraftVectorizeOutput + +export const zGetFalAiFfmpegApiExtractFrameRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFfmpegApiExtractFrameRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFfmpegApiExtractFrameRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFfmpegApiExtractFrameRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFfmpegApiExtractFrameData = z.object({ + body: zSchemaFfmpegApiExtractFrameInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFfmpegApiExtractFrameResponse = zSchemaQueueStatus + +export const zGetFalAiFfmpegApiExtractFrameRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFfmpegApiExtractFrameRequestsByRequestIdResponse = + zSchemaFfmpegApiExtractFrameOutput + +export const zGetFalAiLumaPhotonFlashModifyRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLumaPhotonFlashModifyRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLumaPhotonFlashModifyRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLumaPhotonFlashModifyRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLumaPhotonFlashModifyData = z.object({ + body: zSchemaLumaPhotonFlashModifyInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiLumaPhotonFlashModifyResponse = zSchemaQueueStatus + +export const zGetFalAiLumaPhotonFlashModifyRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLumaPhotonFlashModifyRequestsByRequestIdResponse = + zSchemaLumaPhotonFlashModifyOutput + +export const zGetFalAiLumaPhotonModifyRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiLumaPhotonModifyRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLumaPhotonModifyRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiLumaPhotonModifyRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLumaPhotonModifyData = z.object({ + body: zSchemaLumaPhotonModifyInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLumaPhotonModifyResponse = zSchemaQueueStatus + +export const zGetFalAiLumaPhotonModifyRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLumaPhotonModifyRequestsByRequestIdResponse = + zSchemaLumaPhotonModifyOutput + +export const zGetFalAiImageEditingReframeRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImageEditingReframeRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageEditingReframeRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiImageEditingReframeRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageEditingReframeData = z.object({ + body: zSchemaImageEditingReframeInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImageEditingReframeResponse = zSchemaQueueStatus + +export const zGetFalAiImageEditingReframeRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiImageEditingReframeRequestsByRequestIdResponse = + zSchemaImageEditingReframeOutput + +export const zGetFalAiImageEditingBabyVersionRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImageEditingBabyVersionRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageEditingBabyVersionRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImageEditingBabyVersionRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageEditingBabyVersionData = z.object({ + body: zSchemaImageEditingBabyVersionInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImageEditingBabyVersionResponse = zSchemaQueueStatus + +export const zGetFalAiImageEditingBabyVersionRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. + */ +export const zGetFalAiImageEditingBabyVersionRequestsByRequestIdResponse = + zSchemaImageEditingBabyVersionOutput + +export const zGetFalAiLumaPhotonFlashReframeRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiLumaPhotonFlashReframeRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLumaPhotonFlashReframeRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLumaPhotonFlashReframeRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLumaPhotonFlashReframeData = z.object({ + body: zSchemaLumaPhotonFlashReframeInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLumaPhotonFlashReframeResponse = zSchemaQueueStatus + +export const zGetFalAiLumaPhotonFlashReframeRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLumaPhotonFlashReframeRequestsByRequestIdResponse = + zSchemaLumaPhotonFlashReframeOutput + +export const zGetFalAiLumaPhotonReframeRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiLumaPhotonReframeRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLumaPhotonReframeRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiLumaPhotonReframeRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLumaPhotonReframeData = z.object({ + body: zSchemaLumaPhotonReframeInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLumaPhotonReframeResponse = zSchemaQueueStatus + +export const zGetFalAiLumaPhotonReframeRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiLumaPhotonReframeRequestsByRequestIdResponse = + zSchemaLumaPhotonReframeOutput + +export const zGetFalAiFlux1SchnellReduxRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiFlux1SchnellReduxRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux1SchnellReduxRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux1SchnellReduxRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux1SchnellReduxData = z.object({ + body: zSchemaFlux1SchnellReduxInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux1SchnellReduxResponse = zSchemaQueueStatus + +export const zGetFalAiFlux1SchnellReduxRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFlux1SchnellReduxRequestsByRequestIdResponse = + zSchemaFlux1SchnellReduxOutput + +export const zGetFalAiFlux1DevReduxRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFlux1DevReduxRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux1DevReduxRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux1DevReduxRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux1DevReduxData = z.object({ + body: zSchemaFlux1DevReduxInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiFlux1DevReduxResponse = zSchemaQueueStatus + +export const zGetFalAiFlux1DevReduxRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFlux1DevReduxRequestsByRequestIdResponse = + zSchemaFlux1DevReduxOutput + +export const zGetFalAiFlux1DevImageToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFlux1DevImageToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux1DevImageToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux1DevImageToImageRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux1DevImageToImageData = z.object({ + body: zSchemaFlux1DevImageToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux1DevImageToImageResponse = zSchemaQueueStatus + +export const zGetFalAiFlux1DevImageToImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFlux1DevImageToImageRequestsByRequestIdResponse = + zSchemaFlux1DevImageToImageOutput + +export const zGetFalAiImageEditingTextRemovalRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImageEditingTextRemovalRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageEditingTextRemovalRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiImageEditingTextRemovalRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageEditingTextRemovalData = z.object({ + body: zSchemaImageEditingTextRemovalInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImageEditingTextRemovalResponse = zSchemaQueueStatus + +export const zGetFalAiImageEditingTextRemovalRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. + */ +export const zGetFalAiImageEditingTextRemovalRequestsByRequestIdResponse = + zSchemaImageEditingTextRemovalOutput + +export const zGetFalAiImageEditingPhotoRestorationRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImageEditingPhotoRestorationRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageEditingPhotoRestorationRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImageEditingPhotoRestorationRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageEditingPhotoRestorationData = z.object({ + body: zSchemaImageEditingPhotoRestorationInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImageEditingPhotoRestorationResponse = zSchemaQueueStatus + +export const zGetFalAiImageEditingPhotoRestorationRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiImageEditingPhotoRestorationRequestsByRequestIdResponse = + zSchemaImageEditingPhotoRestorationOutput + +export const zGetFalAiImageEditingWeatherEffectRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
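+ *
+ * @example
+ * // Illustrative sketch only: use safeParse to report validation issues
+ * // instead of throwing. `payload` is hypothetical.
+ * declare const payload: unknown
+ * const checked =
+ *   zGetFalAiImageEditingWeatherEffectRequestsByRequestIdStatusResponse.safeParse(
+ *     payload,
+ *   )
+ * if (!checked.success) console.error(checked.error.issues)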
+ */ +export const zGetFalAiImageEditingWeatherEffectRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageEditingWeatherEffectRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImageEditingWeatherEffectRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageEditingWeatherEffectData = z.object({ + body: zSchemaImageEditingWeatherEffectInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImageEditingWeatherEffectResponse = zSchemaQueueStatus + +export const zGetFalAiImageEditingWeatherEffectRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiImageEditingWeatherEffectRequestsByRequestIdResponse = + zSchemaImageEditingWeatherEffectOutput + +export const zGetFalAiImageEditingTimeOfDayRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImageEditingTimeOfDayRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageEditingTimeOfDayRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImageEditingTimeOfDayRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageEditingTimeOfDayData = z.object({ + body: zSchemaImageEditingTimeOfDayInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImageEditingTimeOfDayResponse = zSchemaQueueStatus + +export const zGetFalAiImageEditingTimeOfDayRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiImageEditingTimeOfDayRequestsByRequestIdResponse = + zSchemaImageEditingTimeOfDayOutput + +export const zGetFalAiImageEditingStyleTransferRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImageEditingStyleTransferRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageEditingStyleTransferRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImageEditingStyleTransferRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageEditingStyleTransferData = z.object({ + body: zSchemaImageEditingStyleTransferInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImageEditingStyleTransferResponse = zSchemaQueueStatus + +export const zGetFalAiImageEditingStyleTransferRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiImageEditingStyleTransferRequestsByRequestIdResponse = + zSchemaImageEditingStyleTransferOutput + +export const zGetFalAiImageEditingSceneCompositionRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImageEditingSceneCompositionRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageEditingSceneCompositionRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImageEditingSceneCompositionRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageEditingSceneCompositionData = z.object({ + body: zSchemaImageEditingSceneCompositionInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiImageEditingSceneCompositionResponse = zSchemaQueueStatus + +export const zGetFalAiImageEditingSceneCompositionRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiImageEditingSceneCompositionRequestsByRequestIdResponse = + zSchemaImageEditingSceneCompositionOutput + +export const zGetFalAiImageEditingProfessionalPhotoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImageEditingProfessionalPhotoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageEditingProfessionalPhotoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImageEditingProfessionalPhotoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageEditingProfessionalPhotoData = z.object({ + body: zSchemaImageEditingProfessionalPhotoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImageEditingProfessionalPhotoResponse = + zSchemaQueueStatus + +export const zGetFalAiImageEditingProfessionalPhotoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiImageEditingProfessionalPhotoRequestsByRequestIdResponse = + zSchemaImageEditingProfessionalPhotoOutput + +export const zGetFalAiImageEditingObjectRemovalRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImageEditingObjectRemovalRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageEditingObjectRemovalRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiImageEditingObjectRemovalRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageEditingObjectRemovalData = z.object({ + body: zSchemaImageEditingObjectRemovalInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImageEditingObjectRemovalResponse = zSchemaQueueStatus + +export const zGetFalAiImageEditingObjectRemovalRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiImageEditingObjectRemovalRequestsByRequestIdResponse = + zSchemaImageEditingObjectRemovalOutput + +export const zGetFalAiImageEditingHairChangeRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImageEditingHairChangeRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageEditingHairChangeRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImageEditingHairChangeRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageEditingHairChangeData = z.object({ + body: zSchemaImageEditingHairChangeInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImageEditingHairChangeResponse = zSchemaQueueStatus + +export const zGetFalAiImageEditingHairChangeRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiImageEditingHairChangeRequestsByRequestIdResponse = + zSchemaImageEditingHairChangeOutput + +export const zGetFalAiImageEditingFaceEnhancementRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
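+ *
+ * @example
+ * // Illustrative sketch only: derive the static TypeScript type from the
+ * // schema with z.infer.
+ * type FaceEnhancementStatus = z.infer<
+ *   typeof zGetFalAiImageEditingFaceEnhancementRequestsByRequestIdStatusResponse
+ * >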
+ */ +export const zGetFalAiImageEditingFaceEnhancementRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageEditingFaceEnhancementRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImageEditingFaceEnhancementRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageEditingFaceEnhancementData = z.object({ + body: zSchemaImageEditingFaceEnhancementInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImageEditingFaceEnhancementResponse = zSchemaQueueStatus + +export const zGetFalAiImageEditingFaceEnhancementRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiImageEditingFaceEnhancementRequestsByRequestIdResponse = + zSchemaImageEditingFaceEnhancementOutput + +export const zGetFalAiImageEditingExpressionChangeRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImageEditingExpressionChangeRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageEditingExpressionChangeRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImageEditingExpressionChangeRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageEditingExpressionChangeData = z.object({ + body: zSchemaImageEditingExpressionChangeInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImageEditingExpressionChangeResponse = zSchemaQueueStatus + +export const zGetFalAiImageEditingExpressionChangeRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiImageEditingExpressionChangeRequestsByRequestIdResponse = + zSchemaImageEditingExpressionChangeOutput + +export const zGetFalAiImageEditingColorCorrectionRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImageEditingColorCorrectionRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageEditingColorCorrectionRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImageEditingColorCorrectionRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageEditingColorCorrectionData = z.object({ + body: zSchemaImageEditingColorCorrectionInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImageEditingColorCorrectionResponse = zSchemaQueueStatus + +export const zGetFalAiImageEditingColorCorrectionRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiImageEditingColorCorrectionRequestsByRequestIdResponse = + zSchemaImageEditingColorCorrectionOutput + +export const zGetFalAiImageEditingCartoonifyRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImageEditingCartoonifyRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageEditingCartoonifyRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImageEditingCartoonifyRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageEditingCartoonifyData = z.object({ + body: zSchemaImageEditingCartoonifyInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiImageEditingCartoonifyResponse = zSchemaQueueStatus + +export const zGetFalAiImageEditingCartoonifyRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiImageEditingCartoonifyRequestsByRequestIdResponse = + zSchemaImageEditingCartoonifyOutput + +export const zGetFalAiImageEditingBackgroundChangeRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImageEditingBackgroundChangeRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageEditingBackgroundChangeRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImageEditingBackgroundChangeRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageEditingBackgroundChangeData = z.object({ + body: zSchemaImageEditingBackgroundChangeInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImageEditingBackgroundChangeResponse = zSchemaQueueStatus + +export const zGetFalAiImageEditingBackgroundChangeRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiImageEditingBackgroundChangeRequestsByRequestIdResponse = + zSchemaImageEditingBackgroundChangeOutput + +export const zGetFalAiImageEditingAgeProgressionRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImageEditingAgeProgressionRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageEditingAgeProgressionRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiImageEditingAgeProgressionRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageEditingAgeProgressionData = z.object({ + body: zSchemaImageEditingAgeProgressionInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImageEditingAgeProgressionResponse = zSchemaQueueStatus + +export const zGetFalAiImageEditingAgeProgressionRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiImageEditingAgeProgressionRequestsByRequestIdResponse = + zSchemaImageEditingAgeProgressionOutput + +export const zGetFalAiFluxProKontextMaxMultiRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFluxProKontextMaxMultiRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxProKontextMaxMultiRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxProKontextMaxMultiRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxProKontextMaxMultiData = z.object({ + body: zSchemaFluxProKontextMaxMultiInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxProKontextMaxMultiResponse = zSchemaQueueStatus + +export const zGetFalAiFluxProKontextMaxMultiRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFluxProKontextMaxMultiRequestsByRequestIdResponse = + zSchemaFluxProKontextMaxMultiOutput + +export const zGetFalAiFluxProKontextMultiRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiFluxProKontextMultiRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxProKontextMultiRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxProKontextMultiRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxProKontextMultiData = z.object({ + body: zSchemaFluxProKontextMultiInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxProKontextMultiResponse = zSchemaQueueStatus + +export const zGetFalAiFluxProKontextMultiRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFluxProKontextMultiRequestsByRequestIdResponse = + zSchemaFluxProKontextMultiOutput + +export const zGetFalAiFluxProKontextMaxRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiFluxProKontextMaxRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxProKontextMaxRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxProKontextMaxRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxProKontextMaxData = z.object({ + body: zSchemaFluxProKontextMaxInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxProKontextMaxResponse = zSchemaQueueStatus + +export const zGetFalAiFluxProKontextMaxRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiFluxProKontextMaxRequestsByRequestIdResponse = + zSchemaFluxProKontextMaxOutput + +export const zGetFalAiFluxKontextDevRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFluxKontextDevRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxKontextDevRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxKontextDevRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxKontextDevData = z.object({ + body: zSchemaFluxKontextDevInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxKontextDevResponse = zSchemaQueueStatus + +export const zGetFalAiFluxKontextDevRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFluxKontextDevRequestsByRequestIdResponse = + zSchemaFluxKontextDevOutput + +export const zGetFalAiBagelEditRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiBagelEditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiBagelEditRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiBagelEditRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiBagelEditData = z.object({ + body: zSchemaBagelEditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiBagelEditResponse = zSchemaQueueStatus + +export const zGetFalAiBagelEditRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiBagelEditRequestsByRequestIdResponse = + zSchemaBagelEditOutput + +export const zGetSmoretalkAiRembgEnhanceRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetSmoretalkAiRembgEnhanceRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutSmoretalkAiRembgEnhanceRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutSmoretalkAiRembgEnhanceRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostSmoretalkAiRembgEnhanceData = z.object({ + body: zSchemaRembgEnhanceInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostSmoretalkAiRembgEnhanceResponse = zSchemaQueueStatus + +export const zGetSmoretalkAiRembgEnhanceRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetSmoretalkAiRembgEnhanceRequestsByRequestIdResponse = + zSchemaRembgEnhanceOutput + +export const zGetFalAiRecraftUpscaleCreativeRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiRecraftUpscaleCreativeRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiRecraftUpscaleCreativeRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiRecraftUpscaleCreativeRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiRecraftUpscaleCreativeData = z.object({ + body: zSchemaRecraftUpscaleCreativeInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiRecraftUpscaleCreativeResponse = zSchemaQueueStatus + +export const zGetFalAiRecraftUpscaleCreativeRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiRecraftUpscaleCreativeRequestsByRequestIdResponse = + zSchemaRecraftUpscaleCreativeOutput + +export const zGetFalAiRecraftUpscaleCrispRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiRecraftUpscaleCrispRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiRecraftUpscaleCrispRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiRecraftUpscaleCrispRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiRecraftUpscaleCrispData = z.object({ + body: zSchemaRecraftUpscaleCrispInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiRecraftUpscaleCrispResponse = zSchemaQueueStatus + +export const zGetFalAiRecraftUpscaleCrispRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiRecraftUpscaleCrispRequestsByRequestIdResponse = + zSchemaRecraftUpscaleCrispOutput + +export const zGetFalAiRecraftV3ImageToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiRecraftV3ImageToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiRecraftV3ImageToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiRecraftV3ImageToImageRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiRecraftV3ImageToImageData = z.object({ + body: zSchemaRecraftV3ImageToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiRecraftV3ImageToImageResponse = zSchemaQueueStatus + +export const zGetFalAiRecraftV3ImageToImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiRecraftV3ImageToImageRequestsByRequestIdResponse = + zSchemaRecraftV3ImageToImageOutput + +export const zGetFalAiMinimaxImage01SubjectReferenceRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiMinimaxImage01SubjectReferenceRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMinimaxImage01SubjectReferenceRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiMinimaxImage01SubjectReferenceRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMinimaxImage01SubjectReferenceData = z.object({ + body: zSchemaMinimaxImage01SubjectReferenceInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMinimaxImage01SubjectReferenceResponse = + zSchemaQueueStatus + +export const zGetFalAiMinimaxImage01SubjectReferenceRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiMinimaxImage01SubjectReferenceRequestsByRequestIdResponse = + zSchemaMinimaxImage01SubjectReferenceOutput + +export const zGetFalAiHidreamI1FullImageToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiHidreamI1FullImageToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiHidreamI1FullImageToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiHidreamI1FullImageToImageRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiHidreamI1FullImageToImageData = z.object({ + body: zSchemaHidreamI1FullImageToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiHidreamI1FullImageToImageResponse = zSchemaQueueStatus + +export const zGetFalAiHidreamI1FullImageToImageRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiHidreamI1FullImageToImageRequestsByRequestIdResponse = + zSchemaHidreamI1FullImageToImageOutput + +export const zGetFalAiIdeogramV3ReframeRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiIdeogramV3ReframeRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiIdeogramV3ReframeRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiIdeogramV3ReframeRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiIdeogramV3ReframeData = z.object({ + body: zSchemaIdeogramV3ReframeInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiIdeogramV3ReframeResponse = zSchemaQueueStatus + +export const zGetFalAiIdeogramV3ReframeRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiIdeogramV3ReframeRequestsByRequestIdResponse = + zSchemaIdeogramV3ReframeOutput + +export const zGetFalAiIdeogramV3ReplaceBackgroundRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiIdeogramV3ReplaceBackgroundRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiIdeogramV3ReplaceBackgroundRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiIdeogramV3ReplaceBackgroundRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiIdeogramV3ReplaceBackgroundData = z.object({ + body: zSchemaIdeogramV3ReplaceBackgroundInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiIdeogramV3ReplaceBackgroundResponse = zSchemaQueueStatus + +export const zGetFalAiIdeogramV3ReplaceBackgroundRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiIdeogramV3ReplaceBackgroundRequestsByRequestIdResponse = + zSchemaIdeogramV3ReplaceBackgroundOutput + +export const zGetFalAiIdeogramV3RemixRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiIdeogramV3RemixRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiIdeogramV3RemixRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiIdeogramV3RemixRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiIdeogramV3RemixData = z.object({ + body: zSchemaIdeogramV3RemixInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiIdeogramV3RemixResponse = zSchemaQueueStatus + +export const zGetFalAiIdeogramV3RemixRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiIdeogramV3RemixRequestsByRequestIdResponse = + zSchemaIdeogramV3RemixOutput + +export const zGetFalAiIdeogramV3EditRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiIdeogramV3EditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiIdeogramV3EditRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiIdeogramV3EditRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiIdeogramV3EditData = z.object({ + body: zSchemaIdeogramV3EditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiIdeogramV3EditResponse = zSchemaQueueStatus + +export const zGetFalAiIdeogramV3EditRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiIdeogramV3EditRequestsByRequestIdResponse = + zSchemaIdeogramV3EditOutput + +export const zGetFalAiStep1xEditRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiStep1xEditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiStep1xEditRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiStep1xEditRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiStep1xEditData = z.object({ + body: zSchemaStep1xEditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiStep1xEditResponse = zSchemaQueueStatus + +export const zGetFalAiStep1xEditRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiStep1xEditRequestsByRequestIdResponse = + zSchemaStep1xEditOutput + +export const zGetFalAiImage2SvgRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiImage2SvgRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImage2SvgRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiImage2SvgRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImage2SvgData = z.object({ + body: zSchemaImage2SvgInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImage2SvgResponse = zSchemaQueueStatus + +export const zGetFalAiImage2SvgRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiImage2SvgRequestsByRequestIdResponse = + zSchemaImage2SvgOutput + +export const zGetFalAiUnoRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiUnoRequestsByRequestIdStatusResponse = zSchemaQueueStatus + +export const zPutFalAiUnoRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiUnoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiUnoData = z.object({ + body: zSchemaUnoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiUnoResponse = zSchemaQueueStatus + +export const zGetFalAiUnoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiUnoRequestsByRequestIdResponse = zSchemaUnoOutput + +export const zGetFalAiGptImage1EditImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiGptImage1EditImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiGptImage1EditImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiGptImage1EditImageRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiGptImage1EditImageData = z.object({ + body: zSchemaGptImage1EditImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiGptImage1EditImageResponse = zSchemaQueueStatus + +export const zGetFalAiGptImage1EditImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiGptImage1EditImageRequestsByRequestIdResponse = + zSchemaGptImage1EditImageOutput + +export const zGetRundiffusionFalJuggernautFluxLoraInpaintingRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetRundiffusionFalJuggernautFluxLoraInpaintingRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutRundiffusionFalJuggernautFluxLoraInpaintingRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutRundiffusionFalJuggernautFluxLoraInpaintingRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostRundiffusionFalJuggernautFluxLoraInpaintingData = z.object({ + body: zSchemaJuggernautFluxLoraInpaintingInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostRundiffusionFalJuggernautFluxLoraInpaintingResponse = + zSchemaQueueStatus + +export const zGetRundiffusionFalJuggernautFluxLoraInpaintingRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetRundiffusionFalJuggernautFluxLoraInpaintingRequestsByRequestIdResponse = + zSchemaJuggernautFluxLoraInpaintingOutput + +export const zGetFalAiFashnTryonV15RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFashnTryonV15RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFashnTryonV15RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiFashnTryonV15RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFashnTryonV15Data = z.object({ + body: zSchemaFashnTryonV15Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFashnTryonV15Response = zSchemaQueueStatus + +export const zGetFalAiFashnTryonV15RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFashnTryonV15RequestsByRequestIdResponse = + zSchemaFashnTryonV15Output + +export const zGetFalAiPlushifyRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiPlushifyRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPlushifyRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiPlushifyRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPlushifyData = z.object({ + body: zSchemaPlushifyInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPlushifyResponse = zSchemaQueueStatus + +export const zGetFalAiPlushifyRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPlushifyRequestsByRequestIdResponse = + zSchemaPlushifyOutput + +export const zGetFalAiInstantCharacterRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiInstantCharacterRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiInstantCharacterRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiInstantCharacterRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiInstantCharacterData = z.object({ + body: zSchemaInstantCharacterInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiInstantCharacterResponse = zSchemaQueueStatus + +export const zGetFalAiInstantCharacterRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiInstantCharacterRequestsByRequestIdResponse = + zSchemaInstantCharacterOutput + +export const zGetFalAiCartoonifyRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiCartoonifyRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiCartoonifyRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiCartoonifyRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiCartoonifyData = z.object({ + body: zSchemaCartoonifyInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiCartoonifyResponse = zSchemaQueueStatus + +export const zGetFalAiCartoonifyRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiCartoonifyRequestsByRequestIdResponse = + zSchemaCartoonifyOutput + +export const zGetFalAiFinegrainEraserMaskRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFinegrainEraserMaskRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFinegrainEraserMaskRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFinegrainEraserMaskRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFinegrainEraserMaskData = z.object({ + body: zSchemaFinegrainEraserMaskInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFinegrainEraserMaskResponse = zSchemaQueueStatus + +export const zGetFalAiFinegrainEraserMaskRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFinegrainEraserMaskRequestsByRequestIdResponse = + zSchemaFinegrainEraserMaskOutput + +export const zGetFalAiFinegrainEraserBboxRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFinegrainEraserBboxRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFinegrainEraserBboxRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFinegrainEraserBboxRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFinegrainEraserBboxData = z.object({ + body: zSchemaFinegrainEraserBboxInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiFinegrainEraserBboxResponse = zSchemaQueueStatus + +export const zGetFalAiFinegrainEraserBboxRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFinegrainEraserBboxRequestsByRequestIdResponse = + zSchemaFinegrainEraserBboxOutput + +export const zGetFalAiFinegrainEraserRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFinegrainEraserRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFinegrainEraserRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFinegrainEraserRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFinegrainEraserData = z.object({ + body: zSchemaFinegrainEraserInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFinegrainEraserResponse = zSchemaQueueStatus + +export const zGetFalAiFinegrainEraserRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFinegrainEraserRequestsByRequestIdResponse = + zSchemaFinegrainEraserOutput + +export const zGetFalAiStarVectorRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiStarVectorRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiStarVectorRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiStarVectorRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiStarVectorData = z.object({ + body: zSchemaStarVectorInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiStarVectorResponse = zSchemaQueueStatus + +export const zGetFalAiStarVectorRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiStarVectorRequestsByRequestIdResponse = + zSchemaStarVectorOutput + +export const zGetFalAiGhiblifyRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiGhiblifyRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiGhiblifyRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiGhiblifyRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiGhiblifyData = z.object({ + body: zSchemaGhiblifyInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiGhiblifyResponse = zSchemaQueueStatus + +export const zGetFalAiGhiblifyRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiGhiblifyRequestsByRequestIdResponse = + zSchemaGhiblifyOutput + +export const zGetFalAiTheraRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiTheraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiTheraRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiTheraRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiTheraData = z.object({ + body: zSchemaTheraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiTheraResponse = zSchemaQueueStatus + +export const zGetFalAiTheraRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiTheraRequestsByRequestIdResponse = zSchemaTheraOutput + +export const zGetFalAiMixDehazeNetRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiMixDehazeNetRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMixDehazeNetRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiMixDehazeNetRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMixDehazeNetData = z.object({ + body: zSchemaMixDehazeNetInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMixDehazeNetResponse = zSchemaQueueStatus + +export const zGetFalAiMixDehazeNetRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiMixDehazeNetRequestsByRequestIdResponse = + zSchemaMixDehazeNetOutput + +export const zGetFalAiGeminiFlashEditMultiRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiGeminiFlashEditMultiRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiGeminiFlashEditMultiRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiGeminiFlashEditMultiRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiGeminiFlashEditMultiData = z.object({ + body: zSchemaGeminiFlashEditMultiInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiGeminiFlashEditMultiResponse = zSchemaQueueStatus + +export const zGetFalAiGeminiFlashEditMultiRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiGeminiFlashEditMultiRequestsByRequestIdResponse = + zSchemaGeminiFlashEditMultiOutput + +export const zGetFalAiGeminiFlashEditRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiGeminiFlashEditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiGeminiFlashEditRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiGeminiFlashEditRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiGeminiFlashEditData = z.object({ + body: zSchemaGeminiFlashEditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiGeminiFlashEditResponse = zSchemaQueueStatus + +export const zGetFalAiGeminiFlashEditRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiGeminiFlashEditRequestsByRequestIdResponse = + zSchemaGeminiFlashEditOutput + +export const zGetFalAiInvisibleWatermarkRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiInvisibleWatermarkRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiInvisibleWatermarkRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiInvisibleWatermarkRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiInvisibleWatermarkData = z.object({ + body: zSchemaInvisibleWatermarkInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiInvisibleWatermarkResponse = zSchemaQueueStatus + +export const zGetFalAiInvisibleWatermarkRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiInvisibleWatermarkRequestsByRequestIdResponse = + zSchemaInvisibleWatermarkOutput + +export const zGetRundiffusionFalJuggernautFluxProImageToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetRundiffusionFalJuggernautFluxProImageToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutRundiffusionFalJuggernautFluxProImageToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutRundiffusionFalJuggernautFluxProImageToImageRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostRundiffusionFalJuggernautFluxProImageToImageData = z.object({ + body: zSchemaJuggernautFluxProImageToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostRundiffusionFalJuggernautFluxProImageToImageResponse = + zSchemaQueueStatus + +export const zGetRundiffusionFalJuggernautFluxProImageToImageRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetRundiffusionFalJuggernautFluxProImageToImageRequestsByRequestIdResponse = + zSchemaJuggernautFluxProImageToImageOutput + +export const zGetRundiffusionFalJuggernautFluxBaseImageToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetRundiffusionFalJuggernautFluxBaseImageToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutRundiffusionFalJuggernautFluxBaseImageToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutRundiffusionFalJuggernautFluxBaseImageToImageRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostRundiffusionFalJuggernautFluxBaseImageToImageData = z.object({ + body: zSchemaJuggernautFluxBaseImageToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostRundiffusionFalJuggernautFluxBaseImageToImageResponse = + zSchemaQueueStatus + +export const zGetRundiffusionFalJuggernautFluxBaseImageToImageRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetRundiffusionFalJuggernautFluxBaseImageToImageRequestsByRequestIdResponse = + zSchemaJuggernautFluxBaseImageToImageOutput + +export const zGetFalAiDocresDewarpRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiDocresDewarpRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiDocresDewarpRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiDocresDewarpRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiDocresDewarpData = z.object({ + body: zSchemaDocresDewarpInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiDocresDewarpResponse = zSchemaQueueStatus + +export const zGetFalAiDocresDewarpRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiDocresDewarpRequestsByRequestIdResponse = + zSchemaDocresDewarpOutput + +export const zGetFalAiDocresRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiDocresRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiDocresRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiDocresRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiDocresData = z.object({ + body: zSchemaDocresInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiDocresResponse = zSchemaQueueStatus + +export const zGetFalAiDocresRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiDocresRequestsByRequestIdResponse = zSchemaDocresOutput + +export const zGetFalAiSwin2SrRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiSwin2SrRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSwin2SrRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiSwin2SrRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSwin2SrData = z.object({ + body: zSchemaSwin2SrInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiSwin2SrResponse = zSchemaQueueStatus + +export const zGetFalAiSwin2SrRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSwin2SrRequestsByRequestIdResponse = zSchemaSwin2SrOutput + +export const zGetFalAiIdeogramV2aRemixRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiIdeogramV2aRemixRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiIdeogramV2aRemixRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiIdeogramV2aRemixRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiIdeogramV2aRemixData = z.object({ + body: zSchemaIdeogramV2aRemixInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiIdeogramV2aRemixResponse = zSchemaQueueStatus + +export const zGetFalAiIdeogramV2aRemixRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiIdeogramV2aRemixRequestsByRequestIdResponse = + zSchemaIdeogramV2aRemixOutput + +export const zGetFalAiIdeogramV2aTurboRemixRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiIdeogramV2aTurboRemixRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiIdeogramV2aTurboRemixRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiIdeogramV2aTurboRemixRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiIdeogramV2aTurboRemixData = z.object({ + body: zSchemaIdeogramV2aTurboRemixInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiIdeogramV2aTurboRemixResponse = zSchemaQueueStatus + +export const zGetFalAiIdeogramV2aTurboRemixRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiIdeogramV2aTurboRemixRequestsByRequestIdResponse = + zSchemaIdeogramV2aTurboRemixOutput + +export const zGetFalAiEvfSamRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiEvfSamRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiEvfSamRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiEvfSamRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiEvfSamData = z.object({ + body: zSchemaEvfSamInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiEvfSamResponse = zSchemaQueueStatus + +export const zGetFalAiEvfSamRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiEvfSamRequestsByRequestIdResponse = zSchemaEvfSamOutput + +export const zGetFalAiDdcolorRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiDdcolorRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiDdcolorRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiDdcolorRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiDdcolorData = z.object({ + body: zSchemaDdcolorInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiDdcolorResponse = zSchemaQueueStatus + +export const zGetFalAiDdcolorRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiDdcolorRequestsByRequestIdResponse = zSchemaDdcolorOutput + +export const zGetFalAiSam2AutoSegmentRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiSam2AutoSegmentRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSam2AutoSegmentRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiSam2AutoSegmentRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSam2AutoSegmentData = z.object({ + body: zSchemaSam2AutoSegmentInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiSam2AutoSegmentResponse = zSchemaQueueStatus + +export const zGetFalAiSam2AutoSegmentRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSam2AutoSegmentRequestsByRequestIdResponse = + zSchemaSam2AutoSegmentOutput + +export const zGetFalAiDrctSuperResolutionRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiDrctSuperResolutionRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiDrctSuperResolutionRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiDrctSuperResolutionRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiDrctSuperResolutionData = z.object({ + body: zSchemaDrctSuperResolutionInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiDrctSuperResolutionResponse = zSchemaQueueStatus + +export const zGetFalAiDrctSuperResolutionRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiDrctSuperResolutionRequestsByRequestIdResponse = + zSchemaDrctSuperResolutionOutput + +export const zGetFalAiNafnetDeblurRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiNafnetDeblurRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiNafnetDeblurRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiNafnetDeblurRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiNafnetDeblurData = z.object({ + body: zSchemaNafnetDeblurInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiNafnetDeblurResponse = zSchemaQueueStatus + +export const zGetFalAiNafnetDeblurRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiNafnetDeblurRequestsByRequestIdResponse = + zSchemaNafnetDeblurOutput + +export const zGetFalAiNafnetDenoiseRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiNafnetDenoiseRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiNafnetDenoiseRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiNafnetDenoiseRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiNafnetDenoiseData = z.object({ + body: zSchemaNafnetDenoiseInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiNafnetDenoiseResponse = zSchemaQueueStatus + +export const zGetFalAiNafnetDenoiseRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiNafnetDenoiseRequestsByRequestIdResponse = + zSchemaNafnetDenoiseOutput + +export const zGetFalAiPostProcessingRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiPostProcessingRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPostProcessingRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiPostProcessingRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPostProcessingData = z.object({ + body: zSchemaPostProcessingInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPostProcessingResponse = zSchemaQueueStatus + +export const zGetFalAiPostProcessingRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPostProcessingRequestsByRequestIdResponse = + zSchemaPostProcessingOutput + +export const zGetFalAiFloweditRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFloweditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFloweditRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiFloweditRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFloweditData = z.object({ + body: zSchemaFloweditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFloweditResponse = zSchemaQueueStatus + +export const zGetFalAiFloweditRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFloweditRequestsByRequestIdResponse = + zSchemaFloweditOutput + +export const zGetFalAiFluxControlLoraDepthImageToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFluxControlLoraDepthImageToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxControlLoraDepthImageToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxControlLoraDepthImageToImageRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxControlLoraDepthImageToImageData = z.object({ + body: zSchemaFluxControlLoraDepthImageToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxControlLoraDepthImageToImageResponse = + zSchemaQueueStatus + +export const zGetFalAiFluxControlLoraDepthImageToImageRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiFluxControlLoraDepthImageToImageRequestsByRequestIdResponse = + zSchemaFluxControlLoraDepthImageToImageOutput + +export const zGetFalAiBenV2ImageRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiBenV2ImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiBenV2ImageRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiBenV2ImageRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiBenV2ImageData = z.object({ + body: zSchemaBenV2ImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiBenV2ImageResponse = zSchemaQueueStatus + +export const zGetFalAiBenV2ImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiBenV2ImageRequestsByRequestIdResponse = + zSchemaBenV2ImageOutput + +export const zGetFalAiFluxControlLoraCannyImageToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFluxControlLoraCannyImageToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxControlLoraCannyImageToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxControlLoraCannyImageToImageRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxControlLoraCannyImageToImageData = z.object({ + body: zSchemaFluxControlLoraCannyImageToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxControlLoraCannyImageToImageResponse = + zSchemaQueueStatus + +export const zGetFalAiFluxControlLoraCannyImageToImageRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiFluxControlLoraCannyImageToImageRequestsByRequestIdResponse = + zSchemaFluxControlLoraCannyImageToImageOutput + +export const zGetFalAiIdeogramUpscaleRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiIdeogramUpscaleRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiIdeogramUpscaleRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiIdeogramUpscaleRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiIdeogramUpscaleData = z.object({ + body: zSchemaIdeogramUpscaleInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiIdeogramUpscaleResponse = zSchemaQueueStatus + +export const zGetFalAiIdeogramUpscaleRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiIdeogramUpscaleRequestsByRequestIdResponse = + zSchemaIdeogramUpscaleOutput + +export const zGetFalAiCodeformerRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiCodeformerRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiCodeformerRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiCodeformerRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiCodeformerData = z.object({ + body: zSchemaCodeformerInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiCodeformerResponse = zSchemaQueueStatus + +export const zGetFalAiCodeformerRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiCodeformerRequestsByRequestIdResponse = + zSchemaCodeformerOutput + +export const zGetFalAiKlingV15KolorsVirtualTryOnRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingV15KolorsVirtualTryOnRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingV15KolorsVirtualTryOnRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKlingV15KolorsVirtualTryOnRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingV15KolorsVirtualTryOnData = z.object({ + body: zSchemaKlingV15KolorsVirtualTryOnInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKlingV15KolorsVirtualTryOnResponse = zSchemaQueueStatus + +export const zGetFalAiKlingV15KolorsVirtualTryOnRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiKlingV15KolorsVirtualTryOnRequestsByRequestIdResponse = + zSchemaKlingV15KolorsVirtualTryOnOutput + +export const zGetFalAiFluxLoraCannyRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFluxLoraCannyRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxLoraCannyRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiFluxLoraCannyRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxLoraCannyData = z.object({ + body: zSchemaFluxLoraCannyInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxLoraCannyResponse = zSchemaQueueStatus + +export const zGetFalAiFluxLoraCannyRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFluxLoraCannyRequestsByRequestIdResponse = + zSchemaFluxLoraCannyOutput + +export const zGetFalAiFluxProV1FillFinetunedRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFluxProV1FillFinetunedRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxProV1FillFinetunedRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxProV1FillFinetunedRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxProV1FillFinetunedData = z.object({ + body: zSchemaFluxProV1FillFinetunedInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxProV1FillFinetunedResponse = zSchemaQueueStatus + +export const zGetFalAiFluxProV1FillFinetunedRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFluxProV1FillFinetunedRequestsByRequestIdResponse = + zSchemaFluxProV1FillFinetunedOutput + +export const zGetFalAiMoondreamNextDetectionRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiMoondreamNextDetectionRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMoondreamNextDetectionRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiMoondreamNextDetectionRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMoondreamNextDetectionData = z.object({ + body: zSchemaMoondreamNextDetectionInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMoondreamNextDetectionResponse = zSchemaQueueStatus + +export const zGetFalAiMoondreamNextDetectionRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiMoondreamNextDetectionRequestsByRequestIdResponse = + zSchemaMoondreamNextDetectionOutput + +export const zGetFalAiBriaExpandRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiBriaExpandRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiBriaExpandRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiBriaExpandRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiBriaExpandData = z.object({ + body: zSchemaBriaExpandInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiBriaExpandResponse = zSchemaQueueStatus + +export const zGetFalAiBriaExpandRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiBriaExpandRequestsByRequestIdResponse = + zSchemaBriaExpandOutput + +export const zGetFalAiBriaGenfillRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiBriaGenfillRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiBriaGenfillRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiBriaGenfillRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiBriaGenfillData = z.object({ + body: zSchemaBriaGenfillInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiBriaGenfillResponse = zSchemaQueueStatus + +export const zGetFalAiBriaGenfillRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiBriaGenfillRequestsByRequestIdResponse = + zSchemaBriaGenfillOutput + +export const zGetFalAiFluxLoraFillRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFluxLoraFillRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxLoraFillRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxLoraFillRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxLoraFillData = z.object({ + body: zSchemaFluxLoraFillInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiFluxLoraFillResponse = zSchemaQueueStatus + +export const zGetFalAiFluxLoraFillRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFluxLoraFillRequestsByRequestIdResponse = + zSchemaFluxLoraFillOutput + +export const zGetFalAiBriaBackgroundReplaceRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiBriaBackgroundReplaceRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiBriaBackgroundReplaceRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiBriaBackgroundReplaceRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiBriaBackgroundReplaceData = z.object({ + body: zSchemaBriaBackgroundReplaceInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiBriaBackgroundReplaceResponse = zSchemaQueueStatus + +export const zGetFalAiBriaBackgroundReplaceRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiBriaBackgroundReplaceRequestsByRequestIdResponse = + zSchemaBriaBackgroundReplaceOutput + +export const zGetFalAiBriaEraserRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiBriaEraserRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiBriaEraserRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiBriaEraserRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiBriaEraserData = z.object({ + body: zSchemaBriaEraserInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiBriaEraserResponse = zSchemaQueueStatus + +export const zGetFalAiBriaEraserRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiBriaEraserRequestsByRequestIdResponse = + zSchemaBriaEraserOutput + +export const zGetFalAiBriaProductShotRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiBriaProductShotRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiBriaProductShotRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiBriaProductShotRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiBriaProductShotData = z.object({ + body: zSchemaBriaProductShotInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiBriaProductShotResponse = zSchemaQueueStatus + +export const zGetFalAiBriaProductShotRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiBriaProductShotRequestsByRequestIdResponse = + zSchemaBriaProductShotOutput + +export const zGetFalAiBriaBackgroundRemoveRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiBriaBackgroundRemoveRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiBriaBackgroundRemoveRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiBriaBackgroundRemoveRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiBriaBackgroundRemoveData = z.object({ + body: zSchemaBriaBackgroundRemoveInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiBriaBackgroundRemoveResponse = zSchemaQueueStatus + +export const zGetFalAiBriaBackgroundRemoveRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiBriaBackgroundRemoveRequestsByRequestIdResponse = + zSchemaBriaBackgroundRemoveOutput + +export const zGetFalAiCatVtonRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiCatVtonRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiCatVtonRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiCatVtonRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiCatVtonData = z.object({ + body: zSchemaCatVtonInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiCatVtonResponse = zSchemaQueueStatus + +export const zGetFalAiCatVtonRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
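+ *
+ * A minimal validation sketch (hand-written note, not generated output;
+ * `json` is an assumed response body from the result endpoint):
+ * @example
+ * const parsed = zGetFalAiCatVtonRequestsByRequestIdResponse.safeParse(json)
+ * if (!parsed.success) console.error(parsed.error) // payload did not match the schema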
+ */ +export const zGetFalAiCatVtonRequestsByRequestIdResponse = zSchemaCatVtonOutput + +export const zGetFalAiLeffaPoseTransferRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiLeffaPoseTransferRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLeffaPoseTransferRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiLeffaPoseTransferRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLeffaPoseTransferData = z.object({ + body: zSchemaLeffaPoseTransferInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLeffaPoseTransferResponse = zSchemaQueueStatus + +export const zGetFalAiLeffaPoseTransferRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLeffaPoseTransferRequestsByRequestIdResponse = + zSchemaLeffaPoseTransferOutput + +export const zGetFalAiLeffaVirtualTryonRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiLeffaVirtualTryonRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLeffaVirtualTryonRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiLeffaVirtualTryonRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLeffaVirtualTryonData = z.object({ + body: zSchemaLeffaVirtualTryonInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiLeffaVirtualTryonResponse = zSchemaQueueStatus + +export const zGetFalAiLeffaVirtualTryonRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLeffaVirtualTryonRequestsByRequestIdResponse = + zSchemaLeffaVirtualTryonOutput + +export const zGetFalAiIdeogramV2EditRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiIdeogramV2EditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiIdeogramV2EditRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiIdeogramV2EditRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiIdeogramV2EditData = z.object({ + body: zSchemaIdeogramV2EditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiIdeogramV2EditResponse = zSchemaQueueStatus + +export const zGetFalAiIdeogramV2EditRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiIdeogramV2EditRequestsByRequestIdResponse = + zSchemaIdeogramV2EditOutput + +export const zGetFalAiIdeogramV2TurboEditRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiIdeogramV2TurboEditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiIdeogramV2TurboEditRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiIdeogramV2TurboEditRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiIdeogramV2TurboEditData = z.object({ + body: zSchemaIdeogramV2TurboEditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiIdeogramV2TurboEditResponse = zSchemaQueueStatus + +export const zGetFalAiIdeogramV2TurboEditRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiIdeogramV2TurboEditRequestsByRequestIdResponse = + zSchemaIdeogramV2TurboEditOutput + +export const zGetFalAiIdeogramV2TurboRemixRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiIdeogramV2TurboRemixRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiIdeogramV2TurboRemixRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiIdeogramV2TurboRemixRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiIdeogramV2TurboRemixData = z.object({ + body: zSchemaIdeogramV2TurboRemixInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiIdeogramV2TurboRemixResponse = zSchemaQueueStatus + +export const zGetFalAiIdeogramV2TurboRemixRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiIdeogramV2TurboRemixRequestsByRequestIdResponse = + zSchemaIdeogramV2TurboRemixOutput + +export const zGetFalAiIdeogramV2RemixRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
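+ *
+ * A minimal validation sketch (hand-written note, not generated output)
+ * pairing the request-data schema defined above with this status response;
+ * the request ID and `json` body are hypothetical:
+ * @example
+ * const req = zGetFalAiIdeogramV2RemixRequestsByRequestIdStatusData.parse({
+ *   path: { request_id: 'abc123' }, // hypothetical request ID
+ *   query: { logs: 1 },             // ask for logs in the status payload
+ * })
+ * const status =
+ *   zGetFalAiIdeogramV2RemixRequestsByRequestIdStatusResponse.parse(json)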
+ */ +export const zGetFalAiIdeogramV2RemixRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiIdeogramV2RemixRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiIdeogramV2RemixRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiIdeogramV2RemixData = z.object({ + body: zSchemaIdeogramV2RemixInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiIdeogramV2RemixResponse = zSchemaQueueStatus + +export const zGetFalAiIdeogramV2RemixRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiIdeogramV2RemixRequestsByRequestIdResponse = + zSchemaIdeogramV2RemixOutput + +export const zGetFalAiFluxSchnellReduxRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFluxSchnellReduxRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxSchnellReduxRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxSchnellReduxRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxSchnellReduxData = z.object({ + body: zSchemaFluxSchnellReduxInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxSchnellReduxResponse = zSchemaQueueStatus + +export const zGetFalAiFluxSchnellReduxRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiFluxSchnellReduxRequestsByRequestIdResponse = + zSchemaFluxSchnellReduxOutput + +export const zGetFalAiFluxProV11ReduxRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFluxProV11ReduxRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxProV11ReduxRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxProV11ReduxRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxProV11ReduxData = z.object({ + body: zSchemaFluxProV11ReduxInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxProV11ReduxResponse = zSchemaQueueStatus + +export const zGetFalAiFluxProV11ReduxRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFluxProV11ReduxRequestsByRequestIdResponse = + zSchemaFluxProV11ReduxOutput + +export const zGetFalAiFluxDevReduxRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFluxDevReduxRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxDevReduxRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxDevReduxRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxDevReduxData = z.object({ + body: zSchemaFluxDevReduxInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiFluxDevReduxResponse = zSchemaQueueStatus + +export const zGetFalAiFluxDevReduxRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFluxDevReduxRequestsByRequestIdResponse = + zSchemaFluxDevReduxOutput + +export const zGetFalAiFluxProV11UltraReduxRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFluxProV11UltraReduxRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxProV11UltraReduxRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxProV11UltraReduxRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxProV11UltraReduxData = z.object({ + body: zSchemaFluxProV11UltraReduxInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxProV11UltraReduxResponse = zSchemaQueueStatus + +export const zGetFalAiFluxProV11UltraReduxRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFluxProV11UltraReduxRequestsByRequestIdResponse = + zSchemaFluxProV11UltraReduxOutput + +export const zGetFalAiFluxLoraDepthRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFluxLoraDepthRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxLoraDepthRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiFluxLoraDepthRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxLoraDepthData = z.object({ + body: zSchemaFluxLoraDepthInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxLoraDepthResponse = zSchemaQueueStatus + +export const zGetFalAiFluxLoraDepthRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFluxLoraDepthRequestsByRequestIdResponse = + zSchemaFluxLoraDepthOutput + +export const zGetFalAiFluxProV1FillRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFluxProV1FillRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxProV1FillRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxProV1FillRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxProV1FillData = z.object({ + body: zSchemaFluxProV1FillInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxProV1FillResponse = zSchemaQueueStatus + +export const zGetFalAiFluxProV1FillRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFluxProV1FillRequestsByRequestIdResponse = + zSchemaFluxProV1FillOutput + +export const zGetFalAiKolorsImageToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiKolorsImageToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKolorsImageToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKolorsImageToImageRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKolorsImageToImageData = z.object({ + body: zSchemaKolorsImageToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKolorsImageToImageResponse = zSchemaQueueStatus + +export const zGetFalAiKolorsImageToImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiKolorsImageToImageRequestsByRequestIdResponse = + zSchemaKolorsImageToImageOutput + +export const zGetFalAiIclightV2RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiIclightV2RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiIclightV2RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiIclightV2RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiIclightV2Data = z.object({ + body: zSchemaIclightV2Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiIclightV2Response = zSchemaQueueStatus + +export const zGetFalAiIclightV2RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiIclightV2RequestsByRequestIdResponse = + zSchemaIclightV2Output + +export const zGetFalAiFluxDifferentialDiffusionRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFluxDifferentialDiffusionRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxDifferentialDiffusionRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxDifferentialDiffusionRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxDifferentialDiffusionData = z.object({ + body: zSchemaFluxDifferentialDiffusionInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxDifferentialDiffusionResponse = zSchemaQueueStatus + +export const zGetFalAiFluxDifferentialDiffusionRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiFluxDifferentialDiffusionRequestsByRequestIdResponse = + zSchemaFluxDifferentialDiffusionOutput + +export const zGetFalAiFluxPulidRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFluxPulidRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxPulidRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxPulidRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxPulidData = z.object({ + body: zSchemaFluxPulidInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiFluxPulidResponse = zSchemaQueueStatus + +export const zGetFalAiFluxPulidRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFluxPulidRequestsByRequestIdResponse = + zSchemaFluxPulidOutput + +export const zGetFalAiBirefnetV2RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiBirefnetV2RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiBirefnetV2RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiBirefnetV2RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiBirefnetV2Data = z.object({ + body: zSchemaBirefnetV2Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiBirefnetV2Response = zSchemaQueueStatus + +export const zGetFalAiBirefnetV2RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiBirefnetV2RequestsByRequestIdResponse = + zSchemaBirefnetV2Output + +export const zGetFalAiLivePortraitImageRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiLivePortraitImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLivePortraitImageRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. 
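+ *
+ * A minimal validation sketch (hand-written note, not generated output;
+ * `json` is an assumed response body from the cancel endpoint, and
+ * `onCancelled` is a hypothetical handler):
+ * @example
+ * const cancelled =
+ *   zPutFalAiLivePortraitImageRequestsByRequestIdCancelResponse.parse(json)
+ * if (cancelled.success === true) onCancelled() // success is optional in the schema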
+ */ +export const zPutFalAiLivePortraitImageRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLivePortraitImageData = z.object({ + body: zSchemaLivePortraitImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLivePortraitImageResponse = zSchemaQueueStatus + +export const zGetFalAiLivePortraitImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLivePortraitImageRequestsByRequestIdResponse = + zSchemaLivePortraitImageOutput + +export const zGetFalAiFluxGeneralRfInversionRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFluxGeneralRfInversionRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxGeneralRfInversionRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxGeneralRfInversionRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxGeneralRfInversionData = z.object({ + body: zSchemaFluxGeneralRfInversionInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxGeneralRfInversionResponse = zSchemaQueueStatus + +export const zGetFalAiFluxGeneralRfInversionRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFluxGeneralRfInversionRequestsByRequestIdResponse = + zSchemaFluxGeneralRfInversionOutput + +export const zGetFalAiImagePreprocessorsHedRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiImagePreprocessorsHedRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImagePreprocessorsHedRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImagePreprocessorsHedRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImagePreprocessorsHedData = z.object({ + body: zSchemaImagePreprocessorsHedInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImagePreprocessorsHedResponse = zSchemaQueueStatus + +export const zGetFalAiImagePreprocessorsHedRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiImagePreprocessorsHedRequestsByRequestIdResponse = + zSchemaImagePreprocessorsHedOutput + +export const zGetFalAiImagePreprocessorsDepthAnythingV2RequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImagePreprocessorsDepthAnythingV2RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImagePreprocessorsDepthAnythingV2RequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImagePreprocessorsDepthAnythingV2RequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImagePreprocessorsDepthAnythingV2Data = z.object({ + body: zSchemaImagePreprocessorsDepthAnythingV2Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImagePreprocessorsDepthAnythingV2Response = + zSchemaQueueStatus + +export const zGetFalAiImagePreprocessorsDepthAnythingV2RequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiImagePreprocessorsDepthAnythingV2RequestsByRequestIdResponse = + zSchemaImagePreprocessorsDepthAnythingV2Output + +export const zGetFalAiImagePreprocessorsScribbleRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImagePreprocessorsScribbleRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImagePreprocessorsScribbleRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImagePreprocessorsScribbleRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImagePreprocessorsScribbleData = z.object({ + body: zSchemaImagePreprocessorsScribbleInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImagePreprocessorsScribbleResponse = zSchemaQueueStatus + +export const zGetFalAiImagePreprocessorsScribbleRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiImagePreprocessorsScribbleRequestsByRequestIdResponse = + zSchemaImagePreprocessorsScribbleOutput + +export const zGetFalAiImagePreprocessorsMlsdRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImagePreprocessorsMlsdRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImagePreprocessorsMlsdRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImagePreprocessorsMlsdRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImagePreprocessorsMlsdData = z.object({ + body: zSchemaImagePreprocessorsMlsdInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiImagePreprocessorsMlsdResponse = zSchemaQueueStatus + +export const zGetFalAiImagePreprocessorsMlsdRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiImagePreprocessorsMlsdRequestsByRequestIdResponse = + zSchemaImagePreprocessorsMlsdOutput + +export const zGetFalAiImagePreprocessorsSamRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImagePreprocessorsSamRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImagePreprocessorsSamRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImagePreprocessorsSamRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImagePreprocessorsSamData = z.object({ + body: zSchemaImagePreprocessorsSamInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImagePreprocessorsSamResponse = zSchemaQueueStatus + +export const zGetFalAiImagePreprocessorsSamRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiImagePreprocessorsSamRequestsByRequestIdResponse = + zSchemaImagePreprocessorsSamOutput + +export const zGetFalAiImagePreprocessorsMidasRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImagePreprocessorsMidasRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImagePreprocessorsMidasRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiImagePreprocessorsMidasRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImagePreprocessorsMidasData = z.object({ + body: zSchemaImagePreprocessorsMidasInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImagePreprocessorsMidasResponse = zSchemaQueueStatus + +export const zGetFalAiImagePreprocessorsMidasRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. + */ +export const zGetFalAiImagePreprocessorsMidasRequestsByRequestIdResponse = + zSchemaImagePreprocessorsMidasOutput + +export const zGetFalAiImagePreprocessorsTeedRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImagePreprocessorsTeedRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImagePreprocessorsTeedRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImagePreprocessorsTeedRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImagePreprocessorsTeedData = z.object({ + body: zSchemaImagePreprocessorsTeedInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImagePreprocessorsTeedResponse = zSchemaQueueStatus + +export const zGetFalAiImagePreprocessorsTeedRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiImagePreprocessorsTeedRequestsByRequestIdResponse = + zSchemaImagePreprocessorsTeedOutput + +export const zGetFalAiImagePreprocessorsLineartRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiImagePreprocessorsLineartRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImagePreprocessorsLineartRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImagePreprocessorsLineartRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImagePreprocessorsLineartData = z.object({ + body: zSchemaImagePreprocessorsLineartInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImagePreprocessorsLineartResponse = zSchemaQueueStatus + +export const zGetFalAiImagePreprocessorsLineartRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiImagePreprocessorsLineartRequestsByRequestIdResponse = + zSchemaImagePreprocessorsLineartOutput + +export const zGetFalAiImagePreprocessorsZoeRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImagePreprocessorsZoeRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImagePreprocessorsZoeRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImagePreprocessorsZoeRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImagePreprocessorsZoeData = z.object({ + body: zSchemaImagePreprocessorsZoeInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImagePreprocessorsZoeResponse = zSchemaQueueStatus + +export const zGetFalAiImagePreprocessorsZoeRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiImagePreprocessorsZoeRequestsByRequestIdResponse = + zSchemaImagePreprocessorsZoeOutput + +export const zGetFalAiImagePreprocessorsPidiRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImagePreprocessorsPidiRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImagePreprocessorsPidiRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImagePreprocessorsPidiRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImagePreprocessorsPidiData = z.object({ + body: zSchemaImagePreprocessorsPidiInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImagePreprocessorsPidiResponse = zSchemaQueueStatus + +export const zGetFalAiImagePreprocessorsPidiRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiImagePreprocessorsPidiRequestsByRequestIdResponse = + zSchemaImagePreprocessorsPidiOutput + +export const zGetFalAiSam2ImageRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiSam2ImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSam2ImageRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiSam2ImageRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSam2ImageData = z.object({ + body: zSchemaSam2ImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
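+ *
+ * A minimal submit sketch, assuming the conventional fal queue URL shape
+ * (`https://queue.fal.run/<endpoint-id>`); `input` is a placeholder payload
+ * shaped like zSchemaSam2ImageInput and `FAL_KEY` a placeholder credential.
+ *
+ * @example
+ * ```ts
+ * const submit = await fetch('https://queue.fal.run/fal-ai/sam2/image', {
+ *   method: 'POST',
+ *   headers: {
+ *     Authorization: `Key ${process.env.FAL_KEY}`,
+ *     'Content-Type': 'application/json',
+ *   },
+ *   body: JSON.stringify(input),
+ * })
+ * const queued = zPostFalAiSam2ImageResponse.parse(await submit.json())
+ * console.log(queued.request_id, queued.status)
+ * ```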
+ */ +export const zPostFalAiSam2ImageResponse = zSchemaQueueStatus + +export const zGetFalAiSam2ImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSam2ImageRequestsByRequestIdResponse = + zSchemaSam2ImageOutput + +export const zGetFalAiFluxGeneralImageToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFluxGeneralImageToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxGeneralImageToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxGeneralImageToImageRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxGeneralImageToImageData = z.object({ + body: zSchemaFluxGeneralImageToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxGeneralImageToImageResponse = zSchemaQueueStatus + +export const zGetFalAiFluxGeneralImageToImageRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. + */ +export const zGetFalAiFluxGeneralImageToImageRequestsByRequestIdResponse = + zSchemaFluxGeneralImageToImageOutput + +export const zGetFalAiFluxGeneralInpaintingRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFluxGeneralInpaintingRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxGeneralInpaintingRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
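+ *
+ * A minimal cancellation sketch, assuming `queued` is a previously parsed
+ * zSchemaQueueStatus whose optional `cancel_url` was populated by the queue.
+ *
+ * @example
+ * ```ts
+ * if (queued.cancel_url) {
+ *   const res = await fetch(queued.cancel_url, {
+ *     method: 'PUT',
+ *     headers: { Authorization: `Key ${process.env.FAL_KEY}` },
+ *   })
+ *   const { success } =
+ *     zPutFalAiFluxGeneralInpaintingRequestsByRequestIdCancelResponse.parse(
+ *       await res.json(),
+ *     )
+ *   if (!success) console.warn('request was not cancelled')
+ * }
+ * ```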
+ */ +export const zPutFalAiFluxGeneralInpaintingRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxGeneralInpaintingData = z.object({ + body: zSchemaFluxGeneralInpaintingInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxGeneralInpaintingResponse = zSchemaQueueStatus + +export const zGetFalAiFluxGeneralInpaintingRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFluxGeneralInpaintingRequestsByRequestIdResponse = + zSchemaFluxGeneralInpaintingOutput + +export const zGetFalAiFluxGeneralDifferentialDiffusionRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFluxGeneralDifferentialDiffusionRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxGeneralDifferentialDiffusionRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxGeneralDifferentialDiffusionRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxGeneralDifferentialDiffusionData = z.object({ + body: zSchemaFluxGeneralDifferentialDiffusionInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxGeneralDifferentialDiffusionResponse = + zSchemaQueueStatus + +export const zGetFalAiFluxGeneralDifferentialDiffusionRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiFluxGeneralDifferentialDiffusionRequestsByRequestIdResponse = + zSchemaFluxGeneralDifferentialDiffusionOutput + +export const zGetFalAiFluxLoraImageToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiFluxLoraImageToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxLoraImageToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxLoraImageToImageRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxLoraImageToImageData = z.object({ + body: zSchemaFluxLoraImageToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxLoraImageToImageResponse = zSchemaQueueStatus + +export const zGetFalAiFluxLoraImageToImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFluxLoraImageToImageRequestsByRequestIdResponse = + zSchemaFluxLoraImageToImageOutput + +export const zGetFalAiSdxlControlnetUnionInpaintingRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiSdxlControlnetUnionInpaintingRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSdxlControlnetUnionInpaintingRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiSdxlControlnetUnionInpaintingRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSdxlControlnetUnionInpaintingData = z.object({ + body: zSchemaSdxlControlnetUnionInpaintingInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiSdxlControlnetUnionInpaintingResponse = + zSchemaQueueStatus + +export const zGetFalAiSdxlControlnetUnionInpaintingRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiSdxlControlnetUnionInpaintingRequestsByRequestIdResponse = + zSchemaSdxlControlnetUnionInpaintingOutput + +export const zGetFalAiSdxlControlnetUnionImageToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiSdxlControlnetUnionImageToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSdxlControlnetUnionImageToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiSdxlControlnetUnionImageToImageRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSdxlControlnetUnionImageToImageData = z.object({ + body: zSchemaSdxlControlnetUnionImageToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiSdxlControlnetUnionImageToImageResponse = + zSchemaQueueStatus + +export const zGetFalAiSdxlControlnetUnionImageToImageRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiSdxlControlnetUnionImageToImageRequestsByRequestIdResponse = + zSchemaSdxlControlnetUnionImageToImageOutput + +export const zGetFalAiEra3dRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiEra3dRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiEra3dRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiEra3dRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiEra3dData = z.object({ + body: zSchemaEra3dInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
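+ *
+ * A minimal sketch of pairing the request envelope with this acknowledgement
+ * schema: zPostFalAiEra3dData checks `body` against zSchemaEra3dInput while
+ * requiring `path` and `query` to stay absent. The endpoint id in the URL is
+ * inferred from the schema name and may differ; `rawInput` is a placeholder.
+ *
+ * @example
+ * ```ts
+ * const { body } = zPostFalAiEra3dData.parse({ body: rawInput })
+ * const submit = await fetch('https://queue.fal.run/fal-ai/era-3d', {
+ *   method: 'POST',
+ *   headers: {
+ *     Authorization: `Key ${process.env.FAL_KEY}`,
+ *     'Content-Type': 'application/json',
+ *   },
+ *   body: JSON.stringify(body),
+ * })
+ * const queued = zPostFalAiEra3dResponse.parse(await submit.json())
+ * ```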
+ */ +export const zPostFalAiEra3dResponse = zSchemaQueueStatus + +export const zGetFalAiEra3dRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiEra3dRequestsByRequestIdResponse = zSchemaEra3dOutput + +export const zGetFalAiFlorence2LargeDenseRegionCaptionRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFlorence2LargeDenseRegionCaptionRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlorence2LargeDenseRegionCaptionRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlorence2LargeDenseRegionCaptionRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlorence2LargeDenseRegionCaptionData = z.object({ + body: zSchemaFlorence2LargeDenseRegionCaptionInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlorence2LargeDenseRegionCaptionResponse = + zSchemaQueueStatus + +export const zGetFalAiFlorence2LargeDenseRegionCaptionRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiFlorence2LargeDenseRegionCaptionRequestsByRequestIdResponse = + zSchemaFlorence2LargeDenseRegionCaptionOutput + +export const zGetFalAiFlorence2LargeReferringExpressionSegmentationRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFlorence2LargeReferringExpressionSegmentationRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlorence2LargeReferringExpressionSegmentationRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiFlorence2LargeReferringExpressionSegmentationRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlorence2LargeReferringExpressionSegmentationData = + z.object({ + body: zSchemaFlorence2LargeReferringExpressionSegmentationInput, + path: z.optional(z.never()), + query: z.optional(z.never()), + }) + +/** + * The request status. + */ +export const zPostFalAiFlorence2LargeReferringExpressionSegmentationResponse = + zSchemaQueueStatus + +export const zGetFalAiFlorence2LargeReferringExpressionSegmentationRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiFlorence2LargeReferringExpressionSegmentationRequestsByRequestIdResponse = + zSchemaFlorence2LargeReferringExpressionSegmentationOutput + +export const zGetFalAiFlorence2LargeObjectDetectionRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFlorence2LargeObjectDetectionRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlorence2LargeObjectDetectionRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlorence2LargeObjectDetectionRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlorence2LargeObjectDetectionData = z.object({ + body: zSchemaFlorence2LargeObjectDetectionInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlorence2LargeObjectDetectionResponse = + zSchemaQueueStatus + +export const zGetFalAiFlorence2LargeObjectDetectionRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
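+ *
+ * A minimal result-fetch sketch, assuming the queue reported COMPLETED and
+ * populated the optional `response_url` on the parsed status object.
+ *
+ * @example
+ * ```ts
+ * if (queued.status === 'COMPLETED' && queued.response_url) {
+ *   const res = await fetch(queued.response_url, {
+ *     headers: { Authorization: `Key ${process.env.FAL_KEY}` },
+ *   })
+ *   const detections =
+ *     zGetFalAiFlorence2LargeObjectDetectionRequestsByRequestIdResponse.parse(
+ *       await res.json(),
+ *     )
+ *   console.log(detections)
+ * }
+ * ```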
+ */ +export const zGetFalAiFlorence2LargeObjectDetectionRequestsByRequestIdResponse = + zSchemaFlorence2LargeObjectDetectionOutput + +export const zGetFalAiFlorence2LargeOpenVocabularyDetectionRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFlorence2LargeOpenVocabularyDetectionRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlorence2LargeOpenVocabularyDetectionRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlorence2LargeOpenVocabularyDetectionRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlorence2LargeOpenVocabularyDetectionData = z.object({ + body: zSchemaFlorence2LargeOpenVocabularyDetectionInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlorence2LargeOpenVocabularyDetectionResponse = + zSchemaQueueStatus + +export const zGetFalAiFlorence2LargeOpenVocabularyDetectionRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiFlorence2LargeOpenVocabularyDetectionRequestsByRequestIdResponse = + zSchemaFlorence2LargeOpenVocabularyDetectionOutput + +export const zGetFalAiFlorence2LargeCaptionToPhraseGroundingRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFlorence2LargeCaptionToPhraseGroundingRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlorence2LargeCaptionToPhraseGroundingRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiFlorence2LargeCaptionToPhraseGroundingRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlorence2LargeCaptionToPhraseGroundingData = z.object({ + body: zSchemaFlorence2LargeCaptionToPhraseGroundingInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlorence2LargeCaptionToPhraseGroundingResponse = + zSchemaQueueStatus + +export const zGetFalAiFlorence2LargeCaptionToPhraseGroundingRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiFlorence2LargeCaptionToPhraseGroundingRequestsByRequestIdResponse = + zSchemaFlorence2LargeCaptionToPhraseGroundingOutput + +export const zGetFalAiFlorence2LargeRegionProposalRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFlorence2LargeRegionProposalRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlorence2LargeRegionProposalRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlorence2LargeRegionProposalRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlorence2LargeRegionProposalData = z.object({ + body: zSchemaFlorence2LargeRegionProposalInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlorence2LargeRegionProposalResponse = zSchemaQueueStatus + +export const zGetFalAiFlorence2LargeRegionProposalRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiFlorence2LargeRegionProposalRequestsByRequestIdResponse = + zSchemaFlorence2LargeRegionProposalOutput + +export const zGetFalAiFlorence2LargeOcrWithRegionRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFlorence2LargeOcrWithRegionRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlorence2LargeOcrWithRegionRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlorence2LargeOcrWithRegionRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlorence2LargeOcrWithRegionData = z.object({ + body: zSchemaFlorence2LargeOcrWithRegionInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlorence2LargeOcrWithRegionResponse = zSchemaQueueStatus + +export const zGetFalAiFlorence2LargeOcrWithRegionRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiFlorence2LargeOcrWithRegionRequestsByRequestIdResponse = + zSchemaFlorence2LargeOcrWithRegionOutput + +export const zGetFalAiFlorence2LargeRegionToSegmentationRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFlorence2LargeRegionToSegmentationRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlorence2LargeRegionToSegmentationRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiFlorence2LargeRegionToSegmentationRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlorence2LargeRegionToSegmentationData = z.object({ + body: zSchemaFlorence2LargeRegionToSegmentationInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlorence2LargeRegionToSegmentationResponse = + zSchemaQueueStatus + +export const zGetFalAiFlorence2LargeRegionToSegmentationRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiFlorence2LargeRegionToSegmentationRequestsByRequestIdResponse = + zSchemaFlorence2LargeRegionToSegmentationOutput + +export const zGetFalAiStableDiffusionV3MediumImageToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiStableDiffusionV3MediumImageToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiStableDiffusionV3MediumImageToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiStableDiffusionV3MediumImageToImageRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiStableDiffusionV3MediumImageToImageData = z.object({ + body: zSchemaStableDiffusionV3MediumImageToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiStableDiffusionV3MediumImageToImageResponse = + zSchemaQueueStatus + +export const zGetFalAiStableDiffusionV3MediumImageToImageRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiStableDiffusionV3MediumImageToImageRequestsByRequestIdResponse = + zSchemaStableDiffusionV3MediumImageToImageOutput + +export const zGetFalAiDwposeRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiDwposeRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiDwposeRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiDwposeRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiDwposeData = z.object({ + body: zSchemaDwposeInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiDwposeResponse = zSchemaQueueStatus + +export const zGetFalAiDwposeRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiDwposeRequestsByRequestIdResponse = zSchemaDwposeOutput + +export const zGetFalAiSd15DepthControlnetRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiSd15DepthControlnetRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSd15DepthControlnetRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiSd15DepthControlnetRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSd15DepthControlnetData = z.object({ + body: zSchemaSd15DepthControlnetInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
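+ *
+ * A naive polling sketch built on these schemas; the fixed 1 s delay and the
+ * non-null assertion on `status_url` are illustration shortcuts, and `submit`
+ * is a placeholder fetch Response from POSTing a body validated with
+ * zPostFalAiSd15DepthControlnetData.
+ *
+ * @example
+ * ```ts
+ * let state = zPostFalAiSd15DepthControlnetResponse.parse(await submit.json())
+ * while (state.status !== 'COMPLETED') {
+ *   await new Promise((r) => setTimeout(r, 1_000))
+ *   const res = await fetch(state.status_url!, {
+ *     headers: { Authorization: `Key ${process.env.FAL_KEY}` },
+ *   })
+ *   state = zSchemaQueueStatus.parse(await res.json())
+ * }
+ * ```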
+ */ +export const zPostFalAiSd15DepthControlnetResponse = zSchemaQueueStatus + +export const zGetFalAiSd15DepthControlnetRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSd15DepthControlnetRequestsByRequestIdResponse = + zSchemaSd15DepthControlnetOutput + +export const zGetFalAiCcsrRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiCcsrRequestsByRequestIdStatusResponse = zSchemaQueueStatus + +export const zPutFalAiCcsrRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiCcsrRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiCcsrData = z.object({ + body: zSchemaCcsrInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiCcsrResponse = zSchemaQueueStatus + +export const zGetFalAiCcsrRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiCcsrRequestsByRequestIdResponse = zSchemaCcsrOutput + +export const zGetFalAiOmniZeroRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiOmniZeroRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiOmniZeroRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiOmniZeroRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiOmniZeroData = z.object({ + body: zSchemaOmniZeroInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiOmniZeroResponse = zSchemaQueueStatus + +export const zGetFalAiOmniZeroRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiOmniZeroRequestsByRequestIdResponse = + zSchemaOmniZeroOutput + +export const zGetFalAiIpAdapterFaceIdRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiIpAdapterFaceIdRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiIpAdapterFaceIdRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiIpAdapterFaceIdRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiIpAdapterFaceIdData = z.object({ + body: zSchemaIpAdapterFaceIdInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiIpAdapterFaceIdResponse = zSchemaQueueStatus + +export const zGetFalAiIpAdapterFaceIdRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiIpAdapterFaceIdRequestsByRequestIdResponse = + zSchemaIpAdapterFaceIdOutput + +export const zGetFalAiLoraInpaintRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiLoraInpaintRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLoraInpaintRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiLoraInpaintRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLoraInpaintData = z.object({ + body: zSchemaLoraInpaintInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLoraInpaintResponse = zSchemaQueueStatus + +export const zGetFalAiLoraInpaintRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLoraInpaintRequestsByRequestIdResponse = + zSchemaLoraInpaintOutput + +export const zGetFalAiLoraImageToImageRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiLoraImageToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLoraImageToImageRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiLoraImageToImageRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLoraImageToImageData = z.object({ + body: zSchemaLoraImageToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLoraImageToImageResponse = zSchemaQueueStatus + +export const zGetFalAiLoraImageToImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiLoraImageToImageRequestsByRequestIdResponse = + zSchemaLoraImageToImageOutput + +export const zGetFalAiFastSdxlImageToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFastSdxlImageToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFastSdxlImageToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFastSdxlImageToImageRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFastSdxlImageToImageData = z.object({ + body: zSchemaFastSdxlImageToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFastSdxlImageToImageResponse = zSchemaQueueStatus + +export const zGetFalAiFastSdxlImageToImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFastSdxlImageToImageRequestsByRequestIdResponse = + zSchemaFastSdxlImageToImageOutput + +export const zGetFalAiFastSdxlInpaintingRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFastSdxlInpaintingRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFastSdxlInpaintingRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFastSdxlInpaintingRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFastSdxlInpaintingData = z.object({ + body: zSchemaFastSdxlInpaintingInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiFastSdxlInpaintingResponse = zSchemaQueueStatus + +export const zGetFalAiFastSdxlInpaintingRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFastSdxlInpaintingRequestsByRequestIdResponse = + zSchemaFastSdxlInpaintingOutput + +export const zGetFalAiFaceToStickerRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFaceToStickerRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFaceToStickerRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFaceToStickerRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFaceToStickerData = z.object({ + body: zSchemaFaceToStickerInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFaceToStickerResponse = zSchemaQueueStatus + +export const zGetFalAiFaceToStickerRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFaceToStickerRequestsByRequestIdResponse = + zSchemaFaceToStickerOutput + +export const zGetFalAiPhotomakerRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiPhotomakerRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPhotomakerRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiPhotomakerRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPhotomakerData = z.object({ + body: zSchemaPhotomakerInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPhotomakerResponse = zSchemaQueueStatus + +export const zGetFalAiPhotomakerRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPhotomakerRequestsByRequestIdResponse = + zSchemaPhotomakerOutput + +export const zGetFalAiCreativeUpscalerRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiCreativeUpscalerRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiCreativeUpscalerRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiCreativeUpscalerRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiCreativeUpscalerData = z.object({ + body: zSchemaCreativeUpscalerInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiCreativeUpscalerResponse = zSchemaQueueStatus + +export const zGetFalAiCreativeUpscalerRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiCreativeUpscalerRequestsByRequestIdResponse = + zSchemaCreativeUpscalerOutput + +export const zGetFalAiBirefnetRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiBirefnetRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiBirefnetRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiBirefnetRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiBirefnetData = z.object({ + body: zSchemaBirefnetInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiBirefnetResponse = zSchemaQueueStatus + +export const zGetFalAiBirefnetRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiBirefnetRequestsByRequestIdResponse = + zSchemaBirefnetOutput + +export const zGetFalAiPlaygroundV25ImageToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPlaygroundV25ImageToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPlaygroundV25ImageToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPlaygroundV25ImageToImageRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPlaygroundV25ImageToImageData = z.object({ + body: zSchemaPlaygroundV25ImageToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPlaygroundV25ImageToImageResponse = zSchemaQueueStatus + +export const zGetFalAiPlaygroundV25ImageToImageRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiPlaygroundV25ImageToImageRequestsByRequestIdResponse = + zSchemaPlaygroundV25ImageToImageOutput + +export const zGetFalAiFastLightningSdxlImageToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFastLightningSdxlImageToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFastLightningSdxlImageToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFastLightningSdxlImageToImageRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFastLightningSdxlImageToImageData = z.object({ + body: zSchemaFastLightningSdxlImageToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFastLightningSdxlImageToImageResponse = + zSchemaQueueStatus + +export const zGetFalAiFastLightningSdxlImageToImageRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiFastLightningSdxlImageToImageRequestsByRequestIdResponse = + zSchemaFastLightningSdxlImageToImageOutput + +export const zGetFalAiFastLightningSdxlInpaintingRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFastLightningSdxlInpaintingRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFastLightningSdxlInpaintingRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiFastLightningSdxlInpaintingRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFastLightningSdxlInpaintingData = z.object({ + body: zSchemaFastLightningSdxlInpaintingInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFastLightningSdxlInpaintingResponse = zSchemaQueueStatus + +export const zGetFalAiFastLightningSdxlInpaintingRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiFastLightningSdxlInpaintingRequestsByRequestIdResponse = + zSchemaFastLightningSdxlInpaintingOutput + +export const zGetFalAiPlaygroundV25InpaintingRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPlaygroundV25InpaintingRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPlaygroundV25InpaintingRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPlaygroundV25InpaintingRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPlaygroundV25InpaintingData = z.object({ + body: zSchemaPlaygroundV25InpaintingInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPlaygroundV25InpaintingResponse = zSchemaQueueStatus + +export const zGetFalAiPlaygroundV25InpaintingRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. + */ +export const zGetFalAiPlaygroundV25InpaintingRequestsByRequestIdResponse = + zSchemaPlaygroundV25InpaintingOutput + +export const zGetFalAiFastLcmDiffusionInpaintingRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiFastLcmDiffusionInpaintingRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFastLcmDiffusionInpaintingRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFastLcmDiffusionInpaintingRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFastLcmDiffusionInpaintingData = z.object({ + body: zSchemaFastLcmDiffusionInpaintingInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFastLcmDiffusionInpaintingResponse = zSchemaQueueStatus + +export const zGetFalAiFastLcmDiffusionInpaintingRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiFastLcmDiffusionInpaintingRequestsByRequestIdResponse = + zSchemaFastLcmDiffusionInpaintingOutput + +export const zGetFalAiFastLcmDiffusionImageToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFastLcmDiffusionImageToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFastLcmDiffusionImageToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFastLcmDiffusionImageToImageRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFastLcmDiffusionImageToImageData = z.object({ + body: zSchemaFastLcmDiffusionImageToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFastLcmDiffusionImageToImageResponse = zSchemaQueueStatus + +export const zGetFalAiFastLcmDiffusionImageToImageRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiFastLcmDiffusionImageToImageRequestsByRequestIdResponse = + zSchemaFastLcmDiffusionImageToImageOutput + +export const zGetFalAiRetoucherRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiRetoucherRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiRetoucherRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiRetoucherRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiRetoucherData = z.object({ + body: zSchemaRetoucherInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiRetoucherResponse = zSchemaQueueStatus + +export const zGetFalAiRetoucherRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiRetoucherRequestsByRequestIdResponse = + zSchemaRetoucherOutput + +export const zGetFalAiImageutilsDepthRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiImageutilsDepthRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageutilsDepthRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiImageutilsDepthRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageutilsDepthData = z.object({ + body: zSchemaImageutilsDepthInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
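+ * Submission returns queue metadata only; the generated output is fetched
+ * separately from the requests endpoint once processing completes.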
+ */ +export const zPostFalAiImageutilsDepthResponse = zSchemaQueueStatus + +export const zGetFalAiImageutilsDepthRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiImageutilsDepthRequestsByRequestIdResponse = + zSchemaImageutilsDepthOutput + +export const zGetFalAiImageutilsMarigoldDepthRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImageutilsMarigoldDepthRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageutilsMarigoldDepthRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImageutilsMarigoldDepthRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageutilsMarigoldDepthData = z.object({ + body: zSchemaImageutilsMarigoldDepthInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImageutilsMarigoldDepthResponse = zSchemaQueueStatus + +export const zGetFalAiImageutilsMarigoldDepthRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. + */ +export const zGetFalAiImageutilsMarigoldDepthRequestsByRequestIdResponse = + zSchemaImageutilsMarigoldDepthOutput + +export const zGetFalAiPulidRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiPulidRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPulidRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
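+ * The optional `success` flag reports whether the cancellation took effect.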
+ */ +export const zPutFalAiPulidRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPulidData = z.object({ + body: zSchemaPulidInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPulidResponse = zSchemaQueueStatus + +export const zGetFalAiPulidRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPulidRequestsByRequestIdResponse = zSchemaPulidOutput + +export const zGetFalAiFastSdxlControlnetCannyImageToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFastSdxlControlnetCannyImageToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFastSdxlControlnetCannyImageToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFastSdxlControlnetCannyImageToImageRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFastSdxlControlnetCannyImageToImageData = z.object({ + body: zSchemaFastSdxlControlnetCannyImageToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFastSdxlControlnetCannyImageToImageResponse = + zSchemaQueueStatus + +export const zGetFalAiFastSdxlControlnetCannyImageToImageRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiFastSdxlControlnetCannyImageToImageRequestsByRequestIdResponse = + zSchemaFastSdxlControlnetCannyImageToImageOutput + +export const zGetFalAiFastSdxlControlnetCannyInpaintingRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiFastSdxlControlnetCannyInpaintingRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFastSdxlControlnetCannyInpaintingRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFastSdxlControlnetCannyInpaintingRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFastSdxlControlnetCannyInpaintingData = z.object({ + body: zSchemaFastSdxlControlnetCannyInpaintingInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFastSdxlControlnetCannyInpaintingResponse = + zSchemaQueueStatus + +export const zGetFalAiFastSdxlControlnetCannyInpaintingRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiFastSdxlControlnetCannyInpaintingRequestsByRequestIdResponse = + zSchemaFastSdxlControlnetCannyInpaintingOutput + +export const zGetFalAiLcmSd15I2iRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiLcmSd15I2iRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLcmSd15I2iRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiLcmSd15I2iRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLcmSd15I2iData = z.object({ + body: zSchemaLcmSd15I2iInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLcmSd15I2iResponse = zSchemaQueueStatus + +export const zGetFalAiLcmSd15I2iRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiLcmSd15I2iRequestsByRequestIdResponse = + zSchemaLcmSd15I2iOutput + +export const zGetFalAiInpaintRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiInpaintRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiInpaintRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiInpaintRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiInpaintData = z.object({ + body: zSchemaInpaintInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiInpaintResponse = zSchemaQueueStatus + +export const zGetFalAiInpaintRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiInpaintRequestsByRequestIdResponse = zSchemaInpaintOutput + +export const zGetFalAiEsrganRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiEsrganRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiEsrganRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiEsrganRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiEsrganData = z.object({ + body: zSchemaEsrganInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiEsrganResponse = zSchemaQueueStatus + +export const zGetFalAiEsrganRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiEsrganRequestsByRequestIdResponse = zSchemaEsrganOutput + +export const zGetFalAiImageutilsRembgRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiImageutilsRembgRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageutilsRembgRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiImageutilsRembgRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageutilsRembgData = z.object({ + body: zSchemaImageutilsRembgInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImageutilsRembgResponse = zSchemaQueueStatus + +export const zGetFalAiImageutilsRembgRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */
+export const zGetFalAiImageutilsRembgRequestsByRequestIdResponse =
+  zSchemaImageutilsRembgOutput
diff --git a/packages/typescript/ai-fal/src/generated/image-to-json/endpoint-map.ts b/packages/typescript/ai-fal/src/generated/image-to-json/endpoint-map.ts
new file mode 100644
index 00000000..f4685257
--- /dev/null
+++ b/packages/typescript/ai-fal/src/generated/image-to-json/endpoint-map.ts
@@ -0,0 +1,36 @@
+// AUTO-GENERATED - Do not edit manually
+// Generated from types.gen.ts via scripts/generate-fal-endpoint-maps.ts
+
+import { zBagelUnderstandInput, zBagelUnderstandOutput } from './zod.gen'
+
+import type { BagelUnderstandInput, BagelUnderstandOutput } from './types.gen'
+
+import type { z } from 'zod'
+
+export type ImageToJsonEndpointMap = {
+  'fal-ai/bagel/understand': {
+    input: BagelUnderstandInput
+    output: BagelUnderstandOutput
+  }
+}
+
+/** Union type of all image-to-json model endpoint IDs */
+export type ImageToJsonModel = keyof ImageToJsonEndpointMap
+
+export const ImageToJsonSchemaMap: Record<
+  ImageToJsonModel,
+  { input: z.ZodSchema; output: z.ZodSchema }
+> = {
+  ['fal-ai/bagel/understand']: {
+    input: zBagelUnderstandInput,
+    output: zBagelUnderstandOutput,
+  },
+} as const
+
+/** Get the input type for a specific image-to-json model */
+export type ImageToJsonModelInput<T extends ImageToJsonModel> =
+  ImageToJsonEndpointMap[T]['input']
+
+/** Get the output type for a specific image-to-json model */
+export type ImageToJsonModelOutput<T extends ImageToJsonModel> =
+  ImageToJsonEndpointMap[T]['output']
diff --git a/packages/typescript/ai-fal/src/generated/image-to-json/types.gen.ts b/packages/typescript/ai-fal/src/generated/image-to-json/types.gen.ts
new file mode 100644
index 00000000..465c8233
--- /dev/null
+++ b/packages/typescript/ai-fal/src/generated/image-to-json/types.gen.ts
@@ -0,0 +1,190 @@
+// This file is auto-generated by @hey-api/openapi-ts
+
+export type ClientOptions = {
+  baseUrl: 'https://queue.fal.run' | (string & {})
+}
+
+/**
+ * TextOutput
+ */
+export type BagelUnderstandOutput = {
+  /**
+   * Text
+   *
+   * The answer to the query.
+   */
+  text: string
+  /**
+   * Prompt
+   *
+   * The query used for the generation.
+   */
+  prompt: string
+  /**
+   * Seed
+   *
+   * The seed used for the generation.
+   */
+  seed: number
+  /**
+   * Timings
+   *
+   * The timings of the generation.
+   */
+  timings: {
+    [key: string]: unknown
+  }
+}
+
+/**
+ * ImageUnderstandingInput
+ */
+export type BagelUnderstandInput = {
+  /**
+   * Prompt
+   *
+   * The prompt to query the image with.
+   */
+  prompt: string
+  /**
+   * Seed
+   *
+   * The seed to use for the generation.
+   */
+  seed?: number
+  /**
+   * Image Url
+   *
+   * The image for the query.
+   */
+  image_url: string
+}
+
+export type QueueStatus = {
+  status: 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED'
+  /**
+   * The request id.
+   */
+  request_id: string
+  /**
+   * The response url.
+   */
+  response_url?: string
+  /**
+   * The status url.
+   */
+  status_url?: string
+  /**
+   * The cancel url.
+   */
+  cancel_url?: string
+  /**
+   * The logs.
+   */
+  logs?: {
+    [key: string]: unknown
+  }
+  /**
+   * The metrics.
+   */
+  metrics?: {
+    [key: string]: unknown
+  }
+  /**
+   * The queue position.
+   */
+  queue_position?: number
+}
+
+export type GetFalAiBagelUnderstandRequestsByRequestIdStatusData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: {
+    /**
+     * Whether to include logs (`1`) in the response or not (`0`).
+ */ + logs?: number + } + url: '/fal-ai/bagel/understand/requests/{request_id}/status' +} + +export type GetFalAiBagelUnderstandRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: QueueStatus +} + +export type GetFalAiBagelUnderstandRequestsByRequestIdStatusResponse = + GetFalAiBagelUnderstandRequestsByRequestIdStatusResponses[keyof GetFalAiBagelUnderstandRequestsByRequestIdStatusResponses] + +export type PutFalAiBagelUnderstandRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bagel/understand/requests/{request_id}/cancel' +} + +export type PutFalAiBagelUnderstandRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiBagelUnderstandRequestsByRequestIdCancelResponse = + PutFalAiBagelUnderstandRequestsByRequestIdCancelResponses[keyof PutFalAiBagelUnderstandRequestsByRequestIdCancelResponses] + +export type PostFalAiBagelUnderstandData = { + body: BagelUnderstandInput + path?: never + query?: never + url: '/fal-ai/bagel/understand' +} + +export type PostFalAiBagelUnderstandResponses = { + /** + * The request status. + */ + 200: QueueStatus +} + +export type PostFalAiBagelUnderstandResponse = + PostFalAiBagelUnderstandResponses[keyof PostFalAiBagelUnderstandResponses] + +export type GetFalAiBagelUnderstandRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bagel/understand/requests/{request_id}' +} + +export type GetFalAiBagelUnderstandRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: BagelUnderstandOutput +} + +export type GetFalAiBagelUnderstandRequestsByRequestIdResponse = + GetFalAiBagelUnderstandRequestsByRequestIdResponses[keyof GetFalAiBagelUnderstandRequestsByRequestIdResponses] diff --git a/packages/typescript/ai-fal/src/generated/image-to-json/zod.gen.ts b/packages/typescript/ai-fal/src/generated/image-to-json/zod.gen.ts new file mode 100644 index 00000000..8cf01ea7 --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/image-to-json/zod.gen.ts @@ -0,0 +1,152 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { z } from 'zod' + +/** + * TextOutput + */ +export const zBagelUnderstandOutput = z.object({ + text: z.string().register(z.globalRegistry, { + description: 'The answer to the query.', + }), + prompt: z.string().register(z.globalRegistry, { + description: 'The query used for the generation.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for the generation.', + }), + timings: z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The timings of the generation.', + }), +}) + +/** + * ImageUnderstandingInput + */ +export const zBagelUnderstandInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to query the image with.', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed to use for the generation.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The image for the query.', + }), +}) + +export const zQueueStatus = z.object({ + status: z.enum(['IN_QUEUE', 'IN_PROGRESS', 'COMPLETED']), + request_id: z.string().register(z.globalRegistry, { + description: 'The request id.', + }), + response_url: z.optional( + 
z.string().register(z.globalRegistry, { + description: 'The response url.', + }), + ), + status_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The status url.', + }), + ), + cancel_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The cancel url.', + }), + ), + logs: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The logs.', + }), + ), + metrics: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The metrics.', + }), + ), + queue_position: z.optional( + z.int().register(z.globalRegistry, { + description: 'The queue position.', + }), + ), +}) + +export const zGetFalAiBagelUnderstandRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiBagelUnderstandRequestsByRequestIdStatusResponse = + zQueueStatus + +export const zPutFalAiBagelUnderstandRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiBagelUnderstandRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiBagelUnderstandData = z.object({ + body: zBagelUnderstandInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiBagelUnderstandResponse = zQueueStatus + +export const zGetFalAiBagelUnderstandRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
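+ * For `fal-ai/bagel/understand` this is the TextOutput shape: `text`,
+ * `prompt`, `seed`, and `timings`.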
+ */ +export const zGetFalAiBagelUnderstandRequestsByRequestIdResponse = + zBagelUnderstandOutput diff --git a/packages/typescript/ai-fal/src/generated/image-to-video/endpoint-map.ts b/packages/typescript/ai-fal/src/generated/image-to-video/endpoint-map.ts new file mode 100644 index 00000000..f848b934 --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/image-to-video/endpoint-map.ts @@ -0,0 +1,1842 @@ +// AUTO-GENERATED - Do not edit manually +// Generated from types.gen.ts via scripts/generate-fal-endpoint-maps.ts + +import { + zSchemaAiAvatarInput, + zSchemaAiAvatarMultiInput, + zSchemaAiAvatarMultiOutput, + zSchemaAiAvatarMultiTextInput, + zSchemaAiAvatarMultiTextOutput, + zSchemaAiAvatarOutput, + zSchemaAiAvatarSingleTextInput, + zSchemaAiAvatarSingleTextOutput, + zSchemaAmtInterpolationFrameInterpolationInput, + zSchemaAmtInterpolationFrameInterpolationOutput, + zSchemaBytedanceOmnihumanInput, + zSchemaBytedanceOmnihumanOutput, + zSchemaBytedanceOmnihumanV15Input, + zSchemaBytedanceOmnihumanV15Output, + zSchemaBytedanceSeedanceV15ProImageToVideoInput, + zSchemaBytedanceSeedanceV15ProImageToVideoOutput, + zSchemaBytedanceSeedanceV1LiteImageToVideoInput, + zSchemaBytedanceSeedanceV1LiteImageToVideoOutput, + zSchemaBytedanceSeedanceV1LiteReferenceToVideoInput, + zSchemaBytedanceSeedanceV1LiteReferenceToVideoOutput, + zSchemaBytedanceSeedanceV1ProFastImageToVideoInput, + zSchemaBytedanceSeedanceV1ProFastImageToVideoOutput, + zSchemaBytedanceSeedanceV1ProImageToVideoInput, + zSchemaBytedanceSeedanceV1ProImageToVideoOutput, + zSchemaBytedanceVideoStylizeInput, + zSchemaBytedanceVideoStylizeOutput, + zSchemaCogvideox5bImageToVideoInput, + zSchemaCogvideox5bImageToVideoOutput, + zSchemaCreatifyAuroraInput, + zSchemaCreatifyAuroraOutput, + zSchemaDecartLucy5bImageToVideoInput, + zSchemaDecartLucy5bImageToVideoOutput, + zSchemaFabric10FastInput, + zSchemaFabric10FastOutput, + zSchemaFabric10Input, + zSchemaFabric10Output, + zSchemaFastSvdLcmInput, + zSchemaFastSvdLcmOutput, + zSchemaFramepackF1Input, + zSchemaFramepackF1Output, + zSchemaFramepackFlf2vInput, + zSchemaFramepackFlf2vOutput, + zSchemaFramepackInput, + zSchemaFramepackOutput, + zSchemaHunyuanAvatarInput, + zSchemaHunyuanAvatarOutput, + zSchemaHunyuanCustomInput, + zSchemaHunyuanCustomOutput, + zSchemaHunyuanPortraitInput, + zSchemaHunyuanPortraitOutput, + zSchemaHunyuanVideoImageToVideoInput, + zSchemaHunyuanVideoImageToVideoOutput, + zSchemaHunyuanVideoImg2VidLoraInput, + zSchemaHunyuanVideoImg2VidLoraOutput, + zSchemaHunyuanVideoV15ImageToVideoInput, + zSchemaHunyuanVideoV15ImageToVideoOutput, + zSchemaKandinsky5ProImageToVideoInput, + zSchemaKandinsky5ProImageToVideoOutput, + zSchemaKlingVideoAiAvatarV2ProInput, + zSchemaKlingVideoAiAvatarV2ProOutput, + zSchemaKlingVideoAiAvatarV2StandardInput, + zSchemaKlingVideoAiAvatarV2StandardOutput, + zSchemaKlingVideoO1ImageToVideoInput, + zSchemaKlingVideoO1ImageToVideoOutput, + zSchemaKlingVideoO1ReferenceToVideoInput, + zSchemaKlingVideoO1ReferenceToVideoOutput, + zSchemaKlingVideoO1StandardImageToVideoInput, + zSchemaKlingVideoO1StandardImageToVideoOutput, + zSchemaKlingVideoO1StandardReferenceToVideoInput, + zSchemaKlingVideoO1StandardReferenceToVideoOutput, + zSchemaKlingVideoV15ProImageToVideoInput, + zSchemaKlingVideoV15ProImageToVideoOutput, + zSchemaKlingVideoV16ProElementsInput, + zSchemaKlingVideoV16ProElementsOutput, + zSchemaKlingVideoV16ProImageToVideoInput, + zSchemaKlingVideoV16ProImageToVideoOutput, + zSchemaKlingVideoV16StandardElementsInput, + 
zSchemaKlingVideoV16StandardElementsOutput, + zSchemaKlingVideoV16StandardImageToVideoInput, + zSchemaKlingVideoV16StandardImageToVideoOutput, + zSchemaKlingVideoV1ProAiAvatarInput, + zSchemaKlingVideoV1ProAiAvatarOutput, + zSchemaKlingVideoV1StandardAiAvatarInput, + zSchemaKlingVideoV1StandardAiAvatarOutput, + zSchemaKlingVideoV1StandardImageToVideoInput, + zSchemaKlingVideoV1StandardImageToVideoOutput, + zSchemaKlingVideoV21MasterImageToVideoInput, + zSchemaKlingVideoV21MasterImageToVideoOutput, + zSchemaKlingVideoV21ProImageToVideoInput, + zSchemaKlingVideoV21ProImageToVideoOutput, + zSchemaKlingVideoV21StandardImageToVideoInput, + zSchemaKlingVideoV21StandardImageToVideoOutput, + zSchemaKlingVideoV25TurboProImageToVideoInput, + zSchemaKlingVideoV25TurboProImageToVideoOutput, + zSchemaKlingVideoV25TurboStandardImageToVideoInput, + zSchemaKlingVideoV25TurboStandardImageToVideoOutput, + zSchemaKlingVideoV26ProImageToVideoInput, + zSchemaKlingVideoV26ProImageToVideoOutput, + zSchemaKlingVideoV2MasterImageToVideoInput, + zSchemaKlingVideoV2MasterImageToVideoOutput, + zSchemaLiveAvatarInput, + zSchemaLiveAvatarOutput, + zSchemaLivePortraitInput, + zSchemaLivePortraitOutput, + zSchemaLongcatVideoDistilledImageToVideo480pInput, + zSchemaLongcatVideoDistilledImageToVideo480pOutput, + zSchemaLongcatVideoDistilledImageToVideo720pInput, + zSchemaLongcatVideoDistilledImageToVideo720pOutput, + zSchemaLongcatVideoImageToVideo480pInput, + zSchemaLongcatVideoImageToVideo480pOutput, + zSchemaLongcatVideoImageToVideo720pInput, + zSchemaLongcatVideoImageToVideo720pOutput, + zSchemaLtx219bDistilledImageToVideoInput, + zSchemaLtx219bDistilledImageToVideoLoraInput, + zSchemaLtx219bDistilledImageToVideoLoraOutput, + zSchemaLtx219bDistilledImageToVideoOutput, + zSchemaLtx219bImageToVideoInput, + zSchemaLtx219bImageToVideoLoraInput, + zSchemaLtx219bImageToVideoLoraOutput, + zSchemaLtx219bImageToVideoOutput, + zSchemaLtx2ImageToVideoFastInput, + zSchemaLtx2ImageToVideoFastOutput, + zSchemaLtx2ImageToVideoInput, + zSchemaLtx2ImageToVideoOutput, + zSchemaLtxVideo13bDevImageToVideoInput, + zSchemaLtxVideo13bDevImageToVideoOutput, + zSchemaLtxVideo13bDistilledImageToVideoInput, + zSchemaLtxVideo13bDistilledImageToVideoOutput, + zSchemaLtxVideoImageToVideoInput, + zSchemaLtxVideoImageToVideoOutput, + zSchemaLtxVideoLoraImageToVideoInput, + zSchemaLtxVideoLoraImageToVideoOutput, + zSchemaLtxv13B098DistilledImageToVideoInput, + zSchemaLtxv13B098DistilledImageToVideoOutput, + zSchemaLucy14bImageToVideoInput, + zSchemaLucy14bImageToVideoOutput, + zSchemaLumaDreamMachineRay2FlashImageToVideoInput, + zSchemaLumaDreamMachineRay2FlashImageToVideoOutput, + zSchemaLumaDreamMachineRay2ImageToVideoInput, + zSchemaLumaDreamMachineRay2ImageToVideoOutput, + zSchemaLynxInput, + zSchemaLynxOutput, + zSchemaMagiDistilledImageToVideoInput, + zSchemaMagiDistilledImageToVideoOutput, + zSchemaMagiImageToVideoInput, + zSchemaMagiImageToVideoOutput, + zSchemaMareyI2vInput, + zSchemaMareyI2vOutput, + zSchemaMinimaxHailuo02FastImageToVideoInput, + zSchemaMinimaxHailuo02FastImageToVideoOutput, + zSchemaMinimaxHailuo02ProImageToVideoInput, + zSchemaMinimaxHailuo02ProImageToVideoOutput, + zSchemaMinimaxHailuo02StandardImageToVideoInput, + zSchemaMinimaxHailuo02StandardImageToVideoOutput, + zSchemaMinimaxHailuo23FastProImageToVideoInput, + zSchemaMinimaxHailuo23FastProImageToVideoOutput, + zSchemaMinimaxHailuo23FastStandardImageToVideoInput, + zSchemaMinimaxHailuo23FastStandardImageToVideoOutput, + zSchemaMinimaxHailuo23ProImageToVideoInput, + 
zSchemaMinimaxHailuo23ProImageToVideoOutput, + zSchemaMinimaxHailuo23StandardImageToVideoInput, + zSchemaMinimaxHailuo23StandardImageToVideoOutput, + zSchemaMinimaxVideo01DirectorImageToVideoInput, + zSchemaMinimaxVideo01DirectorImageToVideoOutput, + zSchemaMinimaxVideo01ImageToVideoInput, + zSchemaMinimaxVideo01ImageToVideoOutput, + zSchemaMinimaxVideo01LiveImageToVideoInput, + zSchemaMinimaxVideo01LiveImageToVideoOutput, + zSchemaMinimaxVideo01SubjectReferenceInput, + zSchemaMinimaxVideo01SubjectReferenceOutput, + zSchemaMusetalkInput, + zSchemaMusetalkOutput, + zSchemaOviImageToVideoInput, + zSchemaOviImageToVideoOutput, + zSchemaPikaV15PikaffectsInput, + zSchemaPikaV15PikaffectsOutput, + zSchemaPikaV21ImageToVideoInput, + zSchemaPikaV21ImageToVideoOutput, + zSchemaPikaV22ImageToVideoInput, + zSchemaPikaV22ImageToVideoOutput, + zSchemaPikaV22PikaframesInput, + zSchemaPikaV22PikaframesOutput, + zSchemaPikaV22PikascenesInput, + zSchemaPikaV22PikascenesOutput, + zSchemaPikaV2TurboImageToVideoInput, + zSchemaPikaV2TurboImageToVideoOutput, + zSchemaPixverseSwapInput, + zSchemaPixverseSwapOutput, + zSchemaPixverseV35EffectsInput, + zSchemaPixverseV35EffectsOutput, + zSchemaPixverseV35ImageToVideoFastInput, + zSchemaPixverseV35ImageToVideoFastOutput, + zSchemaPixverseV35ImageToVideoInput, + zSchemaPixverseV35ImageToVideoOutput, + zSchemaPixverseV35TransitionInput, + zSchemaPixverseV35TransitionOutput, + zSchemaPixverseV45EffectsInput, + zSchemaPixverseV45EffectsOutput, + zSchemaPixverseV45ImageToVideoFastInput, + zSchemaPixverseV45ImageToVideoFastOutput, + zSchemaPixverseV45ImageToVideoInput, + zSchemaPixverseV45ImageToVideoOutput, + zSchemaPixverseV45TransitionInput, + zSchemaPixverseV45TransitionOutput, + zSchemaPixverseV4EffectsInput, + zSchemaPixverseV4EffectsOutput, + zSchemaPixverseV4ImageToVideoFastInput, + zSchemaPixverseV4ImageToVideoFastOutput, + zSchemaPixverseV4ImageToVideoInput, + zSchemaPixverseV4ImageToVideoOutput, + zSchemaPixverseV55EffectsInput, + zSchemaPixverseV55EffectsOutput, + zSchemaPixverseV55ImageToVideoInput, + zSchemaPixverseV55ImageToVideoOutput, + zSchemaPixverseV55TransitionInput, + zSchemaPixverseV55TransitionOutput, + zSchemaPixverseV56ImageToVideoInput, + zSchemaPixverseV56ImageToVideoOutput, + zSchemaPixverseV56TransitionInput, + zSchemaPixverseV56TransitionOutput, + zSchemaPixverseV5EffectsInput, + zSchemaPixverseV5EffectsOutput, + zSchemaPixverseV5ImageToVideoInput, + zSchemaPixverseV5ImageToVideoOutput, + zSchemaPixverseV5TransitionInput, + zSchemaPixverseV5TransitionOutput, + zSchemaSadtalkerInput, + zSchemaSadtalkerOutput, + zSchemaSadtalkerReferenceInput, + zSchemaSadtalkerReferenceOutput, + zSchemaSkyreelsI2vInput, + zSchemaSkyreelsI2vOutput, + zSchemaSora2ImageToVideoInput, + zSchemaSora2ImageToVideoOutput, + zSchemaSora2ImageToVideoProInput, + zSchemaSora2ImageToVideoProOutput, + zSchemaStableVideoInput, + zSchemaStableVideoOutput, + zSchemaV26ImageToVideoFlashInput, + zSchemaV26ImageToVideoFlashOutput, + zSchemaV26ImageToVideoInput, + zSchemaV26ImageToVideoOutput, + zSchemaVeo2ImageToVideoInput, + zSchemaVeo2ImageToVideoOutput, + zSchemaVeo31FastFirstLastFrameToVideoInput, + zSchemaVeo31FastFirstLastFrameToVideoOutput, + zSchemaVeo31FastImageToVideoInput, + zSchemaVeo31FastImageToVideoOutput, + zSchemaVeo31FirstLastFrameToVideoInput, + zSchemaVeo31FirstLastFrameToVideoOutput, + zSchemaVeo31ImageToVideoInput, + zSchemaVeo31ImageToVideoOutput, + zSchemaVeo31ReferenceToVideoInput, + zSchemaVeo31ReferenceToVideoOutput, + 
zSchemaVeo3FastImageToVideoInput, + zSchemaVeo3FastImageToVideoOutput, + zSchemaVeo3ImageToVideoInput, + zSchemaVeo3ImageToVideoOutput, + zSchemaViduImageToVideoInput, + zSchemaViduImageToVideoOutput, + zSchemaViduQ1ImageToVideoInput, + zSchemaViduQ1ImageToVideoOutput, + zSchemaViduQ1ReferenceToVideoInput, + zSchemaViduQ1ReferenceToVideoOutput, + zSchemaViduQ1StartEndToVideoInput, + zSchemaViduQ1StartEndToVideoOutput, + zSchemaViduQ2ImageToVideoProInput, + zSchemaViduQ2ImageToVideoProOutput, + zSchemaViduQ2ImageToVideoTurboInput, + zSchemaViduQ2ImageToVideoTurboOutput, + zSchemaViduQ2ReferenceToVideoProInput, + zSchemaViduQ2ReferenceToVideoProOutput, + zSchemaViduReferenceToVideoInput, + zSchemaViduReferenceToVideoOutput, + zSchemaViduStartEndToVideoInput, + zSchemaViduStartEndToVideoOutput, + zSchemaViduTemplateToVideoInput, + zSchemaViduTemplateToVideoOutput, + zSchemaWan25PreviewImageToVideoInput, + zSchemaWan25PreviewImageToVideoOutput, + zSchemaWanAtiInput, + zSchemaWanAtiOutput, + zSchemaWanEffectsInput, + zSchemaWanEffectsOutput, + zSchemaWanFlf2vInput, + zSchemaWanFlf2vOutput, + zSchemaWanI2vInput, + zSchemaWanI2vLoraInput, + zSchemaWanI2vLoraOutput, + zSchemaWanI2vOutput, + zSchemaWanMoveInput, + zSchemaWanMoveOutput, + zSchemaWanProImageToVideoInput, + zSchemaWanProImageToVideoOutput, + zSchemaWanV225bImageToVideoInput, + zSchemaWanV225bImageToVideoOutput, + zSchemaWanV22A14bImageToVideoInput, + zSchemaWanV22A14bImageToVideoLoraInput, + zSchemaWanV22A14bImageToVideoLoraOutput, + zSchemaWanV22A14bImageToVideoOutput, + zSchemaWanV22A14bImageToVideoTurboInput, + zSchemaWanV22A14bImageToVideoTurboOutput, +} from './zod.gen' + +import type { + SchemaAiAvatarInput, + SchemaAiAvatarMultiInput, + SchemaAiAvatarMultiOutput, + SchemaAiAvatarMultiTextInput, + SchemaAiAvatarMultiTextOutput, + SchemaAiAvatarOutput, + SchemaAiAvatarSingleTextInput, + SchemaAiAvatarSingleTextOutput, + SchemaAmtInterpolationFrameInterpolationInput, + SchemaAmtInterpolationFrameInterpolationOutput, + SchemaBytedanceOmnihumanInput, + SchemaBytedanceOmnihumanOutput, + SchemaBytedanceOmnihumanV15Input, + SchemaBytedanceOmnihumanV15Output, + SchemaBytedanceSeedanceV15ProImageToVideoInput, + SchemaBytedanceSeedanceV15ProImageToVideoOutput, + SchemaBytedanceSeedanceV1LiteImageToVideoInput, + SchemaBytedanceSeedanceV1LiteImageToVideoOutput, + SchemaBytedanceSeedanceV1LiteReferenceToVideoInput, + SchemaBytedanceSeedanceV1LiteReferenceToVideoOutput, + SchemaBytedanceSeedanceV1ProFastImageToVideoInput, + SchemaBytedanceSeedanceV1ProFastImageToVideoOutput, + SchemaBytedanceSeedanceV1ProImageToVideoInput, + SchemaBytedanceSeedanceV1ProImageToVideoOutput, + SchemaBytedanceVideoStylizeInput, + SchemaBytedanceVideoStylizeOutput, + SchemaCogvideox5bImageToVideoInput, + SchemaCogvideox5bImageToVideoOutput, + SchemaCreatifyAuroraInput, + SchemaCreatifyAuroraOutput, + SchemaDecartLucy5bImageToVideoInput, + SchemaDecartLucy5bImageToVideoOutput, + SchemaFabric10FastInput, + SchemaFabric10FastOutput, + SchemaFabric10Input, + SchemaFabric10Output, + SchemaFastSvdLcmInput, + SchemaFastSvdLcmOutput, + SchemaFramepackF1Input, + SchemaFramepackF1Output, + SchemaFramepackFlf2vInput, + SchemaFramepackFlf2vOutput, + SchemaFramepackInput, + SchemaFramepackOutput, + SchemaHunyuanAvatarInput, + SchemaHunyuanAvatarOutput, + SchemaHunyuanCustomInput, + SchemaHunyuanCustomOutput, + SchemaHunyuanPortraitInput, + SchemaHunyuanPortraitOutput, + SchemaHunyuanVideoImageToVideoInput, + SchemaHunyuanVideoImageToVideoOutput, + 
SchemaHunyuanVideoImg2VidLoraInput, + SchemaHunyuanVideoImg2VidLoraOutput, + SchemaHunyuanVideoV15ImageToVideoInput, + SchemaHunyuanVideoV15ImageToVideoOutput, + SchemaKandinsky5ProImageToVideoInput, + SchemaKandinsky5ProImageToVideoOutput, + SchemaKlingVideoAiAvatarV2ProInput, + SchemaKlingVideoAiAvatarV2ProOutput, + SchemaKlingVideoAiAvatarV2StandardInput, + SchemaKlingVideoAiAvatarV2StandardOutput, + SchemaKlingVideoO1ImageToVideoInput, + SchemaKlingVideoO1ImageToVideoOutput, + SchemaKlingVideoO1ReferenceToVideoInput, + SchemaKlingVideoO1ReferenceToVideoOutput, + SchemaKlingVideoO1StandardImageToVideoInput, + SchemaKlingVideoO1StandardImageToVideoOutput, + SchemaKlingVideoO1StandardReferenceToVideoInput, + SchemaKlingVideoO1StandardReferenceToVideoOutput, + SchemaKlingVideoV15ProImageToVideoInput, + SchemaKlingVideoV15ProImageToVideoOutput, + SchemaKlingVideoV16ProElementsInput, + SchemaKlingVideoV16ProElementsOutput, + SchemaKlingVideoV16ProImageToVideoInput, + SchemaKlingVideoV16ProImageToVideoOutput, + SchemaKlingVideoV16StandardElementsInput, + SchemaKlingVideoV16StandardElementsOutput, + SchemaKlingVideoV16StandardImageToVideoInput, + SchemaKlingVideoV16StandardImageToVideoOutput, + SchemaKlingVideoV1ProAiAvatarInput, + SchemaKlingVideoV1ProAiAvatarOutput, + SchemaKlingVideoV1StandardAiAvatarInput, + SchemaKlingVideoV1StandardAiAvatarOutput, + SchemaKlingVideoV1StandardImageToVideoInput, + SchemaKlingVideoV1StandardImageToVideoOutput, + SchemaKlingVideoV21MasterImageToVideoInput, + SchemaKlingVideoV21MasterImageToVideoOutput, + SchemaKlingVideoV21ProImageToVideoInput, + SchemaKlingVideoV21ProImageToVideoOutput, + SchemaKlingVideoV21StandardImageToVideoInput, + SchemaKlingVideoV21StandardImageToVideoOutput, + SchemaKlingVideoV25TurboProImageToVideoInput, + SchemaKlingVideoV25TurboProImageToVideoOutput, + SchemaKlingVideoV25TurboStandardImageToVideoInput, + SchemaKlingVideoV25TurboStandardImageToVideoOutput, + SchemaKlingVideoV26ProImageToVideoInput, + SchemaKlingVideoV26ProImageToVideoOutput, + SchemaKlingVideoV2MasterImageToVideoInput, + SchemaKlingVideoV2MasterImageToVideoOutput, + SchemaLiveAvatarInput, + SchemaLiveAvatarOutput, + SchemaLivePortraitInput, + SchemaLivePortraitOutput, + SchemaLongcatVideoDistilledImageToVideo480pInput, + SchemaLongcatVideoDistilledImageToVideo480pOutput, + SchemaLongcatVideoDistilledImageToVideo720pInput, + SchemaLongcatVideoDistilledImageToVideo720pOutput, + SchemaLongcatVideoImageToVideo480pInput, + SchemaLongcatVideoImageToVideo480pOutput, + SchemaLongcatVideoImageToVideo720pInput, + SchemaLongcatVideoImageToVideo720pOutput, + SchemaLtx219bDistilledImageToVideoInput, + SchemaLtx219bDistilledImageToVideoLoraInput, + SchemaLtx219bDistilledImageToVideoLoraOutput, + SchemaLtx219bDistilledImageToVideoOutput, + SchemaLtx219bImageToVideoInput, + SchemaLtx219bImageToVideoLoraInput, + SchemaLtx219bImageToVideoLoraOutput, + SchemaLtx219bImageToVideoOutput, + SchemaLtx2ImageToVideoFastInput, + SchemaLtx2ImageToVideoFastOutput, + SchemaLtx2ImageToVideoInput, + SchemaLtx2ImageToVideoOutput, + SchemaLtxVideo13bDevImageToVideoInput, + SchemaLtxVideo13bDevImageToVideoOutput, + SchemaLtxVideo13bDistilledImageToVideoInput, + SchemaLtxVideo13bDistilledImageToVideoOutput, + SchemaLtxVideoImageToVideoInput, + SchemaLtxVideoImageToVideoOutput, + SchemaLtxVideoLoraImageToVideoInput, + SchemaLtxVideoLoraImageToVideoOutput, + SchemaLtxv13B098DistilledImageToVideoInput, + SchemaLtxv13B098DistilledImageToVideoOutput, + SchemaLucy14bImageToVideoInput, + 
SchemaLucy14bImageToVideoOutput, + SchemaLumaDreamMachineRay2FlashImageToVideoInput, + SchemaLumaDreamMachineRay2FlashImageToVideoOutput, + SchemaLumaDreamMachineRay2ImageToVideoInput, + SchemaLumaDreamMachineRay2ImageToVideoOutput, + SchemaLynxInput, + SchemaLynxOutput, + SchemaMagiDistilledImageToVideoInput, + SchemaMagiDistilledImageToVideoOutput, + SchemaMagiImageToVideoInput, + SchemaMagiImageToVideoOutput, + SchemaMareyI2vInput, + SchemaMareyI2vOutput, + SchemaMinimaxHailuo02FastImageToVideoInput, + SchemaMinimaxHailuo02FastImageToVideoOutput, + SchemaMinimaxHailuo02ProImageToVideoInput, + SchemaMinimaxHailuo02ProImageToVideoOutput, + SchemaMinimaxHailuo02StandardImageToVideoInput, + SchemaMinimaxHailuo02StandardImageToVideoOutput, + SchemaMinimaxHailuo23FastProImageToVideoInput, + SchemaMinimaxHailuo23FastProImageToVideoOutput, + SchemaMinimaxHailuo23FastStandardImageToVideoInput, + SchemaMinimaxHailuo23FastStandardImageToVideoOutput, + SchemaMinimaxHailuo23ProImageToVideoInput, + SchemaMinimaxHailuo23ProImageToVideoOutput, + SchemaMinimaxHailuo23StandardImageToVideoInput, + SchemaMinimaxHailuo23StandardImageToVideoOutput, + SchemaMinimaxVideo01DirectorImageToVideoInput, + SchemaMinimaxVideo01DirectorImageToVideoOutput, + SchemaMinimaxVideo01ImageToVideoInput, + SchemaMinimaxVideo01ImageToVideoOutput, + SchemaMinimaxVideo01LiveImageToVideoInput, + SchemaMinimaxVideo01LiveImageToVideoOutput, + SchemaMinimaxVideo01SubjectReferenceInput, + SchemaMinimaxVideo01SubjectReferenceOutput, + SchemaMusetalkInput, + SchemaMusetalkOutput, + SchemaOviImageToVideoInput, + SchemaOviImageToVideoOutput, + SchemaPikaV15PikaffectsInput, + SchemaPikaV15PikaffectsOutput, + SchemaPikaV21ImageToVideoInput, + SchemaPikaV21ImageToVideoOutput, + SchemaPikaV22ImageToVideoInput, + SchemaPikaV22ImageToVideoOutput, + SchemaPikaV22PikaframesInput, + SchemaPikaV22PikaframesOutput, + SchemaPikaV22PikascenesInput, + SchemaPikaV22PikascenesOutput, + SchemaPikaV2TurboImageToVideoInput, + SchemaPikaV2TurboImageToVideoOutput, + SchemaPixverseSwapInput, + SchemaPixverseSwapOutput, + SchemaPixverseV35EffectsInput, + SchemaPixverseV35EffectsOutput, + SchemaPixverseV35ImageToVideoFastInput, + SchemaPixverseV35ImageToVideoFastOutput, + SchemaPixverseV35ImageToVideoInput, + SchemaPixverseV35ImageToVideoOutput, + SchemaPixverseV35TransitionInput, + SchemaPixverseV35TransitionOutput, + SchemaPixverseV45EffectsInput, + SchemaPixverseV45EffectsOutput, + SchemaPixverseV45ImageToVideoFastInput, + SchemaPixverseV45ImageToVideoFastOutput, + SchemaPixverseV45ImageToVideoInput, + SchemaPixverseV45ImageToVideoOutput, + SchemaPixverseV45TransitionInput, + SchemaPixverseV45TransitionOutput, + SchemaPixverseV4EffectsInput, + SchemaPixverseV4EffectsOutput, + SchemaPixverseV4ImageToVideoFastInput, + SchemaPixverseV4ImageToVideoFastOutput, + SchemaPixverseV4ImageToVideoInput, + SchemaPixverseV4ImageToVideoOutput, + SchemaPixverseV55EffectsInput, + SchemaPixverseV55EffectsOutput, + SchemaPixverseV55ImageToVideoInput, + SchemaPixverseV55ImageToVideoOutput, + SchemaPixverseV55TransitionInput, + SchemaPixverseV55TransitionOutput, + SchemaPixverseV56ImageToVideoInput, + SchemaPixverseV56ImageToVideoOutput, + SchemaPixverseV56TransitionInput, + SchemaPixverseV56TransitionOutput, + SchemaPixverseV5EffectsInput, + SchemaPixverseV5EffectsOutput, + SchemaPixverseV5ImageToVideoInput, + SchemaPixverseV5ImageToVideoOutput, + SchemaPixverseV5TransitionInput, + SchemaPixverseV5TransitionOutput, + SchemaSadtalkerInput, + SchemaSadtalkerOutput, + 
SchemaSadtalkerReferenceInput, + SchemaSadtalkerReferenceOutput, + SchemaSkyreelsI2vInput, + SchemaSkyreelsI2vOutput, + SchemaSora2ImageToVideoInput, + SchemaSora2ImageToVideoOutput, + SchemaSora2ImageToVideoProInput, + SchemaSora2ImageToVideoProOutput, + SchemaStableVideoInput, + SchemaStableVideoOutput, + SchemaV26ImageToVideoFlashInput, + SchemaV26ImageToVideoFlashOutput, + SchemaV26ImageToVideoInput, + SchemaV26ImageToVideoOutput, + SchemaVeo2ImageToVideoInput, + SchemaVeo2ImageToVideoOutput, + SchemaVeo31FastFirstLastFrameToVideoInput, + SchemaVeo31FastFirstLastFrameToVideoOutput, + SchemaVeo31FastImageToVideoInput, + SchemaVeo31FastImageToVideoOutput, + SchemaVeo31FirstLastFrameToVideoInput, + SchemaVeo31FirstLastFrameToVideoOutput, + SchemaVeo31ImageToVideoInput, + SchemaVeo31ImageToVideoOutput, + SchemaVeo31ReferenceToVideoInput, + SchemaVeo31ReferenceToVideoOutput, + SchemaVeo3FastImageToVideoInput, + SchemaVeo3FastImageToVideoOutput, + SchemaVeo3ImageToVideoInput, + SchemaVeo3ImageToVideoOutput, + SchemaViduImageToVideoInput, + SchemaViduImageToVideoOutput, + SchemaViduQ1ImageToVideoInput, + SchemaViduQ1ImageToVideoOutput, + SchemaViduQ1ReferenceToVideoInput, + SchemaViduQ1ReferenceToVideoOutput, + SchemaViduQ1StartEndToVideoInput, + SchemaViduQ1StartEndToVideoOutput, + SchemaViduQ2ImageToVideoProInput, + SchemaViduQ2ImageToVideoProOutput, + SchemaViduQ2ImageToVideoTurboInput, + SchemaViduQ2ImageToVideoTurboOutput, + SchemaViduQ2ReferenceToVideoProInput, + SchemaViduQ2ReferenceToVideoProOutput, + SchemaViduReferenceToVideoInput, + SchemaViduReferenceToVideoOutput, + SchemaViduStartEndToVideoInput, + SchemaViduStartEndToVideoOutput, + SchemaViduTemplateToVideoInput, + SchemaViduTemplateToVideoOutput, + SchemaWan25PreviewImageToVideoInput, + SchemaWan25PreviewImageToVideoOutput, + SchemaWanAtiInput, + SchemaWanAtiOutput, + SchemaWanEffectsInput, + SchemaWanEffectsOutput, + SchemaWanFlf2vInput, + SchemaWanFlf2vOutput, + SchemaWanI2vInput, + SchemaWanI2vLoraInput, + SchemaWanI2vLoraOutput, + SchemaWanI2vOutput, + SchemaWanMoveInput, + SchemaWanMoveOutput, + SchemaWanProImageToVideoInput, + SchemaWanProImageToVideoOutput, + SchemaWanV225bImageToVideoInput, + SchemaWanV225bImageToVideoOutput, + SchemaWanV22A14bImageToVideoInput, + SchemaWanV22A14bImageToVideoLoraInput, + SchemaWanV22A14bImageToVideoLoraOutput, + SchemaWanV22A14bImageToVideoOutput, + SchemaWanV22A14bImageToVideoTurboInput, + SchemaWanV22A14bImageToVideoTurboOutput, +} from './types.gen' + +import type { z } from 'zod' + +export type ImageToVideoEndpointMap = { + 'fal-ai/wan-effects': { + input: SchemaWanEffectsInput + output: SchemaWanEffectsOutput + } + 'fal-ai/wan-pro/image-to-video': { + input: SchemaWanProImageToVideoInput + output: SchemaWanProImageToVideoOutput + } + 'fal-ai/veo2/image-to-video': { + input: SchemaVeo2ImageToVideoInput + output: SchemaVeo2ImageToVideoOutput + } + 'fal-ai/kling-video/v1.6/pro/image-to-video': { + input: SchemaKlingVideoV16ProImageToVideoInput + output: SchemaKlingVideoV16ProImageToVideoOutput + } + 'fal-ai/minimax/video-01/image-to-video': { + input: SchemaMinimaxVideo01ImageToVideoInput + output: SchemaMinimaxVideo01ImageToVideoOutput + } + 'fal-ai/minimax/hailuo-2.3/pro/image-to-video': { + input: SchemaMinimaxHailuo23ProImageToVideoInput + output: SchemaMinimaxHailuo23ProImageToVideoOutput + } + 'fal-ai/wan-25-preview/image-to-video': { + input: SchemaWan25PreviewImageToVideoInput + output: SchemaWan25PreviewImageToVideoOutput + } + 
'fal-ai/kling-video/v2.5-turbo/pro/image-to-video': { + input: SchemaKlingVideoV25TurboProImageToVideoInput + output: SchemaKlingVideoV25TurboProImageToVideoOutput + } + 'fal-ai/minimax/hailuo-02/standard/image-to-video': { + input: SchemaMinimaxHailuo02StandardImageToVideoInput + output: SchemaMinimaxHailuo02StandardImageToVideoOutput + } + 'fal-ai/bytedance/seedance/v1/pro/image-to-video': { + input: SchemaBytedanceSeedanceV1ProImageToVideoInput + output: SchemaBytedanceSeedanceV1ProImageToVideoOutput + } + 'fal-ai/kling-video/v2.1/master/image-to-video': { + input: SchemaKlingVideoV21MasterImageToVideoInput + output: SchemaKlingVideoV21MasterImageToVideoOutput + } + 'fal-ai/kling-video/v2.1/standard/image-to-video': { + input: SchemaKlingVideoV21StandardImageToVideoInput + output: SchemaKlingVideoV21StandardImageToVideoOutput + } + 'fal-ai/pixverse/v4.5/image-to-video': { + input: SchemaPixverseV45ImageToVideoInput + output: SchemaPixverseV45ImageToVideoOutput + } + 'fal-ai/kling-video/v2/master/image-to-video': { + input: SchemaKlingVideoV2MasterImageToVideoInput + output: SchemaKlingVideoV2MasterImageToVideoOutput + } + 'fal-ai/wan-i2v': { + input: SchemaWanI2vInput + output: SchemaWanI2vOutput + } + 'fal-ai/pixverse/v5.6/transition': { + input: SchemaPixverseV56TransitionInput + output: SchemaPixverseV56TransitionOutput + } + 'fal-ai/pixverse/v5.6/image-to-video': { + input: SchemaPixverseV56ImageToVideoInput + output: SchemaPixverseV56ImageToVideoOutput + } + 'fal-ai/vidu/q2/reference-to-video/pro': { + input: SchemaViduQ2ReferenceToVideoProInput + output: SchemaViduQ2ReferenceToVideoProOutput + } + 'wan/v2.6/image-to-video/flash': { + input: SchemaV26ImageToVideoFlashInput + output: SchemaV26ImageToVideoFlashOutput + } + 'fal-ai/ltx-2-19b/distilled/image-to-video/lora': { + input: SchemaLtx219bDistilledImageToVideoLoraInput + output: SchemaLtx219bDistilledImageToVideoLoraOutput + } + 'fal-ai/ltx-2-19b/distilled/image-to-video': { + input: SchemaLtx219bDistilledImageToVideoInput + output: SchemaLtx219bDistilledImageToVideoOutput + } + 'fal-ai/ltx-2-19b/image-to-video/lora': { + input: SchemaLtx219bImageToVideoLoraInput + output: SchemaLtx219bImageToVideoLoraOutput + } + 'fal-ai/ltx-2-19b/image-to-video': { + input: SchemaLtx219bImageToVideoInput + output: SchemaLtx219bImageToVideoOutput + } + 'fal-ai/wan-move': { + input: SchemaWanMoveInput + output: SchemaWanMoveOutput + } + 'fal-ai/kandinsky5-pro/image-to-video': { + input: SchemaKandinsky5ProImageToVideoInput + output: SchemaKandinsky5ProImageToVideoOutput + } + 'fal-ai/bytedance/seedance/v1.5/pro/image-to-video': { + input: SchemaBytedanceSeedanceV15ProImageToVideoInput + output: SchemaBytedanceSeedanceV15ProImageToVideoOutput + } + 'fal-ai/live-avatar': { + input: SchemaLiveAvatarInput + output: SchemaLiveAvatarOutput + } + 'fal-ai/hunyuan-video-v1.5/image-to-video': { + input: SchemaHunyuanVideoV15ImageToVideoInput + output: SchemaHunyuanVideoV15ImageToVideoOutput + } + 'wan/v2.6/image-to-video': { + input: SchemaV26ImageToVideoInput + output: SchemaV26ImageToVideoOutput + } + 'fal-ai/kling-video/o1/standard/reference-to-video': { + input: SchemaKlingVideoO1StandardReferenceToVideoInput + output: SchemaKlingVideoO1StandardReferenceToVideoOutput + } + 'fal-ai/kling-video/o1/standard/image-to-video': { + input: SchemaKlingVideoO1StandardImageToVideoInput + output: SchemaKlingVideoO1StandardImageToVideoOutput + } + 'fal-ai/creatify/aurora': { + input: SchemaCreatifyAuroraInput + output: SchemaCreatifyAuroraOutput + } + 
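The endpoint map above (it continues below) keys each fal endpoint id to its `{ input, output }` schema pair, so endpoint-specific request types can be resolved purely at the type level. A minimal sketch of that lookup, with placeholder values only; `fal-ai/stable-video` and its `SchemaStableVideoInput` fields appear later in this diff:

// Compile-time lookup of an endpoint's request type from the map.
type StableVideoInput = ImageToVideoEndpointMap['fal-ai/stable-video']['input']

// The object is now statically checked against the generated schema;
// `image_url` is the only required field of SchemaStableVideoInput.
const request: StableVideoInput = {
  image_url: 'https://example.com/first-frame.png', // placeholder URL
  motion_bucket_id: 127, // optional; higher values yield more motion
}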
'fal-ai/kling-video/ai-avatar/v2/pro': { + input: SchemaKlingVideoAiAvatarV2ProInput + output: SchemaKlingVideoAiAvatarV2ProOutput + } + 'fal-ai/kling-video/ai-avatar/v2/standard': { + input: SchemaKlingVideoAiAvatarV2StandardInput + output: SchemaKlingVideoAiAvatarV2StandardOutput + } + 'fal-ai/kling-video/v2.6/pro/image-to-video': { + input: SchemaKlingVideoV26ProImageToVideoInput + output: SchemaKlingVideoV26ProImageToVideoOutput + } + 'fal-ai/pixverse/v5.5/effects': { + input: SchemaPixverseV55EffectsInput + output: SchemaPixverseV55EffectsOutput + } + 'fal-ai/pixverse/v5.5/transition': { + input: SchemaPixverseV55TransitionInput + output: SchemaPixverseV55TransitionOutput + } + 'fal-ai/pixverse/v5.5/image-to-video': { + input: SchemaPixverseV55ImageToVideoInput + output: SchemaPixverseV55ImageToVideoOutput + } + 'fal-ai/kling-video/o1/image-to-video': { + input: SchemaKlingVideoO1ImageToVideoInput + output: SchemaKlingVideoO1ImageToVideoOutput + } + 'fal-ai/kling-video/o1/reference-to-video': { + input: SchemaKlingVideoO1ReferenceToVideoInput + output: SchemaKlingVideoO1ReferenceToVideoOutput + } + 'fal-ai/ltx-2/image-to-video/fast': { + input: SchemaLtx2ImageToVideoFastInput + output: SchemaLtx2ImageToVideoFastOutput + } + 'fal-ai/ltx-2/image-to-video': { + input: SchemaLtx2ImageToVideoInput + output: SchemaLtx2ImageToVideoOutput + } + 'bytedance/lynx': { + input: SchemaLynxInput + output: SchemaLynxOutput + } + 'fal-ai/pixverse/swap': { + input: SchemaPixverseSwapInput + output: SchemaPixverseSwapOutput + } + 'fal-ai/pika/v2.2/pikaframes': { + input: SchemaPikaV22PikaframesInput + output: SchemaPikaV22PikaframesOutput + } + 'fal-ai/longcat-video/image-to-video/720p': { + input: SchemaLongcatVideoImageToVideo720pInput + output: SchemaLongcatVideoImageToVideo720pOutput + } + 'fal-ai/longcat-video/image-to-video/480p': { + input: SchemaLongcatVideoImageToVideo480pInput + output: SchemaLongcatVideoImageToVideo480pOutput + } + 'fal-ai/longcat-video/distilled/image-to-video/720p': { + input: SchemaLongcatVideoDistilledImageToVideo720pInput + output: SchemaLongcatVideoDistilledImageToVideo720pOutput + } + 'fal-ai/longcat-video/distilled/image-to-video/480p': { + input: SchemaLongcatVideoDistilledImageToVideo480pInput + output: SchemaLongcatVideoDistilledImageToVideo480pOutput + } + 'fal-ai/minimax/hailuo-2.3-fast/standard/image-to-video': { + input: SchemaMinimaxHailuo23FastStandardImageToVideoInput + output: SchemaMinimaxHailuo23FastStandardImageToVideoOutput + } + 'fal-ai/minimax/hailuo-2.3/standard/image-to-video': { + input: SchemaMinimaxHailuo23StandardImageToVideoInput + output: SchemaMinimaxHailuo23StandardImageToVideoOutput + } + 'fal-ai/minimax/hailuo-2.3-fast/pro/image-to-video': { + input: SchemaMinimaxHailuo23FastProImageToVideoInput + output: SchemaMinimaxHailuo23FastProImageToVideoOutput + } + 'fal-ai/bytedance/seedance/v1/pro/fast/image-to-video': { + input: SchemaBytedanceSeedanceV1ProFastImageToVideoInput + output: SchemaBytedanceSeedanceV1ProFastImageToVideoOutput + } + 'fal-ai/vidu/q2/image-to-video/turbo': { + input: SchemaViduQ2ImageToVideoTurboInput + output: SchemaViduQ2ImageToVideoTurboOutput + } + 'fal-ai/vidu/q2/image-to-video/pro': { + input: SchemaViduQ2ImageToVideoProInput + output: SchemaViduQ2ImageToVideoProOutput + } + 'fal-ai/kling-video/v2.5-turbo/standard/image-to-video': { + input: SchemaKlingVideoV25TurboStandardImageToVideoInput + output: SchemaKlingVideoV25TurboStandardImageToVideoOutput + } + 'fal-ai/veo3.1/fast/first-last-frame-to-video': { + input: 
SchemaVeo31FastFirstLastFrameToVideoInput + output: SchemaVeo31FastFirstLastFrameToVideoOutput + } + 'fal-ai/veo3.1/first-last-frame-to-video': { + input: SchemaVeo31FirstLastFrameToVideoInput + output: SchemaVeo31FirstLastFrameToVideoOutput + } + 'fal-ai/veo3.1/reference-to-video': { + input: SchemaVeo31ReferenceToVideoInput + output: SchemaVeo31ReferenceToVideoOutput + } + 'fal-ai/veo3.1/fast/image-to-video': { + input: SchemaVeo31FastImageToVideoInput + output: SchemaVeo31FastImageToVideoOutput + } + 'fal-ai/veo3.1/image-to-video': { + input: SchemaVeo31ImageToVideoInput + output: SchemaVeo31ImageToVideoOutput + } + 'fal-ai/sora-2/image-to-video/pro': { + input: SchemaSora2ImageToVideoProInput + output: SchemaSora2ImageToVideoProOutput + } + 'fal-ai/sora-2/image-to-video': { + input: SchemaSora2ImageToVideoInput + output: SchemaSora2ImageToVideoOutput + } + 'fal-ai/ovi/image-to-video': { + input: SchemaOviImageToVideoInput + output: SchemaOviImageToVideoOutput + } + 'veed/fabric-1.0/fast': { + input: SchemaFabric10FastInput + output: SchemaFabric10FastOutput + } + 'fal-ai/bytedance/omnihuman/v1.5': { + input: SchemaBytedanceOmnihumanV15Input + output: SchemaBytedanceOmnihumanV15Output + } + 'veed/fabric-1.0': { + input: SchemaFabric10Input + output: SchemaFabric10Output + } + 'fal-ai/kling-video/v1/standard/ai-avatar': { + input: SchemaKlingVideoV1StandardAiAvatarInput + output: SchemaKlingVideoV1StandardAiAvatarOutput + } + 'fal-ai/kling-video/v1/pro/ai-avatar': { + input: SchemaKlingVideoV1ProAiAvatarInput + output: SchemaKlingVideoV1ProAiAvatarOutput + } + 'decart/lucy-14b/image-to-video': { + input: SchemaLucy14bImageToVideoInput + output: SchemaLucy14bImageToVideoOutput + } + 'fal-ai/bytedance/seedance/v1/lite/reference-to-video': { + input: SchemaBytedanceSeedanceV1LiteReferenceToVideoInput + output: SchemaBytedanceSeedanceV1LiteReferenceToVideoOutput + } + 'fal-ai/wan-ati': { + input: SchemaWanAtiInput + output: SchemaWanAtiOutput + } + 'fal-ai/decart/lucy-5b/image-to-video': { + input: SchemaDecartLucy5bImageToVideoInput + output: SchemaDecartLucy5bImageToVideoOutput + } + 'fal-ai/pixverse/v5/transition': { + input: SchemaPixverseV5TransitionInput + output: SchemaPixverseV5TransitionOutput + } + 'fal-ai/pixverse/v5/effects': { + input: SchemaPixverseV5EffectsInput + output: SchemaPixverseV5EffectsOutput + } + 'fal-ai/pixverse/v5/image-to-video': { + input: SchemaPixverseV5ImageToVideoInput + output: SchemaPixverseV5ImageToVideoOutput + } + 'moonvalley/marey/i2v': { + input: SchemaMareyI2vInput + output: SchemaMareyI2vOutput + } + 'fal-ai/bytedance/video-stylize': { + input: SchemaBytedanceVideoStylizeInput + output: SchemaBytedanceVideoStylizeOutput + } + 'fal-ai/wan/v2.2-a14b/image-to-video/lora': { + input: SchemaWanV22A14bImageToVideoLoraInput + output: SchemaWanV22A14bImageToVideoLoraOutput + } + 'fal-ai/minimax/hailuo-02-fast/image-to-video': { + input: SchemaMinimaxHailuo02FastImageToVideoInput + output: SchemaMinimaxHailuo02FastImageToVideoOutput + } + 'fal-ai/veo3/image-to-video': { + input: SchemaVeo3ImageToVideoInput + output: SchemaVeo3ImageToVideoOutput + } + 'fal-ai/wan/v2.2-a14b/image-to-video/turbo': { + input: SchemaWanV22A14bImageToVideoTurboInput + output: SchemaWanV22A14bImageToVideoTurboOutput + } + 'fal-ai/wan/v2.2-5b/image-to-video': { + input: SchemaWanV225bImageToVideoInput + output: SchemaWanV225bImageToVideoOutput + } + 'fal-ai/wan/v2.2-a14b/image-to-video': { + input: SchemaWanV22A14bImageToVideoInput + output: SchemaWanV22A14bImageToVideoOutput + } + 
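Because every entry shares the same `{ input, output }` shape, one generic wrapper can serve all of these endpoints via the `ImageToVideoModelInput` and `ImageToVideoModelOutput` helpers defined at the end of this file. A sketch, assuming the `@fal-ai/client` package's `fal.subscribe` call (that client is not part of this diff); `generateVideo` is a hypothetical helper name:

import { fal } from '@fal-ai/client' // assumed dependency

// The `model` argument narrows both the accepted input and the promised output.
async function generateVideo<T extends ImageToVideoModel>(
  model: T,
  input: ImageToVideoModelInput<T>,
): Promise<ImageToVideoModelOutput<T>> {
  const result = await fal.subscribe(model, { input })
  return result.data as ImageToVideoModelOutput<T>
}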
'fal-ai/bytedance/omnihuman': { + input: SchemaBytedanceOmnihumanInput + output: SchemaBytedanceOmnihumanOutput + } + 'fal-ai/ltxv-13b-098-distilled/image-to-video': { + input: SchemaLtxv13B098DistilledImageToVideoInput + output: SchemaLtxv13B098DistilledImageToVideoOutput + } + 'fal-ai/veo3/fast/image-to-video': { + input: SchemaVeo3FastImageToVideoInput + output: SchemaVeo3FastImageToVideoOutput + } + 'fal-ai/vidu/q1/reference-to-video': { + input: SchemaViduQ1ReferenceToVideoInput + output: SchemaViduQ1ReferenceToVideoOutput + } + 'fal-ai/ai-avatar/single-text': { + input: SchemaAiAvatarSingleTextInput + output: SchemaAiAvatarSingleTextOutput + } + 'fal-ai/ai-avatar': { + input: SchemaAiAvatarInput + output: SchemaAiAvatarOutput + } + 'fal-ai/ai-avatar/multi-text': { + input: SchemaAiAvatarMultiTextInput + output: SchemaAiAvatarMultiTextOutput + } + 'fal-ai/ai-avatar/multi': { + input: SchemaAiAvatarMultiInput + output: SchemaAiAvatarMultiOutput + } + 'fal-ai/minimax/hailuo-02/pro/image-to-video': { + input: SchemaMinimaxHailuo02ProImageToVideoInput + output: SchemaMinimaxHailuo02ProImageToVideoOutput + } + 'fal-ai/bytedance/seedance/v1/lite/image-to-video': { + input: SchemaBytedanceSeedanceV1LiteImageToVideoInput + output: SchemaBytedanceSeedanceV1LiteImageToVideoOutput + } + 'fal-ai/hunyuan-avatar': { + input: SchemaHunyuanAvatarInput + output: SchemaHunyuanAvatarOutput + } + 'fal-ai/kling-video/v2.1/pro/image-to-video': { + input: SchemaKlingVideoV21ProImageToVideoInput + output: SchemaKlingVideoV21ProImageToVideoOutput + } + 'fal-ai/hunyuan-portrait': { + input: SchemaHunyuanPortraitInput + output: SchemaHunyuanPortraitOutput + } + 'fal-ai/kling-video/v1.6/standard/elements': { + input: SchemaKlingVideoV16StandardElementsInput + output: SchemaKlingVideoV16StandardElementsOutput + } + 'fal-ai/kling-video/v1.6/pro/elements': { + input: SchemaKlingVideoV16ProElementsInput + output: SchemaKlingVideoV16ProElementsOutput + } + 'fal-ai/ltx-video-13b-distilled/image-to-video': { + input: SchemaLtxVideo13bDistilledImageToVideoInput + output: SchemaLtxVideo13bDistilledImageToVideoOutput + } + 'fal-ai/ltx-video-13b-dev/image-to-video': { + input: SchemaLtxVideo13bDevImageToVideoInput + output: SchemaLtxVideo13bDevImageToVideoOutput + } + 'fal-ai/ltx-video-lora/image-to-video': { + input: SchemaLtxVideoLoraImageToVideoInput + output: SchemaLtxVideoLoraImageToVideoOutput + } + 'fal-ai/pixverse/v4.5/transition': { + input: SchemaPixverseV45TransitionInput + output: SchemaPixverseV45TransitionOutput + } + 'fal-ai/pixverse/v4.5/image-to-video/fast': { + input: SchemaPixverseV45ImageToVideoFastInput + output: SchemaPixverseV45ImageToVideoFastOutput + } + 'fal-ai/pixverse/v4.5/effects': { + input: SchemaPixverseV45EffectsInput + output: SchemaPixverseV45EffectsOutput + } + 'fal-ai/hunyuan-custom': { + input: SchemaHunyuanCustomInput + output: SchemaHunyuanCustomOutput + } + 'fal-ai/framepack/f1': { + input: SchemaFramepackF1Input + output: SchemaFramepackF1Output + } + 'fal-ai/vidu/q1/start-end-to-video': { + input: SchemaViduQ1StartEndToVideoInput + output: SchemaViduQ1StartEndToVideoOutput + } + 'fal-ai/vidu/q1/image-to-video': { + input: SchemaViduQ1ImageToVideoInput + output: SchemaViduQ1ImageToVideoOutput + } + 'fal-ai/magi/image-to-video': { + input: SchemaMagiImageToVideoInput + output: SchemaMagiImageToVideoOutput + } + 'fal-ai/pixverse/v4/effects': { + input: SchemaPixverseV4EffectsInput + output: SchemaPixverseV4EffectsOutput + } + 'fal-ai/magi-distilled/image-to-video': { + input: 
SchemaMagiDistilledImageToVideoInput + output: SchemaMagiDistilledImageToVideoOutput + } + 'fal-ai/framepack/flf2v': { + input: SchemaFramepackFlf2vInput + output: SchemaFramepackFlf2vOutput + } + 'fal-ai/wan-flf2v': { + input: SchemaWanFlf2vInput + output: SchemaWanFlf2vOutput + } + 'fal-ai/framepack': { + input: SchemaFramepackInput + output: SchemaFramepackOutput + } + 'fal-ai/pixverse/v4/image-to-video/fast': { + input: SchemaPixverseV4ImageToVideoFastInput + output: SchemaPixverseV4ImageToVideoFastOutput + } + 'fal-ai/pixverse/v4/image-to-video': { + input: SchemaPixverseV4ImageToVideoInput + output: SchemaPixverseV4ImageToVideoOutput + } + 'fal-ai/pixverse/v3.5/effects': { + input: SchemaPixverseV35EffectsInput + output: SchemaPixverseV35EffectsOutput + } + 'fal-ai/pixverse/v3.5/transition': { + input: SchemaPixverseV35TransitionInput + output: SchemaPixverseV35TransitionOutput + } + 'fal-ai/luma-dream-machine/ray-2-flash/image-to-video': { + input: SchemaLumaDreamMachineRay2FlashImageToVideoInput + output: SchemaLumaDreamMachineRay2FlashImageToVideoOutput + } + 'fal-ai/pika/v1.5/pikaffects': { + input: SchemaPikaV15PikaffectsInput + output: SchemaPikaV15PikaffectsOutput + } + 'fal-ai/pika/v2/turbo/image-to-video': { + input: SchemaPikaV2TurboImageToVideoInput + output: SchemaPikaV2TurboImageToVideoOutput + } + 'fal-ai/pika/v2.2/pikascenes': { + input: SchemaPikaV22PikascenesInput + output: SchemaPikaV22PikascenesOutput + } + 'fal-ai/pika/v2.2/image-to-video': { + input: SchemaPikaV22ImageToVideoInput + output: SchemaPikaV22ImageToVideoOutput + } + 'fal-ai/pika/v2.1/image-to-video': { + input: SchemaPikaV21ImageToVideoInput + output: SchemaPikaV21ImageToVideoOutput + } + 'fal-ai/vidu/image-to-video': { + input: SchemaViduImageToVideoInput + output: SchemaViduImageToVideoOutput + } + 'fal-ai/vidu/start-end-to-video': { + input: SchemaViduStartEndToVideoInput + output: SchemaViduStartEndToVideoOutput + } + 'fal-ai/vidu/reference-to-video': { + input: SchemaViduReferenceToVideoInput + output: SchemaViduReferenceToVideoOutput + } + 'fal-ai/vidu/template-to-video': { + input: SchemaViduTemplateToVideoInput + output: SchemaViduTemplateToVideoOutput + } + 'fal-ai/wan-i2v-lora': { + input: SchemaWanI2vLoraInput + output: SchemaWanI2vLoraOutput + } + 'fal-ai/hunyuan-video-image-to-video': { + input: SchemaHunyuanVideoImageToVideoInput + output: SchemaHunyuanVideoImageToVideoOutput + } + 'fal-ai/minimax/video-01-director/image-to-video': { + input: SchemaMinimaxVideo01DirectorImageToVideoInput + output: SchemaMinimaxVideo01DirectorImageToVideoOutput + } + 'fal-ai/skyreels-i2v': { + input: SchemaSkyreelsI2vInput + output: SchemaSkyreelsI2vOutput + } + 'fal-ai/luma-dream-machine/ray-2/image-to-video': { + input: SchemaLumaDreamMachineRay2ImageToVideoInput + output: SchemaLumaDreamMachineRay2ImageToVideoOutput + } + 'fal-ai/hunyuan-video-img2vid-lora': { + input: SchemaHunyuanVideoImg2VidLoraInput + output: SchemaHunyuanVideoImg2VidLoraOutput + } + 'fal-ai/pixverse/v3.5/image-to-video/fast': { + input: SchemaPixverseV35ImageToVideoFastInput + output: SchemaPixverseV35ImageToVideoFastOutput + } + 'fal-ai/pixverse/v3.5/image-to-video': { + input: SchemaPixverseV35ImageToVideoInput + output: SchemaPixverseV35ImageToVideoOutput + } + 'fal-ai/minimax/video-01-subject-reference': { + input: SchemaMinimaxVideo01SubjectReferenceInput + output: SchemaMinimaxVideo01SubjectReferenceOutput + } + 'fal-ai/kling-video/v1.6/standard/image-to-video': { + input: SchemaKlingVideoV16StandardImageToVideoInput + 
output: SchemaKlingVideoV16StandardImageToVideoOutput + } + 'fal-ai/sadtalker/reference': { + input: SchemaSadtalkerReferenceInput + output: SchemaSadtalkerReferenceOutput + } + 'fal-ai/minimax/video-01-live/image-to-video': { + input: SchemaMinimaxVideo01LiveImageToVideoInput + output: SchemaMinimaxVideo01LiveImageToVideoOutput + } + 'fal-ai/ltx-video/image-to-video': { + input: SchemaLtxVideoImageToVideoInput + output: SchemaLtxVideoImageToVideoOutput + } + 'fal-ai/cogvideox-5b/image-to-video': { + input: SchemaCogvideox5bImageToVideoInput + output: SchemaCogvideox5bImageToVideoOutput + } + 'fal-ai/kling-video/v1.5/pro/image-to-video': { + input: SchemaKlingVideoV15ProImageToVideoInput + output: SchemaKlingVideoV15ProImageToVideoOutput + } + 'fal-ai/kling-video/v1/standard/image-to-video': { + input: SchemaKlingVideoV1StandardImageToVideoInput + output: SchemaKlingVideoV1StandardImageToVideoOutput + } + 'fal-ai/stable-video': { + input: SchemaStableVideoInput + output: SchemaStableVideoOutput + } + 'fal-ai/amt-interpolation/frame-interpolation': { + input: SchemaAmtInterpolationFrameInterpolationInput + output: SchemaAmtInterpolationFrameInterpolationOutput + } + 'fal-ai/live-portrait': { + input: SchemaLivePortraitInput + output: SchemaLivePortraitOutput + } + 'fal-ai/musetalk': { + input: SchemaMusetalkInput + output: SchemaMusetalkOutput + } + 'fal-ai/sadtalker': { + input: SchemaSadtalkerInput + output: SchemaSadtalkerOutput + } + 'fal-ai/fast-svd-lcm': { + input: SchemaFastSvdLcmInput + output: SchemaFastSvdLcmOutput + } +} + +/** Union type of all image-to-video model endpoint IDs */ +export type ImageToVideoModel = keyof ImageToVideoEndpointMap + +export const ImageToVideoSchemaMap: Record< + ImageToVideoModel, + { input: z.ZodSchema; output: z.ZodSchema } +> = { + ['fal-ai/wan-effects']: { + input: zSchemaWanEffectsInput, + output: zSchemaWanEffectsOutput, + }, + ['fal-ai/wan-pro/image-to-video']: { + input: zSchemaWanProImageToVideoInput, + output: zSchemaWanProImageToVideoOutput, + }, + ['fal-ai/veo2/image-to-video']: { + input: zSchemaVeo2ImageToVideoInput, + output: zSchemaVeo2ImageToVideoOutput, + }, + ['fal-ai/kling-video/v1.6/pro/image-to-video']: { + input: zSchemaKlingVideoV16ProImageToVideoInput, + output: zSchemaKlingVideoV16ProImageToVideoOutput, + }, + ['fal-ai/minimax/video-01/image-to-video']: { + input: zSchemaMinimaxVideo01ImageToVideoInput, + output: zSchemaMinimaxVideo01ImageToVideoOutput, + }, + ['fal-ai/minimax/hailuo-2.3/pro/image-to-video']: { + input: zSchemaMinimaxHailuo23ProImageToVideoInput, + output: zSchemaMinimaxHailuo23ProImageToVideoOutput, + }, + ['fal-ai/wan-25-preview/image-to-video']: { + input: zSchemaWan25PreviewImageToVideoInput, + output: zSchemaWan25PreviewImageToVideoOutput, + }, + ['fal-ai/kling-video/v2.5-turbo/pro/image-to-video']: { + input: zSchemaKlingVideoV25TurboProImageToVideoInput, + output: zSchemaKlingVideoV25TurboProImageToVideoOutput, + }, + ['fal-ai/minimax/hailuo-02/standard/image-to-video']: { + input: zSchemaMinimaxHailuo02StandardImageToVideoInput, + output: zSchemaMinimaxHailuo02StandardImageToVideoOutput, + }, + ['fal-ai/bytedance/seedance/v1/pro/image-to-video']: { + input: zSchemaBytedanceSeedanceV1ProImageToVideoInput, + output: zSchemaBytedanceSeedanceV1ProImageToVideoOutput, + }, + ['fal-ai/kling-video/v2.1/master/image-to-video']: { + input: zSchemaKlingVideoV21MasterImageToVideoInput, + output: zSchemaKlingVideoV21MasterImageToVideoOutput, + }, + ['fal-ai/kling-video/v2.1/standard/image-to-video']: { + input: 
zSchemaKlingVideoV21StandardImageToVideoInput, + output: zSchemaKlingVideoV21StandardImageToVideoOutput, + }, + ['fal-ai/pixverse/v4.5/image-to-video']: { + input: zSchemaPixverseV45ImageToVideoInput, + output: zSchemaPixverseV45ImageToVideoOutput, + }, + ['fal-ai/kling-video/v2/master/image-to-video']: { + input: zSchemaKlingVideoV2MasterImageToVideoInput, + output: zSchemaKlingVideoV2MasterImageToVideoOutput, + }, + ['fal-ai/wan-i2v']: { + input: zSchemaWanI2vInput, + output: zSchemaWanI2vOutput, + }, + ['fal-ai/pixverse/v5.6/transition']: { + input: zSchemaPixverseV56TransitionInput, + output: zSchemaPixverseV56TransitionOutput, + }, + ['fal-ai/pixverse/v5.6/image-to-video']: { + input: zSchemaPixverseV56ImageToVideoInput, + output: zSchemaPixverseV56ImageToVideoOutput, + }, + ['fal-ai/vidu/q2/reference-to-video/pro']: { + input: zSchemaViduQ2ReferenceToVideoProInput, + output: zSchemaViduQ2ReferenceToVideoProOutput, + }, + ['wan/v2.6/image-to-video/flash']: { + input: zSchemaV26ImageToVideoFlashInput, + output: zSchemaV26ImageToVideoFlashOutput, + }, + ['fal-ai/ltx-2-19b/distilled/image-to-video/lora']: { + input: zSchemaLtx219bDistilledImageToVideoLoraInput, + output: zSchemaLtx219bDistilledImageToVideoLoraOutput, + }, + ['fal-ai/ltx-2-19b/distilled/image-to-video']: { + input: zSchemaLtx219bDistilledImageToVideoInput, + output: zSchemaLtx219bDistilledImageToVideoOutput, + }, + ['fal-ai/ltx-2-19b/image-to-video/lora']: { + input: zSchemaLtx219bImageToVideoLoraInput, + output: zSchemaLtx219bImageToVideoLoraOutput, + }, + ['fal-ai/ltx-2-19b/image-to-video']: { + input: zSchemaLtx219bImageToVideoInput, + output: zSchemaLtx219bImageToVideoOutput, + }, + ['fal-ai/wan-move']: { + input: zSchemaWanMoveInput, + output: zSchemaWanMoveOutput, + }, + ['fal-ai/kandinsky5-pro/image-to-video']: { + input: zSchemaKandinsky5ProImageToVideoInput, + output: zSchemaKandinsky5ProImageToVideoOutput, + }, + ['fal-ai/bytedance/seedance/v1.5/pro/image-to-video']: { + input: zSchemaBytedanceSeedanceV15ProImageToVideoInput, + output: zSchemaBytedanceSeedanceV15ProImageToVideoOutput, + }, + ['fal-ai/live-avatar']: { + input: zSchemaLiveAvatarInput, + output: zSchemaLiveAvatarOutput, + }, + ['fal-ai/hunyuan-video-v1.5/image-to-video']: { + input: zSchemaHunyuanVideoV15ImageToVideoInput, + output: zSchemaHunyuanVideoV15ImageToVideoOutput, + }, + ['wan/v2.6/image-to-video']: { + input: zSchemaV26ImageToVideoInput, + output: zSchemaV26ImageToVideoOutput, + }, + ['fal-ai/kling-video/o1/standard/reference-to-video']: { + input: zSchemaKlingVideoO1StandardReferenceToVideoInput, + output: zSchemaKlingVideoO1StandardReferenceToVideoOutput, + }, + ['fal-ai/kling-video/o1/standard/image-to-video']: { + input: zSchemaKlingVideoO1StandardImageToVideoInput, + output: zSchemaKlingVideoO1StandardImageToVideoOutput, + }, + ['fal-ai/creatify/aurora']: { + input: zSchemaCreatifyAuroraInput, + output: zSchemaCreatifyAuroraOutput, + }, + ['fal-ai/kling-video/ai-avatar/v2/pro']: { + input: zSchemaKlingVideoAiAvatarV2ProInput, + output: zSchemaKlingVideoAiAvatarV2ProOutput, + }, + ['fal-ai/kling-video/ai-avatar/v2/standard']: { + input: zSchemaKlingVideoAiAvatarV2StandardInput, + output: zSchemaKlingVideoAiAvatarV2StandardOutput, + }, + ['fal-ai/kling-video/v2.6/pro/image-to-video']: { + input: zSchemaKlingVideoV26ProImageToVideoInput, + output: zSchemaKlingVideoV26ProImageToVideoOutput, + }, + ['fal-ai/pixverse/v5.5/effects']: { + input: zSchemaPixverseV55EffectsInput, + output: zSchemaPixverseV55EffectsOutput, + }, + 
['fal-ai/pixverse/v5.5/transition']: { + input: zSchemaPixverseV55TransitionInput, + output: zSchemaPixverseV55TransitionOutput, + }, + ['fal-ai/pixverse/v5.5/image-to-video']: { + input: zSchemaPixverseV55ImageToVideoInput, + output: zSchemaPixverseV55ImageToVideoOutput, + }, + ['fal-ai/kling-video/o1/image-to-video']: { + input: zSchemaKlingVideoO1ImageToVideoInput, + output: zSchemaKlingVideoO1ImageToVideoOutput, + }, + ['fal-ai/kling-video/o1/reference-to-video']: { + input: zSchemaKlingVideoO1ReferenceToVideoInput, + output: zSchemaKlingVideoO1ReferenceToVideoOutput, + }, + ['fal-ai/ltx-2/image-to-video/fast']: { + input: zSchemaLtx2ImageToVideoFastInput, + output: zSchemaLtx2ImageToVideoFastOutput, + }, + ['fal-ai/ltx-2/image-to-video']: { + input: zSchemaLtx2ImageToVideoInput, + output: zSchemaLtx2ImageToVideoOutput, + }, + ['bytedance/lynx']: { + input: zSchemaLynxInput, + output: zSchemaLynxOutput, + }, + ['fal-ai/pixverse/swap']: { + input: zSchemaPixverseSwapInput, + output: zSchemaPixverseSwapOutput, + }, + ['fal-ai/pika/v2.2/pikaframes']: { + input: zSchemaPikaV22PikaframesInput, + output: zSchemaPikaV22PikaframesOutput, + }, + ['fal-ai/longcat-video/image-to-video/720p']: { + input: zSchemaLongcatVideoImageToVideo720pInput, + output: zSchemaLongcatVideoImageToVideo720pOutput, + }, + ['fal-ai/longcat-video/image-to-video/480p']: { + input: zSchemaLongcatVideoImageToVideo480pInput, + output: zSchemaLongcatVideoImageToVideo480pOutput, + }, + ['fal-ai/longcat-video/distilled/image-to-video/720p']: { + input: zSchemaLongcatVideoDistilledImageToVideo720pInput, + output: zSchemaLongcatVideoDistilledImageToVideo720pOutput, + }, + ['fal-ai/longcat-video/distilled/image-to-video/480p']: { + input: zSchemaLongcatVideoDistilledImageToVideo480pInput, + output: zSchemaLongcatVideoDistilledImageToVideo480pOutput, + }, + ['fal-ai/minimax/hailuo-2.3-fast/standard/image-to-video']: { + input: zSchemaMinimaxHailuo23FastStandardImageToVideoInput, + output: zSchemaMinimaxHailuo23FastStandardImageToVideoOutput, + }, + ['fal-ai/minimax/hailuo-2.3/standard/image-to-video']: { + input: zSchemaMinimaxHailuo23StandardImageToVideoInput, + output: zSchemaMinimaxHailuo23StandardImageToVideoOutput, + }, + ['fal-ai/minimax/hailuo-2.3-fast/pro/image-to-video']: { + input: zSchemaMinimaxHailuo23FastProImageToVideoInput, + output: zSchemaMinimaxHailuo23FastProImageToVideoOutput, + }, + ['fal-ai/bytedance/seedance/v1/pro/fast/image-to-video']: { + input: zSchemaBytedanceSeedanceV1ProFastImageToVideoInput, + output: zSchemaBytedanceSeedanceV1ProFastImageToVideoOutput, + }, + ['fal-ai/vidu/q2/image-to-video/turbo']: { + input: zSchemaViduQ2ImageToVideoTurboInput, + output: zSchemaViduQ2ImageToVideoTurboOutput, + }, + ['fal-ai/vidu/q2/image-to-video/pro']: { + input: zSchemaViduQ2ImageToVideoProInput, + output: zSchemaViduQ2ImageToVideoProOutput, + }, + ['fal-ai/kling-video/v2.5-turbo/standard/image-to-video']: { + input: zSchemaKlingVideoV25TurboStandardImageToVideoInput, + output: zSchemaKlingVideoV25TurboStandardImageToVideoOutput, + }, + ['fal-ai/veo3.1/fast/first-last-frame-to-video']: { + input: zSchemaVeo31FastFirstLastFrameToVideoInput, + output: zSchemaVeo31FastFirstLastFrameToVideoOutput, + }, + ['fal-ai/veo3.1/first-last-frame-to-video']: { + input: zSchemaVeo31FirstLastFrameToVideoInput, + output: zSchemaVeo31FirstLastFrameToVideoOutput, + }, + ['fal-ai/veo3.1/reference-to-video']: { + input: zSchemaVeo31ReferenceToVideoInput, + output: zSchemaVeo31ReferenceToVideoOutput, + }, + 
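Where the endpoint map gives compile-time checking, `ImageToVideoSchemaMap` (continued below) carries the corresponding zod schemas, so untrusted input can also be validated at runtime before dispatch. A sketch using zod's standard `safeParse`; `parseInput` is a hypothetical helper:

// Validate raw input against the generated zod schema for the chosen endpoint.
function parseInput(model: ImageToVideoModel, raw: unknown) {
  const parsed = ImageToVideoSchemaMap[model].input.safeParse(raw)
  if (!parsed.success) {
    throw new Error(`Invalid input for ${model}: ${parsed.error.message}`)
  }
  return parsed.data // the validated payload
}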
['fal-ai/veo3.1/fast/image-to-video']: { + input: zSchemaVeo31FastImageToVideoInput, + output: zSchemaVeo31FastImageToVideoOutput, + }, + ['fal-ai/veo3.1/image-to-video']: { + input: zSchemaVeo31ImageToVideoInput, + output: zSchemaVeo31ImageToVideoOutput, + }, + ['fal-ai/sora-2/image-to-video/pro']: { + input: zSchemaSora2ImageToVideoProInput, + output: zSchemaSora2ImageToVideoProOutput, + }, + ['fal-ai/sora-2/image-to-video']: { + input: zSchemaSora2ImageToVideoInput, + output: zSchemaSora2ImageToVideoOutput, + }, + ['fal-ai/ovi/image-to-video']: { + input: zSchemaOviImageToVideoInput, + output: zSchemaOviImageToVideoOutput, + }, + ['veed/fabric-1.0/fast']: { + input: zSchemaFabric10FastInput, + output: zSchemaFabric10FastOutput, + }, + ['fal-ai/bytedance/omnihuman/v1.5']: { + input: zSchemaBytedanceOmnihumanV15Input, + output: zSchemaBytedanceOmnihumanV15Output, + }, + ['veed/fabric-1.0']: { + input: zSchemaFabric10Input, + output: zSchemaFabric10Output, + }, + ['fal-ai/kling-video/v1/standard/ai-avatar']: { + input: zSchemaKlingVideoV1StandardAiAvatarInput, + output: zSchemaKlingVideoV1StandardAiAvatarOutput, + }, + ['fal-ai/kling-video/v1/pro/ai-avatar']: { + input: zSchemaKlingVideoV1ProAiAvatarInput, + output: zSchemaKlingVideoV1ProAiAvatarOutput, + }, + ['decart/lucy-14b/image-to-video']: { + input: zSchemaLucy14bImageToVideoInput, + output: zSchemaLucy14bImageToVideoOutput, + }, + ['fal-ai/bytedance/seedance/v1/lite/reference-to-video']: { + input: zSchemaBytedanceSeedanceV1LiteReferenceToVideoInput, + output: zSchemaBytedanceSeedanceV1LiteReferenceToVideoOutput, + }, + ['fal-ai/wan-ati']: { + input: zSchemaWanAtiInput, + output: zSchemaWanAtiOutput, + }, + ['fal-ai/decart/lucy-5b/image-to-video']: { + input: zSchemaDecartLucy5bImageToVideoInput, + output: zSchemaDecartLucy5bImageToVideoOutput, + }, + ['fal-ai/pixverse/v5/transition']: { + input: zSchemaPixverseV5TransitionInput, + output: zSchemaPixverseV5TransitionOutput, + }, + ['fal-ai/pixverse/v5/effects']: { + input: zSchemaPixverseV5EffectsInput, + output: zSchemaPixverseV5EffectsOutput, + }, + ['fal-ai/pixverse/v5/image-to-video']: { + input: zSchemaPixverseV5ImageToVideoInput, + output: zSchemaPixverseV5ImageToVideoOutput, + }, + ['moonvalley/marey/i2v']: { + input: zSchemaMareyI2vInput, + output: zSchemaMareyI2vOutput, + }, + ['fal-ai/bytedance/video-stylize']: { + input: zSchemaBytedanceVideoStylizeInput, + output: zSchemaBytedanceVideoStylizeOutput, + }, + ['fal-ai/wan/v2.2-a14b/image-to-video/lora']: { + input: zSchemaWanV22A14bImageToVideoLoraInput, + output: zSchemaWanV22A14bImageToVideoLoraOutput, + }, + ['fal-ai/minimax/hailuo-02-fast/image-to-video']: { + input: zSchemaMinimaxHailuo02FastImageToVideoInput, + output: zSchemaMinimaxHailuo02FastImageToVideoOutput, + }, + ['fal-ai/veo3/image-to-video']: { + input: zSchemaVeo3ImageToVideoInput, + output: zSchemaVeo3ImageToVideoOutput, + }, + ['fal-ai/wan/v2.2-a14b/image-to-video/turbo']: { + input: zSchemaWanV22A14bImageToVideoTurboInput, + output: zSchemaWanV22A14bImageToVideoTurboOutput, + }, + ['fal-ai/wan/v2.2-5b/image-to-video']: { + input: zSchemaWanV225bImageToVideoInput, + output: zSchemaWanV225bImageToVideoOutput, + }, + ['fal-ai/wan/v2.2-a14b/image-to-video']: { + input: zSchemaWanV22A14bImageToVideoInput, + output: zSchemaWanV22A14bImageToVideoOutput, + }, + ['fal-ai/bytedance/omnihuman']: { + input: zSchemaBytedanceOmnihumanInput, + output: zSchemaBytedanceOmnihumanOutput, + }, + ['fal-ai/ltxv-13b-098-distilled/image-to-video']: { + input: 
zSchemaLtxv13B098DistilledImageToVideoInput, + output: zSchemaLtxv13B098DistilledImageToVideoOutput, + }, + ['fal-ai/veo3/fast/image-to-video']: { + input: zSchemaVeo3FastImageToVideoInput, + output: zSchemaVeo3FastImageToVideoOutput, + }, + ['fal-ai/vidu/q1/reference-to-video']: { + input: zSchemaViduQ1ReferenceToVideoInput, + output: zSchemaViduQ1ReferenceToVideoOutput, + }, + ['fal-ai/ai-avatar/single-text']: { + input: zSchemaAiAvatarSingleTextInput, + output: zSchemaAiAvatarSingleTextOutput, + }, + ['fal-ai/ai-avatar']: { + input: zSchemaAiAvatarInput, + output: zSchemaAiAvatarOutput, + }, + ['fal-ai/ai-avatar/multi-text']: { + input: zSchemaAiAvatarMultiTextInput, + output: zSchemaAiAvatarMultiTextOutput, + }, + ['fal-ai/ai-avatar/multi']: { + input: zSchemaAiAvatarMultiInput, + output: zSchemaAiAvatarMultiOutput, + }, + ['fal-ai/minimax/hailuo-02/pro/image-to-video']: { + input: zSchemaMinimaxHailuo02ProImageToVideoInput, + output: zSchemaMinimaxHailuo02ProImageToVideoOutput, + }, + ['fal-ai/bytedance/seedance/v1/lite/image-to-video']: { + input: zSchemaBytedanceSeedanceV1LiteImageToVideoInput, + output: zSchemaBytedanceSeedanceV1LiteImageToVideoOutput, + }, + ['fal-ai/hunyuan-avatar']: { + input: zSchemaHunyuanAvatarInput, + output: zSchemaHunyuanAvatarOutput, + }, + ['fal-ai/kling-video/v2.1/pro/image-to-video']: { + input: zSchemaKlingVideoV21ProImageToVideoInput, + output: zSchemaKlingVideoV21ProImageToVideoOutput, + }, + ['fal-ai/hunyuan-portrait']: { + input: zSchemaHunyuanPortraitInput, + output: zSchemaHunyuanPortraitOutput, + }, + ['fal-ai/kling-video/v1.6/standard/elements']: { + input: zSchemaKlingVideoV16StandardElementsInput, + output: zSchemaKlingVideoV16StandardElementsOutput, + }, + ['fal-ai/kling-video/v1.6/pro/elements']: { + input: zSchemaKlingVideoV16ProElementsInput, + output: zSchemaKlingVideoV16ProElementsOutput, + }, + ['fal-ai/ltx-video-13b-distilled/image-to-video']: { + input: zSchemaLtxVideo13bDistilledImageToVideoInput, + output: zSchemaLtxVideo13bDistilledImageToVideoOutput, + }, + ['fal-ai/ltx-video-13b-dev/image-to-video']: { + input: zSchemaLtxVideo13bDevImageToVideoInput, + output: zSchemaLtxVideo13bDevImageToVideoOutput, + }, + ['fal-ai/ltx-video-lora/image-to-video']: { + input: zSchemaLtxVideoLoraImageToVideoInput, + output: zSchemaLtxVideoLoraImageToVideoOutput, + }, + ['fal-ai/pixverse/v4.5/transition']: { + input: zSchemaPixverseV45TransitionInput, + output: zSchemaPixverseV45TransitionOutput, + }, + ['fal-ai/pixverse/v4.5/image-to-video/fast']: { + input: zSchemaPixverseV45ImageToVideoFastInput, + output: zSchemaPixverseV45ImageToVideoFastOutput, + }, + ['fal-ai/pixverse/v4.5/effects']: { + input: zSchemaPixverseV45EffectsInput, + output: zSchemaPixverseV45EffectsOutput, + }, + ['fal-ai/hunyuan-custom']: { + input: zSchemaHunyuanCustomInput, + output: zSchemaHunyuanCustomOutput, + }, + ['fal-ai/framepack/f1']: { + input: zSchemaFramepackF1Input, + output: zSchemaFramepackF1Output, + }, + ['fal-ai/vidu/q1/start-end-to-video']: { + input: zSchemaViduQ1StartEndToVideoInput, + output: zSchemaViduQ1StartEndToVideoOutput, + }, + ['fal-ai/vidu/q1/image-to-video']: { + input: zSchemaViduQ1ImageToVideoInput, + output: zSchemaViduQ1ImageToVideoOutput, + }, + ['fal-ai/magi/image-to-video']: { + input: zSchemaMagiImageToVideoInput, + output: zSchemaMagiImageToVideoOutput, + }, + ['fal-ai/pixverse/v4/effects']: { + input: zSchemaPixverseV4EffectsInput, + output: zSchemaPixverseV4EffectsOutput, + }, + ['fal-ai/magi-distilled/image-to-video']: { + input: 
zSchemaMagiDistilledImageToVideoInput, + output: zSchemaMagiDistilledImageToVideoOutput, + }, + ['fal-ai/framepack/flf2v']: { + input: zSchemaFramepackFlf2vInput, + output: zSchemaFramepackFlf2vOutput, + }, + ['fal-ai/wan-flf2v']: { + input: zSchemaWanFlf2vInput, + output: zSchemaWanFlf2vOutput, + }, + ['fal-ai/framepack']: { + input: zSchemaFramepackInput, + output: zSchemaFramepackOutput, + }, + ['fal-ai/pixverse/v4/image-to-video/fast']: { + input: zSchemaPixverseV4ImageToVideoFastInput, + output: zSchemaPixverseV4ImageToVideoFastOutput, + }, + ['fal-ai/pixverse/v4/image-to-video']: { + input: zSchemaPixverseV4ImageToVideoInput, + output: zSchemaPixverseV4ImageToVideoOutput, + }, + ['fal-ai/pixverse/v3.5/effects']: { + input: zSchemaPixverseV35EffectsInput, + output: zSchemaPixverseV35EffectsOutput, + }, + ['fal-ai/pixverse/v3.5/transition']: { + input: zSchemaPixverseV35TransitionInput, + output: zSchemaPixverseV35TransitionOutput, + }, + ['fal-ai/luma-dream-machine/ray-2-flash/image-to-video']: { + input: zSchemaLumaDreamMachineRay2FlashImageToVideoInput, + output: zSchemaLumaDreamMachineRay2FlashImageToVideoOutput, + }, + ['fal-ai/pika/v1.5/pikaffects']: { + input: zSchemaPikaV15PikaffectsInput, + output: zSchemaPikaV15PikaffectsOutput, + }, + ['fal-ai/pika/v2/turbo/image-to-video']: { + input: zSchemaPikaV2TurboImageToVideoInput, + output: zSchemaPikaV2TurboImageToVideoOutput, + }, + ['fal-ai/pika/v2.2/pikascenes']: { + input: zSchemaPikaV22PikascenesInput, + output: zSchemaPikaV22PikascenesOutput, + }, + ['fal-ai/pika/v2.2/image-to-video']: { + input: zSchemaPikaV22ImageToVideoInput, + output: zSchemaPikaV22ImageToVideoOutput, + }, + ['fal-ai/pika/v2.1/image-to-video']: { + input: zSchemaPikaV21ImageToVideoInput, + output: zSchemaPikaV21ImageToVideoOutput, + }, + ['fal-ai/vidu/image-to-video']: { + input: zSchemaViduImageToVideoInput, + output: zSchemaViduImageToVideoOutput, + }, + ['fal-ai/vidu/start-end-to-video']: { + input: zSchemaViduStartEndToVideoInput, + output: zSchemaViduStartEndToVideoOutput, + }, + ['fal-ai/vidu/reference-to-video']: { + input: zSchemaViduReferenceToVideoInput, + output: zSchemaViduReferenceToVideoOutput, + }, + ['fal-ai/vidu/template-to-video']: { + input: zSchemaViduTemplateToVideoInput, + output: zSchemaViduTemplateToVideoOutput, + }, + ['fal-ai/wan-i2v-lora']: { + input: zSchemaWanI2vLoraInput, + output: zSchemaWanI2vLoraOutput, + }, + ['fal-ai/hunyuan-video-image-to-video']: { + input: zSchemaHunyuanVideoImageToVideoInput, + output: zSchemaHunyuanVideoImageToVideoOutput, + }, + ['fal-ai/minimax/video-01-director/image-to-video']: { + input: zSchemaMinimaxVideo01DirectorImageToVideoInput, + output: zSchemaMinimaxVideo01DirectorImageToVideoOutput, + }, + ['fal-ai/skyreels-i2v']: { + input: zSchemaSkyreelsI2vInput, + output: zSchemaSkyreelsI2vOutput, + }, + ['fal-ai/luma-dream-machine/ray-2/image-to-video']: { + input: zSchemaLumaDreamMachineRay2ImageToVideoInput, + output: zSchemaLumaDreamMachineRay2ImageToVideoOutput, + }, + ['fal-ai/hunyuan-video-img2vid-lora']: { + input: zSchemaHunyuanVideoImg2VidLoraInput, + output: zSchemaHunyuanVideoImg2VidLoraOutput, + }, + ['fal-ai/pixverse/v3.5/image-to-video/fast']: { + input: zSchemaPixverseV35ImageToVideoFastInput, + output: zSchemaPixverseV35ImageToVideoFastOutput, + }, + ['fal-ai/pixverse/v3.5/image-to-video']: { + input: zSchemaPixverseV35ImageToVideoInput, + output: zSchemaPixverseV35ImageToVideoOutput, + }, + ['fal-ai/minimax/video-01-subject-reference']: { + input: 
zSchemaMinimaxVideo01SubjectReferenceInput, + output: zSchemaMinimaxVideo01SubjectReferenceOutput, + }, + ['fal-ai/kling-video/v1.6/standard/image-to-video']: { + input: zSchemaKlingVideoV16StandardImageToVideoInput, + output: zSchemaKlingVideoV16StandardImageToVideoOutput, + }, + ['fal-ai/sadtalker/reference']: { + input: zSchemaSadtalkerReferenceInput, + output: zSchemaSadtalkerReferenceOutput, + }, + ['fal-ai/minimax/video-01-live/image-to-video']: { + input: zSchemaMinimaxVideo01LiveImageToVideoInput, + output: zSchemaMinimaxVideo01LiveImageToVideoOutput, + }, + ['fal-ai/ltx-video/image-to-video']: { + input: zSchemaLtxVideoImageToVideoInput, + output: zSchemaLtxVideoImageToVideoOutput, + }, + ['fal-ai/cogvideox-5b/image-to-video']: { + input: zSchemaCogvideox5bImageToVideoInput, + output: zSchemaCogvideox5bImageToVideoOutput, + }, + ['fal-ai/kling-video/v1.5/pro/image-to-video']: { + input: zSchemaKlingVideoV15ProImageToVideoInput, + output: zSchemaKlingVideoV15ProImageToVideoOutput, + }, + ['fal-ai/kling-video/v1/standard/image-to-video']: { + input: zSchemaKlingVideoV1StandardImageToVideoInput, + output: zSchemaKlingVideoV1StandardImageToVideoOutput, + }, + ['fal-ai/stable-video']: { + input: zSchemaStableVideoInput, + output: zSchemaStableVideoOutput, + }, + ['fal-ai/amt-interpolation/frame-interpolation']: { + input: zSchemaAmtInterpolationFrameInterpolationInput, + output: zSchemaAmtInterpolationFrameInterpolationOutput, + }, + ['fal-ai/live-portrait']: { + input: zSchemaLivePortraitInput, + output: zSchemaLivePortraitOutput, + }, + ['fal-ai/musetalk']: { + input: zSchemaMusetalkInput, + output: zSchemaMusetalkOutput, + }, + ['fal-ai/sadtalker']: { + input: zSchemaSadtalkerInput, + output: zSchemaSadtalkerOutput, + }, + ['fal-ai/fast-svd-lcm']: { + input: zSchemaFastSvdLcmInput, + output: zSchemaFastSvdLcmOutput, + }, +} as const + +/** Get the input type for a specific image-to-video model */ +export type ImageToVideoModelInput<T extends ImageToVideoModel> = + ImageToVideoEndpointMap[T]['input'] + +/** Get the output type for a specific image-to-video model */ +export type ImageToVideoModelOutput<T extends ImageToVideoModel> = + ImageToVideoEndpointMap[T]['output'] diff --git a/packages/typescript/ai-fal/src/generated/image-to-video/types.gen.ts b/packages/typescript/ai-fal/src/generated/image-to-video/types.gen.ts new file mode 100644 index 00000000..4656ccf3 --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/image-to-video/types.gen.ts @@ -0,0 +1,26143 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: 'https://queue.fal.run' | (string & {}) +} + +/** + * FastSVDOutput + */ +export type SchemaFastSvdLcmOutput = { + /** + * Seed + * + * + * Seed of the generated video. It will be the same value as the one passed in the + * input, or the randomly generated one that was used in case none was passed. + * + * + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * File + */ +export type SchemaFile = { + /** + * File Size + * + * The size of the file in bytes. + */ + file_size?: number + /** + * File Name + * + * The name of the file. It will be auto-generated if not provided. + */ + file_name?: string + /** + * Content Type + * + * The mime type of the file. + */ + content_type?: string + /** + * Url + * + * The URL where the file can be downloaded from.
+ */ + url: string + /** + * File Data + * + * File data + */ + file_data?: Blob | File +} + +/** + * FastSVDImageInput + */ +export type SchemaFastSvdLcmInput = { + /** + * Motion Bucket Id + * + * + * The motion bucket id determines the motion of the generated video. The + * higher the number, the more motion there will be. + * + */ + motion_bucket_id?: number + /** + * Fps + * + * + * The FPS of the generated video. The higher the number, the faster the video will + * play. Total video length is 25 frames. + * + */ + fps?: number + /** + * Steps + * + * + * The number of steps to run the model for. The higher the number, the better + * the quality and the longer it will take to generate. + * + */ + steps?: number + /** + * Cond Aug + * + * + * The conditioning augmentation determines the amount of noise that will be + * added to the conditioning frame. The higher the number, the more noise + * there will be, and the less the video will look like the initial image. + * Increase it for more motion. + * + */ + cond_aug?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number + /** + * Image Url + * + * The URL of the image to use as a starting point for the generation. + */ + image_url: string +} + +/** + * SadTalkerOutput + */ +export type SchemaSadtalkerOutput = { + /** + * Video + * + * URL of the generated video + */ + video: SchemaFile +} + +/** + * SadTalkerInput + */ +export type SchemaSadtalkerInput = { + /** + * Pose Style + * + * The style of the pose + */ + pose_style?: number + /** + * Source Image Url + * + * URL of the source image + */ + source_image_url: string + /** + * Driven Audio Url + * + * URL of the driven audio + */ + driven_audio_url: string + /** + * Face Enhancer + * + * The type of face enhancer to use + */ + face_enhancer?: 'gfpgan' + /** + * Expression Scale + * + * The scale of the expression + */ + expression_scale?: number + /** + * Face Model Resolution + * + * The resolution of the face model + */ + face_model_resolution?: '256' | '512' + /** + * Still Mode + * + * Whether to use still mode. Less head motion; works with preprocess `full`. + */ + still_mode?: boolean + /** + * Preprocess + * + * The type of preprocessing to use + */ + preprocess?: 'crop' | 'extcrop' | 'resize' | 'full' | 'extfull' +} + +/** + * MuseTalkOutput + */ +export type SchemaMusetalkOutput = { + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * MuseTalkInput + */ +export type SchemaMusetalkInput = { + /** + * Source Video Url + * + * URL of the source video + */ + source_video_url: string + /** + * Audio Url + * + * URL of the audio + */ + audio_url: string +} + +/** + * LivePortraitOutput + */ +export type SchemaLivePortraitOutput = { + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * LivePortraitInput + */ +export type SchemaLivePortraitInput = { + /** + * Smile + * + * Amount to smile + */ + smile?: number + /** + * Video Url + * + * URL of the video to drive the lip syncing. + */ + video_url: string + /** + * Eyebrow + * + * Amount to raise or lower eyebrows + */ + eyebrow?: number + /** + * Flag Stitching + * + * Whether to enable stitching. Recommended to set to True.
+ */ + flag_stitching?: boolean + /** + * Wink + * + * Amount to wink + */ + wink?: number + /** + * Rotate Pitch + * + * Amount to rotate the face in pitch + */ + rotate_pitch?: number + /** + * Blink + * + * Amount to blink the eyes + */ + blink?: number + /** + * Scale + * + * Scaling factor for the face crop. + */ + scale?: number + /** + * Eee + * + * Amount to shape mouth in 'eee' position + */ + eee?: number + /** + * Flag Pasteback + * + * Whether to paste-back/stitch the animated face cropping from the face-cropping space to the original image space. + */ + flag_pasteback?: boolean + /** + * Pupil Y + * + * Amount to move pupils vertically + */ + pupil_y?: number + /** + * Rotate Yaw + * + * Amount to rotate the face in yaw + */ + rotate_yaw?: number + /** + * Flag Do Rot + * + * Whether to conduct the rotation when flag_do_crop is True. + */ + flag_do_rot?: boolean + /** + * Woo + * + * Amount to shape mouth in 'woo' position + */ + woo?: number + /** + * Aaa + * + * Amount to open mouth in 'aaa' shape + */ + aaa?: number + /** + * Image Url + * + * URL of the image to be animated + */ + image_url: string + /** + * Flag Relative + * + * Whether to use relative motion. + */ + flag_relative?: boolean + /** + * Flag Eye Retargeting + * + * Whether to enable eye retargeting. + */ + flag_eye_retargeting?: boolean + /** + * Flag Lip Zero + * + * Whether to set the lip to closed state before animation. Only takes effect when flag_eye_retargeting and flag_lip_retargeting are False. + */ + flag_lip_zero?: boolean + /** + * Batch Size + * + * Batch size for the model. The larger the batch size, the faster the model will run, but the more memory it will consume. + */ + batch_size?: number + /** + * Rotate Roll + * + * Amount to rotate the face in roll + */ + rotate_roll?: number + /** + * Pupil X + * + * Amount to move pupils horizontally + */ + pupil_x?: number + /** + * Vy Ratio + * + * Vertical offset ratio for face crop. Positive values move up, negative values move down. + */ + vy_ratio?: number + /** + * Dsize + * + * Size of the output image. + */ + dsize?: number + /** + * Enable Safety Checker + * + * + * Whether to enable the safety checker. If enabled, the model will check if the input image contains a face before processing it. + * The safety checker will process the input image + * + */ + enable_safety_checker?: boolean + /** + * Vx Ratio + * + * Horizontal offset ratio for face crop. + */ + vx_ratio?: number + /** + * Flag Lip Retargeting + * + * Whether to enable lip retargeting. + */ + flag_lip_retargeting?: boolean + /** + * Flag Do Crop + * + * Whether to crop the source portrait to the face-cropping space. 
+ */ + flag_do_crop?: boolean +} + +/** + * Frame + */ +export type SchemaFrame = { + /** + * URL + * + * URL of the frame + */ + url: string +} + +/** + * AMTInterpolationOutput + */ +export type SchemaAmtInterpolationFrameInterpolationOutput = { + /** + * Video + * + * Generated video + */ + video: SchemaFile +} + +/** + * AMTFrameInterpolationInput + */ +export type SchemaAmtInterpolationFrameInterpolationInput = { + /** + * Frames + * + * Frames to interpolate + */ + frames: Array<SchemaFrame> + /** + * Recursive Interpolation Passes + * + * Number of recursive interpolation passes + */ + recursive_interpolation_passes?: number + /** + * Output FPS + * + * Output frames per second + */ + output_fps?: number +} + +/** + * VideoOutput + */ +export type SchemaStableVideoOutput = { + /** + * Seed + * + * Seed for random number generator + */ + seed: number + /** + * Video + * + * Generated video + */ + video: SchemaFile +} + +/** + * ImageInput + */ +export type SchemaStableVideoInput = { + /** + * Motion Bucket Id + * + * + * The motion bucket id determines the motion of the generated video. The + * higher the number, the more motion there will be. + * + */ + motion_bucket_id?: number + /** + * Fps + * + * The frames per second of the generated video. + */ + fps?: number + /** + * Cond Aug + * + * + * The conditioning augmentation determines the amount of noise that will be + * added to the conditioning frame. The higher the number, the more noise + * there will be, and the less the video will look like the initial image. + * Increase it for more motion. + * + */ + cond_aug?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number + /** + * Image Url + * + * The URL of the image to use as a starting point for the generation. + */ + image_url: string +} + +/** + * KlingV1I2VOutput + */ +export type SchemaKlingVideoV1StandardImageToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * V1ImageToVideoRequest + */ +export type SchemaKlingVideoV1StandardImageToVideoInput = { + /** + * Prompt + * + * The prompt for the video + */ + prompt: string + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: '5' | '10' + /** + * Negative Prompt + */ + negative_prompt?: string + /** + * Image Url + * + * URL of the image to be used for the video + */ + image_url: string + /** + * Static Mask Url + * + * URL of the image for Static Brush Application Area (Mask image created by users using the motion brush) + */ + static_mask_url?: string + /** + * Dynamic Masks + * + * List of dynamic masks + */ + dynamic_masks?: Array<SchemaDynamicMask> + /** + * Tail Image Url + * + * URL of the image to be used for the end of the video + */ + tail_image_url?: string + /** + * Cfg Scale + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt.
+ * + */ + cfg_scale?: number +} + +/** + * Trajectory + */ +export type SchemaTrajectory = { + /** + * Y + * + * Y coordinate of the motion trajectory + */ + y: number + /** + * X + * + * X coordinate of the motion trajectory + */ + x: number +} + +/** + * DynamicMask + */ +export type SchemaDynamicMask = { + /** + * Trajectories + * + * List of trajectories + */ + trajectories?: Array<SchemaTrajectory> + /** + * Mask Url + * + * URL of the image for Dynamic Brush Application Area (Mask image created by users using the motion brush) + */ + mask_url: string +} + +/** + * I2VOutput + */ +export type SchemaKlingVideoV15ProImageToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * KlingV15ProImageToVideoRequest + */ +export type SchemaKlingVideoV15ProImageToVideoInput = { + /** + * Prompt + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated video frame + */ + aspect_ratio?: '16:9' | '9:16' | '1:1' + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: '5' | '10' + /** + * Negative Prompt + */ + negative_prompt?: string + /** + * Image Url + */ + image_url: string + /** + * Static Mask Url + * + * URL of the image for Static Brush Application Area (Mask image created by users using the motion brush) + */ + static_mask_url?: string + /** + * Dynamic Masks + * + * List of dynamic masks + */ + dynamic_masks?: Array<SchemaDynamicMask> + /** + * Tail Image Url + * + * URL of the image to be used for the end of the video + */ + tail_image_url?: string + /** + * Cfg Scale + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt. + * + */ + cfg_scale?: number +} + +/** + * Output + */ +export type SchemaCogvideox5bImageToVideoOutput = { + /** + * Prompt + * + * The prompt used for generating the video. + */ + prompt: string + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Seed + * + * + * Seed of the generated video. It will be the same value passed in the + * input, or the randomly generated seed used in case none was passed. + * + */ + seed: number + /** + * Video + * + * The URL to the generated video + */ + video: SchemaFile +} + +/** + * ImageToVideoInput + */ +export type SchemaCogvideox5bImageToVideoInput = { + /** + * Prompt + * + * The prompt to generate the video from. + */ + prompt: string + /** + * Use Rife + * + * Use RIFE for video interpolation + */ + use_rife?: boolean + /** + * Image URL + * + * The URL to the image to generate the video from. + */ + image_url: string + /** + * Loras + * + * + * The LoRAs to use for the image generation. We currently support one lora. + * + */ + loras?: Array<SchemaLoraWeight> + /** + * Video Size + * + * The size of the generated video. + */ + video_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related video to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform.
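+ * + * A minimal, type-checked example of this input (values are illustrative; note + * that video_size accepts either a SchemaImageSize object or a named preset): + * + *   const input: SchemaCogvideox5bImageToVideoInput = { + *     prompt: 'a slow pan across a foggy harbor at dawn', + *     image_url: 'https://example.com/harbor.png', + *     video_size: { width: 1024, height: 576 }, // or e.g. 'landscape_16_9' + *     use_rife: true, + *     num_inference_steps: 30, + *   }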
+ */ + num_inference_steps?: number + /** + * Export Fps + * + * The target FPS of the video + */ + export_fps?: number + /** + * Negative Prompt + * + * The negative prompt to generate the video from + */ + negative_prompt?: string + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same video every time. + * + */ + seed?: number +} + +/** + * ImageSize + */ +export type SchemaImageSize = { + /** + * Height + * + * The height of the generated image. + */ + height?: number + /** + * Width + * + * The width of the generated image. + */ + width?: number +} + +/** + * LoraWeight + */ +export type SchemaLoraWeight = { + /** + * Path + * + * URL or the path to the LoRA weights. + */ + path: string + /** + * Scale + * + * + * The scale of the LoRA weight. This is used to scale the LoRA weight + * before merging it with the base model. + * + */ + scale?: number +} + +/** + * Output + */ +export type SchemaLtxVideoImageToVideoOutput = { + /** + * Seed + * + * The seed used for random number generation. + */ + seed: number + /** + * Video + * + * The generated video. + */ + video: SchemaFile +} + +/** + * ImageToVideoInput + */ +export type SchemaLtxVideoImageToVideoInput = { + /** + * Prompt + * + * The prompt to generate the video from. + */ + prompt: string + /** + * Guidance Scale + * + * The guidance scale to use. + */ + guidance_scale?: number + /** + * Seed + * + * The seed to use for random number generation. + */ + seed?: number + /** + * Number of Inference Steps + * + * The number of inference steps to take. + */ + num_inference_steps?: number + /** + * Negative Prompt + * + * The negative prompt to generate the video from. + */ + negative_prompt?: string + /** + * Image URL + * + * The URL of the image to generate the video from. + */ + image_url: string +} + +/** + * I2VLiveOutput + */ +export type SchemaMinimaxVideo01LiveImageToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * ImageToVideoRequest + */ +export type SchemaMinimaxVideo01LiveImageToVideoInput = { + /** + * Prompt Optimizer + * + * Whether to use the model's prompt optimizer + */ + prompt_optimizer?: boolean + /** + * Prompt + */ + prompt: string + /** + * Image Url + * + * URL of the image to use as the first frame + */ + image_url: string +} + +/** + * SadTalkerOutput + */ +export type SchemaSadtalkerReferenceOutput = { + /** + * Video + * + * URL of the generated video + */ + video: SchemaFile +} + +/** + * SadTalkerRefVideoInput + */ +export type SchemaSadtalkerReferenceInput = { + /** + * Pose Style + * + * The style of the pose + */ + pose_style?: number + /** + * Source Image Url + * + * URL of the source image + */ + source_image_url: string + /** + * Reference Pose Video Url + * + * URL of the reference video + */ + reference_pose_video_url: string + /** + * Driven Audio Url + * + * URL of the driven audio + */ + driven_audio_url: string + /** + * Face Enhancer + * + * The type of face enhancer to use + */ + face_enhancer?: 'gfpgan' + /** + * Expression Scale + * + * The scale of the expression + */ + expression_scale?: number + /** + * Face Model Resolution + * + * The resolution of the face model + */ + face_model_resolution?: '256' | '512' + /** + * Still Mode + * + * Whether to use still mode. Less head motion; works with preprocess `full`.
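+ * + * A minimal, type-checked example of SchemaSadtalkerReferenceInput (the URLs + * are placeholders): + * + *   const input: SchemaSadtalkerReferenceInput = { + *     source_image_url: 'https://example.com/face.png', + *     reference_pose_video_url: 'https://example.com/pose.mp4', + *     driven_audio_url: 'https://example.com/speech.wav', + *     face_enhancer: 'gfpgan', + *     still_mode: true, + *     preprocess: 'full', + *   }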
+ */ + still_mode?: boolean + /** + * Preprocess + * + * The type of preprocessing to use + */ + preprocess?: 'crop' | 'extcrop' | 'resize' | 'full' | 'extfull' +} + +/** + * I2VOutput + */ +export type SchemaKlingVideoV16StandardImageToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * ImageToVideoRequest + */ +export type SchemaKlingVideoV16StandardImageToVideoInput = { + /** + * Prompt + */ + prompt: string + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: '5' | '10' + /** + * Image Url + */ + image_url: string + /** + * Negative Prompt + */ + negative_prompt?: string + /** + * Cfg Scale + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt. + * + */ + cfg_scale?: number +} + +/** + * SubjectReferenceOutput + */ +export type SchemaMinimaxVideo01SubjectReferenceOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * SubjectReferenceRequest + */ +export type SchemaMinimaxVideo01SubjectReferenceInput = { + /** + * Prompt Optimizer + * + * Whether to use the model's prompt optimizer + */ + prompt_optimizer?: boolean + /** + * Prompt + */ + prompt: string + /** + * Subject Reference Image Url + * + * URL of the subject reference image to use for consistent subject appearance + */ + subject_reference_image_url: string +} + +/** + * I2VOutput + */ +export type SchemaPixverseV35ImageToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * ImageToVideoRequest + */ +export type SchemaPixverseV35ImageToVideoInput = { + /** + * Prompt + */ + prompt: string + /** + * Resolution + * + * The resolution of the generated video + */ + resolution?: '360p' | '540p' | '720p' | '1080p' + /** + * Duration + * + * The duration of the generated video in seconds. 8s videos cost double. 1080p videos are limited to 5 seconds + */ + duration?: '5' | '8' + /** + * Style + * + * The style of the generated video + */ + style?: 'anime' | '3d_animation' | 'clay' | 'comic' | 'cyberpunk' + /** + * Image Url + * + * URL of the image to use as the first frame + */ + image_url: string + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same video every time. + * + */ + seed?: number + /** + * Negative Prompt + * + * Negative prompt to be used for the generation + */ + negative_prompt?: string +} + +/** + * I2VOutput + */ +export type SchemaPixverseV35ImageToVideoFastOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * FastImageToVideoRequest + */ +export type SchemaPixverseV35ImageToVideoFastInput = { + /** + * Prompt + */ + prompt: string + /** + * Resolution + * + * The resolution of the generated video + */ + resolution?: '360p' | '540p' | '720p' + /** + * Style + * + * The style of the generated video + */ + style?: 'anime' | '3d_animation' | 'clay' | 'comic' | 'cyberpunk' + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same video every time. 
+ * + */ + seed?: number + /** + * Negative Prompt + * + * Negative prompt to be used for the generation + */ + negative_prompt?: string + /** + * Image Url + * + * URL of the image to use as the first frame + */ + image_url: string +} + +/** + * Output + */ +export type SchemaHunyuanVideoImg2VidLoraOutput = { + /** + * Seed + * + * The seed used for generating the video. + */ + seed: number + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * Input + */ +export type SchemaHunyuanVideoImg2VidLoraInput = { + /** + * Prompt + * + * The prompt to generate the video from. + */ + prompt: string + /** + * Seed + * + * The seed to use for generating the video. + */ + seed?: number + /** + * Image URL + * + * The URL to the image to generate the video from. The image must be 960x544 or it will get cropped and resized to that size. + */ + image_url: string +} + +/** + * Ray2I2VOutput + */ +export type SchemaLumaDreamMachineRay2ImageToVideoOutput = { + /** + * Video + * + * URL of the generated video + */ + video: SchemaFile +} + +/** + * Ray2ImageToVideoRequest + */ +export type SchemaLumaDreamMachineRay2ImageToVideoInput = { + /** + * Prompt + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: '16:9' | '9:16' | '4:3' | '3:4' | '21:9' | '9:21' + /** + * Resolution + * + * The resolution of the generated video (720p costs 2x more, 1080p costs 4x more) + */ + resolution?: '540p' | '720p' | '1080p' + /** + * Loop + * + * Whether the video should loop (end of video is blended with the beginning) + */ + loop?: boolean + /** + * Duration + * + * The duration of the generated video + */ + duration?: '5s' | '9s' + /** + * Image Url + * + * Initial image to start the video from. Can be used together with end_image_url. + */ + image_url?: string + /** + * End Image Url + * + * Final image to end the video with. Can be used together with image_url. + */ + end_image_url?: string +} + +/** + * SkyreelsI2VResponse + */ +export type SchemaSkyreelsI2vOutput = { + /** + * Seed + * + * The seed used for generation + */ + seed: number + /** + * Video + */ + video: SchemaFile +} + +/** + * SkyreelsI2VRequest + */ +export type SchemaSkyreelsI2vInput = { + /** + * Prompt + * + * The prompt to generate the video from. + */ + prompt: string + /** + * Aspect Ratio + * + * Aspect ratio of the output video + */ + aspect_ratio?: '16:9' | '9:16' + /** + * Image Url + * + * URL of the image input. + */ + image_url: string + /** + * Guidance Scale + * + * Guidance scale for generation (between 1.0 and 20.0) + */ + guidance_scale?: number + /** + * Seed + * + * Random seed for generation. If not provided, a random seed will be used. + */ + seed?: number + /** + * Num Inference Steps + * + * Number of denoising steps (between 1 and 50). Higher values give better quality but take longer. + */ + num_inference_steps?: number + /** + * Negative Prompt + * + * Negative prompt to guide generation away from certain attributes. + */ + negative_prompt?: string +} + +/** + * I2VDirectorOutput + */ +export type SchemaMinimaxVideo01DirectorImageToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * ImageToVideoDirectorRequest + */ +export type SchemaMinimaxVideo01DirectorImageToVideoInput = { + /** + * Prompt Optimizer + * + * Whether to use the model's prompt optimizer + */ + prompt_optimizer?: boolean + /** + * Prompt + * + * Text prompt for video generation. 
Camera movement instructions can be added using square brackets (e.g. [Pan left] or [Zoom in]). You can use up to 3 combined movements per prompt. Supported movements: Truck left/right, Pan left/right, Push in/Pull out, Pedestal up/down, Tilt up/down, Zoom in/out, Shake, Tracking shot, Static shot. For example: [Truck left, Pan right, Zoom in]. For a more detailed guide, refer to https://sixth-switch-2ac.notion.site/T2V-01-Director-Model-Tutorial-with-camera-movement-1886c20a98eb80f395b8e05291ad8645 + */ + prompt: string + /** + * Image Url + * + * URL of the image to use as the first frame + */ + image_url: string +} + +/** + * HunyuanI2VResponse + */ +export type SchemaHunyuanVideoImageToVideoOutput = { + /** + * Seed + * + * The seed used for generating the video. + */ + seed: number + video: SchemaFile +} + +/** + * HunyuanVideoRequest + */ +export type SchemaHunyuanVideoImageToVideoInput = { + /** + * Prompt + * + * The prompt to generate the video from. + */ + prompt: string + /** + * Aspect Ratio (W:H) + * + * The aspect ratio of the video to generate. + */ + aspect_ratio?: '16:9' | '9:16' + /** + * Resolution + * + * The resolution of the video to generate. + */ + resolution?: '720p' + /** + * Image Url + * + * URL of the image input. + */ + image_url: string + /** + * Seed + * + * The seed to use for generating the video. + */ + seed?: number + /** + * Number of Frames + * + * The number of frames to generate. + */ + num_frames?: '129' + /** + * I2V Stability + * + * Turning on I2V Stability reduces hallucination but also reduces motion. + */ + i2v_stability?: boolean +} + +/** + * WanI2VResponse + */ +export type SchemaWanI2vLoraOutput = { + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * WanLoRAI2VRequest + */ +export type SchemaWanI2vLoraInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Shift + * + * Shift parameter for video generation. + */ + shift?: number + /** + * Reverse Video + * + * If true, the video will be reversed. + */ + reverse_video?: boolean + /** + * Loras + * + * LoRA weights to be used in the inference. + */ + loras?: Array<SchemaLoraWeight> + /** + * Frames Per Second + * + * Frames per second of the generated video. Must be between 5 and 24. + */ + frames_per_second?: number + /** + * Turbo Mode + * + * If true, the video will be generated faster with no noticeable degradation in the visual quality. + */ + turbo_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Num Frames + * + * Number of frames to generate. Must be between 81 and 100 (inclusive). If the number of frames is greater than 81, the video will be generated with 1.25x more billing units. + */ + num_frames?: number + /** + * Negative Prompt + * + * Negative prompt for video generation. + */ + negative_prompt?: string + /** + * Aspect Ratio + * + * Aspect ratio of the output video. + */ + aspect_ratio?: 'auto' | '16:9' | '9:16' | '1:1' + /** + * Resolution + * + * Resolution of the generated video (480p or 720p). 480p is 0.5 billing units, and 720p is 1 billing unit. + */ + resolution?: '480p' | '720p' + /** + * Image Url + * + * URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped. + */ + image_url: string + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion.
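+ * + * A type-checked example of SchemaWanI2vLoraInput (illustrative values; per + * the descriptions above, num_frames above 81 bills at 1.25x, and 720p costs + * twice the 0.5-unit 480p rate): + * + *   const input: SchemaWanI2vLoraInput = { + *     prompt: 'the origami crane flaps its wings', + *     image_url: 'https://example.com/crane.png', + *     loras: [{ path: 'https://example.com/paper-style.safetensors', scale: 0.8 }], + *     num_frames: 81, + *     frames_per_second: 16, + *     resolution: '480p', + *   }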
+ */ + enable_prompt_expansion?: boolean + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Guide Scale + * + * Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality. + */ + guide_scale?: number + /** + * Num Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: number +} + +/** + * TemplateToVideoOutput + */ +export type SchemaViduTemplateToVideoOutput = { + /** + * Video + * + * The generated video using a predefined template + */ + video: SchemaFile +} + +/** + * TemplateToVideoRequest + */ +export type SchemaViduTemplateToVideoInput = { + /** + * Aspect Ratio + * + * The aspect ratio of the output video + */ + aspect_ratio?: '16:9' | '9:16' + /** + * Template + * + * AI video template to use. Pricing varies by template: Standard templates (hug, kiss, love_pose, etc.) cost 4 credits ($0.20), Premium templates (lunar_newyear, dynasty_dress, dreamy_wedding, etc.) cost 6 credits ($0.30), and Advanced templates (live_photo) cost 10 credits ($0.50). + */ + template?: + | 'dreamy_wedding' + | 'romantic_lift' + | 'sweet_proposal' + | 'couple_arrival' + | 'cupid_arrow' + | 'pet_lovers' + | 'lunar_newyear' + | 'hug' + | 'kiss' + | 'dynasty_dress' + | 'wish_sender' + | 'love_pose' + | 'hair_swap' + | 'youth_rewind' + | 'morphlab' + | 'live_photo' + | 'emotionlab' + | 'live_memory' + | 'interaction' + | 'christmas' + | 'pet_finger' + | 'eat_mushrooms' + | 'beast_chase_library' + | 'beast_chase_supermarket' + | 'petal_scattered' + | 'emoji_figure' + | 'hair_color_change' + | 'multiple_people_kissing' + | 'beast_chase_amazon' + | 'beast_chase_mountain' + | 'balloonman_explodes_pro' + | 'get_thinner' + | 'jump2pool' + | 'bodyshake' + | 'jiggle_up' + | 'shake_it_dance' + | 'subject_3' + | 'pubg_winner_hit' + | 'shake_it_down' + | 'blueprint_supreme' + | 'hip_twist' + | 'motor_dance' + | 'rat_dance' + | 'kwok_dance' + | 'leg_sweep_dance' + | 'heeseung_march' + | 'shake_to_max' + | 'dame_un_grrr' + | 'i_know' + | 'lit_bounce' + | 'wave_dance' + | 'chill_dance' + | 'hip_flicking' + | 'sakura_season' + | 'zongzi_wrap' + | 'zongzi_drop' + | 'dragonboat_shot' + | 'rain_kiss' + | 'child_memory' + | 'couple_drop' + | 'couple_walk' + | 'flower_receive' + | 'love_drop' + | 'cheek_kiss' + | 'carry_me' + | 'blow_kiss' + | 'love_fall' + | 'french_kiss_8s' + | 'workday_feels' + | 'love_story' + | 'bloom_magic' + | 'ghibli' + | 'minecraft' + | 'box_me' + | 'claw_me' + | 'clayshot' + | 'manga_meme' + | 'quad_meme' + | 'pixel_me' + | 'clayshot_duo' + | 'irasutoya' + | 'american_comic' + | 'simpsons_comic' + | 'yayoi_kusama_style' + | 'pop_art' + | 'jojo_style' + | 'slice_therapy' + | 'balloon_flyaway' + | 'flying' + | 'paperman' + | 'pinch' + | 'bloom_doorobear' + | 'gender_swap' + | 'nap_me' + | 'sexy_me' + | 'spin360' + | 'smooth_shift' + | 'paper_fall' + | 'jump_to_cloud' + | 'pilot' + | 'sweet_dreams' + | 'soul_depart' + | 'punch_hit' + | 'watermelon_hit' + | 'split_stance_pet' + | 'make_face' + | 'break_glass' + | 'split_stance_human' + | 'covered_liquid_metal' + | 'fluffy_plunge' + | 'pet_belly_dance' + | 'water_float' + | 'relax_cut' + | 'head_to_balloon' + | 'cloning' + | 'across_the_universe_jungle' + | 'clothes_spinning_remnant' + | 'across_the_universe_jurassic' + | 'across_the_universe_moon' + | 'fisheye_pet' + | 'hitchcock_zoom' + | 'cute_bangs' + | 'earth_zoom_out' + | 'fisheye_human' + | 
'drive_yacht' + | 'virtual_singer' + | 'earth_zoom_in' + | 'aliens_coming' + | 'drive_ferrari' + | 'bjd_style' + | 'virtual_fitting' + | 'orbit' + | 'zoom_in' + | 'ai_outfit' + | 'spin180' + | 'orbit_dolly' + | 'orbit_dolly_fast' + | 'auto_spin' + | 'walk_forward' + | 'outfit_show' + | 'zoom_in_fast' + | 'zoom_out_image' + | 'zoom_out_startend' + | 'muscling' + | 'captain_america' + | 'hulk' + | 'cap_walk' + | 'hulk_dive' + | 'exotic_princess' + | 'beast_companion' + | 'cartoon_doll' + | 'golden_epoch' + | 'oscar_gala' + | 'fashion_stride' + | 'star_carpet' + | 'flame_carpet' + | 'frost_carpet' + | 'mecha_x' + | 'style_me' + | 'tap_me' + | 'saber_warrior' + | 'pet2human' + | 'graduation' + | 'fishermen' + | 'happy_birthday' + | 'fairy_me' + | 'ladudu_me' + | 'ladudu_me_random' + | 'squid_game' + | 'superman' + | 'grow_wings' + | 'clevage' + | 'fly_with_doraemon' + | 'creatice_product_down' + | 'pole_dance' + | 'hug_from_behind' + | 'creatice_product_up_cybercity' + | 'creatice_product_up_bluecircuit' + | 'creatice_product_up' + | 'run_fast' + | 'background_explosion' + /** + * Seed + * + * Random seed for generation + */ + seed?: number + /** + * Input Image Urls + * + * URLs of the images to use with the template. Number of images required varies by template: 'dynasty_dress' and 'shop_frame' accept 1-2 images, 'wish_sender' requires exactly 3 images, all other templates accept only 1 image. + */ + input_image_urls: Array<string> +} + +/** + * ReferenceToVideoOutput + */ +export type SchemaViduReferenceToVideoOutput = { + /** + * Video + * + * The generated video with consistent subjects from reference images + */ + video: SchemaFile +} + +/** + * ReferenceToVideoRequest + */ +export type SchemaViduReferenceToVideoInput = { + /** + * Prompt + * + * Text prompt for video generation, max 1500 characters + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the output video + */ + aspect_ratio?: '16:9' | '9:16' | '1:1' + /** + * Reference Image Urls + * + * URLs of the reference images to use for consistent subject appearance + */ + reference_image_urls: Array<string> + /** + * Seed + * + * Random seed for generation + */ + seed?: number + /** + * Movement Amplitude + * + * The movement amplitude of objects in the frame + */ + movement_amplitude?: 'auto' | 'small' | 'medium' | 'large' +} + +/** + * StartEndToVideoOutput + */ +export type SchemaViduStartEndToVideoOutput = { + /** + * Video + * + * The generated transition video between start and end frames + */ + video: SchemaFile +} + +/** + * StartEndToVideoRequest + */ +export type SchemaViduStartEndToVideoInput = { + /** + * Prompt + * + * Text prompt for video generation, max 1500 characters + */ + prompt: string + /** + * Start Image Url + * + * URL of the image to use as the first frame + */ + start_image_url: string + /** + * Movement Amplitude + * + * The movement amplitude of objects in the frame + */ + movement_amplitude?: 'auto' | 'small' | 'medium' | 'large' + /** + * Seed + * + * Random seed for generation + */ + seed?: number + /** + * End Image Url + * + * URL of the image to use as the last frame + */ + end_image_url: string +} + +/** + * VideoOutput + */ +export type SchemaViduImageToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * ImageToVideoRequest + */ +export type SchemaViduImageToVideoInput = { + /** + * Prompt + * + * Text prompt for video generation, max 1500 characters + */ + prompt: string + /** + * Seed + * + * Random seed for generation + */ + seed?: number +
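/* + * Illustrative sketch for the template endpoint above (assumptions: the + * @fal-ai/client package and the 'fal-ai/vidu/template-to-video' endpoint id, + * inferred from the type names). Per the description, 'wish_sender' requires + * exactly 3 input images: + * + *   import { fal } from '@fal-ai/client' + *   const { data } = await fal.subscribe('fal-ai/vidu/template-to-video', { + *     input: { + *       template: 'wish_sender', + *       input_image_urls: ['https://example.com/1.png', 'https://example.com/2.png', 'https://example.com/3.png'], + *       aspect_ratio: '9:16', + *     } satisfies SchemaViduTemplateToVideoInput, + *   }) + */ +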
/** + * Movement Amplitude + * + * The movement amplitude of objects in the frame + */ + movement_amplitude?: 'auto' | 'small' | 'medium' | 'large' + /** + * Image Url + * + * URL of the image to use as the first frame + */ + image_url: string +} + +/** + * ImageToVideoV21Output + * + * Output from image-to-video generation + */ +export type SchemaPikaV21ImageToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * ImageToVideov21Input + * + * Base request for image-to-video generation + */ +export type SchemaPikaV21ImageToVideoInput = { + /** + * Prompt + */ + prompt: string + /** + * Resolution + * + * The resolution of the generated video + */ + resolution?: '720p' | '1080p' + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: number + /** + * Seed + * + * The seed for the random number generator + */ + seed?: number + /** + * Negative Prompt + * + * A negative prompt to guide the model + */ + negative_prompt?: string + /** + * Image Url + */ + image_url: string +} + +/** + * Pika22ImageToVideoOutput + * + * Output model for Pika 2.2 image-to-video generation + */ +export type SchemaPikaV22ImageToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * Pika22ImageToVideoRequest + * + * Request model for Pika 2.2 image-to-video generation + */ +export type SchemaPikaV22ImageToVideoInput = { + /** + * Prompt + */ + prompt: string + /** + * Resolution + * + * The resolution of the generated video + */ + resolution?: '720p' | '1080p' + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: 5 | 10 + /** + * Seed + * + * The seed for the random number generator + */ + seed?: number + /** + * Negative Prompt + * + * A negative prompt to guide the model + */ + negative_prompt?: string + /** + * Image Url + * + * URL of the image to use as the first frame + */ + image_url: string +} + +/** + * Pika22PikascenesOutput + * + * Output model for Pika 2.2 Pikascenes generation + */ +export type SchemaPikaV22PikascenesOutput = { + /** + * Video + * + * The generated video combining multiple images + */ + video: SchemaFile +} + +/** + * Pika22PikascenesRequest + * + * Request model for Pika 2.2 Pikascenes (collection-to-video) generation + */ +export type SchemaPikaV22PikascenesInput = { + /** + * Prompt + * + * Text prompt describing the desired video + */ + prompt: string + /** + * Resolution + * + * The resolution of the generated video + */ + resolution?: '720p' | '1080p' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: '16:9' | '9:16' | '1:1' | '4:5' | '5:4' | '3:2' | '2:3' + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: 5 | 10 + /** + * Ingredients Mode + * + * Mode for integrating multiple images. Precise mode is more accurate, creative mode is more creative. 
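+ * + * A type-checked example of SchemaPikaV22PikascenesInput (values are + * illustrative; note that duration is a number here, unlike the string + * durations used by several other models in this file): + * + *   const input: SchemaPikaV22PikascenesInput = { + *     prompt: 'the two products rotate on a marble pedestal', + *     image_urls: ['https://example.com/a.png', 'https://example.com/b.png'], + *     ingredients_mode: 'precise', + *     duration: 5, + *     resolution: '720p', + *   }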
+ */ + ingredients_mode?: 'precise' | 'creative' + /** + * Seed + * + * The seed for the random number generator + */ + seed?: number + /** + * Image Urls + * + * URLs of images to combine into a video + */ + image_urls: Array<string> + /** + * Negative Prompt + * + * A negative prompt to guide the model + */ + negative_prompt?: string +} + +/** + * TurboImageToVideoOutput + * + * Output model for all video generation endpoints + */ +export type SchemaPikaV2TurboImageToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * ImageToVideoTurboInput + * + * Base request for image-to-video generation + */ +export type SchemaPikaV2TurboImageToVideoInput = { + /** + * Prompt + */ + prompt: string + /** + * Resolution + * + * The resolution of the generated video + */ + resolution?: '720p' | '1080p' + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: number + /** + * Seed + * + * The seed for the random number generator + */ + seed?: number + /** + * Negative Prompt + * + * A negative prompt to guide the model + */ + negative_prompt?: string + /** + * Image Url + */ + image_url: string +} + +/** + * PikaffectsOutput + * + * Output from Pikaffects generation + */ +export type SchemaPikaV15PikaffectsOutput = { + /** + * Video + * + * The generated video with applied effect + */ + video: SchemaFile +} + +/** + * PikaffectsRequest + * + * Request model for Pikaffects endpoint + */ +export type SchemaPikaV15PikaffectsInput = { + /** + * Pikaffect + * + * The Pikaffect to apply + */ + pikaffect: + | 'Cake-ify' + | 'Crumble' + | 'Crush' + | 'Decapitate' + | 'Deflate' + | 'Dissolve' + | 'Explode' + | 'Eye-pop' + | 'Inflate' + | 'Levitate' + | 'Melt' + | 'Peel' + | 'Poke' + | 'Squish' + | 'Ta-da' + | 'Tear' + /** + * Prompt + * + * Text prompt to guide the effect + */ + prompt?: string + /** + * Seed + * + * The seed for the random number generator + */ + seed?: number + /** + * Negative Prompt + * + * Negative prompt to guide the model + */ + negative_prompt?: string + /** + * Image Url + * + * URL of the input image + */ + image_url: string +} + +/** + * Ray2I2VOutput + */ +export type SchemaLumaDreamMachineRay2FlashImageToVideoOutput = { + /** + * Video + * + * URL of the generated video + */ + video: SchemaFile +} + +/** + * Ray2ImageToVideoRequest + */ +export type SchemaLumaDreamMachineRay2FlashImageToVideoInput = { + /** + * Prompt + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: '16:9' | '9:16' | '4:3' | '3:4' | '21:9' | '9:21' + /** + * Resolution + * + * The resolution of the generated video (720p costs 2x more, 1080p costs 4x more) + */ + resolution?: '540p' | '720p' | '1080p' + /** + * Loop + * + * Whether the video should loop (end of video is blended with the beginning) + */ + loop?: boolean + /** + * Duration + * + * The duration of the generated video + */ + duration?: '5s' | '9s' + /** + * Image Url + * + * Initial image to start the video from. Can be used together with end_image_url. + */ + image_url?: string + /** + * End Image Url + * + * Final image to end the video with. Can be used together with image_url.
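+ * + * A type-checked example of SchemaLumaDreamMachineRay2FlashImageToVideoInput + * (illustrative; per the resolution note above, 720p costs 2x and 1080p 4x the + * 540p price): + * + *   const input: SchemaLumaDreamMachineRay2FlashImageToVideoInput = { + *     prompt: 'clouds drift over the valley', + *     image_url: 'https://example.com/start.png', + *     end_image_url: 'https://example.com/end.png', + *     resolution: '540p', + *     duration: '5s', + *     loop: true, + *   }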
+ */ + end_image_url?: string +} + +/** + * TransitionOutput + */ +export type SchemaPixverseV35TransitionOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * TransitionRequest + */ +export type SchemaPixverseV35TransitionInput = { + /** + * First Image Url + * + * URL of the image to use as the first frame + */ + first_image_url: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: '16:9' | '4:3' | '1:1' | '3:4' | '9:16' + /** + * Resolution + * + * The resolution of the generated video + */ + resolution?: '360p' | '540p' | '720p' | '1080p' + /** + * Style + * + * The style of the generated video + */ + style?: 'anime' | '3d_animation' | 'clay' | 'comic' | 'cyberpunk' + /** + * Prompt + * + * The prompt for the transition + */ + prompt: string + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: '5' | '8' + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same video every time. + * + */ + seed?: number + /** + * End Image Url + * + * URL of the image to use as the last frame + */ + end_image_url?: string + /** + * Negative Prompt + * + * Negative prompt to be used for the generation + */ + negative_prompt?: string +} + +/** + * EffectOutput + */ +export type SchemaPixverseV35EffectsOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * EffectInput + */ +export type SchemaPixverseV35EffectsInput = { + /** + * Negative Prompt + * + * Negative prompt to be used for the generation + */ + negative_prompt?: string + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: '5' | '8' + /** + * Resolution + * + * The resolution of the generated video. + */ + resolution?: '360p' | '540p' | '720p' | '1080p' + /** + * Effect + * + * The effect to apply to the video + */ + effect: + | 'Kiss Me AI' + | 'Kiss' + | 'Muscle Surge' + | 'Warmth of Jesus' + | 'Anything, Robot' + | 'The Tiger Touch' + | 'Hug' + | 'Holy Wings' + | 'Microwave' + | 'Zombie Mode' + | 'Squid Game' + | 'Baby Face' + | 'Black Myth: Wukong' + | 'Long Hair Magic' + | 'Leggy Run' + | 'Fin-tastic Mermaid' + | 'Punch Face' + | 'Creepy Devil Smile' + | 'Thunder God' + | 'Eye Zoom Challenge' + | "Who's Arrested?" + | 'Baby Arrived' + | 'Werewolf Rage' + | 'Bald Swipe' + | 'BOOM DROP' + | 'Huge Cutie' + | 'Liquid Metal' + | 'Sharksnap!' 
+ | 'Dust Me Away' + | '3D Figurine Factor' + | 'Bikini Up' + | 'My Girlfriends' + | 'My Boyfriends' + | 'Subject 3 Fever' + | 'Earth Zoom' + | 'Pole Dance' + | 'Vroom Dance' + | 'GhostFace Terror' + | 'Dragon Evoker' + | 'Skeletal Bae' + | 'Summoning succubus' + | 'Halloween Voodoo Doll' + | '3D Naked-Eye AD' + | 'Package Explosion' + | 'Dishes Served' + | 'Ocean ad' + | 'Supermarket AD' + | 'Tree doll' + | 'Come Feel My Abs' + | 'The Bicep Flex' + | 'London Elite Vibe' + | 'Flora Nymph Gown' + | 'Christmas Costume' + | "It's Snowy" + | 'Reindeer Cruiser' + | 'Snow Globe Maker' + | 'Pet Christmas Outfit' + | 'Adopt a Polar Pal' + | 'Cat Christmas Box' + | 'Starlight Gift Box' + | 'Xmas Poster' + | 'Pet Christmas Tree' + | 'City Santa Hat' + | 'Stocking Sweetie' + | 'Christmas Night' + | 'Xmas Front Page Karma' + | "Grinch's Xmas Hijack" + | 'Giant Product' + | 'Truck Fashion Shoot' + | 'Beach AD' + | 'Shoal Surround' + | 'Mechanical Assembly' + | 'Lighting AD' + | 'Billboard AD' + | 'Product close-up' + | 'Parachute Delivery' + | 'Dreamlike Cloud' + | 'Macaron Machine' + | 'Poster AD' + | 'Truck AD' + | 'Graffiti AD' + | '3D Figurine Factory' + | 'The Exclusive First Class' + | 'Art Zoom Challenge' + | 'I Quit' + | 'Hitchcock Dolly Zoom' + | 'Smell the Lens' + | 'I believe I can fly' + | 'Strikout Dance' + | 'Pixel World' + | 'Mint in Box' + | 'Hands up, Hand' + | 'Flora Nymph Go' + | 'Somber Embrace' + | 'Beam me up' + | 'Suit Swagger' + /** + * Image Url + * + * Optional URL of the image to use as the first frame. If not provided, generates from text + */ + image_url: string +} + +/** + * I2VOutputV4 + */ +export type SchemaPixverseV4ImageToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * ImageToVideoRequestV4 + */ +export type SchemaPixverseV4ImageToVideoInput = { + /** + * Prompt + */ + prompt: string + /** + * Resolution + * + * The resolution of the generated video + */ + resolution?: '360p' | '540p' | '720p' | '1080p' + /** + * Duration + * + * The duration of the generated video in seconds. 8s videos cost double. 1080p videos are limited to 5 seconds + */ + duration?: '5' | '8' + /** + * Style + * + * The style of the generated video + */ + style?: 'anime' | '3d_animation' | 'clay' | 'comic' | 'cyberpunk' + /** + * Camera Movement + * + * The type of camera movement to apply to the video + */ + camera_movement?: + | 'horizontal_left' + | 'horizontal_right' + | 'vertical_up' + | 'vertical_down' + | 'zoom_in' + | 'zoom_out' + | 'crane_up' + | 'quickly_zoom_in' + | 'quickly_zoom_out' + | 'smooth_zoom_in' + | 'camera_rotation' + | 'robo_arm' + | 'super_dolly_out' + | 'whip_pan' + | 'hitchcock' + | 'left_follow' + | 'right_follow' + | 'pan_left' + | 'pan_right' + | 'fix_bg' + /** + * Image Url + * + * URL of the image to use as the first frame + */ + image_url: string + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same video every time. 
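+ * + * Illustrative sketch of seed-based reproducibility (assumptions: the + * @fal-ai/client package and the 'fal-ai/pixverse/v4/image-to-video' endpoint + * id, inferred from the type name): + * + *   import { fal } from '@fal-ai/client' + *   const input = { + *     prompt: 'the cat stretches and yawns', + *     image_url: 'https://example.com/cat.png', + *     seed: 1234, // pinning the seed makes reruns repeatable + *   } satisfies SchemaPixverseV4ImageToVideoInput + *   const a = await fal.subscribe('fal-ai/pixverse/v4/image-to-video', { input }) + *   const b = await fal.subscribe('fal-ai/pixverse/v4/image-to-video', { input }) + *   // per the note above, a and b should contain the same video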
+ * + */ + seed?: number + /** + * Negative Prompt + * + * Negative prompt to be used for the generation + */ + negative_prompt?: string +} + +/** + * I2VOutputV4 + */ +export type SchemaPixverseV4ImageToVideoFastOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * FastImageToVideoRequestV4 + */ +export type SchemaPixverseV4ImageToVideoFastInput = { + /** + * Prompt + */ + prompt: string + /** + * Resolution + * + * The resolution of the generated video + */ + resolution?: '360p' | '540p' | '720p' + /** + * Style + * + * The style of the generated video + */ + style?: 'anime' | '3d_animation' | 'clay' | 'comic' | 'cyberpunk' + /** + * Camera Movement + * + * The type of camera movement to apply to the video + */ + camera_movement?: + | 'horizontal_left' + | 'horizontal_right' + | 'vertical_up' + | 'vertical_down' + | 'zoom_in' + | 'zoom_out' + | 'crane_up' + | 'quickly_zoom_in' + | 'quickly_zoom_out' + | 'smooth_zoom_in' + | 'camera_rotation' + | 'robo_arm' + | 'super_dolly_out' + | 'whip_pan' + | 'hitchcock' + | 'left_follow' + | 'right_follow' + | 'pan_left' + | 'pan_right' + | 'fix_bg' + /** + * Image Url + * + * URL of the image to use as the first frame + */ + image_url: string + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same video every time. + * + */ + seed?: number + /** + * Negative Prompt + * + * Negative prompt to be used for the generation + */ + negative_prompt?: string +} + +/** + * FramePackResponse + */ +export type SchemaFramepackOutput = { + /** + * Seed + * + * The seed used for generating the video. + */ + seed: number + video: SchemaFile +} + +/** + * FramePackRequest + */ +export type SchemaFramepackInput = { + /** + * Prompt + * + * Text prompt for video generation (max 500 characters). + */ + prompt: string + /** + * Aspect Ratio (W:H) + * + * The aspect ratio of the video to generate. + */ + aspect_ratio?: '16:9' | '9:16' + /** + * Resolution + * + * The resolution of the video to generate. 720p generations cost 1.5x more than 480p generations. + */ + resolution?: '720p' | '480p' + /** + * Number of Frames + * + * The number of frames to generate. + */ + num_frames?: number + /** + * Image Url + * + * URL of the image input. + */ + image_url: string + /** + * Guidance Scale + * + * Guidance scale for the generation. + */ + guidance_scale?: number + /** + * Seed + * + * The seed to use for generating the video. + */ + seed?: number | null + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * Negative prompt for video generation. + */ + negative_prompt?: string + /** + * CFG Scale + * + * Classifier-Free Guidance scale for the generation. + */ + cfg_scale?: number +} + +/** + * WanFLF2VResponse + */ +export type SchemaWanFlf2vOutput = { + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * WanFLF2VRequest + */ +export type SchemaWanFlf2vInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Shift + * + * Shift parameter for video generation. + */ + shift?: number + /** + * Acceleration + * + * Acceleration level to use. The more acceleration, the faster the generation, but with lower quality. The recommended value is 'regular'.
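+ * + * A type-checked example of SchemaWanFlf2vInput, which interpolates between a + * first and a last frame (URLs are placeholders): + * + *   const input: SchemaWanFlf2vInput = { + *     prompt: 'the flower blooms in time lapse', + *     start_image_url: 'https://example.com/bud.png', + *     end_image_url: 'https://example.com/bloom.png', + *     acceleration: 'regular', // the recommended value per the note above + *     resolution: '480p', + *   }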
+ */ + acceleration?: 'none' | 'regular' + /** + * Frames Per Second + * + * Frames per second of the generated video. Must be between 5 and 24. + */ + frames_per_second?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Start Image Url + * + * URL of the starting image. If the input image does not match the chosen aspect ratio, it is resized and center cropped. + */ + start_image_url: string + /** + * End Image Url + * + * URL of the ending image. If the input image does not match the chosen aspect ratio, it is resized and center cropped. + */ + end_image_url: string + /** + * Negative Prompt + * + * Negative prompt for video generation. + */ + negative_prompt?: string + /** + * Num Frames + * + * Number of frames to generate. Must be between 81 and 100 (inclusive). If the number of frames is greater than 81, the video will be generated with 1.25x more billing units. + */ + num_frames?: number + /** + * Resolution + * + * Resolution of the generated video (480p or 720p). 480p is 0.5 billing units, and 720p is 1 billing unit. + */ + resolution?: '480p' | '720p' + /** + * Aspect Ratio + * + * Aspect ratio of the generated video. If 'auto', the aspect ratio will be determined automatically based on the input image. + */ + aspect_ratio?: 'auto' | '16:9' | '9:16' | '1:1' + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Guide Scale + * + * Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality. + */ + guide_scale?: number + /** + * Num Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: number +} + +/** + * FramePackFLF2VResponse + */ +export type SchemaFramepackFlf2vOutput = { + /** + * Seed + * + * The seed used for generating the video. + */ + seed: number + video: SchemaFile +} + +/** + * FramePackF2LFRequest + */ +export type SchemaFramepackFlf2vInput = { + /** + * Prompt + * + * Text prompt for video generation (max 500 characters). + */ + prompt: string + /** + * Aspect Ratio (W:H) + * + * The aspect ratio of the video to generate. + */ + aspect_ratio?: '16:9' | '9:16' + /** + * Resolution + * + * The resolution of the video to generate. 720p generations cost 1.5x more than 480p generations. + */ + resolution?: '720p' | '480p' + /** + * Number of Frames + * + * The number of frames to generate. + */ + num_frames?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Image Url + * + * URL of the image input. + */ + image_url: string + /** + * Strength of last frame + * + * Determines the influence of the final frame on the generated video. Higher values result in the output being more heavily influenced by the last frame. + */ + strength?: number + /** + * Guidance Scale + * + * Guidance scale for the generation. + */ + guidance_scale?: number + /** + * Seed + * + * The seed to use for generating the video. + */ + seed?: number | null + /** + * End Image Url + * + * URL of the end image input. + */ + end_image_url: string + /** + * Negative Prompt + * + * Negative prompt for video generation.
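+ * + * Illustrative queue-based sketch for long-running jobs (assumptions: the + * @fal-ai/client queue API and the 'fal-ai/framepack/flf2v' endpoint id, + * inferred from the type name): + * + *   import { fal } from '@fal-ai/client' + *   const { request_id } = await fal.queue.submit('fal-ai/framepack/flf2v', { + *     input: { + *       prompt: 'the lantern floats upward', + *       image_url: 'https://example.com/first.png', + *       end_image_url: 'https://example.com/last.png', + *     } satisfies SchemaFramepackFlf2vInput, + *   }) + *   // poll fal.queue.status(...) and fetch fal.queue.result(...) once COMPLETED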
+ */ + negative_prompt?: string + /** + * CFG Scale + * + * Classifier-Free Guidance scale for the generation. + */ + cfg_scale?: number +} + +/** + * MagiImageToVideoResponse + */ +export type SchemaMagiDistilledImageToVideoOutput = { + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * MagiImageToVideoRequest + */ +export type SchemaMagiDistilledImageToVideoInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Resolution + * + * Resolution of the generated video (480p or 720p). 480p is 0.5 billing units, and 720p is 1 billing unit. + */ + resolution?: '480p' | '720p' + /** + * Aspect Ratio + * + * Aspect ratio of the generated video. If 'auto', the aspect ratio will be determined automatically based on the input image. + */ + aspect_ratio?: 'auto' | '16:9' | '9:16' | '1:1' + /** + * Image Url + * + * URL of the input image to represent the first frame of the video. If the input image does not match the chosen aspect ratio, it is resized and center cropped. + */ + image_url: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Num Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: 4 | 8 | 16 | 32 + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Num Frames + * + * Number of frames to generate. Must be between 96 and 192 (inclusive). Each additional 24 frames beyond 96 incurs an additional billing unit. + */ + num_frames?: number +} + +/** + * EffectOutput + */ +export type SchemaPixverseV4EffectsOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * EffectInput + */ +export type SchemaPixverseV4EffectsInput = { + /** + * Negative Prompt + * + * Negative prompt to be used for the generation + */ + negative_prompt?: string + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: '5' | '8' + /** + * Resolution + * + * The resolution of the generated video. + */ + resolution?: '360p' | '540p' | '720p' | '1080p' + /** + * Effect + * + * The effect to apply to the video + */ + effect: + | 'Kiss Me AI' + | 'Kiss' + | 'Muscle Surge' + | 'Warmth of Jesus' + | 'Anything, Robot' + | 'The Tiger Touch' + | 'Hug' + | 'Holy Wings' + | 'Microwave' + | 'Zombie Mode' + | 'Squid Game' + | 'Baby Face' + | 'Black Myth: Wukong' + | 'Long Hair Magic' + | 'Leggy Run' + | 'Fin-tastic Mermaid' + | 'Punch Face' + | 'Creepy Devil Smile' + | 'Thunder God' + | 'Eye Zoom Challenge' + | "Who's Arrested?" + | 'Baby Arrived' + | 'Werewolf Rage' + | 'Bald Swipe' + | 'BOOM DROP' + | 'Huge Cutie' + | 'Liquid Metal' + | 'Sharksnap!' 
+ | 'Dust Me Away' + | '3D Figurine Factor' + | 'Bikini Up' + | 'My Girlfriends' + | 'My Boyfriends' + | 'Subject 3 Fever' + | 'Earth Zoom' + | 'Pole Dance' + | 'Vroom Dance' + | 'GhostFace Terror' + | 'Dragon Evoker' + | 'Skeletal Bae' + | 'Summoning succubus' + | 'Halloween Voodoo Doll' + | '3D Naked-Eye AD' + | 'Package Explosion' + | 'Dishes Served' + | 'Ocean ad' + | 'Supermarket AD' + | 'Tree doll' + | 'Come Feel My Abs' + | 'The Bicep Flex' + | 'London Elite Vibe' + | 'Flora Nymph Gown' + | 'Christmas Costume' + | "It's Snowy" + | 'Reindeer Cruiser' + | 'Snow Globe Maker' + | 'Pet Christmas Outfit' + | 'Adopt a Polar Pal' + | 'Cat Christmas Box' + | 'Starlight Gift Box' + | 'Xmas Poster' + | 'Pet Christmas Tree' + | 'City Santa Hat' + | 'Stocking Sweetie' + | 'Christmas Night' + | 'Xmas Front Page Karma' + | "Grinch's Xmas Hijack" + | 'Giant Product' + | 'Truck Fashion Shoot' + | 'Beach AD' + | 'Shoal Surround' + | 'Mechanical Assembly' + | 'Lighting AD' + | 'Billboard AD' + | 'Product close-up' + | 'Parachute Delivery' + | 'Dreamlike Cloud' + | 'Macaron Machine' + | 'Poster AD' + | 'Truck AD' + | 'Graffiti AD' + | '3D Figurine Factory' + | 'The Exclusive First Class' + | 'Art Zoom Challenge' + | 'I Quit' + | 'Hitchcock Dolly Zoom' + | 'Smell the Lens' + | 'I believe I can fly' + | 'Strikout Dance' + | 'Pixel World' + | 'Mint in Box' + | 'Hands up, Hand' + | 'Flora Nymph Go' + | 'Somber Embrace' + | 'Beam me up' + | 'Suit Swagger' + /** + * Image Url + * + * Optional URL of the image to use as the first frame. If not provided, generates from text + */ + image_url: string +} + +/** + * MagiImageToVideoResponse + */ +export type SchemaMagiImageToVideoOutput = { + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * MagiImageToVideoRequest + */ +export type SchemaMagiImageToVideoInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Resolution + * + * Resolution of the generated video (480p or 720p). 480p is 0.5 billing units, and 720p is 1 billing unit. + */ + resolution?: '480p' | '720p' + /** + * Aspect Ratio + * + * Aspect ratio of the generated video. If 'auto', the aspect ratio will be determined automatically based on the input image. + */ + aspect_ratio?: 'auto' | '16:9' | '9:16' | '1:1' + /** + * Image Url + * + * URL of the input image to represent the first frame of the video. If the input image does not match the chosen aspect ratio, it is resized and center cropped. + */ + image_url: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Num Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: 4 | 8 | 16 | 32 | 64 + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Num Frames + * + * Number of frames to generate. Must be between 96 and 192 (inclusive). Each additional 24 frames beyond 96 incurs an additional billing unit. 
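+ * + * A sketch of the billing arithmetic described above, under one possible + * reading of it (actual billing is defined by fal, not by this file): 96 + * frames is the base, and each extra 24 frames adds a billing unit, so 144 + * frames at 720p would be 1 + (144 - 96) / 24 = 3 units: + * + *   const estimatedUnits = (numFrames: number, resolution: '480p' | '720p') => + *     (resolution === '480p' ? 0.5 : 1) + Math.ceil((numFrames - 96) / 24)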
+ */ + num_frames?: number +} + +/** + * Q1ImageToVideoOutput + */ +export type SchemaViduQ1ImageToVideoOutput = { + /** + * Video + * + * The generated video using the Q1 model from a single image + */ + video: SchemaFile +} + +/** + * Q1ImageToVideoRequest + */ +export type SchemaViduQ1ImageToVideoInput = { + /** + * Prompt + * + * Text prompt for video generation, max 1500 characters + */ + prompt: string + /** + * Seed + * + * Seed for the random number generator + */ + seed?: number + /** + * Movement Amplitude + * + * The movement amplitude of objects in the frame + */ + movement_amplitude?: 'auto' | 'small' | 'medium' | 'large' + /** + * Image Url + * + * URL of the image to use as the first frame + */ + image_url: string +} + +/** + * Q1StartEndToVideoOutput + */ +export type SchemaViduQ1StartEndToVideoOutput = { + /** + * Video + * + * The generated transition video between start and end frames using the Q1 model + */ + video: SchemaFile +} + +/** + * Q1StartEndToVideoRequest + */ +export type SchemaViduQ1StartEndToVideoInput = { + /** + * Prompt + * + * Text prompt for video generation, max 1500 characters + */ + prompt: string + /** + * Start Image Url + * + * URL of the image to use as the first frame + */ + start_image_url: string + /** + * Movement Amplitude + * + * The movement amplitude of objects in the frame + */ + movement_amplitude?: 'auto' | 'small' | 'medium' | 'large' + /** + * Seed + * + * Seed for the random number generator + */ + seed?: number + /** + * End Image Url + * + * URL of the image to use as the last frame + */ + end_image_url: string +} + +/** + * FramePackF1Response + */ +export type SchemaFramepackF1Output = { + /** + * Seed + * + * The seed used for generating the video. + */ + seed: number + video: SchemaFile +} + +/** + * FramePackF1Request + */ +export type SchemaFramepackF1Input = { + /** + * Prompt + * + * Text prompt for video generation (max 500 characters). + */ + prompt: string + /** + * Aspect Ratio (W:H) + * + * The aspect ratio of the video to generate. + */ + aspect_ratio?: '16:9' | '9:16' + /** + * Resolution + * + * The resolution of the video to generate. 720p generations cost 1.5x more than 480p generations. + */ + resolution?: '720p' | '480p' + /** + * Number of Frames + * + * The number of frames to generate. + */ + num_frames?: number + /** + * Image Url + * + * URL of the image input. + */ + image_url: string + /** + * Guidance Scale + * + * Guidance scale for the generation. + */ + guidance_scale?: number + /** + * Seed + * + * The seed to use for generating the video. + */ + seed?: number | null + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * Negative prompt for video generation. + */ + negative_prompt?: string + /** + * CFG Scale + * + * Classifier-Free Guidance scale for the generation. + */ + cfg_scale?: number +} + +/** + * HunyuanCustomResponse + */ +export type SchemaHunyuanCustomOutput = { + /** + * Seed + * + * The seed used for generating the video. + */ + seed: number + /** + * Video + */ + video: SchemaFile +} + +/** + * HunyuanCustomRequest + */ +export type SchemaHunyuanCustomInput = { + /** + * Prompt + * + * Text prompt for video generation (max 500 characters). + */ + prompt: string + /** + * Aspect Ratio (W:H) + * + * The aspect ratio of the video to generate. + */ + aspect_ratio?: '16:9' | '9:16' + /** + * Resolution + * + * The resolution of the video to generate.
720p generations cost 1.5x more than 480p generations. + */ + resolution?: '512p' | '720p' + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Number of Frames + * + * The number of frames to generate. + */ + num_frames?: number + /** + * Image Url + * + * URL of the image input. + */ + image_url: string + /** + * Frames per second + * + * The frames per second of the generated video. + */ + fps?: number + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean + /** + * Seed + * + * The seed to use for generating the video. + */ + seed?: number + /** + * Num Inference Steps + * + * The number of inference steps to run. Lower gets faster results, higher gets better results. + */ + num_inference_steps?: number + /** + * Negative Prompt + * + * Negative prompt for video generation. + */ + negative_prompt?: string + /** + * CFG Scale + * + * Classifier-Free Guidance scale for the generation. + */ + cfg_scale?: number +} + +/** + * EffectOutput + */ +export type SchemaPixverseV45EffectsOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * EffectInput + */ +export type SchemaPixverseV45EffectsInput = { + /** + * Negative Prompt + * + * Negative prompt to be used for the generation + */ + negative_prompt?: string + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: '5' | '8' + /** + * Resolution + * + * The resolution of the generated video. + */ + resolution?: '360p' | '540p' | '720p' | '1080p' + /** + * Effect + * + * The effect to apply to the video + */ + effect: + | 'Kiss Me AI' + | 'Kiss' + | 'Muscle Surge' + | 'Warmth of Jesus' + | 'Anything, Robot' + | 'The Tiger Touch' + | 'Hug' + | 'Holy Wings' + | 'Microwave' + | 'Zombie Mode' + | 'Squid Game' + | 'Baby Face' + | 'Black Myth: Wukong' + | 'Long Hair Magic' + | 'Leggy Run' + | 'Fin-tastic Mermaid' + | 'Punch Face' + | 'Creepy Devil Smile' + | 'Thunder God' + | 'Eye Zoom Challenge' + | "Who's Arrested?" + | 'Baby Arrived' + | 'Werewolf Rage' + | 'Bald Swipe' + | 'BOOM DROP' + | 'Huge Cutie' + | 'Liquid Metal' + | 'Sharksnap!' 
+ | 'Dust Me Away' + | '3D Figurine Factor' + | 'Bikini Up' + | 'My Girlfriends' + | 'My Boyfriends' + | 'Subject 3 Fever' + | 'Earth Zoom' + | 'Pole Dance' + | 'Vroom Dance' + | 'GhostFace Terror' + | 'Dragon Evoker' + | 'Skeletal Bae' + | 'Summoning succubus' + | 'Halloween Voodoo Doll' + | '3D Naked-Eye AD' + | 'Package Explosion' + | 'Dishes Served' + | 'Ocean ad' + | 'Supermarket AD' + | 'Tree doll' + | 'Come Feel My Abs' + | 'The Bicep Flex' + | 'London Elite Vibe' + | 'Flora Nymph Gown' + | 'Christmas Costume' + | "It's Snowy" + | 'Reindeer Cruiser' + | 'Snow Globe Maker' + | 'Pet Christmas Outfit' + | 'Adopt a Polar Pal' + | 'Cat Christmas Box' + | 'Starlight Gift Box' + | 'Xmas Poster' + | 'Pet Christmas Tree' + | 'City Santa Hat' + | 'Stocking Sweetie' + | 'Christmas Night' + | 'Xmas Front Page Karma' + | "Grinch's Xmas Hijack" + | 'Giant Product' + | 'Truck Fashion Shoot' + | 'Beach AD' + | 'Shoal Surround' + | 'Mechanical Assembly' + | 'Lighting AD' + | 'Billboard AD' + | 'Product close-up' + | 'Parachute Delivery' + | 'Dreamlike Cloud' + | 'Macaron Machine' + | 'Poster AD' + | 'Truck AD' + | 'Graffiti AD' + | '3D Figurine Factory' + | 'The Exclusive First Class' + | 'Art Zoom Challenge' + | 'I Quit' + | 'Hitchcock Dolly Zoom' + | 'Smell the Lens' + | 'I believe I can fly' + | 'Strikout Dance' + | 'Pixel World' + | 'Mint in Box' + | 'Hands up, Hand' + | 'Flora Nymph Go' + | 'Somber Embrace' + | 'Beam me up' + | 'Suit Swagger' + /** + * Image Url + * + * Optional URL of the image to use as the first frame. If not provided, generates from text + */ + image_url: string +} + +/** + * I2VOutputV4 + */ +export type SchemaPixverseV45ImageToVideoFastOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * FastImageToVideoRequestV4 + */ +export type SchemaPixverseV45ImageToVideoFastInput = { + /** + * Prompt + */ + prompt: string + /** + * Resolution + * + * The resolution of the generated video + */ + resolution?: '360p' | '540p' | '720p' + /** + * Style + * + * The style of the generated video + */ + style?: 'anime' | '3d_animation' | 'clay' | 'comic' | 'cyberpunk' + /** + * Camera Movement + * + * The type of camera movement to apply to the video + */ + camera_movement?: + | 'horizontal_left' + | 'horizontal_right' + | 'vertical_up' + | 'vertical_down' + | 'zoom_in' + | 'zoom_out' + | 'crane_up' + | 'quickly_zoom_in' + | 'quickly_zoom_out' + | 'smooth_zoom_in' + | 'camera_rotation' + | 'robo_arm' + | 'super_dolly_out' + | 'whip_pan' + | 'hitchcock' + | 'left_follow' + | 'right_follow' + | 'pan_left' + | 'pan_right' + | 'fix_bg' + /** + * Image Url + * + * URL of the image to use as the first frame + */ + image_url: string + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same video every time. 
+ * + */ + seed?: number + /** + * Negative Prompt + * + * Negative prompt to be used for the generation + */ + negative_prompt?: string +} + +/** + * TransitionOutput + */ +export type SchemaPixverseV45TransitionOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * TransitionRequest + */ +export type SchemaPixverseV45TransitionInput = { + /** + * First Image Url + * + * URL of the image to use as the first frame + */ + first_image_url: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: '16:9' | '4:3' | '1:1' | '3:4' | '9:16' + /** + * Resolution + * + * The resolution of the generated video + */ + resolution?: '360p' | '540p' | '720p' | '1080p' + /** + * Style + * + * The style of the generated video + */ + style?: 'anime' | '3d_animation' | 'clay' | 'comic' | 'cyberpunk' + /** + * Prompt + * + * The prompt for the transition + */ + prompt: string + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: '5' | '8' + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same video every time. + * + */ + seed?: number + /** + * End Image Url + * + * URL of the image to use as the last frame + */ + end_image_url?: string + /** + * Negative Prompt + * + * Negative prompt to be used for the generation + */ + negative_prompt?: string +} + +/** + * ImageToVideoOutput + */ +export type SchemaLtxVideoLoraImageToVideoOutput = { + /** + * Prompt + * + * The prompt used for generation. + */ + prompt: string + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video. + */ + video: SchemaFile +} + +/** + * ImageToVideoInput + * + * Request model for image-to-video generation. + */ +export type SchemaLtxVideoLoraImageToVideoInput = { + /** + * Number Of Steps + * + * The number of inference steps to use. + */ + number_of_steps?: number + /** + * Resolution + * + * The resolution of the video. + */ + resolution?: '480p' | '720p' + /** + * Reverse Video + * + * Whether to reverse the video. + */ + reverse_video?: boolean + /** + * Aspect Ratio + * + * The aspect ratio of the video. + */ + aspect_ratio?: '16:9' | '1:1' | '9:16' | 'auto' + /** + * Frame Rate + * + * The frame rate of the video. + */ + frame_rate?: number + /** + * Expand Prompt + * + * Whether to expand the prompt using the LLM. + */ + expand_prompt?: boolean + /** + * Number Of Frames + * + * The number of frames in the video. + */ + number_of_frames?: number + /** + * Image Url + * + * The URL of the image to use as input. + */ + image_url: string + /** + * Loras + * + * The LoRA weights to use for generation. + */ + loras?: Array<SchemaLoRaWeight> + /** + * Prompt + * + * The prompt to generate the video from. + */ + prompt: string + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * The seed to use for generation. + */ + seed?: number + /** + * Negative Prompt + * + * The negative prompt to use. + */ + negative_prompt?: string +} + +/** + * LoRAWeight + * + * LoRA weight to use for generation. + */ +export type SchemaLoRaWeight = { + /** + * Path + * + * URL or path to the LoRA weights. + */ + path: string + /** + * Scale + * + * Scale of the LoRA weight. This is a multiplier applied to the LoRA weight when loading it. + */ + scale?: number + /** + * Weight Name + * + * Name of the LoRA weight.
Only used if `path` is a HuggingFace repository, and is only required when the repository contains multiple LoRA weights. + */ + weight_name?: string +} + +/** + * ImageToVideoOutput + */ +export type SchemaLtxVideo13bDevImageToVideoOutput = { + /** + * Prompt + * + * The prompt used for generation. + */ + prompt: string + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * ImageToVideoInput + */ +export type SchemaLtxVideo13bDevImageToVideoInput = { + /** + * Second Pass Skip Initial Steps + * + * The number of inference steps to skip in the initial steps of the second pass. By skipping some steps at the beginning, the second pass can focus on smaller details instead of larger changes. + */ + second_pass_skip_initial_steps?: number + /** + * First Pass Num Inference Steps + * + * Number of inference steps during the first pass. + */ + first_pass_num_inference_steps?: number + /** + * Frame Rate + * + * The frame rate of the video. + */ + frame_rate?: number + /** + * Prompt + * + * Text prompt to guide generation + */ + prompt: string + /** + * Reverse Video + * + * Whether to reverse the video. + */ + reverse_video?: boolean + /** + * Expand Prompt + * + * Whether to expand the prompt using a language model. + */ + expand_prompt?: boolean + /** + * Loras + * + * LoRA weights to use for generation + */ + loras?: Array<SchemaLoRaWeight> + /** + * Second Pass Num Inference Steps + * + * Number of inference steps during the second pass. + */ + second_pass_num_inference_steps?: number + /** + * Num Frames + * + * The number of frames in the video. + */ + num_frames?: number + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * Negative prompt for generation + */ + negative_prompt?: string + /** + * Resolution + * + * Resolution of the generated video (480p or 720p). + */ + resolution?: '480p' | '720p' + /** + * Aspect Ratio + * + * The aspect ratio of the video. + */ + aspect_ratio?: '9:16' | '1:1' | '16:9' | 'auto' + /** + * Image Url + * + * Image URL for Image-to-Video task + */ + image_url: string + /** + * Constant Rate Factor + * + * The constant rate factor (CRF) to compress input media with. Compressed input media more closely matches the model's training data, which can improve motion quality. + */ + constant_rate_factor?: number + /** + * First Pass Skip Final Steps + * + * Number of inference steps to skip in the final steps of the first pass. By skipping some steps at the end, the first pass can focus on larger changes instead of smaller details. + */ + first_pass_skip_final_steps?: number + /** + * Seed + * + * Random seed for generation + */ + seed?: number +} + +/** + * ImageToVideoOutput + */ +export type SchemaLtxVideo13bDistilledImageToVideoOutput = { + /** + * Prompt + * + * The prompt used for generation. + */ + prompt: string + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * DistilledImageToVideoInput + * + * Distilled model input + */ +export type SchemaLtxVideo13bDistilledImageToVideoInput = { + /** + * Second Pass Skip Initial Steps + * + * The number of inference steps to skip in the initial steps of the second pass. By skipping some steps at the beginning, the second pass can focus on smaller details instead of larger changes.
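+ * + * @example + * // Hedged sketch of the two-pass split (step counts are illustrative): the + * // first pass skips its final steps to settle large-scale motion, and the + * // second pass skips its initial steps to spend its budget on detail. + * const passes: Partial<SchemaLtxVideo13bDistilledImageToVideoInput> = { + *   first_pass_num_inference_steps: 8, + *   first_pass_skip_final_steps: 2, + *   second_pass_num_inference_steps: 8, + *   second_pass_skip_initial_steps: 6, + * }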
+ */ + second_pass_skip_initial_steps?: number + /** + * First Pass Num Inference Steps + * + * Number of inference steps during the first pass. + */ + first_pass_num_inference_steps?: number + /** + * Frame Rate + * + * The frame rate of the video. + */ + frame_rate?: number + /** + * Reverse Video + * + * Whether to reverse the video. + */ + reverse_video?: boolean + /** + * Prompt + * + * Text prompt to guide generation + */ + prompt: string + /** + * Expand Prompt + * + * Whether to expand the prompt using a language model. + */ + expand_prompt?: boolean + /** + * Loras + * + * LoRA weights to use for generation + */ + loras?: Array<SchemaLoRaWeight> + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Num Frames + * + * The number of frames in the video. + */ + num_frames?: number + /** + * Second Pass Num Inference Steps + * + * Number of inference steps during the second pass. + */ + second_pass_num_inference_steps?: number + /** + * Negative Prompt + * + * Negative prompt for generation + */ + negative_prompt?: string + /** + * Resolution + * + * Resolution of the generated video (480p or 720p). + */ + resolution?: '480p' | '720p' + /** + * Aspect Ratio + * + * The aspect ratio of the video. + */ + aspect_ratio?: '9:16' | '1:1' | '16:9' | 'auto' + /** + * Image Url + * + * Image URL for Image-to-Video task + */ + image_url: string + /** + * Constant Rate Factor + * + * The constant rate factor (CRF) to compress input media with. Compressed input media more closely matches the model's training data, which can improve motion quality. + */ + constant_rate_factor?: number + /** + * First Pass Skip Final Steps + * + * Number of inference steps to skip in the final steps of the first pass. By skipping some steps at the end, the first pass can focus on larger changes instead of smaller details. + */ + first_pass_skip_final_steps?: number + /** + * Seed + * + * Random seed for generation + */ + seed?: number +} + +/** + * ElementsOutput + */ +export type SchemaKlingVideoV16ProElementsOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * MultiImageToVideoRequest + */ +export type SchemaKlingVideoV16ProElementsInput = { + /** + * Prompt + */ + prompt: string + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: '5' | '10' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video frame + */ + aspect_ratio?: '16:9' | '9:16' | '1:1' + /** + * Input Image Urls + * + * List of image URLs to use for video generation. Supports up to 4 images. + */ + input_image_urls: Array<string> + /** + * Negative Prompt + */ + negative_prompt?: string +} + +/** + * ElementsOutput + */ +export type SchemaKlingVideoV16StandardElementsOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * MultiImageToVideoRequest + */ +export type SchemaKlingVideoV16StandardElementsInput = { + /** + * Prompt + */ + prompt: string + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: '5' | '10' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video frame + */ + aspect_ratio?: '16:9' | '9:16' | '1:1' + /** + * Input Image Urls + * + * List of image URLs to use for video generation. Supports up to 4 images.
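+ * + * @example + * // Hedged sketch (URLs are placeholders): up to 4 reference images, per the + * // description above. + * const input: SchemaKlingVideoV16StandardElementsInput = { + *   prompt: 'the two characters meet in a snowy forest', + *   input_image_urls: [ + *     'https://example.com/character-a.png', + *     'https://example.com/character-b.png', + *   ], + * }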
+ */ + input_image_urls: Array<string> + /** + * Negative Prompt + */ + negative_prompt?: string +} + +/** + * Output + */ +export type SchemaHunyuanPortraitOutput = { + /** + * Video + * + * The generated video with the portrait animation. + */ + video: SchemaFile +} + +/** + * Input + */ +export type SchemaHunyuanPortraitInput = { + /** + * Video Url + * + * The URL of the driving video. + */ + video_url: string + /** + * Seed + * + * Random seed for generation. If None, a random seed will be used. + */ + seed?: number + /** + * Use Arcface + * + * Whether to use ArcFace for face recognition. + */ + use_arcface?: boolean + /** + * Image Url + * + * The URL of the source image. + */ + image_url: string +} + +/** + * ImageToVideoV21ProOutput + */ +export type SchemaKlingVideoV21ProImageToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * ImageToVideoV21ProRequest + */ +export type SchemaKlingVideoV21ProImageToVideoInput = { + /** + * Prompt + */ + prompt: string + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: '5' | '10' + /** + * Image Url + * + * URL of the image to be used for the video + */ + image_url: string + /** + * Negative Prompt + */ + negative_prompt?: string + /** + * Tail Image Url + * + * URL of the image to be used for the end of the video + */ + tail_image_url?: string + /** + * Cfg Scale + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt. + * + */ + cfg_scale?: number +} + +/** + * Output + */ +export type SchemaHunyuanAvatarOutput = { + /** + * Video + * + * The generated video with the avatar animation. + */ + video: SchemaFile +} + +/** + * Input + */ +export type SchemaHunyuanAvatarInput = { + /** + * Text + * + * Text prompt describing the scene. + */ + text?: string + /** + * Image Url + * + * The URL of the reference image. + */ + image_url: string + /** + * Turbo Mode + * + * If true, the video will be generated faster with no noticeable degradation in the visual quality. + */ + turbo_mode?: boolean + /** + * Audio Url + * + * The URL of the audio file. + */ + audio_url: string + /** + * Seed + * + * Random seed for generation. + */ + seed?: number + /** + * Num Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: number + /** + * Num Frames + * + * Number of video frames to generate at 25 FPS. If greater than the input audio length, it will be capped to the length of the input audio.
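+ * + * @example + * // Worked example of the 25 FPS cap described above: 3.2 s of audio holds + * // 3.2 * 25 = 80 frames, so a request for 200 frames is effectively capped + * // to 80 (values illustrative). + * const input: SchemaHunyuanAvatarInput = { + *   image_url: 'https://example.com/portrait.png', + *   audio_url: 'https://example.com/speech.wav', // assume a 3.2 s clip + *   num_frames: 200, + * }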
+ */ + num_frames?: number +} + +/** + * SeedanceVideoOutput + */ +export type SchemaBytedanceSeedanceV1LiteImageToVideoOutput = { + /** + * Seed + * + * Seed used for generation + */ + seed: number + /** + * Video + * + * Generated video file + */ + video: SchemaFile +} + +/** + * SeedanceImageToVideoInput + */ +export type SchemaBytedanceSeedanceV1LiteImageToVideoInput = { + /** + * Prompt + * + * The text prompt used to generate the video + */ + prompt: string + /** + * Resolution + * + * Video resolution - 480p for faster generation, 720p for higher quality + */ + resolution?: '480p' | '720p' | '1080p' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: '21:9' | '16:9' | '4:3' | '1:1' | '3:4' | '9:16' | 'auto' + /** + * Duration + * + * Duration of the video in seconds + */ + duration?: '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | '10' | '11' | '12' + /** + * Image Url + * + * The URL of the image used to generate video + */ + image_url: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Camera Fixed + * + * Whether to fix the camera position + */ + camera_fixed?: boolean + /** + * End Image Url + * + * The URL of the image the video ends with. Defaults to None. + */ + end_image_url?: string + /** + * Seed + * + * Random seed to control video generation. Use -1 for random. + */ + seed?: number +} + +/** + * ImageToVideoHailuo02Output + */ +export type SchemaMinimaxHailuo02ProImageToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * ProImageToVideoHailuo02Input + */ +export type SchemaMinimaxHailuo02ProImageToVideoInput = { + /** + * Prompt Optimizer + * + * Whether to use the model's prompt optimizer + */ + prompt_optimizer?: boolean + /** + * Prompt + */ + prompt: string + /** + * End Image Url + * + * Optional URL of the image to use as the last frame of the video + */ + end_image_url?: string + /** + * Image Url + */ + image_url: string +} + +/** + * AvatarMultiAudioResponse + */ +export type SchemaAiAvatarMultiOutput = { + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * AvatarMultiAudioPersonRequest + */ +export type SchemaAiAvatarMultiInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Resolution + * + * Resolution of the video to generate. Must be either 480p or 720p. + */ + resolution?: '480p' | '720p' + /** + * Acceleration + * + * The acceleration level to use for generation. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * First Audio URL + * + * The URL of the Person 1 audio file. + */ + first_audio_url: string + /** + * Image URL + * + * URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped. + */ + image_url: string + /** + * Second Audio URL + * + * The URL of the Person 2 audio file. + */ + second_audio_url?: string + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Use Only First Audio + * + * Whether to use only the first audio file. + */ + use_only_first_audio?: boolean + /** + * Number of Frames + * + * Number of frames to generate. Must be between 81 and 129 (inclusive). If the number of frames is greater than 81, the video will be generated with 1.25x more billing units.
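+ * + * @example + * // Hedged billing sketch from the note above: 81 frames bills at the base + * // rate, while anything above 81 (up to 129) bills at 1.25x (URLs are + * // placeholders). + * const input: SchemaAiAvatarMultiInput = { + *   prompt: 'two hosts chatting at a desk', + *   image_url: 'https://example.com/hosts.png', + *   first_audio_url: 'https://example.com/host-1.wav', + *   second_audio_url: 'https://example.com/host-2.wav', + *   num_frames: 129, // 1.25x billing units + * }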
+ */ + num_frames?: number +} + +/** + * AvatarMultiTextResponse + */ +export type SchemaAiAvatarMultiTextOutput = { + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * AvatarMultiTextRequest + */ +export type SchemaAiAvatarMultiTextInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Second Text Input + * + * The text input to guide video generation. + */ + second_text_input: string + /** + * Acceleration + * + * The acceleration level to use for generation. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Resolution + * + * Resolution of the video to generate. Must be either 480p or 720p. + */ + resolution?: '480p' | '720p' + /** + * First Text Input + * + * The text input to guide video generation. + */ + first_text_input: string + /** + * Image URL + * + * URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped. + */ + image_url: string + /** + * Voice2 + * + * The second person's voice to use for speech generation + */ + voice2?: + | 'Aria' + | 'Roger' + | 'Sarah' + | 'Laura' + | 'Charlie' + | 'George' + | 'Callum' + | 'River' + | 'Liam' + | 'Charlotte' + | 'Alice' + | 'Matilda' + | 'Will' + | 'Jessica' + | 'Eric' + | 'Chris' + | 'Brian' + | 'Daniel' + | 'Lily' + | 'Bill' + /** + * Voice1 + * + * The first person's voice to use for speech generation + */ + voice1?: + | 'Aria' + | 'Roger' + | 'Sarah' + | 'Laura' + | 'Charlie' + | 'George' + | 'Callum' + | 'River' + | 'Liam' + | 'Charlotte' + | 'Alice' + | 'Matilda' + | 'Will' + | 'Jessica' + | 'Eric' + | 'Chris' + | 'Brian' + | 'Daniel' + | 'Lily' + | 'Bill' + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Number of Frames + * + * Number of frames to generate. Must be between 81 and 129 (inclusive). If the number of frames is greater than 81, the video will be generated with 1.25x more billing units. + */ + num_frames?: number +} + +/** + * AvatarSingleAudioResponse + */ +export type SchemaAiAvatarOutput = { + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * AvatarSingleAudioRequest + */ +export type SchemaAiAvatarInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Resolution + * + * Resolution of the video to generate. Must be either 480p or 720p. + */ + resolution?: '480p' | '720p' + /** + * Acceleration + * + * The acceleration level to use for generation. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Image URL + * + * URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped. + */ + image_url: string + /** + * Audio URL + * + * The URL of the audio file. + */ + audio_url: string + /** + * Number of Frames + * + * Number of frames to generate. Must be between 81 and 129 (inclusive). If the number of frames is greater than 81, the video will be generated with 1.25x more billing units. + */ + num_frames?: number + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number +} + +/** + * AvatarSingleTextResponse + */ +export type SchemaAiAvatarSingleTextOutput = { + /** + * Seed + * + * The seed used for generation.
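+ * + * @example + * // Hedged sketch: feeding the returned seed back into a follow-up request + * // reproduces the run ("result" is a hypothetical prior response). + * const rerun: Pick<SchemaAiAvatarSingleTextInput, 'seed'> = { seed: result.seed }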
+ */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * AvatarSingleTextRequest + */ +export type SchemaAiAvatarSingleTextInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Resolution + * + * Resolution of the video to generate. Must be either 480p or 720p. + */ + resolution?: '480p' | '720p' + /** + * Acceleration + * + * The acceleration level to use for generation. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Text Input + * + * The text input to guide video generation. + */ + text_input: string + /** + * Image URL + * + * URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped. + */ + image_url: string + /** + * Voice + * + * The voice to use for speech generation + */ + voice: + | 'Aria' + | 'Roger' + | 'Sarah' + | 'Laura' + | 'Charlie' + | 'George' + | 'Callum' + | 'River' + | 'Liam' + | 'Charlotte' + | 'Alice' + | 'Matilda' + | 'Will' + | 'Jessica' + | 'Eric' + | 'Chris' + | 'Brian' + | 'Daniel' + | 'Lily' + | 'Bill' + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Number of Frames + * + * Number of frames to generate. Must be between 81 and 129 (inclusive). If the number of frames is greater than 81, the video will be generated with 1.25x more billing units. + */ + num_frames?: number +} + +/** + * Q1ReferenceToVideoOutput + */ +export type SchemaViduQ1ReferenceToVideoOutput = { + /** + * Video + * + * The generated video with consistent subjects from reference images using the Q1 model + */ + video: SchemaFile +} + +/** + * Q1ReferenceToVideoRequest + */ +export type SchemaViduQ1ReferenceToVideoInput = { + /** + * Prompt + * + * Text prompt for video generation, max 1500 characters + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the output video + */ + aspect_ratio?: '16:9' | '9:16' | '1:1' + /** + * Bgm + * + * Whether to add background music to the generated video + */ + bgm?: boolean + /** + * Reference Image Urls + * + * URLs of the reference images to use for consistent subject appearance. Q1 model supports up to 7 reference images. + */ + reference_image_urls: Array<string> + /** + * Seed + * + * Random seed for generation + */ + seed?: number + /** + * Movement Amplitude + * + * The movement amplitude of objects in the frame + */ + movement_amplitude?: 'auto' | 'small' | 'medium' | 'large' +} + +/** + * Veo3ImageToVideoOutput + */ +export type SchemaVeo3FastImageToVideoOutput = { + /** + * Video + * + * The generated video. + */ + video: SchemaFile +} + +/** + * Veo3ImageToVideoInput + */ +export type SchemaVeo3FastImageToVideoInput = { + /** + * Prompt + * + * The text prompt describing how the image should be animated + */ + prompt: string + /** + * Resolution + * + * The resolution of the generated video. + */ + resolution?: '720p' | '1080p' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video. + */ + aspect_ratio?: 'auto' | '16:9' | '9:16' + /** + * Generate Audio + * + * Whether to generate audio for the video. + */ + generate_audio?: boolean + /** + * Auto Fix + * + * Whether to automatically attempt to fix prompts that fail content policy or other validation checks by rewriting them. + */ + auto_fix?: boolean + /** + * Duration + * + * The duration of the generated video. + */ + duration?: '4s' | '6s' | '8s' + /** + * Image URL + * + * URL of the input image to animate.
Should be 720p or higher resolution in 16:9 or 9:16 aspect ratio. If the image is not in 16:9 or 9:16 aspect ratio, it will be cropped to fit. + */ + image_url: string +} + +/** + * ImageToVideoOutput + */ +export type SchemaLtxv13B098DistilledImageToVideoOutput = { + /** + * Prompt + * + * The prompt used for generation. + */ + prompt: string + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * DistilledImageToVideoInput + * + * Distilled model input + */ +export type SchemaLtxv13B098DistilledImageToVideoInput = { + /** + * Second Pass Skip Initial Steps + * + * The number of inference steps to skip in the initial steps of the second pass. By skipping some steps at the beginning, the second pass can focus on smaller details instead of larger changes. + */ + second_pass_skip_initial_steps?: number + /** + * Number of Inference Steps + * + * Number of inference steps during the first pass. + */ + first_pass_num_inference_steps?: number + /** + * Frame Rate + * + * The frame rate of the video. + */ + frame_rate?: number + /** + * Reverse Video + * + * Whether to reverse the video. + */ + reverse_video?: boolean + /** + * Prompt + * + * Text prompt to guide generation + */ + prompt: string + /** + * Expand Prompt + * + * Whether to expand the prompt using a language model. + */ + expand_prompt?: boolean + /** + * Temporal AdaIN Factor + * + * The factor for adaptive instance normalization (AdaIN) applied to generated video chunks after the first. This can help deal with a gradual increase in saturation/contrast in the generated video by normalizing the color distribution across the video. A high value will ensure the color distribution is more consistent across the video, while a low value will allow for more variation in color distribution. + */ + temporal_adain_factor?: number + /** + * Loras + * + * LoRA weights to use for generation + */ + loras?: Array<SchemaLoRaWeight> + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Number of Frames + * + * The number of frames in the video. + */ + num_frames?: number + /** + * Second Pass Number of Inference Steps + * + * Number of inference steps during the second pass. + */ + second_pass_num_inference_steps?: number + /** + * Negative Prompt + * + * Negative prompt for generation + */ + negative_prompt?: string + /** + * Enable Detail Pass + * + * Whether to use a detail pass. If True, the model will perform a second pass to refine the video and enhance details. This incurs a 2.0x cost multiplier on the base price. + */ + enable_detail_pass?: boolean + /** + * Resolution + * + * Resolution of the generated video. + */ + resolution?: '480p' | '720p' + /** + * Aspect Ratio + * + * The aspect ratio of the video. + */ + aspect_ratio?: '9:16' | '1:1' | '16:9' | 'auto' + /** + * Tone Map Compression Ratio + * + * The compression ratio for tone mapping. This is used to compress the dynamic range of the video to improve visual quality. A value of 0.0 means no compression, while a value of 1.0 means maximum compression. + */ + tone_map_compression_ratio?: number + /** + * Image URL + * + * Image URL for Image-to-Video task + */ + image_url: string + /** + * Constant Rate Factor + * + * The constant rate factor (CRF) to compress input media with. Compressed input media more closely matches the model's training data, which can improve motion quality.
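+ * + * @example + * // Hedged cost sketch from the descriptions above: the detail pass incurs a + * // 2.0x multiplier on the base price; the AdaIN factor shown is an + * // illustrative midpoint, not a default. + * const tuning: Partial<SchemaLtxv13B098DistilledImageToVideoInput> = { + *   enable_detail_pass: true, // 2.0x base price + *   temporal_adain_factor: 0.5, + * }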
+ */ + constant_rate_factor?: number + /** + * Seed + * + * Random seed for generation + */ + seed?: number +} + +/** + * OmniHumanOutput + */ +export type SchemaBytedanceOmnihumanOutput = { + /** + * Duration + * + * Duration of audio input/video output as used for billing. + */ + duration: number + /** + * Video + * + * Generated video file + */ + video: SchemaFile +} + +/** + * OmniHumanInput + */ +export type SchemaBytedanceOmnihumanInput = { + /** + * Audio Url + * + * The URL of the audio file to generate the video. Audio must be under 30s long. + */ + audio_url: string + /** + * Image Url + * + * The URL of the image used to generate the video + */ + image_url: string +} + +/** + * WanI2VResponse + */ +export type SchemaWanV22A14bImageToVideoOutput = { + /** + * Prompt + * + * The text prompt used for video generation. + */ + prompt?: string + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * WanI2VRequest + */ +export type SchemaWanV22A14bImageToVideoInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Shift + * + * Shift value for the video. Must be between 1.0 and 10.0. + */ + shift?: number + /** + * Acceleration + * + * Acceleration level to use. The more acceleration, the faster the generation, but with lower quality. The recommended value is 'regular'. + */ + acceleration?: 'none' | 'regular' + /** + * Number of Interpolated Frames + * + * Number of frames to interpolate between each pair of generated frames. Must be between 0 and 4. + */ + num_interpolated_frames?: number + /** + * Frames per Second + * + * Frames per second of the generated video. Must be between 4 and 60. When using interpolation and `adjust_fps_for_interpolation` is set to true (default: true), the final FPS will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If `adjust_fps_for_interpolation` is set to false, this value will be used as-is. + */ + frames_per_second?: number + /** + * Guidance Scale (1st Stage) + * + * Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality. + */ + guidance_scale?: number + /** + * Number of Frames + * + * Number of frames to generate. Must be between 17 and 161 (inclusive). + */ + num_frames?: number + /** + * End Image URL + * + * URL of the end image. + */ + end_image_url?: string + /** + * Negative Prompt + * + * Negative prompt for video generation. + */ + negative_prompt?: string + /** + * Enable Safety Checker + * + * If set to true, input data will be checked for safety before processing. + */ + enable_safety_checker?: boolean + /** + * Video Write Mode + * + * The write mode of the output video. Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Resolution + * + * Resolution of the generated video (480p, 580p, or 720p). + */ + resolution?: '480p' | '580p' | '720p' + /** + * Aspect Ratio + * + * Aspect ratio of the generated video. If 'auto', the aspect ratio will be determined automatically based on the input image.
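+ * + * @example + * // Worked FPS example from the interpolation note above: 16 generated FPS + * // with 1 interpolated frame per pair gives 16 * (1 + 1) = 32 final FPS, + * // assuming `adjust_fps_for_interpolation` keeps its documented default of true. + * const input: Partial<SchemaWanV22A14bImageToVideoInput> = { + *   frames_per_second: 16, + *   num_interpolated_frames: 1, + *   interpolator_model: 'film', + * }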
+ */ + aspect_ratio?: 'auto' | '16:9' | '9:16' | '1:1' + /** + * Enable Output Safety Checker + * + * If set to true, output video will be checked for safety after generation. + */ + enable_output_safety_checker?: boolean + /** + * Image URL + * + * URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped. + */ + image_url: string + /** + * Video Quality + * + * The quality of the output video. Higher quality means better visual quality but larger file size. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Guidance Scale (2nd Stage) + * + * Guidance scale for the second stage of the model. This is used to control the adherence to the prompt in the second stage of the model. + */ + guidance_scale_2?: number + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning. + */ + enable_prompt_expansion?: boolean + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Interpolator Model + * + * The model to use for frame interpolation. If None, no interpolation is applied. + */ + interpolator_model?: 'none' | 'film' | 'rife' + /** + * Number of Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: number + /** + * Adjust FPS for Interpolation + * + * If true, the number of frames per second will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If false, the passed frames per second will be used as-is. + */ + adjust_fps_for_interpolation?: boolean +} + +/** + * WanSmallI2VResponse + */ +export type SchemaWanV225bImageToVideoOutput = { + /** + * Prompt + * + * The text prompt used for video generation. + */ + prompt?: string + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * WanSmallI2VRequest + */ +export type SchemaWanV225bImageToVideoInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Shift + * + * Shift value for the video. Must be between 1.0 and 10.0. + */ + shift?: number + /** + * Number of Interpolated Frames + * + * Number of frames to interpolate between each pair of generated frames. Must be between 0 and 4. + */ + num_interpolated_frames?: number + /** + * Frames per Second + * + * Frames per second of the generated video. Must be between 4 and 60. When using interpolation and `adjust_fps_for_interpolation` is set to true (default: true), the final FPS will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If `adjust_fps_for_interpolation` is set to false, this value will be used as-is. + */ + frames_per_second?: number + /** + * Guidance Scale + * + * Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality. + */ + guidance_scale?: number + /** + * Number of Frames + * + * Number of frames to generate. Must be between 17 and 161 (inclusive).
+ */ + num_frames?: number + /** + * Enable Safety Checker + * + * If set to true, input data will be checked for safety before processing. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * Negative prompt for video generation. + */ + negative_prompt?: string + /** + * Video Write Mode + * + * The write mode of the output video. Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Resolution + * + * Resolution of the generated video (580p or 720p). + */ + resolution?: '580p' | '720p' + /** + * Aspect Ratio + * + * Aspect ratio of the generated video. If 'auto', the aspect ratio will be determined automatically based on the input image. + */ + aspect_ratio?: 'auto' | '16:9' | '9:16' | '1:1' + /** + * Enable Output Safety Checker + * + * If set to true, output video will be checked for safety after generation. + */ + enable_output_safety_checker?: boolean + /** + * Image URL + * + * URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped. + */ + image_url: string + /** + * Video Quality + * + * The quality of the output video. Higher quality means better visual quality but larger file size. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning. + */ + enable_prompt_expansion?: boolean + /** + * Number of Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: number + /** + * Interpolator Model + * + * The model to use for frame interpolation. If None, no interpolation is applied. + */ + interpolator_model?: 'none' | 'film' | 'rife' + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Adjust FPS for Interpolation + * + * If true, the number of frames per second will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If false, the passed frames per second will be used as-is. + */ + adjust_fps_for_interpolation?: boolean +} + +/** + * WanTurboI2VResponse + */ +export type SchemaWanV22A14bImageToVideoTurboOutput = { + /** + * Prompt + * + * The text prompt used for video generation. + */ + prompt?: string + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * WanTurboI2VRequest + */ +export type SchemaWanV22A14bImageToVideoTurboInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Resolution + * + * Resolution of the generated video (480p, 580p, or 720p). + */ + resolution?: '480p' | '580p' | '720p' + /** + * Acceleration + * + * Acceleration level to use. The more acceleration, the faster the generation, but with lower quality. The recommended value is 'regular'. + */ + acceleration?: 'none' | 'regular' + /** + * Aspect Ratio + * + * Aspect ratio of the generated video. 
If 'auto', the aspect ratio will be determined automatically based on the input image. + */ + aspect_ratio?: 'auto' | '16:9' | '9:16' | '1:1' + /** + * Video Write Mode + * + * The write mode of the output video. Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Enable Output Safety Checker + * + * If set to true, output video will be checked for safety after generation. + */ + enable_output_safety_checker?: boolean + /** + * Image URL + * + * URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped. + */ + image_url: string + /** + * Video Quality + * + * The quality of the output video. Higher quality means better visual quality but larger file size. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Enable Safety Checker + * + * If set to true, input data will be checked for safety before processing. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * End Image URL + * + * URL of the end image. + */ + end_image_url?: string + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning. + */ + enable_prompt_expansion?: boolean +} + +/** + * Veo3ImageToVideoOutput + */ +export type SchemaVeo3ImageToVideoOutput = { + /** + * Video + * + * The generated video. + */ + video: SchemaFile +} + +/** + * Veo3ImageToVideoInput + */ +export type SchemaVeo3ImageToVideoInput = { + /** + * Prompt + * + * The text prompt describing how the image should be animated + */ + prompt: string + /** + * Resolution + * + * The resolution of the generated video. + */ + resolution?: '720p' | '1080p' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video. + */ + aspect_ratio?: 'auto' | '16:9' | '9:16' + /** + * Generate Audio + * + * Whether to generate audio for the video. + */ + generate_audio?: boolean + /** + * Auto Fix + * + * Whether to automatically attempt to fix prompts that fail content policy or other validation checks by rewriting them. + */ + auto_fix?: boolean + /** + * Duration + * + * The duration of the generated video. + */ + duration?: '4s' | '6s' | '8s' + /** + * Image URL + * + * URL of the input image to animate. Should be 720p or higher resolution in 16:9 or 9:16 aspect ratio. If the image is not in 16:9 or 9:16 aspect ratio, it will be cropped to fit. + */ + image_url: string +} + +/** + * ImageToVideoHailuo02FastOutput + */ +export type SchemaMinimaxHailuo02FastImageToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * FastImageToVideoHailuo02Input + */ +export type SchemaMinimaxHailuo02FastImageToVideoInput = { + /** + * Prompt Optimizer + * + * Whether to use the model's prompt optimizer + */ + prompt_optimizer?: boolean + /** + * Duration + * + * The duration of the video in seconds. 10-second videos are not supported for 1080p resolution.
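+ * + * @example + * // Hedged sketch (values illustrative): per the note above, 10-second videos + * // are unavailable at 1080p, so '6' is the safe pairing for full-resolution output. + * const input: SchemaMinimaxHailuo02FastImageToVideoInput = { + *   prompt: 'the camera slowly pushes in on the lighthouse', + *   image_url: 'https://example.com/lighthouse.png', + *   duration: '6', + * }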
+ */ + duration?: '6' | '10' + /** + * Prompt + */ + prompt: string + /** + * Image Url + */ + image_url: string +} + +/** + * WanI2VResponse + */ +export type SchemaWanV22A14bImageToVideoLoraOutput = { + /** + * Prompt + * + * The text prompt used for video generation. + */ + prompt?: string + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * WanLoRAI2VRequest + */ +export type SchemaWanV22A14bImageToVideoLoraInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Shift + * + * Shift value for the video. Must be between 1.0 and 10.0. + */ + shift?: number + /** + * Acceleration + * + * Acceleration level to use. The more acceleration, the faster the generation, but with lower quality. The recommended value is 'regular'. + */ + acceleration?: 'none' | 'regular' + /** + * Number of Interpolated Frames + * + * Number of frames to interpolate between each pair of generated frames. Must be between 0 and 4. + */ + num_interpolated_frames?: number + /** + * Reverse Video + * + * If true, the video will be reversed. + */ + reverse_video?: boolean + /** + * Loras + * + * LoRA weights to be used in the inference. + */ + loras?: Array<SchemaLoRaWeight> + /** + * Frames per Second + * + * Frames per second of the generated video. Must be between 4 and 60. When using interpolation and `adjust_fps_for_interpolation` is set to true (default: true), the final FPS will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If `adjust_fps_for_interpolation` is set to false, this value will be used as-is. + */ + frames_per_second?: number + /** + * Enable Safety Checker + * + * If set to true, input data will be checked for safety before processing. + */ + enable_safety_checker?: boolean + /** + * Number of Frames + * + * Number of frames to generate. Must be between 17 and 161 (inclusive). + */ + num_frames?: number + /** + * End Image URL + * + * URL of the end image. + */ + end_image_url?: string + /** + * Negative Prompt + * + * Negative prompt for video generation. + */ + negative_prompt?: string + /** + * Guidance Scale (1st Stage) + * + * Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality. + */ + guidance_scale?: number + /** + * Video Write Mode + * + * The write mode of the output video. Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Resolution + * + * Resolution of the generated video (480p, 580p, or 720p). + */ + resolution?: '480p' | '580p' | '720p' + /** + * Aspect Ratio + * + * Aspect ratio of the generated video. If 'auto', the aspect ratio will be determined automatically based on the input image. + */ + aspect_ratio?: 'auto' | '16:9' | '9:16' | '1:1' + /** + * Enable Output Safety Checker + * + * If set to true, output video will be checked for safety after generation. + */ + enable_output_safety_checker?: boolean + /** + * Image URL + * + * URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped.
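+ * + * @example + * // Hedged LoRA sketch: `path` and `scale` come from SchemaLoRaWeight above; + * // the URLs are placeholders, not real artifacts. + * const input: Partial<SchemaWanV22A14bImageToVideoLoraInput> = { + *   image_url: 'https://example.com/start.png', + *   loras: [{ path: 'https://example.com/style.safetensors', scale: 0.8 }], + * }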
+ */ + image_url: string + /** + * Video Quality + * + * The quality of the output video. Higher quality means better visual quality but larger file size. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Guidance Scale (2nd Stage) + * + * Guidance scale for the second stage of the model. This is used to control the adherence to the prompt in the second stage of the model. + */ + guidance_scale_2?: number + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning. + */ + enable_prompt_expansion?: boolean + /** + * Number of Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: number + /** + * Interpolator Model + * + * The model to use for frame interpolation. If None, no interpolation is applied. + */ + interpolator_model?: 'none' | 'film' | 'rife' + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Adjust FPS for Interpolation + * + * If true, the number of frames per second will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If false, the passed frames per second will be used as-is. + */ + adjust_fps_for_interpolation?: boolean +} + +export type SchemaBytedanceVideoStylizeOutput = unknown + +/** + * StylizeInput + */ +export type SchemaBytedanceVideoStylizeInput = { + /** + * Style + * + * The style for your character in the video. Please use a short description. + */ + style: string + /** + * Image Url + * + * URL of the image to make the stylized video from. + */ + image_url: string +} + +/** + * MareyOutput + */ +export type SchemaMareyI2vOutput = { + video: SchemaFile +} + +/** + * MareyInputI2V + */ +export type SchemaMareyI2vInput = { + /** + * Prompt + * + * The prompt to generate a video from + */ + prompt: string + /** + * Duration + * + * The duration of the generated video. + */ + duration?: '5s' | '10s' + /** + * Image Url + * + * The URL of the image to use as the first frame of the video. + */ + image_url: string + /** + * Dimensions + * + * The dimensions of the generated video in width x height format. + */ + dimensions?: + | '1920x1080' + | '1080x1920' + | '1152x1152' + | '1536x1152' + | '1152x1536' + /** + * Guidance Scale + * + * Controls how strongly the generation is guided by the prompt (0-20). Higher values follow the prompt more closely. + */ + guidance_scale?: number | unknown + /** + * Seed + * + * Seed for random number generation. Use -1 for random seed each run. + */ + seed?: number | unknown + /** + * Negative Prompt + * + * Negative prompt used to guide the model away from undesirable features. + */ + negative_prompt?: string | unknown +} + +/** + * I2VOutputV5 + */ +export type SchemaPixverseV5ImageToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * ImageToVideoRequestV5 + */ +export type SchemaPixverseV5ImageToVideoInput = { + /** + * Prompt + */ + prompt: string + /** + * Resolution + * + * The resolution of the generated video + */ + resolution?: '360p' | '540p' | '720p' | '1080p' + /** + * Duration + * + * The duration of the generated video in seconds. 8s videos cost double. 
1080p videos are limited to 5 seconds + */ + duration?: '5' | '8' + /** + * Style + * + * The style of the generated video + */ + style?: 'anime' | '3d_animation' | 'clay' | 'comic' | 'cyberpunk' + /** + * Image Url + * + * URL of the image to use as the first frame + */ + image_url: string + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same video every time. + * + */ + seed?: number + /** + * Negative Prompt + * + * Negative prompt to be used for the generation + */ + negative_prompt?: string +} + +/** + * EffectOutput + */ +export type SchemaPixverseV5EffectsOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * EffectInput + */ +export type SchemaPixverseV5EffectsInput = { + /** + * Negative Prompt + * + * Negative prompt to be used for the generation + */ + negative_prompt?: string + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: '5' | '8' + /** + * Resolution + * + * The resolution of the generated video. + */ + resolution?: '360p' | '540p' | '720p' | '1080p' + /** + * Effect + * + * The effect to apply to the video + */ + effect: + | 'Kiss Me AI' + | 'Kiss' + | 'Muscle Surge' + | 'Warmth of Jesus' + | 'Anything, Robot' + | 'The Tiger Touch' + | 'Hug' + | 'Holy Wings' + | 'Microwave' + | 'Zombie Mode' + | 'Squid Game' + | 'Baby Face' + | 'Black Myth: Wukong' + | 'Long Hair Magic' + | 'Leggy Run' + | 'Fin-tastic Mermaid' + | 'Punch Face' + | 'Creepy Devil Smile' + | 'Thunder God' + | 'Eye Zoom Challenge' + | "Who's Arrested?" + | 'Baby Arrived' + | 'Werewolf Rage' + | 'Bald Swipe' + | 'BOOM DROP' + | 'Huge Cutie' + | 'Liquid Metal' + | 'Sharksnap!' + | 'Dust Me Away' + | '3D Figurine Factor' + | 'Bikini Up' + | 'My Girlfriends' + | 'My Boyfriends' + | 'Subject 3 Fever' + | 'Earth Zoom' + | 'Pole Dance' + | 'Vroom Dance' + | 'GhostFace Terror' + | 'Dragon Evoker' + | 'Skeletal Bae' + | 'Summoning succubus' + | 'Halloween Voodoo Doll' + | '3D Naked-Eye AD' + | 'Package Explosion' + | 'Dishes Served' + | 'Ocean ad' + | 'Supermarket AD' + | 'Tree doll' + | 'Come Feel My Abs' + | 'The Bicep Flex' + | 'London Elite Vibe' + | 'Flora Nymph Gown' + | 'Christmas Costume' + | "It's Snowy" + | 'Reindeer Cruiser' + | 'Snow Globe Maker' + | 'Pet Christmas Outfit' + | 'Adopt a Polar Pal' + | 'Cat Christmas Box' + | 'Starlight Gift Box' + | 'Xmas Poster' + | 'Pet Christmas Tree' + | 'City Santa Hat' + | 'Stocking Sweetie' + | 'Christmas Night' + | 'Xmas Front Page Karma' + | "Grinch's Xmas Hijack" + | 'Giant Product' + | 'Truck Fashion Shoot' + | 'Beach AD' + | 'Shoal Surround' + | 'Mechanical Assembly' + | 'Lighting AD' + | 'Billboard AD' + | 'Product close-up' + | 'Parachute Delivery' + | 'Dreamlike Cloud' + | 'Macaron Machine' + | 'Poster AD' + | 'Truck AD' + | 'Graffiti AD' + | '3D Figurine Factory' + | 'The Exclusive First Class' + | 'Art Zoom Challenge' + | 'I Quit' + | 'Hitchcock Dolly Zoom' + | 'Smell the Lens' + | 'I believe I can fly' + | 'Strikout Dance' + | 'Pixel World' + | 'Mint in Box' + | 'Hands up, Hand' + | 'Flora Nymph Go' + | 'Somber Embrace' + | 'Beam me up' + | 'Suit Swagger' + /** + * Image Url + * + * Optional URL of the image to use as the first frame. 
If not provided, generates from text + */ + image_url: string +} + +/** + * TransitionOutputV5 + */ +export type SchemaPixverseV5TransitionOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * TransitionRequest + */ +export type SchemaPixverseV5TransitionInput = { + /** + * First Image Url + * + * URL of the image to use as the first frame + */ + first_image_url: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: '16:9' | '4:3' | '1:1' | '3:4' | '9:16' + /** + * Resolution + * + * The resolution of the generated video + */ + resolution?: '360p' | '540p' | '720p' | '1080p' + /** + * Style + * + * The style of the generated video + */ + style?: 'anime' | '3d_animation' | 'clay' | 'comic' | 'cyberpunk' + /** + * Prompt + * + * The prompt for the transition + */ + prompt: string + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: '5' | '8' + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same video every time. + * + */ + seed?: number + /** + * End Image Url + * + * URL of the image to use as the last frame + */ + end_image_url?: string + /** + * Negative Prompt + * + * Negative prompt to be used for the generation + */ + negative_prompt?: string +} + +/** + * ProcessOutput + */ +export type SchemaDecartLucy5bImageToVideoOutput = { + /** + * Video + * + * The generated MP4 video with H.264 encoding + */ + video: SchemaFile +} + +/** + * ProcessRequest + */ +export type SchemaDecartLucy5bImageToVideoInput = { + /** + * Prompt + * + * Text description of the desired video content + */ + prompt: string + /** + * Aspect Ratio + * + * Aspect ratio of the generated video. + */ + aspect_ratio?: '9:16' | '16:9' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Resolution + * + * Resolution of the generated video + */ + resolution?: '720p' + /** + * Image Url + * + * URL of the image to use as the first frame + */ + image_url: string +} + +/** + * A coordinate point with x and y values for motion tracking + */ +export type SchemaTrackPoint = { + /** + * X coordinate + */ + x: number + /** + * Y coordinate + */ + y: number +} + +/** + * WanATIResponse + */ +export type SchemaWanAtiOutput = { + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * WanATIRequest + */ +export type SchemaWanAtiInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Resolution + * + * Resolution of the generated video (480p, 580p, 720p). + */ + resolution?: '480p' | '580p' | '720p' + /** + * Image URL + * + * URL of the input image. + */ + image_url: string + /** + * Track + * + * Motion tracks to guide video generation. Each track is a sequence of points defining a motion trajectory. Multiple tracks can control different elements or objects in the video. Expected format: array of tracks, where each track is an array of points with 'x' and 'y' coordinates (up to 121 points per track). Points will be automatically padded to 121 if fewer are provided. Coordinates should be within the image dimensions. + */ + track: Array<Array<SchemaTrackPoint>> + /** + * Guidance Scale (1st Stage) + * + * Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.
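+ * + * @example + * // Hedged motion-track sketch: two short tracks of SchemaTrackPoint; per the + * // field description above, each is padded to 121 points server-side + * // (coordinates illustrative). + * const track: Array<Array<SchemaTrackPoint>> = [ + *   [{ x: 120, y: 340 }, { x: 132, y: 336 }], // first object drifts right + *   [{ x: 512, y: 90 }, { x: 512, y: 110 }], // second object drops down + * ]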
+ */ + guidance_scale?: number + /** + * Number of Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: number + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number +} + +/** + * SeedanceReferenceToVideoOutput + */ +export type SchemaBytedanceSeedanceV1LiteReferenceToVideoOutput = { + /** + * Seed + * + * Seed used for generation + */ + seed: number + /** + * Video + * + * Generated video file + */ + video: SchemaFile +} + +/** + * SeedanceReferenceToVideoInput + */ +export type SchemaBytedanceSeedanceV1LiteReferenceToVideoInput = { + /** + * Prompt + * + * The text prompt used to generate the video + */ + prompt: string + /** + * Resolution + * + * Video resolution - 480p for faster generation, 720p for higher quality + */ + resolution?: '480p' | '720p' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: '21:9' | '16:9' | '4:3' | '1:1' | '3:4' | '9:16' | 'auto' + /** + * Duration + * + * Duration of the video in seconds + */ + duration?: '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | '10' | '11' | '12' + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Camera Fixed + * + * Whether to fix the camera position + */ + camera_fixed?: boolean + /** + * Reference Image Urls + * + * Reference images to generate the video with. + */ + reference_image_urls: Array<string> + /** + * Seed + * + * Random seed to control video generation. Use -1 for random. + */ + seed?: number +} + +/** + * Lucy14BOutput + */ +export type SchemaLucy14bImageToVideoOutput = { + /** + * Video + * + * The generated MP4 video with H.264 encoding + */ + video: SchemaFile +} + +/** + * Lucy14BImageToVideoInput + */ +export type SchemaLucy14bImageToVideoInput = { + /** + * Sync Mode + * + * + * If set to true, the function will wait for the image to be generated + * and uploaded before returning the response. This will increase the + * latency of the function but it allows you to get the image directly + * in the response without going through the CDN. + * + */ + sync_mode?: boolean + /** + * Aspect Ratio + * + * Aspect ratio of the generated video. + */ + aspect_ratio?: '9:16' | '16:9' + /** + * Prompt + * + * Text description of the desired video content + */ + prompt: string + /** + * Resolution + * + * Resolution of the generated video + */ + resolution?: '720p' + /** + * Image Url + * + * URL of the image to use as the first frame + */ + image_url: string +} + +/** + * AIAvatarOutput + */ +export type SchemaKlingVideoV1ProAiAvatarOutput = { + /** + * Duration + * + * Duration of the output video in seconds. + */ + duration: number + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * AIAvatarInput + */ +export type SchemaKlingVideoV1ProAiAvatarInput = { + /** + * Prompt + * + * The prompt to use for the video generation. + */ + prompt?: string + /** + * Audio Url + * + * The URL of the audio file. + */ + audio_url: string + /** + * Image Url + * + * The URL of the image to use as your avatar + */ + image_url: string +} + +/** + * AIAvatarOutput + */ +export type SchemaKlingVideoV1StandardAiAvatarOutput = { + /** + * Duration + * + * Duration of the output video in seconds.
+ */ + duration: number + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * AIAvatarInput + */ +export type SchemaKlingVideoV1StandardAiAvatarInput = { + /** + * Prompt + * + * The prompt to use for the video generation. + */ + prompt?: string + /** + * Audio Url + * + * The URL of the audio file. + */ + audio_url: string + /** + * Image Url + * + * The URL of the image to use as your avatar + */ + image_url: string +} + +/** + * FabricOneOutput + */ +export type SchemaFabric10Output = { + video: SchemaFile +} + +/** + * FabricOneLipsyncInput + */ +export type SchemaFabric10Input = { + /** + * Resolution + * + * Resolution + */ + resolution: '720p' | '480p' + /** + * Audio Url + */ + audio_url: string + /** + * Image Url + */ + image_url: string +} + +/** + * OmniHumanv15Output + */ +export type SchemaBytedanceOmnihumanV15Output = { + /** + * Duration + * + * Duration of audio input/video output as used for billing. + */ + duration: number + /** + * Video + * + * Generated video file + */ + video: SchemaFile +} + +/** + * OmniHumanv15Input + */ +export type SchemaBytedanceOmnihumanV15Input = { + /** + * Turbo Mode + * + * Generate a video at a faster rate with a slight quality trade-off. + */ + turbo_mode?: boolean + /** + * Resolution + * + * The resolution of the generated video. Defaults to 1080p. 720p generation is faster and higher in quality. 1080p generation is limited to 30s audio and 720p generation is limited to 60s audio. + */ + resolution?: '720p' | '1080p' + /** + * Prompt + * + * The text prompt used to guide the video generation. + */ + prompt?: string + /** + * Audio Url + * + * The URL of the audio file to generate the video. Audio must be under 30s long for 1080p generation and under 60s long for 720p generation. + */ + audio_url: string + /** + * Image Url + * + * The URL of the image used to generate the video + */ + image_url: string +} + +/** + * FabricOneOutput + */ +export type SchemaFabric10FastOutput = { + video: SchemaFile +} + +/** + * FabricOneLipsyncInput + */ +export type SchemaFabric10FastInput = { + /** + * Resolution + * + * Resolution + */ + resolution: '720p' | '480p' + /** + * Audio Url + */ + audio_url: string + /** + * Image Url + */ + image_url: string +} + +/** + * OviI2VResponse + */ +export type SchemaOviImageToVideoOutput = { + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * The generated video file. + */ + video?: SchemaFile | unknown +} + +/** + * OviI2VRequest + */ +export type SchemaOviImageToVideoInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number | unknown + /** + * Num Inference Steps + * + * The number of inference steps. + */ + num_inference_steps?: number + /** + * Audio Negative Prompt + * + * Negative prompt for audio generation. + */ + audio_negative_prompt?: string + /** + * Negative Prompt + * + * Negative prompt for video generation. + */ + negative_prompt?: string + /** + * Image Url + * + * The image URL to guide video generation. 
+ */ + image_url: string +} + +/** + * ImageToVideoOutput + */ +export type SchemaSora2ImageToVideoOutput = { + /** + * Spritesheet + * + * Spritesheet image for the video + */ + spritesheet?: SchemaImageFile + /** + * Thumbnail + * + * Thumbnail image for the video + */ + thumbnail?: SchemaImageFile + /** + * Video ID + * + * The ID of the generated video + */ + video_id: string + /** + * Video + * + * The generated video + */ + video: SchemaVideoFile +} + +/** + * VideoFile + */ +export type SchemaVideoFile = { + /** + * File Size + * + * The size of the file in bytes. + */ + file_size?: number + /** + * Duration + * + * The duration of the video + */ + duration?: number + /** + * Height + * + * The height of the video + */ + height?: number + /** + * Url + * + * The URL where the file can be downloaded from. + */ + url: string + /** + * Fps + * + * The FPS of the video + */ + fps?: number + /** + * Width + * + * The width of the video + */ + width?: number + /** + * File Name + * + * The name of the file. It will be auto-generated if not provided. + */ + file_name?: string + /** + * Content Type + * + * The mime type of the file. + */ + content_type?: string + /** + * Num Frames + * + * The number of frames in the video + */ + num_frames?: number + /** + * File Data + * + * File data + */ + file_data?: Blob | File +} + +/** + * ImageFile + */ +export type SchemaImageFile = { + /** + * File Size + * + * The size of the file in bytes. + */ + file_size?: number + /** + * Height + * + * The height of the image + */ + height?: number + /** + * Url + * + * The URL where the file can be downloaded from. + */ + url: string + /** + * Width + * + * The width of the image + */ + width?: number + /** + * File Name + * + * The name of the file. It will be auto-generated if not provided. + */ + file_name?: string + /** + * Content Type + * + * The mime type of the file. + */ + content_type?: string + /** + * File Data + * + * File data + */ + file_data?: Blob | File +} + +/** + * ImageToVideoInput + */ +export type SchemaSora2ImageToVideoInput = { + /** + * Prompt + * + * The text prompt describing the video you want to generate + */ + prompt: string + /** + * Duration + * + * Duration of the generated video in seconds + */ + duration?: 4 | 8 | 12 + /** + * Resolution + * + * The resolution of the generated video + */ + resolution?: 'auto' | '720p' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: 'auto' | '9:16' | '16:9' + /** + * Image URL + * + * The URL of the image to use as the first frame + */ + image_url: string + /** + * Model + * + * The model to use for the generation. When the default model is selected, the latest snapshot of the model will be used - otherwise, select a specific snapshot of the model. + */ + model?: 'sora-2' | 'sora-2-2025-12-08' | 'sora-2-2025-10-06' + /** + * Delete Video + * + * Whether to delete the video after generation for privacy reasons. If True, the video cannot be used for remixing and will be permanently deleted. 
+ */ + delete_video?: boolean +} + +/** + * ProImageToVideoOutput + */ +export type SchemaSora2ImageToVideoProOutput = { + /** + * Spritesheet + * + * Spritesheet image for the video + */ + spritesheet?: SchemaImageFile + /** + * Thumbnail + * + * Thumbnail image for the video + */ + thumbnail?: SchemaImageFile + /** + * Video ID + * + * The ID of the generated video + */ + video_id: string + /** + * Video + * + * The generated video + */ + video: SchemaVideoFile +} + +/** + * ProImageToVideoInput + */ +export type SchemaSora2ImageToVideoProInput = { + /** + * Prompt + * + * The text prompt describing the video you want to generate + */ + prompt: string + /** + * Duration + * + * Duration of the generated video in seconds + */ + duration?: 4 | 8 | 12 + /** + * Resolution + * + * The resolution of the generated video + */ + resolution?: 'auto' | '720p' | '1080p' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: 'auto' | '9:16' | '16:9' + /** + * Delete Video + * + * Whether to delete the video after generation for privacy reasons. If True, the video cannot be used for remixing and will be permanently deleted. + */ + delete_video?: boolean + /** + * Image URL + * + * The URL of the image to use as the first frame + */ + image_url: string +} + +/** + * Veo31ImageToVideoOutput + */ +export type SchemaVeo31ImageToVideoOutput = { + /** + * Video + * + * The generated video. + */ + video: SchemaFile +} + +/** + * Veo31ImageToVideoInput + */ +export type SchemaVeo31ImageToVideoInput = { + /** + * Prompt + * + * The text prompt describing the video you want to generate + */ + prompt: string + /** + * Duration + * + * The duration of the generated video. + */ + duration?: '4s' | '6s' | '8s' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video. Only 16:9 and 9:16 are supported. + */ + aspect_ratio?: 'auto' | '16:9' | '9:16' + /** + * Generate Audio + * + * Whether to generate audio for the video. + */ + generate_audio?: boolean + /** + * Auto Fix + * + * Whether to automatically attempt to fix prompts that fail content policy or other validation checks by rewriting them. + */ + auto_fix?: boolean + /** + * Resolution + * + * The resolution of the generated video. + */ + resolution?: '720p' | '1080p' | '4k' + /** + * Image URL + * + * URL of the input image to animate. Should be 720p or higher resolution in 16:9 or 9:16 aspect ratio. If the image is not in 16:9 or 9:16 aspect ratio, it will be cropped to fit. + */ + image_url: string + /** + * Seed + * + * The seed for the random number generator. + */ + seed?: number + /** + * Negative Prompt + * + * A negative prompt to guide the video generation. + */ + negative_prompt?: string +} + +/** + * Veo31ImageToVideoOutput + */ +export type SchemaVeo31FastImageToVideoOutput = { + /** + * Video + * + * The generated video. + */ + video: SchemaFile +} + +/** + * Veo31ImageToVideoInput + */ +export type SchemaVeo31FastImageToVideoInput = { + /** + * Prompt + * + * The text prompt describing the video you want to generate + */ + prompt: string + /** + * Duration + * + * The duration of the generated video. + */ + duration?: '4s' | '6s' | '8s' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video. Only 16:9 and 9:16 are supported. + */ + aspect_ratio?: 'auto' | '16:9' | '9:16' + /** + * Generate Audio + * + * Whether to generate audio for the video. 
+ */ + generate_audio?: boolean + /** + * Auto Fix + * + * Whether to automatically attempt to fix prompts that fail content policy or other validation checks by rewriting them. + */ + auto_fix?: boolean + /** + * Resolution + * + * The resolution of the generated video. + */ + resolution?: '720p' | '1080p' | '4k' + /** + * Image URL + * + * URL of the input image to animate. Should be 720p or higher resolution in 16:9 or 9:16 aspect ratio. If the image is not in 16:9 or 9:16 aspect ratio, it will be cropped to fit. + */ + image_url: string + /** + * Seed + * + * The seed for the random number generator. + */ + seed?: number + /** + * Negative Prompt + * + * A negative prompt to guide the video generation. + */ + negative_prompt?: string +} + +/** + * Veo31ReferenceToVideoOutput + */ +export type SchemaVeo31ReferenceToVideoOutput = { + /** + * Video + * + * The generated video. + */ + video: SchemaFile +} + +/** + * Veo31ReferenceToVideoInput + */ +export type SchemaVeo31ReferenceToVideoInput = { + /** + * Prompt + * + * The text prompt describing the video you want to generate + */ + prompt: string + /** + * Duration + * + * The duration of the generated video. + */ + duration?: '8s' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video. + */ + aspect_ratio?: '16:9' | '9:16' + /** + * Generate Audio + * + * Whether to generate audio for the video. + */ + generate_audio?: boolean + /** + * Resolution + * + * The resolution of the generated video. + */ + resolution?: '720p' | '1080p' | '4k' + /** + * Auto Fix + * + * Whether to automatically attempt to fix prompts that fail content policy or other validation checks by rewriting them. + */ + auto_fix?: boolean + /** + * Image Urls + * + * URLs of the reference images to use for consistent subject appearance + */ + image_urls: Array<string> +} + +/** + * Veo31FirstLastFrameToVideoOutput + */ +export type SchemaVeo31FirstLastFrameToVideoOutput = { + /** + * Video + * + * The generated video. + */ + video: SchemaFile +} + +/** + * Veo31FirstLastFrameToVideoInput + */ +export type SchemaVeo31FirstLastFrameToVideoInput = { + /** + * Prompt + * + * The text prompt describing the video you want to generate + */ + prompt: string + /** + * Duration + * + * The duration of the generated video. + */ + duration?: '4s' | '6s' | '8s' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video. + */ + aspect_ratio?: 'auto' | '16:9' | '9:16' + /** + * Generate Audio + * + * Whether to generate audio for the video. + */ + generate_audio?: boolean + /** + * Auto Fix + * + * Whether to automatically attempt to fix prompts that fail content policy or other validation checks by rewriting them. + */ + auto_fix?: boolean + /** + * Resolution + * + * The resolution of the generated video. + */ + resolution?: '720p' | '1080p' | '4k' + /** + * First Frame URL + * + * URL of the first frame of the video + */ + first_frame_url: string + /** + * Seed + * + * The seed for the random number generator. + */ + seed?: number + /** + * Last Frame URL + * + * URL of the last frame of the video + */ + last_frame_url: string + /** + * Negative Prompt + * + * A negative prompt to guide the video generation. + */ + negative_prompt?: string +} + +/** + * Veo31FirstLastFrameToVideoOutput + */ +export type SchemaVeo31FastFirstLastFrameToVideoOutput = { + /** + * Video + * + * The generated video.
+ */ + video: SchemaFile +} + +/** + * Veo31FirstLastFrameToVideoInput + */ +export type SchemaVeo31FastFirstLastFrameToVideoInput = { + /** + * Prompt + * + * The text prompt describing the video you want to generate + */ + prompt: string + /** + * Duration + * + * The duration of the generated video. + */ + duration?: '4s' | '6s' | '8s' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video. + */ + aspect_ratio?: 'auto' | '16:9' | '9:16' + /** + * Generate Audio + * + * Whether to generate audio for the video. + */ + generate_audio?: boolean + /** + * Auto Fix + * + * Whether to automatically attempt to fix prompts that fail content policy or other validation checks by rewriting them. + */ + auto_fix?: boolean + /** + * Resolution + * + * The resolution of the generated video. + */ + resolution?: '720p' | '1080p' | '4k' + /** + * First Frame URL + * + * URL of the first frame of the video + */ + first_frame_url: string + /** + * Seed + * + * The seed for the random number generator. + */ + seed?: number + /** + * Last Frame URL + * + * URL of the last frame of the video + */ + last_frame_url: string + /** + * Negative Prompt + * + * A negative prompt to guide the video generation. + */ + negative_prompt?: string +} + +/** + * ImageToVideoV25StandardOutput + */ +export type SchemaKlingVideoV25TurboStandardImageToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * ImageToVideoV25StandardRequest + */ +export type SchemaKlingVideoV25TurboStandardImageToVideoInput = { + /** + * Prompt + */ + prompt: string + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: '5' | '10' + /** + * Image Url + * + * URL of the image to be used for the video + */ + image_url: string + /** + * Negative Prompt + */ + negative_prompt?: string + /** + * Cfg Scale + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt. + * + */ + cfg_scale?: number +} + +/** + * Q2ImageToVideoOutput + */ +export type SchemaViduQ2ImageToVideoProOutput = { + /** + * Video + * + * The generated video from image using the Q2 model + */ + video: SchemaFile +} + +/** + * Q2ImageToVideoRequest + */ +export type SchemaViduQ2ImageToVideoProInput = { + /** + * Prompt + * + * Text prompt for video generation, max 3000 characters + */ + prompt: string + /** + * Resolution + * + * Output video resolution + */ + resolution?: '720p' | '1080p' + /** + * Duration + * + * Duration of the video in seconds + */ + duration?: 2 | 3 | 4 | 5 | 6 | 7 | 8 + /** + * Image Url + * + * URL of the image to use as the starting frame + */ + image_url: string + /** + * Bgm + * + * Whether to add background music to the video (only for 4-second videos) + */ + bgm?: boolean + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Movement Amplitude + * + * The movement amplitude of objects in the frame + */ + movement_amplitude?: 'auto' | 'small' | 'medium' | 'large' + /** + * End Image Url + * + * URL of the image to use as the ending frame. When provided, generates a transition video between start and end frames. 
+ */ + end_image_url?: string +} + +/** + * Q2ImageToVideoOutput + */ +export type SchemaViduQ2ImageToVideoTurboOutput = { + /** + * Video + * + * The generated video from image using the Q2 model + */ + video: SchemaFile +} + +/** + * Q2ImageToVideoRequest + */ +export type SchemaViduQ2ImageToVideoTurboInput = { + /** + * Prompt + * + * Text prompt for video generation, max 3000 characters + */ + prompt: string + /** + * Resolution + * + * Output video resolution + */ + resolution?: '720p' | '1080p' + /** + * Duration + * + * Duration of the video in seconds + */ + duration?: 2 | 3 | 4 | 5 | 6 | 7 | 8 + /** + * Image Url + * + * URL of the image to use as the starting frame + */ + image_url: string + /** + * Bgm + * + * Whether to add background music to the video (only for 4-second videos) + */ + bgm?: boolean + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Movement Amplitude + * + * The movement amplitude of objects in the frame + */ + movement_amplitude?: 'auto' | 'small' | 'medium' | 'large' + /** + * End Image Url + * + * URL of the image to use as the ending frame. When provided, generates a transition video between start and end frames. + */ + end_image_url?: string +} + +/** + * SeedanceFastI2VVideoOutput + */ +export type SchemaBytedanceSeedanceV1ProFastImageToVideoOutput = { + /** + * Seed + * + * Seed used for generation + */ + seed: number + /** + * Video + * + * Generated video file + */ + video: SchemaFile +} + +/** + * SeedanceProFastImageToVideoInput + */ +export type SchemaBytedanceSeedanceV1ProFastImageToVideoInput = { + /** + * Prompt + * + * The text prompt used to generate the video + */ + prompt: string + /** + * Resolution + * + * Video resolution - 480p for faster generation, 720p for balance, 1080p for higher quality + */ + resolution?: '480p' | '720p' | '1080p' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: '21:9' | '16:9' | '4:3' | '1:1' | '3:4' | '9:16' | 'auto' + /** + * Duration + * + * Duration of the video in seconds + */ + duration?: '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | '10' | '11' | '12' + /** + * Image Url + * + * The URL of the image used to generate video + */ + image_url: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Camera Fixed + * + * Whether to fix the camera position + */ + camera_fixed?: boolean + /** + * Seed + * + * Random seed to control video generation. Use -1 for random. 
+ */ + seed?: number +} + +/** + * ProFastImageToVideoHailuo23Output + */ +export type SchemaMinimaxHailuo23FastProImageToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * ProFastImageToVideoHailuo23Input + */ +export type SchemaMinimaxHailuo23FastProImageToVideoInput = { + /** + * Prompt Optimizer + * + * Whether to use the model's prompt optimizer + */ + prompt_optimizer?: boolean + /** + * Prompt + * + * Text prompt for video generation + */ + prompt: string + /** + * Image Url + * + * URL of the image to use as the first frame + */ + image_url: string +} + +/** + * StandardImageToVideoHailuo23Output + */ +export type SchemaMinimaxHailuo23StandardImageToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * StandardImageToVideoHailuo23Input + */ +export type SchemaMinimaxHailuo23StandardImageToVideoInput = { + /** + * Prompt Optimizer + * + * Whether to use the model's prompt optimizer + */ + prompt_optimizer?: boolean + /** + * Duration + * + * The duration of the video in seconds. + */ + duration?: '6' | '10' + /** + * Prompt + * + * Text prompt for video generation + */ + prompt: string + /** + * Image Url + * + * URL of the image to use as the first frame + */ + image_url: string +} + +/** + * StandardFastImageToVideoHailuo23Output + */ +export type SchemaMinimaxHailuo23FastStandardImageToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * StandardFastImageToVideoHailuo23Input + */ +export type SchemaMinimaxHailuo23FastStandardImageToVideoInput = { + /** + * Prompt Optimizer + * + * Whether to use the model's prompt optimizer + */ + prompt_optimizer?: boolean + /** + * Duration + * + * The duration of the video in seconds. + */ + duration?: '6' | '10' + /** + * Prompt + * + * Text prompt for video generation + */ + prompt: string + /** + * Image Url + * + * URL of the image to use as the first frame + */ + image_url: string +} + +/** + * LongCatImageToVideoResponse + */ +export type SchemaLongcatVideoDistilledImageToVideo480pOutput = { + /** + * Prompt + * + * The prompt used for generation. + */ + prompt: string + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * LongCatImageToVideoRequest + */ +export type SchemaLongcatVideoDistilledImageToVideo480pInput = { + /** + * Video Write Mode + * + * The write mode of the generated video. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Video Output Type + * + * The output type of the generated video. + */ + video_output_type?: + | 'X264 (.mp4)' + | 'VP9 (.webm)' + | 'PRORES4444 (.mov)' + | 'GIF (.gif)' + /** + * Prompt + * + * The prompt to guide the video generation. + */ + prompt?: string + /** + * FPS + * + * The frame rate of the generated video. + */ + fps?: number + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Image URL + * + * The URL of the image to generate a video from. + */ + image_url: string + /** + * Video Quality + * + * The quality of the generated video. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Enable Safety Checker + * + * Whether to enable safety checker. + */ + enable_safety_checker?: boolean + /** + * Number of Inference Steps + * + * The number of inference steps to use. 
+ */ + num_inference_steps?: number + /** + * Seed + * + * The seed for the random number generator. + */ + seed?: number + /** + * Number of Frames + * + * The number of frames to generate. + */ + num_frames?: number + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean +} + +/** + * LongCatImageToVideoResponse + */ +export type SchemaLongcatVideoDistilledImageToVideo720pOutput = { + /** + * Prompt + * + * The prompt used for generation. + */ + prompt: string + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * LongCat720PImageToVideoRequest + */ +export type SchemaLongcatVideoDistilledImageToVideo720pInput = { + /** + * Video Write Mode + * + * The write mode of the generated video. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Video Output Type + * + * The output type of the generated video. + */ + video_output_type?: + | 'X264 (.mp4)' + | 'VP9 (.webm)' + | 'PRORES4444 (.mov)' + | 'GIF (.gif)' + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean + /** + * Prompt + * + * The prompt to guide the video generation. + */ + prompt?: string + /** + * FPS + * + * The frame rate of the generated video. + */ + fps?: number + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Number of Refinement Inference Steps + * + * The number of inference steps to use for refinement. + */ + num_refine_inference_steps?: number + /** + * Image URL + * + * The URL of the image to generate a video from. + */ + image_url: string + /** + * Enable Safety Checker + * + * Whether to enable safety checker. + */ + enable_safety_checker?: boolean + /** + * Number of Inference Steps + * + * The number of inference steps to use. + */ + num_inference_steps?: number + /** + * Seed + * + * The seed for the random number generator. + */ + seed?: number + /** + * Number of Frames + * + * The number of frames to generate. + */ + num_frames?: number + /** + * Video Quality + * + * The quality of the generated video. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' +} + +/** + * LongCatImageToVideoResponse + */ +export type SchemaLongcatVideoImageToVideo480pOutput = { + /** + * Prompt + * + * The prompt used for generation. + */ + prompt: string + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * LongCatCFGImageToVideoRequest + */ +export type SchemaLongcatVideoImageToVideo480pInput = { + /** + * Prompt + * + * The prompt to guide the video generation. + */ + prompt?: string + /** + * Acceleration + * + * The acceleration level to use for the video generation. + */ + acceleration?: 'none' | 'regular' + /** + * FPS + * + * The frame rate of the generated video. + */ + fps?: number + /** + * Guidance Scale + * + * The guidance scale to use for the video generation. + */ + guidance_scale?: number + /** + * Number of Frames + * + * The number of frames to generate. + */ + num_frames?: number + /** + * Enable Safety Checker + * + * Whether to enable safety checker. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * The negative prompt to use for the video generation. 
+ */ + negative_prompt?: string + /** + * Video Write Mode + * + * The write mode of the generated video. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Video Output Type + * + * The output type of the generated video. + */ + video_output_type?: + | 'X264 (.mp4)' + | 'VP9 (.webm)' + | 'PRORES4444 (.mov)' + | 'GIF (.gif)' + /** + * Image URL + * + * The URL of the image to generate a video from. + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Video Quality + * + * The quality of the generated video. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean + /** + * Number of Inference Steps + * + * The number of inference steps to use for the video generation. + */ + num_inference_steps?: number + /** + * Seed + * + * The seed for the random number generator. + */ + seed?: number +} + +/** + * LongCatImageToVideoResponse + */ +export type SchemaLongcatVideoImageToVideo720pOutput = { + /** + * Prompt + * + * The prompt used for generation. + */ + prompt: string + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * LongCat720PCFGImageToVideoRequest + */ +export type SchemaLongcatVideoImageToVideo720pInput = { + /** + * Prompt + * + * The prompt to guide the video generation. + */ + prompt?: string + /** + * Acceleration + * + * The acceleration level to use for the video generation. + */ + acceleration?: 'none' | 'regular' + /** + * FPS + * + * The frame rate of the generated video. + */ + fps?: number + /** + * Number of Refinement Inference Steps + * + * The number of inference steps to use for refinement. + */ + num_refine_inference_steps?: number + /** + * Guidance Scale + * + * The guidance scale to use for the video generation. + */ + guidance_scale?: number + /** + * Number of Frames + * + * The number of frames to generate. + */ + num_frames?: number + /** + * Enable Safety Checker + * + * Whether to enable safety checker. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * The negative prompt to use for the video generation. + */ + negative_prompt?: string + /** + * Video Write Mode + * + * The write mode of the generated video. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Video Output Type + * + * The output type of the generated video. + */ + video_output_type?: + | 'X264 (.mp4)' + | 'VP9 (.webm)' + | 'PRORES4444 (.mov)' + | 'GIF (.gif)' + /** + * Image URL + * + * The URL of the image to generate a video from. + */ + image_url: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Video Quality + * + * The quality of the generated video. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean + /** + * Number of Inference Steps + * + * The number of inference steps to use for the video generation. + */ + num_inference_steps?: number + /** + * Seed + * + * The seed for the random number generator. 
+ */ + seed?: number +} + +/** + * KeyframeTransition + * + * Configuration for a transition between two keyframes + */ +export type SchemaKeyframeTransition = { + /** + * Prompt + * + * Specific prompt for this transition. Overrides the global prompt if provided. + */ + prompt?: string + /** + * Duration + * + * Duration of this transition in seconds + */ + duration?: number +} + +/** + * Pika22KeyframesToVideoOutput + * + * Output model for Pika 2.2 keyframes-to-video generation + */ +export type SchemaPikaV22PikaframesOutput = { + /** + * Video + * + * The generated video with transitions between keyframes + */ + video: SchemaFile +} + +/** + * Pika22KeyframesToVideoRequest + */ +export type SchemaPikaV22PikaframesInput = { + /** + * Prompt + * + * Default prompt for all transitions. Individual transition prompts override this. + */ + prompt?: string + /** + * Resolution + * + * The resolution of the generated video + */ + resolution?: '720p' | '1080p' + /** + * Transitions + * + * Configuration for each transition. Length must be len(image_urls) - 1. Total duration of all transitions must not exceed 25 seconds. If not provided, uses default 5-second transitions with the global prompt. + */ + transitions?: Array<SchemaKeyframeTransition> + /** + * Seed + * + * The seed for the random number generator + */ + seed?: number + /** + * Image Urls + * + * URLs of keyframe images (2-5 images) to create transitions between + */ + image_urls: Array<string> + /** + * Negative Prompt + * + * A negative prompt to guide the model + */ + negative_prompt?: string +} + +/** + * SwapOutput + */ +export type SchemaPixverseSwapOutput = { + /** + * Video + * + * The generated swapped video + */ + video: SchemaFile +} + +/** + * SwapRequest + */ +export type SchemaPixverseSwapInput = { + /** + * Original Sound Switch + * + * Whether to keep the original audio + */ + original_sound_switch?: boolean + /** + * Video Url + * + * URL of the external video to swap + */ + video_url: string + /** + * Keyframe Id + * + * The keyframe ID (from 1 to the last frame position) + */ + keyframe_id?: number + /** + * Mode + * + * The swap mode to use + */ + mode?: 'person' | 'object' | 'background' + /** + * Resolution + * + * The output resolution (1080p not supported) + */ + resolution?: '360p' | '540p' | '720p' + /** + * Image Url + * + * URL of the target image for swapping + */ + image_url: string +} + +/** + * LynxOutput + */ +export type SchemaLynxOutput = { + /** + * Seed + * + * The seed used for generation + */ + seed: number + /** + * Video + * + * The generated video file + */ + video: SchemaVideoFile +} + +/** + * LynxInput + */ +export type SchemaLynxInput = { + /** + * Prompt + * + * Text prompt to guide video generation + */ + prompt: string + /** + * Resolution + * + * Resolution of the generated video (480p, 580p, or 720p) + */ + resolution?: '480p' | '580p' | '720p' + /** + * Aspect Ratio + * + * Aspect ratio of the generated video (16:9, 9:16, or 1:1) + */ + aspect_ratio?: '16:9' | '9:16' | '1:1' + /** + * Num Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: number + /** + * Guidance Scale 2 + * + * Image guidance scale. Controls how closely the generated video follows the reference image. Higher values increase adherence to the reference image but may decrease quality. + */ + guidance_scale_2?: number + /** + * Strength + * + * Reference image scale. Controls the influence of the reference image on the generated video.
+ */ + strength?: number + /** + * Frames Per Second + * + * Frames per second of the generated video. Must be between 5 to 30. + */ + frames_per_second?: number + /** + * Image Url + * + * The URL of the subject image to be used for video generation + */ + image_url: string + /** + * Guidance Scale + * + * Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality. + */ + guidance_scale?: number + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Num Frames + * + * Number of frames in the generated video. Must be between 9 to 100. + */ + num_frames?: number + /** + * Negative Prompt + * + * Negative prompt to guide what should not appear in the generated video + */ + negative_prompt?: string + /** + * Ip Scale + * + * Identity preservation scale. Controls how closely the generated video preserves the subject's identity from the reference image. + */ + ip_scale?: number +} + +/** + * LTXVImageToVideoResponse + */ +export type SchemaLtx2ImageToVideoOutput = { + /** + * Video + * + * The generated video file + */ + video: SchemaVideoFile +} + +/** + * LTXVImageToVideoRequest + */ +export type SchemaLtx2ImageToVideoInput = { + /** + * Prompt + * + * The prompt to generate the video from + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: '16:9' + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: 6 | 8 | 10 + /** + * Generate Audio + * + * Whether to generate audio for the generated video + */ + generate_audio?: boolean + /** + * Resolution + * + * The resolution of the generated video + */ + resolution?: '1080p' | '1440p' | '2160p' + /** + * Image URL + * + * URL of the image to generate the video from. Must be publicly accessible or base64 data URI. Supports PNG, JPEG, WebP, AVIF, and HEIF formats. + */ + image_url: string + /** + * Frames per Second + * + * The frames per second of the generated video + */ + fps?: 25 | 50 +} + +/** + * LTXVImageToVideoResponse + */ +export type SchemaLtx2ImageToVideoFastOutput = { + /** + * Video + * + * The generated video file + */ + video: SchemaVideoFile +} + +/** + * LTXVImageToVideoFastRequest + */ +export type SchemaLtx2ImageToVideoFastInput = { + /** + * Prompt + * + * The prompt to generate the video from + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: '16:9' + /** + * Duration + * + * The duration of the generated video in seconds. The fast model supports 6-20 seconds. Note: Durations longer than 10 seconds (12, 14, 16, 18, 20) are only supported with 25 FPS and 1080p resolution. + */ + duration?: 6 | 8 | 10 | 12 | 14 | 16 | 18 | 20 + /** + * Generate Audio + * + * Whether to generate audio for the generated video + */ + generate_audio?: boolean + /** + * Resolution + * + * The resolution of the generated video + */ + resolution?: '1080p' | '1440p' | '2160p' + /** + * Image URL + * + * URL of the image to generate the video from. Must be publicly accessible or base64 data URI. Supports PNG, JPEG, WebP, AVIF, and HEIF formats. + */ + image_url: string + /** + * Frames per Second + * + * The frames per second of the generated video + */ + fps?: 25 | 50 +} + +/** + * OmniVideoReferenceToVideoOutput + */ +export type SchemaKlingVideoO1ReferenceToVideoOutput = { + /** + * Video + * + * The generated video. 
+ */ + video: SchemaFile +} + +/** + * OmniVideoReferenceToVideoInput + * + * Input for start-frame video generation with optional reference images and elements. + */ +export type SchemaKlingVideoO1ReferenceToVideoInput = { + /** + * Prompt + * + * Take @Element1, @Element2 to reference elements and @Image1, @Image2 to reference images in order. + */ + prompt: string + /** + * Duration + * + * Video duration in seconds. + */ + duration?: '3' | '4' | '5' | '6' | '7' | '8' | '9' | '10' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video frame. + */ + aspect_ratio?: '16:9' | '9:16' | '1:1' + /** + * Elements + * + * Elements (characters/objects) to include in the video. Reference in prompt as @Element1, @Element2, etc. Maximum 7 total (elements + reference images + start image). + */ + elements?: Array<SchemaOmniVideoElementInput> + /** + * Image Urls + * + * Additional reference images for style/appearance. Reference in prompt as @Image1, @Image2, etc. Maximum 7 total (elements + reference images + start image). + */ + image_urls?: Array<string> +} + +/** + * OmniVideoElementInput + */ +export type SchemaOmniVideoElementInput = { + /** + * Reference Image Urls + * + * Additional reference images from different angles. 1-4 images supported. At least one image is required. + */ + reference_image_urls?: Array<string> + /** + * Frontal Image Url + * + * The frontal image of the element (main view). + * + * Max file size: 10.0MB, Min width: 300px, Min height: 300px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s + */ + frontal_image_url: string +} + +/** + * OmniVideoImageToVideoOutput + * + * Output for Kling Omni Video generation. + */ +export type SchemaKlingVideoO1ImageToVideoOutput = { + /** + * Video + * + * The generated video. + */ + video: SchemaFile +} + +/** + * OmniVideoImageToVideoInput + */ +export type SchemaKlingVideoO1ImageToVideoInput = { + /** + * Prompt + * + * Use @Image1 to reference the start frame, @Image2 to reference the end frame. + */ + prompt: string + /** + * Duration + * + * Video duration in seconds. + */ + duration?: '3' | '4' | '5' | '6' | '7' | '8' | '9' | '10' + /** + * Start Image Url + * + * Image to use as the first frame of the video. + * + * Max file size: 10.0MB, Min width: 300px, Min height: 300px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s + */ + start_image_url: string + /** + * End Image Url + * + * Image to use as the last frame of the video. + * + * Max file size: 10.0MB, Min width: 300px, Min height: 300px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s + */ + end_image_url?: string +} + +/** + * I2VOutputV5_5 + */ +export type SchemaPixverseV55ImageToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * ImageToVideoRequestV5_5 + */ +export type SchemaPixverseV55ImageToVideoInput = { + /** + * Prompt + */ + prompt: string + /** + * Resolution + * + * The resolution of the generated video + */ + resolution?: '360p' | '540p' | '720p' | '1080p' + /** + * Duration + * + * The duration of the generated video in seconds. Longer durations cost more.
1080p videos are limited to 5 or 8 seconds + */ + duration?: '5' | '8' | '10' + /** + * Style + * + * The style of the generated video + */ + style?: 'anime' | '3d_animation' | 'clay' | 'comic' | 'cyberpunk' + /** + * Thinking Type + * + * Prompt optimization mode: 'enabled' to optimize, 'disabled' to turn off, 'auto' for model decision + */ + thinking_type?: 'enabled' | 'disabled' | 'auto' + /** + * Generate Multi Clip Switch + * + * Enable multi-clip generation with dynamic camera changes + */ + generate_multi_clip_switch?: boolean + /** + * Image Url + * + * URL of the image to use as the first frame + */ + image_url: string + /** + * Generate Audio Switch + * + * Enable audio generation (BGM, SFX, dialogue) + */ + generate_audio_switch?: boolean + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same video every time. + * + */ + seed?: number + /** + * Negative Prompt + * + * Negative prompt to be used for the generation + */ + negative_prompt?: string +} + +/** + * TransitionOutputV5_5 + */ +export type SchemaPixverseV55TransitionOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * TransitionRequestV5_5 + */ +export type SchemaPixverseV55TransitionInput = { + /** + * First Image Url + * + * URL of the image to use as the first frame + */ + first_image_url: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: '16:9' | '4:3' | '1:1' | '3:4' | '9:16' + /** + * Resolution + * + * The resolution of the generated video + */ + resolution?: '360p' | '540p' | '720p' | '1080p' + /** + * Style + * + * The style of the generated video + */ + style?: 'anime' | '3d_animation' | 'clay' | 'comic' | 'cyberpunk' + /** + * Thinking Type + * + * Prompt optimization mode: 'enabled' to optimize, 'disabled' to turn off, 'auto' for model decision + */ + thinking_type?: 'enabled' | 'disabled' | 'auto' + /** + * Prompt + * + * The prompt for the transition + */ + prompt: string + /** + * Duration + * + * The duration of the generated video in seconds. Longer durations cost more. 1080p videos are limited to 5 or 8 seconds + */ + duration?: '5' | '8' | '10' + /** + * Generate Audio Switch + * + * Enable audio generation (BGM, SFX, dialogue) + */ + generate_audio_switch?: boolean + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same video every time. + * + */ + seed?: number + /** + * End Image Url + * + * URL of the image to use as the last frame + */ + end_image_url?: string + /** + * Negative Prompt + * + * Negative prompt to be used for the generation + */ + negative_prompt?: string +} + +/** + * EffectOutput + */ +export type SchemaPixverseV55EffectsOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * EffectInputV5_5 + */ +export type SchemaPixverseV55EffectsInput = { + /** + * Negative Prompt + * + * Negative prompt to be used for the generation + */ + negative_prompt?: string + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: '5' | '8' | '10' + /** + * Resolution + * + * The resolution of the generated video. 
+ */ + resolution?: '360p' | '540p' | '720p' | '1080p' + /** + * Thinking Type + * + * Prompt optimization mode: 'enabled' to optimize, 'disabled' to turn off, 'auto' for model decision + */ + thinking_type?: 'enabled' | 'disabled' | 'auto' + /** + * Effect + * + * The effect to apply to the video + */ + effect: + | 'Kiss Me AI' + | 'Kiss' + | 'Muscle Surge' + | 'Warmth of Jesus' + | 'Anything, Robot' + | 'The Tiger Touch' + | 'Hug' + | 'Holy Wings' + | 'Microwave' + | 'Zombie Mode' + | 'Squid Game' + | 'Baby Face' + | 'Black Myth: Wukong' + | 'Long Hair Magic' + | 'Leggy Run' + | 'Fin-tastic Mermaid' + | 'Punch Face' + | 'Creepy Devil Smile' + | 'Thunder God' + | 'Eye Zoom Challenge' + | "Who's Arrested?" + | 'Baby Arrived' + | 'Werewolf Rage' + | 'Bald Swipe' + | 'BOOM DROP' + | 'Huge Cutie' + | 'Liquid Metal' + | 'Sharksnap!' + | 'Dust Me Away' + | '3D Figurine Factor' + | 'Bikini Up' + | 'My Girlfriends' + | 'My Boyfriends' + | 'Subject 3 Fever' + | 'Earth Zoom' + | 'Pole Dance' + | 'Vroom Dance' + | 'GhostFace Terror' + | 'Dragon Evoker' + | 'Skeletal Bae' + | 'Summoning succubus' + | 'Halloween Voodoo Doll' + | '3D Naked-Eye AD' + | 'Package Explosion' + | 'Dishes Served' + | 'Ocean ad' + | 'Supermarket AD' + | 'Tree doll' + | 'Come Feel My Abs' + | 'The Bicep Flex' + | 'London Elite Vibe' + | 'Flora Nymph Gown' + | 'Christmas Costume' + | "It's Snowy" + | 'Reindeer Cruiser' + | 'Snow Globe Maker' + | 'Pet Christmas Outfit' + | 'Adopt a Polar Pal' + | 'Cat Christmas Box' + | 'Starlight Gift Box' + | 'Xmas Poster' + | 'Pet Christmas Tree' + | 'City Santa Hat' + | 'Stocking Sweetie' + | 'Christmas Night' + | 'Xmas Front Page Karma' + | "Grinch's Xmas Hijack" + | 'Giant Product' + | 'Truck Fashion Shoot' + | 'Beach AD' + | 'Shoal Surround' + | 'Mechanical Assembly' + | 'Lighting AD' + | 'Billboard AD' + | 'Product close-up' + | 'Parachute Delivery' + | 'Dreamlike Cloud' + | 'Macaron Machine' + | 'Poster AD' + | 'Truck AD' + | 'Graffiti AD' + | '3D Figurine Factory' + | 'The Exclusive First Class' + | 'Art Zoom Challenge' + | 'I Quit' + | 'Hitchcock Dolly Zoom' + | 'Smell the Lens' + | 'I believe I can fly' + | 'Strikout Dance' + | 'Pixel World' + | 'Mint in Box' + | 'Hands up, Hand' + | 'Flora Nymph Go' + | 'Somber Embrace' + | 'Beam me up' + | 'Suit Swagger' + /** + * Image Url + * + * Optional URL of the image to use as the first frame. If not provided, generates from text + */ + image_url: string +} + +/** + * ImageToVideoV26ProOutput + */ +export type SchemaKlingVideoV26ProImageToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * ImageToVideoV26ProRequest + */ +export type SchemaKlingVideoV26ProImageToVideoInput = { + /** + * Prompt + */ + prompt: string + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: '5' | '10' + /** + * Voice Ids + * + * List of voice IDs to use for voice control. Reference voices in the prompt using <<>>, <<>>. Maximum 2 voices allowed. When provided and referenced in prompt, enables voice control billing. + */ + voice_ids?: Array<string> + /** + * Generate Audio + * + * Whether to generate native audio for the video. Supports Chinese and English voice output. Other languages are automatically translated to English. For English speech, use lowercase letters; for acronyms or proper nouns, use uppercase.
+ */ + generate_audio?: boolean + /** + * Start Image Url + * + * URL of the image to be used for the video + */ + start_image_url: string + /** + * End Image Url + * + * URL of the image to be used for the end of the video + */ + end_image_url?: string + /** + * Negative Prompt + */ + negative_prompt?: string +} + +/** + * AIAvatarOutput + */ +export type SchemaKlingVideoAiAvatarV2StandardOutput = { + /** + * Duration + * + * Duration of the output video in seconds. + */ + duration: number + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * AIAvatarInput + */ +export type SchemaKlingVideoAiAvatarV2StandardInput = { + /** + * Prompt + * + * The prompt to use for the video generation. + */ + prompt?: string + /** + * Audio Url + * + * The URL of the audio file. + */ + audio_url: string + /** + * Image Url + * + * The URL of the image to use as your avatar + */ + image_url: string +} + +/** + * AIAvatarOutput + */ +export type SchemaKlingVideoAiAvatarV2ProOutput = { + /** + * Duration + * + * Duration of the output video in seconds. + */ + duration: number + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * AIAvatarInput + */ +export type SchemaKlingVideoAiAvatarV2ProInput = { + /** + * Prompt + * + * The prompt to use for the video generation. + */ + prompt?: string + /** + * Audio Url + * + * The URL of the audio file. + */ + audio_url: string + /** + * Image Url + * + * The URL of the image to use as your avatar + */ + image_url: string +} + +/** + * AuroraOutputModel + */ +export type SchemaCreatifyAuroraOutput = { + /** + * Video + * + * The generated video file. + */ + video: SchemaVideoFile +} + +/** + * AuroraInputModel + */ +export type SchemaCreatifyAuroraInput = { + /** + * Prompt + * + * A text prompt to guide the video generation process. + */ + prompt?: string + /** + * Resolution + * + * The resolution of the generated video. + */ + resolution?: '480p' | '720p' + /** + * Guidance Scale + * + * Guidance scale to be used for text prompt adherence. + */ + guidance_scale?: number + /** + * Audio Guidance Scale + * + * Guidance scale to be used for audio adherence. + */ + audio_guidance_scale?: number + /** + * Audio Url + * + * The URL of the audio file to be used for video generation. + */ + audio_url: string + /** + * Image Url + * + * The URL of the image file to be used for video generation. + */ + image_url: string +} + +/** + * OmniVideoImageToVideoOutput + * + * Output for Kling Omni Video generation. + */ +export type SchemaKlingVideoO1StandardImageToVideoOutput = { + /** + * Video + * + * The generated video. + */ + video: SchemaFile +} + +/** + * OmniVideoImageToVideoInput + */ +export type SchemaKlingVideoO1StandardImageToVideoInput = { + /** + * Prompt + * + * Use @Image1 to reference the start frame, @Image2 to reference the end frame. + */ + prompt: string + /** + * Duration + * + * Video duration in seconds. + */ + duration?: '3' | '4' | '5' | '6' | '7' | '8' | '9' | '10' + /** + * Start Image Url + * + * Image to use as the first frame of the video. + * + * Max file size: 10.0MB, Min width: 300px, Min height: 300px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s + */ + start_image_url: string + /** + * End Image Url + * + * Image to use as the last frame of the video. 
+ * + * Max file size: 10.0MB, Min width: 300px, Min height: 300px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s + */ + end_image_url?: string +} + +/** + * OmniVideoReferenceToVideoOutput + */ +export type SchemaKlingVideoO1StandardReferenceToVideoOutput = { + /** + * Video + * + * The generated video. + */ + video: SchemaFile +} + +/** + * OmniVideoReferenceToVideoInput + * + * Input for start-frame video generation with optional reference images and elements. + */ +export type SchemaKlingVideoO1StandardReferenceToVideoInput = { + /** + * Prompt + * + * Take @Element1, @Element2 to reference elements and @Image1, @Image2 to reference images in order. + */ + prompt: string + /** + * Duration + * + * Video duration in seconds. + */ + duration?: '3' | '4' | '5' | '6' | '7' | '8' | '9' | '10' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video frame. + */ + aspect_ratio?: '16:9' | '9:16' | '1:1' + /** + * Elements + * + * Elements (characters/objects) to include in the video. Reference in prompt as @Element1, @Element2, etc. Maximum 7 total (elements + reference images + start image). + */ + elements?: Array<SchemaOmniVideoElementInput> + /** + * Image Urls + * + * Additional reference images for style/appearance. Reference in prompt as @Image1, @Image2, etc. Maximum 7 total (elements + reference images + start image). + */ + image_urls?: Array<string> +} + +/** + * ImageToVideoOutput + * + * Output for image-to-video generation + */ +export type SchemaV26ImageToVideoOutput = { + /** + * Actual Prompt + * + * The actual prompt used if prompt rewriting was enabled + */ + actual_prompt?: string + /** + * Seed + * + * The seed used for generation + */ + seed: number + /** + * Video + * + * The generated video file + */ + video: SchemaVideoFile +} + +/** + * ImageToVideoInput + * + * Input for Wan 2.6 image-to-video generation + */ +export type SchemaV26ImageToVideoInput = { + /** + * Prompt + * + * The text prompt describing the desired video motion. Max 800 characters. + */ + prompt: string + /** + * Resolution + * + * Video resolution. Valid values: 720p, 1080p + */ + resolution?: '720p' | '1080p' + /** + * Duration + * + * Duration of the generated video in seconds. Choose between 5, 10 or 15 seconds. + */ + duration?: '5' | '10' | '15' + /** + * Audio Url + * + * + * URL of the audio to use as the background music. Must be publicly accessible. + * Limit handling: If the audio duration exceeds the duration value (5, 10, or 15 seconds), + * the audio is truncated to the first N seconds, and the rest is discarded. If + * the audio is shorter than the video, the remaining part of the video will be silent. + * For example, if the audio is 3 seconds long and the video duration is 5 seconds, the + * first 3 seconds of the output video will have sound, and the last 2 seconds will be silent. + * - Format: WAV, MP3. + * - Duration: 3 to 30 s. + * - File size: Up to 15 MB. + * + */ + audio_url?: string + /** + * Image URL + * + * URL of the image to use as the first frame. Must be publicly accessible or base64 data URI. Image dimensions must be between 240 and 7680. + */ + image_url: string + /** + * Enable Prompt Expansion + * + * Whether to enable prompt rewriting using LLM. + */ + enable_prompt_expansion?: boolean + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Multi Shots + * + * When true, enables intelligent multi-shot segmentation. Only active when enable_prompt_expansion is True. Set to false for single-shot generation.
+ */ + multi_shots?: boolean + /** + * Negative Prompt + * + * Negative prompt to describe content to avoid. Max 500 characters. + */ + negative_prompt?: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean +} + +/** + * HunyuanVideo15Response + */ +export type SchemaHunyuanVideoV15ImageToVideoOutput = { + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * HunyuanVideo15I2VRequest + */ +export type SchemaHunyuanVideoV15ImageToVideoInput = { + /** + * Prompt + * + * The prompt to generate the video from. + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the video. + */ + aspect_ratio?: '16:9' | '9:16' + /** + * Resolution + * + * The resolution of the video. + */ + resolution?: '480p' + /** + * Image Url + * + * URL of the reference image for image-to-video generation. + */ + image_url: string + /** + * Enable Prompt Expansion + * + * Enable prompt expansion to enhance the input prompt. + */ + enable_prompt_expansion?: boolean + /** + * Seed + * + * Random seed for reproducibility. + */ + seed?: number + /** + * Num Inference Steps + * + * The number of inference steps. + */ + num_inference_steps?: number + /** + * Negative Prompt + * + * The negative prompt to guide what not to generate. + */ + negative_prompt?: string + /** + * Num Frames + * + * The number of frames to generate. + */ + num_frames?: number +} + +/** + * LiveAvatarResponse + */ +export type SchemaLiveAvatarOutput = { + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated avatar video file with synchronized audio. + */ + video: SchemaVideoFile +} + +/** + * LiveAvatarRequest + */ +export type SchemaLiveAvatarInput = { + /** + * Frames per Clip + * + * Number of frames per clip. Must be a multiple of 4. Higher values = smoother but slower generation. + */ + frames_per_clip?: number + /** + * Prompt + * + * A text prompt describing the scene and character. Helps guide the video generation style and context. + */ + prompt: string + /** + * Acceleration + * + * Acceleration level for faster video decoding + */ + acceleration?: 'none' | 'light' | 'regular' | 'high' + /** + * Reference Image URL + * + * The URL of the reference image for avatar generation. The character in this image will be animated. + */ + image_url: string + /** + * Number of Clips + * + * Number of video clips to generate. Each clip is approximately 3 seconds. Set higher for longer videos. + */ + num_clips?: number + /** + * Audio URL + * + * The URL of the driving audio file (WAV or MP3). The avatar will be animated to match this audio. + */ + audio_url: string + /** + * Seed + * + * Random seed for reproducible generation. + */ + seed?: number + /** + * Guidance Scale + * + * Classifier-free guidance scale. Higher values follow the prompt more closely. + */ + guidance_scale?: number + /** + * Enable Safety Checker + * + * Enable safety checker for content moderation. 
+ */ + enable_safety_checker?: boolean +} + +/** + * SeedanceProv15I2VVideoOutput + */ +export type SchemaBytedanceSeedanceV15ProImageToVideoOutput = { + /** + * Seed + * + * Seed used for generation + */ + seed: number + /** + * Video + * + * Generated video file + */ + video: SchemaFile +} + +/** + * SeedanceProv15ImageToVideoInput + */ +export type SchemaBytedanceSeedanceV15ProImageToVideoInput = { + /** + * Prompt + * + * The text prompt used to generate the video + */ + prompt: string + /** + * Resolution + * + * Video resolution - 480p for faster generation, 720p for balance, 1080p for higher quality + */ + resolution?: '480p' | '720p' | '1080p' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: '21:9' | '16:9' | '4:3' | '1:1' | '3:4' | '9:16' + /** + * Generate Audio + * + * Whether to generate audio for the video + */ + generate_audio?: boolean + /** + * Duration + * + * Duration of the video in seconds + */ + duration?: '4' | '5' | '6' | '7' | '8' | '9' | '10' | '11' | '12' + /** + * Image Url + * + * The URL of the image used to generate video + */ + image_url: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Camera Fixed + * + * Whether to fix the camera position + */ + camera_fixed?: boolean + /** + * End Image Url + * + * The URL of the image the video ends with. Defaults to None. + */ + end_image_url?: string + /** + * Seed + * + * Random seed to control video generation. Use -1 for random. + */ + seed?: number +} + +/** + * KandinskyI2VResponse + */ +export type SchemaKandinsky5ProImageToVideoOutput = { + /** + * Video + * + * The generated video file. + */ + video?: SchemaFile +} + +/** + * KandinskyI2VRequest + */ +export type SchemaKandinsky5ProImageToVideoInput = { + /** + * Prompt + * + * The prompt to generate the video from. + */ + prompt: string + /** + * Resolution + * + * Video resolution: 512p or 1024p. + */ + resolution?: '512P' | '1024P' + /** + * Acceleration + * + * Acceleration level for faster generation. + */ + acceleration?: 'none' | 'regular' + /** + * Duration + * + * Video duration. + */ + duration?: '5s' + /** + * Num Inference Steps + */ + num_inference_steps?: number + /** + * Image Url + * + * The URL of the image to use as a reference for the video generation. + */ + image_url: string +} + +/** + * Schema referenced but not defined by fal.ai (missing from source OpenAPI spec) + */ +export type SchemaTrajectoryPoint = { + [key: string]: unknown +} + +/** + * WanMoveOutput + */ +export type SchemaWanMoveOutput = { + /** + * Seed + * + * Random seed used for generation. + */ + seed: number + /** + * Video + * + * Generated Video File + */ + video: SchemaVideoFile +} + +/** + * WANMoveInput + */ +export type SchemaWanMoveInput = { + /** + * Prompt + * + * Text prompt to guide the video generation. + */ + prompt: string + /** + * Trajectories + * + * A list of trajectories. Each trajectory list means the movement of one object. + */ + trajectories: Array<Array<SchemaTrajectoryPoint>> + /** + * Image Url + * + * URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped. + */ + image_url: string + /** + * Guidance Scale + * + * Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality. + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * Number of inference steps for sampling.
Higher values give better quality but take longer. + */ + num_inference_steps?: number + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Negative Prompt + * + * Negative prompt to guide the video generation. + */ + negative_prompt?: string +} + +/** + * LTX2ImageToVideoOutput + */ +export type SchemaLtx219bImageToVideoOutput = { + /** + * Prompt + * + * The prompt used for the generation. + */ + prompt: string + /** + * Seed + * + * The seed used for the random number generator. + */ + seed: number + video: SchemaVideoFile +} + +/** + * LTX2ImageToVideoInput + */ +export type SchemaLtx219bImageToVideoInput = { + /** + * Use Multi-Scale + * + * Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details. + */ + use_multiscale?: boolean + /** + * Prompt + * + * The prompt used for the generation. + */ + prompt: string + /** + * Acceleration + * + * The acceleration level to use. + */ + acceleration?: 'none' | 'regular' | 'high' | 'full' + /** + * Generate Audio + * + * Whether to generate audio for the video. + */ + generate_audio?: boolean + /** + * FPS + * + * The frames per second of the generated video. + */ + fps?: number + /** + * Camera LoRA + * + * The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera. + */ + camera_lora?: + | 'dolly_in' + | 'dolly_out' + | 'dolly_left' + | 'dolly_right' + | 'jib_up' + | 'jib_down' + | 'static' + | 'none' + /** + * Video Size + * + * The size of the generated video. + */ + video_size?: + | SchemaImageSize + | 'auto' + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Camera LoRA Scale + * + * The scale of the camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera. + */ + camera_lora_scale?: number + /** + * Image Strength + * + * The strength of the image to use for the video generation. + */ + image_strength?: number + /** + * Negative Prompt + * + * The negative prompt to generate the video from. + */ + negative_prompt?: string + /** + * Guidance Scale + * + * The guidance scale to use. + */ + guidance_scale?: number + /** + * Video Write Mode + * + * The write mode of the generated video. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Video Output Type + * + * The output type of the generated video. + */ + video_output_type?: + | 'X264 (.mp4)' + | 'VP9 (.webm)' + | 'PRORES4444 (.mov)' + | 'GIF (.gif)' + /** + * Number of Frames + * + * The number of frames to generate. + */ + num_frames?: number + /** + * Image URL + * + * The URL of the image to generate the video from. + */ + image_url: string + /** + * Video Quality + * + * The quality of the generated video. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. 
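Because fal.ai omits the TrajectoryPoint schema, the element type is an open record, and the nested-array shape follows the field description ("each trajectory list means the movement of one object"). A sketch under those assumptions; the x/y keys are illustrative and not confirmed by the spec:

import type { SchemaTrajectoryPoint, SchemaWanMoveInput } from './types.gen' // path assumed

// One trajectory per animated object; each point is a free-form record
// because the upstream OpenAPI spec does not define TrajectoryPoint.
const ballPath: Array<SchemaTrajectoryPoint> = [
  { x: 120, y: 340 }, // illustrative keys, not from the spec
  { x: 180, y: 300 },
  { x: 260, y: 280 },
]

const wanMoveInput: SchemaWanMoveInput = {
  prompt: 'A red ball rolls across the table',
  image_url: 'https://example.com/table.png',
  trajectories: [ballPath], // Array<Array<SchemaTrajectoryPoint>>
}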
+ */ + enable_prompt_expansion?: boolean + /** + * Number of Inference Steps + * + * The number of inference steps to use. + */ + num_inference_steps?: number + /** + * Seed + * + * The seed for the random number generator. + */ + seed?: number | unknown +} + +/** + * LTX2ImageToVideoOutput + */ +export type SchemaLtx219bImageToVideoLoraOutput = { + /** + * Prompt + * + * The prompt used for the generation. + */ + prompt: string + /** + * Seed + * + * The seed used for the random number generator. + */ + seed: number + video: SchemaVideoFile +} + +/** + * LTX2LoRAImageToVideoInput + */ +export type SchemaLtx219bImageToVideoLoraInput = { + /** + * Use Multi-Scale + * + * Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details. + */ + use_multiscale?: boolean + /** + * Prompt + * + * The prompt used for the generation. + */ + prompt: string + /** + * Acceleration + * + * The acceleration level to use. + */ + acceleration?: 'none' | 'regular' | 'high' | 'full' + /** + * Generate Audio + * + * Whether to generate audio for the video. + */ + generate_audio?: boolean + /** + * FPS + * + * The frames per second of the generated video. + */ + fps?: number + /** + * LoRAs + * + * The LoRAs to use for the generation. + */ + loras: Array<SchemaLoRaInput> + /** + * Camera LoRA + * + * The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera. + */ + camera_lora?: + | 'dolly_in' + | 'dolly_out' + | 'dolly_left' + | 'dolly_right' + | 'jib_up' + | 'jib_down' + | 'static' + | 'none' + /** + * Video Size + * + * The size of the generated video. + */ + video_size?: + | SchemaImageSize + | 'auto' + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Camera LoRA Scale + * + * The scale of the camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera. + */ + camera_lora_scale?: number + /** + * Image Strength + * + * The strength of the image to use for the video generation. + */ + image_strength?: number + /** + * Negative Prompt + * + * The negative prompt to generate the video from. + */ + negative_prompt?: string + /** + * Guidance Scale + * + * The guidance scale to use. + */ + guidance_scale?: number + /** + * Video Write Mode + * + * The write mode of the generated video. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Video Output Type + * + * The output type of the generated video. + */ + video_output_type?: + | 'X264 (.mp4)' + | 'VP9 (.webm)' + | 'PRORES4444 (.mov)' + | 'GIF (.gif)' + /** + * Number of Frames + * + * The number of frames to generate. + */ + num_frames?: number + /** + * Image URL + * + * The URL of the image to generate the video from. + */ + image_url: string + /** + * Video Quality + * + * The quality of the generated video. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history.
+ */ + sync_mode?: boolean + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean + /** + * Number of Inference Steps + * + * The number of inference steps to use. + */ + num_inference_steps?: number + /** + * Seed + * + * The seed for the random number generator. + */ + seed?: number | unknown +} + +/** + * LoRAInput + * + * LoRA weight configuration. + */ +export type SchemaLoRaInput = { + /** + * Path + * + * URL, HuggingFace repo ID (owner/repo) to lora weights. + */ + path: string + /** + * Scale + * + * Scale factor for LoRA application (0.0 to 4.0). + */ + scale?: number + /** + * Weight Name + * + * Name of the LoRA weight. Only used if `path` is a HuggingFace repository, and is only required when the repository contains multiple LoRA weights. + */ + weight_name?: string | unknown +} + +/** + * LTX2ImageToVideoOutput + */ +export type SchemaLtx219bDistilledImageToVideoOutput = { + /** + * Prompt + * + * The prompt used for the generation. + */ + prompt: string + /** + * Seed + * + * The seed used for the random number generator. + */ + seed: number + video: SchemaVideoFile +} + +/** + * LTX2DistilledImageToVideoInput + */ +export type SchemaLtx219bDistilledImageToVideoInput = { + /** + * Use Multi-Scale + * + * Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details. + */ + use_multiscale?: boolean + /** + * Prompt + * + * The prompt used for the generation. + */ + prompt: string + /** + * Acceleration + * + * The acceleration level to use. + */ + acceleration?: 'none' | 'regular' | 'high' | 'full' + /** + * Generate Audio + * + * Whether to generate audio for the video. + */ + generate_audio?: boolean + /** + * FPS + * + * The frames per second of the generated video. + */ + fps?: number + /** + * Camera LoRA + * + * The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera. + */ + camera_lora?: + | 'dolly_in' + | 'dolly_out' + | 'dolly_left' + | 'dolly_right' + | 'jib_up' + | 'jib_down' + | 'static' + | 'none' + /** + * Video Size + * + * The size of the generated video. + */ + video_size?: + | SchemaImageSize + | 'auto' + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Number of Frames + * + * The number of frames to generate. + */ + num_frames?: number + /** + * Image Strength + * + * The strength of the image to use for the video generation. + */ + image_strength?: number + /** + * Negative Prompt + * + * The negative prompt to generate the video from. + */ + negative_prompt?: string + /** + * Camera LoRA Scale + * + * The scale of the camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera. + */ + camera_lora_scale?: number + /** + * Video Write Mode + * + * The write mode of the generated video. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Video Output Type + * + * The output type of the generated video. 
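A sketch of how SchemaLoRaInput composes with the LoRA variant of the LTX-2 endpoint; the constraints in the comments are taken from the field docs above, while the import path and concrete values are assumptions.

import type { SchemaLoRaInput, SchemaLtx219bImageToVideoLoraInput } from './types.gen' // path assumed

const styleLora: SchemaLoRaInput = {
  path: 'owner/repo', // URL or HuggingFace repo ID, per the field docs
  scale: 0.8, // valid range is 0.0 to 4.0
  weight_name: 'style.safetensors', // only needed when the repo holds several LoRAs
}

const ltxLoraInput: SchemaLtx219bImageToVideoLoraInput = {
  prompt: 'Hand-painted watercolor seascape, gentle motion',
  image_url: 'https://example.com/seascape.png',
  loras: [styleLora], // required on the LoRA variant of the endpoint
  camera_lora: 'dolly_in', // preset camera movement
  camera_lora_scale: 1.0,
}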
+ */ + video_output_type?: + | 'X264 (.mp4)' + | 'VP9 (.webm)' + | 'PRORES4444 (.mov)' + | 'GIF (.gif)' + /** + * Image URL + * + * The URL of the image to generate the video from. + */ + image_url: string + /** + * Video Quality + * + * The quality of the generated video. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean + /** + * Number of Inference Steps + * + * The number of inference steps to use. + */ + num_inference_steps?: number + /** + * Seed + * + * The seed for the random number generator. + */ + seed?: number | unknown +} + +/** + * LTX2ImageToVideoOutput + */ +export type SchemaLtx219bDistilledImageToVideoLoraOutput = { + /** + * Prompt + * + * The prompt used for the generation. + */ + prompt: string + /** + * Seed + * + * The seed used for the random number generator. + */ + seed: number + video: SchemaVideoFile +} + +/** + * LTX2LoRADistilledImageToVideoInput + */ +export type SchemaLtx219bDistilledImageToVideoLoraInput = { + /** + * Use Multi-Scale + * + * Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details. + */ + use_multiscale?: boolean + /** + * Prompt + * + * The prompt used for the generation. + */ + prompt: string + /** + * Acceleration + * + * The acceleration level to use. + */ + acceleration?: 'none' | 'regular' | 'high' | 'full' + /** + * Generate Audio + * + * Whether to generate audio for the video. + */ + generate_audio?: boolean + /** + * FPS + * + * The frames per second of the generated video. + */ + fps?: number + /** + * LoRAs + * + * The LoRAs to use for the generation. + */ + loras: Array<SchemaLoRaInput> + /** + * Camera LoRA + * + * The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera. + */ + camera_lora?: + | 'dolly_in' + | 'dolly_out' + | 'dolly_left' + | 'dolly_right' + | 'jib_up' + | 'jib_down' + | 'static' + | 'none' + /** + * Video Size + * + * The size of the generated video. + */ + video_size?: + | SchemaImageSize + | 'auto' + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Number of Frames + * + * The number of frames to generate. + */ + num_frames?: number + /** + * Image Strength + * + * The strength of the image to use for the video generation. + */ + image_strength?: number + /** + * Negative Prompt + * + * The negative prompt to generate the video from. + */ + negative_prompt?: string + /** + * Camera LoRA Scale + * + * The scale of the camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera. + */ + camera_lora_scale?: number + /** + * Video Write Mode + * + * The write mode of the generated video. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Video Output Type + * + * The output type of the generated video.
+ */ + video_output_type?: + | 'X264 (.mp4)' + | 'VP9 (.webm)' + | 'PRORES4444 (.mov)' + | 'GIF (.gif)' + /** + * Image URL + * + * The URL of the image to generate the video from. + */ + image_url: string + /** + * Video Quality + * + * The quality of the generated video. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean + /** + * Seed + * + * The seed for the random number generator. + */ + seed?: number | unknown +} + +/** + * ImageToVideoOutput + * + * Output for image-to-video generation + */ +export type SchemaV26ImageToVideoFlashOutput = { + /** + * Actual Prompt + * + * The actual prompt used if prompt rewriting was enabled + */ + actual_prompt?: string + /** + * Seed + * + * The seed used for generation + */ + seed: number + /** + * Video + * + * The generated video file + */ + video: SchemaVideoFile +} + +/** + * ImageToVideoInput + * + * Input for Wan 2.6 image-to-video generation + */ +export type SchemaV26ImageToVideoFlashInput = { + /** + * Prompt + * + * The text prompt describing the desired video motion. Max 800 characters. + */ + prompt: string + /** + * Resolution + * + * Video resolution. Valid values: 720p, 1080p + */ + resolution?: '720p' | '1080p' + /** + * Duration + * + * Duration of the generated video in seconds. Choose between 5, 10 or 15 seconds. + */ + duration?: '5' | '10' | '15' + /** + * Audio Url + * + * + * URL of the audio to use as the background music. Must be publicly accessible. + * Limit handling: If the audio duration exceeds the duration value (5, 10, or 15 seconds), + * the audio is truncated to the first N seconds, and the rest is discarded. If + * the audio is shorter than the video, the remaining part of the video will be silent. + * For example, if the audio is 3 seconds long and the video duration is 5 seconds, the + * first 3 seconds of the output video will have sound, and the last 2 seconds will be silent. + * - Format: WAV, MP3. + * - Duration: 3 to 30 s. + * - File size: Up to 15 MB. + * + */ + audio_url?: string + /** + * Image URL + * + * URL of the image to use as the first frame. Must be publicly accessible or base64 data URI. Image dimensions must be between 240 and 7680. + */ + image_url: string + /** + * Enable Prompt Expansion + * + * Whether to enable prompt rewriting using LLM. + */ + enable_prompt_expansion?: boolean + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Multi Shots + * + * When true, enables intelligent multi-shot segmentation. Only active when enable_prompt_expansion is True. Set to false for single-shot generation. + */ + multi_shots?: boolean + /** + * Negative Prompt + * + * Negative prompt to describe content to avoid. Max 500 characters. + */ + negative_prompt?: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. 
+ */ + enable_safety_checker?: boolean +} + +/** + * Q2ProReferenceToVideoOutput + */ +export type SchemaViduQ2ReferenceToVideoProOutput = { + /** + * Video + * + * The generated video with video/image references using the Q2 Pro model + */ + video: SchemaFile +} + +/** + * Q2ProReferenceToVideoRequest + */ +export type SchemaViduQ2ReferenceToVideoProInput = { + /** + * Prompt + * + * Text prompt for video generation, max 2000 characters + */ + prompt: string + /** + * Resolution + * + * Output video resolution + */ + resolution?: '540p' | '720p' | '1080p' + /** + * Aspect Ratio + * + * Aspect ratio of the output video (e.g., auto, 16:9, 9:16, 1:1, or any W:H) + */ + aspect_ratio?: string + /** + * Duration + * + * Duration of the video in seconds (0 for automatic duration) + */ + duration?: number + /** + * Reference Video Urls + * + * URLs of the reference videos for video editing or motion reference. Supports up to 2 videos. + */ + reference_video_urls?: Array<string> + /** + * Bgm + * + * Whether to add background music to the generated video + */ + bgm?: boolean + /** + * Reference Image Urls + * + * URLs of the reference images for subject appearance. If videos are provided, up to 4 images are allowed; otherwise up to 7 images. + */ + reference_image_urls?: Array<string> + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Movement Amplitude + * + * The movement amplitude of objects in the frame + */ + movement_amplitude?: 'auto' | 'small' | 'medium' | 'large' +} + +/** + * I2VOutputV5_5 + */ +export type SchemaPixverseV56ImageToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * ImageToVideoRequestV5_6 + */ +export type SchemaPixverseV56ImageToVideoInput = { + /** + * Prompt + */ + prompt: string + /** + * Resolution + * + * The resolution of the generated video + */ + resolution?: '360p' | '540p' | '720p' | '1080p' + /** + * Duration + * + * The duration of the generated video in seconds. 1080p videos are limited to 5 or 8 seconds + */ + duration?: '5' | '8' | '10' + /** + * Style + * + * The style of the generated video + */ + style?: 'anime' | '3d_animation' | 'clay' | 'comic' | 'cyberpunk' + /** + * Thinking Type + * + * Prompt optimization mode: 'enabled' to optimize, 'disabled' to turn off, 'auto' for model decision + */ + thinking_type?: 'enabled' | 'disabled' | 'auto' + /** + * Image Url + * + * URL of the image to use as the first frame + */ + image_url: string + /** + * Generate Audio Switch + * + * Enable audio generation (BGM, SFX, dialogue) + */ + generate_audio_switch?: boolean + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same video every time.
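The Vidu Q2 input encodes its limits only in doc comments (at most 2 reference videos; at most 4 images when videos are supplied, otherwise 7), so it is worth showing a value that respects them. A sketch with an assumed import path and illustrative URLs:

import type { SchemaViduQ2ReferenceToVideoProInput } from './types.gen' // path assumed

const viduInput: SchemaViduQ2ReferenceToVideoProInput = {
  prompt: 'The character from the reference images walks through a neon market',
  reference_image_urls: [
    'https://example.com/ref-front.png',
    'https://example.com/ref-side.png',
  ], // up to 4 images when videos are supplied, otherwise up to 7
  reference_video_urls: ['https://example.com/motion-ref.mp4'], // at most 2
  aspect_ratio: '16:9', // free-form string: 'auto' or any W:H
  duration: 0, // 0 asks the model to choose a duration automatically
}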
+ * + */ + seed?: number + /** + * Negative Prompt + * + * Negative prompt to be used for the generation + */ + negative_prompt?: string +} + +/** + * TransitionOutputV5_5 + */ +export type SchemaPixverseV56TransitionOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * TransitionRequestV5_6 + */ +export type SchemaPixverseV56TransitionInput = { + /** + * First Image Url + * + * URL of the image to use as the first frame + */ + first_image_url: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: '16:9' | '4:3' | '1:1' | '3:4' | '9:16' + /** + * Resolution + * + * The resolution of the generated video + */ + resolution?: '360p' | '540p' | '720p' | '1080p' + /** + * Style + * + * The style of the generated video + */ + style?: 'anime' | '3d_animation' | 'clay' | 'comic' | 'cyberpunk' + /** + * Thinking Type + * + * Prompt optimization mode: 'enabled' to optimize, 'disabled' to turn off, 'auto' for model decision + */ + thinking_type?: 'enabled' | 'disabled' | 'auto' + /** + * Prompt + * + * The prompt for the transition + */ + prompt: string + /** + * Duration + * + * The duration of the generated video in seconds. 1080p videos are limited to 5 or 8 seconds + */ + duration?: '5' | '8' | '10' + /** + * Generate Audio Switch + * + * Enable audio generation (BGM, SFX, dialogue) + */ + generate_audio_switch?: boolean + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same video every time. + * + */ + seed?: number + /** + * End Image Url + * + * URL of the image to use as the last frame + */ + end_image_url?: string + /** + * Negative Prompt + * + * Negative prompt to be used for the generation + */ + negative_prompt?: string +} + +/** + * WanI2VResponse + */ +export type SchemaWanI2vOutput = { + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * WanI2VRequest + */ +export type SchemaWanI2vInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Shift + * + * Shift parameter for video generation. + */ + shift?: number + /** + * Acceleration + * + * Acceleration level to use. The more acceleration, the faster the generation, but with lower quality. The recommended value is 'regular'. + */ + acceleration?: 'none' | 'regular' + /** + * Frames Per Second + * + * Frames per second of the generated video. Must be between 5 to 24. + */ + frames_per_second?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Num Frames + * + * Number of frames to generate. Must be between 81 to 100 (inclusive). If the number of frames is greater than 81, the video will be generated with 1.25x more billing units. + */ + num_frames?: number + /** + * Negative Prompt + * + * Negative prompt for video generation. + */ + negative_prompt?: string + /** + * Aspect Ratio + * + * Aspect ratio of the generated video. If 'auto', the aspect ratio will be determined automatically based on the input image. + */ + aspect_ratio?: 'auto' | '16:9' | '9:16' | '1:1' + /** + * Resolution + * + * Resolution of the generated video (480p or 720p). 480p is 0.5 billing units, and 720p is 1 billing unit. + */ + resolution?: '480p' | '720p' + /** + * Image Url + * + * URL of the input image. 
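The Pixverse transition schema pairs a required first frame with an optional end frame. A hedged sketch (import path assumed, values illustrative):

import type { SchemaPixverseV56TransitionInput } from './types.gen' // path assumed

const transitionInput: SchemaPixverseV56TransitionInput = {
  prompt: 'Morph the daytime street into the same street at night',
  first_image_url: 'https://example.com/street-day.png',
  end_image_url: 'https://example.com/street-night.png', // optional last frame
  resolution: '720p', // at 1080p, duration is limited to '5' or '8'
  duration: '5',
  thinking_type: 'auto', // let the model decide whether to optimize the prompt
}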
If the input image does not match the chosen aspect ratio, it is resized and center cropped. + */ + image_url: string + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean + /** + * Num Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: number + /** + * Guide Scale + * + * Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality. + */ + guide_scale?: number + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number +} + +/** + * ImageToVideoV2MasterOutput + */ +export type SchemaKlingVideoV2MasterImageToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * ImageToVideoV2MasterRequest + */ +export type SchemaKlingVideoV2MasterImageToVideoInput = { + /** + * Prompt + */ + prompt: string + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: '5' | '10' + /** + * Image Url + * + * URL of the image to be used for the video + */ + image_url: string + /** + * Negative Prompt + */ + negative_prompt?: string + /** + * Cfg Scale + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt. + * + */ + cfg_scale?: number +} + +/** + * I2VOutputV4 + */ +export type SchemaPixverseV45ImageToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * ImageToVideoRequestV4 + */ +export type SchemaPixverseV45ImageToVideoInput = { + /** + * Prompt + */ + prompt: string + /** + * Resolution + * + * The resolution of the generated video + */ + resolution?: '360p' | '540p' | '720p' | '1080p' + /** + * Duration + * + * The duration of the generated video in seconds. 8s videos cost double. 1080p videos are limited to 5 seconds + */ + duration?: '5' | '8' + /** + * Style + * + * The style of the generated video + */ + style?: 'anime' | '3d_animation' | 'clay' | 'comic' | 'cyberpunk' + /** + * Camera Movement + * + * The type of camera movement to apply to the video + */ + camera_movement?: + | 'horizontal_left' + | 'horizontal_right' + | 'vertical_up' + | 'vertical_down' + | 'zoom_in' + | 'zoom_out' + | 'crane_up' + | 'quickly_zoom_in' + | 'quickly_zoom_out' + | 'smooth_zoom_in' + | 'camera_rotation' + | 'robo_arm' + | 'super_dolly_out' + | 'whip_pan' + | 'hitchcock' + | 'left_follow' + | 'right_follow' + | 'pan_left' + | 'pan_right' + | 'fix_bg' + /** + * Image Url + * + * URL of the image to use as the first frame + */ + image_url: string + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same video every time. 
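The Wan i2v doc comments also encode billing rules (480p is 0.5 billing units versus 1 for 720p, and more than 81 frames bills at 1.25x), which are easy to miss in a types file. A sketch that stays on the cheap side of both thresholds; the import path is assumed:

import type { SchemaWanI2vInput } from './types.gen' // path assumed

const wanInput: SchemaWanI2vInput = {
  prompt: 'Leaves swirl around the statue in a sudden gust',
  image_url: 'https://example.com/statue.jpg',
  resolution: '480p', // 0.5 billing units; 720p is 1 unit
  num_frames: 81, // valid range 81..100; staying at 81 avoids the 1.25x multiplier
  frames_per_second: 16, // must be between 5 and 24
  acceleration: 'regular', // recommended value per the docs
}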
+ * + */ + seed?: number + /** + * Negative Prompt + * + * Negative prompt to be used for the generation + */ + negative_prompt?: string +} + +/** + * ImageToVideoV21StandardOutput + */ +export type SchemaKlingVideoV21StandardImageToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * ImageToVideoV21StandardRequest + */ +export type SchemaKlingVideoV21StandardImageToVideoInput = { + /** + * Prompt + */ + prompt: string + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: '5' | '10' + /** + * Image Url + * + * URL of the image to be used for the video + */ + image_url: string + /** + * Negative Prompt + */ + negative_prompt?: string + /** + * Cfg Scale + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt. + * + */ + cfg_scale?: number +} + +/** + * ImageToVideoV21MasterOutput + */ +export type SchemaKlingVideoV21MasterImageToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * ImageToVideoV21MasterRequest + */ +export type SchemaKlingVideoV21MasterImageToVideoInput = { + /** + * Prompt + */ + prompt: string + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: '5' | '10' + /** + * Image Url + * + * URL of the image to be used for the video + */ + image_url: string + /** + * Negative Prompt + */ + negative_prompt?: string + /** + * Cfg Scale + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt. + * + */ + cfg_scale?: number +} + +/** + * SeedanceProI2VVideoOutput + */ +export type SchemaBytedanceSeedanceV1ProImageToVideoOutput = { + /** + * Seed + * + * Seed used for generation + */ + seed: number + /** + * Video + * + * Generated video file + */ + video: SchemaFile +} + +/** + * SeedanceProImageToVideoInput + */ +export type SchemaBytedanceSeedanceV1ProImageToVideoInput = { + /** + * Prompt + * + * The text prompt used to generate the video + */ + prompt: string + /** + * Resolution + * + * Video resolution - 480p for faster generation, 720p for balance, 1080p for higher quality + */ + resolution?: '480p' | '720p' | '1080p' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: '21:9' | '16:9' | '4:3' | '1:1' | '3:4' | '9:16' | 'auto' + /** + * Duration + * + * Duration of the video in seconds + */ + duration?: '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | '10' | '11' | '12' + /** + * Image Url + * + * The URL of the image used to generate video + */ + image_url: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Camera Fixed + * + * Whether to fix the camera position + */ + camera_fixed?: boolean + /** + * End Image Url + * + * The URL of the image the video ends with. Defaults to None. + */ + end_image_url?: string + /** + * Seed + * + * Random seed to control video generation. Use -1 for random. 
+ */ + seed?: number +} + +/** + * ImageToVideoHailuo02Output + */ +export type SchemaMinimaxHailuo02StandardImageToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * StandardImageToVideoHailuo02Input + */ +export type SchemaMinimaxHailuo02StandardImageToVideoInput = { + /** + * Prompt Optimizer + * + * Whether to use the model's prompt optimizer + */ + prompt_optimizer?: boolean + /** + * Duration + * + * The duration of the video in seconds. 10 seconds videos are not supported for 1080p resolution. + */ + duration?: '6' | '10' + /** + * Resolution + * + * The resolution of the generated video. + */ + resolution?: '512P' | '768P' + /** + * Prompt + */ + prompt: string + /** + * End Image Url + * + * Optional URL of the image to use as the last frame of the video + */ + end_image_url?: string + /** + * Image Url + */ + image_url: string +} + +/** + * ImageToVideoV25ProOutput + */ +export type SchemaKlingVideoV25TurboProImageToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * ImageToVideoV25ProRequest + */ +export type SchemaKlingVideoV25TurboProImageToVideoInput = { + /** + * Prompt + */ + prompt: string + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: '5' | '10' + /** + * Image Url + * + * URL of the image to be used for the video + */ + image_url: string + /** + * Negative Prompt + */ + negative_prompt?: string + /** + * Tail Image Url + * + * URL of the image to be used for the end of the video + */ + tail_image_url?: string + /** + * Cfg Scale + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt. + * + */ + cfg_scale?: number +} + +/** + * VideoOutput + * + * Base output for video generation + */ +export type SchemaWan25PreviewImageToVideoOutput = { + /** + * Actual Prompt + * + * The actual prompt used if prompt rewriting was enabled + */ + actual_prompt?: string + /** + * Seed + * + * The seed used for generation + */ + seed: number + /** + * Video + * + * The generated video file + */ + video: SchemaVideoFile +} + +/** + * ImageToVideoInput + * + * Input for image-to-video generation + */ +export type SchemaWan25PreviewImageToVideoInput = { + /** + * Prompt + * + * The text prompt describing the desired video motion. Max 800 characters. + */ + prompt: string + /** + * Resolution + * + * Video resolution. Valid values: 480p, 720p, 1080p + */ + resolution?: '480p' | '720p' | '1080p' + /** + * Duration + * + * Duration of the generated video in seconds. Choose between 5 or 10 seconds. + */ + duration?: '5' | '10' + /** + * Image URL + * + * URL of the image to use as the first frame. Must be publicly accessible or base64 data URI. + * + * Max file size: 25.0MB, Min width: 360px, Min height: 360px, Max width: 2000px, Max height: 2000px, Timeout: 20.0s + */ + image_url: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Audio Url + * + * + * URL of the audio to use as the background music. Must be publicly accessible. + * Limit handling: If the audio duration exceeds the duration value (5 or 10 seconds), + * the audio is truncated to the first 5 or 10 seconds, and the rest is discarded. 
If + * the audio is shorter than the video, the remaining part of the video will be silent. + * For example, if the audio is 3 seconds long and the video duration is 5 seconds, the + * first 3 seconds of the output video will have sound, and the last 2 seconds will be silent. + * - Format: WAV, MP3. + * - Duration: 3 to 30 s. + * - File size: Up to 15 MB. + * + */ + audio_url?: string + /** + * Negative Prompt + * + * Negative prompt to describe content to avoid. Max 500 characters. + */ + negative_prompt?: string + /** + * Enable Prompt Expansion + * + * Whether to enable prompt rewriting using LLM. + */ + enable_prompt_expansion?: boolean +} + +/** + * ProImageToVideoHailuo23Output + */ +export type SchemaMinimaxHailuo23ProImageToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * ProImageToVideoHailuo23Input + */ +export type SchemaMinimaxHailuo23ProImageToVideoInput = { + /** + * Prompt Optimizer + * + * Whether to use the model's prompt optimizer + */ + prompt_optimizer?: boolean + /** + * Prompt + * + * Text prompt for video generation + */ + prompt: string + /** + * Image Url + * + * URL of the image to use as the first frame + */ + image_url: string +} + +/** + * VideoOutput + */ +export type SchemaMinimaxVideo01ImageToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * ImageToVideoRequest + */ +export type SchemaMinimaxVideo01ImageToVideoInput = { + /** + * Prompt Optimizer + * + * Whether to use the model's prompt optimizer + */ + prompt_optimizer?: boolean + /** + * Prompt + */ + prompt: string + /** + * Image Url + * + * URL of the image to use as the first frame + */ + image_url: string +} + +/** + * I2VOutput + */ +export type SchemaKlingVideoV16ProImageToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * ProImageToVideoRequest + */ +export type SchemaKlingVideoV16ProImageToVideoInput = { + /** + * Prompt + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated video frame + */ + aspect_ratio?: '16:9' | '9:16' | '1:1' + /** + * Negative Prompt + */ + negative_prompt?: string + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: '5' | '10' + /** + * Image Url + */ + image_url: string + /** + * Tail Image Url + * + * URL of the image to be used for the end of the video + */ + tail_image_url?: string + /** + * Cfg Scale + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt. + * + */ + cfg_scale?: number +} + +/** + * ImageToVideoOutput + */ +export type SchemaVeo2ImageToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * ImageToVideoInput + */ +export type SchemaVeo2ImageToVideoInput = { + /** + * Prompt + * + * The text prompt describing how the image should be animated + */ + prompt: string + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: '5s' | '6s' | '7s' | '8s' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: 'auto' | 'auto_prefer_portrait' | '16:9' | '9:16' + /** + * Image Url + * + * URL of the input image to animate. Should be 720p or higher resolution. 
+ */ + image_url: string +} + +/** + * WanProI2VResponse + */ +export type SchemaWanProImageToVideoOutput = { + video: SchemaFile +} + +/** + * WanProI2VRequest + */ +export type SchemaWanProImageToVideoInput = { + /** + * Prompt + * + * The prompt to generate the video + */ + prompt: string + /** + * Enable Safety Checker + * + * Whether to enable the safety checker + */ + enable_safety_checker?: boolean + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number | unknown + /** + * Image Url + * + * The URL of the image to generate the video from + */ + image_url: string +} + +/** + * WanEffectsOutput + */ +export type SchemaWanEffectsOutput = { + /** + * Seed + */ + seed: number + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * BaseInput + */ +export type SchemaWanEffectsInput = { + /** + * Effect Type + * + * The type of effect to apply to the video. + */ + effect_type?: + | 'squish' + | 'muscle' + | 'inflate' + | 'crush' + | 'rotate' + | 'gun-shooting' + | 'deflate' + | 'cakeify' + | 'hulk' + | 'baby' + | 'bride' + | 'classy' + | 'puppy' + | 'snow-white' + | 'disney-princess' + | 'mona-lisa' + | 'painting' + | 'pirate-captain' + | 'princess' + | 'jungle' + | 'samurai' + | 'vip' + | 'warrior' + | 'zen' + | 'assassin' + | 'timelapse' + | 'tsunami' + | 'fire' + | 'zoom-call' + | 'doom-fps' + | 'fus-ro-dah' + | 'hug-jesus' + | 'robot-face-reveal' + | 'super-saiyan' + | 'jumpscare' + | 'laughing' + | 'cartoon-jaw-drop' + | 'crying' + | 'kissing' + | 'angry-face' + | 'selfie-younger-self' + | 'animeify' + | 'blast' + /** + * Aspect Ratio + * + * Aspect ratio of the output video. + */ + aspect_ratio?: '16:9' | '9:16' | '1:1' + /** + * Subject + * + * The subject to insert into the predefined prompt template for the selected effect. + */ + subject: string + /** + * Lora Scale + * + * The scale of the LoRA weight. Used to adjust effect intensity. + */ + lora_scale?: number + /** + * Image URL + * + * URL of the input image. + */ + image_url: string + /** + * Turbo Mode + * + * Whether to use turbo mode. If True, the video will be generated faster but with lower quality. + */ + turbo_mode?: boolean + /** + * Frames Per Second + * + * Frames per second of the generated video. + */ + frames_per_second?: number + /** + * Num Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: number + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Num Frames + * + * Number of frames to generate. + */ + num_frames?: number +} + +export type SchemaQueueStatus = { + status: 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED' + /** + * The request id. + */ + request_id: string + /** + * The response url. + */ + response_url?: string + /** + * The status url. + */ + status_url?: string + /** + * The cancel url. + */ + cancel_url?: string + /** + * The logs. + */ + logs?: { + [key: string]: unknown + } + /** + * The metrics. + */ + metrics?: { + [key: string]: unknown + } + /** + * The queue position. + */ + queue_position?: number +} + +export type GetFalAiWanEffectsRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
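WanEffects works from a predefined prompt template: the effect_type picks the template and subject is spliced into it. A sketch with an assumed import path:

import type { SchemaWanEffectsInput } from './types.gen' // path assumed

const effectsInput: SchemaWanEffectsInput = {
  effect_type: 'cakeify', // one of the ~40 predefined effect templates
  subject: 'a vintage typewriter', // inserted into the effect's prompt template
  image_url: 'https://example.com/typewriter.png',
  aspect_ratio: '1:1',
  turbo_mode: true, // faster generation at lower quality
}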
+ */ + logs?: number + } + url: '/fal-ai/wan-effects/requests/{request_id}/status' +} + +export type GetFalAiWanEffectsRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiWanEffectsRequestsByRequestIdStatusResponse = + GetFalAiWanEffectsRequestsByRequestIdStatusResponses[keyof GetFalAiWanEffectsRequestsByRequestIdStatusResponses] + +export type PutFalAiWanEffectsRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-effects/requests/{request_id}/cancel' +} + +export type PutFalAiWanEffectsRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiWanEffectsRequestsByRequestIdCancelResponse = + PutFalAiWanEffectsRequestsByRequestIdCancelResponses[keyof PutFalAiWanEffectsRequestsByRequestIdCancelResponses] + +export type PostFalAiWanEffectsData = { + body: SchemaWanEffectsInput + path?: never + query?: never + url: '/fal-ai/wan-effects' +} + +export type PostFalAiWanEffectsResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanEffectsResponse = + PostFalAiWanEffectsResponses[keyof PostFalAiWanEffectsResponses] + +export type GetFalAiWanEffectsRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-effects/requests/{request_id}' +} + +export type GetFalAiWanEffectsRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWanEffectsOutput +} + +export type GetFalAiWanEffectsRequestsByRequestIdResponse = + GetFalAiWanEffectsRequestsByRequestIdResponses[keyof GetFalAiWanEffectsRequestsByRequestIdResponses] + +export type GetFalAiWanProImageToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan-pro/image-to-video/requests/{request_id}/status' +} + +export type GetFalAiWanProImageToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiWanProImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiWanProImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiWanProImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiWanProImageToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-pro/image-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiWanProImageToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiWanProImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiWanProImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiWanProImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiWanProImageToVideoData = { + body: SchemaWanProImageToVideoInput + path?: never + query?: never + url: '/fal-ai/wan-pro/image-to-video' +} + +export type PostFalAiWanProImageToVideoResponses = { + /** + * The request status. 
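The *Data/*Responses pairs above describe a submit, poll, fetch round-trip against fal's queue. A sketch of that flow using plain fetch; the queue host (queue.fal.run) and the `Authorization: Key <FAL_KEY>` scheme are taken from fal's public documentation, not from this diff, and the polling interval is arbitrary:

import type {
  PostFalAiWanEffectsData,
  SchemaQueueStatus,
  SchemaWanEffectsOutput,
} from './types.gen' // path assumed

const BASE = 'https://queue.fal.run' // assumed queue host
const headers = {
  'Content-Type': 'application/json',
  Authorization: `Key ${process.env.FAL_KEY}`, // header name per the OpenAPI spec
}

async function runWanEffects(body: PostFalAiWanEffectsData['body']) {
  // Submit: POST /fal-ai/wan-effects answers with a QueueStatus carrying polling URLs.
  const submit = await fetch(`${BASE}/fal-ai/wan-effects`, {
    method: 'POST',
    headers,
    body: JSON.stringify(body),
  })
  let status: SchemaQueueStatus = await submit.json()

  // Poll the status endpoint (logs=1 includes logs) until COMPLETED.
  while (status.status !== 'COMPLETED') {
    await new Promise((resolve) => setTimeout(resolve, 1000))
    const res = await fetch(
      `${BASE}/fal-ai/wan-effects/requests/${status.request_id}/status?logs=1`,
      { headers },
    )
    status = await res.json()
  }

  // Result: GET /requests/{request_id} yields SchemaWanEffectsOutput.
  const result = await fetch(
    `${BASE}/fal-ai/wan-effects/requests/${status.request_id}`,
    { headers },
  )
  return (await result.json()) as SchemaWanEffectsOutput
}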
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanProImageToVideoResponse = + PostFalAiWanProImageToVideoResponses[keyof PostFalAiWanProImageToVideoResponses] + +export type GetFalAiWanProImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-pro/image-to-video/requests/{request_id}' +} + +export type GetFalAiWanProImageToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWanProImageToVideoOutput +} + +export type GetFalAiWanProImageToVideoRequestsByRequestIdResponse = + GetFalAiWanProImageToVideoRequestsByRequestIdResponses[keyof GetFalAiWanProImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiVeo2ImageToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/veo2/image-to-video/requests/{request_id}/status' +} + +export type GetFalAiVeo2ImageToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiVeo2ImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiVeo2ImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiVeo2ImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiVeo2ImageToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/veo2/image-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiVeo2ImageToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiVeo2ImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiVeo2ImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiVeo2ImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiVeo2ImageToVideoData = { + body: SchemaVeo2ImageToVideoInput + path?: never + query?: never + url: '/fal-ai/veo2/image-to-video' +} + +export type PostFalAiVeo2ImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiVeo2ImageToVideoResponse = + PostFalAiVeo2ImageToVideoResponses[keyof PostFalAiVeo2ImageToVideoResponses] + +export type GetFalAiVeo2ImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/veo2/image-to-video/requests/{request_id}' +} + +export type GetFalAiVeo2ImageToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaVeo2ImageToVideoOutput +} + +export type GetFalAiVeo2ImageToVideoRequestsByRequestIdResponse = + GetFalAiVeo2ImageToVideoRequestsByRequestIdResponses[keyof GetFalAiVeo2ImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiKlingVideoV16ProImageToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-video/v1.6/pro/image-to-video/requests/{request_id}/status' + } + +export type GetFalAiKlingVideoV16ProImageToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
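This diff ships only types; pairing them with fal's official client is one plausible consumption path. A sketch assuming the '@fal-ai/client' package, which is not part of this change set:

import { fal } from '@fal-ai/client' // external dependency, assumed
import type {
  SchemaVeo2ImageToVideoInput,
  SchemaVeo2ImageToVideoOutput,
} from './types.gen' // path assumed

const input: SchemaVeo2ImageToVideoInput = {
  prompt: 'The portrait slowly turns to face the camera',
  image_url: 'https://example.com/portrait.png', // 720p or higher recommended
  duration: '5s',
  aspect_ratio: 'auto',
}

// subscribe() submits to the queue and resolves when the request completes.
const result = await fal.subscribe('fal-ai/veo2/image-to-video', { input })
const output = result.data as SchemaVeo2ImageToVideoOutput
console.log(output.video) // SchemaFile, defined elsewhere in this file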
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiKlingVideoV16ProImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoV16ProImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoV16ProImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoV16ProImageToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v1.6/pro/image-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiKlingVideoV16ProImageToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiKlingVideoV16ProImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoV16ProImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoV16ProImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoV16ProImageToVideoData = { + body: SchemaKlingVideoV16ProImageToVideoInput + path?: never + query?: never + url: '/fal-ai/kling-video/v1.6/pro/image-to-video' +} + +export type PostFalAiKlingVideoV16ProImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoV16ProImageToVideoResponse = + PostFalAiKlingVideoV16ProImageToVideoResponses[keyof PostFalAiKlingVideoV16ProImageToVideoResponses] + +export type GetFalAiKlingVideoV16ProImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v1.6/pro/image-to-video/requests/{request_id}' +} + +export type GetFalAiKlingVideoV16ProImageToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaKlingVideoV16ProImageToVideoOutput +} + +export type GetFalAiKlingVideoV16ProImageToVideoRequestsByRequestIdResponse = + GetFalAiKlingVideoV16ProImageToVideoRequestsByRequestIdResponses[keyof GetFalAiKlingVideoV16ProImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiMinimaxVideo01ImageToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/minimax/video-01/image-to-video/requests/{request_id}/status' +} + +export type GetFalAiMinimaxVideo01ImageToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiMinimaxVideo01ImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiMinimaxVideo01ImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiMinimaxVideo01ImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiMinimaxVideo01ImageToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/video-01/image-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiMinimaxVideo01ImageToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiMinimaxVideo01ImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiMinimaxVideo01ImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiMinimaxVideo01ImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiMinimaxVideo01ImageToVideoData = { + body: SchemaMinimaxVideo01ImageToVideoInput + path?: never + query?: never + url: '/fal-ai/minimax/video-01/image-to-video' +} + +export type PostFalAiMinimaxVideo01ImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMinimaxVideo01ImageToVideoResponse = + PostFalAiMinimaxVideo01ImageToVideoResponses[keyof PostFalAiMinimaxVideo01ImageToVideoResponses] + +export type GetFalAiMinimaxVideo01ImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/video-01/image-to-video/requests/{request_id}' +} + +export type GetFalAiMinimaxVideo01ImageToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMinimaxVideo01ImageToVideoOutput +} + +export type GetFalAiMinimaxVideo01ImageToVideoRequestsByRequestIdResponse = + GetFalAiMinimaxVideo01ImageToVideoRequestsByRequestIdResponses[keyof GetFalAiMinimaxVideo01ImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiMinimaxHailuo23ProImageToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/minimax/hailuo-2.3/pro/image-to-video/requests/{request_id}/status' + } + +export type GetFalAiMinimaxHailuo23ProImageToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiMinimaxHailuo23ProImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiMinimaxHailuo23ProImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiMinimaxHailuo23ProImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiMinimaxHailuo23ProImageToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/hailuo-2.3/pro/image-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiMinimaxHailuo23ProImageToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiMinimaxHailuo23ProImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiMinimaxHailuo23ProImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiMinimaxHailuo23ProImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiMinimaxHailuo23ProImageToVideoData = { + body: SchemaMinimaxHailuo23ProImageToVideoInput + path?: never + query?: never + url: '/fal-ai/minimax/hailuo-2.3/pro/image-to-video' +} + +export type PostFalAiMinimaxHailuo23ProImageToVideoResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiMinimaxHailuo23ProImageToVideoResponse = + PostFalAiMinimaxHailuo23ProImageToVideoResponses[keyof PostFalAiMinimaxHailuo23ProImageToVideoResponses] + +export type GetFalAiMinimaxHailuo23ProImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/hailuo-2.3/pro/image-to-video/requests/{request_id}' +} + +export type GetFalAiMinimaxHailuo23ProImageToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaMinimaxHailuo23ProImageToVideoOutput + } + +export type GetFalAiMinimaxHailuo23ProImageToVideoRequestsByRequestIdResponse = + GetFalAiMinimaxHailuo23ProImageToVideoRequestsByRequestIdResponses[keyof GetFalAiMinimaxHailuo23ProImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiWan25PreviewImageToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan-25-preview/image-to-video/requests/{request_id}/status' +} + +export type GetFalAiWan25PreviewImageToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiWan25PreviewImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiWan25PreviewImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiWan25PreviewImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiWan25PreviewImageToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-25-preview/image-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiWan25PreviewImageToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiWan25PreviewImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiWan25PreviewImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiWan25PreviewImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiWan25PreviewImageToVideoData = { + body: SchemaWan25PreviewImageToVideoInput + path?: never + query?: never + url: '/fal-ai/wan-25-preview/image-to-video' +} + +export type PostFalAiWan25PreviewImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWan25PreviewImageToVideoResponse = + PostFalAiWan25PreviewImageToVideoResponses[keyof PostFalAiWan25PreviewImageToVideoResponses] + +export type GetFalAiWan25PreviewImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-25-preview/image-to-video/requests/{request_id}' +} + +export type GetFalAiWan25PreviewImageToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaWan25PreviewImageToVideoOutput +} + +export type GetFalAiWan25PreviewImageToVideoRequestsByRequestIdResponse = + GetFalAiWan25PreviewImageToVideoRequestsByRequestIdResponses[keyof GetFalAiWan25PreviewImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiKlingVideoV25TurboProImageToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-video/v2.5-turbo/pro/image-to-video/requests/{request_id}/status' + } + +export type GetFalAiKlingVideoV25TurboProImageToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiKlingVideoV25TurboProImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoV25TurboProImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoV25TurboProImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoV25TurboProImageToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v2.5-turbo/pro/image-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiKlingVideoV25TurboProImageToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiKlingVideoV25TurboProImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoV25TurboProImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoV25TurboProImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoV25TurboProImageToVideoData = { + body: SchemaKlingVideoV25TurboProImageToVideoInput + path?: never + query?: never + url: '/fal-ai/kling-video/v2.5-turbo/pro/image-to-video' +} + +export type PostFalAiKlingVideoV25TurboProImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoV25TurboProImageToVideoResponse = + PostFalAiKlingVideoV25TurboProImageToVideoResponses[keyof PostFalAiKlingVideoV25TurboProImageToVideoResponses] + +export type GetFalAiKlingVideoV25TurboProImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v2.5-turbo/pro/image-to-video/requests/{request_id}' +} + +export type GetFalAiKlingVideoV25TurboProImageToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaKlingVideoV25TurboProImageToVideoOutput + } + +export type GetFalAiKlingVideoV25TurboProImageToVideoRequestsByRequestIdResponse = + GetFalAiKlingVideoV25TurboProImageToVideoRequestsByRequestIdResponses[keyof GetFalAiKlingVideoV25TurboProImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiMinimaxHailuo02StandardImageToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/minimax/hailuo-02/standard/image-to-video/requests/{request_id}/status' + } + +export type GetFalAiMinimaxHailuo02StandardImageToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiMinimaxHailuo02StandardImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiMinimaxHailuo02StandardImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiMinimaxHailuo02StandardImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiMinimaxHailuo02StandardImageToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/hailuo-02/standard/image-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiMinimaxHailuo02StandardImageToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiMinimaxHailuo02StandardImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiMinimaxHailuo02StandardImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiMinimaxHailuo02StandardImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiMinimaxHailuo02StandardImageToVideoData = { + body: SchemaMinimaxHailuo02StandardImageToVideoInput + path?: never + query?: never + url: '/fal-ai/minimax/hailuo-02/standard/image-to-video' +} + +export type PostFalAiMinimaxHailuo02StandardImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMinimaxHailuo02StandardImageToVideoResponse = + PostFalAiMinimaxHailuo02StandardImageToVideoResponses[keyof PostFalAiMinimaxHailuo02StandardImageToVideoResponses] + +export type GetFalAiMinimaxHailuo02StandardImageToVideoRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/hailuo-02/standard/image-to-video/requests/{request_id}' + } + +export type GetFalAiMinimaxHailuo02StandardImageToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaMinimaxHailuo02StandardImageToVideoOutput + } + +export type GetFalAiMinimaxHailuo02StandardImageToVideoRequestsByRequestIdResponse = + GetFalAiMinimaxHailuo02StandardImageToVideoRequestsByRequestIdResponses[keyof GetFalAiMinimaxHailuo02StandardImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiBytedanceSeedanceV1ProImageToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/bytedance/seedance/v1/pro/image-to-video/requests/{request_id}/status' + } + +export type GetFalAiBytedanceSeedanceV1ProImageToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiBytedanceSeedanceV1ProImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiBytedanceSeedanceV1ProImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiBytedanceSeedanceV1ProImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiBytedanceSeedanceV1ProImageToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bytedance/seedance/v1/pro/image-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiBytedanceSeedanceV1ProImageToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. 
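+ *
+ * A minimal cancellation sketch (editorial, not generated). The PUT method
+ * and path come from the matching `...CancelData` type; the host and auth
+ * scheme are assumptions.
+ * @example
+ * declare const requestId: string
+ * const { success } = await fetch(
+ *   `https://queue.fal.run/fal-ai/bytedance/seedance/v1/pro/image-to-video/requests/${requestId}/cancel`,
+ *   { method: 'PUT', headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+ * ).then((r) => r.json() as Promise<{ success?: boolean }>)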
+ */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiBytedanceSeedanceV1ProImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiBytedanceSeedanceV1ProImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiBytedanceSeedanceV1ProImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiBytedanceSeedanceV1ProImageToVideoData = { + body: SchemaBytedanceSeedanceV1ProImageToVideoInput + path?: never + query?: never + url: '/fal-ai/bytedance/seedance/v1/pro/image-to-video' +} + +export type PostFalAiBytedanceSeedanceV1ProImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiBytedanceSeedanceV1ProImageToVideoResponse = + PostFalAiBytedanceSeedanceV1ProImageToVideoResponses[keyof PostFalAiBytedanceSeedanceV1ProImageToVideoResponses] + +export type GetFalAiBytedanceSeedanceV1ProImageToVideoRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bytedance/seedance/v1/pro/image-to-video/requests/{request_id}' + } + +export type GetFalAiBytedanceSeedanceV1ProImageToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaBytedanceSeedanceV1ProImageToVideoOutput + } + +export type GetFalAiBytedanceSeedanceV1ProImageToVideoRequestsByRequestIdResponse = + GetFalAiBytedanceSeedanceV1ProImageToVideoRequestsByRequestIdResponses[keyof GetFalAiBytedanceSeedanceV1ProImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiKlingVideoV21MasterImageToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-video/v2.1/master/image-to-video/requests/{request_id}/status' + } + +export type GetFalAiKlingVideoV21MasterImageToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiKlingVideoV21MasterImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoV21MasterImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoV21MasterImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoV21MasterImageToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v2.1/master/image-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiKlingVideoV21MasterImageToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiKlingVideoV21MasterImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoV21MasterImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoV21MasterImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoV21MasterImageToVideoData = { + body: SchemaKlingVideoV21MasterImageToVideoInput + path?: never + query?: never + url: '/fal-ai/kling-video/v2.1/master/image-to-video' +} + +export type PostFalAiKlingVideoV21MasterImageToVideoResponses = { + /** + * The request status. 
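+ *
+ * A minimal submission sketch (editorial, not generated). The path and the
+ * input type come from `PostFalAiKlingVideoV21MasterImageToVideoData`; the
+ * host, auth scheme, and the sample field names below are assumptions about
+ * the input schema, not guaranteed by it.
+ * @example
+ * const body = {
+ *   prompt: 'a red fox running through snow', // field names assumed
+ *   image_url: 'https://example.com/fox.png',
+ * } as SchemaKlingVideoV21MasterImageToVideoInput
+ * const queued: SchemaQueueStatus = await fetch(
+ *   'https://queue.fal.run/fal-ai/kling-video/v2.1/master/image-to-video',
+ *   {
+ *     method: 'POST',
+ *     headers: {
+ *       Authorization: `Key ${process.env.FAL_KEY}`,
+ *       'Content-Type': 'application/json',
+ *     },
+ *     body: JSON.stringify(body),
+ *   },
+ * ).then((r) => r.json())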
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoV21MasterImageToVideoResponse = + PostFalAiKlingVideoV21MasterImageToVideoResponses[keyof PostFalAiKlingVideoV21MasterImageToVideoResponses] + +export type GetFalAiKlingVideoV21MasterImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v2.1/master/image-to-video/requests/{request_id}' +} + +export type GetFalAiKlingVideoV21MasterImageToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaKlingVideoV21MasterImageToVideoOutput + } + +export type GetFalAiKlingVideoV21MasterImageToVideoRequestsByRequestIdResponse = + GetFalAiKlingVideoV21MasterImageToVideoRequestsByRequestIdResponses[keyof GetFalAiKlingVideoV21MasterImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiKlingVideoV21StandardImageToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-video/v2.1/standard/image-to-video/requests/{request_id}/status' + } + +export type GetFalAiKlingVideoV21StandardImageToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiKlingVideoV21StandardImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoV21StandardImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoV21StandardImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoV21StandardImageToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v2.1/standard/image-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiKlingVideoV21StandardImageToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiKlingVideoV21StandardImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoV21StandardImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoV21StandardImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoV21StandardImageToVideoData = { + body: SchemaKlingVideoV21StandardImageToVideoInput + path?: never + query?: never + url: '/fal-ai/kling-video/v2.1/standard/image-to-video' +} + +export type PostFalAiKlingVideoV21StandardImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoV21StandardImageToVideoResponse = + PostFalAiKlingVideoV21StandardImageToVideoResponses[keyof PostFalAiKlingVideoV21StandardImageToVideoResponses] + +export type GetFalAiKlingVideoV21StandardImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v2.1/standard/image-to-video/requests/{request_id}' +} + +export type GetFalAiKlingVideoV21StandardImageToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. 
+ */ + 200: SchemaKlingVideoV21StandardImageToVideoOutput + } + +export type GetFalAiKlingVideoV21StandardImageToVideoRequestsByRequestIdResponse = + GetFalAiKlingVideoV21StandardImageToVideoRequestsByRequestIdResponses[keyof GetFalAiKlingVideoV21StandardImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiPixverseV45ImageToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pixverse/v4.5/image-to-video/requests/{request_id}/status' +} + +export type GetFalAiPixverseV45ImageToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiPixverseV45ImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiPixverseV45ImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiPixverseV45ImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiPixverseV45ImageToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v4.5/image-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiPixverseV45ImageToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiPixverseV45ImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiPixverseV45ImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiPixverseV45ImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiPixverseV45ImageToVideoData = { + body: SchemaPixverseV45ImageToVideoInput + path?: never + query?: never + url: '/fal-ai/pixverse/v4.5/image-to-video' +} + +export type PostFalAiPixverseV45ImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPixverseV45ImageToVideoResponse = + PostFalAiPixverseV45ImageToVideoResponses[keyof PostFalAiPixverseV45ImageToVideoResponses] + +export type GetFalAiPixverseV45ImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v4.5/image-to-video/requests/{request_id}' +} + +export type GetFalAiPixverseV45ImageToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPixverseV45ImageToVideoOutput +} + +export type GetFalAiPixverseV45ImageToVideoRequestsByRequestIdResponse = + GetFalAiPixverseV45ImageToVideoRequestsByRequestIdResponses[keyof GetFalAiPixverseV45ImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiKlingVideoV2MasterImageToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-video/v2/master/image-to-video/requests/{request_id}/status' + } + +export type GetFalAiKlingVideoV2MasterImageToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiKlingVideoV2MasterImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoV2MasterImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoV2MasterImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoV2MasterImageToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v2/master/image-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiKlingVideoV2MasterImageToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiKlingVideoV2MasterImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoV2MasterImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoV2MasterImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoV2MasterImageToVideoData = { + body: SchemaKlingVideoV2MasterImageToVideoInput + path?: never + query?: never + url: '/fal-ai/kling-video/v2/master/image-to-video' +} + +export type PostFalAiKlingVideoV2MasterImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoV2MasterImageToVideoResponse = + PostFalAiKlingVideoV2MasterImageToVideoResponses[keyof PostFalAiKlingVideoV2MasterImageToVideoResponses] + +export type GetFalAiKlingVideoV2MasterImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v2/master/image-to-video/requests/{request_id}' +} + +export type GetFalAiKlingVideoV2MasterImageToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaKlingVideoV2MasterImageToVideoOutput + } + +export type GetFalAiKlingVideoV2MasterImageToVideoRequestsByRequestIdResponse = + GetFalAiKlingVideoV2MasterImageToVideoRequestsByRequestIdResponses[keyof GetFalAiKlingVideoV2MasterImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiWanI2vRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan-i2v/requests/{request_id}/status' +} + +export type GetFalAiWanI2vRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiWanI2vRequestsByRequestIdStatusResponse = + GetFalAiWanI2vRequestsByRequestIdStatusResponses[keyof GetFalAiWanI2vRequestsByRequestIdStatusResponses] + +export type PutFalAiWanI2vRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-i2v/requests/{request_id}/cancel' +} + +export type PutFalAiWanI2vRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiWanI2vRequestsByRequestIdCancelResponse = + PutFalAiWanI2vRequestsByRequestIdCancelResponses[keyof PutFalAiWanI2vRequestsByRequestIdCancelResponses] + +export type PostFalAiWanI2vData = { + body: SchemaWanI2vInput + path?: never + query?: never + url: '/fal-ai/wan-i2v' +} + +export type PostFalAiWanI2vResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanI2vResponse = + PostFalAiWanI2vResponses[keyof PostFalAiWanI2vResponses] + +export type GetFalAiWanI2vRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-i2v/requests/{request_id}' +} + +export type GetFalAiWanI2vRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWanI2vOutput +} + +export type GetFalAiWanI2vRequestsByRequestIdResponse = + GetFalAiWanI2vRequestsByRequestIdResponses[keyof GetFalAiWanI2vRequestsByRequestIdResponses] + +export type GetFalAiPixverseV56TransitionRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pixverse/v5.6/transition/requests/{request_id}/status' +} + +export type GetFalAiPixverseV56TransitionRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiPixverseV56TransitionRequestsByRequestIdStatusResponse = + GetFalAiPixverseV56TransitionRequestsByRequestIdStatusResponses[keyof GetFalAiPixverseV56TransitionRequestsByRequestIdStatusResponses] + +export type PutFalAiPixverseV56TransitionRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v5.6/transition/requests/{request_id}/cancel' +} + +export type PutFalAiPixverseV56TransitionRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiPixverseV56TransitionRequestsByRequestIdCancelResponse = + PutFalAiPixverseV56TransitionRequestsByRequestIdCancelResponses[keyof PutFalAiPixverseV56TransitionRequestsByRequestIdCancelResponses] + +export type PostFalAiPixverseV56TransitionData = { + body: SchemaPixverseV56TransitionInput + path?: never + query?: never + url: '/fal-ai/pixverse/v5.6/transition' +} + +export type PostFalAiPixverseV56TransitionResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPixverseV56TransitionResponse = + PostFalAiPixverseV56TransitionResponses[keyof PostFalAiPixverseV56TransitionResponses] + +export type GetFalAiPixverseV56TransitionRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v5.6/transition/requests/{request_id}' +} + +export type GetFalAiPixverseV56TransitionRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaPixverseV56TransitionOutput +} + +export type GetFalAiPixverseV56TransitionRequestsByRequestIdResponse = + GetFalAiPixverseV56TransitionRequestsByRequestIdResponses[keyof GetFalAiPixverseV56TransitionRequestsByRequestIdResponses] + +export type GetFalAiPixverseV56ImageToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pixverse/v5.6/image-to-video/requests/{request_id}/status' +} + +export type GetFalAiPixverseV56ImageToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiPixverseV56ImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiPixverseV56ImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiPixverseV56ImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiPixverseV56ImageToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v5.6/image-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiPixverseV56ImageToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiPixverseV56ImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiPixverseV56ImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiPixverseV56ImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiPixverseV56ImageToVideoData = { + body: SchemaPixverseV56ImageToVideoInput + path?: never + query?: never + url: '/fal-ai/pixverse/v5.6/image-to-video' +} + +export type PostFalAiPixverseV56ImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPixverseV56ImageToVideoResponse = + PostFalAiPixverseV56ImageToVideoResponses[keyof PostFalAiPixverseV56ImageToVideoResponses] + +export type GetFalAiPixverseV56ImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v5.6/image-to-video/requests/{request_id}' +} + +export type GetFalAiPixverseV56ImageToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPixverseV56ImageToVideoOutput +} + +export type GetFalAiPixverseV56ImageToVideoRequestsByRequestIdResponse = + GetFalAiPixverseV56ImageToVideoRequestsByRequestIdResponses[keyof GetFalAiPixverseV56ImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiViduQ2ReferenceToVideoProRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/vidu/q2/reference-to-video/pro/requests/{request_id}/status' +} + +export type GetFalAiViduQ2ReferenceToVideoProRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiViduQ2ReferenceToVideoProRequestsByRequestIdStatusResponse = + GetFalAiViduQ2ReferenceToVideoProRequestsByRequestIdStatusResponses[keyof GetFalAiViduQ2ReferenceToVideoProRequestsByRequestIdStatusResponses] + +export type PutFalAiViduQ2ReferenceToVideoProRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/vidu/q2/reference-to-video/pro/requests/{request_id}/cancel' +} + +export type PutFalAiViduQ2ReferenceToVideoProRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiViduQ2ReferenceToVideoProRequestsByRequestIdCancelResponse = + PutFalAiViduQ2ReferenceToVideoProRequestsByRequestIdCancelResponses[keyof PutFalAiViduQ2ReferenceToVideoProRequestsByRequestIdCancelResponses] + +export type PostFalAiViduQ2ReferenceToVideoProData = { + body: SchemaViduQ2ReferenceToVideoProInput + path?: never + query?: never + url: '/fal-ai/vidu/q2/reference-to-video/pro' +} + +export type PostFalAiViduQ2ReferenceToVideoProResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiViduQ2ReferenceToVideoProResponse = + PostFalAiViduQ2ReferenceToVideoProResponses[keyof PostFalAiViduQ2ReferenceToVideoProResponses] + +export type GetFalAiViduQ2ReferenceToVideoProRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/vidu/q2/reference-to-video/pro/requests/{request_id}' +} + +export type GetFalAiViduQ2ReferenceToVideoProRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaViduQ2ReferenceToVideoProOutput +} + +export type GetFalAiViduQ2ReferenceToVideoProRequestsByRequestIdResponse = + GetFalAiViduQ2ReferenceToVideoProRequestsByRequestIdResponses[keyof GetFalAiViduQ2ReferenceToVideoProRequestsByRequestIdResponses] + +export type GetWanV26ImageToVideoFlashRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/wan/v2.6/image-to-video/flash/requests/{request_id}/status' +} + +export type GetWanV26ImageToVideoFlashRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetWanV26ImageToVideoFlashRequestsByRequestIdStatusResponse = + GetWanV26ImageToVideoFlashRequestsByRequestIdStatusResponses[keyof GetWanV26ImageToVideoFlashRequestsByRequestIdStatusResponses] + +export type PutWanV26ImageToVideoFlashRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/wan/v2.6/image-to-video/flash/requests/{request_id}/cancel' +} + +export type PutWanV26ImageToVideoFlashRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
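+ *
+ * Note (editorial): the flag is optional, so a defensive caller should only
+ * treat the request as cancelled when it is explicitly `true`.
+ * @example
+ * declare const res: { success?: boolean } // parsed cancel response body
+ * const cancelled = res.success === true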
+ */ + success?: boolean + } +} + +export type PutWanV26ImageToVideoFlashRequestsByRequestIdCancelResponse = + PutWanV26ImageToVideoFlashRequestsByRequestIdCancelResponses[keyof PutWanV26ImageToVideoFlashRequestsByRequestIdCancelResponses] + +export type PostWanV26ImageToVideoFlashData = { + body: SchemaV26ImageToVideoFlashInput + path?: never + query?: never + url: '/wan/v2.6/image-to-video/flash' +} + +export type PostWanV26ImageToVideoFlashResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostWanV26ImageToVideoFlashResponse = + PostWanV26ImageToVideoFlashResponses[keyof PostWanV26ImageToVideoFlashResponses] + +export type GetWanV26ImageToVideoFlashRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/wan/v2.6/image-to-video/flash/requests/{request_id}' +} + +export type GetWanV26ImageToVideoFlashRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaV26ImageToVideoFlashOutput +} + +export type GetWanV26ImageToVideoFlashRequestsByRequestIdResponse = + GetWanV26ImageToVideoFlashRequestsByRequestIdResponses[keyof GetWanV26ImageToVideoFlashRequestsByRequestIdResponses] + +export type GetFalAiLtx219bDistilledImageToVideoLoraRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ltx-2-19b/distilled/image-to-video/lora/requests/{request_id}/status' + } + +export type GetFalAiLtx219bDistilledImageToVideoLoraRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiLtx219bDistilledImageToVideoLoraRequestsByRequestIdStatusResponse = + GetFalAiLtx219bDistilledImageToVideoLoraRequestsByRequestIdStatusResponses[keyof GetFalAiLtx219bDistilledImageToVideoLoraRequestsByRequestIdStatusResponses] + +export type PutFalAiLtx219bDistilledImageToVideoLoraRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2-19b/distilled/image-to-video/lora/requests/{request_id}/cancel' + } + +export type PutFalAiLtx219bDistilledImageToVideoLoraRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiLtx219bDistilledImageToVideoLoraRequestsByRequestIdCancelResponse = + PutFalAiLtx219bDistilledImageToVideoLoraRequestsByRequestIdCancelResponses[keyof PutFalAiLtx219bDistilledImageToVideoLoraRequestsByRequestIdCancelResponses] + +export type PostFalAiLtx219bDistilledImageToVideoLoraData = { + body: SchemaLtx219bDistilledImageToVideoLoraInput + path?: never + query?: never + url: '/fal-ai/ltx-2-19b/distilled/image-to-video/lora' +} + +export type PostFalAiLtx219bDistilledImageToVideoLoraResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtx219bDistilledImageToVideoLoraResponse = + PostFalAiLtx219bDistilledImageToVideoLoraResponses[keyof PostFalAiLtx219bDistilledImageToVideoLoraResponses] + +export type GetFalAiLtx219bDistilledImageToVideoLoraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2-19b/distilled/image-to-video/lora/requests/{request_id}' +} + +export type GetFalAiLtx219bDistilledImageToVideoLoraRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaLtx219bDistilledImageToVideoLoraOutput + } + +export type GetFalAiLtx219bDistilledImageToVideoLoraRequestsByRequestIdResponse = + GetFalAiLtx219bDistilledImageToVideoLoraRequestsByRequestIdResponses[keyof GetFalAiLtx219bDistilledImageToVideoLoraRequestsByRequestIdResponses] + +export type GetFalAiLtx219bDistilledImageToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ltx-2-19b/distilled/image-to-video/requests/{request_id}/status' + } + +export type GetFalAiLtx219bDistilledImageToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiLtx219bDistilledImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiLtx219bDistilledImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiLtx219bDistilledImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiLtx219bDistilledImageToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2-19b/distilled/image-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiLtx219bDistilledImageToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiLtx219bDistilledImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiLtx219bDistilledImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiLtx219bDistilledImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiLtx219bDistilledImageToVideoData = { + body: SchemaLtx219bDistilledImageToVideoInput + path?: never + query?: never + url: '/fal-ai/ltx-2-19b/distilled/image-to-video' +} + +export type PostFalAiLtx219bDistilledImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtx219bDistilledImageToVideoResponse = + PostFalAiLtx219bDistilledImageToVideoResponses[keyof PostFalAiLtx219bDistilledImageToVideoResponses] + +export type GetFalAiLtx219bDistilledImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2-19b/distilled/image-to-video/requests/{request_id}' +} + +export type GetFalAiLtx219bDistilledImageToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaLtx219bDistilledImageToVideoOutput +} + +export type GetFalAiLtx219bDistilledImageToVideoRequestsByRequestIdResponse = + GetFalAiLtx219bDistilledImageToVideoRequestsByRequestIdResponses[keyof GetFalAiLtx219bDistilledImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiLtx219bImageToVideoLoraRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ltx-2-19b/image-to-video/lora/requests/{request_id}/status' +} + +export type GetFalAiLtx219bImageToVideoLoraRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiLtx219bImageToVideoLoraRequestsByRequestIdStatusResponse = + GetFalAiLtx219bImageToVideoLoraRequestsByRequestIdStatusResponses[keyof GetFalAiLtx219bImageToVideoLoraRequestsByRequestIdStatusResponses] + +export type PutFalAiLtx219bImageToVideoLoraRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2-19b/image-to-video/lora/requests/{request_id}/cancel' +} + +export type PutFalAiLtx219bImageToVideoLoraRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiLtx219bImageToVideoLoraRequestsByRequestIdCancelResponse = + PutFalAiLtx219bImageToVideoLoraRequestsByRequestIdCancelResponses[keyof PutFalAiLtx219bImageToVideoLoraRequestsByRequestIdCancelResponses] + +export type PostFalAiLtx219bImageToVideoLoraData = { + body: SchemaLtx219bImageToVideoLoraInput + path?: never + query?: never + url: '/fal-ai/ltx-2-19b/image-to-video/lora' +} + +export type PostFalAiLtx219bImageToVideoLoraResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtx219bImageToVideoLoraResponse = + PostFalAiLtx219bImageToVideoLoraResponses[keyof PostFalAiLtx219bImageToVideoLoraResponses] + +export type GetFalAiLtx219bImageToVideoLoraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2-19b/image-to-video/lora/requests/{request_id}' +} + +export type GetFalAiLtx219bImageToVideoLoraRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLtx219bImageToVideoLoraOutput +} + +export type GetFalAiLtx219bImageToVideoLoraRequestsByRequestIdResponse = + GetFalAiLtx219bImageToVideoLoraRequestsByRequestIdResponses[keyof GetFalAiLtx219bImageToVideoLoraRequestsByRequestIdResponses] + +export type GetFalAiLtx219bImageToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ltx-2-19b/image-to-video/requests/{request_id}/status' +} + +export type GetFalAiLtx219bImageToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiLtx219bImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiLtx219bImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiLtx219bImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiLtx219bImageToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2-19b/image-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiLtx219bImageToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLtx219bImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiLtx219bImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiLtx219bImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiLtx219bImageToVideoData = { + body: SchemaLtx219bImageToVideoInput + path?: never + query?: never + url: '/fal-ai/ltx-2-19b/image-to-video' +} + +export type PostFalAiLtx219bImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtx219bImageToVideoResponse = + PostFalAiLtx219bImageToVideoResponses[keyof PostFalAiLtx219bImageToVideoResponses] + +export type GetFalAiLtx219bImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2-19b/image-to-video/requests/{request_id}' +} + +export type GetFalAiLtx219bImageToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLtx219bImageToVideoOutput +} + +export type GetFalAiLtx219bImageToVideoRequestsByRequestIdResponse = + GetFalAiLtx219bImageToVideoRequestsByRequestIdResponses[keyof GetFalAiLtx219bImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiWanMoveRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan-move/requests/{request_id}/status' +} + +export type GetFalAiWanMoveRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiWanMoveRequestsByRequestIdStatusResponse = + GetFalAiWanMoveRequestsByRequestIdStatusResponses[keyof GetFalAiWanMoveRequestsByRequestIdStatusResponses] + +export type PutFalAiWanMoveRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-move/requests/{request_id}/cancel' +} + +export type PutFalAiWanMoveRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiWanMoveRequestsByRequestIdCancelResponse = + PutFalAiWanMoveRequestsByRequestIdCancelResponses[keyof PutFalAiWanMoveRequestsByRequestIdCancelResponses] + +export type PostFalAiWanMoveData = { + body: SchemaWanMoveInput + path?: never + query?: never + url: '/fal-ai/wan-move' +} + +export type PostFalAiWanMoveResponses = { + /** + * The request status. 
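+ *
+ * Note (editorial): throughout this file, `X[keyof X]` collapses a
+ * status-code-keyed responses object into the union of its body types, so
+ * `PostFalAiWanMoveResponse` below resolves to `SchemaQueueStatus`.
+ * @example
+ * // Type-level illustration with hypothetical names:
+ * type Responses = { 200: { ok: true }; 404: { error: string } }
+ * type Response = Responses[keyof Responses] // { ok: true } | { error: string }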
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanMoveResponse = + PostFalAiWanMoveResponses[keyof PostFalAiWanMoveResponses] + +export type GetFalAiWanMoveRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-move/requests/{request_id}' +} + +export type GetFalAiWanMoveRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWanMoveOutput +} + +export type GetFalAiWanMoveRequestsByRequestIdResponse = + GetFalAiWanMoveRequestsByRequestIdResponses[keyof GetFalAiWanMoveRequestsByRequestIdResponses] + +export type GetFalAiKandinsky5ProImageToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kandinsky5-pro/image-to-video/requests/{request_id}/status' +} + +export type GetFalAiKandinsky5ProImageToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiKandinsky5ProImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiKandinsky5ProImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiKandinsky5ProImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiKandinsky5ProImageToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kandinsky5-pro/image-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiKandinsky5ProImageToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiKandinsky5ProImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiKandinsky5ProImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiKandinsky5ProImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiKandinsky5ProImageToVideoData = { + body: SchemaKandinsky5ProImageToVideoInput + path?: never + query?: never + url: '/fal-ai/kandinsky5-pro/image-to-video' +} + +export type PostFalAiKandinsky5ProImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKandinsky5ProImageToVideoResponse = + PostFalAiKandinsky5ProImageToVideoResponses[keyof PostFalAiKandinsky5ProImageToVideoResponses] + +export type GetFalAiKandinsky5ProImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kandinsky5-pro/image-to-video/requests/{request_id}' +} + +export type GetFalAiKandinsky5ProImageToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaKandinsky5ProImageToVideoOutput +} + +export type GetFalAiKandinsky5ProImageToVideoRequestsByRequestIdResponse = + GetFalAiKandinsky5ProImageToVideoRequestsByRequestIdResponses[keyof GetFalAiKandinsky5ProImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiBytedanceSeedanceV15ProImageToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
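+ *
+ * A minimal sketch of requesting status with logs included (editorial, not
+ * generated). The `logs` query parameter comes from this type; the host and
+ * auth scheme are assumptions.
+ * @example
+ * declare const requestId: string
+ * const status = await fetch(
+ *   `https://queue.fal.run/fal-ai/bytedance/seedance/v1.5/pro/image-to-video/requests/${requestId}/status?logs=1`,
+ *   { headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+ * ).then((r) => r.json() as Promise<SchemaQueueStatus>)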
+ */ + logs?: number + } + url: '/fal-ai/bytedance/seedance/v1.5/pro/image-to-video/requests/{request_id}/status' + } + +export type GetFalAiBytedanceSeedanceV15ProImageToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiBytedanceSeedanceV15ProImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiBytedanceSeedanceV15ProImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiBytedanceSeedanceV15ProImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiBytedanceSeedanceV15ProImageToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bytedance/seedance/v1.5/pro/image-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiBytedanceSeedanceV15ProImageToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiBytedanceSeedanceV15ProImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiBytedanceSeedanceV15ProImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiBytedanceSeedanceV15ProImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiBytedanceSeedanceV15ProImageToVideoData = { + body: SchemaBytedanceSeedanceV15ProImageToVideoInput + path?: never + query?: never + url: '/fal-ai/bytedance/seedance/v1.5/pro/image-to-video' +} + +export type PostFalAiBytedanceSeedanceV15ProImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiBytedanceSeedanceV15ProImageToVideoResponse = + PostFalAiBytedanceSeedanceV15ProImageToVideoResponses[keyof PostFalAiBytedanceSeedanceV15ProImageToVideoResponses] + +export type GetFalAiBytedanceSeedanceV15ProImageToVideoRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bytedance/seedance/v1.5/pro/image-to-video/requests/{request_id}' + } + +export type GetFalAiBytedanceSeedanceV15ProImageToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaBytedanceSeedanceV15ProImageToVideoOutput + } + +export type GetFalAiBytedanceSeedanceV15ProImageToVideoRequestsByRequestIdResponse = + GetFalAiBytedanceSeedanceV15ProImageToVideoRequestsByRequestIdResponses[keyof GetFalAiBytedanceSeedanceV15ProImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiLiveAvatarRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/live-avatar/requests/{request_id}/status' +} + +export type GetFalAiLiveAvatarRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLiveAvatarRequestsByRequestIdStatusResponse = + GetFalAiLiveAvatarRequestsByRequestIdStatusResponses[keyof GetFalAiLiveAvatarRequestsByRequestIdStatusResponses] + +export type PutFalAiLiveAvatarRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/live-avatar/requests/{request_id}/cancel' +} + +export type PutFalAiLiveAvatarRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. 
+ */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLiveAvatarRequestsByRequestIdCancelResponse = + PutFalAiLiveAvatarRequestsByRequestIdCancelResponses[keyof PutFalAiLiveAvatarRequestsByRequestIdCancelResponses] + +export type PostFalAiLiveAvatarData = { + body: SchemaLiveAvatarInput + path?: never + query?: never + url: '/fal-ai/live-avatar' +} + +export type PostFalAiLiveAvatarResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLiveAvatarResponse = + PostFalAiLiveAvatarResponses[keyof PostFalAiLiveAvatarResponses] + +export type GetFalAiLiveAvatarRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/live-avatar/requests/{request_id}' +} + +export type GetFalAiLiveAvatarRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLiveAvatarOutput +} + +export type GetFalAiLiveAvatarRequestsByRequestIdResponse = + GetFalAiLiveAvatarRequestsByRequestIdResponses[keyof GetFalAiLiveAvatarRequestsByRequestIdResponses] + +export type GetFalAiHunyuanVideoV15ImageToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/hunyuan-video-v1.5/image-to-video/requests/{request_id}/status' +} + +export type GetFalAiHunyuanVideoV15ImageToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiHunyuanVideoV15ImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiHunyuanVideoV15ImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiHunyuanVideoV15ImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiHunyuanVideoV15ImageToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan-video-v1.5/image-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiHunyuanVideoV15ImageToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiHunyuanVideoV15ImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiHunyuanVideoV15ImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiHunyuanVideoV15ImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiHunyuanVideoV15ImageToVideoData = { + body: SchemaHunyuanVideoV15ImageToVideoInput + path?: never + query?: never + url: '/fal-ai/hunyuan-video-v1.5/image-to-video' +} + +export type PostFalAiHunyuanVideoV15ImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiHunyuanVideoV15ImageToVideoResponse = + PostFalAiHunyuanVideoV15ImageToVideoResponses[keyof PostFalAiHunyuanVideoV15ImageToVideoResponses] + +export type GetFalAiHunyuanVideoV15ImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan-video-v1.5/image-to-video/requests/{request_id}' +} + +export type GetFalAiHunyuanVideoV15ImageToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. 
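+ *
+ * A hypothetical helper (editorial, not part of this package) that builds a
+ * concrete URL from any generated `...Data` shape. Only the `url` template
+ * and `path.request_id` fields it reads come from these types; the host is
+ * an assumption.
+ * @example
+ * function buildRequestUrl(data: {
+ *   url: string
+ *   path?: { request_id: string }
+ * }): string {
+ *   const path = data.path
+ *     ? data.url.replace('{request_id}', data.path.request_id)
+ *     : data.url
+ *   return `https://queue.fal.run${path}`
+ * }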
+ */ + 200: SchemaHunyuanVideoV15ImageToVideoOutput +} + +export type GetFalAiHunyuanVideoV15ImageToVideoRequestsByRequestIdResponse = + GetFalAiHunyuanVideoV15ImageToVideoRequestsByRequestIdResponses[keyof GetFalAiHunyuanVideoV15ImageToVideoRequestsByRequestIdResponses] + +export type GetWanV26ImageToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/wan/v2.6/image-to-video/requests/{request_id}/status' +} + +export type GetWanV26ImageToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetWanV26ImageToVideoRequestsByRequestIdStatusResponse = + GetWanV26ImageToVideoRequestsByRequestIdStatusResponses[keyof GetWanV26ImageToVideoRequestsByRequestIdStatusResponses] + +export type PutWanV26ImageToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/wan/v2.6/image-to-video/requests/{request_id}/cancel' +} + +export type PutWanV26ImageToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutWanV26ImageToVideoRequestsByRequestIdCancelResponse = + PutWanV26ImageToVideoRequestsByRequestIdCancelResponses[keyof PutWanV26ImageToVideoRequestsByRequestIdCancelResponses] + +export type PostWanV26ImageToVideoData = { + body: SchemaV26ImageToVideoInput + path?: never + query?: never + url: '/wan/v2.6/image-to-video' +} + +export type PostWanV26ImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostWanV26ImageToVideoResponse = + PostWanV26ImageToVideoResponses[keyof PostWanV26ImageToVideoResponses] + +export type GetWanV26ImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/wan/v2.6/image-to-video/requests/{request_id}' +} + +export type GetWanV26ImageToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaV26ImageToVideoOutput +} + +export type GetWanV26ImageToVideoRequestsByRequestIdResponse = + GetWanV26ImageToVideoRequestsByRequestIdResponses[keyof GetWanV26ImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiKlingVideoO1StandardReferenceToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-video/o1/standard/reference-to-video/requests/{request_id}/status' + } + +export type GetFalAiKlingVideoO1StandardReferenceToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiKlingVideoO1StandardReferenceToVideoRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoO1StandardReferenceToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoO1StandardReferenceToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoO1StandardReferenceToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/o1/standard/reference-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiKlingVideoO1StandardReferenceToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiKlingVideoO1StandardReferenceToVideoRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoO1StandardReferenceToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoO1StandardReferenceToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoO1StandardReferenceToVideoData = { + body: SchemaKlingVideoO1StandardReferenceToVideoInput + path?: never + query?: never + url: '/fal-ai/kling-video/o1/standard/reference-to-video' +} + +export type PostFalAiKlingVideoO1StandardReferenceToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoO1StandardReferenceToVideoResponse = + PostFalAiKlingVideoO1StandardReferenceToVideoResponses[keyof PostFalAiKlingVideoO1StandardReferenceToVideoResponses] + +export type GetFalAiKlingVideoO1StandardReferenceToVideoRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/o1/standard/reference-to-video/requests/{request_id}' + } + +export type GetFalAiKlingVideoO1StandardReferenceToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaKlingVideoO1StandardReferenceToVideoOutput + } + +export type GetFalAiKlingVideoO1StandardReferenceToVideoRequestsByRequestIdResponse = + GetFalAiKlingVideoO1StandardReferenceToVideoRequestsByRequestIdResponses[keyof GetFalAiKlingVideoO1StandardReferenceToVideoRequestsByRequestIdResponses] + +export type GetFalAiKlingVideoO1StandardImageToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-video/o1/standard/image-to-video/requests/{request_id}/status' + } + +export type GetFalAiKlingVideoO1StandardImageToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiKlingVideoO1StandardImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoO1StandardImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoO1StandardImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoO1StandardImageToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/o1/standard/image-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiKlingVideoO1StandardImageToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. 
+ */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiKlingVideoO1StandardImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoO1StandardImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoO1StandardImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoO1StandardImageToVideoData = { + body: SchemaKlingVideoO1StandardImageToVideoInput + path?: never + query?: never + url: '/fal-ai/kling-video/o1/standard/image-to-video' +} + +export type PostFalAiKlingVideoO1StandardImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoO1StandardImageToVideoResponse = + PostFalAiKlingVideoO1StandardImageToVideoResponses[keyof PostFalAiKlingVideoO1StandardImageToVideoResponses] + +export type GetFalAiKlingVideoO1StandardImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/o1/standard/image-to-video/requests/{request_id}' +} + +export type GetFalAiKlingVideoO1StandardImageToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaKlingVideoO1StandardImageToVideoOutput + } + +export type GetFalAiKlingVideoO1StandardImageToVideoRequestsByRequestIdResponse = + GetFalAiKlingVideoO1StandardImageToVideoRequestsByRequestIdResponses[keyof GetFalAiKlingVideoO1StandardImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiCreatifyAuroraRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/creatify/aurora/requests/{request_id}/status' +} + +export type GetFalAiCreatifyAuroraRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiCreatifyAuroraRequestsByRequestIdStatusResponse = + GetFalAiCreatifyAuroraRequestsByRequestIdStatusResponses[keyof GetFalAiCreatifyAuroraRequestsByRequestIdStatusResponses] + +export type PutFalAiCreatifyAuroraRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/creatify/aurora/requests/{request_id}/cancel' +} + +export type PutFalAiCreatifyAuroraRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiCreatifyAuroraRequestsByRequestIdCancelResponse = + PutFalAiCreatifyAuroraRequestsByRequestIdCancelResponses[keyof PutFalAiCreatifyAuroraRequestsByRequestIdCancelResponses] + +export type PostFalAiCreatifyAuroraData = { + body: SchemaCreatifyAuroraInput + path?: never + query?: never + url: '/fal-ai/creatify/aurora' +} + +export type PostFalAiCreatifyAuroraResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiCreatifyAuroraResponse = + PostFalAiCreatifyAuroraResponses[keyof PostFalAiCreatifyAuroraResponses] + +export type GetFalAiCreatifyAuroraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/creatify/aurora/requests/{request_id}' +} + +export type GetFalAiCreatifyAuroraRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaCreatifyAuroraOutput +} + +export type GetFalAiCreatifyAuroraRequestsByRequestIdResponse = + GetFalAiCreatifyAuroraRequestsByRequestIdResponses[keyof GetFalAiCreatifyAuroraRequestsByRequestIdResponses] + +export type GetFalAiKlingVideoAiAvatarV2ProRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-video/ai-avatar/v2/pro/requests/{request_id}/status' +} + +export type GetFalAiKlingVideoAiAvatarV2ProRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiKlingVideoAiAvatarV2ProRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoAiAvatarV2ProRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoAiAvatarV2ProRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoAiAvatarV2ProRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/ai-avatar/v2/pro/requests/{request_id}/cancel' +} + +export type PutFalAiKlingVideoAiAvatarV2ProRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiKlingVideoAiAvatarV2ProRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoAiAvatarV2ProRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoAiAvatarV2ProRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoAiAvatarV2ProData = { + body: SchemaKlingVideoAiAvatarV2ProInput + path?: never + query?: never + url: '/fal-ai/kling-video/ai-avatar/v2/pro' +} + +export type PostFalAiKlingVideoAiAvatarV2ProResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoAiAvatarV2ProResponse = + PostFalAiKlingVideoAiAvatarV2ProResponses[keyof PostFalAiKlingVideoAiAvatarV2ProResponses] + +export type GetFalAiKlingVideoAiAvatarV2ProRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/ai-avatar/v2/pro/requests/{request_id}' +} + +export type GetFalAiKlingVideoAiAvatarV2ProRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaKlingVideoAiAvatarV2ProOutput +} + +export type GetFalAiKlingVideoAiAvatarV2ProRequestsByRequestIdResponse = + GetFalAiKlingVideoAiAvatarV2ProRequestsByRequestIdResponses[keyof GetFalAiKlingVideoAiAvatarV2ProRequestsByRequestIdResponses] + +export type GetFalAiKlingVideoAiAvatarV2StandardRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-video/ai-avatar/v2/standard/requests/{request_id}/status' + } + +export type GetFalAiKlingVideoAiAvatarV2StandardRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
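+ *
+ * A minimal polling sketch, assuming a hypothetical `getStatus` helper that
+ * performs the GET described by the matching `...StatusData` type; only
+ * `SchemaQueueStatus` and its `status` enum come from this file:
+ *
+ * @example
+ * let s: SchemaQueueStatus = await getStatus({ request_id })
+ * while (s.status !== 'COMPLETED') {
+ *   // IN_QUEUE and IN_PROGRESS are the non-terminal states; back off between polls.
+ *   await new Promise((resolve) => setTimeout(resolve, 1000))
+ *   s = await getStatus({ request_id })
+ * }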
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiKlingVideoAiAvatarV2StandardRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoAiAvatarV2StandardRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoAiAvatarV2StandardRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoAiAvatarV2StandardRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/ai-avatar/v2/standard/requests/{request_id}/cancel' + } + +export type PutFalAiKlingVideoAiAvatarV2StandardRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiKlingVideoAiAvatarV2StandardRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoAiAvatarV2StandardRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoAiAvatarV2StandardRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoAiAvatarV2StandardData = { + body: SchemaKlingVideoAiAvatarV2StandardInput + path?: never + query?: never + url: '/fal-ai/kling-video/ai-avatar/v2/standard' +} + +export type PostFalAiKlingVideoAiAvatarV2StandardResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoAiAvatarV2StandardResponse = + PostFalAiKlingVideoAiAvatarV2StandardResponses[keyof PostFalAiKlingVideoAiAvatarV2StandardResponses] + +export type GetFalAiKlingVideoAiAvatarV2StandardRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/ai-avatar/v2/standard/requests/{request_id}' +} + +export type GetFalAiKlingVideoAiAvatarV2StandardRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaKlingVideoAiAvatarV2StandardOutput +} + +export type GetFalAiKlingVideoAiAvatarV2StandardRequestsByRequestIdResponse = + GetFalAiKlingVideoAiAvatarV2StandardRequestsByRequestIdResponses[keyof GetFalAiKlingVideoAiAvatarV2StandardRequestsByRequestIdResponses] + +export type GetFalAiKlingVideoV26ProImageToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-video/v2.6/pro/image-to-video/requests/{request_id}/status' + } + +export type GetFalAiKlingVideoV26ProImageToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiKlingVideoV26ProImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoV26ProImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoV26ProImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoV26ProImageToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v2.6/pro/image-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiKlingVideoV26ProImageToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiKlingVideoV26ProImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoV26ProImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoV26ProImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoV26ProImageToVideoData = { + body: SchemaKlingVideoV26ProImageToVideoInput + path?: never + query?: never + url: '/fal-ai/kling-video/v2.6/pro/image-to-video' +} + +export type PostFalAiKlingVideoV26ProImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoV26ProImageToVideoResponse = + PostFalAiKlingVideoV26ProImageToVideoResponses[keyof PostFalAiKlingVideoV26ProImageToVideoResponses] + +export type GetFalAiKlingVideoV26ProImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v2.6/pro/image-to-video/requests/{request_id}' +} + +export type GetFalAiKlingVideoV26ProImageToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaKlingVideoV26ProImageToVideoOutput +} + +export type GetFalAiKlingVideoV26ProImageToVideoRequestsByRequestIdResponse = + GetFalAiKlingVideoV26ProImageToVideoRequestsByRequestIdResponses[keyof GetFalAiKlingVideoV26ProImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiPixverseV55EffectsRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pixverse/v5.5/effects/requests/{request_id}/status' +} + +export type GetFalAiPixverseV55EffectsRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiPixverseV55EffectsRequestsByRequestIdStatusResponse = + GetFalAiPixverseV55EffectsRequestsByRequestIdStatusResponses[keyof GetFalAiPixverseV55EffectsRequestsByRequestIdStatusResponses] + +export type PutFalAiPixverseV55EffectsRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v5.5/effects/requests/{request_id}/cancel' +} + +export type PutFalAiPixverseV55EffectsRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiPixverseV55EffectsRequestsByRequestIdCancelResponse = + PutFalAiPixverseV55EffectsRequestsByRequestIdCancelResponses[keyof PutFalAiPixverseV55EffectsRequestsByRequestIdCancelResponses] + +export type PostFalAiPixverseV55EffectsData = { + body: SchemaPixverseV55EffectsInput + path?: never + query?: never + url: '/fal-ai/pixverse/v5.5/effects' +} + +export type PostFalAiPixverseV55EffectsResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPixverseV55EffectsResponse = + PostFalAiPixverseV55EffectsResponses[keyof PostFalAiPixverseV55EffectsResponses] + +export type GetFalAiPixverseV55EffectsRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v5.5/effects/requests/{request_id}' +} + +export type GetFalAiPixverseV55EffectsRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaPixverseV55EffectsOutput +} + +export type GetFalAiPixverseV55EffectsRequestsByRequestIdResponse = + GetFalAiPixverseV55EffectsRequestsByRequestIdResponses[keyof GetFalAiPixverseV55EffectsRequestsByRequestIdResponses] + +export type GetFalAiPixverseV55TransitionRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pixverse/v5.5/transition/requests/{request_id}/status' +} + +export type GetFalAiPixverseV55TransitionRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiPixverseV55TransitionRequestsByRequestIdStatusResponse = + GetFalAiPixverseV55TransitionRequestsByRequestIdStatusResponses[keyof GetFalAiPixverseV55TransitionRequestsByRequestIdStatusResponses] + +export type PutFalAiPixverseV55TransitionRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v5.5/transition/requests/{request_id}/cancel' +} + +export type PutFalAiPixverseV55TransitionRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiPixverseV55TransitionRequestsByRequestIdCancelResponse = + PutFalAiPixverseV55TransitionRequestsByRequestIdCancelResponses[keyof PutFalAiPixverseV55TransitionRequestsByRequestIdCancelResponses] + +export type PostFalAiPixverseV55TransitionData = { + body: SchemaPixverseV55TransitionInput + path?: never + query?: never + url: '/fal-ai/pixverse/v5.5/transition' +} + +export type PostFalAiPixverseV55TransitionResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPixverseV55TransitionResponse = + PostFalAiPixverseV55TransitionResponses[keyof PostFalAiPixverseV55TransitionResponses] + +export type GetFalAiPixverseV55TransitionRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v5.5/transition/requests/{request_id}' +} + +export type GetFalAiPixverseV55TransitionRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPixverseV55TransitionOutput +} + +export type GetFalAiPixverseV55TransitionRequestsByRequestIdResponse = + GetFalAiPixverseV55TransitionRequestsByRequestIdResponses[keyof GetFalAiPixverseV55TransitionRequestsByRequestIdResponses] + +export type GetFalAiPixverseV55ImageToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pixverse/v5.5/image-to-video/requests/{request_id}/status' +} + +export type GetFalAiPixverseV55ImageToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiPixverseV55ImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiPixverseV55ImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiPixverseV55ImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiPixverseV55ImageToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v5.5/image-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiPixverseV55ImageToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiPixverseV55ImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiPixverseV55ImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiPixverseV55ImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiPixverseV55ImageToVideoData = { + body: SchemaPixverseV55ImageToVideoInput + path?: never + query?: never + url: '/fal-ai/pixverse/v5.5/image-to-video' +} + +export type PostFalAiPixverseV55ImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPixverseV55ImageToVideoResponse = + PostFalAiPixverseV55ImageToVideoResponses[keyof PostFalAiPixverseV55ImageToVideoResponses] + +export type GetFalAiPixverseV55ImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v5.5/image-to-video/requests/{request_id}' +} + +export type GetFalAiPixverseV55ImageToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPixverseV55ImageToVideoOutput +} + +export type GetFalAiPixverseV55ImageToVideoRequestsByRequestIdResponse = + GetFalAiPixverseV55ImageToVideoRequestsByRequestIdResponses[keyof GetFalAiPixverseV55ImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiKlingVideoO1ImageToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-video/o1/image-to-video/requests/{request_id}/status' +} + +export type GetFalAiKlingVideoO1ImageToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiKlingVideoO1ImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoO1ImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoO1ImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoO1ImageToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/o1/image-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiKlingVideoO1ImageToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiKlingVideoO1ImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoO1ImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoO1ImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoO1ImageToVideoData = { + body: SchemaKlingVideoO1ImageToVideoInput + path?: never + query?: never + url: '/fal-ai/kling-video/o1/image-to-video' +} + +export type PostFalAiKlingVideoO1ImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoO1ImageToVideoResponse = + PostFalAiKlingVideoO1ImageToVideoResponses[keyof PostFalAiKlingVideoO1ImageToVideoResponses] + +export type GetFalAiKlingVideoO1ImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/o1/image-to-video/requests/{request_id}' +} + +export type GetFalAiKlingVideoO1ImageToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaKlingVideoO1ImageToVideoOutput +} + +export type GetFalAiKlingVideoO1ImageToVideoRequestsByRequestIdResponse = + GetFalAiKlingVideoO1ImageToVideoRequestsByRequestIdResponses[keyof GetFalAiKlingVideoO1ImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiKlingVideoO1ReferenceToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-video/o1/reference-to-video/requests/{request_id}/status' + } + +export type GetFalAiKlingVideoO1ReferenceToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiKlingVideoO1ReferenceToVideoRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoO1ReferenceToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoO1ReferenceToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoO1ReferenceToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/o1/reference-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiKlingVideoO1ReferenceToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiKlingVideoO1ReferenceToVideoRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoO1ReferenceToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoO1ReferenceToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoO1ReferenceToVideoData = { + body: SchemaKlingVideoO1ReferenceToVideoInput + path?: never + query?: never + url: '/fal-ai/kling-video/o1/reference-to-video' +} + +export type PostFalAiKlingVideoO1ReferenceToVideoResponses = { + /** + * The request status. 
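+ *
+ * Submitting returns `SchemaQueueStatus`, not the finished video; the
+ * `request_id` it carries feeds the status, result, and cancel routes
+ * defined alongside this type. A sketch assuming a hypothetical `submit`
+ * helper, with `input` standing in for a real payload:
+ *
+ * @example
+ * const data: PostFalAiKlingVideoO1ReferenceToVideoData = {
+ *   body: input, // SchemaKlingVideoO1ReferenceToVideoInput
+ *   url: '/fal-ai/kling-video/o1/reference-to-video',
+ * }
+ * const queued: PostFalAiKlingVideoO1ReferenceToVideoResponse = await submit(data)
+ * // queued.request_id plugs into .../requests/{request_id}/status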
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoO1ReferenceToVideoResponse = + PostFalAiKlingVideoO1ReferenceToVideoResponses[keyof PostFalAiKlingVideoO1ReferenceToVideoResponses] + +export type GetFalAiKlingVideoO1ReferenceToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/o1/reference-to-video/requests/{request_id}' +} + +export type GetFalAiKlingVideoO1ReferenceToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaKlingVideoO1ReferenceToVideoOutput +} + +export type GetFalAiKlingVideoO1ReferenceToVideoRequestsByRequestIdResponse = + GetFalAiKlingVideoO1ReferenceToVideoRequestsByRequestIdResponses[keyof GetFalAiKlingVideoO1ReferenceToVideoRequestsByRequestIdResponses] + +export type GetFalAiLtx2ImageToVideoFastRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ltx-2/image-to-video/fast/requests/{request_id}/status' +} + +export type GetFalAiLtx2ImageToVideoFastRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLtx2ImageToVideoFastRequestsByRequestIdStatusResponse = + GetFalAiLtx2ImageToVideoFastRequestsByRequestIdStatusResponses[keyof GetFalAiLtx2ImageToVideoFastRequestsByRequestIdStatusResponses] + +export type PutFalAiLtx2ImageToVideoFastRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2/image-to-video/fast/requests/{request_id}/cancel' +} + +export type PutFalAiLtx2ImageToVideoFastRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLtx2ImageToVideoFastRequestsByRequestIdCancelResponse = + PutFalAiLtx2ImageToVideoFastRequestsByRequestIdCancelResponses[keyof PutFalAiLtx2ImageToVideoFastRequestsByRequestIdCancelResponses] + +export type PostFalAiLtx2ImageToVideoFastData = { + body: SchemaLtx2ImageToVideoFastInput + path?: never + query?: never + url: '/fal-ai/ltx-2/image-to-video/fast' +} + +export type PostFalAiLtx2ImageToVideoFastResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtx2ImageToVideoFastResponse = + PostFalAiLtx2ImageToVideoFastResponses[keyof PostFalAiLtx2ImageToVideoFastResponses] + +export type GetFalAiLtx2ImageToVideoFastRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2/image-to-video/fast/requests/{request_id}' +} + +export type GetFalAiLtx2ImageToVideoFastRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLtx2ImageToVideoFastOutput +} + +export type GetFalAiLtx2ImageToVideoFastRequestsByRequestIdResponse = + GetFalAiLtx2ImageToVideoFastRequestsByRequestIdResponses[keyof GetFalAiLtx2ImageToVideoFastRequestsByRequestIdResponses] + +export type GetFalAiLtx2ImageToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
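+ *
+ * The flag is numeric, not boolean: send `1` to include the optional `logs`
+ * object on `SchemaQueueStatus`, `0` (or omit the key) to skip it. A sketch
+ * with an assumed `getStatus` helper:
+ *
+ * @example
+ * const s = await getStatus({ request_id, logs: 1 })
+ * if (s.logs) console.log(s.logs)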
+ */ + logs?: number + } + url: '/fal-ai/ltx-2/image-to-video/requests/{request_id}/status' +} + +export type GetFalAiLtx2ImageToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLtx2ImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiLtx2ImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiLtx2ImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiLtx2ImageToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2/image-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiLtx2ImageToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLtx2ImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiLtx2ImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiLtx2ImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiLtx2ImageToVideoData = { + body: SchemaLtx2ImageToVideoInput + path?: never + query?: never + url: '/fal-ai/ltx-2/image-to-video' +} + +export type PostFalAiLtx2ImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtx2ImageToVideoResponse = + PostFalAiLtx2ImageToVideoResponses[keyof PostFalAiLtx2ImageToVideoResponses] + +export type GetFalAiLtx2ImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2/image-to-video/requests/{request_id}' +} + +export type GetFalAiLtx2ImageToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLtx2ImageToVideoOutput +} + +export type GetFalAiLtx2ImageToVideoRequestsByRequestIdResponse = + GetFalAiLtx2ImageToVideoRequestsByRequestIdResponses[keyof GetFalAiLtx2ImageToVideoRequestsByRequestIdResponses] + +export type GetBytedanceLynxRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/bytedance/lynx/requests/{request_id}/status' +} + +export type GetBytedanceLynxRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetBytedanceLynxRequestsByRequestIdStatusResponse = + GetBytedanceLynxRequestsByRequestIdStatusResponses[keyof GetBytedanceLynxRequestsByRequestIdStatusResponses] + +export type PutBytedanceLynxRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bytedance/lynx/requests/{request_id}/cancel' +} + +export type PutBytedanceLynxRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutBytedanceLynxRequestsByRequestIdCancelResponse = + PutBytedanceLynxRequestsByRequestIdCancelResponses[keyof PutBytedanceLynxRequestsByRequestIdCancelResponses] + +export type PostBytedanceLynxData = { + body: SchemaLynxInput + path?: never + query?: never + url: '/bytedance/lynx' +} + +export type PostBytedanceLynxResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostBytedanceLynxResponse = + PostBytedanceLynxResponses[keyof PostBytedanceLynxResponses] + +export type GetBytedanceLynxRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bytedance/lynx/requests/{request_id}' +} + +export type GetBytedanceLynxRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLynxOutput +} + +export type GetBytedanceLynxRequestsByRequestIdResponse = + GetBytedanceLynxRequestsByRequestIdResponses[keyof GetBytedanceLynxRequestsByRequestIdResponses] + +export type GetFalAiPixverseSwapRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pixverse/swap/requests/{request_id}/status' +} + +export type GetFalAiPixverseSwapRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiPixverseSwapRequestsByRequestIdStatusResponse = + GetFalAiPixverseSwapRequestsByRequestIdStatusResponses[keyof GetFalAiPixverseSwapRequestsByRequestIdStatusResponses] + +export type PutFalAiPixverseSwapRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/swap/requests/{request_id}/cancel' +} + +export type PutFalAiPixverseSwapRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiPixverseSwapRequestsByRequestIdCancelResponse = + PutFalAiPixverseSwapRequestsByRequestIdCancelResponses[keyof PutFalAiPixverseSwapRequestsByRequestIdCancelResponses] + +export type PostFalAiPixverseSwapData = { + body: SchemaPixverseSwapInput + path?: never + query?: never + url: '/fal-ai/pixverse/swap' +} + +export type PostFalAiPixverseSwapResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPixverseSwapResponse = + PostFalAiPixverseSwapResponses[keyof PostFalAiPixverseSwapResponses] + +export type GetFalAiPixverseSwapRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/swap/requests/{request_id}' +} + +export type GetFalAiPixverseSwapRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPixverseSwapOutput +} + +export type GetFalAiPixverseSwapRequestsByRequestIdResponse = + GetFalAiPixverseSwapRequestsByRequestIdResponses[keyof GetFalAiPixverseSwapRequestsByRequestIdResponses] + +export type GetFalAiPikaV22PikaframesRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pika/v2.2/pikaframes/requests/{request_id}/status' +} + +export type GetFalAiPikaV22PikaframesRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiPikaV22PikaframesRequestsByRequestIdStatusResponse = + GetFalAiPikaV22PikaframesRequestsByRequestIdStatusResponses[keyof GetFalAiPikaV22PikaframesRequestsByRequestIdStatusResponses] + +export type PutFalAiPikaV22PikaframesRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pika/v2.2/pikaframes/requests/{request_id}/cancel' +} + +export type PutFalAiPikaV22PikaframesRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiPikaV22PikaframesRequestsByRequestIdCancelResponse = + PutFalAiPikaV22PikaframesRequestsByRequestIdCancelResponses[keyof PutFalAiPikaV22PikaframesRequestsByRequestIdCancelResponses] + +export type PostFalAiPikaV22PikaframesData = { + body: SchemaPikaV22PikaframesInput + path?: never + query?: never + url: '/fal-ai/pika/v2.2/pikaframes' +} + +export type PostFalAiPikaV22PikaframesResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPikaV22PikaframesResponse = + PostFalAiPikaV22PikaframesResponses[keyof PostFalAiPikaV22PikaframesResponses] + +export type GetFalAiPikaV22PikaframesRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pika/v2.2/pikaframes/requests/{request_id}' +} + +export type GetFalAiPikaV22PikaframesRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPikaV22PikaframesOutput +} + +export type GetFalAiPikaV22PikaframesRequestsByRequestIdResponse = + GetFalAiPikaV22PikaframesRequestsByRequestIdResponses[keyof GetFalAiPikaV22PikaframesRequestsByRequestIdResponses] + +export type GetFalAiLongcatVideoImageToVideo720pRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/longcat-video/image-to-video/720p/requests/{request_id}/status' + } + +export type GetFalAiLongcatVideoImageToVideo720pRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiLongcatVideoImageToVideo720pRequestsByRequestIdStatusResponse = + GetFalAiLongcatVideoImageToVideo720pRequestsByRequestIdStatusResponses[keyof GetFalAiLongcatVideoImageToVideo720pRequestsByRequestIdStatusResponses] + +export type PutFalAiLongcatVideoImageToVideo720pRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/longcat-video/image-to-video/720p/requests/{request_id}/cancel' + } + +export type PutFalAiLongcatVideoImageToVideo720pRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiLongcatVideoImageToVideo720pRequestsByRequestIdCancelResponse = + PutFalAiLongcatVideoImageToVideo720pRequestsByRequestIdCancelResponses[keyof PutFalAiLongcatVideoImageToVideo720pRequestsByRequestIdCancelResponses] + +export type PostFalAiLongcatVideoImageToVideo720pData = { + body: SchemaLongcatVideoImageToVideo720pInput + path?: never + query?: never + url: '/fal-ai/longcat-video/image-to-video/720p' +} + +export type PostFalAiLongcatVideoImageToVideo720pResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLongcatVideoImageToVideo720pResponse = + PostFalAiLongcatVideoImageToVideo720pResponses[keyof PostFalAiLongcatVideoImageToVideo720pResponses] + +export type GetFalAiLongcatVideoImageToVideo720pRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/longcat-video/image-to-video/720p/requests/{request_id}' +} + +export type GetFalAiLongcatVideoImageToVideo720pRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLongcatVideoImageToVideo720pOutput +} + +export type GetFalAiLongcatVideoImageToVideo720pRequestsByRequestIdResponse = + GetFalAiLongcatVideoImageToVideo720pRequestsByRequestIdResponses[keyof GetFalAiLongcatVideoImageToVideo720pRequestsByRequestIdResponses] + +export type GetFalAiLongcatVideoImageToVideo480pRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/longcat-video/image-to-video/480p/requests/{request_id}/status' + } + +export type GetFalAiLongcatVideoImageToVideo480pRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiLongcatVideoImageToVideo480pRequestsByRequestIdStatusResponse = + GetFalAiLongcatVideoImageToVideo480pRequestsByRequestIdStatusResponses[keyof GetFalAiLongcatVideoImageToVideo480pRequestsByRequestIdStatusResponses] + +export type PutFalAiLongcatVideoImageToVideo480pRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/longcat-video/image-to-video/480p/requests/{request_id}/cancel' + } + +export type PutFalAiLongcatVideoImageToVideo480pRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiLongcatVideoImageToVideo480pRequestsByRequestIdCancelResponse = + PutFalAiLongcatVideoImageToVideo480pRequestsByRequestIdCancelResponses[keyof PutFalAiLongcatVideoImageToVideo480pRequestsByRequestIdCancelResponses] + +export type PostFalAiLongcatVideoImageToVideo480pData = { + body: SchemaLongcatVideoImageToVideo480pInput + path?: never + query?: never + url: '/fal-ai/longcat-video/image-to-video/480p' +} + +export type PostFalAiLongcatVideoImageToVideo480pResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiLongcatVideoImageToVideo480pResponse = + PostFalAiLongcatVideoImageToVideo480pResponses[keyof PostFalAiLongcatVideoImageToVideo480pResponses] + +export type GetFalAiLongcatVideoImageToVideo480pRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/longcat-video/image-to-video/480p/requests/{request_id}' +} + +export type GetFalAiLongcatVideoImageToVideo480pRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLongcatVideoImageToVideo480pOutput +} + +export type GetFalAiLongcatVideoImageToVideo480pRequestsByRequestIdResponse = + GetFalAiLongcatVideoImageToVideo480pRequestsByRequestIdResponses[keyof GetFalAiLongcatVideoImageToVideo480pRequestsByRequestIdResponses] + +export type GetFalAiLongcatVideoDistilledImageToVideo720pRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/longcat-video/distilled/image-to-video/720p/requests/{request_id}/status' + } + +export type GetFalAiLongcatVideoDistilledImageToVideo720pRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiLongcatVideoDistilledImageToVideo720pRequestsByRequestIdStatusResponse = + GetFalAiLongcatVideoDistilledImageToVideo720pRequestsByRequestIdStatusResponses[keyof GetFalAiLongcatVideoDistilledImageToVideo720pRequestsByRequestIdStatusResponses] + +export type PutFalAiLongcatVideoDistilledImageToVideo720pRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/longcat-video/distilled/image-to-video/720p/requests/{request_id}/cancel' + } + +export type PutFalAiLongcatVideoDistilledImageToVideo720pRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiLongcatVideoDistilledImageToVideo720pRequestsByRequestIdCancelResponse = + PutFalAiLongcatVideoDistilledImageToVideo720pRequestsByRequestIdCancelResponses[keyof PutFalAiLongcatVideoDistilledImageToVideo720pRequestsByRequestIdCancelResponses] + +export type PostFalAiLongcatVideoDistilledImageToVideo720pData = { + body: SchemaLongcatVideoDistilledImageToVideo720pInput + path?: never + query?: never + url: '/fal-ai/longcat-video/distilled/image-to-video/720p' +} + +export type PostFalAiLongcatVideoDistilledImageToVideo720pResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLongcatVideoDistilledImageToVideo720pResponse = + PostFalAiLongcatVideoDistilledImageToVideo720pResponses[keyof PostFalAiLongcatVideoDistilledImageToVideo720pResponses] + +export type GetFalAiLongcatVideoDistilledImageToVideo720pRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/longcat-video/distilled/image-to-video/720p/requests/{request_id}' + } + +export type GetFalAiLongcatVideoDistilledImageToVideo720pRequestsByRequestIdResponses = + { + /** + * Result of the request. 
+ */ + 200: SchemaLongcatVideoDistilledImageToVideo720pOutput + } + +export type GetFalAiLongcatVideoDistilledImageToVideo720pRequestsByRequestIdResponse = + GetFalAiLongcatVideoDistilledImageToVideo720pRequestsByRequestIdResponses[keyof GetFalAiLongcatVideoDistilledImageToVideo720pRequestsByRequestIdResponses] + +export type GetFalAiLongcatVideoDistilledImageToVideo480pRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/longcat-video/distilled/image-to-video/480p/requests/{request_id}/status' + } + +export type GetFalAiLongcatVideoDistilledImageToVideo480pRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiLongcatVideoDistilledImageToVideo480pRequestsByRequestIdStatusResponse = + GetFalAiLongcatVideoDistilledImageToVideo480pRequestsByRequestIdStatusResponses[keyof GetFalAiLongcatVideoDistilledImageToVideo480pRequestsByRequestIdStatusResponses] + +export type PutFalAiLongcatVideoDistilledImageToVideo480pRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/longcat-video/distilled/image-to-video/480p/requests/{request_id}/cancel' + } + +export type PutFalAiLongcatVideoDistilledImageToVideo480pRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiLongcatVideoDistilledImageToVideo480pRequestsByRequestIdCancelResponse = + PutFalAiLongcatVideoDistilledImageToVideo480pRequestsByRequestIdCancelResponses[keyof PutFalAiLongcatVideoDistilledImageToVideo480pRequestsByRequestIdCancelResponses] + +export type PostFalAiLongcatVideoDistilledImageToVideo480pData = { + body: SchemaLongcatVideoDistilledImageToVideo480pInput + path?: never + query?: never + url: '/fal-ai/longcat-video/distilled/image-to-video/480p' +} + +export type PostFalAiLongcatVideoDistilledImageToVideo480pResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLongcatVideoDistilledImageToVideo480pResponse = + PostFalAiLongcatVideoDistilledImageToVideo480pResponses[keyof PostFalAiLongcatVideoDistilledImageToVideo480pResponses] + +export type GetFalAiLongcatVideoDistilledImageToVideo480pRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/longcat-video/distilled/image-to-video/480p/requests/{request_id}' + } + +export type GetFalAiLongcatVideoDistilledImageToVideo480pRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaLongcatVideoDistilledImageToVideo480pOutput + } + +export type GetFalAiLongcatVideoDistilledImageToVideo480pRequestsByRequestIdResponse = + GetFalAiLongcatVideoDistilledImageToVideo480pRequestsByRequestIdResponses[keyof GetFalAiLongcatVideoDistilledImageToVideo480pRequestsByRequestIdResponses] + +export type GetFalAiMinimaxHailuo23FastStandardImageToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/minimax/hailuo-2.3-fast/standard/image-to-video/requests/{request_id}/status' + } + +export type GetFalAiMinimaxHailuo23FastStandardImageToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiMinimaxHailuo23FastStandardImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiMinimaxHailuo23FastStandardImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiMinimaxHailuo23FastStandardImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiMinimaxHailuo23FastStandardImageToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/hailuo-2.3-fast/standard/image-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiMinimaxHailuo23FastStandardImageToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiMinimaxHailuo23FastStandardImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiMinimaxHailuo23FastStandardImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiMinimaxHailuo23FastStandardImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiMinimaxHailuo23FastStandardImageToVideoData = { + body: SchemaMinimaxHailuo23FastStandardImageToVideoInput + path?: never + query?: never + url: '/fal-ai/minimax/hailuo-2.3-fast/standard/image-to-video' +} + +export type PostFalAiMinimaxHailuo23FastStandardImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMinimaxHailuo23FastStandardImageToVideoResponse = + PostFalAiMinimaxHailuo23FastStandardImageToVideoResponses[keyof PostFalAiMinimaxHailuo23FastStandardImageToVideoResponses] + +export type GetFalAiMinimaxHailuo23FastStandardImageToVideoRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/hailuo-2.3-fast/standard/image-to-video/requests/{request_id}' + } + +export type GetFalAiMinimaxHailuo23FastStandardImageToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaMinimaxHailuo23FastStandardImageToVideoOutput + } + +export type GetFalAiMinimaxHailuo23FastStandardImageToVideoRequestsByRequestIdResponse = + GetFalAiMinimaxHailuo23FastStandardImageToVideoRequestsByRequestIdResponses[keyof GetFalAiMinimaxHailuo23FastStandardImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiMinimaxHailuo23StandardImageToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/minimax/hailuo-2.3/standard/image-to-video/requests/{request_id}/status' + } + +export type GetFalAiMinimaxHailuo23StandardImageToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiMinimaxHailuo23StandardImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiMinimaxHailuo23StandardImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiMinimaxHailuo23StandardImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiMinimaxHailuo23StandardImageToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/hailuo-2.3/standard/image-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiMinimaxHailuo23StandardImageToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiMinimaxHailuo23StandardImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiMinimaxHailuo23StandardImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiMinimaxHailuo23StandardImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiMinimaxHailuo23StandardImageToVideoData = { + body: SchemaMinimaxHailuo23StandardImageToVideoInput + path?: never + query?: never + url: '/fal-ai/minimax/hailuo-2.3/standard/image-to-video' +} + +export type PostFalAiMinimaxHailuo23StandardImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMinimaxHailuo23StandardImageToVideoResponse = + PostFalAiMinimaxHailuo23StandardImageToVideoResponses[keyof PostFalAiMinimaxHailuo23StandardImageToVideoResponses] + +export type GetFalAiMinimaxHailuo23StandardImageToVideoRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/hailuo-2.3/standard/image-to-video/requests/{request_id}' + } + +export type GetFalAiMinimaxHailuo23StandardImageToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaMinimaxHailuo23StandardImageToVideoOutput + } + +export type GetFalAiMinimaxHailuo23StandardImageToVideoRequestsByRequestIdResponse = + GetFalAiMinimaxHailuo23StandardImageToVideoRequestsByRequestIdResponses[keyof GetFalAiMinimaxHailuo23StandardImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiMinimaxHailuo23FastProImageToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/minimax/hailuo-2.3-fast/pro/image-to-video/requests/{request_id}/status' + } + +export type GetFalAiMinimaxHailuo23FastProImageToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiMinimaxHailuo23FastProImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiMinimaxHailuo23FastProImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiMinimaxHailuo23FastProImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiMinimaxHailuo23FastProImageToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/hailuo-2.3-fast/pro/image-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiMinimaxHailuo23FastProImageToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. 
+ */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiMinimaxHailuo23FastProImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiMinimaxHailuo23FastProImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiMinimaxHailuo23FastProImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiMinimaxHailuo23FastProImageToVideoData = { + body: SchemaMinimaxHailuo23FastProImageToVideoInput + path?: never + query?: never + url: '/fal-ai/minimax/hailuo-2.3-fast/pro/image-to-video' +} + +export type PostFalAiMinimaxHailuo23FastProImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMinimaxHailuo23FastProImageToVideoResponse = + PostFalAiMinimaxHailuo23FastProImageToVideoResponses[keyof PostFalAiMinimaxHailuo23FastProImageToVideoResponses] + +export type GetFalAiMinimaxHailuo23FastProImageToVideoRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/hailuo-2.3-fast/pro/image-to-video/requests/{request_id}' + } + +export type GetFalAiMinimaxHailuo23FastProImageToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaMinimaxHailuo23FastProImageToVideoOutput + } + +export type GetFalAiMinimaxHailuo23FastProImageToVideoRequestsByRequestIdResponse = + GetFalAiMinimaxHailuo23FastProImageToVideoRequestsByRequestIdResponses[keyof GetFalAiMinimaxHailuo23FastProImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiBytedanceSeedanceV1ProFastImageToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/bytedance/seedance/v1/pro/fast/image-to-video/requests/{request_id}/status' + } + +export type GetFalAiBytedanceSeedanceV1ProFastImageToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiBytedanceSeedanceV1ProFastImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiBytedanceSeedanceV1ProFastImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiBytedanceSeedanceV1ProFastImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiBytedanceSeedanceV1ProFastImageToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bytedance/seedance/v1/pro/fast/image-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiBytedanceSeedanceV1ProFastImageToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiBytedanceSeedanceV1ProFastImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiBytedanceSeedanceV1ProFastImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiBytedanceSeedanceV1ProFastImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiBytedanceSeedanceV1ProFastImageToVideoData = { + body: SchemaBytedanceSeedanceV1ProFastImageToVideoInput + path?: never + query?: never + url: '/fal-ai/bytedance/seedance/v1/pro/fast/image-to-video' +} + +export type PostFalAiBytedanceSeedanceV1ProFastImageToVideoResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiBytedanceSeedanceV1ProFastImageToVideoResponse = + PostFalAiBytedanceSeedanceV1ProFastImageToVideoResponses[keyof PostFalAiBytedanceSeedanceV1ProFastImageToVideoResponses] + +export type GetFalAiBytedanceSeedanceV1ProFastImageToVideoRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bytedance/seedance/v1/pro/fast/image-to-video/requests/{request_id}' + } + +export type GetFalAiBytedanceSeedanceV1ProFastImageToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaBytedanceSeedanceV1ProFastImageToVideoOutput + } + +export type GetFalAiBytedanceSeedanceV1ProFastImageToVideoRequestsByRequestIdResponse = + GetFalAiBytedanceSeedanceV1ProFastImageToVideoRequestsByRequestIdResponses[keyof GetFalAiBytedanceSeedanceV1ProFastImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiViduQ2ImageToVideoTurboRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/vidu/q2/image-to-video/turbo/requests/{request_id}/status' +} + +export type GetFalAiViduQ2ImageToVideoTurboRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiViduQ2ImageToVideoTurboRequestsByRequestIdStatusResponse = + GetFalAiViduQ2ImageToVideoTurboRequestsByRequestIdStatusResponses[keyof GetFalAiViduQ2ImageToVideoTurboRequestsByRequestIdStatusResponses] + +export type PutFalAiViduQ2ImageToVideoTurboRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/vidu/q2/image-to-video/turbo/requests/{request_id}/cancel' +} + +export type PutFalAiViduQ2ImageToVideoTurboRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiViduQ2ImageToVideoTurboRequestsByRequestIdCancelResponse = + PutFalAiViduQ2ImageToVideoTurboRequestsByRequestIdCancelResponses[keyof PutFalAiViduQ2ImageToVideoTurboRequestsByRequestIdCancelResponses] + +export type PostFalAiViduQ2ImageToVideoTurboData = { + body: SchemaViduQ2ImageToVideoTurboInput + path?: never + query?: never + url: '/fal-ai/vidu/q2/image-to-video/turbo' +} + +export type PostFalAiViduQ2ImageToVideoTurboResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiViduQ2ImageToVideoTurboResponse = + PostFalAiViduQ2ImageToVideoTurboResponses[keyof PostFalAiViduQ2ImageToVideoTurboResponses] + +export type GetFalAiViduQ2ImageToVideoTurboRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/vidu/q2/image-to-video/turbo/requests/{request_id}' +} + +export type GetFalAiViduQ2ImageToVideoTurboRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaViduQ2ImageToVideoTurboOutput +} + +export type GetFalAiViduQ2ImageToVideoTurboRequestsByRequestIdResponse = + GetFalAiViduQ2ImageToVideoTurboRequestsByRequestIdResponses[keyof GetFalAiViduQ2ImageToVideoTurboRequestsByRequestIdResponses] + +export type GetFalAiViduQ2ImageToVideoProRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/vidu/q2/image-to-video/pro/requests/{request_id}/status' +} + +export type GetFalAiViduQ2ImageToVideoProRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiViduQ2ImageToVideoProRequestsByRequestIdStatusResponse = + GetFalAiViduQ2ImageToVideoProRequestsByRequestIdStatusResponses[keyof GetFalAiViduQ2ImageToVideoProRequestsByRequestIdStatusResponses] + +export type PutFalAiViduQ2ImageToVideoProRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/vidu/q2/image-to-video/pro/requests/{request_id}/cancel' +} + +export type PutFalAiViduQ2ImageToVideoProRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiViduQ2ImageToVideoProRequestsByRequestIdCancelResponse = + PutFalAiViduQ2ImageToVideoProRequestsByRequestIdCancelResponses[keyof PutFalAiViduQ2ImageToVideoProRequestsByRequestIdCancelResponses] + +export type PostFalAiViduQ2ImageToVideoProData = { + body: SchemaViduQ2ImageToVideoProInput + path?: never + query?: never + url: '/fal-ai/vidu/q2/image-to-video/pro' +} + +export type PostFalAiViduQ2ImageToVideoProResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiViduQ2ImageToVideoProResponse = + PostFalAiViduQ2ImageToVideoProResponses[keyof PostFalAiViduQ2ImageToVideoProResponses] + +export type GetFalAiViduQ2ImageToVideoProRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/vidu/q2/image-to-video/pro/requests/{request_id}' +} + +export type GetFalAiViduQ2ImageToVideoProRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaViduQ2ImageToVideoProOutput +} + +export type GetFalAiViduQ2ImageToVideoProRequestsByRequestIdResponse = + GetFalAiViduQ2ImageToVideoProRequestsByRequestIdResponses[keyof GetFalAiViduQ2ImageToVideoProRequestsByRequestIdResponses] + +export type GetFalAiKlingVideoV25TurboStandardImageToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-video/v2.5-turbo/standard/image-to-video/requests/{request_id}/status' + } + +export type GetFalAiKlingVideoV25TurboStandardImageToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiKlingVideoV25TurboStandardImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoV25TurboStandardImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoV25TurboStandardImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoV25TurboStandardImageToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v2.5-turbo/standard/image-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiKlingVideoV25TurboStandardImageToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiKlingVideoV25TurboStandardImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoV25TurboStandardImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoV25TurboStandardImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoV25TurboStandardImageToVideoData = { + body: SchemaKlingVideoV25TurboStandardImageToVideoInput + path?: never + query?: never + url: '/fal-ai/kling-video/v2.5-turbo/standard/image-to-video' +} + +export type PostFalAiKlingVideoV25TurboStandardImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoV25TurboStandardImageToVideoResponse = + PostFalAiKlingVideoV25TurboStandardImageToVideoResponses[keyof PostFalAiKlingVideoV25TurboStandardImageToVideoResponses] + +export type GetFalAiKlingVideoV25TurboStandardImageToVideoRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v2.5-turbo/standard/image-to-video/requests/{request_id}' + } + +export type GetFalAiKlingVideoV25TurboStandardImageToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaKlingVideoV25TurboStandardImageToVideoOutput + } + +export type GetFalAiKlingVideoV25TurboStandardImageToVideoRequestsByRequestIdResponse = + GetFalAiKlingVideoV25TurboStandardImageToVideoRequestsByRequestIdResponses[keyof GetFalAiKlingVideoV25TurboStandardImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiVeo31FastFirstLastFrameToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/veo3.1/fast/first-last-frame-to-video/requests/{request_id}/status' + } + +export type GetFalAiVeo31FastFirstLastFrameToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiVeo31FastFirstLastFrameToVideoRequestsByRequestIdStatusResponse = + GetFalAiVeo31FastFirstLastFrameToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiVeo31FastFirstLastFrameToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiVeo31FastFirstLastFrameToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/veo3.1/fast/first-last-frame-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiVeo31FastFirstLastFrameToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. 
+ */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiVeo31FastFirstLastFrameToVideoRequestsByRequestIdCancelResponse = + PutFalAiVeo31FastFirstLastFrameToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiVeo31FastFirstLastFrameToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiVeo31FastFirstLastFrameToVideoData = { + body: SchemaVeo31FastFirstLastFrameToVideoInput + path?: never + query?: never + url: '/fal-ai/veo3.1/fast/first-last-frame-to-video' +} + +export type PostFalAiVeo31FastFirstLastFrameToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiVeo31FastFirstLastFrameToVideoResponse = + PostFalAiVeo31FastFirstLastFrameToVideoResponses[keyof PostFalAiVeo31FastFirstLastFrameToVideoResponses] + +export type GetFalAiVeo31FastFirstLastFrameToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/veo3.1/fast/first-last-frame-to-video/requests/{request_id}' +} + +export type GetFalAiVeo31FastFirstLastFrameToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaVeo31FastFirstLastFrameToVideoOutput + } + +export type GetFalAiVeo31FastFirstLastFrameToVideoRequestsByRequestIdResponse = + GetFalAiVeo31FastFirstLastFrameToVideoRequestsByRequestIdResponses[keyof GetFalAiVeo31FastFirstLastFrameToVideoRequestsByRequestIdResponses] + +export type GetFalAiVeo31FirstLastFrameToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/veo3.1/first-last-frame-to-video/requests/{request_id}/status' +} + +export type GetFalAiVeo31FirstLastFrameToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiVeo31FirstLastFrameToVideoRequestsByRequestIdStatusResponse = + GetFalAiVeo31FirstLastFrameToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiVeo31FirstLastFrameToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiVeo31FirstLastFrameToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/veo3.1/first-last-frame-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiVeo31FirstLastFrameToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiVeo31FirstLastFrameToVideoRequestsByRequestIdCancelResponse = + PutFalAiVeo31FirstLastFrameToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiVeo31FirstLastFrameToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiVeo31FirstLastFrameToVideoData = { + body: SchemaVeo31FirstLastFrameToVideoInput + path?: never + query?: never + url: '/fal-ai/veo3.1/first-last-frame-to-video' +} + +export type PostFalAiVeo31FirstLastFrameToVideoResponses = { + /** + * The request status. 
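+   *
+   * Editorial note: submitting to this endpoint enqueues the job; the `200` body
+   * is the queue envelope, not the finished video. A minimal sketch, assuming the
+   * public `https://queue.fal.run` host and a `FAL_KEY` environment variable
+   * (both assumptions, not taken from this spec):
+   *
+   * ```ts
+   * // Illustrative only — submit a job and keep the request_id for polling.
+   * // `input` is assumed to satisfy SchemaVeo31FastFirstLastFrameToVideoInput.
+   * const res = await fetch(
+   *   'https://queue.fal.run/fal-ai/veo3.1/fast/first-last-frame-to-video',
+   *   {
+   *     method: 'POST',
+   *     headers: {
+   *       Authorization: `Key ${process.env.FAL_KEY}`,
+   *       'Content-Type': 'application/json',
+   *     },
+   *     body: JSON.stringify(input),
+   *   },
+   * )
+   * const { request_id } = await res.json() // SchemaQueueStatus
+   * ```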
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiVeo31FirstLastFrameToVideoResponse = + PostFalAiVeo31FirstLastFrameToVideoResponses[keyof PostFalAiVeo31FirstLastFrameToVideoResponses] + +export type GetFalAiVeo31FirstLastFrameToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/veo3.1/first-last-frame-to-video/requests/{request_id}' +} + +export type GetFalAiVeo31FirstLastFrameToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaVeo31FirstLastFrameToVideoOutput +} + +export type GetFalAiVeo31FirstLastFrameToVideoRequestsByRequestIdResponse = + GetFalAiVeo31FirstLastFrameToVideoRequestsByRequestIdResponses[keyof GetFalAiVeo31FirstLastFrameToVideoRequestsByRequestIdResponses] + +export type GetFalAiVeo31ReferenceToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/veo3.1/reference-to-video/requests/{request_id}/status' +} + +export type GetFalAiVeo31ReferenceToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiVeo31ReferenceToVideoRequestsByRequestIdStatusResponse = + GetFalAiVeo31ReferenceToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiVeo31ReferenceToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiVeo31ReferenceToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/veo3.1/reference-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiVeo31ReferenceToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiVeo31ReferenceToVideoRequestsByRequestIdCancelResponse = + PutFalAiVeo31ReferenceToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiVeo31ReferenceToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiVeo31ReferenceToVideoData = { + body: SchemaVeo31ReferenceToVideoInput + path?: never + query?: never + url: '/fal-ai/veo3.1/reference-to-video' +} + +export type PostFalAiVeo31ReferenceToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiVeo31ReferenceToVideoResponse = + PostFalAiVeo31ReferenceToVideoResponses[keyof PostFalAiVeo31ReferenceToVideoResponses] + +export type GetFalAiVeo31ReferenceToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/veo3.1/reference-to-video/requests/{request_id}' +} + +export type GetFalAiVeo31ReferenceToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaVeo31ReferenceToVideoOutput +} + +export type GetFalAiVeo31ReferenceToVideoRequestsByRequestIdResponse = + GetFalAiVeo31ReferenceToVideoRequestsByRequestIdResponses[keyof GetFalAiVeo31ReferenceToVideoRequestsByRequestIdResponses] + +export type GetFalAiVeo31FastImageToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
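+   * Note that this is an integer flag rather than a boolean, so a status poll
+   * with logs enabled looks like `.../requests/{request_id}/status?logs=1`
+   * (illustrative).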
+ */ + logs?: number + } + url: '/fal-ai/veo3.1/fast/image-to-video/requests/{request_id}/status' +} + +export type GetFalAiVeo31FastImageToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiVeo31FastImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiVeo31FastImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiVeo31FastImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiVeo31FastImageToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/veo3.1/fast/image-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiVeo31FastImageToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiVeo31FastImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiVeo31FastImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiVeo31FastImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiVeo31FastImageToVideoData = { + body: SchemaVeo31FastImageToVideoInput + path?: never + query?: never + url: '/fal-ai/veo3.1/fast/image-to-video' +} + +export type PostFalAiVeo31FastImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiVeo31FastImageToVideoResponse = + PostFalAiVeo31FastImageToVideoResponses[keyof PostFalAiVeo31FastImageToVideoResponses] + +export type GetFalAiVeo31FastImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/veo3.1/fast/image-to-video/requests/{request_id}' +} + +export type GetFalAiVeo31FastImageToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaVeo31FastImageToVideoOutput +} + +export type GetFalAiVeo31FastImageToVideoRequestsByRequestIdResponse = + GetFalAiVeo31FastImageToVideoRequestsByRequestIdResponses[keyof GetFalAiVeo31FastImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiVeo31ImageToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/veo3.1/image-to-video/requests/{request_id}/status' +} + +export type GetFalAiVeo31ImageToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiVeo31ImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiVeo31ImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiVeo31ImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiVeo31ImageToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/veo3.1/image-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiVeo31ImageToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiVeo31ImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiVeo31ImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiVeo31ImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiVeo31ImageToVideoData = { + body: SchemaVeo31ImageToVideoInput + path?: never + query?: never + url: '/fal-ai/veo3.1/image-to-video' +} + +export type PostFalAiVeo31ImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiVeo31ImageToVideoResponse = + PostFalAiVeo31ImageToVideoResponses[keyof PostFalAiVeo31ImageToVideoResponses] + +export type GetFalAiVeo31ImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/veo3.1/image-to-video/requests/{request_id}' +} + +export type GetFalAiVeo31ImageToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaVeo31ImageToVideoOutput +} + +export type GetFalAiVeo31ImageToVideoRequestsByRequestIdResponse = + GetFalAiVeo31ImageToVideoRequestsByRequestIdResponses[keyof GetFalAiVeo31ImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiSora2ImageToVideoProRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/sora-2/image-to-video/pro/requests/{request_id}/status' +} + +export type GetFalAiSora2ImageToVideoProRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSora2ImageToVideoProRequestsByRequestIdStatusResponse = + GetFalAiSora2ImageToVideoProRequestsByRequestIdStatusResponses[keyof GetFalAiSora2ImageToVideoProRequestsByRequestIdStatusResponses] + +export type PutFalAiSora2ImageToVideoProRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sora-2/image-to-video/pro/requests/{request_id}/cancel' +} + +export type PutFalAiSora2ImageToVideoProRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiSora2ImageToVideoProRequestsByRequestIdCancelResponse = + PutFalAiSora2ImageToVideoProRequestsByRequestIdCancelResponses[keyof PutFalAiSora2ImageToVideoProRequestsByRequestIdCancelResponses] + +export type PostFalAiSora2ImageToVideoProData = { + body: SchemaSora2ImageToVideoProInput + path?: never + query?: never + url: '/fal-ai/sora-2/image-to-video/pro' +} + +export type PostFalAiSora2ImageToVideoProResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSora2ImageToVideoProResponse = + PostFalAiSora2ImageToVideoProResponses[keyof PostFalAiSora2ImageToVideoProResponses] + +export type GetFalAiSora2ImageToVideoProRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sora-2/image-to-video/pro/requests/{request_id}' +} + +export type GetFalAiSora2ImageToVideoProRequestsByRequestIdResponses = { + /** + * Result of the request. 
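+   *
+   * Editorial note: this payload is only available once the status endpoint
+   * reports `COMPLETED`. A minimal polling sketch (same host and `FAL_KEY`
+   * assumptions as above, not taken from this spec; `requestId` comes from a
+   * prior submit):
+   *
+   * ```ts
+   * // Illustrative only — poll status, then fetch the result.
+   * const base = 'https://queue.fal.run/fal-ai/sora-2/image-to-video/pro'
+   * const headers = { Authorization: `Key ${process.env.FAL_KEY}` }
+   * let status: string
+   * do {
+   *   await new Promise((r) => setTimeout(r, 2000))
+   *   const s = await fetch(`${base}/requests/${requestId}/status`, { headers })
+   *   status = ((await s.json()) as { status: string }).status
+   * } while (status !== 'COMPLETED')
+   * const result = await fetch(`${base}/requests/${requestId}`, { headers })
+   * const output = await result.json() // SchemaSora2ImageToVideoProOutput
+   * ```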
+ */ + 200: SchemaSora2ImageToVideoProOutput +} + +export type GetFalAiSora2ImageToVideoProRequestsByRequestIdResponse = + GetFalAiSora2ImageToVideoProRequestsByRequestIdResponses[keyof GetFalAiSora2ImageToVideoProRequestsByRequestIdResponses] + +export type GetFalAiSora2ImageToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/sora-2/image-to-video/requests/{request_id}/status' +} + +export type GetFalAiSora2ImageToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSora2ImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiSora2ImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiSora2ImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiSora2ImageToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sora-2/image-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiSora2ImageToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiSora2ImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiSora2ImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiSora2ImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiSora2ImageToVideoData = { + body: SchemaSora2ImageToVideoInput + path?: never + query?: never + url: '/fal-ai/sora-2/image-to-video' +} + +export type PostFalAiSora2ImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSora2ImageToVideoResponse = + PostFalAiSora2ImageToVideoResponses[keyof PostFalAiSora2ImageToVideoResponses] + +export type GetFalAiSora2ImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sora-2/image-to-video/requests/{request_id}' +} + +export type GetFalAiSora2ImageToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSora2ImageToVideoOutput +} + +export type GetFalAiSora2ImageToVideoRequestsByRequestIdResponse = + GetFalAiSora2ImageToVideoRequestsByRequestIdResponses[keyof GetFalAiSora2ImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiOviImageToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ovi/image-to-video/requests/{request_id}/status' +} + +export type GetFalAiOviImageToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiOviImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiOviImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiOviImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiOviImageToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ovi/image-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiOviImageToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiOviImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiOviImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiOviImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiOviImageToVideoData = { + body: SchemaOviImageToVideoInput + path?: never + query?: never + url: '/fal-ai/ovi/image-to-video' +} + +export type PostFalAiOviImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiOviImageToVideoResponse = + PostFalAiOviImageToVideoResponses[keyof PostFalAiOviImageToVideoResponses] + +export type GetFalAiOviImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ovi/image-to-video/requests/{request_id}' +} + +export type GetFalAiOviImageToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaOviImageToVideoOutput +} + +export type GetFalAiOviImageToVideoRequestsByRequestIdResponse = + GetFalAiOviImageToVideoRequestsByRequestIdResponses[keyof GetFalAiOviImageToVideoRequestsByRequestIdResponses] + +export type GetVeedFabric10FastRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/veed/fabric-1.0/fast/requests/{request_id}/status' +} + +export type GetVeedFabric10FastRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetVeedFabric10FastRequestsByRequestIdStatusResponse = + GetVeedFabric10FastRequestsByRequestIdStatusResponses[keyof GetVeedFabric10FastRequestsByRequestIdStatusResponses] + +export type PutVeedFabric10FastRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/veed/fabric-1.0/fast/requests/{request_id}/cancel' +} + +export type PutVeedFabric10FastRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutVeedFabric10FastRequestsByRequestIdCancelResponse = + PutVeedFabric10FastRequestsByRequestIdCancelResponses[keyof PutVeedFabric10FastRequestsByRequestIdCancelResponses] + +export type PostVeedFabric10FastData = { + body: SchemaFabric10FastInput + path?: never + query?: never + url: '/veed/fabric-1.0/fast' +} + +export type PostVeedFabric10FastResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostVeedFabric10FastResponse = + PostVeedFabric10FastResponses[keyof PostVeedFabric10FastResponses] + +export type GetVeedFabric10FastRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/veed/fabric-1.0/fast/requests/{request_id}' +} + +export type GetVeedFabric10FastRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFabric10FastOutput +} + +export type GetVeedFabric10FastRequestsByRequestIdResponse = + GetVeedFabric10FastRequestsByRequestIdResponses[keyof GetVeedFabric10FastRequestsByRequestIdResponses] + +export type GetFalAiBytedanceOmnihumanV15RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/bytedance/omnihuman/v1.5/requests/{request_id}/status' +} + +export type GetFalAiBytedanceOmnihumanV15RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiBytedanceOmnihumanV15RequestsByRequestIdStatusResponse = + GetFalAiBytedanceOmnihumanV15RequestsByRequestIdStatusResponses[keyof GetFalAiBytedanceOmnihumanV15RequestsByRequestIdStatusResponses] + +export type PutFalAiBytedanceOmnihumanV15RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bytedance/omnihuman/v1.5/requests/{request_id}/cancel' +} + +export type PutFalAiBytedanceOmnihumanV15RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiBytedanceOmnihumanV15RequestsByRequestIdCancelResponse = + PutFalAiBytedanceOmnihumanV15RequestsByRequestIdCancelResponses[keyof PutFalAiBytedanceOmnihumanV15RequestsByRequestIdCancelResponses] + +export type PostFalAiBytedanceOmnihumanV15Data = { + body: SchemaBytedanceOmnihumanV15Input + path?: never + query?: never + url: '/fal-ai/bytedance/omnihuman/v1.5' +} + +export type PostFalAiBytedanceOmnihumanV15Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiBytedanceOmnihumanV15Response = + PostFalAiBytedanceOmnihumanV15Responses[keyof PostFalAiBytedanceOmnihumanV15Responses] + +export type GetFalAiBytedanceOmnihumanV15RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bytedance/omnihuman/v1.5/requests/{request_id}' +} + +export type GetFalAiBytedanceOmnihumanV15RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaBytedanceOmnihumanV15Output +} + +export type GetFalAiBytedanceOmnihumanV15RequestsByRequestIdResponse = + GetFalAiBytedanceOmnihumanV15RequestsByRequestIdResponses[keyof GetFalAiBytedanceOmnihumanV15RequestsByRequestIdResponses] + +export type GetVeedFabric10RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/veed/fabric-1.0/requests/{request_id}/status' +} + +export type GetVeedFabric10RequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetVeedFabric10RequestsByRequestIdStatusResponse = + GetVeedFabric10RequestsByRequestIdStatusResponses[keyof GetVeedFabric10RequestsByRequestIdStatusResponses] + +export type PutVeedFabric10RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/veed/fabric-1.0/requests/{request_id}/cancel' +} + +export type PutVeedFabric10RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutVeedFabric10RequestsByRequestIdCancelResponse = + PutVeedFabric10RequestsByRequestIdCancelResponses[keyof PutVeedFabric10RequestsByRequestIdCancelResponses] + +export type PostVeedFabric10Data = { + body: SchemaFabric10Input + path?: never + query?: never + url: '/veed/fabric-1.0' +} + +export type PostVeedFabric10Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostVeedFabric10Response = + PostVeedFabric10Responses[keyof PostVeedFabric10Responses] + +export type GetVeedFabric10RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/veed/fabric-1.0/requests/{request_id}' +} + +export type GetVeedFabric10RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFabric10Output +} + +export type GetVeedFabric10RequestsByRequestIdResponse = + GetVeedFabric10RequestsByRequestIdResponses[keyof GetVeedFabric10RequestsByRequestIdResponses] + +export type GetFalAiKlingVideoV1StandardAiAvatarRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-video/v1/standard/ai-avatar/requests/{request_id}/status' + } + +export type GetFalAiKlingVideoV1StandardAiAvatarRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiKlingVideoV1StandardAiAvatarRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoV1StandardAiAvatarRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoV1StandardAiAvatarRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoV1StandardAiAvatarRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v1/standard/ai-avatar/requests/{request_id}/cancel' + } + +export type PutFalAiKlingVideoV1StandardAiAvatarRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiKlingVideoV1StandardAiAvatarRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoV1StandardAiAvatarRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoV1StandardAiAvatarRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoV1StandardAiAvatarData = { + body: SchemaKlingVideoV1StandardAiAvatarInput + path?: never + query?: never + url: '/fal-ai/kling-video/v1/standard/ai-avatar' +} + +export type PostFalAiKlingVideoV1StandardAiAvatarResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoV1StandardAiAvatarResponse = + PostFalAiKlingVideoV1StandardAiAvatarResponses[keyof PostFalAiKlingVideoV1StandardAiAvatarResponses] + +export type GetFalAiKlingVideoV1StandardAiAvatarRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v1/standard/ai-avatar/requests/{request_id}' +} + +export type GetFalAiKlingVideoV1StandardAiAvatarRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaKlingVideoV1StandardAiAvatarOutput +} + +export type GetFalAiKlingVideoV1StandardAiAvatarRequestsByRequestIdResponse = + GetFalAiKlingVideoV1StandardAiAvatarRequestsByRequestIdResponses[keyof GetFalAiKlingVideoV1StandardAiAvatarRequestsByRequestIdResponses] + +export type GetFalAiKlingVideoV1ProAiAvatarRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-video/v1/pro/ai-avatar/requests/{request_id}/status' +} + +export type GetFalAiKlingVideoV1ProAiAvatarRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiKlingVideoV1ProAiAvatarRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoV1ProAiAvatarRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoV1ProAiAvatarRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoV1ProAiAvatarRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v1/pro/ai-avatar/requests/{request_id}/cancel' +} + +export type PutFalAiKlingVideoV1ProAiAvatarRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiKlingVideoV1ProAiAvatarRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoV1ProAiAvatarRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoV1ProAiAvatarRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoV1ProAiAvatarData = { + body: SchemaKlingVideoV1ProAiAvatarInput + path?: never + query?: never + url: '/fal-ai/kling-video/v1/pro/ai-avatar' +} + +export type PostFalAiKlingVideoV1ProAiAvatarResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoV1ProAiAvatarResponse = + PostFalAiKlingVideoV1ProAiAvatarResponses[keyof PostFalAiKlingVideoV1ProAiAvatarResponses] + +export type GetFalAiKlingVideoV1ProAiAvatarRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v1/pro/ai-avatar/requests/{request_id}' +} + +export type GetFalAiKlingVideoV1ProAiAvatarRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaKlingVideoV1ProAiAvatarOutput +} + +export type GetFalAiKlingVideoV1ProAiAvatarRequestsByRequestIdResponse = + GetFalAiKlingVideoV1ProAiAvatarRequestsByRequestIdResponses[keyof GetFalAiKlingVideoV1ProAiAvatarRequestsByRequestIdResponses] + +export type GetDecartLucy14bImageToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/decart/lucy-14b/image-to-video/requests/{request_id}/status' +} + +export type GetDecartLucy14bImageToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetDecartLucy14bImageToVideoRequestsByRequestIdStatusResponse = + GetDecartLucy14bImageToVideoRequestsByRequestIdStatusResponses[keyof GetDecartLucy14bImageToVideoRequestsByRequestIdStatusResponses] + +export type PutDecartLucy14bImageToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/decart/lucy-14b/image-to-video/requests/{request_id}/cancel' +} + +export type PutDecartLucy14bImageToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutDecartLucy14bImageToVideoRequestsByRequestIdCancelResponse = + PutDecartLucy14bImageToVideoRequestsByRequestIdCancelResponses[keyof PutDecartLucy14bImageToVideoRequestsByRequestIdCancelResponses] + +export type PostDecartLucy14bImageToVideoData = { + body: SchemaLucy14bImageToVideoInput + path?: never + query?: never + url: '/decart/lucy-14b/image-to-video' +} + +export type PostDecartLucy14bImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostDecartLucy14bImageToVideoResponse = + PostDecartLucy14bImageToVideoResponses[keyof PostDecartLucy14bImageToVideoResponses] + +export type GetDecartLucy14bImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/decart/lucy-14b/image-to-video/requests/{request_id}' +} + +export type GetDecartLucy14bImageToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLucy14bImageToVideoOutput +} + +export type GetDecartLucy14bImageToVideoRequestsByRequestIdResponse = + GetDecartLucy14bImageToVideoRequestsByRequestIdResponses[keyof GetDecartLucy14bImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiBytedanceSeedanceV1LiteReferenceToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/bytedance/seedance/v1/lite/reference-to-video/requests/{request_id}/status' + } + +export type GetFalAiBytedanceSeedanceV1LiteReferenceToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiBytedanceSeedanceV1LiteReferenceToVideoRequestsByRequestIdStatusResponse = + GetFalAiBytedanceSeedanceV1LiteReferenceToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiBytedanceSeedanceV1LiteReferenceToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiBytedanceSeedanceV1LiteReferenceToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bytedance/seedance/v1/lite/reference-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiBytedanceSeedanceV1LiteReferenceToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
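+   * Since the field is optional, a missing value is best treated as "not
+   * confirmed" rather than as success — e.g. `if (body.success !== true) ...`
+   * (illustrative). Cancellation can also race with completion, so a follow-up
+   * status poll is the more reliable check.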
+ */ + success?: boolean + } + } + +export type PutFalAiBytedanceSeedanceV1LiteReferenceToVideoRequestsByRequestIdCancelResponse = + PutFalAiBytedanceSeedanceV1LiteReferenceToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiBytedanceSeedanceV1LiteReferenceToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiBytedanceSeedanceV1LiteReferenceToVideoData = { + body: SchemaBytedanceSeedanceV1LiteReferenceToVideoInput + path?: never + query?: never + url: '/fal-ai/bytedance/seedance/v1/lite/reference-to-video' +} + +export type PostFalAiBytedanceSeedanceV1LiteReferenceToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiBytedanceSeedanceV1LiteReferenceToVideoResponse = + PostFalAiBytedanceSeedanceV1LiteReferenceToVideoResponses[keyof PostFalAiBytedanceSeedanceV1LiteReferenceToVideoResponses] + +export type GetFalAiBytedanceSeedanceV1LiteReferenceToVideoRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bytedance/seedance/v1/lite/reference-to-video/requests/{request_id}' + } + +export type GetFalAiBytedanceSeedanceV1LiteReferenceToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaBytedanceSeedanceV1LiteReferenceToVideoOutput + } + +export type GetFalAiBytedanceSeedanceV1LiteReferenceToVideoRequestsByRequestIdResponse = + GetFalAiBytedanceSeedanceV1LiteReferenceToVideoRequestsByRequestIdResponses[keyof GetFalAiBytedanceSeedanceV1LiteReferenceToVideoRequestsByRequestIdResponses] + +export type GetFalAiWanAtiRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan-ati/requests/{request_id}/status' +} + +export type GetFalAiWanAtiRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiWanAtiRequestsByRequestIdStatusResponse = + GetFalAiWanAtiRequestsByRequestIdStatusResponses[keyof GetFalAiWanAtiRequestsByRequestIdStatusResponses] + +export type PutFalAiWanAtiRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-ati/requests/{request_id}/cancel' +} + +export type PutFalAiWanAtiRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiWanAtiRequestsByRequestIdCancelResponse = + PutFalAiWanAtiRequestsByRequestIdCancelResponses[keyof PutFalAiWanAtiRequestsByRequestIdCancelResponses] + +export type PostFalAiWanAtiData = { + body: SchemaWanAtiInput + path?: never + query?: never + url: '/fal-ai/wan-ati' +} + +export type PostFalAiWanAtiResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanAtiResponse = + PostFalAiWanAtiResponses[keyof PostFalAiWanAtiResponses] + +export type GetFalAiWanAtiRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-ati/requests/{request_id}' +} + +export type GetFalAiWanAtiRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaWanAtiOutput +} + +export type GetFalAiWanAtiRequestsByRequestIdResponse = + GetFalAiWanAtiRequestsByRequestIdResponses[keyof GetFalAiWanAtiRequestsByRequestIdResponses] + +export type GetFalAiDecartLucy5bImageToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/decart/lucy-5b/image-to-video/requests/{request_id}/status' +} + +export type GetFalAiDecartLucy5bImageToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiDecartLucy5bImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiDecartLucy5bImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiDecartLucy5bImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiDecartLucy5bImageToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/decart/lucy-5b/image-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiDecartLucy5bImageToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiDecartLucy5bImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiDecartLucy5bImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiDecartLucy5bImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiDecartLucy5bImageToVideoData = { + body: SchemaDecartLucy5bImageToVideoInput + path?: never + query?: never + url: '/fal-ai/decart/lucy-5b/image-to-video' +} + +export type PostFalAiDecartLucy5bImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiDecartLucy5bImageToVideoResponse = + PostFalAiDecartLucy5bImageToVideoResponses[keyof PostFalAiDecartLucy5bImageToVideoResponses] + +export type GetFalAiDecartLucy5bImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/decart/lucy-5b/image-to-video/requests/{request_id}' +} + +export type GetFalAiDecartLucy5bImageToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaDecartLucy5bImageToVideoOutput +} + +export type GetFalAiDecartLucy5bImageToVideoRequestsByRequestIdResponse = + GetFalAiDecartLucy5bImageToVideoRequestsByRequestIdResponses[keyof GetFalAiDecartLucy5bImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiPixverseV5TransitionRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pixverse/v5/transition/requests/{request_id}/status' +} + +export type GetFalAiPixverseV5TransitionRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiPixverseV5TransitionRequestsByRequestIdStatusResponse = + GetFalAiPixverseV5TransitionRequestsByRequestIdStatusResponses[keyof GetFalAiPixverseV5TransitionRequestsByRequestIdStatusResponses] + +export type PutFalAiPixverseV5TransitionRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v5/transition/requests/{request_id}/cancel' +} + +export type PutFalAiPixverseV5TransitionRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiPixverseV5TransitionRequestsByRequestIdCancelResponse = + PutFalAiPixverseV5TransitionRequestsByRequestIdCancelResponses[keyof PutFalAiPixverseV5TransitionRequestsByRequestIdCancelResponses] + +export type PostFalAiPixverseV5TransitionData = { + body: SchemaPixverseV5TransitionInput + path?: never + query?: never + url: '/fal-ai/pixverse/v5/transition' +} + +export type PostFalAiPixverseV5TransitionResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPixverseV5TransitionResponse = + PostFalAiPixverseV5TransitionResponses[keyof PostFalAiPixverseV5TransitionResponses] + +export type GetFalAiPixverseV5TransitionRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v5/transition/requests/{request_id}' +} + +export type GetFalAiPixverseV5TransitionRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPixverseV5TransitionOutput +} + +export type GetFalAiPixverseV5TransitionRequestsByRequestIdResponse = + GetFalAiPixverseV5TransitionRequestsByRequestIdResponses[keyof GetFalAiPixverseV5TransitionRequestsByRequestIdResponses] + +export type GetFalAiPixverseV5EffectsRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pixverse/v5/effects/requests/{request_id}/status' +} + +export type GetFalAiPixverseV5EffectsRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiPixverseV5EffectsRequestsByRequestIdStatusResponse = + GetFalAiPixverseV5EffectsRequestsByRequestIdStatusResponses[keyof GetFalAiPixverseV5EffectsRequestsByRequestIdStatusResponses] + +export type PutFalAiPixverseV5EffectsRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v5/effects/requests/{request_id}/cancel' +} + +export type PutFalAiPixverseV5EffectsRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiPixverseV5EffectsRequestsByRequestIdCancelResponse = + PutFalAiPixverseV5EffectsRequestsByRequestIdCancelResponses[keyof PutFalAiPixverseV5EffectsRequestsByRequestIdCancelResponses] + +export type PostFalAiPixverseV5EffectsData = { + body: SchemaPixverseV5EffectsInput + path?: never + query?: never + url: '/fal-ai/pixverse/v5/effects' +} + +export type PostFalAiPixverseV5EffectsResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiPixverseV5EffectsResponse = + PostFalAiPixverseV5EffectsResponses[keyof PostFalAiPixverseV5EffectsResponses] + +export type GetFalAiPixverseV5EffectsRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v5/effects/requests/{request_id}' +} + +export type GetFalAiPixverseV5EffectsRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPixverseV5EffectsOutput +} + +export type GetFalAiPixverseV5EffectsRequestsByRequestIdResponse = + GetFalAiPixverseV5EffectsRequestsByRequestIdResponses[keyof GetFalAiPixverseV5EffectsRequestsByRequestIdResponses] + +export type GetFalAiPixverseV5ImageToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pixverse/v5/image-to-video/requests/{request_id}/status' +} + +export type GetFalAiPixverseV5ImageToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiPixverseV5ImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiPixverseV5ImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiPixverseV5ImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiPixverseV5ImageToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v5/image-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiPixverseV5ImageToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiPixverseV5ImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiPixverseV5ImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiPixverseV5ImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiPixverseV5ImageToVideoData = { + body: SchemaPixverseV5ImageToVideoInput + path?: never + query?: never + url: '/fal-ai/pixverse/v5/image-to-video' +} + +export type PostFalAiPixverseV5ImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPixverseV5ImageToVideoResponse = + PostFalAiPixverseV5ImageToVideoResponses[keyof PostFalAiPixverseV5ImageToVideoResponses] + +export type GetFalAiPixverseV5ImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v5/image-to-video/requests/{request_id}' +} + +export type GetFalAiPixverseV5ImageToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPixverseV5ImageToVideoOutput +} + +export type GetFalAiPixverseV5ImageToVideoRequestsByRequestIdResponse = + GetFalAiPixverseV5ImageToVideoRequestsByRequestIdResponses[keyof GetFalAiPixverseV5ImageToVideoRequestsByRequestIdResponses] + +export type GetMoonvalleyMareyI2vRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/moonvalley/marey/i2v/requests/{request_id}/status' +} + +export type GetMoonvalleyMareyI2vRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetMoonvalleyMareyI2vRequestsByRequestIdStatusResponse = + GetMoonvalleyMareyI2vRequestsByRequestIdStatusResponses[keyof GetMoonvalleyMareyI2vRequestsByRequestIdStatusResponses] + +export type PutMoonvalleyMareyI2vRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/moonvalley/marey/i2v/requests/{request_id}/cancel' +} + +export type PutMoonvalleyMareyI2vRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutMoonvalleyMareyI2vRequestsByRequestIdCancelResponse = + PutMoonvalleyMareyI2vRequestsByRequestIdCancelResponses[keyof PutMoonvalleyMareyI2vRequestsByRequestIdCancelResponses] + +export type PostMoonvalleyMareyI2vData = { + body: SchemaMareyI2vInput + path?: never + query?: never + url: '/moonvalley/marey/i2v' +} + +export type PostMoonvalleyMareyI2vResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostMoonvalleyMareyI2vResponse = + PostMoonvalleyMareyI2vResponses[keyof PostMoonvalleyMareyI2vResponses] + +export type GetMoonvalleyMareyI2vRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/moonvalley/marey/i2v/requests/{request_id}' +} + +export type GetMoonvalleyMareyI2vRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMareyI2vOutput +} + +export type GetMoonvalleyMareyI2vRequestsByRequestIdResponse = + GetMoonvalleyMareyI2vRequestsByRequestIdResponses[keyof GetMoonvalleyMareyI2vRequestsByRequestIdResponses] + +export type GetFalAiBytedanceVideoStylizeRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/bytedance/video-stylize/requests/{request_id}/status' +} + +export type GetFalAiBytedanceVideoStylizeRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiBytedanceVideoStylizeRequestsByRequestIdStatusResponse = + GetFalAiBytedanceVideoStylizeRequestsByRequestIdStatusResponses[keyof GetFalAiBytedanceVideoStylizeRequestsByRequestIdStatusResponses] + +export type PutFalAiBytedanceVideoStylizeRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bytedance/video-stylize/requests/{request_id}/cancel' +} + +export type PutFalAiBytedanceVideoStylizeRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiBytedanceVideoStylizeRequestsByRequestIdCancelResponse = + PutFalAiBytedanceVideoStylizeRequestsByRequestIdCancelResponses[keyof PutFalAiBytedanceVideoStylizeRequestsByRequestIdCancelResponses] + +export type PostFalAiBytedanceVideoStylizeData = { + body: SchemaBytedanceVideoStylizeInput + path?: never + query?: never + url: '/fal-ai/bytedance/video-stylize' +} + +export type PostFalAiBytedanceVideoStylizeResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiBytedanceVideoStylizeResponse = + PostFalAiBytedanceVideoStylizeResponses[keyof PostFalAiBytedanceVideoStylizeResponses] + +export type GetFalAiBytedanceVideoStylizeRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bytedance/video-stylize/requests/{request_id}' +} + +export type GetFalAiBytedanceVideoStylizeRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaBytedanceVideoStylizeOutput +} + +export type GetFalAiBytedanceVideoStylizeRequestsByRequestIdResponse = + GetFalAiBytedanceVideoStylizeRequestsByRequestIdResponses[keyof GetFalAiBytedanceVideoStylizeRequestsByRequestIdResponses] + +export type GetFalAiWanV22A14bImageToVideoLoraRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan/v2.2-a14b/image-to-video/lora/requests/{request_id}/status' +} + +export type GetFalAiWanV22A14bImageToVideoLoraRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiWanV22A14bImageToVideoLoraRequestsByRequestIdStatusResponse = + GetFalAiWanV22A14bImageToVideoLoraRequestsByRequestIdStatusResponses[keyof GetFalAiWanV22A14bImageToVideoLoraRequestsByRequestIdStatusResponses] + +export type PutFalAiWanV22A14bImageToVideoLoraRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan/v2.2-a14b/image-to-video/lora/requests/{request_id}/cancel' +} + +export type PutFalAiWanV22A14bImageToVideoLoraRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiWanV22A14bImageToVideoLoraRequestsByRequestIdCancelResponse = + PutFalAiWanV22A14bImageToVideoLoraRequestsByRequestIdCancelResponses[keyof PutFalAiWanV22A14bImageToVideoLoraRequestsByRequestIdCancelResponses] + +export type PostFalAiWanV22A14bImageToVideoLoraData = { + body: SchemaWanV22A14bImageToVideoLoraInput + path?: never + query?: never + url: '/fal-ai/wan/v2.2-a14b/image-to-video/lora' +} + +export type PostFalAiWanV22A14bImageToVideoLoraResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanV22A14bImageToVideoLoraResponse = + PostFalAiWanV22A14bImageToVideoLoraResponses[keyof PostFalAiWanV22A14bImageToVideoLoraResponses] + +export type GetFalAiWanV22A14bImageToVideoLoraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan/v2.2-a14b/image-to-video/lora/requests/{request_id}' +} + +export type GetFalAiWanV22A14bImageToVideoLoraRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWanV22A14bImageToVideoLoraOutput +} + +export type GetFalAiWanV22A14bImageToVideoLoraRequestsByRequestIdResponse = + GetFalAiWanV22A14bImageToVideoLoraRequestsByRequestIdResponses[keyof GetFalAiWanV22A14bImageToVideoLoraRequestsByRequestIdResponses] + +export type GetFalAiMinimaxHailuo02FastImageToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/minimax/hailuo-02-fast/image-to-video/requests/{request_id}/status' + } + +export type GetFalAiMinimaxHailuo02FastImageToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiMinimaxHailuo02FastImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiMinimaxHailuo02FastImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiMinimaxHailuo02FastImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiMinimaxHailuo02FastImageToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/hailuo-02-fast/image-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiMinimaxHailuo02FastImageToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiMinimaxHailuo02FastImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiMinimaxHailuo02FastImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiMinimaxHailuo02FastImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiMinimaxHailuo02FastImageToVideoData = { + body: SchemaMinimaxHailuo02FastImageToVideoInput + path?: never + query?: never + url: '/fal-ai/minimax/hailuo-02-fast/image-to-video' +} + +export type PostFalAiMinimaxHailuo02FastImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMinimaxHailuo02FastImageToVideoResponse = + PostFalAiMinimaxHailuo02FastImageToVideoResponses[keyof PostFalAiMinimaxHailuo02FastImageToVideoResponses] + +export type GetFalAiMinimaxHailuo02FastImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/hailuo-02-fast/image-to-video/requests/{request_id}' +} + +export type GetFalAiMinimaxHailuo02FastImageToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. 
+     */
+    200: SchemaMinimaxHailuo02FastImageToVideoOutput
+  }
+
+export type GetFalAiMinimaxHailuo02FastImageToVideoRequestsByRequestIdResponse =
+  GetFalAiMinimaxHailuo02FastImageToVideoRequestsByRequestIdResponses[keyof GetFalAiMinimaxHailuo02FastImageToVideoRequestsByRequestIdResponses]
+
+export type GetFalAiVeo3ImageToVideoRequestsByRequestIdStatusData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: {
+    /**
+     * Whether to include logs (`1`) in the response or not (`0`).
+     */
+    logs?: number
+  }
+  url: '/fal-ai/veo3/image-to-video/requests/{request_id}/status'
+}
+
+export type GetFalAiVeo3ImageToVideoRequestsByRequestIdStatusResponses = {
+  /**
+   * The request status.
+   */
+  200: SchemaQueueStatus
+}
+
+export type GetFalAiVeo3ImageToVideoRequestsByRequestIdStatusResponse =
+  GetFalAiVeo3ImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiVeo3ImageToVideoRequestsByRequestIdStatusResponses]
+
+export type PutFalAiVeo3ImageToVideoRequestsByRequestIdCancelData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/veo3/image-to-video/requests/{request_id}/cancel'
+}
+
+export type PutFalAiVeo3ImageToVideoRequestsByRequestIdCancelResponses = {
+  /**
+   * The request was cancelled.
+   */
+  200: {
+    /**
+     * Whether the request was cancelled successfully.
+     */
+    success?: boolean
+  }
+}
+
+export type PutFalAiVeo3ImageToVideoRequestsByRequestIdCancelResponse =
+  PutFalAiVeo3ImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiVeo3ImageToVideoRequestsByRequestIdCancelResponses]
+
+export type PostFalAiVeo3ImageToVideoData = {
+  body: SchemaVeo3ImageToVideoInput
+  path?: never
+  query?: never
+  url: '/fal-ai/veo3/image-to-video'
+}
+
+export type PostFalAiVeo3ImageToVideoResponses = {
+  /**
+   * The request status.
+   */
+  200: SchemaQueueStatus
+}
+
+export type PostFalAiVeo3ImageToVideoResponse =
+  PostFalAiVeo3ImageToVideoResponses[keyof PostFalAiVeo3ImageToVideoResponses]
+
+export type GetFalAiVeo3ImageToVideoRequestsByRequestIdData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/veo3/image-to-video/requests/{request_id}'
+}
+
+export type GetFalAiVeo3ImageToVideoRequestsByRequestIdResponses = {
+  /**
+   * Result of the request.
+   */
+  200: SchemaVeo3ImageToVideoOutput
+}
+
+export type GetFalAiVeo3ImageToVideoRequestsByRequestIdResponse =
+  GetFalAiVeo3ImageToVideoRequestsByRequestIdResponses[keyof GetFalAiVeo3ImageToVideoRequestsByRequestIdResponses]
+
+export type GetFalAiWanV22A14bImageToVideoTurboRequestsByRequestIdStatusData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: {
+    /**
+     * Whether to include logs (`1`) in the response or not (`0`).
+     */
+    logs?: number
+  }
+  url: '/fal-ai/wan/v2.2-a14b/image-to-video/turbo/requests/{request_id}/status'
+}
+
+export type GetFalAiWanV22A14bImageToVideoTurboRequestsByRequestIdStatusResponses =
+  {
+    /**
+     * The request status.
+     */
+    200: SchemaQueueStatus
+  }
+
+export type GetFalAiWanV22A14bImageToVideoTurboRequestsByRequestIdStatusResponse =
+  GetFalAiWanV22A14bImageToVideoTurboRequestsByRequestIdStatusResponses[keyof GetFalAiWanV22A14bImageToVideoTurboRequestsByRequestIdStatusResponses]
+
+export type PutFalAiWanV22A14bImageToVideoTurboRequestsByRequestIdCancelData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/wan/v2.2-a14b/image-to-video/turbo/requests/{request_id}/cancel'
+}
+
+export type PutFalAiWanV22A14bImageToVideoTurboRequestsByRequestIdCancelResponses =
+  {
+    /**
+     * The request was cancelled.
+     */
+    200: {
+      /**
+       * Whether the request was cancelled successfully.
+       */
+      success?: boolean
+    }
+  }
+
+export type PutFalAiWanV22A14bImageToVideoTurboRequestsByRequestIdCancelResponse =
+  PutFalAiWanV22A14bImageToVideoTurboRequestsByRequestIdCancelResponses[keyof PutFalAiWanV22A14bImageToVideoTurboRequestsByRequestIdCancelResponses]
+
+export type PostFalAiWanV22A14bImageToVideoTurboData = {
+  body: SchemaWanV22A14bImageToVideoTurboInput
+  path?: never
+  query?: never
+  url: '/fal-ai/wan/v2.2-a14b/image-to-video/turbo'
+}
+
+export type PostFalAiWanV22A14bImageToVideoTurboResponses = {
+  /**
+   * The request status.
+   */
+  200: SchemaQueueStatus
+}
+
+export type PostFalAiWanV22A14bImageToVideoTurboResponse =
+  PostFalAiWanV22A14bImageToVideoTurboResponses[keyof PostFalAiWanV22A14bImageToVideoTurboResponses]
+
+export type GetFalAiWanV22A14bImageToVideoTurboRequestsByRequestIdData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/wan/v2.2-a14b/image-to-video/turbo/requests/{request_id}'
+}
+
+export type GetFalAiWanV22A14bImageToVideoTurboRequestsByRequestIdResponses = {
+  /**
+   * Result of the request.
+   */
+  200: SchemaWanV22A14bImageToVideoTurboOutput
+}
+
+export type GetFalAiWanV22A14bImageToVideoTurboRequestsByRequestIdResponse =
+  GetFalAiWanV22A14bImageToVideoTurboRequestsByRequestIdResponses[keyof GetFalAiWanV22A14bImageToVideoTurboRequestsByRequestIdResponses]
+
+export type GetFalAiWanV225bImageToVideoRequestsByRequestIdStatusData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: {
+    /**
+     * Whether to include logs (`1`) in the response or not (`0`).
+     */
+    logs?: number
+  }
+  url: '/fal-ai/wan/v2.2-5b/image-to-video/requests/{request_id}/status'
+}
+
+export type GetFalAiWanV225bImageToVideoRequestsByRequestIdStatusResponses = {
+  /**
+   * The request status.
+   */
+  200: SchemaQueueStatus
+}
+
+export type GetFalAiWanV225bImageToVideoRequestsByRequestIdStatusResponse =
+  GetFalAiWanV225bImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiWanV225bImageToVideoRequestsByRequestIdStatusResponses]
+
+export type PutFalAiWanV225bImageToVideoRequestsByRequestIdCancelData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/wan/v2.2-5b/image-to-video/requests/{request_id}/cancel'
+}
+
+export type PutFalAiWanV225bImageToVideoRequestsByRequestIdCancelResponses = {
+  /**
+   * The request was cancelled.
+   */
+  200: {
+    /**
+     * Whether the request was cancelled successfully.
+     */
+    success?: boolean
+  }
+}
+
+export type PutFalAiWanV225bImageToVideoRequestsByRequestIdCancelResponse =
+  PutFalAiWanV225bImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiWanV225bImageToVideoRequestsByRequestIdCancelResponses]
+
+export type PostFalAiWanV225bImageToVideoData = {
+  body: SchemaWanV225bImageToVideoInput
+  path?: never
+  query?: never
+  url: '/fal-ai/wan/v2.2-5b/image-to-video'
+}
+
+export type PostFalAiWanV225bImageToVideoResponses = {
+  /**
+   * The request status.
+   */
+  200: SchemaQueueStatus
+}
+
+export type PostFalAiWanV225bImageToVideoResponse =
+  PostFalAiWanV225bImageToVideoResponses[keyof PostFalAiWanV225bImageToVideoResponses]
+
+export type GetFalAiWanV225bImageToVideoRequestsByRequestIdData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/wan/v2.2-5b/image-to-video/requests/{request_id}'
+}
+
+export type GetFalAiWanV225bImageToVideoRequestsByRequestIdResponses = {
+  /**
+   * Result of the request.
+   */
+  200: SchemaWanV225bImageToVideoOutput
+}
+
+export type GetFalAiWanV225bImageToVideoRequestsByRequestIdResponse =
+  GetFalAiWanV225bImageToVideoRequestsByRequestIdResponses[keyof GetFalAiWanV225bImageToVideoRequestsByRequestIdResponses]
+
+export type GetFalAiWanV22A14bImageToVideoRequestsByRequestIdStatusData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: {
+    /**
+     * Whether to include logs (`1`) in the response or not (`0`).
+     */
+    logs?: number
+  }
+  url: '/fal-ai/wan/v2.2-a14b/image-to-video/requests/{request_id}/status'
+}
+
+export type GetFalAiWanV22A14bImageToVideoRequestsByRequestIdStatusResponses = {
+  /**
+   * The request status.
+   */
+  200: SchemaQueueStatus
+}
+
+export type GetFalAiWanV22A14bImageToVideoRequestsByRequestIdStatusResponse =
+  GetFalAiWanV22A14bImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiWanV22A14bImageToVideoRequestsByRequestIdStatusResponses]
+
+export type PutFalAiWanV22A14bImageToVideoRequestsByRequestIdCancelData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/wan/v2.2-a14b/image-to-video/requests/{request_id}/cancel'
+}
+
+export type PutFalAiWanV22A14bImageToVideoRequestsByRequestIdCancelResponses = {
+  /**
+   * The request was cancelled.
+   */
+  200: {
+    /**
+     * Whether the request was cancelled successfully.
+     */
+    success?: boolean
+  }
+}
+
+export type PutFalAiWanV22A14bImageToVideoRequestsByRequestIdCancelResponse =
+  PutFalAiWanV22A14bImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiWanV22A14bImageToVideoRequestsByRequestIdCancelResponses]
+
+export type PostFalAiWanV22A14bImageToVideoData = {
+  body: SchemaWanV22A14bImageToVideoInput
+  path?: never
+  query?: never
+  url: '/fal-ai/wan/v2.2-a14b/image-to-video'
+}
+
+export type PostFalAiWanV22A14bImageToVideoResponses = {
+  /**
+   * The request status.
+   */
+  200: SchemaQueueStatus
+}
+
+export type PostFalAiWanV22A14bImageToVideoResponse =
+  PostFalAiWanV22A14bImageToVideoResponses[keyof PostFalAiWanV22A14bImageToVideoResponses]
+
+export type GetFalAiWanV22A14bImageToVideoRequestsByRequestIdData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/wan/v2.2-a14b/image-to-video/requests/{request_id}'
+}
+
+export type GetFalAiWanV22A14bImageToVideoRequestsByRequestIdResponses = {
+  /**
+   * Result of the request.
+   */
+  200: SchemaWanV22A14bImageToVideoOutput
+}
+
+export type GetFalAiWanV22A14bImageToVideoRequestsByRequestIdResponse =
+  GetFalAiWanV22A14bImageToVideoRequestsByRequestIdResponses[keyof GetFalAiWanV22A14bImageToVideoRequestsByRequestIdResponses]
+
+export type GetFalAiBytedanceOmnihumanRequestsByRequestIdStatusData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: {
+    /**
+     * Whether to include logs (`1`) in the response or not (`0`).
+     */
+    logs?: number
+  }
+  url: '/fal-ai/bytedance/omnihuman/requests/{request_id}/status'
+}
+
+export type GetFalAiBytedanceOmnihumanRequestsByRequestIdStatusResponses = {
+  /**
+   * The request status.
+   */
+  200: SchemaQueueStatus
+}
+
+export type GetFalAiBytedanceOmnihumanRequestsByRequestIdStatusResponse =
+  GetFalAiBytedanceOmnihumanRequestsByRequestIdStatusResponses[keyof GetFalAiBytedanceOmnihumanRequestsByRequestIdStatusResponses]
+
+export type PutFalAiBytedanceOmnihumanRequestsByRequestIdCancelData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/bytedance/omnihuman/requests/{request_id}/cancel'
+}
+
+export type PutFalAiBytedanceOmnihumanRequestsByRequestIdCancelResponses = {
+  /**
+   * The request was cancelled.
+   */
+  200: {
+    /**
+     * Whether the request was cancelled successfully.
+     */
+    success?: boolean
+  }
+}
+
+export type PutFalAiBytedanceOmnihumanRequestsByRequestIdCancelResponse =
+  PutFalAiBytedanceOmnihumanRequestsByRequestIdCancelResponses[keyof PutFalAiBytedanceOmnihumanRequestsByRequestIdCancelResponses]
+
+export type PostFalAiBytedanceOmnihumanData = {
+  body: SchemaBytedanceOmnihumanInput
+  path?: never
+  query?: never
+  url: '/fal-ai/bytedance/omnihuman'
+}
+
+export type PostFalAiBytedanceOmnihumanResponses = {
+  /**
+   * The request status.
+   */
+  200: SchemaQueueStatus
+}
+
+export type PostFalAiBytedanceOmnihumanResponse =
+  PostFalAiBytedanceOmnihumanResponses[keyof PostFalAiBytedanceOmnihumanResponses]
+
+export type GetFalAiBytedanceOmnihumanRequestsByRequestIdData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/bytedance/omnihuman/requests/{request_id}'
+}
+
+export type GetFalAiBytedanceOmnihumanRequestsByRequestIdResponses = {
+  /**
+   * Result of the request.
+   */
+  200: SchemaBytedanceOmnihumanOutput
+}
+
+export type GetFalAiBytedanceOmnihumanRequestsByRequestIdResponse =
+  GetFalAiBytedanceOmnihumanRequestsByRequestIdResponses[keyof GetFalAiBytedanceOmnihumanRequestsByRequestIdResponses]
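+
+// Editorial note (illustrative, not generator output): every `...Response`
+// alias in this file is an indexed access over `keyof` its `...Responses`
+// map, so the alias stays correct if the generator ever emits additional
+// status codes. A generic helper (hypothetical, not part of this file) would
+// express the same pattern:
+//
+//   type ResponseOf<T> = T[keyof T]
+//   // ResponseOf<GetFalAiBytedanceOmnihumanRequestsByRequestIdResponses>
+//   //   resolves to SchemaBytedanceOmnihumanOutput
+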
+export type GetFalAiLtxv13B098DistilledImageToVideoRequestsByRequestIdStatusData =
+  {
+    body?: never
+    path: {
+      /**
+       * Request ID
+       */
+      request_id: string
+    }
+    query?: {
+      /**
+       * Whether to include logs (`1`) in the response or not (`0`).
+       */
+      logs?: number
+    }
+    url: '/fal-ai/ltxv-13b-098-distilled/image-to-video/requests/{request_id}/status'
+  }
+
+export type GetFalAiLtxv13B098DistilledImageToVideoRequestsByRequestIdStatusResponses =
+  {
+    /**
+     * The request status.
+     */
+    200: SchemaQueueStatus
+  }
+
+export type GetFalAiLtxv13B098DistilledImageToVideoRequestsByRequestIdStatusResponse =
+  GetFalAiLtxv13B098DistilledImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiLtxv13B098DistilledImageToVideoRequestsByRequestIdStatusResponses]
+
+export type PutFalAiLtxv13B098DistilledImageToVideoRequestsByRequestIdCancelData =
+  {
+    body?: never
+    path: {
+      /**
+       * Request ID
+       */
+      request_id: string
+    }
+    query?: never
+    url: '/fal-ai/ltxv-13b-098-distilled/image-to-video/requests/{request_id}/cancel'
+  }
+
+export type PutFalAiLtxv13B098DistilledImageToVideoRequestsByRequestIdCancelResponses =
+  {
+    /**
+     * The request was cancelled.
+     */
+    200: {
+      /**
+       * Whether the request was cancelled successfully.
+       */
+      success?: boolean
+    }
+  }
+
+export type PutFalAiLtxv13B098DistilledImageToVideoRequestsByRequestIdCancelResponse =
+  PutFalAiLtxv13B098DistilledImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiLtxv13B098DistilledImageToVideoRequestsByRequestIdCancelResponses]
+
+export type PostFalAiLtxv13B098DistilledImageToVideoData = {
+  body: SchemaLtxv13B098DistilledImageToVideoInput
+  path?: never
+  query?: never
+  url: '/fal-ai/ltxv-13b-098-distilled/image-to-video'
+}
+
+export type PostFalAiLtxv13B098DistilledImageToVideoResponses = {
+  /**
+   * The request status.
+   */
+  200: SchemaQueueStatus
+}
+
+export type PostFalAiLtxv13B098DistilledImageToVideoResponse =
+  PostFalAiLtxv13B098DistilledImageToVideoResponses[keyof PostFalAiLtxv13B098DistilledImageToVideoResponses]
+
+export type GetFalAiLtxv13B098DistilledImageToVideoRequestsByRequestIdData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/ltxv-13b-098-distilled/image-to-video/requests/{request_id}'
+}
+
+export type GetFalAiLtxv13B098DistilledImageToVideoRequestsByRequestIdResponses =
+  {
+    /**
+     * Result of the request.
+     */
+    200: SchemaLtxv13B098DistilledImageToVideoOutput
+  }
+
+export type GetFalAiLtxv13B098DistilledImageToVideoRequestsByRequestIdResponse =
+  GetFalAiLtxv13B098DistilledImageToVideoRequestsByRequestIdResponses[keyof GetFalAiLtxv13B098DistilledImageToVideoRequestsByRequestIdResponses]
+
+export type GetFalAiVeo3FastImageToVideoRequestsByRequestIdStatusData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: {
+    /**
+     * Whether to include logs (`1`) in the response or not (`0`).
+     */
+    logs?: number
+  }
+  url: '/fal-ai/veo3/fast/image-to-video/requests/{request_id}/status'
+}
+
+export type GetFalAiVeo3FastImageToVideoRequestsByRequestIdStatusResponses = {
+  /**
+   * The request status.
+   */
+  200: SchemaQueueStatus
+}
+
+export type GetFalAiVeo3FastImageToVideoRequestsByRequestIdStatusResponse =
+  GetFalAiVeo3FastImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiVeo3FastImageToVideoRequestsByRequestIdStatusResponses]
+
+export type PutFalAiVeo3FastImageToVideoRequestsByRequestIdCancelData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/veo3/fast/image-to-video/requests/{request_id}/cancel'
+}
+
+export type PutFalAiVeo3FastImageToVideoRequestsByRequestIdCancelResponses = {
+  /**
+   * The request was cancelled.
+   */
+  200: {
+    /**
+     * Whether the request was cancelled successfully.
+     */
+    success?: boolean
+  }
+}
+
+export type PutFalAiVeo3FastImageToVideoRequestsByRequestIdCancelResponse =
+  PutFalAiVeo3FastImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiVeo3FastImageToVideoRequestsByRequestIdCancelResponses]
+
+export type PostFalAiVeo3FastImageToVideoData = {
+  body: SchemaVeo3FastImageToVideoInput
+  path?: never
+  query?: never
+  url: '/fal-ai/veo3/fast/image-to-video'
+}
+
+export type PostFalAiVeo3FastImageToVideoResponses = {
+  /**
+   * The request status.
+   */
+  200: SchemaQueueStatus
+}
+
+export type PostFalAiVeo3FastImageToVideoResponse =
+  PostFalAiVeo3FastImageToVideoResponses[keyof PostFalAiVeo3FastImageToVideoResponses]
+
+export type GetFalAiVeo3FastImageToVideoRequestsByRequestIdData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/veo3/fast/image-to-video/requests/{request_id}'
+}
+
+export type GetFalAiVeo3FastImageToVideoRequestsByRequestIdResponses = {
+  /**
+   * Result of the request.
+   */
+  200: SchemaVeo3FastImageToVideoOutput
+}
+
+export type GetFalAiVeo3FastImageToVideoRequestsByRequestIdResponse =
+  GetFalAiVeo3FastImageToVideoRequestsByRequestIdResponses[keyof GetFalAiVeo3FastImageToVideoRequestsByRequestIdResponses]
+
+export type GetFalAiViduQ1ReferenceToVideoRequestsByRequestIdStatusData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: {
+    /**
+     * Whether to include logs (`1`) in the response or not (`0`).
+     */
+    logs?: number
+  }
+  url: '/fal-ai/vidu/q1/reference-to-video/requests/{request_id}/status'
+}
+
+export type GetFalAiViduQ1ReferenceToVideoRequestsByRequestIdStatusResponses = {
+  /**
+   * The request status.
+   */
+  200: SchemaQueueStatus
+}
+
+export type GetFalAiViduQ1ReferenceToVideoRequestsByRequestIdStatusResponse =
+  GetFalAiViduQ1ReferenceToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiViduQ1ReferenceToVideoRequestsByRequestIdStatusResponses]
+
+export type PutFalAiViduQ1ReferenceToVideoRequestsByRequestIdCancelData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/vidu/q1/reference-to-video/requests/{request_id}/cancel'
+}
+
+export type PutFalAiViduQ1ReferenceToVideoRequestsByRequestIdCancelResponses = {
+  /**
+   * The request was cancelled.
+   */
+  200: {
+    /**
+     * Whether the request was cancelled successfully.
+     */
+    success?: boolean
+  }
+}
+
+export type PutFalAiViduQ1ReferenceToVideoRequestsByRequestIdCancelResponse =
+  PutFalAiViduQ1ReferenceToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiViduQ1ReferenceToVideoRequestsByRequestIdCancelResponses]
+
+export type PostFalAiViduQ1ReferenceToVideoData = {
+  body: SchemaViduQ1ReferenceToVideoInput
+  path?: never
+  query?: never
+  url: '/fal-ai/vidu/q1/reference-to-video'
+}
+
+export type PostFalAiViduQ1ReferenceToVideoResponses = {
+  /**
+   * The request status.
+   */
+  200: SchemaQueueStatus
+}
+
+export type PostFalAiViduQ1ReferenceToVideoResponse =
+  PostFalAiViduQ1ReferenceToVideoResponses[keyof PostFalAiViduQ1ReferenceToVideoResponses]
+
+export type GetFalAiViduQ1ReferenceToVideoRequestsByRequestIdData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/vidu/q1/reference-to-video/requests/{request_id}'
+}
+
+export type GetFalAiViduQ1ReferenceToVideoRequestsByRequestIdResponses = {
+  /**
+   * Result of the request.
+   */
+  200: SchemaViduQ1ReferenceToVideoOutput
+}
+
+export type GetFalAiViduQ1ReferenceToVideoRequestsByRequestIdResponse =
+  GetFalAiViduQ1ReferenceToVideoRequestsByRequestIdResponses[keyof GetFalAiViduQ1ReferenceToVideoRequestsByRequestIdResponses]
+
+export type GetFalAiAiAvatarSingleTextRequestsByRequestIdStatusData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: {
+    /**
+     * Whether to include logs (`1`) in the response or not (`0`).
+     */
+    logs?: number
+  }
+  url: '/fal-ai/ai-avatar/single-text/requests/{request_id}/status'
+}
+
+export type GetFalAiAiAvatarSingleTextRequestsByRequestIdStatusResponses = {
+  /**
+   * The request status.
+   */
+  200: SchemaQueueStatus
+}
+
+export type GetFalAiAiAvatarSingleTextRequestsByRequestIdStatusResponse =
+  GetFalAiAiAvatarSingleTextRequestsByRequestIdStatusResponses[keyof GetFalAiAiAvatarSingleTextRequestsByRequestIdStatusResponses]
+
+export type PutFalAiAiAvatarSingleTextRequestsByRequestIdCancelData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/ai-avatar/single-text/requests/{request_id}/cancel'
+}
+
+export type PutFalAiAiAvatarSingleTextRequestsByRequestIdCancelResponses = {
+  /**
+   * The request was cancelled.
+   */
+  200: {
+    /**
+     * Whether the request was cancelled successfully.
+     */
+    success?: boolean
+  }
+}
+
+export type PutFalAiAiAvatarSingleTextRequestsByRequestIdCancelResponse =
+  PutFalAiAiAvatarSingleTextRequestsByRequestIdCancelResponses[keyof PutFalAiAiAvatarSingleTextRequestsByRequestIdCancelResponses]
+
+export type PostFalAiAiAvatarSingleTextData = {
+  body: SchemaAiAvatarSingleTextInput
+  path?: never
+  query?: never
+  url: '/fal-ai/ai-avatar/single-text'
+}
+
+export type PostFalAiAiAvatarSingleTextResponses = {
+  /**
+   * The request status.
+   */
+  200: SchemaQueueStatus
+}
+
+export type PostFalAiAiAvatarSingleTextResponse =
+  PostFalAiAiAvatarSingleTextResponses[keyof PostFalAiAiAvatarSingleTextResponses]
+
+export type GetFalAiAiAvatarSingleTextRequestsByRequestIdData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/ai-avatar/single-text/requests/{request_id}'
+}
+
+export type GetFalAiAiAvatarSingleTextRequestsByRequestIdResponses = {
+  /**
+   * Result of the request.
+   */
+  200: SchemaAiAvatarSingleTextOutput
+}
+
+export type GetFalAiAiAvatarSingleTextRequestsByRequestIdResponse =
+  GetFalAiAiAvatarSingleTextRequestsByRequestIdResponses[keyof GetFalAiAiAvatarSingleTextRequestsByRequestIdResponses]
+
+export type GetFalAiAiAvatarRequestsByRequestIdStatusData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: {
+    /**
+     * Whether to include logs (`1`) in the response or not (`0`).
+     */
+    logs?: number
+  }
+  url: '/fal-ai/ai-avatar/requests/{request_id}/status'
+}
+
+export type GetFalAiAiAvatarRequestsByRequestIdStatusResponses = {
+  /**
+   * The request status.
+   */
+  200: SchemaQueueStatus
+}
+
+export type GetFalAiAiAvatarRequestsByRequestIdStatusResponse =
+  GetFalAiAiAvatarRequestsByRequestIdStatusResponses[keyof GetFalAiAiAvatarRequestsByRequestIdStatusResponses]
+
+export type PutFalAiAiAvatarRequestsByRequestIdCancelData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/ai-avatar/requests/{request_id}/cancel'
+}
+
+export type PutFalAiAiAvatarRequestsByRequestIdCancelResponses = {
+  /**
+   * The request was cancelled.
+   */
+  200: {
+    /**
+     * Whether the request was cancelled successfully.
+     */
+    success?: boolean
+  }
+}
+
+export type PutFalAiAiAvatarRequestsByRequestIdCancelResponse =
+  PutFalAiAiAvatarRequestsByRequestIdCancelResponses[keyof PutFalAiAiAvatarRequestsByRequestIdCancelResponses]
+
+export type PostFalAiAiAvatarData = {
+  body: SchemaAiAvatarInput
+  path?: never
+  query?: never
+  url: '/fal-ai/ai-avatar'
+}
+
+export type PostFalAiAiAvatarResponses = {
+  /**
+   * The request status.
+   */
+  200: SchemaQueueStatus
+}
+
+export type PostFalAiAiAvatarResponse =
+  PostFalAiAiAvatarResponses[keyof PostFalAiAiAvatarResponses]
+
+export type GetFalAiAiAvatarRequestsByRequestIdData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/ai-avatar/requests/{request_id}'
+}
+
+export type GetFalAiAiAvatarRequestsByRequestIdResponses = {
+  /**
+   * Result of the request.
+   */
+  200: SchemaAiAvatarOutput
+}
+
+export type GetFalAiAiAvatarRequestsByRequestIdResponse =
+  GetFalAiAiAvatarRequestsByRequestIdResponses[keyof GetFalAiAiAvatarRequestsByRequestIdResponses]
+
+export type GetFalAiAiAvatarMultiTextRequestsByRequestIdStatusData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: {
+    /**
+     * Whether to include logs (`1`) in the response or not (`0`).
+     */
+    logs?: number
+  }
+  url: '/fal-ai/ai-avatar/multi-text/requests/{request_id}/status'
+}
+
+export type GetFalAiAiAvatarMultiTextRequestsByRequestIdStatusResponses = {
+  /**
+   * The request status.
+   */
+  200: SchemaQueueStatus
+}
+
+export type GetFalAiAiAvatarMultiTextRequestsByRequestIdStatusResponse =
+  GetFalAiAiAvatarMultiTextRequestsByRequestIdStatusResponses[keyof GetFalAiAiAvatarMultiTextRequestsByRequestIdStatusResponses]
+
+export type PutFalAiAiAvatarMultiTextRequestsByRequestIdCancelData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/ai-avatar/multi-text/requests/{request_id}/cancel'
+}
+
+export type PutFalAiAiAvatarMultiTextRequestsByRequestIdCancelResponses = {
+  /**
+   * The request was cancelled.
+   */
+  200: {
+    /**
+     * Whether the request was cancelled successfully.
+     */
+    success?: boolean
+  }
+}
+
+export type PutFalAiAiAvatarMultiTextRequestsByRequestIdCancelResponse =
+  PutFalAiAiAvatarMultiTextRequestsByRequestIdCancelResponses[keyof PutFalAiAiAvatarMultiTextRequestsByRequestIdCancelResponses]
+
+export type PostFalAiAiAvatarMultiTextData = {
+  body: SchemaAiAvatarMultiTextInput
+  path?: never
+  query?: never
+  url: '/fal-ai/ai-avatar/multi-text'
+}
+
+export type PostFalAiAiAvatarMultiTextResponses = {
+  /**
+   * The request status.
+   */
+  200: SchemaQueueStatus
+}
+
+export type PostFalAiAiAvatarMultiTextResponse =
+  PostFalAiAiAvatarMultiTextResponses[keyof PostFalAiAiAvatarMultiTextResponses]
+
+export type GetFalAiAiAvatarMultiTextRequestsByRequestIdData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/ai-avatar/multi-text/requests/{request_id}'
+}
+
+export type GetFalAiAiAvatarMultiTextRequestsByRequestIdResponses = {
+  /**
+   * Result of the request.
+   */
+  200: SchemaAiAvatarMultiTextOutput
+}
+
+export type GetFalAiAiAvatarMultiTextRequestsByRequestIdResponse =
+  GetFalAiAiAvatarMultiTextRequestsByRequestIdResponses[keyof GetFalAiAiAvatarMultiTextRequestsByRequestIdResponses]
+
+export type GetFalAiAiAvatarMultiRequestsByRequestIdStatusData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: {
+    /**
+     * Whether to include logs (`1`) in the response or not (`0`).
+     */
+    logs?: number
+  }
+  url: '/fal-ai/ai-avatar/multi/requests/{request_id}/status'
+}
+
+export type GetFalAiAiAvatarMultiRequestsByRequestIdStatusResponses = {
+  /**
+   * The request status.
+   */
+  200: SchemaQueueStatus
+}
+
+export type GetFalAiAiAvatarMultiRequestsByRequestIdStatusResponse =
+  GetFalAiAiAvatarMultiRequestsByRequestIdStatusResponses[keyof GetFalAiAiAvatarMultiRequestsByRequestIdStatusResponses]
+
+export type PutFalAiAiAvatarMultiRequestsByRequestIdCancelData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/ai-avatar/multi/requests/{request_id}/cancel'
+}
+
+export type PutFalAiAiAvatarMultiRequestsByRequestIdCancelResponses = {
+  /**
+   * The request was cancelled.
+   */
+  200: {
+    /**
+     * Whether the request was cancelled successfully.
+     */
+    success?: boolean
+  }
+}
+
+export type PutFalAiAiAvatarMultiRequestsByRequestIdCancelResponse =
+  PutFalAiAiAvatarMultiRequestsByRequestIdCancelResponses[keyof PutFalAiAiAvatarMultiRequestsByRequestIdCancelResponses]
+
+export type PostFalAiAiAvatarMultiData = {
+  body: SchemaAiAvatarMultiInput
+  path?: never
+  query?: never
+  url: '/fal-ai/ai-avatar/multi'
+}
+
+export type PostFalAiAiAvatarMultiResponses = {
+  /**
+   * The request status.
+   */
+  200: SchemaQueueStatus
+}
+
+export type PostFalAiAiAvatarMultiResponse =
+  PostFalAiAiAvatarMultiResponses[keyof PostFalAiAiAvatarMultiResponses]
+
+export type GetFalAiAiAvatarMultiRequestsByRequestIdData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/ai-avatar/multi/requests/{request_id}'
+}
+
+export type GetFalAiAiAvatarMultiRequestsByRequestIdResponses = {
+  /**
+   * Result of the request.
+   */
+  200: SchemaAiAvatarMultiOutput
+}
+
+export type GetFalAiAiAvatarMultiRequestsByRequestIdResponse =
+  GetFalAiAiAvatarMultiRequestsByRequestIdResponses[keyof GetFalAiAiAvatarMultiRequestsByRequestIdResponses]
+
+export type GetFalAiMinimaxHailuo02ProImageToVideoRequestsByRequestIdStatusData =
+  {
+    body?: never
+    path: {
+      /**
+       * Request ID
+       */
+      request_id: string
+    }
+    query?: {
+      /**
+       * Whether to include logs (`1`) in the response or not (`0`).
+       */
+      logs?: number
+    }
+    url: '/fal-ai/minimax/hailuo-02/pro/image-to-video/requests/{request_id}/status'
+  }
+
+export type GetFalAiMinimaxHailuo02ProImageToVideoRequestsByRequestIdStatusResponses =
+  {
+    /**
+     * The request status.
+     */
+    200: SchemaQueueStatus
+  }
+
+export type GetFalAiMinimaxHailuo02ProImageToVideoRequestsByRequestIdStatusResponse =
+  GetFalAiMinimaxHailuo02ProImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiMinimaxHailuo02ProImageToVideoRequestsByRequestIdStatusResponses]
+
+export type PutFalAiMinimaxHailuo02ProImageToVideoRequestsByRequestIdCancelData =
+  {
+    body?: never
+    path: {
+      /**
+       * Request ID
+       */
+      request_id: string
+    }
+    query?: never
+    url: '/fal-ai/minimax/hailuo-02/pro/image-to-video/requests/{request_id}/cancel'
+  }
+
+export type PutFalAiMinimaxHailuo02ProImageToVideoRequestsByRequestIdCancelResponses =
+  {
+    /**
+     * The request was cancelled.
+     */
+    200: {
+      /**
+       * Whether the request was cancelled successfully.
+       */
+      success?: boolean
+    }
+  }
+
+export type PutFalAiMinimaxHailuo02ProImageToVideoRequestsByRequestIdCancelResponse =
+  PutFalAiMinimaxHailuo02ProImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiMinimaxHailuo02ProImageToVideoRequestsByRequestIdCancelResponses]
+
+export type PostFalAiMinimaxHailuo02ProImageToVideoData = {
+  body: SchemaMinimaxHailuo02ProImageToVideoInput
+  path?: never
+  query?: never
+  url: '/fal-ai/minimax/hailuo-02/pro/image-to-video'
+}
+
+export type PostFalAiMinimaxHailuo02ProImageToVideoResponses = {
+  /**
+   * The request status.
+   */
+  200: SchemaQueueStatus
+}
+
+export type PostFalAiMinimaxHailuo02ProImageToVideoResponse =
+  PostFalAiMinimaxHailuo02ProImageToVideoResponses[keyof PostFalAiMinimaxHailuo02ProImageToVideoResponses]
+
+export type GetFalAiMinimaxHailuo02ProImageToVideoRequestsByRequestIdData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/minimax/hailuo-02/pro/image-to-video/requests/{request_id}'
+}
+
+export type GetFalAiMinimaxHailuo02ProImageToVideoRequestsByRequestIdResponses =
+  {
+    /**
+     * Result of the request.
+     */
+    200: SchemaMinimaxHailuo02ProImageToVideoOutput
+  }
+
+export type GetFalAiMinimaxHailuo02ProImageToVideoRequestsByRequestIdResponse =
+  GetFalAiMinimaxHailuo02ProImageToVideoRequestsByRequestIdResponses[keyof GetFalAiMinimaxHailuo02ProImageToVideoRequestsByRequestIdResponses]
+
+export type GetFalAiBytedanceSeedanceV1LiteImageToVideoRequestsByRequestIdStatusData =
+  {
+    body?: never
+    path: {
+      /**
+       * Request ID
+       */
+      request_id: string
+    }
+    query?: {
+      /**
+       * Whether to include logs (`1`) in the response or not (`0`).
+       */
+      logs?: number
+    }
+    url: '/fal-ai/bytedance/seedance/v1/lite/image-to-video/requests/{request_id}/status'
+  }
+
+export type GetFalAiBytedanceSeedanceV1LiteImageToVideoRequestsByRequestIdStatusResponses =
+  {
+    /**
+     * The request status.
+     */
+    200: SchemaQueueStatus
+  }
+
+export type GetFalAiBytedanceSeedanceV1LiteImageToVideoRequestsByRequestIdStatusResponse =
+  GetFalAiBytedanceSeedanceV1LiteImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiBytedanceSeedanceV1LiteImageToVideoRequestsByRequestIdStatusResponses]
+
+export type PutFalAiBytedanceSeedanceV1LiteImageToVideoRequestsByRequestIdCancelData =
+  {
+    body?: never
+    path: {
+      /**
+       * Request ID
+       */
+      request_id: string
+    }
+    query?: never
+    url: '/fal-ai/bytedance/seedance/v1/lite/image-to-video/requests/{request_id}/cancel'
+  }
+
+export type PutFalAiBytedanceSeedanceV1LiteImageToVideoRequestsByRequestIdCancelResponses =
+  {
+    /**
+     * The request was cancelled.
+     */
+    200: {
+      /**
+       * Whether the request was cancelled successfully.
+       */
+      success?: boolean
+    }
+  }
+
+export type PutFalAiBytedanceSeedanceV1LiteImageToVideoRequestsByRequestIdCancelResponse =
+  PutFalAiBytedanceSeedanceV1LiteImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiBytedanceSeedanceV1LiteImageToVideoRequestsByRequestIdCancelResponses]
+
+export type PostFalAiBytedanceSeedanceV1LiteImageToVideoData = {
+  body: SchemaBytedanceSeedanceV1LiteImageToVideoInput
+  path?: never
+  query?: never
+  url: '/fal-ai/bytedance/seedance/v1/lite/image-to-video'
+}
+
+export type PostFalAiBytedanceSeedanceV1LiteImageToVideoResponses = {
+  /**
+   * The request status.
+   */
+  200: SchemaQueueStatus
+}
+
+export type PostFalAiBytedanceSeedanceV1LiteImageToVideoResponse =
+  PostFalAiBytedanceSeedanceV1LiteImageToVideoResponses[keyof PostFalAiBytedanceSeedanceV1LiteImageToVideoResponses]
+
+export type GetFalAiBytedanceSeedanceV1LiteImageToVideoRequestsByRequestIdData =
+  {
+    body?: never
+    path: {
+      /**
+       * Request ID
+       */
+      request_id: string
+    }
+    query?: never
+    url: '/fal-ai/bytedance/seedance/v1/lite/image-to-video/requests/{request_id}'
+  }
+
+export type GetFalAiBytedanceSeedanceV1LiteImageToVideoRequestsByRequestIdResponses =
+  {
+    /**
+     * Result of the request.
+     */
+    200: SchemaBytedanceSeedanceV1LiteImageToVideoOutput
+  }
+
+export type GetFalAiBytedanceSeedanceV1LiteImageToVideoRequestsByRequestIdResponse =
+  GetFalAiBytedanceSeedanceV1LiteImageToVideoRequestsByRequestIdResponses[keyof GetFalAiBytedanceSeedanceV1LiteImageToVideoRequestsByRequestIdResponses]
+
+export type GetFalAiHunyuanAvatarRequestsByRequestIdStatusData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: {
+    /**
+     * Whether to include logs (`1`) in the response or not (`0`).
+     */
+    logs?: number
+  }
+  url: '/fal-ai/hunyuan-avatar/requests/{request_id}/status'
+}
+
+export type GetFalAiHunyuanAvatarRequestsByRequestIdStatusResponses = {
+  /**
+   * The request status.
+   */
+  200: SchemaQueueStatus
+}
+
+export type GetFalAiHunyuanAvatarRequestsByRequestIdStatusResponse =
+  GetFalAiHunyuanAvatarRequestsByRequestIdStatusResponses[keyof GetFalAiHunyuanAvatarRequestsByRequestIdStatusResponses]
+
+export type PutFalAiHunyuanAvatarRequestsByRequestIdCancelData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/hunyuan-avatar/requests/{request_id}/cancel'
+}
+
+export type PutFalAiHunyuanAvatarRequestsByRequestIdCancelResponses = {
+  /**
+   * The request was cancelled.
+   */
+  200: {
+    /**
+     * Whether the request was cancelled successfully.
+     */
+    success?: boolean
+  }
+}
+
+export type PutFalAiHunyuanAvatarRequestsByRequestIdCancelResponse =
+  PutFalAiHunyuanAvatarRequestsByRequestIdCancelResponses[keyof PutFalAiHunyuanAvatarRequestsByRequestIdCancelResponses]
+
+export type PostFalAiHunyuanAvatarData = {
+  body: SchemaHunyuanAvatarInput
+  path?: never
+  query?: never
+  url: '/fal-ai/hunyuan-avatar'
+}
+
+export type PostFalAiHunyuanAvatarResponses = {
+  /**
+   * The request status.
+   */
+  200: SchemaQueueStatus
+}
+
+export type PostFalAiHunyuanAvatarResponse =
+  PostFalAiHunyuanAvatarResponses[keyof PostFalAiHunyuanAvatarResponses]
+
+export type GetFalAiHunyuanAvatarRequestsByRequestIdData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/hunyuan-avatar/requests/{request_id}'
+}
+
+export type GetFalAiHunyuanAvatarRequestsByRequestIdResponses = {
+  /**
+   * Result of the request.
+   */
+  200: SchemaHunyuanAvatarOutput
+}
+
+export type GetFalAiHunyuanAvatarRequestsByRequestIdResponse =
+  GetFalAiHunyuanAvatarRequestsByRequestIdResponses[keyof GetFalAiHunyuanAvatarRequestsByRequestIdResponses]
+
+export type GetFalAiKlingVideoV21ProImageToVideoRequestsByRequestIdStatusData =
+  {
+    body?: never
+    path: {
+      /**
+       * Request ID
+       */
+      request_id: string
+    }
+    query?: {
+      /**
+       * Whether to include logs (`1`) in the response or not (`0`).
+       */
+      logs?: number
+    }
+    url: '/fal-ai/kling-video/v2.1/pro/image-to-video/requests/{request_id}/status'
+  }
+
+export type GetFalAiKlingVideoV21ProImageToVideoRequestsByRequestIdStatusResponses =
+  {
+    /**
+     * The request status.
+     */
+    200: SchemaQueueStatus
+  }
+
+export type GetFalAiKlingVideoV21ProImageToVideoRequestsByRequestIdStatusResponse =
+  GetFalAiKlingVideoV21ProImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoV21ProImageToVideoRequestsByRequestIdStatusResponses]
+
+export type PutFalAiKlingVideoV21ProImageToVideoRequestsByRequestIdCancelData =
+  {
+    body?: never
+    path: {
+      /**
+       * Request ID
+       */
+      request_id: string
+    }
+    query?: never
+    url: '/fal-ai/kling-video/v2.1/pro/image-to-video/requests/{request_id}/cancel'
+  }
+
+export type PutFalAiKlingVideoV21ProImageToVideoRequestsByRequestIdCancelResponses =
+  {
+    /**
+     * The request was cancelled.
+     */
+    200: {
+      /**
+       * Whether the request was cancelled successfully.
+       */
+      success?: boolean
+    }
+  }
+
+export type PutFalAiKlingVideoV21ProImageToVideoRequestsByRequestIdCancelResponse =
+  PutFalAiKlingVideoV21ProImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoV21ProImageToVideoRequestsByRequestIdCancelResponses]
+
+export type PostFalAiKlingVideoV21ProImageToVideoData = {
+  body: SchemaKlingVideoV21ProImageToVideoInput
+  path?: never
+  query?: never
+  url: '/fal-ai/kling-video/v2.1/pro/image-to-video'
+}
+
+export type PostFalAiKlingVideoV21ProImageToVideoResponses = {
+  /**
+   * The request status.
+   */
+  200: SchemaQueueStatus
+}
+
+export type PostFalAiKlingVideoV21ProImageToVideoResponse =
+  PostFalAiKlingVideoV21ProImageToVideoResponses[keyof PostFalAiKlingVideoV21ProImageToVideoResponses]
+
+export type GetFalAiKlingVideoV21ProImageToVideoRequestsByRequestIdData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/kling-video/v2.1/pro/image-to-video/requests/{request_id}'
+}
+
+export type GetFalAiKlingVideoV21ProImageToVideoRequestsByRequestIdResponses = {
+  /**
+   * Result of the request.
+   */
+  200: SchemaKlingVideoV21ProImageToVideoOutput
+}
+
+export type GetFalAiKlingVideoV21ProImageToVideoRequestsByRequestIdResponse =
+  GetFalAiKlingVideoV21ProImageToVideoRequestsByRequestIdResponses[keyof GetFalAiKlingVideoV21ProImageToVideoRequestsByRequestIdResponses]
+
+export type GetFalAiHunyuanPortraitRequestsByRequestIdStatusData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: {
+    /**
+     * Whether to include logs (`1`) in the response or not (`0`).
+     */
+    logs?: number
+  }
+  url: '/fal-ai/hunyuan-portrait/requests/{request_id}/status'
+}
+
+export type GetFalAiHunyuanPortraitRequestsByRequestIdStatusResponses = {
+  /**
+   * The request status.
+   */
+  200: SchemaQueueStatus
+}
+
+export type GetFalAiHunyuanPortraitRequestsByRequestIdStatusResponse =
+  GetFalAiHunyuanPortraitRequestsByRequestIdStatusResponses[keyof GetFalAiHunyuanPortraitRequestsByRequestIdStatusResponses]
+
+export type PutFalAiHunyuanPortraitRequestsByRequestIdCancelData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/hunyuan-portrait/requests/{request_id}/cancel'
+}
+
+export type PutFalAiHunyuanPortraitRequestsByRequestIdCancelResponses = {
+  /**
+   * The request was cancelled.
+   */
+  200: {
+    /**
+     * Whether the request was cancelled successfully.
+     */
+    success?: boolean
+  }
+}
+
+export type PutFalAiHunyuanPortraitRequestsByRequestIdCancelResponse =
+  PutFalAiHunyuanPortraitRequestsByRequestIdCancelResponses[keyof PutFalAiHunyuanPortraitRequestsByRequestIdCancelResponses]
+
+export type PostFalAiHunyuanPortraitData = {
+  body: SchemaHunyuanPortraitInput
+  path?: never
+  query?: never
+  url: '/fal-ai/hunyuan-portrait'
+}
+
+export type PostFalAiHunyuanPortraitResponses = {
+  /**
+   * The request status.
+   */
+  200: SchemaQueueStatus
+}
+
+export type PostFalAiHunyuanPortraitResponse =
+  PostFalAiHunyuanPortraitResponses[keyof PostFalAiHunyuanPortraitResponses]
+
+export type GetFalAiHunyuanPortraitRequestsByRequestIdData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/hunyuan-portrait/requests/{request_id}'
+}
+
+export type GetFalAiHunyuanPortraitRequestsByRequestIdResponses = {
+  /**
+   * Result of the request.
+   */
+  200: SchemaHunyuanPortraitOutput
+}
+
+export type GetFalAiHunyuanPortraitRequestsByRequestIdResponse =
+  GetFalAiHunyuanPortraitRequestsByRequestIdResponses[keyof GetFalAiHunyuanPortraitRequestsByRequestIdResponses]
+
+export type GetFalAiKlingVideoV16StandardElementsRequestsByRequestIdStatusData =
+  {
+    body?: never
+    path: {
+      /**
+       * Request ID
+       */
+      request_id: string
+    }
+    query?: {
+      /**
+       * Whether to include logs (`1`) in the response or not (`0`).
+       */
+      logs?: number
+    }
+    url: '/fal-ai/kling-video/v1.6/standard/elements/requests/{request_id}/status'
+  }
+
+export type GetFalAiKlingVideoV16StandardElementsRequestsByRequestIdStatusResponses =
+  {
+    /**
+     * The request status.
+     */
+    200: SchemaQueueStatus
+  }
+
+export type GetFalAiKlingVideoV16StandardElementsRequestsByRequestIdStatusResponse =
+  GetFalAiKlingVideoV16StandardElementsRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoV16StandardElementsRequestsByRequestIdStatusResponses]
+
+export type PutFalAiKlingVideoV16StandardElementsRequestsByRequestIdCancelData =
+  {
+    body?: never
+    path: {
+      /**
+       * Request ID
+       */
+      request_id: string
+    }
+    query?: never
+    url: '/fal-ai/kling-video/v1.6/standard/elements/requests/{request_id}/cancel'
+  }
+
+export type PutFalAiKlingVideoV16StandardElementsRequestsByRequestIdCancelResponses =
+  {
+    /**
+     * The request was cancelled.
+     */
+    200: {
+      /**
+       * Whether the request was cancelled successfully.
+       */
+      success?: boolean
+    }
+  }
+
+export type PutFalAiKlingVideoV16StandardElementsRequestsByRequestIdCancelResponse =
+  PutFalAiKlingVideoV16StandardElementsRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoV16StandardElementsRequestsByRequestIdCancelResponses]
+
+export type PostFalAiKlingVideoV16StandardElementsData = {
+  body: SchemaKlingVideoV16StandardElementsInput
+  path?: never
+  query?: never
+  url: '/fal-ai/kling-video/v1.6/standard/elements'
+}
+
+export type PostFalAiKlingVideoV16StandardElementsResponses = {
+  /**
+   * The request status.
+   */
+  200: SchemaQueueStatus
+}
+
+export type PostFalAiKlingVideoV16StandardElementsResponse =
+  PostFalAiKlingVideoV16StandardElementsResponses[keyof PostFalAiKlingVideoV16StandardElementsResponses]
+
+export type GetFalAiKlingVideoV16StandardElementsRequestsByRequestIdData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/kling-video/v1.6/standard/elements/requests/{request_id}'
+}
+
+export type GetFalAiKlingVideoV16StandardElementsRequestsByRequestIdResponses =
+  {
+    /**
+     * Result of the request.
+     */
+    200: SchemaKlingVideoV16StandardElementsOutput
+  }
+
+export type GetFalAiKlingVideoV16StandardElementsRequestsByRequestIdResponse =
+  GetFalAiKlingVideoV16StandardElementsRequestsByRequestIdResponses[keyof GetFalAiKlingVideoV16StandardElementsRequestsByRequestIdResponses]
+
+export type GetFalAiKlingVideoV16ProElementsRequestsByRequestIdStatusData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: {
+    /**
+     * Whether to include logs (`1`) in the response or not (`0`).
+     */
+    logs?: number
+  }
+  url: '/fal-ai/kling-video/v1.6/pro/elements/requests/{request_id}/status'
+}
+
+export type GetFalAiKlingVideoV16ProElementsRequestsByRequestIdStatusResponses =
+  {
+    /**
+     * The request status.
+     */
+    200: SchemaQueueStatus
+  }
+
+export type GetFalAiKlingVideoV16ProElementsRequestsByRequestIdStatusResponse =
+  GetFalAiKlingVideoV16ProElementsRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoV16ProElementsRequestsByRequestIdStatusResponses]
+
+export type PutFalAiKlingVideoV16ProElementsRequestsByRequestIdCancelData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/kling-video/v1.6/pro/elements/requests/{request_id}/cancel'
+}
+
+export type PutFalAiKlingVideoV16ProElementsRequestsByRequestIdCancelResponses =
+  {
+    /**
+     * The request was cancelled.
+     */
+    200: {
+      /**
+       * Whether the request was cancelled successfully.
+       */
+      success?: boolean
+    }
+  }
+
+export type PutFalAiKlingVideoV16ProElementsRequestsByRequestIdCancelResponse =
+  PutFalAiKlingVideoV16ProElementsRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoV16ProElementsRequestsByRequestIdCancelResponses]
+
+export type PostFalAiKlingVideoV16ProElementsData = {
+  body: SchemaKlingVideoV16ProElementsInput
+  path?: never
+  query?: never
+  url: '/fal-ai/kling-video/v1.6/pro/elements'
+}
+
+export type PostFalAiKlingVideoV16ProElementsResponses = {
+  /**
+   * The request status.
+   */
+  200: SchemaQueueStatus
+}
+
+export type PostFalAiKlingVideoV16ProElementsResponse =
+  PostFalAiKlingVideoV16ProElementsResponses[keyof PostFalAiKlingVideoV16ProElementsResponses]
+
+export type GetFalAiKlingVideoV16ProElementsRequestsByRequestIdData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/kling-video/v1.6/pro/elements/requests/{request_id}'
+}
+
+export type GetFalAiKlingVideoV16ProElementsRequestsByRequestIdResponses = {
+  /**
+   * Result of the request.
+   */
+  200: SchemaKlingVideoV16ProElementsOutput
+}
+
+export type GetFalAiKlingVideoV16ProElementsRequestsByRequestIdResponse =
+  GetFalAiKlingVideoV16ProElementsRequestsByRequestIdResponses[keyof GetFalAiKlingVideoV16ProElementsRequestsByRequestIdResponses]
+
+export type GetFalAiLtxVideo13bDistilledImageToVideoRequestsByRequestIdStatusData =
+  {
+    body?: never
+    path: {
+      /**
+       * Request ID
+       */
+      request_id: string
+    }
+    query?: {
+      /**
+       * Whether to include logs (`1`) in the response or not (`0`).
+       */
+      logs?: number
+    }
+    url: '/fal-ai/ltx-video-13b-distilled/image-to-video/requests/{request_id}/status'
+  }
+
+export type GetFalAiLtxVideo13bDistilledImageToVideoRequestsByRequestIdStatusResponses =
+  {
+    /**
+     * The request status.
+     */
+    200: SchemaQueueStatus
+  }
+
+export type GetFalAiLtxVideo13bDistilledImageToVideoRequestsByRequestIdStatusResponse =
+  GetFalAiLtxVideo13bDistilledImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiLtxVideo13bDistilledImageToVideoRequestsByRequestIdStatusResponses]
+
+export type PutFalAiLtxVideo13bDistilledImageToVideoRequestsByRequestIdCancelData =
+  {
+    body?: never
+    path: {
+      /**
+       * Request ID
+       */
+      request_id: string
+    }
+    query?: never
+    url: '/fal-ai/ltx-video-13b-distilled/image-to-video/requests/{request_id}/cancel'
+  }
+
+export type PutFalAiLtxVideo13bDistilledImageToVideoRequestsByRequestIdCancelResponses =
+  {
+    /**
+     * The request was cancelled.
+     */
+    200: {
+      /**
+       * Whether the request was cancelled successfully.
+       */
+      success?: boolean
+    }
+  }
+
+export type PutFalAiLtxVideo13bDistilledImageToVideoRequestsByRequestIdCancelResponse =
+  PutFalAiLtxVideo13bDistilledImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiLtxVideo13bDistilledImageToVideoRequestsByRequestIdCancelResponses]
+
+export type PostFalAiLtxVideo13bDistilledImageToVideoData = {
+  body: SchemaLtxVideo13bDistilledImageToVideoInput
+  path?: never
+  query?: never
+  url: '/fal-ai/ltx-video-13b-distilled/image-to-video'
+}
+
+export type PostFalAiLtxVideo13bDistilledImageToVideoResponses = {
+  /**
+   * The request status.
+   */
+  200: SchemaQueueStatus
+}
+
+export type PostFalAiLtxVideo13bDistilledImageToVideoResponse =
+  PostFalAiLtxVideo13bDistilledImageToVideoResponses[keyof PostFalAiLtxVideo13bDistilledImageToVideoResponses]
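+
+// Editorial sketch (illustrative only): the `logs` query parameter on the
+// status endpoints is numeric (`1` to include logs, `0` to omit them), so a
+// typed poller serializes it explicitly. BASE and HEADERS are the assumed
+// host and auth header from the sketch near the top of this file:
+//
+//   async function pollStatus(
+//     data: GetFalAiLtxVideo13bDistilledImageToVideoRequestsByRequestIdStatusData,
+//   ): Promise<SchemaQueueStatus> {
+//     const url = BASE + data.url.replace('{request_id}', data.path.request_id)
+//     const qs = data.query?.logs != null ? `?logs=${data.query.logs}` : ''
+//     const res = await fetch(url + qs, { headers: HEADERS })
+//     return res.json()
+//   }
+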
+export type GetFalAiLtxVideo13bDistilledImageToVideoRequestsByRequestIdData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/ltx-video-13b-distilled/image-to-video/requests/{request_id}'
+}
+
+export type GetFalAiLtxVideo13bDistilledImageToVideoRequestsByRequestIdResponses =
+  {
+    /**
+     * Result of the request.
+     */
+    200: SchemaLtxVideo13bDistilledImageToVideoOutput
+  }
+
+export type GetFalAiLtxVideo13bDistilledImageToVideoRequestsByRequestIdResponse =
+  GetFalAiLtxVideo13bDistilledImageToVideoRequestsByRequestIdResponses[keyof GetFalAiLtxVideo13bDistilledImageToVideoRequestsByRequestIdResponses]
+
+export type GetFalAiLtxVideo13bDevImageToVideoRequestsByRequestIdStatusData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: {
+    /**
+     * Whether to include logs (`1`) in the response or not (`0`).
+     */
+    logs?: number
+  }
+  url: '/fal-ai/ltx-video-13b-dev/image-to-video/requests/{request_id}/status'
+}
+
+export type GetFalAiLtxVideo13bDevImageToVideoRequestsByRequestIdStatusResponses =
+  {
+    /**
+     * The request status.
+     */
+    200: SchemaQueueStatus
+  }
+
+export type GetFalAiLtxVideo13bDevImageToVideoRequestsByRequestIdStatusResponse =
+  GetFalAiLtxVideo13bDevImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiLtxVideo13bDevImageToVideoRequestsByRequestIdStatusResponses]
+
+export type PutFalAiLtxVideo13bDevImageToVideoRequestsByRequestIdCancelData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/ltx-video-13b-dev/image-to-video/requests/{request_id}/cancel'
+}
+
+export type PutFalAiLtxVideo13bDevImageToVideoRequestsByRequestIdCancelResponses =
+  {
+    /**
+     * The request was cancelled.
+     */
+    200: {
+      /**
+       * Whether the request was cancelled successfully.
+       */
+      success?: boolean
+    }
+  }
+
+export type PutFalAiLtxVideo13bDevImageToVideoRequestsByRequestIdCancelResponse =
+  PutFalAiLtxVideo13bDevImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiLtxVideo13bDevImageToVideoRequestsByRequestIdCancelResponses]
+
+export type PostFalAiLtxVideo13bDevImageToVideoData = {
+  body: SchemaLtxVideo13bDevImageToVideoInput
+  path?: never
+  query?: never
+  url: '/fal-ai/ltx-video-13b-dev/image-to-video'
+}
+
+export type PostFalAiLtxVideo13bDevImageToVideoResponses = {
+  /**
+   * The request status.
+   */
+  200: SchemaQueueStatus
+}
+
+export type PostFalAiLtxVideo13bDevImageToVideoResponse =
+  PostFalAiLtxVideo13bDevImageToVideoResponses[keyof PostFalAiLtxVideo13bDevImageToVideoResponses]
+
+export type GetFalAiLtxVideo13bDevImageToVideoRequestsByRequestIdData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/ltx-video-13b-dev/image-to-video/requests/{request_id}'
+}
+
+export type GetFalAiLtxVideo13bDevImageToVideoRequestsByRequestIdResponses = {
+  /**
+   * Result of the request.
+   */
+  200: SchemaLtxVideo13bDevImageToVideoOutput
+}
+
+export type GetFalAiLtxVideo13bDevImageToVideoRequestsByRequestIdResponse =
+  GetFalAiLtxVideo13bDevImageToVideoRequestsByRequestIdResponses[keyof GetFalAiLtxVideo13bDevImageToVideoRequestsByRequestIdResponses]
+
+export type GetFalAiLtxVideoLoraImageToVideoRequestsByRequestIdStatusData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: {
+    /**
+     * Whether to include logs (`1`) in the response or not (`0`).
+     */
+    logs?: number
+  }
+  url: '/fal-ai/ltx-video-lora/image-to-video/requests/{request_id}/status'
+}
+
+export type GetFalAiLtxVideoLoraImageToVideoRequestsByRequestIdStatusResponses =
+  {
+    /**
+     * The request status.
+     */
+    200: SchemaQueueStatus
+  }
+
+export type GetFalAiLtxVideoLoraImageToVideoRequestsByRequestIdStatusResponse =
+  GetFalAiLtxVideoLoraImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiLtxVideoLoraImageToVideoRequestsByRequestIdStatusResponses]
+
+export type PutFalAiLtxVideoLoraImageToVideoRequestsByRequestIdCancelData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/ltx-video-lora/image-to-video/requests/{request_id}/cancel'
+}
+
+export type PutFalAiLtxVideoLoraImageToVideoRequestsByRequestIdCancelResponses =
+  {
+    /**
+     * The request was cancelled.
+     */
+    200: {
+      /**
+       * Whether the request was cancelled successfully.
+       */
+      success?: boolean
+    }
+  }
+
+export type PutFalAiLtxVideoLoraImageToVideoRequestsByRequestIdCancelResponse =
+  PutFalAiLtxVideoLoraImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiLtxVideoLoraImageToVideoRequestsByRequestIdCancelResponses]
+
+export type PostFalAiLtxVideoLoraImageToVideoData = {
+  body: SchemaLtxVideoLoraImageToVideoInput
+  path?: never
+  query?: never
+  url: '/fal-ai/ltx-video-lora/image-to-video'
+}
+
+export type PostFalAiLtxVideoLoraImageToVideoResponses = {
+  /**
+   * The request status.
+   */
+  200: SchemaQueueStatus
+}
+
+export type PostFalAiLtxVideoLoraImageToVideoResponse =
+  PostFalAiLtxVideoLoraImageToVideoResponses[keyof PostFalAiLtxVideoLoraImageToVideoResponses]
+
+export type GetFalAiLtxVideoLoraImageToVideoRequestsByRequestIdData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/ltx-video-lora/image-to-video/requests/{request_id}'
+}
+
+export type GetFalAiLtxVideoLoraImageToVideoRequestsByRequestIdResponses = {
+  /**
+   * Result of the request.
+   */
+  200: SchemaLtxVideoLoraImageToVideoOutput
+}
+
+export type GetFalAiLtxVideoLoraImageToVideoRequestsByRequestIdResponse =
+  GetFalAiLtxVideoLoraImageToVideoRequestsByRequestIdResponses[keyof GetFalAiLtxVideoLoraImageToVideoRequestsByRequestIdResponses]
+
+export type GetFalAiPixverseV45TransitionRequestsByRequestIdStatusData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: {
+    /**
+     * Whether to include logs (`1`) in the response or not (`0`).
+     */
+    logs?: number
+  }
+  url: '/fal-ai/pixverse/v4.5/transition/requests/{request_id}/status'
+}
+
+export type GetFalAiPixverseV45TransitionRequestsByRequestIdStatusResponses = {
+  /**
+   * The request status.
+   */
+  200: SchemaQueueStatus
+}
+
+export type GetFalAiPixverseV45TransitionRequestsByRequestIdStatusResponse =
+  GetFalAiPixverseV45TransitionRequestsByRequestIdStatusResponses[keyof GetFalAiPixverseV45TransitionRequestsByRequestIdStatusResponses]
+
+export type PutFalAiPixverseV45TransitionRequestsByRequestIdCancelData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/pixverse/v4.5/transition/requests/{request_id}/cancel'
+}
+
+export type PutFalAiPixverseV45TransitionRequestsByRequestIdCancelResponses = {
+  /**
+   * The request was cancelled.
+   */
+  200: {
+    /**
+     * Whether the request was cancelled successfully.
+     */
+    success?: boolean
+  }
+}
+
+export type PutFalAiPixverseV45TransitionRequestsByRequestIdCancelResponse =
+  PutFalAiPixverseV45TransitionRequestsByRequestIdCancelResponses[keyof PutFalAiPixverseV45TransitionRequestsByRequestIdCancelResponses]
+
+export type PostFalAiPixverseV45TransitionData = {
+  body: SchemaPixverseV45TransitionInput
+  path?: never
+  query?: never
+  url: '/fal-ai/pixverse/v4.5/transition'
+}
+
+export type PostFalAiPixverseV45TransitionResponses = {
+  /**
+   * The request status.
+   */
+  200: SchemaQueueStatus
+}
+
+export type PostFalAiPixverseV45TransitionResponse =
+  PostFalAiPixverseV45TransitionResponses[keyof PostFalAiPixverseV45TransitionResponses]
+
+export type GetFalAiPixverseV45TransitionRequestsByRequestIdData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/pixverse/v4.5/transition/requests/{request_id}'
+}
+
+export type GetFalAiPixverseV45TransitionRequestsByRequestIdResponses = {
+  /**
+   * Result of the request.
+   */
+  200: SchemaPixverseV45TransitionOutput
+}
+
+export type GetFalAiPixverseV45TransitionRequestsByRequestIdResponse =
+  GetFalAiPixverseV45TransitionRequestsByRequestIdResponses[keyof GetFalAiPixverseV45TransitionRequestsByRequestIdResponses]
+
+export type GetFalAiPixverseV45ImageToVideoFastRequestsByRequestIdStatusData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: {
+    /**
+     * Whether to include logs (`1`) in the response or not (`0`).
+     */
+    logs?: number
+  }
+  url: '/fal-ai/pixverse/v4.5/image-to-video/fast/requests/{request_id}/status'
+}
+
+export type GetFalAiPixverseV45ImageToVideoFastRequestsByRequestIdStatusResponses =
+  {
+    /**
+     * The request status.
+     */
+    200: SchemaQueueStatus
+  }
+
+export type GetFalAiPixverseV45ImageToVideoFastRequestsByRequestIdStatusResponse =
+  GetFalAiPixverseV45ImageToVideoFastRequestsByRequestIdStatusResponses[keyof GetFalAiPixverseV45ImageToVideoFastRequestsByRequestIdStatusResponses]
+
+export type PutFalAiPixverseV45ImageToVideoFastRequestsByRequestIdCancelData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/pixverse/v4.5/image-to-video/fast/requests/{request_id}/cancel'
+}
+
+export type PutFalAiPixverseV45ImageToVideoFastRequestsByRequestIdCancelResponses =
+  {
+    /**
+     * The request was cancelled.
+     */
+    200: {
+      /**
+       * Whether the request was cancelled successfully.
+       */
+      success?: boolean
+    }
+  }
+
+export type PutFalAiPixverseV45ImageToVideoFastRequestsByRequestIdCancelResponse =
+  PutFalAiPixverseV45ImageToVideoFastRequestsByRequestIdCancelResponses[keyof PutFalAiPixverseV45ImageToVideoFastRequestsByRequestIdCancelResponses]
+
+export type PostFalAiPixverseV45ImageToVideoFastData = {
+  body: SchemaPixverseV45ImageToVideoFastInput
+  path?: never
+  query?: never
+  url: '/fal-ai/pixverse/v4.5/image-to-video/fast'
+}
+
+export type PostFalAiPixverseV45ImageToVideoFastResponses = {
+  /**
+   * The request status.
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiPixverseV45ImageToVideoFastResponse = + PostFalAiPixverseV45ImageToVideoFastResponses[keyof PostFalAiPixverseV45ImageToVideoFastResponses] + +export type GetFalAiPixverseV45ImageToVideoFastRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v4.5/image-to-video/fast/requests/{request_id}' +} + +export type GetFalAiPixverseV45ImageToVideoFastRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPixverseV45ImageToVideoFastOutput +} + +export type GetFalAiPixverseV45ImageToVideoFastRequestsByRequestIdResponse = + GetFalAiPixverseV45ImageToVideoFastRequestsByRequestIdResponses[keyof GetFalAiPixverseV45ImageToVideoFastRequestsByRequestIdResponses] + +export type GetFalAiPixverseV45EffectsRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pixverse/v4.5/effects/requests/{request_id}/status' +} + +export type GetFalAiPixverseV45EffectsRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiPixverseV45EffectsRequestsByRequestIdStatusResponse = + GetFalAiPixverseV45EffectsRequestsByRequestIdStatusResponses[keyof GetFalAiPixverseV45EffectsRequestsByRequestIdStatusResponses] + +export type PutFalAiPixverseV45EffectsRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v4.5/effects/requests/{request_id}/cancel' +} + +export type PutFalAiPixverseV45EffectsRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiPixverseV45EffectsRequestsByRequestIdCancelResponse = + PutFalAiPixverseV45EffectsRequestsByRequestIdCancelResponses[keyof PutFalAiPixverseV45EffectsRequestsByRequestIdCancelResponses] + +export type PostFalAiPixverseV45EffectsData = { + body: SchemaPixverseV45EffectsInput + path?: never + query?: never + url: '/fal-ai/pixverse/v4.5/effects' +} + +export type PostFalAiPixverseV45EffectsResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPixverseV45EffectsResponse = + PostFalAiPixverseV45EffectsResponses[keyof PostFalAiPixverseV45EffectsResponses] + +export type GetFalAiPixverseV45EffectsRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v4.5/effects/requests/{request_id}' +} + +export type GetFalAiPixverseV45EffectsRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPixverseV45EffectsOutput +} + +export type GetFalAiPixverseV45EffectsRequestsByRequestIdResponse = + GetFalAiPixverseV45EffectsRequestsByRequestIdResponses[keyof GetFalAiPixverseV45EffectsRequestsByRequestIdResponses] + +export type GetFalAiHunyuanCustomRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/hunyuan-custom/requests/{request_id}/status' +} + +export type GetFalAiHunyuanCustomRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiHunyuanCustomRequestsByRequestIdStatusResponse = + GetFalAiHunyuanCustomRequestsByRequestIdStatusResponses[keyof GetFalAiHunyuanCustomRequestsByRequestIdStatusResponses] + +export type PutFalAiHunyuanCustomRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan-custom/requests/{request_id}/cancel' +} + +export type PutFalAiHunyuanCustomRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiHunyuanCustomRequestsByRequestIdCancelResponse = + PutFalAiHunyuanCustomRequestsByRequestIdCancelResponses[keyof PutFalAiHunyuanCustomRequestsByRequestIdCancelResponses] + +export type PostFalAiHunyuanCustomData = { + body: SchemaHunyuanCustomInput + path?: never + query?: never + url: '/fal-ai/hunyuan-custom' +} + +export type PostFalAiHunyuanCustomResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiHunyuanCustomResponse = + PostFalAiHunyuanCustomResponses[keyof PostFalAiHunyuanCustomResponses] + +export type GetFalAiHunyuanCustomRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan-custom/requests/{request_id}' +} + +export type GetFalAiHunyuanCustomRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaHunyuanCustomOutput +} + +export type GetFalAiHunyuanCustomRequestsByRequestIdResponse = + GetFalAiHunyuanCustomRequestsByRequestIdResponses[keyof GetFalAiHunyuanCustomRequestsByRequestIdResponses] + +export type GetFalAiFramepackF1RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/framepack/f1/requests/{request_id}/status' +} + +export type GetFalAiFramepackF1RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFramepackF1RequestsByRequestIdStatusResponse = + GetFalAiFramepackF1RequestsByRequestIdStatusResponses[keyof GetFalAiFramepackF1RequestsByRequestIdStatusResponses] + +export type PutFalAiFramepackF1RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/framepack/f1/requests/{request_id}/cancel' +} + +export type PutFalAiFramepackF1RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFramepackF1RequestsByRequestIdCancelResponse = + PutFalAiFramepackF1RequestsByRequestIdCancelResponses[keyof PutFalAiFramepackF1RequestsByRequestIdCancelResponses] + +export type PostFalAiFramepackF1Data = { + body: SchemaFramepackF1Input + path?: never + query?: never + url: '/fal-ai/framepack/f1' +} + +export type PostFalAiFramepackF1Responses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiFramepackF1Response = + PostFalAiFramepackF1Responses[keyof PostFalAiFramepackF1Responses] + +export type GetFalAiFramepackF1RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/framepack/f1/requests/{request_id}' +} + +export type GetFalAiFramepackF1RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFramepackF1Output +} + +export type GetFalAiFramepackF1RequestsByRequestIdResponse = + GetFalAiFramepackF1RequestsByRequestIdResponses[keyof GetFalAiFramepackF1RequestsByRequestIdResponses] + +export type GetFalAiViduQ1StartEndToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/vidu/q1/start-end-to-video/requests/{request_id}/status' +} + +export type GetFalAiViduQ1StartEndToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiViduQ1StartEndToVideoRequestsByRequestIdStatusResponse = + GetFalAiViduQ1StartEndToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiViduQ1StartEndToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiViduQ1StartEndToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/vidu/q1/start-end-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiViduQ1StartEndToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiViduQ1StartEndToVideoRequestsByRequestIdCancelResponse = + PutFalAiViduQ1StartEndToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiViduQ1StartEndToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiViduQ1StartEndToVideoData = { + body: SchemaViduQ1StartEndToVideoInput + path?: never + query?: never + url: '/fal-ai/vidu/q1/start-end-to-video' +} + +export type PostFalAiViduQ1StartEndToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiViduQ1StartEndToVideoResponse = + PostFalAiViduQ1StartEndToVideoResponses[keyof PostFalAiViduQ1StartEndToVideoResponses] + +export type GetFalAiViduQ1StartEndToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/vidu/q1/start-end-to-video/requests/{request_id}' +} + +export type GetFalAiViduQ1StartEndToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaViduQ1StartEndToVideoOutput +} + +export type GetFalAiViduQ1StartEndToVideoRequestsByRequestIdResponse = + GetFalAiViduQ1StartEndToVideoRequestsByRequestIdResponses[keyof GetFalAiViduQ1StartEndToVideoRequestsByRequestIdResponses] + +export type GetFalAiViduQ1ImageToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/vidu/q1/image-to-video/requests/{request_id}/status' +} + +export type GetFalAiViduQ1ImageToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. 
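+ *
+ * A hedged polling sketch, illustrative only: poll the status endpoint,
+ * passing `logs=1` to include logs, until the status reaches `COMPLETED`.
+ * The `queue.fal.run` host and the in-scope `requestId` are assumptions.
+ *
+ * @example
+ * let status: SchemaQueueStatus
+ * do {
+ *   const res = await fetch(
+ *     `https://queue.fal.run/fal-ai/vidu/q1/image-to-video/requests/${requestId}/status?logs=1`,
+ *     { headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+ *   )
+ *   status = await res.json()
+ *   if (status.status !== 'COMPLETED') await new Promise((r) => setTimeout(r, 1000))
+ * } while (status.status !== 'COMPLETED')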
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiViduQ1ImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiViduQ1ImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiViduQ1ImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiViduQ1ImageToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/vidu/q1/image-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiViduQ1ImageToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiViduQ1ImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiViduQ1ImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiViduQ1ImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiViduQ1ImageToVideoData = { + body: SchemaViduQ1ImageToVideoInput + path?: never + query?: never + url: '/fal-ai/vidu/q1/image-to-video' +} + +export type PostFalAiViduQ1ImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiViduQ1ImageToVideoResponse = + PostFalAiViduQ1ImageToVideoResponses[keyof PostFalAiViduQ1ImageToVideoResponses] + +export type GetFalAiViduQ1ImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/vidu/q1/image-to-video/requests/{request_id}' +} + +export type GetFalAiViduQ1ImageToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaViduQ1ImageToVideoOutput +} + +export type GetFalAiViduQ1ImageToVideoRequestsByRequestIdResponse = + GetFalAiViduQ1ImageToVideoRequestsByRequestIdResponses[keyof GetFalAiViduQ1ImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiMagiImageToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/magi/image-to-video/requests/{request_id}/status' +} + +export type GetFalAiMagiImageToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiMagiImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiMagiImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiMagiImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiMagiImageToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/magi/image-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiMagiImageToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiMagiImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiMagiImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiMagiImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiMagiImageToVideoData = { + body: SchemaMagiImageToVideoInput + path?: never + query?: never + url: '/fal-ai/magi/image-to-video' +} + +export type PostFalAiMagiImageToVideoResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiMagiImageToVideoResponse = + PostFalAiMagiImageToVideoResponses[keyof PostFalAiMagiImageToVideoResponses] + +export type GetFalAiMagiImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/magi/image-to-video/requests/{request_id}' +} + +export type GetFalAiMagiImageToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMagiImageToVideoOutput +} + +export type GetFalAiMagiImageToVideoRequestsByRequestIdResponse = + GetFalAiMagiImageToVideoRequestsByRequestIdResponses[keyof GetFalAiMagiImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiPixverseV4EffectsRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pixverse/v4/effects/requests/{request_id}/status' +} + +export type GetFalAiPixverseV4EffectsRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiPixverseV4EffectsRequestsByRequestIdStatusResponse = + GetFalAiPixverseV4EffectsRequestsByRequestIdStatusResponses[keyof GetFalAiPixverseV4EffectsRequestsByRequestIdStatusResponses] + +export type PutFalAiPixverseV4EffectsRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v4/effects/requests/{request_id}/cancel' +} + +export type PutFalAiPixverseV4EffectsRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiPixverseV4EffectsRequestsByRequestIdCancelResponse = + PutFalAiPixverseV4EffectsRequestsByRequestIdCancelResponses[keyof PutFalAiPixverseV4EffectsRequestsByRequestIdCancelResponses] + +export type PostFalAiPixverseV4EffectsData = { + body: SchemaPixverseV4EffectsInput + path?: never + query?: never + url: '/fal-ai/pixverse/v4/effects' +} + +export type PostFalAiPixverseV4EffectsResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPixverseV4EffectsResponse = + PostFalAiPixverseV4EffectsResponses[keyof PostFalAiPixverseV4EffectsResponses] + +export type GetFalAiPixverseV4EffectsRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v4/effects/requests/{request_id}' +} + +export type GetFalAiPixverseV4EffectsRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPixverseV4EffectsOutput +} + +export type GetFalAiPixverseV4EffectsRequestsByRequestIdResponse = + GetFalAiPixverseV4EffectsRequestsByRequestIdResponses[keyof GetFalAiPixverseV4EffectsRequestsByRequestIdResponses] + +export type GetFalAiMagiDistilledImageToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/magi-distilled/image-to-video/requests/{request_id}/status' +} + +export type GetFalAiMagiDistilledImageToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiMagiDistilledImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiMagiDistilledImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiMagiDistilledImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiMagiDistilledImageToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/magi-distilled/image-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiMagiDistilledImageToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiMagiDistilledImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiMagiDistilledImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiMagiDistilledImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiMagiDistilledImageToVideoData = { + body: SchemaMagiDistilledImageToVideoInput + path?: never + query?: never + url: '/fal-ai/magi-distilled/image-to-video' +} + +export type PostFalAiMagiDistilledImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMagiDistilledImageToVideoResponse = + PostFalAiMagiDistilledImageToVideoResponses[keyof PostFalAiMagiDistilledImageToVideoResponses] + +export type GetFalAiMagiDistilledImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/magi-distilled/image-to-video/requests/{request_id}' +} + +export type GetFalAiMagiDistilledImageToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMagiDistilledImageToVideoOutput +} + +export type GetFalAiMagiDistilledImageToVideoRequestsByRequestIdResponse = + GetFalAiMagiDistilledImageToVideoRequestsByRequestIdResponses[keyof GetFalAiMagiDistilledImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiFramepackFlf2vRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/framepack/flf2v/requests/{request_id}/status' +} + +export type GetFalAiFramepackFlf2vRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFramepackFlf2vRequestsByRequestIdStatusResponse = + GetFalAiFramepackFlf2vRequestsByRequestIdStatusResponses[keyof GetFalAiFramepackFlf2vRequestsByRequestIdStatusResponses] + +export type PutFalAiFramepackFlf2vRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/framepack/flf2v/requests/{request_id}/cancel' +} + +export type PutFalAiFramepackFlf2vRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
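+ *
+ * A hedged cancellation sketch (host and `requestId` assumed): issue a PUT to
+ * the cancel URL; `success` reports whether the queue accepted the
+ * cancellation.
+ *
+ * @example
+ * const res = await fetch(
+ *   `https://queue.fal.run/fal-ai/framepack/flf2v/requests/${requestId}/cancel`,
+ *   { method: 'PUT', headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+ * )
+ * const { success } = await res.json()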
+ */ + success?: boolean + } +} + +export type PutFalAiFramepackFlf2vRequestsByRequestIdCancelResponse = + PutFalAiFramepackFlf2vRequestsByRequestIdCancelResponses[keyof PutFalAiFramepackFlf2vRequestsByRequestIdCancelResponses] + +export type PostFalAiFramepackFlf2vData = { + body: SchemaFramepackFlf2vInput + path?: never + query?: never + url: '/fal-ai/framepack/flf2v' +} + +export type PostFalAiFramepackFlf2vResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFramepackFlf2vResponse = + PostFalAiFramepackFlf2vResponses[keyof PostFalAiFramepackFlf2vResponses] + +export type GetFalAiFramepackFlf2vRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/framepack/flf2v/requests/{request_id}' +} + +export type GetFalAiFramepackFlf2vRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFramepackFlf2vOutput +} + +export type GetFalAiFramepackFlf2vRequestsByRequestIdResponse = + GetFalAiFramepackFlf2vRequestsByRequestIdResponses[keyof GetFalAiFramepackFlf2vRequestsByRequestIdResponses] + +export type GetFalAiWanFlf2vRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan-flf2v/requests/{request_id}/status' +} + +export type GetFalAiWanFlf2vRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiWanFlf2vRequestsByRequestIdStatusResponse = + GetFalAiWanFlf2vRequestsByRequestIdStatusResponses[keyof GetFalAiWanFlf2vRequestsByRequestIdStatusResponses] + +export type PutFalAiWanFlf2vRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-flf2v/requests/{request_id}/cancel' +} + +export type PutFalAiWanFlf2vRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiWanFlf2vRequestsByRequestIdCancelResponse = + PutFalAiWanFlf2vRequestsByRequestIdCancelResponses[keyof PutFalAiWanFlf2vRequestsByRequestIdCancelResponses] + +export type PostFalAiWanFlf2vData = { + body: SchemaWanFlf2vInput + path?: never + query?: never + url: '/fal-ai/wan-flf2v' +} + +export type PostFalAiWanFlf2vResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanFlf2vResponse = + PostFalAiWanFlf2vResponses[keyof PostFalAiWanFlf2vResponses] + +export type GetFalAiWanFlf2vRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-flf2v/requests/{request_id}' +} + +export type GetFalAiWanFlf2vRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWanFlf2vOutput +} + +export type GetFalAiWanFlf2vRequestsByRequestIdResponse = + GetFalAiWanFlf2vRequestsByRequestIdResponses[keyof GetFalAiWanFlf2vRequestsByRequestIdResponses] + +export type GetFalAiFramepackRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/framepack/requests/{request_id}/status' +} + +export type GetFalAiFramepackRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFramepackRequestsByRequestIdStatusResponse = + GetFalAiFramepackRequestsByRequestIdStatusResponses[keyof GetFalAiFramepackRequestsByRequestIdStatusResponses] + +export type PutFalAiFramepackRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/framepack/requests/{request_id}/cancel' +} + +export type PutFalAiFramepackRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFramepackRequestsByRequestIdCancelResponse = + PutFalAiFramepackRequestsByRequestIdCancelResponses[keyof PutFalAiFramepackRequestsByRequestIdCancelResponses] + +export type PostFalAiFramepackData = { + body: SchemaFramepackInput + path?: never + query?: never + url: '/fal-ai/framepack' +} + +export type PostFalAiFramepackResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFramepackResponse = + PostFalAiFramepackResponses[keyof PostFalAiFramepackResponses] + +export type GetFalAiFramepackRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/framepack/requests/{request_id}' +} + +export type GetFalAiFramepackRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFramepackOutput +} + +export type GetFalAiFramepackRequestsByRequestIdResponse = + GetFalAiFramepackRequestsByRequestIdResponses[keyof GetFalAiFramepackRequestsByRequestIdResponses] + +export type GetFalAiPixverseV4ImageToVideoFastRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pixverse/v4/image-to-video/fast/requests/{request_id}/status' +} + +export type GetFalAiPixverseV4ImageToVideoFastRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiPixverseV4ImageToVideoFastRequestsByRequestIdStatusResponse = + GetFalAiPixverseV4ImageToVideoFastRequestsByRequestIdStatusResponses[keyof GetFalAiPixverseV4ImageToVideoFastRequestsByRequestIdStatusResponses] + +export type PutFalAiPixverseV4ImageToVideoFastRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v4/image-to-video/fast/requests/{request_id}/cancel' +} + +export type PutFalAiPixverseV4ImageToVideoFastRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiPixverseV4ImageToVideoFastRequestsByRequestIdCancelResponse = + PutFalAiPixverseV4ImageToVideoFastRequestsByRequestIdCancelResponses[keyof PutFalAiPixverseV4ImageToVideoFastRequestsByRequestIdCancelResponses] + +export type PostFalAiPixverseV4ImageToVideoFastData = { + body: SchemaPixverseV4ImageToVideoFastInput + path?: never + query?: never + url: '/fal-ai/pixverse/v4/image-to-video/fast' +} + +export type PostFalAiPixverseV4ImageToVideoFastResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPixverseV4ImageToVideoFastResponse = + PostFalAiPixverseV4ImageToVideoFastResponses[keyof PostFalAiPixverseV4ImageToVideoFastResponses] + +export type GetFalAiPixverseV4ImageToVideoFastRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v4/image-to-video/fast/requests/{request_id}' +} + +export type GetFalAiPixverseV4ImageToVideoFastRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPixverseV4ImageToVideoFastOutput +} + +export type GetFalAiPixverseV4ImageToVideoFastRequestsByRequestIdResponse = + GetFalAiPixverseV4ImageToVideoFastRequestsByRequestIdResponses[keyof GetFalAiPixverseV4ImageToVideoFastRequestsByRequestIdResponses] + +export type GetFalAiPixverseV4ImageToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pixverse/v4/image-to-video/requests/{request_id}/status' +} + +export type GetFalAiPixverseV4ImageToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiPixverseV4ImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiPixverseV4ImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiPixverseV4ImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiPixverseV4ImageToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v4/image-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiPixverseV4ImageToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiPixverseV4ImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiPixverseV4ImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiPixverseV4ImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiPixverseV4ImageToVideoData = { + body: SchemaPixverseV4ImageToVideoInput + path?: never + query?: never + url: '/fal-ai/pixverse/v4/image-to-video' +} + +export type PostFalAiPixverseV4ImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPixverseV4ImageToVideoResponse = + PostFalAiPixverseV4ImageToVideoResponses[keyof PostFalAiPixverseV4ImageToVideoResponses] + +export type GetFalAiPixverseV4ImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v4/image-to-video/requests/{request_id}' +} + +export type GetFalAiPixverseV4ImageToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. 
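+ *
+ * A hedged sketch (host and `requestId` assumed): once the queue status is
+ * `COMPLETED`, fetch the result URL to obtain the typed output payload.
+ *
+ * @example
+ * const res = await fetch(
+ *   `https://queue.fal.run/fal-ai/pixverse/v4/image-to-video/requests/${requestId}`,
+ *   { headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+ * )
+ * const output: SchemaPixverseV4ImageToVideoOutput = await res.json()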
+ */ + 200: SchemaPixverseV4ImageToVideoOutput +} + +export type GetFalAiPixverseV4ImageToVideoRequestsByRequestIdResponse = + GetFalAiPixverseV4ImageToVideoRequestsByRequestIdResponses[keyof GetFalAiPixverseV4ImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiPixverseV35EffectsRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pixverse/v3.5/effects/requests/{request_id}/status' +} + +export type GetFalAiPixverseV35EffectsRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiPixverseV35EffectsRequestsByRequestIdStatusResponse = + GetFalAiPixverseV35EffectsRequestsByRequestIdStatusResponses[keyof GetFalAiPixverseV35EffectsRequestsByRequestIdStatusResponses] + +export type PutFalAiPixverseV35EffectsRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v3.5/effects/requests/{request_id}/cancel' +} + +export type PutFalAiPixverseV35EffectsRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiPixverseV35EffectsRequestsByRequestIdCancelResponse = + PutFalAiPixverseV35EffectsRequestsByRequestIdCancelResponses[keyof PutFalAiPixverseV35EffectsRequestsByRequestIdCancelResponses] + +export type PostFalAiPixverseV35EffectsData = { + body: SchemaPixverseV35EffectsInput + path?: never + query?: never + url: '/fal-ai/pixverse/v3.5/effects' +} + +export type PostFalAiPixverseV35EffectsResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPixverseV35EffectsResponse = + PostFalAiPixverseV35EffectsResponses[keyof PostFalAiPixverseV35EffectsResponses] + +export type GetFalAiPixverseV35EffectsRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v3.5/effects/requests/{request_id}' +} + +export type GetFalAiPixverseV35EffectsRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPixverseV35EffectsOutput +} + +export type GetFalAiPixverseV35EffectsRequestsByRequestIdResponse = + GetFalAiPixverseV35EffectsRequestsByRequestIdResponses[keyof GetFalAiPixverseV35EffectsRequestsByRequestIdResponses] + +export type GetFalAiPixverseV35TransitionRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pixverse/v3.5/transition/requests/{request_id}/status' +} + +export type GetFalAiPixverseV35TransitionRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiPixverseV35TransitionRequestsByRequestIdStatusResponse = + GetFalAiPixverseV35TransitionRequestsByRequestIdStatusResponses[keyof GetFalAiPixverseV35TransitionRequestsByRequestIdStatusResponses] + +export type PutFalAiPixverseV35TransitionRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v3.5/transition/requests/{request_id}/cancel' +} + +export type PutFalAiPixverseV35TransitionRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiPixverseV35TransitionRequestsByRequestIdCancelResponse = + PutFalAiPixverseV35TransitionRequestsByRequestIdCancelResponses[keyof PutFalAiPixverseV35TransitionRequestsByRequestIdCancelResponses] + +export type PostFalAiPixverseV35TransitionData = { + body: SchemaPixverseV35TransitionInput + path?: never + query?: never + url: '/fal-ai/pixverse/v3.5/transition' +} + +export type PostFalAiPixverseV35TransitionResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPixverseV35TransitionResponse = + PostFalAiPixverseV35TransitionResponses[keyof PostFalAiPixverseV35TransitionResponses] + +export type GetFalAiPixverseV35TransitionRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v3.5/transition/requests/{request_id}' +} + +export type GetFalAiPixverseV35TransitionRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPixverseV35TransitionOutput +} + +export type GetFalAiPixverseV35TransitionRequestsByRequestIdResponse = + GetFalAiPixverseV35TransitionRequestsByRequestIdResponses[keyof GetFalAiPixverseV35TransitionRequestsByRequestIdResponses] + +export type GetFalAiLumaDreamMachineRay2FlashImageToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/luma-dream-machine/ray-2-flash/image-to-video/requests/{request_id}/status' + } + +export type GetFalAiLumaDreamMachineRay2FlashImageToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiLumaDreamMachineRay2FlashImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiLumaDreamMachineRay2FlashImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiLumaDreamMachineRay2FlashImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiLumaDreamMachineRay2FlashImageToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/luma-dream-machine/ray-2-flash/image-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiLumaDreamMachineRay2FlashImageToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiLumaDreamMachineRay2FlashImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiLumaDreamMachineRay2FlashImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiLumaDreamMachineRay2FlashImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiLumaDreamMachineRay2FlashImageToVideoData = { + body: SchemaLumaDreamMachineRay2FlashImageToVideoInput + path?: never + query?: never + url: '/fal-ai/luma-dream-machine/ray-2-flash/image-to-video' +} + +export type PostFalAiLumaDreamMachineRay2FlashImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLumaDreamMachineRay2FlashImageToVideoResponse = + PostFalAiLumaDreamMachineRay2FlashImageToVideoResponses[keyof PostFalAiLumaDreamMachineRay2FlashImageToVideoResponses] + +export type GetFalAiLumaDreamMachineRay2FlashImageToVideoRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/luma-dream-machine/ray-2-flash/image-to-video/requests/{request_id}' + } + +export type GetFalAiLumaDreamMachineRay2FlashImageToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaLumaDreamMachineRay2FlashImageToVideoOutput + } + +export type GetFalAiLumaDreamMachineRay2FlashImageToVideoRequestsByRequestIdResponse = + GetFalAiLumaDreamMachineRay2FlashImageToVideoRequestsByRequestIdResponses[keyof GetFalAiLumaDreamMachineRay2FlashImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiPikaV15PikaffectsRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pika/v1.5/pikaffects/requests/{request_id}/status' +} + +export type GetFalAiPikaV15PikaffectsRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiPikaV15PikaffectsRequestsByRequestIdStatusResponse = + GetFalAiPikaV15PikaffectsRequestsByRequestIdStatusResponses[keyof GetFalAiPikaV15PikaffectsRequestsByRequestIdStatusResponses] + +export type PutFalAiPikaV15PikaffectsRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pika/v1.5/pikaffects/requests/{request_id}/cancel' +} + +export type PutFalAiPikaV15PikaffectsRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiPikaV15PikaffectsRequestsByRequestIdCancelResponse = + PutFalAiPikaV15PikaffectsRequestsByRequestIdCancelResponses[keyof PutFalAiPikaV15PikaffectsRequestsByRequestIdCancelResponses] + +export type PostFalAiPikaV15PikaffectsData = { + body: SchemaPikaV15PikaffectsInput + path?: never + query?: never + url: '/fal-ai/pika/v1.5/pikaffects' +} + +export type PostFalAiPikaV15PikaffectsResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiPikaV15PikaffectsResponse = + PostFalAiPikaV15PikaffectsResponses[keyof PostFalAiPikaV15PikaffectsResponses] + +export type GetFalAiPikaV15PikaffectsRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pika/v1.5/pikaffects/requests/{request_id}' +} + +export type GetFalAiPikaV15PikaffectsRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPikaV15PikaffectsOutput +} + +export type GetFalAiPikaV15PikaffectsRequestsByRequestIdResponse = + GetFalAiPikaV15PikaffectsRequestsByRequestIdResponses[keyof GetFalAiPikaV15PikaffectsRequestsByRequestIdResponses] + +export type GetFalAiPikaV2TurboImageToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pika/v2/turbo/image-to-video/requests/{request_id}/status' +} + +export type GetFalAiPikaV2TurboImageToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiPikaV2TurboImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiPikaV2TurboImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiPikaV2TurboImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiPikaV2TurboImageToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pika/v2/turbo/image-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiPikaV2TurboImageToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiPikaV2TurboImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiPikaV2TurboImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiPikaV2TurboImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiPikaV2TurboImageToVideoData = { + body: SchemaPikaV2TurboImageToVideoInput + path?: never + query?: never + url: '/fal-ai/pika/v2/turbo/image-to-video' +} + +export type PostFalAiPikaV2TurboImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPikaV2TurboImageToVideoResponse = + PostFalAiPikaV2TurboImageToVideoResponses[keyof PostFalAiPikaV2TurboImageToVideoResponses] + +export type GetFalAiPikaV2TurboImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pika/v2/turbo/image-to-video/requests/{request_id}' +} + +export type GetFalAiPikaV2TurboImageToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPikaV2TurboImageToVideoOutput +} + +export type GetFalAiPikaV2TurboImageToVideoRequestsByRequestIdResponse = + GetFalAiPikaV2TurboImageToVideoRequestsByRequestIdResponses[keyof GetFalAiPikaV2TurboImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiPikaV22PikascenesRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
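+ *
+ * @example 1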
+ */ + logs?: number + } + url: '/fal-ai/pika/v2.2/pikascenes/requests/{request_id}/status' +} + +export type GetFalAiPikaV22PikascenesRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiPikaV22PikascenesRequestsByRequestIdStatusResponse = + GetFalAiPikaV22PikascenesRequestsByRequestIdStatusResponses[keyof GetFalAiPikaV22PikascenesRequestsByRequestIdStatusResponses] + +export type PutFalAiPikaV22PikascenesRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pika/v2.2/pikascenes/requests/{request_id}/cancel' +} + +export type PutFalAiPikaV22PikascenesRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiPikaV22PikascenesRequestsByRequestIdCancelResponse = + PutFalAiPikaV22PikascenesRequestsByRequestIdCancelResponses[keyof PutFalAiPikaV22PikascenesRequestsByRequestIdCancelResponses] + +export type PostFalAiPikaV22PikascenesData = { + body: SchemaPikaV22PikascenesInput + path?: never + query?: never + url: '/fal-ai/pika/v2.2/pikascenes' +} + +export type PostFalAiPikaV22PikascenesResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPikaV22PikascenesResponse = + PostFalAiPikaV22PikascenesResponses[keyof PostFalAiPikaV22PikascenesResponses] + +export type GetFalAiPikaV22PikascenesRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pika/v2.2/pikascenes/requests/{request_id}' +} + +export type GetFalAiPikaV22PikascenesRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPikaV22PikascenesOutput +} + +export type GetFalAiPikaV22PikascenesRequestsByRequestIdResponse = + GetFalAiPikaV22PikascenesRequestsByRequestIdResponses[keyof GetFalAiPikaV22PikascenesRequestsByRequestIdResponses] + +export type GetFalAiPikaV22ImageToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pika/v2.2/image-to-video/requests/{request_id}/status' +} + +export type GetFalAiPikaV22ImageToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiPikaV22ImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiPikaV22ImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiPikaV22ImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiPikaV22ImageToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pika/v2.2/image-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiPikaV22ImageToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiPikaV22ImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiPikaV22ImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiPikaV22ImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiPikaV22ImageToVideoData = { + body: SchemaPikaV22ImageToVideoInput + path?: never + query?: never + url: '/fal-ai/pika/v2.2/image-to-video' +} + +export type PostFalAiPikaV22ImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPikaV22ImageToVideoResponse = + PostFalAiPikaV22ImageToVideoResponses[keyof PostFalAiPikaV22ImageToVideoResponses] + +export type GetFalAiPikaV22ImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pika/v2.2/image-to-video/requests/{request_id}' +} + +export type GetFalAiPikaV22ImageToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPikaV22ImageToVideoOutput +} + +export type GetFalAiPikaV22ImageToVideoRequestsByRequestIdResponse = + GetFalAiPikaV22ImageToVideoRequestsByRequestIdResponses[keyof GetFalAiPikaV22ImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiPikaV21ImageToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pika/v2.1/image-to-video/requests/{request_id}/status' +} + +export type GetFalAiPikaV21ImageToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiPikaV21ImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiPikaV21ImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiPikaV21ImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiPikaV21ImageToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pika/v2.1/image-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiPikaV21ImageToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiPikaV21ImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiPikaV21ImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiPikaV21ImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiPikaV21ImageToVideoData = { + body: SchemaPikaV21ImageToVideoInput + path?: never + query?: never + url: '/fal-ai/pika/v2.1/image-to-video' +} + +export type PostFalAiPikaV21ImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPikaV21ImageToVideoResponse = + PostFalAiPikaV21ImageToVideoResponses[keyof PostFalAiPikaV21ImageToVideoResponses] + +export type GetFalAiPikaV21ImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pika/v2.1/image-to-video/requests/{request_id}' +} + +export type GetFalAiPikaV21ImageToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaPikaV21ImageToVideoOutput +} + +export type GetFalAiPikaV21ImageToVideoRequestsByRequestIdResponse = + GetFalAiPikaV21ImageToVideoRequestsByRequestIdResponses[keyof GetFalAiPikaV21ImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiViduImageToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/vidu/image-to-video/requests/{request_id}/status' +} + +export type GetFalAiViduImageToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiViduImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiViduImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiViduImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiViduImageToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/vidu/image-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiViduImageToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiViduImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiViduImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiViduImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiViduImageToVideoData = { + body: SchemaViduImageToVideoInput + path?: never + query?: never + url: '/fal-ai/vidu/image-to-video' +} + +export type PostFalAiViduImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiViduImageToVideoResponse = + PostFalAiViduImageToVideoResponses[keyof PostFalAiViduImageToVideoResponses] + +export type GetFalAiViduImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/vidu/image-to-video/requests/{request_id}' +} + +export type GetFalAiViduImageToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaViduImageToVideoOutput +} + +export type GetFalAiViduImageToVideoRequestsByRequestIdResponse = + GetFalAiViduImageToVideoRequestsByRequestIdResponses[keyof GetFalAiViduImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiViduStartEndToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/vidu/start-end-to-video/requests/{request_id}/status' +} + +export type GetFalAiViduStartEndToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiViduStartEndToVideoRequestsByRequestIdStatusResponse = + GetFalAiViduStartEndToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiViduStartEndToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiViduStartEndToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/vidu/start-end-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiViduStartEndToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiViduStartEndToVideoRequestsByRequestIdCancelResponse = + PutFalAiViduStartEndToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiViduStartEndToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiViduStartEndToVideoData = { + body: SchemaViduStartEndToVideoInput + path?: never + query?: never + url: '/fal-ai/vidu/start-end-to-video' +} + +export type PostFalAiViduStartEndToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiViduStartEndToVideoResponse = + PostFalAiViduStartEndToVideoResponses[keyof PostFalAiViduStartEndToVideoResponses] + +export type GetFalAiViduStartEndToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/vidu/start-end-to-video/requests/{request_id}' +} + +export type GetFalAiViduStartEndToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaViduStartEndToVideoOutput +} + +export type GetFalAiViduStartEndToVideoRequestsByRequestIdResponse = + GetFalAiViduStartEndToVideoRequestsByRequestIdResponses[keyof GetFalAiViduStartEndToVideoRequestsByRequestIdResponses] + +export type GetFalAiViduReferenceToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/vidu/reference-to-video/requests/{request_id}/status' +} + +export type GetFalAiViduReferenceToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiViduReferenceToVideoRequestsByRequestIdStatusResponse = + GetFalAiViduReferenceToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiViduReferenceToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiViduReferenceToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/vidu/reference-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiViduReferenceToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiViduReferenceToVideoRequestsByRequestIdCancelResponse = + PutFalAiViduReferenceToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiViduReferenceToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiViduReferenceToVideoData = { + body: SchemaViduReferenceToVideoInput + path?: never + query?: never + url: '/fal-ai/vidu/reference-to-video' +} + +export type PostFalAiViduReferenceToVideoResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiViduReferenceToVideoResponse = + PostFalAiViduReferenceToVideoResponses[keyof PostFalAiViduReferenceToVideoResponses] + +export type GetFalAiViduReferenceToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/vidu/reference-to-video/requests/{request_id}' +} + +export type GetFalAiViduReferenceToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaViduReferenceToVideoOutput +} + +export type GetFalAiViduReferenceToVideoRequestsByRequestIdResponse = + GetFalAiViduReferenceToVideoRequestsByRequestIdResponses[keyof GetFalAiViduReferenceToVideoRequestsByRequestIdResponses] + +export type GetFalAiViduTemplateToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/vidu/template-to-video/requests/{request_id}/status' +} + +export type GetFalAiViduTemplateToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiViduTemplateToVideoRequestsByRequestIdStatusResponse = + GetFalAiViduTemplateToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiViduTemplateToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiViduTemplateToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/vidu/template-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiViduTemplateToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiViduTemplateToVideoRequestsByRequestIdCancelResponse = + PutFalAiViduTemplateToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiViduTemplateToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiViduTemplateToVideoData = { + body: SchemaViduTemplateToVideoInput + path?: never + query?: never + url: '/fal-ai/vidu/template-to-video' +} + +export type PostFalAiViduTemplateToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiViduTemplateToVideoResponse = + PostFalAiViduTemplateToVideoResponses[keyof PostFalAiViduTemplateToVideoResponses] + +export type GetFalAiViduTemplateToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/vidu/template-to-video/requests/{request_id}' +} + +export type GetFalAiViduTemplateToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaViduTemplateToVideoOutput +} + +export type GetFalAiViduTemplateToVideoRequestsByRequestIdResponse = + GetFalAiViduTemplateToVideoRequestsByRequestIdResponses[keyof GetFalAiViduTemplateToVideoRequestsByRequestIdResponses] + +export type GetFalAiWanI2vLoraRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan-i2v-lora/requests/{request_id}/status' +} + +export type GetFalAiWanI2vLoraRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiWanI2vLoraRequestsByRequestIdStatusResponse = + GetFalAiWanI2vLoraRequestsByRequestIdStatusResponses[keyof GetFalAiWanI2vLoraRequestsByRequestIdStatusResponses] + +export type PutFalAiWanI2vLoraRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-i2v-lora/requests/{request_id}/cancel' +} + +export type PutFalAiWanI2vLoraRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiWanI2vLoraRequestsByRequestIdCancelResponse = + PutFalAiWanI2vLoraRequestsByRequestIdCancelResponses[keyof PutFalAiWanI2vLoraRequestsByRequestIdCancelResponses] + +export type PostFalAiWanI2vLoraData = { + body: SchemaWanI2vLoraInput + path?: never + query?: never + url: '/fal-ai/wan-i2v-lora' +} + +export type PostFalAiWanI2vLoraResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanI2vLoraResponse = + PostFalAiWanI2vLoraResponses[keyof PostFalAiWanI2vLoraResponses] + +export type GetFalAiWanI2vLoraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-i2v-lora/requests/{request_id}' +} + +export type GetFalAiWanI2vLoraRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWanI2vLoraOutput +} + +export type GetFalAiWanI2vLoraRequestsByRequestIdResponse = + GetFalAiWanI2vLoraRequestsByRequestIdResponses[keyof GetFalAiWanI2vLoraRequestsByRequestIdResponses] + +export type GetFalAiHunyuanVideoImageToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/hunyuan-video-image-to-video/requests/{request_id}/status' +} + +export type GetFalAiHunyuanVideoImageToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiHunyuanVideoImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiHunyuanVideoImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiHunyuanVideoImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiHunyuanVideoImageToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan-video-image-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiHunyuanVideoImageToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiHunyuanVideoImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiHunyuanVideoImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiHunyuanVideoImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiHunyuanVideoImageToVideoData = { + body: SchemaHunyuanVideoImageToVideoInput + path?: never + query?: never + url: '/fal-ai/hunyuan-video-image-to-video' +} + +export type PostFalAiHunyuanVideoImageToVideoResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiHunyuanVideoImageToVideoResponse = + PostFalAiHunyuanVideoImageToVideoResponses[keyof PostFalAiHunyuanVideoImageToVideoResponses] + +export type GetFalAiHunyuanVideoImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan-video-image-to-video/requests/{request_id}' +} + +export type GetFalAiHunyuanVideoImageToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaHunyuanVideoImageToVideoOutput +} + +export type GetFalAiHunyuanVideoImageToVideoRequestsByRequestIdResponse = + GetFalAiHunyuanVideoImageToVideoRequestsByRequestIdResponses[keyof GetFalAiHunyuanVideoImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiMinimaxVideo01DirectorImageToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/minimax/video-01-director/image-to-video/requests/{request_id}/status' + } + +export type GetFalAiMinimaxVideo01DirectorImageToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiMinimaxVideo01DirectorImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiMinimaxVideo01DirectorImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiMinimaxVideo01DirectorImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiMinimaxVideo01DirectorImageToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/video-01-director/image-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiMinimaxVideo01DirectorImageToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiMinimaxVideo01DirectorImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiMinimaxVideo01DirectorImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiMinimaxVideo01DirectorImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiMinimaxVideo01DirectorImageToVideoData = { + body: SchemaMinimaxVideo01DirectorImageToVideoInput + path?: never + query?: never + url: '/fal-ai/minimax/video-01-director/image-to-video' +} + +export type PostFalAiMinimaxVideo01DirectorImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMinimaxVideo01DirectorImageToVideoResponse = + PostFalAiMinimaxVideo01DirectorImageToVideoResponses[keyof PostFalAiMinimaxVideo01DirectorImageToVideoResponses] + +export type GetFalAiMinimaxVideo01DirectorImageToVideoRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/video-01-director/image-to-video/requests/{request_id}' + } + +export type GetFalAiMinimaxVideo01DirectorImageToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. 
+ */ + 200: SchemaMinimaxVideo01DirectorImageToVideoOutput + } + +export type GetFalAiMinimaxVideo01DirectorImageToVideoRequestsByRequestIdResponse = + GetFalAiMinimaxVideo01DirectorImageToVideoRequestsByRequestIdResponses[keyof GetFalAiMinimaxVideo01DirectorImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiSkyreelsI2vRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/skyreels-i2v/requests/{request_id}/status' +} + +export type GetFalAiSkyreelsI2vRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSkyreelsI2vRequestsByRequestIdStatusResponse = + GetFalAiSkyreelsI2vRequestsByRequestIdStatusResponses[keyof GetFalAiSkyreelsI2vRequestsByRequestIdStatusResponses] + +export type PutFalAiSkyreelsI2vRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/skyreels-i2v/requests/{request_id}/cancel' +} + +export type PutFalAiSkyreelsI2vRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiSkyreelsI2vRequestsByRequestIdCancelResponse = + PutFalAiSkyreelsI2vRequestsByRequestIdCancelResponses[keyof PutFalAiSkyreelsI2vRequestsByRequestIdCancelResponses] + +export type PostFalAiSkyreelsI2vData = { + body: SchemaSkyreelsI2vInput + path?: never + query?: never + url: '/fal-ai/skyreels-i2v' +} + +export type PostFalAiSkyreelsI2vResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSkyreelsI2vResponse = + PostFalAiSkyreelsI2vResponses[keyof PostFalAiSkyreelsI2vResponses] + +export type GetFalAiSkyreelsI2vRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/skyreels-i2v/requests/{request_id}' +} + +export type GetFalAiSkyreelsI2vRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSkyreelsI2vOutput +} + +export type GetFalAiSkyreelsI2vRequestsByRequestIdResponse = + GetFalAiSkyreelsI2vRequestsByRequestIdResponses[keyof GetFalAiSkyreelsI2vRequestsByRequestIdResponses] + +export type GetFalAiLumaDreamMachineRay2ImageToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/luma-dream-machine/ray-2/image-to-video/requests/{request_id}/status' + } + +export type GetFalAiLumaDreamMachineRay2ImageToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiLumaDreamMachineRay2ImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiLumaDreamMachineRay2ImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiLumaDreamMachineRay2ImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiLumaDreamMachineRay2ImageToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/luma-dream-machine/ray-2/image-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiLumaDreamMachineRay2ImageToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiLumaDreamMachineRay2ImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiLumaDreamMachineRay2ImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiLumaDreamMachineRay2ImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiLumaDreamMachineRay2ImageToVideoData = { + body: SchemaLumaDreamMachineRay2ImageToVideoInput + path?: never + query?: never + url: '/fal-ai/luma-dream-machine/ray-2/image-to-video' +} + +export type PostFalAiLumaDreamMachineRay2ImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLumaDreamMachineRay2ImageToVideoResponse = + PostFalAiLumaDreamMachineRay2ImageToVideoResponses[keyof PostFalAiLumaDreamMachineRay2ImageToVideoResponses] + +export type GetFalAiLumaDreamMachineRay2ImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/luma-dream-machine/ray-2/image-to-video/requests/{request_id}' +} + +export type GetFalAiLumaDreamMachineRay2ImageToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaLumaDreamMachineRay2ImageToVideoOutput + } + +export type GetFalAiLumaDreamMachineRay2ImageToVideoRequestsByRequestIdResponse = + GetFalAiLumaDreamMachineRay2ImageToVideoRequestsByRequestIdResponses[keyof GetFalAiLumaDreamMachineRay2ImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiHunyuanVideoImg2VidLoraRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/hunyuan-video-img2vid-lora/requests/{request_id}/status' +} + +export type GetFalAiHunyuanVideoImg2VidLoraRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiHunyuanVideoImg2VidLoraRequestsByRequestIdStatusResponse = + GetFalAiHunyuanVideoImg2VidLoraRequestsByRequestIdStatusResponses[keyof GetFalAiHunyuanVideoImg2VidLoraRequestsByRequestIdStatusResponses] + +export type PutFalAiHunyuanVideoImg2VidLoraRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan-video-img2vid-lora/requests/{request_id}/cancel' +} + +export type PutFalAiHunyuanVideoImg2VidLoraRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiHunyuanVideoImg2VidLoraRequestsByRequestIdCancelResponse = + PutFalAiHunyuanVideoImg2VidLoraRequestsByRequestIdCancelResponses[keyof PutFalAiHunyuanVideoImg2VidLoraRequestsByRequestIdCancelResponses] + +export type PostFalAiHunyuanVideoImg2VidLoraData = { + body: SchemaHunyuanVideoImg2VidLoraInput + path?: never + query?: never + url: '/fal-ai/hunyuan-video-img2vid-lora' +} + +export type PostFalAiHunyuanVideoImg2VidLoraResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiHunyuanVideoImg2VidLoraResponse = + PostFalAiHunyuanVideoImg2VidLoraResponses[keyof PostFalAiHunyuanVideoImg2VidLoraResponses] + +export type GetFalAiHunyuanVideoImg2VidLoraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan-video-img2vid-lora/requests/{request_id}' +} + +export type GetFalAiHunyuanVideoImg2VidLoraRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaHunyuanVideoImg2VidLoraOutput +} + +export type GetFalAiHunyuanVideoImg2VidLoraRequestsByRequestIdResponse = + GetFalAiHunyuanVideoImg2VidLoraRequestsByRequestIdResponses[keyof GetFalAiHunyuanVideoImg2VidLoraRequestsByRequestIdResponses] + +export type GetFalAiPixverseV35ImageToVideoFastRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pixverse/v3.5/image-to-video/fast/requests/{request_id}/status' +} + +export type GetFalAiPixverseV35ImageToVideoFastRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiPixverseV35ImageToVideoFastRequestsByRequestIdStatusResponse = + GetFalAiPixverseV35ImageToVideoFastRequestsByRequestIdStatusResponses[keyof GetFalAiPixverseV35ImageToVideoFastRequestsByRequestIdStatusResponses] + +export type PutFalAiPixverseV35ImageToVideoFastRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v3.5/image-to-video/fast/requests/{request_id}/cancel' +} + +export type PutFalAiPixverseV35ImageToVideoFastRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiPixverseV35ImageToVideoFastRequestsByRequestIdCancelResponse = + PutFalAiPixverseV35ImageToVideoFastRequestsByRequestIdCancelResponses[keyof PutFalAiPixverseV35ImageToVideoFastRequestsByRequestIdCancelResponses] + +export type PostFalAiPixverseV35ImageToVideoFastData = { + body: SchemaPixverseV35ImageToVideoFastInput + path?: never + query?: never + url: '/fal-ai/pixverse/v3.5/image-to-video/fast' +} + +export type PostFalAiPixverseV35ImageToVideoFastResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiPixverseV35ImageToVideoFastResponse = + PostFalAiPixverseV35ImageToVideoFastResponses[keyof PostFalAiPixverseV35ImageToVideoFastResponses] + +export type GetFalAiPixverseV35ImageToVideoFastRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v3.5/image-to-video/fast/requests/{request_id}' +} + +export type GetFalAiPixverseV35ImageToVideoFastRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPixverseV35ImageToVideoFastOutput +} + +export type GetFalAiPixverseV35ImageToVideoFastRequestsByRequestIdResponse = + GetFalAiPixverseV35ImageToVideoFastRequestsByRequestIdResponses[keyof GetFalAiPixverseV35ImageToVideoFastRequestsByRequestIdResponses] + +export type GetFalAiPixverseV35ImageToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pixverse/v3.5/image-to-video/requests/{request_id}/status' +} + +export type GetFalAiPixverseV35ImageToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiPixverseV35ImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiPixverseV35ImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiPixverseV35ImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiPixverseV35ImageToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v3.5/image-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiPixverseV35ImageToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiPixverseV35ImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiPixverseV35ImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiPixverseV35ImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiPixverseV35ImageToVideoData = { + body: SchemaPixverseV35ImageToVideoInput + path?: never + query?: never + url: '/fal-ai/pixverse/v3.5/image-to-video' +} + +export type PostFalAiPixverseV35ImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPixverseV35ImageToVideoResponse = + PostFalAiPixverseV35ImageToVideoResponses[keyof PostFalAiPixverseV35ImageToVideoResponses] + +export type GetFalAiPixverseV35ImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v3.5/image-to-video/requests/{request_id}' +} + +export type GetFalAiPixverseV35ImageToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPixverseV35ImageToVideoOutput +} + +export type GetFalAiPixverseV35ImageToVideoRequestsByRequestIdResponse = + GetFalAiPixverseV35ImageToVideoRequestsByRequestIdResponses[keyof GetFalAiPixverseV35ImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiMinimaxVideo01SubjectReferenceRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/minimax/video-01-subject-reference/requests/{request_id}/status' + } + +export type GetFalAiMinimaxVideo01SubjectReferenceRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiMinimaxVideo01SubjectReferenceRequestsByRequestIdStatusResponse = + GetFalAiMinimaxVideo01SubjectReferenceRequestsByRequestIdStatusResponses[keyof GetFalAiMinimaxVideo01SubjectReferenceRequestsByRequestIdStatusResponses] + +export type PutFalAiMinimaxVideo01SubjectReferenceRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/video-01-subject-reference/requests/{request_id}/cancel' + } + +export type PutFalAiMinimaxVideo01SubjectReferenceRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiMinimaxVideo01SubjectReferenceRequestsByRequestIdCancelResponse = + PutFalAiMinimaxVideo01SubjectReferenceRequestsByRequestIdCancelResponses[keyof PutFalAiMinimaxVideo01SubjectReferenceRequestsByRequestIdCancelResponses] + +export type PostFalAiMinimaxVideo01SubjectReferenceData = { + body: SchemaMinimaxVideo01SubjectReferenceInput + path?: never + query?: never + url: '/fal-ai/minimax/video-01-subject-reference' +} + +export type PostFalAiMinimaxVideo01SubjectReferenceResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMinimaxVideo01SubjectReferenceResponse = + PostFalAiMinimaxVideo01SubjectReferenceResponses[keyof PostFalAiMinimaxVideo01SubjectReferenceResponses] + +export type GetFalAiMinimaxVideo01SubjectReferenceRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/video-01-subject-reference/requests/{request_id}' +} + +export type GetFalAiMinimaxVideo01SubjectReferenceRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaMinimaxVideo01SubjectReferenceOutput + } + +export type GetFalAiMinimaxVideo01SubjectReferenceRequestsByRequestIdResponse = + GetFalAiMinimaxVideo01SubjectReferenceRequestsByRequestIdResponses[keyof GetFalAiMinimaxVideo01SubjectReferenceRequestsByRequestIdResponses] + +export type GetFalAiKlingVideoV16StandardImageToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-video/v1.6/standard/image-to-video/requests/{request_id}/status' + } + +export type GetFalAiKlingVideoV16StandardImageToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiKlingVideoV16StandardImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoV16StandardImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoV16StandardImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoV16StandardImageToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v1.6/standard/image-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiKlingVideoV16StandardImageToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiKlingVideoV16StandardImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoV16StandardImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoV16StandardImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoV16StandardImageToVideoData = { + body: SchemaKlingVideoV16StandardImageToVideoInput + path?: never + query?: never + url: '/fal-ai/kling-video/v1.6/standard/image-to-video' +} + +export type PostFalAiKlingVideoV16StandardImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoV16StandardImageToVideoResponse = + PostFalAiKlingVideoV16StandardImageToVideoResponses[keyof PostFalAiKlingVideoV16StandardImageToVideoResponses] + +export type GetFalAiKlingVideoV16StandardImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v1.6/standard/image-to-video/requests/{request_id}' +} + +export type GetFalAiKlingVideoV16StandardImageToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaKlingVideoV16StandardImageToVideoOutput + } + +export type GetFalAiKlingVideoV16StandardImageToVideoRequestsByRequestIdResponse = + GetFalAiKlingVideoV16StandardImageToVideoRequestsByRequestIdResponses[keyof GetFalAiKlingVideoV16StandardImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiSadtalkerReferenceRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/sadtalker/reference/requests/{request_id}/status' +} + +export type GetFalAiSadtalkerReferenceRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSadtalkerReferenceRequestsByRequestIdStatusResponse = + GetFalAiSadtalkerReferenceRequestsByRequestIdStatusResponses[keyof GetFalAiSadtalkerReferenceRequestsByRequestIdStatusResponses] + +export type PutFalAiSadtalkerReferenceRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sadtalker/reference/requests/{request_id}/cancel' +} + +export type PutFalAiSadtalkerReferenceRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiSadtalkerReferenceRequestsByRequestIdCancelResponse = + PutFalAiSadtalkerReferenceRequestsByRequestIdCancelResponses[keyof PutFalAiSadtalkerReferenceRequestsByRequestIdCancelResponses] + +export type PostFalAiSadtalkerReferenceData = { + body: SchemaSadtalkerReferenceInput + path?: never + query?: never + url: '/fal-ai/sadtalker/reference' +} + +export type PostFalAiSadtalkerReferenceResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSadtalkerReferenceResponse = + PostFalAiSadtalkerReferenceResponses[keyof PostFalAiSadtalkerReferenceResponses] + +export type GetFalAiSadtalkerReferenceRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sadtalker/reference/requests/{request_id}' +} + +export type GetFalAiSadtalkerReferenceRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSadtalkerReferenceOutput +} + +export type GetFalAiSadtalkerReferenceRequestsByRequestIdResponse = + GetFalAiSadtalkerReferenceRequestsByRequestIdResponses[keyof GetFalAiSadtalkerReferenceRequestsByRequestIdResponses] + +export type GetFalAiMinimaxVideo01LiveImageToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/minimax/video-01-live/image-to-video/requests/{request_id}/status' + } + +export type GetFalAiMinimaxVideo01LiveImageToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiMinimaxVideo01LiveImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiMinimaxVideo01LiveImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiMinimaxVideo01LiveImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiMinimaxVideo01LiveImageToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/video-01-live/image-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiMinimaxVideo01LiveImageToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiMinimaxVideo01LiveImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiMinimaxVideo01LiveImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiMinimaxVideo01LiveImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiMinimaxVideo01LiveImageToVideoData = { + body: SchemaMinimaxVideo01LiveImageToVideoInput + path?: never + query?: never + url: '/fal-ai/minimax/video-01-live/image-to-video' +} + +export type PostFalAiMinimaxVideo01LiveImageToVideoResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiMinimaxVideo01LiveImageToVideoResponse = + PostFalAiMinimaxVideo01LiveImageToVideoResponses[keyof PostFalAiMinimaxVideo01LiveImageToVideoResponses] + +export type GetFalAiMinimaxVideo01LiveImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/video-01-live/image-to-video/requests/{request_id}' +} + +export type GetFalAiMinimaxVideo01LiveImageToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaMinimaxVideo01LiveImageToVideoOutput + } + +export type GetFalAiMinimaxVideo01LiveImageToVideoRequestsByRequestIdResponse = + GetFalAiMinimaxVideo01LiveImageToVideoRequestsByRequestIdResponses[keyof GetFalAiMinimaxVideo01LiveImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiLtxVideoImageToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ltx-video/image-to-video/requests/{request_id}/status' +} + +export type GetFalAiLtxVideoImageToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLtxVideoImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiLtxVideoImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiLtxVideoImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiLtxVideoImageToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-video/image-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiLtxVideoImageToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLtxVideoImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiLtxVideoImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiLtxVideoImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiLtxVideoImageToVideoData = { + body: SchemaLtxVideoImageToVideoInput + path?: never + query?: never + url: '/fal-ai/ltx-video/image-to-video' +} + +export type PostFalAiLtxVideoImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtxVideoImageToVideoResponse = + PostFalAiLtxVideoImageToVideoResponses[keyof PostFalAiLtxVideoImageToVideoResponses] + +export type GetFalAiLtxVideoImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-video/image-to-video/requests/{request_id}' +} + +export type GetFalAiLtxVideoImageToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLtxVideoImageToVideoOutput +} + +export type GetFalAiLtxVideoImageToVideoRequestsByRequestIdResponse = + GetFalAiLtxVideoImageToVideoRequestsByRequestIdResponses[keyof GetFalAiLtxVideoImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiCogvideox5bImageToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/cogvideox-5b/image-to-video/requests/{request_id}/status' +} + +export type GetFalAiCogvideox5bImageToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiCogvideox5bImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiCogvideox5bImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiCogvideox5bImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiCogvideox5bImageToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/cogvideox-5b/image-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiCogvideox5bImageToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiCogvideox5bImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiCogvideox5bImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiCogvideox5bImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiCogvideox5bImageToVideoData = { + body: SchemaCogvideox5bImageToVideoInput + path?: never + query?: never + url: '/fal-ai/cogvideox-5b/image-to-video' +} + +export type PostFalAiCogvideox5bImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiCogvideox5bImageToVideoResponse = + PostFalAiCogvideox5bImageToVideoResponses[keyof PostFalAiCogvideox5bImageToVideoResponses] + +export type GetFalAiCogvideox5bImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/cogvideox-5b/image-to-video/requests/{request_id}' +} + +export type GetFalAiCogvideox5bImageToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaCogvideox5bImageToVideoOutput +} + +export type GetFalAiCogvideox5bImageToVideoRequestsByRequestIdResponse = + GetFalAiCogvideox5bImageToVideoRequestsByRequestIdResponses[keyof GetFalAiCogvideox5bImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiKlingVideoV15ProImageToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-video/v1.5/pro/image-to-video/requests/{request_id}/status' + } + +export type GetFalAiKlingVideoV15ProImageToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiKlingVideoV15ProImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoV15ProImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoV15ProImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoV15ProImageToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v1.5/pro/image-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiKlingVideoV15ProImageToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiKlingVideoV15ProImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoV15ProImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoV15ProImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoV15ProImageToVideoData = { + body: SchemaKlingVideoV15ProImageToVideoInput + path?: never + query?: never + url: '/fal-ai/kling-video/v1.5/pro/image-to-video' +} + +export type PostFalAiKlingVideoV15ProImageToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoV15ProImageToVideoResponse = + PostFalAiKlingVideoV15ProImageToVideoResponses[keyof PostFalAiKlingVideoV15ProImageToVideoResponses] + +export type GetFalAiKlingVideoV15ProImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v1.5/pro/image-to-video/requests/{request_id}' +} + +export type GetFalAiKlingVideoV15ProImageToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaKlingVideoV15ProImageToVideoOutput +} + +export type GetFalAiKlingVideoV15ProImageToVideoRequestsByRequestIdResponse = + GetFalAiKlingVideoV15ProImageToVideoRequestsByRequestIdResponses[keyof GetFalAiKlingVideoV15ProImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiKlingVideoV1StandardImageToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-video/v1/standard/image-to-video/requests/{request_id}/status' + } + +export type GetFalAiKlingVideoV1StandardImageToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiKlingVideoV1StandardImageToVideoRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoV1StandardImageToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoV1StandardImageToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoV1StandardImageToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v1/standard/image-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiKlingVideoV1StandardImageToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiKlingVideoV1StandardImageToVideoRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoV1StandardImageToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoV1StandardImageToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoV1StandardImageToVideoData = { + body: SchemaKlingVideoV1StandardImageToVideoInput + path?: never + query?: never + url: '/fal-ai/kling-video/v1/standard/image-to-video' +} + +export type PostFalAiKlingVideoV1StandardImageToVideoResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoV1StandardImageToVideoResponse = + PostFalAiKlingVideoV1StandardImageToVideoResponses[keyof PostFalAiKlingVideoV1StandardImageToVideoResponses] + +export type GetFalAiKlingVideoV1StandardImageToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v1/standard/image-to-video/requests/{request_id}' +} + +export type GetFalAiKlingVideoV1StandardImageToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaKlingVideoV1StandardImageToVideoOutput + } + +export type GetFalAiKlingVideoV1StandardImageToVideoRequestsByRequestIdResponse = + GetFalAiKlingVideoV1StandardImageToVideoRequestsByRequestIdResponses[keyof GetFalAiKlingVideoV1StandardImageToVideoRequestsByRequestIdResponses] + +export type GetFalAiStableVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/stable-video/requests/{request_id}/status' +} + +export type GetFalAiStableVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiStableVideoRequestsByRequestIdStatusResponse = + GetFalAiStableVideoRequestsByRequestIdStatusResponses[keyof GetFalAiStableVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiStableVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/stable-video/requests/{request_id}/cancel' +} + +export type PutFalAiStableVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiStableVideoRequestsByRequestIdCancelResponse = + PutFalAiStableVideoRequestsByRequestIdCancelResponses[keyof PutFalAiStableVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiStableVideoData = { + body: SchemaStableVideoInput + path?: never + query?: never + url: '/fal-ai/stable-video' +} + +export type PostFalAiStableVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiStableVideoResponse = + PostFalAiStableVideoResponses[keyof PostFalAiStableVideoResponses] + +export type GetFalAiStableVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/stable-video/requests/{request_id}' +} + +export type GetFalAiStableVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaStableVideoOutput +} + +export type GetFalAiStableVideoRequestsByRequestIdResponse = + GetFalAiStableVideoRequestsByRequestIdResponses[keyof GetFalAiStableVideoRequestsByRequestIdResponses] + +export type GetFalAiAmtInterpolationFrameInterpolationRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/amt-interpolation/frame-interpolation/requests/{request_id}/status' + } + +export type GetFalAiAmtInterpolationFrameInterpolationRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiAmtInterpolationFrameInterpolationRequestsByRequestIdStatusResponse = + GetFalAiAmtInterpolationFrameInterpolationRequestsByRequestIdStatusResponses[keyof GetFalAiAmtInterpolationFrameInterpolationRequestsByRequestIdStatusResponses] + +export type PutFalAiAmtInterpolationFrameInterpolationRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/amt-interpolation/frame-interpolation/requests/{request_id}/cancel' + } + +export type PutFalAiAmtInterpolationFrameInterpolationRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiAmtInterpolationFrameInterpolationRequestsByRequestIdCancelResponse = + PutFalAiAmtInterpolationFrameInterpolationRequestsByRequestIdCancelResponses[keyof PutFalAiAmtInterpolationFrameInterpolationRequestsByRequestIdCancelResponses] + +export type PostFalAiAmtInterpolationFrameInterpolationData = { + body: SchemaAmtInterpolationFrameInterpolationInput + path?: never + query?: never + url: '/fal-ai/amt-interpolation/frame-interpolation' +} + +export type PostFalAiAmtInterpolationFrameInterpolationResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiAmtInterpolationFrameInterpolationResponse = + PostFalAiAmtInterpolationFrameInterpolationResponses[keyof PostFalAiAmtInterpolationFrameInterpolationResponses] + +export type GetFalAiAmtInterpolationFrameInterpolationRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/amt-interpolation/frame-interpolation/requests/{request_id}' + } + +export type GetFalAiAmtInterpolationFrameInterpolationRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaAmtInterpolationFrameInterpolationOutput + } + +export type GetFalAiAmtInterpolationFrameInterpolationRequestsByRequestIdResponse = + GetFalAiAmtInterpolationFrameInterpolationRequestsByRequestIdResponses[keyof GetFalAiAmtInterpolationFrameInterpolationRequestsByRequestIdResponses] + +export type GetFalAiLivePortraitRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/live-portrait/requests/{request_id}/status' +} + +export type GetFalAiLivePortraitRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLivePortraitRequestsByRequestIdStatusResponse = + GetFalAiLivePortraitRequestsByRequestIdStatusResponses[keyof GetFalAiLivePortraitRequestsByRequestIdStatusResponses] + +export type PutFalAiLivePortraitRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/live-portrait/requests/{request_id}/cancel' +} + +export type PutFalAiLivePortraitRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiLivePortraitRequestsByRequestIdCancelResponse = + PutFalAiLivePortraitRequestsByRequestIdCancelResponses[keyof PutFalAiLivePortraitRequestsByRequestIdCancelResponses] + +export type PostFalAiLivePortraitData = { + body: SchemaLivePortraitInput + path?: never + query?: never + url: '/fal-ai/live-portrait' +} + +export type PostFalAiLivePortraitResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLivePortraitResponse = + PostFalAiLivePortraitResponses[keyof PostFalAiLivePortraitResponses] + +export type GetFalAiLivePortraitRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/live-portrait/requests/{request_id}' +} + +export type GetFalAiLivePortraitRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLivePortraitOutput +} + +export type GetFalAiLivePortraitRequestsByRequestIdResponse = + GetFalAiLivePortraitRequestsByRequestIdResponses[keyof GetFalAiLivePortraitRequestsByRequestIdResponses] + +export type GetFalAiMusetalkRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/musetalk/requests/{request_id}/status' +} + +export type GetFalAiMusetalkRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiMusetalkRequestsByRequestIdStatusResponse = + GetFalAiMusetalkRequestsByRequestIdStatusResponses[keyof GetFalAiMusetalkRequestsByRequestIdStatusResponses] + +export type PutFalAiMusetalkRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/musetalk/requests/{request_id}/cancel' +} + +export type PutFalAiMusetalkRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiMusetalkRequestsByRequestIdCancelResponse = + PutFalAiMusetalkRequestsByRequestIdCancelResponses[keyof PutFalAiMusetalkRequestsByRequestIdCancelResponses] + +export type PostFalAiMusetalkData = { + body: SchemaMusetalkInput + path?: never + query?: never + url: '/fal-ai/musetalk' +} + +export type PostFalAiMusetalkResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMusetalkResponse = + PostFalAiMusetalkResponses[keyof PostFalAiMusetalkResponses] + +export type GetFalAiMusetalkRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/musetalk/requests/{request_id}' +} + +export type GetFalAiMusetalkRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMusetalkOutput +} + +export type GetFalAiMusetalkRequestsByRequestIdResponse = + GetFalAiMusetalkRequestsByRequestIdResponses[keyof GetFalAiMusetalkRequestsByRequestIdResponses] + +export type GetFalAiSadtalkerRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/sadtalker/requests/{request_id}/status' +} + +export type GetFalAiSadtalkerRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSadtalkerRequestsByRequestIdStatusResponse = + GetFalAiSadtalkerRequestsByRequestIdStatusResponses[keyof GetFalAiSadtalkerRequestsByRequestIdStatusResponses] + +export type PutFalAiSadtalkerRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sadtalker/requests/{request_id}/cancel' +} + +export type PutFalAiSadtalkerRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiSadtalkerRequestsByRequestIdCancelResponse = + PutFalAiSadtalkerRequestsByRequestIdCancelResponses[keyof PutFalAiSadtalkerRequestsByRequestIdCancelResponses] + +export type PostFalAiSadtalkerData = { + body: SchemaSadtalkerInput + path?: never + query?: never + url: '/fal-ai/sadtalker' +} + +export type PostFalAiSadtalkerResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSadtalkerResponse = + PostFalAiSadtalkerResponses[keyof PostFalAiSadtalkerResponses] + +export type GetFalAiSadtalkerRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sadtalker/requests/{request_id}' +} + +export type GetFalAiSadtalkerRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSadtalkerOutput +} + +export type GetFalAiSadtalkerRequestsByRequestIdResponse = + GetFalAiSadtalkerRequestsByRequestIdResponses[keyof GetFalAiSadtalkerRequestsByRequestIdResponses] + +export type GetFalAiFastSvdLcmRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/fast-svd-lcm/requests/{request_id}/status' +} + +export type GetFalAiFastSvdLcmRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFastSvdLcmRequestsByRequestIdStatusResponse = + GetFalAiFastSvdLcmRequestsByRequestIdStatusResponses[keyof GetFalAiFastSvdLcmRequestsByRequestIdStatusResponses] + +export type PutFalAiFastSvdLcmRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-svd-lcm/requests/{request_id}/cancel' +} + +export type PutFalAiFastSvdLcmRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFastSvdLcmRequestsByRequestIdCancelResponse = + PutFalAiFastSvdLcmRequestsByRequestIdCancelResponses[keyof PutFalAiFastSvdLcmRequestsByRequestIdCancelResponses] + +export type PostFalAiFastSvdLcmData = { + body: SchemaFastSvdLcmInput + path?: never + query?: never + url: '/fal-ai/fast-svd-lcm' +} + +export type PostFalAiFastSvdLcmResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiFastSvdLcmResponse = + PostFalAiFastSvdLcmResponses[keyof PostFalAiFastSvdLcmResponses] + +export type GetFalAiFastSvdLcmRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-svd-lcm/requests/{request_id}' +} + +export type GetFalAiFastSvdLcmRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFastSvdLcmOutput +} + +export type GetFalAiFastSvdLcmRequestsByRequestIdResponse = + GetFalAiFastSvdLcmRequestsByRequestIdResponses[keyof GetFalAiFastSvdLcmRequestsByRequestIdResponses] diff --git a/packages/typescript/ai-fal/src/generated/image-to-video/zod.gen.ts b/packages/typescript/ai-fal/src/generated/image-to-video/zod.gen.ts new file mode 100644 index 00000000..c48d3298 --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/image-to-video/zod.gen.ts @@ -0,0 +1,22422 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { z } from 'zod' + +/** + * File + */ +export const zSchemaFile = z.object({ + file_size: z.optional( + z.int().register(z.globalRegistry, { + description: 'The size of the file in bytes.', + }), + ), + file_name: z.optional( + z.string().register(z.globalRegistry, { + description: + 'The name of the file. It will be auto-generated if not provided.', + }), + ), + content_type: z.optional( + z.string().register(z.globalRegistry, { + description: 'The mime type of the file.', + }), + ), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), + file_data: z.optional( + z.string().register(z.globalRegistry, { + description: 'File data', + }), + ), +}) + +/** + * FastSVDOutput + */ +export const zSchemaFastSvdLcmOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated one that was used in case none was passed.\n\n ', + }), + video: zSchemaFile, +}) + +/** + * FastSVDImageInput + */ +export const zSchemaFastSvdLcmInput = z.object({ + motion_bucket_id: z + .optional( + z.int().gte(1).lte(255).register(z.globalRegistry, { + description: + '\n The motion bucket id determines the motion of the generated video. The\n higher the number, the more motion there will be.\n ', + }), + ) + .default(127), + fps: z + .optional( + z.int().gte(1).lte(25).register(z.globalRegistry, { + description: + '\n The FPS of the generated video. The higher the number, the faster the video will\n play. Total video length is 25 frames.\n ', + }), + ) + .default(10), + steps: z + .optional( + z.int().gte(1).lte(20).register(z.globalRegistry, { + description: + '\n The number of steps to run the model for. The higher the number the better\n the quality and the longer it will take to generate.\n ', + }), + ) + .default(4), + cond_aug: z + .optional( + z.number().gte(0).lte(10).register(z.globalRegistry, { + description: + '\n The conditioning augmentation determines the amount of noise that will be\n added to the conditioning frame. 
The higher the number, the more noise\n there will be, and the less the video will look like the initial image.\n Increase it for more motion.\n ', + }), + ) + .default(0.02), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the image to use as a starting point for the generation.', + }), +}) + +/** + * SadTalkerOutput + */ +export const zSchemaSadtalkerOutput = z.object({ + video: zSchemaFile, +}) + +/** + * SadTalkerInput + */ +export const zSchemaSadtalkerInput = z.object({ + pose_style: z + .optional( + z.int().gte(0).lte(45).register(z.globalRegistry, { + description: 'The style of the pose', + }), + ) + .default(0), + source_image_url: z.string().register(z.globalRegistry, { + description: 'URL of the source image', + }), + driven_audio_url: z.string().register(z.globalRegistry, { + description: 'URL of the driven audio', + }), + face_enhancer: z.optional( + z.enum(['gfpgan']).register(z.globalRegistry, { + description: 'The type of face enhancer to use', + }), + ), + expression_scale: z + .optional( + z.number().gte(0).lte(3).register(z.globalRegistry, { + description: 'The scale of the expression', + }), + ) + .default(1), + face_model_resolution: z.optional( + z.enum(['256', '512']).register(z.globalRegistry, { + description: 'The resolution of the face model', + }), + ), + still_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to use still mode. Less head motion, works with preprocess `full`.', + }), + ) + .default(false), + preprocess: z.optional( + z + .enum(['crop', 'extcrop', 'resize', 'full', 'extfull']) + .register(z.globalRegistry, { + description: 'The type of preprocessing to use', + }), + ), +}) + +/** + * MuseTalkOutput + */ +export const zSchemaMusetalkOutput = z.object({ + video: zSchemaFile, +}) + +/** + * MuseTalkInput + */ +export const zSchemaMusetalkInput = z.object({ + source_video_url: z.string().register(z.globalRegistry, { + description: 'URL of the source video', + }), + audio_url: z.string().register(z.globalRegistry, { + description: 'URL of the audio', + }), +}) + +/** + * LivePortraitOutput + */ +export const zSchemaLivePortraitOutput = z.object({ + video: zSchemaFile, +}) + +/** + * LivePortraitInput + */ +export const zSchemaLivePortraitInput = z.object({ + smile: z + .optional( + z.number().gte(-2).lte(2).register(z.globalRegistry, { + description: 'Amount to smile', + }), + ) + .default(0), + video_url: z.string().register(z.globalRegistry, { + description: 'URL of the video to drive the lip syncing.', + }), + eyebrow: z + .optional( + z.number().gte(-30).lte(30).register(z.globalRegistry, { + description: 'Amount to raise or lower eyebrows', + }), + ) + .default(0), + flag_stitching: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable stitching. 
Recommended to set to True.', + }), + ) + .default(true), + wink: z + .optional( + z.number().gte(0).lte(25).register(z.globalRegistry, { + description: 'Amount to wink', + }), + ) + .default(0), + rotate_pitch: z + .optional( + z.number().gte(-45).lte(45).register(z.globalRegistry, { + description: 'Amount to rotate the face in pitch', + }), + ) + .default(0), + blink: z + .optional( + z.number().gte(-30).lte(30).register(z.globalRegistry, { + description: 'Amount to blink the eyes', + }), + ) + .default(0), + scale: z + .optional( + z.number().register(z.globalRegistry, { + description: 'Scaling factor for the face crop.', + }), + ) + .default(2.3), + eee: z + .optional( + z.number().gte(-40).lte(40).register(z.globalRegistry, { + description: "Amount to shape mouth in 'eee' position", + }), + ) + .default(0), + flag_pasteback: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to paste-back/stitch the animated face cropping from the face-cropping space to the original image space.', + }), + ) + .default(true), + pupil_y: z + .optional( + z.number().gte(-45).lte(45).register(z.globalRegistry, { + description: 'Amount to move pupils vertically', + }), + ) + .default(0), + rotate_yaw: z + .optional( + z.number().gte(-45).lte(45).register(z.globalRegistry, { + description: 'Amount to rotate the face in yaw', + }), + ) + .default(0), + flag_do_rot: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to conduct the rotation when flag_do_crop is True.', + }), + ) + .default(true), + woo: z + .optional( + z.number().gte(-100).lte(100).register(z.globalRegistry, { + description: "Amount to shape mouth in 'woo' position", + }), + ) + .default(0), + aaa: z + .optional( + z.number().gte(-200).lte(200).register(z.globalRegistry, { + description: "Amount to open mouth in 'aaa' shape", + }), + ) + .default(0), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to be animated', + }), + flag_relative: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to use relative motion.', + }), + ) + .default(true), + flag_eye_retargeting: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable eye retargeting.', + }), + ) + .default(false), + flag_lip_zero: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to set the lip to closed state before animation. Only takes effect when flag_eye_retargeting and flag_lip_retargeting are False.', + }), + ) + .default(true), + batch_size: z + .optional( + z.int().register(z.globalRegistry, { + description: + 'Batch size for the model. The larger the batch size, the faster the model will run, but the more memory it will consume.', + }), + ) + .default(32), + rotate_roll: z + .optional( + z.number().gte(-45).lte(45).register(z.globalRegistry, { + description: 'Amount to rotate the face in roll', + }), + ) + .default(0), + pupil_x: z + .optional( + z.number().gte(-45).lte(45).register(z.globalRegistry, { + description: 'Amount to move pupils horizontally', + }), + ) + .default(0), + vy_ratio: z + .optional( + z.number().register(z.globalRegistry, { + description: + 'Vertical offset ratio for face crop. 
Positive values move up, negative values move down.', + }), + ) + .default(-0.125), + dsize: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Size of the output image.', + }), + ) + .default(512), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n Whether to enable the safety checker. If enabled, the model will check if the input image contains a face before processing it.\n The safety checker will process the input image\n ', + }), + ) + .default(false), + vx_ratio: z + .optional( + z.number().register(z.globalRegistry, { + description: 'Horizontal offset ratio for face crop.', + }), + ) + .default(0), + flag_lip_retargeting: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable lip retargeting.', + }), + ) + .default(false), + flag_do_crop: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to crop the source portrait to the face-cropping space.', + }), + ) + .default(true), +}) + +/** + * Frame + */ +export const zSchemaFrame = z.object({ + url: z.string().register(z.globalRegistry, { + description: 'URL of the frame', + }), +}) + +/** + * AMTInterpolationOutput + */ +export const zSchemaAmtInterpolationFrameInterpolationOutput = z.object({ + video: zSchemaFile, +}) + +/** + * AMTFrameInterpolationInput + */ +export const zSchemaAmtInterpolationFrameInterpolationInput = z.object({ + frames: z.array(zSchemaFrame).register(z.globalRegistry, { + description: 'Frames to interpolate', + }), + recursive_interpolation_passes: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Number of recursive interpolation passes', + }), + ) + .default(4), + output_fps: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Output frames per second', + }), + ) + .default(24), +}) + +/** + * VideoOutput + */ +export const zSchemaStableVideoOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'Seed for random number generator', + }), + video: zSchemaFile, +}) + +/** + * ImageInput + */ +export const zSchemaStableVideoInput = z.object({ + motion_bucket_id: z + .optional( + z.int().gte(1).lte(255).register(z.globalRegistry, { + description: + '\n The motion bucket id determines the motion of the generated video. The\n higher the number, the more motion there will be.\n ', + }), + ) + .default(127), + fps: z + .optional( + z.int().gte(10).lte(100).register(z.globalRegistry, { + description: 'The frames per second of the generated video.', + }), + ) + .default(25), + cond_aug: z + .optional( + z.number().gte(0).lte(10).register(z.globalRegistry, { + description: + '\n The conditioning augmentation determines the amount of noise that will be\n added to the conditioning frame. 
The higher the number, the more noise\n there will be, and the less the video will look like the initial image.\n Increase it for more motion.\n ', + }), + ) + .default(0.02), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), + image_url: z.string().min(1).register(z.globalRegistry, { + description: + 'The URL of the image to use as a starting point for the generation.', + }), +}) + +/** + * KlingV1I2VOutput + */ +export const zSchemaKlingVideoV1StandardImageToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * Trajectory + */ +export const zSchemaTrajectory = z.object({ + y: z.int().register(z.globalRegistry, { + description: 'Y coordinate of the motion trajectory', + }), + x: z.int().register(z.globalRegistry, { + description: 'X coordinate of the motion trajectory', + }), +}) + +/** + * DynamicMask + */ +export const zSchemaDynamicMask = z.object({ + trajectories: z.optional( + z.array(zSchemaTrajectory).register(z.globalRegistry, { + description: 'List of trajectories', + }), + ), + mask_url: z.string().register(z.globalRegistry, { + description: + 'URL of the image for Dynamic Brush Application Area (Mask image created by users using the motion brush)', + }), +}) + +/** + * V1ImageToVideoRequest + */ +export const zSchemaKlingVideoV1StandardImageToVideoInput = z.object({ + prompt: z.string().max(2500).register(z.globalRegistry, { + description: 'The prompt for the video', + }), + duration: z.optional( + z.enum(['5', '10']).register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ), + negative_prompt: z + .optional(z.string().max(2500)) + .default('blur, distort, and low quality'), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to be used for the video', + }), + static_mask_url: z.optional( + z.string().register(z.globalRegistry, { + description: + 'URL of the image for Static Brush Application Area (Mask image created by users using the motion brush)', + }), + ), + dynamic_masks: z.optional( + z.array(zSchemaDynamicMask).register(z.globalRegistry, { + description: 'List of dynamic masks', + }), + ), + tail_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'URL of the image to be used for the end of the video', + }), + ), + cfg_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt.\n ', + }), + ) + .default(0.5), +}) + +/** + * I2VOutput + */ +export const zSchemaKlingVideoV15ProImageToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * KlingV15ProImageToVideoRequest + */ +export const zSchemaKlingVideoV15ProImageToVideoInput = z.object({ + prompt: z.string().max(2500), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video frame', + }), + ), + duration: z.optional( + z.enum(['5', '10']).register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ), + negative_prompt: z + .optional(z.string().max(2500)) + .default('blur, distort, and low quality'), + image_url: z.string(), + static_mask_url: z.optional( + z.string().register(z.globalRegistry, { + description: + 'URL of the image for Static Brush 
Application Area (Mask image created by users using the motion brush)', + }), + ), + dynamic_masks: z.optional( + z.array(zSchemaDynamicMask).register(z.globalRegistry, { + description: 'List of dynamic masks', + }), + ), + tail_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'URL of the image to be used for the end of the video', + }), + ), + cfg_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt.\n ', + }), + ) + .default(0.5), +}) + +/** + * Output + */ +export const zSchemaCogvideox5bImageToVideoOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the video.', + }), + timings: z.record(z.string(), z.number()), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated video. It will be the same value as the one passed in the\n input or the randomly generated one that was used in case none was passed.\n ', + }), + video: zSchemaFile, +}) + +/** + * ImageSize + */ +export const zSchemaImageSize = z.object({ + height: z + .optional( + z.int().lte(14142).register(z.globalRegistry, { + description: 'The height of the generated image.', + }), + ) + .default(512), + width: z + .optional( + z.int().lte(14142).register(z.globalRegistry, { + description: 'The width of the generated image.', + }), + ) + .default(512), +}) + +/** + * LoraWeight + */ +export const zSchemaLoraWeight = z.object({ + path: z.string().register(z.globalRegistry, { + description: 'URL or the path to the LoRA weights.', + }), + scale: z + .optional( + z.number().gte(0).lte(4).register(z.globalRegistry, { + description: + '\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ', + }), + ) + .default(1), +}) + +/** + * ImageToVideoInput + */ +export const zSchemaCogvideox5bImageToVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the video from.', + }), + use_rife: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Use RIFE for video interpolation', + }), + ) + .default(true), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL to the image to generate the video from.', + }), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. 
We currently support one lora.\n ', + }), + ) + .default([]), + video_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related video to show you.\n ', + }), + ) + .default(7), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(50), + export_fps: z + .optional( + z.int().gte(4).lte(32).register(z.globalRegistry, { + description: 'The target FPS of the video', + }), + ) + .default(16), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to generate video from', + }), + ) + .default(''), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n ', + }), + ), +}) + +/** + * Output + */ +export const zSchemaLtxVideoImageToVideoOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for random number generation.', + }), + video: zSchemaFile, +}) + +/** + * ImageToVideoInput + */ +export const zSchemaLtxVideoImageToVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the video from.', + }), + guidance_scale: z + .optional( + z.number().lte(10).register(z.globalRegistry, { + description: 'The guidance scale to use.', + }), + ) + .default(3), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed to use for random number generation.', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to take.', + }), + ) + .default(30), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to generate the video from.', + }), + ) + .default( + 'low quality, worst quality, deformed, distorted, disfigured, motion smear, motion artifacts, fused fingers, bad anatomy, weird hand, ugly', + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to generate the video from.', + }), +}) + +/** + * I2VLiveOutput + */ +export const zSchemaMinimaxVideo01LiveImageToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * ImageToVideoRequest + */ +export const zSchemaMinimaxVideo01LiveImageToVideoInput = z.object({ + prompt_optimizer: z + .optional( + z.boolean().register(z.globalRegistry, { + description: "Whether to use the model's prompt optimizer", + }), + ) + .default(true), + prompt: z.string().max(2000), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to use as the first frame', + }), +}) + +/** + * SadTalkerOutput + */ +export const zSchemaSadtalkerReferenceOutput = z.object({ + video: zSchemaFile, +}) + +/** + * SadTalkerRefVideoInput + */ +export const zSchemaSadtalkerReferenceInput = z.object({ + pose_style: z + .optional( + z.int().gte(0).lte(45).register(z.globalRegistry, { + description: 'The style of the pose', + }), + ) + .default(0), + source_image_url: 
z.string().register(z.globalRegistry, { + description: 'URL of the source image', + }), + reference_pose_video_url: z.string().register(z.globalRegistry, { + description: 'URL of the reference video', + }), + driven_audio_url: z.string().register(z.globalRegistry, { + description: 'URL of the driven audio', + }), + face_enhancer: z.optional( + z.enum(['gfpgan']).register(z.globalRegistry, { + description: 'The type of face enhancer to use', + }), + ), + expression_scale: z + .optional( + z.number().gte(0).lte(3).register(z.globalRegistry, { + description: 'The scale of the expression', + }), + ) + .default(1), + face_model_resolution: z.optional( + z.enum(['256', '512']).register(z.globalRegistry, { + description: 'The resolution of the face model', + }), + ), + still_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to use still mode. Less head motion, works with preprocess `full`.', + }), + ) + .default(false), + preprocess: z.optional( + z + .enum(['crop', 'extcrop', 'resize', 'full', 'extfull']) + .register(z.globalRegistry, { + description: 'The type of preprocessing to use', + }), + ), +}) + +/** + * I2VOutput + */ +export const zSchemaKlingVideoV16StandardImageToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * ImageToVideoRequest + */ +export const zSchemaKlingVideoV16StandardImageToVideoInput = z.object({ + prompt: z.string().max(2500), + duration: z.optional( + z.enum(['5', '10']).register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ), + image_url: z.string(), + negative_prompt: z + .optional(z.string().max(2500)) + .default('blur, distort, and low quality'), + cfg_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt.\n ', + }), + ) + .default(0.5), +}) + +/** + * SubjectReferenceOutput + */ +export const zSchemaMinimaxVideo01SubjectReferenceOutput = z.object({ + video: zSchemaFile, +}) + +/** + * SubjectReferenceRequest + */ +export const zSchemaMinimaxVideo01SubjectReferenceInput = z.object({ + prompt_optimizer: z + .optional( + z.boolean().register(z.globalRegistry, { + description: "Whether to use the model's prompt optimizer", + }), + ) + .default(true), + prompt: z.string().max(2000), + subject_reference_image_url: z.string().register(z.globalRegistry, { + description: + 'URL of the subject reference image to use for consistent subject appearance', + }), +}) + +/** + * I2VOutput + */ +export const zSchemaPixverseV35ImageToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * ImageToVideoRequest + */ +export const zSchemaPixverseV35ImageToVideoInput = z.object({ + prompt: z.string(), + resolution: z.optional( + z.enum(['360p', '540p', '720p', '1080p']).register(z.globalRegistry, { + description: 'The resolution of the generated video', + }), + ), + duration: z.optional( + z.enum(['5', '8']).register(z.globalRegistry, { + description: + 'The duration of the generated video in seconds. 8s videos cost double. 
1080p videos are limited to 5 seconds', + }), + ), + style: z.optional( + z + .enum(['anime', '3d_animation', 'clay', 'comic', 'cyberpunk']) + .register(z.globalRegistry, { + description: 'The style of the generated video', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to use as the first frame', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt to be used for the generation', + }), + ) + .default(''), +}) + +/** + * I2VOutput + */ +export const zSchemaPixverseV35ImageToVideoFastOutput = z.object({ + video: zSchemaFile, +}) + +/** + * FastImageToVideoRequest + */ +export const zSchemaPixverseV35ImageToVideoFastInput = z.object({ + prompt: z.string(), + resolution: z.optional( + z.enum(['360p', '540p', '720p']).register(z.globalRegistry, { + description: 'The resolution of the generated video', + }), + ), + style: z.optional( + z + .enum(['anime', '3d_animation', 'clay', 'comic', 'cyberpunk']) + .register(z.globalRegistry, { + description: 'The style of the generated video', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt to be used for the generation', + }), + ) + .default(''), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to use as the first frame', + }), +}) + +/** + * Output + */ +export const zSchemaHunyuanVideoImg2VidLoraOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generating the video.', + }), + video: zSchemaFile, +}) + +/** + * Input + */ +export const zSchemaHunyuanVideoImg2VidLoraInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the video from.', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed to use for generating the video.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: + 'The URL to the image to generate the video from. 
The image must be 960x544 or it will get cropped and resized to that size.', + }), +}) + +/** + * Ray2I2VOutput + */ +export const zSchemaLumaDreamMachineRay2ImageToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * Ray2ImageToVideoRequest + */ +export const zSchemaLumaDreamMachineRay2ImageToVideoInput = z.object({ + prompt: z.string().min(3).max(5000), + aspect_ratio: z.optional( + z + .enum(['16:9', '9:16', '4:3', '3:4', '21:9', '9:21']) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), + resolution: z.optional( + z.enum(['540p', '720p', '1080p']).register(z.globalRegistry, { + description: + 'The resolution of the generated video (720p costs 2x more, 1080p costs 4x more)', + }), + ), + loop: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether the video should loop (end of video is blended with the beginning)', + }), + ) + .default(false), + duration: z.optional( + z.enum(['5s', '9s']).register(z.globalRegistry, { + description: 'The duration of the generated video', + }), + ), + image_url: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Initial image to start the video from. Can be used together with end_image_url.', + }), + ), + end_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Final image to end the video with. Can be used together with image_url.', + }), + ), +}) + +/** + * SkyreelsI2VResponse + */ +export const zSchemaSkyreelsI2vOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), + video: zSchemaFile, +}) + +/** + * SkyreelsI2VRequest + */ +export const zSchemaSkyreelsI2vInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the video from.', + }), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16']).register(z.globalRegistry, { + description: 'Aspect ratio of the output video', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image input.', + }), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: 'Guidance scale for generation (between 1.0 and 20.0)', + }), + ) + .default(6), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for generation. If not provided, a random seed will be used.', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: + 'Number of denoising steps (between 1 and 50). Higher values give better quality but take longer.', + }), + ) + .default(30), + negative_prompt: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Negative prompt to guide generation away from certain attributes.', + }), + ), +}) + +/** + * I2VDirectorOutput + */ +export const zSchemaMinimaxVideo01DirectorImageToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * ImageToVideoDirectorRequest + */ +export const zSchemaMinimaxVideo01DirectorImageToVideoInput = z.object({ + prompt_optimizer: z + .optional( + z.boolean().register(z.globalRegistry, { + description: "Whether to use the model's prompt optimizer", + }), + ) + .default(true), + prompt: z.string().max(2000).register(z.globalRegistry, { + description: + 'Text prompt for video generation. Camera movement instructions can be added using square brackets (e.g. [Pan left] or [Zoom in]). You can use up to 3 combined movements per prompt. 
Supported movements: Truck left/right, Pan left/right, Push in/Pull out, Pedestal up/down, Tilt up/down, Zoom in/out, Shake, Tracking shot, Static shot. For example: [Truck left, Pan right, Zoom in]. For a more detailed guide, refer to https://sixth-switch-2ac.notion.site/T2V-01-Director-Model-Tutorial-with-camera-movement-1886c20a98eb80f395b8e05291ad8645', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to use as the first frame', + }), +}) + +/** + * HunyuanI2VResponse + */ +export const zSchemaHunyuanVideoImageToVideoOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generating the video.', + }), + video: zSchemaFile, +}) + +/** + * HunyuanVideoRequest + */ +export const zSchemaHunyuanVideoImageToVideoInput = z.object({ + prompt: z.string().max(1000).register(z.globalRegistry, { + description: 'The prompt to generate the video from.', + }), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16']).register(z.globalRegistry, { + description: 'The aspect ratio of the video to generate.', + }), + ), + resolution: z.optional( + z.enum(['720p']).register(z.globalRegistry, { + description: 'The resolution of the video to generate.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image input.', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed to use for generating the video.', + }), + ), + num_frames: z.optional( + z.enum(['129']).register(z.globalRegistry, { + description: 'The number of frames to generate.', + }), + ), + i2v_stability: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Turning on I2V Stability reduces hallucination but also reduces motion.', + }), + ) + .default(false), +}) + +/** + * WanI2VResponse + */ +export const zSchemaWanI2vLoraOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * WanLoRAI2VRequest + */ +export const zSchemaWanI2vLoraInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide video generation.', + }), + shift: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'Shift parameter for video generation.', + }), + ) + .default(5), + reverse_video: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If true, the video will be reversed.', + }), + ) + .default(false), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: 'LoRA weights to be used in the inference.', + }), + ) + .default([]), + frames_per_second: z + .optional( + z.int().gte(5).lte(24).register(z.globalRegistry, { + description: + 'Frames per second of the generated video. Must be between 5 and 24.', + }), + ) + .default(16), + turbo_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the video will be generated faster with no noticeable degradation in the visual quality.', + }), + ) + .default(true), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(false), + num_frames: z + .optional( + z.int().gte(81).lte(100).register(z.globalRegistry, { + description: + 'Number of frames to generate. Must be between 81 and 100 (inclusive). 
If the number of frames is greater than 81, the video will be generated with 1.25x more billing units.', + }), + ) + .default(81), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for video generation.', + }), + ) + .default( + 'bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards', + ), + aspect_ratio: z.optional( + z.enum(['auto', '16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: 'Aspect ratio of the output video.', + }), + ), + resolution: z.optional( + z.enum(['480p', '720p']).register(z.globalRegistry, { + description: + 'Resolution of the generated video (480p or 720p). 480p is 0.5 billing units, and 720p is 1 billing unit.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: + 'URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped.', + }), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), + guide_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.', + }), + ) + .default(5), + num_inference_steps: z + .optional( + z.int().gte(2).lte(40).register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. 
Higher values give better quality but take longer.', + }), + ) + .default(30), +}) + +/** + * TemplateToVideoOutput + */ +export const zSchemaViduTemplateToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * TemplateToVideoRequest + */ +export const zSchemaViduTemplateToVideoInput = z.object({ + aspect_ratio: z.optional( + z.enum(['16:9', '9:16']).register(z.globalRegistry, { + description: 'The aspect ratio of the output video', + }), + ), + template: z.optional( + z + .enum([ + 'dreamy_wedding', + 'romantic_lift', + 'sweet_proposal', + 'couple_arrival', + 'cupid_arrow', + 'pet_lovers', + 'lunar_newyear', + 'hug', + 'kiss', + 'dynasty_dress', + 'wish_sender', + 'love_pose', + 'hair_swap', + 'youth_rewind', + 'morphlab', + 'live_photo', + 'emotionlab', + 'live_memory', + 'interaction', + 'christmas', + 'pet_finger', + 'eat_mushrooms', + 'beast_chase_library', + 'beast_chase_supermarket', + 'petal_scattered', + 'emoji_figure', + 'hair_color_change', + 'multiple_people_kissing', + 'beast_chase_amazon', + 'beast_chase_mountain', + 'balloonman_explodes_pro', + 'get_thinner', + 'jump2pool', + 'bodyshake', + 'jiggle_up', + 'shake_it_dance', + 'subject_3', + 'pubg_winner_hit', + 'shake_it_down', + 'blueprint_supreme', + 'hip_twist', + 'motor_dance', + 'rat_dance', + 'kwok_dance', + 'leg_sweep_dance', + 'heeseung_march', + 'shake_to_max', + 'dame_un_grrr', + 'i_know', + 'lit_bounce', + 'wave_dance', + 'chill_dance', + 'hip_flicking', + 'sakura_season', + 'zongzi_wrap', + 'zongzi_drop', + 'dragonboat_shot', + 'rain_kiss', + 'child_memory', + 'couple_drop', + 'couple_walk', + 'flower_receive', + 'love_drop', + 'cheek_kiss', + 'carry_me', + 'blow_kiss', + 'love_fall', + 'french_kiss_8s', + 'workday_feels', + 'love_story', + 'bloom_magic', + 'ghibli', + 'minecraft', + 'box_me', + 'claw_me', + 'clayshot', + 'manga_meme', + 'quad_meme', + 'pixel_me', + 'clayshot_duo', + 'irasutoya', + 'american_comic', + 'simpsons_comic', + 'yayoi_kusama_style', + 'pop_art', + 'jojo_style', + 'slice_therapy', + 'balloon_flyaway', + 'flying', + 'paperman', + 'pinch', + 'bloom_doorobear', + 'gender_swap', + 'nap_me', + 'sexy_me', + 'spin360', + 'smooth_shift', + 'paper_fall', + 'jump_to_cloud', + 'pilot', + 'sweet_dreams', + 'soul_depart', + 'punch_hit', + 'watermelon_hit', + 'split_stance_pet', + 'make_face', + 'break_glass', + 'split_stance_human', + 'covered_liquid_metal', + 'fluffy_plunge', + 'pet_belly_dance', + 'water_float', + 'relax_cut', + 'head_to_balloon', + 'cloning', + 'across_the_universe_jungle', + 'clothes_spinning_remnant', + 'across_the_universe_jurassic', + 'across_the_universe_moon', + 'fisheye_pet', + 'hitchcock_zoom', + 'cute_bangs', + 'earth_zoom_out', + 'fisheye_human', + 'drive_yacht', + 'virtual_singer', + 'earth_zoom_in', + 'aliens_coming', + 'drive_ferrari', + 'bjd_style', + 'virtual_fitting', + 'orbit', + 'zoom_in', + 'ai_outfit', + 'spin180', + 'orbit_dolly', + 'orbit_dolly_fast', + 'auto_spin', + 'walk_forward', + 'outfit_show', + 'zoom_in_fast', + 'zoom_out_image', + 'zoom_out_startend', + 'muscling', + 'captain_america', + 'hulk', + 'cap_walk', + 'hulk_dive', + 'exotic_princess', + 'beast_companion', + 'cartoon_doll', + 'golden_epoch', + 'oscar_gala', + 'fashion_stride', + 'star_carpet', + 'flame_carpet', + 'frost_carpet', + 'mecha_x', + 'style_me', + 'tap_me', + 'saber_warrior', + 'pet2human', + 'graduation', + 'fishermen', + 'happy_birthday', + 'fairy_me', + 'ladudu_me', + 'ladudu_me_random', + 'squid_game', + 'superman', + 'grow_wings', + 'clevage', + 'fly_with_doraemon', + 
'creatice_product_down', + 'pole_dance', + 'hug_from_behind', + 'creatice_product_up_cybercity', + 'creatice_product_up_bluecircuit', + 'creatice_product_up', + 'run_fast', + 'background_explosion', + ]) + .register(z.globalRegistry, { + description: + 'AI video template to use. Pricing varies by template: Standard templates (hug, kiss, love_pose, etc.) cost 4 credits ($0.20), Premium templates (lunar_newyear, dynasty_dress, dreamy_wedding, etc.) cost 6 credits ($0.30), and Advanced templates (live_photo) cost 10 credits ($0.50).', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for generation', + }), + ), + input_image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + "URLs of the images to use with the template. Number of images required varies by template: 'dynasty_dress' and 'shop_frame' accept 1-2 images, 'wish_sender' requires exactly 3 images, all other templates accept only 1 image.", + }), +}) + +/** + * ReferenceToVideoOutput + */ +export const zSchemaViduReferenceToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * ReferenceToVideoRequest + */ +export const zSchemaViduReferenceToVideoInput = z.object({ + prompt: z.string().max(1500).register(z.globalRegistry, { + description: 'Text prompt for video generation, max 1500 characters', + }), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: 'The aspect ratio of the output video', + }), + ), + reference_image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'URLs of the reference images to use for consistent subject appearance', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for generation', + }), + ), + movement_amplitude: z.optional( + z.enum(['auto', 'small', 'medium', 'large']).register(z.globalRegistry, { + description: 'The movement amplitude of objects in the frame', + }), + ), +}) + +/** + * StartEndToVideoOutput + */ +export const zSchemaViduStartEndToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * StartEndToVideoRequest + */ +export const zSchemaViduStartEndToVideoInput = z.object({ + prompt: z.string().max(1500).register(z.globalRegistry, { + description: 'Text prompt for video generation, max 1500 characters', + }), + start_image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to use as the first frame', + }), + movement_amplitude: z.optional( + z.enum(['auto', 'small', 'medium', 'large']).register(z.globalRegistry, { + description: 'The movement amplitude of objects in the frame', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for generation', + }), + ), + end_image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to use as the last frame', + }), +}) + +/** + * VideoOutput + */ +export const zSchemaViduImageToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * ImageToVideoRequest + */ +export const zSchemaViduImageToVideoInput = z.object({ + prompt: z.string().max(1500).register(z.globalRegistry, { + description: 'Text prompt for video generation, max 1500 characters', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for generation', + }), + ), + movement_amplitude: z.optional( + z.enum(['auto', 'small', 'medium', 'large']).register(z.globalRegistry, { + description: 'The movement amplitude of objects in the frame', + }), + ), + image_url: 
z.string().register(z.globalRegistry, { + description: 'URL of the image to use as the first frame', + }), +}) + +/** + * ImageToVideoV21Output + * + * Output from image-to-video generation + */ +export const zSchemaPikaV21ImageToVideoOutput = z + .object({ + video: zSchemaFile, + }) + .register(z.globalRegistry, { + description: 'Output from image-to-video generation', + }) + +/** + * ImageToVideov21Input + * + * Base request for image-to-video generation + */ +export const zSchemaPikaV21ImageToVideoInput = z + .object({ + prompt: z.string(), + resolution: z.optional( + z.enum(['720p', '1080p']).register(z.globalRegistry, { + description: 'The resolution of the generated video', + }), + ), + duration: z + .optional( + z.int().register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ) + .default(5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the random number generator', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'A negative prompt to guide the model', + }), + ) + .default(''), + image_url: z.string(), + }) + .register(z.globalRegistry, { + description: 'Base request for image-to-video generation', + }) + +/** + * Pika22ImageToVideoOutput + * + * Output model for Pika 2.2 image-to-video generation + */ +export const zSchemaPikaV22ImageToVideoOutput = z + .object({ + video: zSchemaFile, + }) + .register(z.globalRegistry, { + description: 'Output model for Pika 2.2 image-to-video generation', + }) + +/** + * Pika22ImageToVideoRequest + * + * Request model for Pika 2.2 image-to-video generation + */ +export const zSchemaPikaV22ImageToVideoInput = z + .object({ + prompt: z.string(), + resolution: z.optional( + z.enum(['720p', '1080p']).register(z.globalRegistry, { + description: 'The resolution of the generated video', + }), + ), + duration: z.optional( + z.union([z.literal(5), z.literal(10)]).register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the random number generator', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'A negative prompt to guide the model', + }), + ) + .default(''), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to use as the first frame', + }), + }) + .register(z.globalRegistry, { + description: 'Request model for Pika 2.2 image-to-video generation', + }) + +/** + * Pika22PikascenesOutput + * + * Output model for Pika 2.2 Pikascenes generation + */ +export const zSchemaPikaV22PikascenesOutput = z + .object({ + video: zSchemaFile, + }) + .register(z.globalRegistry, { + description: 'Output model for Pika 2.2 Pikascenes generation', + }) + +/** + * Pika22PikascenesRequest + * + * Request model for Pika 2.2 Pikascenes (collection-to-video) generation + */ +export const zSchemaPikaV22PikascenesInput = z + .object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Text prompt describing the desired video', + }), + resolution: z.optional( + z.enum(['720p', '1080p']).register(z.globalRegistry, { + description: 'The resolution of the generated video', + }), + ), + aspect_ratio: z.optional( + z + .enum(['16:9', '9:16', '1:1', '4:5', '5:4', '3:2', '2:3']) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), + duration: z.optional( + 
z.union([z.literal(5), z.literal(10)]).register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ), + ingredients_mode: z.optional( + z.enum(['precise', 'creative']).register(z.globalRegistry, { + description: + 'Mode for integrating multiple images. Precise mode is more accurate, creative mode is more creative.', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the random number generator', + }), + ), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: 'URLs of images to combine into a video', + }), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'A negative prompt to guide the model', + }), + ) + .default('ugly, bad, terrible'), + }) + .register(z.globalRegistry, { + description: + 'Request model for Pika 2.2 Pikascenes (collection-to-video) generation', + }) + +/** + * TurboImageToVideoOutput + * + * Output model for all video generation endpoints + */ +export const zSchemaPikaV2TurboImageToVideoOutput = z + .object({ + video: zSchemaFile, + }) + .register(z.globalRegistry, { + description: 'Output model for all video generation endpoints', + }) + +/** + * ImageToVideoTurboInput + * + * Base request for image-to-video generation + */ +export const zSchemaPikaV2TurboImageToVideoInput = z + .object({ + prompt: z.string(), + resolution: z.optional( + z.enum(['720p', '1080p']).register(z.globalRegistry, { + description: 'The resolution of the generated video', + }), + ), + duration: z + .optional( + z.int().register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ) + .default(5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the random number generator', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'A negative prompt to guide the model', + }), + ) + .default(''), + image_url: z.string(), + }) + .register(z.globalRegistry, { + description: 'Base request for image-to-video generation', + }) + +/** + * PikaffectsOutput + * + * Output from Pikaffects generation + */ +export const zSchemaPikaV15PikaffectsOutput = z + .object({ + video: zSchemaFile, + }) + .register(z.globalRegistry, { + description: 'Output from Pikaffects generation', + }) + +/** + * PikaffectsRequest + * + * Request model for Pikaffects endpoint + */ +export const zSchemaPikaV15PikaffectsInput = z + .object({ + pikaffect: z + .enum([ + 'Cake-ify', + 'Crumble', + 'Crush', + 'Decapitate', + 'Deflate', + 'Dissolve', + 'Explode', + 'Eye-pop', + 'Inflate', + 'Levitate', + 'Melt', + 'Peel', + 'Poke', + 'Squish', + 'Ta-da', + 'Tear', + ]) + .register(z.globalRegistry, { + description: 'The Pikaffect to apply', + }), + prompt: z.optional( + z.string().register(z.globalRegistry, { + description: 'Text prompt to guide the effect', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the random number generator', + }), + ), + negative_prompt: z.optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt to guide the model', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the input image', + }), + }) + .register(z.globalRegistry, { + description: 'Request model for Pikaffects endpoint', + }) + +/** + * Ray2I2VOutput + */ +export const zSchemaLumaDreamMachineRay2FlashImageToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** 
+ * Ray2ImageToVideoRequest + */ +export const zSchemaLumaDreamMachineRay2FlashImageToVideoInput = z.object({ + prompt: z.string().min(3).max(5000), + aspect_ratio: z.optional( + z + .enum(['16:9', '9:16', '4:3', '3:4', '21:9', '9:21']) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), + resolution: z.optional( + z.enum(['540p', '720p', '1080p']).register(z.globalRegistry, { + description: + 'The resolution of the generated video (720p costs 2x more, 1080p costs 4x more)', + }), + ), + loop: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether the video should loop (end of video is blended with the beginning)', + }), + ) + .default(false), + duration: z.optional( + z.enum(['5s', '9s']).register(z.globalRegistry, { + description: 'The duration of the generated video', + }), + ), + image_url: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Initial image to start the video from. Can be used together with end_image_url.', + }), + ), + end_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Final image to end the video with. Can be used together with image_url.', + }), + ), +}) + +/** + * TransitionOutput + */ +export const zSchemaPixverseV35TransitionOutput = z.object({ + video: zSchemaFile, +}) + +/** + * TransitionRequest + */ +export const zSchemaPixverseV35TransitionInput = z.object({ + first_image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to use as the first frame', + }), + aspect_ratio: z.optional( + z.enum(['16:9', '4:3', '1:1', '3:4', '9:16']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), + resolution: z.optional( + z.enum(['360p', '540p', '720p', '1080p']).register(z.globalRegistry, { + description: 'The resolution of the generated video', + }), + ), + style: z.optional( + z + .enum(['anime', '3d_animation', 'clay', 'comic', 'cyberpunk']) + .register(z.globalRegistry, { + description: 'The style of the generated video', + }), + ), + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt for the transition', + }), + duration: z.optional( + z.enum(['5', '8']).register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n ', + }), + ), + end_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'URL of the image to use as the last frame', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt to be used for the generation', + }), + ) + .default(''), +}) + +/** + * EffectOutput + */ +export const zSchemaPixverseV35EffectsOutput = z.object({ + video: zSchemaFile, +}) + +/** + * EffectInput + */ +export const zSchemaPixverseV35EffectsInput = z.object({ + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt to be used for the generation', + }), + ) + .default(''), + duration: z.optional( + z.enum(['5', '8']).register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ), + resolution: z.optional( + z.enum(['360p', '540p', '720p', '1080p']).register(z.globalRegistry, { + description: 'The resolution of the generated video.', + }), + ), + 
effect: z + .enum([ + 'Kiss Me AI', + 'Kiss', + 'Muscle Surge', + 'Warmth of Jesus', + 'Anything, Robot', + 'The Tiger Touch', + 'Hug', + 'Holy Wings', + 'Microwave', + 'Zombie Mode', + 'Squid Game', + 'Baby Face', + 'Black Myth: Wukong', + 'Long Hair Magic', + 'Leggy Run', + 'Fin-tastic Mermaid', + 'Punch Face', + 'Creepy Devil Smile', + 'Thunder God', + 'Eye Zoom Challenge', + "Who's Arrested?", + 'Baby Arrived', + 'Werewolf Rage', + 'Bald Swipe', + 'BOOM DROP', + 'Huge Cutie', + 'Liquid Metal', + 'Sharksnap!', + 'Dust Me Away', + '3D Figurine Factor', + 'Bikini Up', + 'My Girlfriends', + 'My Boyfriends', + 'Subject 3 Fever', + 'Earth Zoom', + 'Pole Dance', + 'Vroom Dance', + 'GhostFace Terror', + 'Dragon Evoker', + 'Skeletal Bae', + 'Summoning succubus', + 'Halloween Voodoo Doll', + '3D Naked-Eye AD', + 'Package Explosion', + 'Dishes Served', + 'Ocean ad', + 'Supermarket AD', + 'Tree doll', + 'Come Feel My Abs', + 'The Bicep Flex', + 'London Elite Vibe', + 'Flora Nymph Gown', + 'Christmas Costume', + "It's Snowy", + 'Reindeer Cruiser', + 'Snow Globe Maker', + 'Pet Christmas Outfit', + 'Adopt a Polar Pal', + 'Cat Christmas Box', + 'Starlight Gift Box', + 'Xmas Poster', + 'Pet Christmas Tree', + 'City Santa Hat', + 'Stocking Sweetie', + 'Christmas Night', + 'Xmas Front Page Karma', + "Grinch's Xmas Hijack", + 'Giant Product', + 'Truck Fashion Shoot', + 'Beach AD', + 'Shoal Surround', + 'Mechanical Assembly', + 'Lighting AD', + 'Billboard AD', + 'Product close-up', + 'Parachute Delivery', + 'Dreamlike Cloud', + 'Macaron Machine', + 'Poster AD', + 'Truck AD', + 'Graffiti AD', + '3D Figurine Factory', + 'The Exclusive First Class', + 'Art Zoom Challenge', + 'I Quit', + 'Hitchcock Dolly Zoom', + 'Smell the Lens', + 'I believe I can fly', + 'Strikout Dance', + 'Pixel World', + 'Mint in Box', + 'Hands up, Hand', + 'Flora Nymph Go', + 'Somber Embrace', + 'Beam me up', + 'Suit Swagger', + ]) + .register(z.globalRegistry, { + description: 'The effect to apply to the video', + }), + image_url: z.string().register(z.globalRegistry, { + description: + 'Optional URL of the image to use as the first frame. If not provided, generates from text', + }), +}) + +/** + * I2VOutputV4 + */ +export const zSchemaPixverseV4ImageToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * ImageToVideoRequestV4 + */ +export const zSchemaPixverseV4ImageToVideoInput = z.object({ + prompt: z.string(), + resolution: z.optional( + z.enum(['360p', '540p', '720p', '1080p']).register(z.globalRegistry, { + description: 'The resolution of the generated video', + }), + ), + duration: z.optional( + z.enum(['5', '8']).register(z.globalRegistry, { + description: + 'The duration of the generated video in seconds. 8s videos cost double. 
1080p videos are limited to 5 seconds', + }), + ), + style: z.optional( + z + .enum(['anime', '3d_animation', 'clay', 'comic', 'cyberpunk']) + .register(z.globalRegistry, { + description: 'The style of the generated video', + }), + ), + camera_movement: z.optional( + z + .enum([ + 'horizontal_left', + 'horizontal_right', + 'vertical_up', + 'vertical_down', + 'zoom_in', + 'zoom_out', + 'crane_up', + 'quickly_zoom_in', + 'quickly_zoom_out', + 'smooth_zoom_in', + 'camera_rotation', + 'robo_arm', + 'super_dolly_out', + 'whip_pan', + 'hitchcock', + 'left_follow', + 'right_follow', + 'pan_left', + 'pan_right', + 'fix_bg', + ]) + .register(z.globalRegistry, { + description: 'The type of camera movement to apply to the video', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to use as the first frame', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt to be used for the generation', + }), + ) + .default(''), +}) + +/** + * I2VOutputV4 + */ +export const zSchemaPixverseV4ImageToVideoFastOutput = z.object({ + video: zSchemaFile, +}) + +/** + * FastImageToVideoRequestV4 + */ +export const zSchemaPixverseV4ImageToVideoFastInput = z.object({ + prompt: z.string(), + resolution: z.optional( + z.enum(['360p', '540p', '720p']).register(z.globalRegistry, { + description: 'The resolution of the generated video', + }), + ), + style: z.optional( + z + .enum(['anime', '3d_animation', 'clay', 'comic', 'cyberpunk']) + .register(z.globalRegistry, { + description: 'The style of the generated video', + }), + ), + camera_movement: z.optional( + z + .enum([ + 'horizontal_left', + 'horizontal_right', + 'vertical_up', + 'vertical_down', + 'zoom_in', + 'zoom_out', + 'crane_up', + 'quickly_zoom_in', + 'quickly_zoom_out', + 'smooth_zoom_in', + 'camera_rotation', + 'robo_arm', + 'super_dolly_out', + 'whip_pan', + 'hitchcock', + 'left_follow', + 'right_follow', + 'pan_left', + 'pan_right', + 'fix_bg', + ]) + .register(z.globalRegistry, { + description: 'The type of camera movement to apply to the video', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to use as the first frame', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt to be used for the generation', + }), + ) + .default(''), +}) + +/** + * FramePackResponse + */ +export const zSchemaFramepackOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generating the video.', + }), + video: zSchemaFile, +}) + +/** + * FramePackRequest + */ +export const zSchemaFramepackInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Text prompt for video generation (max 500 characters).', + }), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16']).register(z.globalRegistry, { + description: 'The aspect ratio of the video to generate.', + }), + ), + resolution: z.optional( + z.enum(['720p', '480p']).register(z.globalRegistry, { + description: + 'The resolution of the video to 
generate. 720p generations cost 1.5x more than 480p generations.', + }), + ), + num_frames: z + .optional( + z.int().gte(30).lte(900).register(z.globalRegistry, { + description: 'The number of frames to generate.', + }), + ) + .default(180), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image input.', + }), + guidance_scale: z + .optional( + z.number().gte(0).lte(32).register(z.globalRegistry, { + description: 'Guidance scale for the generation.', + }), + ) + .default(10), + seed: z.optional(z.union([z.int(), z.unknown()])), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(false), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for video generation.', + }), + ) + .default(''), + cfg_scale: z + .optional( + z.number().gte(0).lte(7).register(z.globalRegistry, { + description: 'Classifier-Free Guidance scale for the generation.', + }), + ) + .default(1), +}) + +/** + * WanFLF2VResponse + */ +export const zSchemaWanFlf2vOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * WanFLF2VRequest + */ +export const zSchemaWanFlf2vInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide video generation.', + }), + shift: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'Shift parameter for video generation.', + }), + ) + .default(5), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level to use. The more acceleration, the faster the generation, but with lower quality. The recommended value is 'regular'.", + }), + ), + frames_per_second: z + .optional( + z.int().gte(5).lte(24).register(z.globalRegistry, { + description: + 'Frames per second of the generated video. Must be between 5 and 24.', + }), + ) + .default(16), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(false), + start_image_url: z.string().register(z.globalRegistry, { + description: + 'URL of the starting image. If the input image does not match the chosen aspect ratio, it is resized and center cropped.', + }), + end_image_url: z.string().register(z.globalRegistry, { + description: + 'URL of the ending image. If the input image does not match the chosen aspect ratio, it is resized and center cropped.', + }), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for video generation.', + }), + ) + .default( + 'bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards', + ), + num_frames: z + .optional( + z.int().gte(81).lte(100).register(z.globalRegistry, { + description: + 'Number of frames to generate. Must be between 81 and 100 (inclusive). 
If the number of frames is greater than 81, the video will be generated with 1.25x more billing units.', + }), + ) + .default(81), + resolution: z.optional( + z.enum(['480p', '720p']).register(z.globalRegistry, { + description: + 'Resolution of the generated video (480p or 720p). 480p is 0.5 billing units, and 720p is 1 billing unit.', + }), + ), + aspect_ratio: z.optional( + z.enum(['auto', '16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: + "Aspect ratio of the generated video. If 'auto', the aspect ratio will be determined automatically based on the input image.", + }), + ), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), + guide_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.', + }), + ) + .default(5), + num_inference_steps: z + .optional( + z.int().gte(2).lte(40).register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. Higher values give better quality but take longer.', + }), + ) + .default(30), +}) + +/** + * FramePackFLF2VResponse + */ +export const zSchemaFramepackFlf2vOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generating the video.', + }), + video: zSchemaFile, +}) + +/** + * FramePackF2LFRequest + */ +export const zSchemaFramepackFlf2vInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Text prompt for video generation (max 500 characters).', + }), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16']).register(z.globalRegistry, { + description: 'The aspect ratio of the video to generate.', + }), + ), + resolution: z.optional( + z.enum(['720p', '480p']).register(z.globalRegistry, { + description: + 'The resolution of the video to generate. 720p generations cost 1.5x more than 480p generations.', + }), + ), + num_frames: z + .optional( + z.int().gte(30).lte(1800).register(z.globalRegistry, { + description: 'The number of frames to generate.', + }), + ) + .default(240), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(false), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image input.', + }), + strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Determines the influence of the final frame on the generated video. 
Higher values result in the output being more heavily influenced by the last frame.', + }), + ) + .default(0.8), + guidance_scale: z + .optional( + z.number().gte(0).lte(32).register(z.globalRegistry, { + description: 'Guidance scale for the generation.', + }), + ) + .default(10), + seed: z.optional(z.union([z.int(), z.unknown()])), + end_image_url: z.string().register(z.globalRegistry, { + description: 'URL of the end image input.', + }), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for video generation.', + }), + ) + .default(''), + cfg_scale: z + .optional( + z.number().gte(0).lte(7).register(z.globalRegistry, { + description: 'Classifier-Free Guidance scale for the generation.', + }), + ) + .default(1), +}) + +/** + * MagiImageToVideoResponse + */ +export const zSchemaMagiDistilledImageToVideoOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * MagiImageToVideoRequest + */ +export const zSchemaMagiDistilledImageToVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide video generation.', + }), + resolution: z.optional( + z.enum(['480p', '720p']).register(z.globalRegistry, { + description: + 'Resolution of the generated video (480p or 720p). 480p is 0.5 billing units, and 720p is 1 billing unit.', + }), + ), + aspect_ratio: z.optional( + z.enum(['auto', '16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: + "Aspect ratio of the generated video. If 'auto', the aspect ratio will be determined automatically based on the input image.", + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: + 'URL of the input image to represent the first frame of the video. If the input image does not match the chosen aspect ratio, it is resized and center cropped.', + }), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_inference_steps: z.optional( + z + .union([z.literal(4), z.literal(8), z.literal(16), z.literal(32)]) + .register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. Higher values give better quality but take longer.', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), + num_frames: z + .optional( + z.int().gte(96).lte(192).register(z.globalRegistry, { + description: + 'Number of frames to generate. Must be between 96 and 192 (inclusive). 
Each additional 24 frames beyond 96 incurs an additional billing unit.', + }), + ) + .default(96), +}) + +/** + * EffectOutput + */ +export const zSchemaPixverseV4EffectsOutput = z.object({ + video: zSchemaFile, +}) + +/** + * EffectInput + */ +export const zSchemaPixverseV4EffectsInput = z.object({ + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt to be used for the generation', + }), + ) + .default(''), + duration: z.optional( + z.enum(['5', '8']).register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ), + resolution: z.optional( + z.enum(['360p', '540p', '720p', '1080p']).register(z.globalRegistry, { + description: 'The resolution of the generated video.', + }), + ), + effect: z + .enum([ + 'Kiss Me AI', + 'Kiss', + 'Muscle Surge', + 'Warmth of Jesus', + 'Anything, Robot', + 'The Tiger Touch', + 'Hug', + 'Holy Wings', + 'Microwave', + 'Zombie Mode', + 'Squid Game', + 'Baby Face', + 'Black Myth: Wukong', + 'Long Hair Magic', + 'Leggy Run', + 'Fin-tastic Mermaid', + 'Punch Face', + 'Creepy Devil Smile', + 'Thunder God', + 'Eye Zoom Challenge', + "Who's Arrested?", + 'Baby Arrived', + 'Werewolf Rage', + 'Bald Swipe', + 'BOOM DROP', + 'Huge Cutie', + 'Liquid Metal', + 'Sharksnap!', + 'Dust Me Away', + '3D Figurine Factor', + 'Bikini Up', + 'My Girlfriends', + 'My Boyfriends', + 'Subject 3 Fever', + 'Earth Zoom', + 'Pole Dance', + 'Vroom Dance', + 'GhostFace Terror', + 'Dragon Evoker', + 'Skeletal Bae', + 'Summoning succubus', + 'Halloween Voodoo Doll', + '3D Naked-Eye AD', + 'Package Explosion', + 'Dishes Served', + 'Ocean ad', + 'Supermarket AD', + 'Tree doll', + 'Come Feel My Abs', + 'The Bicep Flex', + 'London Elite Vibe', + 'Flora Nymph Gown', + 'Christmas Costume', + "It's Snowy", + 'Reindeer Cruiser', + 'Snow Globe Maker', + 'Pet Christmas Outfit', + 'Adopt a Polar Pal', + 'Cat Christmas Box', + 'Starlight Gift Box', + 'Xmas Poster', + 'Pet Christmas Tree', + 'City Santa Hat', + 'Stocking Sweetie', + 'Christmas Night', + 'Xmas Front Page Karma', + "Grinch's Xmas Hijack", + 'Giant Product', + 'Truck Fashion Shoot', + 'Beach AD', + 'Shoal Surround', + 'Mechanical Assembly', + 'Lighting AD', + 'Billboard AD', + 'Product close-up', + 'Parachute Delivery', + 'Dreamlike Cloud', + 'Macaron Machine', + 'Poster AD', + 'Truck AD', + 'Graffiti AD', + '3D Figurine Factory', + 'The Exclusive First Class', + 'Art Zoom Challenge', + 'I Quit', + 'Hitchcock Dolly Zoom', + 'Smell the Lens', + 'I believe I can fly', + 'Strikout Dance', + 'Pixel World', + 'Mint in Box', + 'Hands up, Hand', + 'Flora Nymph Go', + 'Somber Embrace', + 'Beam me up', + 'Suit Swagger', + ]) + .register(z.globalRegistry, { + description: 'The effect to apply to the video', + }), + image_url: z.string().register(z.globalRegistry, { + description: + 'Optional URL of the image to use as the first frame. If not provided, generates from text', + }), +}) + +/** + * MagiImageToVideoResponse + */ +export const zSchemaMagiImageToVideoOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * MagiImageToVideoRequest + */ +export const zSchemaMagiImageToVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide video generation.', + }), + resolution: z.optional( + z.enum(['480p', '720p']).register(z.globalRegistry, { + description: + 'Resolution of the generated video (480p or 720p). 
480p is 0.5 billing units, and 720p is 1 billing unit.', + }), + ), + aspect_ratio: z.optional( + z.enum(['auto', '16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: + "Aspect ratio of the generated video. If 'auto', the aspect ratio will be determined automatically based on the input image.", + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: + 'URL of the input image to represent the first frame of the video. If the input image does not match the chosen aspect ratio, it is resized and center cropped.', + }), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_inference_steps: z.optional( + z + .union([ + z.literal(4), + z.literal(8), + z.literal(16), + z.literal(32), + z.literal(64), + ]) + .register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. Higher values give better quality but take longer.', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), + num_frames: z + .optional( + z.int().gte(96).lte(192).register(z.globalRegistry, { + description: + 'Number of frames to generate. Must be between 96 and 192 (inclusive). Each additional 24 frames beyond 96 incurs an additional billing unit.', + }), + ) + .default(96), +}) + +/** + * Q1ImageToVideoOutput + */ +export const zSchemaViduQ1ImageToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * Q1ImageToVideoRequest + */ +export const zSchemaViduQ1ImageToVideoInput = z.object({ + prompt: z.string().max(1500).register(z.globalRegistry, { + description: 'Text prompt for video generation, max 1500 characters', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Seed for the random number generator', + }), + ), + movement_amplitude: z.optional( + z.enum(['auto', 'small', 'medium', 'large']).register(z.globalRegistry, { + description: 'The movement amplitude of objects in the frame', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to use as the first frame', + }), +}) + +/** + * Q1StartEndToVideoOutput + */ +export const zSchemaViduQ1StartEndToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * Q1StartEndToVideoRequest + */ +export const zSchemaViduQ1StartEndToVideoInput = z.object({ + prompt: z.string().max(1500).register(z.globalRegistry, { + description: 'Text prompt for video generation, max 1500 characters', + }), + start_image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to use as the first frame', + }), + movement_amplitude: z.optional( + z.enum(['auto', 'small', 'medium', 'large']).register(z.globalRegistry, { + description: 'The movement amplitude of objects in the frame', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Seed for the random number generator', + }), + ), + end_image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to use as the last frame', + }), +}) + +/** + * FramePackF1Response + */ +export const zSchemaFramepackF1Output = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generating the video.', + }), + video: zSchemaFile, +}) + +/** + * FramePackF1Request + */ +export const zSchemaFramepackF1Input = z.object({ + prompt: z.string().register(z.globalRegistry, { + 
description: 'Text prompt for video generation (max 500 characters).',
+  }),
+  aspect_ratio: z.optional(
+    z.enum(['16:9', '9:16']).register(z.globalRegistry, {
+      description: 'The aspect ratio of the video to generate.',
+    }),
+  ),
+  resolution: z.optional(
+    z.enum(['720p', '480p']).register(z.globalRegistry, {
+      description:
+        'The resolution of the video to generate. 720p generations cost 1.5x more than 480p generations.',
+    }),
+  ),
+  num_frames: z
+    .optional(
+      z.int().gte(30).lte(900).register(z.globalRegistry, {
+        description: 'The number of frames to generate.',
+      }),
+    )
+    .default(180),
+  image_url: z.string().register(z.globalRegistry, {
+    description: 'URL of the image input.',
+  }),
+  guidance_scale: z
+    .optional(
+      z.number().gte(0).lte(32).register(z.globalRegistry, {
+        description: 'Guidance scale for the generation.',
+      }),
+    )
+    .default(10),
+  seed: z.optional(z.union([z.int(), z.unknown()])),
+  enable_safety_checker: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description: 'If set to true, the safety checker will be enabled.',
+      }),
+    )
+    .default(false),
+  negative_prompt: z
+    .optional(
+      z.string().register(z.globalRegistry, {
+        description: 'Negative prompt for video generation.',
+      }),
+    )
+    .default(''),
+  cfg_scale: z
+    .optional(
+      z.number().gte(0).lte(7).register(z.globalRegistry, {
+        description: 'Classifier-Free Guidance scale for the generation.',
+      }),
+    )
+    .default(1),
+})
+
+/**
+ * HunyuanCustomResponse
+ */
+export const zSchemaHunyuanCustomOutput = z.object({
+  seed: z.int().register(z.globalRegistry, {
+    description: 'The seed used for generating the video.',
+  }),
+  video: zSchemaFile,
+})
+
+/**
+ * HunyuanCustomRequest
+ */
+export const zSchemaHunyuanCustomInput = z.object({
+  prompt: z.string().register(z.globalRegistry, {
+    description: 'Text prompt for video generation (max 500 characters).',
+  }),
+  aspect_ratio: z.optional(
+    z.enum(['16:9', '9:16']).register(z.globalRegistry, {
+      description: 'The aspect ratio of the video to generate.',
+    }),
+  ),
+  resolution: z.optional(
+    z.enum(['512p', '720p']).register(z.globalRegistry, {
+      description:
+        'The resolution of the video to generate. 720p generations cost 1.5x more than 512p generations.',
+    }),
+  ),
+  enable_safety_checker: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description: 'If set to true, the safety checker will be enabled.',
+      }),
+    )
+    .default(true),
+  num_frames: z
+    .optional(
+      z.int().gte(81).lte(129).register(z.globalRegistry, {
+        description: 'The number of frames to generate.',
+      }),
+    )
+    .default(129),
+  image_url: z.string().register(z.globalRegistry, {
+    description: 'URL of the image input.',
+  }),
+  fps: z
+    .optional(
+      z.int().gte(16).lte(30).register(z.globalRegistry, {
+        description: 'The frames per second of the generated video.',
+      }),
+    )
+    .default(25),
+  enable_prompt_expansion: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description: 'Whether to enable prompt expansion.',
+      }),
+    )
+    .default(true),
+  seed: z.optional(
+    z.int().register(z.globalRegistry, {
+      description: 'The seed to use for generating the video.',
+    }),
+  ),
+  num_inference_steps: z
+    .optional(
+      z.int().gte(10).lte(30).register(z.globalRegistry, {
+        description:
+          'The number of inference steps to run. 
Lower gets faster results, higher gets better results.', + }), + ) + .default(30), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for video generation.', + }), + ) + .default( + 'Aerial view, aerial view, overexposed, low quality, deformation, a poor composition, bad hands, bad teeth, bad eyes, bad limbs, distortion, blurring, text, subtitles, static, picture, black border.', + ), + cfg_scale: z + .optional( + z.number().gte(1.5).lte(13).register(z.globalRegistry, { + description: 'Classifier-Free Guidance scale for the generation.', + }), + ) + .default(7.5), +}) + +/** + * EffectOutput + */ +export const zSchemaPixverseV45EffectsOutput = z.object({ + video: zSchemaFile, +}) + +/** + * EffectInput + */ +export const zSchemaPixverseV45EffectsInput = z.object({ + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt to be used for the generation', + }), + ) + .default(''), + duration: z.optional( + z.enum(['5', '8']).register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ), + resolution: z.optional( + z.enum(['360p', '540p', '720p', '1080p']).register(z.globalRegistry, { + description: 'The resolution of the generated video.', + }), + ), + effect: z + .enum([ + 'Kiss Me AI', + 'Kiss', + 'Muscle Surge', + 'Warmth of Jesus', + 'Anything, Robot', + 'The Tiger Touch', + 'Hug', + 'Holy Wings', + 'Microwave', + 'Zombie Mode', + 'Squid Game', + 'Baby Face', + 'Black Myth: Wukong', + 'Long Hair Magic', + 'Leggy Run', + 'Fin-tastic Mermaid', + 'Punch Face', + 'Creepy Devil Smile', + 'Thunder God', + 'Eye Zoom Challenge', + "Who's Arrested?", + 'Baby Arrived', + 'Werewolf Rage', + 'Bald Swipe', + 'BOOM DROP', + 'Huge Cutie', + 'Liquid Metal', + 'Sharksnap!', + 'Dust Me Away', + '3D Figurine Factor', + 'Bikini Up', + 'My Girlfriends', + 'My Boyfriends', + 'Subject 3 Fever', + 'Earth Zoom', + 'Pole Dance', + 'Vroom Dance', + 'GhostFace Terror', + 'Dragon Evoker', + 'Skeletal Bae', + 'Summoning succubus', + 'Halloween Voodoo Doll', + '3D Naked-Eye AD', + 'Package Explosion', + 'Dishes Served', + 'Ocean ad', + 'Supermarket AD', + 'Tree doll', + 'Come Feel My Abs', + 'The Bicep Flex', + 'London Elite Vibe', + 'Flora Nymph Gown', + 'Christmas Costume', + "It's Snowy", + 'Reindeer Cruiser', + 'Snow Globe Maker', + 'Pet Christmas Outfit', + 'Adopt a Polar Pal', + 'Cat Christmas Box', + 'Starlight Gift Box', + 'Xmas Poster', + 'Pet Christmas Tree', + 'City Santa Hat', + 'Stocking Sweetie', + 'Christmas Night', + 'Xmas Front Page Karma', + "Grinch's Xmas Hijack", + 'Giant Product', + 'Truck Fashion Shoot', + 'Beach AD', + 'Shoal Surround', + 'Mechanical Assembly', + 'Lighting AD', + 'Billboard AD', + 'Product close-up', + 'Parachute Delivery', + 'Dreamlike Cloud', + 'Macaron Machine', + 'Poster AD', + 'Truck AD', + 'Graffiti AD', + '3D Figurine Factory', + 'The Exclusive First Class', + 'Art Zoom Challenge', + 'I Quit', + 'Hitchcock Dolly Zoom', + 'Smell the Lens', + 'I believe I can fly', + 'Strikout Dance', + 'Pixel World', + 'Mint in Box', + 'Hands up, Hand', + 'Flora Nymph Go', + 'Somber Embrace', + 'Beam me up', + 'Suit Swagger', + ]) + .register(z.globalRegistry, { + description: 'The effect to apply to the video', + }), + image_url: z.string().register(z.globalRegistry, { + description: + 'Optional URL of the image to use as the first frame. 
If not provided, generates from text', + }), +}) + +/** + * I2VOutputV4 + */ +export const zSchemaPixverseV45ImageToVideoFastOutput = z.object({ + video: zSchemaFile, +}) + +/** + * FastImageToVideoRequestV4 + */ +export const zSchemaPixverseV45ImageToVideoFastInput = z.object({ + prompt: z.string(), + resolution: z.optional( + z.enum(['360p', '540p', '720p']).register(z.globalRegistry, { + description: 'The resolution of the generated video', + }), + ), + style: z.optional( + z + .enum(['anime', '3d_animation', 'clay', 'comic', 'cyberpunk']) + .register(z.globalRegistry, { + description: 'The style of the generated video', + }), + ), + camera_movement: z.optional( + z + .enum([ + 'horizontal_left', + 'horizontal_right', + 'vertical_up', + 'vertical_down', + 'zoom_in', + 'zoom_out', + 'crane_up', + 'quickly_zoom_in', + 'quickly_zoom_out', + 'smooth_zoom_in', + 'camera_rotation', + 'robo_arm', + 'super_dolly_out', + 'whip_pan', + 'hitchcock', + 'left_follow', + 'right_follow', + 'pan_left', + 'pan_right', + 'fix_bg', + ]) + .register(z.globalRegistry, { + description: 'The type of camera movement to apply to the video', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to use as the first frame', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt to be used for the generation', + }), + ) + .default(''), +}) + +/** + * TransitionOutput + */ +export const zSchemaPixverseV45TransitionOutput = z.object({ + video: zSchemaFile, +}) + +/** + * TransitionRequest + */ +export const zSchemaPixverseV45TransitionInput = z.object({ + first_image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to use as the first frame', + }), + aspect_ratio: z.optional( + z.enum(['16:9', '4:3', '1:1', '3:4', '9:16']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), + resolution: z.optional( + z.enum(['360p', '540p', '720p', '1080p']).register(z.globalRegistry, { + description: 'The resolution of the generated video', + }), + ), + style: z.optional( + z + .enum(['anime', '3d_animation', 'clay', 'comic', 'cyberpunk']) + .register(z.globalRegistry, { + description: 'The style of the generated video', + }), + ), + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt for the transition', + }), + duration: z.optional( + z.enum(['5', '8']).register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n ', + }), + ), + end_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'URL of the image to use as the last frame', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt to be used for the generation', + }), + ) + .default(''), +}) + +/** + * ImageToVideoOutput + */ +export const zSchemaLtxVideoLoraImageToVideoOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation.', + }), + seed: z.int().register(z.globalRegistry, { + 
description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * LoRAWeight + * + * LoRA weight to use for generation. + */ +export const zSchemaLoRaWeight = z + .object({ + path: z.string().register(z.globalRegistry, { + description: 'URL or path to the LoRA weights.', + }), + scale: z + .optional( + z.number().gte(0).lte(4).register(z.globalRegistry, { + description: + 'Scale of the LoRA weight. This is a multiplier applied to the LoRA weight when loading it.', + }), + ) + .default(1), + weight_name: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Name of the LoRA weight. Only used if `path` is a HuggingFace repository, and is only required when the repository contains multiple LoRA weights.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'LoRA weight to use for generation.', + }) + +/** + * ImageToVideoInput + * + * Request model for image-to-video generation. + */ +export const zSchemaLtxVideoLoraImageToVideoInput = z + .object({ + number_of_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to use.', + }), + ) + .default(30), + resolution: z.optional( + z.enum(['480p', '720p']).register(z.globalRegistry, { + description: 'The resolution of the video.', + }), + ), + reverse_video: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to reverse the video.', + }), + ) + .default(false), + aspect_ratio: z.optional( + z.enum(['16:9', '1:1', '9:16', 'auto']).register(z.globalRegistry, { + description: 'The aspect ratio of the video.', + }), + ), + frame_rate: z + .optional( + z.int().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frame rate of the video.', + }), + ) + .default(25), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to expand the prompt using the LLM.', + }), + ) + .default(false), + number_of_frames: z + .optional( + z.int().gte(9).lte(161).register(z.globalRegistry, { + description: 'The number of frames in the video.', + }), + ) + .default(89), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to use as input.', + }), + loras: z + .optional( + z.array(zSchemaLoRaWeight).register(z.globalRegistry, { + description: 'The LoRA weights to use for generation.', + }), + ) + .default([]), + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the video from.', + }), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed to use for generation.', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to use.', + }), + ) + .default( + 'blurry, low quality, low resolution, inconsistent motion, jittery, distorted', + ), + }) + .register(z.globalRegistry, { + description: 'Request model for image-to-video generation.', + }) + +/** + * ImageToVideoOutput + */ +export const zSchemaLtxVideo13bDevImageToVideoOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * ImageToVideoInput + */ +export const 
zSchemaLtxVideo13bDevImageToVideoInput = z.object({ + second_pass_skip_initial_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: + 'The number of inference steps to skip in the initial steps of the second pass. By skipping some steps at the beginning, the second pass can focus on smaller details instead of larger changes.', + }), + ) + .default(17), + first_pass_num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps during the first pass.', + }), + ) + .default(30), + frame_rate: z + .optional( + z.int().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frame rate of the video.', + }), + ) + .default(30), + prompt: z.string().register(z.globalRegistry, { + description: 'Text prompt to guide generation', + }), + reverse_video: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to reverse the video.', + }), + ) + .default(false), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to expand the prompt using a language model.', + }), + ) + .default(false), + loras: z + .optional( + z.array(zSchemaLoRaWeight).register(z.globalRegistry, { + description: 'LoRA weights to use for generation', + }), + ) + .default([]), + second_pass_num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps during the second pass.', + }), + ) + .default(30), + num_frames: z + .optional( + z.int().gte(9).lte(161).register(z.globalRegistry, { + description: 'The number of frames in the video.', + }), + ) + .default(121), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for generation', + }), + ) + .default('worst quality, inconsistent motion, blurry, jittery, distorted'), + resolution: z.optional( + z.enum(['480p', '720p']).register(z.globalRegistry, { + description: 'Resolution of the generated video (480p or 720p).', + }), + ), + aspect_ratio: z.optional( + z.enum(['9:16', '1:1', '16:9', 'auto']).register(z.globalRegistry, { + description: 'The aspect ratio of the video.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'Image URL for Image-to-Video task', + }), + constant_rate_factor: z + .optional( + z.int().gte(20).lte(60).register(z.globalRegistry, { + description: + "The constant rate factor (CRF) to compress input media with. Compressed input media more closely matches the model's training data, which can improve motion quality.", + }), + ) + .default(35), + first_pass_skip_final_steps: z + .optional( + z.int().gte(0).lte(50).register(z.globalRegistry, { + description: + 'Number of inference steps to skip in the final steps of the first pass. 
By skipping some steps at the end, the first pass can focus on larger changes instead of smaller details.', + }), + ) + .default(3), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for generation', + }), + ), +}) + +/** + * ImageToVideoOutput + */ +export const zSchemaLtxVideo13bDistilledImageToVideoOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * DistilledImageToVideoInput + * + * Distilled model input + */ +export const zSchemaLtxVideo13bDistilledImageToVideoInput = z + .object({ + second_pass_skip_initial_steps: z + .optional( + z.int().gte(1).lte(20).register(z.globalRegistry, { + description: + 'The number of inference steps to skip in the initial steps of the second pass. By skipping some steps at the beginning, the second pass can focus on smaller details instead of larger changes.', + }), + ) + .default(5), + first_pass_num_inference_steps: z + .optional( + z.int().gte(2).lte(20).register(z.globalRegistry, { + description: 'Number of inference steps during the first pass.', + }), + ) + .default(8), + frame_rate: z + .optional( + z.int().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frame rate of the video.', + }), + ) + .default(30), + reverse_video: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to reverse the video.', + }), + ) + .default(false), + prompt: z.string().register(z.globalRegistry, { + description: 'Text prompt to guide generation', + }), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to expand the prompt using a language model.', + }), + ) + .default(false), + loras: z + .optional( + z.array(zSchemaLoRaWeight).register(z.globalRegistry, { + description: 'LoRA weights to use for generation', + }), + ) + .default([]), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + num_frames: z + .optional( + z.int().gte(9).lte(161).register(z.globalRegistry, { + description: 'The number of frames in the video.', + }), + ) + .default(121), + second_pass_num_inference_steps: z + .optional( + z.int().gte(2).lte(20).register(z.globalRegistry, { + description: 'Number of inference steps during the second pass.', + }), + ) + .default(8), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for generation', + }), + ) + .default( + 'worst quality, inconsistent motion, blurry, jittery, distorted', + ), + resolution: z.optional( + z.enum(['480p', '720p']).register(z.globalRegistry, { + description: 'Resolution of the generated video (480p or 720p).', + }), + ), + aspect_ratio: z.optional( + z.enum(['9:16', '1:1', '16:9', 'auto']).register(z.globalRegistry, { + description: 'The aspect ratio of the video.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'Image URL for Image-to-Video task', + }), + constant_rate_factor: z + .optional( + z.int().gte(20).lte(60).register(z.globalRegistry, { + description: + "The constant rate factor (CRF) to compress input media with. 
Compressed input media more closely matches the model's training data, which can improve motion quality.", + }), + ) + .default(35), + first_pass_skip_final_steps: z + .optional( + z.int().gte(0).lte(20).register(z.globalRegistry, { + description: + 'Number of inference steps to skip in the final steps of the first pass. By skipping some steps at the end, the first pass can focus on larger changes instead of smaller details.', + }), + ) + .default(1), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for generation', + }), + ), + }) + .register(z.globalRegistry, { + description: 'Distilled model input', + }) + +/** + * ElementsOutput + */ +export const zSchemaKlingVideoV16ProElementsOutput = z.object({ + video: zSchemaFile, +}) + +/** + * MultiImageToVideoRequest + */ +export const zSchemaKlingVideoV16ProElementsInput = z.object({ + prompt: z.string().max(2500), + duration: z.optional( + z.enum(['5', '10']).register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video frame', + }), + ), + input_image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'List of image URLs to use for video generation. Supports up to 4 images.', + }), + negative_prompt: z + .optional(z.string().max(2500)) + .default('blur, distort, and low quality'), +}) + +/** + * ElementsOutput + */ +export const zSchemaKlingVideoV16StandardElementsOutput = z.object({ + video: zSchemaFile, +}) + +/** + * MultiImageToVideoRequest + */ +export const zSchemaKlingVideoV16StandardElementsInput = z.object({ + prompt: z.string().max(2500), + duration: z.optional( + z.enum(['5', '10']).register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video frame', + }), + ), + input_image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'List of image URLs to use for video generation. Supports up to 4 images.', + }), + negative_prompt: z + .optional(z.string().max(2500)) + .default('blur, distort, and low quality'), +}) + +/** + * Output + */ +export const zSchemaHunyuanPortraitOutput = z.object({ + video: zSchemaFile, +}) + +/** + * Input + */ +export const zSchemaHunyuanPortraitInput = z.object({ + video_url: z.string().register(z.globalRegistry, { + description: 'The URL of the driving video.', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for generation. 
If None, a random seed will be used.', + }), + ), + use_arcface: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to use ArcFace for face recognition.', + }), + ) + .default(true), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the source image.', + }), +}) + +/** + * ImageToVideoV21ProOutput + */ +export const zSchemaKlingVideoV21ProImageToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * ImageToVideoV21ProRequest + */ +export const zSchemaKlingVideoV21ProImageToVideoInput = z.object({ + prompt: z.string().max(2500), + duration: z.optional( + z.enum(['5', '10']).register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to be used for the video', + }), + negative_prompt: z + .optional(z.string().max(2500)) + .default('blur, distort, and low quality'), + tail_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'URL of the image to be used for the end of the video', + }), + ), + cfg_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt.\n ', + }), + ) + .default(0.5), +}) + +/** + * Output + */ +export const zSchemaHunyuanAvatarOutput = z.object({ + video: zSchemaFile, +}) + +/** + * Input + */ +export const zSchemaHunyuanAvatarInput = z.object({ + text: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Text prompt describing the scene.', + }), + ) + .default('A cat is singing.'), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the reference image.', + }), + turbo_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the video will be generated faster with no noticeable degradation in the visual quality.', + }), + ) + .default(true), + audio_url: z.string().register(z.globalRegistry, { + description: 'The URL of the audio file.', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for generation.', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(30).lte(50).register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. Higher values give better quality but take longer.', + }), + ) + .default(30), + num_frames: z + .optional( + z.int().gte(129).lte(401).register(z.globalRegistry, { + description: + 'Number of video frames to generate at 25 FPS. 
If greater than the input audio length, it will be capped to the length of the input audio.',
+      }),
+    )
+    .default(129),
+})
+
+/**
+ * SeedanceVideoOutput
+ */
+export const zSchemaBytedanceSeedanceV1LiteImageToVideoOutput = z.object({
+  seed: z.int().register(z.globalRegistry, {
+    description: 'Seed used for generation',
+  }),
+  video: zSchemaFile,
+})
+
+/**
+ * SeedanceImageToVideoInput
+ */
+export const zSchemaBytedanceSeedanceV1LiteImageToVideoInput = z.object({
+  prompt: z.string().register(z.globalRegistry, {
+    description: 'The text prompt used to generate the video',
+  }),
+  resolution: z.optional(
+    z.enum(['480p', '720p', '1080p']).register(z.globalRegistry, {
+      description:
+        'Video resolution - 480p for faster generation, 720p for higher quality',
+    }),
+  ),
+  aspect_ratio: z.optional(
+    z
+      .enum(['21:9', '16:9', '4:3', '1:1', '3:4', '9:16', 'auto'])
+      .register(z.globalRegistry, {
+        description: 'The aspect ratio of the generated video',
+      }),
+  ),
+  duration: z.optional(
+    z
+      .enum(['2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12'])
+      .register(z.globalRegistry, {
+        description: 'Duration of the video in seconds',
+      }),
+  ),
+  image_url: z.string().register(z.globalRegistry, {
+    description: 'The URL of the image used to generate video',
+  }),
+  enable_safety_checker: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description: 'If set to true, the safety checker will be enabled.',
+      }),
+    )
+    .default(true),
+  camera_fixed: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description: 'Whether to fix the camera position',
+      }),
+    )
+    .default(false),
+  end_image_url: z.optional(
+    z.string().register(z.globalRegistry, {
+      description:
+        'The URL of the image the video ends with. Defaults to None.',
+    }),
+  ),
+  seed: z.optional(
+    z.int().register(z.globalRegistry, {
+      description:
+        'Random seed to control video generation. Use -1 for random.',
+    }),
+  ),
+})
+
+/**
+ * ImageToVideoHailuo02Output
+ */
+export const zSchemaMinimaxHailuo02ProImageToVideoOutput = z.object({
+  video: zSchemaFile,
+})
+
+/**
+ * ProImageToVideoHailuo02Input
+ */
+export const zSchemaMinimaxHailuo02ProImageToVideoInput = z.object({
+  prompt_optimizer: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description: "Whether to use the model's prompt optimizer",
+      }),
+    )
+    .default(true),
+  prompt: z.string().max(2000),
+  end_image_url: z.optional(
+    z.string().register(z.globalRegistry, {
+      description:
+        'Optional URL of the image to use as the last frame of the video',
+    }),
+  ),
+  image_url: z.string(),
+})
+
+/**
+ * AvatarMultiAudioResponse
+ */
+export const zSchemaAiAvatarMultiOutput = z.object({
+  seed: z.int().register(z.globalRegistry, {
+    description: 'The seed used for generation.',
+  }),
+  video: zSchemaFile,
+})
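
A brief usage sketch, not part of the diff itself: these generated input schemas can validate a payload before it is submitted, with Zod materializing the defaults declared above. The import path below is an assumption for illustration, not taken from the patch.

  // Sketch only: assumes the generated schemas are exported from this module.
  import { zSchemaBytedanceSeedanceV1LiteImageToVideoInput } from './video-schemas' // hypothetical path

  const result = zSchemaBytedanceSeedanceV1LiteImageToVideoInput.safeParse({
    prompt: 'A red panda walking through fresh snow',
    image_url: 'https://example.com/first-frame.png',
  })

  if (result.success) {
    // Optional fields declared with .default(...) are filled in on parse:
    console.log(result.data.enable_safety_checker) // true
    console.log(result.data.camera_fixed) // false
  } else {
    console.error(result.error.issues)
  }
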
+
+/**
+ * AvatarMultiAudioPersonRequest
+ */
+export const zSchemaAiAvatarMultiInput = z.object({
+  prompt: z.string().register(z.globalRegistry, {
+    description: 'The text prompt to guide video generation.',
+  }),
+  resolution: z.optional(
+    z.enum(['480p', '720p']).register(z.globalRegistry, {
+      description:
+        'Resolution of the video to generate. Must be either 480p or 720p.',
+    }),
+  ),
+  acceleration: z.optional(
+    z.enum(['none', 'regular', 'high']).register(z.globalRegistry, {
+      description: 'The acceleration level to use for generation.',
+    }),
+  ),
+  first_audio_url: z.string().register(z.globalRegistry, {
+    description: 'The URL of the Person 1 audio file.',
+  }),
+  image_url: z.string().register(z.globalRegistry, {
+    description:
+      'URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped.',
+  }),
+  second_audio_url: z.optional(
+    z.string().register(z.globalRegistry, {
+      description: 'The URL of the Person 2 audio file.',
+    }),
+  ),
+  seed: z
+    .optional(
+      z.int().register(z.globalRegistry, {
+        description:
+          'Random seed for reproducibility. If None, a random seed is chosen.',
+      }),
+    )
+    .default(81),
+  use_only_first_audio: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description: 'Whether to use only the first audio file.',
+      }),
+    )
+    .default(false),
+  num_frames: z
+    .optional(
+      z.int().gte(41).lte(241).register(z.globalRegistry, {
+        description:
+          'Number of frames to generate. Must be between 41 and 241 (inclusive). If the number of frames is greater than 81, the video will be generated with 1.25x more billing units.',
+      }),
+    )
+    .default(181),
+})
+
+/**
+ * AvatarMultiTextResponse
+ */
+export const zSchemaAiAvatarMultiTextOutput = z.object({
+  seed: z.int().register(z.globalRegistry, {
+    description: 'The seed used for generation.',
+  }),
+  video: zSchemaFile,
+})
+
+/**
+ * AvatarMultiTextRequest
+ */
+export const zSchemaAiAvatarMultiTextInput = z.object({
+  prompt: z.string().register(z.globalRegistry, {
+    description: 'The text prompt to guide video generation.',
+  }),
+  second_text_input: z.string().register(z.globalRegistry, {
+    description: 'The text input to guide video generation.',
+  }),
+  acceleration: z.optional(
+    z.enum(['none', 'regular', 'high']).register(z.globalRegistry, {
+      description: 'The acceleration level to use for generation.',
+    }),
+  ),
+  resolution: z.optional(
+    z.enum(['480p', '720p']).register(z.globalRegistry, {
+      description:
+        'Resolution of the video to generate. Must be either 480p or 720p.',
+    }),
+  ),
+  first_text_input: z.string().register(z.globalRegistry, {
+    description: 'The text input to guide video generation.',
+  }),
+  image_url: z.string().register(z.globalRegistry, {
+    description:
+      'URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped.',
+  }),
+  voice2: z.optional(
+    z
+      .enum([
+        'Aria',
+        'Roger',
+        'Sarah',
+        'Laura',
+        'Charlie',
+        'George',
+        'Callum',
+        'River',
+        'Liam',
+        'Charlotte',
+        'Alice',
+        'Matilda',
+        'Will',
+        'Jessica',
+        'Eric',
+        'Chris',
+        'Brian',
+        'Daniel',
+        'Lily',
+        'Bill',
+      ])
+      .register(z.globalRegistry, {
+        description: "The second person's voice to use for speech generation",
+      }),
+  ),
+  voice1: z.optional(
+    z
+      .enum([
+        'Aria',
+        'Roger',
+        'Sarah',
+        'Laura',
+        'Charlie',
+        'George',
+        'Callum',
+        'River',
+        'Liam',
+        'Charlotte',
+        'Alice',
+        'Matilda',
+        'Will',
+        'Jessica',
+        'Eric',
+        'Chris',
+        'Brian',
+        'Daniel',
+        'Lily',
+        'Bill',
+      ])
+      .register(z.globalRegistry, {
+        description: "The first person's voice to use for speech generation",
+      }),
+  ),
+  seed: z
+    .optional(
+      z.int().register(z.globalRegistry, {
+        description:
+          'Random seed for reproducibility. If None, a random seed is chosen.',
+      }),
+    )
+    .default(81),
+  num_frames: z
+    .optional(
+      z.int().gte(41).lte(241).register(z.globalRegistry, {
+        description:
+          'Number of frames to generate. Must be between 41 and 241 (inclusive). If the number of frames is greater than 81, the video will be generated with 1.25x more billing units.',
+      }),
+    )
+    .default(191),
+})
+
+/**
+ * AvatarSingleAudioResponse
+ */
+export const zSchemaAiAvatarOutput = z.object({
+  seed: z.int().register(z.globalRegistry, {
+    description: 'The seed used for generation.',
+  }),
+  video: zSchemaFile,
+})
+
+/**
+ * AvatarSingleAudioRequest
+ */
+export const zSchemaAiAvatarInput = z.object({
+  prompt: z.string().register(z.globalRegistry, {
+    description: 'The text prompt to guide video generation.',
+  }),
+  resolution: z.optional(
+    z.enum(['480p', '720p']).register(z.globalRegistry, {
+      description:
+        'Resolution of the video to generate. Must be either 480p or 720p.',
+    }),
+  ),
+  acceleration: z.optional(
+    z.enum(['none', 'regular', 'high']).register(z.globalRegistry, {
+      description: 'The acceleration level to use for generation.',
+    }),
+  ),
+  image_url: z.string().register(z.globalRegistry, {
+    description:
+      'URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped.',
+  }),
+  audio_url: z.string().register(z.globalRegistry, {
+    description: 'The URL of the audio file.',
+  }),
+  num_frames: z
+    .optional(
+      z.int().gte(41).lte(241).register(z.globalRegistry, {
+        description:
+          'Number of frames to generate. Must be between 41 and 241 (inclusive). If the number of frames is greater than 81, the video will be generated with 1.25x more billing units.',
+      }),
+    )
+    .default(145),
+  seed: z
+    .optional(
+      z.int().register(z.globalRegistry, {
+        description:
+          'Random seed for reproducibility. If None, a random seed is chosen.',
+      }),
+    )
+    .default(42),
+})
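
A companion type-level sketch, under the same assumption about the import path: z.infer recovers the request shape from the schema just added, and parse() fills in the declared defaults (num_frames 145, seed 42) on a minimal payload.

  import { z } from 'zod'
  import { zSchemaAiAvatarInput } from './video-schemas' // hypothetical path

  // Output type of the schema: fields with defaults become non-optional.
  type AiAvatarRequest = z.infer<typeof zSchemaAiAvatarInput>

  const request: AiAvatarRequest = zSchemaAiAvatarInput.parse({
    prompt: 'A presenter reading the weather report',
    image_url: 'https://example.com/presenter.png',
    audio_url: 'https://example.com/voiceover.mp3',
  })
  // request.num_frames === 145 and request.seed === 42 via the schema defaults
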
+
+/**
+ * AvatarSingleTextResponse
+ */
+export const zSchemaAiAvatarSingleTextOutput = z.object({
+  seed: z.int().register(z.globalRegistry, {
+    description: 'The seed used for generation.',
+  }),
+  video: zSchemaFile,
+})
+
+/**
+ * AvatarSingleTextRequest
+ */
+export const zSchemaAiAvatarSingleTextInput = z.object({
+  prompt: z.string().register(z.globalRegistry, {
+    description: 'The text prompt to guide video generation.',
+  }),
+  resolution: z.optional(
+    z.enum(['480p', '720p']).register(z.globalRegistry, {
+      description:
+        'Resolution of the video to generate. Must be either 480p or 720p.',
+    }),
+  ),
+  acceleration: z.optional(
+    z.enum(['none', 'regular', 'high']).register(z.globalRegistry, {
+      description: 'The acceleration level to use for generation.',
+    }),
+  ),
+  text_input: z.string().register(z.globalRegistry, {
+    description: 'The text input to guide video generation.',
+  }),
+  image_url: z.string().register(z.globalRegistry, {
+    description:
+      'URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped.',
+  }),
+  voice: z
+    .enum([
+      'Aria',
+      'Roger',
+      'Sarah',
+      'Laura',
+      'Charlie',
+      'George',
+      'Callum',
+      'River',
+      'Liam',
+      'Charlotte',
+      'Alice',
+      'Matilda',
+      'Will',
+      'Jessica',
+      'Eric',
+      'Chris',
+      'Brian',
+      'Daniel',
+      'Lily',
+      'Bill',
+    ])
+    .register(z.globalRegistry, {
+      description: 'The voice to use for speech generation',
+    }),
+  seed: z
+    .optional(
+      z.int().register(z.globalRegistry, {
+        description:
+          'Random seed for reproducibility. If None, a random seed is chosen.',
+      }),
+    )
+    .default(42),
+  num_frames: z
+    .optional(
+      z.int().gte(41).lte(241).register(z.globalRegistry, {
+        description:
+          'Number of frames to generate. Must be between 41 and 241 (inclusive). If the number of frames is greater than 81, the video will be generated with 1.25x more billing units.',
+      }),
+    )
+    .default(136),
+})
+
+/**
+ * Q1ReferenceToVideoOutput
+ */
+export const zSchemaViduQ1ReferenceToVideoOutput = z.object({
+  video: zSchemaFile,
+})
+
+/**
+ * Q1ReferenceToVideoRequest
+ */
+export const zSchemaViduQ1ReferenceToVideoInput = z.object({
+  prompt: z.string().max(1500).register(z.globalRegistry, {
+    description: 'Text prompt for video generation, max 1500 characters',
+  }),
+  aspect_ratio: z.optional(
+    z.enum(['16:9', '9:16', '1:1']).register(z.globalRegistry, {
+      description: 'The aspect ratio of the output video',
+    }),
+  ),
+  bgm: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description: 'Whether to add background music to the generated video',
+      }),
+    )
+    .default(false),
+  reference_image_urls: z.array(z.string()).register(z.globalRegistry, {
+    description:
+      'URLs of the reference images to use for consistent subject appearance. Q1 model supports up to 7 reference images.',
+  }),
+  seed: z.optional(
+    z.int().register(z.globalRegistry, {
+      description: 'Random seed for generation',
+    }),
+  ),
+  movement_amplitude: z.optional(
+    z.enum(['auto', 'small', 'medium', 'large']).register(z.globalRegistry, {
+      description: 'The movement amplitude of objects in the frame',
+    }),
+  ),
+})
+
+/**
+ * Veo3ImageToVideoOutput
+ */
+export const zSchemaVeo3FastImageToVideoOutput = z.object({
+  video: zSchemaFile,
+})
+
+/**
+ * Veo3ImageToVideoInput
+ */
+export const zSchemaVeo3FastImageToVideoInput = z.object({
+  prompt: z.string().max(20000).register(z.globalRegistry, {
+    description: 'The text prompt describing how the image should be animated',
+  }),
+  resolution: z.optional(
+    z.enum(['720p', '1080p']).register(z.globalRegistry, {
+      description: 'The resolution of the generated video.',
+    }),
+  ),
+  aspect_ratio: z.optional(
+    z.enum(['auto', '16:9', '9:16']).register(z.globalRegistry, {
+      description: 'The aspect ratio of the generated video.',
+    }),
+  ),
+  generate_audio: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description: 'Whether to generate audio for the video.',
+      }),
+    )
+    .default(true),
+  auto_fix: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description:
+          'Whether to automatically attempt to fix prompts that fail content policy or other validation checks by rewriting them.',
+      }),
+    )
+    .default(false),
+  duration: z.optional(
+    z.enum(['4s', '6s', '8s']).register(z.globalRegistry, {
+      description: 'The duration of the generated video.',
+    }),
+  ),
+  image_url: z.string().register(z.globalRegistry, {
+    description:
+      'URL of the input image to animate. Should be 720p or higher resolution in 16:9 or 9:16 aspect ratio. 
If the image is not in 16:9 or 9:16 aspect ratio, it will be cropped to fit.', + }), +}) + +/** + * ImageToVideoOutput + */ +export const zSchemaLtxv13B098DistilledImageToVideoOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * DistilledImageToVideoInput + * + * Distilled model input + */ +export const zSchemaLtxv13B098DistilledImageToVideoInput = z + .object({ + second_pass_skip_initial_steps: z + .optional( + z.int().gte(1).lte(11).register(z.globalRegistry, { + description: + 'The number of inference steps to skip in the initial steps of the second pass. By skipping some steps at the beginning, the second pass can focus on smaller details instead of larger changes.', + }), + ) + .default(5), + first_pass_num_inference_steps: z + .optional( + z.int().gte(2).lte(12).register(z.globalRegistry, { + description: 'Number of inference steps during the first pass.', + }), + ) + .default(8), + frame_rate: z + .optional( + z.int().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frame rate of the video.', + }), + ) + .default(24), + reverse_video: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to reverse the video.', + }), + ) + .default(false), + prompt: z.string().register(z.globalRegistry, { + description: 'Text prompt to guide generation', + }), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to expand the prompt using a language model.', + }), + ) + .default(false), + temporal_adain_factor: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The factor for adaptive instance normalization (AdaIN) applied to generated video chunks after the first. This can help deal with a gradual increase in saturation/contrast in the generated video by normalizing the color distribution across the video. A high value will ensure the color distribution is more consistent across the video, while a low value will allow for more variation in color distribution.', + }), + ) + .default(0.5), + loras: z + .optional( + z.array(zSchemaLoRaWeight).register(z.globalRegistry, { + description: 'LoRA weights to use for generation', + }), + ) + .default([]), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + num_frames: z + .optional( + z.int().gte(9).lte(1441).register(z.globalRegistry, { + description: 'The number of frames in the video.', + }), + ) + .default(121), + second_pass_num_inference_steps: z + .optional( + z.int().gte(2).lte(12).register(z.globalRegistry, { + description: 'Number of inference steps during the second pass.', + }), + ) + .default(8), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for generation', + }), + ) + .default( + 'worst quality, inconsistent motion, blurry, jittery, distorted', + ), + enable_detail_pass: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to use a detail pass. If True, the model will perform a second pass to refine the video and enhance details. 
This incurs a 2.0x cost multiplier on the base price.', + }), + ) + .default(false), + resolution: z.optional( + z.enum(['480p', '720p']).register(z.globalRegistry, { + description: 'Resolution of the generated video.', + }), + ), + aspect_ratio: z.optional( + z.enum(['9:16', '1:1', '16:9', 'auto']).register(z.globalRegistry, { + description: 'The aspect ratio of the video.', + }), + ), + tone_map_compression_ratio: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The compression ratio for tone mapping. This is used to compress the dynamic range of the video to improve visual quality. A value of 0.0 means no compression, while a value of 1.0 means maximum compression.', + }), + ) + .default(0), + image_url: z.string().register(z.globalRegistry, { + description: 'Image URL for Image-to-Video task', + }), + constant_rate_factor: z + .optional( + z.int().gte(0).lte(51).register(z.globalRegistry, { + description: + "The constant rate factor (CRF) to compress input media with. Compressed input media more closely matches the model's training data, which can improve motion quality.", + }), + ) + .default(29), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for generation', + }), + ), + }) + .register(z.globalRegistry, { + description: 'Distilled model input', + }) + +/** + * OmniHumanOutput + */ +export const zSchemaBytedanceOmnihumanOutput = z.object({ + duration: z.number().register(z.globalRegistry, { + description: 'Duration of audio input/video output as used for billing.', + }), + video: zSchemaFile, +}) + +/** + * OmniHumanInput + */ +export const zSchemaBytedanceOmnihumanInput = z.object({ + audio_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the audio file to generate the video. Audio must be under 30s long.', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image used to generate the video', + }), +}) + +/** + * WanI2VResponse + */ +export const zSchemaWanV22A14bImageToVideoOutput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The text prompt used for video generation.', + }), + ) + .default(''), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * WanI2VRequest + */ +export const zSchemaWanV22A14bImageToVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide video generation.', + }), + shift: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'Shift value for the video. Must be between 1.0 and 10.0.', + }), + ) + .default(5), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level to use. The more acceleration, the faster the generation, but with lower quality. The recommended value is 'regular'.", + }), + ), + num_interpolated_frames: z + .optional( + z.int().gte(0).lte(4).register(z.globalRegistry, { + description: + 'Number of frames to interpolate between each pair of generated frames. Must be between 0 and 4.', + }), + ) + .default(1), + frames_per_second: z + .optional( + z.int().gte(4).lte(60).register(z.globalRegistry, { + description: + 'Frames per second of the generated video. Must be between 4 to 60. 
When using interpolation and `adjust_fps_for_interpolation` is set to true (default: true) the final FPS will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If `adjust_fps_for_interpolation` is set to false, this value will be used as-is.', + }), + ) + .default(16), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.', + }), + ) + .default(3.5), + num_frames: z + .optional( + z.int().gte(17).lte(161).register(z.globalRegistry, { + description: + 'Number of frames to generate. Must be between 17 and 161 (inclusive).', + }), + ) + .default(81), + end_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'URL of the end image.', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for video generation.', + }), + ) + .default(''), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, input data will be checked for safety before processing.', + }), + ) + .default(false), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: + 'The write mode of the output video. Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size.', + }), + ), + resolution: z.optional( + z.enum(['480p', '580p', '720p']).register(z.globalRegistry, { + description: 'Resolution of the generated video (480p, 580p, or 720p).', + }), + ), + aspect_ratio: z.optional( + z.enum(['auto', '16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: + "Aspect ratio of the generated video. If 'auto', the aspect ratio will be determined automatically based on the input image.", + }), + ), + enable_output_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, output video will be checked for safety after generation.', + }), + ) + .default(false), + image_url: z.string().register(z.globalRegistry, { + description: + 'URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped.', + }), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: + 'The quality of the output video. Higher quality means better visual quality but larger file size.', + }), + ), + guidance_scale_2: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Guidance scale for the second stage of the model. This is used to control the adherence to the prompt in the second stage of the model.', + }), + ) + .default(3.5), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning.', + }), + ) + .default(false), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility.
If None, a random seed is chosen.', + }), + ), + interpolator_model: z.optional( + z.enum(['none', 'film', 'rife']).register(z.globalRegistry, { + description: + 'The model to use for frame interpolation. If None, no interpolation is applied.', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(2).lte(40).register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. Higher values give better quality but take longer.', + }), + ) + .default(27), + adjust_fps_for_interpolation: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the number of frames per second will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If false, the passed frames per second will be used as-is.', + }), + ) + .default(true), +}) + +/** + * WanSmallI2VResponse + */ +export const zSchemaWanV225bImageToVideoOutput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The text prompt used for video generation.', + }), + ) + .default(''), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * WanSmallI2VRequest + */ +export const zSchemaWanV225bImageToVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide video generation.', + }), + shift: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'Shift value for the video. Must be between 1.0 and 10.0.', + }), + ) + .default(5), + num_interpolated_frames: z + .optional( + z.int().gte(0).lte(4).register(z.globalRegistry, { + description: + 'Number of frames to interpolate between each pair of generated frames. Must be between 0 and 4.', + }), + ) + .default(0), + frames_per_second: z + .optional( + z.int().gte(4).lte(60).register(z.globalRegistry, { + description: + 'Frames per second of the generated video. Must be between 4 and 60. When using interpolation and `adjust_fps_for_interpolation` is set to true (default: true) the final FPS will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If `adjust_fps_for_interpolation` is set to false, this value will be used as-is.', + }), + ) + .default(24), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.', + }), + ) + .default(3.5), + num_frames: z + .optional( + z.int().gte(17).lte(161).register(z.globalRegistry, { + description: + 'Number of frames to generate. Must be between 17 and 161 (inclusive).', + }), + ) + .default(81), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, input data will be checked for safety before processing.', + }), + ) + .default(false), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for video generation.', + }), + ) + .default(''), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: + 'The write mode of the output video.
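+// --- Illustrative sketch of the Input/Output pairing above (assumptions noted). ---
+// `z.input` is the pre-default shape accepted by `.parse`, which then fills the
+// declared defaults (e.g. frames_per_second 16, num_frames 81 for the A14B model):
+//
+//   const raw: z.input<typeof zSchemaWanV22A14bImageToVideoInput> = {
+//     prompt: 'A red fox running through snow',
+//     image_url: 'https://example.com/fox.png', // hypothetical URL
+//   }
+//   const parsed = zSchemaWanV22A14bImageToVideoInput.parse(raw)
+//   // parsed.num_frames === 81 && parsed.frames_per_second === 16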
Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size.', + }), + ), + resolution: z.optional( + z.enum(['580p', '720p']).register(z.globalRegistry, { + description: 'Resolution of the generated video (580p or 720p).', + }), + ), + aspect_ratio: z.optional( + z.enum(['auto', '16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: + "Aspect ratio of the generated video. If 'auto', the aspect ratio will be determined automatically based on the input image.", + }), + ), + enable_output_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, output video will be checked for safety after generation.', + }), + ) + .default(false), + image_url: z.string().register(z.globalRegistry, { + description: + 'URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped.', + }), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: + 'The quality of the output video. Higher quality means better visual quality but larger file size.', + }), + ), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. Higher values give better quality but take longer.', + }), + ) + .default(40), + interpolator_model: z.optional( + z.enum(['none', 'film', 'rife']).register(z.globalRegistry, { + description: + 'The model to use for frame interpolation. If None, no interpolation is applied.', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), + adjust_fps_for_interpolation: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the number of frames per second will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If false, the passed frames per second will be used as-is.', + }), + ) + .default(true), +}) + +/** + * WanTurboI2VResponse + */ +export const zSchemaWanV22A14bImageToVideoTurboOutput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The text prompt used for video generation.', + }), + ) + .default(''), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * WanTurboI2VRequest + */ +export const zSchemaWanV22A14bImageToVideoTurboInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide video generation.', + }), + resolution: z.optional( + z.enum(['480p', '580p', '720p']).register(z.globalRegistry, { + description: 'Resolution of the generated video (480p, 580p, or 720p).', + }), + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level to use. 
The more acceleration, the faster the generation, but with lower quality. The recommended value is 'regular'.", + }), + ), + aspect_ratio: z.optional( + z.enum(['auto', '16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: + "Aspect ratio of the generated video. If 'auto', the aspect ratio will be determined automatically based on the input image.", + }), + ), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: + 'The write mode of the output video. Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size.', + }), + ), + enable_output_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, output video will be checked for safety after generation.', + }), + ) + .default(false), + image_url: z.string().register(z.globalRegistry, { + description: + 'URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped.', + }), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: + 'The quality of the output video. Higher quality means better visual quality but larger file size.', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, input data will be checked for safety before processing.', + }), + ) + .default(false), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), + end_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'URL of the end image.', + }), + ), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning.', + }), + ) + .default(false), +}) + +/** + * Veo3ImageToVideoOutput + */ +export const zSchemaVeo3ImageToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * Veo3ImageToVideoInput + */ +export const zSchemaVeo3ImageToVideoInput = z.object({ + prompt: z.string().max(20000).register(z.globalRegistry, { + description: 'The text prompt describing how the image should be animated', + }), + resolution: z.optional( + z.enum(['720p', '1080p']).register(z.globalRegistry, { + description: 'The resolution of the generated video.', + }), + ), + aspect_ratio: z.optional( + z.enum(['auto', '16:9', '9:16']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video.', + }), + ), + generate_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to generate audio for the video.', + }), + ) + .default(true), + auto_fix: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to automatically attempt to fix prompts that fail content policy or other validation checks by rewriting them.', + }), + ) + .default(false), + duration: z.optional( + z.enum(['4s', '6s', '8s']).register(z.globalRegistry, { + description: 'The duration of the generated video.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: + 'URL of the input image to animate. 
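+// --- Illustrative sketch (hypothetical values). ---
+// The turbo variant above keeps delivery options but drops the sampler controls
+// (num_inference_steps, guidance_scale); safeParse rejects out-of-enum values:
+//
+//   const check = zSchemaWanV22A14bImageToVideoTurboInput.safeParse({
+//     prompt: 'test',
+//     image_url: 'https://example.com/a.png',
+//     resolution: '1080p', // not in ['480p', '580p', '720p'] => check.success === false
+//   })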
Should be 720p or higher resolution in 16:9 or 9:16 aspect ratio. If the image is not in 16:9 or 9:16 aspect ratio, it will be cropped to fit.', + }), +}) + +/** + * ImageToVideoHailuo02FastOutput + */ +export const zSchemaMinimaxHailuo02FastImageToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * FastImageToVideoHailuo02Input + */ +export const zSchemaMinimaxHailuo02FastImageToVideoInput = z.object({ + prompt_optimizer: z + .optional( + z.boolean().register(z.globalRegistry, { + description: "Whether to use the model's prompt optimizer", + }), + ) + .default(true), + duration: z.optional( + z.enum(['6', '10']).register(z.globalRegistry, { + description: + 'The duration of the video in seconds. 10 seconds videos are not supported for 1080p resolution.', + }), + ), + prompt: z.string().max(2000), + image_url: z.string(), +}) + +/** + * WanI2VResponse + */ +export const zSchemaWanV22A14bImageToVideoLoraOutput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The text prompt used for video generation.', + }), + ) + .default(''), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * WanLoRAI2VRequest + */ +export const zSchemaWanV22A14bImageToVideoLoraInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide video generation.', + }), + shift: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'Shift value for the video. Must be between 1.0 and 10.0.', + }), + ) + .default(5), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level to use. The more acceleration, the faster the generation, but with lower quality. The recommended value is 'regular'.", + }), + ), + num_interpolated_frames: z + .optional( + z.int().gte(0).lte(4).register(z.globalRegistry, { + description: + 'Number of frames to interpolate between each pair of generated frames. Must be between 0 and 4.', + }), + ) + .default(1), + reverse_video: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If true, the video will be reversed.', + }), + ) + .default(false), + loras: z + .optional( + z.array(zSchemaLoRaWeight).register(z.globalRegistry, { + description: 'LoRA weights to be used in the inference.', + }), + ) + .default([]), + frames_per_second: z + .optional( + z.int().gte(4).lte(60).register(z.globalRegistry, { + description: + 'Frames per second of the generated video. Must be between 4 to 60. When using interpolation and `adjust_fps_for_interpolation` is set to true (default true,) the final FPS will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If `adjust_fps_for_interpolation` is set to false, this value will be used as-is.', + }), + ) + .default(16), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, input data will be checked for safety before processing.', + }), + ) + .default(false), + num_frames: z + .optional( + z.int().gte(17).lte(161).register(z.globalRegistry, { + description: + 'Number of frames to generate. 
Must be between 17 to 161 (inclusive).', + }), + ) + .default(81), + end_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'URL of the end image.', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for video generation.', + }), + ) + .default(''), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.', + }), + ) + .default(3.5), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: + 'The write mode of the output video. Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size.', + }), + ), + resolution: z.optional( + z.enum(['480p', '580p', '720p']).register(z.globalRegistry, { + description: 'Resolution of the generated video (480p, 580p, or 720p).', + }), + ), + aspect_ratio: z.optional( + z.enum(['auto', '16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: + "Aspect ratio of the generated video. If 'auto', the aspect ratio will be determined automatically based on the input image.", + }), + ), + enable_output_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, output video will be checked for safety after generation.', + }), + ) + .default(false), + image_url: z.string().register(z.globalRegistry, { + description: + 'URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped.', + }), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: + 'The quality of the output video. Higher quality means better visual quality but larger file size.', + }), + ), + guidance_scale_2: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Guidance scale for the second stage of the model. This is used to control the adherence to the prompt in the second stage of the model.', + }), + ) + .default(4), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(2).lte(40).register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. Higher values give better quality but take longer.', + }), + ) + .default(27), + interpolator_model: z.optional( + z.enum(['none', 'film', 'rife']).register(z.globalRegistry, { + description: + 'The model to use for frame interpolation. If None, no interpolation is applied.', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), + adjust_fps_for_interpolation: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the number of frames per second will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. 
If false, the passed frames per second will be used as-is.', + }), + ) + .default(true), +}) + +export const zSchemaBytedanceVideoStylizeOutput = z.unknown() + +/** + * StylizeInput + */ +export const zSchemaBytedanceVideoStylizeInput = z.object({ + style: z.string().max(100).register(z.globalRegistry, { + description: + 'The style for your character in the video. Please use a short description.', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to make the stylized video from.', + }), +}) + +/** + * MareyOutput + */ +export const zSchemaMareyI2vOutput = z.object({ + video: zSchemaFile, +}) + +/** + * MareyInputI2V + */ +export const zSchemaMareyI2vInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate a video from', + }), + duration: z.optional( + z.enum(['5s', '10s']).register(z.globalRegistry, { + description: 'The duration of the generated video.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to use as the first frame of the video.', + }), + dimensions: z.optional( + z + .enum(['1920x1080', '1080x1920', '1152x1152', '1536x1152', '1152x1536']) + .register(z.globalRegistry, { + description: + 'The dimensions of the generated video in width x height format.', + }), + ), + guidance_scale: z.optional(z.union([z.number(), z.unknown()])), + seed: z.optional(z.union([z.int(), z.unknown()])), + negative_prompt: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * I2VOutputV5 + */ +export const zSchemaPixverseV5ImageToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * ImageToVideoRequestV5 + */ +export const zSchemaPixverseV5ImageToVideoInput = z.object({ + prompt: z.string(), + resolution: z.optional( + z.enum(['360p', '540p', '720p', '1080p']).register(z.globalRegistry, { + description: 'The resolution of the generated video', + }), + ), + duration: z.optional( + z.enum(['5', '8']).register(z.globalRegistry, { + description: + 'The duration of the generated video in seconds. 8s videos cost double. 
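+// --- Illustrative sketch; zSchemaLoRaWeight is declared earlier in this
+// generated module and its exact fields are not shown in this hunk. ---
+// The LoRA variant accepts an array of weight entries; the parsed default is []:
+//
+//   const loraInput = zSchemaWanV22A14bImageToVideoLoraInput.parse({
+//     prompt: 'stop-motion clay style',
+//     image_url: 'https://example.com/start.png', // hypothetical URL
+//   })
+//   // loraInput.loras => [] unless weights are supplied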
1080p videos are limited to 5 seconds', + }), + ), + style: z.optional( + z + .enum(['anime', '3d_animation', 'clay', 'comic', 'cyberpunk']) + .register(z.globalRegistry, { + description: 'The style of the generated video', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to use as the first frame', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt to be used for the generation', + }), + ) + .default(''), +}) + +/** + * EffectOutput + */ +export const zSchemaPixverseV5EffectsOutput = z.object({ + video: zSchemaFile, +}) + +/** + * EffectInput + */ +export const zSchemaPixverseV5EffectsInput = z.object({ + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt to be used for the generation', + }), + ) + .default(''), + duration: z.optional( + z.enum(['5', '8']).register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ), + resolution: z.optional( + z.enum(['360p', '540p', '720p', '1080p']).register(z.globalRegistry, { + description: 'The resolution of the generated video.', + }), + ), + effect: z + .enum([ + 'Kiss Me AI', + 'Kiss', + 'Muscle Surge', + 'Warmth of Jesus', + 'Anything, Robot', + 'The Tiger Touch', + 'Hug', + 'Holy Wings', + 'Microwave', + 'Zombie Mode', + 'Squid Game', + 'Baby Face', + 'Black Myth: Wukong', + 'Long Hair Magic', + 'Leggy Run', + 'Fin-tastic Mermaid', + 'Punch Face', + 'Creepy Devil Smile', + 'Thunder God', + 'Eye Zoom Challenge', + "Who's Arrested?", + 'Baby Arrived', + 'Werewolf Rage', + 'Bald Swipe', + 'BOOM DROP', + 'Huge Cutie', + 'Liquid Metal', + 'Sharksnap!', + 'Dust Me Away', + '3D Figurine Factor', + 'Bikini Up', + 'My Girlfriends', + 'My Boyfriends', + 'Subject 3 Fever', + 'Earth Zoom', + 'Pole Dance', + 'Vroom Dance', + 'GhostFace Terror', + 'Dragon Evoker', + 'Skeletal Bae', + 'Summoning succubus', + 'Halloween Voodoo Doll', + '3D Naked-Eye AD', + 'Package Explosion', + 'Dishes Served', + 'Ocean ad', + 'Supermarket AD', + 'Tree doll', + 'Come Feel My Abs', + 'The Bicep Flex', + 'London Elite Vibe', + 'Flora Nymph Gown', + 'Christmas Costume', + "It's Snowy", + 'Reindeer Cruiser', + 'Snow Globe Maker', + 'Pet Christmas Outfit', + 'Adopt a Polar Pal', + 'Cat Christmas Box', + 'Starlight Gift Box', + 'Xmas Poster', + 'Pet Christmas Tree', + 'City Santa Hat', + 'Stocking Sweetie', + 'Christmas Night', + 'Xmas Front Page Karma', + "Grinch's Xmas Hijack", + 'Giant Product', + 'Truck Fashion Shoot', + 'Beach AD', + 'Shoal Surround', + 'Mechanical Assembly', + 'Lighting AD', + 'Billboard AD', + 'Product close-up', + 'Parachute Delivery', + 'Dreamlike Cloud', + 'Macaron Machine', + 'Poster AD', + 'Truck AD', + 'Graffiti AD', + '3D Figurine Factory', + 'The Exclusive First Class', + 'Art Zoom Challenge', + 'I Quit', + 'Hitchcock Dolly Zoom', + 'Smell the Lens', + 'I believe I can fly', + 'Strikout Dance', + 'Pixel World', + 'Mint in Box', + 'Hands up, Hand', + 'Flora Nymph Go', + 'Somber Embrace', + 'Beam me up', + 'Suit Swagger', + ]) + .register(z.globalRegistry, { + description: 'The effect to apply to the video', + }), + image_url: z.string().register(z.globalRegistry, { + description: + 'Optional URL of the image to use as the first frame. 
If not provided, generates from text', + }), +}) + +/** + * TransitionOutputV5 + */ +export const zSchemaPixverseV5TransitionOutput = z.object({ + video: zSchemaFile, +}) + +/** + * TransitionRequest + */ +export const zSchemaPixverseV5TransitionInput = z.object({ + first_image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to use as the first frame', + }), + aspect_ratio: z.optional( + z.enum(['16:9', '4:3', '1:1', '3:4', '9:16']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), + resolution: z.optional( + z.enum(['360p', '540p', '720p', '1080p']).register(z.globalRegistry, { + description: 'The resolution of the generated video', + }), + ), + style: z.optional( + z + .enum(['anime', '3d_animation', 'clay', 'comic', 'cyberpunk']) + .register(z.globalRegistry, { + description: 'The style of the generated video', + }), + ), + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt for the transition', + }), + duration: z.optional( + z.enum(['5', '8']).register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n ', + }), + ), + end_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'URL of the image to use as the last frame', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt to be used for the generation', + }), + ) + .default(''), +}) + +/** + * ProcessOutput + */ +export const zSchemaDecartLucy5bImageToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * ProcessRequest + */ +export const zSchemaDecartLucy5bImageToVideoInput = z.object({ + prompt: z.string().max(1500).register(z.globalRegistry, { + description: 'Text description of the desired video content', + }), + aspect_ratio: z.optional( + z.enum(['9:16', '16:9']).register(z.globalRegistry, { + description: 'Aspect ratio of the generated video.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(true), + resolution: z.optional( + z.enum(['720p']).register(z.globalRegistry, { + description: 'Resolution of the generated video', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to use as the first frame', + }), +}) + +/** + * A coordinate point with x and y values for motion tracking + */ +export const zSchemaTrackPoint = z + .object({ + x: z.number().register(z.globalRegistry, { + description: 'X coordinate', + }), + y: z.number().register(z.globalRegistry, { + description: 'Y coordinate', + }), + }) + .register(z.globalRegistry, { + description: 'A coordinate point with x and y values for motion tracking', + }) + +/** + * WanATIResponse + */ +export const zSchemaWanAtiOutput = z.object({ + video: zSchemaFile, +}) + +/** + * WanATIRequest + */ +export const zSchemaWanAtiInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide video generation.', + }), + resolution: z.optional( + z.enum(['480p', '580p', '720p']).register(z.globalRegistry, { + description: 'Resolution of the generated video (480p, 580p, 720p).', + }), + 
), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the input image.', + }), + track: z.array(z.array(zSchemaTrackPoint)).register(z.globalRegistry, { + description: + "Motion tracks to guide video generation. Each track is a sequence of points defining a motion trajectory. Multiple tracks can control different elements or objects in the video. Expected format: array of tracks, where each track is an array of points with 'x' and 'y' coordinates (up to 121 points per track). Points will be automatically padded to 121 if fewer are provided. Coordinates should be within the image dimensions.", + }), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.', + }), + ) + .default(5), + num_inference_steps: z + .optional( + z.int().gte(2).lte(40).register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. Higher values give better quality but take longer.', + }), + ) + .default(40), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), +}) + +/** + * SeedanceReferenceToVideoOutput + */ +export const zSchemaBytedanceSeedanceV1LiteReferenceToVideoOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for generation', + }), + video: zSchemaFile, +}) + +/** + * SeedanceReferenceToVideoInput + */ +export const zSchemaBytedanceSeedanceV1LiteReferenceToVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt used to generate the video', + }), + resolution: z.optional( + z.enum(['480p', '720p']).register(z.globalRegistry, { + description: + 'Video resolution - 480p for faster generation, 720p for higher quality', + }), + ), + aspect_ratio: z.optional( + z + .enum(['21:9', '16:9', '4:3', '1:1', '3:4', '9:16', 'auto']) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), + duration: z.optional( + z + .enum(['2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12']) + .register(z.globalRegistry, { + description: 'Duration of the video in seconds', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + camera_fixed: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to fix the camera position', + }), + ) + .default(false), + reference_image_urls: z.array(z.string()).register(z.globalRegistry, { + description: 'Reference images to generate the video with.', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed to control video generation. Use -1 for random.', + }), + ), +}) + +/** + * Lucy14BOutput + */ +export const zSchemaLucy14bImageToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * Lucy14BImageToVideoInput + */ +export const zSchemaLucy14bImageToVideoInput = z.object({ + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated\n and uploaded before returning the response. 
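+// --- Illustrative sketch (hypothetical coordinates). ---
+// WanATI's `track` is an array of tracks, each an array of {x, y} points that
+// the service pads to 121 points per the description above. A two-point drag:
+//
+//   const track: Array<Array<z.infer<typeof zSchemaTrackPoint>>> = [
+//     [{ x: 100, y: 200 }, { x: 400, y: 200 }],
+//   ]
+//   zSchemaWanAtiInput.parse({
+//     prompt: 'the ball rolls to the right',
+//     image_url: 'https://example.com/ball.png', // hypothetical URL
+//     track,
+//   })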
This will increase the\n latency of the function but it allows you to get the image directly\n in the response without going through the CDN.\n ', + }), + ) + .default(true), + aspect_ratio: z.optional( + z.enum(['9:16', '16:9']).register(z.globalRegistry, { + description: 'Aspect ratio of the generated video.', + }), + ), + prompt: z.string().max(1500).register(z.globalRegistry, { + description: 'Text description of the desired video content', + }), + resolution: z.optional( + z.enum(['720p']).register(z.globalRegistry, { + description: 'Resolution of the generated video', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to use as the first frame', + }), +}) + +/** + * AIAvatarOutput + */ +export const zSchemaKlingVideoV1ProAiAvatarOutput = z.object({ + duration: z.number().register(z.globalRegistry, { + description: 'Duration of the output video in seconds.', + }), + video: zSchemaFile, +}) + +/** + * AIAvatarInput + */ +export const zSchemaKlingVideoV1ProAiAvatarInput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The prompt to use for the video generation.', + }), + ) + .default('.'), + audio_url: z.string().register(z.globalRegistry, { + description: 'The URL of the audio file.', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to use as your avatar', + }), +}) + +/** + * AIAvatarOutput + */ +export const zSchemaKlingVideoV1StandardAiAvatarOutput = z.object({ + duration: z.number().register(z.globalRegistry, { + description: 'Duration of the output video in seconds.', + }), + video: zSchemaFile, +}) + +/** + * AIAvatarInput + */ +export const zSchemaKlingVideoV1StandardAiAvatarInput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The prompt to use for the video generation.', + }), + ) + .default('.'), + audio_url: z.string().register(z.globalRegistry, { + description: 'The URL of the audio file.', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to use as your avatar', + }), +}) + +/** + * FabricOneOutput + */ +export const zSchemaFabric10Output = z.object({ + video: zSchemaFile, +}) + +/** + * FabricOneLipsyncInput + */ +export const zSchemaFabric10Input = z.object({ + resolution: z.enum(['720p', '480p']).register(z.globalRegistry, { + description: 'Resolution', + }), + audio_url: z.url().min(1).max(2083), + image_url: z.url().min(1).max(2083), +}) + +/** + * OmniHumanv15Output + */ +export const zSchemaBytedanceOmnihumanV15Output = z.object({ + duration: z.number().register(z.globalRegistry, { + description: 'Duration of audio input/video output as used for billing.', + }), + video: zSchemaFile, +}) + +/** + * OmniHumanv15Input + */ +export const zSchemaBytedanceOmnihumanV15Input = z.object({ + turbo_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Generate a video at a faster rate with a slight quality trade-off.', + }), + ) + .default(false), + resolution: z.optional( + z.enum(['720p', '1080p']).register(z.globalRegistry, { + description: + 'The resolution of the generated video. Defaults to 1080p. 720p generation is faster and higher in quality. 
1080p generation is limited to 30s audio and 720p generation is limited to 60s audio.', + }), + ), + prompt: z.optional( + z.string().register(z.globalRegistry, { + description: 'The text prompt used to guide the video generation.', + }), + ), + audio_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the audio file to generate the video. Audio must be under 30s long for 1080p generation and under 60s long for 720p generation.', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image used to generate the video', + }), +}) + +/** + * FabricOneOutput + */ +export const zSchemaFabric10FastOutput = z.object({ + video: zSchemaFile, +}) + +/** + * FabricOneLipsyncInput + */ +export const zSchemaFabric10FastInput = z.object({ + resolution: z.enum(['720p', '480p']).register(z.globalRegistry, { + description: 'Resolution', + }), + audio_url: z.url().min(1).max(2083), + image_url: z.url().min(1).max(2083), +}) + +/** + * OviI2VResponse + */ +export const zSchemaOviImageToVideoOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: z.optional(z.union([zSchemaFile, z.unknown()])), +}) + +/** + * OviI2VRequest + */ +export const zSchemaOviImageToVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide video generation.', + }), + seed: z.optional(z.union([z.int(), z.unknown()])), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps.', + }), + ) + .default(30), + audio_negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for audio generation.', + }), + ) + .default('robotic, muffled, echo, distorted'), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for video generation.', + }), + ) + .default('jitter, bad hands, blur, distortion'), + image_url: z.string().register(z.globalRegistry, { + description: 'The image URL to guide video generation.', + }), +}) + +/** + * VideoFile + */ +export const zSchemaVideoFile = z.object({ + file_size: z.optional( + z.int().register(z.globalRegistry, { + description: 'The size of the file in bytes.', + }), + ), + duration: z.optional( + z.number().register(z.globalRegistry, { + description: 'The duration of the video', + }), + ), + height: z.optional( + z.int().register(z.globalRegistry, { + description: 'The height of the video', + }), + ), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), + fps: z.optional( + z.number().register(z.globalRegistry, { + description: 'The FPS of the video', + }), + ), + width: z.optional( + z.int().register(z.globalRegistry, { + description: 'The width of the video', + }), + ), + file_name: z.optional( + z.string().register(z.globalRegistry, { + description: + 'The name of the file. 
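+// --- Illustrative sketch; assumes zSchemaFile (declared earlier in this module,
+// outside this hunk) carries a `url` field. ---
+// Ovi's output declares `video` as an optional union with z.unknown(), so narrow
+// it at runtime before reading file fields:
+//
+//   const out = zSchemaOviImageToVideoOutput.parse(response) // hypothetical response
+//   const file = zSchemaFile.safeParse(out.video)
+//   if (file.success) console.log(file.data.url)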
It will be auto-generated if not provided.', + }), + ), + content_type: z.optional( + z.string().register(z.globalRegistry, { + description: 'The mime type of the file.', + }), + ), + num_frames: z.optional( + z.int().register(z.globalRegistry, { + description: 'The number of frames in the video', + }), + ), + file_data: z.optional( + z.string().register(z.globalRegistry, { + description: 'File data', + }), + ), +}) + +/** + * ImageFile + */ +export const zSchemaImageFile = z.object({ + file_size: z.optional( + z.int().register(z.globalRegistry, { + description: 'The size of the file in bytes.', + }), + ), + height: z.optional( + z.int().register(z.globalRegistry, { + description: 'The height of the image', + }), + ), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), + width: z.optional( + z.int().register(z.globalRegistry, { + description: 'The width of the image', + }), + ), + file_name: z.optional( + z.string().register(z.globalRegistry, { + description: + 'The name of the file. It will be auto-generated if not provided.', + }), + ), + content_type: z.optional( + z.string().register(z.globalRegistry, { + description: 'The mime type of the file.', + }), + ), + file_data: z.optional( + z.string().register(z.globalRegistry, { + description: 'File data', + }), + ), +}) + +/** + * ImageToVideoOutput + */ +export const zSchemaSora2ImageToVideoOutput = z.object({ + spritesheet: z.optional(zSchemaImageFile), + thumbnail: z.optional(zSchemaImageFile), + video_id: z.string().register(z.globalRegistry, { + description: 'The ID of the generated video', + }), + video: zSchemaVideoFile, +}) + +/** + * ImageToVideoInput + */ +export const zSchemaSora2ImageToVideoInput = z.object({ + prompt: z.string().min(1).max(5000).register(z.globalRegistry, { + description: 'The text prompt describing the video you want to generate', + }), + duration: z.optional( + z + .union([z.literal(4), z.literal(8), z.literal(12)]) + .register(z.globalRegistry, { + description: 'Duration of the generated video in seconds', + }), + ), + resolution: z.optional( + z.enum(['auto', '720p']).register(z.globalRegistry, { + description: 'The resolution of the generated video', + }), + ), + aspect_ratio: z.optional( + z.enum(['auto', '9:16', '16:9']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to use as the first frame', + }), + model: z.optional( + z + .enum(['sora-2', 'sora-2-2025-12-08', 'sora-2-2025-10-06']) + .register(z.globalRegistry, { + description: + 'The model to use for the generation. When the default model is selected, the latest snapshot of the model will be used - otherwise, select a specific snapshot of the model.', + }), + ), + delete_video: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to delete the video after generation for privacy reasons. 
If True, the video cannot be used for remixing and will be permanently deleted.', + }), + ) + .default(true), +}) + +/** + * ProImageToVideoOutput + */ +export const zSchemaSora2ImageToVideoProOutput = z.object({ + spritesheet: z.optional(zSchemaImageFile), + thumbnail: z.optional(zSchemaImageFile), + video_id: z.string().register(z.globalRegistry, { + description: 'The ID of the generated video', + }), + video: zSchemaVideoFile, +}) + +/** + * ProImageToVideoInput + */ +export const zSchemaSora2ImageToVideoProInput = z.object({ + prompt: z.string().min(1).max(5000).register(z.globalRegistry, { + description: 'The text prompt describing the video you want to generate', + }), + duration: z.optional( + z + .union([z.literal(4), z.literal(8), z.literal(12)]) + .register(z.globalRegistry, { + description: 'Duration of the generated video in seconds', + }), + ), + resolution: z.optional( + z.enum(['auto', '720p', '1080p']).register(z.globalRegistry, { + description: 'The resolution of the generated video', + }), + ), + aspect_ratio: z.optional( + z.enum(['auto', '9:16', '16:9']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), + delete_video: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to delete the video after generation for privacy reasons. If True, the video cannot be used for remixing and will be permanently deleted.', + }), + ) + .default(true), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to use as the first frame', + }), +}) + +/** + * Veo31ImageToVideoOutput + */ +export const zSchemaVeo31ImageToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * Veo31ImageToVideoInput + */ +export const zSchemaVeo31ImageToVideoInput = z.object({ + prompt: z.string().max(20000).register(z.globalRegistry, { + description: 'The text prompt describing the video you want to generate', + }), + duration: z.optional( + z.enum(['4s', '6s', '8s']).register(z.globalRegistry, { + description: 'The duration of the generated video.', + }), + ), + aspect_ratio: z.optional( + z.enum(['auto', '16:9', '9:16']).register(z.globalRegistry, { + description: + 'The aspect ratio of the generated video. Only 16:9 and 9:16 are supported.', + }), + ), + generate_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to generate audio for the video.', + }), + ) + .default(true), + auto_fix: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to automatically attempt to fix prompts that fail content policy or other validation checks by rewriting them.', + }), + ) + .default(false), + resolution: z.optional( + z.enum(['720p', '1080p', '4k']).register(z.globalRegistry, { + description: 'The resolution of the generated video.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: + 'URL of the input image to animate. Should be 720p or higher resolution in 16:9 or 9:16 aspect ratio. 
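+// --- Illustrative sketch (hypothetical values). ---
+// Sora 2 durations are numeric literals (4 | 8 | 12) rather than enum strings,
+// so duration: 8 parses while duration: '8' is rejected:
+//
+//   zSchemaSora2ImageToVideoInput.parse({
+//     prompt: 'a paper boat drifting in the rain',
+//     image_url: 'https://example.com/boat.png', // hypothetical URL
+//     duration: 8,
+//   })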
If the image is not in 16:9 or 9:16 aspect ratio, it will be cropped to fit.', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the random number generator.', + }), + ), + negative_prompt: z.optional( + z.string().register(z.globalRegistry, { + description: 'A negative prompt to guide the video generation.', + }), + ), +}) + +/** + * Veo31ImageToVideoOutput + */ +export const zSchemaVeo31FastImageToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * Veo31ImageToVideoInput + */ +export const zSchemaVeo31FastImageToVideoInput = z.object({ + prompt: z.string().max(20000).register(z.globalRegistry, { + description: 'The text prompt describing the video you want to generate', + }), + duration: z.optional( + z.enum(['4s', '6s', '8s']).register(z.globalRegistry, { + description: 'The duration of the generated video.', + }), + ), + aspect_ratio: z.optional( + z.enum(['auto', '16:9', '9:16']).register(z.globalRegistry, { + description: + 'The aspect ratio of the generated video. Only 16:9 and 9:16 are supported.', + }), + ), + generate_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to generate audio for the video.', + }), + ) + .default(true), + auto_fix: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to automatically attempt to fix prompts that fail content policy or other validation checks by rewriting them.', + }), + ) + .default(false), + resolution: z.optional( + z.enum(['720p', '1080p', '4k']).register(z.globalRegistry, { + description: 'The resolution of the generated video.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: + 'URL of the input image to animate. Should be 720p or higher resolution in 16:9 or 9:16 aspect ratio. 
If the image is not in 16:9 or 9:16 aspect ratio, it will be cropped to fit.', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the random number generator.', + }), + ), + negative_prompt: z.optional( + z.string().register(z.globalRegistry, { + description: 'A negative prompt to guide the video generation.', + }), + ), +}) + +/** + * Veo31ReferenceToVideoOutput + */ +export const zSchemaVeo31ReferenceToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * Veo31ReferenceToVideoInput + */ +export const zSchemaVeo31ReferenceToVideoInput = z.object({ + prompt: z.string().max(20000).register(z.globalRegistry, { + description: 'The text prompt describing the video you want to generate', + }), + duration: z.optional( + z.enum(['8s']).register(z.globalRegistry, { + description: 'The duration of the generated video.', + }), + ), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video.', + }), + ), + generate_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to generate audio for the video.', + }), + ) + .default(true), + resolution: z.optional( + z.enum(['720p', '1080p', '4k']).register(z.globalRegistry, { + description: 'The resolution of the generated video.', + }), + ), + auto_fix: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to automatically attempt to fix prompts that fail content policy or other validation checks by rewriting them.', + }), + ) + .default(false), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'URLs of the reference images to use for consistent subject appearance', + }), +}) + +/** + * Veo31FirstLastFrameToVideoOutput + */ +export const zSchemaVeo31FirstLastFrameToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * Veo31FirstLastFrameToVideoInput + */ +export const zSchemaVeo31FirstLastFrameToVideoInput = z.object({ + prompt: z.string().max(20000).register(z.globalRegistry, { + description: 'The text prompt describing the video you want to generate', + }), + duration: z.optional( + z.enum(['4s', '6s', '8s']).register(z.globalRegistry, { + description: 'The duration of the generated video.', + }), + ), + aspect_ratio: z.optional( + z.enum(['auto', '16:9', '9:16']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video.', + }), + ), + generate_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to generate audio for the video.', + }), + ) + .default(true), + auto_fix: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to automatically attempt to fix prompts that fail content policy or other validation checks by rewriting them.', + }), + ) + .default(false), + resolution: z.optional( + z.enum(['720p', '1080p', '4k']).register(z.globalRegistry, { + description: 'The resolution of the generated video.', + }), + ), + first_frame_url: z.string().register(z.globalRegistry, { + description: 'URL of the first frame of the video', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the random number generator.', + }), + ), + last_frame_url: z.string().register(z.globalRegistry, { + description: 'URL of the last frame of the video', + }), + negative_prompt: z.optional( + z.string().register(z.globalRegistry, { + description: 'A negative prompt to guide the video generation.', + }), + ), +}) + +/** 
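+// --- Illustrative sketch (hypothetical values). ---
+// The first/last-frame variants interpolate between two stills; both frame URLs
+// are required, and audio generation defaults to on:
+//
+//   const flf = zSchemaVeo31FirstLastFrameToVideoInput.parse({
+//     prompt: 'the flower blooms in timelapse',
+//     first_frame_url: 'https://example.com/bud.png', // hypothetical URLs
+//     last_frame_url: 'https://example.com/bloom.png',
+//   })
+//   // flf.generate_audio === true && flf.auto_fix === false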
+ * Veo31FirstLastFrameToVideoOutput + */ +export const zSchemaVeo31FastFirstLastFrameToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * Veo31FirstLastFrameToVideoInput + */ +export const zSchemaVeo31FastFirstLastFrameToVideoInput = z.object({ + prompt: z.string().max(20000).register(z.globalRegistry, { + description: 'The text prompt describing the video you want to generate', + }), + duration: z.optional( + z.enum(['4s', '6s', '8s']).register(z.globalRegistry, { + description: 'The duration of the generated video.', + }), + ), + aspect_ratio: z.optional( + z.enum(['auto', '16:9', '9:16']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video.', + }), + ), + generate_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to generate audio for the video.', + }), + ) + .default(true), + auto_fix: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to automatically attempt to fix prompts that fail content policy or other validation checks by rewriting them.', + }), + ) + .default(false), + resolution: z.optional( + z.enum(['720p', '1080p', '4k']).register(z.globalRegistry, { + description: 'The resolution of the generated video.', + }), + ), + first_frame_url: z.string().register(z.globalRegistry, { + description: 'URL of the first frame of the video', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the random number generator.', + }), + ), + last_frame_url: z.string().register(z.globalRegistry, { + description: 'URL of the last frame of the video', + }), + negative_prompt: z.optional( + z.string().register(z.globalRegistry, { + description: 'A negative prompt to guide the video generation.', + }), + ), +}) + +/** + * ImageToVideoV25StandardOutput + */ +export const zSchemaKlingVideoV25TurboStandardImageToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * ImageToVideoV25StandardRequest + */ +export const zSchemaKlingVideoV25TurboStandardImageToVideoInput = z.object({ + prompt: z.string().max(2500), + duration: z.optional( + z.enum(['5', '10']).register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to be used for the video', + }), + negative_prompt: z + .optional(z.string().max(2500)) + .default('blur, distort, and low quality'), + cfg_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt.\n ', + }), + ) + .default(0.5), +}) + +/** + * Q2ImageToVideoOutput + */ +export const zSchemaViduQ2ImageToVideoProOutput = z.object({ + video: zSchemaFile, +}) + +/** + * Q2ImageToVideoRequest + */ +export const zSchemaViduQ2ImageToVideoProInput = z.object({ + prompt: z.string().max(3000).register(z.globalRegistry, { + description: 'Text prompt for video generation, max 3000 characters', + }), + resolution: z.optional( + z.enum(['720p', '1080p']).register(z.globalRegistry, { + description: 'Output video resolution', + }), + ), + duration: z.optional( + z + .union([ + z.literal(2), + z.literal(3), + z.literal(4), + z.literal(5), + z.literal(6), + z.literal(7), + z.literal(8), + ]) + .register(z.globalRegistry, { + description: 'Duration of the video in seconds', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the 
image to use as the starting frame', + }), + bgm: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to add background music to the video (only for 4-second videos)', + }), + ) + .default(false), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), + movement_amplitude: z.optional( + z.enum(['auto', 'small', 'medium', 'large']).register(z.globalRegistry, { + description: 'The movement amplitude of objects in the frame', + }), + ), + end_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: + 'URL of the image to use as the ending frame. When provided, generates a transition video between start and end frames.', + }), + ), +}) + +/** + * Q2ImageToVideoOutput + */ +export const zSchemaViduQ2ImageToVideoTurboOutput = z.object({ + video: zSchemaFile, +}) + +/** + * Q2ImageToVideoRequest + */ +export const zSchemaViduQ2ImageToVideoTurboInput = z.object({ + prompt: z.string().max(3000).register(z.globalRegistry, { + description: 'Text prompt for video generation, max 3000 characters', + }), + resolution: z.optional( + z.enum(['720p', '1080p']).register(z.globalRegistry, { + description: 'Output video resolution', + }), + ), + duration: z.optional( + z + .union([ + z.literal(2), + z.literal(3), + z.literal(4), + z.literal(5), + z.literal(6), + z.literal(7), + z.literal(8), + ]) + .register(z.globalRegistry, { + description: 'Duration of the video in seconds', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to use as the starting frame', + }), + bgm: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to add background music to the video (only for 4-second videos)', + }), + ) + .default(false), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), + movement_amplitude: z.optional( + z.enum(['auto', 'small', 'medium', 'large']).register(z.globalRegistry, { + description: 'The movement amplitude of objects in the frame', + }), + ), + end_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: + 'URL of the image to use as the ending frame. 
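+// --- Illustrative sketch (hypothetical values). ---
+// Kling's cfg_scale is bounded to [0, 1] here, unlike the 1-10 guidance scales
+// elsewhere in this file, so a value like 5 is rejected:
+//
+//   const r = zSchemaKlingVideoV25TurboStandardImageToVideoInput.safeParse({
+//     prompt: 'waves at dusk',
+//     image_url: 'https://example.com/sea.png', // hypothetical URL
+//     cfg_scale: 5, // > 1 => r.success === false
+//   })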
When provided, generates a transition video between start and end frames.', + }), + ), +}) + +/** + * SeedanceFastI2VVideoOutput + */ +export const zSchemaBytedanceSeedanceV1ProFastImageToVideoOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for generation', + }), + video: zSchemaFile, +}) + +/** + * SeedanceProFastImageToVideoInput + */ +export const zSchemaBytedanceSeedanceV1ProFastImageToVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt used to generate the video', + }), + resolution: z.optional( + z.enum(['480p', '720p', '1080p']).register(z.globalRegistry, { + description: + 'Video resolution - 480p for faster generation, 720p for balance, 1080p for higher quality', + }), + ), + aspect_ratio: z.optional( + z + .enum(['21:9', '16:9', '4:3', '1:1', '3:4', '9:16', 'auto']) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), + duration: z.optional( + z + .enum(['2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12']) + .register(z.globalRegistry, { + description: 'Duration of the video in seconds', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image used to generate video', + }), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + camera_fixed: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to fix the camera position', + }), + ) + .default(false), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed to control video generation. Use -1 for random.', + }), + ), +}) + +/** + * ProFastImageToVideoHailuo23Output + */ +export const zSchemaMinimaxHailuo23FastProImageToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * ProFastImageToVideoHailuo23Input + */ +export const zSchemaMinimaxHailuo23FastProImageToVideoInput = z.object({ + prompt_optimizer: z + .optional( + z.boolean().register(z.globalRegistry, { + description: "Whether to use the model's prompt optimizer", + }), + ) + .default(true), + prompt: z.string().min(1).max(2000).register(z.globalRegistry, { + description: 'Text prompt for video generation', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to use as the first frame', + }), +}) + +/** + * StandardImageToVideoHailuo23Output + */ +export const zSchemaMinimaxHailuo23StandardImageToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * StandardImageToVideoHailuo23Input + */ +export const zSchemaMinimaxHailuo23StandardImageToVideoInput = z.object({ + prompt_optimizer: z + .optional( + z.boolean().register(z.globalRegistry, { + description: "Whether to use the model's prompt optimizer", + }), + ) + .default(true), + duration: z.optional( + z.enum(['6', '10']).register(z.globalRegistry, { + description: 'The duration of the video in seconds.', + }), + ), + prompt: z.string().min(1).max(2000).register(z.globalRegistry, { + description: 'Text prompt for video generation', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to use as the first frame', + }), +}) + +/** + * StandardFastImageToVideoHailuo23Output + */ +export const zSchemaMinimaxHailuo23FastStandardImageToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * StandardFastImageToVideoHailuo23Input + */ +export const 
zSchemaMinimaxHailuo23FastStandardImageToVideoInput = z.object({ + prompt_optimizer: z + .optional( + z.boolean().register(z.globalRegistry, { + description: "Whether to use the model's prompt optimizer", + }), + ) + .default(true), + duration: z.optional( + z.enum(['6', '10']).register(z.globalRegistry, { + description: 'The duration of the video in seconds.', + }), + ), + prompt: z.string().min(1).max(2000).register(z.globalRegistry, { + description: 'Text prompt for video generation', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to use as the first frame', + }), +}) + +/** + * LongCatImageToVideoResponse + */ +export const zSchemaLongcatVideoDistilledImageToVideo480pOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * LongCatImageToVideoRequest + */ +export const zSchemaLongcatVideoDistilledImageToVideo480pInput = z.object({ + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + video_output_type: z.optional( + z + .enum(['X264 (.mp4)', 'VP9 (.webm)', 'PRORES4444 (.mov)', 'GIF (.gif)']) + .register(z.globalRegistry, { + description: 'The output type of the generated video.', + }), + ), + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The prompt to guide the video generation.', + }), + ) + .default( + "First-person view from the cockpit of a Formula 1 car. The driver's gloved hands firmly grip the intricate, carbon-fiber steering wheel adorned with numerous colorful buttons and a vibrant digital display showing race data. Beyond the windshield, a sun-drenched racetrack stretches ahead, lined with cheering spectators in the grandstands. Several rival cars are visible in the distance, creating a dynamic sense of competition. The sky above is a clear, brilliant blue, reflecting the exhilarating atmosphere of a high-speed race. 
high resolution 4k", + ), + fps: z + .optional( + z.int().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frame rate of the generated video.', + }), + ) + .default(15), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to generate a video from.', + }), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable safety checker.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(2).lte(16).register(z.globalRegistry, { + description: 'The number of inference steps to use.', + }), + ) + .default(12), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the random number generator.', + }), + ), + num_frames: z + .optional( + z.int().gte(17).lte(961).register(z.globalRegistry, { + description: 'The number of frames to generate.', + }), + ) + .default(162), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), +}) + +/** + * LongCatImageToVideoResponse + */ +export const zSchemaLongcatVideoDistilledImageToVideo720pOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * LongCat720PImageToVideoRequest + */ +export const zSchemaLongcatVideoDistilledImageToVideo720pInput = z.object({ + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + video_output_type: z.optional( + z + .enum(['X264 (.mp4)', 'VP9 (.webm)', 'PRORES4444 (.mov)', 'GIF (.gif)']) + .register(z.globalRegistry, { + description: 'The output type of the generated video.', + }), + ), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The prompt to guide the video generation.', + }), + ) + .default( + "First-person view from the cockpit of a Formula 1 car. The driver's gloved hands firmly grip the intricate, carbon-fiber steering wheel adorned with numerous colorful buttons and a vibrant digital display showing race data. Beyond the windshield, a sun-drenched racetrack stretches ahead, lined with cheering spectators in the grandstands. Several rival cars are visible in the distance, creating a dynamic sense of competition. The sky above is a clear, brilliant blue, reflecting the exhilarating atmosphere of a high-speed race. 
high resolution 4k", + ), + fps: z + .optional( + z.int().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frame rate of the generated video.', + }), + ) + .default(30), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + num_refine_inference_steps: z + .optional( + z.int().gte(2).lte(16).register(z.globalRegistry, { + description: 'The number of inference steps to use for refinement.', + }), + ) + .default(12), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to generate a video from.', + }), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable safety checker.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(2).lte(16).register(z.globalRegistry, { + description: 'The number of inference steps to use.', + }), + ) + .default(12), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the random number generator.', + }), + ), + num_frames: z + .optional( + z.int().gte(17).lte(961).register(z.globalRegistry, { + description: 'The number of frames to generate.', + }), + ) + .default(162), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), +}) + +/** + * LongCatImageToVideoResponse + */ +export const zSchemaLongcatVideoImageToVideo480pOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * LongCatCFGImageToVideoRequest + */ +export const zSchemaLongcatVideoImageToVideo480pInput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The prompt to guide the video generation.', + }), + ) + .default( + "First-person view from the cockpit of a Formula 1 car. The driver's gloved hands firmly grip the intricate, carbon-fiber steering wheel adorned with numerous colorful buttons and a vibrant digital display showing race data. Beyond the windshield, a sun-drenched racetrack stretches ahead, lined with cheering spectators in the grandstands. Several rival cars are visible in the distance, creating a dynamic sense of competition. The sky above is a clear, brilliant blue, reflecting the exhilarating atmosphere of a high-speed race. 
high resolution 4k", + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: 'The acceleration level to use for the video generation.', + }), + ), + fps: z + .optional( + z.int().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frame rate of the generated video.', + }), + ) + .default(15), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'The guidance scale to use for the video generation.', + }), + ) + .default(4), + num_frames: z + .optional( + z.int().gte(17).lte(961).register(z.globalRegistry, { + description: 'The number of frames to generate.', + }), + ) + .default(162), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable safety checker.', + }), + ) + .default(true), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to use for the video generation.', + }), + ) + .default( + 'Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards', + ), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + video_output_type: z.optional( + z + .enum(['X264 (.mp4)', 'VP9 (.webm)', 'PRORES4444 (.mov)', 'GIF (.gif)']) + .register(z.globalRegistry, { + description: 'The output type of the generated video.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to generate a video from.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(8).lte(50).register(z.globalRegistry, { + description: + 'The number of inference steps to use for the video generation.', + }), + ) + .default(40), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the random number generator.', + }), + ), +}) + +/** + * LongCatImageToVideoResponse + */ +export const zSchemaLongcatVideoImageToVideo720pOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * LongCat720PCFGImageToVideoRequest + */ +export const zSchemaLongcatVideoImageToVideo720pInput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The prompt to guide the video generation.', + }), + ) + .default( + "First-person view from the cockpit of a Formula 1 car. 
The driver's gloved hands firmly grip the intricate, carbon-fiber steering wheel adorned with numerous colorful buttons and a vibrant digital display showing race data. Beyond the windshield, a sun-drenched racetrack stretches ahead, lined with cheering spectators in the grandstands. Several rival cars are visible in the distance, creating a dynamic sense of competition. The sky above is a clear, brilliant blue, reflecting the exhilarating atmosphere of a high-speed race. high resolution 4k", + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: 'The acceleration level to use for the video generation.', + }), + ), + fps: z + .optional( + z.int().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frame rate of the generated video.', + }), + ) + .default(30), + num_refine_inference_steps: z + .optional( + z.int().gte(8).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to use for refinement.', + }), + ) + .default(40), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'The guidance scale to use for the video generation.', + }), + ) + .default(4), + num_frames: z + .optional( + z.int().gte(17).lte(961).register(z.globalRegistry, { + description: 'The number of frames to generate.', + }), + ) + .default(162), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable safety checker.', + }), + ) + .default(true), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to use for the video generation.', + }), + ) + .default( + 'Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards', + ), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + video_output_type: z.optional( + z + .enum(['X264 (.mp4)', 'VP9 (.webm)', 'PRORES4444 (.mov)', 'GIF (.gif)']) + .register(z.globalRegistry, { + description: 'The output type of the generated video.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to generate a video from.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(8).lte(50).register(z.globalRegistry, { + description: + 'The number of inference steps to use for the video generation.', + }), + ) + .default(40), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the random number generator.', + }), + ), +}) + +/** + * 
KeyframeTransition + * + * Configuration for a transition between two keyframes + */ +export const zSchemaKeyframeTransition = z + .object({ + prompt: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Specific prompt for this transition. Overrides the global prompt if provided.', + }), + ), + duration: z + .optional( + z.int().gte(1).lte(25).register(z.globalRegistry, { + description: 'Duration of this transition in seconds', + }), + ) + .default(5), + }) + .register(z.globalRegistry, { + description: 'Configuration for a transition between two keyframes', + }) + +/** + * Pika22KeyframesToVideoOutput + * + * Output model for Pika 2.2 keyframes-to-video generation + */ +export const zSchemaPikaV22PikaframesOutput = z + .object({ + video: zSchemaFile, + }) + .register(z.globalRegistry, { + description: 'Output model for Pika 2.2 keyframes-to-video generation', + }) + +/** + * Pika22KeyframesToVideoRequest + */ +export const zSchemaPikaV22PikaframesInput = z.object({ + prompt: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Default prompt for all transitions. Individual transition prompts override this.', + }), + ), + resolution: z.optional( + z.enum(['720p', '1080p']).register(z.globalRegistry, { + description: 'The resolution of the generated video', + }), + ), + transitions: z.optional( + z.array(zSchemaKeyframeTransition).register(z.globalRegistry, { + description: + 'Configuration for each transition. Length must be len(image_urls) - 1. Total duration of all transitions must not exceed 25 seconds. If not provided, uses default 5-second transitions with the global prompt.', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the random number generator', + }), + ), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'URLs of keyframe images (2-5 images) to create transitions between', + }), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'A negative prompt to guide the model', + }), + ) + .default(''), +}) + +/** + * SwapOutput + */ +export const zSchemaPixverseSwapOutput = z.object({ + video: zSchemaFile, +}) + +/** + * SwapRequest + */ +export const zSchemaPixverseSwapInput = z.object({ + original_sound_switch: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to keep the original audio', + }), + ) + .default(true), + video_url: z.string().register(z.globalRegistry, { + description: 'URL of the external video to swap', + }), + keyframe_id: z + .optional( + z.int().gte(1).register(z.globalRegistry, { + description: 'The keyframe ID (from 1 to the last frame position)', + }), + ) + .default(1), + mode: z.optional( + z.enum(['person', 'object', 'background']).register(z.globalRegistry, { + description: 'The swap mode to use', + }), + ), + resolution: z.optional( + z.enum(['360p', '540p', '720p']).register(z.globalRegistry, { + description: 'The output resolution (1080p not supported)', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the target image for swapping', + }), +}) + +/** + * LynxOutput + */ +export const zSchemaLynxOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), + video: zSchemaVideoFile, +}) + +/** + * LynxInput + */ +export const zSchemaLynxInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Text prompt to guide video generation', + }), + 
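+  // Note: this schema exposes several related scales. guidance_scale is
+  // classifier-free guidance on the text prompt, guidance_scale_2 controls
+  // adherence to the reference image, strength controls the overall influence
+  // of the reference image, and ip_scale controls how strongly the subject's
+  // identity is preserved. See each field's description for range and default.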
resolution: z.optional(
+    z.enum(['480p', '580p', '720p']).register(z.globalRegistry, {
+      description: 'Resolution of the generated video (480p, 580p, or 720p)',
+    }),
+  ),
+  aspect_ratio: z.optional(
+    z.enum(['16:9', '9:16', '1:1']).register(z.globalRegistry, {
+      description: 'Aspect ratio of the generated video (16:9, 9:16, or 1:1)',
+    }),
+  ),
+  num_inference_steps: z
+    .optional(
+      z.int().gte(1).lte(75).register(z.globalRegistry, {
+        description:
+          'Number of inference steps for sampling. Higher values give better quality but take longer.',
+      }),
+    )
+    .default(50),
+  guidance_scale_2: z
+    .optional(
+      z.number().gte(0).lte(10).register(z.globalRegistry, {
+        description:
+          'Image guidance scale. Controls how closely the generated video follows the reference image. Higher values increase adherence to the reference image but may decrease quality.',
+      }),
+    )
+    .default(2),
+  strength: z
+    .optional(
+      z.number().gte(0).lte(2).register(z.globalRegistry, {
+        description:
+          'Reference image scale. Controls the influence of the reference image on the generated video.',
+      }),
+    )
+    .default(1),
+  frames_per_second: z
+    .optional(
+      z.int().gte(5).lte(30).register(z.globalRegistry, {
+        description:
+          'Frames per second of the generated video. Must be between 5 and 30.',
+      }),
+    )
+    .default(16),
+  image_url: z.string().register(z.globalRegistry, {
+    description: 'The URL of the subject image to be used for video generation',
+  }),
+  guidance_scale: z
+    .optional(
+      z.number().gte(1).lte(20).register(z.globalRegistry, {
+        description:
+          'Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.',
+      }),
+    )
+    .default(5),
+  seed: z.optional(
+    z.int().register(z.globalRegistry, {
+      description:
+        'Random seed for reproducibility. If None, a random seed is chosen.',
+    }),
+  ),
+  num_frames: z
+    .optional(
+      z.int().gte(9).lte(81).register(z.globalRegistry, {
+        description:
+          'Number of frames in the generated video. Must be between 9 and 81.',
+      }),
+    )
+    .default(81),
+  negative_prompt: z
+    .optional(
+      z.string().register(z.globalRegistry, {
+        description:
+          'Negative prompt to guide what should not appear in the generated video',
+      }),
+    )
+    .default(
+      'Bright tones, overexposed, blurred background, static, subtitles, style, works, paintings, images, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards',
+    ),
+  ip_scale: z
+    .optional(
+      z.number().gte(0).lte(2).register(z.globalRegistry, {
+        description:
+          "Identity preservation scale. 
Controls how closely the generated video preserves the subject's identity from the reference image.", + }), + ) + .default(1), +}) + +/** + * LTXVImageToVideoResponse + */ +export const zSchemaLtx2ImageToVideoOutput = z.object({ + video: zSchemaVideoFile, +}) + +/** + * LTXVImageToVideoRequest + */ +export const zSchemaLtx2ImageToVideoInput = z.object({ + prompt: z.string().min(1).max(5000).register(z.globalRegistry, { + description: 'The prompt to generate the video from', + }), + aspect_ratio: z.optional( + z.enum(['16:9']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), + duration: z.optional( + z + .union([z.literal(6), z.literal(8), z.literal(10)]) + .register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ), + generate_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to generate audio for the generated video', + }), + ) + .default(true), + resolution: z.optional( + z.enum(['1080p', '1440p', '2160p']).register(z.globalRegistry, { + description: 'The resolution of the generated video', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: + 'URL of the image to generate the video from. Must be publicly accessible or base64 data URI. Supports PNG, JPEG, WebP, AVIF, and HEIF formats.', + }), + fps: z.optional( + z.union([z.literal(25), z.literal(50)]).register(z.globalRegistry, { + description: 'The frames per second of the generated video', + }), + ), +}) + +/** + * LTXVImageToVideoResponse + */ +export const zSchemaLtx2ImageToVideoFastOutput = z.object({ + video: zSchemaVideoFile, +}) + +/** + * LTXVImageToVideoFastRequest + */ +export const zSchemaLtx2ImageToVideoFastInput = z.object({ + prompt: z.string().min(1).max(5000).register(z.globalRegistry, { + description: 'The prompt to generate the video from', + }), + aspect_ratio: z.optional( + z.enum(['16:9']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), + duration: z.optional( + z + .union([ + z.literal(6), + z.literal(8), + z.literal(10), + z.literal(12), + z.literal(14), + z.literal(16), + z.literal(18), + z.literal(20), + ]) + .register(z.globalRegistry, { + description: + 'The duration of the generated video in seconds. The fast model supports 6-20 seconds. Note: Durations longer than 10 seconds (12, 14, 16, 18, 20) are only supported with 25 FPS and 1080p resolution.', + }), + ), + generate_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to generate audio for the generated video', + }), + ) + .default(true), + resolution: z.optional( + z.enum(['1080p', '1440p', '2160p']).register(z.globalRegistry, { + description: 'The resolution of the generated video', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: + 'URL of the image to generate the video from. Must be publicly accessible or base64 data URI. 
Supports PNG, JPEG, WebP, AVIF, and HEIF formats.', + }), + fps: z.optional( + z.union([z.literal(25), z.literal(50)]).register(z.globalRegistry, { + description: 'The frames per second of the generated video', + }), + ), +}) + +/** + * OmniVideoReferenceToVideoOutput + */ +export const zSchemaKlingVideoO1ReferenceToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * OmniVideoElementInput + */ +export const zSchemaOmniVideoElementInput = z.object({ + reference_image_urls: z.optional( + z.array(z.string()).register(z.globalRegistry, { + description: + 'Additional reference images from different angles. 1-4 images supported. At least one image is required.', + }), + ), + frontal_image_url: z.string().register(z.globalRegistry, { + description: + 'The frontal image of the element (main view).\n\nMax file size: 10.0MB, Min width: 300px, Min height: 300px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s', + }), +}) + +/** + * OmniVideoReferenceToVideoInput + * + * Input for start-frame video generation with optional reference images and elements. + */ +export const zSchemaKlingVideoO1ReferenceToVideoInput = z + .object({ + prompt: z.string().max(2500).register(z.globalRegistry, { + description: + 'Take @Element1, @Element2 to reference elements and @Image1, @Image2 to reference images in order.', + }), + duration: z.optional( + z + .enum(['3', '4', '5', '6', '7', '8', '9', '10']) + .register(z.globalRegistry, { + description: 'Video duration in seconds.', + }), + ), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video frame.', + }), + ), + elements: z.optional( + z.array(zSchemaOmniVideoElementInput).register(z.globalRegistry, { + description: + 'Elements (characters/objects) to include in the video. Reference in prompt as @Element1, @Element2, etc. Maximum 7 total (elements + reference images + start image).', + }), + ), + image_urls: z.optional( + z.array(z.string()).register(z.globalRegistry, { + description: + 'Additional reference images for style/appearance. Reference in prompt as @Image1, @Image2, etc. Maximum 7 total (elements + reference images + start image).', + }), + ), + }) + .register(z.globalRegistry, { + description: + 'Input for start-frame video generation with optional reference images and elements.', + }) + +/** + * OmniVideoImageToVideoOutput + * + * Output for Kling Omni Video generation. 
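+ *
+ * @example
+ * // Illustrative sketch only: the payload below is a placeholder, not a real
+ * // fal response, and it assumes zSchemaFile accepts an object with a `url`.
+ * const result = zSchemaKlingVideoO1ImageToVideoOutput.safeParse({
+ *   video: { url: 'https://example.com/generated.mp4' },
+ * })
+ * if (result.success) console.log(result.data.video.url)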
+ */ +export const zSchemaKlingVideoO1ImageToVideoOutput = z + .object({ + video: zSchemaFile, + }) + .register(z.globalRegistry, { + description: 'Output for Kling Omni Video generation.', + }) + +/** + * OmniVideoImageToVideoInput + */ +export const zSchemaKlingVideoO1ImageToVideoInput = z.object({ + prompt: z.string().max(2500).register(z.globalRegistry, { + description: + 'Use @Image1 to reference the start frame, @Image2 to reference the end frame.', + }), + duration: z.optional( + z + .enum(['3', '4', '5', '6', '7', '8', '9', '10']) + .register(z.globalRegistry, { + description: 'Video duration in seconds.', + }), + ), + start_image_url: z.string().register(z.globalRegistry, { + description: + 'Image to use as the first frame of the video.\n\nMax file size: 10.0MB, Min width: 300px, Min height: 300px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s', + }), + end_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Image to use as the last frame of the video.\n\nMax file size: 10.0MB, Min width: 300px, Min height: 300px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s', + }), + ), +}) + +/** + * I2VOutputV5_5 + */ +export const zSchemaPixverseV55ImageToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * ImageToVideoRequestV5_5 + */ +export const zSchemaPixverseV55ImageToVideoInput = z.object({ + prompt: z.string(), + resolution: z.optional( + z.enum(['360p', '540p', '720p', '1080p']).register(z.globalRegistry, { + description: 'The resolution of the generated video', + }), + ), + duration: z.optional( + z.enum(['5', '8', '10']).register(z.globalRegistry, { + description: + 'The duration of the generated video in seconds. Longer durations cost more. 1080p videos are limited to 5 or 8 seconds', + }), + ), + style: z.optional( + z + .enum(['anime', '3d_animation', 'clay', 'comic', 'cyberpunk']) + .register(z.globalRegistry, { + description: 'The style of the generated video', + }), + ), + thinking_type: z.optional( + z.enum(['enabled', 'disabled', 'auto']).register(z.globalRegistry, { + description: + "Prompt optimization mode: 'enabled' to optimize, 'disabled' to turn off, 'auto' for model decision", + }), + ), + generate_multi_clip_switch: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Enable multi-clip generation with dynamic camera changes', + }), + ) + .default(false), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to use as the first frame', + }), + generate_audio_switch: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Enable audio generation (BGM, SFX, dialogue)', + }), + ) + .default(false), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt to be used for the generation', + }), + ) + .default(''), +}) + +/** + * TransitionOutputV5_5 + */ +export const zSchemaPixverseV55TransitionOutput = z.object({ + video: zSchemaFile, +}) + +/** + * TransitionRequestV5_5 + */ +export const zSchemaPixverseV55TransitionInput = z.object({ + first_image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to use as the first frame', + }), + aspect_ratio: z.optional( + z.enum(['16:9', '4:3', '1:1', '3:4', '9:16']).register(z.globalRegistry, { + 
description: 'The aspect ratio of the generated video', + }), + ), + resolution: z.optional( + z.enum(['360p', '540p', '720p', '1080p']).register(z.globalRegistry, { + description: 'The resolution of the generated video', + }), + ), + style: z.optional( + z + .enum(['anime', '3d_animation', 'clay', 'comic', 'cyberpunk']) + .register(z.globalRegistry, { + description: 'The style of the generated video', + }), + ), + thinking_type: z.optional( + z.enum(['enabled', 'disabled', 'auto']).register(z.globalRegistry, { + description: + "Prompt optimization mode: 'enabled' to optimize, 'disabled' to turn off, 'auto' for model decision", + }), + ), + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt for the transition', + }), + duration: z.optional( + z.enum(['5', '8', '10']).register(z.globalRegistry, { + description: + 'The duration of the generated video in seconds. Longer durations cost more. 1080p videos are limited to 5 or 8 seconds', + }), + ), + generate_audio_switch: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Enable audio generation (BGM, SFX, dialogue)', + }), + ) + .default(false), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n ', + }), + ), + end_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'URL of the image to use as the last frame', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt to be used for the generation', + }), + ) + .default(''), +}) + +/** + * EffectOutput + */ +export const zSchemaPixverseV55EffectsOutput = z.object({ + video: zSchemaFile, +}) + +/** + * EffectInputV5_5 + */ +export const zSchemaPixverseV55EffectsInput = z.object({ + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt to be used for the generation', + }), + ) + .default(''), + duration: z.optional( + z.enum(['5', '8', '10']).register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ), + resolution: z.optional( + z.enum(['360p', '540p', '720p', '1080p']).register(z.globalRegistry, { + description: 'The resolution of the generated video.', + }), + ), + thinking_type: z.optional( + z.enum(['enabled', 'disabled', 'auto']).register(z.globalRegistry, { + description: + "Prompt optimization mode: 'enabled' to optimize, 'disabled' to turn off, 'auto' for model decision", + }), + ), + effect: z + .enum([ + 'Kiss Me AI', + 'Kiss', + 'Muscle Surge', + 'Warmth of Jesus', + 'Anything, Robot', + 'The Tiger Touch', + 'Hug', + 'Holy Wings', + 'Microwave', + 'Zombie Mode', + 'Squid Game', + 'Baby Face', + 'Black Myth: Wukong', + 'Long Hair Magic', + 'Leggy Run', + 'Fin-tastic Mermaid', + 'Punch Face', + 'Creepy Devil Smile', + 'Thunder God', + 'Eye Zoom Challenge', + "Who's Arrested?", + 'Baby Arrived', + 'Werewolf Rage', + 'Bald Swipe', + 'BOOM DROP', + 'Huge Cutie', + 'Liquid Metal', + 'Sharksnap!', + 'Dust Me Away', + '3D Figurine Factor', + 'Bikini Up', + 'My Girlfriends', + 'My Boyfriends', + 'Subject 3 Fever', + 'Earth Zoom', + 'Pole Dance', + 'Vroom Dance', + 'GhostFace Terror', + 'Dragon Evoker', + 'Skeletal Bae', + 'Summoning succubus', + 'Halloween Voodoo Doll', + '3D Naked-Eye AD', + 'Package Explosion', + 'Dishes Served', + 'Ocean ad', + 'Supermarket AD', + 'Tree doll', + 'Come Feel My Abs', + 'The Bicep Flex', + 
'London Elite Vibe', + 'Flora Nymph Gown', + 'Christmas Costume', + "It's Snowy", + 'Reindeer Cruiser', + 'Snow Globe Maker', + 'Pet Christmas Outfit', + 'Adopt a Polar Pal', + 'Cat Christmas Box', + 'Starlight Gift Box', + 'Xmas Poster', + 'Pet Christmas Tree', + 'City Santa Hat', + 'Stocking Sweetie', + 'Christmas Night', + 'Xmas Front Page Karma', + "Grinch's Xmas Hijack", + 'Giant Product', + 'Truck Fashion Shoot', + 'Beach AD', + 'Shoal Surround', + 'Mechanical Assembly', + 'Lighting AD', + 'Billboard AD', + 'Product close-up', + 'Parachute Delivery', + 'Dreamlike Cloud', + 'Macaron Machine', + 'Poster AD', + 'Truck AD', + 'Graffiti AD', + '3D Figurine Factory', + 'The Exclusive First Class', + 'Art Zoom Challenge', + 'I Quit', + 'Hitchcock Dolly Zoom', + 'Smell the Lens', + 'I believe I can fly', + 'Strikout Dance', + 'Pixel World', + 'Mint in Box', + 'Hands up, Hand', + 'Flora Nymph Go', + 'Somber Embrace', + 'Beam me up', + 'Suit Swagger', + ]) + .register(z.globalRegistry, { + description: 'The effect to apply to the video', + }), + image_url: z.string().register(z.globalRegistry, { + description: + 'Optional URL of the image to use as the first frame. If not provided, generates from text', + }), +}) + +/** + * ImageToVideoV26ProOutput + */ +export const zSchemaKlingVideoV26ProImageToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * ImageToVideoV26ProRequest + */ +export const zSchemaKlingVideoV26ProImageToVideoInput = z.object({ + prompt: z.string().max(2500), + duration: z.optional( + z.enum(['5', '10']).register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ), + voice_ids: z.optional( + z.array(z.string()).register(z.globalRegistry, { + description: + 'List of voice IDs to use for voice control. Reference voices in the prompt using <<>>, <<>>. Maximum 2 voices allowed. When provided and referenced in prompt, enables voice control billing.', + }), + ), + generate_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to generate native audio for the video. Supports Chinese and English voice output. Other languages are automatically translated to English. 
For English speech, use lowercase letters; for acronyms or proper nouns, use uppercase.', + }), + ) + .default(true), + start_image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to be used for the video', + }), + end_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'URL of the image to be used for the end of the video', + }), + ), + negative_prompt: z + .optional(z.string().max(2500)) + .default('blur, distort, and low quality'), +}) + +/** + * AIAvatarOutput + */ +export const zSchemaKlingVideoAiAvatarV2StandardOutput = z.object({ + duration: z.number().register(z.globalRegistry, { + description: 'Duration of the output video in seconds.', + }), + video: zSchemaFile, +}) + +/** + * AIAvatarInput + */ +export const zSchemaKlingVideoAiAvatarV2StandardInput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The prompt to use for the video generation.', + }), + ) + .default('.'), + audio_url: z.string().register(z.globalRegistry, { + description: 'The URL of the audio file.', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to use as your avatar', + }), +}) + +/** + * AIAvatarOutput + */ +export const zSchemaKlingVideoAiAvatarV2ProOutput = z.object({ + duration: z.number().register(z.globalRegistry, { + description: 'Duration of the output video in seconds.', + }), + video: zSchemaFile, +}) + +/** + * AIAvatarInput + */ +export const zSchemaKlingVideoAiAvatarV2ProInput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The prompt to use for the video generation.', + }), + ) + .default('.'), + audio_url: z.string().register(z.globalRegistry, { + description: 'The URL of the audio file.', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to use as your avatar', + }), +}) + +/** + * AuroraOutputModel + */ +export const zSchemaCreatifyAuroraOutput = z.object({ + video: zSchemaVideoFile, +}) + +/** + * AuroraInputModel + */ +export const zSchemaCreatifyAuroraInput = z.object({ + prompt: z.optional( + z.string().register(z.globalRegistry, { + description: 'A text prompt to guide the video generation process.', + }), + ), + resolution: z.optional( + z.enum(['480p', '720p']).register(z.globalRegistry, { + description: 'The resolution of the generated video.', + }), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(5).register(z.globalRegistry, { + description: 'Guidance scale to be used for text prompt adherence.', + }), + ) + .default(1), + audio_guidance_scale: z + .optional( + z.number().gte(0).lte(5).register(z.globalRegistry, { + description: 'Guidance scale to be used for audio adherence.', + }), + ) + .default(2), + audio_url: z.string().register(z.globalRegistry, { + description: 'The URL of the audio file to be used for video generation.', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image file to be used for video generation.', + }), +}) + +/** + * OmniVideoImageToVideoOutput + * + * Output for Kling Omni Video generation. 
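+ *
+ * @example
+ * // Sketch under assumptions: `completedResponse` is a hypothetical payload
+ * // already fetched from the fal queue; only the shape check is shown here.
+ * const parsed =
+ *   zSchemaKlingVideoO1StandardImageToVideoOutput.safeParse(completedResponse)
+ * if (!parsed.success) console.error(parsed.error.issues)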
+ */ +export const zSchemaKlingVideoO1StandardImageToVideoOutput = z + .object({ + video: zSchemaFile, + }) + .register(z.globalRegistry, { + description: 'Output for Kling Omni Video generation.', + }) + +/** + * OmniVideoImageToVideoInput + */ +export const zSchemaKlingVideoO1StandardImageToVideoInput = z.object({ + prompt: z.string().max(2500).register(z.globalRegistry, { + description: + 'Use @Image1 to reference the start frame, @Image2 to reference the end frame.', + }), + duration: z.optional( + z + .enum(['3', '4', '5', '6', '7', '8', '9', '10']) + .register(z.globalRegistry, { + description: 'Video duration in seconds.', + }), + ), + start_image_url: z.string().register(z.globalRegistry, { + description: + 'Image to use as the first frame of the video.\n\nMax file size: 10.0MB, Min width: 300px, Min height: 300px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s', + }), + end_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Image to use as the last frame of the video.\n\nMax file size: 10.0MB, Min width: 300px, Min height: 300px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s', + }), + ), +}) + +/** + * OmniVideoReferenceToVideoOutput + */ +export const zSchemaKlingVideoO1StandardReferenceToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * OmniVideoReferenceToVideoInput + * + * Input for start-frame video generation with optional reference images and elements. + */ +export const zSchemaKlingVideoO1StandardReferenceToVideoInput = z + .object({ + prompt: z.string().max(2500).register(z.globalRegistry, { + description: + 'Take @Element1, @Element2 to reference elements and @Image1, @Image2 to reference images in order.', + }), + duration: z.optional( + z + .enum(['3', '4', '5', '6', '7', '8', '9', '10']) + .register(z.globalRegistry, { + description: 'Video duration in seconds.', + }), + ), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video frame.', + }), + ), + elements: z.optional( + z.array(zSchemaOmniVideoElementInput).register(z.globalRegistry, { + description: + 'Elements (characters/objects) to include in the video. Reference in prompt as @Element1, @Element2, etc. Maximum 7 total (elements + reference images + start image).', + }), + ), + image_urls: z.optional( + z.array(z.string()).register(z.globalRegistry, { + description: + 'Additional reference images for style/appearance. Reference in prompt as @Image1, @Image2, etc. 
Maximum 7 total (elements + reference images + start image).', + }), + ), + }) + .register(z.globalRegistry, { + description: + 'Input for start-frame video generation with optional reference images and elements.', + }) + +/** + * ImageToVideoOutput + * + * Output for image-to-video generation + */ +export const zSchemaV26ImageToVideoOutput = z + .object({ + actual_prompt: z.optional( + z.string().register(z.globalRegistry, { + description: 'The actual prompt used if prompt rewriting was enabled', + }), + ), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), + video: zSchemaVideoFile, + }) + .register(z.globalRegistry, { + description: 'Output for image-to-video generation', + }) + +/** + * ImageToVideoInput + * + * Input for Wan 2.6 image-to-video generation + */ +export const zSchemaV26ImageToVideoInput = z + .object({ + prompt: z.string().min(1).register(z.globalRegistry, { + description: + 'The text prompt describing the desired video motion. Max 800 characters.', + }), + resolution: z.optional( + z.enum(['720p', '1080p']).register(z.globalRegistry, { + description: 'Video resolution. Valid values: 720p, 1080p', + }), + ), + duration: z.optional( + z.enum(['5', '10', '15']).register(z.globalRegistry, { + description: + 'Duration of the generated video in seconds. Choose between 5, 10 or 15 seconds.', + }), + ), + audio_url: z.optional( + z.string().register(z.globalRegistry, { + description: + '\nURL of the audio to use as the background music. Must be publicly accessible.\nLimit handling: If the audio duration exceeds the duration value (5, 10, or 15 seconds),\nthe audio is truncated to the first N seconds, and the rest is discarded. If\nthe audio is shorter than the video, the remaining part of the video will be silent.\nFor example, if the audio is 3 seconds long and the video duration is 5 seconds, the\nfirst 3 seconds of the output video will have sound, and the last 2 seconds will be silent.\n- Format: WAV, MP3.\n- Duration: 3 to 30 s.\n- File size: Up to 15 MB.\n', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: + 'URL of the image to use as the first frame. Must be publicly accessible or base64 data URI. Image dimensions must be between 240 and 7680.', + }), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt rewriting using LLM.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), + multi_shots: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'When true, enables intelligent multi-shot segmentation. Only active when enable_prompt_expansion is True. Set to false for single-shot generation.', + }), + ) + .default(false), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Negative prompt to describe content to avoid. 
Max 500 characters.', + }), + ) + .default(''), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + }) + .register(z.globalRegistry, { + description: 'Input for Wan 2.6 image-to-video generation', + }) + +/** + * HunyuanVideo15Response + */ +export const zSchemaHunyuanVideoV15ImageToVideoOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * HunyuanVideo15I2VRequest + */ +export const zSchemaHunyuanVideoV15ImageToVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the video from.', + }), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16']).register(z.globalRegistry, { + description: 'The aspect ratio of the video.', + }), + ), + resolution: z.optional( + z.enum(['480p']).register(z.globalRegistry, { + description: 'The resolution of the video.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the reference image for image-to-video generation.', + }), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Enable prompt expansion to enhance the input prompt.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for reproducibility.', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps.', + }), + ) + .default(28), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to guide what not to generate.', + }), + ) + .default(''), + num_frames: z + .optional( + z.int().gte(1).lte(121).register(z.globalRegistry, { + description: 'The number of frames to generate.', + }), + ) + .default(121), +}) + +/** + * LiveAvatarResponse + */ +export const zSchemaLiveAvatarOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaVideoFile, +}) + +/** + * LiveAvatarRequest + */ +export const zSchemaLiveAvatarInput = z.object({ + frames_per_clip: z + .optional( + z.int().gte(16).lte(80).register(z.globalRegistry, { + description: + 'Number of frames per clip. Must be a multiple of 4. Higher values = smoother but slower generation.', + }), + ) + .default(48), + prompt: z.string().register(z.globalRegistry, { + description: + 'A text prompt describing the scene and character. Helps guide the video generation style and context.', + }), + acceleration: z.optional( + z.enum(['none', 'light', 'regular', 'high']).register(z.globalRegistry, { + description: 'Acceleration level for faster video decoding ', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the reference image for avatar generation. The character in this image will be animated.', + }), + num_clips: z + .optional( + z.int().gte(1).lte(100).register(z.globalRegistry, { + description: + 'Number of video clips to generate. Each clip is approximately 3 seconds. Set higher for longer videos.', + }), + ) + .default(10), + audio_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the driving audio file (WAV or MP3). 
The avatar will be animated to match this audio.', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for reproducible generation.', + }), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(10).register(z.globalRegistry, { + description: + 'Classifier-free guidance scale. Higher values follow the prompt more closely.', + }), + ) + .default(0), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Enable safety checker for content moderation.', + }), + ) + .default(true), +}) + +/** + * SeedanceProv15I2VVideoOutput + */ +export const zSchemaBytedanceSeedanceV15ProImageToVideoOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for generation', + }), + video: zSchemaFile, +}) + +/** + * SeedanceProv15ImageToVideoInput + */ +export const zSchemaBytedanceSeedanceV15ProImageToVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt used to generate the video', + }), + resolution: z.optional( + z.enum(['480p', '720p', '1080p']).register(z.globalRegistry, { + description: + 'Video resolution - 480p for faster generation, 720p for balance, 1080p for higher quality', + }), + ), + aspect_ratio: z.optional( + z + .enum(['21:9', '16:9', '4:3', '1:1', '3:4', '9:16']) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), + generate_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to generate audio for the video', + }), + ) + .default(true), + duration: z.optional( + z + .enum(['4', '5', '6', '7', '8', '9', '10', '11', '12']) + .register(z.globalRegistry, { + description: 'Duration of the video in seconds', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image used to generate video', + }), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + camera_fixed: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to fix the camera position', + }), + ) + .default(false), + end_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: + 'The URL of the image the video ends with. Defaults to None.', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed to control video generation. 
Use -1 for random.', + }), + ), +}) + +/** + * KandinskyI2VResponse + */ +export const zSchemaKandinsky5ProImageToVideoOutput = z.object({ + video: z.optional(zSchemaFile), +}) + +/** + * KandinskyI2VRequest + */ +export const zSchemaKandinsky5ProImageToVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the video from.', + }), + resolution: z.optional( + z.enum(['512P', '1024P']).register(z.globalRegistry, { + description: 'Video resolution: 512p or 1024p.', + }), + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: 'Acceleration level for faster generation.', + }), + ), + duration: z.optional( + z.enum(['5s']).register(z.globalRegistry, { + description: 'Video duration.', + }), + ), + num_inference_steps: z.optional(z.int().gte(1).lte(40)).default(28), + image_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the image to use as a reference for the video generation.', + }), +}) + +/** + * Schema referenced but not defined by fal.ai (missing from source OpenAPI spec) + */ +export const zSchemaTrajectoryPoint = z + .record(z.string(), z.unknown()) + .register(z.globalRegistry, { + description: + 'Schema referenced but not defined by fal.ai (missing from source OpenAPI spec)', + }) + +/** + * WanMoveOutput + */ +export const zSchemaWanMoveOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'Random seed used for generation.', + }), + video: zSchemaVideoFile, +}) + +/** + * WANMoveInput + */ +export const zSchemaWanMoveInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Text prompt to guide the video generation.', + }), + trajectories: z + .array(z.array(zSchemaTrajectoryPoint)) + .register(z.globalRegistry, { + description: + 'A list of trajectories. Each trajectory list means the movement of one object.', + }), + image_url: z.string().register(z.globalRegistry, { + description: + 'URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped.', + }), + guidance_scale: z + .optional( + z.number().gte(1).register(z.globalRegistry, { + description: + 'Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. Higher values give better quality but take longer.', + }), + ) + .default(40), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. 
If None, a random seed is chosen.', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt to guide the video generation.', + }), + ) + .default( + '色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走', + ), +}) + +/** + * LTX2ImageToVideoOutput + */ +export const zSchemaLtx219bImageToVideoOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for the generation.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for the random number generator.', + }), + video: zSchemaVideoFile, +}) + +/** + * LTX2ImageToVideoInput + */ +export const zSchemaLtx219bImageToVideoInput = z.object({ + use_multiscale: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details.', + }), + ) + .default(true), + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for the generation.', + }), + acceleration: z.optional( + z.enum(['none', 'regular', 'high', 'full']).register(z.globalRegistry, { + description: 'The acceleration level to use.', + }), + ), + generate_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to generate audio for the video.', + }), + ) + .default(true), + fps: z + .optional( + z.number().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frames per second of the generated video.', + }), + ) + .default(25), + camera_lora: z.optional( + z + .enum([ + 'dolly_in', + 'dolly_out', + 'dolly_left', + 'dolly_right', + 'jib_up', + 'jib_down', + 'static', + 'none', + ]) + .register(z.globalRegistry, { + description: + 'The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.', + }), + ), + video_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'auto', + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + camera_lora_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The scale of the camera LoRA to use. 
This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.', + }), + ) + .default(1), + image_strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The strength of the image to use for the video generation.', + }), + ) + .default(1), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to generate the video from.', + }), + ) + .default( + 'blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, off-sync audio,incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts.', + ), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'The guidance scale to use.', + }), + ) + .default(3), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + video_output_type: z.optional( + z + .enum(['X264 (.mp4)', 'VP9 (.webm)', 'PRORES4444 (.mov)', 'GIF (.gif)']) + .register(z.globalRegistry, { + description: 'The output type of the generated video.', + }), + ), + num_frames: z + .optional( + z.int().gte(9).lte(481).register(z.globalRegistry, { + description: 'The number of frames to generate.', + }), + ) + .default(121), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to generate the video from.', + }), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(8).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to use.', + }), + ) + .default(40), + seed: z.optional(z.union([z.int(), z.unknown()])), +}) + +/** + * LTX2ImageToVideoOutput + */ +export const zSchemaLtx219bImageToVideoLoraOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for the generation.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for the random number 
generator.', + }), + video: zSchemaVideoFile, +}) + +/** + * LoRAInput + * + * LoRA weight configuration. + */ +export const zSchemaLoRaInput = z + .object({ + path: z.string().register(z.globalRegistry, { + description: 'URL, HuggingFace repo ID (owner/repo) to lora weights.', + }), + scale: z + .optional( + z.number().gte(0).lte(4).register(z.globalRegistry, { + description: 'Scale factor for LoRA application (0.0 to 4.0).', + }), + ) + .default(1), + weight_name: z.optional(z.union([z.string(), z.unknown()])), + }) + .register(z.globalRegistry, { + description: 'LoRA weight configuration.', + }) + +/** + * LTX2LoRAImageToVideoInput + */ +export const zSchemaLtx219bImageToVideoLoraInput = z.object({ + use_multiscale: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details.', + }), + ) + .default(true), + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for the generation.', + }), + acceleration: z.optional( + z.enum(['none', 'regular', 'high', 'full']).register(z.globalRegistry, { + description: 'The acceleration level to use.', + }), + ), + generate_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to generate audio for the video.', + }), + ) + .default(true), + fps: z + .optional( + z.number().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frames per second of the generated video.', + }), + ) + .default(25), + loras: z.array(zSchemaLoRaInput).register(z.globalRegistry, { + description: 'The LoRAs to use for the generation.', + }), + camera_lora: z.optional( + z + .enum([ + 'dolly_in', + 'dolly_out', + 'dolly_left', + 'dolly_right', + 'jib_up', + 'jib_down', + 'static', + 'none', + ]) + .register(z.globalRegistry, { + description: + 'The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.', + }), + ), + video_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'auto', + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + camera_lora_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The scale of the camera LoRA to use. 
This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.', + }), + ) + .default(1), + image_strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The strength of the image to use for the video generation.', + }), + ) + .default(1), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to generate the video from.', + }), + ) + .default( + 'blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, off-sync audio,incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts.', + ), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'The guidance scale to use.', + }), + ) + .default(3), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + video_output_type: z.optional( + z + .enum(['X264 (.mp4)', 'VP9 (.webm)', 'PRORES4444 (.mov)', 'GIF (.gif)']) + .register(z.globalRegistry, { + description: 'The output type of the generated video.', + }), + ), + num_frames: z + .optional( + z.int().gte(9).lte(481).register(z.globalRegistry, { + description: 'The number of frames to generate.', + }), + ) + .default(121), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to generate the video from.', + }), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(8).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to use.', + }), + ) + .default(40), + seed: z.optional(z.union([z.int(), z.unknown()])), +}) + +/** + * LTX2ImageToVideoOutput + */ +export const zSchemaLtx219bDistilledImageToVideoOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for the generation.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for the random 
number generator.', + }), + video: zSchemaVideoFile, +}) + +/** + * LTX2DistilledImageToVideoInput + */ +export const zSchemaLtx219bDistilledImageToVideoInput = z.object({ + use_multiscale: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details.', + }), + ) + .default(true), + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for the generation.', + }), + acceleration: z.optional( + z.enum(['none', 'regular', 'high', 'full']).register(z.globalRegistry, { + description: 'The acceleration level to use.', + }), + ), + generate_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to generate audio for the video.', + }), + ) + .default(true), + fps: z + .optional( + z.number().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frames per second of the generated video.', + }), + ) + .default(25), + camera_lora: z.optional( + z + .enum([ + 'dolly_in', + 'dolly_out', + 'dolly_left', + 'dolly_right', + 'jib_up', + 'jib_down', + 'static', + 'none', + ]) + .register(z.globalRegistry, { + description: + 'The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.', + }), + ), + video_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'auto', + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + num_frames: z + .optional( + z.int().gte(9).lte(481).register(z.globalRegistry, { + description: 'The number of frames to generate.', + }), + ) + .default(121), + image_strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The strength of the image to use for the video generation.', + }), + ) + .default(1), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to generate the video from.', + }), + ) + .default( + 'blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, off-sync audio,incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts.', + ), + camera_lora_scale: z + .optional( + 
z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The scale of the camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.', + }), + ) + .default(1), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + video_output_type: z.optional( + z + .enum(['X264 (.mp4)', 'VP9 (.webm)', 'PRORES4444 (.mov)', 'GIF (.gif)']) + .register(z.globalRegistry, { + description: 'The output type of the generated video.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to generate the video from.', + }), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), + seed: z.optional(z.union([z.int(), z.unknown()])), +}) + +/** + * LTX2ImageToVideoOutput + */ +export const zSchemaLtx219bDistilledImageToVideoLoraOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for the generation.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for the random number generator.', + }), + video: zSchemaVideoFile, +}) + +/** + * LTX2LoRADistilledImageToVideoInput + */ +export const zSchemaLtx219bDistilledImageToVideoLoraInput = z.object({ + use_multiscale: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details.', + }), + ) + .default(true), + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for the generation.', + }), + acceleration: z.optional( + z.enum(['none', 'regular', 'high', 'full']).register(z.globalRegistry, { + description: 'The acceleration level to use.', + }), + ), + generate_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to generate audio for the video.', + }), + ) + .default(true), + fps: z + .optional( + z.number().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frames per second of the generated video.', + }), + ) + .default(25), + loras: z.array(zSchemaLoRaInput).register(z.globalRegistry, { + description: 'The LoRAs to use for the generation.', + }), + camera_lora: z.optional( + z + .enum([ + 'dolly_in', + 'dolly_out', + 'dolly_left', + 'dolly_right', + 'jib_up', + 'jib_down', + 'static', + 'none', + ]) + .register(z.globalRegistry, { + description: + 'The camera LoRA to use. 
This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.', + }), + ), + video_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'auto', + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + num_frames: z + .optional( + z.int().gte(9).lte(481).register(z.globalRegistry, { + description: 'The number of frames to generate.', + }), + ) + .default(121), + image_strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The strength of the image to use for the video generation.', + }), + ) + .default(1), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to generate the video from.', + }), + ) + .default( + 'blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, off-sync audio,incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts.', + ), + camera_lora_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The scale of the camera LoRA to use. 
This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.', + }), + ) + .default(1), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + video_output_type: z.optional( + z + .enum(['X264 (.mp4)', 'VP9 (.webm)', 'PRORES4444 (.mov)', 'GIF (.gif)']) + .register(z.globalRegistry, { + description: 'The output type of the generated video.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to generate the video from.', + }), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), + seed: z.optional(z.union([z.int(), z.unknown()])), +}) + +/** + * ImageToVideoOutput + * + * Output for image-to-video generation + */ +export const zSchemaV26ImageToVideoFlashOutput = z + .object({ + actual_prompt: z.optional( + z.string().register(z.globalRegistry, { + description: 'The actual prompt used if prompt rewriting was enabled', + }), + ), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), + video: zSchemaVideoFile, + }) + .register(z.globalRegistry, { + description: 'Output for image-to-video generation', + }) + +/** + * ImageToVideoInput + * + * Input for Wan 2.6 image-to-video generation + */ +export const zSchemaV26ImageToVideoFlashInput = z + .object({ + prompt: z.string().min(1).register(z.globalRegistry, { + description: + 'The text prompt describing the desired video motion. Max 800 characters.', + }), + resolution: z.optional( + z.enum(['720p', '1080p']).register(z.globalRegistry, { + description: 'Video resolution. Valid values: 720p, 1080p', + }), + ), + duration: z.optional( + z.enum(['5', '10', '15']).register(z.globalRegistry, { + description: + 'Duration of the generated video in seconds. Choose between 5, 10 or 15 seconds.', + }), + ), + audio_url: z.optional( + z.string().register(z.globalRegistry, { + description: + '\nURL of the audio to use as the background music. Must be publicly accessible.\nLimit handling: If the audio duration exceeds the duration value (5, 10, or 15 seconds),\nthe audio is truncated to the first N seconds, and the rest is discarded. If\nthe audio is shorter than the video, the remaining part of the video will be silent.\nFor example, if the audio is 3 seconds long and the video duration is 5 seconds, the\nfirst 3 seconds of the output video will have sound, and the last 2 seconds will be silent.\n- Format: WAV, MP3.\n- Duration: 3 to 30 s.\n- File size: Up to 15 MB.\n', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: + 'URL of the image to use as the first frame. Must be publicly accessible or base64 data URI. 
Image dimensions must be between 240 and 7680.', + }), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt rewriting using LLM.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), + multi_shots: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'When true, enables intelligent multi-shot segmentation. Only active when enable_prompt_expansion is True. Set to false for single-shot generation.', + }), + ) + .default(false), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Negative prompt to describe content to avoid. Max 500 characters.', + }), + ) + .default(''), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + }) + .register(z.globalRegistry, { + description: 'Input for Wan 2.6 image-to-video generation', + }) + +/** + * Q2ProReferenceToVideoOutput + */ +export const zSchemaViduQ2ReferenceToVideoProOutput = z.object({ + video: zSchemaFile, +}) + +/** + * Q2ProReferenceToVideoRequest + */ +export const zSchemaViduQ2ReferenceToVideoProInput = z.object({ + prompt: z.string().max(2000).register(z.globalRegistry, { + description: 'Text prompt for video generation, max 2000 characters', + }), + resolution: z.optional( + z.enum(['540p', '720p', '1080p']).register(z.globalRegistry, { + description: 'Output video resolution', + }), + ), + aspect_ratio: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Aspect ratio of the output video (e.g., auto, 16:9, 9:16, 1:1, or any W:H)', + }), + ) + .default('16:9'), + duration: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: + 'Duration of the video in seconds (0 for automatic duration)', + }), + ) + .default(4), + reference_video_urls: z.optional( + z.array(z.string()).register(z.globalRegistry, { + description: + 'URLs of the reference videos for video editing or motion reference. Supports up to 2 videos.', + }), + ), + bgm: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to add background music to the generated video', + }), + ) + .default(false), + reference_image_urls: z.optional( + z.array(z.string()).register(z.globalRegistry, { + description: + 'URLs of the reference images for subject appearance. If videos are provided, up to 4 images are allowed; otherwise up to 7 images.', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. 
If None, a random seed is chosen.', + }), + ), + movement_amplitude: z.optional( + z.enum(['auto', 'small', 'medium', 'large']).register(z.globalRegistry, { + description: 'The movement amplitude of objects in the frame', + }), + ), +}) + +/** + * I2VOutputV5_5 + */ +export const zSchemaPixverseV56ImageToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * ImageToVideoRequestV5_6 + */ +export const zSchemaPixverseV56ImageToVideoInput = z.object({ + prompt: z.string(), + resolution: z.optional( + z.enum(['360p', '540p', '720p', '1080p']).register(z.globalRegistry, { + description: 'The resolution of the generated video', + }), + ), + duration: z.optional( + z.enum(['5', '8', '10']).register(z.globalRegistry, { + description: + 'The duration of the generated video in seconds. 1080p videos are limited to 5 or 8 seconds', + }), + ), + style: z.optional( + z + .enum(['anime', '3d_animation', 'clay', 'comic', 'cyberpunk']) + .register(z.globalRegistry, { + description: 'The style of the generated video', + }), + ), + thinking_type: z.optional( + z.enum(['enabled', 'disabled', 'auto']).register(z.globalRegistry, { + description: + "Prompt optimization mode: 'enabled' to optimize, 'disabled' to turn off, 'auto' for model decision", + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to use as the first frame', + }), + generate_audio_switch: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Enable audio generation (BGM, SFX, dialogue)', + }), + ) + .default(false), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt to be used for the generation', + }), + ) + .default(''), +}) + +/** + * TransitionOutputV5_5 + */ +export const zSchemaPixverseV56TransitionOutput = z.object({ + video: zSchemaFile, +}) + +/** + * TransitionRequestV5_6 + */ +export const zSchemaPixverseV56TransitionInput = z.object({ + first_image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to use as the first frame', + }), + aspect_ratio: z.optional( + z.enum(['16:9', '4:3', '1:1', '3:4', '9:16']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), + resolution: z.optional( + z.enum(['360p', '540p', '720p', '1080p']).register(z.globalRegistry, { + description: 'The resolution of the generated video', + }), + ), + style: z.optional( + z + .enum(['anime', '3d_animation', 'clay', 'comic', 'cyberpunk']) + .register(z.globalRegistry, { + description: 'The style of the generated video', + }), + ), + thinking_type: z.optional( + z.enum(['enabled', 'disabled', 'auto']).register(z.globalRegistry, { + description: + "Prompt optimization mode: 'enabled' to optimize, 'disabled' to turn off, 'auto' for model decision", + }), + ), + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt for the transition', + }), + duration: z.optional( + z.enum(['5', '8', '10']).register(z.globalRegistry, { + description: + 'The duration of the generated video in seconds. 
1080p videos are limited to 5 or 8 seconds', + }), + ), + generate_audio_switch: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Enable audio generation (BGM, SFX, dialogue)', + }), + ) + .default(false), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n ', + }), + ), + end_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'URL of the image to use as the last frame', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt to be used for the generation', + }), + ) + .default(''), +}) + +/** + * WanI2VResponse + */ +export const zSchemaWanI2vOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * WanI2VRequest + */ +export const zSchemaWanI2vInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide video generation.', + }), + shift: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'Shift parameter for video generation.', + }), + ) + .default(5), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level to use. The more acceleration, the faster the generation, but with lower quality. The recommended value is 'regular'.", + }), + ), + frames_per_second: z + .optional( + z.int().gte(5).lte(24).register(z.globalRegistry, { + description: + 'Frames per second of the generated video. Must be between 5 to 24.', + }), + ) + .default(16), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(false), + num_frames: z + .optional( + z.int().gte(81).lte(100).register(z.globalRegistry, { + description: + 'Number of frames to generate. Must be between 81 to 100 (inclusive). If the number of frames is greater than 81, the video will be generated with 1.25x more billing units.', + }), + ) + .default(81), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for video generation.', + }), + ) + .default( + 'bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards', + ), + aspect_ratio: z.optional( + z.enum(['auto', '16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: + "Aspect ratio of the generated video. If 'auto', the aspect ratio will be determined automatically based on the input image.", + }), + ), + resolution: z.optional( + z.enum(['480p', '720p']).register(z.globalRegistry, { + description: + 'Resolution of the generated video (480p or 720p). 480p is 0.5 billing units, and 720p is 1 billing unit.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: + 'URL of the input image. 
If the input image does not match the chosen aspect ratio, it is resized and center cropped.', + }), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(2).lte(40).register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. Higher values give better quality but take longer.', + }), + ) + .default(30), + guide_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.', + }), + ) + .default(5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), +}) + +/** + * ImageToVideoV2MasterOutput + */ +export const zSchemaKlingVideoV2MasterImageToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * ImageToVideoV2MasterRequest + */ +export const zSchemaKlingVideoV2MasterImageToVideoInput = z.object({ + prompt: z.string().max(2500), + duration: z.optional( + z.enum(['5', '10']).register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to be used for the video', + }), + negative_prompt: z + .optional(z.string().max(2500)) + .default('blur, distort, and low quality'), + cfg_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt.\n ', + }), + ) + .default(0.5), +}) + +/** + * I2VOutputV4 + */ +export const zSchemaPixverseV45ImageToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * ImageToVideoRequestV4 + */ +export const zSchemaPixverseV45ImageToVideoInput = z.object({ + prompt: z.string(), + resolution: z.optional( + z.enum(['360p', '540p', '720p', '1080p']).register(z.globalRegistry, { + description: 'The resolution of the generated video', + }), + ), + duration: z.optional( + z.enum(['5', '8']).register(z.globalRegistry, { + description: + 'The duration of the generated video in seconds. 8s videos cost double. 
1080p videos are limited to 5 seconds', + }), + ), + style: z.optional( + z + .enum(['anime', '3d_animation', 'clay', 'comic', 'cyberpunk']) + .register(z.globalRegistry, { + description: 'The style of the generated video', + }), + ), + camera_movement: z.optional( + z + .enum([ + 'horizontal_left', + 'horizontal_right', + 'vertical_up', + 'vertical_down', + 'zoom_in', + 'zoom_out', + 'crane_up', + 'quickly_zoom_in', + 'quickly_zoom_out', + 'smooth_zoom_in', + 'camera_rotation', + 'robo_arm', + 'super_dolly_out', + 'whip_pan', + 'hitchcock', + 'left_follow', + 'right_follow', + 'pan_left', + 'pan_right', + 'fix_bg', + ]) + .register(z.globalRegistry, { + description: 'The type of camera movement to apply to the video', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to use as the first frame', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt to be used for the generation', + }), + ) + .default(''), +}) + +/** + * ImageToVideoV21StandardOutput + */ +export const zSchemaKlingVideoV21StandardImageToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * ImageToVideoV21StandardRequest + */ +export const zSchemaKlingVideoV21StandardImageToVideoInput = z.object({ + prompt: z.string().max(2500), + duration: z.optional( + z.enum(['5', '10']).register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to be used for the video', + }), + negative_prompt: z + .optional(z.string().max(2500)) + .default('blur, distort, and low quality'), + cfg_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt.\n ', + }), + ) + .default(0.5), +}) + +/** + * ImageToVideoV21MasterOutput + */ +export const zSchemaKlingVideoV21MasterImageToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * ImageToVideoV21MasterRequest + */ +export const zSchemaKlingVideoV21MasterImageToVideoInput = z.object({ + prompt: z.string().max(2500), + duration: z.optional( + z.enum(['5', '10']).register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to be used for the video', + }), + negative_prompt: z + .optional(z.string().max(2500)) + .default('blur, distort, and low quality'), + cfg_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt.\n ', + }), + ) + .default(0.5), +}) + +/** + * SeedanceProI2VVideoOutput + */ +export const zSchemaBytedanceSeedanceV1ProImageToVideoOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for generation', + }), + video: zSchemaFile, +}) + +/** + * SeedanceProImageToVideoInput + */ +export const zSchemaBytedanceSeedanceV1ProImageToVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt used to generate the video', + }), + 
resolution: z.optional( + z.enum(['480p', '720p', '1080p']).register(z.globalRegistry, { + description: + 'Video resolution - 480p for faster generation, 720p for balance, 1080p for higher quality', + }), + ), + aspect_ratio: z.optional( + z + .enum(['21:9', '16:9', '4:3', '1:1', '3:4', '9:16', 'auto']) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), + duration: z.optional( + z + .enum(['2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12']) + .register(z.globalRegistry, { + description: 'Duration of the video in seconds', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image used to generate video', + }), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + camera_fixed: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to fix the camera position', + }), + ) + .default(false), + end_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: + 'The URL of the image the video ends with. Defaults to None.', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed to control video generation. Use -1 for random.', + }), + ), +}) + +/** + * ImageToVideoHailuo02Output + */ +export const zSchemaMinimaxHailuo02StandardImageToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * StandardImageToVideoHailuo02Input + */ +export const zSchemaMinimaxHailuo02StandardImageToVideoInput = z.object({ + prompt_optimizer: z + .optional( + z.boolean().register(z.globalRegistry, { + description: "Whether to use the model's prompt optimizer", + }), + ) + .default(true), + duration: z.optional( + z.enum(['6', '10']).register(z.globalRegistry, { + description: + 'The duration of the video in seconds. 
10 seconds videos are not supported for 1080p resolution.', + }), + ), + resolution: z.optional( + z.enum(['512P', '768P']).register(z.globalRegistry, { + description: 'The resolution of the generated video.', + }), + ), + prompt: z.string().max(2000), + end_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Optional URL of the image to use as the last frame of the video', + }), + ), + image_url: z.string(), +}) + +/** + * ImageToVideoV25ProOutput + */ +export const zSchemaKlingVideoV25TurboProImageToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * ImageToVideoV25ProRequest + */ +export const zSchemaKlingVideoV25TurboProImageToVideoInput = z.object({ + prompt: z.string().max(2500), + duration: z.optional( + z.enum(['5', '10']).register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to be used for the video', + }), + negative_prompt: z + .optional(z.string().max(2500)) + .default('blur, distort, and low quality'), + tail_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'URL of the image to be used for the end of the video', + }), + ), + cfg_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt.\n ', + }), + ) + .default(0.5), +}) + +/** + * VideoOutput + * + * Base output for video generation + */ +export const zSchemaWan25PreviewImageToVideoOutput = z + .object({ + actual_prompt: z.optional( + z.string().register(z.globalRegistry, { + description: 'The actual prompt used if prompt rewriting was enabled', + }), + ), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), + video: zSchemaVideoFile, + }) + .register(z.globalRegistry, { + description: 'Base output for video generation', + }) + +/** + * ImageToVideoInput + * + * Input for image-to-video generation + */ +export const zSchemaWan25PreviewImageToVideoInput = z + .object({ + prompt: z.string().min(1).register(z.globalRegistry, { + description: + 'The text prompt describing the desired video motion. Max 800 characters.', + }), + resolution: z.optional( + z.enum(['480p', '720p', '1080p']).register(z.globalRegistry, { + description: 'Video resolution. Valid values: 480p, 720p, 1080p', + }), + ), + duration: z.optional( + z.enum(['5', '10']).register(z.globalRegistry, { + description: + 'Duration of the generated video in seconds. Choose between 5 or 10 seconds.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: + 'URL of the image to use as the first frame. Must be publicly accessible or base64 data URI.\n\nMax file size: 25.0MB, Min width: 360px, Min height: 360px, Max width: 2000px, Max height: 2000px, Timeout: 20.0s', + }), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), + audio_url: z.optional( + z.string().register(z.globalRegistry, { + description: + '\nURL of the audio to use as the background music. 
Must be publicly accessible.\nLimit handling: If the audio duration exceeds the duration value (5 or 10 seconds),\nthe audio is truncated to the first 5 or 10 seconds, and the rest is discarded. If\nthe audio is shorter than the video, the remaining part of the video will be silent.\nFor example, if the audio is 3 seconds long and the video duration is 5 seconds, the\nfirst 3 seconds of the output video will have sound, and the last 2 seconds will be silent.\n- Format: WAV, MP3.\n- Duration: 3 to 30 s.\n- File size: Up to 15 MB.\n', + }), + ), + negative_prompt: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Negative prompt to describe content to avoid. Max 500 characters.', + }), + ), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt rewriting using LLM.', + }), + ) + .default(true), + }) + .register(z.globalRegistry, { + description: 'Input for image-to-video generation', + }) + +/** + * ProImageToVideoHailuo23Output + */ +export const zSchemaMinimaxHailuo23ProImageToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * ProImageToVideoHailuo23Input + */ +export const zSchemaMinimaxHailuo23ProImageToVideoInput = z.object({ + prompt_optimizer: z + .optional( + z.boolean().register(z.globalRegistry, { + description: "Whether to use the model's prompt optimizer", + }), + ) + .default(true), + prompt: z.string().min(1).max(2000).register(z.globalRegistry, { + description: 'Text prompt for video generation', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to use as the first frame', + }), +}) + +/** + * VideoOutput + */ +export const zSchemaMinimaxVideo01ImageToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * ImageToVideoRequest + */ +export const zSchemaMinimaxVideo01ImageToVideoInput = z.object({ + prompt_optimizer: z + .optional( + z.boolean().register(z.globalRegistry, { + description: "Whether to use the model's prompt optimizer", + }), + ) + .default(true), + prompt: z.string().max(2000), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to use as the first frame', + }), +}) + +/** + * I2VOutput + */ +export const zSchemaKlingVideoV16ProImageToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * ProImageToVideoRequest + */ +export const zSchemaKlingVideoV16ProImageToVideoInput = z.object({ + prompt: z.string().max(2500), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video frame', + }), + ), + negative_prompt: z + .optional(z.string().max(2500)) + .default('blur, distort, and low quality'), + duration: z.optional( + z.enum(['5', '10']).register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ), + image_url: z.string(), + tail_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'URL of the image to be used for the end of the video', + }), + ), + cfg_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt.\n ', + }), + ) + .default(0.5), +}) + +/** + * ImageToVideoOutput + */ +export const zSchemaVeo2ImageToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * ImageToVideoInput + */ +export const zSchemaVeo2ImageToVideoInput = z.object({ + prompt: 
z.string().register(z.globalRegistry, { + description: 'The text prompt describing how the image should be animated', + }), + duration: z.optional( + z.enum(['5s', '6s', '7s', '8s']).register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ), + aspect_ratio: z.optional( + z + .enum(['auto', 'auto_prefer_portrait', '16:9', '9:16']) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: + 'URL of the input image to animate. Should be 720p or higher resolution.', + }), +}) + +/** + * WanProI2VResponse + */ +export const zSchemaWanProImageToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * WanProI2VRequest + */ +export const zSchemaWanProImageToVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the video', + }), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker', + }), + ) + .default(true), + seed: z.optional(z.union([z.int(), z.unknown()])), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to generate the video from', + }), +}) + +/** + * WanEffectsOutput + */ +export const zSchemaWanEffectsOutput = z.object({ + seed: z.int(), + video: zSchemaFile, +}) + +/** + * BaseInput + */ +export const zSchemaWanEffectsInput = z.object({ + effect_type: z.optional( + z + .enum([ + 'squish', + 'muscle', + 'inflate', + 'crush', + 'rotate', + 'gun-shooting', + 'deflate', + 'cakeify', + 'hulk', + 'baby', + 'bride', + 'classy', + 'puppy', + 'snow-white', + 'disney-princess', + 'mona-lisa', + 'painting', + 'pirate-captain', + 'princess', + 'jungle', + 'samurai', + 'vip', + 'warrior', + 'zen', + 'assassin', + 'timelapse', + 'tsunami', + 'fire', + 'zoom-call', + 'doom-fps', + 'fus-ro-dah', + 'hug-jesus', + 'robot-face-reveal', + 'super-saiyan', + 'jumpscare', + 'laughing', + 'cartoon-jaw-drop', + 'crying', + 'kissing', + 'angry-face', + 'selfie-younger-self', + 'animeify', + 'blast', + ]) + .register(z.globalRegistry, { + description: 'The type of effect to apply to the video.', + }), + ), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: 'Aspect ratio of the output video.', + }), + ), + subject: z.string().register(z.globalRegistry, { + description: + 'The subject to insert into the predefined prompt template for the selected effect.', + }), + lora_scale: z + .optional( + z.number().gte(0.1).lte(2).register(z.globalRegistry, { + description: + 'The scale of the LoRA weight. Used to adjust effect intensity.', + }), + ) + .default(1), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the input image.', + }), + turbo_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to use turbo mode. If True, the video will be generated faster but with lower quality.', + }), + ) + .default(false), + frames_per_second: z + .optional( + z.int().gte(5).lte(24).register(z.globalRegistry, { + description: 'Frames per second of the generated video.', + }), + ) + .default(16), + num_inference_steps: z + .optional( + z.int().gte(2).lte(40).register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. 
Higher values give better quality but take longer.', + }), + ) + .default(30), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), + num_frames: z + .optional( + z.int().gte(81).lte(100).register(z.globalRegistry, { + description: 'Number of frames to generate.', + }), + ) + .default(81), +}) + +export const zSchemaQueueStatus = z.object({ + status: z.enum(['IN_QUEUE', 'IN_PROGRESS', 'COMPLETED']), + request_id: z.string().register(z.globalRegistry, { + description: 'The request id.', + }), + response_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The response url.', + }), + ), + status_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The status url.', + }), + ), + cancel_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The cancel url.', + }), + ), + logs: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The logs.', + }), + ), + metrics: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The metrics.', + }), + ), + queue_position: z.optional( + z.int().register(z.globalRegistry, { + description: 'The queue position.', + }), + ), +}) + +export const zGetFalAiWanEffectsRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiWanEffectsRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanEffectsRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanEffectsRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanEffectsData = z.object({ + body: zSchemaWanEffectsInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanEffectsResponse = zSchemaQueueStatus + +export const zGetFalAiWanEffectsRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiWanEffectsRequestsByRequestIdResponse = + zSchemaWanEffectsOutput + +export const zGetFalAiWanProImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiWanProImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanProImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanProImageToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanProImageToVideoData = z.object({ + body: zSchemaWanProImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanProImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiWanProImageToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiWanProImageToVideoRequestsByRequestIdResponse = + zSchemaWanProImageToVideoOutput + +export const zGetFalAiVeo2ImageToVideoRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiVeo2ImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiVeo2ImageToVideoRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiVeo2ImageToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiVeo2ImageToVideoData = z.object({ + body: zSchemaVeo2ImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
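+ *
+ * A hedged sketch (assuming the `@fal-ai/client` queue API; the endpoint id
+ * is inferred from the schema name): validate the input, submit it, and check
+ * the returned queue status against `zSchemaQueueStatus`.
+ *
+ * @example
+ * ```ts
+ * import { fal } from '@fal-ai/client'
+ *
+ * const input = zSchemaVeo2ImageToVideoInput.parse({
+ *   prompt: 'The portrait slowly turns toward the camera',
+ *   image_url: 'https://example.com/portrait.png', // placeholder
+ * })
+ * const submitted = await fal.queue.submit('fal-ai/veo2/image-to-video', {
+ *   input,
+ * })
+ * const { request_id } = zSchemaQueueStatus.parse(submitted)
+ * ```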
+ */ +export const zPostFalAiVeo2ImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiVeo2ImageToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiVeo2ImageToVideoRequestsByRequestIdResponse = + zSchemaVeo2ImageToVideoOutput + +export const zGetFalAiKlingVideoV16ProImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoV16ProImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoV16ProImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKlingVideoV16ProImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoV16ProImageToVideoData = z.object({ + body: zSchemaKlingVideoV16ProImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKlingVideoV16ProImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiKlingVideoV16ProImageToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiKlingVideoV16ProImageToVideoRequestsByRequestIdResponse = + zSchemaKlingVideoV16ProImageToVideoOutput + +export const zGetFalAiMinimaxVideo01ImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiMinimaxVideo01ImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMinimaxVideo01ImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
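+ *
+ * Cancellation sketch: the `cancel_url` reported by the queue status can be
+ * PUT directly, so no endpoint path is hard-coded. `status` is a previously
+ * parsed queue-status value; `FAL_KEY` is assumed to be set.
+ *
+ * @example
+ * const res = await fetch(status.cancel_url!, {
+ *   method: 'PUT',
+ *   headers: { Authorization: `Key ${process.env.FAL_KEY}` },
+ * })
+ * const { success } =
+ *   zPutFalAiMinimaxVideo01ImageToVideoRequestsByRequestIdCancelResponse.parse(
+ *     await res.json(),
+ *   )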
+ */ +export const zPutFalAiMinimaxVideo01ImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMinimaxVideo01ImageToVideoData = z.object({ + body: zSchemaMinimaxVideo01ImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMinimaxVideo01ImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiMinimaxVideo01ImageToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiMinimaxVideo01ImageToVideoRequestsByRequestIdResponse = + zSchemaMinimaxVideo01ImageToVideoOutput + +export const zGetFalAiMinimaxHailuo23ProImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiMinimaxHailuo23ProImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMinimaxHailuo23ProImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiMinimaxHailuo23ProImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMinimaxHailuo23ProImageToVideoData = z.object({ + body: zSchemaMinimaxHailuo23ProImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMinimaxHailuo23ProImageToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiMinimaxHailuo23ProImageToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiMinimaxHailuo23ProImageToVideoRequestsByRequestIdResponse = + zSchemaMinimaxHailuo23ProImageToVideoOutput + +export const zGetFalAiWan25PreviewImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiWan25PreviewImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWan25PreviewImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiWan25PreviewImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWan25PreviewImageToVideoData = z.object({ + body: zSchemaWan25PreviewImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWan25PreviewImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiWan25PreviewImageToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiWan25PreviewImageToVideoRequestsByRequestIdResponse = + zSchemaWan25PreviewImageToVideoOutput + +export const zGetFalAiKlingVideoV25TurboProImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoV25TurboProImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoV25TurboProImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKlingVideoV25TurboProImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoV25TurboProImageToVideoData = z.object({ + body: zSchemaKlingVideoV25TurboProImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKlingVideoV25TurboProImageToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiKlingVideoV25TurboProImageToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiKlingVideoV25TurboProImageToVideoRequestsByRequestIdResponse = + zSchemaKlingVideoV25TurboProImageToVideoOutput + +export const zGetFalAiMinimaxHailuo02StandardImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiMinimaxHailuo02StandardImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMinimaxHailuo02StandardImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiMinimaxHailuo02StandardImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMinimaxHailuo02StandardImageToVideoData = z.object({ + body: zSchemaMinimaxHailuo02StandardImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMinimaxHailuo02StandardImageToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiMinimaxHailuo02StandardImageToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiMinimaxHailuo02StandardImageToVideoRequestsByRequestIdResponse = + zSchemaMinimaxHailuo02StandardImageToVideoOutput + +export const zGetFalAiBytedanceSeedanceV1ProImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiBytedanceSeedanceV1ProImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiBytedanceSeedanceV1ProImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiBytedanceSeedanceV1ProImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiBytedanceSeedanceV1ProImageToVideoData = z.object({ + body: zSchemaBytedanceSeedanceV1ProImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiBytedanceSeedanceV1ProImageToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiBytedanceSeedanceV1ProImageToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiBytedanceSeedanceV1ProImageToVideoRequestsByRequestIdResponse = + zSchemaBytedanceSeedanceV1ProImageToVideoOutput + +export const zGetFalAiKlingVideoV21MasterImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoV21MasterImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoV21MasterImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKlingVideoV21MasterImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoV21MasterImageToVideoData = z.object({ + body: zSchemaKlingVideoV21MasterImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKlingVideoV21MasterImageToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiKlingVideoV21MasterImageToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiKlingVideoV21MasterImageToVideoRequestsByRequestIdResponse = + zSchemaKlingVideoV21MasterImageToVideoOutput + +export const zGetFalAiKlingVideoV21StandardImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoV21StandardImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoV21StandardImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKlingVideoV21StandardImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoV21StandardImageToVideoData = z.object({ + body: zSchemaKlingVideoV21StandardImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKlingVideoV21StandardImageToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiKlingVideoV21StandardImageToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiKlingVideoV21StandardImageToVideoRequestsByRequestIdResponse = + zSchemaKlingVideoV21StandardImageToVideoOutput + +export const zGetFalAiPixverseV45ImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPixverseV45ImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPixverseV45ImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiPixverseV45ImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPixverseV45ImageToVideoData = z.object({ + body: zSchemaPixverseV45ImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPixverseV45ImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiPixverseV45ImageToVideoRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. + */ +export const zGetFalAiPixverseV45ImageToVideoRequestsByRequestIdResponse = + zSchemaPixverseV45ImageToVideoOutput + +export const zGetFalAiKlingVideoV2MasterImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoV2MasterImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoV2MasterImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKlingVideoV2MasterImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoV2MasterImageToVideoData = z.object({ + body: zSchemaKlingVideoV2MasterImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKlingVideoV2MasterImageToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiKlingVideoV2MasterImageToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiKlingVideoV2MasterImageToVideoRequestsByRequestIdResponse = + zSchemaKlingVideoV2MasterImageToVideoOutput + +export const zGetFalAiWanI2vRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
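+ *
+ * Polling sketch with logs enabled via the `logs=1` query flag described by
+ * the Data schema above. `submitStatus` is the parsed response from the
+ * submit step; `FAL_KEY` is an assumption of this example.
+ *
+ * @example
+ * const res = await fetch(`${submitStatus.status_url}?logs=1`, {
+ *   headers: { Authorization: `Key ${process.env.FAL_KEY}` },
+ * })
+ * const status = zGetFalAiWanI2vRequestsByRequestIdStatusResponse.parse(
+ *   await res.json(),
+ * )
+ * if (status.status === 'COMPLETED') {
+ *   // next: GET status.response_url and parse with
+ *   // zGetFalAiWanI2vRequestsByRequestIdResponse
+ * }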
+ */ +export const zGetFalAiWanI2vRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanI2vRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanI2vRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanI2vData = z.object({ + body: zSchemaWanI2vInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanI2vResponse = zSchemaQueueStatus + +export const zGetFalAiWanI2vRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiWanI2vRequestsByRequestIdResponse = zSchemaWanI2vOutput + +export const zGetFalAiPixverseV56TransitionRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPixverseV56TransitionRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPixverseV56TransitionRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPixverseV56TransitionRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPixverseV56TransitionData = z.object({ + body: zSchemaPixverseV56TransitionInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPixverseV56TransitionResponse = zSchemaQueueStatus + +export const zGetFalAiPixverseV56TransitionRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiPixverseV56TransitionRequestsByRequestIdResponse = + zSchemaPixverseV56TransitionOutput + +export const zGetFalAiPixverseV56ImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPixverseV56ImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPixverseV56ImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPixverseV56ImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPixverseV56ImageToVideoData = z.object({ + body: zSchemaPixverseV56ImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPixverseV56ImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiPixverseV56ImageToVideoRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. + */ +export const zGetFalAiPixverseV56ImageToVideoRequestsByRequestIdResponse = + zSchemaPixverseV56ImageToVideoOutput + +export const zGetFalAiViduQ2ReferenceToVideoProRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiViduQ2ReferenceToVideoProRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiViduQ2ReferenceToVideoProRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiViduQ2ReferenceToVideoProRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiViduQ2ReferenceToVideoProData = z.object({ + body: zSchemaViduQ2ReferenceToVideoProInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiViduQ2ReferenceToVideoProResponse = zSchemaQueueStatus + +export const zGetFalAiViduQ2ReferenceToVideoProRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiViduQ2ReferenceToVideoProRequestsByRequestIdResponse = + zSchemaViduQ2ReferenceToVideoProOutput + +export const zGetWanV26ImageToVideoFlashRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetWanV26ImageToVideoFlashRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutWanV26ImageToVideoFlashRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutWanV26ImageToVideoFlashRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostWanV26ImageToVideoFlashData = z.object({ + body: zSchemaV26ImageToVideoFlashInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostWanV26ImageToVideoFlashResponse = zSchemaQueueStatus + +export const zGetWanV26ImageToVideoFlashRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetWanV26ImageToVideoFlashRequestsByRequestIdResponse = + zSchemaV26ImageToVideoFlashOutput + +export const zGetFalAiLtx219bDistilledImageToVideoLoraRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLtx219bDistilledImageToVideoLoraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtx219bDistilledImageToVideoLoraRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiLtx219bDistilledImageToVideoLoraRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtx219bDistilledImageToVideoLoraData = z.object({ + body: zSchemaLtx219bDistilledImageToVideoLoraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtx219bDistilledImageToVideoLoraResponse = + zSchemaQueueStatus + +export const zGetFalAiLtx219bDistilledImageToVideoLoraRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiLtx219bDistilledImageToVideoLoraRequestsByRequestIdResponse = + zSchemaLtx219bDistilledImageToVideoLoraOutput + +export const zGetFalAiLtx219bDistilledImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLtx219bDistilledImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtx219bDistilledImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLtx219bDistilledImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtx219bDistilledImageToVideoData = z.object({ + body: zSchemaLtx219bDistilledImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtx219bDistilledImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiLtx219bDistilledImageToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiLtx219bDistilledImageToVideoRequestsByRequestIdResponse = + zSchemaLtx219bDistilledImageToVideoOutput + +export const zGetFalAiLtx219bImageToVideoLoraRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLtx219bImageToVideoLoraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtx219bImageToVideoLoraRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLtx219bImageToVideoLoraRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtx219bImageToVideoLoraData = z.object({ + body: zSchemaLtx219bImageToVideoLoraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtx219bImageToVideoLoraResponse = zSchemaQueueStatus + +export const zGetFalAiLtx219bImageToVideoLoraRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. + */ +export const zGetFalAiLtx219bImageToVideoLoraRequestsByRequestIdResponse = + zSchemaLtx219bImageToVideoLoraOutput + +export const zGetFalAiLtx219bImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLtx219bImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtx219bImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLtx219bImageToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtx219bImageToVideoData = z.object({ + body: zSchemaLtx219bImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiLtx219bImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiLtx219bImageToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLtx219bImageToVideoRequestsByRequestIdResponse = + zSchemaLtx219bImageToVideoOutput + +export const zGetFalAiWanMoveRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiWanMoveRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanMoveRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanMoveRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanMoveData = z.object({ + body: zSchemaWanMoveInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanMoveResponse = zSchemaQueueStatus + +export const zGetFalAiWanMoveRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiWanMoveRequestsByRequestIdResponse = zSchemaWanMoveOutput + +export const zGetFalAiKandinsky5ProImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKandinsky5ProImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKandinsky5ProImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiKandinsky5ProImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKandinsky5ProImageToVideoData = z.object({ + body: zSchemaKandinsky5ProImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKandinsky5ProImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiKandinsky5ProImageToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiKandinsky5ProImageToVideoRequestsByRequestIdResponse = + zSchemaKandinsky5ProImageToVideoOutput + +export const zGetFalAiBytedanceSeedanceV15ProImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiBytedanceSeedanceV15ProImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiBytedanceSeedanceV15ProImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiBytedanceSeedanceV15ProImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiBytedanceSeedanceV15ProImageToVideoData = z.object({ + body: zSchemaBytedanceSeedanceV15ProImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiBytedanceSeedanceV15ProImageToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiBytedanceSeedanceV15ProImageToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiBytedanceSeedanceV15ProImageToVideoRequestsByRequestIdResponse = + zSchemaBytedanceSeedanceV15ProImageToVideoOutput + +export const zGetFalAiLiveAvatarRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiLiveAvatarRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLiveAvatarRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiLiveAvatarRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLiveAvatarData = z.object({ + body: zSchemaLiveAvatarInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLiveAvatarResponse = zSchemaQueueStatus + +export const zGetFalAiLiveAvatarRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLiveAvatarRequestsByRequestIdResponse = + zSchemaLiveAvatarOutput + +export const zGetFalAiHunyuanVideoV15ImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiHunyuanVideoV15ImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiHunyuanVideoV15ImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiHunyuanVideoV15ImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiHunyuanVideoV15ImageToVideoData = z.object({ + body: zSchemaHunyuanVideoV15ImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
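+ *
+ * End-to-end sketch (submit, poll, fetch) for this endpoint. Everything
+ * outside the exported schemas (the `QUEUE_URL` constant, the `FAL_KEY`
+ * env var, the `input` value, and the `sleep` helper) is an illustrative
+ * assumption.
+ *
+ * @example
+ * const sleep = (ms: number) => new Promise((r) => setTimeout(r, ms))
+ * const headers = { Authorization: `Key ${process.env.FAL_KEY}` }
+ * let status = zPostFalAiHunyuanVideoV15ImageToVideoResponse.parse(
+ *   await (
+ *     await fetch(QUEUE_URL, {
+ *       method: 'POST',
+ *       headers: { ...headers, 'Content-Type': 'application/json' },
+ *       body: JSON.stringify(input),
+ *     })
+ *   ).json(),
+ * )
+ * while (status.status !== 'COMPLETED') {
+ *   await sleep(1000)
+ *   status = zSchemaQueueStatus.parse(
+ *     await (await fetch(status.status_url!, { headers })).json(),
+ *   )
+ * }
+ * const result =
+ *   zGetFalAiHunyuanVideoV15ImageToVideoRequestsByRequestIdResponse.parse(
+ *     await (await fetch(status.response_url!, { headers })).json(),
+ *   )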
+ */ +export const zPostFalAiHunyuanVideoV15ImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiHunyuanVideoV15ImageToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiHunyuanVideoV15ImageToVideoRequestsByRequestIdResponse = + zSchemaHunyuanVideoV15ImageToVideoOutput + +export const zGetWanV26ImageToVideoRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetWanV26ImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutWanV26ImageToVideoRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutWanV26ImageToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostWanV26ImageToVideoData = z.object({ + body: zSchemaV26ImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostWanV26ImageToVideoResponse = zSchemaQueueStatus + +export const zGetWanV26ImageToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetWanV26ImageToVideoRequestsByRequestIdResponse = + zSchemaV26ImageToVideoOutput + +export const zGetFalAiKlingVideoO1StandardReferenceToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoO1StandardReferenceToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoO1StandardReferenceToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiKlingVideoO1StandardReferenceToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoO1StandardReferenceToVideoData = z.object({ + body: zSchemaKlingVideoO1StandardReferenceToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKlingVideoO1StandardReferenceToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiKlingVideoO1StandardReferenceToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiKlingVideoO1StandardReferenceToVideoRequestsByRequestIdResponse = + zSchemaKlingVideoO1StandardReferenceToVideoOutput + +export const zGetFalAiKlingVideoO1StandardImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoO1StandardImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoO1StandardImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKlingVideoO1StandardImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoO1StandardImageToVideoData = z.object({ + body: zSchemaKlingVideoO1StandardImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKlingVideoO1StandardImageToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiKlingVideoO1StandardImageToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiKlingVideoO1StandardImageToVideoRequestsByRequestIdResponse = + zSchemaKlingVideoO1StandardImageToVideoOutput + +export const zGetFalAiCreatifyAuroraRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiCreatifyAuroraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiCreatifyAuroraRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiCreatifyAuroraRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiCreatifyAuroraData = z.object({ + body: zSchemaCreatifyAuroraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiCreatifyAuroraResponse = zSchemaQueueStatus + +export const zGetFalAiCreatifyAuroraRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiCreatifyAuroraRequestsByRequestIdResponse = + zSchemaCreatifyAuroraOutput + +export const zGetFalAiKlingVideoAiAvatarV2ProRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoAiAvatarV2ProRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoAiAvatarV2ProRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKlingVideoAiAvatarV2ProRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoAiAvatarV2ProData = z.object({ + body: zSchemaKlingVideoAiAvatarV2ProInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiKlingVideoAiAvatarV2ProResponse = zSchemaQueueStatus + +export const zGetFalAiKlingVideoAiAvatarV2ProRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. + */ +export const zGetFalAiKlingVideoAiAvatarV2ProRequestsByRequestIdResponse = + zSchemaKlingVideoAiAvatarV2ProOutput + +export const zGetFalAiKlingVideoAiAvatarV2StandardRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoAiAvatarV2StandardRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoAiAvatarV2StandardRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKlingVideoAiAvatarV2StandardRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoAiAvatarV2StandardData = z.object({ + body: zSchemaKlingVideoAiAvatarV2StandardInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKlingVideoAiAvatarV2StandardResponse = zSchemaQueueStatus + +export const zGetFalAiKlingVideoAiAvatarV2StandardRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiKlingVideoAiAvatarV2StandardRequestsByRequestIdResponse = + zSchemaKlingVideoAiAvatarV2StandardOutput + +export const zGetFalAiKlingVideoV26ProImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoV26ProImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoV26ProImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiKlingVideoV26ProImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoV26ProImageToVideoData = z.object({ + body: zSchemaKlingVideoV26ProImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKlingVideoV26ProImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiKlingVideoV26ProImageToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiKlingVideoV26ProImageToVideoRequestsByRequestIdResponse = + zSchemaKlingVideoV26ProImageToVideoOutput + +export const zGetFalAiPixverseV55EffectsRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPixverseV55EffectsRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPixverseV55EffectsRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPixverseV55EffectsRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPixverseV55EffectsData = z.object({ + body: zSchemaPixverseV55EffectsInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPixverseV55EffectsResponse = zSchemaQueueStatus + +export const zGetFalAiPixverseV55EffectsRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPixverseV55EffectsRequestsByRequestIdResponse = + zSchemaPixverseV55EffectsOutput + +export const zGetFalAiPixverseV55TransitionRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiPixverseV55TransitionRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPixverseV55TransitionRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPixverseV55TransitionRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPixverseV55TransitionData = z.object({ + body: zSchemaPixverseV55TransitionInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPixverseV55TransitionResponse = zSchemaQueueStatus + +export const zGetFalAiPixverseV55TransitionRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPixverseV55TransitionRequestsByRequestIdResponse = + zSchemaPixverseV55TransitionOutput + +export const zGetFalAiPixverseV55ImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPixverseV55ImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPixverseV55ImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPixverseV55ImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPixverseV55ImageToVideoData = z.object({ + body: zSchemaPixverseV55ImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPixverseV55ImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiPixverseV55ImageToVideoRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. 
+ */ +export const zGetFalAiPixverseV55ImageToVideoRequestsByRequestIdResponse = + zSchemaPixverseV55ImageToVideoOutput + +export const zGetFalAiKlingVideoO1ImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoO1ImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoO1ImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKlingVideoO1ImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoO1ImageToVideoData = z.object({ + body: zSchemaKlingVideoO1ImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKlingVideoO1ImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiKlingVideoO1ImageToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiKlingVideoO1ImageToVideoRequestsByRequestIdResponse = + zSchemaKlingVideoO1ImageToVideoOutput + +export const zGetFalAiKlingVideoO1ReferenceToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoO1ReferenceToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoO1ReferenceToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKlingVideoO1ReferenceToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoO1ReferenceToVideoData = z.object({ + body: zSchemaKlingVideoO1ReferenceToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiKlingVideoO1ReferenceToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiKlingVideoO1ReferenceToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiKlingVideoO1ReferenceToVideoRequestsByRequestIdResponse = + zSchemaKlingVideoO1ReferenceToVideoOutput + +export const zGetFalAiLtx2ImageToVideoFastRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLtx2ImageToVideoFastRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtx2ImageToVideoFastRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLtx2ImageToVideoFastRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtx2ImageToVideoFastData = z.object({ + body: zSchemaLtx2ImageToVideoFastInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtx2ImageToVideoFastResponse = zSchemaQueueStatus + +export const zGetFalAiLtx2ImageToVideoFastRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLtx2ImageToVideoFastRequestsByRequestIdResponse = + zSchemaLtx2ImageToVideoFastOutput + +export const zGetFalAiLtx2ImageToVideoRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiLtx2ImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtx2ImageToVideoRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiLtx2ImageToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtx2ImageToVideoData = z.object({ + body: zSchemaLtx2ImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtx2ImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiLtx2ImageToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLtx2ImageToVideoRequestsByRequestIdResponse = + zSchemaLtx2ImageToVideoOutput + +export const zGetBytedanceLynxRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetBytedanceLynxRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutBytedanceLynxRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutBytedanceLynxRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostBytedanceLynxData = z.object({ + body: zSchemaLynxInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostBytedanceLynxResponse = zSchemaQueueStatus + +export const zGetBytedanceLynxRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetBytedanceLynxRequestsByRequestIdResponse = zSchemaLynxOutput + +export const zGetFalAiPixverseSwapRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
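+ */
+// Illustrative only (editor's sketch, not part of the generated output):
+// the status endpoints take an optional numeric `logs` query flag (`1` to
+// include logs, `0` to omit them). A hypothetical request, validated before
+// dispatch ('req_123' is a made-up ID):
+//
+//   const statusReq = zGetBytedanceLynxRequestsByRequestIdStatusData.parse({
+//     path: { request_id: 'req_123' },
+//     query: { logs: 1 },
+//   })
+//
+/**
+ * The request status.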
+ */ +export const zGetFalAiPixverseSwapRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPixverseSwapRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiPixverseSwapRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPixverseSwapData = z.object({ + body: zSchemaPixverseSwapInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPixverseSwapResponse = zSchemaQueueStatus + +export const zGetFalAiPixverseSwapRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPixverseSwapRequestsByRequestIdResponse = + zSchemaPixverseSwapOutput + +export const zGetFalAiPikaV22PikaframesRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiPikaV22PikaframesRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPikaV22PikaframesRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiPikaV22PikaframesRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPikaV22PikaframesData = z.object({ + body: zSchemaPikaV22PikaframesInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPikaV22PikaframesResponse = zSchemaQueueStatus + +export const zGetFalAiPikaV22PikaframesRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiPikaV22PikaframesRequestsByRequestIdResponse = + zSchemaPikaV22PikaframesOutput + +export const zGetFalAiLongcatVideoImageToVideo720pRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLongcatVideoImageToVideo720pRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLongcatVideoImageToVideo720pRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLongcatVideoImageToVideo720pRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLongcatVideoImageToVideo720pData = z.object({ + body: zSchemaLongcatVideoImageToVideo720pInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLongcatVideoImageToVideo720pResponse = zSchemaQueueStatus + +export const zGetFalAiLongcatVideoImageToVideo720pRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiLongcatVideoImageToVideo720pRequestsByRequestIdResponse = + zSchemaLongcatVideoImageToVideo720pOutput + +export const zGetFalAiLongcatVideoImageToVideo480pRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLongcatVideoImageToVideo480pRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLongcatVideoImageToVideo480pRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiLongcatVideoImageToVideo480pRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLongcatVideoImageToVideo480pData = z.object({ + body: zSchemaLongcatVideoImageToVideo480pInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLongcatVideoImageToVideo480pResponse = zSchemaQueueStatus + +export const zGetFalAiLongcatVideoImageToVideo480pRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiLongcatVideoImageToVideo480pRequestsByRequestIdResponse = + zSchemaLongcatVideoImageToVideo480pOutput + +export const zGetFalAiLongcatVideoDistilledImageToVideo720pRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLongcatVideoDistilledImageToVideo720pRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLongcatVideoDistilledImageToVideo720pRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLongcatVideoDistilledImageToVideo720pRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLongcatVideoDistilledImageToVideo720pData = z.object({ + body: zSchemaLongcatVideoDistilledImageToVideo720pInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLongcatVideoDistilledImageToVideo720pResponse = + zSchemaQueueStatus + +export const zGetFalAiLongcatVideoDistilledImageToVideo720pRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiLongcatVideoDistilledImageToVideo720pRequestsByRequestIdResponse = + zSchemaLongcatVideoDistilledImageToVideo720pOutput + +export const zGetFalAiLongcatVideoDistilledImageToVideo480pRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLongcatVideoDistilledImageToVideo480pRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLongcatVideoDistilledImageToVideo480pRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLongcatVideoDistilledImageToVideo480pRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLongcatVideoDistilledImageToVideo480pData = z.object({ + body: zSchemaLongcatVideoDistilledImageToVideo480pInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLongcatVideoDistilledImageToVideo480pResponse = + zSchemaQueueStatus + +export const zGetFalAiLongcatVideoDistilledImageToVideo480pRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiLongcatVideoDistilledImageToVideo480pRequestsByRequestIdResponse = + zSchemaLongcatVideoDistilledImageToVideo480pOutput + +export const zGetFalAiMinimaxHailuo23FastStandardImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiMinimaxHailuo23FastStandardImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMinimaxHailuo23FastStandardImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiMinimaxHailuo23FastStandardImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMinimaxHailuo23FastStandardImageToVideoData = z.object({ + body: zSchemaMinimaxHailuo23FastStandardImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMinimaxHailuo23FastStandardImageToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiMinimaxHailuo23FastStandardImageToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiMinimaxHailuo23FastStandardImageToVideoRequestsByRequestIdResponse = + zSchemaMinimaxHailuo23FastStandardImageToVideoOutput + +export const zGetFalAiMinimaxHailuo23StandardImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiMinimaxHailuo23StandardImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMinimaxHailuo23StandardImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiMinimaxHailuo23StandardImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMinimaxHailuo23StandardImageToVideoData = z.object({ + body: zSchemaMinimaxHailuo23StandardImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMinimaxHailuo23StandardImageToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiMinimaxHailuo23StandardImageToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiMinimaxHailuo23StandardImageToVideoRequestsByRequestIdResponse = + zSchemaMinimaxHailuo23StandardImageToVideoOutput + +export const zGetFalAiMinimaxHailuo23FastProImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiMinimaxHailuo23FastProImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMinimaxHailuo23FastProImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiMinimaxHailuo23FastProImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMinimaxHailuo23FastProImageToVideoData = z.object({ + body: zSchemaMinimaxHailuo23FastProImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMinimaxHailuo23FastProImageToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiMinimaxHailuo23FastProImageToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiMinimaxHailuo23FastProImageToVideoRequestsByRequestIdResponse = + zSchemaMinimaxHailuo23FastProImageToVideoOutput + +export const zGetFalAiBytedanceSeedanceV1ProFastImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiBytedanceSeedanceV1ProFastImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiBytedanceSeedanceV1ProFastImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiBytedanceSeedanceV1ProFastImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiBytedanceSeedanceV1ProFastImageToVideoData = z.object({ + body: zSchemaBytedanceSeedanceV1ProFastImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiBytedanceSeedanceV1ProFastImageToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiBytedanceSeedanceV1ProFastImageToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiBytedanceSeedanceV1ProFastImageToVideoRequestsByRequestIdResponse = + zSchemaBytedanceSeedanceV1ProFastImageToVideoOutput + +export const zGetFalAiViduQ2ImageToVideoTurboRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiViduQ2ImageToVideoTurboRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiViduQ2ImageToVideoTurboRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiViduQ2ImageToVideoTurboRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiViduQ2ImageToVideoTurboData = z.object({ + body: zSchemaViduQ2ImageToVideoTurboInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiViduQ2ImageToVideoTurboResponse = zSchemaQueueStatus + +export const zGetFalAiViduQ2ImageToVideoTurboRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. 
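+ */
+// Illustrative only (editor's sketch, not part of the generated output):
+// every cancel endpoint shares the `{ success?: boolean }` response shape.
+// `safeParse` is standard Zod API; `rawCancelBody` is a hypothetical
+// untyped value from the HTTP layer.
+//
+//   const cancelled =
+//     zPutFalAiViduQ2ImageToVideoTurboRequestsByRequestIdCancelResponse.safeParse(
+//       rawCancelBody,
+//     )
+//   if (cancelled.success && cancelled.data.success) {
+//     // the queued request was stopped
+//   }
+//
+/**
+ * Result of the request.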
+ */ +export const zGetFalAiViduQ2ImageToVideoTurboRequestsByRequestIdResponse = + zSchemaViduQ2ImageToVideoTurboOutput + +export const zGetFalAiViduQ2ImageToVideoProRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiViduQ2ImageToVideoProRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiViduQ2ImageToVideoProRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiViduQ2ImageToVideoProRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiViduQ2ImageToVideoProData = z.object({ + body: zSchemaViduQ2ImageToVideoProInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiViduQ2ImageToVideoProResponse = zSchemaQueueStatus + +export const zGetFalAiViduQ2ImageToVideoProRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiViduQ2ImageToVideoProRequestsByRequestIdResponse = + zSchemaViduQ2ImageToVideoProOutput + +export const zGetFalAiKlingVideoV25TurboStandardImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoV25TurboStandardImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoV25TurboStandardImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiKlingVideoV25TurboStandardImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoV25TurboStandardImageToVideoData = z.object({ + body: zSchemaKlingVideoV25TurboStandardImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKlingVideoV25TurboStandardImageToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiKlingVideoV25TurboStandardImageToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiKlingVideoV25TurboStandardImageToVideoRequestsByRequestIdResponse = + zSchemaKlingVideoV25TurboStandardImageToVideoOutput + +export const zGetFalAiVeo31FastFirstLastFrameToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiVeo31FastFirstLastFrameToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiVeo31FastFirstLastFrameToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiVeo31FastFirstLastFrameToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiVeo31FastFirstLastFrameToVideoData = z.object({ + body: zSchemaVeo31FastFirstLastFrameToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiVeo31FastFirstLastFrameToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiVeo31FastFirstLastFrameToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiVeo31FastFirstLastFrameToVideoRequestsByRequestIdResponse = + zSchemaVeo31FastFirstLastFrameToVideoOutput + +export const zGetFalAiVeo31FirstLastFrameToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiVeo31FirstLastFrameToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiVeo31FirstLastFrameToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiVeo31FirstLastFrameToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiVeo31FirstLastFrameToVideoData = z.object({ + body: zSchemaVeo31FirstLastFrameToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiVeo31FirstLastFrameToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiVeo31FirstLastFrameToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiVeo31FirstLastFrameToVideoRequestsByRequestIdResponse = + zSchemaVeo31FirstLastFrameToVideoOutput + +export const zGetFalAiVeo31ReferenceToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiVeo31ReferenceToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiVeo31ReferenceToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiVeo31ReferenceToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiVeo31ReferenceToVideoData = z.object({ + body: zSchemaVeo31ReferenceToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiVeo31ReferenceToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiVeo31ReferenceToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiVeo31ReferenceToVideoRequestsByRequestIdResponse = + zSchemaVeo31ReferenceToVideoOutput + +export const zGetFalAiVeo31FastImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiVeo31FastImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiVeo31FastImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiVeo31FastImageToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiVeo31FastImageToVideoData = z.object({ + body: zSchemaVeo31FastImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiVeo31FastImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiVeo31FastImageToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiVeo31FastImageToVideoRequestsByRequestIdResponse = + zSchemaVeo31FastImageToVideoOutput + +export const zGetFalAiVeo31ImageToVideoRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiVeo31ImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiVeo31ImageToVideoRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiVeo31ImageToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiVeo31ImageToVideoData = z.object({ + body: zSchemaVeo31ImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiVeo31ImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiVeo31ImageToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiVeo31ImageToVideoRequestsByRequestIdResponse = + zSchemaVeo31ImageToVideoOutput + +export const zGetFalAiSora2ImageToVideoProRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiSora2ImageToVideoProRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSora2ImageToVideoProRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiSora2ImageToVideoProRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSora2ImageToVideoProData = z.object({ + body: zSchemaSora2ImageToVideoProInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiSora2ImageToVideoProResponse = zSchemaQueueStatus + +export const zGetFalAiSora2ImageToVideoProRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSora2ImageToVideoProRequestsByRequestIdResponse = + zSchemaSora2ImageToVideoProOutput + +export const zGetFalAiSora2ImageToVideoRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. 
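+ */
+// Illustrative only (editor's sketch, not part of the generated output):
+// the schema triples mirror fal's queue lifecycle: POST submits and returns
+// a QueueStatus, GET .../status polls it, and GET .../requests/{id} fetches
+// the result once the status is COMPLETED. The `*Payload` values below are
+// hypothetical raw bodies from a fetch layer.
+//
+//   const queued = zPostFalAiSora2ImageToVideoResponse.parse(postPayload)
+//   const polled =
+//     zGetFalAiSora2ImageToVideoRequestsByRequestIdStatusResponse.parse(
+//       statusPayload,
+//     )
+//   if (polled.status === 'COMPLETED') {
+//     zGetFalAiSora2ImageToVideoRequestsByRequestIdResponse.parse(resultPayload)
+//   }
+//
+/**
+ * The request status.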
+ */ +export const zGetFalAiSora2ImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSora2ImageToVideoRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiSora2ImageToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSora2ImageToVideoData = z.object({ + body: zSchemaSora2ImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiSora2ImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiSora2ImageToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSora2ImageToVideoRequestsByRequestIdResponse = + zSchemaSora2ImageToVideoOutput + +export const zGetFalAiOviImageToVideoRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiOviImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiOviImageToVideoRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiOviImageToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiOviImageToVideoData = z.object({ + body: zSchemaOviImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiOviImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiOviImageToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiOviImageToVideoRequestsByRequestIdResponse = + zSchemaOviImageToVideoOutput + +export const zGetVeedFabric10FastRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetVeedFabric10FastRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutVeedFabric10FastRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutVeedFabric10FastRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostVeedFabric10FastData = z.object({ + body: zSchemaFabric10FastInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostVeedFabric10FastResponse = zSchemaQueueStatus + +export const zGetVeedFabric10FastRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetVeedFabric10FastRequestsByRequestIdResponse = + zSchemaFabric10FastOutput + +export const zGetFalAiBytedanceOmnihumanV15RequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiBytedanceOmnihumanV15RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiBytedanceOmnihumanV15RequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiBytedanceOmnihumanV15RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiBytedanceOmnihumanV15Data = z.object({ + body: zSchemaBytedanceOmnihumanV15Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiBytedanceOmnihumanV15Response = zSchemaQueueStatus + +export const zGetFalAiBytedanceOmnihumanV15RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiBytedanceOmnihumanV15RequestsByRequestIdResponse = + zSchemaBytedanceOmnihumanV15Output + +export const zGetVeedFabric10RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetVeedFabric10RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutVeedFabric10RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutVeedFabric10RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostVeedFabric10Data = z.object({ + body: zSchemaFabric10Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostVeedFabric10Response = zSchemaQueueStatus + +export const zGetVeedFabric10RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetVeedFabric10RequestsByRequestIdResponse = zSchemaFabric10Output + +export const zGetFalAiKlingVideoV1StandardAiAvatarRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoV1StandardAiAvatarRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoV1StandardAiAvatarRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiKlingVideoV1StandardAiAvatarRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoV1StandardAiAvatarData = z.object({ + body: zSchemaKlingVideoV1StandardAiAvatarInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKlingVideoV1StandardAiAvatarResponse = zSchemaQueueStatus + +export const zGetFalAiKlingVideoV1StandardAiAvatarRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiKlingVideoV1StandardAiAvatarRequestsByRequestIdResponse = + zSchemaKlingVideoV1StandardAiAvatarOutput + +export const zGetFalAiKlingVideoV1ProAiAvatarRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoV1ProAiAvatarRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoV1ProAiAvatarRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKlingVideoV1ProAiAvatarRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoV1ProAiAvatarData = z.object({ + body: zSchemaKlingVideoV1ProAiAvatarInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKlingVideoV1ProAiAvatarResponse = zSchemaQueueStatus + +export const zGetFalAiKlingVideoV1ProAiAvatarRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. + */ +export const zGetFalAiKlingVideoV1ProAiAvatarRequestsByRequestIdResponse = + zSchemaKlingVideoV1ProAiAvatarOutput + +export const zGetDecartLucy14bImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetDecartLucy14bImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutDecartLucy14bImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutDecartLucy14bImageToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostDecartLucy14bImageToVideoData = z.object({ + body: zSchemaLucy14bImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostDecartLucy14bImageToVideoResponse = zSchemaQueueStatus + +export const zGetDecartLucy14bImageToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetDecartLucy14bImageToVideoRequestsByRequestIdResponse = + zSchemaLucy14bImageToVideoOutput + +export const zGetFalAiBytedanceSeedanceV1LiteReferenceToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiBytedanceSeedanceV1LiteReferenceToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiBytedanceSeedanceV1LiteReferenceToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiBytedanceSeedanceV1LiteReferenceToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiBytedanceSeedanceV1LiteReferenceToVideoData = z.object({ + body: zSchemaBytedanceSeedanceV1LiteReferenceToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiBytedanceSeedanceV1LiteReferenceToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiBytedanceSeedanceV1LiteReferenceToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiBytedanceSeedanceV1LiteReferenceToVideoRequestsByRequestIdResponse = + zSchemaBytedanceSeedanceV1LiteReferenceToVideoOutput + +export const zGetFalAiWanAtiRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiWanAtiRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanAtiRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanAtiRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanAtiData = z.object({ + body: zSchemaWanAtiInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanAtiResponse = zSchemaQueueStatus + +export const zGetFalAiWanAtiRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiWanAtiRequestsByRequestIdResponse = zSchemaWanAtiOutput + +export const zGetFalAiDecartLucy5bImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiDecartLucy5bImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiDecartLucy5bImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiDecartLucy5bImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiDecartLucy5bImageToVideoData = z.object({ + body: zSchemaDecartLucy5bImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiDecartLucy5bImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiDecartLucy5bImageToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiDecartLucy5bImageToVideoRequestsByRequestIdResponse = + zSchemaDecartLucy5bImageToVideoOutput + +export const zGetFalAiPixverseV5TransitionRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPixverseV5TransitionRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPixverseV5TransitionRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPixverseV5TransitionRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPixverseV5TransitionData = z.object({ + body: zSchemaPixverseV5TransitionInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPixverseV5TransitionResponse = zSchemaQueueStatus + +export const zGetFalAiPixverseV5TransitionRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPixverseV5TransitionRequestsByRequestIdResponse = + zSchemaPixverseV5TransitionOutput + +export const zGetFalAiPixverseV5EffectsRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiPixverseV5EffectsRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPixverseV5EffectsRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiPixverseV5EffectsRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPixverseV5EffectsData = z.object({ + body: zSchemaPixverseV5EffectsInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPixverseV5EffectsResponse = zSchemaQueueStatus + +export const zGetFalAiPixverseV5EffectsRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPixverseV5EffectsRequestsByRequestIdResponse = + zSchemaPixverseV5EffectsOutput + +export const zGetFalAiPixverseV5ImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPixverseV5ImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPixverseV5ImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPixverseV5ImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPixverseV5ImageToVideoData = z.object({ + body: zSchemaPixverseV5ImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPixverseV5ImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiPixverseV5ImageToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPixverseV5ImageToVideoRequestsByRequestIdResponse = + zSchemaPixverseV5ImageToVideoOutput + +export const zGetMoonvalleyMareyI2vRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetMoonvalleyMareyI2vRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutMoonvalleyMareyI2vRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutMoonvalleyMareyI2vRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostMoonvalleyMareyI2vData = z.object({ + body: zSchemaMareyI2vInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostMoonvalleyMareyI2vResponse = zSchemaQueueStatus + +export const zGetMoonvalleyMareyI2vRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetMoonvalleyMareyI2vRequestsByRequestIdResponse = + zSchemaMareyI2vOutput + +export const zGetFalAiBytedanceVideoStylizeRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiBytedanceVideoStylizeRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiBytedanceVideoStylizeRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiBytedanceVideoStylizeRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiBytedanceVideoStylizeData = z.object({ + body: zSchemaBytedanceVideoStylizeInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiBytedanceVideoStylizeResponse = zSchemaQueueStatus + +export const zGetFalAiBytedanceVideoStylizeRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiBytedanceVideoStylizeRequestsByRequestIdResponse = + zSchemaBytedanceVideoStylizeOutput + +export const zGetFalAiWanV22A14bImageToVideoLoraRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiWanV22A14bImageToVideoLoraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanV22A14bImageToVideoLoraRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanV22A14bImageToVideoLoraRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanV22A14bImageToVideoLoraData = z.object({ + body: zSchemaWanV22A14bImageToVideoLoraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanV22A14bImageToVideoLoraResponse = zSchemaQueueStatus + +export const zGetFalAiWanV22A14bImageToVideoLoraRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiWanV22A14bImageToVideoLoraRequestsByRequestIdResponse = + zSchemaWanV22A14bImageToVideoLoraOutput + +export const zGetFalAiMinimaxHailuo02FastImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiMinimaxHailuo02FastImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMinimaxHailuo02FastImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiMinimaxHailuo02FastImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMinimaxHailuo02FastImageToVideoData = z.object({ + body: zSchemaMinimaxHailuo02FastImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMinimaxHailuo02FastImageToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiMinimaxHailuo02FastImageToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiMinimaxHailuo02FastImageToVideoRequestsByRequestIdResponse = + zSchemaMinimaxHailuo02FastImageToVideoOutput + +export const zGetFalAiVeo3ImageToVideoRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiVeo3ImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiVeo3ImageToVideoRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiVeo3ImageToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiVeo3ImageToVideoData = z.object({ + body: zSchemaVeo3ImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiVeo3ImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiVeo3ImageToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiVeo3ImageToVideoRequestsByRequestIdResponse = + zSchemaVeo3ImageToVideoOutput + +export const zGetFalAiWanV22A14bImageToVideoTurboRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiWanV22A14bImageToVideoTurboRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanV22A14bImageToVideoTurboRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanV22A14bImageToVideoTurboRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanV22A14bImageToVideoTurboData = z.object({ + body: zSchemaWanV22A14bImageToVideoTurboInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanV22A14bImageToVideoTurboResponse = zSchemaQueueStatus + +export const zGetFalAiWanV22A14bImageToVideoTurboRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiWanV22A14bImageToVideoTurboRequestsByRequestIdResponse = + zSchemaWanV22A14bImageToVideoTurboOutput + +export const zGetFalAiWanV225bImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiWanV225bImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanV225bImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanV225bImageToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanV225bImageToVideoData = z.object({ + body: zSchemaWanV225bImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanV225bImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiWanV225bImageToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiWanV225bImageToVideoRequestsByRequestIdResponse = + zSchemaWanV225bImageToVideoOutput + +export const zGetFalAiWanV22A14bImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiWanV22A14bImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanV22A14bImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanV22A14bImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanV22A14bImageToVideoData = z.object({ + body: zSchemaWanV22A14bImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanV22A14bImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiWanV22A14bImageToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiWanV22A14bImageToVideoRequestsByRequestIdResponse = + zSchemaWanV22A14bImageToVideoOutput + +export const zGetFalAiBytedanceOmnihumanRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiBytedanceOmnihumanRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiBytedanceOmnihumanRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiBytedanceOmnihumanRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiBytedanceOmnihumanData = z.object({ + body: zSchemaBytedanceOmnihumanInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiBytedanceOmnihumanResponse = zSchemaQueueStatus + +export const zGetFalAiBytedanceOmnihumanRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiBytedanceOmnihumanRequestsByRequestIdResponse = + zSchemaBytedanceOmnihumanOutput + +export const zGetFalAiLtxv13B098DistilledImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLtxv13B098DistilledImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtxv13B098DistilledImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLtxv13B098DistilledImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtxv13B098DistilledImageToVideoData = z.object({ + body: zSchemaLtxv13B098DistilledImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtxv13B098DistilledImageToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiLtxv13B098DistilledImageToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiLtxv13B098DistilledImageToVideoRequestsByRequestIdResponse = + zSchemaLtxv13B098DistilledImageToVideoOutput + +export const zGetFalAiVeo3FastImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiVeo3FastImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiVeo3FastImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiVeo3FastImageToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiVeo3FastImageToVideoData = z.object({ + body: zSchemaVeo3FastImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiVeo3FastImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiVeo3FastImageToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiVeo3FastImageToVideoRequestsByRequestIdResponse = + zSchemaVeo3FastImageToVideoOutput + +export const zGetFalAiViduQ1ReferenceToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiViduQ1ReferenceToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiViduQ1ReferenceToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiViduQ1ReferenceToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiViduQ1ReferenceToVideoData = z.object({ + body: zSchemaViduQ1ReferenceToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiViduQ1ReferenceToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiViduQ1ReferenceToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiViduQ1ReferenceToVideoRequestsByRequestIdResponse = + zSchemaViduQ1ReferenceToVideoOutput + +export const zGetFalAiAiAvatarSingleTextRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiAiAvatarSingleTextRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiAiAvatarSingleTextRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiAiAvatarSingleTextRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiAiAvatarSingleTextData = z.object({ + body: zSchemaAiAvatarSingleTextInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiAiAvatarSingleTextResponse = zSchemaQueueStatus + +export const zGetFalAiAiAvatarSingleTextRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiAiAvatarSingleTextRequestsByRequestIdResponse = + zSchemaAiAvatarSingleTextOutput + +export const zGetFalAiAiAvatarRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiAiAvatarRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiAiAvatarRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiAiAvatarRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiAiAvatarData = z.object({ + body: zSchemaAiAvatarInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiAiAvatarResponse = zSchemaQueueStatus + +export const zGetFalAiAiAvatarRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiAiAvatarRequestsByRequestIdResponse = + zSchemaAiAvatarOutput + +export const zGetFalAiAiAvatarMultiTextRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiAiAvatarMultiTextRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiAiAvatarMultiTextRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiAiAvatarMultiTextRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiAiAvatarMultiTextData = z.object({ + body: zSchemaAiAvatarMultiTextInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiAiAvatarMultiTextResponse = zSchemaQueueStatus + +export const zGetFalAiAiAvatarMultiTextRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiAiAvatarMultiTextRequestsByRequestIdResponse = + zSchemaAiAvatarMultiTextOutput + +export const zGetFalAiAiAvatarMultiRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiAiAvatarMultiRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiAiAvatarMultiRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiAiAvatarMultiRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiAiAvatarMultiData = z.object({ + body: zSchemaAiAvatarMultiInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiAiAvatarMultiResponse = zSchemaQueueStatus + +export const zGetFalAiAiAvatarMultiRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiAiAvatarMultiRequestsByRequestIdResponse = + zSchemaAiAvatarMultiOutput + +export const zGetFalAiMinimaxHailuo02ProImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiMinimaxHailuo02ProImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMinimaxHailuo02ProImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiMinimaxHailuo02ProImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMinimaxHailuo02ProImageToVideoData = z.object({ + body: zSchemaMinimaxHailuo02ProImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMinimaxHailuo02ProImageToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiMinimaxHailuo02ProImageToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiMinimaxHailuo02ProImageToVideoRequestsByRequestIdResponse = + zSchemaMinimaxHailuo02ProImageToVideoOutput + +export const zGetFalAiBytedanceSeedanceV1LiteImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiBytedanceSeedanceV1LiteImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiBytedanceSeedanceV1LiteImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiBytedanceSeedanceV1LiteImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiBytedanceSeedanceV1LiteImageToVideoData = z.object({ + body: zSchemaBytedanceSeedanceV1LiteImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiBytedanceSeedanceV1LiteImageToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiBytedanceSeedanceV1LiteImageToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiBytedanceSeedanceV1LiteImageToVideoRequestsByRequestIdResponse = + zSchemaBytedanceSeedanceV1LiteImageToVideoOutput + +export const zGetFalAiHunyuanAvatarRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiHunyuanAvatarRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiHunyuanAvatarRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiHunyuanAvatarRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiHunyuanAvatarData = z.object({ + body: zSchemaHunyuanAvatarInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiHunyuanAvatarResponse = zSchemaQueueStatus + +export const zGetFalAiHunyuanAvatarRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiHunyuanAvatarRequestsByRequestIdResponse = + zSchemaHunyuanAvatarOutput + +export const zGetFalAiKlingVideoV21ProImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
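+ *
+ * Request-shape sketch for status polling (the literal values are
+ * placeholders; the schema itself defines the real shape):
+ *
+ * @example
+ * zGetFalAiKlingVideoV21ProImageToVideoRequestsByRequestIdStatusData.parse({
+ *   path: { request_id: 'request-id-from-submit' },
+ *   query: { logs: 1 }, // 1 = include logs, 0 = omit them
+ * })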
+ */ +export const zGetFalAiKlingVideoV21ProImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoV21ProImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKlingVideoV21ProImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoV21ProImageToVideoData = z.object({ + body: zSchemaKlingVideoV21ProImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKlingVideoV21ProImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiKlingVideoV21ProImageToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiKlingVideoV21ProImageToVideoRequestsByRequestIdResponse = + zSchemaKlingVideoV21ProImageToVideoOutput + +export const zGetFalAiHunyuanPortraitRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiHunyuanPortraitRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiHunyuanPortraitRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiHunyuanPortraitRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiHunyuanPortraitData = z.object({ + body: zSchemaHunyuanPortraitInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiHunyuanPortraitResponse = zSchemaQueueStatus + +export const zGetFalAiHunyuanPortraitRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiHunyuanPortraitRequestsByRequestIdResponse = + zSchemaHunyuanPortraitOutput + +export const zGetFalAiKlingVideoV16StandardElementsRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoV16StandardElementsRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoV16StandardElementsRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKlingVideoV16StandardElementsRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoV16StandardElementsData = z.object({ + body: zSchemaKlingVideoV16StandardElementsInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKlingVideoV16StandardElementsResponse = + zSchemaQueueStatus + +export const zGetFalAiKlingVideoV16StandardElementsRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiKlingVideoV16StandardElementsRequestsByRequestIdResponse = + zSchemaKlingVideoV16StandardElementsOutput + +export const zGetFalAiKlingVideoV16ProElementsRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoV16ProElementsRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoV16ProElementsRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKlingVideoV16ProElementsRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoV16ProElementsData = z.object({ + body: zSchemaKlingVideoV16ProElementsInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiKlingVideoV16ProElementsResponse = zSchemaQueueStatus + +export const zGetFalAiKlingVideoV16ProElementsRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiKlingVideoV16ProElementsRequestsByRequestIdResponse = + zSchemaKlingVideoV16ProElementsOutput + +export const zGetFalAiLtxVideo13bDistilledImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLtxVideo13bDistilledImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtxVideo13bDistilledImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLtxVideo13bDistilledImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtxVideo13bDistilledImageToVideoData = z.object({ + body: zSchemaLtxVideo13bDistilledImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtxVideo13bDistilledImageToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiLtxVideo13bDistilledImageToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiLtxVideo13bDistilledImageToVideoRequestsByRequestIdResponse = + zSchemaLtxVideo13bDistilledImageToVideoOutput + +export const zGetFalAiLtxVideo13bDevImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLtxVideo13bDevImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtxVideo13bDevImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiLtxVideo13bDevImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtxVideo13bDevImageToVideoData = z.object({ + body: zSchemaLtxVideo13bDevImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtxVideo13bDevImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiLtxVideo13bDevImageToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiLtxVideo13bDevImageToVideoRequestsByRequestIdResponse = + zSchemaLtxVideo13bDevImageToVideoOutput + +export const zGetFalAiLtxVideoLoraImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLtxVideoLoraImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtxVideoLoraImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLtxVideoLoraImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtxVideoLoraImageToVideoData = z.object({ + body: zSchemaLtxVideoLoraImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtxVideoLoraImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiLtxVideoLoraImageToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiLtxVideoLoraImageToVideoRequestsByRequestIdResponse = + zSchemaLtxVideoLoraImageToVideoOutput + +export const zGetFalAiPixverseV45TransitionRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiPixverseV45TransitionRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPixverseV45TransitionRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPixverseV45TransitionRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPixverseV45TransitionData = z.object({ + body: zSchemaPixverseV45TransitionInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPixverseV45TransitionResponse = zSchemaQueueStatus + +export const zGetFalAiPixverseV45TransitionRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPixverseV45TransitionRequestsByRequestIdResponse = + zSchemaPixverseV45TransitionOutput + +export const zGetFalAiPixverseV45ImageToVideoFastRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPixverseV45ImageToVideoFastRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPixverseV45ImageToVideoFastRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPixverseV45ImageToVideoFastRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPixverseV45ImageToVideoFastData = z.object({ + body: zSchemaPixverseV45ImageToVideoFastInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPixverseV45ImageToVideoFastResponse = zSchemaQueueStatus + +export const zGetFalAiPixverseV45ImageToVideoFastRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiPixverseV45ImageToVideoFastRequestsByRequestIdResponse = + zSchemaPixverseV45ImageToVideoFastOutput + +export const zGetFalAiPixverseV45EffectsRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPixverseV45EffectsRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPixverseV45EffectsRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPixverseV45EffectsRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPixverseV45EffectsData = z.object({ + body: zSchemaPixverseV45EffectsInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPixverseV45EffectsResponse = zSchemaQueueStatus + +export const zGetFalAiPixverseV45EffectsRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPixverseV45EffectsRequestsByRequestIdResponse = + zSchemaPixverseV45EffectsOutput + +export const zGetFalAiHunyuanCustomRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiHunyuanCustomRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiHunyuanCustomRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiHunyuanCustomRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiHunyuanCustomData = z.object({ + body: zSchemaHunyuanCustomInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiHunyuanCustomResponse = zSchemaQueueStatus + +export const zGetFalAiHunyuanCustomRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiHunyuanCustomRequestsByRequestIdResponse = + zSchemaHunyuanCustomOutput + +export const zGetFalAiFramepackF1RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFramepackF1RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFramepackF1RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFramepackF1RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFramepackF1Data = z.object({ + body: zSchemaFramepackF1Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFramepackF1Response = zSchemaQueueStatus + +export const zGetFalAiFramepackF1RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFramepackF1RequestsByRequestIdResponse = + zSchemaFramepackF1Output + +export const zGetFalAiViduQ1StartEndToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiViduQ1StartEndToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiViduQ1StartEndToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiViduQ1StartEndToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiViduQ1StartEndToVideoData = z.object({ + body: zSchemaViduQ1StartEndToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiViduQ1StartEndToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiViduQ1StartEndToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiViduQ1StartEndToVideoRequestsByRequestIdResponse = + zSchemaViduQ1StartEndToVideoOutput + +export const zGetFalAiViduQ1ImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiViduQ1ImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiViduQ1ImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiViduQ1ImageToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiViduQ1ImageToVideoData = z.object({ + body: zSchemaViduQ1ImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiViduQ1ImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiViduQ1ImageToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiViduQ1ImageToVideoRequestsByRequestIdResponse = + zSchemaViduQ1ImageToVideoOutput + +export const zGetFalAiMagiImageToVideoRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiMagiImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMagiImageToVideoRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiMagiImageToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMagiImageToVideoData = z.object({ + body: zSchemaMagiImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMagiImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiMagiImageToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiMagiImageToVideoRequestsByRequestIdResponse = + zSchemaMagiImageToVideoOutput + +export const zGetFalAiPixverseV4EffectsRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiPixverseV4EffectsRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPixverseV4EffectsRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiPixverseV4EffectsRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPixverseV4EffectsData = z.object({ + body: zSchemaPixverseV4EffectsInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPixverseV4EffectsResponse = zSchemaQueueStatus + +export const zGetFalAiPixverseV4EffectsRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiPixverseV4EffectsRequestsByRequestIdResponse = + zSchemaPixverseV4EffectsOutput + +export const zGetFalAiMagiDistilledImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiMagiDistilledImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMagiDistilledImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiMagiDistilledImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMagiDistilledImageToVideoData = z.object({ + body: zSchemaMagiDistilledImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMagiDistilledImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiMagiDistilledImageToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiMagiDistilledImageToVideoRequestsByRequestIdResponse = + zSchemaMagiDistilledImageToVideoOutput + +export const zGetFalAiFramepackFlf2vRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFramepackFlf2vRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFramepackFlf2vRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFramepackFlf2vRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFramepackFlf2vData = z.object({ + body: zSchemaFramepackFlf2vInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiFramepackFlf2vResponse = zSchemaQueueStatus + +export const zGetFalAiFramepackFlf2vRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFramepackFlf2vRequestsByRequestIdResponse = + zSchemaFramepackFlf2vOutput + +export const zGetFalAiWanFlf2vRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiWanFlf2vRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanFlf2vRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanFlf2vRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanFlf2vData = z.object({ + body: zSchemaWanFlf2vInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanFlf2vResponse = zSchemaQueueStatus + +export const zGetFalAiWanFlf2vRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiWanFlf2vRequestsByRequestIdResponse = + zSchemaWanFlf2vOutput + +export const zGetFalAiFramepackRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFramepackRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFramepackRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
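+ *
+ * Cancellation sketch (how the response body is obtained is an
+ * assumption; only the schema is defined here):
+ *
+ * @example
+ * const result = zPutFalAiFramepackRequestsByRequestIdCancelResponse.parse(json)
+ * if (result.success) console.log('framepack request cancelled')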
+ */ +export const zPutFalAiFramepackRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFramepackData = z.object({ + body: zSchemaFramepackInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFramepackResponse = zSchemaQueueStatus + +export const zGetFalAiFramepackRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFramepackRequestsByRequestIdResponse = + zSchemaFramepackOutput + +export const zGetFalAiPixverseV4ImageToVideoFastRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPixverseV4ImageToVideoFastRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPixverseV4ImageToVideoFastRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPixverseV4ImageToVideoFastRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPixverseV4ImageToVideoFastData = z.object({ + body: zSchemaPixverseV4ImageToVideoFastInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPixverseV4ImageToVideoFastResponse = zSchemaQueueStatus + +export const zGetFalAiPixverseV4ImageToVideoFastRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiPixverseV4ImageToVideoFastRequestsByRequestIdResponse = + zSchemaPixverseV4ImageToVideoFastOutput + +export const zGetFalAiPixverseV4ImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiPixverseV4ImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPixverseV4ImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPixverseV4ImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPixverseV4ImageToVideoData = z.object({ + body: zSchemaPixverseV4ImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPixverseV4ImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiPixverseV4ImageToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPixverseV4ImageToVideoRequestsByRequestIdResponse = + zSchemaPixverseV4ImageToVideoOutput + +export const zGetFalAiPixverseV35EffectsRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPixverseV35EffectsRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPixverseV35EffectsRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPixverseV35EffectsRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPixverseV35EffectsData = z.object({ + body: zSchemaPixverseV35EffectsInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPixverseV35EffectsResponse = zSchemaQueueStatus + +export const zGetFalAiPixverseV35EffectsRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiPixverseV35EffectsRequestsByRequestIdResponse = + zSchemaPixverseV35EffectsOutput + +export const zGetFalAiPixverseV35TransitionRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPixverseV35TransitionRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPixverseV35TransitionRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPixverseV35TransitionRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPixverseV35TransitionData = z.object({ + body: zSchemaPixverseV35TransitionInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPixverseV35TransitionResponse = zSchemaQueueStatus + +export const zGetFalAiPixverseV35TransitionRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPixverseV35TransitionRequestsByRequestIdResponse = + zSchemaPixverseV35TransitionOutput + +export const zGetFalAiLumaDreamMachineRay2FlashImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLumaDreamMachineRay2FlashImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLumaDreamMachineRay2FlashImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLumaDreamMachineRay2FlashImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLumaDreamMachineRay2FlashImageToVideoData = z.object({ + body: zSchemaLumaDreamMachineRay2FlashImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiLumaDreamMachineRay2FlashImageToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiLumaDreamMachineRay2FlashImageToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiLumaDreamMachineRay2FlashImageToVideoRequestsByRequestIdResponse = + zSchemaLumaDreamMachineRay2FlashImageToVideoOutput + +export const zGetFalAiPikaV15PikaffectsRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiPikaV15PikaffectsRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPikaV15PikaffectsRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiPikaV15PikaffectsRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPikaV15PikaffectsData = z.object({ + body: zSchemaPikaV15PikaffectsInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPikaV15PikaffectsResponse = zSchemaQueueStatus + +export const zGetFalAiPikaV15PikaffectsRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPikaV15PikaffectsRequestsByRequestIdResponse = + zSchemaPikaV15PikaffectsOutput + +export const zGetFalAiPikaV2TurboImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPikaV2TurboImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPikaV2TurboImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiPikaV2TurboImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPikaV2TurboImageToVideoData = z.object({ + body: zSchemaPikaV2TurboImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPikaV2TurboImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiPikaV2TurboImageToVideoRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. + */ +export const zGetFalAiPikaV2TurboImageToVideoRequestsByRequestIdResponse = + zSchemaPikaV2TurboImageToVideoOutput + +export const zGetFalAiPikaV22PikascenesRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiPikaV22PikascenesRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPikaV22PikascenesRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiPikaV22PikascenesRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPikaV22PikascenesData = z.object({ + body: zSchemaPikaV22PikascenesInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPikaV22PikascenesResponse = zSchemaQueueStatus + +export const zGetFalAiPikaV22PikascenesRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPikaV22PikascenesRequestsByRequestIdResponse = + zSchemaPikaV22PikascenesOutput + +export const zGetFalAiPikaV22ImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiPikaV22ImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPikaV22ImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPikaV22ImageToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPikaV22ImageToVideoData = z.object({ + body: zSchemaPikaV22ImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPikaV22ImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiPikaV22ImageToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPikaV22ImageToVideoRequestsByRequestIdResponse = + zSchemaPikaV22ImageToVideoOutput + +export const zGetFalAiPikaV21ImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPikaV21ImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPikaV21ImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPikaV21ImageToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPikaV21ImageToVideoData = z.object({ + body: zSchemaPikaV21ImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPikaV21ImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiPikaV21ImageToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiPikaV21ImageToVideoRequestsByRequestIdResponse = + zSchemaPikaV21ImageToVideoOutput + +export const zGetFalAiViduImageToVideoRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiViduImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiViduImageToVideoRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiViduImageToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiViduImageToVideoData = z.object({ + body: zSchemaViduImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiViduImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiViduImageToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiViduImageToVideoRequestsByRequestIdResponse = + zSchemaViduImageToVideoOutput + +export const zGetFalAiViduStartEndToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiViduStartEndToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiViduStartEndToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiViduStartEndToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiViduStartEndToVideoData = z.object({ + body: zSchemaViduStartEndToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiViduStartEndToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiViduStartEndToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiViduStartEndToVideoRequestsByRequestIdResponse = + zSchemaViduStartEndToVideoOutput + +export const zGetFalAiViduReferenceToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiViduReferenceToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiViduReferenceToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiViduReferenceToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiViduReferenceToVideoData = z.object({ + body: zSchemaViduReferenceToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiViduReferenceToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiViduReferenceToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiViduReferenceToVideoRequestsByRequestIdResponse = + zSchemaViduReferenceToVideoOutput + +export const zGetFalAiViduTemplateToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiViduTemplateToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiViduTemplateToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiViduTemplateToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiViduTemplateToVideoData = z.object({ + body: zSchemaViduTemplateToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiViduTemplateToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiViduTemplateToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiViduTemplateToVideoRequestsByRequestIdResponse = + zSchemaViduTemplateToVideoOutput + +export const zGetFalAiWanI2vLoraRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiWanI2vLoraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanI2vLoraRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanI2vLoraRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanI2vLoraData = z.object({ + body: zSchemaWanI2vLoraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanI2vLoraResponse = zSchemaQueueStatus + +export const zGetFalAiWanI2vLoraRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiWanI2vLoraRequestsByRequestIdResponse = + zSchemaWanI2vLoraOutput + +export const zGetFalAiHunyuanVideoImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiHunyuanVideoImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiHunyuanVideoImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiHunyuanVideoImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiHunyuanVideoImageToVideoData = z.object({ + body: zSchemaHunyuanVideoImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiHunyuanVideoImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiHunyuanVideoImageToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiHunyuanVideoImageToVideoRequestsByRequestIdResponse = + zSchemaHunyuanVideoImageToVideoOutput + +export const zGetFalAiMinimaxVideo01DirectorImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiMinimaxVideo01DirectorImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMinimaxVideo01DirectorImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiMinimaxVideo01DirectorImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMinimaxVideo01DirectorImageToVideoData = z.object({ + body: zSchemaMinimaxVideo01DirectorImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMinimaxVideo01DirectorImageToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiMinimaxVideo01DirectorImageToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiMinimaxVideo01DirectorImageToVideoRequestsByRequestIdResponse = + zSchemaMinimaxVideo01DirectorImageToVideoOutput + +export const zGetFalAiSkyreelsI2vRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiSkyreelsI2vRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSkyreelsI2vRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiSkyreelsI2vRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSkyreelsI2vData = z.object({ + body: zSchemaSkyreelsI2vInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiSkyreelsI2vResponse = zSchemaQueueStatus + +export const zGetFalAiSkyreelsI2vRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSkyreelsI2vRequestsByRequestIdResponse = + zSchemaSkyreelsI2vOutput + +export const zGetFalAiLumaDreamMachineRay2ImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLumaDreamMachineRay2ImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLumaDreamMachineRay2ImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLumaDreamMachineRay2ImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLumaDreamMachineRay2ImageToVideoData = z.object({ + body: zSchemaLumaDreamMachineRay2ImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiLumaDreamMachineRay2ImageToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiLumaDreamMachineRay2ImageToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiLumaDreamMachineRay2ImageToVideoRequestsByRequestIdResponse = + zSchemaLumaDreamMachineRay2ImageToVideoOutput + +export const zGetFalAiHunyuanVideoImg2VidLoraRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiHunyuanVideoImg2VidLoraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiHunyuanVideoImg2VidLoraRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiHunyuanVideoImg2VidLoraRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiHunyuanVideoImg2VidLoraData = z.object({ + body: zSchemaHunyuanVideoImg2VidLoraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiHunyuanVideoImg2VidLoraResponse = zSchemaQueueStatus + +export const zGetFalAiHunyuanVideoImg2VidLoraRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. + */ +export const zGetFalAiHunyuanVideoImg2VidLoraRequestsByRequestIdResponse = + zSchemaHunyuanVideoImg2VidLoraOutput + +export const zGetFalAiPixverseV35ImageToVideoFastRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPixverseV35ImageToVideoFastRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPixverseV35ImageToVideoFastRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiPixverseV35ImageToVideoFastRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPixverseV35ImageToVideoFastData = z.object({ + body: zSchemaPixverseV35ImageToVideoFastInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPixverseV35ImageToVideoFastResponse = zSchemaQueueStatus + +export const zGetFalAiPixverseV35ImageToVideoFastRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiPixverseV35ImageToVideoFastRequestsByRequestIdResponse = + zSchemaPixverseV35ImageToVideoFastOutput + +export const zGetFalAiPixverseV35ImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPixverseV35ImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPixverseV35ImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPixverseV35ImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPixverseV35ImageToVideoData = z.object({ + body: zSchemaPixverseV35ImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPixverseV35ImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiPixverseV35ImageToVideoRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. + */ +export const zGetFalAiPixverseV35ImageToVideoRequestsByRequestIdResponse = + zSchemaPixverseV35ImageToVideoOutput + +export const zGetFalAiMinimaxVideo01SubjectReferenceRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiMinimaxVideo01SubjectReferenceRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMinimaxVideo01SubjectReferenceRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiMinimaxVideo01SubjectReferenceRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMinimaxVideo01SubjectReferenceData = z.object({ + body: zSchemaMinimaxVideo01SubjectReferenceInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMinimaxVideo01SubjectReferenceResponse = + zSchemaQueueStatus + +export const zGetFalAiMinimaxVideo01SubjectReferenceRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiMinimaxVideo01SubjectReferenceRequestsByRequestIdResponse = + zSchemaMinimaxVideo01SubjectReferenceOutput + +export const zGetFalAiKlingVideoV16StandardImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoV16StandardImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoV16StandardImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKlingVideoV16StandardImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoV16StandardImageToVideoData = z.object({ + body: zSchemaKlingVideoV16StandardImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKlingVideoV16StandardImageToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiKlingVideoV16StandardImageToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiKlingVideoV16StandardImageToVideoRequestsByRequestIdResponse = + zSchemaKlingVideoV16StandardImageToVideoOutput + +export const zGetFalAiSadtalkerReferenceRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiSadtalkerReferenceRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSadtalkerReferenceRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiSadtalkerReferenceRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSadtalkerReferenceData = z.object({ + body: zSchemaSadtalkerReferenceInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiSadtalkerReferenceResponse = zSchemaQueueStatus + +export const zGetFalAiSadtalkerReferenceRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSadtalkerReferenceRequestsByRequestIdResponse = + zSchemaSadtalkerReferenceOutput + +export const zGetFalAiMinimaxVideo01LiveImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiMinimaxVideo01LiveImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMinimaxVideo01LiveImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiMinimaxVideo01LiveImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMinimaxVideo01LiveImageToVideoData = z.object({ + body: zSchemaMinimaxVideo01LiveImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiMinimaxVideo01LiveImageToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiMinimaxVideo01LiveImageToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiMinimaxVideo01LiveImageToVideoRequestsByRequestIdResponse = + zSchemaMinimaxVideo01LiveImageToVideoOutput + +export const zGetFalAiLtxVideoImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLtxVideoImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtxVideoImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLtxVideoImageToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtxVideoImageToVideoData = z.object({ + body: zSchemaLtxVideoImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtxVideoImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiLtxVideoImageToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLtxVideoImageToVideoRequestsByRequestIdResponse = + zSchemaLtxVideoImageToVideoOutput + +export const zGetFalAiCogvideox5bImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiCogvideox5bImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiCogvideox5bImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiCogvideox5bImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiCogvideox5bImageToVideoData = z.object({ + body: zSchemaCogvideox5bImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiCogvideox5bImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiCogvideox5bImageToVideoRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. + */ +export const zGetFalAiCogvideox5bImageToVideoRequestsByRequestIdResponse = + zSchemaCogvideox5bImageToVideoOutput + +export const zGetFalAiKlingVideoV15ProImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoV15ProImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoV15ProImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKlingVideoV15ProImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoV15ProImageToVideoData = z.object({ + body: zSchemaKlingVideoV15ProImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKlingVideoV15ProImageToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiKlingVideoV15ProImageToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiKlingVideoV15ProImageToVideoRequestsByRequestIdResponse = + zSchemaKlingVideoV15ProImageToVideoOutput + +export const zGetFalAiKlingVideoV1StandardImageToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiKlingVideoV1StandardImageToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoV1StandardImageToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKlingVideoV1StandardImageToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoV1StandardImageToVideoData = z.object({ + body: zSchemaKlingVideoV1StandardImageToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKlingVideoV1StandardImageToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiKlingVideoV1StandardImageToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiKlingVideoV1StandardImageToVideoRequestsByRequestIdResponse = + zSchemaKlingVideoV1StandardImageToVideoOutput + +export const zGetFalAiStableVideoRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiStableVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiStableVideoRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiStableVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiStableVideoData = z.object({ + body: zSchemaStableVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiStableVideoResponse = zSchemaQueueStatus + +export const zGetFalAiStableVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiStableVideoRequestsByRequestIdResponse = + zSchemaStableVideoOutput + +export const zGetFalAiAmtInterpolationFrameInterpolationRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiAmtInterpolationFrameInterpolationRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiAmtInterpolationFrameInterpolationRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiAmtInterpolationFrameInterpolationRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiAmtInterpolationFrameInterpolationData = z.object({ + body: zSchemaAmtInterpolationFrameInterpolationInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiAmtInterpolationFrameInterpolationResponse = + zSchemaQueueStatus + +export const zGetFalAiAmtInterpolationFrameInterpolationRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiAmtInterpolationFrameInterpolationRequestsByRequestIdResponse = + zSchemaAmtInterpolationFrameInterpolationOutput + +export const zGetFalAiLivePortraitRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiLivePortraitRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLivePortraitRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiLivePortraitRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLivePortraitData = z.object({ + body: zSchemaLivePortraitInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiLivePortraitResponse = zSchemaQueueStatus + +export const zGetFalAiLivePortraitRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLivePortraitRequestsByRequestIdResponse = + zSchemaLivePortraitOutput + +export const zGetFalAiMusetalkRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiMusetalkRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMusetalkRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiMusetalkRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMusetalkData = z.object({ + body: zSchemaMusetalkInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMusetalkResponse = zSchemaQueueStatus + +export const zGetFalAiMusetalkRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiMusetalkRequestsByRequestIdResponse = + zSchemaMusetalkOutput + +export const zGetFalAiSadtalkerRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiSadtalkerRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSadtalkerRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiSadtalkerRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSadtalkerData = z.object({ + body: zSchemaSadtalkerInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiSadtalkerResponse = zSchemaQueueStatus + +export const zGetFalAiSadtalkerRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSadtalkerRequestsByRequestIdResponse = + zSchemaSadtalkerOutput + +export const zGetFalAiFastSvdLcmRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFastSvdLcmRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFastSvdLcmRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFastSvdLcmRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFastSvdLcmData = z.object({ + body: zSchemaFastSvdLcmInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFastSvdLcmResponse = zSchemaQueueStatus + +export const zGetFalAiFastSvdLcmRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiFastSvdLcmRequestsByRequestIdResponse = + zSchemaFastSvdLcmOutput diff --git a/packages/typescript/ai-fal/src/generated/index.ts b/packages/typescript/ai-fal/src/generated/index.ts new file mode 100644 index 00000000..b0302250 --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/index.ts @@ -0,0 +1,485 @@ +// AUTO-GENERATED - Do not edit manually +// Generated from types.gen.ts via scripts/generate-fal-endpoint-maps.ts + +// Import value exports (SchemaMap constants) from category endpoint maps +import { Gen3dTo3dSchemaMap } from './3d-to-3d/endpoint-map' +import { AudioToAudioSchemaMap } from './audio-to-audio/endpoint-map' +import { AudioToTextSchemaMap } from './audio-to-text/endpoint-map' +import { AudioToVideoSchemaMap } from './audio-to-video/endpoint-map' +import { ImageTo3dSchemaMap } from './image-to-3d/endpoint-map' +import { ImageToImageSchemaMap } from './image-to-image/endpoint-map' +import { ImageToJsonSchemaMap } from './image-to-json/endpoint-map' +import { ImageToVideoSchemaMap } from './image-to-video/endpoint-map' +import { JsonSchemaMap } from './json/endpoint-map' +import { SpeechToSpeechSchemaMap } from './speech-to-speech/endpoint-map' +import { SpeechToTextSchemaMap } from './speech-to-text/endpoint-map' +import { TextTo3dSchemaMap } from './text-to-3d/endpoint-map' +import { TextToAudioSchemaMap } from './text-to-audio/endpoint-map' +import { TextToImageSchemaMap } from './text-to-image/endpoint-map' +import { TextToJsonSchemaMap } from './text-to-json/endpoint-map' +import { TextToSpeechSchemaMap } from './text-to-speech/endpoint-map' +import { TextToTextSchemaMap } from './text-to-text/endpoint-map' +import { TextToVideoSchemaMap } from './text-to-video/endpoint-map' +import { VideoToTextSchemaMap } from './video-to-text/endpoint-map' +import { VideoToVideoSchemaMap } from './video-to-video/endpoint-map' +import { VisionSchemaMap } from './vision/endpoint-map' + +// Import type exports from category endpoint maps +import type { + Gen3dTo3dModel, + Gen3dTo3dModelInput, + Gen3dTo3dModelOutput, +} from './3d-to-3d/endpoint-map' +import type { + AudioToAudioModel, + AudioToAudioModelInput, + AudioToAudioModelOutput, +} from './audio-to-audio/endpoint-map' +import type { + AudioToTextModel, + AudioToTextModelInput, + AudioToTextModelOutput, +} from './audio-to-text/endpoint-map' +import type { + AudioToVideoModel, + AudioToVideoModelInput, + AudioToVideoModelOutput, +} from './audio-to-video/endpoint-map' +import type { + ImageTo3dModel, + ImageTo3dModelInput, + ImageTo3dModelOutput, +} from './image-to-3d/endpoint-map' +import type { + ImageToImageModel, + ImageToImageModelInput, + ImageToImageModelOutput, +} from './image-to-image/endpoint-map' +import type { + ImageToJsonModel, + ImageToJsonModelInput, + ImageToJsonModelOutput, +} from './image-to-json/endpoint-map' +import type { + ImageToVideoModel, + ImageToVideoModelInput, + ImageToVideoModelOutput, +} from './image-to-video/endpoint-map' +import type { + JsonModel, + JsonModelInput, + JsonModelOutput, +} from './json/endpoint-map' +import type { LlmModel } from './llm/endpoint-map' +import type { + SpeechToSpeechModel, + SpeechToSpeechModelInput, + SpeechToSpeechModelOutput, +} from './speech-to-speech/endpoint-map' +import type { + SpeechToTextModel, + SpeechToTextModelInput, + SpeechToTextModelOutput, +} from './speech-to-text/endpoint-map' +import type { + TextTo3dModel, + TextTo3dModelInput, + TextTo3dModelOutput, +} from './text-to-3d/endpoint-map' +import type { + 
  TextToAudioModel,
+  TextToAudioModelInput,
+  TextToAudioModelOutput,
+} from './text-to-audio/endpoint-map'
+import type {
+  TextToImageModel,
+  TextToImageModelInput,
+  TextToImageModelOutput,
+} from './text-to-image/endpoint-map'
+import type {
+  TextToJsonModel,
+  TextToJsonModelInput,
+  TextToJsonModelOutput,
+} from './text-to-json/endpoint-map'
+import type {
+  TextToSpeechModel,
+  TextToSpeechModelInput,
+  TextToSpeechModelOutput,
+} from './text-to-speech/endpoint-map'
+import type {
+  TextToTextModel,
+  TextToTextModelInput,
+  TextToTextModelOutput,
+} from './text-to-text/endpoint-map'
+import type {
+  TextToVideoModel,
+  TextToVideoModelInput,
+  TextToVideoModelOutput,
+} from './text-to-video/endpoint-map'
+import type { TrainingModel } from './training/endpoint-map'
+import type { UnknownModel } from './unknown/endpoint-map'
+import type { VideoToAudioModel } from './video-to-audio/endpoint-map'
+import type {
+  VideoToTextModel,
+  VideoToTextModelInput,
+  VideoToTextModelOutput,
+} from './video-to-text/endpoint-map'
+import type {
+  VideoToVideoModel,
+  VideoToVideoModelInput,
+  VideoToVideoModelOutput,
+} from './video-to-video/endpoint-map'
+import type {
+  VisionModel,
+  VisionModelInput,
+  VisionModelOutput,
+} from './vision/endpoint-map'
+
+import type { z } from 'zod'
+
+// Import official fal.ai endpoint types
+import type { EndpointTypeMap } from '@fal-ai/client/endpoints'
+
+// Re-export all category endpoint maps
+export * from './3d-to-3d/endpoint-map'
+export * from './audio-to-audio/endpoint-map'
+export * from './audio-to-text/endpoint-map'
+export * from './audio-to-video/endpoint-map'
+export * from './image-to-3d/endpoint-map'
+export * from './image-to-image/endpoint-map'
+export * from './image-to-json/endpoint-map'
+export * from './image-to-video/endpoint-map'
+export * from './json/endpoint-map'
+export * from './llm/endpoint-map'
+export * from './speech-to-speech/endpoint-map'
+export * from './speech-to-text/endpoint-map'
+export * from './text-to-3d/endpoint-map'
+export * from './text-to-audio/endpoint-map'
+export * from './text-to-image/endpoint-map'
+export * from './text-to-json/endpoint-map'
+export * from './text-to-speech/endpoint-map'
+export * from './text-to-text/endpoint-map'
+export * from './text-to-video/endpoint-map'
+export * from './training/endpoint-map'
+export * from './unknown/endpoint-map'
+export * from './video-to-audio/endpoint-map'
+export * from './video-to-text/endpoint-map'
+export * from './video-to-video/endpoint-map'
+export * from './vision/endpoint-map'
+
+/**
+ * Union type of all Fal.ai model endpoint IDs across all categories.
+ *
+ * Note: Using this union type loses some type precision. For better type safety,
+ * import category-specific types like ImageToImageModel, TextToImageModel, etc.
+ */
+export type FalModel =
+  | AudioToAudioModel
+  | AudioToTextModel
+  | AudioToVideoModel
+  | Gen3dTo3dModel
+  | ImageTo3dModel
+  | ImageToImageModel
+  | ImageToJsonModel
+  | ImageToVideoModel
+  | JsonModel
+  | LlmModel
+  | SpeechToSpeechModel
+  | SpeechToTextModel
+  | TextTo3dModel
+  | TextToAudioModel
+  | TextToImageModel
+  | TextToJsonModel
+  | TextToSpeechModel
+  | TextToTextModel
+  | TextToVideoModel
+  | TrainingModel
+  | UnknownModel
+  | VideoToAudioModel
+  | VideoToTextModel
+  | VideoToVideoModel
+  | VisionModel
+
+/** Union of all image generation models */
+export type FalImageModel = TextToImageModel | ImageToImageModel
+
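A minimal sketch of the precision trade-off the FalModel docblock describes, assuming the generated unions compile as shown; the helper names and the relative import path are hypothetical, not part of this package:

import type { FalImageModel, FalModel } from './generated'

// With the broad union, any endpoint id is accepted, but nothing ties
// the id to an input or output shape.
function describeEndpoint(model: FalModel): string {
  return `fal endpoint: ${model}`
}

// With the category union, the compiler rejects e.g. a speech-to-text id
// where an image-generation endpoint is required.
function pickImageEndpoint(model: FalImageModel): FalImageModel {
  return model
}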
+ * Checks official fal.ai EndpointTypeMap first, then falls back to category-specific types. + */ +export type FalImageInput<T extends FalImageModel> = + T extends keyof EndpointTypeMap + ? EndpointTypeMap[T]['input'] + : T extends TextToImageModel + ? TextToImageModelInput<T> + : T extends ImageToImageModel + ? ImageToImageModelInput<T> + : never + +/** + * Get the output type for a specific image model. + * Checks official fal.ai EndpointTypeMap first, then falls back to category-specific types. + */ +export type FalImageOutput<T extends FalImageModel> = + T extends keyof EndpointTypeMap + ? EndpointTypeMap[T]['output'] + : T extends TextToImageModel + ? TextToImageModelOutput<T> + : T extends ImageToImageModel + ? ImageToImageModelOutput<T> + : never + +/** Combined schema map for all image models */ +export const FalImageSchemaMap: Record< + FalImageModel, + { input: z.ZodSchema; output: z.ZodSchema } +> = { + ...TextToImageSchemaMap, + ...ImageToImageSchemaMap, +} as const + +/** Union of all video generation models */ +export type FalVideoModel = + | TextToVideoModel + | ImageToVideoModel + | VideoToVideoModel + | AudioToVideoModel + +/** + * Get the input type for a specific video model. + * Checks official fal.ai EndpointTypeMap first, then falls back to category-specific types. + */ +export type FalVideoInput<T extends FalVideoModel> = + T extends keyof EndpointTypeMap + ? EndpointTypeMap[T]['input'] + : T extends TextToVideoModel + ? TextToVideoModelInput<T> + : T extends ImageToVideoModel + ? ImageToVideoModelInput<T> + : T extends VideoToVideoModel + ? VideoToVideoModelInput<T> + : T extends AudioToVideoModel + ? AudioToVideoModelInput<T> + : never + +/** + * Get the output type for a specific video model. + * Checks official fal.ai EndpointTypeMap first, then falls back to category-specific types. + */ +export type FalVideoOutput<T extends FalVideoModel> = + T extends keyof EndpointTypeMap + ? EndpointTypeMap[T]['output'] + : T extends TextToVideoModel + ? TextToVideoModelOutput<T> + : T extends ImageToVideoModel + ? ImageToVideoModelOutput<T> + : T extends VideoToVideoModel + ? VideoToVideoModelOutput<T> + : T extends AudioToVideoModel + ? AudioToVideoModelOutput<T> + : never + +/** Combined schema map for all video models */ +export const FalVideoSchemaMap: Record< + FalVideoModel, + { input: z.ZodSchema; output: z.ZodSchema } +> = { + ...TextToVideoSchemaMap, + ...ImageToVideoSchemaMap, + ...VideoToVideoSchemaMap, + ...AudioToVideoSchemaMap, +} as const
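+ +// Illustrative sketch (not emitted by the generator): one way the helper types +// and schema maps above can be consumed. The function name parseVideoInput is +// hypothetical and not part of the package API. +export function parseVideoInput<T extends FalVideoModel>( + model: T, + payload: unknown, +): FalVideoInput<T> { + // Validate at runtime with the generated zod schema, then narrow to the + // compile-time payload type resolved from the endpoint id. + return FalVideoSchemaMap[model].input.parse(payload) as FalVideoInput<T> +}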
+ +/** Union of all audio generation models */ +export type FalAudioModel = + | TextToAudioModel + | AudioToAudioModel + | SpeechToSpeechModel + | TextToSpeechModel + +/** + * Get the input type for a specific audio model. + * Checks official fal.ai EndpointTypeMap first, then falls back to category-specific types. + */ +export type FalAudioInput<T extends FalAudioModel> = + T extends keyof EndpointTypeMap + ? EndpointTypeMap[T]['input'] + : T extends TextToAudioModel + ? TextToAudioModelInput<T> + : T extends AudioToAudioModel + ? AudioToAudioModelInput<T> + : T extends SpeechToSpeechModel + ? SpeechToSpeechModelInput<T> + : T extends TextToSpeechModel + ? TextToSpeechModelInput<T> + : never + +/** + * Get the output type for a specific audio model. + * Checks official fal.ai EndpointTypeMap first, then falls back to category-specific types. + */ +export type FalAudioOutput<T extends FalAudioModel> = + T extends keyof EndpointTypeMap + ? EndpointTypeMap[T]['output'] + : T extends TextToAudioModel + ? TextToAudioModelOutput<T> + : T extends AudioToAudioModel + ? AudioToAudioModelOutput<T> + : T extends SpeechToSpeechModel + ? SpeechToSpeechModelOutput<T> + : T extends TextToSpeechModel + ? TextToSpeechModelOutput<T> + : never + +/** Combined schema map for all audio models */ +export const FalAudioSchemaMap: Record< + FalAudioModel, + { input: z.ZodSchema; output: z.ZodSchema } +> = { + ...TextToAudioSchemaMap, + ...AudioToAudioSchemaMap, + ...SpeechToSpeechSchemaMap, + ...TextToSpeechSchemaMap, +} as const + +/** Union of all text generation models */ +export type FalTextModel = + | TextToTextModel + | AudioToTextModel + | VideoToTextModel + | VisionModel + | SpeechToTextModel + +/** + * Get the input type for a specific text model. + * Checks official fal.ai EndpointTypeMap first, then falls back to category-specific types. + */ +export type FalTextInput<T extends FalTextModel> = + T extends keyof EndpointTypeMap + ? EndpointTypeMap[T]['input'] + : T extends TextToTextModel + ? TextToTextModelInput<T> + : T extends AudioToTextModel + ? AudioToTextModelInput<T> + : T extends VideoToTextModel + ? VideoToTextModelInput<T> + : T extends VisionModel + ? VisionModelInput<T> + : T extends SpeechToTextModel + ? SpeechToTextModelInput<T> + : never + +/** + * Get the output type for a specific text model. + * Checks official fal.ai EndpointTypeMap first, then falls back to category-specific types. + */ +export type FalTextOutput<T extends FalTextModel> = + T extends keyof EndpointTypeMap + ? EndpointTypeMap[T]['output'] + : T extends TextToTextModel + ? TextToTextModelOutput<T> + : T extends AudioToTextModel + ? AudioToTextModelOutput<T> + : T extends VideoToTextModel + ? VideoToTextModelOutput<T> + : T extends VisionModel + ? VisionModelOutput<T> + : T extends SpeechToTextModel + ? SpeechToTextModelOutput<T> + : never + +/** Combined schema map for all text models */ +export const FalTextSchemaMap: Record< + FalTextModel, + { input: z.ZodSchema; output: z.ZodSchema } +> = { + ...TextToTextSchemaMap, + ...AudioToTextSchemaMap, + ...VideoToTextSchemaMap, + ...VisionSchemaMap, + ...SpeechToTextSchemaMap, +} as const + +/** Union of all 3d generation models */ +export type Fal3dModel = TextTo3dModel | ImageTo3dModel | Gen3dTo3dModel + +/** + * Get the input type for a specific 3d model. + * Checks official fal.ai EndpointTypeMap first, then falls back to category-specific types. + */ +export type Fal3dInput<T extends Fal3dModel> = T extends keyof EndpointTypeMap + ? EndpointTypeMap[T]['input'] + : T extends TextTo3dModel + ? TextTo3dModelInput<T> + : T extends ImageTo3dModel + ? ImageTo3dModelInput<T> + : T extends Gen3dTo3dModel + ? Gen3dTo3dModelInput<T> + : never + +/** + * Get the output type for a specific 3d model. + * Checks official fal.ai EndpointTypeMap first, then falls back to category-specific types. + */ +export type Fal3dOutput<T extends Fal3dModel> = T extends keyof EndpointTypeMap + ? EndpointTypeMap[T]['output'] + : T extends TextTo3dModel + ? TextTo3dModelOutput<T> + : T extends ImageTo3dModel + ? ImageTo3dModelOutput<T> + : T extends Gen3dTo3dModel + ? Gen3dTo3dModelOutput<T> + : never + +/** Combined schema map for all 3d models */ +export const Fal3dSchemaMap: Record< + Fal3dModel, + { input: z.ZodSchema; output: z.ZodSchema } +> = { + ...TextTo3dSchemaMap, + ...ImageTo3dSchemaMap, + ...Gen3dTo3dSchemaMap, +} as const
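+ +// Illustrative sketch (hypothetical, not generator output): the category helper +// types resolve a concrete payload shape from an endpoint id, so request wrappers +// stay typed without reaching into the per-category endpoint maps directly. +export type Example3dJob<T extends Fal3dModel> = { + model: T + input: Fal3dInput<T> + output?: Fal3dOutput<T> +}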
+ +/** Union of all json generation models */ +export type FalJsonModel = TextToJsonModel | ImageToJsonModel | JsonModel + +/** + * Get the input type for a specific json model. + * Checks official fal.ai EndpointTypeMap first, then falls back to category-specific types. + */ +export type FalJsonInput<T extends FalJsonModel> = + T extends keyof EndpointTypeMap + ? EndpointTypeMap[T]['input'] + : T extends TextToJsonModel + ? TextToJsonModelInput<T> + : T extends ImageToJsonModel + ? ImageToJsonModelInput<T> + : T extends JsonModel + ? JsonModelInput<T> + : never + +/** + * Get the output type for a specific json model. + * Checks official fal.ai EndpointTypeMap first, then falls back to category-specific types. + */ +export type FalJsonOutput<T extends FalJsonModel> = + T extends keyof EndpointTypeMap + ? EndpointTypeMap[T]['output'] + : T extends TextToJsonModel + ? TextToJsonModelOutput<T> + : T extends ImageToJsonModel + ? ImageToJsonModelOutput<T> + : T extends JsonModel + ? JsonModelOutput<T> + : never + +/** Combined schema map for all json models */ +export const FalJsonSchemaMap: Record< + FalJsonModel, + { input: z.ZodSchema; output: z.ZodSchema } +> = { + ...TextToJsonSchemaMap, + ...ImageToJsonSchemaMap, + ...JsonSchemaMap, +} as const diff --git a/packages/typescript/ai-fal/src/generated/json/endpoint-map.ts b/packages/typescript/ai-fal/src/generated/json/endpoint-map.ts new file mode 100644 index 00000000..30b76090 --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/json/endpoint-map.ts @@ -0,0 +1,64 @@ +// AUTO-GENERATED - Do not edit manually +// Generated from types.gen.ts via scripts/generate-fal-endpoint-maps.ts + +import { + zSchemaFfmpegApiLoudnormInput, + zSchemaFfmpegApiLoudnormOutput, + zSchemaFfmpegApiMetadataInput, + zSchemaFfmpegApiMetadataOutput, + zSchemaFfmpegApiWaveformInput, + zSchemaFfmpegApiWaveformOutput, +} from './zod.gen' + +import type { + SchemaFfmpegApiLoudnormInput, + SchemaFfmpegApiLoudnormOutput, + SchemaFfmpegApiMetadataInput, + SchemaFfmpegApiMetadataOutput, + SchemaFfmpegApiWaveformInput, + SchemaFfmpegApiWaveformOutput, +} from './types.gen' + +import type { z } from 'zod' + +export type JsonEndpointMap = { + 'fal-ai/ffmpeg-api/loudnorm': { + input: SchemaFfmpegApiLoudnormInput + output: SchemaFfmpegApiLoudnormOutput + } + 'fal-ai/ffmpeg-api/waveform': { + input: SchemaFfmpegApiWaveformInput + output: SchemaFfmpegApiWaveformOutput + } + 'fal-ai/ffmpeg-api/metadata': { + input: SchemaFfmpegApiMetadataInput + output: SchemaFfmpegApiMetadataOutput + } +} + +/** Union type of all json model endpoint IDs */ +export type JsonModel = keyof JsonEndpointMap + +export const JsonSchemaMap: Record< + JsonModel, + { input: z.ZodSchema; output: z.ZodSchema } +> = { + ['fal-ai/ffmpeg-api/loudnorm']: { + input: zSchemaFfmpegApiLoudnormInput, + output: zSchemaFfmpegApiLoudnormOutput, + }, + ['fal-ai/ffmpeg-api/waveform']: { + input: zSchemaFfmpegApiWaveformInput, + output: zSchemaFfmpegApiWaveformOutput, + }, + ['fal-ai/ffmpeg-api/metadata']: { + input: zSchemaFfmpegApiMetadataInput, + output: zSchemaFfmpegApiMetadataOutput, + }, +} as const + +/** Get the input type for a specific json model */ +export type JsonModelInput<T extends JsonModel> = JsonEndpointMap[T]['input'] + +/** Get the output type for a specific json model */ +export type JsonModelOutput<T extends JsonModel> = JsonEndpointMap[T]['output'] diff --git a/packages/typescript/ai-fal/src/generated/json/types.gen.ts b/packages/typescript/ai-fal/src/generated/json/types.gen.ts new file mode 100644 index 00000000..ac141550 --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/json/types.gen.ts @@ -0,0 +1,859 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: 'https://queue.fal.run' | (string & {}) +} + +/** + * AudioTrack + */ +export type SchemaAudioTrack = { + /** + * Codec + * + * Audio codec used (e.g., 'aac', 'mp3') + */ + codec: string + /** + * Channels + * + * Number of audio channels + */ + channels: number + /** + * Sample Rate + * + * Audio sample rate in Hz + */ + sample_rate: 
number + /** + * Bitrate + * + * Audio bitrate in bits per second + */ + bitrate: number +} + +/** + * Resolution + */ +export type SchemaResolution = { + /** + * Height + * + * Height of the video in pixels + */ + height: number + /** + * Aspect Ratio + * + * Display aspect ratio (e.g., '16:9') + */ + aspect_ratio: string + /** + * Width + * + * Width of the video in pixels + */ + width: number +} + +/** + * VideoFormat + */ +export type SchemaVideoFormat = { + /** + * Container + * + * Container format of the video + */ + container: string + /** + * Level + * + * Codec level (e.g., 4.1) + */ + level: number + /** + * Pixel Format + * + * Pixel format used (e.g., 'yuv420p') + */ + pixel_format: string + /** + * Video Codec + * + * Video codec used (e.g., 'h264') + */ + video_codec: string + /** + * Profile + * + * Codec profile (e.g., 'main', 'high') + */ + profile: string + /** + * Bitrate + * + * Video bitrate in bits per second + */ + bitrate: number +} + +/** + * Audio + */ +export type SchemaAudio = { + /** + * File Size + * + * Size of the file in bytes + */ + file_size: number + /** + * Duration + * + * Duration of the media in seconds + */ + duration: number + /** + * Bitrate + * + * Overall bitrate of the media in bits per second + */ + bitrate: number + /** + * Url + * + * URL where the media file can be accessed + */ + url: string + /** + * Media Type + * + * Type of media (always 'audio') + */ + media_type?: string + /** + * Codec + * + * Codec used to encode the media + */ + codec: string + /** + * File Name + * + * Original filename of the media + */ + file_name: string + /** + * Sample Rate + * + * Audio sample rate in Hz + */ + sample_rate: number + /** + * Content Type + * + * MIME type of the media file + */ + content_type: string + /** + * Container + * + * Container format of the media file (e.g., 'mp4', 'mov') + */ + container: string + /** + * Channels + * + * Number of audio channels + */ + channels: number +} + +/** + * Video + */ +export type SchemaVideo = { + /** + * File Size + * + * Size of the file in bytes + */ + file_size: number + /** + * Timebase + * + * Time base used for frame timestamps + */ + timebase: string + /** + * Start Frame Url + * + * URL of the extracted first frame + */ + start_frame_url?: string | unknown + /** + * Duration + * + * Duration of the media in seconds + */ + duration: number + /** + * Url + * + * URL where the media file can be accessed + */ + url: string + /** + * Fps + * + * Frames per second + */ + fps: number + /** + * Codec + * + * Codec used to encode the media + */ + codec: string + /** + * Media Type + * + * Type of media (always 'video') + */ + media_type?: string + /** + * End Frame Url + * + * URL of the extracted last frame + */ + end_frame_url?: string | unknown + /** + * Content Type + * + * MIME type of the media file + */ + content_type: string + /** + * Container + * + * Container format of the media file (e.g., 'mp4', 'mov') + */ + container: string + /** + * Bitrate + * + * Overall bitrate of the media in bits per second + */ + bitrate: number + format: SchemaVideoFormat + resolution: SchemaResolution + /** + * Frame Count + * + * Total number of frames in the video + */ + frame_count: number + /** + * File Name + * + * Original filename of the media + */ + file_name: string + /** + * Audio track information if video has audio + */ + audio?: SchemaAudioTrack | unknown +} + +/** + * MetadataOutput + */ +export type SchemaFfmpegApiMetadataOutput = { + /** + * Media + * + * Metadata for the analyzed media file 
(either Video or Audio) + */ + media: SchemaVideo | SchemaAudio +} + +/** + * MetadataInput + */ +export type SchemaFfmpegApiMetadataInput = { + /** + * Extract Frames + * + * Whether to extract the start and end frames for videos. Note that when true the request will be slower. + */ + extract_frames?: boolean + /** + * Media Url + * + * URL of the media file (video or audio) to analyze + */ + media_url: string +} + +/** + * WaveformOutput + */ +export type SchemaFfmpegApiWaveformOutput = { + /** + * Precision + * + * Number of decimal places used in the waveform values + */ + precision: number + /** + * Duration + * + * Duration of the audio in seconds + */ + duration: number + /** + * Points + * + * Number of points in the waveform data + */ + points: number + /** + * Waveform + * + * Normalized waveform data as an array of values between -1 and 1. The number of points is determined by audio duration × points_per_second. + */ + waveform: Array<number> +} + +/** + * WaveformInput + */ +export type SchemaFfmpegApiWaveformInput = { + /** + * Precision + * + * Number of decimal places for the waveform values. Higher values provide more precision but increase payload size. + */ + precision?: number + /** + * Smoothing Window + * + * Size of the smoothing window. Higher values create a smoother waveform. Must be an odd number. + */ + smoothing_window?: number + /** + * Media Url + * + * URL of the audio file to analyze + */ + media_url: string + /** + * Points Per Second + * + * Controls how many points are sampled per second of audio. Lower values (e.g. 1-2) create a coarser waveform, higher values (e.g. 4-10) create a more detailed one. + */ + points_per_second?: number +} + +/** + * File + */ +export type SchemaFile = { + /** + * File Size + * + * The size of the file in bytes. + */ + file_size?: number | unknown + /** + * File Name + * + * The name of the file. It will be auto-generated if not provided. + */ + file_name?: string | unknown + /** + * Content Type + * + * The mime type of the file. + */ + content_type?: string | unknown + /** + * Url + * + * The URL where the file can be downloaded from. 
+ */ + url: string +} + +/** + * LoudnormSummary + */ +export type SchemaLoudnormSummary = { + /** + * Output Integrated + * + * Output integrated loudness in LUFS + */ + output_integrated?: number | unknown + /** + * Output True Peak + * + * Output true peak in dBTP + */ + output_true_peak?: number | unknown + /** + * Input Lra + * + * Input loudness range in LU + */ + input_lra?: number | unknown + /** + * Normalization Type + * + * Type of normalization applied (Dynamic/Linear) + */ + normalization_type?: string | unknown + /** + * Output Lra + * + * Output loudness range in LU + */ + output_lra?: number | unknown + /** + * Output Threshold + * + * Output threshold in LUFS + */ + output_threshold?: number | unknown + /** + * Input Integrated + * + * Input integrated loudness in LUFS + */ + input_integrated?: number | unknown + /** + * Input True Peak + * + * Input true peak in dBTP + */ + input_true_peak?: number | unknown + /** + * Target Offset + * + * Target offset in LU + */ + target_offset?: number | unknown + /** + * Input Threshold + * + * Input threshold in LUFS + */ + input_threshold?: number | unknown +} + +/** + * LoudnormOutput + */ +export type SchemaFfmpegApiLoudnormOutput = { + /** + * Structured loudness measurement summary (if requested) + */ + summary?: SchemaLoudnormSummary | unknown + audio: SchemaFile +} + +/** + * LoudnormInput + */ +export type SchemaFfmpegApiLoudnormInput = { + /** + * Measured Tp + * + * Measured true peak of input file in dBTP. Required for linear mode. + */ + measured_tp?: number | unknown + /** + * Linear + * + * Use linear normalization mode (single-pass). If false, uses dynamic mode (two-pass for better quality). + */ + linear?: boolean + /** + * Offset + * + * Offset gain in dB applied before the true-peak limiter + */ + offset?: number + /** + * Measured I + * + * Measured integrated loudness of input file in LUFS. Required for linear mode. + */ + measured_i?: number | unknown + /** + * Print Summary + * + * Return loudness measurement summary with the normalized audio + */ + print_summary?: boolean + /** + * Measured Lra + * + * Measured loudness range of input file in LU. Required for linear mode. + */ + measured_lra?: number | unknown + /** + * Measured Thresh + * + * Measured threshold of input file in LUFS. Required for linear mode. + */ + measured_thresh?: number | unknown + /** + * Dual Mono + * + * Treat mono input files as dual-mono for correct EBU R128 measurement on stereo systems + */ + dual_mono?: boolean + /** + * True Peak + * + * Maximum true peak in dBTP. + */ + true_peak?: number + /** + * Audio Url + * + * URL of the audio file to normalize + */ + audio_url: string + /** + * Integrated Loudness + * + * Integrated loudness target in LUFS. + */ + integrated_loudness?: number + /** + * Loudness Range + * + * Loudness range target in LU + */ + loudness_range?: number +} + +export type SchemaQueueStatus = { + status: 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED' + /** + * The request id. + */ + request_id: string + /** + * The response url. + */ + response_url?: string + /** + * The status url. + */ + status_url?: string + /** + * The cancel url. + */ + cancel_url?: string + /** + * The logs. + */ + logs?: { + [key: string]: unknown + } + /** + * The metrics. + */ + metrics?: { + [key: string]: unknown + } + /** + * The queue position. 
+ */ + queue_position?: number +} + +export type GetFalAiFfmpegApiLoudnormRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ffmpeg-api/loudnorm/requests/{request_id}/status' +} + +export type GetFalAiFfmpegApiLoudnormRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFfmpegApiLoudnormRequestsByRequestIdStatusResponse = + GetFalAiFfmpegApiLoudnormRequestsByRequestIdStatusResponses[keyof GetFalAiFfmpegApiLoudnormRequestsByRequestIdStatusResponses] + +export type PutFalAiFfmpegApiLoudnormRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ffmpeg-api/loudnorm/requests/{request_id}/cancel' +} + +export type PutFalAiFfmpegApiLoudnormRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFfmpegApiLoudnormRequestsByRequestIdCancelResponse = + PutFalAiFfmpegApiLoudnormRequestsByRequestIdCancelResponses[keyof PutFalAiFfmpegApiLoudnormRequestsByRequestIdCancelResponses] + +export type PostFalAiFfmpegApiLoudnormData = { + body: SchemaFfmpegApiLoudnormInput + path?: never + query?: never + url: '/fal-ai/ffmpeg-api/loudnorm' +} + +export type PostFalAiFfmpegApiLoudnormResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFfmpegApiLoudnormResponse = + PostFalAiFfmpegApiLoudnormResponses[keyof PostFalAiFfmpegApiLoudnormResponses] + +export type GetFalAiFfmpegApiLoudnormRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ffmpeg-api/loudnorm/requests/{request_id}' +} + +export type GetFalAiFfmpegApiLoudnormRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFfmpegApiLoudnormOutput +} + +export type GetFalAiFfmpegApiLoudnormRequestsByRequestIdResponse = + GetFalAiFfmpegApiLoudnormRequestsByRequestIdResponses[keyof GetFalAiFfmpegApiLoudnormRequestsByRequestIdResponses] + +export type GetFalAiFfmpegApiWaveformRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ffmpeg-api/waveform/requests/{request_id}/status' +} + +export type GetFalAiFfmpegApiWaveformRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFfmpegApiWaveformRequestsByRequestIdStatusResponse = + GetFalAiFfmpegApiWaveformRequestsByRequestIdStatusResponses[keyof GetFalAiFfmpegApiWaveformRequestsByRequestIdStatusResponses] + +export type PutFalAiFfmpegApiWaveformRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ffmpeg-api/waveform/requests/{request_id}/cancel' +} + +export type PutFalAiFfmpegApiWaveformRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiFfmpegApiWaveformRequestsByRequestIdCancelResponse = + PutFalAiFfmpegApiWaveformRequestsByRequestIdCancelResponses[keyof PutFalAiFfmpegApiWaveformRequestsByRequestIdCancelResponses] + +export type PostFalAiFfmpegApiWaveformData = { + body: SchemaFfmpegApiWaveformInput + path?: never + query?: never + url: '/fal-ai/ffmpeg-api/waveform' +} + +export type PostFalAiFfmpegApiWaveformResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFfmpegApiWaveformResponse = + PostFalAiFfmpegApiWaveformResponses[keyof PostFalAiFfmpegApiWaveformResponses] + +export type GetFalAiFfmpegApiWaveformRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ffmpeg-api/waveform/requests/{request_id}' +} + +export type GetFalAiFfmpegApiWaveformRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFfmpegApiWaveformOutput +} + +export type GetFalAiFfmpegApiWaveformRequestsByRequestIdResponse = + GetFalAiFfmpegApiWaveformRequestsByRequestIdResponses[keyof GetFalAiFfmpegApiWaveformRequestsByRequestIdResponses] + +export type GetFalAiFfmpegApiMetadataRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ffmpeg-api/metadata/requests/{request_id}/status' +} + +export type GetFalAiFfmpegApiMetadataRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFfmpegApiMetadataRequestsByRequestIdStatusResponse = + GetFalAiFfmpegApiMetadataRequestsByRequestIdStatusResponses[keyof GetFalAiFfmpegApiMetadataRequestsByRequestIdStatusResponses] + +export type PutFalAiFfmpegApiMetadataRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ffmpeg-api/metadata/requests/{request_id}/cancel' +} + +export type PutFalAiFfmpegApiMetadataRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFfmpegApiMetadataRequestsByRequestIdCancelResponse = + PutFalAiFfmpegApiMetadataRequestsByRequestIdCancelResponses[keyof PutFalAiFfmpegApiMetadataRequestsByRequestIdCancelResponses] + +export type PostFalAiFfmpegApiMetadataData = { + body: SchemaFfmpegApiMetadataInput + path?: never + query?: never + url: '/fal-ai/ffmpeg-api/metadata' +} + +export type PostFalAiFfmpegApiMetadataResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFfmpegApiMetadataResponse = + PostFalAiFfmpegApiMetadataResponses[keyof PostFalAiFfmpegApiMetadataResponses] + +export type GetFalAiFfmpegApiMetadataRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ffmpeg-api/metadata/requests/{request_id}' +} + +export type GetFalAiFfmpegApiMetadataRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaFfmpegApiMetadataOutput +} + +export type GetFalAiFfmpegApiMetadataRequestsByRequestIdResponse = + GetFalAiFfmpegApiMetadataRequestsByRequestIdResponses[keyof GetFalAiFfmpegApiMetadataRequestsByRequestIdResponses] diff --git a/packages/typescript/ai-fal/src/generated/json/zod.gen.ts b/packages/typescript/ai-fal/src/generated/json/zod.gen.ts new file mode 100644 index 00000000..75ac8afe --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/json/zod.gen.ts @@ -0,0 +1,613 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { z } from 'zod' + +/** + * AudioTrack + */ +export const zSchemaAudioTrack = z.object({ + codec: z.string().register(z.globalRegistry, { + description: "Audio codec used (e.g., 'aac', 'mp3')", + }), + channels: z.int().register(z.globalRegistry, { + description: 'Number of audio channels', + }), + sample_rate: z.int().register(z.globalRegistry, { + description: 'Audio sample rate in Hz', + }), + bitrate: z.int().register(z.globalRegistry, { + description: 'Audio bitrate in bits per second', + }), +}) + +/** + * Resolution + */ +export const zSchemaResolution = z.object({ + height: z.int().register(z.globalRegistry, { + description: 'Height of the video in pixels', + }), + aspect_ratio: z.string().register(z.globalRegistry, { + description: "Display aspect ratio (e.g., '16:9')", + }), + width: z.int().register(z.globalRegistry, { + description: 'Width of the video in pixels', + }), +}) + +/** + * VideoFormat + */ +export const zSchemaVideoFormat = z.object({ + container: z.string().register(z.globalRegistry, { + description: 'Container format of the video', + }), + level: z.number().register(z.globalRegistry, { + description: 'Codec level (e.g., 4.1)', + }), + pixel_format: z.string().register(z.globalRegistry, { + description: "Pixel format used (e.g., 'yuv420p')", + }), + video_codec: z.string().register(z.globalRegistry, { + description: "Video codec used (e.g., 'h264')", + }), + profile: z.string().register(z.globalRegistry, { + description: "Codec profile (e.g., 'main', 'high')", + }), + bitrate: z.int().register(z.globalRegistry, { + description: 'Video bitrate in bits per second', + }), +}) + +/** + * Audio + */ +export const zSchemaAudio = z.object({ + file_size: z.int().register(z.globalRegistry, { + description: 'Size of the file in bytes', + }), + duration: z.number().register(z.globalRegistry, { + description: 'Duration of the media in seconds', + }), + bitrate: z.int().register(z.globalRegistry, { + description: 'Overall bitrate of the media in bits per second', + }), + url: z.string().register(z.globalRegistry, { + description: 'URL where the media file can be accessed', + }), + media_type: z + .optional( + z.string().register(z.globalRegistry, { + description: "Type of media (always 'audio')", + }), + ) + .default('audio'), + codec: z.string().register(z.globalRegistry, { + description: 'Codec used to encode the media', + }), + file_name: z.string().register(z.globalRegistry, { + description: 'Original filename of the media', + }), + sample_rate: z.int().register(z.globalRegistry, { + description: 'Audio sample rate in Hz', + }), + content_type: z.string().register(z.globalRegistry, { + description: 'MIME type of the media file', + }), + container: z.string().register(z.globalRegistry, { + description: "Container format of the media file (e.g., 'mp4', 'mov')", + }), + channels: z.int().register(z.globalRegistry, { + description: 'Number of audio channels', + }), +}) + +/** + * Video + */ +export const zSchemaVideo = 
z.object({ + file_size: z.int().register(z.globalRegistry, { + description: 'Size of the file in bytes', + }), + timebase: z.string().register(z.globalRegistry, { + description: 'Time base used for frame timestamps', + }), + start_frame_url: z.optional(z.union([z.string(), z.unknown()])), + duration: z.number().register(z.globalRegistry, { + description: 'Duration of the media in seconds', + }), + url: z.string().register(z.globalRegistry, { + description: 'URL where the media file can be accessed', + }), + fps: z.int().register(z.globalRegistry, { + description: 'Frames per second', + }), + codec: z.string().register(z.globalRegistry, { + description: 'Codec used to encode the media', + }), + media_type: z + .optional( + z.string().register(z.globalRegistry, { + description: "Type of media (always 'video')", + }), + ) + .default('video'), + end_frame_url: z.optional(z.union([z.string(), z.unknown()])), + content_type: z.string().register(z.globalRegistry, { + description: 'MIME type of the media file', + }), + container: z.string().register(z.globalRegistry, { + description: "Container format of the media file (e.g., 'mp4', 'mov')", + }), + bitrate: z.int().register(z.globalRegistry, { + description: 'Overall bitrate of the media in bits per second', + }), + format: zSchemaVideoFormat, + resolution: zSchemaResolution, + frame_count: z.int().register(z.globalRegistry, { + description: 'Total number of frames in the video', + }), + file_name: z.string().register(z.globalRegistry, { + description: 'Original filename of the media', + }), + audio: z.optional(z.union([zSchemaAudioTrack, z.unknown()])), +}) + +/** + * MetadataOutput + */ +export const zSchemaFfmpegApiMetadataOutput = z.object({ + media: z.union([zSchemaVideo, zSchemaAudio]), +}) + +/** + * MetadataInput + */ +export const zSchemaFfmpegApiMetadataInput = z.object({ + extract_frames: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to extract the start and end frames for videos. Note that when true the request will be slower.', + }), + ) + .default(false), + media_url: z.string().register(z.globalRegistry, { + description: 'URL of the media file (video or audio) to analyze', + }), +}) + +/** + * WaveformOutput + */ +export const zSchemaFfmpegApiWaveformOutput = z.object({ + precision: z.int().register(z.globalRegistry, { + description: 'Number of decimal places used in the waveform values', + }), + duration: z.number().register(z.globalRegistry, { + description: 'Duration of the audio in seconds', + }), + points: z.int().register(z.globalRegistry, { + description: 'Number of points in the waveform data', + }), + waveform: z.array(z.number()).register(z.globalRegistry, { + description: + 'Normalized waveform data as an array of values between -1 and 1. The number of points is determined by audio duration × points_per_second.', + }), +}) + +/** + * WaveformInput + */ +export const zSchemaFfmpegApiWaveformInput = z.object({ + precision: z + .optional( + z.int().gte(1).lte(6).register(z.globalRegistry, { + description: + 'Number of decimal places for the waveform values. Higher values provide more precision but increase payload size.', + }), + ) + .default(2), + smoothing_window: z + .optional( + z.int().gte(1).lte(21).register(z.globalRegistry, { + description: + 'Size of the smoothing window. Higher values create a smoother waveform. 
Must be an odd number.', + }), + ) + .default(3), + media_url: z.string().register(z.globalRegistry, { + description: 'URL of the audio file to analyze', + }), + points_per_second: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Controls how many points are sampled per second of audio. Lower values (e.g. 1-2) create a coarser waveform, higher values (e.g. 4-10) create a more detailed one.', + }), + ) + .default(4), +}) + +/** + * File + */ +export const zSchemaFile = z.object({ + file_size: z.optional(z.union([z.int(), z.unknown()])), + file_name: z.optional(z.union([z.string(), z.unknown()])), + content_type: z.optional(z.union([z.string(), z.unknown()])), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), +}) + +/** + * LoudnormSummary + */ +export const zSchemaLoudnormSummary = z.object({ + output_integrated: z.optional(z.union([z.number(), z.unknown()])), + output_true_peak: z.optional(z.union([z.number(), z.unknown()])), + input_lra: z.optional(z.union([z.number(), z.unknown()])), + normalization_type: z.optional(z.union([z.string(), z.unknown()])), + output_lra: z.optional(z.union([z.number(), z.unknown()])), + output_threshold: z.optional(z.union([z.number(), z.unknown()])), + input_integrated: z.optional(z.union([z.number(), z.unknown()])), + input_true_peak: z.optional(z.union([z.number(), z.unknown()])), + target_offset: z.optional(z.union([z.number(), z.unknown()])), + input_threshold: z.optional(z.union([z.number(), z.unknown()])), +}) + +/** + * LoudnormOutput + */ +export const zSchemaFfmpegApiLoudnormOutput = z.object({ + summary: z.optional(z.union([zSchemaLoudnormSummary, z.unknown()])), + audio: zSchemaFile, +}) + +/** + * LoudnormInput + */ +export const zSchemaFfmpegApiLoudnormInput = z.object({ + measured_tp: z.optional(z.union([z.number().gte(-99).lte(99), z.unknown()])), + linear: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Use linear normalization mode (single-pass). 
If false, uses dynamic mode (two-pass for better quality).', + }), + ) + .default(false), + offset: z + .optional( + z.number().gte(-99).lte(99).register(z.globalRegistry, { + description: 'Offset gain in dB applied before the true-peak limiter', + }), + ) + .default(0), + measured_i: z.optional(z.union([z.number().gte(-99).lte(0), z.unknown()])), + print_summary: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Return loudness measurement summary with the normalized audio', + }), + ) + .default(false), + measured_lra: z.optional(z.union([z.number().gte(0).lte(99), z.unknown()])), + measured_thresh: z.optional( + z.union([z.number().gte(-99).lte(0), z.unknown()]), + ), + dual_mono: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Treat mono input files as dual-mono for correct EBU R128 measurement on stereo systems', + }), + ) + .default(false), + true_peak: z + .optional( + z.number().gte(-9).lte(0).register(z.globalRegistry, { + description: 'Maximum true peak in dBTP.', + }), + ) + .default(-0.1), + audio_url: z.string().register(z.globalRegistry, { + description: 'URL of the audio file to normalize', + }), + integrated_loudness: z + .optional( + z.number().gte(-70).lte(-5).register(z.globalRegistry, { + description: 'Integrated loudness target in LUFS.', + }), + ) + .default(-18), + loudness_range: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: 'Loudness range target in LU', + }), + ) + .default(7), +}) + +export const zSchemaQueueStatus = z.object({ + status: z.enum(['IN_QUEUE', 'IN_PROGRESS', 'COMPLETED']), + request_id: z.string().register(z.globalRegistry, { + description: 'The request id.', + }), + response_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The response url.', + }), + ), + status_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The status url.', + }), + ), + cancel_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The cancel url.', + }), + ), + logs: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The logs.', + }), + ), + metrics: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The metrics.', + }), + ), + queue_position: z.optional( + z.int().register(z.globalRegistry, { + description: 'The queue position.', + }), + ), +}) + +export const zGetFalAiFfmpegApiLoudnormRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiFfmpegApiLoudnormRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFfmpegApiLoudnormRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiFfmpegApiLoudnormRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFfmpegApiLoudnormData = z.object({ + body: zSchemaFfmpegApiLoudnormInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFfmpegApiLoudnormResponse = zSchemaQueueStatus + +export const zGetFalAiFfmpegApiLoudnormRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFfmpegApiLoudnormRequestsByRequestIdResponse = + zSchemaFfmpegApiLoudnormOutput + +export const zGetFalAiFfmpegApiWaveformRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiFfmpegApiWaveformRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFfmpegApiWaveformRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiFfmpegApiWaveformRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFfmpegApiWaveformData = z.object({ + body: zSchemaFfmpegApiWaveformInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFfmpegApiWaveformResponse = zSchemaQueueStatus + +export const zGetFalAiFfmpegApiWaveformRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFfmpegApiWaveformRequestsByRequestIdResponse = + zSchemaFfmpegApiWaveformOutput + +export const zGetFalAiFfmpegApiMetadataRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. 
+ */ +export const zGetFalAiFfmpegApiMetadataRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFfmpegApiMetadataRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiFfmpegApiMetadataRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFfmpegApiMetadataData = z.object({ + body: zSchemaFfmpegApiMetadataInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFfmpegApiMetadataResponse = zSchemaQueueStatus + +export const zGetFalAiFfmpegApiMetadataRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFfmpegApiMetadataRequestsByRequestIdResponse = + zSchemaFfmpegApiMetadataOutput diff --git a/packages/typescript/ai-fal/src/generated/llm/endpoint-map.ts b/packages/typescript/ai-fal/src/generated/llm/endpoint-map.ts new file mode 100644 index 00000000..4bbc16e6 --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/llm/endpoint-map.ts @@ -0,0 +1,100 @@ +// AUTO-GENERATED - Do not edit manually +// Generated from types.gen.ts via scripts/generate-fal-endpoint-maps.ts + +import { + zSchemaQwen3GuardInput, + zSchemaQwen3GuardOutput, + zSchemaRouterInput, + zSchemaRouterOpenaiV1ChatCompletionsInput, + zSchemaRouterOpenaiV1ChatCompletionsOutput, + zSchemaRouterOpenaiV1EmbeddingsInput, + zSchemaRouterOpenaiV1EmbeddingsOutput, + zSchemaRouterOpenaiV1ResponsesInput, + zSchemaRouterOpenaiV1ResponsesOutput, + zSchemaRouterOutput, + zSchemaVideoPromptGeneratorInput, + zSchemaVideoPromptGeneratorOutput, +} from './zod.gen' + +import type { + SchemaQwen3GuardInput, + SchemaQwen3GuardOutput, + SchemaRouterInput, + SchemaRouterOpenaiV1ChatCompletionsInput, + SchemaRouterOpenaiV1ChatCompletionsOutput, + SchemaRouterOpenaiV1EmbeddingsInput, + SchemaRouterOpenaiV1EmbeddingsOutput, + SchemaRouterOpenaiV1ResponsesInput, + SchemaRouterOpenaiV1ResponsesOutput, + SchemaRouterOutput, + SchemaVideoPromptGeneratorInput, + SchemaVideoPromptGeneratorOutput, +} from './types.gen' + +import type { z } from 'zod' + +export type LlmEndpointMap = { + 'openrouter/router/openai/v1/responses': { + input: SchemaRouterOpenaiV1ResponsesInput + output: SchemaRouterOpenaiV1ResponsesOutput + } + 'openrouter/router/openai/v1/embeddings': { + input: SchemaRouterOpenaiV1EmbeddingsInput + output: SchemaRouterOpenaiV1EmbeddingsOutput + } + 'openrouter/router': { + input: SchemaRouterInput + output: SchemaRouterOutput + } + 'openrouter/router/openai/v1/chat/completions': { + input: SchemaRouterOpenaiV1ChatCompletionsInput + output: SchemaRouterOpenaiV1ChatCompletionsOutput + } + 'fal-ai/qwen-3-guard': { + input: SchemaQwen3GuardInput + output: SchemaQwen3GuardOutput + } + 'fal-ai/video-prompt-generator': { + input: SchemaVideoPromptGeneratorInput + output: SchemaVideoPromptGeneratorOutput + } +} + +/** Union type of all 
llm model endpoint IDs */ +export type LlmModel = keyof LlmEndpointMap + +export const LlmSchemaMap: Record< + LlmModel, + { input: z.ZodSchema; output: z.ZodSchema } +> = { + ['openrouter/router/openai/v1/responses']: { + input: zSchemaRouterOpenaiV1ResponsesInput, + output: zSchemaRouterOpenaiV1ResponsesOutput, + }, + ['openrouter/router/openai/v1/embeddings']: { + input: zSchemaRouterOpenaiV1EmbeddingsInput, + output: zSchemaRouterOpenaiV1EmbeddingsOutput, + }, + ['openrouter/router']: { + input: zSchemaRouterInput, + output: zSchemaRouterOutput, + }, + ['openrouter/router/openai/v1/chat/completions']: { + input: zSchemaRouterOpenaiV1ChatCompletionsInput, + output: zSchemaRouterOpenaiV1ChatCompletionsOutput, + }, + ['fal-ai/qwen-3-guard']: { + input: zSchemaQwen3GuardInput, + output: zSchemaQwen3GuardOutput, + }, + ['fal-ai/video-prompt-generator']: { + input: zSchemaVideoPromptGeneratorInput, + output: zSchemaVideoPromptGeneratorOutput, + }, +} as const + +/** Get the input type for a specific llm model */ +export type LlmModelInput<T extends LlmModel> = LlmEndpointMap[T]['input'] + +/** Get the output type for a specific llm model */ +export type LlmModelOutput<T extends LlmModel> = LlmEndpointMap[T]['output'] diff --git a/packages/typescript/ai-fal/src/generated/llm/types.gen.ts b/packages/typescript/ai-fal/src/generated/llm/types.gen.ts new file mode 100644 index 00000000..6540e2c6 --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/llm/types.gen.ts @@ -0,0 +1,962 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: 'https://queue.fal.run' | (string & {}) +} + +/** + * OutputModel + */ +export type SchemaVideoPromptGeneratorOutput = { + /** + * Prompt + * + * Generated video prompt + */ + prompt: string +} + +/** + * InputModel + */ +export type SchemaVideoPromptGeneratorInput = { + /** + * Custom Elements + * + * Custom technical elements (optional) + */ + custom_elements?: string + /** + * Style + * + * Style of the video prompt + */ + style?: + | 'Minimalist' + | 'Simple' + | 'Detailed' + | 'Descriptive' + | 'Dynamic' + | 'Cinematic' + | 'Documentary' + | 'Animation' + | 'Action' + | 'Experimental' + /** + * Camera Direction + * + * Camera direction + */ + camera_direction?: + | 'None' + | 'Zoom in' + | 'Zoom out' + | 'Pan left' + | 'Pan right' + | 'Tilt up' + | 'Tilt down' + | 'Orbital rotation' + | 'Push in' + | 'Pull out' + | 'Track forward' + | 'Track backward' + | 'Spiral in' + | 'Spiral out' + | 'Arc movement' + | 'Diagonal traverse' + | 'Vertical rise' + | 'Vertical descent' + /** + * Pacing + * + * Pacing rhythm + */ + pacing?: + | 'None' + | 'Slow burn' + | 'Rhythmic pulse' + | 'Frantic energy' + | 'Ebb and flow' + | 'Hypnotic drift' + | 'Time-lapse rush' + | 'Stop-motion staccato' + | 'Gradual build' + | 'Quick cut rhythm' + | 'Long take meditation' + | 'Jump cut energy' + | 'Match cut flow' + | 'Cross-dissolve dreamscape' + | 'Parallel action' + | 'Slow motion impact' + | 'Ramping dynamics' + | 'Montage tempo' + | 'Continuous flow' + | 'Episodic breaks' + /** + * Special Effects + * + * Special effects approach + */ + special_effects?: + | 'None' + | 'Practical effects' + | 'CGI enhancement' + | 'Analog glitches' + | 'Light painting' + | 'Projection mapping' + | 'Nanosecond exposures' + | 'Double exposure' + | 'Smoke diffusion' + | 'Lens flare artistry' + | 'Particle systems' + | 'Holographic overlay' + | 'Chromatic aberration' + | 'Digital distortion' + | 'Wire removal' + | 'Motion capture' + | 'Miniature integration' + | 'Weather simulation' + | 
'Color grading' + | 'Mixed media composite' + | 'Neural style transfer' + /** + * Image Url + * + * URL of an image to analyze and incorporate into the video prompt (optional) + */ + image_url?: string + /** + * Model + * + * Model to use + */ + model?: + | 'anthropic/claude-3.5-sonnet' + | 'anthropic/claude-3-5-haiku' + | 'anthropic/claude-3-haiku' + | 'google/gemini-2.5-flash-lite' + | 'google/gemini-2.0-flash-001' + | 'meta-llama/llama-3.2-1b-instruct' + | 'meta-llama/llama-3.2-3b-instruct' + | 'meta-llama/llama-3.1-8b-instruct' + | 'meta-llama/llama-3.1-70b-instruct' + | 'openai/gpt-4o-mini' + | 'openai/gpt-4o' + | 'deepseek/deepseek-r1' + /** + * Camera Style + * + * Camera movement style + */ + camera_style?: + | 'None' + | 'Steadicam flow' + | 'Drone aerials' + | 'Handheld urgency' + | 'Crane elegance' + | 'Dolly precision' + | 'VR 360' + | 'Multi-angle rig' + | 'Static tripod' + | 'Gimbal smoothness' + | 'Slider motion' + | 'Jib sweep' + | 'POV immersion' + | 'Time-slice array' + | 'Macro extreme' + | 'Tilt-shift miniature' + | 'Snorricam character' + | 'Whip pan dynamics' + | 'Dutch angle tension' + | 'Underwater housing' + | 'Periscope lens' + /** + * Input Concept + * + * Core concept or thematic input for the video prompt + */ + input_concept: string + /** + * Prompt Length + * + * Length of the prompt + */ + prompt_length?: 'Short' | 'Medium' | 'Long' +} + +/** + * Qwen3GuardOutput + */ +export type SchemaQwen3GuardOutput = { + /** + * Categories + * + * The confidence score of the classification + */ + categories: Array< + | 'Violent' + | 'Non-violent Illegal Acts' + | 'Sexual Content or Sexual Acts' + | 'PII' + | 'Suicide & Self-Harm' + | 'Unethical Acts' + | 'Politically Sensitive Topics' + | 'Copyright Violation' + | 'Jailbreak' + | 'None' + > + /** + * Label + * + * The classification label + */ + label: 'Safe' | 'Unsafe' | 'Controversial' +} + +/** + * Qwen3GuardInput + */ +export type SchemaQwen3GuardInput = { + /** + * Prompt + * + * The input text to be classified + */ + prompt: string +} + +/** + * Schema referenced but not defined by fal.ai (missing from source OpenAPI spec) + */ +export type SchemaRouterOpenaiV1ChatCompletionsInput = { + [key: string]: unknown +} + +export type SchemaRouterOpenaiV1ChatCompletionsOutput = unknown + +/** + * UsageInfo + */ +export type SchemaUsageInfo = { + /** + * Prompt Tokens + */ + prompt_tokens?: number + /** + * Total Tokens + */ + total_tokens?: number + /** + * Completion Tokens + */ + completion_tokens?: number + /** + * Cost + */ + cost: number +} + +/** + * ChatOutput + */ +export type SchemaRouterOutput = { + /** + * Usage + * + * Token usage information + */ + usage?: SchemaUsageInfo + /** + * Error + * + * Error message if an error occurred + */ + error?: string + /** + * Partial + * + * Whether the output is partial + */ + partial?: boolean + /** + * Reasoning + * + * Generated reasoning for the final answer + */ + reasoning?: string + /** + * Output + * + * Generated output + */ + output: string +} + +/** + * ChatInput + */ +export type SchemaRouterInput = { + /** + * Model + * + * Name of the model to use. Charged based on actual token usage. + */ + model: string + /** + * Prompt + * + * Prompt to be used for the chat completion + */ + prompt: string + /** + * Max Tokens + * + * This sets the upper limit for the number of tokens the model can generate in response. It won't produce more than this limit. The maximum value is the context length minus the prompt length. 
+ */ + max_tokens?: number + /** + * Temperature + * + * This setting influences the variety in the model's responses. Lower values lead to more predictable and typical responses, while higher values encourage more diverse and less common responses. At 0, the model always gives the same response for a given input. + */ + temperature?: number + /** + * System Prompt + * + * System prompt to provide context or instructions to the model + */ + system_prompt?: string + /** + * Reasoning + * + * Should reasoning be the part of the final answer. + */ + reasoning?: boolean +} + +/** + * Schema referenced but not defined by fal.ai (missing from source OpenAPI spec) + */ +export type SchemaRouterOpenaiV1EmbeddingsInput = { + [key: string]: unknown +} + +export type SchemaRouterOpenaiV1EmbeddingsOutput = unknown + +/** + * Schema referenced but not defined by fal.ai (missing from source OpenAPI spec) + */ +export type SchemaRouterOpenaiV1ResponsesInput = { + [key: string]: unknown +} + +export type SchemaRouterOpenaiV1ResponsesOutput = unknown + +export type SchemaQueueStatus = { + status: 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED' + /** + * The request id. + */ + request_id: string + /** + * The response url. + */ + response_url?: string + /** + * The status url. + */ + status_url?: string + /** + * The cancel url. + */ + cancel_url?: string + /** + * The logs. + */ + logs?: { + [key: string]: unknown + } + /** + * The metrics. + */ + metrics?: { + [key: string]: unknown + } + /** + * The queue position. + */ + queue_position?: number +} + +export type GetOpenrouterRouterOpenaiV1ResponsesRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/openrouter/router/openai/v1/responses/requests/{request_id}/status' + } + +export type GetOpenrouterRouterOpenaiV1ResponsesRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetOpenrouterRouterOpenaiV1ResponsesRequestsByRequestIdStatusResponse = + GetOpenrouterRouterOpenaiV1ResponsesRequestsByRequestIdStatusResponses[keyof GetOpenrouterRouterOpenaiV1ResponsesRequestsByRequestIdStatusResponses] + +export type PutOpenrouterRouterOpenaiV1ResponsesRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/openrouter/router/openai/v1/responses/requests/{request_id}/cancel' + } + +export type PutOpenrouterRouterOpenaiV1ResponsesRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutOpenrouterRouterOpenaiV1ResponsesRequestsByRequestIdCancelResponse = + PutOpenrouterRouterOpenaiV1ResponsesRequestsByRequestIdCancelResponses[keyof PutOpenrouterRouterOpenaiV1ResponsesRequestsByRequestIdCancelResponses] + +export type PostOpenrouterRouterOpenaiV1ResponsesData = { + body: SchemaRouterOpenaiV1ResponsesInput + path?: never + query?: never + url: '/openrouter/router/openai/v1/responses' +} + +export type PostOpenrouterRouterOpenaiV1ResponsesResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostOpenrouterRouterOpenaiV1ResponsesResponse = + PostOpenrouterRouterOpenaiV1ResponsesResponses[keyof PostOpenrouterRouterOpenaiV1ResponsesResponses] + +export type GetOpenrouterRouterOpenaiV1ResponsesRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/openrouter/router/openai/v1/responses/requests/{request_id}' +} + +export type GetOpenrouterRouterOpenaiV1ResponsesRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaRouterOpenaiV1ResponsesOutput +} + +export type GetOpenrouterRouterOpenaiV1ResponsesRequestsByRequestIdResponse = + GetOpenrouterRouterOpenaiV1ResponsesRequestsByRequestIdResponses[keyof GetOpenrouterRouterOpenaiV1ResponsesRequestsByRequestIdResponses] + +export type GetOpenrouterRouterOpenaiV1EmbeddingsRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/openrouter/router/openai/v1/embeddings/requests/{request_id}/status' + } + +export type GetOpenrouterRouterOpenaiV1EmbeddingsRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetOpenrouterRouterOpenaiV1EmbeddingsRequestsByRequestIdStatusResponse = + GetOpenrouterRouterOpenaiV1EmbeddingsRequestsByRequestIdStatusResponses[keyof GetOpenrouterRouterOpenaiV1EmbeddingsRequestsByRequestIdStatusResponses] + +export type PutOpenrouterRouterOpenaiV1EmbeddingsRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/openrouter/router/openai/v1/embeddings/requests/{request_id}/cancel' + } + +export type PutOpenrouterRouterOpenaiV1EmbeddingsRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutOpenrouterRouterOpenaiV1EmbeddingsRequestsByRequestIdCancelResponse = + PutOpenrouterRouterOpenaiV1EmbeddingsRequestsByRequestIdCancelResponses[keyof PutOpenrouterRouterOpenaiV1EmbeddingsRequestsByRequestIdCancelResponses] + +export type PostOpenrouterRouterOpenaiV1EmbeddingsData = { + body: SchemaRouterOpenaiV1EmbeddingsInput + path?: never + query?: never + url: '/openrouter/router/openai/v1/embeddings' +} + +export type PostOpenrouterRouterOpenaiV1EmbeddingsResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostOpenrouterRouterOpenaiV1EmbeddingsResponse = + PostOpenrouterRouterOpenaiV1EmbeddingsResponses[keyof PostOpenrouterRouterOpenaiV1EmbeddingsResponses] + +export type GetOpenrouterRouterOpenaiV1EmbeddingsRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/openrouter/router/openai/v1/embeddings/requests/{request_id}' +} + +export type GetOpenrouterRouterOpenaiV1EmbeddingsRequestsByRequestIdResponses = + { + /** + * Result of the request. 
+ */ + 200: SchemaRouterOpenaiV1EmbeddingsOutput + } + +export type GetOpenrouterRouterOpenaiV1EmbeddingsRequestsByRequestIdResponse = + GetOpenrouterRouterOpenaiV1EmbeddingsRequestsByRequestIdResponses[keyof GetOpenrouterRouterOpenaiV1EmbeddingsRequestsByRequestIdResponses] + +export type GetOpenrouterRouterRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/openrouter/router/requests/{request_id}/status' +} + +export type GetOpenrouterRouterRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetOpenrouterRouterRequestsByRequestIdStatusResponse = + GetOpenrouterRouterRequestsByRequestIdStatusResponses[keyof GetOpenrouterRouterRequestsByRequestIdStatusResponses] + +export type PutOpenrouterRouterRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/openrouter/router/requests/{request_id}/cancel' +} + +export type PutOpenrouterRouterRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutOpenrouterRouterRequestsByRequestIdCancelResponse = + PutOpenrouterRouterRequestsByRequestIdCancelResponses[keyof PutOpenrouterRouterRequestsByRequestIdCancelResponses] + +export type PostOpenrouterRouterData = { + body: SchemaRouterInput + path?: never + query?: never + url: '/openrouter/router' +} + +export type PostOpenrouterRouterResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostOpenrouterRouterResponse = + PostOpenrouterRouterResponses[keyof PostOpenrouterRouterResponses] + +export type GetOpenrouterRouterRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/openrouter/router/requests/{request_id}' +} + +export type GetOpenrouterRouterRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaRouterOutput +} + +export type GetOpenrouterRouterRequestsByRequestIdResponse = + GetOpenrouterRouterRequestsByRequestIdResponses[keyof GetOpenrouterRouterRequestsByRequestIdResponses] + +export type GetOpenrouterRouterOpenaiV1ChatCompletionsRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/openrouter/router/openai/v1/chat/completions/requests/{request_id}/status' + } + +export type GetOpenrouterRouterOpenaiV1ChatCompletionsRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
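These four aliases together describe the whole queue lifecycle for `/openrouter/router`: the POST answers with a `SchemaQueueStatus` ticket, the status route is polled, and the result route finally yields `SchemaRouterOutput`. A sketch of the round trip; the fetch wiring, the `Key` auth header, and the one-second poll interval are assumptions, only the types come from this diff:

```ts
import type {
  SchemaQueueStatus,
  SchemaRouterInput,
  SchemaRouterOutput,
} from './types.gen'

const BASE = 'https://queue.fal.run'
const headers = {
  Authorization: `Key ${process.env.FAL_KEY}`, // assumed scheme per the spec's "Fal Key"
  'Content-Type': 'application/json',
}

async function runRouter(input: SchemaRouterInput): Promise<SchemaRouterOutput> {
  // 1. Submit: the POST answers with a queue ticket, not the completion itself.
  const ticket: SchemaQueueStatus = await fetch(`${BASE}/openrouter/router`, {
    method: 'POST',
    headers,
    body: JSON.stringify(input),
  }).then((r) => r.json())

  // 2. Poll the status route until the job leaves the queue.
  let status = ticket
  while (status.status !== 'COMPLETED') {
    await new Promise((r) => setTimeout(r, 1_000))
    status = await fetch(
      `${BASE}/openrouter/router/requests/${ticket.request_id}/status`,
      { headers },
    ).then((r) => r.json())
  }

  // 3. Fetch the result; per the types above this is SchemaRouterOutput.
  return fetch(`${BASE}/openrouter/router/requests/${ticket.request_id}`, {
    headers,
  }).then((r) => r.json())
}
```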
+ */ + 200: SchemaQueueStatus + } + +export type GetOpenrouterRouterOpenaiV1ChatCompletionsRequestsByRequestIdStatusResponse = + GetOpenrouterRouterOpenaiV1ChatCompletionsRequestsByRequestIdStatusResponses[keyof GetOpenrouterRouterOpenaiV1ChatCompletionsRequestsByRequestIdStatusResponses] + +export type PutOpenrouterRouterOpenaiV1ChatCompletionsRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/openrouter/router/openai/v1/chat/completions/requests/{request_id}/cancel' + } + +export type PutOpenrouterRouterOpenaiV1ChatCompletionsRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutOpenrouterRouterOpenaiV1ChatCompletionsRequestsByRequestIdCancelResponse = + PutOpenrouterRouterOpenaiV1ChatCompletionsRequestsByRequestIdCancelResponses[keyof PutOpenrouterRouterOpenaiV1ChatCompletionsRequestsByRequestIdCancelResponses] + +export type PostOpenrouterRouterOpenaiV1ChatCompletionsData = { + body: SchemaRouterOpenaiV1ChatCompletionsInput + path?: never + query?: never + url: '/openrouter/router/openai/v1/chat/completions' +} + +export type PostOpenrouterRouterOpenaiV1ChatCompletionsResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostOpenrouterRouterOpenaiV1ChatCompletionsResponse = + PostOpenrouterRouterOpenaiV1ChatCompletionsResponses[keyof PostOpenrouterRouterOpenaiV1ChatCompletionsResponses] + +export type GetOpenrouterRouterOpenaiV1ChatCompletionsRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/openrouter/router/openai/v1/chat/completions/requests/{request_id}' + } + +export type GetOpenrouterRouterOpenaiV1ChatCompletionsRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaRouterOpenaiV1ChatCompletionsOutput + } + +export type GetOpenrouterRouterOpenaiV1ChatCompletionsRequestsByRequestIdResponse = + GetOpenrouterRouterOpenaiV1ChatCompletionsRequestsByRequestIdResponses[keyof GetOpenrouterRouterOpenaiV1ChatCompletionsRequestsByRequestIdResponses] + +export type GetFalAiQwen3GuardRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-3-guard/requests/{request_id}/status' +} + +export type GetFalAiQwen3GuardRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiQwen3GuardRequestsByRequestIdStatusResponse = + GetFalAiQwen3GuardRequestsByRequestIdStatusResponses[keyof GetFalAiQwen3GuardRequestsByRequestIdStatusResponses] + +export type PutFalAiQwen3GuardRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-3-guard/requests/{request_id}/cancel' +} + +export type PutFalAiQwen3GuardRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiQwen3GuardRequestsByRequestIdCancelResponse = + PutFalAiQwen3GuardRequestsByRequestIdCancelResponses[keyof PutFalAiQwen3GuardRequestsByRequestIdCancelResponses] + +export type PostFalAiQwen3GuardData = { + body: SchemaQwen3GuardInput + path?: never + query?: never + url: '/fal-ai/qwen-3-guard' +} + +export type PostFalAiQwen3GuardResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwen3GuardResponse = + PostFalAiQwen3GuardResponses[keyof PostFalAiQwen3GuardResponses] + +export type GetFalAiQwen3GuardRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-3-guard/requests/{request_id}' +} + +export type GetFalAiQwen3GuardRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaQwen3GuardOutput +} + +export type GetFalAiQwen3GuardRequestsByRequestIdResponse = + GetFalAiQwen3GuardRequestsByRequestIdResponses[keyof GetFalAiQwen3GuardRequestsByRequestIdResponses] + +export type GetFalAiVideoPromptGeneratorRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/video-prompt-generator/requests/{request_id}/status' +} + +export type GetFalAiVideoPromptGeneratorRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiVideoPromptGeneratorRequestsByRequestIdStatusResponse = + GetFalAiVideoPromptGeneratorRequestsByRequestIdStatusResponses[keyof GetFalAiVideoPromptGeneratorRequestsByRequestIdStatusResponses] + +export type PutFalAiVideoPromptGeneratorRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/video-prompt-generator/requests/{request_id}/cancel' +} + +export type PutFalAiVideoPromptGeneratorRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiVideoPromptGeneratorRequestsByRequestIdCancelResponse = + PutFalAiVideoPromptGeneratorRequestsByRequestIdCancelResponses[keyof PutFalAiVideoPromptGeneratorRequestsByRequestIdCancelResponses] + +export type PostFalAiVideoPromptGeneratorData = { + body: SchemaVideoPromptGeneratorInput + path?: never + query?: never + url: '/fal-ai/video-prompt-generator' +} + +export type PostFalAiVideoPromptGeneratorResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiVideoPromptGeneratorResponse = + PostFalAiVideoPromptGeneratorResponses[keyof PostFalAiVideoPromptGeneratorResponses] + +export type GetFalAiVideoPromptGeneratorRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/video-prompt-generator/requests/{request_id}' +} + +export type GetFalAiVideoPromptGeneratorRequestsByRequestIdResponses = { + /** + * Result of the request. 
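Since `fal-ai/qwen-3-guard` follows the exact same queue choreography, a moderation gate reduces to one typed call. `queueResult` below is a hypothetical wrapper over the submit/poll/result sequence sketched earlier, not something this diff ships:

```ts
import type { SchemaQwen3GuardInput, SchemaQwen3GuardOutput } from './types.gen'

// Hypothetical helper: submit to the given queue path, poll, return the result.
declare function queueResult<I, O>(path: string, body: I): Promise<O>

async function isSafe(prompt: string): Promise<boolean> {
  const verdict = await queueResult<SchemaQwen3GuardInput, SchemaQwen3GuardOutput>(
    '/fal-ai/qwen-3-guard',
    { prompt },
  )
  // Treat anything not explicitly 'Safe' as blocked; 'Controversial' could
  // instead be routed to human review.
  return verdict.label === 'Safe'
}
```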
+ */ + 200: SchemaVideoPromptGeneratorOutput +} + +export type GetFalAiVideoPromptGeneratorRequestsByRequestIdResponse = + GetFalAiVideoPromptGeneratorRequestsByRequestIdResponses[keyof GetFalAiVideoPromptGeneratorRequestsByRequestIdResponses] diff --git a/packages/typescript/ai-fal/src/generated/llm/zod.gen.ts b/packages/typescript/ai-fal/src/generated/llm/zod.gen.ts new file mode 100644 index 00000000..b55d59c3 --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/llm/zod.gen.ts @@ -0,0 +1,855 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { z } from 'zod' + +/** + * OutputModel + */ +export const zSchemaVideoPromptGeneratorOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Generated video prompt', + }), +}) + +/** + * InputModel + */ +export const zSchemaVideoPromptGeneratorInput = z.object({ + custom_elements: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Custom technical elements (optional)', + }), + ) + .default(''), + style: z.optional( + z + .enum([ + 'Minimalist', + 'Simple', + 'Detailed', + 'Descriptive', + 'Dynamic', + 'Cinematic', + 'Documentary', + 'Animation', + 'Action', + 'Experimental', + ]) + .register(z.globalRegistry, { + description: 'Style of the video prompt', + }), + ), + camera_direction: z.optional( + z + .enum([ + 'None', + 'Zoom in', + 'Zoom out', + 'Pan left', + 'Pan right', + 'Tilt up', + 'Tilt down', + 'Orbital rotation', + 'Push in', + 'Pull out', + 'Track forward', + 'Track backward', + 'Spiral in', + 'Spiral out', + 'Arc movement', + 'Diagonal traverse', + 'Vertical rise', + 'Vertical descent', + ]) + .register(z.globalRegistry, { + description: 'Camera direction', + }), + ), + pacing: z.optional( + z + .enum([ + 'None', + 'Slow burn', + 'Rhythmic pulse', + 'Frantic energy', + 'Ebb and flow', + 'Hypnotic drift', + 'Time-lapse rush', + 'Stop-motion staccato', + 'Gradual build', + 'Quick cut rhythm', + 'Long take meditation', + 'Jump cut energy', + 'Match cut flow', + 'Cross-dissolve dreamscape', + 'Parallel action', + 'Slow motion impact', + 'Ramping dynamics', + 'Montage tempo', + 'Continuous flow', + 'Episodic breaks', + ]) + .register(z.globalRegistry, { + description: 'Pacing rhythm', + }), + ), + special_effects: z.optional( + z + .enum([ + 'None', + 'Practical effects', + 'CGI enhancement', + 'Analog glitches', + 'Light painting', + 'Projection mapping', + 'Nanosecond exposures', + 'Double exposure', + 'Smoke diffusion', + 'Lens flare artistry', + 'Particle systems', + 'Holographic overlay', + 'Chromatic aberration', + 'Digital distortion', + 'Wire removal', + 'Motion capture', + 'Miniature integration', + 'Weather simulation', + 'Color grading', + 'Mixed media composite', + 'Neural style transfer', + ]) + .register(z.globalRegistry, { + description: 'Special effects approach', + }), + ), + image_url: z.optional( + z.string().register(z.globalRegistry, { + description: + 'URL of an image to analyze and incorporate into the video prompt (optional)', + }), + ), + model: z.optional( + z + .enum([ + 'anthropic/claude-3.5-sonnet', + 'anthropic/claude-3-5-haiku', + 'anthropic/claude-3-haiku', + 'google/gemini-2.5-flash-lite', + 'google/gemini-2.0-flash-001', + 'meta-llama/llama-3.2-1b-instruct', + 'meta-llama/llama-3.2-3b-instruct', + 'meta-llama/llama-3.1-8b-instruct', + 'meta-llama/llama-3.1-70b-instruct', + 'openai/gpt-4o-mini', + 'openai/gpt-4o', + 'deepseek/deepseek-r1', + ]) + .register(z.globalRegistry, { + description: 'Model to use', + }), + ), + camera_style: 
z.optional( + z + .enum([ + 'None', + 'Steadicam flow', + 'Drone aerials', + 'Handheld urgency', + 'Crane elegance', + 'Dolly precision', + 'VR 360', + 'Multi-angle rig', + 'Static tripod', + 'Gimbal smoothness', + 'Slider motion', + 'Jib sweep', + 'POV immersion', + 'Time-slice array', + 'Macro extreme', + 'Tilt-shift miniature', + 'Snorricam character', + 'Whip pan dynamics', + 'Dutch angle tension', + 'Underwater housing', + 'Periscope lens', + ]) + .register(z.globalRegistry, { + description: 'Camera movement style', + }), + ), + input_concept: z.string().register(z.globalRegistry, { + description: 'Core concept or thematic input for the video prompt', + }), + prompt_length: z.optional( + z.enum(['Short', 'Medium', 'Long']).register(z.globalRegistry, { + description: 'Length of the prompt', + }), + ), +}) + +/** + * Qwen3GuardOutput + */ +export const zSchemaQwen3GuardOutput = z.object({ + categories: z + .array( + z.enum([ + 'Violent', + 'Non-violent Illegal Acts', + 'Sexual Content or Sexual Acts', + 'PII', + 'Suicide & Self-Harm', + 'Unethical Acts', + 'Politically Sensitive Topics', + 'Copyright Violation', + 'Jailbreak', + 'None', + ]), + ) + .register(z.globalRegistry, { + description: 'The confidence score of the classification', + }), + label: z + .enum(['Safe', 'Unsafe', 'Controversial']) + .register(z.globalRegistry, { + description: 'The classification label', + }), +}) + +/** + * Qwen3GuardInput + */ +export const zSchemaQwen3GuardInput = z.object({ + prompt: z.string().max(131072).register(z.globalRegistry, { + description: 'The input text to be classified', + }), +}) + +/** + * Schema referenced but not defined by fal.ai (missing from source OpenAPI spec) + */ +export const zSchemaRouterOpenaiV1ChatCompletionsInput = z + .record(z.string(), z.unknown()) + .register(z.globalRegistry, { + description: + 'Schema referenced but not defined by fal.ai (missing from source OpenAPI spec)', + }) + +export const zSchemaRouterOpenaiV1ChatCompletionsOutput = z.unknown() + +/** + * UsageInfo + */ +export const zSchemaUsageInfo = z.object({ + prompt_tokens: z.optional(z.int()), + total_tokens: z.optional(z.int()).default(0), + completion_tokens: z.optional(z.int()), + cost: z.number(), +}) + +/** + * ChatOutput + */ +export const zSchemaRouterOutput = z.object({ + usage: z.optional(zSchemaUsageInfo), + error: z.optional( + z.string().register(z.globalRegistry, { + description: 'Error message if an error occurred', + }), + ), + partial: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the output is partial', + }), + ) + .default(false), + reasoning: z.optional( + z.string().register(z.globalRegistry, { + description: 'Generated reasoning for the final answer', + }), + ), + output: z.string().register(z.globalRegistry, { + description: 'Generated output', + }), +}) + +/** + * ChatInput + */ +export const zSchemaRouterInput = z.object({ + model: z.string().register(z.globalRegistry, { + description: + 'Name of the model to use. Charged based on actual token usage.', + }), + prompt: z.string().register(z.globalRegistry, { + description: 'Prompt to be used for the chat completion', + }), + max_tokens: z.optional( + z.int().gte(1).register(z.globalRegistry, { + description: + "This sets the upper limit for the number of tokens the model can generate in response. It won't produce more than this limit. 
The maximum value is the context length minus the prompt length.", + }), + ), + temperature: z + .optional( + z.number().gte(0).lte(2).register(z.globalRegistry, { + description: + "This setting influences the variety in the model's responses. Lower values lead to more predictable and typical responses, while higher values encourage more diverse and less common responses. At 0, the model always gives the same response for a given input.", + }), + ) + .default(1), + system_prompt: z.optional( + z.string().register(z.globalRegistry, { + description: + 'System prompt to provide context or instructions to the model', + }), + ), + reasoning: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Should reasoning be the part of the final answer.', + }), + ) + .default(false), +}) + +/** + * Schema referenced but not defined by fal.ai (missing from source OpenAPI spec) + */ +export const zSchemaRouterOpenaiV1EmbeddingsInput = z + .record(z.string(), z.unknown()) + .register(z.globalRegistry, { + description: + 'Schema referenced but not defined by fal.ai (missing from source OpenAPI spec)', + }) + +export const zSchemaRouterOpenaiV1EmbeddingsOutput = z.unknown() + +/** + * Schema referenced but not defined by fal.ai (missing from source OpenAPI spec) + */ +export const zSchemaRouterOpenaiV1ResponsesInput = z + .record(z.string(), z.unknown()) + .register(z.globalRegistry, { + description: + 'Schema referenced but not defined by fal.ai (missing from source OpenAPI spec)', + }) + +export const zSchemaRouterOpenaiV1ResponsesOutput = z.unknown() + +export const zSchemaQueueStatus = z.object({ + status: z.enum(['IN_QUEUE', 'IN_PROGRESS', 'COMPLETED']), + request_id: z.string().register(z.globalRegistry, { + description: 'The request id.', + }), + response_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The response url.', + }), + ), + status_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The status url.', + }), + ), + cancel_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The cancel url.', + }), + ), + logs: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The logs.', + }), + ), + metrics: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The metrics.', + }), + ), + queue_position: z.optional( + z.int().register(z.globalRegistry, { + description: 'The queue position.', + }), + ), +}) + +export const zGetOpenrouterRouterOpenaiV1ResponsesRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetOpenrouterRouterOpenaiV1ResponsesRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutOpenrouterRouterOpenaiV1ResponsesRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
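One practical difference from the plain types: parsing with the Zod mirror materializes the spec defaults. A small sketch, assuming the Zod v4 API that the `z.int()` and `z.globalRegistry` calls above imply:

```ts
import { zSchemaRouterInput } from './zod.gen'

const parsed = zSchemaRouterInput.parse({
  model: 'openai/gpt-4o-mini',
  prompt: 'Summarize the fal queue API in one sentence.',
})

// Defaults declared above are filled in on the way through:
// parsed.temperature === 1 and parsed.reasoning === false.
// An out-of-range value such as temperature: 3 throws a ZodError instead.
```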
+ */ +export const zPutOpenrouterRouterOpenaiV1ResponsesRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostOpenrouterRouterOpenaiV1ResponsesData = z.object({ + body: zSchemaRouterOpenaiV1ResponsesInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostOpenrouterRouterOpenaiV1ResponsesResponse = zSchemaQueueStatus + +export const zGetOpenrouterRouterOpenaiV1ResponsesRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetOpenrouterRouterOpenaiV1ResponsesRequestsByRequestIdResponse = + zSchemaRouterOpenaiV1ResponsesOutput + +export const zGetOpenrouterRouterOpenaiV1EmbeddingsRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetOpenrouterRouterOpenaiV1EmbeddingsRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutOpenrouterRouterOpenaiV1EmbeddingsRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutOpenrouterRouterOpenaiV1EmbeddingsRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostOpenrouterRouterOpenaiV1EmbeddingsData = z.object({ + body: zSchemaRouterOpenaiV1EmbeddingsInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostOpenrouterRouterOpenaiV1EmbeddingsResponse = + zSchemaQueueStatus + +export const zGetOpenrouterRouterOpenaiV1EmbeddingsRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetOpenrouterRouterOpenaiV1EmbeddingsRequestsByRequestIdResponse = + zSchemaRouterOpenaiV1EmbeddingsOutput + +export const zGetOpenrouterRouterRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetOpenrouterRouterRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutOpenrouterRouterRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutOpenrouterRouterRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostOpenrouterRouterData = z.object({ + body: zSchemaRouterInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostOpenrouterRouterResponse = zSchemaQueueStatus + +export const zGetOpenrouterRouterRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetOpenrouterRouterRequestsByRequestIdResponse = + zSchemaRouterOutput + +export const zGetOpenrouterRouterOpenaiV1ChatCompletionsRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetOpenrouterRouterOpenaiV1ChatCompletionsRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutOpenrouterRouterOpenaiV1ChatCompletionsRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutOpenrouterRouterOpenaiV1ChatCompletionsRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostOpenrouterRouterOpenaiV1ChatCompletionsData = z.object({ + body: zSchemaRouterOpenaiV1ChatCompletionsInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostOpenrouterRouterOpenaiV1ChatCompletionsResponse = + zSchemaQueueStatus + +export const zGetOpenrouterRouterOpenaiV1ChatCompletionsRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
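Because every submit and status route in this file funnels through `zSchemaQueueStatus`, one boundary check covers them all. A minimal sketch:

```ts
import { zSchemaQueueStatus } from './zod.gen'
import type { SchemaQueueStatus } from './types.gen'

// safeParse turns a malformed payload into a typed failure instead of an
// exception at the network boundary.
function parseTicket(payload: unknown): SchemaQueueStatus {
  const parsed = zSchemaQueueStatus.safeParse(payload)
  if (!parsed.success) {
    throw new Error(`unexpected queue response: ${parsed.error.message}`)
  }
  return parsed.data
}
```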
+ */ +export const zGetOpenrouterRouterOpenaiV1ChatCompletionsRequestsByRequestIdResponse = + zSchemaRouterOpenaiV1ChatCompletionsOutput + +export const zGetFalAiQwen3GuardRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiQwen3GuardRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwen3GuardRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwen3GuardRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwen3GuardData = z.object({ + body: zSchemaQwen3GuardInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiQwen3GuardResponse = zSchemaQueueStatus + +export const zGetFalAiQwen3GuardRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiQwen3GuardRequestsByRequestIdResponse = + zSchemaQwen3GuardOutput + +export const zGetFalAiVideoPromptGeneratorRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiVideoPromptGeneratorRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiVideoPromptGeneratorRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiVideoPromptGeneratorRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiVideoPromptGeneratorData = z.object({ + body: zSchemaVideoPromptGeneratorInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
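The enum-heavy video-prompt-generator input schema doubles as documentation, and parsing turns a typo in any option list into an immediate error. A sketch with an invented concept:

```ts
import { zSchemaVideoPromptGeneratorInput } from './zod.gen'

const request = zSchemaVideoPromptGeneratorInput.parse({
  input_concept: 'a lighthouse keeper on a stormy night', // the only required field
  style: 'Cinematic',
  camera_direction: 'Push in',
  pacing: 'Slow burn',
  prompt_length: 'Medium',
})
// A misspelled option such as style: 'cinematic' (lower case) fails the
// enum check here instead of silently reaching the endpoint.
```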
+ */
+export const zPostFalAiVideoPromptGeneratorResponse = zSchemaQueueStatus
+
+export const zGetFalAiVideoPromptGeneratorRequestsByRequestIdData = z.object({
+  body: z.optional(z.never()),
+  path: z.object({
+    request_id: z.string().register(z.globalRegistry, {
+      description: 'Request ID',
+    }),
+  }),
+  query: z.optional(z.never()),
+})
+
+/**
+ * Result of the request.
+ */
+export const zGetFalAiVideoPromptGeneratorRequestsByRequestIdResponse =
+  zSchemaVideoPromptGeneratorOutput
diff --git a/packages/typescript/ai-fal/src/generated/speech-to-speech/endpoint-map.ts b/packages/typescript/ai-fal/src/generated/speech-to-speech/endpoint-map.ts
new file mode 100644
index 00000000..087c7f0c
--- /dev/null
+++ b/packages/typescript/ai-fal/src/generated/speech-to-speech/endpoint-map.ts
@@ -0,0 +1,54 @@
+// AUTO-GENERATED - Do not edit manually
+// Generated from types.gen.ts via scripts/generate-fal-endpoint-maps.ts
+
+import {
+  zSchemaChatterboxSpeechToSpeechInput,
+  zSchemaChatterboxSpeechToSpeechOutput,
+  zSchemaChatterboxhdSpeechToSpeechInput,
+  zSchemaChatterboxhdSpeechToSpeechOutput,
+} from './zod.gen'
+
+import type {
+  SchemaChatterboxSpeechToSpeechInput,
+  SchemaChatterboxSpeechToSpeechOutput,
+  SchemaChatterboxhdSpeechToSpeechInput,
+  SchemaChatterboxhdSpeechToSpeechOutput,
+} from './types.gen'
+
+import type { z } from 'zod'
+
+export type SpeechToSpeechEndpointMap = {
+  'resemble-ai/chatterboxhd/speech-to-speech': {
+    input: SchemaChatterboxhdSpeechToSpeechInput
+    output: SchemaChatterboxhdSpeechToSpeechOutput
+  }
+  'fal-ai/chatterbox/speech-to-speech': {
+    input: SchemaChatterboxSpeechToSpeechInput
+    output: SchemaChatterboxSpeechToSpeechOutput
+  }
+}
+
+/** Union type of all speech-to-speech model endpoint IDs */
+export type SpeechToSpeechModel = keyof SpeechToSpeechEndpointMap
+
+export const SpeechToSpeechSchemaMap: Record<
+  SpeechToSpeechModel,
+  { input: z.ZodSchema; output: z.ZodSchema }
+> = {
+  ['resemble-ai/chatterboxhd/speech-to-speech']: {
+    input: zSchemaChatterboxhdSpeechToSpeechInput,
+    output: zSchemaChatterboxhdSpeechToSpeechOutput,
+  },
+  ['fal-ai/chatterbox/speech-to-speech']: {
+    input: zSchemaChatterboxSpeechToSpeechInput,
+    output: zSchemaChatterboxSpeechToSpeechOutput,
+  },
+} as const
+
+/** Get the input type for a specific speech-to-speech model */
+export type SpeechToSpeechModelInput<T extends SpeechToSpeechModel> =
+  SpeechToSpeechEndpointMap[T]['input']
+
+/** Get the output type for a specific speech-to-speech model */
+export type SpeechToSpeechModelOutput<T extends SpeechToSpeechModel> =
+  SpeechToSpeechEndpointMap[T]['output']
diff --git a/packages/typescript/ai-fal/src/generated/speech-to-speech/types.gen.ts b/packages/typescript/ai-fal/src/generated/speech-to-speech/types.gen.ts
new file mode 100644
index 00000000..e7a967c6
--- /dev/null
+++ b/packages/typescript/ai-fal/src/generated/speech-to-speech/types.gen.ts
@@ -0,0 +1,389 @@
+// This file is auto-generated by @hey-api/openapi-ts
+
+export type ClientOptions = {
+  baseUrl: 'https://queue.fal.run' | (string & {})
+}
+
+/**
+ * File
+ */
+export type SchemaFile = {
+  /**
+   * File Size
+   *
+   * The size of the file in bytes.
+   */
+  file_size?: number
+  /**
+   * File Name
+   *
+   * The name of the file. It will be auto-generated if not provided.
+   */
+  file_name?: string
+  /**
+   * Content Type
+   *
+   * The mime type of the file.
+   */
+  content_type?: string
+  /**
+   * Url
+   *
+   * The URL where the file can be downloaded from.
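The endpoint map is the piece that ties an endpoint id to its input/output pair, once at the type level and once at runtime. A sketch of a generic submit helper built on it; the fetch and auth wiring are assumptions as before, and the POST again returns a queue ticket rather than audio:

```ts
import { SpeechToSpeechSchemaMap } from './endpoint-map'
import type {
  SpeechToSpeechModel,
  SpeechToSpeechModelInput,
} from './endpoint-map'
import type { SchemaQueueStatus } from './types.gen'

async function submitSpeechToSpeech<T extends SpeechToSpeechModel>(
  model: T,
  input: SpeechToSpeechModelInput<T>,
): Promise<SchemaQueueStatus> {
  // Runtime validation mirrors the compile-time lookup on the same key.
  SpeechToSpeechSchemaMap[model].input.parse(input)
  const res = await fetch(`https://queue.fal.run/${model}`, {
    method: 'POST',
    headers: {
      Authorization: `Key ${process.env.FAL_KEY}`, // assumed auth scheme
      'Content-Type': 'application/json',
    },
    body: JSON.stringify(input),
  })
  return res.json()
}

// The endpoint id narrows the input type: omitting source_audio_url here
// would be a compile-time error, not a 422 from the server.
void submitSpeechToSpeech('fal-ai/chatterbox/speech-to-speech', {
  source_audio_url: 'https://example.com/in.wav', // invented URL
})
```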
+ */ + url: string + /** + * File Data + * + * File data + */ + file_data?: Blob | File +} + +/** + * ChatterboxOutput + */ +export type SchemaChatterboxSpeechToSpeechOutput = { + /** + * Audio + * + * The generated speech audio + */ + audio: SchemaFile +} + +/** + * ChatterboxVCRequest + */ +export type SchemaChatterboxSpeechToSpeechInput = { + /** + * Source Audio Url + */ + source_audio_url: string + /** + * Target Voice Audio Url + * + * Optional URL to an audio file to use as a reference for the generated speech. If provided, the model will try to match the style and tone of the reference audio. + */ + target_voice_audio_url?: string +} + +/** + * Audio + */ +export type SchemaAudio = { + /** + * File Size + * + * The size of the file in bytes. + */ + file_size?: number + /** + * File Name + * + * The name of the file. It will be auto-generated if not provided. + */ + file_name?: string + /** + * Content Type + * + * The mime type of the file. + */ + content_type?: string + /** + * Url + * + * The URL where the file can be downloaded from. + */ + url: string + /** + * File Data + * + * File data + */ + file_data?: Blob | File +} + +/** + * STSOutput + * + * Output parameters for the speech-to-speech request. + */ +export type SchemaChatterboxhdSpeechToSpeechOutput = { + /** + * Audio + * + * The generated voice-converted audio file. + */ + audio: SchemaAudio +} + +/** + * STSInput + * + * Input parameters for the speech-to-speech request. + */ +export type SchemaChatterboxhdSpeechToSpeechInput = { + /** + * High Quality Audio + * + * If True, the generated audio will be upscaled to 48kHz. The generation of the audio will take longer, but the quality will be higher. If False, the generated audio will be 24kHz. + */ + high_quality_audio?: boolean + /** + * Target Voice Audio Url + * + * URL to the audio file which represents the voice of the output audio. If provided, this will override the target_voice setting. If neither target_voice nor target_voice_audio_url are provided, the default target voice will be used. + */ + target_voice_audio_url?: string + /** + * Source Audio Url + * + * URL to the source audio file to be voice-converted. + */ + source_audio_url: string + /** + * Target Voice + * + * The voice to use for the speech-to-speech request. If neither target_voice nor target_voice_audio_url are provided, a random target voice will be used. + */ + target_voice?: + | 'Aurora' + | 'Blade' + | 'Britney' + | 'Carl' + | 'Cliff' + | 'Richard' + | 'Rico' + | 'Siobhan' + | 'Vicky' +} + +export type SchemaQueueStatus = { + status: 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED' + /** + * The request id. + */ + request_id: string + /** + * The response url. + */ + response_url?: string + /** + * The status url. + */ + status_url?: string + /** + * The cancel url. + */ + cancel_url?: string + /** + * The logs. + */ + logs?: { + [key: string]: unknown + } + /** + * The metrics. + */ + metrics?: { + [key: string]: unknown + } + /** + * The queue position. + */ + queue_position?: number +} + +export type GetResembleAiChatterboxhdSpeechToSpeechRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/resemble-ai/chatterboxhd/speech-to-speech/requests/{request_id}/status' + } + +export type GetResembleAiChatterboxhdSpeechToSpeechRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
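To make those knobs concrete, a valid ChatterboxHD voice-conversion request could look like this (URLs invented):

```ts
import type { SchemaChatterboxhdSpeechToSpeechInput } from './types.gen'

const conversion: SchemaChatterboxhdSpeechToSpeechInput = {
  source_audio_url: 'https://example.com/narration.wav', // the only required field
  target_voice: 'Aurora', // would be ignored if target_voice_audio_url were also set
  high_quality_audio: true, // 48kHz output at the cost of a longer generation
}
```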
+ */ + 200: SchemaQueueStatus + } + +export type GetResembleAiChatterboxhdSpeechToSpeechRequestsByRequestIdStatusResponse = + GetResembleAiChatterboxhdSpeechToSpeechRequestsByRequestIdStatusResponses[keyof GetResembleAiChatterboxhdSpeechToSpeechRequestsByRequestIdStatusResponses] + +export type PutResembleAiChatterboxhdSpeechToSpeechRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/resemble-ai/chatterboxhd/speech-to-speech/requests/{request_id}/cancel' + } + +export type PutResembleAiChatterboxhdSpeechToSpeechRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutResembleAiChatterboxhdSpeechToSpeechRequestsByRequestIdCancelResponse = + PutResembleAiChatterboxhdSpeechToSpeechRequestsByRequestIdCancelResponses[keyof PutResembleAiChatterboxhdSpeechToSpeechRequestsByRequestIdCancelResponses] + +export type PostResembleAiChatterboxhdSpeechToSpeechData = { + body: SchemaChatterboxhdSpeechToSpeechInput + path?: never + query?: never + url: '/resemble-ai/chatterboxhd/speech-to-speech' +} + +export type PostResembleAiChatterboxhdSpeechToSpeechResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostResembleAiChatterboxhdSpeechToSpeechResponse = + PostResembleAiChatterboxhdSpeechToSpeechResponses[keyof PostResembleAiChatterboxhdSpeechToSpeechResponses] + +export type GetResembleAiChatterboxhdSpeechToSpeechRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/resemble-ai/chatterboxhd/speech-to-speech/requests/{request_id}' +} + +export type GetResembleAiChatterboxhdSpeechToSpeechRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaChatterboxhdSpeechToSpeechOutput + } + +export type GetResembleAiChatterboxhdSpeechToSpeechRequestsByRequestIdResponse = + GetResembleAiChatterboxhdSpeechToSpeechRequestsByRequestIdResponses[keyof GetResembleAiChatterboxhdSpeechToSpeechRequestsByRequestIdResponses] + +export type GetFalAiChatterboxSpeechToSpeechRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/chatterbox/speech-to-speech/requests/{request_id}/status' +} + +export type GetFalAiChatterboxSpeechToSpeechRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiChatterboxSpeechToSpeechRequestsByRequestIdStatusResponse = + GetFalAiChatterboxSpeechToSpeechRequestsByRequestIdStatusResponses[keyof GetFalAiChatterboxSpeechToSpeechRequestsByRequestIdStatusResponses] + +export type PutFalAiChatterboxSpeechToSpeechRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/chatterbox/speech-to-speech/requests/{request_id}/cancel' +} + +export type PutFalAiChatterboxSpeechToSpeechRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiChatterboxSpeechToSpeechRequestsByRequestIdCancelResponse = + PutFalAiChatterboxSpeechToSpeechRequestsByRequestIdCancelResponses[keyof PutFalAiChatterboxSpeechToSpeechRequestsByRequestIdCancelResponses] + +export type PostFalAiChatterboxSpeechToSpeechData = { + body: SchemaChatterboxSpeechToSpeechInput + path?: never + query?: never + url: '/fal-ai/chatterbox/speech-to-speech' +} + +export type PostFalAiChatterboxSpeechToSpeechResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiChatterboxSpeechToSpeechResponse = + PostFalAiChatterboxSpeechToSpeechResponses[keyof PostFalAiChatterboxSpeechToSpeechResponses] + +export type GetFalAiChatterboxSpeechToSpeechRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/chatterbox/speech-to-speech/requests/{request_id}' +} + +export type GetFalAiChatterboxSpeechToSpeechRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaChatterboxSpeechToSpeechOutput +} + +export type GetFalAiChatterboxSpeechToSpeechRequestsByRequestIdResponse = + GetFalAiChatterboxSpeechToSpeechRequestsByRequestIdResponses[keyof GetFalAiChatterboxSpeechToSpeechRequestsByRequestIdResponses] diff --git a/packages/typescript/ai-fal/src/generated/speech-to-speech/zod.gen.ts b/packages/typescript/ai-fal/src/generated/speech-to-speech/zod.gen.ts new file mode 100644 index 00000000..49ec09b1 --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/speech-to-speech/zod.gen.ts @@ -0,0 +1,343 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { z } from 'zod' + +/** + * File + */ +export const zSchemaFile = z.object({ + file_size: z.optional( + z.int().register(z.globalRegistry, { + description: 'The size of the file in bytes.', + }), + ), + file_name: z.optional( + z.string().register(z.globalRegistry, { + description: + 'The name of the file. It will be auto-generated if not provided.', + }), + ), + content_type: z.optional( + z.string().register(z.globalRegistry, { + description: 'The mime type of the file.', + }), + ), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), + file_data: z.optional( + z.string().register(z.globalRegistry, { + description: 'File data', + }), + ), +}) + +/** + * ChatterboxOutput + */ +export const zSchemaChatterboxSpeechToSpeechOutput = z.object({ + audio: zSchemaFile, +}) + +/** + * ChatterboxVCRequest + */ +export const zSchemaChatterboxSpeechToSpeechInput = z.object({ + source_audio_url: z.string(), + target_voice_audio_url: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Optional URL to an audio file to use as a reference for the generated speech. If provided, the model will try to match the style and tone of the reference audio.', + }), + ), +}) + +/** + * Audio + */ +export const zSchemaAudio = z.object({ + file_size: z.optional( + z.int().register(z.globalRegistry, { + description: 'The size of the file in bytes.', + }), + ), + file_name: z.optional( + z.string().register(z.globalRegistry, { + description: + 'The name of the file. 
It will be auto-generated if not provided.', + }), + ), + content_type: z.optional( + z.string().register(z.globalRegistry, { + description: 'The mime type of the file.', + }), + ), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), + file_data: z.optional( + z.string().register(z.globalRegistry, { + description: 'File data', + }), + ), +}) + +/** + * STSOutput + * + * Output parameters for the speech-to-speech request. + */ +export const zSchemaChatterboxhdSpeechToSpeechOutput = z + .object({ + audio: zSchemaAudio, + }) + .register(z.globalRegistry, { + description: 'Output parameters for the speech-to-speech request.', + }) + +/** + * STSInput + * + * Input parameters for the speech-to-speech request. + */ +export const zSchemaChatterboxhdSpeechToSpeechInput = z + .object({ + high_quality_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If True, the generated audio will be upscaled to 48kHz. The generation of the audio will take longer, but the quality will be higher. If False, the generated audio will be 24kHz. ', + }), + ) + .default(false), + target_voice_audio_url: z.optional( + z.string().register(z.globalRegistry, { + description: + 'URL to the audio file which represents the voice of the output audio. If provided, this will override the target_voice setting. If neither target_voice nor target_voice_audio_url are provided, the default target voice will be used.', + }), + ), + source_audio_url: z.string().register(z.globalRegistry, { + description: 'URL to the source audio file to be voice-converted.', + }), + target_voice: z.optional( + z + .enum([ + 'Aurora', + 'Blade', + 'Britney', + 'Carl', + 'Cliff', + 'Richard', + 'Rico', + 'Siobhan', + 'Vicky', + ]) + .register(z.globalRegistry, { + description: + 'The voice to use for the speech-to-speech request. If neither target_voice nor target_voice_audio_url are provided, a random target voice will be used.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'Input parameters for the speech-to-speech request.', + }) + +export const zSchemaQueueStatus = z.object({ + status: z.enum(['IN_QUEUE', 'IN_PROGRESS', 'COMPLETED']), + request_id: z.string().register(z.globalRegistry, { + description: 'The request id.', + }), + response_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The response url.', + }), + ), + status_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The status url.', + }), + ), + cancel_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The cancel url.', + }), + ), + logs: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The logs.', + }), + ), + metrics: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The metrics.', + }), + ), + queue_position: z.optional( + z.int().register(z.globalRegistry, { + description: 'The queue position.', + }), + ), +}) + +export const zGetResembleAiChatterboxhdSpeechToSpeechRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
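On the way back out, the output schema gives a checked path down to the generated file. A minimal sketch; `payload` stands for the body fetched from the requests/{request_id} result route:

```ts
import { zSchemaChatterboxhdSpeechToSpeechOutput } from './zod.gen'

function audioUrl(payload: unknown): string {
  // Throws if the result does not carry the expected { audio: { url } } shape.
  return zSchemaChatterboxhdSpeechToSpeechOutput.parse(payload).audio.url
}
```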
+ */ +export const zGetResembleAiChatterboxhdSpeechToSpeechRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutResembleAiChatterboxhdSpeechToSpeechRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutResembleAiChatterboxhdSpeechToSpeechRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostResembleAiChatterboxhdSpeechToSpeechData = z.object({ + body: zSchemaChatterboxhdSpeechToSpeechInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostResembleAiChatterboxhdSpeechToSpeechResponse = + zSchemaQueueStatus + +export const zGetResembleAiChatterboxhdSpeechToSpeechRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetResembleAiChatterboxhdSpeechToSpeechRequestsByRequestIdResponse = + zSchemaChatterboxhdSpeechToSpeechOutput + +export const zGetFalAiChatterboxSpeechToSpeechRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiChatterboxSpeechToSpeechRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiChatterboxSpeechToSpeechRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiChatterboxSpeechToSpeechRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiChatterboxSpeechToSpeechData = z.object({ + body: zSchemaChatterboxSpeechToSpeechInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiChatterboxSpeechToSpeechResponse = zSchemaQueueStatus + +export const zGetFalAiChatterboxSpeechToSpeechRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiChatterboxSpeechToSpeechRequestsByRequestIdResponse = + zSchemaChatterboxSpeechToSpeechOutput diff --git a/packages/typescript/ai-fal/src/generated/speech-to-text/endpoint-map.ts b/packages/typescript/ai-fal/src/generated/speech-to-text/endpoint-map.ts new file mode 100644 index 00000000..5c86afff --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/speech-to-text/endpoint-map.ts @@ -0,0 +1,138 @@ +// AUTO-GENERATED - Do not edit manually +// Generated from types.gen.ts via scripts/generate-fal-endpoint-maps.ts + +import { + zSchemaElevenlabsSpeechToTextInput, + zSchemaElevenlabsSpeechToTextOutput, + zSchemaElevenlabsSpeechToTextScribeV2Input, + zSchemaElevenlabsSpeechToTextScribeV2Output, + zSchemaSmartTurnInput, + zSchemaSmartTurnOutput, + zSchemaSpeechToTextInput, + zSchemaSpeechToTextOutput, + zSchemaSpeechToTextStreamInput, + zSchemaSpeechToTextStreamOutput, + zSchemaSpeechToTextTurboInput, + zSchemaSpeechToTextTurboOutput, + zSchemaSpeechToTextTurboStreamInput, + zSchemaSpeechToTextTurboStreamOutput, + zSchemaWhisperInput, + zSchemaWhisperOutput, + zSchemaWizperInput, + zSchemaWizperOutput, +} from './zod.gen' + +import type { + SchemaElevenlabsSpeechToTextInput, + SchemaElevenlabsSpeechToTextOutput, + SchemaElevenlabsSpeechToTextScribeV2Input, + SchemaElevenlabsSpeechToTextScribeV2Output, + SchemaSmartTurnInput, + SchemaSmartTurnOutput, + SchemaSpeechToTextInput, + SchemaSpeechToTextOutput, + SchemaSpeechToTextStreamInput, + SchemaSpeechToTextStreamOutput, + SchemaSpeechToTextTurboInput, + SchemaSpeechToTextTurboOutput, + SchemaSpeechToTextTurboStreamInput, + SchemaSpeechToTextTurboStreamOutput, + SchemaWhisperInput, + SchemaWhisperOutput, + SchemaWizperInput, + SchemaWizperOutput, +} from './types.gen' + +import type { z } from 'zod' + +export type SpeechToTextEndpointMap = { + 'fal-ai/elevenlabs/speech-to-text/scribe-v2': { + input: SchemaElevenlabsSpeechToTextScribeV2Input + output: SchemaElevenlabsSpeechToTextScribeV2Output + } + 'fal-ai/smart-turn': { + input: SchemaSmartTurnInput + output: SchemaSmartTurnOutput + } + 'fal-ai/speech-to-text/turbo': { + input: SchemaSpeechToTextTurboInput + output: SchemaSpeechToTextTurboOutput + } + 'fal-ai/speech-to-text/turbo/stream': { + input: SchemaSpeechToTextTurboStreamInput + output: SchemaSpeechToTextTurboStreamOutput + } + 'fal-ai/speech-to-text/stream': { + input: SchemaSpeechToTextStreamInput + output: SchemaSpeechToTextStreamOutput + } + 'fal-ai/speech-to-text': { + input: SchemaSpeechToTextInput + output: SchemaSpeechToTextOutput + } + 'fal-ai/elevenlabs/speech-to-text': { + input: SchemaElevenlabsSpeechToTextInput + output: SchemaElevenlabsSpeechToTextOutput + } + 'fal-ai/wizper': { + input: SchemaWizperInput + output: SchemaWizperOutput + } + 'fal-ai/whisper': { + input: SchemaWhisperInput + output: SchemaWhisperOutput + } +} + +/** Union type of all speech-to-text model endpoint IDs */ +export type SpeechToTextModel = keyof SpeechToTextEndpointMap + +export const SpeechToTextSchemaMap: Record< + SpeechToTextModel, + { input: z.ZodSchema; output: z.ZodSchema } +> = { + ['fal-ai/elevenlabs/speech-to-text/scribe-v2']: { + input: zSchemaElevenlabsSpeechToTextScribeV2Input, + output: zSchemaElevenlabsSpeechToTextScribeV2Output, + }, + ['fal-ai/smart-turn']: { + input: zSchemaSmartTurnInput, + output: zSchemaSmartTurnOutput, + }, + ['fal-ai/speech-to-text/turbo']: { + input: zSchemaSpeechToTextTurboInput, + output: zSchemaSpeechToTextTurboOutput, + }, + ['fal-ai/speech-to-text/turbo/stream']: { + 
+    input: zSchemaSpeechToTextTurboStreamInput,
+    output: zSchemaSpeechToTextTurboStreamOutput,
+  },
+  ['fal-ai/speech-to-text/stream']: {
+    input: zSchemaSpeechToTextStreamInput,
+    output: zSchemaSpeechToTextStreamOutput,
+  },
+  ['fal-ai/speech-to-text']: {
+    input: zSchemaSpeechToTextInput,
+    output: zSchemaSpeechToTextOutput,
+  },
+  ['fal-ai/elevenlabs/speech-to-text']: {
+    input: zSchemaElevenlabsSpeechToTextInput,
+    output: zSchemaElevenlabsSpeechToTextOutput,
+  },
+  ['fal-ai/wizper']: {
+    input: zSchemaWizperInput,
+    output: zSchemaWizperOutput,
+  },
+  ['fal-ai/whisper']: {
+    input: zSchemaWhisperInput,
+    output: zSchemaWhisperOutput,
+  },
+} as const
+
+/** Get the input type for a specific speech-to-text model */
+export type SpeechToTextModelInput<T extends SpeechToTextModel> =
+  SpeechToTextEndpointMap[T]['input']
+
+/** Get the output type for a specific speech-to-text model */
+export type SpeechToTextModelOutput<T extends SpeechToTextModel> =
+  SpeechToTextEndpointMap[T]['output']
diff --git a/packages/typescript/ai-fal/src/generated/speech-to-text/types.gen.ts b/packages/typescript/ai-fal/src/generated/speech-to-text/types.gen.ts
new file mode 100644
index 00000000..cfe0f1ce
--- /dev/null
+++ b/packages/typescript/ai-fal/src/generated/speech-to-text/types.gen.ts
@@ -0,0 +1,1814 @@
+// This file is auto-generated by @hey-api/openapi-ts
+
+export type ClientOptions = {
+  baseUrl: 'https://queue.fal.run' | (string & {})
+}
+
+/**
+ * DiarizationSegment
+ */
+export type SchemaDiarizationSegment = {
+  /**
+   * Timestamp
+   *
+   * Start and end timestamp of the segment
+   */
+  timestamp: [unknown, unknown]
+  /**
+   * Speaker
+   *
+   * Speaker ID of the segment
+   */
+  speaker: string
+}
+
+/**
+ * WhisperOutput
+ */
+export type SchemaWhisperOutput = {
+  /**
+   * Text
+   *
+   * Transcription of the audio file
+   */
+  text: string
+  /**
+   * Inferred Languages
+   *
+   * List of languages that the audio file is inferred to be. Defaults to null.
+   */
+  inferred_languages: Array<
+    | 'af'
+    | 'am'
+    | 'ar'
+    | 'as'
+    | 'az'
+    | 'ba'
+    | 'be'
+    | 'bg'
+    | 'bn'
+    | 'bo'
+    | 'br'
+    | 'bs'
+    | 'ca'
+    | 'cs'
+    | 'cy'
+    | 'da'
+    | 'de'
+    | 'el'
+    | 'en'
+    | 'es'
+    | 'et'
+    | 'eu'
+    | 'fa'
+    | 'fi'
+    | 'fo'
+    | 'fr'
+    | 'gl'
+    | 'gu'
+    | 'ha'
+    | 'haw'
+    | 'he'
+    | 'hi'
+    | 'hr'
+    | 'ht'
+    | 'hu'
+    | 'hy'
+    | 'id'
+    | 'is'
+    | 'it'
+    | 'ja'
+    | 'jw'
+    | 'ka'
+    | 'kk'
+    | 'km'
+    | 'kn'
+    | 'ko'
+    | 'la'
+    | 'lb'
+    | 'ln'
+    | 'lo'
+    | 'lt'
+    | 'lv'
+    | 'mg'
+    | 'mi'
+    | 'mk'
+    | 'ml'
+    | 'mn'
+    | 'mr'
+    | 'ms'
+    | 'mt'
+    | 'my'
+    | 'ne'
+    | 'nl'
+    | 'nn'
+    | 'no'
+    | 'oc'
+    | 'pa'
+    | 'pl'
+    | 'ps'
+    | 'pt'
+    | 'ro'
+    | 'ru'
+    | 'sa'
+    | 'sd'
+    | 'si'
+    | 'sk'
+    | 'sl'
+    | 'sn'
+    | 'so'
+    | 'sq'
+    | 'sr'
+    | 'su'
+    | 'sv'
+    | 'sw'
+    | 'ta'
+    | 'te'
+    | 'tg'
+    | 'th'
+    | 'tk'
+    | 'tl'
+    | 'tr'
+    | 'tt'
+    | 'uk'
+    | 'ur'
+    | 'uz'
+    | 'vi'
+    | 'yi'
+    | 'yo'
+    | 'zh'
+  >
+  /**
+   * Chunks
+   *
+   * Timestamp chunks of the audio file
+   */
+  chunks?: Array<SchemaWhisperChunk>
+  /**
+   * Diarization Segments
+   *
+   * Speaker diarization segments of the audio file. Only present if diarization is enabled.
+   */
+  diarization_segments: Array<SchemaDiarizationSegment>
+}
+
+/**
+ * WhisperChunk
+ */
+export type SchemaWhisperChunk = {
+  /**
+   * Text
+   *
+   * Transcription of the chunk
+   */
+  text: string
+  /**
+   * Timestamp
+   *
+   * Start and end timestamp of the chunk
+   */
+  timestamp: [unknown, unknown]
+  /**
+   * Speaker
+   *
+   * Speaker ID of the chunk. Only present if diarization is enabled.
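With diarization on, each chunk optionally carries a speaker id while diarization_segments map speakers to time ranges. A small sketch that folds the chunks back into a per-speaker transcript:

```ts
import type { SchemaWhisperOutput } from './types.gen'

function bySpeaker(result: SchemaWhisperOutput): Record<string, string> {
  const lines: Record<string, string> = {}
  for (const chunk of result.chunks ?? []) {
    // chunk.speaker is only populated when the request set diarize: true.
    const speaker = chunk.speaker ?? 'unknown'
    lines[speaker] = ((lines[speaker] ?? '') + ' ' + chunk.text).trim()
  }
  return lines
}
```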
+ */ + speaker?: string +} + +/** + * WhisperInput + */ +export type SchemaWhisperInput = { + /** + * Version + * + * Version of the model to use. All of the models are the Whisper large variant. + */ + version?: '3' + /** + * Batch Size + */ + batch_size?: number + /** + * Language + * + * + * Language of the audio file. If set to null, the language will be + * automatically detected. Defaults to null. + * + * If translate is selected as the task, the audio will be translated to + * English, regardless of the language selected. + * + */ + language?: + | 'af' + | 'am' + | 'ar' + | 'as' + | 'az' + | 'ba' + | 'be' + | 'bg' + | 'bn' + | 'bo' + | 'br' + | 'bs' + | 'ca' + | 'cs' + | 'cy' + | 'da' + | 'de' + | 'el' + | 'en' + | 'es' + | 'et' + | 'eu' + | 'fa' + | 'fi' + | 'fo' + | 'fr' + | 'gl' + | 'gu' + | 'ha' + | 'haw' + | 'he' + | 'hi' + | 'hr' + | 'ht' + | 'hu' + | 'hy' + | 'id' + | 'is' + | 'it' + | 'ja' + | 'jw' + | 'ka' + | 'kk' + | 'km' + | 'kn' + | 'ko' + | 'la' + | 'lb' + | 'ln' + | 'lo' + | 'lt' + | 'lv' + | 'mg' + | 'mi' + | 'mk' + | 'ml' + | 'mn' + | 'mr' + | 'ms' + | 'mt' + | 'my' + | 'ne' + | 'nl' + | 'nn' + | 'no' + | 'oc' + | 'pa' + | 'pl' + | 'ps' + | 'pt' + | 'ro' + | 'ru' + | 'sa' + | 'sd' + | 'si' + | 'sk' + | 'sl' + | 'sn' + | 'so' + | 'sq' + | 'sr' + | 'su' + | 'sv' + | 'sw' + | 'ta' + | 'te' + | 'tg' + | 'th' + | 'tk' + | 'tl' + | 'tr' + | 'tt' + | 'uk' + | 'ur' + | 'uz' + | 'vi' + | 'yi' + | 'yo' + | 'zh' + /** + * Prompt + * + * Prompt to use for generation. Defaults to an empty string. + */ + prompt?: string + /** + * Num Speakers + * + * + * Number of speakers in the audio file. Defaults to null. + * If not provided, the number of speakers will be automatically + * detected. + * + */ + num_speakers?: number | null + /** + * Task + * + * Task to perform on the audio file. Either transcribe or translate. + */ + task?: 'transcribe' | 'translate' + /** + * Chunk Level + * + * Level of the chunks to return. Either none, segment or word. `none` would imply that all of the audio will be transcribed without the timestamp tokens, we suggest to switch to `none` if you are not satisfied with the transcription quality, since it will usually improve the quality of the results. Switching to `none` will also provide minor speed ups in the transcription due to less amount of generated tokens. Notice that setting to none will produce **a single chunk with the whole transcription**. + */ + chunk_level?: 'none' | 'segment' | 'word' + /** + * Audio Url + * + * URL of the audio file to transcribe. Supported formats: mp3, mp4, mpeg, mpga, m4a, wav or webm. + */ + audio_url: string + /** + * Diarize + * + * Whether to diarize the audio file. Defaults to false. Setting to true will add costs proportional to diarization inference time. + */ + diarize?: boolean +} + +/** + * WhisperOutput + */ +export type SchemaWizperOutput = { + /** + * Text + * + * Transcription of the audio file + */ + text: string + /** + * Languages + * + * List of languages that the audio file is inferred to be. Defaults to null. 
+   */
+  languages: Array<
+    | 'af'
+    | 'am'
+    | 'ar'
+    | 'as'
+    | 'az'
+    | 'ba'
+    | 'be'
+    | 'bg'
+    | 'bn'
+    | 'bo'
+    | 'br'
+    | 'bs'
+    | 'ca'
+    | 'cs'
+    | 'cy'
+    | 'da'
+    | 'de'
+    | 'el'
+    | 'en'
+    | 'es'
+    | 'et'
+    | 'eu'
+    | 'fa'
+    | 'fi'
+    | 'fo'
+    | 'fr'
+    | 'gl'
+    | 'gu'
+    | 'ha'
+    | 'haw'
+    | 'he'
+    | 'hi'
+    | 'hr'
+    | 'ht'
+    | 'hu'
+    | 'hy'
+    | 'id'
+    | 'is'
+    | 'it'
+    | 'ja'
+    | 'jw'
+    | 'ka'
+    | 'kk'
+    | 'km'
+    | 'kn'
+    | 'ko'
+    | 'la'
+    | 'lb'
+    | 'ln'
+    | 'lo'
+    | 'lt'
+    | 'lv'
+    | 'mg'
+    | 'mi'
+    | 'mk'
+    | 'ml'
+    | 'mn'
+    | 'mr'
+    | 'ms'
+    | 'mt'
+    | 'my'
+    | 'ne'
+    | 'nl'
+    | 'nn'
+    | 'no'
+    | 'oc'
+    | 'pa'
+    | 'pl'
+    | 'ps'
+    | 'pt'
+    | 'ro'
+    | 'ru'
+    | 'sa'
+    | 'sd'
+    | 'si'
+    | 'sk'
+    | 'sl'
+    | 'sn'
+    | 'so'
+    | 'sq'
+    | 'sr'
+    | 'su'
+    | 'sv'
+    | 'sw'
+    | 'ta'
+    | 'te'
+    | 'tg'
+    | 'th'
+    | 'tk'
+    | 'tl'
+    | 'tr'
+    | 'tt'
+    | 'uk'
+    | 'ur'
+    | 'uz'
+    | 'vi'
+    | 'yi'
+    | 'yo'
+    | 'zh'
+  >
+  /**
+   * Chunks
+   *
+   * Timestamp chunks of the audio file
+   */
+  chunks: Array<SchemaWhisperChunk>
+}
+
+/**
+ * WhisperInput
+ */
+export type SchemaWizperInput = {
+  /**
+   * Language
+   *
+   *
+   * Language of the audio file.
+   * If translate is selected as the task, the audio will be translated to
+   * English, regardless of the language selected. If `None` is passed,
+   * the language will be automatically detected. This will also increase
+   * the inference time.
+   *
+   */
+  language?:
+    | 'af'
+    | 'am'
+    | 'ar'
+    | 'as'
+    | 'az'
+    | 'ba'
+    | 'be'
+    | 'bg'
+    | 'bn'
+    | 'bo'
+    | 'br'
+    | 'bs'
+    | 'ca'
+    | 'cs'
+    | 'cy'
+    | 'da'
+    | 'de'
+    | 'el'
+    | 'en'
+    | 'es'
+    | 'et'
+    | 'eu'
+    | 'fa'
+    | 'fi'
+    | 'fo'
+    | 'fr'
+    | 'gl'
+    | 'gu'
+    | 'ha'
+    | 'haw'
+    | 'he'
+    | 'hi'
+    | 'hr'
+    | 'ht'
+    | 'hu'
+    | 'hy'
+    | 'id'
+    | 'is'
+    | 'it'
+    | 'ja'
+    | 'jw'
+    | 'ka'
+    | 'kk'
+    | 'km'
+    | 'kn'
+    | 'ko'
+    | 'la'
+    | 'lb'
+    | 'ln'
+    | 'lo'
+    | 'lt'
+    | 'lv'
+    | 'mg'
+    | 'mi'
+    | 'mk'
+    | 'ml'
+    | 'mn'
+    | 'mr'
+    | 'ms'
+    | 'mt'
+    | 'my'
+    | 'ne'
+    | 'nl'
+    | 'nn'
+    | 'no'
+    | 'oc'
+    | 'pa'
+    | 'pl'
+    | 'ps'
+    | 'pt'
+    | 'ro'
+    | 'ru'
+    | 'sa'
+    | 'sd'
+    | 'si'
+    | 'sk'
+    | 'sl'
+    | 'sn'
+    | 'so'
+    | 'sq'
+    | 'sr'
+    | 'su'
+    | 'sv'
+    | 'sw'
+    | 'ta'
+    | 'te'
+    | 'tg'
+    | 'th'
+    | 'tk'
+    | 'tl'
+    | 'tr'
+    | 'tt'
+    | 'uk'
+    | 'ur'
+    | 'uz'
+    | 'vi'
+    | 'yi'
+    | 'yo'
+    | 'zh'
+    | unknown
+  /**
+   * Version
+   *
+   * Version of the model to use. All of the models are the Whisper large variant.
+   */
+  version?: string
+  /**
+   * Max Segment Len
+   *
+   * Maximum speech segment duration in seconds before splitting.
+   */
+  max_segment_len?: number
+  /**
+   * Task
+   *
+   * Task to perform on the audio file. Either transcribe or translate.
+   */
+  task?: 'transcribe' | 'translate'
+  /**
+   * Chunk Level
+   *
+   * Level of the chunks to return.
+   */
+  chunk_level?: string
+  /**
+   * Audio Url
+   *
+   * URL of the audio file to transcribe. Supported formats: mp3, mp4, mpeg, mpga, m4a, wav or webm.
+   */
+  audio_url: string
+  /**
+   * Merge Chunks
+   *
+   * Whether to merge consecutive chunks. When enabled, chunks are merged if their combined duration does not exceed max_segment_len.
+   */
+  merge_chunks?: boolean
+}
+
+/**
+ * TranscriptionOutput
+ */
+export type SchemaElevenlabsSpeechToTextOutput = {
+  /**
+   * Text
+   *
+   * The full transcribed text
+   */
+  text: string
+  /**
+   * Language Probability
+   *
+   * Confidence in language detection
+   */
+  language_probability: number
+  /**
+   * Language Code
+   *
+   * Detected or specified language code
+   */
+  language_code: string
+  /**
+   * Words
+   *
+   * Word-level transcription details
+   */
+  words: Array<SchemaTranscriptionWord>
+}
+
+/**
+ * TranscriptionWord
+ */
+export type SchemaTranscriptionWord = {
+  /**
+   * Text
+   *
+   * The transcribed word or audio event
+   */
+  text: string
+  /**
+   * Start
+   *
+   * Start time in seconds
+   */
+  start: number | unknown
+  /**
+   * Type
+   *
+   * Type of element (word, spacing, or audio_event)
+   */
+  type: string
+  /**
+   * End
+   *
+   * End time in seconds
+   */
+  end: number | unknown
+  /**
+   * Speaker Id
+   *
+   * Speaker identifier if diarization was enabled
+   */
+  speaker_id?: string | unknown
+}
+
+/**
+ * SpeechToTextRequest
+ */
+export type SchemaElevenlabsSpeechToTextInput = {
+  /**
+   * Language Code
+   *
+   * Language code of the audio
+   */
+  language_code?: string | unknown
+  /**
+   * Audio Url
+   *
+   * URL of the audio file to transcribe
+   */
+  audio_url: string
+  /**
+   * Diarize
+   *
+   * Whether to annotate who is speaking
+   */
+  diarize?: boolean
+  /**
+   * Tag Audio Events
+   *
+   * Tag audio events like laughter, applause, etc.
+   */
+  tag_audio_events?: boolean
+}
+
+/**
+ * SpeechOutput
+ */
+export type SchemaSpeechToTextOutput = {
+  /**
+   * Partial
+   *
+   * Indicates if this is a partial (in-progress) transcript
+   */
+  partial?: boolean
+  /**
+   * Transcribed Text
+   *
+   * The partial or final transcription output from Canary
+   */
+  output: string
+}
+
+/**
+ * SpeechInput
+ */
+export type SchemaSpeechToTextInput = {
+  /**
+   * Audio Path
+   *
+   * Local filesystem path (or remote URL) to a long audio file
+   */
+  audio_url: string
+  /**
+   * Use Punctuation/Capitalization (PnC)
+   *
+   * Whether to use Canary's built-in punctuation & capitalization
+   */
+  use_pnc?: boolean
+}
+
+export type SchemaSpeechToTextStreamOutput = unknown
+
+/**
+ * SpeechInput
+ */
+export type SchemaSpeechToTextStreamInput = {
+  /**
+   * Audio Path
+   *
+   * Local filesystem path (or remote URL) to a long audio file
+   */
+  audio_url: string
+  /**
+   * Use Punctuation/Capitalization (PnC)
+   *
+   * Whether to use Canary's built-in punctuation & capitalization
+   */
+  use_pnc?: boolean
+}
+
+export type SchemaSpeechToTextTurboStreamOutput = unknown
+
+/**
+ * SpeechInput
+ */
+export type SchemaSpeechToTextTurboStreamInput = {
+  /**
+   * Audio Path
+   *
+   * Local filesystem path (or remote URL) to a long audio file
+   */
+  audio_url: string
+  /**
+   * Use Punctuation/Capitalization (PnC)
+   *
+   * Whether to use Canary's built-in punctuation & capitalization
+   */
+  use_pnc?: boolean
+}
+
+/**
+ * SpeechOutput
+ */
+export type SchemaSpeechToTextTurboOutput = {
+  /**
+   * Partial
+   *
+   * Indicates if this is a partial (in-progress) transcript
+   */
+  partial?: boolean
+  /**
+   * Transcribed Text
+   *
+   * The partial or final transcription output from Canary
+   */
+  output: string
+}
+
+/**
+ * SpeechInput
+ */
+export type SchemaSpeechToTextTurboInput = {
+  /**
+   * Audio Path
+   *
+   * Local filesystem path (or remote URL) to a long audio file
+   */
+  audio_url: string
+  /**
+   * Use Punctuation/Capitalization (PnC)
+   *
+   * Whether to use Canary's built-in punctuation & capitalization
+   */
+  use_pnc?: boolean
+}
+
+/**
+ * Output
+ */
+export type SchemaSmartTurnOutput = {
+  /**
+   * Prediction
+   *
+   * The predicted turn type. 1 for Complete, 0 for Incomplete.
+   */
+  prediction: number
+  /**
+   * Probability
+   *
+   * The probability of the predicted turn type.
+   */
+  probability: number
+  /**
+   * Metrics
+   *
+   * The metrics of the inference.
+   */
+  metrics: {
+    [key: string]: unknown
+  }
+}
+
+/**
+ * SmartTurnInput
+ */
+export type SchemaSmartTurnInput = {
+  /**
+   * Audio Url
+   *
+   * The URL of the audio file to be processed.
+   */
+  audio_url: string
+}
+
+/**
+ * TranscriptionOutputV2
+ */
+export type SchemaElevenlabsSpeechToTextScribeV2Output = {
+  /**
+   * Text
+   *
+   * The full transcribed text
+   */
+  text: string
+  /**
+   * Language Probability
+   *
+   * Confidence in language detection
+   */
+  language_probability: number
+  /**
+   * Language Code
+   *
+   * Detected or specified language code
+   */
+  language_code: string
+  /**
+   * Words
+   *
+   * Word-level transcription details
+   */
+  words: Array<SchemaTranscriptionWord>
+}
+
+/**
+ * SpeechToTextRequestScribeV2
+ */
+export type SchemaElevenlabsSpeechToTextScribeV2Input = {
+  /**
+   * Keyterms
+   *
+   * Words or sentences to bias the model towards transcribing. Up to 100 keyterms, max 50 characters each. Adds 30% premium over base transcription price.
+   */
+  keyterms?: Array<string>
+  /**
+   * Audio Url
+   *
+   * URL of the audio file to transcribe
+   */
+  audio_url: string
+  /**
+   * Diarize
+   *
+   * Whether to annotate who is speaking
+   */
+  diarize?: boolean
+  /**
+   * Language Code
+   *
+   * Language code of the audio
+   */
+  language_code?: string | unknown
+  /**
+   * Tag Audio Events
+   *
+   * Tag audio events like laughter, applause, etc.
+   */
+  tag_audio_events?: boolean
+}
+
+export type SchemaQueueStatus = {
+  status: 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED'
+  /**
+   * The request id.
+   */
+  request_id: string
+  /**
+   * The response url.
+   */
+  response_url?: string
+  /**
+   * The status url.
+   */
+  status_url?: string
+  /**
+   * The cancel url.
+   */
+  cancel_url?: string
+  /**
+   * The logs.
+   */
+  logs?: {
+    [key: string]: unknown
+  }
+  /**
+   * The metrics.
+   */
+  metrics?: {
+    [key: string]: unknown
+  }
+  /**
+   * The queue position.
+   */
+  queue_position?: number
+}
+
+export type GetFalAiElevenlabsSpeechToTextScribeV2RequestsByRequestIdStatusData =
+  {
+    body?: never
+    path: {
+      /**
+       * Request ID
+       */
+      request_id: string
+    }
+    query?: {
+      /**
+       * Whether to include logs (`1`) in the response or not (`0`).
+       */
+      logs?: number
+    }
+    url: '/fal-ai/elevenlabs/speech-to-text/scribe-v2/requests/{request_id}/status'
+  }
+
+export type GetFalAiElevenlabsSpeechToTextScribeV2RequestsByRequestIdStatusResponses =
+  {
+    /**
+     * The request status.
+     */
+    200: SchemaQueueStatus
+  }
+
+export type GetFalAiElevenlabsSpeechToTextScribeV2RequestsByRequestIdStatusResponse =
+  GetFalAiElevenlabsSpeechToTextScribeV2RequestsByRequestIdStatusResponses[keyof GetFalAiElevenlabsSpeechToTextScribeV2RequestsByRequestIdStatusResponses]
+
+export type PutFalAiElevenlabsSpeechToTextScribeV2RequestsByRequestIdCancelData =
+  {
+    body?: never
+    path: {
+      /**
+       * Request ID
+       */
+      request_id: string
+    }
+    query?: never
+    url: '/fal-ai/elevenlabs/speech-to-text/scribe-v2/requests/{request_id}/cancel'
+  }
+
+export type PutFalAiElevenlabsSpeechToTextScribeV2RequestsByRequestIdCancelResponses =
+  {
+    /**
+     * The request was cancelled.
+     */
+    200: {
+      /**
+       * Whether the request was cancelled successfully.
+ */ + success?: boolean + } + } + +export type PutFalAiElevenlabsSpeechToTextScribeV2RequestsByRequestIdCancelResponse = + PutFalAiElevenlabsSpeechToTextScribeV2RequestsByRequestIdCancelResponses[keyof PutFalAiElevenlabsSpeechToTextScribeV2RequestsByRequestIdCancelResponses] + +export type PostFalAiElevenlabsSpeechToTextScribeV2Data = { + body: SchemaElevenlabsSpeechToTextScribeV2Input + path?: never + query?: never + url: '/fal-ai/elevenlabs/speech-to-text/scribe-v2' +} + +export type PostFalAiElevenlabsSpeechToTextScribeV2Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiElevenlabsSpeechToTextScribeV2Response = + PostFalAiElevenlabsSpeechToTextScribeV2Responses[keyof PostFalAiElevenlabsSpeechToTextScribeV2Responses] + +export type GetFalAiElevenlabsSpeechToTextScribeV2RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/elevenlabs/speech-to-text/scribe-v2/requests/{request_id}' +} + +export type GetFalAiElevenlabsSpeechToTextScribeV2RequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaElevenlabsSpeechToTextScribeV2Output + } + +export type GetFalAiElevenlabsSpeechToTextScribeV2RequestsByRequestIdResponse = + GetFalAiElevenlabsSpeechToTextScribeV2RequestsByRequestIdResponses[keyof GetFalAiElevenlabsSpeechToTextScribeV2RequestsByRequestIdResponses] + +export type GetFalAiSmartTurnRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/smart-turn/requests/{request_id}/status' +} + +export type GetFalAiSmartTurnRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSmartTurnRequestsByRequestIdStatusResponse = + GetFalAiSmartTurnRequestsByRequestIdStatusResponses[keyof GetFalAiSmartTurnRequestsByRequestIdStatusResponses] + +export type PutFalAiSmartTurnRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/smart-turn/requests/{request_id}/cancel' +} + +export type PutFalAiSmartTurnRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiSmartTurnRequestsByRequestIdCancelResponse = + PutFalAiSmartTurnRequestsByRequestIdCancelResponses[keyof PutFalAiSmartTurnRequestsByRequestIdCancelResponses] + +export type PostFalAiSmartTurnData = { + body: SchemaSmartTurnInput + path?: never + query?: never + url: '/fal-ai/smart-turn' +} + +export type PostFalAiSmartTurnResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSmartTurnResponse = + PostFalAiSmartTurnResponses[keyof PostFalAiSmartTurnResponses] + +export type GetFalAiSmartTurnRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/smart-turn/requests/{request_id}' +} + +export type GetFalAiSmartTurnRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaSmartTurnOutput +} + +export type GetFalAiSmartTurnRequestsByRequestIdResponse = + GetFalAiSmartTurnRequestsByRequestIdResponses[keyof GetFalAiSmartTurnRequestsByRequestIdResponses] + +export type GetFalAiSpeechToTextTurboRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/speech-to-text/turbo/requests/{request_id}/status' +} + +export type GetFalAiSpeechToTextTurboRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSpeechToTextTurboRequestsByRequestIdStatusResponse = + GetFalAiSpeechToTextTurboRequestsByRequestIdStatusResponses[keyof GetFalAiSpeechToTextTurboRequestsByRequestIdStatusResponses] + +export type PutFalAiSpeechToTextTurboRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/speech-to-text/turbo/requests/{request_id}/cancel' +} + +export type PutFalAiSpeechToTextTurboRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiSpeechToTextTurboRequestsByRequestIdCancelResponse = + PutFalAiSpeechToTextTurboRequestsByRequestIdCancelResponses[keyof PutFalAiSpeechToTextTurboRequestsByRequestIdCancelResponses] + +export type PostFalAiSpeechToTextTurboData = { + body: SchemaSpeechToTextTurboInput + path?: never + query?: never + url: '/fal-ai/speech-to-text/turbo' +} + +export type PostFalAiSpeechToTextTurboResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSpeechToTextTurboResponse = + PostFalAiSpeechToTextTurboResponses[keyof PostFalAiSpeechToTextTurboResponses] + +export type GetFalAiSpeechToTextTurboRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/speech-to-text/turbo/requests/{request_id}' +} + +export type GetFalAiSpeechToTextTurboRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSpeechToTextTurboOutput +} + +export type GetFalAiSpeechToTextTurboRequestsByRequestIdResponse = + GetFalAiSpeechToTextTurboRequestsByRequestIdResponses[keyof GetFalAiSpeechToTextTurboRequestsByRequestIdResponses] + +export type GetFalAiSpeechToTextTurboStreamRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/speech-to-text/turbo/stream/requests/{request_id}/status' +} + +export type GetFalAiSpeechToTextTurboStreamRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiSpeechToTextTurboStreamRequestsByRequestIdStatusResponse = + GetFalAiSpeechToTextTurboStreamRequestsByRequestIdStatusResponses[keyof GetFalAiSpeechToTextTurboStreamRequestsByRequestIdStatusResponses] + +export type PutFalAiSpeechToTextTurboStreamRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/speech-to-text/turbo/stream/requests/{request_id}/cancel' +} + +export type PutFalAiSpeechToTextTurboStreamRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiSpeechToTextTurboStreamRequestsByRequestIdCancelResponse = + PutFalAiSpeechToTextTurboStreamRequestsByRequestIdCancelResponses[keyof PutFalAiSpeechToTextTurboStreamRequestsByRequestIdCancelResponses] + +export type PostFalAiSpeechToTextTurboStreamData = { + body: SchemaSpeechToTextTurboStreamInput + path?: never + query?: never + url: '/fal-ai/speech-to-text/turbo/stream' +} + +export type PostFalAiSpeechToTextTurboStreamResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSpeechToTextTurboStreamResponse = + PostFalAiSpeechToTextTurboStreamResponses[keyof PostFalAiSpeechToTextTurboStreamResponses] + +export type GetFalAiSpeechToTextTurboStreamRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/speech-to-text/turbo/stream/requests/{request_id}' +} + +export type GetFalAiSpeechToTextTurboStreamRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSpeechToTextTurboStreamOutput +} + +export type GetFalAiSpeechToTextTurboStreamRequestsByRequestIdResponse = + GetFalAiSpeechToTextTurboStreamRequestsByRequestIdResponses[keyof GetFalAiSpeechToTextTurboStreamRequestsByRequestIdResponses] + +export type GetFalAiSpeechToTextStreamRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/speech-to-text/stream/requests/{request_id}/status' +} + +export type GetFalAiSpeechToTextStreamRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSpeechToTextStreamRequestsByRequestIdStatusResponse = + GetFalAiSpeechToTextStreamRequestsByRequestIdStatusResponses[keyof GetFalAiSpeechToTextStreamRequestsByRequestIdStatusResponses] + +export type PutFalAiSpeechToTextStreamRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/speech-to-text/stream/requests/{request_id}/cancel' +} + +export type PutFalAiSpeechToTextStreamRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiSpeechToTextStreamRequestsByRequestIdCancelResponse = + PutFalAiSpeechToTextStreamRequestsByRequestIdCancelResponses[keyof PutFalAiSpeechToTextStreamRequestsByRequestIdCancelResponses] + +export type PostFalAiSpeechToTextStreamData = { + body: SchemaSpeechToTextStreamInput + path?: never + query?: never + url: '/fal-ai/speech-to-text/stream' +} + +export type PostFalAiSpeechToTextStreamResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSpeechToTextStreamResponse = + PostFalAiSpeechToTextStreamResponses[keyof PostFalAiSpeechToTextStreamResponses] + +export type GetFalAiSpeechToTextStreamRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/speech-to-text/stream/requests/{request_id}' +} + +export type GetFalAiSpeechToTextStreamRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSpeechToTextStreamOutput +} + +export type GetFalAiSpeechToTextStreamRequestsByRequestIdResponse = + GetFalAiSpeechToTextStreamRequestsByRequestIdResponses[keyof GetFalAiSpeechToTextStreamRequestsByRequestIdResponses] + +export type GetFalAiSpeechToTextRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/speech-to-text/requests/{request_id}/status' +} + +export type GetFalAiSpeechToTextRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSpeechToTextRequestsByRequestIdStatusResponse = + GetFalAiSpeechToTextRequestsByRequestIdStatusResponses[keyof GetFalAiSpeechToTextRequestsByRequestIdStatusResponses] + +export type PutFalAiSpeechToTextRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/speech-to-text/requests/{request_id}/cancel' +} + +export type PutFalAiSpeechToTextRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiSpeechToTextRequestsByRequestIdCancelResponse = + PutFalAiSpeechToTextRequestsByRequestIdCancelResponses[keyof PutFalAiSpeechToTextRequestsByRequestIdCancelResponses] + +export type PostFalAiSpeechToTextData = { + body: SchemaSpeechToTextInput + path?: never + query?: never + url: '/fal-ai/speech-to-text' +} + +export type PostFalAiSpeechToTextResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSpeechToTextResponse = + PostFalAiSpeechToTextResponses[keyof PostFalAiSpeechToTextResponses] + +export type GetFalAiSpeechToTextRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/speech-to-text/requests/{request_id}' +} + +export type GetFalAiSpeechToTextRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaSpeechToTextOutput +} + +export type GetFalAiSpeechToTextRequestsByRequestIdResponse = + GetFalAiSpeechToTextRequestsByRequestIdResponses[keyof GetFalAiSpeechToTextRequestsByRequestIdResponses] + +export type GetFalAiElevenlabsSpeechToTextRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/elevenlabs/speech-to-text/requests/{request_id}/status' +} + +export type GetFalAiElevenlabsSpeechToTextRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiElevenlabsSpeechToTextRequestsByRequestIdStatusResponse = + GetFalAiElevenlabsSpeechToTextRequestsByRequestIdStatusResponses[keyof GetFalAiElevenlabsSpeechToTextRequestsByRequestIdStatusResponses] + +export type PutFalAiElevenlabsSpeechToTextRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/elevenlabs/speech-to-text/requests/{request_id}/cancel' +} + +export type PutFalAiElevenlabsSpeechToTextRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiElevenlabsSpeechToTextRequestsByRequestIdCancelResponse = + PutFalAiElevenlabsSpeechToTextRequestsByRequestIdCancelResponses[keyof PutFalAiElevenlabsSpeechToTextRequestsByRequestIdCancelResponses] + +export type PostFalAiElevenlabsSpeechToTextData = { + body: SchemaElevenlabsSpeechToTextInput + path?: never + query?: never + url: '/fal-ai/elevenlabs/speech-to-text' +} + +export type PostFalAiElevenlabsSpeechToTextResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiElevenlabsSpeechToTextResponse = + PostFalAiElevenlabsSpeechToTextResponses[keyof PostFalAiElevenlabsSpeechToTextResponses] + +export type GetFalAiElevenlabsSpeechToTextRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/elevenlabs/speech-to-text/requests/{request_id}' +} + +export type GetFalAiElevenlabsSpeechToTextRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaElevenlabsSpeechToTextOutput +} + +export type GetFalAiElevenlabsSpeechToTextRequestsByRequestIdResponse = + GetFalAiElevenlabsSpeechToTextRequestsByRequestIdResponses[keyof GetFalAiElevenlabsSpeechToTextRequestsByRequestIdResponses] + +export type GetFalAiWizperRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wizper/requests/{request_id}/status' +} + +export type GetFalAiWizperRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiWizperRequestsByRequestIdStatusResponse = + GetFalAiWizperRequestsByRequestIdStatusResponses[keyof GetFalAiWizperRequestsByRequestIdStatusResponses] + +export type PutFalAiWizperRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wizper/requests/{request_id}/cancel' +} + +export type PutFalAiWizperRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiWizperRequestsByRequestIdCancelResponse = + PutFalAiWizperRequestsByRequestIdCancelResponses[keyof PutFalAiWizperRequestsByRequestIdCancelResponses] + +export type PostFalAiWizperData = { + body: SchemaWizperInput + path?: never + query?: never + url: '/fal-ai/wizper' +} + +export type PostFalAiWizperResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWizperResponse = + PostFalAiWizperResponses[keyof PostFalAiWizperResponses] + +export type GetFalAiWizperRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wizper/requests/{request_id}' +} + +export type GetFalAiWizperRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWizperOutput +} + +export type GetFalAiWizperRequestsByRequestIdResponse = + GetFalAiWizperRequestsByRequestIdResponses[keyof GetFalAiWizperRequestsByRequestIdResponses] + +export type GetFalAiWhisperRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/whisper/requests/{request_id}/status' +} + +export type GetFalAiWhisperRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiWhisperRequestsByRequestIdStatusResponse = + GetFalAiWhisperRequestsByRequestIdStatusResponses[keyof GetFalAiWhisperRequestsByRequestIdStatusResponses] + +export type PutFalAiWhisperRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/whisper/requests/{request_id}/cancel' +} + +export type PutFalAiWhisperRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiWhisperRequestsByRequestIdCancelResponse = + PutFalAiWhisperRequestsByRequestIdCancelResponses[keyof PutFalAiWhisperRequestsByRequestIdCancelResponses] + +export type PostFalAiWhisperData = { + body: SchemaWhisperInput + path?: never + query?: never + url: '/fal-ai/whisper' +} + +export type PostFalAiWhisperResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWhisperResponse = + PostFalAiWhisperResponses[keyof PostFalAiWhisperResponses] + +export type GetFalAiWhisperRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/whisper/requests/{request_id}' +} + +export type GetFalAiWhisperRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaWhisperOutput +} + +export type GetFalAiWhisperRequestsByRequestIdResponse = + GetFalAiWhisperRequestsByRequestIdResponses[keyof GetFalAiWhisperRequestsByRequestIdResponses] diff --git a/packages/typescript/ai-fal/src/generated/speech-to-text/zod.gen.ts b/packages/typescript/ai-fal/src/generated/speech-to-text/zod.gen.ts new file mode 100644 index 00000000..9e6bf691 --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/speech-to-text/zod.gen.ts @@ -0,0 +1,1570 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { z } from 'zod' + +/** + * DiarizationSegment + */ +export const zSchemaDiarizationSegment = z.object({ + timestamp: z.tuple([z.unknown(), z.unknown()]).register(z.globalRegistry, { + description: 'Start and end timestamp of the segment', + }), + speaker: z.string().register(z.globalRegistry, { + description: 'Speaker ID of the segment', + }), +}) + +/** + * WhisperChunk + */ +export const zSchemaWhisperChunk = z.object({ + text: z.string().register(z.globalRegistry, { + description: 'Transcription of the chunk', + }), + timestamp: z.tuple([z.unknown(), z.unknown()]).register(z.globalRegistry, { + description: 'Start and end timestamp of the chunk', + }), + speaker: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Speaker ID of the chunk. Only present if diarization is enabled.', + }), + ), +}) + +/** + * WhisperOutput + */ +export const zSchemaWhisperOutput = z.object({ + text: z.string().register(z.globalRegistry, { + description: 'Transcription of the audio file', + }), + inferred_languages: z + .array( + z.enum([ + 'af', + 'am', + 'ar', + 'as', + 'az', + 'ba', + 'be', + 'bg', + 'bn', + 'bo', + 'br', + 'bs', + 'ca', + 'cs', + 'cy', + 'da', + 'de', + 'el', + 'en', + 'es', + 'et', + 'eu', + 'fa', + 'fi', + 'fo', + 'fr', + 'gl', + 'gu', + 'ha', + 'haw', + 'he', + 'hi', + 'hr', + 'ht', + 'hu', + 'hy', + 'id', + 'is', + 'it', + 'ja', + 'jw', + 'ka', + 'kk', + 'km', + 'kn', + 'ko', + 'la', + 'lb', + 'ln', + 'lo', + 'lt', + 'lv', + 'mg', + 'mi', + 'mk', + 'ml', + 'mn', + 'mr', + 'ms', + 'mt', + 'my', + 'ne', + 'nl', + 'nn', + 'no', + 'oc', + 'pa', + 'pl', + 'ps', + 'pt', + 'ro', + 'ru', + 'sa', + 'sd', + 'si', + 'sk', + 'sl', + 'sn', + 'so', + 'sq', + 'sr', + 'su', + 'sv', + 'sw', + 'ta', + 'te', + 'tg', + 'th', + 'tk', + 'tl', + 'tr', + 'tt', + 'uk', + 'ur', + 'uz', + 'vi', + 'yi', + 'yo', + 'zh', + ]), + ) + .register(z.globalRegistry, { + description: + 'List of languages that the audio file is inferred to be. Defaults to null.', + }), + chunks: z.optional( + z.array(zSchemaWhisperChunk).register(z.globalRegistry, { + description: 'Timestamp chunks of the audio file', + }), + ), + diarization_segments: z + .array(zSchemaDiarizationSegment) + .register(z.globalRegistry, { + description: + 'Speaker diarization segments of the audio file. Only present if diarization is enabled.', + }), +}) + +/** + * WhisperInput + */ +export const zSchemaWhisperInput = z.object({ + version: z.optional( + z.enum(['3']).register(z.globalRegistry, { + description: + 'Version of the model to use. 
All of the models are the Whisper large variant.', + }), + ), + batch_size: z.optional(z.int().gte(1).lte(64)).default(64), + language: z.optional( + z + .enum([ + 'af', + 'am', + 'ar', + 'as', + 'az', + 'ba', + 'be', + 'bg', + 'bn', + 'bo', + 'br', + 'bs', + 'ca', + 'cs', + 'cy', + 'da', + 'de', + 'el', + 'en', + 'es', + 'et', + 'eu', + 'fa', + 'fi', + 'fo', + 'fr', + 'gl', + 'gu', + 'ha', + 'haw', + 'he', + 'hi', + 'hr', + 'ht', + 'hu', + 'hy', + 'id', + 'is', + 'it', + 'ja', + 'jw', + 'ka', + 'kk', + 'km', + 'kn', + 'ko', + 'la', + 'lb', + 'ln', + 'lo', + 'lt', + 'lv', + 'mg', + 'mi', + 'mk', + 'ml', + 'mn', + 'mr', + 'ms', + 'mt', + 'my', + 'ne', + 'nl', + 'nn', + 'no', + 'oc', + 'pa', + 'pl', + 'ps', + 'pt', + 'ro', + 'ru', + 'sa', + 'sd', + 'si', + 'sk', + 'sl', + 'sn', + 'so', + 'sq', + 'sr', + 'su', + 'sv', + 'sw', + 'ta', + 'te', + 'tg', + 'th', + 'tk', + 'tl', + 'tr', + 'tt', + 'uk', + 'ur', + 'uz', + 'vi', + 'yi', + 'yo', + 'zh', + ]) + .register(z.globalRegistry, { + description: + '\n Language of the audio file. If set to null, the language will be\n automatically detected. Defaults to null.\n\n If translate is selected as the task, the audio will be translated to\n English, regardless of the language selected.\n ', + }), + ), + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Prompt to use for generation. Defaults to an empty string.', + }), + ) + .default(''), + num_speakers: z.optional(z.union([z.int().gte(1), z.null()])), + task: z.optional( + z.enum(['transcribe', 'translate']).register(z.globalRegistry, { + description: + 'Task to perform on the audio file. Either transcribe or translate.', + }), + ), + chunk_level: z.optional( + z.enum(['none', 'segment', 'word']).register(z.globalRegistry, { + description: + 'Level of the chunks to return. Either none, segment or word. `none` would imply that all of the audio will be transcribed without the timestamp tokens, we suggest to switch to `none` if you are not satisfied with the transcription quality, since it will usually improve the quality of the results. Switching to `none` will also provide minor speed ups in the transcription due to less amount of generated tokens. Notice that setting to none will produce **a single chunk with the whole transcription**.', + }), + ), + audio_url: z.string().register(z.globalRegistry, { + description: + 'URL of the audio file to transcribe. Supported formats: mp3, mp4, mpeg, mpga, m4a, wav or webm.', + }), + diarize: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to diarize the audio file. Defaults to false. 
Setting to true will add costs proportional to diarization inference time.', + }), + ) + .default(false), +}) + +/** + * WhisperOutput + */ +export const zSchemaWizperOutput = z.object({ + text: z.string().register(z.globalRegistry, { + description: 'Transcription of the audio file', + }), + languages: z + .array( + z.enum([ + 'af', + 'am', + 'ar', + 'as', + 'az', + 'ba', + 'be', + 'bg', + 'bn', + 'bo', + 'br', + 'bs', + 'ca', + 'cs', + 'cy', + 'da', + 'de', + 'el', + 'en', + 'es', + 'et', + 'eu', + 'fa', + 'fi', + 'fo', + 'fr', + 'gl', + 'gu', + 'ha', + 'haw', + 'he', + 'hi', + 'hr', + 'ht', + 'hu', + 'hy', + 'id', + 'is', + 'it', + 'ja', + 'jw', + 'ka', + 'kk', + 'km', + 'kn', + 'ko', + 'la', + 'lb', + 'ln', + 'lo', + 'lt', + 'lv', + 'mg', + 'mi', + 'mk', + 'ml', + 'mn', + 'mr', + 'ms', + 'mt', + 'my', + 'ne', + 'nl', + 'nn', + 'no', + 'oc', + 'pa', + 'pl', + 'ps', + 'pt', + 'ro', + 'ru', + 'sa', + 'sd', + 'si', + 'sk', + 'sl', + 'sn', + 'so', + 'sq', + 'sr', + 'su', + 'sv', + 'sw', + 'ta', + 'te', + 'tg', + 'th', + 'tk', + 'tl', + 'tr', + 'tt', + 'uk', + 'ur', + 'uz', + 'vi', + 'yi', + 'yo', + 'zh', + ]), + ) + .register(z.globalRegistry, { + description: + 'List of languages that the audio file is inferred to be. Defaults to null.', + }), + chunks: z.array(zSchemaWhisperChunk).register(z.globalRegistry, { + description: 'Timestamp chunks of the audio file', + }), +}) + +/** + * WhisperInput + */ +export const zSchemaWizperInput = z.object({ + language: z.optional( + z.union([ + z.enum([ + 'af', + 'am', + 'ar', + 'as', + 'az', + 'ba', + 'be', + 'bg', + 'bn', + 'bo', + 'br', + 'bs', + 'ca', + 'cs', + 'cy', + 'da', + 'de', + 'el', + 'en', + 'es', + 'et', + 'eu', + 'fa', + 'fi', + 'fo', + 'fr', + 'gl', + 'gu', + 'ha', + 'haw', + 'he', + 'hi', + 'hr', + 'ht', + 'hu', + 'hy', + 'id', + 'is', + 'it', + 'ja', + 'jw', + 'ka', + 'kk', + 'km', + 'kn', + 'ko', + 'la', + 'lb', + 'ln', + 'lo', + 'lt', + 'lv', + 'mg', + 'mi', + 'mk', + 'ml', + 'mn', + 'mr', + 'ms', + 'mt', + 'my', + 'ne', + 'nl', + 'nn', + 'no', + 'oc', + 'pa', + 'pl', + 'ps', + 'pt', + 'ro', + 'ru', + 'sa', + 'sd', + 'si', + 'sk', + 'sl', + 'sn', + 'so', + 'sq', + 'sr', + 'su', + 'sv', + 'sw', + 'ta', + 'te', + 'tg', + 'th', + 'tk', + 'tl', + 'tr', + 'tt', + 'uk', + 'ur', + 'uz', + 'vi', + 'yi', + 'yo', + 'zh', + ]), + z.unknown(), + ]), + ), + version: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Version of the model to use. All of the models are the Whisper large variant.', + }), + ) + .default('3'), + max_segment_len: z + .optional( + z.int().gte(10).lte(29).register(z.globalRegistry, { + description: + 'Maximum speech segment duration in seconds before splitting.', + }), + ) + .default(29), + task: z.optional( + z.enum(['transcribe', 'translate']).register(z.globalRegistry, { + description: + 'Task to perform on the audio file. Either transcribe or translate.', + }), + ), + chunk_level: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Level of the chunks to return.', + }), + ) + .default('segment'), + audio_url: z.string().register(z.globalRegistry, { + description: + 'URL of the audio file to transcribe. Supported formats: mp3, mp4, mpeg, mpga, m4a, wav or webm.', + }), + merge_chunks: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to merge consecutive chunks. 
When enabled, chunks are merged if their combined duration does not exceed max_segment_len.', + }), + ) + .default(true), +}) + +/** + * TranscriptionWord + */ +export const zSchemaTranscriptionWord = z.object({ + text: z.string().register(z.globalRegistry, { + description: 'The transcribed word or audio event', + }), + start: z.union([z.number(), z.unknown()]), + type: z.string().register(z.globalRegistry, { + description: 'Type of element (word, spacing, or audio_event)', + }), + end: z.union([z.number(), z.unknown()]), + speaker_id: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * TranscriptionOutput + */ +export const zSchemaElevenlabsSpeechToTextOutput = z.object({ + text: z.string().register(z.globalRegistry, { + description: 'The full transcribed text', + }), + language_probability: z.number().register(z.globalRegistry, { + description: 'Confidence in language detection', + }), + language_code: z.string().register(z.globalRegistry, { + description: 'Detected or specified language code', + }), + words: z.array(zSchemaTranscriptionWord).register(z.globalRegistry, { + description: 'Word-level transcription details', + }), +}) + +/** + * SpeechToTextRequest + */ +export const zSchemaElevenlabsSpeechToTextInput = z.object({ + language_code: z.optional(z.union([z.string(), z.unknown()])), + audio_url: z.string().register(z.globalRegistry, { + description: 'URL of the audio file to transcribe', + }), + diarize: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to annotate who is speaking', + }), + ) + .default(true), + tag_audio_events: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Tag audio events like laughter, applause, etc.', + }), + ) + .default(true), +}) + +/** + * SpeechOutput + */ +export const zSchemaSpeechToTextOutput = z.object({ + partial: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Indicates if this is a partial (in-progress) transcript', + }), + ) + .default(false), + output: z.string().register(z.globalRegistry, { + description: 'The partial or final transcription output from Canary', + }), +}) + +/** + * SpeechInput + */ +export const zSchemaSpeechToTextInput = z.object({ + audio_url: z.string().register(z.globalRegistry, { + description: 'Local filesystem path (or remote URL) to a long audio file', + }), + use_pnc: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "Whether to use Canary's built-in punctuation & capitalization", + }), + ) + .default(true), +}) + +export const zSchemaSpeechToTextStreamOutput = z.unknown() + +/** + * SpeechInput + */ +export const zSchemaSpeechToTextStreamInput = z.object({ + audio_url: z.string().register(z.globalRegistry, { + description: 'Local filesystem path (or remote URL) to a long audio file', + }), + use_pnc: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "Whether to use Canary's built-in punctuation & capitalization", + }), + ) + .default(true), +}) + +export const zSchemaSpeechToTextTurboStreamOutput = z.unknown() + +/** + * SpeechInput + */ +export const zSchemaSpeechToTextTurboStreamInput = z.object({ + audio_url: z.string().register(z.globalRegistry, { + description: 'Local filesystem path (or remote URL) to a long audio file', + }), + use_pnc: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "Whether to use Canary's built-in punctuation & capitalization", + }), + ) + .default(true), +}) + +/** + * SpeechOutput + */ +export const 
zSchemaSpeechToTextTurboOutput = z.object({ + partial: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Indicates if this is a partial (in-progress) transcript', + }), + ) + .default(false), + output: z.string().register(z.globalRegistry, { + description: 'The partial or final transcription output from Canary', + }), +}) + +/** + * SpeechInput + */ +export const zSchemaSpeechToTextTurboInput = z.object({ + audio_url: z.string().register(z.globalRegistry, { + description: 'Local filesystem path (or remote URL) to a long audio file', + }), + use_pnc: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "Whether to use Canary's built-in punctuation & capitalization", + }), + ) + .default(true), +}) + +/** + * Output + */ +export const zSchemaSmartTurnOutput = z.object({ + prediction: z.int().register(z.globalRegistry, { + description: 'The predicted turn type. 1 for Complete, 0 for Incomplete.', + }), + probability: z.number().register(z.globalRegistry, { + description: 'The probability of the predicted turn type.', + }), + metrics: z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The metrics of the inference.', + }), +}) + +/** + * SmartTurnInput + */ +export const zSchemaSmartTurnInput = z.object({ + audio_url: z.string().register(z.globalRegistry, { + description: 'The URL of the audio file to be processed.', + }), +}) + +/** + * TranscriptionOutputV2 + */ +export const zSchemaElevenlabsSpeechToTextScribeV2Output = z.object({ + text: z.string().register(z.globalRegistry, { + description: 'The full transcribed text', + }), + language_probability: z.number().register(z.globalRegistry, { + description: 'Confidence in language detection', + }), + language_code: z.string().register(z.globalRegistry, { + description: 'Detected or specified language code', + }), + words: z.array(zSchemaTranscriptionWord).register(z.globalRegistry, { + description: 'Word-level transcription details', + }), +}) + +/** + * SpeechToTextRequestScribeV2 + */ +export const zSchemaElevenlabsSpeechToTextScribeV2Input = z.object({ + keyterms: z + .optional( + z.array(z.string()).max(100).register(z.globalRegistry, { + description: + 'Words or sentences to bias the model towards transcribing. Up to 100 keyterms, max 50 characters each. 
Adds 30% premium over base transcription price.', + }), + ) + .default([]), + audio_url: z.string().register(z.globalRegistry, { + description: 'URL of the audio file to transcribe', + }), + diarize: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to annotate who is speaking', + }), + ) + .default(true), + language_code: z.optional(z.union([z.string(), z.unknown()])), + tag_audio_events: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Tag audio events like laughter, applause, etc.', + }), + ) + .default(true), +}) + +export const zSchemaQueueStatus = z.object({ + status: z.enum(['IN_QUEUE', 'IN_PROGRESS', 'COMPLETED']), + request_id: z.string().register(z.globalRegistry, { + description: 'The request id.', + }), + response_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The response url.', + }), + ), + status_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The status url.', + }), + ), + cancel_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The cancel url.', + }), + ), + logs: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The logs.', + }), + ), + metrics: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The metrics.', + }), + ), + queue_position: z.optional( + z.int().register(z.globalRegistry, { + description: 'The queue position.', + }), + ), +}) + +export const zGetFalAiElevenlabsSpeechToTextScribeV2RequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiElevenlabsSpeechToTextScribeV2RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiElevenlabsSpeechToTextScribeV2RequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiElevenlabsSpeechToTextScribeV2RequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiElevenlabsSpeechToTextScribeV2Data = z.object({ + body: zSchemaElevenlabsSpeechToTextScribeV2Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiElevenlabsSpeechToTextScribeV2Response = + zSchemaQueueStatus + +export const zGetFalAiElevenlabsSpeechToTextScribeV2RequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiElevenlabsSpeechToTextScribeV2RequestsByRequestIdResponse = + zSchemaElevenlabsSpeechToTextScribeV2Output + +export const zGetFalAiSmartTurnRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiSmartTurnRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSmartTurnRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiSmartTurnRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSmartTurnData = z.object({ + body: zSchemaSmartTurnInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiSmartTurnResponse = zSchemaQueueStatus + +export const zGetFalAiSmartTurnRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSmartTurnRequestsByRequestIdResponse = + zSchemaSmartTurnOutput + +export const zGetFalAiSpeechToTextTurboRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiSpeechToTextTurboRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSpeechToTextTurboRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiSpeechToTextTurboRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSpeechToTextTurboData = z.object({ + body: zSchemaSpeechToTextTurboInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiSpeechToTextTurboResponse = zSchemaQueueStatus + +export const zGetFalAiSpeechToTextTurboRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSpeechToTextTurboRequestsByRequestIdResponse = + zSchemaSpeechToTextTurboOutput + +export const zGetFalAiSpeechToTextTurboStreamRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiSpeechToTextTurboStreamRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSpeechToTextTurboStreamRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiSpeechToTextTurboStreamRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSpeechToTextTurboStreamData = z.object({ + body: zSchemaSpeechToTextTurboStreamInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiSpeechToTextTurboStreamResponse = zSchemaQueueStatus + +export const zGetFalAiSpeechToTextTurboStreamRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. + */ +export const zGetFalAiSpeechToTextTurboStreamRequestsByRequestIdResponse = + zSchemaSpeechToTextTurboStreamOutput + +export const zGetFalAiSpeechToTextStreamRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiSpeechToTextStreamRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSpeechToTextStreamRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiSpeechToTextStreamRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSpeechToTextStreamData = z.object({ + body: zSchemaSpeechToTextStreamInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiSpeechToTextStreamResponse = zSchemaQueueStatus + +export const zGetFalAiSpeechToTextStreamRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSpeechToTextStreamRequestsByRequestIdResponse = + zSchemaSpeechToTextStreamOutput + +export const zGetFalAiSpeechToTextRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiSpeechToTextRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSpeechToTextRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiSpeechToTextRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSpeechToTextData = z.object({ + body: zSchemaSpeechToTextInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiSpeechToTextResponse = zSchemaQueueStatus + +export const zGetFalAiSpeechToTextRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSpeechToTextRequestsByRequestIdResponse = + zSchemaSpeechToTextOutput + +export const zGetFalAiElevenlabsSpeechToTextRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiElevenlabsSpeechToTextRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiElevenlabsSpeechToTextRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiElevenlabsSpeechToTextRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiElevenlabsSpeechToTextData = z.object({ + body: zSchemaElevenlabsSpeechToTextInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiElevenlabsSpeechToTextResponse = zSchemaQueueStatus + +export const zGetFalAiElevenlabsSpeechToTextRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiElevenlabsSpeechToTextRequestsByRequestIdResponse = + zSchemaElevenlabsSpeechToTextOutput + +export const zGetFalAiWizperRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiWizperRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWizperRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiWizperRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWizperData = z.object({ + body: zSchemaWizperInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWizperResponse = zSchemaQueueStatus + +export const zGetFalAiWizperRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiWizperRequestsByRequestIdResponse = zSchemaWizperOutput + +export const zGetFalAiWhisperRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiWhisperRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWhisperRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiWhisperRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWhisperData = z.object({ + body: zSchemaWhisperInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWhisperResponse = zSchemaQueueStatus + +export const zGetFalAiWhisperRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiWhisperRequestsByRequestIdResponse = zSchemaWhisperOutput diff --git a/packages/typescript/ai-fal/src/generated/text-to-3d/endpoint-map.ts b/packages/typescript/ai-fal/src/generated/text-to-3d/endpoint-map.ts new file mode 100644 index 00000000..858d4d8b --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/text-to-3d/endpoint-map.ts @@ -0,0 +1,78 @@ +// AUTO-GENERATED - Do not edit manually +// Generated from types.gen.ts via scripts/generate-fal-endpoint-maps.ts + +import { + zSchemaHunyuan3dV3TextTo3dInput, + zSchemaHunyuan3dV3TextTo3dOutput, + zSchemaHunyuanMotionFastInput, + zSchemaHunyuanMotionFastOutput, + zSchemaHunyuanMotionInput, + zSchemaHunyuanMotionOutput, + zSchemaMeshyV6PreviewTextTo3dInput, + zSchemaMeshyV6PreviewTextTo3dOutput, +} from './zod.gen' + +import type { + SchemaHunyuan3dV3TextTo3dInput, + SchemaHunyuan3dV3TextTo3dOutput, + SchemaHunyuanMotionFastInput, + SchemaHunyuanMotionFastOutput, + SchemaHunyuanMotionInput, + SchemaHunyuanMotionOutput, + SchemaMeshyV6PreviewTextTo3dInput, + SchemaMeshyV6PreviewTextTo3dOutput, +} from './types.gen' + +import type { z } from 'zod' + +export type TextTo3dEndpointMap = { + 'fal-ai/hunyuan-motion/fast': { + input: SchemaHunyuanMotionFastInput + output: SchemaHunyuanMotionFastOutput + } + 'fal-ai/hunyuan-motion': { + input: SchemaHunyuanMotionInput + output: SchemaHunyuanMotionOutput + } + 'fal-ai/hunyuan3d-v3/text-to-3d': { + input: SchemaHunyuan3dV3TextTo3dInput + output: SchemaHunyuan3dV3TextTo3dOutput + } + 'fal-ai/meshy/v6-preview/text-to-3d': { + input: SchemaMeshyV6PreviewTextTo3dInput + output: SchemaMeshyV6PreviewTextTo3dOutput + } +} + +/** Union type of all text-to-3d model endpoint IDs */ +export type TextTo3dModel = keyof TextTo3dEndpointMap + +export 
const TextTo3dSchemaMap: Record<
+  TextTo3dModel,
+  { input: z.ZodSchema; output: z.ZodSchema }
+> = {
+  ['fal-ai/hunyuan-motion/fast']: {
+    input: zSchemaHunyuanMotionFastInput,
+    output: zSchemaHunyuanMotionFastOutput,
+  },
+  ['fal-ai/hunyuan-motion']: {
+    input: zSchemaHunyuanMotionInput,
+    output: zSchemaHunyuanMotionOutput,
+  },
+  ['fal-ai/hunyuan3d-v3/text-to-3d']: {
+    input: zSchemaHunyuan3dV3TextTo3dInput,
+    output: zSchemaHunyuan3dV3TextTo3dOutput,
+  },
+  ['fal-ai/meshy/v6-preview/text-to-3d']: {
+    input: zSchemaMeshyV6PreviewTextTo3dInput,
+    output: zSchemaMeshyV6PreviewTextTo3dOutput,
+  },
+} as const
+
+/** Get the input type for a specific text-to-3d model */
+export type TextTo3dModelInput<T extends TextTo3dModel> =
+  TextTo3dEndpointMap[T]['input']
+
+/** Get the output type for a specific text-to-3d model */
+export type TextTo3dModelOutput<T extends TextTo3dModel> =
+  TextTo3dEndpointMap[T]['output']
diff --git a/packages/typescript/ai-fal/src/generated/text-to-3d/types.gen.ts b/packages/typescript/ai-fal/src/generated/text-to-3d/types.gen.ts
new file mode 100644
index 00000000..8d4ae524
--- /dev/null
+++ b/packages/typescript/ai-fal/src/generated/text-to-3d/types.gen.ts
@@ -0,0 +1,853 @@
+// This file is auto-generated by @hey-api/openapi-ts
+
+export type ClientOptions = {
+  baseUrl: 'https://queue.fal.run' | (string & {})
+}
+
+/**
+ * TextureFiles
+ *
+ * Texture files downloaded and uploaded to CDN
+ */
+export type SchemaTextureFiles = {
+  /**
+   * Base Color
+   *
+   * Base color texture
+   */
+  base_color: SchemaFile
+  /**
+   * Normal
+   *
+   * Normal texture (PBR)
+   */
+  normal?: SchemaFile
+  /**
+   * Roughness
+   *
+   * Roughness texture (PBR)
+   */
+  roughness?: SchemaFile
+  /**
+   * Metallic
+   *
+   * Metallic texture (PBR)
+   */
+  metallic?: SchemaFile
+}
+
+/**
+ * File
+ */
+export type SchemaFile = {
+  /**
+   * File Size
+   *
+   * The size of the file in bytes.
+   */
+  file_size?: number
+  /**
+   * File Name
+   *
+   * The name of the file. It will be auto-generated if not provided.
+   */
+  file_name?: string
+  /**
+   * Content Type
+   *
+   * The mime type of the file.
+   */
+  content_type?: string
+  /**
+   * Url
+   *
+   * The URL where the file can be downloaded from.
+   */
+  url: string
+  /**
+   * File Data
+   *
+   * File data
+   */
+  file_data?: Blob | File
+}
+
+/**
+ * TextTo3DOutput
+ *
+ * Output for Text to 3D generation
+ */
+export type SchemaMeshyV6PreviewTextTo3dOutput = {
+  /**
+   * Prompt
+   *
+   * The text prompt used for generation
+   */
+  prompt: string
+  /**
+   * Thumbnail
+   *
+   * Preview thumbnail of the generated model
+   */
+  thumbnail?: SchemaFile
+  /**
+   * Actual Prompt
+   *
+   * The actual prompt used if prompt expansion was enabled
+   */
+  actual_prompt?: string
+  /**
+   * Texture Urls
+   *
+   * Array of texture file objects
+   */
+  texture_urls?: Array<SchemaTextureFiles>
+  /**
+   * Model Glb
+   *
+   * Generated 3D object in GLB format.
+ */ + model_glb: SchemaFile + /** + * Seed + * + * The seed used for generation + */ + seed?: number + /** + * Model Urls + * + * URLs for different 3D model formats + */ + model_urls: SchemaModelUrls +} + +/** + * ModelUrls + * + * 3D model files in various formats + */ +export type SchemaModelUrls = { + /** + * Usdz + * + * USDZ format 3D model + */ + usdz?: SchemaFile + /** + * Fbx + * + * FBX format 3D model + */ + fbx?: SchemaFile + /** + * Blend + * + * Blender format 3D model + */ + blend?: SchemaFile + /** + * Stl + * + * STL format 3D model + */ + stl?: SchemaFile + /** + * Glb + * + * GLB format 3D model + */ + glb?: SchemaFile + /** + * Obj + * + * OBJ format 3D model + */ + obj?: SchemaFile +} + +/** + * TextTo3DInput + * + * Input for Text to 3D conversion + */ +export type SchemaMeshyV6PreviewTextTo3dInput = { + /** + * Prompt + * + * Describe what kind of object the 3D model is. Maximum 600 characters. + */ + prompt: string + /** + * Enable Pbr + * + * Generate PBR Maps (metallic, roughness, normal) in addition to base color. Should be false for sculpture style. + */ + enable_pbr?: boolean + /** + * Target Polycount + * + * Target number of polygons in the generated model + */ + target_polycount?: number + /** + * Art Style + * + * Desired art style of the object. Note: enable_pbr should be false for sculpture style. + */ + art_style?: 'realistic' | 'sculpture' + /** + * Enable Safety Checker + * + * If set to true, input data will be checked for safety before processing. + */ + enable_safety_checker?: boolean + /** + * Mode + * + * Generation mode. 'preview' returns untextured geometry only, 'full' returns textured model (preview + refine). + */ + mode?: 'preview' | 'full' + /** + * Symmetry Mode + * + * Controls symmetry behavior during model generation. + */ + symmetry_mode?: 'off' | 'auto' | 'on' + /** + * Should Remesh + * + * Whether to enable the remesh phase. When false, returns unprocessed triangular mesh. + */ + should_remesh?: boolean + /** + * Texture Image Url + * + * 2D image to guide the texturing process (only used in 'full' mode) + */ + texture_image_url?: string + /** + * Topology + * + * Specify the topology of the generated model. Quad for smooth surfaces, Triangle for detailed geometry. + */ + topology?: 'quad' | 'triangle' + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning. + */ + enable_prompt_expansion?: boolean + /** + * Seed + * + * Seed for reproducible results. Same prompt and seed usually generate the same result. + */ + seed?: number + /** + * Is A T Pose + * + * Whether to generate the model in an A/T pose + */ + is_a_t_pose?: boolean + /** + * Texture Prompt + * + * Additional text prompt to guide the texturing process (only used in 'full' mode) + */ + texture_prompt?: string +} + +/** + * TextTo3DOutput + */ +export type SchemaHunyuan3dV3TextTo3dOutput = { + /** + * Model Urls + * + * URLs for different 3D model formats + */ + model_urls: SchemaModelUrls + /** + * Thumbnail + * + * Preview thumbnail of the generated model + */ + thumbnail?: SchemaFile + /** + * Seed + * + * The seed used for generation + */ + seed?: number + /** + * Model Glb + * + * Generated 3D object in GLB format. 
+ */ + model_glb: SchemaFile +} + +/** + * TextTo3DInput + */ +export type SchemaHunyuan3dV3TextTo3dInput = { + /** + * Enable Pbr + * + * Whether to enable PBR material generation + */ + enable_pbr?: boolean + /** + * Polygon Type + * + * Polygon type. Only takes effect when GenerateType is LowPoly. + */ + polygon_type?: 'triangle' | 'quadrilateral' + /** + * Face Count + * + * Target face count. Range: 40000-1500000 + */ + face_count?: number + /** + * Prompt + * + * Text description of the 3D content to generate. Supports up to 1024 UTF-8 characters. + */ + prompt: string + /** + * Generate Type + * + * Generation type. Normal: textured model. LowPoly: polygon reduction. Geometry: white model without texture. + */ + generate_type?: 'Normal' | 'LowPoly' | 'Geometry' +} + +/** + * HYMotionOutput + */ +export type SchemaHunyuanMotionOutput = { + /** + * Fbx File + * + * Generated FBX animation file. + */ + fbx_file?: SchemaFile + /** + * Motion Json + * + * Generated motion data as JSON. + */ + motion_json?: SchemaFile + /** + * Seed + * + * Seed used for generation. + */ + seed: number +} + +/** + * HYMotionInput + */ +export type SchemaHunyuanMotionInput = { + /** + * Prompt + * + * Text prompt describing the motion to generate. + */ + prompt: string + /** + * Duration + * + * Motion duration in seconds (0.5-12.0). + */ + duration?: number + /** + * Guidance Scale + * + * Classifier-free guidance scale. Higher = more faithful to prompt. + */ + guidance_scale?: number + /** + * Seed + * + * Random seed for reproducible generation. + */ + seed?: number + /** + * Output Format + * + * Output format: 'fbx' for animation files, 'dict' for raw JSON. + */ + output_format?: 'fbx' | 'dict' +} + +/** + * HYMotionOutput + */ +export type SchemaHunyuanMotionFastOutput = { + /** + * Fbx File + * + * Generated FBX animation file. + */ + fbx_file?: SchemaFile + /** + * Motion Json + * + * Generated motion data as JSON. + */ + motion_json?: SchemaFile + /** + * Seed + * + * Seed used for generation. + */ + seed: number +} + +/** + * HYMotionInput + */ +export type SchemaHunyuanMotionFastInput = { + /** + * Prompt + * + * Text prompt describing the motion to generate. + */ + prompt: string + /** + * Duration + * + * Motion duration in seconds (0.5-12.0). + */ + duration?: number + /** + * Guidance Scale + * + * Classifier-free guidance scale. Higher = more faithful to prompt. + */ + guidance_scale?: number + /** + * Seed + * + * Random seed for reproducible generation. + */ + seed?: number + /** + * Output Format + * + * Output format: 'fbx' for animation files, 'dict' for raw JSON. + */ + output_format?: 'fbx' | 'dict' +} + +export type SchemaQueueStatus = { + status: 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED' + /** + * The request id. + */ + request_id: string + /** + * The response url. + */ + response_url?: string + /** + * The status url. + */ + status_url?: string + /** + * The cancel url. + */ + cancel_url?: string + /** + * The logs. + */ + logs?: { + [key: string]: unknown + } + /** + * The metrics. + */ + metrics?: { + [key: string]: unknown + } + /** + * The queue position. + */ + queue_position?: number +} + +export type GetFalAiHunyuanMotionFastRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/hunyuan-motion/fast/requests/{request_id}/status' +} + +export type GetFalAiHunyuanMotionFastRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiHunyuanMotionFastRequestsByRequestIdStatusResponse = + GetFalAiHunyuanMotionFastRequestsByRequestIdStatusResponses[keyof GetFalAiHunyuanMotionFastRequestsByRequestIdStatusResponses] + +export type PutFalAiHunyuanMotionFastRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan-motion/fast/requests/{request_id}/cancel' +} + +export type PutFalAiHunyuanMotionFastRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiHunyuanMotionFastRequestsByRequestIdCancelResponse = + PutFalAiHunyuanMotionFastRequestsByRequestIdCancelResponses[keyof PutFalAiHunyuanMotionFastRequestsByRequestIdCancelResponses] + +export type PostFalAiHunyuanMotionFastData = { + body: SchemaHunyuanMotionFastInput + path?: never + query?: never + url: '/fal-ai/hunyuan-motion/fast' +} + +export type PostFalAiHunyuanMotionFastResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiHunyuanMotionFastResponse = + PostFalAiHunyuanMotionFastResponses[keyof PostFalAiHunyuanMotionFastResponses] + +export type GetFalAiHunyuanMotionFastRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan-motion/fast/requests/{request_id}' +} + +export type GetFalAiHunyuanMotionFastRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaHunyuanMotionFastOutput +} + +export type GetFalAiHunyuanMotionFastRequestsByRequestIdResponse = + GetFalAiHunyuanMotionFastRequestsByRequestIdResponses[keyof GetFalAiHunyuanMotionFastRequestsByRequestIdResponses] + +export type GetFalAiHunyuanMotionRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/hunyuan-motion/requests/{request_id}/status' +} + +export type GetFalAiHunyuanMotionRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiHunyuanMotionRequestsByRequestIdStatusResponse = + GetFalAiHunyuanMotionRequestsByRequestIdStatusResponses[keyof GetFalAiHunyuanMotionRequestsByRequestIdStatusResponses] + +export type PutFalAiHunyuanMotionRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan-motion/requests/{request_id}/cancel' +} + +export type PutFalAiHunyuanMotionRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiHunyuanMotionRequestsByRequestIdCancelResponse = + PutFalAiHunyuanMotionRequestsByRequestIdCancelResponses[keyof PutFalAiHunyuanMotionRequestsByRequestIdCancelResponses] + +export type PostFalAiHunyuanMotionData = { + body: SchemaHunyuanMotionInput + path?: never + query?: never + url: '/fal-ai/hunyuan-motion' +} + +export type PostFalAiHunyuanMotionResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiHunyuanMotionResponse = + PostFalAiHunyuanMotionResponses[keyof PostFalAiHunyuanMotionResponses] + +export type GetFalAiHunyuanMotionRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan-motion/requests/{request_id}' +} + +export type GetFalAiHunyuanMotionRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaHunyuanMotionOutput +} + +export type GetFalAiHunyuanMotionRequestsByRequestIdResponse = + GetFalAiHunyuanMotionRequestsByRequestIdResponses[keyof GetFalAiHunyuanMotionRequestsByRequestIdResponses] + +export type GetFalAiHunyuan3dV3TextTo3dRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/hunyuan3d-v3/text-to-3d/requests/{request_id}/status' +} + +export type GetFalAiHunyuan3dV3TextTo3dRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiHunyuan3dV3TextTo3dRequestsByRequestIdStatusResponse = + GetFalAiHunyuan3dV3TextTo3dRequestsByRequestIdStatusResponses[keyof GetFalAiHunyuan3dV3TextTo3dRequestsByRequestIdStatusResponses] + +export type PutFalAiHunyuan3dV3TextTo3dRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan3d-v3/text-to-3d/requests/{request_id}/cancel' +} + +export type PutFalAiHunyuan3dV3TextTo3dRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiHunyuan3dV3TextTo3dRequestsByRequestIdCancelResponse = + PutFalAiHunyuan3dV3TextTo3dRequestsByRequestIdCancelResponses[keyof PutFalAiHunyuan3dV3TextTo3dRequestsByRequestIdCancelResponses] + +export type PostFalAiHunyuan3dV3TextTo3dData = { + body: SchemaHunyuan3dV3TextTo3dInput + path?: never + query?: never + url: '/fal-ai/hunyuan3d-v3/text-to-3d' +} + +export type PostFalAiHunyuan3dV3TextTo3dResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiHunyuan3dV3TextTo3dResponse = + PostFalAiHunyuan3dV3TextTo3dResponses[keyof PostFalAiHunyuan3dV3TextTo3dResponses] + +export type GetFalAiHunyuan3dV3TextTo3dRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan3d-v3/text-to-3d/requests/{request_id}' +} + +export type GetFalAiHunyuan3dV3TextTo3dRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaHunyuan3dV3TextTo3dOutput +} + +export type GetFalAiHunyuan3dV3TextTo3dRequestsByRequestIdResponse = + GetFalAiHunyuan3dV3TextTo3dRequestsByRequestIdResponses[keyof GetFalAiHunyuan3dV3TextTo3dRequestsByRequestIdResponses] + +export type GetFalAiMeshyV6PreviewTextTo3dRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/meshy/v6-preview/text-to-3d/requests/{request_id}/status' +} + +export type GetFalAiMeshyV6PreviewTextTo3dRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiMeshyV6PreviewTextTo3dRequestsByRequestIdStatusResponse = + GetFalAiMeshyV6PreviewTextTo3dRequestsByRequestIdStatusResponses[keyof GetFalAiMeshyV6PreviewTextTo3dRequestsByRequestIdStatusResponses] + +export type PutFalAiMeshyV6PreviewTextTo3dRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/meshy/v6-preview/text-to-3d/requests/{request_id}/cancel' +} + +export type PutFalAiMeshyV6PreviewTextTo3dRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiMeshyV6PreviewTextTo3dRequestsByRequestIdCancelResponse = + PutFalAiMeshyV6PreviewTextTo3dRequestsByRequestIdCancelResponses[keyof PutFalAiMeshyV6PreviewTextTo3dRequestsByRequestIdCancelResponses] + +export type PostFalAiMeshyV6PreviewTextTo3dData = { + body: SchemaMeshyV6PreviewTextTo3dInput + path?: never + query?: never + url: '/fal-ai/meshy/v6-preview/text-to-3d' +} + +export type PostFalAiMeshyV6PreviewTextTo3dResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMeshyV6PreviewTextTo3dResponse = + PostFalAiMeshyV6PreviewTextTo3dResponses[keyof PostFalAiMeshyV6PreviewTextTo3dResponses] + +export type GetFalAiMeshyV6PreviewTextTo3dRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/meshy/v6-preview/text-to-3d/requests/{request_id}' +} + +export type GetFalAiMeshyV6PreviewTextTo3dRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMeshyV6PreviewTextTo3dOutput +} + +export type GetFalAiMeshyV6PreviewTextTo3dRequestsByRequestIdResponse = + GetFalAiMeshyV6PreviewTextTo3dRequestsByRequestIdResponses[keyof GetFalAiMeshyV6PreviewTextTo3dRequestsByRequestIdResponses] diff --git a/packages/typescript/ai-fal/src/generated/text-to-3d/zod.gen.ts b/packages/typescript/ai-fal/src/generated/text-to-3d/zod.gen.ts new file mode 100644 index 00000000..3a5244d0 --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/text-to-3d/zod.gen.ts @@ -0,0 +1,699 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { z } from 'zod' + +/** + * File + */ +export const zSchemaFile = z.object({ + file_size: z.optional( + z.int().register(z.globalRegistry, { + description: 'The size of the file in bytes.', + }), + ), + file_name: z.optional( + z.string().register(z.globalRegistry, { + description: + 'The name of the file. 
It will be auto-generated if not provided.', + }), + ), + content_type: z.optional( + z.string().register(z.globalRegistry, { + description: 'The mime type of the file.', + }), + ), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), + file_data: z.optional( + z.string().register(z.globalRegistry, { + description: 'File data', + }), + ), +}) + +/** + * TextureFiles + * + * Texture files downloaded and uploaded to CDN + */ +export const zSchemaTextureFiles = z + .object({ + base_color: zSchemaFile, + normal: z.optional(zSchemaFile), + roughness: z.optional(zSchemaFile), + metallic: z.optional(zSchemaFile), + }) + .register(z.globalRegistry, { + description: 'Texture files downloaded and uploaded to CDN', + }) + +/** + * ModelUrls + * + * 3D model files in various formats + */ +export const zSchemaModelUrls = z + .object({ + usdz: z.optional(zSchemaFile), + fbx: z.optional(zSchemaFile), + blend: z.optional(zSchemaFile), + stl: z.optional(zSchemaFile), + glb: z.optional(zSchemaFile), + obj: z.optional(zSchemaFile), + }) + .register(z.globalRegistry, { + description: '3D model files in various formats', + }) + +/** + * TextTo3DOutput + * + * Output for Text to 3D generation + */ +export const zSchemaMeshyV6PreviewTextTo3dOutput = z + .object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt used for generation', + }), + thumbnail: z.optional(zSchemaFile), + actual_prompt: z.optional( + z.string().register(z.globalRegistry, { + description: 'The actual prompt used if prompt expansion was enabled', + }), + ), + texture_urls: z.optional( + z.array(zSchemaTextureFiles).register(z.globalRegistry, { + description: 'Array of texture file objects', + }), + ), + model_glb: zSchemaFile, + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), + ), + model_urls: zSchemaModelUrls, + }) + .register(z.globalRegistry, { + description: 'Output for Text to 3D generation', + }) + +/** + * TextTo3DInput + * + * Input for Text to 3D conversion + */ +export const zSchemaMeshyV6PreviewTextTo3dInput = z + .object({ + prompt: z.string().max(600).register(z.globalRegistry, { + description: + 'Describe what kind of object the 3D model is. Maximum 600 characters.', + }), + enable_pbr: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Generate PBR Maps (metallic, roughness, normal) in addition to base color. Should be false for sculpture style.', + }), + ) + .default(false), + target_polycount: z + .optional( + z.int().gte(100).lte(300000).register(z.globalRegistry, { + description: 'Target number of polygons in the generated model', + }), + ) + .default(30000), + art_style: z.optional( + z.enum(['realistic', 'sculpture']).register(z.globalRegistry, { + description: + 'Desired art style of the object. Note: enable_pbr should be false for sculpture style.', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, input data will be checked for safety before processing.', + }), + ) + .default(true), + mode: z.optional( + z.enum(['preview', 'full']).register(z.globalRegistry, { + description: + "Generation mode. 
'preview' returns untextured geometry only, 'full' returns textured model (preview + refine).", + }), + ), + symmetry_mode: z.optional( + z.enum(['off', 'auto', 'on']).register(z.globalRegistry, { + description: 'Controls symmetry behavior during model generation.', + }), + ), + should_remesh: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable the remesh phase. When false, returns unprocessed triangular mesh.', + }), + ) + .default(true), + texture_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: + "2D image to guide the texturing process (only used in 'full' mode)", + }), + ), + topology: z.optional( + z.enum(['quad', 'triangle']).register(z.globalRegistry, { + description: + 'Specify the topology of the generated model. Quad for smooth surfaces, Triangle for detailed geometry.', + }), + ), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning.', + }), + ) + .default(false), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Seed for reproducible results. Same prompt and seed usually generate the same result.', + }), + ), + is_a_t_pose: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to generate the model in an A/T pose', + }), + ) + .default(false), + texture_prompt: z.optional( + z.string().max(600).register(z.globalRegistry, { + description: + "Additional text prompt to guide the texturing process (only used in 'full' mode)", + }), + ), + }) + .register(z.globalRegistry, { + description: 'Input for Text to 3D conversion', + }) + +/** + * TextTo3DOutput + */ +export const zSchemaHunyuan3dV3TextTo3dOutput = z.object({ + model_urls: zSchemaModelUrls, + thumbnail: z.optional(zSchemaFile), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), + ), + model_glb: zSchemaFile, +}) + +/** + * TextTo3DInput + */ +export const zSchemaHunyuan3dV3TextTo3dInput = z.object({ + enable_pbr: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable PBR material generation', + }), + ) + .default(false), + polygon_type: z.optional( + z.enum(['triangle', 'quadrilateral']).register(z.globalRegistry, { + description: + 'Polygon type. Only takes effect when GenerateType is LowPoly.', + }), + ), + face_count: z + .optional( + z.int().gte(40000).lte(1500000).register(z.globalRegistry, { + description: 'Target face count. Range: 40000-1500000', + }), + ) + .default(500000), + prompt: z.string().max(1024).register(z.globalRegistry, { + description: + 'Text description of the 3D content to generate. Supports up to 1024 UTF-8 characters.', + }), + generate_type: z.optional( + z.enum(['Normal', 'LowPoly', 'Geometry']).register(z.globalRegistry, { + description: + 'Generation type. Normal: textured model. LowPoly: polygon reduction. 
Geometry: white model without texture.', + }), + ), +}) + +/** + * HYMotionOutput + */ +export const zSchemaHunyuanMotionOutput = z.object({ + fbx_file: z.optional(zSchemaFile), + motion_json: z.optional(zSchemaFile), + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for generation.', + }), +}) + +/** + * HYMotionInput + */ +export const zSchemaHunyuanMotionInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Text prompt describing the motion to generate.', + }), + duration: z + .optional( + z.number().gte(0.5).lte(12).register(z.globalRegistry, { + description: 'Motion duration in seconds (0.5-12.0).', + }), + ) + .default(5), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Classifier-free guidance scale. Higher = more faithful to prompt.', + }), + ) + .default(5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for reproducible generation.', + }), + ), + output_format: z.optional( + z.enum(['fbx', 'dict']).register(z.globalRegistry, { + description: + "Output format: 'fbx' for animation files, 'dict' for raw JSON.", + }), + ), +}) + +/** + * HYMotionOutput + */ +export const zSchemaHunyuanMotionFastOutput = z.object({ + fbx_file: z.optional(zSchemaFile), + motion_json: z.optional(zSchemaFile), + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for generation.', + }), +}) + +/** + * HYMotionInput + */ +export const zSchemaHunyuanMotionFastInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Text prompt describing the motion to generate.', + }), + duration: z + .optional( + z.number().gte(0.5).lte(12).register(z.globalRegistry, { + description: 'Motion duration in seconds (0.5-12.0).', + }), + ) + .default(5), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Classifier-free guidance scale. 
Higher = more faithful to prompt.', + }), + ) + .default(5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for reproducible generation.', + }), + ), + output_format: z.optional( + z.enum(['fbx', 'dict']).register(z.globalRegistry, { + description: + "Output format: 'fbx' for animation files, 'dict' for raw JSON.", + }), + ), +}) + +export const zSchemaQueueStatus = z.object({ + status: z.enum(['IN_QUEUE', 'IN_PROGRESS', 'COMPLETED']), + request_id: z.string().register(z.globalRegistry, { + description: 'The request id.', + }), + response_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The response url.', + }), + ), + status_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The status url.', + }), + ), + cancel_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The cancel url.', + }), + ), + logs: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The logs.', + }), + ), + metrics: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The metrics.', + }), + ), + queue_position: z.optional( + z.int().register(z.globalRegistry, { + description: 'The queue position.', + }), + ), +}) + +export const zGetFalAiHunyuanMotionFastRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiHunyuanMotionFastRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiHunyuanMotionFastRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiHunyuanMotionFastRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiHunyuanMotionFastData = z.object({ + body: zSchemaHunyuanMotionFastInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiHunyuanMotionFastResponse = zSchemaQueueStatus + +export const zGetFalAiHunyuanMotionFastRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiHunyuanMotionFastRequestsByRequestIdResponse = + zSchemaHunyuanMotionFastOutput + +export const zGetFalAiHunyuanMotionRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiHunyuanMotionRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiHunyuanMotionRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiHunyuanMotionRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiHunyuanMotionData = z.object({ + body: zSchemaHunyuanMotionInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiHunyuanMotionResponse = zSchemaQueueStatus + +export const zGetFalAiHunyuanMotionRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiHunyuanMotionRequestsByRequestIdResponse = + zSchemaHunyuanMotionOutput + +export const zGetFalAiHunyuan3dV3TextTo3dRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiHunyuan3dV3TextTo3dRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiHunyuan3dV3TextTo3dRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiHunyuan3dV3TextTo3dRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiHunyuan3dV3TextTo3dData = z.object({ + body: zSchemaHunyuan3dV3TextTo3dInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiHunyuan3dV3TextTo3dResponse = zSchemaQueueStatus + +export const zGetFalAiHunyuan3dV3TextTo3dRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiHunyuan3dV3TextTo3dRequestsByRequestIdResponse = + zSchemaHunyuan3dV3TextTo3dOutput + +export const zGetFalAiMeshyV6PreviewTextTo3dRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiMeshyV6PreviewTextTo3dRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMeshyV6PreviewTextTo3dRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiMeshyV6PreviewTextTo3dRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMeshyV6PreviewTextTo3dData = z.object({ + body: zSchemaMeshyV6PreviewTextTo3dInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMeshyV6PreviewTextTo3dResponse = zSchemaQueueStatus + +export const zGetFalAiMeshyV6PreviewTextTo3dRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiMeshyV6PreviewTextTo3dRequestsByRequestIdResponse = + zSchemaMeshyV6PreviewTextTo3dOutput diff --git a/packages/typescript/ai-fal/src/generated/text-to-audio/endpoint-map.ts b/packages/typescript/ai-fal/src/generated/text-to-audio/endpoint-map.ts new file mode 100644 index 00000000..10272b3f --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/text-to-audio/endpoint-map.ts @@ -0,0 +1,438 @@ +// AUTO-GENERATED - Do not edit manually +// Generated from types.gen.ts via scripts/generate-fal-endpoint-maps.ts + +import { + zSchemaAceStepInput, + zSchemaAceStepOutput, + zSchemaAceStepPromptToAudioInput, + zSchemaAceStepPromptToAudioOutput, + zSchemaCsm1bInput, + zSchemaCsm1bOutput, + zSchemaDiffrhythmInput, + zSchemaDiffrhythmOutput, + zSchemaElevenlabsMusicInput, + zSchemaElevenlabsMusicOutput, + zSchemaElevenlabsSoundEffectsV2Input, + zSchemaElevenlabsSoundEffectsV2Output, + zSchemaElevenlabsTextToDialogueElevenV3Input, + zSchemaElevenlabsTextToDialogueElevenV3Output, + zSchemaElevenlabsTtsElevenV3Input, + zSchemaElevenlabsTtsElevenV3Output, + zSchemaElevenlabsTtsMultilingualV2Input, + zSchemaElevenlabsTtsMultilingualV2Output, + zSchemaF5TtsInput, + zSchemaF5TtsOutput, + zSchemaKokoroAmericanEnglishInput, + zSchemaKokoroAmericanEnglishOutput, + zSchemaKokoroBrazilianPortugueseInput, + zSchemaKokoroBrazilianPortugueseOutput, + zSchemaKokoroBritishEnglishInput, + zSchemaKokoroBritishEnglishOutput, + zSchemaKokoroFrenchInput, + zSchemaKokoroFrenchOutput, + zSchemaKokoroHindiInput, + zSchemaKokoroHindiOutput, + zSchemaKokoroItalianInput, + zSchemaKokoroItalianOutput, + zSchemaKokoroJapaneseInput, + zSchemaKokoroJapaneseOutput, + zSchemaKokoroMandarinChineseInput, + zSchemaKokoroMandarinChineseOutput, + zSchemaKokoroSpanishInput, + zSchemaKokoroSpanishOutput, + zSchemaLyria2Input, + zSchemaLyria2Output, + zSchemaMinimaxMusicInput, + zSchemaMinimaxMusicOutput, + zSchemaMinimaxMusicV15Input, + zSchemaMinimaxMusicV15Output, + zSchemaMinimaxMusicV2Input, + zSchemaMinimaxMusicV2Output, + zSchemaMmaudioV2TextToAudioInput, + zSchemaMmaudioV2TextToAudioOutput, + zSchemaMusicGenerationInput, + zSchemaMusicGenerationOutput, + zSchemaMusicGeneratorInput, + zSchemaMusicGeneratorOutput, + zSchemaSoundEffectGenerationInput, + zSchemaSoundEffectGenerationOutput, + zSchemaSoundEffectsGeneratorInput, + zSchemaSoundEffectsGeneratorOutput, + zSchemaStableAudio25TextToAudioInput, + zSchemaStableAudio25TextToAudioOutput, + zSchemaStableAudioInput, + zSchemaStableAudioOutput, + zSchemaV2InpaintInput, + zSchemaV2InpaintOutput, + zSchemaV2TextToMusicInput, + zSchemaV2TextToMusicOutput, + zSchemaYueInput, + zSchemaYueOutput, + zSchemaZonosInput, + zSchemaZonosOutput, +} from './zod.gen' + +import type { + SchemaAceStepInput, + SchemaAceStepOutput, + SchemaAceStepPromptToAudioInput, + SchemaAceStepPromptToAudioOutput, + SchemaCsm1bInput, + SchemaCsm1bOutput, + SchemaDiffrhythmInput, + SchemaDiffrhythmOutput, + SchemaElevenlabsMusicInput, + SchemaElevenlabsMusicOutput, + SchemaElevenlabsSoundEffectsV2Input, + SchemaElevenlabsSoundEffectsV2Output, + SchemaElevenlabsTextToDialogueElevenV3Input, + SchemaElevenlabsTextToDialogueElevenV3Output, + SchemaElevenlabsTtsElevenV3Input, + SchemaElevenlabsTtsElevenV3Output, + SchemaElevenlabsTtsMultilingualV2Input, + SchemaElevenlabsTtsMultilingualV2Output, + SchemaF5TtsInput, + SchemaF5TtsOutput, + SchemaKokoroAmericanEnglishInput, + SchemaKokoroAmericanEnglishOutput, + SchemaKokoroBrazilianPortugueseInput, + SchemaKokoroBrazilianPortugueseOutput, + 
SchemaKokoroBritishEnglishInput, + SchemaKokoroBritishEnglishOutput, + SchemaKokoroFrenchInput, + SchemaKokoroFrenchOutput, + SchemaKokoroHindiInput, + SchemaKokoroHindiOutput, + SchemaKokoroItalianInput, + SchemaKokoroItalianOutput, + SchemaKokoroJapaneseInput, + SchemaKokoroJapaneseOutput, + SchemaKokoroMandarinChineseInput, + SchemaKokoroMandarinChineseOutput, + SchemaKokoroSpanishInput, + SchemaKokoroSpanishOutput, + SchemaLyria2Input, + SchemaLyria2Output, + SchemaMinimaxMusicInput, + SchemaMinimaxMusicOutput, + SchemaMinimaxMusicV15Input, + SchemaMinimaxMusicV15Output, + SchemaMinimaxMusicV2Input, + SchemaMinimaxMusicV2Output, + SchemaMmaudioV2TextToAudioInput, + SchemaMmaudioV2TextToAudioOutput, + SchemaMusicGenerationInput, + SchemaMusicGenerationOutput, + SchemaMusicGeneratorInput, + SchemaMusicGeneratorOutput, + SchemaSoundEffectGenerationInput, + SchemaSoundEffectGenerationOutput, + SchemaSoundEffectsGeneratorInput, + SchemaSoundEffectsGeneratorOutput, + SchemaStableAudio25TextToAudioInput, + SchemaStableAudio25TextToAudioOutput, + SchemaStableAudioInput, + SchemaStableAudioOutput, + SchemaV2InpaintInput, + SchemaV2InpaintOutput, + SchemaV2TextToMusicInput, + SchemaV2TextToMusicOutput, + SchemaYueInput, + SchemaYueOutput, + SchemaZonosInput, + SchemaZonosOutput, +} from './types.gen' + +import type { z } from 'zod' + +export type TextToAudioEndpointMap = { + 'fal-ai/elevenlabs/music': { + input: SchemaElevenlabsMusicInput + output: SchemaElevenlabsMusicOutput + } + 'fal-ai/minimax-music/v2': { + input: SchemaMinimaxMusicV2Input + output: SchemaMinimaxMusicV2Output + } + 'beatoven/sound-effect-generation': { + input: SchemaSoundEffectGenerationInput + output: SchemaSoundEffectGenerationOutput + } + 'beatoven/music-generation': { + input: SchemaMusicGenerationInput + output: SchemaMusicGenerationOutput + } + 'fal-ai/minimax-music/v1.5': { + input: SchemaMinimaxMusicV15Input + output: SchemaMinimaxMusicV15Output + } + 'fal-ai/stable-audio-25/text-to-audio': { + input: SchemaStableAudio25TextToAudioInput + output: SchemaStableAudio25TextToAudioOutput + } + 'fal-ai/elevenlabs/text-to-dialogue/eleven-v3': { + input: SchemaElevenlabsTextToDialogueElevenV3Input + output: SchemaElevenlabsTextToDialogueElevenV3Output + } + 'fal-ai/elevenlabs/sound-effects/v2': { + input: SchemaElevenlabsSoundEffectsV2Input + output: SchemaElevenlabsSoundEffectsV2Output + } + 'sonauto/v2/inpaint': { + input: SchemaV2InpaintInput + output: SchemaV2InpaintOutput + } + 'sonauto/v2/text-to-music': { + input: SchemaV2TextToMusicInput + output: SchemaV2TextToMusicOutput + } + 'fal-ai/elevenlabs/tts/eleven-v3': { + input: SchemaElevenlabsTtsElevenV3Input + output: SchemaElevenlabsTtsElevenV3Output + } + 'fal-ai/lyria2': { + input: SchemaLyria2Input + output: SchemaLyria2Output + } + 'fal-ai/ace-step/prompt-to-audio': { + input: SchemaAceStepPromptToAudioInput + output: SchemaAceStepPromptToAudioOutput + } + 'fal-ai/ace-step': { + input: SchemaAceStepInput + output: SchemaAceStepOutput + } + 'cassetteai/sound-effects-generator': { + input: SchemaSoundEffectsGeneratorInput + output: SchemaSoundEffectsGeneratorOutput + } + 'cassetteai/music-generator': { + input: SchemaMusicGeneratorInput + output: SchemaMusicGeneratorOutput + } + 'fal-ai/csm-1b': { + input: SchemaCsm1bInput + output: SchemaCsm1bOutput + } + 'fal-ai/diffrhythm': { + input: SchemaDiffrhythmInput + output: SchemaDiffrhythmOutput + } + 'fal-ai/elevenlabs/tts/multilingual-v2': { + input: SchemaElevenlabsTtsMultilingualV2Input + output: 
SchemaElevenlabsTtsMultilingualV2Output + } + 'fal-ai/kokoro/hindi': { + input: SchemaKokoroHindiInput + output: SchemaKokoroHindiOutput + } + 'fal-ai/kokoro/mandarin-chinese': { + input: SchemaKokoroMandarinChineseInput + output: SchemaKokoroMandarinChineseOutput + } + 'fal-ai/kokoro/spanish': { + input: SchemaKokoroSpanishInput + output: SchemaKokoroSpanishOutput + } + 'fal-ai/kokoro/brazilian-portuguese': { + input: SchemaKokoroBrazilianPortugueseInput + output: SchemaKokoroBrazilianPortugueseOutput + } + 'fal-ai/kokoro/british-english': { + input: SchemaKokoroBritishEnglishInput + output: SchemaKokoroBritishEnglishOutput + } + 'fal-ai/kokoro/french': { + input: SchemaKokoroFrenchInput + output: SchemaKokoroFrenchOutput + } + 'fal-ai/kokoro/japanese': { + input: SchemaKokoroJapaneseInput + output: SchemaKokoroJapaneseOutput + } + 'fal-ai/kokoro/american-english': { + input: SchemaKokoroAmericanEnglishInput + output: SchemaKokoroAmericanEnglishOutput + } + 'fal-ai/zonos': { + input: SchemaZonosInput + output: SchemaZonosOutput + } + 'fal-ai/kokoro/italian': { + input: SchemaKokoroItalianInput + output: SchemaKokoroItalianOutput + } + 'fal-ai/yue': { + input: SchemaYueInput + output: SchemaYueOutput + } + 'fal-ai/mmaudio-v2/text-to-audio': { + input: SchemaMmaudioV2TextToAudioInput + output: SchemaMmaudioV2TextToAudioOutput + } + 'fal-ai/minimax-music': { + input: SchemaMinimaxMusicInput + output: SchemaMinimaxMusicOutput + } + 'fal-ai/f5-tts': { + input: SchemaF5TtsInput + output: SchemaF5TtsOutput + } + 'fal-ai/stable-audio': { + input: SchemaStableAudioInput + output: SchemaStableAudioOutput + } +} + +/** Union type of all text-to-audio model endpoint IDs */ +export type TextToAudioModel = keyof TextToAudioEndpointMap + +export const TextToAudioSchemaMap: Record< + TextToAudioModel, + { input: z.ZodSchema; output: z.ZodSchema } +> = { + ['fal-ai/elevenlabs/music']: { + input: zSchemaElevenlabsMusicInput, + output: zSchemaElevenlabsMusicOutput, + }, + ['fal-ai/minimax-music/v2']: { + input: zSchemaMinimaxMusicV2Input, + output: zSchemaMinimaxMusicV2Output, + }, + ['beatoven/sound-effect-generation']: { + input: zSchemaSoundEffectGenerationInput, + output: zSchemaSoundEffectGenerationOutput, + }, + ['beatoven/music-generation']: { + input: zSchemaMusicGenerationInput, + output: zSchemaMusicGenerationOutput, + }, + ['fal-ai/minimax-music/v1.5']: { + input: zSchemaMinimaxMusicV15Input, + output: zSchemaMinimaxMusicV15Output, + }, + ['fal-ai/stable-audio-25/text-to-audio']: { + input: zSchemaStableAudio25TextToAudioInput, + output: zSchemaStableAudio25TextToAudioOutput, + }, + ['fal-ai/elevenlabs/text-to-dialogue/eleven-v3']: { + input: zSchemaElevenlabsTextToDialogueElevenV3Input, + output: zSchemaElevenlabsTextToDialogueElevenV3Output, + }, + ['fal-ai/elevenlabs/sound-effects/v2']: { + input: zSchemaElevenlabsSoundEffectsV2Input, + output: zSchemaElevenlabsSoundEffectsV2Output, + }, + ['sonauto/v2/inpaint']: { + input: zSchemaV2InpaintInput, + output: zSchemaV2InpaintOutput, + }, + ['sonauto/v2/text-to-music']: { + input: zSchemaV2TextToMusicInput, + output: zSchemaV2TextToMusicOutput, + }, + ['fal-ai/elevenlabs/tts/eleven-v3']: { + input: zSchemaElevenlabsTtsElevenV3Input, + output: zSchemaElevenlabsTtsElevenV3Output, + }, + ['fal-ai/lyria2']: { + input: zSchemaLyria2Input, + output: zSchemaLyria2Output, + }, + ['fal-ai/ace-step/prompt-to-audio']: { + input: zSchemaAceStepPromptToAudioInput, + output: zSchemaAceStepPromptToAudioOutput, + }, + ['fal-ai/ace-step']: { + input: 
zSchemaAceStepInput, + output: zSchemaAceStepOutput, + }, + ['cassetteai/sound-effects-generator']: { + input: zSchemaSoundEffectsGeneratorInput, + output: zSchemaSoundEffectsGeneratorOutput, + }, + ['cassetteai/music-generator']: { + input: zSchemaMusicGeneratorInput, + output: zSchemaMusicGeneratorOutput, + }, + ['fal-ai/csm-1b']: { + input: zSchemaCsm1bInput, + output: zSchemaCsm1bOutput, + }, + ['fal-ai/diffrhythm']: { + input: zSchemaDiffrhythmInput, + output: zSchemaDiffrhythmOutput, + }, + ['fal-ai/elevenlabs/tts/multilingual-v2']: { + input: zSchemaElevenlabsTtsMultilingualV2Input, + output: zSchemaElevenlabsTtsMultilingualV2Output, + }, + ['fal-ai/kokoro/hindi']: { + input: zSchemaKokoroHindiInput, + output: zSchemaKokoroHindiOutput, + }, + ['fal-ai/kokoro/mandarin-chinese']: { + input: zSchemaKokoroMandarinChineseInput, + output: zSchemaKokoroMandarinChineseOutput, + }, + ['fal-ai/kokoro/spanish']: { + input: zSchemaKokoroSpanishInput, + output: zSchemaKokoroSpanishOutput, + }, + ['fal-ai/kokoro/brazilian-portuguese']: { + input: zSchemaKokoroBrazilianPortugueseInput, + output: zSchemaKokoroBrazilianPortugueseOutput, + }, + ['fal-ai/kokoro/british-english']: { + input: zSchemaKokoroBritishEnglishInput, + output: zSchemaKokoroBritishEnglishOutput, + }, + ['fal-ai/kokoro/french']: { + input: zSchemaKokoroFrenchInput, + output: zSchemaKokoroFrenchOutput, + }, + ['fal-ai/kokoro/japanese']: { + input: zSchemaKokoroJapaneseInput, + output: zSchemaKokoroJapaneseOutput, + }, + ['fal-ai/kokoro/american-english']: { + input: zSchemaKokoroAmericanEnglishInput, + output: zSchemaKokoroAmericanEnglishOutput, + }, + ['fal-ai/zonos']: { + input: zSchemaZonosInput, + output: zSchemaZonosOutput, + }, + ['fal-ai/kokoro/italian']: { + input: zSchemaKokoroItalianInput, + output: zSchemaKokoroItalianOutput, + }, + ['fal-ai/yue']: { + input: zSchemaYueInput, + output: zSchemaYueOutput, + }, + ['fal-ai/mmaudio-v2/text-to-audio']: { + input: zSchemaMmaudioV2TextToAudioInput, + output: zSchemaMmaudioV2TextToAudioOutput, + }, + ['fal-ai/minimax-music']: { + input: zSchemaMinimaxMusicInput, + output: zSchemaMinimaxMusicOutput, + }, + ['fal-ai/f5-tts']: { + input: zSchemaF5TtsInput, + output: zSchemaF5TtsOutput, + }, + ['fal-ai/stable-audio']: { + input: zSchemaStableAudioInput, + output: zSchemaStableAudioOutput, + }, +} as const + +/** Get the input type for a specific text-to-audio model */ +export type TextToAudioModelInput<T extends TextToAudioModel> = + TextToAudioEndpointMap[T]['input'] + +/** Get the output type for a specific text-to-audio model */ +export type TextToAudioModelOutput<T extends TextToAudioModel> = + TextToAudioEndpointMap[T]['output'] diff --git a/packages/typescript/ai-fal/src/generated/text-to-audio/types.gen.ts b/packages/typescript/ai-fal/src/generated/text-to-audio/types.gen.ts new file mode 100644 index 00000000..2415f8b4 --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/text-to-audio/types.gen.ts @@ -0,0 +1,5217 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: 'https://queue.fal.run' | (string & {}) +} + +/** + * Output + */ +export type SchemaStableAudioOutput = { + audio_file: SchemaFile +}
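// Editor's note (illustrative, not part of this patch): the generated endpoint
// map and zod schema table above compose into a type-safe submit helper. A
// minimal sketch, assuming the `https://queue.fal.run` base URL and the
// `Authorization` "Fal Key" header from the OpenAPI metadata in this patch;
// the import path and the FAL_KEY environment variable are placeholders.
import { TextToAudioSchemaMap } from './endpoint-map.gen' // assumed path
import type {
  TextToAudioModel,
  TextToAudioModelInput,
} from './endpoint-map.gen'

async function submitTextToAudio<T extends TextToAudioModel>(
  endpoint: T,
  input: TextToAudioModelInput<T>,
) {
  // Compile time: `input` is narrowed to this endpoint's input type.
  // Runtime: the zod schema re-validates the payload before the network call.
  TextToAudioSchemaMap[endpoint].input.parse(input)
  const res = await fetch(`https://queue.fal.run/${endpoint}`, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      Authorization: `Key ${process.env.FAL_KEY}`, // assumed auth scheme
    },
    body: JSON.stringify(input),
  })
  return res.json() // resolves to a SchemaQueueStatus-shaped object
}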
+ +/** + * File + */ +export type SchemaFile = { + /** + * File Size + * + * The size of the file in bytes. + */ + file_size?: number | unknown + /** + * File Name + * + * The name of the file. It will be auto-generated if not provided. + */ + file_name?: string | unknown + /** + * Content Type + * + * The mime type of the file. + */ + content_type?: string | unknown + /** + * Url + * + * The URL where the file can be downloaded from. + */ + url: string +} + +/** + * Input + */ +export type SchemaStableAudioInput = { + /** + * Prompt + * + * The prompt to generate audio from + */ + prompt: string + /** + * Steps + * + * The number of steps to denoise the audio for + */ + steps?: number + /** + * Seconds Total + * + * The duration of the audio clip to generate + */ + seconds_total?: number + /** + * Seconds Start + * + * The start point of the audio clip to generate + */ + seconds_start?: number +} + +/** + * AudioFile + */ +export type SchemaAudioFile = { + /** + * File Size + * + * The size of the file in bytes. + */ + file_size?: number | unknown + /** + * File Name + */ + file_name?: string + /** + * Content Type + */ + content_type?: string + /** + * Url + */ + url: string +} + +/** + * TTSOutput + */ +export type SchemaF5TtsOutput = { + audio_url: SchemaAudioFile +} + +/** + * TTSInput + */ +export type SchemaF5TtsInput = { + /** + * Reference Text for the Reference Audio + * + * The reference text to be used for TTS. If not provided, an ASR (Automatic Speech Recognition) model will be used to generate the reference text. + */ + ref_text?: string + /** + * Remove Silence + * + * Whether to remove the silence from the audio file. + */ + remove_silence?: boolean + /** + * Text to be converted to speech + * + * The text to be converted to speech. + */ + gen_text: string + /** + * Model Type + * + * The name of the model to be used for TTS. + */ + model_type: 'F5-TTS' | 'E2-TTS' + /** + * Reference Audio URL + * + * The URL of the reference audio file. + */ + ref_audio_url: string +} + +/** + * MusicOutput + */ +export type SchemaMinimaxMusicOutput = { + /** + * Audio + * + * The generated music + */ + audio: SchemaFile +} + +/** + * TextToMusicRequest + */ +export type SchemaMinimaxMusicInput = { + /** + * Prompt + * + * Lyrics with optional formatting. You can use a newline to separate each line of lyrics. You can use two newlines to add a pause between lines. You can use double hash marks (##) at the beginning and end of the lyrics to add accompaniment. Maximum 600 characters. + */ + prompt: string + /** + * Reference Audio Url + * + * Reference song, should contain music and vocals. Must be a .wav or .mp3 file longer than 15 seconds. + */ + reference_audio_url: string +} + +/** + * AudioOutput + */ +export type SchemaMmaudioV2TextToAudioOutput = { + /** + * Audio + * + * The generated audio. + */ + audio: SchemaFile +} + +/** + * AudioInput + */ +export type SchemaMmaudioV2TextToAudioInput = { + /** + * Prompt + * + * The prompt to generate the audio for. + */ + prompt: string + /** + * Num Steps + * + * The number of steps to generate the audio for. + */ + num_steps?: number + /** + * Duration + * + * The duration of the audio to generate. + */ + duration?: number + /** + * Cfg Strength + * + * The strength of Classifier Free Guidance. + */ + cfg_strength?: number + /** + * Seed + * + * The seed for the random number generator + */ + seed?: number + /** + * Mask Away Clip + * + * Whether to mask away the clip. + */ + mask_away_clip?: boolean + /** + * Negative Prompt + * + * The negative prompt to generate the audio for. + */ + negative_prompt?: string +} + +/** + * Output + */ +export type SchemaYueOutput = { + /** + * Audio + * + * Generated music file. + */ + audio: SchemaFile +} + +/** + * TextToMusicInput + */ +export type SchemaYueInput = { + /** + * Lyrics + * + * The prompt to generate the song from.
Must have two sections. Sections start with either [chorus] or a [verse]. + */ + lyrics: string + /** + * Genres + * + * The genres (separated by a space ' ') to guide the music generation. + */ + genres: string +} + +/** + * ItalianOutput + */ +export type SchemaKokoroItalianOutput = { + /** + * Audio + * + * The generated music + */ + audio: SchemaFile +} + +/** + * ItalianRequest + */ +export type SchemaKokoroItalianInput = { + /** + * Prompt + */ + prompt: string + /** + * Voice + * + * Voice ID for the desired voice. + */ + voice: 'if_sara' | 'im_nicola' + /** + * Speed + * + * Speed of the generated audio. Default is 1.0. + */ + speed?: number +} + +/** + * ZonosOutput + */ +export type SchemaZonosOutput = { + /** + * Audio + * + * The generated audio + */ + audio: SchemaFile +} + +/** + * ZonosInput + */ +export type SchemaZonosInput = { + /** + * Prompt + * + * The content generated using cloned voice. + */ + prompt: string + /** + * Reference Audio Url + * + * The reference audio. + */ + reference_audio_url: string +} + +/** + * AmEngOutput + */ +export type SchemaKokoroAmericanEnglishOutput = { + /** + * Audio + * + * The generated music + */ + audio: SchemaFile +} + +/** + * AmEnglishRequest + */ +export type SchemaKokoroAmericanEnglishInput = { + /** + * Prompt + */ + prompt?: string + /** + * Voice + * + * Voice ID for the desired voice. + */ + voice?: + | 'af_heart' + | 'af_alloy' + | 'af_aoede' + | 'af_bella' + | 'af_jessica' + | 'af_kore' + | 'af_nicole' + | 'af_nova' + | 'af_river' + | 'af_sarah' + | 'af_sky' + | 'am_adam' + | 'am_echo' + | 'am_eric' + | 'am_fenrir' + | 'am_liam' + | 'am_michael' + | 'am_onyx' + | 'am_puck' + | 'am_santa' + /** + * Speed + * + * Speed of the generated audio. Default is 1.0. + */ + speed?: number +} + +/** + * JapaneseOutput + */ +export type SchemaKokoroJapaneseOutput = { + /** + * Audio + * + * The generated music + */ + audio: SchemaFile +} + +/** + * JapaneseRequest + */ +export type SchemaKokoroJapaneseInput = { + /** + * Prompt + */ + prompt: string + /** + * Voice + * + * Voice ID for the desired voice. + */ + voice: 'jf_alpha' | 'jf_gongitsune' | 'jf_nezumi' | 'jf_tebukuro' | 'jm_kumo' + /** + * Speed + * + * Speed of the generated audio. Default is 1.0. + */ + speed?: number +} + +/** + * FrenchOutput + */ +export type SchemaKokoroFrenchOutput = { + /** + * Audio + * + * The generated music + */ + audio: SchemaFile +} + +/** + * FrenchRequest + */ +export type SchemaKokoroFrenchInput = { + /** + * Prompt + */ + prompt: string + /** + * Voice + * + * Voice ID for the desired voice. + */ + voice: 'ff_siwis' + /** + * Speed + * + * Speed of the generated audio. Default is 1.0. + */ + speed?: number +} + +/** + * BrEngOutput + */ +export type SchemaKokoroBritishEnglishOutput = { + /** + * Audio + * + * The generated music + */ + audio: SchemaFile +} + +/** + * BrEnglishRequest + */ +export type SchemaKokoroBritishEnglishInput = { + /** + * Prompt + */ + prompt: string + /** + * Voice + * + * Voice ID for the desired voice. + */ + voice: + | 'bf_alice' + | 'bf_emma' + | 'bf_isabella' + | 'bf_lily' + | 'bm_daniel' + | 'bm_fable' + | 'bm_george' + | 'bm_lewis' + /** + * Speed + * + * Speed of the generated audio. Default is 1.0. 
+ */ + speed?: number +} + +/** + * BrPortugeseOutput + */ +export type SchemaKokoroBrazilianPortugueseOutput = { + /** + * Audio + * + * The generated music + */ + audio: SchemaFile +} + +/** + * BrPortugueseRequest + */ +export type SchemaKokoroBrazilianPortugueseInput = { + /** + * Prompt + */ + prompt: string + /** + * Voice + * + * Voice ID for the desired voice. + */ + voice: 'pf_dora' | 'pm_alex' | 'pm_santa' + /** + * Speed + * + * Speed of the generated audio. Default is 1.0. + */ + speed?: number +} + +/** + * SpanishOutput + */ +export type SchemaKokoroSpanishOutput = { + /** + * Audio + * + * The generated music + */ + audio: SchemaFile +} + +/** + * SpanishRequest + */ +export type SchemaKokoroSpanishInput = { + /** + * Prompt + */ + prompt: string + /** + * Voice + * + * Voice ID for the desired voice. + */ + voice: 'ef_dora' | 'em_alex' | 'em_santa' + /** + * Speed + * + * Speed of the generated audio. Default is 1.0. + */ + speed?: number +} + +/** + * MandarinOutput + */ +export type SchemaKokoroMandarinChineseOutput = { + /** + * Audio + * + * The generated music + */ + audio: SchemaFile +} + +/** + * MandarinRequest + */ +export type SchemaKokoroMandarinChineseInput = { + /** + * Prompt + */ + prompt: string + /** + * Voice + * + * Voice ID for the desired voice. + */ + voice: + | 'zf_xiaobei' + | 'zf_xiaoni' + | 'zf_xiaoxiao' + | 'zf_xiaoyi' + | 'zm_yunjian' + | 'zm_yunxi' + | 'zm_yunxia' + | 'zm_yunyang' + /** + * Speed + * + * Speed of the generated audio. Default is 1.0. + */ + speed?: number +} + +/** + * HindiOutput + */ +export type SchemaKokoroHindiOutput = { + /** + * Audio + * + * The generated music + */ + audio: SchemaFile +} + +/** + * HindiRequest + */ +export type SchemaKokoroHindiInput = { + /** + * Prompt + */ + prompt: string + /** + * Voice + * + * Voice ID for the desired voice. + */ + voice: 'hf_alpha' | 'hf_beta' | 'hm_omega' | 'hm_psi' + /** + * Speed + * + * Speed of the generated audio. Default is 1.0. + */ + speed?: number +} + +/** + * TTSOutput + */ +export type SchemaElevenlabsTtsMultilingualV2Output = { + audio: SchemaFile + /** + * Timestamps + * + * Timestamps for each word in the generated speech. Only returned if `timestamps` is set to True in the request. + */ + timestamps?: Array | unknown +} + +/** + * TextToSpeechRequest + */ +export type SchemaElevenlabsTtsMultilingualV2Input = { + /** + * Stability + * + * Voice stability (0-1) + */ + stability?: number + /** + * Next Text + * + * The text that comes after the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation. + */ + next_text?: string | unknown + /** + * Speed + * + * Speech speed (0.7-1.2). Values below 1.0 slow down the speech, above 1.0 speed it up. Extreme values may affect quality. + */ + speed?: number + /** + * Style + * + * Style exaggeration (0-1) + */ + style?: number + /** + * Text + * + * The text to convert to speech + */ + text: string + /** + * Timestamps + * + * Whether to return timestamps for each word in the generated speech + */ + timestamps?: boolean + /** + * Similarity Boost + * + * Similarity boost (0-1) + */ + similarity_boost?: number + /** + * Voice + * + * The voice to use for speech generation + */ + voice?: string + /** + * Language Code + * + * Language code (ISO 639-1) used to enforce a language for the model. An error will be returned if language code is not supported by the model. 
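// Editor's note (illustrative, not part of this patch): the per-language
// Kokoro endpoints above differ only in their `voice` unions, so picking an
// endpoint narrows the accepted voices at compile time. Voice IDs below come
// from the unions in this file; the prompt text is a placeholder.
type ItalianVoice =
  TextToAudioModelInput<'fal-ai/kokoro/italian'>['voice'] // 'if_sara' | 'im_nicola'

const spanishRequest: TextToAudioModelInput<'fal-ai/kokoro/spanish'> = {
  prompt: 'Hola, ¿qué tal?',
  voice: 'ef_dora', // 'em_alex' and 'em_santa' also compile; 'if_sara' would not
  speed: 1.0,
}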
+ */ + language_code?: string | unknown + /** + * Previous Text + * + * The text that came before the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation. + */ + previous_text?: string | unknown +} + +/** + * Output + */ +export type SchemaDiffrhythmOutput = { + /** + * Audio + * + * Generated music file. + */ + audio: SchemaFile +} + +/** + * TextToMusicInput + */ +export type SchemaDiffrhythmInput = { + /** + * Lyrics + * + * The prompt to generate the song from. Must have two sections. Sections start with either [chorus] or a [verse]. + */ + lyrics: string + /** + * CFG Strength + * + * The CFG strength to use for the music generation. + */ + cfg_strength?: number + /** + * Reference Audio URL + * + * The URL of the reference audio to use for the music generation. + */ + reference_audio_url?: string + /** + * Music Duration + * + * The duration of the music to generate. + */ + music_duration?: '95s' | '285s' + /** + * Scheduler + * + * The scheduler to use for the music generation. + */ + scheduler?: 'euler' | 'midpoint' | 'rk4' | 'implicit_adams' + /** + * Number of Inference Steps + * + * The number of inference steps to use for the music generation. + */ + num_inference_steps?: number + /** + * Style Prompt + * + * The style prompt to use for the music generation. + */ + style_prompt?: string +} + +/** + * Speaker + */ +export type SchemaSpeaker = { + /** + * Prompt + */ + prompt: string + /** + * Audio Url + */ + audio_url: string + /** + * Speaker Id + */ + speaker_id: number +} + +/** + * Turn + */ +export type SchemaTurn = { + /** + * Text + */ + text: string + /** + * Speaker Id + */ + speaker_id: number +} + +/** + * Output + */ +export type SchemaCsm1bOutput = { + /** + * Audio + * + * The generated audio. + */ + audio: SchemaFile | Blob | File +} + +/** + * Input + */ +export type SchemaCsm1bInput = { + /** + * Scene + * + * The text to generate an audio from. + */ + scene: Array<SchemaTurn> + /** + * Context + * + * The context to generate an audio from. + */ + context?: Array<SchemaSpeaker> +} + +/** + * AudioOutput + * + * Example Pydantic model showing how to include a File in the output. + */ +export type SchemaMusicGeneratorOutput = { + audio_file: SchemaFile +} + +/** + * Input + */ +export type SchemaMusicGeneratorInput = { + /** + * Prompt + * + * The prompt to generate music from. + */ + prompt: string + /** + * Duration + * + * The duration of the generated music in seconds. + */ + duration: number +} + +/** + * AudioOutput + * + * Example Pydantic model showing how to include a File in the output. + */ +export type SchemaSoundEffectsGeneratorOutput = { + audio_file: SchemaFile +} + +/** + * Input + */ +export type SchemaSoundEffectsGeneratorInput = { + /** + * Prompt + * + * The prompt to generate SFX. + */ + prompt: string + /** + * Duration + * + * The duration of the generated SFX in seconds. + */ + duration: number +} + +/** + * ACEStepResponse + */ +export type SchemaAceStepOutput = { + /** + * Tags + * + * The genre tags used in the generation process. + */ + tags: string + /** + * Lyrics + * + * The lyrics used in the generation process. + */ + lyrics: string + /** + * Seed + * + * The random seed used for the generation process. + */ + seed: number + /** + * Audio + * + * The generated audio file.
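// Editor's note (illustrative, not part of this patch): with the Array
// element types restored above, a csm-1b payload pairs dialogue turns
// (`scene`) with optional reference speakers (`context`). All text and the
// reference URL are placeholders.
const csmInput: SchemaCsm1bInput = {
  scene: [
    { speaker_id: 0, text: 'Hey, did the nightly build pass?' },
    { speaker_id: 1, text: 'It did. All suites green.' },
  ],
  context: [
    {
      speaker_id: 0,
      prompt: 'calm, low-pitched voice',
      audio_url: 'https://example.com/speaker0-reference.wav', // placeholder
    },
  ],
}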
+ */ + audio: SchemaFile +} + +/** + * ACEStepTextToAudioRequest + */ +export type SchemaAceStepInput = { + /** + * Number Of Steps + * + * Number of steps to generate the audio. + */ + number_of_steps?: number + /** + * Duration + * + * The duration of the generated audio in seconds. + */ + duration?: number + /** + * Tags + * + * Comma-separated list of genre tags to control the style of the generated audio. + */ + tags: string + /** + * Minimum Guidance Scale + * + * Minimum guidance scale for the generation after the decay. + */ + minimum_guidance_scale?: number + /** + * Lyrics + * + * Lyrics to be sung in the audio. If not provided or if [inst] or [instrumental] is the content of this field, no lyrics will be sung. Use control structures like [verse], [chorus] and [bridge] to control the structure of the song. + */ + lyrics?: string + /** + * Tag Guidance Scale + * + * Tag guidance scale for the generation. + */ + tag_guidance_scale?: number + /** + * Scheduler + * + * Scheduler to use for the generation process. + */ + scheduler?: 'euler' | 'heun' + /** + * Guidance Scale + * + * Guidance scale for the generation. + */ + guidance_scale?: number + /** + * Guidance Type + * + * Type of CFG to use for the generation process. + */ + guidance_type?: 'cfg' | 'apg' | 'cfg_star' + /** + * Lyric Guidance Scale + * + * Lyric guidance scale for the generation. + */ + lyric_guidance_scale?: number + /** + * Guidance Interval + * + * Guidance interval for the generation. 0.5 means only apply guidance in the middle steps (0.25 * infer_steps to 0.75 * infer_steps) + */ + guidance_interval?: number + /** + * Guidance Interval Decay + * + * Guidance interval decay for the generation. Guidance scale will decay from guidance_scale to min_guidance_scale in the interval. 0.0 means no decay. + */ + guidance_interval_decay?: number + /** + * Seed + * + * Random seed for reproducibility. If not provided, a random seed will be used. + */ + seed?: number + /** + * Granularity Scale + * + * Granularity scale for the generation process. Higher values can reduce artifacts. + */ + granularity_scale?: number +} + +/** + * ACEStepResponse + */ +export type SchemaAceStepPromptToAudioOutput = { + /** + * Tags + * + * The genre tags used in the generation process. + */ + tags: string + /** + * Lyrics + * + * The lyrics used in the generation process. + */ + lyrics: string + /** + * Seed + * + * The random seed used for the generation process. + */ + seed: number + /** + * Audio + * + * The generated audio file. + */ + audio: SchemaFile +} + +/** + * ACEStepPromptToAudioRequest + */ +export type SchemaAceStepPromptToAudioInput = { + /** + * Number Of Steps + * + * Number of steps to generate the audio. + */ + number_of_steps?: number + /** + * Duration + * + * The duration of the generated audio in seconds. + */ + duration?: number + /** + * Prompt + * + * Prompt to control the style of the generated audio. This will be used to generate tags and lyrics. + */ + prompt: string + /** + * Minimum Guidance Scale + * + * Minimum guidance scale for the generation after the decay. + */ + minimum_guidance_scale?: number + /** + * Tag Guidance Scale + * + * Tag guidance scale for the generation. + */ + tag_guidance_scale?: number + /** + * Scheduler + * + * Scheduler to use for the generation process. + */ + scheduler?: 'euler' | 'heun' + /** + * Guidance Scale + * + * Guidance scale for the generation. + */ + guidance_scale?: number + /** + * Guidance Type + * + * Type of CFG to use for the generation process. 
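// Editor's note (illustrative, not part of this patch): one way to read the
// ACE-Step guidance fields documented above. Guidance applies only inside a
// centered fraction of the denoising steps (`guidance_interval`; 0.5 means
// steps in [0.25, 0.75] * total), decaying from `guidance_scale` toward
// `minimum_guidance_scale` as scaled by `guidance_interval_decay` (0 = no
// decay). This is an interpretation of the docstrings, not the model's code.
function effectiveGuidance(
  step: number,
  totalSteps: number,
  guidanceScale: number,
  minimumGuidanceScale: number,
  guidanceInterval: number,
  guidanceIntervalDecay: number,
): number {
  const start = (0.5 - guidanceInterval / 2) * totalSteps
  const end = (0.5 + guidanceInterval / 2) * totalSteps
  // Outside the interval, no CFG is applied (a plain conditional pass).
  if (step < start || step > end) return 1
  const t = (step - start) / Math.max(end - start, 1)
  return guidanceScale - (guidanceScale - minimumGuidanceScale) * t * guidanceIntervalDecay
}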
+ */ + guidance_type?: 'cfg' | 'apg' | 'cfg_star' + /** + * Instrumental + * + * Whether to generate an instrumental version of the audio. + */ + instrumental?: boolean + /** + * Lyric Guidance Scale + * + * Lyric guidance scale for the generation. + */ + lyric_guidance_scale?: number + /** + * Guidance Interval + * + * Guidance interval for the generation. 0.5 means only apply guidance in the middle steps (0.25 * infer_steps to 0.75 * infer_steps) + */ + guidance_interval?: number + /** + * Guidance Interval Decay + * + * Guidance interval decay for the generation. Guidance scale will decay from guidance_scale to min_guidance_scale in the interval. 0.0 means no decay. + */ + guidance_interval_decay?: number + /** + * Seed + * + * Random seed for reproducibility. If not provided, a random seed will be used. + */ + seed?: number + /** + * Granularity Scale + * + * Granularity scale for the generation process. Higher values can reduce artifacts. + */ + granularity_scale?: number +} + +/** + * TextToMusicOutput + */ +export type SchemaLyria2Output = { + /** + * Audio + * + * The generated music + */ + audio: SchemaFile +} + +/** + * TextToMusicInput + */ +export type SchemaLyria2Input = { + /** + * Prompt + * + * The text prompt describing the music you want to generate + */ + prompt: string + /** + * Seed + * + * A seed for deterministic generation. If provided, the model will attempt to produce the same audio given the same prompt and other parameters. + */ + seed?: number + /** + * Negative Prompt + * + * A description of what to exclude from the generated audio + */ + negative_prompt?: string +} + +/** + * TTSOutput + */ +export type SchemaElevenlabsTtsElevenV3Output = { + audio: SchemaFile + /** + * Timestamps + * + * Timestamps for each word in the generated speech. Only returned if `timestamps` is set to True in the request. + */ + timestamps?: Array | unknown +} + +/** + * TextToSpeechRequestV3 + * + * Request model for eleven_v3 which doesn't support previous_text/next_text + */ +export type SchemaElevenlabsTtsElevenV3Input = { + /** + * Stability + * + * Voice stability (0-1) + */ + stability?: number + /** + * Speed + * + * Speech speed (0.7-1.2). Values below 1.0 slow down the speech, above 1.0 speed it up. Extreme values may affect quality. + */ + speed?: number + /** + * Text + * + * The text to convert to speech + */ + text: string + /** + * Style + * + * Style exaggeration (0-1) + */ + style?: number + /** + * Timestamps + * + * Whether to return timestamps for each word in the generated speech + */ + timestamps?: boolean + /** + * Similarity Boost + * + * Similarity boost (0-1) + */ + similarity_boost?: number + /** + * Voice + * + * The voice to use for speech generation + */ + voice?: string + /** + * Language Code + * + * Language code (ISO 639-1) used to enforce a language for the model. + */ + language_code?: string | unknown +} + +/** + * GenerateOutput + */ +export type SchemaV2TextToMusicOutput = { + /** + * Tags + * + * The style tags used for generation. + */ + tags?: Array | unknown + /** + * Seed + * + * The seed used for generation. This can be used to generate an identical song by passing the same parameters with this seed in a future request. + */ + seed: number + /** + * Lyrics + * + * The lyrics used for generation. + */ + lyrics?: string | unknown + /** + * Audio + * + * The generated audio files. 
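// Editor's note (illustrative, not part of this patch): per the Sonauto
// GenerateInput docs just below, a run is only reproducible when you pass
// explicit `lyrics_prompt` and `tags` together with the returned `seed`; a
// bare `prompt` re-generates lyrics and tags, so the same seed can still
// yield a different song. Values here are placeholders.
const firstRun = {
  lyrics_prompt: '[Verse] Placeholder line one',
  tags: ['rock', 'driving'],
} // submit this, then read `seed` from the GenerateOutput
const exactReplay = { ...firstRun, seed: 123456789 } // illustrative seed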
+ */ + audio: Array +} + +/** + * GenerateInput + */ +export type SchemaV2TextToMusicInput = { + /** + * Prompt + * + * A description of the track you want to generate. This prompt will be used to automatically generate the tags and lyrics unless you manually set them. For example, if you set prompt and tags, then the prompt will be used to generate only the lyrics. + */ + prompt?: string | unknown + /** + * Lyrics Prompt + * + * The lyrics sung in the generated song. An empty string will generate an instrumental track. + */ + lyrics_prompt?: string | unknown + /** + * Tags + * + * Tags/styles of the music to generate. You can view a list of all available tags at https://sonauto.ai/tag-explorer. + */ + tags?: Array | unknown + /** + * Prompt Strength + * + * Controls how strongly your prompt influences the output. Greater values adhere more to the prompt but sound less natural. (This is CFG.) + */ + prompt_strength?: number + /** + * Output Bit Rate + * + * The bit rate to use for mp3 and m4a formats. Not available for other formats. + */ + output_bit_rate?: 128 | 192 | 256 | 320 | unknown + /** + * Num Songs + * + * Generating 2 songs costs 1.5x the price of generating 1 song. Also, note that using the same seed may not result in identical songs if the number of songs generated is changed. + */ + num_songs?: number + /** + * Output Format + */ + output_format?: 'flac' | 'mp3' | 'wav' | 'ogg' | 'm4a' + /** + * Bpm + * + * The beats per minute of the song. This can be set to an integer or the literal string "auto" to pick a suitable bpm based on the tags. Set bpm to null to not condition the model on bpm information. + */ + bpm?: number | string | unknown + /** + * Balance Strength + * + * Greater means more natural vocals. Lower means sharper instrumentals. We recommend 0.7. + */ + balance_strength?: number + /** + * Seed + * + * The seed to use for generation. Will pick a random seed if not provided. Repeating a request with identical parameters (must use lyrics and tags, not prompt) and the same seed will generate the same song. + */ + seed?: number | unknown +} + +/** + * InpaintSection + */ +export type SchemaInpaintSection = { + /** + * End + * + * End time in seconds of the section to inpaint. + */ + end: number + /** + * Start + * + * Start time in seconds of the section to inpaint. + */ + start: number +} + +/** + * InpaintOutput + */ +export type SchemaV2InpaintOutput = { + /** + * Seed + * + * The seed used for generation. This can be used to generate an identical song by passing the same parameters with this seed in a future request. + */ + seed: number + /** + * Audio + * + * The generated audio files. + */ + audio: Array +} + +/** + * InpaintInput + */ +export type SchemaV2InpaintInput = { + /** + * Lyrics Prompt + * + * The lyrics sung in the generated song. An empty string will generate an instrumental track. + */ + lyrics_prompt: string + /** + * Tags + * + * Tags/styles of the music to generate. You can view a list of all available tags at https://sonauto.ai/tag-explorer. + */ + tags?: Array + /** + * Prompt Strength + * + * Controls how strongly your prompt influences the output. Greater values adhere more to the prompt but sound less natural. (This is CFG.) + */ + prompt_strength?: number + /** + * Output Bit Rate + * + * The bit rate to use for mp3 and m4a formats. Not available for other formats. + */ + output_bit_rate?: 128 | 192 | 256 | 320 | unknown + /** + * Num Songs + * + * Generating 2 songs costs 1.5x the price of generating 1 song. 
Also, note that using the same seed may not result in identical songs if the number of songs generated is changed. + */ + num_songs?: number + /** + * Output Format + */ + output_format?: 'flac' | 'mp3' | 'wav' | 'ogg' | 'm4a' + /** + * Selection Crop + * + * Crop to the selected region + */ + selection_crop?: boolean + /** + * Sections + * + * List of sections to inpaint. Currently, only one section is supported so the list length must be 1. + */ + sections: Array + /** + * Balance Strength + * + * Greater means more natural vocals. Lower means sharper instrumentals. We recommend 0.7. + */ + balance_strength?: number + /** + * Audio Url + * + * The URL of the audio file to alter. Must be a valid publicly accessible URL. + */ + audio_url: string + /** + * Seed + * + * The seed to use for generation. Will pick a random seed if not provided. Repeating a request with identical parameters (must use lyrics and tags, not prompt) and the same seed will generate the same song. + */ + seed?: number | unknown +} + +/** + * SoundEffectOutput + * + * Output format for generated sound effects + */ +export type SchemaElevenlabsSoundEffectsV2Output = { + audio: SchemaFile +} + +/** + * SoundEffectRequestV2 + */ +export type SchemaElevenlabsSoundEffectsV2Input = { + /** + * Text + * + * The text describing the sound effect to generate + */ + text: string + /** + * Loop + * + * Whether to create a sound effect that loops smoothly. + */ + loop?: boolean + /** + * Prompt Influence + * + * How closely to follow the prompt (0-1). Higher values mean less variation. + */ + prompt_influence?: number + /** + * Output Format + * + * Output format of the generated audio. Formatted as codec_sample_rate_bitrate. + */ + output_format?: + | 'mp3_22050_32' + | 'mp3_44100_32' + | 'mp3_44100_64' + | 'mp3_44100_96' + | 'mp3_44100_128' + | 'mp3_44100_192' + | 'pcm_8000' + | 'pcm_16000' + | 'pcm_22050' + | 'pcm_24000' + | 'pcm_44100' + | 'pcm_48000' + | 'ulaw_8000' + | 'alaw_8000' + | 'opus_48000_32' + | 'opus_48000_64' + | 'opus_48000_96' + | 'opus_48000_128' + | 'opus_48000_192' + /** + * Duration Seconds + * + * Duration in seconds (0.5-22). If None, optimal duration will be determined from prompt. + */ + duration_seconds?: number | unknown +} + +/** + * PronunciationDictionaryLocator + */ +export type SchemaPronunciationDictionaryLocator = { + /** + * Version Id + * + * The ID of the version of the pronunciation dictionary. If not provided, the latest version will be used. + */ + version_id?: string | unknown + /** + * Pronunciation Dictionary Id + * + * The ID of the pronunciation dictionary. + */ + pronunciation_dictionary_id: string | unknown +} + +/** + * DialogueBlock + */ +export type SchemaDialogueBlock = { + /** + * Text + * + * The dialogue text + */ + text: string + /** + * Voice + * + * The name or the ID of the voice to be used for the generation. + */ + voice: string +} + +/** + * TextToDialogueOutput + */ +export type SchemaElevenlabsTextToDialogueElevenV3Output = { + /** + * Seed + * + * Random seed for reproducibility. + */ + seed: number + audio: SchemaFile +} + +/** + * TextToDialogueRequest + */ +export type SchemaElevenlabsTextToDialogueElevenV3Input = { + /** + * Stability + * + * Determines how stable the voice is and the randomness between each generation. Lower values introduce broader emotional range for the voice. Higher values can result in a monotonous voice with limited emotion. Must be one of 0.0, 0.5, 1.0, else it will be rounded to the nearest value. 
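// Editor's note (illustrative, not part of this patch): the text-to-dialogue
// `stability` field documented here only accepts 0.0, 0.5, or 1.0 and rounds
// anything else to the nearest value, so a caller-side quantizer (a
// hypothetical helper) makes that behavior explicit.
const quantizeStability = (s: number): 0 | 0.5 | 1 =>
  s < 0.25 ? 0 : s < 0.75 ? 0.5 : 1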
+ */ + stability?: number | unknown + /** + * Inputs + * + * A list of dialogue inputs, each containing text and a voice ID which will be converted into speech. + */ + inputs: Array + /** + * Language Code + * + * Language code (ISO 639-1) used to enforce a language for the model. An error will be returned if language code is not supported by the model. + */ + language_code?: string | unknown + /** + * Seed + * + * Random seed for reproducibility. + */ + seed?: number | unknown + /** + * Use Speaker Boost + * + * This setting boosts the similarity to the original speaker. Using this setting requires a slightly higher computational load, which in turn increases latency. + */ + use_speaker_boost?: boolean | unknown + /** + * Pronunciation Dictionary Locators + * + * A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request + */ + pronunciation_dictionary_locators?: Array +} + +/** + * TextToAudioOutput + */ +export type SchemaStableAudio25TextToAudioOutput = { + /** + * Seed + * + * The random seed used for generation + */ + seed: number + /** + * Audio + * + * The generated audio clip + */ + audio: SchemaFile +} + +/** + * TextToAudioInput + */ +export type SchemaStableAudio25TextToAudioInput = { + /** + * Prompt + * + * The prompt to generate audio from + */ + prompt: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Seconds Total + * + * The duration of the audio clip to generate + */ + seconds_total?: number + /** + * Num Inference Steps + * + * The number of steps to denoise the audio for + */ + num_inference_steps?: number + /** + * Guidance Scale + * + * How strictly the diffusion process adheres to the prompt text (higher values make your audio closer to your prompt). + */ + guidance_scale?: number + /** + * Seed + */ + seed?: number +} + +/** + * MusicV15Output + */ +export type SchemaMinimaxMusicV15Output = { + /** + * Audio + * + * The generated music + */ + audio: SchemaFile +} + +/** + * TextToMusic15Request + */ +export type SchemaMinimaxMusicV15Input = { + /** + * Prompt + * + * Lyrics, supports [intro][verse][chorus][bridge][outro] sections. 10-600 characters. + */ + prompt: string + /** + * Lyrics Prompt + * + * Control music generation. 10-3000 characters. + */ + lyrics_prompt: string + /** + * Audio Setting + * + * Audio configuration settings + */ + audio_setting?: SchemaAudioSetting +} + +/** + * AudioSetting + */ +export type SchemaAudioSetting = { + /** + * Format + * + * Audio format + */ + format?: 'mp3' | 'pcm' | 'flac' + /** + * Sample Rate + * + * Sample rate of generated audio + */ + sample_rate?: 8000 | 16000 | 22050 | 24000 | 32000 | 44100 + /** + * Bitrate + * + * Bitrate of generated audio + */ + bitrate?: 32000 | 64000 | 128000 | 256000 +} + +/** + * MusicGenerationOutput + * + * Output schema for music generation. + */ +export type SchemaMusicGenerationOutput = { + /** + * Prompt + * + * The processed prompt used for generation + */ + prompt: string + /** + * Metadata + * + * Generation metadata including duration, sample rate, and parameters + */ + metadata: { + [key: string]: unknown + } + audio: SchemaFile +} + +/** + * MusicGenerationInput + * + * Input schema for music generation with form controls for the playground. 
+ */ +export type SchemaMusicGenerationInput = { + /** + * Prompt + * + * Describe the music you want to generate + */ + prompt: string + /** + * Duration + * + * Length of the generated music in seconds + */ + duration?: number + /** + * Refinement + * + * Refinement level - higher values may improve quality but take longer + */ + refinement?: number + /** + * Seed + * + * Random seed for reproducible results - leave empty for random generation + */ + seed?: number | unknown + /** + * Negative Prompt + * + * Describe what you want to avoid in the music (instruments, styles, moods). Leave blank for none. + */ + negative_prompt?: string + /** + * Creativity + * + * Creativity level - higher values allow more creative interpretation of the prompt + */ + creativity?: number +} + +/** + * SoundEffectGenerationOutput + * + * Output schema for sound effect generation. + */ +export type SchemaSoundEffectGenerationOutput = { + /** + * Prompt + * + * The processed prompt used for generation + */ + prompt: string + /** + * Metadata + * + * Generation metadata including duration, sample rate, and parameters + */ + metadata: { + [key: string]: unknown + } + audio: SchemaFile +} + +/** + * SoundEffectGenerationInput + * + * Input schema for sound effect generation with form controls for the playground. + */ +export type SchemaSoundEffectGenerationInput = { + /** + * Prompt + * + * Describe the sound effect you want to generate + */ + prompt: string + /** + * Duration + * + * Length of the generated sound effect in seconds + */ + duration?: number + /** + * Refinement + * + * Refinement level - Higher values may improve quality but take longer + */ + refinement?: number + /** + * Seed + * + * Random seed for reproducible results - leave empty for random generation + */ + seed?: number | unknown + /** + * Negative Prompt + * + * Describe the types of sounds you don't want to generate in the output, avoid double-negatives, compare with positive prompts + */ + negative_prompt?: string + /** + * Creativity + * + * Creativity level - higher values allow more creative interpretation of the prompt + */ + creativity?: number +} + +/** + * MusicV15Output + */ +export type SchemaMinimaxMusicV2Output = { + /** + * Audio + * + * The generated music + */ + audio: SchemaFile +} + +/** + * TextToMusic20Request + */ +export type SchemaMinimaxMusicV2Input = { + /** + * Prompt + * + * A description of the music, specifying style, mood, and scenario. 10-300 characters. + */ + prompt: string + /** + * Lyrics Prompt + * + * Lyrics of the song. Use \n to separate lines. You may add structure tags like [Intro], [Verse], [Chorus], [Bridge], [Outro] to enhance the arrangement. 10-3000 characters. + */ + lyrics_prompt: string + /** + * Audio Setting + * + * Audio configuration settings + */ + audio_setting?: SchemaAudioSetting +} + +/** + * MusicSection + */ +export type SchemaMusicSection = { + /** + * Positive Local Styles + * + * The styles that should be present in this section. + */ + positive_local_styles: Array<string> + /** + * Lines + * + * The lyrics of the section. Each line must be at most 200 characters long. + */ + lines: Array<string> + /** + * Negative Local Styles + * + * The styles that should not be present in this section. + */ + negative_local_styles: Array<string> + /** + * Duration Ms + * + * The duration of the section in milliseconds. Must be between 3000ms and 120000ms. + */ + duration_ms: number + /** + * Section Name + * + * The name of the section. Must be between 1 and 100 characters.
+ */ + section_name: string +} + +/** + * MusicCompositionPlan + */ +export type SchemaMusicCompositionPlan = { + /** + * Negative Global Styles + * + * The styles that should not be present in the entire song. + */ + negative_global_styles: Array + /** + * Sections + * + * The sections of the song. + */ + sections: Array + /** + * Positive Global Styles + * + * The styles that should be present in the entire song. + */ + positive_global_styles: Array +} + +/** + * MusicOutput + */ +export type SchemaElevenlabsMusicOutput = { + audio: SchemaFile +} + +/** + * MusicRequest + * + * Request format for Elevenlabs Music API + */ +export type SchemaElevenlabsMusicInput = { + /** + * Prompt + * + * The text prompt describing the music to generate + */ + prompt?: string | unknown + /** + * The composition plan for the music + */ + composition_plan?: SchemaMusicCompositionPlan | unknown + /** + * Music Length Ms + * + * The length of the song to generate in milliseconds. Used only in conjunction with prompt. Must be between 3000ms and 600000ms. Optional - if not provided, the model will choose a length based on the prompt. + */ + music_length_ms?: number | unknown + /** + * Output Format + * + * Output format of the generated audio. Formatted as codec_sample_rate_bitrate. So an mp3 with 22.05kHz sample rate at 32kbs is represented as mp3_22050_32. MP3 with 192kbps bitrate requires you to be subscribed to Creator tier or above. PCM with 44.1kHz sample rate requires you to be subscribed to Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs. + */ + output_format?: + | 'mp3_22050_32' + | 'mp3_44100_32' + | 'mp3_44100_64' + | 'mp3_44100_96' + | 'mp3_44100_128' + | 'mp3_44100_192' + | 'pcm_8000' + | 'pcm_16000' + | 'pcm_22050' + | 'pcm_24000' + | 'pcm_44100' + | 'pcm_48000' + | 'ulaw_8000' + | 'alaw_8000' + | 'opus_48000_32' + | 'opus_48000_64' + | 'opus_48000_96' + | 'opus_48000_128' + | 'opus_48000_192' + /** + * Respect Sections Durations + * + * Controls how strictly section durations in the composition_plan are enforced. It will only have an effect if it is used with composition_plan. When set to true, the model will precisely respect each section's duration_ms from the plan. When set to false, the model may adjust individual section durations which will generally lead to better generation quality and improved latency, while always preserving the total song duration from the plan. + */ + respect_sections_durations?: boolean + /** + * Force Instrumental + * + * If true, guarantees that the generated song will be instrumental. If false, the song may or may not be instrumental depending on the prompt. Can only be used with prompt. + */ + force_instrumental?: boolean +} + +export type SchemaQueueStatus = { + status: 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED' + /** + * The request id. + */ + request_id: string + /** + * The response url. + */ + response_url?: string + /** + * The status url. + */ + status_url?: string + /** + * The cancel url. + */ + cancel_url?: string + /** + * The logs. + */ + logs?: { + [key: string]: unknown + } + /** + * The metrics. + */ + metrics?: { + [key: string]: unknown + } + /** + * The queue position. + */ + queue_position?: number +} + +export type GetFalAiElevenlabsMusicRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
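// Editor's note (illustrative, not part of this patch): a composition plan as
// described by the SchemaMusicCompositionPlan and SchemaMusicSection types
// above. Section durations must sit in the documented 3000-120000 ms range;
// style strings are free-form. All values are placeholders.
const plan: SchemaMusicCompositionPlan = {
  positive_global_styles: ['synthwave', '80s', 'retro'],
  negative_global_styles: ['acoustic'],
  sections: [
    {
      section_name: 'Intro',
      duration_ms: 8000,
      lines: [], // no lyrics: an instrumental section
      positive_local_styles: ['slow build', 'arpeggio'],
      negative_local_styles: [],
    },
    {
      section_name: 'Chorus',
      duration_ms: 20000,
      lines: ['Neon lights along the highway'],
      positive_local_styles: ['big drums'],
      negative_local_styles: ['lo-fi'],
    },
  ],
}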
+ */ + logs?: number + } + url: '/fal-ai/elevenlabs/music/requests/{request_id}/status' +} + +export type GetFalAiElevenlabsMusicRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiElevenlabsMusicRequestsByRequestIdStatusResponse = + GetFalAiElevenlabsMusicRequestsByRequestIdStatusResponses[keyof GetFalAiElevenlabsMusicRequestsByRequestIdStatusResponses] + +export type PutFalAiElevenlabsMusicRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/elevenlabs/music/requests/{request_id}/cancel' +} + +export type PutFalAiElevenlabsMusicRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiElevenlabsMusicRequestsByRequestIdCancelResponse = + PutFalAiElevenlabsMusicRequestsByRequestIdCancelResponses[keyof PutFalAiElevenlabsMusicRequestsByRequestIdCancelResponses] + +export type PostFalAiElevenlabsMusicData = { + body: SchemaElevenlabsMusicInput + path?: never + query?: never + url: '/fal-ai/elevenlabs/music' +} + +export type PostFalAiElevenlabsMusicResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiElevenlabsMusicResponse = + PostFalAiElevenlabsMusicResponses[keyof PostFalAiElevenlabsMusicResponses] + +export type GetFalAiElevenlabsMusicRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/elevenlabs/music/requests/{request_id}' +} + +export type GetFalAiElevenlabsMusicRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaElevenlabsMusicOutput +} + +export type GetFalAiElevenlabsMusicRequestsByRequestIdResponse = + GetFalAiElevenlabsMusicRequestsByRequestIdResponses[keyof GetFalAiElevenlabsMusicRequestsByRequestIdResponses] + +export type GetFalAiMinimaxMusicV2RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/minimax-music/v2/requests/{request_id}/status' +} + +export type GetFalAiMinimaxMusicV2RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiMinimaxMusicV2RequestsByRequestIdStatusResponse = + GetFalAiMinimaxMusicV2RequestsByRequestIdStatusResponses[keyof GetFalAiMinimaxMusicV2RequestsByRequestIdStatusResponses] + +export type PutFalAiMinimaxMusicV2RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax-music/v2/requests/{request_id}/cancel' +} + +export type PutFalAiMinimaxMusicV2RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiMinimaxMusicV2RequestsByRequestIdCancelResponse = + PutFalAiMinimaxMusicV2RequestsByRequestIdCancelResponses[keyof PutFalAiMinimaxMusicV2RequestsByRequestIdCancelResponses] + +export type PostFalAiMinimaxMusicV2Data = { + body: SchemaMinimaxMusicV2Input + path?: never + query?: never + url: '/fal-ai/minimax-music/v2' +} + +export type PostFalAiMinimaxMusicV2Responses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiMinimaxMusicV2Response = + PostFalAiMinimaxMusicV2Responses[keyof PostFalAiMinimaxMusicV2Responses] + +export type GetFalAiMinimaxMusicV2RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax-music/v2/requests/{request_id}' +} + +export type GetFalAiMinimaxMusicV2RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMinimaxMusicV2Output +} + +export type GetFalAiMinimaxMusicV2RequestsByRequestIdResponse = + GetFalAiMinimaxMusicV2RequestsByRequestIdResponses[keyof GetFalAiMinimaxMusicV2RequestsByRequestIdResponses] + +export type GetBeatovenSoundEffectGenerationRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/beatoven/sound-effect-generation/requests/{request_id}/status' +} + +export type GetBeatovenSoundEffectGenerationRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetBeatovenSoundEffectGenerationRequestsByRequestIdStatusResponse = + GetBeatovenSoundEffectGenerationRequestsByRequestIdStatusResponses[keyof GetBeatovenSoundEffectGenerationRequestsByRequestIdStatusResponses] + +export type PutBeatovenSoundEffectGenerationRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/beatoven/sound-effect-generation/requests/{request_id}/cancel' +} + +export type PutBeatovenSoundEffectGenerationRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutBeatovenSoundEffectGenerationRequestsByRequestIdCancelResponse = + PutBeatovenSoundEffectGenerationRequestsByRequestIdCancelResponses[keyof PutBeatovenSoundEffectGenerationRequestsByRequestIdCancelResponses] + +export type PostBeatovenSoundEffectGenerationData = { + body: SchemaSoundEffectGenerationInput + path?: never + query?: never + url: '/beatoven/sound-effect-generation' +} + +export type PostBeatovenSoundEffectGenerationResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostBeatovenSoundEffectGenerationResponse = + PostBeatovenSoundEffectGenerationResponses[keyof PostBeatovenSoundEffectGenerationResponses] + +export type GetBeatovenSoundEffectGenerationRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/beatoven/sound-effect-generation/requests/{request_id}' +} + +export type GetBeatovenSoundEffectGenerationRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSoundEffectGenerationOutput +} + +export type GetBeatovenSoundEffectGenerationRequestsByRequestIdResponse = + GetBeatovenSoundEffectGenerationRequestsByRequestIdResponses[keyof GetBeatovenSoundEffectGenerationRequestsByRequestIdResponses] + +export type GetBeatovenMusicGenerationRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
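// Editor's note (illustrative, not part of this patch): every endpoint in
// this generated file repeats the same four queue routes, namely POST submit,
// GET status, GET result, and PUT cancel. A generic polling sketch; the URL
// template and header follow the route types above, while the 1s interval is
// an assumption, and production code should add timeouts and error handling.
async function pollQueueResult<TOut>(
  endpoint: string, // e.g. 'fal-ai/elevenlabs/music'
  requestId: string,
): Promise<TOut> {
  const base = `https://queue.fal.run/${endpoint}/requests/${requestId}`
  const headers = { Authorization: `Key ${process.env.FAL_KEY}` } // placeholder
  for (;;) {
    const status = (await (
      await fetch(`${base}/status`, { headers })
    ).json()) as SchemaQueueStatus
    if (status.status === 'COMPLETED') break
    await new Promise((resolve) => setTimeout(resolve, 1000))
  }
  // The result route returns the endpoint-specific output type.
  return (await (await fetch(base, { headers })).json()) as TOut
}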
+ */ + logs?: number + } + url: '/beatoven/music-generation/requests/{request_id}/status' +} + +export type GetBeatovenMusicGenerationRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetBeatovenMusicGenerationRequestsByRequestIdStatusResponse = + GetBeatovenMusicGenerationRequestsByRequestIdStatusResponses[keyof GetBeatovenMusicGenerationRequestsByRequestIdStatusResponses] + +export type PutBeatovenMusicGenerationRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/beatoven/music-generation/requests/{request_id}/cancel' +} + +export type PutBeatovenMusicGenerationRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutBeatovenMusicGenerationRequestsByRequestIdCancelResponse = + PutBeatovenMusicGenerationRequestsByRequestIdCancelResponses[keyof PutBeatovenMusicGenerationRequestsByRequestIdCancelResponses] + +export type PostBeatovenMusicGenerationData = { + body: SchemaMusicGenerationInput + path?: never + query?: never + url: '/beatoven/music-generation' +} + +export type PostBeatovenMusicGenerationResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostBeatovenMusicGenerationResponse = + PostBeatovenMusicGenerationResponses[keyof PostBeatovenMusicGenerationResponses] + +export type GetBeatovenMusicGenerationRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/beatoven/music-generation/requests/{request_id}' +} + +export type GetBeatovenMusicGenerationRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMusicGenerationOutput +} + +export type GetBeatovenMusicGenerationRequestsByRequestIdResponse = + GetBeatovenMusicGenerationRequestsByRequestIdResponses[keyof GetBeatovenMusicGenerationRequestsByRequestIdResponses] + +export type GetFalAiMinimaxMusicV15RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/minimax-music/v1.5/requests/{request_id}/status' +} + +export type GetFalAiMinimaxMusicV15RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiMinimaxMusicV15RequestsByRequestIdStatusResponse = + GetFalAiMinimaxMusicV15RequestsByRequestIdStatusResponses[keyof GetFalAiMinimaxMusicV15RequestsByRequestIdStatusResponses] + +export type PutFalAiMinimaxMusicV15RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax-music/v1.5/requests/{request_id}/cancel' +} + +export type PutFalAiMinimaxMusicV15RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiMinimaxMusicV15RequestsByRequestIdCancelResponse = + PutFalAiMinimaxMusicV15RequestsByRequestIdCancelResponses[keyof PutFalAiMinimaxMusicV15RequestsByRequestIdCancelResponses] + +export type PostFalAiMinimaxMusicV15Data = { + body: SchemaMinimaxMusicV15Input + path?: never + query?: never + url: '/fal-ai/minimax-music/v1.5' +} + +export type PostFalAiMinimaxMusicV15Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMinimaxMusicV15Response = + PostFalAiMinimaxMusicV15Responses[keyof PostFalAiMinimaxMusicV15Responses] + +export type GetFalAiMinimaxMusicV15RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax-music/v1.5/requests/{request_id}' +} + +export type GetFalAiMinimaxMusicV15RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMinimaxMusicV15Output +} + +export type GetFalAiMinimaxMusicV15RequestsByRequestIdResponse = + GetFalAiMinimaxMusicV15RequestsByRequestIdResponses[keyof GetFalAiMinimaxMusicV15RequestsByRequestIdResponses] + +export type GetFalAiStableAudio25TextToAudioRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/stable-audio-25/text-to-audio/requests/{request_id}/status' +} + +export type GetFalAiStableAudio25TextToAudioRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiStableAudio25TextToAudioRequestsByRequestIdStatusResponse = + GetFalAiStableAudio25TextToAudioRequestsByRequestIdStatusResponses[keyof GetFalAiStableAudio25TextToAudioRequestsByRequestIdStatusResponses] + +export type PutFalAiStableAudio25TextToAudioRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/stable-audio-25/text-to-audio/requests/{request_id}/cancel' +} + +export type PutFalAiStableAudio25TextToAudioRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiStableAudio25TextToAudioRequestsByRequestIdCancelResponse = + PutFalAiStableAudio25TextToAudioRequestsByRequestIdCancelResponses[keyof PutFalAiStableAudio25TextToAudioRequestsByRequestIdCancelResponses] + +export type PostFalAiStableAudio25TextToAudioData = { + body: SchemaStableAudio25TextToAudioInput + path?: never + query?: never + url: '/fal-ai/stable-audio-25/text-to-audio' +} + +export type PostFalAiStableAudio25TextToAudioResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiStableAudio25TextToAudioResponse = + PostFalAiStableAudio25TextToAudioResponses[keyof PostFalAiStableAudio25TextToAudioResponses] + +export type GetFalAiStableAudio25TextToAudioRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/stable-audio-25/text-to-audio/requests/{request_id}' +} + +export type GetFalAiStableAudio25TextToAudioRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaStableAudio25TextToAudioOutput +} + +export type GetFalAiStableAudio25TextToAudioRequestsByRequestIdResponse = + GetFalAiStableAudio25TextToAudioRequestsByRequestIdResponses[keyof GetFalAiStableAudio25TextToAudioRequestsByRequestIdResponses] + +export type GetFalAiElevenlabsTextToDialogueElevenV3RequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/elevenlabs/text-to-dialogue/eleven-v3/requests/{request_id}/status' + } + +export type GetFalAiElevenlabsTextToDialogueElevenV3RequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiElevenlabsTextToDialogueElevenV3RequestsByRequestIdStatusResponse = + GetFalAiElevenlabsTextToDialogueElevenV3RequestsByRequestIdStatusResponses[keyof GetFalAiElevenlabsTextToDialogueElevenV3RequestsByRequestIdStatusResponses] + +export type PutFalAiElevenlabsTextToDialogueElevenV3RequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/elevenlabs/text-to-dialogue/eleven-v3/requests/{request_id}/cancel' + } + +export type PutFalAiElevenlabsTextToDialogueElevenV3RequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiElevenlabsTextToDialogueElevenV3RequestsByRequestIdCancelResponse = + PutFalAiElevenlabsTextToDialogueElevenV3RequestsByRequestIdCancelResponses[keyof PutFalAiElevenlabsTextToDialogueElevenV3RequestsByRequestIdCancelResponses] + +export type PostFalAiElevenlabsTextToDialogueElevenV3Data = { + body: SchemaElevenlabsTextToDialogueElevenV3Input + path?: never + query?: never + url: '/fal-ai/elevenlabs/text-to-dialogue/eleven-v3' +} + +export type PostFalAiElevenlabsTextToDialogueElevenV3Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiElevenlabsTextToDialogueElevenV3Response = + PostFalAiElevenlabsTextToDialogueElevenV3Responses[keyof PostFalAiElevenlabsTextToDialogueElevenV3Responses] + +export type GetFalAiElevenlabsTextToDialogueElevenV3RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/elevenlabs/text-to-dialogue/eleven-v3/requests/{request_id}' +} + +export type GetFalAiElevenlabsTextToDialogueElevenV3RequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaElevenlabsTextToDialogueElevenV3Output + } + +export type GetFalAiElevenlabsTextToDialogueElevenV3RequestsByRequestIdResponse = + GetFalAiElevenlabsTextToDialogueElevenV3RequestsByRequestIdResponses[keyof GetFalAiElevenlabsTextToDialogueElevenV3RequestsByRequestIdResponses] + +export type GetFalAiElevenlabsSoundEffectsV2RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/elevenlabs/sound-effects/v2/requests/{request_id}/status' +} + +export type GetFalAiElevenlabsSoundEffectsV2RequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiElevenlabsSoundEffectsV2RequestsByRequestIdStatusResponse = + GetFalAiElevenlabsSoundEffectsV2RequestsByRequestIdStatusResponses[keyof GetFalAiElevenlabsSoundEffectsV2RequestsByRequestIdStatusResponses] + +export type PutFalAiElevenlabsSoundEffectsV2RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/elevenlabs/sound-effects/v2/requests/{request_id}/cancel' +} + +export type PutFalAiElevenlabsSoundEffectsV2RequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiElevenlabsSoundEffectsV2RequestsByRequestIdCancelResponse = + PutFalAiElevenlabsSoundEffectsV2RequestsByRequestIdCancelResponses[keyof PutFalAiElevenlabsSoundEffectsV2RequestsByRequestIdCancelResponses] + +export type PostFalAiElevenlabsSoundEffectsV2Data = { + body: SchemaElevenlabsSoundEffectsV2Input + path?: never + query?: never + url: '/fal-ai/elevenlabs/sound-effects/v2' +} + +export type PostFalAiElevenlabsSoundEffectsV2Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiElevenlabsSoundEffectsV2Response = + PostFalAiElevenlabsSoundEffectsV2Responses[keyof PostFalAiElevenlabsSoundEffectsV2Responses] + +export type GetFalAiElevenlabsSoundEffectsV2RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/elevenlabs/sound-effects/v2/requests/{request_id}' +} + +export type GetFalAiElevenlabsSoundEffectsV2RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaElevenlabsSoundEffectsV2Output +} + +export type GetFalAiElevenlabsSoundEffectsV2RequestsByRequestIdResponse = + GetFalAiElevenlabsSoundEffectsV2RequestsByRequestIdResponses[keyof GetFalAiElevenlabsSoundEffectsV2RequestsByRequestIdResponses] + +export type GetSonautoV2InpaintRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/sonauto/v2/inpaint/requests/{request_id}/status' +} + +export type GetSonautoV2InpaintRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetSonautoV2InpaintRequestsByRequestIdStatusResponse = + GetSonautoV2InpaintRequestsByRequestIdStatusResponses[keyof GetSonautoV2InpaintRequestsByRequestIdStatusResponses] + +export type PutSonautoV2InpaintRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/sonauto/v2/inpaint/requests/{request_id}/cancel' +} + +export type PutSonautoV2InpaintRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutSonautoV2InpaintRequestsByRequestIdCancelResponse = + PutSonautoV2InpaintRequestsByRequestIdCancelResponses[keyof PutSonautoV2InpaintRequestsByRequestIdCancelResponses] + +export type PostSonautoV2InpaintData = { + body: SchemaV2InpaintInput + path?: never + query?: never + url: '/sonauto/v2/inpaint' +} + +export type PostSonautoV2InpaintResponses = { + /** + * The request status. 
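+ *
+ * A hand-written usage sketch (not emitted by the generator): submitting a
+ * job to this queue endpoint. `declare` stands in for a real payload; the
+ * base URL and `Key` auth header are assumptions, as in the other examples
+ * in this file.
+ *
+ * @example
+ * declare const input: SchemaV2InpaintInput
+ * const res = await fetch('https://queue.fal.run/sonauto/v2/inpaint', {
+ *   method: 'POST',
+ *   headers: {
+ *     'Content-Type': 'application/json',
+ *     Authorization: `Key ${process.env.FAL_KEY}`,
+ *   },
+ *   body: JSON.stringify(input),
+ * })
+ * // The queue replies with the request's status, not the final audio:
+ * const queued: PostSonautoV2InpaintResponse = await res.json()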
+ */ + 200: SchemaQueueStatus +} + +export type PostSonautoV2InpaintResponse = + PostSonautoV2InpaintResponses[keyof PostSonautoV2InpaintResponses] + +export type GetSonautoV2InpaintRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/sonauto/v2/inpaint/requests/{request_id}' +} + +export type GetSonautoV2InpaintRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaV2InpaintOutput +} + +export type GetSonautoV2InpaintRequestsByRequestIdResponse = + GetSonautoV2InpaintRequestsByRequestIdResponses[keyof GetSonautoV2InpaintRequestsByRequestIdResponses] + +export type GetSonautoV2TextToMusicRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/sonauto/v2/text-to-music/requests/{request_id}/status' +} + +export type GetSonautoV2TextToMusicRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetSonautoV2TextToMusicRequestsByRequestIdStatusResponse = + GetSonautoV2TextToMusicRequestsByRequestIdStatusResponses[keyof GetSonautoV2TextToMusicRequestsByRequestIdStatusResponses] + +export type PutSonautoV2TextToMusicRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/sonauto/v2/text-to-music/requests/{request_id}/cancel' +} + +export type PutSonautoV2TextToMusicRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutSonautoV2TextToMusicRequestsByRequestIdCancelResponse = + PutSonautoV2TextToMusicRequestsByRequestIdCancelResponses[keyof PutSonautoV2TextToMusicRequestsByRequestIdCancelResponses] + +export type PostSonautoV2TextToMusicData = { + body: SchemaV2TextToMusicInput + path?: never + query?: never + url: '/sonauto/v2/text-to-music' +} + +export type PostSonautoV2TextToMusicResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostSonautoV2TextToMusicResponse = + PostSonautoV2TextToMusicResponses[keyof PostSonautoV2TextToMusicResponses] + +export type GetSonautoV2TextToMusicRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/sonauto/v2/text-to-music/requests/{request_id}' +} + +export type GetSonautoV2TextToMusicRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaV2TextToMusicOutput +} + +export type GetSonautoV2TextToMusicRequestsByRequestIdResponse = + GetSonautoV2TextToMusicRequestsByRequestIdResponses[keyof GetSonautoV2TextToMusicRequestsByRequestIdResponses] + +export type GetFalAiElevenlabsTtsElevenV3RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/elevenlabs/tts/eleven-v3/requests/{request_id}/status' +} + +export type GetFalAiElevenlabsTtsElevenV3RequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiElevenlabsTtsElevenV3RequestsByRequestIdStatusResponse = + GetFalAiElevenlabsTtsElevenV3RequestsByRequestIdStatusResponses[keyof GetFalAiElevenlabsTtsElevenV3RequestsByRequestIdStatusResponses] + +export type PutFalAiElevenlabsTtsElevenV3RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/elevenlabs/tts/eleven-v3/requests/{request_id}/cancel' +} + +export type PutFalAiElevenlabsTtsElevenV3RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiElevenlabsTtsElevenV3RequestsByRequestIdCancelResponse = + PutFalAiElevenlabsTtsElevenV3RequestsByRequestIdCancelResponses[keyof PutFalAiElevenlabsTtsElevenV3RequestsByRequestIdCancelResponses] + +export type PostFalAiElevenlabsTtsElevenV3Data = { + body: SchemaElevenlabsTtsElevenV3Input + path?: never + query?: never + url: '/fal-ai/elevenlabs/tts/eleven-v3' +} + +export type PostFalAiElevenlabsTtsElevenV3Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiElevenlabsTtsElevenV3Response = + PostFalAiElevenlabsTtsElevenV3Responses[keyof PostFalAiElevenlabsTtsElevenV3Responses] + +export type GetFalAiElevenlabsTtsElevenV3RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/elevenlabs/tts/eleven-v3/requests/{request_id}' +} + +export type GetFalAiElevenlabsTtsElevenV3RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaElevenlabsTtsElevenV3Output +} + +export type GetFalAiElevenlabsTtsElevenV3RequestsByRequestIdResponse = + GetFalAiElevenlabsTtsElevenV3RequestsByRequestIdResponses[keyof GetFalAiElevenlabsTtsElevenV3RequestsByRequestIdResponses] + +export type GetFalAiLyria2RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/lyria2/requests/{request_id}/status' +} + +export type GetFalAiLyria2RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLyria2RequestsByRequestIdStatusResponse = + GetFalAiLyria2RequestsByRequestIdStatusResponses[keyof GetFalAiLyria2RequestsByRequestIdStatusResponses] + +export type PutFalAiLyria2RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/lyria2/requests/{request_id}/cancel' +} + +export type PutFalAiLyria2RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLyria2RequestsByRequestIdCancelResponse = + PutFalAiLyria2RequestsByRequestIdCancelResponses[keyof PutFalAiLyria2RequestsByRequestIdCancelResponses] + +export type PostFalAiLyria2Data = { + body: SchemaLyria2Input + path?: never + query?: never + url: '/fal-ai/lyria2' +} + +export type PostFalAiLyria2Responses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiLyria2Response = + PostFalAiLyria2Responses[keyof PostFalAiLyria2Responses] + +export type GetFalAiLyria2RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/lyria2/requests/{request_id}' +} + +export type GetFalAiLyria2RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLyria2Output +} + +export type GetFalAiLyria2RequestsByRequestIdResponse = + GetFalAiLyria2RequestsByRequestIdResponses[keyof GetFalAiLyria2RequestsByRequestIdResponses] + +export type GetFalAiAceStepPromptToAudioRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ace-step/prompt-to-audio/requests/{request_id}/status' +} + +export type GetFalAiAceStepPromptToAudioRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiAceStepPromptToAudioRequestsByRequestIdStatusResponse = + GetFalAiAceStepPromptToAudioRequestsByRequestIdStatusResponses[keyof GetFalAiAceStepPromptToAudioRequestsByRequestIdStatusResponses] + +export type PutFalAiAceStepPromptToAudioRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ace-step/prompt-to-audio/requests/{request_id}/cancel' +} + +export type PutFalAiAceStepPromptToAudioRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiAceStepPromptToAudioRequestsByRequestIdCancelResponse = + PutFalAiAceStepPromptToAudioRequestsByRequestIdCancelResponses[keyof PutFalAiAceStepPromptToAudioRequestsByRequestIdCancelResponses] + +export type PostFalAiAceStepPromptToAudioData = { + body: SchemaAceStepPromptToAudioInput + path?: never + query?: never + url: '/fal-ai/ace-step/prompt-to-audio' +} + +export type PostFalAiAceStepPromptToAudioResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiAceStepPromptToAudioResponse = + PostFalAiAceStepPromptToAudioResponses[keyof PostFalAiAceStepPromptToAudioResponses] + +export type GetFalAiAceStepPromptToAudioRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ace-step/prompt-to-audio/requests/{request_id}' +} + +export type GetFalAiAceStepPromptToAudioRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaAceStepPromptToAudioOutput +} + +export type GetFalAiAceStepPromptToAudioRequestsByRequestIdResponse = + GetFalAiAceStepPromptToAudioRequestsByRequestIdResponses[keyof GetFalAiAceStepPromptToAudioRequestsByRequestIdResponses] + +export type GetFalAiAceStepRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ace-step/requests/{request_id}/status' +} + +export type GetFalAiAceStepRequestsByRequestIdStatusResponses = { + /** + * The request status. 
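+ *
+ * A hand-written sketch of how the generated `*Data` shapes compose: each
+ * pairs a `url` template with a `path` object, so a single helper can expand
+ * any of them. `expandUrl` is illustrative, not part of the generated client.
+ *
+ * @example
+ * function expandUrl(template: string, path: Record<string, string>): string {
+ *   return template.replace(/\{(\w+)\}/g, (_, key: string) =>
+ *     encodeURIComponent(path[key] ?? ''),
+ *   )
+ * }
+ * // => '/fal-ai/ace-step/requests/abc-123/status'
+ * expandUrl('/fal-ai/ace-step/requests/{request_id}/status', { request_id: 'abc-123' })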
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiAceStepRequestsByRequestIdStatusResponse = + GetFalAiAceStepRequestsByRequestIdStatusResponses[keyof GetFalAiAceStepRequestsByRequestIdStatusResponses] + +export type PutFalAiAceStepRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ace-step/requests/{request_id}/cancel' +} + +export type PutFalAiAceStepRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiAceStepRequestsByRequestIdCancelResponse = + PutFalAiAceStepRequestsByRequestIdCancelResponses[keyof PutFalAiAceStepRequestsByRequestIdCancelResponses] + +export type PostFalAiAceStepData = { + body: SchemaAceStepInput + path?: never + query?: never + url: '/fal-ai/ace-step' +} + +export type PostFalAiAceStepResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiAceStepResponse = + PostFalAiAceStepResponses[keyof PostFalAiAceStepResponses] + +export type GetFalAiAceStepRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ace-step/requests/{request_id}' +} + +export type GetFalAiAceStepRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaAceStepOutput +} + +export type GetFalAiAceStepRequestsByRequestIdResponse = + GetFalAiAceStepRequestsByRequestIdResponses[keyof GetFalAiAceStepRequestsByRequestIdResponses] + +export type GetCassetteaiSoundEffectsGeneratorRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/cassetteai/sound-effects-generator/requests/{request_id}/status' +} + +export type GetCassetteaiSoundEffectsGeneratorRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetCassetteaiSoundEffectsGeneratorRequestsByRequestIdStatusResponse = + GetCassetteaiSoundEffectsGeneratorRequestsByRequestIdStatusResponses[keyof GetCassetteaiSoundEffectsGeneratorRequestsByRequestIdStatusResponses] + +export type PutCassetteaiSoundEffectsGeneratorRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/cassetteai/sound-effects-generator/requests/{request_id}/cancel' +} + +export type PutCassetteaiSoundEffectsGeneratorRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutCassetteaiSoundEffectsGeneratorRequestsByRequestIdCancelResponse = + PutCassetteaiSoundEffectsGeneratorRequestsByRequestIdCancelResponses[keyof PutCassetteaiSoundEffectsGeneratorRequestsByRequestIdCancelResponses] + +export type PostCassetteaiSoundEffectsGeneratorData = { + body: SchemaSoundEffectsGeneratorInput + path?: never + query?: never + url: '/cassetteai/sound-effects-generator' +} + +export type PostCassetteaiSoundEffectsGeneratorResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostCassetteaiSoundEffectsGeneratorResponse = + PostCassetteaiSoundEffectsGeneratorResponses[keyof PostCassetteaiSoundEffectsGeneratorResponses] + +export type GetCassetteaiSoundEffectsGeneratorRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/cassetteai/sound-effects-generator/requests/{request_id}' +} + +export type GetCassetteaiSoundEffectsGeneratorRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSoundEffectsGeneratorOutput +} + +export type GetCassetteaiSoundEffectsGeneratorRequestsByRequestIdResponse = + GetCassetteaiSoundEffectsGeneratorRequestsByRequestIdResponses[keyof GetCassetteaiSoundEffectsGeneratorRequestsByRequestIdResponses] + +export type GetCassetteaiMusicGeneratorRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/cassetteai/music-generator/requests/{request_id}/status' +} + +export type GetCassetteaiMusicGeneratorRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetCassetteaiMusicGeneratorRequestsByRequestIdStatusResponse = + GetCassetteaiMusicGeneratorRequestsByRequestIdStatusResponses[keyof GetCassetteaiMusicGeneratorRequestsByRequestIdStatusResponses] + +export type PutCassetteaiMusicGeneratorRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/cassetteai/music-generator/requests/{request_id}/cancel' +} + +export type PutCassetteaiMusicGeneratorRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutCassetteaiMusicGeneratorRequestsByRequestIdCancelResponse = + PutCassetteaiMusicGeneratorRequestsByRequestIdCancelResponses[keyof PutCassetteaiMusicGeneratorRequestsByRequestIdCancelResponses] + +export type PostCassetteaiMusicGeneratorData = { + body: SchemaMusicGeneratorInput + path?: never + query?: never + url: '/cassetteai/music-generator' +} + +export type PostCassetteaiMusicGeneratorResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostCassetteaiMusicGeneratorResponse = + PostCassetteaiMusicGeneratorResponses[keyof PostCassetteaiMusicGeneratorResponses] + +export type GetCassetteaiMusicGeneratorRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/cassetteai/music-generator/requests/{request_id}' +} + +export type GetCassetteaiMusicGeneratorRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMusicGeneratorOutput +} + +export type GetCassetteaiMusicGeneratorRequestsByRequestIdResponse = + GetCassetteaiMusicGeneratorRequestsByRequestIdResponses[keyof GetCassetteaiMusicGeneratorRequestsByRequestIdResponses] + +export type GetFalAiCsm1bRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/csm-1b/requests/{request_id}/status' +} + +export type GetFalAiCsm1bRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiCsm1bRequestsByRequestIdStatusResponse = + GetFalAiCsm1bRequestsByRequestIdStatusResponses[keyof GetFalAiCsm1bRequestsByRequestIdStatusResponses] + +export type PutFalAiCsm1bRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/csm-1b/requests/{request_id}/cancel' +} + +export type PutFalAiCsm1bRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiCsm1bRequestsByRequestIdCancelResponse = + PutFalAiCsm1bRequestsByRequestIdCancelResponses[keyof PutFalAiCsm1bRequestsByRequestIdCancelResponses] + +export type PostFalAiCsm1bData = { + body: SchemaCsm1bInput + path?: never + query?: never + url: '/fal-ai/csm-1b' +} + +export type PostFalAiCsm1bResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiCsm1bResponse = + PostFalAiCsm1bResponses[keyof PostFalAiCsm1bResponses] + +export type GetFalAiCsm1bRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/csm-1b/requests/{request_id}' +} + +export type GetFalAiCsm1bRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaCsm1bOutput +} + +export type GetFalAiCsm1bRequestsByRequestIdResponse = + GetFalAiCsm1bRequestsByRequestIdResponses[keyof GetFalAiCsm1bRequestsByRequestIdResponses] + +export type GetFalAiDiffrhythmRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/diffrhythm/requests/{request_id}/status' +} + +export type GetFalAiDiffrhythmRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiDiffrhythmRequestsByRequestIdStatusResponse = + GetFalAiDiffrhythmRequestsByRequestIdStatusResponses[keyof GetFalAiDiffrhythmRequestsByRequestIdStatusResponses] + +export type PutFalAiDiffrhythmRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/diffrhythm/requests/{request_id}/cancel' +} + +export type PutFalAiDiffrhythmRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiDiffrhythmRequestsByRequestIdCancelResponse = + PutFalAiDiffrhythmRequestsByRequestIdCancelResponses[keyof PutFalAiDiffrhythmRequestsByRequestIdCancelResponses] + +export type PostFalAiDiffrhythmData = { + body: SchemaDiffrhythmInput + path?: never + query?: never + url: '/fal-ai/diffrhythm' +} + +export type PostFalAiDiffrhythmResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiDiffrhythmResponse = + PostFalAiDiffrhythmResponses[keyof PostFalAiDiffrhythmResponses] + +export type GetFalAiDiffrhythmRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/diffrhythm/requests/{request_id}' +} + +export type GetFalAiDiffrhythmRequestsByRequestIdResponses = { + /** + * Result of the request. 
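+ *
+ * A hand-written usage sketch (not emitted by the generator): fetching the
+ * final result once polling reports COMPLETED. Base URL and auth header are
+ * assumed, as in the other examples in this file.
+ *
+ * @example
+ * declare const requestId: string
+ * const res = await fetch(
+ *   `https://queue.fal.run/fal-ai/diffrhythm/requests/${requestId}`,
+ *   { headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+ * )
+ * const output: SchemaDiffrhythmOutput = await res.json()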
+ */ + 200: SchemaDiffrhythmOutput +} + +export type GetFalAiDiffrhythmRequestsByRequestIdResponse = + GetFalAiDiffrhythmRequestsByRequestIdResponses[keyof GetFalAiDiffrhythmRequestsByRequestIdResponses] + +export type GetFalAiElevenlabsTtsMultilingualV2RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/elevenlabs/tts/multilingual-v2/requests/{request_id}/status' +} + +export type GetFalAiElevenlabsTtsMultilingualV2RequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiElevenlabsTtsMultilingualV2RequestsByRequestIdStatusResponse = + GetFalAiElevenlabsTtsMultilingualV2RequestsByRequestIdStatusResponses[keyof GetFalAiElevenlabsTtsMultilingualV2RequestsByRequestIdStatusResponses] + +export type PutFalAiElevenlabsTtsMultilingualV2RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/elevenlabs/tts/multilingual-v2/requests/{request_id}/cancel' +} + +export type PutFalAiElevenlabsTtsMultilingualV2RequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiElevenlabsTtsMultilingualV2RequestsByRequestIdCancelResponse = + PutFalAiElevenlabsTtsMultilingualV2RequestsByRequestIdCancelResponses[keyof PutFalAiElevenlabsTtsMultilingualV2RequestsByRequestIdCancelResponses] + +export type PostFalAiElevenlabsTtsMultilingualV2Data = { + body: SchemaElevenlabsTtsMultilingualV2Input + path?: never + query?: never + url: '/fal-ai/elevenlabs/tts/multilingual-v2' +} + +export type PostFalAiElevenlabsTtsMultilingualV2Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiElevenlabsTtsMultilingualV2Response = + PostFalAiElevenlabsTtsMultilingualV2Responses[keyof PostFalAiElevenlabsTtsMultilingualV2Responses] + +export type GetFalAiElevenlabsTtsMultilingualV2RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/elevenlabs/tts/multilingual-v2/requests/{request_id}' +} + +export type GetFalAiElevenlabsTtsMultilingualV2RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaElevenlabsTtsMultilingualV2Output +} + +export type GetFalAiElevenlabsTtsMultilingualV2RequestsByRequestIdResponse = + GetFalAiElevenlabsTtsMultilingualV2RequestsByRequestIdResponses[keyof GetFalAiElevenlabsTtsMultilingualV2RequestsByRequestIdResponses] + +export type GetFalAiKokoroHindiRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kokoro/hindi/requests/{request_id}/status' +} + +export type GetFalAiKokoroHindiRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiKokoroHindiRequestsByRequestIdStatusResponse = + GetFalAiKokoroHindiRequestsByRequestIdStatusResponses[keyof GetFalAiKokoroHindiRequestsByRequestIdStatusResponses] + +export type PutFalAiKokoroHindiRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kokoro/hindi/requests/{request_id}/cancel' +} + +export type PutFalAiKokoroHindiRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiKokoroHindiRequestsByRequestIdCancelResponse = + PutFalAiKokoroHindiRequestsByRequestIdCancelResponses[keyof PutFalAiKokoroHindiRequestsByRequestIdCancelResponses] + +export type PostFalAiKokoroHindiData = { + body: SchemaKokoroHindiInput + path?: never + query?: never + url: '/fal-ai/kokoro/hindi' +} + +export type PostFalAiKokoroHindiResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKokoroHindiResponse = + PostFalAiKokoroHindiResponses[keyof PostFalAiKokoroHindiResponses] + +export type GetFalAiKokoroHindiRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kokoro/hindi/requests/{request_id}' +} + +export type GetFalAiKokoroHindiRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaKokoroHindiOutput +} + +export type GetFalAiKokoroHindiRequestsByRequestIdResponse = + GetFalAiKokoroHindiRequestsByRequestIdResponses[keyof GetFalAiKokoroHindiRequestsByRequestIdResponses] + +export type GetFalAiKokoroMandarinChineseRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kokoro/mandarin-chinese/requests/{request_id}/status' +} + +export type GetFalAiKokoroMandarinChineseRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiKokoroMandarinChineseRequestsByRequestIdStatusResponse = + GetFalAiKokoroMandarinChineseRequestsByRequestIdStatusResponses[keyof GetFalAiKokoroMandarinChineseRequestsByRequestIdStatusResponses] + +export type PutFalAiKokoroMandarinChineseRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kokoro/mandarin-chinese/requests/{request_id}/cancel' +} + +export type PutFalAiKokoroMandarinChineseRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiKokoroMandarinChineseRequestsByRequestIdCancelResponse = + PutFalAiKokoroMandarinChineseRequestsByRequestIdCancelResponses[keyof PutFalAiKokoroMandarinChineseRequestsByRequestIdCancelResponses] + +export type PostFalAiKokoroMandarinChineseData = { + body: SchemaKokoroMandarinChineseInput + path?: never + query?: never + url: '/fal-ai/kokoro/mandarin-chinese' +} + +export type PostFalAiKokoroMandarinChineseResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiKokoroMandarinChineseResponse = + PostFalAiKokoroMandarinChineseResponses[keyof PostFalAiKokoroMandarinChineseResponses] + +export type GetFalAiKokoroMandarinChineseRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kokoro/mandarin-chinese/requests/{request_id}' +} + +export type GetFalAiKokoroMandarinChineseRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaKokoroMandarinChineseOutput +} + +export type GetFalAiKokoroMandarinChineseRequestsByRequestIdResponse = + GetFalAiKokoroMandarinChineseRequestsByRequestIdResponses[keyof GetFalAiKokoroMandarinChineseRequestsByRequestIdResponses] + +export type GetFalAiKokoroSpanishRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kokoro/spanish/requests/{request_id}/status' +} + +export type GetFalAiKokoroSpanishRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiKokoroSpanishRequestsByRequestIdStatusResponse = + GetFalAiKokoroSpanishRequestsByRequestIdStatusResponses[keyof GetFalAiKokoroSpanishRequestsByRequestIdStatusResponses] + +export type PutFalAiKokoroSpanishRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kokoro/spanish/requests/{request_id}/cancel' +} + +export type PutFalAiKokoroSpanishRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiKokoroSpanishRequestsByRequestIdCancelResponse = + PutFalAiKokoroSpanishRequestsByRequestIdCancelResponses[keyof PutFalAiKokoroSpanishRequestsByRequestIdCancelResponses] + +export type PostFalAiKokoroSpanishData = { + body: SchemaKokoroSpanishInput + path?: never + query?: never + url: '/fal-ai/kokoro/spanish' +} + +export type PostFalAiKokoroSpanishResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKokoroSpanishResponse = + PostFalAiKokoroSpanishResponses[keyof PostFalAiKokoroSpanishResponses] + +export type GetFalAiKokoroSpanishRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kokoro/spanish/requests/{request_id}' +} + +export type GetFalAiKokoroSpanishRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaKokoroSpanishOutput +} + +export type GetFalAiKokoroSpanishRequestsByRequestIdResponse = + GetFalAiKokoroSpanishRequestsByRequestIdResponses[keyof GetFalAiKokoroSpanishRequestsByRequestIdResponses] + +export type GetFalAiKokoroBrazilianPortugueseRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kokoro/brazilian-portuguese/requests/{request_id}/status' +} + +export type GetFalAiKokoroBrazilianPortugueseRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiKokoroBrazilianPortugueseRequestsByRequestIdStatusResponse = + GetFalAiKokoroBrazilianPortugueseRequestsByRequestIdStatusResponses[keyof GetFalAiKokoroBrazilianPortugueseRequestsByRequestIdStatusResponses] + +export type PutFalAiKokoroBrazilianPortugueseRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kokoro/brazilian-portuguese/requests/{request_id}/cancel' +} + +export type PutFalAiKokoroBrazilianPortugueseRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiKokoroBrazilianPortugueseRequestsByRequestIdCancelResponse = + PutFalAiKokoroBrazilianPortugueseRequestsByRequestIdCancelResponses[keyof PutFalAiKokoroBrazilianPortugueseRequestsByRequestIdCancelResponses] + +export type PostFalAiKokoroBrazilianPortugueseData = { + body: SchemaKokoroBrazilianPortugueseInput + path?: never + query?: never + url: '/fal-ai/kokoro/brazilian-portuguese' +} + +export type PostFalAiKokoroBrazilianPortugueseResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKokoroBrazilianPortugueseResponse = + PostFalAiKokoroBrazilianPortugueseResponses[keyof PostFalAiKokoroBrazilianPortugueseResponses] + +export type GetFalAiKokoroBrazilianPortugueseRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kokoro/brazilian-portuguese/requests/{request_id}' +} + +export type GetFalAiKokoroBrazilianPortugueseRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaKokoroBrazilianPortugueseOutput +} + +export type GetFalAiKokoroBrazilianPortugueseRequestsByRequestIdResponse = + GetFalAiKokoroBrazilianPortugueseRequestsByRequestIdResponses[keyof GetFalAiKokoroBrazilianPortugueseRequestsByRequestIdResponses] + +export type GetFalAiKokoroBritishEnglishRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kokoro/british-english/requests/{request_id}/status' +} + +export type GetFalAiKokoroBritishEnglishRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiKokoroBritishEnglishRequestsByRequestIdStatusResponse = + GetFalAiKokoroBritishEnglishRequestsByRequestIdStatusResponses[keyof GetFalAiKokoroBritishEnglishRequestsByRequestIdStatusResponses] + +export type PutFalAiKokoroBritishEnglishRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kokoro/british-english/requests/{request_id}/cancel' +} + +export type PutFalAiKokoroBritishEnglishRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiKokoroBritishEnglishRequestsByRequestIdCancelResponse = + PutFalAiKokoroBritishEnglishRequestsByRequestIdCancelResponses[keyof PutFalAiKokoroBritishEnglishRequestsByRequestIdCancelResponses] + +export type PostFalAiKokoroBritishEnglishData = { + body: SchemaKokoroBritishEnglishInput + path?: never + query?: never + url: '/fal-ai/kokoro/british-english' +} + +export type PostFalAiKokoroBritishEnglishResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKokoroBritishEnglishResponse = + PostFalAiKokoroBritishEnglishResponses[keyof PostFalAiKokoroBritishEnglishResponses] + +export type GetFalAiKokoroBritishEnglishRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kokoro/british-english/requests/{request_id}' +} + +export type GetFalAiKokoroBritishEnglishRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaKokoroBritishEnglishOutput +} + +export type GetFalAiKokoroBritishEnglishRequestsByRequestIdResponse = + GetFalAiKokoroBritishEnglishRequestsByRequestIdResponses[keyof GetFalAiKokoroBritishEnglishRequestsByRequestIdResponses] + +export type GetFalAiKokoroFrenchRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kokoro/french/requests/{request_id}/status' +} + +export type GetFalAiKokoroFrenchRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiKokoroFrenchRequestsByRequestIdStatusResponse = + GetFalAiKokoroFrenchRequestsByRequestIdStatusResponses[keyof GetFalAiKokoroFrenchRequestsByRequestIdStatusResponses] + +export type PutFalAiKokoroFrenchRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kokoro/french/requests/{request_id}/cancel' +} + +export type PutFalAiKokoroFrenchRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiKokoroFrenchRequestsByRequestIdCancelResponse = + PutFalAiKokoroFrenchRequestsByRequestIdCancelResponses[keyof PutFalAiKokoroFrenchRequestsByRequestIdCancelResponses] + +export type PostFalAiKokoroFrenchData = { + body: SchemaKokoroFrenchInput + path?: never + query?: never + url: '/fal-ai/kokoro/french' +} + +export type PostFalAiKokoroFrenchResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKokoroFrenchResponse = + PostFalAiKokoroFrenchResponses[keyof PostFalAiKokoroFrenchResponses] + +export type GetFalAiKokoroFrenchRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kokoro/french/requests/{request_id}' +} + +export type GetFalAiKokoroFrenchRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaKokoroFrenchOutput +} + +export type GetFalAiKokoroFrenchRequestsByRequestIdResponse = + GetFalAiKokoroFrenchRequestsByRequestIdResponses[keyof GetFalAiKokoroFrenchRequestsByRequestIdResponses] + +export type GetFalAiKokoroJapaneseRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kokoro/japanese/requests/{request_id}/status' +} + +export type GetFalAiKokoroJapaneseRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiKokoroJapaneseRequestsByRequestIdStatusResponse = + GetFalAiKokoroJapaneseRequestsByRequestIdStatusResponses[keyof GetFalAiKokoroJapaneseRequestsByRequestIdStatusResponses] + +export type PutFalAiKokoroJapaneseRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kokoro/japanese/requests/{request_id}/cancel' +} + +export type PutFalAiKokoroJapaneseRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiKokoroJapaneseRequestsByRequestIdCancelResponse = + PutFalAiKokoroJapaneseRequestsByRequestIdCancelResponses[keyof PutFalAiKokoroJapaneseRequestsByRequestIdCancelResponses] + +export type PostFalAiKokoroJapaneseData = { + body: SchemaKokoroJapaneseInput + path?: never + query?: never + url: '/fal-ai/kokoro/japanese' +} + +export type PostFalAiKokoroJapaneseResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKokoroJapaneseResponse = + PostFalAiKokoroJapaneseResponses[keyof PostFalAiKokoroJapaneseResponses] + +export type GetFalAiKokoroJapaneseRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kokoro/japanese/requests/{request_id}' +} + +export type GetFalAiKokoroJapaneseRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaKokoroJapaneseOutput +} + +export type GetFalAiKokoroJapaneseRequestsByRequestIdResponse = + GetFalAiKokoroJapaneseRequestsByRequestIdResponses[keyof GetFalAiKokoroJapaneseRequestsByRequestIdResponses] + +export type GetFalAiKokoroAmericanEnglishRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kokoro/american-english/requests/{request_id}/status' +} + +export type GetFalAiKokoroAmericanEnglishRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiKokoroAmericanEnglishRequestsByRequestIdStatusResponse = + GetFalAiKokoroAmericanEnglishRequestsByRequestIdStatusResponses[keyof GetFalAiKokoroAmericanEnglishRequestsByRequestIdStatusResponses] + +export type PutFalAiKokoroAmericanEnglishRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kokoro/american-english/requests/{request_id}/cancel' +} + +export type PutFalAiKokoroAmericanEnglishRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. 
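+ *
+ * A hand-written usage sketch (not emitted by the generator): cancelling a
+ * queued request and reading the optional `success` flag. Base URL and auth
+ * header are assumed, as elsewhere in this file.
+ *
+ * @example
+ * declare const requestId: string
+ * const res = await fetch(
+ *   `https://queue.fal.run/fal-ai/kokoro/american-english/requests/${requestId}/cancel`,
+ *   { method: 'PUT', headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+ * )
+ * const body: PutFalAiKokoroAmericanEnglishRequestsByRequestIdCancelResponse =
+ *   await res.json()
+ * if (body.success) console.log('request cancelled')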
+ */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiKokoroAmericanEnglishRequestsByRequestIdCancelResponse = + PutFalAiKokoroAmericanEnglishRequestsByRequestIdCancelResponses[keyof PutFalAiKokoroAmericanEnglishRequestsByRequestIdCancelResponses] + +export type PostFalAiKokoroAmericanEnglishData = { + body: SchemaKokoroAmericanEnglishInput + path?: never + query?: never + url: '/fal-ai/kokoro/american-english' +} + +export type PostFalAiKokoroAmericanEnglishResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKokoroAmericanEnglishResponse = + PostFalAiKokoroAmericanEnglishResponses[keyof PostFalAiKokoroAmericanEnglishResponses] + +export type GetFalAiKokoroAmericanEnglishRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kokoro/american-english/requests/{request_id}' +} + +export type GetFalAiKokoroAmericanEnglishRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaKokoroAmericanEnglishOutput +} + +export type GetFalAiKokoroAmericanEnglishRequestsByRequestIdResponse = + GetFalAiKokoroAmericanEnglishRequestsByRequestIdResponses[keyof GetFalAiKokoroAmericanEnglishRequestsByRequestIdResponses] + +export type GetFalAiZonosRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/zonos/requests/{request_id}/status' +} + +export type GetFalAiZonosRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiZonosRequestsByRequestIdStatusResponse = + GetFalAiZonosRequestsByRequestIdStatusResponses[keyof GetFalAiZonosRequestsByRequestIdStatusResponses] + +export type PutFalAiZonosRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/zonos/requests/{request_id}/cancel' +} + +export type PutFalAiZonosRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiZonosRequestsByRequestIdCancelResponse = + PutFalAiZonosRequestsByRequestIdCancelResponses[keyof PutFalAiZonosRequestsByRequestIdCancelResponses] + +export type PostFalAiZonosData = { + body: SchemaZonosInput + path?: never + query?: never + url: '/fal-ai/zonos' +} + +export type PostFalAiZonosResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiZonosResponse = + PostFalAiZonosResponses[keyof PostFalAiZonosResponses] + +export type GetFalAiZonosRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/zonos/requests/{request_id}' +} + +export type GetFalAiZonosRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaZonosOutput +} + +export type GetFalAiZonosRequestsByRequestIdResponse = + GetFalAiZonosRequestsByRequestIdResponses[keyof GetFalAiZonosRequestsByRequestIdResponses] + +export type GetFalAiKokoroItalianRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kokoro/italian/requests/{request_id}/status' +} + +export type GetFalAiKokoroItalianRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiKokoroItalianRequestsByRequestIdStatusResponse = + GetFalAiKokoroItalianRequestsByRequestIdStatusResponses[keyof GetFalAiKokoroItalianRequestsByRequestIdStatusResponses] + +export type PutFalAiKokoroItalianRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kokoro/italian/requests/{request_id}/cancel' +} + +export type PutFalAiKokoroItalianRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiKokoroItalianRequestsByRequestIdCancelResponse = + PutFalAiKokoroItalianRequestsByRequestIdCancelResponses[keyof PutFalAiKokoroItalianRequestsByRequestIdCancelResponses] + +export type PostFalAiKokoroItalianData = { + body: SchemaKokoroItalianInput + path?: never + query?: never + url: '/fal-ai/kokoro/italian' +} + +export type PostFalAiKokoroItalianResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKokoroItalianResponse = + PostFalAiKokoroItalianResponses[keyof PostFalAiKokoroItalianResponses] + +export type GetFalAiKokoroItalianRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kokoro/italian/requests/{request_id}' +} + +export type GetFalAiKokoroItalianRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaKokoroItalianOutput +} + +export type GetFalAiKokoroItalianRequestsByRequestIdResponse = + GetFalAiKokoroItalianRequestsByRequestIdResponses[keyof GetFalAiKokoroItalianRequestsByRequestIdResponses] + +export type GetFalAiYueRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/yue/requests/{request_id}/status' +} + +export type GetFalAiYueRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiYueRequestsByRequestIdStatusResponse = + GetFalAiYueRequestsByRequestIdStatusResponses[keyof GetFalAiYueRequestsByRequestIdStatusResponses] + +export type PutFalAiYueRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/yue/requests/{request_id}/cancel' +} + +export type PutFalAiYueRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiYueRequestsByRequestIdCancelResponse = + PutFalAiYueRequestsByRequestIdCancelResponses[keyof PutFalAiYueRequestsByRequestIdCancelResponses] + +export type PostFalAiYueData = { + body: SchemaYueInput + path?: never + query?: never + url: '/fal-ai/yue' +} + +export type PostFalAiYueResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiYueResponse = + PostFalAiYueResponses[keyof PostFalAiYueResponses] + +export type GetFalAiYueRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/yue/requests/{request_id}' +} + +export type GetFalAiYueRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaYueOutput +} + +export type GetFalAiYueRequestsByRequestIdResponse = + GetFalAiYueRequestsByRequestIdResponses[keyof GetFalAiYueRequestsByRequestIdResponses] + +export type GetFalAiMmaudioV2TextToAudioRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/mmaudio-v2/text-to-audio/requests/{request_id}/status' +} + +export type GetFalAiMmaudioV2TextToAudioRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiMmaudioV2TextToAudioRequestsByRequestIdStatusResponse = + GetFalAiMmaudioV2TextToAudioRequestsByRequestIdStatusResponses[keyof GetFalAiMmaudioV2TextToAudioRequestsByRequestIdStatusResponses] + +export type PutFalAiMmaudioV2TextToAudioRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/mmaudio-v2/text-to-audio/requests/{request_id}/cancel' +} + +export type PutFalAiMmaudioV2TextToAudioRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiMmaudioV2TextToAudioRequestsByRequestIdCancelResponse = + PutFalAiMmaudioV2TextToAudioRequestsByRequestIdCancelResponses[keyof PutFalAiMmaudioV2TextToAudioRequestsByRequestIdCancelResponses] + +export type PostFalAiMmaudioV2TextToAudioData = { + body: SchemaMmaudioV2TextToAudioInput + path?: never + query?: never + url: '/fal-ai/mmaudio-v2/text-to-audio' +} + +export type PostFalAiMmaudioV2TextToAudioResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMmaudioV2TextToAudioResponse = + PostFalAiMmaudioV2TextToAudioResponses[keyof PostFalAiMmaudioV2TextToAudioResponses] + +export type GetFalAiMmaudioV2TextToAudioRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/mmaudio-v2/text-to-audio/requests/{request_id}' +} + +export type GetFalAiMmaudioV2TextToAudioRequestsByRequestIdResponses = { + /** + * Result of the request. 
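+ *
+ * A hand-written end-to-end sketch (not emitted by the generator): submit,
+ * poll until COMPLETED, then fetch the result. The one-second interval, base
+ * URL, and auth header are assumptions; real code would add timeouts and
+ * error handling.
+ *
+ * @example
+ * declare const input: SchemaMmaudioV2TextToAudioInput
+ * const base = 'https://queue.fal.run'
+ * const headers = { Authorization: `Key ${process.env.FAL_KEY}` }
+ * const submitted: SchemaQueueStatus = await (
+ *   await fetch(`${base}/fal-ai/mmaudio-v2/text-to-audio`, {
+ *     method: 'POST',
+ *     headers: { ...headers, 'Content-Type': 'application/json' },
+ *     body: JSON.stringify(input),
+ *   })
+ * ).json()
+ * let status = submitted
+ * while (status.status !== 'COMPLETED') {
+ *   await new Promise((r) => setTimeout(r, 1000))
+ *   status = await (
+ *     await fetch(
+ *       `${base}/fal-ai/mmaudio-v2/text-to-audio/requests/${submitted.request_id}/status`,
+ *       { headers },
+ *     )
+ *   ).json()
+ * }
+ * const output: SchemaMmaudioV2TextToAudioOutput = await (
+ *   await fetch(
+ *     `${base}/fal-ai/mmaudio-v2/text-to-audio/requests/${submitted.request_id}`,
+ *     { headers },
+ *   )
+ * ).json()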
+ */ + 200: SchemaMmaudioV2TextToAudioOutput +} + +export type GetFalAiMmaudioV2TextToAudioRequestsByRequestIdResponse = + GetFalAiMmaudioV2TextToAudioRequestsByRequestIdResponses[keyof GetFalAiMmaudioV2TextToAudioRequestsByRequestIdResponses] + +export type GetFalAiMinimaxMusicRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/minimax-music/requests/{request_id}/status' +} + +export type GetFalAiMinimaxMusicRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiMinimaxMusicRequestsByRequestIdStatusResponse = + GetFalAiMinimaxMusicRequestsByRequestIdStatusResponses[keyof GetFalAiMinimaxMusicRequestsByRequestIdStatusResponses] + +export type PutFalAiMinimaxMusicRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax-music/requests/{request_id}/cancel' +} + +export type PutFalAiMinimaxMusicRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiMinimaxMusicRequestsByRequestIdCancelResponse = + PutFalAiMinimaxMusicRequestsByRequestIdCancelResponses[keyof PutFalAiMinimaxMusicRequestsByRequestIdCancelResponses] + +export type PostFalAiMinimaxMusicData = { + body: SchemaMinimaxMusicInput + path?: never + query?: never + url: '/fal-ai/minimax-music' +} + +export type PostFalAiMinimaxMusicResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMinimaxMusicResponse = + PostFalAiMinimaxMusicResponses[keyof PostFalAiMinimaxMusicResponses] + +export type GetFalAiMinimaxMusicRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax-music/requests/{request_id}' +} + +export type GetFalAiMinimaxMusicRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMinimaxMusicOutput +} + +export type GetFalAiMinimaxMusicRequestsByRequestIdResponse = + GetFalAiMinimaxMusicRequestsByRequestIdResponses[keyof GetFalAiMinimaxMusicRequestsByRequestIdResponses] + +export type GetFalAiF5TtsRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/f5-tts/requests/{request_id}/status' +} + +export type GetFalAiF5TtsRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiF5TtsRequestsByRequestIdStatusResponse = + GetFalAiF5TtsRequestsByRequestIdStatusResponses[keyof GetFalAiF5TtsRequestsByRequestIdStatusResponses] + +export type PutFalAiF5TtsRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/f5-tts/requests/{request_id}/cancel' +} + +export type PutFalAiF5TtsRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiF5TtsRequestsByRequestIdCancelResponse = + PutFalAiF5TtsRequestsByRequestIdCancelResponses[keyof PutFalAiF5TtsRequestsByRequestIdCancelResponses] + +export type PostFalAiF5TtsData = { + body: SchemaF5TtsInput + path?: never + query?: never + url: '/fal-ai/f5-tts' +} + +export type PostFalAiF5TtsResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiF5TtsResponse = + PostFalAiF5TtsResponses[keyof PostFalAiF5TtsResponses] + +export type GetFalAiF5TtsRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/f5-tts/requests/{request_id}' +} + +export type GetFalAiF5TtsRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaF5TtsOutput +} + +export type GetFalAiF5TtsRequestsByRequestIdResponse = + GetFalAiF5TtsRequestsByRequestIdResponses[keyof GetFalAiF5TtsRequestsByRequestIdResponses] + +export type GetFalAiStableAudioRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/stable-audio/requests/{request_id}/status' +} + +export type GetFalAiStableAudioRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiStableAudioRequestsByRequestIdStatusResponse = + GetFalAiStableAudioRequestsByRequestIdStatusResponses[keyof GetFalAiStableAudioRequestsByRequestIdStatusResponses] + +export type PutFalAiStableAudioRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/stable-audio/requests/{request_id}/cancel' +} + +export type PutFalAiStableAudioRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiStableAudioRequestsByRequestIdCancelResponse = + PutFalAiStableAudioRequestsByRequestIdCancelResponses[keyof PutFalAiStableAudioRequestsByRequestIdCancelResponses] + +export type PostFalAiStableAudioData = { + body: SchemaStableAudioInput + path?: never + query?: never + url: '/fal-ai/stable-audio' +} + +export type PostFalAiStableAudioResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiStableAudioResponse = + PostFalAiStableAudioResponses[keyof PostFalAiStableAudioResponses] + +export type GetFalAiStableAudioRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/stable-audio/requests/{request_id}' +} + +export type GetFalAiStableAudioRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaStableAudioOutput +} + +export type GetFalAiStableAudioRequestsByRequestIdResponse = + GetFalAiStableAudioRequestsByRequestIdResponses[keyof GetFalAiStableAudioRequestsByRequestIdResponses] diff --git a/packages/typescript/ai-fal/src/generated/text-to-audio/zod.gen.ts b/packages/typescript/ai-fal/src/generated/text-to-audio/zod.gen.ts new file mode 100644 index 00000000..a4c45b39 --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/text-to-audio/zod.gen.ts @@ -0,0 +1,4373 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { z } from 'zod' + +/** + * File + */ +export const zSchemaFile = z.object({ + file_size: z.optional(z.union([z.int(), z.unknown()])), + file_name: z.optional(z.union([z.string(), z.unknown()])), + content_type: z.optional(z.union([z.string(), z.unknown()])), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), +}) + +/** + * Output + */ +export const zSchemaStableAudioOutput = z.object({ + audio_file: zSchemaFile, +}) + +/** + * Input + */ +export const zSchemaStableAudioInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate audio from', + }), + steps: z + .optional( + z.int().gte(1).lte(1000).register(z.globalRegistry, { + description: 'The number of steps to denoise the audio for', + }), + ) + .default(100), + seconds_total: z + .optional( + z.int().gte(0).lte(47).register(z.globalRegistry, { + description: 'The duration of the audio clip to generate', + }), + ) + .default(30), + seconds_start: z + .optional( + z.int().gte(0).lte(47).register(z.globalRegistry, { + description: 'The start point of the audio clip to generate', + }), + ) + .default(0), +}) + +/** + * AudioFile + */ +export const zSchemaAudioFile = z.object({ + file_size: z.optional(z.union([z.int(), z.unknown()])), + file_name: z + .optional(z.string()) + .default('8535dd59e911496a947daa35c07e67a3_tmplkcy6tut.wav'), + content_type: z.optional(z.string()).default('audio/wav'), + url: z.string(), +}) + +/** + * TTSOutput + */ +export const zSchemaF5TtsOutput = z.object({ + audio_url: zSchemaAudioFile, +}) + +/** + * TTSInput + */ +export const zSchemaF5TtsInput = z.object({ + ref_text: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'The reference text to be used for TTS. If not provided, an ASR (Automatic Speech Recognition) model will be used to generate the reference text.', + }), + ) + .default(''), + remove_silence: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to remove the silence from the audio file.', + }), + ) + .default(true), + gen_text: z.string().register(z.globalRegistry, { + description: 'The text to be converted to speech.', + }), + model_type: z.enum(['F5-TTS', 'E2-TTS']).register(z.globalRegistry, { + description: 'The name of the model to be used for TTS.', + }), + ref_audio_url: z.string().register(z.globalRegistry, { + description: 'The URL of the reference audio file.', + }), +}) + +/** + * MusicOutput + */ +export const zSchemaMinimaxMusicOutput = z.object({ + audio: zSchemaFile, +}) + +/** + * TextToMusicRequest + */ +export const zSchemaMinimaxMusicInput = z.object({ + prompt: z.string().min(1).max(600).register(z.globalRegistry, { + description: + 'Lyrics with optional formatting. You can use a newline to separate each line of lyrics. You can use two newlines to add a pause between lines. 
You can use double hash marks (##) at the beginning and end of the lyrics to add accompaniment. Maximum 600 characters.', + }), + reference_audio_url: z.string().register(z.globalRegistry, { + description: + 'Reference song, should contain music and vocals. Must be a .wav or .mp3 file longer than 15 seconds.', + }), +}) + +/** + * AudioOutput + */ +export const zSchemaMmaudioV2TextToAudioOutput = z.object({ + audio: zSchemaFile, +}) + +/** + * AudioInput + */ +export const zSchemaMmaudioV2TextToAudioInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the audio for.', + }), + num_steps: z + .optional( + z.int().gte(4).lte(50).register(z.globalRegistry, { + description: 'The number of steps to generate the audio for.', + }), + ) + .default(25), + duration: z + .optional( + z.number().gte(1).lte(30).register(z.globalRegistry, { + description: 'The duration of the audio to generate.', + }), + ) + .default(8), + cfg_strength: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: 'The strength of Classifier Free Guidance.', + }), + ) + .default(4.5), + seed: z.optional( + z.int().gte(0).lte(65535).register(z.globalRegistry, { + description: 'The seed for the random number generator', + }), + ), + mask_away_clip: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to mask away the clip.', + }), + ) + .default(false), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to generate the audio for.', + }), + ) + .default(''), +}) + +/** + * Output + */ +export const zSchemaYueOutput = z.object({ + audio: zSchemaFile, +}) + +/** + * TextToMusicInput + */ +export const zSchemaYueInput = z.object({ + lyrics: z.string().register(z.globalRegistry, { + description: + 'The prompt to generate the song from. Must have two sections. Sections start with either [chorus] or a [verse].', + }), + genres: z.string().register(z.globalRegistry, { + description: + "The genres (separated by a space ' ') to guide the music generation.", + }), +}) + +/** + * ItalianOutput + */ +export const zSchemaKokoroItalianOutput = z.object({ + audio: zSchemaFile, +}) + +/** + * ItalianRequest + */ +export const zSchemaKokoroItalianInput = z.object({ + prompt: z.string(), + voice: z.enum(['if_sara', 'im_nicola']).register(z.globalRegistry, { + description: 'Voice ID for the desired voice.', + }), + speed: z + .optional( + z.number().gte(0.1).lte(5).register(z.globalRegistry, { + description: 'Speed of the generated audio. 
Default is 1.0.', + }), + ) + .default(1), +}) + +/** + * ZonosOutput + */ +export const zSchemaZonosOutput = z.object({ + audio: zSchemaFile, +}) + +/** + * ZonosInput + */ +export const zSchemaZonosInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The content generated using cloned voice.', + }), + reference_audio_url: z.string().register(z.globalRegistry, { + description: 'The reference audio.', + }), +}) + +/** + * AmEngOutput + */ +export const zSchemaKokoroAmericanEnglishOutput = z.object({ + audio: zSchemaFile, +}) + +/** + * AmEnglishRequest + */ +export const zSchemaKokoroAmericanEnglishInput = z.object({ + prompt: z.optional(z.string()).default(''), + voice: z.optional( + z + .enum([ + 'af_heart', + 'af_alloy', + 'af_aoede', + 'af_bella', + 'af_jessica', + 'af_kore', + 'af_nicole', + 'af_nova', + 'af_river', + 'af_sarah', + 'af_sky', + 'am_adam', + 'am_echo', + 'am_eric', + 'am_fenrir', + 'am_liam', + 'am_michael', + 'am_onyx', + 'am_puck', + 'am_santa', + ]) + .register(z.globalRegistry, { + description: 'Voice ID for the desired voice.', + }), + ), + speed: z + .optional( + z.number().gte(0.1).lte(5).register(z.globalRegistry, { + description: 'Speed of the generated audio. Default is 1.0.', + }), + ) + .default(1), +}) + +/** + * JapaneseOutput + */ +export const zSchemaKokoroJapaneseOutput = z.object({ + audio: zSchemaFile, +}) + +/** + * JapaneseRequest + */ +export const zSchemaKokoroJapaneseInput = z.object({ + prompt: z.string(), + voice: z + .enum(['jf_alpha', 'jf_gongitsune', 'jf_nezumi', 'jf_tebukuro', 'jm_kumo']) + .register(z.globalRegistry, { + description: 'Voice ID for the desired voice.', + }), + speed: z + .optional( + z.number().gte(0.1).lte(5).register(z.globalRegistry, { + description: 'Speed of the generated audio. Default is 1.0.', + }), + ) + .default(1), +}) + +/** + * FrenchOutput + */ +export const zSchemaKokoroFrenchOutput = z.object({ + audio: zSchemaFile, +}) + +/** + * FrenchRequest + */ +export const zSchemaKokoroFrenchInput = z.object({ + prompt: z.string(), + voice: z.enum(['ff_siwis']).register(z.globalRegistry, { + description: 'Voice ID for the desired voice.', + }), + speed: z + .optional( + z.number().gte(0.1).lte(5).register(z.globalRegistry, { + description: 'Speed of the generated audio. Default is 1.0.', + }), + ) + .default(1), +}) + +/** + * BrEngOutput + */ +export const zSchemaKokoroBritishEnglishOutput = z.object({ + audio: zSchemaFile, +}) + +/** + * BrEnglishRequest + */ +export const zSchemaKokoroBritishEnglishInput = z.object({ + prompt: z.string(), + voice: z + .enum([ + 'bf_alice', + 'bf_emma', + 'bf_isabella', + 'bf_lily', + 'bm_daniel', + 'bm_fable', + 'bm_george', + 'bm_lewis', + ]) + .register(z.globalRegistry, { + description: 'Voice ID for the desired voice.', + }), + speed: z + .optional( + z.number().gte(0.1).lte(5).register(z.globalRegistry, { + description: 'Speed of the generated audio. Default is 1.0.', + }), + ) + .default(1), +}) + +/** + * BrPortugeseOutput + */ +export const zSchemaKokoroBrazilianPortugueseOutput = z.object({ + audio: zSchemaFile, +}) + +/** + * BrPortugueseRequest + */ +export const zSchemaKokoroBrazilianPortugueseInput = z.object({ + prompt: z.string(), + voice: z.enum(['pf_dora', 'pm_alex', 'pm_santa']).register(z.globalRegistry, { + description: 'Voice ID for the desired voice.', + }), + speed: z + .optional( + z.number().gte(0.1).lte(5).register(z.globalRegistry, { + description: 'Speed of the generated audio. 
Default is 1.0.', + }), + ) + .default(1), +}) + +/** + * SpanishOutput + */ +export const zSchemaKokoroSpanishOutput = z.object({ + audio: zSchemaFile, +}) + +/** + * SpanishRequest + */ +export const zSchemaKokoroSpanishInput = z.object({ + prompt: z.string(), + voice: z.enum(['ef_dora', 'em_alex', 'em_santa']).register(z.globalRegistry, { + description: 'Voice ID for the desired voice.', + }), + speed: z + .optional( + z.number().gte(0.1).lte(5).register(z.globalRegistry, { + description: 'Speed of the generated audio. Default is 1.0.', + }), + ) + .default(1), +}) + +/** + * MandarinOutput + */ +export const zSchemaKokoroMandarinChineseOutput = z.object({ + audio: zSchemaFile, +}) + +/** + * MandarinRequest + */ +export const zSchemaKokoroMandarinChineseInput = z.object({ + prompt: z.string(), + voice: z + .enum([ + 'zf_xiaobei', + 'zf_xiaoni', + 'zf_xiaoxiao', + 'zf_xiaoyi', + 'zm_yunjian', + 'zm_yunxi', + 'zm_yunxia', + 'zm_yunyang', + ]) + .register(z.globalRegistry, { + description: 'Voice ID for the desired voice.', + }), + speed: z + .optional( + z.number().gte(0.1).lte(5).register(z.globalRegistry, { + description: 'Speed of the generated audio. Default is 1.0.', + }), + ) + .default(1), +}) + +/** + * HindiOutput + */ +export const zSchemaKokoroHindiOutput = z.object({ + audio: zSchemaFile, +}) + +/** + * HindiRequest + */ +export const zSchemaKokoroHindiInput = z.object({ + prompt: z.string(), + voice: z + .enum(['hf_alpha', 'hf_beta', 'hm_omega', 'hm_psi']) + .register(z.globalRegistry, { + description: 'Voice ID for the desired voice.', + }), + speed: z + .optional( + z.number().gte(0.1).lte(5).register(z.globalRegistry, { + description: 'Speed of the generated audio. Default is 1.0.', + }), + ) + .default(1), +}) + +/** + * TTSOutput + */ +export const zSchemaElevenlabsTtsMultilingualV2Output = z.object({ + audio: zSchemaFile, + timestamps: z.optional(z.union([z.array(z.unknown()), z.unknown()])), +}) + +/** + * TextToSpeechRequest + */ +export const zSchemaElevenlabsTtsMultilingualV2Input = z.object({ + stability: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Voice stability (0-1)', + }), + ) + .default(0.5), + next_text: z.optional(z.union([z.string(), z.unknown()])), + speed: z + .optional( + z.number().gte(0.7).lte(1.2).register(z.globalRegistry, { + description: + 'Speech speed (0.7-1.2). Values below 1.0 slow down the speech, above 1.0 speed it up. 
Extreme values may affect quality.', + }), + ) + .default(1), + style: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Style exaggeration (0-1)', + }), + ) + .default(0), + text: z.string().min(1).register(z.globalRegistry, { + description: 'The text to convert to speech', + }), + timestamps: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to return timestamps for each word in the generated speech', + }), + ) + .default(false), + similarity_boost: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Similarity boost (0-1)', + }), + ) + .default(0.75), + voice: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The voice to use for speech generation', + }), + ) + .default('Rachel'), + language_code: z.optional(z.union([z.string(), z.unknown()])), + previous_text: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * Output + */ +export const zSchemaDiffrhythmOutput = z.object({ + audio: zSchemaFile, +}) + +/** + * TextToMusicInput + */ +export const zSchemaDiffrhythmInput = z.object({ + lyrics: z.string().register(z.globalRegistry, { + description: + 'The prompt to generate the song from. Must have two sections. Sections start with either [chorus] or a [verse].', + }), + cfg_strength: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'The CFG strength to use for the music generation.', + }), + ) + .default(4), + reference_audio_url: z.optional( + z.string().register(z.globalRegistry, { + description: + 'The URL of the reference audio to use for the music generation.', + }), + ), + music_duration: z.optional( + z.enum(['95s', '285s']).register(z.globalRegistry, { + description: 'The duration of the music to generate.', + }), + ), + scheduler: z.optional( + z + .enum(['euler', 'midpoint', 'rk4', 'implicit_adams']) + .register(z.globalRegistry, { + description: 'The scheduler to use for the music generation.', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(10).lte(100).register(z.globalRegistry, { + description: + 'The number of inference steps to use for the music generation.', + }), + ) + .default(32), + style_prompt: z.optional( + z.string().register(z.globalRegistry, { + description: 'The style prompt to use for the music generation.', + }), + ), +}) + +/** + * Speaker + */ +export const zSchemaSpeaker = z.object({ + prompt: z.string(), + audio_url: z.string(), + speaker_id: z.int(), +}) + +/** + * Turn + */ +export const zSchemaTurn = z.object({ + text: z.string(), + speaker_id: z.int(), +}) + +/** + * Output + */ +export const zSchemaCsm1bOutput = z.object({ + audio: z.union([zSchemaFile, z.string()]), +}) + +/** + * Input + */ +export const zSchemaCsm1bInput = z.object({ + scene: z.array(zSchemaTurn).register(z.globalRegistry, { + description: 'The text to generate an audio from.', + }), + context: z.optional( + z.array(zSchemaSpeaker).register(z.globalRegistry, { + description: 'The context to generate an audio from.', + }), + ), +}) + +/** + * AudioOutput + * + * Example Pydantic model showing how to include a File in the output. 
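+ *
+ * A minimal validation sketch (the URL below is a placeholder): the schema
+ * accepts any payload whose `audio_file.url` is a string; the remaining
+ * file fields are optional.
+ *
+ * @example
+ * const output = zSchemaMusicGeneratorOutput.parse({
+ *   audio_file: { url: 'https://example.com/track.wav' },
+ * })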
+ */ +export const zSchemaMusicGeneratorOutput = z + .object({ + audio_file: zSchemaFile, + }) + .register(z.globalRegistry, { + description: + 'Example Pydantic model showing how to include a File in the output.', + }) + +/** + * Input + */ +export const zSchemaMusicGeneratorInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate music from.', + }), + duration: z.int().gte(10).lte(180).register(z.globalRegistry, { + description: 'The duration of the generated music in seconds.', + }), +}) + +/** + * AudioOutput + * + * Example Pydantic model showing how to include a File in the output. + */ +export const zSchemaSoundEffectsGeneratorOutput = z + .object({ + audio_file: zSchemaFile, + }) + .register(z.globalRegistry, { + description: + 'Example Pydantic model showing how to include a File in the output.', + }) + +/** + * Input + */ +export const zSchemaSoundEffectsGeneratorInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate SFX.', + }), + duration: z.int().gte(1).lte(30).register(z.globalRegistry, { + description: 'The duration of the generated SFX in seconds.', + }), +}) + +/** + * ACEStepResponse + */ +export const zSchemaAceStepOutput = z.object({ + tags: z.string().register(z.globalRegistry, { + description: 'The genre tags used in the generation process.', + }), + lyrics: z.string().register(z.globalRegistry, { + description: 'The lyrics used in the generation process.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The random seed used for the generation process.', + }), + audio: zSchemaFile, +}) + +/** + * ACEStepTextToAudioRequest + */ +export const zSchemaAceStepInput = z.object({ + number_of_steps: z + .optional( + z.int().gte(3).lte(60).register(z.globalRegistry, { + description: 'Number of steps to generate the audio.', + }), + ) + .default(27), + duration: z + .optional( + z.number().gte(5).lte(240).register(z.globalRegistry, { + description: 'The duration of the generated audio in seconds.', + }), + ) + .default(60), + tags: z.string().register(z.globalRegistry, { + description: + 'Comma-separated list of genre tags to control the style of the generated audio.', + }), + minimum_guidance_scale: z + .optional( + z.number().gte(0).lte(200).register(z.globalRegistry, { + description: + 'Minimum guidance scale for the generation after the decay.', + }), + ) + .default(3), + lyrics: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Lyrics to be sung in the audio. If not provided or if [inst] or [instrumental] is the content of this field, no lyrics will be sung. 
Use control structures like [verse], [chorus] and [bridge] to control the structure of the song.', + }), + ) + .default(''), + tag_guidance_scale: z + .optional( + z.number().gte(0).lte(10).register(z.globalRegistry, { + description: 'Tag guidance scale for the generation.', + }), + ) + .default(5), + scheduler: z.optional( + z.enum(['euler', 'heun']).register(z.globalRegistry, { + description: 'Scheduler to use for the generation process.', + }), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(200).register(z.globalRegistry, { + description: 'Guidance scale for the generation.', + }), + ) + .default(15), + guidance_type: z.optional( + z.enum(['cfg', 'apg', 'cfg_star']).register(z.globalRegistry, { + description: 'Type of CFG to use for the generation process.', + }), + ), + lyric_guidance_scale: z + .optional( + z.number().gte(0).lte(10).register(z.globalRegistry, { + description: 'Lyric guidance scale for the generation.', + }), + ) + .default(1.5), + guidance_interval: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Guidance interval for the generation. 0.5 means only apply guidance in the middle steps (0.25 * infer_steps to 0.75 * infer_steps)', + }), + ) + .default(0.5), + guidance_interval_decay: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Guidance interval decay for the generation. Guidance scale will decay from guidance_scale to min_guidance_scale in the interval. 0.0 means no decay.', + }), + ) + .default(0), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If not provided, a random seed will be used.', + }), + ), + granularity_scale: z + .optional( + z.int().gte(-100).lte(100).register(z.globalRegistry, { + description: + 'Granularity scale for the generation process. Higher values can reduce artifacts.', + }), + ) + .default(10), +}) + +/** + * ACEStepResponse + */ +export const zSchemaAceStepPromptToAudioOutput = z.object({ + tags: z.string().register(z.globalRegistry, { + description: 'The genre tags used in the generation process.', + }), + lyrics: z.string().register(z.globalRegistry, { + description: 'The lyrics used in the generation process.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The random seed used for the generation process.', + }), + audio: zSchemaFile, +}) + +/** + * ACEStepPromptToAudioRequest + */ +export const zSchemaAceStepPromptToAudioInput = z.object({ + number_of_steps: z + .optional( + z.int().gte(3).lte(60).register(z.globalRegistry, { + description: 'Number of steps to generate the audio.', + }), + ) + .default(27), + duration: z + .optional( + z.number().gte(5).lte(240).register(z.globalRegistry, { + description: 'The duration of the generated audio in seconds.', + }), + ) + .default(60), + prompt: z.string().register(z.globalRegistry, { + description: + 'Prompt to control the style of the generated audio. 
This will be used to generate tags and lyrics.', + }), + minimum_guidance_scale: z + .optional( + z.number().gte(0).lte(200).register(z.globalRegistry, { + description: + 'Minimum guidance scale for the generation after the decay.', + }), + ) + .default(3), + tag_guidance_scale: z + .optional( + z.number().gte(0).lte(10).register(z.globalRegistry, { + description: 'Tag guidance scale for the generation.', + }), + ) + .default(5), + scheduler: z.optional( + z.enum(['euler', 'heun']).register(z.globalRegistry, { + description: 'Scheduler to use for the generation process.', + }), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(200).register(z.globalRegistry, { + description: 'Guidance scale for the generation.', + }), + ) + .default(15), + guidance_type: z.optional( + z.enum(['cfg', 'apg', 'cfg_star']).register(z.globalRegistry, { + description: 'Type of CFG to use for the generation process.', + }), + ), + instrumental: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to generate an instrumental version of the audio.', + }), + ) + .default(false), + lyric_guidance_scale: z + .optional( + z.number().gte(0).lte(10).register(z.globalRegistry, { + description: 'Lyric guidance scale for the generation.', + }), + ) + .default(1.5), + guidance_interval: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Guidance interval for the generation. 0.5 means only apply guidance in the middle steps (0.25 * infer_steps to 0.75 * infer_steps)', + }), + ) + .default(0.5), + guidance_interval_decay: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Guidance interval decay for the generation. Guidance scale will decay from guidance_scale to min_guidance_scale in the interval. 0.0 means no decay.', + }), + ) + .default(0), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If not provided, a random seed will be used.', + }), + ), + granularity_scale: z + .optional( + z.int().gte(-100).lte(100).register(z.globalRegistry, { + description: + 'Granularity scale for the generation process. Higher values can reduce artifacts.', + }), + ) + .default(10), +}) + +/** + * TextToMusicOutput + */ +export const zSchemaLyria2Output = z.object({ + audio: zSchemaFile, +}) + +/** + * TextToMusicInput + */ +export const zSchemaLyria2Input = z.object({ + prompt: z.string().min(1).max(2000).register(z.globalRegistry, { + description: 'The text prompt describing the music you want to generate', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'A seed for deterministic generation. 
If provided, the model will attempt to produce the same audio given the same prompt and other parameters.', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'A description of what to exclude from the generated audio', + }), + ) + .default('low quality'), +}) + +/** + * TTSOutput + */ +export const zSchemaElevenlabsTtsElevenV3Output = z.object({ + audio: zSchemaFile, + timestamps: z.optional(z.union([z.array(z.unknown()), z.unknown()])), +}) + +/** + * TextToSpeechRequestV3 + * + * Request model for eleven_v3 which doesn't support previous_text/next_text + */ +export const zSchemaElevenlabsTtsElevenV3Input = z + .object({ + stability: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Voice stability (0-1)', + }), + ) + .default(0.5), + speed: z + .optional( + z.number().gte(0.7).lte(1.2).register(z.globalRegistry, { + description: + 'Speech speed (0.7-1.2). Values below 1.0 slow down the speech, above 1.0 speed it up. Extreme values may affect quality.', + }), + ) + .default(1), + text: z.string().min(1).max(5000).register(z.globalRegistry, { + description: 'The text to convert to speech', + }), + style: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Style exaggeration (0-1)', + }), + ) + .default(0), + timestamps: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to return timestamps for each word in the generated speech', + }), + ) + .default(false), + similarity_boost: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Similarity boost (0-1)', + }), + ) + .default(0.75), + voice: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The voice to use for speech generation', + }), + ) + .default('Rachel'), + language_code: z.optional(z.union([z.string(), z.unknown()])), + }) + .register(z.globalRegistry, { + description: + "Request model for eleven_v3 which doesn't support previous_text/next_text", + }) + +/** + * GenerateOutput + */ +export const zSchemaV2TextToMusicOutput = z.object({ + tags: z.optional(z.union([z.array(z.string()), z.unknown()])), + seed: z.int().register(z.globalRegistry, { + description: + 'The seed used for generation. This can be used to generate an identical song by passing the same parameters with this seed in a future request.', + }), + lyrics: z.optional(z.union([z.string(), z.unknown()])), + audio: z.array(zSchemaFile).register(z.globalRegistry, { + description: 'The generated audio files.', + }), +}) + +/** + * GenerateInput + */ +export const zSchemaV2TextToMusicInput = z.object({ + prompt: z.optional(z.union([z.string(), z.unknown()])), + lyrics_prompt: z.optional(z.union([z.string(), z.unknown()])), + tags: z.optional(z.union([z.array(z.string()), z.unknown()])), + prompt_strength: z + .optional( + z.number().gte(1.4).lte(3.1).register(z.globalRegistry, { + description: + 'Controls how strongly your prompt influences the output. Greater values adhere more to the prompt but sound less natural. (This is CFG.)', + }), + ) + .default(2), + output_bit_rate: z.optional( + z.union([ + z.union([z.literal(128), z.literal(192), z.literal(256), z.literal(320)]), + z.unknown(), + ]), + ), + num_songs: z + .optional( + z.int().gte(1).lte(2).register(z.globalRegistry, { + description: + 'Generating 2 songs costs 1.5x the price of generating 1 song. 
Also, note that using the same seed may not result in identical songs if the number of songs generated is changed.', + }), + ) + .default(1), + output_format: z.optional(z.enum(['flac', 'mp3', 'wav', 'ogg', 'm4a'])), + bpm: z.optional(z.union([z.int(), z.string(), z.unknown()])), + balance_strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Greater means more natural vocals. Lower means sharper instrumentals. We recommend 0.7.', + }), + ) + .default(0.7), + seed: z.optional( + z.union([ + z.int().gte(-9223372036854776000).lte(9223372036854776000), + z.unknown(), + ]), + ), +}) + +/** + * InpaintSection + */ +export const zSchemaInpaintSection = z.object({ + end: z.number().register(z.globalRegistry, { + description: 'End time in seconds of the section to inpaint.', + }), + start: z.number().gte(0).register(z.globalRegistry, { + description: 'Start time in seconds of the section to inpaint.', + }), +}) + +/** + * InpaintOutput + */ +export const zSchemaV2InpaintOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: + 'The seed used for generation. This can be used to generate an identical song by passing the same parameters with this seed in a future request.', + }), + audio: z.array(zSchemaFile).register(z.globalRegistry, { + description: 'The generated audio files.', + }), +}) + +/** + * InpaintInput + */ +export const zSchemaV2InpaintInput = z.object({ + lyrics_prompt: z.string().register(z.globalRegistry, { + description: + 'The lyrics sung in the generated song. An empty string will generate an instrumental track.', + }), + tags: z.optional( + z.array(z.string()).register(z.globalRegistry, { + description: + 'Tags/styles of the music to generate. You can view a list of all available tags at https://sonauto.ai/tag-explorer.', + }), + ), + prompt_strength: z + .optional( + z.number().gte(1.4).lte(3.1).register(z.globalRegistry, { + description: + 'Controls how strongly your prompt influences the output. Greater values adhere more to the prompt but sound less natural. (This is CFG.)', + }), + ) + .default(2), + output_bit_rate: z.optional( + z.union([ + z.union([z.literal(128), z.literal(192), z.literal(256), z.literal(320)]), + z.unknown(), + ]), + ), + num_songs: z + .optional( + z.int().gte(1).lte(2).register(z.globalRegistry, { + description: + 'Generating 2 songs costs 1.5x the price of generating 1 song. Also, note that using the same seed may not result in identical songs if the number of songs generated is changed.', + }), + ) + .default(1), + output_format: z.optional(z.enum(['flac', 'mp3', 'wav', 'ogg', 'm4a'])), + selection_crop: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Crop to the selected region', + }), + ) + .default(false), + sections: z.array(zSchemaInpaintSection).register(z.globalRegistry, { + description: + 'List of sections to inpaint. Currently, only one section is supported so the list length must be 1.', + }), + balance_strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Greater means more natural vocals. Lower means sharper instrumentals. We recommend 0.7.', + }), + ) + .default(0.7), + audio_url: z.url().min(1).max(2083).register(z.globalRegistry, { + description: + 'The URL of the audio file to alter. 
Must be a valid publicly accessible URL.', + }), + seed: z.optional( + z.union([ + z.int().gte(-9223372036854776000).lte(9223372036854776000), + z.unknown(), + ]), + ), +}) + +/** + * SoundEffectOutput + * + * Output format for generated sound effects + */ +export const zSchemaElevenlabsSoundEffectsV2Output = z + .object({ + audio: zSchemaFile, + }) + .register(z.globalRegistry, { + description: 'Output format for generated sound effects', + }) + +/** + * SoundEffectRequestV2 + */ +export const zSchemaElevenlabsSoundEffectsV2Input = z.object({ + text: z.string().register(z.globalRegistry, { + description: 'The text describing the sound effect to generate', + }), + loop: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to create a sound effect that loops smoothly.', + }), + ) + .default(false), + prompt_influence: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'How closely to follow the prompt (0-1). Higher values mean less variation.', + }), + ) + .default(0.3), + output_format: z.optional( + z + .enum([ + 'mp3_22050_32', + 'mp3_44100_32', + 'mp3_44100_64', + 'mp3_44100_96', + 'mp3_44100_128', + 'mp3_44100_192', + 'pcm_8000', + 'pcm_16000', + 'pcm_22050', + 'pcm_24000', + 'pcm_44100', + 'pcm_48000', + 'ulaw_8000', + 'alaw_8000', + 'opus_48000_32', + 'opus_48000_64', + 'opus_48000_96', + 'opus_48000_128', + 'opus_48000_192', + ]) + .register(z.globalRegistry, { + description: + 'Output format of the generated audio. Formatted as codec_sample_rate_bitrate.', + }), + ), + duration_seconds: z.optional( + z.union([z.number().gte(0.5).lte(22), z.unknown()]), + ), +}) + +/** + * PronunciationDictionaryLocator + */ +export const zSchemaPronunciationDictionaryLocator = z.object({ + version_id: z.optional(z.union([z.string(), z.unknown()])), + pronunciation_dictionary_id: z.union([z.string(), z.unknown()]), +}) + +/** + * DialogueBlock + */ +export const zSchemaDialogueBlock = z.object({ + text: z.string().register(z.globalRegistry, { + description: 'The dialogue text', + }), + voice: z.string().register(z.globalRegistry, { + description: + 'The name or the ID of the voice to be used for the generation.', + }), +}) + +/** + * TextToDialogueOutput + */ +export const zSchemaElevenlabsTextToDialogueElevenV3Output = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'Random seed for reproducibility.', + }), + audio: zSchemaFile, +}) + +/** + * TextToDialogueRequest + */ +export const zSchemaElevenlabsTextToDialogueElevenV3Input = z.object({ + stability: z.optional(z.union([z.number().gte(0).lte(1), z.unknown()])), + inputs: z.array(zSchemaDialogueBlock).register(z.globalRegistry, { + description: + 'A list of dialogue inputs, each containing text and a voice ID which will be converted into speech.', + }), + language_code: z.optional(z.union([z.string(), z.unknown()])), + seed: z.optional(z.union([z.int(), z.unknown()])), + use_speaker_boost: z.optional(z.union([z.boolean(), z.unknown()])), + pronunciation_dictionary_locators: z + .optional( + z + .array(zSchemaPronunciationDictionaryLocator) + .register(z.globalRegistry, { + description: + 'A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. 
You may have up to 3 locators per request', + }), + ) + .default([]), +}) + +/** + * TextToAudioOutput + */ +export const zSchemaStableAudio25TextToAudioOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The random seed used for generation', + }), + audio: zSchemaFile, +}) + +/** + * TextToAudioInput + */ +export const zSchemaStableAudio25TextToAudioInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate audio from', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + seconds_total: z + .optional( + z.int().gte(1).lte(190).register(z.globalRegistry, { + description: 'The duration of the audio clip to generate', + }), + ) + .default(190), + num_inference_steps: z + .optional( + z.int().gte(4).lte(8).register(z.globalRegistry, { + description: 'The number of steps to denoise the audio for', + }), + ) + .default(8), + guidance_scale: z + .optional( + z.int().gte(1).lte(25).register(z.globalRegistry, { + description: + 'How strictly the diffusion process adheres to the prompt text (higher values make your audio closer to your prompt).', + }), + ) + .default(1), + seed: z.optional(z.int()), +}) + +/** + * MusicV15Output + */ +export const zSchemaMinimaxMusicV15Output = z.object({ + audio: zSchemaFile, +}) + +/** + * AudioSetting + */ +export const zSchemaAudioSetting = z.object({ + format: z.optional( + z.enum(['mp3', 'pcm', 'flac']).register(z.globalRegistry, { + description: 'Audio format', + }), + ), + sample_rate: z.optional( + z + .union([ + z.literal(8000), + z.literal(16000), + z.literal(22050), + z.literal(24000), + z.literal(32000), + z.literal(44100), + ]) + .register(z.globalRegistry, { + description: 'Sample rate of generated audio', + }), + ), + bitrate: z.optional( + z + .union([ + z.literal(32000), + z.literal(64000), + z.literal(128000), + z.literal(256000), + ]) + .register(z.globalRegistry, { + description: 'Bitrate of generated audio', + }), + ), +}) + +/** + * TextToMusic15Request + */ +export const zSchemaMinimaxMusicV15Input = z.object({ + prompt: z.string().min(10).max(600).register(z.globalRegistry, { + description: + 'Lyrics, supports [intro][verse][chorus][bridge][outro] sections. 10-600 characters.', + }), + lyrics_prompt: z.string().min(10).max(3000).register(z.globalRegistry, { + description: 'Control music generation. 10-3000 characters.', + }), + audio_setting: z.optional(zSchemaAudioSetting), +}) + +/** + * MusicGenerationOutput + * + * Output schema for music generation. + */ +export const zSchemaMusicGenerationOutput = z + .object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The processed prompt used for generation', + }), + metadata: z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: + 'Generation metadata including duration, sample rate, and parameters', + }), + audio: zSchemaFile, + }) + .register(z.globalRegistry, { + description: 'Output schema for music generation.', + }) + +/** + * MusicGenerationInput + * + * Input schema for music generation with form controls for the playground. 
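+ *
+ * A minimal parse sketch: when only `prompt` is supplied, the optional
+ * fields fall back to their declared defaults (duration 90, refinement 100,
+ * creativity 16, empty negative_prompt).
+ *
+ * @example
+ * const input = zSchemaMusicGenerationInput.parse({ prompt: 'calm piano' })
+ * // input.duration === 90 && input.creativity === 16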
+ */ +export const zSchemaMusicGenerationInput = z + .object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Describe the music you want to generate', + }), + duration: z + .optional( + z.number().gte(5).lte(150).register(z.globalRegistry, { + description: 'Length of the generated music in seconds', + }), + ) + .default(90), + refinement: z + .optional( + z.int().gte(10).lte(200).register(z.globalRegistry, { + description: + 'Refinement level - higher values may improve quality but take longer', + }), + ) + .default(100), + seed: z.optional(z.union([z.int().gte(0).lte(2147483647), z.unknown()])), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Describe what you want to avoid in the music (instruments, styles, moods). Leave blank for none.', + }), + ) + .default(''), + creativity: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: + 'Creativity level - higher values allow more creative interpretation of the prompt', + }), + ) + .default(16), + }) + .register(z.globalRegistry, { + description: + 'Input schema for music generation with form controls for the playground.', + }) + +/** + * SoundEffectGenerationOutput + * + * Output schema for sound effect generation. + */ +export const zSchemaSoundEffectGenerationOutput = z + .object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The processed prompt used for generation', + }), + metadata: z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: + 'Generation metadata including duration, sample rate, and parameters', + }), + audio: zSchemaFile, + }) + .register(z.globalRegistry, { + description: 'Output schema for sound effect generation.', + }) + +/** + * SoundEffectGenerationInput + * + * Input schema for sound effect generation with form controls for the playground. + */ +export const zSchemaSoundEffectGenerationInput = z + .object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Describe the sound effect you want to generate', + }), + duration: z + .optional( + z.number().gte(1).lte(35).register(z.globalRegistry, { + description: 'Length of the generated sound effect in seconds', + }), + ) + .default(5), + refinement: z + .optional( + z.int().gte(10).lte(200).register(z.globalRegistry, { + description: + 'Refinement level - Higher values may improve quality but take longer', + }), + ) + .default(40), + seed: z.optional(z.union([z.int().gte(0).lte(2147483647), z.unknown()])), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "Describe the types of sounds you don't want to generate in the output, avoid double-negatives, compare with positive prompts", + }), + ) + .default(''), + creativity: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: + 'Creativity level - higher values allow more creative interpretation of the prompt', + }), + ) + .default(16), + }) + .register(z.globalRegistry, { + description: + 'Input schema for sound effect generation with form controls for the playground.', + }) + +/** + * MusicV15Output + */ +export const zSchemaMinimaxMusicV2Output = z.object({ + audio: zSchemaFile, +}) + +/** + * TextToMusic20Request + */ +export const zSchemaMinimaxMusicV2Input = z.object({ + prompt: z.string().min(10).max(2000).register(z.globalRegistry, { + description: + 'A description of the music, specifying style, mood, and scenario. 
10-300 characters.', + }), + lyrics_prompt: z.string().min(10).max(3000).register(z.globalRegistry, { + description: + 'Lyrics of the song. Use \n to separate lines. You may add structure tags like [Intro], [Verse], [Chorus], [Bridge], [Outro] to enhance the arrangement. 10-3000 characters.', + }), + audio_setting: z.optional(zSchemaAudioSetting), +}) + +/** + * MusicSection + */ +export const zSchemaMusicSection = z.object({ + positive_local_styles: z.array(z.string()).register(z.globalRegistry, { + description: 'The styles that should be present in this section.', + }), + lines: z.array(z.string()).register(z.globalRegistry, { + description: + 'The lyrics of the section. Each line must be at most 200 characters long.', + }), + negative_local_styles: z.array(z.string()).register(z.globalRegistry, { + description: 'The styles that should not be present in this section.', + }), + duration_ms: z.int().gte(3000).lte(120000).register(z.globalRegistry, { + description: + 'The duration of the section in milliseconds. Must be between 3000ms and 120000ms.', + }), + section_name: z.string().min(1).max(100).register(z.globalRegistry, { + description: + 'The name of the section. Must be between 1 and 100 characters.', + }), +}) + +/** + * MusicCompositionPlan + */ +export const zSchemaMusicCompositionPlan = z.object({ + negative_global_styles: z.array(z.string()).register(z.globalRegistry, { + description: 'The styles that should not be present in the entire song.', + }), + sections: z.array(zSchemaMusicSection).register(z.globalRegistry, { + description: 'The sections of the song.', + }), + positive_global_styles: z.array(z.string()).register(z.globalRegistry, { + description: 'The styles that should be present in the entire song.', + }), +}) + +/** + * MusicOutput + */ +export const zSchemaElevenlabsMusicOutput = z.object({ + audio: zSchemaFile, +}) + +/** + * MusicRequest + * + * Request format for Elevenlabs Music API + */ +export const zSchemaElevenlabsMusicInput = z + .object({ + prompt: z.optional(z.union([z.string(), z.unknown()])), + composition_plan: z.optional( + z.union([zSchemaMusicCompositionPlan, z.unknown()]), + ), + music_length_ms: z.optional( + z.union([z.int().gte(3000).lte(600000), z.unknown()]), + ), + output_format: z.optional( + z + .enum([ + 'mp3_22050_32', + 'mp3_44100_32', + 'mp3_44100_64', + 'mp3_44100_96', + 'mp3_44100_128', + 'mp3_44100_192', + 'pcm_8000', + 'pcm_16000', + 'pcm_22050', + 'pcm_24000', + 'pcm_44100', + 'pcm_48000', + 'ulaw_8000', + 'alaw_8000', + 'opus_48000_32', + 'opus_48000_64', + 'opus_48000_96', + 'opus_48000_128', + 'opus_48000_192', + ]) + .register(z.globalRegistry, { + description: + 'Output format of the generated audio. Formatted as codec_sample_rate_bitrate. So an mp3 with 22.05kHz sample rate at 32kbs is represented as mp3_22050_32. MP3 with 192kbps bitrate requires you to be subscribed to Creator tier or above. PCM with 44.1kHz sample rate requires you to be subscribed to Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs.', + }), + ), + respect_sections_durations: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "Controls how strictly section durations in the composition_plan are enforced. It will only have an effect if it is used with composition_plan. When set to true, the model will precisely respect each section's duration_ms from the plan. 
When set to false, the model may adjust individual section durations which will generally lead to better generation quality and improved latency, while always preserving the total song duration from the plan.", + }), + ) + .default(true), + force_instrumental: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, guarantees that the generated song will be instrumental. If false, the song may or may not be instrumental depending on the prompt. Can only be used with prompt.', + }), + ) + .default(false), + }) + .register(z.globalRegistry, { + description: 'Request format for Elevenlabs Music API', + }) + +export const zSchemaQueueStatus = z.object({ + status: z.enum(['IN_QUEUE', 'IN_PROGRESS', 'COMPLETED']), + request_id: z.string().register(z.globalRegistry, { + description: 'The request id.', + }), + response_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The response url.', + }), + ), + status_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The status url.', + }), + ), + cancel_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The cancel url.', + }), + ), + logs: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The logs.', + }), + ), + metrics: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The metrics.', + }), + ), + queue_position: z.optional( + z.int().register(z.globalRegistry, { + description: 'The queue position.', + }), + ), +}) + +export const zGetFalAiElevenlabsMusicRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiElevenlabsMusicRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiElevenlabsMusicRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiElevenlabsMusicRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiElevenlabsMusicData = z.object({ + body: zSchemaElevenlabsMusicInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiElevenlabsMusicResponse = zSchemaQueueStatus + +export const zGetFalAiElevenlabsMusicRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiElevenlabsMusicRequestsByRequestIdResponse = + zSchemaElevenlabsMusicOutput + +export const zGetFalAiMinimaxMusicV2RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiMinimaxMusicV2RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMinimaxMusicV2RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiMinimaxMusicV2RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMinimaxMusicV2Data = z.object({ + body: zSchemaMinimaxMusicV2Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMinimaxMusicV2Response = zSchemaQueueStatus + +export const zGetFalAiMinimaxMusicV2RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiMinimaxMusicV2RequestsByRequestIdResponse = + zSchemaMinimaxMusicV2Output + +export const zGetBeatovenSoundEffectGenerationRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetBeatovenSoundEffectGenerationRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutBeatovenSoundEffectGenerationRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutBeatovenSoundEffectGenerationRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostBeatovenSoundEffectGenerationData = z.object({ + body: zSchemaSoundEffectGenerationInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
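+ *
+ * A minimal submit sketch, assuming the fal queue REST conventions
+ * (`https://queue.fal.run`, `Authorization: Key <FAL_KEY>`). The prompt
+ * value is a placeholder, `duration` falls back to its default of 5, and
+ * `ENDPOINT` stands for the endpoint id declared in the matching
+ * types.gen.ts entry.
+ *
+ * @example
+ * const { body } = zPostBeatovenSoundEffectGenerationData.parse({
+ *   body: { prompt: 'glass shattering on concrete' },
+ * })
+ * const status = zPostBeatovenSoundEffectGenerationResponse.parse(
+ *   await fetch(`https://queue.fal.run/${ENDPOINT}`, {
+ *     method: 'POST',
+ *     headers: {
+ *       Authorization: `Key ${FAL_KEY}`,
+ *       'Content-Type': 'application/json',
+ *     },
+ *     body: JSON.stringify(body),
+ *   }).then((r) => r.json()),
+ * )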
+ */ +export const zPostBeatovenSoundEffectGenerationResponse = zSchemaQueueStatus + +export const zGetBeatovenSoundEffectGenerationRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetBeatovenSoundEffectGenerationRequestsByRequestIdResponse = + zSchemaSoundEffectGenerationOutput + +export const zGetBeatovenMusicGenerationRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetBeatovenMusicGenerationRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutBeatovenMusicGenerationRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutBeatovenMusicGenerationRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostBeatovenMusicGenerationData = z.object({ + body: zSchemaMusicGenerationInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostBeatovenMusicGenerationResponse = zSchemaQueueStatus + +export const zGetBeatovenMusicGenerationRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetBeatovenMusicGenerationRequestsByRequestIdResponse = + zSchemaMusicGenerationOutput + +export const zGetFalAiMinimaxMusicV15RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiMinimaxMusicV15RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMinimaxMusicV15RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
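+ *
+ * A minimal cancel sketch, assuming the fal queue REST conventions;
+ * `ENDPOINT` stands for the minimax-music v1.5 endpoint id from the
+ * matching types.gen.ts entry, and `requestId`/`FAL_KEY` are placeholders.
+ *
+ * @example
+ * const cancelled = zPutFalAiMinimaxMusicV15RequestsByRequestIdCancelResponse.parse(
+ *   await fetch(`https://queue.fal.run/${ENDPOINT}/requests/${requestId}/cancel`, {
+ *     method: 'PUT',
+ *     headers: { Authorization: `Key ${FAL_KEY}` },
+ *   }).then((r) => r.json()),
+ * )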
+ */ +export const zPutFalAiMinimaxMusicV15RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMinimaxMusicV15Data = z.object({ + body: zSchemaMinimaxMusicV15Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMinimaxMusicV15Response = zSchemaQueueStatus + +export const zGetFalAiMinimaxMusicV15RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiMinimaxMusicV15RequestsByRequestIdResponse = + zSchemaMinimaxMusicV15Output + +export const zGetFalAiStableAudio25TextToAudioRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiStableAudio25TextToAudioRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiStableAudio25TextToAudioRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiStableAudio25TextToAudioRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiStableAudio25TextToAudioData = z.object({ + body: zSchemaStableAudio25TextToAudioInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiStableAudio25TextToAudioResponse = zSchemaQueueStatus + +export const zGetFalAiStableAudio25TextToAudioRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiStableAudio25TextToAudioRequestsByRequestIdResponse = + zSchemaStableAudio25TextToAudioOutput + +export const zGetFalAiElevenlabsTextToDialogueElevenV3RequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
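+ *
+ * A minimal polling sketch, assuming the fal queue REST conventions; the
+ * `logs=1` query flag mirrors the optional `logs` field modeled in the
+ * StatusData schema above, and `ENDPOINT`/`requestId`/`FAL_KEY` are
+ * placeholders.
+ *
+ * @example
+ * const status =
+ *   zGetFalAiElevenlabsTextToDialogueElevenV3RequestsByRequestIdStatusResponse.parse(
+ *     await fetch(
+ *       `https://queue.fal.run/${ENDPOINT}/requests/${requestId}/status?logs=1`,
+ *       { headers: { Authorization: `Key ${FAL_KEY}` } },
+ *     ).then((r) => r.json()),
+ *   )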
+ */ +export const zGetFalAiElevenlabsTextToDialogueElevenV3RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiElevenlabsTextToDialogueElevenV3RequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiElevenlabsTextToDialogueElevenV3RequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiElevenlabsTextToDialogueElevenV3Data = z.object({ + body: zSchemaElevenlabsTextToDialogueElevenV3Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiElevenlabsTextToDialogueElevenV3Response = + zSchemaQueueStatus + +export const zGetFalAiElevenlabsTextToDialogueElevenV3RequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiElevenlabsTextToDialogueElevenV3RequestsByRequestIdResponse = + zSchemaElevenlabsTextToDialogueElevenV3Output + +export const zGetFalAiElevenlabsSoundEffectsV2RequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiElevenlabsSoundEffectsV2RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiElevenlabsSoundEffectsV2RequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiElevenlabsSoundEffectsV2RequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiElevenlabsSoundEffectsV2Data = z.object({ + body: zSchemaElevenlabsSoundEffectsV2Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiElevenlabsSoundEffectsV2Response = zSchemaQueueStatus + +export const zGetFalAiElevenlabsSoundEffectsV2RequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
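+ *
+ * A minimal sketch (editor's illustration): once polling reports COMPLETED,
+ * the result can be fetched from the status payload's `response_url` and
+ * narrowed with this schema. `status` and `authHeaders` are hypothetical.
+ *
+ * @example
+ * const res = await fetch(status.response_url!, { headers: authHeaders })
+ * const output =
+ *   zGetFalAiElevenlabsSoundEffectsV2RequestsByRequestIdResponse.parse(
+ *     await res.json(),
+ *   )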
+ */ +export const zGetFalAiElevenlabsSoundEffectsV2RequestsByRequestIdResponse = + zSchemaElevenlabsSoundEffectsV2Output + +export const zGetSonautoV2InpaintRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetSonautoV2InpaintRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutSonautoV2InpaintRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutSonautoV2InpaintRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostSonautoV2InpaintData = z.object({ + body: zSchemaV2InpaintInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostSonautoV2InpaintResponse = zSchemaQueueStatus + +export const zGetSonautoV2InpaintRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetSonautoV2InpaintRequestsByRequestIdResponse = + zSchemaV2InpaintOutput + +export const zGetSonautoV2TextToMusicRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetSonautoV2TextToMusicRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutSonautoV2TextToMusicRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutSonautoV2TextToMusicRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostSonautoV2TextToMusicData = z.object({ + body: zSchemaV2TextToMusicInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
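+ *
+ * A submission sketch (editor's illustration). The queue URL is an assumed
+ * fal queue address for this endpoint; `input` is a hypothetical payload
+ * already shaped like `zSchemaV2TextToMusicInput`.
+ *
+ * @example
+ * const { body } = zPostSonautoV2TextToMusicData.parse({ body: input })
+ * const res = await fetch('https://queue.fal.run/sonauto/v2/text-to-music', {
+ *   method: 'POST',
+ *   headers: {
+ *     Authorization: `Key ${process.env.FAL_KEY}`,
+ *     'Content-Type': 'application/json',
+ *   },
+ *   body: JSON.stringify(body),
+ * })
+ * const queued = zPostSonautoV2TextToMusicResponse.parse(await res.json())
+ * // queued.request_id / queued.status_url drive the status and result calls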
+ */ +export const zPostSonautoV2TextToMusicResponse = zSchemaQueueStatus + +export const zGetSonautoV2TextToMusicRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetSonautoV2TextToMusicRequestsByRequestIdResponse = + zSchemaV2TextToMusicOutput + +export const zGetFalAiElevenlabsTtsElevenV3RequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiElevenlabsTtsElevenV3RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiElevenlabsTtsElevenV3RequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiElevenlabsTtsElevenV3RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiElevenlabsTtsElevenV3Data = z.object({ + body: zSchemaElevenlabsTtsElevenV3Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiElevenlabsTtsElevenV3Response = zSchemaQueueStatus + +export const zGetFalAiElevenlabsTtsElevenV3RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiElevenlabsTtsElevenV3RequestsByRequestIdResponse = + zSchemaElevenlabsTtsElevenV3Output + +export const zGetFalAiLyria2RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiLyria2RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLyria2RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiLyria2RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLyria2Data = z.object({ + body: zSchemaLyria2Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLyria2Response = zSchemaQueueStatus + +export const zGetFalAiLyria2RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLyria2RequestsByRequestIdResponse = zSchemaLyria2Output + +export const zGetFalAiAceStepPromptToAudioRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiAceStepPromptToAudioRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiAceStepPromptToAudioRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiAceStepPromptToAudioRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiAceStepPromptToAudioData = z.object({ + body: zSchemaAceStepPromptToAudioInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiAceStepPromptToAudioResponse = zSchemaQueueStatus + +export const zGetFalAiAceStepPromptToAudioRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiAceStepPromptToAudioRequestsByRequestIdResponse = + zSchemaAceStepPromptToAudioOutput + +export const zGetFalAiAceStepRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
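+ *
+ * The matching Data schema also validates the optional `logs` query flag
+ * (`1` to include logs, `0` to omit them). A small sketch (editor's
+ * illustration; the request id is a placeholder):
+ *
+ * @example
+ * zGetFalAiAceStepRequestsByRequestIdStatusData.parse({
+ *   path: { request_id: 'req_123' },
+ *   query: { logs: 1 },
+ * })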
+ */ +export const zGetFalAiAceStepRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiAceStepRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiAceStepRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiAceStepData = z.object({ + body: zSchemaAceStepInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiAceStepResponse = zSchemaQueueStatus + +export const zGetFalAiAceStepRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiAceStepRequestsByRequestIdResponse = zSchemaAceStepOutput + +export const zGetCassetteaiSoundEffectsGeneratorRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetCassetteaiSoundEffectsGeneratorRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutCassetteaiSoundEffectsGeneratorRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutCassetteaiSoundEffectsGeneratorRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostCassetteaiSoundEffectsGeneratorData = z.object({ + body: zSchemaSoundEffectsGeneratorInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostCassetteaiSoundEffectsGeneratorResponse = zSchemaQueueStatus + +export const zGetCassetteaiSoundEffectsGeneratorRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetCassetteaiSoundEffectsGeneratorRequestsByRequestIdResponse = + zSchemaSoundEffectsGeneratorOutput + +export const zGetCassetteaiMusicGeneratorRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetCassetteaiMusicGeneratorRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutCassetteaiMusicGeneratorRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutCassetteaiMusicGeneratorRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostCassetteaiMusicGeneratorData = z.object({ + body: zSchemaMusicGeneratorInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostCassetteaiMusicGeneratorResponse = zSchemaQueueStatus + +export const zGetCassetteaiMusicGeneratorRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetCassetteaiMusicGeneratorRequestsByRequestIdResponse = + zSchemaMusicGeneratorOutput + +export const zGetFalAiCsm1bRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiCsm1bRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiCsm1bRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiCsm1bRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiCsm1bData = z.object({ + body: zSchemaCsm1bInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiCsm1bResponse = zSchemaQueueStatus + +export const zGetFalAiCsm1bRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiCsm1bRequestsByRequestIdResponse = zSchemaCsm1bOutput + +export const zGetFalAiDiffrhythmRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiDiffrhythmRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiDiffrhythmRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiDiffrhythmRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiDiffrhythmData = z.object({ + body: zSchemaDiffrhythmInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiDiffrhythmResponse = zSchemaQueueStatus + +export const zGetFalAiDiffrhythmRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiDiffrhythmRequestsByRequestIdResponse = + zSchemaDiffrhythmOutput + +export const zGetFalAiElevenlabsTtsMultilingualV2RequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiElevenlabsTtsMultilingualV2RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiElevenlabsTtsMultilingualV2RequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiElevenlabsTtsMultilingualV2RequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiElevenlabsTtsMultilingualV2Data = z.object({ + body: zSchemaElevenlabsTtsMultilingualV2Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiElevenlabsTtsMultilingualV2Response = zSchemaQueueStatus + +export const zGetFalAiElevenlabsTtsMultilingualV2RequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiElevenlabsTtsMultilingualV2RequestsByRequestIdResponse = + zSchemaElevenlabsTtsMultilingualV2Output + +export const zGetFalAiKokoroHindiRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiKokoroHindiRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKokoroHindiRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiKokoroHindiRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKokoroHindiData = z.object({ + body: zSchemaKokoroHindiInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKokoroHindiResponse = zSchemaQueueStatus + +export const zGetFalAiKokoroHindiRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiKokoroHindiRequestsByRequestIdResponse = + zSchemaKokoroHindiOutput + +export const zGetFalAiKokoroMandarinChineseRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
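+ *
+ * The descriptions attached with `.register(z.globalRegistry, ...)` can be
+ * read back at runtime (a sketch, assuming Zod v4's registry API):
+ *
+ * @example
+ * const meta = z.globalRegistry.get(
+ *   zGetFalAiKokoroMandarinChineseRequestsByRequestIdStatusData.shape.path
+ *     .shape.request_id,
+ * )
+ * // meta?.description === 'Request ID'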
+ */ +export const zGetFalAiKokoroMandarinChineseRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKokoroMandarinChineseRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKokoroMandarinChineseRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKokoroMandarinChineseData = z.object({ + body: zSchemaKokoroMandarinChineseInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKokoroMandarinChineseResponse = zSchemaQueueStatus + +export const zGetFalAiKokoroMandarinChineseRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiKokoroMandarinChineseRequestsByRequestIdResponse = + zSchemaKokoroMandarinChineseOutput + +export const zGetFalAiKokoroSpanishRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiKokoroSpanishRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKokoroSpanishRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiKokoroSpanishRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKokoroSpanishData = z.object({ + body: zSchemaKokoroSpanishInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKokoroSpanishResponse = zSchemaQueueStatus + +export const zGetFalAiKokoroSpanishRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiKokoroSpanishRequestsByRequestIdResponse = + zSchemaKokoroSpanishOutput + +export const zGetFalAiKokoroBrazilianPortugueseRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKokoroBrazilianPortugueseRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKokoroBrazilianPortugueseRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKokoroBrazilianPortugueseRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKokoroBrazilianPortugueseData = z.object({ + body: zSchemaKokoroBrazilianPortugueseInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKokoroBrazilianPortugueseResponse = zSchemaQueueStatus + +export const zGetFalAiKokoroBrazilianPortugueseRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiKokoroBrazilianPortugueseRequestsByRequestIdResponse = + zSchemaKokoroBrazilianPortugueseOutput + +export const zGetFalAiKokoroBritishEnglishRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKokoroBritishEnglishRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKokoroBritishEnglishRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKokoroBritishEnglishRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKokoroBritishEnglishData = z.object({ + body: zSchemaKokoroBritishEnglishInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiKokoroBritishEnglishResponse = zSchemaQueueStatus + +export const zGetFalAiKokoroBritishEnglishRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiKokoroBritishEnglishRequestsByRequestIdResponse = + zSchemaKokoroBritishEnglishOutput + +export const zGetFalAiKokoroFrenchRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiKokoroFrenchRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKokoroFrenchRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiKokoroFrenchRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKokoroFrenchData = z.object({ + body: zSchemaKokoroFrenchInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKokoroFrenchResponse = zSchemaQueueStatus + +export const zGetFalAiKokoroFrenchRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiKokoroFrenchRequestsByRequestIdResponse = + zSchemaKokoroFrenchOutput + +export const zGetFalAiKokoroJapaneseRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiKokoroJapaneseRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKokoroJapaneseRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiKokoroJapaneseRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKokoroJapaneseData = z.object({ + body: zSchemaKokoroJapaneseInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKokoroJapaneseResponse = zSchemaQueueStatus + +export const zGetFalAiKokoroJapaneseRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiKokoroJapaneseRequestsByRequestIdResponse = + zSchemaKokoroJapaneseOutput + +export const zGetFalAiKokoroAmericanEnglishRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKokoroAmericanEnglishRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKokoroAmericanEnglishRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKokoroAmericanEnglishRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKokoroAmericanEnglishData = z.object({ + body: zSchemaKokoroAmericanEnglishInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKokoroAmericanEnglishResponse = zSchemaQueueStatus + +export const zGetFalAiKokoroAmericanEnglishRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiKokoroAmericanEnglishRequestsByRequestIdResponse = + zSchemaKokoroAmericanEnglishOutput + +export const zGetFalAiZonosRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiZonosRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiZonosRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiZonosRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiZonosData = z.object({ + body: zSchemaZonosInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiZonosResponse = zSchemaQueueStatus + +export const zGetFalAiZonosRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiZonosRequestsByRequestIdResponse = zSchemaZonosOutput + +export const zGetFalAiKokoroItalianRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiKokoroItalianRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKokoroItalianRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiKokoroItalianRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKokoroItalianData = z.object({ + body: zSchemaKokoroItalianInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKokoroItalianResponse = zSchemaQueueStatus + +export const zGetFalAiKokoroItalianRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiKokoroItalianRequestsByRequestIdResponse = + zSchemaKokoroItalianOutput + +export const zGetFalAiYueRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiYueRequestsByRequestIdStatusResponse = zSchemaQueueStatus + +export const zPutFalAiYueRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiYueRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiYueData = z.object({ + body: zSchemaYueInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiYueResponse = zSchemaQueueStatus + +export const zGetFalAiYueRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiYueRequestsByRequestIdResponse = zSchemaYueOutput + +export const zGetFalAiMmaudioV2TextToAudioRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiMmaudioV2TextToAudioRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMmaudioV2TextToAudioRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiMmaudioV2TextToAudioRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMmaudioV2TextToAudioData = z.object({ + body: zSchemaMmaudioV2TextToAudioInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiMmaudioV2TextToAudioResponse = zSchemaQueueStatus + +export const zGetFalAiMmaudioV2TextToAudioRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiMmaudioV2TextToAudioRequestsByRequestIdResponse = + zSchemaMmaudioV2TextToAudioOutput + +export const zGetFalAiMinimaxMusicRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiMinimaxMusicRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMinimaxMusicRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiMinimaxMusicRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMinimaxMusicData = z.object({ + body: zSchemaMinimaxMusicInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMinimaxMusicResponse = zSchemaQueueStatus + +export const zGetFalAiMinimaxMusicRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiMinimaxMusicRequestsByRequestIdResponse = + zSchemaMinimaxMusicOutput + +export const zGetFalAiF5TtsRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiF5TtsRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiF5TtsRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiF5TtsRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiF5TtsData = z.object({ + body: zSchemaF5TtsInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiF5TtsResponse = zSchemaQueueStatus + +export const zGetFalAiF5TtsRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiF5TtsRequestsByRequestIdResponse = zSchemaF5TtsOutput + +export const zGetFalAiStableAudioRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiStableAudioRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiStableAudioRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiStableAudioRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiStableAudioData = z.object({ + body: zSchemaStableAudioInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiStableAudioResponse = zSchemaQueueStatus + +export const zGetFalAiStableAudioRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */
+export const zGetFalAiStableAudioRequestsByRequestIdResponse =
+  zSchemaStableAudioOutput
diff --git a/packages/typescript/ai-fal/src/generated/text-to-image/endpoint-map.ts b/packages/typescript/ai-fal/src/generated/text-to-image/endpoint-map.ts
new file mode 100644
index 00000000..c0686ce6
--- /dev/null
+++ b/packages/typescript/ai-fal/src/generated/text-to-image/endpoint-map.ts
@@ -0,0 +1,1842 @@
+// AUTO-GENERATED - Do not edit manually +// Generated from types.gen.ts via scripts/generate-fal-endpoint-maps.ts + +import { + zSchemaAuraFlowInput, + zSchemaAuraFlowOutput, + zSchemaBagelInput, + zSchemaBagelOutput, + zSchemaBriaTextToImageBaseInput, + zSchemaBriaTextToImageBaseOutput, + zSchemaBriaTextToImageFastInput, + zSchemaBriaTextToImageFastOutput, + zSchemaBriaTextToImageHdInput, + zSchemaBriaTextToImageHdOutput, + zSchemaBytedanceDreaminaV31TextToImageInput, + zSchemaBytedanceDreaminaV31TextToImageOutput, + zSchemaBytedanceSeedreamV3TextToImageInput, + zSchemaBytedanceSeedreamV3TextToImageOutput, + zSchemaBytedanceSeedreamV45TextToImageInput, + zSchemaBytedanceSeedreamV45TextToImageOutput, + zSchemaBytedanceSeedreamV4TextToImageInput, + zSchemaBytedanceSeedreamV4TextToImageOutput, + zSchemaCogview4Input, + zSchemaCogview4Output, + zSchemaDiffusionEdgeInput, + zSchemaDiffusionEdgeOutput, + zSchemaDreamoInput, + zSchemaDreamoOutput, + zSchemaDreamshaperInput, + zSchemaDreamshaperOutput, + zSchemaEmu35ImageTextToImageInput, + zSchemaEmu35ImageTextToImageOutput, + zSchemaFLiteStandardInput, + zSchemaFLiteStandardOutput, + zSchemaFLiteTextureInput, + zSchemaFLiteTextureOutput, + zSchemaFastFooocusSdxlImageToImageInput, + zSchemaFastFooocusSdxlImageToImageOutput, + zSchemaFastFooocusSdxlInput, + zSchemaFastFooocusSdxlOutput, + zSchemaFastLcmDiffusionInput, + zSchemaFastLcmDiffusionOutput, + zSchemaFastLightningSdxlInput, + zSchemaFastLightningSdxlOutput, + zSchemaFastSdxlControlnetCannyInput, + zSchemaFastSdxlControlnetCannyOutput, + zSchemaFastSdxlInput, + zSchemaFastSdxlOutput, + zSchemaFiboGenerateInput, + zSchemaFiboGenerateOutput, + zSchemaFiboLiteGenerateInput, + zSchemaFiboLiteGenerateOutput, + zSchemaFlux1DevInput, + zSchemaFlux1DevOutput, + zSchemaFlux1KreaInput, + zSchemaFlux1KreaOutput, + zSchemaFlux1SchnellInput, + zSchemaFlux1SchnellOutput, + zSchemaFlux1SrpoInput, + zSchemaFlux1SrpoOutput, + zSchemaFlux2FlashInput, + zSchemaFlux2FlashOutput, + zSchemaFlux2FlexInput, + zSchemaFlux2FlexOutput, + zSchemaFlux2Input, + zSchemaFlux2Klein4bBaseInput, + zSchemaFlux2Klein4bBaseLoraInput, + zSchemaFlux2Klein4bBaseLoraOutput, + zSchemaFlux2Klein4bBaseOutput, + zSchemaFlux2Klein4bInput, + zSchemaFlux2Klein4bOutput, + zSchemaFlux2Klein9bBaseInput, + zSchemaFlux2Klein9bBaseLoraInput, + zSchemaFlux2Klein9bBaseLoraOutput, + zSchemaFlux2Klein9bBaseOutput, + zSchemaFlux2Klein9bInput, + zSchemaFlux2Klein9bOutput, + zSchemaFlux2LoraGalleryBallpointPenSketchInput, + zSchemaFlux2LoraGalleryBallpointPenSketchOutput, + zSchemaFlux2LoraGalleryDigitalComicArtInput, + zSchemaFlux2LoraGalleryDigitalComicArtOutput, + zSchemaFlux2LoraGalleryHdrStyleInput, + zSchemaFlux2LoraGalleryHdrStyleOutput, + zSchemaFlux2LoraGalleryRealismInput, + zSchemaFlux2LoraGalleryRealismOutput, + zSchemaFlux2LoraGallerySatelliteViewStyleInput, + zSchemaFlux2LoraGallerySatelliteViewStyleOutput, + zSchemaFlux2LoraGallerySepiaVintageInput, + zSchemaFlux2LoraGallerySepiaVintageOutput, + zSchemaFlux2LoraInput, + zSchemaFlux2LoraOutput, + zSchemaFlux2MaxInput, + zSchemaFlux2MaxOutput, + zSchemaFlux2Output, +
zSchemaFlux2ProInput, + zSchemaFlux2ProOutput, + zSchemaFlux2TurboInput, + zSchemaFlux2TurboOutput, + zSchemaFluxControlLoraCannyInput, + zSchemaFluxControlLoraCannyOutput, + zSchemaFluxControlLoraDepthInput, + zSchemaFluxControlLoraDepthOutput, + zSchemaFluxDevInput, + zSchemaFluxDevOutput, + zSchemaFluxGeneralInput, + zSchemaFluxGeneralOutput, + zSchemaFluxKontextLoraTextToImageInput, + zSchemaFluxKontextLoraTextToImageOutput, + zSchemaFluxKreaInput, + zSchemaFluxKreaLoraInput, + zSchemaFluxKreaLoraOutput, + zSchemaFluxKreaLoraStreamInput, + zSchemaFluxKreaLoraStreamOutput, + zSchemaFluxKreaOutput, + zSchemaFluxLoraInpaintingInput, + zSchemaFluxLoraInpaintingOutput, + zSchemaFluxLoraInput, + zSchemaFluxLoraOutput, + zSchemaFluxLoraStreamInput, + zSchemaFluxLoraStreamOutput, + zSchemaFluxProKontextMaxTextToImageInput, + zSchemaFluxProKontextMaxTextToImageOutput, + zSchemaFluxProKontextTextToImageInput, + zSchemaFluxProKontextTextToImageOutput, + zSchemaFluxProV11Input, + zSchemaFluxProV11Output, + zSchemaFluxProV11UltraFinetunedInput, + zSchemaFluxProV11UltraFinetunedOutput, + zSchemaFluxProV11UltraInput, + zSchemaFluxProV11UltraOutput, + zSchemaFluxSchnellInput, + zSchemaFluxSchnellOutput, + zSchemaFluxSrpoInput, + zSchemaFluxSrpoOutput, + zSchemaFluxSubjectInput, + zSchemaFluxSubjectOutput, + zSchemaFooocusImagePromptInput, + zSchemaFooocusImagePromptOutput, + zSchemaFooocusInpaintInput, + zSchemaFooocusInpaintOutput, + zSchemaFooocusInput, + zSchemaFooocusOutput, + zSchemaFooocusUpscaleOrVaryInput, + zSchemaFooocusUpscaleOrVaryOutput, + zSchemaGemini25FlashImageInput, + zSchemaGemini25FlashImageOutput, + zSchemaGemini3ProImagePreviewInput, + zSchemaGemini3ProImagePreviewOutput, + zSchemaGlmImageInput, + zSchemaGlmImageOutput, + zSchemaGptImage15Input, + zSchemaGptImage15Output, + zSchemaGptImage1MiniInput, + zSchemaGptImage1MiniOutput, + zSchemaGptImage1TextToImageInput, + zSchemaGptImage1TextToImageOutput, + zSchemaHidreamI1DevInput, + zSchemaHidreamI1DevOutput, + zSchemaHidreamI1FastInput, + zSchemaHidreamI1FastOutput, + zSchemaHidreamI1FullInput, + zSchemaHidreamI1FullOutput, + zSchemaHunyuanImageV21TextToImageInput, + zSchemaHunyuanImageV21TextToImageOutput, + zSchemaHunyuanImageV3TextToImageInput, + zSchemaHunyuanImageV3TextToImageOutput, + zSchemaIdeogramV2Input, + zSchemaIdeogramV2Output, + zSchemaIdeogramV2TurboInput, + zSchemaIdeogramV2TurboOutput, + zSchemaIdeogramV2aInput, + zSchemaIdeogramV2aOutput, + zSchemaIdeogramV2aTurboInput, + zSchemaIdeogramV2aTurboOutput, + zSchemaIdeogramV3Input, + zSchemaIdeogramV3Output, + zSchemaIllusionDiffusionInput, + zSchemaIllusionDiffusionOutput, + zSchemaImagen3FastInput, + zSchemaImagen3FastOutput, + zSchemaImagen3Input, + zSchemaImagen3Output, + zSchemaImagen4PreviewFastInput, + zSchemaImagen4PreviewFastOutput, + zSchemaImagen4PreviewInput, + zSchemaImagen4PreviewOutput, + zSchemaImagen4PreviewUltraInput, + zSchemaImagen4PreviewUltraOutput, + zSchemaImagineart15PreviewTextToImageInput, + zSchemaImagineart15PreviewTextToImageOutput, + zSchemaImagineart15ProPreviewTextToImageInput, + zSchemaImagineart15ProPreviewTextToImageOutput, + zSchemaJanusInput, + zSchemaJanusOutput, + zSchemaJuggernautFluxBaseInput, + zSchemaJuggernautFluxBaseOutput, + zSchemaJuggernautFluxLightningInput, + zSchemaJuggernautFluxLightningOutput, + zSchemaJuggernautFluxLoraInput, + zSchemaJuggernautFluxLoraOutput, + zSchemaJuggernautFluxProInput, + zSchemaJuggernautFluxProOutput, + zSchemaKolorsInput, + zSchemaKolorsOutput, + zSchemaLayerDiffusionInput, + 
zSchemaLayerDiffusionOutput, + zSchemaLcmInput, + zSchemaLcmOutput, + zSchemaLightningModelsInput, + zSchemaLightningModelsOutput, + zSchemaLongcatImageInput, + zSchemaLongcatImageOutput, + zSchemaLoraInput, + zSchemaLoraOutput, + zSchemaLumaPhotonFlashInput, + zSchemaLumaPhotonFlashOutput, + zSchemaLumaPhotonInput, + zSchemaLumaPhotonOutput, + zSchemaLuminaImageV2Input, + zSchemaLuminaImageV2Output, + zSchemaMinimaxImage01Input, + zSchemaMinimaxImage01Output, + zSchemaNanoBananaInput, + zSchemaNanoBananaOutput, + zSchemaNanoBananaProInput, + zSchemaNanoBananaProOutput, + zSchemaOmnigenV1Input, + zSchemaOmnigenV1Output, + zSchemaOmnigenV2Input, + zSchemaOmnigenV2Output, + zSchemaOvisImageInput, + zSchemaOvisImageOutput, + zSchemaPiflowInput, + zSchemaPiflowOutput, + zSchemaPixartSigmaInput, + zSchemaPixartSigmaOutput, + zSchemaPlaygroundV25Input, + zSchemaPlaygroundV25Output, + zSchemaPonyV7Input, + zSchemaPonyV7Output, + zSchemaQwenImage2512Input, + zSchemaQwenImage2512LoraInput, + zSchemaQwenImage2512LoraOutput, + zSchemaQwenImage2512Output, + zSchemaQwenImageInput, + zSchemaQwenImageOutput, + zSchemaRealisticVisionInput, + zSchemaRealisticVisionOutput, + zSchemaRecraft20bInput, + zSchemaRecraft20bOutput, + zSchemaRecraftV3TextToImageInput, + zSchemaRecraftV3TextToImageOutput, + zSchemaReveTextToImageInput, + zSchemaReveTextToImageOutput, + zSchemaRundiffusionPhotoFluxInput, + zSchemaRundiffusionPhotoFluxOutput, + zSchemaSanaInput, + zSchemaSanaOutput, + zSchemaSanaSprintInput, + zSchemaSanaSprintOutput, + zSchemaSanaV1516bInput, + zSchemaSanaV1516bOutput, + zSchemaSanaV1548bInput, + zSchemaSanaV1548bOutput, + zSchemaSdxlControlnetUnionInput, + zSchemaSdxlControlnetUnionOutput, + zSchemaSkyRaccoonInput, + zSchemaSkyRaccoonOutput, + zSchemaStableCascadeInput, + zSchemaStableCascadeOutput, + zSchemaStableCascadeSoteDiffusionInput, + zSchemaStableCascadeSoteDiffusionOutput, + zSchemaStableDiffusionV15Input, + zSchemaStableDiffusionV15Output, + zSchemaStableDiffusionV35LargeInput, + zSchemaStableDiffusionV35LargeOutput, + zSchemaStableDiffusionV35MediumInput, + zSchemaStableDiffusionV35MediumOutput, + zSchemaStableDiffusionV3MediumInput, + zSchemaStableDiffusionV3MediumOutput, + zSchemaSwitti512Input, + zSchemaSwitti512Output, + zSchemaSwittiInput, + zSchemaSwittiOutput, + zSchemaTextToImage32Input, + zSchemaTextToImage32Output, + zSchemaV26TextToImageInput, + zSchemaV26TextToImageOutput, + zSchemaViduQ2TextToImageInput, + zSchemaViduQ2TextToImageOutput, + zSchemaWan25PreviewTextToImageInput, + zSchemaWan25PreviewTextToImageOutput, + zSchemaWanV225bTextToImageInput, + zSchemaWanV225bTextToImageOutput, + zSchemaWanV22A14bTextToImageInput, + zSchemaWanV22A14bTextToImageLoraInput, + zSchemaWanV22A14bTextToImageLoraOutput, + zSchemaWanV22A14bTextToImageOutput, + zSchemaZImageBaseInput, + zSchemaZImageBaseLoraInput, + zSchemaZImageBaseLoraOutput, + zSchemaZImageBaseOutput, + zSchemaZImageTurboInput, + zSchemaZImageTurboLoraInput, + zSchemaZImageTurboLoraOutput, + zSchemaZImageTurboOutput, +} from './zod.gen' + +import type { + SchemaAuraFlowInput, + SchemaAuraFlowOutput, + SchemaBagelInput, + SchemaBagelOutput, + SchemaBriaTextToImageBaseInput, + SchemaBriaTextToImageBaseOutput, + SchemaBriaTextToImageFastInput, + SchemaBriaTextToImageFastOutput, + SchemaBriaTextToImageHdInput, + SchemaBriaTextToImageHdOutput, + SchemaBytedanceDreaminaV31TextToImageInput, + SchemaBytedanceDreaminaV31TextToImageOutput, + SchemaBytedanceSeedreamV3TextToImageInput, + SchemaBytedanceSeedreamV3TextToImageOutput, + 
SchemaBytedanceSeedreamV45TextToImageInput, + SchemaBytedanceSeedreamV45TextToImageOutput, + SchemaBytedanceSeedreamV4TextToImageInput, + SchemaBytedanceSeedreamV4TextToImageOutput, + SchemaCogview4Input, + SchemaCogview4Output, + SchemaDiffusionEdgeInput, + SchemaDiffusionEdgeOutput, + SchemaDreamoInput, + SchemaDreamoOutput, + SchemaDreamshaperInput, + SchemaDreamshaperOutput, + SchemaEmu35ImageTextToImageInput, + SchemaEmu35ImageTextToImageOutput, + SchemaFLiteStandardInput, + SchemaFLiteStandardOutput, + SchemaFLiteTextureInput, + SchemaFLiteTextureOutput, + SchemaFastFooocusSdxlImageToImageInput, + SchemaFastFooocusSdxlImageToImageOutput, + SchemaFastFooocusSdxlInput, + SchemaFastFooocusSdxlOutput, + SchemaFastLcmDiffusionInput, + SchemaFastLcmDiffusionOutput, + SchemaFastLightningSdxlInput, + SchemaFastLightningSdxlOutput, + SchemaFastSdxlControlnetCannyInput, + SchemaFastSdxlControlnetCannyOutput, + SchemaFastSdxlInput, + SchemaFastSdxlOutput, + SchemaFiboGenerateInput, + SchemaFiboGenerateOutput, + SchemaFiboLiteGenerateInput, + SchemaFiboLiteGenerateOutput, + SchemaFlux1DevInput, + SchemaFlux1DevOutput, + SchemaFlux1KreaInput, + SchemaFlux1KreaOutput, + SchemaFlux1SchnellInput, + SchemaFlux1SchnellOutput, + SchemaFlux1SrpoInput, + SchemaFlux1SrpoOutput, + SchemaFlux2FlashInput, + SchemaFlux2FlashOutput, + SchemaFlux2FlexInput, + SchemaFlux2FlexOutput, + SchemaFlux2Input, + SchemaFlux2Klein4bBaseInput, + SchemaFlux2Klein4bBaseLoraInput, + SchemaFlux2Klein4bBaseLoraOutput, + SchemaFlux2Klein4bBaseOutput, + SchemaFlux2Klein4bInput, + SchemaFlux2Klein4bOutput, + SchemaFlux2Klein9bBaseInput, + SchemaFlux2Klein9bBaseLoraInput, + SchemaFlux2Klein9bBaseLoraOutput, + SchemaFlux2Klein9bBaseOutput, + SchemaFlux2Klein9bInput, + SchemaFlux2Klein9bOutput, + SchemaFlux2LoraGalleryBallpointPenSketchInput, + SchemaFlux2LoraGalleryBallpointPenSketchOutput, + SchemaFlux2LoraGalleryDigitalComicArtInput, + SchemaFlux2LoraGalleryDigitalComicArtOutput, + SchemaFlux2LoraGalleryHdrStyleInput, + SchemaFlux2LoraGalleryHdrStyleOutput, + SchemaFlux2LoraGalleryRealismInput, + SchemaFlux2LoraGalleryRealismOutput, + SchemaFlux2LoraGallerySatelliteViewStyleInput, + SchemaFlux2LoraGallerySatelliteViewStyleOutput, + SchemaFlux2LoraGallerySepiaVintageInput, + SchemaFlux2LoraGallerySepiaVintageOutput, + SchemaFlux2LoraInput, + SchemaFlux2LoraOutput, + SchemaFlux2MaxInput, + SchemaFlux2MaxOutput, + SchemaFlux2Output, + SchemaFlux2ProInput, + SchemaFlux2ProOutput, + SchemaFlux2TurboInput, + SchemaFlux2TurboOutput, + SchemaFluxControlLoraCannyInput, + SchemaFluxControlLoraCannyOutput, + SchemaFluxControlLoraDepthInput, + SchemaFluxControlLoraDepthOutput, + SchemaFluxDevInput, + SchemaFluxDevOutput, + SchemaFluxGeneralInput, + SchemaFluxGeneralOutput, + SchemaFluxKontextLoraTextToImageInput, + SchemaFluxKontextLoraTextToImageOutput, + SchemaFluxKreaInput, + SchemaFluxKreaLoraInput, + SchemaFluxKreaLoraOutput, + SchemaFluxKreaLoraStreamInput, + SchemaFluxKreaLoraStreamOutput, + SchemaFluxKreaOutput, + SchemaFluxLoraInpaintingInput, + SchemaFluxLoraInpaintingOutput, + SchemaFluxLoraInput, + SchemaFluxLoraOutput, + SchemaFluxLoraStreamInput, + SchemaFluxLoraStreamOutput, + SchemaFluxProKontextMaxTextToImageInput, + SchemaFluxProKontextMaxTextToImageOutput, + SchemaFluxProKontextTextToImageInput, + SchemaFluxProKontextTextToImageOutput, + SchemaFluxProV11Input, + SchemaFluxProV11Output, + SchemaFluxProV11UltraFinetunedInput, + SchemaFluxProV11UltraFinetunedOutput, + SchemaFluxProV11UltraInput, + SchemaFluxProV11UltraOutput, 
+ SchemaFluxSchnellInput, + SchemaFluxSchnellOutput, + SchemaFluxSrpoInput, + SchemaFluxSrpoOutput, + SchemaFluxSubjectInput, + SchemaFluxSubjectOutput, + SchemaFooocusImagePromptInput, + SchemaFooocusImagePromptOutput, + SchemaFooocusInpaintInput, + SchemaFooocusInpaintOutput, + SchemaFooocusInput, + SchemaFooocusOutput, + SchemaFooocusUpscaleOrVaryInput, + SchemaFooocusUpscaleOrVaryOutput, + SchemaGemini25FlashImageInput, + SchemaGemini25FlashImageOutput, + SchemaGemini3ProImagePreviewInput, + SchemaGemini3ProImagePreviewOutput, + SchemaGlmImageInput, + SchemaGlmImageOutput, + SchemaGptImage15Input, + SchemaGptImage15Output, + SchemaGptImage1MiniInput, + SchemaGptImage1MiniOutput, + SchemaGptImage1TextToImageInput, + SchemaGptImage1TextToImageOutput, + SchemaHidreamI1DevInput, + SchemaHidreamI1DevOutput, + SchemaHidreamI1FastInput, + SchemaHidreamI1FastOutput, + SchemaHidreamI1FullInput, + SchemaHidreamI1FullOutput, + SchemaHunyuanImageV21TextToImageInput, + SchemaHunyuanImageV21TextToImageOutput, + SchemaHunyuanImageV3TextToImageInput, + SchemaHunyuanImageV3TextToImageOutput, + SchemaIdeogramV2Input, + SchemaIdeogramV2Output, + SchemaIdeogramV2TurboInput, + SchemaIdeogramV2TurboOutput, + SchemaIdeogramV2aInput, + SchemaIdeogramV2aOutput, + SchemaIdeogramV2aTurboInput, + SchemaIdeogramV2aTurboOutput, + SchemaIdeogramV3Input, + SchemaIdeogramV3Output, + SchemaIllusionDiffusionInput, + SchemaIllusionDiffusionOutput, + SchemaImagen3FastInput, + SchemaImagen3FastOutput, + SchemaImagen3Input, + SchemaImagen3Output, + SchemaImagen4PreviewFastInput, + SchemaImagen4PreviewFastOutput, + SchemaImagen4PreviewInput, + SchemaImagen4PreviewOutput, + SchemaImagen4PreviewUltraInput, + SchemaImagen4PreviewUltraOutput, + SchemaImagineart15PreviewTextToImageInput, + SchemaImagineart15PreviewTextToImageOutput, + SchemaImagineart15ProPreviewTextToImageInput, + SchemaImagineart15ProPreviewTextToImageOutput, + SchemaJanusInput, + SchemaJanusOutput, + SchemaJuggernautFluxBaseInput, + SchemaJuggernautFluxBaseOutput, + SchemaJuggernautFluxLightningInput, + SchemaJuggernautFluxLightningOutput, + SchemaJuggernautFluxLoraInput, + SchemaJuggernautFluxLoraOutput, + SchemaJuggernautFluxProInput, + SchemaJuggernautFluxProOutput, + SchemaKolorsInput, + SchemaKolorsOutput, + SchemaLayerDiffusionInput, + SchemaLayerDiffusionOutput, + SchemaLcmInput, + SchemaLcmOutput, + SchemaLightningModelsInput, + SchemaLightningModelsOutput, + SchemaLongcatImageInput, + SchemaLongcatImageOutput, + SchemaLoraInput, + SchemaLoraOutput, + SchemaLumaPhotonFlashInput, + SchemaLumaPhotonFlashOutput, + SchemaLumaPhotonInput, + SchemaLumaPhotonOutput, + SchemaLuminaImageV2Input, + SchemaLuminaImageV2Output, + SchemaMinimaxImage01Input, + SchemaMinimaxImage01Output, + SchemaNanoBananaInput, + SchemaNanoBananaOutput, + SchemaNanoBananaProInput, + SchemaNanoBananaProOutput, + SchemaOmnigenV1Input, + SchemaOmnigenV1Output, + SchemaOmnigenV2Input, + SchemaOmnigenV2Output, + SchemaOvisImageInput, + SchemaOvisImageOutput, + SchemaPiflowInput, + SchemaPiflowOutput, + SchemaPixartSigmaInput, + SchemaPixartSigmaOutput, + SchemaPlaygroundV25Input, + SchemaPlaygroundV25Output, + SchemaPonyV7Input, + SchemaPonyV7Output, + SchemaQwenImage2512Input, + SchemaQwenImage2512LoraInput, + SchemaQwenImage2512LoraOutput, + SchemaQwenImage2512Output, + SchemaQwenImageInput, + SchemaQwenImageOutput, + SchemaRealisticVisionInput, + SchemaRealisticVisionOutput, + SchemaRecraft20bInput, + SchemaRecraft20bOutput, + SchemaRecraftV3TextToImageInput, + 
SchemaRecraftV3TextToImageOutput, + SchemaReveTextToImageInput, + SchemaReveTextToImageOutput, + SchemaRundiffusionPhotoFluxInput, + SchemaRundiffusionPhotoFluxOutput, + SchemaSanaInput, + SchemaSanaOutput, + SchemaSanaSprintInput, + SchemaSanaSprintOutput, + SchemaSanaV1516bInput, + SchemaSanaV1516bOutput, + SchemaSanaV1548bInput, + SchemaSanaV1548bOutput, + SchemaSdxlControlnetUnionInput, + SchemaSdxlControlnetUnionOutput, + SchemaSkyRaccoonInput, + SchemaSkyRaccoonOutput, + SchemaStableCascadeInput, + SchemaStableCascadeOutput, + SchemaStableCascadeSoteDiffusionInput, + SchemaStableCascadeSoteDiffusionOutput, + SchemaStableDiffusionV15Input, + SchemaStableDiffusionV15Output, + SchemaStableDiffusionV35LargeInput, + SchemaStableDiffusionV35LargeOutput, + SchemaStableDiffusionV35MediumInput, + SchemaStableDiffusionV35MediumOutput, + SchemaStableDiffusionV3MediumInput, + SchemaStableDiffusionV3MediumOutput, + SchemaSwitti512Input, + SchemaSwitti512Output, + SchemaSwittiInput, + SchemaSwittiOutput, + SchemaTextToImage32Input, + SchemaTextToImage32Output, + SchemaV26TextToImageInput, + SchemaV26TextToImageOutput, + SchemaViduQ2TextToImageInput, + SchemaViduQ2TextToImageOutput, + SchemaWan25PreviewTextToImageInput, + SchemaWan25PreviewTextToImageOutput, + SchemaWanV225bTextToImageInput, + SchemaWanV225bTextToImageOutput, + SchemaWanV22A14bTextToImageInput, + SchemaWanV22A14bTextToImageLoraInput, + SchemaWanV22A14bTextToImageLoraOutput, + SchemaWanV22A14bTextToImageOutput, + SchemaZImageBaseInput, + SchemaZImageBaseLoraInput, + SchemaZImageBaseLoraOutput, + SchemaZImageBaseOutput, + SchemaZImageTurboInput, + SchemaZImageTurboLoraInput, + SchemaZImageTurboLoraOutput, + SchemaZImageTurboOutput, +} from './types.gen' + +import type { z } from 'zod' + +export type TextToImageEndpointMap = { + 'fal-ai/imagen4/preview': { + input: SchemaImagen4PreviewInput + output: SchemaImagen4PreviewOutput + } + 'fal-ai/flux-pro/v1.1-ultra': { + input: SchemaFluxProV11UltraInput + output: SchemaFluxProV11UltraOutput + } + 'fal-ai/recraft/v3/text-to-image': { + input: SchemaRecraftV3TextToImageInput + output: SchemaRecraftV3TextToImageOutput + } + 'fal-ai/flux-2/lora': { + input: SchemaFlux2LoraInput + output: SchemaFlux2LoraOutput + } + 'fal-ai/flux-2': { + input: SchemaFlux2Input + output: SchemaFlux2Output + } + 'fal-ai/flux-2-pro': { + input: SchemaFlux2ProInput + output: SchemaFlux2ProOutput + } + 'bria/text-to-image/3.2': { + input: SchemaTextToImage32Input + output: SchemaTextToImage32Output + } + 'fal-ai/imagen4/preview/fast': { + input: SchemaImagen4PreviewFastInput + output: SchemaImagen4PreviewFastOutput + } + 'fal-ai/hidream-i1-full': { + input: SchemaHidreamI1FullInput + output: SchemaHidreamI1FullOutput + } + 'fal-ai/hidream-i1-dev': { + input: SchemaHidreamI1DevInput + output: SchemaHidreamI1DevOutput + } + 'fal-ai/hidream-i1-fast': { + input: SchemaHidreamI1FastInput + output: SchemaHidreamI1FastOutput + } + 'fal-ai/flux/dev': { + input: SchemaFluxDevInput + output: SchemaFluxDevOutput + } + 'fal-ai/ideogram/v2': { + input: SchemaIdeogramV2Input + output: SchemaIdeogramV2Output + } + 'fal-ai/stable-diffusion-v35-large': { + input: SchemaStableDiffusionV35LargeInput + output: SchemaStableDiffusionV35LargeOutput + } + 'fal-ai/flux-general': { + input: SchemaFluxGeneralInput + output: SchemaFluxGeneralOutput + } + 'fal-ai/flux-lora': { + input: SchemaFluxLoraInput + output: SchemaFluxLoraOutput + } + 'fal-ai/z-image/base/lora': { + input: SchemaZImageBaseLoraInput + output: SchemaZImageBaseLoraOutput + 
} + 'fal-ai/z-image/base': { + input: SchemaZImageBaseInput + output: SchemaZImageBaseOutput + } + 'fal-ai/flux-2/klein/9b/base/lora': { + input: SchemaFlux2Klein9bBaseLoraInput + output: SchemaFlux2Klein9bBaseLoraOutput + } + 'fal-ai/flux-2/klein/4b/base/lora': { + input: SchemaFlux2Klein4bBaseLoraInput + output: SchemaFlux2Klein4bBaseLoraOutput + } + 'fal-ai/flux-2/klein/9b/base': { + input: SchemaFlux2Klein9bBaseInput + output: SchemaFlux2Klein9bBaseOutput + } + 'fal-ai/flux-2/klein/4b/base': { + input: SchemaFlux2Klein4bBaseInput + output: SchemaFlux2Klein4bBaseOutput + } + 'fal-ai/flux-2/klein/9b': { + input: SchemaFlux2Klein9bInput + output: SchemaFlux2Klein9bOutput + } + 'fal-ai/flux-2/klein/4b': { + input: SchemaFlux2Klein4bInput + output: SchemaFlux2Klein4bOutput + } + 'imagineart/imagineart-1.5-pro-preview/text-to-image': { + input: SchemaImagineart15ProPreviewTextToImageInput + output: SchemaImagineart15ProPreviewTextToImageOutput + } + 'fal-ai/glm-image': { + input: SchemaGlmImageInput + output: SchemaGlmImageOutput + } + 'fal-ai/qwen-image-2512/lora': { + input: SchemaQwenImage2512LoraInput + output: SchemaQwenImage2512LoraOutput + } + 'fal-ai/qwen-image-2512': { + input: SchemaQwenImage2512Input + output: SchemaQwenImage2512Output + } + 'wan/v2.6/text-to-image': { + input: SchemaV26TextToImageInput + output: SchemaV26TextToImageOutput + } + 'fal-ai/flux-2/flash': { + input: SchemaFlux2FlashInput + output: SchemaFlux2FlashOutput + } + 'fal-ai/gpt-image-1.5': { + input: SchemaGptImage15Input + output: SchemaGptImage15Output + } + 'bria/fibo-lite/generate': { + input: SchemaFiboLiteGenerateInput + output: SchemaFiboLiteGenerateOutput + } + 'fal-ai/flux-2/turbo': { + input: SchemaFlux2TurboInput + output: SchemaFlux2TurboOutput + } + 'fal-ai/flux-2-max': { + input: SchemaFlux2MaxInput + output: SchemaFlux2MaxOutput + } + 'fal-ai/longcat-image': { + input: SchemaLongcatImageInput + output: SchemaLongcatImageOutput + } + 'fal-ai/bytedance/seedream/v4.5/text-to-image': { + input: SchemaBytedanceSeedreamV45TextToImageInput + output: SchemaBytedanceSeedreamV45TextToImageOutput + } + 'fal-ai/vidu/q2/text-to-image': { + input: SchemaViduQ2TextToImageInput + output: SchemaViduQ2TextToImageOutput + } + 'fal-ai/z-image/turbo/lora': { + input: SchemaZImageTurboLoraInput + output: SchemaZImageTurboLoraOutput + } + 'fal-ai/ovis-image': { + input: SchemaOvisImageInput + output: SchemaOvisImageOutput + } + 'fal-ai/z-image/turbo': { + input: SchemaZImageTurboInput + output: SchemaZImageTurboOutput + } + 'fal-ai/flux-2-lora-gallery/sepia-vintage': { + input: SchemaFlux2LoraGallerySepiaVintageInput + output: SchemaFlux2LoraGallerySepiaVintageOutput + } + 'fal-ai/flux-2-lora-gallery/satellite-view-style': { + input: SchemaFlux2LoraGallerySatelliteViewStyleInput + output: SchemaFlux2LoraGallerySatelliteViewStyleOutput + } + 'fal-ai/flux-2-lora-gallery/realism': { + input: SchemaFlux2LoraGalleryRealismInput + output: SchemaFlux2LoraGalleryRealismOutput + } + 'fal-ai/flux-2-lora-gallery/hdr-style': { + input: SchemaFlux2LoraGalleryHdrStyleInput + output: SchemaFlux2LoraGalleryHdrStyleOutput + } + 'fal-ai/flux-2-lora-gallery/digital-comic-art': { + input: SchemaFlux2LoraGalleryDigitalComicArtInput + output: SchemaFlux2LoraGalleryDigitalComicArtOutput + } + 'fal-ai/flux-2-lora-gallery/ballpoint-pen-sketch': { + input: SchemaFlux2LoraGalleryBallpointPenSketchInput + output: SchemaFlux2LoraGalleryBallpointPenSketchOutput + } + 'fal-ai/flux-2-flex': { + input: SchemaFlux2FlexInput + output: 
SchemaFlux2FlexOutput + } + 'fal-ai/gemini-3-pro-image-preview': { + input: SchemaGemini3ProImagePreviewInput + output: SchemaGemini3ProImagePreviewOutput + } + 'fal-ai/nano-banana-pro': { + input: SchemaNanoBananaProInput + output: SchemaNanoBananaProOutput + } + 'imagineart/imagineart-1.5-preview/text-to-image': { + input: SchemaImagineart15PreviewTextToImageInput + output: SchemaImagineart15PreviewTextToImageOutput + } + 'fal-ai/emu-3.5-image/text-to-image': { + input: SchemaEmu35ImageTextToImageInput + output: SchemaEmu35ImageTextToImageOutput + } + 'bria/fibo/generate': { + input: SchemaFiboGenerateInput + output: SchemaFiboGenerateOutput + } + 'fal-ai/piflow': { + input: SchemaPiflowInput + output: SchemaPiflowOutput + } + 'fal-ai/gpt-image-1-mini': { + input: SchemaGptImage1MiniInput + output: SchemaGptImage1MiniOutput + } + 'fal-ai/reve/text-to-image': { + input: SchemaReveTextToImageInput + output: SchemaReveTextToImageOutput + } + 'fal-ai/hunyuan-image/v3/text-to-image': { + input: SchemaHunyuanImageV3TextToImageInput + output: SchemaHunyuanImageV3TextToImageOutput + } + 'fal-ai/wan-25-preview/text-to-image': { + input: SchemaWan25PreviewTextToImageInput + output: SchemaWan25PreviewTextToImageOutput + } + 'fal-ai/flux/srpo': { + input: SchemaFluxSrpoInput + output: SchemaFluxSrpoOutput + } + 'fal-ai/flux-1/srpo': { + input: SchemaFlux1SrpoInput + output: SchemaFlux1SrpoOutput + } + 'fal-ai/hunyuan-image/v2.1/text-to-image': { + input: SchemaHunyuanImageV21TextToImageInput + output: SchemaHunyuanImageV21TextToImageOutput + } + 'fal-ai/bytedance/seedream/v4/text-to-image': { + input: SchemaBytedanceSeedreamV4TextToImageInput + output: SchemaBytedanceSeedreamV4TextToImageOutput + } + 'fal-ai/gemini-25-flash-image': { + input: SchemaGemini25FlashImageInput + output: SchemaGemini25FlashImageOutput + } + 'fal-ai/nano-banana': { + input: SchemaNanoBananaInput + output: SchemaNanoBananaOutput + } + 'fal-ai/bytedance/dreamina/v3.1/text-to-image': { + input: SchemaBytedanceDreaminaV31TextToImageInput + output: SchemaBytedanceDreaminaV31TextToImageOutput + } + 'fal-ai/wan/v2.2-a14b/text-to-image/lora': { + input: SchemaWanV22A14bTextToImageLoraInput + output: SchemaWanV22A14bTextToImageLoraOutput + } + 'fal-ai/wan/v2.2-5b/text-to-image': { + input: SchemaWanV225bTextToImageInput + output: SchemaWanV225bTextToImageOutput + } + 'fal-ai/wan/v2.2-a14b/text-to-image': { + input: SchemaWanV22A14bTextToImageInput + output: SchemaWanV22A14bTextToImageOutput + } + 'fal-ai/qwen-image': { + input: SchemaQwenImageInput + output: SchemaQwenImageOutput + } + 'fal-ai/flux-krea-lora/stream': { + input: SchemaFluxKreaLoraStreamInput + output: SchemaFluxKreaLoraStreamOutput + } + 'fal-ai/flux-krea-lora': { + input: SchemaFluxKreaLoraInput + output: SchemaFluxKreaLoraOutput + } + 'fal-ai/flux/krea': { + input: SchemaFluxKreaInput + output: SchemaFluxKreaOutput + } + 'fal-ai/flux-1/krea': { + input: SchemaFlux1KreaInput + output: SchemaFlux1KreaOutput + } + 'fal-ai/sky-raccoon': { + input: SchemaSkyRaccoonInput + output: SchemaSkyRaccoonOutput + } + 'fal-ai/flux-kontext-lora/text-to-image': { + input: SchemaFluxKontextLoraTextToImageInput + output: SchemaFluxKontextLoraTextToImageOutput + } + 'fal-ai/omnigen-v2': { + input: SchemaOmnigenV2Input + output: SchemaOmnigenV2Output + } + 'fal-ai/bytedance/seedream/v3/text-to-image': { + input: SchemaBytedanceSeedreamV3TextToImageInput + output: SchemaBytedanceSeedreamV3TextToImageOutput + } + 'fal-ai/flux-1/schnell': { + input: SchemaFlux1SchnellInput + output: 
SchemaFlux1SchnellOutput + } + 'fal-ai/flux-1/dev': { + input: SchemaFlux1DevInput + output: SchemaFlux1DevOutput + } + 'fal-ai/flux-pro/kontext/max/text-to-image': { + input: SchemaFluxProKontextMaxTextToImageInput + output: SchemaFluxProKontextMaxTextToImageOutput + } + 'fal-ai/flux-pro/kontext/text-to-image': { + input: SchemaFluxProKontextTextToImageInput + output: SchemaFluxProKontextTextToImageOutput + } + 'fal-ai/bagel': { + input: SchemaBagelInput + output: SchemaBagelOutput + } + 'fal-ai/imagen4/preview/ultra': { + input: SchemaImagen4PreviewUltraInput + output: SchemaImagen4PreviewUltraOutput + } + 'fal-ai/dreamo': { + input: SchemaDreamoInput + output: SchemaDreamoOutput + } + 'fal-ai/flux-lora/stream': { + input: SchemaFluxLoraStreamInput + output: SchemaFluxLoraStreamOutput + } + 'fal-ai/minimax/image-01': { + input: SchemaMinimaxImage01Input + output: SchemaMinimaxImage01Output + } + 'fal-ai/pony-v7': { + input: SchemaPonyV7Input + output: SchemaPonyV7Output + } + 'fal-ai/ideogram/v3': { + input: SchemaIdeogramV3Input + output: SchemaIdeogramV3Output + } + 'fal-ai/f-lite/standard': { + input: SchemaFLiteStandardInput + output: SchemaFLiteStandardOutput + } + 'fal-ai/f-lite/texture': { + input: SchemaFLiteTextureInput + output: SchemaFLiteTextureOutput + } + 'fal-ai/gpt-image-1/text-to-image': { + input: SchemaGptImage1TextToImageInput + output: SchemaGptImage1TextToImageOutput + } + 'fal-ai/sana/v1.5/1.6b': { + input: SchemaSanaV1516bInput + output: SchemaSanaV1516bOutput + } + 'fal-ai/sana/v1.5/4.8b': { + input: SchemaSanaV1548bInput + output: SchemaSanaV1548bOutput + } + 'fal-ai/sana/sprint': { + input: SchemaSanaSprintInput + output: SchemaSanaSprintOutput + } + 'rundiffusion-fal/juggernaut-flux/lightning': { + input: SchemaJuggernautFluxLightningInput + output: SchemaJuggernautFluxLightningOutput + } + 'rundiffusion-fal/juggernaut-flux/pro': { + input: SchemaJuggernautFluxProInput + output: SchemaJuggernautFluxProOutput + } + 'rundiffusion-fal/juggernaut-flux-lora': { + input: SchemaJuggernautFluxLoraInput + output: SchemaJuggernautFluxLoraOutput + } + 'rundiffusion-fal/rundiffusion-photo-flux': { + input: SchemaRundiffusionPhotoFluxInput + output: SchemaRundiffusionPhotoFluxOutput + } + 'rundiffusion-fal/juggernaut-flux/base': { + input: SchemaJuggernautFluxBaseInput + output: SchemaJuggernautFluxBaseOutput + } + 'fal-ai/cogview4': { + input: SchemaCogview4Input + output: SchemaCogview4Output + } + 'fal-ai/ideogram/v2a/turbo': { + input: SchemaIdeogramV2aTurboInput + output: SchemaIdeogramV2aTurboOutput + } + 'fal-ai/ideogram/v2a': { + input: SchemaIdeogramV2aInput + output: SchemaIdeogramV2aOutput + } + 'fal-ai/flux-control-lora-canny': { + input: SchemaFluxControlLoraCannyInput + output: SchemaFluxControlLoraCannyOutput + } + 'fal-ai/flux-control-lora-depth': { + input: SchemaFluxControlLoraDepthInput + output: SchemaFluxControlLoraDepthOutput + } + 'fal-ai/imagen3': { + input: SchemaImagen3Input + output: SchemaImagen3Output + } + 'fal-ai/imagen3/fast': { + input: SchemaImagen3FastInput + output: SchemaImagen3FastOutput + } + 'fal-ai/lumina-image/v2': { + input: SchemaLuminaImageV2Input + output: SchemaLuminaImageV2Output + } + 'fal-ai/janus': { + input: SchemaJanusInput + output: SchemaJanusOutput + } + 'fal-ai/flux-pro/v1.1-ultra-finetuned': { + input: SchemaFluxProV11UltraFinetunedInput + output: SchemaFluxProV11UltraFinetunedOutput + } + 'fal-ai/flux-pro/v1.1': { + input: SchemaFluxProV11Input + output: SchemaFluxProV11Output + } + 'fal-ai/switti': { + input: 
SchemaSwittiInput + output: SchemaSwittiOutput + } + 'fal-ai/switti/512': { + input: SchemaSwitti512Input + output: SchemaSwitti512Output + } + 'fal-ai/bria/text-to-image/base': { + input: SchemaBriaTextToImageBaseInput + output: SchemaBriaTextToImageBaseOutput + } + 'fal-ai/bria/text-to-image/fast': { + input: SchemaBriaTextToImageFastInput + output: SchemaBriaTextToImageFastOutput + } + 'fal-ai/bria/text-to-image/hd': { + input: SchemaBriaTextToImageHdInput + output: SchemaBriaTextToImageHdOutput + } + 'fal-ai/recraft-20b': { + input: SchemaRecraft20bInput + output: SchemaRecraft20bOutput + } + 'fal-ai/ideogram/v2/turbo': { + input: SchemaIdeogramV2TurboInput + output: SchemaIdeogramV2TurboOutput + } + 'fal-ai/luma-photon/flash': { + input: SchemaLumaPhotonFlashInput + output: SchemaLumaPhotonFlashOutput + } + 'fal-ai/aura-flow': { + input: SchemaAuraFlowInput + output: SchemaAuraFlowOutput + } + 'fal-ai/omnigen-v1': { + input: SchemaOmnigenV1Input + output: SchemaOmnigenV1Output + } + 'fal-ai/flux/schnell': { + input: SchemaFluxSchnellInput + output: SchemaFluxSchnellOutput + } + 'fal-ai/stable-diffusion-v35-medium': { + input: SchemaStableDiffusionV35MediumInput + output: SchemaStableDiffusionV35MediumOutput + } + 'fal-ai/flux-lora/inpainting': { + input: SchemaFluxLoraInpaintingInput + output: SchemaFluxLoraInpaintingOutput + } + 'fal-ai/stable-diffusion-v3-medium': { + input: SchemaStableDiffusionV3MediumInput + output: SchemaStableDiffusionV3MediumOutput + } + 'fal-ai/fooocus/upscale-or-vary': { + input: SchemaFooocusUpscaleOrVaryInput + output: SchemaFooocusUpscaleOrVaryOutput + } + 'fal-ai/sana': { + input: SchemaSanaInput + output: SchemaSanaOutput + } + 'fal-ai/flux-subject': { + input: SchemaFluxSubjectInput + output: SchemaFluxSubjectOutput + } + 'fal-ai/pixart-sigma': { + input: SchemaPixartSigmaInput + output: SchemaPixartSigmaOutput + } + 'fal-ai/sdxl-controlnet-union': { + input: SchemaSdxlControlnetUnionInput + output: SchemaSdxlControlnetUnionOutput + } + 'fal-ai/kolors': { + input: SchemaKolorsInput + output: SchemaKolorsOutput + } + 'fal-ai/stable-cascade': { + input: SchemaStableCascadeInput + output: SchemaStableCascadeOutput + } + 'fal-ai/fast-sdxl': { + input: SchemaFastSdxlInput + output: SchemaFastSdxlOutput + } + 'fal-ai/stable-cascade/sote-diffusion': { + input: SchemaStableCascadeSoteDiffusionInput + output: SchemaStableCascadeSoteDiffusionOutput + } + 'fal-ai/luma-photon': { + input: SchemaLumaPhotonInput + output: SchemaLumaPhotonOutput + } + 'fal-ai/lightning-models': { + input: SchemaLightningModelsInput + output: SchemaLightningModelsOutput + } + 'fal-ai/playground-v25': { + input: SchemaPlaygroundV25Input + output: SchemaPlaygroundV25Output + } + 'fal-ai/realistic-vision': { + input: SchemaRealisticVisionInput + output: SchemaRealisticVisionOutput + } + 'fal-ai/dreamshaper': { + input: SchemaDreamshaperInput + output: SchemaDreamshaperOutput + } + 'fal-ai/stable-diffusion-v15': { + input: SchemaStableDiffusionV15Input + output: SchemaStableDiffusionV15Output + } + 'fal-ai/layer-diffusion': { + input: SchemaLayerDiffusionInput + output: SchemaLayerDiffusionOutput + } + 'fal-ai/fast-lightning-sdxl': { + input: SchemaFastLightningSdxlInput + output: SchemaFastLightningSdxlOutput + } + 'fal-ai/fast-fooocus-sdxl/image-to-image': { + input: SchemaFastFooocusSdxlImageToImageInput + output: SchemaFastFooocusSdxlImageToImageOutput + } + 'fal-ai/fast-sdxl-controlnet-canny': { + input: SchemaFastSdxlControlnetCannyInput + output: SchemaFastSdxlControlnetCannyOutput 
+ } + 'fal-ai/fast-lcm-diffusion': { + input: SchemaFastLcmDiffusionInput + output: SchemaFastLcmDiffusionOutput + } + 'fal-ai/fast-fooocus-sdxl': { + input: SchemaFastFooocusSdxlInput + output: SchemaFastFooocusSdxlOutput + } + 'fal-ai/illusion-diffusion': { + input: SchemaIllusionDiffusionInput + output: SchemaIllusionDiffusionOutput + } + 'fal-ai/fooocus/image-prompt': { + input: SchemaFooocusImagePromptInput + output: SchemaFooocusImagePromptOutput + } + 'fal-ai/fooocus/inpaint': { + input: SchemaFooocusInpaintInput + output: SchemaFooocusInpaintOutput + } + 'fal-ai/lcm': { + input: SchemaLcmInput + output: SchemaLcmOutput + } + 'fal-ai/diffusion-edge': { + input: SchemaDiffusionEdgeInput + output: SchemaDiffusionEdgeOutput + } + 'fal-ai/fooocus': { + input: SchemaFooocusInput + output: SchemaFooocusOutput + } + 'fal-ai/lora': { + input: SchemaLoraInput + output: SchemaLoraOutput + } +} + +/** Union type of all text-to-image model endpoint IDs */ +export type TextToImageModel = keyof TextToImageEndpointMap + +export const TextToImageSchemaMap: Record< + TextToImageModel, + { input: z.ZodSchema; output: z.ZodSchema } +> = { + ['fal-ai/imagen4/preview']: { + input: zSchemaImagen4PreviewInput, + output: zSchemaImagen4PreviewOutput, + }, + ['fal-ai/flux-pro/v1.1-ultra']: { + input: zSchemaFluxProV11UltraInput, + output: zSchemaFluxProV11UltraOutput, + }, + ['fal-ai/recraft/v3/text-to-image']: { + input: zSchemaRecraftV3TextToImageInput, + output: zSchemaRecraftV3TextToImageOutput, + }, + ['fal-ai/flux-2/lora']: { + input: zSchemaFlux2LoraInput, + output: zSchemaFlux2LoraOutput, + }, + ['fal-ai/flux-2']: { + input: zSchemaFlux2Input, + output: zSchemaFlux2Output, + }, + ['fal-ai/flux-2-pro']: { + input: zSchemaFlux2ProInput, + output: zSchemaFlux2ProOutput, + }, + ['bria/text-to-image/3.2']: { + input: zSchemaTextToImage32Input, + output: zSchemaTextToImage32Output, + }, + ['fal-ai/imagen4/preview/fast']: { + input: zSchemaImagen4PreviewFastInput, + output: zSchemaImagen4PreviewFastOutput, + }, + ['fal-ai/hidream-i1-full']: { + input: zSchemaHidreamI1FullInput, + output: zSchemaHidreamI1FullOutput, + }, + ['fal-ai/hidream-i1-dev']: { + input: zSchemaHidreamI1DevInput, + output: zSchemaHidreamI1DevOutput, + }, + ['fal-ai/hidream-i1-fast']: { + input: zSchemaHidreamI1FastInput, + output: zSchemaHidreamI1FastOutput, + }, + ['fal-ai/flux/dev']: { + input: zSchemaFluxDevInput, + output: zSchemaFluxDevOutput, + }, + ['fal-ai/ideogram/v2']: { + input: zSchemaIdeogramV2Input, + output: zSchemaIdeogramV2Output, + }, + ['fal-ai/stable-diffusion-v35-large']: { + input: zSchemaStableDiffusionV35LargeInput, + output: zSchemaStableDiffusionV35LargeOutput, + }, + ['fal-ai/flux-general']: { + input: zSchemaFluxGeneralInput, + output: zSchemaFluxGeneralOutput, + }, + ['fal-ai/flux-lora']: { + input: zSchemaFluxLoraInput, + output: zSchemaFluxLoraOutput, + }, + ['fal-ai/z-image/base/lora']: { + input: zSchemaZImageBaseLoraInput, + output: zSchemaZImageBaseLoraOutput, + }, + ['fal-ai/z-image/base']: { + input: zSchemaZImageBaseInput, + output: zSchemaZImageBaseOutput, + }, + ['fal-ai/flux-2/klein/9b/base/lora']: { + input: zSchemaFlux2Klein9bBaseLoraInput, + output: zSchemaFlux2Klein9bBaseLoraOutput, + }, + ['fal-ai/flux-2/klein/4b/base/lora']: { + input: zSchemaFlux2Klein4bBaseLoraInput, + output: zSchemaFlux2Klein4bBaseLoraOutput, + }, + ['fal-ai/flux-2/klein/9b/base']: { + input: zSchemaFlux2Klein9bBaseInput, + output: zSchemaFlux2Klein9bBaseOutput, + }, + ['fal-ai/flux-2/klein/4b/base']: { + input: 
zSchemaFlux2Klein4bBaseInput, + output: zSchemaFlux2Klein4bBaseOutput, + }, + ['fal-ai/flux-2/klein/9b']: { + input: zSchemaFlux2Klein9bInput, + output: zSchemaFlux2Klein9bOutput, + }, + ['fal-ai/flux-2/klein/4b']: { + input: zSchemaFlux2Klein4bInput, + output: zSchemaFlux2Klein4bOutput, + }, + ['imagineart/imagineart-1.5-pro-preview/text-to-image']: { + input: zSchemaImagineart15ProPreviewTextToImageInput, + output: zSchemaImagineart15ProPreviewTextToImageOutput, + }, + ['fal-ai/glm-image']: { + input: zSchemaGlmImageInput, + output: zSchemaGlmImageOutput, + }, + ['fal-ai/qwen-image-2512/lora']: { + input: zSchemaQwenImage2512LoraInput, + output: zSchemaQwenImage2512LoraOutput, + }, + ['fal-ai/qwen-image-2512']: { + input: zSchemaQwenImage2512Input, + output: zSchemaQwenImage2512Output, + }, + ['wan/v2.6/text-to-image']: { + input: zSchemaV26TextToImageInput, + output: zSchemaV26TextToImageOutput, + }, + ['fal-ai/flux-2/flash']: { + input: zSchemaFlux2FlashInput, + output: zSchemaFlux2FlashOutput, + }, + ['fal-ai/gpt-image-1.5']: { + input: zSchemaGptImage15Input, + output: zSchemaGptImage15Output, + }, + ['bria/fibo-lite/generate']: { + input: zSchemaFiboLiteGenerateInput, + output: zSchemaFiboLiteGenerateOutput, + }, + ['fal-ai/flux-2/turbo']: { + input: zSchemaFlux2TurboInput, + output: zSchemaFlux2TurboOutput, + }, + ['fal-ai/flux-2-max']: { + input: zSchemaFlux2MaxInput, + output: zSchemaFlux2MaxOutput, + }, + ['fal-ai/longcat-image']: { + input: zSchemaLongcatImageInput, + output: zSchemaLongcatImageOutput, + }, + ['fal-ai/bytedance/seedream/v4.5/text-to-image']: { + input: zSchemaBytedanceSeedreamV45TextToImageInput, + output: zSchemaBytedanceSeedreamV45TextToImageOutput, + }, + ['fal-ai/vidu/q2/text-to-image']: { + input: zSchemaViduQ2TextToImageInput, + output: zSchemaViduQ2TextToImageOutput, + }, + ['fal-ai/z-image/turbo/lora']: { + input: zSchemaZImageTurboLoraInput, + output: zSchemaZImageTurboLoraOutput, + }, + ['fal-ai/ovis-image']: { + input: zSchemaOvisImageInput, + output: zSchemaOvisImageOutput, + }, + ['fal-ai/z-image/turbo']: { + input: zSchemaZImageTurboInput, + output: zSchemaZImageTurboOutput, + }, + ['fal-ai/flux-2-lora-gallery/sepia-vintage']: { + input: zSchemaFlux2LoraGallerySepiaVintageInput, + output: zSchemaFlux2LoraGallerySepiaVintageOutput, + }, + ['fal-ai/flux-2-lora-gallery/satellite-view-style']: { + input: zSchemaFlux2LoraGallerySatelliteViewStyleInput, + output: zSchemaFlux2LoraGallerySatelliteViewStyleOutput, + }, + ['fal-ai/flux-2-lora-gallery/realism']: { + input: zSchemaFlux2LoraGalleryRealismInput, + output: zSchemaFlux2LoraGalleryRealismOutput, + }, + ['fal-ai/flux-2-lora-gallery/hdr-style']: { + input: zSchemaFlux2LoraGalleryHdrStyleInput, + output: zSchemaFlux2LoraGalleryHdrStyleOutput, + }, + ['fal-ai/flux-2-lora-gallery/digital-comic-art']: { + input: zSchemaFlux2LoraGalleryDigitalComicArtInput, + output: zSchemaFlux2LoraGalleryDigitalComicArtOutput, + }, + ['fal-ai/flux-2-lora-gallery/ballpoint-pen-sketch']: { + input: zSchemaFlux2LoraGalleryBallpointPenSketchInput, + output: zSchemaFlux2LoraGalleryBallpointPenSketchOutput, + }, + ['fal-ai/flux-2-flex']: { + input: zSchemaFlux2FlexInput, + output: zSchemaFlux2FlexOutput, + }, + ['fal-ai/gemini-3-pro-image-preview']: { + input: zSchemaGemini3ProImagePreviewInput, + output: zSchemaGemini3ProImagePreviewOutput, + }, + ['fal-ai/nano-banana-pro']: { + input: zSchemaNanoBananaProInput, + output: zSchemaNanoBananaProOutput, + }, + ['imagineart/imagineart-1.5-preview/text-to-image']: { + input: 
zSchemaImagineart15PreviewTextToImageInput, + output: zSchemaImagineart15PreviewTextToImageOutput, + }, + ['fal-ai/emu-3.5-image/text-to-image']: { + input: zSchemaEmu35ImageTextToImageInput, + output: zSchemaEmu35ImageTextToImageOutput, + }, + ['bria/fibo/generate']: { + input: zSchemaFiboGenerateInput, + output: zSchemaFiboGenerateOutput, + }, + ['fal-ai/piflow']: { + input: zSchemaPiflowInput, + output: zSchemaPiflowOutput, + }, + ['fal-ai/gpt-image-1-mini']: { + input: zSchemaGptImage1MiniInput, + output: zSchemaGptImage1MiniOutput, + }, + ['fal-ai/reve/text-to-image']: { + input: zSchemaReveTextToImageInput, + output: zSchemaReveTextToImageOutput, + }, + ['fal-ai/hunyuan-image/v3/text-to-image']: { + input: zSchemaHunyuanImageV3TextToImageInput, + output: zSchemaHunyuanImageV3TextToImageOutput, + }, + ['fal-ai/wan-25-preview/text-to-image']: { + input: zSchemaWan25PreviewTextToImageInput, + output: zSchemaWan25PreviewTextToImageOutput, + }, + ['fal-ai/flux/srpo']: { + input: zSchemaFluxSrpoInput, + output: zSchemaFluxSrpoOutput, + }, + ['fal-ai/flux-1/srpo']: { + input: zSchemaFlux1SrpoInput, + output: zSchemaFlux1SrpoOutput, + }, + ['fal-ai/hunyuan-image/v2.1/text-to-image']: { + input: zSchemaHunyuanImageV21TextToImageInput, + output: zSchemaHunyuanImageV21TextToImageOutput, + }, + ['fal-ai/bytedance/seedream/v4/text-to-image']: { + input: zSchemaBytedanceSeedreamV4TextToImageInput, + output: zSchemaBytedanceSeedreamV4TextToImageOutput, + }, + ['fal-ai/gemini-25-flash-image']: { + input: zSchemaGemini25FlashImageInput, + output: zSchemaGemini25FlashImageOutput, + }, + ['fal-ai/nano-banana']: { + input: zSchemaNanoBananaInput, + output: zSchemaNanoBananaOutput, + }, + ['fal-ai/bytedance/dreamina/v3.1/text-to-image']: { + input: zSchemaBytedanceDreaminaV31TextToImageInput, + output: zSchemaBytedanceDreaminaV31TextToImageOutput, + }, + ['fal-ai/wan/v2.2-a14b/text-to-image/lora']: { + input: zSchemaWanV22A14bTextToImageLoraInput, + output: zSchemaWanV22A14bTextToImageLoraOutput, + }, + ['fal-ai/wan/v2.2-5b/text-to-image']: { + input: zSchemaWanV225bTextToImageInput, + output: zSchemaWanV225bTextToImageOutput, + }, + ['fal-ai/wan/v2.2-a14b/text-to-image']: { + input: zSchemaWanV22A14bTextToImageInput, + output: zSchemaWanV22A14bTextToImageOutput, + }, + ['fal-ai/qwen-image']: { + input: zSchemaQwenImageInput, + output: zSchemaQwenImageOutput, + }, + ['fal-ai/flux-krea-lora/stream']: { + input: zSchemaFluxKreaLoraStreamInput, + output: zSchemaFluxKreaLoraStreamOutput, + }, + ['fal-ai/flux-krea-lora']: { + input: zSchemaFluxKreaLoraInput, + output: zSchemaFluxKreaLoraOutput, + }, + ['fal-ai/flux/krea']: { + input: zSchemaFluxKreaInput, + output: zSchemaFluxKreaOutput, + }, + ['fal-ai/flux-1/krea']: { + input: zSchemaFlux1KreaInput, + output: zSchemaFlux1KreaOutput, + }, + ['fal-ai/sky-raccoon']: { + input: zSchemaSkyRaccoonInput, + output: zSchemaSkyRaccoonOutput, + }, + ['fal-ai/flux-kontext-lora/text-to-image']: { + input: zSchemaFluxKontextLoraTextToImageInput, + output: zSchemaFluxKontextLoraTextToImageOutput, + }, + ['fal-ai/omnigen-v2']: { + input: zSchemaOmnigenV2Input, + output: zSchemaOmnigenV2Output, + }, + ['fal-ai/bytedance/seedream/v3/text-to-image']: { + input: zSchemaBytedanceSeedreamV3TextToImageInput, + output: zSchemaBytedanceSeedreamV3TextToImageOutput, + }, + ['fal-ai/flux-1/schnell']: { + input: zSchemaFlux1SchnellInput, + output: zSchemaFlux1SchnellOutput, + }, + ['fal-ai/flux-1/dev']: { + input: zSchemaFlux1DevInput, + output: zSchemaFlux1DevOutput, + }, + 
['fal-ai/flux-pro/kontext/max/text-to-image']: { + input: zSchemaFluxProKontextMaxTextToImageInput, + output: zSchemaFluxProKontextMaxTextToImageOutput, + }, + ['fal-ai/flux-pro/kontext/text-to-image']: { + input: zSchemaFluxProKontextTextToImageInput, + output: zSchemaFluxProKontextTextToImageOutput, + }, + ['fal-ai/bagel']: { + input: zSchemaBagelInput, + output: zSchemaBagelOutput, + }, + ['fal-ai/imagen4/preview/ultra']: { + input: zSchemaImagen4PreviewUltraInput, + output: zSchemaImagen4PreviewUltraOutput, + }, + ['fal-ai/dreamo']: { + input: zSchemaDreamoInput, + output: zSchemaDreamoOutput, + }, + ['fal-ai/flux-lora/stream']: { + input: zSchemaFluxLoraStreamInput, + output: zSchemaFluxLoraStreamOutput, + }, + ['fal-ai/minimax/image-01']: { + input: zSchemaMinimaxImage01Input, + output: zSchemaMinimaxImage01Output, + }, + ['fal-ai/pony-v7']: { + input: zSchemaPonyV7Input, + output: zSchemaPonyV7Output, + }, + ['fal-ai/ideogram/v3']: { + input: zSchemaIdeogramV3Input, + output: zSchemaIdeogramV3Output, + }, + ['fal-ai/f-lite/standard']: { + input: zSchemaFLiteStandardInput, + output: zSchemaFLiteStandardOutput, + }, + ['fal-ai/f-lite/texture']: { + input: zSchemaFLiteTextureInput, + output: zSchemaFLiteTextureOutput, + }, + ['fal-ai/gpt-image-1/text-to-image']: { + input: zSchemaGptImage1TextToImageInput, + output: zSchemaGptImage1TextToImageOutput, + }, + ['fal-ai/sana/v1.5/1.6b']: { + input: zSchemaSanaV1516bInput, + output: zSchemaSanaV1516bOutput, + }, + ['fal-ai/sana/v1.5/4.8b']: { + input: zSchemaSanaV1548bInput, + output: zSchemaSanaV1548bOutput, + }, + ['fal-ai/sana/sprint']: { + input: zSchemaSanaSprintInput, + output: zSchemaSanaSprintOutput, + }, + ['rundiffusion-fal/juggernaut-flux/lightning']: { + input: zSchemaJuggernautFluxLightningInput, + output: zSchemaJuggernautFluxLightningOutput, + }, + ['rundiffusion-fal/juggernaut-flux/pro']: { + input: zSchemaJuggernautFluxProInput, + output: zSchemaJuggernautFluxProOutput, + }, + ['rundiffusion-fal/juggernaut-flux-lora']: { + input: zSchemaJuggernautFluxLoraInput, + output: zSchemaJuggernautFluxLoraOutput, + }, + ['rundiffusion-fal/rundiffusion-photo-flux']: { + input: zSchemaRundiffusionPhotoFluxInput, + output: zSchemaRundiffusionPhotoFluxOutput, + }, + ['rundiffusion-fal/juggernaut-flux/base']: { + input: zSchemaJuggernautFluxBaseInput, + output: zSchemaJuggernautFluxBaseOutput, + }, + ['fal-ai/cogview4']: { + input: zSchemaCogview4Input, + output: zSchemaCogview4Output, + }, + ['fal-ai/ideogram/v2a/turbo']: { + input: zSchemaIdeogramV2aTurboInput, + output: zSchemaIdeogramV2aTurboOutput, + }, + ['fal-ai/ideogram/v2a']: { + input: zSchemaIdeogramV2aInput, + output: zSchemaIdeogramV2aOutput, + }, + ['fal-ai/flux-control-lora-canny']: { + input: zSchemaFluxControlLoraCannyInput, + output: zSchemaFluxControlLoraCannyOutput, + }, + ['fal-ai/flux-control-lora-depth']: { + input: zSchemaFluxControlLoraDepthInput, + output: zSchemaFluxControlLoraDepthOutput, + }, + ['fal-ai/imagen3']: { + input: zSchemaImagen3Input, + output: zSchemaImagen3Output, + }, + ['fal-ai/imagen3/fast']: { + input: zSchemaImagen3FastInput, + output: zSchemaImagen3FastOutput, + }, + ['fal-ai/lumina-image/v2']: { + input: zSchemaLuminaImageV2Input, + output: zSchemaLuminaImageV2Output, + }, + ['fal-ai/janus']: { + input: zSchemaJanusInput, + output: zSchemaJanusOutput, + }, + ['fal-ai/flux-pro/v1.1-ultra-finetuned']: { + input: zSchemaFluxProV11UltraFinetunedInput, + output: zSchemaFluxProV11UltraFinetunedOutput, + }, + ['fal-ai/flux-pro/v1.1']: { + input: 
zSchemaFluxProV11Input, + output: zSchemaFluxProV11Output, + }, + ['fal-ai/switti']: { + input: zSchemaSwittiInput, + output: zSchemaSwittiOutput, + }, + ['fal-ai/switti/512']: { + input: zSchemaSwitti512Input, + output: zSchemaSwitti512Output, + }, + ['fal-ai/bria/text-to-image/base']: { + input: zSchemaBriaTextToImageBaseInput, + output: zSchemaBriaTextToImageBaseOutput, + }, + ['fal-ai/bria/text-to-image/fast']: { + input: zSchemaBriaTextToImageFastInput, + output: zSchemaBriaTextToImageFastOutput, + }, + ['fal-ai/bria/text-to-image/hd']: { + input: zSchemaBriaTextToImageHdInput, + output: zSchemaBriaTextToImageHdOutput, + }, + ['fal-ai/recraft-20b']: { + input: zSchemaRecraft20bInput, + output: zSchemaRecraft20bOutput, + }, + ['fal-ai/ideogram/v2/turbo']: { + input: zSchemaIdeogramV2TurboInput, + output: zSchemaIdeogramV2TurboOutput, + }, + ['fal-ai/luma-photon/flash']: { + input: zSchemaLumaPhotonFlashInput, + output: zSchemaLumaPhotonFlashOutput, + }, + ['fal-ai/aura-flow']: { + input: zSchemaAuraFlowInput, + output: zSchemaAuraFlowOutput, + }, + ['fal-ai/omnigen-v1']: { + input: zSchemaOmnigenV1Input, + output: zSchemaOmnigenV1Output, + }, + ['fal-ai/flux/schnell']: { + input: zSchemaFluxSchnellInput, + output: zSchemaFluxSchnellOutput, + }, + ['fal-ai/stable-diffusion-v35-medium']: { + input: zSchemaStableDiffusionV35MediumInput, + output: zSchemaStableDiffusionV35MediumOutput, + }, + ['fal-ai/flux-lora/inpainting']: { + input: zSchemaFluxLoraInpaintingInput, + output: zSchemaFluxLoraInpaintingOutput, + }, + ['fal-ai/stable-diffusion-v3-medium']: { + input: zSchemaStableDiffusionV3MediumInput, + output: zSchemaStableDiffusionV3MediumOutput, + }, + ['fal-ai/fooocus/upscale-or-vary']: { + input: zSchemaFooocusUpscaleOrVaryInput, + output: zSchemaFooocusUpscaleOrVaryOutput, + }, + ['fal-ai/sana']: { + input: zSchemaSanaInput, + output: zSchemaSanaOutput, + }, + ['fal-ai/flux-subject']: { + input: zSchemaFluxSubjectInput, + output: zSchemaFluxSubjectOutput, + }, + ['fal-ai/pixart-sigma']: { + input: zSchemaPixartSigmaInput, + output: zSchemaPixartSigmaOutput, + }, + ['fal-ai/sdxl-controlnet-union']: { + input: zSchemaSdxlControlnetUnionInput, + output: zSchemaSdxlControlnetUnionOutput, + }, + ['fal-ai/kolors']: { + input: zSchemaKolorsInput, + output: zSchemaKolorsOutput, + }, + ['fal-ai/stable-cascade']: { + input: zSchemaStableCascadeInput, + output: zSchemaStableCascadeOutput, + }, + ['fal-ai/fast-sdxl']: { + input: zSchemaFastSdxlInput, + output: zSchemaFastSdxlOutput, + }, + ['fal-ai/stable-cascade/sote-diffusion']: { + input: zSchemaStableCascadeSoteDiffusionInput, + output: zSchemaStableCascadeSoteDiffusionOutput, + }, + ['fal-ai/luma-photon']: { + input: zSchemaLumaPhotonInput, + output: zSchemaLumaPhotonOutput, + }, + ['fal-ai/lightning-models']: { + input: zSchemaLightningModelsInput, + output: zSchemaLightningModelsOutput, + }, + ['fal-ai/playground-v25']: { + input: zSchemaPlaygroundV25Input, + output: zSchemaPlaygroundV25Output, + }, + ['fal-ai/realistic-vision']: { + input: zSchemaRealisticVisionInput, + output: zSchemaRealisticVisionOutput, + }, + ['fal-ai/dreamshaper']: { + input: zSchemaDreamshaperInput, + output: zSchemaDreamshaperOutput, + }, + ['fal-ai/stable-diffusion-v15']: { + input: zSchemaStableDiffusionV15Input, + output: zSchemaStableDiffusionV15Output, + }, + ['fal-ai/layer-diffusion']: { + input: zSchemaLayerDiffusionInput, + output: zSchemaLayerDiffusionOutput, + }, + ['fal-ai/fast-lightning-sdxl']: { + input: zSchemaFastLightningSdxlInput, + output: 
zSchemaFastLightningSdxlOutput, + }, + ['fal-ai/fast-fooocus-sdxl/image-to-image']: { + input: zSchemaFastFooocusSdxlImageToImageInput, + output: zSchemaFastFooocusSdxlImageToImageOutput, + }, + ['fal-ai/fast-sdxl-controlnet-canny']: { + input: zSchemaFastSdxlControlnetCannyInput, + output: zSchemaFastSdxlControlnetCannyOutput, + }, + ['fal-ai/fast-lcm-diffusion']: { + input: zSchemaFastLcmDiffusionInput, + output: zSchemaFastLcmDiffusionOutput, + }, + ['fal-ai/fast-fooocus-sdxl']: { + input: zSchemaFastFooocusSdxlInput, + output: zSchemaFastFooocusSdxlOutput, + }, + ['fal-ai/illusion-diffusion']: { + input: zSchemaIllusionDiffusionInput, + output: zSchemaIllusionDiffusionOutput, + }, + ['fal-ai/fooocus/image-prompt']: { + input: zSchemaFooocusImagePromptInput, + output: zSchemaFooocusImagePromptOutput, + }, + ['fal-ai/fooocus/inpaint']: { + input: zSchemaFooocusInpaintInput, + output: zSchemaFooocusInpaintOutput, + }, + ['fal-ai/lcm']: { + input: zSchemaLcmInput, + output: zSchemaLcmOutput, + }, + ['fal-ai/diffusion-edge']: { + input: zSchemaDiffusionEdgeInput, + output: zSchemaDiffusionEdgeOutput, + }, + ['fal-ai/fooocus']: { + input: zSchemaFooocusInput, + output: zSchemaFooocusOutput, + }, + ['fal-ai/lora']: { + input: zSchemaLoraInput, + output: zSchemaLoraOutput, + }, +} as const + +/** Get the input type for a specific text-to-image model */ +export type TextToImageModelInput<T extends TextToImageModel> = + TextToImageEndpointMap[T]['input'] + +/** Get the output type for a specific text-to-image model */ +export type TextToImageModelOutput<T extends TextToImageModel> = + TextToImageEndpointMap[T]['output']
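Taken together, the two maps in this generated file pair every endpoint id with compile-time types (`TextToImageEndpointMap`) and runtime zod schemas (`TextToImageSchemaMap`) under identical keys, so a caller can stay generic over the model while still validating payloads at runtime. Below is a minimal sketch of such a caller, assuming the synchronous `https://fal.run/<endpoint>` URL shape and a `Key`-prefixed Authorization header; the `callTextToImage` helper itself is illustrative and not an export of this package.

import { TextToImageSchemaMap } from './endpoint-map'
import type {
  TextToImageModel,
  TextToImageModelInput,
  TextToImageModelOutput,
} from './endpoint-map'

// Sketch: a generic, runtime-validated caller built on the generated maps.
async function callTextToImage<T extends TextToImageModel>(
  model: T,
  input: TextToImageModelInput<T>,
): Promise<TextToImageModelOutput<T>> {
  const schemas = TextToImageSchemaMap[model]
  // Reject malformed payloads before they reach the network.
  const payload = schemas.input.parse(input)
  const res = await fetch(`https://fal.run/${model}`, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      // Assumed auth header format; note the queue-based flow at
      // queue.fal.run responds with request status first, not the output.
      Authorization: `Key ${process.env.FAL_KEY}`,
    },
    body: JSON.stringify(payload),
  })
  // Parse the response so the static output type can be trusted at runtime.
  return schemas.output.parse(await res.json())
}

// Usage: the input literal is statically checked against SchemaFluxDevInput.
const result = await callTextToImage('fal-ai/flux/dev', { prompt: 'a red fox at dawn' })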
diff --git a/packages/typescript/ai-fal/src/generated/text-to-image/types.gen.ts b/packages/typescript/ai-fal/src/generated/text-to-image/types.gen.ts new file mode 100644 index 00000000..f477c74d --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/text-to-image/types.gen.ts @@ -0,0 +1,33650 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: 'https://queue.fal.run' | (string & {}) +} + +/** + * TimestepsInput + */ +export type SchemaTimestepsInput = { + /** + * Method + * + * + * The method to use for the timesteps. If set to 'array', the timesteps will be set based + * on the provided timesteps schedule in the `array` field. + * Defaults to 'default' which means the scheduler will use the `num_inference_steps` parameter. + * + */ + method?: 'default' | 'array' + /** + * Array + * + * + * Timesteps schedule to be used if the 'array' method is selected. + * + */ + array?: Array<number> +} + +/** + * SigmasInput + */ +export type SchemaSigmasInput = { + /** + * Method + * + * + * The method to use for the sigmas. If set to 'array', the sigmas will be set based + * on the provided sigmas schedule in the `array` field. + * Defaults to 'default' which means the scheduler will use the sigmas of the scheduler. + * + */ + method?: 'default' | 'array' + /** + * Array + * + * + * Sigmas schedule to be used if the 'array' method is selected. + * + */ + array?: Array<number> +} + +/** + * OutputParameters + */ +export type SchemaLoraOutput = { + /** + * Images + * + * The generated image files info. + */ + images: Array<SchemaImage> + /** + * Debug Latents + * + * The latents saved for debugging. + */ + debug_latents?: SchemaFile + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array<boolean> + /** + * Debug Per Pass Latents + * + * The latents saved for debugging per pass. + */ + debug_per_pass_latents?: SchemaFile +} + +/** + * File + */ +export type SchemaFile = { + /** + * File Size + * + * The size of the file in bytes. + */ + file_size?: number + /** + * File Name + * + * The name of the file. It will be auto-generated if not provided. + */ + file_name?: string + /** + * Content Type + * + * The mime type of the file. + */ + content_type?: string + /** + * Url + * + * The URL where the file can be downloaded from. + */ + url: string + /** + * File Data + * + * File data + */ + file_data?: Blob | File +} + +/** + * Image + * + * Represents an image file. + */ +export type SchemaImage = { + /** + * File Size + * + * The size of the file in bytes. + */ + file_size?: number + /** + * Height + * + * The height of the image in pixels. + */ + height?: number + /** + * Url + * + * The URL where the file can be downloaded from. + */ + url: string + /** + * Width + * + * The width of the image in pixels. + */ + width?: number + /** + * File Name + * + * The name of the file. It will be auto-generated if not provided. + */ + file_name?: string + /** + * Content Type + * + * The mime type of the file. + */ + content_type?: string + /** + * File Data + * + * File data + */ + file_data?: Blob | File +} + +/** + * TextToImageInput + */ +export type SchemaLoraInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Image Size + * + * + * The size of the generated image. You can choose between some presets or custom height and width + * that **must be multiples of 8**. + * + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Tile Height + * + * The size of the tiles to be used for the image generation. + */ + tile_height?: number + /** + * Embeddings + * + * + * The embeddings to use for the image generation. Only a single embedding is supported at the moment. + * The embeddings will be used to map the tokens in the prompt to the embedding weights. + * + */ + embeddings?: Array<SchemaEmbedding> + /** + * Ic Light Model Url + * + * + * The URL of the IC Light model to use for the image generation. + * + */ + ic_light_model_url?: string + /** + * Image Encoder Weight Name + * + * + * The weight name of the image encoder model to use for the image generation. + * + */ + image_encoder_weight_name?: string + /** + * Ip Adapter + * + * + * The IP adapter to use for the image generation. + * + */ + ip_adapter?: Array<SchemaIpAdapter> + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use any number of LoRAs + * and they will be merged together to generate the final image. + * + */ + loras?: Array<SchemaLoraWeight> + /** + * Scheduler + * + * Scheduler / sampler to use for the image denoising process. + */ + scheduler?: + | 'DPM++ 2M' + | 'DPM++ 2M Karras' + | 'DPM++ 2M SDE' + | 'DPM++ 2M SDE Karras' + | 'Euler' + | 'Euler A' + | 'Euler (trailing timesteps)' + | 'LCM' + | 'LCM (trailing timesteps)' + | 'DDIM' + | 'TCD' + /** + * Sigmas + * + * + * Optionally override the sigmas to use for the denoising process. Only works with schedulers which support the `sigmas` argument in their `set_sigmas` method. + * Defaults to not overriding, in which case the scheduler automatically sets the sigmas based on the `num_inference_steps` parameter.
+ * If set to a custom sigma schedule, the `num_inference_steps` parameter will be ignored. Cannot be set if `timesteps` is set. + * + */ + sigmas?: SchemaSigmasInput + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Tile Stride Width + * + * The stride of the tiles to be used for the image generation. + */ + tile_stride_width?: number + /** + * Debug Per Pass Latents + * + * If set to true, the latents will be saved for debugging per pass. + */ + debug_per_pass_latents?: boolean + /** + * Timesteps + * + * + * Optionally override the timesteps to use for the denoising process. Only works with schedulers which support the `timesteps` argument in their `set_timesteps` method. + * Defaults to not overriding, in which case the scheduler automatically sets the timesteps based on the `num_inference_steps` parameter. + * If set to a custom timestep schedule, the `num_inference_steps` parameter will be ignored. Cannot be set if `sigmas` is set. + * + */ + timesteps?: SchemaTimestepsInput + /** + * Image Encoder Subfolder + * + * + * The subfolder of the image encoder model to use for the image generation. + * + */ + image_encoder_subfolder?: string + /** + * Prompt Weighting + * + * + * If set to true, the prompt weighting syntax will be used. + * Additionally, this will lift the 77 token limit by averaging embeddings. + * + */ + prompt_weighting?: boolean + /** + * Variant + * + * The variant of the model to use for huggingface models, e.g. 'fp16'. + */ + variant?: string + /** + * Model Name + * + * URL or HuggingFace ID of the base model to generate the image. + */ + model_name: string + /** + * Controlnet Guess Mode + * + * + * If set to true, the controlnet will be applied to only the conditional predictions. + * + */ + controlnet_guess_mode?: boolean + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number + /** + * Ic Light Model Background Image Url + * + * + * The URL of the IC Light model background image to use for the image generation. + * Make sure to use a background compatible with the model. + * + */ + ic_light_model_background_image_url?: string + /** + * Rescale Betas Snr Zero + * + * + * Whether to set the rescale_betas_snr_zero option or not for the sampler. + * + */ + rescale_betas_snr_zero?: boolean + /** + * Tile Width + * + * The size of the tiles to be used for the image generation. + */ + tile_width?: number + /** + * Prediction Type + * + * + * The type of prediction to use for the image generation. + * The `epsilon` is the default. + * + */ + prediction_type?: 'v_prediction' | 'epsilon' + /** + * Eta + * + * The eta value to be used for the image generation. + */ + eta?: number + /** + * Image Encoder Path + * + * + * The path to the image encoder model to use for the image generation. + * + */ + image_encoder_path?: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution).
+ * + */ + negative_prompt?: string + /** + * Image Format + * + * The format of the generated image. + */ + image_format?: 'jpeg' | 'png' + /** + * Number of images + * + * + * Number of images to generate in one request. Note that the higher the batch size, + * the longer it will take to generate the images. + * + */ + num_images?: number + /** + * Debug Latents + * + * If set to true, the latents will be saved for debugging. + */ + debug_latents?: boolean + /** + * Ic Light Image Url + * + * + * The URL of the IC Light model image to use for the image generation. + * + */ + ic_light_image_url?: string + /** + * Unet Name + * + * URL or HuggingFace ID of the custom U-Net model to use for the image generation. + */ + unet_name?: string + /** + * Clip Skip + * + * + * Skips part of the image generation process, leading to slightly different results. + * This means the image renders faster, too. + * + */ + clip_skip?: number + /** + * Tile Stride Height + * + * The stride of the tiles to be used for the image generation. + */ + tile_stride_height?: number + /** + * Controlnets + * + * + * The control nets to use for the image generation. You can use any number of control nets + * and they will be applied to the image at the specified timesteps. + * + */ + controlnets?: Array<SchemaControlNet> + /** + * Number of inference steps + * + * + * Increasing the amount of steps tells Stable Diffusion that it should take more steps + * to generate your final result which can increase the amount of detail in your image. + * + */ + num_inference_steps?: number +}
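For a concrete sense of `SchemaLoraInput` and the `SchemaControlNet` and `SchemaLoraWeight` shapes defined immediately after it, here is an illustrative request literal; the weight and image URLs are placeholders, and only `prompt` and `model_name` are required:

import type { SchemaLoraInput } from './types.gen'

// Hypothetical request for the 'fal-ai/lora' endpoint; URLs and the
// controlnet repo key below are placeholders, not real artifacts.
const input: SchemaLoraInput = {
  prompt: 'portrait of an astronaut in a garden, detailed, photographic',
  // Base model as a HuggingFace ID (a URL also works per the field docs).
  model_name: 'stabilityai/stable-diffusion-xl-base-1.0',
  image_size: { width: 1024, height: 1024 }, // custom sizes must be multiples of 8
  loras: [{ path: 'https://example.com/my-style-lora.safetensors', scale: 0.8 }],
  controlnets: [
    {
      path: 'diffusers/controlnet-canny-sdxl-1.0',
      image_url: 'https://example.com/edges.png',
      conditioning_scale: 0.7,
    },
  ],
  // Override the scheduler's schedule; num_inference_steps is then ignored.
  timesteps: { method: 'array', array: [999, 893, 777, 653, 501, 329, 123] },
  num_images: 1,
}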
+ */ + ip_adapter_image_url: string | Array + /** + * Path + * + * URL or the path to the IP adapter weights. + */ + path: string + /** + * Image Projection Shortcut + * + * + * The value to set the image projection shortcut to. For FaceID plus V1 models, + * this should be set to False. For FaceID plus V2 models, this should be set to True. + * Default is True. + * + */ + image_projection_shortcut?: boolean + /** + * Scale Json + * + * + * The scale of the IP adapter weight. This is used to scale the IP adapter weight + * before merging it with the base model. + * + */ + scale_json?: { + [key: string]: unknown + } + /** + * Ip Adapter Mask Url + * + * + * The mask to use for the IP adapter. When using a mask, the ip-adapter image size and the mask size must be the same + * + */ + ip_adapter_mask_url?: string + /** + * Model Subfolder + * + * Subfolder in the model directory where the IP adapter weights are stored. + */ + model_subfolder?: string + /** + * Scale + * + * + * The scale of the IP adapter weight. This is used to scale the IP adapter weight + * before merging it with the base model. + * + */ + scale?: number + /** + * Insight Face Model Path + * + * URL or the path to the InsightFace model weights. + */ + insight_face_model_path?: string + /** + * Weight Name + * + * Name of the weight file. + */ + weight_name?: string +} + +/** + * Embedding + */ +export type SchemaEmbedding = { + /** + * Tokens + * + * + * The tokens to map the embedding weights to. Use these tokens in your prompts. + * + */ + tokens?: Array + /** + * Path + * + * URL or the path to the embedding weights. + */ + path: string +} + +/** + * ImageSize + */ +export type SchemaImageSize = { + /** + * Height + * + * The height of the generated image. + */ + height?: number + /** + * Width + * + * The width of the generated image. + */ + width?: number +} + +/** + * FooocusOutput + */ +export type SchemaFooocusOutput = { + /** + * Images + * + * The generated image file info. + */ + images: Array + /** + * Timings + * + * The time taken for the generation process. + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array +} + +/** + * FooocusLegacyInput + */ +export type SchemaFooocusInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt?: string + /** + * Performance + * + * + * You can choose Speed or Quality + * + */ + performance?: 'Speed' | 'Quality' | 'Extreme Speed' | 'Lightning' + /** + * Styles + * + * + * The style to use. 
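+ * + * For example (illustrative values, drawn from the union below): + * styles: ['Fooocus V2', 'SAI Anime'] applies both presets to a single generation.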
+ * + */ + styles?: Array< + | 'Fooocus V2' + | 'Fooocus Enhance' + | 'Fooocus Sharp' + | 'Fooocus Semi Realistic' + | 'Fooocus Masterpiece' + | 'Fooocus Photograph' + | 'Fooocus Negative' + | 'Fooocus Cinematic' + | 'SAI 3D Model' + | 'SAI Analog Film' + | 'SAI Anime' + | 'SAI Cinematic' + | 'SAI Comic Book' + | 'SAI Craft Clay' + | 'SAI Digital Art' + | 'SAI Enhance' + | 'SAI Fantasy Art' + | 'SAI Isometric' + | 'SAI Line Art' + | 'SAI Lowpoly' + | 'SAI Neonpunk' + | 'SAI Origami' + | 'SAI Photographic' + | 'SAI Pixel Art' + | 'SAI Texture' + | 'MRE Cinematic Dynamic' + | 'MRE Spontaneous Picture' + | 'MRE Artistic Vision' + | 'MRE Dark Dream' + | 'MRE Gloomy Art' + | 'MRE Bad Dream' + | 'MRE Underground' + | 'MRE Surreal Painting' + | 'MRE Dynamic Illustration' + | 'MRE Undead Art' + | 'MRE Elemental Art' + | 'MRE Space Art' + | 'MRE Ancient Illustration' + | 'MRE Brave Art' + | 'MRE Heroic Fantasy' + | 'MRE Dark Cyberpunk' + | 'MRE Lyrical Geometry' + | 'MRE Sumi E Symbolic' + | 'MRE Sumi E Detailed' + | 'MRE Manga' + | 'MRE Anime' + | 'MRE Comic' + | 'Ads Advertising' + | 'Ads Automotive' + | 'Ads Corporate' + | 'Ads Fashion Editorial' + | 'Ads Food Photography' + | 'Ads Gourmet Food Photography' + | 'Ads Luxury' + | 'Ads Real Estate' + | 'Ads Retail' + | 'Artstyle Abstract' + | 'Artstyle Abstract Expressionism' + | 'Artstyle Art Deco' + | 'Artstyle Art Nouveau' + | 'Artstyle Constructivist' + | 'Artstyle Cubist' + | 'Artstyle Expressionist' + | 'Artstyle Graffiti' + | 'Artstyle Hyperrealism' + | 'Artstyle Impressionist' + | 'Artstyle Pointillism' + | 'Artstyle Pop Art' + | 'Artstyle Psychedelic' + | 'Artstyle Renaissance' + | 'Artstyle Steampunk' + | 'Artstyle Surrealist' + | 'Artstyle Typography' + | 'Artstyle Watercolor' + | 'Futuristic Biomechanical' + | 'Futuristic Biomechanical Cyberpunk' + | 'Futuristic Cybernetic' + | 'Futuristic Cybernetic Robot' + | 'Futuristic Cyberpunk Cityscape' + | 'Futuristic Futuristic' + | 'Futuristic Retro Cyberpunk' + | 'Futuristic Retro Futurism' + | 'Futuristic Sci Fi' + | 'Futuristic Vaporwave' + | 'Game Bubble Bobble' + | 'Game Cyberpunk Game' + | 'Game Fighting Game' + | 'Game Gta' + | 'Game Mario' + | 'Game Minecraft' + | 'Game Pokemon' + | 'Game Retro Arcade' + | 'Game Retro Game' + | 'Game Rpg Fantasy Game' + | 'Game Strategy Game' + | 'Game Streetfighter' + | 'Game Zelda' + | 'Misc Architectural' + | 'Misc Disco' + | 'Misc Dreamscape' + | 'Misc Dystopian' + | 'Misc Fairy Tale' + | 'Misc Gothic' + | 'Misc Grunge' + | 'Misc Horror' + | 'Misc Kawaii' + | 'Misc Lovecraftian' + | 'Misc Macabre' + | 'Misc Manga' + | 'Misc Metropolis' + | 'Misc Minimalist' + | 'Misc Monochrome' + | 'Misc Nautical' + | 'Misc Space' + | 'Misc Stained Glass' + | 'Misc Techwear Fashion' + | 'Misc Tribal' + | 'Misc Zentangle' + | 'Papercraft Collage' + | 'Papercraft Flat Papercut' + | 'Papercraft Kirigami' + | 'Papercraft Paper Mache' + | 'Papercraft Paper Quilling' + | 'Papercraft Papercut Collage' + | 'Papercraft Papercut Shadow Box' + | 'Papercraft Stacked Papercut' + | 'Papercraft Thick Layered Papercut' + | 'Photo Alien' + | 'Photo Film Noir' + | 'Photo Glamour' + | 'Photo Hdr' + | 'Photo Iphone Photographic' + | 'Photo Long Exposure' + | 'Photo Neon Noir' + | 'Photo Silhouette' + | 'Photo Tilt Shift' + | 'Cinematic Diva' + | 'Abstract Expressionism' + | 'Academia' + | 'Action Figure' + | 'Adorable 3D Character' + | 'Adorable Kawaii' + | 'Art Deco' + | 'Art Nouveau' + | 'Astral Aura' + | 'Avant Garde' + | 'Baroque' + | 'Bauhaus Style Poster' + | 'Blueprint 
Schematic Drawing' + | 'Caricature' + | 'Cel Shaded Art' + | 'Character Design Sheet' + | 'Classicism Art' + | 'Color Field Painting' + | 'Colored Pencil Art' + | 'Conceptual Art' + | 'Constructivism' + | 'Cubism' + | 'Dadaism' + | 'Dark Fantasy' + | 'Dark Moody Atmosphere' + | 'Dmt Art Style' + | 'Doodle Art' + | 'Double Exposure' + | 'Dripping Paint Splatter Art' + | 'Expressionism' + | 'Faded Polaroid Photo' + | 'Fauvism' + | 'Flat 2d Art' + | 'Fortnite Art Style' + | 'Futurism' + | 'Glitchcore' + | 'Glo Fi' + | 'Googie Art Style' + | 'Graffiti Art' + | 'Harlem Renaissance Art' + | 'High Fashion' + | 'Idyllic' + | 'Impressionism' + | 'Infographic Drawing' + | 'Ink Dripping Drawing' + | 'Japanese Ink Drawing' + | 'Knolling Photography' + | 'Light Cheery Atmosphere' + | 'Logo Design' + | 'Luxurious Elegance' + | 'Macro Photography' + | 'Mandola Art' + | 'Marker Drawing' + | 'Medievalism' + | 'Minimalism' + | 'Neo Baroque' + | 'Neo Byzantine' + | 'Neo Futurism' + | 'Neo Impressionism' + | 'Neo Rococo' + | 'Neoclassicism' + | 'Op Art' + | 'Ornate And Intricate' + | 'Pencil Sketch Drawing' + | 'Pop Art 2' + | 'Rococo' + | 'Silhouette Art' + | 'Simple Vector Art' + | 'Sketchup' + | 'Steampunk 2' + | 'Surrealism' + | 'Suprematism' + | 'Terragen' + | 'Tranquil Relaxing Atmosphere' + | 'Sticker Designs' + | 'Vibrant Rim Light' + | 'Volumetric Lighting' + | 'Watercolor 2' + | 'Whimsical And Playful' + | 'Mk Chromolithography' + | 'Mk Cross Processing Print' + | 'Mk Dufaycolor Photograph' + | 'Mk Herbarium' + | 'Mk Punk Collage' + | 'Mk Mosaic' + | 'Mk Van Gogh' + | 'Mk Coloring Book' + | 'Mk Singer Sargent' + | 'Mk Pollock' + | 'Mk Basquiat' + | 'Mk Andy Warhol' + | 'Mk Halftone Print' + | 'Mk Gond Painting' + | 'Mk Albumen Print' + | 'Mk Aquatint Print' + | 'Mk Anthotype Print' + | 'Mk Inuit Carving' + | 'Mk Bromoil Print' + | 'Mk Calotype Print' + | 'Mk Color Sketchnote' + | 'Mk Cibulak Porcelain' + | 'Mk Alcohol Ink Art' + | 'Mk One Line Art' + | 'Mk Blacklight Paint' + | 'Mk Carnival Glass' + | 'Mk Cyanotype Print' + | 'Mk Cross Stitching' + | 'Mk Encaustic Paint' + | 'Mk Embroidery' + | 'Mk Gyotaku' + | 'Mk Luminogram' + | 'Mk Lite Brite Art' + | 'Mk Mokume Gane' + | 'Pebble Art' + | 'Mk Palekh' + | 'Mk Suminagashi' + | 'Mk Scrimshaw' + | 'Mk Shibori' + | 'Mk Vitreous Enamel' + | 'Mk Ukiyo E' + | 'Mk Vintage Airline Poster' + | 'Mk Vintage Travel Poster' + | 'Mk Bauhaus Style' + | 'Mk Afrofuturism' + | 'Mk Atompunk' + | 'Mk Constructivism' + | 'Mk Chicano Art' + | 'Mk De Stijl' + | 'Mk Dayak Art' + | 'Mk Fayum Portrait' + | 'Mk Illuminated Manuscript' + | 'Mk Kalighat Painting' + | 'Mk Madhubani Painting' + | 'Mk Pictorialism' + | 'Mk Pichwai Painting' + | 'Mk Patachitra Painting' + | 'Mk Samoan Art Inspired' + | 'Mk Tlingit Art' + | 'Mk Adnate Style' + | 'Mk Ron English Style' + | 'Mk Shepard Fairey Style' + > + /** + * Control Type + * + * The type of image control + */ + control_type?: 'ImagePrompt' | 'PyraCanny' | 'CPDS' | 'FaceSwap' + /** + * Mask Image Url + * + * The image to use as a mask for the generated image. + */ + mask_image_url?: string | null + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use up to 5 LoRAs + * and they will be merged together to generate the final image. + * + */ + loras?: Array + /** + * Enable Safety Checker + * + * If set to false, the safety checker will be disabled. + */ + enable_safety_checker?: boolean + /** + * Sharpness + * + * + * The sharpness of the generated image. 
Use it to control how sharp the generated + * image should be. Higher value means image and texture are sharper. + * + */ + sharpness?: number + /** + * Guidance Scale + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Inpaint Image Url + * + * The image to use as a reference for inpainting. + */ + inpaint_image_url?: string | null + /** + * Mixing Image Prompt And Inpaint + */ + mixing_image_prompt_and_inpaint?: boolean + /** + * Aspect Ratio + * + * + * The size of the generated image. You can choose between some presets or + * custom height and width that **must be multiples of 8**. + * + */ + aspect_ratio?: string + /** + * Num Images + * + * + * Number of images to generate in one request + * + */ + num_images?: number + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'png' | 'jpeg' | 'webp' + /** + * Refiner Model + * + * Refiner (SDXL or SD 1.5) + */ + refiner_model?: 'None' | 'realisticVisionV60B1_v51VAE.safetensors' + /** + * Sync Mode + * + * + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + * + */ + sync_mode?: boolean + /** + * Control Image Url + * + * The image to use as a reference for the generated image. + */ + control_image_url?: string | null + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number | null + /** + * Refiner Switch At + * + * + * Use 0.4 for SD1.5 realistic models; 0.667 for SD1.5 anime models; + * 0.8 for XL-refiners; or any value for switching two SDXL models. + * + */ + refiner_switch?: number + /** + * Control Image Weight + * + * + * The strength of the control image. Use it to control how much the generated image + * should look like the control image. + * + */ + control_image_weight?: number + /** + * Control Image Stop At + * + * + * The stop at value of the control image. Use it to control how much the generated image + * should look like the control image. + * + */ + control_image_stop_at?: number +} + +/** + * DiffusionEdgeOutput + */ +export type SchemaDiffusionEdgeOutput = { + /** + * Image + * + * The generated image file info. + */ + image: SchemaImage +} + +/** + * DiffusionEdgeInput + */ +export type SchemaDiffusionEdgeInput = { + /** + * Image Url + * + * The URL of the image to extract edges from. + */ + image_url: string +} + +/** + * LCMOutput + */ +export type SchemaLcmOutput = { + /** + * Images + * + * The generated image files info. + */ + images: Array<SchemaImage> + /** + * Request Id + * + * + * An id bound to a request, can be used with response to identify the request + * itself. + * + */ + request_id?: string + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Seed + * + * + * Seed of the generated Image.
It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number + /** + * Num Inference Steps + * + * + * Number of inference steps used to generate the image. It will be the same value of the one passed in the + * input or the default one in case none was passed. + * + */ + num_inference_steps?: number + /** + * Nsfw Content Detected + * + * + * A list of booleans indicating whether the generated image contains any + * potentially unsafe content. If the safety check is disabled, this field + * will be all false. + * + */ + nsfw_content_detected: Array<boolean> +} + +/** + * LCMInput + */ +export type SchemaLcmInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Controlnet Inpaint + * + * + * If set to true, the inpainting pipeline will use controlnet inpainting. + * Only effective for inpainting pipelines. + * + */ + controlnet_inpaint?: boolean + /** + * Image Size + * + * + * The size of the generated image. You can choose between some presets or + * custom height and width that **must be multiples of 8**. + * + * If not provided: + * - For text-to-image generations, the default size is 512x512. + * - For image-to-image generations, the default size is the same as the input image. + * - For inpainting generations, the default size is the same as the input image. + * + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Enable Safety Checks + * + * + * If set to true, the resulting image will be checked whether it includes any + * potentially unsafe content. If it does, it will be replaced with a black + * image. + * + */ + enable_safety_checks?: boolean + /** + * Model + * + * The model to use for generating the image. + */ + model?: 'sdxl' | 'sdv1-5' + /** + * Lora Url + * + * + * The url of the lora server to use for image generation. + * + */ + lora_url?: string + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Inpaint Mask Only + * + * + * If set to true, the inpainting pipeline will only inpaint the provided mask + * area. Only effective for inpainting pipelines. + * + */ + inpaint_mask_only?: boolean + /** + * Num Images + * + * + * The number of images to generate. The function will return a list of images + * with the same prompt and negative prompt but different seeds. + * + */ + num_images?: number + /** + * Lora Scale + * + * + * The scale of the lora server to use for image generation. + * + */ + lora_scale?: number + /** + * Image Url + * + * + * The base image to use for guiding the image generation on image-to-image + * generations. If either the width or height of the image is larger than 1024 + * pixels, the image will be resized to 1024 pixels while keeping the aspect ratio. + * + */ + image_url?: string + /** + * Strength + * + * + * The strength of the image that is passed as `image_url`.
The strength + * determines how much the generated image will be similar to the image passed as + * `image_url`. The higher the strength the more model gets "creative" and + * generates an image that's different from the initial image. A strength of 1.0 + * means that the initial image is more or less ignored and the model will try to + * generate an image that's as close as possible to the prompt. + * + */ + strength?: number + /** + * Sync Mode + * + * + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + * + */ + sync_mode?: boolean + /** + * Request Id + * + * + * An id bound to a request, can be used with response to identify the request + * itself. + * + */ + request_id?: string + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number + /** + * Mask Url + * + * + * The mask to use for guiding the image generation on image + * inpainting. The model will focus on the mask area and try to fill it with + * the most relevant content. + * + * The mask must be a black and white image where the white area is the area + * that needs to be filled and the black area is the area that should be + * ignored. + * + * The mask must have the same dimensions as the image passed as `image_url`. + * + */ + mask_url?: string + /** + * Num Inference Steps + * + * + * The number of inference steps to use for generating the image. The more steps + * the better the image will be but it will also take longer to generate. + * + */ + num_inference_steps?: number +} + +/** + * FooocusOutput + */ +export type SchemaFooocusInpaintOutput = { + /** + * Images + * + * The generated image file info. + */ + images: Array + /** + * Timings + * + * The time taken for the generation process. + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array +} + +/** + * FooocusInpaintInput + */ +export type SchemaFooocusInpaintInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt?: string + /** + * Performance + * + * + * You can choose Speed or Quality + * + */ + performance?: 'Speed' | 'Quality' | 'Extreme Speed' | 'Lightning' + /** + * Styles + * + * + * The style to use. 
+ * + */ + styles?: Array< + | 'Fooocus V2' + | 'Fooocus Enhance' + | 'Fooocus Sharp' + | 'Fooocus Semi Realistic' + | 'Fooocus Masterpiece' + | 'Fooocus Photograph' + | 'Fooocus Negative' + | 'Fooocus Cinematic' + | 'SAI 3D Model' + | 'SAI Analog Film' + | 'SAI Anime' + | 'SAI Cinematic' + | 'SAI Comic Book' + | 'SAI Craft Clay' + | 'SAI Digital Art' + | 'SAI Enhance' + | 'SAI Fantasy Art' + | 'SAI Isometric' + | 'SAI Line Art' + | 'SAI Lowpoly' + | 'SAI Neonpunk' + | 'SAI Origami' + | 'SAI Photographic' + | 'SAI Pixel Art' + | 'SAI Texture' + | 'MRE Cinematic Dynamic' + | 'MRE Spontaneous Picture' + | 'MRE Artistic Vision' + | 'MRE Dark Dream' + | 'MRE Gloomy Art' + | 'MRE Bad Dream' + | 'MRE Underground' + | 'MRE Surreal Painting' + | 'MRE Dynamic Illustration' + | 'MRE Undead Art' + | 'MRE Elemental Art' + | 'MRE Space Art' + | 'MRE Ancient Illustration' + | 'MRE Brave Art' + | 'MRE Heroic Fantasy' + | 'MRE Dark Cyberpunk' + | 'MRE Lyrical Geometry' + | 'MRE Sumi E Symbolic' + | 'MRE Sumi E Detailed' + | 'MRE Manga' + | 'MRE Anime' + | 'MRE Comic' + | 'Ads Advertising' + | 'Ads Automotive' + | 'Ads Corporate' + | 'Ads Fashion Editorial' + | 'Ads Food Photography' + | 'Ads Gourmet Food Photography' + | 'Ads Luxury' + | 'Ads Real Estate' + | 'Ads Retail' + | 'Artstyle Abstract' + | 'Artstyle Abstract Expressionism' + | 'Artstyle Art Deco' + | 'Artstyle Art Nouveau' + | 'Artstyle Constructivist' + | 'Artstyle Cubist' + | 'Artstyle Expressionist' + | 'Artstyle Graffiti' + | 'Artstyle Hyperrealism' + | 'Artstyle Impressionist' + | 'Artstyle Pointillism' + | 'Artstyle Pop Art' + | 'Artstyle Psychedelic' + | 'Artstyle Renaissance' + | 'Artstyle Steampunk' + | 'Artstyle Surrealist' + | 'Artstyle Typography' + | 'Artstyle Watercolor' + | 'Futuristic Biomechanical' + | 'Futuristic Biomechanical Cyberpunk' + | 'Futuristic Cybernetic' + | 'Futuristic Cybernetic Robot' + | 'Futuristic Cyberpunk Cityscape' + | 'Futuristic Futuristic' + | 'Futuristic Retro Cyberpunk' + | 'Futuristic Retro Futurism' + | 'Futuristic Sci Fi' + | 'Futuristic Vaporwave' + | 'Game Bubble Bobble' + | 'Game Cyberpunk Game' + | 'Game Fighting Game' + | 'Game Gta' + | 'Game Mario' + | 'Game Minecraft' + | 'Game Pokemon' + | 'Game Retro Arcade' + | 'Game Retro Game' + | 'Game Rpg Fantasy Game' + | 'Game Strategy Game' + | 'Game Streetfighter' + | 'Game Zelda' + | 'Misc Architectural' + | 'Misc Disco' + | 'Misc Dreamscape' + | 'Misc Dystopian' + | 'Misc Fairy Tale' + | 'Misc Gothic' + | 'Misc Grunge' + | 'Misc Horror' + | 'Misc Kawaii' + | 'Misc Lovecraftian' + | 'Misc Macabre' + | 'Misc Manga' + | 'Misc Metropolis' + | 'Misc Minimalist' + | 'Misc Monochrome' + | 'Misc Nautical' + | 'Misc Space' + | 'Misc Stained Glass' + | 'Misc Techwear Fashion' + | 'Misc Tribal' + | 'Misc Zentangle' + | 'Papercraft Collage' + | 'Papercraft Flat Papercut' + | 'Papercraft Kirigami' + | 'Papercraft Paper Mache' + | 'Papercraft Paper Quilling' + | 'Papercraft Papercut Collage' + | 'Papercraft Papercut Shadow Box' + | 'Papercraft Stacked Papercut' + | 'Papercraft Thick Layered Papercut' + | 'Photo Alien' + | 'Photo Film Noir' + | 'Photo Glamour' + | 'Photo Hdr' + | 'Photo Iphone Photographic' + | 'Photo Long Exposure' + | 'Photo Neon Noir' + | 'Photo Silhouette' + | 'Photo Tilt Shift' + | 'Cinematic Diva' + | 'Abstract Expressionism' + | 'Academia' + | 'Action Figure' + | 'Adorable 3D Character' + | 'Adorable Kawaii' + | 'Art Deco' + | 'Art Nouveau' + | 'Astral Aura' + | 'Avant Garde' + | 'Baroque' + | 'Bauhaus Style Poster' + | 'Blueprint 
Schematic Drawing' + | 'Caricature' + | 'Cel Shaded Art' + | 'Character Design Sheet' + | 'Classicism Art' + | 'Color Field Painting' + | 'Colored Pencil Art' + | 'Conceptual Art' + | 'Constructivism' + | 'Cubism' + | 'Dadaism' + | 'Dark Fantasy' + | 'Dark Moody Atmosphere' + | 'Dmt Art Style' + | 'Doodle Art' + | 'Double Exposure' + | 'Dripping Paint Splatter Art' + | 'Expressionism' + | 'Faded Polaroid Photo' + | 'Fauvism' + | 'Flat 2d Art' + | 'Fortnite Art Style' + | 'Futurism' + | 'Glitchcore' + | 'Glo Fi' + | 'Googie Art Style' + | 'Graffiti Art' + | 'Harlem Renaissance Art' + | 'High Fashion' + | 'Idyllic' + | 'Impressionism' + | 'Infographic Drawing' + | 'Ink Dripping Drawing' + | 'Japanese Ink Drawing' + | 'Knolling Photography' + | 'Light Cheery Atmosphere' + | 'Logo Design' + | 'Luxurious Elegance' + | 'Macro Photography' + | 'Mandola Art' + | 'Marker Drawing' + | 'Medievalism' + | 'Minimalism' + | 'Neo Baroque' + | 'Neo Byzantine' + | 'Neo Futurism' + | 'Neo Impressionism' + | 'Neo Rococo' + | 'Neoclassicism' + | 'Op Art' + | 'Ornate And Intricate' + | 'Pencil Sketch Drawing' + | 'Pop Art 2' + | 'Rococo' + | 'Silhouette Art' + | 'Simple Vector Art' + | 'Sketchup' + | 'Steampunk 2' + | 'Surrealism' + | 'Suprematism' + | 'Terragen' + | 'Tranquil Relaxing Atmosphere' + | 'Sticker Designs' + | 'Vibrant Rim Light' + | 'Volumetric Lighting' + | 'Watercolor 2' + | 'Whimsical And Playful' + | 'Mk Chromolithography' + | 'Mk Cross Processing Print' + | 'Mk Dufaycolor Photograph' + | 'Mk Herbarium' + | 'Mk Punk Collage' + | 'Mk Mosaic' + | 'Mk Van Gogh' + | 'Mk Coloring Book' + | 'Mk Singer Sargent' + | 'Mk Pollock' + | 'Mk Basquiat' + | 'Mk Andy Warhol' + | 'Mk Halftone Print' + | 'Mk Gond Painting' + | 'Mk Albumen Print' + | 'Mk Aquatint Print' + | 'Mk Anthotype Print' + | 'Mk Inuit Carving' + | 'Mk Bromoil Print' + | 'Mk Calotype Print' + | 'Mk Color Sketchnote' + | 'Mk Cibulak Porcelain' + | 'Mk Alcohol Ink Art' + | 'Mk One Line Art' + | 'Mk Blacklight Paint' + | 'Mk Carnival Glass' + | 'Mk Cyanotype Print' + | 'Mk Cross Stitching' + | 'Mk Encaustic Paint' + | 'Mk Embroidery' + | 'Mk Gyotaku' + | 'Mk Luminogram' + | 'Mk Lite Brite Art' + | 'Mk Mokume Gane' + | 'Pebble Art' + | 'Mk Palekh' + | 'Mk Suminagashi' + | 'Mk Scrimshaw' + | 'Mk Shibori' + | 'Mk Vitreous Enamel' + | 'Mk Ukiyo E' + | 'Mk Vintage Airline Poster' + | 'Mk Vintage Travel Poster' + | 'Mk Bauhaus Style' + | 'Mk Afrofuturism' + | 'Mk Atompunk' + | 'Mk Constructivism' + | 'Mk Chicano Art' + | 'Mk De Stijl' + | 'Mk Dayak Art' + | 'Mk Fayum Portrait' + | 'Mk Illuminated Manuscript' + | 'Mk Kalighat Painting' + | 'Mk Madhubani Painting' + | 'Mk Pictorialism' + | 'Mk Pichwai Painting' + | 'Mk Patachitra Painting' + | 'Mk Samoan Art Inspired' + | 'Mk Tlingit Art' + | 'Mk Adnate Style' + | 'Mk Ron English Style' + | 'Mk Shepard Fairey Style' + > + image_prompt_3?: SchemaImagePrompt + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use up to 5 LoRAs + * and they will be merged together to generate the final image. + * + */ + loras?: Array + image_prompt_4?: SchemaImagePrompt + /** + * Guidance Scale + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Sharpness + * + * + * The sharpness of the generated image. Use it to control how sharp the generated + * image should be. Higher value means image and texture are sharper. 
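+ * + * As a rough illustration (invented values): a low setting such as sharpness: 2 keeps a + * softer look, while something like sharpness: 8 yields noticeably crisper textures.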
+ * + */ + sharpness?: number + /** + * Mixing Image Prompt and Inpaint + * + * Mixing Image Prompt and Inpaint + */ + mixing_image_prompt_and_inpaint?: boolean + /** + * Outpaint Direction + * + * The directions to outpaint. + */ + outpaint_selections?: Array<'Left' | 'Right' | 'Top' | 'Bottom'> + /** + * Inpaint Image Url + * + * The image to use as a reference for inpainting. + */ + inpaint_image_url: string + /** + * Refiner Model + * + * Refiner (SDXL or SD 1.5) + */ + refiner_model?: 'None' | 'realisticVisionV60B1_v51VAE.safetensors' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'png' | 'jpeg' | 'webp' + image_prompt_2?: SchemaImagePrompt + /** + * Inpaint Respective Field + * + * + * The area to inpaint. Value 0 is same as "Only Masked" in A1111. Value 1 is + * same as "Whole Image" in A1111. Only used in inpaint, not used in outpaint. + * (Outpaint always uses 1.0) + * + */ + inpaint_respective_field?: number + /** + * Inpaint Mode + * + * The mode to use for inpainting. + */ + inpaint_mode?: + | 'Inpaint or Outpaint (default)' + | 'Improve Detail (face, hand, eyes, etc.)' + | 'Modify Content (add objects, change background, etc.)' + /** + * Sync Mode + * + * + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + * + */ + sync_mode?: boolean + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number | null + /** + * Refiner Switch At + * + * + * Use 0.4 for SD1.5 realistic models; 0.667 for SD1.5 anime models; + * 0.8 for XL-refiners; or any value for switching two SDXL models. + * + */ + refiner_switch?: number + /** + * Disable Initial Latent In Inpaint + * + * If set to true, the initial preprocessing will be disabled. + */ + inpaint_disable_initial_latent?: boolean + /** + * Mask Image Url + * + * The image to use as a mask for the generated image. + */ + mask_image_url?: string + /** + * Invert Mask + * + * If set to true, the mask will be inverted. + */ + invert_mask?: boolean + image_prompt_1?: SchemaImagePrompt + /** + * Enable Safety Checker + * + * If set to false, the safety checker will be disabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Num Images + * + * + * Number of images to generate in one request + * + */ + num_images?: number + /** + * Aspect Ratio + * + * + * The size of the generated image. You can choose between some presets or + * custom height and width that **must be multiples of 8**. + * + */ + aspect_ratio?: string + /** + * Inpaint Additional Prompt + * + * Describe what you want to inpaint. + */ + inpaint_additional_prompt?: string + /** + * Inpaint Denoising Strength + * + * + * Same as the denoising strength in A1111 inpaint. Only used in inpaint, not + * used in outpaint.
(Outpaint always uses 1.0) + * + */ + inpaint_strength?: number + /** + * Override Inpaint Options + * + * + * If set to true, the advanced inpaint options ('inpaint_disable_initial_latent', + * 'inpaint_engine', 'inpaint_strength', 'inpaint_respective_field', + * 'inpaint_erode_or_dilate') will be overridden. + * Otherwise, the default values will be used. + * + */ + override_inpaint_options?: boolean + /** + * Inpaint Engine + * + * Version of Fooocus inpaint model + */ + inpaint_engine?: 'None' | 'v1' | 'v2.5' | 'v2.6' + /** + * Mask Erode or Dilate + * + * + * A positive value will make the white area in the mask larger, a negative value + * will make it smaller. (The default is 0; this is always processed before any + * mask invert.) + * + */ + inpaint_erode_or_dilate?: number +} + +/** + * ImagePrompt + */ +export type SchemaImagePrompt = { + /** + * Weight + */ + weight?: number + /** + * Stop At + */ + stop_at?: number + /** + * Type + */ + type?: 'ImagePrompt' | 'PyraCanny' | 'CPDS' | 'FaceSwap' + /** + * Image Url + */ + image_url?: string +} + +/** + * FooocusOutput + */ +export type SchemaFooocusImagePromptOutput = { + /** + * Images + * + * The generated image file info. + */ + images: Array<SchemaImage> + /** + * Timings + * + * The time taken for the generation process. + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array<boolean> +} + +/** + * FooocusImagePromptInput + */ +export type SchemaFooocusImagePromptInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt?: string + /** + * UOV Image URL + * + * The image to upscale or vary. + */ + uov_image_url?: string + /** + * Performance + * + * + * You can choose Speed or Quality + * + */ + performance?: 'Speed' | 'Quality' | 'Extreme Speed' | 'Lightning' + image_prompt_3?: SchemaImagePrompt + /** + * Styles + * + * + * The style to use.
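+ * + * A minimal request for this type might look like (illustrative, invented URL): + * { prompt: 'a glass skyscraper', image_prompt_1: { image_url: 'https://example.com/ref.png', type: 'PyraCanny', weight: 0.9 } }.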
+ * + */ + styles?: Array< + | 'Fooocus V2' + | 'Fooocus Enhance' + | 'Fooocus Sharp' + | 'Fooocus Semi Realistic' + | 'Fooocus Masterpiece' + | 'Fooocus Photograph' + | 'Fooocus Negative' + | 'Fooocus Cinematic' + | 'SAI 3D Model' + | 'SAI Analog Film' + | 'SAI Anime' + | 'SAI Cinematic' + | 'SAI Comic Book' + | 'SAI Craft Clay' + | 'SAI Digital Art' + | 'SAI Enhance' + | 'SAI Fantasy Art' + | 'SAI Isometric' + | 'SAI Line Art' + | 'SAI Lowpoly' + | 'SAI Neonpunk' + | 'SAI Origami' + | 'SAI Photographic' + | 'SAI Pixel Art' + | 'SAI Texture' + | 'MRE Cinematic Dynamic' + | 'MRE Spontaneous Picture' + | 'MRE Artistic Vision' + | 'MRE Dark Dream' + | 'MRE Gloomy Art' + | 'MRE Bad Dream' + | 'MRE Underground' + | 'MRE Surreal Painting' + | 'MRE Dynamic Illustration' + | 'MRE Undead Art' + | 'MRE Elemental Art' + | 'MRE Space Art' + | 'MRE Ancient Illustration' + | 'MRE Brave Art' + | 'MRE Heroic Fantasy' + | 'MRE Dark Cyberpunk' + | 'MRE Lyrical Geometry' + | 'MRE Sumi E Symbolic' + | 'MRE Sumi E Detailed' + | 'MRE Manga' + | 'MRE Anime' + | 'MRE Comic' + | 'Ads Advertising' + | 'Ads Automotive' + | 'Ads Corporate' + | 'Ads Fashion Editorial' + | 'Ads Food Photography' + | 'Ads Gourmet Food Photography' + | 'Ads Luxury' + | 'Ads Real Estate' + | 'Ads Retail' + | 'Artstyle Abstract' + | 'Artstyle Abstract Expressionism' + | 'Artstyle Art Deco' + | 'Artstyle Art Nouveau' + | 'Artstyle Constructivist' + | 'Artstyle Cubist' + | 'Artstyle Expressionist' + | 'Artstyle Graffiti' + | 'Artstyle Hyperrealism' + | 'Artstyle Impressionist' + | 'Artstyle Pointillism' + | 'Artstyle Pop Art' + | 'Artstyle Psychedelic' + | 'Artstyle Renaissance' + | 'Artstyle Steampunk' + | 'Artstyle Surrealist' + | 'Artstyle Typography' + | 'Artstyle Watercolor' + | 'Futuristic Biomechanical' + | 'Futuristic Biomechanical Cyberpunk' + | 'Futuristic Cybernetic' + | 'Futuristic Cybernetic Robot' + | 'Futuristic Cyberpunk Cityscape' + | 'Futuristic Futuristic' + | 'Futuristic Retro Cyberpunk' + | 'Futuristic Retro Futurism' + | 'Futuristic Sci Fi' + | 'Futuristic Vaporwave' + | 'Game Bubble Bobble' + | 'Game Cyberpunk Game' + | 'Game Fighting Game' + | 'Game Gta' + | 'Game Mario' + | 'Game Minecraft' + | 'Game Pokemon' + | 'Game Retro Arcade' + | 'Game Retro Game' + | 'Game Rpg Fantasy Game' + | 'Game Strategy Game' + | 'Game Streetfighter' + | 'Game Zelda' + | 'Misc Architectural' + | 'Misc Disco' + | 'Misc Dreamscape' + | 'Misc Dystopian' + | 'Misc Fairy Tale' + | 'Misc Gothic' + | 'Misc Grunge' + | 'Misc Horror' + | 'Misc Kawaii' + | 'Misc Lovecraftian' + | 'Misc Macabre' + | 'Misc Manga' + | 'Misc Metropolis' + | 'Misc Minimalist' + | 'Misc Monochrome' + | 'Misc Nautical' + | 'Misc Space' + | 'Misc Stained Glass' + | 'Misc Techwear Fashion' + | 'Misc Tribal' + | 'Misc Zentangle' + | 'Papercraft Collage' + | 'Papercraft Flat Papercut' + | 'Papercraft Kirigami' + | 'Papercraft Paper Mache' + | 'Papercraft Paper Quilling' + | 'Papercraft Papercut Collage' + | 'Papercraft Papercut Shadow Box' + | 'Papercraft Stacked Papercut' + | 'Papercraft Thick Layered Papercut' + | 'Photo Alien' + | 'Photo Film Noir' + | 'Photo Glamour' + | 'Photo Hdr' + | 'Photo Iphone Photographic' + | 'Photo Long Exposure' + | 'Photo Neon Noir' + | 'Photo Silhouette' + | 'Photo Tilt Shift' + | 'Cinematic Diva' + | 'Abstract Expressionism' + | 'Academia' + | 'Action Figure' + | 'Adorable 3D Character' + | 'Adorable Kawaii' + | 'Art Deco' + | 'Art Nouveau' + | 'Astral Aura' + | 'Avant Garde' + | 'Baroque' + | 'Bauhaus Style Poster' + | 'Blueprint 
Schematic Drawing' + | 'Caricature' + | 'Cel Shaded Art' + | 'Character Design Sheet' + | 'Classicism Art' + | 'Color Field Painting' + | 'Colored Pencil Art' + | 'Conceptual Art' + | 'Constructivism' + | 'Cubism' + | 'Dadaism' + | 'Dark Fantasy' + | 'Dark Moody Atmosphere' + | 'Dmt Art Style' + | 'Doodle Art' + | 'Double Exposure' + | 'Dripping Paint Splatter Art' + | 'Expressionism' + | 'Faded Polaroid Photo' + | 'Fauvism' + | 'Flat 2d Art' + | 'Fortnite Art Style' + | 'Futurism' + | 'Glitchcore' + | 'Glo Fi' + | 'Googie Art Style' + | 'Graffiti Art' + | 'Harlem Renaissance Art' + | 'High Fashion' + | 'Idyllic' + | 'Impressionism' + | 'Infographic Drawing' + | 'Ink Dripping Drawing' + | 'Japanese Ink Drawing' + | 'Knolling Photography' + | 'Light Cheery Atmosphere' + | 'Logo Design' + | 'Luxurious Elegance' + | 'Macro Photography' + | 'Mandola Art' + | 'Marker Drawing' + | 'Medievalism' + | 'Minimalism' + | 'Neo Baroque' + | 'Neo Byzantine' + | 'Neo Futurism' + | 'Neo Impressionism' + | 'Neo Rococo' + | 'Neoclassicism' + | 'Op Art' + | 'Ornate And Intricate' + | 'Pencil Sketch Drawing' + | 'Pop Art 2' + | 'Rococo' + | 'Silhouette Art' + | 'Simple Vector Art' + | 'Sketchup' + | 'Steampunk 2' + | 'Surrealism' + | 'Suprematism' + | 'Terragen' + | 'Tranquil Relaxing Atmosphere' + | 'Sticker Designs' + | 'Vibrant Rim Light' + | 'Volumetric Lighting' + | 'Watercolor 2' + | 'Whimsical And Playful' + | 'Mk Chromolithography' + | 'Mk Cross Processing Print' + | 'Mk Dufaycolor Photograph' + | 'Mk Herbarium' + | 'Mk Punk Collage' + | 'Mk Mosaic' + | 'Mk Van Gogh' + | 'Mk Coloring Book' + | 'Mk Singer Sargent' + | 'Mk Pollock' + | 'Mk Basquiat' + | 'Mk Andy Warhol' + | 'Mk Halftone Print' + | 'Mk Gond Painting' + | 'Mk Albumen Print' + | 'Mk Aquatint Print' + | 'Mk Anthotype Print' + | 'Mk Inuit Carving' + | 'Mk Bromoil Print' + | 'Mk Calotype Print' + | 'Mk Color Sketchnote' + | 'Mk Cibulak Porcelain' + | 'Mk Alcohol Ink Art' + | 'Mk One Line Art' + | 'Mk Blacklight Paint' + | 'Mk Carnival Glass' + | 'Mk Cyanotype Print' + | 'Mk Cross Stitching' + | 'Mk Encaustic Paint' + | 'Mk Embroidery' + | 'Mk Gyotaku' + | 'Mk Luminogram' + | 'Mk Lite Brite Art' + | 'Mk Mokume Gane' + | 'Pebble Art' + | 'Mk Palekh' + | 'Mk Suminagashi' + | 'Mk Scrimshaw' + | 'Mk Shibori' + | 'Mk Vitreous Enamel' + | 'Mk Ukiyo E' + | 'Mk Vintage Airline Poster' + | 'Mk Vintage Travel Poster' + | 'Mk Bauhaus Style' + | 'Mk Afrofuturism' + | 'Mk Atompunk' + | 'Mk Constructivism' + | 'Mk Chicano Art' + | 'Mk De Stijl' + | 'Mk Dayak Art' + | 'Mk Fayum Portrait' + | 'Mk Illuminated Manuscript' + | 'Mk Kalighat Painting' + | 'Mk Madhubani Painting' + | 'Mk Pictorialism' + | 'Mk Pichwai Painting' + | 'Mk Patachitra Painting' + | 'Mk Samoan Art Inspired' + | 'Mk Tlingit Art' + | 'Mk Adnate Style' + | 'Mk Ron English Style' + | 'Mk Shepard Fairey Style' + > + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use up to 5 LoRAs + * and they will be merged together to generate the final image. + * + */ + loras?: Array + image_prompt_4?: SchemaImagePrompt + /** + * Guidance Scale + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Sharpness + * + * + * The sharpness of the generated image. Use it to control how sharp the generated + * image should be. Higher value means image and texture are sharper. 
+ * + */ + sharpness?: number + /** + * Mixing Image Prompt and Inpaint + * + * Mixing Image Prompt and Inpaint + */ + mixing_image_prompt_and_inpaint?: boolean + /** + * Outpaint Direction + * + * The directions to outpaint. + */ + outpaint_selections?: Array<'Left' | 'Right' | 'Top' | 'Bottom'> + /** + * Inpaint Image URL + * + * The image to use as a reference for inpainting. + */ + inpaint_image_url?: string + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'png' | 'jpeg' | 'webp' + /** + * Refiner Model + * + * Refiner (SDXL or SD 1.5) + */ + refiner_model?: 'None' | 'realisticVisionV60B1_v51VAE.safetensors' + image_prompt_2?: SchemaImagePrompt + /** + * Sync Mode + * + * + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + * + */ + sync_mode?: boolean + /** + * Inpaint Mode + * + * The mode to use for inpainting. + */ + inpaint_mode?: + | 'Inpaint or Outpaint (default)' + | 'Improve Detail (face, hand, eyes, etc.)' + | 'Modify Content (add objects, change background, etc.)' + /** + * UOV Method + * + * The method to use for upscaling or varying. + */ + uov_method?: + | 'Disabled' + | 'Vary (Subtle)' + | 'Vary (Strong)' + | 'Upscale (1.5x)' + | 'Upscale (2x)' + | 'Upscale (Fast 2x)' + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number | null + /** + * Refiner Switch At + * + * + * Use 0.4 for SD1.5 realistic models; 0.667 for SD1.5 anime models + * 0.8 for XL-refiners; or any value for switching two SDXL models. + * + */ + refiner_switch?: number + /** + * Mixing Image Prompt and Vary/Upscale + * + * Mixing Image Prompt and Vary/Upscale + */ + mixing_image_prompt_and_vary_upscale?: boolean + /** + * Mask Image URL + * + * The image to use as a mask for the generated image. + */ + mask_image_url?: string + /** + * Image Prompt 1 + */ + image_prompt_1: SchemaImagePrompt + /** + * Enable Safety Checker + * + * If set to false, the safety checker will be disabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Num Images + * + * + * Number of images to generate in one request + * + */ + num_images?: number + /** + * Aspect Ratio + * + * + * The size of the generated image. You can choose between some presets or + * custom height and width that **must be multiples of 8**. + * + */ + aspect_ratio?: string + /** + * Inpaint Additional Prompt + * + * Describe what you want to inpaint. + */ + inpaint_additional_prompt?: string +} + +/** + * IllusionDiffusionOutput + */ +export type SchemaIllusionDiffusionOutput = { + /** + * Image + * + * The generated image file info. + */ + image: SchemaImage + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * IllusionDiffusionInput + */ +export type SchemaIllusionDiffusionInput = { + /** + * Prompt + * + * The prompt to use for generating the image. 
Be as descriptive as possible for best results. + */ + prompt: string + /** + * Image Size + * + * + * The size of the generated image. You can choose between some presets or + * custom height and width that **must be multiples of 8**. + * + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Controlnet Conditioning Scale + * + * The scale of the ControlNet. + */ + controlnet_conditioning_scale?: number + /** + * Image Url + * + * Input image url. + */ + image_url: string + /** + * Scheduler + * + * Scheduler / sampler to use for the image denoising process. + */ + scheduler?: 'DPM++ Karras SDE' | 'Euler' + /** + * Control Guidance Start + */ + control_guidance_start?: number + /** + * Guidance Scale + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed?: number + /** + * Control Guidance End + */ + control_guidance_end?: number + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Number of inference steps + * + * + * Increasing the amount of steps tells Stable Diffusion that it should take more steps + * to generate your final result which can increase the amount of detail in your image. + * + */ + num_inference_steps?: number +} + +/** + * Output + */ +export type SchemaFastFooocusSdxlOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * TextToImageFooocusInput + */ +export type SchemaFastFooocusSdxlInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Enable Refiner + * + * If set to true, a smaller model will try to refine the output after it was processed. + */ + enable_refiner?: boolean + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Embeddings + * + * The list of embeddings to use. + */ + embeddings?: Array + /** + * Expand Prompt + * + * If set to true, the prompt will be expanded with additional prompts. + */ + expand_prompt?: boolean + /** + * Guidance Rescale + * + * The rescale factor for the CFG. 
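+ * + * Illustrative starting point (invented values): guidance_rescale: 0.7 paired with a + * guidance_scale around 7 is a common choice for CFG-rescaled SDXL sampling.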
+ */ + guidance_rescale?: number + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Format + * + * The format of the generated image. + */ + format?: 'jpeg' | 'png' + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Safety Checker Version + * + * The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model. + */ + safety_checker_version?: 'v1' | 'v2' + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number +} + +/** + * Output + */ +export type SchemaFastLcmDiffusionOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * TextToImageLCMInput + */ +export type SchemaFastLcmDiffusionInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Expand Prompt + * + * If set to true, the prompt will be expanded with additional prompts. + */ + expand_prompt?: boolean + /** + * Guidance Rescale + * + * The rescale factor for the CFG. + */ + guidance_rescale?: number + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Format + * + * The format of the generated image. + */ + format?: 'jpeg' | 'png' + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Model Name + * + * The name of the model to use. 
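+ * + * e.g. (values from the union below): 'runwayml/stable-diffusion-v1-5' selects the + * SD 1.5 backbone, while 'stabilityai/stable-diffusion-xl-base-1.0' selects SDXL.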
+ */ + model_name?: + | 'stabilityai/stable-diffusion-xl-base-1.0' + | 'runwayml/stable-diffusion-v1-5' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Safety Checker Version + * + * The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model. + */ + safety_checker_version?: 'v1' | 'v2' + /** + * Request Id + * + * + * An id bound to a request, can be used with response to identify the request + * itself. + * + */ + request_id?: string + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number +} + +/** + * Output + */ +export type SchemaFastSdxlControlnetCannyOutput = { + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * TextToImageControlNetInput + */ +export type SchemaFastSdxlControlnetCannyInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Image Size + * + * The size of the generated image. Leave it none to automatically infer from the control image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + | null + /** + * Expand Prompt + * + * If set to true, the prompt will be expanded with additional prompts. + */ + expand_prompt?: boolean + /** + * Loras + * + * The list of LoRA weights to use. + */ + loras?: Array + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Controlnet Conditioning Scale + * + * The scale of the controlnet conditioning. + */ + controlnet_conditioning_scale?: number + /** + * Sync Mode + * + * + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + * + */ + sync_mode?: boolean + /** + * Control Image Url + * + * The URL of the control image. 
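+ * + * A minimal sketch (invented URL; all other fields keep their defaults): + * { prompt: 'a glass building', control_image_url: 'https://example.com/edges.png', controlnet_conditioning_scale: 0.5 }.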
+ */ + control_image_url: string + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number + /** + * Enable Deep Cache + * + * + * If set to true, DeepCache will be enabled. + * + */ + enable_deep_cache?: boolean +} + +/** + * Output + */ +export type SchemaFastFooocusSdxlImageToImageOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array<SchemaImage> + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array<boolean> + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * ImageToImageFooocusInput + */ +export type SchemaFastFooocusSdxlImageToImageInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Enable Refiner + * + * If set to true, a smaller model will try to refine the output after it was processed. + */ + enable_refiner?: boolean + /** + * Image Size + * + * The size of the generated image. Leave it as none to automatically infer from the prompt image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + | null + /** + * Embeddings + * + * The list of embeddings to use. + */ + embeddings?: Array<SchemaEmbedding> + /** + * Expand Prompt + * + * If set to true, the prompt will be expanded with additional prompts. + */ + expand_prompt?: boolean + /** + * Guidance Rescale + * + * The rescale factor for the CFG. + */ + guidance_rescale?: number + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Format + * + * The format of the generated image. + */ + format?: 'jpeg' | 'png' + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Url + * + * The URL of the image to use as a starting point for the generation. + */ + image_url: string + /** + * Strength + * + * Determines how much the generated image resembles the initial image. + */ + strength?: number + /** + * Safety Checker Version + * + * The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model. + */ + safety_checker_version?: 'v1' | 'v2' + /** + * Num Inference Steps + * + * The number of inference steps to perform.
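+ * + * Illustrative trade-off (invented values): strength: 0.35 with the default step count + * makes mild edits, while strength near 1.0 mostly ignores the input image.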
+ */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number +} + +/** + * Output + */ +export type SchemaFastLightningSdxlOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * TextToImageLightningInput + */ +export type SchemaFastLightningSdxlInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Format + * + * The format of the generated image. + */ + format?: 'jpeg' | 'png' + /** + * Embeddings + * + * The list of embeddings to use. + */ + embeddings?: Array + /** + * Expand Prompt + * + * If set to true, the prompt will be expanded with additional prompts. + */ + expand_prompt?: boolean + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Guidance Rescale + * + * The rescale factor for the CFG. + */ + guidance_rescale?: number + /** + * Safety Checker Version + * + * The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model. + */ + safety_checker_version?: 'v1' | 'v2' + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: '1' | '2' | '4' | '8' + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number + /** + * Request Id + * + * + * An id bound to a request, can be used with response to identify the request + * itself. + * + */ + request_id?: string +} + +/** + * Output + */ +export type SchemaLayerDiffusionOutput = { + /** + * Image + * + * The URL of the generated image. + */ + image: SchemaImage + /** + * Seed + * + * The seed used to generate the image. + */ + seed: number +} + +/** + * Input + */ +export type SchemaLayerDiffusionInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt?: string + /** + * Guidance Scale + * + * The guidance scale for the model. + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps for the model. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. 
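+ * + * e.g. (illustrative): a fixed value such as seed: 42 makes reruns of the same prompt + * reproducible; omitting it lets the service pick a random seed.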
+ * + */ + seed?: number + /** + * Negative Prompt + * + * The negative prompt to use when generating the image. Be as descriptive as possible for best results. + */ + negative_prompt?: string + /** + * Enable Safety Checker + * + * If set to false, the safety checker will be disabled. + */ + enable_safety_checker?: boolean +} + +/** + * Output + */ +export type SchemaStableDiffusionV15Output = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * TextToImageSD15Input + */ +export type SchemaStableDiffusionV15Input = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Embeddings + * + * The list of embeddings to use. + */ + embeddings?: Array + /** + * Expand Prompt + * + * If set to true, the prompt will be expanded with additional prompts. + */ + expand_prompt?: boolean + /** + * Loras + * + * The list of LoRA weights to use. + */ + loras?: Array + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Format + * + * The format of the generated image. + */ + format?: 'jpeg' | 'png' + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Safety Checker Version + * + * The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model. + */ + safety_checker_version?: 'v1' | 'v2' + /** + * Request Id + * + * + * An id bound to a request, can be used with response to identify the request + * itself. + * + */ + request_id?: string + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number +} + +/** + * Output + */ +export type SchemaDreamshaperOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info.
+ */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * DreamshaperTextToImageInput + */ +export type SchemaDreamshaperInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Image Size + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Embeddings + * + * The list of embeddings to use. + */ + embeddings?: Array + /** + * Expand Prompt + * + * If set to true, the prompt will be expanded with additional prompts. + */ + expand_prompt?: boolean + /** + * Loras + * + * The list of LoRA weights to use. + */ + loras?: Array + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you. + */ + guidance_scale?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * The negative prompt to use. Use it to address details that you don't want in the image. + */ + negative_prompt?: string + /** + * Format + * + * The format of the generated image. + */ + format?: 'jpeg' | 'png' + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Model Name + * + * The Dreamshaper model to use. + */ + model_name?: + | 'Lykon/dreamshaper-xl-1-0' + | 'Lykon/dreamshaper-xl-v2-turbo' + | 'Lykon/dreamshaper-8' + /** + * Sync Mode + * + * + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + * + */ + sync_mode?: boolean + /** + * Safety Checker Version + * + * The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model. + */ + safety_checker_version?: 'v1' | 'v2' + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number +} + +/** + * Output + */ +export type SchemaRealisticVisionOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. 
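+ * + * @example + * // Illustrative, not part of the generated schema: pass the returned seed + * // back in as the input seed to re-generate the same image.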
+ * + */ + seed: number +} + +/** + * RealisticVisionTextToImageInput + */ +export type SchemaRealisticVisionInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Image Size + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Embeddings + * + * The list of embeddings to use. + */ + embeddings?: Array + /** + * Expand Prompt + * + * If set to true, the prompt will be expanded with additional prompts. + */ + expand_prompt?: boolean + /** + * Loras + * + * The list of LoRA weights to use. + */ + loras?: Array + /** + * Guidance Rescale + * + * The rescale factor for the CFG. + */ + guidance_rescale?: number + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you. + */ + guidance_scale?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * The negative prompt to use. Use it to address details that you don't want in the image. + */ + negative_prompt?: string + /** + * Format + * + * The format of the generated image. + */ + format?: 'jpeg' | 'png' + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Model Name + * + * The Realistic Vision model to use. + */ + model_name?: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Safety Checker Version + * + * The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model. + */ + safety_checker_version?: 'v1' | 'v2' + /** + * Request Id + * + * + * An id bound to a request, can be used with response to identify the request + * itself. + * + */ + request_id?: string + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number +} + +/** + * Output + */ +export type SchemaPlaygroundV25Output = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * TextToImagePlaygroundv25Input + */ +export type SchemaPlaygroundV25Input = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Embeddings + * + * The list of embeddings to use. 
+ */ + embeddings?: Array + /** + * Expand Prompt + * + * If set to true, the prompt will be expanded with additional prompts. + */ + expand_prompt?: boolean + /** + * Guidance Rescale + * + * The rescale factor for the CFG. + */ + guidance_rescale?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Format + * + * The format of the generated image. + */ + format?: 'jpeg' | 'png' + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Safety Checker Version + * + * The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model. + */ + safety_checker_version?: 'v1' | 'v2' + /** + * Request Id + * + * + * An id bound to a request, can be used with response to identify the request + * itself. + * + */ + request_id?: string + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * Output + */ +export type SchemaLightningModelsOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * LightningModelsTextToImageInput + */ +export type SchemaLightningModelsInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Image Size + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Embeddings + * + * The list of embeddings to use. + */ + embeddings?: Array + /** + * Expand Prompt + * + * If set to true, the prompt will be expanded with additional prompts. + */ + expand_prompt?: boolean + /** + * Loras + * + * The list of LoRA weights to use. + */ + loras?: Array + /** + * Scheduler + * + * Scheduler / sampler to use for the image denoising process. 
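+ * + * @example + * // Illustrative, not part of the generated schema: any member of the union + * // below is accepted, e.g. scheduler: 'DPM++ 2M Karras'.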
+ */ + scheduler?: + | 'DPM++ 2M' + | 'DPM++ 2M Karras' + | 'DPM++ 2M SDE' + | 'DPM++ 2M SDE Karras' + | 'DPM++ SDE' + | 'DPM++ SDE Karras' + | 'KDPM 2A' + | 'Euler' + | 'Euler (trailing timesteps)' + | 'Euler A' + | 'LCM' + | 'EDMDPMSolverMultistepScheduler' + | 'TCDScheduler' + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * The negative prompt to use. Use it to address details that you don't want in the image. + */ + negative_prompt?: string + /** + * Format + * + * The format of the generated image. + */ + format?: 'jpeg' | 'png' + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Model Name + * + * The Lightning model to use. + */ + model_name?: string + /** + * Sync Mode + * + * + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + * + */ + sync_mode?: boolean + /** + * Safety Checker Version + * + * The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model. + */ + safety_checker_version?: 'v1' | 'v2' + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number +} + +/** + * T2IOutput + */ +export type SchemaLumaPhotonOutput = { + /** + * Images + * + * The generated images + */ + images: Array +} + +/** + * TextToImageRequest + */ +export type SchemaLumaPhotonInput = { + /** + * Prompt + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated image + */ + aspect_ratio?: '16:9' | '9:16' | '1:1' | '4:3' | '3:4' | '21:9' | '9:21' +} + +/** + * Output + */ +export type SchemaStableCascadeSoteDiffusionOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * SoteDiffusionInput + */ +export type SchemaStableCascadeSoteDiffusionInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image.
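+ * + * @example + * // Illustrative, not part of the generated schema (and assuming SchemaImageSize + * // is a width/height object): image_size: 'landscape_4_3' or + * // image_size: { width: 1024, height: 768 }.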
+ */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Decoder Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + second_stage_guidance_scale?: number + /** + * Sync Mode + * + * + * If set to true, the image will be returned as base64 encoded string. + * + */ + sync_mode?: boolean + /** + * First Stage Steps + * + * Number of steps to run the first stage for. + */ + first_stage_steps?: number + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Cascade + * will output the same image every time. + * + */ + seed?: number + /** + * Enable Safety Checker + * + * If set to false, the safety checker will be disabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Second Stage Steps + * + * Number of steps to run the second stage for. + */ + second_stage_steps?: number +} + +/** + * Output + */ +export type SchemaFastSdxlOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Timings + */ + timings: { + [key: string]: number + } +} + +/** + * TextToImageInput + */ +export type SchemaFastSdxlInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Embeddings + * + * The list of embeddings to use. + */ + embeddings?: Array + /** + * Expand Prompt + * + * If set to true, the prompt will be expanded with additional prompts. + */ + expand_prompt?: boolean + /** + * Loras + * + * The list of LoRA weights to use. + */ + loras?: Array + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). 
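+ * + * @example + * // Illustrative, not part of the generated schema: + * // negative_prompt: 'blurry, low resolution, watermark, extra fingers'.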
+ * + */ + negative_prompt?: string + /** + * Format + * + * The format of the generated image. + */ + format?: 'jpeg' | 'png' + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Safety Checker Version + * + * The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model. + */ + safety_checker_version?: 'v1' | 'v2' + /** + * Request Id + * + * + * An id bound to a request, can be used with response to identify the request + * itself. + * + */ + request_id?: string + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number +} + +/** + * Output + */ +export type SchemaStableCascadeOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * StableCascadeInput + */ +export type SchemaStableCascadeInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Decoder Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + second_stage_guidance_scale?: number + /** + * Sync Mode + * + * + * If set to true, the image will be returned as base64 encoded string. + * + */ + sync_mode?: boolean + /** + * First Stage Steps + * + * Number of steps to run the first stage for. + */ + first_stage_steps?: number + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Cascade + * will output the same image every time. + * + */ + seed?: number + /** + * Enable Safety Checker + * + * If set to false, the safety checker will be disabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). 
+ * + */ + negative_prompt?: string + /** + * Second Stage Steps + * + * Number of steps to run the second stage for. + */ + second_stage_steps?: number +} + +/** + * Output + */ +export type SchemaKolorsOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * KolorsInput + */ +export type SchemaKolorsInput = { + /** + * Prompt + * + * + * The prompt to use for generating the image. Be as descriptive as possible + * for best results. + * + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * + * If set to true, the function will wait for the image to be generated and + * uploaded before returning the response. This will increase the latency of + * the function but it allows you to get the image directly in the response + * without going through the CDN. + * + */ + sync_mode?: boolean + /** + * Scheduler + * + * The scheduler to use for the model. + */ + scheduler?: + | 'EulerDiscreteScheduler' + | 'EulerAncestralDiscreteScheduler' + | 'DPMSolverMultistepScheduler' + | 'DPMSolverMultistepScheduler_SDE_karras' + | 'UniPCMultistepScheduler' + | 'DEISMultistepScheduler' + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show + * you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * Seed + */ + seed?: number + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small + * details (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Enable Safety Checker + * + * Enable safety checker. + */ + enable_safety_checker?: boolean +} + +/** + * Output + */ +export type SchemaSdxlControlnetUnionOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. 
+ * + */ + seed: number +} + +/** + * TextToImageControlNetUnionInput + */ +export type SchemaSdxlControlnetUnionInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Depth Preprocess + * + * Whether to preprocess the depth image. + */ + depth_preprocess?: boolean + /** + * Image Size + * + * The size of the generated image. Leave it none to automatically infer from the control image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + | null + /** + * Normal Image Url + * + * The URL of the control image. + */ + normal_image_url?: string + /** + * Embeddings + * + * The list of embeddings to use. + */ + embeddings?: Array + /** + * Teed Image Url + * + * The URL of the control image. + */ + teed_image_url?: string + /** + * Loras + * + * The list of LoRA weights to use. + */ + loras?: Array + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Canny Image Url + * + * The URL of the control image. + */ + canny_image_url?: string + /** + * Segmentation Preprocess + * + * Whether to preprocess the segmentation image. + */ + segmentation_preprocess?: boolean + /** + * Format + * + * The format of the generated image. + */ + format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Request Id + * + * + * An id bound to a request, can be used with response to identify the request + * itself. + * + */ + request_id?: string + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number + /** + * Segmentation Image Url + * + * The URL of the control image. + */ + segmentation_image_url?: string + /** + * Openpose Image Url + * + * The URL of the control image. + */ + openpose_image_url?: string + /** + * Canny Preprocess + * + * Whether to preprocess the canny image. + */ + canny_preprocess?: boolean + /** + * Expand Prompt + * + * If set to true, the prompt will be expanded with additional prompts. + */ + expand_prompt?: boolean + /** + * Depth Image Url + * + * The URL of the control image. + */ + depth_image_url?: string + /** + * Normal Preprocess + * + * Whether to preprocess the normal image. + */ + normal_preprocess?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Teed Preprocess + * + * Whether to preprocess the teed image. + */ + teed_preprocess?: boolean + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Controlnet Conditioning Scale + * + * The scale of the controlnet conditioning. + */ + controlnet_conditioning_scale?: number + /** + * Safety Checker Version + * + * The version of the safety checker to use. 
v1 is the default CompVis safety checker. v2 uses a custom ViT model. + */ + safety_checker_version?: 'v1' | 'v2' + /** + * Openpose Preprocess + * + * Whether to preprocess the openpose image. + */ + openpose_preprocess?: boolean + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * PixArtSigmaOutput + */ +export type SchemaPixartSigmaOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + * + * The timings of the different steps of the generation process. + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * PixArtSigmaInput + */ +export type SchemaPixartSigmaInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Style + * + * The style to apply to the image. + */ + style?: + | '(No style)' + | 'Cinematic' + | 'Photographic' + | 'Anime' + | 'Manga' + | 'Digital Art' + | 'Pixel art' + | 'Fantasy art' + | 'Neonpunk' + | '3D Model' + /** + * Scheduler + * + * The scheduler to use for the model. + */ + scheduler?: 'DPM-SOLVER' | 'SA-SOLVER' + /** + * Sync Mode + * + * + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + * + */ + sync_mode?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean +} + +/** + * Output + */ +export type SchemaFluxSubjectOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. 
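+ * + * @example + * // Illustrative, not part of the generated schema: one flag per generated + * // image, e.g. has_nsfw_concepts: [false, false] for two safe images.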
+ */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * FluxSubjectInput + */ +export type SchemaFluxSubjectInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image Url + * + * URL of image of the subject + */ + image_url: string + /** + * Sync Mode + * + * + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + * + */ + sync_mode?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean +} + +/** + * Output + */ +export type SchemaSanaOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * TextToImageInput + */ +export type SchemaSanaInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Style Name + * + * The style to generate the image in. + */ + style_name?: + | '(No style)' + | 'Cinematic' + | 'Photographic' + | 'Anime' + | 'Manga' + | 'Digital Art' + | 'Pixel art' + | 'Fantasy art' + | 'Neonpunk' + | '3D Model' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. 
+ */ + sync_mode?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean +} + +/** + * FooocusOutput + */ +export type SchemaFooocusUpscaleOrVaryOutput = { + /** + * Images + * + * The generated image file info. + */ + images: Array + /** + * Timings + * + * The time taken for the generation process. + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array +} + +/** + * FooocusUpscaleOrVaryInput + */ +export type SchemaFooocusUpscaleOrVaryInput = { + /** + * Styles + * + * + * The style to use. + * + */ + styles?: Array< + | 'Fooocus V2' + | 'Fooocus Enhance' + | 'Fooocus Sharp' + | 'Fooocus Semi Realistic' + | 'Fooocus Masterpiece' + | 'Fooocus Photograph' + | 'Fooocus Negative' + | 'Fooocus Cinematic' + | 'SAI 3D Model' + | 'SAI Analog Film' + | 'SAI Anime' + | 'SAI Cinematic' + | 'SAI Comic Book' + | 'SAI Craft Clay' + | 'SAI Digital Art' + | 'SAI Enhance' + | 'SAI Fantasy Art' + | 'SAI Isometric' + | 'SAI Line Art' + | 'SAI Lowpoly' + | 'SAI Neonpunk' + | 'SAI Origami' + | 'SAI Photographic' + | 'SAI Pixel Art' + | 'SAI Texture' + | 'MRE Cinematic Dynamic' + | 'MRE Spontaneous Picture' + | 'MRE Artistic Vision' + | 'MRE Dark Dream' + | 'MRE Gloomy Art' + | 'MRE Bad Dream' + | 'MRE Underground' + | 'MRE Surreal Painting' + | 'MRE Dynamic Illustration' + | 'MRE Undead Art' + | 'MRE Elemental Art' + | 'MRE Space Art' + | 'MRE Ancient Illustration' + | 'MRE Brave Art' + | 'MRE Heroic Fantasy' + | 'MRE Dark Cyberpunk' + | 'MRE Lyrical Geometry' + | 'MRE Sumi E Symbolic' + | 'MRE Sumi E Detailed' + | 'MRE Manga' + | 'MRE Anime' + | 'MRE Comic' + | 'Ads Advertising' + | 'Ads Automotive' + | 'Ads Corporate' + | 'Ads Fashion Editorial' + | 'Ads Food Photography' + | 'Ads Gourmet Food Photography' + | 'Ads Luxury' + | 'Ads Real Estate' + | 'Ads Retail' + | 'Artstyle Abstract' + | 'Artstyle Abstract Expressionism' + | 'Artstyle Art Deco' + | 'Artstyle Art Nouveau' + | 'Artstyle Constructivist' + | 'Artstyle Cubist' + | 'Artstyle Expressionist' + | 'Artstyle Graffiti' + | 'Artstyle Hyperrealism' + | 'Artstyle Impressionist' + | 'Artstyle Pointillism' + | 'Artstyle Pop Art' + | 'Artstyle Psychedelic' + | 'Artstyle Renaissance' + | 'Artstyle Steampunk' + | 'Artstyle Surrealist' + | 'Artstyle Typography' + | 'Artstyle Watercolor' + | 'Futuristic Biomechanical' + | 'Futuristic Biomechanical Cyberpunk' + | 'Futuristic Cybernetic' + | 'Futuristic Cybernetic Robot' + | 'Futuristic Cyberpunk Cityscape' + | 'Futuristic Futuristic' + | 'Futuristic Retro Cyberpunk' + | 'Futuristic Retro Futurism' + | 'Futuristic Sci Fi' + | 
'Futuristic Vaporwave' + | 'Game Bubble Bobble' + | 'Game Cyberpunk Game' + | 'Game Fighting Game' + | 'Game Gta' + | 'Game Mario' + | 'Game Minecraft' + | 'Game Pokemon' + | 'Game Retro Arcade' + | 'Game Retro Game' + | 'Game Rpg Fantasy Game' + | 'Game Strategy Game' + | 'Game Streetfighter' + | 'Game Zelda' + | 'Misc Architectural' + | 'Misc Disco' + | 'Misc Dreamscape' + | 'Misc Dystopian' + | 'Misc Fairy Tale' + | 'Misc Gothic' + | 'Misc Grunge' + | 'Misc Horror' + | 'Misc Kawaii' + | 'Misc Lovecraftian' + | 'Misc Macabre' + | 'Misc Manga' + | 'Misc Metropolis' + | 'Misc Minimalist' + | 'Misc Monochrome' + | 'Misc Nautical' + | 'Misc Space' + | 'Misc Stained Glass' + | 'Misc Techwear Fashion' + | 'Misc Tribal' + | 'Misc Zentangle' + | 'Papercraft Collage' + | 'Papercraft Flat Papercut' + | 'Papercraft Kirigami' + | 'Papercraft Paper Mache' + | 'Papercraft Paper Quilling' + | 'Papercraft Papercut Collage' + | 'Papercraft Papercut Shadow Box' + | 'Papercraft Stacked Papercut' + | 'Papercraft Thick Layered Papercut' + | 'Photo Alien' + | 'Photo Film Noir' + | 'Photo Glamour' + | 'Photo Hdr' + | 'Photo Iphone Photographic' + | 'Photo Long Exposure' + | 'Photo Neon Noir' + | 'Photo Silhouette' + | 'Photo Tilt Shift' + | 'Cinematic Diva' + | 'Abstract Expressionism' + | 'Academia' + | 'Action Figure' + | 'Adorable 3D Character' + | 'Adorable Kawaii' + | 'Art Deco' + | 'Art Nouveau' + | 'Astral Aura' + | 'Avant Garde' + | 'Baroque' + | 'Bauhaus Style Poster' + | 'Blueprint Schematic Drawing' + | 'Caricature' + | 'Cel Shaded Art' + | 'Character Design Sheet' + | 'Classicism Art' + | 'Color Field Painting' + | 'Colored Pencil Art' + | 'Conceptual Art' + | 'Constructivism' + | 'Cubism' + | 'Dadaism' + | 'Dark Fantasy' + | 'Dark Moody Atmosphere' + | 'Dmt Art Style' + | 'Doodle Art' + | 'Double Exposure' + | 'Dripping Paint Splatter Art' + | 'Expressionism' + | 'Faded Polaroid Photo' + | 'Fauvism' + | 'Flat 2d Art' + | 'Fortnite Art Style' + | 'Futurism' + | 'Glitchcore' + | 'Glo Fi' + | 'Googie Art Style' + | 'Graffiti Art' + | 'Harlem Renaissance Art' + | 'High Fashion' + | 'Idyllic' + | 'Impressionism' + | 'Infographic Drawing' + | 'Ink Dripping Drawing' + | 'Japanese Ink Drawing' + | 'Knolling Photography' + | 'Light Cheery Atmosphere' + | 'Logo Design' + | 'Luxurious Elegance' + | 'Macro Photography' + | 'Mandola Art' + | 'Marker Drawing' + | 'Medievalism' + | 'Minimalism' + | 'Neo Baroque' + | 'Neo Byzantine' + | 'Neo Futurism' + | 'Neo Impressionism' + | 'Neo Rococo' + | 'Neoclassicism' + | 'Op Art' + | 'Ornate And Intricate' + | 'Pencil Sketch Drawing' + | 'Pop Art 2' + | 'Rococo' + | 'Silhouette Art' + | 'Simple Vector Art' + | 'Sketchup' + | 'Steampunk 2' + | 'Surrealism' + | 'Suprematism' + | 'Terragen' + | 'Tranquil Relaxing Atmosphere' + | 'Sticker Designs' + | 'Vibrant Rim Light' + | 'Volumetric Lighting' + | 'Watercolor 2' + | 'Whimsical And Playful' + | 'Mk Chromolithography' + | 'Mk Cross Processing Print' + | 'Mk Dufaycolor Photograph' + | 'Mk Herbarium' + | 'Mk Punk Collage' + | 'Mk Mosaic' + | 'Mk Van Gogh' + | 'Mk Coloring Book' + | 'Mk Singer Sargent' + | 'Mk Pollock' + | 'Mk Basquiat' + | 'Mk Andy Warhol' + | 'Mk Halftone Print' + | 'Mk Gond Painting' + | 'Mk Albumen Print' + | 'Mk Aquatint Print' + | 'Mk Anthotype Print' + | 'Mk Inuit Carving' + | 'Mk Bromoil Print' + | 'Mk Calotype Print' + | 'Mk Color Sketchnote' + | 'Mk Cibulak Porcelain' + | 'Mk Alcohol Ink Art' + | 'Mk One Line Art' + | 'Mk Blacklight Paint' + | 'Mk Carnival Glass' + | 'Mk Cyanotype Print' + | 'Mk 
Cross Stitching' + | 'Mk Encaustic Paint' + | 'Mk Embroidery' + | 'Mk Gyotaku' + | 'Mk Luminogram' + | 'Mk Lite Brite Art' + | 'Mk Mokume Gane' + | 'Pebble Art' + | 'Mk Palekh' + | 'Mk Suminagashi' + | 'Mk Scrimshaw' + | 'Mk Shibori' + | 'Mk Vitreous Enamel' + | 'Mk Ukiyo E' + | 'Mk Vintage Airline Poster' + | 'Mk Vintage Travel Poster' + | 'Mk Bauhaus Style' + | 'Mk Afrofuturism' + | 'Mk Atompunk' + | 'Mk Constructivism' + | 'Mk Chicano Art' + | 'Mk De Stijl' + | 'Mk Dayak Art' + | 'Mk Fayum Portrait' + | 'Mk Illuminated Manuscript' + | 'Mk Kalighat Painting' + | 'Mk Madhubani Painting' + | 'Mk Pictorialism' + | 'Mk Pichwai Painting' + | 'Mk Patachitra Painting' + | 'Mk Samoan Art Inspired' + | 'Mk Tlingit Art' + | 'Mk Adnate Style' + | 'Mk Ron English Style' + | 'Mk Shepard Fairey Style' + > + /** + * UOV Image URL + * + * The image to upscale or vary. + */ + uov_image_url: string + /** + * Performance + * + * + * You can choose Speed or Quality + * + */ + performance?: 'Speed' | 'Quality' | 'Extreme Speed' | 'Lightning' + /** + * Mixing Image Prompt and Vary/Upscale + * + * Mixing Image Prompt and Vary/Upscale + */ + mixing_image_prompt_and_vary_upscale?: boolean + image_prompt_3?: SchemaImagePrompt + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt?: string + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use up to 5 LoRAs + * and they will be merged together to generate the final image. + * + */ + loras?: Array + image_prompt_4?: SchemaImagePrompt + image_prompt_1?: SchemaImagePrompt + /** + * Enable Safety Checker + * + * If set to false, the safety checker will be disabled. + */ + enable_safety_checker?: boolean + /** + * Sharpness + * + * + * The sharpness of the generated image. Use it to control how sharp the generated + * image should be. Higher value means image and texture are sharper. + * + */ + sharpness?: number + /** + * Guidance Scale + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Aspect Ratio + * + * + * The size of the generated image. You can choose between some presets or + * custom height and width that **must be multiples of 8**. + * + */ + aspect_ratio?: string + /** + * Num Images + * + * + * Number of images to generate in one request + * + */ + num_images?: number + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'png' | 'jpeg' | 'webp' + /** + * Refiner Model + * + * Refiner (SDXL or SD 1.5) + */ + refiner_model?: 'None' | 'realisticVisionV60B1_v51VAE.safetensors' + image_prompt_2?: SchemaImagePrompt + /** + * Sync Mode + * + * + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + * + */ + sync_mode?: boolean + /** + * UOV Method + * + * The method to use for upscaling or varying. 
+ */ + uov_method?: + | 'Disabled' + | 'Vary (Subtle)' + | 'Vary (Strong)' + | 'Upscale (1.5x)' + | 'Upscale (2x)' + | 'Upscale (Fast 2x)' + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number | null + /** + * Refiner Switch At + * + * + * Use 0.4 for SD1.5 realistic models; 0.667 for SD1.5 anime models; + * 0.8 for XL-refiners; or any value for switching two SDXL models. + * + */ + refiner_switch?: number +} + +/** + * SD3Output + */ +export type SchemaStableDiffusionV3MediumOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Number of Images + * + * The number of images generated. + */ + num_images: number + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * TextToImageInput + */ +export type SchemaStableDiffusionV3MediumInput = { + /** + * Enhance Prompt + * + * If set to true, the prompt will be upsampled with more details. + */ + prompt_expansion?: boolean + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number + /** + * Negative Prompt + * + * The negative prompt to generate an image from. + */ + negative_prompt?: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean +} + +/** + * Output + */ +export type SchemaFluxLoraInpaintingOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed.
+ * + */ + seed: number +} + +/** + * InpaintInput + */ +export type SchemaFluxLoraInpaintingInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. This is always set to 1 for streaming output. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image Url + * + * URL of the image to use for inpainting or img2img. + */ + image_url: string + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use any number of LoRAs + * and they will be merged together to generate the final image. + * + */ + loras?: Array + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Strength + * + * The strength to use for inpainting/image-to-image. Only used if the image_url is provided. 1.0 completely remakes the image, while 0.0 preserves the original. + */ + strength?: number + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Mask Url + * + * + * The mask of the area to inpaint. + * + */ + mask_url: string + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean +} + +/** + * Output + */ +export type SchemaStableDiffusionV35MediumOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * TextToImageInput + */ +export type SchemaStableDiffusionV35MediumInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response.
This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + * + */ + sync_mode?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean +} + +/** + * Output + */ +export type SchemaFluxSchnellOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * SchnellTextToImageInput + */ +export type SchemaFluxSchnellInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * The acceleration level for the generation. Higher levels produce images faster. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number | null + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * Output + */ +export type SchemaOmnigenV1Output = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info.
+ */
+ images: Array
+ /**
+ * Timings
+ */
+ timings: {
+ [key: string]: number
+ }
+ /**
+ * Has Nsfw Concepts
+ *
+ * Whether the generated images contain NSFW concepts.
+ */
+ has_nsfw_concepts: Array
+ /**
+ * Seed
+ *
+ *
+ * Seed of the generated Image. It will be the same value of the one passed in the
+ * input or the randomly generated that was used in case none was passed.
+ *
+ */
+ seed: number
+}
+
+/**
+ * TextToImageInput
+ */
+export type SchemaOmnigenV1Input = {
+ /**
+ * Prompt
+ *
+ * The prompt to generate an image from.
+ */
+ prompt: string
+ /**
+ * Num Images
+ *
+ * The number of images to generate.
+ */
+ num_images?: number
+ /**
+ * Image Size
+ *
+ * The size of the generated image.
+ */
+ image_size?:
+ | SchemaImageSize
+ | 'square_hd'
+ | 'square'
+ | 'portrait_4_3'
+ | 'portrait_16_9'
+ | 'landscape_4_3'
+ | 'landscape_16_9'
+ /**
+ * Image Guidance scale
+ *
+ *
+ * The Image Guidance scale is a measure of how close you want
+ * the model to stick to your input image when looking for a related image to show you.
+ *
+ */
+ img_guidance_scale?: number
+ /**
+ * Input Image Urls
+ *
+ * URLs of images to use while generating the image. Use <|image_1|> for the first image, and so on.
+ */
+ input_image_urls?: Array
+ /**
+ * Output Format
+ *
+ * The format of the generated image.
+ */
+ output_format?: 'jpeg' | 'png'
+ /**
+ * Sync Mode
+ *
+ *
+ * If set to true, the function will wait for the image to be generated and uploaded
+ * before returning the response. This will increase the latency of the function but
+ * it allows you to get the image directly in the response without going through the CDN.
+ *
+ */
+ sync_mode?: boolean
+ /**
+ * Guidance scale (CFG)
+ *
+ *
+ * The CFG (Classifier Free Guidance) scale is a measure of how close you want
+ * the model to stick to your prompt when looking for a related image to show you.
+ *
+ */
+ guidance_scale?: number
+ /**
+ * Num Inference Steps
+ *
+ * The number of inference steps to perform.
+ */
+ num_inference_steps?: number
+ /**
+ * Enable Safety Checker
+ *
+ * If set to true, the safety checker will be enabled.
+ */
+ enable_safety_checker?: boolean
+ /**
+ * Seed
+ *
+ *
+ * The same seed and the same prompt given to the same version of the model
+ * will output the same image every time.
+ *
+ */
+ seed?: number
+}
+
+/**
+ * Output
+ */
+export type SchemaAuraFlowOutput = {
+ /**
+ * Prompt
+ *
+ * The expanded prompt
+ */
+ prompt: string
+ /**
+ * Images
+ *
+ * The generated images
+ */
+ images: Array
+ /**
+ * Seed
+ *
+ * The seed used to generate the images
+ */
+ seed: number
+}
+
+/**
+ * Input
+ */
+export type SchemaAuraFlowInput = {
+ /**
+ * Prompt
+ *
+ * The prompt to generate images from
+ */
+ prompt: string
+ /**
+ * Num Images
+ *
+ * The number of images to generate
+ */
+ num_images?: number
+ /**
+ * Expand Prompt
+ *
+ * Whether to perform prompt expansion (recommended)
+ */
+ expand_prompt?: boolean
+ /**
+ * Sync Mode
+ *
+ *
+ * If set to true, the function will wait for the image to be generated and uploaded
+ * before returning the response. This will increase the latency of the function but
+ * it allows you to get the image directly in the response without going through the CDN.
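+ *
+ * An illustrative sketch of that trade-off (field values are made up):
+ *
+ * ```ts
+ * const input: SchemaAuraFlowInput = {
+ *   prompt: 'aurora borealis over a fjord',
+ *   sync_mode: true, // block until upload finishes; the response carries the image directly
+ * }
+ * ```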
+ *
+ */
+ sync_mode?: boolean
+ /**
+ * Guidance Scale
+ *
+ * Classifier free guidance scale
+ */
+ guidance_scale?: number
+ /**
+ * Num Inference Steps
+ *
+ * The number of inference steps to take
+ */
+ num_inference_steps?: number
+ /**
+ * Seed
+ *
+ * The seed to use for generating images
+ */
+ seed?: number
+}
+
+/**
+ * T2IOutput
+ */
+export type SchemaLumaPhotonFlashOutput = {
+ /**
+ * Images
+ *
+ * The generated image
+ */
+ images: Array
+}
+
+/**
+ * TextToImageRequest
+ */
+export type SchemaLumaPhotonFlashInput = {
+ /**
+ * Prompt
+ */
+ prompt: string
+ /**
+ * Aspect Ratio
+ *
+ * The aspect ratio of the generated image
+ */
+ aspect_ratio?: '16:9' | '9:16' | '1:1' | '4:3' | '3:4' | '21:9' | '9:21'
+}
+
+/**
+ * Output
+ */
+export type SchemaIdeogramV2TurboOutput = {
+ /**
+ * Images
+ */
+ images: Array
+ /**
+ * Seed
+ *
+ * Seed used for the random number generator
+ */
+ seed: number
+}
+
+/**
+ * TextToImageInput
+ */
+export type SchemaIdeogramV2TurboInput = {
+ /**
+ * Prompt
+ */
+ prompt: string
+ /**
+ * Aspect Ratio
+ *
+ * The aspect ratio of the generated image
+ */
+ aspect_ratio?:
+ | '10:16'
+ | '16:10'
+ | '9:16'
+ | '16:9'
+ | '4:3'
+ | '3:4'
+ | '1:1'
+ | '1:3'
+ | '3:1'
+ | '3:2'
+ | '2:3'
+ /**
+ * Style
+ *
+ * The style of the generated image
+ */
+ style?: 'auto' | 'general' | 'realistic' | 'design' | 'render_3D' | 'anime'
+ /**
+ * Expand Prompt
+ *
+ * Whether to expand the prompt with MagicPrompt functionality.
+ */
+ expand_prompt?: boolean
+ /**
+ * Sync Mode
+ *
+ * If `True`, the media will be returned as a data URI and the output data won't be available in the request history.
+ */
+ sync_mode?: boolean
+ /**
+ * Seed
+ *
+ * Seed for the random number generator
+ */
+ seed?: number | unknown
+ /**
+ * Negative Prompt
+ *
+ * A negative prompt to avoid in the generated image
+ */
+ negative_prompt?: string
+}
+
+/**
+ * Recraft20BTextToImageOutput
+ */
+export type SchemaRecraft20bOutput = {
+ /**
+ * Images
+ */
+ images: Array
+}
+
+/**
+ * Recraft20BTextToImageInput
+ */
+export type SchemaRecraft20bInput = {
+ /**
+ * Prompt
+ */
+ prompt: string
+ /**
+ * Image Size
+ */
+ image_size?:
+ | SchemaImageSize
+ | 'square_hd'
+ | 'square'
+ | 'portrait_4_3'
+ | 'portrait_16_9'
+ | 'landscape_4_3'
+ | 'landscape_16_9'
+ /**
+ * Enable Safety Checker
+ *
+ * If set to true, the safety checker will be enabled.
+ */
+ enable_safety_checker?: boolean
+ /**
+ * Colors
+ *
+ * An array of preferable colors
+ */
+ colors?: Array
+ /**
+ * Style
+ *
+ * The style of the generated images. Vector images cost 2X as much.
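+ *
+ * A sketch with arbitrary values; note that any `vector_illustration/*`
+ * choice doubles the cost relative to raster styles:
+ *
+ * ```ts
+ * const input: SchemaRecraft20bInput = {
+ *   prompt: 'a minimalist fox logo',
+ *   style: 'vector_illustration/line_art', // vector output, billed at 2x
+ * }
+ * ```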
+ */ + style?: + | 'any' + | 'realistic_image' + | 'digital_illustration' + | 'vector_illustration' + | 'realistic_image/b_and_w' + | 'realistic_image/enterprise' + | 'realistic_image/hard_flash' + | 'realistic_image/hdr' + | 'realistic_image/motion_blur' + | 'realistic_image/natural_light' + | 'realistic_image/studio_portrait' + | 'digital_illustration/2d_art_poster' + | 'digital_illustration/2d_art_poster_2' + | 'digital_illustration/3d' + | 'digital_illustration/80s' + | 'digital_illustration/engraving_color' + | 'digital_illustration/glow' + | 'digital_illustration/grain' + | 'digital_illustration/hand_drawn' + | 'digital_illustration/hand_drawn_outline' + | 'digital_illustration/handmade_3d' + | 'digital_illustration/infantile_sketch' + | 'digital_illustration/kawaii' + | 'digital_illustration/pixel_art' + | 'digital_illustration/psychedelic' + | 'digital_illustration/seamless' + | 'digital_illustration/voxel' + | 'digital_illustration/watercolor' + | 'vector_illustration/cartoon' + | 'vector_illustration/doodle_line_art' + | 'vector_illustration/engraving' + | 'vector_illustration/flat_2' + | 'vector_illustration/kawaii' + | 'vector_illustration/line_art' + | 'vector_illustration/line_circuit' + | 'vector_illustration/linocut' + | 'vector_illustration/seamless' + /** + * Style Id + * + * The ID of the custom style reference (optional) + */ + style_id?: string +} + +/** + * RGBColor + */ +export type SchemaRgbColor = { + /** + * R + * + * Red color value + */ + r?: number + /** + * B + * + * Blue color value + */ + b?: number + /** + * G + * + * Green color value + */ + g?: number +} + +/** + * Output + */ +export type SchemaBriaTextToImageHdOutput = { + /** + * Images + * + * The generated images + */ + images: Array + /** + * Seed + * + * Seed value used for generation. + */ + seed: number +} + +/** + * TextToImageRequest + */ +export type SchemaBriaTextToImageHdInput = { + /** + * Prompt + * + * The prompt you would like to use to generate images. + */ + prompt: string + /** + * Num Images + * + * How many images you would like to generate. When using any Guidance Method, Value is set to 1. + */ + num_images?: number + /** + * Prompt Enhancement + * + * When set to true, enhances the provided prompt by generating additional, more descriptive variations, resulting in more diverse and creative output images. + */ + prompt_enhancement?: boolean + /** + * Guidance + * + * Guidance images to use for the generation. Up to 4 guidance methods can be combined during a single inference. + */ + guidance?: Array + /** + * Aspect Ratio + * + * The aspect ratio of the image. When a guidance method is being used, the aspect ratio is defined by the guidance image and this parameter is ignored. + */ + aspect_ratio?: + | '1:1' + | '2:3' + | '3:2' + | '3:4' + | '4:3' + | '4:5' + | '5:4' + | '9:16' + | '16:9' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Medium + * + * Which medium should be included in your generated images. This parameter is optional. + */ + medium?: 'photography' | 'art' + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. 
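+ *
+ * An illustrative reproducibility sketch (prompt and seed are arbitrary):
+ *
+ * ```ts
+ * // Submitting the same { prompt, seed } pair to the same model version
+ * // replays the identical image.
+ * const input: SchemaBriaTextToImageHdInput = { prompt: 'studio portrait of a falcon', seed: 42 }
+ * ```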
+ *
+ */
+ seed?: number
+ /**
+ * Negative Prompt
+ *
+ * The negative prompt you would like to use to generate images.
+ */
+ negative_prompt?: string
+ /**
+ * Num Inference Steps
+ *
+ * The number of iterations the model goes through to refine the generated image. This parameter is optional.
+ */
+ num_inference_steps?: number
+}
+
+/**
+ * GuidanceInput
+ */
+export type SchemaGuidanceInput = {
+ /**
+ * Scale
+ *
+ * Impact of the guidance.
+ */
+ scale?: number
+ /**
+ * Method
+ *
+ * Which guidance type you would like to include in the generation. Up to 4 guidance methods can be combined during a single inference. This parameter is optional.
+ */
+ method?:
+ | 'controlnet_canny'
+ | 'controlnet_depth'
+ | 'controlnet_recoloring'
+ | 'controlnet_color_grid'
+ /**
+ * Image Url
+ *
+ * The image that should be used as guidance, in base64 format, with the method defined in guidance_method_1. Accepted formats are jpeg, jpg, png, webp. Maximum file size 12MB. If more than one guidance method is used, all guidance images must be of the same aspect ratio, and this will be the aspect ratio of the generated results. If guidance_method_1 is selected, an image must be provided.
+ */
+ image_url: string
+}
+
+/**
+ * Output
+ */
+export type SchemaBriaTextToImageFastOutput = {
+ /**
+ * Images
+ *
+ * The generated images
+ */
+ images: Array
+ /**
+ * Seed
+ *
+ * Seed value used for generation.
+ */
+ seed: number
+}
+
+/**
+ * FastTextToImageRequest
+ */
+export type SchemaBriaTextToImageFastInput = {
+ /**
+ * Prompt
+ *
+ * The prompt you would like to use to generate images.
+ */
+ prompt: string
+ /**
+ * Num Images
+ *
+ * How many images you would like to generate. When using any Guidance Method, Value is set to 1.
+ */
+ num_images?: number
+ /**
+ * Prompt Enhancement
+ *
+ * When set to true, enhances the provided prompt by generating additional, more descriptive variations, resulting in more diverse and creative output images.
+ */
+ prompt_enhancement?: boolean
+ /**
+ * Guidance
+ *
+ * Guidance images to use for the generation. Up to 4 guidance methods can be combined during a single inference.
+ */
+ guidance?: Array
+ /**
+ * Aspect Ratio
+ *
+ * The aspect ratio of the image. When a guidance method is being used, the aspect ratio is defined by the guidance image and this parameter is ignored.
+ */
+ aspect_ratio?:
+ | '1:1'
+ | '2:3'
+ | '3:2'
+ | '3:4'
+ | '4:3'
+ | '4:5'
+ | '5:4'
+ | '9:16'
+ | '16:9'
+ /**
+ * Sync Mode
+ *
+ * If `True`, the media will be returned as a data URI and the output data won't be available in the request history.
+ */
+ sync_mode?: boolean
+ /**
+ * Guidance scale (CFG)
+ *
+ *
+ * The CFG (Classifier Free Guidance) scale is a measure of how close you want
+ * the model to stick to your prompt when looking for a related image to show you.
+ *
+ */
+ guidance_scale?: number
+ /**
+ * Medium
+ *
+ * Which medium should be included in your generated images. This parameter is optional.
+ */
+ medium?: 'photography' | 'art'
+ /**
+ * Seed
+ *
+ *
+ * The same seed and the same prompt given to the same version of the model
+ * will output the same image every time.
+ *
+ */
+ seed?: number
+ /**
+ * Negative Prompt
+ *
+ * The negative prompt you would like to use to generate images.
+ */
+ negative_prompt?: string
+ /**
+ * Num Inference Steps
+ *
+ * The number of iterations the model goes through to refine the generated image. This parameter is optional.
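+ *
+ * Fewer steps generally trade quality for speed; more steps spend latency on
+ * extra refinement. A sketch with arbitrary values:
+ *
+ * ```ts
+ * const draft: SchemaBriaTextToImageFastInput = { prompt: 'paper crane', num_inference_steps: 8 }
+ * const refined: SchemaBriaTextToImageFastInput = { prompt: 'paper crane', num_inference_steps: 30 }
+ * ```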
+ */ + num_inference_steps?: number +} + +/** + * Output + */ +export type SchemaBriaTextToImageBaseOutput = { + /** + * Images + * + * The generated images + */ + images: Array + /** + * Seed + * + * Seed value used for generation. + */ + seed: number +} + +/** + * TextToImageRequest + */ +export type SchemaBriaTextToImageBaseInput = { + /** + * Prompt + * + * The prompt you would like to use to generate images. + */ + prompt: string + /** + * Num Images + * + * How many images you would like to generate. When using any Guidance Method, Value is set to 1. + */ + num_images?: number + /** + * Prompt Enhancement + * + * When set to true, enhances the provided prompt by generating additional, more descriptive variations, resulting in more diverse and creative output images. + */ + prompt_enhancement?: boolean + /** + * Guidance + * + * Guidance images to use for the generation. Up to 4 guidance methods can be combined during a single inference. + */ + guidance?: Array + /** + * Aspect Ratio + * + * The aspect ratio of the image. When a guidance method is being used, the aspect ratio is defined by the guidance image and this parameter is ignored. + */ + aspect_ratio?: + | '1:1' + | '2:3' + | '3:2' + | '3:4' + | '4:3' + | '4:5' + | '5:4' + | '9:16' + | '16:9' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Medium + * + * Which medium should be included in your generated images. This parameter is optional. + */ + medium?: 'photography' | 'art' + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Negative Prompt + * + * The negative prompt you would like to use to generate images. + */ + negative_prompt?: string + /** + * Num Inference Steps + * + * The number of iterations the model goes through to refine the generated image. This parameter is optional. + */ + num_inference_steps?: number +} + +/** + * SwittiOutput + */ +export type SchemaSwitti512Output = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated images + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * TextToImageInput + */ +export type SchemaSwitti512Input = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Sampling Top-k + * + * The number of top-k tokens to sample from. + */ + sampling_top_k?: number + /** + * Disable CFG starting scale + * + * Disable CFG starting scale + */ + turn_off_cfg_start_si?: number + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. 
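+ *
+ * Higher values follow the prompt more literally; lower values leave the
+ * model more freedom. A sketch (the value is illustrative, not a documented
+ * default):
+ *
+ * ```ts
+ * const input: SchemaSwitti512Input = { prompt: 'isometric castle at dawn', guidance_scale: 6 }
+ * ```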
+ * + */ + guidance_scale?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Smoothing starting scale + * + * Smoothing starting scale + */ + smooth_start_si?: number + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Temperature after disabling CFG + * + * Temperature after disabling CFG + */ + last_scale_temp?: number + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * More Diverse + * + * More diverse sampling + */ + more_diverse?: boolean + /** + * Sync Mode + * + * + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + * + */ + sync_mode?: boolean + /** + * More Smooth + * + * Smoothing with Gumbel softmax sampling + */ + more_smooth?: boolean + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Sampling Top-p + * + * The top-p probability to sample from. + */ + sampling_top_p?: number +} + +/** + * SwittiOutput + */ +export type SchemaSwittiOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated images + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * TextToImageInput + */ +export type SchemaSwittiInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Sampling Top-k + * + * The number of top-k tokens to sample from. + */ + sampling_top_k?: number + /** + * Disable CFG starting scale + * + * Disable CFG starting scale + */ + turn_off_cfg_start_si?: number + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Smoothing starting scale + * + * Smoothing starting scale + */ + smooth_start_si?: number + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Temperature after disabling CFG + * + * Temperature after disabling CFG + */ + last_scale_temp?: number + /** + * Output Format + * + * The format of the generated image. 
+ */ + output_format?: 'jpeg' | 'png' + /** + * More Diverse + * + * More diverse sampling + */ + more_diverse?: boolean + /** + * Sync Mode + * + * + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + * + */ + sync_mode?: boolean + /** + * More Smooth + * + * Smoothing with Gumbel softmax sampling + */ + more_smooth?: boolean + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Sampling Top-p + * + * The top-p probability to sample from. + */ + sampling_top_p?: number +} + +/** + * Output + */ +export type SchemaFluxProV11Output = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * Image + */ +export type SchemaRegistryImageFastSdxlModelsImage = { + /** + * Height + */ + height: number + /** + * Content Type + */ + content_type?: string + /** + * Url + */ + url: string + /** + * Width + */ + width: number +} + +/** + * FluxProPlusTextToImageInput + */ +export type SchemaFluxProV11Input = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Safety Tolerance + * + * The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive. + */ + safety_tolerance?: '1' | '2' | '3' | '4' | '5' | '6' + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Enhance Prompt + * + * Whether to enhance the prompt for better results. + */ + enhance_prompt?: boolean +} + +/** + * Output + */ +export type SchemaFluxProV11UltraFinetunedOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. 
It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * FluxProUltraTextToImageFinetunedInput + */ +export type SchemaFluxProV11UltraFinetunedInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Fine-tune ID + * + * References your specific model + */ + finetune_id: string + /** + * Safety Tolerance + * + * The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive. + */ + safety_tolerance?: '1' | '2' | '3' | '4' | '5' | '6' + /** + * Image Prompt Strength + * + * The strength of the image prompt, between 0 and 1. + */ + image_prompt_strength?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Enhance Prompt + * + * Whether to enhance the prompt for better results. + */ + enhance_prompt?: boolean + /** + * Raw + * + * Generate less processed, more natural-looking images. + */ + raw?: boolean + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Aspect Ratio + * + * The aspect ratio of the generated image. + */ + aspect_ratio?: + | '21:9' + | '16:9' + | '4:3' + | '3:2' + | '1:1' + | '2:3' + | '3:4' + | '9:16' + | '9:21' + | string + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Image URL + * + * The image URL to generate an image from. + */ + image_url?: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Fine-tune Strength + * + * + * Controls finetune influence. + * Increase this value if your target concept isn't showing up strongly enough. + * The optimal setting depends on your finetune and prompt + * + */ + finetune_strength: number +} + +/** + * Output + */ +export type SchemaJanusOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Timings + */ + timings: { + [key: string]: number + } +} + +/** + * JanusInput + */ +export type SchemaJanusInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * Number of images to generate in parallel. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Cfg Weight + * + * Classifier Free Guidance scale - how closely to follow the prompt. + */ + cfg_weight?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. 
+ */ + enable_safety_checker?: boolean + /** + * Temperature + * + * Controls randomness in the generation. Higher values make output more random. + */ + temperature?: number + /** + * Seed + * + * Random seed for reproducible generation. + */ + seed?: number +} + +/** + * ImageOutput + */ +export type SchemaLuminaImageV2Output = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated images + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * TextToImageInput + */ +export type SchemaLuminaImageV2Input = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Cfg Trunc Ratio + * + * The ratio of the timestep interval to apply normalization-based guidance scale. + */ + cfg_trunc_ratio?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * System Prompt + * + * The system prompt to use. + */ + system_prompt?: string + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Cfg Normalization + * + * Whether to apply normalization-based guidance scale. 
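+ *
+ * Works together with `cfg_trunc_ratio` above. An illustrative pairing
+ * (values are arbitrary):
+ *
+ * ```ts
+ * const input: SchemaLuminaImageV2Input = {
+ *   prompt: 'temple garden in the rain',
+ *   cfg_normalization: true,
+ *   cfg_trunc_ratio: 0.25, // fraction of the timestep interval that uses normalized guidance
+ * }
+ * ```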
+ */ + cfg_normalization?: boolean +} + +/** + * Output + */ +export type SchemaImagen3FastOutput = { + /** + * Images + */ + images: Array + /** + * Seed + * + * Seed used for generation + */ + seed: number +} + +/** + * TextToImageInput + */ +export type SchemaImagen3FastInput = { + /** + * Prompt + * + * The text prompt describing what you want to see + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated image + */ + aspect_ratio?: '1:1' | '16:9' | '9:16' | '3:4' | '4:3' + /** + * Num Images + * + * Number of images to generate (1-4) + */ + num_images?: number + /** + * Seed + * + * Random seed for reproducible generation + */ + seed?: number + /** + * Negative Prompt + * + * A description of what to discourage in the generated images + */ + negative_prompt?: string +} + +/** + * Output + */ +export type SchemaImagen3Output = { + /** + * Images + */ + images: Array + /** + * Seed + * + * Seed used for generation + */ + seed: number +} + +/** + * TextToImageInput + */ +export type SchemaImagen3Input = { + /** + * Prompt + * + * The text prompt describing what you want to see + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated image + */ + aspect_ratio?: '1:1' | '16:9' | '9:16' | '3:4' | '4:3' + /** + * Num Images + * + * Number of images to generate (1-4) + */ + num_images?: number + /** + * Seed + * + * Random seed for reproducible generation + */ + seed?: number + /** + * Negative Prompt + * + * A description of what to discourage in the generated images + */ + negative_prompt?: string +} + +/** + * Output + */ +export type SchemaFluxControlLoraDepthOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * DepthLoraInput + */ +export type SchemaFluxControlLoraDepthInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Control Lora Strength + * + * The strength of the control lora. + */ + control_lora_strength?: number + /** + * Preprocess Depth + * + * + * If set to true, the input image will be preprocessed to extract depth information. + * This is useful for generating depth maps from images. + * + */ + preprocess_depth?: boolean + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use any number of LoRAs + * and they will be merged together to generate the final image. 
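+ *
+ * The element type is not expanded here; fal LoRA weights are commonly
+ * shaped as `{ path, scale }`, which this sketch assumes:
+ *
+ * ```ts
+ * const input: SchemaFluxControlLoraDepthInput = {
+ *   prompt: 'watercolor alley at dusk',
+ *   control_lora_image_url: 'https://example.com/depth-source.png',
+ *   loras: [{ path: 'https://example.com/style-lora.safetensors', scale: 0.8 }], // assumed shape
+ * }
+ * ```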
+ * + */ + loras?: Array + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Control Lora Image Url + * + * + * The image to use for control lora. This is used to control the style of the generated image. + * + */ + control_lora_image_url: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number +} + +/** + * Output + */ +export type SchemaFluxControlLoraCannyOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * TextToImageInput + */ +export type SchemaFluxControlLoraCannyInput = { + /** + * Control Lora Strength + * + * The strength of the control lora. + */ + control_lora_strength?: number + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use any number of LoRAs + * and they will be merged together to generate the final image. + * + */ + loras?: Array + /** + * Sync Mode + * + * + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + * + */ + sync_mode?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Control Lora Image Url + * + * + * The image to use for control lora. This is used to control the style of the generated image. + * + */ + control_lora_image_url?: string + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. 
+ * + */ + seed?: number +} + +/** + * Output + */ +export type SchemaIdeogramV2aOutput = { + /** + * Images + */ + images: Array + /** + * Seed + * + * Seed used for the random number generator + */ + seed: number +} + +/** + * BaseTextToImageInput + */ +export type SchemaIdeogramV2aInput = { + /** + * Prompt + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated image + */ + aspect_ratio?: + | '10:16' + | '16:10' + | '9:16' + | '16:9' + | '4:3' + | '3:4' + | '1:1' + | '1:3' + | '3:1' + | '3:2' + | '2:3' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Style + * + * The style of the generated image + */ + style?: 'auto' | 'general' | 'realistic' | 'design' | 'render_3D' | 'anime' + /** + * Seed + * + * Seed for the random number generator + */ + seed?: number | unknown + /** + * Expand Prompt + * + * Whether to expand the prompt with MagicPrompt functionality. + */ + expand_prompt?: boolean +} + +/** + * Output + */ +export type SchemaIdeogramV2aTurboOutput = { + /** + * Images + */ + images: Array + /** + * Seed + * + * Seed used for the random number generator + */ + seed: number +} + +/** + * BaseTextToImageInput + */ +export type SchemaIdeogramV2aTurboInput = { + /** + * Prompt + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated image + */ + aspect_ratio?: + | '10:16' + | '16:10' + | '9:16' + | '16:9' + | '4:3' + | '3:4' + | '1:1' + | '1:3' + | '3:1' + | '3:2' + | '2:3' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Style + * + * The style of the generated image + */ + style?: 'auto' | 'general' | 'realistic' | 'design' | 'render_3D' | 'anime' + /** + * Seed + * + * Seed for the random number generator + */ + seed?: number | unknown + /** + * Expand Prompt + * + * Whether to expand the prompt with MagicPrompt functionality. + */ + expand_prompt?: boolean +} + +/** + * ImageOutput + */ +export type SchemaCogview4Output = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated images + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * TextToImageInput + */ +export type SchemaCogview4Input = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. 
This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + * + */ + sync_mode?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean +} + +/** + * Output + */ +export type SchemaJuggernautFluxBaseOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * DevTextToImageInput + */ +export type SchemaJuggernautFluxBaseInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean +} + +/** + * Output + */ +export type SchemaRundiffusionPhotoFluxOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. 
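+ *
+ * This array runs parallel to `images`: entry `i` flags image `i`. An
+ * illustrative filter over an output of this shape:
+ *
+ * ```ts
+ * // Keep only images whose parallel NSFW flag is false.
+ * const safeImages = (out: SchemaRundiffusionPhotoFluxOutput) =>
+ *   out.images.filter((_, i) => !out.has_nsfw_concepts[i])
+ * ```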
+ */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * PhotoLoraT2IInput + */ +export type SchemaRundiffusionPhotoFluxInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use any number of LoRAs + * and they will be merged together to generate the final image. + * + */ + loras?: Array + /** + * Sync Mode + * + * + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + * + */ + sync_mode?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Photo Lora Scale + * + * LoRA Scale of the photo lora model + */ + photo_lora_scale?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean +} + +/** + * Output + */ +export type SchemaJuggernautFluxLoraOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * TextToImageInput + */ +export type SchemaJuggernautFluxLoraInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use any number of LoRAs + * and they will be merged together to generate the final image. 
+ * + */ + loras?: Array + /** + * Sync Mode + * + * + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + * + */ + sync_mode?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean +} + +/** + * Output + */ +export type SchemaJuggernautFluxProOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * DevTextToImageInput + */ +export type SchemaJuggernautFluxProInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean +} + +/** + * Output + */ +export type SchemaJuggernautFluxLightningOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. 
It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * SchnellTextToImageInput + */ +export type SchemaJuggernautFluxLightningInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * Output + */ +export type SchemaSanaSprintOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * SprintInput + */ +export type SchemaSanaSprintInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Style Name + * + * The style to generate the image in. + */ + style_name?: + | '(No style)' + | 'Cinematic' + | 'Photographic' + | 'Anime' + | 'Manga' + | 'Digital Art' + | 'Pixel art' + | 'Fantasy art' + | 'Neonpunk' + | '3D Model' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Negative Prompt + * + * + * The negative prompt to use. 
Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean +} + +/** + * Output + */ +export type SchemaSanaV1548bOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * TextToImageInput + */ +export type SchemaSanaV1548bInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Style Name + * + * The style to generate the image in. + */ + style_name?: + | '(No style)' + | 'Cinematic' + | 'Photographic' + | 'Anime' + | 'Manga' + | 'Digital Art' + | 'Pixel art' + | 'Fantasy art' + | 'Neonpunk' + | '3D Model' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean +} + +/** + * Output + */ +export type SchemaSanaV1516bOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. 
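+ *
+ * Persisting this value lets a caller replay a generation later. An
+ * illustrative round trip, given some prior result `previous` of this type:
+ *
+ * ```ts
+ * const rerun: SchemaSanaV1516bInput = { prompt: 'red panda in snow', seed: previous.seed }
+ * ```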
+ * + */ + seed: number +} + +/** + * TextToImageInput + */ +export type SchemaSanaV1516bInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Style Name + * + * The style to generate the image in. + */ + style_name?: + | '(No style)' + | 'Cinematic' + | 'Photographic' + | 'Anime' + | 'Manga' + | 'Digital Art' + | 'Pixel art' + | 'Fantasy art' + | 'Neonpunk' + | '3D Model' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean +} + +/** + * ImageResponse + */ +export type SchemaGptImage1TextToImageOutput = { + /** + * Images + * + * The generated images. + */ + images: Array +} + +/** + * ImageFile + */ +export type SchemaImageFile = { + /** + * File Size + * + * The size of the file in bytes. + */ + file_size?: number + /** + * Height + * + * The height of the image + */ + height?: number + /** + * Url + * + * The URL where the file can be downloaded from. + */ + url: string + /** + * Width + * + * The width of the image + */ + width?: number + /** + * File Name + * + * The name of the file. It will be auto-generated if not provided. + */ + file_name?: string + /** + * Content Type + * + * The mime type of the file. 
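+ * + * @example + * // Illustrative hand-written value of this shape; real values come from the + * // API, and the URL here is a hypothetical placeholder. + * const file: SchemaImageFile = { + *   url: 'https://example.com/out.png', + *   content_type: 'image/png', + *   width: 1024, + *   height: 768, + * }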
+ */ + content_type?: string + /** + * File Data + * + * File data + */ + file_data?: Blob | File +} + +/** + * TextToImageRequest + */ +export type SchemaGptImage1TextToImageInput = { + /** + * Prompt + * + * The prompt for image generation + */ + prompt: string + /** + * Number of Images + * + * Number of images to generate + */ + num_images?: number + /** + * Image Size + * + * Aspect ratio for the generated image + */ + image_size?: 'auto' | '1024x1024' | '1536x1024' | '1024x1536' + /** + * Background + * + * Background for the generated image + */ + background?: 'auto' | 'transparent' | 'opaque' + /** + * Quality + * + * Quality for the generated image + */ + quality?: 'auto' | 'low' | 'medium' | 'high' + /** + * Output Format + * + * Output format for the images + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean +} + +/** + * Output + */ +export type SchemaFLiteTextureOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * TextToImageInputTexture + */ +export type SchemaFLiteTextureInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Sync Mode + * + * + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + * + */ + sync_mode?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Negative prompt + * + * Negative Prompt for generation. + */ + negative_prompt?: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean +} + +/** + * Output + */ +export type SchemaFLiteStandardOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. 
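+ * + * @example + * // Illustrative sketch, assuming the flags are index-aligned with `images` + * // as the descriptions suggest: drop flagged results before display. + * declare const out: SchemaFLiteTextureOutput + * const safe = out.images.filter((_, i) => !out.has_nsfw_concepts[i])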
+ */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * TextToImageInputStandard + */ +export type SchemaFLiteStandardInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Sync Mode + * + * + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + * + */ + sync_mode?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Negative prompt + * + * Negative Prompt for generation. + */ + negative_prompt?: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean +} + +/** + * ColorPaletteMember + */ +export type SchemaColorPaletteMember = { + /** + * Color Weight + * + * The weight of the color in the color palette + */ + color_weight?: number | unknown + rgb: SchemaRgbColor +} + +/** + * ColorPalette + */ +export type SchemaColorPalette = { + /** + * Members + * + * A list of color palette members that define the color palette + */ + members?: Array | unknown + /** + * Name + * + * A color palette preset value + */ + name?: + | 'EMBER' + | 'FRESH' + | 'JUNGLE' + | 'MAGIC' + | 'MELON' + | 'MOSAIC' + | 'PASTEL' + | 'ULTRAMARINE' + | unknown +} + +/** + * OutputV3 + */ +export type SchemaIdeogramV3Output = { + /** + * Images + */ + images: Array + /** + * Seed + * + * Seed used for the random number generator + */ + seed: number +} + +/** + * BaseTextToImageInputV3 + */ +export type SchemaIdeogramV3Input = { + /** + * Prompt + */ + prompt: string + /** + * Num Images + * + * Number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The resolution of the generated image + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + | unknown + /** + * Style + * + * The style type to generate with. Cannot be used with style_codes. + */ + style?: 'AUTO' | 'GENERAL' | 'REALISTIC' | 'DESIGN' | unknown + /** + * Style Preset + * + * Style preset for generation. The chosen style preset will guide the generation. 
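+ * + * @example + * // Illustrative request using one of the preset values listed below. + * const req: SchemaIdeogramV3Input = { + *   prompt: 'a lighthouse at dusk', + *   style_preset: 'WATERCOLOR', + *   rendering_speed: 'QUALITY', + * }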
+ */ + style_preset?: + | '80S_ILLUSTRATION' + | '90S_NOSTALGIA' + | 'ABSTRACT_ORGANIC' + | 'ANALOG_NOSTALGIA' + | 'ART_BRUT' + | 'ART_DECO' + | 'ART_POSTER' + | 'AURA' + | 'AVANT_GARDE' + | 'BAUHAUS' + | 'BLUEPRINT' + | 'BLURRY_MOTION' + | 'BRIGHT_ART' + | 'C4D_CARTOON' + | 'CHILDRENS_BOOK' + | 'COLLAGE' + | 'COLORING_BOOK_I' + | 'COLORING_BOOK_II' + | 'CUBISM' + | 'DARK_AURA' + | 'DOODLE' + | 'DOUBLE_EXPOSURE' + | 'DRAMATIC_CINEMA' + | 'EDITORIAL' + | 'EMOTIONAL_MINIMAL' + | 'ETHEREAL_PARTY' + | 'EXPIRED_FILM' + | 'FLAT_ART' + | 'FLAT_VECTOR' + | 'FOREST_REVERIE' + | 'GEO_MINIMALIST' + | 'GLASS_PRISM' + | 'GOLDEN_HOUR' + | 'GRAFFITI_I' + | 'GRAFFITI_II' + | 'HALFTONE_PRINT' + | 'HIGH_CONTRAST' + | 'HIPPIE_ERA' + | 'ICONIC' + | 'JAPANDI_FUSION' + | 'JAZZY' + | 'LONG_EXPOSURE' + | 'MAGAZINE_EDITORIAL' + | 'MINIMAL_ILLUSTRATION' + | 'MIXED_MEDIA' + | 'MONOCHROME' + | 'NIGHTLIFE' + | 'OIL_PAINTING' + | 'OLD_CARTOONS' + | 'PAINT_GESTURE' + | 'POP_ART' + | 'RETRO_ETCHING' + | 'RIVIERA_POP' + | 'SPOTLIGHT_80S' + | 'STYLIZED_RED' + | 'SURREAL_COLLAGE' + | 'TRAVEL_POSTER' + | 'VINTAGE_GEO' + | 'VINTAGE_POSTER' + | 'WATERCOLOR' + | 'WEIRD' + | 'WOODBLOCK_PRINT' + | unknown + /** + * Expand Prompt + * + * Determine if MagicPrompt should be used in generating the request or not. + */ + expand_prompt?: boolean + /** + * Rendering Speed + * + * The rendering speed to use. + */ + rendering_speed?: 'TURBO' | 'BALANCED' | 'QUALITY' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * A color palette for generation, must EITHER be specified via one of the presets (name) or explicitly via hexadecimal representations of the color with optional weights (members) + */ + color_palette?: SchemaColorPalette | unknown + /** + * Style Codes + * + * A list of 8 character hexadecimal codes representing the style of the image. Cannot be used in conjunction with style_reference_images or style + */ + style_codes?: Array | unknown + /** + * Seed + * + * Seed for the random number generator + */ + seed?: number | unknown + /** + * Image Urls + * + * A set of images to use as style references (maximum total size 10MB across all style references). The images should be in JPEG, PNG or WebP format + */ + image_urls?: Array | unknown + /** + * Negative Prompt + * + * Description of what to exclude from an image. Descriptions in the prompt take precedence to descriptions in the negative prompt. + */ + negative_prompt?: string +} + +/** + * ImageOutput + */ +export type SchemaPonyV7Output = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated images + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * Input + */ +export type SchemaPonyV7Input = { + /** + * Prompt + * + * The prompt to generate images from + */ + prompt: string + /** + * Num Images + * + * The number of images to generate + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. 
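+ * + * @example + * // Illustrative: image_size accepts a named preset or, assuming SchemaImageSize + * // is the { width, height } object used elsewhere in these schemas, an explicit size. + * const preset: SchemaPonyV7Input = { prompt: 'a pony', image_size: 'landscape_4_3' } + * const custom: SchemaPonyV7Input = { prompt: 'a pony', image_size: { width: 1024, height: 576 } }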
+ */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Noise Source + * + * + * The source of the noise to use for generating images. + * If set to 'gpu', the noise will be generated on the GPU. + * If set to 'cpu', the noise will be generated on the CPU. + * + */ + noise_source?: 'gpu' | 'cpu' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Guidance Scale + * + * Classifier free guidance scale + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to take + */ + num_inference_steps?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * The seed to use for generating images + */ + seed?: number +} + +/** + * MiniMaxTextToImageOutput + */ +export type SchemaMinimaxImage01Output = { + /** + * Images + * + * Generated images + */ + images: Array +} + +/** + * MiniMaxTextToImageRequest + */ +export type SchemaMinimaxImage01Input = { + /** + * Prompt Optimizer + * + * Whether to enable automatic prompt optimization + */ + prompt_optimizer?: boolean + /** + * Aspect Ratio + * + * Aspect ratio of the generated image + */ + aspect_ratio?: + | '1:1' + | '16:9' + | '4:3' + | '3:2' + | '2:3' + | '3:4' + | '9:16' + | '21:9' + /** + * Num Images + * + * Number of images to generate (1-9) + */ + num_images?: number + /** + * Prompt + * + * Text prompt for image generation (max 1500 characters) + */ + prompt: string +} + +/** + * Output + */ +export type SchemaFluxLoraStreamOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * TextToImageInput + */ +export type SchemaFluxLoraStreamInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. This is always set to 1 for streaming output. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use any number of LoRAs + * and they will be merged together to generate the final image. + * + */ + loras?: Array + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. 
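+ * + * @example + * // Illustrative: sync_mode trades request-history/CDN access for an inline data URI. + * const req: SchemaFluxLoraStreamInput = { prompt: 'neon city at night', sync_mode: true }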
+ */ + sync_mode?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean +} + +/** + * DreamOOutput + */ +export type SchemaDreamoOutput = { + /** + * Prompt + * + * The prompt used to generate the image. + */ + prompt: string + /** + * Images + * + * The URLs of the generated images. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * DreamOInput + */ +export type SchemaDreamoInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * First Reference Image URL + * + * URL of first reference image to use for generation. + */ + first_image_url?: string + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Second Reference Image URL + * + * URL of second reference image to use for generation. + */ + second_image_url?: string + /** + * Second Reference Task + * + * Task for second reference image (ip/id/style). + */ + second_reference_task?: 'ip' | 'id' | 'style' + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * First Reference Task + * + * Task for first reference image (ip/id/style). + */ + first_reference_task?: 'ip' | 'id' | 'style' + /** + * Negative Prompt + * + * The prompt to generate an image from. + */ + negative_prompt?: string + /** + * Ref Resolution + * + * Resolution for reference images. + */ + ref_resolution?: number + /** + * Sync Mode + * + * + * If set to true, the function will wait for the image to be generated and uploaded + * before returning the response. This will increase the latency of the function but + * it allows you to get the image directly in the response without going through the CDN. + * + */ + sync_mode?: boolean + /** + * True Cfg + * + * The weight of the CFG loss. + */ + true_cfg?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. 
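+ * + * @example + * // Illustrative DreamO request: a fixed seed plus an identity reference and a + * // style reference (both URLs are hypothetical placeholders). + * const req: SchemaDreamoInput = { + *   prompt: 'portrait in the style of the second image', + *   first_image_url: 'https://example.com/face.png', + *   first_reference_task: 'id', + *   second_image_url: 'https://example.com/style.png', + *   second_reference_task: 'style', + *   seed: 42, + * }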
+ * + */ + seed?: number +} + +/** + * Imagen4TextToImageUltraOutput + */ +export type SchemaImagen4PreviewUltraOutput = { + /** + * Images + * + * The generated images. + */ + images: Array + /** + * Description + * + * The description of the generated images. + */ + description: string +} + +/** + * Imagen4TextToImageUltraInput + */ +export type SchemaImagen4PreviewUltraInput = { + /** + * Prompt + * + * The text prompt to generate an image from. + */ + prompt: string + /** + * Number of Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Aspect Ratio + * + * The aspect ratio of the generated image. + */ + aspect_ratio?: '1:1' | '16:9' | '9:16' | '4:3' | '3:4' + /** + * Resolution + * + * The resolution of the generated image. + */ + resolution?: '1K' | '2K' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' | 'webp' +} + +/** + * ImageOutput + */ +export type SchemaBagelOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated images. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * ImageGenInput + */ +export type SchemaBagelInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * The seed to use for the generation. + */ + seed?: number + /** + * Use Thought + * + * Whether to use thought tokens for generation. If set to true, the model will "think" to potentially improve generation quality. Increases generation time and increases the cost by 20%. + */ + use_thought?: boolean +} + +/** + * Output + */ +export type SchemaFluxProKontextTextToImageOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * FluxProTextToImageInputWithAR + */ +export type SchemaFluxProKontextTextToImageInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Aspect Ratio + * + * The aspect ratio of the generated image. + */ + aspect_ratio?: + | '21:9' + | '16:9' + | '4:3' + | '3:2' + | '1:1' + | '2:3' + | '3:4' + | '9:16' + | '9:21' + /** + * Output Format + * + * The format of the generated image. 
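+ * + * @example + * // Illustrative: this endpoint is aspect-ratio based rather than pixel-size based. + * const req: SchemaFluxProKontextTextToImageInput = { + *   prompt: 'a vintage travel poster of the Alps', + *   aspect_ratio: '3:4', + *   output_format: 'png', + * }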
+ */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Safety Tolerance + * + * The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive. + */ + safety_tolerance?: '1' | '2' | '3' | '4' | '5' | '6' + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Enhance Prompt + * + * Whether to enhance the prompt for better results. + */ + enhance_prompt?: boolean + } + + /** + * Output + */ + export type SchemaFluxProKontextMaxTextToImageOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number + } + + /** + * FluxProTextToImageInputWithAR + */ + export type SchemaFluxProKontextMaxTextToImageInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Aspect Ratio + * + * The aspect ratio of the generated image. + */ + aspect_ratio?: + | '21:9' + | '16:9' + | '4:3' + | '3:2' + | '1:1' + | '2:3' + | '3:4' + | '9:16' + | '9:21' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Safety Tolerance + * + * The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive. + */ + safety_tolerance?: '1' | '2' | '3' | '4' | '5' | '6' + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Enhance Prompt + * + * Whether to enhance the prompt for better results. + */ + enhance_prompt?: boolean + } + + /** + * Output + */ + export type SchemaFlux1DevOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image.
It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * BaseFlux1Input + */ +export type SchemaFlux1DevInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * The speed of the generation. The higher the speed, the faster the generation. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number | unknown + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number +} + +/** + * Output + */ +export type SchemaFlux1SchnellOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * SchnellFlux1TextToImageInput + */ +export type SchemaFlux1SchnellInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * The speed of the generation. The higher the speed, the faster the generation. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Num Inference Steps + * + * The number of inference steps to perform. 
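+ * + * @example + * // Illustrative: schnell-class models are typically run with very few steps; + * // the values here are examples, not recommendations. + * const req: SchemaFlux1SchnellInput = { + *   prompt: 'a red fox in fresh snow', + *   num_inference_steps: 4, + *   acceleration: 'high', + * }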
+ */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number | unknown + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number +} + +/** + * SeedDreamOutput + */ +export type SchemaBytedanceSeedreamV3TextToImageOutput = { + /** + * Images + * + * Generated images + */ + images: Array + /** + * Seed + * + * Seed used for generation + */ + seed: number +} + +/** + * SeedDreamInput + */ +export type SchemaBytedanceSeedreamV3TextToImageInput = { + /** + * Prompt + * + * The text prompt used to generate the image + */ + prompt: string + /** + * Num Images + * + * Number of images to generate + */ + num_images?: number + /** + * Image Size + * + * Use for finer control over the output image size. Will be used over aspect_ratio, if both are provided. Width and height must be between 512 and 2048. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Guidance Scale + * + * Controls how closely the output image aligns with the input prompt. Higher values mean stronger prompt correlation. + */ + guidance_scale?: number + /** + * Seed + * + * Random seed to control the stochasticity of image generation. + */ + seed?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean +} + +/** + * Output + */ +export type SchemaOmnigenV2Output = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * TextToImageInput + */ +export type SchemaOmnigenV2Input = { + /** + * Prompt + * + * The prompt to generate or edit an image. Use specific language like 'Add the bird from image 1 to the desk in image 2' for better results. + */ + prompt: string + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Scheduler + * + * The scheduler to use for the diffusion process. + */ + scheduler?: 'euler' | 'dpmsolver' + /** + * Cfg Range End + * + * CFG range end value. + */ + cfg_range_end?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * Negative prompt to guide what should not be in the image. + */ + negative_prompt?: string + /** + * Text Guidance scale + * + * + * The Text Guidance scale controls how closely the model follows the text prompt. 
+ * Higher values make the model stick more closely to the prompt. + * + */ + text_guidance_scale?: number + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Guidance scale + * + * + * The Image Guidance scale controls how closely the model follows the input images. + * For image editing: 1.3-2.0, for in-context generation: 2.0-3.0 + * + */ + image_guidance_scale?: number + /** + * Input Image Urls + * + * URLs of input images to use for image editing or multi-image generation. Support up to 3 images. + */ + input_image_urls?: Array + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Cfg Range Start + * + * CFG range start value. + */ + cfg_range_start?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number +} + +/** + * KontextT2IOutput + */ +export type SchemaFluxKontextLoraTextToImageOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * BaseKontextInput + */ +export type SchemaFluxKontextLoraTextToImageInput = { + /** + * Prompt + * + * The prompt to generate the image with + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * The speed of the generation. The higher the speed, the faster the generation. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use any number of LoRAs + * and they will be merged together to generate the final image. + * + */ + loras?: Array + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. 
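+ * + * @example + * // Illustrative: a LoRA merged into the generation with the safety checker on. + * // The LoRA URL is a hypothetical placeholder. + * const req: SchemaFluxKontextLoraTextToImageInput = { + *   prompt: 'isometric pixel-art castle', + *   loras: [{ path: 'https://example.com/pixel-art.safetensors', scale: 1 }], + *   enable_safety_checker: true, + * }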
+ */ + enable_safety_checker?: boolean + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number +} + +/** + * SkyRaccoonResponse + */ +export type SchemaSkyRaccoonOutput = { + /** + * Image + * + * The generated image file. + */ + image: SchemaFile + /** + * Seed + * + * The seed used for generation. + */ + seed: number +} + +/** + * SkyRaccoonRequest + */ +export type SchemaSkyRaccoonInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Turbo Mode + * + * If true, the video will be generated faster with no noticeable degradation in the visual quality. + */ + turbo_mode?: boolean + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * Negative prompt for video generation. + */ + negative_prompt?: string + /** + * Num Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: number +} + +/** + * KreaOutput + */ +export type SchemaFlux1KreaOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated images. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * BaseKreaFlux1Input + */ +export type SchemaFlux1KreaInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * The speed of the generation. The higher the speed, the faster the generation. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. 
+ * + */ + seed?: number | unknown + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number +} + +/** + * KreaOutput + */ +export type SchemaFluxKreaOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated images. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * BaseKreaInput + */ +export type SchemaFluxKreaInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * The speed of the generation. The higher the speed, the faster the generation. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number | unknown + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * Output + */ +export type SchemaFluxKreaLoraOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Timings + */ + timings: { + [key: string]: number + } +} + +/** + * TextToImageInput + */ +export type SchemaFluxKreaLoraInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. This is always set to 1 for streaming output. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. 
+ */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use any number of LoRAs + * and they will be merged together to generate the final image. + * + */ + loras?: Array + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number +} + +/** + * Output + */ +export type SchemaFluxKreaLoraStreamOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Timings + */ + timings: { + [key: string]: number + } +} + +/** + * TextToImageInput + */ +export type SchemaFluxKreaLoraStreamInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. This is always set to 1 for streaming output. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use any number of LoRAs + * and they will be merged together to generate the final image. + * + */ + loras?: Array + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. 
+ * + */ + seed?: number +} + +/** + * QwenImageOutput + */ +export type SchemaQwenImageOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Timings + */ + timings: { + [key: string]: number + } +} + +/** + * BaseQwenImageInput + */ +export type SchemaQwenImageInput = { + /** + * Prompt + * + * The prompt to generate the image with + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Acceleration + * + * Acceleration level for image generation. Options: 'none', 'regular', 'high'. Higher acceleration increases speed. 'regular' balances speed and quality. 'high' is recommended for images without text. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use up to 3 LoRAs + * and they will be merged together to generate the final image. + * + */ + loras?: Array + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Use Turbo + * + * Enable turbo mode for faster generation with high quality. When enabled, uses optimized settings (10 steps, CFG=1.2). + */ + use_turbo?: boolean + /** + * Negative Prompt + * + * The negative prompt for the generation + */ + negative_prompt?: string + /** + * Guidance scale + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number +} + +/** + * WanT2IResponse + */ +export type SchemaWanV22A14bTextToImageOutput = { + /** + * Image + * + * The generated image file. + */ + image: SchemaFile + /** + * Seed + * + * The seed used for generation. + */ + seed: number +} + +/** + * WanT2IRequest + */ +export type SchemaWanV22A14bTextToImageInput = { + /** + * Prompt + * + * The text prompt to guide image generation. + */ + prompt: string + /** + * Guidance Scale (1st Stage) + * + * Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality. + */ + guidance_scale?: number + /** + * Image Size + * + * The size of the generated image. 
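+ * + * @example + * // Illustrative: this model takes a separate guidance scale per stage + * // (guidance_scale for the first, guidance_scale_2 for the second); the + * // values here are examples, not recommendations. + * const req: SchemaWanV22A14bTextToImageInput = { + *   prompt: 'misty mountain village at dawn', + *   guidance_scale: 3.5, + *   guidance_scale_2: 4, + *   image_size: 'landscape_16_9', + * }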
+ */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * Acceleration level to use. The more acceleration, the faster the generation, but with lower quality. The recommended value is 'regular'. + */ + acceleration?: 'none' | 'regular' + /** + * Shift + * + * Shift value for the image. Must be between 1.0 and 10.0. + */ + shift?: number + /** + * Enable Output Safety Checker + * + * If set to true, output video will be checked for safety after generation. + */ + enable_output_safety_checker?: boolean + /** + * Guidance Scale (2nd Stage) + * + * Guidance scale for the second stage of the model. This is used to control the adherence to the prompt in the second stage of the model. + */ + guidance_scale_2?: number + /** + * Enable Safety Checker + * + * If set to true, input data will be checked for safety before processing. + */ + enable_safety_checker?: boolean + /** + * Number of Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: number + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Negative Prompt + * + * Negative prompt for video generation. + */ + negative_prompt?: string + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning. + */ + enable_prompt_expansion?: boolean +} + +/** + * WanSmallT2IResponse + */ +export type SchemaWanV225bTextToImageOutput = { + /** + * Image + * + * The generated image file. + */ + image: SchemaFile + /** + * Seed + * + * The seed used for generation. + */ + seed: number +} + +/** + * WanSmallT2IRequest + */ +export type SchemaWanV225bTextToImageInput = { + /** + * Prompt + * + * The text prompt to guide image generation. + */ + prompt: string + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning. + */ + enable_prompt_expansion?: boolean + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Negative Prompt + * + * Negative prompt for video generation. + */ + negative_prompt?: string + /** + * Shift + * + * Shift value for the image. Must be between 1.0 and 10.0. + */ + shift?: number + /** + * Enable Output Safety Checker + * + * If set to true, output video will be checked for safety after generation. + */ + enable_output_safety_checker?: boolean + /** + * Enable Safety Checker + * + * If set to true, input data will be checked for safety before processing. + */ + enable_safety_checker?: boolean + /** + * Number of Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: number + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Image Format + * + * The format of the output image. + */ + image_format?: 'png' | 'jpeg' + /** + * Guidance Scale + * + * Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality. 
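+ * + * @example + * // Illustrative: shift must stay within the documented 1.0-10.0 range. + * const req: SchemaWanV225bTextToImageInput = { + *   prompt: 'paper-cut diorama of a coral reef', + *   shift: 5, + *   image_format: 'png', + * }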
+ */ + guidance_scale?: number +} + +/** + * LoRAWeight + */ +export type SchemaLoRaWeight = { + /** + * Path + * + * URL or the path to the LoRA weights. + */ + path: string + /** + * Scale + * + * + * The scale of the LoRA weight. This is used to scale the LoRA weight + * before merging it with the base model. + * + */ + scale?: number + /** + * Transformer + * + * Specifies the transformer to load the lora weight into. 'high' loads into the high-noise transformer, 'low' loads it into the low-noise transformer, while 'both' loads the LoRA into both transformers. + */ + transformer?: 'high' | 'low' | 'both' + /** + * Weight Name + * + * Name of the LoRA weight. Used only if `path` is a Hugging Face repository, and required only if you have more than 1 safetensors file in the repo. + */ + weight_name?: string +} + +/** + * WanT2IResponse + */ +export type SchemaWanV22A14bTextToImageLoraOutput = { + /** + * Image + * + * The generated image file. + */ + image: SchemaFile + /** + * Seed + * + * The seed used for generation. + */ + seed: number +} + +/** + * WanLoRAT2IRequest + */ +export type SchemaWanV22A14bTextToImageLoraInput = { + /** + * Prompt + * + * The text prompt to guide image generation. + */ + prompt: string + /** + * Shift + * + * Shift value for the image. Must be between 1.0 and 10.0. + */ + shift?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * Acceleration level to use. The more acceleration, the faster the generation, but with lower quality. The recommended value is 'regular'. + */ + acceleration?: 'none' | 'regular' + /** + * Reverse Video + * + * If true, the video will be reversed. + */ + reverse_video?: boolean + /** + * Loras + * + * LoRA weights to be used in the inference. + */ + loras?: Array + /** + * Enable Safety Checker + * + * If set to true, input data will be checked for safety before processing. + */ + enable_safety_checker?: boolean + /** + * Guidance Scale (1st Stage) + * + * Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality. + */ + guidance_scale?: number + /** + * Image Format + * + * The format of the output image. + */ + image_format?: 'png' | 'jpeg' + /** + * Negative Prompt + * + * Negative prompt for video generation. + */ + negative_prompt?: string + /** + * Enable Output Safety Checker + * + * If set to true, output video will be checked for safety after generation. + */ + enable_output_safety_checker?: boolean + /** + * Guidance Scale (2nd Stage) + * + * Guidance scale for the second stage of the model. This is used to control the adherence to the prompt in the second stage of the model. + */ + guidance_scale_2?: number + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning. + */ + enable_prompt_expansion?: boolean + /** + * Number of Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: number + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. 
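+ * + * @example + * // Illustrative: SchemaLoRaWeight.transformer routes a weight into the high-noise + * // transformer, the low-noise transformer, or both (the URL is a hypothetical + * // placeholder). + * const req: SchemaWanV22A14bTextToImageLoraInput = { + *   prompt: 'ink-wash landscape', + *   loras: [{ path: 'https://example.com/ink.safetensors', transformer: 'both' }], + *   seed: 7, + * }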
+ */ + seed?: number +} + +/** + * DreaminaOutput + */ +export type SchemaBytedanceDreaminaV31TextToImageOutput = { + /** + * Images + * + * Generated images + */ + images: Array + /** + * Seed + * + * Seed used for generation + */ + seed: number +} + +/** + * DreaminaInput + */ +export type SchemaBytedanceDreaminaV31TextToImageInput = { + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Num Images + * + * Number of images to generate + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. Width and height must be between 512 and 2048. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Prompt + * + * The text prompt used to generate the image + */ + prompt: string + /** + * Seed + * + * Random seed to control the stochasticity of image generation. + */ + seed?: number + /** + * Enhance Prompt + * + * Whether to use an LLM to enhance the prompt + */ + enhance_prompt?: boolean +} + +/** + * NanoBananaTextToImageOutput + */ +export type SchemaNanoBananaOutput = { + /** + * Images + * + * The generated images. + */ + images: Array + /** + * Description + * + * The description of the generated images. + */ + description: string +} + +/** + * NanoBananaTextToImageInput + */ +export type SchemaNanoBananaInput = { + /** + * Prompt + * + * The text prompt to generate an image from. + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated image. + */ + aspect_ratio?: + | '21:9' + | '16:9' + | '3:2' + | '4:3' + | '5:4' + | '1:1' + | '4:5' + | '3:4' + | '2:3' + | '9:16' + /** + * Number of Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Limit Generations + * + * Experimental parameter to limit the number of generations from each round of prompting to 1. Set to `True` to disregard any instructions in the prompt regarding the number of images to generate. + */ + limit_generations?: boolean +} + +/** + * NanoBananaTextToImageOutput + */ +export type SchemaGemini25FlashImageOutput = { + /** + * Images + * + * The generated images. + */ + images: Array + /** + * Description + * + * The description of the generated images. + */ + description: string +} + +/** + * NanoBananaTextToImageInput + */ +export type SchemaGemini25FlashImageInput = { + /** + * Prompt + * + * The text prompt to generate an image from. + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated image. + */ + aspect_ratio?: + | '21:9' + | '16:9' + | '3:2' + | '4:3' + | '5:4' + | '1:1' + | '4:5' + | '3:4' + | '2:3' + | '9:16' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Number of Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Output Format + * + * The format of the generated image.
+ */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Limit Generations + * + * Experimental parameter to limit the number of generations from each round of prompting to 1. Set to `True` to disregard any instructions in the prompt regarding the number of images to generate. + */ + limit_generations?: boolean +} + +/** + * SeedDream4T2IOutput + */ +export type SchemaBytedanceSeedreamV4TextToImageOutput = { + /** + * Images + * + * Generated images + */ + images: Array + /** + * Seed + * + * Seed used for generation + */ + seed: number +} + +/** + * SeedDream4T2IInput + */ +export type SchemaBytedanceSeedreamV4TextToImageInput = { + /** + * Prompt + * + * The text prompt used to generate the image + */ + prompt: string + /** + * Num Images + * + * Number of separate model generations to be run with the prompt. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. Total pixels must be between 960x960 and 4096x4096. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + | 'auto' + | 'auto_2K' + | 'auto_4K' + /** + * Enhance Prompt Mode + * + * The mode to use for prompt enhancement. Standard mode provides higher quality results but takes longer to generate. Fast mode provides average quality results but takes less time to generate. + */ + enhance_prompt_mode?: 'standard' | 'fast' + /** + * Max Images + * + * If set to a number greater than one, enables multi-image generation. The model will potentially return up to `max_images` images every generation, and in total, `num_images` generations will be carried out. In total, the number of images generated will be between `num_images` and `max_images*num_images`. + */ + max_images?: number + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * Random seed to control the stochasticity of image generation. + */ + seed?: number +} + +/** + * HunyuanTextToImageOutput + */ +export type SchemaHunyuanImageV21TextToImageOutput = { + /** + * Images + * + * A list of the generated images. + */ + images: Array + /** + * Seed + * + * The base seed used for the generation process. + */ + seed: number +} + +/** + * HunyuanTextToImageInput + */ +export type SchemaHunyuanImageV21TextToImageInput = { + /** + * Prompt + * + * The text prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The desired size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Use Reprompt + * + * Enable prompt enhancement for potentially better results. + */ + use_reprompt?: boolean + /** + * Use Refiner + * + * Enable the refiner model for improved image quality. + */ + use_refiner?: boolean + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history.
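Editor's note: the `num_images`/`max_images` interaction documented for Seedream above is easy to misread, so here is the arithmetic spelled out (plain TypeScript, no package types needed):

// With num_images generations, each returning 1..max_images images:
const num_images = 2
const max_images = 3
const minTotal = num_images // 2: every generation returns at least one image
const maxTotal = num_images * max_images // 6: every generation returns the cap
console.log(`total images per request: between ${minTotal} and ${maxTotal}`)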
+ */ + sync_mode?: boolean + /** + * Guidance Scale + * + * Controls how much the model adheres to the prompt. Higher values mean stricter adherence. + */ + guidance_scale?: number + /** + * Seed + * + * Random seed for reproducible results. If None, a random seed is used. + */ + seed?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * The negative prompt to guide the image generation away from certain concepts. + */ + negative_prompt?: string + /** + * Num Inference Steps + * + * Number of denoising steps. + */ + num_inference_steps?: number +} + +/** + * SRPOOutput + */ +export type SchemaFlux1SrpoOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated images. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * BaseSRPOFlux1Input + */ +export type SchemaFlux1SrpoInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * The speed of the generation. The higher the speed, the faster the generation. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number | unknown + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number +} + +/** + * SRPOOutput + */ +export type SchemaFluxSrpoOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated images. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. 
+ * + */ + seed: number +} + +/** + * BaseSRPOInput + */ +export type SchemaFluxSrpoInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * The speed of the generation. The higher the speed, the faster the generation. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number | unknown + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * TextToImageOutput + * + * Output for text-to-image generation + */ +export type SchemaWan25PreviewTextToImageOutput = { + /** + * Images + * + * The generated images + */ + images: Array + /** + * Seeds + * + * The seeds used for each generated image + */ + seeds: Array + /** + * Actual Prompt + * + * The actual prompt used if prompt rewriting was enabled + */ + actual_prompt?: string +} + +/** + * TextToImageInput + * + * Input for text-to-image generation + */ +export type SchemaWan25PreviewTextToImageInput = { + /** + * Prompt + * + * The prompt for image generation. Supports Chinese and English, max 2000 characters. + */ + prompt: string + /** + * Num Images + * + * Number of images to generate. Values from 1 to 4. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. Can use preset names like 'square', 'landscape_16_9', etc., or specific dimensions. Total pixels must be between 768×768 and 1440×1440, with aspect ratio between [1:4, 4:1]. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Enable Prompt Expansion + * + * Whether to enable prompt rewriting using LLM. Improves results for short prompts but increases processing time. + */ + enable_prompt_expansion?: boolean + /** + * Negative Prompt + * + * Negative prompt to describe content to avoid. Max 500 characters. + */ + negative_prompt?: string +} + +/** + * HunyuanTextToImageV3Output + */ +export type SchemaHunyuanImageV3TextToImageOutput = { + /** + * Images + * + * A list of the generated images. + */ + images: Array + /** + * Seed + * + * The base seed used for the generation process. 
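Editor's sketch: the Wan 2.5 preview input above constrains custom sizes to a total pixel count within [768×768, 1440×1440] and an aspect ratio within [1:4, 4:1]. A hypothetical client-side guard (not part of the package):

function isValidWan25Size(width: number, height: number): boolean {
  const pixels = width * height
  const aspect = width / height
  return (
    pixels >= 768 * 768 && // 589,824 px floor
    pixels <= 1440 * 1440 && // 2,073,600 px ceiling
    aspect >= 1 / 4 &&
    aspect <= 4
  )
}

console.log(isValidWan25Size(1280, 720)) // true: 921,600 px at 16:9
console.log(isValidWan25Size(512, 512)) // false: below the pixel floor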
+ */ + seed: number +} + +/** + * HunyuanTextToImageInputV3 + */ +export type SchemaHunyuanImageV3TextToImageInput = { + /** + * Prompt + * + * The text prompt for image-to-image. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The desired size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning. + */ + enable_prompt_expansion?: boolean + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Guidance Scale + * + * Controls how much the model adheres to the prompt. Higher values mean stricter adherence. + */ + guidance_scale?: number + /** + * Seed + * + * Random seed for reproducible results. If None, a random seed is used. + */ + seed?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * The negative prompt to guide the image generation away from certain concepts. + */ + negative_prompt?: string + /** + * Num Inference Steps + * + * Number of denoising steps. + */ + num_inference_steps?: number +} + +/** + * ReveCreateOutput + * + * Output for Reve text-to-image generation + */ +export type SchemaReveTextToImageOutput = { + /** + * Images + * + * The generated images + */ + images: Array +} + +/** + * ReveCreateInput + * + * Input for Reve text-to-image generation + */ +export type SchemaReveTextToImageInput = { + /** + * Prompt + * + * The text description of the desired image. + */ + prompt: string + /** + * Number of Images + * + * Number of images to generate + */ + num_images?: number + /** + * Aspect Ratio + * + * The desired aspect ratio of the generated image. + */ + aspect_ratio?: '16:9' | '9:16' | '3:2' | '2:3' | '4:3' | '3:4' | '1:1' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Output Format + * + * Output format for the generated image. + */ + output_format?: 'png' | 'jpeg' | 'webp' +} + +/** + * ImageResponseMini + */ +export type SchemaGptImage1MiniOutput = { + /** + * Images + * + * The generated images. 
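Editor's sketch: since every endpoint here ships as an Input/Output pair, a caller can tie the pairs to endpoint ids in a lookup type. The map, the endpoint ids, and the `run` signature below are all illustrative assumptions, not part of the generated file:

import type {
  SchemaHunyuanImageV3TextToImageInput,
  SchemaHunyuanImageV3TextToImageOutput,
  SchemaReveTextToImageInput,
  SchemaReveTextToImageOutput,
} from './types' // hypothetical path

type EndpointIO = {
  'fal-ai/hunyuan-image/v3/text-to-image': {
    input: SchemaHunyuanImageV3TextToImageInput
    output: SchemaHunyuanImageV3TextToImageOutput
  }
  'fal-ai/reve/text-to-image': {
    input: SchemaReveTextToImageInput
    output: SchemaReveTextToImageOutput
  }
}

// A typed dispatcher could then be declared against the map:
declare function run<K extends keyof EndpointIO>(
  endpoint: K,
  input: EndpointIO[K]['input'],
): Promise<EndpointIO[K]['output']>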
+ */ + images: Array +} + +/** + * TextToImageRequestMini + */ +export type SchemaGptImage1MiniInput = { + /** + * Prompt + * + * The prompt for image generation + */ + prompt: string + /** + * Number of Images + * + * Number of images to generate + */ + num_images?: number + /** + * Image Size + * + * Aspect ratio for the generated image + */ + image_size?: 'auto' | '1024x1024' | '1536x1024' | '1024x1536' + /** + * Background + * + * Background for the generated image + */ + background?: 'auto' | 'transparent' | 'opaque' + /** + * Quality + * + * Quality for the generated image + */ + quality?: 'auto' | 'low' | 'medium' | 'high' + /** + * Output Format + * + * Output format for the images + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean +} + +/** + * PiQwenOutput + */ +export type SchemaPiflowOutput = { + /** + * Images + * + * The URLs of the generated images. + */ + images: Array + /** + * Seed + * + * The seed used for generation. + */ + seed: number +} + +/** + * PiQwenInput + */ +export type SchemaPiflowInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * + * The size of the generated image. You can choose between some presets or custom height and width + * that **must be multiples of 8**. + * + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * Random seed for reproducible generation. If set to None, a random seed will be used. + */ + seed?: number +} + +/** + * GaiaOutputModel + */ +export type SchemaFiboGenerateOutput = { + /** + * Images + * + * Generated images. + */ + images?: Array<{ + [key: string]: unknown + }> + image: SchemaImage + /** + * Structured Prompt + * + * Current prompt. + */ + structured_prompt: { + [key: string]: unknown + } +} + +/** + * GaiaInputModel + */ +export type SchemaFiboGenerateInput = { + /** + * Prompt + * + * Prompt for image generation. + */ + prompt?: string | unknown + /** + * Steps Num + * + * Number of inference steps. + */ + steps_num?: number + /** + * Aspect Ratio + * + * Aspect ratio. Options: 1:1, 2:3, 3:2, 3:4, 4:3, 4:5, 5:4, 9:16, 16:9 + */ + aspect_ratio?: + | '1:1' + | '2:3' + | '3:2' + | '3:4' + | '4:3' + | '4:5' + | '5:4' + | '9:16' + | '16:9' + /** + * Image Url + * + * Reference image (file or URL). + */ + image_url?: string | unknown + /** + * Sync Mode + * + * If true, returns the image directly in the response (increases latency). + */ + sync_mode?: boolean + /** + * Guidance Scale + * + * Guidance scale for text. + */ + guidance_scale?: number + /** + * Seed + * + * Random seed for reproducibility. 
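Editor's note: Pi-Flow's custom dimensions must be multiples of 8 per its `image_size` doc above; a trivial guard (hypothetical helper, not part of the package):

const isMultipleOf8 = (n: number): boolean => n % 8 === 0
console.log(isMultipleOf8(1024), isMultipleOf8(1020)) // true false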
+ */ + seed?: number + /** + * The structured prompt to generate an image from. + */ + structured_prompt?: SchemaStructuredPrompt | unknown + /** + * Negative Prompt + * + * Negative prompt for image generation. + */ + negative_prompt?: string +} + +/** + * Lighting + */ +export type SchemaLighting = { + /** + * Shadows + * + * The shadows in the image to be generated. + */ + shadows?: string | unknown + /** + * Conditions + * + * The conditions of the lighting in the image to be generated. + */ + conditions?: string | unknown + /** + * Direction + * + * The direction of the lighting in the image to be generated. + */ + direction?: string | unknown +} + +/** + * PhotographicCharacteristics + */ +export type SchemaPhotographicCharacteristics = { + /** + * Focus + * + * The focus in the image to be generated. + */ + focus?: string | unknown + /** + * Lens Focal Length + * + * The focal length of the lens in the image to be generated. + */ + lens_focal_length?: string | unknown + /** + * Camera Angle + * + * The angle of the camera in the image to be generated. + */ + camera_angle?: string | unknown + /** + * Depth Of Field + * + * The depth of field in the image to be generated. + */ + depth_of_field?: string | unknown +} + +/** + * PromptObject + */ +export type SchemaPromptObject = { + /** + * Clothing + * + * The clothing of the object in the image. + */ + clothing?: string | unknown + /** + * Description + * + * A description of the object to be generated. + */ + description?: string | unknown + /** + * Skin Tone And Texture + * + * The skin tone and texture of the object in the image. + */ + skin_tone_and_texture?: string | unknown + /** + * Appearance Details + * + * The appearance details of the object. + */ + appearance_details?: string | unknown + /** + * Number Of Objects + * + * The number of objects in the image. + */ + number_of_objects?: number | unknown + /** + * Expression + * + * The expression of the object in the image. + */ + expression?: string | unknown + /** + * Pose + * + * The pose of the object in the image. + */ + pose?: string | unknown + /** + * Shape And Color + * + * The shape and color of the object. + */ + shape_and_color?: string | unknown + /** + * Relationship + * + * The relationship of the object to other objects in the image. + */ + relationship: string + /** + * Texture + * + * The texture of the object. + */ + texture?: string | unknown + /** + * Gender + * + * The gender of the object in the image. + */ + gender?: string | unknown + /** + * Relative Size + * + * The relative size of the object in the image. + */ + relative_size?: string | unknown + /** + * Location + * + * The location of the object in the image. + */ + location?: string | unknown + /** + * Orientation + * + * The orientation of the object in the image. + */ + orientation?: string | unknown + /** + * Action + * + * The action of the object in the image. + */ + action?: string | unknown +} + +/** + * Aesthetics + */ +export type SchemaAesthetics = { + /** + * Composition + * + * The composition of the image to be generated. + */ + composition?: string | unknown + /** + * Mood Atmosphere + * + * The mood and atmosphere of the image to be generated. + */ + mood_atmosphere?: string | unknown + /** + * Color Scheme + * + * The color scheme of the image to be generated. + */ + color_scheme?: string | unknown +} + +/** + * StructuredPrompt + */ +export type SchemaStructuredPrompt = { + /** + * Background Setting + * + * The background setting of the image to be generated. 
+ */ + background_setting?: string | unknown + /** + * Artistic Style + * + * The artistic style of the image to be generated. + */ + artistic_style?: string | unknown + /** + * The aesthetics of the image to be generated. + */ + aesthetics?: SchemaAesthetics | unknown + /** + * Text Render + * + * A list of text to be rendered in the image. + */ + text_render?: Array | unknown + /** + * Objects + * + * A list of objects in the image to be generated, along with their attributes and relationships to other objects in the image. + */ + objects?: Array | unknown + /** + * Style Medium + * + * The style medium of the image to be generated. + */ + style_medium?: string | unknown + /** + * The photographic characteristics of the image to be generated. + */ + photographic_characteristics?: SchemaPhotographicCharacteristics | unknown + /** + * Context + * + * The context of the image to be generated. + */ + context?: string | unknown + /** + * The lighting of the image to be generated. + */ + lighting?: SchemaLighting | unknown + /** + * Short Description + * + * A short description of the image to be generated. + */ + short_description?: string | unknown +} + +/** + * Emu35Output + */ +export type SchemaEmu35ImageTextToImageOutput = { + /** + * Images + * + * The edited image. + */ + images: Array + /** + * Seed + * + * The seed for the inference. + */ + seed: number +} + +/** + * Emu35ImageInput + */ +export type SchemaEmu35ImageTextToImageInput = { + /** + * Prompt + * + * The prompt to create the image. + */ + prompt: string + /** + * Resolution + * + * The resolution of the output image. + */ + resolution?: '480p' | '720p' + /** + * Aspect Ratio + * + * The aspect ratio of the output image. + */ + aspect_ratio?: + | '21:9' + | '16:9' + | '4:3' + | '3:2' + | '1:1' + | '2:3' + | '3:4' + | '9:16' + | '9:21' + /** + * Output Format + * + * The format of the output image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Sync Mode + * + * Whether to return the image in sync mode. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * The seed for the inference. + */ + seed?: number +} + +/** + * Image + * + * Represents an image file. + */ +export type SchemaImageOutput = { + /** + * File Size + * + * The size of the file in bytes. + */ + file_size?: number | unknown + /** + * Height + * + * The height of the image in pixels. + */ + height?: number | unknown + /** + * File Name + * + * The name of the file. It will be auto-generated if not provided. + */ + file_name?: string | unknown + /** + * Content Type + * + * The mime type of the file. + */ + content_type?: string | unknown + /** + * Url + * + * The URL where the file can be downloaded from. + */ + url: string + /** + * Width + * + * The width of the image in pixels. 
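Editor's sketch of a complete `SchemaStructuredPrompt` literal for the Fibo endpoints, built only from fields declared above; note `relationship` is the one required property on `SchemaPromptObject` (import path hypothetical):

import type { SchemaStructuredPrompt } from './types'

const structuredPrompt: SchemaStructuredPrompt = {
  short_description: 'A fisherman repairing a net at golden hour',
  background_setting: 'weathered wooden pier over calm water',
  style_medium: 'photograph',
  objects: [
    {
      description: 'elderly fisherman',
      action: 'repairing a fishing net',
      location: 'foreground, left third',
      relationship: 'holds the net draped over the crate beside him',
    },
  ],
  lighting: { direction: 'low sun from the west', shadows: 'long and soft' },
  aesthetics: { mood_atmosphere: 'quiet, contemplative', color_scheme: 'warm amber tones' },
}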
+ */ + width?: number | unknown +} + +/** + * ImagineArt_1_5_Output + */ +export type SchemaImagineart15PreviewTextToImageOutput = { + /** + * Images + * + * Generated image + */ + images: Array +} + +/** + * ImagineArt_1_5_Input + */ +export type SchemaImagineart15PreviewTextToImageInput = { + /** + * Prompt + * + * Text prompt describing the desired image + */ + prompt: string + /** + * Aspect Ratio + * + * Image aspect ratio: 1:1, 3:1, 1:3, 16:9, 9:16, 4:3, 3:4, 3:2, 2:3 + */ + aspect_ratio?: + | '1:1' + | '16:9' + | '9:16' + | '4:3' + | '3:4' + | '3:1' + | '1:3' + | '3:2' + | '2:3' + /** + * Seed + * + * Seed for the image generation + */ + seed?: number +} + +/** + * NanoBananaTextToImageOutput + */ +export type SchemaNanoBananaProOutput = { + /** + * Images + * + * The generated images. + */ + images: Array + /** + * Description + * + * The description of the generated images. + */ + description: string +} + +/** + * NanoBananaTextToImageInput + */ +export type SchemaNanoBananaProInput = { + /** + * Prompt + * + * The text prompt to generate an image from. + */ + prompt: string + /** + * Number of Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Enable Web Search + * + * Enable web search for the image generation task. This will allow the model to use the latest information from the web to generate the image. + */ + enable_web_search?: boolean + /** + * Aspect Ratio + * + * The aspect ratio of the generated image. + */ + aspect_ratio?: + | '21:9' + | '16:9' + | '3:2' + | '4:3' + | '5:4' + | '1:1' + | '4:5' + | '3:4' + | '2:3' + | '9:16' + /** + * Resolution + * + * The resolution of the image to generate. + */ + resolution?: '1K' | '2K' | '4K' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Seed + * + * The seed for the random number generator. + */ + seed?: number + /** + * Limit Generations + * + * Experimental parameter to limit the number of generations from each round of prompting to 1. Set to `True` to disregard any instructions in the prompt regarding the number of images to generate. + */ + limit_generations?: boolean +} + +/** + * NanoBananaTextToImageOutput + */ +export type SchemaGemini3ProImagePreviewOutput = { + /** + * Images + * + * The generated images. + */ + images: Array + /** + * Description + * + * The description of the generated images. + */ + description: string +} + +/** + * NanoBananaTextToImageInput + */ +export type SchemaGemini3ProImagePreviewInput = { + /** + * Prompt + * + * The text prompt to generate an image from. + */ + prompt: string + /** + * Number of Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Enable Web Search + * + * Enable web search for the image generation task. This will allow the model to use the latest information from the web to generate the image. + */ + enable_web_search?: boolean + /** + * Resolution + * + * The resolution of the image to generate. + */ + resolution?: '1K' | '2K' | '4K' + /** + * Aspect Ratio + * + * The aspect ratio of the generated image. + */ + aspect_ratio?: + | '21:9' + | '16:9' + | '3:2' + | '4:3' + | '5:4' + | '1:1' + | '4:5' + | '3:4' + | '2:3' + | '9:16' + /** + * Output Format + * + * The format of the generated image.
+ */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Seed + * + * The seed for the random number generator. + */ + seed?: number + /** + * Limit Generations + * + * Experimental parameter to limit the number of generations from each round of prompting to 1. Set to `True` to disregard any instructions in the prompt regarding the number of images to generate. + */ + limit_generations?: boolean +} + +/** + * Flux2FlexOutput + */ +export type SchemaFlux2FlexOutput = { + /** + * Images + * + * The generated images. + */ + images: Array + /** + * Seed + * + * The seed used for the generation. + */ + seed: number +} + +/** + * Flux2FlexTextToImageInput + */ +export type SchemaFlux2FlexInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Safety Tolerance + * + * The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive. + */ + safety_tolerance?: '1' | '2' | '3' | '4' | '5' + /** + * Enable Prompt Expansion + * + * Whether to expand the prompt using the model's own knowledge. + */ + enable_prompt_expansion?: boolean + /** + * Number of Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * The seed to use for the generation. + */ + seed?: number + /** + * Guidance Scale + * + * The guidance scale to use for the generation. + */ + guidance_scale?: number +} + +/** + * BallpointPenSketchOutput + */ +export type SchemaFlux2LoraGalleryBallpointPenSketchOutput = { + /** + * Prompt + * + * The prompt used for generation + */ + prompt: string + /** + * Images + * + * The generated ballpoint pen sketch style images + */ + images: Array + /** + * Seed + * + * The seed used for generation + */ + seed: number +} + +/** + * BallpointPenSketchInput + * + * Input model for Ballpoint Pen Sketch endpoint - Generate ballpoint pen sketch style images + */ +export type SchemaFlux2LoraGalleryBallpointPenSketchInput = { + /** + * Prompt + * + * The prompt to generate a ballpoint pen sketch style image. Use 'b4llp01nt' trigger word for best results. + */ + prompt: string + /** + * Num Images + * + * Number of images to generate + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * Acceleration level for image generation. 'regular' balances speed and quality. + */ + acceleration?: 'none' | 'regular' + /** + * Lora Scale + * + * The strength of the ballpoint pen sketch effect.
+ */ + lora_scale?: number + /** + * Output Format + * + * The format of the output image + */ + output_format?: 'png' | 'jpeg' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and won't be saved in history. + */ + sync_mode?: boolean + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt. + */ + guidance_scale?: number + /** + * Seed + * + * Random seed for reproducibility. Same seed with same prompt will produce same result. + */ + seed?: number | unknown + /** + * Enable Safety Checker + * + * Whether to enable the safety checker for the generated image. + */ + enable_safety_checker?: boolean + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * DigitalComicArtOutput + */ +export type SchemaFlux2LoraGalleryDigitalComicArtOutput = { + /** + * Prompt + * + * The prompt used for generation + */ + prompt: string + /** + * Images + * + * The generated digital comic art style images + */ + images: Array + /** + * Seed + * + * The seed used for generation + */ + seed: number +} + +/** + * DigitalComicArtInput + * + * Input model for Digital Comic Art endpoint - Generate digital comic art style images + */ +export type SchemaFlux2LoraGalleryDigitalComicArtInput = { + /** + * Prompt + * + * The prompt to generate a digital comic art style image. Use 'd1g1t4l' trigger word for best results. + */ + prompt: string + /** + * Num Images + * + * Number of images to generate + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * Acceleration level for image generation. 'regular' balances speed and quality. + */ + acceleration?: 'none' | 'regular' + /** + * Lora Scale + * + * The strength of the digital comic art effect. + */ + lora_scale?: number + /** + * Output Format + * + * The format of the output image + */ + output_format?: 'png' | 'jpeg' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and won't be saved in history. + */ + sync_mode?: boolean + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt. + */ + guidance_scale?: number + /** + * Seed + * + * Random seed for reproducibility. Same seed with same prompt will produce same result. + */ + seed?: number | unknown + /** + * Enable Safety Checker + * + * Whether to enable the safety checker for the generated image. + */ + enable_safety_checker?: boolean + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * HdrStyleOutput + */ +export type SchemaFlux2LoraGalleryHdrStyleOutput = { + /** + * Prompt + * + * The prompt used for generation + */ + prompt: string + /** + * Images + * + * The generated HDR style images + */ + images: Array + /** + * Seed + * + * The seed used for generation + */ + seed: number +} + +/** + * HdrStyleInput + * + * Input model for HDR Style endpoint - Generate HDR style images with vibrant colors + */ +export type SchemaFlux2LoraGalleryHdrStyleInput = { + /** + * Prompt + * + * The prompt to generate an HDR style image. The trigger word 'Hyp3rRe4list1c' will be automatically prepended. 
+ */ + prompt: string + /** + * Num Images + * + * Number of images to generate + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * Acceleration level for image generation. 'regular' balances speed and quality. + */ + acceleration?: 'none' | 'regular' + /** + * Lora Scale + * + * The strength of the HDR style effect. + */ + lora_scale?: number + /** + * Output Format + * + * The format of the output image + */ + output_format?: 'png' | 'jpeg' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and won't be saved in history. + */ + sync_mode?: boolean + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt. + */ + guidance_scale?: number + /** + * Seed + * + * Random seed for reproducibility. Same seed with same prompt will produce same result. + */ + seed?: number | unknown + /** + * Enable Safety Checker + * + * Whether to enable the safety checker for the generated image. + */ + enable_safety_checker?: boolean + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * RealismOutput + */ +export type SchemaFlux2LoraGalleryRealismOutput = { + /** + * Prompt + * + * The prompt used for generation + */ + prompt: string + /** + * Images + * + * The generated realistic style images + */ + images: Array + /** + * Seed + * + * The seed used for generation + */ + seed: number +} + +/** + * RealismInput + * + * Input model for Realism endpoint - Generate realistic style images + */ +export type SchemaFlux2LoraGalleryRealismInput = { + /** + * Prompt + * + * The prompt to generate a realistic image with natural lighting and authentic details. + */ + prompt: string + /** + * Num Images + * + * Number of images to generate + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * Acceleration level for image generation. 'regular' balances speed and quality. + */ + acceleration?: 'none' | 'regular' + /** + * Lora Scale + * + * The strength of the realism effect. + */ + lora_scale?: number + /** + * Output Format + * + * The format of the output image + */ + output_format?: 'png' | 'jpeg' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and won't be saved in history. + */ + sync_mode?: boolean + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt. + */ + guidance_scale?: number + /** + * Seed + * + * Random seed for reproducibility. Same seed with same prompt will produce same result. + */ + seed?: number | unknown + /** + * Enable Safety Checker + * + * Whether to enable the safety checker for the generated image. + */ + enable_safety_checker?: boolean + /** + * Num Inference Steps + * + * The number of inference steps to perform. 
+ */ + num_inference_steps?: number +} + +/** + * SatelliteViewStyleOutput + */ +export type SchemaFlux2LoraGallerySatelliteViewStyleOutput = { + /** + * Prompt + * + * The prompt used for generation + */ + prompt: string + /** + * Images + * + * The generated satellite view style images + */ + images: Array + /** + * Seed + * + * The seed used for generation + */ + seed: number +} + +/** + * SatelliteViewStyleInput + * + * Input model for Satellite View Style endpoint - Generate satellite/aerial view style images + */ +export type SchemaFlux2LoraGallerySatelliteViewStyleInput = { + /** + * Prompt + * + * The prompt to generate a satellite/aerial view style image. + */ + prompt: string + /** + * Num Images + * + * Number of images to generate + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * Acceleration level for image generation. 'regular' balances speed and quality. + */ + acceleration?: 'none' | 'regular' + /** + * Lora Scale + * + * The strength of the satellite view style effect. + */ + lora_scale?: number + /** + * Output Format + * + * The format of the output image + */ + output_format?: 'png' | 'jpeg' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and won't be saved in history. + */ + sync_mode?: boolean + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt. + */ + guidance_scale?: number + /** + * Seed + * + * Random seed for reproducibility. Same seed with same prompt will produce same result. + */ + seed?: number | unknown + /** + * Enable Safety Checker + * + * Whether to enable the safety checker for the generated image. + */ + enable_safety_checker?: boolean + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * SepiaVintageOutput + */ +export type SchemaFlux2LoraGallerySepiaVintageOutput = { + /** + * Prompt + * + * The prompt used for generation + */ + prompt: string + /** + * Images + * + * The generated sepia vintage photography style images + */ + images: Array + /** + * Seed + * + * The seed used for generation + */ + seed: number +} + +/** + * SepiaVintageInput + * + * Input model for Sepia Vintage Photography endpoint - Generate vintage sepia style images + */ +export type SchemaFlux2LoraGallerySepiaVintageInput = { + /** + * Prompt + * + * The prompt to generate a sepia vintage photography style image. + */ + prompt: string + /** + * Num Images + * + * Number of images to generate + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * Acceleration level for image generation. 'regular' balances speed and quality. + */ + acceleration?: 'none' | 'regular' + /** + * Lora Scale + * + * The strength of the sepia vintage photography effect. + */ + lora_scale?: number + /** + * Output Format + * + * The format of the output image + */ + output_format?: 'png' | 'jpeg' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and won't be saved in history. 
+ */ + sync_mode?: boolean + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt. + */ + guidance_scale?: number + /** + * Seed + * + * Random seed for reproducibility. Same seed with same prompt will produce same result. + */ + seed?: number | unknown + /** + * Enable Safety Checker + * + * Whether to enable the safety checker for the generated image. + */ + enable_safety_checker?: boolean + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * ZImageTurboOutput + */ +export type SchemaZImageTurboOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Seed + * + * Seed of the generated Image. It will be the same value of the one passed in the input or the randomly generated that was used in case none was passed. + */ + seed: number + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Timings + * + * The timings of the generation process. + */ + timings: { + [key: string]: number + } +} + +/** + * ZImageTurboTextToImageInput + */ +export type SchemaZImageTurboInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Number of Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * The acceleration level to use. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. Note: this will increase the price by 0.0025 credits per request. + */ + enable_prompt_expansion?: boolean + /** + * Number of Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean +} + +/** + * OvisImageOutput + */ +export type SchemaOvisImageOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Timings + */ + timings: { + [key: string]: number + } +} + +/** + * TextToImageInput + */ +export type SchemaOvisImageInput = { + /** + * Prompt + * + * The prompt to generate an image from. 
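Editor's note on the flux-2 LoRA gallery family above: the ballpoint and comic endpoints expect a trigger token in the prompt ('b4llp01nt' and 'd1g1t4l' respectively), while the HDR variant prepends its 'Hyp3rRe4list1c' token automatically. A minimal sketch (import path hypothetical):

import type { SchemaFlux2LoraGalleryBallpointPenSketchInput } from './types'

const sketchInput: SchemaFlux2LoraGalleryBallpointPenSketchInput = {
  // Trigger word included manually, per the schema's own guidance.
  prompt: 'b4llp01nt sketch of a vintage bicycle against a brick wall',
  lora_scale: 1.0, // strength of the ballpoint effect
  num_images: 1,
}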
+ */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Acceleration + * + * The acceleration level to use. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Guidance Scale + * + * The guidance scale to use for the image generation. + */ + guidance_scale?: number + /** + * Negative Prompt + * + * The negative prompt to generate an image from. + */ + negative_prompt?: string + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * ZImageTurboOutput + */ +export type SchemaZImageTurboLoraOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Seed + * + * Seed of the generated Image. It will be the same value of the one passed in the input or the randomly generated that was used in case none was passed. + */ + seed: number + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array<boolean> + /** + * Timings + * + * The timings of the generation process. + */ + timings: { + [key: string]: number + } +} + +/** + * ZImageTurboTextToImageLoRAInput + */ +export type SchemaZImageTurboLoraInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Number of Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * The acceleration level to use. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Loras + * + * List of LoRA weights to apply (maximum 3). + */ + loras?: Array<SchemaLoRaInput> + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. Note: this will increase the price by 0.0025 credits per request. + */ + enable_prompt_expansion?: boolean + /** + * Number of Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time.
+ * + */ + seed?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean +} + +/** + * LoRAInput + * + * LoRA weight configuration. + */ +export type SchemaLoRaInput = { + /** + * Path + * + * URL, HuggingFace repo ID (owner/repo) to lora weights. + */ + path: string + /** + * Scale + * + * Scale factor for LoRA application (0.0 to 4.0). + */ + scale?: number +} + +/** + * TextToImageOutput + */ +export type SchemaViduQ2TextToImageOutput = { + /** + * Image + * + * The edited image + */ + image: SchemaImage +} + +/** + * TextToImageRequest + */ +export type SchemaViduQ2TextToImageInput = { + /** + * Prompt + * + * Text prompt for video generation, max 1500 characters + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the output video + */ + aspect_ratio?: '16:9' | '9:16' | '1:1' + /** + * Seed + * + * Random seed for generation + */ + seed?: number +} + +/** + * SeedDream45T2IOutput + */ +export type SchemaBytedanceSeedreamV45TextToImageOutput = { + /** + * Images + * + * Generated images + */ + images: Array + /** + * Seed + * + * Seed used for generation + */ + seed: number +} + +/** + * SeedDream45T2IInput + */ +export type SchemaBytedanceSeedreamV45TextToImageInput = { + /** + * Prompt + * + * The text prompt used to generate the image + */ + prompt: string + /** + * Num Images + * + * Number of separate model generations to be run with the prompt. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. Width and height must be between 1920 and 4096, or total number of pixels must be between 2560*1440 and 4096*4096. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + | 'auto_2K' + | 'auto_4K' + /** + * Max Images + * + * If set to a number greater than one, enables multi-image generation. The model will potentially return up to `max_images` images every generation, and in total, `num_images` generations will be carried out. In total, the number of images generated will be between `num_images` and `max_images*num_images`. + */ + max_images?: number + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * Random seed to control the stochasticity of image generation. + */ + seed?: number +} + +/** + * TextToImageOutput + */ +export type SchemaLongcatImageOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * TextToImageInput + */ +export type SchemaLongcatImageInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. 
+ */ + num_images?: number + /** + * Acceleration + * + * The acceleration level to use. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Guidance Scale + * + * The guidance scale to use for the image generation. + */ + guidance_scale?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number +} + +/** + * Flux2MaxOutput + */ +export type SchemaFlux2MaxOutput = { + /** + * Images + * + * The generated images. + */ + images: Array + /** + * Seed + * + * The seed used for the generation. + */ + seed: number +} + +/** + * Flux2MaxTextToImageInput + */ +export type SchemaFlux2MaxInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Safety Tolerance + * + * The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive. + */ + safety_tolerance?: '1' | '2' | '3' | '4' | '5' + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * The seed to use for the generation. + */ + seed?: number +} + +/** + * Flux2TurboT2IOutput + */ +export type SchemaFlux2TurboOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated images + */ + images: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Timings + */ + timings: { + [key: string]: number + } +} + +/** + * Flux2TurboTextToImageInput + */ +export type SchemaFlux2TurboInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Number of Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the image to generate. The width and height must be between 512 and 2048 pixels. 
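Editor's note: `safety_tolerance` on the FLUX.2 inputs above is a string enum, so pass '3' rather than the number 3. A minimal sketch (import path hypothetical):

import type { SchemaFlux2MaxInput } from './types'

const maxInput: SchemaFlux2MaxInput = {
  prompt: 'Isometric illustration of a solar farm',
  safety_tolerance: '3', // '1' = most strict ... '5' = most permissive
  output_format: 'png',
}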
+ */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Guidance Scale + * + * Guidance Scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you. + */ + guidance_scale?: number + /** + * Seed + * + * The seed to use for the generation. If not provided, a random seed will be used. + */ + seed?: number + /** + * Enable Prompt Expansion + * + * If set to true, the prompt will be expanded for better results. + */ + enable_prompt_expansion?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean +} + +/** + * GaiaOutputModel + */ +export type SchemaFiboLiteGenerateOutput = { + /** + * Images + * + * Generated images. + */ + images?: Array<{ + [key: string]: unknown + }> + image: SchemaImage + /** + * Structured Prompt + * + * Current prompt. + */ + structured_prompt: { + [key: string]: unknown + } +} + +/** + * GaiaLiteInputModel + */ +export type SchemaFiboLiteGenerateInput = { + /** + * Prompt + * + * Prompt for image generation. + */ + prompt?: string | unknown + /** + * Steps Num + * + * Number of inference steps for Fibo Lite. + */ + steps_num?: number + /** + * Aspect Ratio + * + * Aspect ratio. Options: 1:1, 2:3, 3:2, 3:4, 4:3, 4:5, 5:4, 9:16, 16:9 + */ + aspect_ratio?: + | '1:1' + | '2:3' + | '3:2' + | '3:4' + | '4:3' + | '4:5' + | '5:4' + | '9:16' + | '16:9' + /** + * Image Url + * + * Reference image (file or URL). + */ + image_url?: string | unknown + /** + * Sync Mode + * + * If true, returns the image directly in the response (increases latency). + */ + sync_mode?: boolean + /** + * Seed + * + * Random seed for reproducibility. + */ + seed?: number + /** + * The structured prompt to generate an image from. + */ + structured_prompt?: SchemaStructuredPrompt | unknown +} + +/** + * ImageResponse + */ +export type SchemaGptImage15Output = { + /** + * Images + * + * The generated images. + */ + images: Array +} + +/** + * TextToImageRequest + */ +export type SchemaGptImage15Input = { + /** + * Prompt + * + * The prompt for image generation + */ + prompt: string + /** + * Number of Images + * + * Number of images to generate + */ + num_images?: number + /** + * Image Size + * + * Aspect ratio for the generated image + */ + image_size?: '1024x1024' | '1536x1024' | '1024x1536' + /** + * Background + * + * Background for the generated image + */ + background?: 'auto' | 'transparent' | 'opaque' + /** + * Quality + * + * Quality for the generated image + */ + quality?: 'low' | 'medium' | 'high' + /** + * Output Format + * + * Output format for the images + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean +} + +/** + * Flux2FlashT2IOutput + */ +export type SchemaFlux2FlashOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated images + */ + images: Array + /** + * Seed + * + * + * Seed of the generated Image. 
It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Timings + */ + timings: { + [key: string]: number + } +} + +/** + * Flux2FlashTextToImageInput + */ +export type SchemaFlux2FlashInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Number of Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the image to generate. The width and height must be between 512 and 2048 pixels. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Guidance Scale + * + * Guidance Scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you. + */ + guidance_scale?: number + /** + * Seed + * + * The seed to use for the generation. If not provided, a random seed will be used. + */ + seed?: number + /** + * Enable Prompt Expansion + * + * If set to true, the prompt will be expanded for better results. + */ + enable_prompt_expansion?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean +} + +/** + * TextToImageWanOutput + * + * Output for Wan 2.6 text-to-image (can include generated text in mixed mode) + */ +export type SchemaV26TextToImageOutput = { + /** + * Images + * + * Generated images in PNG format + */ + images: Array + /** + * Seed + * + * The seed used for generation + */ + seed: number + /** + * Generated Text + * + * Generated text content (in mixed text-and-image mode). May be None if only images were generated. + */ + generated_text?: string +} + +/** + * TextToImageWanInput + * + * Input for Wan 2.6 text-to-image or mixed text-and-image generation (enable_interleave=true) + */ +export type SchemaV26TextToImageInput = { + /** + * Prompt + * + * Text prompt describing the desired image. Supports Chinese and English. Max 2000 characters. + */ + prompt: string + /** + * Image Size + * + * Output image size. If not set: matches input image size (up to 1280*1280). Use presets like 'square_hd', 'landscape_16_9', or specify exact dimensions. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Max Images + * + * Maximum number of images to generate (1-5). Actual count may be less depending on model inference. + */ + max_images?: number + /** + * Image Url + * + * Optional reference image (0 or 1). When provided, can be used for style guidance. Resolution: 384-5000px each dimension. Max size: 10MB. Formats: JPEG, JPG, PNG (no alpha), BMP, WEBP. + */ + image_url?: string + /** + * Enable Safety Checker + * + * Enable content moderation for input and output. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * Random seed for reproducibility (0-2147483647). 
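+ *
+ * [Editor's note: illustrative sketch, not part of the generated schema]
+ * `SchemaV26TextToImageOutput.generated_text` is only present in mixed
+ * text-and-image mode, so consumers should guard for it:
+ *
+ * ```ts
+ * function logWanResult(out: SchemaV26TextToImageOutput): void {
+ *   if (out.generated_text !== undefined) {
+ *     console.log('generated text:', out.generated_text)
+ *   }
+ *   console.log('images:', out.images.length, 'seed:', out.seed)
+ * }
+ * ```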
+ */ + seed?: number + /** + * Negative Prompt + * + * Content to avoid in the generated image. Max 500 characters. + */ + negative_prompt?: string +} + +/** + * QwenImage2512Output + */ +export type SchemaQwenImage2512Output = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * TextToImageInput + */ +export type SchemaQwenImage2512Input = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Acceleration + * + * The acceleration level to use. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Guidance Scale + * + * The guidance scale to use for the image generation. + */ + guidance_scale?: number + /** + * Negative Prompt + * + * The negative prompt to generate an image from. + */ + negative_prompt?: string + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * QwenImage2512Output + */ +export type SchemaQwenImage2512LoraOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * LoraInput + */ +export type SchemaQwenImage2512LoraInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Acceleration + * + * The acceleration level to use. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Image Size + * + * The size of the generated image. 
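+ *
+ * [Editor's note: illustrative sketch, not part of the generated schema]
+ * Per the `seed` description in `SchemaQwenImage2512Input` above, pinning the
+ * seed makes runs reproducible for a given model version and prompt:
+ *
+ * ```ts
+ * const qwenInput: SchemaQwenImage2512Input = {
+ *   prompt: 'blue heron over a misty lake',
+ *   seed: 1234, // same seed + same prompt => same image
+ *   num_inference_steps: 28, // example value, not a schema default
+ * }
+ * ```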
+ */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use up to 3 LoRAs + * and they will be merged together to generate the final image. + * + */ + loras?: Array + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Guidance Scale + * + * The guidance scale to use for the image generation. + */ + guidance_scale?: number + /** + * Negative Prompt + * + * The negative prompt to generate an image from. + */ + negative_prompt?: string + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * GlmImageOutput + */ +export type SchemaGlmImageOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * List of URLs to the generated images. + */ + images: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Timings + */ + timings: { + [key: string]: number + } +} + +/** + * GlmImageInput + */ +export type SchemaGlmImageInput = { + /** + * Prompt + * + * Text prompt for image generation. + */ + prompt: string + /** + * Num Images + * + * Number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * Output image size. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + | 'portrait_3_2' + | 'landscape_3_2' + | 'portrait_hd' + | 'landscape_hd' + /** + * Output Format + * + * Output image format. + */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * If True, the image will be returned as a base64 data URI instead of a URL. + */ + sync_mode?: boolean + /** + * Guidance Scale + * + * Classifier-free guidance scale. Higher values make the model follow the prompt more closely. + */ + guidance_scale?: number + /** + * Seed + * + * Random seed for reproducibility. The same seed with the same prompt will produce the same image. + */ + seed?: number + /** + * Enable Prompt Expansion + * + * If True, the prompt will be enhanced using an LLM for more detailed and higher quality results. + */ + enable_prompt_expansion?: boolean + /** + * Num Inference Steps + * + * Number of diffusion denoising steps. More steps generally produce higher quality images. + */ + num_inference_steps?: number + /** + * Enable Safety Checker + * + * Enable NSFW safety checking on the generated images. 
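+ *
+ * [Editor's note: illustrative sketch, not part of the generated schema]
+ * Usage of the `loras` field of `SchemaQwenImage2512LoraInput` above; the
+ * `Array` element type was lost in extraction, so entries are assumed to be
+ * shaped like `SchemaLoRaInput` ({ path, scale }):
+ *
+ * ```ts
+ * const qwenLoraInput: SchemaQwenImage2512LoraInput = {
+ *   prompt: 'paper-cut diorama of a forest',
+ *   loras: [
+ *     { path: 'owner/style-lora', scale: 0.8 },
+ *     { path: 'https://example.com/detail.safetensors', scale: 0.5 },
+ *   ], // up to 3; merged together for the final image
+ * }
+ * ```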
+ */ + enable_safety_checker?: boolean +} + +/** + * ImagineArt_1_5_Output + */ +export type SchemaImagineart15ProPreviewTextToImageOutput = { + /** + * Images + * + * Generated image + */ + images: Array +} + +/** + * ImagineArt_1_5_Input + */ +export type SchemaImagineart15ProPreviewTextToImageInput = { + /** + * Prompt + * + * Text prompt describing the desired image + */ + prompt: string + /** + * Aspect Ratio + * + * Image aspect ratio: 1:1, 3:1, 1:3, 16:9, 9:16, 4:3, 3:4, 3:2, 2:3 + */ + aspect_ratio?: + | '1:1' + | '16:9' + | '9:16' + | '4:3' + | '3:4' + | '3:1' + | '1:3' + | '3:2' + | '2:3' + /** + * Seed + * + * Seed for the image generation + */ + seed?: number +} + +/** + * Klein4BDistilledT2IOutput + */ +export type SchemaFlux2Klein4bOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated images + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * KleinDistilledInput + */ +export type SchemaFlux2Klein4bInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Number of Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the image to generate. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI. Output is not stored when this is True. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Number of Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * The seed to use for the generation. If not provided, a random seed will be used. + */ + seed?: number +} + +/** + * Klein9BDistilledT2IOutput + */ +export type SchemaFlux2Klein9bOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated images + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * Klein9BDistilledInput + */ +export type SchemaFlux2Klein9bInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Number of Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the image to generate. 
+ */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI. Output is not stored when this is True. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Number of Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * The seed to use for the generation. If not provided, a random seed will be used. + */ + seed?: number +} + +/** + * Klein4BT2IOutput + */ +export type SchemaFlux2Klein4bBaseOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated images + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * Klein4BBaseInput + */ +export type SchemaFlux2Klein4bBaseInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Number of Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the image to generate. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * The acceleration level to use for image generation. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI. Output is not stored when this is True. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Number of Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * The seed to use for the generation. If not provided, a random seed will be used. + */ + seed?: number + /** + * Negative Prompt + * + * Negative prompt for classifier-free guidance. Describes what to avoid in the image. + */ + negative_prompt?: string + /** + * Guidance Scale + * + * Guidance scale for classifier-free guidance. + */ + guidance_scale?: number +} + +/** + * Klein9BT2IOutput + */ +export type SchemaFlux2Klein9bBaseOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated images + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. 
+ * + */ + seed: number +} + +/** + * Klein9BBaseInput + */ +export type SchemaFlux2Klein9bBaseInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Number of Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the image to generate. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * The acceleration level to use for image generation. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI. Output is not stored when this is True. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Number of Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * The seed to use for the generation. If not provided, a random seed will be used. + */ + seed?: number + /** + * Negative Prompt + * + * Negative prompt for classifier-free guidance. Describes what to avoid in the image. + */ + negative_prompt?: string + /** + * Guidance Scale + * + * Guidance scale for classifier-free guidance. + */ + guidance_scale?: number +} + +/** + * KleinT2IOutput + */ +export type SchemaFlux2Klein4bBaseLoraOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated images + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * KleinBaseLoRAInput + */ +export type SchemaFlux2Klein4bBaseLoraInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Number of Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the image to generate. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * The acceleration level to use for image generation. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Loras + * + * List of LoRA weights to apply (maximum 3). + */ + loras?: Array + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI. Output is not stored when this is True. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Number of Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * The seed to use for the generation. If not provided, a random seed will be used. 
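+ *
+ * [Editor's note: illustrative sketch, not part of the generated schema]
+ * Unlike the distilled Klein inputs above, the base Klein inputs expose
+ * classifier-free guidance controls; example values only:
+ *
+ * ```ts
+ * const kleinBaseInput: SchemaFlux2Klein9bBaseInput = {
+ *   prompt: 'stained-glass window of a whale',
+ *   negative_prompt: 'text, watermark',
+ *   guidance_scale: 4, // not available on the distilled variants
+ * }
+ * ```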
+ */ + seed?: number + /** + * Negative Prompt + * + * Negative prompt for classifier-free guidance. Describes what to avoid in the image. + */ + negative_prompt?: string + /** + * Guidance Scale + * + * Guidance scale for classifier-free guidance. + */ + guidance_scale?: number +} + +/** + * LoRAInput + */ +export type SchemaFalAiFlux2KleinLoRaInput = { + /** + * Path + * + * URL, HuggingFace repo ID (owner/repo), or local path to LoRA weights. + */ + path: string + /** + * Scale + * + * Scale factor for LoRA application (0.0 to 4.0). + */ + scale?: number +} + +/** + * KleinT2IOutput + */ +export type SchemaFlux2Klein9bBaseLoraOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated images + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * KleinBaseLoRAInput + */ +export type SchemaFlux2Klein9bBaseLoraInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Number of Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the image to generate. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * The acceleration level to use for image generation. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Loras + * + * List of LoRA weights to apply (maximum 3). + */ + loras?: Array + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI. Output is not stored when this is True. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Number of Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * The seed to use for the generation. If not provided, a random seed will be used. + */ + seed?: number + /** + * Negative Prompt + * + * Negative prompt for classifier-free guidance. Describes what to avoid in the image. + */ + negative_prompt?: string + /** + * Guidance Scale + * + * Guidance scale for classifier-free guidance. + */ + guidance_scale?: number +} + +/** + * ZImageBaseOutput + */ +export type SchemaZImageBaseOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Seed + * + * Seed of the generated Image. It will be the same value of the one passed in the input or the randomly generated that was used in case none was passed. + */ + seed: number + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Timings + * + * The timings of the generation process. 
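+ *
+ * [Editor's note: illustrative sketch, not part of the generated schema]
+ * `SchemaFalAiFlux2KleinLoRaInput.path` accepts several forms per its
+ * description above; the URL below is a placeholder:
+ *
+ * ```ts
+ * const byRepoId: SchemaFalAiFlux2KleinLoRaInput = { path: 'owner/repo', scale: 1.0 }
+ * const byUrl: SchemaFalAiFlux2KleinLoRaInput = {
+ *   path: 'https://example.com/weights.safetensors', // valid scale range is 0.0 to 4.0
+ * }
+ * ```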
+ */ + timings: { + [key: string]: number + } +} + +/** + * ZImageBaseTextToImageInput + */ +export type SchemaZImageBaseInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Number of Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * The acceleration level to use. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Guidance Scale + * + * The guidance scale to use for the image generation. + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * The negative prompt to use for the image generation. + */ + negative_prompt?: string + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number +} + +/** + * ZImageBaseOutput + */ +export type SchemaZImageBaseLoraOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Seed + * + * Seed of the generated Image. It will be the same value of the one passed in the input or the randomly generated that was used in case none was passed. + */ + seed: number + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Timings + * + * The timings of the generation process. + */ + timings: { + [key: string]: number + } +} + +/** + * ZImageBaseTextToImageLoRAInput + */ +export type SchemaZImageBaseLoraInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Number of Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * The acceleration level to use. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' | 'webp' + /** + * Loras + * + * List of LoRA weights to apply (maximum 3). + */ + loras?: Array + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Guidance Scale + * + * The guidance scale to use for the image generation. + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. 
+ */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Negative Prompt + * + * The negative prompt to use for the image generation. + */ + negative_prompt?: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean +} + +/** + * Output + */ +export type SchemaFluxLoraOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * TextToImageInput + */ +export type SchemaFluxLoraInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. This is always set to 1 for streaming output. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use any number of LoRAs + * and they will be merged together to generate the final image. + * + */ + loras?: Array + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean +} + +/** + * ControlNetUnionInput + */ +export type SchemaControlNetUnionInput = { + /** + * Conditioning Scale + * + * + * The scale of the control net weight. This is used to scale the control net weight + * before merging it with the base model. + * + */ + conditioning_scale?: number + /** + * Mask Threshold + * + * Threshold for mask. + */ + mask_threshold?: number + /** + * End Percentage + * + * + * The percentage of the image to end applying the controlnet in terms of the total timesteps. + * + */ + end_percentage?: number + /** + * Mask Image Url + * + * URL of the mask for the control image. + */ + mask_image_url?: string | null + /** + * Control Image Url + * + * URL of the image to be used as the control image. + */ + control_image_url: string + /** + * Control Mode + * + * Control Mode for Flux Controlnet Union. 
Supported values are: + * - canny: Uses the edges for guided generation. + * - tile: Uses the tiles for guided generation. + * - depth: Utilizes a grayscale depth map for guided generation. + * - blur: Adds a blur to the image. + * - pose: Uses the pose of the image for guided generation. + * - gray: Converts the image to grayscale. + * - low-quality: Converts the image to a low-quality image. + */ + control_mode: + | 'canny' + | 'tile' + | 'depth' + | 'blur' + | 'pose' + | 'gray' + | 'low-quality' + /** + * Start Percentage + * + * + * The percentage of the image to start applying the controlnet in terms of the total timesteps. + * + */ + start_percentage?: number +} + +/** + * ControlNetUnion + */ +export type SchemaControlNetUnion = { + /** + * Controls + * + * The control images and modes to use for the control net. + */ + controls: Array + /** + * Path + * + * URL or the path to the control net weights. + */ + path: string + /** + * Variant + * + * The optional variant if a Hugging Face repo key is used. + */ + variant?: string + /** + * Config Url + * + * optional URL to the controlnet config.json file. + */ + config_url?: string +} + +/** + * ImageFillInput + */ +export type SchemaImageFillInput = { + /** + * Fill Image Url + * + * URLs of images to be filled for redux prompting + */ + fill_image_url?: string | Array +} + +/** + * EasyControlWeight + */ +export type SchemaEasyControlWeight = { + /** + * Scale + * + * Scale for the control method. + */ + scale?: number + /** + * Image Control Type + * + * Control type of the image. Must be one of `spatial` or `subject`. + */ + image_control_type: 'subject' | 'spatial' + /** + * Control Method Url + * + * URL to safetensor weights of control method to be applied. Can also be one of `canny`, `depth`, `hedsketch`, `inpainting`, `pose`, `seg`, `subject`, `ghibli` + */ + control_method_url: string + /** + * Image Url + * + * URL of an image to use as a control + */ + image_url: string +} + +/** + * ControlLoraWeight + */ +export type SchemaControlLoraWeight = { + /** + * Path + * + * URL or the path to the LoRA weights. + */ + path: string + /** + * Scale + * + * + * The scale of the LoRA weight. This is used to scale the LoRA weight + * before merging it with the base model. Providing a dictionary as {"layer_name":layer_scale} allows per-layer lora scale settings. Layers with no scale provided will have scale 1.0. + * + */ + scale?: + | { + [key: string]: unknown + } + | number + /** + * Control Image Url + * + * URL of the image to be used as the control image. + */ + control_image_url: string + /** + * Preprocess + * + * Type of preprocessing to apply to the input image. + */ + preprocess?: 'canny' | 'depth' | 'None' +} + +/** + * Output + */ +export type SchemaFluxGeneralOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * TextToImageInput + */ +export type SchemaFluxGeneralInput = { + /** + * Prompt + * + * The prompt to generate an image from. 
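+ *
+ * [Editor's note: illustrative sketch, not part of the generated schema]
+ * `SchemaControlLoraWeight.scale` above accepts either a uniform number or a
+ * per-layer dictionary; the layer name below is hypothetical:
+ *
+ * ```ts
+ * const uniform: SchemaControlLoraWeight = {
+ *   path: 'https://example.com/control-lora.safetensors',
+ *   control_image_url: 'https://example.com/pose.png',
+ *   scale: 0.9,
+ *   preprocess: 'depth',
+ * }
+ * const perLayer: SchemaControlLoraWeight = {
+ *   path: 'https://example.com/control-lora.safetensors',
+ *   control_image_url: 'https://example.com/pose.png',
+ *   scale: { some_layer_name: 0.5 }, // unlisted layers get scale 1.0
+ * }
+ * ```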
+ */ + prompt: string + /** + * Proportion of steps to apply NAG + * + * + * The proportion of steps to apply NAG. After the specified proportion + * of steps has been iterated, the remaining steps will use original + * attention processors in FLUX. + * + */ + nag_end?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Control Loras + * + * + * The LoRAs to use for the image generation which use a control image. You can use any number of LoRAs + * and they will be merged together to generate the final image. + * + */ + control_loras?: Array + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use any number of LoRAs + * and they will be merged together to generate the final image. + * + */ + loras?: Array + /** + * Scheduler + * + * Scheduler for the denoising process. + */ + scheduler?: 'euler' | 'dpmpp_2m' + /** + * Easycontrols + * + * + * EasyControl Inputs to use for image generation. + * + */ + easycontrols?: Array + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Real CFG scale + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + real_cfg_scale?: number + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Use CFG-Zero-Init + * + * + * Uses CFG-zero init sampling as in https://arxiv.org/abs/2503.18886. + * + */ + use_cfg_zero?: boolean + /** + * Fill Image + * + * Use an image input to influence the generation. Can be used to fill images in masked areas. + */ + fill_image?: SchemaImageFillInput + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Sigma Schedule + * + * Sigmas schedule for the denoising process. + */ + sigma_schedule?: 'sgm_uniform' + /** + * Reference End + * + * + * The percentage of the total timesteps when the reference guidance is to be ended. + * + */ + reference_end?: number + /** + * Reference Strength + * + * Strength of reference_only generation. Only used if a reference image is provided. + */ + reference_strength?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * NAG scale + * + * + * The scale for NAG. Higher values will result in a image that is more distant + * to the negative prompt. + * + */ + nag_scale?: number + /** + * Reference Image Url + * + * URL of Image for Reference-Only + */ + reference_image_url?: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Controlnet Unions + * + * + * The controlnet unions to use for the image generation. Only one controlnet is supported at the moment. + * + */ + controlnet_unions?: Array + /** + * Negative Prompt + * + * + * Negative prompt to steer the image generation away from unwanted features. + * By default, we will be using NAG for processing the negative prompt. 
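+ *
+ * [Editor's note: illustrative sketch, not part of the generated schema]
+ * The NAG fields of `SchemaFluxGeneralInput` work together with
+ * `negative_prompt`; values are examples only:
+ *
+ * ```ts
+ * const nagInput: SchemaFluxGeneralInput = {
+ *   prompt: 'a lighthouse in a storm',
+ *   negative_prompt: 'blurry, low resolution', // processed via NAG by default
+ *   nag_scale: 3.0, // larger pushes output further from the negative prompt
+ *   nag_end: 0.5, // apply NAG only for the first half of the steps
+ * }
+ * ```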
+   *
+   */
+  negative_prompt?: string
+  /**
+   * NAG Tau
+   *
+   * The tau for NAG. Controls the normalization of the hidden state.
+   * Higher values result in a less aggressive normalization, but may also
+   * lead to unexpected changes with respect to the original image.
+   * Changing this value is not recommended.
+   */
+  nag_tau?: number
+  /**
+   * Num Images
+   *
+   * The number of images to generate. This is always set to 1 for streaming output.
+   */
+  num_images?: number
+  /**
+   * Use Beta Schedule
+   *
+   * Specifies whether beta sigmas ought to be used.
+   */
+  use_beta_schedule?: boolean
+  /**
+   * Ip Adapters
+   *
+   * IP-Adapter to use for image generation.
+   */
+  ip_adapters?: Array
+  /**
+   * Base Shift
+   *
+   * Base shift for the scheduled timesteps
+   */
+  base_shift?: number
+  /**
+   * NAG alpha
+   *
+   * The alpha value for NAG. This value is used as a final weighting factor
+   * for steering the normalized guidance (positive and negative prompts) in
+   * the direction of the positive prompt. Higher values result in less
+   * steering on the normalized guidance, while lower values result in
+   * considering the positive prompt guidance more.
+   */
+  nag_alpha?: number
+  /**
+   * Use Real CFG
+   *
+   * Uses classical CFG as in SD1.5, SDXL, etc. Increases generation time and
+   * price when set to true. If using XLabs IP-Adapter v1, this will be turned on.
+   */
+  use_real_cfg?: boolean
+  /**
+   * Max Shift
+   *
+   * Max shift for the scheduled timesteps
+   */
+  max_shift?: number
+  /**
+   * Num Inference Steps
+   *
+   * The number of inference steps to perform.
+   */
+  num_inference_steps?: number
+  /**
+   * Controlnets
+   *
+   * The controlnets to use for the image generation. Only one controlnet is
+   * supported at the moment.
+   */
+  controlnets?: Array
+  /**
+   * Reference Start
+   *
+   * The percentage of the total timesteps when the reference guidance is to be started.
+   */
+  reference_start?: number
+}
+
+/**
+ * Output
+ */
+export type SchemaStableDiffusionV35LargeOutput = {
+  /**
+   * Prompt
+   *
+   * The prompt used for generating the image.
+   */
+  prompt: string
+  /**
+   * Images
+   *
+   * The generated image files info.
+   */
+  images: Array<SchemaImage>
+  /**
+   * Seed
+   *
+   * Seed of the generated Image. It will be the same value as the one passed in
+   * the input, or the randomly generated seed that was used if none was passed.
+   */
+  seed: number
+  /**
+   * Has Nsfw Concepts
+   *
+   * Whether the generated images contain NSFW concepts.
+   */
+  has_nsfw_concepts: Array<boolean>
+  /**
+   * Timings
+   */
+  timings: {
+    [key: string]: number
+  }
+}
+
+/**
+ * TextToImageInput
+ */
+export type SchemaStableDiffusionV35LargeInput = {
+  /**
+   * Prompt
+   *
+   * The prompt to generate an image from.
+   */
+  prompt: string
+  /**
+   * Num Images
+   *
+   * The number of images to generate.
+   */
+  num_images?: number
+  /**
+   * Image Size
+   *
+   * The size of the generated image. Defaults to landscape_4_3 if no controlnet has been passed, otherwise defaults to the size of the controlnet conditioning image.
+   */
+  image_size?:
+    | SchemaImageSize
+    | 'square_hd'
+    | 'square'
+    | 'portrait_4_3'
+    | 'portrait_16_9'
+    | 'landscape_4_3'
+    | 'landscape_16_9'
+  /**
+   * Controlnet
+   *
+   * ControlNet for inference.
+   */
+  controlnet?: SchemaControlNet
+  /**
+   * Output Format
+   *
+   * The format of the generated image.
+   */
+  output_format?: 'jpeg' | 'png'
+  /**
+   * Ip Adapter
+   *
+   * IP-Adapter to use during inference.
+ * + */ + ip_adapter?: SchemaIpAdapter + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use any number of LoRAs + * and they will be merged together to generate the final image. + * + */ + loras?: Array + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number +} + +/** + * Output + */ +export type SchemaIdeogramV2Output = { + /** + * Images + */ + images: Array + /** + * Seed + * + * Seed used for the random number generator + */ + seed: number +} + +/** + * TextToImageInput + */ +export type SchemaIdeogramV2Input = { + /** + * Prompt + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated image + */ + aspect_ratio?: + | '10:16' + | '16:10' + | '9:16' + | '16:9' + | '4:3' + | '3:4' + | '1:1' + | '1:3' + | '3:1' + | '3:2' + | '2:3' + /** + * Style + * + * The style of the generated image + */ + style?: 'auto' | 'general' | 'realistic' | 'design' | 'render_3D' | 'anime' + /** + * Expand Prompt + * + * Whether to expand the prompt with MagicPrompt functionality. + */ + expand_prompt?: boolean + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Seed + * + * Seed for the random number generator + */ + seed?: number | unknown + /** + * Negative Prompt + * + * A negative prompt to avoid in the generated image + */ + negative_prompt?: string +} + +/** + * Output + */ +export type SchemaFluxDevOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * BaseInput + */ +export type SchemaFluxDevInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. 
+ */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Acceleration + * + * The speed of the generation. The higher the speed, the faster the generation. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number | unknown + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number +} + +/** + * Output + */ +export type SchemaHidreamI1FastOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. + */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * FastInput + */ +export type SchemaHidreamI1FastInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number | unknown + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string +} + +/** + * Output + */ +export type SchemaHidreamI1DevOutput = { + /** + * Prompt + * + * The prompt used for generating the image. + */ + prompt: string + /** + * Images + * + * The generated image files info. 
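+ *
+ * [Editor's note: illustrative sketch, not part of the generated schema]
+ * `SchemaFluxDevInput.acceleration` selects a speed preset; per its
+ * description, higher levels generate faster. Example values only:
+ *
+ * ```ts
+ * const fluxDevInput: SchemaFluxDevInput = {
+ *   prompt: 'watercolor hummingbird',
+ *   acceleration: 'high', // 'none' | 'regular' | 'high', higher is faster
+ *   guidance_scale: 3.5,
+ *   num_inference_steps: 28,
+ * }
+ * ```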
+ */ + images: Array + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Has Nsfw Concepts + * + * Whether the generated images contain NSFW concepts. + */ + has_nsfw_concepts: Array + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number +} + +/** + * DevInput + */ +export type SchemaHidreamI1DevInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string +} + +export type SchemaHidreamI1FullOutput = unknown + +/** + * TextToImageInput + */ +export type SchemaHidreamI1FullInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Num Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Loras + * + * A list of LoRAs to apply to the model. Each LoRA specifies its path, scale, and optional weight name. + */ + loras?: Array + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same image every time. + * + */ + seed?: number + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). 
+ * + */ + negative_prompt?: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean +} + +/** + * Imagen4TextToImageFastOutput + */ +export type SchemaImagen4PreviewFastOutput = { + /** + * Images + * + * The generated images. + */ + images: Array + /** + * Description + * + * The description of the generated images. + */ + description: string +} + +/** + * Imagen4TextToImageFastInput + */ +export type SchemaImagen4PreviewFastInput = { + /** + * Prompt + * + * The text prompt to generate an image from. + */ + prompt: string + /** + * Number of Images + * + * The number of images to generate. + */ + num_images?: number + /** + * Aspect Ratio + * + * The aspect ratio of the generated image. + */ + aspect_ratio?: '1:1' | '16:9' | '9:16' | '4:3' | '3:4' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' | 'webp' +} + +/** + * OutputModel + */ +export type SchemaTextToImage32Output = { + image: SchemaImage +} + +/** + * InputModel + */ +export type SchemaTextToImage32Input = { + /** + * Prompt + * + * Prompt for image generation. + */ + prompt: string + /** + * Aspect Ratio + * + * Aspect ratio. Options: 1:1, 2:3, 3:2, 3:4, 4:3, 4:5, 5:4, 9:16, 16:9 + */ + aspect_ratio?: + | '1:1' + | '2:3' + | '3:2' + | '3:4' + | '4:3' + | '4:5' + | '5:4' + | '9:16' + | '16:9' + /** + * Prompt Enhancer + * + * Whether to improve the prompt. + */ + prompt_enhancer?: boolean + /** + * Sync Mode + * + * If true, returns the image directly in the response (increases latency). + */ + sync_mode?: boolean + /** + * Truncate Prompt + * + * Whether to truncate the prompt. + */ + truncate_prompt?: boolean + /** + * Guidance Scale + * + * Guidance scale for text. + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * Number of inference steps. + */ + num_inference_steps?: number + /** + * Seed + * + * Random seed for reproducibility. + */ + seed?: number + /** + * Negative Prompt + * + * Negative prompt for image generation. + */ + negative_prompt?: string +} + +/** + * Flux2ProOutput + */ +export type SchemaFlux2ProOutput = { + /** + * Images + * + * The generated images. + */ + images: Array + /** + * Seed + * + * The seed used for the generation. + */ + seed: number +} + +/** + * Flux2ProTextToImageInput + */ +export type SchemaFlux2ProInput = { + /** + * Prompt + * + * The prompt to generate an image from. + */ + prompt: string + /** + * Image Size + * + * The size of the generated image. + */ + image_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Output Format + * + * The format of the generated image. + */ + output_format?: 'jpeg' | 'png' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Safety Tolerance + * + * The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive. + */ + safety_tolerance?: '1' | '2' | '3' | '4' | '5' + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * The seed to use for the generation. 
+   */
+  seed?: number
+}
+
+/**
+ * Flux2T2IOutput
+ */
+export type SchemaFlux2Output = {
+  /**
+   * Prompt
+   *
+   * The prompt used for generating the image.
+   */
+  prompt: string
+  /**
+   * Images
+   *
+   * The generated images
+   */
+  images: Array<SchemaImage>
+  /**
+   * Seed
+   *
+   *
+   * Seed of the generated Image. It will be the same value of the one passed in the
+   * input or the randomly generated that was used in case none was passed.
+   *
+   */
+  seed: number
+  /**
+   * Has Nsfw Concepts
+   *
+   * Whether the generated images contain NSFW concepts.
+   */
+  has_nsfw_concepts: Array<boolean>
+  /**
+   * Timings
+   */
+  timings: {
+    [key: string]: number
+  }
+}
+
+/**
+ * Flux2TextToImageInput
+ */
+export type SchemaFlux2Input = {
+  /**
+   * Prompt
+   *
+   * The prompt to generate an image from.
+   */
+  prompt: string
+  /**
+   * Number of Images
+   *
+   * The number of images to generate.
+   */
+  num_images?: number
+  /**
+   * Image Size
+   *
+   * The size of the image to generate. The width and height must be between 512 and 2048 pixels.
+   */
+  image_size?:
+    | SchemaImageSize
+    | 'square_hd'
+    | 'square'
+    | 'portrait_4_3'
+    | 'portrait_16_9'
+    | 'landscape_4_3'
+    | 'landscape_16_9'
+  /**
+   * Acceleration
+   *
+   * The acceleration level to use for the image generation.
+   */
+  acceleration?: 'none' | 'regular' | 'high'
+  /**
+   * Output Format
+   *
+   * The format of the generated image.
+   */
+  output_format?: 'jpeg' | 'png' | 'webp'
+  /**
+   * Sync Mode
+   *
+   * If `True`, the media will be returned as a data URI and the output data won't be available in the request history.
+   */
+  sync_mode?: boolean
+  /**
+   * Guidance Scale
+   *
+   * Guidance Scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.
+   */
+  guidance_scale?: number
+  /**
+   * Seed
+   *
+   * The seed to use for the generation. If not provided, a random seed will be used.
+   */
+  seed?: number
+  /**
+   * Enable Prompt Expansion
+   *
+   * If set to true, the prompt will be expanded for better results.
+   */
+  enable_prompt_expansion?: boolean
+  /**
+   * Number of Inference Steps
+   *
+   * The number of inference steps to perform.
+   */
+  num_inference_steps?: number
+  /**
+   * Enable Safety Checker
+   *
+   * If set to true, the safety checker will be enabled.
+   */
+  enable_safety_checker?: boolean
+}
+
+/**
+ * Flux2T2ILoRAOutput
+ */
+export type SchemaFlux2LoraOutput = {
+  /**
+   * Prompt
+   *
+   * The prompt used for generating the image.
+   */
+  prompt: string
+  /**
+   * Images
+   *
+   * The generated images
+   */
+  images: Array<SchemaImage>
+  /**
+   * Seed
+   *
+   *
+   * Seed of the generated Image. It will be the same value of the one passed in the
+   * input or the randomly generated that was used in case none was passed.
+   *
+   */
+  seed: number
+  /**
+   * Has Nsfw Concepts
+   *
+   * Whether the generated images contain NSFW concepts.
+   */
+  has_nsfw_concepts: Array<boolean>
+  /**
+   * Timings
+   */
+  timings: {
+    [key: string]: number
+  }
+}
+
+/**
+ * Flux2TextToImageLoRAInput
+ */
+export type SchemaFlux2LoraInput = {
+  /**
+   * Prompt
+   *
+   * The prompt to generate an image from.
+   */
+  prompt: string
+  /**
+   * Number of Images
+   *
+   * The number of images to generate.
+   */
+  num_images?: number
+  /**
+   * Image Size
+   *
+   * The size of the image to generate. The width and height must be between 512 and 2048 pixels.
+   */
+  image_size?:
+    | SchemaImageSize
+    | 'square_hd'
+    | 'square'
+    | 'portrait_4_3'
+    | 'portrait_16_9'
+    | 'landscape_4_3'
+    | 'landscape_16_9'
+  /**
+   * Acceleration
+   *
+   * The acceleration level to use for the image generation.
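+   *
+   * A hedged sketch of a LoRA-flavoured request (illustrative values; the
+   * entry shape assumes fal's documented { path, scale } LoRA weight):
+   *
+   * @example
+   * const input: SchemaFlux2LoraInput = {
+   *   prompt: 'product shot of a ceramic mug, studio lighting',
+   *   acceleration: 'regular',
+   *   loras: [{ path: 'https://example.com/my-style.safetensors', scale: 1 }],
+   * }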
+   */
+  acceleration?: 'none' | 'regular' | 'high'
+  /**
+   * Output Format
+   *
+   * The format of the generated image.
+   */
+  output_format?: 'jpeg' | 'png' | 'webp'
+  /**
+   * Loras
+   *
+   * List of LoRA weights to apply (maximum 3). Each LoRA can be a URL, HuggingFace repo ID, or local path.
+   */
+  loras?: Array<SchemaLoraWeight>
+  /**
+   * Sync Mode
+   *
+   * If `True`, the media will be returned as a data URI and the output data won't be available in the request history.
+   */
+  sync_mode?: boolean
+  /**
+   * Guidance Scale
+   *
+   * Guidance Scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.
+   */
+  guidance_scale?: number
+  /**
+   * Seed
+   *
+   * The seed to use for the generation. If not provided, a random seed will be used.
+   */
+  seed?: number
+  /**
+   * Number of Inference Steps
+   *
+   * The number of inference steps to perform.
+   */
+  num_inference_steps?: number
+  /**
+   * Enable Prompt Expansion
+   *
+   * If set to true, the prompt will be expanded for better results.
+   */
+  enable_prompt_expansion?: boolean
+  /**
+   * Enable Safety Checker
+   *
+   * If set to true, the safety checker will be enabled.
+   */
+  enable_safety_checker?: boolean
+}
+
+/**
+ * TextToImageOutput
+ */
+export type SchemaRecraftV3TextToImageOutput = {
+  /**
+   * Images
+   */
+  images: Array<SchemaImage>
+}
+
+/**
+ * TextToImageInput
+ */
+export type SchemaRecraftV3TextToImageInput = {
+  /**
+   * Prompt
+   */
+  prompt: string
+  /**
+   * Image Size
+   */
+  image_size?:
+    | SchemaImageSize
+    | 'square_hd'
+    | 'square'
+    | 'portrait_4_3'
+    | 'portrait_16_9'
+    | 'landscape_4_3'
+    | 'landscape_16_9'
+  /**
+   * Style
+   *
+   * The style of the generated images. Vector images cost 2X as much.
+   */
+  style?:
+    | 'any'
+    | 'realistic_image'
+    | 'digital_illustration'
+    | 'vector_illustration'
+    | 'realistic_image/b_and_w'
+    | 'realistic_image/hard_flash'
+    | 'realistic_image/hdr'
+    | 'realistic_image/natural_light'
+    | 'realistic_image/studio_portrait'
+    | 'realistic_image/enterprise'
+    | 'realistic_image/motion_blur'
+    | 'realistic_image/evening_light'
+    | 'realistic_image/faded_nostalgia'
+    | 'realistic_image/forest_life'
+    | 'realistic_image/mystic_naturalism'
+    | 'realistic_image/natural_tones'
+    | 'realistic_image/organic_calm'
+    | 'realistic_image/real_life_glow'
+    | 'realistic_image/retro_realism'
+    | 'realistic_image/retro_snapshot'
+    | 'realistic_image/urban_drama'
+    | 'realistic_image/village_realism'
+    | 'realistic_image/warm_folk'
+    | 'digital_illustration/pixel_art'
+    | 'digital_illustration/hand_drawn'
+    | 'digital_illustration/grain'
+    | 'digital_illustration/infantile_sketch'
+    | 'digital_illustration/2d_art_poster'
+    | 'digital_illustration/handmade_3d'
+    | 'digital_illustration/hand_drawn_outline'
+    | 'digital_illustration/engraving_color'
+    | 'digital_illustration/2d_art_poster_2'
+    | 'digital_illustration/antiquarian'
+    | 'digital_illustration/bold_fantasy'
+    | 'digital_illustration/child_book'
+    | 'digital_illustration/child_books'
+    | 'digital_illustration/cover'
+    | 'digital_illustration/crosshatch'
+    | 'digital_illustration/digital_engraving'
+    | 'digital_illustration/expressionism'
+    | 'digital_illustration/freehand_details'
+    | 'digital_illustration/grain_20'
+    | 'digital_illustration/graphic_intensity'
+    | 'digital_illustration/hard_comics'
+    | 'digital_illustration/long_shadow'
+    | 'digital_illustration/modern_folk'
+    | 'digital_illustration/multicolor'
+    | 'digital_illustration/neon_calm'
+    | 'digital_illustration/noir'
+    | 'digital_illustration/nostalgic_pastel'
+    | 'digital_illustration/outline_details'
+    | 'digital_illustration/pastel_gradient'
+    | 'digital_illustration/pastel_sketch'
+    | 'digital_illustration/pop_art'
+    | 'digital_illustration/pop_renaissance'
+    | 'digital_illustration/street_art'
+    | 'digital_illustration/tablet_sketch'
+    | 'digital_illustration/urban_glow'
+    | 'digital_illustration/urban_sketching'
+    | 'digital_illustration/vanilla_dreams'
+    | 'digital_illustration/young_adult_book'
+    | 'digital_illustration/young_adult_book_2'
+    | 'vector_illustration/bold_stroke'
+    | 'vector_illustration/chemistry'
+    | 'vector_illustration/colored_stencil'
+    | 'vector_illustration/contour_pop_art'
+    | 'vector_illustration/cosmics'
+    | 'vector_illustration/cutout'
+    | 'vector_illustration/depressive'
+    | 'vector_illustration/editorial'
+    | 'vector_illustration/emotional_flat'
+    | 'vector_illustration/infographical'
+    | 'vector_illustration/marker_outline'
+    | 'vector_illustration/mosaic'
+    | 'vector_illustration/naivector'
+    | 'vector_illustration/roundish_flat'
+    | 'vector_illustration/segmented_colors'
+    | 'vector_illustration/sharp_contrast'
+    | 'vector_illustration/thin'
+    | 'vector_illustration/vector_photo'
+    | 'vector_illustration/vivid_shapes'
+    | 'vector_illustration/engraving'
+    | 'vector_illustration/line_art'
+    | 'vector_illustration/line_circuit'
+    | 'vector_illustration/linocut'
+  /**
+   * Colors
+   *
+   * An array of preferable colors
+   */
+  colors?: Array<SchemaRGBColor>
+  /**
+   * Enable Safety Checker
+   *
+   * If set to true, the safety checker will be enabled.
+   */
+  enable_safety_checker?: boolean
+  /**
+   * Style Id
+   *
+   * The ID of the custom style reference (optional)
+   */
+  style_id?: string
+}
+
+/**
+ * Output
+ */
+export type SchemaFluxProV11UltraOutput = {
+  /**
+   * Prompt
+   *
+   * The prompt used for generating the image.
+   */
+  prompt: string
+  /**
+   * Images
+   *
+   * The generated image files info.
+   */
+  images: Array<SchemaImage>
+  /**
+   * Timings
+   */
+  timings: {
+    [key: string]: number
+  }
+  /**
+   * Has Nsfw Concepts
+   *
+   * Whether the generated images contain NSFW concepts.
+   */
+  has_nsfw_concepts: Array<boolean>
+  /**
+   * Seed
+   *
+   *
+   * Seed of the generated Image. It will be the same value of the one passed in the
+   * input or the randomly generated that was used in case none was passed.
+   *
+   */
+  seed: number
+}
+
+/**
+ * FluxProUltraTextToImageInput
+ */
+export type SchemaFluxProV11UltraInput = {
+  /**
+   * Prompt
+   *
+   * The prompt to generate an image from.
+   */
+  prompt: string
+  /**
+   * Num Images
+   *
+   * The number of images to generate.
+   */
+  num_images?: number
+  /**
+   * Aspect Ratio
+   *
+   * The aspect ratio of the generated image.
+   */
+  aspect_ratio?:
+    | '21:9'
+    | '16:9'
+    | '4:3'
+    | '3:2'
+    | '1:1'
+    | '2:3'
+    | '3:4'
+    | '9:16'
+    | '9:21'
+    | string
+  /**
+   * Enhance Prompt
+   *
+   * Whether to enhance the prompt for better results.
+   */
+  enhance_prompt?: boolean
+  /**
+   * Output Format
+   *
+   * The format of the generated image.
+   */
+  output_format?: 'jpeg' | 'png'
+  /**
+   * Image URL
+   *
+   * The image URL to generate an image from.
+   */
+  image_url?: string
+  /**
+   * Sync Mode
+   *
+   * If `True`, the media will be returned as a data URI and the output data won't be available in the request history.
+   */
+  sync_mode?: boolean
+  /**
+   * Safety Tolerance
+   *
+   * The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive.
+   */
+  safety_tolerance?: '1' | '2' | '3' | '4' | '5' | '6'
+  /**
+   * Image Prompt Strength
+   *
+   * The strength of the image prompt, between 0 and 1.
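+   *
+   * A hedged sketch of image-prompt remixing, pairing `image_url` with this
+   * strength (illustrative values only):
+   *
+   * @example
+   * const input: SchemaFluxProV11UltraInput = {
+   *   prompt: 'the same scene at golden hour',
+   *   image_url: 'https://example.com/reference.png',
+   *   image_prompt_strength: 0.35,
+   * }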
+   */
+  image_prompt_strength?: number
+  /**
+   * Seed
+   *
+   *
+   * The same seed and the same prompt given to the same version of the model
+   * will output the same image every time.
+   *
+   */
+  seed?: number
+  /**
+   * Enable Safety Checker
+   *
+   * If set to true, the safety checker will be enabled.
+   */
+  enable_safety_checker?: boolean
+  /**
+   * Raw
+   *
+   * Generate less processed, more natural-looking images.
+   */
+  raw?: boolean
+}
+
+/**
+ * Imagen4TextToImageOutput
+ */
+export type SchemaImagen4PreviewOutput = {
+  /**
+   * Images
+   *
+   * The generated images.
+   */
+  images: Array<SchemaImage>
+  /**
+   * Description
+   *
+   * The description of the generated images.
+   */
+  description: string
+}
+
+/**
+ * Imagen4TextToImageInput
+ */
+export type SchemaImagen4PreviewInput = {
+  /**
+   * Prompt
+   *
+   * The text prompt to generate an image from.
+   */
+  prompt: string
+  /**
+   * Number of Images
+   *
+   * The number of images to generate.
+   */
+  num_images?: number
+  /**
+   * Aspect Ratio
+   *
+   * The aspect ratio of the generated image.
+   */
+  aspect_ratio?: '1:1' | '16:9' | '9:16' | '4:3' | '3:4'
+  /**
+   * Resolution
+   *
+   * The resolution of the generated image.
+   */
+  resolution?: '1K' | '2K'
+  /**
+   * Sync Mode
+   *
+   * If `True`, the media will be returned as a data URI and the output data won't be available in the request history.
+   */
+  sync_mode?: boolean
+  /**
+   * Output Format
+   *
+   * The format of the generated image.
+   */
+  output_format?: 'jpeg' | 'png' | 'webp'
+}
+
+export type SchemaQueueStatus = {
+  status: 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED'
+  /**
+   * The request id.
+   */
+  request_id: string
+  /**
+   * The response url.
+   */
+  response_url?: string
+  /**
+   * The status url.
+   */
+  status_url?: string
+  /**
+   * The cancel url.
+   */
+  cancel_url?: string
+  /**
+   * The logs.
+   */
+  logs?: {
+    [key: string]: unknown
+  }
+  /**
+   * The metrics.
+   */
+  metrics?: {
+    [key: string]: unknown
+  }
+  /**
+   * The queue position.
+   */
+  queue_position?: number
+}
+
+export type GetFalAiImagen4PreviewRequestsByRequestIdStatusData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: {
+    /**
+     * Whether to include logs (`1`) in the response or not (`0`).
+     */
+    logs?: number
+  }
+  url: '/fal-ai/imagen4/preview/requests/{request_id}/status'
+}
+
+export type GetFalAiImagen4PreviewRequestsByRequestIdStatusResponses = {
+  /**
+   * The request status.
+   */
+  200: SchemaQueueStatus
+}
+
+export type GetFalAiImagen4PreviewRequestsByRequestIdStatusResponse =
+  GetFalAiImagen4PreviewRequestsByRequestIdStatusResponses[keyof GetFalAiImagen4PreviewRequestsByRequestIdStatusResponses]
+
+export type PutFalAiImagen4PreviewRequestsByRequestIdCancelData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: never
+  url: '/fal-ai/imagen4/preview/requests/{request_id}/cancel'
+}
+
+export type PutFalAiImagen4PreviewRequestsByRequestIdCancelResponses = {
+  /**
+   * The request was cancelled.
+   */
+  200: {
+    /**
+     * Whether the request was cancelled successfully.
+     */
+    success?: boolean
+  }
+}
+
+export type PutFalAiImagen4PreviewRequestsByRequestIdCancelResponse =
+  PutFalAiImagen4PreviewRequestsByRequestIdCancelResponses[keyof PutFalAiImagen4PreviewRequestsByRequestIdCancelResponses]
+
+export type PostFalAiImagen4PreviewData = {
+  body: SchemaImagen4PreviewInput
+  path?: never
+  query?: never
+  url: '/fal-ai/imagen4/preview'
+}
+
+export type PostFalAiImagen4PreviewResponses = {
+  /**
+   * The request status.
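+   *
+   * Submitting returns a SchemaQueueStatus, not images; the output is fetched
+   * in a second step. A hedged sketch of the round-trip with plain fetch
+   * (queue.fal.run base URL and `Key` auth scheme per fal's queue docs; the
+   * FAL_KEY env var is an assumption):
+   *
+   * @example
+   * const headers = { Authorization: `Key ${process.env.FAL_KEY}` }
+   * const submitted = await fetch('https://queue.fal.run/fal-ai/imagen4/preview', {
+   *   method: 'POST',
+   *   headers: { ...headers, 'Content-Type': 'application/json' },
+   *   body: JSON.stringify({ prompt: 'a lighthouse at dawn' }),
+   * })
+   * const { status_url, response_url } = (await submitted.json()) as SchemaQueueStatus
+   * // Poll status_url until status === 'COMPLETED', then GET response_url,
+   * // which resolves to a SchemaImagen4PreviewOutput.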
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiImagen4PreviewResponse = + PostFalAiImagen4PreviewResponses[keyof PostFalAiImagen4PreviewResponses] + +export type GetFalAiImagen4PreviewRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/imagen4/preview/requests/{request_id}' +} + +export type GetFalAiImagen4PreviewRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImagen4PreviewOutput +} + +export type GetFalAiImagen4PreviewRequestsByRequestIdResponse = + GetFalAiImagen4PreviewRequestsByRequestIdResponses[keyof GetFalAiImagen4PreviewRequestsByRequestIdResponses] + +export type GetFalAiFluxProV11UltraRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-pro/v1.1-ultra/requests/{request_id}/status' +} + +export type GetFalAiFluxProV11UltraRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxProV11UltraRequestsByRequestIdStatusResponse = + GetFalAiFluxProV11UltraRequestsByRequestIdStatusResponses[keyof GetFalAiFluxProV11UltraRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxProV11UltraRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-pro/v1.1-ultra/requests/{request_id}/cancel' +} + +export type PutFalAiFluxProV11UltraRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFluxProV11UltraRequestsByRequestIdCancelResponse = + PutFalAiFluxProV11UltraRequestsByRequestIdCancelResponses[keyof PutFalAiFluxProV11UltraRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxProV11UltraData = { + body: SchemaFluxProV11UltraInput + path?: never + query?: never + url: '/fal-ai/flux-pro/v1.1-ultra' +} + +export type PostFalAiFluxProV11UltraResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxProV11UltraResponse = + PostFalAiFluxProV11UltraResponses[keyof PostFalAiFluxProV11UltraResponses] + +export type GetFalAiFluxProV11UltraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-pro/v1.1-ultra/requests/{request_id}' +} + +export type GetFalAiFluxProV11UltraRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxProV11UltraOutput +} + +export type GetFalAiFluxProV11UltraRequestsByRequestIdResponse = + GetFalAiFluxProV11UltraRequestsByRequestIdResponses[keyof GetFalAiFluxProV11UltraRequestsByRequestIdResponses] + +export type GetFalAiRecraftV3TextToImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/recraft/v3/text-to-image/requests/{request_id}/status' +} + +export type GetFalAiRecraftV3TextToImageRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiRecraftV3TextToImageRequestsByRequestIdStatusResponse = + GetFalAiRecraftV3TextToImageRequestsByRequestIdStatusResponses[keyof GetFalAiRecraftV3TextToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiRecraftV3TextToImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/recraft/v3/text-to-image/requests/{request_id}/cancel' +} + +export type PutFalAiRecraftV3TextToImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiRecraftV3TextToImageRequestsByRequestIdCancelResponse = + PutFalAiRecraftV3TextToImageRequestsByRequestIdCancelResponses[keyof PutFalAiRecraftV3TextToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiRecraftV3TextToImageData = { + body: SchemaRecraftV3TextToImageInput + path?: never + query?: never + url: '/fal-ai/recraft/v3/text-to-image' +} + +export type PostFalAiRecraftV3TextToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiRecraftV3TextToImageResponse = + PostFalAiRecraftV3TextToImageResponses[keyof PostFalAiRecraftV3TextToImageResponses] + +export type GetFalAiRecraftV3TextToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/recraft/v3/text-to-image/requests/{request_id}' +} + +export type GetFalAiRecraftV3TextToImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaRecraftV3TextToImageOutput +} + +export type GetFalAiRecraftV3TextToImageRequestsByRequestIdResponse = + GetFalAiRecraftV3TextToImageRequestsByRequestIdResponses[keyof GetFalAiRecraftV3TextToImageRequestsByRequestIdResponses] + +export type GetFalAiFlux2LoraRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-2/lora/requests/{request_id}/status' +} + +export type GetFalAiFlux2LoraRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlux2LoraRequestsByRequestIdStatusResponse = + GetFalAiFlux2LoraRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2LoraRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2LoraRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2/lora/requests/{request_id}/cancel' +} + +export type PutFalAiFlux2LoraRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFlux2LoraRequestsByRequestIdCancelResponse = + PutFalAiFlux2LoraRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2LoraRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2LoraData = { + body: SchemaFlux2LoraInput + path?: never + query?: never + url: '/fal-ai/flux-2/lora' +} + +export type PostFalAiFlux2LoraResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2LoraResponse = + PostFalAiFlux2LoraResponses[keyof PostFalAiFlux2LoraResponses] + +export type GetFalAiFlux2LoraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2/lora/requests/{request_id}' +} + +export type GetFalAiFlux2LoraRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux2LoraOutput +} + +export type GetFalAiFlux2LoraRequestsByRequestIdResponse = + GetFalAiFlux2LoraRequestsByRequestIdResponses[keyof GetFalAiFlux2LoraRequestsByRequestIdResponses] + +export type GetFalAiFlux2RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-2/requests/{request_id}/status' +} + +export type GetFalAiFlux2RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlux2RequestsByRequestIdStatusResponse = + GetFalAiFlux2RequestsByRequestIdStatusResponses[keyof GetFalAiFlux2RequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2/requests/{request_id}/cancel' +} + +export type PutFalAiFlux2RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFlux2RequestsByRequestIdCancelResponse = + PutFalAiFlux2RequestsByRequestIdCancelResponses[keyof PutFalAiFlux2RequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2Data = { + body: SchemaFlux2Input + path?: never + query?: never + url: '/fal-ai/flux-2' +} + +export type PostFalAiFlux2Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2Response = + PostFalAiFlux2Responses[keyof PostFalAiFlux2Responses] + +export type GetFalAiFlux2RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2/requests/{request_id}' +} + +export type GetFalAiFlux2RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux2Output +} + +export type GetFalAiFlux2RequestsByRequestIdResponse = + GetFalAiFlux2RequestsByRequestIdResponses[keyof GetFalAiFlux2RequestsByRequestIdResponses] + +export type GetFalAiFlux2ProRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-2-pro/requests/{request_id}/status' +} + +export type GetFalAiFlux2ProRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlux2ProRequestsByRequestIdStatusResponse = + GetFalAiFlux2ProRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2ProRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2ProRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-pro/requests/{request_id}/cancel' +} + +export type PutFalAiFlux2ProRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFlux2ProRequestsByRequestIdCancelResponse = + PutFalAiFlux2ProRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2ProRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2ProData = { + body: SchemaFlux2ProInput + path?: never + query?: never + url: '/fal-ai/flux-2-pro' +} + +export type PostFalAiFlux2ProResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2ProResponse = + PostFalAiFlux2ProResponses[keyof PostFalAiFlux2ProResponses] + +export type GetFalAiFlux2ProRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-pro/requests/{request_id}' +} + +export type GetFalAiFlux2ProRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux2ProOutput +} + +export type GetFalAiFlux2ProRequestsByRequestIdResponse = + GetFalAiFlux2ProRequestsByRequestIdResponses[keyof GetFalAiFlux2ProRequestsByRequestIdResponses] + +export type GetBriaTextToImage32RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/bria/text-to-image/3.2/requests/{request_id}/status' +} + +export type GetBriaTextToImage32RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetBriaTextToImage32RequestsByRequestIdStatusResponse = + GetBriaTextToImage32RequestsByRequestIdStatusResponses[keyof GetBriaTextToImage32RequestsByRequestIdStatusResponses] + +export type PutBriaTextToImage32RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/text-to-image/3.2/requests/{request_id}/cancel' +} + +export type PutBriaTextToImage32RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutBriaTextToImage32RequestsByRequestIdCancelResponse = + PutBriaTextToImage32RequestsByRequestIdCancelResponses[keyof PutBriaTextToImage32RequestsByRequestIdCancelResponses] + +export type PostBriaTextToImage32Data = { + body: SchemaTextToImage32Input + path?: never + query?: never + url: '/bria/text-to-image/3.2' +} + +export type PostBriaTextToImage32Responses = { + /** + * The request status. 
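+   *
+   * A hedged sketch of a minimal request body for this endpoint
+   * (illustrative values only):
+   *
+   * @example
+   * const body: SchemaTextToImage32Input = {
+   *   prompt: 'flat-lay photo of fresh herbs on slate',
+   *   aspect_ratio: '4:3',
+   *   prompt_enhancer: true,
+   * }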
+ */ + 200: SchemaQueueStatus +} + +export type PostBriaTextToImage32Response = + PostBriaTextToImage32Responses[keyof PostBriaTextToImage32Responses] + +export type GetBriaTextToImage32RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/text-to-image/3.2/requests/{request_id}' +} + +export type GetBriaTextToImage32RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaTextToImage32Output +} + +export type GetBriaTextToImage32RequestsByRequestIdResponse = + GetBriaTextToImage32RequestsByRequestIdResponses[keyof GetBriaTextToImage32RequestsByRequestIdResponses] + +export type GetFalAiImagen4PreviewFastRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/imagen4/preview/fast/requests/{request_id}/status' +} + +export type GetFalAiImagen4PreviewFastRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiImagen4PreviewFastRequestsByRequestIdStatusResponse = + GetFalAiImagen4PreviewFastRequestsByRequestIdStatusResponses[keyof GetFalAiImagen4PreviewFastRequestsByRequestIdStatusResponses] + +export type PutFalAiImagen4PreviewFastRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/imagen4/preview/fast/requests/{request_id}/cancel' +} + +export type PutFalAiImagen4PreviewFastRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiImagen4PreviewFastRequestsByRequestIdCancelResponse = + PutFalAiImagen4PreviewFastRequestsByRequestIdCancelResponses[keyof PutFalAiImagen4PreviewFastRequestsByRequestIdCancelResponses] + +export type PostFalAiImagen4PreviewFastData = { + body: SchemaImagen4PreviewFastInput + path?: never + query?: never + url: '/fal-ai/imagen4/preview/fast' +} + +export type PostFalAiImagen4PreviewFastResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImagen4PreviewFastResponse = + PostFalAiImagen4PreviewFastResponses[keyof PostFalAiImagen4PreviewFastResponses] + +export type GetFalAiImagen4PreviewFastRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/imagen4/preview/fast/requests/{request_id}' +} + +export type GetFalAiImagen4PreviewFastRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImagen4PreviewFastOutput +} + +export type GetFalAiImagen4PreviewFastRequestsByRequestIdResponse = + GetFalAiImagen4PreviewFastRequestsByRequestIdResponses[keyof GetFalAiImagen4PreviewFastRequestsByRequestIdResponses] + +export type GetFalAiHidreamI1FullRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/hidream-i1-full/requests/{request_id}/status' +} + +export type GetFalAiHidreamI1FullRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiHidreamI1FullRequestsByRequestIdStatusResponse = + GetFalAiHidreamI1FullRequestsByRequestIdStatusResponses[keyof GetFalAiHidreamI1FullRequestsByRequestIdStatusResponses] + +export type PutFalAiHidreamI1FullRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hidream-i1-full/requests/{request_id}/cancel' +} + +export type PutFalAiHidreamI1FullRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiHidreamI1FullRequestsByRequestIdCancelResponse = + PutFalAiHidreamI1FullRequestsByRequestIdCancelResponses[keyof PutFalAiHidreamI1FullRequestsByRequestIdCancelResponses] + +export type PostFalAiHidreamI1FullData = { + body: SchemaHidreamI1FullInput + path?: never + query?: never + url: '/fal-ai/hidream-i1-full' +} + +export type PostFalAiHidreamI1FullResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiHidreamI1FullResponse = + PostFalAiHidreamI1FullResponses[keyof PostFalAiHidreamI1FullResponses] + +export type GetFalAiHidreamI1FullRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hidream-i1-full/requests/{request_id}' +} + +export type GetFalAiHidreamI1FullRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaHidreamI1FullOutput +} + +export type GetFalAiHidreamI1FullRequestsByRequestIdResponse = + GetFalAiHidreamI1FullRequestsByRequestIdResponses[keyof GetFalAiHidreamI1FullRequestsByRequestIdResponses] + +export type GetFalAiHidreamI1DevRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/hidream-i1-dev/requests/{request_id}/status' +} + +export type GetFalAiHidreamI1DevRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiHidreamI1DevRequestsByRequestIdStatusResponse = + GetFalAiHidreamI1DevRequestsByRequestIdStatusResponses[keyof GetFalAiHidreamI1DevRequestsByRequestIdStatusResponses] + +export type PutFalAiHidreamI1DevRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hidream-i1-dev/requests/{request_id}/cancel' +} + +export type PutFalAiHidreamI1DevRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiHidreamI1DevRequestsByRequestIdCancelResponse = + PutFalAiHidreamI1DevRequestsByRequestIdCancelResponses[keyof PutFalAiHidreamI1DevRequestsByRequestIdCancelResponses] + +export type PostFalAiHidreamI1DevData = { + body: SchemaHidreamI1DevInput + path?: never + query?: never + url: '/fal-ai/hidream-i1-dev' +} + +export type PostFalAiHidreamI1DevResponses = { + /** + * The request status. 
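+   *
+   * After submitting, progress is read from the status endpoint; passing
+   * `logs=1` opts into log output. A hedged sketch (queue.fal.run base URL
+   * assumed; `id` is the request_id returned here):
+   *
+   * @example
+   * const status = await fetch(
+   *   `https://queue.fal.run/fal-ai/hidream-i1-dev/requests/${id}/status?logs=1`,
+   *   { headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+   * )
+   * const { status: state, queue_position } = (await status.json()) as SchemaQueueStatus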
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiHidreamI1DevResponse = + PostFalAiHidreamI1DevResponses[keyof PostFalAiHidreamI1DevResponses] + +export type GetFalAiHidreamI1DevRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hidream-i1-dev/requests/{request_id}' +} + +export type GetFalAiHidreamI1DevRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaHidreamI1DevOutput +} + +export type GetFalAiHidreamI1DevRequestsByRequestIdResponse = + GetFalAiHidreamI1DevRequestsByRequestIdResponses[keyof GetFalAiHidreamI1DevRequestsByRequestIdResponses] + +export type GetFalAiHidreamI1FastRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/hidream-i1-fast/requests/{request_id}/status' +} + +export type GetFalAiHidreamI1FastRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiHidreamI1FastRequestsByRequestIdStatusResponse = + GetFalAiHidreamI1FastRequestsByRequestIdStatusResponses[keyof GetFalAiHidreamI1FastRequestsByRequestIdStatusResponses] + +export type PutFalAiHidreamI1FastRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hidream-i1-fast/requests/{request_id}/cancel' +} + +export type PutFalAiHidreamI1FastRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiHidreamI1FastRequestsByRequestIdCancelResponse = + PutFalAiHidreamI1FastRequestsByRequestIdCancelResponses[keyof PutFalAiHidreamI1FastRequestsByRequestIdCancelResponses] + +export type PostFalAiHidreamI1FastData = { + body: SchemaHidreamI1FastInput + path?: never + query?: never + url: '/fal-ai/hidream-i1-fast' +} + +export type PostFalAiHidreamI1FastResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiHidreamI1FastResponse = + PostFalAiHidreamI1FastResponses[keyof PostFalAiHidreamI1FastResponses] + +export type GetFalAiHidreamI1FastRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hidream-i1-fast/requests/{request_id}' +} + +export type GetFalAiHidreamI1FastRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaHidreamI1FastOutput +} + +export type GetFalAiHidreamI1FastRequestsByRequestIdResponse = + GetFalAiHidreamI1FastRequestsByRequestIdResponses[keyof GetFalAiHidreamI1FastRequestsByRequestIdResponses] + +export type GetFalAiFluxDevRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux/dev/requests/{request_id}/status' +} + +export type GetFalAiFluxDevRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxDevRequestsByRequestIdStatusResponse = + GetFalAiFluxDevRequestsByRequestIdStatusResponses[keyof GetFalAiFluxDevRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxDevRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux/dev/requests/{request_id}/cancel' +} + +export type PutFalAiFluxDevRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFluxDevRequestsByRequestIdCancelResponse = + PutFalAiFluxDevRequestsByRequestIdCancelResponses[keyof PutFalAiFluxDevRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxDevData = { + body: SchemaFluxDevInput + path?: never + query?: never + url: '/fal-ai/flux/dev' +} + +export type PostFalAiFluxDevResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxDevResponse = + PostFalAiFluxDevResponses[keyof PostFalAiFluxDevResponses] + +export type GetFalAiFluxDevRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux/dev/requests/{request_id}' +} + +export type GetFalAiFluxDevRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxDevOutput +} + +export type GetFalAiFluxDevRequestsByRequestIdResponse = + GetFalAiFluxDevRequestsByRequestIdResponses[keyof GetFalAiFluxDevRequestsByRequestIdResponses] + +export type GetFalAiIdeogramV2RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ideogram/v2/requests/{request_id}/status' +} + +export type GetFalAiIdeogramV2RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiIdeogramV2RequestsByRequestIdStatusResponse = + GetFalAiIdeogramV2RequestsByRequestIdStatusResponses[keyof GetFalAiIdeogramV2RequestsByRequestIdStatusResponses] + +export type PutFalAiIdeogramV2RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ideogram/v2/requests/{request_id}/cancel' +} + +export type PutFalAiIdeogramV2RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiIdeogramV2RequestsByRequestIdCancelResponse = + PutFalAiIdeogramV2RequestsByRequestIdCancelResponses[keyof PutFalAiIdeogramV2RequestsByRequestIdCancelResponses] + +export type PostFalAiIdeogramV2Data = { + body: SchemaIdeogramV2Input + path?: never + query?: never + url: '/fal-ai/ideogram/v2' +} + +export type PostFalAiIdeogramV2Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiIdeogramV2Response = + PostFalAiIdeogramV2Responses[keyof PostFalAiIdeogramV2Responses] + +export type GetFalAiIdeogramV2RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ideogram/v2/requests/{request_id}' +} + +export type GetFalAiIdeogramV2RequestsByRequestIdResponses = { + /** + * Result of the request. 
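+   *
+   * The `...Response` alias below indexes this map with `keyof`, collapsing
+   * the per-status-code object into a union of its value types. The same
+   * pattern in miniature:
+   *
+   * @example
+   * type Responses = { 200: SchemaIdeogramV2Output }
+   * type Response = Responses[keyof Responses] // = SchemaIdeogramV2Output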
+ */ + 200: SchemaIdeogramV2Output +} + +export type GetFalAiIdeogramV2RequestsByRequestIdResponse = + GetFalAiIdeogramV2RequestsByRequestIdResponses[keyof GetFalAiIdeogramV2RequestsByRequestIdResponses] + +export type GetFalAiStableDiffusionV35LargeRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/stable-diffusion-v35-large/requests/{request_id}/status' +} + +export type GetFalAiStableDiffusionV35LargeRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiStableDiffusionV35LargeRequestsByRequestIdStatusResponse = + GetFalAiStableDiffusionV35LargeRequestsByRequestIdStatusResponses[keyof GetFalAiStableDiffusionV35LargeRequestsByRequestIdStatusResponses] + +export type PutFalAiStableDiffusionV35LargeRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/stable-diffusion-v35-large/requests/{request_id}/cancel' +} + +export type PutFalAiStableDiffusionV35LargeRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiStableDiffusionV35LargeRequestsByRequestIdCancelResponse = + PutFalAiStableDiffusionV35LargeRequestsByRequestIdCancelResponses[keyof PutFalAiStableDiffusionV35LargeRequestsByRequestIdCancelResponses] + +export type PostFalAiStableDiffusionV35LargeData = { + body: SchemaStableDiffusionV35LargeInput + path?: never + query?: never + url: '/fal-ai/stable-diffusion-v35-large' +} + +export type PostFalAiStableDiffusionV35LargeResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiStableDiffusionV35LargeResponse = + PostFalAiStableDiffusionV35LargeResponses[keyof PostFalAiStableDiffusionV35LargeResponses] + +export type GetFalAiStableDiffusionV35LargeRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/stable-diffusion-v35-large/requests/{request_id}' +} + +export type GetFalAiStableDiffusionV35LargeRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaStableDiffusionV35LargeOutput +} + +export type GetFalAiStableDiffusionV35LargeRequestsByRequestIdResponse = + GetFalAiStableDiffusionV35LargeRequestsByRequestIdResponses[keyof GetFalAiStableDiffusionV35LargeRequestsByRequestIdResponses] + +export type GetFalAiFluxGeneralRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-general/requests/{request_id}/status' +} + +export type GetFalAiFluxGeneralRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxGeneralRequestsByRequestIdStatusResponse = + GetFalAiFluxGeneralRequestsByRequestIdStatusResponses[keyof GetFalAiFluxGeneralRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxGeneralRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-general/requests/{request_id}/cancel' +} + +export type PutFalAiFluxGeneralRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFluxGeneralRequestsByRequestIdCancelResponse = + PutFalAiFluxGeneralRequestsByRequestIdCancelResponses[keyof PutFalAiFluxGeneralRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxGeneralData = { + body: SchemaFluxGeneralInput + path?: never + query?: never + url: '/fal-ai/flux-general' +} + +export type PostFalAiFluxGeneralResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxGeneralResponse = + PostFalAiFluxGeneralResponses[keyof PostFalAiFluxGeneralResponses] + +export type GetFalAiFluxGeneralRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-general/requests/{request_id}' +} + +export type GetFalAiFluxGeneralRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxGeneralOutput +} + +export type GetFalAiFluxGeneralRequestsByRequestIdResponse = + GetFalAiFluxGeneralRequestsByRequestIdResponses[keyof GetFalAiFluxGeneralRequestsByRequestIdResponses] + +export type GetFalAiFluxLoraRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-lora/requests/{request_id}/status' +} + +export type GetFalAiFluxLoraRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxLoraRequestsByRequestIdStatusResponse = + GetFalAiFluxLoraRequestsByRequestIdStatusResponses[keyof GetFalAiFluxLoraRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxLoraRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-lora/requests/{request_id}/cancel' +} + +export type PutFalAiFluxLoraRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFluxLoraRequestsByRequestIdCancelResponse = + PutFalAiFluxLoraRequestsByRequestIdCancelResponses[keyof PutFalAiFluxLoraRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxLoraData = { + body: SchemaFluxLoraInput + path?: never + query?: never + url: '/fal-ai/flux-lora' +} + +export type PostFalAiFluxLoraResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxLoraResponse = + PostFalAiFluxLoraResponses[keyof PostFalAiFluxLoraResponses] + +export type GetFalAiFluxLoraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-lora/requests/{request_id}' +} + +export type GetFalAiFluxLoraRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxLoraOutput +} + +export type GetFalAiFluxLoraRequestsByRequestIdResponse = + GetFalAiFluxLoraRequestsByRequestIdResponses[keyof GetFalAiFluxLoraRequestsByRequestIdResponses] + +export type GetFalAiZImageBaseLoraRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/z-image/base/lora/requests/{request_id}/status' +} + +export type GetFalAiZImageBaseLoraRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiZImageBaseLoraRequestsByRequestIdStatusResponse = + GetFalAiZImageBaseLoraRequestsByRequestIdStatusResponses[keyof GetFalAiZImageBaseLoraRequestsByRequestIdStatusResponses] + +export type PutFalAiZImageBaseLoraRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/z-image/base/lora/requests/{request_id}/cancel' +} + +export type PutFalAiZImageBaseLoraRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiZImageBaseLoraRequestsByRequestIdCancelResponse = + PutFalAiZImageBaseLoraRequestsByRequestIdCancelResponses[keyof PutFalAiZImageBaseLoraRequestsByRequestIdCancelResponses] + +export type PostFalAiZImageBaseLoraData = { + body: SchemaZImageBaseLoraInput + path?: never + query?: never + url: '/fal-ai/z-image/base/lora' +} + +export type PostFalAiZImageBaseLoraResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiZImageBaseLoraResponse = + PostFalAiZImageBaseLoraResponses[keyof PostFalAiZImageBaseLoraResponses] + +export type GetFalAiZImageBaseLoraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/z-image/base/lora/requests/{request_id}' +} + +export type GetFalAiZImageBaseLoraRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaZImageBaseLoraOutput +} + +export type GetFalAiZImageBaseLoraRequestsByRequestIdResponse = + GetFalAiZImageBaseLoraRequestsByRequestIdResponses[keyof GetFalAiZImageBaseLoraRequestsByRequestIdResponses] + +export type GetFalAiZImageBaseRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/z-image/base/requests/{request_id}/status' +} + +export type GetFalAiZImageBaseRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiZImageBaseRequestsByRequestIdStatusResponse = + GetFalAiZImageBaseRequestsByRequestIdStatusResponses[keyof GetFalAiZImageBaseRequestsByRequestIdStatusResponses] + +export type PutFalAiZImageBaseRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/z-image/base/requests/{request_id}/cancel' +} + +export type PutFalAiZImageBaseRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiZImageBaseRequestsByRequestIdCancelResponse = + PutFalAiZImageBaseRequestsByRequestIdCancelResponses[keyof PutFalAiZImageBaseRequestsByRequestIdCancelResponses] + +export type PostFalAiZImageBaseData = { + body: SchemaZImageBaseInput + path?: never + query?: never + url: '/fal-ai/z-image/base' +} + +export type PostFalAiZImageBaseResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiZImageBaseResponse = + PostFalAiZImageBaseResponses[keyof PostFalAiZImageBaseResponses] + +export type GetFalAiZImageBaseRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/z-image/base/requests/{request_id}' +} + +export type GetFalAiZImageBaseRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaZImageBaseOutput +} + +export type GetFalAiZImageBaseRequestsByRequestIdResponse = + GetFalAiZImageBaseRequestsByRequestIdResponses[keyof GetFalAiZImageBaseRequestsByRequestIdResponses] + +export type GetFalAiFlux2Klein9bBaseLoraRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-2/klein/9b/base/lora/requests/{request_id}/status' +} + +export type GetFalAiFlux2Klein9bBaseLoraRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlux2Klein9bBaseLoraRequestsByRequestIdStatusResponse = + GetFalAiFlux2Klein9bBaseLoraRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2Klein9bBaseLoraRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2Klein9bBaseLoraRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2/klein/9b/base/lora/requests/{request_id}/cancel' +} + +export type PutFalAiFlux2Klein9bBaseLoraRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFlux2Klein9bBaseLoraRequestsByRequestIdCancelResponse = + PutFalAiFlux2Klein9bBaseLoraRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2Klein9bBaseLoraRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2Klein9bBaseLoraData = { + body: SchemaFlux2Klein9bBaseLoraInput + path?: never + query?: never + url: '/fal-ai/flux-2/klein/9b/base/lora' +} + +export type PostFalAiFlux2Klein9bBaseLoraResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2Klein9bBaseLoraResponse = + PostFalAiFlux2Klein9bBaseLoraResponses[keyof PostFalAiFlux2Klein9bBaseLoraResponses] + +export type GetFalAiFlux2Klein9bBaseLoraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2/klein/9b/base/lora/requests/{request_id}' +} + +export type GetFalAiFlux2Klein9bBaseLoraRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux2Klein9bBaseLoraOutput +} + +export type GetFalAiFlux2Klein9bBaseLoraRequestsByRequestIdResponse = + GetFalAiFlux2Klein9bBaseLoraRequestsByRequestIdResponses[keyof GetFalAiFlux2Klein9bBaseLoraRequestsByRequestIdResponses] + +export type GetFalAiFlux2Klein4bBaseLoraRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-2/klein/4b/base/lora/requests/{request_id}/status' +} + +export type GetFalAiFlux2Klein4bBaseLoraRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlux2Klein4bBaseLoraRequestsByRequestIdStatusResponse = + GetFalAiFlux2Klein4bBaseLoraRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2Klein4bBaseLoraRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2Klein4bBaseLoraRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2/klein/4b/base/lora/requests/{request_id}/cancel' +} + +export type PutFalAiFlux2Klein4bBaseLoraRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFlux2Klein4bBaseLoraRequestsByRequestIdCancelResponse = + PutFalAiFlux2Klein4bBaseLoraRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2Klein4bBaseLoraRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2Klein4bBaseLoraData = { + body: SchemaFlux2Klein4bBaseLoraInput + path?: never + query?: never + url: '/fal-ai/flux-2/klein/4b/base/lora' +} + +export type PostFalAiFlux2Klein4bBaseLoraResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2Klein4bBaseLoraResponse = + PostFalAiFlux2Klein4bBaseLoraResponses[keyof PostFalAiFlux2Klein4bBaseLoraResponses] + +export type GetFalAiFlux2Klein4bBaseLoraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2/klein/4b/base/lora/requests/{request_id}' +} + +export type GetFalAiFlux2Klein4bBaseLoraRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux2Klein4bBaseLoraOutput +} + +export type GetFalAiFlux2Klein4bBaseLoraRequestsByRequestIdResponse = + GetFalAiFlux2Klein4bBaseLoraRequestsByRequestIdResponses[keyof GetFalAiFlux2Klein4bBaseLoraRequestsByRequestIdResponses] + +export type GetFalAiFlux2Klein9bBaseRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/flux-2/klein/9b/base/requests/{request_id}/status' +} + +export type GetFalAiFlux2Klein9bBaseRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlux2Klein9bBaseRequestsByRequestIdStatusResponse = + GetFalAiFlux2Klein9bBaseRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2Klein9bBaseRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2Klein9bBaseRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2/klein/9b/base/requests/{request_id}/cancel' +} + +export type PutFalAiFlux2Klein9bBaseRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFlux2Klein9bBaseRequestsByRequestIdCancelResponse = + PutFalAiFlux2Klein9bBaseRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2Klein9bBaseRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2Klein9bBaseData = { + body: SchemaFlux2Klein9bBaseInput + path?: never + query?: never + url: '/fal-ai/flux-2/klein/9b/base' +} + +export type PostFalAiFlux2Klein9bBaseResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2Klein9bBaseResponse = + PostFalAiFlux2Klein9bBaseResponses[keyof PostFalAiFlux2Klein9bBaseResponses] + +export type GetFalAiFlux2Klein9bBaseRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2/klein/9b/base/requests/{request_id}' +} + +export type GetFalAiFlux2Klein9bBaseRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux2Klein9bBaseOutput +} + +export type GetFalAiFlux2Klein9bBaseRequestsByRequestIdResponse = + GetFalAiFlux2Klein9bBaseRequestsByRequestIdResponses[keyof GetFalAiFlux2Klein9bBaseRequestsByRequestIdResponses] + +export type GetFalAiFlux2Klein4bBaseRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-2/klein/4b/base/requests/{request_id}/status' +} + +export type GetFalAiFlux2Klein4bBaseRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlux2Klein4bBaseRequestsByRequestIdStatusResponse = + GetFalAiFlux2Klein4bBaseRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2Klein4bBaseRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2Klein4bBaseRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2/klein/4b/base/requests/{request_id}/cancel' +} + +export type PutFalAiFlux2Klein4bBaseRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiFlux2Klein4bBaseRequestsByRequestIdCancelResponse = + PutFalAiFlux2Klein4bBaseRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2Klein4bBaseRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2Klein4bBaseData = { + body: SchemaFlux2Klein4bBaseInput + path?: never + query?: never + url: '/fal-ai/flux-2/klein/4b/base' +} + +export type PostFalAiFlux2Klein4bBaseResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2Klein4bBaseResponse = + PostFalAiFlux2Klein4bBaseResponses[keyof PostFalAiFlux2Klein4bBaseResponses] + +export type GetFalAiFlux2Klein4bBaseRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2/klein/4b/base/requests/{request_id}' +} + +export type GetFalAiFlux2Klein4bBaseRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux2Klein4bBaseOutput +} + +export type GetFalAiFlux2Klein4bBaseRequestsByRequestIdResponse = + GetFalAiFlux2Klein4bBaseRequestsByRequestIdResponses[keyof GetFalAiFlux2Klein4bBaseRequestsByRequestIdResponses] + +export type GetFalAiFlux2Klein9bRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-2/klein/9b/requests/{request_id}/status' +} + +export type GetFalAiFlux2Klein9bRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlux2Klein9bRequestsByRequestIdStatusResponse = + GetFalAiFlux2Klein9bRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2Klein9bRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2Klein9bRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2/klein/9b/requests/{request_id}/cancel' +} + +export type PutFalAiFlux2Klein9bRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFlux2Klein9bRequestsByRequestIdCancelResponse = + PutFalAiFlux2Klein9bRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2Klein9bRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2Klein9bData = { + body: SchemaFlux2Klein9bInput + path?: never + query?: never + url: '/fal-ai/flux-2/klein/9b' +} + +export type PostFalAiFlux2Klein9bResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2Klein9bResponse = + PostFalAiFlux2Klein9bResponses[keyof PostFalAiFlux2Klein9bResponses] + +export type GetFalAiFlux2Klein9bRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2/klein/9b/requests/{request_id}' +} + +export type GetFalAiFlux2Klein9bRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaFlux2Klein9bOutput +} + +export type GetFalAiFlux2Klein9bRequestsByRequestIdResponse = + GetFalAiFlux2Klein9bRequestsByRequestIdResponses[keyof GetFalAiFlux2Klein9bRequestsByRequestIdResponses] + +export type GetFalAiFlux2Klein4bRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-2/klein/4b/requests/{request_id}/status' +} + +export type GetFalAiFlux2Klein4bRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlux2Klein4bRequestsByRequestIdStatusResponse = + GetFalAiFlux2Klein4bRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2Klein4bRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2Klein4bRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2/klein/4b/requests/{request_id}/cancel' +} + +export type PutFalAiFlux2Klein4bRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFlux2Klein4bRequestsByRequestIdCancelResponse = + PutFalAiFlux2Klein4bRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2Klein4bRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2Klein4bData = { + body: SchemaFlux2Klein4bInput + path?: never + query?: never + url: '/fal-ai/flux-2/klein/4b' +} + +export type PostFalAiFlux2Klein4bResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2Klein4bResponse = + PostFalAiFlux2Klein4bResponses[keyof PostFalAiFlux2Klein4bResponses] + +export type GetFalAiFlux2Klein4bRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2/klein/4b/requests/{request_id}' +} + +export type GetFalAiFlux2Klein4bRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux2Klein4bOutput +} + +export type GetFalAiFlux2Klein4bRequestsByRequestIdResponse = + GetFalAiFlux2Klein4bRequestsByRequestIdResponses[keyof GetFalAiFlux2Klein4bRequestsByRequestIdResponses] + +export type GetImagineartImagineart15ProPreviewTextToImageRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/imagineart/imagineart-1.5-pro-preview/text-to-image/requests/{request_id}/status' + } + +export type GetImagineartImagineart15ProPreviewTextToImageRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetImagineartImagineart15ProPreviewTextToImageRequestsByRequestIdStatusResponse = + GetImagineartImagineart15ProPreviewTextToImageRequestsByRequestIdStatusResponses[keyof GetImagineartImagineart15ProPreviewTextToImageRequestsByRequestIdStatusResponses] + +export type PutImagineartImagineart15ProPreviewTextToImageRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/imagineart/imagineart-1.5-pro-preview/text-to-image/requests/{request_id}/cancel' + } + +export type PutImagineartImagineart15ProPreviewTextToImageRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutImagineartImagineart15ProPreviewTextToImageRequestsByRequestIdCancelResponse = + PutImagineartImagineart15ProPreviewTextToImageRequestsByRequestIdCancelResponses[keyof PutImagineartImagineart15ProPreviewTextToImageRequestsByRequestIdCancelResponses] + +export type PostImagineartImagineart15ProPreviewTextToImageData = { + body: SchemaImagineart15ProPreviewTextToImageInput + path?: never + query?: never + url: '/imagineart/imagineart-1.5-pro-preview/text-to-image' +} + +export type PostImagineartImagineart15ProPreviewTextToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostImagineartImagineart15ProPreviewTextToImageResponse = + PostImagineartImagineart15ProPreviewTextToImageResponses[keyof PostImagineartImagineart15ProPreviewTextToImageResponses] + +export type GetImagineartImagineart15ProPreviewTextToImageRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/imagineart/imagineart-1.5-pro-preview/text-to-image/requests/{request_id}' + } + +export type GetImagineartImagineart15ProPreviewTextToImageRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaImagineart15ProPreviewTextToImageOutput + } + +export type GetImagineartImagineart15ProPreviewTextToImageRequestsByRequestIdResponse = + GetImagineartImagineart15ProPreviewTextToImageRequestsByRequestIdResponses[keyof GetImagineartImagineart15ProPreviewTextToImageRequestsByRequestIdResponses] + +export type GetFalAiGlmImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/glm-image/requests/{request_id}/status' +} + +export type GetFalAiGlmImageRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiGlmImageRequestsByRequestIdStatusResponse = + GetFalAiGlmImageRequestsByRequestIdStatusResponses[keyof GetFalAiGlmImageRequestsByRequestIdStatusResponses] + +export type PutFalAiGlmImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/glm-image/requests/{request_id}/cancel' +} + +export type PutFalAiGlmImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiGlmImageRequestsByRequestIdCancelResponse = + PutFalAiGlmImageRequestsByRequestIdCancelResponses[keyof PutFalAiGlmImageRequestsByRequestIdCancelResponses] + +export type PostFalAiGlmImageData = { + body: SchemaGlmImageInput + path?: never + query?: never + url: '/fal-ai/glm-image' +} + +export type PostFalAiGlmImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiGlmImageResponse = + PostFalAiGlmImageResponses[keyof PostFalAiGlmImageResponses] + +export type GetFalAiGlmImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/glm-image/requests/{request_id}' +} + +export type GetFalAiGlmImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaGlmImageOutput +} + +export type GetFalAiGlmImageRequestsByRequestIdResponse = + GetFalAiGlmImageRequestsByRequestIdResponses[keyof GetFalAiGlmImageRequestsByRequestIdResponses] + +export type GetFalAiQwenImage2512LoraRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-2512/lora/requests/{request_id}/status' +} + +export type GetFalAiQwenImage2512LoraRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiQwenImage2512LoraRequestsByRequestIdStatusResponse = + GetFalAiQwenImage2512LoraRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImage2512LoraRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImage2512LoraRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-2512/lora/requests/{request_id}/cancel' +} + +export type PutFalAiQwenImage2512LoraRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiQwenImage2512LoraRequestsByRequestIdCancelResponse = + PutFalAiQwenImage2512LoraRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImage2512LoraRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImage2512LoraData = { + body: SchemaQwenImage2512LoraInput + path?: never + query?: never + url: '/fal-ai/qwen-image-2512/lora' +} + +export type PostFalAiQwenImage2512LoraResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImage2512LoraResponse = + PostFalAiQwenImage2512LoraResponses[keyof PostFalAiQwenImage2512LoraResponses] + +export type GetFalAiQwenImage2512LoraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-2512/lora/requests/{request_id}' +} + +export type GetFalAiQwenImage2512LoraRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaQwenImage2512LoraOutput +} + +export type GetFalAiQwenImage2512LoraRequestsByRequestIdResponse = + GetFalAiQwenImage2512LoraRequestsByRequestIdResponses[keyof GetFalAiQwenImage2512LoraRequestsByRequestIdResponses] + +export type GetFalAiQwenImage2512RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-2512/requests/{request_id}/status' +} + +export type GetFalAiQwenImage2512RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiQwenImage2512RequestsByRequestIdStatusResponse = + GetFalAiQwenImage2512RequestsByRequestIdStatusResponses[keyof GetFalAiQwenImage2512RequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImage2512RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-2512/requests/{request_id}/cancel' +} + +export type PutFalAiQwenImage2512RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiQwenImage2512RequestsByRequestIdCancelResponse = + PutFalAiQwenImage2512RequestsByRequestIdCancelResponses[keyof PutFalAiQwenImage2512RequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImage2512Data = { + body: SchemaQwenImage2512Input + path?: never + query?: never + url: '/fal-ai/qwen-image-2512' +} + +export type PostFalAiQwenImage2512Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImage2512Response = + PostFalAiQwenImage2512Responses[keyof PostFalAiQwenImage2512Responses] + +export type GetFalAiQwenImage2512RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-2512/requests/{request_id}' +} + +export type GetFalAiQwenImage2512RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaQwenImage2512Output +} + +export type GetFalAiQwenImage2512RequestsByRequestIdResponse = + GetFalAiQwenImage2512RequestsByRequestIdResponses[keyof GetFalAiQwenImage2512RequestsByRequestIdResponses] + +export type GetWanV26TextToImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/wan/v2.6/text-to-image/requests/{request_id}/status' +} + +export type GetWanV26TextToImageRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetWanV26TextToImageRequestsByRequestIdStatusResponse = + GetWanV26TextToImageRequestsByRequestIdStatusResponses[keyof GetWanV26TextToImageRequestsByRequestIdStatusResponses] + +export type PutWanV26TextToImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/wan/v2.6/text-to-image/requests/{request_id}/cancel' +} + +export type PutWanV26TextToImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutWanV26TextToImageRequestsByRequestIdCancelResponse = + PutWanV26TextToImageRequestsByRequestIdCancelResponses[keyof PutWanV26TextToImageRequestsByRequestIdCancelResponses] + +export type PostWanV26TextToImageData = { + body: SchemaV26TextToImageInput + path?: never + query?: never + url: '/wan/v2.6/text-to-image' +} + +export type PostWanV26TextToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostWanV26TextToImageResponse = + PostWanV26TextToImageResponses[keyof PostWanV26TextToImageResponses] + +export type GetWanV26TextToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/wan/v2.6/text-to-image/requests/{request_id}' +} + +export type GetWanV26TextToImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaV26TextToImageOutput +} + +export type GetWanV26TextToImageRequestsByRequestIdResponse = + GetWanV26TextToImageRequestsByRequestIdResponses[keyof GetWanV26TextToImageRequestsByRequestIdResponses] + +export type GetFalAiFlux2FlashRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-2/flash/requests/{request_id}/status' +} + +export type GetFalAiFlux2FlashRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlux2FlashRequestsByRequestIdStatusResponse = + GetFalAiFlux2FlashRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2FlashRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2FlashRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2/flash/requests/{request_id}/cancel' +} + +export type PutFalAiFlux2FlashRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFlux2FlashRequestsByRequestIdCancelResponse = + PutFalAiFlux2FlashRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2FlashRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2FlashData = { + body: SchemaFlux2FlashInput + path?: never + query?: never + url: '/fal-ai/flux-2/flash' +} + +export type PostFalAiFlux2FlashResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2FlashResponse = + PostFalAiFlux2FlashResponses[keyof PostFalAiFlux2FlashResponses] + +export type GetFalAiFlux2FlashRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2/flash/requests/{request_id}' +} + +export type GetFalAiFlux2FlashRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux2FlashOutput +} + +export type GetFalAiFlux2FlashRequestsByRequestIdResponse = + GetFalAiFlux2FlashRequestsByRequestIdResponses[keyof GetFalAiFlux2FlashRequestsByRequestIdResponses] + +export type GetFalAiGptImage15RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/gpt-image-1.5/requests/{request_id}/status' +} + +export type GetFalAiGptImage15RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiGptImage15RequestsByRequestIdStatusResponse = + GetFalAiGptImage15RequestsByRequestIdStatusResponses[keyof GetFalAiGptImage15RequestsByRequestIdStatusResponses] + +export type PutFalAiGptImage15RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/gpt-image-1.5/requests/{request_id}/cancel' +} + +export type PutFalAiGptImage15RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiGptImage15RequestsByRequestIdCancelResponse = + PutFalAiGptImage15RequestsByRequestIdCancelResponses[keyof PutFalAiGptImage15RequestsByRequestIdCancelResponses] + +export type PostFalAiGptImage15Data = { + body: SchemaGptImage15Input + path?: never + query?: never + url: '/fal-ai/gpt-image-1.5' +} + +export type PostFalAiGptImage15Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiGptImage15Response = + PostFalAiGptImage15Responses[keyof PostFalAiGptImage15Responses] + +export type GetFalAiGptImage15RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/gpt-image-1.5/requests/{request_id}' +} + +export type GetFalAiGptImage15RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaGptImage15Output +} + +export type GetFalAiGptImage15RequestsByRequestIdResponse = + GetFalAiGptImage15RequestsByRequestIdResponses[keyof GetFalAiGptImage15RequestsByRequestIdResponses] + +export type GetBriaFiboLiteGenerateRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/bria/fibo-lite/generate/requests/{request_id}/status' +} + +export type GetBriaFiboLiteGenerateRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetBriaFiboLiteGenerateRequestsByRequestIdStatusResponse = + GetBriaFiboLiteGenerateRequestsByRequestIdStatusResponses[keyof GetBriaFiboLiteGenerateRequestsByRequestIdStatusResponses] + +export type PutBriaFiboLiteGenerateRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/fibo-lite/generate/requests/{request_id}/cancel' +} + +export type PutBriaFiboLiteGenerateRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutBriaFiboLiteGenerateRequestsByRequestIdCancelResponse = + PutBriaFiboLiteGenerateRequestsByRequestIdCancelResponses[keyof PutBriaFiboLiteGenerateRequestsByRequestIdCancelResponses] + +export type PostBriaFiboLiteGenerateData = { + body: SchemaFiboLiteGenerateInput + path?: never + query?: never + url: '/bria/fibo-lite/generate' +} + +export type PostBriaFiboLiteGenerateResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostBriaFiboLiteGenerateResponse = + PostBriaFiboLiteGenerateResponses[keyof PostBriaFiboLiteGenerateResponses] + +export type GetBriaFiboLiteGenerateRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/fibo-lite/generate/requests/{request_id}' +} + +export type GetBriaFiboLiteGenerateRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFiboLiteGenerateOutput +} + +export type GetBriaFiboLiteGenerateRequestsByRequestIdResponse = + GetBriaFiboLiteGenerateRequestsByRequestIdResponses[keyof GetBriaFiboLiteGenerateRequestsByRequestIdResponses] + +export type GetFalAiFlux2TurboRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-2/turbo/requests/{request_id}/status' +} + +export type GetFalAiFlux2TurboRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlux2TurboRequestsByRequestIdStatusResponse = + GetFalAiFlux2TurboRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2TurboRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2TurboRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2/turbo/requests/{request_id}/cancel' +} + +export type PutFalAiFlux2TurboRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFlux2TurboRequestsByRequestIdCancelResponse = + PutFalAiFlux2TurboRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2TurboRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2TurboData = { + body: SchemaFlux2TurboInput + path?: never + query?: never + url: '/fal-ai/flux-2/turbo' +} + +export type PostFalAiFlux2TurboResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2TurboResponse = + PostFalAiFlux2TurboResponses[keyof PostFalAiFlux2TurboResponses] + +export type GetFalAiFlux2TurboRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2/turbo/requests/{request_id}' +} + +export type GetFalAiFlux2TurboRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux2TurboOutput +} + +export type GetFalAiFlux2TurboRequestsByRequestIdResponse = + GetFalAiFlux2TurboRequestsByRequestIdResponses[keyof GetFalAiFlux2TurboRequestsByRequestIdResponses] + +export type GetFalAiFlux2MaxRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-2-max/requests/{request_id}/status' +} + +export type GetFalAiFlux2MaxRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlux2MaxRequestsByRequestIdStatusResponse = + GetFalAiFlux2MaxRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2MaxRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2MaxRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-max/requests/{request_id}/cancel' +} + +export type PutFalAiFlux2MaxRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFlux2MaxRequestsByRequestIdCancelResponse = + PutFalAiFlux2MaxRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2MaxRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2MaxData = { + body: SchemaFlux2MaxInput + path?: never + query?: never + url: '/fal-ai/flux-2-max' +} + +export type PostFalAiFlux2MaxResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2MaxResponse = + PostFalAiFlux2MaxResponses[keyof PostFalAiFlux2MaxResponses] + +export type GetFalAiFlux2MaxRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-max/requests/{request_id}' +} + +export type GetFalAiFlux2MaxRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux2MaxOutput +} + +export type GetFalAiFlux2MaxRequestsByRequestIdResponse = + GetFalAiFlux2MaxRequestsByRequestIdResponses[keyof GetFalAiFlux2MaxRequestsByRequestIdResponses] + +export type GetFalAiLongcatImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/longcat-image/requests/{request_id}/status' +} + +export type GetFalAiLongcatImageRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLongcatImageRequestsByRequestIdStatusResponse = + GetFalAiLongcatImageRequestsByRequestIdStatusResponses[keyof GetFalAiLongcatImageRequestsByRequestIdStatusResponses] + +export type PutFalAiLongcatImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/longcat-image/requests/{request_id}/cancel' +} + +export type PutFalAiLongcatImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLongcatImageRequestsByRequestIdCancelResponse = + PutFalAiLongcatImageRequestsByRequestIdCancelResponses[keyof PutFalAiLongcatImageRequestsByRequestIdCancelResponses] + +export type PostFalAiLongcatImageData = { + body: SchemaLongcatImageInput + path?: never + query?: never + url: '/fal-ai/longcat-image' +} + +export type PostFalAiLongcatImageResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiLongcatImageResponse = + PostFalAiLongcatImageResponses[keyof PostFalAiLongcatImageResponses] + +export type GetFalAiLongcatImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/longcat-image/requests/{request_id}' +} + +export type GetFalAiLongcatImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLongcatImageOutput +} + +export type GetFalAiLongcatImageRequestsByRequestIdResponse = + GetFalAiLongcatImageRequestsByRequestIdResponses[keyof GetFalAiLongcatImageRequestsByRequestIdResponses] + +export type GetFalAiBytedanceSeedreamV45TextToImageRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/bytedance/seedream/v4.5/text-to-image/requests/{request_id}/status' + } + +export type GetFalAiBytedanceSeedreamV45TextToImageRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiBytedanceSeedreamV45TextToImageRequestsByRequestIdStatusResponse = + GetFalAiBytedanceSeedreamV45TextToImageRequestsByRequestIdStatusResponses[keyof GetFalAiBytedanceSeedreamV45TextToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiBytedanceSeedreamV45TextToImageRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bytedance/seedream/v4.5/text-to-image/requests/{request_id}/cancel' + } + +export type PutFalAiBytedanceSeedreamV45TextToImageRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiBytedanceSeedreamV45TextToImageRequestsByRequestIdCancelResponse = + PutFalAiBytedanceSeedreamV45TextToImageRequestsByRequestIdCancelResponses[keyof PutFalAiBytedanceSeedreamV45TextToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiBytedanceSeedreamV45TextToImageData = { + body: SchemaBytedanceSeedreamV45TextToImageInput + path?: never + query?: never + url: '/fal-ai/bytedance/seedream/v4.5/text-to-image' +} + +export type PostFalAiBytedanceSeedreamV45TextToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiBytedanceSeedreamV45TextToImageResponse = + PostFalAiBytedanceSeedreamV45TextToImageResponses[keyof PostFalAiBytedanceSeedreamV45TextToImageResponses] + +export type GetFalAiBytedanceSeedreamV45TextToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bytedance/seedream/v4.5/text-to-image/requests/{request_id}' +} + +export type GetFalAiBytedanceSeedreamV45TextToImageRequestsByRequestIdResponses = + { + /** + * Result of the request. 
+ */ + 200: SchemaBytedanceSeedreamV45TextToImageOutput + } + +export type GetFalAiBytedanceSeedreamV45TextToImageRequestsByRequestIdResponse = + GetFalAiBytedanceSeedreamV45TextToImageRequestsByRequestIdResponses[keyof GetFalAiBytedanceSeedreamV45TextToImageRequestsByRequestIdResponses] + +export type GetFalAiViduQ2TextToImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/vidu/q2/text-to-image/requests/{request_id}/status' +} + +export type GetFalAiViduQ2TextToImageRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiViduQ2TextToImageRequestsByRequestIdStatusResponse = + GetFalAiViduQ2TextToImageRequestsByRequestIdStatusResponses[keyof GetFalAiViduQ2TextToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiViduQ2TextToImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/vidu/q2/text-to-image/requests/{request_id}/cancel' +} + +export type PutFalAiViduQ2TextToImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiViduQ2TextToImageRequestsByRequestIdCancelResponse = + PutFalAiViduQ2TextToImageRequestsByRequestIdCancelResponses[keyof PutFalAiViduQ2TextToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiViduQ2TextToImageData = { + body: SchemaViduQ2TextToImageInput + path?: never + query?: never + url: '/fal-ai/vidu/q2/text-to-image' +} + +export type PostFalAiViduQ2TextToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiViduQ2TextToImageResponse = + PostFalAiViduQ2TextToImageResponses[keyof PostFalAiViduQ2TextToImageResponses] + +export type GetFalAiViduQ2TextToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/vidu/q2/text-to-image/requests/{request_id}' +} + +export type GetFalAiViduQ2TextToImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaViduQ2TextToImageOutput +} + +export type GetFalAiViduQ2TextToImageRequestsByRequestIdResponse = + GetFalAiViduQ2TextToImageRequestsByRequestIdResponses[keyof GetFalAiViduQ2TextToImageRequestsByRequestIdResponses] + +export type GetFalAiZImageTurboLoraRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/z-image/turbo/lora/requests/{request_id}/status' +} + +export type GetFalAiZImageTurboLoraRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiZImageTurboLoraRequestsByRequestIdStatusResponse = + GetFalAiZImageTurboLoraRequestsByRequestIdStatusResponses[keyof GetFalAiZImageTurboLoraRequestsByRequestIdStatusResponses] + +export type PutFalAiZImageTurboLoraRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/z-image/turbo/lora/requests/{request_id}/cancel' +} + +export type PutFalAiZImageTurboLoraRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiZImageTurboLoraRequestsByRequestIdCancelResponse = + PutFalAiZImageTurboLoraRequestsByRequestIdCancelResponses[keyof PutFalAiZImageTurboLoraRequestsByRequestIdCancelResponses] + +export type PostFalAiZImageTurboLoraData = { + body: SchemaZImageTurboLoraInput + path?: never + query?: never + url: '/fal-ai/z-image/turbo/lora' +} + +export type PostFalAiZImageTurboLoraResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiZImageTurboLoraResponse = + PostFalAiZImageTurboLoraResponses[keyof PostFalAiZImageTurboLoraResponses] + +export type GetFalAiZImageTurboLoraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/z-image/turbo/lora/requests/{request_id}' +} + +export type GetFalAiZImageTurboLoraRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaZImageTurboLoraOutput +} + +export type GetFalAiZImageTurboLoraRequestsByRequestIdResponse = + GetFalAiZImageTurboLoraRequestsByRequestIdResponses[keyof GetFalAiZImageTurboLoraRequestsByRequestIdResponses] + +export type GetFalAiOvisImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ovis-image/requests/{request_id}/status' +} + +export type GetFalAiOvisImageRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiOvisImageRequestsByRequestIdStatusResponse = + GetFalAiOvisImageRequestsByRequestIdStatusResponses[keyof GetFalAiOvisImageRequestsByRequestIdStatusResponses] + +export type PutFalAiOvisImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ovis-image/requests/{request_id}/cancel' +} + +export type PutFalAiOvisImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiOvisImageRequestsByRequestIdCancelResponse = + PutFalAiOvisImageRequestsByRequestIdCancelResponses[keyof PutFalAiOvisImageRequestsByRequestIdCancelResponses] + +export type PostFalAiOvisImageData = { + body: SchemaOvisImageInput + path?: never + query?: never + url: '/fal-ai/ovis-image' +} + +export type PostFalAiOvisImageResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiOvisImageResponse = + PostFalAiOvisImageResponses[keyof PostFalAiOvisImageResponses] + +export type GetFalAiOvisImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ovis-image/requests/{request_id}' +} + +export type GetFalAiOvisImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaOvisImageOutput +} + +export type GetFalAiOvisImageRequestsByRequestIdResponse = + GetFalAiOvisImageRequestsByRequestIdResponses[keyof GetFalAiOvisImageRequestsByRequestIdResponses] + +export type GetFalAiZImageTurboRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/z-image/turbo/requests/{request_id}/status' +} + +export type GetFalAiZImageTurboRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiZImageTurboRequestsByRequestIdStatusResponse = + GetFalAiZImageTurboRequestsByRequestIdStatusResponses[keyof GetFalAiZImageTurboRequestsByRequestIdStatusResponses] + +export type PutFalAiZImageTurboRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/z-image/turbo/requests/{request_id}/cancel' +} + +export type PutFalAiZImageTurboRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiZImageTurboRequestsByRequestIdCancelResponse = + PutFalAiZImageTurboRequestsByRequestIdCancelResponses[keyof PutFalAiZImageTurboRequestsByRequestIdCancelResponses] + +export type PostFalAiZImageTurboData = { + body: SchemaZImageTurboInput + path?: never + query?: never + url: '/fal-ai/z-image/turbo' +} + +export type PostFalAiZImageTurboResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiZImageTurboResponse = + PostFalAiZImageTurboResponses[keyof PostFalAiZImageTurboResponses] + +export type GetFalAiZImageTurboRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/z-image/turbo/requests/{request_id}' +} + +export type GetFalAiZImageTurboRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaZImageTurboOutput +} + +export type GetFalAiZImageTurboRequestsByRequestIdResponse = + GetFalAiZImageTurboRequestsByRequestIdResponses[keyof GetFalAiZImageTurboRequestsByRequestIdResponses] + +export type GetFalAiFlux2LoraGallerySepiaVintageRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-2-lora-gallery/sepia-vintage/requests/{request_id}/status' + } + +export type GetFalAiFlux2LoraGallerySepiaVintageRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiFlux2LoraGallerySepiaVintageRequestsByRequestIdStatusResponse = + GetFalAiFlux2LoraGallerySepiaVintageRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2LoraGallerySepiaVintageRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2LoraGallerySepiaVintageRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-lora-gallery/sepia-vintage/requests/{request_id}/cancel' + } + +export type PutFalAiFlux2LoraGallerySepiaVintageRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFlux2LoraGallerySepiaVintageRequestsByRequestIdCancelResponse = + PutFalAiFlux2LoraGallerySepiaVintageRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2LoraGallerySepiaVintageRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2LoraGallerySepiaVintageData = { + body: SchemaFlux2LoraGallerySepiaVintageInput + path?: never + query?: never + url: '/fal-ai/flux-2-lora-gallery/sepia-vintage' +} + +export type PostFalAiFlux2LoraGallerySepiaVintageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2LoraGallerySepiaVintageResponse = + PostFalAiFlux2LoraGallerySepiaVintageResponses[keyof PostFalAiFlux2LoraGallerySepiaVintageResponses] + +export type GetFalAiFlux2LoraGallerySepiaVintageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-lora-gallery/sepia-vintage/requests/{request_id}' +} + +export type GetFalAiFlux2LoraGallerySepiaVintageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux2LoraGallerySepiaVintageOutput +} + +export type GetFalAiFlux2LoraGallerySepiaVintageRequestsByRequestIdResponse = + GetFalAiFlux2LoraGallerySepiaVintageRequestsByRequestIdResponses[keyof GetFalAiFlux2LoraGallerySepiaVintageRequestsByRequestIdResponses] + +export type GetFalAiFlux2LoraGallerySatelliteViewStyleRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-2-lora-gallery/satellite-view-style/requests/{request_id}/status' + } + +export type GetFalAiFlux2LoraGallerySatelliteViewStyleRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFlux2LoraGallerySatelliteViewStyleRequestsByRequestIdStatusResponse = + GetFalAiFlux2LoraGallerySatelliteViewStyleRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2LoraGallerySatelliteViewStyleRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2LoraGallerySatelliteViewStyleRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-lora-gallery/satellite-view-style/requests/{request_id}/cancel' + } + +export type PutFalAiFlux2LoraGallerySatelliteViewStyleRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiFlux2LoraGallerySatelliteViewStyleRequestsByRequestIdCancelResponse = + PutFalAiFlux2LoraGallerySatelliteViewStyleRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2LoraGallerySatelliteViewStyleRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2LoraGallerySatelliteViewStyleData = { + body: SchemaFlux2LoraGallerySatelliteViewStyleInput + path?: never + query?: never + url: '/fal-ai/flux-2-lora-gallery/satellite-view-style' +} + +export type PostFalAiFlux2LoraGallerySatelliteViewStyleResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2LoraGallerySatelliteViewStyleResponse = + PostFalAiFlux2LoraGallerySatelliteViewStyleResponses[keyof PostFalAiFlux2LoraGallerySatelliteViewStyleResponses] + +export type GetFalAiFlux2LoraGallerySatelliteViewStyleRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-lora-gallery/satellite-view-style/requests/{request_id}' + } + +export type GetFalAiFlux2LoraGallerySatelliteViewStyleRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaFlux2LoraGallerySatelliteViewStyleOutput + } + +export type GetFalAiFlux2LoraGallerySatelliteViewStyleRequestsByRequestIdResponse = + GetFalAiFlux2LoraGallerySatelliteViewStyleRequestsByRequestIdResponses[keyof GetFalAiFlux2LoraGallerySatelliteViewStyleRequestsByRequestIdResponses] + +export type GetFalAiFlux2LoraGalleryRealismRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-2-lora-gallery/realism/requests/{request_id}/status' +} + +export type GetFalAiFlux2LoraGalleryRealismRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFlux2LoraGalleryRealismRequestsByRequestIdStatusResponse = + GetFalAiFlux2LoraGalleryRealismRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2LoraGalleryRealismRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2LoraGalleryRealismRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-lora-gallery/realism/requests/{request_id}/cancel' +} + +export type PutFalAiFlux2LoraGalleryRealismRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFlux2LoraGalleryRealismRequestsByRequestIdCancelResponse = + PutFalAiFlux2LoraGalleryRealismRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2LoraGalleryRealismRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2LoraGalleryRealismData = { + body: SchemaFlux2LoraGalleryRealismInput + path?: never + query?: never + url: '/fal-ai/flux-2-lora-gallery/realism' +} + +export type PostFalAiFlux2LoraGalleryRealismResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2LoraGalleryRealismResponse = + PostFalAiFlux2LoraGalleryRealismResponses[keyof PostFalAiFlux2LoraGalleryRealismResponses] + +export type GetFalAiFlux2LoraGalleryRealismRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-lora-gallery/realism/requests/{request_id}' +} + +export type GetFalAiFlux2LoraGalleryRealismRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux2LoraGalleryRealismOutput +} + +export type GetFalAiFlux2LoraGalleryRealismRequestsByRequestIdResponse = + GetFalAiFlux2LoraGalleryRealismRequestsByRequestIdResponses[keyof GetFalAiFlux2LoraGalleryRealismRequestsByRequestIdResponses] + +export type GetFalAiFlux2LoraGalleryHdrStyleRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-2-lora-gallery/hdr-style/requests/{request_id}/status' +} + +export type GetFalAiFlux2LoraGalleryHdrStyleRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFlux2LoraGalleryHdrStyleRequestsByRequestIdStatusResponse = + GetFalAiFlux2LoraGalleryHdrStyleRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2LoraGalleryHdrStyleRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2LoraGalleryHdrStyleRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-lora-gallery/hdr-style/requests/{request_id}/cancel' +} + +export type PutFalAiFlux2LoraGalleryHdrStyleRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFlux2LoraGalleryHdrStyleRequestsByRequestIdCancelResponse = + PutFalAiFlux2LoraGalleryHdrStyleRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2LoraGalleryHdrStyleRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2LoraGalleryHdrStyleData = { + body: SchemaFlux2LoraGalleryHdrStyleInput + path?: never + query?: never + url: '/fal-ai/flux-2-lora-gallery/hdr-style' +} + +export type PostFalAiFlux2LoraGalleryHdrStyleResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2LoraGalleryHdrStyleResponse = + PostFalAiFlux2LoraGalleryHdrStyleResponses[keyof PostFalAiFlux2LoraGalleryHdrStyleResponses] + +export type GetFalAiFlux2LoraGalleryHdrStyleRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-lora-gallery/hdr-style/requests/{request_id}' +} + +export type GetFalAiFlux2LoraGalleryHdrStyleRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux2LoraGalleryHdrStyleOutput +} + +export type GetFalAiFlux2LoraGalleryHdrStyleRequestsByRequestIdResponse = + GetFalAiFlux2LoraGalleryHdrStyleRequestsByRequestIdResponses[keyof GetFalAiFlux2LoraGalleryHdrStyleRequestsByRequestIdResponses] + +export type GetFalAiFlux2LoraGalleryDigitalComicArtRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/flux-2-lora-gallery/digital-comic-art/requests/{request_id}/status' + } + +export type GetFalAiFlux2LoraGalleryDigitalComicArtRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFlux2LoraGalleryDigitalComicArtRequestsByRequestIdStatusResponse = + GetFalAiFlux2LoraGalleryDigitalComicArtRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2LoraGalleryDigitalComicArtRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2LoraGalleryDigitalComicArtRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-lora-gallery/digital-comic-art/requests/{request_id}/cancel' + } + +export type PutFalAiFlux2LoraGalleryDigitalComicArtRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFlux2LoraGalleryDigitalComicArtRequestsByRequestIdCancelResponse = + PutFalAiFlux2LoraGalleryDigitalComicArtRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2LoraGalleryDigitalComicArtRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2LoraGalleryDigitalComicArtData = { + body: SchemaFlux2LoraGalleryDigitalComicArtInput + path?: never + query?: never + url: '/fal-ai/flux-2-lora-gallery/digital-comic-art' +} + +export type PostFalAiFlux2LoraGalleryDigitalComicArtResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2LoraGalleryDigitalComicArtResponse = + PostFalAiFlux2LoraGalleryDigitalComicArtResponses[keyof PostFalAiFlux2LoraGalleryDigitalComicArtResponses] + +export type GetFalAiFlux2LoraGalleryDigitalComicArtRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-lora-gallery/digital-comic-art/requests/{request_id}' +} + +export type GetFalAiFlux2LoraGalleryDigitalComicArtRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaFlux2LoraGalleryDigitalComicArtOutput + } + +export type GetFalAiFlux2LoraGalleryDigitalComicArtRequestsByRequestIdResponse = + GetFalAiFlux2LoraGalleryDigitalComicArtRequestsByRequestIdResponses[keyof GetFalAiFlux2LoraGalleryDigitalComicArtRequestsByRequestIdResponses] + +export type GetFalAiFlux2LoraGalleryBallpointPenSketchRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-2-lora-gallery/ballpoint-pen-sketch/requests/{request_id}/status' + } + +export type GetFalAiFlux2LoraGalleryBallpointPenSketchRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiFlux2LoraGalleryBallpointPenSketchRequestsByRequestIdStatusResponse = + GetFalAiFlux2LoraGalleryBallpointPenSketchRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2LoraGalleryBallpointPenSketchRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2LoraGalleryBallpointPenSketchRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-lora-gallery/ballpoint-pen-sketch/requests/{request_id}/cancel' + } + +export type PutFalAiFlux2LoraGalleryBallpointPenSketchRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFlux2LoraGalleryBallpointPenSketchRequestsByRequestIdCancelResponse = + PutFalAiFlux2LoraGalleryBallpointPenSketchRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2LoraGalleryBallpointPenSketchRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2LoraGalleryBallpointPenSketchData = { + body: SchemaFlux2LoraGalleryBallpointPenSketchInput + path?: never + query?: never + url: '/fal-ai/flux-2-lora-gallery/ballpoint-pen-sketch' +} + +export type PostFalAiFlux2LoraGalleryBallpointPenSketchResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2LoraGalleryBallpointPenSketchResponse = + PostFalAiFlux2LoraGalleryBallpointPenSketchResponses[keyof PostFalAiFlux2LoraGalleryBallpointPenSketchResponses] + +export type GetFalAiFlux2LoraGalleryBallpointPenSketchRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-lora-gallery/ballpoint-pen-sketch/requests/{request_id}' + } + +export type GetFalAiFlux2LoraGalleryBallpointPenSketchRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaFlux2LoraGalleryBallpointPenSketchOutput + } + +export type GetFalAiFlux2LoraGalleryBallpointPenSketchRequestsByRequestIdResponse = + GetFalAiFlux2LoraGalleryBallpointPenSketchRequestsByRequestIdResponses[keyof GetFalAiFlux2LoraGalleryBallpointPenSketchRequestsByRequestIdResponses] + +export type GetFalAiFlux2FlexRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-2-flex/requests/{request_id}/status' +} + +export type GetFalAiFlux2FlexRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlux2FlexRequestsByRequestIdStatusResponse = + GetFalAiFlux2FlexRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2FlexRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2FlexRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-flex/requests/{request_id}/cancel' +} + +export type PutFalAiFlux2FlexRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiFlux2FlexRequestsByRequestIdCancelResponse = + PutFalAiFlux2FlexRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2FlexRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2FlexData = { + body: SchemaFlux2FlexInput + path?: never + query?: never + url: '/fal-ai/flux-2-flex' +} + +export type PostFalAiFlux2FlexResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2FlexResponse = + PostFalAiFlux2FlexResponses[keyof PostFalAiFlux2FlexResponses] + +export type GetFalAiFlux2FlexRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-flex/requests/{request_id}' +} + +export type GetFalAiFlux2FlexRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux2FlexOutput +} + +export type GetFalAiFlux2FlexRequestsByRequestIdResponse = + GetFalAiFlux2FlexRequestsByRequestIdResponses[keyof GetFalAiFlux2FlexRequestsByRequestIdResponses] + +export type GetFalAiGemini3ProImagePreviewRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/gemini-3-pro-image-preview/requests/{request_id}/status' +} + +export type GetFalAiGemini3ProImagePreviewRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiGemini3ProImagePreviewRequestsByRequestIdStatusResponse = + GetFalAiGemini3ProImagePreviewRequestsByRequestIdStatusResponses[keyof GetFalAiGemini3ProImagePreviewRequestsByRequestIdStatusResponses] + +export type PutFalAiGemini3ProImagePreviewRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/gemini-3-pro-image-preview/requests/{request_id}/cancel' +} + +export type PutFalAiGemini3ProImagePreviewRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiGemini3ProImagePreviewRequestsByRequestIdCancelResponse = + PutFalAiGemini3ProImagePreviewRequestsByRequestIdCancelResponses[keyof PutFalAiGemini3ProImagePreviewRequestsByRequestIdCancelResponses] + +export type PostFalAiGemini3ProImagePreviewData = { + body: SchemaGemini3ProImagePreviewInput + path?: never + query?: never + url: '/fal-ai/gemini-3-pro-image-preview' +} + +export type PostFalAiGemini3ProImagePreviewResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiGemini3ProImagePreviewResponse = + PostFalAiGemini3ProImagePreviewResponses[keyof PostFalAiGemini3ProImagePreviewResponses] + +export type GetFalAiGemini3ProImagePreviewRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/gemini-3-pro-image-preview/requests/{request_id}' +} + +export type GetFalAiGemini3ProImagePreviewRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaGemini3ProImagePreviewOutput +} + +export type GetFalAiGemini3ProImagePreviewRequestsByRequestIdResponse = + GetFalAiGemini3ProImagePreviewRequestsByRequestIdResponses[keyof GetFalAiGemini3ProImagePreviewRequestsByRequestIdResponses] + +export type GetFalAiNanoBananaProRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/nano-banana-pro/requests/{request_id}/status' +} + +export type GetFalAiNanoBananaProRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiNanoBananaProRequestsByRequestIdStatusResponse = + GetFalAiNanoBananaProRequestsByRequestIdStatusResponses[keyof GetFalAiNanoBananaProRequestsByRequestIdStatusResponses] + +export type PutFalAiNanoBananaProRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/nano-banana-pro/requests/{request_id}/cancel' +} + +export type PutFalAiNanoBananaProRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiNanoBananaProRequestsByRequestIdCancelResponse = + PutFalAiNanoBananaProRequestsByRequestIdCancelResponses[keyof PutFalAiNanoBananaProRequestsByRequestIdCancelResponses] + +export type PostFalAiNanoBananaProData = { + body: SchemaNanoBananaProInput + path?: never + query?: never + url: '/fal-ai/nano-banana-pro' +} + +export type PostFalAiNanoBananaProResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiNanoBananaProResponse = + PostFalAiNanoBananaProResponses[keyof PostFalAiNanoBananaProResponses] + +export type GetFalAiNanoBananaProRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/nano-banana-pro/requests/{request_id}' +} + +export type GetFalAiNanoBananaProRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaNanoBananaProOutput +} + +export type GetFalAiNanoBananaProRequestsByRequestIdResponse = + GetFalAiNanoBananaProRequestsByRequestIdResponses[keyof GetFalAiNanoBananaProRequestsByRequestIdResponses] + +export type GetImagineartImagineart15PreviewTextToImageRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/imagineart/imagineart-1.5-preview/text-to-image/requests/{request_id}/status' + } + +export type GetImagineartImagineart15PreviewTextToImageRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetImagineartImagineart15PreviewTextToImageRequestsByRequestIdStatusResponse = + GetImagineartImagineart15PreviewTextToImageRequestsByRequestIdStatusResponses[keyof GetImagineartImagineart15PreviewTextToImageRequestsByRequestIdStatusResponses] + +export type PutImagineartImagineart15PreviewTextToImageRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/imagineart/imagineart-1.5-preview/text-to-image/requests/{request_id}/cancel' + } + +export type PutImagineartImagineart15PreviewTextToImageRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutImagineartImagineart15PreviewTextToImageRequestsByRequestIdCancelResponse = + PutImagineartImagineart15PreviewTextToImageRequestsByRequestIdCancelResponses[keyof PutImagineartImagineart15PreviewTextToImageRequestsByRequestIdCancelResponses] + +export type PostImagineartImagineart15PreviewTextToImageData = { + body: SchemaImagineart15PreviewTextToImageInput + path?: never + query?: never + url: '/imagineart/imagineart-1.5-preview/text-to-image' +} + +export type PostImagineartImagineart15PreviewTextToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostImagineartImagineart15PreviewTextToImageResponse = + PostImagineartImagineart15PreviewTextToImageResponses[keyof PostImagineartImagineart15PreviewTextToImageResponses] + +export type GetImagineartImagineart15PreviewTextToImageRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/imagineart/imagineart-1.5-preview/text-to-image/requests/{request_id}' + } + +export type GetImagineartImagineart15PreviewTextToImageRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaImagineart15PreviewTextToImageOutput + } + +export type GetImagineartImagineart15PreviewTextToImageRequestsByRequestIdResponse = + GetImagineartImagineart15PreviewTextToImageRequestsByRequestIdResponses[keyof GetImagineartImagineart15PreviewTextToImageRequestsByRequestIdResponses] + +export type GetFalAiEmu35ImageTextToImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/emu-3.5-image/text-to-image/requests/{request_id}/status' +} + +export type GetFalAiEmu35ImageTextToImageRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiEmu35ImageTextToImageRequestsByRequestIdStatusResponse = + GetFalAiEmu35ImageTextToImageRequestsByRequestIdStatusResponses[keyof GetFalAiEmu35ImageTextToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiEmu35ImageTextToImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/emu-3.5-image/text-to-image/requests/{request_id}/cancel' +} + +export type PutFalAiEmu35ImageTextToImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiEmu35ImageTextToImageRequestsByRequestIdCancelResponse = + PutFalAiEmu35ImageTextToImageRequestsByRequestIdCancelResponses[keyof PutFalAiEmu35ImageTextToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiEmu35ImageTextToImageData = { + body: SchemaEmu35ImageTextToImageInput + path?: never + query?: never + url: '/fal-ai/emu-3.5-image/text-to-image' +} + +export type PostFalAiEmu35ImageTextToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiEmu35ImageTextToImageResponse = + PostFalAiEmu35ImageTextToImageResponses[keyof PostFalAiEmu35ImageTextToImageResponses] + +export type GetFalAiEmu35ImageTextToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/emu-3.5-image/text-to-image/requests/{request_id}' +} + +export type GetFalAiEmu35ImageTextToImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaEmu35ImageTextToImageOutput +} + +export type GetFalAiEmu35ImageTextToImageRequestsByRequestIdResponse = + GetFalAiEmu35ImageTextToImageRequestsByRequestIdResponses[keyof GetFalAiEmu35ImageTextToImageRequestsByRequestIdResponses] + +export type GetBriaFiboGenerateRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/bria/fibo/generate/requests/{request_id}/status' +} + +export type GetBriaFiboGenerateRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetBriaFiboGenerateRequestsByRequestIdStatusResponse = + GetBriaFiboGenerateRequestsByRequestIdStatusResponses[keyof GetBriaFiboGenerateRequestsByRequestIdStatusResponses] + +export type PutBriaFiboGenerateRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/fibo/generate/requests/{request_id}/cancel' +} + +export type PutBriaFiboGenerateRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutBriaFiboGenerateRequestsByRequestIdCancelResponse = + PutBriaFiboGenerateRequestsByRequestIdCancelResponses[keyof PutBriaFiboGenerateRequestsByRequestIdCancelResponses] + +export type PostBriaFiboGenerateData = { + body: SchemaFiboGenerateInput + path?: never + query?: never + url: '/bria/fibo/generate' +} + +export type PostBriaFiboGenerateResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostBriaFiboGenerateResponse = + PostBriaFiboGenerateResponses[keyof PostBriaFiboGenerateResponses] + +export type GetBriaFiboGenerateRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/fibo/generate/requests/{request_id}' +} + +export type GetBriaFiboGenerateRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaFiboGenerateOutput +} + +export type GetBriaFiboGenerateRequestsByRequestIdResponse = + GetBriaFiboGenerateRequestsByRequestIdResponses[keyof GetBriaFiboGenerateRequestsByRequestIdResponses] + +export type GetFalAiPiflowRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/piflow/requests/{request_id}/status' +} + +export type GetFalAiPiflowRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiPiflowRequestsByRequestIdStatusResponse = + GetFalAiPiflowRequestsByRequestIdStatusResponses[keyof GetFalAiPiflowRequestsByRequestIdStatusResponses] + +export type PutFalAiPiflowRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/piflow/requests/{request_id}/cancel' +} + +export type PutFalAiPiflowRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiPiflowRequestsByRequestIdCancelResponse = + PutFalAiPiflowRequestsByRequestIdCancelResponses[keyof PutFalAiPiflowRequestsByRequestIdCancelResponses] + +export type PostFalAiPiflowData = { + body: SchemaPiflowInput + path?: never + query?: never + url: '/fal-ai/piflow' +} + +export type PostFalAiPiflowResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPiflowResponse = + PostFalAiPiflowResponses[keyof PostFalAiPiflowResponses] + +export type GetFalAiPiflowRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/piflow/requests/{request_id}' +} + +export type GetFalAiPiflowRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPiflowOutput +} + +export type GetFalAiPiflowRequestsByRequestIdResponse = + GetFalAiPiflowRequestsByRequestIdResponses[keyof GetFalAiPiflowRequestsByRequestIdResponses] + +export type GetFalAiGptImage1MiniRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/gpt-image-1-mini/requests/{request_id}/status' +} + +export type GetFalAiGptImage1MiniRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiGptImage1MiniRequestsByRequestIdStatusResponse = + GetFalAiGptImage1MiniRequestsByRequestIdStatusResponses[keyof GetFalAiGptImage1MiniRequestsByRequestIdStatusResponses] + +export type PutFalAiGptImage1MiniRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/gpt-image-1-mini/requests/{request_id}/cancel' +} + +export type PutFalAiGptImage1MiniRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiGptImage1MiniRequestsByRequestIdCancelResponse = + PutFalAiGptImage1MiniRequestsByRequestIdCancelResponses[keyof PutFalAiGptImage1MiniRequestsByRequestIdCancelResponses] + +export type PostFalAiGptImage1MiniData = { + body: SchemaGptImage1MiniInput + path?: never + query?: never + url: '/fal-ai/gpt-image-1-mini' +} + +export type PostFalAiGptImage1MiniResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiGptImage1MiniResponse = + PostFalAiGptImage1MiniResponses[keyof PostFalAiGptImage1MiniResponses] + +export type GetFalAiGptImage1MiniRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/gpt-image-1-mini/requests/{request_id}' +} + +export type GetFalAiGptImage1MiniRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaGptImage1MiniOutput +} + +export type GetFalAiGptImage1MiniRequestsByRequestIdResponse = + GetFalAiGptImage1MiniRequestsByRequestIdResponses[keyof GetFalAiGptImage1MiniRequestsByRequestIdResponses] + +export type GetFalAiReveTextToImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/reve/text-to-image/requests/{request_id}/status' +} + +export type GetFalAiReveTextToImageRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiReveTextToImageRequestsByRequestIdStatusResponse = + GetFalAiReveTextToImageRequestsByRequestIdStatusResponses[keyof GetFalAiReveTextToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiReveTextToImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/reve/text-to-image/requests/{request_id}/cancel' +} + +export type PutFalAiReveTextToImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiReveTextToImageRequestsByRequestIdCancelResponse = + PutFalAiReveTextToImageRequestsByRequestIdCancelResponses[keyof PutFalAiReveTextToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiReveTextToImageData = { + body: SchemaReveTextToImageInput + path?: never + query?: never + url: '/fal-ai/reve/text-to-image' +} + +export type PostFalAiReveTextToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiReveTextToImageResponse = + PostFalAiReveTextToImageResponses[keyof PostFalAiReveTextToImageResponses] + +export type GetFalAiReveTextToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/reve/text-to-image/requests/{request_id}' +} + +export type GetFalAiReveTextToImageRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaReveTextToImageOutput +} + +export type GetFalAiReveTextToImageRequestsByRequestIdResponse = + GetFalAiReveTextToImageRequestsByRequestIdResponses[keyof GetFalAiReveTextToImageRequestsByRequestIdResponses] + +export type GetFalAiHunyuanImageV3TextToImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/hunyuan-image/v3/text-to-image/requests/{request_id}/status' +} + +export type GetFalAiHunyuanImageV3TextToImageRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiHunyuanImageV3TextToImageRequestsByRequestIdStatusResponse = + GetFalAiHunyuanImageV3TextToImageRequestsByRequestIdStatusResponses[keyof GetFalAiHunyuanImageV3TextToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiHunyuanImageV3TextToImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan-image/v3/text-to-image/requests/{request_id}/cancel' +} + +export type PutFalAiHunyuanImageV3TextToImageRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiHunyuanImageV3TextToImageRequestsByRequestIdCancelResponse = + PutFalAiHunyuanImageV3TextToImageRequestsByRequestIdCancelResponses[keyof PutFalAiHunyuanImageV3TextToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiHunyuanImageV3TextToImageData = { + body: SchemaHunyuanImageV3TextToImageInput + path?: never + query?: never + url: '/fal-ai/hunyuan-image/v3/text-to-image' +} + +export type PostFalAiHunyuanImageV3TextToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiHunyuanImageV3TextToImageResponse = + PostFalAiHunyuanImageV3TextToImageResponses[keyof PostFalAiHunyuanImageV3TextToImageResponses] + +export type GetFalAiHunyuanImageV3TextToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan-image/v3/text-to-image/requests/{request_id}' +} + +export type GetFalAiHunyuanImageV3TextToImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaHunyuanImageV3TextToImageOutput +} + +export type GetFalAiHunyuanImageV3TextToImageRequestsByRequestIdResponse = + GetFalAiHunyuanImageV3TextToImageRequestsByRequestIdResponses[keyof GetFalAiHunyuanImageV3TextToImageRequestsByRequestIdResponses] + +export type GetFalAiWan25PreviewTextToImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan-25-preview/text-to-image/requests/{request_id}/status' +} + +export type GetFalAiWan25PreviewTextToImageRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiWan25PreviewTextToImageRequestsByRequestIdStatusResponse = + GetFalAiWan25PreviewTextToImageRequestsByRequestIdStatusResponses[keyof GetFalAiWan25PreviewTextToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiWan25PreviewTextToImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-25-preview/text-to-image/requests/{request_id}/cancel' +} + +export type PutFalAiWan25PreviewTextToImageRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiWan25PreviewTextToImageRequestsByRequestIdCancelResponse = + PutFalAiWan25PreviewTextToImageRequestsByRequestIdCancelResponses[keyof PutFalAiWan25PreviewTextToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiWan25PreviewTextToImageData = { + body: SchemaWan25PreviewTextToImageInput + path?: never + query?: never + url: '/fal-ai/wan-25-preview/text-to-image' +} + +export type PostFalAiWan25PreviewTextToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWan25PreviewTextToImageResponse = + PostFalAiWan25PreviewTextToImageResponses[keyof PostFalAiWan25PreviewTextToImageResponses] + +export type GetFalAiWan25PreviewTextToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-25-preview/text-to-image/requests/{request_id}' +} + +export type GetFalAiWan25PreviewTextToImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWan25PreviewTextToImageOutput +} + +export type GetFalAiWan25PreviewTextToImageRequestsByRequestIdResponse = + GetFalAiWan25PreviewTextToImageRequestsByRequestIdResponses[keyof GetFalAiWan25PreviewTextToImageRequestsByRequestIdResponses] + +export type GetFalAiFluxSrpoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux/srpo/requests/{request_id}/status' +} + +export type GetFalAiFluxSrpoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxSrpoRequestsByRequestIdStatusResponse = + GetFalAiFluxSrpoRequestsByRequestIdStatusResponses[keyof GetFalAiFluxSrpoRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxSrpoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux/srpo/requests/{request_id}/cancel' +} + +export type PutFalAiFluxSrpoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFluxSrpoRequestsByRequestIdCancelResponse = + PutFalAiFluxSrpoRequestsByRequestIdCancelResponses[keyof PutFalAiFluxSrpoRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxSrpoData = { + body: SchemaFluxSrpoInput + path?: never + query?: never + url: '/fal-ai/flux/srpo' +} + +export type PostFalAiFluxSrpoResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxSrpoResponse = + PostFalAiFluxSrpoResponses[keyof PostFalAiFluxSrpoResponses] + +export type GetFalAiFluxSrpoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux/srpo/requests/{request_id}' +} + +export type GetFalAiFluxSrpoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxSrpoOutput +} + +export type GetFalAiFluxSrpoRequestsByRequestIdResponse = + GetFalAiFluxSrpoRequestsByRequestIdResponses[keyof GetFalAiFluxSrpoRequestsByRequestIdResponses] + +export type GetFalAiFlux1SrpoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-1/srpo/requests/{request_id}/status' +} + +export type GetFalAiFlux1SrpoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlux1SrpoRequestsByRequestIdStatusResponse = + GetFalAiFlux1SrpoRequestsByRequestIdStatusResponses[keyof GetFalAiFlux1SrpoRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux1SrpoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-1/srpo/requests/{request_id}/cancel' +} + +export type PutFalAiFlux1SrpoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFlux1SrpoRequestsByRequestIdCancelResponse = + PutFalAiFlux1SrpoRequestsByRequestIdCancelResponses[keyof PutFalAiFlux1SrpoRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux1SrpoData = { + body: SchemaFlux1SrpoInput + path?: never + query?: never + url: '/fal-ai/flux-1/srpo' +} + +export type PostFalAiFlux1SrpoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux1SrpoResponse = + PostFalAiFlux1SrpoResponses[keyof PostFalAiFlux1SrpoResponses] + +export type GetFalAiFlux1SrpoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-1/srpo/requests/{request_id}' +} + +export type GetFalAiFlux1SrpoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux1SrpoOutput +} + +export type GetFalAiFlux1SrpoRequestsByRequestIdResponse = + GetFalAiFlux1SrpoRequestsByRequestIdResponses[keyof GetFalAiFlux1SrpoRequestsByRequestIdResponses] + +export type GetFalAiHunyuanImageV21TextToImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/hunyuan-image/v2.1/text-to-image/requests/{request_id}/status' +} + +export type GetFalAiHunyuanImageV21TextToImageRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiHunyuanImageV21TextToImageRequestsByRequestIdStatusResponse = + GetFalAiHunyuanImageV21TextToImageRequestsByRequestIdStatusResponses[keyof GetFalAiHunyuanImageV21TextToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiHunyuanImageV21TextToImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan-image/v2.1/text-to-image/requests/{request_id}/cancel' +} + +export type PutFalAiHunyuanImageV21TextToImageRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiHunyuanImageV21TextToImageRequestsByRequestIdCancelResponse = + PutFalAiHunyuanImageV21TextToImageRequestsByRequestIdCancelResponses[keyof PutFalAiHunyuanImageV21TextToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiHunyuanImageV21TextToImageData = { + body: SchemaHunyuanImageV21TextToImageInput + path?: never + query?: never + url: '/fal-ai/hunyuan-image/v2.1/text-to-image' +} + +export type PostFalAiHunyuanImageV21TextToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiHunyuanImageV21TextToImageResponse = + PostFalAiHunyuanImageV21TextToImageResponses[keyof PostFalAiHunyuanImageV21TextToImageResponses] + +export type GetFalAiHunyuanImageV21TextToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan-image/v2.1/text-to-image/requests/{request_id}' +} + +export type GetFalAiHunyuanImageV21TextToImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaHunyuanImageV21TextToImageOutput +} + +export type GetFalAiHunyuanImageV21TextToImageRequestsByRequestIdResponse = + GetFalAiHunyuanImageV21TextToImageRequestsByRequestIdResponses[keyof GetFalAiHunyuanImageV21TextToImageRequestsByRequestIdResponses] + +export type GetFalAiBytedanceSeedreamV4TextToImageRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/bytedance/seedream/v4/text-to-image/requests/{request_id}/status' + } + +export type GetFalAiBytedanceSeedreamV4TextToImageRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiBytedanceSeedreamV4TextToImageRequestsByRequestIdStatusResponse = + GetFalAiBytedanceSeedreamV4TextToImageRequestsByRequestIdStatusResponses[keyof GetFalAiBytedanceSeedreamV4TextToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiBytedanceSeedreamV4TextToImageRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bytedance/seedream/v4/text-to-image/requests/{request_id}/cancel' + } + +export type PutFalAiBytedanceSeedreamV4TextToImageRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiBytedanceSeedreamV4TextToImageRequestsByRequestIdCancelResponse = + PutFalAiBytedanceSeedreamV4TextToImageRequestsByRequestIdCancelResponses[keyof PutFalAiBytedanceSeedreamV4TextToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiBytedanceSeedreamV4TextToImageData = { + body: SchemaBytedanceSeedreamV4TextToImageInput + path?: never + query?: never + url: '/fal-ai/bytedance/seedream/v4/text-to-image' +} + +export type PostFalAiBytedanceSeedreamV4TextToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiBytedanceSeedreamV4TextToImageResponse = + PostFalAiBytedanceSeedreamV4TextToImageResponses[keyof PostFalAiBytedanceSeedreamV4TextToImageResponses] + +export type GetFalAiBytedanceSeedreamV4TextToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bytedance/seedream/v4/text-to-image/requests/{request_id}' +} + +export type GetFalAiBytedanceSeedreamV4TextToImageRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaBytedanceSeedreamV4TextToImageOutput + } + +export type GetFalAiBytedanceSeedreamV4TextToImageRequestsByRequestIdResponse = + GetFalAiBytedanceSeedreamV4TextToImageRequestsByRequestIdResponses[keyof GetFalAiBytedanceSeedreamV4TextToImageRequestsByRequestIdResponses] + +export type GetFalAiGemini25FlashImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/gemini-25-flash-image/requests/{request_id}/status' +} + +export type GetFalAiGemini25FlashImageRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiGemini25FlashImageRequestsByRequestIdStatusResponse = + GetFalAiGemini25FlashImageRequestsByRequestIdStatusResponses[keyof GetFalAiGemini25FlashImageRequestsByRequestIdStatusResponses] + +export type PutFalAiGemini25FlashImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/gemini-25-flash-image/requests/{request_id}/cancel' +} + +export type PutFalAiGemini25FlashImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiGemini25FlashImageRequestsByRequestIdCancelResponse = + PutFalAiGemini25FlashImageRequestsByRequestIdCancelResponses[keyof PutFalAiGemini25FlashImageRequestsByRequestIdCancelResponses] + +export type PostFalAiGemini25FlashImageData = { + body: SchemaGemini25FlashImageInput + path?: never + query?: never + url: '/fal-ai/gemini-25-flash-image' +} + +export type PostFalAiGemini25FlashImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiGemini25FlashImageResponse = + PostFalAiGemini25FlashImageResponses[keyof PostFalAiGemini25FlashImageResponses] + +export type GetFalAiGemini25FlashImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/gemini-25-flash-image/requests/{request_id}' +} + +export type GetFalAiGemini25FlashImageRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaGemini25FlashImageOutput +} + +export type GetFalAiGemini25FlashImageRequestsByRequestIdResponse = + GetFalAiGemini25FlashImageRequestsByRequestIdResponses[keyof GetFalAiGemini25FlashImageRequestsByRequestIdResponses] + +export type GetFalAiNanoBananaRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/nano-banana/requests/{request_id}/status' +} + +export type GetFalAiNanoBananaRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiNanoBananaRequestsByRequestIdStatusResponse = + GetFalAiNanoBananaRequestsByRequestIdStatusResponses[keyof GetFalAiNanoBananaRequestsByRequestIdStatusResponses] + +export type PutFalAiNanoBananaRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/nano-banana/requests/{request_id}/cancel' +} + +export type PutFalAiNanoBananaRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiNanoBananaRequestsByRequestIdCancelResponse = + PutFalAiNanoBananaRequestsByRequestIdCancelResponses[keyof PutFalAiNanoBananaRequestsByRequestIdCancelResponses] + +export type PostFalAiNanoBananaData = { + body: SchemaNanoBananaInput + path?: never + query?: never + url: '/fal-ai/nano-banana' +} + +export type PostFalAiNanoBananaResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiNanoBananaResponse = + PostFalAiNanoBananaResponses[keyof PostFalAiNanoBananaResponses] + +export type GetFalAiNanoBananaRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/nano-banana/requests/{request_id}' +} + +export type GetFalAiNanoBananaRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaNanoBananaOutput +} + +export type GetFalAiNanoBananaRequestsByRequestIdResponse = + GetFalAiNanoBananaRequestsByRequestIdResponses[keyof GetFalAiNanoBananaRequestsByRequestIdResponses] + +export type GetFalAiBytedanceDreaminaV31TextToImageRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/bytedance/dreamina/v3.1/text-to-image/requests/{request_id}/status' + } + +export type GetFalAiBytedanceDreaminaV31TextToImageRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiBytedanceDreaminaV31TextToImageRequestsByRequestIdStatusResponse = + GetFalAiBytedanceDreaminaV31TextToImageRequestsByRequestIdStatusResponses[keyof GetFalAiBytedanceDreaminaV31TextToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiBytedanceDreaminaV31TextToImageRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bytedance/dreamina/v3.1/text-to-image/requests/{request_id}/cancel' + } + +export type PutFalAiBytedanceDreaminaV31TextToImageRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. 
+ */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiBytedanceDreaminaV31TextToImageRequestsByRequestIdCancelResponse = + PutFalAiBytedanceDreaminaV31TextToImageRequestsByRequestIdCancelResponses[keyof PutFalAiBytedanceDreaminaV31TextToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiBytedanceDreaminaV31TextToImageData = { + body: SchemaBytedanceDreaminaV31TextToImageInput + path?: never + query?: never + url: '/fal-ai/bytedance/dreamina/v3.1/text-to-image' +} + +export type PostFalAiBytedanceDreaminaV31TextToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiBytedanceDreaminaV31TextToImageResponse = + PostFalAiBytedanceDreaminaV31TextToImageResponses[keyof PostFalAiBytedanceDreaminaV31TextToImageResponses] + +export type GetFalAiBytedanceDreaminaV31TextToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bytedance/dreamina/v3.1/text-to-image/requests/{request_id}' +} + +export type GetFalAiBytedanceDreaminaV31TextToImageRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaBytedanceDreaminaV31TextToImageOutput + } + +export type GetFalAiBytedanceDreaminaV31TextToImageRequestsByRequestIdResponse = + GetFalAiBytedanceDreaminaV31TextToImageRequestsByRequestIdResponses[keyof GetFalAiBytedanceDreaminaV31TextToImageRequestsByRequestIdResponses] + +export type GetFalAiWanV22A14bTextToImageLoraRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan/v2.2-a14b/text-to-image/lora/requests/{request_id}/status' +} + +export type GetFalAiWanV22A14bTextToImageLoraRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiWanV22A14bTextToImageLoraRequestsByRequestIdStatusResponse = + GetFalAiWanV22A14bTextToImageLoraRequestsByRequestIdStatusResponses[keyof GetFalAiWanV22A14bTextToImageLoraRequestsByRequestIdStatusResponses] + +export type PutFalAiWanV22A14bTextToImageLoraRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan/v2.2-a14b/text-to-image/lora/requests/{request_id}/cancel' +} + +export type PutFalAiWanV22A14bTextToImageLoraRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiWanV22A14bTextToImageLoraRequestsByRequestIdCancelResponse = + PutFalAiWanV22A14bTextToImageLoraRequestsByRequestIdCancelResponses[keyof PutFalAiWanV22A14bTextToImageLoraRequestsByRequestIdCancelResponses] + +export type PostFalAiWanV22A14bTextToImageLoraData = { + body: SchemaWanV22A14bTextToImageLoraInput + path?: never + query?: never + url: '/fal-ai/wan/v2.2-a14b/text-to-image/lora' +} + +export type PostFalAiWanV22A14bTextToImageLoraResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanV22A14bTextToImageLoraResponse = + PostFalAiWanV22A14bTextToImageLoraResponses[keyof PostFalAiWanV22A14bTextToImageLoraResponses] + +export type GetFalAiWanV22A14bTextToImageLoraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan/v2.2-a14b/text-to-image/lora/requests/{request_id}' +} + +export type GetFalAiWanV22A14bTextToImageLoraRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWanV22A14bTextToImageLoraOutput +} + +export type GetFalAiWanV22A14bTextToImageLoraRequestsByRequestIdResponse = + GetFalAiWanV22A14bTextToImageLoraRequestsByRequestIdResponses[keyof GetFalAiWanV22A14bTextToImageLoraRequestsByRequestIdResponses] + +export type GetFalAiWanV225bTextToImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan/v2.2-5b/text-to-image/requests/{request_id}/status' +} + +export type GetFalAiWanV225bTextToImageRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiWanV225bTextToImageRequestsByRequestIdStatusResponse = + GetFalAiWanV225bTextToImageRequestsByRequestIdStatusResponses[keyof GetFalAiWanV225bTextToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiWanV225bTextToImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan/v2.2-5b/text-to-image/requests/{request_id}/cancel' +} + +export type PutFalAiWanV225bTextToImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiWanV225bTextToImageRequestsByRequestIdCancelResponse = + PutFalAiWanV225bTextToImageRequestsByRequestIdCancelResponses[keyof PutFalAiWanV225bTextToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiWanV225bTextToImageData = { + body: SchemaWanV225bTextToImageInput + path?: never + query?: never + url: '/fal-ai/wan/v2.2-5b/text-to-image' +} + +export type PostFalAiWanV225bTextToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanV225bTextToImageResponse = + PostFalAiWanV225bTextToImageResponses[keyof PostFalAiWanV225bTextToImageResponses] + +export type GetFalAiWanV225bTextToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan/v2.2-5b/text-to-image/requests/{request_id}' +} + +export type GetFalAiWanV225bTextToImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWanV225bTextToImageOutput +} + +export type GetFalAiWanV225bTextToImageRequestsByRequestIdResponse = + GetFalAiWanV225bTextToImageRequestsByRequestIdResponses[keyof GetFalAiWanV225bTextToImageRequestsByRequestIdResponses] + +export type GetFalAiWanV22A14bTextToImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/wan/v2.2-a14b/text-to-image/requests/{request_id}/status' +} + +export type GetFalAiWanV22A14bTextToImageRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiWanV22A14bTextToImageRequestsByRequestIdStatusResponse = + GetFalAiWanV22A14bTextToImageRequestsByRequestIdStatusResponses[keyof GetFalAiWanV22A14bTextToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiWanV22A14bTextToImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan/v2.2-a14b/text-to-image/requests/{request_id}/cancel' +} + +export type PutFalAiWanV22A14bTextToImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiWanV22A14bTextToImageRequestsByRequestIdCancelResponse = + PutFalAiWanV22A14bTextToImageRequestsByRequestIdCancelResponses[keyof PutFalAiWanV22A14bTextToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiWanV22A14bTextToImageData = { + body: SchemaWanV22A14bTextToImageInput + path?: never + query?: never + url: '/fal-ai/wan/v2.2-a14b/text-to-image' +} + +export type PostFalAiWanV22A14bTextToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanV22A14bTextToImageResponse = + PostFalAiWanV22A14bTextToImageResponses[keyof PostFalAiWanV22A14bTextToImageResponses] + +export type GetFalAiWanV22A14bTextToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan/v2.2-a14b/text-to-image/requests/{request_id}' +} + +export type GetFalAiWanV22A14bTextToImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWanV22A14bTextToImageOutput +} + +export type GetFalAiWanV22A14bTextToImageRequestsByRequestIdResponse = + GetFalAiWanV22A14bTextToImageRequestsByRequestIdResponses[keyof GetFalAiWanV22A14bTextToImageRequestsByRequestIdResponses] + +export type GetFalAiQwenImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image/requests/{request_id}/status' +} + +export type GetFalAiQwenImageRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiQwenImageRequestsByRequestIdStatusResponse = + GetFalAiQwenImageRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image/requests/{request_id}/cancel' +} + +export type PutFalAiQwenImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiQwenImageRequestsByRequestIdCancelResponse = + PutFalAiQwenImageRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageData = { + body: SchemaQwenImageInput + path?: never + query?: never + url: '/fal-ai/qwen-image' +} + +export type PostFalAiQwenImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImageResponse = + PostFalAiQwenImageResponses[keyof PostFalAiQwenImageResponses] + +export type GetFalAiQwenImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image/requests/{request_id}' +} + +export type GetFalAiQwenImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaQwenImageOutput +} + +export type GetFalAiQwenImageRequestsByRequestIdResponse = + GetFalAiQwenImageRequestsByRequestIdResponses[keyof GetFalAiQwenImageRequestsByRequestIdResponses] + +export type GetFalAiFluxKreaLoraStreamRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-krea-lora/stream/requests/{request_id}/status' +} + +export type GetFalAiFluxKreaLoraStreamRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxKreaLoraStreamRequestsByRequestIdStatusResponse = + GetFalAiFluxKreaLoraStreamRequestsByRequestIdStatusResponses[keyof GetFalAiFluxKreaLoraStreamRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxKreaLoraStreamRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-krea-lora/stream/requests/{request_id}/cancel' +} + +export type PutFalAiFluxKreaLoraStreamRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFluxKreaLoraStreamRequestsByRequestIdCancelResponse = + PutFalAiFluxKreaLoraStreamRequestsByRequestIdCancelResponses[keyof PutFalAiFluxKreaLoraStreamRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxKreaLoraStreamData = { + body: SchemaFluxKreaLoraStreamInput + path?: never + query?: never + url: '/fal-ai/flux-krea-lora/stream' +} + +export type PostFalAiFluxKreaLoraStreamResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxKreaLoraStreamResponse = + PostFalAiFluxKreaLoraStreamResponses[keyof PostFalAiFluxKreaLoraStreamResponses] + +export type GetFalAiFluxKreaLoraStreamRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-krea-lora/stream/requests/{request_id}' +} + +export type GetFalAiFluxKreaLoraStreamRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaFluxKreaLoraStreamOutput +} + +export type GetFalAiFluxKreaLoraStreamRequestsByRequestIdResponse = + GetFalAiFluxKreaLoraStreamRequestsByRequestIdResponses[keyof GetFalAiFluxKreaLoraStreamRequestsByRequestIdResponses] + +export type GetFalAiFluxKreaLoraRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-krea-lora/requests/{request_id}/status' +} + +export type GetFalAiFluxKreaLoraRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxKreaLoraRequestsByRequestIdStatusResponse = + GetFalAiFluxKreaLoraRequestsByRequestIdStatusResponses[keyof GetFalAiFluxKreaLoraRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxKreaLoraRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-krea-lora/requests/{request_id}/cancel' +} + +export type PutFalAiFluxKreaLoraRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFluxKreaLoraRequestsByRequestIdCancelResponse = + PutFalAiFluxKreaLoraRequestsByRequestIdCancelResponses[keyof PutFalAiFluxKreaLoraRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxKreaLoraData = { + body: SchemaFluxKreaLoraInput + path?: never + query?: never + url: '/fal-ai/flux-krea-lora' +} + +export type PostFalAiFluxKreaLoraResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxKreaLoraResponse = + PostFalAiFluxKreaLoraResponses[keyof PostFalAiFluxKreaLoraResponses] + +export type GetFalAiFluxKreaLoraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-krea-lora/requests/{request_id}' +} + +export type GetFalAiFluxKreaLoraRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxKreaLoraOutput +} + +export type GetFalAiFluxKreaLoraRequestsByRequestIdResponse = + GetFalAiFluxKreaLoraRequestsByRequestIdResponses[keyof GetFalAiFluxKreaLoraRequestsByRequestIdResponses] + +export type GetFalAiFluxKreaRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux/krea/requests/{request_id}/status' +} + +export type GetFalAiFluxKreaRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxKreaRequestsByRequestIdStatusResponse = + GetFalAiFluxKreaRequestsByRequestIdStatusResponses[keyof GetFalAiFluxKreaRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxKreaRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux/krea/requests/{request_id}/cancel' +} + +export type PutFalAiFluxKreaRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiFluxKreaRequestsByRequestIdCancelResponse = + PutFalAiFluxKreaRequestsByRequestIdCancelResponses[keyof PutFalAiFluxKreaRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxKreaData = { + body: SchemaFluxKreaInput + path?: never + query?: never + url: '/fal-ai/flux/krea' +} + +export type PostFalAiFluxKreaResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxKreaResponse = + PostFalAiFluxKreaResponses[keyof PostFalAiFluxKreaResponses] + +export type GetFalAiFluxKreaRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux/krea/requests/{request_id}' +} + +export type GetFalAiFluxKreaRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxKreaOutput +} + +export type GetFalAiFluxKreaRequestsByRequestIdResponse = + GetFalAiFluxKreaRequestsByRequestIdResponses[keyof GetFalAiFluxKreaRequestsByRequestIdResponses] + +export type GetFalAiFlux1KreaRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-1/krea/requests/{request_id}/status' +} + +export type GetFalAiFlux1KreaRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlux1KreaRequestsByRequestIdStatusResponse = + GetFalAiFlux1KreaRequestsByRequestIdStatusResponses[keyof GetFalAiFlux1KreaRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux1KreaRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-1/krea/requests/{request_id}/cancel' +} + +export type PutFalAiFlux1KreaRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFlux1KreaRequestsByRequestIdCancelResponse = + PutFalAiFlux1KreaRequestsByRequestIdCancelResponses[keyof PutFalAiFlux1KreaRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux1KreaData = { + body: SchemaFlux1KreaInput + path?: never + query?: never + url: '/fal-ai/flux-1/krea' +} + +export type PostFalAiFlux1KreaResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux1KreaResponse = + PostFalAiFlux1KreaResponses[keyof PostFalAiFlux1KreaResponses] + +export type GetFalAiFlux1KreaRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-1/krea/requests/{request_id}' +} + +export type GetFalAiFlux1KreaRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux1KreaOutput +} + +export type GetFalAiFlux1KreaRequestsByRequestIdResponse = + GetFalAiFlux1KreaRequestsByRequestIdResponses[keyof GetFalAiFlux1KreaRequestsByRequestIdResponses] + +export type GetFalAiSkyRaccoonRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
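+ *
+ * Illustrative sketch only; the queue base URL, `FAL_KEY`, and `requestId`
+ * are assumptions.
+ * @example
+ * // Ask for the status with logs included by passing `logs=1`.
+ * const res = await fetch(
+ *   `https://queue.fal.run/fal-ai/sky-raccoon/requests/${requestId}/status?logs=1`,
+ *   { headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+ * )
+ * const status: SchemaQueueStatus = await res.json()
+ * console.log(status.logs)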
+ */ + logs?: number + } + url: '/fal-ai/sky-raccoon/requests/{request_id}/status' +} + +export type GetFalAiSkyRaccoonRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSkyRaccoonRequestsByRequestIdStatusResponse = + GetFalAiSkyRaccoonRequestsByRequestIdStatusResponses[keyof GetFalAiSkyRaccoonRequestsByRequestIdStatusResponses] + +export type PutFalAiSkyRaccoonRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sky-raccoon/requests/{request_id}/cancel' +} + +export type PutFalAiSkyRaccoonRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiSkyRaccoonRequestsByRequestIdCancelResponse = + PutFalAiSkyRaccoonRequestsByRequestIdCancelResponses[keyof PutFalAiSkyRaccoonRequestsByRequestIdCancelResponses] + +export type PostFalAiSkyRaccoonData = { + body: SchemaSkyRaccoonInput + path?: never + query?: never + url: '/fal-ai/sky-raccoon' +} + +export type PostFalAiSkyRaccoonResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSkyRaccoonResponse = + PostFalAiSkyRaccoonResponses[keyof PostFalAiSkyRaccoonResponses] + +export type GetFalAiSkyRaccoonRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sky-raccoon/requests/{request_id}' +} + +export type GetFalAiSkyRaccoonRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSkyRaccoonOutput +} + +export type GetFalAiSkyRaccoonRequestsByRequestIdResponse = + GetFalAiSkyRaccoonRequestsByRequestIdResponses[keyof GetFalAiSkyRaccoonRequestsByRequestIdResponses] + +export type GetFalAiFluxKontextLoraTextToImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-kontext-lora/text-to-image/requests/{request_id}/status' +} + +export type GetFalAiFluxKontextLoraTextToImageRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFluxKontextLoraTextToImageRequestsByRequestIdStatusResponse = + GetFalAiFluxKontextLoraTextToImageRequestsByRequestIdStatusResponses[keyof GetFalAiFluxKontextLoraTextToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxKontextLoraTextToImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-kontext-lora/text-to-image/requests/{request_id}/cancel' +} + +export type PutFalAiFluxKontextLoraTextToImageRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiFluxKontextLoraTextToImageRequestsByRequestIdCancelResponse = + PutFalAiFluxKontextLoraTextToImageRequestsByRequestIdCancelResponses[keyof PutFalAiFluxKontextLoraTextToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxKontextLoraTextToImageData = { + body: SchemaFluxKontextLoraTextToImageInput + path?: never + query?: never + url: '/fal-ai/flux-kontext-lora/text-to-image' +} + +export type PostFalAiFluxKontextLoraTextToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxKontextLoraTextToImageResponse = + PostFalAiFluxKontextLoraTextToImageResponses[keyof PostFalAiFluxKontextLoraTextToImageResponses] + +export type GetFalAiFluxKontextLoraTextToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-kontext-lora/text-to-image/requests/{request_id}' +} + +export type GetFalAiFluxKontextLoraTextToImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxKontextLoraTextToImageOutput +} + +export type GetFalAiFluxKontextLoraTextToImageRequestsByRequestIdResponse = + GetFalAiFluxKontextLoraTextToImageRequestsByRequestIdResponses[keyof GetFalAiFluxKontextLoraTextToImageRequestsByRequestIdResponses] + +export type GetFalAiOmnigenV2RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/omnigen-v2/requests/{request_id}/status' +} + +export type GetFalAiOmnigenV2RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiOmnigenV2RequestsByRequestIdStatusResponse = + GetFalAiOmnigenV2RequestsByRequestIdStatusResponses[keyof GetFalAiOmnigenV2RequestsByRequestIdStatusResponses] + +export type PutFalAiOmnigenV2RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/omnigen-v2/requests/{request_id}/cancel' +} + +export type PutFalAiOmnigenV2RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiOmnigenV2RequestsByRequestIdCancelResponse = + PutFalAiOmnigenV2RequestsByRequestIdCancelResponses[keyof PutFalAiOmnigenV2RequestsByRequestIdCancelResponses] + +export type PostFalAiOmnigenV2Data = { + body: SchemaOmnigenV2Input + path?: never + query?: never + url: '/fal-ai/omnigen-v2' +} + +export type PostFalAiOmnigenV2Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiOmnigenV2Response = + PostFalAiOmnigenV2Responses[keyof PostFalAiOmnigenV2Responses] + +export type GetFalAiOmnigenV2RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/omnigen-v2/requests/{request_id}' +} + +export type GetFalAiOmnigenV2RequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaOmnigenV2Output +} + +export type GetFalAiOmnigenV2RequestsByRequestIdResponse = + GetFalAiOmnigenV2RequestsByRequestIdResponses[keyof GetFalAiOmnigenV2RequestsByRequestIdResponses] + +export type GetFalAiBytedanceSeedreamV3TextToImageRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/bytedance/seedream/v3/text-to-image/requests/{request_id}/status' + } + +export type GetFalAiBytedanceSeedreamV3TextToImageRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiBytedanceSeedreamV3TextToImageRequestsByRequestIdStatusResponse = + GetFalAiBytedanceSeedreamV3TextToImageRequestsByRequestIdStatusResponses[keyof GetFalAiBytedanceSeedreamV3TextToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiBytedanceSeedreamV3TextToImageRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bytedance/seedream/v3/text-to-image/requests/{request_id}/cancel' + } + +export type PutFalAiBytedanceSeedreamV3TextToImageRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiBytedanceSeedreamV3TextToImageRequestsByRequestIdCancelResponse = + PutFalAiBytedanceSeedreamV3TextToImageRequestsByRequestIdCancelResponses[keyof PutFalAiBytedanceSeedreamV3TextToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiBytedanceSeedreamV3TextToImageData = { + body: SchemaBytedanceSeedreamV3TextToImageInput + path?: never + query?: never + url: '/fal-ai/bytedance/seedream/v3/text-to-image' +} + +export type PostFalAiBytedanceSeedreamV3TextToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiBytedanceSeedreamV3TextToImageResponse = + PostFalAiBytedanceSeedreamV3TextToImageResponses[keyof PostFalAiBytedanceSeedreamV3TextToImageResponses] + +export type GetFalAiBytedanceSeedreamV3TextToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bytedance/seedream/v3/text-to-image/requests/{request_id}' +} + +export type GetFalAiBytedanceSeedreamV3TextToImageRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaBytedanceSeedreamV3TextToImageOutput + } + +export type GetFalAiBytedanceSeedreamV3TextToImageRequestsByRequestIdResponse = + GetFalAiBytedanceSeedreamV3TextToImageRequestsByRequestIdResponses[keyof GetFalAiBytedanceSeedreamV3TextToImageRequestsByRequestIdResponses] + +export type GetFalAiFlux1SchnellRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-1/schnell/requests/{request_id}/status' +} + +export type GetFalAiFlux1SchnellRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlux1SchnellRequestsByRequestIdStatusResponse = + GetFalAiFlux1SchnellRequestsByRequestIdStatusResponses[keyof GetFalAiFlux1SchnellRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux1SchnellRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-1/schnell/requests/{request_id}/cancel' +} + +export type PutFalAiFlux1SchnellRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFlux1SchnellRequestsByRequestIdCancelResponse = + PutFalAiFlux1SchnellRequestsByRequestIdCancelResponses[keyof PutFalAiFlux1SchnellRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux1SchnellData = { + body: SchemaFlux1SchnellInput + path?: never + query?: never + url: '/fal-ai/flux-1/schnell' +} + +export type PostFalAiFlux1SchnellResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux1SchnellResponse = + PostFalAiFlux1SchnellResponses[keyof PostFalAiFlux1SchnellResponses] + +export type GetFalAiFlux1SchnellRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-1/schnell/requests/{request_id}' +} + +export type GetFalAiFlux1SchnellRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux1SchnellOutput +} + +export type GetFalAiFlux1SchnellRequestsByRequestIdResponse = + GetFalAiFlux1SchnellRequestsByRequestIdResponses[keyof GetFalAiFlux1SchnellRequestsByRequestIdResponses] + +export type GetFalAiFlux1DevRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-1/dev/requests/{request_id}/status' +} + +export type GetFalAiFlux1DevRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlux1DevRequestsByRequestIdStatusResponse = + GetFalAiFlux1DevRequestsByRequestIdStatusResponses[keyof GetFalAiFlux1DevRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux1DevRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-1/dev/requests/{request_id}/cancel' +} + +export type PutFalAiFlux1DevRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFlux1DevRequestsByRequestIdCancelResponse = + PutFalAiFlux1DevRequestsByRequestIdCancelResponses[keyof PutFalAiFlux1DevRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux1DevData = { + body: SchemaFlux1DevInput + path?: never + query?: never + url: '/fal-ai/flux-1/dev' +} + +export type PostFalAiFlux1DevResponses = { + /** + * The request status. 
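+ *
+ * Illustrative sketch only; the queue base URL, `FAL_KEY`, and the example
+ * prompt body are assumptions, not taken from this file.
+ * @example
+ * // Submit a request to the queue; the 200 body is a queue ticket, not
+ * // the generated image. Keep `request_id` for status polling.
+ * const res = await fetch('https://queue.fal.run/fal-ai/flux-1/dev', {
+ *   method: 'POST',
+ *   headers: {
+ *     Authorization: `Key ${process.env.FAL_KEY}`,
+ *     'Content-Type': 'application/json',
+ *   },
+ *   body: JSON.stringify({ prompt: 'a red fox in the snow' }),
+ * })
+ * const { request_id }: SchemaQueueStatus = await res.json()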
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux1DevResponse = + PostFalAiFlux1DevResponses[keyof PostFalAiFlux1DevResponses] + +export type GetFalAiFlux1DevRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-1/dev/requests/{request_id}' +} + +export type GetFalAiFlux1DevRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux1DevOutput +} + +export type GetFalAiFlux1DevRequestsByRequestIdResponse = + GetFalAiFlux1DevRequestsByRequestIdResponses[keyof GetFalAiFlux1DevRequestsByRequestIdResponses] + +export type GetFalAiFluxProKontextMaxTextToImageRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-pro/kontext/max/text-to-image/requests/{request_id}/status' + } + +export type GetFalAiFluxProKontextMaxTextToImageRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFluxProKontextMaxTextToImageRequestsByRequestIdStatusResponse = + GetFalAiFluxProKontextMaxTextToImageRequestsByRequestIdStatusResponses[keyof GetFalAiFluxProKontextMaxTextToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxProKontextMaxTextToImageRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-pro/kontext/max/text-to-image/requests/{request_id}/cancel' + } + +export type PutFalAiFluxProKontextMaxTextToImageRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFluxProKontextMaxTextToImageRequestsByRequestIdCancelResponse = + PutFalAiFluxProKontextMaxTextToImageRequestsByRequestIdCancelResponses[keyof PutFalAiFluxProKontextMaxTextToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxProKontextMaxTextToImageData = { + body: SchemaFluxProKontextMaxTextToImageInput + path?: never + query?: never + url: '/fal-ai/flux-pro/kontext/max/text-to-image' +} + +export type PostFalAiFluxProKontextMaxTextToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxProKontextMaxTextToImageResponse = + PostFalAiFluxProKontextMaxTextToImageResponses[keyof PostFalAiFluxProKontextMaxTextToImageResponses] + +export type GetFalAiFluxProKontextMaxTextToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-pro/kontext/max/text-to-image/requests/{request_id}' +} + +export type GetFalAiFluxProKontextMaxTextToImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxProKontextMaxTextToImageOutput +} + +export type GetFalAiFluxProKontextMaxTextToImageRequestsByRequestIdResponse = + GetFalAiFluxProKontextMaxTextToImageRequestsByRequestIdResponses[keyof GetFalAiFluxProKontextMaxTextToImageRequestsByRequestIdResponses] + +export type GetFalAiFluxProKontextTextToImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/flux-pro/kontext/text-to-image/requests/{request_id}/status' +} + +export type GetFalAiFluxProKontextTextToImageRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFluxProKontextTextToImageRequestsByRequestIdStatusResponse = + GetFalAiFluxProKontextTextToImageRequestsByRequestIdStatusResponses[keyof GetFalAiFluxProKontextTextToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxProKontextTextToImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-pro/kontext/text-to-image/requests/{request_id}/cancel' +} + +export type PutFalAiFluxProKontextTextToImageRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFluxProKontextTextToImageRequestsByRequestIdCancelResponse = + PutFalAiFluxProKontextTextToImageRequestsByRequestIdCancelResponses[keyof PutFalAiFluxProKontextTextToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxProKontextTextToImageData = { + body: SchemaFluxProKontextTextToImageInput + path?: never + query?: never + url: '/fal-ai/flux-pro/kontext/text-to-image' +} + +export type PostFalAiFluxProKontextTextToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxProKontextTextToImageResponse = + PostFalAiFluxProKontextTextToImageResponses[keyof PostFalAiFluxProKontextTextToImageResponses] + +export type GetFalAiFluxProKontextTextToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-pro/kontext/text-to-image/requests/{request_id}' +} + +export type GetFalAiFluxProKontextTextToImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxProKontextTextToImageOutput +} + +export type GetFalAiFluxProKontextTextToImageRequestsByRequestIdResponse = + GetFalAiFluxProKontextTextToImageRequestsByRequestIdResponses[keyof GetFalAiFluxProKontextTextToImageRequestsByRequestIdResponses] + +export type GetFalAiBagelRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/bagel/requests/{request_id}/status' +} + +export type GetFalAiBagelRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiBagelRequestsByRequestIdStatusResponse = + GetFalAiBagelRequestsByRequestIdStatusResponses[keyof GetFalAiBagelRequestsByRequestIdStatusResponses] + +export type PutFalAiBagelRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bagel/requests/{request_id}/cancel' +} + +export type PutFalAiBagelRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
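+ *
+ * The matching `PutFalAiBagelRequestsByRequestIdCancelResponse` alias below
+ * is an indexed access over `keyof`, so it already unions every documented
+ * status code for this endpoint.
+ * @example
+ * // Resolves to `{ success?: boolean }` today, and stays in sync if the
+ * // generator ever emits additional status codes.
+ * const outcome: PutFalAiBagelRequestsByRequestIdCancelResponse = {
+ *   success: true,
+ * }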
+ */ + success?: boolean + } +} + +export type PutFalAiBagelRequestsByRequestIdCancelResponse = + PutFalAiBagelRequestsByRequestIdCancelResponses[keyof PutFalAiBagelRequestsByRequestIdCancelResponses] + +export type PostFalAiBagelData = { + body: SchemaBagelInput + path?: never + query?: never + url: '/fal-ai/bagel' +} + +export type PostFalAiBagelResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiBagelResponse = + PostFalAiBagelResponses[keyof PostFalAiBagelResponses] + +export type GetFalAiBagelRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bagel/requests/{request_id}' +} + +export type GetFalAiBagelRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaBagelOutput +} + +export type GetFalAiBagelRequestsByRequestIdResponse = + GetFalAiBagelRequestsByRequestIdResponses[keyof GetFalAiBagelRequestsByRequestIdResponses] + +export type GetFalAiImagen4PreviewUltraRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/imagen4/preview/ultra/requests/{request_id}/status' +} + +export type GetFalAiImagen4PreviewUltraRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiImagen4PreviewUltraRequestsByRequestIdStatusResponse = + GetFalAiImagen4PreviewUltraRequestsByRequestIdStatusResponses[keyof GetFalAiImagen4PreviewUltraRequestsByRequestIdStatusResponses] + +export type PutFalAiImagen4PreviewUltraRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/imagen4/preview/ultra/requests/{request_id}/cancel' +} + +export type PutFalAiImagen4PreviewUltraRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiImagen4PreviewUltraRequestsByRequestIdCancelResponse = + PutFalAiImagen4PreviewUltraRequestsByRequestIdCancelResponses[keyof PutFalAiImagen4PreviewUltraRequestsByRequestIdCancelResponses] + +export type PostFalAiImagen4PreviewUltraData = { + body: SchemaImagen4PreviewUltraInput + path?: never + query?: never + url: '/fal-ai/imagen4/preview/ultra' +} + +export type PostFalAiImagen4PreviewUltraResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImagen4PreviewUltraResponse = + PostFalAiImagen4PreviewUltraResponses[keyof PostFalAiImagen4PreviewUltraResponses] + +export type GetFalAiImagen4PreviewUltraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/imagen4/preview/ultra/requests/{request_id}' +} + +export type GetFalAiImagen4PreviewUltraRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaImagen4PreviewUltraOutput +} + +export type GetFalAiImagen4PreviewUltraRequestsByRequestIdResponse = + GetFalAiImagen4PreviewUltraRequestsByRequestIdResponses[keyof GetFalAiImagen4PreviewUltraRequestsByRequestIdResponses] + +export type GetFalAiDreamoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/dreamo/requests/{request_id}/status' +} + +export type GetFalAiDreamoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiDreamoRequestsByRequestIdStatusResponse = + GetFalAiDreamoRequestsByRequestIdStatusResponses[keyof GetFalAiDreamoRequestsByRequestIdStatusResponses] + +export type PutFalAiDreamoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/dreamo/requests/{request_id}/cancel' +} + +export type PutFalAiDreamoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiDreamoRequestsByRequestIdCancelResponse = + PutFalAiDreamoRequestsByRequestIdCancelResponses[keyof PutFalAiDreamoRequestsByRequestIdCancelResponses] + +export type PostFalAiDreamoData = { + body: SchemaDreamoInput + path?: never + query?: never + url: '/fal-ai/dreamo' +} + +export type PostFalAiDreamoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiDreamoResponse = + PostFalAiDreamoResponses[keyof PostFalAiDreamoResponses] + +export type GetFalAiDreamoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/dreamo/requests/{request_id}' +} + +export type GetFalAiDreamoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaDreamoOutput +} + +export type GetFalAiDreamoRequestsByRequestIdResponse = + GetFalAiDreamoRequestsByRequestIdResponses[keyof GetFalAiDreamoRequestsByRequestIdResponses] + +export type GetFalAiFluxLoraStreamRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-lora/stream/requests/{request_id}/status' +} + +export type GetFalAiFluxLoraStreamRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxLoraStreamRequestsByRequestIdStatusResponse = + GetFalAiFluxLoraStreamRequestsByRequestIdStatusResponses[keyof GetFalAiFluxLoraStreamRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxLoraStreamRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-lora/stream/requests/{request_id}/cancel' +} + +export type PutFalAiFluxLoraStreamRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiFluxLoraStreamRequestsByRequestIdCancelResponse = + PutFalAiFluxLoraStreamRequestsByRequestIdCancelResponses[keyof PutFalAiFluxLoraStreamRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxLoraStreamData = { + body: SchemaFluxLoraStreamInput + path?: never + query?: never + url: '/fal-ai/flux-lora/stream' +} + +export type PostFalAiFluxLoraStreamResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxLoraStreamResponse = + PostFalAiFluxLoraStreamResponses[keyof PostFalAiFluxLoraStreamResponses] + +export type GetFalAiFluxLoraStreamRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-lora/stream/requests/{request_id}' +} + +export type GetFalAiFluxLoraStreamRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxLoraStreamOutput +} + +export type GetFalAiFluxLoraStreamRequestsByRequestIdResponse = + GetFalAiFluxLoraStreamRequestsByRequestIdResponses[keyof GetFalAiFluxLoraStreamRequestsByRequestIdResponses] + +export type GetFalAiMinimaxImage01RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/minimax/image-01/requests/{request_id}/status' +} + +export type GetFalAiMinimaxImage01RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiMinimaxImage01RequestsByRequestIdStatusResponse = + GetFalAiMinimaxImage01RequestsByRequestIdStatusResponses[keyof GetFalAiMinimaxImage01RequestsByRequestIdStatusResponses] + +export type PutFalAiMinimaxImage01RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/image-01/requests/{request_id}/cancel' +} + +export type PutFalAiMinimaxImage01RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiMinimaxImage01RequestsByRequestIdCancelResponse = + PutFalAiMinimaxImage01RequestsByRequestIdCancelResponses[keyof PutFalAiMinimaxImage01RequestsByRequestIdCancelResponses] + +export type PostFalAiMinimaxImage01Data = { + body: SchemaMinimaxImage01Input + path?: never + query?: never + url: '/fal-ai/minimax/image-01' +} + +export type PostFalAiMinimaxImage01Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMinimaxImage01Response = + PostFalAiMinimaxImage01Responses[keyof PostFalAiMinimaxImage01Responses] + +export type GetFalAiMinimaxImage01RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/image-01/requests/{request_id}' +} + +export type GetFalAiMinimaxImage01RequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaMinimaxImage01Output +} + +export type GetFalAiMinimaxImage01RequestsByRequestIdResponse = + GetFalAiMinimaxImage01RequestsByRequestIdResponses[keyof GetFalAiMinimaxImage01RequestsByRequestIdResponses] + +export type GetFalAiPonyV7RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pony-v7/requests/{request_id}/status' +} + +export type GetFalAiPonyV7RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiPonyV7RequestsByRequestIdStatusResponse = + GetFalAiPonyV7RequestsByRequestIdStatusResponses[keyof GetFalAiPonyV7RequestsByRequestIdStatusResponses] + +export type PutFalAiPonyV7RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pony-v7/requests/{request_id}/cancel' +} + +export type PutFalAiPonyV7RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiPonyV7RequestsByRequestIdCancelResponse = + PutFalAiPonyV7RequestsByRequestIdCancelResponses[keyof PutFalAiPonyV7RequestsByRequestIdCancelResponses] + +export type PostFalAiPonyV7Data = { + body: SchemaPonyV7Input + path?: never + query?: never + url: '/fal-ai/pony-v7' +} + +export type PostFalAiPonyV7Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPonyV7Response = + PostFalAiPonyV7Responses[keyof PostFalAiPonyV7Responses] + +export type GetFalAiPonyV7RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pony-v7/requests/{request_id}' +} + +export type GetFalAiPonyV7RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPonyV7Output +} + +export type GetFalAiPonyV7RequestsByRequestIdResponse = + GetFalAiPonyV7RequestsByRequestIdResponses[keyof GetFalAiPonyV7RequestsByRequestIdResponses] + +export type GetFalAiIdeogramV3RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ideogram/v3/requests/{request_id}/status' +} + +export type GetFalAiIdeogramV3RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiIdeogramV3RequestsByRequestIdStatusResponse = + GetFalAiIdeogramV3RequestsByRequestIdStatusResponses[keyof GetFalAiIdeogramV3RequestsByRequestIdStatusResponses] + +export type PutFalAiIdeogramV3RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ideogram/v3/requests/{request_id}/cancel' +} + +export type PutFalAiIdeogramV3RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiIdeogramV3RequestsByRequestIdCancelResponse = + PutFalAiIdeogramV3RequestsByRequestIdCancelResponses[keyof PutFalAiIdeogramV3RequestsByRequestIdCancelResponses] + +export type PostFalAiIdeogramV3Data = { + body: SchemaIdeogramV3Input + path?: never + query?: never + url: '/fal-ai/ideogram/v3' +} + +export type PostFalAiIdeogramV3Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiIdeogramV3Response = + PostFalAiIdeogramV3Responses[keyof PostFalAiIdeogramV3Responses] + +export type GetFalAiIdeogramV3RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ideogram/v3/requests/{request_id}' +} + +export type GetFalAiIdeogramV3RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaIdeogramV3Output +} + +export type GetFalAiIdeogramV3RequestsByRequestIdResponse = + GetFalAiIdeogramV3RequestsByRequestIdResponses[keyof GetFalAiIdeogramV3RequestsByRequestIdResponses] + +export type GetFalAiFLiteStandardRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/f-lite/standard/requests/{request_id}/status' +} + +export type GetFalAiFLiteStandardRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFLiteStandardRequestsByRequestIdStatusResponse = + GetFalAiFLiteStandardRequestsByRequestIdStatusResponses[keyof GetFalAiFLiteStandardRequestsByRequestIdStatusResponses] + +export type PutFalAiFLiteStandardRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/f-lite/standard/requests/{request_id}/cancel' +} + +export type PutFalAiFLiteStandardRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFLiteStandardRequestsByRequestIdCancelResponse = + PutFalAiFLiteStandardRequestsByRequestIdCancelResponses[keyof PutFalAiFLiteStandardRequestsByRequestIdCancelResponses] + +export type PostFalAiFLiteStandardData = { + body: SchemaFLiteStandardInput + path?: never + query?: never + url: '/fal-ai/f-lite/standard' +} + +export type PostFalAiFLiteStandardResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFLiteStandardResponse = + PostFalAiFLiteStandardResponses[keyof PostFalAiFLiteStandardResponses] + +export type GetFalAiFLiteStandardRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/f-lite/standard/requests/{request_id}' +} + +export type GetFalAiFLiteStandardRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFLiteStandardOutput +} + +export type GetFalAiFLiteStandardRequestsByRequestIdResponse = + GetFalAiFLiteStandardRequestsByRequestIdResponses[keyof GetFalAiFLiteStandardRequestsByRequestIdResponses] + +export type GetFalAiFLiteTextureRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/f-lite/texture/requests/{request_id}/status' +} + +export type GetFalAiFLiteTextureRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFLiteTextureRequestsByRequestIdStatusResponse = + GetFalAiFLiteTextureRequestsByRequestIdStatusResponses[keyof GetFalAiFLiteTextureRequestsByRequestIdStatusResponses] + +export type PutFalAiFLiteTextureRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/f-lite/texture/requests/{request_id}/cancel' +} + +export type PutFalAiFLiteTextureRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFLiteTextureRequestsByRequestIdCancelResponse = + PutFalAiFLiteTextureRequestsByRequestIdCancelResponses[keyof PutFalAiFLiteTextureRequestsByRequestIdCancelResponses] + +export type PostFalAiFLiteTextureData = { + body: SchemaFLiteTextureInput + path?: never + query?: never + url: '/fal-ai/f-lite/texture' +} + +export type PostFalAiFLiteTextureResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFLiteTextureResponse = + PostFalAiFLiteTextureResponses[keyof PostFalAiFLiteTextureResponses] + +export type GetFalAiFLiteTextureRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/f-lite/texture/requests/{request_id}' +} + +export type GetFalAiFLiteTextureRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFLiteTextureOutput +} + +export type GetFalAiFLiteTextureRequestsByRequestIdResponse = + GetFalAiFLiteTextureRequestsByRequestIdResponses[keyof GetFalAiFLiteTextureRequestsByRequestIdResponses] + +export type GetFalAiGptImage1TextToImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/gpt-image-1/text-to-image/requests/{request_id}/status' +} + +export type GetFalAiGptImage1TextToImageRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiGptImage1TextToImageRequestsByRequestIdStatusResponse = + GetFalAiGptImage1TextToImageRequestsByRequestIdStatusResponses[keyof GetFalAiGptImage1TextToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiGptImage1TextToImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/gpt-image-1/text-to-image/requests/{request_id}/cancel' +} + +export type PutFalAiGptImage1TextToImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiGptImage1TextToImageRequestsByRequestIdCancelResponse = + PutFalAiGptImage1TextToImageRequestsByRequestIdCancelResponses[keyof PutFalAiGptImage1TextToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiGptImage1TextToImageData = { + body: SchemaGptImage1TextToImageInput + path?: never + query?: never + url: '/fal-ai/gpt-image-1/text-to-image' +} + +export type PostFalAiGptImage1TextToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiGptImage1TextToImageResponse = + PostFalAiGptImage1TextToImageResponses[keyof PostFalAiGptImage1TextToImageResponses] + +export type GetFalAiGptImage1TextToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/gpt-image-1/text-to-image/requests/{request_id}' +} + +export type GetFalAiGptImage1TextToImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaGptImage1TextToImageOutput +} + +export type GetFalAiGptImage1TextToImageRequestsByRequestIdResponse = + GetFalAiGptImage1TextToImageRequestsByRequestIdResponses[keyof GetFalAiGptImage1TextToImageRequestsByRequestIdResponses] + +export type GetFalAiSanaV1516bRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/sana/v1.5/1.6b/requests/{request_id}/status' +} + +export type GetFalAiSanaV1516bRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSanaV1516bRequestsByRequestIdStatusResponse = + GetFalAiSanaV1516bRequestsByRequestIdStatusResponses[keyof GetFalAiSanaV1516bRequestsByRequestIdStatusResponses] + +export type PutFalAiSanaV1516bRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sana/v1.5/1.6b/requests/{request_id}/cancel' +} + +export type PutFalAiSanaV1516bRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiSanaV1516bRequestsByRequestIdCancelResponse = + PutFalAiSanaV1516bRequestsByRequestIdCancelResponses[keyof PutFalAiSanaV1516bRequestsByRequestIdCancelResponses] + +export type PostFalAiSanaV1516bData = { + body: SchemaSanaV1516bInput + path?: never + query?: never + url: '/fal-ai/sana/v1.5/1.6b' +} + +export type PostFalAiSanaV1516bResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSanaV1516bResponse = + PostFalAiSanaV1516bResponses[keyof PostFalAiSanaV1516bResponses] + +export type GetFalAiSanaV1516bRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sana/v1.5/1.6b/requests/{request_id}' +} + +export type GetFalAiSanaV1516bRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaSanaV1516bOutput +} + +export type GetFalAiSanaV1516bRequestsByRequestIdResponse = + GetFalAiSanaV1516bRequestsByRequestIdResponses[keyof GetFalAiSanaV1516bRequestsByRequestIdResponses] + +export type GetFalAiSanaV1548bRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/sana/v1.5/4.8b/requests/{request_id}/status' +} + +export type GetFalAiSanaV1548bRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSanaV1548bRequestsByRequestIdStatusResponse = + GetFalAiSanaV1548bRequestsByRequestIdStatusResponses[keyof GetFalAiSanaV1548bRequestsByRequestIdStatusResponses] + +export type PutFalAiSanaV1548bRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sana/v1.5/4.8b/requests/{request_id}/cancel' +} + +export type PutFalAiSanaV1548bRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiSanaV1548bRequestsByRequestIdCancelResponse = + PutFalAiSanaV1548bRequestsByRequestIdCancelResponses[keyof PutFalAiSanaV1548bRequestsByRequestIdCancelResponses] + +export type PostFalAiSanaV1548bData = { + body: SchemaSanaV1548bInput + path?: never + query?: never + url: '/fal-ai/sana/v1.5/4.8b' +} + +export type PostFalAiSanaV1548bResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSanaV1548bResponse = + PostFalAiSanaV1548bResponses[keyof PostFalAiSanaV1548bResponses] + +export type GetFalAiSanaV1548bRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sana/v1.5/4.8b/requests/{request_id}' +} + +export type GetFalAiSanaV1548bRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSanaV1548bOutput +} + +export type GetFalAiSanaV1548bRequestsByRequestIdResponse = + GetFalAiSanaV1548bRequestsByRequestIdResponses[keyof GetFalAiSanaV1548bRequestsByRequestIdResponses] + +export type GetFalAiSanaSprintRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/sana/sprint/requests/{request_id}/status' +} + +export type GetFalAiSanaSprintRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSanaSprintRequestsByRequestIdStatusResponse = + GetFalAiSanaSprintRequestsByRequestIdStatusResponses[keyof GetFalAiSanaSprintRequestsByRequestIdStatusResponses] + +export type PutFalAiSanaSprintRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sana/sprint/requests/{request_id}/cancel' +} + +export type PutFalAiSanaSprintRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiSanaSprintRequestsByRequestIdCancelResponse = + PutFalAiSanaSprintRequestsByRequestIdCancelResponses[keyof PutFalAiSanaSprintRequestsByRequestIdCancelResponses] + +export type PostFalAiSanaSprintData = { + body: SchemaSanaSprintInput + path?: never + query?: never + url: '/fal-ai/sana/sprint' +} + +export type PostFalAiSanaSprintResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSanaSprintResponse = + PostFalAiSanaSprintResponses[keyof PostFalAiSanaSprintResponses] + +export type GetFalAiSanaSprintRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sana/sprint/requests/{request_id}' +} + +export type GetFalAiSanaSprintRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSanaSprintOutput +} + +export type GetFalAiSanaSprintRequestsByRequestIdResponse = + GetFalAiSanaSprintRequestsByRequestIdResponses[keyof GetFalAiSanaSprintRequestsByRequestIdResponses] + +export type GetRundiffusionFalJuggernautFluxLightningRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/rundiffusion-fal/juggernaut-flux/lightning/requests/{request_id}/status' + } + +export type GetRundiffusionFalJuggernautFluxLightningRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetRundiffusionFalJuggernautFluxLightningRequestsByRequestIdStatusResponse = + GetRundiffusionFalJuggernautFluxLightningRequestsByRequestIdStatusResponses[keyof GetRundiffusionFalJuggernautFluxLightningRequestsByRequestIdStatusResponses] + +export type PutRundiffusionFalJuggernautFluxLightningRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/rundiffusion-fal/juggernaut-flux/lightning/requests/{request_id}/cancel' + } + +export type PutRundiffusionFalJuggernautFluxLightningRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutRundiffusionFalJuggernautFluxLightningRequestsByRequestIdCancelResponse = + PutRundiffusionFalJuggernautFluxLightningRequestsByRequestIdCancelResponses[keyof PutRundiffusionFalJuggernautFluxLightningRequestsByRequestIdCancelResponses] + +export type PostRundiffusionFalJuggernautFluxLightningData = { + body: SchemaJuggernautFluxLightningInput + path?: never + query?: never + url: '/rundiffusion-fal/juggernaut-flux/lightning' +} + +export type PostRundiffusionFalJuggernautFluxLightningResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostRundiffusionFalJuggernautFluxLightningResponse = + PostRundiffusionFalJuggernautFluxLightningResponses[keyof PostRundiffusionFalJuggernautFluxLightningResponses] + +export type GetRundiffusionFalJuggernautFluxLightningRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/rundiffusion-fal/juggernaut-flux/lightning/requests/{request_id}' +} + +export type GetRundiffusionFalJuggernautFluxLightningRequestsByRequestIdResponses = + { + /** + * Result of the request. 
+ */ + 200: SchemaJuggernautFluxLightningOutput + } + +export type GetRundiffusionFalJuggernautFluxLightningRequestsByRequestIdResponse = + GetRundiffusionFalJuggernautFluxLightningRequestsByRequestIdResponses[keyof GetRundiffusionFalJuggernautFluxLightningRequestsByRequestIdResponses] + +export type GetRundiffusionFalJuggernautFluxProRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/rundiffusion-fal/juggernaut-flux/pro/requests/{request_id}/status' +} + +export type GetRundiffusionFalJuggernautFluxProRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetRundiffusionFalJuggernautFluxProRequestsByRequestIdStatusResponse = + GetRundiffusionFalJuggernautFluxProRequestsByRequestIdStatusResponses[keyof GetRundiffusionFalJuggernautFluxProRequestsByRequestIdStatusResponses] + +export type PutRundiffusionFalJuggernautFluxProRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/rundiffusion-fal/juggernaut-flux/pro/requests/{request_id}/cancel' +} + +export type PutRundiffusionFalJuggernautFluxProRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutRundiffusionFalJuggernautFluxProRequestsByRequestIdCancelResponse = + PutRundiffusionFalJuggernautFluxProRequestsByRequestIdCancelResponses[keyof PutRundiffusionFalJuggernautFluxProRequestsByRequestIdCancelResponses] + +export type PostRundiffusionFalJuggernautFluxProData = { + body: SchemaJuggernautFluxProInput + path?: never + query?: never + url: '/rundiffusion-fal/juggernaut-flux/pro' +} + +export type PostRundiffusionFalJuggernautFluxProResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostRundiffusionFalJuggernautFluxProResponse = + PostRundiffusionFalJuggernautFluxProResponses[keyof PostRundiffusionFalJuggernautFluxProResponses] + +export type GetRundiffusionFalJuggernautFluxProRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/rundiffusion-fal/juggernaut-flux/pro/requests/{request_id}' +} + +export type GetRundiffusionFalJuggernautFluxProRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaJuggernautFluxProOutput +} + +export type GetRundiffusionFalJuggernautFluxProRequestsByRequestIdResponse = + GetRundiffusionFalJuggernautFluxProRequestsByRequestIdResponses[keyof GetRundiffusionFalJuggernautFluxProRequestsByRequestIdResponses] + +export type GetRundiffusionFalJuggernautFluxLoraRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/rundiffusion-fal/juggernaut-flux-lora/requests/{request_id}/status' + } + +export type GetRundiffusionFalJuggernautFluxLoraRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
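+ *
+ * Illustrative polling sketch only; the queue base URL, `FAL_KEY`,
+ * `requestId`, and the one-second interval are assumptions.
+ * @example
+ * // Poll the status URL until the queue reports COMPLETED, then fetch
+ * // the result endpoint for the output payload.
+ * let status: SchemaQueueStatus
+ * do {
+ *   await new Promise((r) => setTimeout(r, 1_000))
+ *   const res = await fetch(
+ *     `https://queue.fal.run/rundiffusion-fal/juggernaut-flux-lora/requests/${requestId}/status`,
+ *     { headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+ *   )
+ *   status = await res.json()
+ * } while (status.status !== 'COMPLETED')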
+ */ + 200: SchemaQueueStatus + } + +export type GetRundiffusionFalJuggernautFluxLoraRequestsByRequestIdStatusResponse = + GetRundiffusionFalJuggernautFluxLoraRequestsByRequestIdStatusResponses[keyof GetRundiffusionFalJuggernautFluxLoraRequestsByRequestIdStatusResponses] + +export type PutRundiffusionFalJuggernautFluxLoraRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/rundiffusion-fal/juggernaut-flux-lora/requests/{request_id}/cancel' + } + +export type PutRundiffusionFalJuggernautFluxLoraRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutRundiffusionFalJuggernautFluxLoraRequestsByRequestIdCancelResponse = + PutRundiffusionFalJuggernautFluxLoraRequestsByRequestIdCancelResponses[keyof PutRundiffusionFalJuggernautFluxLoraRequestsByRequestIdCancelResponses] + +export type PostRundiffusionFalJuggernautFluxLoraData = { + body: SchemaJuggernautFluxLoraInput + path?: never + query?: never + url: '/rundiffusion-fal/juggernaut-flux-lora' +} + +export type PostRundiffusionFalJuggernautFluxLoraResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostRundiffusionFalJuggernautFluxLoraResponse = + PostRundiffusionFalJuggernautFluxLoraResponses[keyof PostRundiffusionFalJuggernautFluxLoraResponses] + +export type GetRundiffusionFalJuggernautFluxLoraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/rundiffusion-fal/juggernaut-flux-lora/requests/{request_id}' +} + +export type GetRundiffusionFalJuggernautFluxLoraRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaJuggernautFluxLoraOutput +} + +export type GetRundiffusionFalJuggernautFluxLoraRequestsByRequestIdResponse = + GetRundiffusionFalJuggernautFluxLoraRequestsByRequestIdResponses[keyof GetRundiffusionFalJuggernautFluxLoraRequestsByRequestIdResponses] + +export type GetRundiffusionFalRundiffusionPhotoFluxRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/rundiffusion-fal/rundiffusion-photo-flux/requests/{request_id}/status' + } + +export type GetRundiffusionFalRundiffusionPhotoFluxRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetRundiffusionFalRundiffusionPhotoFluxRequestsByRequestIdStatusResponse = + GetRundiffusionFalRundiffusionPhotoFluxRequestsByRequestIdStatusResponses[keyof GetRundiffusionFalRundiffusionPhotoFluxRequestsByRequestIdStatusResponses] + +export type PutRundiffusionFalRundiffusionPhotoFluxRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/rundiffusion-fal/rundiffusion-photo-flux/requests/{request_id}/cancel' + } + +export type PutRundiffusionFalRundiffusionPhotoFluxRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutRundiffusionFalRundiffusionPhotoFluxRequestsByRequestIdCancelResponse = + PutRundiffusionFalRundiffusionPhotoFluxRequestsByRequestIdCancelResponses[keyof PutRundiffusionFalRundiffusionPhotoFluxRequestsByRequestIdCancelResponses] + +export type PostRundiffusionFalRundiffusionPhotoFluxData = { + body: SchemaRundiffusionPhotoFluxInput + path?: never + query?: never + url: '/rundiffusion-fal/rundiffusion-photo-flux' +} + +export type PostRundiffusionFalRundiffusionPhotoFluxResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostRundiffusionFalRundiffusionPhotoFluxResponse = + PostRundiffusionFalRundiffusionPhotoFluxResponses[keyof PostRundiffusionFalRundiffusionPhotoFluxResponses] + +export type GetRundiffusionFalRundiffusionPhotoFluxRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/rundiffusion-fal/rundiffusion-photo-flux/requests/{request_id}' +} + +export type GetRundiffusionFalRundiffusionPhotoFluxRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaRundiffusionPhotoFluxOutput + } + +export type GetRundiffusionFalRundiffusionPhotoFluxRequestsByRequestIdResponse = + GetRundiffusionFalRundiffusionPhotoFluxRequestsByRequestIdResponses[keyof GetRundiffusionFalRundiffusionPhotoFluxRequestsByRequestIdResponses] + +export type GetRundiffusionFalJuggernautFluxBaseRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/rundiffusion-fal/juggernaut-flux/base/requests/{request_id}/status' + } + +export type GetRundiffusionFalJuggernautFluxBaseRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetRundiffusionFalJuggernautFluxBaseRequestsByRequestIdStatusResponse = + GetRundiffusionFalJuggernautFluxBaseRequestsByRequestIdStatusResponses[keyof GetRundiffusionFalJuggernautFluxBaseRequestsByRequestIdStatusResponses] + +export type PutRundiffusionFalJuggernautFluxBaseRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/rundiffusion-fal/juggernaut-flux/base/requests/{request_id}/cancel' + } + +export type PutRundiffusionFalJuggernautFluxBaseRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutRundiffusionFalJuggernautFluxBaseRequestsByRequestIdCancelResponse = + PutRundiffusionFalJuggernautFluxBaseRequestsByRequestIdCancelResponses[keyof PutRundiffusionFalJuggernautFluxBaseRequestsByRequestIdCancelResponses] + +export type PostRundiffusionFalJuggernautFluxBaseData = { + body: SchemaJuggernautFluxBaseInput + path?: never + query?: never + url: '/rundiffusion-fal/juggernaut-flux/base' +} + +export type PostRundiffusionFalJuggernautFluxBaseResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostRundiffusionFalJuggernautFluxBaseResponse = + PostRundiffusionFalJuggernautFluxBaseResponses[keyof PostRundiffusionFalJuggernautFluxBaseResponses] + +export type GetRundiffusionFalJuggernautFluxBaseRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/rundiffusion-fal/juggernaut-flux/base/requests/{request_id}' +} + +export type GetRundiffusionFalJuggernautFluxBaseRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaJuggernautFluxBaseOutput +} + +export type GetRundiffusionFalJuggernautFluxBaseRequestsByRequestIdResponse = + GetRundiffusionFalJuggernautFluxBaseRequestsByRequestIdResponses[keyof GetRundiffusionFalJuggernautFluxBaseRequestsByRequestIdResponses] + +export type GetFalAiCogview4RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/cogview4/requests/{request_id}/status' +} + +export type GetFalAiCogview4RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiCogview4RequestsByRequestIdStatusResponse = + GetFalAiCogview4RequestsByRequestIdStatusResponses[keyof GetFalAiCogview4RequestsByRequestIdStatusResponses] + +export type PutFalAiCogview4RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/cogview4/requests/{request_id}/cancel' +} + +export type PutFalAiCogview4RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiCogview4RequestsByRequestIdCancelResponse = + PutFalAiCogview4RequestsByRequestIdCancelResponses[keyof PutFalAiCogview4RequestsByRequestIdCancelResponses] + +export type PostFalAiCogview4Data = { + body: SchemaCogview4Input + path?: never + query?: never + url: '/fal-ai/cogview4' +} + +export type PostFalAiCogview4Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiCogview4Response = + PostFalAiCogview4Responses[keyof PostFalAiCogview4Responses] + +export type GetFalAiCogview4RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/cogview4/requests/{request_id}' +} + +export type GetFalAiCogview4RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaCogview4Output +} + +export type GetFalAiCogview4RequestsByRequestIdResponse = + GetFalAiCogview4RequestsByRequestIdResponses[keyof GetFalAiCogview4RequestsByRequestIdResponses] + +export type GetFalAiIdeogramV2aTurboRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ideogram/v2a/turbo/requests/{request_id}/status' +} + +export type GetFalAiIdeogramV2aTurboRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiIdeogramV2aTurboRequestsByRequestIdStatusResponse = + GetFalAiIdeogramV2aTurboRequestsByRequestIdStatusResponses[keyof GetFalAiIdeogramV2aTurboRequestsByRequestIdStatusResponses] + +export type PutFalAiIdeogramV2aTurboRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ideogram/v2a/turbo/requests/{request_id}/cancel' +} + +export type PutFalAiIdeogramV2aTurboRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiIdeogramV2aTurboRequestsByRequestIdCancelResponse = + PutFalAiIdeogramV2aTurboRequestsByRequestIdCancelResponses[keyof PutFalAiIdeogramV2aTurboRequestsByRequestIdCancelResponses] + +export type PostFalAiIdeogramV2aTurboData = { + body: SchemaIdeogramV2aTurboInput + path?: never + query?: never + url: '/fal-ai/ideogram/v2a/turbo' +} + +export type PostFalAiIdeogramV2aTurboResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiIdeogramV2aTurboResponse = + PostFalAiIdeogramV2aTurboResponses[keyof PostFalAiIdeogramV2aTurboResponses] + +export type GetFalAiIdeogramV2aTurboRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ideogram/v2a/turbo/requests/{request_id}' +} + +export type GetFalAiIdeogramV2aTurboRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaIdeogramV2aTurboOutput +} + +export type GetFalAiIdeogramV2aTurboRequestsByRequestIdResponse = + GetFalAiIdeogramV2aTurboRequestsByRequestIdResponses[keyof GetFalAiIdeogramV2aTurboRequestsByRequestIdResponses] + +export type GetFalAiIdeogramV2aRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ideogram/v2a/requests/{request_id}/status' +} + +export type GetFalAiIdeogramV2aRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiIdeogramV2aRequestsByRequestIdStatusResponse = + GetFalAiIdeogramV2aRequestsByRequestIdStatusResponses[keyof GetFalAiIdeogramV2aRequestsByRequestIdStatusResponses] + +export type PutFalAiIdeogramV2aRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ideogram/v2a/requests/{request_id}/cancel' +} + +export type PutFalAiIdeogramV2aRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiIdeogramV2aRequestsByRequestIdCancelResponse = + PutFalAiIdeogramV2aRequestsByRequestIdCancelResponses[keyof PutFalAiIdeogramV2aRequestsByRequestIdCancelResponses] + +export type PostFalAiIdeogramV2aData = { + body: SchemaIdeogramV2aInput + path?: never + query?: never + url: '/fal-ai/ideogram/v2a' +} + +export type PostFalAiIdeogramV2aResponses = { + /** + * The request status. 
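+ *
+ * Editorial sketch, not generator output: submitting a generation request to
+ * the queue. `SchemaIdeogramV2aInput` is declared elsewhere in this file; the
+ * base URL, the `FAL_KEY` environment variable, and the assumption that
+ * `prompt` alone forms a valid body are illustrative only.
+ *
+ * @example
+ * const body: PostFalAiIdeogramV2aData['body'] = { prompt: 'a lighthouse at dusk' }
+ * const res = await fetch('https://queue.fal.run/fal-ai/ideogram/v2a', {
+ *   method: 'POST',
+ *   headers: {
+ *     Authorization: `Key ${process.env.FAL_KEY}`,
+ *     'Content-Type': 'application/json',
+ *   },
+ *   body: JSON.stringify(body),
+ * })
+ * const queued = (await res.json()) as PostFalAiIdeogramV2aResponse
+ * // queued.request_id feeds the status/result/cancel endpoints above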
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiIdeogramV2aResponse = + PostFalAiIdeogramV2aResponses[keyof PostFalAiIdeogramV2aResponses] + +export type GetFalAiIdeogramV2aRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ideogram/v2a/requests/{request_id}' +} + +export type GetFalAiIdeogramV2aRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaIdeogramV2aOutput +} + +export type GetFalAiIdeogramV2aRequestsByRequestIdResponse = + GetFalAiIdeogramV2aRequestsByRequestIdResponses[keyof GetFalAiIdeogramV2aRequestsByRequestIdResponses] + +export type GetFalAiFluxControlLoraCannyRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-control-lora-canny/requests/{request_id}/status' +} + +export type GetFalAiFluxControlLoraCannyRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxControlLoraCannyRequestsByRequestIdStatusResponse = + GetFalAiFluxControlLoraCannyRequestsByRequestIdStatusResponses[keyof GetFalAiFluxControlLoraCannyRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxControlLoraCannyRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-control-lora-canny/requests/{request_id}/cancel' +} + +export type PutFalAiFluxControlLoraCannyRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFluxControlLoraCannyRequestsByRequestIdCancelResponse = + PutFalAiFluxControlLoraCannyRequestsByRequestIdCancelResponses[keyof PutFalAiFluxControlLoraCannyRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxControlLoraCannyData = { + body: SchemaFluxControlLoraCannyInput + path?: never + query?: never + url: '/fal-ai/flux-control-lora-canny' +} + +export type PostFalAiFluxControlLoraCannyResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxControlLoraCannyResponse = + PostFalAiFluxControlLoraCannyResponses[keyof PostFalAiFluxControlLoraCannyResponses] + +export type GetFalAiFluxControlLoraCannyRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-control-lora-canny/requests/{request_id}' +} + +export type GetFalAiFluxControlLoraCannyRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxControlLoraCannyOutput +} + +export type GetFalAiFluxControlLoraCannyRequestsByRequestIdResponse = + GetFalAiFluxControlLoraCannyRequestsByRequestIdResponses[keyof GetFalAiFluxControlLoraCannyRequestsByRequestIdResponses] + +export type GetFalAiFluxControlLoraDepthRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-control-lora-depth/requests/{request_id}/status' +} + +export type GetFalAiFluxControlLoraDepthRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxControlLoraDepthRequestsByRequestIdStatusResponse = + GetFalAiFluxControlLoraDepthRequestsByRequestIdStatusResponses[keyof GetFalAiFluxControlLoraDepthRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxControlLoraDepthRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-control-lora-depth/requests/{request_id}/cancel' +} + +export type PutFalAiFluxControlLoraDepthRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFluxControlLoraDepthRequestsByRequestIdCancelResponse = + PutFalAiFluxControlLoraDepthRequestsByRequestIdCancelResponses[keyof PutFalAiFluxControlLoraDepthRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxControlLoraDepthData = { + body: SchemaFluxControlLoraDepthInput + path?: never + query?: never + url: '/fal-ai/flux-control-lora-depth' +} + +export type PostFalAiFluxControlLoraDepthResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxControlLoraDepthResponse = + PostFalAiFluxControlLoraDepthResponses[keyof PostFalAiFluxControlLoraDepthResponses] + +export type GetFalAiFluxControlLoraDepthRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-control-lora-depth/requests/{request_id}' +} + +export type GetFalAiFluxControlLoraDepthRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxControlLoraDepthOutput +} + +export type GetFalAiFluxControlLoraDepthRequestsByRequestIdResponse = + GetFalAiFluxControlLoraDepthRequestsByRequestIdResponses[keyof GetFalAiFluxControlLoraDepthRequestsByRequestIdResponses] + +export type GetFalAiImagen3RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/imagen3/requests/{request_id}/status' +} + +export type GetFalAiImagen3RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiImagen3RequestsByRequestIdStatusResponse = + GetFalAiImagen3RequestsByRequestIdStatusResponses[keyof GetFalAiImagen3RequestsByRequestIdStatusResponses] + +export type PutFalAiImagen3RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/imagen3/requests/{request_id}/cancel' +} + +export type PutFalAiImagen3RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiImagen3RequestsByRequestIdCancelResponse = + PutFalAiImagen3RequestsByRequestIdCancelResponses[keyof PutFalAiImagen3RequestsByRequestIdCancelResponses] + +export type PostFalAiImagen3Data = { + body: SchemaImagen3Input + path?: never + query?: never + url: '/fal-ai/imagen3' +} + +export type PostFalAiImagen3Responses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiImagen3Response = + PostFalAiImagen3Responses[keyof PostFalAiImagen3Responses] + +export type GetFalAiImagen3RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/imagen3/requests/{request_id}' +} + +export type GetFalAiImagen3RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImagen3Output +} + +export type GetFalAiImagen3RequestsByRequestIdResponse = + GetFalAiImagen3RequestsByRequestIdResponses[keyof GetFalAiImagen3RequestsByRequestIdResponses] + +export type GetFalAiImagen3FastRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/imagen3/fast/requests/{request_id}/status' +} + +export type GetFalAiImagen3FastRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiImagen3FastRequestsByRequestIdStatusResponse = + GetFalAiImagen3FastRequestsByRequestIdStatusResponses[keyof GetFalAiImagen3FastRequestsByRequestIdStatusResponses] + +export type PutFalAiImagen3FastRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/imagen3/fast/requests/{request_id}/cancel' +} + +export type PutFalAiImagen3FastRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiImagen3FastRequestsByRequestIdCancelResponse = + PutFalAiImagen3FastRequestsByRequestIdCancelResponses[keyof PutFalAiImagen3FastRequestsByRequestIdCancelResponses] + +export type PostFalAiImagen3FastData = { + body: SchemaImagen3FastInput + path?: never + query?: never + url: '/fal-ai/imagen3/fast' +} + +export type PostFalAiImagen3FastResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImagen3FastResponse = + PostFalAiImagen3FastResponses[keyof PostFalAiImagen3FastResponses] + +export type GetFalAiImagen3FastRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/imagen3/fast/requests/{request_id}' +} + +export type GetFalAiImagen3FastRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImagen3FastOutput +} + +export type GetFalAiImagen3FastRequestsByRequestIdResponse = + GetFalAiImagen3FastRequestsByRequestIdResponses[keyof GetFalAiImagen3FastRequestsByRequestIdResponses] + +export type GetFalAiLuminaImageV2RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/lumina-image/v2/requests/{request_id}/status' +} + +export type GetFalAiLuminaImageV2RequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiLuminaImageV2RequestsByRequestIdStatusResponse = + GetFalAiLuminaImageV2RequestsByRequestIdStatusResponses[keyof GetFalAiLuminaImageV2RequestsByRequestIdStatusResponses] + +export type PutFalAiLuminaImageV2RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/lumina-image/v2/requests/{request_id}/cancel' +} + +export type PutFalAiLuminaImageV2RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLuminaImageV2RequestsByRequestIdCancelResponse = + PutFalAiLuminaImageV2RequestsByRequestIdCancelResponses[keyof PutFalAiLuminaImageV2RequestsByRequestIdCancelResponses] + +export type PostFalAiLuminaImageV2Data = { + body: SchemaLuminaImageV2Input + path?: never + query?: never + url: '/fal-ai/lumina-image/v2' +} + +export type PostFalAiLuminaImageV2Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLuminaImageV2Response = + PostFalAiLuminaImageV2Responses[keyof PostFalAiLuminaImageV2Responses] + +export type GetFalAiLuminaImageV2RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/lumina-image/v2/requests/{request_id}' +} + +export type GetFalAiLuminaImageV2RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLuminaImageV2Output +} + +export type GetFalAiLuminaImageV2RequestsByRequestIdResponse = + GetFalAiLuminaImageV2RequestsByRequestIdResponses[keyof GetFalAiLuminaImageV2RequestsByRequestIdResponses] + +export type GetFalAiJanusRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/janus/requests/{request_id}/status' +} + +export type GetFalAiJanusRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiJanusRequestsByRequestIdStatusResponse = + GetFalAiJanusRequestsByRequestIdStatusResponses[keyof GetFalAiJanusRequestsByRequestIdStatusResponses] + +export type PutFalAiJanusRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/janus/requests/{request_id}/cancel' +} + +export type PutFalAiJanusRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiJanusRequestsByRequestIdCancelResponse = + PutFalAiJanusRequestsByRequestIdCancelResponses[keyof PutFalAiJanusRequestsByRequestIdCancelResponses] + +export type PostFalAiJanusData = { + body: SchemaJanusInput + path?: never + query?: never + url: '/fal-ai/janus' +} + +export type PostFalAiJanusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiJanusResponse = + PostFalAiJanusResponses[keyof PostFalAiJanusResponses] + +export type GetFalAiJanusRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/janus/requests/{request_id}' +} + +export type GetFalAiJanusRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaJanusOutput +} + +export type GetFalAiJanusRequestsByRequestIdResponse = + GetFalAiJanusRequestsByRequestIdResponses[keyof GetFalAiJanusRequestsByRequestIdResponses] + +export type GetFalAiFluxProV11UltraFinetunedRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-pro/v1.1-ultra-finetuned/requests/{request_id}/status' +} + +export type GetFalAiFluxProV11UltraFinetunedRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFluxProV11UltraFinetunedRequestsByRequestIdStatusResponse = + GetFalAiFluxProV11UltraFinetunedRequestsByRequestIdStatusResponses[keyof GetFalAiFluxProV11UltraFinetunedRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxProV11UltraFinetunedRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-pro/v1.1-ultra-finetuned/requests/{request_id}/cancel' +} + +export type PutFalAiFluxProV11UltraFinetunedRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFluxProV11UltraFinetunedRequestsByRequestIdCancelResponse = + PutFalAiFluxProV11UltraFinetunedRequestsByRequestIdCancelResponses[keyof PutFalAiFluxProV11UltraFinetunedRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxProV11UltraFinetunedData = { + body: SchemaFluxProV11UltraFinetunedInput + path?: never + query?: never + url: '/fal-ai/flux-pro/v1.1-ultra-finetuned' +} + +export type PostFalAiFluxProV11UltraFinetunedResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxProV11UltraFinetunedResponse = + PostFalAiFluxProV11UltraFinetunedResponses[keyof PostFalAiFluxProV11UltraFinetunedResponses] + +export type GetFalAiFluxProV11UltraFinetunedRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-pro/v1.1-ultra-finetuned/requests/{request_id}' +} + +export type GetFalAiFluxProV11UltraFinetunedRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxProV11UltraFinetunedOutput +} + +export type GetFalAiFluxProV11UltraFinetunedRequestsByRequestIdResponse = + GetFalAiFluxProV11UltraFinetunedRequestsByRequestIdResponses[keyof GetFalAiFluxProV11UltraFinetunedRequestsByRequestIdResponses] + +export type GetFalAiFluxProV11RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-pro/v1.1/requests/{request_id}/status' +} + +export type GetFalAiFluxProV11RequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxProV11RequestsByRequestIdStatusResponse = + GetFalAiFluxProV11RequestsByRequestIdStatusResponses[keyof GetFalAiFluxProV11RequestsByRequestIdStatusResponses] + +export type PutFalAiFluxProV11RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-pro/v1.1/requests/{request_id}/cancel' +} + +export type PutFalAiFluxProV11RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFluxProV11RequestsByRequestIdCancelResponse = + PutFalAiFluxProV11RequestsByRequestIdCancelResponses[keyof PutFalAiFluxProV11RequestsByRequestIdCancelResponses] + +export type PostFalAiFluxProV11Data = { + body: SchemaFluxProV11Input + path?: never + query?: never + url: '/fal-ai/flux-pro/v1.1' +} + +export type PostFalAiFluxProV11Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxProV11Response = + PostFalAiFluxProV11Responses[keyof PostFalAiFluxProV11Responses] + +export type GetFalAiFluxProV11RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-pro/v1.1/requests/{request_id}' +} + +export type GetFalAiFluxProV11RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxProV11Output +} + +export type GetFalAiFluxProV11RequestsByRequestIdResponse = + GetFalAiFluxProV11RequestsByRequestIdResponses[keyof GetFalAiFluxProV11RequestsByRequestIdResponses] + +export type GetFalAiSwittiRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/switti/requests/{request_id}/status' +} + +export type GetFalAiSwittiRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSwittiRequestsByRequestIdStatusResponse = + GetFalAiSwittiRequestsByRequestIdStatusResponses[keyof GetFalAiSwittiRequestsByRequestIdStatusResponses] + +export type PutFalAiSwittiRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/switti/requests/{request_id}/cancel' +} + +export type PutFalAiSwittiRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiSwittiRequestsByRequestIdCancelResponse = + PutFalAiSwittiRequestsByRequestIdCancelResponses[keyof PutFalAiSwittiRequestsByRequestIdCancelResponses] + +export type PostFalAiSwittiData = { + body: SchemaSwittiInput + path?: never + query?: never + url: '/fal-ai/switti' +} + +export type PostFalAiSwittiResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSwittiResponse = + PostFalAiSwittiResponses[keyof PostFalAiSwittiResponses] + +export type GetFalAiSwittiRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/switti/requests/{request_id}' +} + +export type GetFalAiSwittiRequestsByRequestIdResponses = { + /** + * Result of the request. 
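+ *
+ * Editorial sketch, not generator output: fetching the final output once the
+ * queue status reaches COMPLETED. Base URL, `FAL_KEY`, and `requestId` are
+ * assumptions as in the other examples.
+ *
+ * @example
+ * const res = await fetch(
+ *   `https://queue.fal.run/fal-ai/switti/requests/${requestId}`,
+ *   { headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+ * )
+ * const output = (await res.json()) as GetFalAiSwittiRequestsByRequestIdResponse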
+ */ + 200: SchemaSwittiOutput +} + +export type GetFalAiSwittiRequestsByRequestIdResponse = + GetFalAiSwittiRequestsByRequestIdResponses[keyof GetFalAiSwittiRequestsByRequestIdResponses] + +export type GetFalAiSwitti512RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/switti/512/requests/{request_id}/status' +} + +export type GetFalAiSwitti512RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSwitti512RequestsByRequestIdStatusResponse = + GetFalAiSwitti512RequestsByRequestIdStatusResponses[keyof GetFalAiSwitti512RequestsByRequestIdStatusResponses] + +export type PutFalAiSwitti512RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/switti/512/requests/{request_id}/cancel' +} + +export type PutFalAiSwitti512RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiSwitti512RequestsByRequestIdCancelResponse = + PutFalAiSwitti512RequestsByRequestIdCancelResponses[keyof PutFalAiSwitti512RequestsByRequestIdCancelResponses] + +export type PostFalAiSwitti512Data = { + body: SchemaSwitti512Input + path?: never + query?: never + url: '/fal-ai/switti/512' +} + +export type PostFalAiSwitti512Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSwitti512Response = + PostFalAiSwitti512Responses[keyof PostFalAiSwitti512Responses] + +export type GetFalAiSwitti512RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/switti/512/requests/{request_id}' +} + +export type GetFalAiSwitti512RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSwitti512Output +} + +export type GetFalAiSwitti512RequestsByRequestIdResponse = + GetFalAiSwitti512RequestsByRequestIdResponses[keyof GetFalAiSwitti512RequestsByRequestIdResponses] + +export type GetFalAiBriaTextToImageBaseRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/bria/text-to-image/base/requests/{request_id}/status' +} + +export type GetFalAiBriaTextToImageBaseRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiBriaTextToImageBaseRequestsByRequestIdStatusResponse = + GetFalAiBriaTextToImageBaseRequestsByRequestIdStatusResponses[keyof GetFalAiBriaTextToImageBaseRequestsByRequestIdStatusResponses] + +export type PutFalAiBriaTextToImageBaseRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bria/text-to-image/base/requests/{request_id}/cancel' +} + +export type PutFalAiBriaTextToImageBaseRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiBriaTextToImageBaseRequestsByRequestIdCancelResponse = + PutFalAiBriaTextToImageBaseRequestsByRequestIdCancelResponses[keyof PutFalAiBriaTextToImageBaseRequestsByRequestIdCancelResponses] + +export type PostFalAiBriaTextToImageBaseData = { + body: SchemaBriaTextToImageBaseInput + path?: never + query?: never + url: '/fal-ai/bria/text-to-image/base' +} + +export type PostFalAiBriaTextToImageBaseResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiBriaTextToImageBaseResponse = + PostFalAiBriaTextToImageBaseResponses[keyof PostFalAiBriaTextToImageBaseResponses] + +export type GetFalAiBriaTextToImageBaseRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bria/text-to-image/base/requests/{request_id}' +} + +export type GetFalAiBriaTextToImageBaseRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaBriaTextToImageBaseOutput +} + +export type GetFalAiBriaTextToImageBaseRequestsByRequestIdResponse = + GetFalAiBriaTextToImageBaseRequestsByRequestIdResponses[keyof GetFalAiBriaTextToImageBaseRequestsByRequestIdResponses] + +export type GetFalAiBriaTextToImageFastRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/bria/text-to-image/fast/requests/{request_id}/status' +} + +export type GetFalAiBriaTextToImageFastRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiBriaTextToImageFastRequestsByRequestIdStatusResponse = + GetFalAiBriaTextToImageFastRequestsByRequestIdStatusResponses[keyof GetFalAiBriaTextToImageFastRequestsByRequestIdStatusResponses] + +export type PutFalAiBriaTextToImageFastRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bria/text-to-image/fast/requests/{request_id}/cancel' +} + +export type PutFalAiBriaTextToImageFastRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiBriaTextToImageFastRequestsByRequestIdCancelResponse = + PutFalAiBriaTextToImageFastRequestsByRequestIdCancelResponses[keyof PutFalAiBriaTextToImageFastRequestsByRequestIdCancelResponses] + +export type PostFalAiBriaTextToImageFastData = { + body: SchemaBriaTextToImageFastInput + path?: never + query?: never + url: '/fal-ai/bria/text-to-image/fast' +} + +export type PostFalAiBriaTextToImageFastResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiBriaTextToImageFastResponse = + PostFalAiBriaTextToImageFastResponses[keyof PostFalAiBriaTextToImageFastResponses] + +export type GetFalAiBriaTextToImageFastRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bria/text-to-image/fast/requests/{request_id}' +} + +export type GetFalAiBriaTextToImageFastRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaBriaTextToImageFastOutput +} + +export type GetFalAiBriaTextToImageFastRequestsByRequestIdResponse = + GetFalAiBriaTextToImageFastRequestsByRequestIdResponses[keyof GetFalAiBriaTextToImageFastRequestsByRequestIdResponses] + +export type GetFalAiBriaTextToImageHdRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/bria/text-to-image/hd/requests/{request_id}/status' +} + +export type GetFalAiBriaTextToImageHdRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiBriaTextToImageHdRequestsByRequestIdStatusResponse = + GetFalAiBriaTextToImageHdRequestsByRequestIdStatusResponses[keyof GetFalAiBriaTextToImageHdRequestsByRequestIdStatusResponses] + +export type PutFalAiBriaTextToImageHdRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bria/text-to-image/hd/requests/{request_id}/cancel' +} + +export type PutFalAiBriaTextToImageHdRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiBriaTextToImageHdRequestsByRequestIdCancelResponse = + PutFalAiBriaTextToImageHdRequestsByRequestIdCancelResponses[keyof PutFalAiBriaTextToImageHdRequestsByRequestIdCancelResponses] + +export type PostFalAiBriaTextToImageHdData = { + body: SchemaBriaTextToImageHdInput + path?: never + query?: never + url: '/fal-ai/bria/text-to-image/hd' +} + +export type PostFalAiBriaTextToImageHdResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiBriaTextToImageHdResponse = + PostFalAiBriaTextToImageHdResponses[keyof PostFalAiBriaTextToImageHdResponses] + +export type GetFalAiBriaTextToImageHdRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bria/text-to-image/hd/requests/{request_id}' +} + +export type GetFalAiBriaTextToImageHdRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaBriaTextToImageHdOutput +} + +export type GetFalAiBriaTextToImageHdRequestsByRequestIdResponse = + GetFalAiBriaTextToImageHdRequestsByRequestIdResponses[keyof GetFalAiBriaTextToImageHdRequestsByRequestIdResponses] + +export type GetFalAiRecraft20bRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/recraft-20b/requests/{request_id}/status' +} + +export type GetFalAiRecraft20bRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiRecraft20bRequestsByRequestIdStatusResponse = + GetFalAiRecraft20bRequestsByRequestIdStatusResponses[keyof GetFalAiRecraft20bRequestsByRequestIdStatusResponses] + +export type PutFalAiRecraft20bRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/recraft-20b/requests/{request_id}/cancel' +} + +export type PutFalAiRecraft20bRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. 
+ */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiRecraft20bRequestsByRequestIdCancelResponse = + PutFalAiRecraft20bRequestsByRequestIdCancelResponses[keyof PutFalAiRecraft20bRequestsByRequestIdCancelResponses] + +export type PostFalAiRecraft20bData = { + body: SchemaRecraft20bInput + path?: never + query?: never + url: '/fal-ai/recraft-20b' +} + +export type PostFalAiRecraft20bResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiRecraft20bResponse = + PostFalAiRecraft20bResponses[keyof PostFalAiRecraft20bResponses] + +export type GetFalAiRecraft20bRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/recraft-20b/requests/{request_id}' +} + +export type GetFalAiRecraft20bRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaRecraft20bOutput +} + +export type GetFalAiRecraft20bRequestsByRequestIdResponse = + GetFalAiRecraft20bRequestsByRequestIdResponses[keyof GetFalAiRecraft20bRequestsByRequestIdResponses] + +export type GetFalAiIdeogramV2TurboRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ideogram/v2/turbo/requests/{request_id}/status' +} + +export type GetFalAiIdeogramV2TurboRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiIdeogramV2TurboRequestsByRequestIdStatusResponse = + GetFalAiIdeogramV2TurboRequestsByRequestIdStatusResponses[keyof GetFalAiIdeogramV2TurboRequestsByRequestIdStatusResponses] + +export type PutFalAiIdeogramV2TurboRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ideogram/v2/turbo/requests/{request_id}/cancel' +} + +export type PutFalAiIdeogramV2TurboRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiIdeogramV2TurboRequestsByRequestIdCancelResponse = + PutFalAiIdeogramV2TurboRequestsByRequestIdCancelResponses[keyof PutFalAiIdeogramV2TurboRequestsByRequestIdCancelResponses] + +export type PostFalAiIdeogramV2TurboData = { + body: SchemaIdeogramV2TurboInput + path?: never + query?: never + url: '/fal-ai/ideogram/v2/turbo' +} + +export type PostFalAiIdeogramV2TurboResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiIdeogramV2TurboResponse = + PostFalAiIdeogramV2TurboResponses[keyof PostFalAiIdeogramV2TurboResponses] + +export type GetFalAiIdeogramV2TurboRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ideogram/v2/turbo/requests/{request_id}' +} + +export type GetFalAiIdeogramV2TurboRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaIdeogramV2TurboOutput +} + +export type GetFalAiIdeogramV2TurboRequestsByRequestIdResponse = + GetFalAiIdeogramV2TurboRequestsByRequestIdResponses[keyof GetFalAiIdeogramV2TurboRequestsByRequestIdResponses] + +export type GetFalAiLumaPhotonFlashRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/luma-photon/flash/requests/{request_id}/status' +} + +export type GetFalAiLumaPhotonFlashRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLumaPhotonFlashRequestsByRequestIdStatusResponse = + GetFalAiLumaPhotonFlashRequestsByRequestIdStatusResponses[keyof GetFalAiLumaPhotonFlashRequestsByRequestIdStatusResponses] + +export type PutFalAiLumaPhotonFlashRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/luma-photon/flash/requests/{request_id}/cancel' +} + +export type PutFalAiLumaPhotonFlashRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLumaPhotonFlashRequestsByRequestIdCancelResponse = + PutFalAiLumaPhotonFlashRequestsByRequestIdCancelResponses[keyof PutFalAiLumaPhotonFlashRequestsByRequestIdCancelResponses] + +export type PostFalAiLumaPhotonFlashData = { + body: SchemaLumaPhotonFlashInput + path?: never + query?: never + url: '/fal-ai/luma-photon/flash' +} + +export type PostFalAiLumaPhotonFlashResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLumaPhotonFlashResponse = + PostFalAiLumaPhotonFlashResponses[keyof PostFalAiLumaPhotonFlashResponses] + +export type GetFalAiLumaPhotonFlashRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/luma-photon/flash/requests/{request_id}' +} + +export type GetFalAiLumaPhotonFlashRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLumaPhotonFlashOutput +} + +export type GetFalAiLumaPhotonFlashRequestsByRequestIdResponse = + GetFalAiLumaPhotonFlashRequestsByRequestIdResponses[keyof GetFalAiLumaPhotonFlashRequestsByRequestIdResponses] + +export type GetFalAiAuraFlowRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/aura-flow/requests/{request_id}/status' +} + +export type GetFalAiAuraFlowRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiAuraFlowRequestsByRequestIdStatusResponse = + GetFalAiAuraFlowRequestsByRequestIdStatusResponses[keyof GetFalAiAuraFlowRequestsByRequestIdStatusResponses] + +export type PutFalAiAuraFlowRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/aura-flow/requests/{request_id}/cancel' +} + +export type PutFalAiAuraFlowRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiAuraFlowRequestsByRequestIdCancelResponse = + PutFalAiAuraFlowRequestsByRequestIdCancelResponses[keyof PutFalAiAuraFlowRequestsByRequestIdCancelResponses] + +export type PostFalAiAuraFlowData = { + body: SchemaAuraFlowInput + path?: never + query?: never + url: '/fal-ai/aura-flow' +} + +export type PostFalAiAuraFlowResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiAuraFlowResponse = + PostFalAiAuraFlowResponses[keyof PostFalAiAuraFlowResponses] + +export type GetFalAiAuraFlowRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/aura-flow/requests/{request_id}' +} + +export type GetFalAiAuraFlowRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaAuraFlowOutput +} + +export type GetFalAiAuraFlowRequestsByRequestIdResponse = + GetFalAiAuraFlowRequestsByRequestIdResponses[keyof GetFalAiAuraFlowRequestsByRequestIdResponses] + +export type GetFalAiOmnigenV1RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/omnigen-v1/requests/{request_id}/status' +} + +export type GetFalAiOmnigenV1RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiOmnigenV1RequestsByRequestIdStatusResponse = + GetFalAiOmnigenV1RequestsByRequestIdStatusResponses[keyof GetFalAiOmnigenV1RequestsByRequestIdStatusResponses] + +export type PutFalAiOmnigenV1RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/omnigen-v1/requests/{request_id}/cancel' +} + +export type PutFalAiOmnigenV1RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiOmnigenV1RequestsByRequestIdCancelResponse = + PutFalAiOmnigenV1RequestsByRequestIdCancelResponses[keyof PutFalAiOmnigenV1RequestsByRequestIdCancelResponses] + +export type PostFalAiOmnigenV1Data = { + body: SchemaOmnigenV1Input + path?: never + query?: never + url: '/fal-ai/omnigen-v1' +} + +export type PostFalAiOmnigenV1Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiOmnigenV1Response = + PostFalAiOmnigenV1Responses[keyof PostFalAiOmnigenV1Responses] + +export type GetFalAiOmnigenV1RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/omnigen-v1/requests/{request_id}' +} + +export type GetFalAiOmnigenV1RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaOmnigenV1Output +} + +export type GetFalAiOmnigenV1RequestsByRequestIdResponse = + GetFalAiOmnigenV1RequestsByRequestIdResponses[keyof GetFalAiOmnigenV1RequestsByRequestIdResponses] + +export type GetFalAiFluxSchnellRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
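+ *
+ * Editorial sketch, not generator output: the flag is numeric, so append
+ * `?logs=1` to the status URL to include log lines in the payload.
+ *
+ * @example
+ * const url = `https://queue.fal.run/fal-ai/flux/schnell/requests/${requestId}/status?logs=1`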
+ */ + logs?: number + } + url: '/fal-ai/flux/schnell/requests/{request_id}/status' +} + +export type GetFalAiFluxSchnellRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxSchnellRequestsByRequestIdStatusResponse = + GetFalAiFluxSchnellRequestsByRequestIdStatusResponses[keyof GetFalAiFluxSchnellRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxSchnellRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux/schnell/requests/{request_id}/cancel' +} + +export type PutFalAiFluxSchnellRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFluxSchnellRequestsByRequestIdCancelResponse = + PutFalAiFluxSchnellRequestsByRequestIdCancelResponses[keyof PutFalAiFluxSchnellRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxSchnellData = { + body: SchemaFluxSchnellInput + path?: never + query?: never + url: '/fal-ai/flux/schnell' +} + +export type PostFalAiFluxSchnellResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxSchnellResponse = + PostFalAiFluxSchnellResponses[keyof PostFalAiFluxSchnellResponses] + +export type GetFalAiFluxSchnellRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux/schnell/requests/{request_id}' +} + +export type GetFalAiFluxSchnellRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxSchnellOutput +} + +export type GetFalAiFluxSchnellRequestsByRequestIdResponse = + GetFalAiFluxSchnellRequestsByRequestIdResponses[keyof GetFalAiFluxSchnellRequestsByRequestIdResponses] + +export type GetFalAiStableDiffusionV35MediumRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/stable-diffusion-v35-medium/requests/{request_id}/status' +} + +export type GetFalAiStableDiffusionV35MediumRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiStableDiffusionV35MediumRequestsByRequestIdStatusResponse = + GetFalAiStableDiffusionV35MediumRequestsByRequestIdStatusResponses[keyof GetFalAiStableDiffusionV35MediumRequestsByRequestIdStatusResponses] + +export type PutFalAiStableDiffusionV35MediumRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/stable-diffusion-v35-medium/requests/{request_id}/cancel' +} + +export type PutFalAiStableDiffusionV35MediumRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiStableDiffusionV35MediumRequestsByRequestIdCancelResponse = + PutFalAiStableDiffusionV35MediumRequestsByRequestIdCancelResponses[keyof PutFalAiStableDiffusionV35MediumRequestsByRequestIdCancelResponses] + +export type PostFalAiStableDiffusionV35MediumData = { + body: SchemaStableDiffusionV35MediumInput + path?: never + query?: never + url: '/fal-ai/stable-diffusion-v35-medium' +} + +export type PostFalAiStableDiffusionV35MediumResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiStableDiffusionV35MediumResponse = + PostFalAiStableDiffusionV35MediumResponses[keyof PostFalAiStableDiffusionV35MediumResponses] + +export type GetFalAiStableDiffusionV35MediumRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/stable-diffusion-v35-medium/requests/{request_id}' +} + +export type GetFalAiStableDiffusionV35MediumRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaStableDiffusionV35MediumOutput +} + +export type GetFalAiStableDiffusionV35MediumRequestsByRequestIdResponse = + GetFalAiStableDiffusionV35MediumRequestsByRequestIdResponses[keyof GetFalAiStableDiffusionV35MediumRequestsByRequestIdResponses] + +export type GetFalAiFluxLoraInpaintingRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-lora/inpainting/requests/{request_id}/status' +} + +export type GetFalAiFluxLoraInpaintingRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxLoraInpaintingRequestsByRequestIdStatusResponse = + GetFalAiFluxLoraInpaintingRequestsByRequestIdStatusResponses[keyof GetFalAiFluxLoraInpaintingRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxLoraInpaintingRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-lora/inpainting/requests/{request_id}/cancel' +} + +export type PutFalAiFluxLoraInpaintingRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFluxLoraInpaintingRequestsByRequestIdCancelResponse = + PutFalAiFluxLoraInpaintingRequestsByRequestIdCancelResponses[keyof PutFalAiFluxLoraInpaintingRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxLoraInpaintingData = { + body: SchemaFluxLoraInpaintingInput + path?: never + query?: never + url: '/fal-ai/flux-lora/inpainting' +} + +export type PostFalAiFluxLoraInpaintingResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxLoraInpaintingResponse = + PostFalAiFluxLoraInpaintingResponses[keyof PostFalAiFluxLoraInpaintingResponses] + +export type GetFalAiFluxLoraInpaintingRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-lora/inpainting/requests/{request_id}' +} + +export type GetFalAiFluxLoraInpaintingRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaFluxLoraInpaintingOutput +} + +export type GetFalAiFluxLoraInpaintingRequestsByRequestIdResponse = + GetFalAiFluxLoraInpaintingRequestsByRequestIdResponses[keyof GetFalAiFluxLoraInpaintingRequestsByRequestIdResponses] + +export type GetFalAiStableDiffusionV3MediumRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/stable-diffusion-v3-medium/requests/{request_id}/status' +} + +export type GetFalAiStableDiffusionV3MediumRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiStableDiffusionV3MediumRequestsByRequestIdStatusResponse = + GetFalAiStableDiffusionV3MediumRequestsByRequestIdStatusResponses[keyof GetFalAiStableDiffusionV3MediumRequestsByRequestIdStatusResponses] + +export type PutFalAiStableDiffusionV3MediumRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/stable-diffusion-v3-medium/requests/{request_id}/cancel' +} + +export type PutFalAiStableDiffusionV3MediumRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiStableDiffusionV3MediumRequestsByRequestIdCancelResponse = + PutFalAiStableDiffusionV3MediumRequestsByRequestIdCancelResponses[keyof PutFalAiStableDiffusionV3MediumRequestsByRequestIdCancelResponses] + +export type PostFalAiStableDiffusionV3MediumData = { + body: SchemaStableDiffusionV3MediumInput + path?: never + query?: never + url: '/fal-ai/stable-diffusion-v3-medium' +} + +export type PostFalAiStableDiffusionV3MediumResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiStableDiffusionV3MediumResponse = + PostFalAiStableDiffusionV3MediumResponses[keyof PostFalAiStableDiffusionV3MediumResponses] + +export type GetFalAiStableDiffusionV3MediumRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/stable-diffusion-v3-medium/requests/{request_id}' +} + +export type GetFalAiStableDiffusionV3MediumRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaStableDiffusionV3MediumOutput +} + +export type GetFalAiStableDiffusionV3MediumRequestsByRequestIdResponse = + GetFalAiStableDiffusionV3MediumRequestsByRequestIdResponses[keyof GetFalAiStableDiffusionV3MediumRequestsByRequestIdResponses] + +export type GetFalAiFooocusUpscaleOrVaryRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/fooocus/upscale-or-vary/requests/{request_id}/status' +} + +export type GetFalAiFooocusUpscaleOrVaryRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiFooocusUpscaleOrVaryRequestsByRequestIdStatusResponse = + GetFalAiFooocusUpscaleOrVaryRequestsByRequestIdStatusResponses[keyof GetFalAiFooocusUpscaleOrVaryRequestsByRequestIdStatusResponses] + +export type PutFalAiFooocusUpscaleOrVaryRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fooocus/upscale-or-vary/requests/{request_id}/cancel' +} + +export type PutFalAiFooocusUpscaleOrVaryRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFooocusUpscaleOrVaryRequestsByRequestIdCancelResponse = + PutFalAiFooocusUpscaleOrVaryRequestsByRequestIdCancelResponses[keyof PutFalAiFooocusUpscaleOrVaryRequestsByRequestIdCancelResponses] + +export type PostFalAiFooocusUpscaleOrVaryData = { + body: SchemaFooocusUpscaleOrVaryInput + path?: never + query?: never + url: '/fal-ai/fooocus/upscale-or-vary' +} + +export type PostFalAiFooocusUpscaleOrVaryResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFooocusUpscaleOrVaryResponse = + PostFalAiFooocusUpscaleOrVaryResponses[keyof PostFalAiFooocusUpscaleOrVaryResponses] + +export type GetFalAiFooocusUpscaleOrVaryRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fooocus/upscale-or-vary/requests/{request_id}' +} + +export type GetFalAiFooocusUpscaleOrVaryRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFooocusUpscaleOrVaryOutput +} + +export type GetFalAiFooocusUpscaleOrVaryRequestsByRequestIdResponse = + GetFalAiFooocusUpscaleOrVaryRequestsByRequestIdResponses[keyof GetFalAiFooocusUpscaleOrVaryRequestsByRequestIdResponses] + +export type GetFalAiSanaRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/sana/requests/{request_id}/status' +} + +export type GetFalAiSanaRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSanaRequestsByRequestIdStatusResponse = + GetFalAiSanaRequestsByRequestIdStatusResponses[keyof GetFalAiSanaRequestsByRequestIdStatusResponses] + +export type PutFalAiSanaRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sana/requests/{request_id}/cancel' +} + +export type PutFalAiSanaRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiSanaRequestsByRequestIdCancelResponse = + PutFalAiSanaRequestsByRequestIdCancelResponses[keyof PutFalAiSanaRequestsByRequestIdCancelResponses] + +export type PostFalAiSanaData = { + body: SchemaSanaInput + path?: never + query?: never + url: '/fal-ai/sana' +} + +export type PostFalAiSanaResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiSanaResponse = + PostFalAiSanaResponses[keyof PostFalAiSanaResponses] + +export type GetFalAiSanaRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sana/requests/{request_id}' +} + +export type GetFalAiSanaRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSanaOutput +} + +export type GetFalAiSanaRequestsByRequestIdResponse = + GetFalAiSanaRequestsByRequestIdResponses[keyof GetFalAiSanaRequestsByRequestIdResponses] + +export type GetFalAiFluxSubjectRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-subject/requests/{request_id}/status' +} + +export type GetFalAiFluxSubjectRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxSubjectRequestsByRequestIdStatusResponse = + GetFalAiFluxSubjectRequestsByRequestIdStatusResponses[keyof GetFalAiFluxSubjectRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxSubjectRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-subject/requests/{request_id}/cancel' +} + +export type PutFalAiFluxSubjectRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFluxSubjectRequestsByRequestIdCancelResponse = + PutFalAiFluxSubjectRequestsByRequestIdCancelResponses[keyof PutFalAiFluxSubjectRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxSubjectData = { + body: SchemaFluxSubjectInput + path?: never + query?: never + url: '/fal-ai/flux-subject' +} + +export type PostFalAiFluxSubjectResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxSubjectResponse = + PostFalAiFluxSubjectResponses[keyof PostFalAiFluxSubjectResponses] + +export type GetFalAiFluxSubjectRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-subject/requests/{request_id}' +} + +export type GetFalAiFluxSubjectRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxSubjectOutput +} + +export type GetFalAiFluxSubjectRequestsByRequestIdResponse = + GetFalAiFluxSubjectRequestsByRequestIdResponses[keyof GetFalAiFluxSubjectRequestsByRequestIdResponses] + +export type GetFalAiPixartSigmaRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pixart-sigma/requests/{request_id}/status' +} + +export type GetFalAiPixartSigmaRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiPixartSigmaRequestsByRequestIdStatusResponse = + GetFalAiPixartSigmaRequestsByRequestIdStatusResponses[keyof GetFalAiPixartSigmaRequestsByRequestIdStatusResponses] + +export type PutFalAiPixartSigmaRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixart-sigma/requests/{request_id}/cancel' +} + +export type PutFalAiPixartSigmaRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiPixartSigmaRequestsByRequestIdCancelResponse = + PutFalAiPixartSigmaRequestsByRequestIdCancelResponses[keyof PutFalAiPixartSigmaRequestsByRequestIdCancelResponses] + +export type PostFalAiPixartSigmaData = { + body: SchemaPixartSigmaInput + path?: never + query?: never + url: '/fal-ai/pixart-sigma' +} + +export type PostFalAiPixartSigmaResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPixartSigmaResponse = + PostFalAiPixartSigmaResponses[keyof PostFalAiPixartSigmaResponses] + +export type GetFalAiPixartSigmaRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixart-sigma/requests/{request_id}' +} + +export type GetFalAiPixartSigmaRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPixartSigmaOutput +} + +export type GetFalAiPixartSigmaRequestsByRequestIdResponse = + GetFalAiPixartSigmaRequestsByRequestIdResponses[keyof GetFalAiPixartSigmaRequestsByRequestIdResponses] + +export type GetFalAiSdxlControlnetUnionRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/sdxl-controlnet-union/requests/{request_id}/status' +} + +export type GetFalAiSdxlControlnetUnionRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSdxlControlnetUnionRequestsByRequestIdStatusResponse = + GetFalAiSdxlControlnetUnionRequestsByRequestIdStatusResponses[keyof GetFalAiSdxlControlnetUnionRequestsByRequestIdStatusResponses] + +export type PutFalAiSdxlControlnetUnionRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sdxl-controlnet-union/requests/{request_id}/cancel' +} + +export type PutFalAiSdxlControlnetUnionRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiSdxlControlnetUnionRequestsByRequestIdCancelResponse = + PutFalAiSdxlControlnetUnionRequestsByRequestIdCancelResponses[keyof PutFalAiSdxlControlnetUnionRequestsByRequestIdCancelResponses] + +export type PostFalAiSdxlControlnetUnionData = { + body: SchemaSdxlControlnetUnionInput + path?: never + query?: never + url: '/fal-ai/sdxl-controlnet-union' +} + +export type PostFalAiSdxlControlnetUnionResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiSdxlControlnetUnionResponse = + PostFalAiSdxlControlnetUnionResponses[keyof PostFalAiSdxlControlnetUnionResponses] + +export type GetFalAiSdxlControlnetUnionRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sdxl-controlnet-union/requests/{request_id}' +} + +export type GetFalAiSdxlControlnetUnionRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSdxlControlnetUnionOutput +} + +export type GetFalAiSdxlControlnetUnionRequestsByRequestIdResponse = + GetFalAiSdxlControlnetUnionRequestsByRequestIdResponses[keyof GetFalAiSdxlControlnetUnionRequestsByRequestIdResponses] + +export type GetFalAiKolorsRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kolors/requests/{request_id}/status' +} + +export type GetFalAiKolorsRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiKolorsRequestsByRequestIdStatusResponse = + GetFalAiKolorsRequestsByRequestIdStatusResponses[keyof GetFalAiKolorsRequestsByRequestIdStatusResponses] + +export type PutFalAiKolorsRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kolors/requests/{request_id}/cancel' +} + +export type PutFalAiKolorsRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiKolorsRequestsByRequestIdCancelResponse = + PutFalAiKolorsRequestsByRequestIdCancelResponses[keyof PutFalAiKolorsRequestsByRequestIdCancelResponses] + +export type PostFalAiKolorsData = { + body: SchemaKolorsInput + path?: never + query?: never + url: '/fal-ai/kolors' +} + +export type PostFalAiKolorsResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKolorsResponse = + PostFalAiKolorsResponses[keyof PostFalAiKolorsResponses] + +export type GetFalAiKolorsRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kolors/requests/{request_id}' +} + +export type GetFalAiKolorsRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaKolorsOutput +} + +export type GetFalAiKolorsRequestsByRequestIdResponse = + GetFalAiKolorsRequestsByRequestIdResponses[keyof GetFalAiKolorsRequestsByRequestIdResponses] + +export type GetFalAiStableCascadeRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/stable-cascade/requests/{request_id}/status' +} + +export type GetFalAiStableCascadeRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiStableCascadeRequestsByRequestIdStatusResponse = + GetFalAiStableCascadeRequestsByRequestIdStatusResponses[keyof GetFalAiStableCascadeRequestsByRequestIdStatusResponses] + +export type PutFalAiStableCascadeRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/stable-cascade/requests/{request_id}/cancel' +} + +export type PutFalAiStableCascadeRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiStableCascadeRequestsByRequestIdCancelResponse = + PutFalAiStableCascadeRequestsByRequestIdCancelResponses[keyof PutFalAiStableCascadeRequestsByRequestIdCancelResponses] + +export type PostFalAiStableCascadeData = { + body: SchemaStableCascadeInput + path?: never + query?: never + url: '/fal-ai/stable-cascade' +} + +export type PostFalAiStableCascadeResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiStableCascadeResponse = + PostFalAiStableCascadeResponses[keyof PostFalAiStableCascadeResponses] + +export type GetFalAiStableCascadeRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/stable-cascade/requests/{request_id}' +} + +export type GetFalAiStableCascadeRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaStableCascadeOutput +} + +export type GetFalAiStableCascadeRequestsByRequestIdResponse = + GetFalAiStableCascadeRequestsByRequestIdResponses[keyof GetFalAiStableCascadeRequestsByRequestIdResponses] + +export type GetFalAiFastSdxlRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/fast-sdxl/requests/{request_id}/status' +} + +export type GetFalAiFastSdxlRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFastSdxlRequestsByRequestIdStatusResponse = + GetFalAiFastSdxlRequestsByRequestIdStatusResponses[keyof GetFalAiFastSdxlRequestsByRequestIdStatusResponses] + +export type PutFalAiFastSdxlRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-sdxl/requests/{request_id}/cancel' +} + +export type PutFalAiFastSdxlRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFastSdxlRequestsByRequestIdCancelResponse = + PutFalAiFastSdxlRequestsByRequestIdCancelResponses[keyof PutFalAiFastSdxlRequestsByRequestIdCancelResponses] + +export type PostFalAiFastSdxlData = { + body: SchemaFastSdxlInput + path?: never + query?: never + url: '/fal-ai/fast-sdxl' +} + +export type PostFalAiFastSdxlResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiFastSdxlResponse = + PostFalAiFastSdxlResponses[keyof PostFalAiFastSdxlResponses] + +export type GetFalAiFastSdxlRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-sdxl/requests/{request_id}' +} + +export type GetFalAiFastSdxlRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFastSdxlOutput +} + +export type GetFalAiFastSdxlRequestsByRequestIdResponse = + GetFalAiFastSdxlRequestsByRequestIdResponses[keyof GetFalAiFastSdxlRequestsByRequestIdResponses] + +export type GetFalAiStableCascadeSoteDiffusionRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/stable-cascade/sote-diffusion/requests/{request_id}/status' +} + +export type GetFalAiStableCascadeSoteDiffusionRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiStableCascadeSoteDiffusionRequestsByRequestIdStatusResponse = + GetFalAiStableCascadeSoteDiffusionRequestsByRequestIdStatusResponses[keyof GetFalAiStableCascadeSoteDiffusionRequestsByRequestIdStatusResponses] + +export type PutFalAiStableCascadeSoteDiffusionRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/stable-cascade/sote-diffusion/requests/{request_id}/cancel' +} + +export type PutFalAiStableCascadeSoteDiffusionRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiStableCascadeSoteDiffusionRequestsByRequestIdCancelResponse = + PutFalAiStableCascadeSoteDiffusionRequestsByRequestIdCancelResponses[keyof PutFalAiStableCascadeSoteDiffusionRequestsByRequestIdCancelResponses] + +export type PostFalAiStableCascadeSoteDiffusionData = { + body: SchemaStableCascadeSoteDiffusionInput + path?: never + query?: never + url: '/fal-ai/stable-cascade/sote-diffusion' +} + +export type PostFalAiStableCascadeSoteDiffusionResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiStableCascadeSoteDiffusionResponse = + PostFalAiStableCascadeSoteDiffusionResponses[keyof PostFalAiStableCascadeSoteDiffusionResponses] + +export type GetFalAiStableCascadeSoteDiffusionRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/stable-cascade/sote-diffusion/requests/{request_id}' +} + +export type GetFalAiStableCascadeSoteDiffusionRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaStableCascadeSoteDiffusionOutput +} + +export type GetFalAiStableCascadeSoteDiffusionRequestsByRequestIdResponse = + GetFalAiStableCascadeSoteDiffusionRequestsByRequestIdResponses[keyof GetFalAiStableCascadeSoteDiffusionRequestsByRequestIdResponses] + +export type GetFalAiLumaPhotonRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/luma-photon/requests/{request_id}/status' +} + +export type GetFalAiLumaPhotonRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLumaPhotonRequestsByRequestIdStatusResponse = + GetFalAiLumaPhotonRequestsByRequestIdStatusResponses[keyof GetFalAiLumaPhotonRequestsByRequestIdStatusResponses] + +export type PutFalAiLumaPhotonRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/luma-photon/requests/{request_id}/cancel' +} + +export type PutFalAiLumaPhotonRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLumaPhotonRequestsByRequestIdCancelResponse = + PutFalAiLumaPhotonRequestsByRequestIdCancelResponses[keyof PutFalAiLumaPhotonRequestsByRequestIdCancelResponses] + +export type PostFalAiLumaPhotonData = { + body: SchemaLumaPhotonInput + path?: never + query?: never + url: '/fal-ai/luma-photon' +} + +export type PostFalAiLumaPhotonResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLumaPhotonResponse = + PostFalAiLumaPhotonResponses[keyof PostFalAiLumaPhotonResponses] + +export type GetFalAiLumaPhotonRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/luma-photon/requests/{request_id}' +} + +export type GetFalAiLumaPhotonRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLumaPhotonOutput +} + +export type GetFalAiLumaPhotonRequestsByRequestIdResponse = + GetFalAiLumaPhotonRequestsByRequestIdResponses[keyof GetFalAiLumaPhotonRequestsByRequestIdResponses] + +export type GetFalAiLightningModelsRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/lightning-models/requests/{request_id}/status' +} + +export type GetFalAiLightningModelsRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLightningModelsRequestsByRequestIdStatusResponse = + GetFalAiLightningModelsRequestsByRequestIdStatusResponses[keyof GetFalAiLightningModelsRequestsByRequestIdStatusResponses] + +export type PutFalAiLightningModelsRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/lightning-models/requests/{request_id}/cancel' +} + +export type PutFalAiLightningModelsRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLightningModelsRequestsByRequestIdCancelResponse = + PutFalAiLightningModelsRequestsByRequestIdCancelResponses[keyof PutFalAiLightningModelsRequestsByRequestIdCancelResponses] + +export type PostFalAiLightningModelsData = { + body: SchemaLightningModelsInput + path?: never + query?: never + url: '/fal-ai/lightning-models' +} + +export type PostFalAiLightningModelsResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiLightningModelsResponse = + PostFalAiLightningModelsResponses[keyof PostFalAiLightningModelsResponses] + +export type GetFalAiLightningModelsRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/lightning-models/requests/{request_id}' +} + +export type GetFalAiLightningModelsRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLightningModelsOutput +} + +export type GetFalAiLightningModelsRequestsByRequestIdResponse = + GetFalAiLightningModelsRequestsByRequestIdResponses[keyof GetFalAiLightningModelsRequestsByRequestIdResponses] + +export type GetFalAiPlaygroundV25RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/playground-v25/requests/{request_id}/status' +} + +export type GetFalAiPlaygroundV25RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiPlaygroundV25RequestsByRequestIdStatusResponse = + GetFalAiPlaygroundV25RequestsByRequestIdStatusResponses[keyof GetFalAiPlaygroundV25RequestsByRequestIdStatusResponses] + +export type PutFalAiPlaygroundV25RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/playground-v25/requests/{request_id}/cancel' +} + +export type PutFalAiPlaygroundV25RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiPlaygroundV25RequestsByRequestIdCancelResponse = + PutFalAiPlaygroundV25RequestsByRequestIdCancelResponses[keyof PutFalAiPlaygroundV25RequestsByRequestIdCancelResponses] + +export type PostFalAiPlaygroundV25Data = { + body: SchemaPlaygroundV25Input + path?: never + query?: never + url: '/fal-ai/playground-v25' +} + +export type PostFalAiPlaygroundV25Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPlaygroundV25Response = + PostFalAiPlaygroundV25Responses[keyof PostFalAiPlaygroundV25Responses] + +export type GetFalAiPlaygroundV25RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/playground-v25/requests/{request_id}' +} + +export type GetFalAiPlaygroundV25RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPlaygroundV25Output +} + +export type GetFalAiPlaygroundV25RequestsByRequestIdResponse = + GetFalAiPlaygroundV25RequestsByRequestIdResponses[keyof GetFalAiPlaygroundV25RequestsByRequestIdResponses] + +export type GetFalAiRealisticVisionRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/realistic-vision/requests/{request_id}/status' +} + +export type GetFalAiRealisticVisionRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiRealisticVisionRequestsByRequestIdStatusResponse = + GetFalAiRealisticVisionRequestsByRequestIdStatusResponses[keyof GetFalAiRealisticVisionRequestsByRequestIdStatusResponses] + +export type PutFalAiRealisticVisionRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/realistic-vision/requests/{request_id}/cancel' +} + +export type PutFalAiRealisticVisionRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiRealisticVisionRequestsByRequestIdCancelResponse = + PutFalAiRealisticVisionRequestsByRequestIdCancelResponses[keyof PutFalAiRealisticVisionRequestsByRequestIdCancelResponses] + +export type PostFalAiRealisticVisionData = { + body: SchemaRealisticVisionInput + path?: never + query?: never + url: '/fal-ai/realistic-vision' +} + +export type PostFalAiRealisticVisionResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiRealisticVisionResponse = + PostFalAiRealisticVisionResponses[keyof PostFalAiRealisticVisionResponses] + +export type GetFalAiRealisticVisionRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/realistic-vision/requests/{request_id}' +} + +export type GetFalAiRealisticVisionRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaRealisticVisionOutput +} + +export type GetFalAiRealisticVisionRequestsByRequestIdResponse = + GetFalAiRealisticVisionRequestsByRequestIdResponses[keyof GetFalAiRealisticVisionRequestsByRequestIdResponses] + +export type GetFalAiDreamshaperRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/dreamshaper/requests/{request_id}/status' +} + +export type GetFalAiDreamshaperRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiDreamshaperRequestsByRequestIdStatusResponse = + GetFalAiDreamshaperRequestsByRequestIdStatusResponses[keyof GetFalAiDreamshaperRequestsByRequestIdStatusResponses] + +export type PutFalAiDreamshaperRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/dreamshaper/requests/{request_id}/cancel' +} + +export type PutFalAiDreamshaperRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiDreamshaperRequestsByRequestIdCancelResponse = + PutFalAiDreamshaperRequestsByRequestIdCancelResponses[keyof PutFalAiDreamshaperRequestsByRequestIdCancelResponses] + +export type PostFalAiDreamshaperData = { + body: SchemaDreamshaperInput + path?: never + query?: never + url: '/fal-ai/dreamshaper' +} + +export type PostFalAiDreamshaperResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiDreamshaperResponse = + PostFalAiDreamshaperResponses[keyof PostFalAiDreamshaperResponses] + +export type GetFalAiDreamshaperRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/dreamshaper/requests/{request_id}' +} + +export type GetFalAiDreamshaperRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaDreamshaperOutput +} + +export type GetFalAiDreamshaperRequestsByRequestIdResponse = + GetFalAiDreamshaperRequestsByRequestIdResponses[keyof GetFalAiDreamshaperRequestsByRequestIdResponses] + +export type GetFalAiStableDiffusionV15RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/stable-diffusion-v15/requests/{request_id}/status' +} + +export type GetFalAiStableDiffusionV15RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiStableDiffusionV15RequestsByRequestIdStatusResponse = + GetFalAiStableDiffusionV15RequestsByRequestIdStatusResponses[keyof GetFalAiStableDiffusionV15RequestsByRequestIdStatusResponses] + +export type PutFalAiStableDiffusionV15RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/stable-diffusion-v15/requests/{request_id}/cancel' +} + +export type PutFalAiStableDiffusionV15RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiStableDiffusionV15RequestsByRequestIdCancelResponse = + PutFalAiStableDiffusionV15RequestsByRequestIdCancelResponses[keyof PutFalAiStableDiffusionV15RequestsByRequestIdCancelResponses] + +export type PostFalAiStableDiffusionV15Data = { + body: SchemaStableDiffusionV15Input + path?: never + query?: never + url: '/fal-ai/stable-diffusion-v15' +} + +export type PostFalAiStableDiffusionV15Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiStableDiffusionV15Response = + PostFalAiStableDiffusionV15Responses[keyof PostFalAiStableDiffusionV15Responses] + +export type GetFalAiStableDiffusionV15RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/stable-diffusion-v15/requests/{request_id}' +} + +export type GetFalAiStableDiffusionV15RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaStableDiffusionV15Output +} + +export type GetFalAiStableDiffusionV15RequestsByRequestIdResponse = + GetFalAiStableDiffusionV15RequestsByRequestIdResponses[keyof GetFalAiStableDiffusionV15RequestsByRequestIdResponses] + +export type GetFalAiLayerDiffusionRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/layer-diffusion/requests/{request_id}/status' +} + +export type GetFalAiLayerDiffusionRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiLayerDiffusionRequestsByRequestIdStatusResponse = + GetFalAiLayerDiffusionRequestsByRequestIdStatusResponses[keyof GetFalAiLayerDiffusionRequestsByRequestIdStatusResponses] + +export type PutFalAiLayerDiffusionRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/layer-diffusion/requests/{request_id}/cancel' +} + +export type PutFalAiLayerDiffusionRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLayerDiffusionRequestsByRequestIdCancelResponse = + PutFalAiLayerDiffusionRequestsByRequestIdCancelResponses[keyof PutFalAiLayerDiffusionRequestsByRequestIdCancelResponses] + +export type PostFalAiLayerDiffusionData = { + body: SchemaLayerDiffusionInput + path?: never + query?: never + url: '/fal-ai/layer-diffusion' +} + +export type PostFalAiLayerDiffusionResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLayerDiffusionResponse = + PostFalAiLayerDiffusionResponses[keyof PostFalAiLayerDiffusionResponses] + +export type GetFalAiLayerDiffusionRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/layer-diffusion/requests/{request_id}' +} + +export type GetFalAiLayerDiffusionRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLayerDiffusionOutput +} + +export type GetFalAiLayerDiffusionRequestsByRequestIdResponse = + GetFalAiLayerDiffusionRequestsByRequestIdResponses[keyof GetFalAiLayerDiffusionRequestsByRequestIdResponses] + +export type GetFalAiFastLightningSdxlRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/fast-lightning-sdxl/requests/{request_id}/status' +} + +export type GetFalAiFastLightningSdxlRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFastLightningSdxlRequestsByRequestIdStatusResponse = + GetFalAiFastLightningSdxlRequestsByRequestIdStatusResponses[keyof GetFalAiFastLightningSdxlRequestsByRequestIdStatusResponses] + +export type PutFalAiFastLightningSdxlRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-lightning-sdxl/requests/{request_id}/cancel' +} + +export type PutFalAiFastLightningSdxlRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFastLightningSdxlRequestsByRequestIdCancelResponse = + PutFalAiFastLightningSdxlRequestsByRequestIdCancelResponses[keyof PutFalAiFastLightningSdxlRequestsByRequestIdCancelResponses] + +export type PostFalAiFastLightningSdxlData = { + body: SchemaFastLightningSdxlInput + path?: never + query?: never + url: '/fal-ai/fast-lightning-sdxl' +} + +export type PostFalAiFastLightningSdxlResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiFastLightningSdxlResponse = + PostFalAiFastLightningSdxlResponses[keyof PostFalAiFastLightningSdxlResponses] + +export type GetFalAiFastLightningSdxlRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-lightning-sdxl/requests/{request_id}' +} + +export type GetFalAiFastLightningSdxlRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFastLightningSdxlOutput +} + +export type GetFalAiFastLightningSdxlRequestsByRequestIdResponse = + GetFalAiFastLightningSdxlRequestsByRequestIdResponses[keyof GetFalAiFastLightningSdxlRequestsByRequestIdResponses] + +export type GetFalAiFastFooocusSdxlImageToImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/fast-fooocus-sdxl/image-to-image/requests/{request_id}/status' +} + +export type GetFalAiFastFooocusSdxlImageToImageRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFastFooocusSdxlImageToImageRequestsByRequestIdStatusResponse = + GetFalAiFastFooocusSdxlImageToImageRequestsByRequestIdStatusResponses[keyof GetFalAiFastFooocusSdxlImageToImageRequestsByRequestIdStatusResponses] + +export type PutFalAiFastFooocusSdxlImageToImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-fooocus-sdxl/image-to-image/requests/{request_id}/cancel' +} + +export type PutFalAiFastFooocusSdxlImageToImageRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFastFooocusSdxlImageToImageRequestsByRequestIdCancelResponse = + PutFalAiFastFooocusSdxlImageToImageRequestsByRequestIdCancelResponses[keyof PutFalAiFastFooocusSdxlImageToImageRequestsByRequestIdCancelResponses] + +export type PostFalAiFastFooocusSdxlImageToImageData = { + body: SchemaFastFooocusSdxlImageToImageInput + path?: never + query?: never + url: '/fal-ai/fast-fooocus-sdxl/image-to-image' +} + +export type PostFalAiFastFooocusSdxlImageToImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFastFooocusSdxlImageToImageResponse = + PostFalAiFastFooocusSdxlImageToImageResponses[keyof PostFalAiFastFooocusSdxlImageToImageResponses] + +export type GetFalAiFastFooocusSdxlImageToImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-fooocus-sdxl/image-to-image/requests/{request_id}' +} + +export type GetFalAiFastFooocusSdxlImageToImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFastFooocusSdxlImageToImageOutput +} + +export type GetFalAiFastFooocusSdxlImageToImageRequestsByRequestIdResponse = + GetFalAiFastFooocusSdxlImageToImageRequestsByRequestIdResponses[keyof GetFalAiFastFooocusSdxlImageToImageRequestsByRequestIdResponses] + +export type GetFalAiFastSdxlControlnetCannyRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/fast-sdxl-controlnet-canny/requests/{request_id}/status' +} + +export type GetFalAiFastSdxlControlnetCannyRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFastSdxlControlnetCannyRequestsByRequestIdStatusResponse = + GetFalAiFastSdxlControlnetCannyRequestsByRequestIdStatusResponses[keyof GetFalAiFastSdxlControlnetCannyRequestsByRequestIdStatusResponses] + +export type PutFalAiFastSdxlControlnetCannyRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-sdxl-controlnet-canny/requests/{request_id}/cancel' +} + +export type PutFalAiFastSdxlControlnetCannyRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFastSdxlControlnetCannyRequestsByRequestIdCancelResponse = + PutFalAiFastSdxlControlnetCannyRequestsByRequestIdCancelResponses[keyof PutFalAiFastSdxlControlnetCannyRequestsByRequestIdCancelResponses] + +export type PostFalAiFastSdxlControlnetCannyData = { + body: SchemaFastSdxlControlnetCannyInput + path?: never + query?: never + url: '/fal-ai/fast-sdxl-controlnet-canny' +} + +export type PostFalAiFastSdxlControlnetCannyResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFastSdxlControlnetCannyResponse = + PostFalAiFastSdxlControlnetCannyResponses[keyof PostFalAiFastSdxlControlnetCannyResponses] + +export type GetFalAiFastSdxlControlnetCannyRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-sdxl-controlnet-canny/requests/{request_id}' +} + +export type GetFalAiFastSdxlControlnetCannyRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFastSdxlControlnetCannyOutput +} + +export type GetFalAiFastSdxlControlnetCannyRequestsByRequestIdResponse = + GetFalAiFastSdxlControlnetCannyRequestsByRequestIdResponses[keyof GetFalAiFastSdxlControlnetCannyRequestsByRequestIdResponses] + +export type GetFalAiFastLcmDiffusionRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/fast-lcm-diffusion/requests/{request_id}/status' +} + +export type GetFalAiFastLcmDiffusionRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFastLcmDiffusionRequestsByRequestIdStatusResponse = + GetFalAiFastLcmDiffusionRequestsByRequestIdStatusResponses[keyof GetFalAiFastLcmDiffusionRequestsByRequestIdStatusResponses] + +export type PutFalAiFastLcmDiffusionRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-lcm-diffusion/requests/{request_id}/cancel' +} + +export type PutFalAiFastLcmDiffusionRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiFastLcmDiffusionRequestsByRequestIdCancelResponse = + PutFalAiFastLcmDiffusionRequestsByRequestIdCancelResponses[keyof PutFalAiFastLcmDiffusionRequestsByRequestIdCancelResponses] + +export type PostFalAiFastLcmDiffusionData = { + body: SchemaFastLcmDiffusionInput + path?: never + query?: never + url: '/fal-ai/fast-lcm-diffusion' +} + +export type PostFalAiFastLcmDiffusionResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFastLcmDiffusionResponse = + PostFalAiFastLcmDiffusionResponses[keyof PostFalAiFastLcmDiffusionResponses] + +export type GetFalAiFastLcmDiffusionRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-lcm-diffusion/requests/{request_id}' +} + +export type GetFalAiFastLcmDiffusionRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFastLcmDiffusionOutput +} + +export type GetFalAiFastLcmDiffusionRequestsByRequestIdResponse = + GetFalAiFastLcmDiffusionRequestsByRequestIdResponses[keyof GetFalAiFastLcmDiffusionRequestsByRequestIdResponses] + +export type GetFalAiFastFooocusSdxlRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/fast-fooocus-sdxl/requests/{request_id}/status' +} + +export type GetFalAiFastFooocusSdxlRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFastFooocusSdxlRequestsByRequestIdStatusResponse = + GetFalAiFastFooocusSdxlRequestsByRequestIdStatusResponses[keyof GetFalAiFastFooocusSdxlRequestsByRequestIdStatusResponses] + +export type PutFalAiFastFooocusSdxlRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-fooocus-sdxl/requests/{request_id}/cancel' +} + +export type PutFalAiFastFooocusSdxlRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFastFooocusSdxlRequestsByRequestIdCancelResponse = + PutFalAiFastFooocusSdxlRequestsByRequestIdCancelResponses[keyof PutFalAiFastFooocusSdxlRequestsByRequestIdCancelResponses] + +export type PostFalAiFastFooocusSdxlData = { + body: SchemaFastFooocusSdxlInput + path?: never + query?: never + url: '/fal-ai/fast-fooocus-sdxl' +} + +export type PostFalAiFastFooocusSdxlResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFastFooocusSdxlResponse = + PostFalAiFastFooocusSdxlResponses[keyof PostFalAiFastFooocusSdxlResponses] + +export type GetFalAiFastFooocusSdxlRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-fooocus-sdxl/requests/{request_id}' +} + +export type GetFalAiFastFooocusSdxlRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaFastFooocusSdxlOutput +} + +export type GetFalAiFastFooocusSdxlRequestsByRequestIdResponse = + GetFalAiFastFooocusSdxlRequestsByRequestIdResponses[keyof GetFalAiFastFooocusSdxlRequestsByRequestIdResponses] + +export type GetFalAiIllusionDiffusionRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/illusion-diffusion/requests/{request_id}/status' +} + +export type GetFalAiIllusionDiffusionRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiIllusionDiffusionRequestsByRequestIdStatusResponse = + GetFalAiIllusionDiffusionRequestsByRequestIdStatusResponses[keyof GetFalAiIllusionDiffusionRequestsByRequestIdStatusResponses] + +export type PutFalAiIllusionDiffusionRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/illusion-diffusion/requests/{request_id}/cancel' +} + +export type PutFalAiIllusionDiffusionRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiIllusionDiffusionRequestsByRequestIdCancelResponse = + PutFalAiIllusionDiffusionRequestsByRequestIdCancelResponses[keyof PutFalAiIllusionDiffusionRequestsByRequestIdCancelResponses] + +export type PostFalAiIllusionDiffusionData = { + body: SchemaIllusionDiffusionInput + path?: never + query?: never + url: '/fal-ai/illusion-diffusion' +} + +export type PostFalAiIllusionDiffusionResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiIllusionDiffusionResponse = + PostFalAiIllusionDiffusionResponses[keyof PostFalAiIllusionDiffusionResponses] + +export type GetFalAiIllusionDiffusionRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/illusion-diffusion/requests/{request_id}' +} + +export type GetFalAiIllusionDiffusionRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaIllusionDiffusionOutput +} + +export type GetFalAiIllusionDiffusionRequestsByRequestIdResponse = + GetFalAiIllusionDiffusionRequestsByRequestIdResponses[keyof GetFalAiIllusionDiffusionRequestsByRequestIdResponses] + +export type GetFalAiFooocusImagePromptRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/fooocus/image-prompt/requests/{request_id}/status' +} + +export type GetFalAiFooocusImagePromptRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFooocusImagePromptRequestsByRequestIdStatusResponse = + GetFalAiFooocusImagePromptRequestsByRequestIdStatusResponses[keyof GetFalAiFooocusImagePromptRequestsByRequestIdStatusResponses] + +export type PutFalAiFooocusImagePromptRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fooocus/image-prompt/requests/{request_id}/cancel' +} + +export type PutFalAiFooocusImagePromptRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. 
+ */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFooocusImagePromptRequestsByRequestIdCancelResponse = + PutFalAiFooocusImagePromptRequestsByRequestIdCancelResponses[keyof PutFalAiFooocusImagePromptRequestsByRequestIdCancelResponses] + +export type PostFalAiFooocusImagePromptData = { + body: SchemaFooocusImagePromptInput + path?: never + query?: never + url: '/fal-ai/fooocus/image-prompt' +} + +export type PostFalAiFooocusImagePromptResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFooocusImagePromptResponse = + PostFalAiFooocusImagePromptResponses[keyof PostFalAiFooocusImagePromptResponses] + +export type GetFalAiFooocusImagePromptRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fooocus/image-prompt/requests/{request_id}' +} + +export type GetFalAiFooocusImagePromptRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFooocusImagePromptOutput +} + +export type GetFalAiFooocusImagePromptRequestsByRequestIdResponse = + GetFalAiFooocusImagePromptRequestsByRequestIdResponses[keyof GetFalAiFooocusImagePromptRequestsByRequestIdResponses] + +export type GetFalAiFooocusInpaintRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/fooocus/inpaint/requests/{request_id}/status' +} + +export type GetFalAiFooocusInpaintRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFooocusInpaintRequestsByRequestIdStatusResponse = + GetFalAiFooocusInpaintRequestsByRequestIdStatusResponses[keyof GetFalAiFooocusInpaintRequestsByRequestIdStatusResponses] + +export type PutFalAiFooocusInpaintRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fooocus/inpaint/requests/{request_id}/cancel' +} + +export type PutFalAiFooocusInpaintRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFooocusInpaintRequestsByRequestIdCancelResponse = + PutFalAiFooocusInpaintRequestsByRequestIdCancelResponses[keyof PutFalAiFooocusInpaintRequestsByRequestIdCancelResponses] + +export type PostFalAiFooocusInpaintData = { + body: SchemaFooocusInpaintInput + path?: never + query?: never + url: '/fal-ai/fooocus/inpaint' +} + +export type PostFalAiFooocusInpaintResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFooocusInpaintResponse = + PostFalAiFooocusInpaintResponses[keyof PostFalAiFooocusInpaintResponses] + +export type GetFalAiFooocusInpaintRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fooocus/inpaint/requests/{request_id}' +} + +export type GetFalAiFooocusInpaintRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaFooocusInpaintOutput +} + +export type GetFalAiFooocusInpaintRequestsByRequestIdResponse = + GetFalAiFooocusInpaintRequestsByRequestIdResponses[keyof GetFalAiFooocusInpaintRequestsByRequestIdResponses] + +export type GetFalAiLcmRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/lcm/requests/{request_id}/status' +} + +export type GetFalAiLcmRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLcmRequestsByRequestIdStatusResponse = + GetFalAiLcmRequestsByRequestIdStatusResponses[keyof GetFalAiLcmRequestsByRequestIdStatusResponses] + +export type PutFalAiLcmRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/lcm/requests/{request_id}/cancel' +} + +export type PutFalAiLcmRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLcmRequestsByRequestIdCancelResponse = + PutFalAiLcmRequestsByRequestIdCancelResponses[keyof PutFalAiLcmRequestsByRequestIdCancelResponses] + +export type PostFalAiLcmData = { + body: SchemaLcmInput + path?: never + query?: never + url: '/fal-ai/lcm' +} + +export type PostFalAiLcmResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLcmResponse = + PostFalAiLcmResponses[keyof PostFalAiLcmResponses] + +export type GetFalAiLcmRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/lcm/requests/{request_id}' +} + +export type GetFalAiLcmRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLcmOutput +} + +export type GetFalAiLcmRequestsByRequestIdResponse = + GetFalAiLcmRequestsByRequestIdResponses[keyof GetFalAiLcmRequestsByRequestIdResponses] + +export type GetFalAiDiffusionEdgeRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/diffusion-edge/requests/{request_id}/status' +} + +export type GetFalAiDiffusionEdgeRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiDiffusionEdgeRequestsByRequestIdStatusResponse = + GetFalAiDiffusionEdgeRequestsByRequestIdStatusResponses[keyof GetFalAiDiffusionEdgeRequestsByRequestIdStatusResponses] + +export type PutFalAiDiffusionEdgeRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/diffusion-edge/requests/{request_id}/cancel' +} + +export type PutFalAiDiffusionEdgeRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiDiffusionEdgeRequestsByRequestIdCancelResponse = + PutFalAiDiffusionEdgeRequestsByRequestIdCancelResponses[keyof PutFalAiDiffusionEdgeRequestsByRequestIdCancelResponses] + +export type PostFalAiDiffusionEdgeData = { + body: SchemaDiffusionEdgeInput + path?: never + query?: never + url: '/fal-ai/diffusion-edge' +} + +export type PostFalAiDiffusionEdgeResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiDiffusionEdgeResponse = + PostFalAiDiffusionEdgeResponses[keyof PostFalAiDiffusionEdgeResponses] + +export type GetFalAiDiffusionEdgeRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/diffusion-edge/requests/{request_id}' +} + +export type GetFalAiDiffusionEdgeRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaDiffusionEdgeOutput +} + +export type GetFalAiDiffusionEdgeRequestsByRequestIdResponse = + GetFalAiDiffusionEdgeRequestsByRequestIdResponses[keyof GetFalAiDiffusionEdgeRequestsByRequestIdResponses] + +export type GetFalAiFooocusRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/fooocus/requests/{request_id}/status' +} + +export type GetFalAiFooocusRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFooocusRequestsByRequestIdStatusResponse = + GetFalAiFooocusRequestsByRequestIdStatusResponses[keyof GetFalAiFooocusRequestsByRequestIdStatusResponses] + +export type PutFalAiFooocusRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fooocus/requests/{request_id}/cancel' +} + +export type PutFalAiFooocusRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFooocusRequestsByRequestIdCancelResponse = + PutFalAiFooocusRequestsByRequestIdCancelResponses[keyof PutFalAiFooocusRequestsByRequestIdCancelResponses] + +export type PostFalAiFooocusData = { + body: SchemaFooocusInput + path?: never + query?: never + url: '/fal-ai/fooocus' +} + +export type PostFalAiFooocusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFooocusResponse = + PostFalAiFooocusResponses[keyof PostFalAiFooocusResponses] + +export type GetFalAiFooocusRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fooocus/requests/{request_id}' +} + +export type GetFalAiFooocusRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFooocusOutput +} + +export type GetFalAiFooocusRequestsByRequestIdResponse = + GetFalAiFooocusRequestsByRequestIdResponses[keyof GetFalAiFooocusRequestsByRequestIdResponses] + +export type GetFalAiLoraRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/lora/requests/{request_id}/status' +} + +export type GetFalAiLoraRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLoraRequestsByRequestIdStatusResponse = + GetFalAiLoraRequestsByRequestIdStatusResponses[keyof GetFalAiLoraRequestsByRequestIdStatusResponses] + +export type PutFalAiLoraRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/lora/requests/{request_id}/cancel' +} + +export type PutFalAiLoraRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLoraRequestsByRequestIdCancelResponse = + PutFalAiLoraRequestsByRequestIdCancelResponses[keyof PutFalAiLoraRequestsByRequestIdCancelResponses] + +export type PostFalAiLoraData = { + body: SchemaLoraInput + path?: never + query?: never + url: '/fal-ai/lora' +} + +export type PostFalAiLoraResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLoraResponse = + PostFalAiLoraResponses[keyof PostFalAiLoraResponses] + +export type GetFalAiLoraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/lora/requests/{request_id}' +} + +export type GetFalAiLoraRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLoraOutput +} + +export type GetFalAiLoraRequestsByRequestIdResponse = + GetFalAiLoraRequestsByRequestIdResponses[keyof GetFalAiLoraRequestsByRequestIdResponses] diff --git a/packages/typescript/ai-fal/src/generated/text-to-image/zod.gen.ts b/packages/typescript/ai-fal/src/generated/text-to-image/zod.gen.ts new file mode 100644 index 00000000..3a3fd6b2 --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/text-to-image/zod.gen.ts @@ -0,0 +1,28553 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { z } from 'zod' + +/** + * TimestepsInput + */ +export const zSchemaTimestepsInput = z.object({ + method: z.optional( + z.enum(['default', 'array']).register(z.globalRegistry, { + description: + "\n The method to use for the timesteps. If set to 'array', the timesteps will be set based\n on the provided timesteps schedule in the `array` field.\n Defaults to 'default' which means the scheduler will use the `num_inference_steps` parameter.\n ", + }), + ), + array: z + .optional( + z.array(z.int()).register(z.globalRegistry, { + description: + "\n Timesteps schedule to be used if 'custom' method is selected.\n ", + }), + ) + .default([]), +}) + +/** + * SigmasInput + */ +export const zSchemaSigmasInput = z.object({ + method: z.optional( + z.enum(['default', 'array']).register(z.globalRegistry, { + description: + "\n The method to use for the sigmas. 
If set to 'custom', the sigmas will be set based\n on the provided sigmas schedule in the `array` field.\n Defaults to 'default' which means the scheduler will use the sigmas of the scheduler.\n ", + }), + ), + array: z + .optional( + z.array(z.number()).register(z.globalRegistry, { + description: + "\n Sigmas schedule to be used if 'custom' method is selected.\n ", + }), + ) + .default([]), +}) + +/** + * File + */ +export const zSchemaFile = z.object({ + file_size: z.optional( + z.int().register(z.globalRegistry, { + description: 'The size of the file in bytes.', + }), + ), + file_name: z.optional( + z.string().register(z.globalRegistry, { + description: + 'The name of the file. It will be auto-generated if not provided.', + }), + ), + content_type: z.optional( + z.string().register(z.globalRegistry, { + description: 'The mime type of the file.', + }), + ), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), + file_data: z.optional( + z.string().register(z.globalRegistry, { + description: 'File data', + }), + ), +}) + +/** + * Image + * + * Represents an image file. + */ +export const zSchemaImage = z + .object({ + file_size: z.optional( + z.int().register(z.globalRegistry, { + description: 'The size of the file in bytes.', + }), + ), + height: z.optional( + z.int().register(z.globalRegistry, { + description: 'The height of the image in pixels.', + }), + ), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), + width: z.optional( + z.int().register(z.globalRegistry, { + description: 'The width of the image in pixels.', + }), + ), + file_name: z.optional( + z.string().register(z.globalRegistry, { + description: + 'The name of the file. It will be auto-generated if not provided.', + }), + ), + content_type: z.optional( + z.string().register(z.globalRegistry, { + description: 'The mime type of the file.', + }), + ), + file_data: z.optional( + z.string().register(z.globalRegistry, { + description: 'File data', + }), + ), + }) + .register(z.globalRegistry, { + description: 'Represents an image file.', + }) + +/** + * OutputParameters + */ +export const zSchemaLoraOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + debug_latents: z.optional(zSchemaFile), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + debug_per_pass_latents: z.optional(zSchemaFile), +}) + +/** + * ControlNet + */ +export const zSchemaControlNet = z.object({ + conditioning_scale: z + .optional( + z.number().gte(0).lte(2).register(z.globalRegistry, { + description: + '\n The scale of the control net weight. This is used to scale the control net weight\n before merging it with the base model.\n ', + }), + ) + .default(1), + path: z.string().register(z.globalRegistry, { + description: 'URL or the path to the control net weights.', + }), + ip_adapter_index: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The index of the IP adapter to be applied to the controlnet. 
This is only needed for InstantID ControlNets.\n ', + }), + ), + end_percentage: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n The percentage of the image to end applying the controlnet in terms of the total timesteps.\n ', + }), + ) + .default(1), + config_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'optional URL to the controlnet config.json file.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to be used as the control net.', + }), + variant: z.optional( + z.string().register(z.globalRegistry, { + description: 'The optional variant if a Hugging Face repo key is used.', + }), + ), + mask_url: z.optional( + z.string().register(z.globalRegistry, { + description: + '\n The mask to use for the controlnet. When using a mask, the control image size and the mask size must be the same and divisible by 32.\n ', + }), + ), + start_percentage: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n The percentage of the image to start applying the controlnet in terms of the total timesteps.\n ', + }), + ) + .default(0), +}) + +/** + * LoraWeight + */ +export const zSchemaLoraWeight = z.object({ + path: z.string().register(z.globalRegistry, { + description: 'URL or the path to the LoRA weights.', + }), + scale: z + .optional( + z.number().gte(0).lte(4).register(z.globalRegistry, { + description: + '\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ', + }), + ) + .default(1), +}) + +/** + * IPAdapter + */ +export const zSchemaIpAdapter = z.object({ + unconditional_noising_factor: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The factor to apply to the unconditional noising of the IP adapter.', + }), + ) + .default(0), + ip_adapter_image_url: z.union([z.string(), z.array(z.string())]), + path: z.string().register(z.globalRegistry, { + description: 'URL or the path to the IP adapter weights.', + }), + image_projection_shortcut: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n The value to set the image projection shortcut to. For FaceID plus V1 models,\n this should be set to False. For FaceID plus V2 models, this should be set to True.\n Default is True.\n ', + }), + ) + .default(true), + scale_json: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: + '\n The scale of the IP adapter weight. This is used to scale the IP adapter weight\n before merging it with the base model.\n ', + }), + ), + ip_adapter_mask_url: z.optional( + z.string().register(z.globalRegistry, { + description: + '\n The mask to use for the IP adapter. When using a mask, the ip-adapter image size and the mask size must be the same\n ', + }), + ), + model_subfolder: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Subfolder in the model directory where the IP adapter weights are stored.', + }), + ), + scale: z + .optional( + z.number().gte(0).register(z.globalRegistry, { + description: + '\n The scale of the IP adapter weight. 
This is used to scale the IP adapter weight\n before merging it with the base model.\n ', + }), + ) + .default(1), + insight_face_model_path: z.optional( + z.string().register(z.globalRegistry, { + description: 'URL or the path to the InsightFace model weights.', + }), + ), + weight_name: z.optional( + z.string().register(z.globalRegistry, { + description: 'Name of the weight file.', + }), + ), +}) + +/** + * Embedding + */ +export const zSchemaEmbedding = z.object({ + tokens: z + .optional( + z.array(z.string()).register(z.globalRegistry, { + description: + '\n The tokens to map the embedding weights to. Use these tokens in your prompts.\n ', + }), + ) + .default(['', '']), + path: z.string().register(z.globalRegistry, { + description: 'URL or the path to the embedding weights.', + }), +}) + +/** + * ImageSize + */ +export const zSchemaImageSize = z.object({ + height: z + .optional( + z.int().lte(14142).register(z.globalRegistry, { + description: 'The height of the generated image.', + }), + ) + .default(512), + width: z + .optional( + z.int().lte(14142).register(z.globalRegistry, { + description: 'The width of the generated image.', + }), + ) + .default(512), +}) + +/** + * TextToImageInput + */ +export const zSchemaLoraInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. Be as descriptive as possible for best results.', + }), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + tile_height: z + .optional( + z.int().gte(128).lte(4096).register(z.globalRegistry, { + description: + 'The size of the tiles to be used for the image generation.', + }), + ) + .default(4096), + embeddings: z + .optional( + z.array(zSchemaEmbedding).register(z.globalRegistry, { + description: + '\n The embeddings to use for the image generation. Only a single embedding is supported at the moment.\n The embeddings will be used to map the tokens in the prompt to the embedding weights.\n ', + }), + ) + .default([]), + ic_light_model_url: z.optional( + z.string().register(z.globalRegistry, { + description: + '\n The URL of the IC Light model to use for the image generation.\n ', + }), + ), + image_encoder_weight_name: z + .optional( + z.string().register(z.globalRegistry, { + description: + '\n The weight name of the image encoder model to use for the image generation.\n ', + }), + ) + .default('pytorch_model.bin'), + ip_adapter: z + .optional( + z.array(zSchemaIpAdapter).register(z.globalRegistry, { + description: + '\n The IP adapter to use for the image generation.\n ', + }), + ) + .default([]), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. 
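// --- Editor's sketch, not part of the generated diff: the small building-block
// schemas fill their documented defaults on parse, which is handy when composing
// the larger input schemas below. URLs and the './zod.gen' path are illustrative.
import { zSchemaLoraWeight, zSchemaImageSize } from './zod.gen'

const lora = zSchemaLoraWeight.parse({
  path: 'https://example.com/style.safetensors', // hypothetical weights URL
})
// => { path: ..., scale: 1 }  (scale defaults to 1, bounded 0..4)

const size = zSchemaImageSize.parse({ width: 768, height: 1024 })
// width/height each default to 512 and are capped at 14142 by the schema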
You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + scheduler: z.optional( + z + .enum([ + 'DPM++ 2M', + 'DPM++ 2M Karras', + 'DPM++ 2M SDE', + 'DPM++ 2M SDE Karras', + 'Euler', + 'Euler A', + 'Euler (trailing timesteps)', + 'LCM', + 'LCM (trailing timesteps)', + 'DDIM', + 'TCD', + ]) + .register(z.globalRegistry, { + description: + 'Scheduler / sampler to use for the image denoising process.', + }), + ), + sigmas: z.optional(zSchemaSigmasInput), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(7.5), + tile_stride_width: z + .optional( + z.int().gte(64).lte(2048).register(z.globalRegistry, { + description: + 'The stride of the tiles to be used for the image generation.', + }), + ) + .default(2048), + debug_per_pass_latents: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the latents will be saved for debugging per pass.', + }), + ) + .default(false), + timesteps: z.optional(zSchemaTimestepsInput), + image_encoder_subfolder: z.optional( + z.string().register(z.globalRegistry, { + description: + '\n The subfolder of the image encoder model to use for the image generation.\n ', + }), + ), + prompt_weighting: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the prompt weighting syntax will be used.\n Additionally, this will lift the 77 token limit by averaging embeddings.\n ', + }), + ) + .default(false), + variant: z.optional( + z.string().register(z.globalRegistry, { + description: + "The variant of the model to use for huggingface models, e.g. 
'fp16'.", + }), + ), + model_name: z.string().register(z.globalRegistry, { + description: + 'URL or HuggingFace ID of the base model to generate the image.', + }), + controlnet_guess_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the controlnet will be applied to only the conditional predictions.\n ', + }), + ) + .default(false), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), + ic_light_model_background_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: + '\n The URL of the IC Light model background image to use for the image generation.\n Make sure to use a background compatible with the model.\n ', + }), + ), + rescale_betas_snr_zero: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n Whether to set the rescale_betas_snr_zero option or not for the sampler\n ', + }), + ) + .default(false), + tile_width: z + .optional( + z.int().gte(128).lte(4096).register(z.globalRegistry, { + description: + 'The size of the tiles to be used for the image generation.', + }), + ) + .default(4096), + prediction_type: z.optional( + z.enum(['v_prediction', 'epsilon']).register(z.globalRegistry, { + description: + '\n The type of prediction to use for the image generation.\n The `epsilon` is the default.\n ', + }), + ), + eta: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The eta value to be used for the image generation.', + }), + ) + .default(0), + image_encoder_path: z.optional( + z.string().register(z.globalRegistry, { + description: + '\n The path to the image encoder model to use for the image generation.\n ', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(false), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use.Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + image_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: + '\n Number of images to generate in one request. 
Note that the higher the batch size,\n the longer it will take to generate the images.\n ', + }), + ) + .default(1), + debug_latents: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the latents will be saved for debugging.', + }), + ) + .default(false), + ic_light_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: + '\n The URL of the IC Light model image to use for the image generation.\n ', + }), + ), + unet_name: z.optional( + z.string().register(z.globalRegistry, { + description: + 'URL or HuggingFace ID of the custom U-Net model to use for the image generation.', + }), + ), + clip_skip: z + .optional( + z.int().gte(0).lte(2).register(z.globalRegistry, { + description: + '\n Skips part of the image generation process, leading to slightly different results.\n This means the image renders faster, too.\n ', + }), + ) + .default(0), + tile_stride_height: z + .optional( + z.int().gte(64).lte(2048).register(z.globalRegistry, { + description: + 'The stride of the tiles to be used for the image generation.', + }), + ) + .default(2048), + controlnets: z + .optional( + z.array(zSchemaControlNet).register(z.globalRegistry, { + description: + '\n The control nets to use for the image generation. You can use any number of control nets\n and they will be applied to the image at the specified timesteps.\n ', + }), + ) + .default([]), + num_inference_steps: z + .optional( + z.int().gte(1).lte(150).register(z.globalRegistry, { + description: + '\n Increasing the amount of steps tells Stable Diffusion that it should take more steps\n to generate your final result which can increase the amount of detail in your image.\n ', + }), + ) + .default(30), +}) + +/** + * FooocusOutput + */ +export const zSchemaFooocusOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image file info.', + }), + timings: z.record(z.string(), z.number()).register(z.globalRegistry, { + description: 'The time taken for the generation process.', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), +}) + +/** + * FooocusLegacyInput + */ +export const zSchemaFooocusInput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. 
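// --- Editor's sketch, not part of the generated diff: gating a request body
// with the generated Zod schema before it reaches the queue client shown
// earlier. Values are illustrative; only `model_name` and `prompt` are
// required by zSchemaLoraInput.
import { z } from 'zod'
import { zSchemaLoraInput } from './zod.gen'

type LoraInput = z.infer<typeof zSchemaLoraInput>

const input: LoraInput = zSchemaLoraInput.parse({
  model_name: 'stabilityai/stable-diffusion-xl-base-1.0', // URL or HF id
  prompt: 'a watercolor fox in the snow, highly detailed',
  image_size: 'square_hd', // preset name or an explicit { width, height }
  guidance_scale: 7.5, // must sit inside the schema's 0..20 bound
  num_inference_steps: 30, // 1..150
})
// parse() also materializes the documented defaults (loras: [], embeddings: [],
// negative_prompt: '', eta: 0, ...), so `input` is ready to POST as-is.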
Be as descriptive as possible for best results.', + }), + ) + .default(''), + performance: z.optional( + z + .enum(['Speed', 'Quality', 'Extreme Speed', 'Lightning']) + .register(z.globalRegistry, { + description: '\n You can choose Speed or Quality\n ', + }), + ), + styles: z + .optional( + z + .array( + z.enum([ + 'Fooocus V2', + 'Fooocus Enhance', + 'Fooocus Sharp', + 'Fooocus Semi Realistic', + 'Fooocus Masterpiece', + 'Fooocus Photograph', + 'Fooocus Negative', + 'Fooocus Cinematic', + 'SAI 3D Model', + 'SAI Analog Film', + 'SAI Anime', + 'SAI Cinematic', + 'SAI Comic Book', + 'SAI Craft Clay', + 'SAI Digital Art', + 'SAI Enhance', + 'SAI Fantasy Art', + 'SAI Isometric', + 'SAI Line Art', + 'SAI Lowpoly', + 'SAI Neonpunk', + 'SAI Origami', + 'SAI Photographic', + 'SAI Pixel Art', + 'SAI Texture', + 'MRE Cinematic Dynamic', + 'MRE Spontaneous Picture', + 'MRE Artistic Vision', + 'MRE Dark Dream', + 'MRE Gloomy Art', + 'MRE Bad Dream', + 'MRE Underground', + 'MRE Surreal Painting', + 'MRE Dynamic Illustration', + 'MRE Undead Art', + 'MRE Elemental Art', + 'MRE Space Art', + 'MRE Ancient Illustration', + 'MRE Brave Art', + 'MRE Heroic Fantasy', + 'MRE Dark Cyberpunk', + 'MRE Lyrical Geometry', + 'MRE Sumi E Symbolic', + 'MRE Sumi E Detailed', + 'MRE Manga', + 'MRE Anime', + 'MRE Comic', + 'Ads Advertising', + 'Ads Automotive', + 'Ads Corporate', + 'Ads Fashion Editorial', + 'Ads Food Photography', + 'Ads Gourmet Food Photography', + 'Ads Luxury', + 'Ads Real Estate', + 'Ads Retail', + 'Artstyle Abstract', + 'Artstyle Abstract Expressionism', + 'Artstyle Art Deco', + 'Artstyle Art Nouveau', + 'Artstyle Constructivist', + 'Artstyle Cubist', + 'Artstyle Expressionist', + 'Artstyle Graffiti', + 'Artstyle Hyperrealism', + 'Artstyle Impressionist', + 'Artstyle Pointillism', + 'Artstyle Pop Art', + 'Artstyle Psychedelic', + 'Artstyle Renaissance', + 'Artstyle Steampunk', + 'Artstyle Surrealist', + 'Artstyle Typography', + 'Artstyle Watercolor', + 'Futuristic Biomechanical', + 'Futuristic Biomechanical Cyberpunk', + 'Futuristic Cybernetic', + 'Futuristic Cybernetic Robot', + 'Futuristic Cyberpunk Cityscape', + 'Futuristic Futuristic', + 'Futuristic Retro Cyberpunk', + 'Futuristic Retro Futurism', + 'Futuristic Sci Fi', + 'Futuristic Vaporwave', + 'Game Bubble Bobble', + 'Game Cyberpunk Game', + 'Game Fighting Game', + 'Game Gta', + 'Game Mario', + 'Game Minecraft', + 'Game Pokemon', + 'Game Retro Arcade', + 'Game Retro Game', + 'Game Rpg Fantasy Game', + 'Game Strategy Game', + 'Game Streetfighter', + 'Game Zelda', + 'Misc Architectural', + 'Misc Disco', + 'Misc Dreamscape', + 'Misc Dystopian', + 'Misc Fairy Tale', + 'Misc Gothic', + 'Misc Grunge', + 'Misc Horror', + 'Misc Kawaii', + 'Misc Lovecraftian', + 'Misc Macabre', + 'Misc Manga', + 'Misc Metropolis', + 'Misc Minimalist', + 'Misc Monochrome', + 'Misc Nautical', + 'Misc Space', + 'Misc Stained Glass', + 'Misc Techwear Fashion', + 'Misc Tribal', + 'Misc Zentangle', + 'Papercraft Collage', + 'Papercraft Flat Papercut', + 'Papercraft Kirigami', + 'Papercraft Paper Mache', + 'Papercraft Paper Quilling', + 'Papercraft Papercut Collage', + 'Papercraft Papercut Shadow Box', + 'Papercraft Stacked Papercut', + 'Papercraft Thick Layered Papercut', + 'Photo Alien', + 'Photo Film Noir', + 'Photo Glamour', + 'Photo Hdr', + 'Photo Iphone Photographic', + 'Photo Long Exposure', + 'Photo Neon Noir', + 'Photo Silhouette', + 'Photo Tilt Shift', + 'Cinematic Diva', + 'Abstract Expressionism', + 'Academia', + 'Action Figure', + 'Adorable 3D Character', + 
'Adorable Kawaii', + 'Art Deco', + 'Art Nouveau', + 'Astral Aura', + 'Avant Garde', + 'Baroque', + 'Bauhaus Style Poster', + 'Blueprint Schematic Drawing', + 'Caricature', + 'Cel Shaded Art', + 'Character Design Sheet', + 'Classicism Art', + 'Color Field Painting', + 'Colored Pencil Art', + 'Conceptual Art', + 'Constructivism', + 'Cubism', + 'Dadaism', + 'Dark Fantasy', + 'Dark Moody Atmosphere', + 'Dmt Art Style', + 'Doodle Art', + 'Double Exposure', + 'Dripping Paint Splatter Art', + 'Expressionism', + 'Faded Polaroid Photo', + 'Fauvism', + 'Flat 2d Art', + 'Fortnite Art Style', + 'Futurism', + 'Glitchcore', + 'Glo Fi', + 'Googie Art Style', + 'Graffiti Art', + 'Harlem Renaissance Art', + 'High Fashion', + 'Idyllic', + 'Impressionism', + 'Infographic Drawing', + 'Ink Dripping Drawing', + 'Japanese Ink Drawing', + 'Knolling Photography', + 'Light Cheery Atmosphere', + 'Logo Design', + 'Luxurious Elegance', + 'Macro Photography', + 'Mandola Art', + 'Marker Drawing', + 'Medievalism', + 'Minimalism', + 'Neo Baroque', + 'Neo Byzantine', + 'Neo Futurism', + 'Neo Impressionism', + 'Neo Rococo', + 'Neoclassicism', + 'Op Art', + 'Ornate And Intricate', + 'Pencil Sketch Drawing', + 'Pop Art 2', + 'Rococo', + 'Silhouette Art', + 'Simple Vector Art', + 'Sketchup', + 'Steampunk 2', + 'Surrealism', + 'Suprematism', + 'Terragen', + 'Tranquil Relaxing Atmosphere', + 'Sticker Designs', + 'Vibrant Rim Light', + 'Volumetric Lighting', + 'Watercolor 2', + 'Whimsical And Playful', + 'Mk Chromolithography', + 'Mk Cross Processing Print', + 'Mk Dufaycolor Photograph', + 'Mk Herbarium', + 'Mk Punk Collage', + 'Mk Mosaic', + 'Mk Van Gogh', + 'Mk Coloring Book', + 'Mk Singer Sargent', + 'Mk Pollock', + 'Mk Basquiat', + 'Mk Andy Warhol', + 'Mk Halftone Print', + 'Mk Gond Painting', + 'Mk Albumen Print', + 'Mk Aquatint Print', + 'Mk Anthotype Print', + 'Mk Inuit Carving', + 'Mk Bromoil Print', + 'Mk Calotype Print', + 'Mk Color Sketchnote', + 'Mk Cibulak Porcelain', + 'Mk Alcohol Ink Art', + 'Mk One Line Art', + 'Mk Blacklight Paint', + 'Mk Carnival Glass', + 'Mk Cyanotype Print', + 'Mk Cross Stitching', + 'Mk Encaustic Paint', + 'Mk Embroidery', + 'Mk Gyotaku', + 'Mk Luminogram', + 'Mk Lite Brite Art', + 'Mk Mokume Gane', + 'Pebble Art', + 'Mk Palekh', + 'Mk Suminagashi', + 'Mk Scrimshaw', + 'Mk Shibori', + 'Mk Vitreous Enamel', + 'Mk Ukiyo E', + 'Mk Vintage Airline Poster', + 'Mk Vintage Travel Poster', + 'Mk Bauhaus Style', + 'Mk Afrofuturism', + 'Mk Atompunk', + 'Mk Constructivism', + 'Mk Chicano Art', + 'Mk De Stijl', + 'Mk Dayak Art', + 'Mk Fayum Portrait', + 'Mk Illuminated Manuscript', + 'Mk Kalighat Painting', + 'Mk Madhubani Painting', + 'Mk Pictorialism', + 'Mk Pichwai Painting', + 'Mk Patachitra Painting', + 'Mk Samoan Art Inspired', + 'Mk Tlingit Art', + 'Mk Adnate Style', + 'Mk Ron English Style', + 'Mk Shepard Fairey Style', + ]), + ) + .register(z.globalRegistry, { + description: '\n The style to use.\n ', + }), + ) + .default(['Fooocus Enhance', 'Fooocus V2', 'Fooocus Sharp']), + control_type: z.optional( + z + .enum(['ImagePrompt', 'PyraCanny', 'CPDS', 'FaceSwap']) + .register(z.globalRegistry, { + description: 'The type of image control', + }), + ), + mask_image_url: z.optional(z.union([z.string(), z.null()])), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. 
You can use up to 5 LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([ + { + path: 'https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_offset_example-lora_1.0.safetensors', + scale: 0.1, + }, + ]), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to false, the safety checker will be disabled.', + }), + ) + .default(true), + sharpness: z + .optional( + z.number().gte(0).lte(30).register(z.globalRegistry, { + description: + '\n The sharpness of the generated image. Use it to control how sharp the generated\n image should be. Higher value means image and texture are sharper.\n ', + }), + ) + .default(2), + guidance_scale: z + .optional( + z.number().gte(1).lte(30).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(4), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + inpaint_image_url: z.optional(z.union([z.string(), z.null()])), + mixing_image_prompt_and_inpaint: z.optional(z.boolean()).default(false), + aspect_ratio: z + .optional( + z.string().register(z.globalRegistry, { + description: + '\n The size of the generated image. You can choose between some presets or\n custom height and width that **must be multiples of 8**.\n ', + }), + ) + .default('1024x1024'), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: + '\n Number of images to generate in one request\n ', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + refiner_model: z.optional( + z + .enum(['None', 'realisticVisionV60B1_v51VAE.safetensors']) + .register(z.globalRegistry, { + description: 'Refiner (SDXL or SD 1.5)', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ', + }), + ) + .default(false), + control_image_url: z.optional(z.union([z.string(), z.null()])), + seed: z.optional(z.union([z.int(), z.null()])), + refiner_switch: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n Use 0.4 for SD1.5 realistic models; 0.667 for SD1.5 anime models\n 0.8 for XL-refiners; or any value for switching two SDXL models.\n ', + }), + ) + .default(0.8), + control_image_weight: z + .optional( + z.number().gte(0).lte(2).register(z.globalRegistry, { + description: + '\n The strength of the control image. Use it to control how much the generated image\n should look like the control image.\n ', + }), + ) + .default(1), + control_image_stop_at: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n The stop at value of the control image. 
Use it to control how much the generated image\n should look like the control image.\n ', + }), + ) + .default(1), +}) + +/** + * DiffusionEdgeOutput + */ +export const zSchemaDiffusionEdgeOutput = z.object({ + image: zSchemaImage, +}) + +/** + * DiffusionEdgeInput + */ +export const zSchemaDiffusionEdgeInput = z.object({ + image_url: z.string().register(z.globalRegistry, { + description: 'The text prompt you would like to convert to speech.', + }), +}) + +/** + * LCMOutput + */ +export const zSchemaLcmOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + request_id: z + .optional( + z.string().register(z.globalRegistry, { + description: + '\n An id bound to a request, can be used with response to identify the request\n itself.\n ', + }), + ) + .default(''), + timings: z.record(z.string(), z.number()), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), + num_inference_steps: z + .optional( + z.int().register(z.globalRegistry, { + description: + '\n Number of inference steps used to generate the image. It will be the same value of the one passed in the\n input or the default one in case none was passed.\n ', + }), + ) + .default(4), + nsfw_content_detected: z.array(z.boolean()).register(z.globalRegistry, { + description: + '\n A list of booleans indicating whether the generated image contains any\n potentially unsafe content. If the safety check is disabled, this field\n will all will be false.\n ', + }), +}) + +/** + * LCMInput + */ +export const zSchemaLcmInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. Be as descriptive as possible for best results.', + }), + controlnet_inpaint: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the inpainting pipeline will use controlnet inpainting.\n Only effective for inpainting pipelines.\n ', + }), + ) + .default(false), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + enable_safety_checks: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the resulting image will be checked whether it includes any\n potentially unsafe content. If it does, it will be replaced with a black\n image.\n ', + }), + ) + .default(true), + model: z.optional( + z.enum(['sdxl', 'sdv1-5']).register(z.globalRegistry, { + description: 'The model to use for generating the image.', + }), + ), + lora_url: z.optional( + z.string().register(z.globalRegistry, { + description: + '\n The url of the lora server to use for image generation.\n ', + }), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(8).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(1), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use.Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. 
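// --- Editor's sketch, not part of the generated diff: validating a raw queue
// result at the trust boundary. Output schemas such as zSchemaLcmOutput mirror
// the *Output response types, so an untyped JSON payload can be checked before
// use. (Aside: zSchemaDiffusionEdgeInput's image_url description above reads
// "The text prompt you would like to convert to speech", which looks copied
// from a TTS schema upstream; the field itself is clearly an image URL.)
import { zSchemaLcmOutput } from './zod.gen'

export function imageUrlsFromLcmResult(payload: unknown): Array<string> {
  const parsed = zSchemaLcmOutput.safeParse(payload)
  if (!parsed.success) {
    throw new Error(`unexpected LCM payload: ${parsed.error.message}`)
  }
  // Every entry is a zSchemaImage, whose `url` field is required.
  return parsed.data.images.map((image) => image.url)
}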
moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + inpaint_mask_only: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the inpainting pipeline will only inpaint the provided mask\n area. Only effective for inpainting pipelines.\n ', + }), + ) + .default(false), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: + '\n The number of images to generate. The function will return a list of images\n with the same prompt and negative prompt but different seeds.\n ', + }), + ) + .default(1), + lora_scale: z + .optional( + z.number().register(z.globalRegistry, { + description: + '\n The scale of the lora server to use for image generation.\n ', + }), + ) + .default(1), + image_url: z.optional( + z.string().register(z.globalRegistry, { + description: + '\n The base image to use for guiding the image generation on image-to-image\n generations. If the either width or height of the image is larger than 1024\n pixels, the image will be resized to 1024 pixels while keeping the aspect ratio.\n ', + }), + ), + strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n The strength of the image that is passed as `image_url`. The strength\n determines how much the generated image will be similar to the image passed as\n `image_url`. The higher the strength the more model gets "creative" and\n generates an image that\'s different from the initial image. A strength of 1.0\n means that the initial image is more or less ignored and the model will try to\n generate an image that\'s as close as possible to the prompt.\n ', + }), + ) + .default(0.8), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ', + }), + ) + .default(false), + request_id: z + .optional( + z.string().register(z.globalRegistry, { + description: + '\n An id bound to a request, can be used with response to identify the request\n itself.\n ', + }), + ) + .default(''), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), + mask_url: z.optional( + z.string().register(z.globalRegistry, { + description: + '\n The mask to use for guiding the image generation on image\n inpainting. The model will focus on the mask area and try to fill it with\n the most relevant content.\n\n The mask must be a black and white image where the white area is the area\n that needs to be filled and the black area is the area that should be\n ignored.\n\n The mask must have the same dimensions as the image passed as `image_url`.\n ', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(1).lte(12).register(z.globalRegistry, { + description: + '\n The number of inference steps to use for generating the image. 
The more steps\n the better the image will be but it will also take longer to generate.\n ', + }), + ) + .default(4), +}) + +/** + * FooocusOutput + */ +export const zSchemaFooocusInpaintOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image file info.', + }), + timings: z.record(z.string(), z.number()).register(z.globalRegistry, { + description: 'The time taken for the generation process.', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), +}) + +/** + * ImagePrompt + */ +export const zSchemaImagePrompt = z.object({ + weight: z.optional(z.number().gte(0).lte(2)).default(1), + stop_at: z.optional(z.number().gte(0).lte(1)).default(0.5), + type: z.optional(z.enum(['ImagePrompt', 'PyraCanny', 'CPDS', 'FaceSwap'])), + image_url: z.optional(z.string()), +}) + +/** + * FooocusInpaintInput + */ +export const zSchemaFooocusInpaintInput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. Be as descriptive as possible for best results.', + }), + ) + .default(''), + performance: z.optional( + z + .enum(['Speed', 'Quality', 'Extreme Speed', 'Lightning']) + .register(z.globalRegistry, { + description: '\n You can choose Speed or Quality\n ', + }), + ), + styles: z + .optional( + z + .array( + z.enum([ + 'Fooocus V2', + 'Fooocus Enhance', + 'Fooocus Sharp', + 'Fooocus Semi Realistic', + 'Fooocus Masterpiece', + 'Fooocus Photograph', + 'Fooocus Negative', + 'Fooocus Cinematic', + 'SAI 3D Model', + 'SAI Analog Film', + 'SAI Anime', + 'SAI Cinematic', + 'SAI Comic Book', + 'SAI Craft Clay', + 'SAI Digital Art', + 'SAI Enhance', + 'SAI Fantasy Art', + 'SAI Isometric', + 'SAI Line Art', + 'SAI Lowpoly', + 'SAI Neonpunk', + 'SAI Origami', + 'SAI Photographic', + 'SAI Pixel Art', + 'SAI Texture', + 'MRE Cinematic Dynamic', + 'MRE Spontaneous Picture', + 'MRE Artistic Vision', + 'MRE Dark Dream', + 'MRE Gloomy Art', + 'MRE Bad Dream', + 'MRE Underground', + 'MRE Surreal Painting', + 'MRE Dynamic Illustration', + 'MRE Undead Art', + 'MRE Elemental Art', + 'MRE Space Art', + 'MRE Ancient Illustration', + 'MRE Brave Art', + 'MRE Heroic Fantasy', + 'MRE Dark Cyberpunk', + 'MRE Lyrical Geometry', + 'MRE Sumi E Symbolic', + 'MRE Sumi E Detailed', + 'MRE Manga', + 'MRE Anime', + 'MRE Comic', + 'Ads Advertising', + 'Ads Automotive', + 'Ads Corporate', + 'Ads Fashion Editorial', + 'Ads Food Photography', + 'Ads Gourmet Food Photography', + 'Ads Luxury', + 'Ads Real Estate', + 'Ads Retail', + 'Artstyle Abstract', + 'Artstyle Abstract Expressionism', + 'Artstyle Art Deco', + 'Artstyle Art Nouveau', + 'Artstyle Constructivist', + 'Artstyle Cubist', + 'Artstyle Expressionist', + 'Artstyle Graffiti', + 'Artstyle Hyperrealism', + 'Artstyle Impressionist', + 'Artstyle Pointillism', + 'Artstyle Pop Art', + 'Artstyle Psychedelic', + 'Artstyle Renaissance', + 'Artstyle Steampunk', + 'Artstyle Surrealist', + 'Artstyle Typography', + 'Artstyle Watercolor', + 'Futuristic Biomechanical', + 'Futuristic Biomechanical Cyberpunk', + 'Futuristic Cybernetic', + 'Futuristic Cybernetic Robot', + 'Futuristic Cyberpunk Cityscape', + 'Futuristic Futuristic', + 'Futuristic Retro Cyberpunk', + 'Futuristic Retro Futurism', + 'Futuristic Sci Fi', + 'Futuristic Vaporwave', + 'Game Bubble Bobble', + 'Game Cyberpunk Game', + 'Game Fighting Game', + 'Game Gta', + 'Game Mario', + 'Game Minecraft', + 
'Game Pokemon', + 'Game Retro Arcade', + 'Game Retro Game', + 'Game Rpg Fantasy Game', + 'Game Strategy Game', + 'Game Streetfighter', + 'Game Zelda', + 'Misc Architectural', + 'Misc Disco', + 'Misc Dreamscape', + 'Misc Dystopian', + 'Misc Fairy Tale', + 'Misc Gothic', + 'Misc Grunge', + 'Misc Horror', + 'Misc Kawaii', + 'Misc Lovecraftian', + 'Misc Macabre', + 'Misc Manga', + 'Misc Metropolis', + 'Misc Minimalist', + 'Misc Monochrome', + 'Misc Nautical', + 'Misc Space', + 'Misc Stained Glass', + 'Misc Techwear Fashion', + 'Misc Tribal', + 'Misc Zentangle', + 'Papercraft Collage', + 'Papercraft Flat Papercut', + 'Papercraft Kirigami', + 'Papercraft Paper Mache', + 'Papercraft Paper Quilling', + 'Papercraft Papercut Collage', + 'Papercraft Papercut Shadow Box', + 'Papercraft Stacked Papercut', + 'Papercraft Thick Layered Papercut', + 'Photo Alien', + 'Photo Film Noir', + 'Photo Glamour', + 'Photo Hdr', + 'Photo Iphone Photographic', + 'Photo Long Exposure', + 'Photo Neon Noir', + 'Photo Silhouette', + 'Photo Tilt Shift', + 'Cinematic Diva', + 'Abstract Expressionism', + 'Academia', + 'Action Figure', + 'Adorable 3D Character', + 'Adorable Kawaii', + 'Art Deco', + 'Art Nouveau', + 'Astral Aura', + 'Avant Garde', + 'Baroque', + 'Bauhaus Style Poster', + 'Blueprint Schematic Drawing', + 'Caricature', + 'Cel Shaded Art', + 'Character Design Sheet', + 'Classicism Art', + 'Color Field Painting', + 'Colored Pencil Art', + 'Conceptual Art', + 'Constructivism', + 'Cubism', + 'Dadaism', + 'Dark Fantasy', + 'Dark Moody Atmosphere', + 'Dmt Art Style', + 'Doodle Art', + 'Double Exposure', + 'Dripping Paint Splatter Art', + 'Expressionism', + 'Faded Polaroid Photo', + 'Fauvism', + 'Flat 2d Art', + 'Fortnite Art Style', + 'Futurism', + 'Glitchcore', + 'Glo Fi', + 'Googie Art Style', + 'Graffiti Art', + 'Harlem Renaissance Art', + 'High Fashion', + 'Idyllic', + 'Impressionism', + 'Infographic Drawing', + 'Ink Dripping Drawing', + 'Japanese Ink Drawing', + 'Knolling Photography', + 'Light Cheery Atmosphere', + 'Logo Design', + 'Luxurious Elegance', + 'Macro Photography', + 'Mandola Art', + 'Marker Drawing', + 'Medievalism', + 'Minimalism', + 'Neo Baroque', + 'Neo Byzantine', + 'Neo Futurism', + 'Neo Impressionism', + 'Neo Rococo', + 'Neoclassicism', + 'Op Art', + 'Ornate And Intricate', + 'Pencil Sketch Drawing', + 'Pop Art 2', + 'Rococo', + 'Silhouette Art', + 'Simple Vector Art', + 'Sketchup', + 'Steampunk 2', + 'Surrealism', + 'Suprematism', + 'Terragen', + 'Tranquil Relaxing Atmosphere', + 'Sticker Designs', + 'Vibrant Rim Light', + 'Volumetric Lighting', + 'Watercolor 2', + 'Whimsical And Playful', + 'Mk Chromolithography', + 'Mk Cross Processing Print', + 'Mk Dufaycolor Photograph', + 'Mk Herbarium', + 'Mk Punk Collage', + 'Mk Mosaic', + 'Mk Van Gogh', + 'Mk Coloring Book', + 'Mk Singer Sargent', + 'Mk Pollock', + 'Mk Basquiat', + 'Mk Andy Warhol', + 'Mk Halftone Print', + 'Mk Gond Painting', + 'Mk Albumen Print', + 'Mk Aquatint Print', + 'Mk Anthotype Print', + 'Mk Inuit Carving', + 'Mk Bromoil Print', + 'Mk Calotype Print', + 'Mk Color Sketchnote', + 'Mk Cibulak Porcelain', + 'Mk Alcohol Ink Art', + 'Mk One Line Art', + 'Mk Blacklight Paint', + 'Mk Carnival Glass', + 'Mk Cyanotype Print', + 'Mk Cross Stitching', + 'Mk Encaustic Paint', + 'Mk Embroidery', + 'Mk Gyotaku', + 'Mk Luminogram', + 'Mk Lite Brite Art', + 'Mk Mokume Gane', + 'Pebble Art', + 'Mk Palekh', + 'Mk Suminagashi', + 'Mk Scrimshaw', + 'Mk Shibori', + 'Mk Vitreous Enamel', + 'Mk Ukiyo E', + 'Mk Vintage Airline Poster', + 'Mk Vintage 
Travel Poster', + 'Mk Bauhaus Style', + 'Mk Afrofuturism', + 'Mk Atompunk', + 'Mk Constructivism', + 'Mk Chicano Art', + 'Mk De Stijl', + 'Mk Dayak Art', + 'Mk Fayum Portrait', + 'Mk Illuminated Manuscript', + 'Mk Kalighat Painting', + 'Mk Madhubani Painting', + 'Mk Pictorialism', + 'Mk Pichwai Painting', + 'Mk Patachitra Painting', + 'Mk Samoan Art Inspired', + 'Mk Tlingit Art', + 'Mk Adnate Style', + 'Mk Ron English Style', + 'Mk Shepard Fairey Style', + ]), + ) + .register(z.globalRegistry, { + description: '\n The style to use.\n ', + }), + ) + .default(['Fooocus Enhance', 'Fooocus V2', 'Fooocus Sharp']), + image_prompt_3: z.optional(zSchemaImagePrompt), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. You can use up to 5 LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([ + { + path: 'https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_offset_example-lora_1.0.safetensors', + scale: 0.1, + }, + ]), + image_prompt_4: z.optional(zSchemaImagePrompt), + guidance_scale: z + .optional( + z.number().gte(1).lte(30).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(4), + sharpness: z + .optional( + z.number().gte(0).lte(30).register(z.globalRegistry, { + description: + '\n The sharpness of the generated image. Use it to control how sharp the generated\n image should be. Higher value means image and texture are sharper.\n ', + }), + ) + .default(2), + mixing_image_prompt_and_inpaint: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Mixing Image Prompt and Inpaint', + }), + ) + .default(false), + outpaint_selections: z + .optional( + z + .array(z.enum(['Left', 'Right', 'Top', 'Bottom'])) + .register(z.globalRegistry, { + description: 'The directions to outpaint.', + }), + ) + .default([]), + inpaint_image_url: z.string().register(z.globalRegistry, { + description: 'The image to use as a reference for inpainting.', + }), + refiner_model: z.optional( + z + .enum(['None', 'realisticVisionV60B1_v51VAE.safetensors']) + .register(z.globalRegistry, { + description: 'Refiner (SDXL or SD 1.5)', + }), + ), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_prompt_2: z.optional(zSchemaImagePrompt), + inpaint_respective_field: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n The area to inpaint. Value 0 is same as "Only Masked" in A1111. Value 1 is\n same as "Whole Image" in A1111. Only used in inpaint, not used in outpaint.\n (Outpaint always use 1.0)\n ', + }), + ) + .default(0.618), + inpaint_mode: z.optional( + z + .enum([ + 'Inpaint or Outpaint (default)', + 'Improve Detail (face, hand, eyes, etc.)', + 'Modify Content (add objects, change background, etc.)', + ]) + .register(z.globalRegistry, { + description: 'The mode to use for inpainting.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. 
This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ', + }), + ) + .default(false), + seed: z.optional(z.union([z.int(), z.null()])), + refiner_switch: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n Use 0.4 for SD1.5 realistic models; 0.667 for SD1.5 anime models\n 0.8 for XL-refiners; or any value for switching two SDXL models.\n ', + }), + ) + .default(0.8), + inpaint_disable_initial_latent: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the initial preprocessing will be disabled.', + }), + ) + .default(false), + mask_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The image to use as a mask for the generated image.', + }), + ), + invert_mask: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the mask will be inverted.', + }), + ) + .default(false), + image_prompt_1: z.optional(zSchemaImagePrompt), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to false, the safety checker will be disabled.', + }), + ) + .default(true), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: + '\n Number of images to generate in one request\n ', + }), + ) + .default(1), + aspect_ratio: z + .optional( + z.string().register(z.globalRegistry, { + description: + '\n The size of the generated image. You can choose between some presets or\n custom height and width that **must be multiples of 8**.\n ', + }), + ) + .default('1024x1024'), + inpaint_additional_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Describe what you want to inpaint.', + }), + ) + .default(''), + inpaint_strength: z + .optional( + z.number().lte(1).register(z.globalRegistry, { + description: + '\n Same as the denoising strength in A1111 inpaint. Only used in inpaint, not\n used in outpaint. (Outpaint always use 1.0)\n ', + }), + ) + .default(1), + override_inpaint_options: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "\n If set to true, the advanced inpaint options ('inpaint_disable_initial_latent',\n 'inpaint_engine', 'inpaint_strength', 'inpaint_respective_field',\n 'inpaint_erode_or_dilate') will be overridden.\n Otherwise, the default values will be used.\n ", + }), + ) + .default(false), + inpaint_engine: z.optional( + z.enum(['None', 'v1', 'v2.5', 'v2.6']).register(z.globalRegistry, { + description: 'Version of Fooocus inpaint model', + }), + ), + inpaint_erode_or_dilate: z + .optional( + z.number().gte(-64).lte(64).register(z.globalRegistry, { + description: + '\n Positive value will make white area in the mask larger, negative value will\n make white area smaller. 
(default is 0, always process before any mask\n invert)\n ', + }), + ) + .default(0), +}) + +/** + * FooocusOutput + */ +export const zSchemaFooocusImagePromptOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image file info.', + }), + timings: z.record(z.string(), z.number()).register(z.globalRegistry, { + description: 'The time taken for the generation process.', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), +}) + +/** + * FooocusImagePromptInput + */ +export const zSchemaFooocusImagePromptInput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. Be as descriptive as possible for best results.', + }), + ) + .default(''), + uov_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The image to upscale or vary.', + }), + ), + performance: z.optional( + z + .enum(['Speed', 'Quality', 'Extreme Speed', 'Lightning']) + .register(z.globalRegistry, { + description: '\n You can choose Speed or Quality\n ', + }), + ), + image_prompt_3: z.optional(zSchemaImagePrompt), + styles: z + .optional( + z + .array( + z.enum([ + 'Fooocus V2', + 'Fooocus Enhance', + 'Fooocus Sharp', + 'Fooocus Semi Realistic', + 'Fooocus Masterpiece', + 'Fooocus Photograph', + 'Fooocus Negative', + 'Fooocus Cinematic', + 'SAI 3D Model', + 'SAI Analog Film', + 'SAI Anime', + 'SAI Cinematic', + 'SAI Comic Book', + 'SAI Craft Clay', + 'SAI Digital Art', + 'SAI Enhance', + 'SAI Fantasy Art', + 'SAI Isometric', + 'SAI Line Art', + 'SAI Lowpoly', + 'SAI Neonpunk', + 'SAI Origami', + 'SAI Photographic', + 'SAI Pixel Art', + 'SAI Texture', + 'MRE Cinematic Dynamic', + 'MRE Spontaneous Picture', + 'MRE Artistic Vision', + 'MRE Dark Dream', + 'MRE Gloomy Art', + 'MRE Bad Dream', + 'MRE Underground', + 'MRE Surreal Painting', + 'MRE Dynamic Illustration', + 'MRE Undead Art', + 'MRE Elemental Art', + 'MRE Space Art', + 'MRE Ancient Illustration', + 'MRE Brave Art', + 'MRE Heroic Fantasy', + 'MRE Dark Cyberpunk', + 'MRE Lyrical Geometry', + 'MRE Sumi E Symbolic', + 'MRE Sumi E Detailed', + 'MRE Manga', + 'MRE Anime', + 'MRE Comic', + 'Ads Advertising', + 'Ads Automotive', + 'Ads Corporate', + 'Ads Fashion Editorial', + 'Ads Food Photography', + 'Ads Gourmet Food Photography', + 'Ads Luxury', + 'Ads Real Estate', + 'Ads Retail', + 'Artstyle Abstract', + 'Artstyle Abstract Expressionism', + 'Artstyle Art Deco', + 'Artstyle Art Nouveau', + 'Artstyle Constructivist', + 'Artstyle Cubist', + 'Artstyle Expressionist', + 'Artstyle Graffiti', + 'Artstyle Hyperrealism', + 'Artstyle Impressionist', + 'Artstyle Pointillism', + 'Artstyle Pop Art', + 'Artstyle Psychedelic', + 'Artstyle Renaissance', + 'Artstyle Steampunk', + 'Artstyle Surrealist', + 'Artstyle Typography', + 'Artstyle Watercolor', + 'Futuristic Biomechanical', + 'Futuristic Biomechanical Cyberpunk', + 'Futuristic Cybernetic', + 'Futuristic Cybernetic Robot', + 'Futuristic Cyberpunk Cityscape', + 'Futuristic Futuristic', + 'Futuristic Retro Cyberpunk', + 'Futuristic Retro Futurism', + 'Futuristic Sci Fi', + 'Futuristic Vaporwave', + 'Game Bubble Bobble', + 'Game Cyberpunk Game', + 'Game Fighting Game', + 'Game Gta', + 'Game Mario', + 'Game Minecraft', + 'Game Pokemon', + 'Game Retro Arcade', + 'Game Retro Game', + 'Game Rpg Fantasy Game', + 'Game Strategy Game', + 'Game Streetfighter', + 'Game Zelda', + 
'Misc Architectural', + 'Misc Disco', + 'Misc Dreamscape', + 'Misc Dystopian', + 'Misc Fairy Tale', + 'Misc Gothic', + 'Misc Grunge', + 'Misc Horror', + 'Misc Kawaii', + 'Misc Lovecraftian', + 'Misc Macabre', + 'Misc Manga', + 'Misc Metropolis', + 'Misc Minimalist', + 'Misc Monochrome', + 'Misc Nautical', + 'Misc Space', + 'Misc Stained Glass', + 'Misc Techwear Fashion', + 'Misc Tribal', + 'Misc Zentangle', + 'Papercraft Collage', + 'Papercraft Flat Papercut', + 'Papercraft Kirigami', + 'Papercraft Paper Mache', + 'Papercraft Paper Quilling', + 'Papercraft Papercut Collage', + 'Papercraft Papercut Shadow Box', + 'Papercraft Stacked Papercut', + 'Papercraft Thick Layered Papercut', + 'Photo Alien', + 'Photo Film Noir', + 'Photo Glamour', + 'Photo Hdr', + 'Photo Iphone Photographic', + 'Photo Long Exposure', + 'Photo Neon Noir', + 'Photo Silhouette', + 'Photo Tilt Shift', + 'Cinematic Diva', + 'Abstract Expressionism', + 'Academia', + 'Action Figure', + 'Adorable 3D Character', + 'Adorable Kawaii', + 'Art Deco', + 'Art Nouveau', + 'Astral Aura', + 'Avant Garde', + 'Baroque', + 'Bauhaus Style Poster', + 'Blueprint Schematic Drawing', + 'Caricature', + 'Cel Shaded Art', + 'Character Design Sheet', + 'Classicism Art', + 'Color Field Painting', + 'Colored Pencil Art', + 'Conceptual Art', + 'Constructivism', + 'Cubism', + 'Dadaism', + 'Dark Fantasy', + 'Dark Moody Atmosphere', + 'Dmt Art Style', + 'Doodle Art', + 'Double Exposure', + 'Dripping Paint Splatter Art', + 'Expressionism', + 'Faded Polaroid Photo', + 'Fauvism', + 'Flat 2d Art', + 'Fortnite Art Style', + 'Futurism', + 'Glitchcore', + 'Glo Fi', + 'Googie Art Style', + 'Graffiti Art', + 'Harlem Renaissance Art', + 'High Fashion', + 'Idyllic', + 'Impressionism', + 'Infographic Drawing', + 'Ink Dripping Drawing', + 'Japanese Ink Drawing', + 'Knolling Photography', + 'Light Cheery Atmosphere', + 'Logo Design', + 'Luxurious Elegance', + 'Macro Photography', + 'Mandola Art', + 'Marker Drawing', + 'Medievalism', + 'Minimalism', + 'Neo Baroque', + 'Neo Byzantine', + 'Neo Futurism', + 'Neo Impressionism', + 'Neo Rococo', + 'Neoclassicism', + 'Op Art', + 'Ornate And Intricate', + 'Pencil Sketch Drawing', + 'Pop Art 2', + 'Rococo', + 'Silhouette Art', + 'Simple Vector Art', + 'Sketchup', + 'Steampunk 2', + 'Surrealism', + 'Suprematism', + 'Terragen', + 'Tranquil Relaxing Atmosphere', + 'Sticker Designs', + 'Vibrant Rim Light', + 'Volumetric Lighting', + 'Watercolor 2', + 'Whimsical And Playful', + 'Mk Chromolithography', + 'Mk Cross Processing Print', + 'Mk Dufaycolor Photograph', + 'Mk Herbarium', + 'Mk Punk Collage', + 'Mk Mosaic', + 'Mk Van Gogh', + 'Mk Coloring Book', + 'Mk Singer Sargent', + 'Mk Pollock', + 'Mk Basquiat', + 'Mk Andy Warhol', + 'Mk Halftone Print', + 'Mk Gond Painting', + 'Mk Albumen Print', + 'Mk Aquatint Print', + 'Mk Anthotype Print', + 'Mk Inuit Carving', + 'Mk Bromoil Print', + 'Mk Calotype Print', + 'Mk Color Sketchnote', + 'Mk Cibulak Porcelain', + 'Mk Alcohol Ink Art', + 'Mk One Line Art', + 'Mk Blacklight Paint', + 'Mk Carnival Glass', + 'Mk Cyanotype Print', + 'Mk Cross Stitching', + 'Mk Encaustic Paint', + 'Mk Embroidery', + 'Mk Gyotaku', + 'Mk Luminogram', + 'Mk Lite Brite Art', + 'Mk Mokume Gane', + 'Pebble Art', + 'Mk Palekh', + 'Mk Suminagashi', + 'Mk Scrimshaw', + 'Mk Shibori', + 'Mk Vitreous Enamel', + 'Mk Ukiyo E', + 'Mk Vintage Airline Poster', + 'Mk Vintage Travel Poster', + 'Mk Bauhaus Style', + 'Mk Afrofuturism', + 'Mk Atompunk', + 'Mk Constructivism', + 'Mk Chicano Art', + 'Mk De Stijl', + 'Mk Dayak Art', 
+ 'Mk Fayum Portrait', + 'Mk Illuminated Manuscript', + 'Mk Kalighat Painting', + 'Mk Madhubani Painting', + 'Mk Pictorialism', + 'Mk Pichwai Painting', + 'Mk Patachitra Painting', + 'Mk Samoan Art Inspired', + 'Mk Tlingit Art', + 'Mk Adnate Style', + 'Mk Ron English Style', + 'Mk Shepard Fairey Style', + ]), + ) + .register(z.globalRegistry, { + description: '\n The style to use.\n ', + }), + ) + .default(['Fooocus Enhance', 'Fooocus V2', 'Fooocus Sharp']), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. You can use up to 5 LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([ + { + path: 'https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_offset_example-lora_1.0.safetensors', + scale: 0.1, + }, + ]), + image_prompt_4: z.optional(zSchemaImagePrompt), + guidance_scale: z + .optional( + z.number().gte(1).lte(30).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(4), + sharpness: z + .optional( + z.number().gte(0).lte(30).register(z.globalRegistry, { + description: + '\n The sharpness of the generated image. Use it to control how sharp the generated\n image should be. Higher value means image and texture are sharper.\n ', + }), + ) + .default(2), + mixing_image_prompt_and_inpaint: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Mixing Image Prompt and Inpaint', + }), + ) + .default(false), + outpaint_selections: z + .optional( + z + .array(z.enum(['Left', 'Right', 'Top', 'Bottom'])) + .register(z.globalRegistry, { + description: 'The directions to outpaint.', + }), + ) + .default([]), + inpaint_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The image to use as a reference for inpainting.', + }), + ), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + refiner_model: z.optional( + z + .enum(['None', 'realisticVisionV60B1_v51VAE.safetensors']) + .register(z.globalRegistry, { + description: 'Refiner (SDXL or SD 1.5)', + }), + ), + image_prompt_2: z.optional(zSchemaImagePrompt), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. 
This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ', + }), + ) + .default(false), + inpaint_mode: z.optional( + z + .enum([ + 'Inpaint or Outpaint (default)', + 'Improve Detail (face, hand, eyes, etc.)', + 'Modify Content (add objects, change background, etc.)', + ]) + .register(z.globalRegistry, { + description: 'The mode to use for inpainting.', + }), + ), + uov_method: z.optional( + z + .enum([ + 'Disabled', + 'Vary (Subtle)', + 'Vary (Strong)', + 'Upscale (1.5x)', + 'Upscale (2x)', + 'Upscale (Fast 2x)', + ]) + .register(z.globalRegistry, { + description: 'The method to use for upscaling or varying.', + }), + ), + seed: z.optional(z.union([z.int(), z.null()])), + refiner_switch: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n Use 0.4 for SD1.5 realistic models; 0.667 for SD1.5 anime models\n 0.8 for XL-refiners; or any value for switching two SDXL models.\n ', + }), + ) + .default(0.8), + mixing_image_prompt_and_vary_upscale: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Mixing Image Prompt and Vary/Upscale', + }), + ) + .default(false), + mask_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The image to use as a mask for the generated image.', + }), + ), + image_prompt_1: zSchemaImagePrompt, + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to false, the safety checker will be disabled.', + }), + ) + .default(true), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: + '\n Number of images to generate in one request\n ', + }), + ) + .default(1), + aspect_ratio: z + .optional( + z.string().register(z.globalRegistry, { + description: + '\n The size of the generated image. You can choose between some presets or\n custom height and width that **must be multiples of 8**.\n ', + }), + ) + .default('1024x1024'), + inpaint_additional_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Describe what you want to inpaint.', + }), + ) + .default(''), +}) + +/** + * IllusionDiffusionOutput + */ +export const zSchemaIllusionDiffusionOutput = z.object({ + image: zSchemaImage, + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * IllusionDiffusionInput + */ +export const zSchemaIllusionDiffusionInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. 
Be as descriptive as possible for best results.', + }), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + controlnet_conditioning_scale: z + .optional( + z.number().register(z.globalRegistry, { + description: 'The scale of the ControlNet.', + }), + ) + .default(1), + image_url: z.string().register(z.globalRegistry, { + description: 'Input image url.', + }), + scheduler: z.optional( + z.enum(['DPM++ Karras SDE', 'Euler']).register(z.globalRegistry, { + description: + 'Scheduler / sampler to use for the image denoising process.', + }), + ), + control_guidance_start: z.optional(z.number().gte(0).lte(1)).default(0), + guidance_scale: z + .optional( + z.number().lte(50).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(7.5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), + ), + control_guidance_end: z.optional(z.number().gte(0).lte(1)).default(1), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + num_inference_steps: z + .optional( + z.int().gte(0).lte(80).register(z.globalRegistry, { + description: + '\n Increasing the amount of steps tells Stable Diffusion that it should take more steps\n to generate your final result which can increase the amount of detail in your image.\n ', + }), + ) + .default(40), +}) + +/** + * Output + */ +export const zSchemaFastFooocusSdxlOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * TextToImageFooocusInput + */ +export const zSchemaFastFooocusSdxlInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. 
Be as descriptive as possible for best results.', + }), + enable_refiner: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, a smaller model will try to refine the output after it was processed.', + }), + ) + .default(true), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + embeddings: z + .optional( + z.array(zSchemaEmbedding).register(z.globalRegistry, { + description: 'The list of embeddings to use.', + }), + ) + .default([]), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the prompt will be expanded with additional prompts.', + }), + ) + .default(true), + guidance_rescale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The rescale factor for the CFG.', + }), + ) + .default(0), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(2), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + safety_checker_version: z.optional( + z.enum(['v1', 'v2']).register(z.globalRegistry, { + description: + 'The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model.', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(1).lte(24).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(8), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), +}) + +/** + * Output + */ +export const zSchemaFastLcmDiffusionOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * TextToImageLCMInput + */ +export const zSchemaFastLcmDiffusionInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. Be as descriptive as possible for best results.', + }), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the prompt will be expanded with additional prompts.', + }), + ) + .default(false), + guidance_rescale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The rescale factor for the CFG.', + }), + ) + .default(0), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(1.5), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + model_name: z.optional( + z + .enum([ + 'stabilityai/stable-diffusion-xl-base-1.0', + 'runwayml/stable-diffusion-v1-5', + ]) + .register(z.globalRegistry, { + description: 'The name of the model to use.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(true), + safety_checker_version: z.optional( + z.enum(['v1', 'v2']).register(z.globalRegistry, { + description: + 'The version of the safety checker to use. v1 is the default CompVis safety checker. 
v2 uses a custom ViT model.', + }), + ), + request_id: z + .optional( + z.string().register(z.globalRegistry, { + description: + '\n An id bound to a request, can be used with response to identify the request\n itself.\n ', + }), + ) + .default(''), + num_inference_steps: z + .optional( + z.int().gte(1).lte(32).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(6), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), +}) + +/** + * Output + */ +export const zSchemaFastSdxlControlnetCannyOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * TextToImageControlNetInput + */ +export const zSchemaFastSdxlControlnetCannyInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. Be as descriptive as possible for best results.', + }), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + z.null(), + ]), + ), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the prompt will be expanded with additional prompts.', + }), + ) + .default(false), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: 'The list of LoRA weights to use.', + }), + ) + .default([]), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(7.5), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(false), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + controlnet_conditioning_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The scale of the controlnet conditioning.', + }), + ) + .default(0.5), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. 
This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ', + }), + ) + .default(false), + control_image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the control image.', + }), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(25), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), + enable_deep_cache: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, DeepCache will be enabled. TBD\n ', + }), + ) + .default(false), +})
+
+/** + * Output + */ +export const zSchemaFastFooocusSdxlImageToImageOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +})
+
+/** + * ImageToImageFooocusInput + */ +export const zSchemaFastFooocusSdxlImageToImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. Be as descriptive as possible for best results.', + }), + enable_refiner: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, a smaller model will try to refine the output after it was processed.', + }), + ) + .default(true), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + z.null(), + ]), + ), + embeddings: z + .optional( + z.array(zSchemaEmbedding).register(z.globalRegistry, { + description: 'The list of embeddings to use.', + }), + ) + .default([]), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the prompt will be expanded with additional prompts.', + }), + ) + .default(true), + guidance_rescale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The rescale factor for the CFG.', + }), + ) + .default(0), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(2), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the image to use as a starting point for the generation.', + }), + strength: z + .optional( + z.number().gte(0.05).lte(1).register(z.globalRegistry, { + description: + 'Determines how much the generated image resembles the initial image.', + }), + ) + .default(0.95), + safety_checker_version: z.optional( + z.enum(['v1', 'v2']).register(z.globalRegistry, { + description: + 'The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model.', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(1).lte(24).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(8), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), +})
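+
+// A hedged usage sketch (reviewer addition, not generator output): it shows how
+// the image-to-image Fooocus schema above can be exercised with Zod's `parse`,
+// which fills in the declared defaults for omitted fields. The prompt and image
+// URL values are illustrative assumptions, not part of the fal API:
+//
+//   const i2iInput = zSchemaFastFooocusSdxlImageToImageInput.parse({
+//     prompt: 'A watercolor lighthouse at dusk',
+//     image_url: 'https://example.com/source.png', // hypothetical input image
+//     strength: 0.6, // lower values track the source image more closely
+//   })
+//   // i2iInput.guidance_scale === 2 and i2iInput.num_inference_steps === 8,
+//   // taken from the schema defaults above.
+
+/** + * Output + */ +export const zSchemaFastLightningSdxlOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +})
+
+/** + * TextToImageLightningInput + */ +export const zSchemaFastLightningSdxlInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. 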
Be as descriptive as possible for best results.', + }), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + embeddings: z + .optional( + z.array(zSchemaEmbedding).register(z.globalRegistry, { + description: 'The list of embeddings to use.', + }), + ) + .default([]), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the prompt will be expanded with additional prompts.', + }), + ) + .default(false), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_rescale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The rescale factor for the CFG.', + }), + ) + .default(0), + safety_checker_version: z.optional( + z.enum(['v1', 'v2']).register(z.globalRegistry, { + description: + 'The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model.', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_inference_steps: z.optional( + z.enum(['1', '2', '4', '8']).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), + request_id: z + .optional( + z.string().register(z.globalRegistry, { + description: + '\n An id bound to a request, can be used with response to identify the request\n itself.\n ', + }), + ) + .default(''), +}) + +/** + * Output + */ +export const zSchemaLayerDiffusionOutput = z.object({ + image: zSchemaImage, + seed: z.int().register(z.globalRegistry, { + description: 'The seed used to generate the image.', + }), +}) + +/** + * Input + */ +export const zSchemaLayerDiffusionInput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. Be as descriptive as possible for best results.', + }), + ) + .default(''), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: 'The guidance scale for the model.', + }), + ) + .default(8), + num_inference_steps: z + .optional( + z.int().gte(10).lte(40).register(z.globalRegistry, { + description: 'The number of inference steps for the model.', + }), + ) + .default(20), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the negative image. 
Be as descriptive as possible for best results.', + }), + ) + .default('text, watermark'), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to false, the safety checker will be disabled.', + }), + ) + .default(true), +}) + +/** + * Output + */ +export const zSchemaStableDiffusionV15Output = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * TextToImageSD15Input + */ +export const zSchemaStableDiffusionV15Input = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. Be as descriptive as possible for best results.', + }), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + embeddings: z + .optional( + z.array(zSchemaEmbedding).register(z.globalRegistry, { + description: 'The list of embeddings to use.', + }), + ) + .default([]), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the prompt will be expanded with additional prompts.', + }), + ) + .default(false), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: 'The list of LoRA weights to use.', + }), + ) + .default([]), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(7.5), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + safety_checker_version: z.optional( + z.enum(['v1', 'v2']).register(z.globalRegistry, { + description: + 'The version of the safety checker to use. v1 is the default CompVis safety checker. 
v2 uses a custom ViT model.', + }), + ), + request_id: z + .optional( + z.string().register(z.globalRegistry, { + description: + '\n An id bound to a request, can be used with response to identify the request\n itself.\n ', + }), + ) + .default(''), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(25), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), +}) + +/** + * Output + */ +export const zSchemaDreamshaperOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * DreamshaperTextToImageInput + */ +export const zSchemaDreamshaperInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. Be as descriptive as possible for best results.', + }), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + embeddings: z + .optional( + z.array(zSchemaEmbedding).register(z.globalRegistry, { + description: 'The list of embeddings to use.', + }), + ) + .default([]), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the prompt will be expanded with additional prompts.', + }), + ) + .default(false), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: 'The list of LoRA weights to use.', + }), + ) + .default([]), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.', + }), + ) + .default(5), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "The negative prompt to use. 
Use it to address details that you don't want in the image.", + }), + ) + .default( + '(worst quality, low quality, normal quality, lowres, low details, oversaturated, undersaturated, overexposed, underexposed, grayscale, bw, bad photo, bad photography, bad art:1.4), (watermark, signature, text font, username, error, logo, words, letters, digits, autograph, trademark, name:1.2), (blur, blurry, grainy), morbid, ugly, asymmetrical, mutated malformed, mutilated, poorly lit, bad shadow, draft, cropped, out of frame, cut off, censored, jpeg artifacts, out of focus, glitch, duplicate, (airbrushed, cartoon, anime, semi-realistic, cgi, render, blender, digital art, manga, amateur:1.3), (3D ,3D Game, 3D Game Scene, 3D Character:1.1), (bad hands, bad anatomy, bad body, bad face, bad teeth, bad arms, bad legs, deformities:1.3)', + ), + format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + model_name: z.optional( + z + .enum([ + 'Lykon/dreamshaper-xl-1-0', + 'Lykon/dreamshaper-xl-v2-turbo', + 'Lykon/dreamshaper-8', + ]) + .register(z.globalRegistry, { + description: 'The Dreamshaper model to use.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ', + }), + ) + .default(false), + safety_checker_version: z.optional( + z.enum(['v1', 'v2']).register(z.globalRegistry, { + description: + 'The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model.', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(1).lte(70).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(35), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), +}) + +/** + * Output + */ +export const zSchemaRealisticVisionOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * RealisticVisionTextToImageInput + */ +export const zSchemaRealisticVisionInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. 
Be as descriptive as possible for best results.', + }), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + embeddings: z + .optional( + z.array(zSchemaEmbedding).register(z.globalRegistry, { + description: 'The list of embeddings to use.', + }), + ) + .default([]), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the prompt will be expanded with additional prompts.', + }), + ) + .default(false), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: 'The list of LoRA weights to use.', + }), + ) + .default([]), + guidance_rescale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The rescale factor for the CFG.', + }), + ) + .default(0), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.', + }), + ) + .default(5), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "The negative prompt to use. Use it to address details that you don't want in the image.", + }), + ) + .default( + '(worst quality, low quality, normal quality, lowres, low details, oversaturated, undersaturated, overexposed, underexposed, grayscale, bw, bad photo, bad photography, bad art:1.4), (watermark, signature, text font, username, error, logo, words, letters, digits, autograph, trademark, name:1.2), (blur, blurry, grainy), morbid, ugly, asymmetrical, mutated malformed, mutilated, poorly lit, bad shadow, draft, cropped, out of frame, cut off, censored, jpeg artifacts, out of focus, glitch, duplicate, (airbrushed, cartoon, anime, semi-realistic, cgi, render, blender, digital art, manga, amateur:1.3), (3D ,3D Game, 3D Game Scene, 3D Character:1.1), (bad hands, bad anatomy, bad body, bad face, bad teeth, bad arms, bad legs, deformities:1.3)', + ), + format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + model_name: z.optional( + z.string().register(z.globalRegistry, { + description: 'The Realistic Vision model to use.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + safety_checker_version: z.optional( + z.enum(['v1', 'v2']).register(z.globalRegistry, { + description: + 'The version of the safety checker to use. v1 is the default CompVis safety checker. 
v2 uses a custom ViT model.', + }), + ), + request_id: z + .optional( + z.string().register(z.globalRegistry, { + description: + '\n An id bound to a request, can be used with response to identify the request\n itself.\n ', + }), + ) + .default(''), + num_inference_steps: z + .optional( + z.int().gte(1).lte(70).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(35), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), +}) + +/** + * Output + */ +export const zSchemaPlaygroundV25Output = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * TextToImagePlaygroundv25Input + */ +export const zSchemaPlaygroundV25Input = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. Be as descriptive as possible for best results.', + }), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + embeddings: z + .optional( + z.array(zSchemaEmbedding).register(z.globalRegistry, { + description: 'The list of embeddings to use.', + }), + ) + .default([]), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the prompt will be expanded with additional prompts.', + }), + ) + .default(false), + guidance_rescale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The rescale factor for the CFG.', + }), + ) + .default(0), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. 
moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + safety_checker_version: z.optional( + z.enum(['v1', 'v2']).register(z.globalRegistry, { + description: + 'The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model.', + }), + ), + request_id: z + .optional( + z.string().register(z.globalRegistry, { + description: + '\n An id bound to a request, can be used with response to identify the request\n itself.\n ', + }), + ) + .default(''), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(25), +}) + +/** + * Output + */ +export const zSchemaLightningModelsOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * LightningModelsTextToImageInput + */ +export const zSchemaLightningModelsInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. 
Be as descriptive as possible for best results.', + }), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + embeddings: z + .optional( + z.array(zSchemaEmbedding).register(z.globalRegistry, { + description: 'The list of embeddings to use.', + }), + ) + .default([]), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the prompt will be expanded with additional prompts.', + }), + ) + .default(false), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: 'The list of LoRA weights to use.', + }), + ) + .default([]), + scheduler: z.optional( + z + .enum([ + 'DPM++ 2M', + 'DPM++ 2M Karras', + 'DPM++ 2M SDE', + 'DPM++ 2M SDE Karras', + 'DPM++ SDE', + 'DPM++ SDE Karras', + 'KDPM 2A', + 'Euler', + 'Euler (trailing timesteps)', + 'Euler A', + 'LCM', + 'EDMDPMSolverMultistepScheduler', + 'TCDScheduler', + ]) + .register(z.globalRegistry, { + description: + 'Scheduler / sampler to use for the image denoising process.', + }), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(2).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(2), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "The negative prompt to use. Use it to address details that you don't want in the image.", + }), + ) + .default( + '(worst quality, low quality, normal quality, lowres, low details, oversaturated, undersaturated, overexposed, underexposed, grayscale, bw, bad photo, bad photography, bad art:1.4), (watermark, signature, text font, username, error, logo, words, letters, digits, autograph, trademark, name:1.2), (blur, blurry, grainy), morbid, ugly, asymmetrical, mutated malformed, mutilated, poorly lit, bad shadow, draft, cropped, out of frame, cut off, censored, jpeg artifacts, out of focus, glitch, duplicate, (airbrushed, cartoon, anime, semi-realistic, cgi, render, blender, digital art, manga, amateur:1.3), (3D ,3D Game, 3D Game Scene, 3D Character:1.1), (bad hands, bad anatomy, bad body, bad face, bad teeth, bad arms, bad legs, deformities:1.3)', + ), + format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + model_name: z.optional( + z.string().register(z.globalRegistry, { + description: 'The Lightning model to use.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. 
This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ', + }), + ) + .default(false), + safety_checker_version: z.optional( + z.enum(['v1', 'v2']).register(z.globalRegistry, { + description: + 'The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model.', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(1).lte(12).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), +})
+
+/** + * T2IOutput + */ +export const zSchemaLumaPhotonOutput = z.object({ + images: z.array(zSchemaFile).register(z.globalRegistry, { + description: 'The generated image', + }), +})
+
+/** + * TextToImageRequest + */ +export const zSchemaLumaPhotonInput = z.object({ + prompt: z.string().min(3).max(5000), + aspect_ratio: z.optional( + z + .enum(['16:9', '9:16', '1:1', '4:3', '3:4', '21:9', '9:21']) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated image', + }), + ), +})
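+
+// A hedged usage sketch (reviewer addition, not generator output): the Photon
+// input is small enough to exercise whole. `safeParse` reports violations of
+// the declared bounds (prompt length 3..5000, the aspect-ratio enum); the
+// values below are illustrative assumptions:
+//
+//   const parsed = zSchemaLumaPhotonInput.safeParse({
+//     prompt: 'Foggy harbor at sunrise',
+//     aspect_ratio: '16:9',
+//   })
+//   if (!parsed.success) console.error(parsed.error.issues)
+
+/** + * Output + */ +export const zSchemaStableCascadeSoteDiffusionOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +})
+
+/** + * SoteDiffusionInput + */ +export const zSchemaStableCascadeSoteDiffusionInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. 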
Be as descriptive as possible for best results.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + second_stage_guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(2), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the image will be returned as base64 encoded string.\n ', + }), + ) + .default(false), + first_stage_steps: z + .optional( + z.int().gte(4).lte(50).register(z.globalRegistry, { + description: 'Number of steps to run the first stage for.', + }), + ) + .default(25), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(8), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Cascade\n will output the same image every time.\n ', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to false, the safety checker will be disabled.', + }), + ) + .default(true), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + second_stage_steps: z + .optional( + z.int().gte(4).lte(24).register(z.globalRegistry, { + description: 'Number of steps to run the second stage for.', + }), + ) + .default(10), +}) + +/** + * Output + */ +export const zSchemaFastSdxlOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + timings: z.record(z.string(), z.number()), +}) + +/** + * TextToImageInput + */ +export const zSchemaFastSdxlInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. 
Be as descriptive as possible for best results.', + }), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + embeddings: z + .optional( + z.array(zSchemaEmbedding).register(z.globalRegistry, { + description: 'The list of embeddings to use.', + }), + ) + .default([]), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the prompt will be expanded with additional prompts.', + }), + ) + .default(false), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: 'The list of LoRA weights to use.', + }), + ) + .default([]), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(7.5), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + safety_checker_version: z.optional( + z.enum(['v1', 'v2']).register(z.globalRegistry, { + description: + 'The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model.', + }), + ), + request_id: z + .optional( + z.string().register(z.globalRegistry, { + description: + '\n An id bound to a request, can be used with response to identify the request\n itself.\n ', + }), + ) + .default(''), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(25), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), +}) + +/** + * Output + */ +export const zSchemaStableCascadeOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * StableCascadeInput + */ +export const zSchemaStableCascadeInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. Be as descriptive as possible for best results.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + second_stage_guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(0), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the image will be returned as base64 encoded string.\n ', + }), + ) + .default(false), + first_stage_steps: z + .optional( + z.int().gte(4).lte(40).register(z.globalRegistry, { + description: 'Number of steps to run the first stage for.', + }), + ) + .default(20), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(4), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Cascade\n will output the same image every time.\n ', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to false, the safety checker will be disabled.', + }), + ) + .default(true), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + second_stage_steps: z + .optional( + z.int().gte(4).lte(24).register(z.globalRegistry, { + description: 'Number of steps to run the second stage for.', + }), + ) + .default(10), +}) + +/** + * Output + */ +export const zSchemaKolorsOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * KolorsInput + */ +export const zSchemaKolorsInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + '\n The prompt to use for generating the image. 
Be as descriptive as possible\n for best results.\n ', + }), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and\n uploaded before returning the response. This will increase the latency of\n the function but it allows you to get the image directly in the response\n without going through the CDN.\n ', + }), + ) + .default(false), + scheduler: z.optional( + z + .enum([ + 'EulerDiscreteScheduler', + 'EulerAncestralDiscreteScheduler', + 'DPMSolverMultistepScheduler', + 'DPMSolverMultistepScheduler_SDE_karras', + 'UniPCMultistepScheduler', + 'DEISMultistepScheduler', + ]) + .register(z.globalRegistry, { + description: 'The scheduler to use for the model.', + }), + ), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show\n you.\n ', + }), + ) + .default(5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(150).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(50), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Seed', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small\n details (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Enable safety checker.', + }), + ) + .default(true), +}) + +/** + * Output + */ +export const zSchemaSdxlControlnetUnionOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * TextToImageControlNetUnionInput + */ +export const zSchemaSdxlControlnetUnionInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. 
Be as descriptive as possible for best results.', + }), + depth_preprocess: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to preprocess the depth image.', + }), + ) + .default(true), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + z.null(), + ]), + ), + normal_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The URL of the control image.', + }), + ), + embeddings: z + .optional( + z.array(zSchemaEmbedding).register(z.globalRegistry, { + description: 'The list of embeddings to use.', + }), + ) + .default([]), + teed_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The URL of the control image.', + }), + ), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: 'The list of LoRA weights to use.', + }), + ) + .default([]), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(7.5), + canny_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The URL of the control image.', + }), + ), + segmentation_preprocess: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to preprocess the segmentation image.', + }), + ) + .default(true), + format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + request_id: z + .optional( + z.string().register(z.globalRegistry, { + description: + '\n An id bound to a request, can be used with response to identify the request\n itself.\n ', + }), + ) + .default(''), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), + segmentation_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The URL of the control image.', + }), + ), + openpose_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The URL of the control image.', + }), + ), + canny_preprocess: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to preprocess the canny image.', + }), + ) + .default(true), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the prompt will be expanded with additional prompts.', + }), + ) + .default(false), + depth_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The URL of the control image.', + }), + ), + normal_preprocess: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to preprocess the normal image.', + }), + ) + .default(true), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + negative_prompt: z + 
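+ // Editor's note (illustrative, not part of the generated schema): the
+ // `.optional(...).default(...)` chain used throughout these inputs means Zod
+ // substitutes the default whenever the key is omitted, so parsed values are
+ // always concrete. A minimal sketch with a hypothetical input:
+ //   const parsed = zSchemaSdxlControlnetUnionInput.parse({ prompt: 'a cat' })
+ //   parsed.negative_prompt // '' (default applied)
+ //   parsed.num_inference_steps // 35 (default applied)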
.optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + teed_preprocess: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to preprocess the teed image.', + }), + ) + .default(true), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + controlnet_conditioning_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The scale of the controlnet conditioning.', + }), + ) + .default(0.5), + safety_checker_version: z.optional( + z.enum(['v1', 'v2']).register(z.globalRegistry, { + description: + 'The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model.', + }), + ), + openpose_preprocess: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to preprocess the openpose image.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(1).lte(70).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(35), +}) + +/** + * PixArtSigmaOutput + */ +export const zSchemaPixartSigmaOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()).register(z.globalRegistry, { + description: + 'The timings of the different steps of the generation process.', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * PixArtSigmaInput + */ +export const zSchemaPixartSigmaInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. Be as descriptive as possible for best results.', + }), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + style: z.optional( + z + .enum([ + '(No style)', + 'Cinematic', + 'Photographic', + 'Anime', + 'Manga', + 'Digital Art', + 'Pixel art', + 'Fantasy art', + 'Neonpunk', + '3D Model', + ]) + .register(z.globalRegistry, { + description: 'The style to apply to the image.', + }), + ), + scheduler: z.optional( + z.enum(['DPM-SOLVER', 'SA-SOLVER']).register(z.globalRegistry, { + description: 'The scheduler to use for the model.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. 
This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ', + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(4.5), + num_inference_steps: z + .optional( + z.int().gte(5).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(35), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(false), +}) + +/** + * Output + */ +export const zSchemaFluxSubjectOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * FluxSubjectInput + */ +export const zSchemaFluxSubjectInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image of the subject', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. 
This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ', + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(8), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * Output + */ +export const zSchemaSanaOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * TextToImageInput + */ +export const zSchemaSanaInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + style_name: z.optional( + z + .enum([ + '(No style)', + 'Cinematic', + 'Photographic', + 'Anime', + 'Manga', + 'Digital Art', + 'Pixel art', + 'Fantasy art', + 'Neonpunk', + '3D Model', + ]) + .register(z.globalRegistry, { + description: 'The style to generate the image in.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(18), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed 
and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * FooocusOutput + */ +export const zSchemaFooocusUpscaleOrVaryOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image file info.', + }), + timings: z.record(z.string(), z.number()).register(z.globalRegistry, { + description: 'The time taken for the generation process.', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), +}) + +/** + * FooocusUpscaleOrVaryInput + */ +export const zSchemaFooocusUpscaleOrVaryInput = z.object({ + styles: z + .optional( + z + .array( + z.enum([ + 'Fooocus V2', + 'Fooocus Enhance', + 'Fooocus Sharp', + 'Fooocus Semi Realistic', + 'Fooocus Masterpiece', + 'Fooocus Photograph', + 'Fooocus Negative', + 'Fooocus Cinematic', + 'SAI 3D Model', + 'SAI Analog Film', + 'SAI Anime', + 'SAI Cinematic', + 'SAI Comic Book', + 'SAI Craft Clay', + 'SAI Digital Art', + 'SAI Enhance', + 'SAI Fantasy Art', + 'SAI Isometric', + 'SAI Line Art', + 'SAI Lowpoly', + 'SAI Neonpunk', + 'SAI Origami', + 'SAI Photographic', + 'SAI Pixel Art', + 'SAI Texture', + 'MRE Cinematic Dynamic', + 'MRE Spontaneous Picture', + 'MRE Artistic Vision', + 'MRE Dark Dream', + 'MRE Gloomy Art', + 'MRE Bad Dream', + 'MRE Underground', + 'MRE Surreal Painting', + 'MRE Dynamic Illustration', + 'MRE Undead Art', + 'MRE Elemental Art', + 'MRE Space Art', + 'MRE Ancient Illustration', + 'MRE Brave Art', + 'MRE Heroic Fantasy', + 'MRE Dark Cyberpunk', + 'MRE Lyrical Geometry', + 'MRE Sumi E Symbolic', + 'MRE Sumi E Detailed', + 'MRE Manga', + 'MRE Anime', + 'MRE Comic', + 'Ads Advertising', + 'Ads Automotive', + 'Ads Corporate', + 'Ads Fashion Editorial', + 'Ads Food Photography', + 'Ads Gourmet Food Photography', + 'Ads Luxury', + 'Ads Real Estate', + 'Ads Retail', + 'Artstyle Abstract', + 'Artstyle Abstract Expressionism', + 'Artstyle Art Deco', + 'Artstyle Art Nouveau', + 'Artstyle Constructivist', + 'Artstyle Cubist', + 'Artstyle Expressionist', + 'Artstyle Graffiti', + 'Artstyle Hyperrealism', + 'Artstyle Impressionist', + 'Artstyle Pointillism', + 'Artstyle Pop Art', + 'Artstyle Psychedelic', + 'Artstyle Renaissance', + 'Artstyle Steampunk', + 'Artstyle Surrealist', + 'Artstyle Typography', + 'Artstyle Watercolor', + 'Futuristic Biomechanical', + 'Futuristic Biomechanical Cyberpunk', + 'Futuristic Cybernetic', + 'Futuristic Cybernetic Robot', + 'Futuristic Cyberpunk Cityscape', + 'Futuristic Futuristic', + 'Futuristic Retro Cyberpunk', + 'Futuristic Retro Futurism', + 'Futuristic Sci Fi', + 'Futuristic Vaporwave', + 'Game Bubble Bobble', + 'Game Cyberpunk Game', + 'Game Fighting Game', + 'Game Gta', + 'Game Mario', + 'Game Minecraft', + 'Game Pokemon', + 'Game Retro Arcade', + 'Game Retro Game', + 'Game Rpg Fantasy Game', + 'Game Strategy Game', + 'Game Streetfighter', + 'Game Zelda', + 'Misc Architectural', + 'Misc 
Disco', + 'Misc Dreamscape', + 'Misc Dystopian', + 'Misc Fairy Tale', + 'Misc Gothic', + 'Misc Grunge', + 'Misc Horror', + 'Misc Kawaii', + 'Misc Lovecraftian', + 'Misc Macabre', + 'Misc Manga', + 'Misc Metropolis', + 'Misc Minimalist', + 'Misc Monochrome', + 'Misc Nautical', + 'Misc Space', + 'Misc Stained Glass', + 'Misc Techwear Fashion', + 'Misc Tribal', + 'Misc Zentangle', + 'Papercraft Collage', + 'Papercraft Flat Papercut', + 'Papercraft Kirigami', + 'Papercraft Paper Mache', + 'Papercraft Paper Quilling', + 'Papercraft Papercut Collage', + 'Papercraft Papercut Shadow Box', + 'Papercraft Stacked Papercut', + 'Papercraft Thick Layered Papercut', + 'Photo Alien', + 'Photo Film Noir', + 'Photo Glamour', + 'Photo Hdr', + 'Photo Iphone Photographic', + 'Photo Long Exposure', + 'Photo Neon Noir', + 'Photo Silhouette', + 'Photo Tilt Shift', + 'Cinematic Diva', + 'Abstract Expressionism', + 'Academia', + 'Action Figure', + 'Adorable 3D Character', + 'Adorable Kawaii', + 'Art Deco', + 'Art Nouveau', + 'Astral Aura', + 'Avant Garde', + 'Baroque', + 'Bauhaus Style Poster', + 'Blueprint Schematic Drawing', + 'Caricature', + 'Cel Shaded Art', + 'Character Design Sheet', + 'Classicism Art', + 'Color Field Painting', + 'Colored Pencil Art', + 'Conceptual Art', + 'Constructivism', + 'Cubism', + 'Dadaism', + 'Dark Fantasy', + 'Dark Moody Atmosphere', + 'Dmt Art Style', + 'Doodle Art', + 'Double Exposure', + 'Dripping Paint Splatter Art', + 'Expressionism', + 'Faded Polaroid Photo', + 'Fauvism', + 'Flat 2d Art', + 'Fortnite Art Style', + 'Futurism', + 'Glitchcore', + 'Glo Fi', + 'Googie Art Style', + 'Graffiti Art', + 'Harlem Renaissance Art', + 'High Fashion', + 'Idyllic', + 'Impressionism', + 'Infographic Drawing', + 'Ink Dripping Drawing', + 'Japanese Ink Drawing', + 'Knolling Photography', + 'Light Cheery Atmosphere', + 'Logo Design', + 'Luxurious Elegance', + 'Macro Photography', + 'Mandola Art', + 'Marker Drawing', + 'Medievalism', + 'Minimalism', + 'Neo Baroque', + 'Neo Byzantine', + 'Neo Futurism', + 'Neo Impressionism', + 'Neo Rococo', + 'Neoclassicism', + 'Op Art', + 'Ornate And Intricate', + 'Pencil Sketch Drawing', + 'Pop Art 2', + 'Rococo', + 'Silhouette Art', + 'Simple Vector Art', + 'Sketchup', + 'Steampunk 2', + 'Surrealism', + 'Suprematism', + 'Terragen', + 'Tranquil Relaxing Atmosphere', + 'Sticker Designs', + 'Vibrant Rim Light', + 'Volumetric Lighting', + 'Watercolor 2', + 'Whimsical And Playful', + 'Mk Chromolithography', + 'Mk Cross Processing Print', + 'Mk Dufaycolor Photograph', + 'Mk Herbarium', + 'Mk Punk Collage', + 'Mk Mosaic', + 'Mk Van Gogh', + 'Mk Coloring Book', + 'Mk Singer Sargent', + 'Mk Pollock', + 'Mk Basquiat', + 'Mk Andy Warhol', + 'Mk Halftone Print', + 'Mk Gond Painting', + 'Mk Albumen Print', + 'Mk Aquatint Print', + 'Mk Anthotype Print', + 'Mk Inuit Carving', + 'Mk Bromoil Print', + 'Mk Calotype Print', + 'Mk Color Sketchnote', + 'Mk Cibulak Porcelain', + 'Mk Alcohol Ink Art', + 'Mk One Line Art', + 'Mk Blacklight Paint', + 'Mk Carnival Glass', + 'Mk Cyanotype Print', + 'Mk Cross Stitching', + 'Mk Encaustic Paint', + 'Mk Embroidery', + 'Mk Gyotaku', + 'Mk Luminogram', + 'Mk Lite Brite Art', + 'Mk Mokume Gane', + 'Pebble Art', + 'Mk Palekh', + 'Mk Suminagashi', + 'Mk Scrimshaw', + 'Mk Shibori', + 'Mk Vitreous Enamel', + 'Mk Ukiyo E', + 'Mk Vintage Airline Poster', + 'Mk Vintage Travel Poster', + 'Mk Bauhaus Style', + 'Mk Afrofuturism', + 'Mk Atompunk', + 'Mk Constructivism', + 'Mk Chicano Art', + 'Mk De Stijl', + 'Mk Dayak Art', + 'Mk Fayum Portrait', + 'Mk 
Illuminated Manuscript', + 'Mk Kalighat Painting', + 'Mk Madhubani Painting', + 'Mk Pictorialism', + 'Mk Pichwai Painting', + 'Mk Patachitra Painting', + 'Mk Samoan Art Inspired', + 'Mk Tlingit Art', + 'Mk Adnate Style', + 'Mk Ron English Style', + 'Mk Shepard Fairey Style', + ]), + ) + .register(z.globalRegistry, { + description: '\n The style to use.\n ', + }), + ) + .default(['Fooocus Enhance', 'Fooocus V2', 'Fooocus Sharp']), + uov_image_url: z.string().register(z.globalRegistry, { + description: 'The image to upscale or vary.', + }), + performance: z.optional( + z + .enum(['Speed', 'Quality', 'Extreme Speed', 'Lightning']) + .register(z.globalRegistry, { + description: '\n You can choose Speed or Quality\n ', + }), + ), + mixing_image_prompt_and_vary_upscale: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Mixing Image Prompt and Vary/Upscale', + }), + ) + .default(false), + image_prompt_3: z.optional(zSchemaImagePrompt), + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. Be as descriptive as possible for best results.', + }), + ) + .default(''), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. You can use up to 5 LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([ + { + path: 'https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_offset_example-lora_1.0.safetensors', + scale: 0.1, + }, + ]), + image_prompt_4: z.optional(zSchemaImagePrompt), + image_prompt_1: z.optional(zSchemaImagePrompt), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to false, the safety checker will be disabled.', + }), + ) + .default(true), + sharpness: z + .optional( + z.number().gte(0).lte(30).register(z.globalRegistry, { + description: + '\n The sharpness of the generated image. Use it to control how sharp the generated\n image should be. Higher value means image and texture are sharper.\n ', + }), + ) + .default(2), + guidance_scale: z + .optional( + z.number().gte(1).lte(30).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(4), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + aspect_ratio: z + .optional( + z.string().register(z.globalRegistry, { + description: + '\n The size of the generated image. 
You can choose between some presets or\n custom height and width that **must be multiples of 8**.\n ', + }), + ) + .default('1024x1024'), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: + '\n Number of images to generate in one request\n ', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + refiner_model: z.optional( + z + .enum(['None', 'realisticVisionV60B1_v51VAE.safetensors']) + .register(z.globalRegistry, { + description: 'Refiner (SDXL or SD 1.5)', + }), + ), + image_prompt_2: z.optional(zSchemaImagePrompt), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ', + }), + ) + .default(false), + uov_method: z.optional( + z + .enum([ + 'Disabled', + 'Vary (Subtle)', + 'Vary (Strong)', + 'Upscale (1.5x)', + 'Upscale (2x)', + 'Upscale (Fast 2x)', + ]) + .register(z.globalRegistry, { + description: 'The method to use for upscaling or varying.', + }), + ), + seed: z.optional(z.union([z.int(), z.null()])), + refiner_switch: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n Use 0.4 for SD1.5 realistic models; 0.667 for SD1.5 anime models\n 0.8 for XL-refiners; or any value for switching two SDXL models.\n ', + }), + ) + .default(0.8), +}) + +/** + * SD3Output + */ +export const zSchemaStableDiffusionV3MediumOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + num_images: z.int().register(z.globalRegistry, { + description: 'The number of images generated.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * TextToImageInput + */ +export const zSchemaStableDiffusionV3MediumInput = z.object({ + prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, prompt will be upsampled with more details.', + }), + ) + .default(false), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to generate an image from.', + }), + ) + .default(''), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * Output + */ +export const zSchemaFluxLoraInpaintingOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * InpaintInput + */ +export const zSchemaFluxLoraInpaintingInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: + 'The number of images to generate. 
This is always set to 1 for streaming output.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to use for inpainting or img2img.', + }), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + strength: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: + 'The strength to use for inpainting/image-to-image. Only used if the image_url is provided. 1.0 completely remakes the image, while 0.0 preserves the original.', + }), + ) + .default(0.85), + guidance_scale: z + .optional( + z.number().gte(0).lte(35).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + mask_url: z.string().register(z.globalRegistry, { + description: '\n The mask of the area to inpaint.\n ', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * Output + */ +export const zSchemaStableDiffusionV35MediumOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image.
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * TextToImageInput + */ +export const zSchemaStableDiffusionV35MediumInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ', + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(4.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(40), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * Output + */ +export const zSchemaFluxSchnellOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * SchnellTextToImageInput + */ +export const zSchemaFluxSchnellInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: + 'The speed of the generation. The higher the speed, the faster the generation.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + seed: z.optional(z.union([z.int(), z.unknown()])), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(12).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(4), +}) + +/** + * Output + */ +export const zSchemaOmnigenV1Output = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * TextToImageInput + */ +export const zSchemaOmnigenV1Input = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + img_guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The Image Guidance scale is a measure of how close you want\n the model to stick to your input image when looking for a related image to show you.\n ', + }), + ) + .default(1.6), + input_image_urls: z + .optional( + z.array(z.string()).register(z.globalRegistry, { + description: + 'URL of images to use while generating the image, Use <|image_1|> for the first image and so on.', + }), + ) + .default([]), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ', + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(50), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), +}) + +/** + * Output + */ +export const zSchemaAuraFlowOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The expanded prompt', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated images', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used to generate the images', + }), +}) + +/** + * Input + */ +export const zSchemaAuraFlowInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate images from', + }), + num_images: z + .optional( + z.int().gte(1).lte(2).register(z.globalRegistry, { + description: 'The number of images to generate', + }), + ) + .default(1), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to perform prompt expansion (recommended)', + }), + ) + .default(true), + sync_mode: z + .optional( + 
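+ // Editor's note (illustrative): static request/response types can be derived
+ // from these schema pairs with Zod's type helpers, e.g. for AuraFlow:
+ //   type AuraFlowRequest = z.input<typeof zSchemaAuraFlowInput> // defaults optional
+ //   type AuraFlowParsed = z.output<typeof zSchemaAuraFlowInput> // defaults filled in
+ //   type AuraFlowResult = z.output<typeof zSchemaAuraFlowOutput>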
z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ', + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: 'Classifier free guidance scale', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(20).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to take', + }), + ) + .default(50), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed to use for generating images', + }), + ), +}) + +/** + * T2IOutput + */ +export const zSchemaLumaPhotonFlashOutput = z.object({ + images: z.array(zSchemaFile).register(z.globalRegistry, { + description: 'The generated image', + }), +}) + +/** + * TextToImageRequest + */ +export const zSchemaLumaPhotonFlashInput = z.object({ + prompt: z.string().min(3).max(5000), + aspect_ratio: z.optional( + z + .enum(['16:9', '9:16', '1:1', '4:3', '3:4', '21:9', '9:21']) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), +}) + +/** + * Output + */ +export const zSchemaIdeogramV2TurboOutput = z.object({ + images: z.array(zSchemaFile), + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for the random number generator', + }), +}) + +/** + * TextToImageInput + */ +export const zSchemaIdeogramV2TurboInput = z.object({ + prompt: z.string(), + aspect_ratio: z.optional( + z + .enum([ + '10:16', + '16:10', + '9:16', + '16:9', + '4:3', + '3:4', + '1:1', + '1:3', + '3:1', + '3:2', + '2:3', + ]) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated image', + }), + ), + style: z.optional( + z + .enum(['auto', 'general', 'realistic', 'design', 'render_3D', 'anime']) + .register(z.globalRegistry, { + description: 'The style of the generated image', + }), + ), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to expand the prompt with MagicPrompt functionality.', + }), + ) + .default(true), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + seed: z.optional(z.union([z.int(), z.unknown()])), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'A negative prompt to avoid in the generated image', + }), + ) + .default(''), +}) + +/** + * Recraft20BTextToImageOutput + */ +export const zSchemaRecraft20bOutput = z.object({ + images: z.array(zSchemaFile), +}) + +/** + * RGBColor + */ +export const zSchemaRgbColor = z.object({ + r: z + .optional( + z.int().gte(0).lte(255).register(z.globalRegistry, { + description: 'Red color value', + }), + ) + .default(0), + b: z + .optional( + z.int().gte(0).lte(255).register(z.globalRegistry, { + description: 'Blue color value', + }), + ) + .default(0), + g: z + .optional( + z.int().gte(0).lte(255).register(z.globalRegistry, { + description: 'Green color value', + }), + ) + .default(0), +}) + +/** + * Recraft20BTextToImageInput + */ +export const zSchemaRecraft20bInput = z.object({ + prompt: z.string().min(1).max(1000), + image_size: z.optional( + z.union([ + 
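+ // Editor's note (illustrative): this recurring image_size union accepts either
+ // a preset name or a custom size object. Assuming zSchemaImageSize is the
+ // { width, height } shape defined earlier in this file, both hypothetical
+ // inputs below validate:
+ //   zSchemaRecraft20bInput.parse({ prompt: 'logo', image_size: 'square_hd' })
+ //   zSchemaRecraft20bInput.parse({ prompt: 'logo', image_size: { width: 512, height: 768 } })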
zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(false), + colors: z + .optional( + z.array(zSchemaRgbColor).register(z.globalRegistry, { + description: 'An array of preferable colors', + }), + ) + .default([]), + style: z.optional( + z + .enum([ + 'any', + 'realistic_image', + 'digital_illustration', + 'vector_illustration', + 'realistic_image/b_and_w', + 'realistic_image/enterprise', + 'realistic_image/hard_flash', + 'realistic_image/hdr', + 'realistic_image/motion_blur', + 'realistic_image/natural_light', + 'realistic_image/studio_portrait', + 'digital_illustration/2d_art_poster', + 'digital_illustration/2d_art_poster_2', + 'digital_illustration/3d', + 'digital_illustration/80s', + 'digital_illustration/engraving_color', + 'digital_illustration/glow', + 'digital_illustration/grain', + 'digital_illustration/hand_drawn', + 'digital_illustration/hand_drawn_outline', + 'digital_illustration/handmade_3d', + 'digital_illustration/infantile_sketch', + 'digital_illustration/kawaii', + 'digital_illustration/pixel_art', + 'digital_illustration/psychedelic', + 'digital_illustration/seamless', + 'digital_illustration/voxel', + 'digital_illustration/watercolor', + 'vector_illustration/cartoon', + 'vector_illustration/doodle_line_art', + 'vector_illustration/engraving', + 'vector_illustration/flat_2', + 'vector_illustration/kawaii', + 'vector_illustration/line_art', + 'vector_illustration/line_circuit', + 'vector_illustration/linocut', + 'vector_illustration/seamless', + ]) + .register(z.globalRegistry, { + description: + 'The style of the generated images. Vector images cost 2X as much.', + }), + ), + style_id: z.optional( + z.string().register(z.globalRegistry, { + description: 'The ID of the custom style reference (optional)', + }), + ), +}) + +/** + * Output + */ +export const zSchemaBriaTextToImageHdOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated images', + }), + seed: z.int().register(z.globalRegistry, { + description: 'Seed value used for generation.', + }), +}) + +/** + * GuidanceInput + */ +export const zSchemaGuidanceInput = z.object({ + scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Impact of the guidance.', + }), + ) + .default(1), + method: z.optional( + z + .enum([ + 'controlnet_canny', + 'controlnet_depth', + 'controlnet_recoloring', + 'controlnet_color_grid', + ]) + .register(z.globalRegistry, { + description: + 'Which guidance type you would like to include in the generation. Up to 4 guidance methods can be combined during a single inference. This parameter is optional.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: + 'The image that should be used as guidance, in base64 format, with the method defined in guidance_method_1. Accepted formats are jpeg, jpg, png, webp. Maximum file size 12MB. If more than one guidance method is used, all guidance images must be of the same aspect ratio, and this will be the aspect ratio of the generated results.
If guidance_method_1 is selected, an image must be provided.', + }), +}) + +/** + * TextToImageRequest + */ +export const zSchemaBriaTextToImageHdInput = z.object({ + prompt: z.string().min(1).register(z.globalRegistry, { + description: 'The prompt you would like to use to generate images.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: + 'How many images you would like to generate. When using any Guidance Method, Value is set to 1.', + }), + ) + .default(4), + prompt_enhancement: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'When set to true, enhances the provided prompt by generating additional, more descriptive variations, resulting in more diverse and creative output images.', + }), + ) + .default(false), + guidance: z + .optional( + z.array(zSchemaGuidanceInput).register(z.globalRegistry, { + description: + 'Guidance images to use for the generation. Up to 4 guidance methods can be combined during a single inference.', + }), + ) + .default([]), + aspect_ratio: z.optional( + z + .enum(['1:1', '2:3', '3:2', '3:4', '4:3', '4:5', '5:4', '9:16', '16:9']) + .register(z.globalRegistry, { + description: + 'The aspect ratio of the image. When a guidance method is being used, the aspect ratio is defined by the guidance image and this parameter is ignored.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(5), + medium: z.optional( + z.enum(['photography', 'art']).register(z.globalRegistry, { + description: + 'Which medium should be included in your generated images. This parameter is optional.', + }), + ), + seed: z.optional( + z.int().gte(0).lte(2147483647).register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'The negative prompt you would like to use to generate images.', + }), + ) + .default(''), + num_inference_steps: z + .optional( + z.int().gte(20).lte(50).register(z.globalRegistry, { + description: + 'The number of iterations the model goes through to refine the generated image. This parameter is optional.', + }), + ) + .default(30), +}) + +/** + * Output + */ +export const zSchemaBriaTextToImageFastOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated images', + }), + seed: z.int().register(z.globalRegistry, { + description: 'Seed value used for generation.', + }), +}) + +/** + * FastTextToImageRequest + */ +export const zSchemaBriaTextToImageFastInput = z.object({ + prompt: z.string().min(1).register(z.globalRegistry, { + description: 'The prompt you would like to use to generate images.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: + 'How many images you would like to generate. 
When using any Guidance Method, Value is set to 1.', + }), + ) + .default(4), + prompt_enhancement: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'When set to true, enhances the provided prompt by generating additional, more descriptive variations, resulting in more diverse and creative output images.', + }), + ) + .default(false), + guidance: z + .optional( + z.array(zSchemaGuidanceInput).register(z.globalRegistry, { + description: + 'Guidance images to use for the generation. Up to 4 guidance methods can be combined during a single inference.', + }), + ) + .default([]), + aspect_ratio: z.optional( + z + .enum(['1:1', '2:3', '3:2', '3:4', '4:3', '4:5', '5:4', '9:16', '16:9']) + .register(z.globalRegistry, { + description: + 'The aspect ratio of the image. When a guidance method is being used, the aspect ratio is defined by the guidance image and this parameter is ignored.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(5), + medium: z.optional( + z.enum(['photography', 'art']).register(z.globalRegistry, { + description: + 'Which medium should be included in your generated images. This parameter is optional.', + }), + ), + seed: z.optional( + z.int().gte(0).lte(2147483647).register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'The negative prompt you would like to use to generate images.', + }), + ) + .default(''), + num_inference_steps: z + .optional( + z.int().gte(4).lte(10).register(z.globalRegistry, { + description: + 'The number of iterations the model goes through to refine the generated image. This parameter is optional.', + }), + ) + .default(8), +}) + +/** + * Output + */ +export const zSchemaBriaTextToImageBaseOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated images', + }), + seed: z.int().register(z.globalRegistry, { + description: 'Seed value used for generation.', + }), +}) + +/** + * TextToImageRequest + */ +export const zSchemaBriaTextToImageBaseInput = z.object({ + prompt: z.string().min(1).register(z.globalRegistry, { + description: 'The prompt you would like to use to generate images.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: + 'How many images you would like to generate. When using any Guidance Method, Value is set to 1.', + }), + ) + .default(4), + prompt_enhancement: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'When set to true, enhances the provided prompt by generating additional, more descriptive variations, resulting in more diverse and creative output images.', + }), + ) + .default(false), + guidance: z + .optional( + z.array(zSchemaGuidanceInput).register(z.globalRegistry, { + description: + 'Guidance images to use for the generation. 
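+
+// Sketch: the HD and fast variants share a request shape but bound
+// num_inference_steps differently (20-50, default 30 vs 4-10, default 8), so a
+// step count that is valid for HD fails validation on the fast schema.
+const fastStepsCheck = zSchemaBriaTextToImageFastInput.safeParse({
+  prompt: 'product shot of a ceramic mug',
+  num_inference_steps: 30, // in range for HD, out of range here
+})
+// fastStepsCheck.success === false, with an issue at ['num_inference_steps'].
+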
Up to 4 guidance methods can be combined during a single inference.', + }), + ) + .default([]), + aspect_ratio: z.optional( + z + .enum(['1:1', '2:3', '3:2', '3:4', '4:3', '4:5', '5:4', '9:16', '16:9']) + .register(z.globalRegistry, { + description: + 'The aspect ratio of the image. When a guidance method is being used, the aspect ratio is defined by the guidance image and this parameter is ignored.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(5), + medium: z.optional( + z.enum(['photography', 'art']).register(z.globalRegistry, { + description: + 'Which medium should be included in your generated images. This parameter is optional.', + }), + ), + seed: z.optional( + z.int().gte(0).lte(2147483647).register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'The negative prompt you would like to use to generate images.', + }), + ) + .default(''), + num_inference_steps: z + .optional( + z.int().gte(20).lte(50).register(z.globalRegistry, { + description: + 'The number of iterations the model goes through to refine the generated image. This parameter is optional.', + }), + ) + .default(30), +}) + +/** + * SwittiOutput + */ +export const zSchemaSwitti512Output = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated images', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
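+
+// Sketch: a guided request assembled from zSchemaGuidanceInput. The schema
+// enforces the method enum and scale in [0, 1]; note the documented
+// "up to 4 methods" limit is description-only and not encoded as a max length.
+const guidedBriaRequest = zSchemaBriaTextToImageBaseInput.parse({
+  prompt: 'isometric city block',
+  guidance: [
+    {
+      method: 'controlnet_canny',
+      scale: 0.8,
+      image_url: 'https://example.com/edges.png', // placeholder URL
+    },
+  ],
+})
+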
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * TextToImageInput + */ +export const zSchemaSwitti512Input = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + sampling_top_k: z + .optional( + z.int().gte(10).lte(1000).register(z.globalRegistry, { + description: 'The number of top-k tokens to sample from.', + }), + ) + .default(400), + turn_off_cfg_start_si: z + .optional( + z.int().gte(0).lte(10).register(z.globalRegistry, { + description: 'Disable CFG starting scale', + }), + ) + .default(8), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(6), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + smooth_start_si: z + .optional( + z.int().gte(0).lte(10).register(z.globalRegistry, { + description: 'Smoothing starting scale', + }), + ) + .default(2), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + last_scale_temp: z + .optional( + z.number().gte(0.1).lte(10).register(z.globalRegistry, { + description: 'Temperature after disabling CFG', + }), + ) + .default(0.1), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + more_diverse: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'More diverse sampling', + }), + ) + .default(false), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. 
This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ', + }), + ) + .default(false), + more_smooth: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Smoothing with Gumbel softmax sampling', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + sampling_top_p: z + .optional( + z.number().gte(0.1).lte(1).register(z.globalRegistry, { + description: 'The top-p probability to sample from.', + }), + ) + .default(0.95), +}) + +/** + * SwittiOutput + */ +export const zSchemaSwittiOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated images', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * TextToImageInput + */ +export const zSchemaSwittiInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + sampling_top_k: z + .optional( + z.int().gte(10).lte(1000).register(z.globalRegistry, { + description: 'The number of top-k tokens to sample from.', + }), + ) + .default(400), + turn_off_cfg_start_si: z + .optional( + z.int().gte(0).lte(10).register(z.globalRegistry, { + description: 'Disable CFG starting scale', + }), + ) + .default(8), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(6), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + smooth_start_si: z + .optional( + z.int().gte(0).lte(10).register(z.globalRegistry, { + description: 'Smoothing starting scale', + }), + ) + .default(2), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. 
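+
+// Sketch: numeric bounds are enforced at parse time rather than clamped;
+// sampling_top_k, for instance, must be an integer in [10, 1000].
+const swittiCheck = zSchemaSwitti512Input.safeParse({
+  prompt: 'watercolor fox',
+  sampling_top_k: 5,
+})
+if (!swittiCheck.success) {
+  console.log(swittiCheck.error.issues) // one too_small issue at ['sampling_top_k']
+}
+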
moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + last_scale_temp: z + .optional( + z.number().gte(0.1).lte(10).register(z.globalRegistry, { + description: 'Temperature after disabling CFG', + }), + ) + .default(0.1), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + more_diverse: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'More diverse sampling', + }), + ) + .default(false), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ', + }), + ) + .default(false), + more_smooth: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Smoothing with Gumbel softmax sampling', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + sampling_top_p: z + .optional( + z.number().gte(0.1).lte(1).register(z.globalRegistry, { + description: 'The top-p probability to sample from.', + }), + ) + .default(0.95), +}) + +/** + * Image + */ +export const zSchemaRegistryImageFastSdxlModelsImage = z.object({ + height: z.int(), + content_type: z.optional(z.string()).default('image/jpeg'), + url: z.string(), + width: z.int(), +}) + +/** + * Output + */ +export const zSchemaFluxProV11Output = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z + .array(zSchemaRegistryImageFastSdxlModelsImage) + .register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * FluxProPlusTextToImageInput + */ +export const zSchemaFluxProV11Input = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + safety_tolerance: z.optional( + z.enum(['1', '2', '3', '4', '5', '6']).register(z.globalRegistry, { + description: + 'The safety tolerance level for the generated image. 
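+
+// Sketch: because defaulted fields use z.optional(...).default(...), the
+// request type (z.input) and the parsed type (z.output) differ: input leaves
+// defaulted fields optional, output has them populated.
+type SwittiRequest = z.input<typeof zSchemaSwittiInput> // guidance_scale?: number
+type SwittiParsed = z.output<typeof zSchemaSwittiInput> // guidance_scale: number
+const swittiRequest: SwittiRequest = { prompt: 'pixel art castle' }
+const swittiParsed: SwittiParsed = zSchemaSwittiInput.parse(swittiRequest)
+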
1 being the most strict and 6 being the most permissive.', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + enhance_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enhance the prompt for better results.', + }), + ) + .default(false), +}) + +/** + * Output + */ +export const zSchemaFluxProV11UltraFinetunedOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z + .array(zSchemaRegistryImageFastSdxlModelsImage) + .register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * FluxProUltraTextToImageFinetunedInput + */ +export const zSchemaFluxProV11UltraFinetunedInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + finetune_id: z.string().register(z.globalRegistry, { + description: 'References your specific model', + }), + safety_tolerance: z.optional( + z.enum(['1', '2', '3', '4', '5', '6']).register(z.globalRegistry, { + description: + 'The safety tolerance level for the generated image. 
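+
+// Sketch: image_size accepts either a preset name from the enum or a custom
+// zSchemaImageSize object (assumed here, per its earlier definition in this
+// file, to carry explicit pixel dimensions such as { width, height }).
+const fluxPresetSize = zSchemaFluxProV11Input.parse({
+  prompt: 'foggy pine forest at dawn',
+  image_size: 'landscape_16_9',
+})
+const fluxCustomSize = zSchemaFluxProV11Input.parse({
+  prompt: 'foggy pine forest at dawn',
+  image_size: { width: 1344, height: 768 }, // assumed zSchemaImageSize shape
+})
+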
1 being the most strict and 6 being the most permissive.', + }), + ), + image_prompt_strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The strength of the image prompt, between 0 and 1.', + }), + ) + .default(0.1), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + enhance_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enhance the prompt for better results.', + }), + ) + .default(false), + raw: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Generate less processed, more natural-looking images.', + }), + ) + .default(false), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + aspect_ratio: z.optional( + z.union([ + z.enum([ + '21:9', + '16:9', + '4:3', + '3:2', + '1:1', + '2:3', + '3:4', + '9:16', + '9:21', + ]), + z.string(), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The image URL to generate an image from.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + finetune_strength: z.number().gte(0).lte(2).register(z.globalRegistry, { + description: + "\n Controls finetune influence.\n Increase this value if your target concept isn't showing up strongly enough.\n The optimal setting depends on your finetune and prompt\n ", + }), +}) + +/** + * Output + */ +export const zSchemaJanusOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
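+
+// Sketch: unlike most fields in this file, finetune_id and finetune_strength
+// are not wrapped in z.optional(), so the finetuned schema rejects requests
+// that omit either one.
+const finetunedCheck = zSchemaFluxProV11UltraFinetunedInput.safeParse({
+  prompt: 'studio portrait in the target style',
+  finetune_id: 'my-finetune-id', // placeholder id
+})
+// finetunedCheck.success === false: finetune_strength is missing.
+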
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + timings: z.record(z.string(), z.number()), +}) + +/** + * JanusInput + */ +export const zSchemaJanusInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(16).register(z.globalRegistry, { + description: 'Number of images to generate in parallel.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + cfg_weight: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: + 'Classifier Free Guidance scale - how closely to follow the prompt.', + }), + ) + .default(5), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + temperature: z + .optional( + z.number().gte(0.1).lte(2).register(z.globalRegistry, { + description: + 'Controls randomness in the generation. Higher values make output more random.', + }), + ) + .default(1), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for reproducible generation.', + }), + ), +}) + +/** + * ImageOutput + */ +export const zSchemaLuminaImageV2Output = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated images', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * TextToImageInput + */ +export const zSchemaLuminaImageV2Input = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + cfg_trunc_ratio: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The ratio of the timestep interval to apply normalization-based guidance scale.', + }), + ) + .default(1), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + system_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The system prompt to use.', + }), + ) + .default( + 'You are an assistant designed to generate superior images with the superior degree of image-text alignment based on textual prompts or user prompts.', + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(4), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(30), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. 
moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + cfg_normalization: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to apply normalization-based guidance scale.', + }), + ) + .default(true), +}) + +/** + * Output + */ +export const zSchemaImagen3FastOutput = z.object({ + images: z.array(zSchemaFile), + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for generation', + }), +}) + +/** + * TextToImageInput + */ +export const zSchemaImagen3FastInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt describing what you want to see', + }), + aspect_ratio: z.optional( + z.enum(['1:1', '16:9', '9:16', '3:4', '4:3']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated image', + }), + ), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate (1-4)', + }), + ) + .default(1), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for reproducible generation', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'A description of what to discourage in the generated images', + }), + ) + .default(''), +}) + +/** + * Output + */ +export const zSchemaImagen3Output = z.object({ + images: z.array(zSchemaFile), + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for generation', + }), +}) + +/** + * TextToImageInput + */ +export const zSchemaImagen3Input = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt describing what you want to see', + }), + aspect_ratio: z.optional( + z.enum(['1:1', '16:9', '9:16', '3:4', '4:3']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated image', + }), + ), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate (1-4)', + }), + ) + .default(1), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for reproducible generation', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'A description of what to discourage in the generated images', + }), + ) + .default(''), +}) + +/** + * Output + */ +export const zSchemaFluxControlLoraDepthOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
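+
+// Sketch: the Imagen 3 fast and standard input schemas are structurally
+// identical, so a single request object validates against both; only the
+// endpoint each schema was generated for differs.
+const imagenRequest: z.input<typeof zSchemaImagen3Input> = {
+  prompt: 'macro photo of a dew-covered leaf',
+  aspect_ratio: '4:3',
+  num_images: 2,
+}
+zSchemaImagen3Input.parse(imagenRequest)
+zSchemaImagen3FastInput.parse(imagenRequest)
+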
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * DepthLoraInput + */ +export const zSchemaFluxControlLoraDepthInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + control_lora_strength: z + .optional( + z.number().gte(0).lte(2).register(z.globalRegistry, { + description: 'The strength of the control lora.', + }), + ) + .default(1), + preprocess_depth: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the input image will be preprocessed to extract depth information.\n This is useful for generating depth maps from images.\n ', + }), + ) + .default(true), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + guidance_scale: z + .optional( + z.number().gte(0).lte(35).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + control_lora_image_url: z.string().register(z.globalRegistry, { + description: + '\n The image to use for control lora. This is used to control the style of the generated image.\n ', + }), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), +}) + +/** + * Output + */ +export const zSchemaFluxControlLoraCannyOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
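+
+// Sketch: for the depth control LoRA, control_lora_image_url is required while
+// extra loras are optional; zSchemaLoraWeight entries are assumed (per its
+// earlier definition in this file) to reference a LoRA file with a scale.
+const depthRequest = zSchemaFluxControlLoraDepthInput.parse({
+  prompt: 'modern kitchen interior',
+  control_lora_image_url: 'https://example.com/room-photo.jpg', // placeholder
+  loras: [{ path: 'https://example.com/style.safetensors', scale: 0.7 }], // assumed shape
+})
+// preprocess_depth defaults to true, so a plain photo can be supplied and the
+// depth map is extracted from it (per the field description above).
+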
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * TextToImageInput + */ +export const zSchemaFluxControlLoraCannyInput = z.object({ + control_lora_strength: z + .optional( + z.number().gte(0).lte(2).register(z.globalRegistry, { + description: 'The strength of the control lora.', + }), + ) + .default(1), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ', + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(35).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + control_lora_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: + '\n The image to use for control lora. 
This is used to control the style of the generated image.\n ', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), +}) + +/** + * Output + */ +export const zSchemaIdeogramV2aOutput = z.object({ + images: z.array(zSchemaFile), + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for the random number generator', + }), +}) + +/** + * BaseTextToImageInput + */ +export const zSchemaIdeogramV2aInput = z.object({ + prompt: z.string(), + aspect_ratio: z.optional( + z + .enum([ + '10:16', + '16:10', + '9:16', + '16:9', + '4:3', + '3:4', + '1:1', + '1:3', + '3:1', + '3:2', + '2:3', + ]) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated image', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + style: z.optional( + z + .enum(['auto', 'general', 'realistic', 'design', 'render_3D', 'anime']) + .register(z.globalRegistry, { + description: 'The style of the generated image', + }), + ), + seed: z.optional(z.union([z.int(), z.unknown()])), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to expand the prompt with MagicPrompt functionality.', + }), + ) + .default(true), +}) + +/** + * Output + */ +export const zSchemaIdeogramV2aTurboOutput = z.object({ + images: z.array(zSchemaFile), + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for the random number generator', + }), +}) + +/** + * BaseTextToImageInput + */ +export const zSchemaIdeogramV2aTurboInput = z.object({ + prompt: z.string(), + aspect_ratio: z.optional( + z + .enum([ + '10:16', + '16:10', + '9:16', + '16:9', + '4:3', + '3:4', + '1:1', + '1:3', + '3:1', + '3:2', + '2:3', + ]) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated image', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + style: z.optional( + z + .enum(['auto', 'general', 'realistic', 'design', 'render_3D', 'anime']) + .register(z.globalRegistry, { + description: 'The style of the generated image', + }), + ), + seed: z.optional(z.union([z.int(), z.unknown()])), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to expand the prompt with MagicPrompt functionality.', + }), + ) + .default(true), +}) + +/** + * ImageOutput + */ +export const zSchemaCogview4Output = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated images', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
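+
+// Sketch: the Ideogram v2a and v2a-turbo schemas are identical. Note that seed
+// is typed as z.union([z.int(), z.unknown()]) straight from the upstream spec,
+// so it is effectively unvalidated here.
+const ideogramRequest = zSchemaIdeogramV2aInput.parse({
+  prompt: 'poster that reads "GRAND OPENING"',
+  aspect_ratio: '3:2',
+  style: 'design',
+})
+// ideogramRequest.expand_prompt === true (MagicPrompt is on by default).
+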
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * TextToImageInput + */ +export const zSchemaCogview4Input = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ', + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(50), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * Output + */ +export const zSchemaJuggernautFluxBaseOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
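+
+// Sketch: each field schema is registered in z.globalRegistry with its
+// description, so the docs can be read back at runtime. This works directly
+// for plain fields like prompt; defaulted fields are wrapped in ZodDefault,
+// so their registered schema sits one level deeper.
+const promptMeta = z.globalRegistry.get(zSchemaCogview4Input.shape.prompt)
+// promptMeta?.description === 'The prompt to generate an image from.'
+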
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * DevTextToImageInput + */ +export const zSchemaJuggernautFluxBaseInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * Output + */ +export const zSchemaRundiffusionPhotoFluxOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * PhotoLoraT2IInput + */ +export const zSchemaRundiffusionPhotoFluxInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. 
You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ', + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(35).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + photo_lora_scale: z + .optional( + z.number().register(z.globalRegistry, { + description: 'LoRA Scale of the photo lora model', + }), + ) + .default(0.75), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * Output + */ +export const zSchemaJuggernautFluxLoraOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * TextToImageInput + */ +export const zSchemaJuggernautFluxLoraInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. 
This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ', + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(35).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * Output + */ +export const zSchemaJuggernautFluxProOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * DevTextToImageInput + */ +export const zSchemaJuggernautFluxProInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be 
enabled.', + }), + ) + .default(true), +}) + +/** + * Output + */ +export const zSchemaJuggernautFluxLightningOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * SchnellTextToImageInput + */ +export const zSchemaJuggernautFluxLightningInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(1).lte(12).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(4), +}) + +/** + * Output + */ +export const zSchemaSanaSprintOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
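+
+// Sketch: the Lightning variant trades step count for speed; its
+// num_inference_steps is capped at 12 (default 4), versus 1-50 (default 28)
+// on the Juggernaut base and pro schemas above.
+const lightningRequest = zSchemaJuggernautFluxLightningInput.parse({
+  prompt: 'street photography, rainy neon city',
+})
+// lightningRequest.num_inference_steps === 4
+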
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * SprintInput + */ +export const zSchemaSanaSprintInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + style_name: z.optional( + z + .enum([ + '(No style)', + 'Cinematic', + 'Photographic', + 'Anime', + 'Manga', + 'Digital Art', + 'Pixel art', + 'Fantasy art', + 'Neonpunk', + '3D Model', + ]) + .register(z.globalRegistry, { + description: 'The style to generate the image in.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(20).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(2), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * Output + */ +export const zSchemaSanaV1548bOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
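+
+// Sketch: Sana Sprint is the few-step variant; its schema allows at most 20
+// inference steps and defaults to 2, while the Sana 1.5 schemas below default
+// to 18. The request shape is otherwise the same.
+const sprintRequest = zSchemaSanaSprintInput.parse({
+  prompt: 'flat vector illustration of a rocket launch',
+  style_name: 'Digital Art',
+})
+// sprintRequest.num_inference_steps === 2
+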
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * TextToImageInput + */ +export const zSchemaSanaV1548bInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + style_name: z.optional( + z + .enum([ + '(No style)', + 'Cinematic', + 'Photographic', + 'Anime', + 'Manga', + 'Digital Art', + 'Pixel art', + 'Fantasy art', + 'Neonpunk', + '3D Model', + ]) + .register(z.globalRegistry, { + description: 'The style to generate the image in.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(18), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * Output + */ +export const zSchemaSanaV1516bOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * TextToImageInput + */ +export const zSchemaSanaV1516bInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + style_name: z.optional( + z + .enum([ + '(No style)', + 'Cinematic', + 'Photographic', + 'Anime', + 'Manga', + 'Digital Art', + 'Pixel art', + 'Fantasy art', + 'Neonpunk', + '3D Model', + ]) + .register(z.globalRegistry, { + description: 'The style to generate the image in.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(18), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * ImageFile + */ +export const zSchemaImageFile = z.object({ + file_size: z.optional( + z.int().register(z.globalRegistry, { + description: 'The size of the file in bytes.', + }), + ), + height: z.optional( + z.int().register(z.globalRegistry, { + description: 'The height of the image', + }), + ), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), + width: z.optional( + z.int().register(z.globalRegistry, { + description: 'The width of the image', + }), + ), + file_name: z.optional( + z.string().register(z.globalRegistry, { + description: + 'The name of the file. 
It will be auto-generated if not provided.', + }), + ), + content_type: z.optional( + z.string().register(z.globalRegistry, { + description: 'The mime type of the file.', + }), + ), + file_data: z.optional( + z.string().register(z.globalRegistry, { + description: 'File data', + }), + ), +}) + +/** + * ImageResponse + */ +export const zSchemaGptImage1TextToImageOutput = z.object({ + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated images.', + }), +}) + +/** + * TextToImageRequest + */ +export const zSchemaGptImage1TextToImageInput = z.object({ + prompt: z.string().min(2).register(z.globalRegistry, { + description: 'The prompt for image generation', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + image_size: z.optional( + z + .enum(['auto', '1024x1024', '1536x1024', '1024x1536']) + .register(z.globalRegistry, { + description: 'Aspect ratio for the generated image', + }), + ), + background: z.optional( + z.enum(['auto', 'transparent', 'opaque']).register(z.globalRegistry, { + description: 'Background for the generated image', + }), + ), + quality: z.optional( + z.enum(['auto', 'low', 'medium', 'high']).register(z.globalRegistry, { + description: 'Quality for the generated image', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'Output format for the images', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), +}) + +/** + * Output + */ +export const zSchemaFLiteTextureOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * TextToImageInputTexture + */ +export const zSchemaFLiteTextureInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. 
This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ', + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative Prompt for generation.', + }), + ) + .default(''), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * Output + */ +export const zSchemaFLiteStandardOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * TextToImageInputStandard + */ +export const zSchemaFLiteStandardInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. 
This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ', + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative Prompt for generation.', + }), + ) + .default(''), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * ColorPaletteMember + */ +export const zSchemaColorPaletteMember = z.object({ + color_weight: z.optional(z.union([z.number().gte(0.05).lte(1), z.unknown()])), + rgb: zSchemaRgbColor, +}) + +/** + * ColorPalette + */ +export const zSchemaColorPalette = z.object({ + members: z.optional( + z.union([z.array(zSchemaColorPaletteMember), z.unknown()]), + ), + name: z.optional( + z.union([ + z.enum([ + 'EMBER', + 'FRESH', + 'JUNGLE', + 'MAGIC', + 'MELON', + 'MOSAIC', + 'PASTEL', + 'ULTRAMARINE', + ]), + z.unknown(), + ]), + ), +}) + +/** + * OutputV3 + */ +export const zSchemaIdeogramV3Output = z.object({ + images: z.array(zSchemaFile), + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for the random number generator', + }), +}) + +/** + * BaseTextToImageInputV3 + */ +export const zSchemaIdeogramV3Input = z.object({ + prompt: z.string(), + num_images: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'Number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + z.unknown(), + ]), + ), + style: z.optional( + z.union([z.enum(['AUTO', 'GENERAL', 'REALISTIC', 'DESIGN']), z.unknown()]), + ), + style_preset: z.optional( + z.union([ + z.enum([ + '80S_ILLUSTRATION', + '90S_NOSTALGIA', + 'ABSTRACT_ORGANIC', + 'ANALOG_NOSTALGIA', + 'ART_BRUT', + 'ART_DECO', + 'ART_POSTER', + 'AURA', + 'AVANT_GARDE', + 'BAUHAUS', + 'BLUEPRINT', + 'BLURRY_MOTION', + 'BRIGHT_ART', + 'C4D_CARTOON', + 'CHILDRENS_BOOK', + 'COLLAGE', + 'COLORING_BOOK_I', + 'COLORING_BOOK_II', + 'CUBISM', + 'DARK_AURA', + 'DOODLE', + 'DOUBLE_EXPOSURE', + 'DRAMATIC_CINEMA', + 'EDITORIAL', + 'EMOTIONAL_MINIMAL', + 'ETHEREAL_PARTY', + 'EXPIRED_FILM', + 'FLAT_ART', + 'FLAT_VECTOR', + 'FOREST_REVERIE', + 'GEO_MINIMALIST', + 'GLASS_PRISM', + 'GOLDEN_HOUR', + 'GRAFFITI_I', + 'GRAFFITI_II', + 'HALFTONE_PRINT', + 'HIGH_CONTRAST', + 'HIPPIE_ERA', + 'ICONIC', + 'JAPANDI_FUSION', + 'JAZZY', + 'LONG_EXPOSURE', + 'MAGAZINE_EDITORIAL', + 'MINIMAL_ILLUSTRATION', + 'MIXED_MEDIA', + 'MONOCHROME', + 'NIGHTLIFE', + 'OIL_PAINTING', + 'OLD_CARTOONS', + 'PAINT_GESTURE', + 'POP_ART', + 'RETRO_ETCHING', + 'RIVIERA_POP', + 'SPOTLIGHT_80S', + 'STYLIZED_RED', + 
'SURREAL_COLLAGE', + 'TRAVEL_POSTER', + 'VINTAGE_GEO', + 'VINTAGE_POSTER', + 'WATERCOLOR', + 'WEIRD', + 'WOODBLOCK_PRINT', + ]), + z.unknown(), + ]), + ), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether MagicPrompt should be used in generating the request.', + }), + ) + .default(true), + rendering_speed: z.optional( + z.enum(['TURBO', 'BALANCED', 'QUALITY']).register(z.globalRegistry, { + description: 'The rendering speed to use.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + color_palette: z.optional(z.union([zSchemaColorPalette, z.unknown()])), + style_codes: z.optional(z.union([z.array(z.string()), z.unknown()])), + seed: z.optional(z.union([z.int(), z.unknown()])), + image_urls: z.optional(z.union([z.array(z.string()), z.unknown()])), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Description of what to exclude from an image. Descriptions in the prompt take precedence over descriptions in the negative prompt.', + }), + ) + .default(''), +}) + +/** + * ImageOutput + */ +export const zSchemaPonyV7Output = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated images', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * Input + */ +export const zSchemaPonyV7Input = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate images from', + }), + num_images: z + .optional( + z.int().gte(1).lte(2).register(z.globalRegistry, { + description: 'The number of images to generate', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + noise_source: z.optional( + z.enum(['gpu', 'cpu']).register(z.globalRegistry, { + description: + "\n The source of the noise to use for generating images.\n If set to 'gpu', the noise will be generated on the GPU.\n If set to 'cpu', the noise will be generated on the CPU.\n ", + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: 'Classifier free guidance scale', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(20).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to take', + }), + ) + .default(40), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(false), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed to use for generating images', + }), + ), +}) + +/** + * MiniMaxTextToImageOutput + */ +export const zSchemaMinimaxImage01Output = z.object({ + images: z.array(zSchemaFile).register(z.globalRegistry, { + description: 'Generated images', + }), +}) + +/** + * MiniMaxTextToImageRequest + */ +export const zSchemaMinimaxImage01Input = z.object({ + prompt_optimizer: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable automatic prompt optimization', + }), + ) + .default(false), + aspect_ratio: z.optional( + z + .enum(['1:1', '16:9', '4:3', '3:2', '2:3', '3:4', '9:16', '21:9']) + .register(z.globalRegistry, { + description: 'Aspect ratio of the generated image', + }), + ), + num_images: z + .optional( + z.int().gte(1).lte(9).register(z.globalRegistry, { + description: 'Number of images to generate (1-9)', + }), + ) + .default(1), + prompt: z.string().min(1).max(1500).register(z.globalRegistry, { + description: 'Text prompt for image generation (max 1500 characters)', + }), +}) + +/** + * Output + */ +export const zSchemaFluxLoraStreamOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the 
generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * TextToImageInput + */ +export const zSchemaFluxLoraStreamInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: + 'The number of images to generate. This is always set to 1 for streaming output.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(35).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * DreamOOutput + */ +export const zSchemaDreamoOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used to generate the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The URLs of the generated images.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * DreamOInput + */ +export const zSchemaDreamoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + first_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'URL of first reference image to use for generation.', + }), + ), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + second_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'URL of second reference image to use for generation.', + }), + ), + second_reference_task: z.optional( + z.enum(['ip', 'id', 'style']).register(z.globalRegistry, { + description: 'Task for second reference image (ip/id/style).', + }), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + first_reference_task: z.optional( + z.enum(['ip', 'id', 'style']).register(z.globalRegistry, { + description: 'Task for first reference image (ip/id/style).', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to use for generation.', + }), + ) + .default(''), + ref_resolution: z + .optional( + z.int().gte(512).lte(1024).register(z.globalRegistry, { + description: 'Resolution for reference images.', + }), + ) + .default(512), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the image to be generated and uploaded\n before returning the response. 
This will increase the latency of the function but\n it allows you to get the image directly in the response without going through the CDN.\n ', + }), + ) + .default(false), + true_cfg: z + .optional( + z.number().gte(1).lte(5).register(z.globalRegistry, { + description: 'The weight of the CFG loss.', + }), + ) + .default(1), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(12), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), +}) + +/** + * Imagen4TextToImageUltraOutput + */ +export const zSchemaImagen4PreviewUltraOutput = z.object({ + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated images.', + }), + description: z.string().register(z.globalRegistry, { + description: 'The description of the generated images.', + }), +}) + +/** + * Imagen4TextToImageUltraInput + */ +export const zSchemaImagen4PreviewUltraInput = z.object({ + prompt: z.string().min(3).max(5000).register(z.globalRegistry, { + description: 'The text prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + aspect_ratio: z.optional( + z.enum(['1:1', '16:9', '9:16', '4:3', '3:4']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated image.', + }), + ), + resolution: z.optional( + z.enum(['1K', '2K']).register(z.globalRegistry, { + description: 'The resolution of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), +}) + +/** + * ImageOutput + */ +export const zSchemaBagelOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated images.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * ImageGenInput + */ +export const zSchemaBagelInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed to use for the generation.', + }), + ), + use_thought: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to use thought tokens for generation. 
If set to true, the model will "think" to potentially improve generation quality. This increases generation time and raises the cost by 20%.', + }), + ) + .default(false), +}) + +/** + * Output + */ +export const zSchemaFluxProKontextTextToImageOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z + .array(zSchemaRegistryImageFastSdxlModelsImage) + .register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * FluxProTextToImageInputWithAR + */ +export const zSchemaFluxProKontextTextToImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + aspect_ratio: z.optional( + z + .enum(['21:9', '16:9', '4:3', '3:2', '1:1', '2:3', '3:4', '9:16', '9:21']) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated image.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + safety_tolerance: z.optional( + z.enum(['1', '2', '3', '4', '5', '6']).register(z.globalRegistry, { + description: + 'The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive.', + }), + ), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + enhance_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enhance the prompt for better results.', + }), + ) + .default(false), +}) + +/** + * Output + */ +export const zSchemaFluxProKontextMaxTextToImageOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z + .array(zSchemaRegistryImageFastSdxlModelsImage) + .register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * FluxProTextToImageInputWithAR + */ +export const zSchemaFluxProKontextMaxTextToImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + aspect_ratio: z.optional( + z + .enum(['21:9', '16:9', '4:3', '3:2', '1:1', '2:3', '3:4', '9:16', '9:21']) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated image.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + safety_tolerance: z.optional( + z.enum(['1', '2', '3', '4', '5', '6']).register(z.globalRegistry, { + description: + 'The safety tolerance level for the generated image. 1 being the most strict and 6 being the most permissive.', + }), + ), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + enhance_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enhance the prompt for better results.', + }), + ) + .default(false), +}) + +/** + * Output + */ +export const zSchemaFlux1DevOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * BaseFlux1Input + */ +export const zSchemaFlux1DevInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: + 'The speed of the generation. 
The higher the speed, the faster the generation.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + seed: z.optional(z.union([z.int(), z.unknown()])), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), +}) + +/** + * Output + */ +export const zSchemaFlux1SchnellOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * SchnellFlux1TextToImageInput + */ +export const zSchemaFlux1SchnellInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: + 'The speed of the generation. 
The higher the speed, the faster the generation.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(1).lte(12).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(4), + seed: z.optional(z.union([z.int(), z.unknown()])), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), +}) + +/** + * SeedDreamOutput + */ +export const zSchemaBytedanceSeedreamV3TextToImageOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Generated images', + }), + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for generation', + }), +}) + +/** + * SeedDreamInput + */ +export const zSchemaBytedanceSeedreamV3TextToImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt used to generate the image', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Controls how closely the output image aligns with the input prompt. Higher values mean stronger prompt correlation.', + }), + ) + .default(2.5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed to control the stochasticity of image generation.', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * Output + */ +export const zSchemaOmnigenV2Output = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * TextToImageInput + */ +export const zSchemaOmnigenV2Input = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + "The prompt to generate or edit an image. Use specific language like 'Add the bird from image 1 to the desk in image 2' for better results.", + }), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + scheduler: z.optional( + z.enum(['euler', 'dpmsolver']).register(z.globalRegistry, { + description: 'The scheduler to use for the diffusion process.', + }), + ), + cfg_range_end: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'CFG range end value.', + }), + ) + .default(1), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Negative prompt to guide what should not be in the image.', + }), + ) + .default( + '(((deformed))), blurry, over saturation, bad anatomy, disfigured, poorly drawn face, mutation, mutated, (extra_limb), (ugly), (poorly drawn hands), fused fingers, messy drawing, broken legs censor, censored, censor_bar', + ), + text_guidance_scale: z + .optional( + z.number().gte(1).lte(8).register(z.globalRegistry, { + description: + '\n The Text Guidance scale controls how closely the model follows the text prompt.\n Higher values make the model stick more closely to the prompt.\n ', + }), + ) + .default(5), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_guidance_scale: z + .optional( + z.number().gte(1).lte(3).register(z.globalRegistry, { + description: + '\n The Image Guidance scale controls how closely the model follows the input images.\n For image editing: 1.3-2.0, for in-context generation: 2.0-3.0\n ', + }), + ) + .default(2), + input_image_urls: z + .optional( + z.array(z.string()).register(z.globalRegistry, { + description: + 'URLs of input images to use for image editing or multi-image generation. 
Support up to 3 images.', + }), + ) + .default([]), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + cfg_range_start: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'CFG range start value.', + }), + ) + .default(0), + num_inference_steps: z + .optional( + z.int().gte(20).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(50), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), +}) + +/** + * KontextT2IOutput + */ +export const zSchemaFluxKontextLoraTextToImageOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * BaseKontextInput + */ +export const zSchemaFluxKontextLoraTextToImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the image with', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: + 'The speed of the generation. The higher the speed, the faster the generation.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. 
You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(2.5), + num_inference_steps: z + .optional( + z.int().gte(10).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(30), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), +}) + +/** + * SkyRaccoonResponse + */ +export const zSchemaSkyRaccoonOutput = z.object({ + image: zSchemaFile, + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), +}) + +/** + * SkyRaccoonRequest + */ +export const zSchemaSkyRaccoonInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide video generation.', + }), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + turbo_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the video will be generated faster with no noticeable degradation in the visual quality.', + }), + ) + .default(false), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(false), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for video generation.', + }), + ) + .default( + 'bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards', + ), + num_inference_steps: z + .optional( + z.int().gte(2).lte(40).register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. 
Higher values give better quality but take longer.', + }), + ) + .default(30), +}) + +/** + * KreaOutput + */ +export const zSchemaFlux1KreaOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated images.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * BaseKreaFlux1Input + */ +export const zSchemaFlux1KreaInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: + 'The speed of the generation. The higher the speed, the faster the generation.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + seed: z.optional(z.union([z.int(), z.unknown()])), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(4.5), +}) + +/** + * KreaOutput + */ +export const zSchemaFluxKreaOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated images.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * BaseKreaInput + */ +export const zSchemaFluxKreaInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: + 'The speed of the generation. The higher the speed, the faster the generation.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + seed: z.optional(z.union([z.int(), z.unknown()])), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(4.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), +}) + +/** + * Output + */ +export const zSchemaFluxKreaLoraOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + timings: z.record(z.string(), z.number()), +}) + +/** + * TextToImageInput + */ +export const zSchemaFluxKreaLoraInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: + 'The number of images to generate. 
This is always set to 1 for streaming output.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(35).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), +}) + +/** + * Output + */ +export const zSchemaFluxKreaLoraStreamOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + timings: z.record(z.string(), z.number()), +}) + +/** + * TextToImageInput + */ +export const zSchemaFluxKreaLoraStreamInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: + 'The number of images to generate. This is always set to 1 for streaming output.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. 
You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(35).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), +}) + +/** + * QwenImageOutput + */ +export const zSchemaQwenImageOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + timings: z.record(z.string(), z.number()), +}) + +/** + * BaseQwenImageInput + */ +export const zSchemaQwenImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the image with', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: + "Acceleration level for image generation. Options: 'none', 'regular', 'high'. Higher acceleration increases speed. 'regular' balances speed and quality. 'high' is recommended for images without text.", + }), + ), + num_inference_steps: z + .optional( + z.int().gte(2).lte(250).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(30), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. 
You can use up to 3 LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + use_turbo: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Enable turbo mode for faster generation with high quality. When enabled, uses optimized settings (10 steps, CFG=1.2).', + }), + ) + .default(false), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt for the generation', + }), + ) + .default(' '), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(2.5), +}) + +/** + * WanT2IResponse + */ +export const zSchemaWanV22A14bTextToImageOutput = z.object({ + image: zSchemaFile, + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), +}) + +/** + * WanT2IRequest + */ +export const zSchemaWanV22A14bTextToImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide image generation.', + }), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.', + }), + ) + .default(3.5), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level to use. The more acceleration, the faster the generation, but with lower quality. The recommended value is 'regular'.", + }), + ), + shift: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'Shift value for the image. Must be between 1.0 and 10.0.', + }), + ) + .default(2), + enable_output_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, output video will be checked for safety after generation.', + }), + ) + .default(false), + guidance_scale_2: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Guidance scale for the second stage of the model. This is used to control the adherence to the prompt in the second stage of the model.', + }), + ) + .default(4), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, input data will be checked for safety before processing.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(2).lte(40).register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. Higher values give better quality but take longer.', + }), + ) + .default(27), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. 
If None, a random seed is chosen.', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for video generation.', + }), + ) + .default(''), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning.', + }), + ) + .default(false), +}) + +/** + * WanSmallT2IResponse + */ +export const zSchemaWanV225bTextToImageOutput = z.object({ + image: zSchemaFile, + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), +}) + +/** + * WanSmallT2IRequest + */ +export const zSchemaWanV225bTextToImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide image generation.', + }), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning.', + }), + ) + .default(false), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for video generation.', + }), + ) + .default(''), + shift: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'Shift value for the image. Must be between 1.0 and 10.0.', + }), + ) + .default(2), + enable_output_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, output video will be checked for safety after generation.', + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, input data will be checked for safety before processing.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. Higher values give better quality but take longer.', + }), + ) + .default(40), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), + image_format: z.optional( + z.enum(['png', 'jpeg']).register(z.globalRegistry, { + description: 'The format of the output image.', + }), + ), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.', + }), + ) + .default(3.5), +}) + +/** + * LoRAWeight + */ +export const zSchemaLoRaWeight = z.object({ + path: z.string().register(z.globalRegistry, { + description: 'URL or the path to the LoRA weights.', + }), + scale: z + .optional( + z.number().gte(0).lte(4).register(z.globalRegistry, { + description: + '\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ', + }), + ) + .default(1), + transformer: z.optional( + z.enum(['high', 'low', 'both']).register(z.globalRegistry, { + description: + "Specifies the transformer to load the lora weight into. 
'high' loads into the high-noise transformer, 'low' loads it into the low-noise transformer, while 'both' loads the LoRA into both transformers.", + }), + ), + weight_name: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Name of the LoRA weight. Used only if `path` is a Hugging Face repository, and required only if you have more than 1 safetensors file in the repo.', + }), + ), +}) + +/** + * WanT2IResponse + */ +export const zSchemaWanV22A14bTextToImageLoraOutput = z.object({ + image: zSchemaFile, + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), +}) + +/** + * WanLoRAT2IRequest + */ +export const zSchemaWanV22A14bTextToImageLoraInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide image generation.', + }), + shift: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'Shift value for the image. Must be between 1.0 and 10.0.', + }), + ) + .default(2), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level to use. The more acceleration, the faster the generation, but with lower quality. The recommended value is 'regular'.", + }), + ), + reverse_video: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If true, the video will be reversed.', + }), + ) + .default(false), + loras: z + .optional( + z.array(zSchemaLoRaWeight).register(z.globalRegistry, { + description: 'LoRA weights to be used in the inference.', + }), + ) + .default([]), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, input data will be checked for safety before processing.', + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.', + }), + ) + .default(3.5), + image_format: z.optional( + z.enum(['png', 'jpeg']).register(z.globalRegistry, { + description: 'The format of the output image.', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for video generation.', + }), + ) + .default(''), + enable_output_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, output video will be checked for safety after generation.', + }), + ) + .default(false), + guidance_scale_2: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Guidance scale for the second stage of the model. This is used to control the adherence to the prompt in the second stage of the model.', + }), + ) + .default(4), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(2).lte(40).register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. 
Higher values give better quality but take longer.', + }), + ) + .default(27), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), +}) + +/** + * DreaminaOutput + */ +export const zSchemaBytedanceDreaminaV31TextToImageOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Generated images', + }), + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for generation', + }), +}) + +/** + * DreaminaInput + */ +export const zSchemaBytedanceDreaminaV31TextToImageInput = z.object({ + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt used to generate the image', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed to control the stochasticity of image generation.', + }), + ), + enhance_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to use an LLM to enhance the prompt', + }), + ) + .default(false), +}) + +/** + * NanoBananaTextToImageOutput + */ +export const zSchemaNanoBananaOutput = z.object({ + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated images.', + }), + description: z.string().register(z.globalRegistry, { + description: 'The description of the generated images.', + }), +}) + +/** + * NanoBananaTextToImageInput + */ +export const zSchemaNanoBananaInput = z.object({ + prompt: z.string().min(3).max(50000).register(z.globalRegistry, { + description: 'The text prompt to generate an image from.', + }), + aspect_ratio: z.optional( + z + .enum([ + '21:9', + '16:9', + '3:2', + '4:3', + '5:4', + '1:1', + '4:5', + '3:4', + '2:3', + '9:16', + ]) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated image.', + }), + ), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + limit_generations: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Experimental parameter to limit the number of generations from each round of prompting to 1. 
Set to `True` to disregard any instructions in the prompt regarding the number of images to generate.',
+      }),
+    )
+    .default(false),
+})
+
+/**
+ * NanoBananaTextToImageOutput
+ */
+export const zSchemaGemini25FlashImageOutput = z.object({
+  images: z.array(zSchemaImageFile).register(z.globalRegistry, {
+    description: 'The generated images.',
+  }),
+  description: z.string().register(z.globalRegistry, {
+    description: 'The description of the generated images.',
+  }),
+})
+
+/**
+ * NanoBananaTextToImageInput
+ */
+export const zSchemaGemini25FlashImageInput = z.object({
+  prompt: z.string().min(3).max(50000).register(z.globalRegistry, {
+    description: 'The text prompt to generate an image from.',
+  }),
+  aspect_ratio: z.optional(
+    z
+      .enum([
+        '21:9',
+        '16:9',
+        '3:2',
+        '4:3',
+        '5:4',
+        '1:1',
+        '4:5',
+        '3:4',
+        '2:3',
+        '9:16',
+      ])
+      .register(z.globalRegistry, {
+        description: 'The aspect ratio of the generated image.',
+      }),
+  ),
+  sync_mode: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description:
+          "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.",
+      }),
+    )
+    .default(false),
+  num_images: z
+    .optional(
+      z.int().gte(1).lte(4).register(z.globalRegistry, {
+        description: 'The number of images to generate.',
+      }),
+    )
+    .default(1),
+  output_format: z.optional(
+    z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, {
+      description: 'The format of the generated image.',
+    }),
+  ),
+  limit_generations: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description:
+          'Experimental parameter to limit the number of generations from each round of prompting to 1. Set to `True` to disregard any instructions in the prompt regarding the number of images to generate.',
+      }),
+    )
+    .default(false),
+})
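+
+/**
+ * Editor's usage sketch (illustrative only; not generated from the OpenAPI
+ * spec). The generated Zod schemas can validate a request payload before it is
+ * sent to the queue endpoint. `fal.subscribe` from the `@fal-ai/client`
+ * package and the endpoint id below are assumptions inferred from the schema
+ * name; swap in whatever transport this repo actually uses.
+ *
+ * @example
+ * ```ts
+ * import { fal } from '@fal-ai/client'
+ *
+ * // parse() applies the schema defaults (num_images: 1, sync_mode: false, ...)
+ * const input = zSchemaGemini25FlashImageInput.parse({
+ *   prompt: 'a watercolor lighthouse at dusk',
+ * })
+ * const result = await fal.subscribe('fal-ai/gemini-25-flash-image', { input })
+ * ```
+ */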
+
+/**
+ * SeedDream4T2IOutput
+ */
+export const zSchemaBytedanceSeedreamV4TextToImageOutput = z.object({
+  images: z.array(zSchemaImage).register(z.globalRegistry, {
+    description: 'Generated images',
+  }),
+  seed: z.int().register(z.globalRegistry, {
+    description: 'Seed used for generation',
+  }),
+})
+
+/**
+ * SeedDream4T2IInput
+ */
+export const zSchemaBytedanceSeedreamV4TextToImageInput = z.object({
+  prompt: z.string().register(z.globalRegistry, {
+    description: 'The text prompt used to generate the image',
+  }),
+  num_images: z
+    .optional(
+      z.int().gte(1).lte(6).register(z.globalRegistry, {
+        description:
+          'Number of separate model generations to be run with the prompt.',
+      }),
+    )
+    .default(1),
+  image_size: z.optional(
+    z.union([
+      zSchemaImageSize,
+      z.enum([
+        'square_hd',
+        'square',
+        'portrait_4_3',
+        'portrait_16_9',
+        'landscape_4_3',
+        'landscape_16_9',
+        'auto',
+        'auto_2K',
+        'auto_4K',
+      ]),
+    ]),
+  ),
+  enhance_prompt_mode: z.optional(
+    z.enum(['standard', 'fast']).register(z.globalRegistry, {
+      description:
+        'The mode to use for prompt enhancement. Standard mode provides higher quality results but takes longer to generate. Fast mode provides average quality results but takes less time to generate.',
+    }),
+  ),
+  max_images: z
+    .optional(
+      z.int().gte(1).lte(6).register(z.globalRegistry, {
+        description:
+          'If set to a number greater than one, enables multi-image generation. The model may return up to `max_images` images per generation, and `num_images` generations will be carried out, so the total number of images generated will be between `num_images` and `max_images*num_images`.',
+      }),
+    )
+    .default(1),
+  sync_mode: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description:
+          "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.",
+      }),
+    )
+    .default(false),
+  enable_safety_checker: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description: 'If set to true, the safety checker will be enabled.',
+      }),
+    )
+    .default(true),
+  seed: z.optional(
+    z.int().register(z.globalRegistry, {
+      description:
+        'Random seed to control the stochasticity of image generation.',
+    }),
+  ),
+})
+
+/**
+ * HunyuanTextToImageOutput
+ */
+export const zSchemaHunyuanImageV21TextToImageOutput = z.object({
+  images: z.array(zSchemaImage).register(z.globalRegistry, {
+    description: 'A list of the generated images.',
+  }),
+  seed: z.int().register(z.globalRegistry, {
+    description: 'The base seed used for the generation process.',
+  }),
+})
+
+/**
+ * HunyuanTextToImageInput
+ */
+export const zSchemaHunyuanImageV21TextToImageInput = z.object({
+  prompt: z.string().register(z.globalRegistry, {
+    description: 'The text prompt to generate an image from.',
+  }),
+  num_images: z
+    .optional(
+      z.int().gte(1).lte(4).register(z.globalRegistry, {
+        description: 'The number of images to generate.',
+      }),
+    )
+    .default(1),
+  image_size: z.optional(
+    z.union([
+      zSchemaImageSize,
+      z.enum([
+        'square_hd',
+        'square',
+        'portrait_4_3',
+        'portrait_16_9',
+        'landscape_4_3',
+        'landscape_16_9',
+      ]),
+    ]),
+  ),
+  use_reprompt: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description:
+          'Enable prompt enhancement for potentially better results.',
+      }),
+    )
+    .default(true),
+  use_refiner: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description: 'Enable the refiner model for improved image quality.',
+      }),
+    )
+    .default(false),
+  output_format: z.optional(
+    z.enum(['jpeg', 'png']).register(z.globalRegistry, {
+      description: 'The format of the generated image.',
+    }),
+  ),
+  sync_mode: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description:
+          "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.",
+      }),
+    )
+    .default(false),
+  guidance_scale: z
+    .optional(
+      z.number().gte(1).lte(20).register(z.globalRegistry, {
+        description:
+          'Controls how much the model adheres to the prompt. Higher values mean stricter adherence.',
+      }),
+    )
+    .default(3.5),
+  seed: z.optional(
+    z.int().register(z.globalRegistry, {
+      description:
+        'Random seed for reproducible results. If None, a random seed is used.',
+    }),
+  ),
+  enable_safety_checker: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description: 'If set to true, the safety checker will be enabled.',
+      }),
+    )
+    .default(true),
+  negative_prompt: z
+    .optional(
+      z.string().register(z.globalRegistry, {
+        description:
+          'The negative prompt to guide the image generation away from certain concepts.',
+      }),
+    )
+    .default(''),
+  num_inference_steps: z
+    .optional(
+      z.int().gte(1).lte(50).register(z.globalRegistry, {
+        description: 'Number of denoising steps.',
+      }),
+    )
+    .default(28),
+})
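+
+/**
+ * Editor's note (illustration only; not part of the generated output).
+ * `z.infer` recovers the static request/response types from these schemas, so
+ * callers get compile-time checking alongside runtime validation. `payload`
+ * below is a hypothetical untrusted value.
+ *
+ * @example
+ * ```ts
+ * type HunyuanV21Input = z.infer<typeof zSchemaHunyuanImageV21TextToImageInput>
+ *
+ * // safeParse() reports validation issues instead of throwing
+ * const check = zSchemaHunyuanImageV21TextToImageInput.safeParse(payload)
+ * if (!check.success) console.error(check.error.issues)
+ * ```
+ */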
+
+/**
+ * SRPOOutput
+ */
+export const zSchemaFlux1SrpoOutput = z.object({
+  prompt: z.string().register(z.globalRegistry, {
+    description: 'The prompt used for generating the image.',
+  }),
+  images: z.array(zSchemaImage).register(z.globalRegistry, {
+    description: 'The generated images.',
+  }),
+  timings: z.record(z.string(), z.number()),
+  has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, {
+    description: 'Whether the generated images contain NSFW concepts.',
+  }),
+  seed: z.int().register(z.globalRegistry, {
+    description:
+      '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ',
+  }),
+})
+
+/**
+ * BaseSRPOFlux1Input
+ */
+export const zSchemaFlux1SrpoInput = z.object({
+  prompt: z.string().register(z.globalRegistry, {
+    description: 'The prompt to generate an image from.',
+  }),
+  num_images: z
+    .optional(
+      z.int().gte(1).lte(4).register(z.globalRegistry, {
+        description: 'The number of images to generate.',
+      }),
+    )
+    .default(1),
+  image_size: z.optional(
+    z.union([
+      zSchemaImageSize,
+      z.enum([
+        'square_hd',
+        'square',
+        'portrait_4_3',
+        'portrait_16_9',
+        'landscape_4_3',
+        'landscape_16_9',
+      ]),
+    ]),
+  ),
+  acceleration: z.optional(
+    z.enum(['none', 'regular', 'high']).register(z.globalRegistry, {
+      description:
+        'The speed of the generation. 
The higher the speed, the faster the generation.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + seed: z.optional(z.union([z.int(), z.unknown()])), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(4.5), +}) + +/** + * SRPOOutput + */ +export const zSchemaFluxSrpoOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated images.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * BaseSRPOInput + */ +export const zSchemaFluxSrpoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: + 'The speed of the generation. 
The higher the speed, the faster the generation.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + seed: z.optional(z.union([z.int(), z.unknown()])), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(4.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), +}) + +/** + * TextToImageOutput + * + * Output for text-to-image generation + */ +export const zSchemaWan25PreviewTextToImageOutput = z + .object({ + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated images', + }), + seeds: z.array(z.int()).register(z.globalRegistry, { + description: 'The seeds used for each generated image', + }), + actual_prompt: z.optional( + z.string().register(z.globalRegistry, { + description: 'The actual prompt used if prompt rewriting was enabled', + }), + ), + }) + .register(z.globalRegistry, { + description: 'Output for text-to-image generation', + }) + +/** + * TextToImageInput + * + * Input for text-to-image generation + */ +export const zSchemaWan25PreviewTextToImageInput = z + .object({ + prompt: z.string().min(1).register(z.globalRegistry, { + description: + 'The prompt for image generation. Supports Chinese and English, max 2000 characters.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate. Values from 1 to 4.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable prompt rewriting using LLM. Improves results for short prompts but increases processing time.', + }), + ) + .default(true), + negative_prompt: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Negative prompt to describe content to avoid. 
Max 500 characters.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'Input for text-to-image generation', + }) + +/** + * HunyuanTextToImageV3Output + */ +export const zSchemaHunyuanImageV3TextToImageOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'A list of the generated images.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The base seed used for the generation process.', + }), +}) + +/** + * HunyuanTextToImageInputV3 + */ +export const zSchemaHunyuanImageV3TextToImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt for image-to-image.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning.', + }), + ) + .default(false), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: + 'Controls how much the model adheres to the prompt. Higher values mean stricter adherence.', + }), + ) + .default(7.5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducible results. 
If None, a random seed is used.', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'The negative prompt to guide the image generation away from certain concepts.', + }), + ) + .default(''), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'Number of denoising steps.', + }), + ) + .default(28), +}) + +/** + * ReveCreateOutput + * + * Output for Reve text-to-image generation + */ +export const zSchemaReveTextToImageOutput = z + .object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated images', + }), + }) + .register(z.globalRegistry, { + description: 'Output for Reve text-to-image generation', + }) + +/** + * ReveCreateInput + * + * Input for Reve text-to-image generation + */ +export const zSchemaReveTextToImageInput = z + .object({ + prompt: z.string().min(1).max(2560).register(z.globalRegistry, { + description: 'The text description of the desired image.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + aspect_ratio: z.optional( + z + .enum(['16:9', '9:16', '3:2', '2:3', '4:3', '3:4', '1:1']) + .register(z.globalRegistry, { + description: 'The desired aspect ratio of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'Output format for the generated image.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'Input for Reve text-to-image generation', + }) + +/** + * ImageResponseMini + */ +export const zSchemaGptImage1MiniOutput = z.object({ + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated images.', + }), +}) + +/** + * TextToImageRequestMini + */ +export const zSchemaGptImage1MiniInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt for image generation', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + image_size: z.optional( + z + .enum(['auto', '1024x1024', '1536x1024', '1024x1536']) + .register(z.globalRegistry, { + description: 'Aspect ratio for the generated image', + }), + ), + background: z.optional( + z.enum(['auto', 'transparent', 'opaque']).register(z.globalRegistry, { + description: 'Background for the generated image', + }), + ), + quality: z.optional( + z.enum(['auto', 'low', 'medium', 'high']).register(z.globalRegistry, { + description: 'Quality for the generated image', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'Output format for the images', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), +}) + +/** + * PiQwenOutput + */ +export 
const zSchemaPiflowOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The URLs of the generated images.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), +}) + +/** + * PiQwenInput + */ +export const zSchemaPiflowInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(8), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducible generation. If set to None, a random seed will be used.', + }), + ), +}) + +/** + * GaiaOutputModel + */ +export const zSchemaFiboGenerateOutput = z.object({ + images: z + .optional( + z.array(z.record(z.string(), z.unknown())).register(z.globalRegistry, { + description: 'Generated images.', + }), + ) + .default([]), + image: zSchemaImage, + structured_prompt: z + .record(z.string(), z.unknown()) + .register(z.globalRegistry, { + description: 'Current prompt.', + }), +}) + +/** + * Lighting + */ +export const zSchemaLighting = z.object({ + shadows: z.optional(z.union([z.string(), z.unknown()])), + conditions: z.optional(z.union([z.string(), z.unknown()])), + direction: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * PhotographicCharacteristics + */ +export const zSchemaPhotographicCharacteristics = z.object({ + focus: z.optional(z.union([z.string(), z.unknown()])), + lens_focal_length: z.optional(z.union([z.string(), z.unknown()])), + camera_angle: z.optional(z.union([z.string(), z.unknown()])), + depth_of_field: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * PromptObject + */ +export const zSchemaPromptObject = z.object({ + clothing: z.optional(z.union([z.string(), z.unknown()])), + description: z.optional(z.union([z.string(), z.unknown()])), + skin_tone_and_texture: z.optional(z.union([z.string(), z.unknown()])), + appearance_details: z.optional(z.union([z.string(), z.unknown()])), + number_of_objects: z.optional(z.union([z.int(), z.unknown()])), + expression: z.optional(z.union([z.string(), z.unknown()])), + pose: z.optional(z.union([z.string(), z.unknown()])), + shape_and_color: z.optional(z.union([z.string(), z.unknown()])), + relationship: z.string().register(z.globalRegistry, { + description: + 'The relationship of the object to other objects in the image.', + }), + texture: z.optional(z.union([z.string(), z.unknown()])), + gender: 
z.optional(z.union([z.string(), z.unknown()])), + relative_size: z.optional(z.union([z.string(), z.unknown()])), + location: z.optional(z.union([z.string(), z.unknown()])), + orientation: z.optional(z.union([z.string(), z.unknown()])), + action: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * Aesthetics + */ +export const zSchemaAesthetics = z.object({ + composition: z.optional(z.union([z.string(), z.unknown()])), + mood_atmosphere: z.optional(z.union([z.string(), z.unknown()])), + color_scheme: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * StructuredPrompt + */ +export const zSchemaStructuredPrompt = z.object({ + background_setting: z.optional(z.union([z.string(), z.unknown()])), + artistic_style: z.optional(z.union([z.string(), z.unknown()])), + aesthetics: z.optional(z.union([zSchemaAesthetics, z.unknown()])), + text_render: z.optional(z.union([z.array(z.unknown()), z.unknown()])), + objects: z.optional(z.union([z.array(zSchemaPromptObject), z.unknown()])), + style_medium: z.optional(z.union([z.string(), z.unknown()])), + photographic_characteristics: z.optional( + z.union([zSchemaPhotographicCharacteristics, z.unknown()]), + ), + context: z.optional(z.union([z.string(), z.unknown()])), + lighting: z.optional(z.union([zSchemaLighting, z.unknown()])), + short_description: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * GaiaInputModel + */ +export const zSchemaFiboGenerateInput = z.object({ + prompt: z.optional(z.union([z.string(), z.unknown()])), + steps_num: z + .optional( + z.int().gte(20).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps.', + }), + ) + .default(50), + aspect_ratio: z.optional( + z + .enum(['1:1', '2:3', '3:2', '3:4', '4:3', '4:5', '5:4', '9:16', '16:9']) + .register(z.globalRegistry, { + description: + 'Aspect ratio. 
Options: 1:1, 2:3, 3:2, 3:4, 4:3, 4:5, 5:4, 9:16, 16:9', + }), + ), + image_url: z.optional(z.union([z.string(), z.unknown()])), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, returns the image directly in the response (increases latency).', + }), + ) + .default(false), + guidance_scale: z + .optional( + z.int().gte(3).lte(5).register(z.globalRegistry, { + description: 'Guidance scale for text.', + }), + ) + .default(5), + seed: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for reproducibility.', + }), + ) + .default(5555), + structured_prompt: z.optional( + z.union([zSchemaStructuredPrompt, z.unknown()]), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for image generation.', + }), + ) + .default(''), +}) + +/** + * Emu35Output + */ +export const zSchemaEmu35ImageTextToImageOutput = z.object({ + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The edited image.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed for the inference.', + }), +}) + +/** + * Emu35ImageInput + */ +export const zSchemaEmu35ImageTextToImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to create the image.', + }), + resolution: z.optional( + z.enum(['480p', '720p']).register(z.globalRegistry, { + description: 'The resolution of the output image.', + }), + ), + aspect_ratio: z.optional( + z + .enum(['21:9', '16:9', '4:3', '3:2', '1:1', '2:3', '3:4', '9:16', '9:21']) + .register(z.globalRegistry, { + description: 'The aspect ratio of the output image.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the output image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to return the image in sync mode.', + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the inference.', + }), + ), +}) + +/** + * Image + * + * Represents an image file. 
+ */ +export const zSchemaImageOutput = z + .object({ + file_size: z.optional(z.union([z.int(), z.unknown()])), + height: z.optional(z.union([z.int(), z.unknown()])), + file_name: z.optional(z.union([z.string(), z.unknown()])), + content_type: z.optional(z.union([z.string(), z.unknown()])), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), + width: z.optional(z.union([z.int(), z.unknown()])), + }) + .register(z.globalRegistry, { + description: 'Represents an image file.', + }) + +/** + * ImagineArt_1_5_Output + */ +export const zSchemaImagineart15PreviewTextToImageOutput = z.object({ + images: z.array(zSchemaImageOutput).register(z.globalRegistry, { + description: 'Generated image', + }), +}) + +/** + * ImagineArt_1_5_Input + */ +export const zSchemaImagineart15PreviewTextToImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Text prompt describing the desired image', + }), + aspect_ratio: z.optional( + z + .enum(['1:1', '16:9', '9:16', '4:3', '3:4', '3:1', '1:3', '3:2', '2:3']) + .register(z.globalRegistry, { + description: + 'Image aspect ratio: 1:1, 3:1, 1:3, 16:9, 9:16, 4:3, 3:4, 3:2, 2:3', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Seed for the image generation', + }), + ), +}) + +/** + * NanoBananaTextToImageOutput + */ +export const zSchemaNanoBananaProOutput = z.object({ + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated images.', + }), + description: z.string().register(z.globalRegistry, { + description: 'The description of the generated images.', + }), +}) + +/** + * NanoBananaTextToImageInput + */ +export const zSchemaNanoBananaProInput = z.object({ + prompt: z.string().min(3).max(50000).register(z.globalRegistry, { + description: 'The text prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + enable_web_search: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Enable web search for the image generation task. This will allow the model to use the latest information from the web to generate the image.', + }), + ) + .default(false), + aspect_ratio: z.optional( + z + .enum([ + '21:9', + '16:9', + '3:2', + '4:3', + '5:4', + '1:1', + '4:5', + '3:4', + '2:3', + '9:16', + ]) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated image.', + }), + ), + resolution: z.optional( + z.enum(['1K', '2K', '4K']).register(z.globalRegistry, { + description: 'The resolution of the image to generate.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the random number generator.', + }), + ), + limit_generations: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Experimental parameter to limit the number of generations from each round of prompting to 1. 
Set to `True` to disregard any instructions in the prompt regarding the number of images to generate.',
+      }),
+    )
+    .default(false),
+})
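+
+/**
+ * Editor's usage sketch (illustrative only; not generated from the spec).
+ * Defensive validation of a queue result against the generated output schema.
+ * `fal.queue.result` from `@fal-ai/client`, the endpoint id, and `requestId`
+ * are assumptions for the example.
+ *
+ * @example
+ * ```ts
+ * import { fal } from '@fal-ai/client'
+ *
+ * const { data } = await fal.queue.result('fal-ai/nano-banana-pro', {
+ *   requestId,
+ * })
+ * // Throws if the response drifts from the schema this file was generated from
+ * const output = zSchemaNanoBananaProOutput.parse(data)
+ * ```
+ */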
+
+/**
+ * NanoBananaTextToImageOutput
+ */
+export const zSchemaGemini3ProImagePreviewOutput = z.object({
+  images: z.array(zSchemaImageFile).register(z.globalRegistry, {
+    description: 'The generated images.',
+  }),
+  description: z.string().register(z.globalRegistry, {
+    description: 'The description of the generated images.',
+  }),
+})
+
+/**
+ * NanoBananaTextToImageInput
+ */
+export const zSchemaGemini3ProImagePreviewInput = z.object({
+  prompt: z.string().min(3).max(50000).register(z.globalRegistry, {
+    description: 'The text prompt to generate an image from.',
+  }),
+  num_images: z
+    .optional(
+      z.int().gte(1).lte(4).register(z.globalRegistry, {
+        description: 'The number of images to generate.',
+      }),
+    )
+    .default(1),
+  enable_web_search: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description:
+          'Enable web search for the image generation task. This will allow the model to use the latest information from the web to generate the image.',
+      }),
+    )
+    .default(false),
+  resolution: z.optional(
+    z.enum(['1K', '2K', '4K']).register(z.globalRegistry, {
+      description: 'The resolution of the image to generate.',
+    }),
+  ),
+  aspect_ratio: z.optional(
+    z
+      .enum([
+        '21:9',
+        '16:9',
+        '3:2',
+        '4:3',
+        '5:4',
+        '1:1',
+        '4:5',
+        '3:4',
+        '2:3',
+        '9:16',
+      ])
+      .register(z.globalRegistry, {
+        description: 'The aspect ratio of the generated image.',
+      }),
+  ),
+  output_format: z.optional(
+    z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, {
+      description: 'The format of the generated image.',
+    }),
+  ),
+  sync_mode: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description:
+          "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.",
+      }),
+    )
+    .default(false),
+  seed: z.optional(
+    z.int().register(z.globalRegistry, {
+      description: 'The seed for the random number generator.',
+    }),
+  ),
+  limit_generations: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description:
+          'Experimental parameter to limit the number of generations from each round of prompting to 1. Set to `True` to disregard any instructions in the prompt regarding the number of images to generate.',
+      }),
+    )
+    .default(false),
+})
+
+/**
+ * Flux2FlexOutput
+ */
+export const zSchemaFlux2FlexOutput = z.object({
+  images: z.array(zSchemaImageFile).register(z.globalRegistry, {
+    description: 'The generated images.',
+  }),
+  seed: z.int().register(z.globalRegistry, {
+    description: 'The seed used for the generation.',
+  }),
+})
+
+/**
+ * Flux2FlexTextToImageInput
+ */
+export const zSchemaFlux2FlexInput = z.object({
+  prompt: z.string().register(z.globalRegistry, {
+    description: 'The prompt to generate an image from.',
+  }),
+  image_size: z.optional(
+    z.union([
+      zSchemaImageSize,
+      z.enum([
+        'square_hd',
+        'square',
+        'portrait_4_3',
+        'portrait_16_9',
+        'landscape_4_3',
+        'landscape_16_9',
+      ]),
+    ]),
+  ),
+  output_format: z.optional(
+    z.enum(['jpeg', 'png']).register(z.globalRegistry, {
+      description: 'The format of the generated image.',
+    }),
+  ),
+  sync_mode: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description:
+          "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.",
+      }),
+    )
+    .default(false),
+  safety_tolerance: z.optional(
+    z.enum(['1', '2', '3', '4', '5']).register(z.globalRegistry, {
+      description:
+        'The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive.',
+    }),
+  ),
+  enable_prompt_expansion: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description:
+          "Whether to expand the prompt using the model's own knowledge.",
+      }),
+    )
+    .default(true),
+  num_inference_steps: z
+    .optional(
+      z.int().gte(2).lte(50).register(z.globalRegistry, {
+        description: 'The number of inference steps to perform.',
+      }),
+    )
+    .default(28),
+  enable_safety_checker: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description: 'Whether to enable the safety checker.',
+      }),
+    )
+    .default(true),
+  seed: z.optional(
+    z.int().register(z.globalRegistry, {
+      description: 'The seed to use for the generation.',
+    }),
+  ),
+  guidance_scale: z
+    .optional(
+      z.number().gte(1.5).lte(10).register(z.globalRegistry, {
+        description: 'The guidance scale to use for the generation.',
+      }),
+    )
+    .default(3.5),
+})
+
+/**
+ * BallpointPenSketchOutput
+ */
+export const zSchemaFlux2LoraGalleryBallpointPenSketchOutput = z.object({
+  prompt: z.string().register(z.globalRegistry, {
+    description: 'The prompt used for generation',
+  }),
+  images: z.array(zSchemaImage).register(z.globalRegistry, {
+    description: 'The generated ballpoint pen sketch style images',
+  }),
+  seed: z.int().register(z.globalRegistry, {
+    description: 'The seed used for generation',
+  }),
+})
+
+/**
+ * BallpointPenSketchInput
+ *
+ * Input model for Ballpoint Pen Sketch endpoint - Generate ballpoint pen sketch style images
+ */
+export const zSchemaFlux2LoraGalleryBallpointPenSketchInput = z
+  .object({
+    prompt: z.string().register(z.globalRegistry, {
+      description:
+        "The prompt to generate a ballpoint pen sketch style image. 
Use 'b4llp01nt' trigger word for best results.", + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level for image generation. 'regular' balances speed and quality.", + }), + ), + lora_scale: z + .optional( + z.number().gte(0).lte(2).register(z.globalRegistry, { + description: 'The strength of the ballpoint pen sketch effect.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'The format of the output image', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and won't be saved in history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.', + }), + ) + .default(2.5), + seed: z.optional(z.union([z.int(), z.unknown()])), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable the safety checker for the generated image.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(40), + }) + .register(z.globalRegistry, { + description: + 'Input model for Ballpoint Pen Sketch endpoint - Generate ballpoint pen sketch style images', + }) + +/** + * DigitalComicArtOutput + */ +export const zSchemaFlux2LoraGalleryDigitalComicArtOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated digital comic art style images', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), +}) + +/** + * DigitalComicArtInput + * + * Input model for Digital Comic Art endpoint - Generate digital comic art style images + */ +export const zSchemaFlux2LoraGalleryDigitalComicArtInput = z + .object({ + prompt: z.string().register(z.globalRegistry, { + description: + "The prompt to generate a digital comic art style image. Use 'd1g1t4l' trigger word for best results.", + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level for image generation. 
'regular' balances speed and quality.", + }), + ), + lora_scale: z + .optional( + z.number().gte(0).lte(2).register(z.globalRegistry, { + description: 'The strength of the digital comic art effect.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'The format of the output image', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and won't be saved in history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale. Controls how closely the model follows the prompt.', + }), + ) + .default(2.5), + seed: z.optional(z.union([z.int(), z.unknown()])), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable the safety checker for the generated image.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(40), + }) + .register(z.globalRegistry, { + description: + 'Input model for Digital Comic Art endpoint - Generate digital comic art style images', + }) + +/** + * HdrStyleOutput + */ +export const zSchemaFlux2LoraGalleryHdrStyleOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated HDR style images', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), +}) + +/** + * HdrStyleInput + * + * Input model for HDR Style endpoint - Generate HDR style images with vibrant colors + */ +export const zSchemaFlux2LoraGalleryHdrStyleInput = z + .object({ + prompt: z.string().register(z.globalRegistry, { + description: + "The prompt to generate an HDR style image. The trigger word 'Hyp3rRe4list1c' will be automatically prepended.", + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level for image generation. 'regular' balances speed and quality.", + }), + ), + lora_scale: z + .optional( + z.number().gte(0).lte(2).register(z.globalRegistry, { + description: 'The strength of the HDR style effect.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'The format of the output image', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and won't be saved in history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale. 
Controls how closely the model follows the prompt.', + }), + ) + .default(2.5), + seed: z.optional(z.union([z.int(), z.unknown()])), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable the safety checker for the generated image.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(40), + }) + .register(z.globalRegistry, { + description: + 'Input model for HDR Style endpoint - Generate HDR style images with vibrant colors', + }) + +/** + * RealismOutput + */ +export const zSchemaFlux2LoraGalleryRealismOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated realistic style images', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), +}) + +/** + * RealismInput + * + * Input model for Realism endpoint - Generate realistic style images + */ +export const zSchemaFlux2LoraGalleryRealismInput = z + .object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to generate a realistic image with natural lighting and authentic details.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level for image generation. 'regular' balances speed and quality.", + }), + ), + lora_scale: z + .optional( + z.number().gte(0).lte(2).register(z.globalRegistry, { + description: 'The strength of the realism effect.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'The format of the output image', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and won't be saved in history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale. 
Controls how closely the model follows the prompt.', + }), + ) + .default(2.5), + seed: z.optional(z.union([z.int(), z.unknown()])), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable the safety checker for the generated image.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(40), + }) + .register(z.globalRegistry, { + description: + 'Input model for Realism endpoint - Generate realistic style images', + }) + +/** + * SatelliteViewStyleOutput + */ +export const zSchemaFlux2LoraGallerySatelliteViewStyleOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated satellite view style images', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), +}) + +/** + * SatelliteViewStyleInput + * + * Input model for Satellite View Style endpoint - Generate satellite/aerial view style images + */ +export const zSchemaFlux2LoraGallerySatelliteViewStyleInput = z + .object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to generate a satellite/aerial view style image.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level for image generation. 'regular' balances speed and quality.", + }), + ), + lora_scale: z + .optional( + z.number().gte(0).lte(2).register(z.globalRegistry, { + description: 'The strength of the satellite view style effect.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'The format of the output image', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and won't be saved in history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale. 
Controls how closely the model follows the prompt.', + }), + ) + .default(2.5), + seed: z.optional(z.union([z.int(), z.unknown()])), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable the safety checker for the generated image.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(40), + }) + .register(z.globalRegistry, { + description: + 'Input model for Satellite View Style endpoint - Generate satellite/aerial view style images', + }) + +/** + * SepiaVintageOutput + */ +export const zSchemaFlux2LoraGallerySepiaVintageOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated sepia vintage photography style images', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), +}) + +/** + * SepiaVintageInput + * + * Input model for Sepia Vintage Photography endpoint - Generate vintage sepia style images + */ +export const zSchemaFlux2LoraGallerySepiaVintageInput = z + .object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to generate a sepia vintage photography style image.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level for image generation. 'regular' balances speed and quality.", + }), + ), + lora_scale: z + .optional( + z.number().gte(0).lte(2).register(z.globalRegistry, { + description: 'The strength of the sepia vintage photography effect.', + }), + ) + .default(1), + output_format: z.optional( + z.enum(['png', 'jpeg', 'webp']).register(z.globalRegistry, { + description: 'The format of the output image', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and won't be saved in history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale. 
Controls how closely the model follows the prompt.', + }), + ) + .default(2.5), + seed: z.optional(z.union([z.int(), z.unknown()])), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable the safety checker for the generated image.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(40), + }) + .register(z.globalRegistry, { + description: + 'Input model for Sepia Vintage Photography endpoint - Generate vintage sepia style images', + }) + +/** + * ZImageTurboOutput + */ +export const zSchemaZImageTurboOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + seed: z.int().register(z.globalRegistry, { + description: + 'Seed of the generated Image. It will be the same value of the one passed in the input or the randomly generated that was used in case none was passed.', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + timings: z.record(z.string(), z.number()).register(z.globalRegistry, { + description: 'The timings of the generation process.', + }), +}) + +/** + * ZImageTurboTextToImageInput + */ +export const zSchemaZImageTurboInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: 'The acceleration level to use.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable prompt expansion. 
Note: this will increase the price by 0.0025 credits per request.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(8), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * OvisImageOutput + */ +export const zSchemaOvisImageOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + timings: z.record(z.string(), z.number()), +}) + +/** + * TextToImageInput + */ +export const zSchemaOvisImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: 'The acceleration level to use.', + }), + ), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: 'The guidance scale to use for the image generation.', + }), + ) + .default(5), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to generate an image from.', + }), + ) + .default(''), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), +}) + +/** + * ZImageTurboOutput + */ +export const zSchemaZImageTurboLoraOutput = z.object({ + prompt: 
z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + seed: z.int().register(z.globalRegistry, { + description: + 'Seed of the generated Image. It will be the same value of the one passed in the input or the randomly generated that was used in case none was passed.', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + timings: z.record(z.string(), z.number()).register(z.globalRegistry, { + description: 'The timings of the generation process.', + }), +}) + +/** + * LoRAInput + * + * LoRA weight configuration. + */ +export const zSchemaLoRaInput = z + .object({ + path: z.string().register(z.globalRegistry, { + description: 'URL, HuggingFace repo ID (owner/repo) to lora weights.', + }), + scale: z + .optional( + z.number().gte(0).lte(4).register(z.globalRegistry, { + description: 'Scale factor for LoRA application (0.0 to 4.0).', + }), + ) + .default(1), + }) + .register(z.globalRegistry, { + description: 'LoRA weight configuration.', + }) + +/** + * ZImageTurboTextToImageLoRAInput + */ +export const zSchemaZImageTurboLoraInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: 'The acceleration level to use.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + loras: z + .optional( + z.array(zSchemaLoRaInput).register(z.globalRegistry, { + description: 'List of LoRA weights to apply (maximum 3).', + }), + ) + .default([]), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable prompt expansion. 
Note: this will increase the price by 0.0025 credits per request.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(8), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * TextToImageOutput + */ +export const zSchemaViduQ2TextToImageOutput = z.object({ + image: zSchemaImage, +}) + +/** + * TextToImageRequest + */ +export const zSchemaViduQ2TextToImageInput = z.object({ + prompt: z.string().max(1500).register(z.globalRegistry, { + description: 'Text prompt for video generation, max 1500 characters', + }), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: 'The aspect ratio of the output video', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for generation', + }), + ), +}) + +/** + * SeedDream45T2IOutput + */ +export const zSchemaBytedanceSeedreamV45TextToImageOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Generated images', + }), + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for generation', + }), +}) + +/** + * SeedDream45T2IInput + */ +export const zSchemaBytedanceSeedreamV45TextToImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt used to generate the image', + }), + num_images: z + .optional( + z.int().gte(1).lte(6).register(z.globalRegistry, { + description: + 'Number of separate model generations to be run with the prompt.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + 'auto_2K', + 'auto_4K', + ]), + ]), + ), + max_images: z + .optional( + z.int().gte(1).lte(6).register(z.globalRegistry, { + description: + 'If set to a number greater than one, enables multi-image generation. The model will potentially return up to `max_images` images every generation, and in total, `num_images` generations will be carried out. 
In total, the number of images generated will be between `num_images` and `max_images*num_images`.', + }), + ) + .default(1), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed to control the stochasticity of image generation.', + }), + ), +}) + +/** + * TextToImageOutput + */ +export const zSchemaLongcatImageOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * TextToImageInput + */ +export const zSchemaLongcatImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: 'The acceleration level to use.', + }), + ), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: 'The guidance scale to use for the image generation.', + }), + ) + .default(4.5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), +}) + +/** + * Flux2MaxOutput + */ +export const zSchemaFlux2MaxOutput = z.object({ + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated images.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The 
seed used for the generation.', + }), +}) + +/** + * Flux2MaxTextToImageInput + */ +export const zSchemaFlux2MaxInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + safety_tolerance: z.optional( + z.enum(['1', '2', '3', '4', '5']).register(z.globalRegistry, { + description: + 'The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive.', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed to use for the generation.', + }), + ), +}) + +/** + * Flux2TurboT2IOutput + */ +export const zSchemaFlux2TurboOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated images', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + timings: z.record(z.string(), z.number()), +}) + +/** + * Flux2TurboTextToImageInput + */ +export const zSchemaFlux2TurboInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'Guidance Scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.', + }), + ) + .default(2.5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The seed to use for the generation. 
If not provided, a random seed will be used.', + }), + ), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the prompt will be expanded for better results.', + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * GaiaOutputModel + */ +export const zSchemaFiboLiteGenerateOutput = z.object({ + images: z + .optional( + z.array(z.record(z.string(), z.unknown())).register(z.globalRegistry, { + description: 'Generated images.', + }), + ) + .default([]), + image: zSchemaImage, + structured_prompt: z + .record(z.string(), z.unknown()) + .register(z.globalRegistry, { + description: 'Current prompt.', + }), +}) + +/** + * GaiaLiteInputModel + */ +export const zSchemaFiboLiteGenerateInput = z.object({ + prompt: z.optional(z.union([z.string(), z.unknown()])), + steps_num: z + .optional( + z.int().gte(4).lte(30).register(z.globalRegistry, { + description: 'Number of inference steps for Fibo Lite.', + }), + ) + .default(8), + aspect_ratio: z.optional( + z + .enum(['1:1', '2:3', '3:2', '3:4', '4:3', '4:5', '5:4', '9:16', '16:9']) + .register(z.globalRegistry, { + description: + 'Aspect ratio. Options: 1:1, 2:3, 3:2, 3:4, 4:3, 4:5, 5:4, 9:16, 16:9', + }), + ), + image_url: z.optional(z.union([z.string(), z.unknown()])), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, returns the image directly in the response (increases latency).', + }), + ) + .default(false), + seed: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for reproducibility.', + }), + ) + .default(5555), + structured_prompt: z.optional( + z.union([zSchemaStructuredPrompt, z.unknown()]), + ), +}) + +/** + * ImageResponse + */ +export const zSchemaGptImage15Output = z.object({ + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated images.', + }), +}) + +/** + * TextToImageRequest + */ +export const zSchemaGptImage15Input = z.object({ + prompt: z.string().min(2).register(z.globalRegistry, { + description: 'The prompt for image generation', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate', + }), + ) + .default(1), + image_size: z.optional( + z.enum(['1024x1024', '1536x1024', '1024x1536']).register(z.globalRegistry, { + description: 'Aspect ratio for the generated image', + }), + ), + background: z.optional( + z.enum(['auto', 'transparent', 'opaque']).register(z.globalRegistry, { + description: 'Background for the generated image', + }), + ), + quality: z.optional( + z.enum(['low', 'medium', 'high']).register(z.globalRegistry, { + description: 'Quality for the generated image', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'Output format for the images', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), +}) + +/** + * Flux2FlashT2IOutput + */ +export const zSchemaFlux2FlashOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: 
z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated images', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + timings: z.record(z.string(), z.number()), +}) + +/** + * Flux2FlashTextToImageInput + */ +export const zSchemaFlux2FlashInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'Guidance Scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.', + }), + ) + .default(2.5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The seed to use for the generation. If not provided, a random seed will be used.', + }), + ), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the prompt will be expanded for better results.', + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * TextToImageWanOutput + * + * Output for Wan 2.6 text-to-image (can include generated text in mixed mode) + */ +export const zSchemaV26TextToImageOutput = z + .object({ + images: z.array(zSchemaFile).register(z.globalRegistry, { + description: 'Generated images in PNG format', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), + generated_text: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Generated text content (in mixed text-and-image mode). May be None if only images were generated.', + }), + ), + }) + .register(z.globalRegistry, { + description: + 'Output for Wan 2.6 text-to-image (can include generated text in mixed mode)', + }) + +/** + * TextToImageWanInput + * + * Input for Wan 2.6 text-to-image or mixed text-and-image generation (enable_interleave=true) + */ +export const zSchemaV26TextToImageInput = z + .object({ + prompt: z.string().min(1).register(z.globalRegistry, { + description: + 'Text prompt describing the desired image. Supports Chinese and English. 
Max 2000 characters.', + }), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + max_images: z + .optional( + z.int().gte(1).lte(5).register(z.globalRegistry, { + description: + 'Maximum number of images to generate (1-5). Actual count may be less depending on model inference.', + }), + ) + .default(1), + image_url: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Optional reference image (0 or 1). When provided, can be used for style guidance. Resolution: 384-5000px each dimension. Max size: 10MB. Formats: JPEG, JPG, PNG (no alpha), BMP, WEBP.', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Enable content moderation for input and output.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for reproducibility (0-2147483647).', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Content to avoid in the generated image. Max 500 characters.', + }), + ) + .default(''), + }) + .register(z.globalRegistry, { + description: + 'Input for Wan 2.6 text-to-image or mixed text-and-image generation (enable_interleave=true)', + }) + +/** + * QwenImage2512Output + */ +export const zSchemaQwenImage2512Output = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * TextToImageInput + */ +export const zSchemaQwenImage2512Input = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: 'The acceleration level to use.', + }), + ), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: 'The guidance scale to use for the image generation.', + }), + ) + .default(4), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to generate an image from.', + }), + ) + .default(''), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), +}) + +/** + * QwenImage2512Output + */ +export const zSchemaQwenImage2512LoraOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * LoraInput + */ +export const zSchemaQwenImage2512LoraInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: 'The acceleration level to use.', + }), + ), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. You can use up to 3 LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: 'The guidance scale to use for the image generation.', + }), + ) + .default(4), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to generate an image from.', + }), + ) + .default(''), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), +}) + +/** + * GlmImageOutput + */ +export const zSchemaGlmImageOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'List of URLs to the generated images.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + timings: z.record(z.string(), z.number()), +}) + +/** + * GlmImageInput + */ +export const zSchemaGlmImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Text prompt for image generation.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'Number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + 'portrait_3_2', + 'landscape_3_2', + 'portrait_hd', + 'landscape_hd', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'Output image format.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If True, the image will be returned as a base64 data URI instead of a URL.', + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Classifier-free guidance scale. Higher values make the model follow the prompt more closely.', + }), + ) + .default(1.5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. The same seed with the same prompt will produce the same image.', + }), + ), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If True, the prompt will be enhanced using an LLM for more detailed and higher quality results.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(10).lte(100).register(z.globalRegistry, { + description: + 'Number of diffusion denoising steps. 
More steps generally produce higher quality images.', + }), + ) + .default(30), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Enable NSFW safety checking on the generated images.', + }), + ) + .default(true), +}) + +/** + * ImagineArt_1_5_Output + */ +export const zSchemaImagineart15ProPreviewTextToImageOutput = z.object({ + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Generated image', + }), +}) + +/** + * ImagineArt_1_5_Input + */ +export const zSchemaImagineart15ProPreviewTextToImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Text prompt describing the desired image', + }), + aspect_ratio: z.optional( + z + .enum(['1:1', '16:9', '9:16', '4:3', '3:4', '3:1', '1:3', '3:2', '2:3']) + .register(z.globalRegistry, { + description: + 'Image aspect ratio: 1:1, 3:1, 1:3, 16:9, 9:16, 4:3, 3:4, 3:2, 2:3', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Seed for the image generation', + }), + ), +}) + +/** + * Klein4BDistilledT2IOutput + */ +export const zSchemaFlux2Klein4bOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated images', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * KleinDistilledInput + */ +export const zSchemaFlux2Klein4bInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If `True`, the media will be returned as a data URI. Output is not stored when this is True.', + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(4).lte(8).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(4), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The seed to use for the generation. 
If not provided, a random seed will be used.', + }), + ), +}) + +/** + * Klein9BDistilledT2IOutput + */ +export const zSchemaFlux2Klein9bOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated images', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * Klein9BDistilledInput + */ +export const zSchemaFlux2Klein9bInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If `True`, the media will be returned as a data URI. Output is not stored when this is True.', + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(4).lte(8).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(4), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The seed to use for the generation. If not provided, a random seed will be used.', + }), + ), +}) + +/** + * Klein4BT2IOutput + */ +export const zSchemaFlux2Klein4bBaseOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated images', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * Klein4BBaseInput + */ +export const zSchemaFlux2Klein4bBaseInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: 'The acceleration level to use for image generation.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If `True`, the media will be returned as a data URI. Output is not stored when this is True.', + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(4).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The seed to use for the generation. If not provided, a random seed will be used.', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Negative prompt for classifier-free guidance. Describes what to avoid in the image.', + }), + ) + .default(''), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: 'Guidance scale for classifier-free guidance.', + }), + ) + .default(5), +}) + +/** + * Klein9BT2IOutput + */ +export const zSchemaFlux2Klein9bBaseOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated images', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * Klein9BBaseInput + */ +export const zSchemaFlux2Klein9bBaseInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: 'The acceleration level to use for image generation.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If `True`, the media will be returned as a data URI. Output is not stored when this is True.', + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(4).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The seed to use for the generation. If not provided, a random seed will be used.', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Negative prompt for classifier-free guidance. Describes what to avoid in the image.', + }), + ) + .default(''), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: 'Guidance scale for classifier-free guidance.', + }), + ) + .default(5), +}) + +/** + * KleinT2IOutput + */ +export const zSchemaFlux2Klein4bBaseLoraOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated images', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * LoRAInput + */ +export const zSchemaFalAiFlux2KleinLoRaInput = z.object({ + path: z.string().register(z.globalRegistry, { + description: + 'URL, HuggingFace repo ID (owner/repo), or local path to LoRA weights.', + }), + scale: z + .optional( + z.number().gte(0).lte(4).register(z.globalRegistry, { + description: 'Scale factor for LoRA application (0.0 to 4.0).', + }), + ) + .default(1), +}) + +/** + * KleinBaseLoRAInput + */ +export const zSchemaFlux2Klein4bBaseLoraInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: 'The acceleration level to use for image generation.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + loras: z + .optional( + z.array(zSchemaFalAiFlux2KleinLoRaInput).register(z.globalRegistry, { + description: 'List of LoRA weights to apply (maximum 3).', + }), + ) + .default([]), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If `True`, the media will be returned as a data URI. Output is not stored when this is True.', + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(4).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The seed to use for the generation. If not provided, a random seed will be used.', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Negative prompt for classifier-free guidance. Describes what to avoid in the image.', + }), + ) + .default(''), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: 'Guidance scale for classifier-free guidance.', + }), + ) + .default(5), +}) + +/** + * KleinT2IOutput + */ +export const zSchemaFlux2Klein9bBaseLoraOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated images', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * KleinBaseLoRAInput + */ +export const zSchemaFlux2Klein9bBaseLoraInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: 'The acceleration level to use for image generation.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + loras: z + .optional( + z.array(zSchemaFalAiFlux2KleinLoRaInput).register(z.globalRegistry, { + description: 'List of LoRA weights to apply (maximum 3).', + }), + ) + .default([]), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If `True`, the media will be returned as a data URI. Output is not stored when this is True.', + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(4).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The seed to use for the generation. If not provided, a random seed will be used.', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Negative prompt for classifier-free guidance. Describes what to avoid in the image.', + }), + ) + .default(''), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: 'Guidance scale for classifier-free guidance.', + }), + ) + .default(5), +}) + +/** + * ZImageBaseOutput + */ +export const zSchemaZImageBaseOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + seed: z.int().register(z.globalRegistry, { + description: + 'Seed of the generated Image. 
It will be the same value of the one passed in the input or the randomly generated that was used in case none was passed.', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + timings: z.record(z.string(), z.number()).register(z.globalRegistry, { + description: 'The timings of the generation process.', + }), +}) + +/** + * ZImageBaseTextToImageInput + */ +export const zSchemaZImageBaseInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: 'The acceleration level to use.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: 'The guidance scale to use for the image generation.', + }), + ) + .default(4), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to use for the image generation.', + }), + ) + .default(''), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), +}) + +/** + * ZImageBaseOutput + */ +export const zSchemaZImageBaseLoraOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + seed: z.int().register(z.globalRegistry, { + description: + 'Seed of the generated Image. 
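+
+// [editor's sketch — illustrative, not generated] Parsing applies the declared
+// defaults, so callers only need the required `prompt`; the prompt text here
+// is a hypothetical example:
+//
+//   type ZImageInput = z.infer<typeof zSchemaZImageBaseInput>
+//   const input: ZImageInput = zSchemaZImageBaseInput.parse({
+//     prompt: 'a lighthouse in thick fog',
+//   })
+//   // input.num_images === 1, input.guidance_scale === 4,
+//   // input.num_inference_steps === 28, input.enable_safety_checker === true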
It will be the same value of the one passed in the input or the randomly generated that was used in case none was passed.', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + timings: z.record(z.string(), z.number()).register(z.globalRegistry, { + description: 'The timings of the generation process.', + }), +}) + +/** + * ZImageBaseTextToImageLoRAInput + */ +export const zSchemaZImageBaseLoraInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: 'The acceleration level to use.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + loras: z + .optional( + z.array(zSchemaLoRaInput).register(z.globalRegistry, { + description: 'List of LoRA weights to apply (maximum 3).', + }), + ) + .default([]), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: 'The guidance scale to use for the image generation.', + }), + ) + .default(4), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to use for the image generation.', + }), + ) + .default(''), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * Output + */ +export const zSchemaFluxLoraOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * TextToImageInput + */ +export const zSchemaFluxLoraInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: + 'The number of images to generate. This is always set to 1 for streaming output.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(35).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * ControlNetUnionInput + */ +export const zSchemaControlNetUnionInput = z.object({ + conditioning_scale: z + .optional( + z.number().gte(0).lte(2).register(z.globalRegistry, { + description: + '\n The scale of the control net weight. This is used to scale the control net weight\n before merging it with the base model.\n ', + }), + ) + .default(1), + mask_threshold: z + .optional( + z.number().gte(0.01).lte(0.99).register(z.globalRegistry, { + description: 'Threshold for mask.', + }), + ) + .default(0.5), + end_percentage: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n The percentage of the image to end applying the controlnet in terms of the total timesteps.\n ', + }), + ) + .default(1), + mask_image_url: z.optional(z.union([z.string(), z.null()])), + control_image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to be used as the control image.', + }), + control_mode: z + .enum(['canny', 'tile', 'depth', 'blur', 'pose', 'gray', 'low-quality']) + .register(z.globalRegistry, { + description: + 'Control Mode for Flux Controlnet Union. 
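+
+// [editor's sketch — illustrative, not generated] `image_size` is a union, so
+// zSchemaFluxLoraInput accepts either a preset name or a custom
+// zSchemaImageSize object (defined earlier in this file); the prompt text is
+// hypothetical:
+//
+//   zSchemaFluxLoraInput.parse({
+//     prompt: 'watercolor mountains at dawn',
+//     image_size: 'landscape_16_9', // preset branch of the union
+//   })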
Supported values are:\n - canny: Uses the edges for guided generation.\n - tile: Uses the tiles for guided generation.\n - depth: Utilizes a grayscale depth map for guided generation.\n - blur: Adds a blur to the image.\n - pose: Uses the pose of the image for guided generation.\n - gray: Converts the image to grayscale.\n - low-quality: Converts the image to a low-quality image.', + }), + start_percentage: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n The percentage of the image to start applying the controlnet in terms of the total timesteps.\n ', + }), + ) + .default(0), +}) + +/** + * ControlNetUnion + */ +export const zSchemaControlNetUnion = z.object({ + controls: z.array(zSchemaControlNetUnionInput).register(z.globalRegistry, { + description: 'The control images and modes to use for the control net.', + }), + path: z.string().register(z.globalRegistry, { + description: 'URL or the path to the control net weights.', + }), + variant: z.optional( + z.string().register(z.globalRegistry, { + description: 'The optional variant if a Hugging Face repo key is used.', + }), + ), + config_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'optional URL to the controlnet config.json file.', + }), + ), +}) + +/** + * ImageFillInput + */ +export const zSchemaImageFillInput = z.object({ + fill_image_url: z.optional(z.union([z.string(), z.array(z.string())])), +}) + +/** + * EasyControlWeight + */ +export const zSchemaEasyControlWeight = z.object({ + scale: z + .optional( + z.number().gte(0).lte(2).register(z.globalRegistry, { + description: 'Scale for the control method.', + }), + ) + .default(1), + image_control_type: z + .enum(['subject', 'spatial']) + .register(z.globalRegistry, { + description: + 'Control type of the image. Must be one of `spatial` or `subject`.', + }), + control_method_url: z.string().register(z.globalRegistry, { + description: + 'URL to safetensor weights of control method to be applied. Can also be one of `canny`, `depth`, `hedsketch`, `inpainting`, `pose`, `seg`, `subject`, `ghibli` ', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of an image to use as a control', + }), +}) + +/** + * ControlLoraWeight + */ +export const zSchemaControlLoraWeight = z.object({ + path: z.string().register(z.globalRegistry, { + description: 'URL or the path to the LoRA weights.', + }), + scale: z.optional( + z.union([z.record(z.string(), z.unknown()), z.number().gte(-4).lte(4)]), + ), + control_image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to be used as the control image.', + }), + preprocess: z.optional( + z.enum(['canny', 'depth', 'None']).register(z.globalRegistry, { + description: 'Type of preprocessing to apply to the input image.', + }), + ), +}) + +/** + * Output + */ +export const zSchemaFluxGeneralOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
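+
+// [editor's sketch — illustrative, not generated] A minimal control entry:
+// only `control_image_url` and `control_mode` are required; everything else
+// falls back to its default. The image URL is a hypothetical placeholder:
+//
+//   const control = zSchemaControlNetUnionInput.parse({
+//     control_image_url: 'https://example.com/depth-map.png',
+//     control_mode: 'depth',
+//   })
+//   // control.conditioning_scale === 1, control.mask_threshold === 0.5,
+//   // control.start_percentage === 0, control.end_percentage === 1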
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * TextToImageInput + */ +export const zSchemaFluxGeneralInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + nag_end: z + .optional( + z.number().lte(1).register(z.globalRegistry, { + description: + '\n The proportion of steps to apply NAG. After the specified proportion\n of steps has been iterated, the remaining steps will use original\n attention processors in FLUX.\n ', + }), + ) + .default(0.25), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + control_loras: z + .optional( + z.array(zSchemaControlLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation which use a control image. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + scheduler: z.optional( + z.enum(['euler', 'dpmpp_2m']).register(z.globalRegistry, { + description: 'Scheduler for the denoising process.', + }), + ), + easycontrols: z + .optional( + z.array(zSchemaEasyControlWeight).register(z.globalRegistry, { + description: + '\n EasyControl Inputs to use for image generation.\n ', + }), + ) + .default([]), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + real_cfg_scale: z + .optional( + z.number().gte(0).lte(5).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + use_cfg_zero: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n Uses CFG-zero init sampling as in https://arxiv.org/abs/2503.18886.\n ', + }), + ) + .default(false), + fill_image: z.optional(zSchemaImageFillInput), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + sigma_schedule: z.optional( + z.enum(['sgm_uniform']).register(z.globalRegistry, { + description: 'Sigmas schedule for the denoising process.', + }), + ), + reference_end: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n The percentage of the total timesteps when the reference guidance is to be ended.\n ', + }), + ) + .default(1), + reference_strength: z + .optional( + z.number().gte(-3).lte(3).register(z.globalRegistry, { + description: + 'Strength of reference_only generation. 
Only used if a reference image is provided.', + }), + ) + .default(0.65), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + nag_scale: z + .optional( + z.number().lte(10).register(z.globalRegistry, { + description: + '\n The scale for NAG. Higher values will result in a image that is more distant\n to the negative prompt.\n ', + }), + ) + .default(3), + reference_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'URL of Image for Reference-Only', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + controlnet_unions: z + .optional( + z.array(zSchemaControlNetUnion).register(z.globalRegistry, { + description: + '\n The controlnet unions to use for the image generation. Only one controlnet is supported at the moment.\n ', + }), + ) + .default([]), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + '\n Negative prompt to steer the image generation away from unwanted features.\n By default, we will be using NAG for processing the negative prompt.\n ', + }), + ) + .default(''), + nag_tau: z + .optional( + z.number().register(z.globalRegistry, { + description: + '\n The tau for NAG. Controls the normalization of the hidden state.\n Higher values will result in a less aggressive normalization,\n but may also lead to unexpected changes with respect to the original image.\n Not recommended to change this value.\n ', + }), + ) + .default(2.5), + num_images: z + .optional( + z.int().gte(1).lte(10).register(z.globalRegistry, { + description: + 'The number of images to generate. This is always set to 1 for streaming output.', + }), + ) + .default(1), + use_beta_schedule: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Specifies whether beta sigmas ought to be used.', + }), + ) + .default(false), + ip_adapters: z + .optional( + z.array(zSchemaIpAdapter).register(z.globalRegistry, { + description: + '\n IP-Adapter to use for image generation.\n ', + }), + ) + .default([]), + base_shift: z + .optional( + z.number().gte(0.01).lte(5).register(z.globalRegistry, { + description: 'Base shift for the scheduled timesteps', + }), + ) + .default(0.5), + nag_alpha: z + .optional( + z.number().lte(1).register(z.globalRegistry, { + description: + '\n The alpha value for NAG. This value is used as a final weighting\n factor for steering the normalized guidance (positive and negative prompts)\n in the direction of the positive prompt. Higher values will result in less\n steering on the normalized guidance where lower values will result in\n considering the positive prompt guidance more.\n ', + }), + ) + .default(0.25), + use_real_cfg: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n Uses classical CFG as in SD1.5, SDXL, etc. 
Increases generation times and price when set to be true.\n If using XLabs IP-Adapter v1, this will be turned on.\n ',
+      }),
+    )
+    .default(false),
+  max_shift: z
+    .optional(
+      z.number().gte(0.01).lte(5).register(z.globalRegistry, {
+        description: 'Max shift for the scheduled timesteps',
+      }),
+    )
+    .default(1.15),
+  num_inference_steps: z
+    .optional(
+      z.int().gte(1).lte(50).register(z.globalRegistry, {
+        description: 'The number of inference steps to perform.',
+      }),
+    )
+    .default(28),
+  controlnets: z
+    .optional(
+      z.array(zSchemaControlNet).register(z.globalRegistry, {
+        description:
+          '\n The controlnets to use for the image generation. Only one controlnet is supported at the moment.\n ',
+      }),
+    )
+    .default([]),
+  reference_start: z
+    .optional(
+      z.number().gte(0).lte(1).register(z.globalRegistry, {
+        description:
+          '\n The percentage of the total timesteps when the reference guidance is to be started.\n ',
+      }),
+    )
+    .default(0),
+})
+
+/**
+ * Output
+ */
+export const zSchemaStableDiffusionV35LargeOutput = z.object({
+  prompt: z.string().register(z.globalRegistry, {
+    description: 'The prompt used for generating the image.',
+  }),
+  images: z.array(zSchemaImage).register(z.globalRegistry, {
+    description: 'The generated image files info.',
+  }),
+  seed: z.int().register(z.globalRegistry, {
+    description:
+      '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ',
+  }),
+  has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, {
+    description: 'Whether the generated images contain NSFW concepts.',
+  }),
+  timings: z.record(z.string(), z.number()),
+})
+
+/**
+ * TextToImageInput
+ */
+export const zSchemaStableDiffusionV35LargeInput = z.object({
+  prompt: z.string().register(z.globalRegistry, {
+    description: 'The prompt to generate an image from.',
+  }),
+  num_images: z
+    .optional(
+      z.int().gte(1).lte(4).register(z.globalRegistry, {
+        description: 'The number of images to generate.',
+      }),
+    )
+    .default(1),
+  image_size: z.optional(
+    z.union([
+      zSchemaImageSize,
+      z.enum([
+        'square_hd',
+        'square',
+        'portrait_4_3',
+        'portrait_16_9',
+        'landscape_4_3',
+        'landscape_16_9',
+      ]),
+    ]),
+  ),
+  controlnet: z.optional(zSchemaControlNet),
+  output_format: z.optional(
+    z.enum(['jpeg', 'png']).register(z.globalRegistry, {
+      description: 'The format of the generated image.',
+    }),
+  ),
+  ip_adapter: z.optional(zSchemaIpAdapter),
+  sync_mode: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description:
+          "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.",
+      }),
+    )
+    .default(false),
+  loras: z
+    .optional(
+      z.array(zSchemaLoraWeight).register(z.globalRegistry, {
+        description:
+          '\n The LoRAs to use for the image generation.
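+
+// [editor's sketch — illustrative, not generated] On zSchemaFluxGeneralInput
+// above, the negative prompt is processed via NAG by default, steered by
+// nag_scale / nag_tau / nag_alpha; the prompt texts are hypothetical:
+//
+//   zSchemaFluxGeneralInput.parse({
+//     prompt: 'portrait in golden hour light',
+//     negative_prompt: 'overexposed, blown highlights',
+//     nag_scale: 5,
+//   })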
You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), +}) + +/** + * Output + */ +export const zSchemaIdeogramV2Output = z.object({ + images: z.array(zSchemaFile), + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for the random number generator', + }), +}) + +/** + * TextToImageInput + */ +export const zSchemaIdeogramV2Input = z.object({ + prompt: z.string(), + aspect_ratio: z.optional( + z + .enum([ + '10:16', + '16:10', + '9:16', + '16:9', + '4:3', + '3:4', + '1:1', + '1:3', + '3:1', + '3:2', + '2:3', + ]) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated image', + }), + ), + style: z.optional( + z + .enum(['auto', 'general', 'realistic', 'design', 'render_3D', 'anime']) + .register(z.globalRegistry, { + description: 'The style of the generated image', + }), + ), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to expand the prompt with MagicPrompt functionality.', + }), + ) + .default(true), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + seed: z.optional(z.union([z.int(), z.unknown()])), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'A negative prompt to avoid in the generated image', + }), + ) + .default(''), +}) + +/** + * Output + */ +export const zSchemaFluxDevOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
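+
+// [editor's sketch — illustrative, not generated] `safeParse` reports enum
+// violations without throwing; '21:9' is deliberately outside the aspect
+// ratios accepted by zSchemaIdeogramV2Input, and the prompt is hypothetical:
+//
+//   const parsed = zSchemaIdeogramV2Input.safeParse({
+//     prompt: 'poster of a rocket launch',
+//     aspect_ratio: '21:9',
+//   })
+//   // parsed.success === false; parsed.error.issues lists the enum mismatch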
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * BaseInput + */ +export const zSchemaFluxDevInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: + 'The speed of the generation. The higher the speed, the faster the generation.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + seed: z.optional(z.union([z.int(), z.unknown()])), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(3.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), +}) + +/** + * Output + */ +export const zSchemaHidreamI1FastOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * FastInput + */ +export const zSchemaHidreamI1FastInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(16), + seed: z.optional(z.union([z.int(), z.unknown()])), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), +}) + +/** + * Output + */ +export const zSchemaHidreamI1DevOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
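+
+// [editor's sketch — illustrative, not generated] Output schemas can validate
+// a raw API response before use; `res` and the `url` field on image entries
+// are assumptions about the surrounding fetch code and the zSchemaImage shape:
+//
+//   const out = zSchemaHidreamI1FastOutput.parse(await res.json())
+//   for (const img of out.images) console.log(img.url)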
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * DevInput + */ +export const zSchemaHidreamI1DevInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), +}) + +export const zSchemaHidreamI1FullOutput = z.unknown() + +/** + * TextToImageInput + */ +export const zSchemaHidreamI1FullInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + 'A list of LoRAs to apply to the model. 
Each LoRA specifies its path, scale, and optional weight name.', + }), + ) + .default([]), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(50), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default(''), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * Imagen4TextToImageFastOutput + */ +export const zSchemaImagen4PreviewFastOutput = z.object({ + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated images.', + }), + description: z.string().register(z.globalRegistry, { + description: 'The description of the generated images.', + }), +}) + +/** + * Imagen4TextToImageFastInput + */ +export const zSchemaImagen4PreviewFastInput = z.object({ + prompt: z.string().min(3).max(5000).register(z.globalRegistry, { + description: 'The text prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + aspect_ratio: z.optional( + z.enum(['1:1', '16:9', '9:16', '4:3', '3:4']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), +}) + +/** + * OutputModel + */ +export const zSchemaTextToImage32Output = z.object({ + image: zSchemaImage, +}) + +/** + * InputModel + */ +export const zSchemaTextToImage32Input = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Prompt for image generation.', + }), + aspect_ratio: z.optional( + z + .enum(['1:1', '2:3', '3:2', '3:4', '4:3', '4:5', '5:4', '9:16', '16:9']) + .register(z.globalRegistry, { + description: + 'Aspect ratio. 
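+
+// [editor's sketch — illustrative, not generated] The Imagen 4 prompt carries
+// explicit length bounds (3 to 5000 characters), so an overly short prompt
+// fails at parse time:
+//
+//   zSchemaImagen4PreviewFastInput.parse({ prompt: 'hi' })
+//   // throws a ZodError: prompt is below the 3-character minimum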
Options: 1:1, 2:3, 3:2, 3:4, 4:3, 4:5, 5:4, 9:16, 16:9', + }), + ), + prompt_enhancer: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to improve the prompt.', + }), + ) + .default(true), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, returns the image directly in the response (increases latency).', + }), + ) + .default(false), + truncate_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to truncate the prompt.', + }), + ) + .default(true), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'Guidance scale for text.', + }), + ) + .default(5), + num_inference_steps: z + .optional( + z.int().gte(20).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps.', + }), + ) + .default(30), + seed: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for reproducibility.', + }), + ) + .default(5555), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for image generation.', + }), + ) + .default( + 'Logo,Watermark,Ugly,Morbid,Extra fingers,Poorly drawn hands,Mutation,Blurry,Extra limbs,Gross proportions,Missing arms,Mutated hands,Long neck,Duplicate,Mutilated,Mutilated hands,Poorly drawn face,Deformed,Bad anatomy,Cloned face,Malformed limbs,Missing legs,Too many fingers', + ), +}) + +/** + * Flux2ProOutput + */ +export const zSchemaFlux2ProOutput = z.object({ + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated images.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for the generation.', + }), +}) + +/** + * Flux2ProTextToImageInput + */ +export const zSchemaFlux2ProInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + safety_tolerance: z.optional( + z.enum(['1', '2', '3', '4', '5']).register(z.globalRegistry, { + description: + 'The safety tolerance level for the generated image. 1 being the most strict and 5 being the most permissive.', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed to use for the generation.', + }), + ), +}) + +/** + * Flux2T2IOutput + */ +export const zSchemaFlux2Output = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated images', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
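+
+// [editor's sketch — illustrative, not generated] Note that safety tolerance
+// levels on zSchemaFlux2ProInput are string literals, not numbers; the prompt
+// text is hypothetical:
+//
+//   zSchemaFlux2ProInput.parse({
+//     prompt: 'studio photo of a ceramic vase',
+//     safety_tolerance: '3', // '3', not 3
+//   })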
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + timings: z.record(z.string(), z.number()), +}) + +/** + * Flux2TextToImageInput + */ +export const zSchemaFlux2Input = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: 'The acceleration level to use for the image generation.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'Guidance Scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.', + }), + ) + .default(2.5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The seed to use for the generation. If not provided, a random seed will be used.', + }), + ), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the prompt will be expanded for better results.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(4).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * Flux2T2ILoRAOutput + */ +export const zSchemaFlux2LoraOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated images', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + timings: z.record(z.string(), z.number()), +}) + +/** + * Flux2TextToImageLoRAInput + */ +export const zSchemaFlux2LoraInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: 'The acceleration level to use for the image generation.', + }), + ), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + loras: z + .optional( + z.array(zSchemaLoRaInput).register(z.globalRegistry, { + description: + 'List of LoRA weights to apply (maximum 3). Each LoRA can be a URL, HuggingFace repo ID, or local path.', + }), + ) + .default([]), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'Guidance Scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.', + }), + ) + .default(2.5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The seed to use for the generation. 
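+
+// [editor's note — observation plus sketch, not generated] The "maximum 3"
+// limit in the loras description above is documentation only; the array
+// carries no .max(3). A stricter local variant, if client-side enforcement
+// is wanted:
+//
+//   const zStrictFlux2LoraInput = zSchemaFlux2LoraInput.extend({
+//     loras: z.array(zSchemaLoRaInput).max(3).default([]),
+//   })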
If not provided, a random seed will be used.', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(4).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(28), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, the prompt will be expanded for better results.', + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), +}) + +/** + * TextToImageOutput + */ +export const zSchemaRecraftV3TextToImageOutput = z.object({ + images: z.array(zSchemaFile), +}) + +/** + * TextToImageInput + */ +export const zSchemaRecraftV3TextToImageInput = z.object({ + prompt: z.string().min(1).max(1000), + image_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + style: z.optional( + z + .enum([ + 'any', + 'realistic_image', + 'digital_illustration', + 'vector_illustration', + 'realistic_image/b_and_w', + 'realistic_image/hard_flash', + 'realistic_image/hdr', + 'realistic_image/natural_light', + 'realistic_image/studio_portrait', + 'realistic_image/enterprise', + 'realistic_image/motion_blur', + 'realistic_image/evening_light', + 'realistic_image/faded_nostalgia', + 'realistic_image/forest_life', + 'realistic_image/mystic_naturalism', + 'realistic_image/natural_tones', + 'realistic_image/organic_calm', + 'realistic_image/real_life_glow', + 'realistic_image/retro_realism', + 'realistic_image/retro_snapshot', + 'realistic_image/urban_drama', + 'realistic_image/village_realism', + 'realistic_image/warm_folk', + 'digital_illustration/pixel_art', + 'digital_illustration/hand_drawn', + 'digital_illustration/grain', + 'digital_illustration/infantile_sketch', + 'digital_illustration/2d_art_poster', + 'digital_illustration/handmade_3d', + 'digital_illustration/hand_drawn_outline', + 'digital_illustration/engraving_color', + 'digital_illustration/2d_art_poster_2', + 'digital_illustration/antiquarian', + 'digital_illustration/bold_fantasy', + 'digital_illustration/child_book', + 'digital_illustration/child_books', + 'digital_illustration/cover', + 'digital_illustration/crosshatch', + 'digital_illustration/digital_engraving', + 'digital_illustration/expressionism', + 'digital_illustration/freehand_details', + 'digital_illustration/grain_20', + 'digital_illustration/graphic_intensity', + 'digital_illustration/hard_comics', + 'digital_illustration/long_shadow', + 'digital_illustration/modern_folk', + 'digital_illustration/multicolor', + 'digital_illustration/neon_calm', + 'digital_illustration/noir', + 'digital_illustration/nostalgic_pastel', + 'digital_illustration/outline_details', + 'digital_illustration/pastel_gradient', + 'digital_illustration/pastel_sketch', + 'digital_illustration/pop_art', + 'digital_illustration/pop_renaissance', + 'digital_illustration/street_art', + 'digital_illustration/tablet_sketch', + 'digital_illustration/urban_glow', + 'digital_illustration/urban_sketching', + 'digital_illustration/vanilla_dreams', + 'digital_illustration/young_adult_book', + 'digital_illustration/young_adult_book_2', + 'vector_illustration/bold_stroke', + 'vector_illustration/chemistry', + 'vector_illustration/colored_stencil', + 'vector_illustration/contour_pop_art', + 'vector_illustration/cosmics', + 
'vector_illustration/cutout', + 'vector_illustration/depressive', + 'vector_illustration/editorial', + 'vector_illustration/emotional_flat', + 'vector_illustration/infographical', + 'vector_illustration/marker_outline', + 'vector_illustration/mosaic', + 'vector_illustration/naivector', + 'vector_illustration/roundish_flat', + 'vector_illustration/segmented_colors', + 'vector_illustration/sharp_contrast', + 'vector_illustration/thin', + 'vector_illustration/vector_photo', + 'vector_illustration/vivid_shapes', + 'vector_illustration/engraving', + 'vector_illustration/line_art', + 'vector_illustration/line_circuit', + 'vector_illustration/linocut', + ]) + .register(z.globalRegistry, { + description: + 'The style of the generated images. Vector images cost 2X as much.', + }), + ), + colors: z + .optional( + z.array(zSchemaRgbColor).register(z.globalRegistry, { + description: 'An array of preferable colors', + }), + ) + .default([]), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(false), + style_id: z.optional( + z.string().register(z.globalRegistry, { + description: 'The ID of the custom style reference (optional)', + }), + ), +}) + +/** + * Output + */ +export const zSchemaFluxProV11UltraOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the image.', + }), + images: z + .array(zSchemaRegistryImageFastSdxlModelsImage) + .register(z.globalRegistry, { + description: 'The generated image files info.', + }), + timings: z.record(z.string(), z.number()), + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'Whether the generated images contain NSFW concepts.', + }), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * FluxProUltraTextToImageInput + */ +export const zSchemaFluxProV11UltraInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + aspect_ratio: z.optional( + z.union([ + z.enum([ + '21:9', + '16:9', + '4:3', + '3:2', + '1:1', + '2:3', + '3:4', + '9:16', + '9:21', + ]), + z.string(), + ]), + ), + enhance_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enhance the prompt for better results.', + }), + ) + .default(false), + output_format: z.optional( + z.enum(['jpeg', 'png']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), + image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The image URL to generate an image from.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + safety_tolerance: z.optional( + z.enum(['1', '2', '3', '4', '5', '6']).register(z.globalRegistry, { + description: + 'The safety tolerance level for the generated image. 
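+
+// [editor's sketch — illustrative, not generated] Recraft V3 input; note that
+// enable_safety_checker defaults to false on this endpoint, unlike most others
+// in this file, and vector styles are billed at twice the price per the
+// description above. The prompt text is hypothetical:
+//
+//   zSchemaRecraftV3TextToImageInput.parse({
+//     prompt: 'flat icon of a paper plane',
+//     style: 'vector_illustration/line_art',
+//   })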
1 being the most strict and 5 being the most permissive.', + }), + ), + image_prompt_strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The strength of the image prompt, between 0 and 1.', + }), + ) + .default(0.1), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same image every time.\n ', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + raw: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Generate less processed, more natural-looking images.', + }), + ) + .default(false), +}) + +/** + * Imagen4TextToImageOutput + */ +export const zSchemaImagen4PreviewOutput = z.object({ + images: z.array(zSchemaImageFile).register(z.globalRegistry, { + description: 'The generated images.', + }), + description: z.string().register(z.globalRegistry, { + description: 'The description of the generated images.', + }), +}) + +/** + * Imagen4TextToImageInput + */ +export const zSchemaImagen4PreviewInput = z.object({ + prompt: z.string().min(3).max(5000).register(z.globalRegistry, { + description: 'The text prompt to generate an image from.', + }), + num_images: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: 'The number of images to generate.', + }), + ) + .default(1), + aspect_ratio: z.optional( + z.enum(['1:1', '16:9', '9:16', '4:3', '3:4']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated image.', + }), + ), + resolution: z.optional( + z.enum(['1K', '2K']).register(z.globalRegistry, { + description: 'The resolution of the generated image.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + output_format: z.optional( + z.enum(['jpeg', 'png', 'webp']).register(z.globalRegistry, { + description: 'The format of the generated image.', + }), + ), +}) + +export const zSchemaQueueStatus = z.object({ + status: z.enum(['IN_QUEUE', 'IN_PROGRESS', 'COMPLETED']), + request_id: z.string().register(z.globalRegistry, { + description: 'The request id.', + }), + response_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The response url.', + }), + ), + status_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The status url.', + }), + ), + cancel_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The cancel url.', + }), + ), + logs: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The logs.', + }), + ), + metrics: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The metrics.', + }), + ), + queue_position: z.optional( + z.int().register(z.globalRegistry, { + description: 'The queue position.', + }), + ), +}) + +export const zGetFalAiImagen4PreviewRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in 
the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiImagen4PreviewRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImagen4PreviewRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiImagen4PreviewRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImagen4PreviewData = z.object({ + body: zSchemaImagen4PreviewInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImagen4PreviewResponse = zSchemaQueueStatus + +export const zGetFalAiImagen4PreviewRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiImagen4PreviewRequestsByRequestIdResponse = + zSchemaImagen4PreviewOutput + +export const zGetFalAiFluxProV11UltraRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFluxProV11UltraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxProV11UltraRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxProV11UltraRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxProV11UltraData = z.object({ + body: zSchemaFluxProV11UltraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxProV11UltraResponse = zSchemaQueueStatus + +export const zGetFalAiFluxProV11UltraRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
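+ *
+ * A minimal usage sketch, assuming the result JSON has already been fetched
+ * from the queue's `response_url` (`resultJson` below is illustrative, not
+ * defined in this file):
+ * @example
+ * // Validate a completed queue result against the generated schema.
+ * const output =
+ *   zGetFalAiFluxProV11UltraRequestsByRequestIdResponse.parse(resultJson)
+ * console.log(output.seed, output.images.length, output.has_nsfw_concepts)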
+ */ +export const zGetFalAiFluxProV11UltraRequestsByRequestIdResponse = + zSchemaFluxProV11UltraOutput + +export const zGetFalAiRecraftV3TextToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiRecraftV3TextToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiRecraftV3TextToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiRecraftV3TextToImageRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiRecraftV3TextToImageData = z.object({ + body: zSchemaRecraftV3TextToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiRecraftV3TextToImageResponse = zSchemaQueueStatus + +export const zGetFalAiRecraftV3TextToImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiRecraftV3TextToImageRequestsByRequestIdResponse = + zSchemaRecraftV3TextToImageOutput + +export const zGetFalAiFlux2LoraRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFlux2LoraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2LoraRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux2LoraRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2LoraData = z.object({ + body: zSchemaFlux2LoraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
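+ *
+ * Hedged sketch of validating the queue acknowledgement a submit call
+ * returns (`submitRes` is an assumed `fetch` Response, not defined here):
+ * @example
+ * const queued = zPostFalAiFlux2LoraResponse.parse(await submitRes.json())
+ * // `request_id` and `status` are required; the queue URLs are optional.
+ * console.log(queued.request_id, queued.status, queued.status_url)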
+ */ +export const zPostFalAiFlux2LoraResponse = zSchemaQueueStatus + +export const zGetFalAiFlux2LoraRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2LoraRequestsByRequestIdResponse = + zSchemaFlux2LoraOutput + +export const zGetFalAiFlux2RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFlux2RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux2RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2Data = z.object({ + body: zSchemaFlux2Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux2Response = zSchemaQueueStatus + +export const zGetFalAiFlux2RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2RequestsByRequestIdResponse = zSchemaFlux2Output + +export const zGetFalAiFlux2ProRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFlux2ProRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2ProRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux2ProRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2ProData = z.object({ + body: zSchemaFlux2ProInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiFlux2ProResponse = zSchemaQueueStatus + +export const zGetFalAiFlux2ProRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2ProRequestsByRequestIdResponse = + zSchemaFlux2ProOutput + +export const zGetBriaTextToImage32RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetBriaTextToImage32RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutBriaTextToImage32RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutBriaTextToImage32RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostBriaTextToImage32Data = z.object({ + body: zSchemaTextToImage32Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostBriaTextToImage32Response = zSchemaQueueStatus + +export const zGetBriaTextToImage32RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetBriaTextToImage32RequestsByRequestIdResponse = + zSchemaTextToImage32Output + +export const zGetFalAiImagen4PreviewFastRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImagen4PreviewFastRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImagen4PreviewFastRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
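+ *
+ * Sketch of a cancel round-trip; `status` is an assumed, previously parsed
+ * queue-status value, and the `fetch` wiring is illustrative:
+ * @example
+ * const res = await fetch(status.cancel_url!, { method: 'PUT' })
+ * const cancelled =
+ *   zPutFalAiImagen4PreviewFastRequestsByRequestIdCancelResponse.parse(
+ *     await res.json(),
+ *   )
+ * if (cancelled.success) console.log('request cancelled')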
+ */ +export const zPutFalAiImagen4PreviewFastRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImagen4PreviewFastData = z.object({ + body: zSchemaImagen4PreviewFastInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImagen4PreviewFastResponse = zSchemaQueueStatus + +export const zGetFalAiImagen4PreviewFastRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiImagen4PreviewFastRequestsByRequestIdResponse = + zSchemaImagen4PreviewFastOutput + +export const zGetFalAiHidreamI1FullRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiHidreamI1FullRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiHidreamI1FullRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiHidreamI1FullRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiHidreamI1FullData = z.object({ + body: zSchemaHidreamI1FullInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiHidreamI1FullResponse = zSchemaQueueStatus + +export const zGetFalAiHidreamI1FullRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiHidreamI1FullRequestsByRequestIdResponse = + zSchemaHidreamI1FullOutput + +export const zGetFalAiHidreamI1DevRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
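+ *
+ * Hedged polling sketch; `statusUrl` is assumed to be the `status_url` from
+ * the queued response, and the one-second delay is an arbitrary choice:
+ * @example
+ * let s = zGetFalAiHidreamI1DevRequestsByRequestIdStatusResponse.parse(
+ *   await (await fetch(statusUrl)).json(),
+ * )
+ * while (s.status !== 'COMPLETED') {
+ *   await new Promise((resolve) => setTimeout(resolve, 1000))
+ *   s = zGetFalAiHidreamI1DevRequestsByRequestIdStatusResponse.parse(
+ *     await (await fetch(statusUrl)).json(),
+ *   )
+ * }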
+ */ +export const zGetFalAiHidreamI1DevRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiHidreamI1DevRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiHidreamI1DevRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiHidreamI1DevData = z.object({ + body: zSchemaHidreamI1DevInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiHidreamI1DevResponse = zSchemaQueueStatus + +export const zGetFalAiHidreamI1DevRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiHidreamI1DevRequestsByRequestIdResponse = + zSchemaHidreamI1DevOutput + +export const zGetFalAiHidreamI1FastRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiHidreamI1FastRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiHidreamI1FastRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiHidreamI1FastRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiHidreamI1FastData = z.object({ + body: zSchemaHidreamI1FastInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiHidreamI1FastResponse = zSchemaQueueStatus + +export const zGetFalAiHidreamI1FastRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiHidreamI1FastRequestsByRequestIdResponse = + zSchemaHidreamI1FastOutput + +export const zGetFalAiFluxDevRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFluxDevRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxDevRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxDevRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxDevData = z.object({ + body: zSchemaFluxDevInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxDevResponse = zSchemaQueueStatus + +export const zGetFalAiFluxDevRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFluxDevRequestsByRequestIdResponse = zSchemaFluxDevOutput + +export const zGetFalAiIdeogramV2RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiIdeogramV2RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiIdeogramV2RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiIdeogramV2RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiIdeogramV2Data = z.object({ + body: zSchemaIdeogramV2Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiIdeogramV2Response = zSchemaQueueStatus + +export const zGetFalAiIdeogramV2RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiIdeogramV2RequestsByRequestIdResponse = + zSchemaIdeogramV2Output + +export const zGetFalAiStableDiffusionV35LargeRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiStableDiffusionV35LargeRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiStableDiffusionV35LargeRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiStableDiffusionV35LargeRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiStableDiffusionV35LargeData = z.object({ + body: zSchemaStableDiffusionV35LargeInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiStableDiffusionV35LargeResponse = zSchemaQueueStatus + +export const zGetFalAiStableDiffusionV35LargeRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. + */ +export const zGetFalAiStableDiffusionV35LargeRequestsByRequestIdResponse = + zSchemaStableDiffusionV35LargeOutput + +export const zGetFalAiFluxGeneralRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFluxGeneralRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxGeneralRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiFluxGeneralRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxGeneralData = z.object({ + body: zSchemaFluxGeneralInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxGeneralResponse = zSchemaQueueStatus + +export const zGetFalAiFluxGeneralRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFluxGeneralRequestsByRequestIdResponse = + zSchemaFluxGeneralOutput + +export const zGetFalAiFluxLoraRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFluxLoraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxLoraRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxLoraRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxLoraData = z.object({ + body: zSchemaFluxLoraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxLoraResponse = zSchemaQueueStatus + +export const zGetFalAiFluxLoraRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFluxLoraRequestsByRequestIdResponse = + zSchemaFluxLoraOutput + +export const zGetFalAiZImageBaseLoraRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiZImageBaseLoraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiZImageBaseLoraRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiZImageBaseLoraRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiZImageBaseLoraData = z.object({ + body: zSchemaZImageBaseLoraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiZImageBaseLoraResponse = zSchemaQueueStatus + +export const zGetFalAiZImageBaseLoraRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiZImageBaseLoraRequestsByRequestIdResponse = + zSchemaZImageBaseLoraOutput + +export const zGetFalAiZImageBaseRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiZImageBaseRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiZImageBaseRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiZImageBaseRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiZImageBaseData = z.object({ + body: zSchemaZImageBaseInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiZImageBaseResponse = zSchemaQueueStatus + +export const zGetFalAiZImageBaseRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
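+ *
+ * The generated schemas also serve as the source of static types; a small
+ * sketch using Zod's standard `z.infer` helper:
+ * @example
+ * type ZImageBaseOutput = z.infer<
+ *   typeof zGetFalAiZImageBaseRequestsByRequestIdResponse
+ * >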
+ */ +export const zGetFalAiZImageBaseRequestsByRequestIdResponse = + zSchemaZImageBaseOutput + +export const zGetFalAiFlux2Klein9bBaseLoraRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFlux2Klein9bBaseLoraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2Klein9bBaseLoraRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux2Klein9bBaseLoraRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2Klein9bBaseLoraData = z.object({ + body: zSchemaFlux2Klein9bBaseLoraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux2Klein9bBaseLoraResponse = zSchemaQueueStatus + +export const zGetFalAiFlux2Klein9bBaseLoraRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2Klein9bBaseLoraRequestsByRequestIdResponse = + zSchemaFlux2Klein9bBaseLoraOutput + +export const zGetFalAiFlux2Klein4bBaseLoraRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFlux2Klein4bBaseLoraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2Klein4bBaseLoraRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux2Klein4bBaseLoraRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2Klein4bBaseLoraData = z.object({ + body: zSchemaFlux2Klein4bBaseLoraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiFlux2Klein4bBaseLoraResponse = zSchemaQueueStatus + +export const zGetFalAiFlux2Klein4bBaseLoraRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2Klein4bBaseLoraRequestsByRequestIdResponse = + zSchemaFlux2Klein4bBaseLoraOutput + +export const zGetFalAiFlux2Klein9bBaseRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFlux2Klein9bBaseRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2Klein9bBaseRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux2Klein9bBaseRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2Klein9bBaseData = z.object({ + body: zSchemaFlux2Klein9bBaseInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux2Klein9bBaseResponse = zSchemaQueueStatus + +export const zGetFalAiFlux2Klein9bBaseRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2Klein9bBaseRequestsByRequestIdResponse = + zSchemaFlux2Klein9bBaseOutput + +export const zGetFalAiFlux2Klein4bBaseRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFlux2Klein4bBaseRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2Klein4bBaseRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiFlux2Klein4bBaseRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2Klein4bBaseData = z.object({ + body: zSchemaFlux2Klein4bBaseInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux2Klein4bBaseResponse = zSchemaQueueStatus + +export const zGetFalAiFlux2Klein4bBaseRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2Klein4bBaseRequestsByRequestIdResponse = + zSchemaFlux2Klein4bBaseOutput + +export const zGetFalAiFlux2Klein9bRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFlux2Klein9bRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2Klein9bRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux2Klein9bRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2Klein9bData = z.object({ + body: zSchemaFlux2Klein9bInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux2Klein9bResponse = zSchemaQueueStatus + +export const zGetFalAiFlux2Klein9bRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2Klein9bRequestsByRequestIdResponse = + zSchemaFlux2Klein9bOutput + +export const zGetFalAiFlux2Klein4bRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
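+ *
+ * Sketch of assembling a status request from the matching `...StatusData`
+ * schema above; the URL template and `baseUrl` are assumptions based on
+ * fal's queue route naming, not something this file defines:
+ * @example
+ * const req = zGetFalAiFlux2Klein4bRequestsByRequestIdStatusData.parse({
+ *   path: { request_id: 'req_123' },
+ *   query: { logs: 1 }, // ask for logs in the status payload
+ * })
+ * const url = `${baseUrl}/requests/${req.path.request_id}/status?logs=${req.query?.logs ?? 0}`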
+ */ +export const zGetFalAiFlux2Klein4bRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2Klein4bRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux2Klein4bRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2Klein4bData = z.object({ + body: zSchemaFlux2Klein4bInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux2Klein4bResponse = zSchemaQueueStatus + +export const zGetFalAiFlux2Klein4bRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2Klein4bRequestsByRequestIdResponse = + zSchemaFlux2Klein4bOutput + +export const zGetImagineartImagineart15ProPreviewTextToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetImagineartImagineart15ProPreviewTextToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutImagineartImagineart15ProPreviewTextToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutImagineartImagineart15ProPreviewTextToImageRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostImagineartImagineart15ProPreviewTextToImageData = z.object({ + body: zSchemaImagineart15ProPreviewTextToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostImagineartImagineart15ProPreviewTextToImageResponse = + zSchemaQueueStatus + +export const zGetImagineartImagineart15ProPreviewTextToImageRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetImagineartImagineart15ProPreviewTextToImageRequestsByRequestIdResponse = + zSchemaImagineart15ProPreviewTextToImageOutput + +export const zGetFalAiGlmImageRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiGlmImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiGlmImageRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiGlmImageRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiGlmImageData = z.object({ + body: zSchemaGlmImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiGlmImageResponse = zSchemaQueueStatus + +export const zGetFalAiGlmImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiGlmImageRequestsByRequestIdResponse = + zSchemaGlmImageOutput + +export const zGetFalAiQwenImage2512LoraRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiQwenImage2512LoraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImage2512LoraRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwenImage2512LoraRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImage2512LoraData = z.object({ + body: zSchemaQwenImage2512LoraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiQwenImage2512LoraResponse = zSchemaQueueStatus + +export const zGetFalAiQwenImage2512LoraRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiQwenImage2512LoraRequestsByRequestIdResponse = + zSchemaQwenImage2512LoraOutput + +export const zGetFalAiQwenImage2512RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiQwenImage2512RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImage2512RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwenImage2512RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImage2512Data = z.object({ + body: zSchemaQwenImage2512Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiQwenImage2512Response = zSchemaQueueStatus + +export const zGetFalAiQwenImage2512RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiQwenImage2512RequestsByRequestIdResponse = + zSchemaQwenImage2512Output + +export const zGetWanV26TextToImageRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetWanV26TextToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutWanV26TextToImageRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutWanV26TextToImageRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostWanV26TextToImageData = z.object({ + body: zSchemaV26TextToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostWanV26TextToImageResponse = zSchemaQueueStatus + +export const zGetWanV26TextToImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetWanV26TextToImageRequestsByRequestIdResponse = + zSchemaV26TextToImageOutput + +export const zGetFalAiFlux2FlashRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFlux2FlashRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2FlashRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux2FlashRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2FlashData = z.object({ + body: zSchemaFlux2FlashInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux2FlashResponse = zSchemaQueueStatus + +export const zGetFalAiFlux2FlashRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2FlashRequestsByRequestIdResponse = + zSchemaFlux2FlashOutput + +export const zGetFalAiGptImage15RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiGptImage15RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiGptImage15RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiGptImage15RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiGptImage15Data = z.object({ + body: zSchemaGptImage15Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiGptImage15Response = zSchemaQueueStatus + +export const zGetFalAiGptImage15RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiGptImage15RequestsByRequestIdResponse = + zSchemaGptImage15Output + +export const zGetBriaFiboLiteGenerateRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetBriaFiboLiteGenerateRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutBriaFiboLiteGenerateRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutBriaFiboLiteGenerateRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostBriaFiboLiteGenerateData = z.object({ + body: zSchemaFiboLiteGenerateInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostBriaFiboLiteGenerateResponse = zSchemaQueueStatus + +export const zGetBriaFiboLiteGenerateRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetBriaFiboLiteGenerateRequestsByRequestIdResponse = + zSchemaFiboLiteGenerateOutput + +export const zGetFalAiFlux2TurboRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFlux2TurboRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2TurboRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux2TurboRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2TurboData = z.object({ + body: zSchemaFlux2TurboInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux2TurboResponse = zSchemaQueueStatus + +export const zGetFalAiFlux2TurboRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2TurboRequestsByRequestIdResponse = + zSchemaFlux2TurboOutput + +export const zGetFalAiFlux2MaxRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFlux2MaxRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2MaxRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux2MaxRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2MaxData = z.object({ + body: zSchemaFlux2MaxInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiFlux2MaxResponse = zSchemaQueueStatus + +export const zGetFalAiFlux2MaxRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2MaxRequestsByRequestIdResponse = + zSchemaFlux2MaxOutput + +export const zGetFalAiLongcatImageRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiLongcatImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLongcatImageRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiLongcatImageRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLongcatImageData = z.object({ + body: zSchemaLongcatImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLongcatImageResponse = zSchemaQueueStatus + +export const zGetFalAiLongcatImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLongcatImageRequestsByRequestIdResponse = + zSchemaLongcatImageOutput + +export const zGetFalAiBytedanceSeedreamV45TextToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiBytedanceSeedreamV45TextToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiBytedanceSeedreamV45TextToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
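+ * + * Usage sketch (assumes `payload` is the parsed JSON returned by the cancel call): + * @example + * const cancelled = zPutFalAiBytedanceSeedreamV45TextToImageRequestsByRequestIdCancelResponse.parse(payload) + * if (cancelled.success !== true) { + *   // cancellation did not take effect; the request may already be running or done + * }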
+ */ +export const zPutFalAiBytedanceSeedreamV45TextToImageRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiBytedanceSeedreamV45TextToImageData = z.object({ + body: zSchemaBytedanceSeedreamV45TextToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiBytedanceSeedreamV45TextToImageResponse = + zSchemaQueueStatus + +export const zGetFalAiBytedanceSeedreamV45TextToImageRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiBytedanceSeedreamV45TextToImageRequestsByRequestIdResponse = + zSchemaBytedanceSeedreamV45TextToImageOutput + +export const zGetFalAiViduQ2TextToImageRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiViduQ2TextToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiViduQ2TextToImageRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiViduQ2TextToImageRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiViduQ2TextToImageData = z.object({ + body: zSchemaViduQ2TextToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiViduQ2TextToImageResponse = zSchemaQueueStatus + +export const zGetFalAiViduQ2TextToImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiViduQ2TextToImageRequestsByRequestIdResponse = + zSchemaViduQ2TextToImageOutput + +export const zGetFalAiZImageTurboLoraRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiZImageTurboLoraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiZImageTurboLoraRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiZImageTurboLoraRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiZImageTurboLoraData = z.object({ + body: zSchemaZImageTurboLoraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiZImageTurboLoraResponse = zSchemaQueueStatus + +export const zGetFalAiZImageTurboLoraRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiZImageTurboLoraRequestsByRequestIdResponse = + zSchemaZImageTurboLoraOutput + +export const zGetFalAiOvisImageRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiOvisImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiOvisImageRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiOvisImageRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiOvisImageData = z.object({ + body: zSchemaOvisImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiOvisImageResponse = zSchemaQueueStatus + +export const zGetFalAiOvisImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
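+ * + * Usage sketch (assumes `payload` is the parsed JSON fetched once the queue status is COMPLETED): + * @example + * const result = zGetFalAiOvisImageRequestsByRequestIdResponse.parse(payload)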
+ */ +export const zGetFalAiOvisImageRequestsByRequestIdResponse = + zSchemaOvisImageOutput + +export const zGetFalAiZImageTurboRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiZImageTurboRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiZImageTurboRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiZImageTurboRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiZImageTurboData = z.object({ + body: zSchemaZImageTurboInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiZImageTurboResponse = zSchemaQueueStatus + +export const zGetFalAiZImageTurboRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiZImageTurboRequestsByRequestIdResponse = + zSchemaZImageTurboOutput + +export const zGetFalAiFlux2LoraGallerySepiaVintageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFlux2LoraGallerySepiaVintageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2LoraGallerySepiaVintageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux2LoraGallerySepiaVintageRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2LoraGallerySepiaVintageData = z.object({ + body: zSchemaFlux2LoraGallerySepiaVintageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiFlux2LoraGallerySepiaVintageResponse = zSchemaQueueStatus + +export const zGetFalAiFlux2LoraGallerySepiaVintageRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2LoraGallerySepiaVintageRequestsByRequestIdResponse = + zSchemaFlux2LoraGallerySepiaVintageOutput + +export const zGetFalAiFlux2LoraGallerySatelliteViewStyleRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFlux2LoraGallerySatelliteViewStyleRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2LoraGallerySatelliteViewStyleRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux2LoraGallerySatelliteViewStyleRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2LoraGallerySatelliteViewStyleData = z.object({ + body: zSchemaFlux2LoraGallerySatelliteViewStyleInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux2LoraGallerySatelliteViewStyleResponse = + zSchemaQueueStatus + +export const zGetFalAiFlux2LoraGallerySatelliteViewStyleRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2LoraGallerySatelliteViewStyleRequestsByRequestIdResponse = + zSchemaFlux2LoraGallerySatelliteViewStyleOutput + +export const zGetFalAiFlux2LoraGalleryRealismRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFlux2LoraGalleryRealismRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2LoraGalleryRealismRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiFlux2LoraGalleryRealismRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2LoraGalleryRealismData = z.object({ + body: zSchemaFlux2LoraGalleryRealismInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux2LoraGalleryRealismResponse = zSchemaQueueStatus + +export const zGetFalAiFlux2LoraGalleryRealismRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2LoraGalleryRealismRequestsByRequestIdResponse = + zSchemaFlux2LoraGalleryRealismOutput + +export const zGetFalAiFlux2LoraGalleryHdrStyleRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFlux2LoraGalleryHdrStyleRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2LoraGalleryHdrStyleRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux2LoraGalleryHdrStyleRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2LoraGalleryHdrStyleData = z.object({ + body: zSchemaFlux2LoraGalleryHdrStyleInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux2LoraGalleryHdrStyleResponse = zSchemaQueueStatus + +export const zGetFalAiFlux2LoraGalleryHdrStyleRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2LoraGalleryHdrStyleRequestsByRequestIdResponse = + zSchemaFlux2LoraGalleryHdrStyleOutput + +export const zGetFalAiFlux2LoraGalleryDigitalComicArtRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiFlux2LoraGalleryDigitalComicArtRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2LoraGalleryDigitalComicArtRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux2LoraGalleryDigitalComicArtRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2LoraGalleryDigitalComicArtData = z.object({ + body: zSchemaFlux2LoraGalleryDigitalComicArtInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux2LoraGalleryDigitalComicArtResponse = + zSchemaQueueStatus + +export const zGetFalAiFlux2LoraGalleryDigitalComicArtRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2LoraGalleryDigitalComicArtRequestsByRequestIdResponse = + zSchemaFlux2LoraGalleryDigitalComicArtOutput + +export const zGetFalAiFlux2LoraGalleryBallpointPenSketchRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFlux2LoraGalleryBallpointPenSketchRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2LoraGalleryBallpointPenSketchRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux2LoraGalleryBallpointPenSketchRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2LoraGalleryBallpointPenSketchData = z.object({ + body: zSchemaFlux2LoraGalleryBallpointPenSketchInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux2LoraGalleryBallpointPenSketchResponse = + zSchemaQueueStatus + +export const zGetFalAiFlux2LoraGalleryBallpointPenSketchRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiFlux2LoraGalleryBallpointPenSketchRequestsByRequestIdResponse = + zSchemaFlux2LoraGalleryBallpointPenSketchOutput + +export const zGetFalAiFlux2FlexRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFlux2FlexRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2FlexRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux2FlexRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2FlexData = z.object({ + body: zSchemaFlux2FlexInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux2FlexResponse = zSchemaQueueStatus + +export const zGetFalAiFlux2FlexRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2FlexRequestsByRequestIdResponse = + zSchemaFlux2FlexOutput + +export const zGetFalAiGemini3ProImagePreviewRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiGemini3ProImagePreviewRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiGemini3ProImagePreviewRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiGemini3ProImagePreviewRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiGemini3ProImagePreviewData = z.object({ + body: zSchemaGemini3ProImagePreviewInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiGemini3ProImagePreviewResponse = zSchemaQueueStatus + +export const zGetFalAiGemini3ProImagePreviewRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiGemini3ProImagePreviewRequestsByRequestIdResponse = + zSchemaGemini3ProImagePreviewOutput + +export const zGetFalAiNanoBananaProRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiNanoBananaProRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiNanoBananaProRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiNanoBananaProRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiNanoBananaProData = z.object({ + body: zSchemaNanoBananaProInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiNanoBananaProResponse = zSchemaQueueStatus + +export const zGetFalAiNanoBananaProRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiNanoBananaProRequestsByRequestIdResponse = + zSchemaNanoBananaProOutput + +export const zGetImagineartImagineart15PreviewTextToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetImagineartImagineart15PreviewTextToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutImagineartImagineart15PreviewTextToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutImagineartImagineart15PreviewTextToImageRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostImagineartImagineart15PreviewTextToImageData = z.object({ + body: zSchemaImagineart15PreviewTextToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostImagineartImagineart15PreviewTextToImageResponse = + zSchemaQueueStatus + +export const zGetImagineartImagineart15PreviewTextToImageRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetImagineartImagineart15PreviewTextToImageRequestsByRequestIdResponse = + zSchemaImagineart15PreviewTextToImageOutput + +export const zGetFalAiEmu35ImageTextToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiEmu35ImageTextToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiEmu35ImageTextToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiEmu35ImageTextToImageRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiEmu35ImageTextToImageData = z.object({ + body: zSchemaEmu35ImageTextToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiEmu35ImageTextToImageResponse = zSchemaQueueStatus + +export const zGetFalAiEmu35ImageTextToImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiEmu35ImageTextToImageRequestsByRequestIdResponse = + zSchemaEmu35ImageTextToImageOutput + +export const zGetBriaFiboGenerateRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
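+ * + * Usage sketch (request-side validation before polling; `id` is an assumed request-id variable): + * @example + * const req = zGetBriaFiboGenerateRequestsByRequestIdStatusData.parse({ + *   path: { request_id: id }, + *   query: { logs: 1 }, + * })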
+ */ +export const zGetBriaFiboGenerateRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutBriaFiboGenerateRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutBriaFiboGenerateRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostBriaFiboGenerateData = z.object({ + body: zSchemaFiboGenerateInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostBriaFiboGenerateResponse = zSchemaQueueStatus + +export const zGetBriaFiboGenerateRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetBriaFiboGenerateRequestsByRequestIdResponse = + zSchemaFiboGenerateOutput + +export const zGetFalAiPiflowRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiPiflowRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPiflowRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiPiflowRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPiflowData = z.object({ + body: zSchemaPiflowInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPiflowResponse = zSchemaQueueStatus + +export const zGetFalAiPiflowRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiPiflowRequestsByRequestIdResponse = zSchemaPiflowOutput + +export const zGetFalAiGptImage1MiniRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiGptImage1MiniRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiGptImage1MiniRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiGptImage1MiniRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiGptImage1MiniData = z.object({ + body: zSchemaGptImage1MiniInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiGptImage1MiniResponse = zSchemaQueueStatus + +export const zGetFalAiGptImage1MiniRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiGptImage1MiniRequestsByRequestIdResponse = + zSchemaGptImage1MiniOutput + +export const zGetFalAiReveTextToImageRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiReveTextToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiReveTextToImageRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiReveTextToImageRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiReveTextToImageData = z.object({ + body: zSchemaReveTextToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiReveTextToImageResponse = zSchemaQueueStatus + +export const zGetFalAiReveTextToImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiReveTextToImageRequestsByRequestIdResponse = + zSchemaReveTextToImageOutput + +export const zGetFalAiHunyuanImageV3TextToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiHunyuanImageV3TextToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiHunyuanImageV3TextToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiHunyuanImageV3TextToImageRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiHunyuanImageV3TextToImageData = z.object({ + body: zSchemaHunyuanImageV3TextToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiHunyuanImageV3TextToImageResponse = zSchemaQueueStatus + +export const zGetFalAiHunyuanImageV3TextToImageRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiHunyuanImageV3TextToImageRequestsByRequestIdResponse = + zSchemaHunyuanImageV3TextToImageOutput + +export const zGetFalAiWan25PreviewTextToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiWan25PreviewTextToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWan25PreviewTextToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiWan25PreviewTextToImageRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWan25PreviewTextToImageData = z.object({ + body: zSchemaWan25PreviewTextToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWan25PreviewTextToImageResponse = zSchemaQueueStatus + +export const zGetFalAiWan25PreviewTextToImageRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. + */ +export const zGetFalAiWan25PreviewTextToImageRequestsByRequestIdResponse = + zSchemaWan25PreviewTextToImageOutput + +export const zGetFalAiFluxSrpoRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFluxSrpoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxSrpoRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxSrpoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxSrpoData = z.object({ + body: zSchemaFluxSrpoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxSrpoResponse = zSchemaQueueStatus + +export const zGetFalAiFluxSrpoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFluxSrpoRequestsByRequestIdResponse = + zSchemaFluxSrpoOutput + +export const zGetFalAiFlux1SrpoRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiFlux1SrpoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux1SrpoRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux1SrpoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux1SrpoData = z.object({ + body: zSchemaFlux1SrpoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux1SrpoResponse = zSchemaQueueStatus + +export const zGetFalAiFlux1SrpoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFlux1SrpoRequestsByRequestIdResponse = + zSchemaFlux1SrpoOutput + +export const zGetFalAiHunyuanImageV21TextToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiHunyuanImageV21TextToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiHunyuanImageV21TextToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiHunyuanImageV21TextToImageRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiHunyuanImageV21TextToImageData = z.object({ + body: zSchemaHunyuanImageV21TextToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiHunyuanImageV21TextToImageResponse = zSchemaQueueStatus + +export const zGetFalAiHunyuanImageV21TextToImageRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiHunyuanImageV21TextToImageRequestsByRequestIdResponse = + zSchemaHunyuanImageV21TextToImageOutput + +export const zGetFalAiBytedanceSeedreamV4TextToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiBytedanceSeedreamV4TextToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiBytedanceSeedreamV4TextToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiBytedanceSeedreamV4TextToImageRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiBytedanceSeedreamV4TextToImageData = z.object({ + body: zSchemaBytedanceSeedreamV4TextToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiBytedanceSeedreamV4TextToImageResponse = + zSchemaQueueStatus + +export const zGetFalAiBytedanceSeedreamV4TextToImageRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiBytedanceSeedreamV4TextToImageRequestsByRequestIdResponse = + zSchemaBytedanceSeedreamV4TextToImageOutput + +export const zGetFalAiGemini25FlashImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiGemini25FlashImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiGemini25FlashImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiGemini25FlashImageRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiGemini25FlashImageData = z.object({ + body: zSchemaGemini25FlashImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiGemini25FlashImageResponse = zSchemaQueueStatus + +export const zGetFalAiGemini25FlashImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiGemini25FlashImageRequestsByRequestIdResponse = + zSchemaGemini25FlashImageOutput + +export const zGetFalAiNanoBananaRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiNanoBananaRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiNanoBananaRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiNanoBananaRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiNanoBananaData = z.object({ + body: zSchemaNanoBananaInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiNanoBananaResponse = zSchemaQueueStatus + +export const zGetFalAiNanoBananaRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiNanoBananaRequestsByRequestIdResponse = + zSchemaNanoBananaOutput + +export const zGetFalAiBytedanceDreaminaV31TextToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiBytedanceDreaminaV31TextToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiBytedanceDreaminaV31TextToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiBytedanceDreaminaV31TextToImageRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiBytedanceDreaminaV31TextToImageData = z.object({ + body: zSchemaBytedanceDreaminaV31TextToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiBytedanceDreaminaV31TextToImageResponse = + zSchemaQueueStatus + +export const zGetFalAiBytedanceDreaminaV31TextToImageRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiBytedanceDreaminaV31TextToImageRequestsByRequestIdResponse = + zSchemaBytedanceDreaminaV31TextToImageOutput + +export const zGetFalAiWanV22A14bTextToImageLoraRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiWanV22A14bTextToImageLoraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanV22A14bTextToImageLoraRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanV22A14bTextToImageLoraRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanV22A14bTextToImageLoraData = z.object({ + body: zSchemaWanV22A14bTextToImageLoraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanV22A14bTextToImageLoraResponse = zSchemaQueueStatus + +export const zGetFalAiWanV22A14bTextToImageLoraRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiWanV22A14bTextToImageLoraRequestsByRequestIdResponse = + zSchemaWanV22A14bTextToImageLoraOutput + +export const zGetFalAiWanV225bTextToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiWanV225bTextToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanV225bTextToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanV225bTextToImageRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanV225bTextToImageData = z.object({ + body: zSchemaWanV225bTextToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanV225bTextToImageResponse = zSchemaQueueStatus + +export const zGetFalAiWanV225bTextToImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiWanV225bTextToImageRequestsByRequestIdResponse = + zSchemaWanV225bTextToImageOutput + +export const zGetFalAiWanV22A14bTextToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiWanV22A14bTextToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanV22A14bTextToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanV22A14bTextToImageRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanV22A14bTextToImageData = z.object({ + body: zSchemaWanV22A14bTextToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanV22A14bTextToImageResponse = zSchemaQueueStatus + +export const zGetFalAiWanV22A14bTextToImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiWanV22A14bTextToImageRequestsByRequestIdResponse = + zSchemaWanV22A14bTextToImageOutput + +export const zGetFalAiQwenImageRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiQwenImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwenImageRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageData = z.object({ + body: zSchemaQwenImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiQwenImageResponse = zSchemaQueueStatus + +export const zGetFalAiQwenImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiQwenImageRequestsByRequestIdResponse = + zSchemaQwenImageOutput + +export const zGetFalAiFluxKreaLoraStreamRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFluxKreaLoraStreamRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxKreaLoraStreamRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxKreaLoraStreamRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxKreaLoraStreamData = z.object({ + body: zSchemaFluxKreaLoraStreamInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiFluxKreaLoraStreamResponse = zSchemaQueueStatus + +export const zGetFalAiFluxKreaLoraStreamRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFluxKreaLoraStreamRequestsByRequestIdResponse = + zSchemaFluxKreaLoraStreamOutput + +export const zGetFalAiFluxKreaLoraRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFluxKreaLoraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxKreaLoraRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxKreaLoraRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxKreaLoraData = z.object({ + body: zSchemaFluxKreaLoraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxKreaLoraResponse = zSchemaQueueStatus + +export const zGetFalAiFluxKreaLoraRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFluxKreaLoraRequestsByRequestIdResponse = + zSchemaFluxKreaLoraOutput + +export const zGetFalAiFluxKreaRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFluxKreaRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxKreaRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiFluxKreaRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxKreaData = z.object({ + body: zSchemaFluxKreaInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxKreaResponse = zSchemaQueueStatus + +export const zGetFalAiFluxKreaRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFluxKreaRequestsByRequestIdResponse = + zSchemaFluxKreaOutput + +export const zGetFalAiFlux1KreaRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFlux1KreaRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux1KreaRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux1KreaRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux1KreaData = z.object({ + body: zSchemaFlux1KreaInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux1KreaResponse = zSchemaQueueStatus + +export const zGetFalAiFlux1KreaRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFlux1KreaRequestsByRequestIdResponse = + zSchemaFlux1KreaOutput + +export const zGetFalAiSkyRaccoonRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiSkyRaccoonRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSkyRaccoonRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiSkyRaccoonRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSkyRaccoonData = z.object({ + body: zSchemaSkyRaccoonInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiSkyRaccoonResponse = zSchemaQueueStatus + +export const zGetFalAiSkyRaccoonRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSkyRaccoonRequestsByRequestIdResponse = + zSchemaSkyRaccoonOutput + +export const zGetFalAiFluxKontextLoraTextToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFluxKontextLoraTextToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxKontextLoraTextToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxKontextLoraTextToImageRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxKontextLoraTextToImageData = z.object({ + body: zSchemaFluxKontextLoraTextToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxKontextLoraTextToImageResponse = zSchemaQueueStatus + +export const zGetFalAiFluxKontextLoraTextToImageRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiFluxKontextLoraTextToImageRequestsByRequestIdResponse = + zSchemaFluxKontextLoraTextToImageOutput + +export const zGetFalAiOmnigenV2RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiOmnigenV2RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiOmnigenV2RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiOmnigenV2RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiOmnigenV2Data = z.object({ + body: zSchemaOmnigenV2Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiOmnigenV2Response = zSchemaQueueStatus + +export const zGetFalAiOmnigenV2RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiOmnigenV2RequestsByRequestIdResponse = + zSchemaOmnigenV2Output + +export const zGetFalAiBytedanceSeedreamV3TextToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiBytedanceSeedreamV3TextToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiBytedanceSeedreamV3TextToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiBytedanceSeedreamV3TextToImageRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiBytedanceSeedreamV3TextToImageData = z.object({ + body: zSchemaBytedanceSeedreamV3TextToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiBytedanceSeedreamV3TextToImageResponse = + zSchemaQueueStatus + +export const zGetFalAiBytedanceSeedreamV3TextToImageRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiBytedanceSeedreamV3TextToImageRequestsByRequestIdResponse = + zSchemaBytedanceSeedreamV3TextToImageOutput + +export const zGetFalAiFlux1SchnellRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFlux1SchnellRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux1SchnellRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux1SchnellRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux1SchnellData = z.object({ + body: zSchemaFlux1SchnellInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux1SchnellResponse = zSchemaQueueStatus + +export const zGetFalAiFlux1SchnellRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFlux1SchnellRequestsByRequestIdResponse = + zSchemaFlux1SchnellOutput + +export const zGetFalAiFlux1DevRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFlux1DevRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux1DevRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiFlux1DevRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux1DevData = z.object({ + body: zSchemaFlux1DevInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux1DevResponse = zSchemaQueueStatus + +export const zGetFalAiFlux1DevRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFlux1DevRequestsByRequestIdResponse = + zSchemaFlux1DevOutput + +export const zGetFalAiFluxProKontextMaxTextToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFluxProKontextMaxTextToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxProKontextMaxTextToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxProKontextMaxTextToImageRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxProKontextMaxTextToImageData = z.object({ + body: zSchemaFluxProKontextMaxTextToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxProKontextMaxTextToImageResponse = zSchemaQueueStatus + +export const zGetFalAiFluxProKontextMaxTextToImageRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiFluxProKontextMaxTextToImageRequestsByRequestIdResponse = + zSchemaFluxProKontextMaxTextToImageOutput + +export const zGetFalAiFluxProKontextTextToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiFluxProKontextTextToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxProKontextTextToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxProKontextTextToImageRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxProKontextTextToImageData = z.object({ + body: zSchemaFluxProKontextTextToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxProKontextTextToImageResponse = zSchemaQueueStatus + +export const zGetFalAiFluxProKontextTextToImageRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiFluxProKontextTextToImageRequestsByRequestIdResponse = + zSchemaFluxProKontextTextToImageOutput + +export const zGetFalAiBagelRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiBagelRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiBagelRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiBagelRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiBagelData = z.object({ + body: zSchemaBagelInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiBagelResponse = zSchemaQueueStatus + +export const zGetFalAiBagelRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiBagelRequestsByRequestIdResponse = zSchemaBagelOutput + +export const zGetFalAiImagen4PreviewUltraRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiImagen4PreviewUltraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImagen4PreviewUltraRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiImagen4PreviewUltraRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImagen4PreviewUltraData = z.object({ + body: zSchemaImagen4PreviewUltraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImagen4PreviewUltraResponse = zSchemaQueueStatus + +export const zGetFalAiImagen4PreviewUltraRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiImagen4PreviewUltraRequestsByRequestIdResponse = + zSchemaImagen4PreviewUltraOutput + +export const zGetFalAiDreamoRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiDreamoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiDreamoRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiDreamoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiDreamoData = z.object({ + body: zSchemaDreamoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiDreamoResponse = zSchemaQueueStatus + +export const zGetFalAiDreamoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiDreamoRequestsByRequestIdResponse = zSchemaDreamoOutput + +export const zGetFalAiFluxLoraStreamRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFluxLoraStreamRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxLoraStreamRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxLoraStreamRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxLoraStreamData = z.object({ + body: zSchemaFluxLoraStreamInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxLoraStreamResponse = zSchemaQueueStatus + +export const zGetFalAiFluxLoraStreamRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFluxLoraStreamRequestsByRequestIdResponse = + zSchemaFluxLoraStreamOutput + +export const zGetFalAiMinimaxImage01RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiMinimaxImage01RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMinimaxImage01RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiMinimaxImage01RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMinimaxImage01Data = z.object({ + body: zSchemaMinimaxImage01Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMinimaxImage01Response = zSchemaQueueStatus + +export const zGetFalAiMinimaxImage01RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiMinimaxImage01RequestsByRequestIdResponse = + zSchemaMinimaxImage01Output + +export const zGetFalAiPonyV7RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiPonyV7RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPonyV7RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiPonyV7RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPonyV7Data = z.object({ + body: zSchemaPonyV7Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPonyV7Response = zSchemaQueueStatus + +export const zGetFalAiPonyV7RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPonyV7RequestsByRequestIdResponse = zSchemaPonyV7Output + +export const zGetFalAiIdeogramV3RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiIdeogramV3RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiIdeogramV3RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiIdeogramV3RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiIdeogramV3Data = z.object({ + body: zSchemaIdeogramV3Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiIdeogramV3Response = zSchemaQueueStatus + +export const zGetFalAiIdeogramV3RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiIdeogramV3RequestsByRequestIdResponse = + zSchemaIdeogramV3Output + +export const zGetFalAiFLiteStandardRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFLiteStandardRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFLiteStandardRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFLiteStandardRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFLiteStandardData = z.object({ + body: zSchemaFLiteStandardInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFLiteStandardResponse = zSchemaQueueStatus + +export const zGetFalAiFLiteStandardRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiFLiteStandardRequestsByRequestIdResponse = + zSchemaFLiteStandardOutput + +export const zGetFalAiFLiteTextureRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFLiteTextureRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFLiteTextureRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFLiteTextureRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFLiteTextureData = z.object({ + body: zSchemaFLiteTextureInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFLiteTextureResponse = zSchemaQueueStatus + +export const zGetFalAiFLiteTextureRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFLiteTextureRequestsByRequestIdResponse = + zSchemaFLiteTextureOutput + +export const zGetFalAiGptImage1TextToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiGptImage1TextToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiGptImage1TextToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiGptImage1TextToImageRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiGptImage1TextToImageData = z.object({ + body: zSchemaGptImage1TextToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiGptImage1TextToImageResponse = zSchemaQueueStatus + +export const zGetFalAiGptImage1TextToImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiGptImage1TextToImageRequestsByRequestIdResponse = + zSchemaGptImage1TextToImageOutput + +export const zGetFalAiSanaV1516bRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiSanaV1516bRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSanaV1516bRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiSanaV1516bRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSanaV1516bData = z.object({ + body: zSchemaSanaV1516bInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiSanaV1516bResponse = zSchemaQueueStatus + +export const zGetFalAiSanaV1516bRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSanaV1516bRequestsByRequestIdResponse = + zSchemaSanaV1516bOutput + +export const zGetFalAiSanaV1548bRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiSanaV1548bRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSanaV1548bRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiSanaV1548bRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSanaV1548bData = z.object({ + body: zSchemaSanaV1548bInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiSanaV1548bResponse = zSchemaQueueStatus + +export const zGetFalAiSanaV1548bRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSanaV1548bRequestsByRequestIdResponse = + zSchemaSanaV1548bOutput + +export const zGetFalAiSanaSprintRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiSanaSprintRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSanaSprintRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiSanaSprintRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSanaSprintData = z.object({ + body: zSchemaSanaSprintInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiSanaSprintResponse = zSchemaQueueStatus + +export const zGetFalAiSanaSprintRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSanaSprintRequestsByRequestIdResponse = + zSchemaSanaSprintOutput + +export const zGetRundiffusionFalJuggernautFluxLightningRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetRundiffusionFalJuggernautFluxLightningRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutRundiffusionFalJuggernautFluxLightningRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutRundiffusionFalJuggernautFluxLightningRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostRundiffusionFalJuggernautFluxLightningData = z.object({ + body: zSchemaJuggernautFluxLightningInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostRundiffusionFalJuggernautFluxLightningResponse = + zSchemaQueueStatus + +export const zGetRundiffusionFalJuggernautFluxLightningRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetRundiffusionFalJuggernautFluxLightningRequestsByRequestIdResponse = + zSchemaJuggernautFluxLightningOutput + +export const zGetRundiffusionFalJuggernautFluxProRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetRundiffusionFalJuggernautFluxProRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutRundiffusionFalJuggernautFluxProRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutRundiffusionFalJuggernautFluxProRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostRundiffusionFalJuggernautFluxProData = z.object({ + body: zSchemaJuggernautFluxProInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostRundiffusionFalJuggernautFluxProResponse = zSchemaQueueStatus + +export const zGetRundiffusionFalJuggernautFluxProRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetRundiffusionFalJuggernautFluxProRequestsByRequestIdResponse = + zSchemaJuggernautFluxProOutput + +export const zGetRundiffusionFalJuggernautFluxLoraRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetRundiffusionFalJuggernautFluxLoraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutRundiffusionFalJuggernautFluxLoraRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutRundiffusionFalJuggernautFluxLoraRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostRundiffusionFalJuggernautFluxLoraData = z.object({ + body: zSchemaJuggernautFluxLoraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostRundiffusionFalJuggernautFluxLoraResponse = zSchemaQueueStatus + +export const zGetRundiffusionFalJuggernautFluxLoraRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetRundiffusionFalJuggernautFluxLoraRequestsByRequestIdResponse = + zSchemaJuggernautFluxLoraOutput + +export const zGetRundiffusionFalRundiffusionPhotoFluxRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetRundiffusionFalRundiffusionPhotoFluxRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutRundiffusionFalRundiffusionPhotoFluxRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
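+ *
+ * Illustrative sketch, not generated output: the `cancel_url` carried by
+ * QueueStatus (defined earlier in this file) is assumed to accept a PUT, and
+ * the response is validated with this schema.
+ *
+ * @example
+ * const res = await fetch(status.cancel_url!, { method: 'PUT' })
+ * const body =
+ *   zPutRundiffusionFalRundiffusionPhotoFluxRequestsByRequestIdCancelResponse.parse(
+ *     await res.json(),
+ *   )
+ * // `success` is optional in the schema, so guard before reading it.
+ * if (body.success === true) console.log('request cancelled')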
+ */ +export const zPutRundiffusionFalRundiffusionPhotoFluxRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostRundiffusionFalRundiffusionPhotoFluxData = z.object({ + body: zSchemaRundiffusionPhotoFluxInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostRundiffusionFalRundiffusionPhotoFluxResponse = + zSchemaQueueStatus + +export const zGetRundiffusionFalRundiffusionPhotoFluxRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetRundiffusionFalRundiffusionPhotoFluxRequestsByRequestIdResponse = + zSchemaRundiffusionPhotoFluxOutput + +export const zGetRundiffusionFalJuggernautFluxBaseRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetRundiffusionFalJuggernautFluxBaseRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutRundiffusionFalJuggernautFluxBaseRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutRundiffusionFalJuggernautFluxBaseRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostRundiffusionFalJuggernautFluxBaseData = z.object({ + body: zSchemaJuggernautFluxBaseInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostRundiffusionFalJuggernautFluxBaseResponse = zSchemaQueueStatus + +export const zGetRundiffusionFalJuggernautFluxBaseRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetRundiffusionFalJuggernautFluxBaseRequestsByRequestIdResponse = + zSchemaJuggernautFluxBaseOutput + +export const zGetFalAiCogview4RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiCogview4RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiCogview4RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiCogview4RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiCogview4Data = z.object({ + body: zSchemaCogview4Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiCogview4Response = zSchemaQueueStatus + +export const zGetFalAiCogview4RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiCogview4RequestsByRequestIdResponse = + zSchemaCogview4Output + +export const zGetFalAiIdeogramV2aTurboRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiIdeogramV2aTurboRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiIdeogramV2aTurboRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiIdeogramV2aTurboRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiIdeogramV2aTurboData = z.object({ + body: zSchemaIdeogramV2aTurboInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiIdeogramV2aTurboResponse = zSchemaQueueStatus + +export const zGetFalAiIdeogramV2aTurboRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
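+ *
+ * Illustrative sketch: once QueueStatus reports COMPLETED, the result can be
+ * validated here and its TypeScript type derived with `z.infer`. Fetching
+ * via `status.response_url` is an assumption based on the QueueStatus schema
+ * defined earlier in this file.
+ *
+ * @example
+ * type IdeogramV2aTurboResult = z.infer<
+ *   typeof zGetFalAiIdeogramV2aTurboRequestsByRequestIdResponse
+ * >
+ * const result: IdeogramV2aTurboResult =
+ *   zGetFalAiIdeogramV2aTurboRequestsByRequestIdResponse.parse(
+ *     await (await fetch(status.response_url!)).json(),
+ *   )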
+ */ +export const zGetFalAiIdeogramV2aTurboRequestsByRequestIdResponse = + zSchemaIdeogramV2aTurboOutput + +export const zGetFalAiIdeogramV2aRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiIdeogramV2aRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiIdeogramV2aRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiIdeogramV2aRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiIdeogramV2aData = z.object({ + body: zSchemaIdeogramV2aInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiIdeogramV2aResponse = zSchemaQueueStatus + +export const zGetFalAiIdeogramV2aRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiIdeogramV2aRequestsByRequestIdResponse = + zSchemaIdeogramV2aOutput + +export const zGetFalAiFluxControlLoraCannyRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFluxControlLoraCannyRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxControlLoraCannyRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxControlLoraCannyRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxControlLoraCannyData = z.object({ + body: zSchemaFluxControlLoraCannyInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiFluxControlLoraCannyResponse = zSchemaQueueStatus + +export const zGetFalAiFluxControlLoraCannyRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFluxControlLoraCannyRequestsByRequestIdResponse = + zSchemaFluxControlLoraCannyOutput + +export const zGetFalAiFluxControlLoraDepthRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFluxControlLoraDepthRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxControlLoraDepthRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxControlLoraDepthRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxControlLoraDepthData = z.object({ + body: zSchemaFluxControlLoraDepthInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxControlLoraDepthResponse = zSchemaQueueStatus + +export const zGetFalAiFluxControlLoraDepthRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFluxControlLoraDepthRequestsByRequestIdResponse = + zSchemaFluxControlLoraDepthOutput + +export const zGetFalAiImagen3RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiImagen3RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImagen3RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiImagen3RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImagen3Data = z.object({ + body: zSchemaImagen3Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImagen3Response = zSchemaQueueStatus + +export const zGetFalAiImagen3RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiImagen3RequestsByRequestIdResponse = zSchemaImagen3Output + +export const zGetFalAiImagen3FastRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiImagen3FastRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImagen3FastRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiImagen3FastRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImagen3FastData = z.object({ + body: zSchemaImagen3FastInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImagen3FastResponse = zSchemaQueueStatus + +export const zGetFalAiImagen3FastRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiImagen3FastRequestsByRequestIdResponse = + zSchemaImagen3FastOutput + +export const zGetFalAiLuminaImageV2RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiLuminaImageV2RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLuminaImageV2RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiLuminaImageV2RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLuminaImageV2Data = z.object({ + body: zSchemaLuminaImageV2Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLuminaImageV2Response = zSchemaQueueStatus + +export const zGetFalAiLuminaImageV2RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLuminaImageV2RequestsByRequestIdResponse = + zSchemaLuminaImageV2Output + +export const zGetFalAiJanusRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiJanusRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiJanusRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiJanusRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiJanusData = z.object({ + body: zSchemaJanusInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiJanusResponse = zSchemaQueueStatus + +export const zGetFalAiJanusRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiJanusRequestsByRequestIdResponse = zSchemaJanusOutput + +export const zGetFalAiFluxProV11UltraFinetunedRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFluxProV11UltraFinetunedRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxProV11UltraFinetunedRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxProV11UltraFinetunedRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxProV11UltraFinetunedData = z.object({ + body: zSchemaFluxProV11UltraFinetunedInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxProV11UltraFinetunedResponse = zSchemaQueueStatus + +export const zGetFalAiFluxProV11UltraFinetunedRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiFluxProV11UltraFinetunedRequestsByRequestIdResponse = + zSchemaFluxProV11UltraFinetunedOutput + +export const zGetFalAiFluxProV11RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFluxProV11RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxProV11RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxProV11RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxProV11Data = z.object({ + body: zSchemaFluxProV11Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
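+ *
+ * Illustrative sketch: the corresponding *Data schema can validate the whole
+ * request envelope (body/path/query) before dispatch, and the POST response
+ * parses as QueueStatus. The submit URL is an assumption; only the schema
+ * names come from this file.
+ *
+ * @example
+ * // `input` is assumed to already satisfy zSchemaFluxProV11Input.
+ * const payload = zPostFalAiFluxProV11Data.parse({ body: input })
+ * const res = await fetch('https://queue.fal.run/fal-ai/flux-pro/v1.1', {
+ *   method: 'POST',
+ *   headers: { 'Content-Type': 'application/json' },
+ *   body: JSON.stringify(payload.body),
+ * })
+ * const queued = zPostFalAiFluxProV11Response.parse(await res.json())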
+ */ +export const zPostFalAiFluxProV11Response = zSchemaQueueStatus + +export const zGetFalAiFluxProV11RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFluxProV11RequestsByRequestIdResponse = + zSchemaFluxProV11Output + +export const zGetFalAiSwittiRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiSwittiRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSwittiRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiSwittiRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSwittiData = z.object({ + body: zSchemaSwittiInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiSwittiResponse = zSchemaQueueStatus + +export const zGetFalAiSwittiRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSwittiRequestsByRequestIdResponse = zSchemaSwittiOutput + +export const zGetFalAiSwitti512RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiSwitti512RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSwitti512RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiSwitti512RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSwitti512Data = z.object({ + body: zSchemaSwitti512Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiSwitti512Response = zSchemaQueueStatus + +export const zGetFalAiSwitti512RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSwitti512RequestsByRequestIdResponse = + zSchemaSwitti512Output + +export const zGetFalAiBriaTextToImageBaseRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiBriaTextToImageBaseRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiBriaTextToImageBaseRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiBriaTextToImageBaseRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiBriaTextToImageBaseData = z.object({ + body: zSchemaBriaTextToImageBaseInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiBriaTextToImageBaseResponse = zSchemaQueueStatus + +export const zGetFalAiBriaTextToImageBaseRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiBriaTextToImageBaseRequestsByRequestIdResponse = + zSchemaBriaTextToImageBaseOutput + +export const zGetFalAiBriaTextToImageFastRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiBriaTextToImageFastRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiBriaTextToImageFastRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiBriaTextToImageFastRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiBriaTextToImageFastData = z.object({ + body: zSchemaBriaTextToImageFastInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiBriaTextToImageFastResponse = zSchemaQueueStatus + +export const zGetFalAiBriaTextToImageFastRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiBriaTextToImageFastRequestsByRequestIdResponse = + zSchemaBriaTextToImageFastOutput + +export const zGetFalAiBriaTextToImageHdRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiBriaTextToImageHdRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiBriaTextToImageHdRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiBriaTextToImageHdRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiBriaTextToImageHdData = z.object({ + body: zSchemaBriaTextToImageHdInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiBriaTextToImageHdResponse = zSchemaQueueStatus + +export const zGetFalAiBriaTextToImageHdRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiBriaTextToImageHdRequestsByRequestIdResponse = + zSchemaBriaTextToImageHdOutput + +export const zGetFalAiRecraft20bRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiRecraft20bRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiRecraft20bRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiRecraft20bRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiRecraft20bData = z.object({ + body: zSchemaRecraft20bInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiRecraft20bResponse = zSchemaQueueStatus + +export const zGetFalAiRecraft20bRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiRecraft20bRequestsByRequestIdResponse = + zSchemaRecraft20bOutput + +export const zGetFalAiIdeogramV2TurboRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiIdeogramV2TurboRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiIdeogramV2TurboRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiIdeogramV2TurboRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiIdeogramV2TurboData = z.object({ + body: zSchemaIdeogramV2TurboInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiIdeogramV2TurboResponse = zSchemaQueueStatus + +export const zGetFalAiIdeogramV2TurboRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiIdeogramV2TurboRequestsByRequestIdResponse = + zSchemaIdeogramV2TurboOutput + +export const zGetFalAiLumaPhotonFlashRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiLumaPhotonFlashRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLumaPhotonFlashRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiLumaPhotonFlashRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLumaPhotonFlashData = z.object({ + body: zSchemaLumaPhotonFlashInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLumaPhotonFlashResponse = zSchemaQueueStatus + +export const zGetFalAiLumaPhotonFlashRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLumaPhotonFlashRequestsByRequestIdResponse = + zSchemaLumaPhotonFlashOutput + +export const zGetFalAiAuraFlowRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiAuraFlowRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiAuraFlowRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiAuraFlowRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiAuraFlowData = z.object({ + body: zSchemaAuraFlowInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiAuraFlowResponse = zSchemaQueueStatus + +export const zGetFalAiAuraFlowRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiAuraFlowRequestsByRequestIdResponse = + zSchemaAuraFlowOutput + +export const zGetFalAiOmnigenV1RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiOmnigenV1RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiOmnigenV1RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiOmnigenV1RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiOmnigenV1Data = z.object({ + body: zSchemaOmnigenV1Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiOmnigenV1Response = zSchemaQueueStatus + +export const zGetFalAiOmnigenV1RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiOmnigenV1RequestsByRequestIdResponse = + zSchemaOmnigenV1Output + +export const zGetFalAiFluxSchnellRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFluxSchnellRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxSchnellRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
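+ *
+ * Illustrative sketch: `safeParse` offers a non-throwing alternative when a
+ * cancel response might not match the generated shape (for example, an error
+ * body). `payload` here is an assumed, already-fetched JSON value.
+ *
+ * @example
+ * const parsed =
+ *   zPutFalAiFluxSchnellRequestsByRequestIdCancelResponse.safeParse(payload)
+ * if (parsed.success) {
+ *   console.log('cancelled:', parsed.data.success ?? 'unknown')
+ * } else {
+ *   console.error(parsed.error.issues) // Zod issue list for diagnostics
+ * }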
+ */ +export const zPutFalAiFluxSchnellRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxSchnellData = z.object({ + body: zSchemaFluxSchnellInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxSchnellResponse = zSchemaQueueStatus + +export const zGetFalAiFluxSchnellRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFluxSchnellRequestsByRequestIdResponse = + zSchemaFluxSchnellOutput + +export const zGetFalAiStableDiffusionV35MediumRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiStableDiffusionV35MediumRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiStableDiffusionV35MediumRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiStableDiffusionV35MediumRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiStableDiffusionV35MediumData = z.object({ + body: zSchemaStableDiffusionV35MediumInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiStableDiffusionV35MediumResponse = zSchemaQueueStatus + +export const zGetFalAiStableDiffusionV35MediumRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiStableDiffusionV35MediumRequestsByRequestIdResponse = + zSchemaStableDiffusionV35MediumOutput + +export const zGetFalAiFluxLoraInpaintingRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiFluxLoraInpaintingRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxLoraInpaintingRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxLoraInpaintingRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxLoraInpaintingData = z.object({ + body: zSchemaFluxLoraInpaintingInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxLoraInpaintingResponse = zSchemaQueueStatus + +export const zGetFalAiFluxLoraInpaintingRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFluxLoraInpaintingRequestsByRequestIdResponse = + zSchemaFluxLoraInpaintingOutput + +export const zGetFalAiStableDiffusionV3MediumRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiStableDiffusionV3MediumRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiStableDiffusionV3MediumRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiStableDiffusionV3MediumRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiStableDiffusionV3MediumData = z.object({ + body: zSchemaStableDiffusionV3MediumInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiStableDiffusionV3MediumResponse = zSchemaQueueStatus + +export const zGetFalAiStableDiffusionV3MediumRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. 
+ */ +export const zGetFalAiStableDiffusionV3MediumRequestsByRequestIdResponse = + zSchemaStableDiffusionV3MediumOutput + +export const zGetFalAiFooocusUpscaleOrVaryRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFooocusUpscaleOrVaryRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFooocusUpscaleOrVaryRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFooocusUpscaleOrVaryRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFooocusUpscaleOrVaryData = z.object({ + body: zSchemaFooocusUpscaleOrVaryInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFooocusUpscaleOrVaryResponse = zSchemaQueueStatus + +export const zGetFalAiFooocusUpscaleOrVaryRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFooocusUpscaleOrVaryRequestsByRequestIdResponse = + zSchemaFooocusUpscaleOrVaryOutput + +export const zGetFalAiSanaRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiSanaRequestsByRequestIdStatusResponse = zSchemaQueueStatus + +export const zPutFalAiSanaRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiSanaRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSanaData = z.object({ + body: zSchemaSanaInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
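+ *
+ * Illustrative end-to-end sketch tying the generated submit, status, and
+ * result schemas together. `submit()`, the fixed polling interval, and the
+ * presence of `status_url`/`response_url` are assumptions; the `status` enum
+ * values come from the QueueStatus schema defined earlier in this file.
+ *
+ * @example
+ * let queued = zPostFalAiSanaResponse.parse(await submit())
+ * while (queued.status !== 'COMPLETED') {
+ *   await new Promise((r) => setTimeout(r, 1000)) // simple fixed-interval poll
+ *   queued = zGetFalAiSanaRequestsByRequestIdStatusResponse.parse(
+ *     await (await fetch(queued.status_url!)).json(),
+ *   )
+ * }
+ * const output = zGetFalAiSanaRequestsByRequestIdResponse.parse(
+ *   await (await fetch(queued.response_url!)).json(),
+ * )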
+ */ +export const zPostFalAiSanaResponse = zSchemaQueueStatus + +export const zGetFalAiSanaRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSanaRequestsByRequestIdResponse = zSchemaSanaOutput + +export const zGetFalAiFluxSubjectRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFluxSubjectRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxSubjectRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxSubjectRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxSubjectData = z.object({ + body: zSchemaFluxSubjectInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxSubjectResponse = zSchemaQueueStatus + +export const zGetFalAiFluxSubjectRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFluxSubjectRequestsByRequestIdResponse = + zSchemaFluxSubjectOutput + +export const zGetFalAiPixartSigmaRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiPixartSigmaRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPixartSigmaRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiPixartSigmaRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPixartSigmaData = z.object({ + body: zSchemaPixartSigmaInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPixartSigmaResponse = zSchemaQueueStatus + +export const zGetFalAiPixartSigmaRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPixartSigmaRequestsByRequestIdResponse = + zSchemaPixartSigmaOutput + +export const zGetFalAiSdxlControlnetUnionRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiSdxlControlnetUnionRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSdxlControlnetUnionRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiSdxlControlnetUnionRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSdxlControlnetUnionData = z.object({ + body: zSchemaSdxlControlnetUnionInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiSdxlControlnetUnionResponse = zSchemaQueueStatus + +export const zGetFalAiSdxlControlnetUnionRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSdxlControlnetUnionRequestsByRequestIdResponse = + zSchemaSdxlControlnetUnionOutput + +export const zGetFalAiKolorsRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiKolorsRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKolorsRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiKolorsRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKolorsData = z.object({ + body: zSchemaKolorsInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKolorsResponse = zSchemaQueueStatus + +export const zGetFalAiKolorsRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiKolorsRequestsByRequestIdResponse = zSchemaKolorsOutput + +export const zGetFalAiStableCascadeRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiStableCascadeRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiStableCascadeRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiStableCascadeRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiStableCascadeData = z.object({ + body: zSchemaStableCascadeInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiStableCascadeResponse = zSchemaQueueStatus + +export const zGetFalAiStableCascadeRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiStableCascadeRequestsByRequestIdResponse = + zSchemaStableCascadeOutput + +export const zGetFalAiFastSdxlRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFastSdxlRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFastSdxlRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFastSdxlRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFastSdxlData = z.object({ + body: zSchemaFastSdxlInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFastSdxlResponse = zSchemaQueueStatus + +export const zGetFalAiFastSdxlRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFastSdxlRequestsByRequestIdResponse = + zSchemaFastSdxlOutput + +export const zGetFalAiStableCascadeSoteDiffusionRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiStableCascadeSoteDiffusionRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiStableCascadeSoteDiffusionRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiStableCascadeSoteDiffusionRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiStableCascadeSoteDiffusionData = z.object({ + body: zSchemaStableCascadeSoteDiffusionInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiStableCascadeSoteDiffusionResponse = zSchemaQueueStatus + +export const zGetFalAiStableCascadeSoteDiffusionRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiStableCascadeSoteDiffusionRequestsByRequestIdResponse = + zSchemaStableCascadeSoteDiffusionOutput + +export const zGetFalAiLumaPhotonRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiLumaPhotonRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLumaPhotonRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiLumaPhotonRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLumaPhotonData = z.object({ + body: zSchemaLumaPhotonInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLumaPhotonResponse = zSchemaQueueStatus + +export const zGetFalAiLumaPhotonRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLumaPhotonRequestsByRequestIdResponse = + zSchemaLumaPhotonOutput + +export const zGetFalAiLightningModelsRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiLightningModelsRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLightningModelsRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiLightningModelsRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLightningModelsData = z.object({ + body: zSchemaLightningModelsInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLightningModelsResponse = zSchemaQueueStatus + +export const zGetFalAiLightningModelsRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLightningModelsRequestsByRequestIdResponse = + zSchemaLightningModelsOutput + +export const zGetFalAiPlaygroundV25RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiPlaygroundV25RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPlaygroundV25RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiPlaygroundV25RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPlaygroundV25Data = z.object({ + body: zSchemaPlaygroundV25Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPlaygroundV25Response = zSchemaQueueStatus + +export const zGetFalAiPlaygroundV25RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPlaygroundV25RequestsByRequestIdResponse = + zSchemaPlaygroundV25Output + +export const zGetFalAiRealisticVisionRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiRealisticVisionRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiRealisticVisionRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiRealisticVisionRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiRealisticVisionData = z.object({ + body: zSchemaRealisticVisionInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiRealisticVisionResponse = zSchemaQueueStatus + +export const zGetFalAiRealisticVisionRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiRealisticVisionRequestsByRequestIdResponse = + zSchemaRealisticVisionOutput + +export const zGetFalAiDreamshaperRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiDreamshaperRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiDreamshaperRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiDreamshaperRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiDreamshaperData = z.object({ + body: zSchemaDreamshaperInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiDreamshaperResponse = zSchemaQueueStatus + +export const zGetFalAiDreamshaperRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiDreamshaperRequestsByRequestIdResponse = + zSchemaDreamshaperOutput + +export const zGetFalAiStableDiffusionV15RequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiStableDiffusionV15RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiStableDiffusionV15RequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiStableDiffusionV15RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiStableDiffusionV15Data = z.object({ + body: zSchemaStableDiffusionV15Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiStableDiffusionV15Response = zSchemaQueueStatus + +export const zGetFalAiStableDiffusionV15RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiStableDiffusionV15RequestsByRequestIdResponse = + zSchemaStableDiffusionV15Output + +export const zGetFalAiLayerDiffusionRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiLayerDiffusionRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLayerDiffusionRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiLayerDiffusionRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLayerDiffusionData = z.object({ + body: zSchemaLayerDiffusionInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiLayerDiffusionResponse = zSchemaQueueStatus + +export const zGetFalAiLayerDiffusionRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLayerDiffusionRequestsByRequestIdResponse = + zSchemaLayerDiffusionOutput + +export const zGetFalAiFastLightningSdxlRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiFastLightningSdxlRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFastLightningSdxlRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiFastLightningSdxlRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFastLightningSdxlData = z.object({ + body: zSchemaFastLightningSdxlInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFastLightningSdxlResponse = zSchemaQueueStatus + +export const zGetFalAiFastLightningSdxlRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFastLightningSdxlRequestsByRequestIdResponse = + zSchemaFastLightningSdxlOutput + +export const zGetFalAiFastFooocusSdxlImageToImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFastFooocusSdxlImageToImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFastFooocusSdxlImageToImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiFastFooocusSdxlImageToImageRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFastFooocusSdxlImageToImageData = z.object({ + body: zSchemaFastFooocusSdxlImageToImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFastFooocusSdxlImageToImageResponse = zSchemaQueueStatus + +export const zGetFalAiFastFooocusSdxlImageToImageRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiFastFooocusSdxlImageToImageRequestsByRequestIdResponse = + zSchemaFastFooocusSdxlImageToImageOutput + +export const zGetFalAiFastSdxlControlnetCannyRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFastSdxlControlnetCannyRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFastSdxlControlnetCannyRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFastSdxlControlnetCannyRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFastSdxlControlnetCannyData = z.object({ + body: zSchemaFastSdxlControlnetCannyInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFastSdxlControlnetCannyResponse = zSchemaQueueStatus + +export const zGetFalAiFastSdxlControlnetCannyRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. + */ +export const zGetFalAiFastSdxlControlnetCannyRequestsByRequestIdResponse = + zSchemaFastSdxlControlnetCannyOutput + +export const zGetFalAiFastLcmDiffusionRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiFastLcmDiffusionRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFastLcmDiffusionRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFastLcmDiffusionRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFastLcmDiffusionData = z.object({ + body: zSchemaFastLcmDiffusionInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFastLcmDiffusionResponse = zSchemaQueueStatus + +export const zGetFalAiFastLcmDiffusionRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFastLcmDiffusionRequestsByRequestIdResponse = + zSchemaFastLcmDiffusionOutput + +export const zGetFalAiFastFooocusSdxlRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFastFooocusSdxlRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFastFooocusSdxlRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFastFooocusSdxlRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFastFooocusSdxlData = z.object({ + body: zSchemaFastFooocusSdxlInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFastFooocusSdxlResponse = zSchemaQueueStatus + +export const zGetFalAiFastFooocusSdxlRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiFastFooocusSdxlRequestsByRequestIdResponse = + zSchemaFastFooocusSdxlOutput + +export const zGetFalAiIllusionDiffusionRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiIllusionDiffusionRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiIllusionDiffusionRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiIllusionDiffusionRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiIllusionDiffusionData = z.object({ + body: zSchemaIllusionDiffusionInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiIllusionDiffusionResponse = zSchemaQueueStatus + +export const zGetFalAiIllusionDiffusionRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiIllusionDiffusionRequestsByRequestIdResponse = + zSchemaIllusionDiffusionOutput + +export const zGetFalAiFooocusImagePromptRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFooocusImagePromptRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFooocusImagePromptRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFooocusImagePromptRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFooocusImagePromptData = z.object({ + body: zSchemaFooocusImagePromptInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiFooocusImagePromptResponse = zSchemaQueueStatus + +export const zGetFalAiFooocusImagePromptRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFooocusImagePromptRequestsByRequestIdResponse = + zSchemaFooocusImagePromptOutput + +export const zGetFalAiFooocusInpaintRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFooocusInpaintRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFooocusInpaintRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFooocusInpaintRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFooocusInpaintData = z.object({ + body: zSchemaFooocusInpaintInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFooocusInpaintResponse = zSchemaQueueStatus + +export const zGetFalAiFooocusInpaintRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFooocusInpaintRequestsByRequestIdResponse = + zSchemaFooocusInpaintOutput + +export const zGetFalAiLcmRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiLcmRequestsByRequestIdStatusResponse = zSchemaQueueStatus + +export const zPutFalAiLcmRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiLcmRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLcmData = z.object({ + body: zSchemaLcmInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLcmResponse = zSchemaQueueStatus + +export const zGetFalAiLcmRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLcmRequestsByRequestIdResponse = zSchemaLcmOutput + +export const zGetFalAiDiffusionEdgeRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiDiffusionEdgeRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiDiffusionEdgeRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiDiffusionEdgeRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiDiffusionEdgeData = z.object({ + body: zSchemaDiffusionEdgeInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiDiffusionEdgeResponse = zSchemaQueueStatus + +export const zGetFalAiDiffusionEdgeRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiDiffusionEdgeRequestsByRequestIdResponse = + zSchemaDiffusionEdgeOutput + +export const zGetFalAiFooocusRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiFooocusRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFooocusRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFooocusRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFooocusData = z.object({ + body: zSchemaFooocusInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFooocusResponse = zSchemaQueueStatus + +export const zGetFalAiFooocusRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFooocusRequestsByRequestIdResponse = zSchemaFooocusOutput + +export const zGetFalAiLoraRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiLoraRequestsByRequestIdStatusResponse = zSchemaQueueStatus + +export const zPutFalAiLoraRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiLoraRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLoraData = z.object({ + body: zSchemaLoraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLoraResponse = zSchemaQueueStatus + +export const zGetFalAiLoraRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiLoraRequestsByRequestIdResponse = zSchemaLoraOutput diff --git a/packages/typescript/ai-fal/src/generated/text-to-json/endpoint-map.ts b/packages/typescript/ai-fal/src/generated/text-to-json/endpoint-map.ts new file mode 100644 index 00000000..15525f2a --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/text-to-json/endpoint-map.ts @@ -0,0 +1,78 @@ +// AUTO-GENERATED - Do not edit manually +// Generated from types.gen.ts via scripts/generate-fal-endpoint-maps.ts + +import { + zSchemaFiboEditEditStructuredInstructionInput, + zSchemaFiboEditEditStructuredInstructionOutput, + zSchemaFiboGenerateStructuredPromptInput, + zSchemaFiboGenerateStructuredPromptOutput, + zSchemaFiboLiteGenerateStructuredPromptInput, + zSchemaFiboLiteGenerateStructuredPromptLiteInput, + zSchemaFiboLiteGenerateStructuredPromptLiteOutput, + zSchemaFiboLiteGenerateStructuredPromptOutput, +} from './zod.gen' + +import type { + SchemaFiboEditEditStructuredInstructionInput, + SchemaFiboEditEditStructuredInstructionOutput, + SchemaFiboGenerateStructuredPromptInput, + SchemaFiboGenerateStructuredPromptOutput, + SchemaFiboLiteGenerateStructuredPromptInput, + SchemaFiboLiteGenerateStructuredPromptLiteInput, + SchemaFiboLiteGenerateStructuredPromptLiteOutput, + SchemaFiboLiteGenerateStructuredPromptOutput, +} from './types.gen' + +import type { z } from 'zod' + +export type TextToJsonEndpointMap = { + 'bria/fibo-edit/edit/structured_instruction': { + input: SchemaFiboEditEditStructuredInstructionInput + output: SchemaFiboEditEditStructuredInstructionOutput + } + 'bria/fibo-lite/generate/structured_prompt': { + input: SchemaFiboLiteGenerateStructuredPromptInput + output: SchemaFiboLiteGenerateStructuredPromptOutput + } + 'bria/fibo-lite/generate/structured_prompt/lite': { + input: SchemaFiboLiteGenerateStructuredPromptLiteInput + output: SchemaFiboLiteGenerateStructuredPromptLiteOutput + } + 'bria/fibo/generate/structured_prompt': { + input: SchemaFiboGenerateStructuredPromptInput + output: SchemaFiboGenerateStructuredPromptOutput + } +} + +/** Union type of all text-to-json model endpoint IDs */ +export type TextToJsonModel = keyof TextToJsonEndpointMap + +export const TextToJsonSchemaMap: Record< + TextToJsonModel, + { input: z.ZodSchema; output: z.ZodSchema } +> = { + ['bria/fibo-edit/edit/structured_instruction']: { + input: zSchemaFiboEditEditStructuredInstructionInput, + output: zSchemaFiboEditEditStructuredInstructionOutput, + }, + ['bria/fibo-lite/generate/structured_prompt']: { + input: zSchemaFiboLiteGenerateStructuredPromptInput, + output: zSchemaFiboLiteGenerateStructuredPromptOutput, + }, + ['bria/fibo-lite/generate/structured_prompt/lite']: { + input: zSchemaFiboLiteGenerateStructuredPromptLiteInput, + output: zSchemaFiboLiteGenerateStructuredPromptLiteOutput, + }, + ['bria/fibo/generate/structured_prompt']: { + input: zSchemaFiboGenerateStructuredPromptInput, + output: zSchemaFiboGenerateStructuredPromptOutput, + }, +} as const + +/** Get the input type for a specific text-to-json model */ +export type TextToJsonModelInput<T extends TextToJsonModel> = + TextToJsonEndpointMap[T]['input'] + +/** Get the output type for a specific text-to-json model */ +export type TextToJsonModelOutput<T extends TextToJsonModel> = + TextToJsonEndpointMap[T]['output'] diff --git a/packages/typescript/ai-fal/src/generated/text-to-json/types.gen.ts b/packages/typescript/ai-fal/src/generated/text-to-json/types.gen.ts new file mode 100644 index 00000000..7675f1cb --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/text-to-json/types.gen.ts @@
-0,0 +1,889 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: 'https://queue.fal.run' | (string & {}) +} + +export type SchemaFiboGenerateStructuredPromptOutput = unknown + +/** + * StructuredPromptModel + */ +export type SchemaFiboGenerateStructuredPromptInput = { + /** + * Prompt + * + * Prompt for image generation. + */ + prompt?: string | unknown + /** + * Seed + * + * Random seed for reproducibility. + */ + seed?: number + /** + * The structured prompt to generate an image from. + */ + structured_prompt?: SchemaStructuredPrompt | unknown + /** + * Image Url + * + * Reference image (file or URL). + */ + image_url?: string | unknown +} + +/** + * Lighting + */ +export type SchemaLighting = { + /** + * Shadows + * + * The shadows in the image to be generated. + */ + shadows?: string | unknown + /** + * Conditions + * + * The conditions of the lighting in the image to be generated. + */ + conditions?: string | unknown + /** + * Direction + * + * The direction of the lighting in the image to be generated. + */ + direction?: string | unknown +} + +/** + * PhotographicCharacteristics + */ +export type SchemaPhotographicCharacteristics = { + /** + * Focus + * + * The focus in the image to be generated. + */ + focus?: string | unknown + /** + * Lens Focal Length + * + * The focal length of the lens in the image to be generated. + */ + lens_focal_length?: string | unknown + /** + * Camera Angle + * + * The angle of the camera in the image to be generated. + */ + camera_angle?: string | unknown + /** + * Depth Of Field + * + * The depth of field in the image to be generated. + */ + depth_of_field?: string | unknown +} + +/** + * PromptObject + */ +export type SchemaPromptObject = { + /** + * Clothing + * + * The clothing of the object in the image. + */ + clothing?: string | unknown + /** + * Description + * + * A description of the object to be generated. + */ + description?: string | unknown + /** + * Skin Tone And Texture + * + * The skin tone and texture of the object in the image. + */ + skin_tone_and_texture?: string | unknown + /** + * Appearance Details + * + * The appearance details of the object. + */ + appearance_details?: string | unknown + /** + * Number Of Objects + * + * The number of objects in the image. + */ + number_of_objects?: number | unknown + /** + * Expression + * + * The expression of the object in the image. + */ + expression?: string | unknown + /** + * Pose + * + * The pose of the object in the image. + */ + pose?: string | unknown + /** + * Shape And Color + * + * The shape and color of the object. + */ + shape_and_color?: string | unknown + /** + * Relationship + * + * The relationship of the object to other objects in the image. + */ + relationship: string + /** + * Texture + * + * The texture of the object. + */ + texture?: string | unknown + /** + * Gender + * + * The gender of the object in the image. + */ + gender?: string | unknown + /** + * Relative Size + * + * The relative size of the object in the image. + */ + relative_size?: string | unknown + /** + * Location + * + * The location of the object in the image. + */ + location?: string | unknown + /** + * Orientation + * + * The orientation of the object in the image. + */ + orientation?: string | unknown + /** + * Action + * + * The action of the object in the image. + */ + action?: string | unknown +} + +/** + * Aesthetics + */ +export type SchemaAesthetics = { + /** + * Composition + * + * The composition of the image to be generated. 
+ */ + composition?: string | unknown + /** + * Mood Atmosphere + * + * The mood and atmosphere of the image to be generated. + */ + mood_atmosphere?: string | unknown + /** + * Color Scheme + * + * The color scheme of the image to be generated. + */ + color_scheme?: string | unknown +} + +/** + * StructuredPrompt + */ +export type SchemaStructuredPrompt = { + /** + * Background Setting + * + * The background setting of the image to be generated. + */ + background_setting?: string | unknown + /** + * Artistic Style + * + * The artistic style of the image to be generated. + */ + artistic_style?: string | unknown + /** + * The aesthetics of the image to be generated. + */ + aesthetics?: SchemaAesthetics | unknown + /** + * Text Render + * + * A list of text to be rendered in the image. + */ + text_render?: Array<string> | unknown + /** + * Objects + * + * A list of objects in the image to be generated, along with their attributes and relationships to other objects in the image. + */ + objects?: Array<SchemaPromptObject> | unknown + /** + * Style Medium + * + * The style medium of the image to be generated. + */ + style_medium?: string | unknown + /** + * The photographic characteristics of the image to be generated. + */ + photographic_characteristics?: SchemaPhotographicCharacteristics | unknown + /** + * Context + * + * The context of the image to be generated. + */ + context?: string | unknown + /** + * The lighting of the image to be generated. + */ + lighting?: SchemaLighting | unknown + /** + * Short Description + * + * A short description of the image to be generated. + */ + short_description?: string | unknown +} + +/** + * Aesthetics + */ +export type SchemaBriaFiboVlmAesthetics = { + /** + * Preference Score + * + * The preference score of the image. + */ + preference_score: string + /** + * Composition + * + * The composition of the image to be generated. + */ + composition?: string | unknown + /** + * Mood Atmosphere + * + * The mood and atmosphere of the image to be generated. + */ + mood_atmosphere?: string | unknown + /** + * Aesthetic Score + * + * The aesthetic score of the image. + */ + aesthetic_score: string + /** + * Color Scheme + * + * The color scheme of the image to be generated. + */ + color_scheme?: string | unknown +} + +/** + * StructuredPrompt + */ +export type SchemaBriaFiboVlmStructuredPrompt = { + /** + * Background Setting + * + * The background setting of the image to be generated. + */ + background_setting?: string | unknown + /** + * Artistic Style + * + * The artistic style of the image to be generated. + */ + artistic_style?: string | unknown + /** + * Style Medium + * + * The style medium of the image to be generated. + */ + style_medium?: string | unknown + /** + * Text Render + * + * A list of text to be rendered in the image. + */ + text_render?: Array<string> | unknown + /** + * Objects + * + * A list of objects in the image to be generated, along with their attributes and relationships to other objects in the image. + */ + objects?: Array<SchemaPromptObject> | unknown + /** + * Context + * + * The context of the image to be generated. + */ + context?: string | unknown + /** + * The photographic characteristics of the image to be generated. + */ + photographic_characteristics?: SchemaPhotographicCharacteristics | unknown + /** + * The aesthetics of the image to be generated. + */ + aesthetics?: SchemaBriaFiboVlmAesthetics | unknown + /** + * The lighting of the image to be generated.
+ */ + lighting?: SchemaLighting | unknown + /** + * Short Description + * + * A short description of the image to be generated. + */ + short_description?: string | unknown +} + +export type SchemaFiboLiteGenerateStructuredPromptLiteOutput = unknown + +/** + * StructuredPromptModel + */ +export type SchemaFiboLiteGenerateStructuredPromptLiteInput = { + /** + * Prompt + * + * Prompt for image generation. + */ + prompt?: string | unknown + /** + * Seed + * + * Random seed for reproducibility. + */ + seed?: number + /** + * The structured prompt to generate an image from. + */ + structured_prompt?: SchemaBriaFiboVlmStructuredPrompt | unknown + /** + * Image Url + * + * Reference image (file or URL). + */ + image_url?: string | unknown +} + +export type SchemaFiboLiteGenerateStructuredPromptOutput = unknown + +/** + * StructuredPromptModel + */ +export type SchemaFiboLiteGenerateStructuredPromptInput = { + /** + * Prompt + * + * Prompt for image generation. + */ + prompt?: string | unknown + /** + * Seed + * + * Random seed for reproducibility. + */ + seed?: number + /** + * The structured prompt to generate an image from. + */ + structured_prompt?: SchemaStructuredPrompt | unknown + /** + * Image Url + * + * Reference image (file or URL). + */ + image_url?: string | unknown +} + +export type SchemaFiboEditEditStructuredInstructionOutput = unknown + +/** + * StructuredInstructionInputModel + */ +export type SchemaFiboEditEditStructuredInstructionInput = { + /** + * Sync Mode + * + * If true, returns the image directly in the response (increases latency). + */ + sync_mode?: boolean + /** + * Seed + * + * Random seed for reproducibility. + */ + seed?: number + /** + * Mask Url + * + * Reference image mask (file or URL). Optional. + */ + mask_url?: string | unknown + /** + * Instruction + * + * Instruction for image editing. + */ + instruction?: string | unknown + /** + * Image Url + * + * Reference image (file or URL). + */ + image_url?: string | unknown +} + +export type SchemaQueueStatus = { + status: 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED' + /** + * The request id. + */ + request_id: string + /** + * The response url. + */ + response_url?: string + /** + * The status url. + */ + status_url?: string + /** + * The cancel url. + */ + cancel_url?: string + /** + * The logs. + */ + logs?: { + [key: string]: unknown + } + /** + * The metrics. + */ + metrics?: { + [key: string]: unknown + } + /** + * The queue position. + */ + queue_position?: number +} + +export type GetBriaFiboEditEditStructuredInstructionRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/bria/fibo-edit/edit/structured_instruction/requests/{request_id}/status' + } + +export type GetBriaFiboEditEditStructuredInstructionRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetBriaFiboEditEditStructuredInstructionRequestsByRequestIdStatusResponse = + GetBriaFiboEditEditStructuredInstructionRequestsByRequestIdStatusResponses[keyof GetBriaFiboEditEditStructuredInstructionRequestsByRequestIdStatusResponses] + +export type PutBriaFiboEditEditStructuredInstructionRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/fibo-edit/edit/structured_instruction/requests/{request_id}/cancel' + } + +export type PutBriaFiboEditEditStructuredInstructionRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutBriaFiboEditEditStructuredInstructionRequestsByRequestIdCancelResponse = + PutBriaFiboEditEditStructuredInstructionRequestsByRequestIdCancelResponses[keyof PutBriaFiboEditEditStructuredInstructionRequestsByRequestIdCancelResponses] + +export type PostBriaFiboEditEditStructuredInstructionData = { + body: SchemaFiboEditEditStructuredInstructionInput + path?: never + query?: never + url: '/bria/fibo-edit/edit/structured_instruction' +} + +export type PostBriaFiboEditEditStructuredInstructionResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostBriaFiboEditEditStructuredInstructionResponse = + PostBriaFiboEditEditStructuredInstructionResponses[keyof PostBriaFiboEditEditStructuredInstructionResponses] + +export type GetBriaFiboEditEditStructuredInstructionRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/fibo-edit/edit/structured_instruction/requests/{request_id}' +} + +export type GetBriaFiboEditEditStructuredInstructionRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaFiboEditEditStructuredInstructionOutput + } + +export type GetBriaFiboEditEditStructuredInstructionRequestsByRequestIdResponse = + GetBriaFiboEditEditStructuredInstructionRequestsByRequestIdResponses[keyof GetBriaFiboEditEditStructuredInstructionRequestsByRequestIdResponses] + +export type GetBriaFiboLiteGenerateStructuredPromptRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/bria/fibo-lite/generate/structured_prompt/requests/{request_id}/status' + } + +export type GetBriaFiboLiteGenerateStructuredPromptRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetBriaFiboLiteGenerateStructuredPromptRequestsByRequestIdStatusResponse = + GetBriaFiboLiteGenerateStructuredPromptRequestsByRequestIdStatusResponses[keyof GetBriaFiboLiteGenerateStructuredPromptRequestsByRequestIdStatusResponses] + +export type PutBriaFiboLiteGenerateStructuredPromptRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/fibo-lite/generate/structured_prompt/requests/{request_id}/cancel' + } + +export type PutBriaFiboLiteGenerateStructuredPromptRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutBriaFiboLiteGenerateStructuredPromptRequestsByRequestIdCancelResponse = + PutBriaFiboLiteGenerateStructuredPromptRequestsByRequestIdCancelResponses[keyof PutBriaFiboLiteGenerateStructuredPromptRequestsByRequestIdCancelResponses] + +export type PostBriaFiboLiteGenerateStructuredPromptData = { + body: SchemaFiboLiteGenerateStructuredPromptInput + path?: never + query?: never + url: '/bria/fibo-lite/generate/structured_prompt' +} + +export type PostBriaFiboLiteGenerateStructuredPromptResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostBriaFiboLiteGenerateStructuredPromptResponse = + PostBriaFiboLiteGenerateStructuredPromptResponses[keyof PostBriaFiboLiteGenerateStructuredPromptResponses] + +export type GetBriaFiboLiteGenerateStructuredPromptRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/fibo-lite/generate/structured_prompt/requests/{request_id}' +} + +export type GetBriaFiboLiteGenerateStructuredPromptRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaFiboLiteGenerateStructuredPromptOutput + } + +export type GetBriaFiboLiteGenerateStructuredPromptRequestsByRequestIdResponse = + GetBriaFiboLiteGenerateStructuredPromptRequestsByRequestIdResponses[keyof GetBriaFiboLiteGenerateStructuredPromptRequestsByRequestIdResponses] + +export type GetBriaFiboLiteGenerateStructuredPromptLiteRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/bria/fibo-lite/generate/structured_prompt/lite/requests/{request_id}/status' + } + +export type GetBriaFiboLiteGenerateStructuredPromptLiteRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetBriaFiboLiteGenerateStructuredPromptLiteRequestsByRequestIdStatusResponse = + GetBriaFiboLiteGenerateStructuredPromptLiteRequestsByRequestIdStatusResponses[keyof GetBriaFiboLiteGenerateStructuredPromptLiteRequestsByRequestIdStatusResponses] + +export type PutBriaFiboLiteGenerateStructuredPromptLiteRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/fibo-lite/generate/structured_prompt/lite/requests/{request_id}/cancel' + } + +export type PutBriaFiboLiteGenerateStructuredPromptLiteRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutBriaFiboLiteGenerateStructuredPromptLiteRequestsByRequestIdCancelResponse = + PutBriaFiboLiteGenerateStructuredPromptLiteRequestsByRequestIdCancelResponses[keyof PutBriaFiboLiteGenerateStructuredPromptLiteRequestsByRequestIdCancelResponses] + +export type PostBriaFiboLiteGenerateStructuredPromptLiteData = { + body: SchemaFiboLiteGenerateStructuredPromptLiteInput + path?: never + query?: never + url: '/bria/fibo-lite/generate/structured_prompt/lite' +} + +export type PostBriaFiboLiteGenerateStructuredPromptLiteResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostBriaFiboLiteGenerateStructuredPromptLiteResponse = + PostBriaFiboLiteGenerateStructuredPromptLiteResponses[keyof PostBriaFiboLiteGenerateStructuredPromptLiteResponses] + +export type GetBriaFiboLiteGenerateStructuredPromptLiteRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/fibo-lite/generate/structured_prompt/lite/requests/{request_id}' + } + +export type GetBriaFiboLiteGenerateStructuredPromptLiteRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaFiboLiteGenerateStructuredPromptLiteOutput + } + +export type GetBriaFiboLiteGenerateStructuredPromptLiteRequestsByRequestIdResponse = + GetBriaFiboLiteGenerateStructuredPromptLiteRequestsByRequestIdResponses[keyof GetBriaFiboLiteGenerateStructuredPromptLiteRequestsByRequestIdResponses] + +export type GetBriaFiboGenerateStructuredPromptRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/bria/fibo/generate/structured_prompt/requests/{request_id}/status' +} + +export type GetBriaFiboGenerateStructuredPromptRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetBriaFiboGenerateStructuredPromptRequestsByRequestIdStatusResponse = + GetBriaFiboGenerateStructuredPromptRequestsByRequestIdStatusResponses[keyof GetBriaFiboGenerateStructuredPromptRequestsByRequestIdStatusResponses] + +export type PutBriaFiboGenerateStructuredPromptRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/fibo/generate/structured_prompt/requests/{request_id}/cancel' +} + +export type PutBriaFiboGenerateStructuredPromptRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutBriaFiboGenerateStructuredPromptRequestsByRequestIdCancelResponse = + PutBriaFiboGenerateStructuredPromptRequestsByRequestIdCancelResponses[keyof PutBriaFiboGenerateStructuredPromptRequestsByRequestIdCancelResponses] + +export type PostBriaFiboGenerateStructuredPromptData = { + body: SchemaFiboGenerateStructuredPromptInput + path?: never + query?: never + url: '/bria/fibo/generate/structured_prompt' +} + +export type PostBriaFiboGenerateStructuredPromptResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostBriaFiboGenerateStructuredPromptResponse = + PostBriaFiboGenerateStructuredPromptResponses[keyof PostBriaFiboGenerateStructuredPromptResponses] + +export type GetBriaFiboGenerateStructuredPromptRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/fibo/generate/structured_prompt/requests/{request_id}' +} + +export type GetBriaFiboGenerateStructuredPromptRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaFiboGenerateStructuredPromptOutput +} + +export type GetBriaFiboGenerateStructuredPromptRequestsByRequestIdResponse = + GetBriaFiboGenerateStructuredPromptRequestsByRequestIdResponses[keyof GetBriaFiboGenerateStructuredPromptRequestsByRequestIdResponses] diff --git a/packages/typescript/ai-fal/src/generated/text-to-json/zod.gen.ts b/packages/typescript/ai-fal/src/generated/text-to-json/zod.gen.ts new file mode 100644 index 00000000..612293e4 --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/text-to-json/zod.gen.ts @@ -0,0 +1,556 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { z } from 'zod' + +export const zSchemaFiboGenerateStructuredPromptOutput = z.unknown() + +/** + * Lighting + */ +export const zSchemaLighting = z.object({ + shadows: z.optional(z.union([z.string(), z.unknown()])), + conditions: z.optional(z.union([z.string(), z.unknown()])), + direction: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * PhotographicCharacteristics + */ +export const zSchemaPhotographicCharacteristics = z.object({ + focus: z.optional(z.union([z.string(), z.unknown()])), + lens_focal_length: z.optional(z.union([z.string(), z.unknown()])), + camera_angle: z.optional(z.union([z.string(), z.unknown()])), + depth_of_field: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * PromptObject + */ +export const zSchemaPromptObject = z.object({ + clothing: z.optional(z.union([z.string(), z.unknown()])), + description: z.optional(z.union([z.string(), z.unknown()])), + skin_tone_and_texture: z.optional(z.union([z.string(), z.unknown()])), + appearance_details: z.optional(z.union([z.string(), z.unknown()])), + number_of_objects: z.optional(z.union([z.int(), z.unknown()])), + expression: z.optional(z.union([z.string(), z.unknown()])), + pose: z.optional(z.union([z.string(), z.unknown()])), + shape_and_color: z.optional(z.union([z.string(), z.unknown()])), + relationship: z.string().register(z.globalRegistry, { + description: + 'The relationship of the object to other objects in the image.', + }), + texture: z.optional(z.union([z.string(), z.unknown()])), + gender: z.optional(z.union([z.string(), z.unknown()])), + relative_size: z.optional(z.union([z.string(), z.unknown()])), + location: z.optional(z.union([z.string(), z.unknown()])), + orientation: z.optional(z.union([z.string(), z.unknown()])), + action: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * Aesthetics + */ +export const zSchemaAesthetics = z.object({ + composition: z.optional(z.union([z.string(), z.unknown()])), + mood_atmosphere: z.optional(z.union([z.string(), z.unknown()])), + color_scheme: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * StructuredPrompt + */ +export const zSchemaStructuredPrompt = z.object({ + background_setting: z.optional(z.union([z.string(), z.unknown()])), + artistic_style: z.optional(z.union([z.string(), z.unknown()])), + aesthetics: z.optional(z.union([zSchemaAesthetics, z.unknown()])), + text_render: z.optional(z.union([z.array(z.unknown()), z.unknown()])), + objects: z.optional(z.union([z.array(zSchemaPromptObject), z.unknown()])), + style_medium: z.optional(z.union([z.string(), z.unknown()])), + photographic_characteristics: z.optional( + z.union([zSchemaPhotographicCharacteristics, z.unknown()]), + ), + context: z.optional(z.union([z.string(), z.unknown()])), + lighting: z.optional(z.union([zSchemaLighting, z.unknown()])), + short_description: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * 
StructuredPromptModel + */ +export const zSchemaFiboGenerateStructuredPromptInput = z.object({ + prompt: z.optional(z.union([z.string(), z.unknown()])), + seed: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for reproducibility.', + }), + ) + .default(5555), + structured_prompt: z.optional( + z.union([zSchemaStructuredPrompt, z.unknown()]), + ), + image_url: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * Aesthetics + */ +export const zSchemaBriaFiboVlmAesthetics = z.object({ + preference_score: z.string().register(z.globalRegistry, { + description: 'The preference score of the image.', + }), + composition: z.optional(z.union([z.string(), z.unknown()])), + mood_atmosphere: z.optional(z.union([z.string(), z.unknown()])), + aesthetic_score: z.string().register(z.globalRegistry, { + description: 'The aesthetic score of the image.', + }), + color_scheme: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * StructuredPrompt + */ +export const zSchemaBriaFiboVlmStructuredPrompt = z.object({ + background_setting: z.optional(z.union([z.string(), z.unknown()])), + artistic_style: z.optional(z.union([z.string(), z.unknown()])), + style_medium: z.optional(z.union([z.string(), z.unknown()])), + text_render: z.optional(z.union([z.array(z.unknown()), z.unknown()])), + objects: z.optional(z.union([z.array(zSchemaPromptObject), z.unknown()])), + context: z.optional(z.union([z.string(), z.unknown()])), + photographic_characteristics: z.optional( + z.union([zSchemaPhotographicCharacteristics, z.unknown()]), + ), + aesthetics: z.optional(z.union([zSchemaBriaFiboVlmAesthetics, z.unknown()])), + lighting: z.optional(z.union([zSchemaLighting, z.unknown()])), + short_description: z.optional(z.union([z.string(), z.unknown()])), +}) + +export const zSchemaFiboLiteGenerateStructuredPromptLiteOutput = z.unknown() + +/** + * StructuredPromptModel + */ +export const zSchemaFiboLiteGenerateStructuredPromptLiteInput = z.object({ + prompt: z.optional(z.union([z.string(), z.unknown()])), + seed: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for reproducibility.', + }), + ) + .default(5555), + structured_prompt: z.optional( + z.union([zSchemaBriaFiboVlmStructuredPrompt, z.unknown()]), + ), + image_url: z.optional(z.union([z.string(), z.unknown()])), +}) + +export const zSchemaFiboLiteGenerateStructuredPromptOutput = z.unknown() + +/** + * StructuredPromptModel + */ +export const zSchemaFiboLiteGenerateStructuredPromptInput = z.object({ + prompt: z.optional(z.union([z.string(), z.unknown()])), + seed: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for reproducibility.', + }), + ) + .default(5555), + structured_prompt: z.optional( + z.union([zSchemaStructuredPrompt, z.unknown()]), + ), + image_url: z.optional(z.union([z.string(), z.unknown()])), +}) + +export const zSchemaFiboEditEditStructuredInstructionOutput = z.unknown() + +/** + * StructuredInstructionInputModel + */ +export const zSchemaFiboEditEditStructuredInstructionInput = z.object({ + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, returns the image directly in the response (increases latency).', + }), + ) + .default(false), + seed: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for reproducibility.', + }), + ) + .default(5555), + mask_url: z.optional(z.union([z.string(), z.unknown()])), + instruction: z.optional(z.union([z.string(), z.unknown()])), + 
image_url: z.optional(z.union([z.string(), z.unknown()])), +}) + +export const zSchemaQueueStatus = z.object({ + status: z.enum(['IN_QUEUE', 'IN_PROGRESS', 'COMPLETED']), + request_id: z.string().register(z.globalRegistry, { + description: 'The request id.', + }), + response_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The response url.', + }), + ), + status_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The status url.', + }), + ), + cancel_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The cancel url.', + }), + ), + logs: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The logs.', + }), + ), + metrics: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The metrics.', + }), + ), + queue_position: z.optional( + z.int().register(z.globalRegistry, { + description: 'The queue position.', + }), + ), +}) + +export const zGetBriaFiboEditEditStructuredInstructionRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetBriaFiboEditEditStructuredInstructionRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutBriaFiboEditEditStructuredInstructionRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutBriaFiboEditEditStructuredInstructionRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostBriaFiboEditEditStructuredInstructionData = z.object({ + body: zSchemaFiboEditEditStructuredInstructionInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostBriaFiboEditEditStructuredInstructionResponse = + zSchemaQueueStatus + +export const zGetBriaFiboEditEditStructuredInstructionRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetBriaFiboEditEditStructuredInstructionRequestsByRequestIdResponse = + zSchemaFiboEditEditStructuredInstructionOutput + +export const zGetBriaFiboLiteGenerateStructuredPromptRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetBriaFiboLiteGenerateStructuredPromptRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutBriaFiboLiteGenerateStructuredPromptRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutBriaFiboLiteGenerateStructuredPromptRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostBriaFiboLiteGenerateStructuredPromptData = z.object({ + body: zSchemaFiboLiteGenerateStructuredPromptInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostBriaFiboLiteGenerateStructuredPromptResponse = + zSchemaQueueStatus + +export const zGetBriaFiboLiteGenerateStructuredPromptRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetBriaFiboLiteGenerateStructuredPromptRequestsByRequestIdResponse = + zSchemaFiboLiteGenerateStructuredPromptOutput + +export const zGetBriaFiboLiteGenerateStructuredPromptLiteRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetBriaFiboLiteGenerateStructuredPromptLiteRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutBriaFiboLiteGenerateStructuredPromptLiteRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutBriaFiboLiteGenerateStructuredPromptLiteRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostBriaFiboLiteGenerateStructuredPromptLiteData = z.object({ + body: zSchemaFiboLiteGenerateStructuredPromptLiteInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostBriaFiboLiteGenerateStructuredPromptLiteResponse = + zSchemaQueueStatus + +export const zGetBriaFiboLiteGenerateStructuredPromptLiteRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetBriaFiboLiteGenerateStructuredPromptLiteRequestsByRequestIdResponse = + zSchemaFiboLiteGenerateStructuredPromptLiteOutput + +export const zGetBriaFiboGenerateStructuredPromptRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetBriaFiboGenerateStructuredPromptRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutBriaFiboGenerateStructuredPromptRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutBriaFiboGenerateStructuredPromptRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostBriaFiboGenerateStructuredPromptData = z.object({ + body: zSchemaFiboGenerateStructuredPromptInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostBriaFiboGenerateStructuredPromptResponse = zSchemaQueueStatus + +export const zGetBriaFiboGenerateStructuredPromptRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetBriaFiboGenerateStructuredPromptRequestsByRequestIdResponse = + zSchemaFiboGenerateStructuredPromptOutput diff --git a/packages/typescript/ai-fal/src/generated/text-to-speech/endpoint-map.ts b/packages/typescript/ai-fal/src/generated/text-to-speech/endpoint-map.ts new file mode 100644 index 00000000..ad61cef7 --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/text-to-speech/endpoint-map.ts @@ -0,0 +1,330 @@ +// AUTO-GENERATED - Do not edit manually +// Generated from types.gen.ts via scripts/generate-fal-endpoint-maps.ts + +import { + zSchemaChatterboxTextToSpeechInput, + zSchemaChatterboxTextToSpeechMultilingualInput, + zSchemaChatterboxTextToSpeechMultilingualOutput, + zSchemaChatterboxTextToSpeechOutput, + zSchemaChatterboxhdTextToSpeechInput, + zSchemaChatterboxhdTextToSpeechOutput, + zSchemaDiaTtsInput, + zSchemaDiaTtsOutput, + zSchemaElevenlabsTtsTurboV25Input, + zSchemaElevenlabsTtsTurboV25Output, + zSchemaIndexTts2TextToSpeechInput, + zSchemaIndexTts2TextToSpeechOutput, + zSchemaKlingVideoV1TtsInput, + zSchemaKlingVideoV1TtsOutput, + zSchemaMayaBatchInput, + zSchemaMayaBatchOutput, + zSchemaMayaInput, + zSchemaMayaOutput, + zSchemaMayaStreamInput, + zSchemaMayaStreamOutput, + zSchemaMinimaxPreviewSpeech25HdInput, + zSchemaMinimaxPreviewSpeech25HdOutput, + zSchemaMinimaxPreviewSpeech25TurboInput, + zSchemaMinimaxPreviewSpeech25TurboOutput, + zSchemaMinimaxSpeech02HdInput, + zSchemaMinimaxSpeech02HdOutput, + zSchemaMinimaxSpeech02TurboInput, + zSchemaMinimaxSpeech02TurboOutput, + zSchemaMinimaxSpeech26HdInput, + zSchemaMinimaxSpeech26HdOutput, + zSchemaMinimaxSpeech26TurboInput, + zSchemaMinimaxSpeech26TurboOutput, + zSchemaMinimaxVoiceCloneInput, + zSchemaMinimaxVoiceCloneOutput, + zSchemaMinimaxVoiceDesignInput, + zSchemaMinimaxVoiceDesignOutput, + zSchemaOrpheusTtsInput, + zSchemaOrpheusTtsOutput, + zSchemaQwen3TtsTextToSpeech06bInput, + zSchemaQwen3TtsTextToSpeech06bOutput, + zSchemaQwen3TtsTextToSpeech17bInput, + zSchemaQwen3TtsTextToSpeech17bOutput, + zSchemaQwen3TtsVoiceDesign17bInput, + zSchemaQwen3TtsVoiceDesign17bOutput, + zSchemaVibevoice05bInput, + zSchemaVibevoice05bOutput, + zSchemaVibevoice7bInput, + zSchemaVibevoice7bOutput, + zSchemaVibevoiceInput, + zSchemaVibevoiceOutput, +} from './zod.gen' + +import type { + SchemaChatterboxTextToSpeechInput, + SchemaChatterboxTextToSpeechMultilingualInput, + SchemaChatterboxTextToSpeechMultilingualOutput, + SchemaChatterboxTextToSpeechOutput, + SchemaChatterboxhdTextToSpeechInput, + SchemaChatterboxhdTextToSpeechOutput, + SchemaDiaTtsInput, + SchemaDiaTtsOutput, + SchemaElevenlabsTtsTurboV25Input, + SchemaElevenlabsTtsTurboV25Output, + SchemaIndexTts2TextToSpeechInput, + SchemaIndexTts2TextToSpeechOutput, + SchemaKlingVideoV1TtsInput, + SchemaKlingVideoV1TtsOutput, + SchemaMayaBatchInput, + SchemaMayaBatchOutput, + SchemaMayaInput, + SchemaMayaOutput, + SchemaMayaStreamInput, + SchemaMayaStreamOutput, + SchemaMinimaxPreviewSpeech25HdInput, + SchemaMinimaxPreviewSpeech25HdOutput, + SchemaMinimaxPreviewSpeech25TurboInput, + SchemaMinimaxPreviewSpeech25TurboOutput, + SchemaMinimaxSpeech02HdInput, + SchemaMinimaxSpeech02HdOutput, + SchemaMinimaxSpeech02TurboInput, + SchemaMinimaxSpeech02TurboOutput, + SchemaMinimaxSpeech26HdInput, + SchemaMinimaxSpeech26HdOutput, + SchemaMinimaxSpeech26TurboInput, + SchemaMinimaxSpeech26TurboOutput, + SchemaMinimaxVoiceCloneInput, + SchemaMinimaxVoiceCloneOutput, + SchemaMinimaxVoiceDesignInput, + SchemaMinimaxVoiceDesignOutput, + SchemaOrpheusTtsInput, + 
SchemaOrpheusTtsOutput, + SchemaQwen3TtsTextToSpeech06bInput, + SchemaQwen3TtsTextToSpeech06bOutput, + SchemaQwen3TtsTextToSpeech17bInput, + SchemaQwen3TtsTextToSpeech17bOutput, + SchemaQwen3TtsVoiceDesign17bInput, + SchemaQwen3TtsVoiceDesign17bOutput, + SchemaVibevoice05bInput, + SchemaVibevoice05bOutput, + SchemaVibevoice7bInput, + SchemaVibevoice7bOutput, + SchemaVibevoiceInput, + SchemaVibevoiceOutput, +} from './types.gen' + +import type { z } from 'zod' + +export type TextToSpeechEndpointMap = { + 'fal-ai/qwen-3-tts/voice-design/1.7b': { + input: SchemaQwen3TtsVoiceDesign17bInput + output: SchemaQwen3TtsVoiceDesign17bOutput + } + 'fal-ai/qwen-3-tts/text-to-speech/1.7b': { + input: SchemaQwen3TtsTextToSpeech17bInput + output: SchemaQwen3TtsTextToSpeech17bOutput + } + 'fal-ai/qwen-3-tts/text-to-speech/0.6b': { + input: SchemaQwen3TtsTextToSpeech06bInput + output: SchemaQwen3TtsTextToSpeech06bOutput + } + 'fal-ai/vibevoice/0.5b': { + input: SchemaVibevoice05bInput + output: SchemaVibevoice05bOutput + } + 'fal-ai/maya/batch': { + input: SchemaMayaBatchInput + output: SchemaMayaBatchOutput + } + 'fal-ai/maya/stream': { + input: SchemaMayaStreamInput + output: SchemaMayaStreamOutput + } + 'fal-ai/maya': { + input: SchemaMayaInput + output: SchemaMayaOutput + } + 'fal-ai/minimax/speech-2.6-turbo': { + input: SchemaMinimaxSpeech26TurboInput + output: SchemaMinimaxSpeech26TurboOutput + } + 'fal-ai/minimax/speech-2.6-hd': { + input: SchemaMinimaxSpeech26HdInput + output: SchemaMinimaxSpeech26HdOutput + } + 'fal-ai/index-tts-2/text-to-speech': { + input: SchemaIndexTts2TextToSpeechInput + output: SchemaIndexTts2TextToSpeechOutput + } + 'fal-ai/kling-video/v1/tts': { + input: SchemaKlingVideoV1TtsInput + output: SchemaKlingVideoV1TtsOutput + } + 'fal-ai/chatterbox/text-to-speech/multilingual': { + input: SchemaChatterboxTextToSpeechMultilingualInput + output: SchemaChatterboxTextToSpeechMultilingualOutput + } + 'fal-ai/vibevoice/7b': { + input: SchemaVibevoice7bInput + output: SchemaVibevoice7bOutput + } + 'fal-ai/vibevoice': { + input: SchemaVibevoiceInput + output: SchemaVibevoiceOutput + } + 'fal-ai/minimax/preview/speech-2.5-hd': { + input: SchemaMinimaxPreviewSpeech25HdInput + output: SchemaMinimaxPreviewSpeech25HdOutput + } + 'fal-ai/minimax/preview/speech-2.5-turbo': { + input: SchemaMinimaxPreviewSpeech25TurboInput + output: SchemaMinimaxPreviewSpeech25TurboOutput + } + 'fal-ai/minimax/voice-design': { + input: SchemaMinimaxVoiceDesignInput + output: SchemaMinimaxVoiceDesignOutput + } + 'resemble-ai/chatterboxhd/text-to-speech': { + input: SchemaChatterboxhdTextToSpeechInput + output: SchemaChatterboxhdTextToSpeechOutput + } + 'fal-ai/chatterbox/text-to-speech': { + input: SchemaChatterboxTextToSpeechInput + output: SchemaChatterboxTextToSpeechOutput + } + 'fal-ai/minimax/voice-clone': { + input: SchemaMinimaxVoiceCloneInput + output: SchemaMinimaxVoiceCloneOutput + } + 'fal-ai/minimax/speech-02-turbo': { + input: SchemaMinimaxSpeech02TurboInput + output: SchemaMinimaxSpeech02TurboOutput + } + 'fal-ai/minimax/speech-02-hd': { + input: SchemaMinimaxSpeech02HdInput + output: SchemaMinimaxSpeech02HdOutput + } + 'fal-ai/dia-tts': { + input: SchemaDiaTtsInput + output: SchemaDiaTtsOutput + } + 'fal-ai/orpheus-tts': { + input: SchemaOrpheusTtsInput + output: SchemaOrpheusTtsOutput + } + 'fal-ai/elevenlabs/tts/turbo-v2.5': { + input: SchemaElevenlabsTtsTurboV25Input + output: SchemaElevenlabsTtsTurboV25Output + } +} + +/** Union type of all text-to-speech model endpoint IDs */ +export type 
TextToSpeechModel = keyof TextToSpeechEndpointMap + +export const TextToSpeechSchemaMap: Record< + TextToSpeechModel, + { input: z.ZodSchema; output: z.ZodSchema } +> = { + ['fal-ai/qwen-3-tts/voice-design/1.7b']: { + input: zSchemaQwen3TtsVoiceDesign17bInput, + output: zSchemaQwen3TtsVoiceDesign17bOutput, + }, + ['fal-ai/qwen-3-tts/text-to-speech/1.7b']: { + input: zSchemaQwen3TtsTextToSpeech17bInput, + output: zSchemaQwen3TtsTextToSpeech17bOutput, + }, + ['fal-ai/qwen-3-tts/text-to-speech/0.6b']: { + input: zSchemaQwen3TtsTextToSpeech06bInput, + output: zSchemaQwen3TtsTextToSpeech06bOutput, + }, + ['fal-ai/vibevoice/0.5b']: { + input: zSchemaVibevoice05bInput, + output: zSchemaVibevoice05bOutput, + }, + ['fal-ai/maya/batch']: { + input: zSchemaMayaBatchInput, + output: zSchemaMayaBatchOutput, + }, + ['fal-ai/maya/stream']: { + input: zSchemaMayaStreamInput, + output: zSchemaMayaStreamOutput, + }, + ['fal-ai/maya']: { + input: zSchemaMayaInput, + output: zSchemaMayaOutput, + }, + ['fal-ai/minimax/speech-2.6-turbo']: { + input: zSchemaMinimaxSpeech26TurboInput, + output: zSchemaMinimaxSpeech26TurboOutput, + }, + ['fal-ai/minimax/speech-2.6-hd']: { + input: zSchemaMinimaxSpeech26HdInput, + output: zSchemaMinimaxSpeech26HdOutput, + }, + ['fal-ai/index-tts-2/text-to-speech']: { + input: zSchemaIndexTts2TextToSpeechInput, + output: zSchemaIndexTts2TextToSpeechOutput, + }, + ['fal-ai/kling-video/v1/tts']: { + input: zSchemaKlingVideoV1TtsInput, + output: zSchemaKlingVideoV1TtsOutput, + }, + ['fal-ai/chatterbox/text-to-speech/multilingual']: { + input: zSchemaChatterboxTextToSpeechMultilingualInput, + output: zSchemaChatterboxTextToSpeechMultilingualOutput, + }, + ['fal-ai/vibevoice/7b']: { + input: zSchemaVibevoice7bInput, + output: zSchemaVibevoice7bOutput, + }, + ['fal-ai/vibevoice']: { + input: zSchemaVibevoiceInput, + output: zSchemaVibevoiceOutput, + }, + ['fal-ai/minimax/preview/speech-2.5-hd']: { + input: zSchemaMinimaxPreviewSpeech25HdInput, + output: zSchemaMinimaxPreviewSpeech25HdOutput, + }, + ['fal-ai/minimax/preview/speech-2.5-turbo']: { + input: zSchemaMinimaxPreviewSpeech25TurboInput, + output: zSchemaMinimaxPreviewSpeech25TurboOutput, + }, + ['fal-ai/minimax/voice-design']: { + input: zSchemaMinimaxVoiceDesignInput, + output: zSchemaMinimaxVoiceDesignOutput, + }, + ['resemble-ai/chatterboxhd/text-to-speech']: { + input: zSchemaChatterboxhdTextToSpeechInput, + output: zSchemaChatterboxhdTextToSpeechOutput, + }, + ['fal-ai/chatterbox/text-to-speech']: { + input: zSchemaChatterboxTextToSpeechInput, + output: zSchemaChatterboxTextToSpeechOutput, + }, + ['fal-ai/minimax/voice-clone']: { + input: zSchemaMinimaxVoiceCloneInput, + output: zSchemaMinimaxVoiceCloneOutput, + }, + ['fal-ai/minimax/speech-02-turbo']: { + input: zSchemaMinimaxSpeech02TurboInput, + output: zSchemaMinimaxSpeech02TurboOutput, + }, + ['fal-ai/minimax/speech-02-hd']: { + input: zSchemaMinimaxSpeech02HdInput, + output: zSchemaMinimaxSpeech02HdOutput, + }, + ['fal-ai/dia-tts']: { + input: zSchemaDiaTtsInput, + output: zSchemaDiaTtsOutput, + }, + ['fal-ai/orpheus-tts']: { + input: zSchemaOrpheusTtsInput, + output: zSchemaOrpheusTtsOutput, + }, + ['fal-ai/elevenlabs/tts/turbo-v2.5']: { + input: zSchemaElevenlabsTtsTurboV25Input, + output: zSchemaElevenlabsTtsTurboV25Output, + }, +} as const + +/** Get the input type for a specific text-to-speech model */ +export type TextToSpeechModelInput<T extends TextToSpeechModel> = + TextToSpeechEndpointMap[T]['input'] + +/** Get the output type for a specific text-to-speech model */ +export type
TextToSpeechModelOutput<T extends TextToSpeechModel> = + TextToSpeechEndpointMap[T]['output'] diff --git a/packages/typescript/ai-fal/src/generated/text-to-speech/types.gen.ts b/packages/typescript/ai-fal/src/generated/text-to-speech/types.gen.ts new file mode 100644 index 00000000..b987aee0 --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/text-to-speech/types.gen.ts @@ -0,0 +1,4715 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: 'https://queue.fal.run' | (string & {}) +} + +/** + * TTSOutput + */ +export type SchemaElevenlabsTtsTurboV25Output = { + audio: SchemaFile + /** + * Timestamps + * + * Timestamps for each word in the generated speech. Only returned if `timestamps` is set to True in the request. + */ + timestamps?: Array<unknown> | unknown +} + +/** + * File + */ +export type SchemaFile = { + /** + * File Size + * + * The size of the file in bytes. + */ + file_size?: number | unknown + /** + * File Name + * + * The name of the file. It will be auto-generated if not provided. + */ + file_name?: string | unknown + /** + * Content Type + * + * The mime type of the file. + */ + content_type?: string | unknown + /** + * Url + * + * The URL where the file can be downloaded from. + */ + url: string +} + +/** + * TextToSpeechRequest + */ +export type SchemaElevenlabsTtsTurboV25Input = { + /** + * Stability + * + * Voice stability (0-1) + */ + stability?: number + /** + * Next Text + * + * The text that comes after the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation. + */ + next_text?: string | unknown + /** + * Speed + * + * Speech speed (0.7-1.2). Values below 1.0 slow down the speech, above 1.0 speed it up. Extreme values may affect quality. + */ + speed?: number + /** + * Style + * + * Style exaggeration (0-1) + */ + style?: number + /** + * Text + * + * The text to convert to speech + */ + text: string + /** + * Timestamps + * + * Whether to return timestamps for each word in the generated speech + */ + timestamps?: boolean + /** + * Similarity Boost + * + * Similarity boost (0-1) + */ + similarity_boost?: number + /** + * Voice + * + * The voice to use for speech generation + */ + voice?: string + /** + * Language Code + * + * Language code (ISO 639-1) used to enforce a language for the model. An error will be returned if language code is not supported by the model. + */ + language_code?: string | unknown + /** + * Previous Text + * + * The text that came before the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation. + */ + previous_text?: string | unknown +} + +/** + * OrpheusOutput + */ +export type SchemaOrpheusTtsOutput = { + audio: SchemaFile +} + +/** + * OrpheusRequest + */ +export type SchemaOrpheusTtsInput = { + /** + * Text + * + * The text to be converted to speech. You can additionally add the following emotive tags: <laugh>, <chuckle>, <sigh>, <cough>, <sniffle>, <groan>, <yawn>, <gasp> + */ + text: string + /** + * Voice + * + * Voice ID for the desired voice. + */ + voice?: 'tara' | 'leah' | 'jess' | 'leo' | 'dan' | 'mia' | 'zac' | 'zoe' + /** + * Repetition Penalty + * + * Repetition penalty (>= 1.1 required for stable generations). + */ + repetition_penalty?: number + /** + * Temperature + * + * Temperature for generation (higher = more creative).
+ */ + temperature?: number +} + +/** + * DiaOutput + */ +export type SchemaDiaTtsOutput = { + /** + * The generated speech audio + */ + audio: SchemaFile +} + +/** + * DiaRequest + */ +export type SchemaDiaTtsInput = { + /** + * Text + * + * The text to be converted to speech. + */ + text: string +} + +/** + * TextToSpeechOutput + */ +export type SchemaMinimaxSpeech02HdOutput = { + /** + * Duration Ms + * + * Duration of the audio in milliseconds + */ + duration_ms: number + /** + * Audio + * + * The generated audio file + */ + audio: SchemaFile +} + +/** + * TextToSpeechHDRequest + */ +export type SchemaMinimaxSpeech02HdInput = { + /** + * Text + * + * Text to convert to speech (max 5000 characters, minimum 1 non-whitespace character) + */ + text: string + /** + * Language Boost + * + * Enhance recognition of specified languages and dialects + */ + language_boost?: + | 'Chinese' + | 'Chinese,Yue' + | 'English' + | 'Arabic' + | 'Russian' + | 'Spanish' + | 'French' + | 'Portuguese' + | 'German' + | 'Turkish' + | 'Dutch' + | 'Ukrainian' + | 'Vietnamese' + | 'Indonesian' + | 'Japanese' + | 'Italian' + | 'Korean' + | 'Thai' + | 'Polish' + | 'Romanian' + | 'Greek' + | 'Czech' + | 'Finnish' + | 'Hindi' + | 'Bulgarian' + | 'Danish' + | 'Hebrew' + | 'Malay' + | 'Slovak' + | 'Swedish' + | 'Croatian' + | 'Hungarian' + | 'Norwegian' + | 'Slovenian' + | 'Catalan' + | 'Nynorsk' + | 'Afrikaans' + | 'auto' + /** + * Voice Setting + * + * Voice configuration settings + */ + voice_setting?: SchemaVoiceSetting + /** + * Output Format + * + * Format of the output content (non-streaming only) + */ + output_format?: 'url' | 'hex' + /** + * Pronunciation Dict + * + * Custom pronunciation dictionary for text replacement + */ + pronunciation_dict?: SchemaPronunciationDict + /** + * Audio Setting + * + * Audio configuration settings + */ + audio_setting?: SchemaAudioSetting +} + +/** + * AudioSetting + */ +export type SchemaAudioSetting = { + /** + * Format + * + * Audio format + */ + format?: 'mp3' | 'pcm' | 'flac' + /** + * Sample Rate + * + * Sample rate of generated audio + */ + sample_rate?: 8000 | 16000 | 22050 | 24000 | 32000 | 44100 + /** + * Channel + * + * Number of audio channels (1=mono, 2=stereo) + */ + channel?: 1 | 2 + /** + * Bitrate + * + * Bitrate of generated audio + */ + bitrate?: 32000 | 64000 | 128000 | 256000 +} + +/** + * PronunciationDict + */ +export type SchemaPronunciationDict = { + /** + * Tone List + * + * List of pronunciation replacements in format ['text/(pronunciation)', ...]. For Chinese, tones are 1-5. 
Example: ['燕少飞/(yan4)(shao3)(fei1)'] + */ + tone_list?: Array<string> +} + +/** + * VoiceSetting + */ +export type SchemaVoiceSetting = { + /** + * Speed + * + * Speech speed (0.5-2.0) + */ + speed?: number + /** + * Vol + * + * Volume (0-10) + */ + vol?: number + /** + * Voice Id + * + * Predefined voice ID to use for synthesis + */ + voice_id?: string + /** + * Pitch + * + * Voice pitch (-12 to 12) + */ + pitch?: number + /** + * English Normalization + * + * Enables English text normalization to improve number reading performance, with a slight increase in latency + */ + english_normalization?: boolean + /** + * Emotion + * + * Emotion of the generated speech + */ + emotion?: + | 'happy' + | 'sad' + | 'angry' + | 'fearful' + | 'disgusted' + | 'surprised' + | 'neutral' +} + +/** + * TextToSpeechOutput + */ +export type SchemaMinimaxSpeech02TurboOutput = { + /** + * Duration Ms + * + * Duration of the audio in milliseconds + */ + duration_ms: number + /** + * Audio + * + * The generated audio file + */ + audio: SchemaFile +} + +/** + * TextToSpeechTurboRequest + */ +export type SchemaMinimaxSpeech02TurboInput = { + /** + * Text + * + * Text to convert to speech (max 5000 characters, minimum 1 non-whitespace character) + */ + text: string + /** + * Language Boost + * + * Enhance recognition of specified languages and dialects + */ + language_boost?: + | 'Chinese' + | 'Chinese,Yue' + | 'English' + | 'Arabic' + | 'Russian' + | 'Spanish' + | 'French' + | 'Portuguese' + | 'German' + | 'Turkish' + | 'Dutch' + | 'Ukrainian' + | 'Vietnamese' + | 'Indonesian' + | 'Japanese' + | 'Italian' + | 'Korean' + | 'Thai' + | 'Polish' + | 'Romanian' + | 'Greek' + | 'Czech' + | 'Finnish' + | 'Hindi' + | 'Bulgarian' + | 'Danish' + | 'Hebrew' + | 'Malay' + | 'Slovak' + | 'Swedish' + | 'Croatian' + | 'Hungarian' + | 'Norwegian' + | 'Slovenian' + | 'Catalan' + | 'Nynorsk' + | 'Afrikaans' + | 'auto' + /** + * Voice Setting + * + * Voice configuration settings + */ + voice_setting?: SchemaVoiceSetting + /** + * Output Format + * + * Format of the output content (non-streaming only) + */ + output_format?: 'url' | 'hex' + /** + * Pronunciation Dict + * + * Custom pronunciation dictionary for text replacement + */ + pronunciation_dict?: SchemaPronunciationDict + /** + * Audio Setting + * + * Audio configuration settings + */ + audio_setting?: SchemaAudioSetting +} + +/** + * VoiceCloneOutput + */ +export type SchemaMinimaxVoiceCloneOutput = { + /** + * Custom Voice Id + * + * The cloned voice ID for use with TTS + */ + custom_voice_id: string + /** + * Audio + * + * Preview audio generated with the cloned voice (if requested) + */ + audio?: SchemaFile +} + +/** + * VoiceCloneRequest + */ +export type SchemaMinimaxVoiceCloneInput = { + /** + * Model + * + * TTS model to use for preview. Options: speech-02-hd, speech-02-turbo, speech-01-hd, speech-01-turbo + */ + model?: + | 'speech-02-hd' + | 'speech-02-turbo' + | 'speech-01-hd' + | 'speech-01-turbo' + /** + * Text + * + * Text to generate a TTS preview with the cloned voice (optional) + */ + text?: string + /** + * Audio Url + * + * + * URL of the input audio file for voice cloning. Should be at least 10 seconds + * long. To retain the voice permanently, use it with a TTS (text-to-speech) + * endpoint at least once within 7 days. Otherwise, it will be + * automatically deleted.
+ * + */ + audio_url: string + /** + * Accuracy + * + * Text validation accuracy threshold (0-1) + */ + accuracy?: number + /** + * Noise Reduction + * + * Enable noise reduction for the cloned voice + */ + noise_reduction?: boolean + /** + * Need Volume Normalization + * + * Enable volume normalization for the cloned voice + */ + need_volume_normalization?: boolean +} + +export type SchemaChatterboxTextToSpeechOutput = unknown + +/** + * ChatterboxRequest + */ +export type SchemaChatterboxTextToSpeechInput = { + /** + * Text + * + * The text to be converted to speech. You can additionally add the following emotive tags: <laugh>, <chuckle>, <sigh>, <cough>, <sniffle>, <groan>, <yawn>, <gasp> + */ + text: string + /** + * Exaggeration + * + * Exaggeration factor for the generated speech (0.0 = no exaggeration, 1.0 = maximum exaggeration). + */ + exaggeration?: number + /** + * Audio Url + * + * Optional URL to an audio file to use as a reference for the generated speech. If provided, the model will try to match the style and tone of the reference audio. + */ + audio_url?: string + /** + * Temperature + * + * Temperature for generation (higher = more creative). + */ + temperature?: number + /** + * Seed + * + * Useful to control the reproducibility of the generated audio. Assuming all other properties didn't change, a fixed seed should always generate the exact same audio file. Set to 0 for random seed. + */ + seed?: number + /** + * Cfg + */ + cfg?: number +} + +/** + * Audio + */ +export type SchemaAudio = { + /** + * File Size + * + * The size of the file in bytes. + */ + file_size?: number + /** + * File Name + * + * The name of the file. It will be auto-generated if not provided. + */ + file_name?: string + /** + * Content Type + * + * The mime type of the file. + */ + content_type?: string + /** + * Url + * + * The URL where the file can be downloaded from. + */ + url: string + /** + * File Data + * + * File data + */ + file_data?: Blob | File +} + +/** + * TTSOutput + * + * Output parameters for the TTS request. + */ +export type SchemaChatterboxhdTextToSpeechOutput = { + /** + * Audio + * + * The generated audio file. + */ + audio: SchemaAudio +} + +/** + * TTSInput + * + * Input parameters for the TTS request. + */ +export type SchemaChatterboxhdTextToSpeechInput = { + /** + * Text + * + * Text to synthesize into speech. + */ + text?: string + /** + * Exaggeration + * + * Controls emotion exaggeration. Range typically 0.25 to 2.0. + */ + exaggeration?: number + /** + * High Quality Audio + * + * If True, the generated audio will be upscaled to 48kHz. The generation of the audio will take longer, but the quality will be higher. If False, the generated audio will be 24kHz. + */ + high_quality_audio?: boolean + /** + * Voice + * + * The voice to use for the TTS request. If neither voice nor audio are provided, a random voice will be used. + */ + voice?: + | 'Aurora' + | 'Blade' + | 'Britney' + | 'Carl' + | 'Cliff' + | 'Richard' + | 'Rico' + | 'Siobhan' + | 'Vicky' + /** + * Audio Url + * + * URL to the audio sample to use as a voice prompt for zero-shot TTS voice cloning. Providing an audio sample will override the voice setting. If neither voice nor audio_url are provided, a random voice will be used. + */ + audio_url?: string + /** + * Temperature + * + * Controls the randomness of generation. Range typically 0.05 to 5. + */ + temperature?: number + /** + * Seed + * + * Useful to control the reproducibility of the generated audio. Assuming all other properties didn't change, a fixed seed should always generate the exact same audio file.
Set to 0 for random seed. + */ + seed?: number + /** + * Cfg + * + * Classifier-free guidance scale (CFG) controls the conditioning factor. Range typically 0.2 to 1.0. For expressive or dramatic speech, try lower cfg values (e.g. ~0.3) and increase exaggeration to around 0.7 or higher. If the reference speaker has a fast speaking style, lowering cfg to around 0.3 can improve pacing. + */ + cfg?: number +} + +/** + * VoiceDesignOutput + */ +export type SchemaMinimaxVoiceDesignOutput = { + /** + * Custom Voice Id + * + * The voice_id of the generated voice + */ + custom_voice_id: string + /** + * Audio + * + * The preview audio using the generated voice + */ + audio: SchemaFile +} + +/** + * VoiceDesignRequest + */ +export type SchemaMinimaxVoiceDesignInput = { + /** + * Preview Text + * + * Text for audio preview. Limited to 500 characters. A fee of $30 per 1M characters will be charged for the generation of the preview audio. + */ + preview_text: string + /** + * Prompt + * + * Voice description prompt for generating a personalized voice + */ + prompt: string +} + +/** + * TextToSpeechOutput + */ +export type SchemaMinimaxPreviewSpeech25TurboOutput = { + /** + * Duration Ms + * + * Duration of the audio in milliseconds + */ + duration_ms: number + /** + * Audio + * + * The generated audio file + */ + audio: SchemaFile +} + +/** + * TextToSpeechTurbov25Request + */ +export type SchemaMinimaxPreviewSpeech25TurboInput = { + /** + * Text + * + * Text to convert to speech (max 5000 characters, minimum 1 non-whitespace character) + */ + text: string + /** + * Language Boost + * + * Enhance recognition of specified languages and dialects + */ + language_boost?: + | 'Persian' + | 'Filipino' + | 'Tamil' + | 'Chinese' + | 'Chinese,Yue' + | 'English' + | 'Arabic' + | 'Russian' + | 'Spanish' + | 'French' + | 'Portuguese' + | 'German' + | 'Turkish' + | 'Dutch' + | 'Ukrainian' + | 'Vietnamese' + | 'Indonesian' + | 'Japanese' + | 'Italian' + | 'Korean' + | 'Thai' + | 'Polish' + | 'Romanian' + | 'Greek' + | 'Czech' + | 'Finnish' + | 'Hindi' + | 'Bulgarian' + | 'Danish' + | 'Hebrew' + | 'Malay' + | 'Slovak' + | 'Swedish' + | 'Croatian' + | 'Hungarian' + | 'Norwegian' + | 'Slovenian' + | 'Catalan' + | 'Nynorsk' + | 'Afrikaans' + | 'auto' + /** + * Voice Setting + * + * Voice configuration settings + */ + voice_setting?: SchemaVoiceSetting + /** + * Output Format + * + * Format of the output content (non-streaming only) + */ + output_format?: 'url' | 'hex' + /** + * Pronunciation Dict + * + * Custom pronunciation dictionary for text replacement + */ + pronunciation_dict?: SchemaPronunciationDict + /** + * Audio Setting + * + * Audio configuration settings + */ + audio_setting?: SchemaAudioSetting +} + +/** + * TextToSpeechOutput + */ +export type SchemaMinimaxPreviewSpeech25HdOutput = { + /** + * Duration Ms + * + * Duration of the audio in milliseconds + */ + duration_ms: number + /** + * Audio + * + * The generated audio file + */ + audio: SchemaFile +} + +/** + * TextToSpeechHDv25Request + */ +export type SchemaMinimaxPreviewSpeech25HdInput = { + /** + * Text + * + * Text to convert to speech (max 5000 characters, minimum 1 non-whitespace character) + */ + text: string + /** + * Language Boost + * + * Enhance recognition of specified languages and dialects + */ + language_boost?: + | 'Persian' + | 'Filipino' + | 'Tamil' + | 'Chinese' + | 'Chinese,Yue' + | 'English' + | 'Arabic' + | 'Russian' + | 'Spanish' + | 'French' + | 'Portuguese' + | 'German' + | 'Turkish' + | 'Dutch' + | 'Ukrainian' + | 'Vietnamese' 
+ | 'Indonesian' + | 'Japanese' + | 'Italian' + | 'Korean' + | 'Thai' + | 'Polish' + | 'Romanian' + | 'Greek' + | 'Czech' + | 'Finnish' + | 'Hindi' + | 'Bulgarian' + | 'Danish' + | 'Hebrew' + | 'Malay' + | 'Slovak' + | 'Swedish' + | 'Croatian' + | 'Hungarian' + | 'Norwegian' + | 'Slovenian' + | 'Catalan' + | 'Nynorsk' + | 'Afrikaans' + | 'auto' + /** + * Voice Setting + * + * Voice configuration settings + */ + voice_setting?: SchemaVoiceSetting + /** + * Output Format + * + * Format of the output content (non-streaming only) + */ + output_format?: 'url' | 'hex' + /** + * Pronunciation Dict + * + * Custom pronunciation dictionary for text replacement + */ + pronunciation_dict?: SchemaPronunciationDict + /** + * Audio Setting + * + * Audio configuration settings + */ + audio_setting?: SchemaAudioSetting +} + +/** + * VibeVoiceOutput + * + * Output schema for VibeVoice TTS generation + */ +export type SchemaVibevoiceOutput = { + /** + * Duration + * + * Duration of the generated audio in seconds + */ + duration: number + /** + * Rtf + * + * Real-time factor (generation_time / audio_duration). Lower is better. + */ + rtf: number + /** + * Sample Rate + * + * Sample rate of the generated audio + */ + sample_rate: number + /** + * Generation Time + * + * Time taken to generate the audio in seconds + */ + generation_time: number + /** + * Audio + * + * The generated audio file containing the speech + */ + audio: SchemaFile +} + +/** + * VibeVoiceInput + * + * Input schema for VibeVoice TTS generation + */ +export type SchemaVibevoiceInput = { + /** + * Script + * + * The script to convert to speech. Can be formatted with 'Speaker X:' prefixes for multi-speaker dialogues. + */ + script: string + /** + * Seed + * + * Random seed for reproducible generation. + */ + seed?: number + /** + * Speakers + * + * List of speakers to use for the script. If not provided, will be inferred from the script or voice samples. + */ + speakers: Array<SchemaVibeVoiceSpeaker> + /** + * CFG Scale + * + * CFG (Classifier-Free Guidance) scale for generation. Higher values increase adherence to text. + */ + cfg_scale?: number +} + +/** + * VibeVoiceSpeaker + */ +export type SchemaVibeVoiceSpeaker = { + /** + * Preset + * + * Default voice preset to use for the speaker. Not used if `audio_url` is provided. + */ + preset?: + | 'Alice [EN]' + | 'Carter [EN]' + | 'Frank [EN]' + | 'Mary [EN] (Background Music)' + | 'Maya [EN]' + | 'Anchen [ZH] (Background Music)' + | 'Bowen [ZH]' + | 'Xinran [ZH]' + /** + * Audio URL + * + * URL to a voice sample audio file. If provided, `preset` will be ignored. + */ + audio_url?: string +} + +/** + * VibeVoiceOutput + * + * Output schema for VibeVoice TTS generation + */ +export type SchemaVibevoice7bOutput = { + /** + * Duration + * + * Duration of the generated audio in seconds + */ + duration: number + /** + * Rtf + * + * Real-time factor (generation_time / audio_duration). Lower is better. + */ + rtf: number + /** + * Sample Rate + * + * Sample rate of the generated audio + */ + sample_rate: number + /** + * Generation Time + * + * Time taken to generate the audio in seconds + */ + generation_time: number + /** + * Audio + * + * The generated audio file containing the speech + */ + audio: SchemaFile +} + +/** + * VibeVoice7bInput + * + * Input schema for VibeVoice-7b TTS generation + */ +export type SchemaVibevoice7bInput = { + /** + * Script + * + * The script to convert to speech. Can be formatted with 'Speaker X:' prefixes for multi-speaker dialogues.
+ */ + script: string + /** + * Seed + * + * Random seed for reproducible generation. + */ + seed?: number + /** + * Speakers + * + * List of speakers to use for the script. If not provided, will be inferred from the script or voice samples. + */ + speakers: Array<SchemaVibeVoiceSpeaker> + /** + * CFG Scale + * + * CFG (Classifier-Free Guidance) scale for generation. Higher values increase adherence to text. + */ + cfg_scale?: number +} + +/** + * ChatterboxMultilingualOutput + */ +export type SchemaChatterboxTextToSpeechMultilingualOutput = { + /** + * Audio + * + * The generated multilingual speech audio file + */ + audio: SchemaFile +} + +/** + * ChatterboxMultilingualRequest + */ +export type SchemaChatterboxTextToSpeechMultilingualInput = { + /** + * Text + * + * The text to be converted to speech (maximum 300 characters). Supports 23 languages including English, French, German, Spanish, Italian, Portuguese, Hindi, Arabic, Chinese, Japanese, Korean, and more. + */ + text: string + /** + * Custom Audio Language + * + * If using a custom audio URL, specify the language of the audio here. Ignored if voice is not a custom url. + */ + custom_audio_language?: + | 'english' + | 'arabic' + | 'danish' + | 'german' + | 'greek' + | 'spanish' + | 'finnish' + | 'french' + | 'hebrew' + | 'hindi' + | 'italian' + | 'japanese' + | 'korean' + | 'malay' + | 'dutch' + | 'norwegian' + | 'polish' + | 'portuguese' + | 'russian' + | 'swedish' + | 'swahili' + | 'turkish' + | 'chinese' + /** + * Exaggeration + * + * Controls speech expressiveness and emotional intensity (0.25-2.0). 0.5 is neutral, higher values increase expressiveness. Extreme values may be unstable. + */ + exaggeration?: number + /** + * Voice + * + * Language code for synthesis. In case using custom please provide audio url and select custom_audio_language. + */ + voice?: string + /** + * Temperature + * + * Controls randomness and variation in generation (0.05-5.0). Higher values create more varied speech patterns. + */ + temperature?: number + /** + * Seed + * + * Random seed for reproducible results. Set to 0 for random generation, or provide a specific number for consistent outputs. + */ + seed?: number + /** + * CFG Scale + * + * Configuration/pace weight controlling generation guidance (0.0-1.0). Use 0.0 for language transfer to mitigate accent inheritance.
+   */
+  cfg_scale?: number
+}
+
+/**
+ * TTSOutput
+ */
+export type SchemaKlingVideoV1TtsOutput = {
+  /**
+   * Audio
+   *
+   * The generated audio
+   */
+  audio: SchemaFile
+}
+
+/**
+ * TTSInput
+ */
+export type SchemaKlingVideoV1TtsInput = {
+  /**
+   * Text
+   *
+   * The text to be converted to speech
+   */
+  text: string
+  /**
+   * Voice Id
+   *
+   * The voice ID to use for speech synthesis
+   */
+  voice_id?:
+    | 'genshin_vindi2'
+    | 'zhinen_xuesheng'
+    | 'AOT'
+    | 'ai_shatang'
+    | 'genshin_klee2'
+    | 'genshin_kirara'
+    | 'ai_kaiya'
+    | 'oversea_male1'
+    | 'ai_chenjiahao_712'
+    | 'girlfriend_4_speech02'
+    | 'chat1_female_new-3'
+    | 'chat_0407_5-1'
+    | 'cartoon-boy-07'
+    | 'uk_boy1'
+    | 'cartoon-girl-01'
+    | 'PeppaPig_platform'
+    | 'ai_huangzhong_712'
+    | 'ai_huangyaoshi_712'
+    | 'ai_laoguowang_712'
+    | 'chengshu_jiejie'
+    | 'you_pingjing'
+    | 'calm_story1'
+    | 'uk_man2'
+    | 'laopopo_speech02'
+    | 'heainainai_speech02'
+    | 'reader_en_m-v1'
+    | 'commercial_lady_en_f-v1'
+    | 'tiyuxi_xuedi'
+    | 'tiexin_nanyou'
+    | 'girlfriend_1_speech02'
+    | 'girlfriend_2_speech02'
+    | 'zhuxi_speech02'
+    | 'uk_oldman3'
+    | 'dongbeilaotie_speech02'
+    | 'chongqingxiaohuo_speech02'
+    | 'chuanmeizi_speech02'
+    | 'chaoshandashu_speech02'
+    | 'ai_taiwan_man2_speech02'
+    | 'xianzhanggui_speech02'
+    | 'tianjinjiejie_speech02'
+    | 'diyinnansang_DB_CN_M_04-v2'
+    | 'yizhipiannan-v1'
+    | 'guanxiaofang-v2'
+    | 'tianmeixuemei-v1'
+    | 'daopianyansang-v1'
+    | 'mengwa-v1'
+  /**
+   * Voice Speed
+   *
+   * Rate of speech
+   */
+  voice_speed?: number
+}
+
+/**
+ * EmotionalStrengths
+ */
+export type SchemaEmotionalStrengths = {
+  /**
+   * Afraid
+   *
+   * Strength of fear emotion
+   */
+  afraid?: number
+  /**
+   * Calm
+   *
+   * Strength of calm emotion
+   */
+  calm?: number
+  /**
+   * Disgusted
+   *
+   * Strength of disgust emotion
+   */
+  disgusted?: number
+  /**
+   * Angry
+   *
+   * Strength of anger emotion
+   */
+  angry?: number
+  /**
+   * Sad
+   *
+   * Strength of sadness emotion
+   */
+  sad?: number
+  /**
+   * Melancholic
+   *
+   * Strength of melancholic emotion
+   */
+  melancholic?: number
+  /**
+   * Surprised
+   *
+   * Strength of surprise emotion
+   */
+  surprised?: number
+  /**
+   * Happy
+   *
+   * Strength of happiness emotion
+   */
+  happy?: number
+}
+
+/**
+ * IndexTTS2Output
+ */
+export type SchemaIndexTts2TextToSpeechOutput = {
+  /**
+   * Audio
+   *
+   * The generated audio file in base64 format.
+   */
+  audio: SchemaFile
+}
+
+/**
+ * IndexTTS2Input
+ */
+export type SchemaIndexTts2TextToSpeechInput = {
+  /**
+   * Prompt
+   *
+   * The speech prompt to generate
+   */
+  prompt: string
+  /**
+   * Emotional Strengths
+   *
+   * The strengths of individual emotions for fine-grained control.
+   */
+  emotional_strengths?: SchemaEmotionalStrengths
+  /**
+   * Strength
+   *
+   * The strength of the emotional style transfer. Higher values result in stronger emotional influence.
+   */
+  strength?: number
+  /**
+   * Emotional Audio Url
+   *
+   * The emotional reference audio file to extract the style from.
+   */
+  emotional_audio_url?: string
+  /**
+   * Audio Url
+   *
+   * The audio file to generate the speech from.
+   */
+  audio_url: string
+  /**
+   * Emotion Prompt
+   *
+   * The emotional prompt to influence the emotional style. Must be used together with should_use_prompt_for_emotion.
+   */
+  emotion_prompt?: string
+  /**
+   * Should Use Prompt For Emotion
+   *
+   * Whether to use the `prompt` to calculate emotional strengths; if enabled, it will overwrite the `emotional_strengths` values. If `emotion_prompt` is provided, it will be used instead of `prompt` to extract the emotional style.
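+   *
+   * @example
+   * // A minimal illustrative request combining these fields (the URL and the
+   * // texts are placeholders, not real assets):
+   * // {
+   * //   prompt: 'It is a beautiful day outside.',
+   * //   audio_url: 'https://example.com/reference-voice.wav',
+   * //   emotion_prompt: 'joyful and energetic',
+   * //   should_use_prompt_for_emotion: true,
+   * // }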
+ */ + should_use_prompt_for_emotion?: boolean +} + +/** + * TextToSpeechHD26Output + */ +export type SchemaMinimaxSpeech26HdOutput = { + /** + * Duration Ms + * + * Duration of the audio in milliseconds + */ + duration_ms: number + /** + * Audio + * + * The generated audio file + */ + audio: SchemaFile +} + +/** + * TextToSpeechHD26Request + */ +export type SchemaMinimaxSpeech26HdInput = { + /** + * Prompt + * + * Text to convert to speech. Paragraph breaks should be marked with newline characters. **NOTE**: You can customize speech pauses by adding markers in the form `<#x#>`, where `x` is the pause duration in seconds. Valid range: `[0.01, 99.99]`, up to two decimal places. Pause markers must be placed between speakable text segments and cannot be used consecutively. + */ + prompt: string + /** + * Language Boost + * + * Enhance recognition of specified languages and dialects + */ + language_boost?: + | 'Chinese' + | 'Chinese,Yue' + | 'English' + | 'Arabic' + | 'Russian' + | 'Spanish' + | 'French' + | 'Portuguese' + | 'German' + | 'Turkish' + | 'Dutch' + | 'Ukrainian' + | 'Vietnamese' + | 'Indonesian' + | 'Japanese' + | 'Italian' + | 'Korean' + | 'Thai' + | 'Polish' + | 'Romanian' + | 'Greek' + | 'Czech' + | 'Finnish' + | 'Hindi' + | 'Bulgarian' + | 'Danish' + | 'Hebrew' + | 'Malay' + | 'Slovak' + | 'Swedish' + | 'Croatian' + | 'Hungarian' + | 'Norwegian' + | 'Slovenian' + | 'Catalan' + | 'Nynorsk' + | 'Afrikaans' + | 'auto' + /** + * Output Format + * + * Format of the output content (non-streaming only) + */ + output_format?: 'url' | 'hex' + /** + * Pronunciation Dict + * + * Custom pronunciation dictionary for text replacement + */ + pronunciation_dict?: SchemaPronunciationDict + /** + * Voice Setting + * + * Voice configuration settings + */ + voice_setting?: SchemaVoiceSetting + /** + * Normalization Setting + * + * Loudness normalization settings for the audio + */ + normalization_setting?: SchemaLoudnessNormalizationSetting + /** + * Audio Setting + * + * Audio configuration settings + */ + audio_setting?: SchemaAudioSetting +} + +/** + * LoudnessNormalizationSetting + */ +export type SchemaLoudnessNormalizationSetting = { + /** + * Enabled + * + * Enable loudness normalization for the audio + */ + enabled?: boolean + /** + * Target Loudness + * + * Target loudness in LUFS (default -18.0) + */ + target_loudness?: number + /** + * Target Range + * + * Target loudness range in LU (default 8.0) + */ + target_range?: number + /** + * Target Peak + * + * Target peak level in dBTP (default -0.5). + */ + target_peak?: number +} + +/** + * TextToSpeechTurbo26Output + */ +export type SchemaMinimaxSpeech26TurboOutput = { + /** + * Duration Ms + * + * Duration of the audio in milliseconds + */ + duration_ms: number + /** + * Audio + * + * The generated audio file + */ + audio: SchemaFile +} + +/** + * TextToSpeechTurbo26Request + */ +export type SchemaMinimaxSpeech26TurboInput = { + /** + * Prompt + * + * Text to convert to speech. Paragraph breaks should be marked with newline characters. **NOTE**: You can customize speech pauses by adding markers in the form `<#x#>`, where `x` is the pause duration in seconds. Valid range: `[0.01, 99.99]`, up to two decimal places. Pause markers must be placed between speakable text segments and cannot be used consecutively. 
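+   *
+   * @example
+   * // Pause-marker usage following the rules above (the sentence is made up):
+   * // 'Welcome back.<#1.50#>Today we cover three topics.'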
+   */
+  prompt: string
+  /**
+   * Language Boost
+   *
+   * Enhance recognition of specified languages and dialects
+   */
+  language_boost?:
+    | 'Chinese'
+    | 'Chinese,Yue'
+    | 'English'
+    | 'Arabic'
+    | 'Russian'
+    | 'Spanish'
+    | 'French'
+    | 'Portuguese'
+    | 'German'
+    | 'Turkish'
+    | 'Dutch'
+    | 'Ukrainian'
+    | 'Vietnamese'
+    | 'Indonesian'
+    | 'Japanese'
+    | 'Italian'
+    | 'Korean'
+    | 'Thai'
+    | 'Polish'
+    | 'Romanian'
+    | 'Greek'
+    | 'Czech'
+    | 'Finnish'
+    | 'Hindi'
+    | 'Bulgarian'
+    | 'Danish'
+    | 'Hebrew'
+    | 'Malay'
+    | 'Slovak'
+    | 'Swedish'
+    | 'Croatian'
+    | 'Hungarian'
+    | 'Norwegian'
+    | 'Slovenian'
+    | 'Catalan'
+    | 'Nynorsk'
+    | 'Afrikaans'
+    | 'auto'
+  /**
+   * Output Format
+   *
+   * Format of the output content (non-streaming only)
+   */
+  output_format?: 'url' | 'hex'
+  /**
+   * Pronunciation Dict
+   *
+   * Custom pronunciation dictionary for text replacement
+   */
+  pronunciation_dict?: SchemaPronunciationDict
+  /**
+   * Voice Setting
+   *
+   * Voice configuration settings
+   */
+  voice_setting?: SchemaVoiceSetting
+  /**
+   * Normalization Setting
+   *
+   * Loudness normalization settings for the audio
+   */
+  normalization_setting?: SchemaLoudnessNormalizationSetting
+  /**
+   * Audio Setting
+   *
+   * Audio configuration settings
+   */
+  audio_setting?: SchemaAudioSetting
+}
+
+/**
+ * MayaVoiceOutput
+ *
+ * Output schema for Maya-1-Voice TTS generation
+ */
+export type SchemaMayaOutput = {
+  /**
+   * Rtf
+   *
+   * Real-time factor (generation_time / audio_duration). Lower is better.
+   */
+  rtf: number
+  /**
+   * Duration
+   *
+   * Duration of the generated audio in seconds
+   */
+  duration: number
+  /**
+   * Sample Rate
+   *
+   * Sample rate of the generated audio
+   */
+  sample_rate: string
+  /**
+   * Generation Time
+   *
+   * Time taken to generate the audio in seconds
+   */
+  generation_time: number
+  audio: SchemaFile
+}
+
+/**
+ * MayaVoiceInput
+ *
+ * Input schema for Maya-1-Voice TTS generation
+ */
+export type SchemaMayaInput = {
+  /**
+   * Repetition Penalty
+   *
+   * Penalty for repeating tokens. Higher values reduce repetition artifacts.
+   */
+  repetition_penalty?: number
+  /**
+   * Prompt
+   *
+   * Description of the voice/character. Includes attributes like age, accent, pitch, timbre, pacing, tone, and intensity. See examples for format.
+   */
+  prompt: string
+  /**
+   * Top P
+   *
+   * Nucleus sampling parameter. Controls diversity of token selection.
+   */
+  top_p?: number
+  /**
+   * Text
+   *
+   * The text to synthesize into speech. You can embed emotion tags anywhere in the text using the format `<emotion>` (e.g. `<laugh>`). Available emotions: laugh, laugh_harder, sigh, chuckle, gasp, angry, excited, whisper, cry, scream, sing, snort, exhale, gulp, giggle, sarcastic, curious. Example: 'Hello world! This is amazing!' or 'I can't believe this happened again.'
+   */
+  text: string
+  /**
+   * Output Format
+   *
+   * Output audio format for the generated speech
+   */
+  output_format?: 'wav' | 'mp3'
+  /**
+   * Max Tokens
+   *
+   * Maximum number of SNAC tokens to generate (7 tokens per frame). Controls maximum audio length.
+   */
+  max_tokens?: number
+  /**
+   * Temperature
+   *
+   * Sampling temperature. Lower values (0.2-0.5) produce more stable/consistent audio. Higher values add variation.
+   */
+  temperature?: number
+  /**
+   * Sample Rate
+   *
+   * Output audio sample rate. 48 kHz provides higher quality audio, 24 kHz is faster.
+   */
+  sample_rate?: '48 kHz' | '24 kHz'
+}
+
+export type SchemaMayaStreamOutput = unknown
+
+/**
+ * MayaVoiceStreamingInput
+ *
+ * Input schema for Maya-1-Voice streaming TTS generation
+ */
+export type SchemaMayaStreamInput = {
+  /**
+   * Repetition Penalty
+   *
+   * Penalty for repeating tokens. Higher values reduce repetition artifacts.
+   */
+  repetition_penalty?: number
+  /**
+   * Prompt
+   *
+   * Description of the voice/character. Includes attributes like age, accent, pitch, timbre, pacing, tone, and intensity. See examples for format.
+   */
+  prompt: string
+  /**
+   * Top P
+   *
+   * Nucleus sampling parameter. Controls diversity of token selection.
+   */
+  top_p?: number
+  /**
+   * Text
+   *
+   * The text to synthesize into speech. You can embed emotion tags anywhere in the text using the format `<emotion>` (e.g. `<laugh>`). Available emotions: laugh, laugh_harder, sigh, chuckle, gasp, angry, excited, whisper, cry, scream, sing, snort, exhale, gulp, giggle, sarcastic, curious. Example: 'Hello world! This is amazing!' or 'I can't believe this happened again.'
+   */
+  text: string
+  /**
+   * Output Format
+   *
+   * Output audio format. 'mp3' for browser-playable audio, 'wav' for uncompressed audio, 'pcm' for raw PCM (lowest latency, requires client-side decoding).
+   */
+  output_format?: 'mp3' | 'wav' | 'pcm'
+  /**
+   * Max Tokens
+   *
+   * Maximum number of SNAC tokens to generate (7 tokens per frame). Controls maximum audio length.
+   */
+  max_tokens?: number
+  /**
+   * Temperature
+   *
+   * Sampling temperature. Lower values (0.2-0.5) produce more stable/consistent audio. Higher values add variation.
+   */
+  temperature?: number
+  /**
+   * Sample Rate
+   *
+   * Output audio sample rate. 48 kHz uses upsampling for higher quality audio, 24 kHz is native SNAC output (faster, lower latency).
+   */
+  sample_rate?: '48 kHz' | '24 kHz'
+}
+
+/**
+ * MayaVoiceBatchOutput
+ *
+ * Output schema for batch Maya-1-Voice TTS generation
+ */
+export type SchemaMayaBatchOutput = {
+  /**
+   * Average Rtf
+   *
+   * Average real-time factor across all generations
+   */
+  average_rtf: number
+  /**
+   * Sample Rate
+   *
+   * Sample rate of all generated audio files
+   */
+  sample_rate: string
+  /**
+   * Total Generation Time
+   *
+   * Total time taken to generate all audio files in seconds
+   */
+  total_generation_time: number
+  /**
+   * Audios
+   *
+   * List of generated audio files
+   */
+  audios: Array<SchemaFile>
+  /**
+   * Durations
+   *
+   * Duration of each generated audio in seconds
+   */
+  durations: Array<number>
+}
+
+/**
+ * MayaVoiceBatchInput
+ *
+ * Input schema for batch Maya-1-Voice TTS generation
+ */
+export type SchemaMayaBatchInput = {
+  /**
+   * Repetition Penalty
+   *
+   * Repetition penalty for all generations.
+   */
+  repetition_penalty?: number
+  /**
+   * Top P
+   *
+   * Nucleus sampling parameter for all generations.
+   */
+  top_p?: number
+  /**
+   * Output Format
+   *
+   * Output audio format for all generated speech files
+   */
+  output_format?: 'wav' | 'mp3'
+  /**
+   * Texts
+   *
+   * List of texts to synthesize into speech. You can embed emotion tags in each text using the format `<emotion>`.
+   */
+  texts: Array<string>
+  /**
+   * Prompts
+   *
+   * List of voice descriptions for each text. Must match the length of texts list. Each describes the voice/character attributes.
+   */
+  prompts: Array<string>
+  /**
+   * Max Tokens
+   *
+   * Maximum SNAC tokens per generation.
+   */
+  max_tokens?: number
+  /**
+   * Temperature
+   *
+   * Sampling temperature for all generations.
+   */
+  temperature?: number
+  /**
+   * Sample Rate
+   *
+   * Output audio sample rate for all generations.
48 kHz provides higher quality, 24 kHz is faster. + */ + sample_rate?: '48 kHz' | '24 kHz' +} + +/** + * VibeVoice_0_5BOutput + * + * Output schema for VibeVoice-0.5b TTS generation + */ +export type SchemaVibevoice05bOutput = { + /** + * Duration + * + * Duration of the generated audio in seconds + */ + duration: number + /** + * Rtf + * + * Real-time factor (generation_time / audio_duration). Lower is better. + */ + rtf: number + /** + * Sample Rate + * + * Sample rate of the generated audio + */ + sample_rate: number + /** + * Generation Time + * + * Time taken to generate the audio in seconds + */ + generation_time: number + /** + * Audio + * + * The generated audio file containing the speech + */ + audio: SchemaFile +} + +/** + * VibeVoice0_5bInput + * + * Input schema for VibeVoice-0.5b TTS generation + */ +export type SchemaVibevoice05bInput = { + /** + * Script + * + * The script to convert to speech. + */ + script: string + /** + * Seed + * + * Random seed for reproducible generation. + */ + seed?: number + /** + * Speaker + * + * Voice to use for speaking. + */ + speaker: 'Frank' | 'Wayne' | 'Carter' | 'Emma' | 'Grace' | 'Mike' + /** + * CFG Scale + * + * CFG (Classifier-Free Guidance) scale for generation. Higher values increase adherence to text. + */ + cfg_scale?: number +} + +/** + * Qwen3TTSOutput06b + */ +export type SchemaQwen3TtsTextToSpeech06bOutput = { + /** + * Audio + * + * The generated speech audio file. + */ + audio: SchemaAudioFile +} + +/** + * AudioFile + */ +export type SchemaAudioFile = { + /** + * File Size + * + * The size of the file in bytes. + */ + file_size?: number + /** + * Duration + * + * The duration of the audio + */ + duration?: number + /** + * File Data + * + * File data + */ + file_data?: Blob | File + /** + * Channels + * + * The number of channels in the audio + */ + channels?: number + /** + * Url + * + * The URL where the file can be downloaded from. + */ + url: string + /** + * File Name + * + * The name of the file. It will be auto-generated if not provided. + */ + file_name?: string + /** + * Sample Rate + * + * The sample rate of the audio + */ + sample_rate?: number + /** + * Content Type + * + * The mime type of the file. + */ + content_type?: string + /** + * Bitrate + * + * The bitrate of the audio (e.g., '192k' or 192000) + */ + bitrate?: string | number +} + +/** + * Qwen3TTSInput06b + */ +export type SchemaQwen3TtsTextToSpeech06bInput = { + /** + * Prompt + * + * Optional prompt to guide the style of the generated speech. This prompt will be ignored if a speaker embedding is provided. + */ + prompt?: string + /** + * Speaker Voice Embedding File Url + * + * URL to a speaker embedding file in safetensors format, from `fal-ai/qwen-3-tts/clone-voice/0.6b` endpoint. If provided, the TTS model will use the cloned voice for synthesis instead of the predefined voices. + */ + speaker_voice_embedding_file_url?: string + /** + * Top P + * + * Top-p sampling parameter. + */ + top_p?: number + /** + * Repetition Penalty + * + * Penalty to reduce repeated tokens/codes. + */ + repetition_penalty?: number + /** + * Subtalker Temperature + * + * Temperature for sub-talker sampling. + */ + subtalker_temperature?: number + /** + * Top K + * + * Top-k sampling parameter. + */ + top_k?: number + /** + * Voice + * + * The voice to be used for speech synthesis, will be ignored if a speaker embedding is provided. 
Check out the **[documentation](https://github.com/QwenLM/Qwen3-TTS/tree/main?tab=readme-ov-file#custom-voice-generate)** for each voice's details and which language they primarily support. + */ + voice?: + | 'Vivian' + | 'Serena' + | 'Uncle_Fu' + | 'Dylan' + | 'Eric' + | 'Ryan' + | 'Aiden' + | 'Ono_Anna' + | 'Sohee' + /** + * Reference Text + * + * Optional reference text that was used when creating the speaker embedding. Providing this can improve synthesis quality when using a cloned voice. + */ + reference_text?: string + /** + * Temperature + * + * Sampling temperature; higher => more random. + */ + temperature?: number + /** + * Language + * + * The language of the voice. + */ + language?: + | 'Auto' + | 'English' + | 'Chinese' + | 'Spanish' + | 'French' + | 'German' + | 'Italian' + | 'Japanese' + | 'Korean' + | 'Portuguese' + | 'Russian' + /** + * Subtalker Top K + * + * Top-k for sub-talker sampling. + */ + subtalker_top_k?: number + /** + * Text + * + * The text to be converted to speech. + */ + text: string + /** + * Max New Tokens + * + * Maximum number of new codec tokens to generate. + */ + max_new_tokens?: number + /** + * Subtalker Dosample + * + * Sampling switch for the sub-talker. + */ + subtalker_dosample?: boolean + /** + * Subtalker Top P + * + * Top-p for sub-talker sampling. + */ + subtalker_top_p?: number +} + +/** + * Qwen3TTSOutput + */ +export type SchemaQwen3TtsTextToSpeech17bOutput = { + /** + * Audio + * + * The generated speech audio file. + */ + audio: SchemaAudioFile +} + +/** + * Qwen3TTSInput + */ +export type SchemaQwen3TtsTextToSpeech17bInput = { + /** + * Prompt + * + * Optional prompt to guide the style of the generated speech. This prompt will be ignored if a speaker embedding is provided. + */ + prompt?: string + /** + * Speaker Voice Embedding File Url + * + * URL to a speaker embedding file in safetensors format, from `fal-ai/qwen-3-tts/clone-voice` endpoint. If provided, the TTS model will use the cloned voice for synthesis instead of the predefined voices. + */ + speaker_voice_embedding_file_url?: string + /** + * Top P + * + * Top-p sampling parameter. + */ + top_p?: number + /** + * Repetition Penalty + * + * Penalty to reduce repeated tokens/codes. + */ + repetition_penalty?: number + /** + * Subtalker Temperature + * + * Temperature for sub-talker sampling. + */ + subtalker_temperature?: number + /** + * Top K + * + * Top-k sampling parameter. + */ + top_k?: number + /** + * Voice + * + * The voice to be used for speech synthesis, will be ignored if a speaker embedding is provided. Check out the **[documentation](https://github.com/QwenLM/Qwen3-TTS/tree/main?tab=readme-ov-file#custom-voice-generate)** for each voice's details and which language they primarily support. + */ + voice?: + | 'Vivian' + | 'Serena' + | 'Uncle_Fu' + | 'Dylan' + | 'Eric' + | 'Ryan' + | 'Aiden' + | 'Ono_Anna' + | 'Sohee' + /** + * Reference Text + * + * Optional reference text that was used when creating the speaker embedding. Providing this can improve synthesis quality when using a cloned voice. + */ + reference_text?: string + /** + * Temperature + * + * Sampling temperature; higher => more random. + */ + temperature?: number + /** + * Language + * + * The language of the voice. + */ + language?: + | 'Auto' + | 'English' + | 'Chinese' + | 'Spanish' + | 'French' + | 'German' + | 'Italian' + | 'Japanese' + | 'Korean' + | 'Portuguese' + | 'Russian' + /** + * Subtalker Top K + * + * Top-k for sub-talker sampling. 
+ */ + subtalker_top_k?: number + /** + * Text + * + * The text to be converted to speech. + */ + text: string + /** + * Max New Tokens + * + * Maximum number of new codec tokens to generate. + */ + max_new_tokens?: number + /** + * Subtalker Dosample + * + * Sampling switch for the sub-talker. + */ + subtalker_dosample?: boolean + /** + * Subtalker Top P + * + * Top-p for sub-talker sampling. + */ + subtalker_top_p?: number +} + +/** + * Qwen3DesignVoiceOutput + */ +export type SchemaQwen3TtsVoiceDesign17bOutput = { + /** + * Audio + * + * The generated speech audio file. + */ + audio: SchemaAudioFile +} + +/** + * Qwen3DesignVoiceInput + */ +export type SchemaQwen3TtsVoiceDesign17bInput = { + /** + * Repetition Penalty + * + * Penalty to reduce repeated tokens/codes. + */ + repetition_penalty?: number + /** + * Subtalker Top K + * + * Top-k for sub-talker sampling. + */ + subtalker_top_k?: number + /** + * Top P + * + * Top-p sampling parameter. + */ + top_p?: number + /** + * Prompt + * + * Optional prompt to guide the style of the generated speech. + */ + prompt: string + /** + * Max New Tokens + * + * Maximum number of new codec tokens to generate. + */ + max_new_tokens?: number + /** + * Text + * + * The text to be converted to speech. + */ + text: string + /** + * Language + * + * The language of the voice to be designed. + */ + language?: + | 'Auto' + | 'English' + | 'Chinese' + | 'Spanish' + | 'French' + | 'German' + | 'Italian' + | 'Japanese' + | 'Korean' + | 'Portuguese' + | 'Russian' + /** + * Top K + * + * Top-k sampling parameter. + */ + top_k?: number + /** + * Subtalker Dosample + * + * Sampling switch for the sub-talker. + */ + subtalker_dosample?: boolean + /** + * Subtalker Temperature + * + * Temperature for sub-talker sampling. + */ + subtalker_temperature?: number + /** + * Subtalker Top P + * + * Top-p for sub-talker sampling. + */ + subtalker_top_p?: number + /** + * Temperature + * + * Sampling temperature; higher => more random. + */ + temperature?: number +} + +export type SchemaQueueStatus = { + status: 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED' + /** + * The request id. + */ + request_id: string + /** + * The response url. + */ + response_url?: string + /** + * The status url. + */ + status_url?: string + /** + * The cancel url. + */ + cancel_url?: string + /** + * The logs. + */ + logs?: { + [key: string]: unknown + } + /** + * The metrics. + */ + metrics?: { + [key: string]: unknown + } + /** + * The queue position. + */ + queue_position?: number +} + +export type GetFalAiQwen3TtsVoiceDesign17bRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-3-tts/voice-design/1.7b/requests/{request_id}/status' +} + +export type GetFalAiQwen3TtsVoiceDesign17bRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiQwen3TtsVoiceDesign17bRequestsByRequestIdStatusResponse = + GetFalAiQwen3TtsVoiceDesign17bRequestsByRequestIdStatusResponses[keyof GetFalAiQwen3TtsVoiceDesign17bRequestsByRequestIdStatusResponses] + +export type PutFalAiQwen3TtsVoiceDesign17bRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-3-tts/voice-design/1.7b/requests/{request_id}/cancel' +} + +export type PutFalAiQwen3TtsVoiceDesign17bRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiQwen3TtsVoiceDesign17bRequestsByRequestIdCancelResponse = + PutFalAiQwen3TtsVoiceDesign17bRequestsByRequestIdCancelResponses[keyof PutFalAiQwen3TtsVoiceDesign17bRequestsByRequestIdCancelResponses] + +export type PostFalAiQwen3TtsVoiceDesign17bData = { + body: SchemaQwen3TtsVoiceDesign17bInput + path?: never + query?: never + url: '/fal-ai/qwen-3-tts/voice-design/1.7b' +} + +export type PostFalAiQwen3TtsVoiceDesign17bResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwen3TtsVoiceDesign17bResponse = + PostFalAiQwen3TtsVoiceDesign17bResponses[keyof PostFalAiQwen3TtsVoiceDesign17bResponses] + +export type GetFalAiQwen3TtsVoiceDesign17bRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-3-tts/voice-design/1.7b/requests/{request_id}' +} + +export type GetFalAiQwen3TtsVoiceDesign17bRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaQwen3TtsVoiceDesign17bOutput +} + +export type GetFalAiQwen3TtsVoiceDesign17bRequestsByRequestIdResponse = + GetFalAiQwen3TtsVoiceDesign17bRequestsByRequestIdResponses[keyof GetFalAiQwen3TtsVoiceDesign17bRequestsByRequestIdResponses] + +export type GetFalAiQwen3TtsTextToSpeech17bRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-3-tts/text-to-speech/1.7b/requests/{request_id}/status' +} + +export type GetFalAiQwen3TtsTextToSpeech17bRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiQwen3TtsTextToSpeech17bRequestsByRequestIdStatusResponse = + GetFalAiQwen3TtsTextToSpeech17bRequestsByRequestIdStatusResponses[keyof GetFalAiQwen3TtsTextToSpeech17bRequestsByRequestIdStatusResponses] + +export type PutFalAiQwen3TtsTextToSpeech17bRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-3-tts/text-to-speech/1.7b/requests/{request_id}/cancel' +} + +export type PutFalAiQwen3TtsTextToSpeech17bRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiQwen3TtsTextToSpeech17bRequestsByRequestIdCancelResponse = + PutFalAiQwen3TtsTextToSpeech17bRequestsByRequestIdCancelResponses[keyof PutFalAiQwen3TtsTextToSpeech17bRequestsByRequestIdCancelResponses] + +export type PostFalAiQwen3TtsTextToSpeech17bData = { + body: SchemaQwen3TtsTextToSpeech17bInput + path?: never + query?: never + url: '/fal-ai/qwen-3-tts/text-to-speech/1.7b' +} + +export type PostFalAiQwen3TtsTextToSpeech17bResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwen3TtsTextToSpeech17bResponse = + PostFalAiQwen3TtsTextToSpeech17bResponses[keyof PostFalAiQwen3TtsTextToSpeech17bResponses] + +export type GetFalAiQwen3TtsTextToSpeech17bRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-3-tts/text-to-speech/1.7b/requests/{request_id}' +} + +export type GetFalAiQwen3TtsTextToSpeech17bRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaQwen3TtsTextToSpeech17bOutput +} + +export type GetFalAiQwen3TtsTextToSpeech17bRequestsByRequestIdResponse = + GetFalAiQwen3TtsTextToSpeech17bRequestsByRequestIdResponses[keyof GetFalAiQwen3TtsTextToSpeech17bRequestsByRequestIdResponses] + +export type GetFalAiQwen3TtsTextToSpeech06bRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-3-tts/text-to-speech/0.6b/requests/{request_id}/status' +} + +export type GetFalAiQwen3TtsTextToSpeech06bRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiQwen3TtsTextToSpeech06bRequestsByRequestIdStatusResponse = + GetFalAiQwen3TtsTextToSpeech06bRequestsByRequestIdStatusResponses[keyof GetFalAiQwen3TtsTextToSpeech06bRequestsByRequestIdStatusResponses] + +export type PutFalAiQwen3TtsTextToSpeech06bRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-3-tts/text-to-speech/0.6b/requests/{request_id}/cancel' +} + +export type PutFalAiQwen3TtsTextToSpeech06bRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiQwen3TtsTextToSpeech06bRequestsByRequestIdCancelResponse = + PutFalAiQwen3TtsTextToSpeech06bRequestsByRequestIdCancelResponses[keyof PutFalAiQwen3TtsTextToSpeech06bRequestsByRequestIdCancelResponses] + +export type PostFalAiQwen3TtsTextToSpeech06bData = { + body: SchemaQwen3TtsTextToSpeech06bInput + path?: never + query?: never + url: '/fal-ai/qwen-3-tts/text-to-speech/0.6b' +} + +export type PostFalAiQwen3TtsTextToSpeech06bResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwen3TtsTextToSpeech06bResponse = + PostFalAiQwen3TtsTextToSpeech06bResponses[keyof PostFalAiQwen3TtsTextToSpeech06bResponses] + +export type GetFalAiQwen3TtsTextToSpeech06bRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-3-tts/text-to-speech/0.6b/requests/{request_id}' +} + +export type GetFalAiQwen3TtsTextToSpeech06bRequestsByRequestIdResponses = { + /** + * Result of the request. 
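+   *
+   * @example
+   * // Typical queue round-trip, sketched from the paths declared in this file
+   * // (polling cadence and HTTP client are up to the caller):
+   * //   POST /fal-ai/qwen-3-tts/text-to-speech/0.6b -> SchemaQueueStatus
+   * //   GET  /fal-ai/qwen-3-tts/text-to-speech/0.6b/requests/{request_id}/status
+   * //        (repeat until status === 'COMPLETED')
+   * //   GET  /fal-ai/qwen-3-tts/text-to-speech/0.6b/requests/{request_id}
+   * //        -> this 200 body, SchemaQwen3TtsTextToSpeech06bOutput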
+ */ + 200: SchemaQwen3TtsTextToSpeech06bOutput +} + +export type GetFalAiQwen3TtsTextToSpeech06bRequestsByRequestIdResponse = + GetFalAiQwen3TtsTextToSpeech06bRequestsByRequestIdResponses[keyof GetFalAiQwen3TtsTextToSpeech06bRequestsByRequestIdResponses] + +export type GetFalAiVibevoice05bRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/vibevoice/0.5b/requests/{request_id}/status' +} + +export type GetFalAiVibevoice05bRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiVibevoice05bRequestsByRequestIdStatusResponse = + GetFalAiVibevoice05bRequestsByRequestIdStatusResponses[keyof GetFalAiVibevoice05bRequestsByRequestIdStatusResponses] + +export type PutFalAiVibevoice05bRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/vibevoice/0.5b/requests/{request_id}/cancel' +} + +export type PutFalAiVibevoice05bRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiVibevoice05bRequestsByRequestIdCancelResponse = + PutFalAiVibevoice05bRequestsByRequestIdCancelResponses[keyof PutFalAiVibevoice05bRequestsByRequestIdCancelResponses] + +export type PostFalAiVibevoice05bData = { + body: SchemaVibevoice05bInput + path?: never + query?: never + url: '/fal-ai/vibevoice/0.5b' +} + +export type PostFalAiVibevoice05bResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiVibevoice05bResponse = + PostFalAiVibevoice05bResponses[keyof PostFalAiVibevoice05bResponses] + +export type GetFalAiVibevoice05bRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/vibevoice/0.5b/requests/{request_id}' +} + +export type GetFalAiVibevoice05bRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaVibevoice05bOutput +} + +export type GetFalAiVibevoice05bRequestsByRequestIdResponse = + GetFalAiVibevoice05bRequestsByRequestIdResponses[keyof GetFalAiVibevoice05bRequestsByRequestIdResponses] + +export type GetFalAiMayaBatchRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/maya/batch/requests/{request_id}/status' +} + +export type GetFalAiMayaBatchRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiMayaBatchRequestsByRequestIdStatusResponse = + GetFalAiMayaBatchRequestsByRequestIdStatusResponses[keyof GetFalAiMayaBatchRequestsByRequestIdStatusResponses] + +export type PutFalAiMayaBatchRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/maya/batch/requests/{request_id}/cancel' +} + +export type PutFalAiMayaBatchRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiMayaBatchRequestsByRequestIdCancelResponse = + PutFalAiMayaBatchRequestsByRequestIdCancelResponses[keyof PutFalAiMayaBatchRequestsByRequestIdCancelResponses] + +export type PostFalAiMayaBatchData = { + body: SchemaMayaBatchInput + path?: never + query?: never + url: '/fal-ai/maya/batch' +} + +export type PostFalAiMayaBatchResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMayaBatchResponse = + PostFalAiMayaBatchResponses[keyof PostFalAiMayaBatchResponses] + +export type GetFalAiMayaBatchRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/maya/batch/requests/{request_id}' +} + +export type GetFalAiMayaBatchRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMayaBatchOutput +} + +export type GetFalAiMayaBatchRequestsByRequestIdResponse = + GetFalAiMayaBatchRequestsByRequestIdResponses[keyof GetFalAiMayaBatchRequestsByRequestIdResponses] + +export type GetFalAiMayaStreamRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/maya/stream/requests/{request_id}/status' +} + +export type GetFalAiMayaStreamRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiMayaStreamRequestsByRequestIdStatusResponse = + GetFalAiMayaStreamRequestsByRequestIdStatusResponses[keyof GetFalAiMayaStreamRequestsByRequestIdStatusResponses] + +export type PutFalAiMayaStreamRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/maya/stream/requests/{request_id}/cancel' +} + +export type PutFalAiMayaStreamRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiMayaStreamRequestsByRequestIdCancelResponse = + PutFalAiMayaStreamRequestsByRequestIdCancelResponses[keyof PutFalAiMayaStreamRequestsByRequestIdCancelResponses] + +export type PostFalAiMayaStreamData = { + body: SchemaMayaStreamInput + path?: never + query?: never + url: '/fal-ai/maya/stream' +} + +export type PostFalAiMayaStreamResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMayaStreamResponse = + PostFalAiMayaStreamResponses[keyof PostFalAiMayaStreamResponses] + +export type GetFalAiMayaStreamRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/maya/stream/requests/{request_id}' +} + +export type GetFalAiMayaStreamRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMayaStreamOutput +} + +export type GetFalAiMayaStreamRequestsByRequestIdResponse = + GetFalAiMayaStreamRequestsByRequestIdResponses[keyof GetFalAiMayaStreamRequestsByRequestIdResponses] + +export type GetFalAiMayaRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
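+   *
+   * @example
+   * // e.g. GET /fal-ai/maya/requests/{request_id}/status?logs=1 to include the
+   * // `logs` object from SchemaQueueStatus in the status payload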
+ */ + logs?: number + } + url: '/fal-ai/maya/requests/{request_id}/status' +} + +export type GetFalAiMayaRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiMayaRequestsByRequestIdStatusResponse = + GetFalAiMayaRequestsByRequestIdStatusResponses[keyof GetFalAiMayaRequestsByRequestIdStatusResponses] + +export type PutFalAiMayaRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/maya/requests/{request_id}/cancel' +} + +export type PutFalAiMayaRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiMayaRequestsByRequestIdCancelResponse = + PutFalAiMayaRequestsByRequestIdCancelResponses[keyof PutFalAiMayaRequestsByRequestIdCancelResponses] + +export type PostFalAiMayaData = { + body: SchemaMayaInput + path?: never + query?: never + url: '/fal-ai/maya' +} + +export type PostFalAiMayaResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMayaResponse = + PostFalAiMayaResponses[keyof PostFalAiMayaResponses] + +export type GetFalAiMayaRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/maya/requests/{request_id}' +} + +export type GetFalAiMayaRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMayaOutput +} + +export type GetFalAiMayaRequestsByRequestIdResponse = + GetFalAiMayaRequestsByRequestIdResponses[keyof GetFalAiMayaRequestsByRequestIdResponses] + +export type GetFalAiMinimaxSpeech26TurboRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/minimax/speech-2.6-turbo/requests/{request_id}/status' +} + +export type GetFalAiMinimaxSpeech26TurboRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiMinimaxSpeech26TurboRequestsByRequestIdStatusResponse = + GetFalAiMinimaxSpeech26TurboRequestsByRequestIdStatusResponses[keyof GetFalAiMinimaxSpeech26TurboRequestsByRequestIdStatusResponses] + +export type PutFalAiMinimaxSpeech26TurboRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/speech-2.6-turbo/requests/{request_id}/cancel' +} + +export type PutFalAiMinimaxSpeech26TurboRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiMinimaxSpeech26TurboRequestsByRequestIdCancelResponse = + PutFalAiMinimaxSpeech26TurboRequestsByRequestIdCancelResponses[keyof PutFalAiMinimaxSpeech26TurboRequestsByRequestIdCancelResponses] + +export type PostFalAiMinimaxSpeech26TurboData = { + body: SchemaMinimaxSpeech26TurboInput + path?: never + query?: never + url: '/fal-ai/minimax/speech-2.6-turbo' +} + +export type PostFalAiMinimaxSpeech26TurboResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiMinimaxSpeech26TurboResponse = + PostFalAiMinimaxSpeech26TurboResponses[keyof PostFalAiMinimaxSpeech26TurboResponses] + +export type GetFalAiMinimaxSpeech26TurboRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/speech-2.6-turbo/requests/{request_id}' +} + +export type GetFalAiMinimaxSpeech26TurboRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMinimaxSpeech26TurboOutput +} + +export type GetFalAiMinimaxSpeech26TurboRequestsByRequestIdResponse = + GetFalAiMinimaxSpeech26TurboRequestsByRequestIdResponses[keyof GetFalAiMinimaxSpeech26TurboRequestsByRequestIdResponses] + +export type GetFalAiMinimaxSpeech26HdRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/minimax/speech-2.6-hd/requests/{request_id}/status' +} + +export type GetFalAiMinimaxSpeech26HdRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiMinimaxSpeech26HdRequestsByRequestIdStatusResponse = + GetFalAiMinimaxSpeech26HdRequestsByRequestIdStatusResponses[keyof GetFalAiMinimaxSpeech26HdRequestsByRequestIdStatusResponses] + +export type PutFalAiMinimaxSpeech26HdRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/speech-2.6-hd/requests/{request_id}/cancel' +} + +export type PutFalAiMinimaxSpeech26HdRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiMinimaxSpeech26HdRequestsByRequestIdCancelResponse = + PutFalAiMinimaxSpeech26HdRequestsByRequestIdCancelResponses[keyof PutFalAiMinimaxSpeech26HdRequestsByRequestIdCancelResponses] + +export type PostFalAiMinimaxSpeech26HdData = { + body: SchemaMinimaxSpeech26HdInput + path?: never + query?: never + url: '/fal-ai/minimax/speech-2.6-hd' +} + +export type PostFalAiMinimaxSpeech26HdResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMinimaxSpeech26HdResponse = + PostFalAiMinimaxSpeech26HdResponses[keyof PostFalAiMinimaxSpeech26HdResponses] + +export type GetFalAiMinimaxSpeech26HdRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/speech-2.6-hd/requests/{request_id}' +} + +export type GetFalAiMinimaxSpeech26HdRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMinimaxSpeech26HdOutput +} + +export type GetFalAiMinimaxSpeech26HdRequestsByRequestIdResponse = + GetFalAiMinimaxSpeech26HdRequestsByRequestIdResponses[keyof GetFalAiMinimaxSpeech26HdRequestsByRequestIdResponses] + +export type GetFalAiIndexTts2TextToSpeechRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/index-tts-2/text-to-speech/requests/{request_id}/status' +} + +export type GetFalAiIndexTts2TextToSpeechRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiIndexTts2TextToSpeechRequestsByRequestIdStatusResponse = + GetFalAiIndexTts2TextToSpeechRequestsByRequestIdStatusResponses[keyof GetFalAiIndexTts2TextToSpeechRequestsByRequestIdStatusResponses] + +export type PutFalAiIndexTts2TextToSpeechRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/index-tts-2/text-to-speech/requests/{request_id}/cancel' +} + +export type PutFalAiIndexTts2TextToSpeechRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiIndexTts2TextToSpeechRequestsByRequestIdCancelResponse = + PutFalAiIndexTts2TextToSpeechRequestsByRequestIdCancelResponses[keyof PutFalAiIndexTts2TextToSpeechRequestsByRequestIdCancelResponses] + +export type PostFalAiIndexTts2TextToSpeechData = { + body: SchemaIndexTts2TextToSpeechInput + path?: never + query?: never + url: '/fal-ai/index-tts-2/text-to-speech' +} + +export type PostFalAiIndexTts2TextToSpeechResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiIndexTts2TextToSpeechResponse = + PostFalAiIndexTts2TextToSpeechResponses[keyof PostFalAiIndexTts2TextToSpeechResponses] + +export type GetFalAiIndexTts2TextToSpeechRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/index-tts-2/text-to-speech/requests/{request_id}' +} + +export type GetFalAiIndexTts2TextToSpeechRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaIndexTts2TextToSpeechOutput +} + +export type GetFalAiIndexTts2TextToSpeechRequestsByRequestIdResponse = + GetFalAiIndexTts2TextToSpeechRequestsByRequestIdResponses[keyof GetFalAiIndexTts2TextToSpeechRequestsByRequestIdResponses] + +export type GetFalAiKlingVideoV1TtsRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-video/v1/tts/requests/{request_id}/status' +} + +export type GetFalAiKlingVideoV1TtsRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiKlingVideoV1TtsRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoV1TtsRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoV1TtsRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoV1TtsRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v1/tts/requests/{request_id}/cancel' +} + +export type PutFalAiKlingVideoV1TtsRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiKlingVideoV1TtsRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoV1TtsRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoV1TtsRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoV1TtsData = { + body: SchemaKlingVideoV1TtsInput + path?: never + query?: never + url: '/fal-ai/kling-video/v1/tts' +} + +export type PostFalAiKlingVideoV1TtsResponses = { + /** + * The request status. 
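+   *
+   * @example
+   * // Submitting an illustrative body such as { text: 'Hello', voice_id: 'uk_boy1' }
+   * // to /fal-ai/kling-video/v1/tts returns this SchemaQueueStatus envelope,
+   * // not the audio itself; fetch the result via the requests/{request_id} route.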
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoV1TtsResponse = + PostFalAiKlingVideoV1TtsResponses[keyof PostFalAiKlingVideoV1TtsResponses] + +export type GetFalAiKlingVideoV1TtsRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v1/tts/requests/{request_id}' +} + +export type GetFalAiKlingVideoV1TtsRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaKlingVideoV1TtsOutput +} + +export type GetFalAiKlingVideoV1TtsRequestsByRequestIdResponse = + GetFalAiKlingVideoV1TtsRequestsByRequestIdResponses[keyof GetFalAiKlingVideoV1TtsRequestsByRequestIdResponses] + +export type GetFalAiChatterboxTextToSpeechMultilingualRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/chatterbox/text-to-speech/multilingual/requests/{request_id}/status' + } + +export type GetFalAiChatterboxTextToSpeechMultilingualRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiChatterboxTextToSpeechMultilingualRequestsByRequestIdStatusResponse = + GetFalAiChatterboxTextToSpeechMultilingualRequestsByRequestIdStatusResponses[keyof GetFalAiChatterboxTextToSpeechMultilingualRequestsByRequestIdStatusResponses] + +export type PutFalAiChatterboxTextToSpeechMultilingualRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/chatterbox/text-to-speech/multilingual/requests/{request_id}/cancel' + } + +export type PutFalAiChatterboxTextToSpeechMultilingualRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiChatterboxTextToSpeechMultilingualRequestsByRequestIdCancelResponse = + PutFalAiChatterboxTextToSpeechMultilingualRequestsByRequestIdCancelResponses[keyof PutFalAiChatterboxTextToSpeechMultilingualRequestsByRequestIdCancelResponses] + +export type PostFalAiChatterboxTextToSpeechMultilingualData = { + body: SchemaChatterboxTextToSpeechMultilingualInput + path?: never + query?: never + url: '/fal-ai/chatterbox/text-to-speech/multilingual' +} + +export type PostFalAiChatterboxTextToSpeechMultilingualResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiChatterboxTextToSpeechMultilingualResponse = + PostFalAiChatterboxTextToSpeechMultilingualResponses[keyof PostFalAiChatterboxTextToSpeechMultilingualResponses] + +export type GetFalAiChatterboxTextToSpeechMultilingualRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/chatterbox/text-to-speech/multilingual/requests/{request_id}' + } + +export type GetFalAiChatterboxTextToSpeechMultilingualRequestsByRequestIdResponses = + { + /** + * Result of the request. 
+ */ + 200: SchemaChatterboxTextToSpeechMultilingualOutput + } + +export type GetFalAiChatterboxTextToSpeechMultilingualRequestsByRequestIdResponse = + GetFalAiChatterboxTextToSpeechMultilingualRequestsByRequestIdResponses[keyof GetFalAiChatterboxTextToSpeechMultilingualRequestsByRequestIdResponses] + +export type GetFalAiVibevoice7bRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/vibevoice/7b/requests/{request_id}/status' +} + +export type GetFalAiVibevoice7bRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiVibevoice7bRequestsByRequestIdStatusResponse = + GetFalAiVibevoice7bRequestsByRequestIdStatusResponses[keyof GetFalAiVibevoice7bRequestsByRequestIdStatusResponses] + +export type PutFalAiVibevoice7bRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/vibevoice/7b/requests/{request_id}/cancel' +} + +export type PutFalAiVibevoice7bRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiVibevoice7bRequestsByRequestIdCancelResponse = + PutFalAiVibevoice7bRequestsByRequestIdCancelResponses[keyof PutFalAiVibevoice7bRequestsByRequestIdCancelResponses] + +export type PostFalAiVibevoice7bData = { + body: SchemaVibevoice7bInput + path?: never + query?: never + url: '/fal-ai/vibevoice/7b' +} + +export type PostFalAiVibevoice7bResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiVibevoice7bResponse = + PostFalAiVibevoice7bResponses[keyof PostFalAiVibevoice7bResponses] + +export type GetFalAiVibevoice7bRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/vibevoice/7b/requests/{request_id}' +} + +export type GetFalAiVibevoice7bRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaVibevoice7bOutput +} + +export type GetFalAiVibevoice7bRequestsByRequestIdResponse = + GetFalAiVibevoice7bRequestsByRequestIdResponses[keyof GetFalAiVibevoice7bRequestsByRequestIdResponses] + +export type GetFalAiVibevoiceRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/vibevoice/requests/{request_id}/status' +} + +export type GetFalAiVibevoiceRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiVibevoiceRequestsByRequestIdStatusResponse = + GetFalAiVibevoiceRequestsByRequestIdStatusResponses[keyof GetFalAiVibevoiceRequestsByRequestIdStatusResponses] + +export type PutFalAiVibevoiceRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/vibevoice/requests/{request_id}/cancel' +} + +export type PutFalAiVibevoiceRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiVibevoiceRequestsByRequestIdCancelResponse = + PutFalAiVibevoiceRequestsByRequestIdCancelResponses[keyof PutFalAiVibevoiceRequestsByRequestIdCancelResponses] + +export type PostFalAiVibevoiceData = { + body: SchemaVibevoiceInput + path?: never + query?: never + url: '/fal-ai/vibevoice' +} + +export type PostFalAiVibevoiceResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiVibevoiceResponse = + PostFalAiVibevoiceResponses[keyof PostFalAiVibevoiceResponses] + +export type GetFalAiVibevoiceRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/vibevoice/requests/{request_id}' +} + +export type GetFalAiVibevoiceRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaVibevoiceOutput +} + +export type GetFalAiVibevoiceRequestsByRequestIdResponse = + GetFalAiVibevoiceRequestsByRequestIdResponses[keyof GetFalAiVibevoiceRequestsByRequestIdResponses] + +export type GetFalAiMinimaxPreviewSpeech25HdRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/minimax/preview/speech-2.5-hd/requests/{request_id}/status' +} + +export type GetFalAiMinimaxPreviewSpeech25HdRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiMinimaxPreviewSpeech25HdRequestsByRequestIdStatusResponse = + GetFalAiMinimaxPreviewSpeech25HdRequestsByRequestIdStatusResponses[keyof GetFalAiMinimaxPreviewSpeech25HdRequestsByRequestIdStatusResponses] + +export type PutFalAiMinimaxPreviewSpeech25HdRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/preview/speech-2.5-hd/requests/{request_id}/cancel' +} + +export type PutFalAiMinimaxPreviewSpeech25HdRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiMinimaxPreviewSpeech25HdRequestsByRequestIdCancelResponse = + PutFalAiMinimaxPreviewSpeech25HdRequestsByRequestIdCancelResponses[keyof PutFalAiMinimaxPreviewSpeech25HdRequestsByRequestIdCancelResponses] + +export type PostFalAiMinimaxPreviewSpeech25HdData = { + body: SchemaMinimaxPreviewSpeech25HdInput + path?: never + query?: never + url: '/fal-ai/minimax/preview/speech-2.5-hd' +} + +export type PostFalAiMinimaxPreviewSpeech25HdResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMinimaxPreviewSpeech25HdResponse = + PostFalAiMinimaxPreviewSpeech25HdResponses[keyof PostFalAiMinimaxPreviewSpeech25HdResponses] + +export type GetFalAiMinimaxPreviewSpeech25HdRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/preview/speech-2.5-hd/requests/{request_id}' +} + +export type GetFalAiMinimaxPreviewSpeech25HdRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaMinimaxPreviewSpeech25HdOutput +} + +export type GetFalAiMinimaxPreviewSpeech25HdRequestsByRequestIdResponse = + GetFalAiMinimaxPreviewSpeech25HdRequestsByRequestIdResponses[keyof GetFalAiMinimaxPreviewSpeech25HdRequestsByRequestIdResponses] + +export type GetFalAiMinimaxPreviewSpeech25TurboRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/minimax/preview/speech-2.5-turbo/requests/{request_id}/status' +} + +export type GetFalAiMinimaxPreviewSpeech25TurboRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiMinimaxPreviewSpeech25TurboRequestsByRequestIdStatusResponse = + GetFalAiMinimaxPreviewSpeech25TurboRequestsByRequestIdStatusResponses[keyof GetFalAiMinimaxPreviewSpeech25TurboRequestsByRequestIdStatusResponses] + +export type PutFalAiMinimaxPreviewSpeech25TurboRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/preview/speech-2.5-turbo/requests/{request_id}/cancel' +} + +export type PutFalAiMinimaxPreviewSpeech25TurboRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiMinimaxPreviewSpeech25TurboRequestsByRequestIdCancelResponse = + PutFalAiMinimaxPreviewSpeech25TurboRequestsByRequestIdCancelResponses[keyof PutFalAiMinimaxPreviewSpeech25TurboRequestsByRequestIdCancelResponses] + +export type PostFalAiMinimaxPreviewSpeech25TurboData = { + body: SchemaMinimaxPreviewSpeech25TurboInput + path?: never + query?: never + url: '/fal-ai/minimax/preview/speech-2.5-turbo' +} + +export type PostFalAiMinimaxPreviewSpeech25TurboResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMinimaxPreviewSpeech25TurboResponse = + PostFalAiMinimaxPreviewSpeech25TurboResponses[keyof PostFalAiMinimaxPreviewSpeech25TurboResponses] + +export type GetFalAiMinimaxPreviewSpeech25TurboRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/preview/speech-2.5-turbo/requests/{request_id}' +} + +export type GetFalAiMinimaxPreviewSpeech25TurboRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMinimaxPreviewSpeech25TurboOutput +} + +export type GetFalAiMinimaxPreviewSpeech25TurboRequestsByRequestIdResponse = + GetFalAiMinimaxPreviewSpeech25TurboRequestsByRequestIdResponses[keyof GetFalAiMinimaxPreviewSpeech25TurboRequestsByRequestIdResponses] + +export type GetFalAiMinimaxVoiceDesignRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/minimax/voice-design/requests/{request_id}/status' +} + +export type GetFalAiMinimaxVoiceDesignRequestsByRequestIdStatusResponses = { + /** + * The request status. 
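+ *
+ * While a request is IN_QUEUE the status payload also carries
+ * `queue_position`, which is useful for progress UIs. A minimal sketch
+ * (illustrative only; the queue base URL, FAL_KEY env var, and `requestId`
+ * are assumptions):
+ * @example
+ * const res = await fetch(
+ *   `https://queue.fal.run/fal-ai/minimax/voice-design/requests/${requestId}/status`,
+ *   { headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+ * )
+ * const s: SchemaQueueStatus = await res.json()
+ * if (s.status === 'IN_QUEUE') console.log(`queue position: ${s.queue_position}`)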
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiMinimaxVoiceDesignRequestsByRequestIdStatusResponse = + GetFalAiMinimaxVoiceDesignRequestsByRequestIdStatusResponses[keyof GetFalAiMinimaxVoiceDesignRequestsByRequestIdStatusResponses] + +export type PutFalAiMinimaxVoiceDesignRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/voice-design/requests/{request_id}/cancel' +} + +export type PutFalAiMinimaxVoiceDesignRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiMinimaxVoiceDesignRequestsByRequestIdCancelResponse = + PutFalAiMinimaxVoiceDesignRequestsByRequestIdCancelResponses[keyof PutFalAiMinimaxVoiceDesignRequestsByRequestIdCancelResponses] + +export type PostFalAiMinimaxVoiceDesignData = { + body: SchemaMinimaxVoiceDesignInput + path?: never + query?: never + url: '/fal-ai/minimax/voice-design' +} + +export type PostFalAiMinimaxVoiceDesignResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMinimaxVoiceDesignResponse = + PostFalAiMinimaxVoiceDesignResponses[keyof PostFalAiMinimaxVoiceDesignResponses] + +export type GetFalAiMinimaxVoiceDesignRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/voice-design/requests/{request_id}' +} + +export type GetFalAiMinimaxVoiceDesignRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMinimaxVoiceDesignOutput +} + +export type GetFalAiMinimaxVoiceDesignRequestsByRequestIdResponse = + GetFalAiMinimaxVoiceDesignRequestsByRequestIdResponses[keyof GetFalAiMinimaxVoiceDesignRequestsByRequestIdResponses] + +export type GetResembleAiChatterboxhdTextToSpeechRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/resemble-ai/chatterboxhd/text-to-speech/requests/{request_id}/status' + } + +export type GetResembleAiChatterboxhdTextToSpeechRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetResembleAiChatterboxhdTextToSpeechRequestsByRequestIdStatusResponse = + GetResembleAiChatterboxhdTextToSpeechRequestsByRequestIdStatusResponses[keyof GetResembleAiChatterboxhdTextToSpeechRequestsByRequestIdStatusResponses] + +export type PutResembleAiChatterboxhdTextToSpeechRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/resemble-ai/chatterboxhd/text-to-speech/requests/{request_id}/cancel' + } + +export type PutResembleAiChatterboxhdTextToSpeechRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutResembleAiChatterboxhdTextToSpeechRequestsByRequestIdCancelResponse = + PutResembleAiChatterboxhdTextToSpeechRequestsByRequestIdCancelResponses[keyof PutResembleAiChatterboxhdTextToSpeechRequestsByRequestIdCancelResponses] + +export type PostResembleAiChatterboxhdTextToSpeechData = { + body: SchemaChatterboxhdTextToSpeechInput + path?: never + query?: never + url: '/resemble-ai/chatterboxhd/text-to-speech' +} + +export type PostResembleAiChatterboxhdTextToSpeechResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostResembleAiChatterboxhdTextToSpeechResponse = + PostResembleAiChatterboxhdTextToSpeechResponses[keyof PostResembleAiChatterboxhdTextToSpeechResponses] + +export type GetResembleAiChatterboxhdTextToSpeechRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/resemble-ai/chatterboxhd/text-to-speech/requests/{request_id}' +} + +export type GetResembleAiChatterboxhdTextToSpeechRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaChatterboxhdTextToSpeechOutput + } + +export type GetResembleAiChatterboxhdTextToSpeechRequestsByRequestIdResponse = + GetResembleAiChatterboxhdTextToSpeechRequestsByRequestIdResponses[keyof GetResembleAiChatterboxhdTextToSpeechRequestsByRequestIdResponses] + +export type GetFalAiChatterboxTextToSpeechRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/chatterbox/text-to-speech/requests/{request_id}/status' +} + +export type GetFalAiChatterboxTextToSpeechRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiChatterboxTextToSpeechRequestsByRequestIdStatusResponse = + GetFalAiChatterboxTextToSpeechRequestsByRequestIdStatusResponses[keyof GetFalAiChatterboxTextToSpeechRequestsByRequestIdStatusResponses] + +export type PutFalAiChatterboxTextToSpeechRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/chatterbox/text-to-speech/requests/{request_id}/cancel' +} + +export type PutFalAiChatterboxTextToSpeechRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiChatterboxTextToSpeechRequestsByRequestIdCancelResponse = + PutFalAiChatterboxTextToSpeechRequestsByRequestIdCancelResponses[keyof PutFalAiChatterboxTextToSpeechRequestsByRequestIdCancelResponses] + +export type PostFalAiChatterboxTextToSpeechData = { + body: SchemaChatterboxTextToSpeechInput + path?: never + query?: never + url: '/fal-ai/chatterbox/text-to-speech' +} + +export type PostFalAiChatterboxTextToSpeechResponses = { + /** + * The request status. 
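+ *
+ * Submitting returns a queue ticket (a request_id plus polling URLs), not
+ * the synthesized audio; the audio is fetched from the request URL after
+ * the job completes. A minimal sketch (illustrative only; the queue base
+ * URL and FAL_KEY env var are assumptions):
+ * @example
+ * const body: SchemaChatterboxTextToSpeechInput = { text: 'Hello there!' }
+ * const res = await fetch('https://queue.fal.run/fal-ai/chatterbox/text-to-speech', {
+ *   method: 'POST',
+ *   headers: {
+ *     Authorization: `Key ${process.env.FAL_KEY}`,
+ *     'Content-Type': 'application/json',
+ *   },
+ *   body: JSON.stringify(body),
+ * })
+ * const ticket: SchemaQueueStatus = await res.json()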
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiChatterboxTextToSpeechResponse = + PostFalAiChatterboxTextToSpeechResponses[keyof PostFalAiChatterboxTextToSpeechResponses] + +export type GetFalAiChatterboxTextToSpeechRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/chatterbox/text-to-speech/requests/{request_id}' +} + +export type GetFalAiChatterboxTextToSpeechRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaChatterboxTextToSpeechOutput +} + +export type GetFalAiChatterboxTextToSpeechRequestsByRequestIdResponse = + GetFalAiChatterboxTextToSpeechRequestsByRequestIdResponses[keyof GetFalAiChatterboxTextToSpeechRequestsByRequestIdResponses] + +export type GetFalAiMinimaxVoiceCloneRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/minimax/voice-clone/requests/{request_id}/status' +} + +export type GetFalAiMinimaxVoiceCloneRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiMinimaxVoiceCloneRequestsByRequestIdStatusResponse = + GetFalAiMinimaxVoiceCloneRequestsByRequestIdStatusResponses[keyof GetFalAiMinimaxVoiceCloneRequestsByRequestIdStatusResponses] + +export type PutFalAiMinimaxVoiceCloneRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/voice-clone/requests/{request_id}/cancel' +} + +export type PutFalAiMinimaxVoiceCloneRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiMinimaxVoiceCloneRequestsByRequestIdCancelResponse = + PutFalAiMinimaxVoiceCloneRequestsByRequestIdCancelResponses[keyof PutFalAiMinimaxVoiceCloneRequestsByRequestIdCancelResponses] + +export type PostFalAiMinimaxVoiceCloneData = { + body: SchemaMinimaxVoiceCloneInput + path?: never + query?: never + url: '/fal-ai/minimax/voice-clone' +} + +export type PostFalAiMinimaxVoiceCloneResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMinimaxVoiceCloneResponse = + PostFalAiMinimaxVoiceCloneResponses[keyof PostFalAiMinimaxVoiceCloneResponses] + +export type GetFalAiMinimaxVoiceCloneRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/voice-clone/requests/{request_id}' +} + +export type GetFalAiMinimaxVoiceCloneRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMinimaxVoiceCloneOutput +} + +export type GetFalAiMinimaxVoiceCloneRequestsByRequestIdResponse = + GetFalAiMinimaxVoiceCloneRequestsByRequestIdResponses[keyof GetFalAiMinimaxVoiceCloneRequestsByRequestIdResponses] + +export type GetFalAiMinimaxSpeech02TurboRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/minimax/speech-02-turbo/requests/{request_id}/status' +} + +export type GetFalAiMinimaxSpeech02TurboRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiMinimaxSpeech02TurboRequestsByRequestIdStatusResponse = + GetFalAiMinimaxSpeech02TurboRequestsByRequestIdStatusResponses[keyof GetFalAiMinimaxSpeech02TurboRequestsByRequestIdStatusResponses] + +export type PutFalAiMinimaxSpeech02TurboRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/speech-02-turbo/requests/{request_id}/cancel' +} + +export type PutFalAiMinimaxSpeech02TurboRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiMinimaxSpeech02TurboRequestsByRequestIdCancelResponse = + PutFalAiMinimaxSpeech02TurboRequestsByRequestIdCancelResponses[keyof PutFalAiMinimaxSpeech02TurboRequestsByRequestIdCancelResponses] + +export type PostFalAiMinimaxSpeech02TurboData = { + body: SchemaMinimaxSpeech02TurboInput + path?: never + query?: never + url: '/fal-ai/minimax/speech-02-turbo' +} + +export type PostFalAiMinimaxSpeech02TurboResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMinimaxSpeech02TurboResponse = + PostFalAiMinimaxSpeech02TurboResponses[keyof PostFalAiMinimaxSpeech02TurboResponses] + +export type GetFalAiMinimaxSpeech02TurboRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/speech-02-turbo/requests/{request_id}' +} + +export type GetFalAiMinimaxSpeech02TurboRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMinimaxSpeech02TurboOutput +} + +export type GetFalAiMinimaxSpeech02TurboRequestsByRequestIdResponse = + GetFalAiMinimaxSpeech02TurboRequestsByRequestIdResponses[keyof GetFalAiMinimaxSpeech02TurboRequestsByRequestIdResponses] + +export type GetFalAiMinimaxSpeech02HdRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/minimax/speech-02-hd/requests/{request_id}/status' +} + +export type GetFalAiMinimaxSpeech02HdRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiMinimaxSpeech02HdRequestsByRequestIdStatusResponse = + GetFalAiMinimaxSpeech02HdRequestsByRequestIdStatusResponses[keyof GetFalAiMinimaxSpeech02HdRequestsByRequestIdStatusResponses] + +export type PutFalAiMinimaxSpeech02HdRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/speech-02-hd/requests/{request_id}/cancel' +} + +export type PutFalAiMinimaxSpeech02HdRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiMinimaxSpeech02HdRequestsByRequestIdCancelResponse = + PutFalAiMinimaxSpeech02HdRequestsByRequestIdCancelResponses[keyof PutFalAiMinimaxSpeech02HdRequestsByRequestIdCancelResponses] + +export type PostFalAiMinimaxSpeech02HdData = { + body: SchemaMinimaxSpeech02HdInput + path?: never + query?: never + url: '/fal-ai/minimax/speech-02-hd' +} + +export type PostFalAiMinimaxSpeech02HdResponses = { + /** + * The request status. 
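+ *
+ * The status URL can be polled until it reports COMPLETED; passing
+ * `logs=1` includes execution logs in the payload. A minimal polling
+ * sketch (illustrative only; the queue base URL, FAL_KEY env var,
+ * `requestId`, and the 1s interval are assumptions):
+ * @example
+ * let status: SchemaQueueStatus
+ * do {
+ *   const res = await fetch(
+ *     `https://queue.fal.run/fal-ai/minimax/speech-02-hd/requests/${requestId}/status?logs=1`,
+ *     { headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+ *   )
+ *   status = await res.json()
+ *   if (status.status !== 'COMPLETED') await new Promise((r) => setTimeout(r, 1000))
+ * } while (status.status !== 'COMPLETED')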
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiMinimaxSpeech02HdResponse = + PostFalAiMinimaxSpeech02HdResponses[keyof PostFalAiMinimaxSpeech02HdResponses] + +export type GetFalAiMinimaxSpeech02HdRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/speech-02-hd/requests/{request_id}' +} + +export type GetFalAiMinimaxSpeech02HdRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMinimaxSpeech02HdOutput +} + +export type GetFalAiMinimaxSpeech02HdRequestsByRequestIdResponse = + GetFalAiMinimaxSpeech02HdRequestsByRequestIdResponses[keyof GetFalAiMinimaxSpeech02HdRequestsByRequestIdResponses] + +export type GetFalAiDiaTtsRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/dia-tts/requests/{request_id}/status' +} + +export type GetFalAiDiaTtsRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiDiaTtsRequestsByRequestIdStatusResponse = + GetFalAiDiaTtsRequestsByRequestIdStatusResponses[keyof GetFalAiDiaTtsRequestsByRequestIdStatusResponses] + +export type PutFalAiDiaTtsRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/dia-tts/requests/{request_id}/cancel' +} + +export type PutFalAiDiaTtsRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiDiaTtsRequestsByRequestIdCancelResponse = + PutFalAiDiaTtsRequestsByRequestIdCancelResponses[keyof PutFalAiDiaTtsRequestsByRequestIdCancelResponses] + +export type PostFalAiDiaTtsData = { + body: SchemaDiaTtsInput + path?: never + query?: never + url: '/fal-ai/dia-tts' +} + +export type PostFalAiDiaTtsResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiDiaTtsResponse = + PostFalAiDiaTtsResponses[keyof PostFalAiDiaTtsResponses] + +export type GetFalAiDiaTtsRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/dia-tts/requests/{request_id}' +} + +export type GetFalAiDiaTtsRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaDiaTtsOutput +} + +export type GetFalAiDiaTtsRequestsByRequestIdResponse = + GetFalAiDiaTtsRequestsByRequestIdResponses[keyof GetFalAiDiaTtsRequestsByRequestIdResponses] + +export type GetFalAiOrpheusTtsRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/orpheus-tts/requests/{request_id}/status' +} + +export type GetFalAiOrpheusTtsRequestsByRequestIdStatusResponses = { + /** + * The request status. 
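+ *
+ * Once this reports COMPLETED, the output's `audio.url` is a plain
+ * downloadable file. A minimal sketch (illustrative only; the queue base
+ * URL, FAL_KEY env var, and `requestId` are assumptions):
+ * @example
+ * const res = await fetch(
+ *   `https://queue.fal.run/fal-ai/orpheus-tts/requests/${requestId}`,
+ *   { headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+ * )
+ * const out: SchemaOrpheusTtsOutput = await res.json()
+ * const audioBytes = await (await fetch(out.audio.url)).arrayBuffer()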
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiOrpheusTtsRequestsByRequestIdStatusResponse = + GetFalAiOrpheusTtsRequestsByRequestIdStatusResponses[keyof GetFalAiOrpheusTtsRequestsByRequestIdStatusResponses] + +export type PutFalAiOrpheusTtsRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/orpheus-tts/requests/{request_id}/cancel' +} + +export type PutFalAiOrpheusTtsRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiOrpheusTtsRequestsByRequestIdCancelResponse = + PutFalAiOrpheusTtsRequestsByRequestIdCancelResponses[keyof PutFalAiOrpheusTtsRequestsByRequestIdCancelResponses] + +export type PostFalAiOrpheusTtsData = { + body: SchemaOrpheusTtsInput + path?: never + query?: never + url: '/fal-ai/orpheus-tts' +} + +export type PostFalAiOrpheusTtsResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiOrpheusTtsResponse = + PostFalAiOrpheusTtsResponses[keyof PostFalAiOrpheusTtsResponses] + +export type GetFalAiOrpheusTtsRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/orpheus-tts/requests/{request_id}' +} + +export type GetFalAiOrpheusTtsRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaOrpheusTtsOutput +} + +export type GetFalAiOrpheusTtsRequestsByRequestIdResponse = + GetFalAiOrpheusTtsRequestsByRequestIdResponses[keyof GetFalAiOrpheusTtsRequestsByRequestIdResponses] + +export type GetFalAiElevenlabsTtsTurboV25RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/elevenlabs/tts/turbo-v2.5/requests/{request_id}/status' +} + +export type GetFalAiElevenlabsTtsTurboV25RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiElevenlabsTtsTurboV25RequestsByRequestIdStatusResponse = + GetFalAiElevenlabsTtsTurboV25RequestsByRequestIdStatusResponses[keyof GetFalAiElevenlabsTtsTurboV25RequestsByRequestIdStatusResponses] + +export type PutFalAiElevenlabsTtsTurboV25RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/elevenlabs/tts/turbo-v2.5/requests/{request_id}/cancel' +} + +export type PutFalAiElevenlabsTtsTurboV25RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiElevenlabsTtsTurboV25RequestsByRequestIdCancelResponse = + PutFalAiElevenlabsTtsTurboV25RequestsByRequestIdCancelResponses[keyof PutFalAiElevenlabsTtsTurboV25RequestsByRequestIdCancelResponses] + +export type PostFalAiElevenlabsTtsTurboV25Data = { + body: SchemaElevenlabsTtsTurboV25Input + path?: never + query?: never + url: '/fal-ai/elevenlabs/tts/turbo-v2.5' +} + +export type PostFalAiElevenlabsTtsTurboV25Responses = { + /** + * The request status. 
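+ *
+ * Every endpoint in this file shares the same `{ body, path, query, url }`
+ * Data shape, so a single small helper covers all submissions. A minimal
+ * sketch (illustrative only; `submitQueued`, the queue base URL, and the
+ * FAL_KEY env var are assumptions, not generated code):
+ * @example
+ * async function submitQueued(data: {
+ *   url: string
+ *   body: unknown
+ * }): Promise<SchemaQueueStatus> {
+ *   const res = await fetch(`https://queue.fal.run${data.url}`, {
+ *     method: 'POST',
+ *     headers: {
+ *       Authorization: `Key ${process.env.FAL_KEY}`,
+ *       'Content-Type': 'application/json',
+ *     },
+ *     body: JSON.stringify(data.body),
+ *   })
+ *   return res.json()
+ * }
+ * const ticket = await submitQueued({
+ *   url: '/fal-ai/elevenlabs/tts/turbo-v2.5',
+ *   body: { text: 'Hello!' } satisfies SchemaElevenlabsTtsTurboV25Input,
+ * })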
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiElevenlabsTtsTurboV25Response = + PostFalAiElevenlabsTtsTurboV25Responses[keyof PostFalAiElevenlabsTtsTurboV25Responses] + +export type GetFalAiElevenlabsTtsTurboV25RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/elevenlabs/tts/turbo-v2.5/requests/{request_id}' +} + +export type GetFalAiElevenlabsTtsTurboV25RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaElevenlabsTtsTurboV25Output +} + +export type GetFalAiElevenlabsTtsTurboV25RequestsByRequestIdResponse = + GetFalAiElevenlabsTtsTurboV25RequestsByRequestIdResponses[keyof GetFalAiElevenlabsTtsTurboV25RequestsByRequestIdResponses] diff --git a/packages/typescript/ai-fal/src/generated/text-to-speech/zod.gen.ts b/packages/typescript/ai-fal/src/generated/text-to-speech/zod.gen.ts new file mode 100644 index 00000000..1ed5559e --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/text-to-speech/zod.gen.ts @@ -0,0 +1,4139 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { z } from 'zod' + +/** + * File + */ +export const zSchemaFile = z.object({ + file_size: z.optional(z.union([z.int(), z.unknown()])), + file_name: z.optional(z.union([z.string(), z.unknown()])), + content_type: z.optional(z.union([z.string(), z.unknown()])), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), +}) + +/** + * TTSOutput + */ +export const zSchemaElevenlabsTtsTurboV25Output = z.object({ + audio: zSchemaFile, + timestamps: z.optional(z.union([z.array(z.unknown()), z.unknown()])), +}) + +/** + * TextToSpeechRequest + */ +export const zSchemaElevenlabsTtsTurboV25Input = z.object({ + stability: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Voice stability (0-1)', + }), + ) + .default(0.5), + next_text: z.optional(z.union([z.string(), z.unknown()])), + speed: z + .optional( + z.number().gte(0.7).lte(1.2).register(z.globalRegistry, { + description: + 'Speech speed (0.7-1.2). Values below 1.0 slow down the speech, above 1.0 speed it up. Extreme values may affect quality.', + }), + ) + .default(1), + style: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Style exaggeration (0-1)', + }), + ) + .default(0), + text: z.string().min(1).register(z.globalRegistry, { + description: 'The text to convert to speech', + }), + timestamps: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to return timestamps for each word in the generated speech', + }), + ) + .default(false), + similarity_boost: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Similarity boost (0-1)', + }), + ) + .default(0.75), + voice: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The voice to use for speech generation', + }), + ) + .default('Rachel'), + language_code: z.optional(z.union([z.string(), z.unknown()])), + previous_text: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * OrpheusOutput + */ +export const zSchemaOrpheusTtsOutput = z.object({ + audio: zSchemaFile, +}) + +/** + * OrpheusRequest + */ +export const zSchemaOrpheusTtsInput = z.object({ + text: z.string().register(z.globalRegistry, { + description: + 'The text to be converted to speech. 
You can additionally add the following emotive tags: &lt;laugh&gt;, &lt;chuckle&gt;, &lt;sigh&gt;, &lt;cough&gt;, &lt;sniffle&gt;, &lt;groan&gt;, &lt;yawn&gt;, &lt;gasp&gt;', + }), + voice: z.optional( + z + .enum(['tara', 'leah', 'jess', 'leo', 'dan', 'mia', 'zac', 'zoe']) + .register(z.globalRegistry, { + description: 'Voice ID for the desired voice.', + }), + ), + repetition_penalty: z + .optional( + z.number().gte(1.1).lte(2).register(z.globalRegistry, { + description: + 'Repetition penalty (>= 1.1 required for stable generations).', + }), + ) + .default(1.2), + temperature: z + .optional( + z.number().gte(0).lte(2).register(z.globalRegistry, { + description: 'Temperature for generation (higher = more creative).', + }), + ) + .default(0.7), +}) + +/** + * DiaOutput + */ +export const zSchemaDiaTtsOutput = z.object({ + audio: zSchemaFile, +}) + +/** + * DiaRequest + */ +export const zSchemaDiaTtsInput = z.object({ + text: z.string().register(z.globalRegistry, { + description: 'The text to be converted to speech.', + }), +}) + +/** + * TextToSpeechOutput + */ +export const zSchemaMinimaxSpeech02HdOutput = z.object({ + duration_ms: z.int().register(z.globalRegistry, { + description: 'Duration of the audio in milliseconds', + }), + audio: zSchemaFile, +}) + +/** + * AudioSetting + */ +export const zSchemaAudioSetting = z.object({ + format: z.optional( + z.enum(['mp3', 'pcm', 'flac']).register(z.globalRegistry, { + description: 'Audio format', + }), + ), + sample_rate: z.optional( + z + .union([ + z.literal(8000), + z.literal(16000), + z.literal(22050), + z.literal(24000), + z.literal(32000), + z.literal(44100), + ]) + .register(z.globalRegistry, { + description: 'Sample rate of generated audio', + }), + ), + channel: z.optional( + z.union([z.literal(1), z.literal(2)]).register(z.globalRegistry, { + description: 'Number of audio channels (1=mono, 2=stereo)', + }), + ), + bitrate: z.optional( + z + .union([ + z.literal(32000), + z.literal(64000), + z.literal(128000), + z.literal(256000), + ]) + .register(z.globalRegistry, { + description: 'Bitrate of generated audio', + }), + ), +}) + +/** + * PronunciationDict + */ +export const zSchemaPronunciationDict = z.object({ + tone_list: z.optional( + z.array(z.string()).register(z.globalRegistry, { + description: + "List of pronunciation replacements in format ['text/(pronunciation)', ...]. For Chinese, tones are 1-5. 
Example: ['燕少飞/(yan4)(shao3)(fei1)']", + }), + ), +}) + +/** + * VoiceSetting + */ +export const zSchemaVoiceSetting = z.object({ + speed: z + .optional( + z.number().gte(0.5).lte(2).register(z.globalRegistry, { + description: 'Speech speed (0.5-2.0)', + }), + ) + .default(1), + vol: z + .optional( + z.number().gte(0.01).lte(10).register(z.globalRegistry, { + description: 'Volume (0-10)', + }), + ) + .default(1), + voice_id: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Predefined voice ID to use for synthesis', + }), + ) + .default('Wise_Woman'), + pitch: z + .optional( + z.int().gte(-12).lte(12).register(z.globalRegistry, { + description: 'Voice pitch (-12 to 12)', + }), + ) + .default(0), + english_normalization: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Enables English text normalization to improve number reading performance, with a slight increase in latency', + }), + ) + .default(false), + emotion: z.optional( + z + .enum([ + 'happy', + 'sad', + 'angry', + 'fearful', + 'disgusted', + 'surprised', + 'neutral', + ]) + .register(z.globalRegistry, { + description: 'Emotion of the generated speech', + }), + ), +}) + +/** + * TextToSpeechHDRequest + */ +export const zSchemaMinimaxSpeech02HdInput = z.object({ + text: z.string().min(1).max(5000).register(z.globalRegistry, { + description: + 'Text to convert to speech (max 5000 characters, minimum 1 non-whitespace character)', + }), + language_boost: z.optional( + z + .enum([ + 'Chinese', + 'Chinese,Yue', + 'English', + 'Arabic', + 'Russian', + 'Spanish', + 'French', + 'Portuguese', + 'German', + 'Turkish', + 'Dutch', + 'Ukrainian', + 'Vietnamese', + 'Indonesian', + 'Japanese', + 'Italian', + 'Korean', + 'Thai', + 'Polish', + 'Romanian', + 'Greek', + 'Czech', + 'Finnish', + 'Hindi', + 'Bulgarian', + 'Danish', + 'Hebrew', + 'Malay', + 'Slovak', + 'Swedish', + 'Croatian', + 'Hungarian', + 'Norwegian', + 'Slovenian', + 'Catalan', + 'Nynorsk', + 'Afrikaans', + 'auto', + ]) + .register(z.globalRegistry, { + description: 'Enhance recognition of specified languages and dialects', + }), + ), + voice_setting: z.optional(zSchemaVoiceSetting), + output_format: z.optional( + z.enum(['url', 'hex']).register(z.globalRegistry, { + description: 'Format of the output content (non-streaming only)', + }), + ), + pronunciation_dict: z.optional(zSchemaPronunciationDict), + audio_setting: z.optional(zSchemaAudioSetting), +}) + +/** + * TextToSpeechOutput + */ +export const zSchemaMinimaxSpeech02TurboOutput = z.object({ + duration_ms: z.int().register(z.globalRegistry, { + description: 'Duration of the audio in milliseconds', + }), + audio: zSchemaFile, +}) + +/** + * TextToSpeechTurboRequest + */ +export const zSchemaMinimaxSpeech02TurboInput = z.object({ + text: z.string().min(1).max(5000).register(z.globalRegistry, { + description: + 'Text to convert to speech (max 5000 characters, minimum 1 non-whitespace character)', + }), + language_boost: z.optional( + z + .enum([ + 'Chinese', + 'Chinese,Yue', + 'English', + 'Arabic', + 'Russian', + 'Spanish', + 'French', + 'Portuguese', + 'German', + 'Turkish', + 'Dutch', + 'Ukrainian', + 'Vietnamese', + 'Indonesian', + 'Japanese', + 'Italian', + 'Korean', + 'Thai', + 'Polish', + 'Romanian', + 'Greek', + 'Czech', + 'Finnish', + 'Hindi', + 'Bulgarian', + 'Danish', + 'Hebrew', + 'Malay', + 'Slovak', + 'Swedish', + 'Croatian', + 'Hungarian', + 'Norwegian', + 'Slovenian', + 'Catalan', + 'Nynorsk', + 'Afrikaans', + 'auto', + ]) + .register(z.globalRegistry, { + 
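+ /* A minimal validation sketch (illustrative only; the sample values are
+ assumptions, not defaults): requests can be checked client-side before
+ submission, e.g.
+   const input = zSchemaMinimaxSpeech02HdInput.parse({
+     text: 'Hello world',
+     voice_setting: { voice_id: 'Wise_Woman', speed: 1.1 },
+     audio_setting: { format: 'mp3', sample_rate: 44100 },
+   })
+ Out-of-range values (say, speed: 5, above the 0.5-2.0 bound) throw a
+ ZodError instead of failing server-side. */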
description: 'Enhance recognition of specified languages and dialects', + }), + ), + voice_setting: z.optional(zSchemaVoiceSetting), + output_format: z.optional( + z.enum(['url', 'hex']).register(z.globalRegistry, { + description: 'Format of the output content (non-streaming only)', + }), + ), + pronunciation_dict: z.optional(zSchemaPronunciationDict), + audio_setting: z.optional(zSchemaAudioSetting), +}) + +/** + * VoiceCloneOutput + */ +export const zSchemaMinimaxVoiceCloneOutput = z.object({ + custom_voice_id: z.string().register(z.globalRegistry, { + description: 'The cloned voice ID for use with TTS', + }), + audio: z.optional(zSchemaFile), +}) + +/** + * VoiceCloneRequest + */ +export const zSchemaMinimaxVoiceCloneInput = z.object({ + model: z.optional( + z + .enum([ + 'speech-02-hd', + 'speech-02-turbo', + 'speech-01-hd', + 'speech-01-turbo', + ]) + .register(z.globalRegistry, { + description: + 'TTS model to use for preview. Options: speech-02-hd, speech-02-turbo, speech-01-hd, speech-01-turbo', + }), + ), + text: z + .optional( + z.string().max(1000).register(z.globalRegistry, { + description: + 'Text to generate a TTS preview with the cloned voice (optional)', + }), + ) + .default( + 'Hello, this is a preview of your cloned voice! I hope you like it!', + ), + audio_url: z.string().register(z.globalRegistry, { + description: + '\n URL of the input audio file for voice cloning. Should be at least 10 seconds\n long. To retain the voice permanently, use it with a TTS (text-to-speech)\n endpoint at least once within 7 days. Otherwise, it will be\n automatically deleted.\n ', + }), + accuracy: z.optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Text validation accuracy threshold (0-1)', + }), + ), + noise_reduction: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Enable noise reduction for the cloned voice', + }), + ) + .default(false), + need_volume_normalization: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Enable volume normalization for the cloned voice', + }), + ) + .default(false), +}) + +export const zSchemaChatterboxTextToSpeechOutput = z.unknown() + +/** + * ChatterboxRequest + */ +export const zSchemaChatterboxTextToSpeechInput = z.object({ + text: z.string().register(z.globalRegistry, { + description: + 'The text to be converted to speech. You can additionally add the following emotive tags: , , , , , , , ', + }), + exaggeration: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Exaggeration factor for the generated speech (0.0 = no exaggeration, 1.0 = maximum exaggeration).', + }), + ) + .default(0.25), + audio_url: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Optional URL to an audio file to use as a reference for the generated speech. If provided, the model will try to match the style and tone of the reference audio.', + }), + ) + .default( + 'https://storage.googleapis.com/chatterbox-demo-samples/prompts/male_rickmorty.mp3', + ), + temperature: z + .optional( + z.number().gte(0.05).lte(2).register(z.globalRegistry, { + description: 'Temperature for generation (higher = more creative).', + }), + ) + .default(0.7), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + "Useful to control the reproducibility of the generated audio. Assuming all other properties didn't change, a fixed seed should always generate the exact same audio file. 
Set to 0 for random seed.", + }), + ), + cfg: z.optional(z.number().gte(0.1).lte(1)).default(0.5), +}) + +/** + * Audio + */ +export const zSchemaAudio = z.object({ + file_size: z.optional( + z.int().register(z.globalRegistry, { + description: 'The size of the file in bytes.', + }), + ), + file_name: z.optional( + z.string().register(z.globalRegistry, { + description: + 'The name of the file. It will be auto-generated if not provided.', + }), + ), + content_type: z.optional( + z.string().register(z.globalRegistry, { + description: 'The mime type of the file.', + }), + ), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), + file_data: z.optional( + z.string().register(z.globalRegistry, { + description: 'File data', + }), + ), +}) + +/** + * TTSOutput + * + * Output parameters for the TTS request. + */ +export const zSchemaChatterboxhdTextToSpeechOutput = z + .object({ + audio: zSchemaAudio, + }) + .register(z.globalRegistry, { + description: 'Output parameters for the TTS request.', + }) + +/** + * TTSInput + * + * Input parameters for the TTS request. + */ +export const zSchemaChatterboxhdTextToSpeechInput = z + .object({ + text: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Text to synthesize into speech.', + }), + ) + .default( + 'My name is Maximus Decimus Meridius, commander of the Armies of the North, General of the Felix Legions and loyal servant to the true emperor, Marcus Aurelius. Father to a murdered son, husband to a murdered wife. And I will have my vengeance, in this life or the next.', + ), + exaggeration: z + .optional( + z.number().gte(0.25).lte(2).register(z.globalRegistry, { + description: + 'Controls emotion exaggeration. Range typically 0.25 to 2.0.', + }), + ) + .default(0.5), + high_quality_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If True, the generated audio will be upscaled to 48kHz. The generation of the audio will take longer, but the quality will be higher. If False, the generated audio will be 24kHz. ', + }), + ) + .default(false), + voice: z.optional( + z + .enum([ + 'Aurora', + 'Blade', + 'Britney', + 'Carl', + 'Cliff', + 'Richard', + 'Rico', + 'Siobhan', + 'Vicky', + ]) + .register(z.globalRegistry, { + description: + 'The voice to use for the TTS request. If neither voice nor audio are provided, a random voice will be used.', + }), + ), + audio_url: z.optional( + z.string().register(z.globalRegistry, { + description: + 'URL to the audio sample to use as a voice prompt for zero-shot TTS voice cloning. Providing an audio sample will override the voice setting. If neither voice nor audio_url are provided, a random voice will be used.', + }), + ), + temperature: z + .optional( + z.number().gte(0.05).lte(5).register(z.globalRegistry, { + description: + 'Controls the randomness of generation. Range typically 0.05 to 5.', + }), + ) + .default(0.8), + seed: z + .optional( + z.int().gte(0).register(z.globalRegistry, { + description: + "Useful to control the reproducibility of the generated audio. Assuming all other properties didn't change, a fixed seed should always generate the exact same audio file. Set to 0 for random seed.", + }), + ) + .default(0), + cfg: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Classifier-free guidance scale (CFG) controls the conditioning factor. Range typically 0.2 to 1.0. For expressive or dramatic speech, try lower cfg values (e.g. 
~0.3) and increase exaggeration to around 0.7 or higher. If the reference speaker has a fast speaking style, lowering cfg to around 0.3 can improve pacing.', + }), + ) + .default(0.5), + }) + .register(z.globalRegistry, { + description: 'Input parameters for the TTS request.', + }) + +/** + * VoiceDesignOutput + */ +export const zSchemaMinimaxVoiceDesignOutput = z.object({ + custom_voice_id: z.string().register(z.globalRegistry, { + description: 'The voice_id of the generated voice', + }), + audio: zSchemaFile, +}) + +/** + * VoiceDesignRequest + */ +export const zSchemaMinimaxVoiceDesignInput = z.object({ + preview_text: z.string().max(500).register(z.globalRegistry, { + description: + 'Text for audio preview. Limited to 500 characters. A fee of $30 per 1M characters will be charged for the generation of the preview audio.', + }), + prompt: z.string().max(2000).register(z.globalRegistry, { + description: 'Voice description prompt for generating a personalized voice', + }), +}) + +/** + * TextToSpeechOutput + */ +export const zSchemaMinimaxPreviewSpeech25TurboOutput = z.object({ + duration_ms: z.int().register(z.globalRegistry, { + description: 'Duration of the audio in milliseconds', + }), + audio: zSchemaFile, +}) + +/** + * TextToSpeechTurbov25Request + */ +export const zSchemaMinimaxPreviewSpeech25TurboInput = z.object({ + text: z.string().min(1).max(5000).register(z.globalRegistry, { + description: + 'Text to convert to speech (max 5000 characters, minimum 1 non-whitespace character)', + }), + language_boost: z.optional( + z + .enum([ + 'Persian', + 'Filipino', + 'Tamil', + 'Chinese', + 'Chinese,Yue', + 'English', + 'Arabic', + 'Russian', + 'Spanish', + 'French', + 'Portuguese', + 'German', + 'Turkish', + 'Dutch', + 'Ukrainian', + 'Vietnamese', + 'Indonesian', + 'Japanese', + 'Italian', + 'Korean', + 'Thai', + 'Polish', + 'Romanian', + 'Greek', + 'Czech', + 'Finnish', + 'Hindi', + 'Bulgarian', + 'Danish', + 'Hebrew', + 'Malay', + 'Slovak', + 'Swedish', + 'Croatian', + 'Hungarian', + 'Norwegian', + 'Slovenian', + 'Catalan', + 'Nynorsk', + 'Afrikaans', + 'auto', + ]) + .register(z.globalRegistry, { + description: 'Enhance recognition of specified languages and dialects', + }), + ), + voice_setting: z.optional(zSchemaVoiceSetting), + output_format: z.optional( + z.enum(['url', 'hex']).register(z.globalRegistry, { + description: 'Format of the output content (non-streaming only)', + }), + ), + pronunciation_dict: z.optional(zSchemaPronunciationDict), + audio_setting: z.optional(zSchemaAudioSetting), +}) + +/** + * TextToSpeechOutput + */ +export const zSchemaMinimaxPreviewSpeech25HdOutput = z.object({ + duration_ms: z.int().register(z.globalRegistry, { + description: 'Duration of the audio in milliseconds', + }), + audio: zSchemaFile, +}) + +/** + * TextToSpeechHDv25Request + */ +export const zSchemaMinimaxPreviewSpeech25HdInput = z.object({ + text: z.string().min(1).max(5000).register(z.globalRegistry, { + description: + 'Text to convert to speech (max 5000 characters, minimum 1 non-whitespace character)', + }), + language_boost: z.optional( + z + .enum([ + 'Persian', + 'Filipino', + 'Tamil', + 'Chinese', + 'Chinese,Yue', + 'English', + 'Arabic', + 'Russian', + 'Spanish', + 'French', + 'Portuguese', + 'German', + 'Turkish', + 'Dutch', + 'Ukrainian', + 'Vietnamese', + 'Indonesian', + 'Japanese', + 'Italian', + 'Korean', + 'Thai', + 'Polish', + 'Romanian', + 'Greek', + 'Czech', + 'Finnish', + 'Hindi', + 'Bulgarian', + 'Danish', + 'Hebrew', + 'Malay', + 'Slovak', + 'Swedish', + 'Croatian', 
+ 'Hungarian', + 'Norwegian', + 'Slovenian', + 'Catalan', + 'Nynorsk', + 'Afrikaans', + 'auto', + ]) + .register(z.globalRegistry, { + description: 'Enhance recognition of specified languages and dialects', + }), + ), + voice_setting: z.optional(zSchemaVoiceSetting), + output_format: z.optional( + z.enum(['url', 'hex']).register(z.globalRegistry, { + description: 'Format of the output content (non-streaming only)', + }), + ), + pronunciation_dict: z.optional(zSchemaPronunciationDict), + audio_setting: z.optional(zSchemaAudioSetting), +}) + +/** + * VibeVoiceOutput + * + * Output schema for VibeVoice TTS generation + */ +export const zSchemaVibevoiceOutput = z + .object({ + duration: z.number().register(z.globalRegistry, { + description: 'Duration of the generated audio in seconds', + }), + rtf: z.number().register(z.globalRegistry, { + description: + 'Real-time factor (generation_time / audio_duration). Lower is better.', + }), + sample_rate: z.int().register(z.globalRegistry, { + description: 'Sample rate of the generated audio', + }), + generation_time: z.number().register(z.globalRegistry, { + description: 'Time taken to generate the audio in seconds', + }), + audio: zSchemaFile, + }) + .register(z.globalRegistry, { + description: 'Output schema for VibeVoice TTS generation', + }) + +/** + * VibeVoiceSpeaker + */ +export const zSchemaVibeVoiceSpeaker = z.object({ + preset: z.optional( + z + .enum([ + 'Alice [EN]', + 'Carter [EN]', + 'Frank [EN]', + 'Mary [EN] (Background Music)', + 'Maya [EN]', + 'Anchen [ZH] (Background Music)', + 'Bowen [ZH]', + 'Xinran [ZH]', + ]) + .register(z.globalRegistry, { + description: + 'Default voice preset to use for the speaker. Not used if `audio_url` is provided.', + }), + ), + audio_url: z.optional( + z.string().register(z.globalRegistry, { + description: + 'URL to a voice sample audio file. If provided, `preset` will be ignored.', + }), + ), +}) + +/** + * VibeVoiceInput + * + * Input schema for VibeVoice TTS generation + */ +export const zSchemaVibevoiceInput = z + .object({ + script: z.string().max(90000).register(z.globalRegistry, { + description: + "The script to convert to speech. Can be formatted with 'Speaker X:' prefixes for multi-speaker dialogues.", + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for reproducible generation.', + }), + ), + speakers: z.array(zSchemaVibeVoiceSpeaker).register(z.globalRegistry, { + description: + 'List of speakers to use for the script. If not provided, will be inferred from the script or voice samples.', + }), + cfg_scale: z + .optional( + z.number().gte(1).lte(2).register(z.globalRegistry, { + description: + 'CFG (Classifier-Free Guidance) scale for generation. Higher values increase adherence to text.', + }), + ) + .default(1.3), + }) + .register(z.globalRegistry, { + description: 'Input schema for VibeVoice TTS generation', + }) + +/** + * VibeVoiceOutput + * + * Output schema for VibeVoice TTS generation + */ +export const zSchemaVibevoice7bOutput = z + .object({ + duration: z.number().register(z.globalRegistry, { + description: 'Duration of the generated audio in seconds', + }), + rtf: z.number().register(z.globalRegistry, { + description: + 'Real-time factor (generation_time / audio_duration). 
Lower is better.', + }), + sample_rate: z.int().register(z.globalRegistry, { + description: 'Sample rate of the generated audio', + }), + generation_time: z.number().register(z.globalRegistry, { + description: 'Time taken to generate the audio in seconds', + }), + audio: zSchemaFile, + }) + .register(z.globalRegistry, { + description: 'Output schema for VibeVoice TTS generation', + }) + +/** + * VibeVoice7bInput + * + * Input schema for VibeVoice-7b TTS generation + */ +export const zSchemaVibevoice7bInput = z + .object({ + script: z.string().max(30000).register(z.globalRegistry, { + description: + "The script to convert to speech. Can be formatted with 'Speaker X:' prefixes for multi-speaker dialogues.", + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for reproducible generation.', + }), + ), + speakers: z.array(zSchemaVibeVoiceSpeaker).register(z.globalRegistry, { + description: + 'List of speakers to use for the script. If not provided, will be inferred from the script or voice samples.', + }), + cfg_scale: z + .optional( + z.number().gte(1).lte(2).register(z.globalRegistry, { + description: + 'CFG (Classifier-Free Guidance) scale for generation. Higher values increase adherence to text.', + }), + ) + .default(1.3), + }) + .register(z.globalRegistry, { + description: 'Input schema for VibeVoice-7b TTS generation', + }) + +/** + * ChatterboxMultilingualOutput + */ +export const zSchemaChatterboxTextToSpeechMultilingualOutput = z.object({ + audio: zSchemaFile, +}) + +/** + * ChatterboxMultilingualRequest + */ +export const zSchemaChatterboxTextToSpeechMultilingualInput = z.object({ + text: z.string().max(300).register(z.globalRegistry, { + description: + 'The text to be converted to speech (maximum 300 characters). Supports 23 languages including English, French, German, Spanish, Italian, Portuguese, Hindi, Arabic, Chinese, Japanese, Korean, and more.', + }), + custom_audio_language: z.optional( + z + .enum([ + 'english', + 'arabic', + 'danish', + 'german', + 'greek', + 'spanish', + 'finnish', + 'french', + 'hebrew', + 'hindi', + 'italian', + 'japanese', + 'korean', + 'malay', + 'dutch', + 'norwegian', + 'polish', + 'portuguese', + 'russian', + 'swedish', + 'swahili', + 'turkish', + 'chinese', + ]) + .register(z.globalRegistry, { + description: + 'If using a custom audio URL, specify the language of the audio here. Ignored if voice is not a custom url.', + }), + ), + exaggeration: z + .optional( + z.number().gte(0.25).lte(2).register(z.globalRegistry, { + description: + 'Controls speech expressiveness and emotional intensity (0.25-2.0). 0.5 is neutral, higher values increase expressiveness. Extreme values may be unstable.', + }), + ) + .default(0.5), + voice: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Language code for synthesis. In case using custom please provide audio url and select custom_audio_language. ', + }), + ) + .default('english'), + temperature: z + .optional( + z.number().gte(0.05).lte(5).register(z.globalRegistry, { + description: + 'Controls randomness and variation in generation (0.05-5.0). Higher values create more varied speech patterns.', + }), + ) + .default(0.8), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducible results. 
Set to 0 for random generation, or provide a specific number for consistent outputs.', + }), + ), + cfg_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Configuration/pace weight controlling generation guidance (0.0-1.0). Use 0.0 for language transfer to mitigate accent inheritance.', + }), + ) + .default(0.5), +}) + +/** + * TTSOutput + */ +export const zSchemaKlingVideoV1TtsOutput = z.object({ + audio: zSchemaFile, +}) + +/** + * TTSInput + */ +export const zSchemaKlingVideoV1TtsInput = z.object({ + text: z.string().max(500).register(z.globalRegistry, { + description: 'The text to be converted to speech', + }), + voice_id: z.optional( + z + .enum([ + 'genshin_vindi2', + 'zhinen_xuesheng', + 'AOT', + 'ai_shatang', + 'genshin_klee2', + 'genshin_kirara', + 'ai_kaiya', + 'oversea_male1', + 'ai_chenjiahao_712', + 'girlfriend_4_speech02', + 'chat1_female_new-3', + 'chat_0407_5-1', + 'cartoon-boy-07', + 'uk_boy1', + 'cartoon-girl-01', + 'PeppaPig_platform', + 'ai_huangzhong_712', + 'ai_huangyaoshi_712', + 'ai_laoguowang_712', + 'chengshu_jiejie', + 'you_pingjing', + 'calm_story1', + 'uk_man2', + 'laopopo_speech02', + 'heainainai_speech02', + 'reader_en_m-v1', + 'commercial_lady_en_f-v1', + 'tiyuxi_xuedi', + 'tiexin_nanyou', + 'girlfriend_1_speech02', + 'girlfriend_2_speech02', + 'zhuxi_speech02', + 'uk_oldman3', + 'dongbeilaotie_speech02', + 'chongqingxiaohuo_speech02', + 'chuanmeizi_speech02', + 'chaoshandashu_speech02', + 'ai_taiwan_man2_speech02', + 'xianzhanggui_speech02', + 'tianjinjiejie_speech02', + 'diyinnansang_DB_CN_M_04-v2', + 'yizhipiannan-v1', + 'guanxiaofang-v2', + 'tianmeixuemei-v1', + 'daopianyansang-v1', + 'mengwa-v1', + ]) + .register(z.globalRegistry, { + description: 'The voice ID to use for speech synthesis', + }), + ), + voice_speed: z + .optional( + z.number().gte(0.8).lte(2).register(z.globalRegistry, { + description: 'Rate of speech', + }), + ) + .default(1), +}) + +/** + * EmotionalStrengths + */ +export const zSchemaEmotionalStrengths = z.object({ + afraid: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Strength of fear emotion', + }), + ) + .default(0), + calm: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Strength of calm emotion', + }), + ) + .default(0), + disgusted: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Strength of disgust emotion', + }), + ) + .default(0), + angry: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Strength of anger emotion', + }), + ) + .default(0), + sad: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Strength of sadness emotion', + }), + ) + .default(0), + melancholic: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Strength of melancholic emotion', + }), + ) + .default(0), + surprised: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Strength of surprise emotion', + }), + ) + .default(0), + happy: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Strength of happiness emotion', + }), + ) + .default(0), +}) + +/** + * IndexTTS2Output + */ +export const zSchemaIndexTts2TextToSpeechOutput = z.object({ + audio: zSchemaFile, +}) + +/** + * IndexTTS2Input + */ +export const zSchemaIndexTts2TextToSpeechInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 
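+ /* A minimal usage sketch for this schema (illustrative only; the values
+ are assumptions): emotion weights can be blended alongside the required
+ reference audio_url, e.g.
+   const input = zSchemaIndexTts2TextToSpeechInput.parse({
+     prompt: 'It has been a long day.',
+     audio_url: 'https://example.com/voice-reference.wav',
+     emotional_strengths: { sad: 0.6, calm: 0.3 },
+     strength: 0.8,
+   })
+ Each weight is validated against the [0, 1] range defined in
+ zSchemaEmotionalStrengths above. */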
'The speech prompt to generate', + }), + emotional_strengths: z.optional(zSchemaEmotionalStrengths), + strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The strength of the emotional style transfer. Higher values result in stronger emotional influence.', + }), + ) + .default(1), + emotional_audio_url: z.optional( + z.string().register(z.globalRegistry, { + description: + 'The emotional reference audio file to extract the style from.', + }), + ), + audio_url: z.string().register(z.globalRegistry, { + description: 'The audio file to generate the speech from.', + }), + emotion_prompt: z.optional( + z.string().register(z.globalRegistry, { + description: + 'The emotional prompt to influence the emotional style. Must be used together with should_use_prompt_for_emotion.', + }), + ), + should_use_prompt_for_emotion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to use the `prompt` to calculate emotional strengths; if enabled it will overwrite the `emotional_strengths` values. If `emotion_prompt` is provided, it will be used instead of `prompt` to extract the emotional style.', + }), + ) + .default(false), +}) + +/** + * TextToSpeechHD26Output + */ +export const zSchemaMinimaxSpeech26HdOutput = z.object({ + duration_ms: z.int().register(z.globalRegistry, { + description: 'Duration of the audio in milliseconds', + }), + audio: zSchemaFile, +}) + +/** + * LoudnessNormalizationSetting + */ +export const zSchemaLoudnessNormalizationSetting = z.object({ + enabled: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Enable loudness normalization for the audio', + }), + ) + .default(true), + target_loudness: z + .optional( + z.number().gte(-70).lte(-10).register(z.globalRegistry, { + description: 'Target loudness in LUFS (default -18.0)', + }), + ) + .default(-18), + target_range: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: 'Target loudness range in LU (default 8.0)', + }), + ) + .default(8), + target_peak: z + .optional( + z.number().gte(-3).lte(0).register(z.globalRegistry, { + description: 'Target peak level in dBTP (default -0.5).', + }), + ) + .default(-0.5), +}) + +/** + * TextToSpeechHD26Request + */ +export const zSchemaMinimaxSpeech26HdInput = z.object({ + prompt: z.string().min(1).max(10000).register(z.globalRegistry, { + description: + 'Text to convert to speech. Paragraph breaks should be marked with newline characters. **NOTE**: You can customize speech pauses by adding markers in the form `<#x#>`, where `x` is the pause duration in seconds. Valid range: `[0.01, 99.99]`, up to two decimal places. 
Pause markers must be placed between speakable text segments and cannot be used consecutively.', + }), + language_boost: z.optional( + z + .enum([ + 'Chinese', + 'Chinese,Yue', + 'English', + 'Arabic', + 'Russian', + 'Spanish', + 'French', + 'Portuguese', + 'German', + 'Turkish', + 'Dutch', + 'Ukrainian', + 'Vietnamese', + 'Indonesian', + 'Japanese', + 'Italian', + 'Korean', + 'Thai', + 'Polish', + 'Romanian', + 'Greek', + 'Czech', + 'Finnish', + 'Hindi', + 'Bulgarian', + 'Danish', + 'Hebrew', + 'Malay', + 'Slovak', + 'Swedish', + 'Croatian', + 'Hungarian', + 'Norwegian', + 'Slovenian', + 'Catalan', + 'Nynorsk', + 'Afrikaans', + 'auto', + ]) + .register(z.globalRegistry, { + description: 'Enhance recognition of specified languages and dialects', + }), + ), + output_format: z.optional( + z.enum(['url', 'hex']).register(z.globalRegistry, { + description: 'Format of the output content (non-streaming only)', + }), + ), + pronunciation_dict: z.optional(zSchemaPronunciationDict), + voice_setting: z.optional(zSchemaVoiceSetting), + normalization_setting: z.optional(zSchemaLoudnessNormalizationSetting), + audio_setting: z.optional(zSchemaAudioSetting), +}) + +/** + * TextToSpeechTurbo26Output + */ +export const zSchemaMinimaxSpeech26TurboOutput = z.object({ + duration_ms: z.int().register(z.globalRegistry, { + description: 'Duration of the audio in milliseconds', + }), + audio: zSchemaFile, +}) + +/** + * TextToSpeechTurbo26Request + */ +export const zSchemaMinimaxSpeech26TurboInput = z.object({ + prompt: z.string().min(1).max(10000).register(z.globalRegistry, { + description: + 'Text to convert to speech. Paragraph breaks should be marked with newline characters. **NOTE**: You can customize speech pauses by adding markers in the form `<#x#>`, where `x` is the pause duration in seconds. Valid range: `[0.01, 99.99]`, up to two decimal places. Pause markers must be placed between speakable text segments and cannot be used consecutively.', + }), + language_boost: z.optional( + z + .enum([ + 'Chinese', + 'Chinese,Yue', + 'English', + 'Arabic', + 'Russian', + 'Spanish', + 'French', + 'Portuguese', + 'German', + 'Turkish', + 'Dutch', + 'Ukrainian', + 'Vietnamese', + 'Indonesian', + 'Japanese', + 'Italian', + 'Korean', + 'Thai', + 'Polish', + 'Romanian', + 'Greek', + 'Czech', + 'Finnish', + 'Hindi', + 'Bulgarian', + 'Danish', + 'Hebrew', + 'Malay', + 'Slovak', + 'Swedish', + 'Croatian', + 'Hungarian', + 'Norwegian', + 'Slovenian', + 'Catalan', + 'Nynorsk', + 'Afrikaans', + 'auto', + ]) + .register(z.globalRegistry, { + description: 'Enhance recognition of specified languages and dialects', + }), + ), + output_format: z.optional( + z.enum(['url', 'hex']).register(z.globalRegistry, { + description: 'Format of the output content (non-streaming only)', + }), + ), + pronunciation_dict: z.optional(zSchemaPronunciationDict), + voice_setting: z.optional(zSchemaVoiceSetting), + normalization_setting: z.optional(zSchemaLoudnessNormalizationSetting), + audio_setting: z.optional(zSchemaAudioSetting), +}) + +/** + * MayaVoiceOutput + * + * Output schema for Maya-1-Voice TTS generation + */ +export const zSchemaMayaOutput = z + .object({ + rtf: z.number().register(z.globalRegistry, { + description: + 'Real-time factor (generation_time / audio_duration). 
Lower is better.',
+    }),
+    duration: z.number().register(z.globalRegistry, {
+      description: 'Duration of the generated audio in seconds',
+    }),
+    sample_rate: z.string().register(z.globalRegistry, {
+      description: 'Sample rate of the generated audio',
+    }),
+    generation_time: z.number().register(z.globalRegistry, {
+      description: 'Time taken to generate the audio in seconds',
+    }),
+    audio: zSchemaFile,
+  })
+  .register(z.globalRegistry, {
+    description: 'Output schema for Maya-1-Voice TTS generation',
+  })
+
+/**
+ * MayaVoiceInput
+ *
+ * Input schema for Maya-1-Voice TTS generation
+ */
+export const zSchemaMayaInput = z
+  .object({
+    repetition_penalty: z
+      .optional(
+        z.number().gte(1).lte(2).register(z.globalRegistry, {
+          description:
+            'Penalty for repeating tokens. Higher values reduce repetition artifacts.',
+        }),
+      )
+      .default(1.1),
+    prompt: z.string().max(500).register(z.globalRegistry, {
+      description:
+        'Description of the voice/character. Includes attributes like age, accent, pitch, timbre, pacing, tone, and intensity. See examples for format.',
+    }),
+    top_p: z
+      .optional(
+        z.number().gte(0).lte(1).register(z.globalRegistry, {
+          description:
+            'Nucleus sampling parameter. Controls diversity of token selection.',
+        }),
+      )
+      .default(0.9),
+    text: z.string().max(5000).register(z.globalRegistry, {
+      description:
+        "The text to synthesize into speech. You can embed emotion tags anywhere in the text using the format `<emotion>`. Available emotions: laugh, laugh_harder, sigh, chuckle, gasp, angry, excited, whisper, cry, scream, sing, snort, exhale, gulp, giggle, sarcastic, curious. Example: 'Hello world! <laugh> This is amazing!' or 'I can't believe this happened <sigh> again.'",
+    }),
+    output_format: z.optional(
+      z.enum(['wav', 'mp3']).register(z.globalRegistry, {
+        description: 'Output audio format for the generated speech',
+      }),
+    ),
+    max_tokens: z
+      .optional(
+        z.int().gte(28).lte(4000).register(z.globalRegistry, {
+          description:
+            'Maximum number of SNAC tokens to generate (7 tokens per frame). Controls maximum audio length.',
+        }),
+      )
+      .default(2000),
+    temperature: z
+      .optional(
+        z.number().gte(0).lte(2).register(z.globalRegistry, {
+          description:
+            'Sampling temperature. Lower values (0.2-0.5) produce more stable/consistent audio. Higher values add variation.',
+        }),
+      )
+      .default(0.4),
+    sample_rate: z.optional(
+      z.enum(['48 kHz', '24 kHz']).register(z.globalRegistry, {
+        description:
+          'Output audio sample rate. 48 kHz provides higher quality audio, 24 kHz is faster.',
+      }),
+    ),
+  })
+  .register(z.globalRegistry, {
+    description: 'Input schema for Maya-1-Voice TTS generation',
+  })
+
+export const zSchemaMayaStreamOutput = z.unknown()
+
+/**
+ * MayaVoiceStreamingInput
+ *
+ * Input schema for Maya-1-Voice streaming TTS generation
+ */
+export const zSchemaMayaStreamInput = z
+  .object({
+    repetition_penalty: z
+      .optional(
+        z.number().gte(1).lte(2).register(z.globalRegistry, {
+          description:
+            'Penalty for repeating tokens. Higher values reduce repetition artifacts.',
+        }),
+      )
+      .default(1.1),
+    prompt: z.string().max(500).register(z.globalRegistry, {
+      description:
+        'Description of the voice/character. Includes attributes like age, accent, pitch, timbre, pacing, tone, and intensity. See examples for format.',
+    }),
+    top_p: z
+      .optional(
+        z.number().gte(0).lte(1).register(z.globalRegistry, {
+          description:
+            'Nucleus sampling parameter. 
Controls diversity of token selection.',
+        }),
+      )
+      .default(0.9),
+    text: z.string().max(5000).register(z.globalRegistry, {
+      description:
+        "The text to synthesize into speech. You can embed emotion tags anywhere in the text using the format `<emotion>`. Available emotions: laugh, laugh_harder, sigh, chuckle, gasp, angry, excited, whisper, cry, scream, sing, snort, exhale, gulp, giggle, sarcastic, curious. Example: 'Hello world! <laugh> This is amazing!' or 'I can't believe this happened <sigh> again.'",
+    }),
+    output_format: z.optional(
+      z.enum(['mp3', 'wav', 'pcm']).register(z.globalRegistry, {
+        description:
+          "Output audio format. 'mp3' for browser-playable audio, 'wav' for uncompressed audio, 'pcm' for raw PCM (lowest latency, requires client-side decoding).",
+      }),
+    ),
+    max_tokens: z
+      .optional(
+        z.int().gte(28).lte(4000).register(z.globalRegistry, {
+          description:
+            'Maximum number of SNAC tokens to generate (7 tokens per frame). Controls maximum audio length.',
+        }),
+      )
+      .default(2000),
+    temperature: z
+      .optional(
+        z.number().gte(0).lte(2).register(z.globalRegistry, {
+          description:
+            'Sampling temperature. Lower values (0.2-0.5) produce more stable/consistent audio. Higher values add variation.',
+        }),
+      )
+      .default(0.4),
+    sample_rate: z.optional(
+      z.enum(['48 kHz', '24 kHz']).register(z.globalRegistry, {
+        description:
+          'Output audio sample rate. 48 kHz uses upsampling for higher quality audio, 24 kHz is native SNAC output (faster, lower latency).',
+      }),
+    ),
+  })
+  .register(z.globalRegistry, {
+    description: 'Input schema for Maya-1-Voice streaming TTS generation',
+  })
+
+/**
+ * MayaVoiceBatchOutput
+ *
+ * Output schema for batch Maya-1-Voice TTS generation
+ */
+export const zSchemaMayaBatchOutput = z
+  .object({
+    average_rtf: z.number().register(z.globalRegistry, {
+      description: 'Average real-time factor across all generations',
+    }),
+    sample_rate: z.string().register(z.globalRegistry, {
+      description: 'Sample rate of all generated audio files',
+    }),
+    total_generation_time: z.number().register(z.globalRegistry, {
+      description: 'Total time taken to generate all audio files in seconds',
+    }),
+    audios: z.array(zSchemaFile).register(z.globalRegistry, {
+      description: 'List of generated audio files',
+    }),
+    durations: z.array(z.number()).register(z.globalRegistry, {
+      description: 'Duration of each generated audio in seconds',
+    }),
+  })
+  .register(z.globalRegistry, {
+    description: 'Output schema for batch Maya-1-Voice TTS generation',
+  })
+
+/**
+ * MayaVoiceBatchInput
+ *
+ * Input schema for batch Maya-1-Voice TTS generation
+ */
+export const zSchemaMayaBatchInput = z
+  .object({
+    repetition_penalty: z
+      .optional(
+        z.number().gte(1).lte(2).register(z.globalRegistry, {
+          description: 'Repetition penalty for all generations.',
+        }),
+      )
+      .default(1.1),
+    top_p: z
+      .optional(
+        z.number().gte(0).lte(1).register(z.globalRegistry, {
+          description: 'Nucleus sampling parameter for all generations.',
+        }),
+      )
+      .default(0.9),
+    output_format: z.optional(
+      z.enum(['wav', 'mp3']).register(z.globalRegistry, {
+        description: 'Output audio format for all generated speech files',
+      }),
+    ),
+    texts: z.array(z.string()).min(1).max(100).register(z.globalRegistry, {
+      description:
+        'List of texts to synthesize into speech. You can embed emotion tags in each text using the format `<emotion>`.',
+    }),
+    prompts: z.array(z.string()).min(1).max(100).register(z.globalRegistry, {
+      description:
+        'List of voice descriptions for each text. Must match the length of texts list. 
Each describes the voice/character attributes.', + }), + max_tokens: z + .optional( + z.int().gte(28).lte(4000).register(z.globalRegistry, { + description: 'Maximum SNAC tokens per generation.', + }), + ) + .default(2000), + temperature: z + .optional( + z.number().gte(0).lte(2).register(z.globalRegistry, { + description: 'Sampling temperature for all generations.', + }), + ) + .default(0.4), + sample_rate: z.optional( + z.enum(['48 kHz', '24 kHz']).register(z.globalRegistry, { + description: + 'Output audio sample rate for all generations. 48 kHz provides higher quality, 24 kHz is faster.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'Input schema for batch Maya-1-Voice TTS generation', + }) + +/** + * VibeVoice_0_5BOutput + * + * Output schema for VibeVoice-0.5b TTS generation + */ +export const zSchemaVibevoice05bOutput = z + .object({ + duration: z.number().register(z.globalRegistry, { + description: 'Duration of the generated audio in seconds', + }), + rtf: z.number().register(z.globalRegistry, { + description: + 'Real-time factor (generation_time / audio_duration). Lower is better.', + }), + sample_rate: z.int().register(z.globalRegistry, { + description: 'Sample rate of the generated audio', + }), + generation_time: z.number().register(z.globalRegistry, { + description: 'Time taken to generate the audio in seconds', + }), + audio: zSchemaFile, + }) + .register(z.globalRegistry, { + description: 'Output schema for VibeVoice-0.5b TTS generation', + }) + +/** + * VibeVoice0_5bInput + * + * Input schema for VibeVoice-0.5b TTS generation + */ +export const zSchemaVibevoice05bInput = z + .object({ + script: z.string().max(90000).register(z.globalRegistry, { + description: 'The script to convert to speech.', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for reproducible generation.', + }), + ), + speaker: z + .enum(['Frank', 'Wayne', 'Carter', 'Emma', 'Grace', 'Mike']) + .register(z.globalRegistry, { + description: 'Voice to use for speaking.', + }), + cfg_scale: z + .optional( + z.number().gte(1).lte(2).register(z.globalRegistry, { + description: + 'CFG (Classifier-Free Guidance) scale for generation. Higher values increase adherence to text.', + }), + ) + .default(1.3), + }) + .register(z.globalRegistry, { + description: 'Input schema for VibeVoice-0.5b TTS generation', + }) + +/** + * AudioFile + */ +export const zSchemaAudioFile = z.object({ + file_size: z.optional( + z.int().register(z.globalRegistry, { + description: 'The size of the file in bytes.', + }), + ), + duration: z.optional( + z.number().register(z.globalRegistry, { + description: 'The duration of the audio', + }), + ), + file_data: z.optional( + z.string().register(z.globalRegistry, { + description: 'File data', + }), + ), + channels: z.optional( + z.int().register(z.globalRegistry, { + description: 'The number of channels in the audio', + }), + ), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), + file_name: z.optional( + z.string().register(z.globalRegistry, { + description: + 'The name of the file. 
It will be auto-generated if not provided.', + }), + ), + sample_rate: z.optional( + z.int().register(z.globalRegistry, { + description: 'The sample rate of the audio', + }), + ), + content_type: z.optional( + z.string().register(z.globalRegistry, { + description: 'The mime type of the file.', + }), + ), + bitrate: z.optional(z.union([z.string(), z.int()])), +}) + +/** + * Qwen3TTSOutput06b + */ +export const zSchemaQwen3TtsTextToSpeech06bOutput = z.object({ + audio: zSchemaAudioFile, +}) + +/** + * Qwen3TTSInput06b + */ +export const zSchemaQwen3TtsTextToSpeech06bInput = z.object({ + prompt: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Optional prompt to guide the style of the generated speech. This prompt will be ignored if a speaker embedding is provided.', + }), + ), + speaker_voice_embedding_file_url: z.optional( + z.string().register(z.globalRegistry, { + description: + 'URL to a speaker embedding file in safetensors format, from `fal-ai/qwen-3-tts/clone-voice/0.6b` endpoint. If provided, the TTS model will use the cloned voice for synthesis instead of the predefined voices.', + }), + ), + top_p: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Top-p sampling parameter.', + }), + ) + .default(1), + repetition_penalty: z + .optional( + z.number().gte(0).register(z.globalRegistry, { + description: 'Penalty to reduce repeated tokens/codes.', + }), + ) + .default(1.05), + subtalker_temperature: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Temperature for sub-talker sampling.', + }), + ) + .default(0.9), + top_k: z + .optional( + z.int().gte(0).register(z.globalRegistry, { + description: 'Top-k sampling parameter.', + }), + ) + .default(50), + voice: z.optional( + z + .enum([ + 'Vivian', + 'Serena', + 'Uncle_Fu', + 'Dylan', + 'Eric', + 'Ryan', + 'Aiden', + 'Ono_Anna', + 'Sohee', + ]) + .register(z.globalRegistry, { + description: + "The voice to be used for speech synthesis, will be ignored if a speaker embedding is provided. Check out the **[documentation](https://github.com/QwenLM/Qwen3-TTS/tree/main?tab=readme-ov-file#custom-voice-generate)** for each voice's details and which language they primarily support.", + }), + ), + reference_text: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Optional reference text that was used when creating the speaker embedding. 
Providing this can improve synthesis quality when using a cloned voice.', + }), + ), + temperature: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Sampling temperature; higher => more random.', + }), + ) + .default(0.9), + language: z.optional( + z + .enum([ + 'Auto', + 'English', + 'Chinese', + 'Spanish', + 'French', + 'German', + 'Italian', + 'Japanese', + 'Korean', + 'Portuguese', + 'Russian', + ]) + .register(z.globalRegistry, { + description: 'The language of the voice.', + }), + ), + subtalker_top_k: z + .optional( + z.int().gte(0).register(z.globalRegistry, { + description: 'Top-k for sub-talker sampling.', + }), + ) + .default(50), + text: z.string().register(z.globalRegistry, { + description: 'The text to be converted to speech.', + }), + max_new_tokens: z + .optional( + z.int().gte(1).lte(8192).register(z.globalRegistry, { + description: 'Maximum number of new codec tokens to generate.', + }), + ) + .default(200), + subtalker_dosample: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Sampling switch for the sub-talker.', + }), + ) + .default(true), + subtalker_top_p: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Top-p for sub-talker sampling.', + }), + ) + .default(1), +}) + +/** + * Qwen3TTSOutput + */ +export const zSchemaQwen3TtsTextToSpeech17bOutput = z.object({ + audio: zSchemaAudioFile, +}) + +/** + * Qwen3TTSInput + */ +export const zSchemaQwen3TtsTextToSpeech17bInput = z.object({ + prompt: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Optional prompt to guide the style of the generated speech. This prompt will be ignored if a speaker embedding is provided.', + }), + ), + speaker_voice_embedding_file_url: z.optional( + z.string().register(z.globalRegistry, { + description: + 'URL to a speaker embedding file in safetensors format, from `fal-ai/qwen-3-tts/clone-voice` endpoint. If provided, the TTS model will use the cloned voice for synthesis instead of the predefined voices.', + }), + ), + top_p: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Top-p sampling parameter.', + }), + ) + .default(1), + repetition_penalty: z + .optional( + z.number().gte(0).register(z.globalRegistry, { + description: 'Penalty to reduce repeated tokens/codes.', + }), + ) + .default(1.05), + subtalker_temperature: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Temperature for sub-talker sampling.', + }), + ) + .default(0.9), + top_k: z + .optional( + z.int().gte(0).register(z.globalRegistry, { + description: 'Top-k sampling parameter.', + }), + ) + .default(50), + voice: z.optional( + z + .enum([ + 'Vivian', + 'Serena', + 'Uncle_Fu', + 'Dylan', + 'Eric', + 'Ryan', + 'Aiden', + 'Ono_Anna', + 'Sohee', + ]) + .register(z.globalRegistry, { + description: + "The voice to be used for speech synthesis, will be ignored if a speaker embedding is provided. Check out the **[documentation](https://github.com/QwenLM/Qwen3-TTS/tree/main?tab=readme-ov-file#custom-voice-generate)** for each voice's details and which language they primarily support.", + }), + ), + reference_text: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Optional reference text that was used when creating the speaker embedding. 
Providing this can improve synthesis quality when using a cloned voice.', + }), + ), + temperature: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Sampling temperature; higher => more random.', + }), + ) + .default(0.9), + language: z.optional( + z + .enum([ + 'Auto', + 'English', + 'Chinese', + 'Spanish', + 'French', + 'German', + 'Italian', + 'Japanese', + 'Korean', + 'Portuguese', + 'Russian', + ]) + .register(z.globalRegistry, { + description: 'The language of the voice.', + }), + ), + subtalker_top_k: z + .optional( + z.int().gte(0).register(z.globalRegistry, { + description: 'Top-k for sub-talker sampling.', + }), + ) + .default(50), + text: z.string().register(z.globalRegistry, { + description: 'The text to be converted to speech.', + }), + max_new_tokens: z + .optional( + z.int().gte(1).lte(8192).register(z.globalRegistry, { + description: 'Maximum number of new codec tokens to generate.', + }), + ) + .default(200), + subtalker_dosample: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Sampling switch for the sub-talker.', + }), + ) + .default(true), + subtalker_top_p: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Top-p for sub-talker sampling.', + }), + ) + .default(1), +}) + +/** + * Qwen3DesignVoiceOutput + */ +export const zSchemaQwen3TtsVoiceDesign17bOutput = z.object({ + audio: zSchemaAudioFile, +}) + +/** + * Qwen3DesignVoiceInput + */ +export const zSchemaQwen3TtsVoiceDesign17bInput = z.object({ + repetition_penalty: z + .optional( + z.number().gte(0).register(z.globalRegistry, { + description: 'Penalty to reduce repeated tokens/codes.', + }), + ) + .default(1.05), + subtalker_top_k: z + .optional( + z.int().gte(0).register(z.globalRegistry, { + description: 'Top-k for sub-talker sampling.', + }), + ) + .default(50), + top_p: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Top-p sampling parameter.', + }), + ) + .default(1), + prompt: z.string().register(z.globalRegistry, { + description: 'Optional prompt to guide the style of the generated speech.', + }), + max_new_tokens: z + .optional( + z.int().gte(1).lte(8192).register(z.globalRegistry, { + description: 'Maximum number of new codec tokens to generate.', + }), + ) + .default(200), + text: z.string().register(z.globalRegistry, { + description: 'The text to be converted to speech.', + }), + language: z.optional( + z + .enum([ + 'Auto', + 'English', + 'Chinese', + 'Spanish', + 'French', + 'German', + 'Italian', + 'Japanese', + 'Korean', + 'Portuguese', + 'Russian', + ]) + .register(z.globalRegistry, { + description: 'The language of the voice to be designed.', + }), + ), + top_k: z + .optional( + z.int().gte(0).register(z.globalRegistry, { + description: 'Top-k sampling parameter.', + }), + ) + .default(50), + subtalker_dosample: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Sampling switch for the sub-talker.', + }), + ) + .default(true), + subtalker_temperature: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Temperature for sub-talker sampling.', + }), + ) + .default(0.9), + subtalker_top_p: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Top-p for sub-talker sampling.', + }), + ) + .default(1), + temperature: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Sampling temperature; higher => more random.', + }), + ) + .default(0.9), +}) + 
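+/**
+ * Usage sketch (editor's note, not part of the generated output). The input
+ * schemas above validate a payload and fill in documented defaults before any
+ * network call; `zSchemaQueueStatus` (defined just below) and the per-endpoint
+ * request/response wrappers model fal's queue lifecycle: submit, poll the
+ * returned status URL, then fetch the result. This is a minimal sketch that
+ * assumes fal's queue submit URL takes the form
+ * `https://queue.fal.run/<endpoint-id>` with `Authorization: Key <FAL_KEY>`
+ * auth; the endpoint id and the helper's name are illustrative assumptions,
+ * not part of this package's API.
+ */
+export async function runQwen3VoiceDesignExample(
+  falKey: string,
+  raw: unknown,
+): Promise<z.infer<typeof zSchemaQwen3TtsVoiceDesign17bOutput>> {
+  // Validate and normalize the payload: defaults such as temperature=0.9 and
+  // top_k=50 are applied, and out-of-range values throw a ZodError up front.
+  const body = zSchemaQwen3TtsVoiceDesign17bInput.parse(raw)
+  const headers = {
+    Authorization: `Key ${falKey}`,
+    'Content-Type': 'application/json',
+  }
+  // Submit to the queue (endpoint id assumed for illustration). The submit
+  // response matches zSchemaQueueStatus and carries status/response URLs.
+  const submitted = zSchemaQueueStatus.parse(
+    await fetch('https://queue.fal.run/fal-ai/qwen-3-tts/voice-design', {
+      method: 'POST',
+      headers,
+      body: JSON.stringify(body),
+    }).then((r) => r.json()),
+  )
+  // Poll the status URL the queue handed back until the request completes
+  // (status moves IN_QUEUE -> IN_PROGRESS -> COMPLETED).
+  let status = submitted
+  while (status.status !== 'COMPLETED') {
+    await new Promise((resolve) => setTimeout(resolve, 1000))
+    status = zSchemaQueueStatus.parse(
+      await fetch(submitted.status_url!, { headers }).then((r) => r.json()),
+    )
+  }
+  // Fetch the final result and validate it against the output schema.
+  return zSchemaQwen3TtsVoiceDesign17bOutput.parse(
+    await fetch(submitted.response_url!, { headers }).then((r) => r.json()),
+  )
+}
+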
+export const zSchemaQueueStatus = z.object({ + status: z.enum(['IN_QUEUE', 'IN_PROGRESS', 'COMPLETED']), + request_id: z.string().register(z.globalRegistry, { + description: 'The request id.', + }), + response_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The response url.', + }), + ), + status_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The status url.', + }), + ), + cancel_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The cancel url.', + }), + ), + logs: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The logs.', + }), + ), + metrics: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The metrics.', + }), + ), + queue_position: z.optional( + z.int().register(z.globalRegistry, { + description: 'The queue position.', + }), + ), +}) + +export const zGetFalAiQwen3TtsVoiceDesign17bRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiQwen3TtsVoiceDesign17bRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwen3TtsVoiceDesign17bRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwen3TtsVoiceDesign17bRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwen3TtsVoiceDesign17bData = z.object({ + body: zSchemaQwen3TtsVoiceDesign17bInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiQwen3TtsVoiceDesign17bResponse = zSchemaQueueStatus + +export const zGetFalAiQwen3TtsVoiceDesign17bRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiQwen3TtsVoiceDesign17bRequestsByRequestIdResponse = + zSchemaQwen3TtsVoiceDesign17bOutput + +export const zGetFalAiQwen3TtsTextToSpeech17bRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiQwen3TtsTextToSpeech17bRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwen3TtsTextToSpeech17bRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwen3TtsTextToSpeech17bRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwen3TtsTextToSpeech17bData = z.object({ + body: zSchemaQwen3TtsTextToSpeech17bInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiQwen3TtsTextToSpeech17bResponse = zSchemaQueueStatus + +export const zGetFalAiQwen3TtsTextToSpeech17bRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. + */ +export const zGetFalAiQwen3TtsTextToSpeech17bRequestsByRequestIdResponse = + zSchemaQwen3TtsTextToSpeech17bOutput + +export const zGetFalAiQwen3TtsTextToSpeech06bRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiQwen3TtsTextToSpeech06bRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwen3TtsTextToSpeech06bRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwen3TtsTextToSpeech06bRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwen3TtsTextToSpeech06bData = z.object({ + body: zSchemaQwen3TtsTextToSpeech06bInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiQwen3TtsTextToSpeech06bResponse = zSchemaQueueStatus + +export const zGetFalAiQwen3TtsTextToSpeech06bRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. 
+ */ +export const zGetFalAiQwen3TtsTextToSpeech06bRequestsByRequestIdResponse = + zSchemaQwen3TtsTextToSpeech06bOutput + +export const zGetFalAiVibevoice05bRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiVibevoice05bRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiVibevoice05bRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiVibevoice05bRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiVibevoice05bData = z.object({ + body: zSchemaVibevoice05bInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiVibevoice05bResponse = zSchemaQueueStatus + +export const zGetFalAiVibevoice05bRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiVibevoice05bRequestsByRequestIdResponse = + zSchemaVibevoice05bOutput + +export const zGetFalAiMayaBatchRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiMayaBatchRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMayaBatchRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiMayaBatchRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMayaBatchData = z.object({ + body: zSchemaMayaBatchInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiMayaBatchResponse = zSchemaQueueStatus + +export const zGetFalAiMayaBatchRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiMayaBatchRequestsByRequestIdResponse = + zSchemaMayaBatchOutput + +export const zGetFalAiMayaStreamRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiMayaStreamRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMayaStreamRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiMayaStreamRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMayaStreamData = z.object({ + body: zSchemaMayaStreamInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMayaStreamResponse = zSchemaQueueStatus + +export const zGetFalAiMayaStreamRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiMayaStreamRequestsByRequestIdResponse = + zSchemaMayaStreamOutput + +export const zGetFalAiMayaRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiMayaRequestsByRequestIdStatusResponse = zSchemaQueueStatus + +export const zPutFalAiMayaRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiMayaRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMayaData = z.object({ + body: zSchemaMayaInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMayaResponse = zSchemaQueueStatus + +export const zGetFalAiMayaRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiMayaRequestsByRequestIdResponse = zSchemaMayaOutput + +export const zGetFalAiMinimaxSpeech26TurboRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiMinimaxSpeech26TurboRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMinimaxSpeech26TurboRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiMinimaxSpeech26TurboRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMinimaxSpeech26TurboData = z.object({ + body: zSchemaMinimaxSpeech26TurboInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMinimaxSpeech26TurboResponse = zSchemaQueueStatus + +export const zGetFalAiMinimaxSpeech26TurboRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiMinimaxSpeech26TurboRequestsByRequestIdResponse = + zSchemaMinimaxSpeech26TurboOutput + +export const zGetFalAiMinimaxSpeech26HdRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. 
+ */ +export const zGetFalAiMinimaxSpeech26HdRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMinimaxSpeech26HdRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiMinimaxSpeech26HdRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMinimaxSpeech26HdData = z.object({ + body: zSchemaMinimaxSpeech26HdInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMinimaxSpeech26HdResponse = zSchemaQueueStatus + +export const zGetFalAiMinimaxSpeech26HdRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiMinimaxSpeech26HdRequestsByRequestIdResponse = + zSchemaMinimaxSpeech26HdOutput + +export const zGetFalAiIndexTts2TextToSpeechRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiIndexTts2TextToSpeechRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiIndexTts2TextToSpeechRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiIndexTts2TextToSpeechRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiIndexTts2TextToSpeechData = z.object({ + body: zSchemaIndexTts2TextToSpeechInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiIndexTts2TextToSpeechResponse = zSchemaQueueStatus + +export const zGetFalAiIndexTts2TextToSpeechRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiIndexTts2TextToSpeechRequestsByRequestIdResponse = + zSchemaIndexTts2TextToSpeechOutput + +export const zGetFalAiKlingVideoV1TtsRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoV1TtsRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoV1TtsRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiKlingVideoV1TtsRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoV1TtsData = z.object({ + body: zSchemaKlingVideoV1TtsInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKlingVideoV1TtsResponse = zSchemaQueueStatus + +export const zGetFalAiKlingVideoV1TtsRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiKlingVideoV1TtsRequestsByRequestIdResponse = + zSchemaKlingVideoV1TtsOutput + +export const zGetFalAiChatterboxTextToSpeechMultilingualRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiChatterboxTextToSpeechMultilingualRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiChatterboxTextToSpeechMultilingualRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiChatterboxTextToSpeechMultilingualRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiChatterboxTextToSpeechMultilingualData = z.object({ + body: zSchemaChatterboxTextToSpeechMultilingualInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiChatterboxTextToSpeechMultilingualResponse = + zSchemaQueueStatus + +export const zGetFalAiChatterboxTextToSpeechMultilingualRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiChatterboxTextToSpeechMultilingualRequestsByRequestIdResponse = + zSchemaChatterboxTextToSpeechMultilingualOutput + +export const zGetFalAiVibevoice7bRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiVibevoice7bRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiVibevoice7bRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiVibevoice7bRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiVibevoice7bData = z.object({ + body: zSchemaVibevoice7bInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiVibevoice7bResponse = zSchemaQueueStatus + +export const zGetFalAiVibevoice7bRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiVibevoice7bRequestsByRequestIdResponse = + zSchemaVibevoice7bOutput + +export const zGetFalAiVibevoiceRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiVibevoiceRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiVibevoiceRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiVibevoiceRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiVibevoiceData = z.object({ + body: zSchemaVibevoiceInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiVibevoiceResponse = zSchemaQueueStatus + +export const zGetFalAiVibevoiceRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiVibevoiceRequestsByRequestIdResponse = + zSchemaVibevoiceOutput + +export const zGetFalAiMinimaxPreviewSpeech25HdRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiMinimaxPreviewSpeech25HdRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMinimaxPreviewSpeech25HdRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiMinimaxPreviewSpeech25HdRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMinimaxPreviewSpeech25HdData = z.object({ + body: zSchemaMinimaxPreviewSpeech25HdInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMinimaxPreviewSpeech25HdResponse = zSchemaQueueStatus + +export const zGetFalAiMinimaxPreviewSpeech25HdRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiMinimaxPreviewSpeech25HdRequestsByRequestIdResponse = + zSchemaMinimaxPreviewSpeech25HdOutput + +export const zGetFalAiMinimaxPreviewSpeech25TurboRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiMinimaxPreviewSpeech25TurboRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMinimaxPreviewSpeech25TurboRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiMinimaxPreviewSpeech25TurboRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMinimaxPreviewSpeech25TurboData = z.object({ + body: zSchemaMinimaxPreviewSpeech25TurboInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMinimaxPreviewSpeech25TurboResponse = zSchemaQueueStatus + +export const zGetFalAiMinimaxPreviewSpeech25TurboRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiMinimaxPreviewSpeech25TurboRequestsByRequestIdResponse = + zSchemaMinimaxPreviewSpeech25TurboOutput + +export const zGetFalAiMinimaxVoiceDesignRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiMinimaxVoiceDesignRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMinimaxVoiceDesignRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiMinimaxVoiceDesignRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMinimaxVoiceDesignData = z.object({ + body: zSchemaMinimaxVoiceDesignInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMinimaxVoiceDesignResponse = zSchemaQueueStatus + +export const zGetFalAiMinimaxVoiceDesignRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiMinimaxVoiceDesignRequestsByRequestIdResponse = + zSchemaMinimaxVoiceDesignOutput + +export const zGetResembleAiChatterboxhdTextToSpeechRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetResembleAiChatterboxhdTextToSpeechRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutResembleAiChatterboxhdTextToSpeechRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutResembleAiChatterboxhdTextToSpeechRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostResembleAiChatterboxhdTextToSpeechData = z.object({ + body: zSchemaChatterboxhdTextToSpeechInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostResembleAiChatterboxhdTextToSpeechResponse = + zSchemaQueueStatus + +export const zGetResembleAiChatterboxhdTextToSpeechRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetResembleAiChatterboxhdTextToSpeechRequestsByRequestIdResponse = + zSchemaChatterboxhdTextToSpeechOutput + +export const zGetFalAiChatterboxTextToSpeechRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiChatterboxTextToSpeechRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiChatterboxTextToSpeechRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiChatterboxTextToSpeechRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiChatterboxTextToSpeechData = z.object({ + body: zSchemaChatterboxTextToSpeechInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiChatterboxTextToSpeechResponse = zSchemaQueueStatus + +export const zGetFalAiChatterboxTextToSpeechRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiChatterboxTextToSpeechRequestsByRequestIdResponse = + zSchemaChatterboxTextToSpeechOutput + +export const zGetFalAiMinimaxVoiceCloneRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiMinimaxVoiceCloneRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMinimaxVoiceCloneRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiMinimaxVoiceCloneRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMinimaxVoiceCloneData = z.object({ + body: zSchemaMinimaxVoiceCloneInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMinimaxVoiceCloneResponse = zSchemaQueueStatus + +export const zGetFalAiMinimaxVoiceCloneRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiMinimaxVoiceCloneRequestsByRequestIdResponse = + zSchemaMinimaxVoiceCloneOutput + +export const zGetFalAiMinimaxSpeech02TurboRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiMinimaxSpeech02TurboRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMinimaxSpeech02TurboRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiMinimaxSpeech02TurboRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMinimaxSpeech02TurboData = z.object({ + body: zSchemaMinimaxSpeech02TurboInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMinimaxSpeech02TurboResponse = zSchemaQueueStatus + +export const zGetFalAiMinimaxSpeech02TurboRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiMinimaxSpeech02TurboRequestsByRequestIdResponse = + zSchemaMinimaxSpeech02TurboOutput + +export const zGetFalAiMinimaxSpeech02HdRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiMinimaxSpeech02HdRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMinimaxSpeech02HdRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiMinimaxSpeech02HdRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMinimaxSpeech02HdData = z.object({ + body: zSchemaMinimaxSpeech02HdInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMinimaxSpeech02HdResponse = zSchemaQueueStatus + +export const zGetFalAiMinimaxSpeech02HdRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiMinimaxSpeech02HdRequestsByRequestIdResponse = + zSchemaMinimaxSpeech02HdOutput + +export const zGetFalAiDiaTtsRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiDiaTtsRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiDiaTtsRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiDiaTtsRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiDiaTtsData = z.object({ + body: zSchemaDiaTtsInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiDiaTtsResponse = zSchemaQueueStatus + +export const zGetFalAiDiaTtsRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiDiaTtsRequestsByRequestIdResponse = zSchemaDiaTtsOutput + +export const zGetFalAiOrpheusTtsRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiOrpheusTtsRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiOrpheusTtsRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiOrpheusTtsRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiOrpheusTtsData = z.object({ + body: zSchemaOrpheusTtsInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiOrpheusTtsResponse = zSchemaQueueStatus + +export const zGetFalAiOrpheusTtsRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiOrpheusTtsRequestsByRequestIdResponse = + zSchemaOrpheusTtsOutput + +export const zGetFalAiElevenlabsTtsTurboV25RequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiElevenlabsTtsTurboV25RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiElevenlabsTtsTurboV25RequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiElevenlabsTtsTurboV25RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiElevenlabsTtsTurboV25Data = z.object({ + body: zSchemaElevenlabsTtsTurboV25Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiElevenlabsTtsTurboV25Response = zSchemaQueueStatus + +export const zGetFalAiElevenlabsTtsTurboV25RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */
+export const zGetFalAiElevenlabsTtsTurboV25RequestsByRequestIdResponse =
+  zSchemaElevenlabsTtsTurboV25Output
diff --git a/packages/typescript/ai-fal/src/generated/text-to-text/endpoint-map.ts b/packages/typescript/ai-fal/src/generated/text-to-text/endpoint-map.ts
new file mode 100644
index 00000000..ffbb3cea
--- /dev/null
+++ b/packages/typescript/ai-fal/src/generated/text-to-text/endpoint-map.ts
@@ -0,0 +1,42 @@
+// AUTO-GENERATED - Do not edit manually
+// Generated from types.gen.ts via scripts/generate-fal-endpoint-maps.ts
+
+import {
+  zAiDetectorDetectTextInput,
+  zAiDetectorDetectTextOutput,
+} from './zod.gen'
+
+import type {
+  AiDetectorDetectTextInput,
+  AiDetectorDetectTextOutput,
+} from './types.gen'
+
+import type { z } from 'zod'
+
+export type TextToTextEndpointMap = {
+  'half-moon-ai/ai-detector/detect-text': {
+    input: AiDetectorDetectTextInput
+    output: AiDetectorDetectTextOutput
+  }
+}
+
+/** Union type of all text-to-text model endpoint IDs */
+export type TextToTextModel = keyof TextToTextEndpointMap
+
+export const TextToTextSchemaMap: Record<
+  TextToTextModel,
+  { input: z.ZodSchema; output: z.ZodSchema }
+> = {
+  ['half-moon-ai/ai-detector/detect-text']: {
+    input: zAiDetectorDetectTextInput,
+    output: zAiDetectorDetectTextOutput,
+  },
+} as const
+
+/** Get the input type for a specific text-to-text model */
+export type TextToTextModelInput<T extends TextToTextModel> =
+  TextToTextEndpointMap[T]['input']
+
+/** Get the output type for a specific text-to-text model */
+export type TextToTextModelOutput<T extends TextToTextModel> =
+  TextToTextEndpointMap[T]['output']
diff --git a/packages/typescript/ai-fal/src/generated/text-to-text/types.gen.ts b/packages/typescript/ai-fal/src/generated/text-to-text/types.gen.ts
new file mode 100644
index 00000000..715615b1
--- /dev/null
+++ b/packages/typescript/ai-fal/src/generated/text-to-text/types.gen.ts
@@ -0,0 +1,170 @@
+// This file is auto-generated by @hey-api/openapi-ts
+
+export type ClientOptions = {
+  baseUrl: 'https://queue.fal.run' | (string & {})
+}
+
+/**
+ * AITextDetectionOutput
+ */
+export type AiDetectorDetectTextOutput = {
+  /**
+   * Latency
+   */
+  latency: number
+  /**
+   * Verdict
+   */
+  verdict: string
+  /**
+   * Is Ai Generated
+   */
+  is_ai_generated: boolean
+  /**
+   * Confidence
+   */
+  confidence: number
+}
+
+/**
+ * TextDetectionInput
+ */
+export type AiDetectorDetectTextInput = {
+  /**
+   * Text
+   *
+   * Text content to analyze for AI generation.
+   */
+  text: string
+}
+
+export type QueueStatus = {
+  status: 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED'
+  /**
+   * The request id.
+   */
+  request_id: string
+  /**
+   * The response url.
+   */
+  response_url?: string
+  /**
+   * The status url.
+   */
+  status_url?: string
+  /**
+   * The cancel url.
+   */
+  cancel_url?: string
+  /**
+   * The logs.
+   */
+  logs?: {
+    [key: string]: unknown
+  }
+  /**
+   * The metrics.
+   */
+  metrics?: {
+    [key: string]: unknown
+  }
+  /**
+   * The queue position.
+   */
+  queue_position?: number
+}
+
+export type GetHalfMoonAiAiDetectorDetectTextRequestsByRequestIdStatusData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: {
+    /**
+     * Whether to include logs (`1`) in the response or not (`0`).
+     */
+    logs?: number
+  }
+  url: '/half-moon-ai/ai-detector/detect-text/requests/{request_id}/status'
+}
+
+export type GetHalfMoonAiAiDetectorDetectTextRequestsByRequestIdStatusResponses =
+  {
+    /**
+     * The request status.
+ */ + 200: QueueStatus + } + +export type GetHalfMoonAiAiDetectorDetectTextRequestsByRequestIdStatusResponse = + GetHalfMoonAiAiDetectorDetectTextRequestsByRequestIdStatusResponses[keyof GetHalfMoonAiAiDetectorDetectTextRequestsByRequestIdStatusResponses] + +export type PutHalfMoonAiAiDetectorDetectTextRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/half-moon-ai/ai-detector/detect-text/requests/{request_id}/cancel' +} + +export type PutHalfMoonAiAiDetectorDetectTextRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutHalfMoonAiAiDetectorDetectTextRequestsByRequestIdCancelResponse = + PutHalfMoonAiAiDetectorDetectTextRequestsByRequestIdCancelResponses[keyof PutHalfMoonAiAiDetectorDetectTextRequestsByRequestIdCancelResponses] + +export type PostHalfMoonAiAiDetectorDetectTextData = { + body: AiDetectorDetectTextInput + path?: never + query?: never + url: '/half-moon-ai/ai-detector/detect-text' +} + +export type PostHalfMoonAiAiDetectorDetectTextResponses = { + /** + * The request status. + */ + 200: QueueStatus +} + +export type PostHalfMoonAiAiDetectorDetectTextResponse = + PostHalfMoonAiAiDetectorDetectTextResponses[keyof PostHalfMoonAiAiDetectorDetectTextResponses] + +export type GetHalfMoonAiAiDetectorDetectTextRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/half-moon-ai/ai-detector/detect-text/requests/{request_id}' +} + +export type GetHalfMoonAiAiDetectorDetectTextRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: AiDetectorDetectTextOutput +} + +export type GetHalfMoonAiAiDetectorDetectTextRequestsByRequestIdResponse = + GetHalfMoonAiAiDetectorDetectTextRequestsByRequestIdResponses[keyof GetHalfMoonAiAiDetectorDetectTextRequestsByRequestIdResponses] diff --git a/packages/typescript/ai-fal/src/generated/text-to-text/zod.gen.ts b/packages/typescript/ai-fal/src/generated/text-to-text/zod.gen.ts new file mode 100644 index 00000000..d64e9ae5 --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/text-to-text/zod.gen.ts @@ -0,0 +1,140 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { z } from 'zod' + +/** + * AITextDetectionOutput + */ +export const zAiDetectorDetectTextOutput = z.object({ + latency: z.number(), + verdict: z.string(), + is_ai_generated: z.boolean(), + confidence: z.number(), +}) + +/** + * TextDetectionInput + */ +export const zAiDetectorDetectTextInput = z.object({ + text: z.string().min(1).max(20000).register(z.globalRegistry, { + description: 'Text content to analyze for AI generation.', + }), +}) + +export const zQueueStatus = z.object({ + status: z.enum(['IN_QUEUE', 'IN_PROGRESS', 'COMPLETED']), + request_id: z.string().register(z.globalRegistry, { + description: 'The request id.', + }), + response_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The response url.', + }), + ), + status_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The status url.', + }), + ), + cancel_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The cancel url.', + }), + ), + logs: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The logs.', + }), + ), + metrics: z.optional( + z.record(z.string(), 
z.unknown()).register(z.globalRegistry, { + description: 'The metrics.', + }), + ), + queue_position: z.optional( + z.int().register(z.globalRegistry, { + description: 'The queue position.', + }), + ), +}) + +export const zGetHalfMoonAiAiDetectorDetectTextRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetHalfMoonAiAiDetectorDetectTextRequestsByRequestIdStatusResponse = + zQueueStatus + +export const zPutHalfMoonAiAiDetectorDetectTextRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutHalfMoonAiAiDetectorDetectTextRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostHalfMoonAiAiDetectorDetectTextData = z.object({ + body: zAiDetectorDetectTextInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostHalfMoonAiAiDetectorDetectTextResponse = zQueueStatus + +export const zGetHalfMoonAiAiDetectorDetectTextRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetHalfMoonAiAiDetectorDetectTextRequestsByRequestIdResponse = + zAiDetectorDetectTextOutput diff --git a/packages/typescript/ai-fal/src/generated/text-to-video/endpoint-map.ts b/packages/typescript/ai-fal/src/generated/text-to-video/endpoint-map.ts new file mode 100644 index 00000000..b77ce400 --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/text-to-video/endpoint-map.ts @@ -0,0 +1,1238 @@ +// AUTO-GENERATED - Do not edit manually +// Generated from types.gen.ts via scripts/generate-fal-endpoint-maps.ts + +import { + zSchemaAnimatediffSparsectrlLcmInput, + zSchemaAnimatediffSparsectrlLcmOutput, + zSchemaAvatarsTextToVideoInput, + zSchemaAvatarsTextToVideoOutput, + zSchemaBytedanceSeedanceV15ProTextToVideoInput, + zSchemaBytedanceSeedanceV15ProTextToVideoOutput, + zSchemaBytedanceSeedanceV1LiteTextToVideoInput, + zSchemaBytedanceSeedanceV1LiteTextToVideoOutput, + zSchemaBytedanceSeedanceV1ProFastTextToVideoInput, + zSchemaBytedanceSeedanceV1ProFastTextToVideoOutput, + zSchemaBytedanceSeedanceV1ProTextToVideoInput, + zSchemaBytedanceSeedanceV1ProTextToVideoOutput, + zSchemaCogvideox5bInput, + zSchemaCogvideox5bOutput, + zSchemaFabric10TextInput, + zSchemaFabric10TextOutput, + zSchemaFastAnimatediffTextToVideoInput, + zSchemaFastAnimatediffTextToVideoOutput, + zSchemaFastAnimatediffTurboTextToVideoInput, + zSchemaFastAnimatediffTurboTextToVideoOutput, + zSchemaFastSvdLcmTextToVideoInput, + zSchemaFastSvdLcmTextToVideoOutput, + zSchemaFastSvdTextToVideoInput, + zSchemaFastSvdTextToVideoOutput, + zSchemaHunyuanVideoInput, + zSchemaHunyuanVideoLoraInput, + zSchemaHunyuanVideoLoraOutput, + zSchemaHunyuanVideoOutput, + zSchemaHunyuanVideoV15TextToVideoInput, + zSchemaHunyuanVideoV15TextToVideoOutput, + zSchemaInfinitalkSingleTextInput, + zSchemaInfinitalkSingleTextOutput, + zSchemaInfinityStarTextToVideoInput, + zSchemaInfinityStarTextToVideoOutput, + zSchemaKandinsky5ProTextToVideoInput, + zSchemaKandinsky5ProTextToVideoOutput, + zSchemaKandinsky5TextToVideoDistillInput, + zSchemaKandinsky5TextToVideoDistillOutput, + zSchemaKandinsky5TextToVideoInput, + zSchemaKandinsky5TextToVideoOutput, + zSchemaKlingVideoLipsyncAudioToVideoInput, + zSchemaKlingVideoLipsyncAudioToVideoOutput, + zSchemaKlingVideoLipsyncTextToVideoInput, + zSchemaKlingVideoLipsyncTextToVideoOutput, + zSchemaKlingVideoV15ProEffectsInput, + zSchemaKlingVideoV15ProEffectsOutput, + zSchemaKlingVideoV15ProTextToVideoInput, + zSchemaKlingVideoV15ProTextToVideoOutput, + zSchemaKlingVideoV16ProEffectsInput, + zSchemaKlingVideoV16ProEffectsOutput, + zSchemaKlingVideoV16ProTextToVideoInput, + zSchemaKlingVideoV16ProTextToVideoOutput, + zSchemaKlingVideoV16StandardEffectsInput, + zSchemaKlingVideoV16StandardEffectsOutput, + zSchemaKlingVideoV16StandardTextToVideoInput, + zSchemaKlingVideoV16StandardTextToVideoOutput, + zSchemaKlingVideoV1StandardEffectsInput, + zSchemaKlingVideoV1StandardEffectsOutput, + zSchemaKlingVideoV1StandardTextToVideoInput, + zSchemaKlingVideoV1StandardTextToVideoOutput, + zSchemaKlingVideoV21MasterTextToVideoInput, + zSchemaKlingVideoV21MasterTextToVideoOutput, + zSchemaKlingVideoV25TurboProTextToVideoInput, + zSchemaKlingVideoV25TurboProTextToVideoOutput, + zSchemaKlingVideoV26ProTextToVideoInput, + zSchemaKlingVideoV26ProTextToVideoOutput, + zSchemaKlingVideoV2MasterTextToVideoInput, + zSchemaKlingVideoV2MasterTextToVideoOutput, + zSchemaKreaWan14bTextToVideoInput, + zSchemaKreaWan14bTextToVideoOutput, + zSchemaLongcatVideoDistilledTextToVideo480pInput, + 
zSchemaLongcatVideoDistilledTextToVideo480pOutput, + zSchemaLongcatVideoDistilledTextToVideo720pInput, + zSchemaLongcatVideoDistilledTextToVideo720pOutput, + zSchemaLongcatVideoTextToVideo480pInput, + zSchemaLongcatVideoTextToVideo480pOutput, + zSchemaLongcatVideoTextToVideo720pInput, + zSchemaLongcatVideoTextToVideo720pOutput, + zSchemaLtx219bDistilledTextToVideoInput, + zSchemaLtx219bDistilledTextToVideoLoraInput, + zSchemaLtx219bDistilledTextToVideoLoraOutput, + zSchemaLtx219bDistilledTextToVideoOutput, + zSchemaLtx219bTextToVideoInput, + zSchemaLtx219bTextToVideoLoraInput, + zSchemaLtx219bTextToVideoLoraOutput, + zSchemaLtx219bTextToVideoOutput, + zSchemaLtx2TextToVideoFastInput, + zSchemaLtx2TextToVideoFastOutput, + zSchemaLtx2TextToVideoInput, + zSchemaLtx2TextToVideoOutput, + zSchemaLtxVideo13bDevInput, + zSchemaLtxVideo13bDevOutput, + zSchemaLtxVideo13bDistilledInput, + zSchemaLtxVideo13bDistilledOutput, + zSchemaLtxVideoInput, + zSchemaLtxVideoOutput, + zSchemaLtxVideoV095Input, + zSchemaLtxVideoV095Output, + zSchemaLtxv13B098DistilledInput, + zSchemaLtxv13B098DistilledOutput, + zSchemaLumaDreamMachineRay2FlashInput, + zSchemaLumaDreamMachineRay2FlashOutput, + zSchemaLumaDreamMachineRay2Input, + zSchemaLumaDreamMachineRay2Output, + zSchemaMagiDistilledInput, + zSchemaMagiDistilledOutput, + zSchemaMagiInput, + zSchemaMagiOutput, + zSchemaMareyT2vInput, + zSchemaMareyT2vOutput, + zSchemaMinimaxHailuo02ProTextToVideoInput, + zSchemaMinimaxHailuo02ProTextToVideoOutput, + zSchemaMinimaxHailuo02StandardTextToVideoInput, + zSchemaMinimaxHailuo02StandardTextToVideoOutput, + zSchemaMinimaxHailuo23ProTextToVideoInput, + zSchemaMinimaxHailuo23ProTextToVideoOutput, + zSchemaMinimaxHailuo23StandardTextToVideoInput, + zSchemaMinimaxHailuo23StandardTextToVideoOutput, + zSchemaMinimaxVideo01DirectorInput, + zSchemaMinimaxVideo01DirectorOutput, + zSchemaMinimaxVideo01Input, + zSchemaMinimaxVideo01LiveInput, + zSchemaMinimaxVideo01LiveOutput, + zSchemaMinimaxVideo01Output, + zSchemaMochiV1Input, + zSchemaMochiV1Output, + zSchemaOviInput, + zSchemaOviOutput, + zSchemaPikaV21TextToVideoInput, + zSchemaPikaV21TextToVideoOutput, + zSchemaPikaV22TextToVideoInput, + zSchemaPikaV22TextToVideoOutput, + zSchemaPikaV2TurboTextToVideoInput, + zSchemaPikaV2TurboTextToVideoOutput, + zSchemaPixverseV35TextToVideoFastInput, + zSchemaPixverseV35TextToVideoFastOutput, + zSchemaPixverseV35TextToVideoInput, + zSchemaPixverseV35TextToVideoOutput, + zSchemaPixverseV45TextToVideoFastInput, + zSchemaPixverseV45TextToVideoFastOutput, + zSchemaPixverseV45TextToVideoInput, + zSchemaPixverseV45TextToVideoOutput, + zSchemaPixverseV4TextToVideoFastInput, + zSchemaPixverseV4TextToVideoFastOutput, + zSchemaPixverseV4TextToVideoInput, + zSchemaPixverseV4TextToVideoOutput, + zSchemaPixverseV55TextToVideoInput, + zSchemaPixverseV55TextToVideoOutput, + zSchemaPixverseV56TextToVideoInput, + zSchemaPixverseV56TextToVideoOutput, + zSchemaPixverseV5TextToVideoInput, + zSchemaPixverseV5TextToVideoOutput, + zSchemaSanaVideoInput, + zSchemaSanaVideoOutput, + zSchemaSora2TextToVideoInput, + zSchemaSora2TextToVideoOutput, + zSchemaSora2TextToVideoProInput, + zSchemaSora2TextToVideoProOutput, + zSchemaT2vTurboInput, + zSchemaT2vTurboOutput, + zSchemaTranspixarInput, + zSchemaTranspixarOutput, + zSchemaV26TextToVideoInput, + zSchemaV26TextToVideoOutput, + zSchemaVeo2Input, + zSchemaVeo2Output, + zSchemaVeo31FastInput, + zSchemaVeo31FastOutput, + zSchemaVeo31Input, + zSchemaVeo31Output, + zSchemaVeo3FastInput, + zSchemaVeo3FastOutput, + 
zSchemaVeo3Input, + zSchemaVeo3Output, + zSchemaViduQ1TextToVideoInput, + zSchemaViduQ1TextToVideoOutput, + zSchemaViduQ2TextToVideoInput, + zSchemaViduQ2TextToVideoOutput, + zSchemaWan25PreviewTextToVideoInput, + zSchemaWan25PreviewTextToVideoOutput, + zSchemaWanAlphaInput, + zSchemaWanAlphaOutput, + zSchemaWanProTextToVideoInput, + zSchemaWanProTextToVideoOutput, + zSchemaWanT2vInput, + zSchemaWanT2vLoraInput, + zSchemaWanT2vLoraOutput, + zSchemaWanT2vOutput, + zSchemaWanV225bTextToVideoDistillInput, + zSchemaWanV225bTextToVideoDistillOutput, + zSchemaWanV225bTextToVideoFastWanInput, + zSchemaWanV225bTextToVideoFastWanOutput, + zSchemaWanV225bTextToVideoInput, + zSchemaWanV225bTextToVideoOutput, + zSchemaWanV22A14bTextToVideoInput, + zSchemaWanV22A14bTextToVideoLoraInput, + zSchemaWanV22A14bTextToVideoLoraOutput, + zSchemaWanV22A14bTextToVideoOutput, + zSchemaWanV22A14bTextToVideoTurboInput, + zSchemaWanV22A14bTextToVideoTurboOutput, +} from './zod.gen' + +import type { + SchemaAnimatediffSparsectrlLcmInput, + SchemaAnimatediffSparsectrlLcmOutput, + SchemaAvatarsTextToVideoInput, + SchemaAvatarsTextToVideoOutput, + SchemaBytedanceSeedanceV15ProTextToVideoInput, + SchemaBytedanceSeedanceV15ProTextToVideoOutput, + SchemaBytedanceSeedanceV1LiteTextToVideoInput, + SchemaBytedanceSeedanceV1LiteTextToVideoOutput, + SchemaBytedanceSeedanceV1ProFastTextToVideoInput, + SchemaBytedanceSeedanceV1ProFastTextToVideoOutput, + SchemaBytedanceSeedanceV1ProTextToVideoInput, + SchemaBytedanceSeedanceV1ProTextToVideoOutput, + SchemaCogvideox5bInput, + SchemaCogvideox5bOutput, + SchemaFabric10TextInput, + SchemaFabric10TextOutput, + SchemaFastAnimatediffTextToVideoInput, + SchemaFastAnimatediffTextToVideoOutput, + SchemaFastAnimatediffTurboTextToVideoInput, + SchemaFastAnimatediffTurboTextToVideoOutput, + SchemaFastSvdLcmTextToVideoInput, + SchemaFastSvdLcmTextToVideoOutput, + SchemaFastSvdTextToVideoInput, + SchemaFastSvdTextToVideoOutput, + SchemaHunyuanVideoInput, + SchemaHunyuanVideoLoraInput, + SchemaHunyuanVideoLoraOutput, + SchemaHunyuanVideoOutput, + SchemaHunyuanVideoV15TextToVideoInput, + SchemaHunyuanVideoV15TextToVideoOutput, + SchemaInfinitalkSingleTextInput, + SchemaInfinitalkSingleTextOutput, + SchemaInfinityStarTextToVideoInput, + SchemaInfinityStarTextToVideoOutput, + SchemaKandinsky5ProTextToVideoInput, + SchemaKandinsky5ProTextToVideoOutput, + SchemaKandinsky5TextToVideoDistillInput, + SchemaKandinsky5TextToVideoDistillOutput, + SchemaKandinsky5TextToVideoInput, + SchemaKandinsky5TextToVideoOutput, + SchemaKlingVideoLipsyncAudioToVideoInput, + SchemaKlingVideoLipsyncAudioToVideoOutput, + SchemaKlingVideoLipsyncTextToVideoInput, + SchemaKlingVideoLipsyncTextToVideoOutput, + SchemaKlingVideoV15ProEffectsInput, + SchemaKlingVideoV15ProEffectsOutput, + SchemaKlingVideoV15ProTextToVideoInput, + SchemaKlingVideoV15ProTextToVideoOutput, + SchemaKlingVideoV16ProEffectsInput, + SchemaKlingVideoV16ProEffectsOutput, + SchemaKlingVideoV16ProTextToVideoInput, + SchemaKlingVideoV16ProTextToVideoOutput, + SchemaKlingVideoV16StandardEffectsInput, + SchemaKlingVideoV16StandardEffectsOutput, + SchemaKlingVideoV16StandardTextToVideoInput, + SchemaKlingVideoV16StandardTextToVideoOutput, + SchemaKlingVideoV1StandardEffectsInput, + SchemaKlingVideoV1StandardEffectsOutput, + SchemaKlingVideoV1StandardTextToVideoInput, + SchemaKlingVideoV1StandardTextToVideoOutput, + SchemaKlingVideoV21MasterTextToVideoInput, + SchemaKlingVideoV21MasterTextToVideoOutput, + SchemaKlingVideoV25TurboProTextToVideoInput, + 
SchemaKlingVideoV25TurboProTextToVideoOutput, + SchemaKlingVideoV26ProTextToVideoInput, + SchemaKlingVideoV26ProTextToVideoOutput, + SchemaKlingVideoV2MasterTextToVideoInput, + SchemaKlingVideoV2MasterTextToVideoOutput, + SchemaKreaWan14bTextToVideoInput, + SchemaKreaWan14bTextToVideoOutput, + SchemaLongcatVideoDistilledTextToVideo480pInput, + SchemaLongcatVideoDistilledTextToVideo480pOutput, + SchemaLongcatVideoDistilledTextToVideo720pInput, + SchemaLongcatVideoDistilledTextToVideo720pOutput, + SchemaLongcatVideoTextToVideo480pInput, + SchemaLongcatVideoTextToVideo480pOutput, + SchemaLongcatVideoTextToVideo720pInput, + SchemaLongcatVideoTextToVideo720pOutput, + SchemaLtx219bDistilledTextToVideoInput, + SchemaLtx219bDistilledTextToVideoLoraInput, + SchemaLtx219bDistilledTextToVideoLoraOutput, + SchemaLtx219bDistilledTextToVideoOutput, + SchemaLtx219bTextToVideoInput, + SchemaLtx219bTextToVideoLoraInput, + SchemaLtx219bTextToVideoLoraOutput, + SchemaLtx219bTextToVideoOutput, + SchemaLtx2TextToVideoFastInput, + SchemaLtx2TextToVideoFastOutput, + SchemaLtx2TextToVideoInput, + SchemaLtx2TextToVideoOutput, + SchemaLtxVideo13bDevInput, + SchemaLtxVideo13bDevOutput, + SchemaLtxVideo13bDistilledInput, + SchemaLtxVideo13bDistilledOutput, + SchemaLtxVideoInput, + SchemaLtxVideoOutput, + SchemaLtxVideoV095Input, + SchemaLtxVideoV095Output, + SchemaLtxv13B098DistilledInput, + SchemaLtxv13B098DistilledOutput, + SchemaLumaDreamMachineRay2FlashInput, + SchemaLumaDreamMachineRay2FlashOutput, + SchemaLumaDreamMachineRay2Input, + SchemaLumaDreamMachineRay2Output, + SchemaMagiDistilledInput, + SchemaMagiDistilledOutput, + SchemaMagiInput, + SchemaMagiOutput, + SchemaMareyT2vInput, + SchemaMareyT2vOutput, + SchemaMinimaxHailuo02ProTextToVideoInput, + SchemaMinimaxHailuo02ProTextToVideoOutput, + SchemaMinimaxHailuo02StandardTextToVideoInput, + SchemaMinimaxHailuo02StandardTextToVideoOutput, + SchemaMinimaxHailuo23ProTextToVideoInput, + SchemaMinimaxHailuo23ProTextToVideoOutput, + SchemaMinimaxHailuo23StandardTextToVideoInput, + SchemaMinimaxHailuo23StandardTextToVideoOutput, + SchemaMinimaxVideo01DirectorInput, + SchemaMinimaxVideo01DirectorOutput, + SchemaMinimaxVideo01Input, + SchemaMinimaxVideo01LiveInput, + SchemaMinimaxVideo01LiveOutput, + SchemaMinimaxVideo01Output, + SchemaMochiV1Input, + SchemaMochiV1Output, + SchemaOviInput, + SchemaOviOutput, + SchemaPikaV21TextToVideoInput, + SchemaPikaV21TextToVideoOutput, + SchemaPikaV22TextToVideoInput, + SchemaPikaV22TextToVideoOutput, + SchemaPikaV2TurboTextToVideoInput, + SchemaPikaV2TurboTextToVideoOutput, + SchemaPixverseV35TextToVideoFastInput, + SchemaPixverseV35TextToVideoFastOutput, + SchemaPixverseV35TextToVideoInput, + SchemaPixverseV35TextToVideoOutput, + SchemaPixverseV45TextToVideoFastInput, + SchemaPixverseV45TextToVideoFastOutput, + SchemaPixverseV45TextToVideoInput, + SchemaPixverseV45TextToVideoOutput, + SchemaPixverseV4TextToVideoFastInput, + SchemaPixverseV4TextToVideoFastOutput, + SchemaPixverseV4TextToVideoInput, + SchemaPixverseV4TextToVideoOutput, + SchemaPixverseV55TextToVideoInput, + SchemaPixverseV55TextToVideoOutput, + SchemaPixverseV56TextToVideoInput, + SchemaPixverseV56TextToVideoOutput, + SchemaPixverseV5TextToVideoInput, + SchemaPixverseV5TextToVideoOutput, + SchemaSanaVideoInput, + SchemaSanaVideoOutput, + SchemaSora2TextToVideoInput, + SchemaSora2TextToVideoOutput, + SchemaSora2TextToVideoProInput, + SchemaSora2TextToVideoProOutput, + SchemaT2vTurboInput, + SchemaT2vTurboOutput, + SchemaTranspixarInput, + SchemaTranspixarOutput, 
+ SchemaV26TextToVideoInput, + SchemaV26TextToVideoOutput, + SchemaVeo2Input, + SchemaVeo2Output, + SchemaVeo31FastInput, + SchemaVeo31FastOutput, + SchemaVeo31Input, + SchemaVeo31Output, + SchemaVeo3FastInput, + SchemaVeo3FastOutput, + SchemaVeo3Input, + SchemaVeo3Output, + SchemaViduQ1TextToVideoInput, + SchemaViduQ1TextToVideoOutput, + SchemaViduQ2TextToVideoInput, + SchemaViduQ2TextToVideoOutput, + SchemaWan25PreviewTextToVideoInput, + SchemaWan25PreviewTextToVideoOutput, + SchemaWanAlphaInput, + SchemaWanAlphaOutput, + SchemaWanProTextToVideoInput, + SchemaWanProTextToVideoOutput, + SchemaWanT2vInput, + SchemaWanT2vLoraInput, + SchemaWanT2vLoraOutput, + SchemaWanT2vOutput, + SchemaWanV225bTextToVideoDistillInput, + SchemaWanV225bTextToVideoDistillOutput, + SchemaWanV225bTextToVideoFastWanInput, + SchemaWanV225bTextToVideoFastWanOutput, + SchemaWanV225bTextToVideoInput, + SchemaWanV225bTextToVideoOutput, + SchemaWanV22A14bTextToVideoInput, + SchemaWanV22A14bTextToVideoLoraInput, + SchemaWanV22A14bTextToVideoLoraOutput, + SchemaWanV22A14bTextToVideoOutput, + SchemaWanV22A14bTextToVideoTurboInput, + SchemaWanV22A14bTextToVideoTurboOutput, +} from './types.gen' + +import type { z } from 'zod' + +export type TextToVideoEndpointMap = { + 'fal-ai/kling-video/v2.5-turbo/pro/text-to-video': { + input: SchemaKlingVideoV25TurboProTextToVideoInput + output: SchemaKlingVideoV25TurboProTextToVideoOutput + } + 'fal-ai/veo3/fast': { + input: SchemaVeo3FastInput + output: SchemaVeo3FastOutput + } + 'fal-ai/minimax/hailuo-02/standard/text-to-video': { + input: SchemaMinimaxHailuo02StandardTextToVideoInput + output: SchemaMinimaxHailuo02StandardTextToVideoOutput + } + 'fal-ai/veo3': { + input: SchemaVeo3Input + output: SchemaVeo3Output + } + 'fal-ai/kling-video/v2/master/text-to-video': { + input: SchemaKlingVideoV2MasterTextToVideoInput + output: SchemaKlingVideoV2MasterTextToVideoOutput + } + 'fal-ai/pixverse/v5.6/text-to-video': { + input: SchemaPixverseV56TextToVideoInput + output: SchemaPixverseV56TextToVideoOutput + } + 'fal-ai/ltx-2-19b/distilled/text-to-video/lora': { + input: SchemaLtx219bDistilledTextToVideoLoraInput + output: SchemaLtx219bDistilledTextToVideoLoraOutput + } + 'fal-ai/ltx-2-19b/distilled/text-to-video': { + input: SchemaLtx219bDistilledTextToVideoInput + output: SchemaLtx219bDistilledTextToVideoOutput + } + 'fal-ai/ltx-2-19b/text-to-video/lora': { + input: SchemaLtx219bTextToVideoLoraInput + output: SchemaLtx219bTextToVideoLoraOutput + } + 'fal-ai/ltx-2-19b/text-to-video': { + input: SchemaLtx219bTextToVideoInput + output: SchemaLtx219bTextToVideoOutput + } + 'fal-ai/kandinsky5-pro/text-to-video': { + input: SchemaKandinsky5ProTextToVideoInput + output: SchemaKandinsky5ProTextToVideoOutput + } + 'fal-ai/bytedance/seedance/v1.5/pro/text-to-video': { + input: SchemaBytedanceSeedanceV15ProTextToVideoInput + output: SchemaBytedanceSeedanceV15ProTextToVideoOutput + } + 'wan/v2.6/text-to-video': { + input: SchemaV26TextToVideoInput + output: SchemaV26TextToVideoOutput + } + 'veed/fabric-1.0/text': { + input: SchemaFabric10TextInput + output: SchemaFabric10TextOutput + } + 'fal-ai/kling-video/v2.6/pro/text-to-video': { + input: SchemaKlingVideoV26ProTextToVideoInput + output: SchemaKlingVideoV26ProTextToVideoOutput + } + 'fal-ai/pixverse/v5.5/text-to-video': { + input: SchemaPixverseV55TextToVideoInput + output: SchemaPixverseV55TextToVideoOutput + } + 'fal-ai/ltx-2/text-to-video/fast': { + input: SchemaLtx2TextToVideoFastInput + output: SchemaLtx2TextToVideoFastOutput + } + 
'fal-ai/ltx-2/text-to-video': { + input: SchemaLtx2TextToVideoInput + output: SchemaLtx2TextToVideoOutput + } + 'fal-ai/hunyuan-video-v1.5/text-to-video': { + input: SchemaHunyuanVideoV15TextToVideoInput + output: SchemaHunyuanVideoV15TextToVideoOutput + } + 'fal-ai/infinity-star/text-to-video': { + input: SchemaInfinityStarTextToVideoInput + output: SchemaInfinityStarTextToVideoOutput + } + 'fal-ai/sana-video': { + input: SchemaSanaVideoInput + output: SchemaSanaVideoOutput + } + 'fal-ai/longcat-video/text-to-video/720p': { + input: SchemaLongcatVideoTextToVideo720pInput + output: SchemaLongcatVideoTextToVideo720pOutput + } + 'fal-ai/longcat-video/text-to-video/480p': { + input: SchemaLongcatVideoTextToVideo480pInput + output: SchemaLongcatVideoTextToVideo480pOutput + } + 'fal-ai/longcat-video/distilled/text-to-video/720p': { + input: SchemaLongcatVideoDistilledTextToVideo720pInput + output: SchemaLongcatVideoDistilledTextToVideo720pOutput + } + 'fal-ai/longcat-video/distilled/text-to-video/480p': { + input: SchemaLongcatVideoDistilledTextToVideo480pInput + output: SchemaLongcatVideoDistilledTextToVideo480pOutput + } + 'fal-ai/minimax/hailuo-2.3/standard/text-to-video': { + input: SchemaMinimaxHailuo23StandardTextToVideoInput + output: SchemaMinimaxHailuo23StandardTextToVideoOutput + } + 'fal-ai/minimax/hailuo-2.3/pro/text-to-video': { + input: SchemaMinimaxHailuo23ProTextToVideoInput + output: SchemaMinimaxHailuo23ProTextToVideoOutput + } + 'fal-ai/bytedance/seedance/v1/pro/fast/text-to-video': { + input: SchemaBytedanceSeedanceV1ProFastTextToVideoInput + output: SchemaBytedanceSeedanceV1ProFastTextToVideoOutput + } + 'fal-ai/vidu/q2/text-to-video': { + input: SchemaViduQ2TextToVideoInput + output: SchemaViduQ2TextToVideoOutput + } + 'fal-ai/krea-wan-14b/text-to-video': { + input: SchemaKreaWan14bTextToVideoInput + output: SchemaKreaWan14bTextToVideoOutput + } + 'fal-ai/wan-alpha': { + input: SchemaWanAlphaInput + output: SchemaWanAlphaOutput + } + 'fal-ai/kandinsky5/text-to-video/distill': { + input: SchemaKandinsky5TextToVideoDistillInput + output: SchemaKandinsky5TextToVideoDistillOutput + } + 'fal-ai/kandinsky5/text-to-video': { + input: SchemaKandinsky5TextToVideoInput + output: SchemaKandinsky5TextToVideoOutput + } + 'fal-ai/veo3.1/fast': { + input: SchemaVeo31FastInput + output: SchemaVeo31FastOutput + } + 'fal-ai/veo3.1': { + input: SchemaVeo31Input + output: SchemaVeo31Output + } + 'fal-ai/sora-2/text-to-video/pro': { + input: SchemaSora2TextToVideoProInput + output: SchemaSora2TextToVideoProOutput + } + 'fal-ai/sora-2/text-to-video': { + input: SchemaSora2TextToVideoInput + output: SchemaSora2TextToVideoOutput + } + 'fal-ai/ovi': { + input: SchemaOviInput + output: SchemaOviOutput + } + 'fal-ai/wan-25-preview/text-to-video': { + input: SchemaWan25PreviewTextToVideoInput + output: SchemaWan25PreviewTextToVideoOutput + } + 'argil/avatars/text-to-video': { + input: SchemaAvatarsTextToVideoInput + output: SchemaAvatarsTextToVideoOutput + } + 'fal-ai/pixverse/v5/text-to-video': { + input: SchemaPixverseV5TextToVideoInput + output: SchemaPixverseV5TextToVideoOutput + } + 'fal-ai/infinitalk/single-text': { + input: SchemaInfinitalkSingleTextInput + output: SchemaInfinitalkSingleTextOutput + } + 'moonvalley/marey/t2v': { + input: SchemaMareyT2vInput + output: SchemaMareyT2vOutput + } + 'fal-ai/wan/v2.2-a14b/text-to-video/lora': { + input: SchemaWanV22A14bTextToVideoLoraInput + output: SchemaWanV22A14bTextToVideoLoraOutput + } + 'fal-ai/wan/v2.2-5b/text-to-video/distill': { + input: 
SchemaWanV225bTextToVideoDistillInput + output: SchemaWanV225bTextToVideoDistillOutput + } + 'fal-ai/wan/v2.2-5b/text-to-video/fast-wan': { + input: SchemaWanV225bTextToVideoFastWanInput + output: SchemaWanV225bTextToVideoFastWanOutput + } + 'fal-ai/wan/v2.2-a14b/text-to-video/turbo': { + input: SchemaWanV22A14bTextToVideoTurboInput + output: SchemaWanV22A14bTextToVideoTurboOutput + } + 'fal-ai/wan/v2.2-5b/text-to-video': { + input: SchemaWanV225bTextToVideoInput + output: SchemaWanV225bTextToVideoOutput + } + 'fal-ai/wan/v2.2-a14b/text-to-video': { + input: SchemaWanV22A14bTextToVideoInput + output: SchemaWanV22A14bTextToVideoOutput + } + 'fal-ai/ltxv-13b-098-distilled': { + input: SchemaLtxv13B098DistilledInput + output: SchemaLtxv13B098DistilledOutput + } + 'fal-ai/minimax/hailuo-02/pro/text-to-video': { + input: SchemaMinimaxHailuo02ProTextToVideoInput + output: SchemaMinimaxHailuo02ProTextToVideoOutput + } + 'fal-ai/bytedance/seedance/v1/pro/text-to-video': { + input: SchemaBytedanceSeedanceV1ProTextToVideoInput + output: SchemaBytedanceSeedanceV1ProTextToVideoOutput + } + 'fal-ai/bytedance/seedance/v1/lite/text-to-video': { + input: SchemaBytedanceSeedanceV1LiteTextToVideoInput + output: SchemaBytedanceSeedanceV1LiteTextToVideoOutput + } + 'fal-ai/kling-video/v2.1/master/text-to-video': { + input: SchemaKlingVideoV21MasterTextToVideoInput + output: SchemaKlingVideoV21MasterTextToVideoOutput + } + 'veed/avatars/text-to-video': { + input: SchemaAvatarsTextToVideoInput + output: SchemaAvatarsTextToVideoOutput + } + 'fal-ai/ltx-video-13b-dev': { + input: SchemaLtxVideo13bDevInput + output: SchemaLtxVideo13bDevOutput + } + 'fal-ai/ltx-video-13b-distilled': { + input: SchemaLtxVideo13bDistilledInput + output: SchemaLtxVideo13bDistilledOutput + } + 'fal-ai/pixverse/v4.5/text-to-video/fast': { + input: SchemaPixverseV45TextToVideoFastInput + output: SchemaPixverseV45TextToVideoFastOutput + } + 'fal-ai/pixverse/v4.5/text-to-video': { + input: SchemaPixverseV45TextToVideoInput + output: SchemaPixverseV45TextToVideoOutput + } + 'fal-ai/vidu/q1/text-to-video': { + input: SchemaViduQ1TextToVideoInput + output: SchemaViduQ1TextToVideoOutput + } + 'fal-ai/magi': { + input: SchemaMagiInput + output: SchemaMagiOutput + } + 'fal-ai/magi-distilled': { + input: SchemaMagiDistilledInput + output: SchemaMagiDistilledOutput + } + 'fal-ai/pixverse/v4/text-to-video': { + input: SchemaPixverseV4TextToVideoInput + output: SchemaPixverseV4TextToVideoOutput + } + 'fal-ai/pixverse/v4/text-to-video/fast': { + input: SchemaPixverseV4TextToVideoFastInput + output: SchemaPixverseV4TextToVideoFastOutput + } + 'fal-ai/kling-video/lipsync/audio-to-video': { + input: SchemaKlingVideoLipsyncAudioToVideoInput + output: SchemaKlingVideoLipsyncAudioToVideoOutput + } + 'fal-ai/kling-video/lipsync/text-to-video': { + input: SchemaKlingVideoLipsyncTextToVideoInput + output: SchemaKlingVideoLipsyncTextToVideoOutput + } + 'fal-ai/wan-t2v-lora': { + input: SchemaWanT2vLoraInput + output: SchemaWanT2vLoraOutput + } + 'fal-ai/luma-dream-machine/ray-2-flash': { + input: SchemaLumaDreamMachineRay2FlashInput + output: SchemaLumaDreamMachineRay2FlashOutput + } + 'fal-ai/pika/v2/turbo/text-to-video': { + input: SchemaPikaV2TurboTextToVideoInput + output: SchemaPikaV2TurboTextToVideoOutput + } + 'fal-ai/pika/v2.1/text-to-video': { + input: SchemaPikaV21TextToVideoInput + output: SchemaPikaV21TextToVideoOutput + } + 'fal-ai/pika/v2.2/text-to-video': { + input: SchemaPikaV22TextToVideoInput + output: SchemaPikaV22TextToVideoOutput + } + 
'fal-ai/wan-pro/text-to-video': { + input: SchemaWanProTextToVideoInput + output: SchemaWanProTextToVideoOutput + } + 'fal-ai/kling-video/v1.5/pro/effects': { + input: SchemaKlingVideoV15ProEffectsInput + output: SchemaKlingVideoV15ProEffectsOutput + } + 'fal-ai/kling-video/v1.6/pro/effects': { + input: SchemaKlingVideoV16ProEffectsInput + output: SchemaKlingVideoV16ProEffectsOutput + } + 'fal-ai/kling-video/v1/standard/effects': { + input: SchemaKlingVideoV1StandardEffectsInput + output: SchemaKlingVideoV1StandardEffectsOutput + } + 'fal-ai/kling-video/v1.6/standard/effects': { + input: SchemaKlingVideoV16StandardEffectsInput + output: SchemaKlingVideoV16StandardEffectsOutput + } + 'fal-ai/ltx-video-v095': { + input: SchemaLtxVideoV095Input + output: SchemaLtxVideoV095Output + } + 'fal-ai/kling-video/v1.6/pro/text-to-video': { + input: SchemaKlingVideoV16ProTextToVideoInput + output: SchemaKlingVideoV16ProTextToVideoOutput + } + 'fal-ai/wan-t2v': { + input: SchemaWanT2vInput + output: SchemaWanT2vOutput + } + 'fal-ai/veo2': { + input: SchemaVeo2Input + output: SchemaVeo2Output + } + 'fal-ai/minimax/video-01-director': { + input: SchemaMinimaxVideo01DirectorInput + output: SchemaMinimaxVideo01DirectorOutput + } + 'fal-ai/pixverse/v3.5/text-to-video': { + input: SchemaPixverseV35TextToVideoInput + output: SchemaPixverseV35TextToVideoOutput + } + 'fal-ai/pixverse/v3.5/text-to-video/fast': { + input: SchemaPixverseV35TextToVideoFastInput + output: SchemaPixverseV35TextToVideoFastOutput + } + 'fal-ai/luma-dream-machine/ray-2': { + input: SchemaLumaDreamMachineRay2Input + output: SchemaLumaDreamMachineRay2Output + } + 'fal-ai/hunyuan-video-lora': { + input: SchemaHunyuanVideoLoraInput + output: SchemaHunyuanVideoLoraOutput + } + 'fal-ai/transpixar': { + input: SchemaTranspixarInput + output: SchemaTranspixarOutput + } + 'fal-ai/cogvideox-5b': { + input: SchemaCogvideox5bInput + output: SchemaCogvideox5bOutput + } + 'fal-ai/kling-video/v1.6/standard/text-to-video': { + input: SchemaKlingVideoV16StandardTextToVideoInput + output: SchemaKlingVideoV16StandardTextToVideoOutput + } + 'fal-ai/minimax/video-01-live': { + input: SchemaMinimaxVideo01LiveInput + output: SchemaMinimaxVideo01LiveOutput + } + 'fal-ai/kling-video/v1/standard/text-to-video': { + input: SchemaKlingVideoV1StandardTextToVideoInput + output: SchemaKlingVideoV1StandardTextToVideoOutput + } + 'fal-ai/kling-video/v1.5/pro/text-to-video': { + input: SchemaKlingVideoV15ProTextToVideoInput + output: SchemaKlingVideoV15ProTextToVideoOutput + } + 'fal-ai/mochi-v1': { + input: SchemaMochiV1Input + output: SchemaMochiV1Output + } + 'fal-ai/hunyuan-video': { + input: SchemaHunyuanVideoInput + output: SchemaHunyuanVideoOutput + } + 'fal-ai/ltx-video': { + input: SchemaLtxVideoInput + output: SchemaLtxVideoOutput + } + 'fal-ai/fast-svd/text-to-video': { + input: SchemaFastSvdTextToVideoInput + output: SchemaFastSvdTextToVideoOutput + } + 'fal-ai/fast-svd-lcm/text-to-video': { + input: SchemaFastSvdLcmTextToVideoInput + output: SchemaFastSvdLcmTextToVideoOutput + } + 'fal-ai/t2v-turbo': { + input: SchemaT2vTurboInput + output: SchemaT2vTurboOutput + } + 'fal-ai/fast-animatediff/text-to-video': { + input: SchemaFastAnimatediffTextToVideoInput + output: SchemaFastAnimatediffTextToVideoOutput + } + 'fal-ai/fast-animatediff/turbo/text-to-video': { + input: SchemaFastAnimatediffTurboTextToVideoInput + output: SchemaFastAnimatediffTurboTextToVideoOutput + } + 'fal-ai/minimax/video-01': { + input: SchemaMinimaxVideo01Input + output: 
SchemaMinimaxVideo01Output + } + 'fal-ai/animatediff-sparsectrl-lcm': { + input: SchemaAnimatediffSparsectrlLcmInput + output: SchemaAnimatediffSparsectrlLcmOutput + } +} + +/** Union type of all text-to-video model endpoint IDs */ +export type TextToVideoModel = keyof TextToVideoEndpointMap + +export const TextToVideoSchemaMap: Record< + TextToVideoModel, + { input: z.ZodSchema; output: z.ZodSchema } +> = { + ['fal-ai/kling-video/v2.5-turbo/pro/text-to-video']: { + input: zSchemaKlingVideoV25TurboProTextToVideoInput, + output: zSchemaKlingVideoV25TurboProTextToVideoOutput, + }, + ['fal-ai/veo3/fast']: { + input: zSchemaVeo3FastInput, + output: zSchemaVeo3FastOutput, + }, + ['fal-ai/minimax/hailuo-02/standard/text-to-video']: { + input: zSchemaMinimaxHailuo02StandardTextToVideoInput, + output: zSchemaMinimaxHailuo02StandardTextToVideoOutput, + }, + ['fal-ai/veo3']: { + input: zSchemaVeo3Input, + output: zSchemaVeo3Output, + }, + ['fal-ai/kling-video/v2/master/text-to-video']: { + input: zSchemaKlingVideoV2MasterTextToVideoInput, + output: zSchemaKlingVideoV2MasterTextToVideoOutput, + }, + ['fal-ai/pixverse/v5.6/text-to-video']: { + input: zSchemaPixverseV56TextToVideoInput, + output: zSchemaPixverseV56TextToVideoOutput, + }, + ['fal-ai/ltx-2-19b/distilled/text-to-video/lora']: { + input: zSchemaLtx219bDistilledTextToVideoLoraInput, + output: zSchemaLtx219bDistilledTextToVideoLoraOutput, + }, + ['fal-ai/ltx-2-19b/distilled/text-to-video']: { + input: zSchemaLtx219bDistilledTextToVideoInput, + output: zSchemaLtx219bDistilledTextToVideoOutput, + }, + ['fal-ai/ltx-2-19b/text-to-video/lora']: { + input: zSchemaLtx219bTextToVideoLoraInput, + output: zSchemaLtx219bTextToVideoLoraOutput, + }, + ['fal-ai/ltx-2-19b/text-to-video']: { + input: zSchemaLtx219bTextToVideoInput, + output: zSchemaLtx219bTextToVideoOutput, + }, + ['fal-ai/kandinsky5-pro/text-to-video']: { + input: zSchemaKandinsky5ProTextToVideoInput, + output: zSchemaKandinsky5ProTextToVideoOutput, + }, + ['fal-ai/bytedance/seedance/v1.5/pro/text-to-video']: { + input: zSchemaBytedanceSeedanceV15ProTextToVideoInput, + output: zSchemaBytedanceSeedanceV15ProTextToVideoOutput, + }, + ['wan/v2.6/text-to-video']: { + input: zSchemaV26TextToVideoInput, + output: zSchemaV26TextToVideoOutput, + }, + ['veed/fabric-1.0/text']: { + input: zSchemaFabric10TextInput, + output: zSchemaFabric10TextOutput, + }, + ['fal-ai/kling-video/v2.6/pro/text-to-video']: { + input: zSchemaKlingVideoV26ProTextToVideoInput, + output: zSchemaKlingVideoV26ProTextToVideoOutput, + }, + ['fal-ai/pixverse/v5.5/text-to-video']: { + input: zSchemaPixverseV55TextToVideoInput, + output: zSchemaPixverseV55TextToVideoOutput, + }, + ['fal-ai/ltx-2/text-to-video/fast']: { + input: zSchemaLtx2TextToVideoFastInput, + output: zSchemaLtx2TextToVideoFastOutput, + }, + ['fal-ai/ltx-2/text-to-video']: { + input: zSchemaLtx2TextToVideoInput, + output: zSchemaLtx2TextToVideoOutput, + }, + ['fal-ai/hunyuan-video-v1.5/text-to-video']: { + input: zSchemaHunyuanVideoV15TextToVideoInput, + output: zSchemaHunyuanVideoV15TextToVideoOutput, + }, + ['fal-ai/infinity-star/text-to-video']: { + input: zSchemaInfinityStarTextToVideoInput, + output: zSchemaInfinityStarTextToVideoOutput, + }, + ['fal-ai/sana-video']: { + input: zSchemaSanaVideoInput, + output: zSchemaSanaVideoOutput, + }, + ['fal-ai/longcat-video/text-to-video/720p']: { + input: zSchemaLongcatVideoTextToVideo720pInput, + output: zSchemaLongcatVideoTextToVideo720pOutput, + }, + ['fal-ai/longcat-video/text-to-video/480p']: { + input: 
zSchemaLongcatVideoTextToVideo480pInput, + output: zSchemaLongcatVideoTextToVideo480pOutput, + }, + ['fal-ai/longcat-video/distilled/text-to-video/720p']: { + input: zSchemaLongcatVideoDistilledTextToVideo720pInput, + output: zSchemaLongcatVideoDistilledTextToVideo720pOutput, + }, + ['fal-ai/longcat-video/distilled/text-to-video/480p']: { + input: zSchemaLongcatVideoDistilledTextToVideo480pInput, + output: zSchemaLongcatVideoDistilledTextToVideo480pOutput, + }, + ['fal-ai/minimax/hailuo-2.3/standard/text-to-video']: { + input: zSchemaMinimaxHailuo23StandardTextToVideoInput, + output: zSchemaMinimaxHailuo23StandardTextToVideoOutput, + }, + ['fal-ai/minimax/hailuo-2.3/pro/text-to-video']: { + input: zSchemaMinimaxHailuo23ProTextToVideoInput, + output: zSchemaMinimaxHailuo23ProTextToVideoOutput, + }, + ['fal-ai/bytedance/seedance/v1/pro/fast/text-to-video']: { + input: zSchemaBytedanceSeedanceV1ProFastTextToVideoInput, + output: zSchemaBytedanceSeedanceV1ProFastTextToVideoOutput, + }, + ['fal-ai/vidu/q2/text-to-video']: { + input: zSchemaViduQ2TextToVideoInput, + output: zSchemaViduQ2TextToVideoOutput, + }, + ['fal-ai/krea-wan-14b/text-to-video']: { + input: zSchemaKreaWan14bTextToVideoInput, + output: zSchemaKreaWan14bTextToVideoOutput, + }, + ['fal-ai/wan-alpha']: { + input: zSchemaWanAlphaInput, + output: zSchemaWanAlphaOutput, + }, + ['fal-ai/kandinsky5/text-to-video/distill']: { + input: zSchemaKandinsky5TextToVideoDistillInput, + output: zSchemaKandinsky5TextToVideoDistillOutput, + }, + ['fal-ai/kandinsky5/text-to-video']: { + input: zSchemaKandinsky5TextToVideoInput, + output: zSchemaKandinsky5TextToVideoOutput, + }, + ['fal-ai/veo3.1/fast']: { + input: zSchemaVeo31FastInput, + output: zSchemaVeo31FastOutput, + }, + ['fal-ai/veo3.1']: { + input: zSchemaVeo31Input, + output: zSchemaVeo31Output, + }, + ['fal-ai/sora-2/text-to-video/pro']: { + input: zSchemaSora2TextToVideoProInput, + output: zSchemaSora2TextToVideoProOutput, + }, + ['fal-ai/sora-2/text-to-video']: { + input: zSchemaSora2TextToVideoInput, + output: zSchemaSora2TextToVideoOutput, + }, + ['fal-ai/ovi']: { + input: zSchemaOviInput, + output: zSchemaOviOutput, + }, + ['fal-ai/wan-25-preview/text-to-video']: { + input: zSchemaWan25PreviewTextToVideoInput, + output: zSchemaWan25PreviewTextToVideoOutput, + }, + ['argil/avatars/text-to-video']: { + input: zSchemaAvatarsTextToVideoInput, + output: zSchemaAvatarsTextToVideoOutput, + }, + ['fal-ai/pixverse/v5/text-to-video']: { + input: zSchemaPixverseV5TextToVideoInput, + output: zSchemaPixverseV5TextToVideoOutput, + }, + ['fal-ai/infinitalk/single-text']: { + input: zSchemaInfinitalkSingleTextInput, + output: zSchemaInfinitalkSingleTextOutput, + }, + ['moonvalley/marey/t2v']: { + input: zSchemaMareyT2vInput, + output: zSchemaMareyT2vOutput, + }, + ['fal-ai/wan/v2.2-a14b/text-to-video/lora']: { + input: zSchemaWanV22A14bTextToVideoLoraInput, + output: zSchemaWanV22A14bTextToVideoLoraOutput, + }, + ['fal-ai/wan/v2.2-5b/text-to-video/distill']: { + input: zSchemaWanV225bTextToVideoDistillInput, + output: zSchemaWanV225bTextToVideoDistillOutput, + }, + ['fal-ai/wan/v2.2-5b/text-to-video/fast-wan']: { + input: zSchemaWanV225bTextToVideoFastWanInput, + output: zSchemaWanV225bTextToVideoFastWanOutput, + }, + ['fal-ai/wan/v2.2-a14b/text-to-video/turbo']: { + input: zSchemaWanV22A14bTextToVideoTurboInput, + output: zSchemaWanV22A14bTextToVideoTurboOutput, + }, + ['fal-ai/wan/v2.2-5b/text-to-video']: { + input: zSchemaWanV225bTextToVideoInput, + output: zSchemaWanV225bTextToVideoOutput, + 
}, + ['fal-ai/wan/v2.2-a14b/text-to-video']: { + input: zSchemaWanV22A14bTextToVideoInput, + output: zSchemaWanV22A14bTextToVideoOutput, + }, + ['fal-ai/ltxv-13b-098-distilled']: { + input: zSchemaLtxv13B098DistilledInput, + output: zSchemaLtxv13B098DistilledOutput, + }, + ['fal-ai/minimax/hailuo-02/pro/text-to-video']: { + input: zSchemaMinimaxHailuo02ProTextToVideoInput, + output: zSchemaMinimaxHailuo02ProTextToVideoOutput, + }, + ['fal-ai/bytedance/seedance/v1/pro/text-to-video']: { + input: zSchemaBytedanceSeedanceV1ProTextToVideoInput, + output: zSchemaBytedanceSeedanceV1ProTextToVideoOutput, + }, + ['fal-ai/bytedance/seedance/v1/lite/text-to-video']: { + input: zSchemaBytedanceSeedanceV1LiteTextToVideoInput, + output: zSchemaBytedanceSeedanceV1LiteTextToVideoOutput, + }, + ['fal-ai/kling-video/v2.1/master/text-to-video']: { + input: zSchemaKlingVideoV21MasterTextToVideoInput, + output: zSchemaKlingVideoV21MasterTextToVideoOutput, + }, + ['veed/avatars/text-to-video']: { + input: zSchemaAvatarsTextToVideoInput, + output: zSchemaAvatarsTextToVideoOutput, + }, + ['fal-ai/ltx-video-13b-dev']: { + input: zSchemaLtxVideo13bDevInput, + output: zSchemaLtxVideo13bDevOutput, + }, + ['fal-ai/ltx-video-13b-distilled']: { + input: zSchemaLtxVideo13bDistilledInput, + output: zSchemaLtxVideo13bDistilledOutput, + }, + ['fal-ai/pixverse/v4.5/text-to-video/fast']: { + input: zSchemaPixverseV45TextToVideoFastInput, + output: zSchemaPixverseV45TextToVideoFastOutput, + }, + ['fal-ai/pixverse/v4.5/text-to-video']: { + input: zSchemaPixverseV45TextToVideoInput, + output: zSchemaPixverseV45TextToVideoOutput, + }, + ['fal-ai/vidu/q1/text-to-video']: { + input: zSchemaViduQ1TextToVideoInput, + output: zSchemaViduQ1TextToVideoOutput, + }, + ['fal-ai/magi']: { + input: zSchemaMagiInput, + output: zSchemaMagiOutput, + }, + ['fal-ai/magi-distilled']: { + input: zSchemaMagiDistilledInput, + output: zSchemaMagiDistilledOutput, + }, + ['fal-ai/pixverse/v4/text-to-video']: { + input: zSchemaPixverseV4TextToVideoInput, + output: zSchemaPixverseV4TextToVideoOutput, + }, + ['fal-ai/pixverse/v4/text-to-video/fast']: { + input: zSchemaPixverseV4TextToVideoFastInput, + output: zSchemaPixverseV4TextToVideoFastOutput, + }, + ['fal-ai/kling-video/lipsync/audio-to-video']: { + input: zSchemaKlingVideoLipsyncAudioToVideoInput, + output: zSchemaKlingVideoLipsyncAudioToVideoOutput, + }, + ['fal-ai/kling-video/lipsync/text-to-video']: { + input: zSchemaKlingVideoLipsyncTextToVideoInput, + output: zSchemaKlingVideoLipsyncTextToVideoOutput, + }, + ['fal-ai/wan-t2v-lora']: { + input: zSchemaWanT2vLoraInput, + output: zSchemaWanT2vLoraOutput, + }, + ['fal-ai/luma-dream-machine/ray-2-flash']: { + input: zSchemaLumaDreamMachineRay2FlashInput, + output: zSchemaLumaDreamMachineRay2FlashOutput, + }, + ['fal-ai/pika/v2/turbo/text-to-video']: { + input: zSchemaPikaV2TurboTextToVideoInput, + output: zSchemaPikaV2TurboTextToVideoOutput, + }, + ['fal-ai/pika/v2.1/text-to-video']: { + input: zSchemaPikaV21TextToVideoInput, + output: zSchemaPikaV21TextToVideoOutput, + }, + ['fal-ai/pika/v2.2/text-to-video']: { + input: zSchemaPikaV22TextToVideoInput, + output: zSchemaPikaV22TextToVideoOutput, + }, + ['fal-ai/wan-pro/text-to-video']: { + input: zSchemaWanProTextToVideoInput, + output: zSchemaWanProTextToVideoOutput, + }, + ['fal-ai/kling-video/v1.5/pro/effects']: { + input: zSchemaKlingVideoV15ProEffectsInput, + output: zSchemaKlingVideoV15ProEffectsOutput, + }, + ['fal-ai/kling-video/v1.6/pro/effects']: { + input: 
zSchemaKlingVideoV16ProEffectsInput, + output: zSchemaKlingVideoV16ProEffectsOutput, + }, + ['fal-ai/kling-video/v1/standard/effects']: { + input: zSchemaKlingVideoV1StandardEffectsInput, + output: zSchemaKlingVideoV1StandardEffectsOutput, + }, + ['fal-ai/kling-video/v1.6/standard/effects']: { + input: zSchemaKlingVideoV16StandardEffectsInput, + output: zSchemaKlingVideoV16StandardEffectsOutput, + }, + ['fal-ai/ltx-video-v095']: { + input: zSchemaLtxVideoV095Input, + output: zSchemaLtxVideoV095Output, + }, + ['fal-ai/kling-video/v1.6/pro/text-to-video']: { + input: zSchemaKlingVideoV16ProTextToVideoInput, + output: zSchemaKlingVideoV16ProTextToVideoOutput, + }, + ['fal-ai/wan-t2v']: { + input: zSchemaWanT2vInput, + output: zSchemaWanT2vOutput, + }, + ['fal-ai/veo2']: { + input: zSchemaVeo2Input, + output: zSchemaVeo2Output, + }, + ['fal-ai/minimax/video-01-director']: { + input: zSchemaMinimaxVideo01DirectorInput, + output: zSchemaMinimaxVideo01DirectorOutput, + }, + ['fal-ai/pixverse/v3.5/text-to-video']: { + input: zSchemaPixverseV35TextToVideoInput, + output: zSchemaPixverseV35TextToVideoOutput, + }, + ['fal-ai/pixverse/v3.5/text-to-video/fast']: { + input: zSchemaPixverseV35TextToVideoFastInput, + output: zSchemaPixverseV35TextToVideoFastOutput, + }, + ['fal-ai/luma-dream-machine/ray-2']: { + input: zSchemaLumaDreamMachineRay2Input, + output: zSchemaLumaDreamMachineRay2Output, + }, + ['fal-ai/hunyuan-video-lora']: { + input: zSchemaHunyuanVideoLoraInput, + output: zSchemaHunyuanVideoLoraOutput, + }, + ['fal-ai/transpixar']: { + input: zSchemaTranspixarInput, + output: zSchemaTranspixarOutput, + }, + ['fal-ai/cogvideox-5b']: { + input: zSchemaCogvideox5bInput, + output: zSchemaCogvideox5bOutput, + }, + ['fal-ai/kling-video/v1.6/standard/text-to-video']: { + input: zSchemaKlingVideoV16StandardTextToVideoInput, + output: zSchemaKlingVideoV16StandardTextToVideoOutput, + }, + ['fal-ai/minimax/video-01-live']: { + input: zSchemaMinimaxVideo01LiveInput, + output: zSchemaMinimaxVideo01LiveOutput, + }, + ['fal-ai/kling-video/v1/standard/text-to-video']: { + input: zSchemaKlingVideoV1StandardTextToVideoInput, + output: zSchemaKlingVideoV1StandardTextToVideoOutput, + }, + ['fal-ai/kling-video/v1.5/pro/text-to-video']: { + input: zSchemaKlingVideoV15ProTextToVideoInput, + output: zSchemaKlingVideoV15ProTextToVideoOutput, + }, + ['fal-ai/mochi-v1']: { + input: zSchemaMochiV1Input, + output: zSchemaMochiV1Output, + }, + ['fal-ai/hunyuan-video']: { + input: zSchemaHunyuanVideoInput, + output: zSchemaHunyuanVideoOutput, + }, + ['fal-ai/ltx-video']: { + input: zSchemaLtxVideoInput, + output: zSchemaLtxVideoOutput, + }, + ['fal-ai/fast-svd/text-to-video']: { + input: zSchemaFastSvdTextToVideoInput, + output: zSchemaFastSvdTextToVideoOutput, + }, + ['fal-ai/fast-svd-lcm/text-to-video']: { + input: zSchemaFastSvdLcmTextToVideoInput, + output: zSchemaFastSvdLcmTextToVideoOutput, + }, + ['fal-ai/t2v-turbo']: { + input: zSchemaT2vTurboInput, + output: zSchemaT2vTurboOutput, + }, + ['fal-ai/fast-animatediff/text-to-video']: { + input: zSchemaFastAnimatediffTextToVideoInput, + output: zSchemaFastAnimatediffTextToVideoOutput, + }, + ['fal-ai/fast-animatediff/turbo/text-to-video']: { + input: zSchemaFastAnimatediffTurboTextToVideoInput, + output: zSchemaFastAnimatediffTurboTextToVideoOutput, + }, + ['fal-ai/minimax/video-01']: { + input: zSchemaMinimaxVideo01Input, + output: zSchemaMinimaxVideo01Output, + }, + ['fal-ai/animatediff-sparsectrl-lcm']: { + input: zSchemaAnimatediffSparsectrlLcmInput, + output: 
zSchemaAnimatediffSparsectrlLcmOutput, + }, +} as const + +/** Get the input type for a specific text-to-video model */ +export type TextToVideoModelInput<T extends keyof TextToVideoEndpointMap> = + TextToVideoEndpointMap[T]['input'] + +/** Get the output type for a specific text-to-video model */ +export type TextToVideoModelOutput<T extends keyof TextToVideoEndpointMap> = + TextToVideoEndpointMap[T]['output'] diff --git a/packages/typescript/ai-fal/src/generated/text-to-video/types.gen.ts b/packages/typescript/ai-fal/src/generated/text-to-video/types.gen.ts new file mode 100644 index 00000000..babe7016 --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/text-to-video/types.gen.ts @@ -0,0 +1,17988 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: 'https://queue.fal.run' | (string & {}) +} + +/** + * AnimatediffLCMOutput + */ +export type SchemaAnimatediffSparsectrlLcmOutput = { + /** + * Seed + * + * The seed used to generate the video. + */ + seed: number + /** + * Video + * + * Generated video file. + */ + video: SchemaFile +} + +/** + * File + */ +export type SchemaFile = { + /** + * File Size + * + * The size of the file in bytes. + */ + file_size: number + /** + * File Name + * + * The name of the file. It will be auto-generated if not provided. + */ + file_name: string + /** + * Content Type + * + * The mime type of the file. + */ + content_type: string + /** + * Url + * + * The URL where the file can be downloaded from. + */ + url: string +} + +/** + * AnimatediffLCMInput + */ +export type SchemaAnimatediffSparsectrlLcmInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable + * Diffusion will output the same image every time. + * + */ + seed?: number + /** + * Controlnet Type + * + * The type of controlnet to use for generating the video. The controlnet determines how the video will be animated. + */ + controlnet_type?: 'scribble' | 'rgb' + /** + * Keyframe 2 Index + * + * The frame index of the third keyframe to use for the generation. + */ + keyframe_2_index?: number + /** + * Keyframe 0 Index + * + * The frame index of the first keyframe to use for the generation. + */ + keyframe_0_index?: number + /** + * Keyframe 1 Image Url + * + * The URL of the second keyframe to use for the generation. + */ + keyframe_1_image_url?: string | null + /** + * Keyframe 1 Index + * + * The frame index of the second keyframe to use for the generation. + */ + keyframe_1_index?: number + /** + * Classifier-Free Guidance scale (CFG) + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you. + */ + guidance_scale?: number + /** + * Number of inference steps + * + * Increasing the amount of steps tells Stable Diffusion that it should take more steps to generate your final result which can increase the amount of detail in your image. + */ + num_inference_steps?: number + /** + * Keyframe 2 Image Url + * + * The URL of the third keyframe to use for the generation. + */ + keyframe_2_image_url?: string | null + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to specify what you don't want. + * + */ + negative_prompt?: string + /** + * Keyframe 0 Image Url + * + * The URL of the first keyframe to use for the generation.
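
A minimal sketch of how the generated helpers above might be consumed. It assumes the endpoint map stores zod schemas (the zSchema* values it references); if the generator instead stores already-inferred payload types, the helper can be used directly without z.infer:

import { z } from 'zod'

// Resolve the entry for one endpoint id via the generated helper type.
type Veo2InputSchema = TextToVideoModelInput<'fal-ai/veo2'>

// If the entry is a zod schema, z.infer<> recovers the payload type;
// otherwise the helper already names the payload type itself.
type Veo2Payload = Veo2InputSchema extends z.ZodTypeAny
  ? z.infer<Veo2InputSchema>
  : Veo2InputSchema
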
+ */ + keyframe_0_image_url?: string | null +} + +/** + * VideoOutput + */ +export type SchemaMinimaxVideo01Output = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * TextToVideoRequest + */ +export type SchemaMinimaxVideo01Input = { + /** + * Prompt Optimizer + * + * Whether to use the model's prompt optimizer + */ + prompt_optimizer?: boolean + /** + * Prompt + */ + prompt: string +} + +/** + * AnimateDiffT2VOutput + */ +export type SchemaFastAnimatediffTurboTextToVideoOutput = { + /** + * Seed + * + * Seed used for generating the video. + */ + seed: number + /** + * Video + * + * Generated video file. + */ + video: SchemaFile +} + +/** + * AnimateDiffT2VTurboInput + */ +export type SchemaFastAnimatediffTurboTextToVideoInput = { + /** + * Prompt + * + * The prompt to use for generating the video. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number + /** + * Fps + * + * Number of frames per second to extract from the video. + */ + fps?: number + /** + * Video Size + * + * The size of the video to generate. + */ + video_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you. + */ + guidance_scale?: number + /** + * Num Frames + * + * The number of frames to generate for the video. + */ + num_frames?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. 4-12 is recommended for turbo mode. + */ + num_inference_steps?: number + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Motions + * + * The motions to apply to the video. + */ + motions?: Array< + 'zoom-out' | 'zoom-in' | 'pan-left' | 'pan-right' | 'tilt-up' | 'tilt-down' + > +} + +/** + * ImageSize + */ +export type SchemaImageSize = { + /** + * Height + * + * The height of the generated image. + */ + height?: number + /** + * Width + * + * The width of the generated image. + */ + width?: number +} + +/** + * AnimateDiffT2VOutput + */ +export type SchemaFastAnimatediffTextToVideoOutput = { + /** + * Seed + * + * Seed used for generating the video. + */ + seed: number + /** + * Video + * + * Generated video file. + */ + video: SchemaFile +} + +/** + * AnimateDiffT2VInput + */ +export type SchemaFastAnimatediffTextToVideoInput = { + /** + * Prompt + * + * The prompt to use for generating the video. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number + /** + * Fps + * + * Number of frames per second to extract from the video. + */ + fps?: number + /** + * Video Size + * + * The size of the video to generate. 
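
The video_size unions in these request types accept either a named preset or an explicit SchemaImageSize object; a minimal sketch (pixel values are illustrative, not schema defaults):

// Preset name and explicit dimensions are interchangeable for video_size.
const preset: SchemaFastAnimatediffTurboTextToVideoInput['video_size'] =
  'landscape_16_9'
const explicit: SchemaFastAnimatediffTurboTextToVideoInput['video_size'] = {
  width: 1024,
  height: 576,
}
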
+ */ + video_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Frames + * + * The number of frames to generate for the video. + */ + num_frames?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Motions + * + * The motions to apply to the video. + */ + motions?: Array< + 'zoom-out' | 'zoom-in' | 'pan-left' | 'pan-right' | 'tilt-up' | 'tilt-down' + > +} + +/** + * Output + */ +export type SchemaT2vTurboOutput = { + video: SchemaFile +} + +/** + * Input + */ +export type SchemaT2vTurboInput = { + /** + * Prompt + * + * The prompt to generate images from + */ + prompt: string + /** + * Guidance Scale + * + * The guidance scale + */ + guidance_scale?: number + /** + * Seed + * + * The seed to use for the random number generator + */ + seed?: number | unknown + /** + * Export Fps + * + * The FPS of the exported video + */ + export_fps?: number + /** + * Num Frames + * + * The number of frames to generate + */ + num_frames?: number + /** + * Num Inference Steps + * + * The number of steps to sample + */ + num_inference_steps?: number +} + +/** + * FastSVDOutput + */ +export type SchemaFastSvdLcmTextToVideoOutput = { + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated one that was used in case none was passed. + * + * + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * FastSVDTextInput + */ +export type SchemaFastSvdLcmTextToVideoInput = { + /** + * Prompt + * + * The prompt to use as a starting point for the generation. + */ + prompt: string + /** + * Cond Aug + * + * + * The conditioning augmentation determines the amount of noise that will be + * added to the conditioning frame. The higher the number, the more noise + * there will be, and the less the video will look like the initial image. + * Increase it for more motion. + * + */ + cond_aug?: number + /** + * Fps + * + * + * The FPS of the generated video. The higher the number, the faster the video will + * play. Total video length is 25 frames. + * + */ + fps?: number + /** + * Motion Bucket Id + * + * + * The motion bucket id determines the motion of the generated video. The + * higher the number, the more motion there will be. + * + */ + motion_bucket_id?: number + /** + * Video Size + * + * The size of the generated video. + */ + video_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Steps + * + * + * The number of steps to run the model for. The higher the number the better + * the quality and longer it will take to generate. + * + */ + steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time.
+ * + */ + seed?: number +} + +/** + * FastSVDOutput + */ +export type SchemaFastSvdTextToVideoOutput = { + /** + * Seed + * + * + * Seed of the generated Image. It will be the same value of the one passed in the + * input or the randomly generated one that was used in case none was passed. + * + * + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * FastSVDTextInput + */ +export type SchemaFastSvdTextToVideoInput = { + /** + * Prompt + * + * The prompt to use as a starting point for the generation. + */ + prompt: string + /** + * Cond Aug + * + * + * The conditioning augmentation determines the amount of noise that will be + * added to the conditioning frame. The higher the number, the more noise + * there will be, and the less the video will look like the initial image. + * Increase it for more motion. + * + */ + cond_aug?: number + /** + * Deep Cache + * + * + * Enabling [DeepCache](https://github.com/horseee/DeepCache) will make the execution + * faster, but might sometimes degrade overall quality. The higher the setting, the + * faster the execution will be, but the more quality might be lost. + * + */ + deep_cache?: 'none' | 'minimum' | 'medium' | 'high' + /** + * Fps + * + * + * The FPS of the generated video. The higher the number, the faster the video will + * play. Total video length is 25 frames. + * + */ + fps?: number + /** + * Motion Bucket Id + * + * + * The motion bucket id determines the motion of the generated video. The + * higher the number, the more motion there will be. + * + */ + motion_bucket_id?: number + /** + * Video Size + * + * The size of the generated video. + */ + video_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Steps + * + * + * The number of steps to run the model for. The higher the number the better + * the quality and longer it will take to generate. + * + */ + steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number + /** + * Negative Prompt + * + * The negative prompt to use as a starting point for the generation. + */ + negative_prompt?: string +} + +/** + * Output + */ +export type SchemaLtxVideoOutput = { + /** + * Seed + * + * The seed used for random number generation. + */ + seed: number + /** + * Video + * + * The generated video. + */ + video: SchemaFile +} + +/** + * TextToVideoInput + */ +export type SchemaLtxVideoInput = { + /** + * Prompt + * + * The prompt to generate the video from. + */ + prompt: string + /** + * Guidance Scale + * + * The guidance scale to use. + */ + guidance_scale?: number + /** + * Seed + * + * The seed to use for random number generation. + */ + seed?: number + /** + * Number of Inference Steps + * + * The number of inference steps to take. + */ + num_inference_steps?: number + /** + * Negative Prompt + * + * The negative prompt to generate the video from. + */ + negative_prompt?: string +} + +/** + * HunyuanT2VResponse + */ +export type SchemaHunyuanVideoOutput = { + /** + * Seed + * + * The seed used for generating the video. + */ + seed: number + /** + * Video + */ + video: SchemaFile +} + +/** + * HunyuanVideoRequest + */ +export type SchemaHunyuanVideoInput = { + /** + * Prompt + * + * The prompt to generate the video from. + */ + prompt: string + /** + * Aspect Ratio (W:H) + * + * The aspect ratio of the video to generate.
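
Per the FastSVD field docs above, motion_bucket_id and cond_aug both raise motion, while a fixed seed pins the output; a hedged sketch of a request literal (the numeric values are illustrative, not schema defaults):

const svdRequest: SchemaFastSvdTextToVideoInput = {
  prompt: 'a sailboat crossing a stormy sea at golden hour',
  motion_bucket_id: 180, // higher id => more motion
  cond_aug: 0.05, // extra conditioning noise also increases motion
  seed: 42, // same seed + prompt reproduces the same video
}
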
+ */ + aspect_ratio?: '16:9' | '9:16' + /** + * Resolution + * + * The resolution of the video to generate. + */ + resolution?: '480p' | '580p' | '720p' + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Num Inference Steps + * + * The number of inference steps to run. Lower gets faster results, higher gets better results. + */ + num_inference_steps?: number + /** + * Seed + * + * The seed to use for generating the video. + */ + seed?: number + /** + * Number of Frames + * + * The number of frames to generate. + */ + num_frames?: '129' | '85' + /** + * Pro Mode + * + * By default, generations are done with 35 steps. Pro mode does 55 steps which results in higher quality videos but will take more time and cost 2x more billing units. + */ + pro_mode?: boolean +} + +/** + * MochiT2VOutput + */ +export type SchemaMochiV1Output = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * MochiT2VInput + */ +export type SchemaMochiV1Input = { + /** + * Prompt + * + * The prompt to generate a video from. + */ + prompt: string + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean + /** + * Seed + * + * The seed to use for generating the video. + */ + seed?: number + /** + * Negative Prompt + * + * The negative prompt for the video. + */ + negative_prompt?: string +} + +/** + * T2VOutput + */ +export type SchemaKlingVideoV15ProTextToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * TextToVideoRequest + */ +export type SchemaKlingVideoV15ProTextToVideoInput = { + /** + * Prompt + */ + prompt: string + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: '5' | '10' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video frame + */ + aspect_ratio?: '16:9' | '9:16' | '1:1' + /** + * Negative Prompt + */ + negative_prompt?: string + /** + * Cfg Scale + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt. + * + */ + cfg_scale?: number +} + +/** + * CameraControl + */ +export type SchemaCameraControl = { + /** + * Movement Type + * + * The type of camera movement + */ + movement_type: 'horizontal' | 'vertical' | 'pan' | 'tilt' | 'roll' | 'zoom' + /** + * Movement Value + * + * The value of the camera movement + */ + movement_value: number +} + +/** + * T2VOutput + */ +export type SchemaKlingVideoV1StandardTextToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * V1TextToVideoRequest + */ +export type SchemaKlingVideoV1StandardTextToVideoInput = { + /** + * Prompt + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated video frame + */ + aspect_ratio?: '16:9' | '9:16' | '1:1' + /** + * Advanced Camera Control + * + * Advanced Camera control parameters + */ + advanced_camera_control?: SchemaCameraControl + /** + * Cfg Scale + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt. 
+ * + */ + cfg_scale?: number + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: '5' | '10' + /** + * Negative Prompt + */ + negative_prompt?: string + /** + * Camera Control + * + * Camera control parameters + */ + camera_control?: + | 'down_back' + | 'forward_up' + | 'right_turn_forward' + | 'left_turn_forward' +} + +/** + * T2VLiveOutput + */ +export type SchemaMinimaxVideo01LiveOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * TextToVideoLiveRequest + */ +export type SchemaMinimaxVideo01LiveInput = { + /** + * Prompt Optimizer + * + * Whether to use the model's prompt optimizer + */ + prompt_optimizer?: boolean + /** + * Prompt + */ + prompt: string +} + +/** + * T2VOutput + */ +export type SchemaKlingVideoV16StandardTextToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * TextToVideoRequest + */ +export type SchemaKlingVideoV16StandardTextToVideoInput = { + /** + * Prompt + */ + prompt: string + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: '5' | '10' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video frame + */ + aspect_ratio?: '16:9' | '9:16' | '1:1' + /** + * Negative Prompt + */ + negative_prompt?: string + /** + * Cfg Scale + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt. + * + */ + cfg_scale?: number +} + +/** + * Output + */ +export type SchemaCogvideox5bOutput = { + /** + * Prompt + * + * The prompt used for generating the video. + */ + prompt: string + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Seed + * + * + * Seed of the generated video. It will be the same value of the one passed in the + * input or the randomly generated one that was used in case none was passed. + * + */ + seed: number + /** + * Video + * + * The URL to the generated video + */ + video: SchemaFile +} + +/** + * BaseInput + */ +export type SchemaCogvideox5bInput = { + /** + * Prompt + * + * The prompt to generate the video from. + */ + prompt: string + /** + * Use Rife + * + * Use RIFE for video interpolation + */ + use_rife?: boolean + /** + * Loras + * + * + * The LoRAs to use for the image generation. We currently support one lora. + * + */ + loras?: Array<SchemaLoraWeight> + /** + * Video Size + * + * The size of the generated video. + */ + video_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related video to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Export Fps + * + * The target FPS of the video + */ + export_fps?: number + /** + * Negative Prompt + * + * The negative prompt to generate video from + */ + negative_prompt?: string + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same video every time. + * + */ + seed?: number +} + +/** + * LoraWeight + */ +export type SchemaLoraWeight = { + /** + * Path + * + * URL or the path to the LoRA weights. + */ + path: string + /** + * Scale + * + * + * The scale of the LoRA weight.
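
A minimal sketch of steering the Kling v1 camera via the request type above; whether a preset camera_control may be combined with advanced_camera_control is not stated in the schema, so only one is set here:

const klingShot: SchemaKlingVideoV1StandardTextToVideoInput = {
  prompt: 'slow aerial reveal of a mountain monastery in fog',
  duration: '10',
  advanced_camera_control: {
    movement_type: 'zoom',
    movement_value: 5, // scale semantics are model-defined
  },
}
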
This is used to scale the LoRA weight + * before merging it with the base model. + * + */ + scale?: number +} + +/** + * Output + */ +export type SchemaTranspixarOutput = { + /** + * Prompt + * + * The prompt used for generating the video. + */ + prompt: string + /** + * Videos + * + * The URL to the generated video + */ + videos: Array<SchemaFile> + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Seed + * + * + * Seed of the generated video. It will be the same value of the one passed in the + * input or the randomly generated one that was used in case none was passed. + * + */ + seed: number +} + +/** + * BaseInput + */ +export type SchemaTranspixarInput = { + /** + * Prompt + * + * The prompt to generate the video from. + */ + prompt: string + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related video to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Export Fps + * + * The target FPS of the video + */ + export_fps?: number + /** + * Negative Prompt + * + * The negative prompt to generate video from + */ + negative_prompt?: string + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same video every time. + * + */ + seed?: number | unknown +} + +/** + * HunyuanT2VResponse + */ +export type SchemaHunyuanVideoLoraOutput = { + /** + * Seed + * + * The seed used for generating the video. + */ + seed: number + /** + * Video + */ + video: SchemaFile +} + +/** + * HunyuanT2VRequest + */ +export type SchemaHunyuanVideoLoraInput = { + /** + * Prompt + * + * The prompt to generate the video from. + */ + prompt: string + /** + * Aspect Ratio (W:H) + * + * The aspect ratio of the video to generate. + */ + aspect_ratio?: '16:9' | '9:16' + /** + * Resolution + * + * The resolution of the video to generate. + */ + resolution?: '480p' | '580p' | '720p' + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use any number of LoRAs + * and they will be merged together to generate the final image. + * + */ + loras?: Array<SchemaLoraWeight> + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * The seed to use for generating the video. + */ + seed?: number + /** + * Number of Frames + * + * The number of frames to generate. + */ + num_frames?: '129' | '85' + /** + * Pro Mode + * + * By default, generations are done with 35 steps. Pro mode does 55 steps which results in higher quality videos but will take more time and cost 2x more billing units.
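
A sketch of attaching a LoRA via SchemaLoraWeight, with a hypothetical weights URL; note the cogvideox-5b description above says only one LoRA is currently supported:

const styleLora: SchemaLoraWeight = {
  path: 'https://example.com/my-style.safetensors', // hypothetical URL
  scale: 0.8,
}

const cogRequest: SchemaCogvideox5bInput = {
  prompt: 'an isometric city block with cartoon shading',
  loras: [styleLora], // a single LoRA, per the description above
}
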
+ */ + pro_mode?: boolean +} + +/** + * Ray2T2VOutput + */ +export type SchemaLumaDreamMachineRay2Output = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * Ray2TextToVideoRequest + */ +export type SchemaLumaDreamMachineRay2Input = { + /** + * Prompt + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: '16:9' | '9:16' | '4:3' | '3:4' | '21:9' | '9:21' + /** + * Resolution + * + * The resolution of the generated video (720p costs 2x more, 1080p costs 4x more) + */ + resolution?: '540p' | '720p' | '1080p' + /** + * Loop + * + * Whether the video should loop (end of video is blended with the beginning) + */ + loop?: boolean + /** + * Duration + * + * The duration of the generated video (9s costs 2x more) + */ + duration?: '5s' | '9s' +} + +/** + * VideoOutput + */ +export type SchemaPixverseV35TextToVideoFastOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * FastTextToVideoRequest + */ +export type SchemaPixverseV35TextToVideoFastInput = { + /** + * Prompt + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: '16:9' | '4:3' | '1:1' | '3:4' | '9:16' + /** + * Resolution + * + * The resolution of the generated video + */ + resolution?: '360p' | '540p' | '720p' + /** + * Style + * + * The style of the generated video + */ + style?: 'anime' | '3d_animation' | 'clay' | 'comic' | 'cyberpunk' + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same video every time. + * + */ + seed?: number + /** + * Negative Prompt + * + * Negative prompt to be used for the generation + */ + negative_prompt?: string +} + +/** + * VideoOutput + */ +export type SchemaPixverseV35TextToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * TextToVideoRequest + */ +export type SchemaPixverseV35TextToVideoInput = { + /** + * Prompt + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: '16:9' | '4:3' | '1:1' | '3:4' | '9:16' + /** + * Resolution + * + * The resolution of the generated video + */ + resolution?: '360p' | '540p' | '720p' | '1080p' + /** + * Style + * + * The style of the generated video + */ + style?: 'anime' | '3d_animation' | 'clay' | 'comic' | 'cyberpunk' + /** + * Duration + * + * The duration of the generated video in seconds. 8s videos cost double. 1080p videos are limited to 5 seconds + */ + duration?: '5' | '8' + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same video every time. + * + */ + seed?: number + /** + * Negative Prompt + * + * Negative prompt to be used for the generation + */ + negative_prompt?: string +} + +/** + * T2VDirectorOutput + */ +export type SchemaMinimaxVideo01DirectorOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * TextToVideoDirectorRequest + */ +export type SchemaMinimaxVideo01DirectorInput = { + /** + * Prompt Optimizer + * + * Whether to use the model's prompt optimizer + */ + prompt_optimizer?: boolean + /** + * Prompt + * + * Text prompt for video generation. Camera movement instructions can be added using square brackets (e.g. [Pan left] or [Zoom in]). You can use up to 3 combined movements per prompt. 
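
The Pixverse docs above note that 8s clips cost double and that 1080p output is limited to 5 seconds; a small guard sketch under that reading:

function pixverseDuration(
  resolution: NonNullable<SchemaPixverseV35TextToVideoInput['resolution']>,
  preferLong: boolean,
): NonNullable<SchemaPixverseV35TextToVideoInput['duration']> {
  // 1080p caps at 5s; otherwise 8s is allowed but billed at 2x.
  return resolution === '1080p' ? '5' : preferLong ? '8' : '5'
}
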
Supported movements: Truck left/right, Pan left/right, Push in/Pull out, Pedestal up/down, Tilt up/down, Zoom in/out, Shake, Tracking shot, Static shot. For example: [Truck left, Pan right, Zoom in]. For a more detailed guide, refer to https://sixth-switch-2ac.notion.site/T2V-01-Director-Model-Tutorial-with-camera-movement-1886c20a98eb80f395b8e05291ad8645 + */ + prompt: string +} + +/** + * TextToVideoOutput + */ +export type SchemaVeo2Output = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * TextToVideoInput + */ +export type SchemaVeo2Input = { + /** + * Prompt + * + * The text prompt describing the video you want to generate + */ + prompt: string + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: '5s' | '6s' | '7s' | '8s' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: '16:9' | '9:16' + /** + * Seed + * + * A seed to use for the video generation + */ + seed?: number + /** + * Negative Prompt + * + * A negative prompt to guide the video generation + */ + negative_prompt?: string + /** + * Enhance Prompt + * + * Whether to enhance the video generation + */ + enhance_prompt?: boolean +} + +/** + * WanT2VResponse + */ +export type SchemaWanT2vOutput = { + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * WanT2VRequest + */ +export type SchemaWanT2vInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Aspect Ratio + * + * Aspect ratio of the generated video (16:9 or 9:16). + */ + aspect_ratio?: '9:16' | '16:9' + /** + * Resolution + * + * Resolution of the generated video (480p, 580p, or 720p). + */ + resolution?: '480p' | '580p' | '720p' + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Turbo Mode + * + * If true, the video will be generated faster with no noticeable degradation in the visual quality. + */ + turbo_mode?: boolean + /** + * Frames Per Second + * + * Frames per second of the generated video. Must be between 5 and 24. + */ + frames_per_second?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Num Frames + * + * Number of frames to generate. Must be between 81 and 100 (inclusive). + */ + num_frames?: number + /** + * Num Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: number + /** + * Negative Prompt + * + * Negative prompt for video generation. + */ + negative_prompt?: string + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion.
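
A hedged sketch of submitting a typed payload to the queue host named in ClientOptions. The route shape and the "Authorization: Key ..." header format are assumptions drawn from the security scheme in this diff, and FAL_KEY is a hypothetical environment variable:

async function submitVeo2(input: SchemaVeo2Input): Promise<unknown> {
  const res = await fetch('https://queue.fal.run/fal-ai/veo2', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      Authorization: `Key ${process.env.FAL_KEY}`,
    },
    body: JSON.stringify(input),
  })
  return res.json() // typically a QueueStatus-shaped body
}
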
+ */ + enable_prompt_expansion?: boolean +} + +/** + * T2VOutput + */ +export type SchemaKlingVideoV16ProTextToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * TextToVideoRequest + */ +export type SchemaKlingVideoV16ProTextToVideoInput = { + /** + * Prompt + */ + prompt: string + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: '5' | '10' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video frame + */ + aspect_ratio?: '16:9' | '9:16' | '1:1' + /** + * Negative Prompt + */ + negative_prompt?: string + /** + * Cfg Scale + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt. + * + */ + cfg_scale?: number +} + +/** + * TextToVideoOutput + */ +export type SchemaLtxVideoV095Output = { + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * TextToVideoInput + */ +export type SchemaLtxVideoV095Input = { + /** + * Prompt + * + * Text prompt to guide generation + */ + prompt: string + /** + * Resolution + * + * Resolution of the generated video (480p or 720p). + */ + resolution?: '480p' | '720p' + /** + * Aspect Ratio + * + * Aspect ratio of the generated video (16:9 or 9:16). + */ + aspect_ratio?: '9:16' | '16:9' + /** + * Expand Prompt + * + * Whether to expand the prompt using the model's own capabilities. + */ + expand_prompt?: boolean + /** + * Seed + * + * Random seed for generation + */ + seed?: number + /** + * Num Inference Steps + * + * Number of inference steps + */ + num_inference_steps?: number + /** + * Negative Prompt + * + * Negative prompt for generation + */ + negative_prompt?: string +} + +/** + * VideoEffectsOutput + */ +export type SchemaKlingVideoV16StandardEffectsOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * VideoEffectsRequest + */ +export type SchemaKlingVideoV16StandardEffectsInput = { + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: '5' | '10' + /** + * Input Image Urls + * + * URL of images to be used for hug, kiss or heart_gesture video. 
+ */ + input_image_urls?: Array<string> + /** + * Effect Scene + * + * The effect scene to use for the video generation + */ + effect_scene: + | 'hug' + | 'kiss' + | 'heart_gesture' + | 'squish' + | 'expansion' + | 'fuzzyfuzzy' + | 'bloombloom' + | 'dizzydizzy' + | 'jelly_press' + | 'jelly_slice' + | 'jelly_squish' + | 'jelly_jiggle' + | 'pixelpixel' + | 'yearbook' + | 'instant_film' + | 'anime_figure' + | 'rocketrocket' + | 'fly_fly' + | 'disappear' + | 'lightning_power' + | 'bullet_time' + | 'bullet_time_360' + | 'media_interview' + | 'day_to_night' + | "let's_ride" + | 'jumpdrop' + | 'swish_swish' + | 'running_man' + | 'jazz_jazz' + | 'swing_swing' + | 'skateskate' + | 'building_sweater' + | 'pure_white_wings' + | 'black_wings' + | 'golden_wing' + | 'pink_pink_wings' + | 'rampage_ape' + | 'a_list_look' + | 'countdown_teleport' + | 'firework_2026' + | 'instant_christmas' + | 'birthday_star' + | 'firework' + | 'celebration' + | 'tiger_hug_pro' + | 'pet_lion_pro' + | 'guardian_spirit' + | 'squeeze_scream' + | 'inner_voice' + | 'memory_alive' + | 'guess_what' + | 'eagle_snatch' + | 'hug_from_past' + | 'instant_kid' + | 'dollar_rain' + | 'cry_cry' + | 'building_collapse' + | 'mushroom' + | 'jesus_hug' + | 'shark_alert' + | 'lie_flat' + | 'polar_bear_hug' + | 'brown_bear_hug' + | 'office_escape_plow' + | 'watermelon_bomb' + | 'boss_coming' + | 'wig_out' + | 'car_explosion' + | 'tiger_hug' + | 'siblings' + | 'construction_worker' + | 'snatched' + | 'felt_felt' + | 'plushcut' + | 'drunk_dance' + | 'drunk_dance_pet' + | 'daoma_dance' + | 'bouncy_dance' + | 'smooth_sailing_dance' + | 'new_year_greeting' + | 'lion_dance' + | 'prosperity' + | 'great_success' + | 'golden_horse_fortune' + | 'red_packet_box' + | 'lucky_horse_year' + | 'lucky_red_packet' + | 'lucky_money_come' + | 'lion_dance_pet' + | 'dumpling_making_pet' + | 'fish_making_pet' + | 'pet_red_packet' + | 'lantern_glow' + | 'expression_challenge' + | 'overdrive' + | 'heart_gesture_dance' + | 'poping' + | 'martial_arts' + | 'running' + | 'nezha' + | 'motorcycle_dance' + | 'subject_3_dance' + | 'ghost_step_dance' + | 'phantom_jewel' + | 'zoom_out' + | 'cheers_2026' + | 'kiss_pro' + | 'fight_pro' + | 'hug_pro' + | 'heart_gesture_pro' + | 'dollar_rain_pro' + | 'pet_bee_pro' + | 'santa_random_surprise' + | 'magic_match_tree' + | 'happy_birthday' + | 'thumbs_up_pro' + | 'surprise_bouquet' + | 'bouquet_drop' + | '3d_cartoon_1_pro' + | 'glamour_photo_shoot' + | 'box_of_joy' + | 'first_toast_of_the_year' + | 'my_santa_pic' + | 'santa_gift' + | 'steampunk_christmas' + | 'snowglobe' + | 'christmas_photo_shoot' + | 'ornament_crash' + | 'santa_express' + | 'particle_santa_surround' + | 'coronation_of_frost' + | 'spark_in_the_snow' + | 'scarlet_and_snow' + | 'cozy_toon_wrap' + | 'bullet_time_lite' + | 'magic_cloak' + | 'balloon_parade' + | 'jumping_ginger_joy' + | 'c4d_cartoon_pro' + | 'venomous_spider' + | 'throne_of_king' + | 'luminous_elf' + | 'woodland_elf' + | 'japanese_anime_1' + | 'american_comics' + | 'snowboarding' + | 'witch_transform' + | 'vampire_transform' + | 'pumpkin_head_transform' + | 'demon_transform' + | 'mummy_transform' + | 'zombie_transform' + | 'cute_pumpkin_transform' + | 'cute_ghost_transform' + | 'knock_knock_halloween' + | 'halloween_escape' + | 'baseball' + | 'trampoline' + | 'trampoline_night' + | 'pucker_up' + | 'feed_mooncake' + | 'flyer' + | 'dishwasher' + | 'pet_chinese_opera' + | 'magic_fireball' + | 'gallery_ring' + | 'pet_moto_rider' + | 'muscle_pet' + | 'pet_delivery' + | 'mythic_style' + | 'steampunk' + | '3d_cartoon_2' +
| 'pet_chef' + | 'santa_gifts' + | 'santa_hug' + | 'girlfriend' + | 'boyfriend' + | 'heart_gesture_1' + | 'pet_wizard' + | 'smoke_smoke' + | 'gun_shot' + | 'double_gun' + | 'pet_warrior' + | 'long_hair' + | 'pet_dance' + | 'wool_curly' + | 'pet_bee' + | 'marry_me' + | 'piggy_morph' + | 'ski_ski' + | 'magic_broom' + | 'splashsplash' + | 'surfsurf' + | 'fairy_wing' + | 'angel_wing' + | 'dark_wing' + | 'emoji' +} + +/** + * VideoEffectsOutput + */ +export type SchemaKlingVideoV1StandardEffectsOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * VideoEffectsRequest + */ +export type SchemaKlingVideoV1StandardEffectsInput = { + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: '5' | '10' + /** + * Input Image Urls + * + * URL of images to be used for hug, kiss or heart_gesture video. + */ + input_image_urls?: Array<string> + /** + * Effect Scene + * + * The effect scene to use for the video generation + */ + effect_scene: + | 'hug' + | 'kiss' + | 'heart_gesture' + | 'squish' + | 'expansion' + | 'fuzzyfuzzy' + | 'bloombloom' + | 'dizzydizzy' + | 'jelly_press' + | 'jelly_slice' + | 'jelly_squish' + | 'jelly_jiggle' + | 'pixelpixel' + | 'yearbook' + | 'instant_film' + | 'anime_figure' + | 'rocketrocket' + | 'fly_fly' + | 'disappear' + | 'lightning_power' + | 'bullet_time' + | 'bullet_time_360' + | 'media_interview' + | 'day_to_night' + | "let's_ride" + | 'jumpdrop' + | 'swish_swish' + | 'running_man' + | 'jazz_jazz' + | 'swing_swing' + | 'skateskate' + | 'building_sweater' + | 'pure_white_wings' + | 'black_wings' + | 'golden_wing' + | 'pink_pink_wings' + | 'rampage_ape' + | 'a_list_look' + | 'countdown_teleport' + | 'firework_2026' + | 'instant_christmas' + | 'birthday_star' + | 'firework' + | 'celebration' + | 'tiger_hug_pro' + | 'pet_lion_pro' + | 'guardian_spirit' + | 'squeeze_scream' + | 'inner_voice' + | 'memory_alive' + | 'guess_what' + | 'eagle_snatch' + | 'hug_from_past' + | 'instant_kid' + | 'dollar_rain' + | 'cry_cry' + | 'building_collapse' + | 'mushroom' + | 'jesus_hug' + | 'shark_alert' + | 'lie_flat' + | 'polar_bear_hug' + | 'brown_bear_hug' + | 'office_escape_plow' + | 'watermelon_bomb' + | 'boss_coming' + | 'wig_out' + | 'car_explosion' + | 'tiger_hug' + | 'siblings' + | 'construction_worker' + | 'snatched' + | 'felt_felt' + | 'plushcut' + | 'drunk_dance' + | 'drunk_dance_pet' + | 'daoma_dance' + | 'bouncy_dance' + | 'smooth_sailing_dance' + | 'new_year_greeting' + | 'lion_dance' + | 'prosperity' + | 'great_success' + | 'golden_horse_fortune' + | 'red_packet_box' + | 'lucky_horse_year' + | 'lucky_red_packet' + | 'lucky_money_come' + | 'lion_dance_pet' + | 'dumpling_making_pet' + | 'fish_making_pet' + | 'pet_red_packet' + | 'lantern_glow' + | 'expression_challenge' + | 'overdrive' + | 'heart_gesture_dance' + | 'poping' + | 'martial_arts' + | 'running' + | 'nezha' + | 'motorcycle_dance' + | 'subject_3_dance' + | 'ghost_step_dance' + | 'phantom_jewel' + | 'zoom_out' + | 'cheers_2026' + | 'kiss_pro' + | 'fight_pro' + | 'hug_pro' + | 'heart_gesture_pro' + | 'dollar_rain_pro' + | 'pet_bee_pro' + | 'santa_random_surprise' + | 'magic_match_tree' + | 'happy_birthday' + | 'thumbs_up_pro' + | 'surprise_bouquet' + | 'bouquet_drop' + | '3d_cartoon_1_pro' + | 'glamour_photo_shoot' + | 'box_of_joy' + | 'first_toast_of_the_year' + | 'my_santa_pic' + | 'santa_gift' + | 'steampunk_christmas' + | 'snowglobe' + | 'christmas_photo_shoot' + | 'ornament_crash' + | 'santa_express' + | 'particle_santa_surround' + |
'coronation_of_frost' + | 'spark_in_the_snow' + | 'scarlet_and_snow' + | 'cozy_toon_wrap' + | 'bullet_time_lite' + | 'magic_cloak' + | 'balloon_parade' + | 'jumping_ginger_joy' + | 'c4d_cartoon_pro' + | 'venomous_spider' + | 'throne_of_king' + | 'luminous_elf' + | 'woodland_elf' + | 'japanese_anime_1' + | 'american_comics' + | 'snowboarding' + | 'witch_transform' + | 'vampire_transform' + | 'pumpkin_head_transform' + | 'demon_transform' + | 'mummy_transform' + | 'zombie_transform' + | 'cute_pumpkin_transform' + | 'cute_ghost_transform' + | 'knock_knock_halloween' + | 'halloween_escape' + | 'baseball' + | 'trampoline' + | 'trampoline_night' + | 'pucker_up' + | 'feed_mooncake' + | 'flyer' + | 'dishwasher' + | 'pet_chinese_opera' + | 'magic_fireball' + | 'gallery_ring' + | 'pet_moto_rider' + | 'muscle_pet' + | 'pet_delivery' + | 'mythic_style' + | 'steampunk' + | '3d_cartoon_2' + | 'pet_chef' + | 'santa_gifts' + | 'santa_hug' + | 'girlfriend' + | 'boyfriend' + | 'heart_gesture_1' + | 'pet_wizard' + | 'smoke_smoke' + | 'gun_shot' + | 'double_gun' + | 'pet_warrior' + | 'long_hair' + | 'pet_dance' + | 'wool_curly' + | 'pet_bee' + | 'marry_me' + | 'piggy_morph' + | 'ski_ski' + | 'magic_broom' + | 'splashsplash' + | 'surfsurf' + | 'fairy_wing' + | 'angel_wing' + | 'dark_wing' + | 'emoji' +} + +/** + * VideoEffectsOutput + */ +export type SchemaKlingVideoV16ProEffectsOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * VideoEffectsRequest + */ +export type SchemaKlingVideoV16ProEffectsInput = { + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: '5' | '10' + /** + * Input Image Urls + * + * URL of images to be used for hug, kiss or heart_gesture video. + */ + input_image_urls?: Array<string> + /** + * Effect Scene + * + * The effect scene to use for the video generation + */ + effect_scene: + | 'hug' + | 'kiss' + | 'heart_gesture' + | 'squish' + | 'expansion' + | 'fuzzyfuzzy' + | 'bloombloom' + | 'dizzydizzy' + | 'jelly_press' + | 'jelly_slice' + | 'jelly_squish' + | 'jelly_jiggle' + | 'pixelpixel' + | 'yearbook' + | 'instant_film' + | 'anime_figure' + | 'rocketrocket' + | 'fly_fly' + | 'disappear' + | 'lightning_power' + | 'bullet_time' + | 'bullet_time_360' + | 'media_interview' + | 'day_to_night' + | "let's_ride" + | 'jumpdrop' + | 'swish_swish' + | 'running_man' + | 'jazz_jazz' + | 'swing_swing' + | 'skateskate' + | 'building_sweater' + | 'pure_white_wings' + | 'black_wings' + | 'golden_wing' + | 'pink_pink_wings' + | 'rampage_ape' + | 'a_list_look' + | 'countdown_teleport' + | 'firework_2026' + | 'instant_christmas' + | 'birthday_star' + | 'firework' + | 'celebration' + | 'tiger_hug_pro' + | 'pet_lion_pro' + | 'guardian_spirit' + | 'squeeze_scream' + | 'inner_voice' + | 'memory_alive' + | 'guess_what' + | 'eagle_snatch' + | 'hug_from_past' + | 'instant_kid' + | 'dollar_rain' + | 'cry_cry' + | 'building_collapse' + | 'mushroom' + | 'jesus_hug' + | 'shark_alert' + | 'lie_flat' + | 'polar_bear_hug' + | 'brown_bear_hug' + | 'office_escape_plow' + | 'watermelon_bomb' + | 'boss_coming' + | 'wig_out' + | 'car_explosion' + | 'tiger_hug' + | 'siblings' + | 'construction_worker' + | 'snatched' + | 'felt_felt' + | 'plushcut' + | 'drunk_dance' + | 'drunk_dance_pet' + | 'daoma_dance' + | 'bouncy_dance' + | 'smooth_sailing_dance' + | 'new_year_greeting' + | 'lion_dance' + | 'prosperity' + | 'great_success' + | 'golden_horse_fortune' + | 'red_packet_box' + | 'lucky_horse_year' + | 'lucky_red_packet' + | 'lucky_money_come' + |
'lion_dance_pet' + | 'dumpling_making_pet' + | 'fish_making_pet' + | 'pet_red_packet' + | 'lantern_glow' + | 'expression_challenge' + | 'overdrive' + | 'heart_gesture_dance' + | 'poping' + | 'martial_arts' + | 'running' + | 'nezha' + | 'motorcycle_dance' + | 'subject_3_dance' + | 'ghost_step_dance' + | 'phantom_jewel' + | 'zoom_out' + | 'cheers_2026' + | 'kiss_pro' + | 'fight_pro' + | 'hug_pro' + | 'heart_gesture_pro' + | 'dollar_rain_pro' + | 'pet_bee_pro' + | 'santa_random_surprise' + | 'magic_match_tree' + | 'happy_birthday' + | 'thumbs_up_pro' + | 'surprise_bouquet' + | 'bouquet_drop' + | '3d_cartoon_1_pro' + | 'glamour_photo_shoot' + | 'box_of_joy' + | 'first_toast_of_the_year' + | 'my_santa_pic' + | 'santa_gift' + | 'steampunk_christmas' + | 'snowglobe' + | 'christmas_photo_shoot' + | 'ornament_crash' + | 'santa_express' + | 'particle_santa_surround' + | 'coronation_of_frost' + | 'spark_in_the_snow' + | 'scarlet_and_snow' + | 'cozy_toon_wrap' + | 'bullet_time_lite' + | 'magic_cloak' + | 'balloon_parade' + | 'jumping_ginger_joy' + | 'c4d_cartoon_pro' + | 'venomous_spider' + | 'throne_of_king' + | 'luminous_elf' + | 'woodland_elf' + | 'japanese_anime_1' + | 'american_comics' + | 'snowboarding' + | 'witch_transform' + | 'vampire_transform' + | 'pumpkin_head_transform' + | 'demon_transform' + | 'mummy_transform' + | 'zombie_transform' + | 'cute_pumpkin_transform' + | 'cute_ghost_transform' + | 'knock_knock_halloween' + | 'halloween_escape' + | 'baseball' + | 'trampoline' + | 'trampoline_night' + | 'pucker_up' + | 'feed_mooncake' + | 'flyer' + | 'dishwasher' + | 'pet_chinese_opera' + | 'magic_fireball' + | 'gallery_ring' + | 'pet_moto_rider' + | 'muscle_pet' + | 'pet_delivery' + | 'mythic_style' + | 'steampunk' + | '3d_cartoon_2' + | 'pet_chef' + | 'santa_gifts' + | 'santa_hug' + | 'girlfriend' + | 'boyfriend' + | 'heart_gesture_1' + | 'pet_wizard' + | 'smoke_smoke' + | 'gun_shot' + | 'double_gun' + | 'pet_warrior' + | 'long_hair' + | 'pet_dance' + | 'wool_curly' + | 'pet_bee' + | 'marry_me' + | 'piggy_morph' + | 'ski_ski' + | 'magic_broom' + | 'splashsplash' + | 'surfsurf' + | 'fairy_wing' + | 'angel_wing' + | 'dark_wing' + | 'emoji' +} + +/** + * VideoEffectsOutput + */ +export type SchemaKlingVideoV15ProEffectsOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * VideoEffectsRequest + */ +export type SchemaKlingVideoV15ProEffectsInput = { + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: '5' | '10' + /** + * Input Image Urls + * + * URL of images to be used for hug, kiss or heart_gesture video. 
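
Per the input_image_urls docs in these effects schemas, the hug, kiss and heart_gesture scenes consume the supplied image URLs; a minimal request sketch with hypothetical URLs:

const effectRequest: SchemaKlingVideoV16ProEffectsInput = {
  effect_scene: 'hug',
  duration: '5',
  input_image_urls: [
    'https://example.com/person-a.png',
    'https://example.com/person-b.png',
  ],
}
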
+ */ + input_image_urls?: Array<string> + /** + * Effect Scene + * + * The effect scene to use for the video generation + */ + effect_scene: + | 'hug' + | 'kiss' + | 'heart_gesture' + | 'squish' + | 'expansion' + | 'fuzzyfuzzy' + | 'bloombloom' + | 'dizzydizzy' + | 'jelly_press' + | 'jelly_slice' + | 'jelly_squish' + | 'jelly_jiggle' + | 'pixelpixel' + | 'yearbook' + | 'instant_film' + | 'anime_figure' + | 'rocketrocket' + | 'fly_fly' + | 'disappear' + | 'lightning_power' + | 'bullet_time' + | 'bullet_time_360' + | 'media_interview' + | 'day_to_night' + | "let's_ride" + | 'jumpdrop' + | 'swish_swish' + | 'running_man' + | 'jazz_jazz' + | 'swing_swing' + | 'skateskate' + | 'building_sweater' + | 'pure_white_wings' + | 'black_wings' + | 'golden_wing' + | 'pink_pink_wings' + | 'rampage_ape' + | 'a_list_look' + | 'countdown_teleport' + | 'firework_2026' + | 'instant_christmas' + | 'birthday_star' + | 'firework' + | 'celebration' + | 'tiger_hug_pro' + | 'pet_lion_pro' + | 'guardian_spirit' + | 'squeeze_scream' + | 'inner_voice' + | 'memory_alive' + | 'guess_what' + | 'eagle_snatch' + | 'hug_from_past' + | 'instant_kid' + | 'dollar_rain' + | 'cry_cry' + | 'building_collapse' + | 'mushroom' + | 'jesus_hug' + | 'shark_alert' + | 'lie_flat' + | 'polar_bear_hug' + | 'brown_bear_hug' + | 'office_escape_plow' + | 'watermelon_bomb' + | 'boss_coming' + | 'wig_out' + | 'car_explosion' + | 'tiger_hug' + | 'siblings' + | 'construction_worker' + | 'snatched' + | 'felt_felt' + | 'plushcut' + | 'drunk_dance' + | 'drunk_dance_pet' + | 'daoma_dance' + | 'bouncy_dance' + | 'smooth_sailing_dance' + | 'new_year_greeting' + | 'lion_dance' + | 'prosperity' + | 'great_success' + | 'golden_horse_fortune' + | 'red_packet_box' + | 'lucky_horse_year' + | 'lucky_red_packet' + | 'lucky_money_come' + | 'lion_dance_pet' + | 'dumpling_making_pet' + | 'fish_making_pet' + | 'pet_red_packet' + | 'lantern_glow' + | 'expression_challenge' + | 'overdrive' + | 'heart_gesture_dance' + | 'poping' + | 'martial_arts' + | 'running' + | 'nezha' + | 'motorcycle_dance' + | 'subject_3_dance' + | 'ghost_step_dance' + | 'phantom_jewel' + | 'zoom_out' + | 'cheers_2026' + | 'kiss_pro' + | 'fight_pro' + | 'hug_pro' + | 'heart_gesture_pro' + | 'dollar_rain_pro' + | 'pet_bee_pro' + | 'santa_random_surprise' + | 'magic_match_tree' + | 'happy_birthday' + | 'thumbs_up_pro' + | 'surprise_bouquet' + | 'bouquet_drop' + | '3d_cartoon_1_pro' + | 'glamour_photo_shoot' + | 'box_of_joy' + | 'first_toast_of_the_year' + | 'my_santa_pic' + | 'santa_gift' + | 'steampunk_christmas' + | 'snowglobe' + | 'christmas_photo_shoot' + | 'ornament_crash' + | 'santa_express' + | 'particle_santa_surround' + | 'coronation_of_frost' + | 'spark_in_the_snow' + | 'scarlet_and_snow' + | 'cozy_toon_wrap' + | 'bullet_time_lite' + | 'magic_cloak' + | 'balloon_parade' + | 'jumping_ginger_joy' + | 'c4d_cartoon_pro' + | 'venomous_spider' + | 'throne_of_king' + | 'luminous_elf' + | 'woodland_elf' + | 'japanese_anime_1' + | 'american_comics' + | 'snowboarding' + | 'witch_transform' + | 'vampire_transform' + | 'pumpkin_head_transform' + | 'demon_transform' + | 'mummy_transform' + | 'zombie_transform' + | 'cute_pumpkin_transform' + | 'cute_ghost_transform' + | 'knock_knock_halloween' + | 'halloween_escape' + | 'baseball' + | 'trampoline' + | 'trampoline_night' + | 'pucker_up' + | 'feed_mooncake' + | 'flyer' + | 'dishwasher' + | 'pet_chinese_opera' + | 'magic_fireball' + | 'gallery_ring' + | 'pet_moto_rider' + | 'muscle_pet' + | 'pet_delivery' + | 'mythic_style' + | 'steampunk' + | '3d_cartoon_2' +
| 'pet_chef' + | 'santa_gifts' + | 'santa_hug' + | 'girlfriend' + | 'boyfriend' + | 'heart_gesture_1' + | 'pet_wizard' + | 'smoke_smoke' + | 'gun_shot' + | 'double_gun' + | 'pet_warrior' + | 'long_hair' + | 'pet_dance' + | 'wool_curly' + | 'pet_bee' + | 'marry_me' + | 'piggy_morph' + | 'ski_ski' + | 'magic_broom' + | 'splashsplash' + | 'surfsurf' + | 'fairy_wing' + | 'angel_wing' + | 'dark_wing' + | 'emoji' +} + +/** + * WanProT2VResponse + */ +export type SchemaWanProTextToVideoOutput = { + video: SchemaFile +} + +/** + * WanProT2VRequest + */ +export type SchemaWanProTextToVideoInput = { + /** + * Prompt + * + * The prompt to generate the video + */ + prompt: string + /** + * Enable Safety Checker + * + * Whether to enable the safety checker + */ + enable_safety_checker?: boolean + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number | unknown +} + +/** + * Pika22TextToVideoOutput + * + * Output model for Pika 2.2 text-to-video generation + */ +export type SchemaPikaV22TextToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * Pika22TextToVideoRequest + * + * Request model for Pika 2.2 text-to-video generation + */ +export type SchemaPikaV22TextToVideoInput = { + /** + * Prompt + */ + prompt: string + /** + * Resolution + * + * The resolution of the generated video + */ + resolution?: '1080p' | '720p' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: '16:9' | '9:16' | '1:1' | '4:5' | '5:4' | '3:2' | '2:3' + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: 5 | 10 + /** + * Seed + * + * The seed for the random number generator + */ + seed?: number + /** + * Negative Prompt + * + * A negative prompt to guide the model + */ + negative_prompt?: string +} + +/** + * TextToVideoV21Output + * + * Output from text-to-video generation + */ +export type SchemaPikaV21TextToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * TextToVideov21Input + * + * Base request for text-to-video generation + */ +export type SchemaPikaV21TextToVideoInput = { + /** + * Prompt + */ + prompt: string + /** + * Resolution + * + * The resolution of the generated video + */ + resolution?: '720p' | '1080p' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: '16:9' | '9:16' | '1:1' | '4:5' | '5:4' | '3:2' | '2:3' + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: number + /** + * Seed + * + * The seed for the random number generator + */ + seed?: number + /** + * Negative Prompt + * + * A negative prompt to guide the model + */ + negative_prompt?: string +} + +/** + * TurboTextToVideoOutput + * + * Output from text-to-video generation + */ +export type SchemaPikaV2TurboTextToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * TextToVideoTurboInput + * + * Base request for text-to-video generation + */ +export type SchemaPikaV2TurboTextToVideoInput = { + /** + * Prompt + */ + prompt: string + /** + * Resolution + * + * The resolution of the generated video + */ + resolution?: '720p' | '1080p' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: '16:9' | '9:16' | '1:1' | '4:5' | '5:4' | '3:2' | '2:3' + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: number + /** + * Seed + * + * The seed 
for the random number generator + */ + seed?: number + /** + * Negative Prompt + * + * A negative prompt to guide the model + */ + negative_prompt?: string +} + +/** + * Ray2T2VOutput + */ +export type SchemaLumaDreamMachineRay2FlashOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * Ray2TextToVideoRequest + */ +export type SchemaLumaDreamMachineRay2FlashInput = { + /** + * Prompt + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: '16:9' | '9:16' | '4:3' | '3:4' | '21:9' | '9:21' + /** + * Resolution + * + * The resolution of the generated video (720p costs 2x more, 1080p costs 4x more) + */ + resolution?: '540p' | '720p' | '1080p' + /** + * Loop + * + * Whether the video should loop (end of video is blended with the beginning) + */ + loop?: boolean + /** + * Duration + * + * The duration of the generated video (9s costs 2x more) + */ + duration?: '5s' | '9s' +} + +/** + * WanT2VResponse + */ +export type SchemaWanT2vLoraOutput = { + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * WanLoRARequest + */ +export type SchemaWanT2vLoraInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Resolution + * + * Resolution of the generated video (480p, 580p, or 720p). + */ + resolution?: '480p' | '580p' | '720p' + /** + * Reverse Video + * + * If true, the video will be reversed. + */ + reverse_video?: boolean + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Aspect Ratio + * + * Aspect ratio of the generated video (16:9 or 9:16). + */ + aspect_ratio?: '9:16' | '16:9' + /** + * Loras + * + * LoRA weights to be used in the inference. + */ + loras?: Array<SchemaLoraWeight> + /** + * Frames Per Second + * + * Frames per second of the generated video. Must be between 5 and 24. + */ + frames_per_second?: number + /** + * Turbo Mode + * + * If true, the video will be generated faster with no noticeable degradation in the visual quality. + */ + turbo_mode?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Num Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: number + /** + * Num Frames + * + * Number of frames to generate. Must be between 81 and 100 (inclusive). + */ + num_frames?: number + /** + * Negative Prompt + * + * Negative prompt for video generation. + */ + negative_prompt?: string + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean +} + +/** + * LipsyncOutput + */ +export type SchemaKlingVideoLipsyncTextToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * LipsyncT2VRequest + */ +export type SchemaKlingVideoLipsyncTextToVideoInput = { + /** + * Text + * + * Text content for lip-sync video generation. Max 120 characters. + */ + text: string + /** + * Video Url + * + * The URL of the video to generate the lip sync for. Supports .mp4/.mov, ≤100MB, 2-60s, 720p/1080p only, width/height 720–1920px. If validation fails, an error is returned.
+ */ + video_url: string + /** + * Voice Id + * + * Voice ID to use for speech synthesis + */ + voice_id: + | 'genshin_vindi2' + | 'zhinen_xuesheng' + | 'AOT' + | 'ai_shatang' + | 'genshin_klee2' + | 'genshin_kirara' + | 'ai_kaiya' + | 'oversea_male1' + | 'ai_chenjiahao_712' + | 'girlfriend_4_speech02' + | 'chat1_female_new-3' + | 'chat_0407_5-1' + | 'cartoon-boy-07' + | 'uk_boy1' + | 'cartoon-girl-01' + | 'PeppaPig_platform' + | 'ai_huangzhong_712' + | 'ai_huangyaoshi_712' + | 'ai_laoguowang_712' + | 'chengshu_jiejie' + | 'you_pingjing' + | 'calm_story1' + | 'uk_man2' + | 'laopopo_speech02' + | 'heainainai_speech02' + | 'reader_en_m-v1' + | 'commercial_lady_en_f-v1' + | 'tiyuxi_xuedi' + | 'tiexin_nanyou' + | 'girlfriend_1_speech02' + | 'girlfriend_2_speech02' + | 'zhuxi_speech02' + | 'uk_oldman3' + | 'dongbeilaotie_speech02' + | 'chongqingxiaohuo_speech02' + | 'chuanmeizi_speech02' + | 'chaoshandashu_speech02' + | 'ai_taiwan_man2_speech02' + | 'xianzhanggui_speech02' + | 'tianjinjiejie_speech02' + | 'diyinnansang_DB_CN_M_04-v2' + | 'yizhipiannan-v1' + | 'guanxiaofang-v2' + | 'tianmeixuemei-v1' + | 'daopianyansang-v1' + | 'mengwa-v1' + /** + * Voice Language + * + * The voice language corresponding to the Voice ID + */ + voice_language?: 'zh' | 'en' + /** + * Voice Speed + * + * Speech rate for Text to Video generation + */ + voice_speed?: number +} + +/** + * LipsyncA2VOutput + */ +export type SchemaKlingVideoLipsyncAudioToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * LipsyncA2VRequest + */ +export type SchemaKlingVideoLipsyncAudioToVideoInput = { + /** + * Video Url + * + * The URL of the video to generate the lip sync for. Supports .mp4/.mov, ≤100MB, 2–10s, 720p/1080p only, width/height 720–1920px. + */ + video_url: string + /** + * Audio Url + * + * The URL of the audio to generate the lip sync for. Minimum duration is 2s and maximum duration is 60s. Maximum file size is 5MB. + */ + audio_url: string +} + +/** + * VideoOutputV4 + */ +export type SchemaPixverseV4TextToVideoFastOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * FastTextToVideoRequest + */ +export type SchemaPixverseV4TextToVideoFastInput = { + /** + * Prompt + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: '16:9' | '4:3' | '1:1' | '3:4' | '9:16' + /** + * Resolution + * + * The resolution of the generated video + */ + resolution?: '360p' | '540p' | '720p' + /** + * Style + * + * The style of the generated video + */ + style?: 'anime' | '3d_animation' | 'clay' | 'comic' | 'cyberpunk' + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same video every time. 
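
The lipsync schemas above cap the text field at 120 characters and constrain the source video; a small sketch that trims the text and uses a voice id from the enum (the URL is hypothetical):

function lipsyncRequest(
  text: string,
): SchemaKlingVideoLipsyncTextToVideoInput {
  return {
    text: text.slice(0, 120), // schema limit: max 120 characters
    video_url: 'https://example.com/talking-head.mp4',
    voice_id: 'uk_man2',
    voice_language: 'en',
  }
}
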
+ * + */ + seed?: number + /** + * Negative Prompt + * + * Negative prompt to be used for the generation + */ + negative_prompt?: string +} + +/** + * VideoOutputV4 + */ +export type SchemaPixverseV4TextToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * TextToVideoRequest + */ +export type SchemaPixverseV4TextToVideoInput = { + /** + * Prompt + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: '16:9' | '4:3' | '1:1' | '3:4' | '9:16' + /** + * Resolution + * + * The resolution of the generated video + */ + resolution?: '360p' | '540p' | '720p' | '1080p' + /** + * Style + * + * The style of the generated video + */ + style?: 'anime' | '3d_animation' | 'clay' | 'comic' | 'cyberpunk' + /** + * Duration + * + * The duration of the generated video in seconds. 8s videos cost double. 1080p videos are limited to 5 seconds + */ + duration?: '5' | '8' + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same video every time. + * + */ + seed?: number + /** + * Negative Prompt + * + * Negative prompt to be used for the generation + */ + negative_prompt?: string +} + +/** + * MagiResponse + */ +export type SchemaMagiDistilledOutput = { + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * MagiTextToVideoRequest + */ +export type SchemaMagiDistilledInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Resolution + * + * Resolution of the generated video (480p or 720p). 480p is 0.5 billing units, and 720p is 1 billing unit. + */ + resolution?: '480p' | '720p' + /** + * Aspect Ratio + * + * Aspect ratio of the generated video. If 'auto', the aspect ratio will be determined automatically based on the input image. + */ + aspect_ratio?: 'auto' | '16:9' | '9:16' | '1:1' + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Num Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: 4 | 8 | 16 | 32 + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Num Frames + * + * Number of frames to generate. Must be between 96 and 192 (inclusive). Each additional 24 frames beyond 96 incurs an additional billing unit. + */ + num_frames?: number +} + +/** + * MagiResponse + */ +export type SchemaMagiOutput = { + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * MagiTextToVideoRequest + */ +export type SchemaMagiInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Resolution + * + * Resolution of the generated video (480p or 720p). 480p is 0.5 billing units, and 720p is 1 billing unit. + */ + resolution?: '480p' | '720p' + /** + * Aspect Ratio + * + * Aspect ratio of the generated video. If 'auto', the aspect ratio will be determined automatically based on the input image. + */ + aspect_ratio?: 'auto' | '16:9' | '9:16' | '1:1' + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. 
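Editorial note: the Pixverse schemas above promise determinism: the same seed and prompt on the same model version reproduce the same video. Pinning the seed therefore isolates the effect of other knobs. A sketch with illustrative values, assuming the hypothetical `./fal.types` import path:

```ts
import type { SchemaPixverseV4TextToVideoInput } from './fal.types' // hypothetical path

const base: SchemaPixverseV4TextToVideoInput = {
  prompt: 'A clay-animation cat chasing a ball of yarn',
  style: 'clay',
  resolution: '720p', // note: 1080p caps duration at 5s
  duration: '5', // 8s videos cost double
  seed: 42, // hold constant while comparing styles/resolutions
}

// Same seed + prompt: only the style differs between the two renders.
const cyberpunkVariant = { ...base, style: 'cyberpunk' as const }
```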
+ */ + enable_safety_checker?: boolean + /** + * Num Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: 4 | 8 | 16 | 32 | 64 + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Num Frames + * + * Number of frames to generate. Must be between 96 and 192 (inclusive). Each additional 24 frames beyond 96 incurs an additional billing unit. + */ + num_frames?: number +} + +/** + * Q1TextToVideoOutput + */ +export type SchemaViduQ1TextToVideoOutput = { + /** + * Video + * + * The generated video using the Q1 model + */ + video: SchemaFile +} + +/** + * Q1TextToVideoRequest + */ +export type SchemaViduQ1TextToVideoInput = { + /** + * Prompt + * + * Text prompt for video generation, max 1500 characters + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the output video + */ + aspect_ratio?: '16:9' | '9:16' | '1:1' + /** + * Style + * + * The style of output video + */ + style?: 'general' | 'anime' + /** + * Seed + * + * Seed for the random number generator + */ + seed?: number + /** + * Movement Amplitude + * + * The movement amplitude of objects in the frame + */ + movement_amplitude?: 'auto' | 'small' | 'medium' | 'large' +} + +/** + * VideoOutputV4 + */ +export type SchemaPixverseV45TextToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * TextToVideoRequest + */ +export type SchemaPixverseV45TextToVideoInput = { + /** + * Prompt + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: '16:9' | '4:3' | '1:1' | '3:4' | '9:16' + /** + * Resolution + * + * The resolution of the generated video + */ + resolution?: '360p' | '540p' | '720p' | '1080p' + /** + * Style + * + * The style of the generated video + */ + style?: 'anime' | '3d_animation' | 'clay' | 'comic' | 'cyberpunk' + /** + * Duration + * + * The duration of the generated video in seconds. 8s videos cost double. 1080p videos are limited to 5 seconds + */ + duration?: '5' | '8' + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same video every time. + * + */ + seed?: number + /** + * Negative Prompt + * + * Negative prompt to be used for the generation + */ + negative_prompt?: string +} + +/** + * VideoOutputV4 + */ +export type SchemaPixverseV45TextToVideoFastOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * FastTextToVideoRequest + */ +export type SchemaPixverseV45TextToVideoFastInput = { + /** + * Prompt + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: '16:9' | '4:3' | '1:1' | '3:4' | '9:16' + /** + * Resolution + * + * The resolution of the generated video + */ + resolution?: '360p' | '540p' | '720p' + /** + * Style + * + * The style of the generated video + */ + style?: 'anime' | '3d_animation' | 'clay' | 'comic' | 'cyberpunk' + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same video every time. + * + */ + seed?: number + /** + * Negative Prompt + * + * Negative prompt to be used for the generation + */ + negative_prompt?: string +} + +/** + * TextToVideoOutput + */ +export type SchemaLtxVideo13bDistilledOutput = { + /** + * Prompt + * + * The prompt used for generation. 
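Editorial note: the Magi field docs above imply simple cost arithmetic: 480p is 0.5 billing units, 720p is 1 unit, and each additional 24 frames beyond the 96-frame base incurs an additional billing unit. A sketch under that reading of the docs, not billing truth:

```ts
// Rough cost estimate for a Magi request, per the schema notes above.
function estimateMagiBillingUnits(
  resolution: '480p' | '720p',
  numFrames: number,
): number {
  if (numFrames < 96 || numFrames > 192) {
    throw new Error('num_frames must be between 96 and 192 (inclusive)')
  }
  const base = resolution === '480p' ? 0.5 : 1
  const extraUnits = Math.floor((numFrames - 96) / 24) // one unit per extra 24 frames
  return base + extraUnits
}

console.log(estimateMagiBillingUnits('720p', 144)) // 1 base + 2 extra = 3 units under this reading
```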
+ */ + prompt: string + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * DistilledTextToVideoInput + * + * Distilled model input + */ +export type SchemaLtxVideo13bDistilledInput = { + /** + * Second Pass Skip Initial Steps + * + * The number of inference steps to skip in the initial steps of the second pass. By skipping some steps at the beginning, the second pass can focus on smaller details instead of larger changes. + */ + second_pass_skip_initial_steps?: number + /** + * First Pass Num Inference Steps + * + * Number of inference steps during the first pass. + */ + first_pass_num_inference_steps?: number + /** + * Frame Rate + * + * The frame rate of the video. + */ + frame_rate?: number + /** + * Reverse Video + * + * Whether to reverse the video. + */ + reverse_video?: boolean + /** + * Prompt + * + * Text prompt to guide generation + */ + prompt: string + /** + * Expand Prompt + * + * Whether to expand the prompt using a language model. + */ + expand_prompt?: boolean + /** + * Loras + * + * LoRA weights to use for generation + */ + loras?: Array<SchemaLoRaWeight> + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Num Frames + * + * The number of frames in the video. + */ + num_frames?: number + /** + * Second Pass Num Inference Steps + * + * Number of inference steps during the second pass. + */ + second_pass_num_inference_steps?: number + /** + * Negative Prompt + * + * Negative prompt for generation + */ + negative_prompt?: string + /** + * Resolution + * + * Resolution of the generated video (480p or 720p). + */ + resolution?: '480p' | '720p' + /** + * Aspect Ratio + * + * Aspect ratio of the generated video (16:9, 1:1 or 9:16). + */ + aspect_ratio?: '9:16' | '1:1' | '16:9' + /** + * First Pass Skip Final Steps + * + * Number of inference steps to skip in the final steps of the first pass. By skipping some steps at the end, the first pass can focus on larger changes instead of smaller details. + */ + first_pass_skip_final_steps?: number + /** + * Seed + * + * Random seed for generation + */ + seed?: number +} + +/** + * LoRAWeight + */ +export type SchemaLoRaWeight = { + /** + * Path + * + * URL or path to the LoRA weights. + */ + path: string + /** + * Scale + * + * Scale of the LoRA weight. This is a multiplier applied to the LoRA weight when loading it. + */ + scale?: number + /** + * Weight Name + * + * Name of the LoRA weight. Only used if `path` is a HuggingFace repository, and is only required when the repository contains multiple LoRA weights. + */ + weight_name?: string +} + +/** + * TextToVideoOutput + */ +export type SchemaLtxVideo13bDevOutput = { + /** + * Prompt + * + * The prompt used for generation. + */ + prompt: string + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * TextToVideoInput + */ +export type SchemaLtxVideo13bDevInput = { + /** + * Second Pass Skip Initial Steps + * + * The number of inference steps to skip in the initial steps of the second pass. By skipping some steps at the beginning, the second pass can focus on smaller details instead of larger changes. + */ + second_pass_skip_initial_steps?: number + /** + * First Pass Num Inference Steps + * + * Number of inference steps during the first pass.
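Editorial note: an illustrative two-pass LTX-13B configuration following the parameter docs above: the first pass skips its final (detail-oriented) steps, while the second pass skips its initial (structure-oriented) steps. Step counts and the LoRA URL are made up for the sketch; the `./fal.types` import path is hypothetical.

```ts
import type { SchemaLtxVideo13bDistilledInput } from './fal.types' // hypothetical path

const ltxInput: SchemaLtxVideo13bDistilledInput = {
  prompt: 'Timelapse of clouds rolling over a mountain ridge',
  first_pass_num_inference_steps: 8,
  first_pass_skip_final_steps: 2, // leave fine detail to the second pass
  second_pass_num_inference_steps: 8,
  second_pass_skip_initial_steps: 4, // skip the large-structure work already done
  resolution: '720p',
  aspect_ratio: '16:9',
  loras: [{ path: 'https://example.com/style-lora.safetensors', scale: 0.8 }], // placeholder URL
}
```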
+ */ + first_pass_num_inference_steps?: number + /** + * Frame Rate + * + * The frame rate of the video. + */ + frame_rate?: number + /** + * Prompt + * + * Text prompt to guide generation + */ + prompt: string + /** + * Reverse Video + * + * Whether to reverse the video. + */ + reverse_video?: boolean + /** + * Expand Prompt + * + * Whether to expand the prompt using a language model. + */ + expand_prompt?: boolean + /** + * Loras + * + * LoRA weights to use for generation + */ + loras?: Array<SchemaLoRaWeight> + /** + * Second Pass Num Inference Steps + * + * Number of inference steps during the second pass. + */ + second_pass_num_inference_steps?: number + /** + * Num Frames + * + * The number of frames in the video. + */ + num_frames?: number + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * Negative prompt for generation + */ + negative_prompt?: string + /** + * Resolution + * + * Resolution of the generated video (480p or 720p). + */ + resolution?: '480p' | '720p' + /** + * Aspect Ratio + * + * Aspect ratio of the generated video (16:9, 1:1 or 9:16). + */ + aspect_ratio?: '9:16' | '1:1' | '16:9' + /** + * First Pass Skip Final Steps + * + * Number of inference steps to skip in the final steps of the first pass. By skipping some steps at the end, the first pass can focus on larger changes instead of smaller details. + */ + first_pass_skip_final_steps?: number + /** + * Seed + * + * Random seed for generation + */ + seed?: number +} + +/** + * TextToVideoV21MasterOutput + */ +export type SchemaKlingVideoV21MasterTextToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * TextToVideoV21MasterRequest + */ +export type SchemaKlingVideoV21MasterTextToVideoInput = { + /** + * Prompt + */ + prompt: string + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: '5' | '10' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video frame + */ + aspect_ratio?: '16:9' | '9:16' | '1:1' + /** + * Negative Prompt + */ + negative_prompt?: string + /** + * Cfg Scale + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt. + * + */ + cfg_scale?: number +} + +/** + * SeedanceVideoOutput + */ +export type SchemaBytedanceSeedanceV1LiteTextToVideoOutput = { + /** + * Seed + * + * Seed used for generation + */ + seed: number + /** + * Video + * + * Generated video file + */ + video: SchemaFile +} + +/** + * SeedanceTextToVideoInput + */ +export type SchemaBytedanceSeedanceV1LiteTextToVideoInput = { + /** + * Prompt + * + * The text prompt used to generate the video + */ + prompt: string + /** + * Resolution + * + * Video resolution - 480p for faster generation, 720p for balance, 1080p for higher quality + */ + resolution?: '480p' | '720p' | '1080p' + /** + * Duration + * + * Duration of the video in seconds + */ + duration?: '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | '10' | '11' | '12' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: '21:9' | '16:9' | '4:3' | '1:1' | '3:4' | '9:16' | '9:21' + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * Random seed to control video generation. Use -1 for random.
+ */ + seed?: number + /** + * Camera Fixed + * + * Whether to fix the camera position + */ + camera_fixed?: boolean +} + +/** + * SeedanceProT2VVideoOutput + */ +export type SchemaBytedanceSeedanceV1ProTextToVideoOutput = { + /** + * Seed + * + * Seed used for generation + */ + seed: number + /** + * Video + * + * Generated video file + */ + video: SchemaFile +} + +/** + * SeedanceProTextToVideoInput + */ +export type SchemaBytedanceSeedanceV1ProTextToVideoInput = { + /** + * Prompt + * + * The text prompt used to generate the video + */ + prompt: string + /** + * Resolution + * + * Video resolution - 480p for faster generation, 720p for balance, 1080p for higher quality + */ + resolution?: '480p' | '720p' | '1080p' + /** + * Duration + * + * Duration of the video in seconds + */ + duration?: '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | '10' | '11' | '12' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: '21:9' | '16:9' | '4:3' | '1:1' | '3:4' | '9:16' + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * Random seed to control video generation. Use -1 for random. + */ + seed?: number + /** + * Camera Fixed + * + * Whether to fix the camera position + */ + camera_fixed?: boolean +} + +/** + * TextToVideoHailuo02Output + */ +export type SchemaMinimaxHailuo02ProTextToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * ProTextToVideoHailuo02Input + */ +export type SchemaMinimaxHailuo02ProTextToVideoInput = { + /** + * Prompt Optimizer + * + * Whether to use the model's prompt optimizer + */ + prompt_optimizer?: boolean + /** + * Prompt + */ + prompt: string +} + +/** + * TextToVideoOutput + */ +export type SchemaLtxv13B098DistilledOutput = { + /** + * Prompt + * + * The prompt used for generation. + */ + prompt: string + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * DistilledTextToVideoInput + * + * Distilled model input + */ +export type SchemaLtxv13B098DistilledInput = { + /** + * Second Pass Skip Initial Steps + * + * The number of inference steps to skip in the initial steps of the second pass. By skipping some steps at the beginning, the second pass can focus on smaller details instead of larger changes. + */ + second_pass_skip_initial_steps?: number + /** + * Number of Inference Steps + * + * Number of inference steps during the first pass. + */ + first_pass_num_inference_steps?: number + /** + * Frame Rate + * + * The frame rate of the video. + */ + frame_rate?: number + /** + * Reverse Video + * + * Whether to reverse the video. + */ + reverse_video?: boolean + /** + * Prompt + * + * Text prompt to guide generation + */ + prompt: string + /** + * Expand Prompt + * + * Whether to expand the prompt using a language model. + */ + expand_prompt?: boolean + /** + * Temporal AdaIN Factor + * + * The factor for adaptive instance normalization (AdaIN) applied to generated video chunks after the first. This can help deal with a gradual increase in saturation/contrast in the generated video by normalizing the color distribution across the video. A high value will ensure the color distribution is more consistent across the video, while a low value will allow for more variation in color distribution. 
+ */ + temporal_adain_factor?: number + /** + * Loras + * + * LoRA weights to use for generation + */ + loras?: Array<SchemaLoRaWeight> + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Number of Frames + * + * The number of frames in the video. + */ + num_frames?: number + /** + * Second Pass Number of Inference Steps + * + * Number of inference steps during the second pass. + */ + second_pass_num_inference_steps?: number + /** + * Negative Prompt + * + * Negative prompt for generation + */ + negative_prompt?: string + /** + * Enable Detail Pass + * + * Whether to use a detail pass. If True, the model will perform a second pass to refine the video and enhance details. This incurs a 2.0x cost multiplier on the base price. + */ + enable_detail_pass?: boolean + /** + * Resolution + * + * Resolution of the generated video. + */ + resolution?: '480p' | '720p' + /** + * Aspect Ratio + * + * Aspect ratio of the generated video. + */ + aspect_ratio?: '9:16' | '1:1' | '16:9' + /** + * Tone Map Compression Ratio + * + * The compression ratio for tone mapping. This is used to compress the dynamic range of the video to improve visual quality. A value of 0.0 means no compression, while a value of 1.0 means maximum compression. + */ + tone_map_compression_ratio?: number + /** + * Seed + * + * Random seed for generation + */ + seed?: number +} + +/** + * WanT2VResponse + */ +export type SchemaWanV22A14bTextToVideoOutput = { + /** + * Prompt + * + * The text prompt used for video generation. + */ + prompt?: string + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * WanT2VRequest + */ +export type SchemaWanV22A14bTextToVideoInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Shift + * + * Shift value for the video. Must be between 1.0 and 10.0. + */ + shift?: number + /** + * Acceleration + * + * Acceleration level to use. The more acceleration, the faster the generation, but with lower quality. The recommended value is 'regular'. + */ + acceleration?: 'none' | 'regular' + /** + * Number of Interpolated Frames + * + * Number of frames to interpolate between each pair of generated frames. Must be between 0 and 4. + */ + num_interpolated_frames?: number + /** + * Frames per Second + * + * Frames per second of the generated video. Must be between 4 and 60. When using interpolation and `adjust_fps_for_interpolation` is set to true (default true), the final FPS will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If `adjust_fps_for_interpolation` is set to false, this value will be used as-is. + */ + frames_per_second?: number + /** + * Guidance Scale (1st Stage) + * + * Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality. + */ + guidance_scale?: number + /** + * Number of Frames + * + * Number of frames to generate. Must be between 17 and 161 (inclusive). + */ + num_frames?: number + /** + * Enable Safety Checker + * + * If set to true, input data will be checked for safety before processing. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * Negative prompt for video generation.
+ */ + negative_prompt?: string + /** + * Video Write Mode + * + * The write mode of the output video. Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Resolution + * + * Resolution of the generated video (480p, 580p, or 720p). + */ + resolution?: '480p' | '580p' | '720p' + /** + * Aspect Ratio + * + * Aspect ratio of the generated video (16:9, 9:16, or 1:1). + */ + aspect_ratio?: '16:9' | '9:16' | '1:1' + /** + * Enable Output Safety Checker + * + * If set to true, output video will be checked for safety after generation. + */ + enable_output_safety_checker?: boolean + /** + * Guidance Scale (2nd Stage) + * + * Guidance scale for the second stage of the model. This is used to control the adherence to the prompt in the second stage of the model. + */ + guidance_scale_2?: number + /** + * Video Quality + * + * The quality of the output video. Higher quality means better visual quality but larger file size. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning. + */ + enable_prompt_expansion?: boolean + /** + * Number of Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: number + /** + * Interpolator Model + * + * The model to use for frame interpolation. If None, no interpolation is applied. + */ + interpolator_model?: 'none' | 'film' | 'rife' + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Adjust FPS for Interpolation + * + * If true, the number of frames per second will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If false, the passed frames per second will be used as-is. + */ + adjust_fps_for_interpolation?: boolean +} + +/** + * WanSmallT2VResponse + */ +export type SchemaWanV225bTextToVideoOutput = { + /** + * Prompt + * + * The text prompt used for video generation. + */ + prompt?: string + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * WanSmallT2VRequest + */ +export type SchemaWanV225bTextToVideoInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Shift + * + * Shift value for the video. Must be between 1.0 and 10.0. + */ + shift?: number + /** + * Number of Interpolated Frames + * + * Number of frames to interpolate between each pair of generated frames. Must be between 0 and 4. + */ + num_interpolated_frames?: number + /** + * Frames per Second + * + * Frames per second of the generated video. Must be between 4 and 60. When using interpolation and `adjust_fps_for_interpolation` is set to true (default true), the final FPS will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32.
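Editorial note: the Wan v2.2 schemas in this region all share the interpolation rule just described: with `adjust_fps_for_interpolation` enabled (the default), the delivered FPS is `frames_per_second * (num_interpolated_frames + 1)`. A small helper mirroring the documented example (16 fps with 1 interpolated frame yields 32 fps):

```ts
// Effective FPS under the Wan v2.2 interpolation rule documented above.
function effectiveFps(
  framesPerSecond: number,
  numInterpolatedFrames: number,
  adjustFpsForInterpolation = true, // schema default
): number {
  return adjustFpsForInterpolation
    ? framesPerSecond * (numInterpolatedFrames + 1)
    : framesPerSecond
}

console.log(effectiveFps(16, 1)) // 32, as in the schema docs
console.log(effectiveFps(16, 1, false)) // 16, value used as-is
```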
If `adjust_fps_for_interpolation` is set to false, this value will be used as-is. + */ + frames_per_second?: number + /** + * Guidance Scale + * + * Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality. + */ + guidance_scale?: number + /** + * Number of Frames + * + * Number of frames to generate. Must be between 17 and 161 (inclusive). + */ + num_frames?: number + /** + * Enable Safety Checker + * + * If set to true, input data will be checked for safety before processing. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * Negative prompt for video generation. + */ + negative_prompt?: string + /** + * Video Write Mode + * + * The write mode of the output video. Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Resolution + * + * Resolution of the generated video (580p or 720p). + */ + resolution?: '580p' | '720p' + /** + * Aspect Ratio + * + * Aspect ratio of the generated video (16:9, 9:16, or 1:1). + */ + aspect_ratio?: '16:9' | '9:16' | '1:1' + /** + * Enable Output Safety Checker + * + * If set to true, output video will be checked for safety after generation. + */ + enable_output_safety_checker?: boolean + /** + * Video Quality + * + * The quality of the output video. Higher quality means better visual quality but larger file size. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning. + */ + enable_prompt_expansion?: boolean + /** + * Number of Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: number + /** + * Interpolator Model + * + * The model to use for frame interpolation. If None, no interpolation is applied. + */ + interpolator_model?: 'none' | 'film' | 'rife' + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Adjust FPS for Interpolation + * + * If true, the number of frames per second will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If false, the passed frames per second will be used as-is. + */ + adjust_fps_for_interpolation?: boolean +} + +/** + * WanTurboT2VResponse + */ +export type SchemaWanV22A14bTextToVideoTurboOutput = { + /** + * Prompt + * + * The text prompt used for video generation. + */ + prompt?: string + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * WanTurboT2VRequest + */ +export type SchemaWanV22A14bTextToVideoTurboInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Resolution + * + * Resolution of the generated video (480p, 580p, or 720p). + */ + resolution?: '480p' | '580p' | '720p' + /** + * Acceleration + * + * Acceleration level to use. The more acceleration, the faster the generation, but with lower quality. The recommended value is 'regular'.
+ */ + acceleration?: 'none' | 'regular' + /** + * Aspect Ratio + * + * Aspect ratio of the generated video (16:9, 9:16, or 1:1). + */ + aspect_ratio?: '16:9' | '9:16' | '1:1' + /** + * Video Write Mode + * + * The write mode of the output video. Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Enable Output Safety Checker + * + * If set to true, output video will be checked for safety after generation. + */ + enable_output_safety_checker?: boolean + /** + * Video Quality + * + * The quality of the output video. Higher quality means better visual quality but larger file size. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Enable Safety Checker + * + * If set to true, input data will be checked for safety before processing. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning. + */ + enable_prompt_expansion?: boolean +} + +/** + * WanSmallFastVideoT2VResponse + */ +export type SchemaWanV225bTextToVideoFastWanOutput = { + /** + * Prompt + * + * The text prompt used for video generation. + */ + prompt?: string + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * WanSmallFastVideoT2VRequest + */ +export type SchemaWanV225bTextToVideoFastWanInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Number of Interpolated Frames + * + * Number of frames to interpolate between each pair of generated frames. Must be between 0 and 4. + */ + num_interpolated_frames?: number + /** + * Frames per Second + * + * Frames per second of the generated video. Must be between 4 and 60. When using interpolation and `adjust_fps_for_interpolation` is set to true (default true), the final FPS will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If `adjust_fps_for_interpolation` is set to false, this value will be used as-is. + */ + frames_per_second?: number + /** + * Enable Safety Checker + * + * If set to true, input data will be checked for safety before processing. + */ + enable_safety_checker?: boolean + /** + * Number of Frames + * + * Number of frames to generate. Must be between 17 and 161 (inclusive). + */ + num_frames?: number + /** + * Guidance Scale + * + * Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality. + */ + guidance_scale?: number + /** + * Negative Prompt + * + * Negative prompt for video generation. + */ + negative_prompt?: string + /** + * Video Write Mode + * + * The write mode of the output video. Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size.
+ */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Resolution + * + * Resolution of the generated video (480p, 580p, or 720p). + */ + resolution?: '480p' | '580p' | '720p' + /** + * Aspect Ratio + * + * Aspect ratio of the generated video (16:9, 9:16, or 1:1). + */ + aspect_ratio?: '16:9' | '9:16' | '1:1' + /** + * Enable Output Safety Checker + * + * If set to true, output video will be checked for safety after generation. + */ + enable_output_safety_checker?: boolean + /** + * Video Quality + * + * The quality of the output video. Higher quality means better visual quality but larger file size. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning. + */ + enable_prompt_expansion?: boolean + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Interpolator Model + * + * The model to use for frame interpolation. If None, no interpolation is applied. + */ + interpolator_model?: 'none' | 'film' | 'rife' + /** + * Adjust FPS for Interpolation + * + * If true, the number of frames per second will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If false, the passed frames per second will be used as-is. + */ + adjust_fps_for_interpolation?: boolean +} + +/** + * WanSmallT2VResponse + */ +export type SchemaWanV225bTextToVideoDistillOutput = { + /** + * Prompt + * + * The text prompt used for video generation. + */ + prompt?: string + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * WanDistillT2VRequest + */ +export type SchemaWanV225bTextToVideoDistillInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Shift + * + * Shift value for the video. Must be between 1.0 and 10.0. + */ + shift?: number + /** + * Number of Interpolated Frames + * + * Number of frames to interpolate between each pair of generated frames. Must be between 0 and 4. + */ + num_interpolated_frames?: number + /** + * Frames per Second + * + * Frames per second of the generated video. Must be between 4 and 60. When using interpolation and `adjust_fps_for_interpolation` is set to true (default true), the final FPS will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If `adjust_fps_for_interpolation` is set to false, this value will be used as-is. + */ + frames_per_second?: number + /** + * Guidance Scale (1st Stage) + * + * Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality. + */ + guidance_scale?: number + /** + * Number of Frames + * + * Number of frames to generate. Must be between 17 and 161 (inclusive). + */ + num_frames?: number + /** + * Enable Safety Checker + * + * If set to true, input data will be checked for safety before processing. + */ + enable_safety_checker?: boolean + /** + * Video Write Mode + * + * The write mode of the output video.
Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Resolution + * + * Resolution of the generated video (580p or 720p). + */ + resolution?: '580p' | '720p' + /** + * Aspect Ratio + * + * Aspect ratio of the generated video (16:9, 9:16, or 1:1). + */ + aspect_ratio?: '16:9' | '9:16' | '1:1' + /** + * Enable Output Safety Checker + * + * If set to true, output video will be checked for safety after generation. + */ + enable_output_safety_checker?: boolean + /** + * Video Quality + * + * The quality of the output video. Higher quality means better visual quality but larger file size. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning. + */ + enable_prompt_expansion?: boolean + /** + * Number of Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: number + /** + * Interpolator Model + * + * The model to use for frame interpolation. If None, no interpolation is applied. + */ + interpolator_model?: 'none' | 'film' | 'rife' + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Adjust FPS for Interpolation + * + * If true, the number of frames per second will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If false, the passed frames per second will be used as-is. + */ + adjust_fps_for_interpolation?: boolean +} + +/** + * WanT2VResponse + */ +export type SchemaWanV22A14bTextToVideoLoraOutput = { + /** + * Prompt + * + * The text prompt used for video generation. + */ + prompt?: string + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * WanLoRAT2VRequest + */ +export type SchemaWanV22A14bTextToVideoLoraInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Shift + * + * Shift value for the video. Must be between 1.0 and 10.0. + */ + shift?: number + /** + * Acceleration + * + * Acceleration level to use. The more acceleration, the faster the generation, but with lower quality. The recommended value is 'regular'. + */ + acceleration?: 'none' | 'regular' + /** + * Number of Interpolated Frames + * + * Number of frames to interpolate between each pair of generated frames. Must be between 0 and 4. + */ + num_interpolated_frames?: number + /** + * Reverse Video + * + * If true, the video will be reversed. + */ + reverse_video?: boolean + /** + * Loras + * + * LoRA weights to be used in the inference. + */ + loras?: Array<SchemaLoRaWeight> + /** + * Frames per Second + * + * Frames per second of the generated video. Must be between 4 and 60. When using interpolation and `adjust_fps_for_interpolation` is set to true (default true), the final FPS will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32.
If `adjust_fps_for_interpolation` is set to false, this value will be used as-is. + */ + frames_per_second?: number + /** + * Enable Safety Checker + * + * If set to true, input data will be checked for safety before processing. + */ + enable_safety_checker?: boolean + /** + * Number of Frames + * + * Number of frames to generate. Must be between 17 and 161 (inclusive). + */ + num_frames?: number + /** + * Guidance Scale (1st Stage) + * + * Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality. + */ + guidance_scale?: number + /** + * Negative Prompt + * + * Negative prompt for video generation. + */ + negative_prompt?: string + /** + * Video Write Mode + * + * The write mode of the output video. Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Resolution + * + * Resolution of the generated video (480p, 580p, or 720p). + */ + resolution?: '480p' | '580p' | '720p' + /** + * Aspect Ratio + * + * Aspect ratio of the generated video (16:9, 9:16, or 1:1). + */ + aspect_ratio?: '16:9' | '9:16' | '1:1' + /** + * Enable Output Safety Checker + * + * If set to true, output video will be checked for safety after generation. + */ + enable_output_safety_checker?: boolean + /** + * Guidance Scale (2nd Stage) + * + * Guidance scale for the second stage of the model. This is used to control the adherence to the prompt in the second stage of the model. + */ + guidance_scale_2?: number + /** + * Video Quality + * + * The quality of the output video. Higher quality means better visual quality but larger file size. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning. + */ + enable_prompt_expansion?: boolean + /** + * Number of Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: number + /** + * Interpolator Model + * + * The model to use for frame interpolation. If None, no interpolation is applied. + */ + interpolator_model?: 'none' | 'film' | 'rife' + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Adjust FPS for Interpolation + * + * If true, the number of frames per second will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If false, the passed frames per second will be used as-is. + */ + adjust_fps_for_interpolation?: boolean +} + +/** + * MareyOutput + */ +export type SchemaMareyT2vOutput = { + video: SchemaFile +} + +/** + * MareyInputT2V + */ +export type SchemaMareyT2vInput = { + /** + * Prompt + * + * The prompt to generate a video from + */ + prompt: string + /** + * Duration + * + * The duration of the generated video. + */ + duration?: '5s' | '10s' + /** + * Dimensions + * + * The dimensions of the generated video in width x height format. + */ + dimensions?: '1920x1080' | '1152x1152' | '1536x1152' | '1152x1536' + /** + * Guidance Scale + * + * Controls how strongly the generation is guided by the prompt (0-20).
Higher values follow the prompt more closely. + */ + guidance_scale?: number | unknown + /** + * Seed + * + * Seed for random number generation. Use -1 for random seed each run. + */ + seed?: number | unknown + /** + * Negative Prompt + * + * Negative prompt used to guide the model away from undesirable features. + */ + negative_prompt?: string | unknown +} + +/** + * AvatarSingleTextResponse + */ +export type SchemaInfinitalkSingleTextOutput = { + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * InfiniTalkSingleTextRequest + */ +export type SchemaInfinitalkSingleTextInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Resolution + * + * Resolution of the video to generate. Must be either 480p or 720p. + */ + resolution?: '480p' | '720p' + /** + * Acceleration + * + * The acceleration level to use for generation. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Text Input + * + * The text input to guide video generation. + */ + text_input: string + /** + * Image URL + * + * URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped. + */ + image_url: string + /** + * Voice + * + * The voice to use for speech generation + */ + voice: + | 'Aria' + | 'Roger' + | 'Sarah' + | 'Laura' + | 'Charlie' + | 'George' + | 'Callum' + | 'River' + | 'Liam' + | 'Charlotte' + | 'Alice' + | 'Matilda' + | 'Will' + | 'Jessica' + | 'Eric' + | 'Chris' + | 'Brian' + | 'Daniel' + | 'Lily' + | 'Bill' + /** + * Number of Frames + * + * Number of frames to generate. Must be between 41 and 721. + */ + num_frames?: number + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number +} + +/** + * VideoOutputV5 + */ +export type SchemaPixverseV5TextToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * TextToVideoRequest + */ +export type SchemaPixverseV5TextToVideoInput = { + /** + * Prompt + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: '16:9' | '4:3' | '1:1' | '3:4' | '9:16' + /** + * Resolution + * + * The resolution of the generated video + */ + resolution?: '360p' | '540p' | '720p' | '1080p' + /** + * Style + * + * The style of the generated video + */ + style?: 'anime' | '3d_animation' | 'clay' | 'comic' | 'cyberpunk' + /** + * Duration + * + * The duration of the generated video in seconds. 8s videos cost double. 1080p videos are limited to 5 seconds + */ + duration?: '5' | '8' + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same video every time.
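Editorial note: a minimal `SchemaInfinitalkSingleTextInput` sketch for the talking-avatar schema above: a guiding prompt, the speech text, a reference image, and a voice are the required fields. The image URL is a placeholder and the `./fal.types` path is hypothetical.

```ts
import type { SchemaInfinitalkSingleTextInput } from './fal.types' // hypothetical path

const infinitalkInput: SchemaInfinitalkSingleTextInput = {
  prompt: 'A friendly presenter speaking to camera, soft studio light',
  text_input: 'Welcome back! Today we are looking at generated video APIs.',
  image_url: 'https://example.com/presenter.png', // resized and center-cropped if aspect differs
  voice: 'Sarah',
  resolution: '480p',
}
```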
+ * + */ + seed?: number + /** + * Negative Prompt + * + * Negative prompt to be used for the generation + */ + negative_prompt?: string +} + +/** + * AvatarsAppOutput + */ +export type SchemaAvatarsTextToVideoOutput = { + video: SchemaFile +} + +/** + * Text2VideoInput + */ +export type SchemaAvatarsTextToVideoInput = { + /** + * Text + */ + text: string + /** + * Avatar Id + * + * The avatar to use for the video + */ + avatar_id: + | 'emily_vertical_primary' + | 'emily_vertical_secondary' + | 'marcus_vertical_primary' + | 'marcus_vertical_secondary' + | 'mira_vertical_primary' + | 'mira_vertical_secondary' + | 'jasmine_vertical_primary' + | 'jasmine_vertical_secondary' + | 'jasmine_vertical_walking' + | 'aisha_vertical_walking' + | 'elena_vertical_primary' + | 'elena_vertical_secondary' + | 'any_male_vertical_primary' + | 'any_female_vertical_primary' + | 'any_male_vertical_secondary' + | 'any_female_vertical_secondary' + | 'any_female_vertical_walking' + | 'emily_primary' + | 'emily_side' + | 'marcus_primary' + | 'marcus_side' + | 'aisha_walking' + | 'elena_primary' + | 'elena_side' + | 'any_male_primary' + | 'any_female_primary' + | 'any_male_side' + | 'any_female_side' +} + +/** + * VideoOutput + * + * Base output for video generation + */ +export type SchemaWan25PreviewTextToVideoOutput = { + /** + * Actual Prompt + * + * The actual prompt used if prompt rewriting was enabled + */ + actual_prompt?: string + /** + * Seed + * + * The seed used for generation + */ + seed: number + /** + * Video + * + * The generated video file + */ + video: SchemaVideoFile +} + +/** + * VideoFile + */ +export type SchemaVideoFile = { + /** + * File Size + * + * The size of the file in bytes. + */ + file_size?: number + /** + * Duration + * + * The duration of the video + */ + duration?: number + /** + * Height + * + * The height of the video + */ + height?: number + /** + * Url + * + * The URL where the file can be downloaded from. + */ + url: string + /** + * Fps + * + * The FPS of the video + */ + fps?: number + /** + * Width + * + * The width of the video + */ + width?: number + /** + * File Name + * + * The name of the file. It will be auto-generated if not provided. + */ + file_name?: string + /** + * Content Type + * + * The mime type of the file. + */ + content_type?: string + /** + * Num Frames + * + * The number of frames in the video + */ + num_frames?: number + /** + * File Data + * + * File data + */ + file_data?: Blob | File +} + +/** + * TextToVideoInput + * + * Input for text-to-video generation + */ +export type SchemaWan25PreviewTextToVideoInput = { + /** + * Prompt + * + * The text prompt for video generation. Supports Chinese and English, max 800 characters. + */ + prompt: string + /** + * Resolution + * + * Video resolution tier + */ + resolution?: '480p' | '720p' | '1080p' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: '16:9' | '9:16' | '1:1' + /** + * Duration + * + * Duration of the generated video in seconds. Choose between 5 or 10 seconds. + */ + duration?: '5' | '10' + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Audio Url + * + * + * URL of the audio to use as the background music. Must be publicly accessible. 
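Editorial note: `SchemaVideoFile` (defined just above) is the richer video payload some endpoints return; only `url` is guaranteed, so metadata access should be defensive. A hypothetical download helper using only standard `fetch`:

```ts
import type { SchemaVideoFile } from './fal.types' // hypothetical path

// Logs whatever metadata is present, then downloads the video bytes.
async function downloadVideo(file: SchemaVideoFile): Promise<ArrayBuffer> {
  console.log(
    `video ${file.file_name ?? '(unnamed)'}: ` +
      `${file.width ?? '?'}x${file.height ?? '?'} @ ${file.fps ?? '?'} fps, ` +
      `${file.duration ?? '?'}s`,
  )
  const res = await fetch(file.url)
  if (!res.ok) throw new Error(`download failed: ${res.status}`)
  return res.arrayBuffer()
}
```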
+ * Limit handling: If the audio duration exceeds the duration value (5 or 10 seconds), + * the audio is truncated to the first 5 or 10 seconds, and the rest is discarded. If + * the audio is shorter than the video, the remaining part of the video will be silent. + * For example, if the audio is 3 seconds long and the video duration is 5 seconds, the + * first 3 seconds of the output video will have sound, and the last 2 seconds will be silent. + * - Format: WAV, MP3. + * - Duration: 3 to 30 s. + * - File size: Up to 15 MB. + * + */ + audio_url?: string + /** + * Negative Prompt + * + * Negative prompt to describe content to avoid. Max 500 characters. + */ + negative_prompt?: string + /** + * Enable Prompt Expansion + * + * Whether to enable prompt rewriting using LLM. Improves results for short prompts but increases processing time. + */ + enable_prompt_expansion?: boolean +} + +/** + * OviT2VResponse + */ +export type SchemaOviOutput = { + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * The generated video file. + */ + video?: SchemaFile | unknown +} + +/** + * OviT2VRequest + */ +export type SchemaOviInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Resolution + * + * Resolution of the generated video in W:H format. One of (512x992, 992x512, 960x512, 512x960, 720x720, or 448x1120). + */ + resolution?: + | '512x992' + | '992x512' + | '960x512' + | '512x960' + | '720x720' + | '448x1120' + | '1120x448' + /** + * Num Inference Steps + * + * The number of inference steps. + */ + num_inference_steps?: number + /** + * Audio Negative Prompt + * + * Negative prompt for audio generation. + */ + audio_negative_prompt?: string + /** + * Negative Prompt + * + * Negative prompt for video generation. + */ + negative_prompt?: string + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number | unknown +} + +/** + * TextToVideoOutput + */ +export type SchemaSora2TextToVideoOutput = { + /** + * Spritesheet + * + * Spritesheet image for the video + */ + spritesheet?: SchemaImageFile + /** + * Thumbnail + * + * Thumbnail image for the video + */ + thumbnail?: SchemaImageFile + /** + * Video ID + * + * The ID of the generated video + */ + video_id: string + /** + * Video + * + * The generated video + */ + video: SchemaVideoFile +} + +/** + * ImageFile + */ +export type SchemaImageFile = { + /** + * File Size + * + * The size of the file in bytes. + */ + file_size?: number + /** + * Height + * + * The height of the image + */ + height?: number + /** + * Url + * + * The URL where the file can be downloaded from. + */ + url: string + /** + * Width + * + * The width of the image + */ + width?: number + /** + * File Name + * + * The name of the file. It will be auto-generated if not provided. + */ + file_name?: string + /** + * Content Type + * + * The mime type of the file. + */ + content_type?: string + /** + * File Data + * + * File data + */ + file_data?: Blob | File +} + +/** + * TextToVideoInput + */ +export type SchemaSora2TextToVideoInput = { + /** + * Prompt + * + * The text prompt describing the video you want to generate + */ + prompt: string + /** + * Duration + * + * Duration of the generated video in seconds + */ + duration?: 4 | 8 | 12 + /** + * Resolution + * + * The resolution of the generated video + */ + resolution?: '720p' + /** + * Model + * + * The model to use for the generation. 
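Editorial note: the Wan 2.5 `audio_url` limit handling described above reduces to simple arithmetic: audio longer than the clip is truncated, and shorter audio leaves the remainder silent. A sketch reproducing the doc's own example (a 3s track in a 5s clip gives 3s of sound and 2s of silence):

```ts
// Sound/silence split under the Wan 2.5 audio rule documented above.
function soundedSeconds(
  audioSeconds: number,
  videoSeconds: 5 | 10, // the schema's two duration choices
): { sound: number; silence: number } {
  const sound = Math.min(audioSeconds, videoSeconds) // excess audio is discarded
  return { sound, silence: videoSeconds - sound }
}

console.log(soundedSeconds(3, 5)) // { sound: 3, silence: 2 }, the doc's example
console.log(soundedSeconds(30, 10)) // { sound: 10, silence: 0 }, audio truncated
```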
When the default model is selected, the latest snapshot of the model will be used - otherwise, select a specific snapshot of the model. + */ + model?: 'sora-2' | 'sora-2-2025-12-08' | 'sora-2-2025-10-06' + /** + * Delete Video + * + * Whether to delete the video after generation for privacy reasons. If True, the video cannot be used for remixing and will be permanently deleted. + */ + delete_video?: boolean + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: '9:16' | '16:9' +} + +/** + * ProTextToVideoOutput + */ +export type SchemaSora2TextToVideoProOutput = { + /** + * Spritesheet + * + * Spritesheet image for the video + */ + spritesheet?: SchemaImageFile + /** + * Thumbnail + * + * Thumbnail image for the video + */ + thumbnail?: SchemaImageFile + /** + * Video ID + * + * The ID of the generated video + */ + video_id: string + /** + * Video + * + * The generated video + */ + video: SchemaVideoFile +} + +/** + * ProTextToVideoInput + */ +export type SchemaSora2TextToVideoProInput = { + /** + * Prompt + * + * The text prompt describing the video you want to generate + */ + prompt: string + /** + * Duration + * + * Duration of the generated video in seconds + */ + duration?: 4 | 8 | 12 + /** + * Resolution + * + * The resolution of the generated video + */ + resolution?: '720p' | '1080p' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: '9:16' | '16:9' + /** + * Delete Video + * + * Whether to delete the video after generation for privacy reasons. If True, the video cannot be used for remixing and will be permanently deleted. + */ + delete_video?: boolean +} + +/** + * Veo31TextToVideoOutput + */ +export type SchemaVeo31Output = { + /** + * Video + * + * The generated video. + */ + video: SchemaFile +} + +/** + * Veo31TextToVideoInput + */ +export type SchemaVeo31Input = { + /** + * Prompt + * + * The text prompt describing the video you want to generate + */ + prompt: string + /** + * Duration + * + * The duration of the generated video. + */ + duration?: '4s' | '6s' | '8s' + /** + * Aspect Ratio + * + * Aspect ratio of the generated video + */ + aspect_ratio?: '16:9' | '9:16' + /** + * Generate Audio + * + * Whether to generate audio for the video. + */ + generate_audio?: boolean + /** + * Auto Fix + * + * Whether to automatically attempt to fix prompts that fail content policy or other validation checks by rewriting them. + */ + auto_fix?: boolean + /** + * Resolution + * + * The resolution of the generated video. + */ + resolution?: '720p' | '1080p' | '4k' + /** + * Seed + * + * The seed for the random number generator. + */ + seed?: number + /** + * Negative Prompt + * + * A negative prompt to guide the video generation. + */ + negative_prompt?: string +} + +/** + * Veo31TextToVideoOutput + */ +export type SchemaVeo31FastOutput = { + /** + * Video + * + * The generated video. + */ + video: SchemaFile +} + +/** + * Veo31TextToVideoInput + */ +export type SchemaVeo31FastInput = { + /** + * Prompt + * + * The text prompt describing the video you want to generate + */ + prompt: string + /** + * Duration + * + * The duration of the generated video. + */ + duration?: '4s' | '6s' | '8s' + /** + * Aspect Ratio + * + * Aspect ratio of the generated video + */ + aspect_ratio?: '16:9' | '9:16' + /** + * Generate Audio + * + * Whether to generate audio for the video. 
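Editorial note: a sketch of a Sora 2 Pro request using the generated types above. It highlights the documented trade-off: `delete_video` improves privacy but permanently removes the result and disables remixing via `video_id`. Values are illustrative; the `./fal.types` path is hypothetical.

```ts
import type { SchemaSora2TextToVideoProInput } from './fal.types' // hypothetical path

const soraInput: SchemaSora2TextToVideoProInput = {
  prompt: 'A paper boat drifting down a rain-soaked gutter, macro shot',
  duration: 8,
  resolution: '1080p', // Pro allows 1080p; the base endpoint is 720p only
  aspect_ratio: '16:9',
  delete_video: false, // keep the video so video_id can be remixed later
}
```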
+ */ + generate_audio?: boolean + /** + * Auto Fix + * + * Whether to automatically attempt to fix prompts that fail content policy or other validation checks by rewriting them. + */ + auto_fix?: boolean + /** + * Resolution + * + * The resolution of the generated video. + */ + resolution?: '720p' | '1080p' | '4k' + /** + * Seed + * + * The seed for the random number generator. + */ + seed?: number + /** + * Negative Prompt + * + * A negative prompt to guide the video generation. + */ + negative_prompt?: string +} + +/** + * KandinskyT2VResponse + */ +export type SchemaKandinsky5TextToVideoOutput = { + /** + * Video + * + * The generated video file. + */ + video?: SchemaFile +} + +/** + * KandinskyT2VRequest + */ +export type SchemaKandinsky5TextToVideoInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Resolution + * + * Resolution of the generated video in W:H format. Will be calculated based on the aspect ratio(768x512, 512x512, 512x768). + */ + resolution?: '768x512' + /** + * Duration + * + * The length of the video to generate (5s or 10s) + */ + duration?: '5s' | '10s' + /** + * Aspect Ratio + * + * Aspect ratio of the generated video. One of (3:2, 1:1, 2:3). + */ + aspect_ratio?: '3:2' | '1:1' | '2:3' + /** + * Num Inference Steps + * + * The number of inference steps. + */ + num_inference_steps?: number +} + +/** + * KandinskyT2VResponse + */ +export type SchemaKandinsky5TextToVideoDistillOutput = { + /** + * Video + * + * The generated video file. + */ + video?: SchemaFile +} + +/** + * KandinskyT2VDistillRequest + */ +export type SchemaKandinsky5TextToVideoDistillInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Duration + * + * The length of the video to generate (5s or 10s) + */ + duration?: '5s' | '10s' + /** + * Aspect Ratio + * + * Aspect ratio of the generated video. One of (3:2, 1:1, 2:3). + */ + aspect_ratio?: '3:2' | '1:1' | '2:3' + /** + * Resolution + * + * Resolution of the generated video in W:H format. Will be calculated based on the aspect ratio(768x512, 512x512, 512x768). + */ + resolution?: '768x512' +} + +/** + * WanAlphaResponse + */ +export type SchemaWanAlphaOutput = { + /** + * Prompt + * + * The prompt used for generation. + */ + prompt: string + /** + * Image + * + * The generated image file. + */ + image?: SchemaVideoFile + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Mask + * + * The generated mask file. + */ + mask?: SchemaVideoFile + /** + * Video + * + * The generated video file. + */ + video?: SchemaVideoFile +} + +/** + * WanAlphaRequest + */ +export type SchemaWanAlphaInput = { + /** + * Prompt + * + * The prompt to guide the video generation. + */ + prompt: string + /** + * Shift + * + * The shift of the generated video. + */ + shift?: number + /** + * Mask Clamp Upper + * + * The upper bound of the mask clamping. + */ + mask_clamp_upper?: number + /** + * FPS + * + * The frame rate of the generated video. + */ + fps?: number + /** + * Mask Clamp Lower + * + * The lower bound of the mask clamping. + */ + mask_clamp_lower?: number + /** + * Number of Frames + * + * The number of frames to generate. + */ + num_frames?: number + /** + * Enable Safety Checker + * + * Whether to enable safety checker. + */ + enable_safety_checker?: boolean + /** + * Mask Binarization Threshold + * + * The threshold for mask binarization. When binarize_mask is True, this threshold will be used to binarize the mask. 
This will also be used for transparency when the output type is `.webm`. + */ + mask_binarization_threshold?: number + /** + * Sampler + * + * The sampler to use. + */ + sampler?: 'unipc' | 'dpm++' | 'euler' + /** + * Video Write Mode + * + * The write mode of the generated video. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Resolution + * + * The resolution of the generated video. + */ + resolution?: '240p' | '360p' | '480p' | '580p' | '720p' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video. + */ + aspect_ratio?: '16:9' | '1:1' | '9:16' + /** + * Video Output Type + * + * The output type of the generated video. + */ + video_output_type?: + | 'X264 (.mp4)' + | 'VP9 (.webm)' + | 'PRORES4444 (.mov)' + | 'GIF (.gif)' + /** + * Binarize Mask + * + * Whether to binarize the mask. + */ + binarize_mask?: boolean + /** + * Video Quality + * + * The quality of the generated video. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean + /** + * Number of Inference Steps + * + * The number of inference steps to use. + */ + num_inference_steps?: number + /** + * Seed + * + * The seed for the random number generator. + */ + seed?: number +} + +/** + * VideoToVideoOutput + */ +export type SchemaKreaWan14bTextToVideoOutput = { + video: SchemaFile +} + +/** + * TextToVideoInput + */ +export type SchemaKreaWan14bTextToVideoInput = { + /** + * Prompt + * + * Prompt for the video-to-video generation. + */ + prompt: string + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning. + */ + enable_prompt_expansion?: boolean + /** + * Num Frames + * + * Number of frames to generate. Must be a multiple of 12 plus 6, for example 6, 18, 30, 42, etc. + */ + num_frames?: number + /** + * Seed + * + * Seed for the video-to-video generation. + */ + seed?: number | unknown +} + +/** + * Q2TextToVideoOutput + */ +export type SchemaViduQ2TextToVideoOutput = { + /** + * Video + * + * The generated video from text using the Q2 model + */ + video: SchemaFile +} + +/** + * Q2TextToVideoRequest + */ +export type SchemaViduQ2TextToVideoInput = { + /** + * Prompt + * + * Text prompt for video generation, max 3000 characters + */ + prompt: string + /** + * Resolution + * + * Output video resolution + */ + resolution?: '360p' | '520p' | '720p' | '1080p' + /** + * Aspect Ratio + * + * The aspect ratio of the output video + */ + aspect_ratio?: '16:9' | '9:16' | '1:1' + /** + * Duration + * + * Duration of the video in seconds + */ + duration?: 2 | 3 | 4 | 5 | 6 | 7 | 8 + /** + * Bgm + * + * Whether to add background music to the video (only for 4-second videos) + */ + bgm?: boolean + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. 
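SchemaKreaWan14bTextToVideoInput above constrains num_frames to values of the form 12k + 6 (6, 18, 30, 42, ...). A small helper that snaps an arbitrary request onto the nearest valid count; the snapping strategy is an illustrative choice, not part of the API:

```ts
// Valid Krea Wan 14B frame counts are 12k + 6, per the schema comment.
function nearestValidNumFrames(requested: number): number {
  const k = Math.max(0, Math.round((requested - 6) / 12))
  return 12 * k + 6
}

nearestValidNumFrames(30) // 30 (already valid)
nearestValidNumFrames(25) // 30 (nearest value of the form 12k + 6)
nearestValidNumFrames(1)  // 6  (floor of the valid range)
```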
+ */ + seed?: number + /** + * Movement Amplitude + * + * The movement amplitude of objects in the frame + */ + movement_amplitude?: 'auto' | 'small' | 'medium' | 'large' +} + +/** + * SeedanceFastT2VVideoOutput + */ +export type SchemaBytedanceSeedanceV1ProFastTextToVideoOutput = { + /** + * Seed + * + * Seed used for generation + */ + seed: number + /** + * Video + * + * Generated video file + */ + video: SchemaFile +} + +/** + * SeedanceProFastTextToVideoInput + */ +export type SchemaBytedanceSeedanceV1ProFastTextToVideoInput = { + /** + * Prompt + * + * The text prompt used to generate the video + */ + prompt: string + /** + * Resolution + * + * Video resolution - 480p for faster generation, 720p for balance, 1080p for higher quality + */ + resolution?: '480p' | '720p' | '1080p' + /** + * Duration + * + * Duration of the video in seconds + */ + duration?: '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | '10' | '11' | '12' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: '21:9' | '16:9' | '4:3' | '1:1' | '3:4' | '9:16' + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * Random seed to control video generation. Use -1 for random. + */ + seed?: number + /** + * Camera Fixed + * + * Whether to fix the camera position + */ + camera_fixed?: boolean +} + +/** + * ProTextToVideoHailuo23Output + */ +export type SchemaMinimaxHailuo23ProTextToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * ProTextToVideoHailuo23Input + */ +export type SchemaMinimaxHailuo23ProTextToVideoInput = { + /** + * Prompt Optimizer + * + * Whether to use the model's prompt optimizer + */ + prompt_optimizer?: boolean + /** + * Prompt + * + * Text prompt for video generation + */ + prompt: string +} + +/** + * StandardTextToVideoHailuo23Output + */ +export type SchemaMinimaxHailuo23StandardTextToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * StandardTextToVideoHailuo23Input + */ +export type SchemaMinimaxHailuo23StandardTextToVideoInput = { + /** + * Prompt Optimizer + * + * Whether to use the model's prompt optimizer + */ + prompt_optimizer?: boolean + /** + * Duration + * + * The duration of the video in seconds. + */ + duration?: '6' | '10' + /** + * Prompt + */ + prompt: string +} + +/** + * LongCatVideoResponse + */ +export type SchemaLongcatVideoDistilledTextToVideo480pOutput = { + /** + * Prompt + * + * The prompt used for generation. + */ + prompt: string + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * LongCatVideoRequest + */ +export type SchemaLongcatVideoDistilledTextToVideo480pInput = { + /** + * Video Write Mode + * + * The write mode of the generated video. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video. + */ + aspect_ratio?: '16:9' | '9:16' | '1:1' + /** + * Prompt + * + * The prompt to guide the video generation. + */ + prompt: string + /** + * Video Output Type + * + * The output type of the generated video. + */ + video_output_type?: + | 'X264 (.mp4)' + | 'VP9 (.webm)' + | 'PRORES4444 (.mov)' + | 'GIF (.gif)' + /** + * FPS + * + * The frame rate of the generated video. 
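Several inputs in this file (WanAlpha above, the LongCat variants here, LTX-2 further down) share the same video_output_type union, and each label embeds its container extension, e.g. 'VP9 (.webm)'. A sketch that recovers the extension from the label; the regex is mine, the labels come straight from the schema:

```ts
type VideoOutputType =
  | 'X264 (.mp4)'
  | 'VP9 (.webm)'
  | 'PRORES4444 (.mov)'
  | 'GIF (.gif)'

// Pull the '(.ext)' suffix out of the schema's human-readable label.
function extensionOf(outputType: VideoOutputType): string {
  const match = outputType.match(/\((\.[a-z0-9]+)\)/)
  if (!match) throw new Error(`unrecognized output type: ${outputType}`)
  return match[1] // '.mp4' | '.webm' | '.mov' | '.gif'
}
```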
+ */ + fps?: number + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Video Quality + * + * The quality of the generated video. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Enable Safety Checker + * + * Whether to enable safety checker. + */ + enable_safety_checker?: boolean + /** + * Number of Frames + * + * The number of frames to generate. + */ + num_frames?: number + /** + * Number of Inference Steps + * + * The number of inference steps to use. + */ + num_inference_steps?: number + /** + * Seed + * + * The seed for the random number generator. + */ + seed?: number + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean +} + +/** + * LongCatVideoResponse + */ +export type SchemaLongcatVideoDistilledTextToVideo720pOutput = { + /** + * Prompt + * + * The prompt used for generation. + */ + prompt: string + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * LongCat720PVideoRequest + */ +export type SchemaLongcatVideoDistilledTextToVideo720pInput = { + /** + * Video Write Mode + * + * The write mode of the generated video. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video. + */ + aspect_ratio?: '16:9' | '9:16' | '1:1' + /** + * Prompt + * + * The prompt to guide the video generation. + */ + prompt: string + /** + * Video Output Type + * + * The output type of the generated video. + */ + video_output_type?: + | 'X264 (.mp4)' + | 'VP9 (.webm)' + | 'PRORES4444 (.mov)' + | 'GIF (.gif)' + /** + * FPS + * + * The frame rate of the generated video. + */ + fps?: number + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Number of Refinement Inference Steps + * + * The number of inference steps to use for refinement. + */ + num_refine_inference_steps?: number + /** + * Video Quality + * + * The quality of the generated video. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Enable Safety Checker + * + * Whether to enable safety checker. + */ + enable_safety_checker?: boolean + /** + * Number of Frames + * + * The number of frames to generate. + */ + num_frames?: number + /** + * Number of Inference Steps + * + * The number of inference steps to use. + */ + num_inference_steps?: number + /** + * Seed + * + * The seed for the random number generator. + */ + seed?: number + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean +} + +/** + * LongCatVideoResponse + */ +export type SchemaLongcatVideoTextToVideo480pOutput = { + /** + * Prompt + * + * The prompt used for generation. + */ + prompt: string + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * LongCatCFGVideoRequest + */ +export type SchemaLongcatVideoTextToVideo480pInput = { + /** + * Prompt + * + * The prompt to guide the video generation. + */ + prompt: string + /** + * Acceleration + * + * The acceleration level to use for the video generation. 
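The LongCat inputs expose num_frames and fps independently, so the resulting clip length is simply their ratio. A worked one-liner; the example numbers are mine, since the schema does not state the defaults:

```ts
// seconds = frames / frames-per-second
function clipSeconds(numFrames: number, fps: number): number {
  return numFrames / fps
}

clipSeconds(120, 24) // 5 seconds of video
```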
+ */ + acceleration?: 'none' | 'regular' + /** + * FPS + * + * The frame rate of the generated video. + */ + fps?: number + /** + * Guidance Scale + * + * The guidance scale to use for the video generation. + */ + guidance_scale?: number + /** + * Number of Frames + * + * The number of frames to generate. + */ + num_frames?: number + /** + * Enable Safety Checker + * + * Whether to enable safety checker. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * The negative prompt to use for the video generation. + */ + negative_prompt?: string + /** + * Video Write Mode + * + * The write mode of the generated video. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Video Output Type + * + * The output type of the generated video. + */ + video_output_type?: + | 'X264 (.mp4)' + | 'VP9 (.webm)' + | 'PRORES4444 (.mov)' + | 'GIF (.gif)' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video. + */ + aspect_ratio?: '16:9' | '9:16' | '1:1' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Video Quality + * + * The quality of the generated video. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean + /** + * Seed + * + * The seed for the random number generator. + */ + seed?: number + /** + * Number of Inference Steps + * + * The number of inference steps to use for the video generation. + */ + num_inference_steps?: number +} + +/** + * LongCatVideoResponse + */ +export type SchemaLongcatVideoTextToVideo720pOutput = { + /** + * Prompt + * + * The prompt used for generation. + */ + prompt: string + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * LongCat720PCFGVideoRequest + */ +export type SchemaLongcatVideoTextToVideo720pInput = { + /** + * Prompt + * + * The prompt to guide the video generation. + */ + prompt: string + /** + * Acceleration + * + * The acceleration level to use for the video generation. + */ + acceleration?: 'none' | 'regular' + /** + * FPS + * + * The frame rate of the generated video. + */ + fps?: number + /** + * Number of Refinement Inference Steps + * + * The number of inference steps to use for refinement. + */ + num_refine_inference_steps?: number + /** + * Guidance Scale + * + * The guidance scale to use for the video generation. + */ + guidance_scale?: number + /** + * Number of Frames + * + * The number of frames to generate. + */ + num_frames?: number + /** + * Enable Safety Checker + * + * Whether to enable safety checker. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * The negative prompt to use for the video generation. + */ + negative_prompt?: string + /** + * Video Write Mode + * + * The write mode of the generated video. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Video Output Type + * + * The output type of the generated video. + */ + video_output_type?: + | 'X264 (.mp4)' + | 'VP9 (.webm)' + | 'PRORES4444 (.mov)' + | 'GIF (.gif)' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video. + */ + aspect_ratio?: '16:9' | '9:16' | '1:1' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. 
+ */ + sync_mode?: boolean + /** + * Video Quality + * + * The quality of the generated video. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean + /** + * Number of Inference Steps + * + * The number of inference steps to use for the video generation. + */ + num_inference_steps?: number + /** + * Seed + * + * The seed for the random number generator. + */ + seed?: number +} + +/** + * SanaVideoOutput + */ +export type SchemaSanaVideoOutput = { + /** + * Seed + * + * The random seed used for the generation process + */ + seed: number + /** + * Video + * + * Generated video file + */ + video: SchemaFile +} + +/** + * SanaVideoInput + */ +export type SchemaSanaVideoInput = { + /** + * Prompt + * + * The text prompt describing the video to generate + */ + prompt: string + /** + * Resolution + * + * The resolution of the output video + */ + resolution?: '480p' + /** + * Fps + * + * Frames per second for the output video + */ + fps?: number + /** + * Motion Score + * + * Motion intensity score (higher = more motion) + */ + motion_score?: number + /** + * Guidance Scale + * + * Guidance scale for generation (higher = more prompt adherence) + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * Number of denoising steps + */ + num_inference_steps?: number + /** + * Seed + * + * Random seed for reproducible generation. If not provided, a random seed will be used. + */ + seed?: number + /** + * Negative Prompt + * + * The negative prompt describing what to avoid in the generation + */ + negative_prompt?: string + /** + * Num Frames + * + * Number of frames to generate + */ + num_frames?: number +} + +/** + * GenerationOutput + * + * Output model for text-to-video generation + */ +export type SchemaInfinityStarTextToVideoOutput = { + /** + * Video + * + * Generated video file + */ + video: SchemaFile +} + +/** + * GenerationInput + * + * Input model for text-to-video generation + */ +export type SchemaInfinityStarTextToVideoInput = { + /** + * Prompt + * + * Text prompt for generating the video + */ + prompt: string + /** + * Aspect Ratio + * + * Aspect ratio of the generated output + */ + aspect_ratio?: '16:9' | '1:1' | '9:16' + /** + * Enhance Prompt + * + * Whether to use an LLM to enhance the prompt. + */ + enhance_prompt?: boolean + /** + * Use Apg + * + * Whether to use APG + */ + use_apg?: boolean + /** + * Guidance Scale + * + * Guidance scale for generation + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * Number of inference steps + */ + num_inference_steps?: number + /** + * Seed + * + * Random seed for reproducibility. Leave empty for random generation. + */ + seed?: number + /** + * Negative Prompt + * + * Negative prompt to guide what to avoid in generation + */ + negative_prompt?: string + /** + * Tau Video + * + * Tau value for video scale + */ + tau_video?: number +} + +/** + * HunyuanVideo15Response + */ +export type SchemaHunyuanVideoV15TextToVideoOutput = { + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * HunyuanVideo15T2VRequest + */ +export type SchemaHunyuanVideoV15TextToVideoInput = { + /** + * Prompt + * + * The prompt to generate the video. + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the video. 
+ */ + aspect_ratio?: '16:9' | '9:16' + /** + * Resolution + * + * The resolution of the video. + */ + resolution?: '480p' + /** + * Enable Prompt Expansion + * + * Enable prompt expansion to enhance the input prompt. + */ + enable_prompt_expansion?: boolean + /** + * Seed + * + * Random seed for reproducibility. + */ + seed?: number + /** + * Num Inference Steps + * + * The number of inference steps. + */ + num_inference_steps?: number + /** + * Negative Prompt + * + * The negative prompt to guide what not to generate. + */ + negative_prompt?: string + /** + * Num Frames + * + * The number of frames to generate. + */ + num_frames?: number +} + +/** + * LTXVTextToVideoResponse + */ +export type SchemaLtx2TextToVideoOutput = { + /** + * Video + * + * The generated video file + */ + video: SchemaVideoFile +} + +/** + * LTXVTextToVideoRequest + */ +export type SchemaLtx2TextToVideoInput = { + /** + * Prompt + * + * The prompt to generate the video from + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: '16:9' + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: 6 | 8 | 10 + /** + * Generate Audio + * + * Whether to generate audio for the generated video + */ + generate_audio?: boolean + /** + * Resolution + * + * The resolution of the generated video + */ + resolution?: '1080p' | '1440p' | '2160p' + /** + * Frames per Second + * + * The frames per second of the generated video + */ + fps?: 25 | 50 +} + +/** + * LTXVTextToVideoResponse + */ +export type SchemaLtx2TextToVideoFastOutput = { + /** + * Video + * + * The generated video file + */ + video: SchemaVideoFile +} + +/** + * LTXVTextToVideoFastRequest + */ +export type SchemaLtx2TextToVideoFastInput = { + /** + * Prompt + * + * The prompt to generate the video from + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: '16:9' + /** + * Duration + * + * The duration of the generated video in seconds. The fast model supports 6-20 seconds. Note: Durations longer than 10 seconds (12, 14, 16, 18, 20) are only supported with 25 FPS and 1080p resolution. 
+ */ + duration?: 6 | 8 | 10 | 12 | 14 | 16 | 18 | 20 + /** + * Generate Audio + * + * Whether to generate audio for the generated video + */ + generate_audio?: boolean + /** + * Resolution + * + * The resolution of the generated video + */ + resolution?: '1080p' | '1440p' | '2160p' + /** + * Frames per Second + * + * The frames per second of the generated video + */ + fps?: 25 | 50 +} + +/** + * VideoOutputV5_5 + */ +export type SchemaPixverseV55TextToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * TextToVideoRequestV5_5 + */ +export type SchemaPixverseV55TextToVideoInput = { + /** + * Prompt + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: '16:9' | '4:3' | '1:1' | '3:4' | '9:16' + /** + * Resolution + * + * The resolution of the generated video + */ + resolution?: '360p' | '540p' | '720p' | '1080p' + /** + * Style + * + * The style of the generated video + */ + style?: 'anime' | '3d_animation' | 'clay' | 'comic' | 'cyberpunk' + /** + * Thinking Type + * + * Prompt optimization mode: 'enabled' to optimize, 'disabled' to turn off, 'auto' for model decision + */ + thinking_type?: 'enabled' | 'disabled' | 'auto' + /** + * Generate Multi Clip Switch + * + * Enable multi-clip generation with dynamic camera changes + */ + generate_multi_clip_switch?: boolean + /** + * Duration + * + * The duration of the generated video in seconds. Longer durations cost more. 1080p videos are limited to 5 or 8 seconds + */ + duration?: '5' | '8' | '10' + /** + * Generate Audio Switch + * + * Enable audio generation (BGM, SFX, dialogue) + */ + generate_audio_switch?: boolean + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same video every time. + * + */ + seed?: number + /** + * Negative Prompt + * + * Negative prompt to be used for the generation + */ + negative_prompt?: string +} + +/** + * TextToVideoV26ProOutput + */ +export type SchemaKlingVideoV26ProTextToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * TextToVideoV26ProRequest + */ +export type SchemaKlingVideoV26ProTextToVideoInput = { + /** + * Prompt + */ + prompt: string + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: '5' | '10' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video frame + */ + aspect_ratio?: '16:9' | '9:16' | '1:1' + /** + * Generate Audio + * + * Whether to generate native audio for the video. Supports Chinese and English voice output. Other languages are automatically translated to English. For English speech, use lowercase letters; for acronyms or proper nouns, use uppercase. + */ + generate_audio?: boolean + /** + * Negative Prompt + */ + negative_prompt?: string + /** + * Cfg Scale + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt. + * + */ + cfg_scale?: number +} + +/** + * FabricOneTextOutput + */ +export type SchemaFabric10TextOutput = { + video: SchemaFile +} + +/** + * FabricOneTextInput + */ +export type SchemaFabric10TextInput = { + /** + * Text + */ + text: string + /** + * Resolution + * + * Resolution + */ + resolution: '720p' | '480p' + /** + * Voice Description + * + * Optional additional voice description. The primary voice description is auto-generated from the image. 
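The LTX-2 fast schema above notes that durations beyond 10 seconds are only supported at 25 FPS and 1080p. A client-side guard reflecting that documented rule; the field names match SchemaLtx2TextToVideoFastInput, and treating omitted fields as failing is my conservative choice (the API's defaults may differ):

```ts
type Ltx2FastRequest = {
  duration?: 6 | 8 | 10 | 12 | 14 | 16 | 18 | 20
  resolution?: '1080p' | '1440p' | '2160p'
  fps?: 25 | 50
}

// Reject long durations unless fps and resolution are explicitly compatible.
function assertLtx2FastCombination({ duration, resolution, fps }: Ltx2FastRequest): void {
  if (duration !== undefined && duration > 10 && (fps !== 25 || resolution !== '1080p')) {
    throw new Error('LTX-2 fast: durations over 10s require fps=25 and resolution=1080p')
  }
}
```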
You can use simple descriptors like 'British accent' or 'Confident' or provide a detailed description like 'Confident male voice, mid-20s, with notes of...' + */ + voice_description?: string | unknown + /** + * Image Url + */ + image_url: string +} + +/** + * TextToVideoOutput + * + * Output for text-to-video generation + */ +export type SchemaV26TextToVideoOutput = { + /** + * Actual Prompt + * + * The actual prompt used if prompt rewriting was enabled + */ + actual_prompt?: string + /** + * Seed + * + * The seed used for generation + */ + seed: number + /** + * Video + * + * The generated video file + */ + video: SchemaVideoFile +} + +/** + * TextToVideoInput + * + * Input for Wan 2.6 text-to-video generation + */ +export type SchemaV26TextToVideoInput = { + /** + * Prompt + * + * The text prompt for video generation. Supports Chinese and English, max 800 characters. For multi-shot videos, use format: 'Overall description. First shot [0-3s] content. Second shot [3-5s] content.' + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated video. Wan 2.6 supports additional ratios. + */ + aspect_ratio?: '16:9' | '9:16' | '1:1' | '4:3' | '3:4' + /** + * Resolution + * + * Video resolution tier. Wan 2.6 T2V only supports 720p and 1080p (no 480p). + */ + resolution?: '720p' | '1080p' + /** + * Duration + * + * Duration of the generated video in seconds. Choose between 5, 10, or 15 seconds. + */ + duration?: '5' | '10' | '15' + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Audio Url + * + * + * URL of the audio to use as the background music. Must be publicly accessible. + * Limit handling: If the audio duration exceeds the duration value (5, 10, or 15 seconds), + * the audio is truncated to the first N seconds, and the rest is discarded. If + * the audio is shorter than the video, the remaining part of the video will be silent. + * For example, if the audio is 3 seconds long and the video duration is 5 seconds, the + * first 3 seconds of the output video will have sound, and the last 2 seconds will be silent. + * - Format: WAV, MP3. + * - Duration: 3 to 30 s. + * - File size: Up to 15 MB. + * + */ + audio_url?: string + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Multi Shots + * + * When true, enables intelligent multi-shot segmentation for coherent narrative videos. Only active when enable_prompt_expansion is True. Set to false for single-shot generation. + */ + multi_shots?: boolean + /** + * Negative Prompt + * + * Negative prompt to describe content to avoid. Max 500 characters. + */ + negative_prompt?: string + /** + * Enable Prompt Expansion + * + * Whether to enable prompt rewriting using LLM. Improves results for short prompts but increases processing time. 
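SchemaV26TextToVideoInput above documents hard limits on audio_url (WAV/MP3, 3 to 30 s, up to 15 MB) plus truncate-or-silence behavior when lengths mismatch. A lightweight pre-flight that checks only the size limit via Content-Length; it cannot verify duration or codec, and servers that omit the header pass through unchecked:

```ts
// HEAD request to check the 15 MB audio cap before submitting a Wan 2.6 job.
async function audioWithinSizeLimit(audioUrl: string): Promise<boolean> {
  const response = await fetch(audioUrl, { method: 'HEAD' })
  const length = response.headers.get('content-length')
  if (length === null) return true // size unknown; let the API decide
  return Number(length) <= 15 * 1024 * 1024
}
```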
+ */ + enable_prompt_expansion?: boolean +} + +/** + * SeedanceProv15T2VVideoOutput + */ +export type SchemaBytedanceSeedanceV15ProTextToVideoOutput = { + /** + * Seed + * + * Seed used for generation + */ + seed: number + /** + * Video + * + * Generated video file + */ + video: SchemaFile +} + +/** + * SeedanceProv15TextToVideoInput + */ +export type SchemaBytedanceSeedanceV15ProTextToVideoInput = { + /** + * Prompt + * + * The text prompt used to generate the video + */ + prompt: string + /** + * Resolution + * + * Video resolution - 480p for faster generation, 720p for balance, 1080p for higher quality + */ + resolution?: '480p' | '720p' | '1080p' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: '21:9' | '16:9' | '4:3' | '1:1' | '3:4' | '9:16' + /** + * Generate Audio + * + * Whether to generate audio for the video + */ + generate_audio?: boolean + /** + * Duration + * + * Duration of the video in seconds + */ + duration?: '4' | '5' | '6' | '7' | '8' | '9' | '10' | '11' | '12' + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Camera Fixed + * + * Whether to fix the camera position + */ + camera_fixed?: boolean + /** + * Seed + * + * Random seed to control video generation. Use -1 for random. + */ + seed?: number +} + +/** + * KandinskyT2VResponse + */ +export type SchemaKandinsky5ProTextToVideoOutput = { + /** + * Video + * + * The generated video file. + */ + video?: SchemaFile +} + +/** + * KandinskyT2VRequest + */ +export type SchemaKandinsky5ProTextToVideoInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Resolution + * + * Video resolution: 512p or 1024p. + */ + resolution?: '512P' | '1024P' + /** + * Acceleration + * + * Acceleration level for faster generation. + */ + acceleration?: 'none' | 'regular' + /** + * Aspect Ratio + * + * Aspect ratio of the generated video. One of (3:2, 1:1, 2:3). + */ + aspect_ratio?: '3:2' | '1:1' | '2:3' + /** + * Num Inference Steps + * + * The number of inference steps. + */ + num_inference_steps?: number + /** + * Duration + * + * The length of the video to generate (5s or 10s) + */ + duration?: '5s' +} + +/** + * LTX2TextToVideoOutput + */ +export type SchemaLtx219bTextToVideoOutput = { + /** + * Prompt + * + * The prompt used for the generation. + */ + prompt: string + /** + * Seed + * + * The seed used for the random number generator. + */ + seed: number + video: SchemaVideoFile +} + +/** + * LTX2TextToVideoInput + */ +export type SchemaLtx219bTextToVideoInput = { + /** + * Use Multi-Scale + * + * Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details. + */ + use_multiscale?: boolean + /** + * Prompt + * + * The prompt to generate the video from. + */ + prompt: string + /** + * Acceleration + * + * The acceleration level to use. + */ + acceleration?: 'none' | 'regular' | 'high' | 'full' + /** + * Generate Audio + * + * Whether to generate audio for the video. + */ + generate_audio?: boolean + /** + * FPS + * + * The frames per second of the generated video. + */ + fps?: number + /** + * Camera LoRA + * + * The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera. 
+ */ + camera_lora?: + | 'dolly_in' + | 'dolly_out' + | 'dolly_left' + | 'dolly_right' + | 'jib_up' + | 'jib_down' + | 'static' + | 'none' + /** + * Video Size + * + * The size of the generated video. + */ + video_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Guidance Scale + * + * The guidance scale to use. + */ + guidance_scale?: number + /** + * Number of Frames + * + * The number of frames to generate. + */ + num_frames?: number + /** + * Camera LoRA Scale + * + * The scale of the camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera. + */ + camera_lora_scale?: number + /** + * Negative Prompt + * + * The negative prompt to generate the video from. + */ + negative_prompt?: string + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Video Write Mode + * + * The write mode of the generated video. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Video Output Type + * + * The output type of the generated video. + */ + video_output_type?: + | 'X264 (.mp4)' + | 'VP9 (.webm)' + | 'PRORES4444 (.mov)' + | 'GIF (.gif)' + /** + * Video Quality + * + * The quality of the generated video. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean + /** + * Number of Inference Steps + * + * The number of inference steps to use. + */ + num_inference_steps?: number + /** + * Seed + * + * The seed for the random number generator. + */ + seed?: number | unknown +} + +/** + * LTX2TextToVideoOutput + */ +export type SchemaLtx219bTextToVideoLoraOutput = { + /** + * Prompt + * + * The prompt used for the generation. + */ + prompt: string + /** + * Seed + * + * The seed used for the random number generator. + */ + seed: number + video: SchemaVideoFile +} + +/** + * LTX2LoRATextToVideoInput + */ +export type SchemaLtx219bTextToVideoLoraInput = { + /** + * Use Multi-Scale + * + * Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details. + */ + use_multiscale?: boolean + /** + * Prompt + * + * The prompt to generate the video from. + */ + prompt: string + /** + * Acceleration + * + * The acceleration level to use. + */ + acceleration?: 'none' | 'regular' | 'high' | 'full' + /** + * Generate Audio + * + * Whether to generate audio for the video. + */ + generate_audio?: boolean + /** + * FPS + * + * The frames per second of the generated video. + */ + fps?: number + /** + * LoRAs + * + * The LoRAs to use for the generation. + */ + loras: Array<SchemaLoRaInput> + /** + * Camera LoRA + * + * The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera. + */ + camera_lora?: + | 'dolly_in' + | 'dolly_out' + | 'dolly_left' + | 'dolly_right' + | 'jib_up' + | 'jib_down' + | 'static' + | 'none' + /** + * Video Size + * + * The size of the generated video.
+ */ + video_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Guidance Scale + * + * The guidance scale to use. + */ + guidance_scale?: number + /** + * Number of Frames + * + * The number of frames to generate. + */ + num_frames?: number + /** + * Camera LoRA Scale + * + * The scale of the camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera. + */ + camera_lora_scale?: number + /** + * Negative Prompt + * + * The negative prompt to generate the video from. + */ + negative_prompt?: string + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Video Write Mode + * + * The write mode of the generated video. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Video Output Type + * + * The output type of the generated video. + */ + video_output_type?: + | 'X264 (.mp4)' + | 'VP9 (.webm)' + | 'PRORES4444 (.mov)' + | 'GIF (.gif)' + /** + * Video Quality + * + * The quality of the generated video. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean + /** + * Number of Inference Steps + * + * The number of inference steps to use. + */ + num_inference_steps?: number + /** + * Seed + * + * The seed for the random number generator. + */ + seed?: number | unknown +} + +/** + * LoRAInput + * + * LoRA weight configuration. + */ +export type SchemaLoRaInput = { + /** + * Path + * + * URL, HuggingFace repo ID (owner/repo) to lora weights. + */ + path: string + /** + * Scale + * + * Scale factor for LoRA application (0.0 to 4.0). + */ + scale?: number + /** + * Weight Name + * + * Name of the LoRA weight. Only used if `path` is a HuggingFace repository, and is only required when the repository contains multiple LoRA weights. + */ + weight_name?: string | unknown +} + +/** + * LTX2TextToVideoOutput + */ +export type SchemaLtx219bDistilledTextToVideoOutput = { + /** + * Prompt + * + * The prompt used for the generation. + */ + prompt: string + /** + * Seed + * + * The seed used for the random number generator. + */ + seed: number + video: SchemaVideoFile +} + +/** + * LTX2DistilledTextToVideoInput + */ +export type SchemaLtx219bDistilledTextToVideoInput = { + /** + * Use Multi-Scale + * + * Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details. + */ + use_multiscale?: boolean + /** + * Prompt + * + * The prompt to generate the video from. + */ + prompt: string + /** + * Acceleration + * + * The acceleration level to use. + */ + acceleration?: 'none' | 'regular' | 'high' | 'full' + /** + * Generate Audio + * + * Whether to generate audio for the video. + */ + generate_audio?: boolean + /** + * FPS + * + * The frames per second of the generated video. + */ + fps?: number + /** + * Camera LoRA + * + * The camera LoRA to use. 
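SchemaLoRaInput above is the element type of the required loras array on the LTX-2 LoRA endpoints. A sketch of a payload; the repo ID and scale are placeholders, and './types' is an illustrative module path:

```ts
import type { SchemaLoRaInput } from './types' // illustrative path

const loras: Array<SchemaLoRaInput> = [
  {
    path: 'some-owner/some-lora-repo', // URL or HuggingFace repo ID, per the schema
    scale: 0.8, // schema documents a 0.0 to 4.0 range
    // weight_name is only required when the repo contains multiple LoRA weights
  },
]
```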
This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera. + */ + camera_lora?: + | 'dolly_in' + | 'dolly_out' + | 'dolly_left' + | 'dolly_right' + | 'jib_up' + | 'jib_down' + | 'static' + | 'none' + /** + * Video Size + * + * The size of the generated video. + */ + video_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Number of Frames + * + * The number of frames to generate. + */ + num_frames?: number + /** + * Camera LoRA Scale + * + * The scale of the camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera. + */ + camera_lora_scale?: number + /** + * Negative Prompt + * + * The negative prompt to generate the video from. + */ + negative_prompt?: string + /** + * Video Write Mode + * + * The write mode of the generated video. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Video Output Type + * + * The output type of the generated video. + */ + video_output_type?: + | 'X264 (.mp4)' + | 'VP9 (.webm)' + | 'PRORES4444 (.mov)' + | 'GIF (.gif)' + /** + * Video Quality + * + * The quality of the generated video. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean + /** + * Seed + * + * The seed for the random number generator. + */ + seed?: number | unknown +} + +/** + * LTX2TextToVideoOutput + */ +export type SchemaLtx219bDistilledTextToVideoLoraOutput = { + /** + * Prompt + * + * The prompt used for the generation. + */ + prompt: string + /** + * Seed + * + * The seed used for the random number generator. + */ + seed: number + video: SchemaVideoFile +} + +/** + * LTX2LoRADistilledTextToVideoInput + */ +export type SchemaLtx219bDistilledTextToVideoLoraInput = { + /** + * Use Multi-Scale + * + * Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details. + */ + use_multiscale?: boolean + /** + * Prompt + * + * The prompt to generate the video from. + */ + prompt: string + /** + * Acceleration + * + * The acceleration level to use. + */ + acceleration?: 'none' | 'regular' | 'high' | 'full' + /** + * Generate Audio + * + * Whether to generate audio for the video. + */ + generate_audio?: boolean + /** + * FPS + * + * The frames per second of the generated video. + */ + fps?: number + /** + * LoRAs + * + * The LoRAs to use for the generation. + */ + loras: Array<SchemaLoRaInput> + /** + * Camera LoRA + * + * The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera. + */ + camera_lora?: + | 'dolly_in' + | 'dolly_out' + | 'dolly_left' + | 'dolly_right' + | 'jib_up' + | 'jib_down' + | 'static' + | 'none' + /** + * Video Size + * + * The size of the generated video.
+ */ + video_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Number of Frames + * + * The number of frames to generate. + */ + num_frames?: number + /** + * Camera LoRA Scale + * + * The scale of the camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera. + */ + camera_lora_scale?: number + /** + * Negative Prompt + * + * The negative prompt to generate the video from. + */ + negative_prompt?: string + /** + * Video Write Mode + * + * The write mode of the generated video. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Video Output Type + * + * The output type of the generated video. + */ + video_output_type?: + | 'X264 (.mp4)' + | 'VP9 (.webm)' + | 'PRORES4444 (.mov)' + | 'GIF (.gif)' + /** + * Video Quality + * + * The quality of the generated video. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean + /** + * Seed + * + * The seed for the random number generator. + */ + seed?: number | unknown +} + +/** + * VideoOutputV5_5 + */ +export type SchemaPixverseV56TextToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * TextToVideoRequestV5_6 + */ +export type SchemaPixverseV56TextToVideoInput = { + /** + * Prompt + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated video + */ + aspect_ratio?: '16:9' | '4:3' | '1:1' | '3:4' | '9:16' + /** + * Resolution + * + * The resolution of the generated video + */ + resolution?: '360p' | '540p' | '720p' | '1080p' + /** + * Style + * + * The style of the generated video + */ + style?: 'anime' | '3d_animation' | 'clay' | 'comic' | 'cyberpunk' + /** + * Thinking Type + * + * Prompt optimization mode: 'enabled' to optimize, 'disabled' to turn off, 'auto' for model decision + */ + thinking_type?: 'enabled' | 'disabled' | 'auto' + /** + * Duration + * + * The duration of the generated video in seconds. 1080p videos are limited to 5 or 8 seconds + */ + duration?: '5' | '8' | '10' + /** + * Generate Audio Switch + * + * Enable audio generation (BGM, SFX, dialogue) + */ + generate_audio_switch?: boolean + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same video every time. 
+ * + */ + seed?: number + /** + * Negative Prompt + * + * Negative prompt to be used for the generation + */ + negative_prompt?: string +} + +/** + * TextToVideoV2MasterOutput + */ +export type SchemaKlingVideoV2MasterTextToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * TextToVideoV2MasterRequest + */ +export type SchemaKlingVideoV2MasterTextToVideoInput = { + /** + * Prompt + */ + prompt: string + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: '5' | '10' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video frame + */ + aspect_ratio?: '16:9' | '9:16' | '1:1' + /** + * Negative Prompt + */ + negative_prompt?: string + /** + * Cfg Scale + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt. + * + */ + cfg_scale?: number +} + +/** + * Veo3TextToVideoOutput + */ +export type SchemaVeo3Output = { + /** + * Video + * + * The generated video. + */ + video: SchemaFile +} + +/** + * Veo3TextToVideoInput + */ +export type SchemaVeo3Input = { + /** + * Prompt + * + * The text prompt describing the video you want to generate + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated video. + */ + aspect_ratio?: '16:9' | '9:16' + /** + * Duration + * + * The duration of the generated video. + */ + duration?: '4s' | '6s' | '8s' + /** + * Generate Audio + * + * Whether to generate audio for the video. + */ + generate_audio?: boolean + /** + * Auto Fix + * + * Whether to automatically attempt to fix prompts that fail content policy or other validation checks by rewriting them. + */ + auto_fix?: boolean + /** + * Resolution + * + * The resolution of the generated video. + */ + resolution?: '720p' | '1080p' + /** + * Seed + * + * The seed for the random number generator. + */ + seed?: number + /** + * Negative Prompt + * + * A negative prompt to guide the video generation. + */ + negative_prompt?: string +} + +/** + * TextToVideoHailuo02Output + */ +export type SchemaMinimaxHailuo02StandardTextToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * StandardTextToVideoHailuo02Input + */ +export type SchemaMinimaxHailuo02StandardTextToVideoInput = { + /** + * Prompt Optimizer + * + * Whether to use the model's prompt optimizer + */ + prompt_optimizer?: boolean + /** + * Duration + * + * The duration of the video in seconds. 10 seconds videos are not supported for 1080p resolution. + */ + duration?: '6' | '10' + /** + * Prompt + */ + prompt: string +} + +/** + * Veo3TextToVideoOutput + */ +export type SchemaVeo3FastOutput = { + /** + * Video + * + * The generated video. + */ + video: SchemaFile +} + +/** + * Veo3TextToVideoInput + */ +export type SchemaVeo3FastInput = { + /** + * Prompt + * + * The text prompt describing the video you want to generate + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated video. + */ + aspect_ratio?: '16:9' | '9:16' + /** + * Duration + * + * The duration of the generated video. + */ + duration?: '4s' | '6s' | '8s' + /** + * Generate Audio + * + * Whether to generate audio for the video. + */ + generate_audio?: boolean + /** + * Auto Fix + * + * Whether to automatically attempt to fix prompts that fail content policy or other validation checks by rewriting them. + */ + auto_fix?: boolean + /** + * Resolution + * + * The resolution of the generated video. 
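Pixverse, and several other schemas in this file, guarantee that the same seed plus the same prompt on the same model version reproduces the same video. A sketch of the usual pattern: draw one random 32-bit seed, persist it alongside the request, and replay it to regenerate:

```ts
// Draw a seed once, store it, and resubmit the identical request to reproduce.
function drawSeed(): number {
  return Math.floor(Math.random() * 2 ** 32)
}

const seed = drawSeed()
const request = { prompt: 'a paper boat drifting down a gutter stream', seed }
// Re-submitting `request` unchanged targets an identical video, per the
// schema's reproducibility note.
```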
+ */ + resolution?: '720p' | '1080p' + /** + * Seed + * + * The seed for the random number generator. + */ + seed?: number + /** + * Negative Prompt + * + * A negative prompt to guide the video generation. + */ + negative_prompt?: string +} + +/** + * TextToVideoV25ProOutput + */ +export type SchemaKlingVideoV25TurboProTextToVideoOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * TextToVideoV25ProRequest + */ +export type SchemaKlingVideoV25TurboProTextToVideoInput = { + /** + * Prompt + */ + prompt: string + /** + * Duration + * + * The duration of the generated video in seconds + */ + duration?: '5' | '10' + /** + * Aspect Ratio + * + * The aspect ratio of the generated video frame + */ + aspect_ratio?: '16:9' | '9:16' | '1:1' + /** + * Negative Prompt + */ + negative_prompt?: string + /** + * Cfg Scale + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt. + * + */ + cfg_scale?: number +} + +export type SchemaQueueStatus = { + status: 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED' + /** + * The request id. + */ + request_id: string + /** + * The response url. + */ + response_url?: string + /** + * The status url. + */ + status_url?: string + /** + * The cancel url. + */ + cancel_url?: string + /** + * The logs. + */ + logs?: { + [key: string]: unknown + } + /** + * The metrics. + */ + metrics?: { + [key: string]: unknown + } + /** + * The queue position. + */ + queue_position?: number +} + +export type GetFalAiKlingVideoV25TurboProTextToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-video/v2.5-turbo/pro/text-to-video/requests/{request_id}/status' + } + +export type GetFalAiKlingVideoV25TurboProTextToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiKlingVideoV25TurboProTextToVideoRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoV25TurboProTextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoV25TurboProTextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoV25TurboProTextToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v2.5-turbo/pro/text-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiKlingVideoV25TurboProTextToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiKlingVideoV25TurboProTextToVideoRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoV25TurboProTextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoV25TurboProTextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoV25TurboProTextToVideoData = { + body: SchemaKlingVideoV25TurboProTextToVideoInput + path?: never + query?: never + url: '/fal-ai/kling-video/v2.5-turbo/pro/text-to-video' +} + +export type PostFalAiKlingVideoV25TurboProTextToVideoResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoV25TurboProTextToVideoResponse = + PostFalAiKlingVideoV25TurboProTextToVideoResponses[keyof PostFalAiKlingVideoV25TurboProTextToVideoResponses] + +export type GetFalAiKlingVideoV25TurboProTextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v2.5-turbo/pro/text-to-video/requests/{request_id}' +} + +export type GetFalAiKlingVideoV25TurboProTextToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaKlingVideoV25TurboProTextToVideoOutput + } + +export type GetFalAiKlingVideoV25TurboProTextToVideoRequestsByRequestIdResponse = + GetFalAiKlingVideoV25TurboProTextToVideoRequestsByRequestIdResponses[keyof GetFalAiKlingVideoV25TurboProTextToVideoRequestsByRequestIdResponses] + +export type GetFalAiVeo3FastRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/veo3/fast/requests/{request_id}/status' +} + +export type GetFalAiVeo3FastRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiVeo3FastRequestsByRequestIdStatusResponse = + GetFalAiVeo3FastRequestsByRequestIdStatusResponses[keyof GetFalAiVeo3FastRequestsByRequestIdStatusResponses] + +export type PutFalAiVeo3FastRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/veo3/fast/requests/{request_id}/cancel' +} + +export type PutFalAiVeo3FastRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiVeo3FastRequestsByRequestIdCancelResponse = + PutFalAiVeo3FastRequestsByRequestIdCancelResponses[keyof PutFalAiVeo3FastRequestsByRequestIdCancelResponses] + +export type PostFalAiVeo3FastData = { + body: SchemaVeo3FastInput + path?: never + query?: never + url: '/fal-ai/veo3/fast' +} + +export type PostFalAiVeo3FastResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiVeo3FastResponse = + PostFalAiVeo3FastResponses[keyof PostFalAiVeo3FastResponses] + +export type GetFalAiVeo3FastRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/veo3/fast/requests/{request_id}' +} + +export type GetFalAiVeo3FastRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaVeo3FastOutput +} + +export type GetFalAiVeo3FastRequestsByRequestIdResponse = + GetFalAiVeo3FastRequestsByRequestIdResponses[keyof GetFalAiVeo3FastRequestsByRequestIdResponses] + +export type GetFalAiMinimaxHailuo02StandardTextToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/minimax/hailuo-02/standard/text-to-video/requests/{request_id}/status' + } + +export type GetFalAiMinimaxHailuo02StandardTextToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
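The Data/Responses types here spell out the queue protocol: POST the input to the endpoint path, poll the status URL until COMPLETED, then GET the result. An end-to-end sketch against the Veo 3 Fast paths above. Two assumptions to verify against fal's docs: the queue host is https://queue.fal.run, and the key is sent as `Authorization: Key <FAL_KEY>` (the apiKeyAuth scheme only names the header); './types' is again an illustrative module path:

```ts
import type {
  SchemaQueueStatus,
  SchemaVeo3FastInput,
  SchemaVeo3FastOutput,
} from './types'

const BASE = 'https://queue.fal.run' // assumed queue host
const headers = {
  Authorization: `Key ${process.env.FAL_KEY}`, // assumed auth scheme
  'Content-Type': 'application/json',
}

async function generateVeo3Fast(
  input: SchemaVeo3FastInput,
): Promise<SchemaVeo3FastOutput> {
  // 1. Enqueue: POST /fal-ai/veo3/fast answers with a QueueStatus holding request_id.
  const submitted = await fetch(`${BASE}/fal-ai/veo3/fast`, {
    method: 'POST',
    headers,
    body: JSON.stringify(input),
  })
  const { request_id }: SchemaQueueStatus = await submitted.json()

  // 2. Poll GET .../requests/{id}/status; logs=1 opts into log lines.
  for (;;) {
    const res = await fetch(
      `${BASE}/fal-ai/veo3/fast/requests/${request_id}/status?logs=1`,
      { headers },
    )
    const status: SchemaQueueStatus = await res.json()
    if (status.status === 'COMPLETED') break
    await new Promise((resolve) => setTimeout(resolve, 2000))
  }

  // 3. GET .../requests/{id} returns the typed output with the video file.
  const result = await fetch(`${BASE}/fal-ai/veo3/fast/requests/${request_id}`, {
    headers,
  })
  return result.json() as Promise<SchemaVeo3FastOutput>
}
```

The same three calls work for every endpoint in this file; only the path prefix and the input/output types change.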
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiMinimaxHailuo02StandardTextToVideoRequestsByRequestIdStatusResponse = + GetFalAiMinimaxHailuo02StandardTextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiMinimaxHailuo02StandardTextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiMinimaxHailuo02StandardTextToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/hailuo-02/standard/text-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiMinimaxHailuo02StandardTextToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiMinimaxHailuo02StandardTextToVideoRequestsByRequestIdCancelResponse = + PutFalAiMinimaxHailuo02StandardTextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiMinimaxHailuo02StandardTextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiMinimaxHailuo02StandardTextToVideoData = { + body: SchemaMinimaxHailuo02StandardTextToVideoInput + path?: never + query?: never + url: '/fal-ai/minimax/hailuo-02/standard/text-to-video' +} + +export type PostFalAiMinimaxHailuo02StandardTextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMinimaxHailuo02StandardTextToVideoResponse = + PostFalAiMinimaxHailuo02StandardTextToVideoResponses[keyof PostFalAiMinimaxHailuo02StandardTextToVideoResponses] + +export type GetFalAiMinimaxHailuo02StandardTextToVideoRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/hailuo-02/standard/text-to-video/requests/{request_id}' + } + +export type GetFalAiMinimaxHailuo02StandardTextToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaMinimaxHailuo02StandardTextToVideoOutput + } + +export type GetFalAiMinimaxHailuo02StandardTextToVideoRequestsByRequestIdResponse = + GetFalAiMinimaxHailuo02StandardTextToVideoRequestsByRequestIdResponses[keyof GetFalAiMinimaxHailuo02StandardTextToVideoRequestsByRequestIdResponses] + +export type GetFalAiVeo3RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/veo3/requests/{request_id}/status' +} + +export type GetFalAiVeo3RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiVeo3RequestsByRequestIdStatusResponse = + GetFalAiVeo3RequestsByRequestIdStatusResponses[keyof GetFalAiVeo3RequestsByRequestIdStatusResponses] + +export type PutFalAiVeo3RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/veo3/requests/{request_id}/cancel' +} + +export type PutFalAiVeo3RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiVeo3RequestsByRequestIdCancelResponse = + PutFalAiVeo3RequestsByRequestIdCancelResponses[keyof PutFalAiVeo3RequestsByRequestIdCancelResponses] + +export type PostFalAiVeo3Data = { + body: SchemaVeo3Input + path?: never + query?: never + url: '/fal-ai/veo3' +} + +export type PostFalAiVeo3Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiVeo3Response = + PostFalAiVeo3Responses[keyof PostFalAiVeo3Responses] + +export type GetFalAiVeo3RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/veo3/requests/{request_id}' +} + +export type GetFalAiVeo3RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaVeo3Output +} + +export type GetFalAiVeo3RequestsByRequestIdResponse = + GetFalAiVeo3RequestsByRequestIdResponses[keyof GetFalAiVeo3RequestsByRequestIdResponses] + +export type GetFalAiKlingVideoV2MasterTextToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-video/v2/master/text-to-video/requests/{request_id}/status' + } + +export type GetFalAiKlingVideoV2MasterTextToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiKlingVideoV2MasterTextToVideoRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoV2MasterTextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoV2MasterTextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoV2MasterTextToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v2/master/text-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiKlingVideoV2MasterTextToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiKlingVideoV2MasterTextToVideoRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoV2MasterTextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoV2MasterTextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoV2MasterTextToVideoData = { + body: SchemaKlingVideoV2MasterTextToVideoInput + path?: never + query?: never + url: '/fal-ai/kling-video/v2/master/text-to-video' +} + +export type PostFalAiKlingVideoV2MasterTextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoV2MasterTextToVideoResponse = + PostFalAiKlingVideoV2MasterTextToVideoResponses[keyof PostFalAiKlingVideoV2MasterTextToVideoResponses] + +export type GetFalAiKlingVideoV2MasterTextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v2/master/text-to-video/requests/{request_id}' +} + +export type GetFalAiKlingVideoV2MasterTextToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. 
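Because every model repeats the same Data/Responses shape, the URL literal alone can drive the types. A sketch of a registry pairing two of the endpoints above with their schemas; the map and helper are my own composition, not something the generator emits:

```ts
import type {
  SchemaVeo3Input,
  SchemaVeo3Output,
  SchemaKlingVideoV2MasterTextToVideoInput,
  SchemaKlingVideoV2MasterTextToVideoOutput,
} from './types' // illustrative path

type EndpointMap = {
  '/fal-ai/veo3': { input: SchemaVeo3Input; output: SchemaVeo3Output }
  '/fal-ai/kling-video/v2/master/text-to-video': {
    input: SchemaKlingVideoV2MasterTextToVideoInput
    output: SchemaKlingVideoV2MasterTextToVideoOutput
  }
}

// Signature only: the output type narrows from the URL literal alone.
declare function runEndpoint<U extends keyof EndpointMap>(
  url: U,
  input: EndpointMap[U]['input'],
): Promise<EndpointMap[U]['output']>

// const out = await runEndpoint('/fal-ai/veo3', { prompt: 'a quiet tide pool' })
// -> out: SchemaVeo3Output
```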
+ */ + 200: SchemaKlingVideoV2MasterTextToVideoOutput + } + +export type GetFalAiKlingVideoV2MasterTextToVideoRequestsByRequestIdResponse = + GetFalAiKlingVideoV2MasterTextToVideoRequestsByRequestIdResponses[keyof GetFalAiKlingVideoV2MasterTextToVideoRequestsByRequestIdResponses] + +export type GetFalAiPixverseV56TextToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pixverse/v5.6/text-to-video/requests/{request_id}/status' +} + +export type GetFalAiPixverseV56TextToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiPixverseV56TextToVideoRequestsByRequestIdStatusResponse = + GetFalAiPixverseV56TextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiPixverseV56TextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiPixverseV56TextToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v5.6/text-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiPixverseV56TextToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiPixverseV56TextToVideoRequestsByRequestIdCancelResponse = + PutFalAiPixverseV56TextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiPixverseV56TextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiPixverseV56TextToVideoData = { + body: SchemaPixverseV56TextToVideoInput + path?: never + query?: never + url: '/fal-ai/pixverse/v5.6/text-to-video' +} + +export type PostFalAiPixverseV56TextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPixverseV56TextToVideoResponse = + PostFalAiPixverseV56TextToVideoResponses[keyof PostFalAiPixverseV56TextToVideoResponses] + +export type GetFalAiPixverseV56TextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v5.6/text-to-video/requests/{request_id}' +} + +export type GetFalAiPixverseV56TextToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPixverseV56TextToVideoOutput +} + +export type GetFalAiPixverseV56TextToVideoRequestsByRequestIdResponse = + GetFalAiPixverseV56TextToVideoRequestsByRequestIdResponses[keyof GetFalAiPixverseV56TextToVideoRequestsByRequestIdResponses] + +export type GetFalAiLtx219bDistilledTextToVideoLoraRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ltx-2-19b/distilled/text-to-video/lora/requests/{request_id}/status' + } + +export type GetFalAiLtx219bDistilledTextToVideoLoraRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiLtx219bDistilledTextToVideoLoraRequestsByRequestIdStatusResponse = + GetFalAiLtx219bDistilledTextToVideoLoraRequestsByRequestIdStatusResponses[keyof GetFalAiLtx219bDistilledTextToVideoLoraRequestsByRequestIdStatusResponses] + +export type PutFalAiLtx219bDistilledTextToVideoLoraRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2-19b/distilled/text-to-video/lora/requests/{request_id}/cancel' + } + +export type PutFalAiLtx219bDistilledTextToVideoLoraRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiLtx219bDistilledTextToVideoLoraRequestsByRequestIdCancelResponse = + PutFalAiLtx219bDistilledTextToVideoLoraRequestsByRequestIdCancelResponses[keyof PutFalAiLtx219bDistilledTextToVideoLoraRequestsByRequestIdCancelResponses] + +export type PostFalAiLtx219bDistilledTextToVideoLoraData = { + body: SchemaLtx219bDistilledTextToVideoLoraInput + path?: never + query?: never + url: '/fal-ai/ltx-2-19b/distilled/text-to-video/lora' +} + +export type PostFalAiLtx219bDistilledTextToVideoLoraResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtx219bDistilledTextToVideoLoraResponse = + PostFalAiLtx219bDistilledTextToVideoLoraResponses[keyof PostFalAiLtx219bDistilledTextToVideoLoraResponses] + +export type GetFalAiLtx219bDistilledTextToVideoLoraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2-19b/distilled/text-to-video/lora/requests/{request_id}' +} + +export type GetFalAiLtx219bDistilledTextToVideoLoraRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaLtx219bDistilledTextToVideoLoraOutput + } + +export type GetFalAiLtx219bDistilledTextToVideoLoraRequestsByRequestIdResponse = + GetFalAiLtx219bDistilledTextToVideoLoraRequestsByRequestIdResponses[keyof GetFalAiLtx219bDistilledTextToVideoLoraRequestsByRequestIdResponses] + +export type GetFalAiLtx219bDistilledTextToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ltx-2-19b/distilled/text-to-video/requests/{request_id}/status' +} + +export type GetFalAiLtx219bDistilledTextToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiLtx219bDistilledTextToVideoRequestsByRequestIdStatusResponse = + GetFalAiLtx219bDistilledTextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiLtx219bDistilledTextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiLtx219bDistilledTextToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2-19b/distilled/text-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiLtx219bDistilledTextToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiLtx219bDistilledTextToVideoRequestsByRequestIdCancelResponse = + PutFalAiLtx219bDistilledTextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiLtx219bDistilledTextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiLtx219bDistilledTextToVideoData = { + body: SchemaLtx219bDistilledTextToVideoInput + path?: never + query?: never + url: '/fal-ai/ltx-2-19b/distilled/text-to-video' +} + +export type PostFalAiLtx219bDistilledTextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtx219bDistilledTextToVideoResponse = + PostFalAiLtx219bDistilledTextToVideoResponses[keyof PostFalAiLtx219bDistilledTextToVideoResponses] + +export type GetFalAiLtx219bDistilledTextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2-19b/distilled/text-to-video/requests/{request_id}' +} + +export type GetFalAiLtx219bDistilledTextToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLtx219bDistilledTextToVideoOutput +} + +export type GetFalAiLtx219bDistilledTextToVideoRequestsByRequestIdResponse = + GetFalAiLtx219bDistilledTextToVideoRequestsByRequestIdResponses[keyof GetFalAiLtx219bDistilledTextToVideoRequestsByRequestIdResponses] + +export type GetFalAiLtx219bTextToVideoLoraRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ltx-2-19b/text-to-video/lora/requests/{request_id}/status' +} + +export type GetFalAiLtx219bTextToVideoLoraRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLtx219bTextToVideoLoraRequestsByRequestIdStatusResponse = + GetFalAiLtx219bTextToVideoLoraRequestsByRequestIdStatusResponses[keyof GetFalAiLtx219bTextToVideoLoraRequestsByRequestIdStatusResponses] + +export type PutFalAiLtx219bTextToVideoLoraRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2-19b/text-to-video/lora/requests/{request_id}/cancel' +} + +export type PutFalAiLtx219bTextToVideoLoraRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLtx219bTextToVideoLoraRequestsByRequestIdCancelResponse = + PutFalAiLtx219bTextToVideoLoraRequestsByRequestIdCancelResponses[keyof PutFalAiLtx219bTextToVideoLoraRequestsByRequestIdCancelResponses] + +export type PostFalAiLtx219bTextToVideoLoraData = { + body: SchemaLtx219bTextToVideoLoraInput + path?: never + query?: never + url: '/fal-ai/ltx-2-19b/text-to-video/lora' +} + +export type PostFalAiLtx219bTextToVideoLoraResponses = { + /** + * The request status. 
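+ * + * Editorial sketch, not generator output: submitting returns this queue + * envelope rather than the finished video. The host, auth format, and the + * `prompt` field are assumptions here. + * @example + * const res = await fetch('https://queue.fal.run/fal-ai/ltx-2-19b/text-to-video/lora', { + *   method: 'POST', + *   headers: { Authorization: `Key ${process.env.FAL_KEY}`, 'Content-Type': 'application/json' }, + *   body: JSON.stringify({ prompt: 'a drone shot over a glacier' }), + * }) + * const queued: SchemaQueueStatus = await res.json() // carries request_id, response_url, ...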
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtx219bTextToVideoLoraResponse = + PostFalAiLtx219bTextToVideoLoraResponses[keyof PostFalAiLtx219bTextToVideoLoraResponses] + +export type GetFalAiLtx219bTextToVideoLoraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2-19b/text-to-video/lora/requests/{request_id}' +} + +export type GetFalAiLtx219bTextToVideoLoraRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLtx219bTextToVideoLoraOutput +} + +export type GetFalAiLtx219bTextToVideoLoraRequestsByRequestIdResponse = + GetFalAiLtx219bTextToVideoLoraRequestsByRequestIdResponses[keyof GetFalAiLtx219bTextToVideoLoraRequestsByRequestIdResponses] + +export type GetFalAiLtx219bTextToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ltx-2-19b/text-to-video/requests/{request_id}/status' +} + +export type GetFalAiLtx219bTextToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLtx219bTextToVideoRequestsByRequestIdStatusResponse = + GetFalAiLtx219bTextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiLtx219bTextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiLtx219bTextToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2-19b/text-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiLtx219bTextToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLtx219bTextToVideoRequestsByRequestIdCancelResponse = + PutFalAiLtx219bTextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiLtx219bTextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiLtx219bTextToVideoData = { + body: SchemaLtx219bTextToVideoInput + path?: never + query?: never + url: '/fal-ai/ltx-2-19b/text-to-video' +} + +export type PostFalAiLtx219bTextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtx219bTextToVideoResponse = + PostFalAiLtx219bTextToVideoResponses[keyof PostFalAiLtx219bTextToVideoResponses] + +export type GetFalAiLtx219bTextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2-19b/text-to-video/requests/{request_id}' +} + +export type GetFalAiLtx219bTextToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLtx219bTextToVideoOutput +} + +export type GetFalAiLtx219bTextToVideoRequestsByRequestIdResponse = + GetFalAiLtx219bTextToVideoRequestsByRequestIdResponses[keyof GetFalAiLtx219bTextToVideoRequestsByRequestIdResponses] + +export type GetFalAiKandinsky5ProTextToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
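+ * + * Editorial note: this flag travels as a query-string parameter; the queue + * host below is an assumption. + * @example + * // poll Kandinsky 5 Pro status with logs included + * const url = `https://queue.fal.run/fal-ai/kandinsky5-pro/text-to-video/requests/${requestId}/status?logs=1`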
+ */ + logs?: number + } + url: '/fal-ai/kandinsky5-pro/text-to-video/requests/{request_id}/status' +} + +export type GetFalAiKandinsky5ProTextToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiKandinsky5ProTextToVideoRequestsByRequestIdStatusResponse = + GetFalAiKandinsky5ProTextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiKandinsky5ProTextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiKandinsky5ProTextToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kandinsky5-pro/text-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiKandinsky5ProTextToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiKandinsky5ProTextToVideoRequestsByRequestIdCancelResponse = + PutFalAiKandinsky5ProTextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiKandinsky5ProTextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiKandinsky5ProTextToVideoData = { + body: SchemaKandinsky5ProTextToVideoInput + path?: never + query?: never + url: '/fal-ai/kandinsky5-pro/text-to-video' +} + +export type PostFalAiKandinsky5ProTextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKandinsky5ProTextToVideoResponse = + PostFalAiKandinsky5ProTextToVideoResponses[keyof PostFalAiKandinsky5ProTextToVideoResponses] + +export type GetFalAiKandinsky5ProTextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kandinsky5-pro/text-to-video/requests/{request_id}' +} + +export type GetFalAiKandinsky5ProTextToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaKandinsky5ProTextToVideoOutput +} + +export type GetFalAiKandinsky5ProTextToVideoRequestsByRequestIdResponse = + GetFalAiKandinsky5ProTextToVideoRequestsByRequestIdResponses[keyof GetFalAiKandinsky5ProTextToVideoRequestsByRequestIdResponses] + +export type GetFalAiBytedanceSeedanceV15ProTextToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/bytedance/seedance/v1.5/pro/text-to-video/requests/{request_id}/status' + } + +export type GetFalAiBytedanceSeedanceV15ProTextToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiBytedanceSeedanceV15ProTextToVideoRequestsByRequestIdStatusResponse = + GetFalAiBytedanceSeedanceV15ProTextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiBytedanceSeedanceV15ProTextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiBytedanceSeedanceV15ProTextToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bytedance/seedance/v1.5/pro/text-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiBytedanceSeedanceV15ProTextToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiBytedanceSeedanceV15ProTextToVideoRequestsByRequestIdCancelResponse = + PutFalAiBytedanceSeedanceV15ProTextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiBytedanceSeedanceV15ProTextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiBytedanceSeedanceV15ProTextToVideoData = { + body: SchemaBytedanceSeedanceV15ProTextToVideoInput + path?: never + query?: never + url: '/fal-ai/bytedance/seedance/v1.5/pro/text-to-video' +} + +export type PostFalAiBytedanceSeedanceV15ProTextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiBytedanceSeedanceV15ProTextToVideoResponse = + PostFalAiBytedanceSeedanceV15ProTextToVideoResponses[keyof PostFalAiBytedanceSeedanceV15ProTextToVideoResponses] + +export type GetFalAiBytedanceSeedanceV15ProTextToVideoRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bytedance/seedance/v1.5/pro/text-to-video/requests/{request_id}' + } + +export type GetFalAiBytedanceSeedanceV15ProTextToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaBytedanceSeedanceV15ProTextToVideoOutput + } + +export type GetFalAiBytedanceSeedanceV15ProTextToVideoRequestsByRequestIdResponse = + GetFalAiBytedanceSeedanceV15ProTextToVideoRequestsByRequestIdResponses[keyof GetFalAiBytedanceSeedanceV15ProTextToVideoRequestsByRequestIdResponses] + +export type GetWanV26TextToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/wan/v2.6/text-to-video/requests/{request_id}/status' +} + +export type GetWanV26TextToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetWanV26TextToVideoRequestsByRequestIdStatusResponse = + GetWanV26TextToVideoRequestsByRequestIdStatusResponses[keyof GetWanV26TextToVideoRequestsByRequestIdStatusResponses] + +export type PutWanV26TextToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/wan/v2.6/text-to-video/requests/{request_id}/cancel' +} + +export type PutWanV26TextToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutWanV26TextToVideoRequestsByRequestIdCancelResponse = + PutWanV26TextToVideoRequestsByRequestIdCancelResponses[keyof PutWanV26TextToVideoRequestsByRequestIdCancelResponses] + +export type PostWanV26TextToVideoData = { + body: SchemaV26TextToVideoInput + path?: never + query?: never + url: '/wan/v2.6/text-to-video' +} + +export type PostWanV26TextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostWanV26TextToVideoResponse = + PostWanV26TextToVideoResponses[keyof PostWanV26TextToVideoResponses] + +export type GetWanV26TextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/wan/v2.6/text-to-video/requests/{request_id}' +} + +export type GetWanV26TextToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaV26TextToVideoOutput +} + +export type GetWanV26TextToVideoRequestsByRequestIdResponse = + GetWanV26TextToVideoRequestsByRequestIdResponses[keyof GetWanV26TextToVideoRequestsByRequestIdResponses] + +export type GetVeedFabric10TextRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/veed/fabric-1.0/text/requests/{request_id}/status' +} + +export type GetVeedFabric10TextRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetVeedFabric10TextRequestsByRequestIdStatusResponse = + GetVeedFabric10TextRequestsByRequestIdStatusResponses[keyof GetVeedFabric10TextRequestsByRequestIdStatusResponses] + +export type PutVeedFabric10TextRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/veed/fabric-1.0/text/requests/{request_id}/cancel' +} + +export type PutVeedFabric10TextRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutVeedFabric10TextRequestsByRequestIdCancelResponse = + PutVeedFabric10TextRequestsByRequestIdCancelResponses[keyof PutVeedFabric10TextRequestsByRequestIdCancelResponses] + +export type PostVeedFabric10TextData = { + body: SchemaFabric10TextInput + path?: never + query?: never + url: '/veed/fabric-1.0/text' +} + +export type PostVeedFabric10TextResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostVeedFabric10TextResponse = + PostVeedFabric10TextResponses[keyof PostVeedFabric10TextResponses] + +export type GetVeedFabric10TextRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/veed/fabric-1.0/text/requests/{request_id}' +} + +export type GetVeedFabric10TextRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFabric10TextOutput +} + +export type GetVeedFabric10TextRequestsByRequestIdResponse = + GetVeedFabric10TextRequestsByRequestIdResponses[keyof GetVeedFabric10TextRequestsByRequestIdResponses] + +export type GetFalAiKlingVideoV26ProTextToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-video/v2.6/pro/text-to-video/requests/{request_id}/status' +} + +export type GetFalAiKlingVideoV26ProTextToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiKlingVideoV26ProTextToVideoRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoV26ProTextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoV26ProTextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoV26ProTextToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v2.6/pro/text-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiKlingVideoV26ProTextToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. 
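+ * + * Editorial sketch, not generator output: cancellation is a PUT to the + * cancel route; the host and auth format are assumptions. + * @example + * const res = await fetch( + *   `https://queue.fal.run/fal-ai/kling-video/v2.6/pro/text-to-video/requests/${requestId}/cancel`, + *   { method: 'PUT', headers: { Authorization: `Key ${process.env.FAL_KEY}` } }, + * ) + * const { success } = (await res.json()) as PutFalAiKlingVideoV26ProTextToVideoRequestsByRequestIdCancelResponse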
+ */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiKlingVideoV26ProTextToVideoRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoV26ProTextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoV26ProTextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoV26ProTextToVideoData = { + body: SchemaKlingVideoV26ProTextToVideoInput + path?: never + query?: never + url: '/fal-ai/kling-video/v2.6/pro/text-to-video' +} + +export type PostFalAiKlingVideoV26ProTextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoV26ProTextToVideoResponse = + PostFalAiKlingVideoV26ProTextToVideoResponses[keyof PostFalAiKlingVideoV26ProTextToVideoResponses] + +export type GetFalAiKlingVideoV26ProTextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v2.6/pro/text-to-video/requests/{request_id}' +} + +export type GetFalAiKlingVideoV26ProTextToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaKlingVideoV26ProTextToVideoOutput +} + +export type GetFalAiKlingVideoV26ProTextToVideoRequestsByRequestIdResponse = + GetFalAiKlingVideoV26ProTextToVideoRequestsByRequestIdResponses[keyof GetFalAiKlingVideoV26ProTextToVideoRequestsByRequestIdResponses] + +export type GetFalAiPixverseV55TextToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pixverse/v5.5/text-to-video/requests/{request_id}/status' +} + +export type GetFalAiPixverseV55TextToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiPixverseV55TextToVideoRequestsByRequestIdStatusResponse = + GetFalAiPixverseV55TextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiPixverseV55TextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiPixverseV55TextToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v5.5/text-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiPixverseV55TextToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiPixverseV55TextToVideoRequestsByRequestIdCancelResponse = + PutFalAiPixverseV55TextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiPixverseV55TextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiPixverseV55TextToVideoData = { + body: SchemaPixverseV55TextToVideoInput + path?: never + query?: never + url: '/fal-ai/pixverse/v5.5/text-to-video' +} + +export type PostFalAiPixverseV55TextToVideoResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiPixverseV55TextToVideoResponse = + PostFalAiPixverseV55TextToVideoResponses[keyof PostFalAiPixverseV55TextToVideoResponses] + +export type GetFalAiPixverseV55TextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v5.5/text-to-video/requests/{request_id}' +} + +export type GetFalAiPixverseV55TextToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPixverseV55TextToVideoOutput +} + +export type GetFalAiPixverseV55TextToVideoRequestsByRequestIdResponse = + GetFalAiPixverseV55TextToVideoRequestsByRequestIdResponses[keyof GetFalAiPixverseV55TextToVideoRequestsByRequestIdResponses] + +export type GetFalAiLtx2TextToVideoFastRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ltx-2/text-to-video/fast/requests/{request_id}/status' +} + +export type GetFalAiLtx2TextToVideoFastRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLtx2TextToVideoFastRequestsByRequestIdStatusResponse = + GetFalAiLtx2TextToVideoFastRequestsByRequestIdStatusResponses[keyof GetFalAiLtx2TextToVideoFastRequestsByRequestIdStatusResponses] + +export type PutFalAiLtx2TextToVideoFastRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2/text-to-video/fast/requests/{request_id}/cancel' +} + +export type PutFalAiLtx2TextToVideoFastRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLtx2TextToVideoFastRequestsByRequestIdCancelResponse = + PutFalAiLtx2TextToVideoFastRequestsByRequestIdCancelResponses[keyof PutFalAiLtx2TextToVideoFastRequestsByRequestIdCancelResponses] + +export type PostFalAiLtx2TextToVideoFastData = { + body: SchemaLtx2TextToVideoFastInput + path?: never + query?: never + url: '/fal-ai/ltx-2/text-to-video/fast' +} + +export type PostFalAiLtx2TextToVideoFastResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtx2TextToVideoFastResponse = + PostFalAiLtx2TextToVideoFastResponses[keyof PostFalAiLtx2TextToVideoFastResponses] + +export type GetFalAiLtx2TextToVideoFastRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2/text-to-video/fast/requests/{request_id}' +} + +export type GetFalAiLtx2TextToVideoFastRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLtx2TextToVideoFastOutput +} + +export type GetFalAiLtx2TextToVideoFastRequestsByRequestIdResponse = + GetFalAiLtx2TextToVideoFastRequestsByRequestIdResponses[keyof GetFalAiLtx2TextToVideoFastRequestsByRequestIdResponses] + +export type GetFalAiLtx2TextToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/ltx-2/text-to-video/requests/{request_id}/status' +} + +export type GetFalAiLtx2TextToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLtx2TextToVideoRequestsByRequestIdStatusResponse = + GetFalAiLtx2TextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiLtx2TextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiLtx2TextToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2/text-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiLtx2TextToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLtx2TextToVideoRequestsByRequestIdCancelResponse = + PutFalAiLtx2TextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiLtx2TextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiLtx2TextToVideoData = { + body: SchemaLtx2TextToVideoInput + path?: never + query?: never + url: '/fal-ai/ltx-2/text-to-video' +} + +export type PostFalAiLtx2TextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtx2TextToVideoResponse = + PostFalAiLtx2TextToVideoResponses[keyof PostFalAiLtx2TextToVideoResponses] + +export type GetFalAiLtx2TextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2/text-to-video/requests/{request_id}' +} + +export type GetFalAiLtx2TextToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLtx2TextToVideoOutput +} + +export type GetFalAiLtx2TextToVideoRequestsByRequestIdResponse = + GetFalAiLtx2TextToVideoRequestsByRequestIdResponses[keyof GetFalAiLtx2TextToVideoRequestsByRequestIdResponses] + +export type GetFalAiHunyuanVideoV15TextToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/hunyuan-video-v1.5/text-to-video/requests/{request_id}/status' +} + +export type GetFalAiHunyuanVideoV15TextToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiHunyuanVideoV15TextToVideoRequestsByRequestIdStatusResponse = + GetFalAiHunyuanVideoV15TextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiHunyuanVideoV15TextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiHunyuanVideoV15TextToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan-video-v1.5/text-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiHunyuanVideoV15TextToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
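+ * + * Editorial note: an illustrative payload is `{ "success": true }`; since + * the field is optional even on HTTP 200, callers should branch on the + * field rather than the status code. + * @example + * if (!cancelResult.success) console.warn('cancel may not have taken effect')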
+ */ + success?: boolean + } + } + +export type PutFalAiHunyuanVideoV15TextToVideoRequestsByRequestIdCancelResponse = + PutFalAiHunyuanVideoV15TextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiHunyuanVideoV15TextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiHunyuanVideoV15TextToVideoData = { + body: SchemaHunyuanVideoV15TextToVideoInput + path?: never + query?: never + url: '/fal-ai/hunyuan-video-v1.5/text-to-video' +} + +export type PostFalAiHunyuanVideoV15TextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiHunyuanVideoV15TextToVideoResponse = + PostFalAiHunyuanVideoV15TextToVideoResponses[keyof PostFalAiHunyuanVideoV15TextToVideoResponses] + +export type GetFalAiHunyuanVideoV15TextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan-video-v1.5/text-to-video/requests/{request_id}' +} + +export type GetFalAiHunyuanVideoV15TextToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaHunyuanVideoV15TextToVideoOutput +} + +export type GetFalAiHunyuanVideoV15TextToVideoRequestsByRequestIdResponse = + GetFalAiHunyuanVideoV15TextToVideoRequestsByRequestIdResponses[keyof GetFalAiHunyuanVideoV15TextToVideoRequestsByRequestIdResponses] + +export type GetFalAiInfinityStarTextToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/infinity-star/text-to-video/requests/{request_id}/status' +} + +export type GetFalAiInfinityStarTextToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiInfinityStarTextToVideoRequestsByRequestIdStatusResponse = + GetFalAiInfinityStarTextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiInfinityStarTextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiInfinityStarTextToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/infinity-star/text-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiInfinityStarTextToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiInfinityStarTextToVideoRequestsByRequestIdCancelResponse = + PutFalAiInfinityStarTextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiInfinityStarTextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiInfinityStarTextToVideoData = { + body: SchemaInfinityStarTextToVideoInput + path?: never + query?: never + url: '/fal-ai/infinity-star/text-to-video' +} + +export type PostFalAiInfinityStarTextToVideoResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiInfinityStarTextToVideoResponse = + PostFalAiInfinityStarTextToVideoResponses[keyof PostFalAiInfinityStarTextToVideoResponses] + +export type GetFalAiInfinityStarTextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/infinity-star/text-to-video/requests/{request_id}' +} + +export type GetFalAiInfinityStarTextToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaInfinityStarTextToVideoOutput +} + +export type GetFalAiInfinityStarTextToVideoRequestsByRequestIdResponse = + GetFalAiInfinityStarTextToVideoRequestsByRequestIdResponses[keyof GetFalAiInfinityStarTextToVideoRequestsByRequestIdResponses] + +export type GetFalAiSanaVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/sana-video/requests/{request_id}/status' +} + +export type GetFalAiSanaVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSanaVideoRequestsByRequestIdStatusResponse = + GetFalAiSanaVideoRequestsByRequestIdStatusResponses[keyof GetFalAiSanaVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiSanaVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sana-video/requests/{request_id}/cancel' +} + +export type PutFalAiSanaVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiSanaVideoRequestsByRequestIdCancelResponse = + PutFalAiSanaVideoRequestsByRequestIdCancelResponses[keyof PutFalAiSanaVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiSanaVideoData = { + body: SchemaSanaVideoInput + path?: never + query?: never + url: '/fal-ai/sana-video' +} + +export type PostFalAiSanaVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSanaVideoResponse = + PostFalAiSanaVideoResponses[keyof PostFalAiSanaVideoResponses] + +export type GetFalAiSanaVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sana-video/requests/{request_id}' +} + +export type GetFalAiSanaVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSanaVideoOutput +} + +export type GetFalAiSanaVideoRequestsByRequestIdResponse = + GetFalAiSanaVideoRequestsByRequestIdResponses[keyof GetFalAiSanaVideoRequestsByRequestIdResponses] + +export type GetFalAiLongcatVideoTextToVideo720pRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/longcat-video/text-to-video/720p/requests/{request_id}/status' +} + +export type GetFalAiLongcatVideoTextToVideo720pRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiLongcatVideoTextToVideo720pRequestsByRequestIdStatusResponse = + GetFalAiLongcatVideoTextToVideo720pRequestsByRequestIdStatusResponses[keyof GetFalAiLongcatVideoTextToVideo720pRequestsByRequestIdStatusResponses] + +export type PutFalAiLongcatVideoTextToVideo720pRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/longcat-video/text-to-video/720p/requests/{request_id}/cancel' +} + +export type PutFalAiLongcatVideoTextToVideo720pRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiLongcatVideoTextToVideo720pRequestsByRequestIdCancelResponse = + PutFalAiLongcatVideoTextToVideo720pRequestsByRequestIdCancelResponses[keyof PutFalAiLongcatVideoTextToVideo720pRequestsByRequestIdCancelResponses] + +export type PostFalAiLongcatVideoTextToVideo720pData = { + body: SchemaLongcatVideoTextToVideo720pInput + path?: never + query?: never + url: '/fal-ai/longcat-video/text-to-video/720p' +} + +export type PostFalAiLongcatVideoTextToVideo720pResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLongcatVideoTextToVideo720pResponse = + PostFalAiLongcatVideoTextToVideo720pResponses[keyof PostFalAiLongcatVideoTextToVideo720pResponses] + +export type GetFalAiLongcatVideoTextToVideo720pRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/longcat-video/text-to-video/720p/requests/{request_id}' +} + +export type GetFalAiLongcatVideoTextToVideo720pRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLongcatVideoTextToVideo720pOutput +} + +export type GetFalAiLongcatVideoTextToVideo720pRequestsByRequestIdResponse = + GetFalAiLongcatVideoTextToVideo720pRequestsByRequestIdResponses[keyof GetFalAiLongcatVideoTextToVideo720pRequestsByRequestIdResponses] + +export type GetFalAiLongcatVideoTextToVideo480pRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/longcat-video/text-to-video/480p/requests/{request_id}/status' +} + +export type GetFalAiLongcatVideoTextToVideo480pRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiLongcatVideoTextToVideo480pRequestsByRequestIdStatusResponse = + GetFalAiLongcatVideoTextToVideo480pRequestsByRequestIdStatusResponses[keyof GetFalAiLongcatVideoTextToVideo480pRequestsByRequestIdStatusResponses] + +export type PutFalAiLongcatVideoTextToVideo480pRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/longcat-video/text-to-video/480p/requests/{request_id}/cancel' +} + +export type PutFalAiLongcatVideoTextToVideo480pRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiLongcatVideoTextToVideo480pRequestsByRequestIdCancelResponse = + PutFalAiLongcatVideoTextToVideo480pRequestsByRequestIdCancelResponses[keyof PutFalAiLongcatVideoTextToVideo480pRequestsByRequestIdCancelResponses] + +export type PostFalAiLongcatVideoTextToVideo480pData = { + body: SchemaLongcatVideoTextToVideo480pInput + path?: never + query?: never + url: '/fal-ai/longcat-video/text-to-video/480p' +} + +export type PostFalAiLongcatVideoTextToVideo480pResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLongcatVideoTextToVideo480pResponse = + PostFalAiLongcatVideoTextToVideo480pResponses[keyof PostFalAiLongcatVideoTextToVideo480pResponses] + +export type GetFalAiLongcatVideoTextToVideo480pRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/longcat-video/text-to-video/480p/requests/{request_id}' +} + +export type GetFalAiLongcatVideoTextToVideo480pRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLongcatVideoTextToVideo480pOutput +} + +export type GetFalAiLongcatVideoTextToVideo480pRequestsByRequestIdResponse = + GetFalAiLongcatVideoTextToVideo480pRequestsByRequestIdResponses[keyof GetFalAiLongcatVideoTextToVideo480pRequestsByRequestIdResponses] + +export type GetFalAiLongcatVideoDistilledTextToVideo720pRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/longcat-video/distilled/text-to-video/720p/requests/{request_id}/status' + } + +export type GetFalAiLongcatVideoDistilledTextToVideo720pRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiLongcatVideoDistilledTextToVideo720pRequestsByRequestIdStatusResponse = + GetFalAiLongcatVideoDistilledTextToVideo720pRequestsByRequestIdStatusResponses[keyof GetFalAiLongcatVideoDistilledTextToVideo720pRequestsByRequestIdStatusResponses] + +export type PutFalAiLongcatVideoDistilledTextToVideo720pRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/longcat-video/distilled/text-to-video/720p/requests/{request_id}/cancel' + } + +export type PutFalAiLongcatVideoDistilledTextToVideo720pRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiLongcatVideoDistilledTextToVideo720pRequestsByRequestIdCancelResponse = + PutFalAiLongcatVideoDistilledTextToVideo720pRequestsByRequestIdCancelResponses[keyof PutFalAiLongcatVideoDistilledTextToVideo720pRequestsByRequestIdCancelResponses] + +export type PostFalAiLongcatVideoDistilledTextToVideo720pData = { + body: SchemaLongcatVideoDistilledTextToVideo720pInput + path?: never + query?: never + url: '/fal-ai/longcat-video/distilled/text-to-video/720p' +} + +export type PostFalAiLongcatVideoDistilledTextToVideo720pResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiLongcatVideoDistilledTextToVideo720pResponse = + PostFalAiLongcatVideoDistilledTextToVideo720pResponses[keyof PostFalAiLongcatVideoDistilledTextToVideo720pResponses] + +export type GetFalAiLongcatVideoDistilledTextToVideo720pRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/longcat-video/distilled/text-to-video/720p/requests/{request_id}' + } + +export type GetFalAiLongcatVideoDistilledTextToVideo720pRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaLongcatVideoDistilledTextToVideo720pOutput + } + +export type GetFalAiLongcatVideoDistilledTextToVideo720pRequestsByRequestIdResponse = + GetFalAiLongcatVideoDistilledTextToVideo720pRequestsByRequestIdResponses[keyof GetFalAiLongcatVideoDistilledTextToVideo720pRequestsByRequestIdResponses] + +export type GetFalAiLongcatVideoDistilledTextToVideo480pRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/longcat-video/distilled/text-to-video/480p/requests/{request_id}/status' + } + +export type GetFalAiLongcatVideoDistilledTextToVideo480pRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiLongcatVideoDistilledTextToVideo480pRequestsByRequestIdStatusResponse = + GetFalAiLongcatVideoDistilledTextToVideo480pRequestsByRequestIdStatusResponses[keyof GetFalAiLongcatVideoDistilledTextToVideo480pRequestsByRequestIdStatusResponses] + +export type PutFalAiLongcatVideoDistilledTextToVideo480pRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/longcat-video/distilled/text-to-video/480p/requests/{request_id}/cancel' + } + +export type PutFalAiLongcatVideoDistilledTextToVideo480pRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiLongcatVideoDistilledTextToVideo480pRequestsByRequestIdCancelResponse = + PutFalAiLongcatVideoDistilledTextToVideo480pRequestsByRequestIdCancelResponses[keyof PutFalAiLongcatVideoDistilledTextToVideo480pRequestsByRequestIdCancelResponses] + +export type PostFalAiLongcatVideoDistilledTextToVideo480pData = { + body: SchemaLongcatVideoDistilledTextToVideo480pInput + path?: never + query?: never + url: '/fal-ai/longcat-video/distilled/text-to-video/480p' +} + +export type PostFalAiLongcatVideoDistilledTextToVideo480pResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLongcatVideoDistilledTextToVideo480pResponse = + PostFalAiLongcatVideoDistilledTextToVideo480pResponses[keyof PostFalAiLongcatVideoDistilledTextToVideo480pResponses] + +export type GetFalAiLongcatVideoDistilledTextToVideo480pRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/longcat-video/distilled/text-to-video/480p/requests/{request_id}' + } + +export type GetFalAiLongcatVideoDistilledTextToVideo480pRequestsByRequestIdResponses = + { + /** + * Result of the request. 
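+ * + * Editorial note: the companion `...Response` alias below indexes this map + * with `keyof`, so with 200 as the only key it collapses to the output + * schema; additional status codes would widen it to a union. + * @example + * type R = GetFalAiLongcatVideoDistilledTextToVideo480pRequestsByRequestIdResponse + * // R = SchemaLongcatVideoDistilledTextToVideo480pOutput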
+ */ + 200: SchemaLongcatVideoDistilledTextToVideo480pOutput + } + +export type GetFalAiLongcatVideoDistilledTextToVideo480pRequestsByRequestIdResponse = + GetFalAiLongcatVideoDistilledTextToVideo480pRequestsByRequestIdResponses[keyof GetFalAiLongcatVideoDistilledTextToVideo480pRequestsByRequestIdResponses] + +export type GetFalAiMinimaxHailuo23StandardTextToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/minimax/hailuo-2.3/standard/text-to-video/requests/{request_id}/status' + } + +export type GetFalAiMinimaxHailuo23StandardTextToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiMinimaxHailuo23StandardTextToVideoRequestsByRequestIdStatusResponse = + GetFalAiMinimaxHailuo23StandardTextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiMinimaxHailuo23StandardTextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiMinimaxHailuo23StandardTextToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/hailuo-2.3/standard/text-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiMinimaxHailuo23StandardTextToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiMinimaxHailuo23StandardTextToVideoRequestsByRequestIdCancelResponse = + PutFalAiMinimaxHailuo23StandardTextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiMinimaxHailuo23StandardTextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiMinimaxHailuo23StandardTextToVideoData = { + body: SchemaMinimaxHailuo23StandardTextToVideoInput + path?: never + query?: never + url: '/fal-ai/minimax/hailuo-2.3/standard/text-to-video' +} + +export type PostFalAiMinimaxHailuo23StandardTextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMinimaxHailuo23StandardTextToVideoResponse = + PostFalAiMinimaxHailuo23StandardTextToVideoResponses[keyof PostFalAiMinimaxHailuo23StandardTextToVideoResponses] + +export type GetFalAiMinimaxHailuo23StandardTextToVideoRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/hailuo-2.3/standard/text-to-video/requests/{request_id}' + } + +export type GetFalAiMinimaxHailuo23StandardTextToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaMinimaxHailuo23StandardTextToVideoOutput + } + +export type GetFalAiMinimaxHailuo23StandardTextToVideoRequestsByRequestIdResponse = + GetFalAiMinimaxHailuo23StandardTextToVideoRequestsByRequestIdResponses[keyof GetFalAiMinimaxHailuo23StandardTextToVideoRequestsByRequestIdResponses] + +export type GetFalAiMinimaxHailuo23ProTextToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/minimax/hailuo-2.3/pro/text-to-video/requests/{request_id}/status' + } + +export type GetFalAiMinimaxHailuo23ProTextToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiMinimaxHailuo23ProTextToVideoRequestsByRequestIdStatusResponse = + GetFalAiMinimaxHailuo23ProTextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiMinimaxHailuo23ProTextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiMinimaxHailuo23ProTextToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/hailuo-2.3/pro/text-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiMinimaxHailuo23ProTextToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiMinimaxHailuo23ProTextToVideoRequestsByRequestIdCancelResponse = + PutFalAiMinimaxHailuo23ProTextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiMinimaxHailuo23ProTextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiMinimaxHailuo23ProTextToVideoData = { + body: SchemaMinimaxHailuo23ProTextToVideoInput + path?: never + query?: never + url: '/fal-ai/minimax/hailuo-2.3/pro/text-to-video' +} + +export type PostFalAiMinimaxHailuo23ProTextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMinimaxHailuo23ProTextToVideoResponse = + PostFalAiMinimaxHailuo23ProTextToVideoResponses[keyof PostFalAiMinimaxHailuo23ProTextToVideoResponses] + +export type GetFalAiMinimaxHailuo23ProTextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/hailuo-2.3/pro/text-to-video/requests/{request_id}' +} + +export type GetFalAiMinimaxHailuo23ProTextToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaMinimaxHailuo23ProTextToVideoOutput + } + +export type GetFalAiMinimaxHailuo23ProTextToVideoRequestsByRequestIdResponse = + GetFalAiMinimaxHailuo23ProTextToVideoRequestsByRequestIdResponses[keyof GetFalAiMinimaxHailuo23ProTextToVideoRequestsByRequestIdResponses] + +export type GetFalAiBytedanceSeedanceV1ProFastTextToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/bytedance/seedance/v1/pro/fast/text-to-video/requests/{request_id}/status' + } + +export type GetFalAiBytedanceSeedanceV1ProFastTextToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
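+ * + * Editorial sketch, not generator output: a minimal poll loop, assuming the + * queue host; `status` should move through IN_QUEUE / IN_PROGRESS to + * COMPLETED per SchemaQueueStatus. + * @example + * let s: SchemaQueueStatus + * do { + *   await new Promise((r) => setTimeout(r, 2000)) + *   const res = await fetch( + *     `https://queue.fal.run/fal-ai/bytedance/seedance/v1/pro/fast/text-to-video/requests/${requestId}/status`, + *     { headers: { Authorization: `Key ${process.env.FAL_KEY}` } }, + *   ) + *   s = await res.json() + * } while (s.status !== 'COMPLETED')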
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiBytedanceSeedanceV1ProFastTextToVideoRequestsByRequestIdStatusResponse = + GetFalAiBytedanceSeedanceV1ProFastTextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiBytedanceSeedanceV1ProFastTextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiBytedanceSeedanceV1ProFastTextToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bytedance/seedance/v1/pro/fast/text-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiBytedanceSeedanceV1ProFastTextToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiBytedanceSeedanceV1ProFastTextToVideoRequestsByRequestIdCancelResponse = + PutFalAiBytedanceSeedanceV1ProFastTextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiBytedanceSeedanceV1ProFastTextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiBytedanceSeedanceV1ProFastTextToVideoData = { + body: SchemaBytedanceSeedanceV1ProFastTextToVideoInput + path?: never + query?: never + url: '/fal-ai/bytedance/seedance/v1/pro/fast/text-to-video' +} + +export type PostFalAiBytedanceSeedanceV1ProFastTextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiBytedanceSeedanceV1ProFastTextToVideoResponse = + PostFalAiBytedanceSeedanceV1ProFastTextToVideoResponses[keyof PostFalAiBytedanceSeedanceV1ProFastTextToVideoResponses] + +export type GetFalAiBytedanceSeedanceV1ProFastTextToVideoRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bytedance/seedance/v1/pro/fast/text-to-video/requests/{request_id}' + } + +export type GetFalAiBytedanceSeedanceV1ProFastTextToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaBytedanceSeedanceV1ProFastTextToVideoOutput + } + +export type GetFalAiBytedanceSeedanceV1ProFastTextToVideoRequestsByRequestIdResponse = + GetFalAiBytedanceSeedanceV1ProFastTextToVideoRequestsByRequestIdResponses[keyof GetFalAiBytedanceSeedanceV1ProFastTextToVideoRequestsByRequestIdResponses] + +export type GetFalAiViduQ2TextToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/vidu/q2/text-to-video/requests/{request_id}/status' +} + +export type GetFalAiViduQ2TextToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiViduQ2TextToVideoRequestsByRequestIdStatusResponse = + GetFalAiViduQ2TextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiViduQ2TextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiViduQ2TextToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/vidu/q2/text-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiViduQ2TextToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiViduQ2TextToVideoRequestsByRequestIdCancelResponse = + PutFalAiViduQ2TextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiViduQ2TextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiViduQ2TextToVideoData = { + body: SchemaViduQ2TextToVideoInput + path?: never + query?: never + url: '/fal-ai/vidu/q2/text-to-video' +} + +export type PostFalAiViduQ2TextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiViduQ2TextToVideoResponse = + PostFalAiViduQ2TextToVideoResponses[keyof PostFalAiViduQ2TextToVideoResponses] + +export type GetFalAiViduQ2TextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/vidu/q2/text-to-video/requests/{request_id}' +} + +export type GetFalAiViduQ2TextToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaViduQ2TextToVideoOutput +} + +export type GetFalAiViduQ2TextToVideoRequestsByRequestIdResponse = + GetFalAiViduQ2TextToVideoRequestsByRequestIdResponses[keyof GetFalAiViduQ2TextToVideoRequestsByRequestIdResponses] + +export type GetFalAiKreaWan14bTextToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/krea-wan-14b/text-to-video/requests/{request_id}/status' +} + +export type GetFalAiKreaWan14bTextToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiKreaWan14bTextToVideoRequestsByRequestIdStatusResponse = + GetFalAiKreaWan14bTextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiKreaWan14bTextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiKreaWan14bTextToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/krea-wan-14b/text-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiKreaWan14bTextToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiKreaWan14bTextToVideoRequestsByRequestIdCancelResponse = + PutFalAiKreaWan14bTextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiKreaWan14bTextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiKreaWan14bTextToVideoData = { + body: SchemaKreaWan14bTextToVideoInput + path?: never + query?: never + url: '/fal-ai/krea-wan-14b/text-to-video' +} + +export type PostFalAiKreaWan14bTextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKreaWan14bTextToVideoResponse = + PostFalAiKreaWan14bTextToVideoResponses[keyof PostFalAiKreaWan14bTextToVideoResponses] + +export type GetFalAiKreaWan14bTextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/krea-wan-14b/text-to-video/requests/{request_id}' +} + +export type GetFalAiKreaWan14bTextToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaKreaWan14bTextToVideoOutput +} + +export type GetFalAiKreaWan14bTextToVideoRequestsByRequestIdResponse = + GetFalAiKreaWan14bTextToVideoRequestsByRequestIdResponses[keyof GetFalAiKreaWan14bTextToVideoRequestsByRequestIdResponses] + +export type GetFalAiWanAlphaRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan-alpha/requests/{request_id}/status' +} + +export type GetFalAiWanAlphaRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiWanAlphaRequestsByRequestIdStatusResponse = + GetFalAiWanAlphaRequestsByRequestIdStatusResponses[keyof GetFalAiWanAlphaRequestsByRequestIdStatusResponses] + +export type PutFalAiWanAlphaRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-alpha/requests/{request_id}/cancel' +} + +export type PutFalAiWanAlphaRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiWanAlphaRequestsByRequestIdCancelResponse = + PutFalAiWanAlphaRequestsByRequestIdCancelResponses[keyof PutFalAiWanAlphaRequestsByRequestIdCancelResponses] + +export type PostFalAiWanAlphaData = { + body: SchemaWanAlphaInput + path?: never + query?: never + url: '/fal-ai/wan-alpha' +} + +export type PostFalAiWanAlphaResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanAlphaResponse = + PostFalAiWanAlphaResponses[keyof PostFalAiWanAlphaResponses] + +export type GetFalAiWanAlphaRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-alpha/requests/{request_id}' +} + +export type GetFalAiWanAlphaRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWanAlphaOutput +} + +export type GetFalAiWanAlphaRequestsByRequestIdResponse = + GetFalAiWanAlphaRequestsByRequestIdResponses[keyof GetFalAiWanAlphaRequestsByRequestIdResponses] + +export type GetFalAiKandinsky5TextToVideoDistillRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kandinsky5/text-to-video/distill/requests/{request_id}/status' + } + +export type GetFalAiKandinsky5TextToVideoDistillRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiKandinsky5TextToVideoDistillRequestsByRequestIdStatusResponse = + GetFalAiKandinsky5TextToVideoDistillRequestsByRequestIdStatusResponses[keyof GetFalAiKandinsky5TextToVideoDistillRequestsByRequestIdStatusResponses] + +export type PutFalAiKandinsky5TextToVideoDistillRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kandinsky5/text-to-video/distill/requests/{request_id}/cancel' + } + +export type PutFalAiKandinsky5TextToVideoDistillRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
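+ *
+ * Editor's note (illustrative sketch, not generator output): cancellation is a
+ * PUT to the cancel path and resolves to this `{ success?: boolean }` body.
+ * The queue.fal.run host and `Key` auth scheme are assumptions:
+ *
+ *   const res = await fetch(
+ *     `https://queue.fal.run/fal-ai/kandinsky5/text-to-video/distill/requests/${requestId}/cancel`,
+ *     { method: 'PUT', headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+ *   )
+ *   const { success }: PutFalAiKandinsky5TextToVideoDistillRequestsByRequestIdCancelResponse =
+ *     await res.json()
+ *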
+ */ + success?: boolean + } + } + +export type PutFalAiKandinsky5TextToVideoDistillRequestsByRequestIdCancelResponse = + PutFalAiKandinsky5TextToVideoDistillRequestsByRequestIdCancelResponses[keyof PutFalAiKandinsky5TextToVideoDistillRequestsByRequestIdCancelResponses] + +export type PostFalAiKandinsky5TextToVideoDistillData = { + body: SchemaKandinsky5TextToVideoDistillInput + path?: never + query?: never + url: '/fal-ai/kandinsky5/text-to-video/distill' +} + +export type PostFalAiKandinsky5TextToVideoDistillResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKandinsky5TextToVideoDistillResponse = + PostFalAiKandinsky5TextToVideoDistillResponses[keyof PostFalAiKandinsky5TextToVideoDistillResponses] + +export type GetFalAiKandinsky5TextToVideoDistillRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kandinsky5/text-to-video/distill/requests/{request_id}' +} + +export type GetFalAiKandinsky5TextToVideoDistillRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaKandinsky5TextToVideoDistillOutput +} + +export type GetFalAiKandinsky5TextToVideoDistillRequestsByRequestIdResponse = + GetFalAiKandinsky5TextToVideoDistillRequestsByRequestIdResponses[keyof GetFalAiKandinsky5TextToVideoDistillRequestsByRequestIdResponses] + +export type GetFalAiKandinsky5TextToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kandinsky5/text-to-video/requests/{request_id}/status' +} + +export type GetFalAiKandinsky5TextToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiKandinsky5TextToVideoRequestsByRequestIdStatusResponse = + GetFalAiKandinsky5TextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiKandinsky5TextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiKandinsky5TextToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kandinsky5/text-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiKandinsky5TextToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiKandinsky5TextToVideoRequestsByRequestIdCancelResponse = + PutFalAiKandinsky5TextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiKandinsky5TextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiKandinsky5TextToVideoData = { + body: SchemaKandinsky5TextToVideoInput + path?: never + query?: never + url: '/fal-ai/kandinsky5/text-to-video' +} + +export type PostFalAiKandinsky5TextToVideoResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiKandinsky5TextToVideoResponse = + PostFalAiKandinsky5TextToVideoResponses[keyof PostFalAiKandinsky5TextToVideoResponses] + +export type GetFalAiKandinsky5TextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kandinsky5/text-to-video/requests/{request_id}' +} + +export type GetFalAiKandinsky5TextToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaKandinsky5TextToVideoOutput +} + +export type GetFalAiKandinsky5TextToVideoRequestsByRequestIdResponse = + GetFalAiKandinsky5TextToVideoRequestsByRequestIdResponses[keyof GetFalAiKandinsky5TextToVideoRequestsByRequestIdResponses] + +export type GetFalAiVeo31FastRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/veo3.1/fast/requests/{request_id}/status' +} + +export type GetFalAiVeo31FastRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiVeo31FastRequestsByRequestIdStatusResponse = + GetFalAiVeo31FastRequestsByRequestIdStatusResponses[keyof GetFalAiVeo31FastRequestsByRequestIdStatusResponses] + +export type PutFalAiVeo31FastRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/veo3.1/fast/requests/{request_id}/cancel' +} + +export type PutFalAiVeo31FastRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiVeo31FastRequestsByRequestIdCancelResponse = + PutFalAiVeo31FastRequestsByRequestIdCancelResponses[keyof PutFalAiVeo31FastRequestsByRequestIdCancelResponses] + +export type PostFalAiVeo31FastData = { + body: SchemaVeo31FastInput + path?: never + query?: never + url: '/fal-ai/veo3.1/fast' +} + +export type PostFalAiVeo31FastResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiVeo31FastResponse = + PostFalAiVeo31FastResponses[keyof PostFalAiVeo31FastResponses] + +export type GetFalAiVeo31FastRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/veo3.1/fast/requests/{request_id}' +} + +export type GetFalAiVeo31FastRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaVeo31FastOutput +} + +export type GetFalAiVeo31FastRequestsByRequestIdResponse = + GetFalAiVeo31FastRequestsByRequestIdResponses[keyof GetFalAiVeo31FastRequestsByRequestIdResponses] + +export type GetFalAiVeo31RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/veo3.1/requests/{request_id}/status' +} + +export type GetFalAiVeo31RequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiVeo31RequestsByRequestIdStatusResponse = + GetFalAiVeo31RequestsByRequestIdStatusResponses[keyof GetFalAiVeo31RequestsByRequestIdStatusResponses] + +export type PutFalAiVeo31RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/veo3.1/requests/{request_id}/cancel' +} + +export type PutFalAiVeo31RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiVeo31RequestsByRequestIdCancelResponse = + PutFalAiVeo31RequestsByRequestIdCancelResponses[keyof PutFalAiVeo31RequestsByRequestIdCancelResponses] + +export type PostFalAiVeo31Data = { + body: SchemaVeo31Input + path?: never + query?: never + url: '/fal-ai/veo3.1' +} + +export type PostFalAiVeo31Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiVeo31Response = + PostFalAiVeo31Responses[keyof PostFalAiVeo31Responses] + +export type GetFalAiVeo31RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/veo3.1/requests/{request_id}' +} + +export type GetFalAiVeo31RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaVeo31Output +} + +export type GetFalAiVeo31RequestsByRequestIdResponse = + GetFalAiVeo31RequestsByRequestIdResponses[keyof GetFalAiVeo31RequestsByRequestIdResponses] + +export type GetFalAiSora2TextToVideoProRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/sora-2/text-to-video/pro/requests/{request_id}/status' +} + +export type GetFalAiSora2TextToVideoProRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSora2TextToVideoProRequestsByRequestIdStatusResponse = + GetFalAiSora2TextToVideoProRequestsByRequestIdStatusResponses[keyof GetFalAiSora2TextToVideoProRequestsByRequestIdStatusResponses] + +export type PutFalAiSora2TextToVideoProRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sora-2/text-to-video/pro/requests/{request_id}/cancel' +} + +export type PutFalAiSora2TextToVideoProRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiSora2TextToVideoProRequestsByRequestIdCancelResponse = + PutFalAiSora2TextToVideoProRequestsByRequestIdCancelResponses[keyof PutFalAiSora2TextToVideoProRequestsByRequestIdCancelResponses] + +export type PostFalAiSora2TextToVideoProData = { + body: SchemaSora2TextToVideoProInput + path?: never + query?: never + url: '/fal-ai/sora-2/text-to-video/pro' +} + +export type PostFalAiSora2TextToVideoProResponses = { + /** + * The request status. 
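+ *
+ * Editor's note (illustrative sketch, not generator output): submission POSTs the
+ * Input schema to the endpoint path and gets a SchemaQueueStatus back, not the
+ * finished video. The queue.fal.run host and the prompt-only input shape are
+ * assumptions; consult SchemaSora2TextToVideoProInput for the real field set:
+ *
+ *   const input = { prompt: 'a quiet harbor at dawn' } as SchemaSora2TextToVideoProInput
+ *   const res = await fetch('https://queue.fal.run/fal-ai/sora-2/text-to-video/pro', {
+ *     method: 'POST',
+ *     headers: { Authorization: `Key ${process.env.FAL_KEY}`, 'Content-Type': 'application/json' },
+ *     body: JSON.stringify(input),
+ *   })
+ *   const queued: PostFalAiSora2TextToVideoProResponse = await res.json()
+ *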
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiSora2TextToVideoProResponse = + PostFalAiSora2TextToVideoProResponses[keyof PostFalAiSora2TextToVideoProResponses] + +export type GetFalAiSora2TextToVideoProRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sora-2/text-to-video/pro/requests/{request_id}' +} + +export type GetFalAiSora2TextToVideoProRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSora2TextToVideoProOutput +} + +export type GetFalAiSora2TextToVideoProRequestsByRequestIdResponse = + GetFalAiSora2TextToVideoProRequestsByRequestIdResponses[keyof GetFalAiSora2TextToVideoProRequestsByRequestIdResponses] + +export type GetFalAiSora2TextToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/sora-2/text-to-video/requests/{request_id}/status' +} + +export type GetFalAiSora2TextToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSora2TextToVideoRequestsByRequestIdStatusResponse = + GetFalAiSora2TextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiSora2TextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiSora2TextToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sora-2/text-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiSora2TextToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiSora2TextToVideoRequestsByRequestIdCancelResponse = + PutFalAiSora2TextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiSora2TextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiSora2TextToVideoData = { + body: SchemaSora2TextToVideoInput + path?: never + query?: never + url: '/fal-ai/sora-2/text-to-video' +} + +export type PostFalAiSora2TextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSora2TextToVideoResponse = + PostFalAiSora2TextToVideoResponses[keyof PostFalAiSora2TextToVideoResponses] + +export type GetFalAiSora2TextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sora-2/text-to-video/requests/{request_id}' +} + +export type GetFalAiSora2TextToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSora2TextToVideoOutput +} + +export type GetFalAiSora2TextToVideoRequestsByRequestIdResponse = + GetFalAiSora2TextToVideoRequestsByRequestIdResponses[keyof GetFalAiSora2TextToVideoRequestsByRequestIdResponses] + +export type GetFalAiOviRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ovi/requests/{request_id}/status' +} + +export type GetFalAiOviRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiOviRequestsByRequestIdStatusResponse = + GetFalAiOviRequestsByRequestIdStatusResponses[keyof GetFalAiOviRequestsByRequestIdStatusResponses] + +export type PutFalAiOviRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ovi/requests/{request_id}/cancel' +} + +export type PutFalAiOviRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiOviRequestsByRequestIdCancelResponse = + PutFalAiOviRequestsByRequestIdCancelResponses[keyof PutFalAiOviRequestsByRequestIdCancelResponses] + +export type PostFalAiOviData = { + body: SchemaOviInput + path?: never + query?: never + url: '/fal-ai/ovi' +} + +export type PostFalAiOviResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiOviResponse = + PostFalAiOviResponses[keyof PostFalAiOviResponses] + +export type GetFalAiOviRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ovi/requests/{request_id}' +} + +export type GetFalAiOviRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaOviOutput +} + +export type GetFalAiOviRequestsByRequestIdResponse = + GetFalAiOviRequestsByRequestIdResponses[keyof GetFalAiOviRequestsByRequestIdResponses] + +export type GetFalAiWan25PreviewTextToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan-25-preview/text-to-video/requests/{request_id}/status' +} + +export type GetFalAiWan25PreviewTextToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiWan25PreviewTextToVideoRequestsByRequestIdStatusResponse = + GetFalAiWan25PreviewTextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiWan25PreviewTextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiWan25PreviewTextToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-25-preview/text-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiWan25PreviewTextToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiWan25PreviewTextToVideoRequestsByRequestIdCancelResponse = + PutFalAiWan25PreviewTextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiWan25PreviewTextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiWan25PreviewTextToVideoData = { + body: SchemaWan25PreviewTextToVideoInput + path?: never + query?: never + url: '/fal-ai/wan-25-preview/text-to-video' +} + +export type PostFalAiWan25PreviewTextToVideoResponses = { + /** + * The request status. 
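+ *
+ * Editor's note (illustrative, not generator output): in the `...Data` shapes,
+ * `body?: never` and `query?: never` encode "this operation accepts no body /
+ * query", so supplying one is a compile-time error, while `path.request_id`
+ * stays required. For example, a well-formed result-fetch descriptor is just:
+ *
+ *   const data: GetFalAiWan25PreviewTextToVideoRequestsByRequestIdData = {
+ *     path: { request_id: requestId }, // requestId: hypothetical string
+ *     url: '/fal-ai/wan-25-preview/text-to-video/requests/{request_id}',
+ *   }
+ *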
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiWan25PreviewTextToVideoResponse = + PostFalAiWan25PreviewTextToVideoResponses[keyof PostFalAiWan25PreviewTextToVideoResponses] + +export type GetFalAiWan25PreviewTextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-25-preview/text-to-video/requests/{request_id}' +} + +export type GetFalAiWan25PreviewTextToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWan25PreviewTextToVideoOutput +} + +export type GetFalAiWan25PreviewTextToVideoRequestsByRequestIdResponse = + GetFalAiWan25PreviewTextToVideoRequestsByRequestIdResponses[keyof GetFalAiWan25PreviewTextToVideoRequestsByRequestIdResponses] + +export type GetArgilAvatarsTextToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/argil/avatars/text-to-video/requests/{request_id}/status' +} + +export type GetArgilAvatarsTextToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetArgilAvatarsTextToVideoRequestsByRequestIdStatusResponse = + GetArgilAvatarsTextToVideoRequestsByRequestIdStatusResponses[keyof GetArgilAvatarsTextToVideoRequestsByRequestIdStatusResponses] + +export type PutArgilAvatarsTextToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/argil/avatars/text-to-video/requests/{request_id}/cancel' +} + +export type PutArgilAvatarsTextToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutArgilAvatarsTextToVideoRequestsByRequestIdCancelResponse = + PutArgilAvatarsTextToVideoRequestsByRequestIdCancelResponses[keyof PutArgilAvatarsTextToVideoRequestsByRequestIdCancelResponses] + +export type PostArgilAvatarsTextToVideoData = { + body: SchemaAvatarsTextToVideoInput + path?: never + query?: never + url: '/argil/avatars/text-to-video' +} + +export type PostArgilAvatarsTextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostArgilAvatarsTextToVideoResponse = + PostArgilAvatarsTextToVideoResponses[keyof PostArgilAvatarsTextToVideoResponses] + +export type GetArgilAvatarsTextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/argil/avatars/text-to-video/requests/{request_id}' +} + +export type GetArgilAvatarsTextToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaAvatarsTextToVideoOutput +} + +export type GetArgilAvatarsTextToVideoRequestsByRequestIdResponse = + GetArgilAvatarsTextToVideoRequestsByRequestIdResponses[keyof GetArgilAvatarsTextToVideoRequestsByRequestIdResponses] + +export type GetFalAiPixverseV5TextToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/pixverse/v5/text-to-video/requests/{request_id}/status' +} + +export type GetFalAiPixverseV5TextToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiPixverseV5TextToVideoRequestsByRequestIdStatusResponse = + GetFalAiPixverseV5TextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiPixverseV5TextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiPixverseV5TextToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v5/text-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiPixverseV5TextToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiPixverseV5TextToVideoRequestsByRequestIdCancelResponse = + PutFalAiPixverseV5TextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiPixverseV5TextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiPixverseV5TextToVideoData = { + body: SchemaPixverseV5TextToVideoInput + path?: never + query?: never + url: '/fal-ai/pixverse/v5/text-to-video' +} + +export type PostFalAiPixverseV5TextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPixverseV5TextToVideoResponse = + PostFalAiPixverseV5TextToVideoResponses[keyof PostFalAiPixverseV5TextToVideoResponses] + +export type GetFalAiPixverseV5TextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v5/text-to-video/requests/{request_id}' +} + +export type GetFalAiPixverseV5TextToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPixverseV5TextToVideoOutput +} + +export type GetFalAiPixverseV5TextToVideoRequestsByRequestIdResponse = + GetFalAiPixverseV5TextToVideoRequestsByRequestIdResponses[keyof GetFalAiPixverseV5TextToVideoRequestsByRequestIdResponses] + +export type GetFalAiInfinitalkSingleTextRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/infinitalk/single-text/requests/{request_id}/status' +} + +export type GetFalAiInfinitalkSingleTextRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiInfinitalkSingleTextRequestsByRequestIdStatusResponse = + GetFalAiInfinitalkSingleTextRequestsByRequestIdStatusResponses[keyof GetFalAiInfinitalkSingleTextRequestsByRequestIdStatusResponses] + +export type PutFalAiInfinitalkSingleTextRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/infinitalk/single-text/requests/{request_id}/cancel' +} + +export type PutFalAiInfinitalkSingleTextRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiInfinitalkSingleTextRequestsByRequestIdCancelResponse = + PutFalAiInfinitalkSingleTextRequestsByRequestIdCancelResponses[keyof PutFalAiInfinitalkSingleTextRequestsByRequestIdCancelResponses] + +export type PostFalAiInfinitalkSingleTextData = { + body: SchemaInfinitalkSingleTextInput + path?: never + query?: never + url: '/fal-ai/infinitalk/single-text' +} + +export type PostFalAiInfinitalkSingleTextResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiInfinitalkSingleTextResponse = + PostFalAiInfinitalkSingleTextResponses[keyof PostFalAiInfinitalkSingleTextResponses] + +export type GetFalAiInfinitalkSingleTextRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/infinitalk/single-text/requests/{request_id}' +} + +export type GetFalAiInfinitalkSingleTextRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaInfinitalkSingleTextOutput +} + +export type GetFalAiInfinitalkSingleTextRequestsByRequestIdResponse = + GetFalAiInfinitalkSingleTextRequestsByRequestIdResponses[keyof GetFalAiInfinitalkSingleTextRequestsByRequestIdResponses] + +export type GetMoonvalleyMareyT2vRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/moonvalley/marey/t2v/requests/{request_id}/status' +} + +export type GetMoonvalleyMareyT2vRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetMoonvalleyMareyT2vRequestsByRequestIdStatusResponse = + GetMoonvalleyMareyT2vRequestsByRequestIdStatusResponses[keyof GetMoonvalleyMareyT2vRequestsByRequestIdStatusResponses] + +export type PutMoonvalleyMareyT2vRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/moonvalley/marey/t2v/requests/{request_id}/cancel' +} + +export type PutMoonvalleyMareyT2vRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutMoonvalleyMareyT2vRequestsByRequestIdCancelResponse = + PutMoonvalleyMareyT2vRequestsByRequestIdCancelResponses[keyof PutMoonvalleyMareyT2vRequestsByRequestIdCancelResponses] + +export type PostMoonvalleyMareyT2vData = { + body: SchemaMareyT2vInput + path?: never + query?: never + url: '/moonvalley/marey/t2v' +} + +export type PostMoonvalleyMareyT2vResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostMoonvalleyMareyT2vResponse = + PostMoonvalleyMareyT2vResponses[keyof PostMoonvalleyMareyT2vResponses] + +export type GetMoonvalleyMareyT2vRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/moonvalley/marey/t2v/requests/{request_id}' +} + +export type GetMoonvalleyMareyT2vRequestsByRequestIdResponses = { + /** + * Result of the request. 
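+ *
+ * Editor's note (illustrative sketch, not generator output): once the status enum
+ * reaches COMPLETED, the bare requests/{request_id} path returns the model-specific
+ * Output schema instead of SchemaQueueStatus. The queue.fal.run host is an
+ * assumption:
+ *
+ *   const res = await fetch(
+ *     `https://queue.fal.run/moonvalley/marey/t2v/requests/${requestId}`,
+ *     { headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+ *   )
+ *   const result: GetMoonvalleyMareyT2vRequestsByRequestIdResponse = await res.json()
+ *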
+ */ + 200: SchemaMareyT2vOutput +} + +export type GetMoonvalleyMareyT2vRequestsByRequestIdResponse = + GetMoonvalleyMareyT2vRequestsByRequestIdResponses[keyof GetMoonvalleyMareyT2vRequestsByRequestIdResponses] + +export type GetFalAiWanV22A14bTextToVideoLoraRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan/v2.2-a14b/text-to-video/lora/requests/{request_id}/status' +} + +export type GetFalAiWanV22A14bTextToVideoLoraRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiWanV22A14bTextToVideoLoraRequestsByRequestIdStatusResponse = + GetFalAiWanV22A14bTextToVideoLoraRequestsByRequestIdStatusResponses[keyof GetFalAiWanV22A14bTextToVideoLoraRequestsByRequestIdStatusResponses] + +export type PutFalAiWanV22A14bTextToVideoLoraRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan/v2.2-a14b/text-to-video/lora/requests/{request_id}/cancel' +} + +export type PutFalAiWanV22A14bTextToVideoLoraRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiWanV22A14bTextToVideoLoraRequestsByRequestIdCancelResponse = + PutFalAiWanV22A14bTextToVideoLoraRequestsByRequestIdCancelResponses[keyof PutFalAiWanV22A14bTextToVideoLoraRequestsByRequestIdCancelResponses] + +export type PostFalAiWanV22A14bTextToVideoLoraData = { + body: SchemaWanV22A14bTextToVideoLoraInput + path?: never + query?: never + url: '/fal-ai/wan/v2.2-a14b/text-to-video/lora' +} + +export type PostFalAiWanV22A14bTextToVideoLoraResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanV22A14bTextToVideoLoraResponse = + PostFalAiWanV22A14bTextToVideoLoraResponses[keyof PostFalAiWanV22A14bTextToVideoLoraResponses] + +export type GetFalAiWanV22A14bTextToVideoLoraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan/v2.2-a14b/text-to-video/lora/requests/{request_id}' +} + +export type GetFalAiWanV22A14bTextToVideoLoraRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWanV22A14bTextToVideoLoraOutput +} + +export type GetFalAiWanV22A14bTextToVideoLoraRequestsByRequestIdResponse = + GetFalAiWanV22A14bTextToVideoLoraRequestsByRequestIdResponses[keyof GetFalAiWanV22A14bTextToVideoLoraRequestsByRequestIdResponses] + +export type GetFalAiWanV225bTextToVideoDistillRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan/v2.2-5b/text-to-video/distill/requests/{request_id}/status' +} + +export type GetFalAiWanV225bTextToVideoDistillRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiWanV225bTextToVideoDistillRequestsByRequestIdStatusResponse = + GetFalAiWanV225bTextToVideoDistillRequestsByRequestIdStatusResponses[keyof GetFalAiWanV225bTextToVideoDistillRequestsByRequestIdStatusResponses] + +export type PutFalAiWanV225bTextToVideoDistillRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan/v2.2-5b/text-to-video/distill/requests/{request_id}/cancel' +} + +export type PutFalAiWanV225bTextToVideoDistillRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiWanV225bTextToVideoDistillRequestsByRequestIdCancelResponse = + PutFalAiWanV225bTextToVideoDistillRequestsByRequestIdCancelResponses[keyof PutFalAiWanV225bTextToVideoDistillRequestsByRequestIdCancelResponses] + +export type PostFalAiWanV225bTextToVideoDistillData = { + body: SchemaWanV225bTextToVideoDistillInput + path?: never + query?: never + url: '/fal-ai/wan/v2.2-5b/text-to-video/distill' +} + +export type PostFalAiWanV225bTextToVideoDistillResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanV225bTextToVideoDistillResponse = + PostFalAiWanV225bTextToVideoDistillResponses[keyof PostFalAiWanV225bTextToVideoDistillResponses] + +export type GetFalAiWanV225bTextToVideoDistillRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan/v2.2-5b/text-to-video/distill/requests/{request_id}' +} + +export type GetFalAiWanV225bTextToVideoDistillRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWanV225bTextToVideoDistillOutput +} + +export type GetFalAiWanV225bTextToVideoDistillRequestsByRequestIdResponse = + GetFalAiWanV225bTextToVideoDistillRequestsByRequestIdResponses[keyof GetFalAiWanV225bTextToVideoDistillRequestsByRequestIdResponses] + +export type GetFalAiWanV225bTextToVideoFastWanRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan/v2.2-5b/text-to-video/fast-wan/requests/{request_id}/status' +} + +export type GetFalAiWanV225bTextToVideoFastWanRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiWanV225bTextToVideoFastWanRequestsByRequestIdStatusResponse = + GetFalAiWanV225bTextToVideoFastWanRequestsByRequestIdStatusResponses[keyof GetFalAiWanV225bTextToVideoFastWanRequestsByRequestIdStatusResponses] + +export type PutFalAiWanV225bTextToVideoFastWanRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan/v2.2-5b/text-to-video/fast-wan/requests/{request_id}/cancel' +} + +export type PutFalAiWanV225bTextToVideoFastWanRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiWanV225bTextToVideoFastWanRequestsByRequestIdCancelResponse = + PutFalAiWanV225bTextToVideoFastWanRequestsByRequestIdCancelResponses[keyof PutFalAiWanV225bTextToVideoFastWanRequestsByRequestIdCancelResponses] + +export type PostFalAiWanV225bTextToVideoFastWanData = { + body: SchemaWanV225bTextToVideoFastWanInput + path?: never + query?: never + url: '/fal-ai/wan/v2.2-5b/text-to-video/fast-wan' +} + +export type PostFalAiWanV225bTextToVideoFastWanResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanV225bTextToVideoFastWanResponse = + PostFalAiWanV225bTextToVideoFastWanResponses[keyof PostFalAiWanV225bTextToVideoFastWanResponses] + +export type GetFalAiWanV225bTextToVideoFastWanRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan/v2.2-5b/text-to-video/fast-wan/requests/{request_id}' +} + +export type GetFalAiWanV225bTextToVideoFastWanRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWanV225bTextToVideoFastWanOutput +} + +export type GetFalAiWanV225bTextToVideoFastWanRequestsByRequestIdResponse = + GetFalAiWanV225bTextToVideoFastWanRequestsByRequestIdResponses[keyof GetFalAiWanV225bTextToVideoFastWanRequestsByRequestIdResponses] + +export type GetFalAiWanV22A14bTextToVideoTurboRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan/v2.2-a14b/text-to-video/turbo/requests/{request_id}/status' +} + +export type GetFalAiWanV22A14bTextToVideoTurboRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiWanV22A14bTextToVideoTurboRequestsByRequestIdStatusResponse = + GetFalAiWanV22A14bTextToVideoTurboRequestsByRequestIdStatusResponses[keyof GetFalAiWanV22A14bTextToVideoTurboRequestsByRequestIdStatusResponses] + +export type PutFalAiWanV22A14bTextToVideoTurboRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan/v2.2-a14b/text-to-video/turbo/requests/{request_id}/cancel' +} + +export type PutFalAiWanV22A14bTextToVideoTurboRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiWanV22A14bTextToVideoTurboRequestsByRequestIdCancelResponse = + PutFalAiWanV22A14bTextToVideoTurboRequestsByRequestIdCancelResponses[keyof PutFalAiWanV22A14bTextToVideoTurboRequestsByRequestIdCancelResponses] + +export type PostFalAiWanV22A14bTextToVideoTurboData = { + body: SchemaWanV22A14bTextToVideoTurboInput + path?: never + query?: never + url: '/fal-ai/wan/v2.2-a14b/text-to-video/turbo' +} + +export type PostFalAiWanV22A14bTextToVideoTurboResponses = { + /** + * The request status. 
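+ *
+ * Editor's note (illustrative, not generator output): every `...Response` alias in
+ * this file is the indexed access `Responses[keyof Responses]`, i.e. the union of
+ * all documented response bodies; with a single 200 entry it collapses to that
+ * body. The same lookup also works per status code:
+ *
+ *   type Turbo200 = PostFalAiWanV22A14bTextToVideoTurboResponses[200] // SchemaQueueStatus
+ *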
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanV22A14bTextToVideoTurboResponse = + PostFalAiWanV22A14bTextToVideoTurboResponses[keyof PostFalAiWanV22A14bTextToVideoTurboResponses] + +export type GetFalAiWanV22A14bTextToVideoTurboRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan/v2.2-a14b/text-to-video/turbo/requests/{request_id}' +} + +export type GetFalAiWanV22A14bTextToVideoTurboRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWanV22A14bTextToVideoTurboOutput +} + +export type GetFalAiWanV22A14bTextToVideoTurboRequestsByRequestIdResponse = + GetFalAiWanV22A14bTextToVideoTurboRequestsByRequestIdResponses[keyof GetFalAiWanV22A14bTextToVideoTurboRequestsByRequestIdResponses] + +export type GetFalAiWanV225bTextToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan/v2.2-5b/text-to-video/requests/{request_id}/status' +} + +export type GetFalAiWanV225bTextToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiWanV225bTextToVideoRequestsByRequestIdStatusResponse = + GetFalAiWanV225bTextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiWanV225bTextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiWanV225bTextToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan/v2.2-5b/text-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiWanV225bTextToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiWanV225bTextToVideoRequestsByRequestIdCancelResponse = + PutFalAiWanV225bTextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiWanV225bTextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiWanV225bTextToVideoData = { + body: SchemaWanV225bTextToVideoInput + path?: never + query?: never + url: '/fal-ai/wan/v2.2-5b/text-to-video' +} + +export type PostFalAiWanV225bTextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanV225bTextToVideoResponse = + PostFalAiWanV225bTextToVideoResponses[keyof PostFalAiWanV225bTextToVideoResponses] + +export type GetFalAiWanV225bTextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan/v2.2-5b/text-to-video/requests/{request_id}' +} + +export type GetFalAiWanV225bTextToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWanV225bTextToVideoOutput +} + +export type GetFalAiWanV225bTextToVideoRequestsByRequestIdResponse = + GetFalAiWanV225bTextToVideoRequestsByRequestIdResponses[keyof GetFalAiWanV225bTextToVideoRequestsByRequestIdResponses] + +export type GetFalAiWanV22A14bTextToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/wan/v2.2-a14b/text-to-video/requests/{request_id}/status' +} + +export type GetFalAiWanV22A14bTextToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiWanV22A14bTextToVideoRequestsByRequestIdStatusResponse = + GetFalAiWanV22A14bTextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiWanV22A14bTextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiWanV22A14bTextToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan/v2.2-a14b/text-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiWanV22A14bTextToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiWanV22A14bTextToVideoRequestsByRequestIdCancelResponse = + PutFalAiWanV22A14bTextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiWanV22A14bTextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiWanV22A14bTextToVideoData = { + body: SchemaWanV22A14bTextToVideoInput + path?: never + query?: never + url: '/fal-ai/wan/v2.2-a14b/text-to-video' +} + +export type PostFalAiWanV22A14bTextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanV22A14bTextToVideoResponse = + PostFalAiWanV22A14bTextToVideoResponses[keyof PostFalAiWanV22A14bTextToVideoResponses] + +export type GetFalAiWanV22A14bTextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan/v2.2-a14b/text-to-video/requests/{request_id}' +} + +export type GetFalAiWanV22A14bTextToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWanV22A14bTextToVideoOutput +} + +export type GetFalAiWanV22A14bTextToVideoRequestsByRequestIdResponse = + GetFalAiWanV22A14bTextToVideoRequestsByRequestIdResponses[keyof GetFalAiWanV22A14bTextToVideoRequestsByRequestIdResponses] + +export type GetFalAiLtxv13B098DistilledRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ltxv-13b-098-distilled/requests/{request_id}/status' +} + +export type GetFalAiLtxv13B098DistilledRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLtxv13B098DistilledRequestsByRequestIdStatusResponse = + GetFalAiLtxv13B098DistilledRequestsByRequestIdStatusResponses[keyof GetFalAiLtxv13B098DistilledRequestsByRequestIdStatusResponses] + +export type PutFalAiLtxv13B098DistilledRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltxv-13b-098-distilled/requests/{request_id}/cancel' +} + +export type PutFalAiLtxv13B098DistilledRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiLtxv13B098DistilledRequestsByRequestIdCancelResponse = + PutFalAiLtxv13B098DistilledRequestsByRequestIdCancelResponses[keyof PutFalAiLtxv13B098DistilledRequestsByRequestIdCancelResponses] + +export type PostFalAiLtxv13B098DistilledData = { + body: SchemaLtxv13B098DistilledInput + path?: never + query?: never + url: '/fal-ai/ltxv-13b-098-distilled' +} + +export type PostFalAiLtxv13B098DistilledResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtxv13B098DistilledResponse = + PostFalAiLtxv13B098DistilledResponses[keyof PostFalAiLtxv13B098DistilledResponses] + +export type GetFalAiLtxv13B098DistilledRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltxv-13b-098-distilled/requests/{request_id}' +} + +export type GetFalAiLtxv13B098DistilledRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLtxv13B098DistilledOutput +} + +export type GetFalAiLtxv13B098DistilledRequestsByRequestIdResponse = + GetFalAiLtxv13B098DistilledRequestsByRequestIdResponses[keyof GetFalAiLtxv13B098DistilledRequestsByRequestIdResponses] + +export type GetFalAiMinimaxHailuo02ProTextToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/minimax/hailuo-02/pro/text-to-video/requests/{request_id}/status' + } + +export type GetFalAiMinimaxHailuo02ProTextToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiMinimaxHailuo02ProTextToVideoRequestsByRequestIdStatusResponse = + GetFalAiMinimaxHailuo02ProTextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiMinimaxHailuo02ProTextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiMinimaxHailuo02ProTextToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/hailuo-02/pro/text-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiMinimaxHailuo02ProTextToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiMinimaxHailuo02ProTextToVideoRequestsByRequestIdCancelResponse = + PutFalAiMinimaxHailuo02ProTextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiMinimaxHailuo02ProTextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiMinimaxHailuo02ProTextToVideoData = { + body: SchemaMinimaxHailuo02ProTextToVideoInput + path?: never + query?: never + url: '/fal-ai/minimax/hailuo-02/pro/text-to-video' +} + +export type PostFalAiMinimaxHailuo02ProTextToVideoResponses = { + /** + * The request status. 
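+ *
+ * Editor's note (illustrative sketch, not generator output): the submit, status and
+ * result types above compose into the usual queue loop. The queue.fal.run host and
+ * the `input` value are assumptions; real code would add backoff and error handling:
+ *
+ *   const base = 'https://queue.fal.run/fal-ai/minimax/hailuo-02/pro/text-to-video'
+ *   const headers = { Authorization: `Key ${process.env.FAL_KEY}` }
+ *   const { request_id }: SchemaQueueStatus = await (
+ *     await fetch(base, {
+ *       method: 'POST',
+ *       headers: { ...headers, 'Content-Type': 'application/json' },
+ *       body: JSON.stringify(input), // input: SchemaMinimaxHailuo02ProTextToVideoInput (hypothetical)
+ *     })
+ *   ).json()
+ *   let status: SchemaQueueStatus
+ *   do {
+ *     await new Promise((r) => setTimeout(r, 2000))
+ *     status = await (await fetch(`${base}/requests/${request_id}/status`, { headers })).json()
+ *   } while (status.status !== 'COMPLETED')
+ *   const output: SchemaMinimaxHailuo02ProTextToVideoOutput = await (
+ *     await fetch(`${base}/requests/${request_id}`, { headers })
+ *   ).json()
+ *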
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiMinimaxHailuo02ProTextToVideoResponse = + PostFalAiMinimaxHailuo02ProTextToVideoResponses[keyof PostFalAiMinimaxHailuo02ProTextToVideoResponses] + +export type GetFalAiMinimaxHailuo02ProTextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/hailuo-02/pro/text-to-video/requests/{request_id}' +} + +export type GetFalAiMinimaxHailuo02ProTextToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaMinimaxHailuo02ProTextToVideoOutput + } + +export type GetFalAiMinimaxHailuo02ProTextToVideoRequestsByRequestIdResponse = + GetFalAiMinimaxHailuo02ProTextToVideoRequestsByRequestIdResponses[keyof GetFalAiMinimaxHailuo02ProTextToVideoRequestsByRequestIdResponses] + +export type GetFalAiBytedanceSeedanceV1ProTextToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/bytedance/seedance/v1/pro/text-to-video/requests/{request_id}/status' + } + +export type GetFalAiBytedanceSeedanceV1ProTextToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiBytedanceSeedanceV1ProTextToVideoRequestsByRequestIdStatusResponse = + GetFalAiBytedanceSeedanceV1ProTextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiBytedanceSeedanceV1ProTextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiBytedanceSeedanceV1ProTextToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bytedance/seedance/v1/pro/text-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiBytedanceSeedanceV1ProTextToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiBytedanceSeedanceV1ProTextToVideoRequestsByRequestIdCancelResponse = + PutFalAiBytedanceSeedanceV1ProTextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiBytedanceSeedanceV1ProTextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiBytedanceSeedanceV1ProTextToVideoData = { + body: SchemaBytedanceSeedanceV1ProTextToVideoInput + path?: never + query?: never + url: '/fal-ai/bytedance/seedance/v1/pro/text-to-video' +} + +export type PostFalAiBytedanceSeedanceV1ProTextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiBytedanceSeedanceV1ProTextToVideoResponse = + PostFalAiBytedanceSeedanceV1ProTextToVideoResponses[keyof PostFalAiBytedanceSeedanceV1ProTextToVideoResponses] + +export type GetFalAiBytedanceSeedanceV1ProTextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bytedance/seedance/v1/pro/text-to-video/requests/{request_id}' +} + +export type GetFalAiBytedanceSeedanceV1ProTextToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. 
+ */ + 200: SchemaBytedanceSeedanceV1ProTextToVideoOutput + } + +export type GetFalAiBytedanceSeedanceV1ProTextToVideoRequestsByRequestIdResponse = + GetFalAiBytedanceSeedanceV1ProTextToVideoRequestsByRequestIdResponses[keyof GetFalAiBytedanceSeedanceV1ProTextToVideoRequestsByRequestIdResponses] + +export type GetFalAiBytedanceSeedanceV1LiteTextToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/bytedance/seedance/v1/lite/text-to-video/requests/{request_id}/status' + } + +export type GetFalAiBytedanceSeedanceV1LiteTextToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiBytedanceSeedanceV1LiteTextToVideoRequestsByRequestIdStatusResponse = + GetFalAiBytedanceSeedanceV1LiteTextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiBytedanceSeedanceV1LiteTextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiBytedanceSeedanceV1LiteTextToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bytedance/seedance/v1/lite/text-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiBytedanceSeedanceV1LiteTextToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiBytedanceSeedanceV1LiteTextToVideoRequestsByRequestIdCancelResponse = + PutFalAiBytedanceSeedanceV1LiteTextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiBytedanceSeedanceV1LiteTextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiBytedanceSeedanceV1LiteTextToVideoData = { + body: SchemaBytedanceSeedanceV1LiteTextToVideoInput + path?: never + query?: never + url: '/fal-ai/bytedance/seedance/v1/lite/text-to-video' +} + +export type PostFalAiBytedanceSeedanceV1LiteTextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiBytedanceSeedanceV1LiteTextToVideoResponse = + PostFalAiBytedanceSeedanceV1LiteTextToVideoResponses[keyof PostFalAiBytedanceSeedanceV1LiteTextToVideoResponses] + +export type GetFalAiBytedanceSeedanceV1LiteTextToVideoRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bytedance/seedance/v1/lite/text-to-video/requests/{request_id}' + } + +export type GetFalAiBytedanceSeedanceV1LiteTextToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaBytedanceSeedanceV1LiteTextToVideoOutput + } + +export type GetFalAiBytedanceSeedanceV1LiteTextToVideoRequestsByRequestIdResponse = + GetFalAiBytedanceSeedanceV1LiteTextToVideoRequestsByRequestIdResponses[keyof GetFalAiBytedanceSeedanceV1LiteTextToVideoRequestsByRequestIdResponses] + +export type GetFalAiKlingVideoV21MasterTextToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/kling-video/v2.1/master/text-to-video/requests/{request_id}/status' + } + +export type GetFalAiKlingVideoV21MasterTextToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiKlingVideoV21MasterTextToVideoRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoV21MasterTextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoV21MasterTextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoV21MasterTextToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v2.1/master/text-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiKlingVideoV21MasterTextToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiKlingVideoV21MasterTextToVideoRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoV21MasterTextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoV21MasterTextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoV21MasterTextToVideoData = { + body: SchemaKlingVideoV21MasterTextToVideoInput + path?: never + query?: never + url: '/fal-ai/kling-video/v2.1/master/text-to-video' +} + +export type PostFalAiKlingVideoV21MasterTextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoV21MasterTextToVideoResponse = + PostFalAiKlingVideoV21MasterTextToVideoResponses[keyof PostFalAiKlingVideoV21MasterTextToVideoResponses] + +export type GetFalAiKlingVideoV21MasterTextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v2.1/master/text-to-video/requests/{request_id}' +} + +export type GetFalAiKlingVideoV21MasterTextToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaKlingVideoV21MasterTextToVideoOutput + } + +export type GetFalAiKlingVideoV21MasterTextToVideoRequestsByRequestIdResponse = + GetFalAiKlingVideoV21MasterTextToVideoRequestsByRequestIdResponses[keyof GetFalAiKlingVideoV21MasterTextToVideoRequestsByRequestIdResponses] + +export type GetVeedAvatarsTextToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/veed/avatars/text-to-video/requests/{request_id}/status' +} + +export type GetVeedAvatarsTextToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetVeedAvatarsTextToVideoRequestsByRequestIdStatusResponse = + GetVeedAvatarsTextToVideoRequestsByRequestIdStatusResponses[keyof GetVeedAvatarsTextToVideoRequestsByRequestIdStatusResponses] + +export type PutVeedAvatarsTextToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/veed/avatars/text-to-video/requests/{request_id}/cancel' +} + +export type PutVeedAvatarsTextToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutVeedAvatarsTextToVideoRequestsByRequestIdCancelResponse = + PutVeedAvatarsTextToVideoRequestsByRequestIdCancelResponses[keyof PutVeedAvatarsTextToVideoRequestsByRequestIdCancelResponses] + +export type PostVeedAvatarsTextToVideoData = { + body: SchemaAvatarsTextToVideoInput + path?: never + query?: never + url: '/veed/avatars/text-to-video' +} + +export type PostVeedAvatarsTextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostVeedAvatarsTextToVideoResponse = + PostVeedAvatarsTextToVideoResponses[keyof PostVeedAvatarsTextToVideoResponses] + +export type GetVeedAvatarsTextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/veed/avatars/text-to-video/requests/{request_id}' +} + +export type GetVeedAvatarsTextToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaAvatarsTextToVideoOutput +} + +export type GetVeedAvatarsTextToVideoRequestsByRequestIdResponse = + GetVeedAvatarsTextToVideoRequestsByRequestIdResponses[keyof GetVeedAvatarsTextToVideoRequestsByRequestIdResponses] + +export type GetFalAiLtxVideo13bDevRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ltx-video-13b-dev/requests/{request_id}/status' +} + +export type GetFalAiLtxVideo13bDevRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLtxVideo13bDevRequestsByRequestIdStatusResponse = + GetFalAiLtxVideo13bDevRequestsByRequestIdStatusResponses[keyof GetFalAiLtxVideo13bDevRequestsByRequestIdStatusResponses] + +export type PutFalAiLtxVideo13bDevRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-video-13b-dev/requests/{request_id}/cancel' +} + +export type PutFalAiLtxVideo13bDevRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLtxVideo13bDevRequestsByRequestIdCancelResponse = + PutFalAiLtxVideo13bDevRequestsByRequestIdCancelResponses[keyof PutFalAiLtxVideo13bDevRequestsByRequestIdCancelResponses] + +export type PostFalAiLtxVideo13bDevData = { + body: SchemaLtxVideo13bDevInput + path?: never + query?: never + url: '/fal-ai/ltx-video-13b-dev' +} + +export type PostFalAiLtxVideo13bDevResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtxVideo13bDevResponse = + PostFalAiLtxVideo13bDevResponses[keyof PostFalAiLtxVideo13bDevResponses] + +export type GetFalAiLtxVideo13bDevRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-video-13b-dev/requests/{request_id}' +} + +export type GetFalAiLtxVideo13bDevRequestsByRequestIdResponses = { + /** + * Result of the request. 
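+ *
+ * Hand-written illustration: the `...Data` shapes above pair a literal
+ * `url` template with typed `path` params. The `request_id` value below
+ * is a placeholder; everything else is taken from the types in this
+ * file.
+ * @example
+ * ```ts
+ * const data: GetFalAiLtxVideo13bDevRequestsByRequestIdData = {
+ *   path: { request_id: 'abc123' },
+ *   url: '/fal-ai/ltx-video-13b-dev/requests/{request_id}',
+ * }
+ * // Expand the path template into a concrete request path.
+ * const expanded = data.url.replace('{request_id}', data.path.request_id)
+ * ```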
+ */ + 200: SchemaLtxVideo13bDevOutput +} + +export type GetFalAiLtxVideo13bDevRequestsByRequestIdResponse = + GetFalAiLtxVideo13bDevRequestsByRequestIdResponses[keyof GetFalAiLtxVideo13bDevRequestsByRequestIdResponses] + +export type GetFalAiLtxVideo13bDistilledRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ltx-video-13b-distilled/requests/{request_id}/status' +} + +export type GetFalAiLtxVideo13bDistilledRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLtxVideo13bDistilledRequestsByRequestIdStatusResponse = + GetFalAiLtxVideo13bDistilledRequestsByRequestIdStatusResponses[keyof GetFalAiLtxVideo13bDistilledRequestsByRequestIdStatusResponses] + +export type PutFalAiLtxVideo13bDistilledRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-video-13b-distilled/requests/{request_id}/cancel' +} + +export type PutFalAiLtxVideo13bDistilledRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLtxVideo13bDistilledRequestsByRequestIdCancelResponse = + PutFalAiLtxVideo13bDistilledRequestsByRequestIdCancelResponses[keyof PutFalAiLtxVideo13bDistilledRequestsByRequestIdCancelResponses] + +export type PostFalAiLtxVideo13bDistilledData = { + body: SchemaLtxVideo13bDistilledInput + path?: never + query?: never + url: '/fal-ai/ltx-video-13b-distilled' +} + +export type PostFalAiLtxVideo13bDistilledResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtxVideo13bDistilledResponse = + PostFalAiLtxVideo13bDistilledResponses[keyof PostFalAiLtxVideo13bDistilledResponses] + +export type GetFalAiLtxVideo13bDistilledRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-video-13b-distilled/requests/{request_id}' +} + +export type GetFalAiLtxVideo13bDistilledRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLtxVideo13bDistilledOutput +} + +export type GetFalAiLtxVideo13bDistilledRequestsByRequestIdResponse = + GetFalAiLtxVideo13bDistilledRequestsByRequestIdResponses[keyof GetFalAiLtxVideo13bDistilledRequestsByRequestIdResponses] + +export type GetFalAiPixverseV45TextToVideoFastRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pixverse/v4.5/text-to-video/fast/requests/{request_id}/status' +} + +export type GetFalAiPixverseV45TextToVideoFastRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
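+ *
+ * Hand-written illustrative sketch: polling this status endpoint with
+ * `?logs=1` until the request settles. The base URL, auth header, delay,
+ * `requestId` variable, and the `'COMPLETED'` status value are
+ * assumptions about the runtime API, not guarantees of this file.
+ * @example
+ * ```ts
+ * let status: SchemaQueueStatus
+ * do {
+ *   const res = await fetch(
+ *     `https://queue.fal.run/fal-ai/pixverse/v4.5/text-to-video/fast/requests/${requestId}/status?logs=1`,
+ *     { headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+ *   )
+ *   status = await res.json()
+ *   if (status.status !== 'COMPLETED') {
+ *     // Back off briefly between polls.
+ *     await new Promise((resolve) => setTimeout(resolve, 1000))
+ *   }
+ * } while (status.status !== 'COMPLETED')
+ * ```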
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiPixverseV45TextToVideoFastRequestsByRequestIdStatusResponse = + GetFalAiPixverseV45TextToVideoFastRequestsByRequestIdStatusResponses[keyof GetFalAiPixverseV45TextToVideoFastRequestsByRequestIdStatusResponses] + +export type PutFalAiPixverseV45TextToVideoFastRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v4.5/text-to-video/fast/requests/{request_id}/cancel' +} + +export type PutFalAiPixverseV45TextToVideoFastRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiPixverseV45TextToVideoFastRequestsByRequestIdCancelResponse = + PutFalAiPixverseV45TextToVideoFastRequestsByRequestIdCancelResponses[keyof PutFalAiPixverseV45TextToVideoFastRequestsByRequestIdCancelResponses] + +export type PostFalAiPixverseV45TextToVideoFastData = { + body: SchemaPixverseV45TextToVideoFastInput + path?: never + query?: never + url: '/fal-ai/pixverse/v4.5/text-to-video/fast' +} + +export type PostFalAiPixverseV45TextToVideoFastResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPixverseV45TextToVideoFastResponse = + PostFalAiPixverseV45TextToVideoFastResponses[keyof PostFalAiPixverseV45TextToVideoFastResponses] + +export type GetFalAiPixverseV45TextToVideoFastRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v4.5/text-to-video/fast/requests/{request_id}' +} + +export type GetFalAiPixverseV45TextToVideoFastRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPixverseV45TextToVideoFastOutput +} + +export type GetFalAiPixverseV45TextToVideoFastRequestsByRequestIdResponse = + GetFalAiPixverseV45TextToVideoFastRequestsByRequestIdResponses[keyof GetFalAiPixverseV45TextToVideoFastRequestsByRequestIdResponses] + +export type GetFalAiPixverseV45TextToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pixverse/v4.5/text-to-video/requests/{request_id}/status' +} + +export type GetFalAiPixverseV45TextToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiPixverseV45TextToVideoRequestsByRequestIdStatusResponse = + GetFalAiPixverseV45TextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiPixverseV45TextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiPixverseV45TextToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v4.5/text-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiPixverseV45TextToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiPixverseV45TextToVideoRequestsByRequestIdCancelResponse = + PutFalAiPixverseV45TextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiPixverseV45TextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiPixverseV45TextToVideoData = { + body: SchemaPixverseV45TextToVideoInput + path?: never + query?: never + url: '/fal-ai/pixverse/v4.5/text-to-video' +} + +export type PostFalAiPixverseV45TextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPixverseV45TextToVideoResponse = + PostFalAiPixverseV45TextToVideoResponses[keyof PostFalAiPixverseV45TextToVideoResponses] + +export type GetFalAiPixverseV45TextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v4.5/text-to-video/requests/{request_id}' +} + +export type GetFalAiPixverseV45TextToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPixverseV45TextToVideoOutput +} + +export type GetFalAiPixverseV45TextToVideoRequestsByRequestIdResponse = + GetFalAiPixverseV45TextToVideoRequestsByRequestIdResponses[keyof GetFalAiPixverseV45TextToVideoRequestsByRequestIdResponses] + +export type GetFalAiViduQ1TextToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/vidu/q1/text-to-video/requests/{request_id}/status' +} + +export type GetFalAiViduQ1TextToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiViduQ1TextToVideoRequestsByRequestIdStatusResponse = + GetFalAiViduQ1TextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiViduQ1TextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiViduQ1TextToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/vidu/q1/text-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiViduQ1TextToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiViduQ1TextToVideoRequestsByRequestIdCancelResponse = + PutFalAiViduQ1TextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiViduQ1TextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiViduQ1TextToVideoData = { + body: SchemaViduQ1TextToVideoInput + path?: never + query?: never + url: '/fal-ai/vidu/q1/text-to-video' +} + +export type PostFalAiViduQ1TextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiViduQ1TextToVideoResponse = + PostFalAiViduQ1TextToVideoResponses[keyof PostFalAiViduQ1TextToVideoResponses] + +export type GetFalAiViduQ1TextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/vidu/q1/text-to-video/requests/{request_id}' +} + +export type GetFalAiViduQ1TextToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaViduQ1TextToVideoOutput +} + +export type GetFalAiViduQ1TextToVideoRequestsByRequestIdResponse = + GetFalAiViduQ1TextToVideoRequestsByRequestIdResponses[keyof GetFalAiViduQ1TextToVideoRequestsByRequestIdResponses] + +export type GetFalAiMagiRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/magi/requests/{request_id}/status' +} + +export type GetFalAiMagiRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiMagiRequestsByRequestIdStatusResponse = + GetFalAiMagiRequestsByRequestIdStatusResponses[keyof GetFalAiMagiRequestsByRequestIdStatusResponses] + +export type PutFalAiMagiRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/magi/requests/{request_id}/cancel' +} + +export type PutFalAiMagiRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiMagiRequestsByRequestIdCancelResponse = + PutFalAiMagiRequestsByRequestIdCancelResponses[keyof PutFalAiMagiRequestsByRequestIdCancelResponses] + +export type PostFalAiMagiData = { + body: SchemaMagiInput + path?: never + query?: never + url: '/fal-ai/magi' +} + +export type PostFalAiMagiResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMagiResponse = + PostFalAiMagiResponses[keyof PostFalAiMagiResponses] + +export type GetFalAiMagiRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/magi/requests/{request_id}' +} + +export type GetFalAiMagiRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMagiOutput +} + +export type GetFalAiMagiRequestsByRequestIdResponse = + GetFalAiMagiRequestsByRequestIdResponses[keyof GetFalAiMagiRequestsByRequestIdResponses] + +export type GetFalAiMagiDistilledRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/magi-distilled/requests/{request_id}/status' +} + +export type GetFalAiMagiDistilledRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiMagiDistilledRequestsByRequestIdStatusResponse = + GetFalAiMagiDistilledRequestsByRequestIdStatusResponses[keyof GetFalAiMagiDistilledRequestsByRequestIdStatusResponses] + +export type PutFalAiMagiDistilledRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/magi-distilled/requests/{request_id}/cancel' +} + +export type PutFalAiMagiDistilledRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
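+ *
+ * Hand-written illustrative sketch: issuing the cancel call and checking
+ * this flag. The base URL, auth header, and `requestId` variable are
+ * assumptions; the `[200]` index uses the response map defined here.
+ * @example
+ * ```ts
+ * const res = await fetch(
+ *   `https://queue.fal.run/fal-ai/magi-distilled/requests/${requestId}/cancel`,
+ *   {
+ *     method: 'PUT',
+ *     headers: { Authorization: `Key ${process.env.FAL_KEY}` },
+ *   },
+ * )
+ * const { success } = (await res.json()) as
+ *   PutFalAiMagiDistilledRequestsByRequestIdCancelResponses[200]
+ * if (!success) console.warn('cancel request was not honoured')
+ * ```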
+ */ + success?: boolean + } +} + +export type PutFalAiMagiDistilledRequestsByRequestIdCancelResponse = + PutFalAiMagiDistilledRequestsByRequestIdCancelResponses[keyof PutFalAiMagiDistilledRequestsByRequestIdCancelResponses] + +export type PostFalAiMagiDistilledData = { + body: SchemaMagiDistilledInput + path?: never + query?: never + url: '/fal-ai/magi-distilled' +} + +export type PostFalAiMagiDistilledResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMagiDistilledResponse = + PostFalAiMagiDistilledResponses[keyof PostFalAiMagiDistilledResponses] + +export type GetFalAiMagiDistilledRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/magi-distilled/requests/{request_id}' +} + +export type GetFalAiMagiDistilledRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMagiDistilledOutput +} + +export type GetFalAiMagiDistilledRequestsByRequestIdResponse = + GetFalAiMagiDistilledRequestsByRequestIdResponses[keyof GetFalAiMagiDistilledRequestsByRequestIdResponses] + +export type GetFalAiPixverseV4TextToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pixverse/v4/text-to-video/requests/{request_id}/status' +} + +export type GetFalAiPixverseV4TextToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiPixverseV4TextToVideoRequestsByRequestIdStatusResponse = + GetFalAiPixverseV4TextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiPixverseV4TextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiPixverseV4TextToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v4/text-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiPixverseV4TextToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiPixverseV4TextToVideoRequestsByRequestIdCancelResponse = + PutFalAiPixverseV4TextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiPixverseV4TextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiPixverseV4TextToVideoData = { + body: SchemaPixverseV4TextToVideoInput + path?: never + query?: never + url: '/fal-ai/pixverse/v4/text-to-video' +} + +export type PostFalAiPixverseV4TextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPixverseV4TextToVideoResponse = + PostFalAiPixverseV4TextToVideoResponses[keyof PostFalAiPixverseV4TextToVideoResponses] + +export type GetFalAiPixverseV4TextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v4/text-to-video/requests/{request_id}' +} + +export type GetFalAiPixverseV4TextToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaPixverseV4TextToVideoOutput +} + +export type GetFalAiPixverseV4TextToVideoRequestsByRequestIdResponse = + GetFalAiPixverseV4TextToVideoRequestsByRequestIdResponses[keyof GetFalAiPixverseV4TextToVideoRequestsByRequestIdResponses] + +export type GetFalAiPixverseV4TextToVideoFastRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pixverse/v4/text-to-video/fast/requests/{request_id}/status' +} + +export type GetFalAiPixverseV4TextToVideoFastRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiPixverseV4TextToVideoFastRequestsByRequestIdStatusResponse = + GetFalAiPixverseV4TextToVideoFastRequestsByRequestIdStatusResponses[keyof GetFalAiPixverseV4TextToVideoFastRequestsByRequestIdStatusResponses] + +export type PutFalAiPixverseV4TextToVideoFastRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v4/text-to-video/fast/requests/{request_id}/cancel' +} + +export type PutFalAiPixverseV4TextToVideoFastRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiPixverseV4TextToVideoFastRequestsByRequestIdCancelResponse = + PutFalAiPixverseV4TextToVideoFastRequestsByRequestIdCancelResponses[keyof PutFalAiPixverseV4TextToVideoFastRequestsByRequestIdCancelResponses] + +export type PostFalAiPixverseV4TextToVideoFastData = { + body: SchemaPixverseV4TextToVideoFastInput + path?: never + query?: never + url: '/fal-ai/pixverse/v4/text-to-video/fast' +} + +export type PostFalAiPixverseV4TextToVideoFastResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPixverseV4TextToVideoFastResponse = + PostFalAiPixverseV4TextToVideoFastResponses[keyof PostFalAiPixverseV4TextToVideoFastResponses] + +export type GetFalAiPixverseV4TextToVideoFastRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v4/text-to-video/fast/requests/{request_id}' +} + +export type GetFalAiPixverseV4TextToVideoFastRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPixverseV4TextToVideoFastOutput +} + +export type GetFalAiPixverseV4TextToVideoFastRequestsByRequestIdResponse = + GetFalAiPixverseV4TextToVideoFastRequestsByRequestIdResponses[keyof GetFalAiPixverseV4TextToVideoFastRequestsByRequestIdResponses] + +export type GetFalAiKlingVideoLipsyncAudioToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-video/lipsync/audio-to-video/requests/{request_id}/status' + } + +export type GetFalAiKlingVideoLipsyncAudioToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiKlingVideoLipsyncAudioToVideoRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoLipsyncAudioToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoLipsyncAudioToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoLipsyncAudioToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/lipsync/audio-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiKlingVideoLipsyncAudioToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiKlingVideoLipsyncAudioToVideoRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoLipsyncAudioToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoLipsyncAudioToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoLipsyncAudioToVideoData = { + body: SchemaKlingVideoLipsyncAudioToVideoInput + path?: never + query?: never + url: '/fal-ai/kling-video/lipsync/audio-to-video' +} + +export type PostFalAiKlingVideoLipsyncAudioToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoLipsyncAudioToVideoResponse = + PostFalAiKlingVideoLipsyncAudioToVideoResponses[keyof PostFalAiKlingVideoLipsyncAudioToVideoResponses] + +export type GetFalAiKlingVideoLipsyncAudioToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/lipsync/audio-to-video/requests/{request_id}' +} + +export type GetFalAiKlingVideoLipsyncAudioToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaKlingVideoLipsyncAudioToVideoOutput + } + +export type GetFalAiKlingVideoLipsyncAudioToVideoRequestsByRequestIdResponse = + GetFalAiKlingVideoLipsyncAudioToVideoRequestsByRequestIdResponses[keyof GetFalAiKlingVideoLipsyncAudioToVideoRequestsByRequestIdResponses] + +export type GetFalAiKlingVideoLipsyncTextToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-video/lipsync/text-to-video/requests/{request_id}/status' + } + +export type GetFalAiKlingVideoLipsyncTextToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiKlingVideoLipsyncTextToVideoRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoLipsyncTextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoLipsyncTextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoLipsyncTextToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/lipsync/text-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiKlingVideoLipsyncTextToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiKlingVideoLipsyncTextToVideoRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoLipsyncTextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoLipsyncTextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoLipsyncTextToVideoData = { + body: SchemaKlingVideoLipsyncTextToVideoInput + path?: never + query?: never + url: '/fal-ai/kling-video/lipsync/text-to-video' +} + +export type PostFalAiKlingVideoLipsyncTextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoLipsyncTextToVideoResponse = + PostFalAiKlingVideoLipsyncTextToVideoResponses[keyof PostFalAiKlingVideoLipsyncTextToVideoResponses] + +export type GetFalAiKlingVideoLipsyncTextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/lipsync/text-to-video/requests/{request_id}' +} + +export type GetFalAiKlingVideoLipsyncTextToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaKlingVideoLipsyncTextToVideoOutput +} + +export type GetFalAiKlingVideoLipsyncTextToVideoRequestsByRequestIdResponse = + GetFalAiKlingVideoLipsyncTextToVideoRequestsByRequestIdResponses[keyof GetFalAiKlingVideoLipsyncTextToVideoRequestsByRequestIdResponses] + +export type GetFalAiWanT2vLoraRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan-t2v-lora/requests/{request_id}/status' +} + +export type GetFalAiWanT2vLoraRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiWanT2vLoraRequestsByRequestIdStatusResponse = + GetFalAiWanT2vLoraRequestsByRequestIdStatusResponses[keyof GetFalAiWanT2vLoraRequestsByRequestIdStatusResponses] + +export type PutFalAiWanT2vLoraRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-t2v-lora/requests/{request_id}/cancel' +} + +export type PutFalAiWanT2vLoraRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiWanT2vLoraRequestsByRequestIdCancelResponse = + PutFalAiWanT2vLoraRequestsByRequestIdCancelResponses[keyof PutFalAiWanT2vLoraRequestsByRequestIdCancelResponses] + +export type PostFalAiWanT2vLoraData = { + body: SchemaWanT2vLoraInput + path?: never + query?: never + url: '/fal-ai/wan-t2v-lora' +} + +export type PostFalAiWanT2vLoraResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanT2vLoraResponse = + PostFalAiWanT2vLoraResponses[keyof PostFalAiWanT2vLoraResponses] + +export type GetFalAiWanT2vLoraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-t2v-lora/requests/{request_id}' +} + +export type GetFalAiWanT2vLoraRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaWanT2vLoraOutput +} + +export type GetFalAiWanT2vLoraRequestsByRequestIdResponse = + GetFalAiWanT2vLoraRequestsByRequestIdResponses[keyof GetFalAiWanT2vLoraRequestsByRequestIdResponses] + +export type GetFalAiLumaDreamMachineRay2FlashRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/luma-dream-machine/ray-2-flash/requests/{request_id}/status' +} + +export type GetFalAiLumaDreamMachineRay2FlashRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiLumaDreamMachineRay2FlashRequestsByRequestIdStatusResponse = + GetFalAiLumaDreamMachineRay2FlashRequestsByRequestIdStatusResponses[keyof GetFalAiLumaDreamMachineRay2FlashRequestsByRequestIdStatusResponses] + +export type PutFalAiLumaDreamMachineRay2FlashRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/luma-dream-machine/ray-2-flash/requests/{request_id}/cancel' +} + +export type PutFalAiLumaDreamMachineRay2FlashRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiLumaDreamMachineRay2FlashRequestsByRequestIdCancelResponse = + PutFalAiLumaDreamMachineRay2FlashRequestsByRequestIdCancelResponses[keyof PutFalAiLumaDreamMachineRay2FlashRequestsByRequestIdCancelResponses] + +export type PostFalAiLumaDreamMachineRay2FlashData = { + body: SchemaLumaDreamMachineRay2FlashInput + path?: never + query?: never + url: '/fal-ai/luma-dream-machine/ray-2-flash' +} + +export type PostFalAiLumaDreamMachineRay2FlashResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLumaDreamMachineRay2FlashResponse = + PostFalAiLumaDreamMachineRay2FlashResponses[keyof PostFalAiLumaDreamMachineRay2FlashResponses] + +export type GetFalAiLumaDreamMachineRay2FlashRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/luma-dream-machine/ray-2-flash/requests/{request_id}' +} + +export type GetFalAiLumaDreamMachineRay2FlashRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLumaDreamMachineRay2FlashOutput +} + +export type GetFalAiLumaDreamMachineRay2FlashRequestsByRequestIdResponse = + GetFalAiLumaDreamMachineRay2FlashRequestsByRequestIdResponses[keyof GetFalAiLumaDreamMachineRay2FlashRequestsByRequestIdResponses] + +export type GetFalAiPikaV2TurboTextToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pika/v2/turbo/text-to-video/requests/{request_id}/status' +} + +export type GetFalAiPikaV2TurboTextToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. 
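+ *
+ * Note on the pattern repeated throughout this file: each `...Responses`
+ * type maps status codes to body types, and the matching `...Response`
+ * alias (`Responses[keyof Responses]`) is the union of those bodies. A
+ * hand-written, self-contained illustration:
+ * @example
+ * ```ts
+ * type Demo = { 200: { ok: true }; 404: { ok: false } }
+ * type DemoUnion = Demo[keyof Demo] // { ok: true } | { ok: false }
+ * ```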
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiPikaV2TurboTextToVideoRequestsByRequestIdStatusResponse = + GetFalAiPikaV2TurboTextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiPikaV2TurboTextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiPikaV2TurboTextToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pika/v2/turbo/text-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiPikaV2TurboTextToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiPikaV2TurboTextToVideoRequestsByRequestIdCancelResponse = + PutFalAiPikaV2TurboTextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiPikaV2TurboTextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiPikaV2TurboTextToVideoData = { + body: SchemaPikaV2TurboTextToVideoInput + path?: never + query?: never + url: '/fal-ai/pika/v2/turbo/text-to-video' +} + +export type PostFalAiPikaV2TurboTextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPikaV2TurboTextToVideoResponse = + PostFalAiPikaV2TurboTextToVideoResponses[keyof PostFalAiPikaV2TurboTextToVideoResponses] + +export type GetFalAiPikaV2TurboTextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pika/v2/turbo/text-to-video/requests/{request_id}' +} + +export type GetFalAiPikaV2TurboTextToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPikaV2TurboTextToVideoOutput +} + +export type GetFalAiPikaV2TurboTextToVideoRequestsByRequestIdResponse = + GetFalAiPikaV2TurboTextToVideoRequestsByRequestIdResponses[keyof GetFalAiPikaV2TurboTextToVideoRequestsByRequestIdResponses] + +export type GetFalAiPikaV21TextToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pika/v2.1/text-to-video/requests/{request_id}/status' +} + +export type GetFalAiPikaV21TextToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiPikaV21TextToVideoRequestsByRequestIdStatusResponse = + GetFalAiPikaV21TextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiPikaV21TextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiPikaV21TextToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pika/v2.1/text-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiPikaV21TextToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiPikaV21TextToVideoRequestsByRequestIdCancelResponse = + PutFalAiPikaV21TextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiPikaV21TextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiPikaV21TextToVideoData = { + body: SchemaPikaV21TextToVideoInput + path?: never + query?: never + url: '/fal-ai/pika/v2.1/text-to-video' +} + +export type PostFalAiPikaV21TextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPikaV21TextToVideoResponse = + PostFalAiPikaV21TextToVideoResponses[keyof PostFalAiPikaV21TextToVideoResponses] + +export type GetFalAiPikaV21TextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pika/v2.1/text-to-video/requests/{request_id}' +} + +export type GetFalAiPikaV21TextToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPikaV21TextToVideoOutput +} + +export type GetFalAiPikaV21TextToVideoRequestsByRequestIdResponse = + GetFalAiPikaV21TextToVideoRequestsByRequestIdResponses[keyof GetFalAiPikaV21TextToVideoRequestsByRequestIdResponses] + +export type GetFalAiPikaV22TextToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pika/v2.2/text-to-video/requests/{request_id}/status' +} + +export type GetFalAiPikaV22TextToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiPikaV22TextToVideoRequestsByRequestIdStatusResponse = + GetFalAiPikaV22TextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiPikaV22TextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiPikaV22TextToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pika/v2.2/text-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiPikaV22TextToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiPikaV22TextToVideoRequestsByRequestIdCancelResponse = + PutFalAiPikaV22TextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiPikaV22TextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiPikaV22TextToVideoData = { + body: SchemaPikaV22TextToVideoInput + path?: never + query?: never + url: '/fal-ai/pika/v2.2/text-to-video' +} + +export type PostFalAiPikaV22TextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPikaV22TextToVideoResponse = + PostFalAiPikaV22TextToVideoResponses[keyof PostFalAiPikaV22TextToVideoResponses] + +export type GetFalAiPikaV22TextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pika/v2.2/text-to-video/requests/{request_id}' +} + +export type GetFalAiPikaV22TextToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaPikaV22TextToVideoOutput +} + +export type GetFalAiPikaV22TextToVideoRequestsByRequestIdResponse = + GetFalAiPikaV22TextToVideoRequestsByRequestIdResponses[keyof GetFalAiPikaV22TextToVideoRequestsByRequestIdResponses] + +export type GetFalAiWanProTextToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan-pro/text-to-video/requests/{request_id}/status' +} + +export type GetFalAiWanProTextToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiWanProTextToVideoRequestsByRequestIdStatusResponse = + GetFalAiWanProTextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiWanProTextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiWanProTextToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-pro/text-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiWanProTextToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiWanProTextToVideoRequestsByRequestIdCancelResponse = + PutFalAiWanProTextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiWanProTextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiWanProTextToVideoData = { + body: SchemaWanProTextToVideoInput + path?: never + query?: never + url: '/fal-ai/wan-pro/text-to-video' +} + +export type PostFalAiWanProTextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanProTextToVideoResponse = + PostFalAiWanProTextToVideoResponses[keyof PostFalAiWanProTextToVideoResponses] + +export type GetFalAiWanProTextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-pro/text-to-video/requests/{request_id}' +} + +export type GetFalAiWanProTextToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWanProTextToVideoOutput +} + +export type GetFalAiWanProTextToVideoRequestsByRequestIdResponse = + GetFalAiWanProTextToVideoRequestsByRequestIdResponses[keyof GetFalAiWanProTextToVideoRequestsByRequestIdResponses] + +export type GetFalAiKlingVideoV15ProEffectsRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-video/v1.5/pro/effects/requests/{request_id}/status' +} + +export type GetFalAiKlingVideoV15ProEffectsRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiKlingVideoV15ProEffectsRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoV15ProEffectsRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoV15ProEffectsRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoV15ProEffectsRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v1.5/pro/effects/requests/{request_id}/cancel' +} + +export type PutFalAiKlingVideoV15ProEffectsRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiKlingVideoV15ProEffectsRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoV15ProEffectsRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoV15ProEffectsRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoV15ProEffectsData = { + body: SchemaKlingVideoV15ProEffectsInput + path?: never + query?: never + url: '/fal-ai/kling-video/v1.5/pro/effects' +} + +export type PostFalAiKlingVideoV15ProEffectsResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoV15ProEffectsResponse = + PostFalAiKlingVideoV15ProEffectsResponses[keyof PostFalAiKlingVideoV15ProEffectsResponses] + +export type GetFalAiKlingVideoV15ProEffectsRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v1.5/pro/effects/requests/{request_id}' +} + +export type GetFalAiKlingVideoV15ProEffectsRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaKlingVideoV15ProEffectsOutput +} + +export type GetFalAiKlingVideoV15ProEffectsRequestsByRequestIdResponse = + GetFalAiKlingVideoV15ProEffectsRequestsByRequestIdResponses[keyof GetFalAiKlingVideoV15ProEffectsRequestsByRequestIdResponses] + +export type GetFalAiKlingVideoV16ProEffectsRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-video/v1.6/pro/effects/requests/{request_id}/status' +} + +export type GetFalAiKlingVideoV16ProEffectsRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiKlingVideoV16ProEffectsRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoV16ProEffectsRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoV16ProEffectsRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoV16ProEffectsRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v1.6/pro/effects/requests/{request_id}/cancel' +} + +export type PutFalAiKlingVideoV16ProEffectsRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiKlingVideoV16ProEffectsRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoV16ProEffectsRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoV16ProEffectsRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoV16ProEffectsData = { + body: SchemaKlingVideoV16ProEffectsInput + path?: never + query?: never + url: '/fal-ai/kling-video/v1.6/pro/effects' +} + +export type PostFalAiKlingVideoV16ProEffectsResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoV16ProEffectsResponse = + PostFalAiKlingVideoV16ProEffectsResponses[keyof PostFalAiKlingVideoV16ProEffectsResponses] + +export type GetFalAiKlingVideoV16ProEffectsRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v1.6/pro/effects/requests/{request_id}' +} + +export type GetFalAiKlingVideoV16ProEffectsRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaKlingVideoV16ProEffectsOutput +} + +export type GetFalAiKlingVideoV16ProEffectsRequestsByRequestIdResponse = + GetFalAiKlingVideoV16ProEffectsRequestsByRequestIdResponses[keyof GetFalAiKlingVideoV16ProEffectsRequestsByRequestIdResponses] + +export type GetFalAiKlingVideoV1StandardEffectsRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-video/v1/standard/effects/requests/{request_id}/status' +} + +export type GetFalAiKlingVideoV1StandardEffectsRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiKlingVideoV1StandardEffectsRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoV1StandardEffectsRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoV1StandardEffectsRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoV1StandardEffectsRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v1/standard/effects/requests/{request_id}/cancel' +} + +export type PutFalAiKlingVideoV1StandardEffectsRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiKlingVideoV1StandardEffectsRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoV1StandardEffectsRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoV1StandardEffectsRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoV1StandardEffectsData = { + body: SchemaKlingVideoV1StandardEffectsInput + path?: never + query?: never + url: '/fal-ai/kling-video/v1/standard/effects' +} + +export type PostFalAiKlingVideoV1StandardEffectsResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoV1StandardEffectsResponse = + PostFalAiKlingVideoV1StandardEffectsResponses[keyof PostFalAiKlingVideoV1StandardEffectsResponses] + +export type GetFalAiKlingVideoV1StandardEffectsRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v1/standard/effects/requests/{request_id}' +} + +export type GetFalAiKlingVideoV1StandardEffectsRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaKlingVideoV1StandardEffectsOutput +} + +export type GetFalAiKlingVideoV1StandardEffectsRequestsByRequestIdResponse = + GetFalAiKlingVideoV1StandardEffectsRequestsByRequestIdResponses[keyof GetFalAiKlingVideoV1StandardEffectsRequestsByRequestIdResponses] + +export type GetFalAiKlingVideoV16StandardEffectsRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-video/v1.6/standard/effects/requests/{request_id}/status' + } + +export type GetFalAiKlingVideoV16StandardEffectsRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiKlingVideoV16StandardEffectsRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoV16StandardEffectsRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoV16StandardEffectsRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoV16StandardEffectsRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v1.6/standard/effects/requests/{request_id}/cancel' + } + +export type PutFalAiKlingVideoV16StandardEffectsRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiKlingVideoV16StandardEffectsRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoV16StandardEffectsRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoV16StandardEffectsRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoV16StandardEffectsData = { + body: SchemaKlingVideoV16StandardEffectsInput + path?: never + query?: never + url: '/fal-ai/kling-video/v1.6/standard/effects' +} + +export type PostFalAiKlingVideoV16StandardEffectsResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoV16StandardEffectsResponse = + PostFalAiKlingVideoV16StandardEffectsResponses[keyof PostFalAiKlingVideoV16StandardEffectsResponses] + +export type GetFalAiKlingVideoV16StandardEffectsRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v1.6/standard/effects/requests/{request_id}' +} + +export type GetFalAiKlingVideoV16StandardEffectsRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaKlingVideoV16StandardEffectsOutput +} + +export type GetFalAiKlingVideoV16StandardEffectsRequestsByRequestIdResponse = + GetFalAiKlingVideoV16StandardEffectsRequestsByRequestIdResponses[keyof GetFalAiKlingVideoV16StandardEffectsRequestsByRequestIdResponses] + +export type GetFalAiLtxVideoV095RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ltx-video-v095/requests/{request_id}/status' +} + +export type GetFalAiLtxVideoV095RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLtxVideoV095RequestsByRequestIdStatusResponse = + GetFalAiLtxVideoV095RequestsByRequestIdStatusResponses[keyof GetFalAiLtxVideoV095RequestsByRequestIdStatusResponses] + +export type PutFalAiLtxVideoV095RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-video-v095/requests/{request_id}/cancel' +} + +export type PutFalAiLtxVideoV095RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLtxVideoV095RequestsByRequestIdCancelResponse = + PutFalAiLtxVideoV095RequestsByRequestIdCancelResponses[keyof PutFalAiLtxVideoV095RequestsByRequestIdCancelResponses] + +export type PostFalAiLtxVideoV095Data = { + body: SchemaLtxVideoV095Input + path?: never + query?: never + url: '/fal-ai/ltx-video-v095' +} + +export type PostFalAiLtxVideoV095Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtxVideoV095Response = + PostFalAiLtxVideoV095Responses[keyof PostFalAiLtxVideoV095Responses] + +export type GetFalAiLtxVideoV095RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-video-v095/requests/{request_id}' +} + +export type GetFalAiLtxVideoV095RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLtxVideoV095Output +} + +export type GetFalAiLtxVideoV095RequestsByRequestIdResponse = + GetFalAiLtxVideoV095RequestsByRequestIdResponses[keyof GetFalAiLtxVideoV095RequestsByRequestIdResponses] + +export type GetFalAiKlingVideoV16ProTextToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-video/v1.6/pro/text-to-video/requests/{request_id}/status' +} + +export type GetFalAiKlingVideoV16ProTextToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiKlingVideoV16ProTextToVideoRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoV16ProTextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoV16ProTextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoV16ProTextToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v1.6/pro/text-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiKlingVideoV16ProTextToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiKlingVideoV16ProTextToVideoRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoV16ProTextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoV16ProTextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoV16ProTextToVideoData = { + body: SchemaKlingVideoV16ProTextToVideoInput + path?: never + query?: never + url: '/fal-ai/kling-video/v1.6/pro/text-to-video' +} + +export type PostFalAiKlingVideoV16ProTextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoV16ProTextToVideoResponse = + PostFalAiKlingVideoV16ProTextToVideoResponses[keyof PostFalAiKlingVideoV16ProTextToVideoResponses] + +export type GetFalAiKlingVideoV16ProTextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v1.6/pro/text-to-video/requests/{request_id}' +} + +export type GetFalAiKlingVideoV16ProTextToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaKlingVideoV16ProTextToVideoOutput +} + +export type GetFalAiKlingVideoV16ProTextToVideoRequestsByRequestIdResponse = + GetFalAiKlingVideoV16ProTextToVideoRequestsByRequestIdResponses[keyof GetFalAiKlingVideoV16ProTextToVideoRequestsByRequestIdResponses] + +export type GetFalAiWanT2vRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan-t2v/requests/{request_id}/status' +} + +export type GetFalAiWanT2vRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiWanT2vRequestsByRequestIdStatusResponse = + GetFalAiWanT2vRequestsByRequestIdStatusResponses[keyof GetFalAiWanT2vRequestsByRequestIdStatusResponses] + +export type PutFalAiWanT2vRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-t2v/requests/{request_id}/cancel' +} + +export type PutFalAiWanT2vRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiWanT2vRequestsByRequestIdCancelResponse = + PutFalAiWanT2vRequestsByRequestIdCancelResponses[keyof PutFalAiWanT2vRequestsByRequestIdCancelResponses] + +export type PostFalAiWanT2vData = { + body: SchemaWanT2vInput + path?: never + query?: never + url: '/fal-ai/wan-t2v' +} + +export type PostFalAiWanT2vResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanT2vResponse = + PostFalAiWanT2vResponses[keyof PostFalAiWanT2vResponses] + +export type GetFalAiWanT2vRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-t2v/requests/{request_id}' +} + +export type GetFalAiWanT2vRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWanT2vOutput +} + +export type GetFalAiWanT2vRequestsByRequestIdResponse = + GetFalAiWanT2vRequestsByRequestIdResponses[keyof GetFalAiWanT2vRequestsByRequestIdResponses] + +export type GetFalAiVeo2RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/veo2/requests/{request_id}/status' +} + +export type GetFalAiVeo2RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiVeo2RequestsByRequestIdStatusResponse = + GetFalAiVeo2RequestsByRequestIdStatusResponses[keyof GetFalAiVeo2RequestsByRequestIdStatusResponses] + +export type PutFalAiVeo2RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/veo2/requests/{request_id}/cancel' +} + +export type PutFalAiVeo2RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiVeo2RequestsByRequestIdCancelResponse = + PutFalAiVeo2RequestsByRequestIdCancelResponses[keyof PutFalAiVeo2RequestsByRequestIdCancelResponses] + +export type PostFalAiVeo2Data = { + body: SchemaVeo2Input + path?: never + query?: never + url: '/fal-ai/veo2' +} + +export type PostFalAiVeo2Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiVeo2Response = + PostFalAiVeo2Responses[keyof PostFalAiVeo2Responses] + +export type GetFalAiVeo2RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/veo2/requests/{request_id}' +} + +export type GetFalAiVeo2RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaVeo2Output +} + +export type GetFalAiVeo2RequestsByRequestIdResponse = + GetFalAiVeo2RequestsByRequestIdResponses[keyof GetFalAiVeo2RequestsByRequestIdResponses] + +export type GetFalAiMinimaxVideo01DirectorRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/minimax/video-01-director/requests/{request_id}/status' +} + +export type GetFalAiMinimaxVideo01DirectorRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiMinimaxVideo01DirectorRequestsByRequestIdStatusResponse = + GetFalAiMinimaxVideo01DirectorRequestsByRequestIdStatusResponses[keyof GetFalAiMinimaxVideo01DirectorRequestsByRequestIdStatusResponses] + +export type PutFalAiMinimaxVideo01DirectorRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/video-01-director/requests/{request_id}/cancel' +} + +export type PutFalAiMinimaxVideo01DirectorRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiMinimaxVideo01DirectorRequestsByRequestIdCancelResponse = + PutFalAiMinimaxVideo01DirectorRequestsByRequestIdCancelResponses[keyof PutFalAiMinimaxVideo01DirectorRequestsByRequestIdCancelResponses] + +export type PostFalAiMinimaxVideo01DirectorData = { + body: SchemaMinimaxVideo01DirectorInput + path?: never + query?: never + url: '/fal-ai/minimax/video-01-director' +} + +export type PostFalAiMinimaxVideo01DirectorResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMinimaxVideo01DirectorResponse = + PostFalAiMinimaxVideo01DirectorResponses[keyof PostFalAiMinimaxVideo01DirectorResponses] + +export type GetFalAiMinimaxVideo01DirectorRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/video-01-director/requests/{request_id}' +} + +export type GetFalAiMinimaxVideo01DirectorRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMinimaxVideo01DirectorOutput +} + +export type GetFalAiMinimaxVideo01DirectorRequestsByRequestIdResponse = + GetFalAiMinimaxVideo01DirectorRequestsByRequestIdResponses[keyof GetFalAiMinimaxVideo01DirectorRequestsByRequestIdResponses] + +export type GetFalAiPixverseV35TextToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pixverse/v3.5/text-to-video/requests/{request_id}/status' +} + +export type GetFalAiPixverseV35TextToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiPixverseV35TextToVideoRequestsByRequestIdStatusResponse = + GetFalAiPixverseV35TextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiPixverseV35TextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiPixverseV35TextToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v3.5/text-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiPixverseV35TextToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiPixverseV35TextToVideoRequestsByRequestIdCancelResponse = + PutFalAiPixverseV35TextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiPixverseV35TextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiPixverseV35TextToVideoData = { + body: SchemaPixverseV35TextToVideoInput + path?: never + query?: never + url: '/fal-ai/pixverse/v3.5/text-to-video' +} + +export type PostFalAiPixverseV35TextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPixverseV35TextToVideoResponse = + PostFalAiPixverseV35TextToVideoResponses[keyof PostFalAiPixverseV35TextToVideoResponses] + +export type GetFalAiPixverseV35TextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v3.5/text-to-video/requests/{request_id}' +} + +export type GetFalAiPixverseV35TextToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPixverseV35TextToVideoOutput +} + +export type GetFalAiPixverseV35TextToVideoRequestsByRequestIdResponse = + GetFalAiPixverseV35TextToVideoRequestsByRequestIdResponses[keyof GetFalAiPixverseV35TextToVideoRequestsByRequestIdResponses] + +export type GetFalAiPixverseV35TextToVideoFastRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pixverse/v3.5/text-to-video/fast/requests/{request_id}/status' +} + +export type GetFalAiPixverseV35TextToVideoFastRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiPixverseV35TextToVideoFastRequestsByRequestIdStatusResponse = + GetFalAiPixverseV35TextToVideoFastRequestsByRequestIdStatusResponses[keyof GetFalAiPixverseV35TextToVideoFastRequestsByRequestIdStatusResponses] + +export type PutFalAiPixverseV35TextToVideoFastRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v3.5/text-to-video/fast/requests/{request_id}/cancel' +} + +export type PutFalAiPixverseV35TextToVideoFastRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiPixverseV35TextToVideoFastRequestsByRequestIdCancelResponse = + PutFalAiPixverseV35TextToVideoFastRequestsByRequestIdCancelResponses[keyof PutFalAiPixverseV35TextToVideoFastRequestsByRequestIdCancelResponses] + +export type PostFalAiPixverseV35TextToVideoFastData = { + body: SchemaPixverseV35TextToVideoFastInput + path?: never + query?: never + url: '/fal-ai/pixverse/v3.5/text-to-video/fast' +} + +export type PostFalAiPixverseV35TextToVideoFastResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiPixverseV35TextToVideoFastResponse = + PostFalAiPixverseV35TextToVideoFastResponses[keyof PostFalAiPixverseV35TextToVideoFastResponses] + +export type GetFalAiPixverseV35TextToVideoFastRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/v3.5/text-to-video/fast/requests/{request_id}' +} + +export type GetFalAiPixverseV35TextToVideoFastRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPixverseV35TextToVideoFastOutput +} + +export type GetFalAiPixverseV35TextToVideoFastRequestsByRequestIdResponse = + GetFalAiPixverseV35TextToVideoFastRequestsByRequestIdResponses[keyof GetFalAiPixverseV35TextToVideoFastRequestsByRequestIdResponses] + +export type GetFalAiLumaDreamMachineRay2RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/luma-dream-machine/ray-2/requests/{request_id}/status' +} + +export type GetFalAiLumaDreamMachineRay2RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLumaDreamMachineRay2RequestsByRequestIdStatusResponse = + GetFalAiLumaDreamMachineRay2RequestsByRequestIdStatusResponses[keyof GetFalAiLumaDreamMachineRay2RequestsByRequestIdStatusResponses] + +export type PutFalAiLumaDreamMachineRay2RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/luma-dream-machine/ray-2/requests/{request_id}/cancel' +} + +export type PutFalAiLumaDreamMachineRay2RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLumaDreamMachineRay2RequestsByRequestIdCancelResponse = + PutFalAiLumaDreamMachineRay2RequestsByRequestIdCancelResponses[keyof PutFalAiLumaDreamMachineRay2RequestsByRequestIdCancelResponses] + +export type PostFalAiLumaDreamMachineRay2Data = { + body: SchemaLumaDreamMachineRay2Input + path?: never + query?: never + url: '/fal-ai/luma-dream-machine/ray-2' +} + +export type PostFalAiLumaDreamMachineRay2Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLumaDreamMachineRay2Response = + PostFalAiLumaDreamMachineRay2Responses[keyof PostFalAiLumaDreamMachineRay2Responses] + +export type GetFalAiLumaDreamMachineRay2RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/luma-dream-machine/ray-2/requests/{request_id}' +} + +export type GetFalAiLumaDreamMachineRay2RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLumaDreamMachineRay2Output +} + +export type GetFalAiLumaDreamMachineRay2RequestsByRequestIdResponse = + GetFalAiLumaDreamMachineRay2RequestsByRequestIdResponses[keyof GetFalAiLumaDreamMachineRay2RequestsByRequestIdResponses] + +export type GetFalAiHunyuanVideoLoraRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/hunyuan-video-lora/requests/{request_id}/status' +} + +export type GetFalAiHunyuanVideoLoraRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiHunyuanVideoLoraRequestsByRequestIdStatusResponse = + GetFalAiHunyuanVideoLoraRequestsByRequestIdStatusResponses[keyof GetFalAiHunyuanVideoLoraRequestsByRequestIdStatusResponses] + +export type PutFalAiHunyuanVideoLoraRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan-video-lora/requests/{request_id}/cancel' +} + +export type PutFalAiHunyuanVideoLoraRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiHunyuanVideoLoraRequestsByRequestIdCancelResponse = + PutFalAiHunyuanVideoLoraRequestsByRequestIdCancelResponses[keyof PutFalAiHunyuanVideoLoraRequestsByRequestIdCancelResponses] + +export type PostFalAiHunyuanVideoLoraData = { + body: SchemaHunyuanVideoLoraInput + path?: never + query?: never + url: '/fal-ai/hunyuan-video-lora' +} + +export type PostFalAiHunyuanVideoLoraResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiHunyuanVideoLoraResponse = + PostFalAiHunyuanVideoLoraResponses[keyof PostFalAiHunyuanVideoLoraResponses] + +export type GetFalAiHunyuanVideoLoraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan-video-lora/requests/{request_id}' +} + +export type GetFalAiHunyuanVideoLoraRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaHunyuanVideoLoraOutput +} + +export type GetFalAiHunyuanVideoLoraRequestsByRequestIdResponse = + GetFalAiHunyuanVideoLoraRequestsByRequestIdResponses[keyof GetFalAiHunyuanVideoLoraRequestsByRequestIdResponses] + +export type GetFalAiTranspixarRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/transpixar/requests/{request_id}/status' +} + +export type GetFalAiTranspixarRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiTranspixarRequestsByRequestIdStatusResponse = + GetFalAiTranspixarRequestsByRequestIdStatusResponses[keyof GetFalAiTranspixarRequestsByRequestIdStatusResponses] + +export type PutFalAiTranspixarRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/transpixar/requests/{request_id}/cancel' +} + +export type PutFalAiTranspixarRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiTranspixarRequestsByRequestIdCancelResponse = + PutFalAiTranspixarRequestsByRequestIdCancelResponses[keyof PutFalAiTranspixarRequestsByRequestIdCancelResponses] + +export type PostFalAiTranspixarData = { + body: SchemaTranspixarInput + path?: never + query?: never + url: '/fal-ai/transpixar' +} + +export type PostFalAiTranspixarResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiTranspixarResponse = + PostFalAiTranspixarResponses[keyof PostFalAiTranspixarResponses] + +export type GetFalAiTranspixarRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/transpixar/requests/{request_id}' +} + +export type GetFalAiTranspixarRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaTranspixarOutput +} + +export type GetFalAiTranspixarRequestsByRequestIdResponse = + GetFalAiTranspixarRequestsByRequestIdResponses[keyof GetFalAiTranspixarRequestsByRequestIdResponses] + +export type GetFalAiCogvideox5bRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/cogvideox-5b/requests/{request_id}/status' +} + +export type GetFalAiCogvideox5bRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiCogvideox5bRequestsByRequestIdStatusResponse = + GetFalAiCogvideox5bRequestsByRequestIdStatusResponses[keyof GetFalAiCogvideox5bRequestsByRequestIdStatusResponses] + +export type PutFalAiCogvideox5bRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/cogvideox-5b/requests/{request_id}/cancel' +} + +export type PutFalAiCogvideox5bRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiCogvideox5bRequestsByRequestIdCancelResponse = + PutFalAiCogvideox5bRequestsByRequestIdCancelResponses[keyof PutFalAiCogvideox5bRequestsByRequestIdCancelResponses] + +export type PostFalAiCogvideox5bData = { + body: SchemaCogvideox5bInput + path?: never + query?: never + url: '/fal-ai/cogvideox-5b' +} + +export type PostFalAiCogvideox5bResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiCogvideox5bResponse = + PostFalAiCogvideox5bResponses[keyof PostFalAiCogvideox5bResponses] + +export type GetFalAiCogvideox5bRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/cogvideox-5b/requests/{request_id}' +} + +export type GetFalAiCogvideox5bRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaCogvideox5bOutput +} + +export type GetFalAiCogvideox5bRequestsByRequestIdResponse = + GetFalAiCogvideox5bRequestsByRequestIdResponses[keyof GetFalAiCogvideox5bRequestsByRequestIdResponses] + +export type GetFalAiKlingVideoV16StandardTextToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-video/v1.6/standard/text-to-video/requests/{request_id}/status' + } + +export type GetFalAiKlingVideoV16StandardTextToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiKlingVideoV16StandardTextToVideoRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoV16StandardTextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoV16StandardTextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoV16StandardTextToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v1.6/standard/text-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiKlingVideoV16StandardTextToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiKlingVideoV16StandardTextToVideoRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoV16StandardTextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoV16StandardTextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoV16StandardTextToVideoData = { + body: SchemaKlingVideoV16StandardTextToVideoInput + path?: never + query?: never + url: '/fal-ai/kling-video/v1.6/standard/text-to-video' +} + +export type PostFalAiKlingVideoV16StandardTextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoV16StandardTextToVideoResponse = + PostFalAiKlingVideoV16StandardTextToVideoResponses[keyof PostFalAiKlingVideoV16StandardTextToVideoResponses] + +export type GetFalAiKlingVideoV16StandardTextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v1.6/standard/text-to-video/requests/{request_id}' +} + +export type GetFalAiKlingVideoV16StandardTextToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaKlingVideoV16StandardTextToVideoOutput + } + +export type GetFalAiKlingVideoV16StandardTextToVideoRequestsByRequestIdResponse = + GetFalAiKlingVideoV16StandardTextToVideoRequestsByRequestIdResponses[keyof GetFalAiKlingVideoV16StandardTextToVideoRequestsByRequestIdResponses] + +export type GetFalAiMinimaxVideo01LiveRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/minimax/video-01-live/requests/{request_id}/status' +} + +export type GetFalAiMinimaxVideo01LiveRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiMinimaxVideo01LiveRequestsByRequestIdStatusResponse = + GetFalAiMinimaxVideo01LiveRequestsByRequestIdStatusResponses[keyof GetFalAiMinimaxVideo01LiveRequestsByRequestIdStatusResponses] + +export type PutFalAiMinimaxVideo01LiveRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/video-01-live/requests/{request_id}/cancel' +} + +export type PutFalAiMinimaxVideo01LiveRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiMinimaxVideo01LiveRequestsByRequestIdCancelResponse = + PutFalAiMinimaxVideo01LiveRequestsByRequestIdCancelResponses[keyof PutFalAiMinimaxVideo01LiveRequestsByRequestIdCancelResponses] + +export type PostFalAiMinimaxVideo01LiveData = { + body: SchemaMinimaxVideo01LiveInput + path?: never + query?: never + url: '/fal-ai/minimax/video-01-live' +} + +export type PostFalAiMinimaxVideo01LiveResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMinimaxVideo01LiveResponse = + PostFalAiMinimaxVideo01LiveResponses[keyof PostFalAiMinimaxVideo01LiveResponses] + +export type GetFalAiMinimaxVideo01LiveRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/video-01-live/requests/{request_id}' +} + +export type GetFalAiMinimaxVideo01LiveRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMinimaxVideo01LiveOutput +} + +export type GetFalAiMinimaxVideo01LiveRequestsByRequestIdResponse = + GetFalAiMinimaxVideo01LiveRequestsByRequestIdResponses[keyof GetFalAiMinimaxVideo01LiveRequestsByRequestIdResponses] + +export type GetFalAiKlingVideoV1StandardTextToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-video/v1/standard/text-to-video/requests/{request_id}/status' + } + +export type GetFalAiKlingVideoV1StandardTextToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiKlingVideoV1StandardTextToVideoRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoV1StandardTextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoV1StandardTextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoV1StandardTextToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v1/standard/text-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiKlingVideoV1StandardTextToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiKlingVideoV1StandardTextToVideoRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoV1StandardTextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoV1StandardTextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoV1StandardTextToVideoData = { + body: SchemaKlingVideoV1StandardTextToVideoInput + path?: never + query?: never + url: '/fal-ai/kling-video/v1/standard/text-to-video' +} + +export type PostFalAiKlingVideoV1StandardTextToVideoResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoV1StandardTextToVideoResponse = + PostFalAiKlingVideoV1StandardTextToVideoResponses[keyof PostFalAiKlingVideoV1StandardTextToVideoResponses] + +export type GetFalAiKlingVideoV1StandardTextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v1/standard/text-to-video/requests/{request_id}' +} + +export type GetFalAiKlingVideoV1StandardTextToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaKlingVideoV1StandardTextToVideoOutput + } + +export type GetFalAiKlingVideoV1StandardTextToVideoRequestsByRequestIdResponse = + GetFalAiKlingVideoV1StandardTextToVideoRequestsByRequestIdResponses[keyof GetFalAiKlingVideoV1StandardTextToVideoRequestsByRequestIdResponses] + +export type GetFalAiKlingVideoV15ProTextToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-video/v1.5/pro/text-to-video/requests/{request_id}/status' +} + +export type GetFalAiKlingVideoV15ProTextToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiKlingVideoV15ProTextToVideoRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoV15ProTextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoV15ProTextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoV15ProTextToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v1.5/pro/text-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiKlingVideoV15ProTextToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiKlingVideoV15ProTextToVideoRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoV15ProTextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoV15ProTextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoV15ProTextToVideoData = { + body: SchemaKlingVideoV15ProTextToVideoInput + path?: never + query?: never + url: '/fal-ai/kling-video/v1.5/pro/text-to-video' +} + +export type PostFalAiKlingVideoV15ProTextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoV15ProTextToVideoResponse = + PostFalAiKlingVideoV15ProTextToVideoResponses[keyof PostFalAiKlingVideoV15ProTextToVideoResponses] + +export type GetFalAiKlingVideoV15ProTextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v1.5/pro/text-to-video/requests/{request_id}' +} + +export type GetFalAiKlingVideoV15ProTextToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaKlingVideoV15ProTextToVideoOutput +} + +export type GetFalAiKlingVideoV15ProTextToVideoRequestsByRequestIdResponse = + GetFalAiKlingVideoV15ProTextToVideoRequestsByRequestIdResponses[keyof GetFalAiKlingVideoV15ProTextToVideoRequestsByRequestIdResponses] + +export type GetFalAiMochiV1RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/mochi-v1/requests/{request_id}/status' +} + +export type GetFalAiMochiV1RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiMochiV1RequestsByRequestIdStatusResponse = + GetFalAiMochiV1RequestsByRequestIdStatusResponses[keyof GetFalAiMochiV1RequestsByRequestIdStatusResponses] + +export type PutFalAiMochiV1RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/mochi-v1/requests/{request_id}/cancel' +} + +export type PutFalAiMochiV1RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiMochiV1RequestsByRequestIdCancelResponse = + PutFalAiMochiV1RequestsByRequestIdCancelResponses[keyof PutFalAiMochiV1RequestsByRequestIdCancelResponses] + +export type PostFalAiMochiV1Data = { + body: SchemaMochiV1Input + path?: never + query?: never + url: '/fal-ai/mochi-v1' +} + +export type PostFalAiMochiV1Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMochiV1Response = + PostFalAiMochiV1Responses[keyof PostFalAiMochiV1Responses] + +export type GetFalAiMochiV1RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/mochi-v1/requests/{request_id}' +} + +export type GetFalAiMochiV1RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMochiV1Output +} + +export type GetFalAiMochiV1RequestsByRequestIdResponse = + GetFalAiMochiV1RequestsByRequestIdResponses[keyof GetFalAiMochiV1RequestsByRequestIdResponses] + +export type GetFalAiHunyuanVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/hunyuan-video/requests/{request_id}/status' +} + +export type GetFalAiHunyuanVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiHunyuanVideoRequestsByRequestIdStatusResponse = + GetFalAiHunyuanVideoRequestsByRequestIdStatusResponses[keyof GetFalAiHunyuanVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiHunyuanVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan-video/requests/{request_id}/cancel' +} + +export type PutFalAiHunyuanVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiHunyuanVideoRequestsByRequestIdCancelResponse = + PutFalAiHunyuanVideoRequestsByRequestIdCancelResponses[keyof PutFalAiHunyuanVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiHunyuanVideoData = { + body: SchemaHunyuanVideoInput + path?: never + query?: never + url: '/fal-ai/hunyuan-video' +} + +export type PostFalAiHunyuanVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiHunyuanVideoResponse = + PostFalAiHunyuanVideoResponses[keyof PostFalAiHunyuanVideoResponses] + +export type GetFalAiHunyuanVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan-video/requests/{request_id}' +} + +export type GetFalAiHunyuanVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaHunyuanVideoOutput +} + +export type GetFalAiHunyuanVideoRequestsByRequestIdResponse = + GetFalAiHunyuanVideoRequestsByRequestIdResponses[keyof GetFalAiHunyuanVideoRequestsByRequestIdResponses] + +export type GetFalAiLtxVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ltx-video/requests/{request_id}/status' +} + +export type GetFalAiLtxVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLtxVideoRequestsByRequestIdStatusResponse = + GetFalAiLtxVideoRequestsByRequestIdStatusResponses[keyof GetFalAiLtxVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiLtxVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-video/requests/{request_id}/cancel' +} + +export type PutFalAiLtxVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLtxVideoRequestsByRequestIdCancelResponse = + PutFalAiLtxVideoRequestsByRequestIdCancelResponses[keyof PutFalAiLtxVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiLtxVideoData = { + body: SchemaLtxVideoInput + path?: never + query?: never + url: '/fal-ai/ltx-video' +} + +export type PostFalAiLtxVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtxVideoResponse = + PostFalAiLtxVideoResponses[keyof PostFalAiLtxVideoResponses] + +export type GetFalAiLtxVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-video/requests/{request_id}' +} + +export type GetFalAiLtxVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLtxVideoOutput +} + +export type GetFalAiLtxVideoRequestsByRequestIdResponse = + GetFalAiLtxVideoRequestsByRequestIdResponses[keyof GetFalAiLtxVideoRequestsByRequestIdResponses] + +export type GetFalAiFastSvdTextToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/fast-svd/text-to-video/requests/{request_id}/status' +} + +export type GetFalAiFastSvdTextToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFastSvdTextToVideoRequestsByRequestIdStatusResponse = + GetFalAiFastSvdTextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiFastSvdTextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiFastSvdTextToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-svd/text-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiFastSvdTextToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFastSvdTextToVideoRequestsByRequestIdCancelResponse = + PutFalAiFastSvdTextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiFastSvdTextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiFastSvdTextToVideoData = { + body: SchemaFastSvdTextToVideoInput + path?: never + query?: never + url: '/fal-ai/fast-svd/text-to-video' +} + +export type PostFalAiFastSvdTextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFastSvdTextToVideoResponse = + PostFalAiFastSvdTextToVideoResponses[keyof PostFalAiFastSvdTextToVideoResponses] + +export type GetFalAiFastSvdTextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-svd/text-to-video/requests/{request_id}' +} + +export type GetFalAiFastSvdTextToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFastSvdTextToVideoOutput +} + +export type GetFalAiFastSvdTextToVideoRequestsByRequestIdResponse = + GetFalAiFastSvdTextToVideoRequestsByRequestIdResponses[keyof GetFalAiFastSvdTextToVideoRequestsByRequestIdResponses] + +export type GetFalAiFastSvdLcmTextToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/fast-svd-lcm/text-to-video/requests/{request_id}/status' +} + +export type GetFalAiFastSvdLcmTextToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFastSvdLcmTextToVideoRequestsByRequestIdStatusResponse = + GetFalAiFastSvdLcmTextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiFastSvdLcmTextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiFastSvdLcmTextToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-svd-lcm/text-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiFastSvdLcmTextToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiFastSvdLcmTextToVideoRequestsByRequestIdCancelResponse = + PutFalAiFastSvdLcmTextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiFastSvdLcmTextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiFastSvdLcmTextToVideoData = { + body: SchemaFastSvdLcmTextToVideoInput + path?: never + query?: never + url: '/fal-ai/fast-svd-lcm/text-to-video' +} + +export type PostFalAiFastSvdLcmTextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFastSvdLcmTextToVideoResponse = + PostFalAiFastSvdLcmTextToVideoResponses[keyof PostFalAiFastSvdLcmTextToVideoResponses] + +export type GetFalAiFastSvdLcmTextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-svd-lcm/text-to-video/requests/{request_id}' +} + +export type GetFalAiFastSvdLcmTextToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFastSvdLcmTextToVideoOutput +} + +export type GetFalAiFastSvdLcmTextToVideoRequestsByRequestIdResponse = + GetFalAiFastSvdLcmTextToVideoRequestsByRequestIdResponses[keyof GetFalAiFastSvdLcmTextToVideoRequestsByRequestIdResponses] + +export type GetFalAiT2vTurboRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/t2v-turbo/requests/{request_id}/status' +} + +export type GetFalAiT2vTurboRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiT2vTurboRequestsByRequestIdStatusResponse = + GetFalAiT2vTurboRequestsByRequestIdStatusResponses[keyof GetFalAiT2vTurboRequestsByRequestIdStatusResponses] + +export type PutFalAiT2vTurboRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/t2v-turbo/requests/{request_id}/cancel' +} + +export type PutFalAiT2vTurboRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiT2vTurboRequestsByRequestIdCancelResponse = + PutFalAiT2vTurboRequestsByRequestIdCancelResponses[keyof PutFalAiT2vTurboRequestsByRequestIdCancelResponses] + +export type PostFalAiT2vTurboData = { + body: SchemaT2vTurboInput + path?: never + query?: never + url: '/fal-ai/t2v-turbo' +} + +export type PostFalAiT2vTurboResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiT2vTurboResponse = + PostFalAiT2vTurboResponses[keyof PostFalAiT2vTurboResponses] + +export type GetFalAiT2vTurboRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/t2v-turbo/requests/{request_id}' +} + +export type GetFalAiT2vTurboRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaT2vTurboOutput +} + +export type GetFalAiT2vTurboRequestsByRequestIdResponse = + GetFalAiT2vTurboRequestsByRequestIdResponses[keyof GetFalAiT2vTurboRequestsByRequestIdResponses] + +export type GetFalAiFastAnimatediffTextToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/fast-animatediff/text-to-video/requests/{request_id}/status' +} + +export type GetFalAiFastAnimatediffTextToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFastAnimatediffTextToVideoRequestsByRequestIdStatusResponse = + GetFalAiFastAnimatediffTextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiFastAnimatediffTextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiFastAnimatediffTextToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-animatediff/text-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiFastAnimatediffTextToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFastAnimatediffTextToVideoRequestsByRequestIdCancelResponse = + PutFalAiFastAnimatediffTextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiFastAnimatediffTextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiFastAnimatediffTextToVideoData = { + body: SchemaFastAnimatediffTextToVideoInput + path?: never + query?: never + url: '/fal-ai/fast-animatediff/text-to-video' +} + +export type PostFalAiFastAnimatediffTextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFastAnimatediffTextToVideoResponse = + PostFalAiFastAnimatediffTextToVideoResponses[keyof PostFalAiFastAnimatediffTextToVideoResponses] + +export type GetFalAiFastAnimatediffTextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-animatediff/text-to-video/requests/{request_id}' +} + +export type GetFalAiFastAnimatediffTextToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFastAnimatediffTextToVideoOutput +} + +export type GetFalAiFastAnimatediffTextToVideoRequestsByRequestIdResponse = + GetFalAiFastAnimatediffTextToVideoRequestsByRequestIdResponses[keyof GetFalAiFastAnimatediffTextToVideoRequestsByRequestIdResponses] + +export type GetFalAiFastAnimatediffTurboTextToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/fast-animatediff/turbo/text-to-video/requests/{request_id}/status' + } + +export type GetFalAiFastAnimatediffTurboTextToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiFastAnimatediffTurboTextToVideoRequestsByRequestIdStatusResponse = + GetFalAiFastAnimatediffTurboTextToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiFastAnimatediffTurboTextToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiFastAnimatediffTurboTextToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-animatediff/turbo/text-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiFastAnimatediffTurboTextToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFastAnimatediffTurboTextToVideoRequestsByRequestIdCancelResponse = + PutFalAiFastAnimatediffTurboTextToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiFastAnimatediffTurboTextToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiFastAnimatediffTurboTextToVideoData = { + body: SchemaFastAnimatediffTurboTextToVideoInput + path?: never + query?: never + url: '/fal-ai/fast-animatediff/turbo/text-to-video' +} + +export type PostFalAiFastAnimatediffTurboTextToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFastAnimatediffTurboTextToVideoResponse = + PostFalAiFastAnimatediffTurboTextToVideoResponses[keyof PostFalAiFastAnimatediffTurboTextToVideoResponses] + +export type GetFalAiFastAnimatediffTurboTextToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-animatediff/turbo/text-to-video/requests/{request_id}' +} + +export type GetFalAiFastAnimatediffTurboTextToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaFastAnimatediffTurboTextToVideoOutput + } + +export type GetFalAiFastAnimatediffTurboTextToVideoRequestsByRequestIdResponse = + GetFalAiFastAnimatediffTurboTextToVideoRequestsByRequestIdResponses[keyof GetFalAiFastAnimatediffTurboTextToVideoRequestsByRequestIdResponses] + +export type GetFalAiMinimaxVideo01RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/minimax/video-01/requests/{request_id}/status' +} + +export type GetFalAiMinimaxVideo01RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiMinimaxVideo01RequestsByRequestIdStatusResponse = + GetFalAiMinimaxVideo01RequestsByRequestIdStatusResponses[keyof GetFalAiMinimaxVideo01RequestsByRequestIdStatusResponses] + +export type PutFalAiMinimaxVideo01RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/video-01/requests/{request_id}/cancel' +} + +export type PutFalAiMinimaxVideo01RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiMinimaxVideo01RequestsByRequestIdCancelResponse = + PutFalAiMinimaxVideo01RequestsByRequestIdCancelResponses[keyof PutFalAiMinimaxVideo01RequestsByRequestIdCancelResponses] + +export type PostFalAiMinimaxVideo01Data = { + body: SchemaMinimaxVideo01Input + path?: never + query?: never + url: '/fal-ai/minimax/video-01' +} + +export type PostFalAiMinimaxVideo01Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMinimaxVideo01Response = + PostFalAiMinimaxVideo01Responses[keyof PostFalAiMinimaxVideo01Responses] + +export type GetFalAiMinimaxVideo01RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/minimax/video-01/requests/{request_id}' +} + +export type GetFalAiMinimaxVideo01RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMinimaxVideo01Output +} + +export type GetFalAiMinimaxVideo01RequestsByRequestIdResponse = + GetFalAiMinimaxVideo01RequestsByRequestIdResponses[keyof GetFalAiMinimaxVideo01RequestsByRequestIdResponses] + +export type GetFalAiAnimatediffSparsectrlLcmRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/animatediff-sparsectrl-lcm/requests/{request_id}/status' +} + +export type GetFalAiAnimatediffSparsectrlLcmRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiAnimatediffSparsectrlLcmRequestsByRequestIdStatusResponse = + GetFalAiAnimatediffSparsectrlLcmRequestsByRequestIdStatusResponses[keyof GetFalAiAnimatediffSparsectrlLcmRequestsByRequestIdStatusResponses] + +export type PutFalAiAnimatediffSparsectrlLcmRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/animatediff-sparsectrl-lcm/requests/{request_id}/cancel' +} + +export type PutFalAiAnimatediffSparsectrlLcmRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiAnimatediffSparsectrlLcmRequestsByRequestIdCancelResponse = + PutFalAiAnimatediffSparsectrlLcmRequestsByRequestIdCancelResponses[keyof PutFalAiAnimatediffSparsectrlLcmRequestsByRequestIdCancelResponses] + +export type PostFalAiAnimatediffSparsectrlLcmData = { + body: SchemaAnimatediffSparsectrlLcmInput + path?: never + query?: never + url: '/fal-ai/animatediff-sparsectrl-lcm' +} + +export type PostFalAiAnimatediffSparsectrlLcmResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiAnimatediffSparsectrlLcmResponse = + PostFalAiAnimatediffSparsectrlLcmResponses[keyof PostFalAiAnimatediffSparsectrlLcmResponses] + +export type GetFalAiAnimatediffSparsectrlLcmRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/animatediff-sparsectrl-lcm/requests/{request_id}' +} + +export type GetFalAiAnimatediffSparsectrlLcmRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaAnimatediffSparsectrlLcmOutput +} + +export type GetFalAiAnimatediffSparsectrlLcmRequestsByRequestIdResponse = + GetFalAiAnimatediffSparsectrlLcmRequestsByRequestIdResponses[keyof GetFalAiAnimatediffSparsectrlLcmRequestsByRequestIdResponses] diff --git a/packages/typescript/ai-fal/src/generated/text-to-video/zod.gen.ts b/packages/typescript/ai-fal/src/generated/text-to-video/zod.gen.ts new file mode 100644 index 00000000..22b8c13e --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/text-to-video/zod.gen.ts @@ -0,0 +1,15683 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { z } from 'zod' + +/** + * File + */ +export const zSchemaFile = z.object({ + file_size: z.int().register(z.globalRegistry, { + description: 'The size of the file in bytes.', + }), + file_name: z.string().register(z.globalRegistry, { + description: + 'The name of the file. It will be auto-generated if not provided.', + }), + content_type: z.string().register(z.globalRegistry, { + description: 'The mime type of the file.', + }), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), +}) + +/** + * AnimatediffLCMOutput + */ +export const zSchemaAnimatediffSparsectrlLcmOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The seed used to generate the video.', + }), + video: zSchemaFile, +}) + +/** + * AnimatediffLCMInput + */ +export const zSchemaAnimatediffSparsectrlLcmInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. Be as descriptive as possible for best results.', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable\n Diffusion will output the same image every time.\n ', + }), + ), + controlnet_type: z.optional( + z.enum(['scribble', 'rgb']).register(z.globalRegistry, { + description: + 'The type of controlnet to use for generating the video. The controlnet determines how the video will be animated.', + }), + ), + keyframe_2_index: z + .optional( + z.int().register(z.globalRegistry, { + description: + 'The frame index of the third keyframe to use for the generation.', + }), + ) + .default(0), + keyframe_0_index: z + .optional( + z.int().register(z.globalRegistry, { + description: + 'The frame index of the first keyframe to use for the generation.', + }), + ) + .default(0), + keyframe_1_image_url: z.optional(z.union([z.string(), z.null()])), + keyframe_1_index: z + .optional( + z.int().register(z.globalRegistry, { + description: + 'The frame index of the second keyframe to use for the generation.', + }), + ) + .default(0), + guidance_scale: z + .optional( + z.int().gte(0).lte(2).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.', + }), + ) + .default(1), + num_inference_steps: z + .optional( + z.int().gte(1).lte(12).register(z.globalRegistry, { + description: + 'Increasing the amount of steps tells Stable Diffusion that it should take more steps to generate your final result which can increase the amount of detail in your image.', + }), + ) + .default(4), + keyframe_2_image_url: z.optional(z.union([z.string(), z.null()])), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. 
Use it to specify what you don't want.\n ", + }), + ) + .default(''), + keyframe_0_image_url: z.optional(z.union([z.string(), z.null()])), +}) + +/** + * VideoOutput + */ +export const zSchemaMinimaxVideo01Output = z.object({ + video: zSchemaFile, +}) + +/** + * TextToVideoRequest + */ +export const zSchemaMinimaxVideo01Input = z.object({ + prompt_optimizer: z + .optional( + z.boolean().register(z.globalRegistry, { + description: "Whether to use the model's prompt optimizer", + }), + ) + .default(true), + prompt: z.string().max(2000), +}) + +/** + * AnimateDiffT2VOutput + */ +export const zSchemaFastAnimatediffTurboTextToVideoOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for generating the video.', + }), + video: zSchemaFile, +}) + +/** + * ImageSize + */ +export const zSchemaImageSize = z.object({ + height: z + .optional( + z.int().lte(14142).register(z.globalRegistry, { + description: 'The height of the generated image.', + }), + ) + .default(512), + width: z + .optional( + z.int().lte(14142).register(z.globalRegistry, { + description: 'The width of the generated image.', + }), + ) + .default(512), +}) + +/** + * AnimateDiffT2VTurboInput + */ +export const zSchemaFastAnimatediffTurboTextToVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the video. Be as descriptive as possible for best results.', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), + fps: z + .optional( + z.int().gte(1).lte(16).register(z.globalRegistry, { + description: 'Number of frames per second to extract from the video.', + }), + ) + .default(8), + video_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.', + }), + ) + .default(1), + num_frames: z + .optional( + z.int().gte(1).lte(64).register(z.globalRegistry, { + description: 'The number of frames to generate for the video.', + }), + ) + .default(16), + num_inference_steps: z + .optional( + z.int().gte(1).lte(8).register(z.globalRegistry, { + description: + 'The number of inference steps to perform. 4-12 is recommended for turbo mode.', + }), + ) + .default(4), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. 
moustache, blurry, low resolution).\n ", + }), + ) + .default('(bad quality, worst quality:1.2), ugly faces, bad anime'), + motions: z.optional( + z + .array( + z.enum([ + 'zoom-out', + 'zoom-in', + 'pan-left', + 'pan-right', + 'tilt-up', + 'tilt-down', + ]), + ) + .register(z.globalRegistry, { + description: 'The motions to apply to the video.', + }), + ), +}) + +/** + * AnimateDiffT2VOutput + */ +export const zSchemaFastAnimatediffTextToVideoOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for generating the video.', + }), + video: zSchemaFile, +}) + +/** + * AnimateDiffT2VInput + */ +export const zSchemaFastAnimatediffTextToVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the video. Be as descriptive as possible for best results.', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), + fps: z + .optional( + z.int().gte(1).lte(16).register(z.globalRegistry, { + description: 'Number of frames per second to extract from the video.', + }), + ) + .default(8), + video_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(7.5), + num_frames: z + .optional( + z.int().gte(1).lte(32).register(z.globalRegistry, { + description: 'The number of frames to generate for the video.', + }), + ) + .default(16), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(25), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. 
moustache, blurry, low resolution).\n ", + }), + ) + .default('(bad quality, worst quality:1.2), ugly faces, bad anime'), + motions: z.optional( + z + .array( + z.enum([ + 'zoom-out', + 'zoom-in', + 'pan-left', + 'pan-right', + 'tilt-up', + 'tilt-down', + ]), + ) + .register(z.globalRegistry, { + description: 'The motions to apply to the video.', + }), + ), +}) + +/** + * Output + */ +export const zSchemaT2vTurboOutput = z.object({ + video: zSchemaFile, +}) + +/** + * Input + */ +export const zSchemaT2vTurboInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate images from', + }), + guidance_scale: z + .optional( + z.number().gte(0.1).lte(30).register(z.globalRegistry, { + description: 'The guidance scale', + }), + ) + .default(7.5), + seed: z.optional(z.union([z.int().gte(0).lte(203279), z.unknown()])), + export_fps: z + .optional( + z.int().gte(1).lte(24).register(z.globalRegistry, { + description: 'The FPS of the exported video', + }), + ) + .default(8), + num_frames: z + .optional( + z.int().gte(16).lte(32).register(z.globalRegistry, { + description: 'The number of frames to generate', + }), + ) + .default(16), + num_inference_steps: z + .optional( + z.int().gte(1).lte(12).register(z.globalRegistry, { + description: 'The number of steps to sample', + }), + ) + .default(4), +}) + +/** + * FastSVDOutput + */ +export const zSchemaFastSvdLcmTextToVideoOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n\n ', + }), + video: zSchemaFile, +}) + +/** + * FastSVDTextInput + */ +export const zSchemaFastSvdLcmTextToVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to use as a starting point for the generation.', + }), + cond_aug: z + .optional( + z.number().gte(0).lte(10).register(z.globalRegistry, { + description: + '\n The conditioning augmentation determines the amount of noise that will be\n added to the conditioning frame. The higher the number, the more noise\n there will be, and the less the video will look like the initial image.\n Increase it for more motion.\n ', + }), + ) + .default(0.02), + fps: z + .optional( + z.int().gte(1).lte(25).register(z.globalRegistry, { + description: + '\n The FPS of the generated video. The higher the number, the faster the video will\n play. Total video length is 25 frames.\n ', + }), + ) + .default(10), + motion_bucket_id: z + .optional( + z.int().gte(1).lte(255).register(z.globalRegistry, { + description: + '\n The motion bucket id determines the motion of the generated video. The\n higher the number, the more motion there will be.\n ', + }), + ) + .default(127), + video_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + steps: z + .optional( + z.int().gte(1).lte(20).register(z.globalRegistry, { + description: + '\n The number of steps to run the model for. 
The higher the number the better\n the quality and longer it will take to generate.\n ', + }), + ) + .default(4), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), +}) + +/** + * FastSVDOutput + */ +export const zSchemaFastSvdTextToVideoOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated image. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n\n ', + }), + video: zSchemaFile, +}) + +/** + * FastSVDTextInput + */ +export const zSchemaFastSvdTextToVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to use as a starting point for the generation.', + }), + cond_aug: z + .optional( + z.number().gte(0).lte(10).register(z.globalRegistry, { + description: + '\n The conditioning augmentation determines the amount of noise that will be\n added to the conditioning frame. The higher the number, the more noise\n there will be, and the less the video will look like the initial image.\n Increase it for more motion.\n ', + }), + ) + .default(0.02), + deep_cache: z.optional( + z.enum(['none', 'minimum', 'medium', 'high']).register(z.globalRegistry, { + description: + '\n Enabling [DeepCache](https://github.com/horseee/DeepCache) will make the execution\n faster, but might sometimes degrade overall quality. The higher the setting, the\n faster the execution will be, but the more quality might be lost.\n ', + }), + ), + fps: z + .optional( + z.int().gte(1).lte(25).register(z.globalRegistry, { + description: + '\n The FPS of the generated video. The higher the number, the faster the video will\n play. Total video length is 25 frames.\n ', + }), + ) + .default(10), + motion_bucket_id: z + .optional( + z.int().gte(1).lte(255).register(z.globalRegistry, { + description: + '\n The motion bucket id determines the motion of the generated video. The\n higher the number, the more motion there will be.\n ', + }), + ) + .default(127), + video_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + steps: z + .optional( + z.int().gte(1).lte(100).register(z.globalRegistry, { + description: + '\n The number of steps to run the model for. 
The higher the number the better\n the quality and longer it will take to generate.\n ', + }), + ) + .default(20), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'The negative prompt to use as a starting point for the generation.', + }), + ) + .default( + 'unrealistic, saturated, high contrast, big nose, painting, drawing, sketch, cartoon, anime, manga, render, CG, 3d, watermark, signature, label', + ), +}) + +/** + * Output + */ +export const zSchemaLtxVideoOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for random number generation.', + }), + video: zSchemaFile, +}) + +/** + * TextToVideoInput + */ +export const zSchemaLtxVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the video from.', + }), + guidance_scale: z + .optional( + z.number().lte(10).register(z.globalRegistry, { + description: 'The guidance scale to use.', + }), + ) + .default(3), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed to use for random number generation.', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to take.', + }), + ) + .default(30), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to generate the video from.', + }), + ) + .default( + 'low quality, worst quality, deformed, distorted, disfigured, motion smear, motion artifacts, fused fingers, bad anatomy, weird hand, ugly', + ), +}) + +/** + * HunyuanT2VResponse + */ +export const zSchemaHunyuanVideoOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generating the video.', + }), + video: zSchemaFile, +}) + +/** + * HunyuanVideoRequest + */ +export const zSchemaHunyuanVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the video from.', + }), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16']).register(z.globalRegistry, { + description: 'The aspect ratio of the video to generate.', + }), + ), + resolution: z.optional( + z.enum(['480p', '580p', '720p']).register(z.globalRegistry, { + description: 'The resolution of the video to generate.', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(2).lte(30).register(z.globalRegistry, { + description: + 'The number of inference steps to run. Lower gets faster results, higher gets better results.', + }), + ) + .default(30), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed to use for generating the video.', + }), + ), + num_frames: z.optional( + z.enum(['129', '85']).register(z.globalRegistry, { + description: 'The number of frames to generate.', + }), + ), + pro_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'By default, generations are done with 35 steps. 
Pro mode does 55 steps which results in higher quality videos but will take more time and cost 2x more billing units.', + }), + ) + .default(false), +}) + +/** + * MochiT2VOutput + */ +export const zSchemaMochiV1Output = z.object({ + video: zSchemaFile, +}) + +/** + * MochiT2VInput + */ +export const zSchemaMochiV1Input = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate a video from.', + }), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed to use for generating the video.', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt for the video.', + }), + ) + .default(''), +}) + +/** + * T2VOutput + */ +export const zSchemaKlingVideoV15ProTextToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * TextToVideoRequest + */ +export const zSchemaKlingVideoV15ProTextToVideoInput = z.object({ + prompt: z.string().max(2500), + duration: z.optional( + z.enum(['5', '10']).register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video frame', + }), + ), + negative_prompt: z + .optional(z.string().max(2500)) + .default('blur, distort, and low quality'), + cfg_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt.\n ', + }), + ) + .default(0.5), +}) + +/** + * CameraControl + */ +export const zSchemaCameraControl = z.object({ + movement_type: z + .enum(['horizontal', 'vertical', 'pan', 'tilt', 'roll', 'zoom']) + .register(z.globalRegistry, { + description: 'The type of camera movement', + }), + movement_value: z.int().gte(-10).lte(10).register(z.globalRegistry, { + description: 'The value of the camera movement', + }), +}) + +/** + * T2VOutput + */ +export const zSchemaKlingVideoV1StandardTextToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * V1TextToVideoRequest + */ +export const zSchemaKlingVideoV1StandardTextToVideoInput = z.object({ + prompt: z.string().max(2500), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video frame', + }), + ), + advanced_camera_control: z.optional(zSchemaCameraControl), + cfg_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt.\n ', + }), + ) + .default(0.5), + duration: z.optional( + z.enum(['5', '10']).register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ), + negative_prompt: z + .optional(z.string().max(2500)) + .default('blur, distort, and low quality'), + camera_control: z.optional( + z + .enum([ + 'down_back', + 'forward_up', + 'right_turn_forward', + 'left_turn_forward', + ]) + .register(z.globalRegistry, { + description: 'Camera control parameters', + }), + ), +}) + +/** + * T2VLiveOutput + */ +export const zSchemaMinimaxVideo01LiveOutput = z.object({ + video: zSchemaFile, +}) + +/** + * 
TextToVideoLiveRequest + */ +export const zSchemaMinimaxVideo01LiveInput = z.object({ + prompt_optimizer: z + .optional( + z.boolean().register(z.globalRegistry, { + description: "Whether to use the model's prompt optimizer", + }), + ) + .default(true), + prompt: z.string().max(2000), +}) + +/** + * T2VOutput + */ +export const zSchemaKlingVideoV16StandardTextToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * TextToVideoRequest + */ +export const zSchemaKlingVideoV16StandardTextToVideoInput = z.object({ + prompt: z.string().max(2500), + duration: z.optional( + z.enum(['5', '10']).register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video frame', + }), + ), + negative_prompt: z + .optional(z.string().max(2500)) + .default('blur, distort, and low quality'), + cfg_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt.\n ', + }), + ) + .default(0.5), +}) + +/** + * Output + */ +export const zSchemaCogvideox5bOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the video.', + }), + timings: z.record(z.string(), z.number()), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated video. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), + video: zSchemaFile, +}) + +/** + * LoraWeight + */ +export const zSchemaLoraWeight = z.object({ + path: z.string().register(z.globalRegistry, { + description: 'URL or the path to the LoRA weights.', + }), + scale: z + .optional( + z.number().gte(0).lte(4).register(z.globalRegistry, { + description: + '\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ', + }), + ) + .default(1), +}) + +/** + * BaseInput + */ +export const zSchemaCogvideox5bInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the video from.', + }), + use_rife: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Use RIFE for video interpolation', + }), + ) + .default(true), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. 
We currently support one lora.\n ', + }), + ) + .default([]), + video_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related video to show you.\n ', + }), + ) + .default(7), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(50), + export_fps: z + .optional( + z.int().gte(4).lte(32).register(z.globalRegistry, { + description: 'The target FPS of the video', + }), + ) + .default(16), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to generate video from', + }), + ) + .default(''), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n ', + }), + ), +}) + +/** + * Output + */ +export const zSchemaTranspixarOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the video.', + }), + videos: z.array(zSchemaFile).register(z.globalRegistry, { + description: 'The URL to the generated video', + }), + timings: z.record(z.string(), z.number()), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated video. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), +}) + +/** + * BaseInput + */ +export const zSchemaTranspixarInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the video from.', + }), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related video to show you.\n ', + }), + ) + .default(7), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(24), + export_fps: z + .optional( + z.int().gte(4).lte(32).register(z.globalRegistry, { + description: 'The target FPS of the video', + }), + ) + .default(8), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to generate video from', + }), + ) + .default(''), + seed: z.optional(z.union([z.int(), z.unknown()])), +}) + +/** + * HunyuanT2VResponse + */ +export const zSchemaHunyuanVideoLoraOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generating the video.', + }), + video: zSchemaFile, +}) + +/** + * HunyuanT2VRequest + */ +export const zSchemaHunyuanVideoLoraInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the video from.', + }), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16']).register(z.globalRegistry, { + description: 'The aspect ratio of the video to generate.', + }), + ), + resolution: z.optional( + z.enum(['480p', '580p', 
'720p']).register(z.globalRegistry, { + description: 'The resolution of the video to generate.', + }), + ), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(false), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed to use for generating the video.', + }), + ), + num_frames: z.optional( + z.enum(['129', '85']).register(z.globalRegistry, { + description: 'The number of frames to generate.', + }), + ), + pro_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'By default, generations are done with 35 steps. Pro mode does 55 steps which results in higher quality videos but will take more time and cost 2x more billing units.', + }), + ) + .default(false), +}) + +/** + * Ray2T2VOutput + */ +export const zSchemaLumaDreamMachineRay2Output = z.object({ + video: zSchemaFile, +}) + +/** + * Ray2TextToVideoRequest + */ +export const zSchemaLumaDreamMachineRay2Input = z.object({ + prompt: z.string().min(3).max(5000), + aspect_ratio: z.optional( + z + .enum(['16:9', '9:16', '4:3', '3:4', '21:9', '9:21']) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), + resolution: z.optional( + z.enum(['540p', '720p', '1080p']).register(z.globalRegistry, { + description: + 'The resolution of the generated video (720p costs 2x more, 1080p costs 4x more)', + }), + ), + loop: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether the video should loop (end of video is blended with the beginning)', + }), + ) + .default(false), + duration: z.optional( + z.enum(['5s', '9s']).register(z.globalRegistry, { + description: 'The duration of the generated video (9s costs 2x more)', + }), + ), +}) + +/** + * VideoOutput + */ +export const zSchemaPixverseV35TextToVideoFastOutput = z.object({ + video: zSchemaFile, +}) + +/** + * FastTextToVideoRequest + */ +export const zSchemaPixverseV35TextToVideoFastInput = z.object({ + prompt: z.string(), + aspect_ratio: z.optional( + z.enum(['16:9', '4:3', '1:1', '3:4', '9:16']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), + resolution: z.optional( + z.enum(['360p', '540p', '720p']).register(z.globalRegistry, { + description: 'The resolution of the generated video', + }), + ), + style: z.optional( + z + .enum(['anime', '3d_animation', 'clay', 'comic', 'cyberpunk']) + .register(z.globalRegistry, { + description: 'The style of the generated video', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt to be used for the generation', + }), + ) + .default(''), +}) + +/** + * VideoOutput + */ +export const zSchemaPixverseV35TextToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * TextToVideoRequest + */ +export const zSchemaPixverseV35TextToVideoInput = z.object({ + prompt: z.string(), + aspect_ratio: z.optional( + z.enum(['16:9', 
'4:3', '1:1', '3:4', '9:16']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), + resolution: z.optional( + z.enum(['360p', '540p', '720p', '1080p']).register(z.globalRegistry, { + description: 'The resolution of the generated video', + }), + ), + style: z.optional( + z + .enum(['anime', '3d_animation', 'clay', 'comic', 'cyberpunk']) + .register(z.globalRegistry, { + description: 'The style of the generated video', + }), + ), + duration: z.optional( + z.enum(['5', '8']).register(z.globalRegistry, { + description: + 'The duration of the generated video in seconds. 8s videos cost double. 1080p videos are limited to 5 seconds', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt to be used for the generation', + }), + ) + .default(''), +}) + +/** + * T2VDirectorOutput + */ +export const zSchemaMinimaxVideo01DirectorOutput = z.object({ + video: zSchemaFile, +}) + +/** + * TextToVideoDirectorRequest + */ +export const zSchemaMinimaxVideo01DirectorInput = z.object({ + prompt_optimizer: z + .optional( + z.boolean().register(z.globalRegistry, { + description: "Whether to use the model's prompt optimizer", + }), + ) + .default(true), + prompt: z.string().max(2000).register(z.globalRegistry, { + description: + 'Text prompt for video generation. Camera movement instructions can be added using square brackets (e.g. [Pan left] or [Zoom in]). You can use up to 3 combined movements per prompt. Supported movements: Truck left/right, Pan left/right, Push in/Pull out, Pedestal up/down, Tilt up/down, Zoom in/out, Shake, Tracking shot, Static shot. For example: [Truck left, Pan right, Zoom in]. 
For a more detailed guide, refer https://sixth-switch-2ac.notion.site/T2V-01-Director-Model-Tutorial-with-camera-movement-1886c20a98eb80f395b8e05291ad8645', + }), +}) + +/** + * TextToVideoOutput + */ +export const zSchemaVeo2Output = z.object({ + video: zSchemaFile, +}) + +/** + * TextToVideoInput + */ +export const zSchemaVeo2Input = z.object({ + prompt: z.string().min(1).register(z.globalRegistry, { + description: 'The text prompt describing the video you want to generate', + }), + duration: z.optional( + z.enum(['5s', '6s', '7s', '8s']).register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'A seed to use for the video generation', + }), + ), + negative_prompt: z.optional( + z.string().register(z.globalRegistry, { + description: 'A negative prompt to guide the video generation', + }), + ), + enhance_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enhance the video generation', + }), + ) + .default(true), +}) + +/** + * WanT2VResponse + */ +export const zSchemaWanT2vOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * WanT2VRequest + */ +export const zSchemaWanT2vInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide video generation.', + }), + aspect_ratio: z.optional( + z.enum(['9:16', '16:9']).register(z.globalRegistry, { + description: 'Aspect ratio of the generated video (16:9 or 9:16).', + }), + ), + resolution: z.optional( + z.enum(['480p', '580p', '720p']).register(z.globalRegistry, { + description: 'Resolution of the generated video (480p, 580p, or 720p).', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), + turbo_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the video will be generated faster with no noticeable degradation in the visual quality.', + }), + ) + .default(false), + frames_per_second: z + .optional( + z.int().gte(5).lte(24).register(z.globalRegistry, { + description: + 'Frames per second of the generated video. Must be between 5 to 24.', + }), + ) + .default(16), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(false), + num_frames: z + .optional( + z.int().gte(81).lte(100).register(z.globalRegistry, { + description: + 'Number of frames to generate. Must be between 81 to 100 (inclusive).', + }), + ) + .default(81), + num_inference_steps: z + .optional( + z.int().gte(2).lte(40).register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. 
Higher values give better quality but take longer.', + }), + ) + .default(30), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for video generation.', + }), + ) + .default( + 'bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards', + ), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), +}) + +/** + * T2VOutput + */ +export const zSchemaKlingVideoV16ProTextToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * TextToVideoRequest + */ +export const zSchemaKlingVideoV16ProTextToVideoInput = z.object({ + prompt: z.string().max(2500), + duration: z.optional( + z.enum(['5', '10']).register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video frame', + }), + ), + negative_prompt: z + .optional(z.string().max(2500)) + .default('blur, distort, and low quality'), + cfg_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt.\n ', + }), + ) + .default(0.5), +}) + +/** + * TextToVideoOutput + */ +export const zSchemaLtxVideoV095Output = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * TextToVideoInput + */ +export const zSchemaLtxVideoV095Input = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Text prompt to guide generation', + }), + resolution: z.optional( + z.enum(['480p', '720p']).register(z.globalRegistry, { + description: 'Resolution of the generated video (480p or 720p).', + }), + ), + aspect_ratio: z.optional( + z.enum(['9:16', '16:9']).register(z.globalRegistry, { + description: 'Aspect ratio of the generated video (16:9 or 9:16).', + }), + ), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "Whether to expand the prompt using the model's own capabilities.", + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for generation', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps', + }), + ) + .default(40), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for generation', + }), + ) + .default('worst quality, inconsistent motion, blurry, jittery, distorted'), +}) + +/** + * VideoEffectsOutput + */ +export const zSchemaKlingVideoV16StandardEffectsOutput = z.object({ + video: zSchemaFile, +}) + +/** + * VideoEffectsRequest + */ +export const zSchemaKlingVideoV16StandardEffectsInput = z.object({ + duration: z.optional( + z.enum(['5', '10']).register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ), + 
input_image_urls: z.optional( + z.array(z.string()).register(z.globalRegistry, { + description: + 'URL of images to be used for hug, kiss or heart_gesture video.', + }), + ), + effect_scene: z + .enum([ + 'hug', + 'kiss', + 'heart_gesture', + 'squish', + 'expansion', + 'fuzzyfuzzy', + 'bloombloom', + 'dizzydizzy', + 'jelly_press', + 'jelly_slice', + 'jelly_squish', + 'jelly_jiggle', + 'pixelpixel', + 'yearbook', + 'instant_film', + 'anime_figure', + 'rocketrocket', + 'fly_fly', + 'disappear', + 'lightning_power', + 'bullet_time', + 'bullet_time_360', + 'media_interview', + 'day_to_night', + "let's_ride", + 'jumpdrop', + 'swish_swish', + 'running_man', + 'jazz_jazz', + 'swing_swing', + 'skateskate', + 'building_sweater', + 'pure_white_wings', + 'black_wings', + 'golden_wing', + 'pink_pink_wings', + 'rampage_ape', + 'a_list_look', + 'countdown_teleport', + 'firework_2026', + 'instant_christmas', + 'birthday_star', + 'firework', + 'celebration', + 'tiger_hug_pro', + 'pet_lion_pro', + 'guardian_spirit', + 'squeeze_scream', + 'inner_voice', + 'memory_alive', + 'guess_what', + 'eagle_snatch', + 'hug_from_past', + 'instant_kid', + 'dollar_rain', + 'cry_cry', + 'building_collapse', + 'mushroom', + 'jesus_hug', + 'shark_alert', + 'lie_flat', + 'polar_bear_hug', + 'brown_bear_hug', + 'office_escape_plow', + 'watermelon_bomb', + 'boss_coming', + 'wig_out', + 'car_explosion', + 'tiger_hug', + 'siblings', + 'construction_worker', + 'snatched', + 'felt_felt', + 'plushcut', + 'drunk_dance', + 'drunk_dance_pet', + 'daoma_dance', + 'bouncy_dance', + 'smooth_sailing_dance', + 'new_year_greeting', + 'lion_dance', + 'prosperity', + 'great_success', + 'golden_horse_fortune', + 'red_packet_box', + 'lucky_horse_year', + 'lucky_red_packet', + 'lucky_money_come', + 'lion_dance_pet', + 'dumpling_making_pet', + 'fish_making_pet', + 'pet_red_packet', + 'lantern_glow', + 'expression_challenge', + 'overdrive', + 'heart_gesture_dance', + 'poping', + 'martial_arts', + 'running', + 'nezha', + 'motorcycle_dance', + 'subject_3_dance', + 'ghost_step_dance', + 'phantom_jewel', + 'zoom_out', + 'cheers_2026', + 'kiss_pro', + 'fight_pro', + 'hug_pro', + 'heart_gesture_pro', + 'dollar_rain_pro', + 'pet_bee_pro', + 'santa_random_surprise', + 'magic_match_tree', + 'happy_birthday', + 'thumbs_up_pro', + 'surprise_bouquet', + 'bouquet_drop', + '3d_cartoon_1_pro', + 'glamour_photo_shoot', + 'box_of_joy', + 'first_toast_of_the_year', + 'my_santa_pic', + 'santa_gift', + 'steampunk_christmas', + 'snowglobe', + 'christmas_photo_shoot', + 'ornament_crash', + 'santa_express', + 'particle_santa_surround', + 'coronation_of_frost', + 'spark_in_the_snow', + 'scarlet_and_snow', + 'cozy_toon_wrap', + 'bullet_time_lite', + 'magic_cloak', + 'balloon_parade', + 'jumping_ginger_joy', + 'c4d_cartoon_pro', + 'venomous_spider', + 'throne_of_king', + 'luminous_elf', + 'woodland_elf', + 'japanese_anime_1', + 'american_comics', + 'snowboarding', + 'witch_transform', + 'vampire_transform', + 'pumpkin_head_transform', + 'demon_transform', + 'mummy_transform', + 'zombie_transform', + 'cute_pumpkin_transform', + 'cute_ghost_transform', + 'knock_knock_halloween', + 'halloween_escape', + 'baseball', + 'trampoline', + 'trampoline_night', + 'pucker_up', + 'feed_mooncake', + 'flyer', + 'dishwasher', + 'pet_chinese_opera', + 'magic_fireball', + 'gallery_ring', + 'pet_moto_rider', + 'muscle_pet', + 'pet_delivery', + 'mythic_style', + 'steampunk', + '3d_cartoon_2', + 'pet_chef', + 'santa_gifts', + 'santa_hug', + 'girlfriend', + 'boyfriend', + 'heart_gesture_1', + 
'pet_wizard', + 'smoke_smoke', + 'gun_shot', + 'double_gun', + 'pet_warrior', + 'long_hair', + 'pet_dance', + 'wool_curly', + 'pet_bee', + 'marry_me', + 'piggy_morph', + 'ski_ski', + 'magic_broom', + 'splashsplash', + 'surfsurf', + 'fairy_wing', + 'angel_wing', + 'dark_wing', + 'emoji', + ]) + .register(z.globalRegistry, { + description: 'The effect scene to use for the video generation', + }), +}) + +/** + * VideoEffectsOutput + */ +export const zSchemaKlingVideoV1StandardEffectsOutput = z.object({ + video: zSchemaFile, +}) + +/** + * VideoEffectsRequest + */ +export const zSchemaKlingVideoV1StandardEffectsInput = z.object({ + duration: z.optional( + z.enum(['5', '10']).register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ), + input_image_urls: z.optional( + z.array(z.string()).register(z.globalRegistry, { + description: + 'URL of images to be used for hug, kiss or heart_gesture video.', + }), + ), + effect_scene: z + .enum([ + 'hug', + 'kiss', + 'heart_gesture', + 'squish', + 'expansion', + 'fuzzyfuzzy', + 'bloombloom', + 'dizzydizzy', + 'jelly_press', + 'jelly_slice', + 'jelly_squish', + 'jelly_jiggle', + 'pixelpixel', + 'yearbook', + 'instant_film', + 'anime_figure', + 'rocketrocket', + 'fly_fly', + 'disappear', + 'lightning_power', + 'bullet_time', + 'bullet_time_360', + 'media_interview', + 'day_to_night', + "let's_ride", + 'jumpdrop', + 'swish_swish', + 'running_man', + 'jazz_jazz', + 'swing_swing', + 'skateskate', + 'building_sweater', + 'pure_white_wings', + 'black_wings', + 'golden_wing', + 'pink_pink_wings', + 'rampage_ape', + 'a_list_look', + 'countdown_teleport', + 'firework_2026', + 'instant_christmas', + 'birthday_star', + 'firework', + 'celebration', + 'tiger_hug_pro', + 'pet_lion_pro', + 'guardian_spirit', + 'squeeze_scream', + 'inner_voice', + 'memory_alive', + 'guess_what', + 'eagle_snatch', + 'hug_from_past', + 'instant_kid', + 'dollar_rain', + 'cry_cry', + 'building_collapse', + 'mushroom', + 'jesus_hug', + 'shark_alert', + 'lie_flat', + 'polar_bear_hug', + 'brown_bear_hug', + 'office_escape_plow', + 'watermelon_bomb', + 'boss_coming', + 'wig_out', + 'car_explosion', + 'tiger_hug', + 'siblings', + 'construction_worker', + 'snatched', + 'felt_felt', + 'plushcut', + 'drunk_dance', + 'drunk_dance_pet', + 'daoma_dance', + 'bouncy_dance', + 'smooth_sailing_dance', + 'new_year_greeting', + 'lion_dance', + 'prosperity', + 'great_success', + 'golden_horse_fortune', + 'red_packet_box', + 'lucky_horse_year', + 'lucky_red_packet', + 'lucky_money_come', + 'lion_dance_pet', + 'dumpling_making_pet', + 'fish_making_pet', + 'pet_red_packet', + 'lantern_glow', + 'expression_challenge', + 'overdrive', + 'heart_gesture_dance', + 'poping', + 'martial_arts', + 'running', + 'nezha', + 'motorcycle_dance', + 'subject_3_dance', + 'ghost_step_dance', + 'phantom_jewel', + 'zoom_out', + 'cheers_2026', + 'kiss_pro', + 'fight_pro', + 'hug_pro', + 'heart_gesture_pro', + 'dollar_rain_pro', + 'pet_bee_pro', + 'santa_random_surprise', + 'magic_match_tree', + 'happy_birthday', + 'thumbs_up_pro', + 'surprise_bouquet', + 'bouquet_drop', + '3d_cartoon_1_pro', + 'glamour_photo_shoot', + 'box_of_joy', + 'first_toast_of_the_year', + 'my_santa_pic', + 'santa_gift', + 'steampunk_christmas', + 'snowglobe', + 'christmas_photo_shoot', + 'ornament_crash', + 'santa_express', + 'particle_santa_surround', + 'coronation_of_frost', + 'spark_in_the_snow', + 'scarlet_and_snow', + 'cozy_toon_wrap', + 'bullet_time_lite', + 'magic_cloak', + 'balloon_parade', + 
'jumping_ginger_joy', + 'c4d_cartoon_pro', + 'venomous_spider', + 'throne_of_king', + 'luminous_elf', + 'woodland_elf', + 'japanese_anime_1', + 'american_comics', + 'snowboarding', + 'witch_transform', + 'vampire_transform', + 'pumpkin_head_transform', + 'demon_transform', + 'mummy_transform', + 'zombie_transform', + 'cute_pumpkin_transform', + 'cute_ghost_transform', + 'knock_knock_halloween', + 'halloween_escape', + 'baseball', + 'trampoline', + 'trampoline_night', + 'pucker_up', + 'feed_mooncake', + 'flyer', + 'dishwasher', + 'pet_chinese_opera', + 'magic_fireball', + 'gallery_ring', + 'pet_moto_rider', + 'muscle_pet', + 'pet_delivery', + 'mythic_style', + 'steampunk', + '3d_cartoon_2', + 'pet_chef', + 'santa_gifts', + 'santa_hug', + 'girlfriend', + 'boyfriend', + 'heart_gesture_1', + 'pet_wizard', + 'smoke_smoke', + 'gun_shot', + 'double_gun', + 'pet_warrior', + 'long_hair', + 'pet_dance', + 'wool_curly', + 'pet_bee', + 'marry_me', + 'piggy_morph', + 'ski_ski', + 'magic_broom', + 'splashsplash', + 'surfsurf', + 'fairy_wing', + 'angel_wing', + 'dark_wing', + 'emoji', + ]) + .register(z.globalRegistry, { + description: 'The effect scene to use for the video generation', + }), +}) + +/** + * VideoEffectsOutput + */ +export const zSchemaKlingVideoV16ProEffectsOutput = z.object({ + video: zSchemaFile, +}) + +/** + * VideoEffectsRequest + */ +export const zSchemaKlingVideoV16ProEffectsInput = z.object({ + duration: z.optional( + z.enum(['5', '10']).register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ), + input_image_urls: z.optional( + z.array(z.string()).register(z.globalRegistry, { + description: + 'URL of images to be used for hug, kiss or heart_gesture video.', + }), + ), + effect_scene: z + .enum([ + 'hug', + 'kiss', + 'heart_gesture', + 'squish', + 'expansion', + 'fuzzyfuzzy', + 'bloombloom', + 'dizzydizzy', + 'jelly_press', + 'jelly_slice', + 'jelly_squish', + 'jelly_jiggle', + 'pixelpixel', + 'yearbook', + 'instant_film', + 'anime_figure', + 'rocketrocket', + 'fly_fly', + 'disappear', + 'lightning_power', + 'bullet_time', + 'bullet_time_360', + 'media_interview', + 'day_to_night', + "let's_ride", + 'jumpdrop', + 'swish_swish', + 'running_man', + 'jazz_jazz', + 'swing_swing', + 'skateskate', + 'building_sweater', + 'pure_white_wings', + 'black_wings', + 'golden_wing', + 'pink_pink_wings', + 'rampage_ape', + 'a_list_look', + 'countdown_teleport', + 'firework_2026', + 'instant_christmas', + 'birthday_star', + 'firework', + 'celebration', + 'tiger_hug_pro', + 'pet_lion_pro', + 'guardian_spirit', + 'squeeze_scream', + 'inner_voice', + 'memory_alive', + 'guess_what', + 'eagle_snatch', + 'hug_from_past', + 'instant_kid', + 'dollar_rain', + 'cry_cry', + 'building_collapse', + 'mushroom', + 'jesus_hug', + 'shark_alert', + 'lie_flat', + 'polar_bear_hug', + 'brown_bear_hug', + 'office_escape_plow', + 'watermelon_bomb', + 'boss_coming', + 'wig_out', + 'car_explosion', + 'tiger_hug', + 'siblings', + 'construction_worker', + 'snatched', + 'felt_felt', + 'plushcut', + 'drunk_dance', + 'drunk_dance_pet', + 'daoma_dance', + 'bouncy_dance', + 'smooth_sailing_dance', + 'new_year_greeting', + 'lion_dance', + 'prosperity', + 'great_success', + 'golden_horse_fortune', + 'red_packet_box', + 'lucky_horse_year', + 'lucky_red_packet', + 'lucky_money_come', + 'lion_dance_pet', + 'dumpling_making_pet', + 'fish_making_pet', + 'pet_red_packet', + 'lantern_glow', + 'expression_challenge', + 'overdrive', + 'heart_gesture_dance', + 'poping', + 'martial_arts', + 
'running', + 'nezha', + 'motorcycle_dance', + 'subject_3_dance', + 'ghost_step_dance', + 'phantom_jewel', + 'zoom_out', + 'cheers_2026', + 'kiss_pro', + 'fight_pro', + 'hug_pro', + 'heart_gesture_pro', + 'dollar_rain_pro', + 'pet_bee_pro', + 'santa_random_surprise', + 'magic_match_tree', + 'happy_birthday', + 'thumbs_up_pro', + 'surprise_bouquet', + 'bouquet_drop', + '3d_cartoon_1_pro', + 'glamour_photo_shoot', + 'box_of_joy', + 'first_toast_of_the_year', + 'my_santa_pic', + 'santa_gift', + 'steampunk_christmas', + 'snowglobe', + 'christmas_photo_shoot', + 'ornament_crash', + 'santa_express', + 'particle_santa_surround', + 'coronation_of_frost', + 'spark_in_the_snow', + 'scarlet_and_snow', + 'cozy_toon_wrap', + 'bullet_time_lite', + 'magic_cloak', + 'balloon_parade', + 'jumping_ginger_joy', + 'c4d_cartoon_pro', + 'venomous_spider', + 'throne_of_king', + 'luminous_elf', + 'woodland_elf', + 'japanese_anime_1', + 'american_comics', + 'snowboarding', + 'witch_transform', + 'vampire_transform', + 'pumpkin_head_transform', + 'demon_transform', + 'mummy_transform', + 'zombie_transform', + 'cute_pumpkin_transform', + 'cute_ghost_transform', + 'knock_knock_halloween', + 'halloween_escape', + 'baseball', + 'trampoline', + 'trampoline_night', + 'pucker_up', + 'feed_mooncake', + 'flyer', + 'dishwasher', + 'pet_chinese_opera', + 'magic_fireball', + 'gallery_ring', + 'pet_moto_rider', + 'muscle_pet', + 'pet_delivery', + 'mythic_style', + 'steampunk', + '3d_cartoon_2', + 'pet_chef', + 'santa_gifts', + 'santa_hug', + 'girlfriend', + 'boyfriend', + 'heart_gesture_1', + 'pet_wizard', + 'smoke_smoke', + 'gun_shot', + 'double_gun', + 'pet_warrior', + 'long_hair', + 'pet_dance', + 'wool_curly', + 'pet_bee', + 'marry_me', + 'piggy_morph', + 'ski_ski', + 'magic_broom', + 'splashsplash', + 'surfsurf', + 'fairy_wing', + 'angel_wing', + 'dark_wing', + 'emoji', + ]) + .register(z.globalRegistry, { + description: 'The effect scene to use for the video generation', + }), +}) + +/** + * VideoEffectsOutput + */ +export const zSchemaKlingVideoV15ProEffectsOutput = z.object({ + video: zSchemaFile, +}) + +/** + * VideoEffectsRequest + */ +export const zSchemaKlingVideoV15ProEffectsInput = z.object({ + duration: z.optional( + z.enum(['5', '10']).register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ), + input_image_urls: z.optional( + z.array(z.string()).register(z.globalRegistry, { + description: + 'URL of images to be used for hug, kiss or heart_gesture video.', + }), + ), + effect_scene: z + .enum([ + 'hug', + 'kiss', + 'heart_gesture', + 'squish', + 'expansion', + 'fuzzyfuzzy', + 'bloombloom', + 'dizzydizzy', + 'jelly_press', + 'jelly_slice', + 'jelly_squish', + 'jelly_jiggle', + 'pixelpixel', + 'yearbook', + 'instant_film', + 'anime_figure', + 'rocketrocket', + 'fly_fly', + 'disappear', + 'lightning_power', + 'bullet_time', + 'bullet_time_360', + 'media_interview', + 'day_to_night', + "let's_ride", + 'jumpdrop', + 'swish_swish', + 'running_man', + 'jazz_jazz', + 'swing_swing', + 'skateskate', + 'building_sweater', + 'pure_white_wings', + 'black_wings', + 'golden_wing', + 'pink_pink_wings', + 'rampage_ape', + 'a_list_look', + 'countdown_teleport', + 'firework_2026', + 'instant_christmas', + 'birthday_star', + 'firework', + 'celebration', + 'tiger_hug_pro', + 'pet_lion_pro', + 'guardian_spirit', + 'squeeze_scream', + 'inner_voice', + 'memory_alive', + 'guess_what', + 'eagle_snatch', + 'hug_from_past', + 'instant_kid', + 'dollar_rain', + 'cry_cry', + 'building_collapse', + 
'mushroom', + 'jesus_hug', + 'shark_alert', + 'lie_flat', + 'polar_bear_hug', + 'brown_bear_hug', + 'office_escape_plow', + 'watermelon_bomb', + 'boss_coming', + 'wig_out', + 'car_explosion', + 'tiger_hug', + 'siblings', + 'construction_worker', + 'snatched', + 'felt_felt', + 'plushcut', + 'drunk_dance', + 'drunk_dance_pet', + 'daoma_dance', + 'bouncy_dance', + 'smooth_sailing_dance', + 'new_year_greeting', + 'lion_dance', + 'prosperity', + 'great_success', + 'golden_horse_fortune', + 'red_packet_box', + 'lucky_horse_year', + 'lucky_red_packet', + 'lucky_money_come', + 'lion_dance_pet', + 'dumpling_making_pet', + 'fish_making_pet', + 'pet_red_packet', + 'lantern_glow', + 'expression_challenge', + 'overdrive', + 'heart_gesture_dance', + 'poping', + 'martial_arts', + 'running', + 'nezha', + 'motorcycle_dance', + 'subject_3_dance', + 'ghost_step_dance', + 'phantom_jewel', + 'zoom_out', + 'cheers_2026', + 'kiss_pro', + 'fight_pro', + 'hug_pro', + 'heart_gesture_pro', + 'dollar_rain_pro', + 'pet_bee_pro', + 'santa_random_surprise', + 'magic_match_tree', + 'happy_birthday', + 'thumbs_up_pro', + 'surprise_bouquet', + 'bouquet_drop', + '3d_cartoon_1_pro', + 'glamour_photo_shoot', + 'box_of_joy', + 'first_toast_of_the_year', + 'my_santa_pic', + 'santa_gift', + 'steampunk_christmas', + 'snowglobe', + 'christmas_photo_shoot', + 'ornament_crash', + 'santa_express', + 'particle_santa_surround', + 'coronation_of_frost', + 'spark_in_the_snow', + 'scarlet_and_snow', + 'cozy_toon_wrap', + 'bullet_time_lite', + 'magic_cloak', + 'balloon_parade', + 'jumping_ginger_joy', + 'c4d_cartoon_pro', + 'venomous_spider', + 'throne_of_king', + 'luminous_elf', + 'woodland_elf', + 'japanese_anime_1', + 'american_comics', + 'snowboarding', + 'witch_transform', + 'vampire_transform', + 'pumpkin_head_transform', + 'demon_transform', + 'mummy_transform', + 'zombie_transform', + 'cute_pumpkin_transform', + 'cute_ghost_transform', + 'knock_knock_halloween', + 'halloween_escape', + 'baseball', + 'trampoline', + 'trampoline_night', + 'pucker_up', + 'feed_mooncake', + 'flyer', + 'dishwasher', + 'pet_chinese_opera', + 'magic_fireball', + 'gallery_ring', + 'pet_moto_rider', + 'muscle_pet', + 'pet_delivery', + 'mythic_style', + 'steampunk', + '3d_cartoon_2', + 'pet_chef', + 'santa_gifts', + 'santa_hug', + 'girlfriend', + 'boyfriend', + 'heart_gesture_1', + 'pet_wizard', + 'smoke_smoke', + 'gun_shot', + 'double_gun', + 'pet_warrior', + 'long_hair', + 'pet_dance', + 'wool_curly', + 'pet_bee', + 'marry_me', + 'piggy_morph', + 'ski_ski', + 'magic_broom', + 'splashsplash', + 'surfsurf', + 'fairy_wing', + 'angel_wing', + 'dark_wing', + 'emoji', + ]) + .register(z.globalRegistry, { + description: 'The effect scene to use for the video generation', + }), +}) + +/** + * WanProT2VResponse + */ +export const zSchemaWanProTextToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * WanProT2VRequest + */ +export const zSchemaWanProTextToVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the video', + }), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker', + }), + ) + .default(true), + seed: z.optional(z.union([z.int(), z.unknown()])), +}) + +/** + * Pika22TextToVideoOutput + * + * Output model for Pika 2.2 text-to-video generation + */ +export const zSchemaPikaV22TextToVideoOutput = z + .object({ + video: zSchemaFile, + }) + .register(z.globalRegistry, { + description: 'Output model for Pika 
2.2 text-to-video generation', + }) + +/** + * Pika22TextToVideoRequest + * + * Request model for Pika 2.2 text-to-video generation + */ +export const zSchemaPikaV22TextToVideoInput = z + .object({ + prompt: z.string(), + resolution: z.optional( + z.enum(['1080p', '720p']).register(z.globalRegistry, { + description: 'The resolution of the generated video', + }), + ), + aspect_ratio: z.optional( + z + .enum(['16:9', '9:16', '1:1', '4:5', '5:4', '3:2', '2:3']) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), + duration: z.optional( + z.union([z.literal(5), z.literal(10)]).register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the random number generator', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'A negative prompt to guide the model', + }), + ) + .default('ugly, bad, terrible'), + }) + .register(z.globalRegistry, { + description: 'Request model for Pika 2.2 text-to-video generation', + }) + +/** + * TextToVideoV21Output + * + * Output from text-to-video generation + */ +export const zSchemaPikaV21TextToVideoOutput = z + .object({ + video: zSchemaFile, + }) + .register(z.globalRegistry, { + description: 'Output from text-to-video generation', + }) + +/** + * TextToVideov21Input + * + * Base request for text-to-video generation + */ +export const zSchemaPikaV21TextToVideoInput = z + .object({ + prompt: z.string(), + resolution: z.optional( + z.enum(['720p', '1080p']).register(z.globalRegistry, { + description: 'The resolution of the generated video', + }), + ), + aspect_ratio: z.optional( + z + .enum(['16:9', '9:16', '1:1', '4:5', '5:4', '3:2', '2:3']) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), + duration: z + .optional( + z.int().register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ) + .default(5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the random number generator', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'A negative prompt to guide the model', + }), + ) + .default(''), + }) + .register(z.globalRegistry, { + description: 'Base request for text-to-video generation', + }) + +/** + * TurboTextToVideoOutput + * + * Output from text-to-video generation + */ +export const zSchemaPikaV2TurboTextToVideoOutput = z + .object({ + video: zSchemaFile, + }) + .register(z.globalRegistry, { + description: 'Output from text-to-video generation', + }) + +/** + * TextToVideoTurboInput + * + * Base request for text-to-video generation + */ +export const zSchemaPikaV2TurboTextToVideoInput = z + .object({ + prompt: z.string(), + resolution: z.optional( + z.enum(['720p', '1080p']).register(z.globalRegistry, { + description: 'The resolution of the generated video', + }), + ), + aspect_ratio: z.optional( + z + .enum(['16:9', '9:16', '1:1', '4:5', '5:4', '3:2', '2:3']) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), + duration: z + .optional( + z.int().register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ) + .default(5), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the random number generator', + }), + ), + 
negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'A negative prompt to guide the model', + }), + ) + .default(''), + }) + .register(z.globalRegistry, { + description: 'Base request for text-to-video generation', + }) + +/** + * Ray2T2VOutput + */ +export const zSchemaLumaDreamMachineRay2FlashOutput = z.object({ + video: zSchemaFile, +}) + +/** + * Ray2TextToVideoRequest + */ +export const zSchemaLumaDreamMachineRay2FlashInput = z.object({ + prompt: z.string().min(3).max(5000), + aspect_ratio: z.optional( + z + .enum(['16:9', '9:16', '4:3', '3:4', '21:9', '9:21']) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), + resolution: z.optional( + z.enum(['540p', '720p', '1080p']).register(z.globalRegistry, { + description: + 'The resolution of the generated video (720p costs 2x more, 1080p costs 4x more)', + }), + ), + loop: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether the video should loop (end of video is blended with the beginning)', + }), + ) + .default(false), + duration: z.optional( + z.enum(['5s', '9s']).register(z.globalRegistry, { + description: 'The duration of the generated video (9s costs 2x more)', + }), + ), +}) + +/** + * WanT2VResponse + */ +export const zSchemaWanT2vLoraOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * WanLoRARequest + */ +export const zSchemaWanT2vLoraInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide video generation.', + }), + resolution: z.optional( + z.enum(['480p', '580p', '720p']).register(z.globalRegistry, { + description: 'Resolution of the generated video (480p, 580p, or 720p).', + }), + ), + reverse_video: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If true, the video will be reversed.', + }), + ) + .default(false), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), + aspect_ratio: z.optional( + z.enum(['9:16', '16:9']).register(z.globalRegistry, { + description: 'Aspect ratio of the generated video (16:9 or 9:16).', + }), + ), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: 'LoRA weights to be used in the inference.', + }), + ) + .default([]), + frames_per_second: z + .optional( + z.int().gte(5).lte(24).register(z.globalRegistry, { + description: + 'Frames per second of the generated video. Must be between 5 and 24.', + }), + ) + .default(16), + turbo_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the video will be generated faster with no noticeable degradation in the visual quality.', + }), + ) + .default(true), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(2).lte(40).register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. Higher values give better quality but take longer.', + }), + ) + .default(30),
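+ // Illustrative note, not part of the generated schemas: because defaults live in the schema, the payload type and the parsed type differ. `z.input` types what a caller may send (defaulted fields optional), while `z.infer` types the fully-defaulted result of `.parse()`. A minimal sketch with a made-up payload: + //   type WanT2vLoraPayload = z.input<typeof zSchemaWanT2vLoraInput> + //   const payload: WanT2vLoraPayload = { prompt: 'a paper-craft fox', resolution: '720p' } + num_frames: z + .optional( + z.int().gte(81).lte(100).register(z.globalRegistry, { + description: + 'Number of frames to generate. 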
Must be between 81 to 100 (inclusive).', + }), + ) + .default(81), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for video generation.', + }), + ) + .default( + 'bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards', + ), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), +}) + +/** + * LipsyncOutput + */ +export const zSchemaKlingVideoLipsyncTextToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * LipsyncT2VRequest + */ +export const zSchemaKlingVideoLipsyncTextToVideoInput = z.object({ + text: z.string().max(120).register(z.globalRegistry, { + description: + 'Text content for lip-sync video generation. Max 120 characters.', + }), + video_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the video to generate the lip sync for. Supports .mp4/.mov, ≤100MB, 2-60s, 720p/1080p only, width/height 720–1920px. If validation fails, an error is returned.', + }), + voice_id: z + .enum([ + 'genshin_vindi2', + 'zhinen_xuesheng', + 'AOT', + 'ai_shatang', + 'genshin_klee2', + 'genshin_kirara', + 'ai_kaiya', + 'oversea_male1', + 'ai_chenjiahao_712', + 'girlfriend_4_speech02', + 'chat1_female_new-3', + 'chat_0407_5-1', + 'cartoon-boy-07', + 'uk_boy1', + 'cartoon-girl-01', + 'PeppaPig_platform', + 'ai_huangzhong_712', + 'ai_huangyaoshi_712', + 'ai_laoguowang_712', + 'chengshu_jiejie', + 'you_pingjing', + 'calm_story1', + 'uk_man2', + 'laopopo_speech02', + 'heainainai_speech02', + 'reader_en_m-v1', + 'commercial_lady_en_f-v1', + 'tiyuxi_xuedi', + 'tiexin_nanyou', + 'girlfriend_1_speech02', + 'girlfriend_2_speech02', + 'zhuxi_speech02', + 'uk_oldman3', + 'dongbeilaotie_speech02', + 'chongqingxiaohuo_speech02', + 'chuanmeizi_speech02', + 'chaoshandashu_speech02', + 'ai_taiwan_man2_speech02', + 'xianzhanggui_speech02', + 'tianjinjiejie_speech02', + 'diyinnansang_DB_CN_M_04-v2', + 'yizhipiannan-v1', + 'guanxiaofang-v2', + 'tianmeixuemei-v1', + 'daopianyansang-v1', + 'mengwa-v1', + ]) + .register(z.globalRegistry, { + description: 'Voice ID to use for speech synthesis', + }), + voice_language: z.optional( + z.enum(['zh', 'en']).register(z.globalRegistry, { + description: 'The voice language corresponding to the Voice ID', + }), + ), + voice_speed: z + .optional( + z.number().gte(0.8).lte(2).register(z.globalRegistry, { + description: 'Speech rate for Text to Video generation', + }), + ) + .default(1), +}) + +/** + * LipsyncA2VOutput + */ +export const zSchemaKlingVideoLipsyncAudioToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * LipsyncA2VRequest + */ +export const zSchemaKlingVideoLipsyncAudioToVideoInput = z.object({ + video_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the video to generate the lip sync for. Supports .mp4/.mov, ≤100MB, 2–10s, 720p/1080p only, width/height 720–1920px.', + }), + audio_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the audio to generate the lip sync for. Minimum duration is 2s and maximum duration is 60s. 
Maximum file size is 5MB.', + }), +}) + +/** + * VideoOutputV4 + */ +export const zSchemaPixverseV4TextToVideoFastOutput = z.object({ + video: zSchemaFile, +}) + +/** + * FastTextToVideoRequest + */ +export const zSchemaPixverseV4TextToVideoFastInput = z.object({ + prompt: z.string(), + aspect_ratio: z.optional( + z.enum(['16:9', '4:3', '1:1', '3:4', '9:16']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), + resolution: z.optional( + z.enum(['360p', '540p', '720p']).register(z.globalRegistry, { + description: 'The resolution of the generated video', + }), + ), + style: z.optional( + z + .enum(['anime', '3d_animation', 'clay', 'comic', 'cyberpunk']) + .register(z.globalRegistry, { + description: 'The style of the generated video', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt to be used for the generation', + }), + ) + .default(''), +}) + +/** + * VideoOutputV4 + */ +export const zSchemaPixverseV4TextToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * TextToVideoRequest + */ +export const zSchemaPixverseV4TextToVideoInput = z.object({ + prompt: z.string(), + aspect_ratio: z.optional( + z.enum(['16:9', '4:3', '1:1', '3:4', '9:16']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), + resolution: z.optional( + z.enum(['360p', '540p', '720p', '1080p']).register(z.globalRegistry, { + description: 'The resolution of the generated video', + }), + ), + style: z.optional( + z + .enum(['anime', '3d_animation', 'clay', 'comic', 'cyberpunk']) + .register(z.globalRegistry, { + description: 'The style of the generated video', + }), + ), + duration: z.optional( + z.enum(['5', '8']).register(z.globalRegistry, { + description: + 'The duration of the generated video in seconds. 8s videos cost double. 1080p videos are limited to 5 seconds', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt to be used for the generation', + }), + ) + .default(''), +}) + +/** + * MagiResponse + */ +export const zSchemaMagiDistilledOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * MagiTextToVideoRequest + */ +export const zSchemaMagiDistilledInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide video generation.', + }), + resolution: z.optional( + z.enum(['480p', '720p']).register(z.globalRegistry, { + description: + 'Resolution of the generated video (480p or 720p). 480p is 0.5 billing units, and 720p is 1 billing unit.', + }), + ), + aspect_ratio: z.optional( + z.enum(['auto', '16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: + "Aspect ratio of the generated video. 
If 'auto', the aspect ratio will be determined automatically based on the input image.", + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_inference_steps: z.optional( + z + .union([z.literal(4), z.literal(8), z.literal(16), z.literal(32)]) + .register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. Higher values give better quality but take longer.', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), + num_frames: z + .optional( + z.int().gte(96).lte(192).register(z.globalRegistry, { + description: + 'Number of frames to generate. Must be between 96 and 192 (inclusive). Each additional 24 frames beyond 96 incurs an additional billing unit.', + }), + ) + .default(96), +}) + +/** + * MagiResponse + */ +export const zSchemaMagiOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * MagiTextToVideoRequest + */ +export const zSchemaMagiInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide video generation.', + }), + resolution: z.optional( + z.enum(['480p', '720p']).register(z.globalRegistry, { + description: + 'Resolution of the generated video (480p or 720p). 480p is 0.5 billing units, and 720p is 1 billing unit.', + }), + ), + aspect_ratio: z.optional( + z.enum(['auto', '16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: + "Aspect ratio of the generated video. If 'auto', the aspect ratio will be determined automatically based on the input image.", + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_inference_steps: z.optional( + z + .union([ + z.literal(4), + z.literal(8), + z.literal(16), + z.literal(32), + z.literal(64), + ]) + .register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. Higher values give better quality but take longer.', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), + num_frames: z + .optional( + z.int().gte(96).lte(192).register(z.globalRegistry, { + description: + 'Number of frames to generate. Must be between 96 and 192 (inclusive). 
Each additional 24 frames beyond 96 incurs an additional billing unit.', + }), + ) + .default(96), +}) + +/** + * Q1TextToVideoOutput + */ +export const zSchemaViduQ1TextToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * Q1TextToVideoRequest + */ +export const zSchemaViduQ1TextToVideoInput = z.object({ + prompt: z.string().max(1500).register(z.globalRegistry, { + description: 'Text prompt for video generation, max 1500 characters', + }), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: 'The aspect ratio of the output video', + }), + ), + style: z.optional( + z.enum(['general', 'anime']).register(z.globalRegistry, { + description: 'The style of output video', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Seed for the random number generator', + }), + ), + movement_amplitude: z.optional( + z.enum(['auto', 'small', 'medium', 'large']).register(z.globalRegistry, { + description: 'The movement amplitude of objects in the frame', + }), + ), +}) + +/** + * VideoOutputV4 + */ +export const zSchemaPixverseV45TextToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * TextToVideoRequest + */ +export const zSchemaPixverseV45TextToVideoInput = z.object({ + prompt: z.string(), + aspect_ratio: z.optional( + z.enum(['16:9', '4:3', '1:1', '3:4', '9:16']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), + resolution: z.optional( + z.enum(['360p', '540p', '720p', '1080p']).register(z.globalRegistry, { + description: 'The resolution of the generated video', + }), + ), + style: z.optional( + z + .enum(['anime', '3d_animation', 'clay', 'comic', 'cyberpunk']) + .register(z.globalRegistry, { + description: 'The style of the generated video', + }), + ), + duration: z.optional( + z.enum(['5', '8']).register(z.globalRegistry, { + description: + 'The duration of the generated video in seconds. 8s videos cost double. 
1080p videos are limited to 5 seconds', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The same seed and the same prompt given to the same version of the model will output the same video every time.', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt to be used for the generation', + }), + ) + .default(''), +}) + +/** + * VideoOutputV4 + */ +export const zSchemaPixverseV45TextToVideoFastOutput = z.object({ + video: zSchemaFile, +}) + +/** + * FastTextToVideoRequest + */ +export const zSchemaPixverseV45TextToVideoFastInput = z.object({ + prompt: z.string(), + aspect_ratio: z.optional( + z.enum(['16:9', '4:3', '1:1', '3:4', '9:16']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), + resolution: z.optional( + z.enum(['360p', '540p', '720p']).register(z.globalRegistry, { + description: 'The resolution of the generated video', + }), + ), + style: z.optional( + z + .enum(['anime', '3d_animation', 'clay', 'comic', 'cyberpunk']) + .register(z.globalRegistry, { + description: 'The style of the generated video', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'The same seed and the same prompt given to the same version of the model will output the same video every time.', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt to be used for the generation', + }), + ) + .default(''), +}) + +/** + * TextToVideoOutput + */ +export const zSchemaLtxVideo13bDistilledOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * LoRAWeight + */ +export const zSchemaLoRaWeight = z.object({ + path: z.string().register(z.globalRegistry, { + description: 'URL or path to the LoRA weights.', + }), + scale: z + .optional( + z.number().gte(0).lte(4).register(z.globalRegistry, { + description: + 'Scale of the LoRA weight. This is a multiplier applied to the LoRA weight when loading it.', + }), + ) + .default(1), + weight_name: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Name of the LoRA weight. Only used if `path` is a HuggingFace repository, and is only required when the repository contains multiple LoRA weights.', + }), + ), +}) + +/** + * DistilledTextToVideoInput + * + * Distilled model input + */ +export const zSchemaLtxVideo13bDistilledInput = z + .object({
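+ // Illustrative note, not part of the generated schemas: per the field descriptions below, the distilled LTX pipeline denoises in two passes; `first_pass_skip_final_steps` hands fine detail off to the second pass, while `second_pass_skip_initial_steps` keeps the second pass from redoing broad structure. Defaults apply when a LoRA entry is parsed on its own, e.g. (made-up URL): + //   zSchemaLoRaWeight.parse({ path: 'https://example.com/detail.safetensors' }) + //   // => { path: 'https://example.com/detail.safetensors', scale: 1 } + second_pass_skip_initial_steps: z + .optional( + z.int().gte(1).lte(20).register(z.globalRegistry, { + description: + 'The number of inference steps to skip in the initial steps of the second pass. 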
By skipping some steps at the beginning, the second pass can focus on smaller details instead of larger changes.', + }), + ) + .default(5), + first_pass_num_inference_steps: z + .optional( + z.int().gte(2).lte(20).register(z.globalRegistry, { + description: 'Number of inference steps during the first pass.', + }), + ) + .default(8), + frame_rate: z + .optional( + z.int().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frame rate of the video.', + }), + ) + .default(30), + reverse_video: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to reverse the video.', + }), + ) + .default(false), + prompt: z.string().register(z.globalRegistry, { + description: 'Text prompt to guide generation', + }), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to expand the prompt using a language model.', + }), + ) + .default(false), + loras: z + .optional( + z.array(zSchemaLoRaWeight).register(z.globalRegistry, { + description: 'LoRA weights to use for generation', + }), + ) + .default([]), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + num_frames: z + .optional( + z.int().gte(9).lte(161).register(z.globalRegistry, { + description: 'The number of frames in the video.', + }), + ) + .default(121), + second_pass_num_inference_steps: z + .optional( + z.int().gte(2).lte(20).register(z.globalRegistry, { + description: 'Number of inference steps during the second pass.', + }), + ) + .default(8), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for generation', + }), + ) + .default( + 'worst quality, inconsistent motion, blurry, jittery, distorted', + ), + resolution: z.optional( + z.enum(['480p', '720p']).register(z.globalRegistry, { + description: 'Resolution of the generated video (480p or 720p).', + }), + ), + aspect_ratio: z.optional( + z.enum(['9:16', '1:1', '16:9']).register(z.globalRegistry, { + description: 'Aspect ratio of the generated video (16:9, 1:1 or 9:16).', + }), + ), + first_pass_skip_final_steps: z + .optional( + z.int().gte(0).lte(20).register(z.globalRegistry, { + description: + 'Number of inference steps to skip in the final steps of the first pass. By skipping some steps at the end, the first pass can focus on larger changes instead of smaller details.', + }), + ) + .default(1), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for generation', + }), + ), + }) + .register(z.globalRegistry, { + description: 'Distilled model input', + }) + +/** + * TextToVideoOutput + */ +export const zSchemaLtxVideo13bDevOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * TextToVideoInput + */ +export const zSchemaLtxVideo13bDevInput = z.object({ + second_pass_skip_initial_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: + 'The number of inference steps to skip in the initial steps of the second pass. 
By skipping some steps at the beginning, the second pass can focus on smaller details instead of larger changes.', + }), + ) + .default(17), + first_pass_num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps during the first pass.', + }), + ) + .default(30), + frame_rate: z + .optional( + z.int().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frame rate of the video.', + }), + ) + .default(30), + prompt: z.string().register(z.globalRegistry, { + description: 'Text prompt to guide generation', + }), + reverse_video: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to reverse the video.', + }), + ) + .default(false), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to expand the prompt using a language model.', + }), + ) + .default(false), + loras: z + .optional( + z.array(zSchemaLoRaWeight).register(z.globalRegistry, { + description: 'LoRA weights to use for generation', + }), + ) + .default([]), + second_pass_num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps during the second pass.', + }), + ) + .default(30), + num_frames: z + .optional( + z.int().gte(9).lte(161).register(z.globalRegistry, { + description: 'The number of frames in the video.', + }), + ) + .default(121), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for generation', + }), + ) + .default('worst quality, inconsistent motion, blurry, jittery, distorted'), + resolution: z.optional( + z.enum(['480p', '720p']).register(z.globalRegistry, { + description: 'Resolution of the generated video (480p or 720p).', + }), + ), + aspect_ratio: z.optional( + z.enum(['9:16', '1:1', '16:9']).register(z.globalRegistry, { + description: 'Aspect ratio of the generated video (16:9, 1:1 or 9:16).', + }), + ), + first_pass_skip_final_steps: z + .optional( + z.int().gte(0).lte(50).register(z.globalRegistry, { + description: + 'Number of inference steps to skip in the final steps of the first pass. 
By skipping some steps at the end, the first pass can focus on larger changes instead of smaller details.', + }), + ) + .default(3), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for generation', + }), + ), +}) + +/** + * TextToVideoV21MasterOutput + */ +export const zSchemaKlingVideoV21MasterTextToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * TextToVideoV21MasterRequest + */ +export const zSchemaKlingVideoV21MasterTextToVideoInput = z.object({ + prompt: z.string().max(2500), + duration: z.optional( + z.enum(['5', '10']).register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video frame', + }), + ), + negative_prompt: z + .optional(z.string().max(2500)) + .default('blur, distort, and low quality'), + cfg_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt.', + }), + ) + .default(0.5), +}) + +/** + * SeedanceVideoOutput + */ +export const zSchemaBytedanceSeedanceV1LiteTextToVideoOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for generation', + }), + video: zSchemaFile, +}) + +/** + * SeedanceTextToVideoInput + */ +export const zSchemaBytedanceSeedanceV1LiteTextToVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt used to generate the video', + }), + resolution: z.optional( + z.enum(['480p', '720p', '1080p']).register(z.globalRegistry, { + description: + 'Video resolution - 480p for faster generation, 720p for balance, 1080p for higher quality', + }), + ), + duration: z.optional( + z + .enum(['2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12']) + .register(z.globalRegistry, { + description: 'Duration of the video in seconds', + }), + ), + aspect_ratio: z.optional( + z + .enum(['21:9', '16:9', '4:3', '1:1', '3:4', '9:16', '9:21']) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true),
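+ // Illustrative note, not part of the generated schemas: field descriptions are attached through `z.globalRegistry`, so tooling can read them back at runtime. A minimal sketch assuming Zod 4's registry API; `.unwrap()` peels the optional wrapper to reach the registered enum: + //   const meta = z.globalRegistry.get( + //     zSchemaBytedanceSeedanceV1LiteTextToVideoInput.shape.resolution.unwrap(), + //   ) + //   meta?.description // => 'Video resolution - 480p for faster generation, ...' + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed to control video generation. 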
Use -1 for random.', + }), + ), + camera_fixed: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to fix the camera position', + }), + ) + .default(false), +}) + +/** + * SeedanceProT2VVideoOutput + */ +export const zSchemaBytedanceSeedanceV1ProTextToVideoOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for generation', + }), + video: zSchemaFile, +}) + +/** + * SeedanceProTextToVideoInput + */ +export const zSchemaBytedanceSeedanceV1ProTextToVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt used to generate the video', + }), + resolution: z.optional( + z.enum(['480p', '720p', '1080p']).register(z.globalRegistry, { + description: + 'Video resolution - 480p for faster generation, 720p for balance, 1080p for higher quality', + }), + ), + duration: z.optional( + z + .enum(['2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12']) + .register(z.globalRegistry, { + description: 'Duration of the video in seconds', + }), + ), + aspect_ratio: z.optional( + z + .enum(['21:9', '16:9', '4:3', '1:1', '3:4', '9:16']) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed to control video generation. Use -1 for random.', + }), + ), + camera_fixed: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to fix the camera position', + }), + ) + .default(false), +}) + +/** + * TextToVideoHailuo02Output + */ +export const zSchemaMinimaxHailuo02ProTextToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * ProTextToVideoHailuo02Input + */ +export const zSchemaMinimaxHailuo02ProTextToVideoInput = z.object({ + prompt_optimizer: z + .optional( + z.boolean().register(z.globalRegistry, { + description: "Whether to use the model's prompt optimizer", + }), + ) + .default(true), + prompt: z.string().min(1).max(2000), +}) + +/** + * TextToVideoOutput + */ +export const zSchemaLtxv13B098DistilledOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * DistilledTextToVideoInput + * + * Distilled model input + */ +export const zSchemaLtxv13B098DistilledInput = z + .object({ + second_pass_skip_initial_steps: z + .optional( + z.int().gte(1).lte(11).register(z.globalRegistry, { + description: + 'The number of inference steps to skip in the initial steps of the second pass. 
By skipping some steps at the beginning, the second pass can focus on smaller details instead of larger changes.', + }), + ) + .default(5), + first_pass_num_inference_steps: z + .optional( + z.int().gte(2).lte(12).register(z.globalRegistry, { + description: 'Number of inference steps during the first pass.', + }), + ) + .default(8), + frame_rate: z + .optional( + z.int().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frame rate of the video.', + }), + ) + .default(24), + reverse_video: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to reverse the video.', + }), + ) + .default(false), + prompt: z.string().register(z.globalRegistry, { + description: 'Text prompt to guide generation', + }), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to expand the prompt using a language model.', + }), + ) + .default(false), + temporal_adain_factor: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The factor for adaptive instance normalization (AdaIN) applied to generated video chunks after the first. This can help deal with a gradual increase in saturation/contrast in the generated video by normalizing the color distribution across the video. A high value will ensure the color distribution is more consistent across the video, while a low value will allow for more variation in color distribution.', + }), + ) + .default(0.5), + loras: z + .optional( + z.array(zSchemaLoRaWeight).register(z.globalRegistry, { + description: 'LoRA weights to use for generation', + }), + ) + .default([]), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + num_frames: z + .optional( + z.int().gte(9).lte(1441).register(z.globalRegistry, { + description: 'The number of frames in the video.', + }), + ) + .default(121), + second_pass_num_inference_steps: z + .optional( + z.int().gte(2).lte(12).register(z.globalRegistry, { + description: 'Number of inference steps during the second pass.', + }), + ) + .default(8), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for generation', + }), + ) + .default( + 'worst quality, inconsistent motion, blurry, jittery, distorted', + ), + enable_detail_pass: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to use a detail pass. If True, the model will perform a second pass to refine the video and enhance details. This incurs a 2.0x cost multiplier on the base price.', + }), + ) + .default(false), + resolution: z.optional( + z.enum(['480p', '720p']).register(z.globalRegistry, { + description: 'Resolution of the generated video.', + }), + ), + aspect_ratio: z.optional( + z.enum(['9:16', '1:1', '16:9']).register(z.globalRegistry, { + description: 'Aspect ratio of the generated video.', + }), + ), + tone_map_compression_ratio: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The compression ratio for tone mapping. This is used to compress the dynamic range of the video to improve visual quality. 
A value of 0.0 means no compression, while a value of 1.0 means maximum compression.', + }), + ) + .default(0), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for generation', + }), + ), + }) + .register(z.globalRegistry, { + description: 'Distilled model input', + }) + +/** + * WanT2VResponse + */ +export const zSchemaWanV22A14bTextToVideoOutput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The text prompt used for video generation.', + }), + ) + .default(''), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * WanT2VRequest + */ +export const zSchemaWanV22A14bTextToVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide video generation.', + }), + shift: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'Shift value for the video. Must be between 1.0 and 10.0.', + }), + ) + .default(5), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level to use. The more acceleration, the faster the generation, but with lower quality. The recommended value is 'regular'.", + }), + ), + num_interpolated_frames: z + .optional( + z.int().gte(0).lte(4).register(z.globalRegistry, { + description: + 'Number of frames to interpolate between each pair of generated frames. Must be between 0 and 4.', + }), + ) + .default(1), + frames_per_second: z + .optional( + z.int().gte(4).lte(60).register(z.globalRegistry, { + description: + 'Frames per second of the generated video. Must be between 4 to 60. When using interpolation and `adjust_fps_for_interpolation` is set to true (default true,) the final FPS will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If `adjust_fps_for_interpolation` is set to false, this value will be used as-is.', + }), + ) + .default(16), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.', + }), + ) + .default(3.5), + num_frames: z + .optional( + z.int().gte(17).lte(161).register(z.globalRegistry, { + description: + 'Number of frames to generate. Must be between 17 to 161 (inclusive).', + }), + ) + .default(81), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, input data will be checked for safety before processing.', + }), + ) + .default(false), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for video generation.', + }), + ) + .default(''), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: + 'The write mode of the output video. 
Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size.', + }), + ), + resolution: z.optional( + z.enum(['480p', '580p', '720p']).register(z.globalRegistry, { + description: 'Resolution of the generated video (480p, 580p, or 720p).', + }), + ), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: 'Aspect ratio of the generated video (16:9 or 9:16).', + }), + ), + enable_output_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, output video will be checked for safety after generation.', + }), + ) + .default(false), + guidance_scale_2: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Guidance scale for the second stage of the model. This is used to control the adherence to the prompt in the second stage of the model.', + }), + ) + .default(4), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: + 'The quality of the output video. Higher quality means better visual quality but larger file size.', + }), + ), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(2).lte(40).register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. Higher values give better quality but take longer.', + }), + ) + .default(27), + interpolator_model: z.optional( + z.enum(['none', 'film', 'rife']).register(z.globalRegistry, { + description: + 'The model to use for frame interpolation. If None, no interpolation is applied.', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), + adjust_fps_for_interpolation: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the number of frames per second will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If false, the passed frames per second will be used as-is.', + }), + ) + .default(true), +}) + +/** + * WanSmallT2VResponse + */ +export const zSchemaWanV225bTextToVideoOutput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The text prompt used for video generation.', + }), + ) + .default(''), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * WanSmallT2VRequest + */ +export const zSchemaWanV225bTextToVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide video generation.', + }), + shift: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'Shift value for the video. 
Must be between 1.0 and 10.0.', + }), + ) + .default(5), + num_interpolated_frames: z + .optional( + z.int().gte(0).lte(4).register(z.globalRegistry, { + description: + 'Number of frames to interpolate between each pair of generated frames. Must be between 0 and 4.', + }), + ) + .default(0), + frames_per_second: z + .optional( + z.int().gte(4).lte(60).register(z.globalRegistry, { + description: + 'Frames per second of the generated video. Must be between 4 to 60. When using interpolation and `adjust_fps_for_interpolation` is set to true (default true,) the final FPS will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If `adjust_fps_for_interpolation` is set to false, this value will be used as-is.', + }), + ) + .default(24), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.', + }), + ) + .default(3.5), + num_frames: z + .optional( + z.int().gte(17).lte(161).register(z.globalRegistry, { + description: + 'Number of frames to generate. Must be between 17 to 161 (inclusive).', + }), + ) + .default(81), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, input data will be checked for safety before processing.', + }), + ) + .default(false), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for video generation.', + }), + ) + .default(''), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: + 'The write mode of the output video. Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size.', + }), + ), + resolution: z.optional( + z.enum(['580p', '720p']).register(z.globalRegistry, { + description: 'Resolution of the generated video (580p or 720p).', + }), + ), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: 'Aspect ratio of the generated video (16:9 or 9:16).', + }), + ), + enable_output_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, output video will be checked for safety after generation.', + }), + ) + .default(false), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: + 'The quality of the output video. Higher quality means better visual quality but larger file size.', + }), + ), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. Higher values give better quality but take longer.', + }), + ) + .default(40), + interpolator_model: z.optional( + z.enum(['none', 'film', 'rife']).register(z.globalRegistry, { + description: + 'The model to use for frame interpolation. 
If None, no interpolation is applied.', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), + adjust_fps_for_interpolation: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the number of frames per second will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If false, the passed frames per second will be used as-is.', + }), + ) + .default(true), +}) + +/** + * WanTurboT2VResponse + */ +export const zSchemaWanV22A14bTextToVideoTurboOutput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The text prompt used for video generation.', + }), + ) + .default(''), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * WanTurboT2VRequest + */ +export const zSchemaWanV22A14bTextToVideoTurboInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide video generation.', + }), + resolution: z.optional( + z.enum(['480p', '580p', '720p']).register(z.globalRegistry, { + description: 'Resolution of the generated video (480p, 580p, or 720p).', + }), + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level to use. The more acceleration, the faster the generation, but with lower quality. The recommended value is 'regular'.", + }), + ), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: 'Aspect ratio of the generated video (16:9 or 9:16).', + }), + ), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: + 'The write mode of the output video. Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size.', + }), + ), + enable_output_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, output video will be checked for safety after generation.', + }), + ) + .default(false), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: + 'The quality of the output video. Higher quality means better visual quality but larger file size.', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, input data will be checked for safety before processing.', + }), + ) + .default(false), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable prompt expansion. 
This will use a large language model to expand the prompt with additional details while maintaining the original meaning.', + }), + ) + .default(false), +}) + +/** + * WanSmallFastVideoT2VResponse + */ +export const zSchemaWanV225bTextToVideoFastWanOutput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The text prompt used for video generation.', + }), + ) + .default(''), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * WanSmallFastVideoT2VRequest + */ +export const zSchemaWanV225bTextToVideoFastWanInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide video generation.', + }), + num_interpolated_frames: z + .optional( + z.int().gte(0).lte(4).register(z.globalRegistry, { + description: + 'Number of frames to interpolate between each pair of generated frames. Must be between 0 and 4.', + }), + ) + .default(0), + frames_per_second: z + .optional( + z.int().gte(4).lte(60).register(z.globalRegistry, { + description: + 'Frames per second of the generated video. Must be between 4 to 60. When using interpolation and `adjust_fps_for_interpolation` is set to true (default true,) the final FPS will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If `adjust_fps_for_interpolation` is set to false, this value will be used as-is.', + }), + ) + .default(24), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, input data will be checked for safety before processing.', + }), + ) + .default(false), + num_frames: z + .optional( + z.int().gte(17).lte(161).register(z.globalRegistry, { + description: + 'Number of frames to generate. Must be between 17 to 161 (inclusive).', + }), + ) + .default(81), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.', + }), + ) + .default(3.5), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for video generation.', + }), + ) + .default(''), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: + 'The write mode of the output video. Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size.', + }), + ), + resolution: z.optional( + z.enum(['480p', '580p', '720p']).register(z.globalRegistry, { + description: 'Resolution of the generated video (480p, 580p, or 720p).', + }), + ), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: 'Aspect ratio of the generated video (16:9, 9:16, or 1:1).', + }), + ), + enable_output_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, output video will be checked for safety after generation.', + }), + ) + .default(false),
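+ // Illustrative note, not part of the generated schemas: callers validating untrusted request bodies can use `safeParse` instead of `parse` to avoid throwing. A minimal sketch with a hypothetical `body` value: + //   const result = zSchemaWanV225bTextToVideoFastWanInput.safeParse(body) + //   if (!result.success) console.warn(result.error.issues) // e.g. num_frames outside 17-161 + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: + 'The quality of the output video. 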
Higher quality means better visual quality but larger file size.', + }), + ), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning.', + }), + ) + .default(false), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), + interpolator_model: z.optional( + z.enum(['none', 'film', 'rife']).register(z.globalRegistry, { + description: + 'The model to use for frame interpolation. If None, no interpolation is applied.', + }), + ), + adjust_fps_for_interpolation: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the number of frames per second will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If false, the passed frames per second will be used as-is.', + }), + ) + .default(true), +}) + +/** + * WanSmallT2VResponse + */ +export const zSchemaWanV225bTextToVideoDistillOutput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The text prompt used for video generation.', + }), + ) + .default(''), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * WanDistillT2VRequest + */ +export const zSchemaWanV225bTextToVideoDistillInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide video generation.', + }), + shift: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'Shift value for the video. Must be between 1.0 and 10.0.', + }), + ) + .default(5), + num_interpolated_frames: z + .optional( + z.int().gte(0).lte(4).register(z.globalRegistry, { + description: + 'Number of frames to interpolate between each pair of generated frames. Must be between 0 and 4.', + }), + ) + .default(0), + frames_per_second: z + .optional( + z.int().gte(4).lte(60).register(z.globalRegistry, { + description: + 'Frames per second of the generated video. Must be between 4 to 60. When using interpolation and `adjust_fps_for_interpolation` is set to true (default true,) the final FPS will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If `adjust_fps_for_interpolation` is set to false, this value will be used as-is.', + }), + ) + .default(24), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.', + }), + ) + .default(1), + num_frames: z + .optional( + z.int().gte(17).lte(161).register(z.globalRegistry, { + description: + 'Number of frames to generate. 
Must be between 17 to 161 (inclusive).', + }), + ) + .default(81), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, input data will be checked for safety before processing.', + }), + ) + .default(false), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: + 'The write mode of the output video. Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size.', + }), + ), + resolution: z.optional( + z.enum(['580p', '720p']).register(z.globalRegistry, { + description: 'Resolution of the generated video (580p or 720p).', + }), + ), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: 'Aspect ratio of the generated video (16:9 or 9:16).', + }), + ), + enable_output_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, output video will be checked for safety after generation.', + }), + ) + .default(false), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: + 'The quality of the output video. Higher quality means better visual quality but larger file size.', + }), + ), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. Higher values give better quality but take longer.', + }), + ) + .default(40), + interpolator_model: z.optional( + z.enum(['none', 'film', 'rife']).register(z.globalRegistry, { + description: + 'The model to use for frame interpolation. If None, no interpolation is applied.', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), + adjust_fps_for_interpolation: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the number of frames per second will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If false, the passed frames per second will be used as-is.', + }), + ) + .default(true), +}) + +/** + * WanT2VResponse + */ +export const zSchemaWanV22A14bTextToVideoLoraOutput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The text prompt used for video generation.', + }), + ) + .default(''), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * WanLoRAT2VRequest + */ +export const zSchemaWanV22A14bTextToVideoLoraInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide video generation.', + }), + shift: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'Shift value for the video. 
Must be between 1.0 and 10.0.', + }), + ) + .default(5), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level to use. The more acceleration, the faster the generation, but with lower quality. The recommended value is 'regular'.", + }), + ), + num_interpolated_frames: z + .optional( + z.int().gte(0).lte(4).register(z.globalRegistry, { + description: + 'Number of frames to interpolate between each pair of generated frames. Must be between 0 and 4.', + }), + ) + .default(1), + reverse_video: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If true, the video will be reversed.', + }), + ) + .default(false), + loras: z + .optional( + z.array(zSchemaLoRaWeight).register(z.globalRegistry, { + description: 'LoRA weights to be used in the inference.', + }), + ) + .default([]), + frames_per_second: z + .optional( + z.int().gte(4).lte(60).register(z.globalRegistry, { + description: + 'Frames per second of the generated video. Must be between 4 to 60. When using interpolation and `adjust_fps_for_interpolation` is set to true (default true,) the final FPS will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If `adjust_fps_for_interpolation` is set to false, this value will be used as-is.', + }), + ) + .default(16), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, input data will be checked for safety before processing.', + }), + ) + .default(false), + num_frames: z + .optional( + z.int().gte(17).lte(161).register(z.globalRegistry, { + description: + 'Number of frames to generate. Must be between 17 to 161 (inclusive).', + }), + ) + .default(81), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.', + }), + ) + .default(3.5), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for video generation.', + }), + ) + .default(''), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: + 'The write mode of the output video. Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size.', + }), + ), + resolution: z.optional( + z.enum(['480p', '580p', '720p']).register(z.globalRegistry, { + description: 'Resolution of the generated video (480p, 580p, or 720p).', + }), + ), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: 'Aspect ratio of the generated video (16:9 or 9:16).', + }), + ), + enable_output_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, output video will be checked for safety after generation.', + }), + ) + .default(false), + guidance_scale_2: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Guidance scale for the second stage of the model. 
+/** + * MareyOutput + */ +export const zSchemaMareyT2vOutput = z.object({ + video: zSchemaFile, +}) + +/** + * MareyInputT2V + */ +export const zSchemaMareyT2vInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate a video from', + }), + duration: z.optional( + z.enum(['5s', '10s']).register(z.globalRegistry, { + description: 'The duration of the generated video.', + }), + ), + dimensions: z.optional( + z + .enum(['1920x1080', '1152x1152', '1536x1152', '1152x1536']) + .register(z.globalRegistry, { + description: + 'The dimensions of the generated video in width x height format.', + }), + ), + guidance_scale: z.optional(z.union([z.number(), z.unknown()])), + seed: z.optional(z.union([z.int(), z.unknown()])), + negative_prompt: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * AvatarSingleTextResponse + */ +export const zSchemaInfinitalkSingleTextOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * InfiniTalkSingleTextRequest + */ +export const zSchemaInfinitalkSingleTextInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide video generation.', + }), + resolution: z.optional( + z.enum(['480p', '720p']).register(z.globalRegistry, { + description: + 'Resolution of the video to generate. Must be either 480p or 720p.', + }), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: 'The acceleration level to use for generation.', + }), + ), + text_input: z.string().register(z.globalRegistry, { + description: 'The text input to guide video generation.', + }), + image_url: z.string().register(z.globalRegistry, { + description: + 'URL of the input image. 
If the input image does not match the chosen aspect ratio, it is resized and center cropped.', + }), + voice: z + .enum([ + 'Aria', + 'Roger', + 'Sarah', + 'Laura', + 'Charlie', + 'George', + 'Callum', + 'River', + 'Liam', + 'Charlotte', + 'Alice', + 'Matilda', + 'Will', + 'Jessica', + 'Eric', + 'Chris', + 'Brian', + 'Daniel', + 'Lily', + 'Bill', + ]) + .register(z.globalRegistry, { + description: 'The voice to use for speech generation', + }), + num_frames: z + .optional( + z.int().gte(41).lte(721).register(z.globalRegistry, { + description: 'Number of frames to generate. Must be between 41 and 721.', + }), + ) + .default(145), + seed: z + .optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ) + .default(42), +}) + +/** + * VideoOutputV5 + */ +export const zSchemaPixverseV5TextToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * TextToVideoRequest + */ +export const zSchemaPixverseV5TextToVideoInput = z.object({ + prompt: z.string(), + aspect_ratio: z.optional( + z.enum(['16:9', '4:3', '1:1', '3:4', '9:16']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), + resolution: z.optional( + z.enum(['360p', '540p', '720p', '1080p']).register(z.globalRegistry, { + description: 'The resolution of the generated video', + }), + ), + style: z.optional( + z + .enum(['anime', '3d_animation', 'clay', 'comic', 'cyberpunk']) + .register(z.globalRegistry, { + description: 'The style of the generated video', + }), + ), + duration: z.optional( + z.enum(['5', '8']).register(z.globalRegistry, { + description: + 'The duration of the generated video in seconds. 8s videos cost double. 1080p videos are limited to 5 seconds', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt to be used for the generation', + }), + ) + .default(''), +}) + +/** + * AvatarsAppOutput + */ +export const zSchemaAvatarsTextToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * Text2VideoInput + */ +export const zSchemaAvatarsTextToVideoInput = z.object({ + text: z.string(), + avatar_id: z + .enum([ + 'emily_vertical_primary', + 'emily_vertical_secondary', + 'marcus_vertical_primary', + 'marcus_vertical_secondary', + 'mira_vertical_primary', + 'mira_vertical_secondary', + 'jasmine_vertical_primary', + 'jasmine_vertical_secondary', + 'jasmine_vertical_walking', + 'aisha_vertical_walking', + 'elena_vertical_primary', + 'elena_vertical_secondary', + 'any_male_vertical_primary', + 'any_female_vertical_primary', + 'any_male_vertical_secondary', + 'any_female_vertical_secondary', + 'any_female_vertical_walking', + 'emily_primary', + 'emily_side', + 'marcus_primary', + 'marcus_side', + 'aisha_walking', + 'elena_primary', + 'elena_side', + 'any_male_primary', + 'any_female_primary', + 'any_male_side', + 'any_female_side', + ]) + .register(z.globalRegistry, { + description: 'The avatar to use for the video', + }), +}) + +/** + * VideoFile + */ +export const zSchemaVideoFile = z.object({ + file_size: z.optional( + z.int().register(z.globalRegistry, { + description: 'The size of the file in bytes.', + }), + ), + duration: z.optional( + z.number().register(z.globalRegistry, { + description: 'The duration of the video', + 
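+ // Editor's note: apart from `url`, every VideoFile field is optional metadata.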
}), + ), + height: z.optional( + z.int().register(z.globalRegistry, { + description: 'The height of the video', + }), + ), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), + fps: z.optional( + z.number().register(z.globalRegistry, { + description: 'The FPS of the video', + }), + ), + width: z.optional( + z.int().register(z.globalRegistry, { + description: 'The width of the video', + }), + ), + file_name: z.optional( + z.string().register(z.globalRegistry, { + description: + 'The name of the file. It will be auto-generated if not provided.', + }), + ), + content_type: z.optional( + z.string().register(z.globalRegistry, { + description: 'The mime type of the file.', + }), + ), + num_frames: z.optional( + z.int().register(z.globalRegistry, { + description: 'The number of frames in the video', + }), + ), + file_data: z.optional( + z.string().register(z.globalRegistry, { + description: 'File data', + }), + ), +}) + +/** + * VideoOutput + * + * Base output for video generation + */ +export const zSchemaWan25PreviewTextToVideoOutput = z + .object({ + actual_prompt: z.optional( + z.string().register(z.globalRegistry, { + description: 'The actual prompt used if prompt rewriting was enabled', + }), + ), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), + video: zSchemaVideoFile, + }) + .register(z.globalRegistry, { + description: 'Base output for video generation', + }) + +/** + * TextToVideoInput + * + * Input for text-to-video generation + */ +export const zSchemaWan25PreviewTextToVideoInput = z + .object({ + prompt: z.string().min(1).register(z.globalRegistry, { + description: + 'The text prompt for video generation. Supports Chinese and English, max 800 characters.', + }), + resolution: z.optional( + z.enum(['480p', '720p', '1080p']).register(z.globalRegistry, { + description: 'Video resolution tier', + }), + ), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), + duration: z.optional( + z.enum(['5', '10']).register(z.globalRegistry, { + description: + 'Duration of the generated video in seconds. Choose between 5 or 10 seconds.', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), + audio_url: z.optional( + z.string().register(z.globalRegistry, { + description: + '\nURL of the audio to use as the background music. Must be publicly accessible.\nLimit handling: If the audio duration exceeds the duration value (5 or 10 seconds),\nthe audio is truncated to the first 5 or 10 seconds, and the rest is discarded. If\nthe audio is shorter than the video, the remaining part of the video will be silent.\nFor example, if the audio is 3 seconds long and the video duration is 5 seconds, the\nfirst 3 seconds of the output video will have sound, and the last 2 seconds will be silent.\n- Format: WAV, MP3.\n- Duration: 3 to 30 s.\n- File size: Up to 15 MB.\n', + }), + ), + negative_prompt: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Negative prompt to describe content to avoid. 
Max 500 characters.', + }), + ), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable prompt rewriting using LLM. Improves results for short prompts but increases processing time.', + }), + ) + .default(true), + }) + .register(z.globalRegistry, { + description: 'Input for text-to-video generation', + }) + +/** + * OviT2VResponse + */ +export const zSchemaOviOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: z.optional(z.union([zSchemaFile, z.unknown()])), +}) + +/** + * OviT2VRequest + */ +export const zSchemaOviInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide video generation.', + }), + resolution: z.optional( + z + .enum([ + '512x992', + '992x512', + '960x512', + '512x960', + '720x720', + '448x1120', + '1120x448', + ]) + .register(z.globalRegistry, { + description: + 'Resolution of the generated video in WxH format. One of (512x992, 992x512, 960x512, 512x960, 720x720, 448x1120, or 1120x448).', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps.', + }), + ) + .default(30), + audio_negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for audio generation.', + }), + ) + .default('robotic, muffled, echo, distorted'), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for video generation.', + }), + ) + .default('jitter, bad hands, blur, distortion'), + seed: z.optional(z.union([z.int(), z.unknown()])), +}) + +/** + * ImageFile + */ +export const zSchemaImageFile = z.object({ + file_size: z.optional( + z.int().register(z.globalRegistry, { + description: 'The size of the file in bytes.', + }), + ), + height: z.optional( + z.int().register(z.globalRegistry, { + description: 'The height of the image', + }), + ), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), + width: z.optional( + z.int().register(z.globalRegistry, { + description: 'The width of the image', + }), + ), + file_name: z.optional( + z.string().register(z.globalRegistry, { + description: + 'The name of the file. 
It will be auto-generated if not provided.', + }), + ), + content_type: z.optional( + z.string().register(z.globalRegistry, { + description: 'The mime type of the file.', + }), + ), + file_data: z.optional( + z.string().register(z.globalRegistry, { + description: 'File data', + }), + ), +}) + +/** + * TextToVideoOutput + */ +export const zSchemaSora2TextToVideoOutput = z.object({ + spritesheet: z.optional(zSchemaImageFile), + thumbnail: z.optional(zSchemaImageFile), + video_id: z.string().register(z.globalRegistry, { + description: 'The ID of the generated video', + }), + video: zSchemaVideoFile, +}) + +/** + * TextToVideoInput + */ +export const zSchemaSora2TextToVideoInput = z.object({ + prompt: z.string().min(1).max(5000).register(z.globalRegistry, { + description: 'The text prompt describing the video you want to generate', + }), + duration: z.optional( + z + .union([z.literal(4), z.literal(8), z.literal(12)]) + .register(z.globalRegistry, { + description: 'Duration of the generated video in seconds', + }), + ), + resolution: z.optional( + z.enum(['720p']).register(z.globalRegistry, { + description: 'The resolution of the generated video', + }), + ), + model: z.optional( + z + .enum(['sora-2', 'sora-2-2025-12-08', 'sora-2-2025-10-06']) + .register(z.globalRegistry, { + description: + 'The model to use for the generation. When the default model is selected, the latest snapshot of the model will be used - otherwise, select a specific snapshot of the model.', + }), + ), + delete_video: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to delete the video after generation for privacy reasons. If True, the video cannot be used for remixing and will be permanently deleted.', + }), + ) + .default(true), + aspect_ratio: z.optional( + z.enum(['9:16', '16:9']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), +}) + +/** + * ProTextToVideoOutput + */ +export const zSchemaSora2TextToVideoProOutput = z.object({ + spritesheet: z.optional(zSchemaImageFile), + thumbnail: z.optional(zSchemaImageFile), + video_id: z.string().register(z.globalRegistry, { + description: 'The ID of the generated video', + }), + video: zSchemaVideoFile, +}) + +/** + * ProTextToVideoInput + */ +export const zSchemaSora2TextToVideoProInput = z.object({ + prompt: z.string().min(1).max(5000).register(z.globalRegistry, { + description: 'The text prompt describing the video you want to generate', + }), + duration: z.optional( + z + .union([z.literal(4), z.literal(8), z.literal(12)]) + .register(z.globalRegistry, { + description: 'Duration of the generated video in seconds', + }), + ), + resolution: z.optional( + z.enum(['720p', '1080p']).register(z.globalRegistry, { + description: 'The resolution of the generated video', + }), + ), + aspect_ratio: z.optional( + z.enum(['9:16', '16:9']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), + delete_video: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to delete the video after generation for privacy reasons. 
If True, the video cannot be used for remixing and will be permanently deleted.', + }), + ) + .default(true), +}) + +/** + * Veo31TextToVideoOutput + */ +export const zSchemaVeo31Output = z.object({ + video: zSchemaFile, +}) + +/** + * Veo31TextToVideoInput + */ +export const zSchemaVeo31Input = z.object({ + prompt: z.string().max(20000).register(z.globalRegistry, { + description: 'The text prompt describing the video you want to generate', + }), + duration: z.optional( + z.enum(['4s', '6s', '8s']).register(z.globalRegistry, { + description: 'The duration of the generated video.', + }), + ), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16']).register(z.globalRegistry, { + description: 'Aspect ratio of the generated video', + }), + ), + generate_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to generate audio for the video.', + }), + ) + .default(true), + auto_fix: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to automatically attempt to fix prompts that fail content policy or other validation checks by rewriting them.', + }), + ) + .default(true), + resolution: z.optional( + z.enum(['720p', '1080p', '4k']).register(z.globalRegistry, { + description: 'The resolution of the generated video.', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the random number generator.', + }), + ), + negative_prompt: z.optional( + z.string().register(z.globalRegistry, { + description: 'A negative prompt to guide the video generation.', + }), + ), +}) + +/** + * Veo31TextToVideoOutput + */ +export const zSchemaVeo31FastOutput = z.object({ + video: zSchemaFile, +}) + +/** + * Veo31TextToVideoInput + */ +export const zSchemaVeo31FastInput = z.object({ + prompt: z.string().max(20000).register(z.globalRegistry, { + description: 'The text prompt describing the video you want to generate', + }), + duration: z.optional( + z.enum(['4s', '6s', '8s']).register(z.globalRegistry, { + description: 'The duration of the generated video.', + }), + ), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16']).register(z.globalRegistry, { + description: 'Aspect ratio of the generated video', + }), + ), + generate_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to generate audio for the video.', + }), + ) + .default(true), + auto_fix: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to automatically attempt to fix prompts that fail content policy or other validation checks by rewriting them.', + }), + ) + .default(true), + resolution: z.optional( + z.enum(['720p', '1080p', '4k']).register(z.globalRegistry, { + description: 'The resolution of the generated video.', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the random number generator.', + }), + ), + negative_prompt: z.optional( + z.string().register(z.globalRegistry, { + description: 'A negative prompt to guide the video generation.', + }), + ), +}) + +/** + * KandinskyT2VResponse + */ +export const zSchemaKandinsky5TextToVideoOutput = z.object({ + video: z.optional(zSchemaFile), +}) + +/** + * KandinskyT2VRequest + */ +export const zSchemaKandinsky5TextToVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide video generation.', + }), + resolution: z.optional( + z.enum(['768x512']).register(z.globalRegistry, { + description: + 'Resolution of the generated video 
in WxH format. Will be calculated based on the aspect ratio (768x512, 512x512, 512x768).', + }), + ), + duration: z.optional( + z.enum(['5s', '10s']).register(z.globalRegistry, { + description: 'The length of the video to generate (5s or 10s)', + }), + ), + aspect_ratio: z.optional( + z.enum(['3:2', '1:1', '2:3']).register(z.globalRegistry, { + description: + 'Aspect ratio of the generated video. One of (3:2, 1:1, 2:3).', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps.', + }), + ) + .default(30), +}) + +/** + * KandinskyT2VResponse + */ +export const zSchemaKandinsky5TextToVideoDistillOutput = z.object({ + video: z.optional(zSchemaFile), +}) + +/** + * KandinskyT2VDistillRequest + */ +export const zSchemaKandinsky5TextToVideoDistillInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide video generation.', + }), + duration: z.optional( + z.enum(['5s', '10s']).register(z.globalRegistry, { + description: 'The length of the video to generate (5s or 10s)', + }), + ), + aspect_ratio: z.optional( + z.enum(['3:2', '1:1', '2:3']).register(z.globalRegistry, { + description: + 'Aspect ratio of the generated video. One of (3:2, 1:1, 2:3).', + }), + ), + resolution: z.optional( + z.enum(['768x512']).register(z.globalRegistry, { + description: + 'Resolution of the generated video in WxH format. Will be calculated based on the aspect ratio (768x512, 512x512, 512x768).', + }), + ), +}) + +/** + * WanAlphaResponse + */ +export const zSchemaWanAlphaOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation.', + }), + image: z.optional(zSchemaVideoFile), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + mask: z.optional(zSchemaVideoFile), + video: z.optional(zSchemaVideoFile), +}) + +/** + * WanAlphaRequest + */ +export const zSchemaWanAlphaInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to guide the video generation.', + }), + shift: z + .optional( + z.number().gte(1).lte(15).register(z.globalRegistry, { + description: 'The shift of the generated video.', + }), + ) + .default(10.5), + mask_clamp_upper: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The upper bound of the mask clamping.', + }), + ) + .default(0.75), + fps: z + .optional( + z.int().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frame rate of the generated video.', + }), + ) + .default(16), + mask_clamp_lower: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The lower bound of the mask clamping.', + }), + ) + .default(0.1), + num_frames: z + .optional( + z.int().gte(17).lte(121).register(z.globalRegistry, { + description: 'The number of frames to generate.', + }), + ) + .default(81), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable safety checker.', + }), + ) + .default(true), + mask_binarization_threshold: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The threshold for mask binarization. When binarize_mask is True, this threshold will be used to binarize the mask. 
This will also be used for transparency when the output type is `.webm`.', + }), + ) + .default(0.8), + sampler: z.optional( + z.enum(['unipc', 'dpm++', 'euler']).register(z.globalRegistry, { + description: 'The sampler to use.', + }), + ), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + resolution: z.optional( + z + .enum(['240p', '360p', '480p', '580p', '720p']) + .register(z.globalRegistry, { + description: 'The resolution of the generated video.', + }), + ), + aspect_ratio: z.optional( + z.enum(['16:9', '1:1', '9:16']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video.', + }), + ), + video_output_type: z.optional( + z + .enum(['X264 (.mp4)', 'VP9 (.webm)', 'PRORES4444 (.mov)', 'GIF (.gif)']) + .register(z.globalRegistry, { + description: 'The output type of the generated video.', + }), + ), + binarize_mask: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to binarize the mask.', + }), + ) + .default(false), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(2).lte(16).register(z.globalRegistry, { + description: 'The number of inference steps to use.', + }), + ) + .default(8), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the random number generator.', + }), + ), +}) + +/** + * VideoToVideoOutput + */ +export const zSchemaKreaWan14bTextToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * TextToVideoInput + */ +export const zSchemaKreaWan14bTextToVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Prompt for the video-to-video generation.', + }), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning.', + }), + ) + .default(false), + num_frames: z + .optional( + z.int().gte(18).lte(162).register(z.globalRegistry, { + description: + 'Number of frames to generate. 
Must be a multiple of 12 plus 6, for example 18, 30, 42, etc.', + }), + ) + .default(78), + seed: z.optional(z.union([z.int(), z.unknown()])), +}) + +/** + * Q2TextToVideoOutput + */ +export const zSchemaViduQ2TextToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * Q2TextToVideoRequest + */ +export const zSchemaViduQ2TextToVideoInput = z.object({ + prompt: z.string().max(3000).register(z.globalRegistry, { + description: 'Text prompt for video generation, max 3000 characters', + }), + resolution: z.optional( + z.enum(['360p', '520p', '720p', '1080p']).register(z.globalRegistry, { + description: 'Output video resolution', + }), + ), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: 'The aspect ratio of the output video', + }), + ), + duration: z.optional( + z + .union([ + z.literal(2), + z.literal(3), + z.literal(4), + z.literal(5), + z.literal(6), + z.literal(7), + z.literal(8), + ]) + .register(z.globalRegistry, { + description: 'Duration of the video in seconds', + }), + ), + bgm: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to add background music to the video (only for 4-second videos)', + }), + ) + .default(false), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), + movement_amplitude: z.optional( + z.enum(['auto', 'small', 'medium', 'large']).register(z.globalRegistry, { + description: 'The movement amplitude of objects in the frame', + }), + ), +}) + +/** + * SeedanceFastT2VVideoOutput + */ +export const zSchemaBytedanceSeedanceV1ProFastTextToVideoOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for generation', + }), + video: zSchemaFile, +}) + +/** + * SeedanceProFastTextToVideoInput + */ +export const zSchemaBytedanceSeedanceV1ProFastTextToVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt used to generate the video', + }), + resolution: z.optional( + z.enum(['480p', '720p', '1080p']).register(z.globalRegistry, { + description: + 'Video resolution - 480p for faster generation, 720p for balance, 1080p for higher quality', + }), + ), + duration: z.optional( + z + .enum(['2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12']) + .register(z.globalRegistry, { + description: 'Duration of the video in seconds', + }), + ), + aspect_ratio: z.optional( + z + .enum(['21:9', '16:9', '4:3', '1:1', '3:4', '9:16']) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed to control video generation. 
Use -1 for random.', + }), + ), + camera_fixed: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to fix the camera position', + }), + ) + .default(false), +}) + +/** + * ProTextToVideoHailuo23Output + */ +export const zSchemaMinimaxHailuo23ProTextToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * ProTextToVideoHailuo23Input + */ +export const zSchemaMinimaxHailuo23ProTextToVideoInput = z.object({ + prompt_optimizer: z + .optional( + z.boolean().register(z.globalRegistry, { + description: "Whether to use the model's prompt optimizer", + }), + ) + .default(true), + prompt: z.string().min(1).max(2000).register(z.globalRegistry, { + description: 'Text prompt for video generation', + }), +}) + +/** + * StandardTextToVideoHailuo23Output + */ +export const zSchemaMinimaxHailuo23StandardTextToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * StandardTextToVideoHailuo23Input + */ +export const zSchemaMinimaxHailuo23StandardTextToVideoInput = z.object({ + prompt_optimizer: z + .optional( + z.boolean().register(z.globalRegistry, { + description: "Whether to use the model's prompt optimizer", + }), + ) + .default(true), + duration: z.optional( + z.enum(['6', '10']).register(z.globalRegistry, { + description: 'The duration of the video in seconds.', + }), + ), + prompt: z.string().min(1).max(2000), +}) + +/** + * LongCatVideoResponse + */ +export const zSchemaLongcatVideoDistilledTextToVideo480pOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * LongCatVideoRequest + */ +export const zSchemaLongcatVideoDistilledTextToVideo480pInput = z.object({ + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video.', + }), + ), + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to guide the video generation.', + }), + video_output_type: z.optional( + z + .enum(['X264 (.mp4)', 'VP9 (.webm)', 'PRORES4444 (.mov)', 'GIF (.gif)']) + .register(z.globalRegistry, { + description: 'The output type of the generated video.', + }), + ), + fps: z + .optional( + z.int().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frame rate of the generated video.', + }), + ) + .default(15), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable safety checker.', + }), + ) + .default(true), + num_frames: z + .optional( + z.int().gte(17).lte(961).register(z.globalRegistry, { + description: 'The number of frames to generate.', + }), + ) + .default(162), + num_inference_steps: z + .optional( + z.int().gte(2).lte(16).register(z.globalRegistry, { + description: 'The number of inference steps to use.', + }), + ) + .default(12), + 
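+ // Editor's note: the distilled variant samples in 2-16 steps (default 12); the non-distilled CFG endpoints below use 8-50 steps (default 40) and expose guidance_scale and negative_prompt.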
seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the random number generator.', + }), + ), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), +}) + +/** + * LongCatVideoResponse + */ +export const zSchemaLongcatVideoDistilledTextToVideo720pOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * LongCat720PVideoRequest + */ +export const zSchemaLongcatVideoDistilledTextToVideo720pInput = z.object({ + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video.', + }), + ), + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to guide the video generation.', + }), + video_output_type: z.optional( + z + .enum(['X264 (.mp4)', 'VP9 (.webm)', 'PRORES4444 (.mov)', 'GIF (.gif)']) + .register(z.globalRegistry, { + description: 'The output type of the generated video.', + }), + ), + fps: z + .optional( + z.int().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frame rate of the generated video.', + }), + ) + .default(30), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + num_refine_inference_steps: z + .optional( + z.int().gte(2).lte(16).register(z.globalRegistry, { + description: 'The number of inference steps to use for refinement.', + }), + ) + .default(12), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable safety checker.', + }), + ) + .default(true), + num_frames: z + .optional( + z.int().gte(17).lte(961).register(z.globalRegistry, { + description: 'The number of frames to generate.', + }), + ) + .default(162), + num_inference_steps: z + .optional( + z.int().gte(2).lte(16).register(z.globalRegistry, { + description: 'The number of inference steps to use.', + }), + ) + .default(12), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the random number generator.', + }), + ), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), +}) + +/** + * LongCatVideoResponse + */ +export const zSchemaLongcatVideoTextToVideo480pOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * LongCatCFGVideoRequest + */ +export const zSchemaLongcatVideoTextToVideo480pInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to guide the video generation.', + }), + 
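+ // Editor's note: this CFG request adds acceleration, guidance_scale and a negative_prompt on top of the distilled parameters above.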
acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: 'The acceleration level to use for the video generation.', + }), + ), + fps: z + .optional( + z.int().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frame rate of the generated video.', + }), + ) + .default(15), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'The guidance scale to use for the video generation.', + }), + ) + .default(4), + num_frames: z + .optional( + z.int().gte(17).lte(961).register(z.globalRegistry, { + description: 'The number of frames to generate.', + }), + ) + .default(162), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable safety checker.', + }), + ) + .default(true), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to use for the video generation.', + }), + ) + .default( + 'Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards', + ), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + video_output_type: z.optional( + z + .enum(['X264 (.mp4)', 'VP9 (.webm)', 'PRORES4444 (.mov)', 'GIF (.gif)']) + .register(z.globalRegistry, { + description: 'The output type of the generated video.', + }), + ), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the random number generator.', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(8).lte(50).register(z.globalRegistry, { + description: + 'The number of inference steps to use for the video generation.', + }), + ) + .default(40), +}) + +/** + * LongCatVideoResponse + */ +export const zSchemaLongcatVideoTextToVideo720pOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * LongCat720PCFGVideoRequest + */ +export const zSchemaLongcatVideoTextToVideo720pInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to guide the video generation.', + }), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: 'The acceleration level to 
use for the video generation.', + }), + ), + fps: z + .optional( + z.int().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frame rate of the generated video.', + }), + ) + .default(30), + num_refine_inference_steps: z + .optional( + z.int().gte(8).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to use for refinement.', + }), + ) + .default(40), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'The guidance scale to use for the video generation.', + }), + ) + .default(4), + num_frames: z + .optional( + z.int().gte(17).lte(961).register(z.globalRegistry, { + description: 'The number of frames to generate.', + }), + ) + .default(162), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable safety checker.', + }), + ) + .default(true), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to use for the video generation.', + }), + ) + .default( + 'Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards', + ), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + video_output_type: z.optional( + z + .enum(['X264 (.mp4)', 'VP9 (.webm)', 'PRORES4444 (.mov)', 'GIF (.gif)']) + .register(z.globalRegistry, { + description: 'The output type of the generated video.', + }), + ), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(8).lte(50).register(z.globalRegistry, { + description: + 'The number of inference steps to use for the video generation.', + }), + ) + .default(40), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the random number generator.', + }), + ), +}) + +/** + * SanaVideoOutput + */ +export const zSchemaSanaVideoOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The random seed used for the generation process', + }), + video: zSchemaFile, +}) + +/** + * SanaVideoInput + */ +export const zSchemaSanaVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt describing the video to generate', + }), + resolution: z.optional( + z.enum(['480p']).register(z.globalRegistry, { + description: 'The resolution of the output video', + }), + ), + fps: z + .optional( + 
z.int().gte(8).lte(30).register(z.globalRegistry, { + description: 'Frames per second for the output video', + }), + ) + .default(16), + motion_score: z + .optional( + z.int().gte(0).lte(100).register(z.globalRegistry, { + description: 'Motion intensity score (higher = more motion)', + }), + ) + .default(30), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: + 'Guidance scale for generation (higher = more prompt adherence)', + }), + ) + .default(6), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'Number of denoising steps', + }), + ) + .default(28), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducible generation. If not provided, a random seed will be used.', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'The negative prompt describing what to avoid in the generation', + }), + ) + .default( + 'A chaotic sequence with misshapen, deformed limbs in heavy motion blur, sudden disappearance, jump cuts, jerky movements, rapid shot changes, frames out of sync, inconsistent character shapes, temporal artifacts, jitter, and ghosting effects, creating a disorienting visual experience.', + ), + num_frames: z + .optional( + z.int().gte(16).lte(200).register(z.globalRegistry, { + description: 'Number of frames to generate', + }), + ) + .default(81), +}) + +/** + * GenerationOutput + * + * Output model for text-to-video generation + */ +export const zSchemaInfinityStarTextToVideoOutput = z + .object({ + video: zSchemaFile, + }) + .register(z.globalRegistry, { + description: 'Output model for text-to-video generation', + }) + +/** + * GenerationInput + * + * Input model for text-to-video generation + */ +export const zSchemaInfinityStarTextToVideoInput = z + .object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Text prompt for generating the video', + }), + aspect_ratio: z.optional( + z.enum(['16:9', '1:1', '9:16']).register(z.globalRegistry, { + description: 'Aspect ratio of the generated output', + }), + ), + enhance_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to use an LLM to enhance the prompt.', + }), + ) + .default(true), + use_apg: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to use APG', + }), + ) + .default(true), + guidance_scale: z + .optional( + z.number().gte(1).lte(40).register(z.globalRegistry, { + description: 'Guidance scale for generation', + }), + ) + .default(7.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(100).register(z.globalRegistry, { + description: 'Number of inference steps', + }), + ) + .default(50), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. 
Leave empty for random generation.', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt to guide what to avoid in generation', + }), + ) + .default(''), + tau_video: z + .optional( + z.number().gte(0.1).lte(1).register(z.globalRegistry, { + description: 'Tau value for video scale', + }), + ) + .default(0.4), + }) + .register(z.globalRegistry, { + description: 'Input model for text-to-video generation', + }) + +/** + * HunyuanVideo15Response + */ +export const zSchemaHunyuanVideoV15TextToVideoOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * HunyuanVideo15T2VRequest + */ +export const zSchemaHunyuanVideoV15TextToVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the video.', + }), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16']).register(z.globalRegistry, { + description: 'The aspect ratio of the video.', + }), + ), + resolution: z.optional( + z.enum(['480p']).register(z.globalRegistry, { + description: 'The resolution of the video.', + }), + ), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Enable prompt expansion to enhance the input prompt.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for reproducibility.', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps.', + }), + ) + .default(28), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to guide what not to generate.', + }), + ) + .default(''), + num_frames: z + .optional( + z.int().gte(1).lte(121).register(z.globalRegistry, { + description: 'The number of frames to generate.', + }), + ) + .default(121), +}) + +/** + * LTXVTextToVideoResponse + */ +export const zSchemaLtx2TextToVideoOutput = z.object({ + video: zSchemaVideoFile, +}) + +/** + * LTXVTextToVideoRequest + */ +export const zSchemaLtx2TextToVideoInput = z.object({ + prompt: z.string().min(1).max(5000).register(z.globalRegistry, { + description: 'The prompt to generate the video from', + }), + aspect_ratio: z.optional( + z.enum(['16:9']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), + duration: z.optional( + z + .union([z.literal(6), z.literal(8), z.literal(10)]) + .register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ), + generate_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to generate audio for the generated video', + }), + ) + .default(true), + resolution: z.optional( + z.enum(['1080p', '1440p', '2160p']).register(z.globalRegistry, { + description: 'The resolution of the generated video', + }), + ), + fps: z.optional( + z.union([z.literal(25), z.literal(50)]).register(z.globalRegistry, { + description: 'The frames per second of the generated video', + }), + ), +}) + +/** + * LTXVTextToVideoResponse + */ +export const zSchemaLtx2TextToVideoFastOutput = z.object({ + video: zSchemaVideoFile, +}) + +/** + * LTXVTextToVideoFastRequest + */ +export const zSchemaLtx2TextToVideoFastInput = z.object({ + prompt: z.string().min(1).max(5000).register(z.globalRegistry, { + description: 'The prompt to generate the video from', + 
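+ // (same 1-5000 character limit as the standard LTX2 request above)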
}), + aspect_ratio: z.optional( + z.enum(['16:9']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), + duration: z.optional( + z + .union([ + z.literal(6), + z.literal(8), + z.literal(10), + z.literal(12), + z.literal(14), + z.literal(16), + z.literal(18), + z.literal(20), + ]) + .register(z.globalRegistry, { + description: + 'The duration of the generated video in seconds. The fast model supports 6-20 seconds. Note: Durations longer than 10 seconds (12, 14, 16, 18, 20) are only supported with 25 FPS and 1080p resolution.', + }), + ), + generate_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to generate audio for the generated video', + }), + ) + .default(true), + resolution: z.optional( + z.enum(['1080p', '1440p', '2160p']).register(z.globalRegistry, { + description: 'The resolution of the generated video', + }), + ), + fps: z.optional( + z.union([z.literal(25), z.literal(50)]).register(z.globalRegistry, { + description: 'The frames per second of the generated video', + }), + ), +}) + +/** + * VideoOutputV5_5 + */ +export const zSchemaPixverseV55TextToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * TextToVideoRequestV5_5 + */ +export const zSchemaPixverseV55TextToVideoInput = z.object({ + prompt: z.string(), + aspect_ratio: z.optional( + z.enum(['16:9', '4:3', '1:1', '3:4', '9:16']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), + resolution: z.optional( + z.enum(['360p', '540p', '720p', '1080p']).register(z.globalRegistry, { + description: 'The resolution of the generated video', + }), + ), + style: z.optional( + z + .enum(['anime', '3d_animation', 'clay', 'comic', 'cyberpunk']) + .register(z.globalRegistry, { + description: 'The style of the generated video', + }), + ), + thinking_type: z.optional( + z.enum(['enabled', 'disabled', 'auto']).register(z.globalRegistry, { + description: + "Prompt optimization mode: 'enabled' to optimize, 'disabled' to turn off, 'auto' for model decision", + }), + ), + generate_multi_clip_switch: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Enable multi-clip generation with dynamic camera changes', + }), + ) + .default(false), + duration: z.optional( + z.enum(['5', '8', '10']).register(z.globalRegistry, { + description: + 'The duration of the generated video in seconds. Longer durations cost more. 
1080p videos are limited to 5 or 8 seconds', + }), + ), + generate_audio_switch: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Enable audio generation (BGM, SFX, dialogue)', + }), + ) + .default(false), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt to be used for the generation', + }), + ) + .default(''), +}) + +/** + * TextToVideoV26ProOutput + */ +export const zSchemaKlingVideoV26ProTextToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * TextToVideoV26ProRequest + */ +export const zSchemaKlingVideoV26ProTextToVideoInput = z.object({ + prompt: z.string().max(2500), + duration: z.optional( + z.enum(['5', '10']).register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video frame', + }), + ), + generate_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to generate native audio for the video. Supports Chinese and English voice output. Other languages are automatically translated to English. For English speech, use lowercase letters; for acronyms or proper nouns, use uppercase.', + }), + ) + .default(true), + negative_prompt: z + .optional(z.string().max(2500)) + .default('blur, distort, and low quality'), + cfg_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt.\n ', + }), + ) + .default(0.5), +}) + +/** + * FabricOneTextOutput + */ +export const zSchemaFabric10TextOutput = z.object({ + video: zSchemaFile, +}) + +/** + * FabricOneTextInput + */ +export const zSchemaFabric10TextInput = z.object({ + text: z.string().min(1).max(2000), + resolution: z.enum(['720p', '480p']).register(z.globalRegistry, { + description: 'Resolution', + }), + voice_description: z.optional(z.union([z.string(), z.unknown()])), + image_url: z.url().min(1).max(2083), +}) + +/** + * TextToVideoOutput + * + * Output for text-to-video generation + */ +export const zSchemaV26TextToVideoOutput = z + .object({ + actual_prompt: z.optional( + z.string().register(z.globalRegistry, { + description: 'The actual prompt used if prompt rewriting was enabled', + }), + ), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), + video: zSchemaVideoFile, + }) + .register(z.globalRegistry, { + description: 'Output for text-to-video generation', + }) + +/** + * TextToVideoInput + * + * Input for Wan 2.6 text-to-video generation + */ +export const zSchemaV26TextToVideoInput = z + .object({ + prompt: z.string().min(1).register(z.globalRegistry, { + description: + "The text prompt for video generation. Supports Chinese and English, max 800 characters. For multi-shot videos, use format: 'Overall description. First shot [0-3s] content. Second shot [3-5s] content.'", + }), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16', '1:1', '4:3', '3:4']).register(z.globalRegistry, { + description: + 'The aspect ratio of the generated video. 
Wan 2.6 supports additional ratios.', + }), + ), + resolution: z.optional( + z.enum(['720p', '1080p']).register(z.globalRegistry, { + description: + 'Video resolution tier. Wan 2.6 T2V only supports 720p and 1080p (no 480p).', + }), + ), + duration: z.optional( + z.enum(['5', '10', '15']).register(z.globalRegistry, { + description: + 'Duration of the generated video in seconds. Choose between 5, 10, or 15 seconds.', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + audio_url: z.optional( + z.string().register(z.globalRegistry, { + description: + '\nURL of the audio to use as the background music. Must be publicly accessible.\nLimit handling: If the audio duration exceeds the duration value (5, 10, or 15 seconds),\nthe audio is truncated to the first N seconds, and the rest is discarded. If\nthe audio is shorter than the video, the remaining part of the video will be silent.\nFor example, if the audio is 3 seconds long and the video duration is 5 seconds, the\nfirst 3 seconds of the output video will have sound, and the last 2 seconds will be silent.\n- Format: WAV, MP3.\n- Duration: 3 to 30 s.\n- File size: Up to 15 MB.\n', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), + multi_shots: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'When true, enables intelligent multi-shot segmentation for coherent narrative videos. Only active when enable_prompt_expansion is True. Set to false for single-shot generation.', + }), + ) + .default(true), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Negative prompt to describe content to avoid. Max 500 characters.', + }), + ) + .default(''), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable prompt rewriting using LLM. 
Improves results for short prompts but increases processing time.', + }), + ) + .default(true), + }) + .register(z.globalRegistry, { + description: 'Input for Wan 2.6 text-to-video generation', + }) + +/** + * SeedanceProv15T2VVideoOutput + */ +export const zSchemaBytedanceSeedanceV15ProTextToVideoOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for generation', + }), + video: zSchemaFile, +}) + +/** + * SeedanceProv15TextToVideoInput + */ +export const zSchemaBytedanceSeedanceV15ProTextToVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt used to generate the video', + }), + resolution: z.optional( + z.enum(['480p', '720p', '1080p']).register(z.globalRegistry, { + description: + 'Video resolution - 480p for faster generation, 720p for balance, 1080p for higher quality', + }), + ), + aspect_ratio: z.optional( + z + .enum(['21:9', '16:9', '4:3', '1:1', '3:4', '9:16']) + .register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), + generate_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to generate audio for the video', + }), + ) + .default(true), + duration: z.optional( + z + .enum(['4', '5', '6', '7', '8', '9', '10', '11', '12']) + .register(z.globalRegistry, { + description: 'Duration of the video in seconds', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + camera_fixed: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to fix the camera position', + }), + ) + .default(false), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed to control video generation. Use -1 for random.', + }), + ), +}) + +/** + * KandinskyT2VResponse + */ +export const zSchemaKandinsky5ProTextToVideoOutput = z.object({ + video: z.optional(zSchemaFile), +}) + +/** + * KandinskyT2VRequest + */ +export const zSchemaKandinsky5ProTextToVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide video generation.', + }), + resolution: z.optional( + z.enum(['512P', '1024P']).register(z.globalRegistry, { + description: 'Video resolution: 512p or 1024p.', + }), + ), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: 'Acceleration level for faster generation.', + }), + ), + aspect_ratio: z.optional( + z.enum(['3:2', '1:1', '2:3']).register(z.globalRegistry, { + description: + 'Aspect ratio of the generated video. 
One of (3:2, 1:1, 2:3).', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps.', + }), + ) + .default(28), + duration: z.optional( + z.enum(['5s']).register(z.globalRegistry, { + description: 'The length of the video to generate. Only 5s is currently supported.', + }), + ), +}) + +/** + * LTX2TextToVideoOutput + */ +export const zSchemaLtx219bTextToVideoOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for the generation.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for the random number generator.', + }), + video: zSchemaVideoFile, +}) + +/** + * LTX2TextToVideoInput + */ +export const zSchemaLtx219bTextToVideoInput = z.object({ + use_multiscale: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details.', + }), + ) + .default(true), + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the video from.', + }), + acceleration: z.optional( + z.enum(['none', 'regular', 'high', 'full']).register(z.globalRegistry, { + description: 'The acceleration level to use.', + }), + ), + generate_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to generate audio for the video.', + }), + ) + .default(true), + fps: z + .optional( + z.number().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frames per second of the generated video.', + }), + ) + .default(25), + camera_lora: z.optional( + z + .enum([ + 'dolly_in', + 'dolly_out', + 'dolly_left', + 'dolly_right', + 'jib_up', + 'jib_down', + 'static', + 'none', + ]) + .register(z.globalRegistry, { + description: + 'The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.', + }), + ), + video_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'The guidance scale to use.', + }), + ) + .default(3), + num_frames: z + .optional( + z.int().gte(9).lte(481).register(z.globalRegistry, { + description: 'The number of frames to generate.', + }), + ) + .default(121), + camera_lora_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The scale of the camera LoRA to use. 
This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.', + }), + ) + .default(1), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to generate the video from.', + }), + ) + .default( + 'blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, off-sync audio,incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts.', + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + video_output_type: z.optional( + z + .enum(['X264 (.mp4)', 'VP9 (.webm)', 'PRORES4444 (.mov)', 'GIF (.gif)']) + .register(z.globalRegistry, { + description: 'The output type of the generated video.', + }), + ), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(8).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to use.', + }), + ) + .default(40), + seed: z.optional(z.union([z.int(), z.unknown()])), +}) + +/** + * LTX2TextToVideoOutput + */ +export const zSchemaLtx219bTextToVideoLoraOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for the generation.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for the random number generator.', + }), + video: zSchemaVideoFile, +}) + +/** + * LoRAInput + * + * LoRA weight configuration. 
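+ *
+ * A minimal conforming value, as a sketch (the repo id below is a
+ * hypothetical placeholder, not a real LoRA):
+ *
+ * @example
+ * const lora = zSchemaLoRaInput.parse({
+ *   path: 'some-owner/some-lora', // URL or HuggingFace repo ID
+ *   scale: 0.8, // must fall within [0, 4]; omitted values default to 1
+ * })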
+ */ +export const zSchemaLoRaInput = z + .object({ + path: z.string().register(z.globalRegistry, { + description: 'URL, HuggingFace repo ID (owner/repo) to lora weights.', + }), + scale: z + .optional( + z.number().gte(0).lte(4).register(z.globalRegistry, { + description: 'Scale factor for LoRA application (0.0 to 4.0).', + }), + ) + .default(1), + weight_name: z.optional(z.union([z.string(), z.unknown()])), + }) + .register(z.globalRegistry, { + description: 'LoRA weight configuration.', + }) + +/** + * LTX2LoRATextToVideoInput + */ +export const zSchemaLtx219bTextToVideoLoraInput = z.object({ + use_multiscale: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details.', + }), + ) + .default(true), + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the video from.', + }), + acceleration: z.optional( + z.enum(['none', 'regular', 'high', 'full']).register(z.globalRegistry, { + description: 'The acceleration level to use.', + }), + ), + generate_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to generate audio for the video.', + }), + ) + .default(true), + fps: z + .optional( + z.number().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frames per second of the generated video.', + }), + ) + .default(25), + loras: z.array(zSchemaLoRaInput).register(z.globalRegistry, { + description: 'The LoRAs to use for the generation.', + }), + camera_lora: z.optional( + z + .enum([ + 'dolly_in', + 'dolly_out', + 'dolly_left', + 'dolly_right', + 'jib_up', + 'jib_down', + 'static', + 'none', + ]) + .register(z.globalRegistry, { + description: + 'The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.', + }), + ), + video_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'The guidance scale to use.', + }), + ) + .default(3), + num_frames: z + .optional( + z.int().gte(9).lte(481).register(z.globalRegistry, { + description: 'The number of frames to generate.', + }), + ) + .default(121), + camera_lora_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The scale of the camera LoRA to use. 
This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.', + }), + ) + .default(1), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to generate the video from.', + }), + ) + .default( + 'blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, off-sync audio,incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts.', + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + video_output_type: z.optional( + z + .enum(['X264 (.mp4)', 'VP9 (.webm)', 'PRORES4444 (.mov)', 'GIF (.gif)']) + .register(z.globalRegistry, { + description: 'The output type of the generated video.', + }), + ), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(8).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to use.', + }), + ) + .default(40), + seed: z.optional(z.union([z.int(), z.unknown()])), +}) + +/** + * LTX2TextToVideoOutput + */ +export const zSchemaLtx219bDistilledTextToVideoOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for the generation.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for the random number generator.', + }), + video: zSchemaVideoFile, +}) + +/** + * LTX2DistilledTextToVideoInput + */ +export const zSchemaLtx219bDistilledTextToVideoInput = z.object({ + use_multiscale: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. 
This results in better coherence and details.', + }), + ) + .default(true), + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the video from.', + }), + acceleration: z.optional( + z.enum(['none', 'regular', 'high', 'full']).register(z.globalRegistry, { + description: 'The acceleration level to use.', + }), + ), + generate_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to generate audio for the video.', + }), + ) + .default(true), + fps: z + .optional( + z.number().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frames per second of the generated video.', + }), + ) + .default(25), + camera_lora: z.optional( + z + .enum([ + 'dolly_in', + 'dolly_out', + 'dolly_left', + 'dolly_right', + 'jib_up', + 'jib_down', + 'static', + 'none', + ]) + .register(z.globalRegistry, { + description: + 'The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.', + }), + ), + video_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + num_frames: z + .optional( + z.int().gte(9).lte(481).register(z.globalRegistry, { + description: 'The number of frames to generate.', + }), + ) + .default(121), + camera_lora_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The scale of the camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.', + }), + ) + .default(1), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to generate the video from.', + }), + ) + .default( + 'blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, off-sync audio,incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts.', + ), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + video_output_type: z.optional( + z + .enum(['X264 (.mp4)', 'VP9 (.webm)', 'PRORES4444 (.mov)', 'GIF (.gif)']) + .register(z.globalRegistry, { + description: 'The output type of the generated video.', + }), + ), + video_quality: z.optional( 
+ z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), + seed: z.optional(z.union([z.int(), z.unknown()])), +}) + +/** + * LTX2TextToVideoOutput + */ +export const zSchemaLtx219bDistilledTextToVideoLoraOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for the generation.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for the random number generator.', + }), + video: zSchemaVideoFile, +}) + +/** + * LTX2LoRADistilledTextToVideoInput + */ +export const zSchemaLtx219bDistilledTextToVideoLoraInput = z.object({ + use_multiscale: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details.', + }), + ) + .default(true), + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the video from.', + }), + acceleration: z.optional( + z.enum(['none', 'regular', 'high', 'full']).register(z.globalRegistry, { + description: 'The acceleration level to use.', + }), + ), + generate_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to generate audio for the video.', + }), + ) + .default(true), + fps: z + .optional( + z.number().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frames per second of the generated video.', + }), + ) + .default(25), + loras: z.array(zSchemaLoRaInput).register(z.globalRegistry, { + description: 'The LoRAs to use for the generation.', + }), + camera_lora: z.optional( + z + .enum([ + 'dolly_in', + 'dolly_out', + 'dolly_left', + 'dolly_right', + 'jib_up', + 'jib_down', + 'static', + 'none', + ]) + .register(z.globalRegistry, { + description: + 'The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.', + }), + ), + video_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + num_frames: z + .optional( + z.int().gte(9).lte(481).register(z.globalRegistry, { + description: 'The number of frames to generate.', + }), + ) + .default(121), + camera_lora_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The scale of the camera LoRA to use. 
This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.', + }), + ) + .default(1), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to generate the video from.', + }), + ) + .default( + 'blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, off-sync audio,incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts.', + ), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + video_output_type: z.optional( + z + .enum(['X264 (.mp4)', 'VP9 (.webm)', 'PRORES4444 (.mov)', 'GIF (.gif)']) + .register(z.globalRegistry, { + description: 'The output type of the generated video.', + }), + ), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), + seed: z.optional(z.union([z.int(), z.unknown()])), +}) + +/** + * VideoOutputV5_5 + */ +export const zSchemaPixverseV56TextToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * TextToVideoRequestV5_6 + */ +export const zSchemaPixverseV56TextToVideoInput = z.object({ + prompt: z.string(), + aspect_ratio: z.optional( + z.enum(['16:9', '4:3', '1:1', '3:4', '9:16']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video', + }), + ), + resolution: z.optional( + z.enum(['360p', '540p', '720p', '1080p']).register(z.globalRegistry, { + description: 'The resolution of the generated video', + }), + ), + style: z.optional( + z + .enum(['anime', '3d_animation', 'clay', 'comic', 'cyberpunk']) + .register(z.globalRegistry, { + description: 'The style of the generated video', + }), + ), + thinking_type: z.optional( + z.enum(['enabled', 'disabled', 'auto']).register(z.globalRegistry, { + description: + "Prompt optimization mode: 'enabled' to optimize, 'disabled' to turn off, 'auto' for model decision", + }), + ), + duration: z.optional( + z.enum(['5', '8', '10']).register(z.globalRegistry, { + description: + 'The duration of the generated 
video in seconds. 1080p videos are limited to 5 or 8 seconds', + }), + ), + generate_audio_switch: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Enable audio generation (BGM, SFX, dialogue)', + }), + ) + .default(false), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt to be used for the generation', + }), + ) + .default(''), +}) + +/** + * TextToVideoV2MasterOutput + */ +export const zSchemaKlingVideoV2MasterTextToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * TextToVideoV2MasterRequest + */ +export const zSchemaKlingVideoV2MasterTextToVideoInput = z.object({ + prompt: z.string().max(2500), + duration: z.optional( + z.enum(['5', '10']).register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video frame', + }), + ), + negative_prompt: z + .optional(z.string().max(2500)) + .default('blur, distort, and low quality'), + cfg_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt.\n ', + }), + ) + .default(0.5), +}) + +/** + * Veo3TextToVideoOutput + */ +export const zSchemaVeo3Output = z.object({ + video: zSchemaFile, +}) + +/** + * Veo3TextToVideoInput + */ +export const zSchemaVeo3Input = z.object({ + prompt: z.string().max(20000).register(z.globalRegistry, { + description: 'The text prompt describing the video you want to generate', + }), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video.', + }), + ), + duration: z.optional( + z.enum(['4s', '6s', '8s']).register(z.globalRegistry, { + description: 'The duration of the generated video.', + }), + ), + generate_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to generate audio for the video.', + }), + ) + .default(true), + auto_fix: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to automatically attempt to fix prompts that fail content policy or other validation checks by rewriting them.', + }), + ) + .default(true), + resolution: z.optional( + z.enum(['720p', '1080p']).register(z.globalRegistry, { + description: 'The resolution of the generated video.', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the random number generator.', + }), + ), + negative_prompt: z.optional( + z.string().register(z.globalRegistry, { + description: 'A negative prompt to guide the video generation.', + }), + ), +}) + +/** + * TextToVideoHailuo02Output + */ +export const zSchemaMinimaxHailuo02StandardTextToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * StandardTextToVideoHailuo02Input + */ +export const zSchemaMinimaxHailuo02StandardTextToVideoInput = z.object({ + prompt_optimizer: z + .optional( + z.boolean().register(z.globalRegistry, { + description: "Whether to use the model's prompt optimizer", + }), + ) + .default(true), + duration: z.optional( + z.enum(['6', 
'10']).register(z.globalRegistry, { + description: + 'The duration of the video in seconds. 10 seconds videos are not supported for 1080p resolution.', + }), + ), + prompt: z.string().min(1).max(2000), +}) + +/** + * Veo3TextToVideoOutput + */ +export const zSchemaVeo3FastOutput = z.object({ + video: zSchemaFile, +}) + +/** + * Veo3TextToVideoInput + */ +export const zSchemaVeo3FastInput = z.object({ + prompt: z.string().max(20000).register(z.globalRegistry, { + description: 'The text prompt describing the video you want to generate', + }), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video.', + }), + ), + duration: z.optional( + z.enum(['4s', '6s', '8s']).register(z.globalRegistry, { + description: 'The duration of the generated video.', + }), + ), + generate_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to generate audio for the video.', + }), + ) + .default(true), + auto_fix: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to automatically attempt to fix prompts that fail content policy or other validation checks by rewriting them.', + }), + ) + .default(true), + resolution: z.optional( + z.enum(['720p', '1080p']).register(z.globalRegistry, { + description: 'The resolution of the generated video.', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the random number generator.', + }), + ), + negative_prompt: z.optional( + z.string().register(z.globalRegistry, { + description: 'A negative prompt to guide the video generation.', + }), + ), +}) + +/** + * TextToVideoV25ProOutput + */ +export const zSchemaKlingVideoV25TurboProTextToVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * TextToVideoV25ProRequest + */ +export const zSchemaKlingVideoV25TurboProTextToVideoInput = z.object({ + prompt: z.string().max(2500), + duration: z.optional( + z.enum(['5', '10']).register(z.globalRegistry, { + description: 'The duration of the generated video in seconds', + }), + ), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video frame', + }), + ), + negative_prompt: z + .optional(z.string().max(2500)) + .default('blur, distort, and low quality'), + cfg_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt.\n ', + }), + ) + .default(0.5), +}) + +export const zSchemaQueueStatus = z.object({ + status: z.enum(['IN_QUEUE', 'IN_PROGRESS', 'COMPLETED']), + request_id: z.string().register(z.globalRegistry, { + description: 'The request id.', + }), + response_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The response url.', + }), + ), + status_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The status url.', + }), + ), + cancel_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The cancel url.', + }), + ), + logs: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The logs.', + }), + ), + metrics: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The metrics.', + }), + ), + queue_position: z.optional( + z.int().register(z.globalRegistry, { + description: 'The queue position.', + }), + 
), +}) + +export const zGetFalAiKlingVideoV25TurboProTextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoV25TurboProTextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoV25TurboProTextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKlingVideoV25TurboProTextToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoV25TurboProTextToVideoData = z.object({ + body: zSchemaKlingVideoV25TurboProTextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKlingVideoV25TurboProTextToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiKlingVideoV25TurboProTextToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiKlingVideoV25TurboProTextToVideoRequestsByRequestIdResponse = + zSchemaKlingVideoV25TurboProTextToVideoOutput + +export const zGetFalAiVeo3FastRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiVeo3FastRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiVeo3FastRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiVeo3FastRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiVeo3FastData = z.object({ + body: zSchemaVeo3FastInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
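+ *
+ * A submission sketch, assuming the `@fal-ai/client` queue API, a configured
+ * `FAL_KEY`, and the `fal-ai/veo3/fast` endpoint id (inferred from the
+ * schema name, so treat it as an assumption):
+ *
+ * @example
+ * import { fal } from '@fal-ai/client'
+ *
+ * const input = zSchemaVeo3FastInput.parse({ prompt: 'A calm ocean at dawn' })
+ * const submitted = await fal.queue.submit('fal-ai/veo3/fast', { input })
+ * // The acknowledgement is a queue status: IN_QUEUE plus a request_id
+ * const ack = zSchemaQueueStatus.parse(submitted)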
+ */ +export const zPostFalAiVeo3FastResponse = zSchemaQueueStatus + +export const zGetFalAiVeo3FastRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiVeo3FastRequestsByRequestIdResponse = + zSchemaVeo3FastOutput + +export const zGetFalAiMinimaxHailuo02StandardTextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiMinimaxHailuo02StandardTextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMinimaxHailuo02StandardTextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiMinimaxHailuo02StandardTextToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMinimaxHailuo02StandardTextToVideoData = z.object({ + body: zSchemaMinimaxHailuo02StandardTextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMinimaxHailuo02StandardTextToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiMinimaxHailuo02StandardTextToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiMinimaxHailuo02StandardTextToVideoRequestsByRequestIdResponse = + zSchemaMinimaxHailuo02StandardTextToVideoOutput + +export const zGetFalAiVeo3RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiVeo3RequestsByRequestIdStatusResponse = zSchemaQueueStatus + +export const zPutFalAiVeo3RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiVeo3RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiVeo3Data = z.object({ + body: zSchemaVeo3Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiVeo3Response = zSchemaQueueStatus + +export const zGetFalAiVeo3RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiVeo3RequestsByRequestIdResponse = zSchemaVeo3Output + +export const zGetFalAiKlingVideoV2MasterTextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoV2MasterTextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoV2MasterTextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKlingVideoV2MasterTextToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoV2MasterTextToVideoData = z.object({ + body: zSchemaKlingVideoV2MasterTextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKlingVideoV2MasterTextToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiKlingVideoV2MasterTextToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiKlingVideoV2MasterTextToVideoRequestsByRequestIdResponse = + zSchemaKlingVideoV2MasterTextToVideoOutput + +export const zGetFalAiPixverseV56TextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
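+ *
+ * A polling sketch, assuming the `@fal-ai/client` queue API; the endpoint id
+ * and back-off interval are placeholders:
+ *
+ * @example
+ * import { fal } from '@fal-ai/client'
+ *
+ * const endpoint = 'fal-ai/pixverse/v5.6/text-to-video' // assumed id
+ * // requestId comes from a prior fal.queue.submit(...)
+ * let status = zSchemaQueueStatus.parse(
+ *   await fal.queue.status(endpoint, { requestId, logs: true }),
+ * )
+ * while (status.status !== 'COMPLETED') {
+ *   await new Promise((r) => setTimeout(r, 2000))
+ *   status = zSchemaQueueStatus.parse(
+ *     await fal.queue.status(endpoint, { requestId }),
+ *   )
+ * }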
+ */ +export const zGetFalAiPixverseV56TextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPixverseV56TextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPixverseV56TextToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPixverseV56TextToVideoData = z.object({ + body: zSchemaPixverseV56TextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPixverseV56TextToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiPixverseV56TextToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPixverseV56TextToVideoRequestsByRequestIdResponse = + zSchemaPixverseV56TextToVideoOutput + +export const zGetFalAiLtx219bDistilledTextToVideoLoraRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLtx219bDistilledTextToVideoLoraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtx219bDistilledTextToVideoLoraRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLtx219bDistilledTextToVideoLoraRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtx219bDistilledTextToVideoLoraData = z.object({ + body: zSchemaLtx219bDistilledTextToVideoLoraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtx219bDistilledTextToVideoLoraResponse = + zSchemaQueueStatus + +export const zGetFalAiLtx219bDistilledTextToVideoLoraRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
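+ *
+ * A retrieval sketch, assuming the `@fal-ai/client` queue API (the endpoint
+ * id is inferred from the schema name, and the video file payload is assumed
+ * to expose a `url` field):
+ *
+ * @example
+ * import { fal } from '@fal-ai/client'
+ *
+ * const { data } = await fal.queue.result(
+ *   'fal-ai/ltx-2-19b/distilled/text-to-video/lora',
+ *   { requestId }, // from a prior fal.queue.submit(...)
+ * )
+ * const output =
+ *   zGetFalAiLtx219bDistilledTextToVideoLoraRequestsByRequestIdResponse.parse(data)
+ * console.log(output.seed, output.video.url)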
+ */ +export const zGetFalAiLtx219bDistilledTextToVideoLoraRequestsByRequestIdResponse = + zSchemaLtx219bDistilledTextToVideoLoraOutput + +export const zGetFalAiLtx219bDistilledTextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLtx219bDistilledTextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtx219bDistilledTextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLtx219bDistilledTextToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtx219bDistilledTextToVideoData = z.object({ + body: zSchemaLtx219bDistilledTextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtx219bDistilledTextToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiLtx219bDistilledTextToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiLtx219bDistilledTextToVideoRequestsByRequestIdResponse = + zSchemaLtx219bDistilledTextToVideoOutput + +export const zGetFalAiLtx219bTextToVideoLoraRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLtx219bTextToVideoLoraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtx219bTextToVideoLoraRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLtx219bTextToVideoLoraRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtx219bTextToVideoLoraData = z.object({ + body: zSchemaLtx219bTextToVideoLoraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiLtx219bTextToVideoLoraResponse = zSchemaQueueStatus + +export const zGetFalAiLtx219bTextToVideoLoraRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLtx219bTextToVideoLoraRequestsByRequestIdResponse = + zSchemaLtx219bTextToVideoLoraOutput + +export const zGetFalAiLtx219bTextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLtx219bTextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtx219bTextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLtx219bTextToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtx219bTextToVideoData = z.object({ + body: zSchemaLtx219bTextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtx219bTextToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiLtx219bTextToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLtx219bTextToVideoRequestsByRequestIdResponse = + zSchemaLtx219bTextToVideoOutput + +export const zGetFalAiKandinsky5ProTextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKandinsky5ProTextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKandinsky5ProTextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
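+ *
+ * A cancellation sketch using the `cancel_url` carried by the queue status;
+ * the raw `fetch` call and the `Key` authorization header follow fal's REST
+ * convention but are assumptions here:
+ *
+ * @example
+ * const status = zSchemaQueueStatus.parse(latestStatus) // from a status poll
+ * if (status.status !== 'COMPLETED' && status.cancel_url) {
+ *   const res = await fetch(status.cancel_url, {
+ *     method: 'PUT',
+ *     headers: { Authorization: `Key ${process.env.FAL_KEY}` },
+ *   })
+ *   const { success } =
+ *     zPutFalAiKandinsky5ProTextToVideoRequestsByRequestIdCancelResponse.parse(
+ *       await res.json(),
+ *     )
+ * }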
+ */ +export const zPutFalAiKandinsky5ProTextToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKandinsky5ProTextToVideoData = z.object({ + body: zSchemaKandinsky5ProTextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKandinsky5ProTextToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiKandinsky5ProTextToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiKandinsky5ProTextToVideoRequestsByRequestIdResponse = + zSchemaKandinsky5ProTextToVideoOutput + +export const zGetFalAiBytedanceSeedanceV15ProTextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiBytedanceSeedanceV15ProTextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiBytedanceSeedanceV15ProTextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiBytedanceSeedanceV15ProTextToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiBytedanceSeedanceV15ProTextToVideoData = z.object({ + body: zSchemaBytedanceSeedanceV15ProTextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiBytedanceSeedanceV15ProTextToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiBytedanceSeedanceV15ProTextToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiBytedanceSeedanceV15ProTextToVideoRequestsByRequestIdResponse = + zSchemaBytedanceSeedanceV15ProTextToVideoOutput + +export const zGetWanV26TextToVideoRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetWanV26TextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutWanV26TextToVideoRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutWanV26TextToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostWanV26TextToVideoData = z.object({ + body: zSchemaV26TextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostWanV26TextToVideoResponse = zSchemaQueueStatus + +export const zGetWanV26TextToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetWanV26TextToVideoRequestsByRequestIdResponse = + zSchemaV26TextToVideoOutput + +export const zGetVeedFabric10TextRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetVeedFabric10TextRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutVeedFabric10TextRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutVeedFabric10TextRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostVeedFabric10TextData = z.object({ + body: zSchemaFabric10TextInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
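+ *
+ * An input-validation sketch: parse the request body against the generated
+ * schema before submitting so violations surface client-side (the image URL
+ * is a placeholder):
+ *
+ * @example
+ * const input = zSchemaFabric10TextInput.parse({
+ *   text: 'Hello from Fabric 1.0',
+ *   resolution: '720p',
+ *   image_url: 'https://example.com/avatar.png',
+ * })
+ * // `input` matches the body shape expected by zPostVeedFabric10TextData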
+ */ +export const zPostVeedFabric10TextResponse = zSchemaQueueStatus + +export const zGetVeedFabric10TextRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetVeedFabric10TextRequestsByRequestIdResponse = + zSchemaFabric10TextOutput + +export const zGetFalAiKlingVideoV26ProTextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoV26ProTextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoV26ProTextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKlingVideoV26ProTextToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoV26ProTextToVideoData = z.object({ + body: zSchemaKlingVideoV26ProTextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKlingVideoV26ProTextToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiKlingVideoV26ProTextToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiKlingVideoV26ProTextToVideoRequestsByRequestIdResponse = + zSchemaKlingVideoV26ProTextToVideoOutput + +export const zGetFalAiPixverseV55TextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPixverseV55TextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPixverseV55TextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiPixverseV55TextToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPixverseV55TextToVideoData = z.object({ + body: zSchemaPixverseV55TextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPixverseV55TextToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiPixverseV55TextToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPixverseV55TextToVideoRequestsByRequestIdResponse = + zSchemaPixverseV55TextToVideoOutput + +export const zGetFalAiLtx2TextToVideoFastRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLtx2TextToVideoFastRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtx2TextToVideoFastRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLtx2TextToVideoFastRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtx2TextToVideoFastData = z.object({ + body: zSchemaLtx2TextToVideoFastInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtx2TextToVideoFastResponse = zSchemaQueueStatus + +export const zGetFalAiLtx2TextToVideoFastRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLtx2TextToVideoFastRequestsByRequestIdResponse = + zSchemaLtx2TextToVideoFastOutput + +export const zGetFalAiLtx2TextToVideoRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiLtx2TextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtx2TextToVideoRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiLtx2TextToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtx2TextToVideoData = z.object({ + body: zSchemaLtx2TextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtx2TextToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiLtx2TextToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLtx2TextToVideoRequestsByRequestIdResponse = + zSchemaLtx2TextToVideoOutput + +export const zGetFalAiHunyuanVideoV15TextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiHunyuanVideoV15TextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiHunyuanVideoV15TextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiHunyuanVideoV15TextToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiHunyuanVideoV15TextToVideoData = z.object({ + body: zSchemaHunyuanVideoV15TextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiHunyuanVideoV15TextToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiHunyuanVideoV15TextToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiHunyuanVideoV15TextToVideoRequestsByRequestIdResponse = + zSchemaHunyuanVideoV15TextToVideoOutput + +export const zGetFalAiInfinityStarTextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiInfinityStarTextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiInfinityStarTextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiInfinityStarTextToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiInfinityStarTextToVideoData = z.object({ + body: zSchemaInfinityStarTextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiInfinityStarTextToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiInfinityStarTextToVideoRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. + */ +export const zGetFalAiInfinityStarTextToVideoRequestsByRequestIdResponse = + zSchemaInfinityStarTextToVideoOutput + +export const zGetFalAiSanaVideoRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiSanaVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSanaVideoRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiSanaVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSanaVideoData = z.object({ + body: zSchemaSanaVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiSanaVideoResponse = zSchemaQueueStatus + +export const zGetFalAiSanaVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSanaVideoRequestsByRequestIdResponse = + zSchemaSanaVideoOutput + +export const zGetFalAiLongcatVideoTextToVideo720pRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLongcatVideoTextToVideo720pRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLongcatVideoTextToVideo720pRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLongcatVideoTextToVideo720pRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLongcatVideoTextToVideo720pData = z.object({ + body: zSchemaLongcatVideoTextToVideo720pInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLongcatVideoTextToVideo720pResponse = zSchemaQueueStatus + +export const zGetFalAiLongcatVideoTextToVideo720pRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiLongcatVideoTextToVideo720pRequestsByRequestIdResponse = + zSchemaLongcatVideoTextToVideo720pOutput + +export const zGetFalAiLongcatVideoTextToVideo480pRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLongcatVideoTextToVideo480pRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLongcatVideoTextToVideo480pRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiLongcatVideoTextToVideo480pRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLongcatVideoTextToVideo480pData = z.object({ + body: zSchemaLongcatVideoTextToVideo480pInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLongcatVideoTextToVideo480pResponse = zSchemaQueueStatus + +export const zGetFalAiLongcatVideoTextToVideo480pRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiLongcatVideoTextToVideo480pRequestsByRequestIdResponse = + zSchemaLongcatVideoTextToVideo480pOutput + +export const zGetFalAiLongcatVideoDistilledTextToVideo720pRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLongcatVideoDistilledTextToVideo720pRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLongcatVideoDistilledTextToVideo720pRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLongcatVideoDistilledTextToVideo720pRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLongcatVideoDistilledTextToVideo720pData = z.object({ + body: zSchemaLongcatVideoDistilledTextToVideo720pInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLongcatVideoDistilledTextToVideo720pResponse = + zSchemaQueueStatus + +export const zGetFalAiLongcatVideoDistilledTextToVideo720pRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiLongcatVideoDistilledTextToVideo720pRequestsByRequestIdResponse = + zSchemaLongcatVideoDistilledTextToVideo720pOutput + +export const zGetFalAiLongcatVideoDistilledTextToVideo480pRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLongcatVideoDistilledTextToVideo480pRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLongcatVideoDistilledTextToVideo480pRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLongcatVideoDistilledTextToVideo480pRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLongcatVideoDistilledTextToVideo480pData = z.object({ + body: zSchemaLongcatVideoDistilledTextToVideo480pInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLongcatVideoDistilledTextToVideo480pResponse = + zSchemaQueueStatus + +export const zGetFalAiLongcatVideoDistilledTextToVideo480pRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiLongcatVideoDistilledTextToVideo480pRequestsByRequestIdResponse = + zSchemaLongcatVideoDistilledTextToVideo480pOutput + +export const zGetFalAiMinimaxHailuo23StandardTextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiMinimaxHailuo23StandardTextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMinimaxHailuo23StandardTextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiMinimaxHailuo23StandardTextToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMinimaxHailuo23StandardTextToVideoData = z.object({ + body: zSchemaMinimaxHailuo23StandardTextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMinimaxHailuo23StandardTextToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiMinimaxHailuo23StandardTextToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiMinimaxHailuo23StandardTextToVideoRequestsByRequestIdResponse = + zSchemaMinimaxHailuo23StandardTextToVideoOutput + +export const zGetFalAiMinimaxHailuo23ProTextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiMinimaxHailuo23ProTextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMinimaxHailuo23ProTextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiMinimaxHailuo23ProTextToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMinimaxHailuo23ProTextToVideoData = z.object({ + body: zSchemaMinimaxHailuo23ProTextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMinimaxHailuo23ProTextToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiMinimaxHailuo23ProTextToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiMinimaxHailuo23ProTextToVideoRequestsByRequestIdResponse = + zSchemaMinimaxHailuo23ProTextToVideoOutput + +export const zGetFalAiBytedanceSeedanceV1ProFastTextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiBytedanceSeedanceV1ProFastTextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiBytedanceSeedanceV1ProFastTextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiBytedanceSeedanceV1ProFastTextToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiBytedanceSeedanceV1ProFastTextToVideoData = z.object({ + body: zSchemaBytedanceSeedanceV1ProFastTextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiBytedanceSeedanceV1ProFastTextToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiBytedanceSeedanceV1ProFastTextToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiBytedanceSeedanceV1ProFastTextToVideoRequestsByRequestIdResponse = + zSchemaBytedanceSeedanceV1ProFastTextToVideoOutput + +export const zGetFalAiViduQ2TextToVideoRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiViduQ2TextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiViduQ2TextToVideoRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiViduQ2TextToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiViduQ2TextToVideoData = z.object({ + body: zSchemaViduQ2TextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiViduQ2TextToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiViduQ2TextToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiViduQ2TextToVideoRequestsByRequestIdResponse = + zSchemaViduQ2TextToVideoOutput + +export const zGetFalAiKreaWan14bTextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKreaWan14bTextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKreaWan14bTextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKreaWan14bTextToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKreaWan14bTextToVideoData = z.object({ + body: zSchemaKreaWan14bTextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKreaWan14bTextToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiKreaWan14bTextToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiKreaWan14bTextToVideoRequestsByRequestIdResponse = + zSchemaKreaWan14bTextToVideoOutput + +export const zGetFalAiWanAlphaRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiWanAlphaRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanAlphaRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanAlphaRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanAlphaData = z.object({ + body: zSchemaWanAlphaInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanAlphaResponse = zSchemaQueueStatus + +export const zGetFalAiWanAlphaRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiWanAlphaRequestsByRequestIdResponse = + zSchemaWanAlphaOutput + +export const zGetFalAiKandinsky5TextToVideoDistillRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKandinsky5TextToVideoDistillRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKandinsky5TextToVideoDistillRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKandinsky5TextToVideoDistillRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKandinsky5TextToVideoDistillData = z.object({ + body: zSchemaKandinsky5TextToVideoDistillInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKandinsky5TextToVideoDistillResponse = zSchemaQueueStatus + +export const zGetFalAiKandinsky5TextToVideoDistillRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiKandinsky5TextToVideoDistillRequestsByRequestIdResponse = + zSchemaKandinsky5TextToVideoDistillOutput + +export const zGetFalAiKandinsky5TextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKandinsky5TextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKandinsky5TextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKandinsky5TextToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKandinsky5TextToVideoData = z.object({ + body: zSchemaKandinsky5TextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKandinsky5TextToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiKandinsky5TextToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiKandinsky5TextToVideoRequestsByRequestIdResponse = + zSchemaKandinsky5TextToVideoOutput + +export const zGetFalAiVeo31FastRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiVeo31FastRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiVeo31FastRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiVeo31FastRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiVeo31FastData = z.object({ + body: zSchemaVeo31FastInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiVeo31FastResponse = zSchemaQueueStatus + +export const zGetFalAiVeo31FastRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiVeo31FastRequestsByRequestIdResponse = + zSchemaVeo31FastOutput + +export const zGetFalAiVeo31RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiVeo31RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiVeo31RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiVeo31RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiVeo31Data = z.object({ + body: zSchemaVeo31Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiVeo31Response = zSchemaQueueStatus + +export const zGetFalAiVeo31RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiVeo31RequestsByRequestIdResponse = zSchemaVeo31Output + +export const zGetFalAiSora2TextToVideoProRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiSora2TextToVideoProRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSora2TextToVideoProRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiSora2TextToVideoProRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSora2TextToVideoProData = z.object({ + body: zSchemaSora2TextToVideoProInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiSora2TextToVideoProResponse = zSchemaQueueStatus + +export const zGetFalAiSora2TextToVideoProRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSora2TextToVideoProRequestsByRequestIdResponse = + zSchemaSora2TextToVideoProOutput + +export const zGetFalAiSora2TextToVideoRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiSora2TextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSora2TextToVideoRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiSora2TextToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSora2TextToVideoData = z.object({ + body: zSchemaSora2TextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiSora2TextToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiSora2TextToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSora2TextToVideoRequestsByRequestIdResponse = + zSchemaSora2TextToVideoOutput + +export const zGetFalAiOviRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiOviRequestsByRequestIdStatusResponse = zSchemaQueueStatus + +export const zPutFalAiOviRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiOviRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiOviData = z.object({ + body: zSchemaOviInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiOviResponse = zSchemaQueueStatus + +export const zGetFalAiOviRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiOviRequestsByRequestIdResponse = zSchemaOviOutput + +export const zGetFalAiWan25PreviewTextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiWan25PreviewTextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWan25PreviewTextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiWan25PreviewTextToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWan25PreviewTextToVideoData = z.object({ + body: zSchemaWan25PreviewTextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWan25PreviewTextToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiWan25PreviewTextToVideoRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. 
+ */ +export const zGetFalAiWan25PreviewTextToVideoRequestsByRequestIdResponse = + zSchemaWan25PreviewTextToVideoOutput + +export const zGetArgilAvatarsTextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetArgilAvatarsTextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutArgilAvatarsTextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutArgilAvatarsTextToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostArgilAvatarsTextToVideoData = z.object({ + body: zSchemaAvatarsTextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostArgilAvatarsTextToVideoResponse = zSchemaQueueStatus + +export const zGetArgilAvatarsTextToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetArgilAvatarsTextToVideoRequestsByRequestIdResponse = + zSchemaAvatarsTextToVideoOutput + +export const zGetFalAiPixverseV5TextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPixverseV5TextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPixverseV5TextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPixverseV5TextToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPixverseV5TextToVideoData = z.object({ + body: zSchemaPixverseV5TextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiPixverseV5TextToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiPixverseV5TextToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPixverseV5TextToVideoRequestsByRequestIdResponse = + zSchemaPixverseV5TextToVideoOutput + +export const zGetFalAiInfinitalkSingleTextRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiInfinitalkSingleTextRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiInfinitalkSingleTextRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiInfinitalkSingleTextRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiInfinitalkSingleTextData = z.object({ + body: zSchemaInfinitalkSingleTextInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiInfinitalkSingleTextResponse = zSchemaQueueStatus + +export const zGetFalAiInfinitalkSingleTextRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiInfinitalkSingleTextRequestsByRequestIdResponse = + zSchemaInfinitalkSingleTextOutput + +export const zGetMoonvalleyMareyT2vRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetMoonvalleyMareyT2vRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutMoonvalleyMareyT2vRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutMoonvalleyMareyT2vRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostMoonvalleyMareyT2vData = z.object({ + body: zSchemaMareyT2vInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostMoonvalleyMareyT2vResponse = zSchemaQueueStatus + +export const zGetMoonvalleyMareyT2vRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetMoonvalleyMareyT2vRequestsByRequestIdResponse = + zSchemaMareyT2vOutput + +export const zGetFalAiWanV22A14bTextToVideoLoraRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiWanV22A14bTextToVideoLoraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanV22A14bTextToVideoLoraRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanV22A14bTextToVideoLoraRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanV22A14bTextToVideoLoraData = z.object({ + body: zSchemaWanV22A14bTextToVideoLoraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanV22A14bTextToVideoLoraResponse = zSchemaQueueStatus + +export const zGetFalAiWanV22A14bTextToVideoLoraRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiWanV22A14bTextToVideoLoraRequestsByRequestIdResponse = + zSchemaWanV22A14bTextToVideoLoraOutput + +export const zGetFalAiWanV225bTextToVideoDistillRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiWanV225bTextToVideoDistillRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanV225bTextToVideoDistillRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanV225bTextToVideoDistillRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanV225bTextToVideoDistillData = z.object({ + body: zSchemaWanV225bTextToVideoDistillInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanV225bTextToVideoDistillResponse = zSchemaQueueStatus + +export const zGetFalAiWanV225bTextToVideoDistillRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiWanV225bTextToVideoDistillRequestsByRequestIdResponse = + zSchemaWanV225bTextToVideoDistillOutput + +export const zGetFalAiWanV225bTextToVideoFastWanRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiWanV225bTextToVideoFastWanRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanV225bTextToVideoFastWanRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanV225bTextToVideoFastWanRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanV225bTextToVideoFastWanData = z.object({ + body: zSchemaWanV225bTextToVideoFastWanInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanV225bTextToVideoFastWanResponse = zSchemaQueueStatus + +export const zGetFalAiWanV225bTextToVideoFastWanRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiWanV225bTextToVideoFastWanRequestsByRequestIdResponse = + zSchemaWanV225bTextToVideoFastWanOutput + +export const zGetFalAiWanV22A14bTextToVideoTurboRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiWanV22A14bTextToVideoTurboRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanV22A14bTextToVideoTurboRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanV22A14bTextToVideoTurboRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanV22A14bTextToVideoTurboData = z.object({ + body: zSchemaWanV22A14bTextToVideoTurboInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanV22A14bTextToVideoTurboResponse = zSchemaQueueStatus + +export const zGetFalAiWanV22A14bTextToVideoTurboRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiWanV22A14bTextToVideoTurboRequestsByRequestIdResponse = + zSchemaWanV22A14bTextToVideoTurboOutput + +export const zGetFalAiWanV225bTextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiWanV225bTextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanV225bTextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanV225bTextToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanV225bTextToVideoData = z.object({ + body: zSchemaWanV225bTextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
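+ *
+ * A minimal usage sketch: `json` is assumed to be the parsed JSON body
+ * returned when submitting to the queue; the status values mirror the
+ * upstream `QueueStatus` enum.
+ * @example
+ * const queued = zPostFalAiWanV225bTextToVideoResponse.parse(json)
+ * // queued.status is 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED'
+ * console.log(queued.request_id, queued.status)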
+ */ +export const zPostFalAiWanV225bTextToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiWanV225bTextToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiWanV225bTextToVideoRequestsByRequestIdResponse = + zSchemaWanV225bTextToVideoOutput + +export const zGetFalAiWanV22A14bTextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiWanV22A14bTextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanV22A14bTextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanV22A14bTextToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanV22A14bTextToVideoData = z.object({ + body: zSchemaWanV22A14bTextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanV22A14bTextToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiWanV22A14bTextToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiWanV22A14bTextToVideoRequestsByRequestIdResponse = + zSchemaWanV22A14bTextToVideoOutput + +export const zGetFalAiLtxv13B098DistilledRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLtxv13B098DistilledRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtxv13B098DistilledRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiLtxv13B098DistilledRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtxv13B098DistilledData = z.object({ + body: zSchemaLtxv13B098DistilledInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtxv13B098DistilledResponse = zSchemaQueueStatus + +export const zGetFalAiLtxv13B098DistilledRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLtxv13B098DistilledRequestsByRequestIdResponse = + zSchemaLtxv13B098DistilledOutput + +export const zGetFalAiMinimaxHailuo02ProTextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiMinimaxHailuo02ProTextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMinimaxHailuo02ProTextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiMinimaxHailuo02ProTextToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMinimaxHailuo02ProTextToVideoData = z.object({ + body: zSchemaMinimaxHailuo02ProTextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMinimaxHailuo02ProTextToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiMinimaxHailuo02ProTextToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiMinimaxHailuo02ProTextToVideoRequestsByRequestIdResponse = + zSchemaMinimaxHailuo02ProTextToVideoOutput + +export const zGetFalAiBytedanceSeedanceV1ProTextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
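+ *
+ * A minimal usage sketch: the matching `...StatusData` schema above can
+ * validate the request envelope before dispatch; the request id shown is a
+ * placeholder assumption.
+ * @example
+ * zGetFalAiBytedanceSeedanceV1ProTextToVideoRequestsByRequestIdStatusData.parse({
+ *   path: { request_id: 'req_123' },
+ *   query: { logs: 1 },
+ * })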
+ */ +export const zGetFalAiBytedanceSeedanceV1ProTextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiBytedanceSeedanceV1ProTextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiBytedanceSeedanceV1ProTextToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiBytedanceSeedanceV1ProTextToVideoData = z.object({ + body: zSchemaBytedanceSeedanceV1ProTextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiBytedanceSeedanceV1ProTextToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiBytedanceSeedanceV1ProTextToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiBytedanceSeedanceV1ProTextToVideoRequestsByRequestIdResponse = + zSchemaBytedanceSeedanceV1ProTextToVideoOutput + +export const zGetFalAiBytedanceSeedanceV1LiteTextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiBytedanceSeedanceV1LiteTextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiBytedanceSeedanceV1LiteTextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiBytedanceSeedanceV1LiteTextToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiBytedanceSeedanceV1LiteTextToVideoData = z.object({ + body: zSchemaBytedanceSeedanceV1LiteTextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiBytedanceSeedanceV1LiteTextToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiBytedanceSeedanceV1LiteTextToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiBytedanceSeedanceV1LiteTextToVideoRequestsByRequestIdResponse = + zSchemaBytedanceSeedanceV1LiteTextToVideoOutput + +export const zGetFalAiKlingVideoV21MasterTextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoV21MasterTextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoV21MasterTextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKlingVideoV21MasterTextToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoV21MasterTextToVideoData = z.object({ + body: zSchemaKlingVideoV21MasterTextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKlingVideoV21MasterTextToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiKlingVideoV21MasterTextToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiKlingVideoV21MasterTextToVideoRequestsByRequestIdResponse = + zSchemaKlingVideoV21MasterTextToVideoOutput + +export const zGetVeedAvatarsTextToVideoRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetVeedAvatarsTextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutVeedAvatarsTextToVideoRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. 
+ */ +export const zPutVeedAvatarsTextToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostVeedAvatarsTextToVideoData = z.object({ + body: zSchemaAvatarsTextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostVeedAvatarsTextToVideoResponse = zSchemaQueueStatus + +export const zGetVeedAvatarsTextToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetVeedAvatarsTextToVideoRequestsByRequestIdResponse = + zSchemaAvatarsTextToVideoOutput + +export const zGetFalAiLtxVideo13bDevRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiLtxVideo13bDevRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtxVideo13bDevRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiLtxVideo13bDevRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtxVideo13bDevData = z.object({ + body: zSchemaLtxVideo13bDevInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtxVideo13bDevResponse = zSchemaQueueStatus + +export const zGetFalAiLtxVideo13bDevRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLtxVideo13bDevRequestsByRequestIdResponse = + zSchemaLtxVideo13bDevOutput + +export const zGetFalAiLtxVideo13bDistilledRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
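+ *
+ * A minimal usage sketch: when the status call was made with `logs: 1`,
+ * the parsed status may carry a `logs` object (schema-permitting); `json`
+ * is assumed to be the endpoint's parsed JSON body.
+ * @example
+ * const s =
+ *   zGetFalAiLtxVideo13bDistilledRequestsByRequestIdStatusResponse.parse(json)
+ * if (s.logs) console.log(s.logs)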
+ */ +export const zGetFalAiLtxVideo13bDistilledRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtxVideo13bDistilledRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLtxVideo13bDistilledRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtxVideo13bDistilledData = z.object({ + body: zSchemaLtxVideo13bDistilledInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtxVideo13bDistilledResponse = zSchemaQueueStatus + +export const zGetFalAiLtxVideo13bDistilledRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLtxVideo13bDistilledRequestsByRequestIdResponse = + zSchemaLtxVideo13bDistilledOutput + +export const zGetFalAiPixverseV45TextToVideoFastRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPixverseV45TextToVideoFastRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPixverseV45TextToVideoFastRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPixverseV45TextToVideoFastRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPixverseV45TextToVideoFastData = z.object({ + body: zSchemaPixverseV45TextToVideoFastInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPixverseV45TextToVideoFastResponse = zSchemaQueueStatus + +export const zGetFalAiPixverseV45TextToVideoFastRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiPixverseV45TextToVideoFastRequestsByRequestIdResponse = + zSchemaPixverseV45TextToVideoFastOutput + +export const zGetFalAiPixverseV45TextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPixverseV45TextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPixverseV45TextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPixverseV45TextToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPixverseV45TextToVideoData = z.object({ + body: zSchemaPixverseV45TextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPixverseV45TextToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiPixverseV45TextToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPixverseV45TextToVideoRequestsByRequestIdResponse = + zSchemaPixverseV45TextToVideoOutput + +export const zGetFalAiViduQ1TextToVideoRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiViduQ1TextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiViduQ1TextToVideoRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiViduQ1TextToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiViduQ1TextToVideoData = z.object({ + body: zSchemaViduQ1TextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
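+ *
+ * A minimal usage sketch: static types can be derived from any of these
+ * schemas with `z.infer`.
+ * @example
+ * type ViduQ1QueueStatus = z.infer<typeof zPostFalAiViduQ1TextToVideoResponse>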
+ */ +export const zPostFalAiViduQ1TextToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiViduQ1TextToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiViduQ1TextToVideoRequestsByRequestIdResponse = + zSchemaViduQ1TextToVideoOutput + +export const zGetFalAiMagiRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiMagiRequestsByRequestIdStatusResponse = zSchemaQueueStatus + +export const zPutFalAiMagiRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiMagiRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMagiData = z.object({ + body: zSchemaMagiInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMagiResponse = zSchemaQueueStatus + +export const zGetFalAiMagiRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiMagiRequestsByRequestIdResponse = zSchemaMagiOutput + +export const zGetFalAiMagiDistilledRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiMagiDistilledRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMagiDistilledRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
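+ *
+ * A minimal usage sketch: `body` is `z.never()` in the cancel envelope, so
+ * validation rejects any supplied body; the payload below is a deliberate
+ * counter-example with a placeholder request id.
+ * @example
+ * const r = zPutFalAiMagiDistilledRequestsByRequestIdCancelData.safeParse({
+ *   body: { unexpected: true },
+ *   path: { request_id: 'req_123' },
+ * })
+ * // r.success === false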
+ */ +export const zPutFalAiMagiDistilledRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMagiDistilledData = z.object({ + body: zSchemaMagiDistilledInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMagiDistilledResponse = zSchemaQueueStatus + +export const zGetFalAiMagiDistilledRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiMagiDistilledRequestsByRequestIdResponse = + zSchemaMagiDistilledOutput + +export const zGetFalAiPixverseV4TextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPixverseV4TextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPixverseV4TextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPixverseV4TextToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPixverseV4TextToVideoData = z.object({ + body: zSchemaPixverseV4TextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPixverseV4TextToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiPixverseV4TextToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPixverseV4TextToVideoRequestsByRequestIdResponse = + zSchemaPixverseV4TextToVideoOutput + +export const zGetFalAiPixverseV4TextToVideoFastRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiPixverseV4TextToVideoFastRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPixverseV4TextToVideoFastRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPixverseV4TextToVideoFastRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPixverseV4TextToVideoFastData = z.object({ + body: zSchemaPixverseV4TextToVideoFastInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPixverseV4TextToVideoFastResponse = zSchemaQueueStatus + +export const zGetFalAiPixverseV4TextToVideoFastRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiPixverseV4TextToVideoFastRequestsByRequestIdResponse = + zSchemaPixverseV4TextToVideoFastOutput + +export const zGetFalAiKlingVideoLipsyncAudioToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoLipsyncAudioToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoLipsyncAudioToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKlingVideoLipsyncAudioToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoLipsyncAudioToVideoData = z.object({ + body: zSchemaKlingVideoLipsyncAudioToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKlingVideoLipsyncAudioToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiKlingVideoLipsyncAudioToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
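+ *
+ * A minimal usage sketch: the descriptions registered on nested fields are
+ * retrievable from Zod's global registry at runtime (assuming Zod v4
+ * registry semantics, where `.register()` returns the same instance).
+ * @example
+ * const meta = z.globalRegistry.get(
+ *   zGetFalAiKlingVideoLipsyncAudioToVideoRequestsByRequestIdData.shape.path
+ *     .shape.request_id,
+ * ) // { description: 'Request ID' }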
+ */ +export const zGetFalAiKlingVideoLipsyncAudioToVideoRequestsByRequestIdResponse = + zSchemaKlingVideoLipsyncAudioToVideoOutput + +export const zGetFalAiKlingVideoLipsyncTextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoLipsyncTextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoLipsyncTextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKlingVideoLipsyncTextToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoLipsyncTextToVideoData = z.object({ + body: zSchemaKlingVideoLipsyncTextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKlingVideoLipsyncTextToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiKlingVideoLipsyncTextToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiKlingVideoLipsyncTextToVideoRequestsByRequestIdResponse = + zSchemaKlingVideoLipsyncTextToVideoOutput + +export const zGetFalAiWanT2vLoraRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiWanT2vLoraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanT2vLoraRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanT2vLoraRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanT2vLoraData = z.object({ + body: zSchemaWanT2vLoraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
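+ *
+ * A minimal usage sketch: `json` is assumed to be the parsed JSON body of
+ * the submit call; `queue_position` is only meaningful while queued.
+ * @example
+ * const s = zPostFalAiWanT2vLoraResponse.parse(json)
+ * if (s.status === 'IN_QUEUE') console.log('position', s.queue_position)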
+ */ +export const zPostFalAiWanT2vLoraResponse = zSchemaQueueStatus + +export const zGetFalAiWanT2vLoraRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiWanT2vLoraRequestsByRequestIdResponse = + zSchemaWanT2vLoraOutput + +export const zGetFalAiLumaDreamMachineRay2FlashRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLumaDreamMachineRay2FlashRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLumaDreamMachineRay2FlashRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLumaDreamMachineRay2FlashRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLumaDreamMachineRay2FlashData = z.object({ + body: zSchemaLumaDreamMachineRay2FlashInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLumaDreamMachineRay2FlashResponse = zSchemaQueueStatus + +export const zGetFalAiLumaDreamMachineRay2FlashRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiLumaDreamMachineRay2FlashRequestsByRequestIdResponse = + zSchemaLumaDreamMachineRay2FlashOutput + +export const zGetFalAiPikaV2TurboTextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPikaV2TurboTextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPikaV2TurboTextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiPikaV2TurboTextToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPikaV2TurboTextToVideoData = z.object({ + body: zSchemaPikaV2TurboTextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPikaV2TurboTextToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiPikaV2TurboTextToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPikaV2TurboTextToVideoRequestsByRequestIdResponse = + zSchemaPikaV2TurboTextToVideoOutput + +export const zGetFalAiPikaV21TextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPikaV21TextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPikaV21TextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPikaV21TextToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPikaV21TextToVideoData = z.object({ + body: zSchemaPikaV21TextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPikaV21TextToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiPikaV21TextToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPikaV21TextToVideoRequestsByRequestIdResponse = + zSchemaPikaV21TextToVideoOutput + +export const zGetFalAiPikaV22TextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiPikaV22TextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPikaV22TextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPikaV22TextToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPikaV22TextToVideoData = z.object({ + body: zSchemaPikaV22TextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPikaV22TextToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiPikaV22TextToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPikaV22TextToVideoRequestsByRequestIdResponse = + zSchemaPikaV22TextToVideoOutput + +export const zGetFalAiWanProTextToVideoRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiWanProTextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanProTextToVideoRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanProTextToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanProTextToVideoData = z.object({ + body: zSchemaWanProTextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanProTextToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiWanProTextToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
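+ *
+ * A minimal usage sketch: `safeParse` gives non-throwing validation of a
+ * hypothetical `json` result payload.
+ * @example
+ * const result =
+ *   zGetFalAiWanProTextToVideoRequestsByRequestIdResponse.safeParse(json)
+ * if (!result.success) console.error(result.error.issues)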
+ */ +export const zGetFalAiWanProTextToVideoRequestsByRequestIdResponse = + zSchemaWanProTextToVideoOutput + +export const zGetFalAiKlingVideoV15ProEffectsRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoV15ProEffectsRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoV15ProEffectsRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKlingVideoV15ProEffectsRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoV15ProEffectsData = z.object({ + body: zSchemaKlingVideoV15ProEffectsInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKlingVideoV15ProEffectsResponse = zSchemaQueueStatus + +export const zGetFalAiKlingVideoV15ProEffectsRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. + */ +export const zGetFalAiKlingVideoV15ProEffectsRequestsByRequestIdResponse = + zSchemaKlingVideoV15ProEffectsOutput + +export const zGetFalAiKlingVideoV16ProEffectsRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoV16ProEffectsRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoV16ProEffectsRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKlingVideoV16ProEffectsRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoV16ProEffectsData = z.object({ + body: zSchemaKlingVideoV16ProEffectsInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiKlingVideoV16ProEffectsResponse = zSchemaQueueStatus + +export const zGetFalAiKlingVideoV16ProEffectsRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. + */ +export const zGetFalAiKlingVideoV16ProEffectsRequestsByRequestIdResponse = + zSchemaKlingVideoV16ProEffectsOutput + +export const zGetFalAiKlingVideoV1StandardEffectsRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoV1StandardEffectsRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoV1StandardEffectsRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKlingVideoV1StandardEffectsRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoV1StandardEffectsData = z.object({ + body: zSchemaKlingVideoV1StandardEffectsInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKlingVideoV1StandardEffectsResponse = zSchemaQueueStatus + +export const zGetFalAiKlingVideoV1StandardEffectsRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiKlingVideoV1StandardEffectsRequestsByRequestIdResponse = + zSchemaKlingVideoV1StandardEffectsOutput + +export const zGetFalAiKlingVideoV16StandardEffectsRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoV16StandardEffectsRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoV16StandardEffectsRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiKlingVideoV16StandardEffectsRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoV16StandardEffectsData = z.object({ + body: zSchemaKlingVideoV16StandardEffectsInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKlingVideoV16StandardEffectsResponse = zSchemaQueueStatus + +export const zGetFalAiKlingVideoV16StandardEffectsRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiKlingVideoV16StandardEffectsRequestsByRequestIdResponse = + zSchemaKlingVideoV16StandardEffectsOutput + +export const zGetFalAiLtxVideoV095RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiLtxVideoV095RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtxVideoV095RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiLtxVideoV095RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtxVideoV095Data = z.object({ + body: zSchemaLtxVideoV095Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtxVideoV095Response = zSchemaQueueStatus + +export const zGetFalAiLtxVideoV095RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLtxVideoV095RequestsByRequestIdResponse = + zSchemaLtxVideoV095Output + +export const zGetFalAiKlingVideoV16ProTextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiKlingVideoV16ProTextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoV16ProTextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKlingVideoV16ProTextToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoV16ProTextToVideoData = z.object({ + body: zSchemaKlingVideoV16ProTextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKlingVideoV16ProTextToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiKlingVideoV16ProTextToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiKlingVideoV16ProTextToVideoRequestsByRequestIdResponse = + zSchemaKlingVideoV16ProTextToVideoOutput + +export const zGetFalAiWanT2vRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiWanT2vRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanT2vRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanT2vRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanT2vData = z.object({ + body: zSchemaWanT2vInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanT2vResponse = zSchemaQueueStatus + +export const zGetFalAiWanT2vRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiWanT2vRequestsByRequestIdResponse = zSchemaWanT2vOutput + +export const zGetFalAiVeo2RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiVeo2RequestsByRequestIdStatusResponse = zSchemaQueueStatus + +export const zPutFalAiVeo2RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiVeo2RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiVeo2Data = z.object({ + body: zSchemaVeo2Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiVeo2Response = zSchemaQueueStatus + +export const zGetFalAiVeo2RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiVeo2RequestsByRequestIdResponse = zSchemaVeo2Output + +export const zGetFalAiMinimaxVideo01DirectorRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiMinimaxVideo01DirectorRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMinimaxVideo01DirectorRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiMinimaxVideo01DirectorRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMinimaxVideo01DirectorData = z.object({ + body: zSchemaMinimaxVideo01DirectorInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
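+ *
+ * Editorial note (a sketch, not generated code): compile-time types can be
+ * derived from these runtime schemas with `z.infer`, so callers do not need a
+ * separate type import. For the submit schema defined above:
+ *
+ * @example
+ * type MinimaxDirectorSubmitBody = z.infer<
+ *   typeof zPostFalAiMinimaxVideo01DirectorData
+ * >['body']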
+ */ +export const zPostFalAiMinimaxVideo01DirectorResponse = zSchemaQueueStatus + +export const zGetFalAiMinimaxVideo01DirectorRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiMinimaxVideo01DirectorRequestsByRequestIdResponse = + zSchemaMinimaxVideo01DirectorOutput + +export const zGetFalAiPixverseV35TextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPixverseV35TextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPixverseV35TextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPixverseV35TextToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPixverseV35TextToVideoData = z.object({ + body: zSchemaPixverseV35TextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPixverseV35TextToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiPixverseV35TextToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPixverseV35TextToVideoRequestsByRequestIdResponse = + zSchemaPixverseV35TextToVideoOutput + +export const zGetFalAiPixverseV35TextToVideoFastRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPixverseV35TextToVideoFastRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPixverseV35TextToVideoFastRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiPixverseV35TextToVideoFastRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPixverseV35TextToVideoFastData = z.object({ + body: zSchemaPixverseV35TextToVideoFastInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPixverseV35TextToVideoFastResponse = zSchemaQueueStatus + +export const zGetFalAiPixverseV35TextToVideoFastRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiPixverseV35TextToVideoFastRequestsByRequestIdResponse = + zSchemaPixverseV35TextToVideoFastOutput + +export const zGetFalAiLumaDreamMachineRay2RequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLumaDreamMachineRay2RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLumaDreamMachineRay2RequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLumaDreamMachineRay2RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLumaDreamMachineRay2Data = z.object({ + body: zSchemaLumaDreamMachineRay2Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLumaDreamMachineRay2Response = zSchemaQueueStatus + +export const zGetFalAiLumaDreamMachineRay2RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLumaDreamMachineRay2RequestsByRequestIdResponse = + zSchemaLumaDreamMachineRay2Output + +export const zGetFalAiHunyuanVideoLoraRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiHunyuanVideoLoraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiHunyuanVideoLoraRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiHunyuanVideoLoraRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiHunyuanVideoLoraData = z.object({ + body: zSchemaHunyuanVideoLoraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiHunyuanVideoLoraResponse = zSchemaQueueStatus + +export const zGetFalAiHunyuanVideoLoraRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiHunyuanVideoLoraRequestsByRequestIdResponse = + zSchemaHunyuanVideoLoraOutput + +export const zGetFalAiTranspixarRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiTranspixarRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiTranspixarRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiTranspixarRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiTranspixarData = z.object({ + body: zSchemaTranspixarInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiTranspixarResponse = zSchemaQueueStatus + +export const zGetFalAiTranspixarRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiTranspixarRequestsByRequestIdResponse = + zSchemaTranspixarOutput + +export const zGetFalAiCogvideox5bRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiCogvideox5bRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiCogvideox5bRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiCogvideox5bRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiCogvideox5bData = z.object({ + body: zSchemaCogvideox5bInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiCogvideox5bResponse = zSchemaQueueStatus + +export const zGetFalAiCogvideox5bRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiCogvideox5bRequestsByRequestIdResponse = + zSchemaCogvideox5bOutput + +export const zGetFalAiKlingVideoV16StandardTextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoV16StandardTextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoV16StandardTextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKlingVideoV16StandardTextToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoV16StandardTextToVideoData = z.object({ + body: zSchemaKlingVideoV16StandardTextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
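+ *
+ * Editorial note (a queue-flow sketch; the HTTP layer is assumed, not part of
+ * this file): submitting yields a queue status whose `response_url` points at
+ * the finished result.
+ *
+ * @example
+ * // 1. validate the submission, then POST its body to the queue endpoint
+ * const submission = zPostFalAiKlingVideoV16StandardTextToVideoData.parse({
+ *   body: requestBody, // hypothetical; must satisfy the input schema
+ * })
+ * // 2. parse each poll response with zSchemaQueueStatus until
+ * //    status === 'COMPLETED'
+ * // 3. GET response_url and validate the payload with
+ * //    zSchemaKlingVideoV16StandardTextToVideoOutput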
+ */ +export const zPostFalAiKlingVideoV16StandardTextToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiKlingVideoV16StandardTextToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiKlingVideoV16StandardTextToVideoRequestsByRequestIdResponse = + zSchemaKlingVideoV16StandardTextToVideoOutput + +export const zGetFalAiMinimaxVideo01LiveRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiMinimaxVideo01LiveRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMinimaxVideo01LiveRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiMinimaxVideo01LiveRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMinimaxVideo01LiveData = z.object({ + body: zSchemaMinimaxVideo01LiveInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMinimaxVideo01LiveResponse = zSchemaQueueStatus + +export const zGetFalAiMinimaxVideo01LiveRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiMinimaxVideo01LiveRequestsByRequestIdResponse = + zSchemaMinimaxVideo01LiveOutput + +export const zGetFalAiKlingVideoV1StandardTextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoV1StandardTextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoV1StandardTextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiKlingVideoV1StandardTextToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoV1StandardTextToVideoData = z.object({ + body: zSchemaKlingVideoV1StandardTextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKlingVideoV1StandardTextToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiKlingVideoV1StandardTextToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiKlingVideoV1StandardTextToVideoRequestsByRequestIdResponse = + zSchemaKlingVideoV1StandardTextToVideoOutput + +export const zGetFalAiKlingVideoV15ProTextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoV15ProTextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoV15ProTextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKlingVideoV15ProTextToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoV15ProTextToVideoData = z.object({ + body: zSchemaKlingVideoV15ProTextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKlingVideoV15ProTextToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiKlingVideoV15ProTextToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiKlingVideoV15ProTextToVideoRequestsByRequestIdResponse = + zSchemaKlingVideoV15ProTextToVideoOutput + +export const zGetFalAiMochiV1RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiMochiV1RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMochiV1RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiMochiV1RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMochiV1Data = z.object({ + body: zSchemaMochiV1Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMochiV1Response = zSchemaQueueStatus + +export const zGetFalAiMochiV1RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiMochiV1RequestsByRequestIdResponse = zSchemaMochiV1Output + +export const zGetFalAiHunyuanVideoRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiHunyuanVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiHunyuanVideoRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiHunyuanVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiHunyuanVideoData = z.object({ + body: zSchemaHunyuanVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiHunyuanVideoResponse = zSchemaQueueStatus + +export const zGetFalAiHunyuanVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiHunyuanVideoRequestsByRequestIdResponse = + zSchemaHunyuanVideoOutput + +export const zGetFalAiLtxVideoRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiLtxVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtxVideoRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiLtxVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtxVideoData = z.object({ + body: zSchemaLtxVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtxVideoResponse = zSchemaQueueStatus + +export const zGetFalAiLtxVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLtxVideoRequestsByRequestIdResponse = + zSchemaLtxVideoOutput + +export const zGetFalAiFastSvdTextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFastSvdTextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFastSvdTextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFastSvdTextToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFastSvdTextToVideoData = z.object({ + body: zSchemaFastSvdTextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
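+ *
+ * Editorial note (a cancellation sketch, not generated code): a PUT to the
+ * `cancel_url` carried by the queue status returns a body whose optional
+ * `success` flag reports whether cancellation took effect.
+ *
+ * @example
+ * const cancelled =
+ *   zPutFalAiFastSvdTextToVideoRequestsByRequestIdCancelResponse.parse(
+ *     cancelJson, // hypothetical HTTP response body
+ *   )
+ * if (cancelled.success) {
+ *   // the request was cancelled before it completed
+ * }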
+ */ +export const zPostFalAiFastSvdTextToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiFastSvdTextToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFastSvdTextToVideoRequestsByRequestIdResponse = + zSchemaFastSvdTextToVideoOutput + +export const zGetFalAiFastSvdLcmTextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFastSvdLcmTextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFastSvdLcmTextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFastSvdLcmTextToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFastSvdLcmTextToVideoData = z.object({ + body: zSchemaFastSvdLcmTextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFastSvdLcmTextToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiFastSvdLcmTextToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFastSvdLcmTextToVideoRequestsByRequestIdResponse = + zSchemaFastSvdLcmTextToVideoOutput + +export const zGetFalAiT2vTurboRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiT2vTurboRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiT2vTurboRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiT2vTurboRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiT2vTurboData = z.object({ + body: zSchemaT2vTurboInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiT2vTurboResponse = zSchemaQueueStatus + +export const zGetFalAiT2vTurboRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiT2vTurboRequestsByRequestIdResponse = + zSchemaT2vTurboOutput + +export const zGetFalAiFastAnimatediffTextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFastAnimatediffTextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFastAnimatediffTextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFastAnimatediffTextToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFastAnimatediffTextToVideoData = z.object({ + body: zSchemaFastAnimatediffTextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFastAnimatediffTextToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiFastAnimatediffTextToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiFastAnimatediffTextToVideoRequestsByRequestIdResponse = + zSchemaFastAnimatediffTextToVideoOutput + +export const zGetFalAiFastAnimatediffTurboTextToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiFastAnimatediffTurboTextToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFastAnimatediffTurboTextToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFastAnimatediffTurboTextToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFastAnimatediffTurboTextToVideoData = z.object({ + body: zSchemaFastAnimatediffTurboTextToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFastAnimatediffTurboTextToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiFastAnimatediffTurboTextToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiFastAnimatediffTurboTextToVideoRequestsByRequestIdResponse = + zSchemaFastAnimatediffTurboTextToVideoOutput + +export const zGetFalAiMinimaxVideo01RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiMinimaxVideo01RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMinimaxVideo01RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiMinimaxVideo01RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMinimaxVideo01Data = z.object({ + body: zSchemaMinimaxVideo01Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMinimaxVideo01Response = zSchemaQueueStatus + +export const zGetFalAiMinimaxVideo01RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiMinimaxVideo01RequestsByRequestIdResponse = + zSchemaMinimaxVideo01Output + +export const zGetFalAiAnimatediffSparsectrlLcmRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiAnimatediffSparsectrlLcmRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiAnimatediffSparsectrlLcmRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiAnimatediffSparsectrlLcmRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiAnimatediffSparsectrlLcmData = z.object({ + body: zSchemaAnimatediffSparsectrlLcmInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiAnimatediffSparsectrlLcmResponse = zSchemaQueueStatus + +export const zGetFalAiAnimatediffSparsectrlLcmRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
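+ *
+ * Editorial note (a sketch, not generated code): the endpoint-map modules
+ * generated alongside these schemas pair each endpoint id with its
+ * input/output schema pair, which allows a single generic validator. Using
+ * the training map added later in this change:
+ *
+ * @example
+ * function parseTrainingInput<T extends TrainingModel>(
+ *   model: T,
+ *   payload: unknown,
+ * ): TrainingModelInput<T> {
+ *   return TrainingSchemaMap[model].input.parse(payload) as TrainingModelInput<T>
+ * }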
+ */ +export const zGetFalAiAnimatediffSparsectrlLcmRequestsByRequestIdResponse = + zSchemaAnimatediffSparsectrlLcmOutput diff --git a/packages/typescript/ai-fal/src/generated/training/endpoint-map.ts b/packages/typescript/ai-fal/src/generated/training/endpoint-map.ts new file mode 100644 index 00000000..15fb4058 --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/training/endpoint-map.ts @@ -0,0 +1,450 @@ +// AUTO-GENERATED - Do not edit manually +// Generated from types.gen.ts via scripts/generate-fal-endpoint-maps.ts + +import { + zSchemaFlux2Klein4bBaseTrainerEditInput, + zSchemaFlux2Klein4bBaseTrainerEditOutput, + zSchemaFlux2Klein4bBaseTrainerInput, + zSchemaFlux2Klein4bBaseTrainerOutput, + zSchemaFlux2Klein9bBaseTrainerEditInput, + zSchemaFlux2Klein9bBaseTrainerEditOutput, + zSchemaFlux2Klein9bBaseTrainerInput, + zSchemaFlux2Klein9bBaseTrainerOutput, + zSchemaFlux2TrainerEditInput, + zSchemaFlux2TrainerEditOutput, + zSchemaFlux2TrainerInput, + zSchemaFlux2TrainerOutput, + zSchemaFlux2TrainerV2EditInput, + zSchemaFlux2TrainerV2EditOutput, + zSchemaFlux2TrainerV2Input, + zSchemaFlux2TrainerV2Output, + zSchemaFluxKontextTrainerInput, + zSchemaFluxKontextTrainerOutput, + zSchemaFluxKreaTrainerInput, + zSchemaFluxKreaTrainerOutput, + zSchemaFluxLoraFastTrainingInput, + zSchemaFluxLoraFastTrainingOutput, + zSchemaFluxLoraPortraitTrainerInput, + zSchemaFluxLoraPortraitTrainerOutput, + zSchemaHunyuanVideoLoraTrainingInput, + zSchemaHunyuanVideoLoraTrainingOutput, + zSchemaLtx2V2vTrainerInput, + zSchemaLtx2V2vTrainerOutput, + zSchemaLtx2VideoTrainerInput, + zSchemaLtx2VideoTrainerOutput, + zSchemaLtxVideoTrainerInput, + zSchemaLtxVideoTrainerOutput, + zSchemaQwenImage2512TrainerInput, + zSchemaQwenImage2512TrainerOutput, + zSchemaQwenImage2512TrainerV2Input, + zSchemaQwenImage2512TrainerV2Output, + zSchemaQwenImageEdit2509TrainerInput, + zSchemaQwenImageEdit2509TrainerOutput, + zSchemaQwenImageEdit2511TrainerInput, + zSchemaQwenImageEdit2511TrainerOutput, + zSchemaQwenImageEditPlusTrainerInput, + zSchemaQwenImageEditPlusTrainerOutput, + zSchemaQwenImageEditTrainerInput, + zSchemaQwenImageEditTrainerOutput, + zSchemaQwenImageLayeredTrainerInput, + zSchemaQwenImageLayeredTrainerOutput, + zSchemaQwenImageTrainerInput, + zSchemaQwenImageTrainerOutput, + zSchemaRecraftV3CreateStyleInput, + zSchemaRecraftV3CreateStyleOutput, + zSchemaTurboFluxTrainerInput, + zSchemaTurboFluxTrainerOutput, + zSchemaWan22ImageTrainerInput, + zSchemaWan22ImageTrainerOutput, + zSchemaWanTrainerFlf2V720pInput, + zSchemaWanTrainerFlf2V720pOutput, + zSchemaWanTrainerI2V720pInput, + zSchemaWanTrainerI2V720pOutput, + zSchemaWanTrainerInput, + zSchemaWanTrainerOutput, + zSchemaWanTrainerT2V14bInput, + zSchemaWanTrainerT2V14bOutput, + zSchemaWanTrainerT2vInput, + zSchemaWanTrainerT2vOutput, + zSchemaZImageBaseTrainerInput, + zSchemaZImageBaseTrainerOutput, + zSchemaZImageTrainerInput, + zSchemaZImageTrainerOutput, + zSchemaZImageTurboTrainerV2Input, + zSchemaZImageTurboTrainerV2Output, +} from './zod.gen' + +import type { + SchemaFlux2Klein4bBaseTrainerEditInput, + SchemaFlux2Klein4bBaseTrainerEditOutput, + SchemaFlux2Klein4bBaseTrainerInput, + SchemaFlux2Klein4bBaseTrainerOutput, + SchemaFlux2Klein9bBaseTrainerEditInput, + SchemaFlux2Klein9bBaseTrainerEditOutput, + SchemaFlux2Klein9bBaseTrainerInput, + SchemaFlux2Klein9bBaseTrainerOutput, + SchemaFlux2TrainerEditInput, + SchemaFlux2TrainerEditOutput, + SchemaFlux2TrainerInput, + SchemaFlux2TrainerOutput, + SchemaFlux2TrainerV2EditInput, + 
SchemaFlux2TrainerV2EditOutput, + SchemaFlux2TrainerV2Input, + SchemaFlux2TrainerV2Output, + SchemaFluxKontextTrainerInput, + SchemaFluxKontextTrainerOutput, + SchemaFluxKreaTrainerInput, + SchemaFluxKreaTrainerOutput, + SchemaFluxLoraFastTrainingInput, + SchemaFluxLoraFastTrainingOutput, + SchemaFluxLoraPortraitTrainerInput, + SchemaFluxLoraPortraitTrainerOutput, + SchemaHunyuanVideoLoraTrainingInput, + SchemaHunyuanVideoLoraTrainingOutput, + SchemaLtx2V2vTrainerInput, + SchemaLtx2V2vTrainerOutput, + SchemaLtx2VideoTrainerInput, + SchemaLtx2VideoTrainerOutput, + SchemaLtxVideoTrainerInput, + SchemaLtxVideoTrainerOutput, + SchemaQwenImage2512TrainerInput, + SchemaQwenImage2512TrainerOutput, + SchemaQwenImage2512TrainerV2Input, + SchemaQwenImage2512TrainerV2Output, + SchemaQwenImageEdit2509TrainerInput, + SchemaQwenImageEdit2509TrainerOutput, + SchemaQwenImageEdit2511TrainerInput, + SchemaQwenImageEdit2511TrainerOutput, + SchemaQwenImageEditPlusTrainerInput, + SchemaQwenImageEditPlusTrainerOutput, + SchemaQwenImageEditTrainerInput, + SchemaQwenImageEditTrainerOutput, + SchemaQwenImageLayeredTrainerInput, + SchemaQwenImageLayeredTrainerOutput, + SchemaQwenImageTrainerInput, + SchemaQwenImageTrainerOutput, + SchemaRecraftV3CreateStyleInput, + SchemaRecraftV3CreateStyleOutput, + SchemaTurboFluxTrainerInput, + SchemaTurboFluxTrainerOutput, + SchemaWan22ImageTrainerInput, + SchemaWan22ImageTrainerOutput, + SchemaWanTrainerFlf2V720pInput, + SchemaWanTrainerFlf2V720pOutput, + SchemaWanTrainerI2V720pInput, + SchemaWanTrainerI2V720pOutput, + SchemaWanTrainerInput, + SchemaWanTrainerOutput, + SchemaWanTrainerT2V14bInput, + SchemaWanTrainerT2V14bOutput, + SchemaWanTrainerT2vInput, + SchemaWanTrainerT2vOutput, + SchemaZImageBaseTrainerInput, + SchemaZImageBaseTrainerOutput, + SchemaZImageTrainerInput, + SchemaZImageTrainerOutput, + SchemaZImageTurboTrainerV2Input, + SchemaZImageTurboTrainerV2Output, +} from './types.gen' + +import type { z } from 'zod' + +export type TrainingEndpointMap = { + 'fal-ai/flux-krea-trainer': { + input: SchemaFluxKreaTrainerInput + output: SchemaFluxKreaTrainerOutput + } + 'fal-ai/flux-kontext-trainer': { + input: SchemaFluxKontextTrainerInput + output: SchemaFluxKontextTrainerOutput + } + 'fal-ai/flux-lora-fast-training': { + input: SchemaFluxLoraFastTrainingInput + output: SchemaFluxLoraFastTrainingOutput + } + 'fal-ai/flux-lora-portrait-trainer': { + input: SchemaFluxLoraPortraitTrainerInput + output: SchemaFluxLoraPortraitTrainerOutput + } + 'fal-ai/z-image-base-trainer': { + input: SchemaZImageBaseTrainerInput + output: SchemaZImageBaseTrainerOutput + } + 'fal-ai/z-image-turbo-trainer-v2': { + input: SchemaZImageTurboTrainerV2Input + output: SchemaZImageTurboTrainerV2Output + } + 'fal-ai/flux-2-klein-9b-base-trainer/edit': { + input: SchemaFlux2Klein9bBaseTrainerEditInput + output: SchemaFlux2Klein9bBaseTrainerEditOutput + } + 'fal-ai/flux-2-klein-9b-base-trainer': { + input: SchemaFlux2Klein9bBaseTrainerInput + output: SchemaFlux2Klein9bBaseTrainerOutput + } + 'fal-ai/flux-2-klein-4b-base-trainer': { + input: SchemaFlux2Klein4bBaseTrainerInput + output: SchemaFlux2Klein4bBaseTrainerOutput + } + 'fal-ai/flux-2-klein-4b-base-trainer/edit': { + input: SchemaFlux2Klein4bBaseTrainerEditInput + output: SchemaFlux2Klein4bBaseTrainerEditOutput + } + 'fal-ai/qwen-image-2512-trainer-v2': { + input: SchemaQwenImage2512TrainerV2Input + output: SchemaQwenImage2512TrainerV2Output + } + 'fal-ai/flux-2-trainer-v2/edit': { + input: SchemaFlux2TrainerV2EditInput + output: 
SchemaFlux2TrainerV2EditOutput + } + 'fal-ai/flux-2-trainer-v2': { + input: SchemaFlux2TrainerV2Input + output: SchemaFlux2TrainerV2Output + } + 'fal-ai/ltx2-v2v-trainer': { + input: SchemaLtx2V2vTrainerInput + output: SchemaLtx2V2vTrainerOutput + } + 'fal-ai/ltx2-video-trainer': { + input: SchemaLtx2VideoTrainerInput + output: SchemaLtx2VideoTrainerOutput + } + 'fal-ai/qwen-image-2512-trainer': { + input: SchemaQwenImage2512TrainerInput + output: SchemaQwenImage2512TrainerOutput + } + 'fal-ai/qwen-image-edit-2511-trainer': { + input: SchemaQwenImageEdit2511TrainerInput + output: SchemaQwenImageEdit2511TrainerOutput + } + 'fal-ai/qwen-image-layered-trainer': { + input: SchemaQwenImageLayeredTrainerInput + output: SchemaQwenImageLayeredTrainerOutput + } + 'fal-ai/qwen-image-edit-2509-trainer': { + input: SchemaQwenImageEdit2509TrainerInput + output: SchemaQwenImageEdit2509TrainerOutput + } + 'fal-ai/z-image-trainer': { + input: SchemaZImageTrainerInput + output: SchemaZImageTrainerOutput + } + 'fal-ai/flux-2-trainer/edit': { + input: SchemaFlux2TrainerEditInput + output: SchemaFlux2TrainerEditOutput + } + 'fal-ai/flux-2-trainer': { + input: SchemaFlux2TrainerInput + output: SchemaFlux2TrainerOutput + } + 'fal-ai/qwen-image-edit-plus-trainer': { + input: SchemaQwenImageEditPlusTrainerInput + output: SchemaQwenImageEditPlusTrainerOutput + } + 'fal-ai/qwen-image-edit-trainer': { + input: SchemaQwenImageEditTrainerInput + output: SchemaQwenImageEditTrainerOutput + } + 'fal-ai/qwen-image-trainer': { + input: SchemaQwenImageTrainerInput + output: SchemaQwenImageTrainerOutput + } + 'fal-ai/wan-22-image-trainer': { + input: SchemaWan22ImageTrainerInput + output: SchemaWan22ImageTrainerOutput + } + 'fal-ai/wan-trainer/t2v': { + input: SchemaWanTrainerT2vInput + output: SchemaWanTrainerT2vOutput + } + 'fal-ai/wan-trainer/t2v-14b': { + input: SchemaWanTrainerT2V14bInput + output: SchemaWanTrainerT2V14bOutput + } + 'fal-ai/wan-trainer/i2v-720p': { + input: SchemaWanTrainerI2V720pInput + output: SchemaWanTrainerI2V720pOutput + } + 'fal-ai/wan-trainer/flf2v-720p': { + input: SchemaWanTrainerFlf2V720pInput + output: SchemaWanTrainerFlf2V720pOutput + } + 'fal-ai/ltx-video-trainer': { + input: SchemaLtxVideoTrainerInput + output: SchemaLtxVideoTrainerOutput + } + 'fal-ai/recraft/v3/create-style': { + input: SchemaRecraftV3CreateStyleInput + output: SchemaRecraftV3CreateStyleOutput + } + 'fal-ai/turbo-flux-trainer': { + input: SchemaTurboFluxTrainerInput + output: SchemaTurboFluxTrainerOutput + } + 'fal-ai/wan-trainer': { + input: SchemaWanTrainerInput + output: SchemaWanTrainerOutput + } + 'fal-ai/hunyuan-video-lora-training': { + input: SchemaHunyuanVideoLoraTrainingInput + output: SchemaHunyuanVideoLoraTrainingOutput + } +} + +/** Union type of all training model endpoint IDs */ +export type TrainingModel = keyof TrainingEndpointMap + +export const TrainingSchemaMap: Record< + TrainingModel, + { input: z.ZodSchema; output: z.ZodSchema } +> = { + ['fal-ai/flux-krea-trainer']: { + input: zSchemaFluxKreaTrainerInput, + output: zSchemaFluxKreaTrainerOutput, + }, + ['fal-ai/flux-kontext-trainer']: { + input: zSchemaFluxKontextTrainerInput, + output: zSchemaFluxKontextTrainerOutput, + }, + ['fal-ai/flux-lora-fast-training']: { + input: zSchemaFluxLoraFastTrainingInput, + output: zSchemaFluxLoraFastTrainingOutput, + }, + ['fal-ai/flux-lora-portrait-trainer']: { + input: zSchemaFluxLoraPortraitTrainerInput, + output: zSchemaFluxLoraPortraitTrainerOutput, + }, + ['fal-ai/z-image-base-trainer']: { + input: 
zSchemaZImageBaseTrainerInput, + output: zSchemaZImageBaseTrainerOutput, + }, + ['fal-ai/z-image-turbo-trainer-v2']: { + input: zSchemaZImageTurboTrainerV2Input, + output: zSchemaZImageTurboTrainerV2Output, + }, + ['fal-ai/flux-2-klein-9b-base-trainer/edit']: { + input: zSchemaFlux2Klein9bBaseTrainerEditInput, + output: zSchemaFlux2Klein9bBaseTrainerEditOutput, + }, + ['fal-ai/flux-2-klein-9b-base-trainer']: { + input: zSchemaFlux2Klein9bBaseTrainerInput, + output: zSchemaFlux2Klein9bBaseTrainerOutput, + }, + ['fal-ai/flux-2-klein-4b-base-trainer']: { + input: zSchemaFlux2Klein4bBaseTrainerInput, + output: zSchemaFlux2Klein4bBaseTrainerOutput, + }, + ['fal-ai/flux-2-klein-4b-base-trainer/edit']: { + input: zSchemaFlux2Klein4bBaseTrainerEditInput, + output: zSchemaFlux2Klein4bBaseTrainerEditOutput, + }, + ['fal-ai/qwen-image-2512-trainer-v2']: { + input: zSchemaQwenImage2512TrainerV2Input, + output: zSchemaQwenImage2512TrainerV2Output, + }, + ['fal-ai/flux-2-trainer-v2/edit']: { + input: zSchemaFlux2TrainerV2EditInput, + output: zSchemaFlux2TrainerV2EditOutput, + }, + ['fal-ai/flux-2-trainer-v2']: { + input: zSchemaFlux2TrainerV2Input, + output: zSchemaFlux2TrainerV2Output, + }, + ['fal-ai/ltx2-v2v-trainer']: { + input: zSchemaLtx2V2vTrainerInput, + output: zSchemaLtx2V2vTrainerOutput, + }, + ['fal-ai/ltx2-video-trainer']: { + input: zSchemaLtx2VideoTrainerInput, + output: zSchemaLtx2VideoTrainerOutput, + }, + ['fal-ai/qwen-image-2512-trainer']: { + input: zSchemaQwenImage2512TrainerInput, + output: zSchemaQwenImage2512TrainerOutput, + }, + ['fal-ai/qwen-image-edit-2511-trainer']: { + input: zSchemaQwenImageEdit2511TrainerInput, + output: zSchemaQwenImageEdit2511TrainerOutput, + }, + ['fal-ai/qwen-image-layered-trainer']: { + input: zSchemaQwenImageLayeredTrainerInput, + output: zSchemaQwenImageLayeredTrainerOutput, + }, + ['fal-ai/qwen-image-edit-2509-trainer']: { + input: zSchemaQwenImageEdit2509TrainerInput, + output: zSchemaQwenImageEdit2509TrainerOutput, + }, + ['fal-ai/z-image-trainer']: { + input: zSchemaZImageTrainerInput, + output: zSchemaZImageTrainerOutput, + }, + ['fal-ai/flux-2-trainer/edit']: { + input: zSchemaFlux2TrainerEditInput, + output: zSchemaFlux2TrainerEditOutput, + }, + ['fal-ai/flux-2-trainer']: { + input: zSchemaFlux2TrainerInput, + output: zSchemaFlux2TrainerOutput, + }, + ['fal-ai/qwen-image-edit-plus-trainer']: { + input: zSchemaQwenImageEditPlusTrainerInput, + output: zSchemaQwenImageEditPlusTrainerOutput, + }, + ['fal-ai/qwen-image-edit-trainer']: { + input: zSchemaQwenImageEditTrainerInput, + output: zSchemaQwenImageEditTrainerOutput, + }, + ['fal-ai/qwen-image-trainer']: { + input: zSchemaQwenImageTrainerInput, + output: zSchemaQwenImageTrainerOutput, + }, + ['fal-ai/wan-22-image-trainer']: { + input: zSchemaWan22ImageTrainerInput, + output: zSchemaWan22ImageTrainerOutput, + }, + ['fal-ai/wan-trainer/t2v']: { + input: zSchemaWanTrainerT2vInput, + output: zSchemaWanTrainerT2vOutput, + }, + ['fal-ai/wan-trainer/t2v-14b']: { + input: zSchemaWanTrainerT2V14bInput, + output: zSchemaWanTrainerT2V14bOutput, + }, + ['fal-ai/wan-trainer/i2v-720p']: { + input: zSchemaWanTrainerI2V720pInput, + output: zSchemaWanTrainerI2V720pOutput, + }, + ['fal-ai/wan-trainer/flf2v-720p']: { + input: zSchemaWanTrainerFlf2V720pInput, + output: zSchemaWanTrainerFlf2V720pOutput, + }, + ['fal-ai/ltx-video-trainer']: { + input: zSchemaLtxVideoTrainerInput, + output: zSchemaLtxVideoTrainerOutput, + }, + ['fal-ai/recraft/v3/create-style']: { + input: zSchemaRecraftV3CreateStyleInput, + 
output: zSchemaRecraftV3CreateStyleOutput,
+  },
+  ['fal-ai/turbo-flux-trainer']: {
+    input: zSchemaTurboFluxTrainerInput,
+    output: zSchemaTurboFluxTrainerOutput,
+  },
+  ['fal-ai/wan-trainer']: {
+    input: zSchemaWanTrainerInput,
+    output: zSchemaWanTrainerOutput,
+  },
+  ['fal-ai/hunyuan-video-lora-training']: {
+    input: zSchemaHunyuanVideoLoraTrainingInput,
+    output: zSchemaHunyuanVideoLoraTrainingOutput,
+  },
+} as const
+
+/** Get the input type for a specific training model */
+export type TrainingModelInput<T extends TrainingModel> =
+  TrainingEndpointMap[T]['input']
+
+/** Get the output type for a specific training model */
+export type TrainingModelOutput<T extends TrainingModel> =
+  TrainingEndpointMap[T]['output']
diff --git a/packages/typescript/ai-fal/src/generated/training/types.gen.ts b/packages/typescript/ai-fal/src/generated/training/types.gen.ts
new file mode 100644
index 00000000..6605a9eb
--- /dev/null
+++ b/packages/typescript/ai-fal/src/generated/training/types.gen.ts
@@ -0,0 +1,5883 @@
+// This file is auto-generated by @hey-api/openapi-ts
+
+export type ClientOptions = {
+  baseUrl: 'https://queue.fal.run' | (string & {})
+}
+
+/**
+ * Output
+ */
+export type SchemaHunyuanVideoLoraTrainingOutput = {
+  config_file: SchemaFile
+  diffusers_lora_file: SchemaFile
+}
+
+/**
+ * File
+ */
+export type SchemaFile = {
+  /**
+   * File Size
+   *
+   * The size of the file in bytes.
+   */
+  file_size?: number | unknown
+  /**
+   * File Name
+   *
+   * The name of the file. It will be auto-generated if not provided.
+   */
+  file_name?: string | unknown
+  /**
+   * Content Type
+   *
+   * The mime type of the file.
+   */
+  content_type?: string | unknown
+  /**
+   * Url
+   *
+   * The URL where the file can be downloaded from.
+   */
+  url: string
+}
+
+/**
+ * PublicInput
+ */
+export type SchemaHunyuanVideoLoraTrainingInput = {
+  /**
+   * Trigger Word
+   *
+   * The trigger word to use.
+   */
+  trigger_word?: string
+  /**
+   * Images Data Url
+   *
+   *
+   * URL to zip archive with images. Try to use at least 4 images; in general, the more the better.
+   *
+   * In addition to images, the archive can contain text files with captions. Each text file should have the same name as the image file it corresponds to.
+   *
+   */
+  images_data_url: string
+  /**
+   * Steps
+   *
+   * Number of steps to train the LoRA on.
+   */
+  steps: number
+  /**
+   * Data Archive Format
+   *
+   * The format of the archive. If not specified, the format will be inferred from the URL.
+   */
+  data_archive_format?: string | unknown | null
+  /**
+   * Learning Rate
+   *
+   * Learning rate to use for training.
+   */
+  learning_rate?: number
+  /**
+   * Do Caption
+   *
+   * Whether to generate captions for the images.
+   */
+  do_caption?: boolean
+}
+
+/**
+ * Output
+ */
+export type SchemaWanTrainerOutput = {
+  /**
+   * Lora File
+   *
+   * URL to the trained LoRA weights.
+   */
+  lora_file: SchemaFile
+  /**
+   * Config File
+   *
+   * Configuration used for setting up the inference endpoints.
+   */
+  config_file: SchemaFile
+}
+
+/**
+ * Input
+ */
+export type SchemaWanTrainerInput = {
+  /**
+   * Number Of Steps
+   *
+   * The number of steps to train for.
+   */
+  number_of_steps?: number
+  /**
+   * Training Data URL
+   *
+   * URL to zip archive with images of a consistent style. Try to use at least 10 images and/or videos, although more is better.
+   *
+   * In addition to images, the archive can contain text files with captions. Each text file should have the same name as the image/video file it corresponds to.
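+   *
+   * For example (illustrative file names), an archive containing `clip1.mp4`
+   * and `clip1.txt` will use the text in `clip1.txt` as the caption for
+   * `clip1.mp4`.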
+   */
+  training_data_url: string
+  /**
+   * Trigger Phrase
+   *
+   * The phrase that will trigger the model to generate an image.
+   */
+  trigger_phrase?: string
+  /**
+   * Learning Rate
+   *
+   * The rate at which the model learns. Higher values can lead to faster training, but may cause over-fitting.
+   */
+  learning_rate?: number
+  /**
+   * Auto-Scale Input
+   *
+   * If true, the input video will be automatically scaled to 81 frames at 16fps.
+   */
+  auto_scale_input?: boolean
+}
+
+/**
+ * Output
+ */
+export type SchemaTurboFluxTrainerOutput = {
+  /**
+   * Config File
+   *
+   * URL to the trained diffusers config file.
+   */
+  config_file: SchemaFile
+  /**
+   * Diffusers Lora File
+   *
+   * URL to the trained diffusers lora weights.
+   */
+  diffusers_lora_file: SchemaFile
+}
+
+/**
+ * Input
+ */
+export type SchemaTurboFluxTrainerInput = {
+  /**
+   * Images Data Url
+   *
+   *
+   * URL to zip archive with images of a consistent style. Try to use at least 10 images, although more is better.
+   *
+   */
+  images_data_url: string
+  /**
+   * Trigger Phrase
+   *
+   * Trigger phrase to be used in the captions. If None, a trigger word will not be used.
+   * If no captions are provided, the trigger word will be used instead of captions. If captions are provided, the trigger word will replace the `[trigger]` string in the captions.
+   *
+   */
+  trigger_phrase?: string
+  /**
+   * Steps
+   *
+   * Number of steps to train the LoRA on.
+   */
+  steps?: number
+  /**
+   * Learning Rate
+   *
+   * Learning rate for the training.
+   */
+  learning_rate?: number
+  /**
+   * Training Style
+   *
+   * Training style to use.
+   */
+  training_style?: 'subject' | 'style'
+  /**
+   * Face Crop
+   *
+   * Whether to try to detect the face and crop the images to the face.
+   */
+  face_crop?: boolean
+}
+
+/**
+ * StyleReferenceOutput
+ */
+export type SchemaRecraftV3CreateStyleOutput = {
+  /**
+   * Style Id
+   *
+   * The ID of the created style; it can be used to reference the style in the future.
+   */
+  style_id: string
+}
+
+/**
+ * StyleReferenceInput
+ */
+export type SchemaRecraftV3CreateStyleInput = {
+  /**
+   * Images Data Url
+   *
+   * URL to zip archive with images; use PNG format. Maximum 5 images are allowed.
+   */
+  images_data_url: string
+  /**
+   * Base Style
+   *
+   * The base style of the generated images; this topic is covered above.
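+   *
+   * For example, `digital_illustration/pixel_art` selects the pixel-art
+   * sub-style of the `digital_illustration` base style.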
+   */
+  base_style?:
+    | 'any'
+    | 'realistic_image'
+    | 'digital_illustration'
+    | 'vector_illustration'
+    | 'realistic_image/b_and_w'
+    | 'realistic_image/hard_flash'
+    | 'realistic_image/hdr'
+    | 'realistic_image/natural_light'
+    | 'realistic_image/studio_portrait'
+    | 'realistic_image/enterprise'
+    | 'realistic_image/motion_blur'
+    | 'realistic_image/evening_light'
+    | 'realistic_image/faded_nostalgia'
+    | 'realistic_image/forest_life'
+    | 'realistic_image/mystic_naturalism'
+    | 'realistic_image/natural_tones'
+    | 'realistic_image/organic_calm'
+    | 'realistic_image/real_life_glow'
+    | 'realistic_image/retro_realism'
+    | 'realistic_image/retro_snapshot'
+    | 'realistic_image/urban_drama'
+    | 'realistic_image/village_realism'
+    | 'realistic_image/warm_folk'
+    | 'digital_illustration/pixel_art'
+    | 'digital_illustration/hand_drawn'
+    | 'digital_illustration/grain'
+    | 'digital_illustration/infantile_sketch'
+    | 'digital_illustration/2d_art_poster'
+    | 'digital_illustration/handmade_3d'
+    | 'digital_illustration/hand_drawn_outline'
+    | 'digital_illustration/engraving_color'
+    | 'digital_illustration/2d_art_poster_2'
+    | 'digital_illustration/antiquarian'
+    | 'digital_illustration/bold_fantasy'
+    | 'digital_illustration/child_book'
+    | 'digital_illustration/child_books'
+    | 'digital_illustration/cover'
+    | 'digital_illustration/crosshatch'
+    | 'digital_illustration/digital_engraving'
+    | 'digital_illustration/expressionism'
+    | 'digital_illustration/freehand_details'
+    | 'digital_illustration/grain_20'
+    | 'digital_illustration/graphic_intensity'
+    | 'digital_illustration/hard_comics'
+    | 'digital_illustration/long_shadow'
+    | 'digital_illustration/modern_folk'
+    | 'digital_illustration/multicolor'
+    | 'digital_illustration/neon_calm'
+    | 'digital_illustration/noir'
+    | 'digital_illustration/nostalgic_pastel'
+    | 'digital_illustration/outline_details'
+    | 'digital_illustration/pastel_gradient'
+    | 'digital_illustration/pastel_sketch'
+    | 'digital_illustration/pop_art'
+    | 'digital_illustration/pop_renaissance'
+    | 'digital_illustration/street_art'
+    | 'digital_illustration/tablet_sketch'
+    | 'digital_illustration/urban_glow'
+    | 'digital_illustration/urban_sketching'
+    | 'digital_illustration/vanilla_dreams'
+    | 'digital_illustration/young_adult_book'
+    | 'digital_illustration/young_adult_book_2'
+    | 'vector_illustration/bold_stroke'
+    | 'vector_illustration/chemistry'
+    | 'vector_illustration/colored_stencil'
+    | 'vector_illustration/contour_pop_art'
+    | 'vector_illustration/cosmics'
+    | 'vector_illustration/cutout'
+    | 'vector_illustration/depressive'
+    | 'vector_illustration/editorial'
+    | 'vector_illustration/emotional_flat'
+    | 'vector_illustration/infographical'
+    | 'vector_illustration/marker_outline'
+    | 'vector_illustration/mosaic'
+    | 'vector_illustration/naivector'
+    | 'vector_illustration/roundish_flat'
+    | 'vector_illustration/segmented_colors'
+    | 'vector_illustration/sharp_contrast'
+    | 'vector_illustration/thin'
+    | 'vector_illustration/vector_photo'
+    | 'vector_illustration/vivid_shapes'
+    | 'vector_illustration/engraving'
+    | 'vector_illustration/line_art'
+    | 'vector_illustration/line_circuit'
+    | 'vector_illustration/linocut'
+}
+
+/**
+ * TrainingOutput
+ */
+export type SchemaLtxVideoTrainerOutput = {
+  lora_file: SchemaFile
+  config_file: SchemaFile
+  /**
+   * The URL to the validation video.
+   */
+  video: SchemaFile | unknown
+}
+
+/**
+ * Input
+ */
+export type SchemaLtxVideoTrainerInput = {
+  /**
+   * Number Of Steps
+   *
+   * The number of steps to train for.
+   */
+  number_of_steps?: number
+  /**
+   * Frame Rate
+   *
+   * The target frames per second for the video.
+   */
+  frame_rate?: number
+  /**
+   * Learning Rate
+   *
+   * The rate at which the model learns. Higher values can lead to faster training, but may cause over-fitting.
+   */
+  learning_rate?: number
+  /**
+   * Validation
+   *
+   * A list of validation prompts to use during training. When providing an image, _all_ validation inputs must have an image.
+   */
+  validation?: Array<SchemaValidation>
+  /**
+   * Number Of Frames
+   *
+   * The number of frames to use for training. This is the number of frames per second multiplied by the number of seconds.
+   */
+  number_of_frames?: number
+  /**
+   * Validation Reverse
+   *
+   * If true, the validation videos will be reversed. This is useful for effects that are learned in reverse and then applied in reverse.
+   */
+  validation_reverse?: boolean
+  /**
+   * Training Data Url
+   *
+   * URL to zip archive with videos or images. Try to use at least 10 files, although more is better.
+   *
+   * **Supported video formats:** .mp4, .mov, .avi, .mkv
+   * **Supported image formats:** .png, .jpg, .jpeg
+   *
+   * Note: The dataset must contain ONLY videos OR ONLY images - mixed datasets are not supported.
+   *
+   * The archive can also contain text files with captions. Each text file should have the same name as the media file it corresponds to.
+   */
+  training_data_url: string
+  /**
+   * Split Input Duration Threshold
+   *
+   * The duration threshold in seconds. If a video is longer than this, it will be split into scenes. If you provide captions for a split video, the caption will be applied to each scene. If you do not provide captions, scenes will be auto-captioned.
+   */
+  split_input_duration_threshold?: number
+  /**
+   * Rank
+   *
+   * The rank of the LoRA.
+   */
+  rank?: 8 | 16 | 32 | 64 | 128
+  /**
+   * Aspect Ratio
+   *
+   * The aspect ratio to use for training. This is the aspect ratio of the video.
+   */
+  aspect_ratio?: '16:9' | '1:1' | '9:16'
+  /**
+   * Trigger Phrase
+   *
+   * The phrase that will trigger the model to generate an image.
+   */
+  trigger_phrase?: string
+  /**
+   * Resolution
+   *
+   * The resolution to use for training. This is the resolution of the video.
+   */
+  resolution?: 'low' | 'medium' | 'high'
+  /**
+   * Split Input Into Scenes
+   *
+   * If true, videos above a certain duration threshold will be split into scenes. If you provide captions for a split video, the caption will be applied to each scene. If you do not provide captions, scenes will be auto-captioned. This option has no effect on image datasets.
+   */
+  split_input_into_scenes?: boolean
+  /**
+   * Validation Resolution
+   *
+   * The resolution to use for validation.
+   */
+  validation_resolution?: 'low' | 'medium' | 'high'
+  /**
+   * Validation Number Of Frames
+   *
+   * The number of frames to use for validation.
+   */
+  validation_number_of_frames?: number
+  /**
+   * Validation Aspect Ratio
+   *
+   * The aspect ratio to use for validation.
+   */
+  validation_aspect_ratio?: '16:9' | '1:1' | '9:16'
+  /**
+   * Validation Negative Prompt
+   *
+   * A negative prompt to use for validation.
+   */
+  validation_negative_prompt?: string
+  /**
+   * Auto Scale Input
+   *
+   * If true, videos will be automatically scaled to the target frame count and fps. This option has no effect on image datasets.
+   */
+  auto_scale_input?: boolean
+}
+
+/**
+ * Validation
+ */
+export type SchemaValidation = {
+  /**
+   * Prompt
+   *
+   * The prompt to use for validation.
+   */
+  prompt: string
+  /**
+   * Image Url
+   *
+   * An image to use for image-to-video validation. If provided for one validation, _all_ validation inputs must have an image.
+   */
+  image_url?: string | unknown
+}
+
+/**
+ * Output
+ */
+export type SchemaWanTrainerFlf2V720pOutput = {
+  /**
+   * Lora File
+   *
+   * URL to the trained LoRA weights.
+   */
+  lora_file: SchemaFile
+  /**
+   * Config File
+   *
+   * Configuration used for setting up the inference endpoints.
+   */
+  config_file: SchemaFile
+}
+
+/**
+ * Input
+ */
+export type SchemaWanTrainerFlf2V720pInput = {
+  /**
+   * Number Of Steps
+   *
+   * The number of steps to train for.
+   */
+  number_of_steps?: number
+  /**
+   * Training Data URL
+   *
+   * URL to zip archive with images of a consistent style. Try to use at least 10 images and/or videos, although more is better.
+   *
+   * In addition to images, the archive can contain text files with captions. Each text file should have the same name as the image/video file it corresponds to.
+   */
+  training_data_url: string
+  /**
+   * Trigger Phrase
+   *
+   * The phrase that will trigger the model to generate an image.
+   */
+  trigger_phrase?: string
+  /**
+   * Learning Rate
+   *
+   * The rate at which the model learns. Higher values can lead to faster training, but may cause over-fitting.
+   */
+  learning_rate?: number
+  /**
+   * Auto-Scale Input
+   *
+   * If true, the input video will be automatically scaled to 81 frames at 16fps.
+   */
+  auto_scale_input?: boolean
+}
+
+/**
+ * Output
+ */
+export type SchemaWanTrainerI2V720pOutput = {
+  /**
+   * Lora File
+   *
+   * URL to the trained LoRA weights.
+   */
+  lora_file: SchemaFile
+  /**
+   * Config File
+   *
+   * Configuration used for setting up the inference endpoints.
+   */
+  config_file: SchemaFile
+}
+
+/**
+ * Input
+ */
+export type SchemaWanTrainerI2V720pInput = {
+  /**
+   * Number Of Steps
+   *
+   * The number of steps to train for.
+   */
+  number_of_steps?: number
+  /**
+   * Training Data URL
+   *
+   * URL to zip archive with images of a consistent style. Try to use at least 10 images and/or videos, although more is better.
+   *
+   * In addition to images, the archive can contain text files with captions. Each text file should have the same name as the image/video file it corresponds to.
+   */
+  training_data_url: string
+  /**
+   * Trigger Phrase
+   *
+   * The phrase that will trigger the model to generate an image.
+   */
+  trigger_phrase?: string
+  /**
+   * Learning Rate
+   *
+   * The rate at which the model learns. Higher values can lead to faster training, but may cause over-fitting.
+   */
+  learning_rate?: number
+  /**
+   * Auto-Scale Input
+   *
+   * If true, the input video will be automatically scaled to 81 frames at 16fps.
+   */
+  auto_scale_input?: boolean
+}
+
+/**
+ * Output
+ */
+export type SchemaWanTrainerT2V14bOutput = {
+  /**
+   * Lora File
+   *
+   * URL to the trained LoRA weights.
+   */
+  lora_file: SchemaFile
+  /**
+   * Config File
+   *
+   * Configuration used for setting up the inference endpoints.
+   */
+  config_file: SchemaFile
+}
+
+/**
+ * Input
+ */
+export type SchemaWanTrainerT2V14bInput = {
+  /**
+   * Number Of Steps
+   *
+   * The number of steps to train for.
+   */
+  number_of_steps?: number
+  /**
+   * Training Data URL
+   *
+   * URL to zip archive with images of a consistent style. Try to use at least 10 images and/or videos, although more is better.
+   *
+   * In addition to images, the archive can contain text files with captions. Each text file should have the same name as the image/video file it corresponds to.
+   */
+  training_data_url: string
+  /**
+   * Trigger Phrase
+   *
+   * The phrase that will trigger the model to generate an image.
+   */
+  trigger_phrase?: string
+  /**
+   * Learning Rate
+   *
+   * The rate at which the model learns. Higher values can lead to faster training, but may cause over-fitting.
+   */
+  learning_rate?: number
+  /**
+   * Auto-Scale Input
+   *
+   * If true, the input video will be automatically scaled to 81 frames at 16fps.
+   */
+  auto_scale_input?: boolean
+}
+
+/**
+ * Output
+ */
+export type SchemaWanTrainerT2vOutput = {
+  /**
+   * Lora File
+   *
+   * URL to the trained LoRA weights.
+   */
+  lora_file: SchemaFile
+  /**
+   * Config File
+   *
+   * Configuration used for setting up the inference endpoints.
+   */
+  config_file: SchemaFile
+}
+
+/**
+ * Input
+ */
+export type SchemaWanTrainerT2vInput = {
+  /**
+   * Number Of Steps
+   *
+   * The number of steps to train for.
+   */
+  number_of_steps?: number
+  /**
+   * Training Data URL
+   *
+   * URL to zip archive with images of a consistent style. Try to use at least 10 images and/or videos, although more is better.
+   *
+   * In addition to images, the archive can contain text files with captions. Each text file should have the same name as the image/video file it corresponds to.
+   */
+  training_data_url: string
+  /**
+   * Trigger Phrase
+   *
+   * The phrase that will trigger the model to generate an image.
+   */
+  trigger_phrase?: string
+  /**
+   * Learning Rate
+   *
+   * The rate at which the model learns. Higher values can lead to faster training, but may cause over-fitting.
+   */
+  learning_rate?: number
+  /**
+   * Auto-Scale Input
+   *
+   * If true, the input video will be automatically scaled to 81 frames at 16fps.
+   */
+  auto_scale_input?: boolean
+}
+
+/**
+ * WanTrainerResponse
+ */
+export type SchemaWan22ImageTrainerOutput = {
+  /**
+   * Config File
+   *
+   * Config file helping inference endpoints after training.
+   */
+  config_file: SchemaFile
+  /**
+   * High Noise LoRA
+   *
+   * High noise LoRA file.
+   */
+  high_noise_lora: SchemaFile
+  /**
+   * Low Noise LoRA
+   *
+   * Low noise LoRA file.
+   */
+  diffusers_lora_file: SchemaFile
+}
+
+/**
+ * BasicInput
+ */
+export type SchemaWan22ImageTrainerInput = {
+  /**
+   * Trigger Phrase
+   *
+   * Trigger phrase for the model.
+   */
+  trigger_phrase: string
+  /**
+   * Use Masks
+   *
+   * Whether to use masks for the training data.
+   */
+  use_masks?: boolean
+  /**
+   * Learning Rate
+   *
+   * Learning rate for training.
+   */
+  learning_rate?: number
+  /**
+   * Use Face Cropping
+   *
+   * Whether to use face cropping for the training data. When enabled, images will be cropped to the face before resizing.
+   */
+  use_face_cropping?: boolean
+  /**
+   * Training Data URL
+   *
+   * URL to the training data.
+   */
+  training_data_url: string
+  /**
+   * Number of Steps
+   *
+   * Number of training steps.
+   */
+  steps?: number
+  /**
+   * Include Synthetic Captions
+   *
+   * Whether to include synthetic captions.
+   */
+  include_synthetic_captions?: boolean
+  /**
+   * Is Style
+   *
+   * Whether the training data is style data. If true, face specific options like masking and face detection will be disabled.
+   */
+  is_style?: boolean
+  /**
+   * Use Face Detection
+   *
+   * Whether to use face detection for the training data. When enabled, images will use the center of the face as the center of the image when resizing.
+   */
+  use_face_detection?: boolean
+}
+
+/**
+ * Output
+ */
+export type SchemaQwenImageTrainerOutput = {
+  /**
+   * Lora File
+   *
+   * URL to the trained LoRA weights file.
+   */
+  lora_file: SchemaFile
+  /**
+   * Config File
+   *
+   * URL to the training configuration file.
+ */ + config_file: SchemaFile +} + +/** + * PublicInput + */ +export type SchemaQwenImageTrainerInput = { + /** + * Steps + * + * Total number of training steps to perform. Default is 4000. + */ + steps?: number + /** + * Image Data Url + * + * + * URL to zip archive with images for training. The archive should contain images and corresponding text files with captions. + * Each text file should have the same name as the image file it corresponds to (e.g., image1.jpg and image1.txt). + * If text files are missing for some images, you can provide a trigger_phrase to automatically create them. + * Supported image formats: PNG, JPG, JPEG, WEBP. + * Try to use at least 10 images, although more is better. + * + */ + image_data_url: string + /** + * Learning Rate + * + * Learning rate for training. Default is 5e-4 + */ + learning_rate?: number + /** + * Trigger Phrase + * + * Default caption to use for images that don't have corresponding text files. If provided, missing .txt files will be created automatically. + */ + trigger_phrase?: string +} + +/** + * Output + */ +export type SchemaQwenImageEditTrainerOutput = { + config_file: SchemaFile + diffusers_lora_file: SchemaFile +} + +/** + * InputEdit + */ +export type SchemaQwenImageEditTrainerInput = { + /** + * Steps + * + * Number of steps to train for + */ + steps?: number + /** + * Image Data Url + * + * + * URL to the input data zip archive. + * + * The zip should contain pairs of images. The images should be named: + * + * ROOT_start.EXT and ROOT_end.EXT + * For example: + * photo_start.jpg and photo_end.jpg + * + * The zip can also contain a text file for each image pair. The text file should be named: + * ROOT.txt + * For example: + * photo.txt + * + * This text file can be used to specify the edit instructions for the image pair. + * + * If no text file is provided, the default_caption will be used. + * + * If no default_caption is provided, the training will fail. + * + */ + image_data_url: string + /** + * Learning Rate + * + * Learning rate for LoRA parameters. + */ + learning_rate?: number + /** + * Default Caption + * + * Default caption to use when caption files are missing. If None, missing captions will cause an error. + */ + default_caption?: string | unknown +} + +/** + * Output + */ +export type SchemaQwenImageEditPlusTrainerOutput = { + config_file: SchemaFile + diffusers_lora_file: SchemaFile +} + +/** + * InputPlus + */ +export type SchemaQwenImageEditPlusTrainerInput = { + /** + * Steps + * + * Number of steps to train for + */ + steps?: number + /** + * Image Data Url + * + * + * URL to the input data zip archive. + * + * The zip should contain pairs of images. The images should be named: + * + * ROOT_start.EXT and ROOT_end.EXT + * For example: + * photo_start.jpg and photo_end.jpg + * + * The zip can also contain more than one reference image for each image pair. The reference images should be named: + * ROOT_start.EXT, ROOT_start2.EXT, ROOT_start3.EXT, ..., ROOT_end.EXT + * For example: + * photo_start.jpg, photo_start2.jpg, photo_end.jpg + * + * The Reference Image Count field should be set to the number of reference images. + * + * The zip can also contain a text file for each image pair. The text file should be named: + * ROOT.txt + * For example: + * photo.txt + * + * This text file can be used to specify the edit instructions for the image pair. + * + * If no text file is provided, the default_caption will be used. + * + * If no default_caption is provided, the training will fail. 
+   *
+   */
+  image_data_url: string
+  /**
+   * Learning Rate
+   *
+   * Learning rate for LoRA parameters.
+   */
+  learning_rate?: number
+  /**
+   * Default Caption
+   *
+   * Default caption to use when caption files are missing. If None, missing captions will cause an error.
+   */
+  default_caption?: string | unknown
+}
+
+/**
+ * Output
+ */
+export type SchemaFlux2TrainerOutput = {
+  config_file: SchemaFile
+  diffusers_lora_file: SchemaFile
+}
+
+/**
+ * InputT2I
+ */
+export type SchemaFlux2TrainerInput = {
+  /**
+   * Steps
+   *
+   * Total number of training steps.
+   */
+  steps?: number
+  /**
+   * Image Data Url
+   *
+   *
+   * URL to zip archive with images of a consistent style. Try to use at least 10 images, although more is better.
+   *
+   * The zip can also contain a text file for each image. The text file should be named:
+   * ROOT.txt
+   * For example:
+   * photo.txt
+   *
+   * This text file can be used to specify a caption for the image.
+   *
+   * If no text file is provided, the default_caption will be used.
+   *
+   * If no default_caption is provided, the training will fail.
+   *
+   */
+  image_data_url: string
+  /**
+   * Learning Rate
+   *
+   * Learning rate applied to trainable parameters.
+   */
+  learning_rate?: number
+  /**
+   * Default Caption
+   *
+   * Default caption to use when caption files are missing. If None, missing captions will cause an error.
+   */
+  default_caption?: string | unknown
+  /**
+   * Output Lora Format
+   *
+   * Dictates the naming scheme for the output weights.
+   */
+  output_lora_format?: 'fal' | 'comfy'
+}
+
+/**
+ * Output
+ */
+export type SchemaFlux2TrainerEditOutput = {
+  config_file: SchemaFile
+  diffusers_lora_file: SchemaFile
+}
+
+/**
+ * InputEdit
+ */
+export type SchemaFlux2TrainerEditInput = {
+  /**
+   * Steps
+   *
+   * Total number of training steps.
+   */
+  steps?: number
+  /**
+   * Image Data Url
+   *
+   *
+   * URL to the input data zip archive.
+   *
+   * The zip should contain pairs of images. The images should be named:
+   *
+   * ROOT_start.EXT and ROOT_end.EXT
+   * For example:
+   * photo_start.jpg and photo_end.jpg
+   *
+   * The zip can also contain up to four reference images for each image pair. The reference images should be named:
+   * ROOT_start.EXT, ROOT_start2.EXT, ROOT_start3.EXT, ROOT_start4.EXT, ROOT_end.EXT
+   * For example:
+   * photo_start.jpg, photo_start2.jpg, photo_end.jpg
+   *
+   * The zip can also contain a text file for each image pair. The text file should be named:
+   * ROOT.txt
+   * For example:
+   * photo.txt
+   *
+   * This text file can be used to specify the edit instructions for the image pair.
+   *
+   * If no text file is provided, the default_caption will be used.
+   *
+   * If no default_caption is provided, the training will fail.
+   *
+   */
+  image_data_url: string
+  /**
+   * Learning Rate
+   *
+   * Learning rate applied to trainable parameters.
+   */
+  learning_rate?: number
+  /**
+   * Default Caption
+   *
+   * Default caption to use when caption files are missing. If None, missing captions will cause an error.
+   */
+  default_caption?: string | unknown
+  /**
+   * Output Lora Format
+   *
+   * Dictates the naming scheme for the output weights.
+   */
+  output_lora_format?: 'fal' | 'comfy'
+}
+
+/**
+ * Output
+ */
+export type SchemaZImageTrainerOutput = {
+  config_file: SchemaFile
+  diffusers_lora_file: SchemaFile
+}
+
+/**
+ * Input
+ */
+export type SchemaZImageTrainerInput = {
+  /**
+   * Steps
+   *
+   * Total number of training steps.
+   */
+  steps?: number
+  /**
+   * Image Data Url
+   *
+   *
+   * URL to zip archive with images of a consistent style. Try to use at least 10 images, although more is better.
+   *
+   * The zip can also contain a text file for each image. The text file should be named:
+   * ROOT.txt
+   * For example:
+   * photo.txt
+   *
+   * This text file can be used to specify a caption for the image.
+   *
+   * If no text file is provided, the default_caption will be used.
+   *
+   * If no default_caption is provided, the training will fail.
+   *
+   */
+  image_data_url: string
+  /**
+   * Training Type
+   *
+   * Type of training to perform. Use 'content' to focus on the content of the images, 'style' to focus on the style of the images, and 'balanced' to focus on a combination of both.
+   */
+  training_type?: 'content' | 'style' | 'balanced'
+  /**
+   * Learning Rate
+   *
+   * Learning rate applied to trainable parameters.
+   */
+  learning_rate?: number
+  /**
+   * Default Caption
+   *
+   * Default caption to use when caption files are missing. If None, missing captions will cause an error.
+   */
+  default_caption?: string | unknown
+}
+
+/**
+ * Output
+ */
+export type SchemaQwenImageEdit2509TrainerOutput = {
+  config_file: SchemaFile
+  diffusers_lora_file: SchemaFile
+}
+
+/**
+ * InputPlus
+ */
+export type SchemaQwenImageEdit2509TrainerInput = {
+  /**
+   * Steps
+   *
+   * Number of steps to train for.
+   */
+  steps?: number
+  /**
+   * Image Data Url
+   *
+   *
+   * URL to the input data zip archive.
+   *
+   * The zip should contain pairs of images. The images should be named:
+   *
+   * ROOT_start.EXT and ROOT_end.EXT
+   * For example:
+   * photo_start.jpg and photo_end.jpg
+   *
+   * The zip can also contain more than one reference image for each image pair. The reference images should be named:
+   * ROOT_start.EXT, ROOT_start2.EXT, ROOT_start3.EXT, ..., ROOT_end.EXT
+   * For example:
+   * photo_start.jpg, photo_start2.jpg, photo_end.jpg
+   *
+   * The Reference Image Count field should be set to the number of reference images.
+   *
+   * The zip can also contain a text file for each image pair. The text file should be named:
+   * ROOT.txt
+   * For example:
+   * photo.txt
+   *
+   * This text file can be used to specify the edit instructions for the image pair.
+   *
+   * If no text file is provided, the default_caption will be used.
+   *
+   * If no default_caption is provided, the training will fail.
+   *
+   */
+  image_data_url: string
+  /**
+   * Learning Rate
+   *
+   * Learning rate for LoRA parameters.
+   */
+  learning_rate?: number
+  /**
+   * Default Caption
+   *
+   * Default caption to use when caption files are missing. If None, missing captions will cause an error.
+   */
+  default_caption?: string | unknown
+}
+
+/**
+ * Output
+ */
+export type SchemaQwenImageLayeredTrainerOutput = {
+  config_file: SchemaFile
+  diffusers_lora_file: SchemaFile
+}
+
+/**
+ * Input
+ */
+export type SchemaQwenImageLayeredTrainerInput = {
+  /**
+   * Steps
+   *
+   * Number of steps to train for.
+   */
+  steps?: number
+  /**
+   * Image Data Url
+   *
+   *
+   * URL to the input data zip archive.
+   *
+   * The zip should contain groups of images. The images should be named:
+   *
+   * ROOT_start.EXT, ROOT_end.EXT, ROOT_end2.EXT, ..., ROOT_endN.EXT
+   * For example:
+   * photo_start.png, photo_end.png, photo_end2.png, ..., photo_endN.png
+   *
+   * The start image is the base image that will be decomposed into layers.
+   * The end images are the layers that will be added to the base image. ROOT_end.EXT is the first layer, ROOT_end2.EXT is the second layer, and so on.
+   * You can have up to 8 layers.
+   * All image groups must have the same number of output layers.
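+   *
+   * For example (illustrative file names), an archive with two layers per
+   * group could contain photo_start.png, photo_end.png, photo_end2.png as
+   * well as scene_start.png, scene_end.png, scene_end2.png.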
+ * + * The end images can contain transparent regions. Only PNG and WebP images are supported since these are the only formats that support transparency. + * + * The zip can also contain a text file for each image group. The text file should be named: + * ROOT.txt + * For example: + * photo.txt + * + * This text file can be used to specify a description of the base image. + * + * If no text file is provided, the default_caption will be used. + * + * If no default_caption is provided, the training will fail. + * + */ + image_data_url: string + /** + * Learning Rate + * + * Learning rate for LoRA parameters. + */ + learning_rate?: number + /** + * Default Caption + * + * Default caption to use when caption files are missing. If None, missing captions will cause an error. + */ + default_caption?: string | unknown +} + +/** + * Output + */ +export type SchemaQwenImageEdit2511TrainerOutput = { + config_file: SchemaFile + diffusers_lora_file: SchemaFile +} + +/** + * Input2511 + */ +export type SchemaQwenImageEdit2511TrainerInput = { + /** + * Steps + * + * Number of steps to train for + */ + steps?: number + /** + * Image Data Url + * + * + * URL to the input data zip archive. + * + * The zip should contain pairs of images. The images should be named: + * + * ROOT_start.EXT and ROOT_end.EXT + * For example: + * photo_start.jpg and photo_end.jpg + * + * The zip can also contain more than one reference image for each image pair. The reference images should be named: + * ROOT_start.EXT, ROOT_start2.EXT, ROOT_start3.EXT, ..., ROOT_end.EXT + * For example: + * photo_start.jpg, photo_start2.jpg, photo_end.jpg + * + * The Reference Image Count field should be set to the number of reference images. + * + * The zip can also contain a text file for each image pair. The text file should be named: + * ROOT.txt + * For example: + * photo.txt + * + * This text file can be used to specify the edit instructions for the image pair. + * + * If no text file is provided, the default_caption will be used. + * + * If no default_caption is provided, the training will fail. + * + */ + image_data_url: string + /** + * Learning Rate + * + * Learning rate for LoRA parameters. + */ + learning_rate?: number + /** + * Default Caption + * + * Default caption to use when caption files are missing. If None, missing captions will cause an error. + */ + default_caption?: string | unknown +} + +/** + * Output + */ +export type SchemaQwenImage2512TrainerOutput = { + config_file: SchemaFile + diffusers_lora_file: SchemaFile +} + +/** + * InputImage + */ +export type SchemaQwenImage2512TrainerInput = { + /** + * Steps + * + * Number of steps to train for + */ + steps?: number + /** + * Image Data Url + * + * + * URL to the input data zip archive for text-to-image training. + * + * The zip should contain images with their corresponding text captions: + * + * image.EXT and image.txt + * For example: + * photo.jpg and photo.txt + * + * The text file contains the caption/prompt describing the target image. + * + * If no text file is provided for an image, the default_caption will be used. + * + * If no default_caption is provided and a text file is missing, the training will fail. + * + */ + image_data_url: string + /** + * Learning Rate + * + * Learning rate for LoRA parameters. + */ + learning_rate?: number + /** + * Default Caption + * + * Default caption to use when caption files are missing. If None, missing captions will cause an error. 
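+   *
+   * For example, if photo.jpg has no matching photo.txt, this caption is
+   * used for it instead.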
+   */
+  default_caption?: string | unknown
+}
+
+/**
+ * LTX2Output
+ *
+ * Output from LTX-2 training.
+ */
+export type SchemaLtx2VideoTrainerOutput = {
+  lora_file: SchemaFile
+  config_file: SchemaFile
+  /**
+   * URL to the debug dataset archive containing decoded videos and audio.
+   */
+  debug_dataset?: SchemaFile | unknown
+  /**
+   * The URL to the validation videos, if any.
+   */
+  video: SchemaFile | unknown
+}
+
+/**
+ * LTX2Input
+ *
+ * Input configuration for LTX-2 text-to-video training.
+ */
+export type SchemaLtx2VideoTrainerInput = {
+  /**
+   * Number Of Steps
+   *
+   * The number of training steps.
+   */
+  number_of_steps?: number
+  /**
+   * Audio Preserve Pitch
+   *
+   * When audio duration doesn't match video duration, stretch/compress audio without changing pitch. If disabled, audio is trimmed or padded with silence.
+   */
+  audio_preserve_pitch?: boolean
+  /**
+   * Frame Rate
+   *
+   * Target frames per second for the video.
+   */
+  frame_rate?: number
+  /**
+   * Audio Normalize
+   *
+   * Normalize audio peak amplitude to a consistent level. Recommended for consistent audio levels across the dataset.
+   */
+  audio_normalize?: boolean
+  /**
+   * Validation
+   *
+   * A list of validation prompts to use during training. When providing an image, _all_ validation inputs must have an image.
+   */
+  validation?: Array<SchemaValidation>
+  /**
+   * Learning Rate
+   *
+   * Learning rate for optimization. Higher values can lead to faster training but may cause overfitting.
+   */
+  learning_rate?: number
+  /**
+   * Number Of Frames
+   *
+   * Number of frames per training sample. Must satisfy frames % 8 == 1 (e.g., 1, 9, 17, 25, 33, 41, 49, 57, 65, 73, 81, 89, 97).
+   */
+  number_of_frames?: number
+  /**
+   * Training Data Url
+   *
+   * URL to zip archive with videos or images. Try to use at least 10 files, although more is better.
+   *
+   * **Supported video formats:** .mp4, .mov, .avi, .mkv
+   * **Supported image formats:** .png, .jpg, .jpeg
+   *
+   * Note: The dataset must contain ONLY videos OR ONLY images - mixed datasets are not supported.
+   *
+   * The archive can also contain text files with captions. Each text file should have the same name as the media file it corresponds to.
+   */
+  training_data_url: string
+  /**
+   * Split Input Duration Threshold
+   *
+   * The duration threshold in seconds. If a video is longer than this, it will be split into scenes.
+   */
+  split_input_duration_threshold?: number
+  /**
+   * Rank
+   *
+   * The rank of the LoRA adaptation. Higher values increase capacity but use more memory.
+   */
+  rank?: 8 | 16 | 32 | 64 | 128
+  /**
+   * First Frame Conditioning P
+   *
+   * Probability of conditioning on the first frame during training. Higher values improve image-to-video performance.
+   */
+  first_frame_conditioning_p?: number
+  /**
+   * Stg Scale
+   *
+   * STG (Spatio-Temporal Guidance) scale. 0.0 disables STG. Recommended value is 1.0.
+   */
+  stg_scale?: number
+  /**
+   * Aspect Ratio
+   *
+   * Aspect ratio to use for training.
+   */
+  aspect_ratio?: '16:9' | '1:1' | '9:16'
+  /**
+   * With Audio
+   *
+   * Enable joint audio-video training. If None (default), automatically detects whether input videos have audio. Set to True to force audio training, or False to disable.
+   */
+  with_audio?: boolean | unknown
+  /**
+   * Trigger Phrase
+   *
+   * A phrase that will trigger the LoRA style. Will be prepended to captions during training.
+   */
+  trigger_phrase?: string
+  /**
+   * Validation Frame Rate
+   *
+   * Target frames per second for validation videos.
+   */
+  validation_frame_rate?: number
+  /**
+   * Resolution
+   *
+   * Resolution to use for training. Higher resolutions require more memory.
+   */
+  resolution?: 'low' | 'medium' | 'high'
+  /**
+   * Split Input Into Scenes
+   *
+   * If true, videos above a certain duration threshold will be split into scenes.
+   */
+  split_input_into_scenes?: boolean
+  /**
+   * Generate Audio In Validation
+   *
+   * Whether to generate audio in validation samples.
+   */
+  generate_audio_in_validation?: boolean
+  /**
+   * Validation Resolution
+   *
+   * The resolution to use for validation.
+   */
+  validation_resolution?: 'low' | 'medium' | 'high'
+  /**
+   * Validation Number Of Frames
+   *
+   * The number of frames in validation videos.
+   */
+  validation_number_of_frames?: number
+  /**
+   * Validation Aspect Ratio
+   *
+   * The aspect ratio to use for validation.
+   */
+  validation_aspect_ratio?: '16:9' | '1:1' | '9:16'
+  /**
+   * Validation Negative Prompt
+   *
+   * A negative prompt to use for validation.
+   */
+  validation_negative_prompt?: string
+  /**
+   * Auto Scale Input
+   *
+   * If true, videos will be automatically scaled to the target frame count and fps. This option has no effect on image datasets.
+   */
+  auto_scale_input?: boolean
+}
+
+/**
+ * V2VValidation
+ *
+ * Validation input for video-to-video training.
+ */
+export type SchemaV2vValidation = {
+  /**
+   * Prompt
+   *
+   * The prompt to use for validation.
+   */
+  prompt: string
+  /**
+   * Reference Video Url
+   *
+   * URL to reference video for IC-LoRA validation. This is the input video that will be transformed.
+   */
+  reference_video_url: string
+}
+
+/**
+ * LTX2V2VOutput
+ *
+ * Output from LTX-2 video-to-video training.
+ */
+export type SchemaLtx2V2vTrainerOutput = {
+  lora_file: SchemaFile
+  config_file: SchemaFile
+  /**
+   * URL to the debug dataset archive containing decoded videos.
+   */
+  debug_dataset?: SchemaFile | unknown
+  /**
+   * The URL to the validation videos (with reference videos side-by-side), if any.
+   */
+  video: SchemaFile | unknown
+}
+
+/**
+ * LTX2V2VInput
+ *
+ * Input configuration for LTX-2 video-to-video (IC-LoRA) training.
+ */
+export type SchemaLtx2V2vTrainerInput = {
+  /**
+   * Number Of Steps
+   *
+   * The number of training steps.
+   */
+  number_of_steps?: number
+  /**
+   * Frame Rate
+   *
+   * Target frames per second for the video.
+   */
+  frame_rate?: number
+  /**
+   * Learning Rate
+   *
+   * Learning rate for optimization. Higher values can lead to faster training but may cause overfitting.
+   */
+  learning_rate?: number
+  /**
+   * Validation
+   *
+   * A list of validation inputs with prompts and reference videos.
+   */
+  validation?: Array<SchemaV2vValidation>
+  /**
+   * Number Of Frames
+   *
+   * Number of frames per training sample. Must satisfy frames % 8 == 1 (e.g., 1, 9, 17, 25, 33, 41, 49, 57, 65, 73, 81, 89, 97).
+   */
+  number_of_frames?: number
+  /**
+   * Training Data Url
+   *
+   * URL to zip archive with videos or images. Try to use at least 10 files, although more is better.
+   *
+   * **Supported video formats:** .mp4, .mov, .avi, .mkv
+   * **Supported image formats:** .png, .jpg, .jpeg
+   *
+   * Note: The dataset must contain ONLY videos OR ONLY images - mixed datasets are not supported.
+   *
+   * The archive can also contain text files with captions. Each text file should have the same name as the media file it corresponds to.
+   */
+  training_data_url: string
+  /**
+   * Split Input Duration Threshold
+   *
+   * The duration threshold in seconds. If a video is longer than this, it will be split into scenes.
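+   *
+   * For example, with a threshold of 10 seconds, a 45-second training video
+   * would be split into shorter scenes before training (illustrative values).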
+   */
+  split_input_duration_threshold?: number
+  /**
+   * Rank
+   *
+   * The rank of the LoRA adaptation. Higher values increase capacity but use more memory.
+   */
+  rank?: 8 | 16 | 32 | 64 | 128
+  /**
+   * Stg Scale
+   *
+   * STG (Spatio-Temporal Guidance) scale. 0.0 disables STG. Recommended value is 1.0.
+   */
+  stg_scale?: number
+  /**
+   * First Frame Conditioning P
+   *
+   * Probability of conditioning on the first frame during training. Lower values work better for video-to-video transformation.
+   */
+  first_frame_conditioning_p?: number
+  /**
+   * Aspect Ratio
+   *
+   * Aspect ratio to use for training.
+   */
+  aspect_ratio?: '16:9' | '1:1' | '9:16'
+  /**
+   * Trigger Phrase
+   *
+   * A phrase that will trigger the LoRA style. Will be prepended to captions during training.
+   */
+  trigger_phrase?: string
+  /**
+   * Resolution
+   *
+   * Resolution to use for training. Higher resolutions require more memory.
+   */
+  resolution?: 'low' | 'medium' | 'high'
+  /**
+   * Validation Frame Rate
+   *
+   * Target frames per second for validation videos.
+   */
+  validation_frame_rate?: number
+  /**
+   * Split Input Into Scenes
+   *
+   * If true, videos above a certain duration threshold will be split into scenes.
+   */
+  split_input_into_scenes?: boolean
+  /**
+   * Validation Resolution
+   *
+   * The resolution to use for validation.
+   */
+  validation_resolution?: 'low' | 'medium' | 'high'
+  /**
+   * Validation Number Of Frames
+   *
+   * The number of frames in validation videos.
+   */
+  validation_number_of_frames?: number
+  /**
+   * Validation Aspect Ratio
+   *
+   * The aspect ratio to use for validation.
+   */
+  validation_aspect_ratio?: '16:9' | '1:1' | '9:16'
+  /**
+   * Validation Negative Prompt
+   *
+   * A negative prompt to use for validation.
+   */
+  validation_negative_prompt?: string
+  /**
+   * Auto Scale Input
+   *
+   * If true, videos will be automatically scaled to the target frame count and fps. This option has no effect on image datasets.
+   */
+  auto_scale_input?: boolean
+}
+
+/**
+ * Output
+ */
+export type SchemaFlux2TrainerV2Output = {
+  config_file: SchemaFile
+  diffusers_lora_file: SchemaFile
+}
+
+/**
+ * InputT2IV2
+ *
+ * V2 input with multi-resolution bucketing.
+ */
+export type SchemaFlux2TrainerV2Input = {
+  /**
+   * Steps
+   *
+   * Total number of training steps.
+   */
+  steps?: number
+  /**
+   * Image Data Url
+   *
+   *
+   * URL to zip archive with images of a consistent style. Try to use at least 10 images, although more is better.
+   *
+   * The zip can also contain a text file for each image. The text file should be named:
+   * ROOT.txt
+   * For example:
+   * photo.txt
+   *
+   * This text file can be used to specify a caption for the image.
+   *
+   * If no text file is provided, the default_caption will be used.
+   *
+   * If no default_caption is provided, the training will fail.
+   *
+   */
+  image_data_url: string
+  /**
+   * Learning Rate
+   *
+   * Learning rate applied to trainable parameters.
+   */
+  learning_rate?: number
+  /**
+   * Default Caption
+   *
+   * Default caption to use when caption files are missing. If None, missing captions will cause an error.
+   */
+  default_caption?: string | unknown
+  /**
+   * Output Lora Format
+   *
+   * Dictates the naming scheme for the output weights.
+   */
+  output_lora_format?: 'fal' | 'comfy'
+}
+
+/**
+ * Output
+ */
+export type SchemaFlux2TrainerV2EditOutput = {
+  config_file: SchemaFile
+  diffusers_lora_file: SchemaFile
+}
+
+/**
+ * InputEditV2
+ */
+export type SchemaFlux2TrainerV2EditInput = {
+  /**
+   * Steps
+   *
+   * Total number of training steps.
+   */
+  steps?: number
+  /**
+   * Image Data Url
+   *
+   *
+   * URL to the input data zip archive.
+   *
+   * The zip should contain pairs of images. The images should be named:
+   *
+   * ROOT_start.EXT and ROOT_end.EXT
+   * For example:
+   * photo_start.jpg and photo_end.jpg
+   *
+   * The zip can also contain up to four reference images for each image pair. The reference images should be named:
+   * ROOT_start.EXT, ROOT_start2.EXT, ROOT_start3.EXT, ROOT_start4.EXT, ROOT_end.EXT
+   * For example:
+   * photo_start.jpg, photo_start2.jpg, photo_end.jpg
+   *
+   * The zip can also contain a text file for each image pair. The text file should be named:
+   * ROOT.txt
+   * For example:
+   * photo.txt
+   *
+   * This text file can be used to specify the edit instructions for the image pair.
+   *
+   * If no text file is provided, the default_caption will be used.
+   *
+   * If no default_caption is provided, the training will fail.
+   *
+   */
+  image_data_url: string
+  /**
+   * Learning Rate
+   *
+   * Learning rate applied to trainable parameters.
+   */
+  learning_rate?: number
+  /**
+   * Default Caption
+   *
+   * Default caption to use when caption files are missing. If None, missing captions will cause an error.
+   */
+  default_caption?: string | unknown
+  /**
+   * Output Lora Format
+   *
+   * Dictates the naming scheme for the output weights.
+   */
+  output_lora_format?: 'fal' | 'comfy'
+}
+
+/**
+ * Output
+ */
+export type SchemaQwenImage2512TrainerV2Output = {
+  config_file: SchemaFile
+  diffusers_lora_file: SchemaFile
+}
+
+/**
+ * Input
+ */
+export type SchemaQwenImage2512TrainerV2Input = {
+  /**
+   * Steps
+   *
+   * Number of steps to train for.
+   */
+  steps?: number
+  /**
+   * Image Data Url
+   *
+   *
+   * URL to the input data zip archive.
+   *
+   * The zip should contain images and their corresponding captions.
+   *
+   * The images should be named: ROOT.EXT. For example: 001.jpg
+   *
+   * The corresponding captions should be named: ROOT.txt. For example: 001.txt
+   *
+   * If no text file is provided for an image, the default_caption will be used.
+   *
+   */
+  image_data_url: string
+  /**
+   * Learning Rate
+   *
+   * Learning rate.
+   */
+  learning_rate?: number
+  /**
+   * Default Caption
+   *
+   * Default caption to use when caption files are missing. If None, missing captions will cause an error.
+   */
+  default_caption?: string | unknown
+}
+
+/**
+ * Output
+ */
+export type SchemaFlux2Klein4bBaseTrainerEditOutput = {
+  config_file: SchemaFile
+  diffusers_lora_file: SchemaFile
+}
+
+/**
+ * InputEditV2
+ */
+export type SchemaFlux2Klein4bBaseTrainerEditInput = {
+  /**
+   * Steps
+   *
+   * Total number of training steps.
+   */
+  steps?: number
+  /**
+   * Image Data Url
+   *
+   *
+   * URL to the input data zip archive.
+   *
+   * The zip should contain pairs of images. The images should be named:
+   *
+   * ROOT_start.EXT and ROOT_end.EXT
+   * For example:
+   * photo_start.jpg and photo_end.jpg
+   *
+   * The zip can also contain up to four reference images for each image pair. The reference images should be named:
+   * ROOT_start.EXT, ROOT_start2.EXT, ROOT_start3.EXT, ROOT_start4.EXT, ROOT_end.EXT
+   * For example:
+   * photo_start.jpg, photo_start2.jpg, photo_end.jpg
+   *
+   * The zip can also contain a text file for each image pair. The text file should be named:
+   * ROOT.txt
+   * For example:
+   * photo.txt
+   *
+   * This text file can be used to specify the edit instructions for the image pair.
+   *
+   * If no text file is provided, the default_caption will be used.
+   *
+   * If no default_caption is provided, the training will fail.
+   *
+   */
+  image_data_url: string
+  /**
+   * Learning Rate
+   *
+   * Learning rate applied to trainable parameters.
+   */
+  learning_rate?: number
+  /**
+   * Default Caption
+   *
+   * Default caption to use when caption files are missing. If None, missing captions will cause an error.
+   */
+  default_caption?: string | unknown
+  /**
+   * Output Lora Format
+   *
+   * Dictates the naming scheme for the output weights.
+   */
+  output_lora_format?: 'fal' | 'comfy'
+}
+
+/**
+ * Output
+ */
+export type SchemaFlux2Klein4bBaseTrainerOutput = {
+  config_file: SchemaFile
+  diffusers_lora_file: SchemaFile
+}
+
+/**
+ * InputT2IV2
+ *
+ * V2 input with multi-resolution bucketing.
+ */
+export type SchemaFlux2Klein4bBaseTrainerInput = {
+  /**
+   * Steps
+   *
+   * Total number of training steps.
+   */
+  steps?: number
+  /**
+   * Image Data Url
+   *
+   *
+   * URL to zip archive with images of a consistent style. Try to use at least 10 images, although more is better.
+   *
+   * The zip can also contain a text file for each image. The text file should be named:
+   * ROOT.txt
+   * For example:
+   * photo.txt
+   *
+   * This text file can be used to specify a caption for the image.
+   *
+   * If no text file is provided, the default_caption will be used.
+   *
+   * If no default_caption is provided, the training will fail.
+   *
+   */
+  image_data_url: string
+  /**
+   * Learning Rate
+   *
+   * Learning rate applied to trainable parameters.
+   */
+  learning_rate?: number
+  /**
+   * Default Caption
+   *
+   * Default caption to use when caption files are missing. If None, missing captions will cause an error.
+   */
+  default_caption?: string | unknown
+  /**
+   * Output Lora Format
+   *
+   * Dictates the naming scheme for the output weights.
+   */
+  output_lora_format?: 'fal' | 'comfy'
+}
+
+/**
+ * Output
+ */
+export type SchemaFlux2Klein9bBaseTrainerOutput = {
+  config_file: SchemaFile
+  diffusers_lora_file: SchemaFile
+}
+
+/**
+ * InputT2IV2
+ *
+ * V2 input with multi-resolution bucketing.
+ */
+export type SchemaFlux2Klein9bBaseTrainerInput = {
+  /**
+   * Steps
+   *
+   * Total number of training steps.
+   */
+  steps?: number
+  /**
+   * Image Data Url
+   *
+   *
+   * URL to zip archive with images of a consistent style. Try to use at least 10 images, although more is better.
+   *
+   * The zip can also contain a text file for each image. The text file should be named:
+   * ROOT.txt
+   * For example:
+   * photo.txt
+   *
+   * This text file can be used to specify a caption for the image.
+   *
+   * If no text file is provided, the default_caption will be used.
+   *
+   * If no default_caption is provided, the training will fail.
+   *
+   */
+  image_data_url: string
+  /**
+   * Learning Rate
+   *
+   * Learning rate applied to trainable parameters.
+   */
+  learning_rate?: number
+  /**
+   * Default Caption
+   *
+   * Default caption to use when caption files are missing. If None, missing captions will cause an error.
+   */
+  default_caption?: string | unknown
+  /**
+   * Output Lora Format
+   *
+   * Dictates the naming scheme for the output weights.
+   */
+  output_lora_format?: 'fal' | 'comfy'
+}
+
+/**
+ * Output
+ */
+export type SchemaFlux2Klein9bBaseTrainerEditOutput = {
+  config_file: SchemaFile
+  diffusers_lora_file: SchemaFile
+}
+
+/**
+ * InputEditV2
+ */
+export type SchemaFlux2Klein9bBaseTrainerEditInput = {
+  /**
+   * Steps
+   *
+   * Total number of training steps.
+   */
+  steps?: number
+  /**
+   * Image Data Url
+   *
+   *
+   * URL to the input data zip archive.
+   *
+   * The zip should contain pairs of images. The images should be named:
+   *
+   * ROOT_start.EXT and ROOT_end.EXT
+   * For example:
+   * photo_start.jpg and photo_end.jpg
+   *
+   * The zip can also contain up to four reference images for each image pair. The reference images should be named:
+   * ROOT_start.EXT, ROOT_start2.EXT, ROOT_start3.EXT, ROOT_start4.EXT, ROOT_end.EXT
+   * For example:
+   * photo_start.jpg, photo_start2.jpg, photo_end.jpg
+   *
+   * The zip can also contain a text file for each image pair. The text file should be named:
+   * ROOT.txt
+   * For example:
+   * photo.txt
+   *
+   * This text file can be used to specify the edit instructions for the image pair.
+   *
+   * If no text file is provided, the default_caption will be used.
+   *
+   * If no default_caption is provided, the training will fail.
+   *
+   */
+  image_data_url: string
+  /**
+   * Learning Rate
+   *
+   * Learning rate applied to trainable parameters.
+   */
+  learning_rate?: number
+  /**
+   * Default Caption
+   *
+   * Default caption to use when caption files are missing. If None, missing captions will cause an error.
+   */
+  default_caption?: string | unknown
+  /**
+   * Output Lora Format
+   *
+   * Dictates the naming scheme for the output weights.
+   */
+  output_lora_format?: 'fal' | 'comfy'
+}
+
+/**
+ * Output
+ */
+export type SchemaZImageTurboTrainerV2Output = {
+  config_file: SchemaFile
+  diffusers_lora_file: SchemaFile
+}
+
+/**
+ * Input
+ */
+export type SchemaZImageTurboTrainerV2Input = {
+  /**
+   * Steps
+   *
+   * Number of steps to train for.
+   */
+  steps?: number
+  /**
+   * Image Data Url
+   *
+   *
+   * URL to the input data zip archive.
+   *
+   * The zip should contain images and their corresponding captions.
+   *
+   * The images should be named: ROOT.EXT. For example: 001.jpg
+   *
+   * The corresponding captions should be named: ROOT.txt. For example: 001.txt
+   *
+   * If no text file is provided for an image, the default_caption will be used.
+   *
+   */
+  image_data_url: string
+  /**
+   * Learning Rate
+   *
+   * Learning rate.
+   */
+  learning_rate?: number
+  /**
+   * Default Caption
+   *
+   * Default caption to use when caption files are missing. If None, missing captions will cause an error.
+   */
+  default_caption?: string | unknown
+}
+
+/**
+ * Output
+ */
+export type SchemaZImageBaseTrainerOutput = {
+  config_file: SchemaFile
+  diffusers_lora_file: SchemaFile
+}
+
+/**
+ * Input
+ */
+export type SchemaZImageBaseTrainerInput = {
+  /**
+   * Steps
+   *
+   * Number of steps to train for.
+   */
+  steps?: number
+  /**
+   * Image Data Url
+   *
+   *
+   * URL to the input data zip archive.
+   *
+   * The zip should contain images and their corresponding captions.
+   *
+   * The images should be named: ROOT.EXT. For example: 001.jpg
+   *
+   * The corresponding captions should be named: ROOT.txt. For example: 001.txt
+   *
+   * If no text file is provided for an image, the default_caption will be used.
+   *
+   */
+  image_data_url: string
+  /**
+   * Learning Rate
+   *
+   * Learning rate.
+   */
+  learning_rate?: number
+  /**
+   * Default Caption
+   *
+   * Default caption to use when caption files are missing. If None, missing captions will cause an error.
+   */
+  default_caption?: string | unknown
+}
+
+/**
+ * Output
+ */
+export type SchemaFluxLoraPortraitTrainerOutput = {
+  /**
+   * Config File
+   *
+   * URL to the training configuration file.
+   */
+  config_file: SchemaFile
+  /**
+   * Diffusers Lora File
+   *
+   * URL to the trained diffusers lora weights.
+ */ + diffusers_lora_file: SchemaFile +} + +/** + * PublicInput + */ +export type SchemaFluxLoraPortraitTrainerInput = { + /** + * Images Data Url + * + * + * URL to zip archive with images of a consistent style. Try to use at least 10 images, although more is better. + * + * In addition to images, the archive can contain text files with captions. Each text file should have the same name as the image file it corresponds to. + * + * The captions can include a special string `[trigger]`. If a trigger_phrase is specified, it will replace `[trigger]` in the captions. + * + */ + images_data_url: string + /** + * Trigger Phrase + * + * Trigger phrase to be used in the captions. If None, a trigger phrase will not be used. + * If no captions are provided, the trigger_phrase will be used instead of captions. If captions are provided, the trigger phrase will replace the `[trigger]` string in the captions. + * + */ + trigger_phrase?: string | null + /** + * Resume From Checkpoint + * + * URL to a checkpoint to resume training from. + */ + resume_from_checkpoint?: string + /** + * Subject Crop + * + * If True, the subject will be cropped from the image. + */ + subject_crop?: boolean + /** + * Learning Rate + * + * Learning rate to use for training. + */ + learning_rate?: number + /** + * Multiresolution Training + * + * If True, multiresolution training will be used. + */ + multiresolution_training?: boolean + /** + * Steps + * + * Number of steps to train the LoRA on. + */ + steps?: number + /** + * Data Archive Format + * + * The format of the archive. If not specified, the format will be inferred from the URL. + */ + data_archive_format?: string | null + /** + * Create Masks + * + * If True, masks will be created for the subject. + */ + create_masks?: boolean +} + +/** + * Output + */ +export type SchemaFluxLoraFastTrainingOutput = { + /** + * Config File + * + * URL to the training configuration file. + */ + config_file: SchemaFile + /** + * Debug Preprocessed Output + * + * URL to the preprocessed images. + */ + debug_preprocessed_output?: SchemaFile + /** + * Diffusers Lora File + * + * URL to the trained diffusers lora weights. + */ + diffusers_lora_file: SchemaFile +} + +/** + * PublicInput + */ +export type SchemaFluxLoraFastTrainingInput = { + /** + * Images Data Url + * + * + * URL to zip archive with images. Try to use at least 4 images; in general, the more the better. + * + * In addition to images, the archive can contain text files with captions. Each text file should have the same name as the image file it corresponds to. + * + */ + images_data_url: string + /** + * Is Input Format Already Preprocessed + * + * Specifies whether the input data is already in a processed format. When set to False (default), the system expects raw input where image files and their corresponding caption files share the same name (e.g., 'photo.jpg' and 'photo.txt'). Set to True if your data is already in a preprocessed format. + */ + is_input_format_already_preprocessed?: boolean + /** + * Trigger Word + * + * Trigger word to be used in the captions. If None, a trigger word will not be used. + * If no captions are provided, the trigger_word will be used instead of captions. If captions are provided, the trigger word will not be used. + * + */ + trigger_word?: string | null + /** + * Steps + * + * Number of steps to train the LoRA on. + */ + steps?: number + /** + * Data Archive Format + * + * The format of the archive. If not specified, the format will be inferred from the URL.
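+ *
+ * Illustrative sketch (not part of the generated schema): submitting this
+ * input to the queue with `fetch`. The dataset URL is hypothetical, and the
+ * `https://queue.fal.run` base URL plus a `FAL_KEY` environment variable
+ * (the "Fal Key" Authorization header) are assumptions, not generated here.
+ *
+ *   const input: SchemaFluxLoraFastTrainingInput = {
+ *     images_data_url: 'https://example.com/dataset.zip', // hypothetical
+ *     trigger_word: 'MYSTYLE',
+ *     steps: 1000,
+ *   }
+ *   const queued: SchemaQueueStatus = await fetch(
+ *     'https://queue.fal.run/fal-ai/flux-lora-fast-training',
+ *     {
+ *       method: 'POST',
+ *       headers: {
+ *         'Content-Type': 'application/json',
+ *         Authorization: `Key ${process.env.FAL_KEY}`,
+ *       },
+ *       body: JSON.stringify(input),
+ *     },
+ *   ).then((r) => r.json())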
+ */ + data_archive_format?: string | null + /** + * Is Style + * + * If True, the training will be for a style. This will deactivate segmentation and captioning, and the trigger word will be used instead. Use the trigger word to specify the style. + */ + is_style?: boolean + /** + * Create Masks + * + * If True, segmentation masks will be used to weight the training loss. For people, a face mask is used if possible. + */ + create_masks?: boolean +} + +/** + * Output + */ +export type SchemaFluxKontextTrainerOutput = { + /** + * Config File + * + * URL to the configuration file for the trained model. + */ + config_file: SchemaFile + /** + * Diffusers Lora File + * + * URL to the trained diffusers lora weights. + */ + diffusers_lora_file: SchemaFile +} + +/** + * Input + */ +export type SchemaFluxKontextTrainerInput = { + /** + * Steps + * + * Number of steps to train for + */ + steps?: number + /** + * Image Data Url + * + * + * URL to the input data zip archive. + * + * The zip should contain pairs of images. The images should be named: + * + * ROOT_start.EXT and ROOT_end.EXT + * For example: + * photo_start.jpg and photo_end.jpg + * + * The zip can also contain a text file for each image pair. The text file should be named: + * ROOT.txt + * For example: + * photo.txt + * + * This text file can be used to specify the edit instructions for the image pair. + * + * If no text file is provided, the default_caption will be used. + * + * If no default_caption is provided, the training will fail. + * + */ + image_data_url: string + /** + * Learning Rate + */ + learning_rate?: number + /** + * Default Caption + * + * Default caption to use when caption files are missing. If None, missing captions will cause an error. + */ + default_caption?: string + /** + * Output Lora Format + * + * Dictates the naming scheme for the output weights + */ + output_lora_format?: 'fal' | 'comfy' +} + +/** + * Output + */ +export type SchemaFluxKreaTrainerOutput = { + /** + * Config File + * + * URL to the training configuration file. + */ + config_file: SchemaFile + /** + * Debug Preprocessed Output + * + * URL to the preprocessed images. + */ + debug_preprocessed_output?: SchemaFile + /** + * Diffusers Lora File + * + * URL to the trained diffusers lora weights. + */ + diffusers_lora_file: SchemaFile +} + +/** + * PublicInput + */ +export type SchemaFluxKreaTrainerInput = { + /** + * Images Data Url + * + * + * URL to zip archive with images. Try to use at least 4 images; in general, the more the better. + * + * In addition to images, the archive can contain text files with captions. Each text file should have the same name as the image file it corresponds to. + * + */ + images_data_url: string + /** + * Is Input Format Already Preprocessed + * + * Specifies whether the input data is already in a processed format. When set to False (default), the system expects raw input where image files and their corresponding caption files share the same name (e.g., 'photo.jpg' and 'photo.txt'). Set to True if your data is already in a preprocessed format. + */ + is_input_format_already_preprocessed?: boolean + /** + * Trigger Word + * + * Trigger word to be used in the captions. If None, a trigger word will not be used. + * If no captions are provided, the trigger_word will be used instead of captions. If captions are provided, the trigger word will not be used. + * + */ + trigger_word?: string | null + /** + * Steps + * + * Number of steps to train the LoRA on. + */ + steps?: number + /** + * Data Archive Format + * + * The format of the archive.
If not specified, the format will be inferred from the URL. + */ + data_archive_format?: string | null + /** + * Is Style + * + * If True, the training will be for a style. This will deactivate segmentation and captioning, and the trigger word will be used instead. Use the trigger word to specify the style. + */ + is_style?: boolean + /** + * Create Masks + * + * If True, segmentation masks will be used to weight the training loss. For people, a face mask is used if possible. + */ + create_masks?: boolean +} + +export type SchemaQueueStatus = { + status: 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED' + /** + * The request id. + */ + request_id: string + /** + * The response url. + */ + response_url?: string + /** + * The status url. + */ + status_url?: string + /** + * The cancel url. + */ + cancel_url?: string + /** + * The logs. + */ + logs?: { + [key: string]: unknown + } + /** + * The metrics. + */ + metrics?: { + [key: string]: unknown + } + /** + * The queue position. + */ + queue_position?: number +} + +export type GetFalAiFluxKreaTrainerRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-krea-trainer/requests/{request_id}/status' +} + +export type GetFalAiFluxKreaTrainerRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxKreaTrainerRequestsByRequestIdStatusResponse = + GetFalAiFluxKreaTrainerRequestsByRequestIdStatusResponses[keyof GetFalAiFluxKreaTrainerRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxKreaTrainerRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-krea-trainer/requests/{request_id}/cancel' +} + +export type PutFalAiFluxKreaTrainerRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFluxKreaTrainerRequestsByRequestIdCancelResponse = + PutFalAiFluxKreaTrainerRequestsByRequestIdCancelResponses[keyof PutFalAiFluxKreaTrainerRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxKreaTrainerData = { + body: SchemaFluxKreaTrainerInput + path?: never + query?: never + url: '/fal-ai/flux-krea-trainer' +} + +export type PostFalAiFluxKreaTrainerResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxKreaTrainerResponse = + PostFalAiFluxKreaTrainerResponses[keyof PostFalAiFluxKreaTrainerResponses] + +export type GetFalAiFluxKreaTrainerRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-krea-trainer/requests/{request_id}' +} + +export type GetFalAiFluxKreaTrainerRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxKreaTrainerOutput +} + +export type GetFalAiFluxKreaTrainerRequestsByRequestIdResponse = + GetFalAiFluxKreaTrainerRequestsByRequestIdResponses[keyof GetFalAiFluxKreaTrainerRequestsByRequestIdResponses] + +export type GetFalAiFluxKontextTrainerRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`).
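+ *
+ * Illustrative sketch (not part of the generated output): polling this status
+ * endpoint with logs enabled, assuming the public `https://queue.fal.run`
+ * base URL and a `FAL_KEY` environment variable for the "Fal Key" header.
+ *
+ *   declare const requestId: string // `request_id` from the submit response
+ *   const status: SchemaQueueStatus = await fetch(
+ *     `https://queue.fal.run/fal-ai/flux-kontext-trainer/requests/${requestId}/status?logs=1`,
+ *     { headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+ *   ).then((r) => r.json())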
+ */ + logs?: number + } + url: '/fal-ai/flux-kontext-trainer/requests/{request_id}/status' +} + +export type GetFalAiFluxKontextTrainerRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxKontextTrainerRequestsByRequestIdStatusResponse = + GetFalAiFluxKontextTrainerRequestsByRequestIdStatusResponses[keyof GetFalAiFluxKontextTrainerRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxKontextTrainerRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-kontext-trainer/requests/{request_id}/cancel' +} + +export type PutFalAiFluxKontextTrainerRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFluxKontextTrainerRequestsByRequestIdCancelResponse = + PutFalAiFluxKontextTrainerRequestsByRequestIdCancelResponses[keyof PutFalAiFluxKontextTrainerRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxKontextTrainerData = { + body: SchemaFluxKontextTrainerInput + path?: never + query?: never + url: '/fal-ai/flux-kontext-trainer' +} + +export type PostFalAiFluxKontextTrainerResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxKontextTrainerResponse = + PostFalAiFluxKontextTrainerResponses[keyof PostFalAiFluxKontextTrainerResponses] + +export type GetFalAiFluxKontextTrainerRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-kontext-trainer/requests/{request_id}' +} + +export type GetFalAiFluxKontextTrainerRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxKontextTrainerOutput +} + +export type GetFalAiFluxKontextTrainerRequestsByRequestIdResponse = + GetFalAiFluxKontextTrainerRequestsByRequestIdResponses[keyof GetFalAiFluxKontextTrainerRequestsByRequestIdResponses] + +export type GetFalAiFluxLoraFastTrainingRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-lora-fast-training/requests/{request_id}/status' +} + +export type GetFalAiFluxLoraFastTrainingRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFluxLoraFastTrainingRequestsByRequestIdStatusResponse = + GetFalAiFluxLoraFastTrainingRequestsByRequestIdStatusResponses[keyof GetFalAiFluxLoraFastTrainingRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxLoraFastTrainingRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-lora-fast-training/requests/{request_id}/cancel' +} + +export type PutFalAiFluxLoraFastTrainingRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
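+ *
+ * Illustrative sketch (not part of the generated schema): cancelling a queued
+ * request. Note the cancel route is a PUT; the base URL and `FAL_KEY` are the
+ * same assumptions as in the examples above.
+ *
+ *   declare const requestId: string
+ *   const cancelled: { success?: boolean } = await fetch(
+ *     `https://queue.fal.run/fal-ai/flux-lora-fast-training/requests/${requestId}/cancel`,
+ *     { method: 'PUT', headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+ *   ).then((r) => r.json())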
+ */ + success?: boolean + } +} + +export type PutFalAiFluxLoraFastTrainingRequestsByRequestIdCancelResponse = + PutFalAiFluxLoraFastTrainingRequestsByRequestIdCancelResponses[keyof PutFalAiFluxLoraFastTrainingRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxLoraFastTrainingData = { + body: SchemaFluxLoraFastTrainingInput + path?: never + query?: never + url: '/fal-ai/flux-lora-fast-training' +} + +export type PostFalAiFluxLoraFastTrainingResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxLoraFastTrainingResponse = + PostFalAiFluxLoraFastTrainingResponses[keyof PostFalAiFluxLoraFastTrainingResponses] + +export type GetFalAiFluxLoraFastTrainingRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-lora-fast-training/requests/{request_id}' +} + +export type GetFalAiFluxLoraFastTrainingRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFluxLoraFastTrainingOutput +} + +export type GetFalAiFluxLoraFastTrainingRequestsByRequestIdResponse = + GetFalAiFluxLoraFastTrainingRequestsByRequestIdResponses[keyof GetFalAiFluxLoraFastTrainingRequestsByRequestIdResponses] + +export type GetFalAiFluxLoraPortraitTrainerRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-lora-portrait-trainer/requests/{request_id}/status' +} + +export type GetFalAiFluxLoraPortraitTrainerRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFluxLoraPortraitTrainerRequestsByRequestIdStatusResponse = + GetFalAiFluxLoraPortraitTrainerRequestsByRequestIdStatusResponses[keyof GetFalAiFluxLoraPortraitTrainerRequestsByRequestIdStatusResponses] + +export type PutFalAiFluxLoraPortraitTrainerRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-lora-portrait-trainer/requests/{request_id}/cancel' +} + +export type PutFalAiFluxLoraPortraitTrainerRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFluxLoraPortraitTrainerRequestsByRequestIdCancelResponse = + PutFalAiFluxLoraPortraitTrainerRequestsByRequestIdCancelResponses[keyof PutFalAiFluxLoraPortraitTrainerRequestsByRequestIdCancelResponses] + +export type PostFalAiFluxLoraPortraitTrainerData = { + body: SchemaFluxLoraPortraitTrainerInput + path?: never + query?: never + url: '/fal-ai/flux-lora-portrait-trainer' +} + +export type PostFalAiFluxLoraPortraitTrainerResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFluxLoraPortraitTrainerResponse = + PostFalAiFluxLoraPortraitTrainerResponses[keyof PostFalAiFluxLoraPortraitTrainerResponses] + +export type GetFalAiFluxLoraPortraitTrainerRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-lora-portrait-trainer/requests/{request_id}' +} + +export type GetFalAiFluxLoraPortraitTrainerRequestsByRequestIdResponses = { + /** + * Result of the request. 
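+ *
+ * Illustrative sketch (not part of the generated schema): fetching the final
+ * payload once the status reaches COMPLETED, under the same base-URL and
+ * `FAL_KEY` assumptions as above.
+ *
+ *   declare const requestId: string
+ *   const result: SchemaFluxLoraPortraitTrainerOutput = await fetch(
+ *     `https://queue.fal.run/fal-ai/flux-lora-portrait-trainer/requests/${requestId}`,
+ *     { headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+ *   ).then((r) => r.json())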
+ */ + 200: SchemaFluxLoraPortraitTrainerOutput +} + +export type GetFalAiFluxLoraPortraitTrainerRequestsByRequestIdResponse = + GetFalAiFluxLoraPortraitTrainerRequestsByRequestIdResponses[keyof GetFalAiFluxLoraPortraitTrainerRequestsByRequestIdResponses] + +export type GetFalAiZImageBaseTrainerRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/z-image-base-trainer/requests/{request_id}/status' +} + +export type GetFalAiZImageBaseTrainerRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiZImageBaseTrainerRequestsByRequestIdStatusResponse = + GetFalAiZImageBaseTrainerRequestsByRequestIdStatusResponses[keyof GetFalAiZImageBaseTrainerRequestsByRequestIdStatusResponses] + +export type PutFalAiZImageBaseTrainerRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/z-image-base-trainer/requests/{request_id}/cancel' +} + +export type PutFalAiZImageBaseTrainerRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiZImageBaseTrainerRequestsByRequestIdCancelResponse = + PutFalAiZImageBaseTrainerRequestsByRequestIdCancelResponses[keyof PutFalAiZImageBaseTrainerRequestsByRequestIdCancelResponses] + +export type PostFalAiZImageBaseTrainerData = { + body: SchemaZImageBaseTrainerInput + path?: never + query?: never + url: '/fal-ai/z-image-base-trainer' +} + +export type PostFalAiZImageBaseTrainerResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiZImageBaseTrainerResponse = + PostFalAiZImageBaseTrainerResponses[keyof PostFalAiZImageBaseTrainerResponses] + +export type GetFalAiZImageBaseTrainerRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/z-image-base-trainer/requests/{request_id}' +} + +export type GetFalAiZImageBaseTrainerRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaZImageBaseTrainerOutput +} + +export type GetFalAiZImageBaseTrainerRequestsByRequestIdResponse = + GetFalAiZImageBaseTrainerRequestsByRequestIdResponses[keyof GetFalAiZImageBaseTrainerRequestsByRequestIdResponses] + +export type GetFalAiZImageTurboTrainerV2RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/z-image-turbo-trainer-v2/requests/{request_id}/status' +} + +export type GetFalAiZImageTurboTrainerV2RequestsByRequestIdStatusResponses = { + /** + * The request status. 
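+ *
+ * Illustrative sketch (not part of the generated schema): polling until the
+ * queue reports COMPLETED. The 5-second interval is an arbitrary choice, not
+ * a documented value.
+ *
+ *   declare const submitted: SchemaQueueStatus // from the initial POST
+ *   let status = submitted
+ *   while (status.status !== 'COMPLETED') {
+ *     await new Promise((resolve) => setTimeout(resolve, 5_000))
+ *     status = await fetch(
+ *       `https://queue.fal.run/fal-ai/z-image-turbo-trainer-v2/requests/${submitted.request_id}/status`,
+ *       { headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+ *     ).then((r) => r.json())
+ *   }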
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiZImageTurboTrainerV2RequestsByRequestIdStatusResponse = + GetFalAiZImageTurboTrainerV2RequestsByRequestIdStatusResponses[keyof GetFalAiZImageTurboTrainerV2RequestsByRequestIdStatusResponses] + +export type PutFalAiZImageTurboTrainerV2RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/z-image-turbo-trainer-v2/requests/{request_id}/cancel' +} + +export type PutFalAiZImageTurboTrainerV2RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiZImageTurboTrainerV2RequestsByRequestIdCancelResponse = + PutFalAiZImageTurboTrainerV2RequestsByRequestIdCancelResponses[keyof PutFalAiZImageTurboTrainerV2RequestsByRequestIdCancelResponses] + +export type PostFalAiZImageTurboTrainerV2Data = { + body: SchemaZImageTurboTrainerV2Input + path?: never + query?: never + url: '/fal-ai/z-image-turbo-trainer-v2' +} + +export type PostFalAiZImageTurboTrainerV2Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiZImageTurboTrainerV2Response = + PostFalAiZImageTurboTrainerV2Responses[keyof PostFalAiZImageTurboTrainerV2Responses] + +export type GetFalAiZImageTurboTrainerV2RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/z-image-turbo-trainer-v2/requests/{request_id}' +} + +export type GetFalAiZImageTurboTrainerV2RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaZImageTurboTrainerV2Output +} + +export type GetFalAiZImageTurboTrainerV2RequestsByRequestIdResponse = + GetFalAiZImageTurboTrainerV2RequestsByRequestIdResponses[keyof GetFalAiZImageTurboTrainerV2RequestsByRequestIdResponses] + +export type GetFalAiFlux2Klein9bBaseTrainerEditRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-2-klein-9b-base-trainer/edit/requests/{request_id}/status' +} + +export type GetFalAiFlux2Klein9bBaseTrainerEditRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFlux2Klein9bBaseTrainerEditRequestsByRequestIdStatusResponse = + GetFalAiFlux2Klein9bBaseTrainerEditRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2Klein9bBaseTrainerEditRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2Klein9bBaseTrainerEditRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-klein-9b-base-trainer/edit/requests/{request_id}/cancel' +} + +export type PutFalAiFlux2Klein9bBaseTrainerEditRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiFlux2Klein9bBaseTrainerEditRequestsByRequestIdCancelResponse = + PutFalAiFlux2Klein9bBaseTrainerEditRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2Klein9bBaseTrainerEditRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2Klein9bBaseTrainerEditData = { + body: SchemaFlux2Klein9bBaseTrainerEditInput + path?: never + query?: never + url: '/fal-ai/flux-2-klein-9b-base-trainer/edit' +} + +export type PostFalAiFlux2Klein9bBaseTrainerEditResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2Klein9bBaseTrainerEditResponse = + PostFalAiFlux2Klein9bBaseTrainerEditResponses[keyof PostFalAiFlux2Klein9bBaseTrainerEditResponses] + +export type GetFalAiFlux2Klein9bBaseTrainerEditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-klein-9b-base-trainer/edit/requests/{request_id}' +} + +export type GetFalAiFlux2Klein9bBaseTrainerEditRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux2Klein9bBaseTrainerEditOutput +} + +export type GetFalAiFlux2Klein9bBaseTrainerEditRequestsByRequestIdResponse = + GetFalAiFlux2Klein9bBaseTrainerEditRequestsByRequestIdResponses[keyof GetFalAiFlux2Klein9bBaseTrainerEditRequestsByRequestIdResponses] + +export type GetFalAiFlux2Klein9bBaseTrainerRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-2-klein-9b-base-trainer/requests/{request_id}/status' +} + +export type GetFalAiFlux2Klein9bBaseTrainerRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFlux2Klein9bBaseTrainerRequestsByRequestIdStatusResponse = + GetFalAiFlux2Klein9bBaseTrainerRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2Klein9bBaseTrainerRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2Klein9bBaseTrainerRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-klein-9b-base-trainer/requests/{request_id}/cancel' +} + +export type PutFalAiFlux2Klein9bBaseTrainerRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFlux2Klein9bBaseTrainerRequestsByRequestIdCancelResponse = + PutFalAiFlux2Klein9bBaseTrainerRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2Klein9bBaseTrainerRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2Klein9bBaseTrainerData = { + body: SchemaFlux2Klein9bBaseTrainerInput + path?: never + query?: never + url: '/fal-ai/flux-2-klein-9b-base-trainer' +} + +export type PostFalAiFlux2Klein9bBaseTrainerResponses = { + /** + * The request status. 
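+ *
+ * Illustrative sketch (not part of the generated schema): the `...Data`
+ * shapes can type a request body without naming the input schema directly.
+ * The zip URL below is hypothetical.
+ *
+ *   const body: PostFalAiFlux2Klein9bBaseTrainerData['body'] = {
+ *     image_data_url: 'https://example.com/style-images.zip', // hypothetical
+ *     steps: 2000,
+ *     output_lora_format: 'fal',
+ *   }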
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2Klein9bBaseTrainerResponse = + PostFalAiFlux2Klein9bBaseTrainerResponses[keyof PostFalAiFlux2Klein9bBaseTrainerResponses] + +export type GetFalAiFlux2Klein9bBaseTrainerRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-klein-9b-base-trainer/requests/{request_id}' +} + +export type GetFalAiFlux2Klein9bBaseTrainerRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux2Klein9bBaseTrainerOutput +} + +export type GetFalAiFlux2Klein9bBaseTrainerRequestsByRequestIdResponse = + GetFalAiFlux2Klein9bBaseTrainerRequestsByRequestIdResponses[keyof GetFalAiFlux2Klein9bBaseTrainerRequestsByRequestIdResponses] + +export type GetFalAiFlux2Klein4bBaseTrainerRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-2-klein-4b-base-trainer/requests/{request_id}/status' +} + +export type GetFalAiFlux2Klein4bBaseTrainerRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFlux2Klein4bBaseTrainerRequestsByRequestIdStatusResponse = + GetFalAiFlux2Klein4bBaseTrainerRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2Klein4bBaseTrainerRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2Klein4bBaseTrainerRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-klein-4b-base-trainer/requests/{request_id}/cancel' +} + +export type PutFalAiFlux2Klein4bBaseTrainerRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFlux2Klein4bBaseTrainerRequestsByRequestIdCancelResponse = + PutFalAiFlux2Klein4bBaseTrainerRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2Klein4bBaseTrainerRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2Klein4bBaseTrainerData = { + body: SchemaFlux2Klein4bBaseTrainerInput + path?: never + query?: never + url: '/fal-ai/flux-2-klein-4b-base-trainer' +} + +export type PostFalAiFlux2Klein4bBaseTrainerResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2Klein4bBaseTrainerResponse = + PostFalAiFlux2Klein4bBaseTrainerResponses[keyof PostFalAiFlux2Klein4bBaseTrainerResponses] + +export type GetFalAiFlux2Klein4bBaseTrainerRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-klein-4b-base-trainer/requests/{request_id}' +} + +export type GetFalAiFlux2Klein4bBaseTrainerRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux2Klein4bBaseTrainerOutput +} + +export type GetFalAiFlux2Klein4bBaseTrainerRequestsByRequestIdResponse = + GetFalAiFlux2Klein4bBaseTrainerRequestsByRequestIdResponses[keyof GetFalAiFlux2Klein4bBaseTrainerRequestsByRequestIdResponses] + +export type GetFalAiFlux2Klein4bBaseTrainerEditRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/flux-2-klein-4b-base-trainer/edit/requests/{request_id}/status' +} + +export type GetFalAiFlux2Klein4bBaseTrainerEditRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFlux2Klein4bBaseTrainerEditRequestsByRequestIdStatusResponse = + GetFalAiFlux2Klein4bBaseTrainerEditRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2Klein4bBaseTrainerEditRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2Klein4bBaseTrainerEditRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-klein-4b-base-trainer/edit/requests/{request_id}/cancel' +} + +export type PutFalAiFlux2Klein4bBaseTrainerEditRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFlux2Klein4bBaseTrainerEditRequestsByRequestIdCancelResponse = + PutFalAiFlux2Klein4bBaseTrainerEditRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2Klein4bBaseTrainerEditRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2Klein4bBaseTrainerEditData = { + body: SchemaFlux2Klein4bBaseTrainerEditInput + path?: never + query?: never + url: '/fal-ai/flux-2-klein-4b-base-trainer/edit' +} + +export type PostFalAiFlux2Klein4bBaseTrainerEditResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2Klein4bBaseTrainerEditResponse = + PostFalAiFlux2Klein4bBaseTrainerEditResponses[keyof PostFalAiFlux2Klein4bBaseTrainerEditResponses] + +export type GetFalAiFlux2Klein4bBaseTrainerEditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-klein-4b-base-trainer/edit/requests/{request_id}' +} + +export type GetFalAiFlux2Klein4bBaseTrainerEditRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux2Klein4bBaseTrainerEditOutput +} + +export type GetFalAiFlux2Klein4bBaseTrainerEditRequestsByRequestIdResponse = + GetFalAiFlux2Klein4bBaseTrainerEditRequestsByRequestIdResponses[keyof GetFalAiFlux2Klein4bBaseTrainerEditRequestsByRequestIdResponses] + +export type GetFalAiQwenImage2512TrainerV2RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-2512-trainer-v2/requests/{request_id}/status' +} + +export type GetFalAiQwenImage2512TrainerV2RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiQwenImage2512TrainerV2RequestsByRequestIdStatusResponse = + GetFalAiQwenImage2512TrainerV2RequestsByRequestIdStatusResponses[keyof GetFalAiQwenImage2512TrainerV2RequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImage2512TrainerV2RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-2512-trainer-v2/requests/{request_id}/cancel' +} + +export type PutFalAiQwenImage2512TrainerV2RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiQwenImage2512TrainerV2RequestsByRequestIdCancelResponse = + PutFalAiQwenImage2512TrainerV2RequestsByRequestIdCancelResponses[keyof PutFalAiQwenImage2512TrainerV2RequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImage2512TrainerV2Data = { + body: SchemaQwenImage2512TrainerV2Input + path?: never + query?: never + url: '/fal-ai/qwen-image-2512-trainer-v2' +} + +export type PostFalAiQwenImage2512TrainerV2Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImage2512TrainerV2Response = + PostFalAiQwenImage2512TrainerV2Responses[keyof PostFalAiQwenImage2512TrainerV2Responses] + +export type GetFalAiQwenImage2512TrainerV2RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-2512-trainer-v2/requests/{request_id}' +} + +export type GetFalAiQwenImage2512TrainerV2RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaQwenImage2512TrainerV2Output +} + +export type GetFalAiQwenImage2512TrainerV2RequestsByRequestIdResponse = + GetFalAiQwenImage2512TrainerV2RequestsByRequestIdResponses[keyof GetFalAiQwenImage2512TrainerV2RequestsByRequestIdResponses] + +export type GetFalAiFlux2TrainerV2EditRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-2-trainer-v2/edit/requests/{request_id}/status' +} + +export type GetFalAiFlux2TrainerV2EditRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlux2TrainerV2EditRequestsByRequestIdStatusResponse = + GetFalAiFlux2TrainerV2EditRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2TrainerV2EditRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2TrainerV2EditRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-trainer-v2/edit/requests/{request_id}/cancel' +} + +export type PutFalAiFlux2TrainerV2EditRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFlux2TrainerV2EditRequestsByRequestIdCancelResponse = + PutFalAiFlux2TrainerV2EditRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2TrainerV2EditRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2TrainerV2EditData = { + body: SchemaFlux2TrainerV2EditInput + path?: never + query?: never + url: '/fal-ai/flux-2-trainer-v2/edit' +} + +export type PostFalAiFlux2TrainerV2EditResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2TrainerV2EditResponse = + PostFalAiFlux2TrainerV2EditResponses[keyof PostFalAiFlux2TrainerV2EditResponses] + +export type GetFalAiFlux2TrainerV2EditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-trainer-v2/edit/requests/{request_id}' +} + +export type GetFalAiFlux2TrainerV2EditRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaFlux2TrainerV2EditOutput +} + +export type GetFalAiFlux2TrainerV2EditRequestsByRequestIdResponse = + GetFalAiFlux2TrainerV2EditRequestsByRequestIdResponses[keyof GetFalAiFlux2TrainerV2EditRequestsByRequestIdResponses] + +export type GetFalAiFlux2TrainerV2RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-2-trainer-v2/requests/{request_id}/status' +} + +export type GetFalAiFlux2TrainerV2RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlux2TrainerV2RequestsByRequestIdStatusResponse = + GetFalAiFlux2TrainerV2RequestsByRequestIdStatusResponses[keyof GetFalAiFlux2TrainerV2RequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2TrainerV2RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-trainer-v2/requests/{request_id}/cancel' +} + +export type PutFalAiFlux2TrainerV2RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFlux2TrainerV2RequestsByRequestIdCancelResponse = + PutFalAiFlux2TrainerV2RequestsByRequestIdCancelResponses[keyof PutFalAiFlux2TrainerV2RequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2TrainerV2Data = { + body: SchemaFlux2TrainerV2Input + path?: never + query?: never + url: '/fal-ai/flux-2-trainer-v2' +} + +export type PostFalAiFlux2TrainerV2Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2TrainerV2Response = + PostFalAiFlux2TrainerV2Responses[keyof PostFalAiFlux2TrainerV2Responses] + +export type GetFalAiFlux2TrainerV2RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-trainer-v2/requests/{request_id}' +} + +export type GetFalAiFlux2TrainerV2RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux2TrainerV2Output +} + +export type GetFalAiFlux2TrainerV2RequestsByRequestIdResponse = + GetFalAiFlux2TrainerV2RequestsByRequestIdResponses[keyof GetFalAiFlux2TrainerV2RequestsByRequestIdResponses] + +export type GetFalAiLtx2V2vTrainerRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ltx2-v2v-trainer/requests/{request_id}/status' +} + +export type GetFalAiLtx2V2vTrainerRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLtx2V2vTrainerRequestsByRequestIdStatusResponse = + GetFalAiLtx2V2vTrainerRequestsByRequestIdStatusResponses[keyof GetFalAiLtx2V2vTrainerRequestsByRequestIdStatusResponses] + +export type PutFalAiLtx2V2vTrainerRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx2-v2v-trainer/requests/{request_id}/cancel' +} + +export type PutFalAiLtx2V2vTrainerRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiLtx2V2vTrainerRequestsByRequestIdCancelResponse = + PutFalAiLtx2V2vTrainerRequestsByRequestIdCancelResponses[keyof PutFalAiLtx2V2vTrainerRequestsByRequestIdCancelResponses] + +export type PostFalAiLtx2V2vTrainerData = { + body: SchemaLtx2V2vTrainerInput + path?: never + query?: never + url: '/fal-ai/ltx2-v2v-trainer' +} + +export type PostFalAiLtx2V2vTrainerResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtx2V2vTrainerResponse = + PostFalAiLtx2V2vTrainerResponses[keyof PostFalAiLtx2V2vTrainerResponses] + +export type GetFalAiLtx2V2vTrainerRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx2-v2v-trainer/requests/{request_id}' +} + +export type GetFalAiLtx2V2vTrainerRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLtx2V2vTrainerOutput +} + +export type GetFalAiLtx2V2vTrainerRequestsByRequestIdResponse = + GetFalAiLtx2V2vTrainerRequestsByRequestIdResponses[keyof GetFalAiLtx2V2vTrainerRequestsByRequestIdResponses] + +export type GetFalAiLtx2VideoTrainerRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ltx2-video-trainer/requests/{request_id}/status' +} + +export type GetFalAiLtx2VideoTrainerRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLtx2VideoTrainerRequestsByRequestIdStatusResponse = + GetFalAiLtx2VideoTrainerRequestsByRequestIdStatusResponses[keyof GetFalAiLtx2VideoTrainerRequestsByRequestIdStatusResponses] + +export type PutFalAiLtx2VideoTrainerRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx2-video-trainer/requests/{request_id}/cancel' +} + +export type PutFalAiLtx2VideoTrainerRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLtx2VideoTrainerRequestsByRequestIdCancelResponse = + PutFalAiLtx2VideoTrainerRequestsByRequestIdCancelResponses[keyof PutFalAiLtx2VideoTrainerRequestsByRequestIdCancelResponses] + +export type PostFalAiLtx2VideoTrainerData = { + body: SchemaLtx2VideoTrainerInput + path?: never + query?: never + url: '/fal-ai/ltx2-video-trainer' +} + +export type PostFalAiLtx2VideoTrainerResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtx2VideoTrainerResponse = + PostFalAiLtx2VideoTrainerResponses[keyof PostFalAiLtx2VideoTrainerResponses] + +export type GetFalAiLtx2VideoTrainerRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx2-video-trainer/requests/{request_id}' +} + +export type GetFalAiLtx2VideoTrainerRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaLtx2VideoTrainerOutput +} + +export type GetFalAiLtx2VideoTrainerRequestsByRequestIdResponse = + GetFalAiLtx2VideoTrainerRequestsByRequestIdResponses[keyof GetFalAiLtx2VideoTrainerRequestsByRequestIdResponses] + +export type GetFalAiQwenImage2512TrainerRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-2512-trainer/requests/{request_id}/status' +} + +export type GetFalAiQwenImage2512TrainerRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiQwenImage2512TrainerRequestsByRequestIdStatusResponse = + GetFalAiQwenImage2512TrainerRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImage2512TrainerRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImage2512TrainerRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-2512-trainer/requests/{request_id}/cancel' +} + +export type PutFalAiQwenImage2512TrainerRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiQwenImage2512TrainerRequestsByRequestIdCancelResponse = + PutFalAiQwenImage2512TrainerRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImage2512TrainerRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImage2512TrainerData = { + body: SchemaQwenImage2512TrainerInput + path?: never + query?: never + url: '/fal-ai/qwen-image-2512-trainer' +} + +export type PostFalAiQwenImage2512TrainerResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImage2512TrainerResponse = + PostFalAiQwenImage2512TrainerResponses[keyof PostFalAiQwenImage2512TrainerResponses] + +export type GetFalAiQwenImage2512TrainerRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-2512-trainer/requests/{request_id}' +} + +export type GetFalAiQwenImage2512TrainerRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaQwenImage2512TrainerOutput +} + +export type GetFalAiQwenImage2512TrainerRequestsByRequestIdResponse = + GetFalAiQwenImage2512TrainerRequestsByRequestIdResponses[keyof GetFalAiQwenImage2512TrainerRequestsByRequestIdResponses] + +export type GetFalAiQwenImageEdit2511TrainerRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-edit-2511-trainer/requests/{request_id}/status' +} + +export type GetFalAiQwenImageEdit2511TrainerRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
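+ *
+ * Illustrative sketch (not part of the generated schema): because every
+ * status `...Data` shape shares the same structure, a single helper can
+ * serve all trainers. The base URL and `FAL_KEY` are assumptions as above.
+ *
+ *   async function getStatus(data: {
+ *     path: { request_id: string }
+ *     url: string
+ *   }): Promise<SchemaQueueStatus> {
+ *     const path = data.url.replace('{request_id}', data.path.request_id)
+ *     const res = await fetch(`https://queue.fal.run${path}`, {
+ *       headers: { Authorization: `Key ${process.env.FAL_KEY}` },
+ *     })
+ *     return res.json() as Promise<SchemaQueueStatus>
+ *   }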
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiQwenImageEdit2511TrainerRequestsByRequestIdStatusResponse = + GetFalAiQwenImageEdit2511TrainerRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageEdit2511TrainerRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageEdit2511TrainerRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-2511-trainer/requests/{request_id}/cancel' +} + +export type PutFalAiQwenImageEdit2511TrainerRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiQwenImageEdit2511TrainerRequestsByRequestIdCancelResponse = + PutFalAiQwenImageEdit2511TrainerRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageEdit2511TrainerRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageEdit2511TrainerData = { + body: SchemaQwenImageEdit2511TrainerInput + path?: never + query?: never + url: '/fal-ai/qwen-image-edit-2511-trainer' +} + +export type PostFalAiQwenImageEdit2511TrainerResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImageEdit2511TrainerResponse = + PostFalAiQwenImageEdit2511TrainerResponses[keyof PostFalAiQwenImageEdit2511TrainerResponses] + +export type GetFalAiQwenImageEdit2511TrainerRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-2511-trainer/requests/{request_id}' +} + +export type GetFalAiQwenImageEdit2511TrainerRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaQwenImageEdit2511TrainerOutput +} + +export type GetFalAiQwenImageEdit2511TrainerRequestsByRequestIdResponse = + GetFalAiQwenImageEdit2511TrainerRequestsByRequestIdResponses[keyof GetFalAiQwenImageEdit2511TrainerRequestsByRequestIdResponses] + +export type GetFalAiQwenImageLayeredTrainerRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-layered-trainer/requests/{request_id}/status' +} + +export type GetFalAiQwenImageLayeredTrainerRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiQwenImageLayeredTrainerRequestsByRequestIdStatusResponse = + GetFalAiQwenImageLayeredTrainerRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageLayeredTrainerRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageLayeredTrainerRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-layered-trainer/requests/{request_id}/cancel' +} + +export type PutFalAiQwenImageLayeredTrainerRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiQwenImageLayeredTrainerRequestsByRequestIdCancelResponse = + PutFalAiQwenImageLayeredTrainerRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageLayeredTrainerRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageLayeredTrainerData = { + body: SchemaQwenImageLayeredTrainerInput + path?: never + query?: never + url: '/fal-ai/qwen-image-layered-trainer' +} + +export type PostFalAiQwenImageLayeredTrainerResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImageLayeredTrainerResponse = + PostFalAiQwenImageLayeredTrainerResponses[keyof PostFalAiQwenImageLayeredTrainerResponses] + +export type GetFalAiQwenImageLayeredTrainerRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-layered-trainer/requests/{request_id}' +} + +export type GetFalAiQwenImageLayeredTrainerRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaQwenImageLayeredTrainerOutput +} + +export type GetFalAiQwenImageLayeredTrainerRequestsByRequestIdResponse = + GetFalAiQwenImageLayeredTrainerRequestsByRequestIdResponses[keyof GetFalAiQwenImageLayeredTrainerRequestsByRequestIdResponses] + +export type GetFalAiQwenImageEdit2509TrainerRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-edit-2509-trainer/requests/{request_id}/status' +} + +export type GetFalAiQwenImageEdit2509TrainerRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiQwenImageEdit2509TrainerRequestsByRequestIdStatusResponse = + GetFalAiQwenImageEdit2509TrainerRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageEdit2509TrainerRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageEdit2509TrainerRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-2509-trainer/requests/{request_id}/cancel' +} + +export type PutFalAiQwenImageEdit2509TrainerRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiQwenImageEdit2509TrainerRequestsByRequestIdCancelResponse = + PutFalAiQwenImageEdit2509TrainerRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageEdit2509TrainerRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageEdit2509TrainerData = { + body: SchemaQwenImageEdit2509TrainerInput + path?: never + query?: never + url: '/fal-ai/qwen-image-edit-2509-trainer' +} + +export type PostFalAiQwenImageEdit2509TrainerResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImageEdit2509TrainerResponse = + PostFalAiQwenImageEdit2509TrainerResponses[keyof PostFalAiQwenImageEdit2509TrainerResponses] + +export type GetFalAiQwenImageEdit2509TrainerRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-2509-trainer/requests/{request_id}' +} + +export type GetFalAiQwenImageEdit2509TrainerRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaQwenImageEdit2509TrainerOutput +} + +export type GetFalAiQwenImageEdit2509TrainerRequestsByRequestIdResponse = + GetFalAiQwenImageEdit2509TrainerRequestsByRequestIdResponses[keyof GetFalAiQwenImageEdit2509TrainerRequestsByRequestIdResponses] + +export type GetFalAiZImageTrainerRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/z-image-trainer/requests/{request_id}/status' +} + +export type GetFalAiZImageTrainerRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiZImageTrainerRequestsByRequestIdStatusResponse = + GetFalAiZImageTrainerRequestsByRequestIdStatusResponses[keyof GetFalAiZImageTrainerRequestsByRequestIdStatusResponses] + +export type PutFalAiZImageTrainerRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/z-image-trainer/requests/{request_id}/cancel' +} + +export type PutFalAiZImageTrainerRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiZImageTrainerRequestsByRequestIdCancelResponse = + PutFalAiZImageTrainerRequestsByRequestIdCancelResponses[keyof PutFalAiZImageTrainerRequestsByRequestIdCancelResponses] + +export type PostFalAiZImageTrainerData = { + body: SchemaZImageTrainerInput + path?: never + query?: never + url: '/fal-ai/z-image-trainer' +} + +export type PostFalAiZImageTrainerResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiZImageTrainerResponse = + PostFalAiZImageTrainerResponses[keyof PostFalAiZImageTrainerResponses] + +export type GetFalAiZImageTrainerRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/z-image-trainer/requests/{request_id}' +} + +export type GetFalAiZImageTrainerRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaZImageTrainerOutput +} + +export type GetFalAiZImageTrainerRequestsByRequestIdResponse = + GetFalAiZImageTrainerRequestsByRequestIdResponses[keyof GetFalAiZImageTrainerRequestsByRequestIdResponses] + +export type GetFalAiFlux2TrainerEditRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-2-trainer/edit/requests/{request_id}/status' +} + +export type GetFalAiFlux2TrainerEditRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlux2TrainerEditRequestsByRequestIdStatusResponse = + GetFalAiFlux2TrainerEditRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2TrainerEditRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2TrainerEditRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-trainer/edit/requests/{request_id}/cancel' +} + +export type PutFalAiFlux2TrainerEditRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiFlux2TrainerEditRequestsByRequestIdCancelResponse = + PutFalAiFlux2TrainerEditRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2TrainerEditRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2TrainerEditData = { + body: SchemaFlux2TrainerEditInput + path?: never + query?: never + url: '/fal-ai/flux-2-trainer/edit' +} + +export type PostFalAiFlux2TrainerEditResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2TrainerEditResponse = + PostFalAiFlux2TrainerEditResponses[keyof PostFalAiFlux2TrainerEditResponses] + +export type GetFalAiFlux2TrainerEditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-trainer/edit/requests/{request_id}' +} + +export type GetFalAiFlux2TrainerEditRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlux2TrainerEditOutput +} + +export type GetFalAiFlux2TrainerEditRequestsByRequestIdResponse = + GetFalAiFlux2TrainerEditRequestsByRequestIdResponses[keyof GetFalAiFlux2TrainerEditRequestsByRequestIdResponses] + +export type GetFalAiFlux2TrainerRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flux-2-trainer/requests/{request_id}/status' +} + +export type GetFalAiFlux2TrainerRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlux2TrainerRequestsByRequestIdStatusResponse = + GetFalAiFlux2TrainerRequestsByRequestIdStatusResponses[keyof GetFalAiFlux2TrainerRequestsByRequestIdStatusResponses] + +export type PutFalAiFlux2TrainerRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-trainer/requests/{request_id}/cancel' +} + +export type PutFalAiFlux2TrainerRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFlux2TrainerRequestsByRequestIdCancelResponse = + PutFalAiFlux2TrainerRequestsByRequestIdCancelResponses[keyof PutFalAiFlux2TrainerRequestsByRequestIdCancelResponses] + +export type PostFalAiFlux2TrainerData = { + body: SchemaFlux2TrainerInput + path?: never + query?: never + url: '/fal-ai/flux-2-trainer' +} + +export type PostFalAiFlux2TrainerResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlux2TrainerResponse = + PostFalAiFlux2TrainerResponses[keyof PostFalAiFlux2TrainerResponses] + +export type GetFalAiFlux2TrainerRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flux-2-trainer/requests/{request_id}' +} + +export type GetFalAiFlux2TrainerRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaFlux2TrainerOutput +} + +export type GetFalAiFlux2TrainerRequestsByRequestIdResponse = + GetFalAiFlux2TrainerRequestsByRequestIdResponses[keyof GetFalAiFlux2TrainerRequestsByRequestIdResponses] + +export type GetFalAiQwenImageEditPlusTrainerRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-edit-plus-trainer/requests/{request_id}/status' +} + +export type GetFalAiQwenImageEditPlusTrainerRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiQwenImageEditPlusTrainerRequestsByRequestIdStatusResponse = + GetFalAiQwenImageEditPlusTrainerRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageEditPlusTrainerRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageEditPlusTrainerRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-plus-trainer/requests/{request_id}/cancel' +} + +export type PutFalAiQwenImageEditPlusTrainerRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiQwenImageEditPlusTrainerRequestsByRequestIdCancelResponse = + PutFalAiQwenImageEditPlusTrainerRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageEditPlusTrainerRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageEditPlusTrainerData = { + body: SchemaQwenImageEditPlusTrainerInput + path?: never + query?: never + url: '/fal-ai/qwen-image-edit-plus-trainer' +} + +export type PostFalAiQwenImageEditPlusTrainerResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImageEditPlusTrainerResponse = + PostFalAiQwenImageEditPlusTrainerResponses[keyof PostFalAiQwenImageEditPlusTrainerResponses] + +export type GetFalAiQwenImageEditPlusTrainerRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-plus-trainer/requests/{request_id}' +} + +export type GetFalAiQwenImageEditPlusTrainerRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaQwenImageEditPlusTrainerOutput +} + +export type GetFalAiQwenImageEditPlusTrainerRequestsByRequestIdResponse = + GetFalAiQwenImageEditPlusTrainerRequestsByRequestIdResponses[keyof GetFalAiQwenImageEditPlusTrainerRequestsByRequestIdResponses] + +export type GetFalAiQwenImageEditTrainerRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-edit-trainer/requests/{request_id}/status' +} + +export type GetFalAiQwenImageEditTrainerRequestsByRequestIdStatusResponses = { + /** + * The request status. 
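+ *
+ * Hand-written sketch, not part of the generated output: checking status
+ * with logs enabled via the `?logs=1` query parameter. Base URL and
+ * authorization scheme are assumptions.
+ * @example
+ * const res = await fetch(
+ *   `https://queue.fal.run/fal-ai/qwen-image-edit-trainer/requests/${requestId}/status?logs=1`,
+ *   { headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+ * )
+ * const status: SchemaQueueStatus = await res.json()
+ * // status.status is 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED'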
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiQwenImageEditTrainerRequestsByRequestIdStatusResponse = + GetFalAiQwenImageEditTrainerRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageEditTrainerRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageEditTrainerRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-trainer/requests/{request_id}/cancel' +} + +export type PutFalAiQwenImageEditTrainerRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiQwenImageEditTrainerRequestsByRequestIdCancelResponse = + PutFalAiQwenImageEditTrainerRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageEditTrainerRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageEditTrainerData = { + body: SchemaQwenImageEditTrainerInput + path?: never + query?: never + url: '/fal-ai/qwen-image-edit-trainer' +} + +export type PostFalAiQwenImageEditTrainerResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImageEditTrainerResponse = + PostFalAiQwenImageEditTrainerResponses[keyof PostFalAiQwenImageEditTrainerResponses] + +export type GetFalAiQwenImageEditTrainerRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-edit-trainer/requests/{request_id}' +} + +export type GetFalAiQwenImageEditTrainerRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaQwenImageEditTrainerOutput +} + +export type GetFalAiQwenImageEditTrainerRequestsByRequestIdResponse = + GetFalAiQwenImageEditTrainerRequestsByRequestIdResponses[keyof GetFalAiQwenImageEditTrainerRequestsByRequestIdResponses] + +export type GetFalAiQwenImageTrainerRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-image-trainer/requests/{request_id}/status' +} + +export type GetFalAiQwenImageTrainerRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiQwenImageTrainerRequestsByRequestIdStatusResponse = + GetFalAiQwenImageTrainerRequestsByRequestIdStatusResponses[keyof GetFalAiQwenImageTrainerRequestsByRequestIdStatusResponses] + +export type PutFalAiQwenImageTrainerRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-trainer/requests/{request_id}/cancel' +} + +export type PutFalAiQwenImageTrainerRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiQwenImageTrainerRequestsByRequestIdCancelResponse = + PutFalAiQwenImageTrainerRequestsByRequestIdCancelResponses[keyof PutFalAiQwenImageTrainerRequestsByRequestIdCancelResponses] + +export type PostFalAiQwenImageTrainerData = { + body: SchemaQwenImageTrainerInput + path?: never + query?: never + url: '/fal-ai/qwen-image-trainer' +} + +export type PostFalAiQwenImageTrainerResponses = { + /** + * The request status. 
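+ *
+ * Hand-written sketch, not part of the generated output: submitting a
+ * training job with a body typed against this schema. The dataset URL is a
+ * placeholder, and the base URL and authorization scheme are assumptions.
+ * @example
+ * const body: PostFalAiQwenImageTrainerData['body'] = {
+ *   image_data_url: 'https://example.com/dataset.zip',
+ *   steps: 1000,
+ * }
+ * const res = await fetch('https://queue.fal.run/fal-ai/qwen-image-trainer', {
+ *   method: 'POST',
+ *   headers: {
+ *     Authorization: `Key ${process.env.FAL_KEY}`,
+ *     'Content-Type': 'application/json',
+ *   },
+ *   body: JSON.stringify(body),
+ * })
+ * const queued: SchemaQueueStatus = await res.json()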
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwenImageTrainerResponse = + PostFalAiQwenImageTrainerResponses[keyof PostFalAiQwenImageTrainerResponses] + +export type GetFalAiQwenImageTrainerRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-image-trainer/requests/{request_id}' +} + +export type GetFalAiQwenImageTrainerRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaQwenImageTrainerOutput +} + +export type GetFalAiQwenImageTrainerRequestsByRequestIdResponse = + GetFalAiQwenImageTrainerRequestsByRequestIdResponses[keyof GetFalAiQwenImageTrainerRequestsByRequestIdResponses] + +export type GetFalAiWan22ImageTrainerRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan-22-image-trainer/requests/{request_id}/status' +} + +export type GetFalAiWan22ImageTrainerRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiWan22ImageTrainerRequestsByRequestIdStatusResponse = + GetFalAiWan22ImageTrainerRequestsByRequestIdStatusResponses[keyof GetFalAiWan22ImageTrainerRequestsByRequestIdStatusResponses] + +export type PutFalAiWan22ImageTrainerRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-22-image-trainer/requests/{request_id}/cancel' +} + +export type PutFalAiWan22ImageTrainerRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiWan22ImageTrainerRequestsByRequestIdCancelResponse = + PutFalAiWan22ImageTrainerRequestsByRequestIdCancelResponses[keyof PutFalAiWan22ImageTrainerRequestsByRequestIdCancelResponses] + +export type PostFalAiWan22ImageTrainerData = { + body: SchemaWan22ImageTrainerInput + path?: never + query?: never + url: '/fal-ai/wan-22-image-trainer' +} + +export type PostFalAiWan22ImageTrainerResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWan22ImageTrainerResponse = + PostFalAiWan22ImageTrainerResponses[keyof PostFalAiWan22ImageTrainerResponses] + +export type GetFalAiWan22ImageTrainerRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-22-image-trainer/requests/{request_id}' +} + +export type GetFalAiWan22ImageTrainerRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWan22ImageTrainerOutput +} + +export type GetFalAiWan22ImageTrainerRequestsByRequestIdResponse = + GetFalAiWan22ImageTrainerRequestsByRequestIdResponses[keyof GetFalAiWan22ImageTrainerRequestsByRequestIdResponses] + +export type GetFalAiWanTrainerT2vRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan-trainer/t2v/requests/{request_id}/status' +} + +export type GetFalAiWanTrainerT2vRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiWanTrainerT2vRequestsByRequestIdStatusResponse = + GetFalAiWanTrainerT2vRequestsByRequestIdStatusResponses[keyof GetFalAiWanTrainerT2vRequestsByRequestIdStatusResponses] + +export type PutFalAiWanTrainerT2vRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-trainer/t2v/requests/{request_id}/cancel' +} + +export type PutFalAiWanTrainerT2vRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiWanTrainerT2vRequestsByRequestIdCancelResponse = + PutFalAiWanTrainerT2vRequestsByRequestIdCancelResponses[keyof PutFalAiWanTrainerT2vRequestsByRequestIdCancelResponses] + +export type PostFalAiWanTrainerT2vData = { + body: SchemaWanTrainerT2vInput + path?: never + query?: never + url: '/fal-ai/wan-trainer/t2v' +} + +export type PostFalAiWanTrainerT2vResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanTrainerT2vResponse = + PostFalAiWanTrainerT2vResponses[keyof PostFalAiWanTrainerT2vResponses] + +export type GetFalAiWanTrainerT2vRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-trainer/t2v/requests/{request_id}' +} + +export type GetFalAiWanTrainerT2vRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWanTrainerT2vOutput +} + +export type GetFalAiWanTrainerT2vRequestsByRequestIdResponse = + GetFalAiWanTrainerT2vRequestsByRequestIdResponses[keyof GetFalAiWanTrainerT2vRequestsByRequestIdResponses] + +export type GetFalAiWanTrainerT2V14bRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan-trainer/t2v-14b/requests/{request_id}/status' +} + +export type GetFalAiWanTrainerT2V14bRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiWanTrainerT2V14bRequestsByRequestIdStatusResponse = + GetFalAiWanTrainerT2V14bRequestsByRequestIdStatusResponses[keyof GetFalAiWanTrainerT2V14bRequestsByRequestIdStatusResponses] + +export type PutFalAiWanTrainerT2V14bRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-trainer/t2v-14b/requests/{request_id}/cancel' +} + +export type PutFalAiWanTrainerT2V14bRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiWanTrainerT2V14bRequestsByRequestIdCancelResponse = + PutFalAiWanTrainerT2V14bRequestsByRequestIdCancelResponses[keyof PutFalAiWanTrainerT2V14bRequestsByRequestIdCancelResponses] + +export type PostFalAiWanTrainerT2V14bData = { + body: SchemaWanTrainerT2V14bInput + path?: never + query?: never + url: '/fal-ai/wan-trainer/t2v-14b' +} + +export type PostFalAiWanTrainerT2V14bResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanTrainerT2V14bResponse = + PostFalAiWanTrainerT2V14bResponses[keyof PostFalAiWanTrainerT2V14bResponses] + +export type GetFalAiWanTrainerT2V14bRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-trainer/t2v-14b/requests/{request_id}' +} + +export type GetFalAiWanTrainerT2V14bRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWanTrainerT2V14bOutput +} + +export type GetFalAiWanTrainerT2V14bRequestsByRequestIdResponse = + GetFalAiWanTrainerT2V14bRequestsByRequestIdResponses[keyof GetFalAiWanTrainerT2V14bRequestsByRequestIdResponses] + +export type GetFalAiWanTrainerI2V720pRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan-trainer/i2v-720p/requests/{request_id}/status' +} + +export type GetFalAiWanTrainerI2V720pRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiWanTrainerI2V720pRequestsByRequestIdStatusResponse = + GetFalAiWanTrainerI2V720pRequestsByRequestIdStatusResponses[keyof GetFalAiWanTrainerI2V720pRequestsByRequestIdStatusResponses] + +export type PutFalAiWanTrainerI2V720pRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-trainer/i2v-720p/requests/{request_id}/cancel' +} + +export type PutFalAiWanTrainerI2V720pRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiWanTrainerI2V720pRequestsByRequestIdCancelResponse = + PutFalAiWanTrainerI2V720pRequestsByRequestIdCancelResponses[keyof PutFalAiWanTrainerI2V720pRequestsByRequestIdCancelResponses] + +export type PostFalAiWanTrainerI2V720pData = { + body: SchemaWanTrainerI2V720pInput + path?: never + query?: never + url: '/fal-ai/wan-trainer/i2v-720p' +} + +export type PostFalAiWanTrainerI2V720pResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanTrainerI2V720pResponse = + PostFalAiWanTrainerI2V720pResponses[keyof PostFalAiWanTrainerI2V720pResponses] + +export type GetFalAiWanTrainerI2V720pRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-trainer/i2v-720p/requests/{request_id}' +} + +export type GetFalAiWanTrainerI2V720pRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWanTrainerI2V720pOutput +} + +export type GetFalAiWanTrainerI2V720pRequestsByRequestIdResponse = + GetFalAiWanTrainerI2V720pRequestsByRequestIdResponses[keyof GetFalAiWanTrainerI2V720pRequestsByRequestIdResponses] + +export type GetFalAiWanTrainerFlf2V720pRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan-trainer/flf2v-720p/requests/{request_id}/status' +} + +export type GetFalAiWanTrainerFlf2V720pRequestsByRequestIdStatusResponses = { + /** + * The request status. 
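+ *
+ * Hand-written sketch, not part of the generated output: a naive polling
+ * loop that waits for completion. The base URL, authorization scheme, and
+ * 10-second interval are assumptions; production code should add timeouts
+ * and error handling.
+ * @example
+ * let status: SchemaQueueStatus
+ * do {
+ *   await new Promise((resolve) => setTimeout(resolve, 10_000))
+ *   const res = await fetch(
+ *     `https://queue.fal.run/fal-ai/wan-trainer/flf2v-720p/requests/${requestId}/status`,
+ *     { headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+ *   )
+ *   status = await res.json()
+ * } while (status.status !== 'COMPLETED')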
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiWanTrainerFlf2V720pRequestsByRequestIdStatusResponse = + GetFalAiWanTrainerFlf2V720pRequestsByRequestIdStatusResponses[keyof GetFalAiWanTrainerFlf2V720pRequestsByRequestIdStatusResponses] + +export type PutFalAiWanTrainerFlf2V720pRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-trainer/flf2v-720p/requests/{request_id}/cancel' +} + +export type PutFalAiWanTrainerFlf2V720pRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiWanTrainerFlf2V720pRequestsByRequestIdCancelResponse = + PutFalAiWanTrainerFlf2V720pRequestsByRequestIdCancelResponses[keyof PutFalAiWanTrainerFlf2V720pRequestsByRequestIdCancelResponses] + +export type PostFalAiWanTrainerFlf2V720pData = { + body: SchemaWanTrainerFlf2V720pInput + path?: never + query?: never + url: '/fal-ai/wan-trainer/flf2v-720p' +} + +export type PostFalAiWanTrainerFlf2V720pResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanTrainerFlf2V720pResponse = + PostFalAiWanTrainerFlf2V720pResponses[keyof PostFalAiWanTrainerFlf2V720pResponses] + +export type GetFalAiWanTrainerFlf2V720pRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-trainer/flf2v-720p/requests/{request_id}' +} + +export type GetFalAiWanTrainerFlf2V720pRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWanTrainerFlf2V720pOutput +} + +export type GetFalAiWanTrainerFlf2V720pRequestsByRequestIdResponse = + GetFalAiWanTrainerFlf2V720pRequestsByRequestIdResponses[keyof GetFalAiWanTrainerFlf2V720pRequestsByRequestIdResponses] + +export type GetFalAiLtxVideoTrainerRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ltx-video-trainer/requests/{request_id}/status' +} + +export type GetFalAiLtxVideoTrainerRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLtxVideoTrainerRequestsByRequestIdStatusResponse = + GetFalAiLtxVideoTrainerRequestsByRequestIdStatusResponses[keyof GetFalAiLtxVideoTrainerRequestsByRequestIdStatusResponses] + +export type PutFalAiLtxVideoTrainerRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-video-trainer/requests/{request_id}/cancel' +} + +export type PutFalAiLtxVideoTrainerRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLtxVideoTrainerRequestsByRequestIdCancelResponse = + PutFalAiLtxVideoTrainerRequestsByRequestIdCancelResponses[keyof PutFalAiLtxVideoTrainerRequestsByRequestIdCancelResponses] + +export type PostFalAiLtxVideoTrainerData = { + body: SchemaLtxVideoTrainerInput + path?: never + query?: never + url: '/fal-ai/ltx-video-trainer' +} + +export type PostFalAiLtxVideoTrainerResponses = { + /** + * The request status. 
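+ *
+ * Hand-written sketch, not part of the generated output: the LTX trainer
+ * body accepts optional validation prompts alongside the required dataset
+ * URL (submission itself works like the other POST endpoints). The URL is a
+ * placeholder.
+ * @example
+ * const body: PostFalAiLtxVideoTrainerData['body'] = {
+ *   training_data_url: 'https://example.com/clips.zip',
+ *   validation: [{ prompt: 'a cat surfing a wave at sunset' }],
+ * }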
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtxVideoTrainerResponse = + PostFalAiLtxVideoTrainerResponses[keyof PostFalAiLtxVideoTrainerResponses] + +export type GetFalAiLtxVideoTrainerRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-video-trainer/requests/{request_id}' +} + +export type GetFalAiLtxVideoTrainerRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLtxVideoTrainerOutput +} + +export type GetFalAiLtxVideoTrainerRequestsByRequestIdResponse = + GetFalAiLtxVideoTrainerRequestsByRequestIdResponses[keyof GetFalAiLtxVideoTrainerRequestsByRequestIdResponses] + +export type GetFalAiRecraftV3CreateStyleRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/recraft/v3/create-style/requests/{request_id}/status' +} + +export type GetFalAiRecraftV3CreateStyleRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiRecraftV3CreateStyleRequestsByRequestIdStatusResponse = + GetFalAiRecraftV3CreateStyleRequestsByRequestIdStatusResponses[keyof GetFalAiRecraftV3CreateStyleRequestsByRequestIdStatusResponses] + +export type PutFalAiRecraftV3CreateStyleRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/recraft/v3/create-style/requests/{request_id}/cancel' +} + +export type PutFalAiRecraftV3CreateStyleRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiRecraftV3CreateStyleRequestsByRequestIdCancelResponse = + PutFalAiRecraftV3CreateStyleRequestsByRequestIdCancelResponses[keyof PutFalAiRecraftV3CreateStyleRequestsByRequestIdCancelResponses] + +export type PostFalAiRecraftV3CreateStyleData = { + body: SchemaRecraftV3CreateStyleInput + path?: never + query?: never + url: '/fal-ai/recraft/v3/create-style' +} + +export type PostFalAiRecraftV3CreateStyleResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiRecraftV3CreateStyleResponse = + PostFalAiRecraftV3CreateStyleResponses[keyof PostFalAiRecraftV3CreateStyleResponses] + +export type GetFalAiRecraftV3CreateStyleRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/recraft/v3/create-style/requests/{request_id}' +} + +export type GetFalAiRecraftV3CreateStyleRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaRecraftV3CreateStyleOutput +} + +export type GetFalAiRecraftV3CreateStyleRequestsByRequestIdResponse = + GetFalAiRecraftV3CreateStyleRequestsByRequestIdResponses[keyof GetFalAiRecraftV3CreateStyleRequestsByRequestIdResponses] + +export type GetFalAiTurboFluxTrainerRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/turbo-flux-trainer/requests/{request_id}/status' +} + +export type GetFalAiTurboFluxTrainerRequestsByRequestIdStatusResponses = { + /** + * The request status. 
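+ *
+ * Hand-written sketch, not part of the generated output: since every *Data
+ * type carries its route in the `url` field, a small generic helper can be
+ * shared across endpoints. Base URL and authorization scheme are
+ * assumptions.
+ * @example
+ * async function queueGet<T>(url: string): Promise<T> {
+ *   const res = await fetch(`https://queue.fal.run${url}`, {
+ *     headers: { Authorization: `Key ${process.env.FAL_KEY}` },
+ *   })
+ *   return res.json() as Promise<T>
+ * }
+ * const status = await queueGet<SchemaQueueStatus>(
+ *   `/fal-ai/turbo-flux-trainer/requests/${requestId}/status`,
+ * )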
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiTurboFluxTrainerRequestsByRequestIdStatusResponse = + GetFalAiTurboFluxTrainerRequestsByRequestIdStatusResponses[keyof GetFalAiTurboFluxTrainerRequestsByRequestIdStatusResponses] + +export type PutFalAiTurboFluxTrainerRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/turbo-flux-trainer/requests/{request_id}/cancel' +} + +export type PutFalAiTurboFluxTrainerRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiTurboFluxTrainerRequestsByRequestIdCancelResponse = + PutFalAiTurboFluxTrainerRequestsByRequestIdCancelResponses[keyof PutFalAiTurboFluxTrainerRequestsByRequestIdCancelResponses] + +export type PostFalAiTurboFluxTrainerData = { + body: SchemaTurboFluxTrainerInput + path?: never + query?: never + url: '/fal-ai/turbo-flux-trainer' +} + +export type PostFalAiTurboFluxTrainerResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiTurboFluxTrainerResponse = + PostFalAiTurboFluxTrainerResponses[keyof PostFalAiTurboFluxTrainerResponses] + +export type GetFalAiTurboFluxTrainerRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/turbo-flux-trainer/requests/{request_id}' +} + +export type GetFalAiTurboFluxTrainerRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaTurboFluxTrainerOutput +} + +export type GetFalAiTurboFluxTrainerRequestsByRequestIdResponse = + GetFalAiTurboFluxTrainerRequestsByRequestIdResponses[keyof GetFalAiTurboFluxTrainerRequestsByRequestIdResponses] + +export type GetFalAiWanTrainerRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan-trainer/requests/{request_id}/status' +} + +export type GetFalAiWanTrainerRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiWanTrainerRequestsByRequestIdStatusResponse = + GetFalAiWanTrainerRequestsByRequestIdStatusResponses[keyof GetFalAiWanTrainerRequestsByRequestIdStatusResponses] + +export type PutFalAiWanTrainerRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-trainer/requests/{request_id}/cancel' +} + +export type PutFalAiWanTrainerRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiWanTrainerRequestsByRequestIdCancelResponse = + PutFalAiWanTrainerRequestsByRequestIdCancelResponses[keyof PutFalAiWanTrainerRequestsByRequestIdCancelResponses] + +export type PostFalAiWanTrainerData = { + body: SchemaWanTrainerInput + path?: never + query?: never + url: '/fal-ai/wan-trainer' +} + +export type PostFalAiWanTrainerResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanTrainerResponse = + PostFalAiWanTrainerResponses[keyof PostFalAiWanTrainerResponses] + +export type GetFalAiWanTrainerRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-trainer/requests/{request_id}' +} + +export type GetFalAiWanTrainerRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWanTrainerOutput +} + +export type GetFalAiWanTrainerRequestsByRequestIdResponse = + GetFalAiWanTrainerRequestsByRequestIdResponses[keyof GetFalAiWanTrainerRequestsByRequestIdResponses] + +export type GetFalAiHunyuanVideoLoraTrainingRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/hunyuan-video-lora-training/requests/{request_id}/status' +} + +export type GetFalAiHunyuanVideoLoraTrainingRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiHunyuanVideoLoraTrainingRequestsByRequestIdStatusResponse = + GetFalAiHunyuanVideoLoraTrainingRequestsByRequestIdStatusResponses[keyof GetFalAiHunyuanVideoLoraTrainingRequestsByRequestIdStatusResponses] + +export type PutFalAiHunyuanVideoLoraTrainingRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan-video-lora-training/requests/{request_id}/cancel' +} + +export type PutFalAiHunyuanVideoLoraTrainingRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiHunyuanVideoLoraTrainingRequestsByRequestIdCancelResponse = + PutFalAiHunyuanVideoLoraTrainingRequestsByRequestIdCancelResponses[keyof PutFalAiHunyuanVideoLoraTrainingRequestsByRequestIdCancelResponses] + +export type PostFalAiHunyuanVideoLoraTrainingData = { + body: SchemaHunyuanVideoLoraTrainingInput + path?: never + query?: never + url: '/fal-ai/hunyuan-video-lora-training' +} + +export type PostFalAiHunyuanVideoLoraTrainingResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiHunyuanVideoLoraTrainingResponse = + PostFalAiHunyuanVideoLoraTrainingResponses[keyof PostFalAiHunyuanVideoLoraTrainingResponses] + +export type GetFalAiHunyuanVideoLoraTrainingRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan-video-lora-training/requests/{request_id}' +} + +export type GetFalAiHunyuanVideoLoraTrainingRequestsByRequestIdResponses = { + /** + * Result of the request. 
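+ *
+ * Hand-written sketch, not part of the generated output: fetching the
+ * finished result and downloading the trained LoRA weights from the
+ * returned file URL. Base URL and authorization scheme are assumptions.
+ * @example
+ * const res = await fetch(
+ *   `https://queue.fal.run/fal-ai/hunyuan-video-lora-training/requests/${requestId}`,
+ *   { headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+ * )
+ * const output: SchemaHunyuanVideoLoraTrainingOutput = await res.json()
+ * const weights = await fetch(output.diffusers_lora_file.url)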
+ */ + 200: SchemaHunyuanVideoLoraTrainingOutput +} + +export type GetFalAiHunyuanVideoLoraTrainingRequestsByRequestIdResponse = + GetFalAiHunyuanVideoLoraTrainingRequestsByRequestIdResponses[keyof GetFalAiHunyuanVideoLoraTrainingRequestsByRequestIdResponses] diff --git a/packages/typescript/ai-fal/src/generated/training/zod.gen.ts b/packages/typescript/ai-fal/src/generated/training/zod.gen.ts new file mode 100644 index 00000000..f991982a --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/training/zod.gen.ts @@ -0,0 +1,4800 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { z } from 'zod' + +/** + * File + */ +export const zSchemaFile = z.object({ + file_size: z.optional(z.union([z.int(), z.unknown()])), + file_name: z.optional(z.union([z.string(), z.unknown()])), + content_type: z.optional(z.union([z.string(), z.unknown()])), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), +}) + +/** + * Output + */ +export const zSchemaHunyuanVideoLoraTrainingOutput = z.object({ + config_file: zSchemaFile, + diffusers_lora_file: zSchemaFile, +}) + +/** + * PublicInput + */ +export const zSchemaHunyuanVideoLoraTrainingInput = z.object({ + trigger_word: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The trigger word to use.', + }), + ) + .default(''), + images_data_url: z.string().register(z.globalRegistry, { + description: + '\n URL to zip archive with images. Try to use at least 4 images; in general, the more the better.\n\n In addition to images, the archive can contain text files with captions. Each text file should have the same name as the image file it corresponds to.\n ', + }), + steps: z.int().gte(1).lte(5000).register(z.globalRegistry, { + description: 'Number of steps to train the LoRA on.', + }), + data_archive_format: z.optional(z.union([z.string(), z.unknown(), z.null()])), + learning_rate: z + .optional( + z.number().register(z.globalRegistry, { + description: 'Learning rate to use for training.', + }), + ) + .default(0.0001), + do_caption: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to generate captions for the images.', + }), + ) + .default(true), +}) + +/** + * Output + */ +export const zSchemaWanTrainerOutput = z.object({ + lora_file: zSchemaFile, + config_file: zSchemaFile, +}) + +/** + * Input + */ +export const zSchemaWanTrainerInput = z.object({ + number_of_steps: z + .optional( + z.int().gte(1).lte(20000).register(z.globalRegistry, { + description: 'The number of steps to train for.', + }), + ) + .default(400), + training_data_url: z.string().register(z.globalRegistry, { + description: + 'URL to zip archive with images of a consistent style. Try to use at least 10 images and/or videos, although more is better.\n\n In addition to images, the archive can contain text files with captions. Each text file should have the same name as the image/video file it corresponds to.', + }), + trigger_phrase: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'The phrase that will trigger the model to generate an image.', + }), + ) + .default(''), + learning_rate: z + .optional( + z.number().gte(0.000001).lte(1).register(z.globalRegistry, { + description: + 'The rate at which the model learns.
Higher values can lead to faster training, but also to over-fitting.', + }), + ) + .default(0.0002), + auto_scale_input: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the input video will be automatically scaled to 81 frames at 16fps.', + }), + ) + .default(false), +}) + +/** + * Output + */ +export const zSchemaTurboFluxTrainerOutput = z.object({ + config_file: zSchemaFile, + diffusers_lora_file: zSchemaFile, +}) + +/** + * Input + */ +export const zSchemaTurboFluxTrainerInput = z.object({ + images_data_url: z.string().register(z.globalRegistry, { + description: + '\n URL to zip archive with images of a consistent style. Try to use at least 10 images, although more is better.\n ', + }), + trigger_phrase: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Trigger phrase to be used in the captions. If None, a trigger word will not be used.\n If no captions are provided, the trigger_word will be used instead of captions. If captions are provided, the trigger word will replace the `[trigger]` string in the captions.\n ', + }), + ) + .default('ohwx'), + steps: z + .optional( + z.int().gte(1).lte(10000).register(z.globalRegistry, { + description: 'Number of steps to train the LoRA on.', + }), + ) + .default(1000), + learning_rate: z + .optional( + z.number().gte(1e-7).lte(0.01).register(z.globalRegistry, { + description: 'Learning rate for the training.', + }), + ) + .default(0.00115), + training_style: z.optional( + z.enum(['subject', 'style']).register(z.globalRegistry, { + description: 'Training style to use.', + }), + ), + face_crop: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to try to detect the face and crop the images to the face.', + }), + ) + .default(true), +}) + +/** + * StyleReferenceOutput + */ +export const zSchemaRecraftV3CreateStyleOutput = z.object({ + style_id: z.string().register(z.globalRegistry, { + description: + 'The ID of the created style; this ID can be used to reference the style in the future.', + }), +}) + +/** + * StyleReferenceInput + */ +export const zSchemaRecraftV3CreateStyleInput = z.object({ + images_data_url: z.string().register(z.globalRegistry, { + description: + 'URL to zip archive with images, use PNG format.
Maximum 5 images are allowed.', + }), + base_style: z.optional( + z + .enum([ + 'any', + 'realistic_image', + 'digital_illustration', + 'vector_illustration', + 'realistic_image/b_and_w', + 'realistic_image/hard_flash', + 'realistic_image/hdr', + 'realistic_image/natural_light', + 'realistic_image/studio_portrait', + 'realistic_image/enterprise', + 'realistic_image/motion_blur', + 'realistic_image/evening_light', + 'realistic_image/faded_nostalgia', + 'realistic_image/forest_life', + 'realistic_image/mystic_naturalism', + 'realistic_image/natural_tones', + 'realistic_image/organic_calm', + 'realistic_image/real_life_glow', + 'realistic_image/retro_realism', + 'realistic_image/retro_snapshot', + 'realistic_image/urban_drama', + 'realistic_image/village_realism', + 'realistic_image/warm_folk', + 'digital_illustration/pixel_art', + 'digital_illustration/hand_drawn', + 'digital_illustration/grain', + 'digital_illustration/infantile_sketch', + 'digital_illustration/2d_art_poster', + 'digital_illustration/handmade_3d', + 'digital_illustration/hand_drawn_outline', + 'digital_illustration/engraving_color', + 'digital_illustration/2d_art_poster_2', + 'digital_illustration/antiquarian', + 'digital_illustration/bold_fantasy', + 'digital_illustration/child_book', + 'digital_illustration/child_books', + 'digital_illustration/cover', + 'digital_illustration/crosshatch', + 'digital_illustration/digital_engraving', + 'digital_illustration/expressionism', + 'digital_illustration/freehand_details', + 'digital_illustration/grain_20', + 'digital_illustration/graphic_intensity', + 'digital_illustration/hard_comics', + 'digital_illustration/long_shadow', + 'digital_illustration/modern_folk', + 'digital_illustration/multicolor', + 'digital_illustration/neon_calm', + 'digital_illustration/noir', + 'digital_illustration/nostalgic_pastel', + 'digital_illustration/outline_details', + 'digital_illustration/pastel_gradient', + 'digital_illustration/pastel_sketch', + 'digital_illustration/pop_art', + 'digital_illustration/pop_renaissance', + 'digital_illustration/street_art', + 'digital_illustration/tablet_sketch', + 'digital_illustration/urban_glow', + 'digital_illustration/urban_sketching', + 'digital_illustration/vanilla_dreams', + 'digital_illustration/young_adult_book', + 'digital_illustration/young_adult_book_2', + 'vector_illustration/bold_stroke', + 'vector_illustration/chemistry', + 'vector_illustration/colored_stencil', + 'vector_illustration/contour_pop_art', + 'vector_illustration/cosmics', + 'vector_illustration/cutout', + 'vector_illustration/depressive', + 'vector_illustration/editorial', + 'vector_illustration/emotional_flat', + 'vector_illustration/infographical', + 'vector_illustration/marker_outline', + 'vector_illustration/mosaic', + 'vector_illustration/naivector', + 'vector_illustration/roundish_flat', + 'vector_illustration/segmented_colors', + 'vector_illustration/sharp_contrast', + 'vector_illustration/thin', + 'vector_illustration/vector_photo', + 'vector_illustration/vivid_shapes', + 'vector_illustration/engraving', + 'vector_illustration/line_art', + 'vector_illustration/line_circuit', + 'vector_illustration/linocut', + ]) + .register(z.globalRegistry, { + description: + 'The base style of the generated images, this topic is covered above.', + }), + ), +}) + +/** + * TrainingOutput + */ +export const zSchemaLtxVideoTrainerOutput = z.object({ + lora_file: zSchemaFile, + config_file: zSchemaFile, + video: z.union([zSchemaFile, z.unknown()]), +}) + +/** + * Validation + */ +export const 
zSchemaValidation = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to use for validation.', + }), + image_url: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * Input + */ +export const zSchemaLtxVideoTrainerInput = z.object({ + number_of_steps: z + .optional( + z.int().gte(100).lte(20000).register(z.globalRegistry, { + description: 'The number of steps to train for.', + }), + ) + .default(1000), + frame_rate: z + .optional( + z.int().gte(8).lte(60).register(z.globalRegistry, { + description: 'The target frames per second for the video.', + }), + ) + .default(25), + learning_rate: z + .optional( + z.number().gte(0.000001).lte(1).register(z.globalRegistry, { + description: + 'The rate at which the model learns. Higher values can lead to faster training, but also to over-fitting.', + }), + ) + .default(0.0002), + validation: z + .optional( + z.array(zSchemaValidation).max(2).register(z.globalRegistry, { + description: + 'A list of validation prompts to use during training. When providing an image, _all_ validation inputs must have an image.', + }), + ) + .default([]), + number_of_frames: z + .optional( + z.int().gte(25).lte(121).register(z.globalRegistry, { + description: + 'The number of frames to use for training. This is the number of frames per second multiplied by the number of seconds.', + }), + ) + .default(81), + validation_reverse: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the validation videos will be reversed. This is useful for effects that are learned in reverse and then applied in reverse.', + }), + ) + .default(false), + training_data_url: z.string().register(z.globalRegistry, { + description: + 'URL to zip archive with videos or images. Try to use at least 10 files, although more is better.\n\n **Supported video formats:** .mp4, .mov, .avi, .mkv\n **Supported image formats:** .png, .jpg, .jpeg\n\n Note: The dataset must contain ONLY videos OR ONLY images - mixed datasets are not supported.\n\n The archive can also contain text files with captions. Each text file should have the same name as the media file it corresponds to.', + }), + split_input_duration_threshold: z + .optional( + z.number().gte(1).lte(60).register(z.globalRegistry, { + description: + 'The duration threshold in seconds. If a video is longer than this, it will be split into scenes. If you provide captions for a split video, the caption will be applied to each scene. If you do not provide captions, scenes will be auto-captioned.', + }), + ) + .default(30), + rank: z.optional( + z + .union([ + z.literal(8), + z.literal(16), + z.literal(32), + z.literal(64), + z.literal(128), + ]) + .register(z.globalRegistry, { + description: 'The rank of the LoRA.', + }), + ), + aspect_ratio: z.optional( + z.enum(['16:9', '1:1', '9:16']).register(z.globalRegistry, { + description: + 'The aspect ratio to use for training. This is the aspect ratio of the video.', + }), + ), + trigger_phrase: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'The phrase that will trigger the model to generate an image.', + }), + ) + .default(''), + resolution: z.optional( + z.enum(['low', 'medium', 'high']).register(z.globalRegistry, { + description: + 'The resolution to use for training. This is the resolution of the video.', + }), + ), + split_input_into_scenes: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, videos above a certain duration threshold will be split into scenes.
If you provide captions for a split video, the caption will be applied to each scene. If you do not provide captions, scenes will be auto-captioned. This option has no effect on image datasets.', + }), + ) + .default(true), + validation_resolution: z.optional( + z.enum(['low', 'medium', 'high']).register(z.globalRegistry, { + description: 'The resolution to use for validation.', + }), + ), + validation_number_of_frames: z + .optional( + z.int().gte(8).lte(121).register(z.globalRegistry, { + description: 'The number of frames to use for validation.', + }), + ) + .default(81), + validation_aspect_ratio: z.optional( + z.enum(['16:9', '1:1', '9:16']).register(z.globalRegistry, { + description: 'The aspect ratio to use for validation.', + }), + ), + validation_negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'A negative prompt to use for validation.', + }), + ) + .default('blurry, low quality, bad quality, out of focus'), + auto_scale_input: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, videos will be automatically scaled to the target frame count and fps. This option has no effect on image datasets.', + }), + ) + .default(false), +}) + +/** + * Output + */ +export const zSchemaWanTrainerFlf2V720pOutput = z.object({ + lora_file: zSchemaFile, + config_file: zSchemaFile, +}) + +/** + * Input + */ +export const zSchemaWanTrainerFlf2V720pInput = z.object({ + number_of_steps: z + .optional( + z.int().gte(1).lte(20000).register(z.globalRegistry, { + description: 'The number of steps to train for.', + }), + ) + .default(400), + training_data_url: z.string().register(z.globalRegistry, { + description: + 'URL to zip archive with images of a consistent style. Try to use at least 10 images and/or videos, although more is better.\n\n In addition to images, the archive can contain text files with captions. Each text file should have the same name as the image/video file it corresponds to.', + }), + trigger_phrase: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'The phrase that will trigger the model to generate an image.', + }), + ) + .default(''), + learning_rate: z + .optional( + z.number().gte(0.000001).lte(1).register(z.globalRegistry, { + description: + 'The rate at which the model learns. Higher values can lead to faster training, but also to over-fitting.', + }), + ) + .default(0.0002), + auto_scale_input: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the input video will be automatically scaled to 81 frames at 16fps.', + }), + ) + .default(false), +}) + +/** + * Output + */ +export const zSchemaWanTrainerI2V720pOutput = z.object({ + lora_file: zSchemaFile, + config_file: zSchemaFile, +}) + +/** + * Input + */ +export const zSchemaWanTrainerI2V720pInput = z.object({ + number_of_steps: z + .optional( + z.int().gte(1).lte(20000).register(z.globalRegistry, { + description: 'The number of steps to train for.', + }), + ) + .default(400), + training_data_url: z.string().register(z.globalRegistry, { + description: + 'URL to zip archive with images of a consistent style. Try to use at least 10 images and/or videos, although more is better.\n\n In addition to images, the archive can contain text files with captions.
Each text file should have the same name as the image/video file it corresponds to.', + }), + trigger_phrase: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'The phrase that will trigger the model to generate an image.', + }), + ) + .default(''), + learning_rate: z + .optional( + z.number().gte(0.000001).lte(1).register(z.globalRegistry, { + description: + 'The rate at which the model learns. Higher values can lead to faster training, but also to over-fitting.', + }), + ) + .default(0.0002), + auto_scale_input: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the input video will be automatically scaled to 81 frames at 16fps.', + }), + ) + .default(false), +}) + +/** + * Output + */ +export const zSchemaWanTrainerT2V14bOutput = z.object({ + lora_file: zSchemaFile, + config_file: zSchemaFile, +}) + +/** + * Input + */ +export const zSchemaWanTrainerT2V14bInput = z.object({ + number_of_steps: z + .optional( + z.int().gte(1).lte(20000).register(z.globalRegistry, { + description: 'The number of steps to train for.', + }), + ) + .default(400), + training_data_url: z.string().register(z.globalRegistry, { + description: + 'URL to zip archive with images of a consistent style. Try to use at least 10 images and/or videos, although more is better.\n\n In addition to images, the archive can contain text files with captions. Each text file should have the same name as the image/video file it corresponds to.', + }), + trigger_phrase: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'The phrase that will trigger the model to generate an image.', + }), + ) + .default(''), + learning_rate: z + .optional( + z.number().gte(0.000001).lte(1).register(z.globalRegistry, { + description: + 'The rate at which the model learns. Higher values can lead to faster training, but also to over-fitting.', + }), + ) + .default(0.0002), + auto_scale_input: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the input video will be automatically scaled to 81 frames at 16fps.', + }), + ) + .default(false), +}) + +/** + * Output + */ +export const zSchemaWanTrainerT2vOutput = z.object({ + lora_file: zSchemaFile, + config_file: zSchemaFile, +}) + +/** + * Input + */ +export const zSchemaWanTrainerT2vInput = z.object({ + number_of_steps: z + .optional( + z.int().gte(1).lte(20000).register(z.globalRegistry, { + description: 'The number of steps to train for.', + }), + ) + .default(400), + training_data_url: z.string().register(z.globalRegistry, { + description: + 'URL to zip archive with images of a consistent style. Try to use at least 10 images and/or videos, although more is better.\n\n In addition to images, the archive can contain text files with captions. Each text file should have the same name as the image/video file it corresponds to.', + }), + trigger_phrase: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'The phrase that will trigger the model to generate an image.', + }), + ) + .default(''), + learning_rate: z + .optional( + z.number().gte(0.000001).lte(1).register(z.globalRegistry, { + description: + 'The rate at which the model learns.
Higher values can lead to faster training, but also to over-fitting.', + }), + ) + .default(0.0002), + auto_scale_input: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the input video will be automatically scaled to 81 frames at 16fps.', + }), + ) + .default(false), +}) + +/** + * WanTrainerResponse + */ +export const zSchemaWan22ImageTrainerOutput = z.object({ + config_file: zSchemaFile, + high_noise_lora: zSchemaFile, + diffusers_lora_file: zSchemaFile, +}) + +/** + * BasicInput + */ +export const zSchemaWan22ImageTrainerInput = z.object({ + trigger_phrase: z.string().register(z.globalRegistry, { + description: 'Trigger phrase for the model.', + }), + use_masks: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to use masks for the training data.', + }), + ) + .default(true), + learning_rate: z + .optional( + z.number().gte(0.000001).lte(0.1).register(z.globalRegistry, { + description: 'Learning rate for training.', + }), + ) + .default(0.0007), + use_face_cropping: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to use face cropping for the training data. When enabled, images will be cropped to the face before resizing.', + }), + ) + .default(false), + training_data_url: z.string().register(z.globalRegistry, { + description: 'URL to the training data.', + }), + steps: z + .optional( + z.int().gte(10).lte(6000).register(z.globalRegistry, { + description: 'Number of training steps.', + }), + ) + .default(1000), + include_synthetic_captions: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to include synthetic captions.', + }), + ) + .default(false), + is_style: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether the training data is style data. If true, face specific options like masking and face detection will be disabled.', + }), + ) + .default(false), + use_face_detection: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to use face detection for the training data. When enabled, images will use the center of the face as the center of the image when resizing.', + }), + ) + .default(true), +}) + +/** + * Output + */ +export const zSchemaQwenImageTrainerOutput = z.object({ + lora_file: zSchemaFile, + config_file: zSchemaFile, +}) + +/** + * PublicInput + */ +export const zSchemaQwenImageTrainerInput = z.object({ + steps: z + .optional( + z.int().gte(1).lte(8000).register(z.globalRegistry, { + description: + 'Total number of training steps to perform. Default is 4000.', + }), + ) + .default(1000), + image_data_url: z.string().register(z.globalRegistry, { + description: + '\n URL to zip archive with images for training. The archive should contain images and corresponding text files with captions.\n Each text file should have the same name as the image file it corresponds to (e.g., image1.jpg and image1.txt).\n If text files are missing for some images, you can provide a trigger_phrase to automatically create them.\n Supported image formats: PNG, JPG, JPEG, WEBP.\n Try to use at least 10 images, although more is better.\n ', + }), + learning_rate: z + .optional( + z.number().gte(0.000001).lte(0.01).register(z.globalRegistry, { + description: 'Learning rate for training. Default is 5e-4', + }), + ) + .default(0.0005), + trigger_phrase: z + .optional( + z.string().register(z.globalRegistry, { + description: + "Default caption to use for images that don't have corresponding text files.
If provided, missing .txt files will be created automatically.", + }), + ) + .default(''), +}) + +/** + * Output + */ +export const zSchemaQwenImageEditTrainerOutput = z.object({ + config_file: zSchemaFile, + diffusers_lora_file: zSchemaFile, +}) + +/** + * InputEdit + */ +export const zSchemaQwenImageEditTrainerInput = z.object({ + steps: z + .optional( + z.int().gte(100).lte(30000).register(z.globalRegistry, { + description: 'Number of steps to train for', + }), + ) + .default(1000), + image_data_url: z.string().register(z.globalRegistry, { + description: + '\n URL to the input data zip archive.\n\n The zip should contain pairs of images. The images should be named:\n\n ROOT_start.EXT and ROOT_end.EXT\n For example:\n photo_start.jpg and photo_end.jpg\n\n The zip can also contain a text file for each image pair. The text file should be named:\n ROOT.txt\n For example:\n photo.txt\n\n This text file can be used to specify the edit instructions for the image pair.\n\n If no text file is provided, the default_caption will be used.\n\n If no default_caption is provided, the training will fail.\n ', + }), + learning_rate: z + .optional( + z.number().register(z.globalRegistry, { + description: 'Learning rate for LoRA parameters.', + }), + ) + .default(0.0001), + default_caption: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * Output + */ +export const zSchemaQwenImageEditPlusTrainerOutput = z.object({ + config_file: zSchemaFile, + diffusers_lora_file: zSchemaFile, +}) + +/** + * InputPlus + */ +export const zSchemaQwenImageEditPlusTrainerInput = z.object({ + steps: z + .optional( + z.int().gte(100).lte(30000).register(z.globalRegistry, { + description: 'Number of steps to train for', + }), + ) + .default(1000), + image_data_url: z.string().register(z.globalRegistry, { + description: + '\n URL to the input data zip archive.\n\n The zip should contain pairs of images. The images should be named:\n\n ROOT_start.EXT and ROOT_end.EXT\n For example:\n photo_start.jpg and photo_end.jpg\n\n The zip can also contain more than one reference image for each image pair. The reference images should be named:\n ROOT_start.EXT, ROOT_start2.EXT, ROOT_start3.EXT, ..., ROOT_end.EXT\n For example:\n photo_start.jpg, photo_start2.jpg, photo_end.jpg\n\n The Reference Image Count field should be set to the number of reference images.\n\n The zip can also contain a text file for each image pair. The text file should be named:\n ROOT.txt\n For example:\n photo.txt\n\n This text file can be used to specify the edit instructions for the image pair.\n\n If no text file is provided, the default_caption will be used.\n\n If no default_caption is provided, the training will fail.\n ', + }), + learning_rate: z + .optional( + z.number().register(z.globalRegistry, { + description: 'Learning rate for LoRA parameters.', + }), + ) + .default(0.0001), + default_caption: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * Output + */ +export const zSchemaFlux2TrainerOutput = z.object({ + config_file: zSchemaFile, + diffusers_lora_file: zSchemaFile, +}) + +/** + * InputT2I + */ +export const zSchemaFlux2TrainerInput = z.object({ + steps: z + .optional( + z.int().gte(100).lte(10000).register(z.globalRegistry, { + description: 'Total number of training steps.', + }), + ) + .default(1000), + image_data_url: z.string().register(z.globalRegistry, { + description: + '\n URL to zip archive with images of a consistent style. 
Try to use at least 10 images, although more is better.\n\n The zip can also contain a text file for each image. The text file should be named:\n ROOT.txt\n For example:\n photo.txt\n\n This text file can be used to specify the edit instructions for the image pair.\n\n If no text file is provided, the default_caption will be used.\n\n If no default_caption is provided, the training will fail.\n ', + }), + learning_rate: z + .optional( + z.number().register(z.globalRegistry, { + description: 'Learning rate applied to trainable parameters.', + }), + ) + .default(0.00005), + default_caption: z.optional(z.union([z.string(), z.unknown()])), + output_lora_format: z.optional( + z.enum(['fal', 'comfy']).register(z.globalRegistry, { + description: 'Dictates the naming scheme for the output weights', + }), + ), +}) + +/** + * Output + */ +export const zSchemaFlux2TrainerEditOutput = z.object({ + config_file: zSchemaFile, + diffusers_lora_file: zSchemaFile, +}) + +/** + * InputEdit + */ +export const zSchemaFlux2TrainerEditInput = z.object({ + steps: z + .optional( + z.int().gte(100).lte(10000).register(z.globalRegistry, { + description: 'Total number of training steps.', + }), + ) + .default(1000), + image_data_url: z.string().register(z.globalRegistry, { + description: + '\n URL to the input data zip archive.\n\n The zip should contain pairs of images. The images should be named:\n\n ROOT_start.EXT and ROOT_end.EXT\n For example:\n photo_start.jpg and photo_end.jpg\n\n The zip can also contain up to four reference image for each image pair. The reference images should be named:\n ROOT_start.EXT, ROOT_start2.EXT, ROOT_start3.EXT, ROOT_start4.EXT, ROOT_end.EXT\n For example:\n photo_start.jpg, photo_start2.jpg, photo_end.jpg\n\n The zip can also contain a text file for each image pair. The text file should be named:\n ROOT.txt\n For example:\n photo.txt\n\n This text file can be used to specify the edit instructions for the image pair.\n\n If no text file is provided, the default_caption will be used.\n\n If no default_caption is provided, the training will fail.\n ', + }), + learning_rate: z + .optional( + z.number().register(z.globalRegistry, { + description: 'Learning rate applied to trainable parameters.', + }), + ) + .default(0.00005), + default_caption: z.optional(z.union([z.string(), z.unknown()])), + output_lora_format: z.optional( + z.enum(['fal', 'comfy']).register(z.globalRegistry, { + description: 'Dictates the naming scheme for the output weights', + }), + ), +}) + +/** + * Output + */ +export const zSchemaZImageTrainerOutput = z.object({ + config_file: zSchemaFile, + diffusers_lora_file: zSchemaFile, +}) + +/** + * Input + */ +export const zSchemaZImageTrainerInput = z.object({ + steps: z + .optional( + z.int().gte(100).lte(10000).register(z.globalRegistry, { + description: 'Total number of training steps.', + }), + ) + .default(1000), + image_data_url: z.string().register(z.globalRegistry, { + description: + '\n URL to zip archive with images of a consistent style. Try to use at least 10 images, although more is better.\n\n The zip can also contain a text file for each image. 
The text file should be named:\n ROOT.txt\n For example:\n photo.txt\n\n This text file can be used to specify the edit instructions for the image pair.\n\n If no text file is provided, the default_caption will be used.\n\n If no default_caption is provided, the training will fail.\n ', + }), + training_type: z.optional( + z.enum(['content', 'style', 'balanced']).register(z.globalRegistry, { + description: + "Type of training to perform. Use 'content' to focus on the content of the images, 'style' to focus on the style of the images, and 'balanced' to focus on a combination of both.", + }), + ), + learning_rate: z + .optional( + z.number().register(z.globalRegistry, { + description: 'Learning rate applied to trainable parameters.', + }), + ) + .default(0.0001), + default_caption: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * Output + */ +export const zSchemaQwenImageEdit2509TrainerOutput = z.object({ + config_file: zSchemaFile, + diffusers_lora_file: zSchemaFile, +}) + +/** + * InputPlus + */ +export const zSchemaQwenImageEdit2509TrainerInput = z.object({ + steps: z + .optional( + z.int().gte(100).lte(30000).register(z.globalRegistry, { + description: 'Number of steps to train for', + }), + ) + .default(1000), + image_data_url: z.string().register(z.globalRegistry, { + description: + '\n URL to the input data zip archive.\n\n The zip should contain pairs of images. The images should be named:\n\n ROOT_start.EXT and ROOT_end.EXT\n For example:\n photo_start.jpg and photo_end.jpg\n\n The zip can also contain more than one reference image for each image pair. The reference images should be named:\n ROOT_start.EXT, ROOT_start2.EXT, ROOT_start3.EXT, ..., ROOT_end.EXT\n For example:\n photo_start.jpg, photo_start2.jpg, photo_end.jpg\n\n The Reference Image Count field should be set to the number of reference images.\n\n The zip can also contain a text file for each image pair. The text file should be named:\n ROOT.txt\n For example:\n photo.txt\n\n This text file can be used to specify the edit instructions for the image pair.\n\n If no text file is provided, the default_caption will be used.\n\n If no default_caption is provided, the training will fail.\n ', + }), + learning_rate: z + .optional( + z.number().register(z.globalRegistry, { + description: 'Learning rate for LoRA parameters.', + }), + ) + .default(0.0001), + default_caption: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * Output + */ +export const zSchemaQwenImageLayeredTrainerOutput = z.object({ + config_file: zSchemaFile, + diffusers_lora_file: zSchemaFile, +}) + +/** + * Input + */ +export const zSchemaQwenImageLayeredTrainerInput = z.object({ + steps: z + .optional( + z.int().gte(100).lte(10000).register(z.globalRegistry, { + description: 'Number of steps to train for', + }), + ) + .default(1000), + image_data_url: z.string().register(z.globalRegistry, { + description: + '\n URL to the input data zip archive.\n\n The zip should contain groups of images. The images should be named:\n\n ROOT_start.EXT, ROOT_end.EXT, ROOT_end2.EXT, ..., ROOT_endN.EXT\n For example:\n photo_start.png, photo_end.png, photo_end2.png, ..., photo_endN.png\n\n The start image is the base image that will be decomposed into layers.\n The end images are the layers that will be added to the base image. ROOT_end.EXT is the first layer, ROOT_end2.EXT is the second layer, and so on.\n You can have up to 8 layers.\n All image groups must have the same number of output layers.\n\n The end images can contain transparent regions. 
Only PNG and WebP images are supported since these are the only formats that support transparency.\n\n The zip can also contain a text file for each image group. The text file should be named:\n ROOT.txt\n For example:\n photo.txt\n\n This text file can be used to specify a description of the base image.\n\n If no text file is provided, the default_caption will be used.\n\n If no default_caption is provided, the training will fail.\n ', + }), + learning_rate: z + .optional( + z.number().register(z.globalRegistry, { + description: 'Learning rate for LoRA parameters.', + }), + ) + .default(0.0001), + default_caption: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * Output + */ +export const zSchemaQwenImageEdit2511TrainerOutput = z.object({ + config_file: zSchemaFile, + diffusers_lora_file: zSchemaFile, +}) + +/** + * Input2511 + */ +export const zSchemaQwenImageEdit2511TrainerInput = z.object({ + steps: z + .optional( + z.int().gte(100).lte(30000).register(z.globalRegistry, { + description: 'Number of steps to train for', + }), + ) + .default(1000), + image_data_url: z.string().register(z.globalRegistry, { + description: + '\n URL to the input data zip archive.\n\n The zip should contain pairs of images. The images should be named:\n\n ROOT_start.EXT and ROOT_end.EXT\n For example:\n photo_start.jpg and photo_end.jpg\n\n The zip can also contain more than one reference image for each image pair. The reference images should be named:\n ROOT_start.EXT, ROOT_start2.EXT, ROOT_start3.EXT, ..., ROOT_end.EXT\n For example:\n photo_start.jpg, photo_start2.jpg, photo_end.jpg\n\n The Reference Image Count field should be set to the number of reference images.\n\n The zip can also contain a text file for each image pair. The text file should be named:\n ROOT.txt\n For example:\n photo.txt\n\n This text file can be used to specify the edit instructions for the image pair.\n\n If no text file is provided, the default_caption will be used.\n\n If no default_caption is provided, the training will fail.\n ', + }), + learning_rate: z + .optional( + z.number().register(z.globalRegistry, { + description: 'Learning rate for LoRA parameters.', + }), + ) + .default(0.0001), + default_caption: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * Output + */ +export const zSchemaQwenImage2512TrainerOutput = z.object({ + config_file: zSchemaFile, + diffusers_lora_file: zSchemaFile, +}) + +/** + * InputImage + */ +export const zSchemaQwenImage2512TrainerInput = z.object({ + steps: z + .optional( + z.int().gte(100).lte(30000).register(z.globalRegistry, { + description: 'Number of steps to train for', + }), + ) + .default(1000), + image_data_url: z.string().register(z.globalRegistry, { + description: + '\n URL to the input data zip archive for text-to-image training.\n\n The zip should contain images with their corresponding text captions:\n\n image.EXT and image.txt\n For example:\n photo.jpg and photo.txt\n\n The text file contains the caption/prompt describing the target image.\n\n If no text file is provided for an image, the default_caption will be used.\n\n If no default_caption is provided and a text file is missing, the training will fail.\n ', + }), + learning_rate: z + .optional( + z.number().register(z.globalRegistry, { + description: 'Learning rate for LoRA parameters.', + }), + ) + .default(0.0005), + default_caption: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * LTX2Output + * + * Output from LTX-2 training. 
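+ *
+ * A minimal consumption sketch (illustrative, not generated): it assumes a
+ * `result` object already fetched from the queue, and that `zSchemaFile`
+ * exposes a `url` field, as fal file payloads typically do.
+ *
+ * @example
+ * const output = zSchemaLtx2VideoTrainerOutput.parse(result)
+ * output.lora_file.url   // trained LoRA weights (`url` assumed on zSchemaFile)
+ * output.config_file.url // training configuration used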
+ */ +export const zSchemaLtx2VideoTrainerOutput = z + .object({ + lora_file: zSchemaFile, + config_file: zSchemaFile, + debug_dataset: z.optional(z.union([zSchemaFile, z.unknown()])), + video: z.union([zSchemaFile, z.unknown()]), + }) + .register(z.globalRegistry, { + description: 'Output from LTX-2 training.', + }) + +/** + * LTX2Input + * + * Input configuration for LTX-2 text-to-video training. + */ +export const zSchemaLtx2VideoTrainerInput = z + .object({ + number_of_steps: z + .optional( + z.int().gte(100).lte(20000).register(z.globalRegistry, { + description: 'The number of training steps.', + }), + ) + .default(2000), + audio_preserve_pitch: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "When audio duration doesn't match video duration, stretch/compress audio without changing pitch. If disabled, audio is trimmed or padded with silence.", + }), + ) + .default(true), + frame_rate: z + .optional( + z.int().gte(8).lte(60).register(z.globalRegistry, { + description: 'Target frames per second for the video.', + }), + ) + .default(25), + audio_normalize: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Normalize audio peak amplitude to a consistent level. Recommended for consistent audio levels across the dataset.', + }), + ) + .default(true), + validation: z + .optional( + z.array(zSchemaValidation).max(2).register(z.globalRegistry, { + description: + 'A list of validation prompts to use during training. When providing an image, _all_ validation inputs must have an image.', + }), + ) + .default([]), + learning_rate: z + .optional( + z.number().gte(0.000001).lte(1).register(z.globalRegistry, { + description: + 'Learning rate for optimization. Higher values can lead to faster training but may cause overfitting.', + }), + ) + .default(0.0002), + number_of_frames: z + .optional( + z.int().gte(9).lte(121).register(z.globalRegistry, { + description: + 'Number of frames per training sample. Must satisfy frames % 8 == 1 (e.g., 1, 9, 17, 25, 33, 41, 49, 57, 65, 73, 81, 89, 97).', + }), + ) + .default(89), + training_data_url: z.string().register(z.globalRegistry, { + description: + 'URL to zip archive with videos or images. Try to use at least 10 files, although more is better.\n\n **Supported video formats:** .mp4, .mov, .avi, .mkv\n **Supported image formats:** .png, .jpg, .jpeg\n\n Note: The dataset must contain ONLY videos OR ONLY images - mixed datasets are not supported.\n\n The archive can also contain text files with captions. Each text file should have the same name as the media file it corresponds to.', + }), + split_input_duration_threshold: z + .optional( + z.number().gte(1).lte(60).register(z.globalRegistry, { + description: + 'The duration threshold in seconds. If a video is longer than this, it will be split into scenes.', + }), + ) + .default(30), + rank: z.optional( + z + .union([ + z.literal(8), + z.literal(16), + z.literal(32), + z.literal(64), + z.literal(128), + ]) + .register(z.globalRegistry, { + description: + 'The rank of the LoRA adaptation. Higher values increase capacity but use more memory.', + }), + ), + first_frame_conditioning_p: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Probability of conditioning on the first frame during training. Higher values improve image-to-video performance.', + }), + ) + .default(0.5), + stg_scale: z + .optional( + z.number().gte(0).lte(3).register(z.globalRegistry, { + description: + 'STG (Spatio-Temporal Guidance) scale. 
0.0 disables STG. Recommended value is 1.0.', + }), + ) + .default(1), + aspect_ratio: z.optional( + z.enum(['16:9', '1:1', '9:16']).register(z.globalRegistry, { + description: 'Aspect ratio to use for training.', + }), + ), + with_audio: z.optional(z.union([z.boolean(), z.unknown()])), + trigger_phrase: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'A phrase that will trigger the LoRA style. Will be prepended to captions during training.', + }), + ) + .default(''), + validation_frame_rate: z + .optional( + z.int().gte(8).lte(60).register(z.globalRegistry, { + description: 'Target frames per second for validation videos.', + }), + ) + .default(25), + resolution: z.optional( + z.enum(['low', 'medium', 'high']).register(z.globalRegistry, { + description: + 'Resolution to use for training. Higher resolutions require more memory.', + }), + ), + split_input_into_scenes: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, videos above a certain duration threshold will be split into scenes.', + }), + ) + .default(true), + generate_audio_in_validation: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to generate audio in validation samples.', + }), + ) + .default(true), + validation_resolution: z.optional( + z.enum(['low', 'medium', 'high']).register(z.globalRegistry, { + description: 'The resolution to use for validation.', + }), + ), + validation_number_of_frames: z + .optional( + z.int().gte(9).lte(121).register(z.globalRegistry, { + description: 'The number of frames in validation videos.', + }), + ) + .default(89), + validation_aspect_ratio: z.optional( + z.enum(['16:9', '1:1', '9:16']).register(z.globalRegistry, { + description: 'The aspect ratio to use for validation.', + }), + ), + validation_negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'A negative prompt to use for validation.', + }), + ) + .default( + 'worst quality, inconsistent motion, blurry, jittery, distorted', + ), + auto_scale_input: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, videos will be automatically scaled to the target frame count and fps. This option has no effect on image datasets.', + }), + ) + .default(false), + }) + .register(z.globalRegistry, { + description: 'Input configuration for LTX-2 text-to-video training.', + }) + +/** + * V2VValidation + * + * Validation input for video-to-video training. + */ +export const zSchemaV2vValidation = z + .object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to use for validation.', + }), + reference_video_url: z.string().min(1).register(z.globalRegistry, { + description: + 'URL to reference video for IC-LoRA validation. This is the input video that will be transformed.', + }), + }) + .register(z.globalRegistry, { + description: 'Validation input for video-to-video training.', + }) + +/** + * LTX2V2VOutput + * + * Output from LTX-2 video-to-video training. + */ +export const zSchemaLtx2V2vTrainerOutput = z + .object({ + lora_file: zSchemaFile, + config_file: zSchemaFile, + debug_dataset: z.optional(z.union([zSchemaFile, z.unknown()])), + video: z.union([zSchemaFile, z.unknown()]), + }) + .register(z.globalRegistry, { + description: 'Output from LTX-2 video-to-video training.', + }) + +/** + * LTX2V2VInput + * + * Input configuration for LTX-2 video-to-video (IC-LoRA) training. 
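+ *
+ * A hedged construction sketch: only `training_data_url` is required; every
+ * other field is optional and falls back to its declared default when parsed.
+ * The dataset URL below is a placeholder, not a real endpoint input.
+ *
+ * @example
+ * const input = zSchemaLtx2V2vTrainerInput.parse({
+ *   training_data_url: 'https://example.com/v2v-dataset.zip', // placeholder
+ * })
+ * input.number_of_steps            // 2000 (default)
+ * input.first_frame_conditioning_p // 0.1 (lower suits video-to-video)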
+ */ +export const zSchemaLtx2V2vTrainerInput = z + .object({ + number_of_steps: z + .optional( + z.int().gte(100).lte(20000).register(z.globalRegistry, { + description: 'The number of training steps.', + }), + ) + .default(2000), + frame_rate: z + .optional( + z.int().gte(8).lte(60).register(z.globalRegistry, { + description: 'Target frames per second for the video.', + }), + ) + .default(25), + learning_rate: z + .optional( + z.number().gte(0.000001).lte(1).register(z.globalRegistry, { + description: + 'Learning rate for optimization. Higher values can lead to faster training but may cause overfitting.', + }), + ) + .default(0.0002), + validation: z + .optional( + z.array(zSchemaV2vValidation).max(2).register(z.globalRegistry, { + description: + 'A list of validation inputs with prompts and reference videos.', + }), + ) + .default([]), + number_of_frames: z + .optional( + z.int().gte(9).lte(121).register(z.globalRegistry, { + description: + 'Number of frames per training sample. Must satisfy frames % 8 == 1 (e.g., 1, 9, 17, 25, 33, 41, 49, 57, 65, 73, 81, 89, 97).', + }), + ) + .default(89), + training_data_url: z.string().register(z.globalRegistry, { + description: + 'URL to zip archive with videos or images. Try to use at least 10 files, although more is better.\n\n **Supported video formats:** .mp4, .mov, .avi, .mkv\n **Supported image formats:** .png, .jpg, .jpeg\n\n Note: The dataset must contain ONLY videos OR ONLY images - mixed datasets are not supported.\n\n The archive can also contain text files with captions. Each text file should have the same name as the media file it corresponds to.', + }), + split_input_duration_threshold: z + .optional( + z.number().gte(1).lte(60).register(z.globalRegistry, { + description: + 'The duration threshold in seconds. If a video is longer than this, it will be split into scenes.', + }), + ) + .default(30), + rank: z.optional( + z + .union([ + z.literal(8), + z.literal(16), + z.literal(32), + z.literal(64), + z.literal(128), + ]) + .register(z.globalRegistry, { + description: + 'The rank of the LoRA adaptation. Higher values increase capacity but use more memory.', + }), + ), + stg_scale: z + .optional( + z.number().gte(0).lte(3).register(z.globalRegistry, { + description: + 'STG (Spatio-Temporal Guidance) scale. 0.0 disables STG. Recommended value is 1.0.', + }), + ) + .default(1), + first_frame_conditioning_p: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Probability of conditioning on the first frame during training. Lower values work better for video-to-video transformation.', + }), + ) + .default(0.1), + aspect_ratio: z.optional( + z.enum(['16:9', '1:1', '9:16']).register(z.globalRegistry, { + description: 'Aspect ratio to use for training.', + }), + ), + trigger_phrase: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'A phrase that will trigger the LoRA style. Will be prepended to captions during training.', + }), + ) + .default(''), + resolution: z.optional( + z.enum(['low', 'medium', 'high']).register(z.globalRegistry, { + description: + 'Resolution to use for training. 
Higher resolutions require more memory.', + }), + ), + validation_frame_rate: z + .optional( + z.int().gte(8).lte(60).register(z.globalRegistry, { + description: 'Target frames per second for validation videos.', + }), + ) + .default(25), + split_input_into_scenes: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, videos above a certain duration threshold will be split into scenes.', + }), + ) + .default(true), + validation_resolution: z.optional( + z.enum(['low', 'medium', 'high']).register(z.globalRegistry, { + description: 'The resolution to use for validation.', + }), + ), + validation_number_of_frames: z + .optional( + z.int().gte(9).lte(121).register(z.globalRegistry, { + description: 'The number of frames in validation videos.', + }), + ) + .default(89), + validation_aspect_ratio: z.optional( + z.enum(['16:9', '1:1', '9:16']).register(z.globalRegistry, { + description: 'The aspect ratio to use for validation.', + }), + ), + validation_negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'A negative prompt to use for validation.', + }), + ) + .default( + 'worst quality, inconsistent motion, blurry, jittery, distorted', + ), + auto_scale_input: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, videos will be automatically scaled to the target frame count and fps. This option has no effect on image datasets.', + }), + ) + .default(false), + }) + .register(z.globalRegistry, { + description: + 'Input configuration for LTX-2 video-to-video (IC-LoRA) training.', + }) + +/** + * Output + */ +export const zSchemaFlux2TrainerV2Output = z.object({ + config_file: zSchemaFile, + diffusers_lora_file: zSchemaFile, +}) + +/** + * InputT2IV2 + * + * V2 input with multi-resolution bucketing. + */ +export const zSchemaFlux2TrainerV2Input = z + .object({ + steps: z + .optional( + z.int().gte(100).lte(10000).register(z.globalRegistry, { + description: 'Total number of training steps.', + }), + ) + .default(1000), + image_data_url: z.string().register(z.globalRegistry, { + description: + '\n URL to zip archive with images of a consistent style. Try to use at least 10 images, although more is better.\n\n The zip can also contain a text file for each image. 
The text file should be named:\n ROOT.txt\n For example:\n photo.txt\n\n This text file can be used to specify the edit instructions for the image pair.\n\n If no text file is provided, the default_caption will be used.\n\n If no default_caption is provided, the training will fail.\n ', + }), + learning_rate: z + .optional( + z.number().register(z.globalRegistry, { + description: 'Learning rate applied to trainable parameters.', + }), + ) + .default(0.00005), + default_caption: z.optional(z.union([z.string(), z.unknown()])), + output_lora_format: z.optional( + z.enum(['fal', 'comfy']).register(z.globalRegistry, { + description: 'Dictates the naming scheme for the output weights', + }), + ), + }) + .register(z.globalRegistry, { + description: 'V2 input with multi-resolution bucketing.', + }) + +/** + * Output + */ +export const zSchemaFlux2TrainerV2EditOutput = z.object({ + config_file: zSchemaFile, + diffusers_lora_file: zSchemaFile, +}) + +/** + * InputEditV2 + */ +export const zSchemaFlux2TrainerV2EditInput = z.object({ + steps: z + .optional( + z.int().gte(100).lte(10000).register(z.globalRegistry, { + description: 'Total number of training steps.', + }), + ) + .default(1000), + image_data_url: z.string().register(z.globalRegistry, { + description: + '\n URL to the input data zip archive.\n\n The zip should contain pairs of images. The images should be named:\n\n ROOT_start.EXT and ROOT_end.EXT\n For example:\n photo_start.jpg and photo_end.jpg\n\n The zip can also contain up to four reference image for each image pair. The reference images should be named:\n ROOT_start.EXT, ROOT_start2.EXT, ROOT_start3.EXT, ROOT_start4.EXT, ROOT_end.EXT\n For example:\n photo_start.jpg, photo_start2.jpg, photo_end.jpg\n\n The zip can also contain a text file for each image pair. The text file should be named:\n ROOT.txt\n For example:\n photo.txt\n\n This text file can be used to specify the edit instructions for the image pair.\n\n If no text file is provided, the default_caption will be used.\n\n If no default_caption is provided, the training will fail.\n ', + }), + learning_rate: z + .optional( + z.number().register(z.globalRegistry, { + description: 'Learning rate applied to trainable parameters.', + }), + ) + .default(0.00005), + default_caption: z.optional(z.union([z.string(), z.unknown()])), + output_lora_format: z.optional( + z.enum(['fal', 'comfy']).register(z.globalRegistry, { + description: 'Dictates the naming scheme for the output weights', + }), + ), +}) + +/** + * Output + */ +export const zSchemaQwenImage2512TrainerV2Output = z.object({ + config_file: zSchemaFile, + diffusers_lora_file: zSchemaFile, +}) + +/** + * Input + */ +export const zSchemaQwenImage2512TrainerV2Input = z.object({ + steps: z + .optional( + z.int().gte(10).lte(40000).register(z.globalRegistry, { + description: 'Number of steps to train for', + }), + ) + .default(2000), + image_data_url: z.string().register(z.globalRegistry, { + description: + '\n URL to the input data zip archive.\n\n The zip should contain pairs of images and corresponding captions.\n\n The images should be named: ROOT.EXT. For example: 001.jpg\n\n The corresponding captions should be named: ROOT.txt. 
For example: 001.txt\n\n If no text file is provided for an image, the default_caption will be used.\n ', + }), + learning_rate: z + .optional( + z.number().register(z.globalRegistry, { + description: 'Learning rate.', + }), + ) + .default(0.0005), + default_caption: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * Output + */ +export const zSchemaFlux2Klein4bBaseTrainerEditOutput = z.object({ + config_file: zSchemaFile, + diffusers_lora_file: zSchemaFile, +}) + +/** + * InputEditV2 + */ +export const zSchemaFlux2Klein4bBaseTrainerEditInput = z.object({ + steps: z + .optional( + z.int().gte(100).lte(10000).register(z.globalRegistry, { + description: 'Total number of training steps.', + }), + ) + .default(1000), + image_data_url: z.string().register(z.globalRegistry, { + description: + '\n URL to the input data zip archive.\n\n The zip should contain pairs of images. The images should be named:\n\n ROOT_start.EXT and ROOT_end.EXT\n For example:\n photo_start.jpg and photo_end.jpg\n\n The zip can also contain up to four reference image for each image pair. The reference images should be named:\n ROOT_start.EXT, ROOT_start2.EXT, ROOT_start3.EXT, ROOT_start4.EXT, ROOT_end.EXT\n For example:\n photo_start.jpg, photo_start2.jpg, photo_end.jpg\n\n The zip can also contain a text file for each image pair. The text file should be named:\n ROOT.txt\n For example:\n photo.txt\n\n This text file can be used to specify the edit instructions for the image pair.\n\n If no text file is provided, the default_caption will be used.\n\n If no default_caption is provided, the training will fail.\n ', + }), + learning_rate: z + .optional( + z.number().register(z.globalRegistry, { + description: 'Learning rate applied to trainable parameters.', + }), + ) + .default(0.00005), + default_caption: z.optional(z.union([z.string(), z.unknown()])), + output_lora_format: z.optional( + z.enum(['fal', 'comfy']).register(z.globalRegistry, { + description: 'Dictates the naming scheme for the output weights', + }), + ), +}) + +/** + * Output + */ +export const zSchemaFlux2Klein4bBaseTrainerOutput = z.object({ + config_file: zSchemaFile, + diffusers_lora_file: zSchemaFile, +}) + +/** + * InputT2IV2 + * + * V2 input with multi-resolution bucketing. + */ +export const zSchemaFlux2Klein4bBaseTrainerInput = z + .object({ + steps: z + .optional( + z.int().gte(100).lte(10000).register(z.globalRegistry, { + description: 'Total number of training steps.', + }), + ) + .default(1000), + image_data_url: z.string().register(z.globalRegistry, { + description: + '\n URL to zip archive with images of a consistent style. Try to use at least 10 images, although more is better.\n\n The zip can also contain a text file for each image. 
The text file should be named:\n ROOT.txt\n For example:\n photo.txt\n\n This text file can be used to specify the edit instructions for the image pair.\n\n If no text file is provided, the default_caption will be used.\n\n If no default_caption is provided, the training will fail.\n ', + }), + learning_rate: z + .optional( + z.number().register(z.globalRegistry, { + description: 'Learning rate applied to trainable parameters.', + }), + ) + .default(0.00005), + default_caption: z.optional(z.union([z.string(), z.unknown()])), + output_lora_format: z.optional( + z.enum(['fal', 'comfy']).register(z.globalRegistry, { + description: 'Dictates the naming scheme for the output weights', + }), + ), + }) + .register(z.globalRegistry, { + description: 'V2 input with multi-resolution bucketing.', + }) + +/** + * Output + */ +export const zSchemaFlux2Klein9bBaseTrainerOutput = z.object({ + config_file: zSchemaFile, + diffusers_lora_file: zSchemaFile, +}) + +/** + * InputT2IV2 + * + * V2 input with multi-resolution bucketing. + */ +export const zSchemaFlux2Klein9bBaseTrainerInput = z + .object({ + steps: z + .optional( + z.int().gte(100).lte(10000).register(z.globalRegistry, { + description: 'Total number of training steps.', + }), + ) + .default(1000), + image_data_url: z.string().register(z.globalRegistry, { + description: + '\n URL to zip archive with images of a consistent style. Try to use at least 10 images, although more is better.\n\n The zip can also contain a text file for each image. The text file should be named:\n ROOT.txt\n For example:\n photo.txt\n\n This text file can be used to specify the edit instructions for the image pair.\n\n If no text file is provided, the default_caption will be used.\n\n If no default_caption is provided, the training will fail.\n ', + }), + learning_rate: z + .optional( + z.number().register(z.globalRegistry, { + description: 'Learning rate applied to trainable parameters.', + }), + ) + .default(0.00005), + default_caption: z.optional(z.union([z.string(), z.unknown()])), + output_lora_format: z.optional( + z.enum(['fal', 'comfy']).register(z.globalRegistry, { + description: 'Dictates the naming scheme for the output weights', + }), + ), + }) + .register(z.globalRegistry, { + description: 'V2 input with multi-resolution bucketing.', + }) + +/** + * Output + */ +export const zSchemaFlux2Klein9bBaseTrainerEditOutput = z.object({ + config_file: zSchemaFile, + diffusers_lora_file: zSchemaFile, +}) + +/** + * InputEditV2 + */ +export const zSchemaFlux2Klein9bBaseTrainerEditInput = z.object({ + steps: z + .optional( + z.int().gte(100).lte(10000).register(z.globalRegistry, { + description: 'Total number of training steps.', + }), + ) + .default(1000), + image_data_url: z.string().register(z.globalRegistry, { + description: + '\n URL to the input data zip archive.\n\n The zip should contain pairs of images. The images should be named:\n\n ROOT_start.EXT and ROOT_end.EXT\n For example:\n photo_start.jpg and photo_end.jpg\n\n The zip can also contain up to four reference image for each image pair. The reference images should be named:\n ROOT_start.EXT, ROOT_start2.EXT, ROOT_start3.EXT, ROOT_start4.EXT, ROOT_end.EXT\n For example:\n photo_start.jpg, photo_start2.jpg, photo_end.jpg\n\n The zip can also contain a text file for each image pair. 
The text file should be named:\n ROOT.txt\n For example:\n photo.txt\n\n This text file can be used to specify the edit instructions for the image pair.\n\n If no text file is provided, the default_caption will be used.\n\n If no default_caption is provided, the training will fail.\n ', + }), + learning_rate: z + .optional( + z.number().register(z.globalRegistry, { + description: 'Learning rate applied to trainable parameters.', + }), + ) + .default(0.00005), + default_caption: z.optional(z.union([z.string(), z.unknown()])), + output_lora_format: z.optional( + z.enum(['fal', 'comfy']).register(z.globalRegistry, { + description: 'Dictates the naming scheme for the output weights', + }), + ), +}) + +/** + * Output + */ +export const zSchemaZImageTurboTrainerV2Output = z.object({ + config_file: zSchemaFile, + diffusers_lora_file: zSchemaFile, +}) + +/** + * Input + */ +export const zSchemaZImageTurboTrainerV2Input = z.object({ + steps: z + .optional( + z.int().gte(10).lte(40000).register(z.globalRegistry, { + description: 'Number of steps to train for', + }), + ) + .default(2000), + image_data_url: z.string().register(z.globalRegistry, { + description: + '\n URL to the input data zip archive.\n\n The zip should contain pairs of images and corresponding captions.\n\n The images should be named: ROOT.EXT. For example: 001.jpg\n\n The corresponding captions should be named: ROOT.txt. For example: 001.txt\n\n If no text file is provided for an image, the default_caption will be used.\n ', + }), + learning_rate: z + .optional( + z.number().register(z.globalRegistry, { + description: 'Learning rate.', + }), + ) + .default(0.0005), + default_caption: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * Output + */ +export const zSchemaZImageBaseTrainerOutput = z.object({ + config_file: zSchemaFile, + diffusers_lora_file: zSchemaFile, +}) + +/** + * Input + */ +export const zSchemaZImageBaseTrainerInput = z.object({ + steps: z + .optional( + z.int().gte(10).lte(40000).register(z.globalRegistry, { + description: 'Number of steps to train for', + }), + ) + .default(2000), + image_data_url: z.string().register(z.globalRegistry, { + description: + '\n URL to the input data zip archive.\n\n The zip should contain pairs of images and corresponding captions.\n\n The images should be named: ROOT.EXT. For example: 001.jpg\n\n The corresponding captions should be named: ROOT.txt. For example: 001.txt\n\n If no text file is provided for an image, the default_caption will be used.\n ', + }), + learning_rate: z + .optional( + z.number().register(z.globalRegistry, { + description: 'Learning rate.', + }), + ) + .default(0.0005), + default_caption: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * Output + */ +export const zSchemaFluxLoraPortraitTrainerOutput = z.object({ + config_file: zSchemaFile, + diffusers_lora_file: zSchemaFile, +}) + +/** + * PublicInput + */ +export const zSchemaFluxLoraPortraitTrainerInput = z.object({ + images_data_url: z.string().register(z.globalRegistry, { + description: + '\n URL to zip archive with images of a consistent style. Try to use at least 10 images, although more is better.\n\n In addition to images the archive can contain text files with captions. Each text file should have the same name as the image file it corresponds to.\n\n The captions can include a special string `[trigger]`. 
If a trigger_word is specified, it will replace `[trigger]` in the captions.\n ', + }), + trigger_phrase: z.optional(z.union([z.string(), z.null()])), + resume_from_checkpoint: z + .optional( + z.string().register(z.globalRegistry, { + description: 'URL to a checkpoint to resume training from.', + }), + ) + .default(''), + subject_crop: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If True, the subject will be cropped from the image.', + }), + ) + .default(true), + learning_rate: z + .optional( + z.number().gte(0.000001).lte(0.001).register(z.globalRegistry, { + description: 'Learning rate to use for training.', + }), + ) + .default(0.00009), + multiresolution_training: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If True, multiresolution training will be used.', + }), + ) + .default(true), + steps: z + .optional( + z.int().gte(1).lte(10000).register(z.globalRegistry, { + description: 'Number of steps to train the LoRA on.', + }), + ) + .default(2500), + data_archive_format: z.optional(z.union([z.string(), z.null()])), + create_masks: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If True, masks will be created for the subject.', + }), + ) + .default(false), +}) + +/** + * Output + */ +export const zSchemaFluxLoraFastTrainingOutput = z.object({ + config_file: zSchemaFile, + debug_preprocessed_output: z.optional(zSchemaFile), + diffusers_lora_file: zSchemaFile, +}) + +/** + * PublicInput + */ +export const zSchemaFluxLoraFastTrainingInput = z.object({ + images_data_url: z.string().register(z.globalRegistry, { + description: + '\n URL to zip archive with images. Try to use at least 4 images in general the more the better.\n\n In addition to images the archive can contain text files with captions. Each text file should have the same name as the image file it corresponds to.\n ', + }), + is_input_format_already_preprocessed: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "Specifies whether the input data is already in a processed format. When set to False (default), the system expects raw input where image files and their corresponding caption files share the same name (e.g., 'photo.jpg' and 'photo.txt'). Set to True if your data is already in a preprocessed format.", + }), + ) + .default(false), + trigger_word: z.optional(z.union([z.string(), z.null()])), + steps: z.optional( + z.int().gte(1).lte(10000).register(z.globalRegistry, { + description: 'Number of steps to train the LoRA on.', + }), + ), + data_archive_format: z.optional(z.union([z.string(), z.null()])), + is_style: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If True, the training will be for a style. This will deactivate segmentation, captioning and will use trigger word instead. Use the trigger word to specify the style.', + }), + ) + .default(false), + create_masks: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If True segmentation masks will be used in the weight the training loss. 
For people a face mask is used if possible.', + }), + ) + .default(true), +}) + +/** + * Output + */ +export const zSchemaFluxKontextTrainerOutput = z.object({ + config_file: zSchemaFile, + diffusers_lora_file: zSchemaFile, +}) + +/** + * Input + */ +export const zSchemaFluxKontextTrainerInput = z.object({ + steps: z + .optional( + z.int().gte(2).lte(10000).register(z.globalRegistry, { + description: 'Number of steps to train for', + }), + ) + .default(1000), + image_data_url: z.string().register(z.globalRegistry, { + description: + '\n URL to the input data zip archive.\n\n The zip should contain pairs of images. The images should be named:\n\n ROOT_start.EXT and ROOT_end.EXT\n For example:\n photo_start.jpg and photo_end.jpg\n\n The zip can also contain a text file for each image pair. The text file should be named:\n ROOT.txt\n For example:\n photo.txt\n\n This text file can be used to specify the edit instructions for the image pair.\n\n If no text file is provided, the default_caption will be used.\n\n If no default_caption is provided, the training will fail.\n ', + }), + learning_rate: z.optional(z.number()).default(0.0001), + default_caption: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Default caption to use when caption files are missing. If None, missing captions will cause an error.', + }), + ), + output_lora_format: z.optional( + z.enum(['fal', 'comfy']).register(z.globalRegistry, { + description: 'Dictates the naming scheme for the output weights', + }), + ), +}) + +/** + * Output + */ +export const zSchemaFluxKreaTrainerOutput = z.object({ + config_file: zSchemaFile, + debug_preprocessed_output: z.optional(zSchemaFile), + diffusers_lora_file: zSchemaFile, +}) + +/** + * PublicInput + */ +export const zSchemaFluxKreaTrainerInput = z.object({ + images_data_url: z.string().register(z.globalRegistry, { + description: + '\n URL to zip archive with images. Try to use at least 4 images in general the more the better.\n\n In addition to images the archive can contain text files with captions. Each text file should have the same name as the image file it corresponds to.\n ', + }), + is_input_format_already_preprocessed: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "Specifies whether the input data is already in a processed format. When set to False (default), the system expects raw input where image files and their corresponding caption files share the same name (e.g., 'photo.jpg' and 'photo.txt'). Set to True if your data is already in a preprocessed format.", + }), + ) + .default(false), + trigger_word: z.optional(z.union([z.string(), z.null()])), + steps: z.optional( + z.int().gte(1).lte(10000).register(z.globalRegistry, { + description: 'Number of steps to train the LoRA on.', + }), + ), + data_archive_format: z.optional(z.union([z.string(), z.null()])), + is_style: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If True, the training will be for a style. This will deactivate segmentation, captioning and will use trigger word instead. Use the trigger word to specify the style.', + }), + ) + .default(false), + create_masks: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If True segmentation masks will be used in the weight the training loss. 
For people a face mask is used if possible.', + }), + ) + .default(true), +}) + +export const zSchemaQueueStatus = z.object({ + status: z.enum(['IN_QUEUE', 'IN_PROGRESS', 'COMPLETED']), + request_id: z.string().register(z.globalRegistry, { + description: 'The request id.', + }), + response_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The response url.', + }), + ), + status_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The status url.', + }), + ), + cancel_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The cancel url.', + }), + ), + logs: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The logs.', + }), + ), + metrics: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The metrics.', + }), + ), + queue_position: z.optional( + z.int().register(z.globalRegistry, { + description: 'The queue position.', + }), + ), +}) + +export const zGetFalAiFluxKreaTrainerRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFluxKreaTrainerRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxKreaTrainerRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxKreaTrainerRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxKreaTrainerData = z.object({ + body: zSchemaFluxKreaTrainerInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxKreaTrainerResponse = zSchemaQueueStatus + +export const zGetFalAiFluxKreaTrainerRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFluxKreaTrainerRequestsByRequestIdResponse = + zSchemaFluxKreaTrainerOutput + +export const zGetFalAiFluxKontextTrainerRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
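+ *
+ * A polling sketch (illustrative; `payload` stands in for the JSON body
+ * returned by the status endpoint and is not defined in this module):
+ *
+ * @example
+ * const status =
+ *   zGetFalAiFluxKontextTrainerRequestsByRequestIdStatusResponse.parse(payload) // payload assumed
+ * if (status.status === 'COMPLETED' && status.response_url) {
+ *   // fetch the final result from status.response_url
+ * }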
+ */ +export const zGetFalAiFluxKontextTrainerRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxKontextTrainerRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxKontextTrainerRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxKontextTrainerData = z.object({ + body: zSchemaFluxKontextTrainerInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxKontextTrainerResponse = zSchemaQueueStatus + +export const zGetFalAiFluxKontextTrainerRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFluxKontextTrainerRequestsByRequestIdResponse = + zSchemaFluxKontextTrainerOutput + +export const zGetFalAiFluxLoraFastTrainingRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFluxLoraFastTrainingRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxLoraFastTrainingRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxLoraFastTrainingRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxLoraFastTrainingData = z.object({ + body: zSchemaFluxLoraFastTrainingInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxLoraFastTrainingResponse = zSchemaQueueStatus + +export const zGetFalAiFluxLoraFastTrainingRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
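+ *
+ * A defensive-parsing sketch (illustrative; `json` is an assumed response
+ * body, not part of this module):
+ *
+ * @example
+ * const parsed =
+ *   zGetFalAiFluxLoraFastTrainingRequestsByRequestIdResponse.safeParse(json) // json assumed
+ * if (parsed.success) {
+ *   parsed.data.diffusers_lora_file // the trained LoRA weights file
+ * }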
+ */ +export const zGetFalAiFluxLoraFastTrainingRequestsByRequestIdResponse = + zSchemaFluxLoraFastTrainingOutput + +export const zGetFalAiFluxLoraPortraitTrainerRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFluxLoraPortraitTrainerRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFluxLoraPortraitTrainerRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFluxLoraPortraitTrainerRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFluxLoraPortraitTrainerData = z.object({ + body: zSchemaFluxLoraPortraitTrainerInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFluxLoraPortraitTrainerResponse = zSchemaQueueStatus + +export const zGetFalAiFluxLoraPortraitTrainerRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. + */ +export const zGetFalAiFluxLoraPortraitTrainerRequestsByRequestIdResponse = + zSchemaFluxLoraPortraitTrainerOutput + +export const zGetFalAiZImageBaseTrainerRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiZImageBaseTrainerRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiZImageBaseTrainerRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiZImageBaseTrainerRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiZImageBaseTrainerData = z.object({ + body: zSchemaZImageBaseTrainerInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
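+ *
+ * A submission sketch (illustrative; `json` stands in for the queue
+ * acknowledgement returned after POSTing a zSchemaZImageBaseTrainerInput body):
+ *
+ * @example
+ * const ack = zPostFalAiZImageBaseTrainerResponse.parse(json) // json assumed
+ * ack.request_id // keep this for status polling and result retrieval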
+ */ +export const zPostFalAiZImageBaseTrainerResponse = zSchemaQueueStatus + +export const zGetFalAiZImageBaseTrainerRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiZImageBaseTrainerRequestsByRequestIdResponse = + zSchemaZImageBaseTrainerOutput + +export const zGetFalAiZImageTurboTrainerV2RequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiZImageTurboTrainerV2RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiZImageTurboTrainerV2RequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiZImageTurboTrainerV2RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiZImageTurboTrainerV2Data = z.object({ + body: zSchemaZImageTurboTrainerV2Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiZImageTurboTrainerV2Response = zSchemaQueueStatus + +export const zGetFalAiZImageTurboTrainerV2RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiZImageTurboTrainerV2RequestsByRequestIdResponse = + zSchemaZImageTurboTrainerV2Output + +export const zGetFalAiFlux2Klein9bBaseTrainerEditRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFlux2Klein9bBaseTrainerEditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2Klein9bBaseTrainerEditRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
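+ *
+ * A cancellation sketch (illustrative; `json` is an assumed response body):
+ *
+ * @example
+ * const res =
+ *   zPutFalAiFlux2Klein9bBaseTrainerEditRequestsByRequestIdCancelResponse.parse(json)
+ * if (res.success !== true) {
+ *   // cancellation may fail, e.g. if the request had already started running
+ * }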
+ */ +export const zPutFalAiFlux2Klein9bBaseTrainerEditRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2Klein9bBaseTrainerEditData = z.object({ + body: zSchemaFlux2Klein9bBaseTrainerEditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux2Klein9bBaseTrainerEditResponse = zSchemaQueueStatus + +export const zGetFalAiFlux2Klein9bBaseTrainerEditRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2Klein9bBaseTrainerEditRequestsByRequestIdResponse = + zSchemaFlux2Klein9bBaseTrainerEditOutput + +export const zGetFalAiFlux2Klein9bBaseTrainerRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFlux2Klein9bBaseTrainerRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2Klein9bBaseTrainerRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux2Klein9bBaseTrainerRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2Klein9bBaseTrainerData = z.object({ + body: zSchemaFlux2Klein9bBaseTrainerInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux2Klein9bBaseTrainerResponse = zSchemaQueueStatus + +export const zGetFalAiFlux2Klein9bBaseTrainerRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2Klein9bBaseTrainerRequestsByRequestIdResponse = + zSchemaFlux2Klein9bBaseTrainerOutput + +export const zGetFalAiFlux2Klein4bBaseTrainerRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
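+ *
+ * A type-level sketch: the static request-status type can be derived with
+ * z.infer (assuming the `z` import in scope is Zod):
+ *
+ * @example
+ * type Flux2Klein4bStatus = z.infer<
+ *   typeof zGetFalAiFlux2Klein4bBaseTrainerRequestsByRequestIdStatusResponse
+ * >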
+ */ +export const zGetFalAiFlux2Klein4bBaseTrainerRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2Klein4bBaseTrainerRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux2Klein4bBaseTrainerRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2Klein4bBaseTrainerData = z.object({ + body: zSchemaFlux2Klein4bBaseTrainerInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux2Klein4bBaseTrainerResponse = zSchemaQueueStatus + +export const zGetFalAiFlux2Klein4bBaseTrainerRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2Klein4bBaseTrainerRequestsByRequestIdResponse = + zSchemaFlux2Klein4bBaseTrainerOutput + +export const zGetFalAiFlux2Klein4bBaseTrainerEditRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFlux2Klein4bBaseTrainerEditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2Klein4bBaseTrainerEditRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux2Klein4bBaseTrainerEditRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2Klein4bBaseTrainerEditData = z.object({ + body: zSchemaFlux2Klein4bBaseTrainerEditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux2Klein4bBaseTrainerEditResponse = zSchemaQueueStatus + +export const zGetFalAiFlux2Klein4bBaseTrainerEditRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiFlux2Klein4bBaseTrainerEditRequestsByRequestIdResponse = + zSchemaFlux2Klein4bBaseTrainerEditOutput + +export const zGetFalAiQwenImage2512TrainerV2RequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiQwenImage2512TrainerV2RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImage2512TrainerV2RequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwenImage2512TrainerV2RequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImage2512TrainerV2Data = z.object({ + body: zSchemaQwenImage2512TrainerV2Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiQwenImage2512TrainerV2Response = zSchemaQueueStatus + +export const zGetFalAiQwenImage2512TrainerV2RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiQwenImage2512TrainerV2RequestsByRequestIdResponse = + zSchemaQwenImage2512TrainerV2Output + +export const zGetFalAiFlux2TrainerV2EditRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFlux2TrainerV2EditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2TrainerV2EditRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux2TrainerV2EditRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2TrainerV2EditData = z.object({ + body: zSchemaFlux2TrainerV2EditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiFlux2TrainerV2EditResponse = zSchemaQueueStatus + +export const zGetFalAiFlux2TrainerV2EditRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2TrainerV2EditRequestsByRequestIdResponse = + zSchemaFlux2TrainerV2EditOutput + +export const zGetFalAiFlux2TrainerV2RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFlux2TrainerV2RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2TrainerV2RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux2TrainerV2RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2TrainerV2Data = z.object({ + body: zSchemaFlux2TrainerV2Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux2TrainerV2Response = zSchemaQueueStatus + +export const zGetFalAiFlux2TrainerV2RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2TrainerV2RequestsByRequestIdResponse = + zSchemaFlux2TrainerV2Output + +export const zGetFalAiLtx2V2vTrainerRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiLtx2V2vTrainerRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtx2V2vTrainerRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiLtx2V2vTrainerRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtx2V2vTrainerData = z.object({ + body: zSchemaLtx2V2vTrainerInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtx2V2vTrainerResponse = zSchemaQueueStatus + +export const zGetFalAiLtx2V2vTrainerRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLtx2V2vTrainerRequestsByRequestIdResponse = + zSchemaLtx2V2vTrainerOutput + +export const zGetFalAiLtx2VideoTrainerRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiLtx2VideoTrainerRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtx2VideoTrainerRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiLtx2VideoTrainerRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtx2VideoTrainerData = z.object({ + body: zSchemaLtx2VideoTrainerInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtx2VideoTrainerResponse = zSchemaQueueStatus + +export const zGetFalAiLtx2VideoTrainerRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLtx2VideoTrainerRequestsByRequestIdResponse = + zSchemaLtx2VideoTrainerOutput + +export const zGetFalAiQwenImage2512TrainerRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiQwenImage2512TrainerRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImage2512TrainerRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwenImage2512TrainerRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImage2512TrainerData = z.object({ + body: zSchemaQwenImage2512TrainerInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiQwenImage2512TrainerResponse = zSchemaQueueStatus + +export const zGetFalAiQwenImage2512TrainerRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiQwenImage2512TrainerRequestsByRequestIdResponse = + zSchemaQwenImage2512TrainerOutput + +export const zGetFalAiQwenImageEdit2511TrainerRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiQwenImageEdit2511TrainerRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageEdit2511TrainerRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwenImageEdit2511TrainerRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageEdit2511TrainerData = z.object({ + body: zSchemaQwenImageEdit2511TrainerInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiQwenImageEdit2511TrainerResponse = zSchemaQueueStatus + +export const zGetFalAiQwenImageEdit2511TrainerRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiQwenImageEdit2511TrainerRequestsByRequestIdResponse = + zSchemaQwenImageEdit2511TrainerOutput + +export const zGetFalAiQwenImageLayeredTrainerRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiQwenImageLayeredTrainerRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageLayeredTrainerRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwenImageLayeredTrainerRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageLayeredTrainerData = z.object({ + body: zSchemaQwenImageLayeredTrainerInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiQwenImageLayeredTrainerResponse = zSchemaQueueStatus + +export const zGetFalAiQwenImageLayeredTrainerRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. + */ +export const zGetFalAiQwenImageLayeredTrainerRequestsByRequestIdResponse = + zSchemaQwenImageLayeredTrainerOutput + +export const zGetFalAiQwenImageEdit2509TrainerRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiQwenImageEdit2509TrainerRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageEdit2509TrainerRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwenImageEdit2509TrainerRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageEdit2509TrainerData = z.object({ + body: zSchemaQwenImageEdit2509TrainerInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiQwenImageEdit2509TrainerResponse = zSchemaQueueStatus + +export const zGetFalAiQwenImageEdit2509TrainerRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiQwenImageEdit2509TrainerRequestsByRequestIdResponse = + zSchemaQwenImageEdit2509TrainerOutput + +export const zGetFalAiZImageTrainerRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiZImageTrainerRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiZImageTrainerRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiZImageTrainerRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiZImageTrainerData = z.object({ + body: zSchemaZImageTrainerInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiZImageTrainerResponse = zSchemaQueueStatus + +export const zGetFalAiZImageTrainerRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiZImageTrainerRequestsByRequestIdResponse = + zSchemaZImageTrainerOutput + +export const zGetFalAiFlux2TrainerEditRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFlux2TrainerEditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2TrainerEditRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiFlux2TrainerEditRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2TrainerEditData = z.object({ + body: zSchemaFlux2TrainerEditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux2TrainerEditResponse = zSchemaQueueStatus + +export const zGetFalAiFlux2TrainerEditRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2TrainerEditRequestsByRequestIdResponse = + zSchemaFlux2TrainerEditOutput + +export const zGetFalAiFlux2TrainerRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFlux2TrainerRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlux2TrainerRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlux2TrainerRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlux2TrainerData = z.object({ + body: zSchemaFlux2TrainerInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlux2TrainerResponse = zSchemaQueueStatus + +export const zGetFalAiFlux2TrainerRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFlux2TrainerRequestsByRequestIdResponse = + zSchemaFlux2TrainerOutput + +export const zGetFalAiQwenImageEditPlusTrainerRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiQwenImageEditPlusTrainerRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageEditPlusTrainerRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwenImageEditPlusTrainerRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageEditPlusTrainerData = z.object({ + body: zSchemaQwenImageEditPlusTrainerInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiQwenImageEditPlusTrainerResponse = zSchemaQueueStatus + +export const zGetFalAiQwenImageEditPlusTrainerRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiQwenImageEditPlusTrainerRequestsByRequestIdResponse = + zSchemaQwenImageEditPlusTrainerOutput + +export const zGetFalAiQwenImageEditTrainerRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiQwenImageEditTrainerRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageEditTrainerRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwenImageEditTrainerRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageEditTrainerData = z.object({ + body: zSchemaQwenImageEditTrainerInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiQwenImageEditTrainerResponse = zSchemaQueueStatus + +export const zGetFalAiQwenImageEditTrainerRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiQwenImageEditTrainerRequestsByRequestIdResponse = + zSchemaQwenImageEditTrainerOutput + +export const zGetFalAiQwenImageTrainerRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiQwenImageTrainerRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwenImageTrainerRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwenImageTrainerRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwenImageTrainerData = z.object({ + body: zSchemaQwenImageTrainerInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiQwenImageTrainerResponse = zSchemaQueueStatus + +export const zGetFalAiQwenImageTrainerRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiQwenImageTrainerRequestsByRequestIdResponse = + zSchemaQwenImageTrainerOutput + +export const zGetFalAiWan22ImageTrainerRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiWan22ImageTrainerRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWan22ImageTrainerRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiWan22ImageTrainerRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWan22ImageTrainerData = z.object({ + body: zSchemaWan22ImageTrainerInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiWan22ImageTrainerResponse = zSchemaQueueStatus + +export const zGetFalAiWan22ImageTrainerRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiWan22ImageTrainerRequestsByRequestIdResponse = + zSchemaWan22ImageTrainerOutput + +export const zGetFalAiWanTrainerT2vRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiWanTrainerT2vRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanTrainerT2vRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanTrainerT2vRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanTrainerT2vData = z.object({ + body: zSchemaWanTrainerT2vInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanTrainerT2vResponse = zSchemaQueueStatus + +export const zGetFalAiWanTrainerT2vRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiWanTrainerT2vRequestsByRequestIdResponse = + zSchemaWanTrainerT2vOutput + +export const zGetFalAiWanTrainerT2V14bRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiWanTrainerT2V14bRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanTrainerT2V14bRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiWanTrainerT2V14bRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanTrainerT2V14bData = z.object({ + body: zSchemaWanTrainerT2V14bInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanTrainerT2V14bResponse = zSchemaQueueStatus + +export const zGetFalAiWanTrainerT2V14bRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiWanTrainerT2V14bRequestsByRequestIdResponse = + zSchemaWanTrainerT2V14bOutput + +export const zGetFalAiWanTrainerI2V720pRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiWanTrainerI2V720pRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanTrainerI2V720pRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanTrainerI2V720pRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanTrainerI2V720pData = z.object({ + body: zSchemaWanTrainerI2V720pInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanTrainerI2V720pResponse = zSchemaQueueStatus + +export const zGetFalAiWanTrainerI2V720pRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiWanTrainerI2V720pRequestsByRequestIdResponse = + zSchemaWanTrainerI2V720pOutput + +export const zGetFalAiWanTrainerFlf2V720pRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiWanTrainerFlf2V720pRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanTrainerFlf2V720pRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanTrainerFlf2V720pRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanTrainerFlf2V720pData = z.object({ + body: zSchemaWanTrainerFlf2V720pInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanTrainerFlf2V720pResponse = zSchemaQueueStatus + +export const zGetFalAiWanTrainerFlf2V720pRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiWanTrainerFlf2V720pRequestsByRequestIdResponse = + zSchemaWanTrainerFlf2V720pOutput + +export const zGetFalAiLtxVideoTrainerRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiLtxVideoTrainerRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtxVideoTrainerRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiLtxVideoTrainerRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtxVideoTrainerData = z.object({ + body: zSchemaLtxVideoTrainerInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtxVideoTrainerResponse = zSchemaQueueStatus + +export const zGetFalAiLtxVideoTrainerRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiLtxVideoTrainerRequestsByRequestIdResponse = + zSchemaLtxVideoTrainerOutput + +export const zGetFalAiRecraftV3CreateStyleRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiRecraftV3CreateStyleRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiRecraftV3CreateStyleRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiRecraftV3CreateStyleRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiRecraftV3CreateStyleData = z.object({ + body: zSchemaRecraftV3CreateStyleInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiRecraftV3CreateStyleResponse = zSchemaQueueStatus + +export const zGetFalAiRecraftV3CreateStyleRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiRecraftV3CreateStyleRequestsByRequestIdResponse = + zSchemaRecraftV3CreateStyleOutput + +export const zGetFalAiTurboFluxTrainerRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiTurboFluxTrainerRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiTurboFluxTrainerRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiTurboFluxTrainerRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiTurboFluxTrainerData = z.object({ + body: zSchemaTurboFluxTrainerInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiTurboFluxTrainerResponse = zSchemaQueueStatus + +export const zGetFalAiTurboFluxTrainerRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiTurboFluxTrainerRequestsByRequestIdResponse = + zSchemaTurboFluxTrainerOutput + +export const zGetFalAiWanTrainerRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiWanTrainerRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanTrainerRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanTrainerRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanTrainerData = z.object({ + body: zSchemaWanTrainerInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanTrainerResponse = zSchemaQueueStatus + +export const zGetFalAiWanTrainerRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiWanTrainerRequestsByRequestIdResponse = + zSchemaWanTrainerOutput + +export const zGetFalAiHunyuanVideoLoraTrainingRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiHunyuanVideoLoraTrainingRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiHunyuanVideoLoraTrainingRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiHunyuanVideoLoraTrainingRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiHunyuanVideoLoraTrainingData = z.object({ + body: zSchemaHunyuanVideoLoraTrainingInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiHunyuanVideoLoraTrainingResponse = zSchemaQueueStatus + +export const zGetFalAiHunyuanVideoLoraTrainingRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiHunyuanVideoLoraTrainingRequestsByRequestIdResponse = + zSchemaHunyuanVideoLoraTrainingOutput diff --git a/packages/typescript/ai-fal/src/generated/unknown/endpoint-map.ts b/packages/typescript/ai-fal/src/generated/unknown/endpoint-map.ts new file mode 100644 index 00000000..0353a2fc --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/unknown/endpoint-map.ts @@ -0,0 +1,78 @@ +// AUTO-GENERATED - Do not edit manually +// Generated from types.gen.ts via scripts/generate-fal-endpoint-maps.ts + +import { + zSchemaQwen3TtsCloneVoice06bInput, + zSchemaQwen3TtsCloneVoice06bOutput, + zSchemaQwen3TtsCloneVoice17bInput, + zSchemaQwen3TtsCloneVoice17bOutput, + zSchemaRouterAudioInput, + zSchemaRouterAudioOutput, + zSchemaWorkflowUtilitiesInterleaveVideoInput, + zSchemaWorkflowUtilitiesInterleaveVideoOutput, +} from './zod.gen' + +import type { + SchemaQwen3TtsCloneVoice06bInput, + SchemaQwen3TtsCloneVoice06bOutput, + SchemaQwen3TtsCloneVoice17bInput, + SchemaQwen3TtsCloneVoice17bOutput, + SchemaRouterAudioInput, + SchemaRouterAudioOutput, + SchemaWorkflowUtilitiesInterleaveVideoInput, + SchemaWorkflowUtilitiesInterleaveVideoOutput, +} from './types.gen' + +import type { z } from 'zod' + +export type UnknownEndpointMap = { + 'fal-ai/workflow-utilities/interleave-video': { + input: SchemaWorkflowUtilitiesInterleaveVideoInput + output: SchemaWorkflowUtilitiesInterleaveVideoOutput + } + 'fal-ai/qwen-3-tts/clone-voice/1.7b': { + input: SchemaQwen3TtsCloneVoice17bInput + output: SchemaQwen3TtsCloneVoice17bOutput + } + 'fal-ai/qwen-3-tts/clone-voice/0.6b': { + input: SchemaQwen3TtsCloneVoice06bInput + output: SchemaQwen3TtsCloneVoice06bOutput + } + 'openrouter/router/audio': { + input: SchemaRouterAudioInput + output: SchemaRouterAudioOutput + } +} + +/** Union type of all unknown model endpoint IDs */ +export type UnknownModel = keyof UnknownEndpointMap + +export const UnknownSchemaMap: Record< + UnknownModel, + { input: z.ZodSchema; output: z.ZodSchema } +> = { + ['fal-ai/workflow-utilities/interleave-video']: { + input: zSchemaWorkflowUtilitiesInterleaveVideoInput, + output: zSchemaWorkflowUtilitiesInterleaveVideoOutput, + }, + ['fal-ai/qwen-3-tts/clone-voice/1.7b']: { + input: zSchemaQwen3TtsCloneVoice17bInput, + output: zSchemaQwen3TtsCloneVoice17bOutput, + }, + ['fal-ai/qwen-3-tts/clone-voice/0.6b']: { + input: zSchemaQwen3TtsCloneVoice06bInput, + output: zSchemaQwen3TtsCloneVoice06bOutput, + }, + ['openrouter/router/audio']: { + input: zSchemaRouterAudioInput, + output: zSchemaRouterAudioOutput, + }, +} as const + +/** Get the input type for a 
specific unknown model */
+export type UnknownModelInput<T extends UnknownModel> =
+  UnknownEndpointMap[T]['input']
+
+/** Get the output type for a specific unknown model */
+export type UnknownModelOutput<T extends UnknownModel> =
+  UnknownEndpointMap[T]['output']
diff --git a/packages/typescript/ai-fal/src/generated/unknown/types.gen.ts b/packages/typescript/ai-fal/src/generated/unknown/types.gen.ts
new file mode 100644
index 00000000..eb027751
--- /dev/null
+++ b/packages/typescript/ai-fal/src/generated/unknown/types.gen.ts
@@ -0,0 +1,630 @@
+// This file is auto-generated by @hey-api/openapi-ts
+
+export type ClientOptions = {
+  baseUrl: 'https://queue.fal.run' | (string & {})
+}
+
+/**
+ * UsageInfo
+ */
+export type SchemaUsageInfo = {
+  /**
+   * Prompt Tokens
+   */
+  prompt_tokens?: number
+  /**
+   * Total Tokens
+   */
+  total_tokens?: number
+  /**
+   * Completion Tokens
+   */
+  completion_tokens?: number
+  /**
+   * Cost
+   */
+  cost: number
+}
+
+/**
+ * AudioOutput
+ */
+export type SchemaRouterAudioOutput = {
+  /**
+   * Usage
+   *
+   * Token usage information
+   */
+  usage?: SchemaUsageInfo
+  /**
+   * Output
+   *
+   * Generated output from audio processing
+   */
+  output: string
+}
+
+/**
+ * AudioInput
+ */
+export type SchemaRouterAudioInput = {
+  /**
+   * Prompt
+   *
+   * Prompt to be used for the audio processing
+   */
+  prompt: string
+  /**
+   * System Prompt
+   *
+   * System prompt to provide context or instructions to the model
+   */
+  system_prompt?: string
+  /**
+   * Reasoning
+   *
+   * Should reasoning be the part of the final answer.
+   */
+  reasoning?: boolean
+  /**
+   * Model
+   *
+   * Name of the model to use. Charged based on actual token usage.
+   */
+  model: string
+  /**
+   * Max Tokens
+   *
+   * This sets the upper limit for the number of tokens the model can generate in response. It won't produce more than this limit. The maximum value is the context length minus the prompt length.
+   */
+  max_tokens?: number
+  /**
+   * Temperature
+   *
+   * This setting influences the variety in the model's responses. Lower values lead to more predictable and typical responses, while higher values encourage more diverse and less common responses. At 0, the model always gives the same response for a given input.
+   */
+  temperature?: number
+  /**
+   * Audio Url
+   *
+   * URL or data URI of the audio file to process. Supported formats: wav, mp3, aiff, aac, ogg, flac, m4a.
+   */
+  audio_url: string
+}
+
+/**
+ * Qwen3CloneVoiceOutput
+ */
+export type SchemaQwen3TtsCloneVoice06bOutput = {
+  /**
+   * Speaker Embedding
+   *
+   * The generated speaker embedding file in safetensors format.
+   */
+  speaker_embedding: SchemaFile
+}
+
+/**
+ * File
+ */
+export type SchemaFile = {
+  /**
+   * File Size
+   *
+   * The size of the file in bytes.
+   */
+  file_size?: number
+  /**
+   * File Name
+   *
+   * The name of the file. It will be auto-generated if not provided.
+   */
+  file_name?: string
+  /**
+   * Content Type
+   *
+   * The mime type of the file.
+   */
+  content_type?: string
+  /**
+   * Url
+   *
+   * The URL where the file can be downloaded from.
+   */
+  url: string
+  /**
+   * File Data
+   *
+   * File data
+   */
+  file_data?: Blob | File
+}
+
+/**
+ * Qwen3CloneVoiceInput
+ */
+export type SchemaQwen3TtsCloneVoice06bInput = {
+  /**
+   * Audio Url
+   *
+   * URL to the reference audio file used for voice cloning.
+   */
+  audio_url: string
+  /**
+   * Reference Text
+   *
+   * Optional reference text that was used when creating the speaker embedding. Providing this can improve synthesis quality when using a cloned voice.
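+   *
+   * Hypothetical usage, not from the spec: pass the transcript spoken in the
+   * reference clip, e.g. `reference_text: 'Hello from fal.'`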
+   */
+  reference_text?: string
+}
+
+/**
+ * Qwen3CloneVoiceOutput
+ */
+export type SchemaQwen3TtsCloneVoice17bOutput = {
+  /**
+   * Speaker Embedding
+   *
+   * The generated speaker embedding file in safetensors format.
+   */
+  speaker_embedding: SchemaFile
+}
+
+/**
+ * Qwen3CloneVoiceInput
+ */
+export type SchemaQwen3TtsCloneVoice17bInput = {
+  /**
+   * Audio Url
+   *
+   * URL to the reference audio file used for voice cloning.
+   */
+  audio_url: string
+  /**
+   * Reference Text
+   *
+   * Optional reference text that was used when creating the speaker embedding. Providing this can improve synthesis quality when using a cloned voice.
+   */
+  reference_text?: string
+}
+
+/**
+ * InterleaveVideoOutput
+ *
+ * Output model for interleaved video
+ */
+export type SchemaWorkflowUtilitiesInterleaveVideoOutput = {
+  /**
+   * Video
+   *
+   * The interleaved video output
+   */
+  video: SchemaFile
+}
+
+/**
+ * InterleaveVideoInput
+ *
+ * Input model for interleaving multiple videos
+ */
+export type SchemaWorkflowUtilitiesInterleaveVideoInput = {
+  /**
+   * Video Urls
+   *
+   * List of video URLs to interleave in order
+   */
+  video_urls: Array<string>
+}
+
+export type SchemaQueueStatus = {
+  status: 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED'
+  /**
+   * The request id.
+   */
+  request_id: string
+  /**
+   * The response url.
+   */
+  response_url?: string
+  /**
+   * The status url.
+   */
+  status_url?: string
+  /**
+   * The cancel url.
+   */
+  cancel_url?: string
+  /**
+   * The logs.
+   */
+  logs?: {
+    [key: string]: unknown
+  }
+  /**
+   * The metrics.
+   */
+  metrics?: {
+    [key: string]: unknown
+  }
+  /**
+   * The queue position.
+   */
+  queue_position?: number
+}
+
+export type GetFalAiWorkflowUtilitiesInterleaveVideoRequestsByRequestIdStatusData =
+  {
+    body?: never
+    path: {
+      /**
+       * Request ID
+       */
+      request_id: string
+    }
+    query?: {
+      /**
+       * Whether to include logs (`1`) in the response or not (`0`).
+       */
+      logs?: number
+    }
+    url: '/fal-ai/workflow-utilities/interleave-video/requests/{request_id}/status'
+  }
+
+export type GetFalAiWorkflowUtilitiesInterleaveVideoRequestsByRequestIdStatusResponses =
+  {
+    /**
+     * The request status.
+     */
+    200: SchemaQueueStatus
+  }
+
+export type GetFalAiWorkflowUtilitiesInterleaveVideoRequestsByRequestIdStatusResponse =
+  GetFalAiWorkflowUtilitiesInterleaveVideoRequestsByRequestIdStatusResponses[keyof GetFalAiWorkflowUtilitiesInterleaveVideoRequestsByRequestIdStatusResponses]
+
+export type PutFalAiWorkflowUtilitiesInterleaveVideoRequestsByRequestIdCancelData =
+  {
+    body?: never
+    path: {
+      /**
+       * Request ID
+       */
+      request_id: string
+    }
+    query?: never
+    url: '/fal-ai/workflow-utilities/interleave-video/requests/{request_id}/cancel'
+  }
+
+export type PutFalAiWorkflowUtilitiesInterleaveVideoRequestsByRequestIdCancelResponses =
+  {
+    /**
+     * The request was cancelled.
+     */
+    200: {
+      /**
+       * Whether the request was cancelled successfully.
+       */
+      success?: boolean
+    }
+  }
+
+export type PutFalAiWorkflowUtilitiesInterleaveVideoRequestsByRequestIdCancelResponse =
+  PutFalAiWorkflowUtilitiesInterleaveVideoRequestsByRequestIdCancelResponses[keyof PutFalAiWorkflowUtilitiesInterleaveVideoRequestsByRequestIdCancelResponses]
+
+export type PostFalAiWorkflowUtilitiesInterleaveVideoData = {
+  body: SchemaWorkflowUtilitiesInterleaveVideoInput
+  path?: never
+  query?: never
+  url: '/fal-ai/workflow-utilities/interleave-video'
+}
+
+export type PostFalAiWorkflowUtilitiesInterleaveVideoResponses = {
+  /**
+   * The request status.
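+   *
+   * Editorial note, assumed from the queue API shape rather than stated in
+   * the spec: this POST returns the queue envelope, not the finished video;
+   * the result is fetched from `/requests/{request_id}` once the status is
+   * `COMPLETED`.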
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiWorkflowUtilitiesInterleaveVideoResponse = + PostFalAiWorkflowUtilitiesInterleaveVideoResponses[keyof PostFalAiWorkflowUtilitiesInterleaveVideoResponses] + +export type GetFalAiWorkflowUtilitiesInterleaveVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/workflow-utilities/interleave-video/requests/{request_id}' +} + +export type GetFalAiWorkflowUtilitiesInterleaveVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaWorkflowUtilitiesInterleaveVideoOutput + } + +export type GetFalAiWorkflowUtilitiesInterleaveVideoRequestsByRequestIdResponse = + GetFalAiWorkflowUtilitiesInterleaveVideoRequestsByRequestIdResponses[keyof GetFalAiWorkflowUtilitiesInterleaveVideoRequestsByRequestIdResponses] + +export type GetFalAiQwen3TtsCloneVoice17bRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/qwen-3-tts/clone-voice/1.7b/requests/{request_id}/status' +} + +export type GetFalAiQwen3TtsCloneVoice17bRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiQwen3TtsCloneVoice17bRequestsByRequestIdStatusResponse = + GetFalAiQwen3TtsCloneVoice17bRequestsByRequestIdStatusResponses[keyof GetFalAiQwen3TtsCloneVoice17bRequestsByRequestIdStatusResponses] + +export type PutFalAiQwen3TtsCloneVoice17bRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-3-tts/clone-voice/1.7b/requests/{request_id}/cancel' +} + +export type PutFalAiQwen3TtsCloneVoice17bRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiQwen3TtsCloneVoice17bRequestsByRequestIdCancelResponse = + PutFalAiQwen3TtsCloneVoice17bRequestsByRequestIdCancelResponses[keyof PutFalAiQwen3TtsCloneVoice17bRequestsByRequestIdCancelResponses] + +export type PostFalAiQwen3TtsCloneVoice17bData = { + body: SchemaQwen3TtsCloneVoice17bInput + path?: never + query?: never + url: '/fal-ai/qwen-3-tts/clone-voice/1.7b' +} + +export type PostFalAiQwen3TtsCloneVoice17bResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwen3TtsCloneVoice17bResponse = + PostFalAiQwen3TtsCloneVoice17bResponses[keyof PostFalAiQwen3TtsCloneVoice17bResponses] + +export type GetFalAiQwen3TtsCloneVoice17bRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-3-tts/clone-voice/1.7b/requests/{request_id}' +} + +export type GetFalAiQwen3TtsCloneVoice17bRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaQwen3TtsCloneVoice17bOutput +} + +export type GetFalAiQwen3TtsCloneVoice17bRequestsByRequestIdResponse = + GetFalAiQwen3TtsCloneVoice17bRequestsByRequestIdResponses[keyof GetFalAiQwen3TtsCloneVoice17bRequestsByRequestIdResponses] + +export type GetFalAiQwen3TtsCloneVoice06bRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
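Each *Data type pins its route to a string-literal url alongside typed path and query parameters, so a whole request description can be checked at compile time. A hypothetical illustration of populating one (the request id is a placeholder):

import type { GetFalAiQwen3TtsCloneVoice17bRequestsByRequestIdStatusData } from './types.gen'

const statusRequest: GetFalAiQwen3TtsCloneVoice17bRequestsByRequestIdStatusData = {
  path: { request_id: 'req-123' }, // placeholder id
  query: { logs: 1 }, // ask the status route to include logs
  url: '/fal-ai/qwen-3-tts/clone-voice/1.7b/requests/{request_id}/status',
}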
+ */ + logs?: number + } + url: '/fal-ai/qwen-3-tts/clone-voice/0.6b/requests/{request_id}/status' +} + +export type GetFalAiQwen3TtsCloneVoice06bRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiQwen3TtsCloneVoice06bRequestsByRequestIdStatusResponse = + GetFalAiQwen3TtsCloneVoice06bRequestsByRequestIdStatusResponses[keyof GetFalAiQwen3TtsCloneVoice06bRequestsByRequestIdStatusResponses] + +export type PutFalAiQwen3TtsCloneVoice06bRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-3-tts/clone-voice/0.6b/requests/{request_id}/cancel' +} + +export type PutFalAiQwen3TtsCloneVoice06bRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiQwen3TtsCloneVoice06bRequestsByRequestIdCancelResponse = + PutFalAiQwen3TtsCloneVoice06bRequestsByRequestIdCancelResponses[keyof PutFalAiQwen3TtsCloneVoice06bRequestsByRequestIdCancelResponses] + +export type PostFalAiQwen3TtsCloneVoice06bData = { + body: SchemaQwen3TtsCloneVoice06bInput + path?: never + query?: never + url: '/fal-ai/qwen-3-tts/clone-voice/0.6b' +} + +export type PostFalAiQwen3TtsCloneVoice06bResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiQwen3TtsCloneVoice06bResponse = + PostFalAiQwen3TtsCloneVoice06bResponses[keyof PostFalAiQwen3TtsCloneVoice06bResponses] + +export type GetFalAiQwen3TtsCloneVoice06bRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/qwen-3-tts/clone-voice/0.6b/requests/{request_id}' +} + +export type GetFalAiQwen3TtsCloneVoice06bRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaQwen3TtsCloneVoice06bOutput +} + +export type GetFalAiQwen3TtsCloneVoice06bRequestsByRequestIdResponse = + GetFalAiQwen3TtsCloneVoice06bRequestsByRequestIdResponses[keyof GetFalAiQwen3TtsCloneVoice06bRequestsByRequestIdResponses] + +export type GetOpenrouterRouterAudioRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/openrouter/router/audio/requests/{request_id}/status' +} + +export type GetOpenrouterRouterAudioRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetOpenrouterRouterAudioRequestsByRequestIdStatusResponse = + GetOpenrouterRouterAudioRequestsByRequestIdStatusResponses[keyof GetOpenrouterRouterAudioRequestsByRequestIdStatusResponses] + +export type PutOpenrouterRouterAudioRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/openrouter/router/audio/requests/{request_id}/cancel' +} + +export type PutOpenrouterRouterAudioRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
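A recurring pattern in these files is deriving each *Response alias from its *Responses record by indexed access over keyof; with a single 200 key this collapses to that payload, and it would widen to a union automatically if more status codes were ever generated. A small standalone illustration of the same TypeScript mechanism (the names are invented for the demo):

type DemoResponses = { 200: { success?: boolean } }
type DemoResponse = DemoResponses[keyof DemoResponses] // -> { success?: boolean }

type DemoMulti = { 200: string; 202: number }
type DemoMultiPayload = DemoMulti[keyof DemoMulti] // -> string | number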
+ */ + success?: boolean + } +} + +export type PutOpenrouterRouterAudioRequestsByRequestIdCancelResponse = + PutOpenrouterRouterAudioRequestsByRequestIdCancelResponses[keyof PutOpenrouterRouterAudioRequestsByRequestIdCancelResponses] + +export type PostOpenrouterRouterAudioData = { + body: SchemaRouterAudioInput + path?: never + query?: never + url: '/openrouter/router/audio' +} + +export type PostOpenrouterRouterAudioResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostOpenrouterRouterAudioResponse = + PostOpenrouterRouterAudioResponses[keyof PostOpenrouterRouterAudioResponses] + +export type GetOpenrouterRouterAudioRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/openrouter/router/audio/requests/{request_id}' +} + +export type GetOpenrouterRouterAudioRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaRouterAudioOutput +} + +export type GetOpenrouterRouterAudioRequestsByRequestIdResponse = + GetOpenrouterRouterAudioRequestsByRequestIdResponses[keyof GetOpenrouterRouterAudioRequestsByRequestIdResponses] diff --git a/packages/typescript/ai-fal/src/generated/unknown/zod.gen.ts b/packages/typescript/ai-fal/src/generated/unknown/zod.gen.ts new file mode 100644 index 00000000..ba4a0366 --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/unknown/zod.gen.ts @@ -0,0 +1,523 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { z } from 'zod' + +/** + * UsageInfo + */ +export const zSchemaUsageInfo = z.object({ + prompt_tokens: z.optional(z.int()), + total_tokens: z.optional(z.int()).default(0), + completion_tokens: z.optional(z.int()), + cost: z.number(), +}) + +/** + * AudioOutput + */ +export const zSchemaRouterAudioOutput = z.object({ + usage: z.optional(zSchemaUsageInfo), + output: z.string().register(z.globalRegistry, { + description: 'Generated output from audio processing', + }), +}) + +/** + * AudioInput + */ +export const zSchemaRouterAudioInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Prompt to be used for the audio processing', + }), + system_prompt: z.optional( + z.string().register(z.globalRegistry, { + description: + 'System prompt to provide context or instructions to the model', + }), + ), + reasoning: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Should reasoning be the part of the final answer.', + }), + ) + .default(false), + model: z.string().register(z.globalRegistry, { + description: + 'Name of the model to use. Charged based on actual token usage.', + }), + max_tokens: z.optional( + z.int().gte(1).register(z.globalRegistry, { + description: + "This sets the upper limit for the number of tokens the model can generate in response. It won't produce more than this limit. The maximum value is the context length minus the prompt length.", + }), + ), + temperature: z + .optional( + z.number().gte(0).lte(2).register(z.globalRegistry, { + description: + "This setting influences the variety in the model's responses. Lower values lead to more predictable and typical responses, while higher values encourage more diverse and less common responses. At 0, the model always gives the same response for a given input.", + }), + ) + .default(1), + audio_url: z.string().register(z.globalRegistry, { + description: + 'URL or data URI of the audio file to process. 
Supported formats: wav, mp3, aiff, aac, ogg, flac, m4a.', + }), +}) + +/** + * File + */ +export const zSchemaFile = z.object({ + file_size: z.optional( + z.int().register(z.globalRegistry, { + description: 'The size of the file in bytes.', + }), + ), + file_name: z.optional( + z.string().register(z.globalRegistry, { + description: + 'The name of the file. It will be auto-generated if not provided.', + }), + ), + content_type: z.optional( + z.string().register(z.globalRegistry, { + description: 'The mime type of the file.', + }), + ), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), + file_data: z.optional( + z.string().register(z.globalRegistry, { + description: 'File data', + }), + ), +}) + +/** + * Qwen3CloneVoiceOutput + */ +export const zSchemaQwen3TtsCloneVoice06bOutput = z.object({ + speaker_embedding: zSchemaFile, +}) + +/** + * Qwen3CloneVoiceInput + */ +export const zSchemaQwen3TtsCloneVoice06bInput = z.object({ + audio_url: z.string().register(z.globalRegistry, { + description: 'URL to the reference audio file used for voice cloning.', + }), + reference_text: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Optional reference text that was used when creating the speaker embedding. Providing this can improve synthesis quality when using a cloned voice.', + }), + ), +}) + +/** + * Qwen3CloneVoiceOutput + */ +export const zSchemaQwen3TtsCloneVoice17bOutput = z.object({ + speaker_embedding: zSchemaFile, +}) + +/** + * Qwen3CloneVoiceInput + */ +export const zSchemaQwen3TtsCloneVoice17bInput = z.object({ + audio_url: z.string().register(z.globalRegistry, { + description: 'URL to the reference audio file used for voice cloning.', + }), + reference_text: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Optional reference text that was used when creating the speaker embedding. 
Providing this can improve synthesis quality when using a cloned voice.', + }), + ), +}) + +/** + * InterleaveVideoOutput + * + * Output model for interleaved video + */ +export const zSchemaWorkflowUtilitiesInterleaveVideoOutput = z + .object({ + video: zSchemaFile, + }) + .register(z.globalRegistry, { + description: 'Output model for interleaved video', + }) + +/** + * InterleaveVideoInput + * + * Input model for interleaving multiple videos + */ +export const zSchemaWorkflowUtilitiesInterleaveVideoInput = z + .object({ + video_urls: z.array(z.string()).register(z.globalRegistry, { + description: 'List of video URLs to interleave in order', + }), + }) + .register(z.globalRegistry, { + description: 'Input model for interleaving multiple videos', + }) + +export const zSchemaQueueStatus = z.object({ + status: z.enum(['IN_QUEUE', 'IN_PROGRESS', 'COMPLETED']), + request_id: z.string().register(z.globalRegistry, { + description: 'The request id.', + }), + response_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The response url.', + }), + ), + status_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The status url.', + }), + ), + cancel_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The cancel url.', + }), + ), + logs: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The logs.', + }), + ), + metrics: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The metrics.', + }), + ), + queue_position: z.optional( + z.int().register(z.globalRegistry, { + description: 'The queue position.', + }), + ), +}) + +export const zGetFalAiWorkflowUtilitiesInterleaveVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiWorkflowUtilitiesInterleaveVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWorkflowUtilitiesInterleaveVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiWorkflowUtilitiesInterleaveVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWorkflowUtilitiesInterleaveVideoData = z.object({ + body: zSchemaWorkflowUtilitiesInterleaveVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
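The zod mirrors let callers validate what actually arrives over the wire before leaning on the static types. A minimal sketch, assuming rawBody holds a fetched response body (an assumption for the demo):

import { zSchemaQueueStatus } from './zod.gen'

declare const rawBody: string // assumed: raw HTTP response body

const status = zSchemaQueueStatus.parse(JSON.parse(rawBody)) // throws ZodError on mismatch
if (status.status === 'COMPLETED') {
  console.log('result available at', status.response_url)
}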
+ */ +export const zPostFalAiWorkflowUtilitiesInterleaveVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiWorkflowUtilitiesInterleaveVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiWorkflowUtilitiesInterleaveVideoRequestsByRequestIdResponse = + zSchemaWorkflowUtilitiesInterleaveVideoOutput + +export const zGetFalAiQwen3TtsCloneVoice17bRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiQwen3TtsCloneVoice17bRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwen3TtsCloneVoice17bRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiQwen3TtsCloneVoice17bRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwen3TtsCloneVoice17bData = z.object({ + body: zSchemaQwen3TtsCloneVoice17bInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiQwen3TtsCloneVoice17bResponse = zSchemaQueueStatus + +export const zGetFalAiQwen3TtsCloneVoice17bRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiQwen3TtsCloneVoice17bRequestsByRequestIdResponse = + zSchemaQwen3TtsCloneVoice17bOutput + +export const zGetFalAiQwen3TtsCloneVoice06bRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiQwen3TtsCloneVoice06bRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiQwen3TtsCloneVoice06bRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
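Because the z*Data schemas bundle body, path, and query into one object, an outgoing request can be checked in a single call before it is sent. A sketch using safeParse (the reference audio URL is a placeholder):

import { zPostFalAiQwen3TtsCloneVoice17bData } from './zod.gen'

const candidate = {
  body: { audio_url: 'https://example.com/reference.wav' }, // placeholder URL
}
const checked = zPostFalAiQwen3TtsCloneVoice17bData.safeParse(candidate)
if (!checked.success) {
  console.error(checked.error.issues) // structured description of what failed
}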
+ */ +export const zPutFalAiQwen3TtsCloneVoice06bRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiQwen3TtsCloneVoice06bData = z.object({ + body: zSchemaQwen3TtsCloneVoice06bInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiQwen3TtsCloneVoice06bResponse = zSchemaQueueStatus + +export const zGetFalAiQwen3TtsCloneVoice06bRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiQwen3TtsCloneVoice06bRequestsByRequestIdResponse = + zSchemaQwen3TtsCloneVoice06bOutput + +export const zGetOpenrouterRouterAudioRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetOpenrouterRouterAudioRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutOpenrouterRouterAudioRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutOpenrouterRouterAudioRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostOpenrouterRouterAudioData = z.object({ + body: zSchemaRouterAudioInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostOpenrouterRouterAudioResponse = zSchemaQueueStatus + +export const zGetOpenrouterRouterAudioRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetOpenrouterRouterAudioRequestsByRequestIdResponse = + zSchemaRouterAudioOutput diff --git a/packages/typescript/ai-fal/src/generated/video-to-audio/endpoint-map.ts b/packages/typescript/ai-fal/src/generated/video-to-audio/endpoint-map.ts new file mode 100644 index 00000000..822a6b0b --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/video-to-audio/endpoint-map.ts @@ -0,0 +1,78 @@ +// AUTO-GENERATED - Do not edit manually +// Generated from types.gen.ts via scripts/generate-fal-endpoint-maps.ts + +import { + zSchemaKlingVideoVideoToAudioInput, + zSchemaKlingVideoVideoToAudioOutput, + zSchemaSamAudioVisualSeparateInput, + zSchemaSamAudioVisualSeparateOutput, + zSchemaSfxV15VideoToAudioInput, + zSchemaSfxV15VideoToAudioOutput, + zSchemaSfxV1VideoToAudioInput, + zSchemaSfxV1VideoToAudioOutput, +} from './zod.gen' + +import type { + SchemaKlingVideoVideoToAudioInput, + SchemaKlingVideoVideoToAudioOutput, + SchemaSamAudioVisualSeparateInput, + SchemaSamAudioVisualSeparateOutput, + SchemaSfxV15VideoToAudioInput, + SchemaSfxV15VideoToAudioOutput, + SchemaSfxV1VideoToAudioInput, + SchemaSfxV1VideoToAudioOutput, +} from './types.gen' + +import type { z } from 'zod' + +export type VideoToAudioEndpointMap = { + 'fal-ai/sam-audio/visual-separate': { + input: SchemaSamAudioVisualSeparateInput + output: SchemaSamAudioVisualSeparateOutput + } + 'mirelo-ai/sfx-v1.5/video-to-audio': { + input: SchemaSfxV15VideoToAudioInput + output: SchemaSfxV15VideoToAudioOutput + } + 'fal-ai/kling-video/video-to-audio': { + input: SchemaKlingVideoVideoToAudioInput + output: SchemaKlingVideoVideoToAudioOutput + } + 'mirelo-ai/sfx-v1/video-to-audio': { + input: SchemaSfxV1VideoToAudioInput + output: SchemaSfxV1VideoToAudioOutput + } +} + +/** Union type of all video-to-audio model endpoint IDs */ +export type VideoToAudioModel = keyof VideoToAudioEndpointMap + +export const VideoToAudioSchemaMap: Record< + VideoToAudioModel, + { input: z.ZodSchema; output: z.ZodSchema } +> = { + ['fal-ai/sam-audio/visual-separate']: { + input: zSchemaSamAudioVisualSeparateInput, + output: zSchemaSamAudioVisualSeparateOutput, + }, + ['mirelo-ai/sfx-v1.5/video-to-audio']: { + input: zSchemaSfxV15VideoToAudioInput, + output: zSchemaSfxV15VideoToAudioOutput, + }, + ['fal-ai/kling-video/video-to-audio']: { + input: zSchemaKlingVideoVideoToAudioInput, + output: zSchemaKlingVideoVideoToAudioOutput, + }, + ['mirelo-ai/sfx-v1/video-to-audio']: { + input: zSchemaSfxV1VideoToAudioInput, + output: zSchemaSfxV1VideoToAudioOutput, + }, +} as const + +/** Get the input type for a specific video-to-audio model */ +export type VideoToAudioModelInput<T extends VideoToAudioModel> = + VideoToAudioEndpointMap[T]['input'] + +/** Get the output type for a specific video-to-audio model */ +export type VideoToAudioModelOutput<T extends VideoToAudioModel> = + VideoToAudioEndpointMap[T]['output'] diff --git a/packages/typescript/ai-fal/src/generated/video-to-audio/types.gen.ts b/packages/typescript/ai-fal/src/generated/video-to-audio/types.gen.ts new file mode 100644 index 00000000..decabf94 --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/video-to-audio/types.gen.ts @@ -0,0 +1,735 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: 'https://queue.fal.run' | (string & {}) +} + +/** + * Audio + */ +export type SchemaAudio = { + /** + * File Size + * + * The size of the file in bytes. + */ + file_size?: number | unknown + /** + * File Name + * + * The name of the file. It will be auto-generated if not provided.
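Since VideoToAudioSchemaMap is keyed by the same endpoint ids as the type-level map, one generic helper can validate inputs at runtime while preserving per-endpoint static types. A sketch of that pattern; the transport itself is elided and the video URL is a placeholder:

import { VideoToAudioSchemaMap } from './endpoint-map'
import type { VideoToAudioModel, VideoToAudioModelInput } from './endpoint-map'

function validateInput<M extends VideoToAudioModel>(
  model: M,
  input: VideoToAudioModelInput<M>,
): VideoToAudioModelInput<M> {
  // Static check via the endpoint map, runtime check via the zod schema.
  return VideoToAudioSchemaMap[model].input.parse(input) as VideoToAudioModelInput<M>
}

const validated = validateInput('mirelo-ai/sfx-v1/video-to-audio', {
  video_url: 'https://example.com/clip.mp4', // placeholder
})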
+ */ + file_name?: string | unknown + /** + * Content Type + * + * The mime type of the file. + */ + content_type?: string | unknown + /** + * Url + * + * The URL where the file can be downloaded from. + */ + url: string +} + +/** + * AudioOutput + */ +export type SchemaSfxV1VideoToAudioOutput = { + /** + * Audio + * + * The generated sound effects audio + */ + audio: Array<SchemaAudio> +} + +/** + * Input + */ +export type SchemaSfxV1VideoToAudioInput = { + /** + * Num Samples + * + * The number of samples to generate from the model + */ + num_samples?: number | unknown + /** + * Video Url + * + * A video url that can accessed from the API to process and add sound effects + */ + video_url: string + /** + * Duration + * + * The duration of the generated audio in seconds + */ + duration?: number | unknown + /** + * Seed + * + * The seed to use for the generation. If not provided, a random seed will be used + */ + seed?: number | unknown + /** + * Text Prompt + * + * Additional description to guide the model + */ + text_prompt?: string | unknown +} + +/** + * VideoToAudioOutput + */ +export type SchemaKlingVideoVideoToAudioOutput = { + /** + * Audio + * + * The extracted/generated audio from the video in MP3 format + */ + audio: SchemaFile + /** + * Video + * + * The original video with dubbed audio applied + */ + video: SchemaFile +} + +/** + * File + */ +export type SchemaFile = { + /** + * File Size + * + * The size of the file in bytes. + */ + file_size?: number + /** + * File Name + * + * The name of the file. It will be auto-generated if not provided. + */ + file_name?: string + /** + * Content Type + * + * The mime type of the file. + */ + content_type?: string + /** + * Url + * + * The URL where the file can be downloaded from. + */ + url: string + /** + * File Data + * + * File data + */ + file_data?: Blob | File +} + +/** + * VideoToAudioInput + */ +export type SchemaKlingVideoVideoToAudioInput = { + /** + * Video Url + * + * The video URL to extract audio from. Only .mp4/.mov formats are supported. File size does not exceed 100MB. Video duration between 3.0s and 20.0s. + */ + video_url: string + /** + * Asmr Mode + * + * Enable ASMR mode. This mode enhances detailed sound effects and is suitable for highly immersive content scenarios. + */ + asmr_mode?: boolean + /** + * Background Music Prompt + * + * Background music prompt. Cannot exceed 200 characters. + */ + background_music_prompt?: string + /** + * Sound Effect Prompt + * + * Sound effect prompt. Cannot exceed 200 characters. + */ + sound_effect_prompt?: string +} + +/** + * Audio + */ +export type SchemaAudioOutput = { + /** + * File Size + * + * The size of the file in bytes. + */ + file_size?: number | unknown + /** + * File Name + * + * The name of the file. It will be auto-generated if not provided. + */ + file_name?: string | unknown + /** + * Content Type + * + * The mime type of the file. + */ + content_type?: string | unknown + /** + * Url + * + * The URL where the file can be downloaded from.
+ */ + url: string +} + +/** + * AudioOutput + */ +export type SchemaSfxV15VideoToAudioOutput = { + /** + * Audio + * + * The generated sound effects audio + */ + audio: Array<SchemaAudioOutput> +} + +/** + * Input + */ +export type SchemaSfxV15VideoToAudioInput = { + /** + * Num Samples + * + * The number of samples to generate from the model + */ + num_samples?: number | unknown + /** + * Duration + * + * The duration of the generated audio in seconds + */ + duration?: number | unknown + /** + * Start Offset + * + * The start offset in seconds to start the audio generation from + */ + start_offset?: number | unknown + /** + * Video Url + * + * A video url that can accessed from the API to process and add sound effects + */ + video_url: string + /** + * Seed + * + * The seed to use for the generation. If not provided, a random seed will be used + */ + seed?: number | unknown + /** + * Text Prompt + * + * Additional description to guide the model + */ + text_prompt?: string | unknown +} + +/** + * SAMAudioVisualSeparateOutput + * + * Output for visual-prompted audio separation. + */ +export type SchemaSamAudioVisualSeparateOutput = { + /** + * Target + * + * The isolated target sound. + */ + target: SchemaFile + /** + * Duration + * + * Duration of the output audio in seconds. + */ + duration: number + /** + * Sample Rate + * + * Sample rate of the output audio in Hz. + */ + sample_rate?: number + /** + * Residual + * + * Everything else in the audio. + */ + residual: SchemaFile +} + +/** + * SAMAudioVisualInput + * + * Input for visual-prompted audio separation. + */ +export type SchemaSamAudioVisualSeparateInput = { + /** + * Prompt + * + * Text prompt to assist with separation. Use natural language to describe the target sound. + */ + prompt?: string + /** + * Video Url + * + * URL of the video file to process (MP4, MOV, etc.) + */ + video_url: string + /** + * Acceleration + * + * The acceleration level to use. + */ + acceleration?: 'fast' | 'balanced' | 'quality' + /** + * Mask Video Url + * + * URL of the mask video (binary mask indicating target object). Black=target, White=background. + */ + mask_video_url?: string + /** + * Output Format + * + * Output audio format. + */ + output_format?: 'wav' | 'mp3' + /** + * Reranking Candidates + * + * Number of candidates to generate and rank. Higher improves quality but increases latency and cost. + */ + reranking_candidates?: number +} + +export type SchemaQueueStatus = { + status: 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED' + /** + * The request id. + */ + request_id: string + /** + * The response url. + */ + response_url?: string + /** + * The status url. + */ + status_url?: string + /** + * The cancel url. + */ + cancel_url?: string + /** + * The logs. + */ + logs?: { + [key: string]: unknown + } + /** + * The metrics. + */ + metrics?: { + [key: string]: unknown + } + /** + * The queue position. + */ + queue_position?: number +} + +export type GetFalAiSamAudioVisualSeparateRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/sam-audio/visual-separate/requests/{request_id}/status' +} + +export type GetFalAiSamAudioVisualSeparateRequestsByRequestIdStatusResponses = { + /** + * The request status.
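The SAM separation input pairs a required video_url with optional steering knobs; as a typed literal it might look like the following sketch (all values are placeholders):

import type { SchemaSamAudioVisualSeparateInput } from './types.gen'

const separateRequest: SchemaSamAudioVisualSeparateInput = {
  video_url: 'https://example.com/scene.mp4', // placeholder
  prompt: 'the barking dog', // natural-language description of the target sound
  acceleration: 'balanced',
  output_format: 'wav',
  reranking_candidates: 2, // trades latency and cost for quality
}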
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiSamAudioVisualSeparateRequestsByRequestIdStatusResponse = + GetFalAiSamAudioVisualSeparateRequestsByRequestIdStatusResponses[keyof GetFalAiSamAudioVisualSeparateRequestsByRequestIdStatusResponses] + +export type PutFalAiSamAudioVisualSeparateRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sam-audio/visual-separate/requests/{request_id}/cancel' +} + +export type PutFalAiSamAudioVisualSeparateRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiSamAudioVisualSeparateRequestsByRequestIdCancelResponse = + PutFalAiSamAudioVisualSeparateRequestsByRequestIdCancelResponses[keyof PutFalAiSamAudioVisualSeparateRequestsByRequestIdCancelResponses] + +export type PostFalAiSamAudioVisualSeparateData = { + body: SchemaSamAudioVisualSeparateInput + path?: never + query?: never + url: '/fal-ai/sam-audio/visual-separate' +} + +export type PostFalAiSamAudioVisualSeparateResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSamAudioVisualSeparateResponse = + PostFalAiSamAudioVisualSeparateResponses[keyof PostFalAiSamAudioVisualSeparateResponses] + +export type GetFalAiSamAudioVisualSeparateRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sam-audio/visual-separate/requests/{request_id}' +} + +export type GetFalAiSamAudioVisualSeparateRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSamAudioVisualSeparateOutput +} + +export type GetFalAiSamAudioVisualSeparateRequestsByRequestIdResponse = + GetFalAiSamAudioVisualSeparateRequestsByRequestIdResponses[keyof GetFalAiSamAudioVisualSeparateRequestsByRequestIdResponses] + +export type GetMireloAiSfxV15VideoToAudioRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/mirelo-ai/sfx-v1.5/video-to-audio/requests/{request_id}/status' +} + +export type GetMireloAiSfxV15VideoToAudioRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetMireloAiSfxV15VideoToAudioRequestsByRequestIdStatusResponse = + GetMireloAiSfxV15VideoToAudioRequestsByRequestIdStatusResponses[keyof GetMireloAiSfxV15VideoToAudioRequestsByRequestIdStatusResponses] + +export type PutMireloAiSfxV15VideoToAudioRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/mirelo-ai/sfx-v1.5/video-to-audio/requests/{request_id}/cancel' +} + +export type PutMireloAiSfxV15VideoToAudioRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutMireloAiSfxV15VideoToAudioRequestsByRequestIdCancelResponse = + PutMireloAiSfxV15VideoToAudioRequestsByRequestIdCancelResponses[keyof PutMireloAiSfxV15VideoToAudioRequestsByRequestIdCancelResponses] + +export type PostMireloAiSfxV15VideoToAudioData = { + body: SchemaSfxV15VideoToAudioInput + path?: never + query?: never + url: '/mirelo-ai/sfx-v1.5/video-to-audio' +} + +export type PostMireloAiSfxV15VideoToAudioResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostMireloAiSfxV15VideoToAudioResponse = + PostMireloAiSfxV15VideoToAudioResponses[keyof PostMireloAiSfxV15VideoToAudioResponses] + +export type GetMireloAiSfxV15VideoToAudioRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/mirelo-ai/sfx-v1.5/video-to-audio/requests/{request_id}' +} + +export type GetMireloAiSfxV15VideoToAudioRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSfxV15VideoToAudioOutput +} + +export type GetMireloAiSfxV15VideoToAudioRequestsByRequestIdResponse = + GetMireloAiSfxV15VideoToAudioRequestsByRequestIdResponses[keyof GetMireloAiSfxV15VideoToAudioRequestsByRequestIdResponses] + +export type GetFalAiKlingVideoVideoToAudioRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-video/video-to-audio/requests/{request_id}/status' +} + +export type GetFalAiKlingVideoVideoToAudioRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiKlingVideoVideoToAudioRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoVideoToAudioRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoVideoToAudioRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoVideoToAudioRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/video-to-audio/requests/{request_id}/cancel' +} + +export type PutFalAiKlingVideoVideoToAudioRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiKlingVideoVideoToAudioRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoVideoToAudioRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoVideoToAudioRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoVideoToAudioData = { + body: SchemaKlingVideoVideoToAudioInput + path?: never + query?: never + url: '/fal-ai/kling-video/video-to-audio' +} + +export type PostFalAiKlingVideoVideoToAudioResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoVideoToAudioResponse = + PostFalAiKlingVideoVideoToAudioResponses[keyof PostFalAiKlingVideoVideoToAudioResponses] + +export type GetFalAiKlingVideoVideoToAudioRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/video-to-audio/requests/{request_id}' +} + +export type GetFalAiKlingVideoVideoToAudioRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaKlingVideoVideoToAudioOutput +} + +export type GetFalAiKlingVideoVideoToAudioRequestsByRequestIdResponse = + GetFalAiKlingVideoVideoToAudioRequestsByRequestIdResponses[keyof GetFalAiKlingVideoVideoToAudioRequestsByRequestIdResponses] + +export type GetMireloAiSfxV1VideoToAudioRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/mirelo-ai/sfx-v1/video-to-audio/requests/{request_id}/status' +} + +export type GetMireloAiSfxV1VideoToAudioRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetMireloAiSfxV1VideoToAudioRequestsByRequestIdStatusResponse = + GetMireloAiSfxV1VideoToAudioRequestsByRequestIdStatusResponses[keyof GetMireloAiSfxV1VideoToAudioRequestsByRequestIdStatusResponses] + +export type PutMireloAiSfxV1VideoToAudioRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/mirelo-ai/sfx-v1/video-to-audio/requests/{request_id}/cancel' +} + +export type PutMireloAiSfxV1VideoToAudioRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutMireloAiSfxV1VideoToAudioRequestsByRequestIdCancelResponse = + PutMireloAiSfxV1VideoToAudioRequestsByRequestIdCancelResponses[keyof PutMireloAiSfxV1VideoToAudioRequestsByRequestIdCancelResponses] + +export type PostMireloAiSfxV1VideoToAudioData = { + body: SchemaSfxV1VideoToAudioInput + path?: never + query?: never + url: '/mirelo-ai/sfx-v1/video-to-audio' +} + +export type PostMireloAiSfxV1VideoToAudioResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostMireloAiSfxV1VideoToAudioResponse = + PostMireloAiSfxV1VideoToAudioResponses[keyof PostMireloAiSfxV1VideoToAudioResponses] + +export type GetMireloAiSfxV1VideoToAudioRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/mirelo-ai/sfx-v1/video-to-audio/requests/{request_id}' +} + +export type GetMireloAiSfxV1VideoToAudioRequestsByRequestIdResponses = { + /** + * Result of the request. 
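As the Kling input type documents, the source video must be .mp4/.mov, at most 100MB, and between 3 and 20 seconds, with each prompt capped at 200 characters. A typed example request under those constraints (URL and prompt text are placeholders):

import type { SchemaKlingVideoVideoToAudioInput } from './types.gen'

const klingRequest: SchemaKlingVideoVideoToAudioInput = {
  video_url: 'https://example.com/race.mp4', // placeholder; .mp4/.mov, 3-20s, <=100MB
  sound_effect_prompt: 'tires screeching on asphalt', // <= 200 characters
  background_music_prompt: 'driving electronic beat', // <= 200 characters
  asmr_mode: false,
}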
+ */ + 200: SchemaSfxV1VideoToAudioOutput +} + +export type GetMireloAiSfxV1VideoToAudioRequestsByRequestIdResponse = + GetMireloAiSfxV1VideoToAudioRequestsByRequestIdResponses[keyof GetMireloAiSfxV1VideoToAudioRequestsByRequestIdResponses] diff --git a/packages/typescript/ai-fal/src/generated/video-to-audio/zod.gen.ts b/packages/typescript/ai-fal/src/generated/video-to-audio/zod.gen.ts new file mode 100644 index 00000000..b006787f --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/video-to-audio/zod.gen.ts @@ -0,0 +1,570 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { z } from 'zod' + +/** + * Audio + */ +export const zSchemaAudio = z.object({ + file_size: z.optional(z.union([z.int(), z.unknown()])), + file_name: z.optional(z.union([z.string(), z.unknown()])), + content_type: z.optional(z.union([z.string(), z.unknown()])), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), +}) + +/** + * AudioOutput + */ +export const zSchemaSfxV1VideoToAudioOutput = z.object({ + audio: z.array(zSchemaAudio).register(z.globalRegistry, { + description: 'The generated sound effects audio', + }), +}) + +/** + * Input + */ +export const zSchemaSfxV1VideoToAudioInput = z.object({ + num_samples: z.optional(z.union([z.int().gte(2).lte(8), z.unknown()])), + video_url: z.url().min(1).max(2083).register(z.globalRegistry, { + description: + 'A video url that can accessed from the API to process and add sound effects', + }), + duration: z.optional(z.union([z.number().gte(1).lte(10), z.unknown()])), + seed: z.optional(z.union([z.int().gte(1), z.unknown()])), + text_prompt: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * File + */ +export const zSchemaFile = z.object({ + file_size: z.optional( + z.int().register(z.globalRegistry, { + description: 'The size of the file in bytes.', + }), + ), + file_name: z.optional( + z.string().register(z.globalRegistry, { + description: + 'The name of the file. It will be auto-generated if not provided.', + }), + ), + content_type: z.optional( + z.string().register(z.globalRegistry, { + description: 'The mime type of the file.', + }), + ), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), + file_data: z.optional( + z.string().register(z.globalRegistry, { + description: 'File data', + }), + ), +}) + +/** + * VideoToAudioOutput + */ +export const zSchemaKlingVideoVideoToAudioOutput = z.object({ + audio: zSchemaFile, + video: zSchemaFile, +}) + +/** + * VideoToAudioInput + */ +export const zSchemaKlingVideoVideoToAudioInput = z.object({ + video_url: z.string().register(z.globalRegistry, { + description: + 'The video URL to extract audio from. Only .mp4/.mov formats are supported. File size does not exceed 100MB. Video duration between 3.0s and 20.0s.', + }), + asmr_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Enable ASMR mode. This mode enhances detailed sound effects and is suitable for highly immersive content scenarios.', + }), + ) + .default(false), + background_music_prompt: z + .optional( + z.string().max(200).register(z.globalRegistry, { + description: 'Background music prompt. Cannot exceed 200 characters.', + }), + ) + .default('intense car race'), + sound_effect_prompt: z + .optional( + z.string().max(200).register(z.globalRegistry, { + description: 'Sound effect prompt. 
Cannot exceed 200 characters.', + }), + ) + .default('Car tires screech as they accelerate in a drag race'), +}) + +/** + * Audio + */ +export const zSchemaAudioOutput = z.object({ + file_size: z.optional(z.union([z.int(), z.unknown()])), + file_name: z.optional(z.union([z.string(), z.unknown()])), + content_type: z.optional(z.union([z.string(), z.unknown()])), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), +}) + +/** + * AudioOutput + */ +export const zSchemaSfxV15VideoToAudioOutput = z.object({ + audio: z.array(zSchemaAudioOutput).register(z.globalRegistry, { + description: 'The generated sound effects audio', + }), +}) + +/** + * Input + */ +export const zSchemaSfxV15VideoToAudioInput = z.object({ + num_samples: z.optional(z.union([z.int().gte(2).lte(8), z.unknown()])), + duration: z.optional(z.union([z.number().gte(1).lte(10), z.unknown()])), + start_offset: z.optional(z.union([z.number().gte(0), z.unknown()])), + video_url: z.url().min(1).max(2083).register(z.globalRegistry, { + description: + 'A video url that can accessed from the API to process and add sound effects', + }), + seed: z.optional(z.union([z.int().gte(1), z.unknown()])), + text_prompt: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * SAMAudioVisualSeparateOutput + * + * Output for visual-prompted audio separation. + */ +export const zSchemaSamAudioVisualSeparateOutput = z + .object({ + target: zSchemaFile, + duration: z.number().register(z.globalRegistry, { + description: 'Duration of the output audio in seconds.', + }), + sample_rate: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Sample rate of the output audio in Hz.', + }), + ) + .default(48000), + residual: zSchemaFile, + }) + .register(z.globalRegistry, { + description: 'Output for visual-prompted audio separation.', + }) + +/** + * SAMAudioVisualInput + * + * Input for visual-prompted audio separation. + */ +export const zSchemaSamAudioVisualSeparateInput = z + .object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Text prompt to assist with separation. Use natural language to describe the target sound.', + }), + ) + .default(''), + video_url: z.string().register(z.globalRegistry, { + description: 'URL of the video file to process (MP4, MOV, etc.)', + }), + acceleration: z.optional( + z.enum(['fast', 'balanced', 'quality']).register(z.globalRegistry, { + description: 'The acceleration level to use.', + }), + ), + mask_video_url: z.optional( + z.string().register(z.globalRegistry, { + description: + 'URL of the mask video (binary mask indicating target object). Black=target, White=background.', + }), + ), + output_format: z.optional( + z.enum(['wav', 'mp3']).register(z.globalRegistry, { + description: 'Output audio format.', + }), + ), + reranking_candidates: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: + 'Number of candidates to generate and rank. 
Higher improves quality but increases latency and cost.', + }), + ) + .default(1), + }) + .register(z.globalRegistry, { + description: 'Input for visual-prompted audio separation.', + }) + +export const zSchemaQueueStatus = z.object({ + status: z.enum(['IN_QUEUE', 'IN_PROGRESS', 'COMPLETED']), + request_id: z.string().register(z.globalRegistry, { + description: 'The request id.', + }), + response_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The response url.', + }), + ), + status_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The status url.', + }), + ), + cancel_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The cancel url.', + }), + ), + logs: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The logs.', + }), + ), + metrics: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The metrics.', + }), + ), + queue_position: z.optional( + z.int().register(z.globalRegistry, { + description: 'The queue position.', + }), + ), +}) + +export const zGetFalAiSamAudioVisualSeparateRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiSamAudioVisualSeparateRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSamAudioVisualSeparateRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiSamAudioVisualSeparateRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSamAudioVisualSeparateData = z.object({ + body: zSchemaSamAudioVisualSeparateInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiSamAudioVisualSeparateResponse = zSchemaQueueStatus + +export const zGetFalAiSamAudioVisualSeparateRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSamAudioVisualSeparateRequestsByRequestIdResponse = + zSchemaSamAudioVisualSeparateOutput + +export const zGetMireloAiSfxV15VideoToAudioRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetMireloAiSfxV15VideoToAudioRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutMireloAiSfxV15VideoToAudioRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutMireloAiSfxV15VideoToAudioRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostMireloAiSfxV15VideoToAudioData = z.object({ + body: zSchemaSfxV15VideoToAudioInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostMireloAiSfxV15VideoToAudioResponse = zSchemaQueueStatus + +export const zGetMireloAiSfxV15VideoToAudioRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetMireloAiSfxV15VideoToAudioRequestsByRequestIdResponse = + zSchemaSfxV15VideoToAudioOutput + +export const zGetFalAiKlingVideoVideoToAudioRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoVideoToAudioRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoVideoToAudioRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKlingVideoVideoToAudioRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoVideoToAudioData = z.object({ + body: zSchemaKlingVideoVideoToAudioInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKlingVideoVideoToAudioResponse = zSchemaQueueStatus + +export const zGetFalAiKlingVideoVideoToAudioRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
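Pairing a result schema with its static counterpart gives both runtime and compile-time guarantees on the final payload. A sketch, assuming json already holds a fetched result body:

import { zSchemaSfxV15VideoToAudioOutput } from './zod.gen'

declare const json: unknown // assumed: parsed JSON of a completed result

const result = zSchemaSfxV15VideoToAudioOutput.parse(json)
for (const clip of result.audio) {
  console.log(clip.url) // one URL per generated sound-effect sample
}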
+ */ +export const zGetFalAiKlingVideoVideoToAudioRequestsByRequestIdResponse = + zSchemaKlingVideoVideoToAudioOutput + +export const zGetMireloAiSfxV1VideoToAudioRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetMireloAiSfxV1VideoToAudioRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutMireloAiSfxV1VideoToAudioRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutMireloAiSfxV1VideoToAudioRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostMireloAiSfxV1VideoToAudioData = z.object({ + body: zSchemaSfxV1VideoToAudioInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostMireloAiSfxV1VideoToAudioResponse = zSchemaQueueStatus + +export const zGetMireloAiSfxV1VideoToAudioRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetMireloAiSfxV1VideoToAudioRequestsByRequestIdResponse = + zSchemaSfxV1VideoToAudioOutput diff --git a/packages/typescript/ai-fal/src/generated/video-to-text/endpoint-map.ts b/packages/typescript/ai-fal/src/generated/video-to-text/endpoint-map.ts new file mode 100644 index 00000000..ab1efdfd --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/video-to-text/endpoint-map.ts @@ -0,0 +1,54 @@ +// AUTO-GENERATED - Do not edit manually +// Generated from types.gen.ts via scripts/generate-fal-endpoint-maps.ts + +import { + zSchemaRouterVideoEnterpriseInput, + zSchemaRouterVideoEnterpriseOutput, + zSchemaRouterVideoInput, + zSchemaRouterVideoOutput, +} from './zod.gen' + +import type { + SchemaRouterVideoEnterpriseInput, + SchemaRouterVideoEnterpriseOutput, + SchemaRouterVideoInput, + SchemaRouterVideoOutput, +} from './types.gen' + +import type { z } from 'zod' + +export type VideoToTextEndpointMap = { + 'openrouter/router/video/enterprise': { + input: SchemaRouterVideoEnterpriseInput + output: SchemaRouterVideoEnterpriseOutput + } + 'openrouter/router/video': { + input: SchemaRouterVideoInput + output: SchemaRouterVideoOutput + } +} + +/** Union type of all video-to-text model endpoint IDs */ +export type VideoToTextModel = keyof VideoToTextEndpointMap + +export const VideoToTextSchemaMap: Record< + VideoToTextModel, + { input: z.ZodSchema; output: z.ZodSchema } +> = { + ['openrouter/router/video/enterprise']: { + input: zSchemaRouterVideoEnterpriseInput, + output: zSchemaRouterVideoEnterpriseOutput, + }, + ['openrouter/router/video']: { + input: zSchemaRouterVideoInput, + output: zSchemaRouterVideoOutput, + }, +} as const + +/** Get the input type for a specific video-to-text model */ +export type VideoToTextModelInput<T extends VideoToTextModel> = + VideoToTextEndpointMap[T]['input'] + +/** Get the output type for a specific video-to-text model */ +export type VideoToTextModelOutput<T extends VideoToTextModel> = + VideoToTextEndpointMap[T]['output'] diff --git a/packages/typescript/ai-fal/src/generated/video-to-text/types.gen.ts b/packages/typescript/ai-fal/src/generated/video-to-text/types.gen.ts new file mode 100644 index 00000000..5a44b731 --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/video-to-text/types.gen.ts @@ -0,0 +1,383 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: 'https://queue.fal.run' | (string & {}) +} + +/** + * VideoOutput + */ +export type SchemaRouterVideoOutput = { + /** + * Usage + * + * Token usage information + */ + usage?: SchemaUsageInfo + /** + * Output + * + * Generated output from video processing + */ + output: string +} + +/** + * UsageInfo + */ +export type SchemaUsageInfo = { + /** + * Prompt Tokens + */ + prompt_tokens?: number + /** + * Total Tokens + */ + total_tokens?: number + /** + * Completion Tokens + */ + completion_tokens?: number + /** + * Cost + */ + cost: number +} + +/** + * VideoInput + */ +export type SchemaRouterVideoInput = { + /** + * Prompt + * + * Prompt to be used for the video processing + */ + prompt: string + /** + * Video Urls + * + * List of URLs or data URIs of video files to process. Supported formats: mp4, mpeg, mov, webm. For Google Gemini on AI Studio, YouTube links are also supported. Mutually exclusive with video_url. + */ + video_urls?: Array<string> + /** + * System Prompt + * + * System prompt to provide context or instructions to the model + */ + system_prompt?: string + /** + * Reasoning + * + * Should reasoning be the part of the final answer.
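The video-to-text map follows the same contract, so an endpoint id doubles as the lookup key for both the static types and the zod schemas. For instance (model name and URL are placeholders):

import type { VideoToTextModelInput } from './endpoint-map'

const videoPrompt: VideoToTextModelInput<'openrouter/router/video'> = {
  prompt: 'Describe the key events in this clip',
  model: 'example/vision-model', // hypothetical model id
  video_urls: ['https://example.com/clip.mp4'], // placeholder
}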
diff --git a/packages/typescript/ai-fal/src/generated/video-to-text/types.gen.ts b/packages/typescript/ai-fal/src/generated/video-to-text/types.gen.ts
new file mode 100644
index 00000000..5a44b731
--- /dev/null
+++ b/packages/typescript/ai-fal/src/generated/video-to-text/types.gen.ts
@@ -0,0 +1,383 @@
+// This file is auto-generated by @hey-api/openapi-ts
+
+export type ClientOptions = {
+  baseUrl: 'https://queue.fal.run' | (string & {})
+}
+
+/**
+ * VideoOutput
+ */
+export type SchemaRouterVideoOutput = {
+  /**
+   * Usage
+   *
+   * Token usage information
+   */
+  usage?: SchemaUsageInfo
+  /**
+   * Output
+   *
+   * Generated output from video processing
+   */
+  output: string
+}
+
+/**
+ * UsageInfo
+ */
+export type SchemaUsageInfo = {
+  /**
+   * Prompt Tokens
+   */
+  prompt_tokens?: number
+  /**
+   * Total Tokens
+   */
+  total_tokens?: number
+  /**
+   * Completion Tokens
+   */
+  completion_tokens?: number
+  /**
+   * Cost
+   */
+  cost: number
+}
+
+/**
+ * VideoInput
+ */
+export type SchemaRouterVideoInput = {
+  /**
+   * Prompt
+   *
+   * Prompt to be used for the video processing
+   */
+  prompt: string
+  /**
+   * Video Urls
+   *
+   * List of URLs or data URIs of video files to process. Supported formats: mp4, mpeg, mov, webm. For Google Gemini on AI Studio, YouTube links are also supported. Mutually exclusive with video_url.
+   */
+  video_urls?: Array<string>
+  /**
+   * System Prompt
+   *
+   * System prompt to provide context or instructions to the model
+   */
+  system_prompt?: string
+  /**
+   * Reasoning
+   *
+   * Should reasoning be the part of the final answer.
+   */
+  reasoning?: boolean
+  /**
+   * Model
+   *
+   * Name of the model to use. Charged based on actual token usage.
+   */
+  model: string
+  /**
+   * Max Tokens
+   *
+   * This sets the upper limit for the number of tokens the model can generate in response. It won't produce more than this limit. The maximum value is the context length minus the prompt length.
+   */
+  max_tokens?: number
+  /**
+   * Temperature
+   *
+   * This setting influences the variety in the model's responses. Lower values lead to more predictable and typical responses, while higher values encourage more diverse and less common responses. At 0, the model always gives the same response for a given input.
+   */
+  temperature?: number
+}
+
+/**
+ * VideoOutput
+ */
+export type SchemaRouterVideoEnterpriseOutput = {
+  /**
+   * Usage
+   *
+   * Token usage information
+   */
+  usage?: SchemaUsageInfo
+  /**
+   * Output
+   *
+   * Generated output from video processing
+   */
+  output: string
+}
+
+/**
+ * VideoEnterpriseInput
+ */
+export type SchemaRouterVideoEnterpriseInput = {
+  /**
+   * Prompt
+   *
+   * Prompt to be used for the video processing
+   */
+  prompt: string
+  /**
+   * Video Urls
+   *
+   * List of URLs or data URIs of video files to process. Supported formats: mp4, mpeg, mov, webm. For Google Gemini on AI Studio, YouTube links are also supported. Mutually exclusive with video_url.
+   */
+  video_urls?: Array<string>
+  /**
+   * System Prompt
+   *
+   * System prompt to provide context or instructions to the model
+   */
+  system_prompt?: string
+  /**
+   * Reasoning
+   *
+   * Should reasoning be the part of the final answer.
+   */
+  reasoning?: boolean
+  /**
+   * Model
+   *
+   * Name of the model to use. Charged based on actual token usage.
+   */
+  model: string
+  /**
+   * Max Tokens
+   *
+   * This sets the upper limit for the number of tokens the model can generate in response. It won't produce more than this limit. The maximum value is the context length minus the prompt length.
+   */
+  max_tokens?: number
+  /**
+   * Temperature
+   *
+   * This setting influences the variety in the model's responses. Lower values lead to more predictable and typical responses, while higher values encourage more diverse and less common responses. At 0, the model always gives the same response for a given input.
+   */
+  temperature?: number
+}
+
+export type SchemaQueueStatus = {
+  status: 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED'
+  /**
+   * The request id.
+   */
+  request_id: string
+  /**
+   * The response url.
+   */
+  response_url?: string
+  /**
+   * The status url.
+   */
+  status_url?: string
+  /**
+   * The cancel url.
+   */
+  cancel_url?: string
+  /**
+   * The logs.
+   */
+  logs?: {
+    [key: string]: unknown
+  }
+  /**
+   * The metrics.
+   */
+  metrics?: {
+    [key: string]: unknown
+  }
+  /**
+   * The queue position.
+   */
+  queue_position?: number
+}
+
+export type GetOpenrouterRouterVideoEnterpriseRequestsByRequestIdStatusData = {
+  body?: never
+  path: {
+    /**
+     * Request ID
+     */
+    request_id: string
+  }
+  query?: {
+    /**
+     * Whether to include logs (`1`) in the response or not (`0`).
+     */
+    logs?: number
+  }
+  url: '/openrouter/router/video/enterprise/requests/{request_id}/status'
+}
+
+export type GetOpenrouterRouterVideoEnterpriseRequestsByRequestIdStatusResponses =
+  {
+    /**
+     * The request status.
+ */ + 200: SchemaQueueStatus + } + +export type GetOpenrouterRouterVideoEnterpriseRequestsByRequestIdStatusResponse = + GetOpenrouterRouterVideoEnterpriseRequestsByRequestIdStatusResponses[keyof GetOpenrouterRouterVideoEnterpriseRequestsByRequestIdStatusResponses] + +export type PutOpenrouterRouterVideoEnterpriseRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/openrouter/router/video/enterprise/requests/{request_id}/cancel' +} + +export type PutOpenrouterRouterVideoEnterpriseRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutOpenrouterRouterVideoEnterpriseRequestsByRequestIdCancelResponse = + PutOpenrouterRouterVideoEnterpriseRequestsByRequestIdCancelResponses[keyof PutOpenrouterRouterVideoEnterpriseRequestsByRequestIdCancelResponses] + +export type PostOpenrouterRouterVideoEnterpriseData = { + body: SchemaRouterVideoEnterpriseInput + path?: never + query?: never + url: '/openrouter/router/video/enterprise' +} + +export type PostOpenrouterRouterVideoEnterpriseResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostOpenrouterRouterVideoEnterpriseResponse = + PostOpenrouterRouterVideoEnterpriseResponses[keyof PostOpenrouterRouterVideoEnterpriseResponses] + +export type GetOpenrouterRouterVideoEnterpriseRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/openrouter/router/video/enterprise/requests/{request_id}' +} + +export type GetOpenrouterRouterVideoEnterpriseRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaRouterVideoEnterpriseOutput +} + +export type GetOpenrouterRouterVideoEnterpriseRequestsByRequestIdResponse = + GetOpenrouterRouterVideoEnterpriseRequestsByRequestIdResponses[keyof GetOpenrouterRouterVideoEnterpriseRequestsByRequestIdResponses] + +export type GetOpenrouterRouterVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/openrouter/router/video/requests/{request_id}/status' +} + +export type GetOpenrouterRouterVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetOpenrouterRouterVideoRequestsByRequestIdStatusResponse = + GetOpenrouterRouterVideoRequestsByRequestIdStatusResponses[keyof GetOpenrouterRouterVideoRequestsByRequestIdStatusResponses] + +export type PutOpenrouterRouterVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/openrouter/router/video/requests/{request_id}/cancel' +} + +export type PutOpenrouterRouterVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutOpenrouterRouterVideoRequestsByRequestIdCancelResponse = + PutOpenrouterRouterVideoRequestsByRequestIdCancelResponses[keyof PutOpenrouterRouterVideoRequestsByRequestIdCancelResponses] + +export type PostOpenrouterRouterVideoData = { + body: SchemaRouterVideoInput + path?: never + query?: never + url: '/openrouter/router/video' +} + +export type PostOpenrouterRouterVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostOpenrouterRouterVideoResponse = + PostOpenrouterRouterVideoResponses[keyof PostOpenrouterRouterVideoResponses] + +export type GetOpenrouterRouterVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/openrouter/router/video/requests/{request_id}' +} + +export type GetOpenrouterRouterVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaRouterVideoOutput +} + +export type GetOpenrouterRouterVideoRequestsByRequestIdResponse = + GetOpenrouterRouterVideoRequestsByRequestIdResponses[keyof GetOpenrouterRouterVideoRequestsByRequestIdResponses] diff --git a/packages/typescript/ai-fal/src/generated/video-to-text/zod.gen.ts b/packages/typescript/ai-fal/src/generated/video-to-text/zod.gen.ts new file mode 100644 index 00000000..3eb61807 --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/video-to-text/zod.gen.ts @@ -0,0 +1,320 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { z } from 'zod' + +/** + * UsageInfo + */ +export const zSchemaUsageInfo = z.object({ + prompt_tokens: z.optional(z.int()), + total_tokens: z.optional(z.int()).default(0), + completion_tokens: z.optional(z.int()), + cost: z.number(), +}) + +/** + * VideoOutput + */ +export const zSchemaRouterVideoOutput = z.object({ + usage: z.optional(zSchemaUsageInfo), + output: z.string().register(z.globalRegistry, { + description: 'Generated output from video processing', + }), +}) + +/** + * VideoInput + */ +export const zSchemaRouterVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Prompt to be used for the video processing', + }), + video_urls: z.optional( + z.array(z.string()).register(z.globalRegistry, { + description: + 'List of URLs or data URIs of video files to process. Supported formats: mp4, mpeg, mov, webm. For Google Gemini on AI Studio, YouTube links are also supported. Mutually exclusive with video_url.', + }), + ), + system_prompt: z.optional( + z.string().register(z.globalRegistry, { + description: + 'System prompt to provide context or instructions to the model', + }), + ), + reasoning: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Should reasoning be the part of the final answer.', + }), + ) + .default(false), + model: z.string().register(z.globalRegistry, { + description: + 'Name of the model to use. Charged based on actual token usage.', + }), + max_tokens: z.optional( + z.int().gte(1).register(z.globalRegistry, { + description: + "This sets the upper limit for the number of tokens the model can generate in response. It won't produce more than this limit. The maximum value is the context length minus the prompt length.", + }), + ), + temperature: z + .optional( + z.number().gte(0).lte(2).register(z.globalRegistry, { + description: + "This setting influences the variety in the model's responses. 
Lower values lead to more predictable and typical responses, while higher values encourage more diverse and less common responses. At 0, the model always gives the same response for a given input.", + }), + ) + .default(1), +}) + +/** + * VideoOutput + */ +export const zSchemaRouterVideoEnterpriseOutput = z.object({ + usage: z.optional(zSchemaUsageInfo), + output: z.string().register(z.globalRegistry, { + description: 'Generated output from video processing', + }), +}) + +/** + * VideoEnterpriseInput + */ +export const zSchemaRouterVideoEnterpriseInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Prompt to be used for the video processing', + }), + video_urls: z.optional( + z.array(z.string()).register(z.globalRegistry, { + description: + 'List of URLs or data URIs of video files to process. Supported formats: mp4, mpeg, mov, webm. For Google Gemini on AI Studio, YouTube links are also supported. Mutually exclusive with video_url.', + }), + ), + system_prompt: z.optional( + z.string().register(z.globalRegistry, { + description: + 'System prompt to provide context or instructions to the model', + }), + ), + reasoning: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Should reasoning be the part of the final answer.', + }), + ) + .default(false), + model: z.string().register(z.globalRegistry, { + description: + 'Name of the model to use. Charged based on actual token usage.', + }), + max_tokens: z.optional( + z.int().gte(1).register(z.globalRegistry, { + description: + "This sets the upper limit for the number of tokens the model can generate in response. It won't produce more than this limit. The maximum value is the context length minus the prompt length.", + }), + ), + temperature: z + .optional( + z.number().gte(0).lte(2).register(z.globalRegistry, { + description: + "This setting influences the variety in the model's responses. Lower values lead to more predictable and typical responses, while higher values encourage more diverse and less common responses. At 0, the model always gives the same response for a given input.", + }), + ) + .default(1), +}) + +export const zSchemaQueueStatus = z.object({ + status: z.enum(['IN_QUEUE', 'IN_PROGRESS', 'COMPLETED']), + request_id: z.string().register(z.globalRegistry, { + description: 'The request id.', + }), + response_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The response url.', + }), + ), + status_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The status url.', + }), + ), + cancel_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The cancel url.', + }), + ), + logs: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The logs.', + }), + ), + metrics: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The metrics.', + }), + ), + queue_position: z.optional( + z.int().register(z.globalRegistry, { + description: 'The queue position.', + }), + ), +}) + +export const zGetOpenrouterRouterVideoEnterpriseRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetOpenrouterRouterVideoEnterpriseRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutOpenrouterRouterVideoEnterpriseRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutOpenrouterRouterVideoEnterpriseRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostOpenrouterRouterVideoEnterpriseData = z.object({ + body: zSchemaRouterVideoEnterpriseInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostOpenrouterRouterVideoEnterpriseResponse = zSchemaQueueStatus + +export const zGetOpenrouterRouterVideoEnterpriseRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetOpenrouterRouterVideoEnterpriseRequestsByRequestIdResponse = + zSchemaRouterVideoEnterpriseOutput + +export const zGetOpenrouterRouterVideoRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetOpenrouterRouterVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutOpenrouterRouterVideoRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutOpenrouterRouterVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostOpenrouterRouterVideoData = z.object({ + body: zSchemaRouterVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostOpenrouterRouterVideoResponse = zSchemaQueueStatus + +export const zGetOpenrouterRouterVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetOpenrouterRouterVideoRequestsByRequestIdResponse = + zSchemaRouterVideoOutput diff --git a/packages/typescript/ai-fal/src/generated/video-to-video/endpoint-map.ts b/packages/typescript/ai-fal/src/generated/video-to-video/endpoint-map.ts new file mode 100644 index 00000000..62990346 --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/video-to-video/endpoint-map.ts @@ -0,0 +1,1574 @@ +// AUTO-GENERATED - Do not edit manually +// Generated from types.gen.ts via scripts/generate-fal-endpoint-maps.ts + +import { + zSchemaAiFaceSwapFaceswapvideoInput, + zSchemaAiFaceSwapFaceswapvideoOutput, + zSchemaAmtInterpolationInput, + zSchemaAmtInterpolationOutput, + zSchemaAutoCaptionInput, + zSchemaAutoCaptionOutput, + zSchemaBenV2VideoInput, + zSchemaBenV2VideoOutput, + zSchemaBirefnetV2VideoInput, + zSchemaBirefnetV2VideoOutput, + zSchemaBriaVideoEraserEraseKeypointsInput, + zSchemaBriaVideoEraserEraseKeypointsOutput, + zSchemaBriaVideoEraserEraseMaskInput, + zSchemaBriaVideoEraserEraseMaskOutput, + zSchemaBriaVideoEraserErasePromptInput, + zSchemaBriaVideoEraserErasePromptOutput, + zSchemaBytedanceUpscalerUpscaleVideoInput, + zSchemaBytedanceUpscalerUpscaleVideoOutput, + zSchemaCogvideox5bVideoToVideoInput, + zSchemaCogvideox5bVideoToVideoOutput, + zSchemaControlnextInput, + zSchemaControlnextOutput, + zSchemaCrystalVideoUpscalerInput, + zSchemaCrystalVideoUpscalerOutput, + zSchemaDubbingInput, + zSchemaDubbingOutput, + zSchemaDwposeVideoInput, + zSchemaDwposeVideoOutput, + zSchemaEdittoInput, + zSchemaEdittoOutput, + zSchemaFastAnimatediffTurboVideoToVideoInput, + zSchemaFastAnimatediffTurboVideoToVideoOutput, + zSchemaFastAnimatediffVideoToVideoInput, + zSchemaFastAnimatediffVideoToVideoOutput, + zSchemaFfmpegApiComposeInput, + zSchemaFfmpegApiComposeOutput, + zSchemaFfmpegApiMergeAudioVideoInput, + zSchemaFfmpegApiMergeAudioVideoOutput, + zSchemaFfmpegApiMergeVideosInput, + zSchemaFfmpegApiMergeVideosOutput, + zSchemaFilmVideoInput, + zSchemaFilmVideoOutput, + zSchemaFlashvsrUpscaleVideoInput, + zSchemaFlashvsrUpscaleVideoOutput, + zSchemaHunyuanVideoFoleyInput, + zSchemaHunyuanVideoFoleyOutput, + zSchemaHunyuanVideoLoraVideoToVideoInput, + zSchemaHunyuanVideoLoraVideoToVideoOutput, + zSchemaHunyuanVideoVideoToVideoInput, + zSchemaHunyuanVideoVideoToVideoOutput, + zSchemaInfinitalkInput, + zSchemaInfinitalkOutput, + zSchemaInfinitalkVideoToVideoInput, + zSchemaInfinitalkVideoToVideoOutput, + zSchemaKlingVideoO1StandardVideoToVideoEditInput, + zSchemaKlingVideoO1StandardVideoToVideoEditOutput, + zSchemaKlingVideoO1StandardVideoToVideoReferenceInput, + zSchemaKlingVideoO1StandardVideoToVideoReferenceOutput, + zSchemaKlingVideoO1VideoToVideoEditInput, + zSchemaKlingVideoO1VideoToVideoEditOutput, + zSchemaKlingVideoO1VideoToVideoReferenceInput, + zSchemaKlingVideoO1VideoToVideoReferenceOutput, + zSchemaKlingVideoV26ProMotionControlInput, + zSchemaKlingVideoV26ProMotionControlOutput, + zSchemaKlingVideoV26StandardMotionControlInput, + zSchemaKlingVideoV26StandardMotionControlOutput, + zSchemaKreaWan14bVideoToVideoInput, + zSchemaKreaWan14bVideoToVideoOutput, + zSchemaLatentsyncInput, + zSchemaLatentsyncOutput, + zSchemaLightxRecameraInput, + zSchemaLightxRecameraOutput, + zSchemaLightxRelightInput, + zSchemaLightxRelightOutput, + zSchemaLipsyncInput, + zSchemaLipsyncOutput, + zSchemaLtx219bDistilledExtendVideoInput, + zSchemaLtx219bDistilledExtendVideoLoraInput, + zSchemaLtx219bDistilledExtendVideoLoraOutput, + zSchemaLtx219bDistilledExtendVideoOutput, + 
zSchemaLtx219bDistilledVideoToVideoInput, + zSchemaLtx219bDistilledVideoToVideoLoraInput, + zSchemaLtx219bDistilledVideoToVideoLoraOutput, + zSchemaLtx219bDistilledVideoToVideoOutput, + zSchemaLtx219bExtendVideoInput, + zSchemaLtx219bExtendVideoLoraInput, + zSchemaLtx219bExtendVideoLoraOutput, + zSchemaLtx219bExtendVideoOutput, + zSchemaLtx219bVideoToVideoInput, + zSchemaLtx219bVideoToVideoLoraInput, + zSchemaLtx219bVideoToVideoLoraOutput, + zSchemaLtx219bVideoToVideoOutput, + zSchemaLtx2RetakeVideoInput, + zSchemaLtx2RetakeVideoOutput, + zSchemaLtxVideo13bDevExtendInput, + zSchemaLtxVideo13bDevExtendOutput, + zSchemaLtxVideo13bDevMulticonditioningInput, + zSchemaLtxVideo13bDevMulticonditioningOutput, + zSchemaLtxVideo13bDistilledExtendInput, + zSchemaLtxVideo13bDistilledExtendOutput, + zSchemaLtxVideo13bDistilledMulticonditioningInput, + zSchemaLtxVideo13bDistilledMulticonditioningOutput, + zSchemaLtxVideoLoraMulticonditioningInput, + zSchemaLtxVideoLoraMulticonditioningOutput, + zSchemaLtxVideoV095ExtendInput, + zSchemaLtxVideoV095ExtendOutput, + zSchemaLtxVideoV095MulticonditioningInput, + zSchemaLtxVideoV095MulticonditioningOutput, + zSchemaLtxv13B098DistilledExtendInput, + zSchemaLtxv13B098DistilledExtendOutput, + zSchemaLtxv13B098DistilledMulticonditioningInput, + zSchemaLtxv13B098DistilledMulticonditioningOutput, + zSchemaLucyEditDevInput, + zSchemaLucyEditDevOutput, + zSchemaLucyEditFastInput, + zSchemaLucyEditFastOutput, + zSchemaLucyEditProInput, + zSchemaLucyEditProOutput, + zSchemaLucyRestyleInput, + zSchemaLucyRestyleOutput, + zSchemaLumaDreamMachineRay2FlashModifyInput, + zSchemaLumaDreamMachineRay2FlashModifyOutput, + zSchemaLumaDreamMachineRay2FlashReframeInput, + zSchemaLumaDreamMachineRay2FlashReframeOutput, + zSchemaLumaDreamMachineRay2ModifyInput, + zSchemaLumaDreamMachineRay2ModifyOutput, + zSchemaLumaDreamMachineRay2ReframeInput, + zSchemaLumaDreamMachineRay2ReframeOutput, + zSchemaMagiDistilledExtendVideoInput, + zSchemaMagiDistilledExtendVideoOutput, + zSchemaMagiExtendVideoInput, + zSchemaMagiExtendVideoOutput, + zSchemaMareyMotionTransferInput, + zSchemaMareyMotionTransferOutput, + zSchemaMareyPoseTransferInput, + zSchemaMareyPoseTransferOutput, + zSchemaMmaudioV2Input, + zSchemaMmaudioV2Output, + zSchemaOneToAllAnimation13bInput, + zSchemaOneToAllAnimation13bOutput, + zSchemaOneToAllAnimation14bInput, + zSchemaOneToAllAnimation14bOutput, + zSchemaPikaV2PikadditionsInput, + zSchemaPikaV2PikadditionsOutput, + zSchemaPixverseExtendFastInput, + zSchemaPixverseExtendFastOutput, + zSchemaPixverseExtendInput, + zSchemaPixverseExtendOutput, + zSchemaPixverseLipsyncInput, + zSchemaPixverseLipsyncOutput, + zSchemaPixverseSoundEffectsInput, + zSchemaPixverseSoundEffectsOutput, + zSchemaRifeVideoInput, + zSchemaRifeVideoOutput, + zSchemaSam2VideoInput, + zSchemaSam2VideoOutput, + zSchemaSam3VideoInput, + zSchemaSam3VideoOutput, + zSchemaSam3VideoRleInput, + zSchemaSam3VideoRleOutput, + zSchemaScailInput, + zSchemaScailOutput, + zSchemaSeedvrUpscaleVideoInput, + zSchemaSeedvrUpscaleVideoOutput, + zSchemaSfxV15VideoToVideoInput, + zSchemaSfxV15VideoToVideoOutput, + zSchemaSfxV1VideoToVideoInput, + zSchemaSfxV1VideoToVideoOutput, + zSchemaSora2VideoToVideoRemixInput, + zSchemaSora2VideoToVideoRemixOutput, + zSchemaSteadyDancerInput, + zSchemaSteadyDancerOutput, + zSchemaSyncLipsyncInput, + zSchemaSyncLipsyncOutput, + zSchemaSyncLipsyncReact1Input, + zSchemaSyncLipsyncReact1Output, + zSchemaSyncLipsyncV2Input, + zSchemaSyncLipsyncV2Output, + zSchemaSyncLipsyncV2ProInput, + 
zSchemaSyncLipsyncV2ProOutput, + zSchemaThinksoundAudioInput, + zSchemaThinksoundAudioOutput, + zSchemaThinksoundInput, + zSchemaThinksoundOutput, + zSchemaTopazUpscaleVideoInput, + zSchemaTopazUpscaleVideoOutput, + zSchemaV26ReferenceToVideoInput, + zSchemaV26ReferenceToVideoOutput, + zSchemaVeo31ExtendVideoInput, + zSchemaVeo31ExtendVideoOutput, + zSchemaVeo31FastExtendVideoInput, + zSchemaVeo31FastExtendVideoOutput, + zSchemaVideoAsPromptInput, + zSchemaVideoAsPromptOutput, + zSchemaVideoBackgroundRemovalFastInput, + zSchemaVideoBackgroundRemovalFastOutput, + zSchemaVideoBackgroundRemovalGreenScreenInput, + zSchemaVideoBackgroundRemovalGreenScreenOutput, + zSchemaVideoBackgroundRemovalInput, + zSchemaVideoBackgroundRemovalOutput, + zSchemaVideoEraseKeypointsInput, + zSchemaVideoEraseKeypointsOutput, + zSchemaVideoEraseMaskInput, + zSchemaVideoEraseMaskOutput, + zSchemaVideoErasePromptInput, + zSchemaVideoErasePromptOutput, + zSchemaVideoIncreaseResolutionInput, + zSchemaVideoIncreaseResolutionOutput, + zSchemaVideoSoundEffectsGeneratorInput, + zSchemaVideoSoundEffectsGeneratorOutput, + zSchemaVideoUpscalerInput, + zSchemaVideoUpscalerOutput, + zSchemaViduQ2VideoExtensionProInput, + zSchemaViduQ2VideoExtensionProOutput, + zSchemaWan22VaceFunA14bDepthInput, + zSchemaWan22VaceFunA14bDepthOutput, + zSchemaWan22VaceFunA14bInpaintingInput, + zSchemaWan22VaceFunA14bInpaintingOutput, + zSchemaWan22VaceFunA14bOutpaintingInput, + zSchemaWan22VaceFunA14bOutpaintingOutput, + zSchemaWan22VaceFunA14bPoseInput, + zSchemaWan22VaceFunA14bPoseOutput, + zSchemaWan22VaceFunA14bReframeInput, + zSchemaWan22VaceFunA14bReframeOutput, + zSchemaWanFunControlInput, + zSchemaWanFunControlOutput, + zSchemaWanV2214bAnimateMoveInput, + zSchemaWanV2214bAnimateMoveOutput, + zSchemaWanV2214bAnimateReplaceInput, + zSchemaWanV2214bAnimateReplaceOutput, + zSchemaWanV22A14bVideoToVideoInput, + zSchemaWanV22A14bVideoToVideoOutput, + zSchemaWanVace13bInput, + zSchemaWanVace13bOutput, + zSchemaWanVace14bDepthInput, + zSchemaWanVace14bDepthOutput, + zSchemaWanVace14bInpaintingInput, + zSchemaWanVace14bInpaintingOutput, + zSchemaWanVace14bInput, + zSchemaWanVace14bOutpaintingInput, + zSchemaWanVace14bOutpaintingOutput, + zSchemaWanVace14bOutput, + zSchemaWanVace14bPoseInput, + zSchemaWanVace14bPoseOutput, + zSchemaWanVace14bReframeInput, + zSchemaWanVace14bReframeOutput, + zSchemaWanVaceAppsLongReframeInput, + zSchemaWanVaceAppsLongReframeOutput, + zSchemaWanVaceAppsVideoEditInput, + zSchemaWanVaceAppsVideoEditOutput, + zSchemaWanVaceInput, + zSchemaWanVaceOutput, + zSchemaWanVisionEnhancerInput, + zSchemaWanVisionEnhancerOutput, + zSchemaWorkflowUtilitiesAutoSubtitleInput, + zSchemaWorkflowUtilitiesAutoSubtitleOutput, +} from './zod.gen' + +import type { + SchemaAiFaceSwapFaceswapvideoInput, + SchemaAiFaceSwapFaceswapvideoOutput, + SchemaAmtInterpolationInput, + SchemaAmtInterpolationOutput, + SchemaAutoCaptionInput, + SchemaAutoCaptionOutput, + SchemaBenV2VideoInput, + SchemaBenV2VideoOutput, + SchemaBirefnetV2VideoInput, + SchemaBirefnetV2VideoOutput, + SchemaBriaVideoEraserEraseKeypointsInput, + SchemaBriaVideoEraserEraseKeypointsOutput, + SchemaBriaVideoEraserEraseMaskInput, + SchemaBriaVideoEraserEraseMaskOutput, + SchemaBriaVideoEraserErasePromptInput, + SchemaBriaVideoEraserErasePromptOutput, + SchemaBytedanceUpscalerUpscaleVideoInput, + SchemaBytedanceUpscalerUpscaleVideoOutput, + SchemaCogvideox5bVideoToVideoInput, + SchemaCogvideox5bVideoToVideoOutput, + SchemaControlnextInput, + SchemaControlnextOutput, + 
SchemaCrystalVideoUpscalerInput, + SchemaCrystalVideoUpscalerOutput, + SchemaDubbingInput, + SchemaDubbingOutput, + SchemaDwposeVideoInput, + SchemaDwposeVideoOutput, + SchemaEdittoInput, + SchemaEdittoOutput, + SchemaFastAnimatediffTurboVideoToVideoInput, + SchemaFastAnimatediffTurboVideoToVideoOutput, + SchemaFastAnimatediffVideoToVideoInput, + SchemaFastAnimatediffVideoToVideoOutput, + SchemaFfmpegApiComposeInput, + SchemaFfmpegApiComposeOutput, + SchemaFfmpegApiMergeAudioVideoInput, + SchemaFfmpegApiMergeAudioVideoOutput, + SchemaFfmpegApiMergeVideosInput, + SchemaFfmpegApiMergeVideosOutput, + SchemaFilmVideoInput, + SchemaFilmVideoOutput, + SchemaFlashvsrUpscaleVideoInput, + SchemaFlashvsrUpscaleVideoOutput, + SchemaHunyuanVideoFoleyInput, + SchemaHunyuanVideoFoleyOutput, + SchemaHunyuanVideoLoraVideoToVideoInput, + SchemaHunyuanVideoLoraVideoToVideoOutput, + SchemaHunyuanVideoVideoToVideoInput, + SchemaHunyuanVideoVideoToVideoOutput, + SchemaInfinitalkInput, + SchemaInfinitalkOutput, + SchemaInfinitalkVideoToVideoInput, + SchemaInfinitalkVideoToVideoOutput, + SchemaKlingVideoO1StandardVideoToVideoEditInput, + SchemaKlingVideoO1StandardVideoToVideoEditOutput, + SchemaKlingVideoO1StandardVideoToVideoReferenceInput, + SchemaKlingVideoO1StandardVideoToVideoReferenceOutput, + SchemaKlingVideoO1VideoToVideoEditInput, + SchemaKlingVideoO1VideoToVideoEditOutput, + SchemaKlingVideoO1VideoToVideoReferenceInput, + SchemaKlingVideoO1VideoToVideoReferenceOutput, + SchemaKlingVideoV26ProMotionControlInput, + SchemaKlingVideoV26ProMotionControlOutput, + SchemaKlingVideoV26StandardMotionControlInput, + SchemaKlingVideoV26StandardMotionControlOutput, + SchemaKreaWan14bVideoToVideoInput, + SchemaKreaWan14bVideoToVideoOutput, + SchemaLatentsyncInput, + SchemaLatentsyncOutput, + SchemaLightxRecameraInput, + SchemaLightxRecameraOutput, + SchemaLightxRelightInput, + SchemaLightxRelightOutput, + SchemaLipsyncInput, + SchemaLipsyncOutput, + SchemaLtx219bDistilledExtendVideoInput, + SchemaLtx219bDistilledExtendVideoLoraInput, + SchemaLtx219bDistilledExtendVideoLoraOutput, + SchemaLtx219bDistilledExtendVideoOutput, + SchemaLtx219bDistilledVideoToVideoInput, + SchemaLtx219bDistilledVideoToVideoLoraInput, + SchemaLtx219bDistilledVideoToVideoLoraOutput, + SchemaLtx219bDistilledVideoToVideoOutput, + SchemaLtx219bExtendVideoInput, + SchemaLtx219bExtendVideoLoraInput, + SchemaLtx219bExtendVideoLoraOutput, + SchemaLtx219bExtendVideoOutput, + SchemaLtx219bVideoToVideoInput, + SchemaLtx219bVideoToVideoLoraInput, + SchemaLtx219bVideoToVideoLoraOutput, + SchemaLtx219bVideoToVideoOutput, + SchemaLtx2RetakeVideoInput, + SchemaLtx2RetakeVideoOutput, + SchemaLtxVideo13bDevExtendInput, + SchemaLtxVideo13bDevExtendOutput, + SchemaLtxVideo13bDevMulticonditioningInput, + SchemaLtxVideo13bDevMulticonditioningOutput, + SchemaLtxVideo13bDistilledExtendInput, + SchemaLtxVideo13bDistilledExtendOutput, + SchemaLtxVideo13bDistilledMulticonditioningInput, + SchemaLtxVideo13bDistilledMulticonditioningOutput, + SchemaLtxVideoLoraMulticonditioningInput, + SchemaLtxVideoLoraMulticonditioningOutput, + SchemaLtxVideoV095ExtendInput, + SchemaLtxVideoV095ExtendOutput, + SchemaLtxVideoV095MulticonditioningInput, + SchemaLtxVideoV095MulticonditioningOutput, + SchemaLtxv13B098DistilledExtendInput, + SchemaLtxv13B098DistilledExtendOutput, + SchemaLtxv13B098DistilledMulticonditioningInput, + SchemaLtxv13B098DistilledMulticonditioningOutput, + SchemaLucyEditDevInput, + SchemaLucyEditDevOutput, + SchemaLucyEditFastInput, + SchemaLucyEditFastOutput, + 
SchemaLucyEditProInput, + SchemaLucyEditProOutput, + SchemaLucyRestyleInput, + SchemaLucyRestyleOutput, + SchemaLumaDreamMachineRay2FlashModifyInput, + SchemaLumaDreamMachineRay2FlashModifyOutput, + SchemaLumaDreamMachineRay2FlashReframeInput, + SchemaLumaDreamMachineRay2FlashReframeOutput, + SchemaLumaDreamMachineRay2ModifyInput, + SchemaLumaDreamMachineRay2ModifyOutput, + SchemaLumaDreamMachineRay2ReframeInput, + SchemaLumaDreamMachineRay2ReframeOutput, + SchemaMagiDistilledExtendVideoInput, + SchemaMagiDistilledExtendVideoOutput, + SchemaMagiExtendVideoInput, + SchemaMagiExtendVideoOutput, + SchemaMareyMotionTransferInput, + SchemaMareyMotionTransferOutput, + SchemaMareyPoseTransferInput, + SchemaMareyPoseTransferOutput, + SchemaMmaudioV2Input, + SchemaMmaudioV2Output, + SchemaOneToAllAnimation13bInput, + SchemaOneToAllAnimation13bOutput, + SchemaOneToAllAnimation14bInput, + SchemaOneToAllAnimation14bOutput, + SchemaPikaV2PikadditionsInput, + SchemaPikaV2PikadditionsOutput, + SchemaPixverseExtendFastInput, + SchemaPixverseExtendFastOutput, + SchemaPixverseExtendInput, + SchemaPixverseExtendOutput, + SchemaPixverseLipsyncInput, + SchemaPixverseLipsyncOutput, + SchemaPixverseSoundEffectsInput, + SchemaPixverseSoundEffectsOutput, + SchemaRifeVideoInput, + SchemaRifeVideoOutput, + SchemaSam2VideoInput, + SchemaSam2VideoOutput, + SchemaSam3VideoInput, + SchemaSam3VideoOutput, + SchemaSam3VideoRleInput, + SchemaSam3VideoRleOutput, + SchemaScailInput, + SchemaScailOutput, + SchemaSeedvrUpscaleVideoInput, + SchemaSeedvrUpscaleVideoOutput, + SchemaSfxV15VideoToVideoInput, + SchemaSfxV15VideoToVideoOutput, + SchemaSfxV1VideoToVideoInput, + SchemaSfxV1VideoToVideoOutput, + SchemaSora2VideoToVideoRemixInput, + SchemaSora2VideoToVideoRemixOutput, + SchemaSteadyDancerInput, + SchemaSteadyDancerOutput, + SchemaSyncLipsyncInput, + SchemaSyncLipsyncOutput, + SchemaSyncLipsyncReact1Input, + SchemaSyncLipsyncReact1Output, + SchemaSyncLipsyncV2Input, + SchemaSyncLipsyncV2Output, + SchemaSyncLipsyncV2ProInput, + SchemaSyncLipsyncV2ProOutput, + SchemaThinksoundAudioInput, + SchemaThinksoundAudioOutput, + SchemaThinksoundInput, + SchemaThinksoundOutput, + SchemaTopazUpscaleVideoInput, + SchemaTopazUpscaleVideoOutput, + SchemaV26ReferenceToVideoInput, + SchemaV26ReferenceToVideoOutput, + SchemaVeo31ExtendVideoInput, + SchemaVeo31ExtendVideoOutput, + SchemaVeo31FastExtendVideoInput, + SchemaVeo31FastExtendVideoOutput, + SchemaVideoAsPromptInput, + SchemaVideoAsPromptOutput, + SchemaVideoBackgroundRemovalFastInput, + SchemaVideoBackgroundRemovalFastOutput, + SchemaVideoBackgroundRemovalGreenScreenInput, + SchemaVideoBackgroundRemovalGreenScreenOutput, + SchemaVideoBackgroundRemovalInput, + SchemaVideoBackgroundRemovalOutput, + SchemaVideoEraseKeypointsInput, + SchemaVideoEraseKeypointsOutput, + SchemaVideoEraseMaskInput, + SchemaVideoEraseMaskOutput, + SchemaVideoErasePromptInput, + SchemaVideoErasePromptOutput, + SchemaVideoIncreaseResolutionInput, + SchemaVideoIncreaseResolutionOutput, + SchemaVideoSoundEffectsGeneratorInput, + SchemaVideoSoundEffectsGeneratorOutput, + SchemaVideoUpscalerInput, + SchemaVideoUpscalerOutput, + SchemaViduQ2VideoExtensionProInput, + SchemaViduQ2VideoExtensionProOutput, + SchemaWan22VaceFunA14bDepthInput, + SchemaWan22VaceFunA14bDepthOutput, + SchemaWan22VaceFunA14bInpaintingInput, + SchemaWan22VaceFunA14bInpaintingOutput, + SchemaWan22VaceFunA14bOutpaintingInput, + SchemaWan22VaceFunA14bOutpaintingOutput, + SchemaWan22VaceFunA14bPoseInput, + SchemaWan22VaceFunA14bPoseOutput, + 
SchemaWan22VaceFunA14bReframeInput, + SchemaWan22VaceFunA14bReframeOutput, + SchemaWanFunControlInput, + SchemaWanFunControlOutput, + SchemaWanV2214bAnimateMoveInput, + SchemaWanV2214bAnimateMoveOutput, + SchemaWanV2214bAnimateReplaceInput, + SchemaWanV2214bAnimateReplaceOutput, + SchemaWanV22A14bVideoToVideoInput, + SchemaWanV22A14bVideoToVideoOutput, + SchemaWanVace13bInput, + SchemaWanVace13bOutput, + SchemaWanVace14bDepthInput, + SchemaWanVace14bDepthOutput, + SchemaWanVace14bInpaintingInput, + SchemaWanVace14bInpaintingOutput, + SchemaWanVace14bInput, + SchemaWanVace14bOutpaintingInput, + SchemaWanVace14bOutpaintingOutput, + SchemaWanVace14bOutput, + SchemaWanVace14bPoseInput, + SchemaWanVace14bPoseOutput, + SchemaWanVace14bReframeInput, + SchemaWanVace14bReframeOutput, + SchemaWanVaceAppsLongReframeInput, + SchemaWanVaceAppsLongReframeOutput, + SchemaWanVaceAppsVideoEditInput, + SchemaWanVaceAppsVideoEditOutput, + SchemaWanVaceInput, + SchemaWanVaceOutput, + SchemaWanVisionEnhancerInput, + SchemaWanVisionEnhancerOutput, + SchemaWorkflowUtilitiesAutoSubtitleInput, + SchemaWorkflowUtilitiesAutoSubtitleOutput, +} from './types.gen' + +import type { z } from 'zod' + +export type VideoToVideoEndpointMap = { + 'bria/video/background-removal': { + input: SchemaVideoBackgroundRemovalInput + output: SchemaVideoBackgroundRemovalOutput + } + 'fal-ai/mmaudio-v2': { + input: SchemaMmaudioV2Input + output: SchemaMmaudioV2Output + } + 'half-moon-ai/ai-face-swap/faceswapvideo': { + input: SchemaAiFaceSwapFaceswapvideoInput + output: SchemaAiFaceSwapFaceswapvideoOutput + } + 'fal-ai/ltx-2-19b/distilled/video-to-video/lora': { + input: SchemaLtx219bDistilledVideoToVideoLoraInput + output: SchemaLtx219bDistilledVideoToVideoLoraOutput + } + 'fal-ai/ltx-2-19b/distilled/video-to-video': { + input: SchemaLtx219bDistilledVideoToVideoInput + output: SchemaLtx219bDistilledVideoToVideoOutput + } + 'fal-ai/ltx-2-19b/video-to-video/lora': { + input: SchemaLtx219bVideoToVideoLoraInput + output: SchemaLtx219bVideoToVideoLoraOutput + } + 'fal-ai/ltx-2-19b/video-to-video': { + input: SchemaLtx219bVideoToVideoInput + output: SchemaLtx219bVideoToVideoOutput + } + 'fal-ai/ltx-2-19b/distilled/extend-video/lora': { + input: SchemaLtx219bDistilledExtendVideoLoraInput + output: SchemaLtx219bDistilledExtendVideoLoraOutput + } + 'fal-ai/ltx-2-19b/distilled/extend-video': { + input: SchemaLtx219bDistilledExtendVideoInput + output: SchemaLtx219bDistilledExtendVideoOutput + } + 'fal-ai/ltx-2-19b/extend-video/lora': { + input: SchemaLtx219bExtendVideoLoraInput + output: SchemaLtx219bExtendVideoLoraOutput + } + 'fal-ai/ltx-2-19b/extend-video': { + input: SchemaLtx219bExtendVideoInput + output: SchemaLtx219bExtendVideoOutput + } + 'bria/video/erase/keypoints': { + input: SchemaVideoEraseKeypointsInput + output: SchemaVideoEraseKeypointsOutput + } + 'bria/video/erase/prompt': { + input: SchemaVideoErasePromptInput + output: SchemaVideoErasePromptOutput + } + 'bria/video/erase/mask': { + input: SchemaVideoEraseMaskInput + output: SchemaVideoEraseMaskOutput + } + 'fal-ai/lightx/relight': { + input: SchemaLightxRelightInput + output: SchemaLightxRelightOutput + } + 'fal-ai/lightx/recamera': { + input: SchemaLightxRecameraInput + output: SchemaLightxRecameraOutput + } + 'fal-ai/kling-video/v2.6/standard/motion-control': { + input: SchemaKlingVideoV26StandardMotionControlInput + output: SchemaKlingVideoV26StandardMotionControlOutput + } + 'fal-ai/kling-video/v2.6/pro/motion-control': { + input: SchemaKlingVideoV26ProMotionControlInput + 
output: SchemaKlingVideoV26ProMotionControlOutput + } + 'decart/lucy-restyle': { + input: SchemaLucyRestyleInput + output: SchemaLucyRestyleOutput + } + 'fal-ai/scail': { + input: SchemaScailInput + output: SchemaScailOutput + } + 'clarityai/crystal-video-upscaler': { + input: SchemaCrystalVideoUpscalerInput + output: SchemaCrystalVideoUpscalerOutput + } + 'bria/bria_video_eraser/erase/mask': { + input: SchemaBriaVideoEraserEraseMaskInput + output: SchemaBriaVideoEraserEraseMaskOutput + } + 'bria/bria_video_eraser/erase/keypoints': { + input: SchemaBriaVideoEraserEraseKeypointsInput + output: SchemaBriaVideoEraserEraseKeypointsOutput + } + 'bria/bria_video_eraser/erase/prompt': { + input: SchemaBriaVideoEraserErasePromptInput + output: SchemaBriaVideoEraserErasePromptOutput + } + 'wan/v2.6/reference-to-video': { + input: SchemaV26ReferenceToVideoInput + output: SchemaV26ReferenceToVideoOutput + } + 'fal-ai/veo3.1/fast/extend-video': { + input: SchemaVeo31FastExtendVideoInput + output: SchemaVeo31FastExtendVideoOutput + } + 'fal-ai/veo3.1/extend-video': { + input: SchemaVeo31ExtendVideoInput + output: SchemaVeo31ExtendVideoOutput + } + 'fal-ai/kling-video/o1/standard/video-to-video/reference': { + input: SchemaKlingVideoO1StandardVideoToVideoReferenceInput + output: SchemaKlingVideoO1StandardVideoToVideoReferenceOutput + } + 'fal-ai/kling-video/o1/standard/video-to-video/edit': { + input: SchemaKlingVideoO1StandardVideoToVideoEditInput + output: SchemaKlingVideoO1StandardVideoToVideoEditOutput + } + 'fal-ai/steady-dancer': { + input: SchemaSteadyDancerInput + output: SchemaSteadyDancerOutput + } + 'fal-ai/one-to-all-animation/1.3b': { + input: SchemaOneToAllAnimation13bInput + output: SchemaOneToAllAnimation13bOutput + } + 'fal-ai/one-to-all-animation/14b': { + input: SchemaOneToAllAnimation14bInput + output: SchemaOneToAllAnimation14bOutput + } + 'fal-ai/wan-vision-enhancer': { + input: SchemaWanVisionEnhancerInput + output: SchemaWanVisionEnhancerOutput + } + 'fal-ai/sync-lipsync/react-1': { + input: SchemaSyncLipsyncReact1Input + output: SchemaSyncLipsyncReact1Output + } + 'veed/video-background-removal/fast': { + input: SchemaVideoBackgroundRemovalFastInput + output: SchemaVideoBackgroundRemovalFastOutput + } + 'fal-ai/kling-video/o1/video-to-video/edit': { + input: SchemaKlingVideoO1VideoToVideoEditInput + output: SchemaKlingVideoO1VideoToVideoEditOutput + } + 'fal-ai/kling-video/o1/video-to-video/reference': { + input: SchemaKlingVideoO1VideoToVideoReferenceInput + output: SchemaKlingVideoO1VideoToVideoReferenceOutput + } + 'veed/video-background-removal': { + input: SchemaVideoBackgroundRemovalInput + output: SchemaVideoBackgroundRemovalOutput + } + 'veed/video-background-removal/green-screen': { + input: SchemaVideoBackgroundRemovalGreenScreenInput + output: SchemaVideoBackgroundRemovalGreenScreenOutput + } + 'fal-ai/ltx-2/retake-video': { + input: SchemaLtx2RetakeVideoInput + output: SchemaLtx2RetakeVideoOutput + } + 'decart/lucy-edit/fast': { + input: SchemaLucyEditFastInput + output: SchemaLucyEditFastOutput + } + 'fal-ai/sam-3/video-rle': { + input: SchemaSam3VideoRleInput + output: SchemaSam3VideoRleOutput + } + 'fal-ai/sam-3/video': { + input: SchemaSam3VideoInput + output: SchemaSam3VideoOutput + } + 'fal-ai/editto': { + input: SchemaEdittoInput + output: SchemaEdittoOutput + } + 'fal-ai/flashvsr/upscale/video': { + input: SchemaFlashvsrUpscaleVideoInput + output: SchemaFlashvsrUpscaleVideoOutput + } + 'fal-ai/workflow-utilities/auto-subtitle': { + input: 
SchemaWorkflowUtilitiesAutoSubtitleInput + output: SchemaWorkflowUtilitiesAutoSubtitleOutput + } + 'fal-ai/bytedance-upscaler/upscale/video': { + input: SchemaBytedanceUpscalerUpscaleVideoInput + output: SchemaBytedanceUpscalerUpscaleVideoOutput + } + 'fal-ai/video-as-prompt': { + input: SchemaVideoAsPromptInput + output: SchemaVideoAsPromptOutput + } + 'fal-ai/birefnet/v2/video': { + input: SchemaBirefnetV2VideoInput + output: SchemaBirefnetV2VideoOutput + } + 'fal-ai/vidu/q2/video-extension/pro': { + input: SchemaViduQ2VideoExtensionProInput + output: SchemaViduQ2VideoExtensionProOutput + } + 'mirelo-ai/sfx-v1.5/video-to-video': { + input: SchemaSfxV15VideoToVideoInput + output: SchemaSfxV15VideoToVideoOutput + } + 'fal-ai/krea-wan-14b/video-to-video': { + input: SchemaKreaWan14bVideoToVideoInput + output: SchemaKreaWan14bVideoToVideoOutput + } + 'fal-ai/sora-2/video-to-video/remix': { + input: SchemaSora2VideoToVideoRemixInput + output: SchemaSora2VideoToVideoRemixOutput + } + 'fal-ai/wan-vace-apps/long-reframe': { + input: SchemaWanVaceAppsLongReframeInput + output: SchemaWanVaceAppsLongReframeOutput + } + 'fal-ai/infinitalk/video-to-video': { + input: SchemaInfinitalkVideoToVideoInput + output: SchemaInfinitalkVideoToVideoOutput + } + 'fal-ai/seedvr/upscale/video': { + input: SchemaSeedvrUpscaleVideoInput + output: SchemaSeedvrUpscaleVideoOutput + } + 'fal-ai/wan-vace-apps/video-edit': { + input: SchemaWanVaceAppsVideoEditInput + output: SchemaWanVaceAppsVideoEditOutput + } + 'fal-ai/wan/v2.2-14b/animate/replace': { + input: SchemaWanV2214bAnimateReplaceInput + output: SchemaWanV2214bAnimateReplaceOutput + } + 'fal-ai/wan/v2.2-14b/animate/move': { + input: SchemaWanV2214bAnimateMoveInput + output: SchemaWanV2214bAnimateMoveOutput + } + 'decart/lucy-edit/pro': { + input: SchemaLucyEditProInput + output: SchemaLucyEditProOutput + } + 'decart/lucy-edit/dev': { + input: SchemaLucyEditDevInput + output: SchemaLucyEditDevOutput + } + 'fal-ai/wan-22-vace-fun-a14b/reframe': { + input: SchemaWan22VaceFunA14bReframeInput + output: SchemaWan22VaceFunA14bReframeOutput + } + 'fal-ai/wan-22-vace-fun-a14b/outpainting': { + input: SchemaWan22VaceFunA14bOutpaintingInput + output: SchemaWan22VaceFunA14bOutpaintingOutput + } + 'fal-ai/wan-22-vace-fun-a14b/inpainting': { + input: SchemaWan22VaceFunA14bInpaintingInput + output: SchemaWan22VaceFunA14bInpaintingOutput + } + 'fal-ai/wan-22-vace-fun-a14b/depth': { + input: SchemaWan22VaceFunA14bDepthInput + output: SchemaWan22VaceFunA14bDepthOutput + } + 'fal-ai/wan-22-vace-fun-a14b/pose': { + input: SchemaWan22VaceFunA14bPoseInput + output: SchemaWan22VaceFunA14bPoseOutput + } + 'fal-ai/hunyuan-video-foley': { + input: SchemaHunyuanVideoFoleyInput + output: SchemaHunyuanVideoFoleyOutput + } + 'fal-ai/sync-lipsync/v2/pro': { + input: SchemaSyncLipsyncV2ProInput + output: SchemaSyncLipsyncV2ProOutput + } + 'fal-ai/wan-fun-control': { + input: SchemaWanFunControlInput + output: SchemaWanFunControlOutput + } + 'bria/video/increase-resolution': { + input: SchemaVideoIncreaseResolutionInput + output: SchemaVideoIncreaseResolutionOutput + } + 'fal-ai/infinitalk': { + input: SchemaInfinitalkInput + output: SchemaInfinitalkOutput + } + 'mirelo-ai/sfx-v1/video-to-video': { + input: SchemaSfxV1VideoToVideoInput + output: SchemaSfxV1VideoToVideoOutput + } + 'moonvalley/marey/pose-transfer': { + input: SchemaMareyPoseTransferInput + output: SchemaMareyPoseTransferOutput + } + 'moonvalley/marey/motion-transfer': { + input: SchemaMareyMotionTransferInput + output: 
SchemaMareyMotionTransferOutput + } + 'fal-ai/ffmpeg-api/merge-videos': { + input: SchemaFfmpegApiMergeVideosInput + output: SchemaFfmpegApiMergeVideosOutput + } + 'fal-ai/wan/v2.2-a14b/video-to-video': { + input: SchemaWanV22A14bVideoToVideoInput + output: SchemaWanV22A14bVideoToVideoOutput + } + 'fal-ai/ltxv-13b-098-distilled/extend': { + input: SchemaLtxv13B098DistilledExtendInput + output: SchemaLtxv13B098DistilledExtendOutput + } + 'fal-ai/rife/video': { + input: SchemaRifeVideoInput + output: SchemaRifeVideoOutput + } + 'fal-ai/film/video': { + input: SchemaFilmVideoInput + output: SchemaFilmVideoOutput + } + 'fal-ai/luma-dream-machine/ray-2-flash/modify': { + input: SchemaLumaDreamMachineRay2FlashModifyInput + output: SchemaLumaDreamMachineRay2FlashModifyOutput + } + 'fal-ai/ltxv-13b-098-distilled/multiconditioning': { + input: SchemaLtxv13B098DistilledMulticonditioningInput + output: SchemaLtxv13B098DistilledMulticonditioningOutput + } + 'fal-ai/pixverse/sound-effects': { + input: SchemaPixverseSoundEffectsInput + output: SchemaPixverseSoundEffectsOutput + } + 'fal-ai/thinksound/audio': { + input: SchemaThinksoundAudioInput + output: SchemaThinksoundAudioOutput + } + 'fal-ai/thinksound': { + input: SchemaThinksoundInput + output: SchemaThinksoundOutput + } + 'fal-ai/pixverse/extend/fast': { + input: SchemaPixverseExtendFastInput + output: SchemaPixverseExtendFastOutput + } + 'fal-ai/pixverse/extend': { + input: SchemaPixverseExtendInput + output: SchemaPixverseExtendOutput + } + 'fal-ai/pixverse/lipsync': { + input: SchemaPixverseLipsyncInput + output: SchemaPixverseLipsyncOutput + } + 'fal-ai/luma-dream-machine/ray-2/modify': { + input: SchemaLumaDreamMachineRay2ModifyInput + output: SchemaLumaDreamMachineRay2ModifyOutput + } + 'fal-ai/wan-vace-14b/reframe': { + input: SchemaWanVace14bReframeInput + output: SchemaWanVace14bReframeOutput + } + 'fal-ai/wan-vace-14b/outpainting': { + input: SchemaWanVace14bOutpaintingInput + output: SchemaWanVace14bOutpaintingOutput + } + 'fal-ai/wan-vace-14b/inpainting': { + input: SchemaWanVace14bInpaintingInput + output: SchemaWanVace14bInpaintingOutput + } + 'fal-ai/wan-vace-14b/pose': { + input: SchemaWanVace14bPoseInput + output: SchemaWanVace14bPoseOutput + } + 'fal-ai/wan-vace-14b/depth': { + input: SchemaWanVace14bDepthInput + output: SchemaWanVace14bDepthOutput + } + 'fal-ai/dwpose/video': { + input: SchemaDwposeVideoInput + output: SchemaDwposeVideoOutput + } + 'fal-ai/ffmpeg-api/merge-audio-video': { + input: SchemaFfmpegApiMergeAudioVideoInput + output: SchemaFfmpegApiMergeAudioVideoOutput + } + 'fal-ai/wan-vace-1-3b': { + input: SchemaWanVace13bInput + output: SchemaWanVace13bOutput + } + 'fal-ai/luma-dream-machine/ray-2-flash/reframe': { + input: SchemaLumaDreamMachineRay2FlashReframeInput + output: SchemaLumaDreamMachineRay2FlashReframeOutput + } + 'fal-ai/luma-dream-machine/ray-2/reframe': { + input: SchemaLumaDreamMachineRay2ReframeInput + output: SchemaLumaDreamMachineRay2ReframeOutput + } + 'veed/lipsync': { + input: SchemaLipsyncInput + output: SchemaLipsyncOutput + } + 'fal-ai/wan-vace-14b': { + input: SchemaWanVace14bInput + output: SchemaWanVace14bOutput + } + 'fal-ai/ltx-video-13b-distilled/extend': { + input: SchemaLtxVideo13bDistilledExtendInput + output: SchemaLtxVideo13bDistilledExtendOutput + } + 'fal-ai/ltx-video-13b-distilled/multiconditioning': { + input: SchemaLtxVideo13bDistilledMulticonditioningInput + output: SchemaLtxVideo13bDistilledMulticonditioningOutput + } + 'fal-ai/ltx-video-13b-dev/multiconditioning': { + 
input: SchemaLtxVideo13bDevMulticonditioningInput + output: SchemaLtxVideo13bDevMulticonditioningOutput + } + 'fal-ai/ltx-video-13b-dev/extend': { + input: SchemaLtxVideo13bDevExtendInput + output: SchemaLtxVideo13bDevExtendOutput + } + 'fal-ai/ltx-video-lora/multiconditioning': { + input: SchemaLtxVideoLoraMulticonditioningInput + output: SchemaLtxVideoLoraMulticonditioningOutput + } + 'fal-ai/magi/extend-video': { + input: SchemaMagiExtendVideoInput + output: SchemaMagiExtendVideoOutput + } + 'fal-ai/magi-distilled/extend-video': { + input: SchemaMagiDistilledExtendVideoInput + output: SchemaMagiDistilledExtendVideoOutput + } + 'fal-ai/wan-vace': { + input: SchemaWanVaceInput + output: SchemaWanVaceOutput + } + 'cassetteai/video-sound-effects-generator': { + input: SchemaVideoSoundEffectsGeneratorInput + output: SchemaVideoSoundEffectsGeneratorOutput + } + 'fal-ai/sync-lipsync/v2': { + input: SchemaSyncLipsyncV2Input + output: SchemaSyncLipsyncV2Output + } + 'fal-ai/latentsync': { + input: SchemaLatentsyncInput + output: SchemaLatentsyncOutput + } + 'fal-ai/pika/v2/pikadditions': { + input: SchemaPikaV2PikadditionsInput + output: SchemaPikaV2PikadditionsOutput + } + 'fal-ai/ltx-video-v095/multiconditioning': { + input: SchemaLtxVideoV095MulticonditioningInput + output: SchemaLtxVideoV095MulticonditioningOutput + } + 'fal-ai/ltx-video-v095/extend': { + input: SchemaLtxVideoV095ExtendInput + output: SchemaLtxVideoV095ExtendOutput + } + 'fal-ai/topaz/upscale/video': { + input: SchemaTopazUpscaleVideoInput + output: SchemaTopazUpscaleVideoOutput + } + 'fal-ai/ben/v2/video': { + input: SchemaBenV2VideoInput + output: SchemaBenV2VideoOutput + } + 'fal-ai/hunyuan-video/video-to-video': { + input: SchemaHunyuanVideoVideoToVideoInput + output: SchemaHunyuanVideoVideoToVideoOutput + } + 'fal-ai/hunyuan-video-lora/video-to-video': { + input: SchemaHunyuanVideoLoraVideoToVideoInput + output: SchemaHunyuanVideoLoraVideoToVideoOutput + } + 'fal-ai/ffmpeg-api/compose': { + input: SchemaFfmpegApiComposeInput + output: SchemaFfmpegApiComposeOutput + } + 'fal-ai/sync-lipsync': { + input: SchemaSyncLipsyncInput + output: SchemaSyncLipsyncOutput + } + 'fal-ai/auto-caption': { + input: SchemaAutoCaptionInput + output: SchemaAutoCaptionOutput + } + 'fal-ai/dubbing': { + input: SchemaDubbingInput + output: SchemaDubbingOutput + } + 'fal-ai/video-upscaler': { + input: SchemaVideoUpscalerInput + output: SchemaVideoUpscalerOutput + } + 'fal-ai/cogvideox-5b/video-to-video': { + input: SchemaCogvideox5bVideoToVideoInput + output: SchemaCogvideox5bVideoToVideoOutput + } + 'fal-ai/controlnext': { + input: SchemaControlnextInput + output: SchemaControlnextOutput + } + 'fal-ai/sam2/video': { + input: SchemaSam2VideoInput + output: SchemaSam2VideoOutput + } + 'fal-ai/amt-interpolation': { + input: SchemaAmtInterpolationInput + output: SchemaAmtInterpolationOutput + } + 'fal-ai/fast-animatediff/turbo/video-to-video': { + input: SchemaFastAnimatediffTurboVideoToVideoInput + output: SchemaFastAnimatediffTurboVideoToVideoOutput + } + 'fal-ai/fast-animatediff/video-to-video': { + input: SchemaFastAnimatediffVideoToVideoInput + output: SchemaFastAnimatediffVideoToVideoOutput + } +} + +/** Union type of all video-to-video model endpoint IDs */ +export type VideoToVideoModel = keyof VideoToVideoEndpointMap + +export const VideoToVideoSchemaMap: Record< + VideoToVideoModel, + { input: z.ZodSchema; output: z.ZodSchema } +> = { + ['bria/video/background-removal']: { + input: zSchemaVideoBackgroundRemovalInput, + output: 
zSchemaVideoBackgroundRemovalOutput, + }, + ['fal-ai/mmaudio-v2']: { + input: zSchemaMmaudioV2Input, + output: zSchemaMmaudioV2Output, + }, + ['half-moon-ai/ai-face-swap/faceswapvideo']: { + input: zSchemaAiFaceSwapFaceswapvideoInput, + output: zSchemaAiFaceSwapFaceswapvideoOutput, + }, + ['fal-ai/ltx-2-19b/distilled/video-to-video/lora']: { + input: zSchemaLtx219bDistilledVideoToVideoLoraInput, + output: zSchemaLtx219bDistilledVideoToVideoLoraOutput, + }, + ['fal-ai/ltx-2-19b/distilled/video-to-video']: { + input: zSchemaLtx219bDistilledVideoToVideoInput, + output: zSchemaLtx219bDistilledVideoToVideoOutput, + }, + ['fal-ai/ltx-2-19b/video-to-video/lora']: { + input: zSchemaLtx219bVideoToVideoLoraInput, + output: zSchemaLtx219bVideoToVideoLoraOutput, + }, + ['fal-ai/ltx-2-19b/video-to-video']: { + input: zSchemaLtx219bVideoToVideoInput, + output: zSchemaLtx219bVideoToVideoOutput, + }, + ['fal-ai/ltx-2-19b/distilled/extend-video/lora']: { + input: zSchemaLtx219bDistilledExtendVideoLoraInput, + output: zSchemaLtx219bDistilledExtendVideoLoraOutput, + }, + ['fal-ai/ltx-2-19b/distilled/extend-video']: { + input: zSchemaLtx219bDistilledExtendVideoInput, + output: zSchemaLtx219bDistilledExtendVideoOutput, + }, + ['fal-ai/ltx-2-19b/extend-video/lora']: { + input: zSchemaLtx219bExtendVideoLoraInput, + output: zSchemaLtx219bExtendVideoLoraOutput, + }, + ['fal-ai/ltx-2-19b/extend-video']: { + input: zSchemaLtx219bExtendVideoInput, + output: zSchemaLtx219bExtendVideoOutput, + }, + ['bria/video/erase/keypoints']: { + input: zSchemaVideoEraseKeypointsInput, + output: zSchemaVideoEraseKeypointsOutput, + }, + ['bria/video/erase/prompt']: { + input: zSchemaVideoErasePromptInput, + output: zSchemaVideoErasePromptOutput, + }, + ['bria/video/erase/mask']: { + input: zSchemaVideoEraseMaskInput, + output: zSchemaVideoEraseMaskOutput, + }, + ['fal-ai/lightx/relight']: { + input: zSchemaLightxRelightInput, + output: zSchemaLightxRelightOutput, + }, + ['fal-ai/lightx/recamera']: { + input: zSchemaLightxRecameraInput, + output: zSchemaLightxRecameraOutput, + }, + ['fal-ai/kling-video/v2.6/standard/motion-control']: { + input: zSchemaKlingVideoV26StandardMotionControlInput, + output: zSchemaKlingVideoV26StandardMotionControlOutput, + }, + ['fal-ai/kling-video/v2.6/pro/motion-control']: { + input: zSchemaKlingVideoV26ProMotionControlInput, + output: zSchemaKlingVideoV26ProMotionControlOutput, + }, + ['decart/lucy-restyle']: { + input: zSchemaLucyRestyleInput, + output: zSchemaLucyRestyleOutput, + }, + ['fal-ai/scail']: { + input: zSchemaScailInput, + output: zSchemaScailOutput, + }, + ['clarityai/crystal-video-upscaler']: { + input: zSchemaCrystalVideoUpscalerInput, + output: zSchemaCrystalVideoUpscalerOutput, + }, + ['bria/bria_video_eraser/erase/mask']: { + input: zSchemaBriaVideoEraserEraseMaskInput, + output: zSchemaBriaVideoEraserEraseMaskOutput, + }, + ['bria/bria_video_eraser/erase/keypoints']: { + input: zSchemaBriaVideoEraserEraseKeypointsInput, + output: zSchemaBriaVideoEraserEraseKeypointsOutput, + }, + ['bria/bria_video_eraser/erase/prompt']: { + input: zSchemaBriaVideoEraserErasePromptInput, + output: zSchemaBriaVideoEraserErasePromptOutput, + }, + ['wan/v2.6/reference-to-video']: { + input: zSchemaV26ReferenceToVideoInput, + output: zSchemaV26ReferenceToVideoOutput, + }, + ['fal-ai/veo3.1/fast/extend-video']: { + input: zSchemaVeo31FastExtendVideoInput, + output: zSchemaVeo31FastExtendVideoOutput, + }, + ['fal-ai/veo3.1/extend-video']: { + input: zSchemaVeo31ExtendVideoInput, + output: 
zSchemaVeo31ExtendVideoOutput, + }, + ['fal-ai/kling-video/o1/standard/video-to-video/reference']: { + input: zSchemaKlingVideoO1StandardVideoToVideoReferenceInput, + output: zSchemaKlingVideoO1StandardVideoToVideoReferenceOutput, + }, + ['fal-ai/kling-video/o1/standard/video-to-video/edit']: { + input: zSchemaKlingVideoO1StandardVideoToVideoEditInput, + output: zSchemaKlingVideoO1StandardVideoToVideoEditOutput, + }, + ['fal-ai/steady-dancer']: { + input: zSchemaSteadyDancerInput, + output: zSchemaSteadyDancerOutput, + }, + ['fal-ai/one-to-all-animation/1.3b']: { + input: zSchemaOneToAllAnimation13bInput, + output: zSchemaOneToAllAnimation13bOutput, + }, + ['fal-ai/one-to-all-animation/14b']: { + input: zSchemaOneToAllAnimation14bInput, + output: zSchemaOneToAllAnimation14bOutput, + }, + ['fal-ai/wan-vision-enhancer']: { + input: zSchemaWanVisionEnhancerInput, + output: zSchemaWanVisionEnhancerOutput, + }, + ['fal-ai/sync-lipsync/react-1']: { + input: zSchemaSyncLipsyncReact1Input, + output: zSchemaSyncLipsyncReact1Output, + }, + ['veed/video-background-removal/fast']: { + input: zSchemaVideoBackgroundRemovalFastInput, + output: zSchemaVideoBackgroundRemovalFastOutput, + }, + ['fal-ai/kling-video/o1/video-to-video/edit']: { + input: zSchemaKlingVideoO1VideoToVideoEditInput, + output: zSchemaKlingVideoO1VideoToVideoEditOutput, + }, + ['fal-ai/kling-video/o1/video-to-video/reference']: { + input: zSchemaKlingVideoO1VideoToVideoReferenceInput, + output: zSchemaKlingVideoO1VideoToVideoReferenceOutput, + }, + ['veed/video-background-removal']: { + input: zSchemaVideoBackgroundRemovalInput, + output: zSchemaVideoBackgroundRemovalOutput, + }, + ['veed/video-background-removal/green-screen']: { + input: zSchemaVideoBackgroundRemovalGreenScreenInput, + output: zSchemaVideoBackgroundRemovalGreenScreenOutput, + }, + ['fal-ai/ltx-2/retake-video']: { + input: zSchemaLtx2RetakeVideoInput, + output: zSchemaLtx2RetakeVideoOutput, + }, + ['decart/lucy-edit/fast']: { + input: zSchemaLucyEditFastInput, + output: zSchemaLucyEditFastOutput, + }, + ['fal-ai/sam-3/video-rle']: { + input: zSchemaSam3VideoRleInput, + output: zSchemaSam3VideoRleOutput, + }, + ['fal-ai/sam-3/video']: { + input: zSchemaSam3VideoInput, + output: zSchemaSam3VideoOutput, + }, + ['fal-ai/editto']: { + input: zSchemaEdittoInput, + output: zSchemaEdittoOutput, + }, + ['fal-ai/flashvsr/upscale/video']: { + input: zSchemaFlashvsrUpscaleVideoInput, + output: zSchemaFlashvsrUpscaleVideoOutput, + }, + ['fal-ai/workflow-utilities/auto-subtitle']: { + input: zSchemaWorkflowUtilitiesAutoSubtitleInput, + output: zSchemaWorkflowUtilitiesAutoSubtitleOutput, + }, + ['fal-ai/bytedance-upscaler/upscale/video']: { + input: zSchemaBytedanceUpscalerUpscaleVideoInput, + output: zSchemaBytedanceUpscalerUpscaleVideoOutput, + }, + ['fal-ai/video-as-prompt']: { + input: zSchemaVideoAsPromptInput, + output: zSchemaVideoAsPromptOutput, + }, + ['fal-ai/birefnet/v2/video']: { + input: zSchemaBirefnetV2VideoInput, + output: zSchemaBirefnetV2VideoOutput, + }, + ['fal-ai/vidu/q2/video-extension/pro']: { + input: zSchemaViduQ2VideoExtensionProInput, + output: zSchemaViduQ2VideoExtensionProOutput, + }, + ['mirelo-ai/sfx-v1.5/video-to-video']: { + input: zSchemaSfxV15VideoToVideoInput, + output: zSchemaSfxV15VideoToVideoOutput, + }, + ['fal-ai/krea-wan-14b/video-to-video']: { + input: zSchemaKreaWan14bVideoToVideoInput, + output: zSchemaKreaWan14bVideoToVideoOutput, + }, + ['fal-ai/sora-2/video-to-video/remix']: { + input: zSchemaSora2VideoToVideoRemixInput, + output: 
zSchemaSora2VideoToVideoRemixOutput, + }, + ['fal-ai/wan-vace-apps/long-reframe']: { + input: zSchemaWanVaceAppsLongReframeInput, + output: zSchemaWanVaceAppsLongReframeOutput, + }, + ['fal-ai/infinitalk/video-to-video']: { + input: zSchemaInfinitalkVideoToVideoInput, + output: zSchemaInfinitalkVideoToVideoOutput, + }, + ['fal-ai/seedvr/upscale/video']: { + input: zSchemaSeedvrUpscaleVideoInput, + output: zSchemaSeedvrUpscaleVideoOutput, + }, + ['fal-ai/wan-vace-apps/video-edit']: { + input: zSchemaWanVaceAppsVideoEditInput, + output: zSchemaWanVaceAppsVideoEditOutput, + }, + ['fal-ai/wan/v2.2-14b/animate/replace']: { + input: zSchemaWanV2214bAnimateReplaceInput, + output: zSchemaWanV2214bAnimateReplaceOutput, + }, + ['fal-ai/wan/v2.2-14b/animate/move']: { + input: zSchemaWanV2214bAnimateMoveInput, + output: zSchemaWanV2214bAnimateMoveOutput, + }, + ['decart/lucy-edit/pro']: { + input: zSchemaLucyEditProInput, + output: zSchemaLucyEditProOutput, + }, + ['decart/lucy-edit/dev']: { + input: zSchemaLucyEditDevInput, + output: zSchemaLucyEditDevOutput, + }, + ['fal-ai/wan-22-vace-fun-a14b/reframe']: { + input: zSchemaWan22VaceFunA14bReframeInput, + output: zSchemaWan22VaceFunA14bReframeOutput, + }, + ['fal-ai/wan-22-vace-fun-a14b/outpainting']: { + input: zSchemaWan22VaceFunA14bOutpaintingInput, + output: zSchemaWan22VaceFunA14bOutpaintingOutput, + }, + ['fal-ai/wan-22-vace-fun-a14b/inpainting']: { + input: zSchemaWan22VaceFunA14bInpaintingInput, + output: zSchemaWan22VaceFunA14bInpaintingOutput, + }, + ['fal-ai/wan-22-vace-fun-a14b/depth']: { + input: zSchemaWan22VaceFunA14bDepthInput, + output: zSchemaWan22VaceFunA14bDepthOutput, + }, + ['fal-ai/wan-22-vace-fun-a14b/pose']: { + input: zSchemaWan22VaceFunA14bPoseInput, + output: zSchemaWan22VaceFunA14bPoseOutput, + }, + ['fal-ai/hunyuan-video-foley']: { + input: zSchemaHunyuanVideoFoleyInput, + output: zSchemaHunyuanVideoFoleyOutput, + }, + ['fal-ai/sync-lipsync/v2/pro']: { + input: zSchemaSyncLipsyncV2ProInput, + output: zSchemaSyncLipsyncV2ProOutput, + }, + ['fal-ai/wan-fun-control']: { + input: zSchemaWanFunControlInput, + output: zSchemaWanFunControlOutput, + }, + ['bria/video/increase-resolution']: { + input: zSchemaVideoIncreaseResolutionInput, + output: zSchemaVideoIncreaseResolutionOutput, + }, + ['fal-ai/infinitalk']: { + input: zSchemaInfinitalkInput, + output: zSchemaInfinitalkOutput, + }, + ['mirelo-ai/sfx-v1/video-to-video']: { + input: zSchemaSfxV1VideoToVideoInput, + output: zSchemaSfxV1VideoToVideoOutput, + }, + ['moonvalley/marey/pose-transfer']: { + input: zSchemaMareyPoseTransferInput, + output: zSchemaMareyPoseTransferOutput, + }, + ['moonvalley/marey/motion-transfer']: { + input: zSchemaMareyMotionTransferInput, + output: zSchemaMareyMotionTransferOutput, + }, + ['fal-ai/ffmpeg-api/merge-videos']: { + input: zSchemaFfmpegApiMergeVideosInput, + output: zSchemaFfmpegApiMergeVideosOutput, + }, + ['fal-ai/wan/v2.2-a14b/video-to-video']: { + input: zSchemaWanV22A14bVideoToVideoInput, + output: zSchemaWanV22A14bVideoToVideoOutput, + }, + ['fal-ai/ltxv-13b-098-distilled/extend']: { + input: zSchemaLtxv13B098DistilledExtendInput, + output: zSchemaLtxv13B098DistilledExtendOutput, + }, + ['fal-ai/rife/video']: { + input: zSchemaRifeVideoInput, + output: zSchemaRifeVideoOutput, + }, + ['fal-ai/film/video']: { + input: zSchemaFilmVideoInput, + output: zSchemaFilmVideoOutput, + }, + ['fal-ai/luma-dream-machine/ray-2-flash/modify']: { + input: zSchemaLumaDreamMachineRay2FlashModifyInput, + output: 
zSchemaLumaDreamMachineRay2FlashModifyOutput, + }, + ['fal-ai/ltxv-13b-098-distilled/multiconditioning']: { + input: zSchemaLtxv13B098DistilledMulticonditioningInput, + output: zSchemaLtxv13B098DistilledMulticonditioningOutput, + }, + ['fal-ai/pixverse/sound-effects']: { + input: zSchemaPixverseSoundEffectsInput, + output: zSchemaPixverseSoundEffectsOutput, + }, + ['fal-ai/thinksound/audio']: { + input: zSchemaThinksoundAudioInput, + output: zSchemaThinksoundAudioOutput, + }, + ['fal-ai/thinksound']: { + input: zSchemaThinksoundInput, + output: zSchemaThinksoundOutput, + }, + ['fal-ai/pixverse/extend/fast']: { + input: zSchemaPixverseExtendFastInput, + output: zSchemaPixverseExtendFastOutput, + }, + ['fal-ai/pixverse/extend']: { + input: zSchemaPixverseExtendInput, + output: zSchemaPixverseExtendOutput, + }, + ['fal-ai/pixverse/lipsync']: { + input: zSchemaPixverseLipsyncInput, + output: zSchemaPixverseLipsyncOutput, + }, + ['fal-ai/luma-dream-machine/ray-2/modify']: { + input: zSchemaLumaDreamMachineRay2ModifyInput, + output: zSchemaLumaDreamMachineRay2ModifyOutput, + }, + ['fal-ai/wan-vace-14b/reframe']: { + input: zSchemaWanVace14bReframeInput, + output: zSchemaWanVace14bReframeOutput, + }, + ['fal-ai/wan-vace-14b/outpainting']: { + input: zSchemaWanVace14bOutpaintingInput, + output: zSchemaWanVace14bOutpaintingOutput, + }, + ['fal-ai/wan-vace-14b/inpainting']: { + input: zSchemaWanVace14bInpaintingInput, + output: zSchemaWanVace14bInpaintingOutput, + }, + ['fal-ai/wan-vace-14b/pose']: { + input: zSchemaWanVace14bPoseInput, + output: zSchemaWanVace14bPoseOutput, + }, + ['fal-ai/wan-vace-14b/depth']: { + input: zSchemaWanVace14bDepthInput, + output: zSchemaWanVace14bDepthOutput, + }, + ['fal-ai/dwpose/video']: { + input: zSchemaDwposeVideoInput, + output: zSchemaDwposeVideoOutput, + }, + ['fal-ai/ffmpeg-api/merge-audio-video']: { + input: zSchemaFfmpegApiMergeAudioVideoInput, + output: zSchemaFfmpegApiMergeAudioVideoOutput, + }, + ['fal-ai/wan-vace-1-3b']: { + input: zSchemaWanVace13bInput, + output: zSchemaWanVace13bOutput, + }, + ['fal-ai/luma-dream-machine/ray-2-flash/reframe']: { + input: zSchemaLumaDreamMachineRay2FlashReframeInput, + output: zSchemaLumaDreamMachineRay2FlashReframeOutput, + }, + ['fal-ai/luma-dream-machine/ray-2/reframe']: { + input: zSchemaLumaDreamMachineRay2ReframeInput, + output: zSchemaLumaDreamMachineRay2ReframeOutput, + }, + ['veed/lipsync']: { + input: zSchemaLipsyncInput, + output: zSchemaLipsyncOutput, + }, + ['fal-ai/wan-vace-14b']: { + input: zSchemaWanVace14bInput, + output: zSchemaWanVace14bOutput, + }, + ['fal-ai/ltx-video-13b-distilled/extend']: { + input: zSchemaLtxVideo13bDistilledExtendInput, + output: zSchemaLtxVideo13bDistilledExtendOutput, + }, + ['fal-ai/ltx-video-13b-distilled/multiconditioning']: { + input: zSchemaLtxVideo13bDistilledMulticonditioningInput, + output: zSchemaLtxVideo13bDistilledMulticonditioningOutput, + }, + ['fal-ai/ltx-video-13b-dev/multiconditioning']: { + input: zSchemaLtxVideo13bDevMulticonditioningInput, + output: zSchemaLtxVideo13bDevMulticonditioningOutput, + }, + ['fal-ai/ltx-video-13b-dev/extend']: { + input: zSchemaLtxVideo13bDevExtendInput, + output: zSchemaLtxVideo13bDevExtendOutput, + }, + ['fal-ai/ltx-video-lora/multiconditioning']: { + input: zSchemaLtxVideoLoraMulticonditioningInput, + output: zSchemaLtxVideoLoraMulticonditioningOutput, + }, + ['fal-ai/magi/extend-video']: { + input: zSchemaMagiExtendVideoInput, + output: zSchemaMagiExtendVideoOutput, + }, + ['fal-ai/magi-distilled/extend-video']: { + input: 
zSchemaMagiDistilledExtendVideoInput, + output: zSchemaMagiDistilledExtendVideoOutput, + }, + ['fal-ai/wan-vace']: { + input: zSchemaWanVaceInput, + output: zSchemaWanVaceOutput, + }, + ['cassetteai/video-sound-effects-generator']: { + input: zSchemaVideoSoundEffectsGeneratorInput, + output: zSchemaVideoSoundEffectsGeneratorOutput, + }, + ['fal-ai/sync-lipsync/v2']: { + input: zSchemaSyncLipsyncV2Input, + output: zSchemaSyncLipsyncV2Output, + }, + ['fal-ai/latentsync']: { + input: zSchemaLatentsyncInput, + output: zSchemaLatentsyncOutput, + }, + ['fal-ai/pika/v2/pikadditions']: { + input: zSchemaPikaV2PikadditionsInput, + output: zSchemaPikaV2PikadditionsOutput, + }, + ['fal-ai/ltx-video-v095/multiconditioning']: { + input: zSchemaLtxVideoV095MulticonditioningInput, + output: zSchemaLtxVideoV095MulticonditioningOutput, + }, + ['fal-ai/ltx-video-v095/extend']: { + input: zSchemaLtxVideoV095ExtendInput, + output: zSchemaLtxVideoV095ExtendOutput, + }, + ['fal-ai/topaz/upscale/video']: { + input: zSchemaTopazUpscaleVideoInput, + output: zSchemaTopazUpscaleVideoOutput, + }, + ['fal-ai/ben/v2/video']: { + input: zSchemaBenV2VideoInput, + output: zSchemaBenV2VideoOutput, + }, + ['fal-ai/hunyuan-video/video-to-video']: { + input: zSchemaHunyuanVideoVideoToVideoInput, + output: zSchemaHunyuanVideoVideoToVideoOutput, + }, + ['fal-ai/hunyuan-video-lora/video-to-video']: { + input: zSchemaHunyuanVideoLoraVideoToVideoInput, + output: zSchemaHunyuanVideoLoraVideoToVideoOutput, + }, + ['fal-ai/ffmpeg-api/compose']: { + input: zSchemaFfmpegApiComposeInput, + output: zSchemaFfmpegApiComposeOutput, + }, + ['fal-ai/sync-lipsync']: { + input: zSchemaSyncLipsyncInput, + output: zSchemaSyncLipsyncOutput, + }, + ['fal-ai/auto-caption']: { + input: zSchemaAutoCaptionInput, + output: zSchemaAutoCaptionOutput, + }, + ['fal-ai/dubbing']: { + input: zSchemaDubbingInput, + output: zSchemaDubbingOutput, + }, + ['fal-ai/video-upscaler']: { + input: zSchemaVideoUpscalerInput, + output: zSchemaVideoUpscalerOutput, + }, + ['fal-ai/cogvideox-5b/video-to-video']: { + input: zSchemaCogvideox5bVideoToVideoInput, + output: zSchemaCogvideox5bVideoToVideoOutput, + }, + ['fal-ai/controlnext']: { + input: zSchemaControlnextInput, + output: zSchemaControlnextOutput, + }, + ['fal-ai/sam2/video']: { + input: zSchemaSam2VideoInput, + output: zSchemaSam2VideoOutput, + }, + ['fal-ai/amt-interpolation']: { + input: zSchemaAmtInterpolationInput, + output: zSchemaAmtInterpolationOutput, + }, + ['fal-ai/fast-animatediff/turbo/video-to-video']: { + input: zSchemaFastAnimatediffTurboVideoToVideoInput, + output: zSchemaFastAnimatediffTurboVideoToVideoOutput, + }, + ['fal-ai/fast-animatediff/video-to-video']: { + input: zSchemaFastAnimatediffVideoToVideoInput, + output: zSchemaFastAnimatediffVideoToVideoOutput, + }, +} as const + +/** Get the input type for a specific video-to-video model */ +export type VideoToVideoModelInput<T extends keyof VideoToVideoEndpointMap> = + VideoToVideoEndpointMap[T]['input'] + +/** Get the output type for a specific video-to-video model */ +export type VideoToVideoModelOutput<T extends keyof VideoToVideoEndpointMap> = + VideoToVideoEndpointMap[T]['output'] diff --git a/packages/typescript/ai-fal/src/generated/video-to-video/types.gen.ts b/packages/typescript/ai-fal/src/generated/video-to-video/types.gen.ts new file mode 100644 index 00000000..5afd75c8 --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/video-to-video/types.gen.ts @@ -0,0 +1,24778 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: 'https://queue.fal.run' | (string & {})
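// A minimal usage sketch, not part of the generated output: given the endpoint
// map and helper types above, callers can derive fully typed inputs and outputs
// per endpoint. `runVideoToVideo` is hypothetical, and this assumes the
// `zSchema*` values registered in the map are zod schemas:
//
//   import { z } from 'zod'
//
//   // Resolves to the parsed input type for the Topaz upscaler endpoint.
//   type TopazUpscaleInput = z.infer<
//     VideoToVideoModelInput<'fal-ai/topaz/upscale/video'>
//   >
//
//   // A caller-side helper whose input/output types narrow per endpoint id.
//   declare function runVideoToVideo<T extends keyof VideoToVideoEndpointMap>(
//     endpoint: T,
//     input: z.infer<VideoToVideoModelInput<T>>,
//   ): Promise<z.infer<VideoToVideoModelOutput<T>>>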
+} + +/** + * AnimateDiffV2VOutput + */ +export type SchemaFastAnimatediffVideoToVideoOutput = { + /** + * Seed + * + * Seed used for generating the video. + */ + seed: number + /** + * Video + * + * Generated video file. + */ + video: SchemaFile +} + +/** + * File + */ +export type SchemaFile = { + /** + * File Size + * + * The size of the file in bytes. + */ + file_size?: number + /** + * File Name + * + * The name of the file. It will be auto-generated if not provided. + */ + file_name?: string + /** + * Content Type + * + * The mime type of the file. + */ + content_type?: string + /** + * Url + * + * The URL where the file can be downloaded from. + */ + url: string +} + +/** + * AnimateDiffV2VInput + */ +export type SchemaFastAnimatediffVideoToVideoInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Video Url + * + * URL of the video. + */ + video_url: string + /** + * First N Seconds + * + * The first N seconds of video to animate. + */ + first_n_seconds?: number + /** + * Fps + * + * Number of frames per second to extract from the video. + */ + fps?: number + /** + * Strength + * + * The strength of the input video in the final output. + */ + strength?: number + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related image to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Motions + * + * The motions to apply to the video. + */ + motions?: Array< + 'zoom-out' | 'zoom-in' | 'pan-left' | 'pan-right' | 'tilt-up' | 'tilt-down' + > +} + +/** + * AnimateDiffV2VOutput + */ +export type SchemaFastAnimatediffTurboVideoToVideoOutput = { + /** + * Seed + * + * Seed used for generating the video. + */ + seed: number + /** + * Video + * + * Generated video file. + */ + video: SchemaFile +} + +/** + * AnimateDiffV2VTurboInput + */ +export type SchemaFastAnimatediffTurboVideoToVideoInput = { + /** + * Prompt + * + * The prompt to use for generating the image. Be as descriptive as possible for best results. + */ + prompt: string + /** + * Video Url + * + * URL of the video. + */ + video_url: string + /** + * First N Seconds + * + * The first N seconds of video to animate. + */ + first_n_seconds?: number + /** + * Fps + * + * Number of frames per second to extract from the video. + */ + fps?: number + /** + * Strength + * + * The strength of the input video in the final output. + */ + strength?: number + /** + * Guidance Scale + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you. + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. 4-12 is recommended for turbo mode.
+ */ + num_inference_steps?: number + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of Stable Diffusion + * will output the same image every time. + * + */ + seed?: number + /** + * Negative Prompt + * + * + * The negative prompt to use. Use it to address details that you don't want + * in the image. This could be colors, objects, scenery and even the small details + * (e.g. moustache, blurry, low resolution). + * + */ + negative_prompt?: string + /** + * Motions + * + * The motions to apply to the video. + */ + motions?: Array< + 'zoom-out' | 'zoom-in' | 'pan-left' | 'pan-right' | 'tilt-up' | 'tilt-down' + > +} + +/** + * AMTInterpolationOutput + */ +export type SchemaAmtInterpolationOutput = { + /** + * Video + * + * Generated video + */ + video: SchemaFile +} + +/** + * AMTInterpolationInput + */ +export type SchemaAmtInterpolationInput = { + /** + * Video URL + * + * URL of the video to be processed + */ + video_url: string + /** + * Recursive Interpolation Passes + * + * Number of recursive interpolation passes + */ + recursive_interpolation_passes?: number + /** + * Output FPS + * + * Output frames per second + */ + output_fps?: number +} + +/** + * SAM2VideoOutput + */ +export type SchemaSam2VideoOutput = { + /** + * Boundingbox Frames Zip + * + * Zip file containing per-frame bounding box overlays. + */ + boundingbox_frames_zip?: SchemaFile + /** + * Video + * + * The segmented video. + */ + video: SchemaFile +} + +/** + * SAM2VideoRLEInput + */ +export type SchemaSam2VideoInput = { + /** + * Video Url + * + * The URL of the video to be segmented. + */ + video_url: string + /** + * Prompts + * + * List of prompts to segment the video + */ + prompts?: Array<SchemaPointPrompt> + /** + * Boundingbox Zip + * + * Return per-frame bounding box overlays as a zip archive. + */ + boundingbox_zip?: boolean + /** + * Mask Url + * + * The URL of the mask to be applied initially. + */ + mask_url?: string + /** + * Apply Mask + * + * Apply the mask on the video. + */ + apply_mask?: boolean + /** + * Box Prompts + * + * Coordinates for boxes + */ + box_prompts?: Array<SchemaBoxPrompt> +} + +/** + * BoxPrompt + */ +export type SchemaBoxPrompt = { + /** + * Y Min + * + * Y Min Coordinate of the box + */ + y_min?: number + /** + * Frame Index + * + * The frame index to interact with. + */ + frame_index?: number + /** + * X Max + * + * X Max Coordinate of the prompt + */ + x_max?: number + /** + * X Min + * + * X Min Coordinate of the box + */ + x_min?: number + /** + * Y Max + * + * Y Max Coordinate of the prompt + */ + y_max?: number +} + +/** + * PointPrompt + */ +export type SchemaPointPrompt = { + /** + * Y + * + * Y Coordinate of the prompt + */ + y?: number + /** + * Label + * + * Label of the prompt. 1 for foreground, 0 for background + */ + label?: 0 | 1 + /** + * Frame Index + * + * The frame index to interact with. + */ + frame_index?: number + /** + * X + * + * X Coordinate of the prompt + */ + x?: number +} + +/** + * ControlNeXtOutput + */ +export type SchemaControlnextOutput = { + /** + * The generated video. + */ + video: SchemaFile +} + +/** + * ControlNeXtInput + */ +export type SchemaControlnextInput = { + /** + * Controlnext Cond Scale + * + * Condition scale for ControlNeXt. + */ + controlnext_cond_scale?: number + /** + * Video Url + * + * URL of the input video. + */ + video_url: string + /** + * Fps + * + * Frames per second for the output video. + */ + fps?: number + /** + * Max Frame Num + * + * Maximum number of frames to process.
+ */ + max_frame_num?: number + /** + * Width + * + * Width of the output video. + */ + width?: number + /** + * Overlap + * + * Number of overlapping frames between batches. + */ + overlap?: number + /** + * Guidance Scale + * + * Guidance scale for the diffusion process. + */ + guidance_scale?: number + /** + * Batch Frames + * + * Number of frames to process in each batch. + */ + batch_frames?: number + /** + * Height + * + * Height of the output video. + */ + height?: number + /** + * Sample Stride + * + * Stride for sampling frames from the input video. + */ + sample_stride?: number + /** + * Image Url + * + * URL of the reference image. + */ + image_url: string + /** + * Decode Chunk Size + * + * Chunk size for decoding frames. + */ + decode_chunk_size?: number + /** + * Motion Bucket Id + * + * Motion bucket ID for the pipeline. + */ + motion_bucket_id?: number + /** + * Num Inference Steps + * + * Number of inference steps. + */ + num_inference_steps?: number +} + +/** + * Output + */ +export type SchemaCogvideox5bVideoToVideoOutput = { + /** + * Prompt + * + * The prompt used for generating the video. + */ + prompt: string + /** + * Timings + */ + timings: { + [key: string]: number + } + /** + * Seed + * + * + * Seed of the generated video. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number + /** + * Video + * + * The URL to the generated video + */ + video: SchemaFile +} + +/** + * VideoToVideoInput + */ +export type SchemaCogvideox5bVideoToVideoInput = { + /** + * Prompt + * + * The prompt to generate the video from. + */ + prompt: string + /** + * Input Video Url + * + * The video to generate the video from. + */ + video_url: string + /** + * Use Rife + * + * Use RIFE for video interpolation + */ + use_rife?: boolean + /** + * Loras + * + * + * The LoRAs to use for the image generation. We currently support one lora. + * + */ + loras?: Array<SchemaLoraWeight> + /** + * Video Size + * + * The size of the generated video. + */ + video_size?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Strength + * + * The strength to use for Video to Video. 1.0 completely remakes the video while 0.0 preserves the original. + */ + strength?: number + /** + * Guidance scale (CFG) + * + * + * The CFG (Classifier Free Guidance) scale is a measure of how close you want + * the model to stick to your prompt when looking for a related video to show you. + * + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * The number of inference steps to perform. + */ + num_inference_steps?: number + /** + * Export Fps + * + * The target FPS of the video + */ + export_fps?: number + /** + * Negative Prompt + * + * The negative prompt to generate video from + */ + negative_prompt?: string + /** + * Seed + * + * + * The same seed and the same prompt given to the same version of the model + * will output the same video every time. + * + */ + seed?: number +} + +/** + * ImageSize + */ +export type SchemaImageSize = { + /** + * Height + * + * The height of the generated image. + */ + height?: number + /** + * Width + * + * The width of the generated image. + */ + width?: number +} + +/** + * LoraWeight + */ +export type SchemaLoraWeight = { + /** + * Path + * + * URL or the path to the LoRA weights. + */ + path: string + /** + * Scale + * + * + * The scale of the LoRA weight.
This is used to scale the LoRA weight + * before merging it with the base model. + * + */ + scale?: number +} + +/** + * Output + */ +export type SchemaVideoUpscalerOutput = { + /** + * Video + * + * The stitched video + */ + video: SchemaFile +} + +/** + * Input + */ +export type SchemaVideoUpscalerInput = { + /** + * Video Url + * + * The URL of the video to upscale + */ + video_url: string + /** + * Scale + * + * The scale factor + */ + scale?: number +} + +/** + * OutputModel + */ +export type SchemaDubbingOutput = { + /** + * Video + * + * The generated video with the lip sync. + */ + video: SchemaFile +} + +/** + * InputModel + */ +export type SchemaDubbingInput = { + /** + * Do Lipsync + * + * Whether to lip sync the audio to the video + */ + do_lipsync?: boolean + /** + * Video Url + * + * Input video URL to be dubbed. + */ + video_url: string + /** + * Target Language + * + * Target language to dub the video to + */ + target_language?: 'hindi' | 'turkish' | 'english' +} + +/** + * Output + */ +export type SchemaAutoCaptionOutput = { + /** + * Video Url + * + * URL to the captioned .mp4 video. + */ + video_url: string +} + +/** + * CaptionInput + */ +export type SchemaAutoCaptionInput = { + /** + * Txt Font + * + * Font for generated captions. Choose one of 'Arial', 'Standard', 'Garamond', 'Times New Roman', 'Georgia', or pass a URL to a .ttf file + */ + txt_font?: string + /** + * Video Url + * + * URL to the .mp4 video with audio. Only videos of size <100MB are allowed. + */ + video_url: string + /** + * Top Align + * + * Top-to-bottom alignment of the text. Can be a string ('top', 'center', 'bottom') or a float (0.0-1.0) + */ + top_align?: string | number + /** + * Txt Color + * + * Colour of the text. Can be an RGB tuple, a color name, or a hexadecimal notation. + */ + txt_color?: string + /** + * Stroke Width + * + * Width of the text strokes in pixels + */ + stroke_width?: number + /** + * Refresh Interval + * + * Number of seconds the captions should stay on screen. A higher number will also result in more text being displayed at once. + */ + refresh_interval?: number + /** + * Font Size + * + * Size of text in generated captions. + */ + font_size?: number + /** + * Left Align + * + * Left-to-right alignment of the text. Can be a string ('left', 'center', 'right') or a float (0.0-1.0) + */ + left_align?: string | number +} + +/** + * LipSyncOutput + */ +export type SchemaSyncLipsyncOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * LipSyncInput + */ +export type SchemaSyncLipsyncInput = { + /** + * Model + * + * The model to use for lipsyncing + */ + model?: 'lipsync-1.8.0' | 'lipsync-1.7.1' | 'lipsync-1.9.0-beta' + /** + * Video Url + * + * URL of the input video + */ + video_url: string + /** + * Sync Mode + * + * Lipsync mode when audio and video durations are out of sync.
+ */ + sync_mode?: 'cut_off' | 'loop' | 'bounce' | 'silence' | 'remap' + /** + * Audio Url + * + * URL of the input audio + */ + audio_url: string +} + +/** + * Keyframe + */ +export type SchemaKeyframe = { + /** + * Duration + * + * The duration in milliseconds of this keyframe + */ + duration: number + /** + * Timestamp + * + * The timestamp in milliseconds where this keyframe starts + */ + timestamp: number + /** + * Url + * + * The URL where this keyframe's media file can be accessed + */ + url: string +} + +/** + * Track + */ +export type SchemaTrack = { + /** + * Type + * + * Type of track ('video' or 'audio') + */ + type: string + /** + * Id + * + * Unique identifier for the track + */ + id: string + /** + * Keyframes + * + * List of keyframes that make up this track + */ + keyframes: Array<SchemaKeyframe> +} + +/** + * ComposeOutput + */ +export type SchemaFfmpegApiComposeOutput = { + /** + * Video Url + * + * URL of the processed video file + */ + video_url: string + /** + * Thumbnail Url + * + * URL of the video's thumbnail image + */ + thumbnail_url: string +} + +/** + * Input + */ +export type SchemaFfmpegApiComposeInput = { + /** + * Tracks + * + * List of tracks to be combined into the final media + */ + tracks: Array<SchemaTrack> +} + +/** + * HunyuanV2VResponse + */ +export type SchemaHunyuanVideoLoraVideoToVideoOutput = { + /** + * Seed + * + * The seed used for generating the video. + */ + seed: number + /** + * Video + */ + video: SchemaFile +} + +/** + * HunyuanV2VRequest + */ +export type SchemaHunyuanVideoLoraVideoToVideoInput = { + /** + * Prompt + * + * The prompt to generate the video from. + */ + prompt: string + /** + * Aspect Ratio (W:H) + * + * The aspect ratio of the video to generate. + */ + aspect_ratio?: '16:9' | '9:16' + /** + * Resolution + * + * The resolution of the video to generate. + */ + resolution?: '480p' | '580p' | '720p' + /** + * Video Url + * + * URL of the video + */ + video_url: string + /** + * Loras + * + * + * The LoRAs to use for the image generation. You can use any number of LoRAs + * and they will be merged together to generate the final image. + * + */ + loras?: Array<SchemaLoraWeight> + /** + * Strength + * + * Strength of video-to-video + */ + strength?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Seed + * + * The seed to use for generating the video. + */ + seed?: number + /** + * Number of Frames + * + * The number of frames to generate. + */ + num_frames?: '129' | '85' + /** + * Pro Mode + * + * By default, generations are done with 35 steps. Pro mode does 55 steps which results in higher quality videos but will take more time and cost 2x more billing units. + */ + pro_mode?: boolean +} + +/** + * HunyuanT2VResponse + */ +export type SchemaHunyuanVideoVideoToVideoOutput = { + /** + * Seed + * + * The seed used for generating the video. + */ + seed: number + /** + * Video + */ + video: SchemaFile +} + +/** + * HunyuanV2VRequest + */ +export type SchemaHunyuanVideoVideoToVideoInput = { + /** + * Prompt + * + * The prompt to generate the video from. + */ + prompt: string + /** + * Aspect Ratio (W:H) + * + * The aspect ratio of the video to generate. + */ + aspect_ratio?: '16:9' | '9:16' + /** + * Resolution + * + * The resolution of the video to generate. + */ + resolution?: '480p' | '580p' | '720p' + /** + * Video Url + * + * URL of the video input.
+ */ + video_url: string + /** + * Strength + * + * Strength for Video-to-Video + */ + strength?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Num Inference Steps + * + * The number of inference steps to run. Lower gets faster results, higher gets better results. + */ + num_inference_steps?: number + /** + * Seed + * + * The seed to use for generating the video. + */ + seed?: number + /** + * Number of Frames + * + * The number of frames to generate. + */ + num_frames?: '129' | '85' + /** + * Pro Mode + * + * By default, generations are done with 35 steps. Pro mode does 55 steps which results in higher quality videos but will take more time and cost 2x more billing units. + */ + pro_mode?: boolean +} + +/** + * Ben2OutputVideo + */ +export type SchemaBenV2VideoOutput = { + /** + * Seed + * + * + * Seed of the generated image. It will be the same value of the one passed in the + * input or the randomly generated that was used in case none was passed. + * + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * Ben2InputVideo + */ +export type SchemaBenV2VideoInput = { + /** + * Video Url + * + * URL of video to be used for background removal. + */ + video_url: string + /** + * Seed + * + * Random seed for reproducible generation. + */ + seed?: number + /** + * Background Color + * + * Optional RGB values (0-255) for the background color. If not provided, the background will be transparent. For example: [0, 0, 0] + */ + background_color?: [unknown, unknown, unknown] +} + +/** + * VideoUpscaleOutput + */ +export type SchemaTopazUpscaleVideoOutput = { + /** + * Video + * + * The upscaled video file + */ + video: SchemaFile +} + +/** + * VideoUpscaleRequest + */ +export type SchemaTopazUpscaleVideoInput = { + /** + * H264 Output + * + * Whether to use H264 codec for output video. Default is H265. + */ + H264_output?: boolean + /** + * Video Url + * + * URL of the video to upscale + */ + video_url: string + /** + * Upscale Factor + * + * Factor to upscale the video by (e.g. 2.0 doubles width and height) + */ + upscale_factor?: number + /** + * Target Fps + * + * Target FPS for frame interpolation. If set, frame interpolation will be enabled. + */ + target_fps?: number +} + +/** + * ExtendVideoOutput + */ +export type SchemaLtxVideoV095ExtendOutput = { + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * ExtendVideoInput + */ +export type SchemaLtxVideoV095ExtendInput = { + /** + * Prompt + * + * Text prompt to guide generation + */ + prompt: string + /** + * Resolution + * + * Resolution of the generated video (480p or 720p). + */ + resolution?: '480p' | '720p' + /** + * Aspect Ratio + * + * Aspect ratio of the generated video (16:9 or 9:16). + */ + aspect_ratio?: '9:16' | '16:9' + /** + * Expand Prompt + * + * Whether to expand the prompt using the model's own capabilities. + */ + expand_prompt?: boolean + /** + * Seed + * + * Random seed for generation + */ + seed?: number + /** + * Num Inference Steps + * + * Number of inference steps + */ + num_inference_steps?: number + /** + * Negative Prompt + * + * Negative prompt for generation + */ + negative_prompt?: string + /** + * Video + * + * Video to be extended.
+ */ + video: SchemaVideoConditioningInput +} + +/** + * VideoConditioningInput + */ +export type SchemaVideoConditioningInput = { + /** + * Video Url + * + * URL of video to be extended + */ + video_url: string + /** + * Start Frame Num + * + * Frame number of the video from which the conditioning starts. Must be a multiple of 8. + */ + start_frame_num: number +} + +/** + * MulticonditioningVideoOutput + */ +export type SchemaLtxVideoV095MulticonditioningOutput = { + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * MultiConditioningVideoInput + */ +export type SchemaLtxVideoV095MulticonditioningInput = { + /** + * Prompt + * + * Text prompt to guide generation + */ + prompt: string + /** + * Resolution + * + * Resolution of the generated video (480p or 720p). + */ + resolution?: '480p' | '720p' + /** + * Aspect Ratio + * + * Aspect ratio of the generated video (16:9 or 9:16). + */ + aspect_ratio?: '9:16' | '16:9' + /** + * Expand Prompt + * + * Whether to expand the prompt using the model's own capabilities. + */ + expand_prompt?: boolean + /** + * Images + * + * URL of images to use as conditioning + */ + images?: Array<SchemaImageConditioningInput> + /** + * Videos + * + * Videos to use as conditioning + */ + videos?: Array<SchemaVideoConditioningInput> + /** + * Seed + * + * Random seed for generation + */ + seed?: number + /** + * Num Inference Steps + * + * Number of inference steps + */ + num_inference_steps?: number + /** + * Negative Prompt + * + * Negative prompt for generation + */ + negative_prompt?: string +} + +/** + * ImageConditioningInput + */ +export type SchemaImageConditioningInput = { + /** + * Start Frame Num + * + * Frame number of the image from which the conditioning starts. Must be a multiple of 8. + */ + start_frame_num: number + /** + * Image Url + * + * URL of image to use as conditioning + */ + image_url: string +} + +/** + * PikadditionsOutput + * + * Output from Pikadditions generation + */ +export type SchemaPikaV2PikadditionsOutput = { + /** + * Video + * + * The generated video with added objects/images + */ + video: SchemaFile +} + +/** + * PikadditionsRequest + * + * Request model for Pikadditions endpoint + */ +export type SchemaPikaV2PikadditionsInput = { + /** + * Prompt + * + * Text prompt describing what to add + */ + prompt?: string + /** + * Video Url + * + * URL of the input video + */ + video_url: string + /** + * Seed + * + * The seed for the random number generator + */ + seed?: number + /** + * Negative Prompt + * + * Negative prompt to guide the model + */ + negative_prompt?: string + /** + * Image Url + * + * URL of the image to add + */ + image_url: string +} + +/** + * Output + */ +export type SchemaLatentsyncOutput = { + /** + * Video + * + * The generated video with the lip sync. + */ + video: SchemaFile +} + +/** + * Input + */ +export type SchemaLatentsyncInput = { + /** + * Video Url + * + * The URL of the video to generate the lip sync for. + */ + video_url: string + /** + * Guidance Scale + * + * Guidance scale for the model inference + */ + guidance_scale?: number + /** + * Seed + * + * Random seed for generation. If None, a random seed will be used. + */ + seed?: number + /** + * Audio Url + * + * The URL of the audio to generate the lip sync for. + */ + audio_url: string + /** + * Loop Mode + * + * Video loop mode when audio is longer than video.
Options: pingpong, loop + */ + loop_mode?: 'pingpong' | 'loop' +} + +/** + * LipSyncV2Output + */ +export type SchemaSyncLipsyncV2Output = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * LipSyncV2Input + */ +export type SchemaSyncLipsyncV2Input = { + /** + * Model + * + * The model to use for lipsyncing. `lipsync-2-pro` will cost roughly 1.67 times as much as `lipsync-2` for the same duration. + */ + model?: 'lipsync-2' | 'lipsync-2-pro' + /** + * Video Url + * + * URL of the input video + */ + video_url: string + /** + * Sync Mode + * + * Lipsync mode when audio and video durations are out of sync. + */ + sync_mode?: 'cut_off' | 'loop' | 'bounce' | 'silence' | 'remap' + /** + * Audio Url + * + * URL of the input audio + */ + audio_url: string +} + +/** + * VideoOutput + * + * Pydantic model for returning the re-sounded video back to the client. + */ +export type SchemaVideoSoundEffectsGeneratorOutput = { + video: SchemaFile +} + +/** + * VideoInput + * + * Pydantic model for receiving a video file to analyze and re-sound. + */ +export type SchemaVideoSoundEffectsGeneratorInput = { + video_url: SchemaVideo +} + +/** + * Video + * + * Represents a video file. + */ +export type SchemaVideo = { + /** + * File Size + * + * The size of the file in bytes. + */ + file_size?: number | unknown + /** + * File Name + * + * The name of the file. It will be auto-generated if not provided. + */ + file_name?: string | unknown + /** + * Content Type + * + * The mime type of the file. + */ + content_type?: string | unknown + /** + * Url + * + * The URL where the file can be downloaded from. + */ + url: string + /** + * File Data + * + * File data + */ + file_data?: Blob | File | unknown +} + +/** + * WanT2VResponse + */ +export type SchemaWanVaceOutput = { + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * WanT2VRequest + */ +export type SchemaWanVaceInput = { + /** + * Shift + * + * Shift parameter for video generation. + */ + shift?: number + /** + * Video Url + * + * URL to the source video file. If provided, the model will use this video as a reference. + */ + video_url?: string + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Ref Image Urls + * + * URLs of source reference images. If provided, the model will use these images as references. + */ + ref_image_urls?: Array<string> + /** + * Task + * + * Task type for the model. + */ + task?: 'depth' | 'inpainting' + /** + * Frames Per Second + * + * Frames per second of the generated video. Must be between 5 and 24. + */ + frames_per_second?: number + /** + * Mask Image Url + * + * URL to the guiding mask file. If provided, the model will use this mask as a reference to create masked video. If provided, the mask video URL will be ignored. + */ + mask_image_url?: string + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Num Frames + * + * Number of frames to generate. Must be between 81 and 100 (inclusive). Applies only when reference images are the sole input; if a source video or mask video is provided, the output length will match the source, up to 241 frames. + */ + num_frames?: number + /** + * Negative Prompt + * + * Negative prompt for video generation. + */ + negative_prompt?: string + /** + * Aspect Ratio + * + * Aspect ratio of the generated video (auto, 16:9, or 9:16).
+ */ + aspect_ratio?: 'auto' | '9:16' | '16:9' + /** + * Resolution + * + * Resolution of the generated video (480p, 580p, or 720p). + */ + resolution?: '480p' | '580p' | '720p' + /** + * Mask Video Url + * + * URL to the source mask file. If provided, the model will use this mask as a reference. + */ + mask_video_url?: string + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Num Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: number + /** + * Preprocess + * + * Whether to preprocess the input video. + */ + preprocess?: boolean + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean +} + +/** + * MagiVideoExtensionResponse + */ +export type SchemaMagiDistilledExtendVideoOutput = { + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * MagiVideoExtensionRequest + */ +export type SchemaMagiDistilledExtendVideoInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Resolution + * + * Resolution of the generated video (480p or 720p). 480p is 0.5 billing units, and 720p is 1 billing unit. + */ + resolution?: '480p' | '720p' + /** + * Aspect Ratio + * + * Aspect ratio of the generated video. If 'auto', the aspect ratio will be determined automatically based on the input image. + */ + aspect_ratio?: 'auto' | '16:9' | '9:16' | '1:1' + /** + * Video Url + * + * URL of the input video to represent the beginning of the video. If the input video does not match the chosen aspect ratio, it is resized and center cropped. + */ + video_url: string + /** + * Start Frame + * + * The frame to begin the generation from; the preceding frames will be treated as the prefix video. The final video will contain the frames up until this number unchanged, followed by the generated frames. The default start frame is 32 frames before the end of the video, which gives optimal results. + */ + start_frame?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Num Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: 4 | 8 | 16 | 32 + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Num Frames + * + * Number of frames to generate. Must be between 96 and 192 (inclusive). Each additional 24 frames beyond 96 incurs an additional billing unit. + */ + num_frames?: number +} + +/** + * MagiVideoExtensionResponse + */ +export type SchemaMagiExtendVideoOutput = { + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * MagiVideoExtensionRequest + */ +export type SchemaMagiExtendVideoInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Resolution + * + * Resolution of the generated video (480p or 720p). 480p is 0.5 billing units, and 720p is 1 billing unit. + */ + resolution?: '480p' | '720p' + /** + * Aspect Ratio + * + * Aspect ratio of the generated video.
If 'auto', the aspect ratio will be determined automatically based on the input image. + */ + aspect_ratio?: 'auto' | '16:9' | '9:16' | '1:1' + /** + * Video Url + * + * URL of the input video to represent the beginning of the video. If the input video does not match the chosen aspect ratio, it is resized and center cropped. + */ + video_url: string + /** + * Start Frame + * + * The frame to begin the generation from; the preceding frames will be treated as the prefix video. The final video will contain the frames up until this number unchanged, followed by the generated frames. The default start frame is 32 frames before the end of the video, which gives optimal results. + */ + start_frame?: number + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Num Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: 4 | 8 | 16 | 32 | 64 + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Num Frames + * + * Number of frames to generate. Must be between 96 and 192 (inclusive). Each additional 24 frames beyond 96 incurs an additional billing unit. + */ + num_frames?: number +} + +/** + * VideoCondition + * + * Video condition to use for generation. + */ +export type SchemaVideoCondition = { + /** + * Strength + * + * The strength of the condition. + */ + strength?: number + /** + * Start Frame Number + * + * The frame number to start the condition on. + */ + start_frame_number?: number + /** + * Video Url + * + * The URL of the video to use as input. + */ + video_url: string +} + +/** + * ImageCondition + * + * Image condition to use for generation. + */ +export type SchemaImageCondition = { + /** + * Strength + * + * The strength of the condition. + */ + strength?: number + /** + * Start Frame Number + * + * The frame number to start the condition on. + */ + start_frame_number?: number + /** + * Image Url + * + * The URL of the image to use as input. + */ + image_url: string +} + +/** + * MulticonditioningVideoOutput + */ +export type SchemaLtxVideoLoraMulticonditioningOutput = { + /** + * Prompt + * + * The prompt used for generation. + */ + prompt: string + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video. + */ + video: SchemaFile +} + +/** + * MulticonditioningVideoInput + * + * Request model for text-to-video generation with multiple conditions. + */ +export type SchemaLtxVideoLoraMulticonditioningInput = { + /** + * Number Of Steps + * + * The number of inference steps to use. + */ + number_of_steps?: number + /** + * Prompt + * + * The prompt to generate the video from. + */ + prompt: string + /** + * Reverse Video + * + * Whether to reverse the video. + */ + reverse_video?: boolean + /** + * Frame Rate + * + * The frame rate of the video. + */ + frame_rate?: number + /** + * Expand Prompt + * + * Whether to expand the prompt using the LLM. + */ + expand_prompt?: boolean + /** + * Number Of Frames + * + * The number of frames in the video. + */ + number_of_frames?: number + /** + * Loras + * + * The LoRA weights to use for generation. + */ + loras?: Array<SchemaLoRaWeight> + /** + * Images + * + * The image conditions to use for generation. + */ + images?: Array<SchemaImageCondition> + /** + * Enable Safety Checker + * + * Whether to enable the safety checker.
+ */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * The negative prompt to use. + */ + negative_prompt?: string + /** + * Aspect Ratio + * + * The aspect ratio of the video. + */ + aspect_ratio?: '16:9' | '1:1' | '9:16' | 'auto' + /** + * Resolution + * + * The resolution of the video. + */ + resolution?: '480p' | '720p' + /** + * Videos + * + * The video conditions to use for generation. + */ + videos?: Array<SchemaVideoCondition> + /** + * Seed + * + * The seed to use for generation. + */ + seed?: number +} + +/** + * LoRAWeight + * + * LoRA weight to use for generation. + */ +export type SchemaLoRaWeight = { + /** + * Path + * + * URL or path to the LoRA weights. + */ + path: string + /** + * Scale + * + * Scale of the LoRA weight. This is a multiplier applied to the LoRA weight when loading it. + */ + scale?: number + /** + * Weight Name + * + * Name of the LoRA weight. Only used if `path` is a HuggingFace repository, and is only required when the repository contains multiple LoRA weights. + */ + weight_name?: string +} + +/** + * ExtendVideoOutput + */ +export type SchemaLtxVideo13bDevExtendOutput = { + /** + * Prompt + * + * The prompt used for generation. + */ + prompt: string + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * ExtendVideoInput + */ +export type SchemaLtxVideo13bDevExtendInput = { + /** + * Second Pass Skip Initial Steps + * + * The number of inference steps to skip in the initial steps of the second pass. By skipping some steps at the beginning, the second pass can focus on smaller details instead of larger changes. + */ + second_pass_skip_initial_steps?: number + /** + * First Pass Num Inference Steps + * + * Number of inference steps during the first pass. + */ + first_pass_num_inference_steps?: number + /** + * Frame Rate + * + * The frame rate of the video. + */ + frame_rate?: number + /** + * Prompt + * + * Text prompt to guide generation + */ + prompt: string + /** + * Reverse Video + * + * Whether to reverse the video. + */ + reverse_video?: boolean + /** + * Expand Prompt + * + * Whether to expand the prompt using a language model. + */ + expand_prompt?: boolean + /** + * Loras + * + * LoRA weights to use for generation + */ + loras?: Array<SchemaLoRaWeight> + /** + * Second Pass Num Inference Steps + * + * Number of inference steps during the second pass. + */ + second_pass_num_inference_steps?: number + /** + * Num Frames + * + * The number of frames in the video. + */ + num_frames?: number + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Video + * + * Video to be extended. + */ + video: SchemaVideoConditioningInput + /** + * Negative Prompt + * + * Negative prompt for generation + */ + negative_prompt?: string + /** + * Resolution + * + * Resolution of the generated video (480p or 720p). + */ + resolution?: '480p' | '720p' + /** + * Aspect Ratio + * + * The aspect ratio of the video. + */ + aspect_ratio?: '9:16' | '1:1' | '16:9' | 'auto' + /** + * Constant Rate Factor + * + * The constant rate factor (CRF) to compress input media with. Compressed input media more closely matches the model's training data, which can improve motion quality. + */ + constant_rate_factor?: number + /** + * First Pass Skip Final Steps + * + * Number of inference steps to skip in the final steps of the first pass.
By skipping some steps at the end, the first pass can focus on larger changes instead of smaller details. + */ + first_pass_skip_final_steps?: number + /** + * Seed + * + * Random seed for generation + */ + seed?: number +} + +/** + * MultiConditioningVideoOutput + */ +export type SchemaLtxVideo13bDevMulticonditioningOutput = { + /** + * Prompt + * + * The prompt used for generation. + */ + prompt: string + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * MultiConditioningVideoInput + */ +export type SchemaLtxVideo13bDevMulticonditioningInput = { + /** + * Second Pass Skip Initial Steps + * + * The number of inference steps to skip in the initial steps of the second pass. By skipping some steps at the beginning, the second pass can focus on smaller details instead of larger changes. + */ + second_pass_skip_initial_steps?: number + /** + * First Pass Num Inference Steps + * + * Number of inference steps during the first pass. + */ + first_pass_num_inference_steps?: number + /** + * Frame Rate + * + * The frame rate of the video. + */ + frame_rate?: number + /** + * Prompt + * + * Text prompt to guide generation + */ + prompt: string + /** + * Reverse Video + * + * Whether to reverse the video. + */ + reverse_video?: boolean + /** + * Expand Prompt + * + * Whether to expand the prompt using a language model. + */ + expand_prompt?: boolean + /** + * Loras + * + * LoRA weights to use for generation + */ + loras?: Array<SchemaLoRaWeight> + /** + * Images + * + * URL of images to use as conditioning + */ + images?: Array<SchemaImageConditioningInput> + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Num Frames + * + * The number of frames in the video. + */ + num_frames?: number + /** + * Second Pass Num Inference Steps + * + * Number of inference steps during the second pass. + */ + second_pass_num_inference_steps?: number + /** + * Negative Prompt + * + * Negative prompt for generation + */ + negative_prompt?: string + /** + * Resolution + * + * Resolution of the generated video (480p or 720p). + */ + resolution?: '480p' | '720p' + /** + * Aspect Ratio + * + * The aspect ratio of the video. + */ + aspect_ratio?: '9:16' | '1:1' | '16:9' | 'auto' + /** + * Videos + * + * Videos to use as conditioning + */ + videos?: Array<SchemaVideoConditioningInput> + /** + * Constant Rate Factor + * + * The constant rate factor (CRF) to compress input media with. Compressed input media more closely matches the model's training data, which can improve motion quality. + */ + constant_rate_factor?: number + /** + * First Pass Skip Final Steps + * + * Number of inference steps to skip in the final steps of the first pass. By skipping some steps at the end, the first pass can focus on larger changes instead of smaller details. + */ + first_pass_skip_final_steps?: number + /** + * Seed + * + * Random seed for generation + */ + seed?: number +} + +/** + * MultiConditioningVideoOutput + */ +export type SchemaLtxVideo13bDistilledMulticonditioningOutput = { + /** + * Prompt + * + * The prompt used for generation. + */ + prompt: string + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file.
+ */ + video: SchemaFile +} + +/** + * DistilledMultiConditioningVideoInput + * + * Distilled model input + */ +export type SchemaLtxVideo13bDistilledMulticonditioningInput = { + /** + * Second Pass Skip Initial Steps + * + * The number of inference steps to skip in the initial steps of the second pass. By skipping some steps at the beginning, the second pass can focus on smaller details instead of larger changes. + */ + second_pass_skip_initial_steps?: number + /** + * First Pass Num Inference Steps + * + * Number of inference steps during the first pass. + */ + first_pass_num_inference_steps?: number + /** + * Frame Rate + * + * The frame rate of the video. + */ + frame_rate?: number + /** + * Reverse Video + * + * Whether to reverse the video. + */ + reverse_video?: boolean + /** + * Prompt + * + * Text prompt to guide generation + */ + prompt: string + /** + * Expand Prompt + * + * Whether to expand the prompt using a language model. + */ + expand_prompt?: boolean + /** + * Loras + * + * LoRA weights to use for generation + */ + loras?: Array<SchemaLoRaWeight> + /** + * Images + * + * URL of images to use as conditioning + */ + images?: Array<SchemaImageConditioningInput> + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Num Frames + * + * The number of frames in the video. + */ + num_frames?: number + /** + * Second Pass Num Inference Steps + * + * Number of inference steps during the second pass. + */ + second_pass_num_inference_steps?: number + /** + * Negative Prompt + * + * Negative prompt for generation + */ + negative_prompt?: string + /** + * Resolution + * + * Resolution of the generated video (480p or 720p). + */ + resolution?: '480p' | '720p' + /** + * Aspect Ratio + * + * The aspect ratio of the video. + */ + aspect_ratio?: '9:16' | '1:1' | '16:9' | 'auto' + /** + * Constant Rate Factor + * + * The constant rate factor (CRF) to compress input media with. Compressed input media more closely matches the model's training data, which can improve motion quality. + */ + constant_rate_factor?: number + /** + * Videos + * + * Videos to use as conditioning + */ + videos?: Array<SchemaVideoConditioningInput> + /** + * First Pass Skip Final Steps + * + * Number of inference steps to skip in the final steps of the first pass. By skipping some steps at the end, the first pass can focus on larger changes instead of smaller details. + */ + first_pass_skip_final_steps?: number + /** + * Seed + * + * Random seed for generation + */ + seed?: number +} + +/** + * ExtendVideoOutput + */ +export type SchemaLtxVideo13bDistilledExtendOutput = { + /** + * Prompt + * + * The prompt used for generation. + */ + prompt: string + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * DistilledExtendVideoInput + * + * Distilled model input + */ +export type SchemaLtxVideo13bDistilledExtendInput = { + /** + * Second Pass Skip Initial Steps + * + * The number of inference steps to skip in the initial steps of the second pass. By skipping some steps at the beginning, the second pass can focus on smaller details instead of larger changes. + */ + second_pass_skip_initial_steps?: number + /** + * First Pass Num Inference Steps + * + * Number of inference steps during the first pass. + */ + first_pass_num_inference_steps?: number + /** + * Frame Rate + * + * The frame rate of the video. + */ + frame_rate?: number + /** + * Reverse Video + * + * Whether to reverse the video.
+ */ + reverse_video?: boolean + /** + * Prompt + * + * Text prompt to guide generation + */ + prompt: string + /** + * Expand Prompt + * + * Whether to expand the prompt using a language model. + */ + expand_prompt?: boolean + /** + * Loras + * + * LoRA weights to use for generation + */ + loras?: Array<SchemaLoRaWeight> + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Num Frames + * + * The number of frames in the video. + */ + num_frames?: number + /** + * Second Pass Num Inference Steps + * + * Number of inference steps during the second pass. + */ + second_pass_num_inference_steps?: number + /** + * Video + * + * Video to be extended. + */ + video: SchemaVideoConditioningInput + /** + * Negative Prompt + * + * Negative prompt for generation + */ + negative_prompt?: string + /** + * Resolution + * + * Resolution of the generated video (480p or 720p). + */ + resolution?: '480p' | '720p' + /** + * Aspect Ratio + * + * The aspect ratio of the video. + */ + aspect_ratio?: '9:16' | '1:1' | '16:9' | 'auto' + /** + * Constant Rate Factor + * + * The constant rate factor (CRF) to compress input media with. Compressed input media more closely matches the model's training data, which can improve motion quality. + */ + constant_rate_factor?: number + /** + * First Pass Skip Final Steps + * + * Number of inference steps to skip in the final steps of the first pass. By skipping some steps at the end, the first pass can focus on larger changes instead of smaller details. + */ + first_pass_skip_final_steps?: number + /** + * Seed + * + * Random seed for generation + */ + seed?: number +} + +/** + * WanVACEResponse + */ +export type SchemaWanVace14bOutput = { + /** + * Prompt + * + * The prompt used for generation. + */ + prompt: string + /** + * ZIP archive of all video frames if requested. + */ + frames_zip?: SchemaFile | unknown + /** + * Seed + * + * The seed used for generation. + */ + seed: number + video: SchemaVideoFile +} + +/** + * VideoFile + */ +export type SchemaVideoFile = { + /** + * File Size + * + * The size of the file in bytes. + */ + file_size?: number | unknown + /** + * Duration + * + * The duration of the video + */ + duration?: number | unknown + /** + * Height + * + * The height of the video + */ + height?: number | unknown + /** + * Url + * + * The URL where the file can be downloaded from. + */ + url: string + /** + * Width + * + * The width of the video + */ + width?: number | unknown + /** + * Fps + * + * The FPS of the video + */ + fps?: number | unknown + /** + * File Name + * + * The name of the file. It will be auto-generated if not provided. + */ + file_name?: string | unknown + /** + * Num Frames + * + * The number of frames in the video + */ + num_frames?: number | unknown + /** + * Content Type + * + * The mime type of the file. + */ + content_type?: string | unknown +} + +/** + * WanVACERequest + */ +export type SchemaWanVace14bInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Video URL + * + * URL to the source video file. If provided, the model will use this video as a reference. + */ + video_url?: string | unknown + /** + * Number of Interpolated Frames + * + * Number of frames to interpolate between the original frames. A value of 0 means no interpolation. + */ + num_interpolated_frames?: number + /** + * Temporal Downsample Factor + * + * Temporal downsample factor for the video.
This is an integer value that determines how many frames to skip in the video. A value of 0 means no downsampling. For each downsample factor, one upsample factor will automatically be applied. + */ + temporal_downsample_factor?: number + /** + * First Frame URL + * + * URL to the first frame of the video. If provided, the model will use this frame as a reference. + */ + first_frame_url?: string | unknown + /** + * Reference Image URLs + * + * URLs to source reference images. If provided, the model will use these images as reference. + */ + ref_image_urls?: Array + /** + * Transparency Mode + * + * The transparency mode to apply to the first and last frames. This controls how the transparent areas of the first and last frames are filled. + */ + transparency_mode?: 'content_aware' | 'white' | 'black' + /** + * Number of Frames + * + * Number of frames to generate. Must be between 81 and 241 (inclusive). + */ + num_frames?: number + /** + * Auto Downsample Min FPS + * + * The minimum frames per second to downsample the video to. This is used to help determine the auto downsample factor to try and find the lowest detail-preserving downsample factor. The default value is appropriate for most videos; if you are using a video with very fast motion, you may need to increase this value. If your video has a very low amount of motion, you could decrease this value to allow for higher downsampling and thus longer sequences. + */ + auto_downsample_min_fps?: number + /** + * Guidance Scale + * + * Guidance scale for classifier-free guidance. Higher values encourage the model to generate images closely related to the text prompt. + */ + guidance_scale?: number + /** + * Sampler + * + * Sampler to use for video generation. + */ + sampler?: 'unipc' | 'dpm++' | 'euler' + /** + * Video Quality + * + * The quality of the generated video. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Mask Video URL + * + * URL to the source mask file. If provided, the model will use this mask as a reference. + */ + mask_video_url?: string | unknown + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number | unknown + /** + * Interpolator Model + * + * The model to use for frame interpolation. Options are 'rife' or 'film'. + */ + interpolator_model?: 'rife' | 'film' + /** + * Enable Auto Downsample + * + * If true, the model will automatically temporally downsample the video to an appropriate frame length for the model, then will interpolate it back to the original frame length. + */ + enable_auto_downsample?: boolean + /** + * Preprocess + * + * Whether to preprocess the input video. + */ + preprocess?: boolean + /** + * Shift + * + * Shift parameter for video generation. + */ + shift?: number + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean + /** + * Acceleration + * + * Acceleration to use for inference. Options are 'none', 'low', or 'regular'. Accelerated inference will very slightly affect output, but will be significantly faster. + */ + acceleration?: 'none' | 'low' | 'regular' | unknown + /** + * Mask Image URL + * + * URL to the guiding mask file. If provided, the model will use this mask as a reference to create masked video. If provided, mask_video_url will be ignored.
+ */ + mask_image_url?: string | unknown + /** + * Task + * + * Task type for the model. + */ + task?: 'depth' | 'pose' | 'inpainting' | 'outpainting' | 'reframe' + /** + * Match Input Number of Frames + * + * If true, the number of frames in the generated video will match the number of frames in the input video. If false, the number of frames will be determined by the num_frames parameter. + */ + match_input_num_frames?: boolean + /** + * Frames per Second + * + * Frames per second of the generated video. Must be between 5 and 30. Ignored if match_input_frames_per_second is true. + */ + frames_per_second?: number | unknown + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * Negative prompt for video generation. + */ + negative_prompt?: string + /** + * Video Write Mode + * + * The write mode of the generated video. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Return Frames Zip + * + * If true, also return a ZIP file containing all generated frames. + */ + return_frames_zip?: boolean + /** + * Resolution + * + * Resolution of the generated video. + */ + resolution?: 'auto' | '240p' | '360p' | '480p' | '580p' | '720p' + /** + * Aspect Ratio + * + * Aspect ratio of the generated video. + */ + aspect_ratio?: 'auto' | '16:9' | '1:1' | '9:16' + /** + * Match Input Frames Per Second + * + * If true, the frames per second of the generated video will match the input video. If false, the frames per second will be determined by the frames_per_second parameter. + */ + match_input_frames_per_second?: boolean + /** + * Number of Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: number + /** + * Last Frame URL + * + * URL to the last frame of the video. If provided, the model will use this frame as a reference.
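+ *
+ * @example
+ * // Editor's illustrative sketch, not part of the generated schema: one way to
+ * // call the endpoint this type appears to describe, using the official
+ * // @fal-ai/client package. The endpoint id is inferred from the type name and
+ * // may differ; URLs are placeholders.
+ * import { fal } from '@fal-ai/client'
+ *
+ * const { data } = await fal.subscribe('fal-ai/wan-vace-14b', {
+ *   input: {
+ *     prompt: 'A red kite weaving between snow-covered peaks',
+ *     ref_image_urls: ['https://example.com/kite.png'],
+ *     resolution: '480p',
+ *   } satisfies SchemaWanVace14bInput,
+ * })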
+ */ + last_frame_url?: string | unknown +} + +/** + * LipsyncAppOutput + */ +export type SchemaLipsyncOutput = { + video: SchemaFile +} + +/** + * LipsyncInput + */ +export type SchemaLipsyncInput = { + /** + * Video Url + */ + video_url: string + /** + * Audio Url + */ + audio_url: string +} + +/** + * ReframeOutput + */ +export type SchemaLumaDreamMachineRay2ReframeOutput = { + /** + * Video + * + * URL of the reframed video + */ + video: SchemaFile +} + +/** + * ReframeVideoRequest + */ +export type SchemaLumaDreamMachineRay2ReframeInput = { + /** + * Prompt + * + * Optional prompt for reframing + */ + prompt?: string + /** + * Aspect Ratio + * + * The aspect ratio of the reframed video + */ + aspect_ratio: '1:1' | '16:9' | '9:16' | '4:3' | '3:4' | '21:9' | '9:21' + /** + * Y Start + * + * Start Y coordinate for reframing + */ + y_start?: number + /** + * X End + * + * End X coordinate for reframing + */ + x_end?: number + /** + * Video Url + * + * URL of the input video to reframe + */ + video_url: string + /** + * Y End + * + * End Y coordinate for reframing + */ + y_end?: number + /** + * X Start + * + * Start X coordinate for reframing + */ + x_start?: number + /** + * Grid Position Y + * + * Y position of the grid for reframing + */ + grid_position_y?: number + /** + * Grid Position X + * + * X position of the grid for reframing + */ + grid_position_x?: number + /** + * Image Url + * + * Optional URL of the first frame image for reframing + */ + image_url?: string +} + +/** + * ReframeOutput + */ +export type SchemaLumaDreamMachineRay2FlashReframeOutput = { + /** + * Video + * + * URL of the reframed video + */ + video: SchemaFile +} + +/** + * ReframeVideoRequest + */ +export type SchemaLumaDreamMachineRay2FlashReframeInput = { + /** + * Prompt + * + * Optional prompt for reframing + */ + prompt?: string + /** + * Aspect Ratio + * + * The aspect ratio of the reframed video + */ + aspect_ratio: '1:1' | '16:9' | '9:16' | '4:3' | '3:4' | '21:9' | '9:21' + /** + * Y Start + * + * Start Y coordinate for reframing + */ + y_start?: number + /** + * X End + * + * End X coordinate for reframing + */ + x_end?: number + /** + * Video Url + * + * URL of the input video to reframe + */ + video_url: string + /** + * Y End + * + * End Y coordinate for reframing + */ + y_end?: number + /** + * X Start + * + * Start X coordinate for reframing + */ + x_start?: number + /** + * Grid Position Y + * + * Y position of the grid for reframing + */ + grid_position_y?: number + /** + * Grid Position X + * + * X position of the grid for reframing + */ + grid_position_x?: number + /** + * Image Url + * + * Optional URL of the first frame image for reframing + */ + image_url?: string +} + +/** + * WanT2VResponse + */ +export type SchemaWanVace13bOutput = { + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * WanT2VRequest + */ +export type SchemaWanVace13bInput = { + /** + * Shift + * + * Shift parameter for video generation. + */ + shift?: number + /** + * Video Url + * + * URL to the source video file. If provided, the model will use this video as a reference. + */ + video_url?: string + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Mask Image Url + * + * URL to the guiding mask file. If provided, the model will use this mask as a reference to create masked video. If provided, mask_video_url will be ignored.
+ */ + mask_image_url?: string + /** + * Task + * + * Task type for the model. + */ + task?: 'depth' | 'inpainting' | 'pose' + /** + * Frames Per Second + * + * Frames per second of the generated video. Must be between 5 and 24. + */ + frames_per_second?: number + /** + * Ref Image Urls + * + * URLs to source reference images. If provided, the model will use these images as reference. + */ + ref_image_urls?: Array + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Num Frames + * + * Number of frames to generate. Must be between 81 and 100 (inclusive). Applies only when reference images are the sole input; if a source video or mask video is provided, the output length will match the source, up to 241 frames. + */ + num_frames?: number + /** + * Negative Prompt + * + * Negative prompt for video generation. + */ + negative_prompt?: string + /** + * Resolution + * + * Resolution of the generated video (480p, 580p, or 720p). + */ + resolution?: '480p' | '580p' | '720p' + /** + * Aspect Ratio + * + * Aspect ratio of the generated video (auto, 16:9, or 9:16). + */ + aspect_ratio?: 'auto' | '9:16' | '16:9' + /** + * Mask Video Url + * + * URL to the source mask file. If provided, the model will use this mask as a reference. + */ + mask_video_url?: string + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Num Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: number + /** + * Preprocess + * + * Whether to preprocess the input video. + */ + preprocess?: boolean + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean +} + +/** + * CombineOutput + */ +export type SchemaFfmpegApiMergeAudioVideoOutput = { + video: SchemaFile +} + +/** + * CombineInput + */ +export type SchemaFfmpegApiMergeAudioVideoInput = { + /** + * Video Url + * + * URL of the video file to use as the video track + */ + video_url: string + /** + * Start Offset + * + * Offset in seconds for when the audio should start relative to the video + */ + start_offset?: number + /** + * Audio Url + * + * URL of the audio file to use as the audio track + */ + audio_url: string +} + +/** + * DWPoseVideoOutput + */ +export type SchemaDwposeVideoOutput = { + /** + * Video + * + * The output video with pose estimation. + */ + video: SchemaFile +} + +/** + * DWPoseVideoInput + */ +export type SchemaDwposeVideoInput = { + /** + * Video Url + * + * URL of video to be used for pose estimation + */ + video_url: string + /** + * Draw Mode + * + * Mode of drawing the pose on the video. Options are: 'full-pose', 'body-pose', 'face-pose', 'hand-pose', 'face-hand-mask', 'face-mask', 'hand-mask'. + */ + draw_mode?: + | 'full-pose' + | 'body-pose' + | 'face-pose' + | 'hand-pose' + | 'face-hand-mask' + | 'face-mask' + | 'hand-mask' +} + +/** + * WanVACEDepthResponse + */ +export type SchemaWanVace14bDepthOutput = { + /** + * Prompt + * + * The prompt used for generation. + */ + prompt: string + /** + * ZIP archive of all video frames if requested. + */ + frames_zip?: SchemaFile | unknown + /** + * Seed + * + * The seed used for generation. + */ + seed: number + video: SchemaVideoFile +} + +/** + * WanVACEDepthRequest + */ +export type SchemaWanVace14bDepthInput = { + /** + * Prompt + * + * The text prompt to guide video generation.
+ */ + prompt: string + /** + * Video URL + * + * URL to the source video file. Required for depth task. + */ + video_url: string + /** + * Number of Interpolated Frames + * + * Number of frames to interpolate between the original frames. A value of 0 means no interpolation. + */ + num_interpolated_frames?: number + /** + * Temporal Downsample Factor + * + * Temporal downsample factor for the video. This is an integer value that determines how many frames to skip in the video. A value of 0 means no downsampling. For each downsample factor, one upsample factor will automatically be applied. + */ + temporal_downsample_factor?: number + /** + * First Frame URL + * + * URL to the first frame of the video. If provided, the model will use this frame as a reference. + */ + first_frame_url?: string | unknown + /** + * Reference Image URLs + * + * URLs to source reference images. If provided, the model will use these images as reference. + */ + ref_image_urls?: Array + /** + * Transparency Mode + * + * The transparency mode to apply to the first and last frames. This controls how the transparent areas of the first and last frames are filled. + */ + transparency_mode?: 'content_aware' | 'white' | 'black' + /** + * Number of Frames + * + * Number of frames to generate. Must be between 81 and 241 (inclusive). + */ + num_frames?: number + /** + * Auto Downsample Min FPS + * + * The minimum frames per second to downsample the video to. This is used to help determine the auto downsample factor to try and find the lowest detail-preserving downsample factor. The default value is appropriate for most videos; if you are using a video with very fast motion, you may need to increase this value. If your video has a very low amount of motion, you could decrease this value to allow for higher downsampling and thus longer sequences. + */ + auto_downsample_min_fps?: number + /** + * Guidance Scale + * + * Guidance scale for classifier-free guidance. Higher values encourage the model to generate images closely related to the text prompt. + */ + guidance_scale?: number + /** + * Sampler + * + * Sampler to use for video generation. + */ + sampler?: 'unipc' | 'dpm++' | 'euler' + /** + * Video Quality + * + * The quality of the generated video. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number | unknown + /** + * Interpolator Model + * + * The model to use for frame interpolation. Options are 'rife' or 'film'. + */ + interpolator_model?: 'rife' | 'film' + /** + * Enable Auto Downsample + * + * If true, the model will automatically temporally downsample the video to an appropriate frame length for the model, then will interpolate it back to the original frame length. + */ + enable_auto_downsample?: boolean + /** + * Preprocess + * + * Whether to preprocess the input video. + */ + preprocess?: boolean + /** + * Shift + * + * Shift parameter for video generation. + */ + shift?: number + /** + * Acceleration + * + * Acceleration to use for inference. Options are 'none', 'low', or 'regular'. Accelerated inference will very slightly affect output, but will be significantly faster.
+ */ + acceleration?: 'none' | 'low' | 'regular' | unknown + /** + * Match Input Number of Frames + * + * If true, the number of frames in the generated video will match the number of frames in the input video. If false, the number of frames will be determined by the num_frames parameter. + */ + match_input_num_frames?: boolean + /** + * Frames per Second + * + * Frames per second of the generated video. Must be between 5 and 30. Ignored if match_input_frames_per_second is true. + */ + frames_per_second?: number | unknown + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * Negative prompt for video generation. + */ + negative_prompt?: string + /** + * Video Write Mode + * + * The write mode of the generated video. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Return Frames Zip + * + * If true, also return a ZIP file containing all generated frames. + */ + return_frames_zip?: boolean + /** + * Resolution + * + * Resolution of the generated video. + */ + resolution?: 'auto' | '240p' | '360p' | '480p' | '580p' | '720p' + /** + * Aspect Ratio + * + * Aspect ratio of the generated video. + */ + aspect_ratio?: 'auto' | '16:9' | '1:1' | '9:16' + /** + * Match Input Frames Per Second + * + * If true, the frames per second of the generated video will match the input video. If false, the frames per second will be determined by the frames_per_second parameter. + */ + match_input_frames_per_second?: boolean + /** + * Number of Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: number + /** + * Last Frame URL + * + * URL to the last frame of the video. If provided, the model will use this frame as a reference. + */ + last_frame_url?: string | unknown +} + +/** + * WanVACEPoseResponse + */ +export type SchemaWanVace14bPoseOutput = { + /** + * Prompt + * + * The prompt used for generation. + */ + prompt: string + /** + * ZIP archive of all video frames if requested. + */ + frames_zip?: SchemaFile | unknown + /** + * Seed + * + * The seed used for generation. + */ + seed: number + video: SchemaVideoFile +} + +/** + * WanVACEPoseRequest + */ +export type SchemaWanVace14bPoseInput = { + /** + * Prompt + * + * The text prompt to guide video generation. For pose task, the prompt should describe the desired pose and action of the subject in the video. + */ + prompt: string + /** + * Video URL + * + * URL to the source video file. Required for pose task. + */ + video_url: string + /** + * Number of Interpolated Frames + * + * Number of frames to interpolate between the original frames. A value of 0 means no interpolation. + */ + num_interpolated_frames?: number + /** + * Temporal Downsample Factor + * + * Temporal downsample factor for the video. This is an integer value that determines how many frames to skip in the video. A value of 0 means no downsampling. For each downsample factor, one upsample factor will automatically be applied. + */ + temporal_downsample_factor?: number + /** + * First Frame URL + * + * URL to the first frame of the video. If provided, the model will use this frame as a reference. + */ + first_frame_url?: string | unknown + /** + * Reference Image URLs + * + * URLs to source reference images. If provided, the model will use these images as reference.
+ */ + ref_image_urls?: Array + /** + * Transparency Mode + * + * The transparency mode to apply to the first and last frames. This controls how the transparent areas of the first and last frames are filled. + */ + transparency_mode?: 'content_aware' | 'white' | 'black' + /** + * Number of Frames + * + * Number of frames to generate. Must be between 81 and 241 (inclusive). + */ + num_frames?: number + /** + * Auto Downsample Min FPS + * + * The minimum frames per second to downsample the video to. This is used to help determine the auto downsample factor to try and find the lowest detail-preserving downsample factor. The default value is appropriate for most videos; if you are using a video with very fast motion, you may need to increase this value. If your video has a very low amount of motion, you could decrease this value to allow for higher downsampling and thus longer sequences. + */ + auto_downsample_min_fps?: number + /** + * Guidance Scale + * + * Guidance scale for classifier-free guidance. Higher values encourage the model to generate images closely related to the text prompt. + */ + guidance_scale?: number + /** + * Sampler + * + * Sampler to use for video generation. + */ + sampler?: 'unipc' | 'dpm++' | 'euler' + /** + * Video Quality + * + * The quality of the generated video. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number | unknown + /** + * Interpolator Model + * + * The model to use for frame interpolation. Options are 'rife' or 'film'. + */ + interpolator_model?: 'rife' | 'film' + /** + * Enable Auto Downsample + * + * If true, the model will automatically temporally downsample the video to an appropriate frame length for the model, then will interpolate it back to the original frame length. + */ + enable_auto_downsample?: boolean + /** + * Preprocess + * + * Whether to preprocess the input video. + */ + preprocess?: boolean + /** + * Shift + * + * Shift parameter for video generation. + */ + shift?: number + /** + * Acceleration + * + * Acceleration to use for inference. Options are 'none', 'low', or 'regular'. Accelerated inference will very slightly affect output, but will be significantly faster. + */ + acceleration?: 'none' | 'low' | 'regular' | unknown + /** + * Match Input Number of Frames + * + * If true, the number of frames in the generated video will match the number of frames in the input video. If false, the number of frames will be determined by the num_frames parameter. + */ + match_input_num_frames?: boolean + /** + * Frames per Second + * + * Frames per second of the generated video. Must be between 5 and 30. Ignored if match_input_frames_per_second is true. + */ + frames_per_second?: number | unknown + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * Negative prompt for video generation. + */ + negative_prompt?: string + /** + * Video Write Mode + * + * The write mode of the generated video. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Return Frames Zip + * + * If true, also return a ZIP file containing all generated frames.
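+ *
+ * @example
+ * // Editor's illustrative sketch, not part of the generated schema: requesting
+ * // the pose task together with a frames ZIP. The output's optional frames_zip
+ * // field (see SchemaWanVace14bPoseOutput) is only populated when this flag is
+ * // set. URLs and the prompt are placeholders.
+ * const input: SchemaWanVace14bPoseInput = {
+ *   prompt: 'The dancer repeats the routine in a neon-lit studio',
+ *   video_url: 'https://example.com/reference-dance.mp4',
+ *   return_frames_zip: true,
+ * }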
+ */ + return_frames_zip?: boolean + /** + * Resolution + * + * Resolution of the generated video. + */ + resolution?: 'auto' | '240p' | '360p' | '480p' | '580p' | '720p' + /** + * Aspect Ratio + * + * Aspect ratio of the generated video. + */ + aspect_ratio?: 'auto' | '16:9' | '1:1' | '9:16' + /** + * Match Input Frames Per Second + * + * If true, the frames per second of the generated video will match the input video. If false, the frames per second will be determined by the frames_per_second parameter. + */ + match_input_frames_per_second?: boolean + /** + * Number of Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: number + /** + * Last Frame URL + * + * URL to the last frame of the video. If provided, the model will use this frame as a reference. + */ + last_frame_url?: string | unknown +} + +/** + * WanVACEInpaintingResponse + */ +export type SchemaWanVace14bInpaintingOutput = { + /** + * Prompt + * + * The prompt used for generation. + */ + prompt: string + /** + * ZIP archive of all video frames if requested. + */ + frames_zip?: SchemaFile | unknown + /** + * Seed + * + * The seed used for generation. + */ + seed: number + video: SchemaVideoFile +} + +/** + * WanVACEInpaintingRequest + */ +export type SchemaWanVace14bInpaintingInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Video URL + * + * URL to the source video file. Required for inpainting. + */ + video_url: string + /** + * Number of Interpolated Frames + * + * Number of frames to interpolate between the original frames. A value of 0 means no interpolation. + */ + num_interpolated_frames?: number + /** + * Temporal Downsample Factor + * + * Temporal downsample factor for the video. This is an integer value that determines how many frames to skip in the video. A value of 0 means no downsampling. For each downsample factor, one upsample factor will automatically be applied. + */ + temporal_downsample_factor?: number + /** + * First Frame URL + * + * URL to the first frame of the video. If provided, the model will use this frame as a reference. + */ + first_frame_url?: string | unknown + /** + * Reference Image URLs + * + * URLs to source reference images. If provided, the model will use these images as reference. + */ + ref_image_urls?: Array + /** + * Transparency Mode + * + * The transparency mode to apply to the first and last frames. This controls how the transparent areas of the first and last frames are filled. + */ + transparency_mode?: 'content_aware' | 'white' | 'black' + /** + * Number of Frames + * + * Number of frames to generate. Must be between 81 and 241 (inclusive). + */ + num_frames?: number + /** + * Auto Downsample Min FPS + * + * The minimum frames per second to downsample the video to. This is used to help determine the auto downsample factor to try and find the lowest detail-preserving downsample factor. The default value is appropriate for most videos; if you are using a video with very fast motion, you may need to increase this value. If your video has a very low amount of motion, you could decrease this value to allow for higher downsampling and thus longer sequences. + */ + auto_downsample_min_fps?: number + /** + * Guidance Scale + * + * Guidance scale for classifier-free guidance. Higher values encourage the model to generate images closely related to the text prompt.
+ */ + guidance_scale?: number + /** + * Sampler + * + * Sampler to use for video generation. + */ + sampler?: 'unipc' | 'dpm++' | 'euler' + /** + * Video Quality + * + * The quality of the generated video. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Mask Video URL + * + * URL to the source mask file. Required for inpainting. + */ + mask_video_url: string | unknown + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number | unknown + /** + * Interpolator Model + * + * The model to use for frame interpolation. Options are 'rife' or 'film'. + */ + interpolator_model?: 'rife' | 'film' + /** + * Enable Auto Downsample + * + * If true, the model will automatically temporally downsample the video to an appropriate frame length for the model, then will interpolate it back to the original frame length. + */ + enable_auto_downsample?: boolean + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean + /** + * Shift + * + * Shift parameter for video generation. + */ + shift?: number + /** + * Preprocess + * + * Whether to preprocess the input video. + */ + preprocess?: boolean + /** + * Acceleration + * + * Acceleration to use for inference. Options are 'none', 'low', or 'regular'. Accelerated inference will very slightly affect output, but will be significantly faster. + */ + acceleration?: 'none' | 'low' | 'regular' | unknown + /** + * Mask Image URL + * + * URL to the guiding mask file. If provided, the model will use this mask as a reference to create masked video using salient mask tracking. Will be ignored if mask_video_url is provided. + */ + mask_image_url?: string | unknown + /** + * Match Input Number of Frames + * + * If true, the number of frames in the generated video will match the number of frames in the input video. If false, the number of frames will be determined by the num_frames parameter. + */ + match_input_num_frames?: boolean + /** + * Frames per Second + * + * Frames per second of the generated video. Must be between 5 and 30. Ignored if match_input_frames_per_second is true. + */ + frames_per_second?: number | unknown + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * Negative prompt for video generation. + */ + negative_prompt?: string + /** + * Video Write Mode + * + * The write mode of the generated video. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Return Frames Zip + * + * If true, also return a ZIP file containing all generated frames. + */ + return_frames_zip?: boolean + /** + * Resolution + * + * Resolution of the generated video. + */ + resolution?: 'auto' | '240p' | '360p' | '480p' | '580p' | '720p' + /** + * Aspect Ratio + * + * Aspect ratio of the generated video. + */ + aspect_ratio?: 'auto' | '16:9' | '1:1' | '9:16' + /** + * Match Input Frames Per Second + * + * If true, the frames per second of the generated video will match the input video. If false, the frames per second will be determined by the frames_per_second parameter. + */ + match_input_frames_per_second?: boolean + /** + * Number of Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer.
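+ *
+ * @example
+ * // Editor's illustrative sketch, not part of the generated schema: a minimal
+ * // inpainting request. Unlike the other VACE variants in this file,
+ * // mask_video_url is a required field here. URLs and the prompt are
+ * // placeholders.
+ * const input: SchemaWanVace14bInpaintingInput = {
+ *   prompt: 'Replace the billboard with dense ivy',
+ *   video_url: 'https://example.com/street.mp4',
+ *   mask_video_url: 'https://example.com/street-mask.mp4',
+ *   num_inference_steps: 30,
+ * }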
+ */ + num_inference_steps?: number + /** + * Last Frame URL + * + * URL to the last frame of the video. If provided, the model will use this frame as a reference. + */ + last_frame_url?: string | unknown +} + +/** + * WanVACEOutpaintingResponse + */ +export type SchemaWanVace14bOutpaintingOutput = { + /** + * Prompt + * + * The prompt used for generation. + */ + prompt: string + /** + * ZIP archive of all video frames if requested. + */ + frames_zip?: SchemaFile | unknown + /** + * Seed + * + * The seed used for generation. + */ + seed: number + video: SchemaVideoFile +} + +/** + * WanVACEOutpaintingRequest + */ +export type SchemaWanVace14bOutpaintingInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Video URL + * + * URL to the source video file. Required for outpainting. + */ + video_url: string + /** + * Number of Interpolated Frames + * + * Number of frames to interpolate between the original frames. A value of 0 means no interpolation. + */ + num_interpolated_frames?: number + /** + * Temporal Downsample Factor + * + * Temporal downsample factor for the video. This is an integer value that determines how many frames to skip in the video. A value of 0 means no downsampling. For each downsample factor, one upsample factor will automatically be applied. + */ + temporal_downsample_factor?: number + /** + * First Frame URL + * + * URL to the first frame of the video. If provided, the model will use this frame as a reference. + */ + first_frame_url?: string | unknown + /** + * Reference Image URLs + * + * URLs to source reference images. If provided, the model will use these images as reference. + */ + ref_image_urls?: Array + /** + * Expand Ratio + * + * Amount of expansion. This is a float value between 0 and 1, where 0.25 adds 25% to the original video size on the specified sides. + */ + expand_ratio?: number + /** + * Transparency Mode + * + * The transparency mode to apply to the first and last frames. This controls how the transparent areas of the first and last frames are filled. + */ + transparency_mode?: 'content_aware' | 'white' | 'black' + /** + * Number of Frames + * + * Number of frames to generate. Must be between 81 and 241 (inclusive). + */ + num_frames?: number + /** + * Auto Downsample Min FPS + * + * The minimum frames per second to downsample the video to. This is used to help determine the auto downsample factor to try and find the lowest detail-preserving downsample factor. The default value is appropriate for most videos; if you are using a video with very fast motion, you may need to increase this value. If your video has a very low amount of motion, you could decrease this value to allow for higher downsampling and thus longer sequences. + */ + auto_downsample_min_fps?: number + /** + * Expand Bottom + * + * Whether to expand the video to the bottom. + */ + expand_bottom?: boolean + /** + * Sampler + * + * Sampler to use for video generation. + */ + sampler?: 'unipc' | 'dpm++' | 'euler' + /** + * Guidance Scale + * + * Guidance scale for classifier-free guidance. Higher values encourage the model to generate images closely related to the text prompt. + */ + guidance_scale?: number + /** + * Video Quality + * + * The quality of the generated video. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history.
+ */ + sync_mode?: boolean + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number | unknown + /** + * Interpolator Model + * + * The model to use for frame interpolation. Options are 'rife' or 'film'. + */ + interpolator_model?: 'rife' | 'film' + /** + * Enable Auto Downsample + * + * If true, the model will automatically temporally downsample the video to an appropriate frame length for the model, then will interpolate it back to the original frame length. + */ + enable_auto_downsample?: boolean + /** + * Expand Top + * + * Whether to expand the video to the top. + */ + expand_top?: boolean + /** + * Shift + * + * Shift parameter for video generation. + */ + shift?: number + /** + * Expand Left + * + * Whether to expand the video to the left. + */ + expand_left?: boolean + /** + * Acceleration + * + * Acceleration to use for inference. Options are 'none', 'low', or 'regular'. Accelerated inference will very slightly affect output, but will be significantly faster. + */ + acceleration?: 'none' | 'low' | 'regular' | unknown + /** + * Match Input Number of Frames + * + * If true, the number of frames in the generated video will match the number of frames in the input video. If false, the number of frames will be determined by the num_frames parameter. + */ + match_input_num_frames?: boolean + /** + * Frames per Second + * + * Frames per second of the generated video. Must be between 5 and 30. Ignored if match_input_frames_per_second is true. + */ + frames_per_second?: number | unknown + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * Negative prompt for video generation. + */ + negative_prompt?: string + /** + * Video Write Mode + * + * The write mode of the generated video. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Return Frames Zip + * + * If true, also return a ZIP file containing all generated frames. + */ + return_frames_zip?: boolean + /** + * Resolution + * + * Resolution of the generated video. + */ + resolution?: 'auto' | '240p' | '360p' | '480p' | '580p' | '720p' + /** + * Expand Right + * + * Whether to expand the video to the right. + */ + expand_right?: boolean + /** + * Aspect Ratio + * + * Aspect ratio of the generated video. + */ + aspect_ratio?: 'auto' | '16:9' | '1:1' | '9:16' + /** + * Match Input Frames Per Second + * + * If true, the frames per second of the generated video will match the input video. If false, the frames per second will be determined by the frames_per_second parameter. + */ + match_input_frames_per_second?: boolean + /** + * Number of Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: number + /** + * Last Frame URL + * + * URL to the last frame of the video. If provided, the model will use this frame as a reference. + */ + last_frame_url?: string | unknown +} + +/** + * WanVACEReframeResponse + */ +export type SchemaWanVace14bReframeOutput = { + /** + * Prompt + * + * The prompt used for generation. + */ + prompt: string + /** + * ZIP archive of all video frames if requested. + */ + frames_zip?: SchemaFile | unknown + /** + * Seed + * + * The seed used for generation.
+ */ + seed: number + video: SchemaVideoFile +} + +/** + * WanVACEReframeRequest + */ +export type SchemaWanVace14bReframeInput = { + /** + * Prompt + * + * The text prompt to guide video generation. Optional for reframing. + */ + prompt?: string + /** + * Video URL + * + * URL to the source video file. This video will be used as a reference for the reframe task. + */ + video_url: string + /** + * Number of Interpolated Frames + * + * Number of frames to interpolate between the original frames. A value of 0 means no interpolation. + */ + num_interpolated_frames?: number + /** + * Temporal Downsample Factor + * + * Temporal downsample factor for the video. This is an integer value that determines how many frames to skip in the video. A value of 0 means no downsampling. For each downsample factor, one upsample factor will automatically be applied. + */ + temporal_downsample_factor?: number + /** + * First Frame URL + * + * URL to the first frame of the video. If provided, the model will use this frame as a reference. + */ + first_frame_url?: string | unknown + /** + * Transparency Mode + * + * The transparency mode to apply to the first and last frames. This controls how the transparent areas of the first and last frames are filled. + */ + transparency_mode?: 'content_aware' | 'white' | 'black' + /** + * Number of Frames + * + * Number of frames to generate. Must be between 81 and 241 (inclusive). + */ + num_frames?: number + /** + * Trim Borders + * + * Whether to trim borders from the video. + */ + trim_borders?: boolean + /** + * Auto Downsample Min FPS + * + * The minimum frames per second to downsample the video to. This is used to help determine the auto downsample factor to try and find the lowest detail-preserving downsample factor. The default value is appropriate for most videos; if you are using a video with very fast motion, you may need to increase this value. If your video has a very low amount of motion, you could decrease this value to allow for higher downsampling and thus longer sequences. + */ + auto_downsample_min_fps?: number + /** + * Sampler + * + * Sampler to use for video generation. + */ + sampler?: 'unipc' | 'dpm++' | 'euler' + /** + * Guidance Scale + * + * Guidance scale for classifier-free guidance. Higher values encourage the model to generate images closely related to the text prompt. + */ + guidance_scale?: number + /** + * Video Quality + * + * The quality of the generated video. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number | unknown + /** + * Interpolator Model + * + * The model to use for frame interpolation. Options are 'rife' or 'film'. + */ + interpolator_model?: 'rife' | 'film' + /** + * Enable Auto Downsample + * + * If true, the model will automatically temporally downsample the video to an appropriate frame length for the model, then will interpolate it back to the original frame length. + */ + enable_auto_downsample?: boolean + /** + * Shift + * + * Shift parameter for video generation. + */ + shift?: number + /** + * Acceleration + * + * Acceleration to use for inference. Options are 'none', 'low', or 'regular'.
Accelerated inference will very slightly affect output, but will be significantly faster. + */ + acceleration?: 'none' | 'low' | 'regular' | unknown + /** + * Zoom Factor + * + * Zoom factor for the video. When this value is greater than 0, the video will be zoomed in by this factor (in relation to the canvas size), cutting off the edges of the video. A value of 0 means no zoom. + */ + zoom_factor?: number + /** + * Match Input Number of Frames + * + * If true, the number of frames in the generated video will match the number of frames in the input video. If false, the number of frames will be determined by the num_frames parameter. + */ + match_input_num_frames?: boolean + /** + * Frames per Second + * + * Frames per second of the generated video. Must be between 5 and 30. Ignored if match_input_frames_per_second is true. + */ + frames_per_second?: number | unknown + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * Negative prompt for video generation. + */ + negative_prompt?: string + /** + * Video Write Mode + * + * The write mode of the generated video. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Return Frames Zip + * + * If true, also return a ZIP file containing all generated frames. + */ + return_frames_zip?: boolean + /** + * Resolution + * + * Resolution of the generated video. + */ + resolution?: 'auto' | '240p' | '360p' | '480p' | '580p' | '720p' + /** + * Aspect Ratio + * + * Aspect ratio of the generated video. + */ + aspect_ratio?: 'auto' | '16:9' | '1:1' | '9:16' + /** + * Match Input Frames Per Second + * + * If true, the frames per second of the generated video will match the input video. If false, the frames per second will be determined by the frames_per_second parameter. + */ + match_input_frames_per_second?: boolean + /** + * Number of Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: number + /** + * Last Frame URL + * + * URL to the last frame of the video. If provided, the model will use this frame as a reference. + */ + last_frame_url?: string | unknown +} + +/** + * ModifyOutput + */ +export type SchemaLumaDreamMachineRay2ModifyOutput = { + /** + * Video + * + * URL of the modified video + */ + video: SchemaFile +} + +/** + * ModifyVideoRequest + */ +export type SchemaLumaDreamMachineRay2ModifyInput = { + /** + * Prompt + * + * Instruction for modifying the video + */ + prompt?: string + /** + * Video Url + * + * URL of the input video to modify + */ + video_url: string + /** + * Mode + * + * Amount of modification to apply to the video; adhere_1 is the least amount of modification, reimagine_3 is the most + */ + mode?: + | 'adhere_1' + | 'adhere_2' + | 'adhere_3' + | 'flex_1' + | 'flex_2' + | 'flex_3' + | 'reimagine_1' + | 'reimagine_2' + | 'reimagine_3' + /** + * Image Url + * + * Optional URL of the first frame image for modification + */ + image_url?: string +} + +/** + * LipsyncOutput + */ +export type SchemaPixverseLipsyncOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * LipsyncRequest + */ +export type SchemaPixverseLipsyncInput = { + /** + * Text + * + * Text content for TTS when audio_url is not provided + */ + text?: string + /** + * Video Url + * + * URL of the input video + */ + video_url: string + /** + * Audio Url + * + * URL of the input audio. If not provided, TTS will be used.
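+ *
+ * @example
+ * // Editor's illustrative sketch, not part of the generated schema: the two
+ * // ways this input can be driven. URLs and text are placeholders.
+ * const withAudio: SchemaPixverseLipsyncInput = {
+ *   video_url: 'https://example.com/talking-head.mp4',
+ *   audio_url: 'https://example.com/line-read.mp3',
+ * }
+ * // Without audio_url, the text and voice_id fields drive TTS instead:
+ * const withTts: SchemaPixverseLipsyncInput = {
+ *   video_url: 'https://example.com/talking-head.mp4',
+ *   text: 'Welcome back to the channel!',
+ *   voice_id: 'Emily',
+ * }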
+ */ + audio_url?: string + /** + * Voice Id + * + * Voice to use for TTS when audio_url is not provided + */ + voice_id?: + | 'Emily' + | 'James' + | 'Isabella' + | 'Liam' + | 'Chloe' + | 'Adrian' + | 'Harper' + | 'Ava' + | 'Sophia' + | 'Julia' + | 'Mason' + | 'Jack' + | 'Oliver' + | 'Ethan' + | 'Auto' +} + +/** + * ExtendOutput + */ +export type SchemaPixverseExtendOutput = { + /** + * Video + * + * The extended video + */ + video: SchemaFile +} + +/** + * ExtendRequest + */ +export type SchemaPixverseExtendInput = { + /** + * Prompt + * + * Prompt describing how to extend the video + */ + prompt: string + /** + * Resolution + * + * The resolution of the generated video + */ + resolution?: '360p' | '540p' | '720p' | '1080p' + /** + * Duration + * + * The duration of the generated video in seconds. 1080p videos are limited to 5 seconds + */ + duration?: '5' | '8' + /** + * Style + * + * The style of the extended video + */ + style?: 'anime' | '3d_animation' | 'clay' | 'comic' | 'cyberpunk' + /** + * Video Url + * + * URL of the input video to extend + */ + video_url: string + /** + * Model + * + * The model version to use for generation + */ + model?: 'v3.5' | 'v4' | 'v4.5' | 'v5' | 'v5.5' | 'v5.6' + /** + * Seed + * + * Random seed for generation + */ + seed?: number + /** + * Negative Prompt + * + * Negative prompt to be used for the generation + */ + negative_prompt?: string +} + +/** + * ExtendOutput + */ +export type SchemaPixverseExtendFastOutput = { + /** + * Video + * + * The extended video + */ + video: SchemaFile +} + +/** + * FastExtendRequest + */ +export type SchemaPixverseExtendFastInput = { + /** + * Prompt + * + * Prompt describing how to extend the video + */ + prompt: string + /** + * Resolution + * + * The resolution of the generated video. Fast mode doesn't support 1080p + */ + resolution?: '360p' | '540p' | '720p' + /** + * Video Url + * + * URL of the input video to extend + */ + video_url: string + /** + * Style + * + * The style of the extended video + */ + style?: 'anime' | '3d_animation' | 'clay' | 'comic' | 'cyberpunk' + /** + * Model + * + * The model version to use for generation + */ + model?: 'v3.5' | 'v4' | 'v4.5' | 'v5' | 'v5.5' | 'v5.6' + /** + * Seed + * + * Random seed for generation + */ + seed?: number + /** + * Negative Prompt + * + * Negative prompt to be used for the generation + */ + negative_prompt?: string +} + +/** + * Output + */ +export type SchemaThinksoundOutput = { + /** + * Prompt + * + * The prompt used to generate the audio. + */ + prompt: string + /** + * Video + * + * The generated video with audio. + */ + video: SchemaFile +} + +/** + * Input + */ +export type SchemaThinksoundInput = { + /** + * Prompt + * + * A prompt to guide the audio generation. If not provided, it will be extracted from the video. + */ + prompt?: string + /** + * Video Url + * + * The URL of the video to generate the audio for. + */ + video_url: string + /** + * Seed + * + * The seed for the random number generator + */ + seed?: number + /** + * Number of Inference Steps + * + * The number of inference steps for audio generation. + */ + num_inference_steps?: number + /** + * CFG Scale + * + * The classifier-free guidance scale for audio generation. + */ + cfg_scale?: number +} + +/** + * AudioOutput + */ +export type SchemaThinksoundAudioOutput = { + /** + * Prompt + * + * The prompt used to generate the audio. + */ + prompt: string + /** + * Audio + * + * The generated audio file. 
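+ *
+ * @example
+ * // Editor's illustrative sketch, not part of the generated schema: fetching a
+ * // generated soundtrack with the official @fal-ai/client package. The endpoint
+ * // id is inferred from the type name and may differ; the URL is a placeholder.
+ * import { fal } from '@fal-ai/client'
+ *
+ * const { data } = await fal.subscribe('fal-ai/thinksound/audio', {
+ *   input: { video_url: 'https://example.com/clip.mp4' },
+ * })
+ * // data.audio.url points at the synthesized audio for the clip.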
+ */ + audio: SchemaFile +} + +/** + * Input + */ +export type SchemaThinksoundAudioInput = { + /** + * Prompt + * + * A prompt to guide the audio generation. If not provided, it will be extracted from the video. + */ + prompt?: string + /** + * Video Url + * + * The URL of the video to generate the audio for. + */ + video_url: string + /** + * Seed + * + * The seed for the random number generator + */ + seed?: number + /** + * Number of Inference Steps + * + * The number of inference steps for audio generation. + */ + num_inference_steps?: number + /** + * CFG Scale + * + * The classifier-free guidance scale for audio generation. + */ + cfg_scale?: number +} + +/** + * SoundEffectOutput + */ +export type SchemaPixverseSoundEffectsOutput = { + /** + * Video + * + * The video with added sound effects + */ + video: SchemaFile +} + +/** + * SoundEffectRequest + */ +export type SchemaPixverseSoundEffectsInput = { + /** + * Prompt + * + * Description of the sound effect to generate. If empty, a random sound effect will be generated + */ + prompt?: string + /** + * Video Url + * + * URL of the input video to add sound effects to + */ + video_url: string + /** + * Original Sound Switch + * + * Whether to keep the original audio from the video + */ + original_sound_switch?: boolean +} + +/** + * MultiConditioningVideoOutput + */ +export type SchemaLtxv13B098DistilledMulticonditioningOutput = { + /** + * Prompt + * + * The prompt used for generation. + */ + prompt: string + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * DistilledMultiConditioningVideoInput + * + * Distilled model input + */ +export type SchemaLtxv13B098DistilledMulticonditioningInput = { + /** + * Second Pass Skip Initial Steps + * + * The number of inference steps to skip in the initial steps of the second pass. By skipping some steps at the beginning, the second pass can focus on smaller details instead of larger changes. + */ + second_pass_skip_initial_steps?: number + /** + * Number of Inference Steps + * + * Number of inference steps during the first pass. + */ + first_pass_num_inference_steps?: number + /** + * Frame Rate + * + * The frame rate of the video. + */ + frame_rate?: number + /** + * Reverse Video + * + * Whether to reverse the video. + */ + reverse_video?: boolean + /** + * Prompt + * + * Text prompt to guide generation + */ + prompt: string + /** + * Expand Prompt + * + * Whether to expand the prompt using a language model. + */ + expand_prompt?: boolean + /** + * Temporal AdaIN Factor + * + * The factor for adaptive instance normalization (AdaIN) applied to generated video chunks after the first. This can help deal with a gradual increase in saturation/contrast in the generated video by normalizing the color distribution across the video. A high value will ensure the color distribution is more consistent across the video, while a low value will allow for more variation in color distribution. + */ + temporal_adain_factor?: number + /** + * Loras + * + * LoRA weights to use for generation + */ + loras?: Array + /** + * Images + * + * URL of images to use as conditioning + */ + images?: Array + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Number of Frames + * + * The number of frames in the video. 
+ */ + num_frames?: number + /** + * Second Pass Number of Inference Steps + * + * Number of inference steps during the second pass. + */ + second_pass_num_inference_steps?: number + /** + * Negative Prompt + * + * Negative prompt for generation + */ + negative_prompt?: string + /** + * Enable Detail Pass + * + * Whether to use a detail pass. If True, the model will perform a second pass to refine the video and enhance details. This incurs a 2.0x cost multiplier on the base price. + */ + enable_detail_pass?: boolean + /** + * Resolution + * + * Resolution of the generated video. + */ + resolution?: '480p' | '720p' + /** + * Aspect Ratio + * + * The aspect ratio of the video. + */ + aspect_ratio?: '9:16' | '1:1' | '16:9' | 'auto' + /** + * Tone Map Compression Ratio + * + * The compression ratio for tone mapping. This is used to compress the dynamic range of the video to improve visual quality. A value of 0.0 means no compression, while a value of 1.0 means maximum compression. + */ + tone_map_compression_ratio?: number + /** + * Videos + * + * Videos to use as conditioning + */ + videos?: Array + /** + * Constant Rate Factor + * + * The constant rate factor (CRF) to compress input media with. Compressed input media more closely matches the model's training data, which can improve motion quality. + */ + constant_rate_factor?: number + /** + * Seed + * + * Random seed for generation + */ + seed?: number +} + +/** + * ModifyOutput + */ +export type SchemaLumaDreamMachineRay2FlashModifyOutput = { + /** + * Video + * + * URL of the modified video + */ + video: SchemaFile +} + +/** + * ModifyVideoRequest + */ +export type SchemaLumaDreamMachineRay2FlashModifyInput = { + /** + * Prompt + * + * Instruction for modifying the video + */ + prompt?: string + /** + * Video Url + * + * URL of the input video to modify + */ + video_url: string + /** + * Mode + * + * Amount of modification to apply to the video, adhere_1 is the least amount of modification, reimagine_3 is the most + */ + mode?: + | 'adhere_1' + | 'adhere_2' + | 'adhere_3' + | 'flex_1' + | 'flex_2' + | 'flex_3' + | 'reimagine_1' + | 'reimagine_2' + | 'reimagine_3' + /** + * Image Url + * + * Optional URL of the first frame image for modification + */ + image_url?: string +} + +/** + * FILMVideoOutput + */ +export type SchemaFilmVideoOutput = { + /** + * Video + * + * The generated video file with interpolated frames. + */ + video: SchemaVideoFile +} + +/** + * FILMVideoInput + */ +export type SchemaFilmVideoInput = { + /** + * Video Write Mode + * + * The write mode of the output video. Only applicable if output_type is 'video'. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Video URL + * + * The URL of the video to use for interpolation. + */ + video_url: string + /** + * Use Calculated FPS + * + * If True, the function will use the calculated FPS of the input video multiplied by the number of frames to determine the output FPS. If False, the passed FPS will be used. + */ + use_calculated_fps?: boolean + /** + * Loop + * + * If True, the final frame will be looped back to the first frame to create a seamless loop. If False, the final frame will not loop back. + */ + loop?: boolean + /** + * Frames Per Second + * + * Frames per second for the output video. Only applicable if use_calculated_fps is False. + */ + fps?: number + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. 
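+ *
+ * @example
+ * // Editor's illustrative sketch, not part of the generated schema: a typical
+ * // smoothing pass. One in-between frame per gap roughly doubles the frame
+ * // count; with sync_mode the result would come back inline as a data URI,
+ * // which suits short clips only. The URL is a placeholder.
+ * const input: SchemaFilmVideoInput = {
+ *   video_url: 'https://example.com/stop-motion.mp4',
+ *   num_frames: 1,
+ *   use_calculated_fps: true,
+ *   sync_mode: false,
+ * }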
+ */ + sync_mode?: boolean + /** + * Video Quality + * + * The quality of the output video. Only applicable if output_type is 'video'. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Use Scene Detection + * + * If True, the input video will be split into scenes before interpolation. This removes smear frames between scenes, but can result in false positives if the scene detection is not accurate. If False, the entire video will be treated as a single scene. + */ + use_scene_detection?: boolean + /** + * Number of Frames + * + * The number of frames to generate between the input video frames. + */ + num_frames?: number +} + +/** + * RIFEVideoOutput + */ +export type SchemaRifeVideoOutput = { + /** + * Video + * + * The generated video file with interpolated frames. + */ + video: SchemaFile +} + +/** + * RIFEVideoInput + */ +export type SchemaRifeVideoInput = { + /** + * Video URL + * + * The URL of the video to use for interpolation. + */ + video_url: string + /** + * Use Scene Detection + * + * If True, the input video will be split into scenes before interpolation. This removes smear frames between scenes, but can result in false positives if the scene detection is not accurate. If False, the entire video will be treated as a single scene. + */ + use_scene_detection?: boolean + /** + * Loop + * + * If True, the final frame will be looped back to the first frame to create a seamless loop. If False, the final frame will not loop back. + */ + loop?: boolean + /** + * Number of Frames + * + * The number of frames to generate between the input video frames. + */ + num_frames?: number + /** + * Use Calculated FPS + * + * If True, the function will use the calculated FPS of the input video multiplied by the number of frames to determine the output FPS. If False, the passed FPS will be used. + */ + use_calculated_fps?: boolean + /** + * Frames Per Second + * + * Frames per second for the output video. Only applicable if use_calculated_fps is False. + */ + fps?: number +} + +/** + * ExtendVideoConditioningInput + */ +export type SchemaExtendVideoConditioningInput = { + /** + * Video URL + * + * URL of video to use as conditioning + */ + video_url: string + /** + * Start Frame Number + * + * Frame number of the video from which the conditioning starts. Must be a multiple of 8. + */ + start_frame_num?: number + /** + * Reverse Video + * + * Whether to reverse the video. This is useful for tasks where the video conditioning should be applied in reverse order. + */ + reverse_video?: boolean + /** + * Limit Number of Frames + * + * Whether to limit the number of frames used from the video. If True, the `max_num_frames` parameter will be used to limit the number of frames. + */ + limit_num_frames?: boolean + /** + * Resample FPS + * + * Whether to resample the video to a specific FPS. If True, the `target_fps` parameter will be used to resample the video. + */ + resample_fps?: boolean + /** + * Strength + * + * Strength of the conditioning. 0.0 means no conditioning, 1.0 means full conditioning. + */ + strength?: number + /** + * Target FPS + * + * Target FPS to resample the video to. Only relevant if `resample_fps` is True. + */ + target_fps?: number + /** + * Maximum Number of Frames + * + * Maximum number of frames to use from the video. If None, all frames will be used. + */ + max_num_frames?: number +} + +/** + * ExtendVideoOutput + */ +export type SchemaLtxv13B098DistilledExtendOutput = { + /** + * Prompt + * + * The prompt used for generation. 
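+ *
+ * @example
+ * // Editor's illustrative sketch, not part of the generated schema: building
+ * // the conditioning value consumed by the extend input that follows, using the
+ * // SchemaExtendVideoConditioningInput type defined above. Note that
+ * // start_frame_num must be a multiple of 8. The URL is a placeholder.
+ * const conditioning: SchemaExtendVideoConditioningInput = {
+ *   video_url: 'https://example.com/base-clip.mp4',
+ *   start_frame_num: 64,
+ *   strength: 1,
+ * }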
+ */ + prompt: string + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * DistilledExtendVideoInput + * + * Distilled model input + */ +export type SchemaLtxv13B098DistilledExtendInput = { + /** + * Second Pass Skip Initial Steps + * + * The number of inference steps to skip in the initial steps of the second pass. By skipping some steps at the beginning, the second pass can focus on smaller details instead of larger changes. + */ + second_pass_skip_initial_steps?: number + /** + * Number of Inference Steps + * + * Number of inference steps during the first pass. + */ + first_pass_num_inference_steps?: number + /** + * Frame Rate + * + * The frame rate of the video. + */ + frame_rate?: number + /** + * Reverse Video + * + * Whether to reverse the video. + */ + reverse_video?: boolean + /** + * Prompt + * + * Text prompt to guide generation + */ + prompt: string + /** + * Expand Prompt + * + * Whether to expand the prompt using a language model. + */ + expand_prompt?: boolean + /** + * Temporal AdaIN Factor + * + * The factor for adaptive instance normalization (AdaIN) applied to generated video chunks after the first. This can help deal with a gradual increase in saturation/contrast in the generated video by normalizing the color distribution across the video. A high value will ensure the color distribution is more consistent across the video, while a low value will allow for more variation in color distribution. + */ + temporal_adain_factor?: number + /** + * Loras + * + * LoRA weights to use for generation + */ + loras?: Array + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Number of Frames + * + * The number of frames in the video. + */ + num_frames?: number + /** + * Second Pass Number of Inference Steps + * + * Number of inference steps during the second pass. + */ + second_pass_num_inference_steps?: number + /** + * Negative Prompt + * + * Negative prompt for generation + */ + negative_prompt?: string + /** + * Video + * + * Video to be extended. + */ + video: SchemaExtendVideoConditioningInput + /** + * Enable Detail Pass + * + * Whether to use a detail pass. If True, the model will perform a second pass to refine the video and enhance details. This incurs a 2.0x cost multiplier on the base price. + */ + enable_detail_pass?: boolean + /** + * Resolution + * + * Resolution of the generated video. + */ + resolution?: '480p' | '720p' + /** + * Aspect Ratio + * + * The aspect ratio of the video. + */ + aspect_ratio?: '9:16' | '1:1' | '16:9' | 'auto' + /** + * Tone Map Compression Ratio + * + * The compression ratio for tone mapping. This is used to compress the dynamic range of the video to improve visual quality. A value of 0.0 means no compression, while a value of 1.0 means maximum compression. + */ + tone_map_compression_ratio?: number + /** + * Constant Rate Factor + * + * The constant rate factor (CRF) to compress input media with. Compressed input media more closely matches the model's training data, which can improve motion quality. + */ + constant_rate_factor?: number + /** + * Seed + * + * Random seed for generation + */ + seed?: number +} + +/** + * WanV2VResponse + */ +export type SchemaWanV22A14bVideoToVideoOutput = { + /** + * Prompt + * + * The text prompt used for video generation. + */ + prompt?: string + /** + * Seed + * + * The seed used for generation. 
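+ * + * As a sketch (URLs are placeholders), a minimal `SchemaLtxv13B098DistilledExtendInput` pairs a prompt with the nested video conditioning object: + * + * ```ts + * const extendInput: SchemaLtxv13B098DistilledExtendInput = { + *   prompt: 'continue the slow pan across the valley', + *   video: { + *     video_url: 'https://example.com/source.mp4', + *     start_frame_num: 8, // must be a multiple of 8 + *   }, + * } + * ```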
*/ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * WanV2VRequest + */ +export type SchemaWanV22A14bVideoToVideoInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Video URL + * + * URL of the input video. + */ + video_url: string + /** + * Acceleration + * + * Acceleration level to use. The more acceleration, the faster the generation, but with lower quality. The recommended value is 'regular'. + */ + acceleration?: 'none' | 'regular' + /** + * Number of Interpolated Frames + * + * Number of frames to interpolate between each pair of generated frames. Must be between 0 and 4. + */ + num_interpolated_frames?: number + /** + * Shift + * + * Shift value for the video. Must be between 1.0 and 10.0. + */ + shift?: number + /** + * Resample Video Frame Rate + * + * If true, the video will be resampled to the passed frames per second. If false, the video will not be resampled. + */ + resample_fps?: boolean + /** + * Frames per Second + * + * Frames per second of the generated video. Must be between 4 and 60. When using interpolation and `adjust_fps_for_interpolation` is set to true (default true), the final FPS will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If `adjust_fps_for_interpolation` is set to false, this value will be used as-is. + */ + frames_per_second?: number + /** + * Enable Safety Checker + * + * If set to true, input data will be checked for safety before processing. + */ + enable_safety_checker?: boolean + /** + * Number of Frames + * + * Number of frames to generate. Must be between 17 and 161 (inclusive). + */ + num_frames?: number + /** + * Guidance Scale (1st Stage) + * + * Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality. + */ + guidance_scale?: number + /** + * Negative Prompt + * + * Negative prompt for video generation. + */ + negative_prompt?: string + /** + * Video Write Mode + * + * The write mode of the output video. Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Resolution + * + * Resolution of the generated video (480p, 580p, or 720p). + */ + resolution?: '480p' | '580p' | '720p' + /** + * Aspect Ratio + * + * Aspect ratio of the generated video. If 'auto', the aspect ratio will be determined automatically based on the input video. + */ + aspect_ratio?: 'auto' | '16:9' | '9:16' | '1:1' + /** + * Enable Output Safety Checker + * + * If set to true, output video will be checked for safety after generation. + */ + enable_output_safety_checker?: boolean + /** + * Guidance Scale (2nd Stage) + * + * Guidance scale for the second stage of the model. This is used to control the adherence to the prompt in the second stage of the model. + */ + guidance_scale_2?: number + /** + * Video Quality + * + * The quality of the output video. Higher quality means better visual quality but larger file size. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Strength + * + * Strength of the video transformation.
A value of 1.0 means the output will be completely based on the prompt, while a value of 0.0 means the output will be identical to the input video. + */ + strength?: number + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning. + */ + enable_prompt_expansion?: boolean + /** + * Number of Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: number + /** + * Interpolator Model + * + * The model to use for frame interpolation. If None, no interpolation is applied. + */ + interpolator_model?: 'none' | 'film' | 'rife' + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Adjust FPS for Interpolation + * + * If true, the number of frames per second will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If false, the passed frames per second will be used as-is. + */ + adjust_fps_for_interpolation?: boolean +} + +/** + * MergeVideosOutput + */ +export type SchemaFfmpegApiMergeVideosOutput = { + /** + * Metadata + * + * Metadata about the merged video including original video info + */ + metadata: { + [key: string]: unknown + } + video: SchemaFile +} + +/** + * MergeVideosInput + */ +export type SchemaFfmpegApiMergeVideosInput = { + /** + * Target Fps + * + * Target FPS for the output video. If not provided, uses the lowest FPS from input videos. + */ + target_fps?: number | unknown + /** + * Video Urls + * + * List of video URLs to merge in order + */ + video_urls: Array + /** + * Resolution + * + * Resolution of the final video. Width and height must be between 512 and 2048. + */ + resolution?: + | SchemaImageSize + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + | unknown +} + +/** + * MareyOutput + */ +export type SchemaMareyMotionTransferOutput = { + video: SchemaFile +} + +/** + * MareyInputMotionTransfer + */ +export type SchemaMareyMotionTransferInput = { + /** + * Prompt + * + * The prompt to generate a video from + */ + prompt: string + /** + * Video Url + * + * The URL of the video to use as the control video. + */ + video_url: string + /** + * Seed + * + * Seed for random number generation. Use -1 for random seed each run. + */ + seed?: number | unknown + /** + * Reference Image Url + * + * Optional reference image URL to use for pose control or as a starting frame + */ + reference_image_url?: string | unknown + /** + * Negative Prompt + * + * Negative prompt used to guide the model away from undesirable features. + */ + negative_prompt?: string | unknown + /** + * First Frame Image Url + * + * Optional first frame image URL to use as the first frame of the generated video + */ + first_frame_image_url?: string | unknown +} + +/** + * MareyOutput + */ +export type SchemaMareyPoseTransferOutput = { + video: SchemaFile +} + +/** + * MareyInputPoseTransfer + */ +export type SchemaMareyPoseTransferInput = { + /** + * Prompt + * + * The prompt to generate a video from + */ + prompt: string + /** + * Video Url + * + * The URL of the video to use as the control video. + */ + video_url: string + /** + * Seed + * + * Seed for random number generation. Use -1 for random seed each run. 
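+ * + * For the `SchemaFfmpegApiMergeVideosInput` request above, a minimal illustrative value (placeholder URLs; `video_urls` is an array of URL strings per its description) could be: + * + * ```ts + * const mergeInput: SchemaFfmpegApiMergeVideosInput = { + *   video_urls: [ + *     'https://example.com/part-1.mp4', + *     'https://example.com/part-2.mp4', + *   ], + *   target_fps: 24, + * } + * ```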
*/ + seed?: number | unknown + /** + * Reference Image Url + * + * Optional reference image URL to use for pose control or as a starting frame + */ + reference_image_url?: string | unknown + /** + * Negative Prompt + * + * Negative prompt used to guide the model away from undesirable features. + */ + negative_prompt?: string | unknown + /** + * First Frame Image Url + * + * Optional first frame image URL to use as the first frame of the generated video + */ + first_frame_image_url?: string | unknown +} + +/** + * VideoOutput + */ +export type SchemaSfxV1VideoToVideoOutput = { + /** + * Video + * + * The processed video with sound effects + */ + video: Array +} + +/** + * Input + */ +export type SchemaSfxV1VideoToVideoInput = { + /** + * Num Samples + * + * The number of samples to generate from the model + */ + num_samples?: number | unknown + /** + * Video Url + * + * A video url that can be accessed from the API to process and add sound effects + */ + video_url: string + /** + * Duration + * + * The duration of the generated audio in seconds + */ + duration?: number | unknown + /** + * Seed + * + * The seed to use for the generation. If not provided, a random seed will be used + */ + seed?: number | unknown + /** + * Text Prompt + * + * Additional description to guide the model + */ + text_prompt?: string | unknown +} + +/** + * AvatarSingleAudioResponse + */ +export type SchemaInfinitalkOutput = { + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * InfiniTalkSingleAudioRequest + */ +export type SchemaInfinitalkInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Resolution + * + * Resolution of the video to generate. Must be either 480p or 720p. + */ + resolution?: '480p' | '720p' + /** + * Acceleration + * + * The acceleration level to use for generation. + */ + acceleration?: 'none' | 'regular' | 'high' + /** + * Image URL + * + * URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped. + */ + image_url: string + /** + * Audio URL + * + * The URL of the audio file. + */ + audio_url: string + /** + * Number of Frames + * + * Number of frames to generate. Must be between 41 and 721. + */ + num_frames?: number + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number +} + +/** + * OutputIncreaseResolutionModel + */ +export type SchemaVideoIncreaseResolutionOutput = { + /** + * Video + * + * Video with increased resolution. + */ + video: SchemaVideo | SchemaFile +} + +/** + * InputIncreaseResolutionModel + */ +export type SchemaVideoIncreaseResolutionInput = { + /** + * Video Url + * + * Input video to increase resolution. Size should be less than 14142x14142 and duration less than 30s. + */ + video_url: string + /** + * Output Container And Codec + * + * Output container and codec. Options: mp4_h265, mp4_h264, webm_vp9, mov_h265, mov_proresks, mkv_h265, mkv_h264, mkv_vp9, gif. + */ + output_container_and_codec?: + | 'mp4_h265' + | 'mp4_h264' + | 'webm_vp9' + | 'mov_h265' + | 'mov_proresks' + | 'mkv_h265' + | 'mkv_h264' + | 'mkv_vp9' + | 'gif' + /** + * Desired Increase + * + * Desired increase factor. Options: 2x, 4x. + */ + desired_increase?: '2' | '4' +} + +/** + * WanFunControlResponse + */ +export type SchemaWanFunControlOutput = { + /** + * Video + * + * The video generated by the model.
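+ * + * A minimal illustrative value for the `SchemaVideoIncreaseResolutionInput` request above (placeholder URL): + * + * ```ts + * const upscaleInput: SchemaVideoIncreaseResolutionInput = { + *   video_url: 'https://example.com/clip.mp4', + *   desired_increase: '2', // 2x upscale + *   output_container_and_codec: 'mp4_h264', + * } + * ```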
*/ + video: SchemaFile +} + +/** + * WanFunControlRequest + */ +export type SchemaWanFunControlInput = { + /** + * Prompt + * + * The prompt to generate the video. + */ + prompt: string + /** + * Shift + * + * The shift for the scheduler. + */ + shift?: number + /** + * Preprocess Video + * + * Whether to preprocess the video. If True, the video will be preprocessed to depth or pose. + */ + preprocess_video?: boolean + /** + * Reference Image URL + * + * The URL of the reference image to use as a reference for the video generation. + */ + reference_image_url?: string + /** + * FPS + * + * The fps to generate. Only used when match_input_fps is False. + */ + fps?: number + /** + * Match Input Number of Frames + * + * Whether to match the number of frames in the input video. + */ + match_input_num_frames?: boolean + /** + * Guidance Scale + * + * The guidance scale. + */ + guidance_scale?: number + /** + * Preprocess Type + * + * The type of preprocess to apply to the video. Only used when preprocess_video is True. + */ + preprocess_type?: 'depth' | 'pose' + /** + * Control Video URL + * + * The URL of the control video to use as a reference for the video generation. + */ + control_video_url: string + /** + * Negative Prompt + * + * The negative prompt to generate the video. + */ + negative_prompt?: string + /** + * Number of Frames + * + * The number of frames to generate. Only used when match_input_num_frames is False. + */ + num_frames?: number + /** + * Seed + * + * The seed for the random number generator. + */ + seed?: number + /** + * Number of Inference Steps + * + * The number of inference steps. + */ + num_inference_steps?: number + /** + * Match Input FPS + * + * Whether to match the fps in the input video. + */ + match_input_fps?: boolean +} + +/** + * LipSyncV2ProOutput + */ +export type SchemaSyncLipsyncV2ProOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * LipSyncV2ProInput + */ +export type SchemaSyncLipsyncV2ProInput = { + /** + * Sync Mode + * + * Lipsync mode when audio and video durations are out of sync. + */ + sync_mode?: 'cut_off' | 'loop' | 'bounce' | 'silence' | 'remap' + /** + * Video Url + * + * URL of the input video + */ + video_url: string + /** + * Audio Url + * + * URL of the input audio + */ + audio_url: string +} + +/** + * HunyuanFoleyResponse + */ +export type SchemaHunyuanVideoFoleyOutput = { + /** + * Video + * + * The generated video file with audio. + */ + video: SchemaFile +} + +/** + * HunyuanFoleyRequest + */ +export type SchemaHunyuanVideoFoleyInput = { + /** + * Video Url + * + * The URL of the video to generate audio for. + */ + video_url: string + /** + * Guidance Scale + * + * Guidance scale for audio generation. + */ + guidance_scale?: number + /** + * Num Inference Steps + * + * Number of inference steps for generation. + */ + num_inference_steps?: number + /** + * Seed + * + * Random seed for reproducible generation. + */ + seed?: number + /** + * Negative Prompt + * + * Negative prompt to avoid certain audio characteristics. + */ + negative_prompt?: string + /** + * Text Prompt + * + * Text description of the desired audio (optional). + */ + text_prompt: string +} + +/** + * WanVACEPoseResponse + */ +export type SchemaWan22VaceFunA14bPoseOutput = { + /** + * Prompt + * + * The prompt used for generation. + */ + prompt: string + /** + * ZIP archive of all video frames if requested. + */ + frames_zip?: SchemaFile | unknown + /** + * Seed + * + * The seed used for generation.
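+ * + * A minimal illustrative value for the `SchemaSyncLipsyncV2ProInput` request above (placeholder URLs): + * + * ```ts + * const lipsyncInput: SchemaSyncLipsyncV2ProInput = { + *   video_url: 'https://example.com/talking-head.mp4', + *   audio_url: 'https://example.com/voiceover.mp3', + *   sync_mode: 'loop', // loop when audio and video durations differ + * } + * ```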
+ */ + seed: number + video: SchemaVideoFile +} + +/** + * WanVACEPoseRequest + */ +export type SchemaWan22VaceFunA14bPoseInput = { + /** + * Prompt + * + * The text prompt to guide video generation. For pose task, the prompt should describe the desired pose and action of the subject in the video. + */ + prompt: string + /** + * Video URL + * + * URL to the source video file. Required for pose task. + */ + video_url: string + /** + * Number of Interpolated Frames + * + * Number of frames to interpolate between the original frames. A value of 0 means no interpolation. + */ + num_interpolated_frames?: number + /** + * Temporal Downsample Factor + * + * Temporal downsample factor for the video. This is an integer value that determines how many frames to skip in the video. A value of 0 means no downsampling. For each downsample factor, one upsample factor will automatically be applied. + */ + temporal_downsample_factor?: number + /** + * First Frame URL + * + * URL to the first frame of the video. If provided, the model will use this frame as a reference. + */ + first_frame_url?: string | unknown + /** + * Reference Image URLs + * + * URLs to source reference image. If provided, the model will use this image as reference. + */ + ref_image_urls?: Array + /** + * Guidance Scale + * + * Guidance scale for classifier-free guidance. Higher values encourage the model to generate images closely related to the text prompt. + */ + guidance_scale?: number + /** + * Number of Frames + * + * Number of frames to generate. Must be between 81 to 241 (inclusive). + */ + num_frames?: number + /** + * Auto Downsample Min FPS + * + * The minimum frames per second to downsample the video to. This is used to help determine the auto downsample factor to try and find the lowest detail-preserving downsample factor. The default value is appropriate for most videos, if you are using a video with very fast motion, you may need to increase this value. If your video has a very low amount of motion, you could decrease this value to allow for higher downsampling and thus longer sequences. + */ + auto_downsample_min_fps?: number + /** + * Transparency Mode + * + * The transparency mode to apply to the first and last frames. This controls how the transparent areas of the first and last frames are filled. + */ + transparency_mode?: 'content_aware' | 'white' | 'black' + /** + * Sampler + * + * Sampler to use for video generation. + */ + sampler?: 'unipc' | 'dpm++' | 'euler' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Video Quality + * + * The quality of the generated video. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number | unknown + /** + * Interpolator Model + * + * The model to use for frame interpolation. Options are 'rife' or 'film'. + */ + interpolator_model?: 'rife' | 'film' + /** + * Enable Auto Downsample + * + * If true, the model will automatically temporally downsample the video to an appropriate frame length for the model, then will interpolate it back to the original frame length. + */ + enable_auto_downsample?: boolean + /** + * Preprocess + * + * Whether to preprocess the input video. 
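+ * + * As a sketch (placeholder URL), a minimal value for this pose request: + * + * ```ts + * const poseInput: SchemaWan22VaceFunA14bPoseInput = { + *   prompt: 'a dancer performing the same moves on a theater stage', + *   video_url: 'https://example.com/pose-source.mp4', + * } + * ```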
+ */ + preprocess?: boolean + /** + * Shift + * + * Shift parameter for video generation. + */ + shift?: number + /** + * Acceleration + * + * Acceleration to use for inference. Options are 'none' or 'regular'. Accelerated inference will very slightly affect output, but will be significantly faster. + */ + acceleration?: 'none' | 'low' | 'regular' | unknown + /** + * Frames per Second + * + * Frames per second of the generated video. Must be between 5 to 30. Ignored if match_input_frames_per_second is true. + */ + frames_per_second?: number | unknown + /** + * Match Input Number of Frames + * + * If true, the number of frames in the generated video will match the number of frames in the input video. If false, the number of frames will be determined by the num_frames parameter. + */ + match_input_num_frames?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * Negative prompt for video generation. + */ + negative_prompt?: string + /** + * Video Write Mode + * + * The write mode of the generated video. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Resolution + * + * Resolution of the generated video. + */ + resolution?: 'auto' | '240p' | '360p' | '480p' | '580p' | '720p' + /** + * Return Frames Zip + * + * If true, also return a ZIP file containing all generated frames. + */ + return_frames_zip?: boolean + /** + * Aspect Ratio + * + * Aspect ratio of the generated video. + */ + aspect_ratio?: 'auto' | '16:9' | '1:1' | '9:16' + /** + * Match Input Frames Per Second + * + * If true, the frames per second of the generated video will match the input video. If false, the frames per second will be determined by the frames_per_second parameter. + */ + match_input_frames_per_second?: boolean + /** + * Number of Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: number + /** + * Last Frame URL + * + * URL to the last frame of the video. If provided, the model will use this frame as a reference. + */ + last_frame_url?: string | unknown +} + +/** + * WanVACEDepthResponse + */ +export type SchemaWan22VaceFunA14bDepthOutput = { + /** + * Prompt + * + * The prompt used for generation. + */ + prompt: string + /** + * ZIP archive of all video frames if requested. + */ + frames_zip?: SchemaFile | unknown + /** + * Seed + * + * The seed used for generation. + */ + seed: number + video: SchemaVideoFile +} + +/** + * WanVACEDepthRequest + */ +export type SchemaWan22VaceFunA14bDepthInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Video URL + * + * URL to the source video file. Required for depth task. + */ + video_url: string + /** + * Number of Interpolated Frames + * + * Number of frames to interpolate between the original frames. A value of 0 means no interpolation. + */ + num_interpolated_frames?: number + /** + * Temporal Downsample Factor + * + * Temporal downsample factor for the video. This is an integer value that determines how many frames to skip in the video. A value of 0 means no downsampling. For each downsample factor, one upsample factor will automatically be applied. + */ + temporal_downsample_factor?: number + /** + * First Frame URL + * + * URL to the first frame of the video. If provided, the model will use this frame as a reference. 
+ */ + first_frame_url?: string | unknown + /** + * Reference Image URLs + * + * URLs to source reference image. If provided, the model will use this image as reference. + */ + ref_image_urls?: Array + /** + * Guidance Scale + * + * Guidance scale for classifier-free guidance. Higher values encourage the model to generate images closely related to the text prompt. + */ + guidance_scale?: number + /** + * Number of Frames + * + * Number of frames to generate. Must be between 81 to 241 (inclusive). + */ + num_frames?: number + /** + * Auto Downsample Min FPS + * + * The minimum frames per second to downsample the video to. This is used to help determine the auto downsample factor to try and find the lowest detail-preserving downsample factor. The default value is appropriate for most videos, if you are using a video with very fast motion, you may need to increase this value. If your video has a very low amount of motion, you could decrease this value to allow for higher downsampling and thus longer sequences. + */ + auto_downsample_min_fps?: number + /** + * Transparency Mode + * + * The transparency mode to apply to the first and last frames. This controls how the transparent areas of the first and last frames are filled. + */ + transparency_mode?: 'content_aware' | 'white' | 'black' + /** + * Sampler + * + * Sampler to use for video generation. + */ + sampler?: 'unipc' | 'dpm++' | 'euler' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Video Quality + * + * The quality of the generated video. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number | unknown + /** + * Interpolator Model + * + * The model to use for frame interpolation. Options are 'rife' or 'film'. + */ + interpolator_model?: 'rife' | 'film' + /** + * Enable Auto Downsample + * + * If true, the model will automatically temporally downsample the video to an appropriate frame length for the model, then will interpolate it back to the original frame length. + */ + enable_auto_downsample?: boolean + /** + * Preprocess + * + * Whether to preprocess the input video. + */ + preprocess?: boolean + /** + * Shift + * + * Shift parameter for video generation. + */ + shift?: number + /** + * Acceleration + * + * Acceleration to use for inference. Options are 'none' or 'regular'. Accelerated inference will very slightly affect output, but will be significantly faster. + */ + acceleration?: 'none' | 'low' | 'regular' | unknown + /** + * Frames per Second + * + * Frames per second of the generated video. Must be between 5 to 30. Ignored if match_input_frames_per_second is true. + */ + frames_per_second?: number | unknown + /** + * Match Input Number of Frames + * + * If true, the number of frames in the generated video will match the number of frames in the input video. If false, the number of frames will be determined by the num_frames parameter. + */ + match_input_num_frames?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * Negative prompt for video generation. 
+ */ + negative_prompt?: string + /** + * Video Write Mode + * + * The write mode of the generated video. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Resolution + * + * Resolution of the generated video. + */ + resolution?: 'auto' | '240p' | '360p' | '480p' | '580p' | '720p' + /** + * Return Frames Zip + * + * If true, also return a ZIP file containing all generated frames. + */ + return_frames_zip?: boolean + /** + * Aspect Ratio + * + * Aspect ratio of the generated video. + */ + aspect_ratio?: 'auto' | '16:9' | '1:1' | '9:16' + /** + * Match Input Frames Per Second + * + * If true, the frames per second of the generated video will match the input video. If false, the frames per second will be determined by the frames_per_second parameter. + */ + match_input_frames_per_second?: boolean + /** + * Number of Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: number + /** + * Last Frame URL + * + * URL to the last frame of the video. If provided, the model will use this frame as a reference. + */ + last_frame_url?: string | unknown +} + +/** + * WanVACEInpaintingResponse + */ +export type SchemaWan22VaceFunA14bInpaintingOutput = { + /** + * Prompt + * + * The prompt used for generation. + */ + prompt: string + /** + * ZIP archive of all video frames if requested. + */ + frames_zip?: SchemaFile | unknown + /** + * Seed + * + * The seed used for generation. + */ + seed: number + video: SchemaVideoFile +} + +/** + * WanVACEInpaintingRequest + */ +export type SchemaWan22VaceFunA14bInpaintingInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Video URL + * + * URL to the source video file. Required for inpainting. + */ + video_url: string + /** + * Number of Interpolated Frames + * + * Number of frames to interpolate between the original frames. A value of 0 means no interpolation. + */ + num_interpolated_frames?: number + /** + * Temporal Downsample Factor + * + * Temporal downsample factor for the video. This is an integer value that determines how many frames to skip in the video. A value of 0 means no downsampling. For each downsample factor, one upsample factor will automatically be applied. + */ + temporal_downsample_factor?: number + /** + * First Frame URL + * + * URL to the first frame of the video. If provided, the model will use this frame as a reference. + */ + first_frame_url?: string | unknown + /** + * Reference Image URLs + * + * Urls to source reference image. If provided, the model will use this image as reference. + */ + ref_image_urls?: Array + /** + * Guidance Scale + * + * Guidance scale for classifier-free guidance. Higher values encourage the model to generate images closely related to the text prompt. + */ + guidance_scale?: number + /** + * Number of Frames + * + * Number of frames to generate. Must be between 81 to 241 (inclusive). + */ + num_frames?: number + /** + * Auto Downsample Min FPS + * + * The minimum frames per second to downsample the video to. This is used to help determine the auto downsample factor to try and find the lowest detail-preserving downsample factor. The default value is appropriate for most videos, if you are using a video with very fast motion, you may need to increase this value. If your video has a very low amount of motion, you could decrease this value to allow for higher downsampling and thus longer sequences. 
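+ * + * As a sketch (placeholder URLs), a minimal value for this inpainting request supplies the prompt, the source video, and the required mask video: + * + * ```ts + * const inpaintInput: SchemaWan22VaceFunA14bInpaintingInput = { + *   prompt: 'replace the masked region with a red vintage car', + *   video_url: 'https://example.com/street.mp4', + *   mask_video_url: 'https://example.com/street-mask.mp4', + * } + * ```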
+ */ + auto_downsample_min_fps?: number + /** + * Transparency Mode + * + * The transparency mode to apply to the first and last frames. This controls how the transparent areas of the first and last frames are filled. + */ + transparency_mode?: 'content_aware' | 'white' | 'black' + /** + * Sampler + * + * Sampler to use for video generation. + */ + sampler?: 'unipc' | 'dpm++' | 'euler' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Video Quality + * + * The quality of the generated video. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Mask Video URL + * + * URL to the source mask file. Required for inpainting. + */ + mask_video_url: string | unknown + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number | unknown + /** + * Interpolator Model + * + * The model to use for frame interpolation. Options are 'rife' or 'film'. + */ + interpolator_model?: 'rife' | 'film' + /** + * Enable Auto Downsample + * + * If true, the model will automatically temporally downsample the video to an appropriate frame length for the model, then will interpolate it back to the original frame length. + */ + enable_auto_downsample?: boolean + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean + /** + * Shift + * + * Shift parameter for video generation. + */ + shift?: number + /** + * Preprocess + * + * Whether to preprocess the input video. + */ + preprocess?: boolean + /** + * Acceleration + * + * Acceleration to use for inference. Options are 'none' or 'regular'. Accelerated inference will very slightly affect output, but will be significantly faster. + */ + acceleration?: 'none' | 'low' | 'regular' | unknown + /** + * Mask Image URL + * + * URL to the guiding mask file. If provided, the model will use this mask as a reference to create masked video using salient mask tracking. Will be ignored if mask_video_url is provided. + */ + mask_image_url?: string | unknown + /** + * Frames per Second + * + * Frames per second of the generated video. Must be between 5 to 30. Ignored if match_input_frames_per_second is true. + */ + frames_per_second?: number | unknown + /** + * Match Input Number of Frames + * + * If true, the number of frames in the generated video will match the number of frames in the input video. If false, the number of frames will be determined by the num_frames parameter. + */ + match_input_num_frames?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * Negative prompt for video generation. + */ + negative_prompt?: string + /** + * Video Write Mode + * + * The write mode of the generated video. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Resolution + * + * Resolution of the generated video. + */ + resolution?: 'auto' | '240p' | '360p' | '480p' | '580p' | '720p' + /** + * Return Frames Zip + * + * If true, also return a ZIP file containing all generated frames. + */ + return_frames_zip?: boolean + /** + * Aspect Ratio + * + * Aspect ratio of the generated video. + */ + aspect_ratio?: 'auto' | '16:9' | '1:1' | '9:16' + /** + * Match Input Frames Per Second + * + * If true, the frames per second of the generated video will match the input video. 
If false, the frames per second will be determined by the frames_per_second parameter. + */ + match_input_frames_per_second?: boolean + /** + * Number of Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: number + /** + * Last Frame URL + * + * URL to the last frame of the video. If provided, the model will use this frame as a reference. + */ + last_frame_url?: string | unknown +} + +/** + * WanVACEOutpaintingResponse + */ +export type SchemaWan22VaceFunA14bOutpaintingOutput = { + /** + * Prompt + * + * The prompt used for generation. + */ + prompt: string + /** + * ZIP archive of all video frames if requested. + */ + frames_zip?: SchemaFile | unknown + /** + * Seed + * + * The seed used for generation. + */ + seed: number + video: SchemaVideoFile +} + +/** + * WanVACEOutpaintingRequest + */ +export type SchemaWan22VaceFunA14bOutpaintingInput = { + /** + * Prompt + * + * The text prompt to guide video generation. + */ + prompt: string + /** + * Video URL + * + * URL to the source video file. Required for outpainting. + */ + video_url: string + /** + * Number of Interpolated Frames + * + * Number of frames to interpolate between the original frames. A value of 0 means no interpolation. + */ + num_interpolated_frames?: number + /** + * Temporal Downsample Factor + * + * Temporal downsample factor for the video. This is an integer value that determines how many frames to skip in the video. A value of 0 means no downsampling. For each downsample factor, one upsample factor will automatically be applied. + */ + temporal_downsample_factor?: number + /** + * First Frame URL + * + * URL to the first frame of the video. If provided, the model will use this frame as a reference. + */ + first_frame_url?: string | unknown + /** + * Reference Image URLs + * + * URLs to source reference image. If provided, the model will use this image as reference. + */ + ref_image_urls?: Array + /** + * Expand Ratio + * + * Amount of expansion. This is a float value between 0 and 1, where 0.25 adds 25% to the original video size on the specified sides. + */ + expand_ratio?: number + /** + * Transparency Mode + * + * The transparency mode to apply to the first and last frames. This controls how the transparent areas of the first and last frames are filled. + */ + transparency_mode?: 'content_aware' | 'white' | 'black' + /** + * Number of Frames + * + * Number of frames to generate. Must be between 81 to 241 (inclusive). + */ + num_frames?: number + /** + * Auto Downsample Min FPS + * + * The minimum frames per second to downsample the video to. This is used to help determine the auto downsample factor to try and find the lowest detail-preserving downsample factor. The default value is appropriate for most videos, if you are using a video with very fast motion, you may need to increase this value. If your video has a very low amount of motion, you could decrease this value to allow for higher downsampling and thus longer sequences. + */ + auto_downsample_min_fps?: number + /** + * Guidance Scale + * + * Guidance scale for classifier-free guidance. Higher values encourage the model to generate images closely related to the text prompt. + */ + guidance_scale?: number + /** + * Sampler + * + * Sampler to use for video generation. + */ + sampler?: 'unipc' | 'dpm++' | 'euler' + /** + * Expand Bottom + * + * Whether to expand the video to the bottom. 
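+ * + * As a sketch (placeholder URL), a minimal outpainting request expanding both horizontal sides: + * + * ```ts + * const outpaintInput: SchemaWan22VaceFunA14bOutpaintingInput = { + *   prompt: 'extend the beach with more sand and open sky', + *   video_url: 'https://example.com/beach.mp4', + *   expand_left: true, + *   expand_right: true, + *   expand_ratio: 0.25, // adds 25% of the original size on each selected side + * } + * ```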
+ */ + expand_bottom?: boolean + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Video Quality + * + * The quality of the generated video. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean + /** + * Expand Left + * + * Whether to expand the video to the left. + */ + expand_left?: boolean + /** + * Interpolator Model + * + * The model to use for frame interpolation. Options are 'rife' or 'film'. + */ + interpolator_model?: 'rife' | 'film' + /** + * Enable Auto Downsample + * + * If true, the model will automatically temporally downsample the video to an appropriate frame length for the model, then will interpolate it back to the original frame length. + */ + enable_auto_downsample?: boolean + /** + * Expand Top + * + * Whether to expand the video to the top. + */ + expand_top?: boolean + /** + * Shift + * + * Shift parameter for video generation. + */ + shift?: number + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number | unknown + /** + * Acceleration + * + * Acceleration to use for inference. Options are 'none' or 'regular'. Accelerated inference will very slightly affect output, but will be significantly faster. + */ + acceleration?: 'none' | 'low' | 'regular' | unknown + /** + * Frames per Second + * + * Frames per second of the generated video. Must be between 5 to 30. Ignored if match_input_frames_per_second is true. + */ + frames_per_second?: number | unknown + /** + * Match Input Number of Frames + * + * If true, the number of frames in the generated video will match the number of frames in the input video. If false, the number of frames will be determined by the num_frames parameter. + */ + match_input_num_frames?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * Negative prompt for video generation. + */ + negative_prompt?: string + /** + * Video Write Mode + * + * The write mode of the generated video. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Expand Right + * + * Whether to expand the video to the right. + */ + expand_right?: boolean + /** + * Resolution + * + * Resolution of the generated video. + */ + resolution?: 'auto' | '240p' | '360p' | '480p' | '580p' | '720p' + /** + * Return Frames Zip + * + * If true, also return a ZIP file containing all generated frames. + */ + return_frames_zip?: boolean + /** + * Aspect Ratio + * + * Aspect ratio of the generated video. + */ + aspect_ratio?: 'auto' | '16:9' | '1:1' | '9:16' + /** + * Match Input Frames Per Second + * + * If true, the frames per second of the generated video will match the input video. If false, the frames per second will be determined by the frames_per_second parameter. + */ + match_input_frames_per_second?: boolean + /** + * Number of Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: number + /** + * Last Frame URL + * + * URL to the last frame of the video. If provided, the model will use this frame as a reference. 
+ */ + last_frame_url?: string | unknown +} + +/** + * WanVACEReframeResponse + */ +export type SchemaWan22VaceFunA14bReframeOutput = { + /** + * Prompt + * + * The prompt used for generation. + */ + prompt: string + /** + * ZIP archive of all video frames if requested. + */ + frames_zip?: SchemaFile | unknown + /** + * Seed + * + * The seed used for generation. + */ + seed: number + video: SchemaVideoFile +} + +/** + * WanVACEReframeRequest + */ +export type SchemaWan22VaceFunA14bReframeInput = { + /** + * Prompt + * + * The text prompt to guide video generation. Optional for reframing. + */ + prompt?: string + /** + * Video URL + * + * URL to the source video file. This video will be used as a reference for the reframe task. + */ + video_url: string + /** + * Number of Interpolated Frames + * + * Number of frames to interpolate between the original frames. A value of 0 means no interpolation. + */ + num_interpolated_frames?: number + /** + * Temporal Downsample Factor + * + * Temporal downsample factor for the video. This is an integer value that determines how many frames to skip in the video. A value of 0 means no downsampling. For each downsample factor, one upsample factor will automatically be applied. + */ + temporal_downsample_factor?: number + /** + * First Frame URL + * + * URL to the first frame of the video. If provided, the model will use this frame as a reference. + */ + first_frame_url?: string | unknown + /** + * Guidance Scale + * + * Guidance scale for classifier-free guidance. Higher values encourage the model to generate images closely related to the text prompt. + */ + guidance_scale?: number + /** + * Number of Frames + * + * Number of frames to generate. Must be between 81 to 241 (inclusive). + */ + num_frames?: number + /** + * Auto Downsample Min FPS + * + * The minimum frames per second to downsample the video to. This is used to help determine the auto downsample factor to try and find the lowest detail-preserving downsample factor. The default value is appropriate for most videos, if you are using a video with very fast motion, you may need to increase this value. If your video has a very low amount of motion, you could decrease this value to allow for higher downsampling and thus longer sequences. + */ + auto_downsample_min_fps?: number + /** + * Trim Borders + * + * Whether to trim borders from the video. + */ + trim_borders?: boolean + /** + * Sampler + * + * Sampler to use for video generation. + */ + sampler?: 'unipc' | 'dpm++' | 'euler' + /** + * Transparency Mode + * + * The transparency mode to apply to the first and last frames. This controls how the transparent areas of the first and last frames are filled. + */ + transparency_mode?: 'content_aware' | 'white' | 'black' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Video Quality + * + * The quality of the generated video. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number | unknown + /** + * Interpolator Model + * + * The model to use for frame interpolation. Options are 'rife' or 'film'. 
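+ * + * As a sketch (placeholder URL), a minimal reframe request converting a horizontal video for vertical display (the prompt is optional for reframing): + * + * ```ts + * const reframeInput: SchemaWan22VaceFunA14bReframeInput = { + *   video_url: 'https://example.com/landscape.mp4', + *   aspect_ratio: '9:16', + * } + * ```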
+ */ + interpolator_model?: 'rife' | 'film' + /** + * Enable Auto Downsample + * + * If true, the model will automatically temporally downsample the video to an appropriate frame length for the model, then will interpolate it back to the original frame length. + */ + enable_auto_downsample?: boolean + /** + * Shift + * + * Shift parameter for video generation. + */ + shift?: number + /** + * Acceleration + * + * Acceleration to use for inference. Options are 'none' or 'regular'. Accelerated inference will very slightly affect output, but will be significantly faster. + */ + acceleration?: 'none' | 'low' | 'regular' | unknown + /** + * Zoom Factor + * + * Zoom factor for the video. When this value is greater than 0, the video will be zoomed in by this factor (in relation to the canvas size,) cutting off the edges of the video. A value of 0 means no zoom. + */ + zoom_factor?: number + /** + * Frames per Second + * + * Frames per second of the generated video. Must be between 5 to 30. Ignored if match_input_frames_per_second is true. + */ + frames_per_second?: number | unknown + /** + * Match Input Number of Frames + * + * If true, the number of frames in the generated video will match the number of frames in the input video. If false, the number of frames will be determined by the num_frames parameter. + */ + match_input_num_frames?: boolean + /** + * Enable Safety Checker + * + * If set to true, the safety checker will be enabled. + */ + enable_safety_checker?: boolean + /** + * Negative Prompt + * + * Negative prompt for video generation. + */ + negative_prompt?: string + /** + * Video Write Mode + * + * The write mode of the generated video. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Resolution + * + * Resolution of the generated video. + */ + resolution?: 'auto' | '240p' | '360p' | '480p' | '580p' | '720p' + /** + * Return Frames Zip + * + * If true, also return a ZIP file containing all generated frames. + */ + return_frames_zip?: boolean + /** + * Aspect Ratio + * + * Aspect ratio of the generated video. + */ + aspect_ratio?: 'auto' | '16:9' | '1:1' | '9:16' + /** + * Match Input Frames Per Second + * + * If true, the frames per second of the generated video will match the input video. If false, the frames per second will be determined by the frames_per_second parameter. + */ + match_input_frames_per_second?: boolean + /** + * Number of Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: number + /** + * Last Frame URL + * + * URL to the last frame of the video. If provided, the model will use this frame as a reference. + */ + last_frame_url?: string | unknown +} + +/** + * LucyEditDevOutput + */ +export type SchemaLucyEditDevOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * LucyEditDevInput + */ +export type SchemaLucyEditDevInput = { + /** + * Sync Mode + * + * + * If set to true, the function will wait for the video to be generated + * and uploaded before returning the response. This will increase the + * latency of the function but it allows you to get the video directly + * in the response without going through the CDN. + * + */ + sync_mode?: boolean + /** + * Video Url + * + * URL of the video to edit + */ + video_url: string + /** + * Prompt + * + * Text description of the desired video content + */ + prompt: string + /** + * Enhance Prompt + * + * Whether to enhance the prompt for better results. 
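+ * + * A minimal illustrative value for `SchemaLucyEditDevInput` (placeholder URL): + * + * ```ts + * const editInput: SchemaLucyEditDevInput = { + *   video_url: 'https://example.com/clip.mp4', + *   prompt: 'make the jacket bright red', + * } + * ```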
+ */ + enhance_prompt?: boolean +} + +/** + * LucyEditProOutput + */ +export type SchemaLucyEditProOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * LucyEditProInput + */ +export type SchemaLucyEditProInput = { + /** + * Sync Mode + * + * + * If set to true, the function will wait for the video to be generated + * and uploaded before returning the response. This will increase the + * latency of the function but it allows you to get the video directly + * in the response without going through the CDN. + * + */ + sync_mode?: boolean + /** + * Video Url + * + * URL of the video to edit + */ + video_url: string + /** + * Prompt + * + * Text description of the desired video content + */ + prompt: string + /** + * Resolution + * + * Resolution of the generated video + */ + resolution?: '720p' + /** + * Enhance Prompt + * + * Whether to enhance the prompt for better results. + */ + enhance_prompt?: boolean +} + +/** + * WanAnimateMoveResponse + */ +export type SchemaWanV2214bAnimateMoveOutput = { + /** + * Prompt + * + * The prompt used for generation (auto-generated by the model) + */ + prompt: string + /** + * Frames Zip + * + * ZIP archive of generated frames (if requested). + */ + frames_zip?: SchemaFile + /** + * Seed + * + * The seed used for generation + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * WanAnimateMoveRequest + */ +export type SchemaWanV2214bAnimateMoveInput = { + /** + * Video Write Mode + * + * The write mode of the output video. Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Video URL + * + * URL of the input video. + */ + video_url: string + /** + * Resolution + * + * Resolution of the generated video (480p, 580p, or 720p). + */ + resolution?: '480p' | '580p' | '720p' + /** + * Return Frames ZIP + * + * If true, also return a ZIP archive containing per-frame images generated on GPU (lossless). + */ + return_frames_zip?: boolean + /** + * Shift + * + * Shift value for the video. Must be between 1.0 and 10.0. + */ + shift?: number + /** + * Enable Output Safety Checker + * + * If set to true, output video will be checked for safety after generation. + */ + enable_output_safety_checker?: boolean + /** + * Image URL + * + * URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped. + */ + image_url: string + /** + * Video Quality + * + * The quality of the output video. Higher quality means better visual quality but larger file size. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Enable Safety Checker + * + * If set to true, input data will be checked for safety before processing. + */ + enable_safety_checker?: boolean + /** + * Number of Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: number + /** + * Use Turbo + * + * If true, applies quality enhancement for faster generation with improved quality. When enabled, parameters are automatically optimized for best results. + */ + use_turbo?: boolean + /** + * Guidance Scale + * + * Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality. 
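+ * + * As a sketch (placeholder URLs), a minimal value for this request pairs the motion-source video with the character image to animate: + * + * ```ts + * const animateInput: SchemaWanV2214bAnimateMoveInput = { + *   video_url: 'https://example.com/motion-source.mp4', + *   image_url: 'https://example.com/character.png', + * } + * ```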
*/ + guidance_scale?: number + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number +} + +/** + * WanAnimateReplaceResponse + */ +export type SchemaWanV2214bAnimateReplaceOutput = { + /** + * Prompt + * + * The prompt used for generation (auto-generated by the model) + */ + prompt: string + /** + * Frames Zip + * + * ZIP archive of generated frames (if requested). + */ + frames_zip?: SchemaFile + /** + * Seed + * + * The seed used for generation + */ + seed: number + /** + * Video + * + * The generated video file. + */ + video: SchemaFile +} + +/** + * WanAnimateReplaceRequest + */ +export type SchemaWanV2214bAnimateReplaceInput = { + /** + * Video Write Mode + * + * The write mode of the output video. Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Video URL + * + * URL of the input video. + */ + video_url: string + /** + * Resolution + * + * Resolution of the generated video (480p, 580p, or 720p). + */ + resolution?: '480p' | '580p' | '720p' + /** + * Return Frames ZIP + * + * If true, also return a ZIP archive containing per-frame images generated on GPU (lossless). + */ + return_frames_zip?: boolean + /** + * Shift + * + * Shift value for the video. Must be between 1.0 and 10.0. + */ + shift?: number + /** + * Enable Output Safety Checker + * + * If set to true, output video will be checked for safety after generation. + */ + enable_output_safety_checker?: boolean + /** + * Image URL + * + * URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped. + */ + image_url: string + /** + * Video Quality + * + * The quality of the output video. Higher quality means better visual quality but larger file size. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Enable Safety Checker + * + * If set to true, input data will be checked for safety before processing. + */ + enable_safety_checker?: boolean + /** + * Number of Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: number + /** + * Use Turbo + * + * If true, applies quality enhancement for faster generation with improved quality. When enabled, parameters are automatically optimized for best results. + */ + use_turbo?: boolean + /** + * Guidance Scale + * + * Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality. + */ + guidance_scale?: number + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number +} + +/** + * WanVACEVideoEditResponse + */ +export type SchemaWanVaceAppsVideoEditOutput = { + /** + * Frames Zip + * + * ZIP archive of generated frames if requested. + */ + frames_zip?: SchemaFile + /** + * Video + * + * The edited video. + */ + video: SchemaVideoFile +} + +/** + * WanVACEVideoEditRequest + */ +export type SchemaWanVaceAppsVideoEditInput = { + /** + * Prompt + * + * Prompt to edit the video. + */ + prompt: string + /** + * Video URL + * + * URL of the input video. + */ + video_url: string + /** + * Acceleration + * + * Acceleration to use for inference. Options are 'none', 'low', or 'regular'. Accelerated inference will very slightly affect output, but will be significantly faster.
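+ * + * A minimal illustrative value for `SchemaWanVaceAppsVideoEditInput` (placeholder URL): + * + * ```ts + * const videoEditInput: SchemaWanVaceAppsVideoEditInput = { + *   prompt: 'turn the daytime scene into a rainy night', + *   video_url: 'https://example.com/scene.mp4', + * } + * ```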
*/ + acceleration?: 'none' | 'low' | 'regular' + /** + * Resolution + * + * Resolution of the edited video. + */ + resolution?: 'auto' | '240p' | '360p' | '480p' | '580p' | '720p' + /** + * Aspect Ratio + * + * Aspect ratio of the edited video. + */ + aspect_ratio?: 'auto' | '16:9' | '9:16' | '1:1' + /** + * Return Frames ZIP + * + * Whether to include a ZIP archive containing all generated frames. + */ + return_frames_zip?: boolean + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Video Type + * + * The type of video you're editing. Use 'general' for most videos, and 'human' for videos emphasizing human subjects and motions. The default value 'auto' means the model will guess based on the first frame of the video. + */ + video_type?: 'auto' | 'general' | 'human' + /** + * Image URLs + * + * URLs of the input images to use as a reference for the generation. + */ + image_urls?: Array<string> + /** + * Enable Auto Downsampling + * + * Whether to enable automatic downsampling. If your video has a high frame rate or is long, this allows longer sequences to be generated. The video will be interpolated back to the original frame rate after generation. + */ + enable_auto_downsample?: boolean + /** + * Auto Downsample Min FPS + * + * The minimum frames per second to downsample the video to. + */ + auto_downsample_min_fps?: number +} + +/** + * SeedVRVideoOutput + */ +export type SchemaSeedvrUpscaleVideoOutput = { + /** + * Seed + * + * The random seed used for the generation process. + */ + seed: number + video: SchemaFile +} + +/** + * SeedVRVideoInput + */ +export type SchemaSeedvrUpscaleVideoInput = { + /** + * Upscale Mode + * + * The mode to use for the upscale. If 'target', the upscale factor will be calculated based on the target resolution. If 'factor', the upscale factor will be used directly. + */ + upscale_mode?: 'target' | 'factor' + /** + * Video Url + * + * The input video to be processed + */ + video_url: string + /** + * Noise Scale + * + * The noise scale to use for the generation process. + */ + noise_scale?: number + /** + * Output Format + * + * The format of the output video. + */ + output_format?: + | 'X264 (.mp4)' + | 'VP9 (.webm)' + | 'PRORES4444 (.mov)' + | 'GIF (.gif)' + /** + * Output Write Mode + * + * The write mode of the output video. + */ + output_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Target Resolution + * + * The target resolution to upscale to when `upscale_mode` is `target`. + */ + target_resolution?: '720p' | '1080p' | '1440p' | '2160p' + /** + * Output Quality + * + * The quality of the output video. + */ + output_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Upscale Factor + * + * Upscaling factor to be used. Will multiply the dimensions by this factor when `upscale_mode` is `factor`. + */ + upscale_factor?: number + /** + * Seed + * + * The random seed used for the generation process. + */ + seed?: number | unknown +} + +/** + * InfinitalkVid2VidResponse + */ +export type SchemaInfinitalkVideoToVideoOutput = { + /** + * Seed + * + * The seed used for generation. + */ + seed: number + /** + * Video + * + * The generated video file.
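+ * + * For the `SchemaSeedvrUpscaleVideoInput` request above, a sketch (placeholder URL) of target-based upscaling, where the factor is derived from the requested resolution: + * + * ```ts + * const seedvrInput: SchemaSeedvrUpscaleVideoInput = { + *   video_url: 'https://example.com/lowres.mp4', + *   upscale_mode: 'target', + *   target_resolution: '1080p', + * } + * ```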
+   */
+  video: SchemaFile
+}
+
+/**
+ * InfiniTalkVid2VidAudioRequest
+ */
+export type SchemaInfinitalkVideoToVideoInput = {
+  /**
+   * Prompt
+   *
+   * The text prompt to guide video generation.
+   */
+  prompt: string
+  /**
+   * Resolution
+   *
+   * Resolution of the video to generate. Must be either 480p or 720p.
+   */
+  resolution?: '480p' | '720p'
+  /**
+   * Acceleration
+   *
+   * The acceleration level to use for generation.
+   */
+  acceleration?: 'none' | 'regular' | 'high'
+  /**
+   * Video Url
+   *
+   * URL of the input video.
+   */
+  video_url: string
+  /**
+   * Audio URL
+   *
+   * The URL of the audio file.
+   */
+  audio_url: string
+  /**
+   * Number of Frames
+   *
+   * Number of frames to generate. Must be between 81 and 129 (inclusive). If the number of frames is greater than 81, the video will be generated with 1.25x more billing units.
+   */
+  num_frames?: number
+  /**
+   * Seed
+   *
+   * Random seed for reproducibility. If None, a random seed is chosen.
+   */
+  seed?: number
+}
+
+/**
+ * LongWanVACEReframeResponse
+ */
+export type SchemaWanVaceAppsLongReframeOutput = {
+  /**
+   * Video
+   *
+   * The output video file.
+   */
+  video: SchemaVideoFile
+}
+
+/**
+ * LongWanVACEReframeRequest
+ */
+export type SchemaWanVaceAppsLongReframeInput = {
+  /**
+   * Prompt
+   *
+   * The text prompt to guide video generation. Optional for reframing.
+   */
+  prompt?: string
+  /**
+   * Video URL
+   *
+   * URL to the source video file. This video will be used as a reference for the reframe task.
+   */
+  video_url: string
+  /**
+   * Acceleration
+   *
+   * Acceleration to use for inference. Options are 'none', 'low', or 'regular'. Accelerated inference will very slightly affect output, but will be significantly faster.
+   */
+  acceleration?: 'none' | 'low' | 'regular'
+  /**
+   * Paste Back
+   *
+   * Whether to paste back the reframed scene to the original video.
+   */
+  paste_back?: boolean
+  /**
+   * Zoom Factor
+   *
+   * Zoom factor for the video. When this value is greater than 0, the video will be zoomed in by this factor (in relation to the canvas size), cutting off the edges of the video. A value of 0 means no zoom.
+   */
+  zoom_factor?: number
+  /**
+   * Shift
+   *
+   * Shift parameter for video generation.
+   */
+  shift?: number
+  /**
+   * Scene Threshold
+   *
+   * Threshold for scene detection sensitivity (0-100). Lower values detect more scenes.
+   */
+  scene_threshold?: number
+  /**
+   * Enable Safety Checker
+   *
+   * If set to true, the safety checker will be enabled.
+   */
+  enable_safety_checker?: boolean
+  /**
+   * Guidance Scale
+   *
+   * Guidance scale for classifier-free guidance. Higher values encourage the model to generate images closely related to the text prompt.
+   */
+  guidance_scale?: number
+  /**
+   * Auto Downsample Min Fps
+   *
+   * Minimum FPS for auto downsample.
+   */
+  auto_downsample_min_fps?: number
+  /**
+   * Negative Prompt
+   *
+   * Negative prompt for video generation.
+   */
+  negative_prompt?: string
+  /**
+   * Sampler
+   *
+   * Sampler to use for video generation.
+   */
+  sampler?: 'unipc' | 'dpm++' | 'euler'
+  /**
+   * Video Write Mode
+   *
+   * The write mode of the generated video.
+   */
+  video_write_mode?: 'fast' | 'balanced' | 'small'
+  /**
+   * Resolution
+   *
+   * Resolution of the generated video.
+   */
+  resolution?: 'auto' | '240p' | '360p' | '480p' | '580p' | '720p'
+  /**
+   * Aspect Ratio
+   *
+   * Aspect ratio of the generated video.
+   */
+  aspect_ratio?: 'auto' | '16:9' | '1:1' | '9:16'
+  /**
+   * Return Frames Zip
+   *
+   * If true, also return a ZIP file containing all generated frames.
+ */ + return_frames_zip?: boolean + /** + * Trim Borders + * + * Whether to trim borders from the video. + */ + trim_borders?: boolean + /** + * Transparency Mode + * + * The transparency mode to apply to the first and last frames. This controls how the transparent areas of the first and last frames are filled. + */ + transparency_mode?: 'content_aware' | 'white' | 'black' + /** + * Video Quality + * + * The quality of the generated video. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Seed + * + * Random seed for reproducibility. If None, a random seed is chosen. + */ + seed?: number + /** + * Interpolator Model + * + * The model to use for frame interpolation. Options are 'rife' or 'film'. + */ + interpolator_model?: 'rife' | 'film' + /** + * Enable Auto Downsample + * + * Whether to enable auto downsample. + */ + enable_auto_downsample?: boolean + /** + * Number of Inference Steps + * + * Number of inference steps for sampling. Higher values give better quality but take longer. + */ + num_inference_steps?: number +} + +/** + * ImageFile + */ +export type SchemaImageFile = { + /** + * File Size + * + * The size of the file in bytes. + */ + file_size?: number + /** + * Height + * + * The height of the image + */ + height?: number + /** + * Url + * + * The URL where the file can be downloaded from. + */ + url: string + /** + * Width + * + * The width of the image + */ + width?: number + /** + * File Name + * + * The name of the file. It will be auto-generated if not provided. + */ + file_name?: string + /** + * Content Type + * + * The mime type of the file. + */ + content_type?: string + /** + * File Data + * + * File data + */ + file_data?: Blob | File +} + +/** + * RemixOutput + */ +export type SchemaSora2VideoToVideoRemixOutput = { + /** + * Spritesheet + * + * Spritesheet image for the video + */ + spritesheet?: SchemaImageFile + /** + * Thumbnail + * + * Thumbnail image for the video + */ + thumbnail?: SchemaImageFile + /** + * Video ID + * + * The ID of the generated video + */ + video_id: string + /** + * Video + * + * The generated video + */ + video: SchemaVideoFile +} + +/** + * RemixInput + */ +export type SchemaSora2VideoToVideoRemixInput = { + /** + * Prompt + * + * Updated text prompt that directs the remix generation + */ + prompt: string + /** + * Video ID + * + * The video_id from a previous Sora 2 generation. Note: You can only remix videos that were generated by Sora (via text-to-video or image-to-video endpoints), not arbitrary uploaded videos. + */ + video_id: string + /** + * Delete Video + * + * Whether to delete the video after generation for privacy reasons. If True, the video cannot be used for remixing and will be permanently deleted. + */ + delete_video?: boolean +} + +/** + * VideoToVideoOutput + */ +export type SchemaKreaWan14bVideoToVideoOutput = { + video: SchemaFile +} + +/** + * VideoToVideoInput + */ +export type SchemaKreaWan14bVideoToVideoInput = { + /** + * Prompt + * + * Prompt for the video-to-video generation. + */ + prompt: string + /** + * Video Url + * + * URL of the input video. Currently, only outputs of 16:9 aspect ratio and 480p resolution are supported. Video duration should be less than 1000 frames at 16fps, and output frames will be 6 plus a multiple of 12, for example 18, 30, 42, etc. 
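+   *
+   * @example
+   * // Illustrative sketch only (computed values assumed, not part of the
+   * // generated schema): valid output frame counts follow 6 + 12k.
+   * // const validFrameCounts = Array.from({ length: 5 }, (_, k) => 6 + 12 * (k + 1))
+   * // => [18, 30, 42, 54, 66]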
+   */
+  video_url: string
+  /**
+   * Strength
+   *
+   * Denoising strength for the video-to-video generation. 0.0 preserves the original, 1.0 completely remakes the video.
+   */
+  strength?: number
+  /**
+   * Enable Prompt Expansion
+   *
+   * Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning.
+   */
+  enable_prompt_expansion?: boolean
+  /**
+   * Seed
+   *
+   * Seed for the video-to-video generation.
+   */
+  seed?: number | unknown
+}
+
+/**
+ * Video
+ */
+export type SchemaVideoOutput = {
+  /**
+   * File Size
+   *
+   * The size of the file in bytes.
+   */
+  file_size?: number | unknown
+  /**
+   * File Name
+   *
+   * The name of the file. It will be auto-generated if not provided.
+   */
+  file_name?: string | unknown
+  /**
+   * Content Type
+   *
+   * The mime type of the file.
+   */
+  content_type?: string | unknown
+  /**
+   * Url
+   *
+   * The URL where the file can be downloaded from.
+   */
+  url: string
+}
+
+/**
+ * VideoOutput
+ */
+export type SchemaSfxV15VideoToVideoOutput = {
+  /**
+   * Video
+   *
+   * The processed video with sound effects
+   */
+  video: Array<SchemaVideoOutput>
+}
+
+/**
+ * Input
+ */
+export type SchemaSfxV15VideoToVideoInput = {
+  /**
+   * Num Samples
+   *
+   * The number of samples to generate from the model
+   */
+  num_samples?: number | unknown
+  /**
+   * Duration
+   *
+   * The duration of the generated audio in seconds
+   */
+  duration?: number | unknown
+  /**
+   * Start Offset
+   *
+   * The start offset in seconds to start the audio generation from
+   */
+  start_offset?: number | unknown
+  /**
+   * Video Url
+   *
+   * A video url that can be accessed from the API to process and add sound effects
+   */
+  video_url: string
+  /**
+   * Seed
+   *
+   * The seed to use for the generation. If not provided, a random seed will be used
+   */
+  seed?: number | unknown
+  /**
+   * Text Prompt
+   *
+   * Additional description to guide the model
+   */
+  text_prompt?: string | unknown
+}
+
+/**
+ * Q2VideoExtensionOutput
+ */
+export type SchemaViduQ2VideoExtensionProOutput = {
+  /**
+   * Video
+   *
+   * The extended video using the Q2 model
+   */
+  video: SchemaFile
+}
+
+/**
+ * Q2VideoExtensionRequest
+ */
+export type SchemaViduQ2VideoExtensionProInput = {
+  /**
+   * Prompt
+   *
+   * Text prompt to guide the video extension
+   */
+  prompt?: string
+  /**
+   * Duration
+   *
+   * Duration of the extension in seconds
+   */
+  duration?: 2 | 3 | 4 | 5 | 6 | 7
+  /**
+   * Video Url
+   *
+   * URL of the video to extend
+   */
+  video_url: string
+  /**
+   * Resolution
+   *
+   * Output video resolution
+   */
+  resolution?: '720p' | '1080p'
+  /**
+   * Seed
+   *
+   * Random seed for reproducibility. If None, a random seed is chosen.
+   */
+  seed?: number
+}
+
+/**
+ * VideoOutput
+ */
+export type SchemaBirefnetV2VideoOutput = {
+  /**
+   * Video
+   *
+   * Video with background removed
+   */
+  video: SchemaVideoFile
+  /**
+   * Mask Video
+   *
+   * Mask used to remove the background
+   */
+  mask_video?: SchemaVideoFile
+}
+
+/**
+ * VideoInputV2
+ */
+export type SchemaBirefnetV2VideoInput = {
+  /**
+   * Video Write Mode
+   *
+   * The write mode of the generated video.
+   */
+  video_write_mode?: 'fast' | 'balanced' | 'small'
+  /**
+   * Video Output Type
+   *
+   * The output type of the generated video.
+   */
+  video_output_type?:
+    | 'X264 (.mp4)'
+    | 'VP9 (.webm)'
+    | 'PRORES4444 (.mov)'
+    | 'GIF (.gif)'
+  /**
+   * Operating Resolution
+   *
+   * The resolution to operate on. The higher the resolution, the more accurate the output will be for high res input images. The '2304x2304' option is only available for the 'General Use (Dynamic)' model.
+   */
+  operating_resolution?: '1024x1024' | '2048x2048' | '2304x2304'
+  /**
+   * Video Url
+   *
+   * URL of the video to remove background from
+   */
+  video_url: string
+  /**
+   * Model
+   *
+   *
+   * Model to use for background removal.
+   * The 'General Use (Light)' model is the original model used in the BiRefNet repository.
+   * The 'General Use (Light 2K)' model is the original model used in the BiRefNet repository but trained with 2K images.
+   * The 'General Use (Heavy)' model is a slower but more accurate model.
+   * The 'Matting' model is a model trained specifically for matting images.
+   * The 'Portrait' model is a model trained specifically for portrait images.
+   * The 'General Use (Dynamic)' model supports dynamic resolutions from 256x256 to 2304x2304.
+   * The 'General Use (Light)' model is recommended for most use cases.
+   *
+   * The corresponding models are as follows:
+   * - 'General Use (Light)': BiRefNet
+   * - 'General Use (Light 2K)': BiRefNet_lite-2K
+   * - 'General Use (Heavy)': BiRefNet_lite
+   * - 'Matting': BiRefNet-matting
+   * - 'Portrait': BiRefNet-portrait
+   * - 'General Use (Dynamic)': BiRefNet_dynamic
+   *
+   */
+  model?:
+    | 'General Use (Light)'
+    | 'General Use (Light 2K)'
+    | 'General Use (Heavy)'
+    | 'Matting'
+    | 'Portrait'
+    | 'General Use (Dynamic)'
+  /**
+   * Video Quality
+   *
+   * The quality of the generated video.
+   */
+  video_quality?: 'low' | 'medium' | 'high' | 'maximum'
+  /**
+   * Sync Mode
+   *
+   * If `True`, the media will be returned as a data URI and the output data won't be available in the request history.
+   */
+  sync_mode?: boolean
+  /**
+   * Output Mask
+   *
+   * Whether to output the mask used to remove the background
+   */
+  output_mask?: boolean
+  /**
+   * Refine Foreground
+   *
+   * Whether to refine the foreground using the estimated mask
+   */
+  refine_foreground?: boolean
+}
+
+/**
+ * VideoEffectOutput
+ */
+export type SchemaVideoAsPromptOutput = {
+  video: SchemaFile
+}
+
+/**
+ * VideoEffectInputWan
+ */
+export type SchemaVideoAsPromptInput = {
+  /**
+   * Prompt
+   *
+   * The prompt to generate an image from.
+   */
+  prompt: string
+  /**
+   * Aspect Ratio
+   *
+   * Aspect ratio of the generated video.
+   */
+  aspect_ratio?: '16:9' | '9:16'
+  /**
+   * Resolution
+   *
+   * Resolution of the generated video.
+   */
+  resolution?: '480p' | '580p' | '720p'
+  /**
+   * Video Url
+   *
+   * Reference video to generate effect video from.
+   */
+  video_url: string
+  /**
+   * Image Url
+   *
+   * Input image to generate the effect video for.
+   */
+  image_url: string
+  /**
+   * Frames Per Second
+   *
+   * Frames per second for the output video. Only applicable if output_type is 'video'.
+   */
+  fps?: number
+  /**
+   * Video Description
+   *
+   * A brief description of the input video content.
+   */
+  video_description: string
+  /**
+   * Seed
+   *
+   * Random seed for reproducible generation. If set to none, a random seed will be used.
+   */
+  seed?: number | unknown
+  /**
+   * Guidance Scale
+   *
+   * Guidance scale for generation.
+   */
+  guidance_scale?: number
+  /**
+   * Enable Safety Checker
+   *
+   * If set to true, the safety checker will be enabled.
+   */
+  enable_safety_checker?: boolean
+  /**
+   * Num Frames
+   *
+   * The number of frames to generate.
+   */
+  num_frames?: number
+}
+
+/**
+ * UpscaleOutput
+ */
+export type SchemaBytedanceUpscalerUpscaleVideoOutput = {
+  /**
+   * Duration
+   *
+   * Duration of audio input/video output as used for billing.
+ */ + duration: number + /** + * Video + * + * Generated video file + */ + video: SchemaFile +} + +/** + * UpscaleInput + */ +export type SchemaBytedanceUpscalerUpscaleVideoInput = { + /** + * Target Fps + * + * The target FPS of the video to upscale. + */ + target_fps?: '30fps' | '60fps' + /** + * Video Url + * + * The URL of the video to upscale. + */ + video_url: string + /** + * Target Resolution + * + * The target resolution of the video to upscale. + */ + target_resolution?: '1080p' | '2k' | '4k' +} + +/** + * AutoSubtitleOutput + * + * Output model for video with automatic subtitles + */ +export type SchemaWorkflowUtilitiesAutoSubtitleOutput = { + /** + * Transcription + * + * Full transcription text + */ + transcription: string + /** + * Subtitle Count + * + * Number of subtitle segments generated + */ + subtitle_count: number + /** + * Transcription Metadata + * + * Additional transcription metadata from ElevenLabs (language, segments, etc.) + */ + transcription_metadata?: { + [key: string]: unknown + } + /** + * Words + * + * Word-level timing information from transcription service + */ + words?: Array<{ + [key: string]: unknown + }> + /** + * Video + * + * The video with automatic subtitles + */ + video: SchemaFile +} + +/** + * AutoSubtitleInput + * + * Input model for automatic subtitle generation and styling + */ +export type SchemaWorkflowUtilitiesAutoSubtitleInput = { + /** + * Font Weight + * + * Font weight (TikTok style typically uses bold or black) + */ + font_weight?: 'normal' | 'bold' | 'black' + /** + * Video Url + * + * URL of the video file to add automatic subtitles to + * + * Max file size: 95.4MB, Timeout: 30.0s + */ + video_url: string + /** + * Stroke Width + * + * Text stroke/outline width in pixels (0 for no stroke) + */ + stroke_width?: number + /** + * Font Color + * + * Subtitle text color for non-active words + */ + font_color?: + | 'white' + | 'black' + | 'red' + | 'green' + | 'blue' + | 'yellow' + | 'orange' + | 'purple' + | 'pink' + | 'brown' + | 'gray' + | 'cyan' + | 'magenta' + /** + * Font Size + * + * Font size for subtitles (TikTok style uses larger text) + */ + font_size?: number + /** + * Language + * + * Language code for transcription (e.g., 'en', 'es', 'fr', 'de', 'it', 'pt', 'nl', 'ja', 'zh', 'ko') or 3-letter ISO code (e.g., 'eng', 'spa', 'fra') + */ + language?: string + /** + * Y Offset + * + * Vertical offset in pixels (positive = move down, negative = move up) + */ + y_offset?: number + /** + * Background Opacity + * + * Background opacity (0.0 = fully transparent, 1.0 = fully opaque) + */ + background_opacity?: number + /** + * Stroke Color + * + * Text stroke/outline color + */ + stroke_color?: + | 'black' + | 'white' + | 'red' + | 'green' + | 'blue' + | 'yellow' + | 'orange' + | 'purple' + | 'pink' + | 'brown' + | 'gray' + | 'cyan' + | 'magenta' + /** + * Highlight Color + * + * Color for the currently speaking word (karaoke-style highlight) + */ + highlight_color?: + | 'white' + | 'black' + | 'red' + | 'green' + | 'blue' + | 'yellow' + | 'orange' + | 'purple' + | 'pink' + | 'brown' + | 'gray' + | 'cyan' + | 'magenta' + /** + * Enable Animation + * + * Enable animation effects for subtitles (bounce style entrance) + */ + enable_animation?: boolean + /** + * Font Name + * + * Any Google Font name from fonts.google.com (e.g., 'Montserrat', 'Poppins', 'BBH Sans Hegarty') + */ + font_name?: string + /** + * Position + * + * Vertical position of subtitles + */ + position?: 'top' | 'center' | 'bottom' + /** + * Words Per Subtitle + * + * 
Maximum number of words per subtitle segment. Use 1 for single-word display, 2-3 for short phrases, or 8-12 for full sentences.
+   */
+  words_per_subtitle?: number
+  /**
+   * Background Color
+   *
+   * Background color behind text ('none' or 'transparent' for no background)
+   */
+  background_color?:
+    | 'black'
+    | 'white'
+    | 'red'
+    | 'green'
+    | 'blue'
+    | 'yellow'
+    | 'orange'
+    | 'purple'
+    | 'pink'
+    | 'brown'
+    | 'gray'
+    | 'cyan'
+    | 'magenta'
+    | 'none'
+    | 'transparent'
+}
+
+/**
+ * FlashVSRPlusVideoOutput
+ */
+export type SchemaFlashvsrUpscaleVideoOutput = {
+  /**
+   * Seed
+   *
+   * The random seed used for the generation process.
+   */
+  seed: number
+  /**
+   * Video
+   *
+   * Upscaled video file after processing
+   */
+  video: SchemaFile
+}
+
+/**
+ * FlashVSRPlusVideoInput
+ *
+ * Input fields common to FlashVSR+ image/video endpoints.
+ */
+export type SchemaFlashvsrUpscaleVideoInput = {
+  /**
+   * Video Url
+   *
+   * The input video to be upscaled
+   */
+  video_url: string
+  /**
+   * Acceleration
+   *
+   * Acceleration mode for VAE decoding. Options: regular (best quality), high (balanced), full (fastest). More acceleration means longer duration videos can be processed too.
+   */
+  acceleration?: 'regular' | 'high' | 'full'
+  /**
+   * Quality
+   *
+   * Quality level for tile blending (0-100). Controls overlap between tiles to prevent grid artifacts. Higher values provide better quality with more overlap. Recommended: 70-85 for high-res videos, 50-70 for faster processing.
+   */
+  quality?: number
+  /**
+   * Output Format
+   *
+   * The format of the output video.
+   */
+  output_format?:
+    | 'X264 (.mp4)'
+    | 'VP9 (.webm)'
+    | 'PRORES4444 (.mov)'
+    | 'GIF (.gif)'
+  /**
+   * Color Fix
+   *
+   * Color correction enabled.
+   */
+  color_fix?: boolean
+  /**
+   * Output Write Mode
+   *
+   * The write mode of the output video.
+   */
+  output_write_mode?: 'fast' | 'balanced' | 'small'
+  /**
+   * Sync Mode
+   *
+   * If `True`, the media will be returned inline and not stored in history.
+   */
+  sync_mode?: boolean
+  /**
+   * Output Quality
+   *
+   * The quality of the output video.
+   */
+  output_quality?: 'low' | 'medium' | 'high' | 'maximum'
+  /**
+   * Upscale Factor
+   *
+   * Upscaling factor to be used.
+   */
+  upscale_factor?: number
+  /**
+   * Preserve Audio
+   *
+   * Copy the original audio tracks into the upscaled video using FFmpeg when possible.
+   */
+  preserve_audio?: boolean
+  /**
+   * Seed
+   *
+   * The random seed used for the generation process.
+   */
+  seed?: number
+}
+
+/**
+ * EdittoOutput
+ */
+export type SchemaEdittoOutput = {
+  /**
+   * Prompt
+   *
+   * The prompt used for generation.
+   */
+  prompt: string
+  /**
+   * ZIP archive of all video frames if requested.
+   */
+  frames_zip?: SchemaFile | unknown
+  /**
+   * Seed
+   *
+   * The seed used for generation.
+   */
+  seed: number
+  video: SchemaVideoFile
+}
+
+/**
+ * EdittoInput
+ */
+export type SchemaEdittoInput = {
+  /**
+   * Prompt
+   *
+   * The text prompt to guide video generation.
+   */
+  prompt: string
+  /**
+   * Video URL
+   *
+   * URL to the source video file. Required for inpainting.
+   */
+  video_url: string
+  /**
+   * Acceleration
+   *
+   * Acceleration to use for inference. Options are 'none', 'low', or 'regular'. Accelerated inference will very slightly affect output, but will be significantly faster.
+   */
+  acceleration?: 'none' | 'low' | 'regular' | unknown
+  /**
+   * Number of Interpolated Frames
+   *
+   * Number of frames to interpolate between the original frames. A value of 0 means no interpolation.
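+   *
+   * @example
+   * // Hedged illustration (assumed behavior, not stated by the schema):
+   * // interpolating n frames between each original pair multiplies the frame
+   * // count roughly (n + 1)x, so num_interpolated_frames: 1 about doubles
+   * // the effective frame rate.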
+   */
+  num_interpolated_frames?: number
+  /**
+   * Temporal Downsample Factor
+   *
+   * Temporal downsample factor for the video. This is an integer value that determines how many frames to skip in the video. A value of 0 means no downsampling. For each downsample factor, one upsample factor will automatically be applied.
+   */
+  temporal_downsample_factor?: number
+  /**
+   * Shift
+   *
+   * Shift parameter for video generation.
+   */
+  shift?: number
+  /**
+   * Frames per Second
+   *
+   * Frames per second of the generated video. Must be between 5 and 30. Ignored if match_input_frames_per_second is true.
+   */
+  frames_per_second?: number | unknown
+  /**
+   * Match Input Number of Frames
+   *
+   * If true, the number of frames in the generated video will match the number of frames in the input video. If false, the number of frames will be determined by the num_frames parameter.
+   */
+  match_input_num_frames?: boolean
+  /**
+   * Guidance Scale
+   *
+   * Guidance scale for classifier-free guidance. Higher values encourage the model to generate images closely related to the text prompt.
+   */
+  guidance_scale?: number
+  /**
+   * Number of Frames
+   *
+   * Number of frames to generate. Must be between 81 and 241 (inclusive).
+   */
+  num_frames?: number
+  /**
+   * Enable Safety Checker
+   *
+   * If set to true, the safety checker will be enabled.
+   */
+  enable_safety_checker?: boolean
+  /**
+   * Negative Prompt
+   *
+   * Negative prompt for video generation.
+   */
+  negative_prompt?: string
+  /**
+   * Sampler
+   *
+   * Sampler to use for video generation.
+   */
+  sampler?: 'unipc' | 'dpm++' | 'euler'
+  /**
+   * Video Write Mode
+   *
+   * The write mode of the generated video.
+   */
+  video_write_mode?: 'fast' | 'balanced' | 'small'
+  /**
+   * Resolution
+   *
+   * Resolution of the generated video.
+   */
+  resolution?: 'auto' | '240p' | '360p' | '480p' | '580p' | '720p'
+  /**
+   * Aspect Ratio
+   *
+   * Aspect ratio of the generated video.
+   */
+  aspect_ratio?: 'auto' | '16:9' | '1:1' | '9:16'
+  /**
+   * Return Frames Zip
+   *
+   * If true, also return a ZIP file containing all generated frames.
+   */
+  return_frames_zip?: boolean
+  /**
+   * Video Quality
+   *
+   * The quality of the generated video.
+   */
+  video_quality?: 'low' | 'medium' | 'high' | 'maximum'
+  /**
+   * Match Input Frames Per Second
+   *
+   * If true, the frames per second of the generated video will match the input video. If false, the frames per second will be determined by the frames_per_second parameter.
+   */
+  match_input_frames_per_second?: boolean
+  /**
+   * Sync Mode
+   *
+   * If `True`, the media will be returned as a data URI and the output data won't be available in the request history.
+   */
+  sync_mode?: boolean
+  /**
+   * Number of Inference Steps
+   *
+   * Number of inference steps for sampling. Higher values give better quality but take longer.
+   */
+  num_inference_steps?: number
+  /**
+   * Seed
+   *
+   * Random seed for reproducibility. If None, a random seed is chosen.
+   */
+  seed?: number | unknown
+  /**
+   * Enable Auto Downsample
+   *
+   * If true, the model will automatically temporally downsample the video to an appropriate frame length for the model, then will interpolate it back to the original frame length.
+   */
+  enable_auto_downsample?: boolean
+}
+
+/**
+ * PointPromptBase
+ */
+export type SchemaPointPromptBase = {
+  /**
+   * Y
+   *
+   * Y Coordinate of the prompt
+   */
+  y?: number
+  /**
+   * X
+   *
+   * X Coordinate of the prompt
+   */
+  x?: number
+  /**
+   * Object Id
+   *
+   * Optional object identifier. Prompts sharing an object id refine the same object.
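+   *
+   * @example
+   * // Hedged sketch (coordinates assumed, not from the schema): two foreground
+   * // points with the same object_id refine a single tracked object.
+   * // [{ x: 320, y: 180, label: 1, object_id: 1 },
+   * //  { x: 400, y: 210, label: 1, object_id: 1 }]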
+   */
+  object_id?: number
+  /**
+   * Label
+   *
+   * 1 for foreground, 0 for background
+   */
+  label?: 0 | 1
+}
+
+/**
+ * BoxPromptBase
+ */
+export type SchemaBoxPromptBase = {
+  /**
+   * Y Min
+   *
+   * Y Min Coordinate of the box
+   */
+  y_min?: number
+  /**
+   * Object Id
+   *
+   * Optional object identifier. Boxes sharing an object id refine the same object.
+   */
+  object_id?: number
+  /**
+   * X Max
+   *
+   * X Max Coordinate of the box
+   */
+  x_max?: number
+  /**
+   * X Min
+   *
+   * X Min Coordinate of the box
+   */
+  x_min?: number
+  /**
+   * Y Max
+   *
+   * Y Max Coordinate of the box
+   */
+  y_max?: number
+}
+
+/**
+ * SAM3VideoOutput
+ */
+export type SchemaSam3VideoOutput = {
+  /**
+   * Boundingbox Frames Zip
+   *
+   * Zip file containing per-frame bounding box overlays.
+   */
+  boundingbox_frames_zip?: SchemaFile
+  /**
+   * Video
+   *
+   * The segmented video.
+   */
+  video: SchemaFile
+}
+
+/**
+ * SAM3VideoInput
+ */
+export type SchemaSam3VideoInput = {
+  /**
+   * Prompt
+   *
+   * Text prompt for segmentation. Use commas to track multiple objects (e.g., 'person, cloth').
+   */
+  prompt?: string
+  /**
+   * Video Url
+   *
+   * The URL of the video to be segmented.
+   */
+  video_url: string
+  /**
+   * Detection Threshold
+   *
+   * Detection confidence threshold (0.0-1.0). Lower = more detections but less precise.
+   */
+  detection_threshold?: number
+  /**
+   * Box Prompts
+   *
+   * List of box prompt coordinates (x_min, y_min, x_max, y_max).
+   */
+  box_prompts?: Array<SchemaBoxPromptBase>
+  /**
+   * Point Prompts
+   *
+   * List of point prompts
+   */
+  point_prompts?: Array<SchemaPointPromptBase>
+  /**
+   * Apply Mask
+   *
+   * Apply the mask on the video.
+   */
+  apply_mask?: boolean
+  /**
+   * Text Prompt
+   *
+   * [DEPRECATED] Use 'prompt' instead. Kept for backward compatibility.
+   *
+   * @deprecated
+   */
+  text_prompt?: string
+}
+
+/**
+ * SAM3VideoOutput
+ */
+export type SchemaSam3VideoRleOutput = {
+  /**
+   * Boundingbox Frames Zip
+   *
+   * Zip file containing per-frame bounding box overlays.
+   */
+  boundingbox_frames_zip?: SchemaFile
+  /**
+   * Video
+   *
+   * The segmented video.
+   */
+  video: SchemaFile
+}
+
+/**
+ * SAM3VideoRLEInput
+ */
+export type SchemaSam3VideoRleInput = {
+  /**
+   * Prompt
+   *
+   * Text prompt for segmentation. Use commas to track multiple objects (e.g., 'person, cloth').
+   */
+  prompt?: string
+  /**
+   * Video Url
+   *
+   * The URL of the video to be segmented.
+   */
+  video_url: string
+  /**
+   * Detection Threshold
+   *
+   * Detection confidence threshold (0.0-1.0). Lower = more detections but less precise. Defaults: 0.5 for existing, 0.7 for new objects. Try 0.2-0.3 if text prompts fail.
+   */
+  detection_threshold?: number
+  /**
+   * Box Prompts
+   *
+   * List of box prompts with optional frame_index.
+   */
+  box_prompts?: Array
+  /**
+   * Boundingbox Zip
+   *
+   * Return per-frame bounding box overlays as a zip archive.
+   */
+  boundingbox_zip?: boolean
+  /**
+   * Point Prompts
+   *
+   * List of point prompts with frame indices.
+   */
+  point_prompts?: Array
+  /**
+   * Frame Index
+   *
+   * Frame index used for initial interaction when mask_url is provided.
+   */
+  frame_index?: number
+  /**
+   * Mask Url
+   *
+   * The URL of the mask to be applied initially.
+   */
+  mask_url?: string
+  /**
+   * Apply Mask
+   *
+   * Apply the mask on the video.
+ */ + apply_mask?: boolean +} + +/** + * LucyEditFastOutput + */ +export type SchemaLucyEditFastOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * LucyEditFastInput + */ +export type SchemaLucyEditFastInput = { + /** + * Sync Mode + * + * + * If set to true, the function will wait for the video to be generated + * and uploaded before returning the response. This will increase the + * latency of the function but it allows you to get the video directly + * in the response without going through the CDN. + * + */ + sync_mode?: boolean + /** + * Video Url + * + * URL of the video to edit + */ + video_url: string + /** + * Prompt + * + * Text description of the desired video content + */ + prompt: string + /** + * Enhance Prompt + * + * Whether to enhance the prompt for better results. + */ + enhance_prompt?: boolean +} + +/** + * LTXRetakeVideoResponse + */ +export type SchemaLtx2RetakeVideoOutput = { + /** + * Video + * + * The generated video file + */ + video: SchemaVideoFile +} + +/** + * LTXRetakeVideoRequest + */ +export type SchemaLtx2RetakeVideoInput = { + /** + * Prompt + * + * The prompt to retake the video with + */ + prompt: string + /** + * Duration + * + * The duration of the video to retake in seconds + */ + duration?: number + /** + * Video URL + * + * The URL of the video to retake + */ + video_url: string + /** + * Start Time + * + * The start time of the video to retake in seconds + */ + start_time?: number + /** + * Retake Mode + * + * The retake mode to use for the retake + */ + retake_mode?: 'replace_audio' | 'replace_video' | 'replace_audio_and_video' +} + +/** + * GreenScreenRembgOutput + */ +export type SchemaVideoBackgroundRemovalGreenScreenOutput = { + /** + * Video + */ + video: Array +} + +/** + * GreenScreenRembgInput + */ +export type SchemaVideoBackgroundRemovalGreenScreenInput = { + /** + * Video Url + */ + video_url: string + /** + * Output Codec + * + * Single VP9 video with alpha channel or two videos (rgb and alpha) in H264 format. H264 is recommended for better RGB quality. + */ + output_codec?: 'vp9' | 'h264' + /** + * Spill Suppression Strength + * + * Increase the value if green spots remain in the video, decrease if color changes are noticed on the extracted subject. + */ + spill_suppression_strength?: number | unknown +} + +/** + * OmniV2VReferenceOutput + */ +export type SchemaKlingVideoO1VideoToVideoReferenceOutput = { + /** + * Video + * + * The generated video. + */ + video: SchemaFile +} + +/** + * OmniV2VReferenceInput + * + * Input for video editing or video-as-reference generation. + */ +export type SchemaKlingVideoO1VideoToVideoReferenceInput = { + /** + * Prompt + * + * Use @Element1, @Element2 to reference elements and @Image1, @Image2 to reference images in order. + */ + prompt: string + /** + * Aspect Ratio + * + * The aspect ratio of the generated video frame. If 'auto', the aspect ratio will be determined automatically based on the input video, and the closest aspect ratio to the input video will be used. + */ + aspect_ratio?: 'auto' | '16:9' | '9:16' | '1:1' + /** + * Duration + * + * Video duration in seconds. + */ + duration?: '3' | '4' | '5' | '6' | '7' | '8' | '9' | '10' + /** + * Video Url + * + * Reference video URL. Only .mp4/.mov formats supported, 3-10 seconds duration, 720-2160px resolution, max 200MB. 
+   *
+   * Max file size: 200.0MB, Min width: 720px, Min height: 720px, Max width: 2160px, Max height: 2160px, Min duration: 3.0s, Max duration: 10.05s, Min FPS: 24.0, Max FPS: 60.0, Timeout: 30.0s
+   */
+  video_url: string
+  /**
+   * Keep Audio
+   *
+   * Whether to keep the original audio from the video.
+   */
+  keep_audio?: boolean
+  /**
+   * Elements
+   *
+   * Elements (characters/objects) to include. Reference in prompt as @Element1, @Element2, etc. Maximum 4 total (elements + reference images) when using video.
+   */
+  elements?: Array<SchemaOmniVideoElementInput>
+  /**
+   * Image Urls
+   *
+   * Reference images for style/appearance. Reference in prompt as @Image1, @Image2, etc. Maximum 4 total (elements + reference images) when using video.
+   */
+  image_urls?: Array<string>
+}
+
+/**
+ * OmniVideoElementInput
+ */
+export type SchemaOmniVideoElementInput = {
+  /**
+   * Reference Image Urls
+   *
+   * Additional reference images from different angles. 1-4 images supported. At least one image is required.
+   */
+  reference_image_urls?: Array<string>
+  /**
+   * Frontal Image Url
+   *
+   * The frontal image of the element (main view).
+   *
+   * Max file size: 10.0MB, Min width: 300px, Min height: 300px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s
+   */
+  frontal_image_url: string
+}
+
+/**
+ * OmniV2VEditOutput
+ */
+export type SchemaKlingVideoO1VideoToVideoEditOutput = {
+  /**
+   * Video
+   *
+   * The generated video.
+   */
+  video: SchemaFile
+}
+
+/**
+ * OmniV2VEditInput
+ *
+ * Input for video editing or video-as-reference generation.
+ */
+export type SchemaKlingVideoO1VideoToVideoEditInput = {
+  /**
+   * Prompt
+   *
+   * Use @Element1, @Element2 to reference elements and @Image1, @Image2 to reference images in order.
+   */
+  prompt: string
+  /**
+   * Video Url
+   *
+   * Reference video URL. Only .mp4/.mov formats supported, 3-10 seconds duration, 720-2160px resolution, max 200MB.
+   *
+   * Max file size: 200.0MB, Min width: 720px, Min height: 720px, Max width: 2160px, Max height: 2160px, Min duration: 3.0s, Max duration: 10.05s, Min FPS: 24.0, Max FPS: 60.0, Timeout: 30.0s
+   */
+  video_url: string
+  /**
+   * Elements
+   *
+   * Elements (characters/objects) to include. Reference in prompt as @Element1, @Element2, etc. Maximum 4 total (elements + reference images) when using video.
+   */
+  elements?: Array<SchemaOmniVideoElementInput>
+  /**
+   * Image Urls
+   *
+   * Reference images for style/appearance. Reference in prompt as @Image1, @Image2, etc. Maximum 4 total (elements + reference images) when using video.
+   */
+  image_urls?: Array<string>
+  /**
+   * Keep Audio
+   *
+   * Whether to keep the original audio from the video.
+   */
+  keep_audio?: boolean
+}
+
+/**
+ * FastGeneralRembgOutput
+ */
+export type SchemaVideoBackgroundRemovalFastOutput = {
+  /**
+   * Video
+   */
+  video: Array
+}
+
+/**
+ * FastGeneralRembgInput
+ */
+export type SchemaVideoBackgroundRemovalFastInput = {
+  /**
+   * Video Url
+   */
+  video_url: string
+  /**
+   * Subject Is Person
+   *
+   * Set to False if the subject is not a person.
+   */
+  subject_is_person?: boolean
+  /**
+   * Output Codec
+   *
+   * Single VP9 video with alpha channel or two videos (rgb and alpha) in H264 format. H264 is recommended for better RGB quality.
+   */
+  output_codec?: 'vp9' | 'h264'
+  /**
+   * Refine Foreground Edges
+   *
+   * Improves the quality of the extracted object's edges.
+   */
+  refine_foreground_edges?: boolean
+}
+
+/**
+ * React1Output
+ */
+export type SchemaSyncLipsyncReact1Output = {
+  /**
+   * Video
+   *
+   * The generated video with synchronized lip and facial movements.
+   */
+  video: SchemaVideoFile
+}
+
+/**
+ * React1Input
+ */
+export type SchemaSyncLipsyncReact1Input = {
+  /**
+   * Emotion
+   *
+   * Emotion prompt for the generation. Currently supports single-word emotions only.
+   */
+  emotion: 'happy' | 'angry' | 'sad' | 'neutral' | 'disgusted' | 'surprised'
+  /**
+   * Video Url
+   *
+   * URL to the input video. Must be **15 seconds or shorter**.
+   */
+  video_url: string
+  /**
+   * Lipsync Mode
+   *
+   * Lipsync mode when audio and video durations are out of sync.
+   */
+  lipsync_mode?: 'cut_off' | 'loop' | 'bounce' | 'silence' | 'remap'
+  /**
+   * Audio Url
+   *
+   * URL to the input audio. Must be **15 seconds or shorter**.
+   */
+  audio_url: string
+  /**
+   * Temperature
+   *
+   * Controls the expressiveness of the lipsync.
+   */
+  temperature?: number
+  /**
+   * Model Mode
+   *
+   * Controls the edit region and movement scope for the model. Available options:
+   * - `lips`: Only lipsync using react-1 (minimal facial changes).
+   * - `face`: Lipsync + facial expressions without head movements.
+   * - `head`: Lipsync + facial expressions + natural talking head movements.
+   */
+  model_mode?: 'lips' | 'face' | 'head'
+}
+
+/**
+ * Output
+ *
+ * Output from Wan Vision Enhancer
+ */
+export type SchemaWanVisionEnhancerOutput = {
+  /**
+   * Seed
+   *
+   * The seed used for generation.
+   */
+  seed: number
+  /**
+   * Timings
+   *
+   * The timings of the different steps in the workflow.
+   */
+  timings: {
+    [key: string]: number
+  }
+  /**
+   * The enhanced video file.
+   */
+  video: SchemaFile
+}
+
+/**
+ * Input
+ *
+ * Input parameters for Wan Vision Enhancer (Video-to-Video)
+ */
+export type SchemaWanVisionEnhancerInput = {
+  /**
+   * Prompt
+   *
+   * Optional prompt to prepend to the VLM-generated description. Leave empty to use only the auto-generated description from the video.
+   */
+  prompt?: string | unknown
+  /**
+   * Video Url
+   *
+   * The URL of the video to enhance with Wan Video. Maximum 200MB file size. Videos longer than 500 frames will have only the first 500 frames processed (~8-21 seconds depending on fps).
+   */
+  video_url: string
+  /**
+   * Seed
+   *
+   * Random seed for reproducibility. If not provided, a random seed will be used.
+   */
+  seed?: number | unknown
+  /**
+   * Output Resolution
+   *
+   * Target output resolution for the enhanced video. 720p (native, fast) or 1080p (upscaled, slower). Processing is always done at 720p, then upscaled if 1080p selected.
+   */
+  target_resolution?: '720p' | '1080p'
+  /**
+   * Negative Prompt
+   *
+   * Negative prompt to avoid unwanted features.
+   */
+  negative_prompt?: string | unknown
+  /**
+   * Creativity
+   *
+   * Controls how much the model enhances/changes the video. 0 = Minimal change (preserves original), 1 = Subtle enhancement (default), 2 = Medium enhancement, 3 = Strong enhancement, 4 = Maximum enhancement.
+   */
+  creativity?: number
+}
+
+/**
+ * OneToALLAnimationResponse
+ */
+export type SchemaOneToAllAnimation14bOutput = {
+  /**
+   * Video
+   *
+   * The generated video file.
+   */
+  video: SchemaFile
+}
+
+/**
+ * OneToALLAnimationRequest
+ */
+export type SchemaOneToAllAnimation14bInput = {
+  /**
+   * Prompt
+   *
+   * The prompt to generate the video from.
+   */
+  prompt: string
+  /**
+   * Resolution
+   *
+   * The resolution of the video to generate.
+   */
+  resolution?: '480p' | '580p' | '720p'
+  /**
+   * Image Guidance Scale
+   *
+   * The image guidance scale to use for the video generation.
+   */
+  image_guidance_scale?: number
+  /**
+   * Pose Guidance Scale
+   *
+   * The pose guidance scale to use for the video generation.
+   */
+  pose_guidance_scale?: number
+  /**
+   * Video Url
+   *
+   * The URL of the video to use as a reference for the video generation.
+   */
+  video_url: string
+  /**
+   * Image Url
+   *
+   * The URL of the image to use as a reference for the video generation.
+   */
+  image_url: string
+  /**
+   * Num Inference Steps
+   *
+   * The number of inference steps to use for the video generation.
+   */
+  num_inference_steps?: number
+  /**
+   * Negative Prompt
+   *
+   * The negative prompt to generate the video from.
+   */
+  negative_prompt: string
+}
+
+/**
+ * OneToALLAnimationResponse
+ */
+export type SchemaOneToAllAnimation13bOutput = {
+  /**
+   * Video
+   *
+   * The generated video file.
+   */
+  video: SchemaFile
+}
+
+/**
+ * OneToALLAnimationRequest
+ */
+export type SchemaOneToAllAnimation13bInput = {
+  /**
+   * Prompt
+   *
+   * The prompt to generate the video from.
+   */
+  prompt: string
+  /**
+   * Resolution
+   *
+   * The resolution of the video to generate.
+   */
+  resolution?: '480p' | '580p' | '720p'
+  /**
+   * Image Guidance Scale
+   *
+   * The image guidance scale to use for the video generation.
+   */
+  image_guidance_scale?: number
+  /**
+   * Pose Guidance Scale
+   *
+   * The pose guidance scale to use for the video generation.
+   */
+  pose_guidance_scale?: number
+  /**
+   * Video Url
+   *
+   * The URL of the video to use as a reference for the video generation.
+   */
+  video_url: string
+  /**
+   * Image Url
+   *
+   * The URL of the image to use as a reference for the video generation.
+   */
+  image_url: string
+  /**
+   * Num Inference Steps
+   *
+   * The number of inference steps to use for the video generation.
+   */
+  num_inference_steps?: number
+  /**
+   * Negative Prompt
+   *
+   * The negative prompt to generate the video from.
+   */
+  negative_prompt: string
+}
+
+/**
+ * SteadyDancerResponse
+ *
+ * Response model for SteadyDancer.
+ */
+export type SchemaSteadyDancerOutput = {
+  /**
+   * Num Frames
+   *
+   * The actual number of frames generated (aligned to 4k+1 pattern).
+   */
+  num_frames: number
+  /**
+   * Seed
+   *
+   * The seed used for generation.
+   */
+  seed: number
+  /**
+   * Video
+   *
+   * The generated dance animation video.
+   */
+  video: SchemaFile
+}
+
+/**
+ * SteadyDancerRequest
+ *
+ * Request model for SteadyDancer human animation.
+ */
+export type SchemaSteadyDancerInput = {
+  /**
+   * Prompt
+   *
+   * Text prompt describing the desired animation.
+   */
+  prompt?: string
+  /**
+   * Video Url
+   *
+   * URL of the driving pose video. The motion from this video will be transferred to the reference image.
+   */
+  video_url?: string
+  /**
+   * Acceleration
+   *
+   * Acceleration level.
+   */
+  acceleration?: 'light' | 'moderate' | 'aggressive'
+  /**
+   * Pose Guidance Scale
+   *
+   * Pose guidance scale for pose control strength.
+   */
+  pose_guidance_scale?: number
+  /**
+   * Shift
+   *
+   * Shift parameter for video generation.
+   */
+  shift?: number
+  /**
+   * Pose Guidance End
+   *
+   * End ratio for pose guidance. Controls when pose guidance ends.
+   */
+  pose_guidance_end?: number
+  /**
+   * Frames Per Second
+   *
+   * Frames per second of the generated video. Must be between 5 and 24. If not specified, uses the FPS from the input video.
+   */
+  frames_per_second?: number
+  /**
+   * Guidance Scale
+   *
+   * Classifier-free guidance scale for prompt adherence.
+   */
+  guidance_scale?: number
+  /**
+   * Num Frames
+   *
+   * Number of frames to generate. If not specified, uses the frame count from the input video (capped at 241). Will be adjusted to nearest valid value (must satisfy 4k+1 pattern).
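+   *
+   * @example
+   * // Hedged illustration (values assumed, not from the schema): valid counts
+   * // follow 4k + 1, e.g. 81 = 4*20 + 1 and 241 = 4*60 + 1; a request for 100
+   * // frames would be adjusted to the nearest valid value, 101 = 4*25 + 1.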
+   */
+  num_frames?: number
+  /**
+   * Use Turbo
+   *
+   * If true, applies quality enhancement for faster generation with improved quality. When enabled, parameters are automatically optimized (num_inference_steps=6, guidance_scale=1.0) and uses the LightX2V distillation LoRA.
+   */
+  use_turbo?: boolean
+  /**
+   * Negative Prompt
+   *
+   * Negative prompt for video generation.
+   */
+  negative_prompt?: string
+  /**
+   * Enable Safety Checker
+   *
+   * If set to true, the safety checker will be enabled.
+   */
+  enable_safety_checker?: boolean
+  /**
+   * Aspect Ratio
+   *
+   * Aspect ratio of the generated video. If 'auto', will be determined from the reference image.
+   */
+  aspect_ratio?: 'auto' | '16:9' | '9:16' | '1:1'
+  /**
+   * Pose Guidance Start
+   *
+   * Start ratio for pose guidance. Controls when pose guidance begins.
+   */
+  pose_guidance_start?: number
+  /**
+   * Resolution
+   *
+   * Resolution of the generated video. 576p is default, 720p for higher quality. 480p is lower quality.
+   */
+  resolution?: '480p' | '576p' | '720p'
+  /**
+   * Image Url
+   *
+   * URL of the reference image to animate. This is the person/character whose appearance will be preserved.
+   */
+  image_url?: string
+  /**
+   * Preserve Audio
+   *
+   * If enabled, copies audio from the input driving video to the output video.
+   */
+  preserve_audio?: boolean
+  /**
+   * Seed
+   *
+   * Random seed for reproducibility. If None, a random seed is chosen.
+   */
+  seed?: number
+  /**
+   * Num Inference Steps
+   *
+   * Number of inference steps for sampling. Higher values give better quality but take longer.
+   */
+  num_inference_steps?: number
+}
+
+/**
+ * OmniV2VEditOutput
+ */
+export type SchemaKlingVideoO1StandardVideoToVideoEditOutput = {
+  /**
+   * Video
+   *
+   * The generated video.
+   */
+  video: SchemaFile
+}
+
+/**
+ * OmniV2VEditInput
+ *
+ * Input for video editing or video-as-reference generation.
+ */
+export type SchemaKlingVideoO1StandardVideoToVideoEditInput = {
+  /**
+   * Prompt
+   *
+   * Use @Element1, @Element2 to reference elements and @Image1, @Image2 to reference images in order.
+   */
+  prompt: string
+  /**
+   * Video Url
+   *
+   * Reference video URL. Only .mp4/.mov formats supported, 3-10 seconds duration, 720-2160px resolution, max 200MB.
+   *
+   * Max file size: 200.0MB, Min width: 720px, Min height: 720px, Max width: 2160px, Max height: 2160px, Min duration: 3.0s, Max duration: 10.05s, Min FPS: 24.0, Max FPS: 60.0, Timeout: 30.0s
+   */
+  video_url: string
+  /**
+   * Elements
+   *
+   * Elements (characters/objects) to include. Reference in prompt as @Element1, @Element2, etc. Maximum 4 total (elements + reference images) when using video.
+   */
+  elements?: Array<SchemaOmniVideoElementInput>
+  /**
+   * Image Urls
+   *
+   * Reference images for style/appearance. Reference in prompt as @Image1, @Image2, etc. Maximum 4 total (elements + reference images) when using video.
+   */
+  image_urls?: Array<string>
+  /**
+   * Keep Audio
+   *
+   * Whether to keep the original audio from the video.
+   */
+  keep_audio?: boolean
+}
+
+/**
+ * OmniV2VReferenceOutput
+ */
+export type SchemaKlingVideoO1StandardVideoToVideoReferenceOutput = {
+  /**
+   * Video
+   *
+   * The generated video.
+   */
+  video: SchemaFile
+}
+
+/**
+ * OmniV2VReferenceInput
+ *
+ * Input for video editing or video-as-reference generation.
+ */
+export type SchemaKlingVideoO1StandardVideoToVideoReferenceInput = {
+  /**
+   * Prompt
+   *
+   * Use @Element1, @Element2 to reference elements and @Image1, @Image2 to reference images in order.
+   */
+  prompt: string
+  /**
+   * Aspect Ratio
+   *
+   * The aspect ratio of the generated video frame. If 'auto', the aspect ratio will be determined automatically based on the input video, and the closest aspect ratio to the input video will be used.
+   */
+  aspect_ratio?: 'auto' | '16:9' | '9:16' | '1:1'
+  /**
+   * Duration
+   *
+   * Video duration in seconds.
+   */
+  duration?: '3' | '4' | '5' | '6' | '7' | '8' | '9' | '10'
+  /**
+   * Video Url
+   *
+   * Reference video URL. Only .mp4/.mov formats supported, 3-10 seconds duration, 720-2160px resolution, max 200MB.
+   *
+   * Max file size: 200.0MB, Min width: 720px, Min height: 720px, Max width: 2160px, Max height: 2160px, Min duration: 3.0s, Max duration: 10.05s, Min FPS: 24.0, Max FPS: 60.0, Timeout: 30.0s
+   */
+  video_url: string
+  /**
+   * Keep Audio
+   *
+   * Whether to keep the original audio from the video.
+   */
+  keep_audio?: boolean
+  /**
+   * Elements
+   *
+   * Elements (characters/objects) to include. Reference in prompt as @Element1, @Element2, etc. Maximum 4 total (elements + reference images) when using video.
+   */
+  elements?: Array<SchemaOmniVideoElementInput>
+  /**
+   * Image Urls
+   *
+   * Reference images for style/appearance. Reference in prompt as @Image1, @Image2, etc. Maximum 4 total (elements + reference images) when using video.
+   */
+  image_urls?: Array<string>
+}
+
+/**
+ * Veo31VideoToVideoOutput
+ */
+export type SchemaVeo31ExtendVideoOutput = {
+  /**
+   * Video
+   *
+   * The extended video.
+   */
+  video: SchemaFile
+}
+
+/**
+ * Veo31VideoToVideoInput
+ *
+ * Input for video extension/video-to-video generation.
+ */
+export type SchemaVeo31ExtendVideoInput = {
+  /**
+   * Prompt
+   *
+   * The text prompt describing how the video should be extended
+   */
+  prompt: string
+  /**
+   * Duration
+   *
+   * The duration of the generated video.
+   */
+  duration?: '7s'
+  /**
+   * Aspect Ratio
+   *
+   * The aspect ratio of the generated video.
+   */
+  aspect_ratio?: 'auto' | '16:9' | '9:16'
+  /**
+   * Generate Audio
+   *
+   * Whether to generate audio for the video.
+   */
+  generate_audio?: boolean
+  /**
+   * Auto Fix
+   *
+   * Whether to automatically attempt to fix prompts that fail content policy or other validation checks by rewriting them.
+   */
+  auto_fix?: boolean
+  /**
+   * Video URL
+   *
+   * URL of the video to extend. The video should be 720p or 1080p resolution in 16:9 or 9:16 aspect ratio.
+   */
+  video_url: string
+  /**
+   * Resolution
+   *
+   * The resolution of the generated video.
+   */
+  resolution?: '720p'
+  /**
+   * Seed
+   *
+   * The seed for the random number generator.
+   */
+  seed?: number
+  /**
+   * Negative Prompt
+   *
+   * A negative prompt to guide the video generation.
+   */
+  negative_prompt?: string
+}
+
+/**
+ * Veo31VideoToVideoOutput
+ */
+export type SchemaVeo31FastExtendVideoOutput = {
+  /**
+   * Video
+   *
+   * The extended video.
+   */
+  video: SchemaFile
+}
+
+/**
+ * Veo31VideoToVideoInput
+ *
+ * Input for video extension/video-to-video generation.
+ */
+export type SchemaVeo31FastExtendVideoInput = {
+  /**
+   * Prompt
+   *
+   * The text prompt describing how the video should be extended
+   */
+  prompt: string
+  /**
+   * Duration
+   *
+   * The duration of the generated video.
+   */
+  duration?: '7s'
+  /**
+   * Aspect Ratio
+   *
+   * The aspect ratio of the generated video.
+   */
+  aspect_ratio?: 'auto' | '16:9' | '9:16'
+  /**
+   * Generate Audio
+   *
+   * Whether to generate audio for the video.
+   */
+  generate_audio?: boolean
+  /**
+   * Auto Fix
+   *
+   * Whether to automatically attempt to fix prompts that fail content policy or other validation checks by rewriting them.
+   */
+  auto_fix?: boolean
+  /**
+   * Video URL
+   *
+   * URL of the video to extend. The video should be 720p or 1080p resolution in 16:9 or 9:16 aspect ratio.
+   */
+  video_url: string
+  /**
+   * Resolution
+   *
+   * The resolution of the generated video.
+   */
+  resolution?: '720p'
+  /**
+   * Seed
+   *
+   * The seed for the random number generator.
+   */
+  seed?: number
+  /**
+   * Negative Prompt
+   *
+   * A negative prompt to guide the video generation.
+   */
+  negative_prompt?: string
+}
+
+/**
+ * ReferenceToVideoOutput
+ *
+ * Output for reference-to-video generation
+ */
+export type SchemaV26ReferenceToVideoOutput = {
+  /**
+   * Actual Prompt
+   *
+   * The actual prompt used if prompt rewriting was enabled
+   */
+  actual_prompt?: string
+  /**
+   * Seed
+   *
+   * The seed used for generation
+   */
+  seed: number
+  /**
+   * Video
+   *
+   * The generated video file
+   */
+  video: SchemaVideoFile
+}
+
+/**
+ * ReferenceToVideoInput
+ *
+ * Input for Wan 2.6 reference-to-video generation (R2V)
+ */
+export type SchemaV26ReferenceToVideoInput = {
+  /**
+   * Prompt
+   *
+   * Use @Video1, @Video2, @Video3 to reference subjects from your videos. Works for people, animals, or objects. For multi-shot prompts: '[0-3s] Shot 1. [3-6s] Shot 2.' Max 800 characters.
+   */
+  prompt: string
+  /**
+   * Resolution
+   *
+   * Video resolution tier. R2V only supports 720p and 1080p (no 480p).
+   */
+  resolution?: '720p' | '1080p'
+  /**
+   * Video Urls
+   *
+   * Reference videos for subject consistency (1-3 videos). Videos' FPS must be at least 16 FPS. Reference in prompt as @Video1, @Video2, @Video3. Works for people, animals, or objects.
+   */
+  video_urls: Array<string>
+  /**
+   * Aspect Ratio
+   *
+   * The aspect ratio of the generated video.
+   */
+  aspect_ratio?: '16:9' | '9:16' | '1:1' | '4:3' | '3:4'
+  /**
+   * Duration
+   *
+   * Duration of the generated video in seconds. R2V supports only 5 or 10 seconds (no 15s).
+   */
+  duration?: '5' | '10'
+  /**
+   * Enable Prompt Expansion
+   *
+   * Whether to enable prompt rewriting using LLM.
+   */
+  enable_prompt_expansion?: boolean
+  /**
+   * Seed
+   *
+   * Random seed for reproducibility. If None, a random seed is chosen.
+   */
+  seed?: number
+  /**
+   * Multi Shots
+   *
+   * When true (default), enables intelligent multi-shot segmentation for coherent narrative videos with multiple shots. When false, generates single continuous shot. Only active when enable_prompt_expansion is True.
+   */
+  multi_shots?: boolean
+  /**
+   * Negative Prompt
+   *
+   * Negative prompt to describe content to avoid. Max 500 characters.
+   */
+  negative_prompt?: string
+  /**
+   * Enable Safety Checker
+   *
+   * If set to true, the safety checker will be enabled.
+   */
+  enable_safety_checker?: boolean
+}
+
+/**
+ * VideoOutput
+ */
+export type SchemaBriaVideoEraserErasePromptOutput = {
+  /**
+   * Video
+   *
+   * Final video.
+   */
+  video: SchemaVideo | SchemaFile
+}
+
+/**
+ * EraseByPromptInputModel
+ */
+export type SchemaBriaVideoEraserErasePromptInput = {
+  /**
+   * Preserve Audio
+   *
+   * If true, audio will be preserved in the output video.
+   */
+  preserve_audio?: boolean
+  /**
+   * Video Url
+   *
+   * Input video to erase object from. Duration must be less than 5s.
+   */
+  video_url: string
+  /**
+   * Prompt
+   *
+   * Input prompt to detect object to erase
+   */
+  prompt: string
+  /**
+   * Output Container And Codec
+   *
+   * Output container and codec. Options: mp4_h265, mp4_h264, webm_vp9, gif, mov_h264, mov_h265, mov_proresks, mkv_h264, mkv_h265, mkv_vp9, mkv_mpeg4.
+   */
+  output_container_and_codec?:
+    | 'mp4_h265'
+    | 'mp4_h264'
+    | 'webm_vp9'
+    | 'gif'
+    | 'mov_h264'
+    | 'mov_h265'
+    | 'mov_proresks'
+    | 'mkv_h264'
+    | 'mkv_h265'
+    | 'mkv_vp9'
+    | 'mkv_mpeg4'
+  /**
+   * Auto Trim
+   *
+   * Auto trim the video to the working duration (5s)
+   */
+  auto_trim?: boolean
+}
+
+/**
+ * VideoOutput
+ */
+export type SchemaBriaVideoEraserEraseKeypointsOutput = {
+  /**
+   * Video
+   *
+   * Final video.
+   */
+  video: SchemaVideo | SchemaFile
+}
+
+/**
+ * EraseByKeyPointsInputModel
+ */
+export type SchemaBriaVideoEraserEraseKeypointsInput = {
+  /**
+   * Preserve Audio
+   *
+   * If true, audio will be preserved in the output video.
+   */
+  preserve_audio?: boolean
+  /**
+   * Video Url
+   *
+   * Input video to erase object from. Duration must be less than 5s.
+   */
+  video_url: string
+  /**
+   * Output Container And Codec
+   *
+   * Output container and codec. Options: mp4_h265, mp4_h264, webm_vp9, gif, mov_h264, mov_h265, mov_proresks, mkv_h264, mkv_h265, mkv_vp9, mkv_mpeg4.
+   */
+  output_container_and_codec?:
+    | 'mp4_h265'
+    | 'mp4_h264'
+    | 'webm_vp9'
+    | 'gif'
+    | 'mov_h264'
+    | 'mov_h265'
+    | 'mov_proresks'
+    | 'mkv_h264'
+    | 'mkv_h265'
+    | 'mkv_vp9'
+    | 'mkv_mpeg4'
+  /**
+   * Keypoints
+   *
+   * Input keypoints [x,y] to erase or keep from the video. Format like so: {'x':100, 'y':100, 'type':'positive/negative'}
+   */
+  keypoints: Array
+  /**
+   * Auto Trim
+   *
+   * Auto trim the video to the working duration (5s)
+   */
+  auto_trim?: boolean
+}
+
+/**
+ * VideoOutput
+ */
+export type SchemaBriaVideoEraserEraseMaskOutput = {
+  /**
+   * Video
+   *
+   * Final video.
+   */
+  video: SchemaVideo | SchemaFile
+}
+
+/**
+ * EraseInputModel
+ */
+export type SchemaBriaVideoEraserEraseMaskInput = {
+  /**
+   * Preserve Audio
+   *
+   * If true, audio will be preserved in the output video.
+   */
+  preserve_audio?: boolean
+  /**
+   * Video Url
+   *
+   * Input video to erase object from. Duration must be less than 5s.
+   */
+  video_url: string
+  /**
+   * Output Container And Codec
+   *
+   * Output container and codec. Options: mp4_h265, mp4_h264, webm_vp9, gif, mov_h264, mov_h265, mov_proresks, mkv_h264, mkv_h265, mkv_vp9, mkv_mpeg4.
+   */
+  output_container_and_codec?:
+    | 'mp4_h265'
+    | 'mp4_h264'
+    | 'webm_vp9'
+    | 'gif'
+    | 'mov_h264'
+    | 'mov_h265'
+    | 'mov_proresks'
+    | 'mkv_h264'
+    | 'mkv_h265'
+    | 'mkv_vp9'
+    | 'mkv_mpeg4'
+  /**
+   * Mask Video Url
+   *
+   * Input video to mask erase object from. Duration must be less than 5s.
+   */
+  mask_video_url: string
+  /**
+   * Auto Trim
+   *
+   * Auto trim the video to the working duration (5s)
+   */
+  auto_trim?: boolean
+}
+
+/**
+ * CrystalVideoUpscaleOutput
+ */
+export type SchemaCrystalVideoUpscalerOutput = {
+  /**
+   * Video
+   *
+   * URL to the upscaled video
+   */
+  video: SchemaVideoFile
+}
+
+/**
+ * CrystalVideoUpscaleInput
+ */
+export type SchemaCrystalVideoUpscalerInput = {
+  /**
+   * Video Url
+   *
+   * URL to the input video.
+   */
+  video_url: string
+  /**
+   * Scale Factor
+   *
+   * Scale factor. The scale factor must be chosen such that the upscaled video does not exceed 5K resolution.
+   */
+  scale_factor?: number
+}
+
+/**
+ * ScailResponse
+ */
+export type SchemaScailOutput = {
+  /**
+   * Video
+   *
+   * The generated video file.
+   */
+  video: SchemaFile
+}
+
+/**
+ * ScailRequest
+ */
+export type SchemaScailInput = {
+  /**
+   * Prompt
+   *
+   * The prompt to guide video generation.
+   */
+  prompt: string
+  /**
+   * Video Url
+   *
+   * The URL of the video to use as a reference for the video generation.
+ */ + video_url: string + /** + * Resolution + * + * Output resolution. Outputs 896x512 (landscape) or 512x896 (portrait) based on the input image aspect ratio. + */ + resolution?: '512p' + /** + * Num Inference Steps + * + * The number of inference steps to use for the video generation. + */ + num_inference_steps?: number + /** + * Multi Character + * + * Enable multi-character mode. Use when driving video has multiple people. + */ + multi_character?: boolean + /** + * Image Url + * + * The URL of the image to use as a reference for the video generation. + */ + image_url: string +} + +/** + * LucyRestyleOutput + */ +export type SchemaLucyRestyleOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * LucyRestyleInput + */ +export type SchemaLucyRestyleInput = { + /** + * Sync Mode + * + * + * If set to true, the function will wait for the video to be generated + * and uploaded before returning the response. This will increase the + * latency of the function but it allows you to get the video directly + * in the response without going through the CDN. + * + */ + sync_mode?: boolean + /** + * Video Url + * + * URL of the video to edit + */ + video_url: string + /** + * Resolution + * + * Resolution of the generated video + */ + resolution?: '720p' + /** + * Prompt + * + * Text description of the desired video content + */ + prompt: string + /** + * Seed + * + * Seed for video generation + */ + seed?: number + /** + * Enhance Prompt + * + * Whether to enhance the prompt for better results. + */ + enhance_prompt?: boolean +} + +/** + * MotionControlOutput + * + * Output model for motion control video generation. + */ +export type SchemaKlingVideoV26ProMotionControlOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * MotionControlRequest + * + * Request model for motion control video generation. + */ +export type SchemaKlingVideoV26ProMotionControlInput = { + /** + * Prompt + */ + prompt?: string + /** + * Video Url + * + * Reference video URL. The character actions in the generated video will be consistent with this reference video. Should contain a realistic style character with entire body or upper body visible, including head, without obstruction. Duration limit depends on character_orientation: 10s max for 'image', 30s max for 'video'. + */ + video_url: string + /** + * Character Orientation + * + * Controls whether the output character's orientation matches the reference image or video. 'video': orientation matches reference video - better for complex motions (max 30s). 'image': orientation matches reference image - better for following camera movements (max 10s). + */ + character_orientation: 'image' | 'video' + /** + * Keep Original Sound + * + * Whether to keep the original sound from the reference video. + */ + keep_original_sound?: boolean + /** + * Image Url + * + * Reference image URL. The characters, backgrounds, and other elements in the generated video are based on this reference image. Characters should have clear body proportions, avoid occlusion, and occupy more than 5% of the image area. + */ + image_url: string +} + +/** + * MotionControlOutput + * + * Output model for motion control video generation. + */ +export type SchemaKlingVideoV26StandardMotionControlOutput = { + /** + * Video + * + * The generated video + */ + video: SchemaFile +} + +/** + * MotionControlRequest + * + * Request model for motion control video generation. 
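+ *
+ * @example
+ * // Hedged sketch (URLs assumed, not from the schema): transfer the motion of
+ * // a reference video onto the character from a reference image.
+ * // { video_url: 'https://example.com/dance.mp4',
+ * //   image_url: 'https://example.com/character.png',
+ * //   character_orientation: 'video' }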
+ */
+export type SchemaKlingVideoV26StandardMotionControlInput = {
+ /**
+ * Prompt
+ */
+ prompt?: string
+ /**
+ * Video Url
+ *
+ * Reference video URL. The character actions in the generated video will be consistent with this reference video. Should contain a realistic style character with entire body or upper body visible, including head, without obstruction. Duration limit depends on character_orientation: 10s max for 'image', 30s max for 'video'.
+ */
+ video_url: string
+ /**
+ * Character Orientation
+ *
+ * Controls whether the output character's orientation matches the reference image or video. 'video': orientation matches reference video - better for complex motions (max 30s). 'image': orientation matches reference image - better for following camera movements (max 10s).
+ */
+ character_orientation: 'image' | 'video'
+ /**
+ * Keep Original Sound
+ *
+ * Whether to keep the original sound from the reference video.
+ */
+ keep_original_sound?: boolean
+ /**
+ * Image Url
+ *
+ * Reference image URL. The characters, backgrounds, and other elements in the generated video are based on this reference image. Characters should have clear body proportions, avoid occlusion, and occupy more than 5% of the image area.
+ */
+ image_url: string
+}
+
+/**
+ * TrajectoryParameters
+ *
+ * Camera trajectory parameters for re-camera operations.
+ *
+ * Each list represents interpolation values across frames:
+ * - theta: Horizontal rotation angles (degrees)
+ * - phi: Vertical rotation angles (degrees)
+ * - radius: Camera distance scaling factors
+ */
+export type SchemaTrajectoryParameters = {
+ /**
+ * Theta
+ *
+ * Horizontal rotation angles (degrees) for each keyframe.
+ */
+ theta: Array<number>
+ /**
+ * Radius
+ *
+ * Camera distance scaling factors for each keyframe.
+ */
+ radius: Array<number>
+ /**
+ * Phi
+ *
+ * Vertical rotation angles (degrees) for each keyframe.
+ */
+ phi: Array<number>
+}
+
+/**
+ * LightXOutput
+ */
+export type SchemaLightxRecameraOutput = {
+ /**
+ * Viz Video
+ *
+ * Optional: visualization/debug video (if produced by the pipeline).
+ */
+ viz_video?: SchemaFile
+ /**
+ * Seed
+ *
+ * The seed used for generation.
+ */
+ seed: number
+ /**
+ * Input Video
+ *
+ * Optional: normalized/processed input video (if produced by the pipeline).
+ */
+ input_video?: SchemaFile
+ /**
+ * Video
+ *
+ * The generated video file.
+ */
+ video: SchemaFile
+}
+
+/**
+ * LightXRecameraRequest
+ *
+ * Re-camera-only request (minimal schema).
+ */
+export type SchemaLightxRecameraInput = {
+ /**
+ * Prompt
+ *
+ * Optional text prompt. If omitted, Light-X will auto-caption the video.
+ */
+ prompt?: string
+ /**
+ * Trajectory
+ *
+ * Camera trajectory parameters (required for recamera mode).
+ */
+ trajectory?: SchemaTrajectoryParameters
+ /**
+ * Video Url
+ *
+ * URL of the input video.
+ */
+ video_url: string
+ /**
+ * Camera
+ *
+ * Camera control mode.
+ */
+ camera?: 'traj' | 'target'
+ /**
+ * Target Pose
+ *
+ * Target camera pose [theta, phi, radius, x, y] (required when camera='target').
+ */
+ target_pose?: Array<number>
+ /**
+ * Mode
+ *
+ * Camera motion mode.
+ */
+ mode?: 'gradual' | 'bullet' | 'direct' | 'dolly-zoom'
+ /**
+ * Seed
+ *
+ * Random seed for reproducibility. If omitted, a random seed is chosen.
+ */
+ seed?: number
+}
+
+/**
+ * RelightParameters
+ *
+ * Relighting parameters for video relighting operations.
+ *
+ * Used with relit_cond_type 'ic' (intrinsic conditioning).
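+ *
+ * For illustration only (the URL and keyframe values are placeholders): a
+ * re-camera request using the trajectory type defined above.
+ *
+ * @example
+ * const recamera: SchemaLightxRecameraInput = {
+ *   video_url: 'https://example.com/shot.mp4', // placeholder
+ *   camera: 'traj',
+ *   mode: 'gradual',
+ *   trajectory: { theta: [0, 15, 30], phi: [0, 0, 0], radius: [1, 1, 1] },
+ * }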
+ */
+export type SchemaRelightParameters = {
+ /**
+ * Relight Prompt
+ *
+ * Text prompt describing the desired lighting condition.
+ */
+ relight_prompt: string
+ /**
+ * Bg Source
+ *
+ * Direction of the light source (used for IC-light).
+ */
+ bg_source?: 'Left' | 'Right' | 'Top' | 'Bottom'
+ /**
+ * Use Sky Mask
+ *
+ * Whether to use sky masking for outdoor scenes.
+ */
+ use_sky_mask?: boolean
+ /**
+ * Cfg
+ *
+ * Classifier-free guidance scale for relighting.
+ */
+ cfg?: number
+}
+
+/**
+ * LightXOutput
+ */
+export type SchemaLightxRelightOutput = {
+ /**
+ * Viz Video
+ *
+ * Optional: visualization/debug video (if produced by the pipeline).
+ */
+ viz_video?: SchemaFile
+ /**
+ * Seed
+ *
+ * The seed used for generation.
+ */
+ seed: number
+ /**
+ * Input Video
+ *
+ * Optional: normalized/processed input video (if produced by the pipeline).
+ */
+ input_video?: SchemaFile
+ /**
+ * Video
+ *
+ * The generated video file.
+ */
+ video: SchemaFile
+}
+
+/**
+ * LightXRelightRequest
+ *
+ * Relighting-only request (minimal schema).
+ */
+export type SchemaLightxRelightInput = {
+ /**
+ * Prompt
+ *
+ * Optional text prompt. If omitted, Light-X will auto-caption the video.
+ */
+ prompt?: string
+ /**
+ * Video Url
+ *
+ * URL of the input video.
+ */
+ video_url: string
+ /**
+ * Relight Parameters
+ *
+ * Relighting parameters (required when relit_cond_type='ic'). Not used for 'bg' (which expects a background image URL instead).
+ */
+ relight_parameters?: SchemaRelightParameters
+ /**
+ * Ref Id
+ *
+ * Frame index to use as the reference frame when relighting the video with a reference.
+ */
+ ref_id?: number
+ /**
+ * Relit Cond Img Url
+ *
+ * URL of the conditioning image. Required when relit_cond_type='ref'/'hdr'. Also required when relit_cond_type='bg' (background image).
+ */
+ relit_cond_img_url?: string
+ /**
+ * Relit Cond Type
+ *
+ * Relight condition type.
+ */
+ relit_cond_type?: 'ic' | 'ref' | 'hdr' | 'bg'
+ /**
+ * Seed
+ *
+ * Random seed for reproducibility. If omitted, a random seed is chosen.
+ */
+ seed?: number
+}
+
+/**
+ * VideoOutput
+ */
+export type SchemaVideoEraseMaskOutput = {
+ /**
+ * Video
+ *
+ * Final video.
+ */
+ video: SchemaVideo | SchemaFile
+}
+
+/**
+ * EraseInputModel
+ */
+export type SchemaVideoEraseMaskInput = {
+ /**
+ * Preserve Audio
+ *
+ * If true, audio will be preserved in the output video.
+ */
+ preserve_audio?: boolean
+ /**
+ * Video Url
+ *
+ * Input video to erase the object from. Duration must be less than 5s.
+ */
+ video_url: string
+ /**
+ * Output Container And Codec
+ *
+ * Output container and codec. Options: mp4_h265, mp4_h264, webm_vp9, gif, mov_h264, mov_h265, mov_proresks, mkv_h264, mkv_h265, mkv_vp9, mkv_mpeg4.
+ */
+ output_container_and_codec?:
+ | 'mp4_h265'
+ | 'mp4_h264'
+ | 'webm_vp9'
+ | 'gif'
+ | 'mov_h264'
+ | 'mov_h265'
+ | 'mov_proresks'
+ | 'mkv_h264'
+ | 'mkv_h265'
+ | 'mkv_vp9'
+ | 'mkv_mpeg4'
+ /**
+ * Mask Video Url
+ *
+ * Input mask video marking the object to erase. Duration must be less than 5s.
+ */
+ mask_video_url: string
+ /**
+ * Auto Trim
+ *
+ * Automatically trim the video to the working duration (5s).
+ */
+ auto_trim?: boolean
+}
+
+/**
+ * VideoOutput
+ */
+export type SchemaVideoErasePromptOutput = {
+ /**
+ * Video
+ *
+ * Final video.
+ */
+ video: SchemaVideo | SchemaFile
+}
+
+/**
+ * EraseByPromptInputModel
+ */
+export type SchemaVideoErasePromptInput = {
+ /**
+ * Preserve Audio
+ *
+ * If true, audio will be preserved in the output video.
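+ *
+ * For illustration only (the URL and prompt are placeholders): an
+ * intrinsic-conditioning relight request using the types defined above.
+ *
+ * @example
+ * const relight: SchemaLightxRelightInput = {
+ *   video_url: 'https://example.com/scene.mp4', // placeholder
+ *   relit_cond_type: 'ic',
+ *   relight_parameters: {
+ *     relight_prompt: 'warm sunset light', // placeholder prompt
+ *     bg_source: 'Left',
+ *   },
+ * }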
+ */
+ preserve_audio?: boolean
+ /**
+ * Video Url
+ *
+ * Input video to erase the object from. Duration must be less than 5s.
+ */
+ video_url: string
+ /**
+ * Prompt
+ *
+ * Prompt describing the object to detect and erase.
+ */
+ prompt: string
+ /**
+ * Output Container And Codec
+ *
+ * Output container and codec. Options: mp4_h265, mp4_h264, webm_vp9, gif, mov_h264, mov_h265, mov_proresks, mkv_h264, mkv_h265, mkv_vp9, mkv_mpeg4.
+ */
+ output_container_and_codec?:
+ | 'mp4_h265'
+ | 'mp4_h264'
+ | 'webm_vp9'
+ | 'gif'
+ | 'mov_h264'
+ | 'mov_h265'
+ | 'mov_proresks'
+ | 'mkv_h264'
+ | 'mkv_h265'
+ | 'mkv_vp9'
+ | 'mkv_mpeg4'
+ /**
+ * Auto Trim
+ *
+ * Automatically trim the video to the working duration (5s).
+ */
+ auto_trim?: boolean
+}
+
+/**
+ * VideoOutput
+ */
+export type SchemaVideoEraseKeypointsOutput = {
+ /**
+ * Video
+ *
+ * Final video.
+ */
+ video: SchemaVideo | SchemaFile
+}
+
+/**
+ * EraseByKeyPointsInputModel
+ */
+export type SchemaVideoEraseKeypointsInput = {
+ /**
+ * Preserve Audio
+ *
+ * If true, audio will be preserved in the output video.
+ */
+ preserve_audio?: boolean
+ /**
+ * Video Url
+ *
+ * Input video to erase the object from. Duration must be less than 5s.
+ */
+ video_url: string
+ /**
+ * Output Container And Codec
+ *
+ * Output container and codec. Options: mp4_h265, mp4_h264, webm_vp9, gif, mov_h264, mov_h265, mov_proresks, mkv_h264, mkv_h265, mkv_vp9, mkv_mpeg4.
+ */
+ output_container_and_codec?:
+ | 'mp4_h265'
+ | 'mp4_h264'
+ | 'webm_vp9'
+ | 'gif'
+ | 'mov_h264'
+ | 'mov_h265'
+ | 'mov_proresks'
+ | 'mkv_h264'
+ | 'mkv_h265'
+ | 'mkv_vp9'
+ | 'mkv_mpeg4'
+ /**
+ * Keypoints
+ *
+ * Input keypoints [x, y] to erase or keep in the video. Each entry has the form {'x': 100, 'y': 100, 'type': 'positive' | 'negative'}.
+ */
+ keypoints: Array<{ x: number; y: number; type: 'positive' | 'negative' }>
+ /**
+ * Auto Trim
+ *
+ * Automatically trim the video to the working duration (5s).
+ */
+ auto_trim?: boolean
+}
+
+/**
+ * LTX2ExtendVideoOutput
+ */
+export type SchemaLtx219bExtendVideoOutput = {
+ /**
+ * Prompt
+ *
+ * The prompt used for the generation.
+ */
+ prompt: string
+ /**
+ * Seed
+ *
+ * The seed used for the random number generator.
+ */
+ seed: number
+ video: SchemaVideoFile
+}
+
+/**
+ * LTX2ExtendVideoInput
+ *
+ * Note: the upstream schema also defines an Extend Direction field
+ * ('forward' | 'backward', default 'forward'): 'forward' extends from the
+ * end of the video, 'backward' extends from the beginning.
+ */
+export type SchemaLtx219bExtendVideoInput = {
+ /**
+ * Use Multi-Scale
+ *
+ * Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details.
+ */
+ use_multiscale?: boolean
+ /**
+ * Video URL
+ *
+ * The URL of the video to extend.
+ */
+ video_url: string
+ /**
+ * Acceleration
+ *
+ * The acceleration level to use.
+ */
+ acceleration?: 'none' | 'regular' | 'high' | 'full'
+ /**
+ * Generate Audio
+ *
+ * Whether to generate audio for the video.
+ */
+ generate_audio?: boolean
+ /**
+ * Prompt
+ *
+ * The prompt to generate the video from.
+ */
+ prompt: string
+ /**
+ * FPS
+ *
+ * The frames per second of the generated video.
+ */
+ fps?: number
+ /**
+ * Camera LoRA
+ *
+ * The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.
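+ *
+ * For illustration only (placeholder values): an erase-by-prompt request
+ * using the input type defined above.
+ *
+ * @example
+ * const eraseByPrompt: SchemaVideoErasePromptInput = {
+ *   video_url: 'https://example.com/clip.mp4', // placeholder
+ *   prompt: 'the red car', // object to detect and erase
+ *   output_container_and_codec: 'mp4_h264',
+ *   auto_trim: true,
+ * }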
+ */ + camera_lora?: + | 'dolly_in' + | 'dolly_out' + | 'dolly_left' + | 'dolly_right' + | 'jib_up' + | 'jib_down' + | 'static' + | 'none' + /** + * Video Size + * + * The size of the generated video. + */ + video_size?: + | SchemaImageSize + | 'auto' + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Camera LoRA Scale + * + * The scale of the camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera. + */ + camera_lora_scale?: number + /** + * Guidance Scale + * + * The guidance scale to use. + */ + guidance_scale?: number + /** + * Negative Prompt + * + * The negative prompt to generate the video from. + */ + negative_prompt?: string + /** + * Number of Frames + * + * The number of frames to generate. + */ + num_frames?: number + /** + * Video Strength + * + * Video conditioning strength. Lower values represent more freedom given to the model to change the video content. + */ + video_strength?: number + /** + * Video Output Type + * + * The output type of the generated video. + */ + video_output_type?: + | 'X264 (.mp4)' + | 'VP9 (.webm)' + | 'PRORES4444 (.mov)' + | 'GIF (.gif)' + /** + * Video Write Mode + * + * The write mode of the generated video. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Number of Context Frames + * + * The number of frames to use as context for the extension. + */ + num_context_frames?: number + /** + * Video Quality + * + * The quality of the generated video. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean + /** + * Number of Inference Steps + * + * The number of inference steps to use. + */ + num_inference_steps?: number + /** + * Audio Strength + * + * Audio conditioning strength. Lower values represent more freedom given to the model to change the audio content. + */ + audio_strength?: number + /** + * Seed + * + * The seed for the random number generator. + */ + seed?: number | unknown + /** + * Match Input FPS + * + * When true, match the output FPS to the input video's FPS instead of using the default target FPS. + */ + match_input_fps?: boolean +} + +/** + * LTX2ExtendVideoOutput + */ +export type SchemaLtx219bExtendVideoLoraOutput = { + /** + * Prompt + * + * The prompt used for the generation. + */ + prompt: string + /** + * Seed + * + * The seed used for the random number generator. + */ + seed: number + video: SchemaVideoFile +} + +/** + * LTX2LoRAExtendVideoInput + */ +export type SchemaLtx219bExtendVideoLoraInput = { + /** + * Use Multi-Scale + * + * Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details. + */ + use_multiscale?: boolean + /** + * Video URL + * + * The URL of the video to extend. + */ + video_url: string + /** + * Acceleration + * + * The acceleration level to use. 
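+ *
+ * For illustration only (placeholder values): a minimal extend request for
+ * SchemaLtx219bExtendVideoInput as defined above; only video_url and
+ * prompt are required.
+ *
+ * @example
+ * const extend: SchemaLtx219bExtendVideoInput = {
+ *   video_url: 'https://example.com/base.mp4', // placeholder
+ *   prompt: 'the camera keeps gliding forward through the forest',
+ *   camera_lora: 'dolly_in',
+ * }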
+ */
+ acceleration?: 'none' | 'regular' | 'high' | 'full'
+ /**
+ * Generate Audio
+ *
+ * Whether to generate audio for the video.
+ */
+ generate_audio?: boolean
+ /**
+ * Prompt
+ *
+ * The prompt to generate the video from.
+ */
+ prompt: string
+ /**
+ * FPS
+ *
+ * The frames per second of the generated video.
+ */
+ fps?: number
+ /**
+ * LoRAs
+ *
+ * The LoRAs to use for the generation.
+ */
+ loras: Array<SchemaLoRaInput>
+ /**
+ * Camera LoRA
+ *
+ * The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.
+ */
+ camera_lora?:
+ | 'dolly_in'
+ | 'dolly_out'
+ | 'dolly_left'
+ | 'dolly_right'
+ | 'jib_up'
+ | 'jib_down'
+ | 'static'
+ | 'none'
+ /**
+ * Video Size
+ *
+ * The size of the generated video.
+ */
+ video_size?:
+ | SchemaImageSize
+ | 'auto'
+ | 'square_hd'
+ | 'square'
+ | 'portrait_4_3'
+ | 'portrait_16_9'
+ | 'landscape_4_3'
+ | 'landscape_16_9'
+ /**
+ * Enable Safety Checker
+ *
+ * Whether to enable the safety checker.
+ */
+ enable_safety_checker?: boolean
+ /**
+ * Camera LoRA Scale
+ *
+ * The scale of the camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.
+ */
+ camera_lora_scale?: number
+ /**
+ * Guidance Scale
+ *
+ * The guidance scale to use.
+ */
+ guidance_scale?: number
+ /**
+ * Negative Prompt
+ *
+ * The negative prompt to generate the video from.
+ */
+ negative_prompt?: string
+ /**
+ * Number of Frames
+ *
+ * The number of frames to generate.
+ */
+ num_frames?: number
+ /**
+ * Video Strength
+ *
+ * Video conditioning strength. Lower values represent more freedom given to the model to change the video content.
+ */
+ video_strength?: number
+ /**
+ * Video Output Type
+ *
+ * The output type of the generated video.
+ */
+ video_output_type?:
+ | 'X264 (.mp4)'
+ | 'VP9 (.webm)'
+ | 'PRORES4444 (.mov)'
+ | 'GIF (.gif)'
+ /**
+ * Video Write Mode
+ *
+ * The write mode of the generated video.
+ */
+ video_write_mode?: 'fast' | 'balanced' | 'small'
+ /**
+ * Number of Context Frames
+ *
+ * The number of frames to use as context for the extension.
+ */
+ num_context_frames?: number
+ /**
+ * Video Quality
+ *
+ * The quality of the generated video.
+ */
+ video_quality?: 'low' | 'medium' | 'high' | 'maximum'
+ /**
+ * Sync Mode
+ *
+ * If `True`, the media will be returned as a data URI and the output data won't be available in the request history.
+ */
+ sync_mode?: boolean
+ /**
+ * Enable Prompt Expansion
+ *
+ * Whether to enable prompt expansion.
+ */
+ enable_prompt_expansion?: boolean
+ /**
+ * Number of Inference Steps
+ *
+ * The number of inference steps to use.
+ */
+ num_inference_steps?: number
+ /**
+ * Audio Strength
+ *
+ * Audio conditioning strength. Lower values represent more freedom given to the model to change the audio content.
+ */
+ audio_strength?: number
+ /**
+ * Seed
+ *
+ * The seed for the random number generator.
+ */
+ seed?: number | unknown
+ /**
+ * Match Input FPS
+ *
+ * When true, match the output FPS to the input video's FPS instead of using the default target FPS.
+ */
+ match_input_fps?: boolean
+}
+
+/**
+ * LoRAInput
+ *
+ * LoRA weight configuration.
+ */
+export type SchemaLoRaInput = {
+ /**
+ * Path
+ *
+ * URL or HuggingFace repo ID (owner/repo) pointing to the LoRA weights.
+ */
+ path: string
+ /**
+ * Scale
+ *
+ * Scale factor for LoRA application (0.0 to 4.0).
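+ *
+ * For illustration only (the path is a placeholder): a LoRA entry as
+ * consumed by the loras arrays above.
+ *
+ * @example
+ * const lora: SchemaLoRaInput = {
+ *   path: 'owner/repo', // placeholder HuggingFace repo ID (or a direct URL)
+ *   scale: 1.0, // within the documented 0.0-4.0 range
+ * }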
+ */ + scale?: number + /** + * Weight Name + * + * Name of the LoRA weight. Only used if `path` is a HuggingFace repository, and is only required when the repository contains multiple LoRA weights. + */ + weight_name?: string | unknown +} + +/** + * LTX2ExtendVideoOutput + */ +export type SchemaLtx219bDistilledExtendVideoOutput = { + /** + * Prompt + * + * The prompt used for the generation. + */ + prompt: string + /** + * Seed + * + * The seed used for the random number generator. + */ + seed: number + video: SchemaVideoFile +} + +/** + * LTX2DistilledExtendVideoInput + */ +export type SchemaLtx219bDistilledExtendVideoInput = { + /** + * Use Multi-Scale + * + * Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details. + */ + use_multiscale?: boolean + /** + * Video URL + * + * The URL of the video to extend. + */ + video_url: string + /** + * Acceleration + * + * The acceleration level to use. + */ + acceleration?: 'none' | 'regular' | 'high' | 'full' + /** + * Generate Audio + * + * Whether to generate audio for the video. + */ + generate_audio?: boolean + /** + * Prompt + * + * The prompt to generate the video from. + */ + prompt: string + /** + * FPS + * + * The frames per second of the generated video. + */ + fps?: number + /** + * Camera LoRA + * + * The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera. + */ + camera_lora?: + | 'dolly_in' + | 'dolly_out' + | 'dolly_left' + | 'dolly_right' + | 'jib_up' + | 'jib_down' + | 'static' + | 'none' + /** + * Video Size + * + * The size of the generated video. + */ + video_size?: + | SchemaImageSize + | 'auto' + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Camera LoRA Scale + * + * The scale of the camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera. + */ + camera_lora_scale?: number + /** + * Number of Frames + * + * The number of frames to generate. + */ + num_frames?: number + /** + * Negative Prompt + * + * The negative prompt to generate the video from. + */ + negative_prompt?: string + /** + * Video Strength + * + * Video conditioning strength. Lower values represent more freedom given to the model to change the video content. + */ + video_strength?: number + /** + * Video Output Type + * + * The output type of the generated video. + */ + video_output_type?: + | 'X264 (.mp4)' + | 'VP9 (.webm)' + | 'PRORES4444 (.mov)' + | 'GIF (.gif)' + /** + * Video Write Mode + * + * The write mode of the generated video. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Number of Context Frames + * + * The number of frames to use as context for the extension. + */ + num_context_frames?: number + /** + * Video Quality + * + * The quality of the generated video. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. 
+ */
+ sync_mode?: boolean
+ /**
+ * Enable Prompt Expansion
+ *
+ * Whether to enable prompt expansion.
+ */
+ enable_prompt_expansion?: boolean
+ /**
+ * Audio Strength
+ *
+ * Audio conditioning strength. Lower values represent more freedom given to the model to change the audio content.
+ */
+ audio_strength?: number
+ /**
+ * Seed
+ *
+ * The seed for the random number generator.
+ */
+ seed?: number | unknown
+ /**
+ * Match Input FPS
+ *
+ * When true, match the output FPS to the input video's FPS instead of using the default target FPS.
+ */
+ match_input_fps?: boolean
+}
+
+/**
+ * LTX2ExtendVideoOutput
+ */
+export type SchemaLtx219bDistilledExtendVideoLoraOutput = {
+ /**
+ * Prompt
+ *
+ * The prompt used for the generation.
+ */
+ prompt: string
+ /**
+ * Seed
+ *
+ * The seed used for the random number generator.
+ */
+ seed: number
+ video: SchemaVideoFile
+}
+
+/**
+ * LTX2LoRADistilledExtendVideoInput
+ */
+export type SchemaLtx219bDistilledExtendVideoLoraInput = {
+ /**
+ * Use Multi-Scale
+ *
+ * Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details.
+ */
+ use_multiscale?: boolean
+ /**
+ * Video URL
+ *
+ * The URL of the video to extend.
+ */
+ video_url: string
+ /**
+ * Acceleration
+ *
+ * The acceleration level to use.
+ */
+ acceleration?: 'none' | 'regular' | 'high' | 'full'
+ /**
+ * Generate Audio
+ *
+ * Whether to generate audio for the video.
+ */
+ generate_audio?: boolean
+ /**
+ * Prompt
+ *
+ * The prompt to generate the video from.
+ */
+ prompt: string
+ /**
+ * FPS
+ *
+ * The frames per second of the generated video.
+ */
+ fps?: number
+ /**
+ * LoRAs
+ *
+ * The LoRAs to use for the generation.
+ */
+ loras: Array<SchemaLoRaInput>
+ /**
+ * Camera LoRA
+ *
+ * The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.
+ */
+ camera_lora?:
+ | 'dolly_in'
+ | 'dolly_out'
+ | 'dolly_left'
+ | 'dolly_right'
+ | 'jib_up'
+ | 'jib_down'
+ | 'static'
+ | 'none'
+ /**
+ * Video Size
+ *
+ * The size of the generated video.
+ */
+ video_size?:
+ | SchemaImageSize
+ | 'auto'
+ | 'square_hd'
+ | 'square'
+ | 'portrait_4_3'
+ | 'portrait_16_9'
+ | 'landscape_4_3'
+ | 'landscape_16_9'
+ /**
+ * Enable Safety Checker
+ *
+ * Whether to enable the safety checker.
+ */
+ enable_safety_checker?: boolean
+ /**
+ * Camera LoRA Scale
+ *
+ * The scale of the camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.
+ */
+ camera_lora_scale?: number
+ /**
+ * Number of Frames
+ *
+ * The number of frames to generate.
+ */
+ num_frames?: number
+ /**
+ * Negative Prompt
+ *
+ * The negative prompt to generate the video from.
+ */
+ negative_prompt?: string
+ /**
+ * Video Strength
+ *
+ * Video conditioning strength. Lower values represent more freedom given to the model to change the video content.
+ */
+ video_strength?: number
+ /**
+ * Video Output Type
+ *
+ * The output type of the generated video.
+ */
+ video_output_type?:
+ | 'X264 (.mp4)'
+ | 'VP9 (.webm)'
+ | 'PRORES4444 (.mov)'
+ | 'GIF (.gif)'
+ /**
+ * Video Write Mode
+ *
+ * The write mode of the generated video.
+ */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Number of Context Frames + * + * The number of frames to use as context for the extension. + */ + num_context_frames?: number + /** + * Video Quality + * + * The quality of the generated video. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean + /** + * Audio Strength + * + * Audio conditioning strength. Lower values represent more freedom given to the model to change the audio content. + */ + audio_strength?: number + /** + * Seed + * + * The seed for the random number generator. + */ + seed?: number | unknown + /** + * Match Input FPS + * + * When true, match the output FPS to the input video's FPS instead of using the default target FPS. + */ + match_input_fps?: boolean +} + +/** + * LTX2VideoToVideoOutput + */ +export type SchemaLtx219bVideoToVideoOutput = { + /** + * Prompt + * + * The prompt used for the generation. + */ + prompt: string + /** + * Seed + * + * The seed used for the random number generator. + */ + seed: number + video: SchemaVideoFile +} + +/** + * LTX2VideoToVideoInput + */ +export type SchemaLtx219bVideoToVideoInput = { + /** + * Use Multi-Scale + * + * Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details. + */ + use_multiscale?: boolean + /** + * Video URL + * + * The URL of the video to generate the video from. + */ + video_url: string + /** + * Prompt + * + * The prompt to generate the video from. + */ + prompt: string + /** + * Generate Audio + * + * Whether to generate audio for the video. + */ + generate_audio?: boolean + /** + * IC-LoRA Scale + * + * The scale of the IC-LoRA to use. This allows you to control the strength of the IC-LoRA. + */ + ic_lora_scale?: number + /** + * Video Size + * + * The size of the generated video. + */ + video_size?: + | SchemaImageSize + | 'auto' + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Guidance Scale + * + * The guidance scale to use. + */ + guidance_scale?: number + /** + * Camera LoRA Scale + * + * The scale of the camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera. + */ + camera_lora_scale?: number + /** + * Number of Frames + * + * The number of frames to generate. + */ + num_frames?: number + /** + * Video Strength + * + * Video conditioning strength. Lower values represent more freedom given to the model to change the video content. + */ + video_strength?: number + /** + * Video Output Type + * + * The output type of the generated video. + */ + video_output_type?: + | 'X264 (.mp4)' + | 'VP9 (.webm)' + | 'PRORES4444 (.mov)' + | 'GIF (.gif)' + /** + * Image URL + * + * An optional URL of an image to use as the first frame of the video. + */ + image_url?: string | unknown + /** + * Video Quality + * + * The quality of the generated video. 
+ */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean + /** + * Seed + * + * The seed for the random number generator. + */ + seed?: number | unknown + /** + * Match Video Length + * + * When enabled, the number of frames will be calculated based on the video duration and FPS. When disabled, use the specified num_frames. + */ + match_video_length?: boolean + /** + * Acceleration + * + * The acceleration level to use. + */ + acceleration?: 'none' | 'regular' | 'high' | 'full' + /** + * FPS + * + * The frames per second of the generated video. + */ + fps?: number + /** + * Camera LoRA + * + * The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera. + */ + camera_lora?: + | 'dolly_in' + | 'dolly_out' + | 'dolly_left' + | 'dolly_right' + | 'jib_up' + | 'jib_down' + | 'static' + | 'none' + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Image Strength + * + * The strength of the image to use for the video generation. + */ + image_strength?: number + /** + * Negative Prompt + * + * The negative prompt to generate the video from. + */ + negative_prompt?: string + /** + * Preprocessor + * + * The preprocessor to use for the video. When a preprocessor is used and `ic_lora_type` is set to `match_preprocessor`, the IC-LoRA will be loaded based on the preprocessor type. + */ + preprocessor?: 'depth' | 'canny' | 'pose' | 'none' + /** + * IC-LoRA + * + * The type of IC-LoRA to load. In-Context LoRA weights are used to condition the video based on edge, depth, or pose videos. Only change this from `match_preprocessor` if your videos are already preprocessed (or you are using the detailer.) + */ + ic_lora?: + | 'match_preprocessor' + | 'canny' + | 'depth' + | 'pose' + | 'detailer' + | 'none' + /** + * Video Write Mode + * + * The write mode of the generated video. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Number of Inference Steps + * + * The number of inference steps to use. + */ + num_inference_steps?: number + /** + * Match Input FPS + * + * When true, match the output FPS to the input video's FPS instead of using the default target FPS. + */ + match_input_fps?: boolean +} + +/** + * LTX2VideoToVideoOutput + */ +export type SchemaLtx219bVideoToVideoLoraOutput = { + /** + * Prompt + * + * The prompt used for the generation. + */ + prompt: string + /** + * Seed + * + * The seed used for the random number generator. + */ + seed: number + video: SchemaVideoFile +} + +/** + * LTX2LoRAVideoToVideoInput + */ +export type SchemaLtx219bVideoToVideoLoraInput = { + /** + * Use Multi-Scale + * + * Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details. + */ + use_multiscale?: boolean + /** + * Video URL + * + * The URL of the video to generate the video from. + */ + video_url: string + /** + * Prompt + * + * The prompt to generate the video from. + */ + prompt: string + /** + * IC-LoRA Scale + * + * The scale of the IC-LoRA to use. 
This allows you to control the strength of the IC-LoRA.
+ */
+ ic_lora_scale?: number
+ /**
+ * Generate Audio
+ *
+ * Whether to generate audio for the video.
+ */
+ generate_audio?: boolean
+ /**
+ * LoRAs
+ *
+ * The LoRAs to use for the generation.
+ */
+ loras: Array<SchemaLoRaInput>
+ /**
+ * Video Size
+ *
+ * The size of the generated video.
+ */
+ video_size?:
+ | SchemaImageSize
+ | 'auto'
+ | 'square_hd'
+ | 'square'
+ | 'portrait_4_3'
+ | 'portrait_16_9'
+ | 'landscape_4_3'
+ | 'landscape_16_9'
+ /**
+ * Guidance Scale
+ *
+ * The guidance scale to use.
+ */
+ guidance_scale?: number
+ /**
+ * Number of Frames
+ *
+ * The number of frames to generate.
+ */
+ num_frames?: number
+ /**
+ * Camera LoRA Scale
+ *
+ * The scale of the camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.
+ */
+ camera_lora_scale?: number
+ /**
+ * Video Strength
+ *
+ * Video conditioning strength. Lower values represent more freedom given to the model to change the video content.
+ */
+ video_strength?: number
+ /**
+ * Video Output Type
+ *
+ * The output type of the generated video.
+ */
+ video_output_type?:
+ | 'X264 (.mp4)'
+ | 'VP9 (.webm)'
+ | 'PRORES4444 (.mov)'
+ | 'GIF (.gif)'
+ /**
+ * Image URL
+ *
+ * An optional URL of an image to use as the first frame of the video.
+ */
+ image_url?: string | unknown
+ /**
+ * Sync Mode
+ *
+ * If `True`, the media will be returned as a data URI and the output data won't be available in the request history.
+ */
+ sync_mode?: boolean
+ /**
+ * Video Quality
+ *
+ * The quality of the generated video.
+ */
+ video_quality?: 'low' | 'medium' | 'high' | 'maximum'
+ /**
+ * Enable Prompt Expansion
+ *
+ * Whether to enable prompt expansion.
+ */
+ enable_prompt_expansion?: boolean
+ /**
+ * Seed
+ *
+ * The seed for the random number generator.
+ */
+ seed?: number | unknown
+ /**
+ * Match Video Length
+ *
+ * When enabled, the number of frames will be calculated based on the video duration and FPS. When disabled, use the specified num_frames.
+ */
+ match_video_length?: boolean
+ /**
+ * Acceleration
+ *
+ * The acceleration level to use.
+ */
+ acceleration?: 'none' | 'regular' | 'high' | 'full'
+ /**
+ * FPS
+ *
+ * The frames per second of the generated video.
+ */
+ fps?: number
+ /**
+ * Camera LoRA
+ *
+ * The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.
+ */
+ camera_lora?:
+ | 'dolly_in'
+ | 'dolly_out'
+ | 'dolly_left'
+ | 'dolly_right'
+ | 'jib_up'
+ | 'jib_down'
+ | 'static'
+ | 'none'
+ /**
+ * Enable Safety Checker
+ *
+ * Whether to enable the safety checker.
+ */
+ enable_safety_checker?: boolean
+ /**
+ * Image Strength
+ *
+ * The strength of the image to use for the video generation.
+ */
+ image_strength?: number
+ /**
+ * Negative Prompt
+ *
+ * The negative prompt to generate the video from.
+ */
+ negative_prompt?: string
+ /**
+ * Preprocessor
+ *
+ * The preprocessor to use for the video. When a preprocessor is used and `ic_lora_type` is set to `match_preprocessor`, the IC-LoRA will be loaded based on the preprocessor type.
+ */
+ preprocessor?: 'depth' | 'canny' | 'pose' | 'none'
+ /**
+ * Video Write Mode
+ *
+ * The write mode of the generated video.
+ */
+ video_write_mode?: 'fast' | 'balanced' | 'small'
+ /**
+ * IC-LoRA
+ *
+ * The type of IC-LoRA to load.
In-Context LoRA weights are used to condition the video based on edge, depth, or pose videos. Only change this from `match_preprocessor` if your videos are already preprocessed (or you are using the detailer.) + */ + ic_lora?: + | 'match_preprocessor' + | 'canny' + | 'depth' + | 'pose' + | 'detailer' + | 'none' + /** + * Match Input FPS + * + * When true, match the output FPS to the input video's FPS instead of using the default target FPS. + */ + match_input_fps?: boolean + /** + * Number of Inference Steps + * + * The number of inference steps to use. + */ + num_inference_steps?: number +} + +/** + * LTX2VideoToVideoOutput + */ +export type SchemaLtx219bDistilledVideoToVideoOutput = { + /** + * Prompt + * + * The prompt used for the generation. + */ + prompt: string + /** + * Seed + * + * The seed used for the random number generator. + */ + seed: number + video: SchemaVideoFile +} + +/** + * LTX2DistilledVideoToVideoInput + */ +export type SchemaLtx219bDistilledVideoToVideoInput = { + /** + * Use Multi-Scale + * + * Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details. + */ + use_multiscale?: boolean + /** + * Video URL + * + * The URL of the video to generate the video from. + */ + video_url: string + /** + * Acceleration + * + * The acceleration level to use. + */ + acceleration?: 'none' | 'regular' | 'high' | 'full' + /** + * Generate Audio + * + * Whether to generate audio for the video. + */ + generate_audio?: boolean + /** + * Prompt + * + * The prompt to generate the video from. + */ + prompt: string + /** + * IC-LoRA Scale + * + * The scale of the IC-LoRA to use. This allows you to control the strength of the IC-LoRA. + */ + ic_lora_scale?: number + /** + * FPS + * + * The frames per second of the generated video. + */ + fps?: number + /** + * Camera LoRA + * + * The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera. + */ + camera_lora?: + | 'dolly_in' + | 'dolly_out' + | 'dolly_left' + | 'dolly_right' + | 'jib_up' + | 'jib_down' + | 'static' + | 'none' + /** + * Video Size + * + * The size of the generated video. + */ + video_size?: + | SchemaImageSize + | 'auto' + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Camera LoRA Scale + * + * The scale of the camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera. + */ + camera_lora_scale?: number + /** + * Image Strength + * + * The strength of the image to use for the video generation. + */ + image_strength?: number + /** + * Negative Prompt + * + * The negative prompt to generate the video from. + */ + negative_prompt?: string + /** + * Preprocessor + * + * The preprocessor to use for the video. When a preprocessor is used and `ic_lora_type` is set to `match_preprocessor`, the IC-LoRA will be loaded based on the preprocessor type. + */ + preprocessor?: 'depth' | 'canny' | 'pose' | 'none' + /** + * Video Strength + * + * Video conditioning strength. Lower values represent more freedom given to the model to change the video content. 
+ */
+ video_strength?: number
+ /**
+ * Video Output Type
+ *
+ * The output type of the generated video.
+ */
+ video_output_type?:
+ | 'X264 (.mp4)'
+ | 'VP9 (.webm)'
+ | 'PRORES4444 (.mov)'
+ | 'GIF (.gif)'
+ /**
+ * IC-LoRA
+ *
+ * The type of IC-LoRA to load. In-Context LoRA weights are used to condition the video based on edge, depth, or pose videos. Only change this from `match_preprocessor` if your videos are already preprocessed (or you are using the detailer.)
+ */
+ ic_lora?:
+ | 'match_preprocessor'
+ | 'canny'
+ | 'depth'
+ | 'pose'
+ | 'detailer'
+ | 'none'
+ /**
+ * Video Write Mode
+ *
+ * The write mode of the generated video.
+ */
+ video_write_mode?: 'fast' | 'balanced' | 'small'
+ /**
+ * Number of Frames
+ *
+ * The number of frames to generate.
+ */
+ num_frames?: number
+ /**
+ * Image URL
+ *
+ * An optional URL of an image to use as the first frame of the video.
+ */
+ image_url?: string | unknown
+ /**
+ * Video Quality
+ *
+ * The quality of the generated video.
+ */
+ video_quality?: 'low' | 'medium' | 'high' | 'maximum'
+ /**
+ * Sync Mode
+ *
+ * If `True`, the media will be returned as a data URI and the output data won't be available in the request history.
+ */
+ sync_mode?: boolean
+ /**
+ * Enable Prompt Expansion
+ *
+ * Whether to enable prompt expansion.
+ */
+ enable_prompt_expansion?: boolean
+ /**
+ * Seed
+ *
+ * The seed for the random number generator.
+ */
+ seed?: number | unknown
+ /**
+ * Match Input FPS
+ *
+ * When true, match the output FPS to the input video's FPS instead of using the default target FPS.
+ */
+ match_input_fps?: boolean
+ /**
+ * Match Video Length
+ *
+ * When enabled, the number of frames will be calculated based on the video duration and FPS. When disabled, use the specified num_frames.
+ */
+ match_video_length?: boolean
+}
+
+/**
+ * LTX2VideoToVideoOutput
+ */
+export type SchemaLtx219bDistilledVideoToVideoLoraOutput = {
+ /**
+ * Prompt
+ *
+ * The prompt used for the generation.
+ */
+ prompt: string
+ /**
+ * Seed
+ *
+ * The seed used for the random number generator.
+ */
+ seed: number
+ video: SchemaVideoFile
+}
+
+/**
+ * LTX2LoRADistilledVideoToVideoInput
+ */
+export type SchemaLtx219bDistilledVideoToVideoLoraInput = {
+ /**
+ * Use Multi-Scale
+ *
+ * Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details.
+ */
+ use_multiscale?: boolean
+ /**
+ * Video URL
+ *
+ * The URL of the video to generate the video from.
+ */
+ video_url: string
+ /**
+ * Acceleration
+ *
+ * The acceleration level to use.
+ */
+ acceleration?: 'none' | 'regular' | 'high' | 'full'
+ /**
+ * Generate Audio
+ *
+ * Whether to generate audio for the video.
+ */
+ generate_audio?: boolean
+ /**
+ * Prompt
+ *
+ * The prompt to generate the video from.
+ */
+ prompt: string
+ /**
+ * IC-LoRA Scale
+ *
+ * The scale of the IC-LoRA to use. This allows you to control the strength of the IC-LoRA.
+ */
+ ic_lora_scale?: number
+ /**
+ * FPS
+ *
+ * The frames per second of the generated video.
+ */
+ fps?: number
+ /**
+ * LoRAs
+ *
+ * The LoRAs to use for the generation.
+ */
+ loras: Array<SchemaLoRaInput>
+ /**
+ * Camera LoRA
+ *
+ * The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.
+ */ + camera_lora?: + | 'dolly_in' + | 'dolly_out' + | 'dolly_left' + | 'dolly_right' + | 'jib_up' + | 'jib_down' + | 'static' + | 'none' + /** + * Video Size + * + * The size of the generated video. + */ + video_size?: + | SchemaImageSize + | 'auto' + | 'square_hd' + | 'square' + | 'portrait_4_3' + | 'portrait_16_9' + | 'landscape_4_3' + | 'landscape_16_9' + /** + * Enable Safety Checker + * + * Whether to enable the safety checker. + */ + enable_safety_checker?: boolean + /** + * Camera LoRA Scale + * + * The scale of the camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera. + */ + camera_lora_scale?: number + /** + * Image Strength + * + * The strength of the image to use for the video generation. + */ + image_strength?: number + /** + * Negative Prompt + * + * The negative prompt to generate the video from. + */ + negative_prompt?: string + /** + * Preprocessor + * + * The preprocessor to use for the video. When a preprocessor is used and `ic_lora_type` is set to `match_preprocessor`, the IC-LoRA will be loaded based on the preprocessor type. + */ + preprocessor?: 'depth' | 'canny' | 'pose' | 'none' + /** + * Video Strength + * + * Video conditioning strength. Lower values represent more freedom given to the model to change the video content. + */ + video_strength?: number + /** + * Video Output Type + * + * The output type of the generated video. + */ + video_output_type?: + | 'X264 (.mp4)' + | 'VP9 (.webm)' + | 'PRORES4444 (.mov)' + | 'GIF (.gif)' + /** + * IC-LoRA + * + * The type of IC-LoRA to load. In-Context LoRA weights are used to condition the video based on edge, depth, or pose videos. Only change this from `match_preprocessor` if your videos are already preprocessed (or you are using the detailer.) + */ + ic_lora?: + | 'match_preprocessor' + | 'canny' + | 'depth' + | 'pose' + | 'detailer' + | 'none' + /** + * Video Write Mode + * + * The write mode of the generated video. + */ + video_write_mode?: 'fast' | 'balanced' | 'small' + /** + * Number of Frames + * + * The number of frames to generate. + */ + num_frames?: number + /** + * Image URL + * + * An optional URL of an image to use as the first frame of the video. + */ + image_url?: string | unknown + /** + * Video Quality + * + * The quality of the generated video. + */ + video_quality?: 'low' | 'medium' | 'high' | 'maximum' + /** + * Sync Mode + * + * If `True`, the media will be returned as a data URI and the output data won't be available in the request history. + */ + sync_mode?: boolean + /** + * Enable Prompt Expansion + * + * Whether to enable prompt expansion. + */ + enable_prompt_expansion?: boolean + /** + * Seed + * + * The seed for the random number generator. + */ + seed?: number | unknown + /** + * Match Input FPS + * + * When true, match the output FPS to the input video's FPS instead of using the default target FPS. + */ + match_input_fps?: boolean + /** + * Match Video Length + * + * When enabled, the number of frames will be calculated based on the video duration and FPS. When disabled, use the specified num_frames. 
+ */
+ match_video_length?: boolean
+}
+
+/**
+ * FaceFusionVideoOutput
+ *
+ * FaceFusion output payload when video content is generated.
+ */
+export type SchemaAiFaceSwapFaceswapvideoOutput = {
+ /**
+ * Processing Time Ms
+ *
+ * Optional processing duration in milliseconds.
+ */
+ processing_time_ms?: number | unknown
+ video: SchemaVideo
+}
+
+/**
+ * FaceSwapInputVideo
+ *
+ * Input schema for image ↔ video face swap.
+ */
+export type SchemaAiFaceSwapFaceswapvideoInput = {
+ /**
+ * Source Face Url
+ *
+ * URL of the source face image.
+ */
+ source_face_url: string
+ /**
+ * Target Video Url
+ *
+ * Target video URL.
+ */
+ target_video_url: string
+}
+
+/**
+ * Output
+ */
+export type SchemaMmaudioV2Output = {
+ /**
+ * Video
+ *
+ * The generated video with the lip sync.
+ */
+ video: SchemaFile
+}
+
+/**
+ * BaseInput
+ */
+export type SchemaMmaudioV2Input = {
+ /**
+ * Prompt
+ *
+ * The prompt to generate the audio for.
+ */
+ prompt: string
+ /**
+ * Video Url
+ *
+ * The URL of the video to generate the audio for.
+ */
+ video_url: string
+ /**
+ * Num Steps
+ *
+ * The number of steps to generate the audio for.
+ */
+ num_steps?: number
+ /**
+ * Duration
+ *
+ * The duration of the audio to generate.
+ */
+ duration?: number
+ /**
+ * Cfg Strength
+ *
+ * The strength of Classifier Free Guidance.
+ */
+ cfg_strength?: number
+ /**
+ * Seed
+ *
+ * The seed for the random number generator.
+ */
+ seed?: number
+ /**
+ * Mask Away Clip
+ *
+ * Whether to mask away the clip.
+ */
+ mask_away_clip?: boolean
+ /**
+ * Negative Prompt
+ *
+ * The negative prompt to generate the audio for.
+ */
+ negative_prompt?: string
+}
+
+/**
+ * GeneralRembgOutput
+ */
+export type SchemaVideoBackgroundRemovalOutput = {
+ /**
+ * Video
+ *
+ * Output video(s); two entries (rgb and alpha) when output_codec is 'h264'. The element type is assumed to be SchemaFile, as the upstream schema leaves it unspecified.
+ */
+ video: Array<SchemaFile>
+}
+
+/**
+ * GeneralRembgInput
+ */
+export type SchemaVideoBackgroundRemovalInput = {
+ /**
+ * Video Url
+ */
+ video_url: string
+ /**
+ * Subject Is Person
+ *
+ * Set to False if the subject is not a person.
+ */
+ subject_is_person?: boolean
+ /**
+ * Output Codec
+ *
+ * Single VP9 video with alpha channel or two videos (rgb and alpha) in H264 format. H264 is recommended for better RGB quality.
+ */
+ output_codec?: 'vp9' | 'h264'
+ /**
+ * Refine Foreground Edges
+ *
+ * Improves the quality of the extracted object's edges.
+ */
+ refine_foreground_edges?: boolean
+}
+
+export type SchemaQueueStatus = {
+ status: 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED'
+ /**
+ * The request id.
+ */
+ request_id: string
+ /**
+ * The response url.
+ */
+ response_url?: string
+ /**
+ * The status url.
+ */
+ status_url?: string
+ /**
+ * The cancel url.
+ */
+ cancel_url?: string
+ /**
+ * The logs.
+ */
+ logs?: {
+ [key: string]: unknown
+ }
+ /**
+ * The metrics.
+ */
+ metrics?: {
+ [key: string]: unknown
+ }
+ /**
+ * The queue position.
+ */
+ queue_position?: number
+}
+
+export type GetBriaVideoBackgroundRemovalRequestsByRequestIdStatusData = {
+ body?: never
+ path: {
+ /**
+ * Request ID
+ */
+ request_id: string
+ }
+ query?: {
+ /**
+ * Whether to include logs (`1`) in the response or not (`0`).
+ */
+ logs?: number
+ }
+ url: '/bria/video/background-removal/requests/{request_id}/status'
+}
+
+export type GetBriaVideoBackgroundRemovalRequestsByRequestIdStatusResponses = {
+ /**
+ * The request status.
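+ *
+ * For illustration only: a sketch of handling the queue status payload
+ * returned by the status endpoints in this file.
+ *
+ * @example
+ * const onStatus = (s: SchemaQueueStatus): void => {
+ *   if (s.status === 'COMPLETED' && s.response_url) {
+ *     // fetch the final SchemaVideoBackgroundRemovalOutput from s.response_url
+ *   } else if (s.status === 'IN_QUEUE') {
+ *     console.log(`queue position: ${s.queue_position ?? 'unknown'}`)
+ *   }
+ * }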
+ */ + 200: SchemaQueueStatus +} + +export type GetBriaVideoBackgroundRemovalRequestsByRequestIdStatusResponse = + GetBriaVideoBackgroundRemovalRequestsByRequestIdStatusResponses[keyof GetBriaVideoBackgroundRemovalRequestsByRequestIdStatusResponses] + +export type PutBriaVideoBackgroundRemovalRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/video/background-removal/requests/{request_id}/cancel' +} + +export type PutBriaVideoBackgroundRemovalRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutBriaVideoBackgroundRemovalRequestsByRequestIdCancelResponse = + PutBriaVideoBackgroundRemovalRequestsByRequestIdCancelResponses[keyof PutBriaVideoBackgroundRemovalRequestsByRequestIdCancelResponses] + +export type PostBriaVideoBackgroundRemovalData = { + body: SchemaVideoBackgroundRemovalInput + path?: never + query?: never + url: '/bria/video/background-removal' +} + +export type PostBriaVideoBackgroundRemovalResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostBriaVideoBackgroundRemovalResponse = + PostBriaVideoBackgroundRemovalResponses[keyof PostBriaVideoBackgroundRemovalResponses] + +export type GetBriaVideoBackgroundRemovalRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/video/background-removal/requests/{request_id}' +} + +export type GetBriaVideoBackgroundRemovalRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaVideoBackgroundRemovalOutput +} + +export type GetBriaVideoBackgroundRemovalRequestsByRequestIdResponse = + GetBriaVideoBackgroundRemovalRequestsByRequestIdResponses[keyof GetBriaVideoBackgroundRemovalRequestsByRequestIdResponses] + +export type GetFalAiMmaudioV2RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/mmaudio-v2/requests/{request_id}/status' +} + +export type GetFalAiMmaudioV2RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiMmaudioV2RequestsByRequestIdStatusResponse = + GetFalAiMmaudioV2RequestsByRequestIdStatusResponses[keyof GetFalAiMmaudioV2RequestsByRequestIdStatusResponses] + +export type PutFalAiMmaudioV2RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/mmaudio-v2/requests/{request_id}/cancel' +} + +export type PutFalAiMmaudioV2RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiMmaudioV2RequestsByRequestIdCancelResponse = + PutFalAiMmaudioV2RequestsByRequestIdCancelResponses[keyof PutFalAiMmaudioV2RequestsByRequestIdCancelResponses] + +export type PostFalAiMmaudioV2Data = { + body: SchemaMmaudioV2Input + path?: never + query?: never + url: '/fal-ai/mmaudio-v2' +} + +export type PostFalAiMmaudioV2Responses = { + /** + * The request status. 
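+ *
+ * For illustration only (the prompt and URL are placeholders): the typed
+ * request payload for this endpoint.
+ *
+ * @example
+ * const post: PostFalAiMmaudioV2Data = {
+ *   url: '/fal-ai/mmaudio-v2',
+ *   body: {
+ *     prompt: 'rain on a tin roof', // placeholder
+ *     video_url: 'https://example.com/clip.mp4', // placeholder
+ *   },
+ * }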
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiMmaudioV2Response = + PostFalAiMmaudioV2Responses[keyof PostFalAiMmaudioV2Responses] + +export type GetFalAiMmaudioV2RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/mmaudio-v2/requests/{request_id}' +} + +export type GetFalAiMmaudioV2RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMmaudioV2Output +} + +export type GetFalAiMmaudioV2RequestsByRequestIdResponse = + GetFalAiMmaudioV2RequestsByRequestIdResponses[keyof GetFalAiMmaudioV2RequestsByRequestIdResponses] + +export type GetHalfMoonAiAiFaceSwapFaceswapvideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/half-moon-ai/ai-face-swap/faceswapvideo/requests/{request_id}/status' + } + +export type GetHalfMoonAiAiFaceSwapFaceswapvideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetHalfMoonAiAiFaceSwapFaceswapvideoRequestsByRequestIdStatusResponse = + GetHalfMoonAiAiFaceSwapFaceswapvideoRequestsByRequestIdStatusResponses[keyof GetHalfMoonAiAiFaceSwapFaceswapvideoRequestsByRequestIdStatusResponses] + +export type PutHalfMoonAiAiFaceSwapFaceswapvideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/half-moon-ai/ai-face-swap/faceswapvideo/requests/{request_id}/cancel' + } + +export type PutHalfMoonAiAiFaceSwapFaceswapvideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutHalfMoonAiAiFaceSwapFaceswapvideoRequestsByRequestIdCancelResponse = + PutHalfMoonAiAiFaceSwapFaceswapvideoRequestsByRequestIdCancelResponses[keyof PutHalfMoonAiAiFaceSwapFaceswapvideoRequestsByRequestIdCancelResponses] + +export type PostHalfMoonAiAiFaceSwapFaceswapvideoData = { + body: SchemaAiFaceSwapFaceswapvideoInput + path?: never + query?: never + url: '/half-moon-ai/ai-face-swap/faceswapvideo' +} + +export type PostHalfMoonAiAiFaceSwapFaceswapvideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostHalfMoonAiAiFaceSwapFaceswapvideoResponse = + PostHalfMoonAiAiFaceSwapFaceswapvideoResponses[keyof PostHalfMoonAiAiFaceSwapFaceswapvideoResponses] + +export type GetHalfMoonAiAiFaceSwapFaceswapvideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/half-moon-ai/ai-face-swap/faceswapvideo/requests/{request_id}' +} + +export type GetHalfMoonAiAiFaceSwapFaceswapvideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaAiFaceSwapFaceswapvideoOutput +} + +export type GetHalfMoonAiAiFaceSwapFaceswapvideoRequestsByRequestIdResponse = + GetHalfMoonAiAiFaceSwapFaceswapvideoRequestsByRequestIdResponses[keyof GetHalfMoonAiAiFaceSwapFaceswapvideoRequestsByRequestIdResponses] + +export type GetFalAiLtx219bDistilledVideoToVideoLoraRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/ltx-2-19b/distilled/video-to-video/lora/requests/{request_id}/status' + } + +export type GetFalAiLtx219bDistilledVideoToVideoLoraRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiLtx219bDistilledVideoToVideoLoraRequestsByRequestIdStatusResponse = + GetFalAiLtx219bDistilledVideoToVideoLoraRequestsByRequestIdStatusResponses[keyof GetFalAiLtx219bDistilledVideoToVideoLoraRequestsByRequestIdStatusResponses] + +export type PutFalAiLtx219bDistilledVideoToVideoLoraRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2-19b/distilled/video-to-video/lora/requests/{request_id}/cancel' + } + +export type PutFalAiLtx219bDistilledVideoToVideoLoraRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiLtx219bDistilledVideoToVideoLoraRequestsByRequestIdCancelResponse = + PutFalAiLtx219bDistilledVideoToVideoLoraRequestsByRequestIdCancelResponses[keyof PutFalAiLtx219bDistilledVideoToVideoLoraRequestsByRequestIdCancelResponses] + +export type PostFalAiLtx219bDistilledVideoToVideoLoraData = { + body: SchemaLtx219bDistilledVideoToVideoLoraInput + path?: never + query?: never + url: '/fal-ai/ltx-2-19b/distilled/video-to-video/lora' +} + +export type PostFalAiLtx219bDistilledVideoToVideoLoraResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtx219bDistilledVideoToVideoLoraResponse = + PostFalAiLtx219bDistilledVideoToVideoLoraResponses[keyof PostFalAiLtx219bDistilledVideoToVideoLoraResponses] + +export type GetFalAiLtx219bDistilledVideoToVideoLoraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2-19b/distilled/video-to-video/lora/requests/{request_id}' +} + +export type GetFalAiLtx219bDistilledVideoToVideoLoraRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaLtx219bDistilledVideoToVideoLoraOutput + } + +export type GetFalAiLtx219bDistilledVideoToVideoLoraRequestsByRequestIdResponse = + GetFalAiLtx219bDistilledVideoToVideoLoraRequestsByRequestIdResponses[keyof GetFalAiLtx219bDistilledVideoToVideoLoraRequestsByRequestIdResponses] + +export type GetFalAiLtx219bDistilledVideoToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ltx-2-19b/distilled/video-to-video/requests/{request_id}/status' + } + +export type GetFalAiLtx219bDistilledVideoToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
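+ *
+ * For illustration only (the request ID is a placeholder): note that the
+ * url member keeps the literal {request_id} template while path carries
+ * the concrete value.
+ *
+ * @example
+ * const cancel: PutFalAiLtx219bDistilledVideoToVideoLoraRequestsByRequestIdCancelData = {
+ *   path: { request_id: 'req_123' }, // placeholder
+ *   url: '/fal-ai/ltx-2-19b/distilled/video-to-video/lora/requests/{request_id}/cancel',
+ * }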
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiLtx219bDistilledVideoToVideoRequestsByRequestIdStatusResponse = + GetFalAiLtx219bDistilledVideoToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiLtx219bDistilledVideoToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiLtx219bDistilledVideoToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2-19b/distilled/video-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiLtx219bDistilledVideoToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiLtx219bDistilledVideoToVideoRequestsByRequestIdCancelResponse = + PutFalAiLtx219bDistilledVideoToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiLtx219bDistilledVideoToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiLtx219bDistilledVideoToVideoData = { + body: SchemaLtx219bDistilledVideoToVideoInput + path?: never + query?: never + url: '/fal-ai/ltx-2-19b/distilled/video-to-video' +} + +export type PostFalAiLtx219bDistilledVideoToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtx219bDistilledVideoToVideoResponse = + PostFalAiLtx219bDistilledVideoToVideoResponses[keyof PostFalAiLtx219bDistilledVideoToVideoResponses] + +export type GetFalAiLtx219bDistilledVideoToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2-19b/distilled/video-to-video/requests/{request_id}' +} + +export type GetFalAiLtx219bDistilledVideoToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLtx219bDistilledVideoToVideoOutput +} + +export type GetFalAiLtx219bDistilledVideoToVideoRequestsByRequestIdResponse = + GetFalAiLtx219bDistilledVideoToVideoRequestsByRequestIdResponses[keyof GetFalAiLtx219bDistilledVideoToVideoRequestsByRequestIdResponses] + +export type GetFalAiLtx219bVideoToVideoLoraRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ltx-2-19b/video-to-video/lora/requests/{request_id}/status' +} + +export type GetFalAiLtx219bVideoToVideoLoraRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiLtx219bVideoToVideoLoraRequestsByRequestIdStatusResponse = + GetFalAiLtx219bVideoToVideoLoraRequestsByRequestIdStatusResponses[keyof GetFalAiLtx219bVideoToVideoLoraRequestsByRequestIdStatusResponses] + +export type PutFalAiLtx219bVideoToVideoLoraRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2-19b/video-to-video/lora/requests/{request_id}/cancel' +} + +export type PutFalAiLtx219bVideoToVideoLoraRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
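+ * + * Cancellation sketch; the HTTP verb (PUT) and URL shape come from the + * Data type above, while the fetch wiring and cancelUrl are illustrative: + * @example + * const res = await fetch(cancelUrl, { method: 'PUT' }) // cancelUrl: the /cancel URL for this request + * const body: { success?: boolean } = await res.json() + * if (!body.success) console.warn('request was not cancelled')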
+ */ + success?: boolean + } + } + +export type PutFalAiLtx219bVideoToVideoLoraRequestsByRequestIdCancelResponse = + PutFalAiLtx219bVideoToVideoLoraRequestsByRequestIdCancelResponses[keyof PutFalAiLtx219bVideoToVideoLoraRequestsByRequestIdCancelResponses] + +export type PostFalAiLtx219bVideoToVideoLoraData = { + body: SchemaLtx219bVideoToVideoLoraInput + path?: never + query?: never + url: '/fal-ai/ltx-2-19b/video-to-video/lora' +} + +export type PostFalAiLtx219bVideoToVideoLoraResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtx219bVideoToVideoLoraResponse = + PostFalAiLtx219bVideoToVideoLoraResponses[keyof PostFalAiLtx219bVideoToVideoLoraResponses] + +export type GetFalAiLtx219bVideoToVideoLoraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2-19b/video-to-video/lora/requests/{request_id}' +} + +export type GetFalAiLtx219bVideoToVideoLoraRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLtx219bVideoToVideoLoraOutput +} + +export type GetFalAiLtx219bVideoToVideoLoraRequestsByRequestIdResponse = + GetFalAiLtx219bVideoToVideoLoraRequestsByRequestIdResponses[keyof GetFalAiLtx219bVideoToVideoLoraRequestsByRequestIdResponses] + +export type GetFalAiLtx219bVideoToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ltx-2-19b/video-to-video/requests/{request_id}/status' +} + +export type GetFalAiLtx219bVideoToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLtx219bVideoToVideoRequestsByRequestIdStatusResponse = + GetFalAiLtx219bVideoToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiLtx219bVideoToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiLtx219bVideoToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2-19b/video-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiLtx219bVideoToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLtx219bVideoToVideoRequestsByRequestIdCancelResponse = + PutFalAiLtx219bVideoToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiLtx219bVideoToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiLtx219bVideoToVideoData = { + body: SchemaLtx219bVideoToVideoInput + path?: never + query?: never + url: '/fal-ai/ltx-2-19b/video-to-video' +} + +export type PostFalAiLtx219bVideoToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtx219bVideoToVideoResponse = + PostFalAiLtx219bVideoToVideoResponses[keyof PostFalAiLtx219bVideoToVideoResponses] + +export type GetFalAiLtx219bVideoToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2-19b/video-to-video/requests/{request_id}' +} + +export type GetFalAiLtx219bVideoToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. 
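+ * + * Result-retrieval sketch; requestId and the base URL are illustrative, + * while the path comes from the Data type above: + * @example + * const res = await fetch(`https://queue.fal.run/fal-ai/ltx-2-19b/video-to-video/requests/${requestId}`) // assumed base URL + * const output: SchemaLtx219bVideoToVideoOutput = await res.json() // typed by the 200 entry below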
+ */ + 200: SchemaLtx219bVideoToVideoOutput +} + +export type GetFalAiLtx219bVideoToVideoRequestsByRequestIdResponse = + GetFalAiLtx219bVideoToVideoRequestsByRequestIdResponses[keyof GetFalAiLtx219bVideoToVideoRequestsByRequestIdResponses] + +export type GetFalAiLtx219bDistilledExtendVideoLoraRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ltx-2-19b/distilled/extend-video/lora/requests/{request_id}/status' + } + +export type GetFalAiLtx219bDistilledExtendVideoLoraRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiLtx219bDistilledExtendVideoLoraRequestsByRequestIdStatusResponse = + GetFalAiLtx219bDistilledExtendVideoLoraRequestsByRequestIdStatusResponses[keyof GetFalAiLtx219bDistilledExtendVideoLoraRequestsByRequestIdStatusResponses] + +export type PutFalAiLtx219bDistilledExtendVideoLoraRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2-19b/distilled/extend-video/lora/requests/{request_id}/cancel' + } + +export type PutFalAiLtx219bDistilledExtendVideoLoraRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiLtx219bDistilledExtendVideoLoraRequestsByRequestIdCancelResponse = + PutFalAiLtx219bDistilledExtendVideoLoraRequestsByRequestIdCancelResponses[keyof PutFalAiLtx219bDistilledExtendVideoLoraRequestsByRequestIdCancelResponses] + +export type PostFalAiLtx219bDistilledExtendVideoLoraData = { + body: SchemaLtx219bDistilledExtendVideoLoraInput + path?: never + query?: never + url: '/fal-ai/ltx-2-19b/distilled/extend-video/lora' +} + +export type PostFalAiLtx219bDistilledExtendVideoLoraResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtx219bDistilledExtendVideoLoraResponse = + PostFalAiLtx219bDistilledExtendVideoLoraResponses[keyof PostFalAiLtx219bDistilledExtendVideoLoraResponses] + +export type GetFalAiLtx219bDistilledExtendVideoLoraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2-19b/distilled/extend-video/lora/requests/{request_id}' +} + +export type GetFalAiLtx219bDistilledExtendVideoLoraRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaLtx219bDistilledExtendVideoLoraOutput + } + +export type GetFalAiLtx219bDistilledExtendVideoLoraRequestsByRequestIdResponse = + GetFalAiLtx219bDistilledExtendVideoLoraRequestsByRequestIdResponses[keyof GetFalAiLtx219bDistilledExtendVideoLoraRequestsByRequestIdResponses] + +export type GetFalAiLtx219bDistilledExtendVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ltx-2-19b/distilled/extend-video/requests/{request_id}/status' +} + +export type GetFalAiLtx219bDistilledExtendVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiLtx219bDistilledExtendVideoRequestsByRequestIdStatusResponse = + GetFalAiLtx219bDistilledExtendVideoRequestsByRequestIdStatusResponses[keyof GetFalAiLtx219bDistilledExtendVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiLtx219bDistilledExtendVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2-19b/distilled/extend-video/requests/{request_id}/cancel' +} + +export type PutFalAiLtx219bDistilledExtendVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiLtx219bDistilledExtendVideoRequestsByRequestIdCancelResponse = + PutFalAiLtx219bDistilledExtendVideoRequestsByRequestIdCancelResponses[keyof PutFalAiLtx219bDistilledExtendVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiLtx219bDistilledExtendVideoData = { + body: SchemaLtx219bDistilledExtendVideoInput + path?: never + query?: never + url: '/fal-ai/ltx-2-19b/distilled/extend-video' +} + +export type PostFalAiLtx219bDistilledExtendVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtx219bDistilledExtendVideoResponse = + PostFalAiLtx219bDistilledExtendVideoResponses[keyof PostFalAiLtx219bDistilledExtendVideoResponses] + +export type GetFalAiLtx219bDistilledExtendVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2-19b/distilled/extend-video/requests/{request_id}' +} + +export type GetFalAiLtx219bDistilledExtendVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLtx219bDistilledExtendVideoOutput +} + +export type GetFalAiLtx219bDistilledExtendVideoRequestsByRequestIdResponse = + GetFalAiLtx219bDistilledExtendVideoRequestsByRequestIdResponses[keyof GetFalAiLtx219bDistilledExtendVideoRequestsByRequestIdResponses] + +export type GetFalAiLtx219bExtendVideoLoraRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ltx-2-19b/extend-video/lora/requests/{request_id}/status' +} + +export type GetFalAiLtx219bExtendVideoLoraRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLtx219bExtendVideoLoraRequestsByRequestIdStatusResponse = + GetFalAiLtx219bExtendVideoLoraRequestsByRequestIdStatusResponses[keyof GetFalAiLtx219bExtendVideoLoraRequestsByRequestIdStatusResponses] + +export type PutFalAiLtx219bExtendVideoLoraRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2-19b/extend-video/lora/requests/{request_id}/cancel' +} + +export type PutFalAiLtx219bExtendVideoLoraRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiLtx219bExtendVideoLoraRequestsByRequestIdCancelResponse = + PutFalAiLtx219bExtendVideoLoraRequestsByRequestIdCancelResponses[keyof PutFalAiLtx219bExtendVideoLoraRequestsByRequestIdCancelResponses] + +export type PostFalAiLtx219bExtendVideoLoraData = { + body: SchemaLtx219bExtendVideoLoraInput + path?: never + query?: never + url: '/fal-ai/ltx-2-19b/extend-video/lora' +} + +export type PostFalAiLtx219bExtendVideoLoraResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtx219bExtendVideoLoraResponse = + PostFalAiLtx219bExtendVideoLoraResponses[keyof PostFalAiLtx219bExtendVideoLoraResponses] + +export type GetFalAiLtx219bExtendVideoLoraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2-19b/extend-video/lora/requests/{request_id}' +} + +export type GetFalAiLtx219bExtendVideoLoraRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLtx219bExtendVideoLoraOutput +} + +export type GetFalAiLtx219bExtendVideoLoraRequestsByRequestIdResponse = + GetFalAiLtx219bExtendVideoLoraRequestsByRequestIdResponses[keyof GetFalAiLtx219bExtendVideoLoraRequestsByRequestIdResponses] + +export type GetFalAiLtx219bExtendVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ltx-2-19b/extend-video/requests/{request_id}/status' +} + +export type GetFalAiLtx219bExtendVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLtx219bExtendVideoRequestsByRequestIdStatusResponse = + GetFalAiLtx219bExtendVideoRequestsByRequestIdStatusResponses[keyof GetFalAiLtx219bExtendVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiLtx219bExtendVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2-19b/extend-video/requests/{request_id}/cancel' +} + +export type PutFalAiLtx219bExtendVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLtx219bExtendVideoRequestsByRequestIdCancelResponse = + PutFalAiLtx219bExtendVideoRequestsByRequestIdCancelResponses[keyof PutFalAiLtx219bExtendVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiLtx219bExtendVideoData = { + body: SchemaLtx219bExtendVideoInput + path?: never + query?: never + url: '/fal-ai/ltx-2-19b/extend-video' +} + +export type PostFalAiLtx219bExtendVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtx219bExtendVideoResponse = + PostFalAiLtx219bExtendVideoResponses[keyof PostFalAiLtx219bExtendVideoResponses] + +export type GetFalAiLtx219bExtendVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2-19b/extend-video/requests/{request_id}' +} + +export type GetFalAiLtx219bExtendVideoRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaLtx219bExtendVideoOutput +} + +export type GetFalAiLtx219bExtendVideoRequestsByRequestIdResponse = + GetFalAiLtx219bExtendVideoRequestsByRequestIdResponses[keyof GetFalAiLtx219bExtendVideoRequestsByRequestIdResponses] + +export type GetBriaVideoEraseKeypointsRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/bria/video/erase/keypoints/requests/{request_id}/status' +} + +export type GetBriaVideoEraseKeypointsRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetBriaVideoEraseKeypointsRequestsByRequestIdStatusResponse = + GetBriaVideoEraseKeypointsRequestsByRequestIdStatusResponses[keyof GetBriaVideoEraseKeypointsRequestsByRequestIdStatusResponses] + +export type PutBriaVideoEraseKeypointsRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/video/erase/keypoints/requests/{request_id}/cancel' +} + +export type PutBriaVideoEraseKeypointsRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutBriaVideoEraseKeypointsRequestsByRequestIdCancelResponse = + PutBriaVideoEraseKeypointsRequestsByRequestIdCancelResponses[keyof PutBriaVideoEraseKeypointsRequestsByRequestIdCancelResponses] + +export type PostBriaVideoEraseKeypointsData = { + body: SchemaVideoEraseKeypointsInput + path?: never + query?: never + url: '/bria/video/erase/keypoints' +} + +export type PostBriaVideoEraseKeypointsResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostBriaVideoEraseKeypointsResponse = + PostBriaVideoEraseKeypointsResponses[keyof PostBriaVideoEraseKeypointsResponses] + +export type GetBriaVideoEraseKeypointsRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/video/erase/keypoints/requests/{request_id}' +} + +export type GetBriaVideoEraseKeypointsRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaVideoEraseKeypointsOutput +} + +export type GetBriaVideoEraseKeypointsRequestsByRequestIdResponse = + GetBriaVideoEraseKeypointsRequestsByRequestIdResponses[keyof GetBriaVideoEraseKeypointsRequestsByRequestIdResponses] + +export type GetBriaVideoErasePromptRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/bria/video/erase/prompt/requests/{request_id}/status' +} + +export type GetBriaVideoErasePromptRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetBriaVideoErasePromptRequestsByRequestIdStatusResponse = + GetBriaVideoErasePromptRequestsByRequestIdStatusResponses[keyof GetBriaVideoErasePromptRequestsByRequestIdStatusResponses] + +export type PutBriaVideoErasePromptRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/video/erase/prompt/requests/{request_id}/cancel' +} + +export type PutBriaVideoErasePromptRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutBriaVideoErasePromptRequestsByRequestIdCancelResponse = + PutBriaVideoErasePromptRequestsByRequestIdCancelResponses[keyof PutBriaVideoErasePromptRequestsByRequestIdCancelResponses] + +export type PostBriaVideoErasePromptData = { + body: SchemaVideoErasePromptInput + path?: never + query?: never + url: '/bria/video/erase/prompt' +} + +export type PostBriaVideoErasePromptResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostBriaVideoErasePromptResponse = + PostBriaVideoErasePromptResponses[keyof PostBriaVideoErasePromptResponses] + +export type GetBriaVideoErasePromptRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/video/erase/prompt/requests/{request_id}' +} + +export type GetBriaVideoErasePromptRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaVideoErasePromptOutput +} + +export type GetBriaVideoErasePromptRequestsByRequestIdResponse = + GetBriaVideoErasePromptRequestsByRequestIdResponses[keyof GetBriaVideoErasePromptRequestsByRequestIdResponses] + +export type GetBriaVideoEraseMaskRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/bria/video/erase/mask/requests/{request_id}/status' +} + +export type GetBriaVideoEraseMaskRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetBriaVideoEraseMaskRequestsByRequestIdStatusResponse = + GetBriaVideoEraseMaskRequestsByRequestIdStatusResponses[keyof GetBriaVideoEraseMaskRequestsByRequestIdStatusResponses] + +export type PutBriaVideoEraseMaskRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/video/erase/mask/requests/{request_id}/cancel' +} + +export type PutBriaVideoEraseMaskRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutBriaVideoEraseMaskRequestsByRequestIdCancelResponse = + PutBriaVideoEraseMaskRequestsByRequestIdCancelResponses[keyof PutBriaVideoEraseMaskRequestsByRequestIdCancelResponses] + +export type PostBriaVideoEraseMaskData = { + body: SchemaVideoEraseMaskInput + path?: never + query?: never + url: '/bria/video/erase/mask' +} + +export type PostBriaVideoEraseMaskResponses = { + /** + * The request status. 
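+ * + * Submission sketch; the base URL, auth header, and input variable are + * assumptions layered on the POST shape declared above: + * @example + * const res = await fetch('https://queue.fal.run/bria/video/erase/mask', { + *   method: 'POST', + *   headers: { 'Content-Type': 'application/json', Authorization: `Key ${process.env.FAL_KEY}` }, + *   body: JSON.stringify(input), // input: SchemaVideoEraseMaskInput + * }) + * const queued: SchemaQueueStatus = await res.json() // submitting yields queue status, not the final video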
+ */ + 200: SchemaQueueStatus +} + +export type PostBriaVideoEraseMaskResponse = + PostBriaVideoEraseMaskResponses[keyof PostBriaVideoEraseMaskResponses] + +export type GetBriaVideoEraseMaskRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/video/erase/mask/requests/{request_id}' +} + +export type GetBriaVideoEraseMaskRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaVideoEraseMaskOutput +} + +export type GetBriaVideoEraseMaskRequestsByRequestIdResponse = + GetBriaVideoEraseMaskRequestsByRequestIdResponses[keyof GetBriaVideoEraseMaskRequestsByRequestIdResponses] + +export type GetFalAiLightxRelightRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/lightx/relight/requests/{request_id}/status' +} + +export type GetFalAiLightxRelightRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLightxRelightRequestsByRequestIdStatusResponse = + GetFalAiLightxRelightRequestsByRequestIdStatusResponses[keyof GetFalAiLightxRelightRequestsByRequestIdStatusResponses] + +export type PutFalAiLightxRelightRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/lightx/relight/requests/{request_id}/cancel' +} + +export type PutFalAiLightxRelightRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLightxRelightRequestsByRequestIdCancelResponse = + PutFalAiLightxRelightRequestsByRequestIdCancelResponses[keyof PutFalAiLightxRelightRequestsByRequestIdCancelResponses] + +export type PostFalAiLightxRelightData = { + body: SchemaLightxRelightInput + path?: never + query?: never + url: '/fal-ai/lightx/relight' +} + +export type PostFalAiLightxRelightResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLightxRelightResponse = + PostFalAiLightxRelightResponses[keyof PostFalAiLightxRelightResponses] + +export type GetFalAiLightxRelightRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/lightx/relight/requests/{request_id}' +} + +export type GetFalAiLightxRelightRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLightxRelightOutput +} + +export type GetFalAiLightxRelightRequestsByRequestIdResponse = + GetFalAiLightxRelightRequestsByRequestIdResponses[keyof GetFalAiLightxRelightRequestsByRequestIdResponses] + +export type GetFalAiLightxRecameraRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/lightx/recamera/requests/{request_id}/status' +} + +export type GetFalAiLightxRecameraRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiLightxRecameraRequestsByRequestIdStatusResponse = + GetFalAiLightxRecameraRequestsByRequestIdStatusResponses[keyof GetFalAiLightxRecameraRequestsByRequestIdStatusResponses] + +export type PutFalAiLightxRecameraRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/lightx/recamera/requests/{request_id}/cancel' +} + +export type PutFalAiLightxRecameraRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLightxRecameraRequestsByRequestIdCancelResponse = + PutFalAiLightxRecameraRequestsByRequestIdCancelResponses[keyof PutFalAiLightxRecameraRequestsByRequestIdCancelResponses] + +export type PostFalAiLightxRecameraData = { + body: SchemaLightxRecameraInput + path?: never + query?: never + url: '/fal-ai/lightx/recamera' +} + +export type PostFalAiLightxRecameraResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLightxRecameraResponse = + PostFalAiLightxRecameraResponses[keyof PostFalAiLightxRecameraResponses] + +export type GetFalAiLightxRecameraRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/lightx/recamera/requests/{request_id}' +} + +export type GetFalAiLightxRecameraRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLightxRecameraOutput +} + +export type GetFalAiLightxRecameraRequestsByRequestIdResponse = + GetFalAiLightxRecameraRequestsByRequestIdResponses[keyof GetFalAiLightxRecameraRequestsByRequestIdResponses] + +export type GetFalAiKlingVideoV26StandardMotionControlRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-video/v2.6/standard/motion-control/requests/{request_id}/status' + } + +export type GetFalAiKlingVideoV26StandardMotionControlRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiKlingVideoV26StandardMotionControlRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoV26StandardMotionControlRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoV26StandardMotionControlRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoV26StandardMotionControlRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v2.6/standard/motion-control/requests/{request_id}/cancel' + } + +export type PutFalAiKlingVideoV26StandardMotionControlRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiKlingVideoV26StandardMotionControlRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoV26StandardMotionControlRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoV26StandardMotionControlRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoV26StandardMotionControlData = { + body: SchemaKlingVideoV26StandardMotionControlInput + path?: never + query?: never + url: '/fal-ai/kling-video/v2.6/standard/motion-control' +} + +export type PostFalAiKlingVideoV26StandardMotionControlResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoV26StandardMotionControlResponse = + PostFalAiKlingVideoV26StandardMotionControlResponses[keyof PostFalAiKlingVideoV26StandardMotionControlResponses] + +export type GetFalAiKlingVideoV26StandardMotionControlRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v2.6/standard/motion-control/requests/{request_id}' + } + +export type GetFalAiKlingVideoV26StandardMotionControlRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaKlingVideoV26StandardMotionControlOutput + } + +export type GetFalAiKlingVideoV26StandardMotionControlRequestsByRequestIdResponse = + GetFalAiKlingVideoV26StandardMotionControlRequestsByRequestIdResponses[keyof GetFalAiKlingVideoV26StandardMotionControlRequestsByRequestIdResponses] + +export type GetFalAiKlingVideoV26ProMotionControlRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-video/v2.6/pro/motion-control/requests/{request_id}/status' + } + +export type GetFalAiKlingVideoV26ProMotionControlRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiKlingVideoV26ProMotionControlRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoV26ProMotionControlRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoV26ProMotionControlRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoV26ProMotionControlRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v2.6/pro/motion-control/requests/{request_id}/cancel' + } + +export type PutFalAiKlingVideoV26ProMotionControlRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiKlingVideoV26ProMotionControlRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoV26ProMotionControlRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoV26ProMotionControlRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoV26ProMotionControlData = { + body: SchemaKlingVideoV26ProMotionControlInput + path?: never + query?: never + url: '/fal-ai/kling-video/v2.6/pro/motion-control' +} + +export type PostFalAiKlingVideoV26ProMotionControlResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoV26ProMotionControlResponse = + PostFalAiKlingVideoV26ProMotionControlResponses[keyof PostFalAiKlingVideoV26ProMotionControlResponses] + +export type GetFalAiKlingVideoV26ProMotionControlRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/v2.6/pro/motion-control/requests/{request_id}' +} + +export type GetFalAiKlingVideoV26ProMotionControlRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaKlingVideoV26ProMotionControlOutput + } + +export type GetFalAiKlingVideoV26ProMotionControlRequestsByRequestIdResponse = + GetFalAiKlingVideoV26ProMotionControlRequestsByRequestIdResponses[keyof GetFalAiKlingVideoV26ProMotionControlRequestsByRequestIdResponses] + +export type GetDecartLucyRestyleRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/decart/lucy-restyle/requests/{request_id}/status' +} + +export type GetDecartLucyRestyleRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetDecartLucyRestyleRequestsByRequestIdStatusResponse = + GetDecartLucyRestyleRequestsByRequestIdStatusResponses[keyof GetDecartLucyRestyleRequestsByRequestIdStatusResponses] + +export type PutDecartLucyRestyleRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/decart/lucy-restyle/requests/{request_id}/cancel' +} + +export type PutDecartLucyRestyleRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutDecartLucyRestyleRequestsByRequestIdCancelResponse = + PutDecartLucyRestyleRequestsByRequestIdCancelResponses[keyof PutDecartLucyRestyleRequestsByRequestIdCancelResponses] + +export type PostDecartLucyRestyleData = { + body: SchemaLucyRestyleInput + path?: never + query?: never + url: '/decart/lucy-restyle' +} + +export type PostDecartLucyRestyleResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostDecartLucyRestyleResponse = + PostDecartLucyRestyleResponses[keyof PostDecartLucyRestyleResponses] + +export type GetDecartLucyRestyleRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/decart/lucy-restyle/requests/{request_id}' +} + +export type GetDecartLucyRestyleRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLucyRestyleOutput +} + +export type GetDecartLucyRestyleRequestsByRequestIdResponse = + GetDecartLucyRestyleRequestsByRequestIdResponses[keyof GetDecartLucyRestyleRequestsByRequestIdResponses] + +export type GetFalAiScailRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/scail/requests/{request_id}/status' +} + +export type GetFalAiScailRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiScailRequestsByRequestIdStatusResponse = + GetFalAiScailRequestsByRequestIdStatusResponses[keyof GetFalAiScailRequestsByRequestIdStatusResponses] + +export type PutFalAiScailRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/scail/requests/{request_id}/cancel' +} + +export type PutFalAiScailRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiScailRequestsByRequestIdCancelResponse = + PutFalAiScailRequestsByRequestIdCancelResponses[keyof PutFalAiScailRequestsByRequestIdCancelResponses] + +export type PostFalAiScailData = { + body: SchemaScailInput + path?: never + query?: never + url: '/fal-ai/scail' +} + +export type PostFalAiScailResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiScailResponse = + PostFalAiScailResponses[keyof PostFalAiScailResponses] + +export type GetFalAiScailRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/scail/requests/{request_id}' +} + +export type GetFalAiScailRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaScailOutput +} + +export type GetFalAiScailRequestsByRequestIdResponse = + GetFalAiScailRequestsByRequestIdResponses[keyof GetFalAiScailRequestsByRequestIdResponses] + +export type GetClarityaiCrystalVideoUpscalerRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/clarityai/crystal-video-upscaler/requests/{request_id}/status' +} + +export type GetClarityaiCrystalVideoUpscalerRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetClarityaiCrystalVideoUpscalerRequestsByRequestIdStatusResponse = + GetClarityaiCrystalVideoUpscalerRequestsByRequestIdStatusResponses[keyof GetClarityaiCrystalVideoUpscalerRequestsByRequestIdStatusResponses] + +export type PutClarityaiCrystalVideoUpscalerRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/clarityai/crystal-video-upscaler/requests/{request_id}/cancel' +} + +export type PutClarityaiCrystalVideoUpscalerRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutClarityaiCrystalVideoUpscalerRequestsByRequestIdCancelResponse = + PutClarityaiCrystalVideoUpscalerRequestsByRequestIdCancelResponses[keyof PutClarityaiCrystalVideoUpscalerRequestsByRequestIdCancelResponses] + +export type PostClarityaiCrystalVideoUpscalerData = { + body: SchemaCrystalVideoUpscalerInput + path?: never + query?: never + url: '/clarityai/crystal-video-upscaler' +} + +export type PostClarityaiCrystalVideoUpscalerResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostClarityaiCrystalVideoUpscalerResponse = + PostClarityaiCrystalVideoUpscalerResponses[keyof PostClarityaiCrystalVideoUpscalerResponses] + +export type GetClarityaiCrystalVideoUpscalerRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/clarityai/crystal-video-upscaler/requests/{request_id}' +} + +export type GetClarityaiCrystalVideoUpscalerRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaCrystalVideoUpscalerOutput +} + +export type GetClarityaiCrystalVideoUpscalerRequestsByRequestIdResponse = + GetClarityaiCrystalVideoUpscalerRequestsByRequestIdResponses[keyof GetClarityaiCrystalVideoUpscalerRequestsByRequestIdResponses] + +export type GetBriaBriaVideoEraserEraseMaskRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/bria/bria_video_eraser/erase/mask/requests/{request_id}/status' +} + +export type GetBriaBriaVideoEraserEraseMaskRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetBriaBriaVideoEraserEraseMaskRequestsByRequestIdStatusResponse = + GetBriaBriaVideoEraserEraseMaskRequestsByRequestIdStatusResponses[keyof GetBriaBriaVideoEraserEraseMaskRequestsByRequestIdStatusResponses] + +export type PutBriaBriaVideoEraserEraseMaskRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/bria_video_eraser/erase/mask/requests/{request_id}/cancel' +} + +export type PutBriaBriaVideoEraserEraseMaskRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutBriaBriaVideoEraserEraseMaskRequestsByRequestIdCancelResponse = + PutBriaBriaVideoEraserEraseMaskRequestsByRequestIdCancelResponses[keyof PutBriaBriaVideoEraserEraseMaskRequestsByRequestIdCancelResponses] + +export type PostBriaBriaVideoEraserEraseMaskData = { + body: SchemaBriaVideoEraserEraseMaskInput + path?: never + query?: never + url: '/bria/bria_video_eraser/erase/mask' +} + +export type PostBriaBriaVideoEraserEraseMaskResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostBriaBriaVideoEraserEraseMaskResponse = + PostBriaBriaVideoEraserEraseMaskResponses[keyof PostBriaBriaVideoEraserEraseMaskResponses] + +export type GetBriaBriaVideoEraserEraseMaskRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/bria_video_eraser/erase/mask/requests/{request_id}' +} + +export type GetBriaBriaVideoEraserEraseMaskRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaBriaVideoEraserEraseMaskOutput +} + +export type GetBriaBriaVideoEraserEraseMaskRequestsByRequestIdResponse = + GetBriaBriaVideoEraserEraseMaskRequestsByRequestIdResponses[keyof GetBriaBriaVideoEraserEraseMaskRequestsByRequestIdResponses] + +export type GetBriaBriaVideoEraserEraseKeypointsRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
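+ * + * Note that this flag is numeric rather than boolean; a query-building + * sketch (wantLogs and requestId are illustrative): + * @example + * const url = `/bria/bria_video_eraser/erase/keypoints/requests/${requestId}/status?logs=${wantLogs ? 1 : 0}`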
+ */ + logs?: number + } + url: '/bria/bria_video_eraser/erase/keypoints/requests/{request_id}/status' + } + +export type GetBriaBriaVideoEraserEraseKeypointsRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetBriaBriaVideoEraserEraseKeypointsRequestsByRequestIdStatusResponse = + GetBriaBriaVideoEraserEraseKeypointsRequestsByRequestIdStatusResponses[keyof GetBriaBriaVideoEraserEraseKeypointsRequestsByRequestIdStatusResponses] + +export type PutBriaBriaVideoEraserEraseKeypointsRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/bria_video_eraser/erase/keypoints/requests/{request_id}/cancel' + } + +export type PutBriaBriaVideoEraserEraseKeypointsRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutBriaBriaVideoEraserEraseKeypointsRequestsByRequestIdCancelResponse = + PutBriaBriaVideoEraserEraseKeypointsRequestsByRequestIdCancelResponses[keyof PutBriaBriaVideoEraserEraseKeypointsRequestsByRequestIdCancelResponses] + +export type PostBriaBriaVideoEraserEraseKeypointsData = { + body: SchemaBriaVideoEraserEraseKeypointsInput + path?: never + query?: never + url: '/bria/bria_video_eraser/erase/keypoints' +} + +export type PostBriaBriaVideoEraserEraseKeypointsResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostBriaBriaVideoEraserEraseKeypointsResponse = + PostBriaBriaVideoEraserEraseKeypointsResponses[keyof PostBriaBriaVideoEraserEraseKeypointsResponses] + +export type GetBriaBriaVideoEraserEraseKeypointsRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/bria_video_eraser/erase/keypoints/requests/{request_id}' +} + +export type GetBriaBriaVideoEraserEraseKeypointsRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaBriaVideoEraserEraseKeypointsOutput +} + +export type GetBriaBriaVideoEraserEraseKeypointsRequestsByRequestIdResponse = + GetBriaBriaVideoEraserEraseKeypointsRequestsByRequestIdResponses[keyof GetBriaBriaVideoEraserEraseKeypointsRequestsByRequestIdResponses] + +export type GetBriaBriaVideoEraserErasePromptRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/bria/bria_video_eraser/erase/prompt/requests/{request_id}/status' +} + +export type GetBriaBriaVideoEraserErasePromptRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetBriaBriaVideoEraserErasePromptRequestsByRequestIdStatusResponse = + GetBriaBriaVideoEraserErasePromptRequestsByRequestIdStatusResponses[keyof GetBriaBriaVideoEraserErasePromptRequestsByRequestIdStatusResponses] + +export type PutBriaBriaVideoEraserErasePromptRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/bria_video_eraser/erase/prompt/requests/{request_id}/cancel' +} + +export type PutBriaBriaVideoEraserErasePromptRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutBriaBriaVideoEraserErasePromptRequestsByRequestIdCancelResponse = + PutBriaBriaVideoEraserErasePromptRequestsByRequestIdCancelResponses[keyof PutBriaBriaVideoEraserErasePromptRequestsByRequestIdCancelResponses] + +export type PostBriaBriaVideoEraserErasePromptData = { + body: SchemaBriaVideoEraserErasePromptInput + path?: never + query?: never + url: '/bria/bria_video_eraser/erase/prompt' +} + +export type PostBriaBriaVideoEraserErasePromptResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostBriaBriaVideoEraserErasePromptResponse = + PostBriaBriaVideoEraserErasePromptResponses[keyof PostBriaBriaVideoEraserErasePromptResponses] + +export type GetBriaBriaVideoEraserErasePromptRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/bria_video_eraser/erase/prompt/requests/{request_id}' +} + +export type GetBriaBriaVideoEraserErasePromptRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaBriaVideoEraserErasePromptOutput +} + +export type GetBriaBriaVideoEraserErasePromptRequestsByRequestIdResponse = + GetBriaBriaVideoEraserErasePromptRequestsByRequestIdResponses[keyof GetBriaBriaVideoEraserErasePromptRequestsByRequestIdResponses] + +export type GetWanV26ReferenceToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/wan/v2.6/reference-to-video/requests/{request_id}/status' +} + +export type GetWanV26ReferenceToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetWanV26ReferenceToVideoRequestsByRequestIdStatusResponse = + GetWanV26ReferenceToVideoRequestsByRequestIdStatusResponses[keyof GetWanV26ReferenceToVideoRequestsByRequestIdStatusResponses] + +export type PutWanV26ReferenceToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/wan/v2.6/reference-to-video/requests/{request_id}/cancel' +} + +export type PutWanV26ReferenceToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutWanV26ReferenceToVideoRequestsByRequestIdCancelResponse = + PutWanV26ReferenceToVideoRequestsByRequestIdCancelResponses[keyof PutWanV26ReferenceToVideoRequestsByRequestIdCancelResponses] + +export type PostWanV26ReferenceToVideoData = { + body: SchemaV26ReferenceToVideoInput + path?: never + query?: never + url: '/wan/v2.6/reference-to-video' +} + +export type PostWanV26ReferenceToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostWanV26ReferenceToVideoResponse = + PostWanV26ReferenceToVideoResponses[keyof PostWanV26ReferenceToVideoResponses] + +export type GetWanV26ReferenceToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/wan/v2.6/reference-to-video/requests/{request_id}' +} + +export type GetWanV26ReferenceToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaV26ReferenceToVideoOutput +} + +export type GetWanV26ReferenceToVideoRequestsByRequestIdResponse = + GetWanV26ReferenceToVideoRequestsByRequestIdResponses[keyof GetWanV26ReferenceToVideoRequestsByRequestIdResponses] + +export type GetFalAiVeo31FastExtendVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/veo3.1/fast/extend-video/requests/{request_id}/status' +} + +export type GetFalAiVeo31FastExtendVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiVeo31FastExtendVideoRequestsByRequestIdStatusResponse = + GetFalAiVeo31FastExtendVideoRequestsByRequestIdStatusResponses[keyof GetFalAiVeo31FastExtendVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiVeo31FastExtendVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/veo3.1/fast/extend-video/requests/{request_id}/cancel' +} + +export type PutFalAiVeo31FastExtendVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiVeo31FastExtendVideoRequestsByRequestIdCancelResponse = + PutFalAiVeo31FastExtendVideoRequestsByRequestIdCancelResponses[keyof PutFalAiVeo31FastExtendVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiVeo31FastExtendVideoData = { + body: SchemaVeo31FastExtendVideoInput + path?: never + query?: never + url: '/fal-ai/veo3.1/fast/extend-video' +} + +export type PostFalAiVeo31FastExtendVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiVeo31FastExtendVideoResponse = + PostFalAiVeo31FastExtendVideoResponses[keyof PostFalAiVeo31FastExtendVideoResponses] + +export type GetFalAiVeo31FastExtendVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/veo3.1/fast/extend-video/requests/{request_id}' +} + +export type GetFalAiVeo31FastExtendVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaVeo31FastExtendVideoOutput +} + +export type GetFalAiVeo31FastExtendVideoRequestsByRequestIdResponse = + GetFalAiVeo31FastExtendVideoRequestsByRequestIdResponses[keyof GetFalAiVeo31FastExtendVideoRequestsByRequestIdResponses] + +export type GetFalAiVeo31ExtendVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/veo3.1/extend-video/requests/{request_id}/status' +} + +export type GetFalAiVeo31ExtendVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiVeo31ExtendVideoRequestsByRequestIdStatusResponse = + GetFalAiVeo31ExtendVideoRequestsByRequestIdStatusResponses[keyof GetFalAiVeo31ExtendVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiVeo31ExtendVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/veo3.1/extend-video/requests/{request_id}/cancel' +} + +export type PutFalAiVeo31ExtendVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiVeo31ExtendVideoRequestsByRequestIdCancelResponse = + PutFalAiVeo31ExtendVideoRequestsByRequestIdCancelResponses[keyof PutFalAiVeo31ExtendVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiVeo31ExtendVideoData = { + body: SchemaVeo31ExtendVideoInput + path?: never + query?: never + url: '/fal-ai/veo3.1/extend-video' +} + +export type PostFalAiVeo31ExtendVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiVeo31ExtendVideoResponse = + PostFalAiVeo31ExtendVideoResponses[keyof PostFalAiVeo31ExtendVideoResponses] + +export type GetFalAiVeo31ExtendVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/veo3.1/extend-video/requests/{request_id}' +} + +export type GetFalAiVeo31ExtendVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaVeo31ExtendVideoOutput +} + +export type GetFalAiVeo31ExtendVideoRequestsByRequestIdResponse = + GetFalAiVeo31ExtendVideoRequestsByRequestIdResponses[keyof GetFalAiVeo31ExtendVideoRequestsByRequestIdResponses] + +export type GetFalAiKlingVideoO1StandardVideoToVideoReferenceRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-video/o1/standard/video-to-video/reference/requests/{request_id}/status' + } + +export type GetFalAiKlingVideoO1StandardVideoToVideoReferenceRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiKlingVideoO1StandardVideoToVideoReferenceRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoO1StandardVideoToVideoReferenceRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoO1StandardVideoToVideoReferenceRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoO1StandardVideoToVideoReferenceRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/o1/standard/video-to-video/reference/requests/{request_id}/cancel' + } + +export type PutFalAiKlingVideoO1StandardVideoToVideoReferenceRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiKlingVideoO1StandardVideoToVideoReferenceRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoO1StandardVideoToVideoReferenceRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoO1StandardVideoToVideoReferenceRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoO1StandardVideoToVideoReferenceData = { + body: SchemaKlingVideoO1StandardVideoToVideoReferenceInput + path?: never + query?: never + url: '/fal-ai/kling-video/o1/standard/video-to-video/reference' +} + +export type PostFalAiKlingVideoO1StandardVideoToVideoReferenceResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoO1StandardVideoToVideoReferenceResponse = + PostFalAiKlingVideoO1StandardVideoToVideoReferenceResponses[keyof PostFalAiKlingVideoO1StandardVideoToVideoReferenceResponses] + +export type GetFalAiKlingVideoO1StandardVideoToVideoReferenceRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/o1/standard/video-to-video/reference/requests/{request_id}' + } + +export type GetFalAiKlingVideoO1StandardVideoToVideoReferenceRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaKlingVideoO1StandardVideoToVideoReferenceOutput + } + +export type GetFalAiKlingVideoO1StandardVideoToVideoReferenceRequestsByRequestIdResponse = + GetFalAiKlingVideoO1StandardVideoToVideoReferenceRequestsByRequestIdResponses[keyof GetFalAiKlingVideoO1StandardVideoToVideoReferenceRequestsByRequestIdResponses] + +export type GetFalAiKlingVideoO1StandardVideoToVideoEditRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-video/o1/standard/video-to-video/edit/requests/{request_id}/status' + } + +export type GetFalAiKlingVideoO1StandardVideoToVideoEditRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiKlingVideoO1StandardVideoToVideoEditRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoO1StandardVideoToVideoEditRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoO1StandardVideoToVideoEditRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoO1StandardVideoToVideoEditRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/o1/standard/video-to-video/edit/requests/{request_id}/cancel' + } + +export type PutFalAiKlingVideoO1StandardVideoToVideoEditRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiKlingVideoO1StandardVideoToVideoEditRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoO1StandardVideoToVideoEditRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoO1StandardVideoToVideoEditRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoO1StandardVideoToVideoEditData = { + body: SchemaKlingVideoO1StandardVideoToVideoEditInput + path?: never + query?: never + url: '/fal-ai/kling-video/o1/standard/video-to-video/edit' +} + +export type PostFalAiKlingVideoO1StandardVideoToVideoEditResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoO1StandardVideoToVideoEditResponse = + PostFalAiKlingVideoO1StandardVideoToVideoEditResponses[keyof PostFalAiKlingVideoO1StandardVideoToVideoEditResponses] + +export type GetFalAiKlingVideoO1StandardVideoToVideoEditRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/o1/standard/video-to-video/edit/requests/{request_id}' + } + +export type GetFalAiKlingVideoO1StandardVideoToVideoEditRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaKlingVideoO1StandardVideoToVideoEditOutput + } + +export type GetFalAiKlingVideoO1StandardVideoToVideoEditRequestsByRequestIdResponse = + GetFalAiKlingVideoO1StandardVideoToVideoEditRequestsByRequestIdResponses[keyof GetFalAiKlingVideoO1StandardVideoToVideoEditRequestsByRequestIdResponses] + +export type GetFalAiSteadyDancerRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/steady-dancer/requests/{request_id}/status' +} + +export type GetFalAiSteadyDancerRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSteadyDancerRequestsByRequestIdStatusResponse = + GetFalAiSteadyDancerRequestsByRequestIdStatusResponses[keyof GetFalAiSteadyDancerRequestsByRequestIdStatusResponses] + +export type PutFalAiSteadyDancerRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/steady-dancer/requests/{request_id}/cancel' +} + +export type PutFalAiSteadyDancerRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiSteadyDancerRequestsByRequestIdCancelResponse = + PutFalAiSteadyDancerRequestsByRequestIdCancelResponses[keyof PutFalAiSteadyDancerRequestsByRequestIdCancelResponses] + +export type PostFalAiSteadyDancerData = { + body: SchemaSteadyDancerInput + path?: never + query?: never + url: '/fal-ai/steady-dancer' +} + +export type PostFalAiSteadyDancerResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSteadyDancerResponse = + PostFalAiSteadyDancerResponses[keyof PostFalAiSteadyDancerResponses] + +export type GetFalAiSteadyDancerRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/steady-dancer/requests/{request_id}' +} + +export type GetFalAiSteadyDancerRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSteadyDancerOutput +} + +export type GetFalAiSteadyDancerRequestsByRequestIdResponse = + GetFalAiSteadyDancerRequestsByRequestIdResponses[keyof GetFalAiSteadyDancerRequestsByRequestIdResponses] + +export type GetFalAiOneToAllAnimation13bRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/one-to-all-animation/1.3b/requests/{request_id}/status' +} + +export type GetFalAiOneToAllAnimation13bRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiOneToAllAnimation13bRequestsByRequestIdStatusResponse = + GetFalAiOneToAllAnimation13bRequestsByRequestIdStatusResponses[keyof GetFalAiOneToAllAnimation13bRequestsByRequestIdStatusResponses] + +export type PutFalAiOneToAllAnimation13bRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/one-to-all-animation/1.3b/requests/{request_id}/cancel' +} + +export type PutFalAiOneToAllAnimation13bRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiOneToAllAnimation13bRequestsByRequestIdCancelResponse = + PutFalAiOneToAllAnimation13bRequestsByRequestIdCancelResponses[keyof PutFalAiOneToAllAnimation13bRequestsByRequestIdCancelResponses] + +export type PostFalAiOneToAllAnimation13bData = { + body: SchemaOneToAllAnimation13bInput + path?: never + query?: never + url: '/fal-ai/one-to-all-animation/1.3b' +} + +export type PostFalAiOneToAllAnimation13bResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiOneToAllAnimation13bResponse = + PostFalAiOneToAllAnimation13bResponses[keyof PostFalAiOneToAllAnimation13bResponses] + +export type GetFalAiOneToAllAnimation13bRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/one-to-all-animation/1.3b/requests/{request_id}' +} + +export type GetFalAiOneToAllAnimation13bRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaOneToAllAnimation13bOutput +} + +export type GetFalAiOneToAllAnimation13bRequestsByRequestIdResponse = + GetFalAiOneToAllAnimation13bRequestsByRequestIdResponses[keyof GetFalAiOneToAllAnimation13bRequestsByRequestIdResponses] + +export type GetFalAiOneToAllAnimation14bRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/one-to-all-animation/14b/requests/{request_id}/status' +} + +export type GetFalAiOneToAllAnimation14bRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiOneToAllAnimation14bRequestsByRequestIdStatusResponse = + GetFalAiOneToAllAnimation14bRequestsByRequestIdStatusResponses[keyof GetFalAiOneToAllAnimation14bRequestsByRequestIdStatusResponses] + +export type PutFalAiOneToAllAnimation14bRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/one-to-all-animation/14b/requests/{request_id}/cancel' +} + +export type PutFalAiOneToAllAnimation14bRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiOneToAllAnimation14bRequestsByRequestIdCancelResponse = + PutFalAiOneToAllAnimation14bRequestsByRequestIdCancelResponses[keyof PutFalAiOneToAllAnimation14bRequestsByRequestIdCancelResponses] + +export type PostFalAiOneToAllAnimation14bData = { + body: SchemaOneToAllAnimation14bInput + path?: never + query?: never + url: '/fal-ai/one-to-all-animation/14b' +} + +export type PostFalAiOneToAllAnimation14bResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiOneToAllAnimation14bResponse = + PostFalAiOneToAllAnimation14bResponses[keyof PostFalAiOneToAllAnimation14bResponses] + +export type GetFalAiOneToAllAnimation14bRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/one-to-all-animation/14b/requests/{request_id}' +} + +export type GetFalAiOneToAllAnimation14bRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaOneToAllAnimation14bOutput +} + +export type GetFalAiOneToAllAnimation14bRequestsByRequestIdResponse = + GetFalAiOneToAllAnimation14bRequestsByRequestIdResponses[keyof GetFalAiOneToAllAnimation14bRequestsByRequestIdResponses] + +export type GetFalAiWanVisionEnhancerRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan-vision-enhancer/requests/{request_id}/status' +} + +export type GetFalAiWanVisionEnhancerRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiWanVisionEnhancerRequestsByRequestIdStatusResponse = + GetFalAiWanVisionEnhancerRequestsByRequestIdStatusResponses[keyof GetFalAiWanVisionEnhancerRequestsByRequestIdStatusResponses] + +export type PutFalAiWanVisionEnhancerRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-vision-enhancer/requests/{request_id}/cancel' +} + +export type PutFalAiWanVisionEnhancerRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiWanVisionEnhancerRequestsByRequestIdCancelResponse = + PutFalAiWanVisionEnhancerRequestsByRequestIdCancelResponses[keyof PutFalAiWanVisionEnhancerRequestsByRequestIdCancelResponses] + +export type PostFalAiWanVisionEnhancerData = { + body: SchemaWanVisionEnhancerInput + path?: never + query?: never + url: '/fal-ai/wan-vision-enhancer' +} + +export type PostFalAiWanVisionEnhancerResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanVisionEnhancerResponse = + PostFalAiWanVisionEnhancerResponses[keyof PostFalAiWanVisionEnhancerResponses] + +export type GetFalAiWanVisionEnhancerRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-vision-enhancer/requests/{request_id}' +} + +export type GetFalAiWanVisionEnhancerRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaWanVisionEnhancerOutput +} + +export type GetFalAiWanVisionEnhancerRequestsByRequestIdResponse = + GetFalAiWanVisionEnhancerRequestsByRequestIdResponses[keyof GetFalAiWanVisionEnhancerRequestsByRequestIdResponses] + +export type GetFalAiSyncLipsyncReact1RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/sync-lipsync/react-1/requests/{request_id}/status' +} + +export type GetFalAiSyncLipsyncReact1RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSyncLipsyncReact1RequestsByRequestIdStatusResponse = + GetFalAiSyncLipsyncReact1RequestsByRequestIdStatusResponses[keyof GetFalAiSyncLipsyncReact1RequestsByRequestIdStatusResponses] + +export type PutFalAiSyncLipsyncReact1RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sync-lipsync/react-1/requests/{request_id}/cancel' +} + +export type PutFalAiSyncLipsyncReact1RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiSyncLipsyncReact1RequestsByRequestIdCancelResponse = + PutFalAiSyncLipsyncReact1RequestsByRequestIdCancelResponses[keyof PutFalAiSyncLipsyncReact1RequestsByRequestIdCancelResponses] + +export type PostFalAiSyncLipsyncReact1Data = { + body: SchemaSyncLipsyncReact1Input + path?: never + query?: never + url: '/fal-ai/sync-lipsync/react-1' +} + +export type PostFalAiSyncLipsyncReact1Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSyncLipsyncReact1Response = + PostFalAiSyncLipsyncReact1Responses[keyof PostFalAiSyncLipsyncReact1Responses] + +export type GetFalAiSyncLipsyncReact1RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sync-lipsync/react-1/requests/{request_id}' +} + +export type GetFalAiSyncLipsyncReact1RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSyncLipsyncReact1Output +} + +export type GetFalAiSyncLipsyncReact1RequestsByRequestIdResponse = + GetFalAiSyncLipsyncReact1RequestsByRequestIdResponses[keyof GetFalAiSyncLipsyncReact1RequestsByRequestIdResponses] + +export type GetVeedVideoBackgroundRemovalFastRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/veed/video-background-removal/fast/requests/{request_id}/status' +} + +export type GetVeedVideoBackgroundRemovalFastRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetVeedVideoBackgroundRemovalFastRequestsByRequestIdStatusResponse = + GetVeedVideoBackgroundRemovalFastRequestsByRequestIdStatusResponses[keyof GetVeedVideoBackgroundRemovalFastRequestsByRequestIdStatusResponses] + +export type PutVeedVideoBackgroundRemovalFastRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/veed/video-background-removal/fast/requests/{request_id}/cancel' +} + +export type PutVeedVideoBackgroundRemovalFastRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutVeedVideoBackgroundRemovalFastRequestsByRequestIdCancelResponse = + PutVeedVideoBackgroundRemovalFastRequestsByRequestIdCancelResponses[keyof PutVeedVideoBackgroundRemovalFastRequestsByRequestIdCancelResponses] + +export type PostVeedVideoBackgroundRemovalFastData = { + body: SchemaVideoBackgroundRemovalFastInput + path?: never + query?: never + url: '/veed/video-background-removal/fast' +} + +export type PostVeedVideoBackgroundRemovalFastResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostVeedVideoBackgroundRemovalFastResponse = + PostVeedVideoBackgroundRemovalFastResponses[keyof PostVeedVideoBackgroundRemovalFastResponses] + +export type GetVeedVideoBackgroundRemovalFastRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/veed/video-background-removal/fast/requests/{request_id}' +} + +export type GetVeedVideoBackgroundRemovalFastRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaVideoBackgroundRemovalFastOutput +} + +export type GetVeedVideoBackgroundRemovalFastRequestsByRequestIdResponse = + GetVeedVideoBackgroundRemovalFastRequestsByRequestIdResponses[keyof GetVeedVideoBackgroundRemovalFastRequestsByRequestIdResponses] + +export type GetFalAiKlingVideoO1VideoToVideoEditRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-video/o1/video-to-video/edit/requests/{request_id}/status' + } + +export type GetFalAiKlingVideoO1VideoToVideoEditRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiKlingVideoO1VideoToVideoEditRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoO1VideoToVideoEditRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoO1VideoToVideoEditRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoO1VideoToVideoEditRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/o1/video-to-video/edit/requests/{request_id}/cancel' + } + +export type PutFalAiKlingVideoO1VideoToVideoEditRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiKlingVideoO1VideoToVideoEditRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoO1VideoToVideoEditRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoO1VideoToVideoEditRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoO1VideoToVideoEditData = { + body: SchemaKlingVideoO1VideoToVideoEditInput + path?: never + query?: never + url: '/fal-ai/kling-video/o1/video-to-video/edit' +} + +export type PostFalAiKlingVideoO1VideoToVideoEditResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoO1VideoToVideoEditResponse = + PostFalAiKlingVideoO1VideoToVideoEditResponses[keyof PostFalAiKlingVideoO1VideoToVideoEditResponses] + +export type GetFalAiKlingVideoO1VideoToVideoEditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/o1/video-to-video/edit/requests/{request_id}' +} + +export type GetFalAiKlingVideoO1VideoToVideoEditRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaKlingVideoO1VideoToVideoEditOutput +} + +export type GetFalAiKlingVideoO1VideoToVideoEditRequestsByRequestIdResponse = + GetFalAiKlingVideoO1VideoToVideoEditRequestsByRequestIdResponses[keyof GetFalAiKlingVideoO1VideoToVideoEditRequestsByRequestIdResponses] + +export type GetFalAiKlingVideoO1VideoToVideoReferenceRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/kling-video/o1/video-to-video/reference/requests/{request_id}/status' + } + +export type GetFalAiKlingVideoO1VideoToVideoReferenceRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiKlingVideoO1VideoToVideoReferenceRequestsByRequestIdStatusResponse = + GetFalAiKlingVideoO1VideoToVideoReferenceRequestsByRequestIdStatusResponses[keyof GetFalAiKlingVideoO1VideoToVideoReferenceRequestsByRequestIdStatusResponses] + +export type PutFalAiKlingVideoO1VideoToVideoReferenceRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/o1/video-to-video/reference/requests/{request_id}/cancel' + } + +export type PutFalAiKlingVideoO1VideoToVideoReferenceRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiKlingVideoO1VideoToVideoReferenceRequestsByRequestIdCancelResponse = + PutFalAiKlingVideoO1VideoToVideoReferenceRequestsByRequestIdCancelResponses[keyof PutFalAiKlingVideoO1VideoToVideoReferenceRequestsByRequestIdCancelResponses] + +export type PostFalAiKlingVideoO1VideoToVideoReferenceData = { + body: SchemaKlingVideoO1VideoToVideoReferenceInput + path?: never + query?: never + url: '/fal-ai/kling-video/o1/video-to-video/reference' +} + +export type PostFalAiKlingVideoO1VideoToVideoReferenceResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiKlingVideoO1VideoToVideoReferenceResponse = + PostFalAiKlingVideoO1VideoToVideoReferenceResponses[keyof PostFalAiKlingVideoO1VideoToVideoReferenceResponses] + +export type GetFalAiKlingVideoO1VideoToVideoReferenceRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/kling-video/o1/video-to-video/reference/requests/{request_id}' +} + +export type GetFalAiKlingVideoO1VideoToVideoReferenceRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaKlingVideoO1VideoToVideoReferenceOutput + } + +export type GetFalAiKlingVideoO1VideoToVideoReferenceRequestsByRequestIdResponse = + GetFalAiKlingVideoO1VideoToVideoReferenceRequestsByRequestIdResponses[keyof GetFalAiKlingVideoO1VideoToVideoReferenceRequestsByRequestIdResponses] + +export type GetVeedVideoBackgroundRemovalRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/veed/video-background-removal/requests/{request_id}/status' +} + +export type GetVeedVideoBackgroundRemovalRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetVeedVideoBackgroundRemovalRequestsByRequestIdStatusResponse = + GetVeedVideoBackgroundRemovalRequestsByRequestIdStatusResponses[keyof GetVeedVideoBackgroundRemovalRequestsByRequestIdStatusResponses] + +export type PutVeedVideoBackgroundRemovalRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/veed/video-background-removal/requests/{request_id}/cancel' +} + +export type PutVeedVideoBackgroundRemovalRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutVeedVideoBackgroundRemovalRequestsByRequestIdCancelResponse = + PutVeedVideoBackgroundRemovalRequestsByRequestIdCancelResponses[keyof PutVeedVideoBackgroundRemovalRequestsByRequestIdCancelResponses] + +export type PostVeedVideoBackgroundRemovalData = { + body: SchemaVideoBackgroundRemovalInput + path?: never + query?: never + url: '/veed/video-background-removal' +} + +export type PostVeedVideoBackgroundRemovalResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostVeedVideoBackgroundRemovalResponse = + PostVeedVideoBackgroundRemovalResponses[keyof PostVeedVideoBackgroundRemovalResponses] + +export type GetVeedVideoBackgroundRemovalRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/veed/video-background-removal/requests/{request_id}' +} + +export type GetVeedVideoBackgroundRemovalRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaVideoBackgroundRemovalOutput +} + +export type GetVeedVideoBackgroundRemovalRequestsByRequestIdResponse = + GetVeedVideoBackgroundRemovalRequestsByRequestIdResponses[keyof GetVeedVideoBackgroundRemovalRequestsByRequestIdResponses] + +export type GetVeedVideoBackgroundRemovalGreenScreenRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/veed/video-background-removal/green-screen/requests/{request_id}/status' + } + +export type GetVeedVideoBackgroundRemovalGreenScreenRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetVeedVideoBackgroundRemovalGreenScreenRequestsByRequestIdStatusResponse = + GetVeedVideoBackgroundRemovalGreenScreenRequestsByRequestIdStatusResponses[keyof GetVeedVideoBackgroundRemovalGreenScreenRequestsByRequestIdStatusResponses] + +export type PutVeedVideoBackgroundRemovalGreenScreenRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/veed/video-background-removal/green-screen/requests/{request_id}/cancel' + } + +export type PutVeedVideoBackgroundRemovalGreenScreenRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutVeedVideoBackgroundRemovalGreenScreenRequestsByRequestIdCancelResponse = + PutVeedVideoBackgroundRemovalGreenScreenRequestsByRequestIdCancelResponses[keyof PutVeedVideoBackgroundRemovalGreenScreenRequestsByRequestIdCancelResponses] + +export type PostVeedVideoBackgroundRemovalGreenScreenData = { + body: SchemaVideoBackgroundRemovalGreenScreenInput + path?: never + query?: never + url: '/veed/video-background-removal/green-screen' +} + +export type PostVeedVideoBackgroundRemovalGreenScreenResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostVeedVideoBackgroundRemovalGreenScreenResponse = + PostVeedVideoBackgroundRemovalGreenScreenResponses[keyof PostVeedVideoBackgroundRemovalGreenScreenResponses] + +export type GetVeedVideoBackgroundRemovalGreenScreenRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/veed/video-background-removal/green-screen/requests/{request_id}' +} + +export type GetVeedVideoBackgroundRemovalGreenScreenRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaVideoBackgroundRemovalGreenScreenOutput + } + +export type GetVeedVideoBackgroundRemovalGreenScreenRequestsByRequestIdResponse = + GetVeedVideoBackgroundRemovalGreenScreenRequestsByRequestIdResponses[keyof GetVeedVideoBackgroundRemovalGreenScreenRequestsByRequestIdResponses] + +export type GetFalAiLtx2RetakeVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ltx-2/retake-video/requests/{request_id}/status' +} + +export type GetFalAiLtx2RetakeVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLtx2RetakeVideoRequestsByRequestIdStatusResponse = + GetFalAiLtx2RetakeVideoRequestsByRequestIdStatusResponses[keyof GetFalAiLtx2RetakeVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiLtx2RetakeVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2/retake-video/requests/{request_id}/cancel' +} + +export type PutFalAiLtx2RetakeVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiLtx2RetakeVideoRequestsByRequestIdCancelResponse = + PutFalAiLtx2RetakeVideoRequestsByRequestIdCancelResponses[keyof PutFalAiLtx2RetakeVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiLtx2RetakeVideoData = { + body: SchemaLtx2RetakeVideoInput + path?: never + query?: never + url: '/fal-ai/ltx-2/retake-video' +} + +export type PostFalAiLtx2RetakeVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtx2RetakeVideoResponse = + PostFalAiLtx2RetakeVideoResponses[keyof PostFalAiLtx2RetakeVideoResponses] + +export type GetFalAiLtx2RetakeVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-2/retake-video/requests/{request_id}' +} + +export type GetFalAiLtx2RetakeVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLtx2RetakeVideoOutput +} + +export type GetFalAiLtx2RetakeVideoRequestsByRequestIdResponse = + GetFalAiLtx2RetakeVideoRequestsByRequestIdResponses[keyof GetFalAiLtx2RetakeVideoRequestsByRequestIdResponses] + +export type GetDecartLucyEditFastRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/decart/lucy-edit/fast/requests/{request_id}/status' +} + +export type GetDecartLucyEditFastRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetDecartLucyEditFastRequestsByRequestIdStatusResponse = + GetDecartLucyEditFastRequestsByRequestIdStatusResponses[keyof GetDecartLucyEditFastRequestsByRequestIdStatusResponses] + +export type PutDecartLucyEditFastRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/decart/lucy-edit/fast/requests/{request_id}/cancel' +} + +export type PutDecartLucyEditFastRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutDecartLucyEditFastRequestsByRequestIdCancelResponse = + PutDecartLucyEditFastRequestsByRequestIdCancelResponses[keyof PutDecartLucyEditFastRequestsByRequestIdCancelResponses] + +export type PostDecartLucyEditFastData = { + body: SchemaLucyEditFastInput + path?: never + query?: never + url: '/decart/lucy-edit/fast' +} + +export type PostDecartLucyEditFastResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostDecartLucyEditFastResponse = + PostDecartLucyEditFastResponses[keyof PostDecartLucyEditFastResponses] + +export type GetDecartLucyEditFastRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/decart/lucy-edit/fast/requests/{request_id}' +} + +export type GetDecartLucyEditFastRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaLucyEditFastOutput +} + +export type GetDecartLucyEditFastRequestsByRequestIdResponse = + GetDecartLucyEditFastRequestsByRequestIdResponses[keyof GetDecartLucyEditFastRequestsByRequestIdResponses] + +export type GetFalAiSam3VideoRleRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/sam-3/video-rle/requests/{request_id}/status' +} + +export type GetFalAiSam3VideoRleRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSam3VideoRleRequestsByRequestIdStatusResponse = + GetFalAiSam3VideoRleRequestsByRequestIdStatusResponses[keyof GetFalAiSam3VideoRleRequestsByRequestIdStatusResponses] + +export type PutFalAiSam3VideoRleRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sam-3/video-rle/requests/{request_id}/cancel' +} + +export type PutFalAiSam3VideoRleRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiSam3VideoRleRequestsByRequestIdCancelResponse = + PutFalAiSam3VideoRleRequestsByRequestIdCancelResponses[keyof PutFalAiSam3VideoRleRequestsByRequestIdCancelResponses] + +export type PostFalAiSam3VideoRleData = { + body: SchemaSam3VideoRleInput + path?: never + query?: never + url: '/fal-ai/sam-3/video-rle' +} + +export type PostFalAiSam3VideoRleResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSam3VideoRleResponse = + PostFalAiSam3VideoRleResponses[keyof PostFalAiSam3VideoRleResponses] + +export type GetFalAiSam3VideoRleRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sam-3/video-rle/requests/{request_id}' +} + +export type GetFalAiSam3VideoRleRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSam3VideoRleOutput +} + +export type GetFalAiSam3VideoRleRequestsByRequestIdResponse = + GetFalAiSam3VideoRleRequestsByRequestIdResponses[keyof GetFalAiSam3VideoRleRequestsByRequestIdResponses] + +export type GetFalAiSam3VideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/sam-3/video/requests/{request_id}/status' +} + +export type GetFalAiSam3VideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSam3VideoRequestsByRequestIdStatusResponse = + GetFalAiSam3VideoRequestsByRequestIdStatusResponses[keyof GetFalAiSam3VideoRequestsByRequestIdStatusResponses] + +export type PutFalAiSam3VideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sam-3/video/requests/{request_id}/cancel' +} + +export type PutFalAiSam3VideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiSam3VideoRequestsByRequestIdCancelResponse = + PutFalAiSam3VideoRequestsByRequestIdCancelResponses[keyof PutFalAiSam3VideoRequestsByRequestIdCancelResponses] + +export type PostFalAiSam3VideoData = { + body: SchemaSam3VideoInput + path?: never + query?: never + url: '/fal-ai/sam-3/video' +} + +export type PostFalAiSam3VideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSam3VideoResponse = + PostFalAiSam3VideoResponses[keyof PostFalAiSam3VideoResponses] + +export type GetFalAiSam3VideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sam-3/video/requests/{request_id}' +} + +export type GetFalAiSam3VideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSam3VideoOutput +} + +export type GetFalAiSam3VideoRequestsByRequestIdResponse = + GetFalAiSam3VideoRequestsByRequestIdResponses[keyof GetFalAiSam3VideoRequestsByRequestIdResponses] + +export type GetFalAiEdittoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/editto/requests/{request_id}/status' +} + +export type GetFalAiEdittoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiEdittoRequestsByRequestIdStatusResponse = + GetFalAiEdittoRequestsByRequestIdStatusResponses[keyof GetFalAiEdittoRequestsByRequestIdStatusResponses] + +export type PutFalAiEdittoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/editto/requests/{request_id}/cancel' +} + +export type PutFalAiEdittoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiEdittoRequestsByRequestIdCancelResponse = + PutFalAiEdittoRequestsByRequestIdCancelResponses[keyof PutFalAiEdittoRequestsByRequestIdCancelResponses] + +export type PostFalAiEdittoData = { + body: SchemaEdittoInput + path?: never + query?: never + url: '/fal-ai/editto' +} + +export type PostFalAiEdittoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiEdittoResponse = + PostFalAiEdittoResponses[keyof PostFalAiEdittoResponses] + +export type GetFalAiEdittoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/editto/requests/{request_id}' +} + +export type GetFalAiEdittoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaEdittoOutput +} + +export type GetFalAiEdittoRequestsByRequestIdResponse = + GetFalAiEdittoRequestsByRequestIdResponses[keyof GetFalAiEdittoRequestsByRequestIdResponses] + +export type GetFalAiFlashvsrUpscaleVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/flashvsr/upscale/video/requests/{request_id}/status' +} + +export type GetFalAiFlashvsrUpscaleVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlashvsrUpscaleVideoRequestsByRequestIdStatusResponse = + GetFalAiFlashvsrUpscaleVideoRequestsByRequestIdStatusResponses[keyof GetFalAiFlashvsrUpscaleVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiFlashvsrUpscaleVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flashvsr/upscale/video/requests/{request_id}/cancel' +} + +export type PutFalAiFlashvsrUpscaleVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFlashvsrUpscaleVideoRequestsByRequestIdCancelResponse = + PutFalAiFlashvsrUpscaleVideoRequestsByRequestIdCancelResponses[keyof PutFalAiFlashvsrUpscaleVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiFlashvsrUpscaleVideoData = { + body: SchemaFlashvsrUpscaleVideoInput + path?: never + query?: never + url: '/fal-ai/flashvsr/upscale/video' +} + +export type PostFalAiFlashvsrUpscaleVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlashvsrUpscaleVideoResponse = + PostFalAiFlashvsrUpscaleVideoResponses[keyof PostFalAiFlashvsrUpscaleVideoResponses] + +export type GetFalAiFlashvsrUpscaleVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/flashvsr/upscale/video/requests/{request_id}' +} + +export type GetFalAiFlashvsrUpscaleVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlashvsrUpscaleVideoOutput +} + +export type GetFalAiFlashvsrUpscaleVideoRequestsByRequestIdResponse = + GetFalAiFlashvsrUpscaleVideoRequestsByRequestIdResponses[keyof GetFalAiFlashvsrUpscaleVideoRequestsByRequestIdResponses] + +export type GetFalAiWorkflowUtilitiesAutoSubtitleRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/workflow-utilities/auto-subtitle/requests/{request_id}/status' + } + +export type GetFalAiWorkflowUtilitiesAutoSubtitleRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiWorkflowUtilitiesAutoSubtitleRequestsByRequestIdStatusResponse = + GetFalAiWorkflowUtilitiesAutoSubtitleRequestsByRequestIdStatusResponses[keyof GetFalAiWorkflowUtilitiesAutoSubtitleRequestsByRequestIdStatusResponses] + +export type PutFalAiWorkflowUtilitiesAutoSubtitleRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/workflow-utilities/auto-subtitle/requests/{request_id}/cancel' + } + +export type PutFalAiWorkflowUtilitiesAutoSubtitleRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiWorkflowUtilitiesAutoSubtitleRequestsByRequestIdCancelResponse = + PutFalAiWorkflowUtilitiesAutoSubtitleRequestsByRequestIdCancelResponses[keyof PutFalAiWorkflowUtilitiesAutoSubtitleRequestsByRequestIdCancelResponses] + +export type PostFalAiWorkflowUtilitiesAutoSubtitleData = { + body: SchemaWorkflowUtilitiesAutoSubtitleInput + path?: never + query?: never + url: '/fal-ai/workflow-utilities/auto-subtitle' +} + +export type PostFalAiWorkflowUtilitiesAutoSubtitleResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWorkflowUtilitiesAutoSubtitleResponse = + PostFalAiWorkflowUtilitiesAutoSubtitleResponses[keyof PostFalAiWorkflowUtilitiesAutoSubtitleResponses] + +export type GetFalAiWorkflowUtilitiesAutoSubtitleRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/workflow-utilities/auto-subtitle/requests/{request_id}' +} + +export type GetFalAiWorkflowUtilitiesAutoSubtitleRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaWorkflowUtilitiesAutoSubtitleOutput + } + +export type GetFalAiWorkflowUtilitiesAutoSubtitleRequestsByRequestIdResponse = + GetFalAiWorkflowUtilitiesAutoSubtitleRequestsByRequestIdResponses[keyof GetFalAiWorkflowUtilitiesAutoSubtitleRequestsByRequestIdResponses] + +export type GetFalAiBytedanceUpscalerUpscaleVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/bytedance-upscaler/upscale/video/requests/{request_id}/status' + } + +export type GetFalAiBytedanceUpscalerUpscaleVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiBytedanceUpscalerUpscaleVideoRequestsByRequestIdStatusResponse = + GetFalAiBytedanceUpscalerUpscaleVideoRequestsByRequestIdStatusResponses[keyof GetFalAiBytedanceUpscalerUpscaleVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiBytedanceUpscalerUpscaleVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bytedance-upscaler/upscale/video/requests/{request_id}/cancel' + } + +export type PutFalAiBytedanceUpscalerUpscaleVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiBytedanceUpscalerUpscaleVideoRequestsByRequestIdCancelResponse = + PutFalAiBytedanceUpscalerUpscaleVideoRequestsByRequestIdCancelResponses[keyof PutFalAiBytedanceUpscalerUpscaleVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiBytedanceUpscalerUpscaleVideoData = { + body: SchemaBytedanceUpscalerUpscaleVideoInput + path?: never + query?: never + url: '/fal-ai/bytedance-upscaler/upscale/video' +} + +export type PostFalAiBytedanceUpscalerUpscaleVideoResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiBytedanceUpscalerUpscaleVideoResponse = + PostFalAiBytedanceUpscalerUpscaleVideoResponses[keyof PostFalAiBytedanceUpscalerUpscaleVideoResponses] + +export type GetFalAiBytedanceUpscalerUpscaleVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/bytedance-upscaler/upscale/video/requests/{request_id}' +} + +export type GetFalAiBytedanceUpscalerUpscaleVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaBytedanceUpscalerUpscaleVideoOutput + } + +export type GetFalAiBytedanceUpscalerUpscaleVideoRequestsByRequestIdResponse = + GetFalAiBytedanceUpscalerUpscaleVideoRequestsByRequestIdResponses[keyof GetFalAiBytedanceUpscalerUpscaleVideoRequestsByRequestIdResponses] + +export type GetFalAiVideoAsPromptRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/video-as-prompt/requests/{request_id}/status' +} + +export type GetFalAiVideoAsPromptRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiVideoAsPromptRequestsByRequestIdStatusResponse = + GetFalAiVideoAsPromptRequestsByRequestIdStatusResponses[keyof GetFalAiVideoAsPromptRequestsByRequestIdStatusResponses] + +export type PutFalAiVideoAsPromptRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/video-as-prompt/requests/{request_id}/cancel' +} + +export type PutFalAiVideoAsPromptRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiVideoAsPromptRequestsByRequestIdCancelResponse = + PutFalAiVideoAsPromptRequestsByRequestIdCancelResponses[keyof PutFalAiVideoAsPromptRequestsByRequestIdCancelResponses] + +export type PostFalAiVideoAsPromptData = { + body: SchemaVideoAsPromptInput + path?: never + query?: never + url: '/fal-ai/video-as-prompt' +} + +export type PostFalAiVideoAsPromptResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiVideoAsPromptResponse = + PostFalAiVideoAsPromptResponses[keyof PostFalAiVideoAsPromptResponses] + +export type GetFalAiVideoAsPromptRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/video-as-prompt/requests/{request_id}' +} + +export type GetFalAiVideoAsPromptRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaVideoAsPromptOutput +} + +export type GetFalAiVideoAsPromptRequestsByRequestIdResponse = + GetFalAiVideoAsPromptRequestsByRequestIdResponses[keyof GetFalAiVideoAsPromptRequestsByRequestIdResponses] + +export type GetFalAiBirefnetV2VideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/birefnet/v2/video/requests/{request_id}/status' +} + +export type GetFalAiBirefnetV2VideoRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiBirefnetV2VideoRequestsByRequestIdStatusResponse = + GetFalAiBirefnetV2VideoRequestsByRequestIdStatusResponses[keyof GetFalAiBirefnetV2VideoRequestsByRequestIdStatusResponses] + +export type PutFalAiBirefnetV2VideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/birefnet/v2/video/requests/{request_id}/cancel' +} + +export type PutFalAiBirefnetV2VideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiBirefnetV2VideoRequestsByRequestIdCancelResponse = + PutFalAiBirefnetV2VideoRequestsByRequestIdCancelResponses[keyof PutFalAiBirefnetV2VideoRequestsByRequestIdCancelResponses] + +export type PostFalAiBirefnetV2VideoData = { + body: SchemaBirefnetV2VideoInput + path?: never + query?: never + url: '/fal-ai/birefnet/v2/video' +} + +export type PostFalAiBirefnetV2VideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiBirefnetV2VideoResponse = + PostFalAiBirefnetV2VideoResponses[keyof PostFalAiBirefnetV2VideoResponses] + +export type GetFalAiBirefnetV2VideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/birefnet/v2/video/requests/{request_id}' +} + +export type GetFalAiBirefnetV2VideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaBirefnetV2VideoOutput +} + +export type GetFalAiBirefnetV2VideoRequestsByRequestIdResponse = + GetFalAiBirefnetV2VideoRequestsByRequestIdResponses[keyof GetFalAiBirefnetV2VideoRequestsByRequestIdResponses] + +export type GetFalAiViduQ2VideoExtensionProRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/vidu/q2/video-extension/pro/requests/{request_id}/status' +} + +export type GetFalAiViduQ2VideoExtensionProRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiViduQ2VideoExtensionProRequestsByRequestIdStatusResponse = + GetFalAiViduQ2VideoExtensionProRequestsByRequestIdStatusResponses[keyof GetFalAiViduQ2VideoExtensionProRequestsByRequestIdStatusResponses] + +export type PutFalAiViduQ2VideoExtensionProRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/vidu/q2/video-extension/pro/requests/{request_id}/cancel' +} + +export type PutFalAiViduQ2VideoExtensionProRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiViduQ2VideoExtensionProRequestsByRequestIdCancelResponse = + PutFalAiViduQ2VideoExtensionProRequestsByRequestIdCancelResponses[keyof PutFalAiViduQ2VideoExtensionProRequestsByRequestIdCancelResponses] + +export type PostFalAiViduQ2VideoExtensionProData = { + body: SchemaViduQ2VideoExtensionProInput + path?: never + query?: never + url: '/fal-ai/vidu/q2/video-extension/pro' +} + +export type PostFalAiViduQ2VideoExtensionProResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiViduQ2VideoExtensionProResponse = + PostFalAiViduQ2VideoExtensionProResponses[keyof PostFalAiViduQ2VideoExtensionProResponses] + +export type GetFalAiViduQ2VideoExtensionProRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/vidu/q2/video-extension/pro/requests/{request_id}' +} + +export type GetFalAiViduQ2VideoExtensionProRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaViduQ2VideoExtensionProOutput +} + +export type GetFalAiViduQ2VideoExtensionProRequestsByRequestIdResponse = + GetFalAiViduQ2VideoExtensionProRequestsByRequestIdResponses[keyof GetFalAiViduQ2VideoExtensionProRequestsByRequestIdResponses] + +export type GetMireloAiSfxV15VideoToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/mirelo-ai/sfx-v1.5/video-to-video/requests/{request_id}/status' +} + +export type GetMireloAiSfxV15VideoToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetMireloAiSfxV15VideoToVideoRequestsByRequestIdStatusResponse = + GetMireloAiSfxV15VideoToVideoRequestsByRequestIdStatusResponses[keyof GetMireloAiSfxV15VideoToVideoRequestsByRequestIdStatusResponses] + +export type PutMireloAiSfxV15VideoToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/mirelo-ai/sfx-v1.5/video-to-video/requests/{request_id}/cancel' +} + +export type PutMireloAiSfxV15VideoToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutMireloAiSfxV15VideoToVideoRequestsByRequestIdCancelResponse = + PutMireloAiSfxV15VideoToVideoRequestsByRequestIdCancelResponses[keyof PutMireloAiSfxV15VideoToVideoRequestsByRequestIdCancelResponses] + +export type PostMireloAiSfxV15VideoToVideoData = { + body: SchemaSfxV15VideoToVideoInput + path?: never + query?: never + url: '/mirelo-ai/sfx-v1.5/video-to-video' +} + +export type PostMireloAiSfxV15VideoToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostMireloAiSfxV15VideoToVideoResponse = + PostMireloAiSfxV15VideoToVideoResponses[keyof PostMireloAiSfxV15VideoToVideoResponses] + +export type GetMireloAiSfxV15VideoToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/mirelo-ai/sfx-v1.5/video-to-video/requests/{request_id}' +} + +export type GetMireloAiSfxV15VideoToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSfxV15VideoToVideoOutput +} + +export type GetMireloAiSfxV15VideoToVideoRequestsByRequestIdResponse = + GetMireloAiSfxV15VideoToVideoRequestsByRequestIdResponses[keyof GetMireloAiSfxV15VideoToVideoRequestsByRequestIdResponses] + +export type GetFalAiKreaWan14bVideoToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/krea-wan-14b/video-to-video/requests/{request_id}/status' +} + +export type GetFalAiKreaWan14bVideoToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiKreaWan14bVideoToVideoRequestsByRequestIdStatusResponse = + GetFalAiKreaWan14bVideoToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiKreaWan14bVideoToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiKreaWan14bVideoToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/krea-wan-14b/video-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiKreaWan14bVideoToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiKreaWan14bVideoToVideoRequestsByRequestIdCancelResponse = + PutFalAiKreaWan14bVideoToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiKreaWan14bVideoToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiKreaWan14bVideoToVideoData = { + body: SchemaKreaWan14bVideoToVideoInput + path?: never + query?: never + url: '/fal-ai/krea-wan-14b/video-to-video' +} + +export type PostFalAiKreaWan14bVideoToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiKreaWan14bVideoToVideoResponse = + PostFalAiKreaWan14bVideoToVideoResponses[keyof PostFalAiKreaWan14bVideoToVideoResponses] + +export type GetFalAiKreaWan14bVideoToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/krea-wan-14b/video-to-video/requests/{request_id}' +} + +export type GetFalAiKreaWan14bVideoToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaKreaWan14bVideoToVideoOutput +} + +export type GetFalAiKreaWan14bVideoToVideoRequestsByRequestIdResponse = + GetFalAiKreaWan14bVideoToVideoRequestsByRequestIdResponses[keyof GetFalAiKreaWan14bVideoToVideoRequestsByRequestIdResponses] + +export type GetFalAiSora2VideoToVideoRemixRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/sora-2/video-to-video/remix/requests/{request_id}/status' +} + +export type GetFalAiSora2VideoToVideoRemixRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSora2VideoToVideoRemixRequestsByRequestIdStatusResponse = + GetFalAiSora2VideoToVideoRemixRequestsByRequestIdStatusResponses[keyof GetFalAiSora2VideoToVideoRemixRequestsByRequestIdStatusResponses] + +export type PutFalAiSora2VideoToVideoRemixRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sora-2/video-to-video/remix/requests/{request_id}/cancel' +} + +export type PutFalAiSora2VideoToVideoRemixRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiSora2VideoToVideoRemixRequestsByRequestIdCancelResponse = + PutFalAiSora2VideoToVideoRemixRequestsByRequestIdCancelResponses[keyof PutFalAiSora2VideoToVideoRemixRequestsByRequestIdCancelResponses] + +export type PostFalAiSora2VideoToVideoRemixData = { + body: SchemaSora2VideoToVideoRemixInput + path?: never + query?: never + url: '/fal-ai/sora-2/video-to-video/remix' +} + +export type PostFalAiSora2VideoToVideoRemixResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSora2VideoToVideoRemixResponse = + PostFalAiSora2VideoToVideoRemixResponses[keyof PostFalAiSora2VideoToVideoRemixResponses] + +export type GetFalAiSora2VideoToVideoRemixRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sora-2/video-to-video/remix/requests/{request_id}' +} + +export type GetFalAiSora2VideoToVideoRemixRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSora2VideoToVideoRemixOutput +} + +export type GetFalAiSora2VideoToVideoRemixRequestsByRequestIdResponse = + GetFalAiSora2VideoToVideoRemixRequestsByRequestIdResponses[keyof GetFalAiSora2VideoToVideoRemixRequestsByRequestIdResponses] + +export type GetFalAiWanVaceAppsLongReframeRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan-vace-apps/long-reframe/requests/{request_id}/status' +} + +export type GetFalAiWanVaceAppsLongReframeRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiWanVaceAppsLongReframeRequestsByRequestIdStatusResponse = + GetFalAiWanVaceAppsLongReframeRequestsByRequestIdStatusResponses[keyof GetFalAiWanVaceAppsLongReframeRequestsByRequestIdStatusResponses] + +export type PutFalAiWanVaceAppsLongReframeRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-vace-apps/long-reframe/requests/{request_id}/cancel' +} + +export type PutFalAiWanVaceAppsLongReframeRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiWanVaceAppsLongReframeRequestsByRequestIdCancelResponse = + PutFalAiWanVaceAppsLongReframeRequestsByRequestIdCancelResponses[keyof PutFalAiWanVaceAppsLongReframeRequestsByRequestIdCancelResponses] + +export type PostFalAiWanVaceAppsLongReframeData = { + body: SchemaWanVaceAppsLongReframeInput + path?: never + query?: never + url: '/fal-ai/wan-vace-apps/long-reframe' +} + +export type PostFalAiWanVaceAppsLongReframeResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanVaceAppsLongReframeResponse = + PostFalAiWanVaceAppsLongReframeResponses[keyof PostFalAiWanVaceAppsLongReframeResponses] + +export type GetFalAiWanVaceAppsLongReframeRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-vace-apps/long-reframe/requests/{request_id}' +} + +export type GetFalAiWanVaceAppsLongReframeRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaWanVaceAppsLongReframeOutput +} + +export type GetFalAiWanVaceAppsLongReframeRequestsByRequestIdResponse = + GetFalAiWanVaceAppsLongReframeRequestsByRequestIdResponses[keyof GetFalAiWanVaceAppsLongReframeRequestsByRequestIdResponses] + +export type GetFalAiInfinitalkVideoToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/infinitalk/video-to-video/requests/{request_id}/status' +} + +export type GetFalAiInfinitalkVideoToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiInfinitalkVideoToVideoRequestsByRequestIdStatusResponse = + GetFalAiInfinitalkVideoToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiInfinitalkVideoToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiInfinitalkVideoToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/infinitalk/video-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiInfinitalkVideoToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiInfinitalkVideoToVideoRequestsByRequestIdCancelResponse = + PutFalAiInfinitalkVideoToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiInfinitalkVideoToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiInfinitalkVideoToVideoData = { + body: SchemaInfinitalkVideoToVideoInput + path?: never + query?: never + url: '/fal-ai/infinitalk/video-to-video' +} + +export type PostFalAiInfinitalkVideoToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiInfinitalkVideoToVideoResponse = + PostFalAiInfinitalkVideoToVideoResponses[keyof PostFalAiInfinitalkVideoToVideoResponses] + +export type GetFalAiInfinitalkVideoToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/infinitalk/video-to-video/requests/{request_id}' +} + +export type GetFalAiInfinitalkVideoToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaInfinitalkVideoToVideoOutput +} + +export type GetFalAiInfinitalkVideoToVideoRequestsByRequestIdResponse = + GetFalAiInfinitalkVideoToVideoRequestsByRequestIdResponses[keyof GetFalAiInfinitalkVideoToVideoRequestsByRequestIdResponses] + +export type GetFalAiSeedvrUpscaleVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/seedvr/upscale/video/requests/{request_id}/status' +} + +export type GetFalAiSeedvrUpscaleVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiSeedvrUpscaleVideoRequestsByRequestIdStatusResponse = + GetFalAiSeedvrUpscaleVideoRequestsByRequestIdStatusResponses[keyof GetFalAiSeedvrUpscaleVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiSeedvrUpscaleVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/seedvr/upscale/video/requests/{request_id}/cancel' +} + +export type PutFalAiSeedvrUpscaleVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiSeedvrUpscaleVideoRequestsByRequestIdCancelResponse = + PutFalAiSeedvrUpscaleVideoRequestsByRequestIdCancelResponses[keyof PutFalAiSeedvrUpscaleVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiSeedvrUpscaleVideoData = { + body: SchemaSeedvrUpscaleVideoInput + path?: never + query?: never + url: '/fal-ai/seedvr/upscale/video' +} + +export type PostFalAiSeedvrUpscaleVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSeedvrUpscaleVideoResponse = + PostFalAiSeedvrUpscaleVideoResponses[keyof PostFalAiSeedvrUpscaleVideoResponses] + +export type GetFalAiSeedvrUpscaleVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/seedvr/upscale/video/requests/{request_id}' +} + +export type GetFalAiSeedvrUpscaleVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSeedvrUpscaleVideoOutput +} + +export type GetFalAiSeedvrUpscaleVideoRequestsByRequestIdResponse = + GetFalAiSeedvrUpscaleVideoRequestsByRequestIdResponses[keyof GetFalAiSeedvrUpscaleVideoRequestsByRequestIdResponses] + +export type GetFalAiWanVaceAppsVideoEditRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan-vace-apps/video-edit/requests/{request_id}/status' +} + +export type GetFalAiWanVaceAppsVideoEditRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiWanVaceAppsVideoEditRequestsByRequestIdStatusResponse = + GetFalAiWanVaceAppsVideoEditRequestsByRequestIdStatusResponses[keyof GetFalAiWanVaceAppsVideoEditRequestsByRequestIdStatusResponses] + +export type PutFalAiWanVaceAppsVideoEditRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-vace-apps/video-edit/requests/{request_id}/cancel' +} + +export type PutFalAiWanVaceAppsVideoEditRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiWanVaceAppsVideoEditRequestsByRequestIdCancelResponse = + PutFalAiWanVaceAppsVideoEditRequestsByRequestIdCancelResponses[keyof PutFalAiWanVaceAppsVideoEditRequestsByRequestIdCancelResponses] + +export type PostFalAiWanVaceAppsVideoEditData = { + body: SchemaWanVaceAppsVideoEditInput + path?: never + query?: never + url: '/fal-ai/wan-vace-apps/video-edit' +} + +export type PostFalAiWanVaceAppsVideoEditResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanVaceAppsVideoEditResponse = + PostFalAiWanVaceAppsVideoEditResponses[keyof PostFalAiWanVaceAppsVideoEditResponses] + +export type GetFalAiWanVaceAppsVideoEditRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-vace-apps/video-edit/requests/{request_id}' +} + +export type GetFalAiWanVaceAppsVideoEditRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWanVaceAppsVideoEditOutput +} + +export type GetFalAiWanVaceAppsVideoEditRequestsByRequestIdResponse = + GetFalAiWanVaceAppsVideoEditRequestsByRequestIdResponses[keyof GetFalAiWanVaceAppsVideoEditRequestsByRequestIdResponses] + +export type GetFalAiWanV2214bAnimateReplaceRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan/v2.2-14b/animate/replace/requests/{request_id}/status' +} + +export type GetFalAiWanV2214bAnimateReplaceRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiWanV2214bAnimateReplaceRequestsByRequestIdStatusResponse = + GetFalAiWanV2214bAnimateReplaceRequestsByRequestIdStatusResponses[keyof GetFalAiWanV2214bAnimateReplaceRequestsByRequestIdStatusResponses] + +export type PutFalAiWanV2214bAnimateReplaceRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan/v2.2-14b/animate/replace/requests/{request_id}/cancel' +} + +export type PutFalAiWanV2214bAnimateReplaceRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiWanV2214bAnimateReplaceRequestsByRequestIdCancelResponse = + PutFalAiWanV2214bAnimateReplaceRequestsByRequestIdCancelResponses[keyof PutFalAiWanV2214bAnimateReplaceRequestsByRequestIdCancelResponses] + +export type PostFalAiWanV2214bAnimateReplaceData = { + body: SchemaWanV2214bAnimateReplaceInput + path?: never + query?: never + url: '/fal-ai/wan/v2.2-14b/animate/replace' +} + +export type PostFalAiWanV2214bAnimateReplaceResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanV2214bAnimateReplaceResponse = + PostFalAiWanV2214bAnimateReplaceResponses[keyof PostFalAiWanV2214bAnimateReplaceResponses] + +export type GetFalAiWanV2214bAnimateReplaceRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan/v2.2-14b/animate/replace/requests/{request_id}' +} + +export type GetFalAiWanV2214bAnimateReplaceRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWanV2214bAnimateReplaceOutput +} + +export type GetFalAiWanV2214bAnimateReplaceRequestsByRequestIdResponse = + GetFalAiWanV2214bAnimateReplaceRequestsByRequestIdResponses[keyof GetFalAiWanV2214bAnimateReplaceRequestsByRequestIdResponses] + +export type GetFalAiWanV2214bAnimateMoveRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/wan/v2.2-14b/animate/move/requests/{request_id}/status' +} + +export type GetFalAiWanV2214bAnimateMoveRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiWanV2214bAnimateMoveRequestsByRequestIdStatusResponse = + GetFalAiWanV2214bAnimateMoveRequestsByRequestIdStatusResponses[keyof GetFalAiWanV2214bAnimateMoveRequestsByRequestIdStatusResponses] + +export type PutFalAiWanV2214bAnimateMoveRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan/v2.2-14b/animate/move/requests/{request_id}/cancel' +} + +export type PutFalAiWanV2214bAnimateMoveRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiWanV2214bAnimateMoveRequestsByRequestIdCancelResponse = + PutFalAiWanV2214bAnimateMoveRequestsByRequestIdCancelResponses[keyof PutFalAiWanV2214bAnimateMoveRequestsByRequestIdCancelResponses] + +export type PostFalAiWanV2214bAnimateMoveData = { + body: SchemaWanV2214bAnimateMoveInput + path?: never + query?: never + url: '/fal-ai/wan/v2.2-14b/animate/move' +} + +export type PostFalAiWanV2214bAnimateMoveResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanV2214bAnimateMoveResponse = + PostFalAiWanV2214bAnimateMoveResponses[keyof PostFalAiWanV2214bAnimateMoveResponses] + +export type GetFalAiWanV2214bAnimateMoveRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan/v2.2-14b/animate/move/requests/{request_id}' +} + +export type GetFalAiWanV2214bAnimateMoveRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWanV2214bAnimateMoveOutput +} + +export type GetFalAiWanV2214bAnimateMoveRequestsByRequestIdResponse = + GetFalAiWanV2214bAnimateMoveRequestsByRequestIdResponses[keyof GetFalAiWanV2214bAnimateMoveRequestsByRequestIdResponses] + +export type GetDecartLucyEditProRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/decart/lucy-edit/pro/requests/{request_id}/status' +} + +export type GetDecartLucyEditProRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetDecartLucyEditProRequestsByRequestIdStatusResponse = + GetDecartLucyEditProRequestsByRequestIdStatusResponses[keyof GetDecartLucyEditProRequestsByRequestIdStatusResponses] + +export type PutDecartLucyEditProRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/decart/lucy-edit/pro/requests/{request_id}/cancel' +} + +export type PutDecartLucyEditProRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutDecartLucyEditProRequestsByRequestIdCancelResponse = + PutDecartLucyEditProRequestsByRequestIdCancelResponses[keyof PutDecartLucyEditProRequestsByRequestIdCancelResponses] + +export type PostDecartLucyEditProData = { + body: SchemaLucyEditProInput + path?: never + query?: never + url: '/decart/lucy-edit/pro' +} + +export type PostDecartLucyEditProResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostDecartLucyEditProResponse = + PostDecartLucyEditProResponses[keyof PostDecartLucyEditProResponses] + +export type GetDecartLucyEditProRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/decart/lucy-edit/pro/requests/{request_id}' +} + +export type GetDecartLucyEditProRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLucyEditProOutput +} + +export type GetDecartLucyEditProRequestsByRequestIdResponse = + GetDecartLucyEditProRequestsByRequestIdResponses[keyof GetDecartLucyEditProRequestsByRequestIdResponses] + +export type GetDecartLucyEditDevRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/decart/lucy-edit/dev/requests/{request_id}/status' +} + +export type GetDecartLucyEditDevRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetDecartLucyEditDevRequestsByRequestIdStatusResponse = + GetDecartLucyEditDevRequestsByRequestIdStatusResponses[keyof GetDecartLucyEditDevRequestsByRequestIdStatusResponses] + +export type PutDecartLucyEditDevRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/decart/lucy-edit/dev/requests/{request_id}/cancel' +} + +export type PutDecartLucyEditDevRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutDecartLucyEditDevRequestsByRequestIdCancelResponse = + PutDecartLucyEditDevRequestsByRequestIdCancelResponses[keyof PutDecartLucyEditDevRequestsByRequestIdCancelResponses] + +export type PostDecartLucyEditDevData = { + body: SchemaLucyEditDevInput + path?: never + query?: never + url: '/decart/lucy-edit/dev' +} + +export type PostDecartLucyEditDevResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostDecartLucyEditDevResponse = + PostDecartLucyEditDevResponses[keyof PostDecartLucyEditDevResponses] + +export type GetDecartLucyEditDevRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/decart/lucy-edit/dev/requests/{request_id}' +} + +export type GetDecartLucyEditDevRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLucyEditDevOutput +} + +export type GetDecartLucyEditDevRequestsByRequestIdResponse = + GetDecartLucyEditDevRequestsByRequestIdResponses[keyof GetDecartLucyEditDevRequestsByRequestIdResponses] + +export type GetFalAiWan22VaceFunA14bReframeRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/wan-22-vace-fun-a14b/reframe/requests/{request_id}/status' +} + +export type GetFalAiWan22VaceFunA14bReframeRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiWan22VaceFunA14bReframeRequestsByRequestIdStatusResponse = + GetFalAiWan22VaceFunA14bReframeRequestsByRequestIdStatusResponses[keyof GetFalAiWan22VaceFunA14bReframeRequestsByRequestIdStatusResponses] + +export type PutFalAiWan22VaceFunA14bReframeRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-22-vace-fun-a14b/reframe/requests/{request_id}/cancel' +} + +export type PutFalAiWan22VaceFunA14bReframeRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiWan22VaceFunA14bReframeRequestsByRequestIdCancelResponse = + PutFalAiWan22VaceFunA14bReframeRequestsByRequestIdCancelResponses[keyof PutFalAiWan22VaceFunA14bReframeRequestsByRequestIdCancelResponses] + +export type PostFalAiWan22VaceFunA14bReframeData = { + body: SchemaWan22VaceFunA14bReframeInput + path?: never + query?: never + url: '/fal-ai/wan-22-vace-fun-a14b/reframe' +} + +export type PostFalAiWan22VaceFunA14bReframeResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWan22VaceFunA14bReframeResponse = + PostFalAiWan22VaceFunA14bReframeResponses[keyof PostFalAiWan22VaceFunA14bReframeResponses] + +export type GetFalAiWan22VaceFunA14bReframeRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-22-vace-fun-a14b/reframe/requests/{request_id}' +} + +export type GetFalAiWan22VaceFunA14bReframeRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWan22VaceFunA14bReframeOutput +} + +export type GetFalAiWan22VaceFunA14bReframeRequestsByRequestIdResponse = + GetFalAiWan22VaceFunA14bReframeRequestsByRequestIdResponses[keyof GetFalAiWan22VaceFunA14bReframeRequestsByRequestIdResponses] + +export type GetFalAiWan22VaceFunA14bOutpaintingRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan-22-vace-fun-a14b/outpainting/requests/{request_id}/status' +} + +export type GetFalAiWan22VaceFunA14bOutpaintingRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiWan22VaceFunA14bOutpaintingRequestsByRequestIdStatusResponse = + GetFalAiWan22VaceFunA14bOutpaintingRequestsByRequestIdStatusResponses[keyof GetFalAiWan22VaceFunA14bOutpaintingRequestsByRequestIdStatusResponses] + +export type PutFalAiWan22VaceFunA14bOutpaintingRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-22-vace-fun-a14b/outpainting/requests/{request_id}/cancel' +} + +export type PutFalAiWan22VaceFunA14bOutpaintingRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiWan22VaceFunA14bOutpaintingRequestsByRequestIdCancelResponse = + PutFalAiWan22VaceFunA14bOutpaintingRequestsByRequestIdCancelResponses[keyof PutFalAiWan22VaceFunA14bOutpaintingRequestsByRequestIdCancelResponses] + +export type PostFalAiWan22VaceFunA14bOutpaintingData = { + body: SchemaWan22VaceFunA14bOutpaintingInput + path?: never + query?: never + url: '/fal-ai/wan-22-vace-fun-a14b/outpainting' +} + +export type PostFalAiWan22VaceFunA14bOutpaintingResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWan22VaceFunA14bOutpaintingResponse = + PostFalAiWan22VaceFunA14bOutpaintingResponses[keyof PostFalAiWan22VaceFunA14bOutpaintingResponses] + +export type GetFalAiWan22VaceFunA14bOutpaintingRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-22-vace-fun-a14b/outpainting/requests/{request_id}' +} + +export type GetFalAiWan22VaceFunA14bOutpaintingRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWan22VaceFunA14bOutpaintingOutput +} + +export type GetFalAiWan22VaceFunA14bOutpaintingRequestsByRequestIdResponse = + GetFalAiWan22VaceFunA14bOutpaintingRequestsByRequestIdResponses[keyof GetFalAiWan22VaceFunA14bOutpaintingRequestsByRequestIdResponses] + +export type GetFalAiWan22VaceFunA14bInpaintingRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan-22-vace-fun-a14b/inpainting/requests/{request_id}/status' +} + +export type GetFalAiWan22VaceFunA14bInpaintingRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiWan22VaceFunA14bInpaintingRequestsByRequestIdStatusResponse = + GetFalAiWan22VaceFunA14bInpaintingRequestsByRequestIdStatusResponses[keyof GetFalAiWan22VaceFunA14bInpaintingRequestsByRequestIdStatusResponses] + +export type PutFalAiWan22VaceFunA14bInpaintingRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-22-vace-fun-a14b/inpainting/requests/{request_id}/cancel' +} + +export type PutFalAiWan22VaceFunA14bInpaintingRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiWan22VaceFunA14bInpaintingRequestsByRequestIdCancelResponse = + PutFalAiWan22VaceFunA14bInpaintingRequestsByRequestIdCancelResponses[keyof PutFalAiWan22VaceFunA14bInpaintingRequestsByRequestIdCancelResponses] + +export type PostFalAiWan22VaceFunA14bInpaintingData = { + body: SchemaWan22VaceFunA14bInpaintingInput + path?: never + query?: never + url: '/fal-ai/wan-22-vace-fun-a14b/inpainting' +} + +export type PostFalAiWan22VaceFunA14bInpaintingResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiWan22VaceFunA14bInpaintingResponse = + PostFalAiWan22VaceFunA14bInpaintingResponses[keyof PostFalAiWan22VaceFunA14bInpaintingResponses] + +export type GetFalAiWan22VaceFunA14bInpaintingRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-22-vace-fun-a14b/inpainting/requests/{request_id}' +} + +export type GetFalAiWan22VaceFunA14bInpaintingRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWan22VaceFunA14bInpaintingOutput +} + +export type GetFalAiWan22VaceFunA14bInpaintingRequestsByRequestIdResponse = + GetFalAiWan22VaceFunA14bInpaintingRequestsByRequestIdResponses[keyof GetFalAiWan22VaceFunA14bInpaintingRequestsByRequestIdResponses] + +export type GetFalAiWan22VaceFunA14bDepthRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan-22-vace-fun-a14b/depth/requests/{request_id}/status' +} + +export type GetFalAiWan22VaceFunA14bDepthRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiWan22VaceFunA14bDepthRequestsByRequestIdStatusResponse = + GetFalAiWan22VaceFunA14bDepthRequestsByRequestIdStatusResponses[keyof GetFalAiWan22VaceFunA14bDepthRequestsByRequestIdStatusResponses] + +export type PutFalAiWan22VaceFunA14bDepthRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-22-vace-fun-a14b/depth/requests/{request_id}/cancel' +} + +export type PutFalAiWan22VaceFunA14bDepthRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiWan22VaceFunA14bDepthRequestsByRequestIdCancelResponse = + PutFalAiWan22VaceFunA14bDepthRequestsByRequestIdCancelResponses[keyof PutFalAiWan22VaceFunA14bDepthRequestsByRequestIdCancelResponses] + +export type PostFalAiWan22VaceFunA14bDepthData = { + body: SchemaWan22VaceFunA14bDepthInput + path?: never + query?: never + url: '/fal-ai/wan-22-vace-fun-a14b/depth' +} + +export type PostFalAiWan22VaceFunA14bDepthResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWan22VaceFunA14bDepthResponse = + PostFalAiWan22VaceFunA14bDepthResponses[keyof PostFalAiWan22VaceFunA14bDepthResponses] + +export type GetFalAiWan22VaceFunA14bDepthRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-22-vace-fun-a14b/depth/requests/{request_id}' +} + +export type GetFalAiWan22VaceFunA14bDepthRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWan22VaceFunA14bDepthOutput +} + +export type GetFalAiWan22VaceFunA14bDepthRequestsByRequestIdResponse = + GetFalAiWan22VaceFunA14bDepthRequestsByRequestIdResponses[keyof GetFalAiWan22VaceFunA14bDepthRequestsByRequestIdResponses] + +export type GetFalAiWan22VaceFunA14bPoseRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/wan-22-vace-fun-a14b/pose/requests/{request_id}/status' +} + +export type GetFalAiWan22VaceFunA14bPoseRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiWan22VaceFunA14bPoseRequestsByRequestIdStatusResponse = + GetFalAiWan22VaceFunA14bPoseRequestsByRequestIdStatusResponses[keyof GetFalAiWan22VaceFunA14bPoseRequestsByRequestIdStatusResponses] + +export type PutFalAiWan22VaceFunA14bPoseRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-22-vace-fun-a14b/pose/requests/{request_id}/cancel' +} + +export type PutFalAiWan22VaceFunA14bPoseRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiWan22VaceFunA14bPoseRequestsByRequestIdCancelResponse = + PutFalAiWan22VaceFunA14bPoseRequestsByRequestIdCancelResponses[keyof PutFalAiWan22VaceFunA14bPoseRequestsByRequestIdCancelResponses] + +export type PostFalAiWan22VaceFunA14bPoseData = { + body: SchemaWan22VaceFunA14bPoseInput + path?: never + query?: never + url: '/fal-ai/wan-22-vace-fun-a14b/pose' +} + +export type PostFalAiWan22VaceFunA14bPoseResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWan22VaceFunA14bPoseResponse = + PostFalAiWan22VaceFunA14bPoseResponses[keyof PostFalAiWan22VaceFunA14bPoseResponses] + +export type GetFalAiWan22VaceFunA14bPoseRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-22-vace-fun-a14b/pose/requests/{request_id}' +} + +export type GetFalAiWan22VaceFunA14bPoseRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWan22VaceFunA14bPoseOutput +} + +export type GetFalAiWan22VaceFunA14bPoseRequestsByRequestIdResponse = + GetFalAiWan22VaceFunA14bPoseRequestsByRequestIdResponses[keyof GetFalAiWan22VaceFunA14bPoseRequestsByRequestIdResponses] + +export type GetFalAiHunyuanVideoFoleyRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/hunyuan-video-foley/requests/{request_id}/status' +} + +export type GetFalAiHunyuanVideoFoleyRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiHunyuanVideoFoleyRequestsByRequestIdStatusResponse = + GetFalAiHunyuanVideoFoleyRequestsByRequestIdStatusResponses[keyof GetFalAiHunyuanVideoFoleyRequestsByRequestIdStatusResponses] + +export type PutFalAiHunyuanVideoFoleyRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan-video-foley/requests/{request_id}/cancel' +} + +export type PutFalAiHunyuanVideoFoleyRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiHunyuanVideoFoleyRequestsByRequestIdCancelResponse = + PutFalAiHunyuanVideoFoleyRequestsByRequestIdCancelResponses[keyof PutFalAiHunyuanVideoFoleyRequestsByRequestIdCancelResponses] + +export type PostFalAiHunyuanVideoFoleyData = { + body: SchemaHunyuanVideoFoleyInput + path?: never + query?: never + url: '/fal-ai/hunyuan-video-foley' +} + +export type PostFalAiHunyuanVideoFoleyResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiHunyuanVideoFoleyResponse = + PostFalAiHunyuanVideoFoleyResponses[keyof PostFalAiHunyuanVideoFoleyResponses] + +export type GetFalAiHunyuanVideoFoleyRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan-video-foley/requests/{request_id}' +} + +export type GetFalAiHunyuanVideoFoleyRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaHunyuanVideoFoleyOutput +} + +export type GetFalAiHunyuanVideoFoleyRequestsByRequestIdResponse = + GetFalAiHunyuanVideoFoleyRequestsByRequestIdResponses[keyof GetFalAiHunyuanVideoFoleyRequestsByRequestIdResponses] + +export type GetFalAiSyncLipsyncV2ProRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/sync-lipsync/v2/pro/requests/{request_id}/status' +} + +export type GetFalAiSyncLipsyncV2ProRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSyncLipsyncV2ProRequestsByRequestIdStatusResponse = + GetFalAiSyncLipsyncV2ProRequestsByRequestIdStatusResponses[keyof GetFalAiSyncLipsyncV2ProRequestsByRequestIdStatusResponses] + +export type PutFalAiSyncLipsyncV2ProRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sync-lipsync/v2/pro/requests/{request_id}/cancel' +} + +export type PutFalAiSyncLipsyncV2ProRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiSyncLipsyncV2ProRequestsByRequestIdCancelResponse = + PutFalAiSyncLipsyncV2ProRequestsByRequestIdCancelResponses[keyof PutFalAiSyncLipsyncV2ProRequestsByRequestIdCancelResponses] + +export type PostFalAiSyncLipsyncV2ProData = { + body: SchemaSyncLipsyncV2ProInput + path?: never + query?: never + url: '/fal-ai/sync-lipsync/v2/pro' +} + +export type PostFalAiSyncLipsyncV2ProResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSyncLipsyncV2ProResponse = + PostFalAiSyncLipsyncV2ProResponses[keyof PostFalAiSyncLipsyncV2ProResponses] + +export type GetFalAiSyncLipsyncV2ProRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sync-lipsync/v2/pro/requests/{request_id}' +} + +export type GetFalAiSyncLipsyncV2ProRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSyncLipsyncV2ProOutput +} + +export type GetFalAiSyncLipsyncV2ProRequestsByRequestIdResponse = + GetFalAiSyncLipsyncV2ProRequestsByRequestIdResponses[keyof GetFalAiSyncLipsyncV2ProRequestsByRequestIdResponses]
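
Every endpoint in this generated file repeats the same four-part queue contract: a `Post…Data` shape for submitting work, a `Get…StatusData` shape for polling (with the optional `logs=1` query flag), a `Put…CancelData` shape for cancellation, and a `Get…RequestsByRequestIdData` shape whose 200 body is the model's output schema. The `…Responses[keyof …Responses]` aliases simply collapse each status-code map into a plain union of its response bodies. As a minimal sketch (not part of the generated output), the types just defined for `fal-ai/sync-lipsync/v2/pro` might drive a typed round-trip like the one below; the `https://queue.fal.run` base URL, the `Key` authorization scheme, and the `./types.gen` import path are assumptions here, not something this file guarantees.

// Illustrative sketch only. Assumes these types are emitted as ./types.gen,
// that the queue gateway is https://queue.fal.run, and that FAL_KEY holds a
// fal.ai API key ("Authorization: Key <FAL_KEY>", per the schema's apiKeyAuth).
import type {
  PostFalAiSyncLipsyncV2ProData,
  PostFalAiSyncLipsyncV2ProResponse,
  GetFalAiSyncLipsyncV2ProRequestsByRequestIdStatusResponse,
  GetFalAiSyncLipsyncV2ProRequestsByRequestIdResponse,
  PutFalAiSyncLipsyncV2ProRequestsByRequestIdCancelResponse,
} from './types.gen'

const BASE = 'https://queue.fal.run' // assumed queue gateway
const headers = {
  Authorization: `Key ${process.env.FAL_KEY ?? ''}`,
  'Content-Type': 'application/json',
}

// Submit a request, poll its status until COMPLETED, then fetch the result.
export async function runSyncLipsyncV2Pro(
  input: PostFalAiSyncLipsyncV2ProData['body'], // SchemaSyncLipsyncV2ProInput
): Promise<GetFalAiSyncLipsyncV2ProRequestsByRequestIdResponse> {
  const submitted = await fetch(`${BASE}/fal-ai/sync-lipsync/v2/pro`, {
    method: 'POST',
    headers,
    body: JSON.stringify(input),
  })
  // The POST 200 body is SchemaQueueStatus, which carries the request_id.
  const queued = (await submitted.json()) as PostFalAiSyncLipsyncV2ProResponse

  for (;;) {
    const poll = await fetch(
      `${BASE}/fal-ai/sync-lipsync/v2/pro/requests/${queued.request_id}/status?logs=1`,
      { headers },
    )
    const status =
      (await poll.json()) as GetFalAiSyncLipsyncV2ProRequestsByRequestIdStatusResponse
    if (status.status === 'COMPLETED') break
    await new Promise((resolve) => setTimeout(resolve, 1000)) // fixed 1s backoff
  }

  const result = await fetch(
    `${BASE}/fal-ai/sync-lipsync/v2/pro/requests/${queued.request_id}`,
    { headers },
  )
  // The GET 200 body is SchemaSyncLipsyncV2ProOutput.
  return (await result.json()) as GetFalAiSyncLipsyncV2ProRequestsByRequestIdResponse
}

// Cancellation uses the PUT shape the same way; its 200 body is { success?: boolean }.
export async function cancelSyncLipsyncV2Pro(
  requestId: string,
): Promise<PutFalAiSyncLipsyncV2ProRequestsByRequestIdCancelResponse> {
  const res = await fetch(
    `${BASE}/fal-ai/sync-lipsync/v2/pro/requests/${requestId}/cancel`,
    { method: 'PUT', headers },
  )
  return (await res.json()) as PutFalAiSyncLipsyncV2ProRequestsByRequestIdCancelResponse
}

The same wrapper pattern applies to any other endpoint in this file by swapping the URL strings and the corresponding `Data`/`Response` types, since every model exposes the identical submit/status/result/cancel surface.
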
+ +export type GetFalAiWanFunControlRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan-fun-control/requests/{request_id}/status' +} + +export type GetFalAiWanFunControlRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiWanFunControlRequestsByRequestIdStatusResponse = + GetFalAiWanFunControlRequestsByRequestIdStatusResponses[keyof GetFalAiWanFunControlRequestsByRequestIdStatusResponses] + +export type PutFalAiWanFunControlRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-fun-control/requests/{request_id}/cancel' +} + +export type PutFalAiWanFunControlRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiWanFunControlRequestsByRequestIdCancelResponse = + PutFalAiWanFunControlRequestsByRequestIdCancelResponses[keyof PutFalAiWanFunControlRequestsByRequestIdCancelResponses] + +export type PostFalAiWanFunControlData = { + body: SchemaWanFunControlInput + path?: never + query?: never + url: '/fal-ai/wan-fun-control' +} + +export type PostFalAiWanFunControlResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanFunControlResponse = + PostFalAiWanFunControlResponses[keyof PostFalAiWanFunControlResponses] + +export type GetFalAiWanFunControlRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-fun-control/requests/{request_id}' +} + +export type GetFalAiWanFunControlRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWanFunControlOutput +} + +export type GetFalAiWanFunControlRequestsByRequestIdResponse = + GetFalAiWanFunControlRequestsByRequestIdResponses[keyof GetFalAiWanFunControlRequestsByRequestIdResponses] + +export type GetBriaVideoIncreaseResolutionRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/bria/video/increase-resolution/requests/{request_id}/status' +} + +export type GetBriaVideoIncreaseResolutionRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetBriaVideoIncreaseResolutionRequestsByRequestIdStatusResponse = + GetBriaVideoIncreaseResolutionRequestsByRequestIdStatusResponses[keyof GetBriaVideoIncreaseResolutionRequestsByRequestIdStatusResponses] + +export type PutBriaVideoIncreaseResolutionRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/video/increase-resolution/requests/{request_id}/cancel' +} + +export type PutBriaVideoIncreaseResolutionRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled.
+ */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutBriaVideoIncreaseResolutionRequestsByRequestIdCancelResponse = + PutBriaVideoIncreaseResolutionRequestsByRequestIdCancelResponses[keyof PutBriaVideoIncreaseResolutionRequestsByRequestIdCancelResponses] + +export type PostBriaVideoIncreaseResolutionData = { + body: SchemaVideoIncreaseResolutionInput + path?: never + query?: never + url: '/bria/video/increase-resolution' +} + +export type PostBriaVideoIncreaseResolutionResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostBriaVideoIncreaseResolutionResponse = + PostBriaVideoIncreaseResolutionResponses[keyof PostBriaVideoIncreaseResolutionResponses] + +export type GetBriaVideoIncreaseResolutionRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/bria/video/increase-resolution/requests/{request_id}' +} + +export type GetBriaVideoIncreaseResolutionRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaVideoIncreaseResolutionOutput +} + +export type GetBriaVideoIncreaseResolutionRequestsByRequestIdResponse = + GetBriaVideoIncreaseResolutionRequestsByRequestIdResponses[keyof GetBriaVideoIncreaseResolutionRequestsByRequestIdResponses] + +export type GetFalAiInfinitalkRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/infinitalk/requests/{request_id}/status' +} + +export type GetFalAiInfinitalkRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiInfinitalkRequestsByRequestIdStatusResponse = + GetFalAiInfinitalkRequestsByRequestIdStatusResponses[keyof GetFalAiInfinitalkRequestsByRequestIdStatusResponses] + +export type PutFalAiInfinitalkRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/infinitalk/requests/{request_id}/cancel' +} + +export type PutFalAiInfinitalkRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiInfinitalkRequestsByRequestIdCancelResponse = + PutFalAiInfinitalkRequestsByRequestIdCancelResponses[keyof PutFalAiInfinitalkRequestsByRequestIdCancelResponses] + +export type PostFalAiInfinitalkData = { + body: SchemaInfinitalkInput + path?: never + query?: never + url: '/fal-ai/infinitalk' +} + +export type PostFalAiInfinitalkResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiInfinitalkResponse = + PostFalAiInfinitalkResponses[keyof PostFalAiInfinitalkResponses] + +export type GetFalAiInfinitalkRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/infinitalk/requests/{request_id}' +} + +export type GetFalAiInfinitalkRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaInfinitalkOutput +} + +export type GetFalAiInfinitalkRequestsByRequestIdResponse = + GetFalAiInfinitalkRequestsByRequestIdResponses[keyof GetFalAiInfinitalkRequestsByRequestIdResponses] + +export type GetMireloAiSfxV1VideoToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/mirelo-ai/sfx-v1/video-to-video/requests/{request_id}/status' +} + +export type GetMireloAiSfxV1VideoToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetMireloAiSfxV1VideoToVideoRequestsByRequestIdStatusResponse = + GetMireloAiSfxV1VideoToVideoRequestsByRequestIdStatusResponses[keyof GetMireloAiSfxV1VideoToVideoRequestsByRequestIdStatusResponses] + +export type PutMireloAiSfxV1VideoToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/mirelo-ai/sfx-v1/video-to-video/requests/{request_id}/cancel' +} + +export type PutMireloAiSfxV1VideoToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutMireloAiSfxV1VideoToVideoRequestsByRequestIdCancelResponse = + PutMireloAiSfxV1VideoToVideoRequestsByRequestIdCancelResponses[keyof PutMireloAiSfxV1VideoToVideoRequestsByRequestIdCancelResponses] + +export type PostMireloAiSfxV1VideoToVideoData = { + body: SchemaSfxV1VideoToVideoInput + path?: never + query?: never + url: '/mirelo-ai/sfx-v1/video-to-video' +} + +export type PostMireloAiSfxV1VideoToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostMireloAiSfxV1VideoToVideoResponse = + PostMireloAiSfxV1VideoToVideoResponses[keyof PostMireloAiSfxV1VideoToVideoResponses] + +export type GetMireloAiSfxV1VideoToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/mirelo-ai/sfx-v1/video-to-video/requests/{request_id}' +} + +export type GetMireloAiSfxV1VideoToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSfxV1VideoToVideoOutput +} + +export type GetMireloAiSfxV1VideoToVideoRequestsByRequestIdResponse = + GetMireloAiSfxV1VideoToVideoRequestsByRequestIdResponses[keyof GetMireloAiSfxV1VideoToVideoRequestsByRequestIdResponses] + +export type GetMoonvalleyMareyPoseTransferRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/moonvalley/marey/pose-transfer/requests/{request_id}/status' +} + +export type GetMoonvalleyMareyPoseTransferRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetMoonvalleyMareyPoseTransferRequestsByRequestIdStatusResponse = + GetMoonvalleyMareyPoseTransferRequestsByRequestIdStatusResponses[keyof GetMoonvalleyMareyPoseTransferRequestsByRequestIdStatusResponses] + +export type PutMoonvalleyMareyPoseTransferRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/moonvalley/marey/pose-transfer/requests/{request_id}/cancel' +} + +export type PutMoonvalleyMareyPoseTransferRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutMoonvalleyMareyPoseTransferRequestsByRequestIdCancelResponse = + PutMoonvalleyMareyPoseTransferRequestsByRequestIdCancelResponses[keyof PutMoonvalleyMareyPoseTransferRequestsByRequestIdCancelResponses] + +export type PostMoonvalleyMareyPoseTransferData = { + body: SchemaMareyPoseTransferInput + path?: never + query?: never + url: '/moonvalley/marey/pose-transfer' +} + +export type PostMoonvalleyMareyPoseTransferResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostMoonvalleyMareyPoseTransferResponse = + PostMoonvalleyMareyPoseTransferResponses[keyof PostMoonvalleyMareyPoseTransferResponses] + +export type GetMoonvalleyMareyPoseTransferRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/moonvalley/marey/pose-transfer/requests/{request_id}' +} + +export type GetMoonvalleyMareyPoseTransferRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMareyPoseTransferOutput +} + +export type GetMoonvalleyMareyPoseTransferRequestsByRequestIdResponse = + GetMoonvalleyMareyPoseTransferRequestsByRequestIdResponses[keyof GetMoonvalleyMareyPoseTransferRequestsByRequestIdResponses] + +export type GetMoonvalleyMareyMotionTransferRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/moonvalley/marey/motion-transfer/requests/{request_id}/status' +} + +export type GetMoonvalleyMareyMotionTransferRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetMoonvalleyMareyMotionTransferRequestsByRequestIdStatusResponse = + GetMoonvalleyMareyMotionTransferRequestsByRequestIdStatusResponses[keyof GetMoonvalleyMareyMotionTransferRequestsByRequestIdStatusResponses] + +export type PutMoonvalleyMareyMotionTransferRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/moonvalley/marey/motion-transfer/requests/{request_id}/cancel' +} + +export type PutMoonvalleyMareyMotionTransferRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutMoonvalleyMareyMotionTransferRequestsByRequestIdCancelResponse = + PutMoonvalleyMareyMotionTransferRequestsByRequestIdCancelResponses[keyof PutMoonvalleyMareyMotionTransferRequestsByRequestIdCancelResponses] + +export type PostMoonvalleyMareyMotionTransferData = { + body: SchemaMareyMotionTransferInput + path?: never + query?: never + url: '/moonvalley/marey/motion-transfer' +} + +export type PostMoonvalleyMareyMotionTransferResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostMoonvalleyMareyMotionTransferResponse = + PostMoonvalleyMareyMotionTransferResponses[keyof PostMoonvalleyMareyMotionTransferResponses] + +export type GetMoonvalleyMareyMotionTransferRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/moonvalley/marey/motion-transfer/requests/{request_id}' +} + +export type GetMoonvalleyMareyMotionTransferRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMareyMotionTransferOutput +} + +export type GetMoonvalleyMareyMotionTransferRequestsByRequestIdResponse = + GetMoonvalleyMareyMotionTransferRequestsByRequestIdResponses[keyof GetMoonvalleyMareyMotionTransferRequestsByRequestIdResponses] + +export type GetFalAiFfmpegApiMergeVideosRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ffmpeg-api/merge-videos/requests/{request_id}/status' +} + +export type GetFalAiFfmpegApiMergeVideosRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFfmpegApiMergeVideosRequestsByRequestIdStatusResponse = + GetFalAiFfmpegApiMergeVideosRequestsByRequestIdStatusResponses[keyof GetFalAiFfmpegApiMergeVideosRequestsByRequestIdStatusResponses] + +export type PutFalAiFfmpegApiMergeVideosRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ffmpeg-api/merge-videos/requests/{request_id}/cancel' +} + +export type PutFalAiFfmpegApiMergeVideosRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFfmpegApiMergeVideosRequestsByRequestIdCancelResponse = + PutFalAiFfmpegApiMergeVideosRequestsByRequestIdCancelResponses[keyof PutFalAiFfmpegApiMergeVideosRequestsByRequestIdCancelResponses] + +export type PostFalAiFfmpegApiMergeVideosData = { + body: SchemaFfmpegApiMergeVideosInput + path?: never + query?: never + url: '/fal-ai/ffmpeg-api/merge-videos' +} + +export type PostFalAiFfmpegApiMergeVideosResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFfmpegApiMergeVideosResponse = + PostFalAiFfmpegApiMergeVideosResponses[keyof PostFalAiFfmpegApiMergeVideosResponses] + +export type GetFalAiFfmpegApiMergeVideosRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ffmpeg-api/merge-videos/requests/{request_id}' +} + +export type GetFalAiFfmpegApiMergeVideosRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaFfmpegApiMergeVideosOutput +} + +export type GetFalAiFfmpegApiMergeVideosRequestsByRequestIdResponse = + GetFalAiFfmpegApiMergeVideosRequestsByRequestIdResponses[keyof GetFalAiFfmpegApiMergeVideosRequestsByRequestIdResponses] + +export type GetFalAiWanV22A14bVideoToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan/v2.2-a14b/video-to-video/requests/{request_id}/status' +} + +export type GetFalAiWanV22A14bVideoToVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiWanV22A14bVideoToVideoRequestsByRequestIdStatusResponse = + GetFalAiWanV22A14bVideoToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiWanV22A14bVideoToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiWanV22A14bVideoToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan/v2.2-a14b/video-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiWanV22A14bVideoToVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiWanV22A14bVideoToVideoRequestsByRequestIdCancelResponse = + PutFalAiWanV22A14bVideoToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiWanV22A14bVideoToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiWanV22A14bVideoToVideoData = { + body: SchemaWanV22A14bVideoToVideoInput + path?: never + query?: never + url: '/fal-ai/wan/v2.2-a14b/video-to-video' +} + +export type PostFalAiWanV22A14bVideoToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanV22A14bVideoToVideoResponse = + PostFalAiWanV22A14bVideoToVideoResponses[keyof PostFalAiWanV22A14bVideoToVideoResponses] + +export type GetFalAiWanV22A14bVideoToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan/v2.2-a14b/video-to-video/requests/{request_id}' +} + +export type GetFalAiWanV22A14bVideoToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWanV22A14bVideoToVideoOutput +} + +export type GetFalAiWanV22A14bVideoToVideoRequestsByRequestIdResponse = + GetFalAiWanV22A14bVideoToVideoRequestsByRequestIdResponses[keyof GetFalAiWanV22A14bVideoToVideoRequestsByRequestIdResponses] + +export type GetFalAiLtxv13B098DistilledExtendRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ltxv-13b-098-distilled/extend/requests/{request_id}/status' +} + +export type GetFalAiLtxv13B098DistilledExtendRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiLtxv13B098DistilledExtendRequestsByRequestIdStatusResponse = + GetFalAiLtxv13B098DistilledExtendRequestsByRequestIdStatusResponses[keyof GetFalAiLtxv13B098DistilledExtendRequestsByRequestIdStatusResponses] + +export type PutFalAiLtxv13B098DistilledExtendRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltxv-13b-098-distilled/extend/requests/{request_id}/cancel' +} + +export type PutFalAiLtxv13B098DistilledExtendRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiLtxv13B098DistilledExtendRequestsByRequestIdCancelResponse = + PutFalAiLtxv13B098DistilledExtendRequestsByRequestIdCancelResponses[keyof PutFalAiLtxv13B098DistilledExtendRequestsByRequestIdCancelResponses] + +export type PostFalAiLtxv13B098DistilledExtendData = { + body: SchemaLtxv13B098DistilledExtendInput + path?: never + query?: never + url: '/fal-ai/ltxv-13b-098-distilled/extend' +} + +export type PostFalAiLtxv13B098DistilledExtendResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtxv13B098DistilledExtendResponse = + PostFalAiLtxv13B098DistilledExtendResponses[keyof PostFalAiLtxv13B098DistilledExtendResponses] + +export type GetFalAiLtxv13B098DistilledExtendRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltxv-13b-098-distilled/extend/requests/{request_id}' +} + +export type GetFalAiLtxv13B098DistilledExtendRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLtxv13B098DistilledExtendOutput +} + +export type GetFalAiLtxv13B098DistilledExtendRequestsByRequestIdResponse = + GetFalAiLtxv13B098DistilledExtendRequestsByRequestIdResponses[keyof GetFalAiLtxv13B098DistilledExtendRequestsByRequestIdResponses] + +export type GetFalAiRifeVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/rife/video/requests/{request_id}/status' +} + +export type GetFalAiRifeVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiRifeVideoRequestsByRequestIdStatusResponse = + GetFalAiRifeVideoRequestsByRequestIdStatusResponses[keyof GetFalAiRifeVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiRifeVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/rife/video/requests/{request_id}/cancel' +} + +export type PutFalAiRifeVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiRifeVideoRequestsByRequestIdCancelResponse = + PutFalAiRifeVideoRequestsByRequestIdCancelResponses[keyof PutFalAiRifeVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiRifeVideoData = { + body: SchemaRifeVideoInput + path?: never + query?: never + url: '/fal-ai/rife/video' +} + +export type PostFalAiRifeVideoResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiRifeVideoResponse = + PostFalAiRifeVideoResponses[keyof PostFalAiRifeVideoResponses] + +export type GetFalAiRifeVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/rife/video/requests/{request_id}' +} + +export type GetFalAiRifeVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaRifeVideoOutput +} + +export type GetFalAiRifeVideoRequestsByRequestIdResponse = + GetFalAiRifeVideoRequestsByRequestIdResponses[keyof GetFalAiRifeVideoRequestsByRequestIdResponses] + +export type GetFalAiFilmVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/film/video/requests/{request_id}/status' +} + +export type GetFalAiFilmVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFilmVideoRequestsByRequestIdStatusResponse = + GetFalAiFilmVideoRequestsByRequestIdStatusResponses[keyof GetFalAiFilmVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiFilmVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/film/video/requests/{request_id}/cancel' +} + +export type PutFalAiFilmVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFilmVideoRequestsByRequestIdCancelResponse = + PutFalAiFilmVideoRequestsByRequestIdCancelResponses[keyof PutFalAiFilmVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiFilmVideoData = { + body: SchemaFilmVideoInput + path?: never + query?: never + url: '/fal-ai/film/video' +} + +export type PostFalAiFilmVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFilmVideoResponse = + PostFalAiFilmVideoResponses[keyof PostFalAiFilmVideoResponses] + +export type GetFalAiFilmVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/film/video/requests/{request_id}' +} + +export type GetFalAiFilmVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFilmVideoOutput +} + +export type GetFalAiFilmVideoRequestsByRequestIdResponse = + GetFalAiFilmVideoRequestsByRequestIdResponses[keyof GetFalAiFilmVideoRequestsByRequestIdResponses] + +export type GetFalAiLumaDreamMachineRay2FlashModifyRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/luma-dream-machine/ray-2-flash/modify/requests/{request_id}/status' + } + +export type GetFalAiLumaDreamMachineRay2FlashModifyRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiLumaDreamMachineRay2FlashModifyRequestsByRequestIdStatusResponse = + GetFalAiLumaDreamMachineRay2FlashModifyRequestsByRequestIdStatusResponses[keyof GetFalAiLumaDreamMachineRay2FlashModifyRequestsByRequestIdStatusResponses] + +export type PutFalAiLumaDreamMachineRay2FlashModifyRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/luma-dream-machine/ray-2-flash/modify/requests/{request_id}/cancel' + } + +export type PutFalAiLumaDreamMachineRay2FlashModifyRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiLumaDreamMachineRay2FlashModifyRequestsByRequestIdCancelResponse = + PutFalAiLumaDreamMachineRay2FlashModifyRequestsByRequestIdCancelResponses[keyof PutFalAiLumaDreamMachineRay2FlashModifyRequestsByRequestIdCancelResponses] + +export type PostFalAiLumaDreamMachineRay2FlashModifyData = { + body: SchemaLumaDreamMachineRay2FlashModifyInput + path?: never + query?: never + url: '/fal-ai/luma-dream-machine/ray-2-flash/modify' +} + +export type PostFalAiLumaDreamMachineRay2FlashModifyResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLumaDreamMachineRay2FlashModifyResponse = + PostFalAiLumaDreamMachineRay2FlashModifyResponses[keyof PostFalAiLumaDreamMachineRay2FlashModifyResponses] + +export type GetFalAiLumaDreamMachineRay2FlashModifyRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/luma-dream-machine/ray-2-flash/modify/requests/{request_id}' +} + +export type GetFalAiLumaDreamMachineRay2FlashModifyRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaLumaDreamMachineRay2FlashModifyOutput + } + +export type GetFalAiLumaDreamMachineRay2FlashModifyRequestsByRequestIdResponse = + GetFalAiLumaDreamMachineRay2FlashModifyRequestsByRequestIdResponses[keyof GetFalAiLumaDreamMachineRay2FlashModifyRequestsByRequestIdResponses] + +export type GetFalAiLtxv13B098DistilledMulticonditioningRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ltxv-13b-098-distilled/multiconditioning/requests/{request_id}/status' + } + +export type GetFalAiLtxv13B098DistilledMulticonditioningRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiLtxv13B098DistilledMulticonditioningRequestsByRequestIdStatusResponse = + GetFalAiLtxv13B098DistilledMulticonditioningRequestsByRequestIdStatusResponses[keyof GetFalAiLtxv13B098DistilledMulticonditioningRequestsByRequestIdStatusResponses] + +export type PutFalAiLtxv13B098DistilledMulticonditioningRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltxv-13b-098-distilled/multiconditioning/requests/{request_id}/cancel' + } + +export type PutFalAiLtxv13B098DistilledMulticonditioningRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiLtxv13B098DistilledMulticonditioningRequestsByRequestIdCancelResponse = + PutFalAiLtxv13B098DistilledMulticonditioningRequestsByRequestIdCancelResponses[keyof PutFalAiLtxv13B098DistilledMulticonditioningRequestsByRequestIdCancelResponses] + +export type PostFalAiLtxv13B098DistilledMulticonditioningData = { + body: SchemaLtxv13B098DistilledMulticonditioningInput + path?: never + query?: never + url: '/fal-ai/ltxv-13b-098-distilled/multiconditioning' +} + +export type PostFalAiLtxv13B098DistilledMulticonditioningResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtxv13B098DistilledMulticonditioningResponse = + PostFalAiLtxv13B098DistilledMulticonditioningResponses[keyof PostFalAiLtxv13B098DistilledMulticonditioningResponses] + +export type GetFalAiLtxv13B098DistilledMulticonditioningRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltxv-13b-098-distilled/multiconditioning/requests/{request_id}' + } + +export type GetFalAiLtxv13B098DistilledMulticonditioningRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaLtxv13B098DistilledMulticonditioningOutput + } + +export type GetFalAiLtxv13B098DistilledMulticonditioningRequestsByRequestIdResponse = + GetFalAiLtxv13B098DistilledMulticonditioningRequestsByRequestIdResponses[keyof GetFalAiLtxv13B098DistilledMulticonditioningRequestsByRequestIdResponses] + +export type GetFalAiPixverseSoundEffectsRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pixverse/sound-effects/requests/{request_id}/status' +} + +export type GetFalAiPixverseSoundEffectsRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiPixverseSoundEffectsRequestsByRequestIdStatusResponse = + GetFalAiPixverseSoundEffectsRequestsByRequestIdStatusResponses[keyof GetFalAiPixverseSoundEffectsRequestsByRequestIdStatusResponses] + +export type PutFalAiPixverseSoundEffectsRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/sound-effects/requests/{request_id}/cancel' +} + +export type PutFalAiPixverseSoundEffectsRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiPixverseSoundEffectsRequestsByRequestIdCancelResponse = + PutFalAiPixverseSoundEffectsRequestsByRequestIdCancelResponses[keyof PutFalAiPixverseSoundEffectsRequestsByRequestIdCancelResponses] + +export type PostFalAiPixverseSoundEffectsData = { + body: SchemaPixverseSoundEffectsInput + path?: never + query?: never + url: '/fal-ai/pixverse/sound-effects' +} + +export type PostFalAiPixverseSoundEffectsResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiPixverseSoundEffectsResponse = + PostFalAiPixverseSoundEffectsResponses[keyof PostFalAiPixverseSoundEffectsResponses] + +export type GetFalAiPixverseSoundEffectsRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/sound-effects/requests/{request_id}' +} + +export type GetFalAiPixverseSoundEffectsRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPixverseSoundEffectsOutput +} + +export type GetFalAiPixverseSoundEffectsRequestsByRequestIdResponse = + GetFalAiPixverseSoundEffectsRequestsByRequestIdResponses[keyof GetFalAiPixverseSoundEffectsRequestsByRequestIdResponses] + +export type GetFalAiThinksoundAudioRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/thinksound/audio/requests/{request_id}/status' +} + +export type GetFalAiThinksoundAudioRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiThinksoundAudioRequestsByRequestIdStatusResponse = + GetFalAiThinksoundAudioRequestsByRequestIdStatusResponses[keyof GetFalAiThinksoundAudioRequestsByRequestIdStatusResponses] + +export type PutFalAiThinksoundAudioRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/thinksound/audio/requests/{request_id}/cancel' +} + +export type PutFalAiThinksoundAudioRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiThinksoundAudioRequestsByRequestIdCancelResponse = + PutFalAiThinksoundAudioRequestsByRequestIdCancelResponses[keyof PutFalAiThinksoundAudioRequestsByRequestIdCancelResponses] + +export type PostFalAiThinksoundAudioData = { + body: SchemaThinksoundAudioInput + path?: never + query?: never + url: '/fal-ai/thinksound/audio' +} + +export type PostFalAiThinksoundAudioResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiThinksoundAudioResponse = + PostFalAiThinksoundAudioResponses[keyof PostFalAiThinksoundAudioResponses] + +export type GetFalAiThinksoundAudioRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/thinksound/audio/requests/{request_id}' +} + +export type GetFalAiThinksoundAudioRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaThinksoundAudioOutput +} + +export type GetFalAiThinksoundAudioRequestsByRequestIdResponse = + GetFalAiThinksoundAudioRequestsByRequestIdResponses[keyof GetFalAiThinksoundAudioRequestsByRequestIdResponses] + +export type GetFalAiThinksoundRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/thinksound/requests/{request_id}/status' +} + +export type GetFalAiThinksoundRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiThinksoundRequestsByRequestIdStatusResponse = + GetFalAiThinksoundRequestsByRequestIdStatusResponses[keyof GetFalAiThinksoundRequestsByRequestIdStatusResponses] + +export type PutFalAiThinksoundRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/thinksound/requests/{request_id}/cancel' +} + +export type PutFalAiThinksoundRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiThinksoundRequestsByRequestIdCancelResponse = + PutFalAiThinksoundRequestsByRequestIdCancelResponses[keyof PutFalAiThinksoundRequestsByRequestIdCancelResponses] + +export type PostFalAiThinksoundData = { + body: SchemaThinksoundInput + path?: never + query?: never + url: '/fal-ai/thinksound' +} + +export type PostFalAiThinksoundResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiThinksoundResponse = + PostFalAiThinksoundResponses[keyof PostFalAiThinksoundResponses] + +export type GetFalAiThinksoundRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/thinksound/requests/{request_id}' +} + +export type GetFalAiThinksoundRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaThinksoundOutput +} + +export type GetFalAiThinksoundRequestsByRequestIdResponse = + GetFalAiThinksoundRequestsByRequestIdResponses[keyof GetFalAiThinksoundRequestsByRequestIdResponses] + +export type GetFalAiPixverseExtendFastRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pixverse/extend/fast/requests/{request_id}/status' +} + +export type GetFalAiPixverseExtendFastRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiPixverseExtendFastRequestsByRequestIdStatusResponse = + GetFalAiPixverseExtendFastRequestsByRequestIdStatusResponses[keyof GetFalAiPixverseExtendFastRequestsByRequestIdStatusResponses] + +export type PutFalAiPixverseExtendFastRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/extend/fast/requests/{request_id}/cancel' +} + +export type PutFalAiPixverseExtendFastRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiPixverseExtendFastRequestsByRequestIdCancelResponse = + PutFalAiPixverseExtendFastRequestsByRequestIdCancelResponses[keyof PutFalAiPixverseExtendFastRequestsByRequestIdCancelResponses] + +export type PostFalAiPixverseExtendFastData = { + body: SchemaPixverseExtendFastInput + path?: never + query?: never + url: '/fal-ai/pixverse/extend/fast' +} + +export type PostFalAiPixverseExtendFastResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiPixverseExtendFastResponse = + PostFalAiPixverseExtendFastResponses[keyof PostFalAiPixverseExtendFastResponses] + +export type GetFalAiPixverseExtendFastRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/extend/fast/requests/{request_id}' +} + +export type GetFalAiPixverseExtendFastRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPixverseExtendFastOutput +} + +export type GetFalAiPixverseExtendFastRequestsByRequestIdResponse = + GetFalAiPixverseExtendFastRequestsByRequestIdResponses[keyof GetFalAiPixverseExtendFastRequestsByRequestIdResponses] + +export type GetFalAiPixverseExtendRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pixverse/extend/requests/{request_id}/status' +} + +export type GetFalAiPixverseExtendRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiPixverseExtendRequestsByRequestIdStatusResponse = + GetFalAiPixverseExtendRequestsByRequestIdStatusResponses[keyof GetFalAiPixverseExtendRequestsByRequestIdStatusResponses] + +export type PutFalAiPixverseExtendRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/extend/requests/{request_id}/cancel' +} + +export type PutFalAiPixverseExtendRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiPixverseExtendRequestsByRequestIdCancelResponse = + PutFalAiPixverseExtendRequestsByRequestIdCancelResponses[keyof PutFalAiPixverseExtendRequestsByRequestIdCancelResponses] + +export type PostFalAiPixverseExtendData = { + body: SchemaPixverseExtendInput + path?: never + query?: never + url: '/fal-ai/pixverse/extend' +} + +export type PostFalAiPixverseExtendResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPixverseExtendResponse = + PostFalAiPixverseExtendResponses[keyof PostFalAiPixverseExtendResponses] + +export type GetFalAiPixverseExtendRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/extend/requests/{request_id}' +} + +export type GetFalAiPixverseExtendRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPixverseExtendOutput +} + +export type GetFalAiPixverseExtendRequestsByRequestIdResponse = + GetFalAiPixverseExtendRequestsByRequestIdResponses[keyof GetFalAiPixverseExtendRequestsByRequestIdResponses] + +export type GetFalAiPixverseLipsyncRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pixverse/lipsync/requests/{request_id}/status' +} + +export type GetFalAiPixverseLipsyncRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiPixverseLipsyncRequestsByRequestIdStatusResponse = + GetFalAiPixverseLipsyncRequestsByRequestIdStatusResponses[keyof GetFalAiPixverseLipsyncRequestsByRequestIdStatusResponses] + +export type PutFalAiPixverseLipsyncRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/lipsync/requests/{request_id}/cancel' +} + +export type PutFalAiPixverseLipsyncRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiPixverseLipsyncRequestsByRequestIdCancelResponse = + PutFalAiPixverseLipsyncRequestsByRequestIdCancelResponses[keyof PutFalAiPixverseLipsyncRequestsByRequestIdCancelResponses] + +export type PostFalAiPixverseLipsyncData = { + body: SchemaPixverseLipsyncInput + path?: never + query?: never + url: '/fal-ai/pixverse/lipsync' +} + +export type PostFalAiPixverseLipsyncResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPixverseLipsyncResponse = + PostFalAiPixverseLipsyncResponses[keyof PostFalAiPixverseLipsyncResponses] + +export type GetFalAiPixverseLipsyncRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pixverse/lipsync/requests/{request_id}' +} + +export type GetFalAiPixverseLipsyncRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPixverseLipsyncOutput +} + +export type GetFalAiPixverseLipsyncRequestsByRequestIdResponse = + GetFalAiPixverseLipsyncRequestsByRequestIdResponses[keyof GetFalAiPixverseLipsyncRequestsByRequestIdResponses] + +export type GetFalAiLumaDreamMachineRay2ModifyRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/luma-dream-machine/ray-2/modify/requests/{request_id}/status' +} + +export type GetFalAiLumaDreamMachineRay2ModifyRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiLumaDreamMachineRay2ModifyRequestsByRequestIdStatusResponse = + GetFalAiLumaDreamMachineRay2ModifyRequestsByRequestIdStatusResponses[keyof GetFalAiLumaDreamMachineRay2ModifyRequestsByRequestIdStatusResponses] + +export type PutFalAiLumaDreamMachineRay2ModifyRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/luma-dream-machine/ray-2/modify/requests/{request_id}/cancel' +} + +export type PutFalAiLumaDreamMachineRay2ModifyRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiLumaDreamMachineRay2ModifyRequestsByRequestIdCancelResponse = + PutFalAiLumaDreamMachineRay2ModifyRequestsByRequestIdCancelResponses[keyof PutFalAiLumaDreamMachineRay2ModifyRequestsByRequestIdCancelResponses] + +export type PostFalAiLumaDreamMachineRay2ModifyData = { + body: SchemaLumaDreamMachineRay2ModifyInput + path?: never + query?: never + url: '/fal-ai/luma-dream-machine/ray-2/modify' +} + +export type PostFalAiLumaDreamMachineRay2ModifyResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiLumaDreamMachineRay2ModifyResponse = + PostFalAiLumaDreamMachineRay2ModifyResponses[keyof PostFalAiLumaDreamMachineRay2ModifyResponses] + +export type GetFalAiLumaDreamMachineRay2ModifyRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/luma-dream-machine/ray-2/modify/requests/{request_id}' +} + +export type GetFalAiLumaDreamMachineRay2ModifyRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLumaDreamMachineRay2ModifyOutput +} + +export type GetFalAiLumaDreamMachineRay2ModifyRequestsByRequestIdResponse = + GetFalAiLumaDreamMachineRay2ModifyRequestsByRequestIdResponses[keyof GetFalAiLumaDreamMachineRay2ModifyRequestsByRequestIdResponses] + +export type GetFalAiWanVace14bReframeRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan-vace-14b/reframe/requests/{request_id}/status' +} + +export type GetFalAiWanVace14bReframeRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiWanVace14bReframeRequestsByRequestIdStatusResponse = + GetFalAiWanVace14bReframeRequestsByRequestIdStatusResponses[keyof GetFalAiWanVace14bReframeRequestsByRequestIdStatusResponses] + +export type PutFalAiWanVace14bReframeRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-vace-14b/reframe/requests/{request_id}/cancel' +} + +export type PutFalAiWanVace14bReframeRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiWanVace14bReframeRequestsByRequestIdCancelResponse = + PutFalAiWanVace14bReframeRequestsByRequestIdCancelResponses[keyof PutFalAiWanVace14bReframeRequestsByRequestIdCancelResponses] + +export type PostFalAiWanVace14bReframeData = { + body: SchemaWanVace14bReframeInput + path?: never + query?: never + url: '/fal-ai/wan-vace-14b/reframe' +} + +export type PostFalAiWanVace14bReframeResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanVace14bReframeResponse = + PostFalAiWanVace14bReframeResponses[keyof PostFalAiWanVace14bReframeResponses] + +export type GetFalAiWanVace14bReframeRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-vace-14b/reframe/requests/{request_id}' +} + +export type GetFalAiWanVace14bReframeRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWanVace14bReframeOutput +} + +export type GetFalAiWanVace14bReframeRequestsByRequestIdResponse = + GetFalAiWanVace14bReframeRequestsByRequestIdResponses[keyof GetFalAiWanVace14bReframeRequestsByRequestIdResponses] + +export type GetFalAiWanVace14bOutpaintingRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/wan-vace-14b/outpainting/requests/{request_id}/status' +} + +export type GetFalAiWanVace14bOutpaintingRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiWanVace14bOutpaintingRequestsByRequestIdStatusResponse = + GetFalAiWanVace14bOutpaintingRequestsByRequestIdStatusResponses[keyof GetFalAiWanVace14bOutpaintingRequestsByRequestIdStatusResponses] + +export type PutFalAiWanVace14bOutpaintingRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-vace-14b/outpainting/requests/{request_id}/cancel' +} + +export type PutFalAiWanVace14bOutpaintingRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiWanVace14bOutpaintingRequestsByRequestIdCancelResponse = + PutFalAiWanVace14bOutpaintingRequestsByRequestIdCancelResponses[keyof PutFalAiWanVace14bOutpaintingRequestsByRequestIdCancelResponses] + +export type PostFalAiWanVace14bOutpaintingData = { + body: SchemaWanVace14bOutpaintingInput + path?: never + query?: never + url: '/fal-ai/wan-vace-14b/outpainting' +} + +export type PostFalAiWanVace14bOutpaintingResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanVace14bOutpaintingResponse = + PostFalAiWanVace14bOutpaintingResponses[keyof PostFalAiWanVace14bOutpaintingResponses] + +export type GetFalAiWanVace14bOutpaintingRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-vace-14b/outpainting/requests/{request_id}' +} + +export type GetFalAiWanVace14bOutpaintingRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWanVace14bOutpaintingOutput +} + +export type GetFalAiWanVace14bOutpaintingRequestsByRequestIdResponse = + GetFalAiWanVace14bOutpaintingRequestsByRequestIdResponses[keyof GetFalAiWanVace14bOutpaintingRequestsByRequestIdResponses] + +export type GetFalAiWanVace14bInpaintingRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan-vace-14b/inpainting/requests/{request_id}/status' +} + +export type GetFalAiWanVace14bInpaintingRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiWanVace14bInpaintingRequestsByRequestIdStatusResponse = + GetFalAiWanVace14bInpaintingRequestsByRequestIdStatusResponses[keyof GetFalAiWanVace14bInpaintingRequestsByRequestIdStatusResponses] + +export type PutFalAiWanVace14bInpaintingRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-vace-14b/inpainting/requests/{request_id}/cancel' +} + +export type PutFalAiWanVace14bInpaintingRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiWanVace14bInpaintingRequestsByRequestIdCancelResponse = + PutFalAiWanVace14bInpaintingRequestsByRequestIdCancelResponses[keyof PutFalAiWanVace14bInpaintingRequestsByRequestIdCancelResponses] + +export type PostFalAiWanVace14bInpaintingData = { + body: SchemaWanVace14bInpaintingInput + path?: never + query?: never + url: '/fal-ai/wan-vace-14b/inpainting' +} + +export type PostFalAiWanVace14bInpaintingResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanVace14bInpaintingResponse = + PostFalAiWanVace14bInpaintingResponses[keyof PostFalAiWanVace14bInpaintingResponses] + +export type GetFalAiWanVace14bInpaintingRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-vace-14b/inpainting/requests/{request_id}' +} + +export type GetFalAiWanVace14bInpaintingRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWanVace14bInpaintingOutput +} + +export type GetFalAiWanVace14bInpaintingRequestsByRequestIdResponse = + GetFalAiWanVace14bInpaintingRequestsByRequestIdResponses[keyof GetFalAiWanVace14bInpaintingRequestsByRequestIdResponses] + +export type GetFalAiWanVace14bPoseRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan-vace-14b/pose/requests/{request_id}/status' +} + +export type GetFalAiWanVace14bPoseRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiWanVace14bPoseRequestsByRequestIdStatusResponse = + GetFalAiWanVace14bPoseRequestsByRequestIdStatusResponses[keyof GetFalAiWanVace14bPoseRequestsByRequestIdStatusResponses] + +export type PutFalAiWanVace14bPoseRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-vace-14b/pose/requests/{request_id}/cancel' +} + +export type PutFalAiWanVace14bPoseRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiWanVace14bPoseRequestsByRequestIdCancelResponse = + PutFalAiWanVace14bPoseRequestsByRequestIdCancelResponses[keyof PutFalAiWanVace14bPoseRequestsByRequestIdCancelResponses] + +export type PostFalAiWanVace14bPoseData = { + body: SchemaWanVace14bPoseInput + path?: never + query?: never + url: '/fal-ai/wan-vace-14b/pose' +} + +export type PostFalAiWanVace14bPoseResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanVace14bPoseResponse = + PostFalAiWanVace14bPoseResponses[keyof PostFalAiWanVace14bPoseResponses] + +export type GetFalAiWanVace14bPoseRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-vace-14b/pose/requests/{request_id}' +} + +export type GetFalAiWanVace14bPoseRequestsByRequestIdResponses = { + /** + * Result of the request. 
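+   *
+   * @example
+   * // Hedged sketch of retrieving a finished request with `@fal-ai/client`.
+   * // The endpoint id comes from this file; the request id is a
+   * // hypothetical value returned by an earlier `fal.queue.submit` call.
+   * import { fal } from '@fal-ai/client'
+   *
+   * const requestId = 'xxxx-xxxx' // hypothetical id from submit
+   * const result = await fal.queue.result('fal-ai/wan-vace-14b/pose', {
+   *   requestId,
+   * })
+   * // The payload corresponds to `SchemaWanVace14bPoseOutput` below; to
+   * // cancel instead, the `Put...CancelData` types above describe a PUT
+   * // to the `/cancel` route.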
+ */ + 200: SchemaWanVace14bPoseOutput +} + +export type GetFalAiWanVace14bPoseRequestsByRequestIdResponse = + GetFalAiWanVace14bPoseRequestsByRequestIdResponses[keyof GetFalAiWanVace14bPoseRequestsByRequestIdResponses] + +export type GetFalAiWanVace14bDepthRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan-vace-14b/depth/requests/{request_id}/status' +} + +export type GetFalAiWanVace14bDepthRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiWanVace14bDepthRequestsByRequestIdStatusResponse = + GetFalAiWanVace14bDepthRequestsByRequestIdStatusResponses[keyof GetFalAiWanVace14bDepthRequestsByRequestIdStatusResponses] + +export type PutFalAiWanVace14bDepthRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-vace-14b/depth/requests/{request_id}/cancel' +} + +export type PutFalAiWanVace14bDepthRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiWanVace14bDepthRequestsByRequestIdCancelResponse = + PutFalAiWanVace14bDepthRequestsByRequestIdCancelResponses[keyof PutFalAiWanVace14bDepthRequestsByRequestIdCancelResponses] + +export type PostFalAiWanVace14bDepthData = { + body: SchemaWanVace14bDepthInput + path?: never + query?: never + url: '/fal-ai/wan-vace-14b/depth' +} + +export type PostFalAiWanVace14bDepthResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanVace14bDepthResponse = + PostFalAiWanVace14bDepthResponses[keyof PostFalAiWanVace14bDepthResponses] + +export type GetFalAiWanVace14bDepthRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-vace-14b/depth/requests/{request_id}' +} + +export type GetFalAiWanVace14bDepthRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWanVace14bDepthOutput +} + +export type GetFalAiWanVace14bDepthRequestsByRequestIdResponse = + GetFalAiWanVace14bDepthRequestsByRequestIdResponses[keyof GetFalAiWanVace14bDepthRequestsByRequestIdResponses] + +export type GetFalAiDwposeVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/dwpose/video/requests/{request_id}/status' +} + +export type GetFalAiDwposeVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiDwposeVideoRequestsByRequestIdStatusResponse = + GetFalAiDwposeVideoRequestsByRequestIdStatusResponses[keyof GetFalAiDwposeVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiDwposeVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/dwpose/video/requests/{request_id}/cancel' +} + +export type PutFalAiDwposeVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiDwposeVideoRequestsByRequestIdCancelResponse = + PutFalAiDwposeVideoRequestsByRequestIdCancelResponses[keyof PutFalAiDwposeVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiDwposeVideoData = { + body: SchemaDwposeVideoInput + path?: never + query?: never + url: '/fal-ai/dwpose/video' +} + +export type PostFalAiDwposeVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiDwposeVideoResponse = + PostFalAiDwposeVideoResponses[keyof PostFalAiDwposeVideoResponses] + +export type GetFalAiDwposeVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/dwpose/video/requests/{request_id}' +} + +export type GetFalAiDwposeVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaDwposeVideoOutput +} + +export type GetFalAiDwposeVideoRequestsByRequestIdResponse = + GetFalAiDwposeVideoRequestsByRequestIdResponses[keyof GetFalAiDwposeVideoRequestsByRequestIdResponses] + +export type GetFalAiFfmpegApiMergeAudioVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ffmpeg-api/merge-audio-video/requests/{request_id}/status' +} + +export type GetFalAiFfmpegApiMergeAudioVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFfmpegApiMergeAudioVideoRequestsByRequestIdStatusResponse = + GetFalAiFfmpegApiMergeAudioVideoRequestsByRequestIdStatusResponses[keyof GetFalAiFfmpegApiMergeAudioVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiFfmpegApiMergeAudioVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ffmpeg-api/merge-audio-video/requests/{request_id}/cancel' +} + +export type PutFalAiFfmpegApiMergeAudioVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFfmpegApiMergeAudioVideoRequestsByRequestIdCancelResponse = + PutFalAiFfmpegApiMergeAudioVideoRequestsByRequestIdCancelResponses[keyof PutFalAiFfmpegApiMergeAudioVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiFfmpegApiMergeAudioVideoData = { + body: SchemaFfmpegApiMergeAudioVideoInput + path?: never + query?: never + url: '/fal-ai/ffmpeg-api/merge-audio-video' +} + +export type PostFalAiFfmpegApiMergeAudioVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFfmpegApiMergeAudioVideoResponse = + PostFalAiFfmpegApiMergeAudioVideoResponses[keyof PostFalAiFfmpegApiMergeAudioVideoResponses] + +export type GetFalAiFfmpegApiMergeAudioVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ffmpeg-api/merge-audio-video/requests/{request_id}' +} + +export type GetFalAiFfmpegApiMergeAudioVideoRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaFfmpegApiMergeAudioVideoOutput +} + +export type GetFalAiFfmpegApiMergeAudioVideoRequestsByRequestIdResponse = + GetFalAiFfmpegApiMergeAudioVideoRequestsByRequestIdResponses[keyof GetFalAiFfmpegApiMergeAudioVideoRequestsByRequestIdResponses] + +export type GetFalAiWanVace13bRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan-vace-1-3b/requests/{request_id}/status' +} + +export type GetFalAiWanVace13bRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiWanVace13bRequestsByRequestIdStatusResponse = + GetFalAiWanVace13bRequestsByRequestIdStatusResponses[keyof GetFalAiWanVace13bRequestsByRequestIdStatusResponses] + +export type PutFalAiWanVace13bRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-vace-1-3b/requests/{request_id}/cancel' +} + +export type PutFalAiWanVace13bRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiWanVace13bRequestsByRequestIdCancelResponse = + PutFalAiWanVace13bRequestsByRequestIdCancelResponses[keyof PutFalAiWanVace13bRequestsByRequestIdCancelResponses] + +export type PostFalAiWanVace13bData = { + body: SchemaWanVace13bInput + path?: never + query?: never + url: '/fal-ai/wan-vace-1-3b' +} + +export type PostFalAiWanVace13bResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanVace13bResponse = + PostFalAiWanVace13bResponses[keyof PostFalAiWanVace13bResponses] + +export type GetFalAiWanVace13bRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-vace-1-3b/requests/{request_id}' +} + +export type GetFalAiWanVace13bRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWanVace13bOutput +} + +export type GetFalAiWanVace13bRequestsByRequestIdResponse = + GetFalAiWanVace13bRequestsByRequestIdResponses[keyof GetFalAiWanVace13bRequestsByRequestIdResponses] + +export type GetFalAiLumaDreamMachineRay2FlashReframeRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/luma-dream-machine/ray-2-flash/reframe/requests/{request_id}/status' + } + +export type GetFalAiLumaDreamMachineRay2FlashReframeRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiLumaDreamMachineRay2FlashReframeRequestsByRequestIdStatusResponse = + GetFalAiLumaDreamMachineRay2FlashReframeRequestsByRequestIdStatusResponses[keyof GetFalAiLumaDreamMachineRay2FlashReframeRequestsByRequestIdStatusResponses] + +export type PutFalAiLumaDreamMachineRay2FlashReframeRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/luma-dream-machine/ray-2-flash/reframe/requests/{request_id}/cancel' + } + +export type PutFalAiLumaDreamMachineRay2FlashReframeRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiLumaDreamMachineRay2FlashReframeRequestsByRequestIdCancelResponse = + PutFalAiLumaDreamMachineRay2FlashReframeRequestsByRequestIdCancelResponses[keyof PutFalAiLumaDreamMachineRay2FlashReframeRequestsByRequestIdCancelResponses] + +export type PostFalAiLumaDreamMachineRay2FlashReframeData = { + body: SchemaLumaDreamMachineRay2FlashReframeInput + path?: never + query?: never + url: '/fal-ai/luma-dream-machine/ray-2-flash/reframe' +} + +export type PostFalAiLumaDreamMachineRay2FlashReframeResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLumaDreamMachineRay2FlashReframeResponse = + PostFalAiLumaDreamMachineRay2FlashReframeResponses[keyof PostFalAiLumaDreamMachineRay2FlashReframeResponses] + +export type GetFalAiLumaDreamMachineRay2FlashReframeRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/luma-dream-machine/ray-2-flash/reframe/requests/{request_id}' +} + +export type GetFalAiLumaDreamMachineRay2FlashReframeRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaLumaDreamMachineRay2FlashReframeOutput + } + +export type GetFalAiLumaDreamMachineRay2FlashReframeRequestsByRequestIdResponse = + GetFalAiLumaDreamMachineRay2FlashReframeRequestsByRequestIdResponses[keyof GetFalAiLumaDreamMachineRay2FlashReframeRequestsByRequestIdResponses] + +export type GetFalAiLumaDreamMachineRay2ReframeRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/luma-dream-machine/ray-2/reframe/requests/{request_id}/status' +} + +export type GetFalAiLumaDreamMachineRay2ReframeRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiLumaDreamMachineRay2ReframeRequestsByRequestIdStatusResponse = + GetFalAiLumaDreamMachineRay2ReframeRequestsByRequestIdStatusResponses[keyof GetFalAiLumaDreamMachineRay2ReframeRequestsByRequestIdStatusResponses] + +export type PutFalAiLumaDreamMachineRay2ReframeRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/luma-dream-machine/ray-2/reframe/requests/{request_id}/cancel' +} + +export type PutFalAiLumaDreamMachineRay2ReframeRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiLumaDreamMachineRay2ReframeRequestsByRequestIdCancelResponse = + PutFalAiLumaDreamMachineRay2ReframeRequestsByRequestIdCancelResponses[keyof PutFalAiLumaDreamMachineRay2ReframeRequestsByRequestIdCancelResponses] + +export type PostFalAiLumaDreamMachineRay2ReframeData = { + body: SchemaLumaDreamMachineRay2ReframeInput + path?: never + query?: never + url: '/fal-ai/luma-dream-machine/ray-2/reframe' +} + +export type PostFalAiLumaDreamMachineRay2ReframeResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLumaDreamMachineRay2ReframeResponse = + PostFalAiLumaDreamMachineRay2ReframeResponses[keyof PostFalAiLumaDreamMachineRay2ReframeResponses] + +export type GetFalAiLumaDreamMachineRay2ReframeRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/luma-dream-machine/ray-2/reframe/requests/{request_id}' +} + +export type GetFalAiLumaDreamMachineRay2ReframeRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLumaDreamMachineRay2ReframeOutput +} + +export type GetFalAiLumaDreamMachineRay2ReframeRequestsByRequestIdResponse = + GetFalAiLumaDreamMachineRay2ReframeRequestsByRequestIdResponses[keyof GetFalAiLumaDreamMachineRay2ReframeRequestsByRequestIdResponses] + +export type GetVeedLipsyncRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/veed/lipsync/requests/{request_id}/status' +} + +export type GetVeedLipsyncRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetVeedLipsyncRequestsByRequestIdStatusResponse = + GetVeedLipsyncRequestsByRequestIdStatusResponses[keyof GetVeedLipsyncRequestsByRequestIdStatusResponses] + +export type PutVeedLipsyncRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/veed/lipsync/requests/{request_id}/cancel' +} + +export type PutVeedLipsyncRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutVeedLipsyncRequestsByRequestIdCancelResponse = + PutVeedLipsyncRequestsByRequestIdCancelResponses[keyof PutVeedLipsyncRequestsByRequestIdCancelResponses] + +export type PostVeedLipsyncData = { + body: SchemaLipsyncInput + path?: never + query?: never + url: '/veed/lipsync' +} + +export type PostVeedLipsyncResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostVeedLipsyncResponse = + PostVeedLipsyncResponses[keyof PostVeedLipsyncResponses] + +export type GetVeedLipsyncRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/veed/lipsync/requests/{request_id}' +} + +export type GetVeedLipsyncRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaLipsyncOutput +} + +export type GetVeedLipsyncRequestsByRequestIdResponse = + GetVeedLipsyncRequestsByRequestIdResponses[keyof GetVeedLipsyncRequestsByRequestIdResponses] + +export type GetFalAiWanVace14bRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan-vace-14b/requests/{request_id}/status' +} + +export type GetFalAiWanVace14bRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiWanVace14bRequestsByRequestIdStatusResponse = + GetFalAiWanVace14bRequestsByRequestIdStatusResponses[keyof GetFalAiWanVace14bRequestsByRequestIdStatusResponses] + +export type PutFalAiWanVace14bRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-vace-14b/requests/{request_id}/cancel' +} + +export type PutFalAiWanVace14bRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiWanVace14bRequestsByRequestIdCancelResponse = + PutFalAiWanVace14bRequestsByRequestIdCancelResponses[keyof PutFalAiWanVace14bRequestsByRequestIdCancelResponses] + +export type PostFalAiWanVace14bData = { + body: SchemaWanVace14bInput + path?: never + query?: never + url: '/fal-ai/wan-vace-14b' +} + +export type PostFalAiWanVace14bResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanVace14bResponse = + PostFalAiWanVace14bResponses[keyof PostFalAiWanVace14bResponses] + +export type GetFalAiWanVace14bRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-vace-14b/requests/{request_id}' +} + +export type GetFalAiWanVace14bRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWanVace14bOutput +} + +export type GetFalAiWanVace14bRequestsByRequestIdResponse = + GetFalAiWanVace14bRequestsByRequestIdResponses[keyof GetFalAiWanVace14bRequestsByRequestIdResponses] + +export type GetFalAiLtxVideo13bDistilledExtendRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ltx-video-13b-distilled/extend/requests/{request_id}/status' +} + +export type GetFalAiLtxVideo13bDistilledExtendRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiLtxVideo13bDistilledExtendRequestsByRequestIdStatusResponse = + GetFalAiLtxVideo13bDistilledExtendRequestsByRequestIdStatusResponses[keyof GetFalAiLtxVideo13bDistilledExtendRequestsByRequestIdStatusResponses] + +export type PutFalAiLtxVideo13bDistilledExtendRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-video-13b-distilled/extend/requests/{request_id}/cancel' +} + +export type PutFalAiLtxVideo13bDistilledExtendRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiLtxVideo13bDistilledExtendRequestsByRequestIdCancelResponse = + PutFalAiLtxVideo13bDistilledExtendRequestsByRequestIdCancelResponses[keyof PutFalAiLtxVideo13bDistilledExtendRequestsByRequestIdCancelResponses] + +export type PostFalAiLtxVideo13bDistilledExtendData = { + body: SchemaLtxVideo13bDistilledExtendInput + path?: never + query?: never + url: '/fal-ai/ltx-video-13b-distilled/extend' +} + +export type PostFalAiLtxVideo13bDistilledExtendResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtxVideo13bDistilledExtendResponse = + PostFalAiLtxVideo13bDistilledExtendResponses[keyof PostFalAiLtxVideo13bDistilledExtendResponses] + +export type GetFalAiLtxVideo13bDistilledExtendRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-video-13b-distilled/extend/requests/{request_id}' +} + +export type GetFalAiLtxVideo13bDistilledExtendRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLtxVideo13bDistilledExtendOutput +} + +export type GetFalAiLtxVideo13bDistilledExtendRequestsByRequestIdResponse = + GetFalAiLtxVideo13bDistilledExtendRequestsByRequestIdResponses[keyof GetFalAiLtxVideo13bDistilledExtendRequestsByRequestIdResponses] + +export type GetFalAiLtxVideo13bDistilledMulticonditioningRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ltx-video-13b-distilled/multiconditioning/requests/{request_id}/status' + } + +export type GetFalAiLtxVideo13bDistilledMulticonditioningRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiLtxVideo13bDistilledMulticonditioningRequestsByRequestIdStatusResponse = + GetFalAiLtxVideo13bDistilledMulticonditioningRequestsByRequestIdStatusResponses[keyof GetFalAiLtxVideo13bDistilledMulticonditioningRequestsByRequestIdStatusResponses] + +export type PutFalAiLtxVideo13bDistilledMulticonditioningRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-video-13b-distilled/multiconditioning/requests/{request_id}/cancel' + } + +export type PutFalAiLtxVideo13bDistilledMulticonditioningRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiLtxVideo13bDistilledMulticonditioningRequestsByRequestIdCancelResponse = + PutFalAiLtxVideo13bDistilledMulticonditioningRequestsByRequestIdCancelResponses[keyof PutFalAiLtxVideo13bDistilledMulticonditioningRequestsByRequestIdCancelResponses] + +export type PostFalAiLtxVideo13bDistilledMulticonditioningData = { + body: SchemaLtxVideo13bDistilledMulticonditioningInput + path?: never + query?: never + url: '/fal-ai/ltx-video-13b-distilled/multiconditioning' +} + +export type PostFalAiLtxVideo13bDistilledMulticonditioningResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtxVideo13bDistilledMulticonditioningResponse = + PostFalAiLtxVideo13bDistilledMulticonditioningResponses[keyof PostFalAiLtxVideo13bDistilledMulticonditioningResponses] + +export type GetFalAiLtxVideo13bDistilledMulticonditioningRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-video-13b-distilled/multiconditioning/requests/{request_id}' + } + +export type GetFalAiLtxVideo13bDistilledMulticonditioningRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaLtxVideo13bDistilledMulticonditioningOutput + } + +export type GetFalAiLtxVideo13bDistilledMulticonditioningRequestsByRequestIdResponse = + GetFalAiLtxVideo13bDistilledMulticonditioningRequestsByRequestIdResponses[keyof GetFalAiLtxVideo13bDistilledMulticonditioningRequestsByRequestIdResponses] + +export type GetFalAiLtxVideo13bDevMulticonditioningRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ltx-video-13b-dev/multiconditioning/requests/{request_id}/status' + } + +export type GetFalAiLtxVideo13bDevMulticonditioningRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiLtxVideo13bDevMulticonditioningRequestsByRequestIdStatusResponse = + GetFalAiLtxVideo13bDevMulticonditioningRequestsByRequestIdStatusResponses[keyof GetFalAiLtxVideo13bDevMulticonditioningRequestsByRequestIdStatusResponses] + +export type PutFalAiLtxVideo13bDevMulticonditioningRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-video-13b-dev/multiconditioning/requests/{request_id}/cancel' + } + +export type PutFalAiLtxVideo13bDevMulticonditioningRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiLtxVideo13bDevMulticonditioningRequestsByRequestIdCancelResponse = + PutFalAiLtxVideo13bDevMulticonditioningRequestsByRequestIdCancelResponses[keyof PutFalAiLtxVideo13bDevMulticonditioningRequestsByRequestIdCancelResponses] + +export type PostFalAiLtxVideo13bDevMulticonditioningData = { + body: SchemaLtxVideo13bDevMulticonditioningInput + path?: never + query?: never + url: '/fal-ai/ltx-video-13b-dev/multiconditioning' +} + +export type PostFalAiLtxVideo13bDevMulticonditioningResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtxVideo13bDevMulticonditioningResponse = + PostFalAiLtxVideo13bDevMulticonditioningResponses[keyof PostFalAiLtxVideo13bDevMulticonditioningResponses] + +export type GetFalAiLtxVideo13bDevMulticonditioningRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-video-13b-dev/multiconditioning/requests/{request_id}' +} + +export type GetFalAiLtxVideo13bDevMulticonditioningRequestsByRequestIdResponses = + { + /** + * Result of the request. 
+ */ + 200: SchemaLtxVideo13bDevMulticonditioningOutput + } + +export type GetFalAiLtxVideo13bDevMulticonditioningRequestsByRequestIdResponse = + GetFalAiLtxVideo13bDevMulticonditioningRequestsByRequestIdResponses[keyof GetFalAiLtxVideo13bDevMulticonditioningRequestsByRequestIdResponses] + +export type GetFalAiLtxVideo13bDevExtendRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ltx-video-13b-dev/extend/requests/{request_id}/status' +} + +export type GetFalAiLtxVideo13bDevExtendRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLtxVideo13bDevExtendRequestsByRequestIdStatusResponse = + GetFalAiLtxVideo13bDevExtendRequestsByRequestIdStatusResponses[keyof GetFalAiLtxVideo13bDevExtendRequestsByRequestIdStatusResponses] + +export type PutFalAiLtxVideo13bDevExtendRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-video-13b-dev/extend/requests/{request_id}/cancel' +} + +export type PutFalAiLtxVideo13bDevExtendRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLtxVideo13bDevExtendRequestsByRequestIdCancelResponse = + PutFalAiLtxVideo13bDevExtendRequestsByRequestIdCancelResponses[keyof PutFalAiLtxVideo13bDevExtendRequestsByRequestIdCancelResponses] + +export type PostFalAiLtxVideo13bDevExtendData = { + body: SchemaLtxVideo13bDevExtendInput + path?: never + query?: never + url: '/fal-ai/ltx-video-13b-dev/extend' +} + +export type PostFalAiLtxVideo13bDevExtendResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtxVideo13bDevExtendResponse = + PostFalAiLtxVideo13bDevExtendResponses[keyof PostFalAiLtxVideo13bDevExtendResponses] + +export type GetFalAiLtxVideo13bDevExtendRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-video-13b-dev/extend/requests/{request_id}' +} + +export type GetFalAiLtxVideo13bDevExtendRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLtxVideo13bDevExtendOutput +} + +export type GetFalAiLtxVideo13bDevExtendRequestsByRequestIdResponse = + GetFalAiLtxVideo13bDevExtendRequestsByRequestIdResponses[keyof GetFalAiLtxVideo13bDevExtendRequestsByRequestIdResponses] + +export type GetFalAiLtxVideoLoraMulticonditioningRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ltx-video-lora/multiconditioning/requests/{request_id}/status' + } + +export type GetFalAiLtxVideoLoraMulticonditioningRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiLtxVideoLoraMulticonditioningRequestsByRequestIdStatusResponse = + GetFalAiLtxVideoLoraMulticonditioningRequestsByRequestIdStatusResponses[keyof GetFalAiLtxVideoLoraMulticonditioningRequestsByRequestIdStatusResponses] + +export type PutFalAiLtxVideoLoraMulticonditioningRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-video-lora/multiconditioning/requests/{request_id}/cancel' + } + +export type PutFalAiLtxVideoLoraMulticonditioningRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiLtxVideoLoraMulticonditioningRequestsByRequestIdCancelResponse = + PutFalAiLtxVideoLoraMulticonditioningRequestsByRequestIdCancelResponses[keyof PutFalAiLtxVideoLoraMulticonditioningRequestsByRequestIdCancelResponses] + +export type PostFalAiLtxVideoLoraMulticonditioningData = { + body: SchemaLtxVideoLoraMulticonditioningInput + path?: never + query?: never + url: '/fal-ai/ltx-video-lora/multiconditioning' +} + +export type PostFalAiLtxVideoLoraMulticonditioningResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtxVideoLoraMulticonditioningResponse = + PostFalAiLtxVideoLoraMulticonditioningResponses[keyof PostFalAiLtxVideoLoraMulticonditioningResponses] + +export type GetFalAiLtxVideoLoraMulticonditioningRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-video-lora/multiconditioning/requests/{request_id}' +} + +export type GetFalAiLtxVideoLoraMulticonditioningRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaLtxVideoLoraMulticonditioningOutput + } + +export type GetFalAiLtxVideoLoraMulticonditioningRequestsByRequestIdResponse = + GetFalAiLtxVideoLoraMulticonditioningRequestsByRequestIdResponses[keyof GetFalAiLtxVideoLoraMulticonditioningRequestsByRequestIdResponses] + +export type GetFalAiMagiExtendVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/magi/extend-video/requests/{request_id}/status' +} + +export type GetFalAiMagiExtendVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiMagiExtendVideoRequestsByRequestIdStatusResponse = + GetFalAiMagiExtendVideoRequestsByRequestIdStatusResponses[keyof GetFalAiMagiExtendVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiMagiExtendVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/magi/extend-video/requests/{request_id}/cancel' +} + +export type PutFalAiMagiExtendVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
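+   *
+   * A hedged cancellation sketch; `falPut` is an assumption, not part of
+   * this file.
+   *
+   * @example
+   * declare function falPut(
+   *   data: PutFalAiMagiExtendVideoRequestsByRequestIdCancelData,
+   * ): Promise<PutFalAiMagiExtendVideoRequestsByRequestIdCancelResponse>
+   * const res = await falPut({
+   *   path: { request_id: 'your-request-id' },
+   *   url: '/fal-ai/magi/extend-video/requests/{request_id}/cancel',
+   * })
+   * if (res.success) {
+   *   // the queued extend-video request was cancelled before completion
+   * }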
+ */ + success?: boolean + } +} + +export type PutFalAiMagiExtendVideoRequestsByRequestIdCancelResponse = + PutFalAiMagiExtendVideoRequestsByRequestIdCancelResponses[keyof PutFalAiMagiExtendVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiMagiExtendVideoData = { + body: SchemaMagiExtendVideoInput + path?: never + query?: never + url: '/fal-ai/magi/extend-video' +} + +export type PostFalAiMagiExtendVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMagiExtendVideoResponse = + PostFalAiMagiExtendVideoResponses[keyof PostFalAiMagiExtendVideoResponses] + +export type GetFalAiMagiExtendVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/magi/extend-video/requests/{request_id}' +} + +export type GetFalAiMagiExtendVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMagiExtendVideoOutput +} + +export type GetFalAiMagiExtendVideoRequestsByRequestIdResponse = + GetFalAiMagiExtendVideoRequestsByRequestIdResponses[keyof GetFalAiMagiExtendVideoRequestsByRequestIdResponses] + +export type GetFalAiMagiDistilledExtendVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/magi-distilled/extend-video/requests/{request_id}/status' +} + +export type GetFalAiMagiDistilledExtendVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiMagiDistilledExtendVideoRequestsByRequestIdStatusResponse = + GetFalAiMagiDistilledExtendVideoRequestsByRequestIdStatusResponses[keyof GetFalAiMagiDistilledExtendVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiMagiDistilledExtendVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/magi-distilled/extend-video/requests/{request_id}/cancel' +} + +export type PutFalAiMagiDistilledExtendVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiMagiDistilledExtendVideoRequestsByRequestIdCancelResponse = + PutFalAiMagiDistilledExtendVideoRequestsByRequestIdCancelResponses[keyof PutFalAiMagiDistilledExtendVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiMagiDistilledExtendVideoData = { + body: SchemaMagiDistilledExtendVideoInput + path?: never + query?: never + url: '/fal-ai/magi-distilled/extend-video' +} + +export type PostFalAiMagiDistilledExtendVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMagiDistilledExtendVideoResponse = + PostFalAiMagiDistilledExtendVideoResponses[keyof PostFalAiMagiDistilledExtendVideoResponses] + +export type GetFalAiMagiDistilledExtendVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/magi-distilled/extend-video/requests/{request_id}' +} + +export type GetFalAiMagiDistilledExtendVideoRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaMagiDistilledExtendVideoOutput +} + +export type GetFalAiMagiDistilledExtendVideoRequestsByRequestIdResponse = + GetFalAiMagiDistilledExtendVideoRequestsByRequestIdResponses[keyof GetFalAiMagiDistilledExtendVideoRequestsByRequestIdResponses] + +export type GetFalAiWanVaceRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/wan-vace/requests/{request_id}/status' +} + +export type GetFalAiWanVaceRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiWanVaceRequestsByRequestIdStatusResponse = + GetFalAiWanVaceRequestsByRequestIdStatusResponses[keyof GetFalAiWanVaceRequestsByRequestIdStatusResponses] + +export type PutFalAiWanVaceRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-vace/requests/{request_id}/cancel' +} + +export type PutFalAiWanVaceRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiWanVaceRequestsByRequestIdCancelResponse = + PutFalAiWanVaceRequestsByRequestIdCancelResponses[keyof PutFalAiWanVaceRequestsByRequestIdCancelResponses] + +export type PostFalAiWanVaceData = { + body: SchemaWanVaceInput + path?: never + query?: never + url: '/fal-ai/wan-vace' +} + +export type PostFalAiWanVaceResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiWanVaceResponse = + PostFalAiWanVaceResponses[keyof PostFalAiWanVaceResponses] + +export type GetFalAiWanVaceRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/wan-vace/requests/{request_id}' +} + +export type GetFalAiWanVaceRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaWanVaceOutput +} + +export type GetFalAiWanVaceRequestsByRequestIdResponse = + GetFalAiWanVaceRequestsByRequestIdResponses[keyof GetFalAiWanVaceRequestsByRequestIdResponses] + +export type GetCassetteaiVideoSoundEffectsGeneratorRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/cassetteai/video-sound-effects-generator/requests/{request_id}/status' + } + +export type GetCassetteaiVideoSoundEffectsGeneratorRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetCassetteaiVideoSoundEffectsGeneratorRequestsByRequestIdStatusResponse = + GetCassetteaiVideoSoundEffectsGeneratorRequestsByRequestIdStatusResponses[keyof GetCassetteaiVideoSoundEffectsGeneratorRequestsByRequestIdStatusResponses] + +export type PutCassetteaiVideoSoundEffectsGeneratorRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/cassetteai/video-sound-effects-generator/requests/{request_id}/cancel' + } + +export type PutCassetteaiVideoSoundEffectsGeneratorRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutCassetteaiVideoSoundEffectsGeneratorRequestsByRequestIdCancelResponse = + PutCassetteaiVideoSoundEffectsGeneratorRequestsByRequestIdCancelResponses[keyof PutCassetteaiVideoSoundEffectsGeneratorRequestsByRequestIdCancelResponses] + +export type PostCassetteaiVideoSoundEffectsGeneratorData = { + body: SchemaVideoSoundEffectsGeneratorInput + path?: never + query?: never + url: '/cassetteai/video-sound-effects-generator' +} + +export type PostCassetteaiVideoSoundEffectsGeneratorResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostCassetteaiVideoSoundEffectsGeneratorResponse = + PostCassetteaiVideoSoundEffectsGeneratorResponses[keyof PostCassetteaiVideoSoundEffectsGeneratorResponses] + +export type GetCassetteaiVideoSoundEffectsGeneratorRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/cassetteai/video-sound-effects-generator/requests/{request_id}' +} + +export type GetCassetteaiVideoSoundEffectsGeneratorRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaVideoSoundEffectsGeneratorOutput + } + +export type GetCassetteaiVideoSoundEffectsGeneratorRequestsByRequestIdResponse = + GetCassetteaiVideoSoundEffectsGeneratorRequestsByRequestIdResponses[keyof GetCassetteaiVideoSoundEffectsGeneratorRequestsByRequestIdResponses] + +export type GetFalAiSyncLipsyncV2RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/sync-lipsync/v2/requests/{request_id}/status' +} + +export type GetFalAiSyncLipsyncV2RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSyncLipsyncV2RequestsByRequestIdStatusResponse = + GetFalAiSyncLipsyncV2RequestsByRequestIdStatusResponses[keyof GetFalAiSyncLipsyncV2RequestsByRequestIdStatusResponses] + +export type PutFalAiSyncLipsyncV2RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sync-lipsync/v2/requests/{request_id}/cancel' +} + +export type PutFalAiSyncLipsyncV2RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiSyncLipsyncV2RequestsByRequestIdCancelResponse = + PutFalAiSyncLipsyncV2RequestsByRequestIdCancelResponses[keyof PutFalAiSyncLipsyncV2RequestsByRequestIdCancelResponses] + +export type PostFalAiSyncLipsyncV2Data = { + body: SchemaSyncLipsyncV2Input + path?: never + query?: never + url: '/fal-ai/sync-lipsync/v2' +} + +export type PostFalAiSyncLipsyncV2Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSyncLipsyncV2Response = + PostFalAiSyncLipsyncV2Responses[keyof PostFalAiSyncLipsyncV2Responses] + +export type GetFalAiSyncLipsyncV2RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sync-lipsync/v2/requests/{request_id}' +} + +export type GetFalAiSyncLipsyncV2RequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaSyncLipsyncV2Output +} + +export type GetFalAiSyncLipsyncV2RequestsByRequestIdResponse = + GetFalAiSyncLipsyncV2RequestsByRequestIdResponses[keyof GetFalAiSyncLipsyncV2RequestsByRequestIdResponses] + +export type GetFalAiLatentsyncRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/latentsync/requests/{request_id}/status' +} + +export type GetFalAiLatentsyncRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLatentsyncRequestsByRequestIdStatusResponse = + GetFalAiLatentsyncRequestsByRequestIdStatusResponses[keyof GetFalAiLatentsyncRequestsByRequestIdStatusResponses] + +export type PutFalAiLatentsyncRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/latentsync/requests/{request_id}/cancel' +} + +export type PutFalAiLatentsyncRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLatentsyncRequestsByRequestIdCancelResponse = + PutFalAiLatentsyncRequestsByRequestIdCancelResponses[keyof PutFalAiLatentsyncRequestsByRequestIdCancelResponses] + +export type PostFalAiLatentsyncData = { + body: SchemaLatentsyncInput + path?: never + query?: never + url: '/fal-ai/latentsync' +} + +export type PostFalAiLatentsyncResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLatentsyncResponse = + PostFalAiLatentsyncResponses[keyof PostFalAiLatentsyncResponses] + +export type GetFalAiLatentsyncRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/latentsync/requests/{request_id}' +} + +export type GetFalAiLatentsyncRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLatentsyncOutput +} + +export type GetFalAiLatentsyncRequestsByRequestIdResponse = + GetFalAiLatentsyncRequestsByRequestIdResponses[keyof GetFalAiLatentsyncRequestsByRequestIdResponses] + +export type GetFalAiPikaV2PikadditionsRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/pika/v2/pikadditions/requests/{request_id}/status' +} + +export type GetFalAiPikaV2PikadditionsRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiPikaV2PikadditionsRequestsByRequestIdStatusResponse = + GetFalAiPikaV2PikadditionsRequestsByRequestIdStatusResponses[keyof GetFalAiPikaV2PikadditionsRequestsByRequestIdStatusResponses] + +export type PutFalAiPikaV2PikadditionsRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pika/v2/pikadditions/requests/{request_id}/cancel' +} + +export type PutFalAiPikaV2PikadditionsRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiPikaV2PikadditionsRequestsByRequestIdCancelResponse = + PutFalAiPikaV2PikadditionsRequestsByRequestIdCancelResponses[keyof PutFalAiPikaV2PikadditionsRequestsByRequestIdCancelResponses] + +export type PostFalAiPikaV2PikadditionsData = { + body: SchemaPikaV2PikadditionsInput + path?: never + query?: never + url: '/fal-ai/pika/v2/pikadditions' +} + +export type PostFalAiPikaV2PikadditionsResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiPikaV2PikadditionsResponse = + PostFalAiPikaV2PikadditionsResponses[keyof PostFalAiPikaV2PikadditionsResponses] + +export type GetFalAiPikaV2PikadditionsRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/pika/v2/pikadditions/requests/{request_id}' +} + +export type GetFalAiPikaV2PikadditionsRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaPikaV2PikadditionsOutput +} + +export type GetFalAiPikaV2PikadditionsRequestsByRequestIdResponse = + GetFalAiPikaV2PikadditionsRequestsByRequestIdResponses[keyof GetFalAiPikaV2PikadditionsRequestsByRequestIdResponses] + +export type GetFalAiLtxVideoV095MulticonditioningRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ltx-video-v095/multiconditioning/requests/{request_id}/status' + } + +export type GetFalAiLtxVideoV095MulticonditioningRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiLtxVideoV095MulticonditioningRequestsByRequestIdStatusResponse = + GetFalAiLtxVideoV095MulticonditioningRequestsByRequestIdStatusResponses[keyof GetFalAiLtxVideoV095MulticonditioningRequestsByRequestIdStatusResponses] + +export type PutFalAiLtxVideoV095MulticonditioningRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-video-v095/multiconditioning/requests/{request_id}/cancel' + } + +export type PutFalAiLtxVideoV095MulticonditioningRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiLtxVideoV095MulticonditioningRequestsByRequestIdCancelResponse = + PutFalAiLtxVideoV095MulticonditioningRequestsByRequestIdCancelResponses[keyof PutFalAiLtxVideoV095MulticonditioningRequestsByRequestIdCancelResponses] + +export type PostFalAiLtxVideoV095MulticonditioningData = { + body: SchemaLtxVideoV095MulticonditioningInput + path?: never + query?: never + url: '/fal-ai/ltx-video-v095/multiconditioning' +} + +export type PostFalAiLtxVideoV095MulticonditioningResponses = { + /** + * The request status. 
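+   *
+   * A hedged submission sketch; `falPost` and `input` are assumptions
+   * standing in for the real client and a fully populated input payload.
+   *
+   * @example
+   * declare const input: SchemaLtxVideoV095MulticonditioningInput
+   * declare function falPost(
+   *   data: PostFalAiLtxVideoV095MulticonditioningData,
+   * ): Promise<PostFalAiLtxVideoV095MulticonditioningResponse>
+   * const queued = await falPost({
+   *   body: input,
+   *   url: '/fal-ai/ltx-video-v095/multiconditioning',
+   * })
+   * // queued.request_id feeds the status, result, and cancel endpoints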
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtxVideoV095MulticonditioningResponse = + PostFalAiLtxVideoV095MulticonditioningResponses[keyof PostFalAiLtxVideoV095MulticonditioningResponses] + +export type GetFalAiLtxVideoV095MulticonditioningRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-video-v095/multiconditioning/requests/{request_id}' +} + +export type GetFalAiLtxVideoV095MulticonditioningRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaLtxVideoV095MulticonditioningOutput + } + +export type GetFalAiLtxVideoV095MulticonditioningRequestsByRequestIdResponse = + GetFalAiLtxVideoV095MulticonditioningRequestsByRequestIdResponses[keyof GetFalAiLtxVideoV095MulticonditioningRequestsByRequestIdResponses] + +export type GetFalAiLtxVideoV095ExtendRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ltx-video-v095/extend/requests/{request_id}/status' +} + +export type GetFalAiLtxVideoV095ExtendRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLtxVideoV095ExtendRequestsByRequestIdStatusResponse = + GetFalAiLtxVideoV095ExtendRequestsByRequestIdStatusResponses[keyof GetFalAiLtxVideoV095ExtendRequestsByRequestIdStatusResponses] + +export type PutFalAiLtxVideoV095ExtendRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-video-v095/extend/requests/{request_id}/cancel' +} + +export type PutFalAiLtxVideoV095ExtendRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLtxVideoV095ExtendRequestsByRequestIdCancelResponse = + PutFalAiLtxVideoV095ExtendRequestsByRequestIdCancelResponses[keyof PutFalAiLtxVideoV095ExtendRequestsByRequestIdCancelResponses] + +export type PostFalAiLtxVideoV095ExtendData = { + body: SchemaLtxVideoV095ExtendInput + path?: never + query?: never + url: '/fal-ai/ltx-video-v095/extend' +} + +export type PostFalAiLtxVideoV095ExtendResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLtxVideoV095ExtendResponse = + PostFalAiLtxVideoV095ExtendResponses[keyof PostFalAiLtxVideoV095ExtendResponses] + +export type GetFalAiLtxVideoV095ExtendRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ltx-video-v095/extend/requests/{request_id}' +} + +export type GetFalAiLtxVideoV095ExtendRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaLtxVideoV095ExtendOutput +} + +export type GetFalAiLtxVideoV095ExtendRequestsByRequestIdResponse = + GetFalAiLtxVideoV095ExtendRequestsByRequestIdResponses[keyof GetFalAiLtxVideoV095ExtendRequestsByRequestIdResponses] + +export type GetFalAiTopazUpscaleVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
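+   *
+   * A hedged polling sketch; `falGet` is an assumed helper, not part of
+   * this file.
+   *
+   * @example
+   * declare function falGet(
+   *   data: GetFalAiTopazUpscaleVideoRequestsByRequestIdStatusData,
+   * ): Promise<GetFalAiTopazUpscaleVideoRequestsByRequestIdStatusResponse>
+   * const status = await falGet({
+   *   path: { request_id: 'your-request-id' },
+   *   query: { logs: 1 }, // ask the queue to include logs in the payload
+   *   url: '/fal-ai/topaz/upscale/video/requests/{request_id}/status',
+   * })
+   * // status.status is 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED'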
+ */ + logs?: number + } + url: '/fal-ai/topaz/upscale/video/requests/{request_id}/status' +} + +export type GetFalAiTopazUpscaleVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiTopazUpscaleVideoRequestsByRequestIdStatusResponse = + GetFalAiTopazUpscaleVideoRequestsByRequestIdStatusResponses[keyof GetFalAiTopazUpscaleVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiTopazUpscaleVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/topaz/upscale/video/requests/{request_id}/cancel' +} + +export type PutFalAiTopazUpscaleVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiTopazUpscaleVideoRequestsByRequestIdCancelResponse = + PutFalAiTopazUpscaleVideoRequestsByRequestIdCancelResponses[keyof PutFalAiTopazUpscaleVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiTopazUpscaleVideoData = { + body: SchemaTopazUpscaleVideoInput + path?: never + query?: never + url: '/fal-ai/topaz/upscale/video' +} + +export type PostFalAiTopazUpscaleVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiTopazUpscaleVideoResponse = + PostFalAiTopazUpscaleVideoResponses[keyof PostFalAiTopazUpscaleVideoResponses] + +export type GetFalAiTopazUpscaleVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/topaz/upscale/video/requests/{request_id}' +} + +export type GetFalAiTopazUpscaleVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaTopazUpscaleVideoOutput +} + +export type GetFalAiTopazUpscaleVideoRequestsByRequestIdResponse = + GetFalAiTopazUpscaleVideoRequestsByRequestIdResponses[keyof GetFalAiTopazUpscaleVideoRequestsByRequestIdResponses] + +export type GetFalAiBenV2VideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ben/v2/video/requests/{request_id}/status' +} + +export type GetFalAiBenV2VideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiBenV2VideoRequestsByRequestIdStatusResponse = + GetFalAiBenV2VideoRequestsByRequestIdStatusResponses[keyof GetFalAiBenV2VideoRequestsByRequestIdStatusResponses] + +export type PutFalAiBenV2VideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ben/v2/video/requests/{request_id}/cancel' +} + +export type PutFalAiBenV2VideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiBenV2VideoRequestsByRequestIdCancelResponse = + PutFalAiBenV2VideoRequestsByRequestIdCancelResponses[keyof PutFalAiBenV2VideoRequestsByRequestIdCancelResponses] + +export type PostFalAiBenV2VideoData = { + body: SchemaBenV2VideoInput + path?: never + query?: never + url: '/fal-ai/ben/v2/video' +} + +export type PostFalAiBenV2VideoResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiBenV2VideoResponse = + PostFalAiBenV2VideoResponses[keyof PostFalAiBenV2VideoResponses] + +export type GetFalAiBenV2VideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ben/v2/video/requests/{request_id}' +} + +export type GetFalAiBenV2VideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaBenV2VideoOutput +} + +export type GetFalAiBenV2VideoRequestsByRequestIdResponse = + GetFalAiBenV2VideoRequestsByRequestIdResponses[keyof GetFalAiBenV2VideoRequestsByRequestIdResponses] + +export type GetFalAiHunyuanVideoVideoToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/hunyuan-video/video-to-video/requests/{request_id}/status' +} + +export type GetFalAiHunyuanVideoVideoToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiHunyuanVideoVideoToVideoRequestsByRequestIdStatusResponse = + GetFalAiHunyuanVideoVideoToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiHunyuanVideoVideoToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiHunyuanVideoVideoToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan-video/video-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiHunyuanVideoVideoToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiHunyuanVideoVideoToVideoRequestsByRequestIdCancelResponse = + PutFalAiHunyuanVideoVideoToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiHunyuanVideoVideoToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiHunyuanVideoVideoToVideoData = { + body: SchemaHunyuanVideoVideoToVideoInput + path?: never + query?: never + url: '/fal-ai/hunyuan-video/video-to-video' +} + +export type PostFalAiHunyuanVideoVideoToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiHunyuanVideoVideoToVideoResponse = + PostFalAiHunyuanVideoVideoToVideoResponses[keyof PostFalAiHunyuanVideoVideoToVideoResponses] + +export type GetFalAiHunyuanVideoVideoToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan-video/video-to-video/requests/{request_id}' +} + +export type GetFalAiHunyuanVideoVideoToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaHunyuanVideoVideoToVideoOutput +} + +export type GetFalAiHunyuanVideoVideoToVideoRequestsByRequestIdResponse = + GetFalAiHunyuanVideoVideoToVideoRequestsByRequestIdResponses[keyof GetFalAiHunyuanVideoVideoToVideoRequestsByRequestIdResponses] + +export type GetFalAiHunyuanVideoLoraVideoToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/hunyuan-video-lora/video-to-video/requests/{request_id}/status' + } + +export type GetFalAiHunyuanVideoLoraVideoToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiHunyuanVideoLoraVideoToVideoRequestsByRequestIdStatusResponse = + GetFalAiHunyuanVideoLoraVideoToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiHunyuanVideoLoraVideoToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiHunyuanVideoLoraVideoToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan-video-lora/video-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiHunyuanVideoLoraVideoToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiHunyuanVideoLoraVideoToVideoRequestsByRequestIdCancelResponse = + PutFalAiHunyuanVideoLoraVideoToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiHunyuanVideoLoraVideoToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiHunyuanVideoLoraVideoToVideoData = { + body: SchemaHunyuanVideoLoraVideoToVideoInput + path?: never + query?: never + url: '/fal-ai/hunyuan-video-lora/video-to-video' +} + +export type PostFalAiHunyuanVideoLoraVideoToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiHunyuanVideoLoraVideoToVideoResponse = + PostFalAiHunyuanVideoLoraVideoToVideoResponses[keyof PostFalAiHunyuanVideoLoraVideoToVideoResponses] + +export type GetFalAiHunyuanVideoLoraVideoToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/hunyuan-video-lora/video-to-video/requests/{request_id}' +} + +export type GetFalAiHunyuanVideoLoraVideoToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaHunyuanVideoLoraVideoToVideoOutput +} + +export type GetFalAiHunyuanVideoLoraVideoToVideoRequestsByRequestIdResponse = + GetFalAiHunyuanVideoLoraVideoToVideoRequestsByRequestIdResponses[keyof GetFalAiHunyuanVideoLoraVideoToVideoRequestsByRequestIdResponses] + +export type GetFalAiFfmpegApiComposeRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/ffmpeg-api/compose/requests/{request_id}/status' +} + +export type GetFalAiFfmpegApiComposeRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFfmpegApiComposeRequestsByRequestIdStatusResponse = + GetFalAiFfmpegApiComposeRequestsByRequestIdStatusResponses[keyof GetFalAiFfmpegApiComposeRequestsByRequestIdStatusResponses] + +export type PutFalAiFfmpegApiComposeRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ffmpeg-api/compose/requests/{request_id}/cancel' +} + +export type PutFalAiFfmpegApiComposeRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiFfmpegApiComposeRequestsByRequestIdCancelResponse = + PutFalAiFfmpegApiComposeRequestsByRequestIdCancelResponses[keyof PutFalAiFfmpegApiComposeRequestsByRequestIdCancelResponses] + +export type PostFalAiFfmpegApiComposeData = { + body: SchemaFfmpegApiComposeInput + path?: never + query?: never + url: '/fal-ai/ffmpeg-api/compose' +} + +export type PostFalAiFfmpegApiComposeResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFfmpegApiComposeResponse = + PostFalAiFfmpegApiComposeResponses[keyof PostFalAiFfmpegApiComposeResponses] + +export type GetFalAiFfmpegApiComposeRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/ffmpeg-api/compose/requests/{request_id}' +} + +export type GetFalAiFfmpegApiComposeRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFfmpegApiComposeOutput +} + +export type GetFalAiFfmpegApiComposeRequestsByRequestIdResponse = + GetFalAiFfmpegApiComposeRequestsByRequestIdResponses[keyof GetFalAiFfmpegApiComposeRequestsByRequestIdResponses] + +export type GetFalAiSyncLipsyncRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/sync-lipsync/requests/{request_id}/status' +} + +export type GetFalAiSyncLipsyncRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSyncLipsyncRequestsByRequestIdStatusResponse = + GetFalAiSyncLipsyncRequestsByRequestIdStatusResponses[keyof GetFalAiSyncLipsyncRequestsByRequestIdStatusResponses] + +export type PutFalAiSyncLipsyncRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sync-lipsync/requests/{request_id}/cancel' +} + +export type PutFalAiSyncLipsyncRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiSyncLipsyncRequestsByRequestIdCancelResponse = + PutFalAiSyncLipsyncRequestsByRequestIdCancelResponses[keyof PutFalAiSyncLipsyncRequestsByRequestIdCancelResponses] + +export type PostFalAiSyncLipsyncData = { + body: SchemaSyncLipsyncInput + path?: never + query?: never + url: '/fal-ai/sync-lipsync' +} + +export type PostFalAiSyncLipsyncResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSyncLipsyncResponse = + PostFalAiSyncLipsyncResponses[keyof PostFalAiSyncLipsyncResponses] + +export type GetFalAiSyncLipsyncRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sync-lipsync/requests/{request_id}' +} + +export type GetFalAiSyncLipsyncRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaSyncLipsyncOutput +} + +export type GetFalAiSyncLipsyncRequestsByRequestIdResponse = + GetFalAiSyncLipsyncRequestsByRequestIdResponses[keyof GetFalAiSyncLipsyncRequestsByRequestIdResponses] + +export type GetFalAiAutoCaptionRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/auto-caption/requests/{request_id}/status' +} + +export type GetFalAiAutoCaptionRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiAutoCaptionRequestsByRequestIdStatusResponse = + GetFalAiAutoCaptionRequestsByRequestIdStatusResponses[keyof GetFalAiAutoCaptionRequestsByRequestIdStatusResponses] + +export type PutFalAiAutoCaptionRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/auto-caption/requests/{request_id}/cancel' +} + +export type PutFalAiAutoCaptionRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiAutoCaptionRequestsByRequestIdCancelResponse = + PutFalAiAutoCaptionRequestsByRequestIdCancelResponses[keyof PutFalAiAutoCaptionRequestsByRequestIdCancelResponses] + +export type PostFalAiAutoCaptionData = { + body: SchemaAutoCaptionInput + path?: never + query?: never + url: '/fal-ai/auto-caption' +} + +export type PostFalAiAutoCaptionResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiAutoCaptionResponse = + PostFalAiAutoCaptionResponses[keyof PostFalAiAutoCaptionResponses] + +export type GetFalAiAutoCaptionRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/auto-caption/requests/{request_id}' +} + +export type GetFalAiAutoCaptionRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaAutoCaptionOutput +} + +export type GetFalAiAutoCaptionRequestsByRequestIdResponse = + GetFalAiAutoCaptionRequestsByRequestIdResponses[keyof GetFalAiAutoCaptionRequestsByRequestIdResponses] + +export type GetFalAiDubbingRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/dubbing/requests/{request_id}/status' +} + +export type GetFalAiDubbingRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiDubbingRequestsByRequestIdStatusResponse = + GetFalAiDubbingRequestsByRequestIdStatusResponses[keyof GetFalAiDubbingRequestsByRequestIdStatusResponses] + +export type PutFalAiDubbingRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/dubbing/requests/{request_id}/cancel' +} + +export type PutFalAiDubbingRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiDubbingRequestsByRequestIdCancelResponse = + PutFalAiDubbingRequestsByRequestIdCancelResponses[keyof PutFalAiDubbingRequestsByRequestIdCancelResponses] + +export type PostFalAiDubbingData = { + body: SchemaDubbingInput + path?: never + query?: never + url: '/fal-ai/dubbing' +} + +export type PostFalAiDubbingResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiDubbingResponse = + PostFalAiDubbingResponses[keyof PostFalAiDubbingResponses] + +export type GetFalAiDubbingRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/dubbing/requests/{request_id}' +} + +export type GetFalAiDubbingRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaDubbingOutput +} + +export type GetFalAiDubbingRequestsByRequestIdResponse = + GetFalAiDubbingRequestsByRequestIdResponses[keyof GetFalAiDubbingRequestsByRequestIdResponses] + +export type GetFalAiVideoUpscalerRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/video-upscaler/requests/{request_id}/status' +} + +export type GetFalAiVideoUpscalerRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiVideoUpscalerRequestsByRequestIdStatusResponse = + GetFalAiVideoUpscalerRequestsByRequestIdStatusResponses[keyof GetFalAiVideoUpscalerRequestsByRequestIdStatusResponses] + +export type PutFalAiVideoUpscalerRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/video-upscaler/requests/{request_id}/cancel' +} + +export type PutFalAiVideoUpscalerRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiVideoUpscalerRequestsByRequestIdCancelResponse = + PutFalAiVideoUpscalerRequestsByRequestIdCancelResponses[keyof PutFalAiVideoUpscalerRequestsByRequestIdCancelResponses] + +export type PostFalAiVideoUpscalerData = { + body: SchemaVideoUpscalerInput + path?: never + query?: never + url: '/fal-ai/video-upscaler' +} + +export type PostFalAiVideoUpscalerResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiVideoUpscalerResponse = + PostFalAiVideoUpscalerResponses[keyof PostFalAiVideoUpscalerResponses] + +export type GetFalAiVideoUpscalerRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/video-upscaler/requests/{request_id}' +} + +export type GetFalAiVideoUpscalerRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaVideoUpscalerOutput +} + +export type GetFalAiVideoUpscalerRequestsByRequestIdResponse = + GetFalAiVideoUpscalerRequestsByRequestIdResponses[keyof GetFalAiVideoUpscalerRequestsByRequestIdResponses] + +export type GetFalAiCogvideox5bVideoToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/cogvideox-5b/video-to-video/requests/{request_id}/status' +} + +export type GetFalAiCogvideox5bVideoToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiCogvideox5bVideoToVideoRequestsByRequestIdStatusResponse = + GetFalAiCogvideox5bVideoToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiCogvideox5bVideoToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiCogvideox5bVideoToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/cogvideox-5b/video-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiCogvideox5bVideoToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiCogvideox5bVideoToVideoRequestsByRequestIdCancelResponse = + PutFalAiCogvideox5bVideoToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiCogvideox5bVideoToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiCogvideox5bVideoToVideoData = { + body: SchemaCogvideox5bVideoToVideoInput + path?: never + query?: never + url: '/fal-ai/cogvideox-5b/video-to-video' +} + +export type PostFalAiCogvideox5bVideoToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiCogvideox5bVideoToVideoResponse = + PostFalAiCogvideox5bVideoToVideoResponses[keyof PostFalAiCogvideox5bVideoToVideoResponses] + +export type GetFalAiCogvideox5bVideoToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/cogvideox-5b/video-to-video/requests/{request_id}' +} + +export type GetFalAiCogvideox5bVideoToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaCogvideox5bVideoToVideoOutput +} + +export type GetFalAiCogvideox5bVideoToVideoRequestsByRequestIdResponse = + GetFalAiCogvideox5bVideoToVideoRequestsByRequestIdResponses[keyof GetFalAiCogvideox5bVideoToVideoRequestsByRequestIdResponses] + +export type GetFalAiControlnextRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/controlnext/requests/{request_id}/status' +} + +export type GetFalAiControlnextRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiControlnextRequestsByRequestIdStatusResponse = + GetFalAiControlnextRequestsByRequestIdStatusResponses[keyof GetFalAiControlnextRequestsByRequestIdStatusResponses] + +export type PutFalAiControlnextRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/controlnext/requests/{request_id}/cancel' +} + +export type PutFalAiControlnextRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiControlnextRequestsByRequestIdCancelResponse = + PutFalAiControlnextRequestsByRequestIdCancelResponses[keyof PutFalAiControlnextRequestsByRequestIdCancelResponses] + +export type PostFalAiControlnextData = { + body: SchemaControlnextInput + path?: never + query?: never + url: '/fal-ai/controlnext' +} + +export type PostFalAiControlnextResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiControlnextResponse = + PostFalAiControlnextResponses[keyof PostFalAiControlnextResponses] + +export type GetFalAiControlnextRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/controlnext/requests/{request_id}' +} + +export type GetFalAiControlnextRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaControlnextOutput +} + +export type GetFalAiControlnextRequestsByRequestIdResponse = + GetFalAiControlnextRequestsByRequestIdResponses[keyof GetFalAiControlnextRequestsByRequestIdResponses] + +export type GetFalAiSam2VideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/sam2/video/requests/{request_id}/status' +} + +export type GetFalAiSam2VideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSam2VideoRequestsByRequestIdStatusResponse = + GetFalAiSam2VideoRequestsByRequestIdStatusResponses[keyof GetFalAiSam2VideoRequestsByRequestIdStatusResponses] + +export type PutFalAiSam2VideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sam2/video/requests/{request_id}/cancel' +} + +export type PutFalAiSam2VideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiSam2VideoRequestsByRequestIdCancelResponse = + PutFalAiSam2VideoRequestsByRequestIdCancelResponses[keyof PutFalAiSam2VideoRequestsByRequestIdCancelResponses] + +export type PostFalAiSam2VideoData = { + body: SchemaSam2VideoInput + path?: never + query?: never + url: '/fal-ai/sam2/video' +} + +export type PostFalAiSam2VideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSam2VideoResponse = + PostFalAiSam2VideoResponses[keyof PostFalAiSam2VideoResponses] + +export type GetFalAiSam2VideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sam2/video/requests/{request_id}' +} + +export type GetFalAiSam2VideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSam2VideoOutput +} + +export type GetFalAiSam2VideoRequestsByRequestIdResponse = + GetFalAiSam2VideoRequestsByRequestIdResponses[keyof GetFalAiSam2VideoRequestsByRequestIdResponses] + +export type GetFalAiAmtInterpolationRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/amt-interpolation/requests/{request_id}/status' +} + +export type GetFalAiAmtInterpolationRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiAmtInterpolationRequestsByRequestIdStatusResponse = + GetFalAiAmtInterpolationRequestsByRequestIdStatusResponses[keyof GetFalAiAmtInterpolationRequestsByRequestIdStatusResponses] + +export type PutFalAiAmtInterpolationRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/amt-interpolation/requests/{request_id}/cancel' +} + +export type PutFalAiAmtInterpolationRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiAmtInterpolationRequestsByRequestIdCancelResponse = + PutFalAiAmtInterpolationRequestsByRequestIdCancelResponses[keyof PutFalAiAmtInterpolationRequestsByRequestIdCancelResponses] + +export type PostFalAiAmtInterpolationData = { + body: SchemaAmtInterpolationInput + path?: never + query?: never + url: '/fal-ai/amt-interpolation' +} + +export type PostFalAiAmtInterpolationResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiAmtInterpolationResponse = + PostFalAiAmtInterpolationResponses[keyof PostFalAiAmtInterpolationResponses] + +export type GetFalAiAmtInterpolationRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/amt-interpolation/requests/{request_id}' +} + +export type GetFalAiAmtInterpolationRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaAmtInterpolationOutput +} + +export type GetFalAiAmtInterpolationRequestsByRequestIdResponse = + GetFalAiAmtInterpolationRequestsByRequestIdResponses[keyof GetFalAiAmtInterpolationRequestsByRequestIdResponses] + +export type GetFalAiFastAnimatediffTurboVideoToVideoRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/fast-animatediff/turbo/video-to-video/requests/{request_id}/status' + } + +export type GetFalAiFastAnimatediffTurboVideoToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFastAnimatediffTurboVideoToVideoRequestsByRequestIdStatusResponse = + GetFalAiFastAnimatediffTurboVideoToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiFastAnimatediffTurboVideoToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiFastAnimatediffTurboVideoToVideoRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-animatediff/turbo/video-to-video/requests/{request_id}/cancel' + } + +export type PutFalAiFastAnimatediffTurboVideoToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiFastAnimatediffTurboVideoToVideoRequestsByRequestIdCancelResponse = + PutFalAiFastAnimatediffTurboVideoToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiFastAnimatediffTurboVideoToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiFastAnimatediffTurboVideoToVideoData = { + body: SchemaFastAnimatediffTurboVideoToVideoInput + path?: never + query?: never + url: '/fal-ai/fast-animatediff/turbo/video-to-video' +} + +export type PostFalAiFastAnimatediffTurboVideoToVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFastAnimatediffTurboVideoToVideoResponse = + PostFalAiFastAnimatediffTurboVideoToVideoResponses[keyof PostFalAiFastAnimatediffTurboVideoToVideoResponses] + +export type GetFalAiFastAnimatediffTurboVideoToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-animatediff/turbo/video-to-video/requests/{request_id}' +} + +export type GetFalAiFastAnimatediffTurboVideoToVideoRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaFastAnimatediffTurboVideoToVideoOutput + } + +export type GetFalAiFastAnimatediffTurboVideoToVideoRequestsByRequestIdResponse = + GetFalAiFastAnimatediffTurboVideoToVideoRequestsByRequestIdResponses[keyof GetFalAiFastAnimatediffTurboVideoToVideoRequestsByRequestIdResponses] + +export type GetFalAiFastAnimatediffVideoToVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/fast-animatediff/video-to-video/requests/{request_id}/status' +} + +export type GetFalAiFastAnimatediffVideoToVideoRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFastAnimatediffVideoToVideoRequestsByRequestIdStatusResponse = + GetFalAiFastAnimatediffVideoToVideoRequestsByRequestIdStatusResponses[keyof GetFalAiFastAnimatediffVideoToVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiFastAnimatediffVideoToVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-animatediff/video-to-video/requests/{request_id}/cancel' +} + +export type PutFalAiFastAnimatediffVideoToVideoRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFastAnimatediffVideoToVideoRequestsByRequestIdCancelResponse = + PutFalAiFastAnimatediffVideoToVideoRequestsByRequestIdCancelResponses[keyof PutFalAiFastAnimatediffVideoToVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiFastAnimatediffVideoToVideoData = { + body: SchemaFastAnimatediffVideoToVideoInput + path?: never + query?: never + url: '/fal-ai/fast-animatediff/video-to-video' +} + +export type PostFalAiFastAnimatediffVideoToVideoResponses = { + /** + * The request status. 
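+   *
+   * A hedged reading of the generated pattern: each `*Response` alias is
+   * `Responses[keyof Responses]`, the union of all documented status-code
+   * bodies; with a single 200 entry it collapses to that body type.
+   *
+   * @example
+   * // Collapses to SchemaQueueStatus by construction (sketch).
+   * type Queued = PostFalAiFastAnimatediffVideoToVideoResponse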
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiFastAnimatediffVideoToVideoResponse = + PostFalAiFastAnimatediffVideoToVideoResponses[keyof PostFalAiFastAnimatediffVideoToVideoResponses] + +export type GetFalAiFastAnimatediffVideoToVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/fast-animatediff/video-to-video/requests/{request_id}' +} + +export type GetFalAiFastAnimatediffVideoToVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFastAnimatediffVideoToVideoOutput +} + +export type GetFalAiFastAnimatediffVideoToVideoRequestsByRequestIdResponse = + GetFalAiFastAnimatediffVideoToVideoRequestsByRequestIdResponses[keyof GetFalAiFastAnimatediffVideoToVideoRequestsByRequestIdResponses] diff --git a/packages/typescript/ai-fal/src/generated/video-to-video/zod.gen.ts b/packages/typescript/ai-fal/src/generated/video-to-video/zod.gen.ts new file mode 100644 index 00000000..e4cc5773 --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/video-to-video/zod.gen.ts @@ -0,0 +1,21840 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { z } from 'zod' + +/** + * File + */ +export const zSchemaFile = z.object({ + file_size: z.optional( + z.int().register(z.globalRegistry, { + description: 'The size of the file in bytes.', + }), + ), + file_name: z.optional( + z.string().register(z.globalRegistry, { + description: + 'The name of the file. It will be auto-generated if not provided.', + }), + ), + content_type: z.optional( + z.string().register(z.globalRegistry, { + description: 'The mime type of the file.', + }), + ), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), +}) + +/** + * AnimateDiffV2VOutput + */ +export const zSchemaFastAnimatediffVideoToVideoOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for generating the video.', + }), + video: zSchemaFile, +}) + +/** + * AnimateDiffV2VInput + */ +export const zSchemaFastAnimatediffVideoToVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. 
Be as descriptive as possible for best results.', + }), + video_url: z.string().register(z.globalRegistry, { + description: 'URL of the video.', + }), + first_n_seconds: z + .optional( + z.int().gte(2).lte(4).register(z.globalRegistry, { + description: 'The first N number of seconds of video to animate.', + }), + ) + .default(3), + fps: z + .optional( + z.int().gte(1).lte(16).register(z.globalRegistry, { + description: 'Number of frames per second to extract from the video.', + }), + ) + .default(8), + strength: z + .optional( + z.number().gte(0.1).lte(1).register(z.globalRegistry, { + description: 'The strength of the input video in the final output.', + }), + ) + .default(0.7), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related image to show you.\n ', + }), + ) + .default(7.5), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(25), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default('(bad quality, worst quality:1.2), ugly faces, bad anime'), + motions: z.optional( + z + .array( + z.enum([ + 'zoom-out', + 'zoom-in', + 'pan-left', + 'pan-right', + 'tilt-up', + 'tilt-down', + ]), + ) + .register(z.globalRegistry, { + description: 'The motions to apply to the video.', + }), + ), +}) + +/** + * AnimateDiffV2VOutput + */ +export const zSchemaFastAnimatediffTurboVideoToVideoOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'Seed used for generating the video.', + }), + video: zSchemaFile, +}) + +/** + * AnimateDiffV2VTurboInput + */ +export const zSchemaFastAnimatediffTurboVideoToVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The prompt to use for generating the image. 
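// A quick sketch of validating request input with the generated Zod schema;
// the './zod.gen' import path and the URLs are placeholders. Only `prompt`
// and `video_url` are required — parse() fills in the documented defaults.
import { zSchemaFastAnimatediffVideoToVideoInput } from './zod.gen'

const animatediff = zSchemaFastAnimatediffVideoToVideoInput.parse({
  prompt: 'a watercolor city street at dusk, soft light',
  video_url: 'https://example.com/clip.mp4',
})
// animatediff.fps === 8, animatediff.strength === 0.7,
// animatediff.num_inference_steps === 25

// Out-of-range values fail fast: first_n_seconds is capped at 4 here
// (the turbo variant below allows up to 12).
const bad = zSchemaFastAnimatediffVideoToVideoInput.safeParse({
  prompt: 'x',
  video_url: 'https://example.com/clip.mp4',
  first_n_seconds: 10,
})
// bad.success === false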
Be as descriptive as possible for best results.', + }), + video_url: z.string().register(z.globalRegistry, { + description: 'URL of the video.', + }), + first_n_seconds: z + .optional( + z.int().gte(2).lte(12).register(z.globalRegistry, { + description: 'The first N number of seconds of video to animate.', + }), + ) + .default(3), + fps: z + .optional( + z.int().gte(1).lte(16).register(z.globalRegistry, { + description: 'Number of frames per second to extract from the video.', + }), + ) + .default(8), + strength: z + .optional( + z.number().gte(0.1).lte(1).register(z.globalRegistry, { + description: 'The strength of the input video in the final output.', + }), + ) + .default(0.7), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + 'The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.', + }), + ) + .default(1), + num_inference_steps: z + .optional( + z.int().gte(1).lte(32).register(z.globalRegistry, { + description: + 'The number of inference steps to perform. 4-12 is recommended for turbo mode.', + }), + ) + .default(8), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of Stable Diffusion\n will output the same image every time.\n ', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "\n The negative prompt to use. Use it to address details that you don't want\n in the image. This could be colors, objects, scenery and even the small details\n (e.g. moustache, blurry, low resolution).\n ", + }), + ) + .default('(bad quality, worst quality:1.2), ugly faces, bad anime'), + motions: z.optional( + z + .array( + z.enum([ + 'zoom-out', + 'zoom-in', + 'pan-left', + 'pan-right', + 'tilt-up', + 'tilt-down', + ]), + ) + .register(z.globalRegistry, { + description: 'The motions to apply to the video.', + }), + ), +}) + +/** + * AMTInterpolationOutput + */ +export const zSchemaAmtInterpolationOutput = z.object({ + video: zSchemaFile, +}) + +/** + * AMTInterpolationInput + */ +export const zSchemaAmtInterpolationInput = z.object({ + video_url: z.string().register(z.globalRegistry, { + description: 'URL of the video to be processed', + }), + recursive_interpolation_passes: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Number of recursive interpolation passes', + }), + ) + .default(2), + output_fps: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Output frames per second', + }), + ) + .default(24), +}) + +/** + * SAM2VideoOutput + */ +export const zSchemaSam2VideoOutput = z.object({ + boundingbox_frames_zip: z.optional(zSchemaFile), + video: zSchemaFile, +}) + +/** + * BoxPrompt + */ +export const zSchemaBoxPrompt = z.object({ + y_min: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Y Min Coordinate of the box', + }), + ) + .default(0), + frame_index: z + .optional( + z.int().register(z.globalRegistry, { + description: 'The frame index to interact with.', + }), + ) + .default(0), + x_max: z + .optional( + z.int().register(z.globalRegistry, { + description: 'X Max Coordinate of the prompt', + }), + ) + .default(0), + x_min: z + .optional( + z.int().register(z.globalRegistry, { + description: 'X Min Coordinate of the box', + }), + ) + .default(0), + y_max: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Y Max Coordinate 
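// Same pattern for AMT interpolation: only a video URL is required, and the
// two tuning knobs fall back to their defaults when omitted. The import path
// and URL are placeholders.
import { zSchemaAmtInterpolationInput } from './zod.gen'

const amt = zSchemaAmtInterpolationInput.parse({
  video_url: 'https://example.com/input.mp4',
})
// amt.recursive_interpolation_passes === 2, amt.output_fps === 24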
of the prompt', + }), + ) + .default(0), +}) + +/** + * PointPrompt + */ +export const zSchemaPointPrompt = z.object({ + y: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Y Coordinate of the prompt', + }), + ) + .default(350), + label: z.optional( + z.union([z.literal(0), z.literal(1)]).register(z.globalRegistry, { + description: 'Label of the prompt. 1 for foreground, 0 for background', + }), + ), + frame_index: z + .optional( + z.int().register(z.globalRegistry, { + description: 'The frame index to interact with.', + }), + ) + .default(0), + x: z + .optional( + z.int().register(z.globalRegistry, { + description: 'X Coordinate of the prompt', + }), + ) + .default(305), +}) + +/** + * SAM2VideoRLEInput + */ +export const zSchemaSam2VideoInput = z.object({ + video_url: z.string().register(z.globalRegistry, { + description: 'The URL of the video to be segmented.', + }), + prompts: z + .optional( + z.array(zSchemaPointPrompt).register(z.globalRegistry, { + description: 'List of prompts to segment the video', + }), + ) + .default([]), + boundingbox_zip: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Return per-frame bounding box overlays as a zip archive.', + }), + ) + .default(false), + mask_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The URL of the mask to be applied initially.', + }), + ), + apply_mask: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Apply the mask on the video.', + }), + ) + .default(false), + box_prompts: z + .optional( + z.array(zSchemaBoxPrompt).register(z.globalRegistry, { + description: 'Coordinates for boxes', + }), + ) + .default([]), +}) + +/** + * ControlNeXtOutput + */ +export const zSchemaControlnextOutput = z.object({ + video: zSchemaFile, +}) + +/** + * ControlNeXtInput + */ +export const zSchemaControlnextInput = z.object({ + controlnext_cond_scale: z + .optional( + z.number().gte(0.1).lte(10).register(z.globalRegistry, { + description: 'Condition scale for ControlNeXt.', + }), + ) + .default(1), + video_url: z.string().register(z.globalRegistry, { + description: 'URL of the input video.', + }), + fps: z + .optional( + z.int().gte(1).lte(60).register(z.globalRegistry, { + description: 'Frames per second for the output video.', + }), + ) + .default(7), + max_frame_num: z + .optional( + z.int().gte(1).lte(1000).register(z.globalRegistry, { + description: 'Maximum number of frames to process.', + }), + ) + .default(240), + width: z + .optional( + z.int().gte(64).lte(1024).register(z.globalRegistry, { + description: 'Width of the output video.', + }), + ) + .default(576), + overlap: z + .optional( + z.int().gte(0).lte(20).register(z.globalRegistry, { + description: 'Number of overlapping frames between batches.', + }), + ) + .default(6), + guidance_scale: z + .optional( + z.number().gte(0.1).lte(10).register(z.globalRegistry, { + description: 'Guidance scale for the diffusion process.', + }), + ) + .default(3), + batch_frames: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'Number of frames to process in each batch.', + }), + ) + .default(24), + height: z + .optional( + z.int().gte(64).lte(1024).register(z.globalRegistry, { + description: 'Height of the output video.', + }), + ) + .default(1024), + sample_stride: z + .optional( + z.int().gte(1).lte(10).register(z.globalRegistry, { + description: 'Stride for sampling frames from the input video.', + }), + ) + .default(2), + image_url: z.string().register(z.globalRegistry, 
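// Sketch of building SAM2 prompts against the generated schema: a foreground
// click plus a bounding box, both on frame 0. Coordinates and URLs are
// illustrative placeholders.
import { zSchemaSam2VideoInput } from './zod.gen'

const sam2 = zSchemaSam2VideoInput.parse({
  video_url: 'https://example.com/clip.mp4',
  prompts: [{ x: 305, y: 350, label: 1, frame_index: 0 }], // label 1 = foreground
  box_prompts: [{ x_min: 100, y_min: 80, x_max: 400, y_max: 420 }],
  apply_mask: true,
})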
{ + description: 'URL of the reference image.', + }), + decode_chunk_size: z + .optional( + z.int().gte(1).lte(10).register(z.globalRegistry, { + description: 'Chunk size for decoding frames.', + }), + ) + .default(2), + motion_bucket_id: z + .optional( + z.number().gte(0).lte(255).register(z.globalRegistry, { + description: 'Motion bucket ID for the pipeline.', + }), + ) + .default(127), + num_inference_steps: z + .optional( + z.int().gte(1).lte(100).register(z.globalRegistry, { + description: 'Number of inference steps.', + }), + ) + .default(25), +}) + +/** + * Output + */ +export const zSchemaCogvideox5bVideoToVideoOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generating the video.', + }), + timings: z.record(z.string(), z.number()), + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated video. It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), + video: zSchemaFile, +}) + +/** + * ImageSize + */ +export const zSchemaImageSize = z.object({ + height: z + .optional( + z.int().lte(14142).register(z.globalRegistry, { + description: 'The height of the generated image.', + }), + ) + .default(512), + width: z + .optional( + z.int().lte(14142).register(z.globalRegistry, { + description: 'The width of the generated image.', + }), + ) + .default(512), +}) + +/** + * LoraWeight + */ +export const zSchemaLoraWeight = z.object({ + path: z.string().register(z.globalRegistry, { + description: 'URL or the path to the LoRA weights.', + }), + scale: z + .optional( + z.number().gte(0).lte(4).register(z.globalRegistry, { + description: + '\n The scale of the LoRA weight. This is used to scale the LoRA weight\n before merging it with the base model.\n ', + }), + ) + .default(1), +}) + +/** + * VideoToVideoInput + */ +export const zSchemaCogvideox5bVideoToVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the video from.', + }), + video_url: z.string().register(z.globalRegistry, { + description: 'The video to generate the video from.', + }), + use_rife: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Use RIFE for video interpolation', + }), + ) + .default(true), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. We currently support one lora.\n ', + }), + ) + .default([]), + video_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + strength: z + .optional( + z.number().gte(0.05).lte(1).register(z.globalRegistry, { + description: + 'The strength to use for Video to Video. 
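// Sketch of a ControlNeXt request. Long inputs are processed in windows of
// batch_frames with `overlap` shared frames between consecutive windows —
// presumably each batch advances batch_frames - overlap (24 - 6 = 18 with the
// defaults). That stride reading is an inference from the field docs, not
// something the schema states. URLs are placeholders.
import { zSchemaControlnextInput } from './zod.gen'

const controlnext = zSchemaControlnextInput.parse({
  video_url: 'https://example.com/pose.mp4',
  image_url: 'https://example.com/reference.png',
})
// controlnext.batch_frames === 24, controlnext.overlap === 6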
1.0 completely remakes the video while 0.0 preserves the original.', + }), + ) + .default(0.8), + guidance_scale: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: + '\n The CFG (Classifier Free Guidance) scale is a measure of how close you want\n the model to stick to your prompt when looking for a related video to show you.\n ', + }), + ) + .default(7), + num_inference_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to perform.', + }), + ) + .default(50), + export_fps: z + .optional( + z.int().gte(4).lte(32).register(z.globalRegistry, { + description: 'The target FPS of the video', + }), + ) + .default(16), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to generate video from', + }), + ) + .default(''), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + '\n The same seed and the same prompt given to the same version of the model\n will output the same video every time.\n ', + }), + ), +}) + +/** + * Output + */ +export const zSchemaVideoUpscalerOutput = z.object({ + video: zSchemaFile, +}) + +/** + * Input + */ +export const zSchemaVideoUpscalerInput = z.object({ + video_url: z.string().register(z.globalRegistry, { + description: 'The URL of the video to upscale', + }), + scale: z + .optional( + z.number().gte(1).lte(8).register(z.globalRegistry, { + description: 'The scale factor', + }), + ) + .default(2), +}) + +/** + * OutputModel + */ +export const zSchemaDubbingOutput = z.object({ + video: zSchemaFile, +}) + +/** + * InputModel + */ +export const zSchemaDubbingInput = z.object({ + do_lipsync: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to lip sync the audio to the video', + }), + ) + .default(true), + video_url: z.string().register(z.globalRegistry, { + description: 'Input video URL to be dubbed.', + }), + target_language: z.optional( + z.enum(['hindi', 'turkish', 'english']).register(z.globalRegistry, { + description: 'Target language to dub the video to', + }), + ), +}) + +/** + * Output + */ +export const zSchemaAutoCaptionOutput = z.object({ + video_url: z.string().register(z.globalRegistry, { + description: 'URL to the caption .mp4 video.', + }), +}) + +/** + * CaptionInput + */ +export const zSchemaAutoCaptionInput = z.object({ + txt_font: z + .optional( + z.string().register(z.globalRegistry, { + description: + "Font for generated captions. Choose one in 'Arial','Standard','Garamond', 'Times New Roman','Georgia', or pass a url to a .ttf file", + }), + ) + .default('Standard'), + video_url: z.string().register(z.globalRegistry, { + description: + 'URL to the .mp4 video with audio. Only videos of size <100MB are allowed.', + }), + top_align: z.optional(z.union([z.string(), z.number()])), + txt_color: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Colour of the text. Can be a RGB tuple, a color name, or an hexadecimal notation.', + }), + ) + .default('white'), + stroke_width: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Width of the text strokes in pixels', + }), + ) + .default(1), + refresh_interval: z + .optional( + z.number().gte(0.5).lte(3).register(z.globalRegistry, { + description: + 'Number of seconds the captions should stay on screen. 
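// Sketch of the video_size union on the CogVideoX input: it accepts either a
// named preset or an explicit ImageSize object. URLs are placeholders.
import { zSchemaCogvideox5bVideoToVideoInput } from './zod.gen'

const preset = zSchemaCogvideox5bVideoToVideoInput.parse({
  prompt: 'retro anime style',
  video_url: 'https://example.com/in.mp4',
  video_size: 'landscape_16_9',
})
const explicit = zSchemaCogvideox5bVideoToVideoInput.parse({
  prompt: 'retro anime style',
  video_url: 'https://example.com/in.mp4',
  video_size: { width: 768, height: 432 },
})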
A higher number will also result in more text being displayed at once.', + }), + ) + .default(1.5), + font_size: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Size of text in generated captions.', + }), + ) + .default(24), + left_align: z.optional(z.union([z.string(), z.number()])), +}) + +/** + * LipSyncOutput + */ +export const zSchemaSyncLipsyncOutput = z.object({ + video: zSchemaFile, +}) + +/** + * LipSyncInput + */ +export const zSchemaSyncLipsyncInput = z.object({ + model: z.optional( + z + .enum(['lipsync-1.8.0', 'lipsync-1.7.1', 'lipsync-1.9.0-beta']) + .register(z.globalRegistry, { + description: 'The model to use for lipsyncing', + }), + ), + video_url: z.string().register(z.globalRegistry, { + description: 'URL of the input video', + }), + sync_mode: z.optional( + z + .enum(['cut_off', 'loop', 'bounce', 'silence', 'remap']) + .register(z.globalRegistry, { + description: + 'Lipsync mode when audio and video durations are out of sync.', + }), + ), + audio_url: z.string().register(z.globalRegistry, { + description: 'URL of the input audio', + }), +}) + +/** + * Keyframe + */ +export const zSchemaKeyframe = z.object({ + duration: z.number().register(z.globalRegistry, { + description: 'The duration in milliseconds of this keyframe', + }), + timestamp: z.number().register(z.globalRegistry, { + description: 'The timestamp in milliseconds where this keyframe starts', + }), + url: z.string().register(z.globalRegistry, { + description: "The URL where this keyframe's media file can be accessed", + }), +}) + +/** + * Track + */ +export const zSchemaTrack = z.object({ + type: z.string().register(z.globalRegistry, { + description: "Type of track ('video' or 'audio')", + }), + id: z.string().register(z.globalRegistry, { + description: 'Unique identifier for the track', + }), + keyframes: z.array(zSchemaKeyframe).register(z.globalRegistry, { + description: 'List of keyframes that make up this track', + }), +}) + +/** + * ComposeOutput + */ +export const zSchemaFfmpegApiComposeOutput = z.object({ + video_url: z.string().register(z.globalRegistry, { + description: 'URL of the processed video file', + }), + thumbnail_url: z.string().register(z.globalRegistry, { + description: "URL of the video's thumbnail image", + }), +}) + +/** + * Input + */ +export const zSchemaFfmpegApiComposeInput = z.object({ + tracks: z.array(zSchemaTrack).register(z.globalRegistry, { + description: 'List of tracks to be combined into the final media', + }), +}) + +/** + * HunyuanV2VResponse + */ +export const zSchemaHunyuanVideoLoraVideoToVideoOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generating the video.', + }), + video: zSchemaFile, +}) + +/** + * HunyuanV2VRequest + */ +export const zSchemaHunyuanVideoLoraVideoToVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the video from.', + }), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16']).register(z.globalRegistry, { + description: 'The aspect ratio of the video to generate.', + }), + ), + resolution: z.optional( + z.enum(['480p', '580p', '720p']).register(z.globalRegistry, { + description: 'The resolution of the video to generate.', + }), + ), + video_url: z.string().register(z.globalRegistry, { + description: 'URL of the video', + }), + loras: z + .optional( + z.array(zSchemaLoraWeight).register(z.globalRegistry, { + description: + '\n The LoRAs to use for the image generation. 
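// Sketch of an ffmpeg-api compose payload: one video track whose keyframes
// are laid out back-to-back; timestamp and duration are in milliseconds.
// The track id and URLs are placeholders.
import { zSchemaFfmpegApiComposeInput } from './zod.gen'

const compose = zSchemaFfmpegApiComposeInput.parse({
  tracks: [
    {
      id: 'video-1',
      type: 'video',
      keyframes: [
        { timestamp: 0, duration: 4000, url: 'https://example.com/a.mp4' },
        { timestamp: 4000, duration: 4000, url: 'https://example.com/b.mp4' },
      ],
    },
  ],
})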
You can use any number of LoRAs\n and they will be merged together to generate the final image.\n ', + }), + ) + .default([]), + strength: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: 'Strength of video-to-video', + }), + ) + .default(0.75), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(false), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed to use for generating the video.', + }), + ), + num_frames: z.optional( + z.enum(['129', '85']).register(z.globalRegistry, { + description: 'The number of frames to generate.', + }), + ), + pro_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'By default, generations are done with 35 steps. Pro mode does 55 steps which results in higher quality videos but will take more time and cost 2x more billing units.', + }), + ) + .default(false), +}) + +/** + * HunyuanT2VResponse + */ +export const zSchemaHunyuanVideoVideoToVideoOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generating the video.', + }), + video: zSchemaFile, +}) + +/** + * HunyuanV2VRequest + */ +export const zSchemaHunyuanVideoVideoToVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the video from.', + }), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16']).register(z.globalRegistry, { + description: 'The aspect ratio of the video to generate.', + }), + ), + resolution: z.optional( + z.enum(['480p', '580p', '720p']).register(z.globalRegistry, { + description: 'The resolution of the video to generate.', + }), + ), + video_url: z.string().register(z.globalRegistry, { + description: 'URL of the video input.', + }), + strength: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: 'Strength for Video-to-Video', + }), + ) + .default(0.85), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(2).lte(30).register(z.globalRegistry, { + description: + 'The number of inference steps to run. Lower gets faster results, higher gets better results.', + }), + ) + .default(30), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed to use for generating the video.', + }), + ), + num_frames: z.optional( + z.enum(['129', '85']).register(z.globalRegistry, { + description: 'The number of frames to generate.', + }), + ), + pro_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'By default, generations are done with 35 steps. Pro mode does 55 steps which results in higher quality videos but will take more time and cost 2x more billing units.', + }), + ) + .default(false), +}) + +/** + * Ben2OutputVideo + */ +export const zSchemaBenV2VideoOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: + '\n Seed of the generated Image. 
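// Sketch of a Hunyuan LoRA video-to-video request. Note that num_frames is a
// *string* enum ('129' | '85') in this schema, not a number. Paths and URLs
// are placeholders.
import { zSchemaHunyuanVideoLoraVideoToVideoInput } from './zod.gen'

const hunyuan = zSchemaHunyuanVideoLoraVideoToVideoInput.parse({
  prompt: 'claymation style',
  video_url: 'https://example.com/in.mp4',
  loras: [{ path: 'https://example.com/style.safetensors', scale: 0.8 }],
  num_frames: '129',
})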
It will be the same value of the one passed in the\n input or the randomly generated that was used in case none was passed.\n ', + }), + video: zSchemaFile, +}) + +/** + * Ben2InputVideo + */ +export const zSchemaBenV2VideoInput = z.object({ + video_url: z.string().register(z.globalRegistry, { + description: 'URL of video to be used for background removal.', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for reproducible generation.', + }), + ), + background_color: z.optional( + z + .tuple([z.unknown(), z.unknown(), z.unknown()]) + .register(z.globalRegistry, { + description: + 'Optional RGB values (0-255) for the background color. If not provided, the background will be transparent. For ex: [0, 0, 0]', + }), + ), +}) + +/** + * VideoUpscaleOutput + */ +export const zSchemaTopazUpscaleVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * VideoUpscaleRequest + */ +export const zSchemaTopazUpscaleVideoInput = z.object({ + H264_output: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to use H264 codec for output video. Default is H265.', + }), + ) + .default(false), + video_url: z.string().register(z.globalRegistry, { + description: 'URL of the video to upscale', + }), + upscale_factor: z + .optional( + z.number().gte(1).lte(4).register(z.globalRegistry, { + description: + 'Factor to upscale the video by (e.g. 2.0 doubles width and height)', + }), + ) + .default(2), + target_fps: z.optional( + z.int().gte(16).lte(60).register(z.globalRegistry, { + description: + 'Target FPS for frame interpolation. If set, frame interpolation will be enabled.', + }), + ), +}) + +/** + * ExtendVideoOutput + */ +export const zSchemaLtxVideoV095ExtendOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * VideoConditioningInput + */ +export const zSchemaVideoConditioningInput = z.object({ + video_url: z.string().register(z.globalRegistry, { + description: 'URL of video to be extended', + }), + start_frame_num: z.int().gte(0).lte(120).register(z.globalRegistry, { + description: + 'Frame number of the video from which the conditioning starts. 
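// Sketch of a Topaz upscale request: 2x spatial upscale plus interpolation to
// 60 fps (per the field docs, setting target_fps is what enables frame
// interpolation). Output defaults to H265 unless H264_output is true. The URL
// is a placeholder.
import { zSchemaTopazUpscaleVideoInput } from './zod.gen'

const topaz = zSchemaTopazUpscaleVideoInput.parse({
  video_url: 'https://example.com/lowres.mp4',
  upscale_factor: 2,
  target_fps: 60,
})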
Must be a multiple of 8.', + }), +}) + +/** + * ExtendVideoInput + */ +export const zSchemaLtxVideoV095ExtendInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Text prompt to guide generation', + }), + resolution: z.optional( + z.enum(['480p', '720p']).register(z.globalRegistry, { + description: 'Resolution of the generated video (480p or 720p).', + }), + ), + aspect_ratio: z.optional( + z.enum(['9:16', '16:9']).register(z.globalRegistry, { + description: 'Aspect ratio of the generated video (16:9 or 9:16).', + }), + ), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "Whether to expand the prompt using the model's own capabilities.", + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for generation', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps', + }), + ) + .default(40), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for generation', + }), + ) + .default('worst quality, inconsistent motion, blurry, jittery, distorted'), + video: zSchemaVideoConditioningInput, +}) + +/** + * MulticonditioningVideoOutput + */ +export const zSchemaLtxVideoV095MulticonditioningOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * ImageConditioningInput + */ +export const zSchemaImageConditioningInput = z.object({ + start_frame_num: z.int().gte(0).lte(120).register(z.globalRegistry, { + description: + 'Frame number of the image from which the conditioning starts. Must be a multiple of 8.', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of image to use as conditioning', + }), +}) + +/** + * MultiConditioningVideoInput + */ +export const zSchemaLtxVideoV095MulticonditioningInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Text prompt to guide generation', + }), + resolution: z.optional( + z.enum(['480p', '720p']).register(z.globalRegistry, { + description: 'Resolution of the generated video (480p or 720p).', + }), + ), + aspect_ratio: z.optional( + z.enum(['9:16', '16:9']).register(z.globalRegistry, { + description: 'Aspect ratio of the generated video (16:9 or 9:16).', + }), + ), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "Whether to expand the prompt using the model's own capabilities.", + }), + ) + .default(true), + images: z + .optional( + z.array(zSchemaImageConditioningInput).register(z.globalRegistry, { + description: 'URL of images to use as conditioning', + }), + ) + .default([]), + videos: z + .optional( + z.array(zSchemaVideoConditioningInput).register(z.globalRegistry, { + description: 'Videos to use as conditioning', + }), + ) + .default([]), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for generation', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps', + }), + ) + .default(40), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for generation', + }), + ) + .default('worst quality, inconsistent motion, blurry, jittery, distorted'), +}) + +/** + * PikadditionsOutput + * + * Output from Pikadditions 
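// Sketch of LTX 0.9.5 multiconditioning: an image condition at frame 0 and a
// video condition starting at frame 40. Per the field docs, start_frame_num
// must be a multiple of 8; the Zod schema only enforces the 0-120 range, so
// the multiple-of-8 rule is on the caller. URLs are placeholders.
import { zSchemaLtxVideoV095MulticonditioningInput } from './zod.gen'

const ltx = zSchemaLtxVideoV095MulticonditioningInput.parse({
  prompt: 'a slow dolly shot through a foggy forest',
  images: [{ image_url: 'https://example.com/first.png', start_frame_num: 0 }],
  videos: [{ video_url: 'https://example.com/ref.mp4', start_frame_num: 40 }],
})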
generation + */ +export const zSchemaPikaV2PikadditionsOutput = z + .object({ + video: zSchemaFile, + }) + .register(z.globalRegistry, { + description: 'Output from Pikadditions generation', + }) + +/** + * PikadditionsRequest + * + * Request model for Pikadditions endpoint + */ +export const zSchemaPikaV2PikadditionsInput = z + .object({ + prompt: z.optional( + z.string().register(z.globalRegistry, { + description: 'Text prompt describing what to add', + }), + ), + video_url: z.string().register(z.globalRegistry, { + description: 'URL of the input video', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the random number generator', + }), + ), + negative_prompt: z.optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt to guide the model', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to add', + }), + }) + .register(z.globalRegistry, { + description: 'Request model for Pikadditions endpoint', + }) + +/** + * Output + */ +export const zSchemaLatentsyncOutput = z.object({ + video: zSchemaFile, +}) + +/** + * Input + */ +export const zSchemaLatentsyncInput = z.object({ + video_url: z.string().register(z.globalRegistry, { + description: 'The URL of the video to generate the lip sync for.', + }), + guidance_scale: z + .optional( + z.number().gte(1).lte(2).register(z.globalRegistry, { + description: 'Guidance scale for the model inference', + }), + ) + .default(1), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for generation. If None, a random seed will be used.', + }), + ), + audio_url: z.string().register(z.globalRegistry, { + description: 'The URL of the audio to generate the lip sync for.', + }), + loop_mode: z.optional( + z.enum(['pingpong', 'loop']).register(z.globalRegistry, { + description: + 'Video loop mode when audio is longer than video. Options: pingpong, loop', + }), + ), +}) + +/** + * LipSyncV2Output + */ +export const zSchemaSyncLipsyncV2Output = z.object({ + video: zSchemaFile, +}) + +/** + * LipSyncV2Input + */ +export const zSchemaSyncLipsyncV2Input = z.object({ + model: z.optional( + z.enum(['lipsync-2', 'lipsync-2-pro']).register(z.globalRegistry, { + description: + 'The model to use for lipsyncing. `lipsync-2-pro` will cost roughly 1.67 times as much as `lipsync-2` for the same duration.', + }), + ), + video_url: z.string().register(z.globalRegistry, { + description: 'URL of the input video', + }), + sync_mode: z.optional( + z + .enum(['cut_off', 'loop', 'bounce', 'silence', 'remap']) + .register(z.globalRegistry, { + description: + 'Lipsync mode when audio and video durations are out of sync.', + }), + ), + audio_url: z.string().register(z.globalRegistry, { + description: 'URL of the input audio', + }), +}) + +/** + * VideoOutput + * + * Pydantic model for returning the re-sounded video back to the client. + */ +export const zSchemaVideoSoundEffectsGeneratorOutput = z + .object({ + video: zSchemaFile, + }) + .register(z.globalRegistry, { + description: + 'Pydantic model for returning the re-sounded video back to the client.', + }) + +/** + * Video + * + * Represents a video file. 
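// Sketch of a sync/lipsync v2 request; sync_mode picks the strategy when the
// audio and video durations disagree. URLs are placeholders.
import { zSchemaSyncLipsyncV2Input } from './zod.gen'

const lipsync = zSchemaSyncLipsyncV2Input.parse({
  model: 'lipsync-2', // 'lipsync-2-pro' costs roughly 1.67x more per the docs
  video_url: 'https://example.com/talking-head.mp4',
  audio_url: 'https://example.com/voiceover.wav',
  sync_mode: 'loop',
})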
+ */ +export const zSchemaVideo = z + .object({ + file_size: z.optional(z.union([z.int(), z.unknown()])), + file_name: z.optional(z.union([z.string(), z.unknown()])), + content_type: z.optional(z.union([z.string(), z.unknown()])), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), + file_data: z.optional(z.union([z.string(), z.unknown()])), + }) + .register(z.globalRegistry, { + description: 'Represents a video file.', + }) + +/** + * VideoInput + * + * Pydantic model for receiving a video file to analyze and re-sound. + */ +export const zSchemaVideoSoundEffectsGeneratorInput = z + .object({ + video_url: zSchemaVideo, + }) + .register(z.globalRegistry, { + description: + 'Pydantic model for receiving a video file to analyze and re-sound.', + }) + +/** + * WanT2VResponse + */ +export const zSchemaWanVaceOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * WanT2VRequest + */ +export const zSchemaWanVaceInput = z.object({ + shift: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'Shift parameter for video generation.', + }), + ) + .default(5), + video_url: z.optional( + z.string().register(z.globalRegistry, { + description: + 'URL to the source video file. If provided, the model will use this video as a reference.', + }), + ), + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide video generation.', + }), + ref_image_urls: z.optional( + z.array(z.string()).register(z.globalRegistry, { + description: + 'Urls to source reference image. If provided, the model will use this image as reference.', + }), + ), + task: z.optional( + z.enum(['depth', 'inpainting']).register(z.globalRegistry, { + description: 'Task type for the model.', + }), + ), + frames_per_second: z + .optional( + z.int().gte(5).lte(24).register(z.globalRegistry, { + description: + 'Frames per second of the generated video. Must be between 5 to 24.', + }), + ) + .default(16), + mask_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: + 'URL to the guiding mask file. If provided, the model will use this mask as a reference to create masked video. If provided mask video url will be ignored.', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(false), + num_frames: z + .optional( + z.int().gte(81).lte(240).register(z.globalRegistry, { + description: + 'Number of frames to generate. Must be between 81 to 100 (inclusive). 
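// Sketch for the sound-effects generator input. Despite the name, video_url
// here is a structured Video object (with its own `url` field), not a bare
// string as in the other schemas. The URL is a placeholder.
import { zSchemaVideoSoundEffectsGeneratorInput } from './zod.gen'

const sfx = zSchemaVideoSoundEffectsGeneratorInput.parse({
  video_url: { url: 'https://example.com/silent.mp4' },
})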
Works only with only reference images as input if source video or mask video is provided output len would be same as source up to 241 frames', + }), + ) + .default(81), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for video generation.', + }), + ) + .default( + 'bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards', + ), + aspect_ratio: z.optional( + z.enum(['auto', '9:16', '16:9']).register(z.globalRegistry, { + description: 'Aspect ratio of the generated video (16:9 or 9:16).', + }), + ), + resolution: z.optional( + z.enum(['480p', '580p', '720p']).register(z.globalRegistry, { + description: 'Resolution of the generated video (480p,580p, or 720p).', + }), + ), + mask_video_url: z.optional( + z.string().register(z.globalRegistry, { + description: + 'URL to the source mask file. If provided, the model will use this mask as a reference.', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(2).lte(40).register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. Higher values give better quality but take longer.', + }), + ) + .default(30), + preprocess: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to preprocess the input video.', + }), + ) + .default(false), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), +}) + +/** + * MagiVideoExtensionResponse + */ +export const zSchemaMagiDistilledExtendVideoOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * MagiVideoExtensionRequest + */ +export const zSchemaMagiDistilledExtendVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide video generation.', + }), + resolution: z.optional( + z.enum(['480p', '720p']).register(z.globalRegistry, { + description: + 'Resolution of the generated video (480p or 720p). 480p is 0.5 billing units, and 720p is 1 billing unit.', + }), + ), + aspect_ratio: z.optional( + z.enum(['auto', '16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: + "Aspect ratio of the generated video. If 'auto', the aspect ratio will be determined automatically based on the input image.", + }), + ), + video_url: z.string().register(z.globalRegistry, { + description: + 'URL of the input video to represent the beginning of the video. If the input video does not match the chosen aspect ratio, it is resized and center cropped.', + }), + start_frame: z.optional( + z.int().gte(0).register(z.globalRegistry, { + description: + 'The frame to begin the generation from, with the remaining frames will be treated as the prefix video. The final video will contain the frames up until this number unchanged, followed by the generated frames. 
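// Sketch of a depth-guided WanVACE request; with preprocess enabled, the
// control signal is derived from the source video. Note that video_url is
// optional here — prompt is the only required field. URLs are placeholders.
import { zSchemaWanVaceInput } from './zod.gen'

const vace = zSchemaWanVaceInput.parse({
  prompt: 'the same scene rendered as claymation',
  video_url: 'https://example.com/source.mp4',
  task: 'depth',
  preprocess: true,
})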
The default start frame is 32 frames before the end of the video, which gives optimal results.', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_inference_steps: z.optional( + z + .union([z.literal(4), z.literal(8), z.literal(16), z.literal(32)]) + .register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. Higher values give better quality but take longer.', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), + num_frames: z + .optional( + z.int().gte(96).lte(192).register(z.globalRegistry, { + description: + 'Number of frames to generate. Must be between 96 and 192 (inclusive). Each additional 24 frames beyond 96 incurs an additional billing unit.', + }), + ) + .default(96), +}) + +/** + * MagiVideoExtensionResponse + */ +export const zSchemaMagiExtendVideoOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * MagiVideoExtensionRequest + */ +export const zSchemaMagiExtendVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide video generation.', + }), + resolution: z.optional( + z.enum(['480p', '720p']).register(z.globalRegistry, { + description: + 'Resolution of the generated video (480p or 720p). 480p is 0.5 billing units, and 720p is 1 billing unit.', + }), + ), + aspect_ratio: z.optional( + z.enum(['auto', '16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: + "Aspect ratio of the generated video. If 'auto', the aspect ratio will be determined automatically based on the input image.", + }), + ), + video_url: z.string().register(z.globalRegistry, { + description: + 'URL of the input video to represent the beginning of the video. If the input video does not match the chosen aspect ratio, it is resized and center cropped.', + }), + start_frame: z.optional( + z.int().gte(0).register(z.globalRegistry, { + description: + 'The frame to begin the generation from, with the remaining frames will be treated as the prefix video. The final video will contain the frames up until this number unchanged, followed by the generated frames. The default start frame is 32 frames before the end of the video, which gives optimal results.', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_inference_steps: z.optional( + z + .union([ + z.literal(4), + z.literal(8), + z.literal(16), + z.literal(32), + z.literal(64), + ]) + .register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. Higher values give better quality but take longer.', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), + num_frames: z + .optional( + z.int().gte(96).lte(192).register(z.globalRegistry, { + description: + 'Number of frames to generate. Must be between 96 and 192 (inclusive). Each additional 24 frames beyond 96 incurs an additional billing unit.', + }), + ) + .default(96), +}) + +/** + * VideoCondition + * + * Video condition to use for generation. 
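// Sketch contrasting the two MAGI extend schemas: they share the same shape,
// but the distilled variant restricts num_inference_steps to 4|8|16|32 while
// the full model also allows 64. URLs are placeholders.
import {
  zSchemaMagiDistilledExtendVideoInput,
  zSchemaMagiExtendVideoInput,
} from './zod.gen'

zSchemaMagiDistilledExtendVideoInput.parse({
  prompt: 'extend the chase scene down the alley',
  video_url: 'https://example.com/prefix.mp4',
  num_inference_steps: 32,
})
zSchemaMagiExtendVideoInput.parse({
  prompt: 'extend the chase scene down the alley',
  video_url: 'https://example.com/prefix.mp4',
  num_inference_steps: 64, // would be rejected by the distilled schema
})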
+ */ +export const zSchemaVideoCondition = z + .object({ + strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The strength of the condition.', + }), + ) + .default(1), + start_frame_number: z + .optional( + z.int().gte(0).lte(160).register(z.globalRegistry, { + description: 'The frame number to start the condition on.', + }), + ) + .default(0), + video_url: z.string().register(z.globalRegistry, { + description: 'The URL of the video to use as input.', + }), + }) + .register(z.globalRegistry, { + description: 'Video condition to use for generation.', + }) + +/** + * ImageCondition + * + * Image condition to use for generation. + */ +export const zSchemaImageCondition = z + .object({ + strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The strength of the condition.', + }), + ) + .default(1), + start_frame_number: z + .optional( + z.int().gte(0).lte(160).register(z.globalRegistry, { + description: 'The frame number to start the condition on.', + }), + ) + .default(0), + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to use as input.', + }), + }) + .register(z.globalRegistry, { + description: 'Image condition to use for generation.', + }) + +/** + * MulticonditioningVideoOutput + */ +export const zSchemaLtxVideoLoraMulticonditioningOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * LoRAWeight + * + * LoRA weight to use for generation. + */ +export const zSchemaLoRaWeight = z + .object({ + path: z.string().register(z.globalRegistry, { + description: 'URL or path to the LoRA weights.', + }), + scale: z + .optional( + z.number().gte(0).lte(4).register(z.globalRegistry, { + description: + 'Scale of the LoRA weight. This is a multiplier applied to the LoRA weight when loading it.', + }), + ) + .default(1), + weight_name: z.optional( + z.string().register(z.globalRegistry, { + description: + 'Name of the LoRA weight. Only used if `path` is a HuggingFace repository, and is only required when the repository contains multiple LoRA weights.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'LoRA weight to use for generation.', + }) + +/** + * MulticonditioningVideoInput + * + * Request model for text-to-video generation with multiple conditions. 
+ */ +export const zSchemaLtxVideoLoraMulticonditioningInput = z + .object({ + number_of_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to use.', + }), + ) + .default(30), + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the video from.', + }), + reverse_video: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to reverse the video.', + }), + ) + .default(false), + frame_rate: z + .optional( + z.int().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frame rate of the video.', + }), + ) + .default(25), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to expand the prompt using the LLM.', + }), + ) + .default(false), + number_of_frames: z + .optional( + z.int().gte(9).lte(161).register(z.globalRegistry, { + description: 'The number of frames in the video.', + }), + ) + .default(89), + loras: z + .optional( + z.array(zSchemaLoRaWeight).register(z.globalRegistry, { + description: 'The LoRA weights to use for generation.', + }), + ) + .default([]), + images: z + .optional( + z.array(zSchemaImageCondition).register(z.globalRegistry, { + description: 'The image conditions to use for generation.', + }), + ) + .default([]), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to use.', + }), + ) + .default( + 'blurry, low quality, low resolution, inconsistent motion, jittery, distorted', + ), + aspect_ratio: z.optional( + z.enum(['16:9', '1:1', '9:16', 'auto']).register(z.globalRegistry, { + description: 'The aspect ratio of the video.', + }), + ), + resolution: z.optional( + z.enum(['480p', '720p']).register(z.globalRegistry, { + description: 'The resolution of the video.', + }), + ), + videos: z + .optional( + z.array(zSchemaVideoCondition).register(z.globalRegistry, { + description: 'The video conditions to use for generation.', + }), + ) + .default([]), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed to use for generation.', + }), + ), + }) + .register(z.globalRegistry, { + description: + 'Request model for text-to-video generation with multiple conditions.', + }) + +/** + * ExtendVideoOutput + */ +export const zSchemaLtxVideo13bDevExtendOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * ExtendVideoInput + */ +export const zSchemaLtxVideo13bDevExtendInput = z.object({ + second_pass_skip_initial_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: + 'The number of inference steps to skip in the initial steps of the second pass. 
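// Sketch of a LoRA-enabled LTX multiconditioning request; weight_name only
// matters when `path` points at a HuggingFace repo holding several LoRA
// files. Only prompt is required. Paths are placeholders.
import { zSchemaLtxVideoLoraMulticonditioningInput } from './zod.gen'

const ltxLora = zSchemaLtxVideoLoraMulticonditioningInput.parse({
  prompt: 'handheld documentary footage of a street market',
  loras: [{ path: 'https://example.com/film-grain.safetensors', scale: 1.2 }],
})
// ltxLora.number_of_steps === 30, ltxLora.frame_rate === 25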
By skipping some steps at the beginning, the second pass can focus on smaller details instead of larger changes.', + }), + ) + .default(17), + first_pass_num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps during the first pass.', + }), + ) + .default(30), + frame_rate: z + .optional( + z.int().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frame rate of the video.', + }), + ) + .default(30), + prompt: z.string().register(z.globalRegistry, { + description: 'Text prompt to guide generation', + }), + reverse_video: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to reverse the video.', + }), + ) + .default(false), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to expand the prompt using a language model.', + }), + ) + .default(false), + loras: z + .optional( + z.array(zSchemaLoRaWeight).register(z.globalRegistry, { + description: 'LoRA weights to use for generation', + }), + ) + .default([]), + second_pass_num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps during the second pass.', + }), + ) + .default(30), + num_frames: z + .optional( + z.int().gte(9).lte(161).register(z.globalRegistry, { + description: 'The number of frames in the video.', + }), + ) + .default(121), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + video: zSchemaVideoConditioningInput, + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for generation', + }), + ) + .default('worst quality, inconsistent motion, blurry, jittery, distorted'), + resolution: z.optional( + z.enum(['480p', '720p']).register(z.globalRegistry, { + description: 'Resolution of the generated video (480p or 720p).', + }), + ), + aspect_ratio: z.optional( + z.enum(['9:16', '1:1', '16:9', 'auto']).register(z.globalRegistry, { + description: 'The aspect ratio of the video.', + }), + ), + constant_rate_factor: z + .optional( + z.int().gte(20).lte(60).register(z.globalRegistry, { + description: + "The constant rate factor (CRF) to compress input media with. Compressed input media more closely matches the model's training data, which can improve motion quality.", + }), + ) + .default(35), + first_pass_skip_final_steps: z + .optional( + z.int().gte(0).lte(50).register(z.globalRegistry, { + description: + 'Number of inference steps to skip in the final steps of the first pass. 
By skipping some steps at the end, the first pass can focus on larger changes instead of smaller details.', + }), + ) + .default(3), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for generation', + }), + ), +}) + +/** + * MultiConditioningVideoOutput + */ +export const zSchemaLtxVideo13bDevMulticonditioningOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * MultiConditioningVideoInput + */ +export const zSchemaLtxVideo13bDevMulticonditioningInput = z.object({ + second_pass_skip_initial_steps: z + .optional( + z.int().gte(1).lte(50).register(z.globalRegistry, { + description: + 'The number of inference steps to skip in the initial steps of the second pass. By skipping some steps at the beginning, the second pass can focus on smaller details instead of larger changes.', + }), + ) + .default(17), + first_pass_num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps during the first pass.', + }), + ) + .default(30), + frame_rate: z + .optional( + z.int().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frame rate of the video.', + }), + ) + .default(30), + prompt: z.string().register(z.globalRegistry, { + description: 'Text prompt to guide generation', + }), + reverse_video: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to reverse the video.', + }), + ) + .default(false), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to expand the prompt using a language model.', + }), + ) + .default(false), + loras: z + .optional( + z.array(zSchemaLoRaWeight).register(z.globalRegistry, { + description: 'LoRA weights to use for generation', + }), + ) + .default([]), + images: z + .optional( + z.array(zSchemaImageConditioningInput).register(z.globalRegistry, { + description: 'URL of images to use as conditioning', + }), + ) + .default([]), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + num_frames: z + .optional( + z.int().gte(9).lte(161).register(z.globalRegistry, { + description: 'The number of frames in the video.', + }), + ) + .default(121), + second_pass_num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: 'Number of inference steps during the second pass.', + }), + ) + .default(30), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for generation', + }), + ) + .default('worst quality, inconsistent motion, blurry, jittery, distorted'), + resolution: z.optional( + z.enum(['480p', '720p']).register(z.globalRegistry, { + description: 'Resolution of the generated video (480p or 720p).', + }), + ), + aspect_ratio: z.optional( + z.enum(['9:16', '1:1', '16:9', 'auto']).register(z.globalRegistry, { + description: 'The aspect ratio of the video.', + }), + ), + videos: z + .optional( + z.array(zSchemaVideoConditioningInput).register(z.globalRegistry, { + description: 'Videos to use as conditioning', + }), + ) + .default([]), + constant_rate_factor: z + .optional( + z.int().gte(20).lte(60).register(z.globalRegistry, { + description: + "The constant rate factor (CRF) to 
compress input media with. Compressed input media more closely matches the model's training data, which can improve motion quality.", + }), + ) + .default(35), + first_pass_skip_final_steps: z + .optional( + z.int().gte(0).lte(50).register(z.globalRegistry, { + description: + 'Number of inference steps to skip in the final steps of the first pass. By skipping some steps at the end, the first pass can focus on larger changes instead of smaller details.', + }), + ) + .default(3), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for generation', + }), + ), +}) + +/** + * MultiConditioningVideoOutput + */ +export const zSchemaLtxVideo13bDistilledMulticonditioningOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * DistilledMultiConditioningVideoInput + * + * Distilled model input + */ +export const zSchemaLtxVideo13bDistilledMulticonditioningInput = z + .object({ + second_pass_skip_initial_steps: z + .optional( + z.int().gte(1).lte(20).register(z.globalRegistry, { + description: + 'The number of inference steps to skip in the initial steps of the second pass. By skipping some steps at the beginning, the second pass can focus on smaller details instead of larger changes.', + }), + ) + .default(5), + first_pass_num_inference_steps: z + .optional( + z.int().gte(2).lte(20).register(z.globalRegistry, { + description: 'Number of inference steps during the first pass.', + }), + ) + .default(8), + frame_rate: z + .optional( + z.int().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frame rate of the video.', + }), + ) + .default(30), + reverse_video: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to reverse the video.', + }), + ) + .default(false), + prompt: z.string().register(z.globalRegistry, { + description: 'Text prompt to guide generation', + }), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to expand the prompt using a language model.', + }), + ) + .default(false), + loras: z + .optional( + z.array(zSchemaLoRaWeight).register(z.globalRegistry, { + description: 'LoRA weights to use for generation', + }), + ) + .default([]), + images: z + .optional( + z.array(zSchemaImageConditioningInput).register(z.globalRegistry, { + description: 'URL of images to use as conditioning', + }), + ) + .default([]), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + num_frames: z + .optional( + z.int().gte(9).lte(161).register(z.globalRegistry, { + description: 'The number of frames in the video.', + }), + ) + .default(121), + second_pass_num_inference_steps: z + .optional( + z.int().gte(2).lte(20).register(z.globalRegistry, { + description: 'Number of inference steps during the second pass.', + }), + ) + .default(8), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for generation', + }), + ) + .default( + 'worst quality, inconsistent motion, blurry, jittery, distorted', + ), + resolution: z.optional( + z.enum(['480p', '720p']).register(z.globalRegistry, { + description: 'Resolution of the generated video (480p or 720p).', + }), + ), + aspect_ratio: z.optional( + z.enum(['9:16', '1:1', '16:9', 
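// Sketch comparing the 13b dev and distilled two-pass defaults: the knobs are
// the same, but dev defaults to 30 steps per pass (skipping 17 initial / 3
// final) while distilled defaults to 8 (skipping 5 / 1), trading steps for
// speed.
import {
  zSchemaLtxVideo13bDevMulticonditioningInput,
  zSchemaLtxVideo13bDistilledMulticonditioningInput,
} from './zod.gen'

const dev = zSchemaLtxVideo13bDevMulticonditioningInput.parse({ prompt: 'x' })
const distilled = zSchemaLtxVideo13bDistilledMulticonditioningInput.parse({
  prompt: 'x',
})
// dev.first_pass_num_inference_steps === 30,
// dev.second_pass_skip_initial_steps === 17
// distilled.first_pass_num_inference_steps === 8,
// distilled.second_pass_skip_initial_steps === 5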
'auto']).register(z.globalRegistry, { + description: 'The aspect ratio of the video.', + }), + ), + constant_rate_factor: z + .optional( + z.int().gte(20).lte(60).register(z.globalRegistry, { + description: + "The constant rate factor (CRF) to compress input media with. Compressed input media more closely matches the model's training data, which can improve motion quality.", + }), + ) + .default(35), + videos: z + .optional( + z.array(zSchemaVideoConditioningInput).register(z.globalRegistry, { + description: 'Videos to use as conditioning', + }), + ) + .default([]), + first_pass_skip_final_steps: z + .optional( + z.int().gte(0).lte(20).register(z.globalRegistry, { + description: + 'Number of inference steps to skip in the final steps of the first pass. By skipping some steps at the end, the first pass can focus on larger changes instead of smaller details.', + }), + ) + .default(1), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for generation', + }), + ), + }) + .register(z.globalRegistry, { + description: 'Distilled model input', + }) + +/** + * ExtendVideoOutput + */ +export const zSchemaLtxVideo13bDistilledExtendOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * DistilledExtendVideoInput + * + * Distilled model input + */ +export const zSchemaLtxVideo13bDistilledExtendInput = z + .object({ + second_pass_skip_initial_steps: z + .optional( + z.int().gte(1).lte(20).register(z.globalRegistry, { + description: + 'The number of inference steps to skip in the initial steps of the second pass. By skipping some steps at the beginning, the second pass can focus on smaller details instead of larger changes.', + }), + ) + .default(5), + first_pass_num_inference_steps: z + .optional( + z.int().gte(2).lte(20).register(z.globalRegistry, { + description: 'Number of inference steps during the first pass.', + }), + ) + .default(8), + frame_rate: z + .optional( + z.int().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frame rate of the video.', + }), + ) + .default(30), + reverse_video: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to reverse the video.', + }), + ) + .default(false), + prompt: z.string().register(z.globalRegistry, { + description: 'Text prompt to guide generation', + }), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to expand the prompt using a language model.', + }), + ) + .default(false), + loras: z + .optional( + z.array(zSchemaLoRaWeight).register(z.globalRegistry, { + description: 'LoRA weights to use for generation', + }), + ) + .default([]), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + num_frames: z + .optional( + z.int().gte(9).lte(161).register(z.globalRegistry, { + description: 'The number of frames in the video.', + }), + ) + .default(121), + second_pass_num_inference_steps: z + .optional( + z.int().gte(2).lte(20).register(z.globalRegistry, { + description: 'Number of inference steps during the second pass.', + }), + ) + .default(8), + video: zSchemaVideoConditioningInput, + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for generation', + }), + ) 
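+    // Hedged editorial sketch: `z.optional(...)` plus the `.default(...)`
+    // chained below means callers may omit `negative_prompt`, while parsed
+    // output always carries the default string. Example client code (assumed,
+    // not part of this generated file; the payload shape for `video` assumes
+    // zSchemaVideoConditioningInput accepts a `video_url` field):
+    //   const parsed = zSchemaLtxVideo13bDistilledExtendInput.parse({
+    //     prompt: 'continue the shot with a slow dolly forward',
+    //     video: { video_url: 'https://example.com/clip.mp4' },
+    //   })
+    //   parsed.negative_prompt // 'worst quality, inconsistent motion, ...'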
+ .default( + 'worst quality, inconsistent motion, blurry, jittery, distorted', + ), + resolution: z.optional( + z.enum(['480p', '720p']).register(z.globalRegistry, { + description: 'Resolution of the generated video (480p or 720p).', + }), + ), + aspect_ratio: z.optional( + z.enum(['9:16', '1:1', '16:9', 'auto']).register(z.globalRegistry, { + description: 'The aspect ratio of the video.', + }), + ), + constant_rate_factor: z + .optional( + z.int().gte(20).lte(60).register(z.globalRegistry, { + description: + "The constant rate factor (CRF) to compress input media with. Compressed input media more closely matches the model's training data, which can improve motion quality.", + }), + ) + .default(35), + first_pass_skip_final_steps: z + .optional( + z.int().gte(0).lte(20).register(z.globalRegistry, { + description: + 'Number of inference steps to skip in the final steps of the first pass. By skipping some steps at the end, the first pass can focus on larger changes instead of smaller details.', + }), + ) + .default(1), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for generation', + }), + ), + }) + .register(z.globalRegistry, { + description: 'Distilled model input', + }) + +/** + * VideoFile + */ +export const zSchemaVideoFile = z.object({ + file_size: z.optional(z.union([z.int(), z.unknown()])), + duration: z.optional(z.union([z.number(), z.unknown()])), + height: z.optional(z.union([z.int(), z.unknown()])), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), + width: z.optional(z.union([z.int(), z.unknown()])), + fps: z.optional(z.union([z.number(), z.unknown()])), + file_name: z.optional(z.union([z.string(), z.unknown()])), + num_frames: z.optional(z.union([z.int(), z.unknown()])), + content_type: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * WanVACEResponse + */ +export const zSchemaWanVace14bOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation.', + }), + frames_zip: z.optional(z.union([zSchemaFile, z.unknown()])), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaVideoFile, +}) + +/** + * WanVACERequest + */ +export const zSchemaWanVace14bInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide video generation.', + }), + video_url: z.optional(z.union([z.string(), z.unknown()])), + num_interpolated_frames: z + .optional( + z.int().gte(0).lte(5).register(z.globalRegistry, { + description: + 'Number of frames to interpolate between the original frames. A value of 0 means no interpolation.', + }), + ) + .default(0), + temporal_downsample_factor: z + .optional( + z.int().gte(0).lte(5).register(z.globalRegistry, { + description: + 'Temporal downsample factor for the video. This is an integer value that determines how many frames to skip in the video. A value of 0 means no downsampling. For each downsample factor, one upsample factor will automatically be applied.', + }), + ) + .default(0), + first_frame_url: z.optional(z.union([z.string(), z.unknown()])), + ref_image_urls: z.optional( + z.array(z.string()).register(z.globalRegistry, { + description: + 'URLs to source reference image. 
If provided, the model will use this image as reference.', + }), + ), + transparency_mode: z.optional( + z.enum(['content_aware', 'white', 'black']).register(z.globalRegistry, { + description: + 'The transparency mode to apply to the first and last frames. This controls how the transparent areas of the first and last frames are filled.', + }), + ), + num_frames: z + .optional( + z.int().gte(17).lte(241).register(z.globalRegistry, { + description: + 'Number of frames to generate. Must be between 81 to 241 (inclusive).', + }), + ) + .default(81), + auto_downsample_min_fps: z + .optional( + z.number().gte(1).lte(60).register(z.globalRegistry, { + description: + 'The minimum frames per second to downsample the video to. This is used to help determine the auto downsample factor to try and find the lowest detail-preserving downsample factor. The default value is appropriate for most videos, if you are using a video with very fast motion, you may need to increase this value. If your video has a very low amount of motion, you could decrease this value to allow for higher downsampling and thus longer sequences.', + }), + ) + .default(15), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Guidance scale for classifier-free guidance. Higher values encourage the model to generate images closely related to the text prompt.', + }), + ) + .default(5), + sampler: z.optional( + z.enum(['unipc', 'dpm++', 'euler']).register(z.globalRegistry, { + description: 'Sampler to use for video generation.', + }), + ), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + mask_video_url: z.optional(z.union([z.string(), z.unknown()])), + seed: z.optional(z.union([z.int(), z.unknown()])), + interpolator_model: z.optional( + z.enum(['rife', 'film']).register(z.globalRegistry, { + description: + "The model to use for frame interpolation. 
Options are 'rife' or 'film'.", + }), + ), + enable_auto_downsample: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the model will automatically temporally downsample the video to an appropriate frame length for the model, then will interpolate it back to the original frame length.', + }), + ) + .default(false), + preprocess: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to preprocess the input video.', + }), + ) + .default(false), + shift: z + .optional( + z.number().gte(1).lte(15).register(z.globalRegistry, { + description: 'Shift parameter for video generation.', + }), + ) + .default(5), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), + acceleration: z.optional( + z.union([z.enum(['none', 'low', 'regular']), z.unknown()]), + ), + mask_image_url: z.optional(z.union([z.string(), z.unknown()])), + task: z.optional( + z + .enum(['depth', 'pose', 'inpainting', 'outpainting', 'reframe']) + .register(z.globalRegistry, { + description: 'Task type for the model.', + }), + ), + match_input_num_frames: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the number of frames in the generated video will match the number of frames in the input video. If false, the number of frames will be determined by the num_frames parameter.', + }), + ) + .default(false), + frames_per_second: z.optional(z.union([z.int().gte(5).lte(30), z.unknown()])), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(false), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for video generation.', + }), + ) + .default( + 'letterboxing, borders, black bars, bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards', + ), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + return_frames_zip: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, also return a ZIP file containing all generated frames.', + }), + ) + .default(false), + resolution: z.optional( + z + .enum(['auto', '240p', '360p', '480p', '580p', '720p']) + .register(z.globalRegistry, { + description: 'Resolution of the generated video.', + }), + ), + aspect_ratio: z.optional( + z.enum(['auto', '16:9', '1:1', '9:16']).register(z.globalRegistry, { + description: 'Aspect ratio of the generated video.', + }), + ), + match_input_frames_per_second: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the frames per second of the generated video will match the input video. If false, the frames per second will be determined by the frames_per_second parameter.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. 
Higher values give better quality but take longer.', + }), + ) + .default(30), + last_frame_url: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * LipsyncAppOutput + */ +export const zSchemaLipsyncOutput = z.object({ + video: zSchemaFile, +}) + +/** + * LipsyncInput + */ +export const zSchemaLipsyncInput = z.object({ + video_url: z.url().min(1).max(2083), + audio_url: z.url().min(1).max(2083), +}) + +/** + * ReframeOutput + */ +export const zSchemaLumaDreamMachineRay2ReframeOutput = z.object({ + video: zSchemaFile, +}) + +/** + * ReframeVideoRequest + */ +export const zSchemaLumaDreamMachineRay2ReframeInput = z.object({ + prompt: z.optional( + z.string().min(1).max(5000).register(z.globalRegistry, { + description: 'Optional prompt for reframing', + }), + ), + aspect_ratio: z + .enum(['1:1', '16:9', '9:16', '4:3', '3:4', '21:9', '9:21']) + .register(z.globalRegistry, { + description: 'The aspect ratio of the reframed video', + }), + y_start: z.optional( + z.int().register(z.globalRegistry, { + description: 'Start Y coordinate for reframing', + }), + ), + x_end: z.optional( + z.int().register(z.globalRegistry, { + description: 'End X coordinate for reframing', + }), + ), + video_url: z.string().register(z.globalRegistry, { + description: 'URL of the input video to reframe', + }), + y_end: z.optional( + z.int().register(z.globalRegistry, { + description: 'End Y coordinate for reframing', + }), + ), + x_start: z.optional( + z.int().register(z.globalRegistry, { + description: 'Start X coordinate for reframing', + }), + ), + grid_position_y: z.optional( + z.int().register(z.globalRegistry, { + description: 'Y position of the grid for reframing', + }), + ), + grid_position_x: z.optional( + z.int().register(z.globalRegistry, { + description: 'X position of the grid for reframing', + }), + ), + image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'Optional URL of the first frame image for reframing', + }), + ), +}) + +/** + * ReframeOutput + */ +export const zSchemaLumaDreamMachineRay2FlashReframeOutput = z.object({ + video: zSchemaFile, +}) + +/** + * ReframeVideoRequest + */ +export const zSchemaLumaDreamMachineRay2FlashReframeInput = z.object({ + prompt: z.optional( + z.string().min(1).max(5000).register(z.globalRegistry, { + description: 'Optional prompt for reframing', + }), + ), + aspect_ratio: z + .enum(['1:1', '16:9', '9:16', '4:3', '3:4', '21:9', '9:21']) + .register(z.globalRegistry, { + description: 'The aspect ratio of the reframed video', + }), + y_start: z.optional( + z.int().register(z.globalRegistry, { + description: 'Start Y coordinate for reframing', + }), + ), + x_end: z.optional( + z.int().register(z.globalRegistry, { + description: 'End X coordinate for reframing', + }), + ), + video_url: z.string().register(z.globalRegistry, { + description: 'URL of the input video to reframe', + }), + y_end: z.optional( + z.int().register(z.globalRegistry, { + description: 'End Y coordinate for reframing', + }), + ), + x_start: z.optional( + z.int().register(z.globalRegistry, { + description: 'Start X coordinate for reframing', + }), + ), + grid_position_y: z.optional( + z.int().register(z.globalRegistry, { + description: 'Y position of the grid for reframing', + }), + ), + grid_position_x: z.optional( + z.int().register(z.globalRegistry, { + description: 'X position of the grid for reframing', + }), + ), + image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'Optional URL of the first frame image for reframing', + }), + ), 
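+  // Hedged editorial sketch: minimal validation with this schema. Only
+  // `video_url` and `aspect_ratio` are required; the endpoint id below is an
+  // assumption inferred from the export name, not confirmed by this file:
+  //   const input = zSchemaLumaDreamMachineRay2FlashReframeInput.parse({
+  //     video_url: 'https://example.com/source.mp4',
+  //     aspect_ratio: '16:9',
+  //   })
+  //   await fal.subscribe('fal-ai/luma-dream-machine/ray-2-flash/reframe', { input })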
+})
+
+/**
+ * WanT2VResponse
+ */
+export const zSchemaWanVace13bOutput = z.object({
+  seed: z.int().register(z.globalRegistry, {
+    description: 'The seed used for generation.',
+  }),
+  video: zSchemaFile,
+})
+
+/**
+ * WanT2VRequest
+ */
+export const zSchemaWanVace13bInput = z.object({
+  shift: z
+    .optional(
+      z.number().gte(1).lte(10).register(z.globalRegistry, {
+        description: 'Shift parameter for video generation.',
+      }),
+    )
+    .default(5),
+  video_url: z.optional(
+    z.string().register(z.globalRegistry, {
+      description:
+        'URL to the source video file. If provided, the model will use this video as a reference.',
+    }),
+  ),
+  prompt: z.string().register(z.globalRegistry, {
+    description: 'The text prompt to guide video generation.',
+  }),
+  mask_image_url: z.optional(
+    z.string().register(z.globalRegistry, {
+      description:
+        'URL to the guiding mask file. If provided, the model will use this mask as a reference to create the masked video, and the mask video URL will be ignored.',
+    }),
+  ),
+  task: z.optional(
+    z.enum(['depth', 'inpainting', 'pose']).register(z.globalRegistry, {
+      description: 'Task type for the model.',
+    }),
+  ),
+  frames_per_second: z
+    .optional(
+      z.int().gte(5).lte(24).register(z.globalRegistry, {
+        description:
+          'Frames per second of the generated video. Must be between 5 and 24.',
+      }),
+    )
+    .default(16),
+  ref_image_urls: z.optional(
+    z.array(z.string()).register(z.globalRegistry, {
+      description:
+        'URLs to source reference images. If provided, the model will use these images as reference.',
+    }),
+  ),
+  enable_safety_checker: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description: 'If set to true, the safety checker will be enabled.',
+      }),
+    )
+    .default(false),
+  num_frames: z
+    .optional(
+      z.int().gte(81).lte(240).register(z.globalRegistry, {
+        description:
+          'Number of frames to generate. Must be between 81 and 240 (inclusive). Applies only when reference images are the sole input; if a source video or mask video is provided, the output length matches the source, up to 241 frames.',
+      }),
+    )
+    .default(81),
+  negative_prompt: z
+    .optional(
+      z.string().register(z.globalRegistry, {
+        description: 'Negative prompt for video generation.',
+      }),
+    )
+    .default(
+      'bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards',
+    ),
+  resolution: z.optional(
+    z.enum(['480p', '580p', '720p']).register(z.globalRegistry, {
+      description: 'Resolution of the generated video (480p, 580p, or 720p).',
+    }),
+  ),
+  aspect_ratio: z.optional(
+    z.enum(['auto', '9:16', '16:9']).register(z.globalRegistry, {
+      description: 'Aspect ratio of the generated video (auto, 16:9, or 9:16).',
+    }),
+  ),
+  mask_video_url: z.optional(
+    z.string().register(z.globalRegistry, {
+      description:
+        'URL to the source mask file. If provided, the model will use this mask as a reference.',
+    }),
+  ),
+  seed: z.optional(
+    z.int().register(z.globalRegistry, {
+      description:
+        'Random seed for reproducibility. If not set, a random seed is chosen.',
+    }),
+  ),
+  num_inference_steps: z
+    .optional(
+      z.int().gte(2).lte(40).register(z.globalRegistry, {
+        description:
+          'Number of inference steps for sampling. 
Higher values give better quality but take longer.', + }), + ) + .default(30), + preprocess: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to preprocess the input video.', + }), + ) + .default(false), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), +}) + +/** + * CombineOutput + */ +export const zSchemaFfmpegApiMergeAudioVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * CombineInput + */ +export const zSchemaFfmpegApiMergeAudioVideoInput = z.object({ + video_url: z.string().register(z.globalRegistry, { + description: 'URL of the video file to use as the video track', + }), + start_offset: z + .optional( + z.number().gte(0).register(z.globalRegistry, { + description: + 'Offset in seconds for when the audio should start relative to the video', + }), + ) + .default(0), + audio_url: z.string().register(z.globalRegistry, { + description: 'URL of the audio file to use as the audio track', + }), +}) + +/** + * DWPoseVideoOutput + */ +export const zSchemaDwposeVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * DWPoseVideoInput + */ +export const zSchemaDwposeVideoInput = z.object({ + video_url: z.string().register(z.globalRegistry, { + description: 'URL of video to be used for pose estimation', + }), + draw_mode: z.optional( + z + .enum([ + 'full-pose', + 'body-pose', + 'face-pose', + 'hand-pose', + 'face-hand-mask', + 'face-mask', + 'hand-mask', + ]) + .register(z.globalRegistry, { + description: + "Mode of drawing the pose on the video. Options are: 'full-pose', 'body-pose', 'face-pose', 'hand-pose', 'face-hand-mask', 'face-mask', 'hand-mask'.", + }), + ), +}) + +/** + * WanVACEDepthResponse + */ +export const zSchemaWanVace14bDepthOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation.', + }), + frames_zip: z.optional(z.union([zSchemaFile, z.unknown()])), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaVideoFile, +}) + +/** + * WanVACEDepthRequest + */ +export const zSchemaWanVace14bDepthInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide video generation.', + }), + video_url: z.string().register(z.globalRegistry, { + description: 'URL to the source video file. Required for depth task.', + }), + num_interpolated_frames: z + .optional( + z.int().gte(0).lte(5).register(z.globalRegistry, { + description: + 'Number of frames to interpolate between the original frames. A value of 0 means no interpolation.', + }), + ) + .default(0), + temporal_downsample_factor: z + .optional( + z.int().gte(0).lte(5).register(z.globalRegistry, { + description: + 'Temporal downsample factor for the video. This is an integer value that determines how many frames to skip in the video. A value of 0 means no downsampling. For each downsample factor, one upsample factor will automatically be applied.', + }), + ) + .default(0), + first_frame_url: z.optional(z.union([z.string(), z.unknown()])), + ref_image_urls: z.optional( + z.array(z.string()).register(z.globalRegistry, { + description: + 'URLs to source reference image. 
If provided, the model will use this image as reference.', + }), + ), + transparency_mode: z.optional( + z.enum(['content_aware', 'white', 'black']).register(z.globalRegistry, { + description: + 'The transparency mode to apply to the first and last frames. This controls how the transparent areas of the first and last frames are filled.', + }), + ), + num_frames: z + .optional( + z.int().gte(17).lte(241).register(z.globalRegistry, { + description: + 'Number of frames to generate. Must be between 81 to 241 (inclusive).', + }), + ) + .default(81), + auto_downsample_min_fps: z + .optional( + z.number().gte(1).lte(60).register(z.globalRegistry, { + description: + 'The minimum frames per second to downsample the video to. This is used to help determine the auto downsample factor to try and find the lowest detail-preserving downsample factor. The default value is appropriate for most videos, if you are using a video with very fast motion, you may need to increase this value. If your video has a very low amount of motion, you could decrease this value to allow for higher downsampling and thus longer sequences.', + }), + ) + .default(15), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Guidance scale for classifier-free guidance. Higher values encourage the model to generate images closely related to the text prompt.', + }), + ) + .default(5), + sampler: z.optional( + z.enum(['unipc', 'dpm++', 'euler']).register(z.globalRegistry, { + description: 'Sampler to use for video generation.', + }), + ), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), + seed: z.optional(z.union([z.int(), z.unknown()])), + interpolator_model: z.optional( + z.enum(['rife', 'film']).register(z.globalRegistry, { + description: + "The model to use for frame interpolation. Options are 'rife' or 'film'.", + }), + ), + enable_auto_downsample: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the model will automatically temporally downsample the video to an appropriate frame length for the model, then will interpolate it back to the original frame length.', + }), + ) + .default(false), + preprocess: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to preprocess the input video.', + }), + ) + .default(false), + shift: z + .optional( + z.number().gte(1).lte(15).register(z.globalRegistry, { + description: 'Shift parameter for video generation.', + }), + ) + .default(5), + acceleration: z.optional( + z.union([z.enum(['none', 'low', 'regular']), z.unknown()]), + ), + match_input_num_frames: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the number of frames in the generated video will match the number of frames in the input video. 
If false, the number of frames will be determined by the num_frames parameter.', + }), + ) + .default(false), + frames_per_second: z.optional(z.union([z.int().gte(5).lte(30), z.unknown()])), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(false), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for video generation.', + }), + ) + .default( + 'letterboxing, borders, black bars, bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards', + ), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + return_frames_zip: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, also return a ZIP file containing all generated frames.', + }), + ) + .default(false), + resolution: z.optional( + z + .enum(['auto', '240p', '360p', '480p', '580p', '720p']) + .register(z.globalRegistry, { + description: 'Resolution of the generated video.', + }), + ), + aspect_ratio: z.optional( + z.enum(['auto', '16:9', '1:1', '9:16']).register(z.globalRegistry, { + description: 'Aspect ratio of the generated video.', + }), + ), + match_input_frames_per_second: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the frames per second of the generated video will match the input video. If false, the frames per second will be determined by the frames_per_second parameter.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. Higher values give better quality but take longer.', + }), + ) + .default(30), + last_frame_url: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * WanVACEPoseResponse + */ +export const zSchemaWanVace14bPoseOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation.', + }), + frames_zip: z.optional(z.union([zSchemaFile, z.unknown()])), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaVideoFile, +}) + +/** + * WanVACEPoseRequest + */ +export const zSchemaWanVace14bPoseInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The text prompt to guide video generation. For pose task, the prompt should describe the desired pose and action of the subject in the video.', + }), + video_url: z.string().register(z.globalRegistry, { + description: 'URL to the source video file. Required for pose task.', + }), + num_interpolated_frames: z + .optional( + z.int().gte(0).lte(5).register(z.globalRegistry, { + description: + 'Number of frames to interpolate between the original frames. A value of 0 means no interpolation.', + }), + ) + .default(0), + temporal_downsample_factor: z + .optional( + z.int().gte(0).lte(5).register(z.globalRegistry, { + description: + 'Temporal downsample factor for the video. 
This is an integer value that determines how many frames to skip in the video. A value of 0 means no downsampling. For each downsample factor, one upsample factor will automatically be applied.', + }), + ) + .default(0), + first_frame_url: z.optional(z.union([z.string(), z.unknown()])), + ref_image_urls: z.optional( + z.array(z.string()).register(z.globalRegistry, { + description: + 'URLs to source reference image. If provided, the model will use this image as reference.', + }), + ), + transparency_mode: z.optional( + z.enum(['content_aware', 'white', 'black']).register(z.globalRegistry, { + description: + 'The transparency mode to apply to the first and last frames. This controls how the transparent areas of the first and last frames are filled.', + }), + ), + num_frames: z + .optional( + z.int().gte(17).lte(241).register(z.globalRegistry, { + description: + 'Number of frames to generate. Must be between 81 to 241 (inclusive).', + }), + ) + .default(81), + auto_downsample_min_fps: z + .optional( + z.number().gte(1).lte(60).register(z.globalRegistry, { + description: + 'The minimum frames per second to downsample the video to. This is used to help determine the auto downsample factor to try and find the lowest detail-preserving downsample factor. The default value is appropriate for most videos, if you are using a video with very fast motion, you may need to increase this value. If your video has a very low amount of motion, you could decrease this value to allow for higher downsampling and thus longer sequences.', + }), + ) + .default(15), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Guidance scale for classifier-free guidance. Higher values encourage the model to generate images closely related to the text prompt.', + }), + ) + .default(5), + sampler: z.optional( + z.enum(['unipc', 'dpm++', 'euler']).register(z.globalRegistry, { + description: 'Sampler to use for video generation.', + }), + ), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), + seed: z.optional(z.union([z.int(), z.unknown()])), + interpolator_model: z.optional( + z.enum(['rife', 'film']).register(z.globalRegistry, { + description: + "The model to use for frame interpolation. 
Options are 'rife' or 'film'.", + }), + ), + enable_auto_downsample: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the model will automatically temporally downsample the video to an appropriate frame length for the model, then will interpolate it back to the original frame length.', + }), + ) + .default(false), + preprocess: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to preprocess the input video.', + }), + ) + .default(false), + shift: z + .optional( + z.number().gte(1).lte(15).register(z.globalRegistry, { + description: 'Shift parameter for video generation.', + }), + ) + .default(5), + acceleration: z.optional( + z.union([z.enum(['none', 'low', 'regular']), z.unknown()]), + ), + match_input_num_frames: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the number of frames in the generated video will match the number of frames in the input video. If false, the number of frames will be determined by the num_frames parameter.', + }), + ) + .default(false), + frames_per_second: z.optional(z.union([z.int().gte(5).lte(30), z.unknown()])), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(false), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for video generation.', + }), + ) + .default( + 'letterboxing, borders, black bars, bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards', + ), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + return_frames_zip: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, also return a ZIP file containing all generated frames.', + }), + ) + .default(false), + resolution: z.optional( + z + .enum(['auto', '240p', '360p', '480p', '580p', '720p']) + .register(z.globalRegistry, { + description: 'Resolution of the generated video.', + }), + ), + aspect_ratio: z.optional( + z.enum(['auto', '16:9', '1:1', '9:16']).register(z.globalRegistry, { + description: 'Aspect ratio of the generated video.', + }), + ), + match_input_frames_per_second: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the frames per second of the generated video will match the input video. If false, the frames per second will be determined by the frames_per_second parameter.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. 
Higher values give better quality but take longer.', + }), + ) + .default(30), + last_frame_url: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * WanVACEInpaintingResponse + */ +export const zSchemaWanVace14bInpaintingOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation.', + }), + frames_zip: z.optional(z.union([zSchemaFile, z.unknown()])), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaVideoFile, +}) + +/** + * WanVACEInpaintingRequest + */ +export const zSchemaWanVace14bInpaintingInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide video generation.', + }), + video_url: z.string().register(z.globalRegistry, { + description: 'URL to the source video file. Required for inpainting.', + }), + num_interpolated_frames: z + .optional( + z.int().gte(0).lte(5).register(z.globalRegistry, { + description: + 'Number of frames to interpolate between the original frames. A value of 0 means no interpolation.', + }), + ) + .default(0), + temporal_downsample_factor: z + .optional( + z.int().gte(0).lte(5).register(z.globalRegistry, { + description: + 'Temporal downsample factor for the video. This is an integer value that determines how many frames to skip in the video. A value of 0 means no downsampling. For each downsample factor, one upsample factor will automatically be applied.', + }), + ) + .default(0), + first_frame_url: z.optional(z.union([z.string(), z.unknown()])), + ref_image_urls: z.optional( + z.array(z.string()).register(z.globalRegistry, { + description: + 'Urls to source reference image. If provided, the model will use this image as reference.', + }), + ), + transparency_mode: z.optional( + z.enum(['content_aware', 'white', 'black']).register(z.globalRegistry, { + description: + 'The transparency mode to apply to the first and last frames. This controls how the transparent areas of the first and last frames are filled.', + }), + ), + num_frames: z + .optional( + z.int().gte(17).lte(241).register(z.globalRegistry, { + description: + 'Number of frames to generate. Must be between 81 to 241 (inclusive).', + }), + ) + .default(81), + auto_downsample_min_fps: z + .optional( + z.number().gte(1).lte(60).register(z.globalRegistry, { + description: + 'The minimum frames per second to downsample the video to. This is used to help determine the auto downsample factor to try and find the lowest detail-preserving downsample factor. The default value is appropriate for most videos, if you are using a video with very fast motion, you may need to increase this value. If your video has a very low amount of motion, you could decrease this value to allow for higher downsampling and thus longer sequences.', + }), + ) + .default(15), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Guidance scale for classifier-free guidance. 
Higher values encourage the model to generate images closely related to the text prompt.', + }), + ) + .default(5), + sampler: z.optional( + z.enum(['unipc', 'dpm++', 'euler']).register(z.globalRegistry, { + description: 'Sampler to use for video generation.', + }), + ), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + mask_video_url: z.union([z.string(), z.unknown()]), + seed: z.optional(z.union([z.int(), z.unknown()])), + interpolator_model: z.optional( + z.enum(['rife', 'film']).register(z.globalRegistry, { + description: + "The model to use for frame interpolation. Options are 'rife' or 'film'.", + }), + ), + enable_auto_downsample: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the model will automatically temporally downsample the video to an appropriate frame length for the model, then will interpolate it back to the original frame length.', + }), + ) + .default(false), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), + shift: z + .optional( + z.number().gte(1).lte(15).register(z.globalRegistry, { + description: 'Shift parameter for video generation.', + }), + ) + .default(5), + preprocess: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to preprocess the input video.', + }), + ) + .default(false), + acceleration: z.optional( + z.union([z.enum(['none', 'low', 'regular']), z.unknown()]), + ), + mask_image_url: z.optional(z.union([z.string(), z.unknown()])), + match_input_num_frames: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the number of frames in the generated video will match the number of frames in the input video. 
If false, the number of frames will be determined by the num_frames parameter.', + }), + ) + .default(false), + frames_per_second: z.optional(z.union([z.int().gte(5).lte(30), z.unknown()])), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(false), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for video generation.', + }), + ) + .default( + 'letterboxing, borders, black bars, bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards', + ), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + return_frames_zip: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, also return a ZIP file containing all generated frames.', + }), + ) + .default(false), + resolution: z.optional( + z + .enum(['auto', '240p', '360p', '480p', '580p', '720p']) + .register(z.globalRegistry, { + description: 'Resolution of the generated video.', + }), + ), + aspect_ratio: z.optional( + z.enum(['auto', '16:9', '1:1', '9:16']).register(z.globalRegistry, { + description: 'Aspect ratio of the generated video.', + }), + ), + match_input_frames_per_second: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the frames per second of the generated video will match the input video. If false, the frames per second will be determined by the frames_per_second parameter.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. Higher values give better quality but take longer.', + }), + ) + .default(30), + last_frame_url: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * WanVACEOutpaintingResponse + */ +export const zSchemaWanVace14bOutpaintingOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation.', + }), + frames_zip: z.optional(z.union([zSchemaFile, z.unknown()])), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaVideoFile, +}) + +/** + * WanVACEOutpaintingRequest + */ +export const zSchemaWanVace14bOutpaintingInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide video generation.', + }), + video_url: z.string().register(z.globalRegistry, { + description: 'URL to the source video file. Required for outpainting.', + }), + num_interpolated_frames: z + .optional( + z.int().gte(0).lte(5).register(z.globalRegistry, { + description: + 'Number of frames to interpolate between the original frames. A value of 0 means no interpolation.', + }), + ) + .default(0), + temporal_downsample_factor: z + .optional( + z.int().gte(0).lte(5).register(z.globalRegistry, { + description: + 'Temporal downsample factor for the video. This is an integer value that determines how many frames to skip in the video. 
A value of 0 means no downsampling. For each downsample factor, one upsample factor will automatically be applied.', + }), + ) + .default(0), + first_frame_url: z.optional(z.union([z.string(), z.unknown()])), + ref_image_urls: z.optional( + z.array(z.string()).register(z.globalRegistry, { + description: + 'URLs to source reference image. If provided, the model will use this image as reference.', + }), + ), + expand_ratio: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Amount of expansion. This is a float value between 0 and 1, where 0.25 adds 25% to the original video size on the specified sides.', + }), + ) + .default(0.25), + transparency_mode: z.optional( + z.enum(['content_aware', 'white', 'black']).register(z.globalRegistry, { + description: + 'The transparency mode to apply to the first and last frames. This controls how the transparent areas of the first and last frames are filled.', + }), + ), + num_frames: z + .optional( + z.int().gte(17).lte(241).register(z.globalRegistry, { + description: + 'Number of frames to generate. Must be between 81 to 241 (inclusive).', + }), + ) + .default(81), + auto_downsample_min_fps: z + .optional( + z.number().gte(1).lte(60).register(z.globalRegistry, { + description: + 'The minimum frames per second to downsample the video to. This is used to help determine the auto downsample factor to try and find the lowest detail-preserving downsample factor. The default value is appropriate for most videos, if you are using a video with very fast motion, you may need to increase this value. If your video has a very low amount of motion, you could decrease this value to allow for higher downsampling and thus longer sequences.', + }), + ) + .default(15), + expand_bottom: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to expand the video to the bottom.', + }), + ) + .default(false), + sampler: z.optional( + z.enum(['unipc', 'dpm++', 'euler']).register(z.globalRegistry, { + description: 'Sampler to use for video generation.', + }), + ), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Guidance scale for classifier-free guidance. Higher values encourage the model to generate images closely related to the text prompt.', + }), + ) + .default(5), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), + seed: z.optional(z.union([z.int(), z.unknown()])), + interpolator_model: z.optional( + z.enum(['rife', 'film']).register(z.globalRegistry, { + description: + "The model to use for frame interpolation. 
Options are 'rife' or 'film'.", + }), + ), + enable_auto_downsample: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the model will automatically temporally downsample the video to an appropriate frame length for the model, then will interpolate it back to the original frame length.', + }), + ) + .default(false), + expand_top: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to expand the video to the top.', + }), + ) + .default(false), + shift: z + .optional( + z.number().gte(1).lte(15).register(z.globalRegistry, { + description: 'Shift parameter for video generation.', + }), + ) + .default(5), + expand_left: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to expand the video to the left.', + }), + ) + .default(false), + acceleration: z.optional( + z.union([z.enum(['none', 'low', 'regular']), z.unknown()]), + ), + match_input_num_frames: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the number of frames in the generated video will match the number of frames in the input video. If false, the number of frames will be determined by the num_frames parameter.', + }), + ) + .default(false), + frames_per_second: z.optional(z.union([z.int().gte(5).lte(30), z.unknown()])), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(false), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for video generation.', + }), + ) + .default( + 'letterboxing, borders, black bars, bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards', + ), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + return_frames_zip: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, also return a ZIP file containing all generated frames.', + }), + ) + .default(false), + resolution: z.optional( + z + .enum(['auto', '240p', '360p', '480p', '580p', '720p']) + .register(z.globalRegistry, { + description: 'Resolution of the generated video.', + }), + ), + expand_right: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to expand the video to the right.', + }), + ) + .default(false), + aspect_ratio: z.optional( + z.enum(['auto', '16:9', '1:1', '9:16']).register(z.globalRegistry, { + description: 'Aspect ratio of the generated video.', + }), + ), + match_input_frames_per_second: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the frames per second of the generated video will match the input video. If false, the frames per second will be determined by the frames_per_second parameter.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. 
Higher values give better quality but take longer.', + }), + ) + .default(30), + last_frame_url: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * WanVACEReframeResponse + */ +export const zSchemaWanVace14bReframeOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation.', + }), + frames_zip: z.optional(z.union([zSchemaFile, z.unknown()])), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaVideoFile, +}) + +/** + * WanVACEReframeRequest + */ +export const zSchemaWanVace14bReframeInput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'The text prompt to guide video generation. Optional for reframing.', + }), + ) + .default(''), + video_url: z.string().register(z.globalRegistry, { + description: + 'URL to the source video file. This video will be used as a reference for the reframe task.', + }), + num_interpolated_frames: z + .optional( + z.int().gte(0).lte(5).register(z.globalRegistry, { + description: + 'Number of frames to interpolate between the original frames. A value of 0 means no interpolation.', + }), + ) + .default(0), + temporal_downsample_factor: z + .optional( + z.int().gte(0).lte(5).register(z.globalRegistry, { + description: + 'Temporal downsample factor for the video. This is an integer value that determines how many frames to skip in the video. A value of 0 means no downsampling. For each downsample factor, one upsample factor will automatically be applied.', + }), + ) + .default(0), + first_frame_url: z.optional(z.union([z.string(), z.unknown()])), + transparency_mode: z.optional( + z.enum(['content_aware', 'white', 'black']).register(z.globalRegistry, { + description: + 'The transparency mode to apply to the first and last frames. This controls how the transparent areas of the first and last frames are filled.', + }), + ), + num_frames: z + .optional( + z.int().gte(17).lte(241).register(z.globalRegistry, { + description: + 'Number of frames to generate. Must be between 81 to 241 (inclusive).', + }), + ) + .default(81), + trim_borders: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to trim borders from the video.', + }), + ) + .default(true), + auto_downsample_min_fps: z + .optional( + z.number().gte(1).lte(60).register(z.globalRegistry, { + description: + 'The minimum frames per second to downsample the video to. This is used to help determine the auto downsample factor to try and find the lowest detail-preserving downsample factor. The default value is appropriate for most videos, if you are using a video with very fast motion, you may need to increase this value. If your video has a very low amount of motion, you could decrease this value to allow for higher downsampling and thus longer sequences.', + }), + ) + .default(15), + sampler: z.optional( + z.enum(['unipc', 'dpm++', 'euler']).register(z.globalRegistry, { + description: 'Sampler to use for video generation.', + }), + ), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Guidance scale for classifier-free guidance. 
Higher values encourage the model to generate images closely related to the text prompt.', + }), + ) + .default(5), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), + seed: z.optional(z.union([z.int(), z.unknown()])), + interpolator_model: z.optional( + z.enum(['rife', 'film']).register(z.globalRegistry, { + description: + "The model to use for frame interpolation. Options are 'rife' or 'film'.", + }), + ), + enable_auto_downsample: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the model will automatically temporally downsample the video to an appropriate frame length for the model, then will interpolate it back to the original frame length.', + }), + ) + .default(false), + shift: z + .optional( + z.number().gte(1).lte(15).register(z.globalRegistry, { + description: 'Shift parameter for video generation.', + }), + ) + .default(5), + acceleration: z.optional( + z.union([z.enum(['none', 'low', 'regular']), z.unknown()]), + ), + zoom_factor: z + .optional( + z.number().gte(0).lte(0.9).register(z.globalRegistry, { + description: + 'Zoom factor for the video. When this value is greater than 0, the video will be zoomed in by this factor (in relation to the canvas size,) cutting off the edges of the video. A value of 0 means no zoom.', + }), + ) + .default(0), + match_input_num_frames: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the number of frames in the generated video will match the number of frames in the input video. 
If false, the number of frames will be determined by the num_frames parameter.', + }), + ) + .default(true), + frames_per_second: z.optional(z.union([z.int().gte(5).lte(30), z.unknown()])), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(false), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for video generation.', + }), + ) + .default( + 'letterboxing, borders, black bars, bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards', + ), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + return_frames_zip: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, also return a ZIP file containing all generated frames.', + }), + ) + .default(false), + resolution: z.optional( + z + .enum(['auto', '240p', '360p', '480p', '580p', '720p']) + .register(z.globalRegistry, { + description: 'Resolution of the generated video.', + }), + ), + aspect_ratio: z.optional( + z.enum(['auto', '16:9', '1:1', '9:16']).register(z.globalRegistry, { + description: 'Aspect ratio of the generated video.', + }), + ), + match_input_frames_per_second: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the frames per second of the generated video will match the input video. If false, the frames per second will be determined by the frames_per_second parameter.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. 
Higher values give better quality but take longer.', + }), + ) + .default(30), + last_frame_url: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * ModifyOutput + */ +export const zSchemaLumaDreamMachineRay2ModifyOutput = z.object({ + video: zSchemaFile, +}) + +/** + * ModifyVideoRequest + */ +export const zSchemaLumaDreamMachineRay2ModifyInput = z.object({ + prompt: z.optional( + z.string().min(3).max(5000).register(z.globalRegistry, { + description: 'Instruction for modifying the video', + }), + ), + video_url: z.string().register(z.globalRegistry, { + description: 'URL of the input video to modify', + }), + mode: z.optional( + z + .enum([ + 'adhere_1', + 'adhere_2', + 'adhere_3', + 'flex_1', + 'flex_2', + 'flex_3', + 'reimagine_1', + 'reimagine_2', + 'reimagine_3', + ]) + .register(z.globalRegistry, { + description: + 'Amount of modification to apply to the video, adhere_1 is the least amount of modification, reimagine_3 is the most', + }), + ), + image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'Optional URL of the first frame image for modification', + }), + ), +}) + +/** + * LipsyncOutput + */ +export const zSchemaPixverseLipsyncOutput = z.object({ + video: zSchemaFile, +}) + +/** + * LipsyncRequest + */ +export const zSchemaPixverseLipsyncInput = z.object({ + text: z.optional( + z.string().register(z.globalRegistry, { + description: 'Text content for TTS when audio_url is not provided', + }), + ), + video_url: z.string().register(z.globalRegistry, { + description: 'URL of the input video', + }), + audio_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'URL of the input audio. If not provided, TTS will be used.', + }), + ), + voice_id: z.optional( + z + .enum([ + 'Emily', + 'James', + 'Isabella', + 'Liam', + 'Chloe', + 'Adrian', + 'Harper', + 'Ava', + 'Sophia', + 'Julia', + 'Mason', + 'Jack', + 'Oliver', + 'Ethan', + 'Auto', + ]) + .register(z.globalRegistry, { + description: 'Voice to use for TTS when audio_url is not provided', + }), + ), +}) + +/** + * ExtendOutput + */ +export const zSchemaPixverseExtendOutput = z.object({ + video: zSchemaFile, +}) + +/** + * ExtendRequest + */ +export const zSchemaPixverseExtendInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Prompt describing how to extend the video', + }), + resolution: z.optional( + z.enum(['360p', '540p', '720p', '1080p']).register(z.globalRegistry, { + description: 'The resolution of the generated video', + }), + ), + duration: z.optional( + z.enum(['5', '8']).register(z.globalRegistry, { + description: + 'The duration of the generated video in seconds. 
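// ---------------------------------------------------------------------------
// Editorial sketch (not part of the generated diff): the generated schemas
// above double as a source of static types. `z.input` gives the pre-parse
// shape, so fields with `.default(...)` stay optional. `z` and the schema
// consts are assumed in scope as in this file; the payload values are
// illustrative only.
type Ray2ModifyInput = z.input<typeof zSchemaLumaDreamMachineRay2ModifyInput>
const modifyPayload: Ray2ModifyInput = {
  video_url: 'https://example.com/clip.mp4',
  prompt: 'make it snow over the whole scene',
  mode: 'flex_2',
}
// parse() validates and returns the typed value, throwing on bad input.
zSchemaLumaDreamMachineRay2ModifyInput.parse(modifyPayload)
// ---------------------------------------------------------------------------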
1080p videos are limited to 5 seconds', + }), + ), + style: z.optional( + z + .enum(['anime', '3d_animation', 'clay', 'comic', 'cyberpunk']) + .register(z.globalRegistry, { + description: 'The style of the extended video', + }), + ), + video_url: z.string().register(z.globalRegistry, { + description: 'URL of the input video to extend', + }), + model: z.optional( + z + .enum(['v3.5', 'v4', 'v4.5', 'v5', 'v5.5', 'v5.6']) + .register(z.globalRegistry, { + description: 'The model version to use for generation', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for generation', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt to be used for the generation', + }), + ) + .default(''), +}) + +/** + * ExtendOutput + */ +export const zSchemaPixverseExtendFastOutput = z.object({ + video: zSchemaFile, +}) + +/** + * FastExtendRequest + */ +export const zSchemaPixverseExtendFastInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Prompt describing how to extend the video', + }), + resolution: z.optional( + z.enum(['360p', '540p', '720p']).register(z.globalRegistry, { + description: + "The resolution of the generated video. Fast mode doesn't support 1080p", + }), + ), + video_url: z.string().register(z.globalRegistry, { + description: 'URL of the input video to extend', + }), + style: z.optional( + z + .enum(['anime', '3d_animation', 'clay', 'comic', 'cyberpunk']) + .register(z.globalRegistry, { + description: 'The style of the extended video', + }), + ), + model: z.optional( + z + .enum(['v3.5', 'v4', 'v4.5', 'v5', 'v5.5', 'v5.6']) + .register(z.globalRegistry, { + description: 'The model version to use for generation', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for generation', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt to be used for the generation', + }), + ) + .default(''), +}) + +/** + * Output + */ +export const zSchemaThinksoundOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used to generate the audio.', + }), + video: zSchemaFile, +}) + +/** + * Input + */ +export const zSchemaThinksoundInput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'A prompt to guide the audio generation. 
If not provided, it will be extracted from the video.', + }), + ) + .default(''), + video_url: z.string().register(z.globalRegistry, { + description: 'The URL of the video to generate the audio for.', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the random number generator', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(2).lte(100).register(z.globalRegistry, { + description: 'The number of inference steps for audio generation.', + }), + ) + .default(24), + cfg_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: 'The classifier-free guidance scale for audio generation.', + }), + ) + .default(5), +}) + +/** + * AudioOutput + */ +export const zSchemaThinksoundAudioOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used to generate the audio.', + }), + audio: zSchemaFile, +}) + +/** + * Input + */ +export const zSchemaThinksoundAudioInput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'A prompt to guide the audio generation. If not provided, it will be extracted from the video.', + }), + ) + .default(''), + video_url: z.string().register(z.globalRegistry, { + description: 'The URL of the video to generate the audio for.', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the random number generator', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(2).lte(100).register(z.globalRegistry, { + description: 'The number of inference steps for audio generation.', + }), + ) + .default(24), + cfg_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: 'The classifier-free guidance scale for audio generation.', + }), + ) + .default(5), +}) + +/** + * SoundEffectOutput + */ +export const zSchemaPixverseSoundEffectsOutput = z.object({ + video: zSchemaFile, +}) + +/** + * SoundEffectRequest + */ +export const zSchemaPixverseSoundEffectsInput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'Description of the sound effect to generate. If empty, a random sound effect will be generated', + }), + ) + .default(''), + video_url: z.string().register(z.globalRegistry, { + description: 'URL of the input video to add sound effects to', + }), + original_sound_switch: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to keep the original audio from the video', + }), + ) + .default(false), +}) + +/** + * MultiConditioningVideoOutput + */ +export const zSchemaLtxv13B098DistilledMulticonditioningOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * DistilledMultiConditioningVideoInput + * + * Distilled model input + */ +export const zSchemaLtxv13B098DistilledMulticonditioningInput = z + .object({ + second_pass_skip_initial_steps: z + .optional( + z.int().gte(1).lte(11).register(z.globalRegistry, { + description: + 'The number of inference steps to skip in the initial steps of the second pass. 
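// ---------------------------------------------------------------------------
// Editorial sketch (not part of the generated diff): validating a ThinkSound
// payload before queueing it. `fal.subscribe` follows the @fal-ai/client API;
// the endpoint id 'fal-ai/thinksound' is assumed from the schema name and the
// URL is illustrative.
import { fal } from '@fal-ai/client'

const thinksoundParsed = zSchemaThinksoundInput.safeParse({
  video_url: 'https://example.com/clip.mp4',
})
if (thinksoundParsed.success) {
  // Defaults (prompt: '', num_inference_steps: 24, cfg_scale: 5) are filled in.
  const result = await fal.subscribe('fal-ai/thinksound', {
    input: thinksoundParsed.data,
  })
  console.log(result.data)
}
// ---------------------------------------------------------------------------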
By skipping some steps at the beginning, the second pass can focus on smaller details instead of larger changes.', + }), + ) + .default(5), + first_pass_num_inference_steps: z + .optional( + z.int().gte(2).lte(12).register(z.globalRegistry, { + description: 'Number of inference steps during the first pass.', + }), + ) + .default(8), + frame_rate: z + .optional( + z.int().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frame rate of the video.', + }), + ) + .default(24), + reverse_video: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to reverse the video.', + }), + ) + .default(false), + prompt: z.string().register(z.globalRegistry, { + description: 'Text prompt to guide generation', + }), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to expand the prompt using a language model.', + }), + ) + .default(false), + temporal_adain_factor: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The factor for adaptive instance normalization (AdaIN) applied to generated video chunks after the first. This can help deal with a gradual increase in saturation/contrast in the generated video by normalizing the color distribution across the video. A high value will ensure the color distribution is more consistent across the video, while a low value will allow for more variation in color distribution.', + }), + ) + .default(0.5), + loras: z + .optional( + z.array(zSchemaLoRaWeight).register(z.globalRegistry, { + description: 'LoRA weights to use for generation', + }), + ) + .default([]), + images: z + .optional( + z.array(zSchemaImageConditioningInput).register(z.globalRegistry, { + description: 'URL of images to use as conditioning', + }), + ) + .default([]), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + num_frames: z + .optional( + z.int().gte(9).lte(1441).register(z.globalRegistry, { + description: 'The number of frames in the video.', + }), + ) + .default(121), + second_pass_num_inference_steps: z + .optional( + z.int().gte(2).lte(12).register(z.globalRegistry, { + description: 'Number of inference steps during the second pass.', + }), + ) + .default(8), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for generation', + }), + ) + .default( + 'worst quality, inconsistent motion, blurry, jittery, distorted', + ), + enable_detail_pass: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to use a detail pass. If True, the model will perform a second pass to refine the video and enhance details. This incurs a 2.0x cost multiplier on the base price.', + }), + ) + .default(false), + resolution: z.optional( + z.enum(['480p', '720p']).register(z.globalRegistry, { + description: 'Resolution of the generated video.', + }), + ), + aspect_ratio: z.optional( + z.enum(['9:16', '1:1', '16:9', 'auto']).register(z.globalRegistry, { + description: 'The aspect ratio of the video.', + }), + ), + tone_map_compression_ratio: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The compression ratio for tone mapping. This is used to compress the dynamic range of the video to improve visual quality. 
A value of 0.0 means no compression, while a value of 1.0 means maximum compression.', + }), + ) + .default(0), + videos: z + .optional( + z.array(zSchemaVideoConditioningInput).register(z.globalRegistry, { + description: 'Videos to use as conditioning', + }), + ) + .default([]), + constant_rate_factor: z + .optional( + z.int().gte(0).lte(51).register(z.globalRegistry, { + description: + "The constant rate factor (CRF) to compress input media with. Compressed input media more closely matches the model's training data, which can improve motion quality.", + }), + ) + .default(29), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for generation', + }), + ), + }) + .register(z.globalRegistry, { + description: 'Distilled model input', + }) + +/** + * ModifyOutput + */ +export const zSchemaLumaDreamMachineRay2FlashModifyOutput = z.object({ + video: zSchemaFile, +}) + +/** + * ModifyVideoRequest + */ +export const zSchemaLumaDreamMachineRay2FlashModifyInput = z.object({ + prompt: z.optional( + z.string().min(3).max(5000).register(z.globalRegistry, { + description: 'Instruction for modifying the video', + }), + ), + video_url: z.string().register(z.globalRegistry, { + description: 'URL of the input video to modify', + }), + mode: z.optional( + z + .enum([ + 'adhere_1', + 'adhere_2', + 'adhere_3', + 'flex_1', + 'flex_2', + 'flex_3', + 'reimagine_1', + 'reimagine_2', + 'reimagine_3', + ]) + .register(z.globalRegistry, { + description: + 'Amount of modification to apply to the video, adhere_1 is the least amount of modification, reimagine_3 is the most', + }), + ), + image_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'Optional URL of the first frame image for modification', + }), + ), +}) + +/** + * FILMVideoOutput + */ +export const zSchemaFilmVideoOutput = z.object({ + video: zSchemaVideoFile, +}) + +/** + * FILMVideoInput + */ +export const zSchemaFilmVideoInput = z.object({ + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: + "The write mode of the output video. Only applicable if output_type is 'video'.", + }), + ), + video_url: z.string().register(z.globalRegistry, { + description: 'The URL of the video to use for interpolation.', + }), + use_calculated_fps: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If True, the function will use the calculated FPS of the input video multiplied by the number of frames to determine the output FPS. If False, the passed FPS will be used.', + }), + ) + .default(true), + loop: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If True, the final frame will be looped back to the first frame to create a seamless loop. If False, the final frame will not loop back.', + }), + ) + .default(false), + fps: z + .optional( + z.int().gte(1).lte(60).register(z.globalRegistry, { + description: + 'Frames per second for the output video. Only applicable if use_calculated_fps is False.', + }), + ) + .default(8), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: + "The quality of the output video. 
Only applicable if output_type is 'video'.", + }), + ), + use_scene_detection: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If True, the input video will be split into scenes before interpolation. This removes smear frames between scenes, but can result in false positives if the scene detection is not accurate. If False, the entire video will be treated as a single scene.', + }), + ) + .default(false), + num_frames: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: + 'The number of frames to generate between the input video frames.', + }), + ) + .default(1), +}) + +/** + * RIFEVideoOutput + */ +export const zSchemaRifeVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * RIFEVideoInput + */ +export const zSchemaRifeVideoInput = z.object({ + video_url: z.string().register(z.globalRegistry, { + description: 'The URL of the video to use for interpolation.', + }), + use_scene_detection: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If True, the input video will be split into scenes before interpolation. This removes smear frames between scenes, but can result in false positives if the scene detection is not accurate. If False, the entire video will be treated as a single scene.', + }), + ) + .default(false), + loop: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If True, the final frame will be looped back to the first frame to create a seamless loop. If False, the final frame will not loop back.', + }), + ) + .default(false), + num_frames: z + .optional( + z.int().gte(1).lte(4).register(z.globalRegistry, { + description: + 'The number of frames to generate between the input video frames.', + }), + ) + .default(1), + use_calculated_fps: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If True, the function will use the calculated FPS of the input video multiplied by the number of frames to determine the output FPS. If False, the passed FPS will be used.', + }), + ) + .default(true), + fps: z + .optional( + z.int().gte(1).lte(60).register(z.globalRegistry, { + description: + 'Frames per second for the output video. Only applicable if use_calculated_fps is False.', + }), + ) + .default(8), +}) + +/** + * ExtendVideoConditioningInput + */ +export const zSchemaExtendVideoConditioningInput = z.object({ + video_url: z.string().register(z.globalRegistry, { + description: 'URL of video to use as conditioning', + }), + start_frame_num: z + .optional( + z.int().gte(0).lte(1440).register(z.globalRegistry, { + description: + 'Frame number of the video from which the conditioning starts. Must be a multiple of 8.', + }), + ) + .default(0), + reverse_video: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to reverse the video. This is useful for tasks where the video conditioning should be applied in reverse order.', + }), + ) + .default(false), + limit_num_frames: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to limit the number of frames used from the video. If True, the `max_num_frames` parameter will be used to limit the number of frames.', + }), + ) + .default(false), + resample_fps: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to resample the video to a specific FPS. 
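// ---------------------------------------------------------------------------
// Editorial sketch (not part of the generated diff): frame accounting for the
// FILM/RIFE interpolation inputs above. `num_frames` new frames are generated
// between each pair of input frames; with `loop` enabled the last-to-first
// transition is treated as one more pair. This is an assumption read off the
// field descriptions, not taken from the implementation.
function interpolatedFrameCount(
  inputFrameCount: number,
  numFrames: number,
  loop: boolean,
): number {
  const pairs = loop ? inputFrameCount : inputFrameCount - 1
  return inputFrameCount + pairs * numFrames
}
// e.g. 100 input frames with num_frames=1 and no loop -> 199 output frames.
// ---------------------------------------------------------------------------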
If True, the `target_fps` parameter will be used to resample the video.', + }), + ) + .default(false), + strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Strength of the conditioning. 0.0 means no conditioning, 1.0 means full conditioning.', + }), + ) + .default(1), + target_fps: z + .optional( + z.int().gte(1).lte(60).register(z.globalRegistry, { + description: + 'Target FPS to resample the video to. Only relevant if `resample_fps` is True.', + }), + ) + .default(24), + max_num_frames: z + .optional( + z.int().gte(1).lte(1441).register(z.globalRegistry, { + description: + 'Maximum number of frames to use from the video. If None, all frames will be used.', + }), + ) + .default(1441), +}) + +/** + * ExtendVideoOutput + */ +export const zSchemaLtxv13B098DistilledExtendOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * DistilledExtendVideoInput + * + * Distilled model input + */ +export const zSchemaLtxv13B098DistilledExtendInput = z + .object({ + second_pass_skip_initial_steps: z + .optional( + z.int().gte(1).lte(11).register(z.globalRegistry, { + description: + 'The number of inference steps to skip in the initial steps of the second pass. By skipping some steps at the beginning, the second pass can focus on smaller details instead of larger changes.', + }), + ) + .default(5), + first_pass_num_inference_steps: z + .optional( + z.int().gte(2).lte(12).register(z.globalRegistry, { + description: 'Number of inference steps during the first pass.', + }), + ) + .default(8), + frame_rate: z + .optional( + z.int().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frame rate of the video.', + }), + ) + .default(24), + reverse_video: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to reverse the video.', + }), + ) + .default(false), + prompt: z.string().register(z.globalRegistry, { + description: 'Text prompt to guide generation', + }), + expand_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to expand the prompt using a language model.', + }), + ) + .default(false), + temporal_adain_factor: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The factor for adaptive instance normalization (AdaIN) applied to generated video chunks after the first. This can help deal with a gradual increase in saturation/contrast in the generated video by normalizing the color distribution across the video. 
A high value will ensure the color distribution is more consistent across the video, while a low value will allow for more variation in color distribution.', + }), + ) + .default(0.5), + loras: z + .optional( + z.array(zSchemaLoRaWeight).register(z.globalRegistry, { + description: 'LoRA weights to use for generation', + }), + ) + .default([]), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + num_frames: z + .optional( + z.int().gte(9).lte(1441).register(z.globalRegistry, { + description: 'The number of frames in the video.', + }), + ) + .default(121), + second_pass_num_inference_steps: z + .optional( + z.int().gte(2).lte(12).register(z.globalRegistry, { + description: 'Number of inference steps during the second pass.', + }), + ) + .default(8), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for generation', + }), + ) + .default( + 'worst quality, inconsistent motion, blurry, jittery, distorted', + ), + video: zSchemaExtendVideoConditioningInput, + enable_detail_pass: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to use a detail pass. If True, the model will perform a second pass to refine the video and enhance details. This incurs a 2.0x cost multiplier on the base price.', + }), + ) + .default(false), + resolution: z.optional( + z.enum(['480p', '720p']).register(z.globalRegistry, { + description: 'Resolution of the generated video.', + }), + ), + aspect_ratio: z.optional( + z.enum(['9:16', '1:1', '16:9', 'auto']).register(z.globalRegistry, { + description: 'The aspect ratio of the video.', + }), + ), + tone_map_compression_ratio: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The compression ratio for tone mapping. This is used to compress the dynamic range of the video to improve visual quality. A value of 0.0 means no compression, while a value of 1.0 means maximum compression.', + }), + ) + .default(0), + constant_rate_factor: z + .optional( + z.int().gte(0).lte(51).register(z.globalRegistry, { + description: + "The constant rate factor (CRF) to compress input media with. Compressed input media more closely matches the model's training data, which can improve motion quality.", + }), + ) + .default(29), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for generation', + }), + ), + }) + .register(z.globalRegistry, { + description: 'Distilled model input', + }) + +/** + * WanV2VResponse + */ +export const zSchemaWanV22A14bVideoToVideoOutput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The text prompt used for video generation.', + }), + ) + .default(''), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * WanV2VRequest + */ +export const zSchemaWanV22A14bVideoToVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide video generation.', + }), + video_url: z.string().register(z.globalRegistry, { + description: 'URL of the input video.', + }), + acceleration: z.optional( + z.enum(['none', 'regular']).register(z.globalRegistry, { + description: + "Acceleration level to use. The more acceleration, the faster the generation, but with lower quality. 
The recommended value is 'regular'.", + }), + ), + num_interpolated_frames: z + .optional( + z.int().gte(0).lte(4).register(z.globalRegistry, { + description: + 'Number of frames to interpolate between each pair of generated frames. Must be between 0 and 4.', + }), + ) + .default(1), + shift: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'Shift value for the video. Must be between 1.0 and 10.0.', + }), + ) + .default(5), + resample_fps: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the video will be resampled to the passed frames per second. If false, the video will not be resampled.', + }), + ) + .default(false), + frames_per_second: z + .optional( + z.int().gte(4).lte(60).register(z.globalRegistry, { + description: + 'Frames per second of the generated video. Must be between 4 and 60. When using interpolation and `adjust_fps_for_interpolation` is set to true (default true), the final FPS will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If `adjust_fps_for_interpolation` is set to false, this value will be used as-is.', + }), + ) + .default(16), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, input data will be checked for safety before processing.', + }), + ) + .default(false), + num_frames: z + .optional( + z.int().gte(17).lte(161).register(z.globalRegistry, { + description: + 'Number of frames to generate. Must be between 17 and 161 (inclusive).', + }), + ) + .default(81), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.', + }), + ) + .default(3.5), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for video generation.', + }), + ) + .default(''), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: + 'The write mode of the output video. Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size.', + }), + ), + resolution: z.optional( + z.enum(['480p', '580p', '720p']).register(z.globalRegistry, { + description: 'Resolution of the generated video (480p, 580p, or 720p).', + }), + ), + aspect_ratio: z.optional( + z.enum(['auto', '16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: + "Aspect ratio of the generated video. If 'auto', the aspect ratio will be determined automatically based on the input video.", + }), + ), + enable_output_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, output video will be checked for safety after generation.', + }), + ) + .default(false), + guidance_scale_2: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Guidance scale for the second stage of the model. This is used to control the adherence to the prompt in the second stage of the model.', + }), + ) + .default(4), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: + 'The quality of the output video. 
Higher quality means better visual quality but larger file size.', + }), + ), + strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Strength of the video transformation. A value of 1.0 means the output will be completely based on the prompt, while a value of 0.0 means the output will be identical to the input video.', + }), + ) + .default(0.9), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(2).lte(40).register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. Higher values give better quality but take longer.', + }), + ) + .default(27), + interpolator_model: z.optional( + z.enum(['none', 'film', 'rife']).register(z.globalRegistry, { + description: + 'The model to use for frame interpolation. If None, no interpolation is applied.', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), + adjust_fps_for_interpolation: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the number of frames per second will be multiplied by the number of interpolated frames plus one. For example, if the generated frames per second is 16 and the number of interpolated frames is 1, the final frames per second will be 32. If false, the passed frames per second will be used as-is.', + }), + ) + .default(true), +}) + +/** + * MergeVideosOutput + */ +export const zSchemaFfmpegApiMergeVideosOutput = z.object({ + metadata: z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: + 'Metadata about the merged video including original video info', + }), + video: zSchemaFile, +}) + +/** + * MergeVideosInput + */ +export const zSchemaFfmpegApiMergeVideosInput = z.object({ + target_fps: z.optional(z.union([z.number().gte(1).lte(60), z.unknown()])), + video_urls: z.array(z.string()).min(2).register(z.globalRegistry, { + description: 'List of video URLs to merge in order', + }), + resolution: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + z.unknown(), + ]), + ), +}) + +/** + * MareyOutput + */ +export const zSchemaMareyMotionTransferOutput = z.object({ + video: zSchemaFile, +}) + +/** + * MareyInputMotionTransfer + */ +export const zSchemaMareyMotionTransferInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate a video from', + }), + video_url: z.string().register(z.globalRegistry, { + description: 'The URL of the video to use as the control video.', + }), + seed: z.optional(z.union([z.int(), z.unknown()])), + reference_image_url: z.optional(z.union([z.string(), z.unknown()])), + negative_prompt: z.optional(z.union([z.string(), z.unknown()])), + first_frame_image_url: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * MareyOutput + */ +export const zSchemaMareyPoseTransferOutput = z.object({ + video: zSchemaFile, +}) + +/** + * MareyInputPoseTransfer + */ +export const zSchemaMareyPoseTransferInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate a video from', + 
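// ---------------------------------------------------------------------------
// Editorial sketch (not part of the generated diff): the FPS arithmetic that
// the Wan video-to-video input above describes for
// `adjust_fps_for_interpolation`: the final FPS is the generated FPS times
// (num_interpolated_frames + 1), e.g. 16 fps with 1 interpolated frame
// yields 32 fps.
function wanFinalFps(
  framesPerSecond: number,
  numInterpolatedFrames: number,
  adjustFpsForInterpolation: boolean,
): number {
  return adjustFpsForInterpolation
    ? framesPerSecond * (numInterpolatedFrames + 1)
    : framesPerSecond
}
// ---------------------------------------------------------------------------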
}), + video_url: z.string().register(z.globalRegistry, { + description: 'The URL of the video to use as the control video.', + }), + seed: z.optional(z.union([z.int(), z.unknown()])), + reference_image_url: z.optional(z.union([z.string(), z.unknown()])), + negative_prompt: z.optional(z.union([z.string(), z.unknown()])), + first_frame_image_url: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * VideoOutput + */ +export const zSchemaSfxV1VideoToVideoOutput = z.object({ + video: z.array(zSchemaVideo).register(z.globalRegistry, { + description: 'The processed video with sound effects', + }), +}) + +/** + * Input + */ +export const zSchemaSfxV1VideoToVideoInput = z.object({ + num_samples: z.optional(z.union([z.int().gte(2).lte(8), z.unknown()])), + video_url: z.url().min(1).max(2083).register(z.globalRegistry, { + description: + 'A video URL that can be accessed by the API to process and add sound effects', + }), + duration: z.optional(z.union([z.number().gte(1).lte(10), z.unknown()])), + seed: z.optional(z.union([z.int().gte(1), z.unknown()])), + text_prompt: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * AvatarSingleAudioResponse + */ +export const zSchemaInfinitalkOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * InfiniTalkSingleAudioRequest + */ +export const zSchemaInfinitalkInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide video generation.', + }), + resolution: z.optional( + z.enum(['480p', '720p']).register(z.globalRegistry, { + description: + 'Resolution of the video to generate. Must be either 480p or 720p.', + }), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: 'The acceleration level to use for generation.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: + 'URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped.', + }), + audio_url: z.string().register(z.globalRegistry, { + description: 'The URL of the audio file.', + }), + num_frames: z + .optional( + z.int().gte(41).lte(721).register(z.globalRegistry, { + description: 'Number of frames to generate. Must be between 41 and 721.', + }), + ) + .default(145), + seed: z + .optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ) + .default(42), +}) + +/** + * OutputIncreaseResolutionModel + */ +export const zSchemaVideoIncreaseResolutionOutput = z.object({ + video: z.union([zSchemaVideo, zSchemaFile]), +}) + +/** + * InputIncreaseResolutionModel + */ +export const zSchemaVideoIncreaseResolutionInput = z.object({ + video_url: z.string().register(z.globalRegistry, { + description: + 'Input video to increase resolution. Size should be less than 14142x14142 and duration less than 30s.', + }), + output_container_and_codec: z.optional( + z + .enum([ + 'mp4_h265', + 'mp4_h264', + 'webm_vp9', + 'mov_h265', + 'mov_proresks', + 'mkv_h265', + 'mkv_h264', + 'mkv_vp9', + 'gif', + ]) + .register(z.globalRegistry, { + description: + 'Output container and codec. Options: mp4_h265, mp4_h264, webm_vp9, mov_h265, mov_proresks, mkv_h265, mkv_h264, mkv_vp9, gif.', + }), + ), + desired_increase: z.optional( + z.enum(['2', '4']).register(z.globalRegistry, { + description: 'Desired increase factor. 
Options: 2x, 4x.', + }), + ), +}) + +/** + * WanFunControlResponse + */ +export const zSchemaWanFunControlOutput = z.object({ + video: zSchemaFile, +}) + +/** + * WanFunControlRequest + */ +export const zSchemaWanFunControlInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the video.', + }), + shift: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'The shift for the scheduler.', + }), + ) + .default(5), + preprocess_video: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to preprocess the video. If True, the video will be preprocessed to depth or pose.', + }), + ) + .default(false), + reference_image_url: z.optional( + z.string().register(z.globalRegistry, { + description: + 'The URL of the reference image to use as a reference for the video generation.', + }), + ), + fps: z + .optional( + z.int().gte(4).lte(60).register(z.globalRegistry, { + description: + 'The fps to generate. Only used when match_input_fps is False.', + }), + ) + .default(16), + match_input_num_frames: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to match the number of frames in the input video.', + }), + ) + .default(true), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'The guidance scale.', + }), + ) + .default(6), + preprocess_type: z.optional( + z.enum(['depth', 'pose']).register(z.globalRegistry, { + description: + 'The type of preprocess to apply to the video. Only used when preprocess_video is True.', + }), + ), + control_video_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the control video to use as a reference for the video generation.', + }), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to generate the video.', + }), + ) + .default(''), + num_frames: z + .optional( + z.int().gte(49).lte(121).register(z.globalRegistry, { + description: + 'The number of frames to generate. 
Only used when match_input_num_frames is False.', + }), + ) + .default(81), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the random number generator.', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(4).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps.', + }), + ) + .default(27), + match_input_fps: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to match the fps in the input video.', + }), + ) + .default(true), +}) + +/** + * LipSyncV2ProOutput + */ +export const zSchemaSyncLipsyncV2ProOutput = z.object({ + video: zSchemaFile, +}) + +/** + * LipSyncV2ProInput + */ +export const zSchemaSyncLipsyncV2ProInput = z.object({ + sync_mode: z.optional( + z + .enum(['cut_off', 'loop', 'bounce', 'silence', 'remap']) + .register(z.globalRegistry, { + description: + 'Lipsync mode when audio and video durations are out of sync.', + }), + ), + video_url: z.string().register(z.globalRegistry, { + description: 'URL of the input video', + }), + audio_url: z.string().register(z.globalRegistry, { + description: 'URL of the input audio', + }), +}) + +/** + * HunyuanFoleyResponse + */ +export const zSchemaHunyuanVideoFoleyOutput = z.object({ + video: zSchemaFile, +}) + +/** + * HunyuanFoleyRequest + */ +export const zSchemaHunyuanVideoFoleyInput = z.object({ + video_url: z.string().register(z.globalRegistry, { + description: 'The URL of the video to generate audio for.', + }), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'Guidance scale for audio generation.', + }), + ) + .default(4.5), + num_inference_steps: z + .optional( + z.int().gte(10).lte(100).register(z.globalRegistry, { + description: 'Number of inference steps for generation.', + }), + ) + .default(50), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Random seed for reproducible generation.', + }), + ), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt to avoid certain audio characteristics.', + }), + ) + .default('noisy, harsh'), + text_prompt: z.string().register(z.globalRegistry, { + description: 'Text description of the desired audio (optional).', + }), +}) + +/** + * WanVACEPoseResponse + */ +export const zSchemaWan22VaceFunA14bPoseOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation.', + }), + frames_zip: z.optional(z.union([zSchemaFile, z.unknown()])), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaVideoFile, +}) + +/** + * WanVACEPoseRequest + */ +export const zSchemaWan22VaceFunA14bPoseInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: + 'The text prompt to guide video generation. For pose task, the prompt should describe the desired pose and action of the subject in the video.', + }), + video_url: z.string().register(z.globalRegistry, { + description: 'URL to the source video file. Required for pose task.', + }), + num_interpolated_frames: z + .optional( + z.int().gte(0).lte(5).register(z.globalRegistry, { + description: + 'Number of frames to interpolate between the original frames. A value of 0 means no interpolation.', + }), + ) + .default(0), + temporal_downsample_factor: z + .optional( + z.int().gte(0).lte(5).register(z.globalRegistry, { + description: + 'Temporal downsample factor for the video. 
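// ---------------------------------------------------------------------------
// Editorial sketch (not part of the generated diff): a Wan Fun Control request
// that asks the endpoint to derive depth maps from the control video. Per the
// descriptions above, `preprocess_type` is only consulted when
// `preprocess_video` is true. The URLs and prompt are illustrative.
const funControlRequest = zSchemaWanFunControlInput.parse({
  prompt: 'a dancer on a rooftop at dusk',
  control_video_url: 'https://example.com/reference-motion.mp4',
  preprocess_video: true,
  preprocess_type: 'depth',
})
// funControlRequest now also carries the schema defaults (shift: 5, fps: 16,
// guidance_scale: 6, num_inference_steps: 27, ...).
// ---------------------------------------------------------------------------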
This is an integer value that determines how many frames to skip in the video. A value of 0 means no downsampling. For each downsample factor, one upsample factor will automatically be applied.', + }), + ) + .default(0), + first_frame_url: z.optional(z.union([z.string(), z.unknown()])), + ref_image_urls: z.optional( + z.array(z.string()).register(z.globalRegistry, { + description: + 'URLs to source reference images. If provided, the model will use these images as reference.', + }), + ), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Guidance scale for classifier-free guidance. Higher values encourage the model to generate images closely related to the text prompt.', + }), + ) + .default(5), + num_frames: z + .optional( + z.int().gte(17).lte(241).register(z.globalRegistry, { + description: + 'Number of frames to generate. Must be between 81 and 241 (inclusive).', + }), + ) + .default(81), + auto_downsample_min_fps: z + .optional( + z.number().gte(1).lte(60).register(z.globalRegistry, { + description: + 'The minimum frames per second to downsample the video to. This is used to help determine the auto downsample factor to try and find the lowest detail-preserving downsample factor. The default value is appropriate for most videos; if you are using a video with very fast motion, you may need to increase this value. If your video has a very low amount of motion, you could decrease this value to allow for higher downsampling and thus longer sequences.', + }), + ) + .default(15), + transparency_mode: z.optional( + z.enum(['content_aware', 'white', 'black']).register(z.globalRegistry, { + description: + 'The transparency mode to apply to the first and last frames. This controls how the transparent areas of the first and last frames are filled.', + }), + ), + sampler: z.optional( + z.enum(['unipc', 'dpm++', 'euler']).register(z.globalRegistry, { + description: 'Sampler to use for video generation.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), + seed: z.optional(z.union([z.int(), z.unknown()])), + interpolator_model: z.optional( + z.enum(['rife', 'film']).register(z.globalRegistry, { + description: + "The model to use for frame interpolation. 
Options are 'rife' or 'film'.", + }), + ), + enable_auto_downsample: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the model will automatically temporally downsample the video to an appropriate frame length for the model, then will interpolate it back to the original frame length.', + }), + ) + .default(false), + preprocess: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to preprocess the input video.', + }), + ) + .default(false), + shift: z + .optional( + z.number().gte(1).lte(15).register(z.globalRegistry, { + description: 'Shift parameter for video generation.', + }), + ) + .default(5), + acceleration: z.optional( + z.union([z.enum(['none', 'low', 'regular']), z.unknown()]), + ), + frames_per_second: z.optional(z.union([z.int().gte(5).lte(30), z.unknown()])), + match_input_num_frames: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the number of frames in the generated video will match the number of frames in the input video. If false, the number of frames will be determined by the num_frames parameter.', + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(false), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for video generation.', + }), + ) + .default( + 'letterboxing, borders, black bars, bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards', + ), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + resolution: z.optional( + z + .enum(['auto', '240p', '360p', '480p', '580p', '720p']) + .register(z.globalRegistry, { + description: 'Resolution of the generated video.', + }), + ), + return_frames_zip: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, also return a ZIP file containing all generated frames.', + }), + ) + .default(false), + aspect_ratio: z.optional( + z.enum(['auto', '16:9', '1:1', '9:16']).register(z.globalRegistry, { + description: 'Aspect ratio of the generated video.', + }), + ), + match_input_frames_per_second: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the frames per second of the generated video will match the input video. If false, the frames per second will be determined by the frames_per_second parameter.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. 
Higher values give better quality but take longer.', + }), + ) + .default(30), + last_frame_url: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * WanVACEDepthResponse + */ +export const zSchemaWan22VaceFunA14bDepthOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation.', + }), + frames_zip: z.optional(z.union([zSchemaFile, z.unknown()])), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaVideoFile, +}) + +/** + * WanVACEDepthRequest + */ +export const zSchemaWan22VaceFunA14bDepthInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide video generation.', + }), + video_url: z.string().register(z.globalRegistry, { + description: 'URL to the source video file. Required for depth task.', + }), + num_interpolated_frames: z + .optional( + z.int().gte(0).lte(5).register(z.globalRegistry, { + description: + 'Number of frames to interpolate between the original frames. A value of 0 means no interpolation.', + }), + ) + .default(0), + temporal_downsample_factor: z + .optional( + z.int().gte(0).lte(5).register(z.globalRegistry, { + description: + 'Temporal downsample factor for the video. This is an integer value that determines how many frames to skip in the video. A value of 0 means no downsampling. For each downsample factor, one upsample factor will automatically be applied.', + }), + ) + .default(0), + first_frame_url: z.optional(z.union([z.string(), z.unknown()])), + ref_image_urls: z.optional( + z.array(z.string()).register(z.globalRegistry, { + description: + 'URLs to source reference image. If provided, the model will use this image as reference.', + }), + ), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Guidance scale for classifier-free guidance. Higher values encourage the model to generate images closely related to the text prompt.', + }), + ) + .default(5), + num_frames: z + .optional( + z.int().gte(17).lte(241).register(z.globalRegistry, { + description: + 'Number of frames to generate. Must be between 81 to 241 (inclusive).', + }), + ) + .default(81), + auto_downsample_min_fps: z + .optional( + z.number().gte(1).lte(60).register(z.globalRegistry, { + description: + 'The minimum frames per second to downsample the video to. This is used to help determine the auto downsample factor to try and find the lowest detail-preserving downsample factor. The default value is appropriate for most videos, if you are using a video with very fast motion, you may need to increase this value. If your video has a very low amount of motion, you could decrease this value to allow for higher downsampling and thus longer sequences.', + }), + ) + .default(15), + transparency_mode: z.optional( + z.enum(['content_aware', 'white', 'black']).register(z.globalRegistry, { + description: + 'The transparency mode to apply to the first and last frames. 
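// ---------------------------------------------------------------------------
// Editorial sketch (not part of the generated diff): the Wan 2.2 VACE pose
// input above (and its depth/inpainting/outpainting siblings) share almost
// every field, so a caller can keep one base object and reuse it across
// tasks. `z.input` keeps defaulted fields optional; values are illustrative.
type VacePoseInput = z.input<typeof zSchemaWan22VaceFunA14bPoseInput>
const vaceBase = {
  prompt: 'a figure walking through rain',
  video_url: 'https://example.com/source.mp4',
  resolution: '480p',
} satisfies Partial<VacePoseInput>
const poseRequest: VacePoseInput = { ...vaceBase }
// ---------------------------------------------------------------------------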
This controls how the transparent areas of the first and last frames are filled.', + }), + ), + sampler: z.optional( + z.enum(['unipc', 'dpm++', 'euler']).register(z.globalRegistry, { + description: 'Sampler to use for video generation.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), + seed: z.optional(z.union([z.int(), z.unknown()])), + interpolator_model: z.optional( + z.enum(['rife', 'film']).register(z.globalRegistry, { + description: + "The model to use for frame interpolation. Options are 'rife' or 'film'.", + }), + ), + enable_auto_downsample: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the model will automatically temporally downsample the video to an appropriate frame length for the model, then will interpolate it back to the original frame length.', + }), + ) + .default(false), + preprocess: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to preprocess the input video.', + }), + ) + .default(false), + shift: z + .optional( + z.number().gte(1).lte(15).register(z.globalRegistry, { + description: 'Shift parameter for video generation.', + }), + ) + .default(5), + acceleration: z.optional( + z.union([z.enum(['none', 'low', 'regular']), z.unknown()]), + ), + frames_per_second: z.optional(z.union([z.int().gte(5).lte(30), z.unknown()])), + match_input_num_frames: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the number of frames in the generated video will match the number of frames in the input video. 
If false, the number of frames will be determined by the num_frames parameter.', + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(false), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for video generation.', + }), + ) + .default( + 'letterboxing, borders, black bars, bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards', + ), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + resolution: z.optional( + z + .enum(['auto', '240p', '360p', '480p', '580p', '720p']) + .register(z.globalRegistry, { + description: 'Resolution of the generated video.', + }), + ), + return_frames_zip: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, also return a ZIP file containing all generated frames.', + }), + ) + .default(false), + aspect_ratio: z.optional( + z.enum(['auto', '16:9', '1:1', '9:16']).register(z.globalRegistry, { + description: 'Aspect ratio of the generated video.', + }), + ), + match_input_frames_per_second: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the frames per second of the generated video will match the input video. If false, the frames per second will be determined by the frames_per_second parameter.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. Higher values give better quality but take longer.', + }), + ) + .default(30), + last_frame_url: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * WanVACEInpaintingResponse + */ +export const zSchemaWan22VaceFunA14bInpaintingOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation.', + }), + frames_zip: z.optional(z.union([zSchemaFile, z.unknown()])), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaVideoFile, +}) + +/** + * WanVACEInpaintingRequest + */ +export const zSchemaWan22VaceFunA14bInpaintingInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide video generation.', + }), + video_url: z.string().register(z.globalRegistry, { + description: 'URL to the source video file. Required for inpainting.', + }), + num_interpolated_frames: z + .optional( + z.int().gte(0).lte(5).register(z.globalRegistry, { + description: + 'Number of frames to interpolate between the original frames. A value of 0 means no interpolation.', + }), + ) + .default(0), + temporal_downsample_factor: z + .optional( + z.int().gte(0).lte(5).register(z.globalRegistry, { + description: + 'Temporal downsample factor for the video. This is an integer value that determines how many frames to skip in the video. A value of 0 means no downsampling. 
For each downsample factor, one upsample factor will automatically be applied.', + }), + ) + .default(0), + first_frame_url: z.optional(z.union([z.string(), z.unknown()])), + ref_image_urls: z.optional( + z.array(z.string()).register(z.globalRegistry, { + description: + 'Urls to source reference image. If provided, the model will use this image as reference.', + }), + ), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Guidance scale for classifier-free guidance. Higher values encourage the model to generate images closely related to the text prompt.', + }), + ) + .default(5), + num_frames: z + .optional( + z.int().gte(17).lte(241).register(z.globalRegistry, { + description: + 'Number of frames to generate. Must be between 81 to 241 (inclusive).', + }), + ) + .default(81), + auto_downsample_min_fps: z + .optional( + z.number().gte(1).lte(60).register(z.globalRegistry, { + description: + 'The minimum frames per second to downsample the video to. This is used to help determine the auto downsample factor to try and find the lowest detail-preserving downsample factor. The default value is appropriate for most videos, if you are using a video with very fast motion, you may need to increase this value. If your video has a very low amount of motion, you could decrease this value to allow for higher downsampling and thus longer sequences.', + }), + ) + .default(15), + transparency_mode: z.optional( + z.enum(['content_aware', 'white', 'black']).register(z.globalRegistry, { + description: + 'The transparency mode to apply to the first and last frames. This controls how the transparent areas of the first and last frames are filled.', + }), + ), + sampler: z.optional( + z.enum(['unipc', 'dpm++', 'euler']).register(z.globalRegistry, { + description: 'Sampler to use for video generation.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + mask_video_url: z.union([z.string(), z.unknown()]), + seed: z.optional(z.union([z.int(), z.unknown()])), + interpolator_model: z.optional( + z.enum(['rife', 'film']).register(z.globalRegistry, { + description: + "The model to use for frame interpolation. 
Options are 'rife' or 'film'.", + }), + ), + enable_auto_downsample: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the model will automatically temporally downsample the video to an appropriate frame length for the model, then will interpolate it back to the original frame length.', + }), + ) + .default(false), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), + shift: z + .optional( + z.number().gte(1).lte(15).register(z.globalRegistry, { + description: 'Shift parameter for video generation.', + }), + ) + .default(5), + preprocess: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to preprocess the input video.', + }), + ) + .default(false), + acceleration: z.optional( + z.union([z.enum(['none', 'low', 'regular']), z.unknown()]), + ), + mask_image_url: z.optional(z.union([z.string(), z.unknown()])), + frames_per_second: z.optional(z.union([z.int().gte(5).lte(30), z.unknown()])), + match_input_num_frames: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the number of frames in the generated video will match the number of frames in the input video. If false, the number of frames will be determined by the num_frames parameter.', + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(false), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for video generation.', + }), + ) + .default( + 'letterboxing, borders, black bars, bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards', + ), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + resolution: z.optional( + z + .enum(['auto', '240p', '360p', '480p', '580p', '720p']) + .register(z.globalRegistry, { + description: 'Resolution of the generated video.', + }), + ), + return_frames_zip: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, also return a ZIP file containing all generated frames.', + }), + ) + .default(false), + aspect_ratio: z.optional( + z.enum(['auto', '16:9', '1:1', '9:16']).register(z.globalRegistry, { + description: 'Aspect ratio of the generated video.', + }), + ), + match_input_frames_per_second: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the frames per second of the generated video will match the input video. If false, the frames per second will be determined by the frames_per_second parameter.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. 
Higher values give better quality but take longer.', + }), + ) + .default(30), + last_frame_url: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * WanVACEOutpaintingResponse + */ +export const zSchemaWan22VaceFunA14bOutpaintingOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation.', + }), + frames_zip: z.optional(z.union([zSchemaFile, z.unknown()])), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaVideoFile, +}) + +/** + * WanVACEOutpaintingRequest + */ +export const zSchemaWan22VaceFunA14bOutpaintingInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide video generation.', + }), + video_url: z.string().register(z.globalRegistry, { + description: 'URL to the source video file. Required for outpainting.', + }), + num_interpolated_frames: z + .optional( + z.int().gte(0).lte(5).register(z.globalRegistry, { + description: + 'Number of frames to interpolate between the original frames. A value of 0 means no interpolation.', + }), + ) + .default(0), + temporal_downsample_factor: z + .optional( + z.int().gte(0).lte(5).register(z.globalRegistry, { + description: + 'Temporal downsample factor for the video. This is an integer value that determines how many frames to skip in the video. A value of 0 means no downsampling. For each downsample factor, one upsample factor will automatically be applied.', + }), + ) + .default(0), + first_frame_url: z.optional(z.union([z.string(), z.unknown()])), + ref_image_urls: z.optional( + z.array(z.string()).register(z.globalRegistry, { + description: + 'URLs to source reference image. If provided, the model will use this image as reference.', + }), + ), + expand_ratio: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Amount of expansion. This is a float value between 0 and 1, where 0.25 adds 25% to the original video size on the specified sides.', + }), + ) + .default(0.25), + transparency_mode: z.optional( + z.enum(['content_aware', 'white', 'black']).register(z.globalRegistry, { + description: + 'The transparency mode to apply to the first and last frames. This controls how the transparent areas of the first and last frames are filled.', + }), + ), + num_frames: z + .optional( + z.int().gte(17).lte(241).register(z.globalRegistry, { + description: + 'Number of frames to generate. Must be between 81 to 241 (inclusive).', + }), + ) + .default(81), + auto_downsample_min_fps: z + .optional( + z.number().gte(1).lte(60).register(z.globalRegistry, { + description: + 'The minimum frames per second to downsample the video to. This is used to help determine the auto downsample factor to try and find the lowest detail-preserving downsample factor. The default value is appropriate for most videos, if you are using a video with very fast motion, you may need to increase this value. If your video has a very low amount of motion, you could decrease this value to allow for higher downsampling and thus longer sequences.', + }), + ) + .default(15), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Guidance scale for classifier-free guidance. 
Higher values encourage the model to generate images closely related to the text prompt.', + }), + ) + .default(5), + sampler: z.optional( + z.enum(['unipc', 'dpm++', 'euler']).register(z.globalRegistry, { + description: 'Sampler to use for video generation.', + }), + ), + expand_bottom: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to expand the video to the bottom.', + }), + ) + .default(false), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), + expand_left: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to expand the video to the left.', + }), + ) + .default(false), + interpolator_model: z.optional( + z.enum(['rife', 'film']).register(z.globalRegistry, { + description: + "The model to use for frame interpolation. Options are 'rife' or 'film'.", + }), + ), + enable_auto_downsample: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the model will automatically temporally downsample the video to an appropriate frame length for the model, then will interpolate it back to the original frame length.', + }), + ) + .default(false), + expand_top: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to expand the video to the top.', + }), + ) + .default(false), + shift: z + .optional( + z.number().gte(1).lte(15).register(z.globalRegistry, { + description: 'Shift parameter for video generation.', + }), + ) + .default(5), + seed: z.optional(z.union([z.int(), z.unknown()])), + acceleration: z.optional( + z.union([z.enum(['none', 'low', 'regular']), z.unknown()]), + ), + frames_per_second: z.optional(z.union([z.int().gte(5).lte(30), z.unknown()])), + match_input_num_frames: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the number of frames in the generated video will match the number of frames in the input video. 
If false, the number of frames will be determined by the num_frames parameter.', + }), + ) + .default(false), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(false), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for video generation.', + }), + ) + .default( + 'letterboxing, borders, black bars, bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards', + ), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + expand_right: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to expand the video to the right.', + }), + ) + .default(false), + resolution: z.optional( + z + .enum(['auto', '240p', '360p', '480p', '580p', '720p']) + .register(z.globalRegistry, { + description: 'Resolution of the generated video.', + }), + ), + return_frames_zip: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, also return a ZIP file containing all generated frames.', + }), + ) + .default(false), + aspect_ratio: z.optional( + z.enum(['auto', '16:9', '1:1', '9:16']).register(z.globalRegistry, { + description: 'Aspect ratio of the generated video.', + }), + ), + match_input_frames_per_second: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the frames per second of the generated video will match the input video. If false, the frames per second will be determined by the frames_per_second parameter.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. Higher values give better quality but take longer.', + }), + ) + .default(30), + last_frame_url: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * WanVACEReframeResponse + */ +export const zSchemaWan22VaceFunA14bReframeOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation.', + }), + frames_zip: z.optional(z.union([zSchemaFile, z.unknown()])), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaVideoFile, +}) + +/** + * WanVACEReframeRequest + */ +export const zSchemaWan22VaceFunA14bReframeInput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'The text prompt to guide video generation. Optional for reframing.', + }), + ) + .default(''), + video_url: z.string().register(z.globalRegistry, { + description: + 'URL to the source video file. This video will be used as a reference for the reframe task.', + }), + num_interpolated_frames: z + .optional( + z.int().gte(0).lte(5).register(z.globalRegistry, { + description: + 'Number of frames to interpolate between the original frames. 
A value of 0 means no interpolation.', + }), + ) + .default(0), + temporal_downsample_factor: z + .optional( + z.int().gte(0).lte(5).register(z.globalRegistry, { + description: + 'Temporal downsample factor for the video. This is an integer value that determines how many frames to skip in the video. A value of 0 means no downsampling. For each downsample factor, one upsample factor will automatically be applied.', + }), + ) + .default(0), + first_frame_url: z.optional(z.union([z.string(), z.unknown()])), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Guidance scale for classifier-free guidance. Higher values encourage the model to generate images closely related to the text prompt.', + }), + ) + .default(5), + num_frames: z + .optional( + z.int().gte(17).lte(241).register(z.globalRegistry, { + description: + 'Number of frames to generate. Must be between 81 to 241 (inclusive).', + }), + ) + .default(81), + auto_downsample_min_fps: z + .optional( + z.number().gte(1).lte(60).register(z.globalRegistry, { + description: + 'The minimum frames per second to downsample the video to. This is used to help determine the auto downsample factor to try and find the lowest detail-preserving downsample factor. The default value is appropriate for most videos, if you are using a video with very fast motion, you may need to increase this value. If your video has a very low amount of motion, you could decrease this value to allow for higher downsampling and thus longer sequences.', + }), + ) + .default(15), + trim_borders: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to trim borders from the video.', + }), + ) + .default(true), + sampler: z.optional( + z.enum(['unipc', 'dpm++', 'euler']).register(z.globalRegistry, { + description: 'Sampler to use for video generation.', + }), + ), + transparency_mode: z.optional( + z.enum(['content_aware', 'white', 'black']).register(z.globalRegistry, { + description: + 'The transparency mode to apply to the first and last frames. This controls how the transparent areas of the first and last frames are filled.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), + seed: z.optional(z.union([z.int(), z.unknown()])), + interpolator_model: z.optional( + z.enum(['rife', 'film']).register(z.globalRegistry, { + description: + "The model to use for frame interpolation. 
Options are 'rife' or 'film'.", + }), + ), + enable_auto_downsample: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the model will automatically temporally downsample the video to an appropriate frame length for the model, then will interpolate it back to the original frame length.', + }), + ) + .default(false), + shift: z + .optional( + z.number().gte(1).lte(15).register(z.globalRegistry, { + description: 'Shift parameter for video generation.', + }), + ) + .default(5), + acceleration: z.optional( + z.union([z.enum(['none', 'low', 'regular']), z.unknown()]), + ), + zoom_factor: z + .optional( + z.number().gte(0).lte(0.9).register(z.globalRegistry, { + description: + 'Zoom factor for the video. When this value is greater than 0, the video will be zoomed in by this factor (in relation to the canvas size,) cutting off the edges of the video. A value of 0 means no zoom.', + }), + ) + .default(0), + frames_per_second: z.optional(z.union([z.int().gte(5).lte(30), z.unknown()])), + match_input_num_frames: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the number of frames in the generated video will match the number of frames in the input video. If false, the number of frames will be determined by the num_frames parameter.', + }), + ) + .default(true), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(false), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for video generation.', + }), + ) + .default( + 'letterboxing, borders, black bars, bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards', + ), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + resolution: z.optional( + z + .enum(['auto', '240p', '360p', '480p', '580p', '720p']) + .register(z.globalRegistry, { + description: 'Resolution of the generated video.', + }), + ), + return_frames_zip: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, also return a ZIP file containing all generated frames.', + }), + ) + .default(false), + aspect_ratio: z.optional( + z.enum(['auto', '16:9', '1:1', '9:16']).register(z.globalRegistry, { + description: 'Aspect ratio of the generated video.', + }), + ), + match_input_frames_per_second: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, the frames per second of the generated video will match the input video. If false, the frames per second will be determined by the frames_per_second parameter.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. 
Higher values give better quality but take longer.', + }), + ) + .default(30), + last_frame_url: z.optional(z.union([z.string(), z.unknown()])), +}) + +/** + * LucyEditDevOutput + */ +export const zSchemaLucyEditDevOutput = z.object({ + video: zSchemaFile, +}) + +/** + * LucyEditDevInput + */ +export const zSchemaLucyEditDevInput = z.object({ + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the video to be generated\n and uploaded before returning the response. This will increase the\n latency of the function but it allows you to get the video directly\n in the response without going through the CDN.\n ', + }), + ) + .default(true), + video_url: z.string().register(z.globalRegistry, { + description: 'URL of the video to edit', + }), + prompt: z.string().max(1500).register(z.globalRegistry, { + description: 'Text description of the desired video content', + }), + enhance_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enhance the prompt for better results.', + }), + ) + .default(true), +}) + +/** + * LucyEditProOutput + */ +export const zSchemaLucyEditProOutput = z.object({ + video: zSchemaFile, +}) + +/** + * LucyEditProInput + */ +export const zSchemaLucyEditProInput = z.object({ + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the video to be generated\n and uploaded before returning the response. This will increase the\n latency of the function but it allows you to get the video directly\n in the response without going through the CDN.\n ', + }), + ) + .default(true), + video_url: z.string().register(z.globalRegistry, { + description: 'URL of the video to edit', + }), + prompt: z.string().max(1500).register(z.globalRegistry, { + description: 'Text description of the desired video content', + }), + resolution: z.optional( + z.enum(['720p']).register(z.globalRegistry, { + description: 'Resolution of the generated video', + }), + ), + enhance_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enhance the prompt for better results.', + }), + ) + .default(true), +}) + +/** + * WanAnimateMoveResponse + */ +export const zSchemaWanV2214bAnimateMoveOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for generation (auto-generated by the model)', + }), + frames_zip: z.optional(zSchemaFile), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation', + }), + video: zSchemaFile, +}) + +/** + * WanAnimateMoveRequest + */ +export const zSchemaWanV2214bAnimateMoveInput = z.object({ + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: + 'The write mode of the output video. 
Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size.',
+    }),
+  ),
+  video_url: z.string().register(z.globalRegistry, {
+    description: 'URL of the input video.',
+  }),
+  resolution: z.optional(
+    z.enum(['480p', '580p', '720p']).register(z.globalRegistry, {
+      description: 'Resolution of the generated video (480p, 580p, or 720p).',
+    }),
+  ),
+  return_frames_zip: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description:
+          'If true, also return a ZIP archive containing per-frame images generated on GPU (lossless).',
+      }),
+    )
+    .default(false),
+  shift: z
+    .optional(
+      z.number().gte(1).lte(10).register(z.globalRegistry, {
+        description: 'Shift value for the video. Must be between 1.0 and 10.0.',
+      }),
+    )
+    .default(5),
+  enable_output_safety_checker: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description:
+          'If set to true, output video will be checked for safety after generation.',
+      }),
+    )
+    .default(false),
+  image_url: z.string().register(z.globalRegistry, {
+    description:
+      'URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped.',
+  }),
+  video_quality: z.optional(
+    z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, {
+      description:
+        'The quality of the output video. Higher quality means better visual quality but larger file size.',
+    }),
+  ),
+  enable_safety_checker: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description:
+          'If set to true, input data will be checked for safety before processing.',
+      }),
+    )
+    .default(false),
+  num_inference_steps: z
+    .optional(
+      z.int().gte(2).lte(40).register(z.globalRegistry, {
+        description:
+          'Number of inference steps for sampling. Higher values give better quality but take longer.',
+      }),
+    )
+    .default(20),
+  use_turbo: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description:
+          'If true, applies quality enhancement for faster generation with improved quality. When enabled, parameters are automatically optimized for best results.',
+      }),
+    )
+    .default(false),
+  guidance_scale: z
+    .optional(
+      z.number().gte(1).lte(10).register(z.globalRegistry, {
+        description:
+          'Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.',
+      }),
+    )
+    .default(1),
+  seed: z.optional(
+    z.int().register(z.globalRegistry, {
+      description:
+        'Random seed for reproducibility. If None, a random seed is chosen.',
+    }),
+  ),
+})
+
+/**
+ * WanAnimateReplaceResponse
+ */
+export const zSchemaWanV2214bAnimateReplaceOutput = z.object({
+  prompt: z.string().register(z.globalRegistry, {
+    description: 'The prompt used for generation (auto-generated by the model)',
+  }),
+  frames_zip: z.optional(zSchemaFile),
+  seed: z.int().register(z.globalRegistry, {
+    description: 'The seed used for generation',
+  }),
+  video: zSchemaFile,
+})
+
+/**
+ * WanAnimateReplaceRequest
+ */
+export const zSchemaWanV2214bAnimateReplaceInput = z.object({
+  video_write_mode: z.optional(
+    z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, {
+      description:
+        'The write mode of the output video. 
Faster write mode means faster results but larger file size, balanced write mode is a good compromise between speed and quality, and small write mode is the slowest but produces the smallest file size.', + }), + ), + video_url: z.string().register(z.globalRegistry, { + description: 'URL of the input video.', + }), + resolution: z.optional( + z.enum(['480p', '580p', '720p']).register(z.globalRegistry, { + description: 'Resolution of the generated video (480p, 580p, or 720p).', + }), + ), + return_frames_zip: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, also return a ZIP archive containing per-frame images generated on GPU (lossless).', + }), + ) + .default(false), + shift: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'Shift value for the video. Must be between 1.0 and 10.0.', + }), + ) + .default(5), + enable_output_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, output video will be checked for safety after generation.', + }), + ) + .default(false), + image_url: z.string().register(z.globalRegistry, { + description: + 'URL of the input image. If the input image does not match the chosen aspect ratio, it is resized and center cropped.', + }), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: + 'The quality of the output video. Higher quality means better visual quality but larger file size.', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If set to true, input data will be checked for safety before processing.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(2).lte(40).register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. Higher values give better quality but take longer.', + }), + ) + .default(20), + use_turbo: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, applies quality enhancement for faster generation with improved quality. When enabled, parameters are automatically optimized for best results.', + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Classifier-free guidance scale. Higher values give better adherence to the prompt but may decrease quality.', + }), + ) + .default(1), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), +}) + +/** + * WanVACEVideoEditResponse + */ +export const zSchemaWanVaceAppsVideoEditOutput = z.object({ + frames_zip: z.optional(zSchemaFile), + video: zSchemaVideoFile, +}) + +/** + * WanVACEVideoEditRequest + */ +export const zSchemaWanVaceAppsVideoEditInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Prompt to edit the video.', + }), + video_url: z.string().register(z.globalRegistry, { + description: 'URL of the input video.', + }), + acceleration: z.optional( + z.enum(['none', 'low', 'regular']).register(z.globalRegistry, { + description: + "Acceleration to use for inference. Options are 'none' or 'regular'. 
Accelerated inference will very slightly affect output, but will be significantly faster.",
+    }),
+  ),
+  resolution: z.optional(
+    z
+      .enum(['auto', '240p', '360p', '480p', '580p', '720p'])
+      .register(z.globalRegistry, {
+        description: 'Resolution of the edited video.',
+      }),
+  ),
+  aspect_ratio: z.optional(
+    z.enum(['auto', '16:9', '9:16', '1:1']).register(z.globalRegistry, {
+      description: 'Aspect ratio of the edited video.',
+    }),
+  ),
+  return_frames_zip: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description:
+          'Whether to include a ZIP archive containing all generated frames.',
+      }),
+    )
+    .default(false),
+  enable_safety_checker: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description: 'Whether to enable the safety checker.',
+      }),
+    )
+    .default(true),
+  video_type: z.optional(
+    z.enum(['auto', 'general', 'human']).register(z.globalRegistry, {
+      description:
+        "The type of video you're editing. Use 'general' for most videos, and 'human' for videos emphasizing human subjects and motions. The default value 'auto' means the model will guess based on the first frame of the video.",
+    }),
+  ),
+  image_urls: z
+    .optional(
+      z.array(z.string()).register(z.globalRegistry, {
+        description:
+          'URLs of the input images to use as a reference for the generation.',
+      }),
+    )
+    .default([]),
+  enable_auto_downsample: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description:
+          'Whether to enable automatic downsampling. If your video has a high frame rate or is long, this enables longer sequences to be generated. The video will be interpolated back to the original frame rate after generation.',
+      }),
+    )
+    .default(true),
+  auto_downsample_min_fps: z
+    .optional(
+      z.number().gte(1).lte(60).register(z.globalRegistry, {
+        description:
+          'The minimum frames per second to downsample the video to.',
+      }),
+    )
+    .default(15),
+})
+
+/**
+ * SeedVRVideoOutput
+ */
+export const zSchemaSeedvrUpscaleVideoOutput = z.object({
+  seed: z.int().register(z.globalRegistry, {
+    description: 'The random seed used for the generation process.',
+  }),
+  video: zSchemaFile,
+})
+
+/**
+ * SeedVRVideoInput
+ */
+export const zSchemaSeedvrUpscaleVideoInput = z.object({
+  upscale_mode: z.optional(
+    z.enum(['target', 'factor']).register(z.globalRegistry, {
+      description:
+        "The mode to use for the upscale. If 'target', the upscale factor will be calculated based on the target resolution. 
If 'factor', the upscale factor will be used directly.", + }), + ), + video_url: z.string().register(z.globalRegistry, { + description: 'The input video to be processed', + }), + noise_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'The noise scale to use for the generation process.', + }), + ) + .default(0.1), + output_format: z.optional( + z + .enum(['X264 (.mp4)', 'VP9 (.webm)', 'PRORES4444 (.mov)', 'GIF (.gif)']) + .register(z.globalRegistry, { + description: 'The format of the output video.', + }), + ), + output_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the output video.', + }), + ), + target_resolution: z.optional( + z.enum(['720p', '1080p', '1440p', '2160p']).register(z.globalRegistry, { + description: + 'The target resolution to upscale to when `upscale_mode` is `target`.', + }), + ), + output_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the output video.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + upscale_factor: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Upscaling factor to be used. Will multiply the dimensions with this factor when `upscale_mode` is `factor`.', + }), + ) + .default(2), + seed: z.optional(z.union([z.int(), z.unknown()])), +}) + +/** + * InfinitalkVid2VidResponse + */ +export const zSchemaInfinitalkVideoToVideoOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, +}) + +/** + * InfiniTalkVid2VidAudioRequest + */ +export const zSchemaInfinitalkVideoToVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The text prompt to guide video generation.', + }), + resolution: z.optional( + z.enum(['480p', '720p']).register(z.globalRegistry, { + description: + 'Resolution of the video to generate. Must be either 480p or 720p.', + }), + ), + acceleration: z.optional( + z.enum(['none', 'regular', 'high']).register(z.globalRegistry, { + description: 'The acceleration level to use for generation.', + }), + ), + video_url: z.string().register(z.globalRegistry, { + description: 'URL of the input video.', + }), + audio_url: z.string().register(z.globalRegistry, { + description: 'The URL of the audio file.', + }), + num_frames: z + .optional( + z.int().gte(41).lte(241).register(z.globalRegistry, { + description: + 'Number of frames to generate. Must be between 81 to 129 (inclusive). If the number of frames is greater than 81, the video will be generated with 1.25x more billing units.', + }), + ) + .default(145), + seed: z + .optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ) + .default(42), +}) + +/** + * LongWanVACEReframeResponse + */ +export const zSchemaWanVaceAppsLongReframeOutput = z.object({ + video: zSchemaVideoFile, +}) + +/** + * LongWanVACEReframeRequest + */ +export const zSchemaWanVaceAppsLongReframeInput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'The text prompt to guide video generation. 
Optional for reframing.', + }), + ) + .default(''), + video_url: z.string().register(z.globalRegistry, { + description: + 'URL to the source video file. This video will be used as a reference for the reframe task.', + }), + acceleration: z.optional( + z.enum(['none', 'low', 'regular']).register(z.globalRegistry, { + description: + "Acceleration to use for inference. Options are 'none' or 'regular'. Accelerated inference will very slightly affect output, but will be significantly faster.", + }), + ), + paste_back: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to paste back the reframed scene to the original video.', + }), + ) + .default(true), + zoom_factor: z + .optional( + z.number().gte(0).lte(0.9).register(z.globalRegistry, { + description: + 'Zoom factor for the video. When this value is greater than 0, the video will be zoomed in by this factor (in relation to the canvas size,) cutting off the edges of the video. A value of 0 means no zoom.', + }), + ) + .default(0), + shift: z + .optional( + z.number().gte(1).lte(15).register(z.globalRegistry, { + description: 'Shift parameter for video generation.', + }), + ) + .default(5), + scene_threshold: z + .optional( + z.number().gte(0).lte(100).register(z.globalRegistry, { + description: + 'Threshold for scene detection sensitivity (0-100). Lower values detect more scenes.', + }), + ) + .default(30), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(false), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'Guidance scale for classifier-free guidance. Higher values encourage the model to generate images closely related to the text prompt.', + }), + ) + .default(5), + auto_downsample_min_fps: z + .optional( + z.number().gte(1).lte(60).register(z.globalRegistry, { + description: 'Minimum FPS for auto downsample.', + }), + ) + .default(6), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for video generation.', + }), + ) + .default( + 'letterboxing, borders, black bars, bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards', + ), + sampler: z.optional( + z.enum(['unipc', 'dpm++', 'euler']).register(z.globalRegistry, { + description: 'Sampler to use for video generation.', + }), + ), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + resolution: z.optional( + z + .enum(['auto', '240p', '360p', '480p', '580p', '720p']) + .register(z.globalRegistry, { + description: 'Resolution of the generated video.', + }), + ), + aspect_ratio: z.optional( + z.enum(['auto', '16:9', '1:1', '9:16']).register(z.globalRegistry, { + description: 'Aspect ratio of the generated video.', + }), + ), + return_frames_zip: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, also return a ZIP file containing all generated frames.', + }), + ) + .default(false), + trim_borders: z + .optional( + 
z.boolean().register(z.globalRegistry, { + description: 'Whether to trim borders from the video.', + }), + ) + .default(true), + transparency_mode: z.optional( + z.enum(['content_aware', 'white', 'black']).register(z.globalRegistry, { + description: + 'The transparency mode to apply to the first and last frames. This controls how the transparent areas of the first and last frames are filled.', + }), + ), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), + interpolator_model: z.optional( + z.enum(['rife', 'film']).register(z.globalRegistry, { + description: + "The model to use for frame interpolation. Options are 'rife' or 'film'.", + }), + ), + enable_auto_downsample: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable auto downsample.', + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(2).lte(50).register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. Higher values give better quality but take longer.', + }), + ) + .default(30), +}) + +/** + * ImageFile + */ +export const zSchemaImageFile = z.object({ + file_size: z.optional( + z.int().register(z.globalRegistry, { + description: 'The size of the file in bytes.', + }), + ), + height: z.optional( + z.int().register(z.globalRegistry, { + description: 'The height of the image', + }), + ), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), + width: z.optional( + z.int().register(z.globalRegistry, { + description: 'The width of the image', + }), + ), + file_name: z.optional( + z.string().register(z.globalRegistry, { + description: + 'The name of the file. It will be auto-generated if not provided.', + }), + ), + content_type: z.optional( + z.string().register(z.globalRegistry, { + description: 'The mime type of the file.', + }), + ), + file_data: z.optional( + z.string().register(z.globalRegistry, { + description: 'File data', + }), + ), +}) + +/** + * RemixOutput + */ +export const zSchemaSora2VideoToVideoRemixOutput = z.object({ + spritesheet: z.optional(zSchemaImageFile), + thumbnail: z.optional(zSchemaImageFile), + video_id: z.string().register(z.globalRegistry, { + description: 'The ID of the generated video', + }), + video: zSchemaVideoFile, +}) + +/** + * RemixInput + */ +export const zSchemaSora2VideoToVideoRemixInput = z.object({ + prompt: z.string().min(1).max(5000).register(z.globalRegistry, { + description: 'Updated text prompt that directs the remix generation', + }), + video_id: z.string().register(z.globalRegistry, { + description: + 'The video_id from a previous Sora 2 generation. Note: You can only remix videos that were generated by Sora (via text-to-video or image-to-video endpoints), not arbitrary uploaded videos.', + }), + delete_video: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to delete the video after generation for privacy reasons. 
If True, the video cannot be used for remixing and will be permanently deleted.',
+      }),
+    )
+    .default(true),
+})
+
+/**
+ * VideoToVideoOutput
+ */
+export const zSchemaKreaWan14bVideoToVideoOutput = z.object({
+  video: zSchemaFile,
+})
+
+/**
+ * VideoToVideoInput
+ */
+export const zSchemaKreaWan14bVideoToVideoInput = z.object({
+  prompt: z.string().register(z.globalRegistry, {
+    description: 'Prompt for the video-to-video generation.',
+  }),
+  video_url: z.string().register(z.globalRegistry, {
+    description:
+      'URL of the input video. Currently, only outputs of 16:9 aspect ratio and 480p resolution are supported. Video duration should be less than 1000 frames at 16fps, and output frames will be 6 plus a multiple of 12, for example 18, 30, 42, etc.',
+  }),
+  strength: z
+    .optional(
+      z.number().gte(0.01).lte(1).register(z.globalRegistry, {
+        description:
+          'Denoising strength for the video-to-video generation. 0.0 preserves the original, 1.0 completely remakes the video.',
+      }),
+    )
+    .default(0.85),
+  enable_prompt_expansion: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description:
+          'Whether to enable prompt expansion. This will use a large language model to expand the prompt with additional details while maintaining the original meaning.',
+      }),
+    )
+    .default(false),
+  seed: z.optional(z.union([z.int(), z.unknown()])),
+})
+
+/**
+ * Video
+ */
+export const zSchemaVideoOutput = z.object({
+  file_size: z.optional(z.union([z.int(), z.unknown()])),
+  file_name: z.optional(z.union([z.string(), z.unknown()])),
+  content_type: z.optional(z.union([z.string(), z.unknown()])),
+  url: z.string().register(z.globalRegistry, {
+    description: 'The URL where the file can be downloaded from.',
+  }),
+})
+
+/**
+ * VideoOutput
+ */
+export const zSchemaSfxV15VideoToVideoOutput = z.object({
+  video: z.array(zSchemaVideoOutput).register(z.globalRegistry, {
+    description: 'The processed video with sound effects',
+  }),
+})
+
+/**
+ * Input
+ */
+export const zSchemaSfxV15VideoToVideoInput = z.object({
+  num_samples: z.optional(z.union([z.int().gte(2).lte(8), z.unknown()])),
+  duration: z.optional(z.union([z.number().gte(1).lte(10), z.unknown()])),
+  start_offset: z.optional(z.union([z.number().gte(0), z.unknown()])),
+  video_url: z.url().min(1).max(2083).register(z.globalRegistry, {
+    description:
+      'A video URL that can be accessed from the API to process and add sound effects',
+  }),
+  seed: z.optional(z.union([z.int().gte(1), z.unknown()])),
+  text_prompt: z.optional(z.union([z.string(), z.unknown()])),
+})
+
+/**
+ * Q2VideoExtensionOutput
+ */
+export const zSchemaViduQ2VideoExtensionProOutput = z.object({
+  video: zSchemaFile,
+})
+
+/**
+ * Q2VideoExtensionRequest
+ */
+export const zSchemaViduQ2VideoExtensionProInput = z.object({
+  prompt: z.optional(
+    z.string().max(3000).register(z.globalRegistry, {
+      description: 'Text prompt to guide the video extension',
+    }),
+  ),
+  duration: z.optional(
+    z
+      .union([
+        z.literal(2),
+        z.literal(3),
+        z.literal(4),
+        z.literal(5),
+        z.literal(6),
+        z.literal(7),
+      ])
+      .register(z.globalRegistry, {
+        description: 'Duration of the extension in seconds',
+      }),
+  ),
+  video_url: z.string().register(z.globalRegistry, {
+    description: 'URL of the video to extend',
+  }),
+  resolution: z.optional(
+    z.enum(['720p', '1080p']).register(z.globalRegistry, {
+      description: 'Output video resolution',
+    }),
+  ),
+  seed: z.optional(
+    z.int().register(z.globalRegistry, {
+      description:
+        'Random seed for reproducibility. 
If None, a random seed is chosen.', + }), + ), +}) + +/** + * VideoOutput + */ +export const zSchemaBirefnetV2VideoOutput = z.object({ + video: zSchemaVideoFile, + mask_video: z.optional(zSchemaVideoFile), +}) + +/** + * VideoInputV2 + */ +export const zSchemaBirefnetV2VideoInput = z.object({ + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + video_output_type: z.optional( + z + .enum(['X264 (.mp4)', 'VP9 (.webm)', 'PRORES4444 (.mov)', 'GIF (.gif)']) + .register(z.globalRegistry, { + description: 'The output type of the generated video.', + }), + ), + operating_resolution: z.optional( + z.enum(['1024x1024', '2048x2048', '2304x2304']).register(z.globalRegistry, { + description: + "The resolution to operate on. The higher the resolution, the more accurate the output will be for high res input images. The '2304x2304' option is only available for the 'General Use (Dynamic)' model.", + }), + ), + video_url: z.string().register(z.globalRegistry, { + description: 'URL of the video to remove background from', + }), + model: z.optional( + z + .enum([ + 'General Use (Light)', + 'General Use (Light 2K)', + 'General Use (Heavy)', + 'Matting', + 'Portrait', + 'General Use (Dynamic)', + ]) + .register(z.globalRegistry, { + description: + "\n Model to use for background removal.\n The 'General Use (Light)' model is the original model used in the BiRefNet repository.\n The 'General Use (Light 2K)' model is the original model used in the BiRefNet repository but trained with 2K images.\n The 'General Use (Heavy)' model is a slower but more accurate model.\n The 'Matting' model is a model trained specifically for matting images.\n The 'Portrait' model is a model trained specifically for portrait images.\n The 'General Use (Dynamic)' model supports dynamic resolutions from 256x256 to 2304x2304.\n The 'General Use (Light)' model is recommended for most use cases.\n\n The corresponding models are as follows:\n - 'General Use (Light)': BiRefNet\n - 'General Use (Light 2K)': BiRefNet_lite-2K\n - 'General Use (Heavy)': BiRefNet_lite\n - 'Matting': BiRefNet-matting\n - 'Portrait': BiRefNet-portrait\n - 'General Use (Dynamic)': BiRefNet_dynamic\n ", + }), + ), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + output_mask: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to output the mask used to remove the background', + }), + ) + .default(false), + refine_foreground: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to refine the foreground using the estimated mask', + }), + ) + .default(true), +}) + +/** + * VideoEffectOutput + */ +export const zSchemaVideoAsPromptOutput = z.object({ + video: zSchemaFile, +}) + +/** + * VideoEffectInputWan + */ +export const zSchemaVideoAsPromptInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate an image from.', + }), + aspect_ratio: z.optional( + z.enum(['16:9', '9:16']).register(z.globalRegistry, { + description: 'Aspect ratio of the generated video.', + }), + ), + resolution: z.optional( 
+ z.enum(['480p', '580p', '720p']).register(z.globalRegistry, { + description: 'Resolution of the generated video.', + }), + ), + video_url: z.string().register(z.globalRegistry, { + description: 'reference video to generate effect video from.', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'Input image to generate the effect video for.', + }), + fps: z + .optional( + z.int().gte(1).lte(60).register(z.globalRegistry, { + description: + "Frames per second for the output video. Only applicable if output_type is 'video'.", + }), + ) + .default(16), + video_description: z.string().register(z.globalRegistry, { + description: 'A brief description of the input video content.', + }), + seed: z.optional(z.union([z.int(), z.unknown()])), + guidance_scale: z + .optional( + z.number().gte(1).lte(20).register(z.globalRegistry, { + description: 'Guidance scale for generation.', + }), + ) + .default(5), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + num_frames: z + .optional( + z.int().gte(1).lte(100).register(z.globalRegistry, { + description: 'The number of frames to generate.', + }), + ) + .default(49), +}) + +/** + * UpscaleOutput + */ +export const zSchemaBytedanceUpscalerUpscaleVideoOutput = z.object({ + duration: z.number().register(z.globalRegistry, { + description: 'Duration of audio input/video output as used for billing.', + }), + video: zSchemaFile, +}) + +/** + * UpscaleInput + */ +export const zSchemaBytedanceUpscalerUpscaleVideoInput = z.object({ + target_fps: z.optional( + z.enum(['30fps', '60fps']).register(z.globalRegistry, { + description: 'The target FPS of the video to upscale.', + }), + ), + video_url: z.string().register(z.globalRegistry, { + description: 'The URL of the video to upscale.', + }), + target_resolution: z.optional( + z.enum(['1080p', '2k', '4k']).register(z.globalRegistry, { + description: 'The target resolution of the video to upscale.', + }), + ), +}) + +/** + * AutoSubtitleOutput + * + * Output model for video with automatic subtitles + */ +export const zSchemaWorkflowUtilitiesAutoSubtitleOutput = z + .object({ + transcription: z.string().register(z.globalRegistry, { + description: 'Full transcription text', + }), + subtitle_count: z.int().register(z.globalRegistry, { + description: 'Number of subtitle segments generated', + }), + transcription_metadata: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: + 'Additional transcription metadata from ElevenLabs (language, segments, etc.)', + }), + ), + words: z.optional( + z.array(z.record(z.string(), z.unknown())).register(z.globalRegistry, { + description: 'Word-level timing information from transcription service', + }), + ), + video: zSchemaFile, + }) + .register(z.globalRegistry, { + description: 'Output model for video with automatic subtitles', + }) + +/** + * AutoSubtitleInput + * + * Input model for automatic subtitle generation and styling + */ +export const zSchemaWorkflowUtilitiesAutoSubtitleInput = z + .object({ + font_weight: z.optional( + z.enum(['normal', 'bold', 'black']).register(z.globalRegistry, { + description: 'Font weight (TikTok style typically uses bold or black)', + }), + ), + video_url: z.string().register(z.globalRegistry, { + description: + 'URL of the video file to add automatic subtitles to\n\nMax file size: 95.4MB, Timeout: 30.0s', + }), + stroke_width: z + .optional( + 
z.int().gte(0).lte(10).register(z.globalRegistry, { + description: 'Text stroke/outline width in pixels (0 for no stroke)', + }), + ) + .default(3), + font_color: z.optional( + z + .enum([ + 'white', + 'black', + 'red', + 'green', + 'blue', + 'yellow', + 'orange', + 'purple', + 'pink', + 'brown', + 'gray', + 'cyan', + 'magenta', + ]) + .register(z.globalRegistry, { + description: 'Subtitle text color for non-active words', + }), + ), + font_size: z + .optional( + z.int().gte(20).lte(150).register(z.globalRegistry, { + description: + 'Font size for subtitles (TikTok style uses larger text)', + }), + ) + .default(100), + language: z + .optional( + z.string().register(z.globalRegistry, { + description: + "Language code for transcription (e.g., 'en', 'es', 'fr', 'de', 'it', 'pt', 'nl', 'ja', 'zh', 'ko') or 3-letter ISO code (e.g., 'eng', 'spa', 'fra')", + }), + ) + .default('en'), + y_offset: z + .optional( + z.int().gte(-200).lte(200).register(z.globalRegistry, { + description: + 'Vertical offset in pixels (positive = move down, negative = move up)', + }), + ) + .default(75), + background_opacity: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Background opacity (0.0 = fully transparent, 1.0 = fully opaque)', + }), + ) + .default(0), + stroke_color: z.optional( + z + .enum([ + 'black', + 'white', + 'red', + 'green', + 'blue', + 'yellow', + 'orange', + 'purple', + 'pink', + 'brown', + 'gray', + 'cyan', + 'magenta', + ]) + .register(z.globalRegistry, { + description: 'Text stroke/outline color', + }), + ), + highlight_color: z.optional( + z + .enum([ + 'white', + 'black', + 'red', + 'green', + 'blue', + 'yellow', + 'orange', + 'purple', + 'pink', + 'brown', + 'gray', + 'cyan', + 'magenta', + ]) + .register(z.globalRegistry, { + description: + 'Color for the currently speaking word (karaoke-style highlight)', + }), + ), + enable_animation: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Enable animation effects for subtitles (bounce style entrance)', + }), + ) + .default(true), + font_name: z + .optional( + z.string().register(z.globalRegistry, { + description: + "Any Google Font name from fonts.google.com (e.g., 'Montserrat', 'Poppins', 'BBH Sans Hegarty')", + }), + ) + .default('Montserrat'), + position: z.optional( + z.enum(['top', 'center', 'bottom']).register(z.globalRegistry, { + description: 'Vertical position of subtitles', + }), + ), + words_per_subtitle: z + .optional( + z.int().gte(1).lte(12).register(z.globalRegistry, { + description: + 'Maximum number of words per subtitle segment. Use 1 for single-word display, 2-3 for short phrases, or 8-12 for full sentences.', + }), + ) + .default(3), + background_color: z.optional( + z + .enum([ + 'black', + 'white', + 'red', + 'green', + 'blue', + 'yellow', + 'orange', + 'purple', + 'pink', + 'brown', + 'gray', + 'cyan', + 'magenta', + 'none', + 'transparent', + ]) + .register(z.globalRegistry, { + description: + "Background color behind text ('none' or 'transparent' for no background)", + }), + ), + }) + .register(z.globalRegistry, { + description: 'Input model for automatic subtitle generation and styling', + }) + +/** + * FlashVSRPlusVideoOutput + */ +export const zSchemaFlashvsrUpscaleVideoOutput = z.object({ + seed: z.int().register(z.globalRegistry, { + description: 'The random seed used for the generation process.', + }), + video: zSchemaFile, +}) + +/** + * FlashVSRPlusVideoInput + * + * Input fields common to FlashVSR+ image/video endpoints. 
+ */
+export const zSchemaFlashvsrUpscaleVideoInput = z
+  .object({
+    video_url: z.string().register(z.globalRegistry, {
+      description: 'The input video to be upscaled',
+    }),
+    acceleration: z.optional(
+      z.enum(['regular', 'high', 'full']).register(z.globalRegistry, {
+        description:
+          'Acceleration mode for VAE decoding. Options: regular (best quality), high (balanced), full (fastest). More acceleration means longer-duration videos can be processed too.',
+      }),
+    ),
+    quality: z
+      .optional(
+        z.int().gte(0).lte(100).register(z.globalRegistry, {
+          description:
+            'Quality level for tile blending (0-100). Controls overlap between tiles to prevent grid artifacts. Higher values provide better quality with more overlap. Recommended: 70-85 for high-res videos, 50-70 for faster processing.',
+        }),
+      )
+      .default(70),
+    output_format: z.optional(
+      z
+        .enum(['X264 (.mp4)', 'VP9 (.webm)', 'PRORES4444 (.mov)', 'GIF (.gif)'])
+        .register(z.globalRegistry, {
+          description: 'The format of the output video.',
+        }),
+    ),
+    color_fix: z
+      .optional(
+        z.boolean().register(z.globalRegistry, {
+          description: 'Color correction enabled.',
+        }),
+      )
+      .default(true),
+    output_write_mode: z.optional(
+      z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, {
+        description: 'The write mode of the output video.',
+      }),
+    ),
+    sync_mode: z
+      .optional(
+        z.boolean().register(z.globalRegistry, {
+          description:
+            'If `True`, the media will be returned inline and not stored in history.',
+        }),
+      )
+      .default(false),
+    output_quality: z.optional(
+      z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, {
+        description: 'The quality of the output video.',
+      }),
+    ),
+    upscale_factor: z
+      .optional(
+        z.number().gte(1).lte(4).register(z.globalRegistry, {
+          description: 'Upscaling factor to be used.',
+        }),
+      )
+      .default(2),
+    preserve_audio: z
+      .optional(
+        z.boolean().register(z.globalRegistry, {
+          description:
+            'Copy the original audio tracks into the upscaled video using FFmpeg when possible.',
+        }),
+      )
+      .default(false),
+    seed: z.optional(
+      z.int().register(z.globalRegistry, {
+        description: 'The random seed used for the generation process.',
+      }),
+    ),
+  })
+  .register(z.globalRegistry, {
+    description: 'Input fields common to FlashVSR+ image/video endpoints.',
+  })
+
+/**
+ * EdittoOutput
+ */
+export const zSchemaEdittoOutput = z.object({
+  prompt: z.string().register(z.globalRegistry, {
+    description: 'The prompt used for generation.',
+  }),
+  frames_zip: z.optional(z.union([zSchemaFile, z.unknown()])),
+  seed: z.int().register(z.globalRegistry, {
+    description: 'The seed used for generation.',
+  }),
+  video: zSchemaVideoFile,
+})
+
+/**
+ * EdittoInput
+ */
+export const zSchemaEdittoInput = z.object({
+  prompt: z.string().register(z.globalRegistry, {
+    description: 'The text prompt to guide video generation.',
+  }),
+  video_url: z.string().register(z.globalRegistry, {
+    description: 'URL to the source video file. Required for inpainting.',
+  }),
+  acceleration: z.optional(
+    z.union([z.enum(['none', 'low', 'regular']), z.unknown()]),
+  ),
+  num_interpolated_frames: z
+    .optional(
+      z.int().gte(0).lte(5).register(z.globalRegistry, {
+        description:
+          'Number of frames to interpolate between the original frames. A value of 0 means no interpolation.',
+      }),
+    )
+    .default(0),
+  temporal_downsample_factor: z
+    .optional(
+      z.int().gte(0).lte(5).register(z.globalRegistry, {
+        description:
+          'Temporal downsample factor for the video. 
+          'Temporal downsample factor for the video. This is an integer value that determines how many frames to skip in the video. A value of 0 means no downsampling. For each downsample factor, one upsample factor will automatically be applied.',
+      }),
+    )
+    .default(0),
+  shift: z
+    .optional(
+      z.number().gte(1).lte(15).register(z.globalRegistry, {
+        description: 'Shift parameter for video generation.',
+      }),
+    )
+    .default(5),
+  frames_per_second: z.optional(z.union([z.int().gte(5).lte(30), z.unknown()])),
+  match_input_num_frames: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description:
+          'If true, the number of frames in the generated video will match the number of frames in the input video. If false, the number of frames will be determined by the num_frames parameter.',
+      }),
+    )
+    .default(false),
+  guidance_scale: z
+    .optional(
+      z.number().gte(1).lte(10).register(z.globalRegistry, {
+        description:
+          'Guidance scale for classifier-free guidance. Higher values encourage the model to generate images closely related to the text prompt.',
+      }),
+    )
+    .default(5),
+  num_frames: z
+    .optional(
+      z.int().gte(17).lte(241).register(z.globalRegistry, {
+        description:
+          'Number of frames to generate. Must be between 81 and 241 (inclusive).',
+      }),
+    )
+    .default(81),
+  enable_safety_checker: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description: 'If set to true, the safety checker will be enabled.',
+      }),
+    )
+    .default(false),
+  negative_prompt: z
+    .optional(
+      z.string().register(z.globalRegistry, {
+        description: 'Negative prompt for video generation.',
+      }),
+    )
+    .default(
+      'letterboxing, borders, black bars, bright colors, overexposed, static, blurred details, subtitles, style, artwork, painting, picture, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still picture, cluttered background, three legs, many people in the background, walking backwards',
+    ),
+  sampler: z.optional(
+    z.enum(['unipc', 'dpm++', 'euler']).register(z.globalRegistry, {
+      description: 'Sampler to use for video generation.',
+    }),
+  ),
+  video_write_mode: z.optional(
+    z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, {
+      description: 'The write mode of the generated video.',
+    }),
+  ),
+  resolution: z.optional(
+    z
+      .enum(['auto', '240p', '360p', '480p', '580p', '720p'])
+      .register(z.globalRegistry, {
+        description: 'Resolution of the generated video.',
+      }),
+  ),
+  aspect_ratio: z.optional(
+    z.enum(['auto', '16:9', '1:1', '9:16']).register(z.globalRegistry, {
+      description: 'Aspect ratio of the generated video.',
+    }),
+  ),
+  return_frames_zip: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description:
+          'If true, also return a ZIP file containing all generated frames.',
+      }),
+    )
+    .default(false),
+  video_quality: z.optional(
+    z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, {
+      description: 'The quality of the generated video.',
+    }),
+  ),
+  match_input_frames_per_second: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description:
+          'If true, the frames per second of the generated video will match the input video. If false, the frames per second will be determined by the frames_per_second parameter.',
+      }),
+    )
+    .default(false),
+  sync_mode: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description:
+          "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.",
+      }),
+    )
+    .default(false),
+  num_inference_steps: z
+    .optional(
+      z.int().gte(2).lte(50).register(z.globalRegistry, {
+        description:
+          'Number of inference steps for sampling. Higher values give better quality but take longer.',
+      }),
+    )
+    .default(30),
+  seed: z.optional(z.union([z.int(), z.unknown()])),
+  enable_auto_downsample: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description:
+          'If true, the model will automatically temporally downsample the video to an appropriate frame length for the model, then will interpolate it back to the original frame length.',
+      }),
+    )
+    .default(false),
+})
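+
+// Illustrative usage sketch: the only required Editto fields are `prompt`
+// and `video_url`; every other field is optional and `parse` fills in the
+// schema defaults declared above. The URL is a placeholder.
+export const exampleEdittoInput = zSchemaEdittoInput.parse({
+  prompt: 'Replace the red car with a blue bicycle',
+  video_url: 'https://example.com/source.mp4',
+})
+
+/**
+ * PointPromptBase
+ */
+export const zSchemaPointPromptBase = z.object({
+  y: z.optional(
+    z.int().register(z.globalRegistry, {
+      description: 'Y Coordinate of the prompt',
+    }),
+  ),
+  x: z.optional(
+    z.int().register(z.globalRegistry, {
+      description: 'X Coordinate of the prompt',
+    }),
+  ),
+  object_id: z.optional(
+    z.int().register(z.globalRegistry, {
+      description:
+        'Optional object identifier. Prompts sharing an object id refine the same object.',
+    }),
+  ),
+  label: z.optional(
+    z.union([z.literal(0), z.literal(1)]).register(z.globalRegistry, {
+      description: '1 for foreground, 0 for background',
+    }),
+  ),
+})
+
+/**
+ * BoxPromptBase
+ */
+export const zSchemaBoxPromptBase = z.object({
+  y_min: z.optional(
+    z.int().register(z.globalRegistry, {
+      description: 'Y Min Coordinate of the box',
+    }),
+  ),
+  object_id: z.optional(
+    z.int().register(z.globalRegistry, {
+      description:
+        'Optional object identifier. Boxes sharing an object id refine the same object.',
+    }),
+  ),
+  x_max: z.optional(
+    z.int().register(z.globalRegistry, {
+      description: 'X Max Coordinate of the box',
+    }),
+  ),
+  x_min: z.optional(
+    z.int().register(z.globalRegistry, {
+      description: 'X Min Coordinate of the box',
+    }),
+  ),
+  y_max: z.optional(
+    z.int().register(z.globalRegistry, {
+      description: 'Y Max Coordinate of the box',
+    }),
+  ),
+})
+
+/**
+ * SAM3VideoOutput
+ */
+export const zSchemaSam3VideoOutput = z.object({
+  boundingbox_frames_zip: z.optional(zSchemaFile),
+  video: zSchemaFile,
+})
+
+/**
+ * SAM3VideoInput
+ */
+export const zSchemaSam3VideoInput = z.object({
+  prompt: z
+    .optional(
+      z.string().register(z.globalRegistry, {
+        description:
+          "Text prompt for segmentation. Use commas to track multiple objects (e.g., 'person, cloth').",
+      }),
+    )
+    .default(''),
+  video_url: z.string().register(z.globalRegistry, {
+    description: 'The URL of the video to be segmented.',
+  }),
+  detection_threshold: z
+    .optional(
+      z.number().gte(0.1).lte(1).register(z.globalRegistry, {
+        description:
+          'Detection confidence threshold (0.0-1.0). Lower = more detections but less precise. 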
', + }), + ) + .default(0.5), + box_prompts: z + .optional( + z.array(zSchemaBoxPromptBase).register(z.globalRegistry, { + description: + 'List of box prompt coordinates (x_min, y_min, x_max, y_max).', + }), + ) + .default([]), + point_prompts: z + .optional( + z.array(zSchemaPointPromptBase).register(z.globalRegistry, { + description: 'List of point prompts', + }), + ) + .default([]), + apply_mask: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Apply the mask on the video.', + }), + ) + .default(true), + text_prompt: z.optional( + z.string().register(z.globalRegistry, { + description: + "[DEPRECATED] Use 'prompt' instead. Kept for backward compatibility.", + }), + ), +}) + +/** + * SAM3VideoOutput + */ +export const zSchemaSam3VideoRleOutput = z.object({ + boundingbox_frames_zip: z.optional(zSchemaFile), + video: zSchemaFile, +}) + +/** + * SAM3VideoRLEInput + */ +export const zSchemaSam3VideoRleInput = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: + "Text prompt for segmentation. Use commas to track multiple objects (e.g., 'person, cloth').", + }), + ) + .default(''), + video_url: z.string().register(z.globalRegistry, { + description: 'The URL of the video to be segmented.', + }), + detection_threshold: z + .optional( + z.number().gte(0.01).lte(1).register(z.globalRegistry, { + description: + 'Detection confidence threshold (0.0-1.0). Lower = more detections but less precise. Defaults: 0.5 for existing, 0.7 for new objects. Try 0.2-0.3 if text prompts fail.', + }), + ) + .default(0.5), + box_prompts: z + .optional( + z.array(zSchemaBoxPrompt).register(z.globalRegistry, { + description: 'List of box prompts with optional frame_index.', + }), + ) + .default([]), + boundingbox_zip: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Return per-frame bounding box overlays as a zip archive.', + }), + ) + .default(false), + point_prompts: z + .optional( + z.array(zSchemaPointPrompt).register(z.globalRegistry, { + description: 'List of point prompts with frame indices.', + }), + ) + .default([]), + frame_index: z + .optional( + z.int().register(z.globalRegistry, { + description: + 'Frame index used for initial interaction when mask_url is provided.', + }), + ) + .default(0), + mask_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The URL of the mask to be applied initially.', + }), + ), + apply_mask: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Apply the mask on the video.', + }), + ) + .default(false), +}) + +/** + * LucyEditFastOutput + */ +export const zSchemaLucyEditFastOutput = z.object({ + video: zSchemaFile, +}) + +/** + * LucyEditFastInput + */ +export const zSchemaLucyEditFastInput = z.object({ + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the video to be generated\n and uploaded before returning the response. 
This will increase the\n latency of the function but it allows you to get the video directly\n in the response without going through the CDN.\n ', + }), + ) + .default(false), + video_url: z.string().register(z.globalRegistry, { + description: 'URL of the video to edit', + }), + prompt: z.string().max(1500).register(z.globalRegistry, { + description: 'Text description of the desired video content', + }), + enhance_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enhance the prompt for better results.', + }), + ) + .default(true), +}) + +/** + * LTXRetakeVideoResponse + */ +export const zSchemaLtx2RetakeVideoOutput = z.object({ + video: zSchemaVideoFile, +}) + +/** + * LTXRetakeVideoRequest + */ +export const zSchemaLtx2RetakeVideoInput = z.object({ + prompt: z.string().min(1).max(5000).register(z.globalRegistry, { + description: 'The prompt to retake the video with', + }), + duration: z + .optional( + z.number().gte(2).lte(20).register(z.globalRegistry, { + description: 'The duration of the video to retake in seconds', + }), + ) + .default(5), + video_url: z.string().register(z.globalRegistry, { + description: 'The URL of the video to retake', + }), + start_time: z + .optional( + z.number().gte(0).lte(20).register(z.globalRegistry, { + description: 'The start time of the video to retake in seconds', + }), + ) + .default(0), + retake_mode: z.optional( + z + .enum(['replace_audio', 'replace_video', 'replace_audio_and_video']) + .register(z.globalRegistry, { + description: 'The retake mode to use for the retake', + }), + ), +}) + +/** + * GreenScreenRembgOutput + */ +export const zSchemaVideoBackgroundRemovalGreenScreenOutput = z.object({ + video: z.array(zSchemaFile), +}) + +/** + * GreenScreenRembgInput + */ +export const zSchemaVideoBackgroundRemovalGreenScreenInput = z.object({ + video_url: z.url().min(1).max(2083), + output_codec: z.optional( + z.enum(['vp9', 'h264']).register(z.globalRegistry, { + description: + 'Single VP9 video with alpha channel or two videos (rgb and alpha) in H264 format. H264 is recommended for better RGB quality.', + }), + ), + spill_suppression_strength: z.optional( + z.union([z.number().gte(0).lte(1), z.unknown()]), + ), +}) + +/** + * OmniV2VReferenceOutput + */ +export const zSchemaKlingVideoO1VideoToVideoReferenceOutput = z.object({ + video: zSchemaFile, +}) + +/** + * OmniVideoElementInput + */ +export const zSchemaOmniVideoElementInput = z.object({ + reference_image_urls: z.optional( + z.array(z.string()).register(z.globalRegistry, { + description: + 'Additional reference images from different angles. 1-4 images supported. At least one image is required.', + }), + ), + frontal_image_url: z.string().register(z.globalRegistry, { + description: + 'The frontal image of the element (main view).\n\nMax file size: 10.0MB, Min width: 300px, Min height: 300px, Min aspect ratio: 0.40, Max aspect ratio: 2.50, Timeout: 20.0s', + }), +}) + +/** + * OmniV2VReferenceInput + * + * Input for video editing or video-as-reference generation. + */ +export const zSchemaKlingVideoO1VideoToVideoReferenceInput = z + .object({ + prompt: z.string().max(2500).register(z.globalRegistry, { + description: + 'Use @Element1, @Element2 to reference elements and @Image1, @Image2 to reference images in order.', + }), + aspect_ratio: z.optional( + z.enum(['auto', '16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: + "The aspect ratio of the generated video frame. 
If 'auto', the aspect ratio will be determined automatically based on the input video, and the closest aspect ratio to the input video will be used.", + }), + ), + duration: z.optional( + z + .enum(['3', '4', '5', '6', '7', '8', '9', '10']) + .register(z.globalRegistry, { + description: 'Video duration in seconds.', + }), + ), + video_url: z.string().register(z.globalRegistry, { + description: + 'Reference video URL. Only .mp4/.mov formats supported, 3-10 seconds duration, 720-2160px resolution, max 200MB.\n\nMax file size: 200.0MB, Min width: 720px, Min height: 720px, Max width: 2160px, Max height: 2160px, Min duration: 3.0s, Max duration: 10.05s, Min FPS: 24.0, Max FPS: 60.0, Timeout: 30.0s', + }), + keep_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to keep the original audio from the video.', + }), + ) + .default(false), + elements: z.optional( + z.array(zSchemaOmniVideoElementInput).register(z.globalRegistry, { + description: + 'Elements (characters/objects) to include. Reference in prompt as @Element1, @Element2, etc. Maximum 4 total (elements + reference images) when using video.', + }), + ), + image_urls: z.optional( + z.array(z.string()).register(z.globalRegistry, { + description: + 'Reference images for style/appearance. Reference in prompt as @Image1, @Image2, etc. Maximum 4 total (elements + reference images) when using video.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'Input for video editing or video-as-reference generation.', + }) + +/** + * OmniV2VEditOutput + */ +export const zSchemaKlingVideoO1VideoToVideoEditOutput = z.object({ + video: zSchemaFile, +}) + +/** + * OmniV2VEditInput + * + * Input for video editing or video-as-reference generation. + */ +export const zSchemaKlingVideoO1VideoToVideoEditInput = z + .object({ + prompt: z.string().max(2500).register(z.globalRegistry, { + description: + 'Use @Element1, @Element2 to reference elements and @Image1, @Image2 to reference images in order.', + }), + video_url: z.string().register(z.globalRegistry, { + description: + 'Reference video URL. Only .mp4/.mov formats supported, 3-10 seconds duration, 720-2160px resolution, max 200MB.\n\nMax file size: 200.0MB, Min width: 720px, Min height: 720px, Max width: 2160px, Max height: 2160px, Min duration: 3.0s, Max duration: 10.05s, Min FPS: 24.0, Max FPS: 60.0, Timeout: 30.0s', + }), + elements: z.optional( + z.array(zSchemaOmniVideoElementInput).register(z.globalRegistry, { + description: + 'Elements (characters/objects) to include. Reference in prompt as @Element1, @Element2, etc. Maximum 4 total (elements + reference images) when using video.', + }), + ), + image_urls: z.optional( + z.array(z.string()).register(z.globalRegistry, { + description: + 'Reference images for style/appearance. Reference in prompt as @Image1, @Image2, etc. 
Maximum 4 total (elements + reference images) when using video.',
+      }),
+    ),
+    keep_audio: z
+      .optional(
+        z.boolean().register(z.globalRegistry, {
+          description: 'Whether to keep the original audio from the video.',
+        }),
+      )
+      .default(false),
+  })
+  .register(z.globalRegistry, {
+    description: 'Input for video editing or video-as-reference generation.',
+  })
+
+/**
+ * FastGeneralRembgOutput
+ */
+export const zSchemaVideoBackgroundRemovalFastOutput = z.object({
+  video: z.array(zSchemaFile),
+})
+
+/**
+ * FastGeneralRembgInput
+ */
+export const zSchemaVideoBackgroundRemovalFastInput = z.object({
+  video_url: z.url().min(1).max(2083),
+  subject_is_person: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description: 'Set to False if the subject is not a person.',
+      }),
+    )
+    .default(true),
+  output_codec: z.optional(
+    z.enum(['vp9', 'h264']).register(z.globalRegistry, {
+      description:
+        'Single VP9 video with alpha channel or two videos (rgb and alpha) in H264 format. H264 is recommended for better RGB quality.',
+    }),
+  ),
+  refine_foreground_edges: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description: "Improves the quality of the extracted object's edges.",
+      }),
+    )
+    .default(true),
+})
+
+/**
+ * React1Output
+ */
+export const zSchemaSyncLipsyncReact1Output = z.object({
+  video: zSchemaVideoFile,
+})
+
+/**
+ * React1Input
+ */
+export const zSchemaSyncLipsyncReact1Input = z.object({
+  emotion: z
+    .enum(['happy', 'angry', 'sad', 'neutral', 'disgusted', 'surprised'])
+    .register(z.globalRegistry, {
+      description:
+        'Emotion prompt for the generation. Currently supports single-word emotions only.',
+    }),
+  video_url: z.string().register(z.globalRegistry, {
+    description: 'URL to the input video. Must be **15 seconds or shorter**.',
+  }),
+  lipsync_mode: z.optional(
+    z
+      .enum(['cut_off', 'loop', 'bounce', 'silence', 'remap'])
+      .register(z.globalRegistry, {
+        description:
+          'Lipsync mode when audio and video durations are out of sync.',
+      }),
+  ),
+  audio_url: z.string().register(z.globalRegistry, {
+    description: 'URL to the input audio. Must be **15 seconds or shorter**.',
+  }),
+  temperature: z
+    .optional(
+      z.number().gte(0).lte(1).register(z.globalRegistry, {
+        description: 'Controls the expressiveness of the lipsync.',
+      }),
+    )
+    .default(0.5),
+  model_mode: z.optional(
+    z.enum(['lips', 'face', 'head']).register(z.globalRegistry, {
+      description:
+        'Controls the edit region and movement scope for the model. Available options:\n- `lips`: Only lipsync using react-1 (minimal facial changes).\n- `face`: Lipsync + facial expressions without head movements.\n- `head`: Lipsync + facial expressions + natural talking head movements.',
+    }),
+  ),
+})
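+
+// Illustrative sketch: a React-1 lipsync payload checked at compile time with
+// `satisfies`, so no runtime parse happens at module load. The URLs are
+// placeholders; `emotion` must come from the enum declared above.
+export const exampleSyncLipsyncReact1Input = {
+  emotion: 'happy',
+  video_url: 'https://example.com/talking-head.mp4',
+  audio_url: 'https://example.com/speech.wav',
+  temperature: 0.7,
+} satisfies z.input<typeof zSchemaSyncLipsyncReact1Input>
+
+/**
+ * Output
+ *
+ * Output from Wan Vision Enhancer
+ */
+export const zSchemaWanVisionEnhancerOutput = z
+  .object({
+    seed: z.int().register(z.globalRegistry, {
+      description: 'The seed used for generation.',
+    }),
+    timings: z.record(z.string(), z.number()).register(z.globalRegistry, {
+      description: 'The timings of the different steps in the workflow.',
+    }),
+    video: zSchemaFile,
+  })
+  .register(z.globalRegistry, {
+    description: 'Output from Wan Vision Enhancer',
+  })
+
+/**
+ * Input
+ *
+ * Input parameters for Wan Vision Enhancer (Video-to-Video)
+ */
+export const zSchemaWanVisionEnhancerInput = z
+  .object({
+    prompt: z.optional(z.union([z.string(), z.unknown()])),
+    video_url: z.string().register(z.globalRegistry, {
+      description:
+        'The URL of the video to enhance with Wan Video. 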
Maximum 200MB file size. Videos longer than 500 frames will have only the first 500 frames processed (~8-21 seconds depending on fps).', + }), + seed: z.optional(z.union([z.int(), z.unknown()])), + target_resolution: z.optional( + z.enum(['720p', '1080p']).register(z.globalRegistry, { + description: + 'Target output resolution for the enhanced video. 720p (native, fast) or 1080p (upscaled, slower). Processing is always done at 720p, then upscaled if 1080p selected.', + }), + ), + negative_prompt: z.optional(z.union([z.string(), z.unknown()])), + creativity: z + .optional( + z.int().gte(0).lte(4).register(z.globalRegistry, { + description: + 'Controls how much the model enhances/changes the video. 0 = Minimal change (preserves original), 1 = Subtle enhancement (default), 2 = Medium enhancement, 3 = Strong enhancement, 4 = Maximum enhancement.', + }), + ) + .default(1), + }) + .register(z.globalRegistry, { + description: 'Input parameters for Wan Vision Enhancer (Video-to-Video)', + }) + +/** + * OneToALLAnimationResponse + */ +export const zSchemaOneToAllAnimation14bOutput = z.object({ + video: zSchemaFile, +}) + +/** + * OneToALLAnimationRequest + */ +export const zSchemaOneToAllAnimation14bInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the video from.', + }), + resolution: z.optional( + z.enum(['480p', '580p', '720p']).register(z.globalRegistry, { + description: 'The resolution of the video to generate.', + }), + ), + image_guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'The image guidance scale to use for the video generation.', + }), + ) + .default(2), + pose_guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'The pose guidance scale to use for the video generation.', + }), + ) + .default(1.5), + video_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the video to use as a reference for the video generation.', + }), + image_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the image to use as a reference for the video generation.', + }), + num_inference_steps: z + .optional( + z.int().gte(2).lte(30).register(z.globalRegistry, { + description: + 'The number of inference steps to use for the video generation.', + }), + ) + .default(30), + negative_prompt: z.string().register(z.globalRegistry, { + description: 'The negative prompt to generate the video from.', + }), +}) + +/** + * OneToALLAnimationResponse + */ +export const zSchemaOneToAllAnimation13bOutput = z.object({ + video: zSchemaFile, +}) + +/** + * OneToALLAnimationRequest + */ +export const zSchemaOneToAllAnimation13bInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the video from.', + }), + resolution: z.optional( + z.enum(['480p', '580p', '720p']).register(z.globalRegistry, { + description: 'The resolution of the video to generate.', + }), + ), + image_guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: + 'The image guidance scale to use for the video generation.', + }), + ) + .default(2), + pose_guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'The pose guidance scale to use for the video generation.', + }), + ) + .default(1.5), + video_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the video to use as a reference for the 
video generation.', + }), + image_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the image to use as a reference for the video generation.', + }), + num_inference_steps: z + .optional( + z.int().gte(2).lte(30).register(z.globalRegistry, { + description: + 'The number of inference steps to use for the video generation.', + }), + ) + .default(30), + negative_prompt: z.string().register(z.globalRegistry, { + description: 'The negative prompt to generate the video from.', + }), +}) + +/** + * SteadyDancerResponse + * + * Response model for SteadyDancer. + */ +export const zSchemaSteadyDancerOutput = z + .object({ + num_frames: z.int().register(z.globalRegistry, { + description: + 'The actual number of frames generated (aligned to 4k+1 pattern).', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for generation.', + }), + video: zSchemaFile, + }) + .register(z.globalRegistry, { + description: 'Response model for SteadyDancer.', + }) + +/** + * SteadyDancerRequest + * + * Request model for SteadyDancer human animation. + */ +export const zSchemaSteadyDancerInput = z + .object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Text prompt describing the desired animation.', + }), + ) + .default('A person dancing with smooth and natural movements.'), + video_url: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'URL of the driving pose video. The motion from this video will be transferred to the reference image.', + }), + ) + .default( + 'https://v3b.fal.media/files/b/0a84de68/jXDWywjhagRfR-GuZjoRs_video.mp4', + ), + acceleration: z.optional( + z.enum(['light', 'moderate', 'aggressive']).register(z.globalRegistry, { + description: 'Acceleration levels.', + }), + ), + pose_guidance_scale: z + .optional( + z.number().gte(0.5).lte(3).register(z.globalRegistry, { + description: 'Pose guidance scale for pose control strength.', + }), + ) + .default(1), + shift: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'Shift parameter for video generation.', + }), + ) + .default(5), + pose_guidance_end: z + .optional( + z.number().gte(0.2).lte(1).register(z.globalRegistry, { + description: + 'End ratio for pose guidance. Controls when pose guidance ends.', + }), + ) + .default(0.4), + frames_per_second: z.optional( + z.int().gte(5).lte(24).register(z.globalRegistry, { + description: + 'Frames per second of the generated video. Must be between 5 to 24. If not specified, uses the FPS from the input video.', + }), + ), + guidance_scale: z + .optional( + z.number().gte(1).lte(6).register(z.globalRegistry, { + description: 'Classifier-free guidance scale for prompt adherence.', + }), + ) + .default(1), + num_frames: z.optional( + z.int().gte(5).lte(241).register(z.globalRegistry, { + description: + 'Number of frames to generate. If not specified, uses the frame count from the input video (capped at 241). Will be adjusted to nearest valid value (must satisfy 4k+1 pattern).', + }), + ), + use_turbo: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If true, applies quality enhancement for faster generation with improved quality. 
When enabled, parameters are automatically optimized (num_inference_steps=6, guidance_scale=1.0) and uses the LightX2V distillation LoRA.', + }), + ) + .default(false), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Negative prompt for video generation.', + }), + ) + .default( + 'blurred, distorted face, bad anatomy, extra limbs, poorly drawn hands, poorly drawn feet, disfigured, out of frame, duplicate, watermark, signature, text', + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(false), + aspect_ratio: z.optional( + z.enum(['auto', '16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: + "Aspect ratio of the generated video. If 'auto', will be determined from the reference image.", + }), + ), + pose_guidance_start: z + .optional( + z.number().gte(0).lte(0.5).register(z.globalRegistry, { + description: + 'Start ratio for pose guidance. Controls when pose guidance begins.', + }), + ) + .default(0.1), + resolution: z.optional( + z.enum(['480p', '576p', '720p']).register(z.globalRegistry, { + description: + 'Resolution of the generated video. 576p is default, 720p for higher quality. 480p is lower quality.', + }), + ), + image_url: z + .optional( + z.string().register(z.globalRegistry, { + description: + 'URL of the reference image to animate. This is the person/character whose appearance will be preserved.', + }), + ) + .default( + 'https://v3b.fal.media/files/b/0a85edaa/GDUCMPrdvOMcI5JpEcU7f.png', + ), + preserve_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'If enabled, copies audio from the input driving video to the output video.', + }), + ) + .default(true), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: + 'Random seed for reproducibility. If None, a random seed is chosen.', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(4).lte(50).register(z.globalRegistry, { + description: + 'Number of inference steps for sampling. Higher values give better quality but take longer.', + }), + ) + .default(6), + }) + .register(z.globalRegistry, { + description: 'Request model for SteadyDancer human animation.', + }) + +/** + * OmniV2VEditOutput + */ +export const zSchemaKlingVideoO1StandardVideoToVideoEditOutput = z.object({ + video: zSchemaFile, +}) + +/** + * OmniV2VEditInput + * + * Input for video editing or video-as-reference generation. + */ +export const zSchemaKlingVideoO1StandardVideoToVideoEditInput = z + .object({ + prompt: z.string().max(2500).register(z.globalRegistry, { + description: + 'Use @Element1, @Element2 to reference elements and @Image1, @Image2 to reference images in order.', + }), + video_url: z.string().register(z.globalRegistry, { + description: + 'Reference video URL. Only .mp4/.mov formats supported, 3-10 seconds duration, 720-2160px resolution, max 200MB.\n\nMax file size: 200.0MB, Min width: 720px, Min height: 720px, Max width: 2160px, Max height: 2160px, Min duration: 3.0s, Max duration: 10.05s, Min FPS: 24.0, Max FPS: 60.0, Timeout: 30.0s', + }), + elements: z.optional( + z.array(zSchemaOmniVideoElementInput).register(z.globalRegistry, { + description: + 'Elements (characters/objects) to include. Reference in prompt as @Element1, @Element2, etc. 
Maximum 4 total (elements + reference images) when using video.', + }), + ), + image_urls: z.optional( + z.array(z.string()).register(z.globalRegistry, { + description: + 'Reference images for style/appearance. Reference in prompt as @Image1, @Image2, etc. Maximum 4 total (elements + reference images) when using video.', + }), + ), + keep_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to keep the original audio from the video.', + }), + ) + .default(false), + }) + .register(z.globalRegistry, { + description: 'Input for video editing or video-as-reference generation.', + }) + +/** + * OmniV2VReferenceOutput + */ +export const zSchemaKlingVideoO1StandardVideoToVideoReferenceOutput = z.object({ + video: zSchemaFile, +}) + +/** + * OmniV2VReferenceInput + * + * Input for video editing or video-as-reference generation. + */ +export const zSchemaKlingVideoO1StandardVideoToVideoReferenceInput = z + .object({ + prompt: z.string().max(2500).register(z.globalRegistry, { + description: + 'Use @Element1, @Element2 to reference elements and @Image1, @Image2 to reference images in order.', + }), + aspect_ratio: z.optional( + z.enum(['auto', '16:9', '9:16', '1:1']).register(z.globalRegistry, { + description: + "The aspect ratio of the generated video frame. If 'auto', the aspect ratio will be determined automatically based on the input video, and the closest aspect ratio to the input video will be used.", + }), + ), + duration: z.optional( + z + .enum(['3', '4', '5', '6', '7', '8', '9', '10']) + .register(z.globalRegistry, { + description: 'Video duration in seconds.', + }), + ), + video_url: z.string().register(z.globalRegistry, { + description: + 'Reference video URL. Only .mp4/.mov formats supported, 3-10 seconds duration, 720-2160px resolution, max 200MB.\n\nMax file size: 200.0MB, Min width: 720px, Min height: 720px, Max width: 2160px, Max height: 2160px, Min duration: 3.0s, Max duration: 10.05s, Min FPS: 24.0, Max FPS: 60.0, Timeout: 30.0s', + }), + keep_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to keep the original audio from the video.', + }), + ) + .default(false), + elements: z.optional( + z.array(zSchemaOmniVideoElementInput).register(z.globalRegistry, { + description: + 'Elements (characters/objects) to include. Reference in prompt as @Element1, @Element2, etc. Maximum 4 total (elements + reference images) when using video.', + }), + ), + image_urls: z.optional( + z.array(z.string()).register(z.globalRegistry, { + description: + 'Reference images for style/appearance. Reference in prompt as @Image1, @Image2, etc. Maximum 4 total (elements + reference images) when using video.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'Input for video editing or video-as-reference generation.', + }) + +/** + * Veo31VideoToVideoOutput + */ +export const zSchemaVeo31ExtendVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * Veo31VideoToVideoInput + * + * Input for video extension/video-to-video generation. 
+ */ +export const zSchemaVeo31ExtendVideoInput = z + .object({ + prompt: z.string().max(20000).register(z.globalRegistry, { + description: + 'The text prompt describing how the video should be extended', + }), + duration: z.optional( + z.enum(['7s']).register(z.globalRegistry, { + description: 'The duration of the generated video.', + }), + ), + aspect_ratio: z.optional( + z.enum(['auto', '16:9', '9:16']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video.', + }), + ), + generate_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to generate audio for the video.', + }), + ) + .default(true), + auto_fix: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to automatically attempt to fix prompts that fail content policy or other validation checks by rewriting them.', + }), + ) + .default(false), + video_url: z.string().register(z.globalRegistry, { + description: + 'URL of the video to extend. The video should be 720p or 1080p resolution in 16:9 or 9:16 aspect ratio.', + }), + resolution: z.optional( + z.enum(['720p']).register(z.globalRegistry, { + description: 'The resolution of the generated video.', + }), + ), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'The seed for the random number generator.', + }), + ), + negative_prompt: z.optional( + z.string().register(z.globalRegistry, { + description: 'A negative prompt to guide the video generation.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'Input for video extension/video-to-video generation.', + }) + +/** + * Veo31VideoToVideoOutput + */ +export const zSchemaVeo31FastExtendVideoOutput = z.object({ + video: zSchemaFile, +}) + +/** + * Veo31VideoToVideoInput + * + * Input for video extension/video-to-video generation. + */ +export const zSchemaVeo31FastExtendVideoInput = z + .object({ + prompt: z.string().max(20000).register(z.globalRegistry, { + description: + 'The text prompt describing how the video should be extended', + }), + duration: z.optional( + z.enum(['7s']).register(z.globalRegistry, { + description: 'The duration of the generated video.', + }), + ), + aspect_ratio: z.optional( + z.enum(['auto', '16:9', '9:16']).register(z.globalRegistry, { + description: 'The aspect ratio of the generated video.', + }), + ), + generate_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to generate audio for the video.', + }), + ) + .default(true), + auto_fix: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to automatically attempt to fix prompts that fail content policy or other validation checks by rewriting them.', + }), + ) + .default(false), + video_url: z.string().register(z.globalRegistry, { + description: + 'URL of the video to extend. 
The video should be 720p or 1080p resolution in 16:9 or 9:16 aspect ratio.',
+    }),
+    resolution: z.optional(
+      z.enum(['720p']).register(z.globalRegistry, {
+        description: 'The resolution of the generated video.',
+      }),
+    ),
+    seed: z.optional(
+      z.int().register(z.globalRegistry, {
+        description: 'The seed for the random number generator.',
+      }),
+    ),
+    negative_prompt: z.optional(
+      z.string().register(z.globalRegistry, {
+        description: 'A negative prompt to guide the video generation.',
+      }),
+    ),
+  })
+  .register(z.globalRegistry, {
+    description: 'Input for video extension/video-to-video generation.',
+  })
+
+/**
+ * ReferenceToVideoOutput
+ *
+ * Output for reference-to-video generation
+ */
+export const zSchemaV26ReferenceToVideoOutput = z
+  .object({
+    actual_prompt: z.optional(
+      z.string().register(z.globalRegistry, {
+        description: 'The actual prompt used if prompt rewriting was enabled',
+      }),
+    ),
+    seed: z.int().register(z.globalRegistry, {
+      description: 'The seed used for generation',
+    }),
+    video: zSchemaVideoFile,
+  })
+  .register(z.globalRegistry, {
+    description: 'Output for reference-to-video generation',
+  })
+
+/**
+ * ReferenceToVideoInput
+ *
+ * Input for Wan 2.6 reference-to-video generation (R2V)
+ */
+export const zSchemaV26ReferenceToVideoInput = z
+  .object({
+    prompt: z.string().min(1).register(z.globalRegistry, {
+      description:
+        "Use @Video1, @Video2, @Video3 to reference subjects from your videos. Works for people, animals, or objects. For multi-shot prompts: '[0-3s] Shot 1. [3-6s] Shot 2.' Max 800 characters.",
+    }),
+    resolution: z.optional(
+      z.enum(['720p', '1080p']).register(z.globalRegistry, {
+        description:
+          'Video resolution tier. R2V only supports 720p and 1080p (no 480p).',
+      }),
+    ),
+    video_urls: z.array(z.string()).register(z.globalRegistry, {
+      description:
+        "Reference videos for subject consistency (1-3 videos). Videos' FPS must be at least 16 FPS. Reference in prompt as @Video1, @Video2, @Video3. Works for people, animals, or objects.",
+    }),
+    aspect_ratio: z.optional(
+      z.enum(['16:9', '9:16', '1:1', '4:3', '3:4']).register(z.globalRegistry, {
+        description: 'The aspect ratio of the generated video.',
+      }),
+    ),
+    duration: z.optional(
+      z.enum(['5', '10']).register(z.globalRegistry, {
+        description:
+          'Duration of the generated video in seconds. R2V supports only 5 or 10 seconds (no 15s).',
+      }),
+    ),
+    enable_prompt_expansion: z
+      .optional(
+        z.boolean().register(z.globalRegistry, {
+          description: 'Whether to enable prompt rewriting using LLM.',
+        }),
+      )
+      .default(true),
+    seed: z.optional(
+      z.int().register(z.globalRegistry, {
+        description:
+          'Random seed for reproducibility. If None, a random seed is chosen.',
+      }),
+    ),
+    multi_shots: z
+      .optional(
+        z.boolean().register(z.globalRegistry, {
+          description:
+            'When true (default), enables intelligent multi-shot segmentation for coherent narrative videos with multiple shots. When false, generates single continuous shot. Only active when enable_prompt_expansion is True.',
+        }),
+      )
+      .default(true),
+    negative_prompt: z
+      .optional(
+        z.string().register(z.globalRegistry, {
+          description:
+            'Negative prompt to describe content to avoid. 
Max 500 characters.', + }), + ) + .default(''), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If set to true, the safety checker will be enabled.', + }), + ) + .default(true), + }) + .register(z.globalRegistry, { + description: 'Input for Wan 2.6 reference-to-video generation (R2V)', + }) + +/** + * VideoOutput + */ +export const zSchemaBriaVideoEraserErasePromptOutput = z.object({ + video: z.union([zSchemaVideo, zSchemaFile]), +}) + +/** + * EraseByPromptInputModel + */ +export const zSchemaBriaVideoEraserErasePromptInput = z.object({ + preserve_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If true, audio will be preserved in the output video.', + }), + ) + .default(true), + video_url: z.string().register(z.globalRegistry, { + description: + 'Input video to erase object from. duration must be less than 5s.', + }), + prompt: z.string().register(z.globalRegistry, { + description: 'Input prompt to detect object to erase', + }), + output_container_and_codec: z.optional( + z + .enum([ + 'mp4_h265', + 'mp4_h264', + 'webm_vp9', + 'gif', + 'mov_h264', + 'mov_h265', + 'mov_proresks', + 'mkv_h264', + 'mkv_h265', + 'mkv_vp9', + 'mkv_mpeg4', + ]) + .register(z.globalRegistry, { + description: + 'Output container and codec. Options: mp4_h265, mp4_h264, webm_vp9, gif, mov_h264, mov_h265, mov_proresks, mkv_h264, mkv_h265, mkv_vp9, mkv_mpeg4.', + }), + ), + auto_trim: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'auto trim the video, to working duration ( 5s )', + }), + ) + .default(true), +}) + +/** + * VideoOutput + */ +export const zSchemaBriaVideoEraserEraseKeypointsOutput = z.object({ + video: z.union([zSchemaVideo, zSchemaFile]), +}) + +/** + * EraseByKeyPointsInputModel + */ +export const zSchemaBriaVideoEraserEraseKeypointsInput = z.object({ + preserve_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If true, audio will be preserved in the output video.', + }), + ) + .default(true), + video_url: z.string().register(z.globalRegistry, { + description: + 'Input video to erase object from. duration must be less than 5s.', + }), + output_container_and_codec: z.optional( + z + .enum([ + 'mp4_h265', + 'mp4_h264', + 'webm_vp9', + 'gif', + 'mov_h264', + 'mov_h265', + 'mov_proresks', + 'mkv_h264', + 'mkv_h265', + 'mkv_vp9', + 'mkv_mpeg4', + ]) + .register(z.globalRegistry, { + description: + 'Output container and codec. Options: mp4_h265, mp4_h264, webm_vp9, gif, mov_h264, mov_h265, mov_proresks, mkv_h264, mkv_h265, mkv_vp9, mkv_mpeg4.', + }), + ), + keypoints: z.array(z.string()).register(z.globalRegistry, { + description: + "Input keypoints [x,y] to erase or keep from the video. Format like so: {'x':100, 'y':100, 'type':'positive/negative'}", + }), + auto_trim: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'auto trim the video, to working duration ( 5s )', + }), + ) + .default(true), +}) + +/** + * VideoOutput + */ +export const zSchemaBriaVideoEraserEraseMaskOutput = z.object({ + video: z.union([zSchemaVideo, zSchemaFile]), +}) + +/** + * EraseInputModel + */ +export const zSchemaBriaVideoEraserEraseMaskInput = z.object({ + preserve_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If true, audio will be preserved in the output video.', + }), + ) + .default(true), + video_url: z.string().register(z.globalRegistry, { + description: + 'Input video to erase object from. 
duration must be less than 5s.', + }), + output_container_and_codec: z.optional( + z + .enum([ + 'mp4_h265', + 'mp4_h264', + 'webm_vp9', + 'gif', + 'mov_h264', + 'mov_h265', + 'mov_proresks', + 'mkv_h264', + 'mkv_h265', + 'mkv_vp9', + 'mkv_mpeg4', + ]) + .register(z.globalRegistry, { + description: + 'Output container and codec. Options: mp4_h265, mp4_h264, webm_vp9, gif, mov_h264, mov_h265, mov_proresks, mkv_h264, mkv_h265, mkv_vp9, mkv_mpeg4.', + }), + ), + mask_video_url: z.string().register(z.globalRegistry, { + description: + 'Input video to mask erase object from. duration must be less than 5s.', + }), + auto_trim: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'auto trim the video, to working duration ( 5s )', + }), + ) + .default(true), +}) + +/** + * CrystalVideoUpscaleOutput + */ +export const zSchemaCrystalVideoUpscalerOutput = z.object({ + video: zSchemaVideoFile, +}) + +/** + * CrystalVideoUpscaleInput + */ +export const zSchemaCrystalVideoUpscalerInput = z.object({ + video_url: z.string().register(z.globalRegistry, { + description: 'URL to the input video.', + }), + scale_factor: z + .optional( + z.number().gte(1).lte(200).register(z.globalRegistry, { + description: + 'Scale factor. The scale factor must be chosen such that the upscaled video does not exceed 5K resolution.', + }), + ) + .default(2), +}) + +/** + * ScailResponse + */ +export const zSchemaScailOutput = z.object({ + video: zSchemaFile, +}) + +/** + * ScailRequest + */ +export const zSchemaScailInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to guide video generation.', + }), + video_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the video to use as a reference for the video generation.', + }), + resolution: z.optional( + z.enum(['512p']).register(z.globalRegistry, { + description: + 'Output resolution. Outputs 896x512 (landscape) or 512x896 (portrait) based on the input image aspect ratio.', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(2).lte(30).register(z.globalRegistry, { + description: + 'The number of inference steps to use for the video generation.', + }), + ) + .default(28), + multi_character: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Enable multi-character mode. Use when driving video has multiple people.', + }), + ) + .default(false), + image_url: z.string().register(z.globalRegistry, { + description: + 'The URL of the image to use as a reference for the video generation.', + }), +}) + +/** + * LucyRestyleOutput + */ +export const zSchemaLucyRestyleOutput = z.object({ + video: zSchemaFile, +}) + +/** + * LucyRestyleInput + */ +export const zSchemaLucyRestyleInput = z.object({ + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + '\n If set to true, the function will wait for the video to be generated\n and uploaded before returning the response. 
This will increase the\n latency of the function but it allows you to get the video directly\n in the response without going through the CDN.\n ', + }), + ) + .default(false), + video_url: z.string().register(z.globalRegistry, { + description: 'URL of the video to edit', + }), + resolution: z.optional( + z.enum(['720p']).register(z.globalRegistry, { + description: 'Resolution of the generated video', + }), + ), + prompt: z.string().max(1500).register(z.globalRegistry, { + description: 'Text description of the desired video content', + }), + seed: z.optional( + z.int().register(z.globalRegistry, { + description: 'Seed for video generation', + }), + ), + enhance_prompt: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enhance the prompt for better results.', + }), + ) + .default(true), +}) + +/** + * MotionControlOutput + * + * Output model for motion control video generation. + */ +export const zSchemaKlingVideoV26ProMotionControlOutput = z + .object({ + video: zSchemaFile, + }) + .register(z.globalRegistry, { + description: 'Output model for motion control video generation.', + }) + +/** + * MotionControlRequest + * + * Request model for motion control video generation. + */ +export const zSchemaKlingVideoV26ProMotionControlInput = z + .object({ + prompt: z.optional(z.string().max(2500)), + video_url: z.string().register(z.globalRegistry, { + description: + "Reference video URL. The character actions in the generated video will be consistent with this reference video. Should contain a realistic style character with entire body or upper body visible, including head, without obstruction. Duration limit depends on character_orientation: 10s max for 'image', 30s max for 'video'.", + }), + character_orientation: z + .enum(['image', 'video']) + .register(z.globalRegistry, { + description: + "Controls whether the output character's orientation matches the reference image or video. 'video': orientation matches reference video - better for complex motions (max 30s). 'image': orientation matches reference image - better for following camera movements (max 10s).", + }), + keep_original_sound: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to keep the original sound from the reference video.', + }), + ) + .default(true), + image_url: z.string().register(z.globalRegistry, { + description: + 'Reference image URL. The characters, backgrounds, and other elements in the generated video are based on this reference image. Characters should have clear body proportions, avoid occlusion, and occupy more than 5% of the image area.', + }), + }) + .register(z.globalRegistry, { + description: 'Request model for motion control video generation.', + }) + +/** + * MotionControlOutput + * + * Output model for motion control video generation. + */ +export const zSchemaKlingVideoV26StandardMotionControlOutput = z + .object({ + video: zSchemaFile, + }) + .register(z.globalRegistry, { + description: 'Output model for motion control video generation.', + }) + +/** + * MotionControlRequest + * + * Request model for motion control video generation. + */ +export const zSchemaKlingVideoV26StandardMotionControlInput = z + .object({ + prompt: z.optional(z.string().max(2500)), + video_url: z.string().register(z.globalRegistry, { + description: + "Reference video URL. The character actions in the generated video will be consistent with this reference video. 
Should contain a realistic style character with entire body or upper body visible, including head, without obstruction. Duration limit depends on character_orientation: 10s max for 'image', 30s max for 'video'.",
+    }),
+    character_orientation: z
+      .enum(['image', 'video'])
+      .register(z.globalRegistry, {
+        description:
+          "Controls whether the output character's orientation matches the reference image or video. 'video': orientation matches reference video - better for complex motions (max 30s). 'image': orientation matches reference image - better for following camera movements (max 10s).",
+      }),
+    keep_original_sound: z
+      .optional(
+        z.boolean().register(z.globalRegistry, {
+          description:
+            'Whether to keep the original sound from the reference video.',
+        }),
+      )
+      .default(true),
+    image_url: z.string().register(z.globalRegistry, {
+      description:
+        'Reference image URL. The characters, backgrounds, and other elements in the generated video are based on this reference image. Characters should have clear body proportions, avoid occlusion, and occupy more than 5% of the image area.',
+    }),
+  })
+  .register(z.globalRegistry, {
+    description: 'Request model for motion control video generation.',
+  })
+
+/**
+ * TrajectoryParameters
+ *
+ * Camera trajectory parameters for re-camera operations.
+ *
+ * Each list represents interpolation values across frames:
+ * - theta: Horizontal rotation angles (degrees)
+ * - phi: Vertical rotation angles (degrees)
+ * - radius: Camera distance scaling factors
+ */
+export const zSchemaTrajectoryParameters = z
+  .object({
+    theta: z.array(z.number()).register(z.globalRegistry, {
+      description: 'Horizontal rotation angles (degrees) for each keyframe.',
+    }),
+    radius: z.array(z.number()).register(z.globalRegistry, {
+      description: 'Camera distance scaling factors for each keyframe.',
+    }),
+    phi: z.array(z.number()).register(z.globalRegistry, {
+      description: 'Vertical rotation angles (degrees) for each keyframe.',
+    }),
+  })
+  .register(z.globalRegistry, {
+    description:
+      'Camera trajectory parameters for re-camera operations.\n\nEach list represents interpolation values across frames:\n- theta: Horizontal rotation angles (degrees)\n- phi: Vertical rotation angles (degrees)\n- radius: Camera distance scaling factors',
+  })
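+
+// Illustrative sketch: a four-keyframe trajectory that sweeps the camera 90
+// degrees horizontally while holding height and distance constant. The arrays
+// are keyframe values interpolated across frames, per the description above.
+export const exampleOrbitTrajectory = zSchemaTrajectoryParameters.parse({
+  theta: [0, 30, 60, 90], // horizontal rotation (degrees) per keyframe
+  phi: [0, 0, 0, 0], // vertical rotation held level
+  radius: [1, 1, 1, 1], // camera distance scaling left unchanged
+})
+
+/**
+ * LightXOutput
+ */
+export const zSchemaLightxRecameraOutput = z.object({
+  viz_video: z.optional(zSchemaFile),
+  seed: z.int().register(z.globalRegistry, {
+    description: 'The seed used for generation.',
+  }),
+  input_video: z.optional(zSchemaFile),
+  video: zSchemaFile,
+})
+
+/**
+ * LightXRecameraRequest
+ *
+ * Re-camera-only request (minimal schema).
+ */
+export const zSchemaLightxRecameraInput = z
+  .object({
+    prompt: z.optional(
+      z.string().register(z.globalRegistry, {
+        description: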
+          'Optional text prompt. If omitted, Light-X will auto-caption the video.',
+      }),
+    ),
+    trajectory: z.optional(zSchemaTrajectoryParameters),
+    video_url: z.string().register(z.globalRegistry, {
+      description: 'URL of the input video.',
+    }),
+    camera: z.optional(
+      z.enum(['traj', 'target']).register(z.globalRegistry, {
+        description: 'Camera control mode.',
+      }),
+    ),
+    target_pose: z.optional(
+      z.array(z.number()).register(z.globalRegistry, {
+        description:
+          "Target camera pose [theta, phi, radius, x, y] (required when camera='target').",
+      }),
+    ),
+    mode: z.optional(
+      z
+        .enum(['gradual', 'bullet', 'direct', 'dolly-zoom'])
+        .register(z.globalRegistry, {
+          description: 'Camera motion mode.',
+        }),
+    ),
+    seed: z.optional(
+      z.int().register(z.globalRegistry, {
+        description:
+          'Random seed for reproducibility. If None, a random seed is chosen.',
+      }),
+    ),
+  })
+  .register(z.globalRegistry, {
+    description: 'Re-camera-only request (minimal schema).',
+  })
+
+/**
+ * RelightParameters
+ *
+ * Relighting parameters for video relighting operations.
+ *
+ * Used with relight_condition_type 'ic' (intrinsic conditioning).
+ */
+export const zSchemaRelightParameters = z
+  .object({
+    relight_prompt: z.string().register(z.globalRegistry, {
+      description: 'Text prompt describing the desired lighting condition.',
+    }),
+    bg_source: z.optional(
+      z.enum(['Left', 'Right', 'Top', 'Bottom']).register(z.globalRegistry, {
+        description: 'Direction of the light source (used for IC-light).',
+      }),
+    ),
+    use_sky_mask: z
+      .optional(
+        z.boolean().register(z.globalRegistry, {
+          description: 'Whether to use sky masking for outdoor scenes.',
+        }),
+      )
+      .default(false),
+    cfg: z
+      .optional(
+        z.number().gte(1).lte(10).register(z.globalRegistry, {
+          description: 'Classifier-free guidance scale for relighting.',
+        }),
+      )
+      .default(2),
+  })
+  .register(z.globalRegistry, {
+    description:
+      "Relighting parameters for video relighting operations.\n\nUsed with relight_condition_type 'ic' (intrinsic conditioning).",
+  })
+
+/**
+ * LightXOutput
+ */
+export const zSchemaLightxRelightOutput = z.object({
+  viz_video: z.optional(zSchemaFile),
+  seed: z.int().register(z.globalRegistry, {
+    description: 'The seed used for generation.',
+  }),
+  input_video: z.optional(zSchemaFile),
+  video: zSchemaFile,
+})
+
+/**
+ * LightXRelightRequest
+ *
+ * Relighting-only request (minimal schema).
+ */
+export const zSchemaLightxRelightInput = z
+  .object({
+    prompt: z.optional(
+      z.string().register(z.globalRegistry, {
+        description:
+          'Optional text prompt. If omitted, Light-X will auto-caption the video.',
+      }),
+    ),
+    video_url: z.string().register(z.globalRegistry, {
+      description: 'URL of the input video.',
+    }),
+    relight_parameters: z.optional(zSchemaRelightParameters),
+    ref_id: z
+      .optional(
+        z.int().gte(0).register(z.globalRegistry, {
+          description:
+            'Frame index to use as the reference frame when relighting the video with a reference.',
+        }),
+      )
+      .default(0),
+    relit_cond_img_url: z.optional(
+      z.string().register(z.globalRegistry, {
+        description:
+          "URL of conditioning image. Required for relight_condition_type='ref'/'hdr'. Also required for relight_condition_type='bg' (background image).",
+      }),
+    ),
+    relit_cond_type: z.optional(
+      z.enum(['ic', 'ref', 'hdr', 'bg']).register(z.globalRegistry, {
+        description: 'Relight condition type.',
+      }),
+    ),
+    seed: z.optional(
+      z.int().register(z.globalRegistry, {
+        description:
+          'Random seed for reproducibility. 
If None, a random seed is chosen.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'Relighting-only request (minimal schema).', + }) + +/** + * VideoOutput + */ +export const zSchemaVideoEraseMaskOutput = z.object({ + video: z.union([zSchemaVideo, zSchemaFile]), +}) + +/** + * EraseInputModel + */ +export const zSchemaVideoEraseMaskInput = z.object({ + preserve_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If true, audio will be preserved in the output video.', + }), + ) + .default(true), + video_url: z.string().register(z.globalRegistry, { + description: + 'Input video to erase object from. duration must be less than 5s.', + }), + output_container_and_codec: z.optional( + z + .enum([ + 'mp4_h265', + 'mp4_h264', + 'webm_vp9', + 'gif', + 'mov_h264', + 'mov_h265', + 'mov_proresks', + 'mkv_h264', + 'mkv_h265', + 'mkv_vp9', + 'mkv_mpeg4', + ]) + .register(z.globalRegistry, { + description: + 'Output container and codec. Options: mp4_h265, mp4_h264, webm_vp9, gif, mov_h264, mov_h265, mov_proresks, mkv_h264, mkv_h265, mkv_vp9, mkv_mpeg4.', + }), + ), + mask_video_url: z.string().register(z.globalRegistry, { + description: + 'Input video to mask erase object from. duration must be less than 5s.', + }), + auto_trim: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'auto trim the video, to working duration ( 5s )', + }), + ) + .default(true), +}) + +/** + * VideoOutput + */ +export const zSchemaVideoErasePromptOutput = z.object({ + video: z.union([zSchemaVideo, zSchemaFile]), +}) + +/** + * EraseByPromptInputModel + */ +export const zSchemaVideoErasePromptInput = z.object({ + preserve_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If true, audio will be preserved in the output video.', + }), + ) + .default(true), + video_url: z.string().register(z.globalRegistry, { + description: + 'Input video to erase object from. duration must be less than 5s.', + }), + prompt: z.string().register(z.globalRegistry, { + description: 'Input prompt to detect object to erase', + }), + output_container_and_codec: z.optional( + z + .enum([ + 'mp4_h265', + 'mp4_h264', + 'webm_vp9', + 'gif', + 'mov_h264', + 'mov_h265', + 'mov_proresks', + 'mkv_h264', + 'mkv_h265', + 'mkv_vp9', + 'mkv_mpeg4', + ]) + .register(z.globalRegistry, { + description: + 'Output container and codec. Options: mp4_h265, mp4_h264, webm_vp9, gif, mov_h264, mov_h265, mov_proresks, mkv_h264, mkv_h265, mkv_vp9, mkv_mpeg4.', + }), + ), + auto_trim: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'auto trim the video, to working duration ( 5s )', + }), + ) + .default(true), +}) + +/** + * VideoOutput + */ +export const zSchemaVideoEraseKeypointsOutput = z.object({ + video: z.union([zSchemaVideo, zSchemaFile]), +}) + +/** + * EraseByKeyPointsInputModel + */ +export const zSchemaVideoEraseKeypointsInput = z.object({ + preserve_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'If true, audio will be preserved in the output video.', + }), + ) + .default(true), + video_url: z.string().register(z.globalRegistry, { + description: + 'Input video to erase object from. duration must be less than 5s.', + }), + output_container_and_codec: z.optional( + z + .enum([ + 'mp4_h265', + 'mp4_h264', + 'webm_vp9', + 'gif', + 'mov_h264', + 'mov_h265', + 'mov_proresks', + 'mkv_h264', + 'mkv_h265', + 'mkv_vp9', + 'mkv_mpeg4', + ]) + .register(z.globalRegistry, { + description: + 'Output container and codec. 
+
+/**
+ * VideoOutput
+ */
+export const zSchemaVideoEraseKeypointsOutput = z.object({
+  video: z.union([zSchemaVideo, zSchemaFile]),
+})
+
+/**
+ * EraseByKeyPointsInputModel
+ */
+export const zSchemaVideoEraseKeypointsInput = z.object({
+  preserve_audio: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description: 'If true, audio will be preserved in the output video.',
+      }),
+    )
+    .default(true),
+  video_url: z.string().register(z.globalRegistry, {
+    description:
+      'Input video to erase the object from. Duration must be less than 5s.',
+  }),
+  output_container_and_codec: z.optional(
+    z
+      .enum([
+        'mp4_h265',
+        'mp4_h264',
+        'webm_vp9',
+        'gif',
+        'mov_h264',
+        'mov_h265',
+        'mov_proresks',
+        'mkv_h264',
+        'mkv_h265',
+        'mkv_vp9',
+        'mkv_mpeg4',
+      ])
+      .register(z.globalRegistry, {
+        description:
+          'Output container and codec. Options: mp4_h265, mp4_h264, webm_vp9, gif, mov_h264, mov_h265, mov_proresks, mkv_h264, mkv_h265, mkv_vp9, mkv_mpeg4.',
+      }),
+  ),
+  keypoints: z.array(z.string()).register(z.globalRegistry, {
+    description:
+      "Input keypoints [x,y] to erase or keep objects in the video. Each entry is formatted like {'x': 100, 'y': 100, 'type': 'positive'} with type 'positive' or 'negative'.",
+  }),
+  auto_trim: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description: 'Auto-trim the video to the working duration (5s).',
+      }),
+    )
+    .default(true),
+})
+
+/**
+ * LTX2ExtendVideoOutput
+ */
+export const zSchemaLtx219bExtendVideoOutput = z.object({
+  prompt: z.string().register(z.globalRegistry, {
+    description: 'The prompt used for the generation.',
+  }),
+  seed: z.int().register(z.globalRegistry, {
+    description: 'The seed used for the random number generator.',
+  }),
+  video: zSchemaVideoFile,
+})
+
+/**
+ * LTX2ExtendVideoInput
+ *
+ * extend_direction: Direction to extend the video. 'forward' extends from
+ * the end of the video, 'backward' extends from the beginning. Defaults to
+ * 'forward'.
+ */
+export const zSchemaLtx219bExtendVideoInput = z
+  .object({
+    use_multiscale: z
+      .optional(
+        z.boolean().register(z.globalRegistry, {
+          description:
+            'Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details.',
+        }),
+      )
+      .default(true),
+    video_url: z.string().register(z.globalRegistry, {
+      description: 'The URL of the video to extend.',
+    }),
+    acceleration: z.optional(
+      z.enum(['none', 'regular', 'high', 'full']).register(z.globalRegistry, {
+        description: 'The acceleration level to use.',
+      }),
+    ),
+    generate_audio: z
+      .optional(
+        z.boolean().register(z.globalRegistry, {
+          description: 'Whether to generate audio for the video.',
+        }),
+      )
+      .default(true),
+    prompt: z.string().register(z.globalRegistry, {
+      description: 'The prompt to generate the video from.',
+    }),
+    fps: z
+      .optional(
+        z.number().gte(1).lte(60).register(z.globalRegistry, {
+          description: 'The frames per second of the generated video.',
+        }),
+      )
+      .default(25),
+    camera_lora: z.optional(
+      z
+        .enum([
+          'dolly_in',
+          'dolly_out',
+          'dolly_left',
+          'dolly_right',
+          'jib_up',
+          'jib_down',
+          'static',
+          'none',
+        ])
+        .register(z.globalRegistry, {
+          description:
+            'The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.',
+        }),
+    ),
+    video_size: z.optional(
+      z.union([
+        zSchemaImageSize,
+        z.enum([
+          'auto',
+          'square_hd',
+          'square',
+          'portrait_4_3',
+          'portrait_16_9',
+          'landscape_4_3',
+          'landscape_16_9',
+        ]),
+      ]),
+    ),
+    enable_safety_checker: z
+      .optional(
+        z.boolean().register(z.globalRegistry, {
+          description: 'Whether to enable the safety checker.',
+        }),
+      )
+      .default(true),
+    camera_lora_scale: z
+      .optional(
+        z.number().gte(0).lte(1).register(z.globalRegistry, {
+          description:
+            'The scale of the camera LoRA to use. 
This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.', + }), + ) + .default(1), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'The guidance scale to use.', + }), + ) + .default(3), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to generate the video from.', + }), + ) + .default( + 'blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, off-sync audio,incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts.', + ), + num_frames: z + .optional( + z.int().gte(9).lte(481).register(z.globalRegistry, { + description: 'The number of frames to generate.', + }), + ) + .default(121), + video_strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Video conditioning strength. Lower values represent more freedom given to the model to change the video content.', + }), + ) + .default(1), + video_output_type: z.optional( + z + .enum(['X264 (.mp4)', 'VP9 (.webm)', 'PRORES4444 (.mov)', 'GIF (.gif)']) + .register(z.globalRegistry, { + description: 'The output type of the generated video.', + }), + ), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + num_context_frames: z + .optional( + z.int().gte(0).lte(121).register(z.globalRegistry, { + description: + 'The number of frames to use as context for the extension.', + }), + ) + .default(25), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(8).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to use.', + }), + ) + .default(40), + audio_strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Audio conditioning strength. 
Lower values represent more freedom given to the model to change the audio content.',
+        }),
+      )
+      .default(1),
+    seed: z.optional(z.union([z.int(), z.unknown()])),
+    match_input_fps: z
+      .optional(
+        z.boolean().register(z.globalRegistry, {
+          description:
+            "When true, match the output FPS to the input video's FPS instead of using the default target FPS.",
+        }),
+      )
+      .default(true),
+  })
+  .register(z.globalRegistry, {
+    description:
+      "extend_direction: Direction to extend the video. 'forward' extends from the end of the video, 'backward' extends from the beginning. Defaults to 'forward'.",
+  })
+
+/**
+ * LTX2ExtendVideoOutput
+ */
+export const zSchemaLtx219bExtendVideoLoraOutput = z.object({
+  prompt: z.string().register(z.globalRegistry, {
+    description: 'The prompt used for the generation.',
+  }),
+  seed: z.int().register(z.globalRegistry, {
+    description: 'The seed used for the random number generator.',
+  }),
+  video: zSchemaVideoFile,
+})
+
+/**
+ * LoRAInput
+ *
+ * LoRA weight configuration.
+ */
+export const zSchemaLoRaInput = z
+  .object({
+    path: z.string().register(z.globalRegistry, {
+      description:
+        'URL or HuggingFace repo ID (owner/repo) of the LoRA weights.',
+    }),
+    scale: z
+      .optional(
+        z.number().gte(0).lte(4).register(z.globalRegistry, {
+          description: 'Scale factor for LoRA application (0.0 to 4.0).',
+        }),
+      )
+      .default(1),
+    weight_name: z.optional(z.union([z.string(), z.unknown()])),
+  })
+  .register(z.globalRegistry, {
+    description: 'LoRA weight configuration.',
+  })
+
+/**
+ * LTX2LoRAExtendVideoInput
+ */
+export const zSchemaLtx219bExtendVideoLoraInput = z.object({
+  use_multiscale: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description:
+          'Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details.',
+      }),
+    )
+    .default(true),
+  video_url: z.string().register(z.globalRegistry, {
+    description: 'The URL of the video to extend.',
+  }),
+  acceleration: z.optional(
+    z.enum(['none', 'regular', 'high', 'full']).register(z.globalRegistry, {
+      description: 'The acceleration level to use.',
+    }),
+  ),
+  generate_audio: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description: 'Whether to generate audio for the video.',
+      }),
+    )
+    .default(true),
+  prompt: z.string().register(z.globalRegistry, {
+    description: 'The prompt to generate the video from.',
+  }),
+  fps: z
+    .optional(
+      z.number().gte(1).lte(60).register(z.globalRegistry, {
+        description: 'The frames per second of the generated video.',
+      }),
+    )
+    .default(25),
+  loras: z.array(zSchemaLoRaInput).register(z.globalRegistry, {
+    description: 'The LoRAs to use for the generation.',
+  }),
+  camera_lora: z.optional(
+    z
+      .enum([
+        'dolly_in',
+        'dolly_out',
+        'dolly_left',
+        'dolly_right',
+        'jib_up',
+        'jib_down',
+        'static',
+        'none',
+      ])
+      .register(z.globalRegistry, {
+        description:
+          'The camera LoRA to use. 
This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.', + }), + ), + video_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'auto', + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + camera_lora_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The scale of the camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.', + }), + ) + .default(1), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'The guidance scale to use.', + }), + ) + .default(3), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to generate the video from.', + }), + ) + .default( + 'blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, off-sync audio,incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts.', + ), + num_frames: z + .optional( + z.int().gte(9).lte(481).register(z.globalRegistry, { + description: 'The number of frames to generate.', + }), + ) + .default(121), + video_strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Video conditioning strength. 
Lower values represent more freedom given to the model to change the video content.', + }), + ) + .default(1), + video_output_type: z.optional( + z + .enum(['X264 (.mp4)', 'VP9 (.webm)', 'PRORES4444 (.mov)', 'GIF (.gif)']) + .register(z.globalRegistry, { + description: 'The output type of the generated video.', + }), + ), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + num_context_frames: z + .optional( + z.int().gte(0).lte(121).register(z.globalRegistry, { + description: + 'The number of frames to use as context for the extension.', + }), + ) + .default(25), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), + num_inference_steps: z + .optional( + z.int().gte(8).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to use.', + }), + ) + .default(40), + audio_strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Audio conditioning strength. Lower values represent more freedom given to the model to change the audio content.', + }), + ) + .default(1), + seed: z.optional(z.union([z.int(), z.unknown()])), + match_input_fps: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "When true, match the output FPS to the input video's FPS instead of using the default target FPS.", + }), + ) + .default(true), +}) + +/** + * LTX2ExtendVideoOutput + */ +export const zSchemaLtx219bDistilledExtendVideoOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for the generation.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for the random number generator.', + }), + video: zSchemaVideoFile, +}) + +/** + * LTX2DistilledExtendVideoInput + */ +export const zSchemaLtx219bDistilledExtendVideoInput = z.object({ + use_multiscale: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. 
This results in better coherence and details.', + }), + ) + .default(true), + video_url: z.string().register(z.globalRegistry, { + description: 'The URL of the video to extend.', + }), + acceleration: z.optional( + z.enum(['none', 'regular', 'high', 'full']).register(z.globalRegistry, { + description: 'The acceleration level to use.', + }), + ), + generate_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to generate audio for the video.', + }), + ) + .default(true), + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the video from.', + }), + fps: z + .optional( + z.number().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frames per second of the generated video.', + }), + ) + .default(25), + camera_lora: z.optional( + z + .enum([ + 'dolly_in', + 'dolly_out', + 'dolly_left', + 'dolly_right', + 'jib_up', + 'jib_down', + 'static', + 'none', + ]) + .register(z.globalRegistry, { + description: + 'The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.', + }), + ), + video_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'auto', + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + camera_lora_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The scale of the camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.', + }), + ) + .default(1), + num_frames: z + .optional( + z.int().gte(9).lte(481).register(z.globalRegistry, { + description: 'The number of frames to generate.', + }), + ) + .default(121), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to generate the video from.', + }), + ) + .default( + 'blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, off-sync audio,incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts.', + ), + video_strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Video conditioning strength. 
Lower values represent more freedom given to the model to change the video content.', + }), + ) + .default(1), + video_output_type: z.optional( + z + .enum(['X264 (.mp4)', 'VP9 (.webm)', 'PRORES4444 (.mov)', 'GIF (.gif)']) + .register(z.globalRegistry, { + description: 'The output type of the generated video.', + }), + ), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + num_context_frames: z + .optional( + z.int().gte(0).lte(121).register(z.globalRegistry, { + description: + 'The number of frames to use as context for the extension.', + }), + ) + .default(25), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), + audio_strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Audio conditioning strength. Lower values represent more freedom given to the model to change the audio content.', + }), + ) + .default(1), + seed: z.optional(z.union([z.int(), z.unknown()])), + match_input_fps: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "When true, match the output FPS to the input video's FPS instead of using the default target FPS.", + }), + ) + .default(true), +}) + +/** + * LTX2ExtendVideoOutput + */ +export const zSchemaLtx219bDistilledExtendVideoLoraOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for the generation.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for the random number generator.', + }), + video: zSchemaVideoFile, +}) + +/** + * LTX2LoRADistilledExtendVideoInput + */ +export const zSchemaLtx219bDistilledExtendVideoLoraInput = z.object({ + use_multiscale: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. 
This results in better coherence and details.', + }), + ) + .default(true), + video_url: z.string().register(z.globalRegistry, { + description: 'The URL of the video to extend.', + }), + acceleration: z.optional( + z.enum(['none', 'regular', 'high', 'full']).register(z.globalRegistry, { + description: 'The acceleration level to use.', + }), + ), + generate_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to generate audio for the video.', + }), + ) + .default(true), + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the video from.', + }), + fps: z + .optional( + z.number().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frames per second of the generated video.', + }), + ) + .default(25), + loras: z.array(zSchemaLoRaInput).register(z.globalRegistry, { + description: 'The LoRAs to use for the generation.', + }), + camera_lora: z.optional( + z + .enum([ + 'dolly_in', + 'dolly_out', + 'dolly_left', + 'dolly_right', + 'jib_up', + 'jib_down', + 'static', + 'none', + ]) + .register(z.globalRegistry, { + description: + 'The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.', + }), + ), + video_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'auto', + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + camera_lora_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The scale of the camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.', + }), + ) + .default(1), + num_frames: z + .optional( + z.int().gte(9).lte(481).register(z.globalRegistry, { + description: 'The number of frames to generate.', + }), + ) + .default(121), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to generate the video from.', + }), + ) + .default( + 'blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, off-sync audio,incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts.', + ), + video_strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Video conditioning strength. 
Lower values represent more freedom given to the model to change the video content.', + }), + ) + .default(1), + video_output_type: z.optional( + z + .enum(['X264 (.mp4)', 'VP9 (.webm)', 'PRORES4444 (.mov)', 'GIF (.gif)']) + .register(z.globalRegistry, { + description: 'The output type of the generated video.', + }), + ), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + num_context_frames: z + .optional( + z.int().gte(0).lte(121).register(z.globalRegistry, { + description: + 'The number of frames to use as context for the extension.', + }), + ) + .default(25), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), + audio_strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Audio conditioning strength. Lower values represent more freedom given to the model to change the audio content.', + }), + ) + .default(1), + seed: z.optional(z.union([z.int(), z.unknown()])), + match_input_fps: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "When true, match the output FPS to the input video's FPS instead of using the default target FPS.", + }), + ) + .default(true), +}) + +/** + * LTX2VideoToVideoOutput + */ +export const zSchemaLtx219bVideoToVideoOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for the generation.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for the random number generator.', + }), + video: zSchemaVideoFile, +}) + +/** + * LTX2VideoToVideoInput + */ +export const zSchemaLtx219bVideoToVideoInput = z.object({ + use_multiscale: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details.', + }), + ) + .default(true), + video_url: z.string().register(z.globalRegistry, { + description: 'The URL of the video to generate the video from.', + }), + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the video from.', + }), + generate_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to generate audio for the video.', + }), + ) + .default(true), + ic_lora_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The scale of the IC-LoRA to use. 
This allows you to control the strength of the IC-LoRA.', + }), + ) + .default(1), + video_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'auto', + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'The guidance scale to use.', + }), + ) + .default(3), + camera_lora_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The scale of the camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.', + }), + ) + .default(1), + num_frames: z + .optional( + z.int().gte(9).lte(481).register(z.globalRegistry, { + description: 'The number of frames to generate.', + }), + ) + .default(121), + video_strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Video conditioning strength. Lower values represent more freedom given to the model to change the video content.', + }), + ) + .default(1), + video_output_type: z.optional( + z + .enum(['X264 (.mp4)', 'VP9 (.webm)', 'PRORES4444 (.mov)', 'GIF (.gif)']) + .register(z.globalRegistry, { + description: 'The output type of the generated video.', + }), + ), + image_url: z.optional(z.union([z.string(), z.unknown()])), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), + seed: z.optional(z.union([z.int(), z.unknown()])), + match_video_length: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'When enabled, the number of frames will be calculated based on the video duration and FPS. When disabled, use the specified num_frames.', + }), + ) + .default(true), + acceleration: z.optional( + z.enum(['none', 'regular', 'high', 'full']).register(z.globalRegistry, { + description: 'The acceleration level to use.', + }), + ), + fps: z + .optional( + z.number().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frames per second of the generated video.', + }), + ) + .default(25), + camera_lora: z.optional( + z + .enum([ + 'dolly_in', + 'dolly_out', + 'dolly_left', + 'dolly_right', + 'jib_up', + 'jib_down', + 'static', + 'none', + ]) + .register(z.globalRegistry, { + description: + 'The camera LoRA to use. 
This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + image_strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The strength of the image to use for the video generation.', + }), + ) + .default(1), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to generate the video from.', + }), + ) + .default( + 'blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, off-sync audio,incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts.', + ), + preprocessor: z.optional( + z.enum(['depth', 'canny', 'pose', 'none']).register(z.globalRegistry, { + description: + 'The preprocessor to use for the video. When a preprocessor is used and `ic_lora_type` is set to `match_preprocessor`, the IC-LoRA will be loaded based on the preprocessor type.', + }), + ), + ic_lora: z.optional( + z + .enum([ + 'match_preprocessor', + 'canny', + 'depth', + 'pose', + 'detailer', + 'none', + ]) + .register(z.globalRegistry, { + description: + 'The type of IC-LoRA to load. In-Context LoRA weights are used to condition the video based on edge, depth, or pose videos. 
Only change this from `match_preprocessor` if your videos are already preprocessed (or you are using the detailer.)', + }), + ), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + num_inference_steps: z + .optional( + z.int().gte(8).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to use.', + }), + ) + .default(40), + match_input_fps: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "When true, match the output FPS to the input video's FPS instead of using the default target FPS.", + }), + ) + .default(true), +}) + +/** + * LTX2VideoToVideoOutput + */ +export const zSchemaLtx219bVideoToVideoLoraOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for the generation.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for the random number generator.', + }), + video: zSchemaVideoFile, +}) + +/** + * LTX2LoRAVideoToVideoInput + */ +export const zSchemaLtx219bVideoToVideoLoraInput = z.object({ + use_multiscale: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details.', + }), + ) + .default(true), + video_url: z.string().register(z.globalRegistry, { + description: 'The URL of the video to generate the video from.', + }), + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the video from.', + }), + ic_lora_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The scale of the IC-LoRA to use. This allows you to control the strength of the IC-LoRA.', + }), + ) + .default(1), + generate_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to generate audio for the video.', + }), + ) + .default(true), + loras: z.array(zSchemaLoRaInput).register(z.globalRegistry, { + description: 'The LoRAs to use for the generation.', + }), + video_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'auto', + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + guidance_scale: z + .optional( + z.number().gte(1).lte(10).register(z.globalRegistry, { + description: 'The guidance scale to use.', + }), + ) + .default(3), + num_frames: z + .optional( + z.int().gte(9).lte(481).register(z.globalRegistry, { + description: 'The number of frames to generate.', + }), + ) + .default(121), + camera_lora_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The scale of the camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.', + }), + ) + .default(1), + video_strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Video conditioning strength. 
Lower values represent more freedom given to the model to change the video content.', + }), + ) + .default(1), + video_output_type: z.optional( + z + .enum(['X264 (.mp4)', 'VP9 (.webm)', 'PRORES4444 (.mov)', 'GIF (.gif)']) + .register(z.globalRegistry, { + description: 'The output type of the generated video.', + }), + ), + image_url: z.optional(z.union([z.string(), z.unknown()])), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), + seed: z.optional(z.union([z.int(), z.unknown()])), + match_video_length: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'When enabled, the number of frames will be calculated based on the video duration and FPS. When disabled, use the specified num_frames.', + }), + ) + .default(true), + acceleration: z.optional( + z.enum(['none', 'regular', 'high', 'full']).register(z.globalRegistry, { + description: 'The acceleration level to use.', + }), + ), + fps: z + .optional( + z.number().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frames per second of the generated video.', + }), + ) + .default(25), + camera_lora: z.optional( + z + .enum([ + 'dolly_in', + 'dolly_out', + 'dolly_left', + 'dolly_right', + 'jib_up', + 'jib_down', + 'static', + 'none', + ]) + .register(z.globalRegistry, { + description: + 'The camera LoRA to use. 
This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.', + }), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + image_strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The strength of the image to use for the video generation.', + }), + ) + .default(1), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to generate the video from.', + }), + ) + .default( + 'blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, off-sync audio,incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts.', + ), + preprocessor: z.optional( + z.enum(['depth', 'canny', 'pose', 'none']).register(z.globalRegistry, { + description: + 'The preprocessor to use for the video. When a preprocessor is used and `ic_lora_type` is set to `match_preprocessor`, the IC-LoRA will be loaded based on the preprocessor type.', + }), + ), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + ic_lora: z.optional( + z + .enum([ + 'match_preprocessor', + 'canny', + 'depth', + 'pose', + 'detailer', + 'none', + ]) + .register(z.globalRegistry, { + description: + 'The type of IC-LoRA to load. In-Context LoRA weights are used to condition the video based on edge, depth, or pose videos. 
Only change this from `match_preprocessor` if your videos are already preprocessed (or you are using the detailer.)', + }), + ), + match_input_fps: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "When true, match the output FPS to the input video's FPS instead of using the default target FPS.", + }), + ) + .default(true), + num_inference_steps: z + .optional( + z.int().gte(8).lte(50).register(z.globalRegistry, { + description: 'The number of inference steps to use.', + }), + ) + .default(40), +}) + +/** + * LTX2VideoToVideoOutput + */ +export const zSchemaLtx219bDistilledVideoToVideoOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for the generation.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for the random number generator.', + }), + video: zSchemaVideoFile, +}) + +/** + * LTX2DistilledVideoToVideoInput + */ +export const zSchemaLtx219bDistilledVideoToVideoInput = z.object({ + use_multiscale: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details.', + }), + ) + .default(true), + video_url: z.string().register(z.globalRegistry, { + description: 'The URL of the video to generate the video from.', + }), + acceleration: z.optional( + z.enum(['none', 'regular', 'high', 'full']).register(z.globalRegistry, { + description: 'The acceleration level to use.', + }), + ), + generate_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to generate audio for the video.', + }), + ) + .default(true), + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the video from.', + }), + ic_lora_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The scale of the IC-LoRA to use. This allows you to control the strength of the IC-LoRA.', + }), + ) + .default(1), + fps: z + .optional( + z.number().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frames per second of the generated video.', + }), + ) + .default(25), + camera_lora: z.optional( + z + .enum([ + 'dolly_in', + 'dolly_out', + 'dolly_left', + 'dolly_right', + 'jib_up', + 'jib_down', + 'static', + 'none', + ]) + .register(z.globalRegistry, { + description: + 'The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.', + }), + ), + video_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'auto', + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + camera_lora_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The scale of the camera LoRA to use. 
This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.', + }), + ) + .default(1), + image_strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The strength of the image to use for the video generation.', + }), + ) + .default(1), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to generate the video from.', + }), + ) + .default( + 'blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, off-sync audio,incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts.', + ), + preprocessor: z.optional( + z.enum(['depth', 'canny', 'pose', 'none']).register(z.globalRegistry, { + description: + 'The preprocessor to use for the video. When a preprocessor is used and `ic_lora_type` is set to `match_preprocessor`, the IC-LoRA will be loaded based on the preprocessor type.', + }), + ), + video_strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Video conditioning strength. Lower values represent more freedom given to the model to change the video content.', + }), + ) + .default(1), + video_output_type: z.optional( + z + .enum(['X264 (.mp4)', 'VP9 (.webm)', 'PRORES4444 (.mov)', 'GIF (.gif)']) + .register(z.globalRegistry, { + description: 'The output type of the generated video.', + }), + ), + ic_lora: z.optional( + z + .enum([ + 'match_preprocessor', + 'canny', + 'depth', + 'pose', + 'detailer', + 'none', + ]) + .register(z.globalRegistry, { + description: + 'The type of IC-LoRA to load. In-Context LoRA weights are used to condition the video based on edge, depth, or pose videos. 
Only change this from `match_preprocessor` if your videos are already preprocessed (or you are using the detailer.)', + }), + ), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + num_frames: z + .optional( + z.int().gte(9).lte(481).register(z.globalRegistry, { + description: 'The number of frames to generate.', + }), + ) + .default(121), + image_url: z.optional(z.union([z.string(), z.unknown()])), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), + seed: z.optional(z.union([z.int(), z.unknown()])), + match_input_fps: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "When true, match the output FPS to the input video's FPS instead of using the default target FPS.", + }), + ) + .default(true), + match_video_length: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'When enabled, the number of frames will be calculated based on the video duration and FPS. When disabled, use the specified num_frames.', + }), + ) + .default(true), +}) + +/** + * LTX2VideoToVideoOutput + */ +export const zSchemaLtx219bDistilledVideoToVideoLoraOutput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt used for the generation.', + }), + seed: z.int().register(z.globalRegistry, { + description: 'The seed used for the random number generator.', + }), + video: zSchemaVideoFile, +}) + +/** + * LTX2LoRADistilledVideoToVideoInput + */ +export const zSchemaLtx219bDistilledVideoToVideoLoraInput = z.object({ + use_multiscale: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'Whether to use multi-scale generation. If True, the model will generate the video at a smaller scale first, then use the smaller video to guide the generation of a video at or above your requested size. This results in better coherence and details.', + }), + ) + .default(true), + video_url: z.string().register(z.globalRegistry, { + description: 'The URL of the video to generate the video from.', + }), + acceleration: z.optional( + z.enum(['none', 'regular', 'high', 'full']).register(z.globalRegistry, { + description: 'The acceleration level to use.', + }), + ), + generate_audio: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to generate audio for the video.', + }), + ) + .default(true), + prompt: z.string().register(z.globalRegistry, { + description: 'The prompt to generate the video from.', + }), + ic_lora_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The scale of the IC-LoRA to use. 
This allows you to control the strength of the IC-LoRA.', + }), + ) + .default(1), + fps: z + .optional( + z.number().gte(1).lte(60).register(z.globalRegistry, { + description: 'The frames per second of the generated video.', + }), + ) + .default(25), + loras: z.array(zSchemaLoRaInput).register(z.globalRegistry, { + description: 'The LoRAs to use for the generation.', + }), + camera_lora: z.optional( + z + .enum([ + 'dolly_in', + 'dolly_out', + 'dolly_left', + 'dolly_right', + 'jib_up', + 'jib_down', + 'static', + 'none', + ]) + .register(z.globalRegistry, { + description: + 'The camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.', + }), + ), + video_size: z.optional( + z.union([ + zSchemaImageSize, + z.enum([ + 'auto', + 'square_hd', + 'square', + 'portrait_4_3', + 'portrait_16_9', + 'landscape_4_3', + 'landscape_16_9', + ]), + ]), + ), + enable_safety_checker: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable the safety checker.', + }), + ) + .default(true), + camera_lora_scale: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The scale of the camera LoRA to use. This allows you to control the camera movement of the generated video more accurately than just prompting the model to move the camera.', + }), + ) + .default(1), + image_strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'The strength of the image to use for the video generation.', + }), + ) + .default(1), + negative_prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'The negative prompt to generate the video from.', + }), + ) + .default( + 'blurry, out of focus, overexposed, underexposed, low contrast, washed out colors, excessive noise, grainy texture, poor lighting, flickering, motion blur, distorted proportions, unnatural skin tones, deformed facial features, asymmetrical face, missing facial features, extra limbs, disfigured hands, wrong hand count, artifacts around text, inconsistent perspective, camera shake, incorrect depth of field, background too sharp, background clutter, distracting reflections, harsh shadows, inconsistent lighting direction, color banding, cartoonish rendering, 3D CGI look, unrealistic materials, uncanny valley effect, incorrect ethnicity, wrong gender, exaggerated expressions, wrong gaze direction, mismatched lip sync, silent or muted audio, distorted voice, robotic voice, echo, background noise, off-sync audio,incorrect dialogue, added dialogue, repetitive speech, jittery movement, awkward pauses, incorrect timing, unnatural transitions, inconsistent framing, tilted camera, flat lighting, inconsistent tone, cinematic oversaturation, stylized filters, or AI artifacts.', + ), + preprocessor: z.optional( + z.enum(['depth', 'canny', 'pose', 'none']).register(z.globalRegistry, { + description: + 'The preprocessor to use for the video. When a preprocessor is used and `ic_lora_type` is set to `match_preprocessor`, the IC-LoRA will be loaded based on the preprocessor type.', + }), + ), + video_strength: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Video conditioning strength. 
Lower values represent more freedom given to the model to change the video content.', + }), + ) + .default(1), + video_output_type: z.optional( + z + .enum(['X264 (.mp4)', 'VP9 (.webm)', 'PRORES4444 (.mov)', 'GIF (.gif)']) + .register(z.globalRegistry, { + description: 'The output type of the generated video.', + }), + ), + ic_lora: z.optional( + z + .enum([ + 'match_preprocessor', + 'canny', + 'depth', + 'pose', + 'detailer', + 'none', + ]) + .register(z.globalRegistry, { + description: + 'The type of IC-LoRA to load. In-Context LoRA weights are used to condition the video based on edge, depth, or pose videos. Only change this from `match_preprocessor` if your videos are already preprocessed (or you are using the detailer.)', + }), + ), + video_write_mode: z.optional( + z.enum(['fast', 'balanced', 'small']).register(z.globalRegistry, { + description: 'The write mode of the generated video.', + }), + ), + num_frames: z + .optional( + z.int().gte(9).lte(481).register(z.globalRegistry, { + description: 'The number of frames to generate.', + }), + ) + .default(121), + image_url: z.optional(z.union([z.string(), z.unknown()])), + video_quality: z.optional( + z.enum(['low', 'medium', 'high', 'maximum']).register(z.globalRegistry, { + description: 'The quality of the generated video.', + }), + ), + sync_mode: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "If `True`, the media will be returned as a data URI and the output data won't be available in the request history.", + }), + ) + .default(false), + enable_prompt_expansion: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to enable prompt expansion.', + }), + ) + .default(false), + seed: z.optional(z.union([z.int(), z.unknown()])), + match_input_fps: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + "When true, match the output FPS to the input video's FPS instead of using the default target FPS.", + }), + ) + .default(true), + match_video_length: z + .optional( + z.boolean().register(z.globalRegistry, { + description: + 'When enabled, the number of frames will be calculated based on the video duration and FPS. 
When disabled, use the specified num_frames.',
+      }),
+    )
+    .default(true),
+})
+
+/**
+ * FaceFusionVideoOutput
+ *
+ * FaceFusion output payload when video content is generated
+ */
+export const zSchemaAiFaceSwapFaceswapvideoOutput = z
+  .object({
+    processing_time_ms: z.optional(z.union([z.int(), z.unknown()])),
+    video: zSchemaVideo,
+  })
+  .register(z.globalRegistry, {
+    description: 'FaceFusion output payload when video content is generated',
+  })
+
+/**
+ * FaceSwapInputVideo
+ *
+ * Input schema for image ↔ video face swap
+ */
+export const zSchemaAiFaceSwapFaceswapvideoInput = z
+  .object({
+    source_face_url: z.string().register(z.globalRegistry, {
+      description: 'URL of the source face image.',
+    }),
+    target_video_url: z.string().register(z.globalRegistry, {
+      description: 'URL of the target video.',
+    }),
+  })
+  .register(z.globalRegistry, {
+    description: 'Input schema for image ↔ video face swap',
+  })
+
+/**
+ * Output
+ */
+export const zSchemaMmaudioV2Output = z.object({
+  video: zSchemaFile,
+})
+
+/**
+ * BaseInput
+ */
+export const zSchemaMmaudioV2Input = z.object({
+  prompt: z.string().register(z.globalRegistry, {
+    description: 'The prompt to generate the audio for.',
+  }),
+  video_url: z.string().register(z.globalRegistry, {
+    description: 'The URL of the video to generate the audio for.',
+  }),
+  num_steps: z
+    .optional(
+      z.int().gte(4).lte(50).register(z.globalRegistry, {
+        description: 'The number of steps to use when generating the audio.',
+      }),
+    )
+    .default(25),
+  duration: z
+    .optional(
+      z.number().gte(1).lte(30).register(z.globalRegistry, {
+        description: 'The duration of the audio to generate.',
+      }),
+    )
+    .default(8),
+  cfg_strength: z
+    .optional(
+      z.number().gte(0).lte(20).register(z.globalRegistry, {
+        description: 'The strength of Classifier Free Guidance.',
+      }),
+    )
+    .default(4.5),
+  seed: z.optional(
+    z.int().gte(0).lte(65535).register(z.globalRegistry, {
+      description: 'The seed for the random number generator.',
+    }),
+  ),
+  mask_away_clip: z
+    .optional(
+      z.boolean().register(z.globalRegistry, {
+        description: 'Whether to mask away the clip.',
+      }),
+    )
+    .default(false),
+  negative_prompt: z
+    .optional(
+      z.string().register(z.globalRegistry, {
+        description: 'The negative prompt to generate the audio for.',
+      }),
+    )
+    .default(''),
+})
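+
+// Illustrative usage sketch (placeholder URL): generating a soundtrack for a
+// short clip with MMAudio v2. Only prompt and video_url are required; the
+// remaining fields fall back to their schema defaults on parse().
+//
+//   const audioInput = zSchemaMmaudioV2Input.parse({
+//     prompt: 'rain on a tin roof, distant thunder',
+//     video_url: 'https://example.com/clip.mp4',
+//   })
+//   // audioInput.num_steps === 25, audioInput.duration === 8, etc.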
H264 is recommended for better RGB quality.', + }), + ), + refine_foreground_edges: z + .optional( + z.boolean().register(z.globalRegistry, { + description: "Improves the quality of the extracted object's edges.", + }), + ) + .default(true), +}) + +export const zSchemaQueueStatus = z.object({ + status: z.enum(['IN_QUEUE', 'IN_PROGRESS', 'COMPLETED']), + request_id: z.string().register(z.globalRegistry, { + description: 'The request id.', + }), + response_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The response url.', + }), + ), + status_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The status url.', + }), + ), + cancel_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The cancel url.', + }), + ), + logs: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The logs.', + }), + ), + metrics: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The metrics.', + }), + ), + queue_position: z.optional( + z.int().register(z.globalRegistry, { + description: 'The queue position.', + }), + ), +}) + +export const zGetBriaVideoBackgroundRemovalRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetBriaVideoBackgroundRemovalRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutBriaVideoBackgroundRemovalRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutBriaVideoBackgroundRemovalRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostBriaVideoBackgroundRemovalData = z.object({ + body: zSchemaVideoBackgroundRemovalInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostBriaVideoBackgroundRemovalResponse = zSchemaQueueStatus + +export const zGetBriaVideoBackgroundRemovalRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetBriaVideoBackgroundRemovalRequestsByRequestIdResponse = + zSchemaVideoBackgroundRemovalOutput + +export const zGetFalAiMmaudioV2RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
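+ * + * A hypothetical polling sketch (editor's illustration, not generator + * output); `statusUrl` is an assumed queue status endpoint URL: + * + * @example + * const status = zGetFalAiMmaudioV2RequestsByRequestIdStatusResponse.parse( + *   await (await fetch(statusUrl)).json(), + * ) + * if (status.status === 'IN_QUEUE') console.log(status.queue_position)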
+ */ +export const zGetFalAiMmaudioV2RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMmaudioV2RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiMmaudioV2RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMmaudioV2Data = z.object({ + body: zSchemaMmaudioV2Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMmaudioV2Response = zSchemaQueueStatus + +export const zGetFalAiMmaudioV2RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiMmaudioV2RequestsByRequestIdResponse = + zSchemaMmaudioV2Output + +export const zGetHalfMoonAiAiFaceSwapFaceswapvideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetHalfMoonAiAiFaceSwapFaceswapvideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutHalfMoonAiAiFaceSwapFaceswapvideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutHalfMoonAiAiFaceSwapFaceswapvideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostHalfMoonAiAiFaceSwapFaceswapvideoData = z.object({ + body: zSchemaAiFaceSwapFaceswapvideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostHalfMoonAiAiFaceSwapFaceswapvideoResponse = zSchemaQueueStatus + +export const zGetHalfMoonAiAiFaceSwapFaceswapvideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
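+ * + * A hedged consumption sketch (editor's illustration; `resultUrl` is an + * assumed result endpoint URL): + * + * @example + * const out = + *   zGetHalfMoonAiAiFaceSwapFaceswapvideoRequestsByRequestIdResponse.parse( + *     await (await fetch(resultUrl)).json(), + *   ) + * // `out.video` has already been validated against zSchemaVideo + * console.log(out.processing_time_ms)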
+ */ +export const zGetHalfMoonAiAiFaceSwapFaceswapvideoRequestsByRequestIdResponse = + zSchemaAiFaceSwapFaceswapvideoOutput + +export const zGetFalAiLtx219bDistilledVideoToVideoLoraRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLtx219bDistilledVideoToVideoLoraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtx219bDistilledVideoToVideoLoraRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLtx219bDistilledVideoToVideoLoraRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtx219bDistilledVideoToVideoLoraData = z.object({ + body: zSchemaLtx219bDistilledVideoToVideoLoraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtx219bDistilledVideoToVideoLoraResponse = + zSchemaQueueStatus + +export const zGetFalAiLtx219bDistilledVideoToVideoLoraRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiLtx219bDistilledVideoToVideoLoraRequestsByRequestIdResponse = + zSchemaLtx219bDistilledVideoToVideoLoraOutput + +export const zGetFalAiLtx219bDistilledVideoToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLtx219bDistilledVideoToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtx219bDistilledVideoToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
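+ * + * A minimal cancellation sketch (editor's illustration; `cancelUrl` is an + * assumed cancel endpoint URL): + * + * @example + * const { success } = + *   zPutFalAiLtx219bDistilledVideoToVideoRequestsByRequestIdCancelResponse.parse( + *     await (await fetch(cancelUrl, { method: 'PUT' })).json(), + *   ) + * if (success) console.log('request cancelled')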
+ */ +export const zPutFalAiLtx219bDistilledVideoToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtx219bDistilledVideoToVideoData = z.object({ + body: zSchemaLtx219bDistilledVideoToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtx219bDistilledVideoToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiLtx219bDistilledVideoToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiLtx219bDistilledVideoToVideoRequestsByRequestIdResponse = + zSchemaLtx219bDistilledVideoToVideoOutput + +export const zGetFalAiLtx219bVideoToVideoLoraRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLtx219bVideoToVideoLoraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtx219bVideoToVideoLoraRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLtx219bVideoToVideoLoraRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtx219bVideoToVideoLoraData = z.object({ + body: zSchemaLtx219bVideoToVideoLoraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtx219bVideoToVideoLoraResponse = zSchemaQueueStatus + +export const zGetFalAiLtx219bVideoToVideoLoraRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. + */ +export const zGetFalAiLtx219bVideoToVideoLoraRequestsByRequestIdResponse = + zSchemaLtx219bVideoToVideoLoraOutput + +export const zGetFalAiLtx219bVideoToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiLtx219bVideoToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtx219bVideoToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLtx219bVideoToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtx219bVideoToVideoData = z.object({ + body: zSchemaLtx219bVideoToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtx219bVideoToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiLtx219bVideoToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLtx219bVideoToVideoRequestsByRequestIdResponse = + zSchemaLtx219bVideoToVideoOutput + +export const zGetFalAiLtx219bDistilledExtendVideoLoraRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLtx219bDistilledExtendVideoLoraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtx219bDistilledExtendVideoLoraRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLtx219bDistilledExtendVideoLoraRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtx219bDistilledExtendVideoLoraData = z.object({ + body: zSchemaLtx219bDistilledExtendVideoLoraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtx219bDistilledExtendVideoLoraResponse = + zSchemaQueueStatus + +export const zGetFalAiLtx219bDistilledExtendVideoLoraRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiLtx219bDistilledExtendVideoLoraRequestsByRequestIdResponse = + zSchemaLtx219bDistilledExtendVideoLoraOutput + +export const zGetFalAiLtx219bDistilledExtendVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLtx219bDistilledExtendVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtx219bDistilledExtendVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLtx219bDistilledExtendVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtx219bDistilledExtendVideoData = z.object({ + body: zSchemaLtx219bDistilledExtendVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtx219bDistilledExtendVideoResponse = zSchemaQueueStatus + +export const zGetFalAiLtx219bDistilledExtendVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiLtx219bDistilledExtendVideoRequestsByRequestIdResponse = + zSchemaLtx219bDistilledExtendVideoOutput + +export const zGetFalAiLtx219bExtendVideoLoraRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLtx219bExtendVideoLoraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtx219bExtendVideoLoraRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLtx219bExtendVideoLoraRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtx219bExtendVideoLoraData = z.object({ + body: zSchemaLtx219bExtendVideoLoraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
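+ * + * A submission sketch (editor's illustration; `endpointUrl` is assumed, and + * `input` must satisfy zSchemaLtx219bExtendVideoLoraInput, whose fields are + * omitted here): + * + * @example + * const data = zPostFalAiLtx219bExtendVideoLoraData.parse({ body: input }) + * const queued = zPostFalAiLtx219bExtendVideoLoraResponse.parse( + *   await ( + *     await fetch(endpointUrl, { + *       method: 'POST', + *       body: JSON.stringify(data.body), + *     }) + *   ).json(), + * )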
+ */ +export const zPostFalAiLtx219bExtendVideoLoraResponse = zSchemaQueueStatus + +export const zGetFalAiLtx219bExtendVideoLoraRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLtx219bExtendVideoLoraRequestsByRequestIdResponse = + zSchemaLtx219bExtendVideoLoraOutput + +export const zGetFalAiLtx219bExtendVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLtx219bExtendVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtx219bExtendVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLtx219bExtendVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtx219bExtendVideoData = z.object({ + body: zSchemaLtx219bExtendVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtx219bExtendVideoResponse = zSchemaQueueStatus + +export const zGetFalAiLtx219bExtendVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLtx219bExtendVideoRequestsByRequestIdResponse = + zSchemaLtx219bExtendVideoOutput + +export const zGetBriaVideoEraseKeypointsRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetBriaVideoEraseKeypointsRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutBriaVideoEraseKeypointsRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutBriaVideoEraseKeypointsRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostBriaVideoEraseKeypointsData = z.object({ + body: zSchemaVideoEraseKeypointsInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostBriaVideoEraseKeypointsResponse = zSchemaQueueStatus + +export const zGetBriaVideoEraseKeypointsRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetBriaVideoEraseKeypointsRequestsByRequestIdResponse = + zSchemaVideoEraseKeypointsOutput + +export const zGetBriaVideoErasePromptRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetBriaVideoErasePromptRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutBriaVideoErasePromptRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutBriaVideoErasePromptRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostBriaVideoErasePromptData = z.object({ + body: zSchemaVideoErasePromptInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostBriaVideoErasePromptResponse = zSchemaQueueStatus + +export const zGetBriaVideoErasePromptRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetBriaVideoErasePromptRequestsByRequestIdResponse = + zSchemaVideoErasePromptOutput + +export const zGetBriaVideoEraseMaskRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
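+ * + * A status-with-logs sketch (editor's illustration; the request id and the + * URL construction are assumed): + * + * @example + * const req = zGetBriaVideoEraseMaskRequestsByRequestIdStatusData.parse({ + *   path: { request_id: 'abc123' }, + *   query: { logs: 1 }, + * }) + * // GET .../requests/${req.path.request_id}/status?logs=${req.query?.logs}, + * // then validate the JSON body with + * // zGetBriaVideoEraseMaskRequestsByRequestIdStatusResponse.parse(json)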
+ */ +export const zGetBriaVideoEraseMaskRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutBriaVideoEraseMaskRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutBriaVideoEraseMaskRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostBriaVideoEraseMaskData = z.object({ + body: zSchemaVideoEraseMaskInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostBriaVideoEraseMaskResponse = zSchemaQueueStatus + +export const zGetBriaVideoEraseMaskRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetBriaVideoEraseMaskRequestsByRequestIdResponse = + zSchemaVideoEraseMaskOutput + +export const zGetFalAiLightxRelightRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiLightxRelightRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLightxRelightRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiLightxRelightRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLightxRelightData = z.object({ + body: zSchemaLightxRelightInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLightxRelightResponse = zSchemaQueueStatus + +export const zGetFalAiLightxRelightRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiLightxRelightRequestsByRequestIdResponse = + zSchemaLightxRelightOutput + +export const zGetFalAiLightxRecameraRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiLightxRecameraRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLightxRecameraRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiLightxRecameraRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLightxRecameraData = z.object({ + body: zSchemaLightxRecameraInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLightxRecameraResponse = zSchemaQueueStatus + +export const zGetFalAiLightxRecameraRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLightxRecameraRequestsByRequestIdResponse = + zSchemaLightxRecameraOutput + +export const zGetFalAiKlingVideoV26StandardMotionControlRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoV26StandardMotionControlRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoV26StandardMotionControlRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKlingVideoV26StandardMotionControlRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoV26StandardMotionControlData = z.object({ + body: zSchemaKlingVideoV26StandardMotionControlInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiKlingVideoV26StandardMotionControlResponse = + zSchemaQueueStatus + +export const zGetFalAiKlingVideoV26StandardMotionControlRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiKlingVideoV26StandardMotionControlRequestsByRequestIdResponse = + zSchemaKlingVideoV26StandardMotionControlOutput + +export const zGetFalAiKlingVideoV26ProMotionControlRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoV26ProMotionControlRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoV26ProMotionControlRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKlingVideoV26ProMotionControlRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoV26ProMotionControlData = z.object({ + body: zSchemaKlingVideoV26ProMotionControlInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKlingVideoV26ProMotionControlResponse = + zSchemaQueueStatus + +export const zGetFalAiKlingVideoV26ProMotionControlRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiKlingVideoV26ProMotionControlRequestsByRequestIdResponse = + zSchemaKlingVideoV26ProMotionControlOutput + +export const zGetDecartLucyRestyleRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetDecartLucyRestyleRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutDecartLucyRestyleRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutDecartLucyRestyleRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostDecartLucyRestyleData = z.object({ + body: zSchemaLucyRestyleInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostDecartLucyRestyleResponse = zSchemaQueueStatus + +export const zGetDecartLucyRestyleRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetDecartLucyRestyleRequestsByRequestIdResponse = + zSchemaLucyRestyleOutput + +export const zGetFalAiScailRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiScailRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiScailRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiScailRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiScailData = z.object({ + body: zSchemaScailInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiScailResponse = zSchemaQueueStatus + +export const zGetFalAiScailRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiScailRequestsByRequestIdResponse = zSchemaScailOutput + +export const zGetClarityaiCrystalVideoUpscalerRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetClarityaiCrystalVideoUpscalerRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutClarityaiCrystalVideoUpscalerRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutClarityaiCrystalVideoUpscalerRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostClarityaiCrystalVideoUpscalerData = z.object({ + body: zSchemaCrystalVideoUpscalerInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostClarityaiCrystalVideoUpscalerResponse = zSchemaQueueStatus + +export const zGetClarityaiCrystalVideoUpscalerRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetClarityaiCrystalVideoUpscalerRequestsByRequestIdResponse = + zSchemaCrystalVideoUpscalerOutput + +export const zGetBriaBriaVideoEraserEraseMaskRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetBriaBriaVideoEraserEraseMaskRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutBriaBriaVideoEraserEraseMaskRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutBriaBriaVideoEraserEraseMaskRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostBriaBriaVideoEraserEraseMaskData = z.object({ + body: zSchemaBriaVideoEraserEraseMaskInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostBriaBriaVideoEraserEraseMaskResponse = zSchemaQueueStatus + +export const zGetBriaBriaVideoEraserEraseMaskRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. 
+ */ +export const zGetBriaBriaVideoEraserEraseMaskRequestsByRequestIdResponse = + zSchemaBriaVideoEraserEraseMaskOutput + +export const zGetBriaBriaVideoEraserEraseKeypointsRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetBriaBriaVideoEraserEraseKeypointsRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutBriaBriaVideoEraserEraseKeypointsRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutBriaBriaVideoEraserEraseKeypointsRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostBriaBriaVideoEraserEraseKeypointsData = z.object({ + body: zSchemaBriaVideoEraserEraseKeypointsInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostBriaBriaVideoEraserEraseKeypointsResponse = zSchemaQueueStatus + +export const zGetBriaBriaVideoEraserEraseKeypointsRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetBriaBriaVideoEraserEraseKeypointsRequestsByRequestIdResponse = + zSchemaBriaVideoEraserEraseKeypointsOutput + +export const zGetBriaBriaVideoEraserErasePromptRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetBriaBriaVideoEraserErasePromptRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutBriaBriaVideoEraserErasePromptRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutBriaBriaVideoEraserErasePromptRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostBriaBriaVideoEraserErasePromptData = z.object({ + body: zSchemaBriaVideoEraserErasePromptInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostBriaBriaVideoEraserErasePromptResponse = zSchemaQueueStatus + +export const zGetBriaBriaVideoEraserErasePromptRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetBriaBriaVideoEraserErasePromptRequestsByRequestIdResponse = + zSchemaBriaVideoEraserErasePromptOutput + +export const zGetWanV26ReferenceToVideoRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetWanV26ReferenceToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutWanV26ReferenceToVideoRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutWanV26ReferenceToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostWanV26ReferenceToVideoData = z.object({ + body: zSchemaV26ReferenceToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostWanV26ReferenceToVideoResponse = zSchemaQueueStatus + +export const zGetWanV26ReferenceToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetWanV26ReferenceToVideoRequestsByRequestIdResponse = + zSchemaV26ReferenceToVideoOutput + +export const zGetFalAiVeo31FastExtendVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiVeo31FastExtendVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiVeo31FastExtendVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiVeo31FastExtendVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiVeo31FastExtendVideoData = z.object({ + body: zSchemaVeo31FastExtendVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiVeo31FastExtendVideoResponse = zSchemaQueueStatus + +export const zGetFalAiVeo31FastExtendVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiVeo31FastExtendVideoRequestsByRequestIdResponse = + zSchemaVeo31FastExtendVideoOutput + +export const zGetFalAiVeo31ExtendVideoRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiVeo31ExtendVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiVeo31ExtendVideoRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiVeo31ExtendVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiVeo31ExtendVideoData = z.object({ + body: zSchemaVeo31ExtendVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiVeo31ExtendVideoResponse = zSchemaQueueStatus + +export const zGetFalAiVeo31ExtendVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiVeo31ExtendVideoRequestsByRequestIdResponse = + zSchemaVeo31ExtendVideoOutput + +export const zGetFalAiKlingVideoO1StandardVideoToVideoReferenceRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoO1StandardVideoToVideoReferenceRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoO1StandardVideoToVideoReferenceRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKlingVideoO1StandardVideoToVideoReferenceRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoO1StandardVideoToVideoReferenceData = z.object( + { + body: zSchemaKlingVideoO1StandardVideoToVideoReferenceInput, + path: z.optional(z.never()), + query: z.optional(z.never()), + }, +) + +/** + * The request status. + */ +export const zPostFalAiKlingVideoO1StandardVideoToVideoReferenceResponse = + zSchemaQueueStatus + +export const zGetFalAiKlingVideoO1StandardVideoToVideoReferenceRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiKlingVideoO1StandardVideoToVideoReferenceRequestsByRequestIdResponse = + zSchemaKlingVideoO1StandardVideoToVideoReferenceOutput + +export const zGetFalAiKlingVideoO1StandardVideoToVideoEditRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoO1StandardVideoToVideoEditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoO1StandardVideoToVideoEditRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiKlingVideoO1StandardVideoToVideoEditRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoO1StandardVideoToVideoEditData = z.object({ + body: zSchemaKlingVideoO1StandardVideoToVideoEditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKlingVideoO1StandardVideoToVideoEditResponse = + zSchemaQueueStatus + +export const zGetFalAiKlingVideoO1StandardVideoToVideoEditRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiKlingVideoO1StandardVideoToVideoEditRequestsByRequestIdResponse = + zSchemaKlingVideoO1StandardVideoToVideoEditOutput + +export const zGetFalAiSteadyDancerRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiSteadyDancerRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSteadyDancerRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiSteadyDancerRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSteadyDancerData = z.object({ + body: zSchemaSteadyDancerInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiSteadyDancerResponse = zSchemaQueueStatus + +export const zGetFalAiSteadyDancerRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSteadyDancerRequestsByRequestIdResponse = + zSchemaSteadyDancerOutput + +export const zGetFalAiOneToAllAnimation13bRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiOneToAllAnimation13bRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiOneToAllAnimation13bRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiOneToAllAnimation13bRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiOneToAllAnimation13bData = z.object({ + body: zSchemaOneToAllAnimation13bInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiOneToAllAnimation13bResponse = zSchemaQueueStatus + +export const zGetFalAiOneToAllAnimation13bRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiOneToAllAnimation13bRequestsByRequestIdResponse = + zSchemaOneToAllAnimation13bOutput + +export const zGetFalAiOneToAllAnimation14bRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiOneToAllAnimation14bRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiOneToAllAnimation14bRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiOneToAllAnimation14bRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiOneToAllAnimation14bData = z.object({ + body: zSchemaOneToAllAnimation14bInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiOneToAllAnimation14bResponse = zSchemaQueueStatus + +export const zGetFalAiOneToAllAnimation14bRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiOneToAllAnimation14bRequestsByRequestIdResponse = + zSchemaOneToAllAnimation14bOutput + +export const zGetFalAiWanVisionEnhancerRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiWanVisionEnhancerRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanVisionEnhancerRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanVisionEnhancerRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanVisionEnhancerData = z.object({ + body: zSchemaWanVisionEnhancerInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanVisionEnhancerResponse = zSchemaQueueStatus + +export const zGetFalAiWanVisionEnhancerRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiWanVisionEnhancerRequestsByRequestIdResponse = + zSchemaWanVisionEnhancerOutput + +export const zGetFalAiSyncLipsyncReact1RequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiSyncLipsyncReact1RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSyncLipsyncReact1RequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiSyncLipsyncReact1RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSyncLipsyncReact1Data = z.object({ + body: zSchemaSyncLipsyncReact1Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiSyncLipsyncReact1Response = zSchemaQueueStatus + +export const zGetFalAiSyncLipsyncReact1RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSyncLipsyncReact1RequestsByRequestIdResponse = + zSchemaSyncLipsyncReact1Output + +export const zGetVeedVideoBackgroundRemovalFastRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetVeedVideoBackgroundRemovalFastRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutVeedVideoBackgroundRemovalFastRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutVeedVideoBackgroundRemovalFastRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostVeedVideoBackgroundRemovalFastData = z.object({ + body: zSchemaVideoBackgroundRemovalFastInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostVeedVideoBackgroundRemovalFastResponse = zSchemaQueueStatus + +export const zGetVeedVideoBackgroundRemovalFastRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetVeedVideoBackgroundRemovalFastRequestsByRequestIdResponse = + zSchemaVideoBackgroundRemovalFastOutput + +export const zGetFalAiKlingVideoO1VideoToVideoEditRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoO1VideoToVideoEditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoO1VideoToVideoEditRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiKlingVideoO1VideoToVideoEditRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoO1VideoToVideoEditData = z.object({ + body: zSchemaKlingVideoO1VideoToVideoEditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKlingVideoO1VideoToVideoEditResponse = zSchemaQueueStatus + +export const zGetFalAiKlingVideoO1VideoToVideoEditRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiKlingVideoO1VideoToVideoEditRequestsByRequestIdResponse = + zSchemaKlingVideoO1VideoToVideoEditOutput + +export const zGetFalAiKlingVideoO1VideoToVideoReferenceRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKlingVideoO1VideoToVideoReferenceRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKlingVideoO1VideoToVideoReferenceRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKlingVideoO1VideoToVideoReferenceRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKlingVideoO1VideoToVideoReferenceData = z.object({ + body: zSchemaKlingVideoO1VideoToVideoReferenceInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKlingVideoO1VideoToVideoReferenceResponse = + zSchemaQueueStatus + +export const zGetFalAiKlingVideoO1VideoToVideoReferenceRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiKlingVideoO1VideoToVideoReferenceRequestsByRequestIdResponse = + zSchemaKlingVideoO1VideoToVideoReferenceOutput + +export const zGetVeedVideoBackgroundRemovalRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetVeedVideoBackgroundRemovalRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutVeedVideoBackgroundRemovalRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutVeedVideoBackgroundRemovalRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostVeedVideoBackgroundRemovalData = z.object({ + body: zSchemaVideoBackgroundRemovalInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostVeedVideoBackgroundRemovalResponse = zSchemaQueueStatus + +export const zGetVeedVideoBackgroundRemovalRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetVeedVideoBackgroundRemovalRequestsByRequestIdResponse = + zSchemaVideoBackgroundRemovalOutput + +export const zGetVeedVideoBackgroundRemovalGreenScreenRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetVeedVideoBackgroundRemovalGreenScreenRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutVeedVideoBackgroundRemovalGreenScreenRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutVeedVideoBackgroundRemovalGreenScreenRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostVeedVideoBackgroundRemovalGreenScreenData = z.object({ + body: zSchemaVideoBackgroundRemovalGreenScreenInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostVeedVideoBackgroundRemovalGreenScreenResponse = + zSchemaQueueStatus + +export const zGetVeedVideoBackgroundRemovalGreenScreenRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetVeedVideoBackgroundRemovalGreenScreenRequestsByRequestIdResponse = + zSchemaVideoBackgroundRemovalGreenScreenOutput + +export const zGetFalAiLtx2RetakeVideoRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiLtx2RetakeVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtx2RetakeVideoRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiLtx2RetakeVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtx2RetakeVideoData = z.object({ + body: zSchemaLtx2RetakeVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtx2RetakeVideoResponse = zSchemaQueueStatus + +export const zGetFalAiLtx2RetakeVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLtx2RetakeVideoRequestsByRequestIdResponse = + zSchemaLtx2RetakeVideoOutput + +export const zGetDecartLucyEditFastRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetDecartLucyEditFastRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutDecartLucyEditFastRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutDecartLucyEditFastRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostDecartLucyEditFastData = z.object({ + body: zSchemaLucyEditFastInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostDecartLucyEditFastResponse = zSchemaQueueStatus + +export const zGetDecartLucyEditFastRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetDecartLucyEditFastRequestsByRequestIdResponse = + zSchemaLucyEditFastOutput + +export const zGetFalAiSam3VideoRleRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiSam3VideoRleRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSam3VideoRleRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiSam3VideoRleRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSam3VideoRleData = z.object({ + body: zSchemaSam3VideoRleInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiSam3VideoRleResponse = zSchemaQueueStatus + +export const zGetFalAiSam3VideoRleRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSam3VideoRleRequestsByRequestIdResponse = + zSchemaSam3VideoRleOutput + +export const zGetFalAiSam3VideoRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiSam3VideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSam3VideoRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiSam3VideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSam3VideoData = z.object({ + body: zSchemaSam3VideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiSam3VideoResponse = zSchemaQueueStatus + +export const zGetFalAiSam3VideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSam3VideoRequestsByRequestIdResponse = + zSchemaSam3VideoOutput + +export const zGetFalAiEdittoRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiEdittoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiEdittoRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiEdittoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiEdittoData = z.object({ + body: zSchemaEdittoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiEdittoResponse = zSchemaQueueStatus + +export const zGetFalAiEdittoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiEdittoRequestsByRequestIdResponse = zSchemaEdittoOutput + +export const zGetFalAiFlashvsrUpscaleVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFlashvsrUpscaleVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlashvsrUpscaleVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlashvsrUpscaleVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlashvsrUpscaleVideoData = z.object({ + body: zSchemaFlashvsrUpscaleVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlashvsrUpscaleVideoResponse = zSchemaQueueStatus + +export const zGetFalAiFlashvsrUpscaleVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFlashvsrUpscaleVideoRequestsByRequestIdResponse = + zSchemaFlashvsrUpscaleVideoOutput + +export const zGetFalAiWorkflowUtilitiesAutoSubtitleRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiWorkflowUtilitiesAutoSubtitleRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWorkflowUtilitiesAutoSubtitleRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiWorkflowUtilitiesAutoSubtitleRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWorkflowUtilitiesAutoSubtitleData = z.object({ + body: zSchemaWorkflowUtilitiesAutoSubtitleInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiWorkflowUtilitiesAutoSubtitleResponse = + zSchemaQueueStatus + +export const zGetFalAiWorkflowUtilitiesAutoSubtitleRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiWorkflowUtilitiesAutoSubtitleRequestsByRequestIdResponse = + zSchemaWorkflowUtilitiesAutoSubtitleOutput + +export const zGetFalAiBytedanceUpscalerUpscaleVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiBytedanceUpscalerUpscaleVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiBytedanceUpscalerUpscaleVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiBytedanceUpscalerUpscaleVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiBytedanceUpscalerUpscaleVideoData = z.object({ + body: zSchemaBytedanceUpscalerUpscaleVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiBytedanceUpscalerUpscaleVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiBytedanceUpscalerUpscaleVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiBytedanceUpscalerUpscaleVideoRequestsByRequestIdResponse = + zSchemaBytedanceUpscalerUpscaleVideoOutput + +export const zGetFalAiVideoAsPromptRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiVideoAsPromptRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiVideoAsPromptRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiVideoAsPromptRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiVideoAsPromptData = z.object({ + body: zSchemaVideoAsPromptInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiVideoAsPromptResponse = zSchemaQueueStatus + +export const zGetFalAiVideoAsPromptRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiVideoAsPromptRequestsByRequestIdResponse = + zSchemaVideoAsPromptOutput + +export const zGetFalAiBirefnetV2VideoRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiBirefnetV2VideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiBirefnetV2VideoRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiBirefnetV2VideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiBirefnetV2VideoData = z.object({ + body: zSchemaBirefnetV2VideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiBirefnetV2VideoResponse = zSchemaQueueStatus + +export const zGetFalAiBirefnetV2VideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiBirefnetV2VideoRequestsByRequestIdResponse = + zSchemaBirefnetV2VideoOutput + +export const zGetFalAiViduQ2VideoExtensionProRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiViduQ2VideoExtensionProRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiViduQ2VideoExtensionProRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiViduQ2VideoExtensionProRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiViduQ2VideoExtensionProData = z.object({ + body: zSchemaViduQ2VideoExtensionProInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiViduQ2VideoExtensionProResponse = zSchemaQueueStatus + +export const zGetFalAiViduQ2VideoExtensionProRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. + */ +export const zGetFalAiViduQ2VideoExtensionProRequestsByRequestIdResponse = + zSchemaViduQ2VideoExtensionProOutput + +export const zGetMireloAiSfxV15VideoToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetMireloAiSfxV15VideoToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutMireloAiSfxV15VideoToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutMireloAiSfxV15VideoToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostMireloAiSfxV15VideoToVideoData = z.object({ + body: zSchemaSfxV15VideoToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostMireloAiSfxV15VideoToVideoResponse = zSchemaQueueStatus + +export const zGetMireloAiSfxV15VideoToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetMireloAiSfxV15VideoToVideoRequestsByRequestIdResponse = + zSchemaSfxV15VideoToVideoOutput + +export const zGetFalAiKreaWan14bVideoToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiKreaWan14bVideoToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiKreaWan14bVideoToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiKreaWan14bVideoToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiKreaWan14bVideoToVideoData = z.object({ + body: zSchemaKreaWan14bVideoToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiKreaWan14bVideoToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiKreaWan14bVideoToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiKreaWan14bVideoToVideoRequestsByRequestIdResponse = + zSchemaKreaWan14bVideoToVideoOutput + +export const zGetFalAiSora2VideoToVideoRemixRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiSora2VideoToVideoRemixRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSora2VideoToVideoRemixRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiSora2VideoToVideoRemixRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSora2VideoToVideoRemixData = z.object({ + body: zSchemaSora2VideoToVideoRemixInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiSora2VideoToVideoRemixResponse = zSchemaQueueStatus + +export const zGetFalAiSora2VideoToVideoRemixRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSora2VideoToVideoRemixRequestsByRequestIdResponse = + zSchemaSora2VideoToVideoRemixOutput + +export const zGetFalAiWanVaceAppsLongReframeRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiWanVaceAppsLongReframeRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanVaceAppsLongReframeRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanVaceAppsLongReframeRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanVaceAppsLongReframeData = z.object({ + body: zSchemaWanVaceAppsLongReframeInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanVaceAppsLongReframeResponse = zSchemaQueueStatus + +export const zGetFalAiWanVaceAppsLongReframeRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiWanVaceAppsLongReframeRequestsByRequestIdResponse = + zSchemaWanVaceAppsLongReframeOutput + +export const zGetFalAiInfinitalkVideoToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiInfinitalkVideoToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiInfinitalkVideoToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiInfinitalkVideoToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiInfinitalkVideoToVideoData = z.object({ + body: zSchemaInfinitalkVideoToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiInfinitalkVideoToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiInfinitalkVideoToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiInfinitalkVideoToVideoRequestsByRequestIdResponse = + zSchemaInfinitalkVideoToVideoOutput + +export const zGetFalAiSeedvrUpscaleVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiSeedvrUpscaleVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSeedvrUpscaleVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiSeedvrUpscaleVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSeedvrUpscaleVideoData = z.object({ + body: zSchemaSeedvrUpscaleVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiSeedvrUpscaleVideoResponse = zSchemaQueueStatus + +export const zGetFalAiSeedvrUpscaleVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSeedvrUpscaleVideoRequestsByRequestIdResponse = + zSchemaSeedvrUpscaleVideoOutput + +export const zGetFalAiWanVaceAppsVideoEditRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiWanVaceAppsVideoEditRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanVaceAppsVideoEditRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanVaceAppsVideoEditRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanVaceAppsVideoEditData = z.object({ + body: zSchemaWanVaceAppsVideoEditInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanVaceAppsVideoEditResponse = zSchemaQueueStatus + +export const zGetFalAiWanVaceAppsVideoEditRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiWanVaceAppsVideoEditRequestsByRequestIdResponse = + zSchemaWanVaceAppsVideoEditOutput + +export const zGetFalAiWanV2214bAnimateReplaceRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiWanV2214bAnimateReplaceRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanV2214bAnimateReplaceRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanV2214bAnimateReplaceRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanV2214bAnimateReplaceData = z.object({ + body: zSchemaWanV2214bAnimateReplaceInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanV2214bAnimateReplaceResponse = zSchemaQueueStatus + +export const zGetFalAiWanV2214bAnimateReplaceRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. 
+ */ +export const zGetFalAiWanV2214bAnimateReplaceRequestsByRequestIdResponse = + zSchemaWanV2214bAnimateReplaceOutput + +export const zGetFalAiWanV2214bAnimateMoveRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiWanV2214bAnimateMoveRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanV2214bAnimateMoveRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanV2214bAnimateMoveRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanV2214bAnimateMoveData = z.object({ + body: zSchemaWanV2214bAnimateMoveInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanV2214bAnimateMoveResponse = zSchemaQueueStatus + +export const zGetFalAiWanV2214bAnimateMoveRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiWanV2214bAnimateMoveRequestsByRequestIdResponse = + zSchemaWanV2214bAnimateMoveOutput + +export const zGetDecartLucyEditProRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetDecartLucyEditProRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutDecartLucyEditProRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutDecartLucyEditProRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostDecartLucyEditProData = z.object({ + body: zSchemaLucyEditProInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostDecartLucyEditProResponse = zSchemaQueueStatus + +export const zGetDecartLucyEditProRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetDecartLucyEditProRequestsByRequestIdResponse = + zSchemaLucyEditProOutput + +export const zGetDecartLucyEditDevRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetDecartLucyEditDevRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutDecartLucyEditDevRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutDecartLucyEditDevRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostDecartLucyEditDevData = z.object({ + body: zSchemaLucyEditDevInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostDecartLucyEditDevResponse = zSchemaQueueStatus + +export const zGetDecartLucyEditDevRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetDecartLucyEditDevRequestsByRequestIdResponse = + zSchemaLucyEditDevOutput + +export const zGetFalAiWan22VaceFunA14bReframeRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiWan22VaceFunA14bReframeRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWan22VaceFunA14bReframeRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiWan22VaceFunA14bReframeRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWan22VaceFunA14bReframeData = z.object({ + body: zSchemaWan22VaceFunA14bReframeInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWan22VaceFunA14bReframeResponse = zSchemaQueueStatus + +export const zGetFalAiWan22VaceFunA14bReframeRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. + */ +export const zGetFalAiWan22VaceFunA14bReframeRequestsByRequestIdResponse = + zSchemaWan22VaceFunA14bReframeOutput + +export const zGetFalAiWan22VaceFunA14bOutpaintingRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiWan22VaceFunA14bOutpaintingRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWan22VaceFunA14bOutpaintingRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiWan22VaceFunA14bOutpaintingRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWan22VaceFunA14bOutpaintingData = z.object({ + body: zSchemaWan22VaceFunA14bOutpaintingInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWan22VaceFunA14bOutpaintingResponse = zSchemaQueueStatus + +export const zGetFalAiWan22VaceFunA14bOutpaintingRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiWan22VaceFunA14bOutpaintingRequestsByRequestIdResponse = + zSchemaWan22VaceFunA14bOutpaintingOutput + +export const zGetFalAiWan22VaceFunA14bInpaintingRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiWan22VaceFunA14bInpaintingRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWan22VaceFunA14bInpaintingRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiWan22VaceFunA14bInpaintingRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWan22VaceFunA14bInpaintingData = z.object({ + body: zSchemaWan22VaceFunA14bInpaintingInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWan22VaceFunA14bInpaintingResponse = zSchemaQueueStatus + +export const zGetFalAiWan22VaceFunA14bInpaintingRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiWan22VaceFunA14bInpaintingRequestsByRequestIdResponse = + zSchemaWan22VaceFunA14bInpaintingOutput + +export const zGetFalAiWan22VaceFunA14bDepthRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiWan22VaceFunA14bDepthRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWan22VaceFunA14bDepthRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiWan22VaceFunA14bDepthRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWan22VaceFunA14bDepthData = z.object({ + body: zSchemaWan22VaceFunA14bDepthInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWan22VaceFunA14bDepthResponse = zSchemaQueueStatus + +export const zGetFalAiWan22VaceFunA14bDepthRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiWan22VaceFunA14bDepthRequestsByRequestIdResponse = + zSchemaWan22VaceFunA14bDepthOutput + +export const zGetFalAiWan22VaceFunA14bPoseRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiWan22VaceFunA14bPoseRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWan22VaceFunA14bPoseRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiWan22VaceFunA14bPoseRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWan22VaceFunA14bPoseData = z.object({ + body: zSchemaWan22VaceFunA14bPoseInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWan22VaceFunA14bPoseResponse = zSchemaQueueStatus + +export const zGetFalAiWan22VaceFunA14bPoseRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiWan22VaceFunA14bPoseRequestsByRequestIdResponse = + zSchemaWan22VaceFunA14bPoseOutput + +export const zGetFalAiHunyuanVideoFoleyRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiHunyuanVideoFoleyRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiHunyuanVideoFoleyRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiHunyuanVideoFoleyRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiHunyuanVideoFoleyData = z.object({ + body: zSchemaHunyuanVideoFoleyInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiHunyuanVideoFoleyResponse = zSchemaQueueStatus + +export const zGetFalAiHunyuanVideoFoleyRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiHunyuanVideoFoleyRequestsByRequestIdResponse = + zSchemaHunyuanVideoFoleyOutput + +export const zGetFalAiSyncLipsyncV2ProRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiSyncLipsyncV2ProRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSyncLipsyncV2ProRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiSyncLipsyncV2ProRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSyncLipsyncV2ProData = z.object({ + body: zSchemaSyncLipsyncV2ProInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiSyncLipsyncV2ProResponse = zSchemaQueueStatus + +export const zGetFalAiSyncLipsyncV2ProRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSyncLipsyncV2ProRequestsByRequestIdResponse = + zSchemaSyncLipsyncV2ProOutput + +export const zGetFalAiWanFunControlRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiWanFunControlRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanFunControlRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiWanFunControlRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanFunControlData = z.object({ + body: zSchemaWanFunControlInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanFunControlResponse = zSchemaQueueStatus + +export const zGetFalAiWanFunControlRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiWanFunControlRequestsByRequestIdResponse = + zSchemaWanFunControlOutput + +export const zGetBriaVideoIncreaseResolutionRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetBriaVideoIncreaseResolutionRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutBriaVideoIncreaseResolutionRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutBriaVideoIncreaseResolutionRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostBriaVideoIncreaseResolutionData = z.object({ + body: zSchemaVideoIncreaseResolutionInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostBriaVideoIncreaseResolutionResponse = zSchemaQueueStatus + +export const zGetBriaVideoIncreaseResolutionRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetBriaVideoIncreaseResolutionRequestsByRequestIdResponse = + zSchemaVideoIncreaseResolutionOutput + +export const zGetFalAiInfinitalkRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiInfinitalkRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiInfinitalkRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiInfinitalkRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiInfinitalkData = z.object({ + body: zSchemaInfinitalkInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiInfinitalkResponse = zSchemaQueueStatus + +export const zGetFalAiInfinitalkRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiInfinitalkRequestsByRequestIdResponse = + zSchemaInfinitalkOutput + +export const zGetMireloAiSfxV1VideoToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetMireloAiSfxV1VideoToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutMireloAiSfxV1VideoToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutMireloAiSfxV1VideoToVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostMireloAiSfxV1VideoToVideoData = z.object({ + body: zSchemaSfxV1VideoToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostMireloAiSfxV1VideoToVideoResponse = zSchemaQueueStatus + +export const zGetMireloAiSfxV1VideoToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetMireloAiSfxV1VideoToVideoRequestsByRequestIdResponse = + zSchemaSfxV1VideoToVideoOutput + +export const zGetMoonvalleyMareyPoseTransferRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetMoonvalleyMareyPoseTransferRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutMoonvalleyMareyPoseTransferRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutMoonvalleyMareyPoseTransferRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostMoonvalleyMareyPoseTransferData = z.object({ + body: zSchemaMareyPoseTransferInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostMoonvalleyMareyPoseTransferResponse = zSchemaQueueStatus + +export const zGetMoonvalleyMareyPoseTransferRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetMoonvalleyMareyPoseTransferRequestsByRequestIdResponse = + zSchemaMareyPoseTransferOutput + +export const zGetMoonvalleyMareyMotionTransferRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetMoonvalleyMareyMotionTransferRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutMoonvalleyMareyMotionTransferRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutMoonvalleyMareyMotionTransferRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostMoonvalleyMareyMotionTransferData = z.object({ + body: zSchemaMareyMotionTransferInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostMoonvalleyMareyMotionTransferResponse = zSchemaQueueStatus + +export const zGetMoonvalleyMareyMotionTransferRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetMoonvalleyMareyMotionTransferRequestsByRequestIdResponse = + zSchemaMareyMotionTransferOutput + +export const zGetFalAiFfmpegApiMergeVideosRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFfmpegApiMergeVideosRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFfmpegApiMergeVideosRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFfmpegApiMergeVideosRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFfmpegApiMergeVideosData = z.object({ + body: zSchemaFfmpegApiMergeVideosInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFfmpegApiMergeVideosResponse = zSchemaQueueStatus + +export const zGetFalAiFfmpegApiMergeVideosRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFfmpegApiMergeVideosRequestsByRequestIdResponse = + zSchemaFfmpegApiMergeVideosOutput + +export const zGetFalAiWanV22A14bVideoToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiWanV22A14bVideoToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanV22A14bVideoToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiWanV22A14bVideoToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanV22A14bVideoToVideoData = z.object({ + body: zSchemaWanV22A14bVideoToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanV22A14bVideoToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiWanV22A14bVideoToVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiWanV22A14bVideoToVideoRequestsByRequestIdResponse = + zSchemaWanV22A14bVideoToVideoOutput + +export const zGetFalAiLtxv13B098DistilledExtendRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLtxv13B098DistilledExtendRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtxv13B098DistilledExtendRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLtxv13B098DistilledExtendRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtxv13B098DistilledExtendData = z.object({ + body: zSchemaLtxv13B098DistilledExtendInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtxv13B098DistilledExtendResponse = zSchemaQueueStatus + +export const zGetFalAiLtxv13B098DistilledExtendRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiLtxv13B098DistilledExtendRequestsByRequestIdResponse = + zSchemaLtxv13B098DistilledExtendOutput + +export const zGetFalAiRifeVideoRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiRifeVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiRifeVideoRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiRifeVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiRifeVideoData = z.object({ + body: zSchemaRifeVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiRifeVideoResponse = zSchemaQueueStatus + +export const zGetFalAiRifeVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiRifeVideoRequestsByRequestIdResponse = + zSchemaRifeVideoOutput + +export const zGetFalAiFilmVideoRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFilmVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFilmVideoRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFilmVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFilmVideoData = z.object({ + body: zSchemaFilmVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFilmVideoResponse = zSchemaQueueStatus + +export const zGetFalAiFilmVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiFilmVideoRequestsByRequestIdResponse = + zSchemaFilmVideoOutput + +export const zGetFalAiLumaDreamMachineRay2FlashModifyRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLumaDreamMachineRay2FlashModifyRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLumaDreamMachineRay2FlashModifyRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLumaDreamMachineRay2FlashModifyRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLumaDreamMachineRay2FlashModifyData = z.object({ + body: zSchemaLumaDreamMachineRay2FlashModifyInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLumaDreamMachineRay2FlashModifyResponse = + zSchemaQueueStatus + +export const zGetFalAiLumaDreamMachineRay2FlashModifyRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiLumaDreamMachineRay2FlashModifyRequestsByRequestIdResponse = + zSchemaLumaDreamMachineRay2FlashModifyOutput + +export const zGetFalAiLtxv13B098DistilledMulticonditioningRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLtxv13B098DistilledMulticonditioningRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtxv13B098DistilledMulticonditioningRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiLtxv13B098DistilledMulticonditioningRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtxv13B098DistilledMulticonditioningData = z.object({ + body: zSchemaLtxv13B098DistilledMulticonditioningInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtxv13B098DistilledMulticonditioningResponse = + zSchemaQueueStatus + +export const zGetFalAiLtxv13B098DistilledMulticonditioningRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiLtxv13B098DistilledMulticonditioningRequestsByRequestIdResponse = + zSchemaLtxv13B098DistilledMulticonditioningOutput + +export const zGetFalAiPixverseSoundEffectsRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPixverseSoundEffectsRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPixverseSoundEffectsRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPixverseSoundEffectsRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPixverseSoundEffectsData = z.object({ + body: zSchemaPixverseSoundEffectsInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPixverseSoundEffectsResponse = zSchemaQueueStatus + +export const zGetFalAiPixverseSoundEffectsRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPixverseSoundEffectsRequestsByRequestIdResponse = + zSchemaPixverseSoundEffectsOutput + +export const zGetFalAiThinksoundAudioRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiThinksoundAudioRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiThinksoundAudioRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiThinksoundAudioRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiThinksoundAudioData = z.object({ + body: zSchemaThinksoundAudioInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiThinksoundAudioResponse = zSchemaQueueStatus + +export const zGetFalAiThinksoundAudioRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiThinksoundAudioRequestsByRequestIdResponse = + zSchemaThinksoundAudioOutput + +export const zGetFalAiThinksoundRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiThinksoundRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiThinksoundRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiThinksoundRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiThinksoundData = z.object({ + body: zSchemaThinksoundInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiThinksoundResponse = zSchemaQueueStatus + +export const zGetFalAiThinksoundRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiThinksoundRequestsByRequestIdResponse = + zSchemaThinksoundOutput + +export const zGetFalAiPixverseExtendFastRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPixverseExtendFastRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPixverseExtendFastRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPixverseExtendFastRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPixverseExtendFastData = z.object({ + body: zSchemaPixverseExtendFastInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPixverseExtendFastResponse = zSchemaQueueStatus + +export const zGetFalAiPixverseExtendFastRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPixverseExtendFastRequestsByRequestIdResponse = + zSchemaPixverseExtendFastOutput + +export const zGetFalAiPixverseExtendRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiPixverseExtendRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPixverseExtendRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiPixverseExtendRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPixverseExtendData = z.object({ + body: zSchemaPixverseExtendInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiPixverseExtendResponse = zSchemaQueueStatus + +export const zGetFalAiPixverseExtendRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPixverseExtendRequestsByRequestIdResponse = + zSchemaPixverseExtendOutput + +export const zGetFalAiPixverseLipsyncRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiPixverseLipsyncRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPixverseLipsyncRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiPixverseLipsyncRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPixverseLipsyncData = z.object({ + body: zSchemaPixverseLipsyncInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiPixverseLipsyncResponse = zSchemaQueueStatus + +export const zGetFalAiPixverseLipsyncRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPixverseLipsyncRequestsByRequestIdResponse = + zSchemaPixverseLipsyncOutput + +export const zGetFalAiLumaDreamMachineRay2ModifyRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLumaDreamMachineRay2ModifyRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLumaDreamMachineRay2ModifyRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiLumaDreamMachineRay2ModifyRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLumaDreamMachineRay2ModifyData = z.object({ + body: zSchemaLumaDreamMachineRay2ModifyInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLumaDreamMachineRay2ModifyResponse = zSchemaQueueStatus + +export const zGetFalAiLumaDreamMachineRay2ModifyRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiLumaDreamMachineRay2ModifyRequestsByRequestIdResponse = + zSchemaLumaDreamMachineRay2ModifyOutput + +export const zGetFalAiWanVace14bReframeRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiWanVace14bReframeRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanVace14bReframeRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanVace14bReframeRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanVace14bReframeData = z.object({ + body: zSchemaWanVace14bReframeInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanVace14bReframeResponse = zSchemaQueueStatus + +export const zGetFalAiWanVace14bReframeRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiWanVace14bReframeRequestsByRequestIdResponse = + zSchemaWanVace14bReframeOutput + +export const zGetFalAiWanVace14bOutpaintingRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiWanVace14bOutpaintingRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanVace14bOutpaintingRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanVace14bOutpaintingRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanVace14bOutpaintingData = z.object({ + body: zSchemaWanVace14bOutpaintingInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanVace14bOutpaintingResponse = zSchemaQueueStatus + +export const zGetFalAiWanVace14bOutpaintingRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiWanVace14bOutpaintingRequestsByRequestIdResponse = + zSchemaWanVace14bOutpaintingOutput + +export const zGetFalAiWanVace14bInpaintingRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiWanVace14bInpaintingRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanVace14bInpaintingRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanVace14bInpaintingRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanVace14bInpaintingData = z.object({ + body: zSchemaWanVace14bInpaintingInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanVace14bInpaintingResponse = zSchemaQueueStatus + +export const zGetFalAiWanVace14bInpaintingRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiWanVace14bInpaintingRequestsByRequestIdResponse = + zSchemaWanVace14bInpaintingOutput + +export const zGetFalAiWanVace14bPoseRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiWanVace14bPoseRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanVace14bPoseRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanVace14bPoseRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanVace14bPoseData = z.object({ + body: zSchemaWanVace14bPoseInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanVace14bPoseResponse = zSchemaQueueStatus + +export const zGetFalAiWanVace14bPoseRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiWanVace14bPoseRequestsByRequestIdResponse = + zSchemaWanVace14bPoseOutput + +export const zGetFalAiWanVace14bDepthRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiWanVace14bDepthRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanVace14bDepthRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanVace14bDepthRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanVace14bDepthData = z.object({ + body: zSchemaWanVace14bDepthInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiWanVace14bDepthResponse = zSchemaQueueStatus + +export const zGetFalAiWanVace14bDepthRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiWanVace14bDepthRequestsByRequestIdResponse = + zSchemaWanVace14bDepthOutput + +export const zGetFalAiDwposeVideoRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiDwposeVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiDwposeVideoRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiDwposeVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiDwposeVideoData = z.object({ + body: zSchemaDwposeVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiDwposeVideoResponse = zSchemaQueueStatus + +export const zGetFalAiDwposeVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiDwposeVideoRequestsByRequestIdResponse = + zSchemaDwposeVideoOutput + +export const zGetFalAiFfmpegApiMergeAudioVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFfmpegApiMergeAudioVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFfmpegApiMergeAudioVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiFfmpegApiMergeAudioVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFfmpegApiMergeAudioVideoData = z.object({ + body: zSchemaFfmpegApiMergeAudioVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFfmpegApiMergeAudioVideoResponse = zSchemaQueueStatus + +export const zGetFalAiFfmpegApiMergeAudioVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiFfmpegApiMergeAudioVideoRequestsByRequestIdResponse = + zSchemaFfmpegApiMergeAudioVideoOutput + +export const zGetFalAiWanVace13bRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiWanVace13bRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanVace13bRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanVace13bRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanVace13bData = z.object({ + body: zSchemaWanVace13bInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanVace13bResponse = zSchemaQueueStatus + +export const zGetFalAiWanVace13bRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiWanVace13bRequestsByRequestIdResponse = + zSchemaWanVace13bOutput + +export const zGetFalAiLumaDreamMachineRay2FlashReframeRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiLumaDreamMachineRay2FlashReframeRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLumaDreamMachineRay2FlashReframeRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLumaDreamMachineRay2FlashReframeRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLumaDreamMachineRay2FlashReframeData = z.object({ + body: zSchemaLumaDreamMachineRay2FlashReframeInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLumaDreamMachineRay2FlashReframeResponse = + zSchemaQueueStatus + +export const zGetFalAiLumaDreamMachineRay2FlashReframeRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiLumaDreamMachineRay2FlashReframeRequestsByRequestIdResponse = + zSchemaLumaDreamMachineRay2FlashReframeOutput + +export const zGetFalAiLumaDreamMachineRay2ReframeRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLumaDreamMachineRay2ReframeRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLumaDreamMachineRay2ReframeRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLumaDreamMachineRay2ReframeRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLumaDreamMachineRay2ReframeData = z.object({ + body: zSchemaLumaDreamMachineRay2ReframeInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLumaDreamMachineRay2ReframeResponse = zSchemaQueueStatus + +export const zGetFalAiLumaDreamMachineRay2ReframeRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiLumaDreamMachineRay2ReframeRequestsByRequestIdResponse = + zSchemaLumaDreamMachineRay2ReframeOutput + +export const zGetVeedLipsyncRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetVeedLipsyncRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutVeedLipsyncRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutVeedLipsyncRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostVeedLipsyncData = z.object({ + body: zSchemaLipsyncInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostVeedLipsyncResponse = zSchemaQueueStatus + +export const zGetVeedLipsyncRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetVeedLipsyncRequestsByRequestIdResponse = zSchemaLipsyncOutput + +export const zGetFalAiWanVace14bRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiWanVace14bRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanVace14bRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanVace14bRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanVace14bData = z.object({ + body: zSchemaWanVace14bInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiWanVace14bResponse = zSchemaQueueStatus + +export const zGetFalAiWanVace14bRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiWanVace14bRequestsByRequestIdResponse = + zSchemaWanVace14bOutput + +export const zGetFalAiLtxVideo13bDistilledExtendRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLtxVideo13bDistilledExtendRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtxVideo13bDistilledExtendRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLtxVideo13bDistilledExtendRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtxVideo13bDistilledExtendData = z.object({ + body: zSchemaLtxVideo13bDistilledExtendInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtxVideo13bDistilledExtendResponse = zSchemaQueueStatus + +export const zGetFalAiLtxVideo13bDistilledExtendRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiLtxVideo13bDistilledExtendRequestsByRequestIdResponse = + zSchemaLtxVideo13bDistilledExtendOutput + +export const zGetFalAiLtxVideo13bDistilledMulticonditioningRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLtxVideo13bDistilledMulticonditioningRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtxVideo13bDistilledMulticonditioningRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiLtxVideo13bDistilledMulticonditioningRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtxVideo13bDistilledMulticonditioningData = z.object({ + body: zSchemaLtxVideo13bDistilledMulticonditioningInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtxVideo13bDistilledMulticonditioningResponse = + zSchemaQueueStatus + +export const zGetFalAiLtxVideo13bDistilledMulticonditioningRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiLtxVideo13bDistilledMulticonditioningRequestsByRequestIdResponse = + zSchemaLtxVideo13bDistilledMulticonditioningOutput + +export const zGetFalAiLtxVideo13bDevMulticonditioningRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLtxVideo13bDevMulticonditioningRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtxVideo13bDevMulticonditioningRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLtxVideo13bDevMulticonditioningRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtxVideo13bDevMulticonditioningData = z.object({ + body: zSchemaLtxVideo13bDevMulticonditioningInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtxVideo13bDevMulticonditioningResponse = + zSchemaQueueStatus + +export const zGetFalAiLtxVideo13bDevMulticonditioningRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiLtxVideo13bDevMulticonditioningRequestsByRequestIdResponse = + zSchemaLtxVideo13bDevMulticonditioningOutput + +export const zGetFalAiLtxVideo13bDevExtendRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLtxVideo13bDevExtendRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtxVideo13bDevExtendRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLtxVideo13bDevExtendRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtxVideo13bDevExtendData = z.object({ + body: zSchemaLtxVideo13bDevExtendInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtxVideo13bDevExtendResponse = zSchemaQueueStatus + +export const zGetFalAiLtxVideo13bDevExtendRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLtxVideo13bDevExtendRequestsByRequestIdResponse = + zSchemaLtxVideo13bDevExtendOutput + +export const zGetFalAiLtxVideoLoraMulticonditioningRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLtxVideoLoraMulticonditioningRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtxVideoLoraMulticonditioningRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLtxVideoLoraMulticonditioningRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtxVideoLoraMulticonditioningData = z.object({ + body: zSchemaLtxVideoLoraMulticonditioningInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiLtxVideoLoraMulticonditioningResponse = + zSchemaQueueStatus + +export const zGetFalAiLtxVideoLoraMulticonditioningRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiLtxVideoLoraMulticonditioningRequestsByRequestIdResponse = + zSchemaLtxVideoLoraMulticonditioningOutput + +export const zGetFalAiMagiExtendVideoRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiMagiExtendVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMagiExtendVideoRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiMagiExtendVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMagiExtendVideoData = z.object({ + body: zSchemaMagiExtendVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMagiExtendVideoResponse = zSchemaQueueStatus + +export const zGetFalAiMagiExtendVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiMagiExtendVideoRequestsByRequestIdResponse = + zSchemaMagiExtendVideoOutput + +export const zGetFalAiMagiDistilledExtendVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiMagiDistilledExtendVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMagiDistilledExtendVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiMagiDistilledExtendVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMagiDistilledExtendVideoData = z.object({ + body: zSchemaMagiDistilledExtendVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMagiDistilledExtendVideoResponse = zSchemaQueueStatus + +export const zGetFalAiMagiDistilledExtendVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiMagiDistilledExtendVideoRequestsByRequestIdResponse = + zSchemaMagiDistilledExtendVideoOutput + +export const zGetFalAiWanVaceRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiWanVaceRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiWanVaceRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiWanVaceRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiWanVaceData = z.object({ + body: zSchemaWanVaceInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiWanVaceResponse = zSchemaQueueStatus + +export const zGetFalAiWanVaceRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiWanVaceRequestsByRequestIdResponse = zSchemaWanVaceOutput + +export const zGetCassetteaiVideoSoundEffectsGeneratorRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetCassetteaiVideoSoundEffectsGeneratorRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutCassetteaiVideoSoundEffectsGeneratorRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutCassetteaiVideoSoundEffectsGeneratorRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostCassetteaiVideoSoundEffectsGeneratorData = z.object({ + body: zSchemaVideoSoundEffectsGeneratorInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostCassetteaiVideoSoundEffectsGeneratorResponse = + zSchemaQueueStatus + +export const zGetCassetteaiVideoSoundEffectsGeneratorRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetCassetteaiVideoSoundEffectsGeneratorRequestsByRequestIdResponse = + zSchemaVideoSoundEffectsGeneratorOutput + +export const zGetFalAiSyncLipsyncV2RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiSyncLipsyncV2RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSyncLipsyncV2RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiSyncLipsyncV2RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSyncLipsyncV2Data = z.object({ + body: zSchemaSyncLipsyncV2Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiSyncLipsyncV2Response = zSchemaQueueStatus + +export const zGetFalAiSyncLipsyncV2RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiSyncLipsyncV2RequestsByRequestIdResponse = + zSchemaSyncLipsyncV2Output + +export const zGetFalAiLatentsyncRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiLatentsyncRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLatentsyncRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiLatentsyncRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLatentsyncData = z.object({ + body: zSchemaLatentsyncInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLatentsyncResponse = zSchemaQueueStatus + +export const zGetFalAiLatentsyncRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLatentsyncRequestsByRequestIdResponse = + zSchemaLatentsyncOutput + +export const zGetFalAiPikaV2PikadditionsRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiPikaV2PikadditionsRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiPikaV2PikadditionsRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiPikaV2PikadditionsRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiPikaV2PikadditionsData = z.object({ + body: zSchemaPikaV2PikadditionsInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiPikaV2PikadditionsResponse = zSchemaQueueStatus + +export const zGetFalAiPikaV2PikadditionsRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiPikaV2PikadditionsRequestsByRequestIdResponse = + zSchemaPikaV2PikadditionsOutput + +export const zGetFalAiLtxVideoV095MulticonditioningRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLtxVideoV095MulticonditioningRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtxVideoV095MulticonditioningRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiLtxVideoV095MulticonditioningRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtxVideoV095MulticonditioningData = z.object({ + body: zSchemaLtxVideoV095MulticonditioningInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtxVideoV095MulticonditioningResponse = + zSchemaQueueStatus + +export const zGetFalAiLtxVideoV095MulticonditioningRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiLtxVideoV095MulticonditioningRequestsByRequestIdResponse = + zSchemaLtxVideoV095MulticonditioningOutput + +export const zGetFalAiLtxVideoV095ExtendRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiLtxVideoV095ExtendRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiLtxVideoV095ExtendRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiLtxVideoV095ExtendRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiLtxVideoV095ExtendData = z.object({ + body: zSchemaLtxVideoV095ExtendInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiLtxVideoV095ExtendResponse = zSchemaQueueStatus + +export const zGetFalAiLtxVideoV095ExtendRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiLtxVideoV095ExtendRequestsByRequestIdResponse = + zSchemaLtxVideoV095ExtendOutput + +export const zGetFalAiTopazUpscaleVideoRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiTopazUpscaleVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiTopazUpscaleVideoRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiTopazUpscaleVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiTopazUpscaleVideoData = z.object({ + body: zSchemaTopazUpscaleVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiTopazUpscaleVideoResponse = zSchemaQueueStatus + +export const zGetFalAiTopazUpscaleVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiTopazUpscaleVideoRequestsByRequestIdResponse = + zSchemaTopazUpscaleVideoOutput + +export const zGetFalAiBenV2VideoRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiBenV2VideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiBenV2VideoRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiBenV2VideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiBenV2VideoData = z.object({ + body: zSchemaBenV2VideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiBenV2VideoResponse = zSchemaQueueStatus + +export const zGetFalAiBenV2VideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiBenV2VideoRequestsByRequestIdResponse = + zSchemaBenV2VideoOutput + +export const zGetFalAiHunyuanVideoVideoToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiHunyuanVideoVideoToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiHunyuanVideoVideoToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiHunyuanVideoVideoToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiHunyuanVideoVideoToVideoData = z.object({ + body: zSchemaHunyuanVideoVideoToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiHunyuanVideoVideoToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiHunyuanVideoVideoToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiHunyuanVideoVideoToVideoRequestsByRequestIdResponse = + zSchemaHunyuanVideoVideoToVideoOutput + +export const zGetFalAiHunyuanVideoLoraVideoToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiHunyuanVideoLoraVideoToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiHunyuanVideoLoraVideoToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiHunyuanVideoLoraVideoToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiHunyuanVideoLoraVideoToVideoData = z.object({ + body: zSchemaHunyuanVideoLoraVideoToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiHunyuanVideoLoraVideoToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiHunyuanVideoLoraVideoToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiHunyuanVideoLoraVideoToVideoRequestsByRequestIdResponse = + zSchemaHunyuanVideoLoraVideoToVideoOutput + +export const zGetFalAiFfmpegApiComposeRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiFfmpegApiComposeRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFfmpegApiComposeRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiFfmpegApiComposeRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFfmpegApiComposeData = z.object({ + body: zSchemaFfmpegApiComposeInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiFfmpegApiComposeResponse = zSchemaQueueStatus + +export const zGetFalAiFfmpegApiComposeRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFfmpegApiComposeRequestsByRequestIdResponse = + zSchemaFfmpegApiComposeOutput + +export const zGetFalAiSyncLipsyncRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiSyncLipsyncRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSyncLipsyncRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiSyncLipsyncRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSyncLipsyncData = z.object({ + body: zSchemaSyncLipsyncInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiSyncLipsyncResponse = zSchemaQueueStatus + +export const zGetFalAiSyncLipsyncRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSyncLipsyncRequestsByRequestIdResponse = + zSchemaSyncLipsyncOutput + +export const zGetFalAiAutoCaptionRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiAutoCaptionRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiAutoCaptionRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiAutoCaptionRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiAutoCaptionData = z.object({ + body: zSchemaAutoCaptionInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiAutoCaptionResponse = zSchemaQueueStatus + +export const zGetFalAiAutoCaptionRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiAutoCaptionRequestsByRequestIdResponse = + zSchemaAutoCaptionOutput + +export const zGetFalAiDubbingRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiDubbingRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiDubbingRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiDubbingRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiDubbingData = z.object({ + body: zSchemaDubbingInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiDubbingResponse = zSchemaQueueStatus + +export const zGetFalAiDubbingRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiDubbingRequestsByRequestIdResponse = zSchemaDubbingOutput + +export const zGetFalAiVideoUpscalerRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiVideoUpscalerRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiVideoUpscalerRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiVideoUpscalerRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiVideoUpscalerData = z.object({ + body: zSchemaVideoUpscalerInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiVideoUpscalerResponse = zSchemaQueueStatus + +export const zGetFalAiVideoUpscalerRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiVideoUpscalerRequestsByRequestIdResponse = + zSchemaVideoUpscalerOutput + +export const zGetFalAiCogvideox5bVideoToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiCogvideox5bVideoToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiCogvideox5bVideoToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiCogvideox5bVideoToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiCogvideox5bVideoToVideoData = z.object({ + body: zSchemaCogvideox5bVideoToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiCogvideox5bVideoToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiCogvideox5bVideoToVideoRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. 
+ */ +export const zGetFalAiCogvideox5bVideoToVideoRequestsByRequestIdResponse = + zSchemaCogvideox5bVideoToVideoOutput + +export const zGetFalAiControlnextRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiControlnextRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiControlnextRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiControlnextRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiControlnextData = z.object({ + body: zSchemaControlnextInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiControlnextResponse = zSchemaQueueStatus + +export const zGetFalAiControlnextRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiControlnextRequestsByRequestIdResponse = + zSchemaControlnextOutput + +export const zGetFalAiSam2VideoRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiSam2VideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSam2VideoRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiSam2VideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSam2VideoData = z.object({ + body: zSchemaSam2VideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
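+ *
+ * @example
+ * // Hedged sketch (editor's addition): validate the request body with the
+ * // generated input schema before submitting, then parse the queue receipt.
+ * // `input` is whatever untyped payload you start from, and the endpoint path
+ * // is inferred from the schema name; both are assumptions to verify.
+ * const body = zSchemaSam2VideoInput.parse(input)
+ * const receipt = zPostFalAiSam2VideoResponse.parse(
+ *   await (
+ *     await fetch('https://queue.fal.run/fal-ai/sam2/video', {
+ *       method: 'POST',
+ *       headers: {
+ *         Authorization: `Key ${process.env.FAL_KEY}`,
+ *         'Content-Type': 'application/json',
+ *       },
+ *       body: JSON.stringify(body),
+ *     })
+ *   ).json(),
+ * )
+ * // receipt.request_id feeds the status, result, and cancel endpoints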
+ */ +export const zPostFalAiSam2VideoResponse = zSchemaQueueStatus + +export const zGetFalAiSam2VideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSam2VideoRequestsByRequestIdResponse = + zSchemaSam2VideoOutput + +export const zGetFalAiAmtInterpolationRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiAmtInterpolationRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiAmtInterpolationRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiAmtInterpolationRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiAmtInterpolationData = z.object({ + body: zSchemaAmtInterpolationInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiAmtInterpolationResponse = zSchemaQueueStatus + +export const zGetFalAiAmtInterpolationRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiAmtInterpolationRequestsByRequestIdResponse = + zSchemaAmtInterpolationOutput + +export const zGetFalAiFastAnimatediffTurboVideoToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFastAnimatediffTurboVideoToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFastAnimatediffTurboVideoToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
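+ *
+ * @example
+ * // Hedged sketch (editor's addition): cancel a queued request with PUT and
+ * // validate the acknowledgement. `statusResponse.cancel_url` (from a prior
+ * // status poll) and the auth header are assumptions, not generated output.
+ * const ack =
+ *   zPutFalAiFastAnimatediffTurboVideoToVideoRequestsByRequestIdCancelResponse.parse(
+ *     await (
+ *       await fetch(statusResponse.cancel_url!, {
+ *         method: 'PUT',
+ *         headers: { Authorization: `Key ${process.env.FAL_KEY}` },
+ *       })
+ *     ).json(),
+ *   )
+ * if (ack.success) {
+ *   // the request will not be processed
+ * }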
+ */ +export const zPutFalAiFastAnimatediffTurboVideoToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFastAnimatediffTurboVideoToVideoData = z.object({ + body: zSchemaFastAnimatediffTurboVideoToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFastAnimatediffTurboVideoToVideoResponse = + zSchemaQueueStatus + +export const zGetFalAiFastAnimatediffTurboVideoToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiFastAnimatediffTurboVideoToVideoRequestsByRequestIdResponse = + zSchemaFastAnimatediffTurboVideoToVideoOutput + +export const zGetFalAiFastAnimatediffVideoToVideoRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFastAnimatediffVideoToVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFastAnimatediffVideoToVideoRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFastAnimatediffVideoToVideoRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFastAnimatediffVideoToVideoData = z.object({ + body: zSchemaFastAnimatediffVideoToVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFastAnimatediffVideoToVideoResponse = zSchemaQueueStatus + +export const zGetFalAiFastAnimatediffVideoToVideoRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiFastAnimatediffVideoToVideoRequestsByRequestIdResponse = + zSchemaFastAnimatediffVideoToVideoOutput diff --git a/packages/typescript/ai-fal/src/generated/vision/endpoint-map.ts b/packages/typescript/ai-fal/src/generated/vision/endpoint-map.ts new file mode 100644 index 00000000..43955d1e --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/vision/endpoint-map.ts @@ -0,0 +1,438 @@ +// AUTO-GENERATED - Do not edit manually +// Generated from types.gen.ts via scripts/generate-fal-endpoint-maps.ts + +import { + zSchemaAiDetectorDetectImageInput, + zSchemaAiDetectorDetectImageOutput, + zSchemaArbiterImageImageInput, + zSchemaArbiterImageImageOutput, + zSchemaArbiterImageInput, + zSchemaArbiterImageOutput, + zSchemaArbiterImageTextInput, + zSchemaArbiterImageTextOutput, + zSchemaFlorence2LargeCaptionInput, + zSchemaFlorence2LargeCaptionOutput, + zSchemaFlorence2LargeDetailedCaptionInput, + zSchemaFlorence2LargeDetailedCaptionOutput, + zSchemaFlorence2LargeMoreDetailedCaptionInput, + zSchemaFlorence2LargeMoreDetailedCaptionOutput, + zSchemaFlorence2LargeOcrInput, + zSchemaFlorence2LargeOcrOutput, + zSchemaFlorence2LargeRegionToCategoryInput, + zSchemaFlorence2LargeRegionToCategoryOutput, + zSchemaFlorence2LargeRegionToDescriptionInput, + zSchemaFlorence2LargeRegionToDescriptionOutput, + zSchemaGotOcrV2Input, + zSchemaGotOcrV2Output, + zSchemaImageutilsNsfwInput, + zSchemaImageutilsNsfwOutput, + zSchemaIsaac01Input, + zSchemaIsaac01OpenaiV1ChatCompletionsInput, + zSchemaIsaac01OpenaiV1ChatCompletionsOutput, + zSchemaIsaac01Output, + zSchemaLlavaNextInput, + zSchemaLlavaNextOutput, + zSchemaMoondream2Input, + zSchemaMoondream2ObjectDetectionInput, + zSchemaMoondream2ObjectDetectionOutput, + zSchemaMoondream2Output, + zSchemaMoondream2PointObjectDetectionInput, + zSchemaMoondream2PointObjectDetectionOutput, + zSchemaMoondream2VisualQueryInput, + zSchemaMoondream2VisualQueryOutput, + zSchemaMoondream3PreviewCaptionInput, + zSchemaMoondream3PreviewCaptionOutput, + zSchemaMoondream3PreviewDetectInput, + zSchemaMoondream3PreviewDetectOutput, + zSchemaMoondream3PreviewPointInput, + zSchemaMoondream3PreviewPointOutput, + zSchemaMoondream3PreviewQueryInput, + zSchemaMoondream3PreviewQueryOutput, + zSchemaMoondreamBatchedInput, + zSchemaMoondreamBatchedOutput, + zSchemaMoondreamNextBatchInput, + zSchemaMoondreamNextBatchOutput, + zSchemaMoondreamNextInput, + zSchemaMoondreamNextOutput, + zSchemaRouterVisionInput, + zSchemaRouterVisionOutput, + zSchemaSa2Va4bImageInput, + zSchemaSa2Va4bImageOutput, + zSchemaSa2Va4bVideoInput, + zSchemaSa2Va4bVideoOutput, + zSchemaSa2Va8bImageInput, + zSchemaSa2Va8bImageOutput, + zSchemaSa2Va8bVideoInput, + zSchemaSa2Va8bVideoOutput, + zSchemaSam3ImageEmbedInput, + zSchemaSam3ImageEmbedOutput, + zSchemaVideoUnderstandingInput, + zSchemaVideoUnderstandingOutput, + zSchemaXAilabNsfwInput, + zSchemaXAilabNsfwOutput, +} from './zod.gen' + +import type { + SchemaAiDetectorDetectImageInput, + SchemaAiDetectorDetectImageOutput, + SchemaArbiterImageImageInput, + SchemaArbiterImageImageOutput, + SchemaArbiterImageInput, + SchemaArbiterImageOutput, + SchemaArbiterImageTextInput, + SchemaArbiterImageTextOutput, + SchemaFlorence2LargeCaptionInput, + SchemaFlorence2LargeCaptionOutput, + SchemaFlorence2LargeDetailedCaptionInput, + SchemaFlorence2LargeDetailedCaptionOutput, + SchemaFlorence2LargeMoreDetailedCaptionInput, + SchemaFlorence2LargeMoreDetailedCaptionOutput, + SchemaFlorence2LargeOcrInput, + SchemaFlorence2LargeOcrOutput, + 
SchemaFlorence2LargeRegionToCategoryInput, + SchemaFlorence2LargeRegionToCategoryOutput, + SchemaFlorence2LargeRegionToDescriptionInput, + SchemaFlorence2LargeRegionToDescriptionOutput, + SchemaGotOcrV2Input, + SchemaGotOcrV2Output, + SchemaImageutilsNsfwInput, + SchemaImageutilsNsfwOutput, + SchemaIsaac01Input, + SchemaIsaac01OpenaiV1ChatCompletionsInput, + SchemaIsaac01OpenaiV1ChatCompletionsOutput, + SchemaIsaac01Output, + SchemaLlavaNextInput, + SchemaLlavaNextOutput, + SchemaMoondream2Input, + SchemaMoondream2ObjectDetectionInput, + SchemaMoondream2ObjectDetectionOutput, + SchemaMoondream2Output, + SchemaMoondream2PointObjectDetectionInput, + SchemaMoondream2PointObjectDetectionOutput, + SchemaMoondream2VisualQueryInput, + SchemaMoondream2VisualQueryOutput, + SchemaMoondream3PreviewCaptionInput, + SchemaMoondream3PreviewCaptionOutput, + SchemaMoondream3PreviewDetectInput, + SchemaMoondream3PreviewDetectOutput, + SchemaMoondream3PreviewPointInput, + SchemaMoondream3PreviewPointOutput, + SchemaMoondream3PreviewQueryInput, + SchemaMoondream3PreviewQueryOutput, + SchemaMoondreamBatchedInput, + SchemaMoondreamBatchedOutput, + SchemaMoondreamNextBatchInput, + SchemaMoondreamNextBatchOutput, + SchemaMoondreamNextInput, + SchemaMoondreamNextOutput, + SchemaRouterVisionInput, + SchemaRouterVisionOutput, + SchemaSa2Va4bImageInput, + SchemaSa2Va4bImageOutput, + SchemaSa2Va4bVideoInput, + SchemaSa2Va4bVideoOutput, + SchemaSa2Va8bImageInput, + SchemaSa2Va8bImageOutput, + SchemaSa2Va8bVideoInput, + SchemaSa2Va8bVideoOutput, + SchemaSam3ImageEmbedInput, + SchemaSam3ImageEmbedOutput, + SchemaVideoUnderstandingInput, + SchemaVideoUnderstandingOutput, + SchemaXAilabNsfwInput, + SchemaXAilabNsfwOutput, +} from './types.gen' + +import type { z } from 'zod' + +export type VisionEndpointMap = { + 'fal-ai/arbiter/image/text': { + input: SchemaArbiterImageTextInput + output: SchemaArbiterImageTextOutput + } + 'fal-ai/arbiter/image/image': { + input: SchemaArbiterImageImageInput + output: SchemaArbiterImageImageOutput + } + 'fal-ai/arbiter/image': { + input: SchemaArbiterImageInput + output: SchemaArbiterImageOutput + } + 'half-moon-ai/ai-detector/detect-image': { + input: SchemaAiDetectorDetectImageInput + output: SchemaAiDetectorDetectImageOutput + } + 'fal-ai/sam-3/image/embed': { + input: SchemaSam3ImageEmbedInput + output: SchemaSam3ImageEmbedOutput + } + 'openrouter/router/vision': { + input: SchemaRouterVisionInput + output: SchemaRouterVisionOutput + } + 'fal-ai/moondream3-preview/detect': { + input: SchemaMoondream3PreviewDetectInput + output: SchemaMoondream3PreviewDetectOutput + } + 'fal-ai/moondream3-preview/point': { + input: SchemaMoondream3PreviewPointInput + output: SchemaMoondream3PreviewPointOutput + } + 'fal-ai/moondream3-preview/query': { + input: SchemaMoondream3PreviewQueryInput + output: SchemaMoondream3PreviewQueryOutput + } + 'fal-ai/moondream3-preview/caption': { + input: SchemaMoondream3PreviewCaptionInput + output: SchemaMoondream3PreviewCaptionOutput + } + 'perceptron/isaac-01/openai/v1/chat/completions': { + input: SchemaIsaac01OpenaiV1ChatCompletionsInput + output: SchemaIsaac01OpenaiV1ChatCompletionsOutput + } + 'perceptron/isaac-01': { + input: SchemaIsaac01Input + output: SchemaIsaac01Output + } + 'fal-ai/x-ailab/nsfw': { + input: SchemaXAilabNsfwInput + output: SchemaXAilabNsfwOutput + } + 'fal-ai/video-understanding': { + input: SchemaVideoUnderstandingInput + output: SchemaVideoUnderstandingOutput + } + 'fal-ai/moondream2/visual-query': { + input: 
SchemaMoondream2VisualQueryInput + output: SchemaMoondream2VisualQueryOutput + } + 'fal-ai/moondream2': { + input: SchemaMoondream2Input + output: SchemaMoondream2Output + } + 'fal-ai/moondream2/point-object-detection': { + input: SchemaMoondream2PointObjectDetectionInput + output: SchemaMoondream2PointObjectDetectionOutput + } + 'fal-ai/moondream2/object-detection': { + input: SchemaMoondream2ObjectDetectionInput + output: SchemaMoondream2ObjectDetectionOutput + } + 'fal-ai/got-ocr/v2': { + input: SchemaGotOcrV2Input + output: SchemaGotOcrV2Output + } + 'fal-ai/moondream-next/batch': { + input: SchemaMoondreamNextBatchInput + output: SchemaMoondreamNextBatchOutput + } + 'fal-ai/sa2va/4b/video': { + input: SchemaSa2Va4bVideoInput + output: SchemaSa2Va4bVideoOutput + } + 'fal-ai/sa2va/8b/video': { + input: SchemaSa2Va8bVideoInput + output: SchemaSa2Va8bVideoOutput + } + 'fal-ai/sa2va/4b/image': { + input: SchemaSa2Va4bImageInput + output: SchemaSa2Va4bImageOutput + } + 'fal-ai/sa2va/8b/image': { + input: SchemaSa2Va8bImageInput + output: SchemaSa2Va8bImageOutput + } + 'fal-ai/moondream-next': { + input: SchemaMoondreamNextInput + output: SchemaMoondreamNextOutput + } + 'fal-ai/florence-2-large/region-to-description': { + input: SchemaFlorence2LargeRegionToDescriptionInput + output: SchemaFlorence2LargeRegionToDescriptionOutput + } + 'fal-ai/florence-2-large/ocr': { + input: SchemaFlorence2LargeOcrInput + output: SchemaFlorence2LargeOcrOutput + } + 'fal-ai/florence-2-large/more-detailed-caption': { + input: SchemaFlorence2LargeMoreDetailedCaptionInput + output: SchemaFlorence2LargeMoreDetailedCaptionOutput + } + 'fal-ai/florence-2-large/region-to-category': { + input: SchemaFlorence2LargeRegionToCategoryInput + output: SchemaFlorence2LargeRegionToCategoryOutput + } + 'fal-ai/florence-2-large/caption': { + input: SchemaFlorence2LargeCaptionInput + output: SchemaFlorence2LargeCaptionOutput + } + 'fal-ai/florence-2-large/detailed-caption': { + input: SchemaFlorence2LargeDetailedCaptionInput + output: SchemaFlorence2LargeDetailedCaptionOutput + } + 'fal-ai/imageutils/nsfw': { + input: SchemaImageutilsNsfwInput + output: SchemaImageutilsNsfwOutput + } + 'fal-ai/moondream/batched': { + input: SchemaMoondreamBatchedInput + output: SchemaMoondreamBatchedOutput + } + 'fal-ai/llava-next': { + input: SchemaLlavaNextInput + output: SchemaLlavaNextOutput + } +} + +/** Union type of all vision model endpoint IDs */ +export type VisionModel = keyof VisionEndpointMap + +export const VisionSchemaMap: Record< + VisionModel, + { input: z.ZodSchema; output: z.ZodSchema } +> = { + ['fal-ai/arbiter/image/text']: { + input: zSchemaArbiterImageTextInput, + output: zSchemaArbiterImageTextOutput, + }, + ['fal-ai/arbiter/image/image']: { + input: zSchemaArbiterImageImageInput, + output: zSchemaArbiterImageImageOutput, + }, + ['fal-ai/arbiter/image']: { + input: zSchemaArbiterImageInput, + output: zSchemaArbiterImageOutput, + }, + ['half-moon-ai/ai-detector/detect-image']: { + input: zSchemaAiDetectorDetectImageInput, + output: zSchemaAiDetectorDetectImageOutput, + }, + ['fal-ai/sam-3/image/embed']: { + input: zSchemaSam3ImageEmbedInput, + output: zSchemaSam3ImageEmbedOutput, + }, + ['openrouter/router/vision']: { + input: zSchemaRouterVisionInput, + output: zSchemaRouterVisionOutput, + }, + ['fal-ai/moondream3-preview/detect']: { + input: zSchemaMoondream3PreviewDetectInput, + output: zSchemaMoondream3PreviewDetectOutput, + }, + ['fal-ai/moondream3-preview/point']: { + input: zSchemaMoondream3PreviewPointInput, + 
output: zSchemaMoondream3PreviewPointOutput, + }, + ['fal-ai/moondream3-preview/query']: { + input: zSchemaMoondream3PreviewQueryInput, + output: zSchemaMoondream3PreviewQueryOutput, + }, + ['fal-ai/moondream3-preview/caption']: { + input: zSchemaMoondream3PreviewCaptionInput, + output: zSchemaMoondream3PreviewCaptionOutput, + }, + ['perceptron/isaac-01/openai/v1/chat/completions']: { + input: zSchemaIsaac01OpenaiV1ChatCompletionsInput, + output: zSchemaIsaac01OpenaiV1ChatCompletionsOutput, + }, + ['perceptron/isaac-01']: { + input: zSchemaIsaac01Input, + output: zSchemaIsaac01Output, + }, + ['fal-ai/x-ailab/nsfw']: { + input: zSchemaXAilabNsfwInput, + output: zSchemaXAilabNsfwOutput, + }, + ['fal-ai/video-understanding']: { + input: zSchemaVideoUnderstandingInput, + output: zSchemaVideoUnderstandingOutput, + }, + ['fal-ai/moondream2/visual-query']: { + input: zSchemaMoondream2VisualQueryInput, + output: zSchemaMoondream2VisualQueryOutput, + }, + ['fal-ai/moondream2']: { + input: zSchemaMoondream2Input, + output: zSchemaMoondream2Output, + }, + ['fal-ai/moondream2/point-object-detection']: { + input: zSchemaMoondream2PointObjectDetectionInput, + output: zSchemaMoondream2PointObjectDetectionOutput, + }, + ['fal-ai/moondream2/object-detection']: { + input: zSchemaMoondream2ObjectDetectionInput, + output: zSchemaMoondream2ObjectDetectionOutput, + }, + ['fal-ai/got-ocr/v2']: { + input: zSchemaGotOcrV2Input, + output: zSchemaGotOcrV2Output, + }, + ['fal-ai/moondream-next/batch']: { + input: zSchemaMoondreamNextBatchInput, + output: zSchemaMoondreamNextBatchOutput, + }, + ['fal-ai/sa2va/4b/video']: { + input: zSchemaSa2Va4bVideoInput, + output: zSchemaSa2Va4bVideoOutput, + }, + ['fal-ai/sa2va/8b/video']: { + input: zSchemaSa2Va8bVideoInput, + output: zSchemaSa2Va8bVideoOutput, + }, + ['fal-ai/sa2va/4b/image']: { + input: zSchemaSa2Va4bImageInput, + output: zSchemaSa2Va4bImageOutput, + }, + ['fal-ai/sa2va/8b/image']: { + input: zSchemaSa2Va8bImageInput, + output: zSchemaSa2Va8bImageOutput, + }, + ['fal-ai/moondream-next']: { + input: zSchemaMoondreamNextInput, + output: zSchemaMoondreamNextOutput, + }, + ['fal-ai/florence-2-large/region-to-description']: { + input: zSchemaFlorence2LargeRegionToDescriptionInput, + output: zSchemaFlorence2LargeRegionToDescriptionOutput, + }, + ['fal-ai/florence-2-large/ocr']: { + input: zSchemaFlorence2LargeOcrInput, + output: zSchemaFlorence2LargeOcrOutput, + }, + ['fal-ai/florence-2-large/more-detailed-caption']: { + input: zSchemaFlorence2LargeMoreDetailedCaptionInput, + output: zSchemaFlorence2LargeMoreDetailedCaptionOutput, + }, + ['fal-ai/florence-2-large/region-to-category']: { + input: zSchemaFlorence2LargeRegionToCategoryInput, + output: zSchemaFlorence2LargeRegionToCategoryOutput, + }, + ['fal-ai/florence-2-large/caption']: { + input: zSchemaFlorence2LargeCaptionInput, + output: zSchemaFlorence2LargeCaptionOutput, + }, + ['fal-ai/florence-2-large/detailed-caption']: { + input: zSchemaFlorence2LargeDetailedCaptionInput, + output: zSchemaFlorence2LargeDetailedCaptionOutput, + }, + ['fal-ai/imageutils/nsfw']: { + input: zSchemaImageutilsNsfwInput, + output: zSchemaImageutilsNsfwOutput, + }, + ['fal-ai/moondream/batched']: { + input: zSchemaMoondreamBatchedInput, + output: zSchemaMoondreamBatchedOutput, + }, + ['fal-ai/llava-next']: { + input: zSchemaLlavaNextInput, + output: zSchemaLlavaNextOutput, + }, +} as const + +/** Get the input type for a specific vision model */ +export type VisionModelInput = + VisionEndpointMap[T]['input'] + +/** Get the output 
type for a specific vision model */ +export type VisionModelOutput = + VisionEndpointMap[T]['output'] diff --git a/packages/typescript/ai-fal/src/generated/vision/types.gen.ts b/packages/typescript/ai-fal/src/generated/vision/types.gen.ts new file mode 100644 index 00000000..624a806f --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/vision/types.gen.ts @@ -0,0 +1,4883 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: 'https://queue.fal.run' | (string & {}) +} + +/** + * LLavaOutput + */ +export type SchemaLlavaNextOutput = { + /** + * Partial + * + * Whether the output is partial + */ + partial?: boolean + /** + * Output + * + * Generated output + */ + output: string +} + +/** + * LLavaInput + */ +export type SchemaLlavaNextInput = { + /** + * Prompt + * + * Prompt to be used for the image + */ + prompt: string + /** + * Top P + * + * Top P for sampling + */ + top_p?: number + /** + * Max Tokens + * + * Maximum number of tokens to generate + */ + max_tokens?: number + /** + * Temperature + * + * Temperature for sampling + */ + temperature?: number + /** + * Image URL + * + * URL of the image to be processed + */ + image_url: string +} + +/** + * MoondreamInputParam + */ +export type SchemaMoondreamInputParam = { + /** + * Prompt + * + * Prompt to be used for the image + */ + prompt?: string + /** + * Image URL + * + * URL of the image to be processed + */ + image_url: string +} + +/** + * BatchedMoondreamOutput + */ +export type SchemaMoondreamBatchedOutput = { + /** + * Filenames + * + * Filenames of the images processed + */ + filenames?: Array | null + /** + * Outputs + * + * List of generated outputs + */ + outputs: Array + /** + * Partial + * + * Whether the output is partial + */ + partial?: boolean + /** + * Timings + * + * Timings for different parts of the process + */ + timings: { + [key: string]: number + } +} + +/** + * BatchedMoondreamInput + */ +export type SchemaMoondreamBatchedInput = { + /** + * Model ID + * + * Model ID to use for inference + */ + model_id?: 'vikhyatk/moondream2' | 'fal-ai/moondream2-docci' + /** + * Repetition Penalty + * + * Repetition penalty for sampling + */ + repetition_penalty?: number + /** + * Input prompt & image pairs + * + * List of input prompts and image URLs + */ + inputs: Array + /** + * Max Tokens + * + * Maximum number of new tokens to generate + */ + max_tokens?: number + /** + * Temperature + * + * Temperature for sampling + */ + temperature?: number + /** + * Top P + * + * Top P for sampling + */ + top_p?: number +} + +/** + * NSFWImageDetectionOutput + */ +export type SchemaImageutilsNsfwOutput = { + /** + * Nsfw Probability + * + * The probability of the image being NSFW. + */ + nsfw_probability: number +} + +/** + * NSFWImageDetectionInput + */ +export type SchemaImageutilsNsfwInput = { + /** + * Image Url + * + * Input image url. + */ + image_url: string +} + +/** + * TextOutput + */ +export type SchemaFlorence2LargeDetailedCaptionOutput = { + /** + * Results + * + * Results from the model + */ + results: string +} + +/** + * ImageInput + */ +export type SchemaFlorence2LargeDetailedCaptionInput = { + /** + * Image Url + * + * The URL of the image to be processed. 
+ */ + image_url: string +} + +/** + * TextOutput + */ +export type SchemaFlorence2LargeCaptionOutput = { + /** + * Results + * + * Results from the model + */ + results: string +} + +/** + * ImageInput + */ +export type SchemaFlorence2LargeCaptionInput = { + /** + * Image Url + * + * The URL of the image to be processed. + */ + image_url: string +} + +/** + * TextOutput + */ +export type SchemaFlorence2LargeRegionToCategoryOutput = { + /** + * Results + * + * Results from the model + */ + results: string +} + +/** + * ImageWithUserCoordinatesInput + */ +export type SchemaFlorence2LargeRegionToCategoryInput = { + /** + * Region + * + * The user input coordinates + */ + region: SchemaRegion + /** + * Image Url + * + * The URL of the image to be processed. + */ + image_url: string +} + +/** + * Region + */ +export type SchemaRegion = { + /** + * Y1 + * + * Y-coordinate of the top-left corner + */ + y1: number + /** + * X2 + * + * X-coordinate of the bottom-right corner + */ + x2: number + /** + * X1 + * + * X-coordinate of the top-left corner + */ + x1: number + /** + * Y2 + * + * Y-coordinate of the bottom-right corner + */ + y2: number +} + +/** + * TextOutput + */ +export type SchemaFlorence2LargeMoreDetailedCaptionOutput = { + /** + * Results + * + * Results from the model + */ + results: string +} + +/** + * ImageInput + */ +export type SchemaFlorence2LargeMoreDetailedCaptionInput = { + /** + * Image Url + * + * The URL of the image to be processed. + */ + image_url: string +} + +/** + * TextOutput + */ +export type SchemaFlorence2LargeOcrOutput = { + /** + * Results + * + * Results from the model + */ + results: string +} + +/** + * ImageInput + */ +export type SchemaFlorence2LargeOcrInput = { + /** + * Image Url + * + * The URL of the image to be processed. + */ + image_url: string +} + +/** + * TextOutput + */ +export type SchemaFlorence2LargeRegionToDescriptionOutput = { + /** + * Results + * + * Results from the model + */ + results: string +} + +/** + * ImageWithUserCoordinatesInput + */ +export type SchemaFlorence2LargeRegionToDescriptionInput = { + /** + * Region + * + * The user input coordinates + */ + region: SchemaRegion + /** + * Image Url + * + * The URL of the image to be processed. + */ + image_url: string +} + +/** + * MoonDreamOutput + */ +export type SchemaMoondreamNextOutput = { + /** + * Output + * + * Response from the model + */ + output: string +} + +/** + * QueryInput + */ +export type SchemaMoondreamNextInput = { + /** + * Prompt + * + * Prompt for query task + */ + prompt: string + /** + * Task Type + * + * Type of task to perform + */ + task_type?: 'caption' | 'query' + /** + * Max Tokens + * + * Maximum number of tokens to generate + */ + max_tokens?: number + /** + * Image URL + * + * Image URL to be processed + */ + image_url: string +} + +/** + * ImageChatOutput + */ +export type SchemaSa2Va8bImageOutput = { + /** + * Masks + * + * Dictionary of label: mask image + */ + masks: Array + /** + * Output + * + * Generated output + */ + output: string +} + +/** + * Image + * + * Represents an image file. + */ +export type SchemaImage = { + /** + * Height + * + * The height of the image in pixels. + */ + height?: number + /** + * File Size + * + * The size of the file in bytes. + */ + file_size?: number + /** + * Url + * + * The URL where the file can be downloaded from. + */ + url: string + /** + * Width + * + * The width of the image in pixels. + */ + width?: number + /** + * File Name + * + * The name of the file. It will be auto-generated if not provided. 
+ */ + file_name?: string + /** + * Content Type + * + * The mime type of the file. + */ + content_type?: string + /** + * File Data + * + * File data + */ + file_data?: Blob | File +} + +/** + * ImageInput + */ +export type SchemaSa2Va8bImageInput = { + /** + * Prompt + * + * Prompt to be used for the chat completion + */ + prompt: string + /** + * Image Url + * + * Url for the Input image. + */ + image_url: string +} + +/** + * ImageChatOutput + */ +export type SchemaSa2Va4bImageOutput = { + /** + * Masks + * + * Dictionary of label: mask image + */ + masks: Array + /** + * Output + * + * Generated output + */ + output: string +} + +/** + * ImageInput + */ +export type SchemaSa2Va4bImageInput = { + /** + * Prompt + * + * Prompt to be used for the chat completion + */ + prompt: string + /** + * Image Url + * + * Url for the Input image. + */ + image_url: string +} + +/** + * VideoChatOutput + */ +export type SchemaSa2Va8bVideoOutput = { + /** + * Masks + * + * Dictionary of label: mask video + */ + masks: Array + /** + * Output + * + * Generated output + */ + output: string +} + +/** + * File + */ +export type SchemaFile = { + /** + * File Size + * + * The size of the file in bytes. + */ + file_size?: number + /** + * File Name + * + * The name of the file. It will be auto-generated if not provided. + */ + file_name?: string + /** + * Content Type + * + * The mime type of the file. + */ + content_type?: string + /** + * Url + * + * The URL where the file can be downloaded from. + */ + url: string + /** + * File Data + * + * File data + */ + file_data?: Blob | File +} + +/** + * VideoInput + */ +export type SchemaSa2Va8bVideoInput = { + /** + * Prompt + * + * Prompt to be used for the chat completion + */ + prompt: string + /** + * Video Url + * + * The URL of the input video. + */ + video_url: string + /** + * Num Frames To Sample + * + * Number of frames to sample from the video. If not provided, all frames are sampled. + */ + num_frames_to_sample?: number +} + +/** + * VideoChatOutput + */ +export type SchemaSa2Va4bVideoOutput = { + /** + * Masks + * + * Dictionary of label: mask video + */ + masks: Array + /** + * Output + * + * Generated output + */ + output: string +} + +/** + * VideoInput + */ +export type SchemaSa2Va4bVideoInput = { + /** + * Prompt + * + * Prompt to be used for the chat completion + */ + prompt: string + /** + * Video Url + * + * The URL of the input video. + */ + video_url: string + /** + * Num Frames To Sample + * + * Number of frames to sample from the video. If not provided, all frames are sampled. + */ + num_frames_to_sample?: number +} + +/** + * BatchMoonDreamOutput + */ +export type SchemaMoondreamNextBatchOutput = { + /** + * Outputs + * + * List of generated captions + */ + outputs: Array + /** + * Captions File + * + * URL to the generated captions JSON file containing filename-caption pairs. 
+ */ + captions_file: SchemaFile +} + +/** + * BatchQueryInput + */ +export type SchemaMoondreamNextBatchInput = { + /** + * Prompt + * + * Single prompt to apply to all images + */ + prompt: string + /** + * Image URLs + * + * List of image URLs to be processed (maximum 32 images) + */ + images_data_url: string + /** + * Max Tokens + * + * Maximum number of tokens to generate + */ + max_tokens?: number +} + +/** + * ImageChatOutput + */ +export type SchemaGotOcrV2Output = { + /** + * Output + * + * Generated output + */ + outputs: Array +} + +/** + * ImageInput + */ +export type SchemaGotOcrV2Input = { + /** + * Do Format + * + * Generate the output in formatted mode. + */ + do_format?: boolean + /** + * Multi Page + * + * Use provided images to generate a single output. + */ + multi_page?: boolean + /** + * Input Image Urls + * + * URL of images. + */ + input_image_urls?: Array +} + +/** + * MoondreamObjectOutput + */ +export type SchemaMoondream2ObjectDetectionOutput = { + image: SchemaImage + /** + * Objects + * + * Objects detected in the image + */ + objects: Array<{ + [key: string]: unknown + }> +} + +/** + * MoondreamObjectInput + */ +export type SchemaMoondream2ObjectDetectionInput = { + /** + * Object + * + * Object to be detected in the image + */ + object: string + /** + * Image URL + * + * URL of the image to be processed + */ + image_url: string +} + +/** + * MoondreamObjectOutput + */ +export type SchemaMoondream2PointObjectDetectionOutput = { + image: SchemaImage + /** + * Objects + * + * Objects detected in the image + */ + objects: Array<{ + [key: string]: unknown + }> +} + +/** + * MoondreamObjectInput + */ +export type SchemaMoondream2PointObjectDetectionInput = { + /** + * Object + * + * Object to be detected in the image + */ + object: string + /** + * Image URL + * + * URL of the image to be processed + */ + image_url: string +} + +/** + * MoondreamOutput + */ +export type SchemaMoondream2Output = { + /** + * Output + * + * Output for the given query + */ + output: string +} + +/** + * MoondreamInput + */ +export type SchemaMoondream2Input = { + /** + * Image URL + * + * URL of the image to be processed + */ + image_url: string +} + +/** + * MoondreamOutput + */ +export type SchemaMoondream2VisualQueryOutput = { + /** + * Output + * + * Output for the given query + */ + output: string +} + +/** + * MoondreamQueryInput + */ +export type SchemaMoondream2VisualQueryInput = { + /** + * Query + * + * Query to be asked in the image + */ + prompt: string + /** + * Image URL + * + * URL of the image to be processed + */ + image_url: string +} + +/** + * VideoUnderstandingOutput + */ +export type SchemaVideoUnderstandingOutput = { + /** + * Output + * + * The analysis of the video content based on the prompt + */ + output: string +} + +/** + * VideoUnderstandingInput + */ +export type SchemaVideoUnderstandingInput = { + /** + * Detailed Analysis + * + * Whether to request a more detailed analysis of the video + */ + detailed_analysis?: boolean + /** + * Video Url + * + * URL of the video to analyze + */ + video_url: string + /** + * Prompt + * + * The question or prompt about the video content. + */ + prompt: string +} + +/** + * NSFWOutput + */ +export type SchemaXAilabNsfwOutput = { + /** + * Has Nsfw Concepts + * + * List of booleans indicating if the image has an NSFW concept + */ + has_nsfw_concepts: Array +} + +/** + * NSFWInput + */ +export type SchemaXAilabNsfwInput = { + /** + * Image Urls + * + * List of image URLs to check. 
If more than 10 images are provided, only the first 10 will be checked. + */ + image_urls: Array +} + +/** + * CompletionUsage + */ +export type SchemaCompletionUsage = { + /** + * Completion Tokens + * + * Number of tokens in the completion + */ + completion_tokens: number + /** + * Total Tokens + * + * Total tokens used + */ + total_tokens: number + /** + * Prompt Tokens + * + * Number of tokens in the prompt + */ + prompt_tokens: number +} + +/** + * ChatOutput + */ +export type SchemaIsaac01Output = { + /** + * Usage information + */ + usage?: SchemaCompletionUsage | unknown + /** + * Error + * + * Error message if an error occurred + */ + error?: string | unknown + /** + * Partial + * + * Whether the output is partial + */ + partial?: boolean + /** + * Output + * + * Generated output + */ + output: string +} + +/** + * VisionInput + */ +export type SchemaIsaac01Input = { + /** + * Prompt + * + * Prompt to be used for the image + */ + prompt: string + /** + * Response Style + * + * + * Response style to be used for the image. + * + * - text: Model will output text. Good for descriptions and captioning. + * - box: Model will output a combination of text and bounding boxes. Good for + * localization. + * - point: Model will output a combination of text and points. Good for counting many + * objects. + * - polygon: Model will output a combination of text and polygons. Good for granular + * segmentation. + * + */ + response_style?: 'text' | 'box' | 'point' | 'polygon' + /** + * Image Url + * + * Image URL to be processed + */ + image_url: string +} + +/** + * Schema referenced but not defined by fal.ai (missing from source OpenAPI spec) + */ +export type SchemaIsaac01OpenaiV1ChatCompletionsInput = { + [key: string]: unknown +} + +export type SchemaIsaac01OpenaiV1ChatCompletionsOutput = unknown + +/** + * MoondreamCaptionOutput + */ +export type SchemaMoondream3PreviewCaptionOutput = { + /** + * Finish Reason + * + * Reason for finishing the output generation + */ + finish_reason: string + /** + * Output + * + * Generated caption for the image + */ + output: string + /** + * Usage Info + * + * Usage information for the request + */ + usage_info: SchemaUsageInfo +} + +/** + * UsageInfo + */ +export type SchemaUsageInfo = { + /** + * Output Tokens + * + * Number of output tokens generated + */ + output_tokens: number + /** + * Decode Time Ms + * + * Time taken for decoding in milliseconds + */ + decode_time_ms: number + /** + * Input Tokens + * + * Number of input tokens processed + */ + input_tokens: number + /** + * Ttft Ms + * + * Time to first token in milliseconds + */ + ttft_ms: number + /** + * Prefill Time Ms + * + * Time taken for prefill in milliseconds + */ + prefill_time_ms: number +} + +/** + * MoondreamCaptionInput + */ +export type SchemaMoondream3PreviewCaptionInput = { + /** + * Top P + * + * Nucleus sampling probability mass to use, between 0 and 1. + */ + top_p?: number + /** + * Length + * + * Length of the caption to generate + */ + length?: 'short' | 'normal' | 'long' + /** + * Temperature + * + * Sampling temperature to use, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If not set, defaults to 0. 
+ */ + temperature?: number + /** + * Image URL + * + * URL of the image to be processed + * + * Max width: 7000px, Max height: 7000px, Timeout: 20.0s + */ + image_url: string +} + +/** + * MoondreamQueryOutput + */ +export type SchemaMoondream3PreviewQueryOutput = { + /** + * Finish Reason + * + * Reason for finishing the output generation + */ + finish_reason: string + /** + * Reasoning + * + * Detailed reasoning behind the answer, if enabled + */ + reasoning?: string + /** + * Output + * + * Answer to the query about the image + */ + output: string + /** + * Usage Info + * + * Usage information for the request + */ + usage_info: SchemaUsageInfo +} + +/** + * MoondreamQueryInput + */ +export type SchemaMoondream3PreviewQueryInput = { + /** + * Prompt + * + * Query to be asked in the image + */ + prompt: string + /** + * Top P + * + * Nucleus sampling probability mass to use, between 0 and 1. + */ + top_p?: number + /** + * Temperature + * + * Sampling temperature to use, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If not set, defaults to 0. + */ + temperature?: number + /** + * Reasoning + * + * Whether to include detailed reasoning behind the answer + */ + reasoning?: boolean + /** + * Image URL + * + * URL of the image to be processed + * + * Max width: 7000px, Max height: 7000px, Timeout: 20.0s + */ + image_url: string +} + +/** + * Point + */ +export type SchemaPoint = { + /** + * Y + * + * Y coordinate of the point in normalized format (0 to 1) + */ + y: number + /** + * X + * + * X coordinate of the point in normalized format (0 to 1) + */ + x: number +} + +/** + * MoondreamPointOutput + */ +export type SchemaMoondream3PreviewPointOutput = { + /** + * Points + * + * List of points marking the detected objects + */ + points: Array + /** + * Finish Reason + * + * Reason for finishing the output generation + */ + finish_reason: string + /** + * Image + * + * Image with points drawn on detected objects + */ + image?: SchemaImageFile + /** + * Usage Info + * + * Usage information for the request + */ + usage_info: SchemaUsageInfo +} + +/** + * ImageFile + */ +export type SchemaImageFile = { + /** + * Height + * + * The height of the image + */ + height?: number + /** + * File Size + * + * The size of the file in bytes. + */ + file_size?: number + /** + * Url + * + * The URL where the file can be downloaded from. + */ + url: string + /** + * Width + * + * The width of the image + */ + width?: number + /** + * File Name + * + * The name of the file. It will be auto-generated if not provided. + */ + file_name?: string + /** + * Content Type + * + * The mime type of the file. 
+ */ + content_type?: string + /** + * File Data + * + * File data + */ + file_data?: Blob | File +} + +/** + * MoondreamPointInput + */ +export type SchemaMoondream3PreviewPointInput = { + /** + * Prompt + * + * Object to be located in the image + */ + prompt: string + /** + * Preview + * + * Whether to preview the output + */ + preview?: boolean + /** + * Image URL + * + * URL of the image to be processed + * + * Max width: 7000px, Max height: 7000px, Timeout: 20.0s + */ + image_url: string +} + +/** + * Object + */ +export type SchemaObject = { + /** + * Y Min + * + * Top boundary of detection box in normalized format (0 to 1) + */ + y_min: number + /** + * X Max + * + * Right boundary of detection box in normalized format (0 to 1) + */ + x_max: number + /** + * X Min + * + * Left boundary of detection box in normalized format (0 to 1) + */ + x_min: number + /** + * Y Max + * + * Bottom boundary of detection box in normalized format (0 to 1) + */ + y_max: number +} + +/** + * MoondreamDetectOutput + */ +export type SchemaMoondream3PreviewDetectOutput = { + /** + * Finish Reason + * + * Reason for finishing the output generation + */ + finish_reason: string + /** + * Image + * + * Image with bounding boxes drawn around detected objects + */ + image?: SchemaImageFile + /** + * Objects + * + * List of detected objects with their bounding boxes + */ + objects: Array + /** + * Usage Info + * + * Usage information for the request + */ + usage_info: SchemaUsageInfo +} + +/** + * MoondreamDetectInput + */ +export type SchemaMoondream3PreviewDetectInput = { + /** + * Prompt + * + * Object to be detected in the image + */ + prompt: string + /** + * Preview + * + * Whether to preview the output + */ + preview?: boolean + /** + * Image URL + * + * URL of the image to be processed + * + * Max width: 7000px, Max height: 7000px, Timeout: 20.0s + */ + image_url: string +} + +/** + * VisionOutput + */ +export type SchemaRouterVisionOutput = { + /** + * Usage + * + * Token usage information + */ + usage?: SchemaUsageInfo + /** + * Output + * + * Generated output + */ + output: string +} + +/** + * VisionInput + */ +export type SchemaRouterVisionInput = { + /** + * Prompt + * + * Prompt to be used for the image + */ + prompt: string + /** + * System Prompt + * + * System prompt to provide context or instructions to the model + */ + system_prompt?: string + /** + * Reasoning + * + * Should reasoning be the part of the final answer. + */ + reasoning?: boolean + /** + * Model + * + * Name of the model to use. Charged based on actual token usage. + */ + model: string + /** + * Max Tokens + * + * This sets the upper limit for the number of tokens the model can generate in response. It won't produce more than this limit. The maximum value is the context length minus the prompt length. + */ + max_tokens?: number + /** + * Temperature + * + * This setting influences the variety in the model's responses. Lower values lead to more predictable and typical responses, while higher values encourage more diverse and less common responses. At 0, the model always gives the same response for a given input. + */ + temperature?: number + /** + * Image Urls + * + * List of image URLs to be processed + */ + image_urls: Array +} + +/** + * SAM3EmbeddingOutput + */ +export type SchemaSam3ImageEmbedOutput = { + /** + * Embedding B64 + * + * Embedding of the image + */ + embedding_b64: string +} + +/** + * SAM3EmbeddingInput + */ +export type SchemaSam3ImageEmbedInput = { + /** + * Image Url + * + * URL of the image to embed. 
+ */ + image_url: string +} + +/** + * AIImageDetectionOutput + */ +export type SchemaAiDetectorDetectImageOutput = { + /** + * Latency + */ + latency: number + /** + * Verdict + */ + verdict: string + /** + * Is Ai Generated + */ + is_ai_generated: boolean + /** + * Confidence + */ + confidence: number +} + +/** + * ImageDetectionInput + */ +export type SchemaAiDetectorDetectImageInput = { + /** + * Image Url + * + * URL pointing to an image to analyze for AI generation.(Max: 3000 characters) + */ + image_url: string +} + +/** + * ImageInput + */ +export type SchemaImageInput = { + /** + * Hypothesis + * + * The image to use for the measurement. + */ + hypothesis: string +} + +/** + * MultiMeasurementOutput + */ +export type SchemaArbiterImageOutput = { + /** + * Values + * + * The values of the measurements. + */ + values?: Array<{ + [key: string]: + | number + | { + [key: string]: number + } + }> +} + +/** + * ImageMultiMeasurementInput + */ +export type SchemaArbiterImageInput = { + /** + * Measurements + * + * The measurements to use for the measurement. + */ + measurements: Array<'arniqa' | 'clip_iqa' | 'musiq' | 'nima' | 'lapvar'> + /** + * Inputs + * + * The inputs to use for the measurement. + */ + inputs: Array +} + +/** + * ReferenceImageInput + */ +export type SchemaReferenceImageInput = { + /** + * Hypothesis + * + * The hypothesis image to use for the measurement. + */ + hypothesis: string + /** + * Reference + * + * The image to use for the measurement. + */ + reference: string +} + +/** + * MultiMeasurementOutput + */ +export type SchemaArbiterImageImageOutput = { + /** + * Values + * + * The values of the measurements. + */ + values?: Array<{ + [key: string]: + | number + | { + [key: string]: number + } + }> +} + +/** + * ImageReferenceMeasurementInput + */ +export type SchemaArbiterImageImageInput = { + /** + * Measurements + * + * The measurements to use for the measurement. + */ + measurements: Array<'dists' | 'mse' | 'lpips' | 'sdi' | 'ssim'> + /** + * Inputs + * + * The inputs to use for the measurement. + */ + inputs: Array +} + +/** + * SemanticImageInput + */ +export type SchemaSemanticImageInput = { + /** + * Hypothesis + * + * The hypothesis image to use for the measurement. + */ + hypothesis: string + /** + * Reference + * + * The text reference to use for the measurement. + */ + reference: string +} + +/** + * MultiMeasurementOutput + */ +export type SchemaArbiterImageTextOutput = { + /** + * Values + * + * The values of the measurements. + */ + values?: Array<{ + [key: string]: + | number + | { + [key: string]: number + } + }> +} + +/** + * SemanticImageMeasurementInput + */ +export type SchemaArbiterImageTextInput = { + /** + * Measurements + * + * The measurements to use for the measurement. + */ + measurements: Array<'clip_score'> + /** + * Inputs + * + * The inputs to use for the measurement. + */ + inputs: Array +} + +export type SchemaQueueStatus = { + status: 'IN_QUEUE' | 'IN_PROGRESS' | 'COMPLETED' + /** + * The request id. + */ + request_id: string + /** + * The response url. + */ + response_url?: string + /** + * The status url. + */ + status_url?: string + /** + * The cancel url. + */ + cancel_url?: string + /** + * The logs. + */ + logs?: { + [key: string]: unknown + } + /** + * The metrics. + */ + metrics?: { + [key: string]: unknown + } + /** + * The queue position. 
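+   *
+   * (Editor's note, an assumption not stated in this generated spec: the queue
+   * position is typically only meaningful while `status` is `IN_QUEUE`.)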
+ */ + queue_position?: number +} + +export type GetFalAiArbiterImageTextRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/arbiter/image/text/requests/{request_id}/status' +} + +export type GetFalAiArbiterImageTextRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiArbiterImageTextRequestsByRequestIdStatusResponse = + GetFalAiArbiterImageTextRequestsByRequestIdStatusResponses[keyof GetFalAiArbiterImageTextRequestsByRequestIdStatusResponses] + +export type PutFalAiArbiterImageTextRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/arbiter/image/text/requests/{request_id}/cancel' +} + +export type PutFalAiArbiterImageTextRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiArbiterImageTextRequestsByRequestIdCancelResponse = + PutFalAiArbiterImageTextRequestsByRequestIdCancelResponses[keyof PutFalAiArbiterImageTextRequestsByRequestIdCancelResponses] + +export type PostFalAiArbiterImageTextData = { + body: SchemaArbiterImageTextInput + path?: never + query?: never + url: '/fal-ai/arbiter/image/text' +} + +export type PostFalAiArbiterImageTextResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiArbiterImageTextResponse = + PostFalAiArbiterImageTextResponses[keyof PostFalAiArbiterImageTextResponses] + +export type GetFalAiArbiterImageTextRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/arbiter/image/text/requests/{request_id}' +} + +export type GetFalAiArbiterImageTextRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaArbiterImageTextOutput +} + +export type GetFalAiArbiterImageTextRequestsByRequestIdResponse = + GetFalAiArbiterImageTextRequestsByRequestIdResponses[keyof GetFalAiArbiterImageTextRequestsByRequestIdResponses] + +export type GetFalAiArbiterImageImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/arbiter/image/image/requests/{request_id}/status' +} + +export type GetFalAiArbiterImageImageRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiArbiterImageImageRequestsByRequestIdStatusResponse = + GetFalAiArbiterImageImageRequestsByRequestIdStatusResponses[keyof GetFalAiArbiterImageImageRequestsByRequestIdStatusResponses] + +export type PutFalAiArbiterImageImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/arbiter/image/image/requests/{request_id}/cancel' +} + +export type PutFalAiArbiterImageImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiArbiterImageImageRequestsByRequestIdCancelResponse = + PutFalAiArbiterImageImageRequestsByRequestIdCancelResponses[keyof PutFalAiArbiterImageImageRequestsByRequestIdCancelResponses] + +export type PostFalAiArbiterImageImageData = { + body: SchemaArbiterImageImageInput + path?: never + query?: never + url: '/fal-ai/arbiter/image/image' +} + +export type PostFalAiArbiterImageImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiArbiterImageImageResponse = + PostFalAiArbiterImageImageResponses[keyof PostFalAiArbiterImageImageResponses] + +export type GetFalAiArbiterImageImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/arbiter/image/image/requests/{request_id}' +} + +export type GetFalAiArbiterImageImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaArbiterImageImageOutput +} + +export type GetFalAiArbiterImageImageRequestsByRequestIdResponse = + GetFalAiArbiterImageImageRequestsByRequestIdResponses[keyof GetFalAiArbiterImageImageRequestsByRequestIdResponses] + +export type GetFalAiArbiterImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/arbiter/image/requests/{request_id}/status' +} + +export type GetFalAiArbiterImageRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiArbiterImageRequestsByRequestIdStatusResponse = + GetFalAiArbiterImageRequestsByRequestIdStatusResponses[keyof GetFalAiArbiterImageRequestsByRequestIdStatusResponses] + +export type PutFalAiArbiterImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/arbiter/image/requests/{request_id}/cancel' +} + +export type PutFalAiArbiterImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiArbiterImageRequestsByRequestIdCancelResponse = + PutFalAiArbiterImageRequestsByRequestIdCancelResponses[keyof PutFalAiArbiterImageRequestsByRequestIdCancelResponses] + +export type PostFalAiArbiterImageData = { + body: SchemaArbiterImageInput + path?: never + query?: never + url: '/fal-ai/arbiter/image' +} + +export type PostFalAiArbiterImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiArbiterImageResponse = + PostFalAiArbiterImageResponses[keyof PostFalAiArbiterImageResponses] + +export type GetFalAiArbiterImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/arbiter/image/requests/{request_id}' +} + +export type GetFalAiArbiterImageRequestsByRequestIdResponses = { + /** + * Result of the request. 
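+   *
+   * @example
+   * // Hedged sketch (editor's addition): the generated *Responses types key
+   * // payloads by HTTP status code, so a result type can be extracted by
+   * // indexed access, exactly as the sibling *Response aliases do.
+   * type Ok = GetFalAiArbiterImageRequestsByRequestIdResponses[200]
+   * //   ^ resolves to SchemaArbiterImageOutput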
+ */ + 200: SchemaArbiterImageOutput +} + +export type GetFalAiArbiterImageRequestsByRequestIdResponse = + GetFalAiArbiterImageRequestsByRequestIdResponses[keyof GetFalAiArbiterImageRequestsByRequestIdResponses] + +export type GetHalfMoonAiAiDetectorDetectImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/half-moon-ai/ai-detector/detect-image/requests/{request_id}/status' +} + +export type GetHalfMoonAiAiDetectorDetectImageRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetHalfMoonAiAiDetectorDetectImageRequestsByRequestIdStatusResponse = + GetHalfMoonAiAiDetectorDetectImageRequestsByRequestIdStatusResponses[keyof GetHalfMoonAiAiDetectorDetectImageRequestsByRequestIdStatusResponses] + +export type PutHalfMoonAiAiDetectorDetectImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/half-moon-ai/ai-detector/detect-image/requests/{request_id}/cancel' +} + +export type PutHalfMoonAiAiDetectorDetectImageRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutHalfMoonAiAiDetectorDetectImageRequestsByRequestIdCancelResponse = + PutHalfMoonAiAiDetectorDetectImageRequestsByRequestIdCancelResponses[keyof PutHalfMoonAiAiDetectorDetectImageRequestsByRequestIdCancelResponses] + +export type PostHalfMoonAiAiDetectorDetectImageData = { + body: SchemaAiDetectorDetectImageInput + path?: never + query?: never + url: '/half-moon-ai/ai-detector/detect-image' +} + +export type PostHalfMoonAiAiDetectorDetectImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostHalfMoonAiAiDetectorDetectImageResponse = + PostHalfMoonAiAiDetectorDetectImageResponses[keyof PostHalfMoonAiAiDetectorDetectImageResponses] + +export type GetHalfMoonAiAiDetectorDetectImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/half-moon-ai/ai-detector/detect-image/requests/{request_id}' +} + +export type GetHalfMoonAiAiDetectorDetectImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaAiDetectorDetectImageOutput +} + +export type GetHalfMoonAiAiDetectorDetectImageRequestsByRequestIdResponse = + GetHalfMoonAiAiDetectorDetectImageRequestsByRequestIdResponses[keyof GetHalfMoonAiAiDetectorDetectImageRequestsByRequestIdResponses] + +export type GetFalAiSam3ImageEmbedRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/sam-3/image/embed/requests/{request_id}/status' +} + +export type GetFalAiSam3ImageEmbedRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiSam3ImageEmbedRequestsByRequestIdStatusResponse = + GetFalAiSam3ImageEmbedRequestsByRequestIdStatusResponses[keyof GetFalAiSam3ImageEmbedRequestsByRequestIdStatusResponses] + +export type PutFalAiSam3ImageEmbedRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sam-3/image/embed/requests/{request_id}/cancel' +} + +export type PutFalAiSam3ImageEmbedRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiSam3ImageEmbedRequestsByRequestIdCancelResponse = + PutFalAiSam3ImageEmbedRequestsByRequestIdCancelResponses[keyof PutFalAiSam3ImageEmbedRequestsByRequestIdCancelResponses] + +export type PostFalAiSam3ImageEmbedData = { + body: SchemaSam3ImageEmbedInput + path?: never + query?: never + url: '/fal-ai/sam-3/image/embed' +} + +export type PostFalAiSam3ImageEmbedResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSam3ImageEmbedResponse = + PostFalAiSam3ImageEmbedResponses[keyof PostFalAiSam3ImageEmbedResponses] + +export type GetFalAiSam3ImageEmbedRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sam-3/image/embed/requests/{request_id}' +} + +export type GetFalAiSam3ImageEmbedRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSam3ImageEmbedOutput +} + +export type GetFalAiSam3ImageEmbedRequestsByRequestIdResponse = + GetFalAiSam3ImageEmbedRequestsByRequestIdResponses[keyof GetFalAiSam3ImageEmbedRequestsByRequestIdResponses] + +export type GetOpenrouterRouterVisionRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/openrouter/router/vision/requests/{request_id}/status' +} + +export type GetOpenrouterRouterVisionRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetOpenrouterRouterVisionRequestsByRequestIdStatusResponse = + GetOpenrouterRouterVisionRequestsByRequestIdStatusResponses[keyof GetOpenrouterRouterVisionRequestsByRequestIdStatusResponses] + +export type PutOpenrouterRouterVisionRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/openrouter/router/vision/requests/{request_id}/cancel' +} + +export type PutOpenrouterRouterVisionRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutOpenrouterRouterVisionRequestsByRequestIdCancelResponse = + PutOpenrouterRouterVisionRequestsByRequestIdCancelResponses[keyof PutOpenrouterRouterVisionRequestsByRequestIdCancelResponses] + +export type PostOpenrouterRouterVisionData = { + body: SchemaRouterVisionInput + path?: never + query?: never + url: '/openrouter/router/vision' +} + +export type PostOpenrouterRouterVisionResponses = { + /** + * The request status. 
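+ *
+ * @example
+ * // A minimal submit sketch (illustrative only, not part of the generated
+ * // schema): enqueue a request by POSTing the input payload; the immediate
+ * // response is the queue status, not the model output. Assumes fal's
+ * // queue base URL `https://queue.fal.run` and a `FAL_KEY` environment
+ * // variable; `input` is a `SchemaRouterVisionInput` value.
+ * const res = await fetch('https://queue.fal.run/openrouter/router/vision', {
+ *   method: 'POST',
+ *   headers: {
+ *     Authorization: `Key ${process.env.FAL_KEY}`,
+ *     'Content-Type': 'application/json',
+ *   },
+ *   body: JSON.stringify(input),
+ * })
+ * const queued: SchemaQueueStatus = await res.json()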
+ */ + 200: SchemaQueueStatus +} + +export type PostOpenrouterRouterVisionResponse = + PostOpenrouterRouterVisionResponses[keyof PostOpenrouterRouterVisionResponses] + +export type GetOpenrouterRouterVisionRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/openrouter/router/vision/requests/{request_id}' +} + +export type GetOpenrouterRouterVisionRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaRouterVisionOutput +} + +export type GetOpenrouterRouterVisionRequestsByRequestIdResponse = + GetOpenrouterRouterVisionRequestsByRequestIdResponses[keyof GetOpenrouterRouterVisionRequestsByRequestIdResponses] + +export type GetFalAiMoondream3PreviewDetectRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/moondream3-preview/detect/requests/{request_id}/status' +} + +export type GetFalAiMoondream3PreviewDetectRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiMoondream3PreviewDetectRequestsByRequestIdStatusResponse = + GetFalAiMoondream3PreviewDetectRequestsByRequestIdStatusResponses[keyof GetFalAiMoondream3PreviewDetectRequestsByRequestIdStatusResponses] + +export type PutFalAiMoondream3PreviewDetectRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/moondream3-preview/detect/requests/{request_id}/cancel' +} + +export type PutFalAiMoondream3PreviewDetectRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiMoondream3PreviewDetectRequestsByRequestIdCancelResponse = + PutFalAiMoondream3PreviewDetectRequestsByRequestIdCancelResponses[keyof PutFalAiMoondream3PreviewDetectRequestsByRequestIdCancelResponses] + +export type PostFalAiMoondream3PreviewDetectData = { + body: SchemaMoondream3PreviewDetectInput + path?: never + query?: never + url: '/fal-ai/moondream3-preview/detect' +} + +export type PostFalAiMoondream3PreviewDetectResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMoondream3PreviewDetectResponse = + PostFalAiMoondream3PreviewDetectResponses[keyof PostFalAiMoondream3PreviewDetectResponses] + +export type GetFalAiMoondream3PreviewDetectRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/moondream3-preview/detect/requests/{request_id}' +} + +export type GetFalAiMoondream3PreviewDetectRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMoondream3PreviewDetectOutput +} + +export type GetFalAiMoondream3PreviewDetectRequestsByRequestIdResponse = + GetFalAiMoondream3PreviewDetectRequestsByRequestIdResponses[keyof GetFalAiMoondream3PreviewDetectRequestsByRequestIdResponses] + +export type GetFalAiMoondream3PreviewPointRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/moondream3-preview/point/requests/{request_id}/status' +} + +export type GetFalAiMoondream3PreviewPointRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiMoondream3PreviewPointRequestsByRequestIdStatusResponse = + GetFalAiMoondream3PreviewPointRequestsByRequestIdStatusResponses[keyof GetFalAiMoondream3PreviewPointRequestsByRequestIdStatusResponses] + +export type PutFalAiMoondream3PreviewPointRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/moondream3-preview/point/requests/{request_id}/cancel' +} + +export type PutFalAiMoondream3PreviewPointRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiMoondream3PreviewPointRequestsByRequestIdCancelResponse = + PutFalAiMoondream3PreviewPointRequestsByRequestIdCancelResponses[keyof PutFalAiMoondream3PreviewPointRequestsByRequestIdCancelResponses] + +export type PostFalAiMoondream3PreviewPointData = { + body: SchemaMoondream3PreviewPointInput + path?: never + query?: never + url: '/fal-ai/moondream3-preview/point' +} + +export type PostFalAiMoondream3PreviewPointResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMoondream3PreviewPointResponse = + PostFalAiMoondream3PreviewPointResponses[keyof PostFalAiMoondream3PreviewPointResponses] + +export type GetFalAiMoondream3PreviewPointRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/moondream3-preview/point/requests/{request_id}' +} + +export type GetFalAiMoondream3PreviewPointRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMoondream3PreviewPointOutput +} + +export type GetFalAiMoondream3PreviewPointRequestsByRequestIdResponse = + GetFalAiMoondream3PreviewPointRequestsByRequestIdResponses[keyof GetFalAiMoondream3PreviewPointRequestsByRequestIdResponses] + +export type GetFalAiMoondream3PreviewQueryRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/moondream3-preview/query/requests/{request_id}/status' +} + +export type GetFalAiMoondream3PreviewQueryRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiMoondream3PreviewQueryRequestsByRequestIdStatusResponse = + GetFalAiMoondream3PreviewQueryRequestsByRequestIdStatusResponses[keyof GetFalAiMoondream3PreviewQueryRequestsByRequestIdStatusResponses] + +export type PutFalAiMoondream3PreviewQueryRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/moondream3-preview/query/requests/{request_id}/cancel' +} + +export type PutFalAiMoondream3PreviewQueryRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
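+ *
+ * @example
+ * // A minimal cancel sketch (illustrative only, not part of the generated
+ * // schema): issue a PUT to the cancel URL for a queued request and read
+ * // the `success` flag from the 200 body. Assumes fal's queue base URL
+ * // `https://queue.fal.run`, a `FAL_KEY` environment variable, and a
+ * // `requestId` from a prior submit call.
+ * const res = await fetch(
+ *   `https://queue.fal.run/fal-ai/moondream3-preview/query/requests/${requestId}/cancel`,
+ *   { method: 'PUT', headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+ * )
+ * const { success } = await res.json()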
+ */ + success?: boolean + } +} + +export type PutFalAiMoondream3PreviewQueryRequestsByRequestIdCancelResponse = + PutFalAiMoondream3PreviewQueryRequestsByRequestIdCancelResponses[keyof PutFalAiMoondream3PreviewQueryRequestsByRequestIdCancelResponses] + +export type PostFalAiMoondream3PreviewQueryData = { + body: SchemaMoondream3PreviewQueryInput + path?: never + query?: never + url: '/fal-ai/moondream3-preview/query' +} + +export type PostFalAiMoondream3PreviewQueryResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMoondream3PreviewQueryResponse = + PostFalAiMoondream3PreviewQueryResponses[keyof PostFalAiMoondream3PreviewQueryResponses] + +export type GetFalAiMoondream3PreviewQueryRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/moondream3-preview/query/requests/{request_id}' +} + +export type GetFalAiMoondream3PreviewQueryRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMoondream3PreviewQueryOutput +} + +export type GetFalAiMoondream3PreviewQueryRequestsByRequestIdResponse = + GetFalAiMoondream3PreviewQueryRequestsByRequestIdResponses[keyof GetFalAiMoondream3PreviewQueryRequestsByRequestIdResponses] + +export type GetFalAiMoondream3PreviewCaptionRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/moondream3-preview/caption/requests/{request_id}/status' +} + +export type GetFalAiMoondream3PreviewCaptionRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiMoondream3PreviewCaptionRequestsByRequestIdStatusResponse = + GetFalAiMoondream3PreviewCaptionRequestsByRequestIdStatusResponses[keyof GetFalAiMoondream3PreviewCaptionRequestsByRequestIdStatusResponses] + +export type PutFalAiMoondream3PreviewCaptionRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/moondream3-preview/caption/requests/{request_id}/cancel' +} + +export type PutFalAiMoondream3PreviewCaptionRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiMoondream3PreviewCaptionRequestsByRequestIdCancelResponse = + PutFalAiMoondream3PreviewCaptionRequestsByRequestIdCancelResponses[keyof PutFalAiMoondream3PreviewCaptionRequestsByRequestIdCancelResponses] + +export type PostFalAiMoondream3PreviewCaptionData = { + body: SchemaMoondream3PreviewCaptionInput + path?: never + query?: never + url: '/fal-ai/moondream3-preview/caption' +} + +export type PostFalAiMoondream3PreviewCaptionResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMoondream3PreviewCaptionResponse = + PostFalAiMoondream3PreviewCaptionResponses[keyof PostFalAiMoondream3PreviewCaptionResponses] + +export type GetFalAiMoondream3PreviewCaptionRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/moondream3-preview/caption/requests/{request_id}' +} + +export type GetFalAiMoondream3PreviewCaptionRequestsByRequestIdResponses = { + /** + * Result of the request. 
+ */ + 200: SchemaMoondream3PreviewCaptionOutput +} + +export type GetFalAiMoondream3PreviewCaptionRequestsByRequestIdResponse = + GetFalAiMoondream3PreviewCaptionRequestsByRequestIdResponses[keyof GetFalAiMoondream3PreviewCaptionRequestsByRequestIdResponses] + +export type GetPerceptronIsaac01OpenaiV1ChatCompletionsRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/perceptron/isaac-01/openai/v1/chat/completions/requests/{request_id}/status' + } + +export type GetPerceptronIsaac01OpenaiV1ChatCompletionsRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetPerceptronIsaac01OpenaiV1ChatCompletionsRequestsByRequestIdStatusResponse = + GetPerceptronIsaac01OpenaiV1ChatCompletionsRequestsByRequestIdStatusResponses[keyof GetPerceptronIsaac01OpenaiV1ChatCompletionsRequestsByRequestIdStatusResponses] + +export type PutPerceptronIsaac01OpenaiV1ChatCompletionsRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/perceptron/isaac-01/openai/v1/chat/completions/requests/{request_id}/cancel' + } + +export type PutPerceptronIsaac01OpenaiV1ChatCompletionsRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutPerceptronIsaac01OpenaiV1ChatCompletionsRequestsByRequestIdCancelResponse = + PutPerceptronIsaac01OpenaiV1ChatCompletionsRequestsByRequestIdCancelResponses[keyof PutPerceptronIsaac01OpenaiV1ChatCompletionsRequestsByRequestIdCancelResponses] + +export type PostPerceptronIsaac01OpenaiV1ChatCompletionsData = { + body: SchemaIsaac01OpenaiV1ChatCompletionsInput + path?: never + query?: never + url: '/perceptron/isaac-01/openai/v1/chat/completions' +} + +export type PostPerceptronIsaac01OpenaiV1ChatCompletionsResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostPerceptronIsaac01OpenaiV1ChatCompletionsResponse = + PostPerceptronIsaac01OpenaiV1ChatCompletionsResponses[keyof PostPerceptronIsaac01OpenaiV1ChatCompletionsResponses] + +export type GetPerceptronIsaac01OpenaiV1ChatCompletionsRequestsByRequestIdData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/perceptron/isaac-01/openai/v1/chat/completions/requests/{request_id}' + } + +export type GetPerceptronIsaac01OpenaiV1ChatCompletionsRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaIsaac01OpenaiV1ChatCompletionsOutput + } + +export type GetPerceptronIsaac01OpenaiV1ChatCompletionsRequestsByRequestIdResponse = + GetPerceptronIsaac01OpenaiV1ChatCompletionsRequestsByRequestIdResponses[keyof GetPerceptronIsaac01OpenaiV1ChatCompletionsRequestsByRequestIdResponses] + +export type GetPerceptronIsaac01RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/perceptron/isaac-01/requests/{request_id}/status' +} + +export type GetPerceptronIsaac01RequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetPerceptronIsaac01RequestsByRequestIdStatusResponse = + GetPerceptronIsaac01RequestsByRequestIdStatusResponses[keyof GetPerceptronIsaac01RequestsByRequestIdStatusResponses] + +export type PutPerceptronIsaac01RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/perceptron/isaac-01/requests/{request_id}/cancel' +} + +export type PutPerceptronIsaac01RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutPerceptronIsaac01RequestsByRequestIdCancelResponse = + PutPerceptronIsaac01RequestsByRequestIdCancelResponses[keyof PutPerceptronIsaac01RequestsByRequestIdCancelResponses] + +export type PostPerceptronIsaac01Data = { + body: SchemaIsaac01Input + path?: never + query?: never + url: '/perceptron/isaac-01' +} + +export type PostPerceptronIsaac01Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostPerceptronIsaac01Response = + PostPerceptronIsaac01Responses[keyof PostPerceptronIsaac01Responses] + +export type GetPerceptronIsaac01RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/perceptron/isaac-01/requests/{request_id}' +} + +export type GetPerceptronIsaac01RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaIsaac01Output +} + +export type GetPerceptronIsaac01RequestsByRequestIdResponse = + GetPerceptronIsaac01RequestsByRequestIdResponses[keyof GetPerceptronIsaac01RequestsByRequestIdResponses] + +export type GetFalAiXAilabNsfwRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/x-ailab/nsfw/requests/{request_id}/status' +} + +export type GetFalAiXAilabNsfwRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiXAilabNsfwRequestsByRequestIdStatusResponse = + GetFalAiXAilabNsfwRequestsByRequestIdStatusResponses[keyof GetFalAiXAilabNsfwRequestsByRequestIdStatusResponses] + +export type PutFalAiXAilabNsfwRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/x-ailab/nsfw/requests/{request_id}/cancel' +} + +export type PutFalAiXAilabNsfwRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiXAilabNsfwRequestsByRequestIdCancelResponse = + PutFalAiXAilabNsfwRequestsByRequestIdCancelResponses[keyof PutFalAiXAilabNsfwRequestsByRequestIdCancelResponses] + +export type PostFalAiXAilabNsfwData = { + body: SchemaXAilabNsfwInput + path?: never + query?: never + url: '/fal-ai/x-ailab/nsfw' +} + +export type PostFalAiXAilabNsfwResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiXAilabNsfwResponse = + PostFalAiXAilabNsfwResponses[keyof PostFalAiXAilabNsfwResponses] + +export type GetFalAiXAilabNsfwRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/x-ailab/nsfw/requests/{request_id}' +} + +export type GetFalAiXAilabNsfwRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaXAilabNsfwOutput +} + +export type GetFalAiXAilabNsfwRequestsByRequestIdResponse = + GetFalAiXAilabNsfwRequestsByRequestIdResponses[keyof GetFalAiXAilabNsfwRequestsByRequestIdResponses] + +export type GetFalAiVideoUnderstandingRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/video-understanding/requests/{request_id}/status' +} + +export type GetFalAiVideoUnderstandingRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiVideoUnderstandingRequestsByRequestIdStatusResponse = + GetFalAiVideoUnderstandingRequestsByRequestIdStatusResponses[keyof GetFalAiVideoUnderstandingRequestsByRequestIdStatusResponses] + +export type PutFalAiVideoUnderstandingRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/video-understanding/requests/{request_id}/cancel' +} + +export type PutFalAiVideoUnderstandingRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiVideoUnderstandingRequestsByRequestIdCancelResponse = + PutFalAiVideoUnderstandingRequestsByRequestIdCancelResponses[keyof PutFalAiVideoUnderstandingRequestsByRequestIdCancelResponses] + +export type PostFalAiVideoUnderstandingData = { + body: SchemaVideoUnderstandingInput + path?: never + query?: never + url: '/fal-ai/video-understanding' +} + +export type PostFalAiVideoUnderstandingResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiVideoUnderstandingResponse = + PostFalAiVideoUnderstandingResponses[keyof PostFalAiVideoUnderstandingResponses] + +export type GetFalAiVideoUnderstandingRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/video-understanding/requests/{request_id}' +} + +export type GetFalAiVideoUnderstandingRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaVideoUnderstandingOutput +} + +export type GetFalAiVideoUnderstandingRequestsByRequestIdResponse = + GetFalAiVideoUnderstandingRequestsByRequestIdResponses[keyof GetFalAiVideoUnderstandingRequestsByRequestIdResponses] + +export type GetFalAiMoondream2VisualQueryRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/moondream2/visual-query/requests/{request_id}/status' +} + +export type GetFalAiMoondream2VisualQueryRequestsByRequestIdStatusResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type GetFalAiMoondream2VisualQueryRequestsByRequestIdStatusResponse = + GetFalAiMoondream2VisualQueryRequestsByRequestIdStatusResponses[keyof GetFalAiMoondream2VisualQueryRequestsByRequestIdStatusResponses] + +export type PutFalAiMoondream2VisualQueryRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/moondream2/visual-query/requests/{request_id}/cancel' +} + +export type PutFalAiMoondream2VisualQueryRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiMoondream2VisualQueryRequestsByRequestIdCancelResponse = + PutFalAiMoondream2VisualQueryRequestsByRequestIdCancelResponses[keyof PutFalAiMoondream2VisualQueryRequestsByRequestIdCancelResponses] + +export type PostFalAiMoondream2VisualQueryData = { + body: SchemaMoondream2VisualQueryInput + path?: never + query?: never + url: '/fal-ai/moondream2/visual-query' +} + +export type PostFalAiMoondream2VisualQueryResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMoondream2VisualQueryResponse = + PostFalAiMoondream2VisualQueryResponses[keyof PostFalAiMoondream2VisualQueryResponses] + +export type GetFalAiMoondream2VisualQueryRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/moondream2/visual-query/requests/{request_id}' +} + +export type GetFalAiMoondream2VisualQueryRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMoondream2VisualQueryOutput +} + +export type GetFalAiMoondream2VisualQueryRequestsByRequestIdResponse = + GetFalAiMoondream2VisualQueryRequestsByRequestIdResponses[keyof GetFalAiMoondream2VisualQueryRequestsByRequestIdResponses] + +export type GetFalAiMoondream2RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/moondream2/requests/{request_id}/status' +} + +export type GetFalAiMoondream2RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiMoondream2RequestsByRequestIdStatusResponse = + GetFalAiMoondream2RequestsByRequestIdStatusResponses[keyof GetFalAiMoondream2RequestsByRequestIdStatusResponses] + +export type PutFalAiMoondream2RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/moondream2/requests/{request_id}/cancel' +} + +export type PutFalAiMoondream2RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiMoondream2RequestsByRequestIdCancelResponse = + PutFalAiMoondream2RequestsByRequestIdCancelResponses[keyof PutFalAiMoondream2RequestsByRequestIdCancelResponses] + +export type PostFalAiMoondream2Data = { + body: SchemaMoondream2Input + path?: never + query?: never + url: '/fal-ai/moondream2' +} + +export type PostFalAiMoondream2Responses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiMoondream2Response = + PostFalAiMoondream2Responses[keyof PostFalAiMoondream2Responses] + +export type GetFalAiMoondream2RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/moondream2/requests/{request_id}' +} + +export type GetFalAiMoondream2RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMoondream2Output +} + +export type GetFalAiMoondream2RequestsByRequestIdResponse = + GetFalAiMoondream2RequestsByRequestIdResponses[keyof GetFalAiMoondream2RequestsByRequestIdResponses] + +export type GetFalAiMoondream2PointObjectDetectionRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/moondream2/point-object-detection/requests/{request_id}/status' + } + +export type GetFalAiMoondream2PointObjectDetectionRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiMoondream2PointObjectDetectionRequestsByRequestIdStatusResponse = + GetFalAiMoondream2PointObjectDetectionRequestsByRequestIdStatusResponses[keyof GetFalAiMoondream2PointObjectDetectionRequestsByRequestIdStatusResponses] + +export type PutFalAiMoondream2PointObjectDetectionRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/moondream2/point-object-detection/requests/{request_id}/cancel' + } + +export type PutFalAiMoondream2PointObjectDetectionRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiMoondream2PointObjectDetectionRequestsByRequestIdCancelResponse = + PutFalAiMoondream2PointObjectDetectionRequestsByRequestIdCancelResponses[keyof PutFalAiMoondream2PointObjectDetectionRequestsByRequestIdCancelResponses] + +export type PostFalAiMoondream2PointObjectDetectionData = { + body: SchemaMoondream2PointObjectDetectionInput + path?: never + query?: never + url: '/fal-ai/moondream2/point-object-detection' +} + +export type PostFalAiMoondream2PointObjectDetectionResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMoondream2PointObjectDetectionResponse = + PostFalAiMoondream2PointObjectDetectionResponses[keyof PostFalAiMoondream2PointObjectDetectionResponses] + +export type GetFalAiMoondream2PointObjectDetectionRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/moondream2/point-object-detection/requests/{request_id}' +} + +export type GetFalAiMoondream2PointObjectDetectionRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaMoondream2PointObjectDetectionOutput + } + +export type GetFalAiMoondream2PointObjectDetectionRequestsByRequestIdResponse = + GetFalAiMoondream2PointObjectDetectionRequestsByRequestIdResponses[keyof GetFalAiMoondream2PointObjectDetectionRequestsByRequestIdResponses] + +export type GetFalAiMoondream2ObjectDetectionRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/moondream2/object-detection/requests/{request_id}/status' +} + +export type GetFalAiMoondream2ObjectDetectionRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiMoondream2ObjectDetectionRequestsByRequestIdStatusResponse = + GetFalAiMoondream2ObjectDetectionRequestsByRequestIdStatusResponses[keyof GetFalAiMoondream2ObjectDetectionRequestsByRequestIdStatusResponses] + +export type PutFalAiMoondream2ObjectDetectionRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/moondream2/object-detection/requests/{request_id}/cancel' +} + +export type PutFalAiMoondream2ObjectDetectionRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiMoondream2ObjectDetectionRequestsByRequestIdCancelResponse = + PutFalAiMoondream2ObjectDetectionRequestsByRequestIdCancelResponses[keyof PutFalAiMoondream2ObjectDetectionRequestsByRequestIdCancelResponses] + +export type PostFalAiMoondream2ObjectDetectionData = { + body: SchemaMoondream2ObjectDetectionInput + path?: never + query?: never + url: '/fal-ai/moondream2/object-detection' +} + +export type PostFalAiMoondream2ObjectDetectionResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMoondream2ObjectDetectionResponse = + PostFalAiMoondream2ObjectDetectionResponses[keyof PostFalAiMoondream2ObjectDetectionResponses] + +export type GetFalAiMoondream2ObjectDetectionRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/moondream2/object-detection/requests/{request_id}' +} + +export type GetFalAiMoondream2ObjectDetectionRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMoondream2ObjectDetectionOutput +} + +export type GetFalAiMoondream2ObjectDetectionRequestsByRequestIdResponse = + GetFalAiMoondream2ObjectDetectionRequestsByRequestIdResponses[keyof GetFalAiMoondream2ObjectDetectionRequestsByRequestIdResponses] + +export type GetFalAiGotOcrV2RequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/got-ocr/v2/requests/{request_id}/status' +} + +export type GetFalAiGotOcrV2RequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiGotOcrV2RequestsByRequestIdStatusResponse = + GetFalAiGotOcrV2RequestsByRequestIdStatusResponses[keyof GetFalAiGotOcrV2RequestsByRequestIdStatusResponses] + +export type PutFalAiGotOcrV2RequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/got-ocr/v2/requests/{request_id}/cancel' +} + +export type PutFalAiGotOcrV2RequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiGotOcrV2RequestsByRequestIdCancelResponse = + PutFalAiGotOcrV2RequestsByRequestIdCancelResponses[keyof PutFalAiGotOcrV2RequestsByRequestIdCancelResponses] + +export type PostFalAiGotOcrV2Data = { + body: SchemaGotOcrV2Input + path?: never + query?: never + url: '/fal-ai/got-ocr/v2' +} + +export type PostFalAiGotOcrV2Responses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiGotOcrV2Response = + PostFalAiGotOcrV2Responses[keyof PostFalAiGotOcrV2Responses] + +export type GetFalAiGotOcrV2RequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/got-ocr/v2/requests/{request_id}' +} + +export type GetFalAiGotOcrV2RequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaGotOcrV2Output +} + +export type GetFalAiGotOcrV2RequestsByRequestIdResponse = + GetFalAiGotOcrV2RequestsByRequestIdResponses[keyof GetFalAiGotOcrV2RequestsByRequestIdResponses] + +export type GetFalAiMoondreamNextBatchRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/moondream-next/batch/requests/{request_id}/status' +} + +export type GetFalAiMoondreamNextBatchRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiMoondreamNextBatchRequestsByRequestIdStatusResponse = + GetFalAiMoondreamNextBatchRequestsByRequestIdStatusResponses[keyof GetFalAiMoondreamNextBatchRequestsByRequestIdStatusResponses] + +export type PutFalAiMoondreamNextBatchRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/moondream-next/batch/requests/{request_id}/cancel' +} + +export type PutFalAiMoondreamNextBatchRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiMoondreamNextBatchRequestsByRequestIdCancelResponse = + PutFalAiMoondreamNextBatchRequestsByRequestIdCancelResponses[keyof PutFalAiMoondreamNextBatchRequestsByRequestIdCancelResponses] + +export type PostFalAiMoondreamNextBatchData = { + body: SchemaMoondreamNextBatchInput + path?: never + query?: never + url: '/fal-ai/moondream-next/batch' +} + +export type PostFalAiMoondreamNextBatchResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMoondreamNextBatchResponse = + PostFalAiMoondreamNextBatchResponses[keyof PostFalAiMoondreamNextBatchResponses] + +export type GetFalAiMoondreamNextBatchRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/moondream-next/batch/requests/{request_id}' +} + +export type GetFalAiMoondreamNextBatchRequestsByRequestIdResponses = { + /** + * Result of the request. 
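+ *
+ * @example
+ * // A minimal result-retrieval sketch (illustrative only, not part of the
+ * // generated schema): once the status is COMPLETED, GET the request URL
+ * // for the typed model output. Assumes fal's queue base URL
+ * // `https://queue.fal.run`, a `FAL_KEY` environment variable, and a
+ * // `requestId` from a prior submit call.
+ * const res = await fetch(
+ *   `https://queue.fal.run/fal-ai/moondream-next/batch/requests/${requestId}`,
+ *   { headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+ * )
+ * const output: SchemaMoondreamNextBatchOutput = await res.json()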
+ */ + 200: SchemaMoondreamNextBatchOutput +} + +export type GetFalAiMoondreamNextBatchRequestsByRequestIdResponse = + GetFalAiMoondreamNextBatchRequestsByRequestIdResponses[keyof GetFalAiMoondreamNextBatchRequestsByRequestIdResponses] + +export type GetFalAiSa2Va4bVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/sa2va/4b/video/requests/{request_id}/status' +} + +export type GetFalAiSa2Va4bVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSa2Va4bVideoRequestsByRequestIdStatusResponse = + GetFalAiSa2Va4bVideoRequestsByRequestIdStatusResponses[keyof GetFalAiSa2Va4bVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiSa2Va4bVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sa2va/4b/video/requests/{request_id}/cancel' +} + +export type PutFalAiSa2Va4bVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiSa2Va4bVideoRequestsByRequestIdCancelResponse = + PutFalAiSa2Va4bVideoRequestsByRequestIdCancelResponses[keyof PutFalAiSa2Va4bVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiSa2Va4bVideoData = { + body: SchemaSa2Va4bVideoInput + path?: never + query?: never + url: '/fal-ai/sa2va/4b/video' +} + +export type PostFalAiSa2Va4bVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSa2Va4bVideoResponse = + PostFalAiSa2Va4bVideoResponses[keyof PostFalAiSa2Va4bVideoResponses] + +export type GetFalAiSa2Va4bVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sa2va/4b/video/requests/{request_id}' +} + +export type GetFalAiSa2Va4bVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSa2Va4bVideoOutput +} + +export type GetFalAiSa2Va4bVideoRequestsByRequestIdResponse = + GetFalAiSa2Va4bVideoRequestsByRequestIdResponses[keyof GetFalAiSa2Va4bVideoRequestsByRequestIdResponses] + +export type GetFalAiSa2Va8bVideoRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/sa2va/8b/video/requests/{request_id}/status' +} + +export type GetFalAiSa2Va8bVideoRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSa2Va8bVideoRequestsByRequestIdStatusResponse = + GetFalAiSa2Va8bVideoRequestsByRequestIdStatusResponses[keyof GetFalAiSa2Va8bVideoRequestsByRequestIdStatusResponses] + +export type PutFalAiSa2Va8bVideoRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sa2va/8b/video/requests/{request_id}/cancel' +} + +export type PutFalAiSa2Va8bVideoRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } +} + +export type PutFalAiSa2Va8bVideoRequestsByRequestIdCancelResponse = + PutFalAiSa2Va8bVideoRequestsByRequestIdCancelResponses[keyof PutFalAiSa2Va8bVideoRequestsByRequestIdCancelResponses] + +export type PostFalAiSa2Va8bVideoData = { + body: SchemaSa2Va8bVideoInput + path?: never + query?: never + url: '/fal-ai/sa2va/8b/video' +} + +export type PostFalAiSa2Va8bVideoResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSa2Va8bVideoResponse = + PostFalAiSa2Va8bVideoResponses[keyof PostFalAiSa2Va8bVideoResponses] + +export type GetFalAiSa2Va8bVideoRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sa2va/8b/video/requests/{request_id}' +} + +export type GetFalAiSa2Va8bVideoRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSa2Va8bVideoOutput +} + +export type GetFalAiSa2Va8bVideoRequestsByRequestIdResponse = + GetFalAiSa2Va8bVideoRequestsByRequestIdResponses[keyof GetFalAiSa2Va8bVideoRequestsByRequestIdResponses] + +export type GetFalAiSa2Va4bImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/sa2va/4b/image/requests/{request_id}/status' +} + +export type GetFalAiSa2Va4bImageRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSa2Va4bImageRequestsByRequestIdStatusResponse = + GetFalAiSa2Va4bImageRequestsByRequestIdStatusResponses[keyof GetFalAiSa2Va4bImageRequestsByRequestIdStatusResponses] + +export type PutFalAiSa2Va4bImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sa2va/4b/image/requests/{request_id}/cancel' +} + +export type PutFalAiSa2Va4bImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiSa2Va4bImageRequestsByRequestIdCancelResponse = + PutFalAiSa2Va4bImageRequestsByRequestIdCancelResponses[keyof PutFalAiSa2Va4bImageRequestsByRequestIdCancelResponses] + +export type PostFalAiSa2Va4bImageData = { + body: SchemaSa2Va4bImageInput + path?: never + query?: never + url: '/fal-ai/sa2va/4b/image' +} + +export type PostFalAiSa2Va4bImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSa2Va4bImageResponse = + PostFalAiSa2Va4bImageResponses[keyof PostFalAiSa2Va4bImageResponses] + +export type GetFalAiSa2Va4bImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sa2va/4b/image/requests/{request_id}' +} + +export type GetFalAiSa2Va4bImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSa2Va4bImageOutput +} + +export type GetFalAiSa2Va4bImageRequestsByRequestIdResponse = + GetFalAiSa2Va4bImageRequestsByRequestIdResponses[keyof GetFalAiSa2Va4bImageRequestsByRequestIdResponses] + +export type GetFalAiSa2Va8bImageRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). 
+ */ + logs?: number + } + url: '/fal-ai/sa2va/8b/image/requests/{request_id}/status' +} + +export type GetFalAiSa2Va8bImageRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiSa2Va8bImageRequestsByRequestIdStatusResponse = + GetFalAiSa2Va8bImageRequestsByRequestIdStatusResponses[keyof GetFalAiSa2Va8bImageRequestsByRequestIdStatusResponses] + +export type PutFalAiSa2Va8bImageRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sa2va/8b/image/requests/{request_id}/cancel' +} + +export type PutFalAiSa2Va8bImageRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiSa2Va8bImageRequestsByRequestIdCancelResponse = + PutFalAiSa2Va8bImageRequestsByRequestIdCancelResponses[keyof PutFalAiSa2Va8bImageRequestsByRequestIdCancelResponses] + +export type PostFalAiSa2Va8bImageData = { + body: SchemaSa2Va8bImageInput + path?: never + query?: never + url: '/fal-ai/sa2va/8b/image' +} + +export type PostFalAiSa2Va8bImageResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiSa2Va8bImageResponse = + PostFalAiSa2Va8bImageResponses[keyof PostFalAiSa2Va8bImageResponses] + +export type GetFalAiSa2Va8bImageRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/sa2va/8b/image/requests/{request_id}' +} + +export type GetFalAiSa2Va8bImageRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaSa2Va8bImageOutput +} + +export type GetFalAiSa2Va8bImageRequestsByRequestIdResponse = + GetFalAiSa2Va8bImageRequestsByRequestIdResponses[keyof GetFalAiSa2Va8bImageRequestsByRequestIdResponses] + +export type GetFalAiMoondreamNextRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/moondream-next/requests/{request_id}/status' +} + +export type GetFalAiMoondreamNextRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiMoondreamNextRequestsByRequestIdStatusResponse = + GetFalAiMoondreamNextRequestsByRequestIdStatusResponses[keyof GetFalAiMoondreamNextRequestsByRequestIdStatusResponses] + +export type PutFalAiMoondreamNextRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/moondream-next/requests/{request_id}/cancel' +} + +export type PutFalAiMoondreamNextRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiMoondreamNextRequestsByRequestIdCancelResponse = + PutFalAiMoondreamNextRequestsByRequestIdCancelResponses[keyof PutFalAiMoondreamNextRequestsByRequestIdCancelResponses] + +export type PostFalAiMoondreamNextData = { + body: SchemaMoondreamNextInput + path?: never + query?: never + url: '/fal-ai/moondream-next' +} + +export type PostFalAiMoondreamNextResponses = { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiMoondreamNextResponse = + PostFalAiMoondreamNextResponses[keyof PostFalAiMoondreamNextResponses] + +export type GetFalAiMoondreamNextRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/moondream-next/requests/{request_id}' +} + +export type GetFalAiMoondreamNextRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMoondreamNextOutput +} + +export type GetFalAiMoondreamNextRequestsByRequestIdResponse = + GetFalAiMoondreamNextRequestsByRequestIdResponses[keyof GetFalAiMoondreamNextRequestsByRequestIdResponses] + +export type GetFalAiFlorence2LargeRegionToDescriptionRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/florence-2-large/region-to-description/requests/{request_id}/status' + } + +export type GetFalAiFlorence2LargeRegionToDescriptionRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFlorence2LargeRegionToDescriptionRequestsByRequestIdStatusResponse = + GetFalAiFlorence2LargeRegionToDescriptionRequestsByRequestIdStatusResponses[keyof GetFalAiFlorence2LargeRegionToDescriptionRequestsByRequestIdStatusResponses] + +export type PutFalAiFlorence2LargeRegionToDescriptionRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/florence-2-large/region-to-description/requests/{request_id}/cancel' + } + +export type PutFalAiFlorence2LargeRegionToDescriptionRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFlorence2LargeRegionToDescriptionRequestsByRequestIdCancelResponse = + PutFalAiFlorence2LargeRegionToDescriptionRequestsByRequestIdCancelResponses[keyof PutFalAiFlorence2LargeRegionToDescriptionRequestsByRequestIdCancelResponses] + +export type PostFalAiFlorence2LargeRegionToDescriptionData = { + body: SchemaFlorence2LargeRegionToDescriptionInput + path?: never + query?: never + url: '/fal-ai/florence-2-large/region-to-description' +} + +export type PostFalAiFlorence2LargeRegionToDescriptionResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlorence2LargeRegionToDescriptionResponse = + PostFalAiFlorence2LargeRegionToDescriptionResponses[keyof PostFalAiFlorence2LargeRegionToDescriptionResponses] + +export type GetFalAiFlorence2LargeRegionToDescriptionRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/florence-2-large/region-to-description/requests/{request_id}' +} + +export type GetFalAiFlorence2LargeRegionToDescriptionRequestsByRequestIdResponses = + { + /** + * Result of the request. 
+ */ + 200: SchemaFlorence2LargeRegionToDescriptionOutput + } + +export type GetFalAiFlorence2LargeRegionToDescriptionRequestsByRequestIdResponse = + GetFalAiFlorence2LargeRegionToDescriptionRequestsByRequestIdResponses[keyof GetFalAiFlorence2LargeRegionToDescriptionRequestsByRequestIdResponses] + +export type GetFalAiFlorence2LargeOcrRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/florence-2-large/ocr/requests/{request_id}/status' +} + +export type GetFalAiFlorence2LargeOcrRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlorence2LargeOcrRequestsByRequestIdStatusResponse = + GetFalAiFlorence2LargeOcrRequestsByRequestIdStatusResponses[keyof GetFalAiFlorence2LargeOcrRequestsByRequestIdStatusResponses] + +export type PutFalAiFlorence2LargeOcrRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/florence-2-large/ocr/requests/{request_id}/cancel' +} + +export type PutFalAiFlorence2LargeOcrRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFlorence2LargeOcrRequestsByRequestIdCancelResponse = + PutFalAiFlorence2LargeOcrRequestsByRequestIdCancelResponses[keyof PutFalAiFlorence2LargeOcrRequestsByRequestIdCancelResponses] + +export type PostFalAiFlorence2LargeOcrData = { + body: SchemaFlorence2LargeOcrInput + path?: never + query?: never + url: '/fal-ai/florence-2-large/ocr' +} + +export type PostFalAiFlorence2LargeOcrResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlorence2LargeOcrResponse = + PostFalAiFlorence2LargeOcrResponses[keyof PostFalAiFlorence2LargeOcrResponses] + +export type GetFalAiFlorence2LargeOcrRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/florence-2-large/ocr/requests/{request_id}' +} + +export type GetFalAiFlorence2LargeOcrRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlorence2LargeOcrOutput +} + +export type GetFalAiFlorence2LargeOcrRequestsByRequestIdResponse = + GetFalAiFlorence2LargeOcrRequestsByRequestIdResponses[keyof GetFalAiFlorence2LargeOcrRequestsByRequestIdResponses] + +export type GetFalAiFlorence2LargeMoreDetailedCaptionRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/florence-2-large/more-detailed-caption/requests/{request_id}/status' + } + +export type GetFalAiFlorence2LargeMoreDetailedCaptionRequestsByRequestIdStatusResponses = + { + /** + * The request status. 
+ */ + 200: SchemaQueueStatus + } + +export type GetFalAiFlorence2LargeMoreDetailedCaptionRequestsByRequestIdStatusResponse = + GetFalAiFlorence2LargeMoreDetailedCaptionRequestsByRequestIdStatusResponses[keyof GetFalAiFlorence2LargeMoreDetailedCaptionRequestsByRequestIdStatusResponses] + +export type PutFalAiFlorence2LargeMoreDetailedCaptionRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/florence-2-large/more-detailed-caption/requests/{request_id}/cancel' + } + +export type PutFalAiFlorence2LargeMoreDetailedCaptionRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFlorence2LargeMoreDetailedCaptionRequestsByRequestIdCancelResponse = + PutFalAiFlorence2LargeMoreDetailedCaptionRequestsByRequestIdCancelResponses[keyof PutFalAiFlorence2LargeMoreDetailedCaptionRequestsByRequestIdCancelResponses] + +export type PostFalAiFlorence2LargeMoreDetailedCaptionData = { + body: SchemaFlorence2LargeMoreDetailedCaptionInput + path?: never + query?: never + url: '/fal-ai/florence-2-large/more-detailed-caption' +} + +export type PostFalAiFlorence2LargeMoreDetailedCaptionResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlorence2LargeMoreDetailedCaptionResponse = + PostFalAiFlorence2LargeMoreDetailedCaptionResponses[keyof PostFalAiFlorence2LargeMoreDetailedCaptionResponses] + +export type GetFalAiFlorence2LargeMoreDetailedCaptionRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/florence-2-large/more-detailed-caption/requests/{request_id}' +} + +export type GetFalAiFlorence2LargeMoreDetailedCaptionRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaFlorence2LargeMoreDetailedCaptionOutput + } + +export type GetFalAiFlorence2LargeMoreDetailedCaptionRequestsByRequestIdResponse = + GetFalAiFlorence2LargeMoreDetailedCaptionRequestsByRequestIdResponses[keyof GetFalAiFlorence2LargeMoreDetailedCaptionRequestsByRequestIdResponses] + +export type GetFalAiFlorence2LargeRegionToCategoryRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/florence-2-large/region-to-category/requests/{request_id}/status' + } + +export type GetFalAiFlorence2LargeRegionToCategoryRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFlorence2LargeRegionToCategoryRequestsByRequestIdStatusResponse = + GetFalAiFlorence2LargeRegionToCategoryRequestsByRequestIdStatusResponses[keyof GetFalAiFlorence2LargeRegionToCategoryRequestsByRequestIdStatusResponses] + +export type PutFalAiFlorence2LargeRegionToCategoryRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/florence-2-large/region-to-category/requests/{request_id}/cancel' + } + +export type PutFalAiFlorence2LargeRegionToCategoryRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. 
+ */ + success?: boolean + } + } + +export type PutFalAiFlorence2LargeRegionToCategoryRequestsByRequestIdCancelResponse = + PutFalAiFlorence2LargeRegionToCategoryRequestsByRequestIdCancelResponses[keyof PutFalAiFlorence2LargeRegionToCategoryRequestsByRequestIdCancelResponses] + +export type PostFalAiFlorence2LargeRegionToCategoryData = { + body: SchemaFlorence2LargeRegionToCategoryInput + path?: never + query?: never + url: '/fal-ai/florence-2-large/region-to-category' +} + +export type PostFalAiFlorence2LargeRegionToCategoryResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlorence2LargeRegionToCategoryResponse = + PostFalAiFlorence2LargeRegionToCategoryResponses[keyof PostFalAiFlorence2LargeRegionToCategoryResponses] + +export type GetFalAiFlorence2LargeRegionToCategoryRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/florence-2-large/region-to-category/requests/{request_id}' +} + +export type GetFalAiFlorence2LargeRegionToCategoryRequestsByRequestIdResponses = + { + /** + * Result of the request. + */ + 200: SchemaFlorence2LargeRegionToCategoryOutput + } + +export type GetFalAiFlorence2LargeRegionToCategoryRequestsByRequestIdResponse = + GetFalAiFlorence2LargeRegionToCategoryRequestsByRequestIdResponses[keyof GetFalAiFlorence2LargeRegionToCategoryRequestsByRequestIdResponses] + +export type GetFalAiFlorence2LargeCaptionRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/florence-2-large/caption/requests/{request_id}/status' +} + +export type GetFalAiFlorence2LargeCaptionRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiFlorence2LargeCaptionRequestsByRequestIdStatusResponse = + GetFalAiFlorence2LargeCaptionRequestsByRequestIdStatusResponses[keyof GetFalAiFlorence2LargeCaptionRequestsByRequestIdStatusResponses] + +export type PutFalAiFlorence2LargeCaptionRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/florence-2-large/caption/requests/{request_id}/cancel' +} + +export type PutFalAiFlorence2LargeCaptionRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiFlorence2LargeCaptionRequestsByRequestIdCancelResponse = + PutFalAiFlorence2LargeCaptionRequestsByRequestIdCancelResponses[keyof PutFalAiFlorence2LargeCaptionRequestsByRequestIdCancelResponses] + +export type PostFalAiFlorence2LargeCaptionData = { + body: SchemaFlorence2LargeCaptionInput + path?: never + query?: never + url: '/fal-ai/florence-2-large/caption' +} + +export type PostFalAiFlorence2LargeCaptionResponses = { + /** + * The request status. 
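+ *
+ * @example
+ * // Submission sketch: POST the input to the queue route and keep the returned
+ * // `request_id` for the status/result/cancel routes. The base URL and `Key`
+ * // auth scheme are assumptions, as in the polling example above.
+ * const body: SchemaFlorence2LargeCaptionInput = {
+ *   image_url: 'https://example.com/photo.jpg',
+ * }
+ * const res = await fetch('https://queue.fal.run/fal-ai/florence-2-large/caption', {
+ *   method: 'POST',
+ *   headers: {
+ *     'Content-Type': 'application/json',
+ *     Authorization: `Key ${process.env.FAL_KEY}`,
+ *   },
+ *   body: JSON.stringify(body),
+ * })
+ * const queued: PostFalAiFlorence2LargeCaptionResponse = await res.json()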
+ */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlorence2LargeCaptionResponse = + PostFalAiFlorence2LargeCaptionResponses[keyof PostFalAiFlorence2LargeCaptionResponses] + +export type GetFalAiFlorence2LargeCaptionRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/florence-2-large/caption/requests/{request_id}' +} + +export type GetFalAiFlorence2LargeCaptionRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaFlorence2LargeCaptionOutput +} + +export type GetFalAiFlorence2LargeCaptionRequestsByRequestIdResponse = + GetFalAiFlorence2LargeCaptionRequestsByRequestIdResponses[keyof GetFalAiFlorence2LargeCaptionRequestsByRequestIdResponses] + +export type GetFalAiFlorence2LargeDetailedCaptionRequestsByRequestIdStatusData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/florence-2-large/detailed-caption/requests/{request_id}/status' + } + +export type GetFalAiFlorence2LargeDetailedCaptionRequestsByRequestIdStatusResponses = + { + /** + * The request status. + */ + 200: SchemaQueueStatus + } + +export type GetFalAiFlorence2LargeDetailedCaptionRequestsByRequestIdStatusResponse = + GetFalAiFlorence2LargeDetailedCaptionRequestsByRequestIdStatusResponses[keyof GetFalAiFlorence2LargeDetailedCaptionRequestsByRequestIdStatusResponses] + +export type PutFalAiFlorence2LargeDetailedCaptionRequestsByRequestIdCancelData = + { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/florence-2-large/detailed-caption/requests/{request_id}/cancel' + } + +export type PutFalAiFlorence2LargeDetailedCaptionRequestsByRequestIdCancelResponses = + { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } + } + +export type PutFalAiFlorence2LargeDetailedCaptionRequestsByRequestIdCancelResponse = + PutFalAiFlorence2LargeDetailedCaptionRequestsByRequestIdCancelResponses[keyof PutFalAiFlorence2LargeDetailedCaptionRequestsByRequestIdCancelResponses] + +export type PostFalAiFlorence2LargeDetailedCaptionData = { + body: SchemaFlorence2LargeDetailedCaptionInput + path?: never + query?: never + url: '/fal-ai/florence-2-large/detailed-caption' +} + +export type PostFalAiFlorence2LargeDetailedCaptionResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiFlorence2LargeDetailedCaptionResponse = + PostFalAiFlorence2LargeDetailedCaptionResponses[keyof PostFalAiFlorence2LargeDetailedCaptionResponses] + +export type GetFalAiFlorence2LargeDetailedCaptionRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/florence-2-large/detailed-caption/requests/{request_id}' +} + +export type GetFalAiFlorence2LargeDetailedCaptionRequestsByRequestIdResponses = + { + /** + * Result of the request. 
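+ *
+ * @example
+ * // Result sketch: once the status is `COMPLETED`, GET the request route (or
+ * // simply follow `response_url` from the status payload). Base URL, auth
+ * // scheme, and `requestId` are assumptions, as above.
+ * const res = await fetch(
+ *   `https://queue.fal.run/fal-ai/florence-2-large/detailed-caption/requests/${requestId}`,
+ *   { headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+ * )
+ * const out: SchemaFlorence2LargeDetailedCaptionOutput = await res.json()
+ * console.log(out.results) // the generated caption text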
+ */ + 200: SchemaFlorence2LargeDetailedCaptionOutput + } + +export type GetFalAiFlorence2LargeDetailedCaptionRequestsByRequestIdResponse = + GetFalAiFlorence2LargeDetailedCaptionRequestsByRequestIdResponses[keyof GetFalAiFlorence2LargeDetailedCaptionRequestsByRequestIdResponses] + +export type GetFalAiImageutilsNsfwRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/imageutils/nsfw/requests/{request_id}/status' +} + +export type GetFalAiImageutilsNsfwRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiImageutilsNsfwRequestsByRequestIdStatusResponse = + GetFalAiImageutilsNsfwRequestsByRequestIdStatusResponses[keyof GetFalAiImageutilsNsfwRequestsByRequestIdStatusResponses] + +export type PutFalAiImageutilsNsfwRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/imageutils/nsfw/requests/{request_id}/cancel' +} + +export type PutFalAiImageutilsNsfwRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiImageutilsNsfwRequestsByRequestIdCancelResponse = + PutFalAiImageutilsNsfwRequestsByRequestIdCancelResponses[keyof PutFalAiImageutilsNsfwRequestsByRequestIdCancelResponses] + +export type PostFalAiImageutilsNsfwData = { + body: SchemaImageutilsNsfwInput + path?: never + query?: never + url: '/fal-ai/imageutils/nsfw' +} + +export type PostFalAiImageutilsNsfwResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiImageutilsNsfwResponse = + PostFalAiImageutilsNsfwResponses[keyof PostFalAiImageutilsNsfwResponses] + +export type GetFalAiImageutilsNsfwRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/imageutils/nsfw/requests/{request_id}' +} + +export type GetFalAiImageutilsNsfwRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaImageutilsNsfwOutput +} + +export type GetFalAiImageutilsNsfwRequestsByRequestIdResponse = + GetFalAiImageutilsNsfwRequestsByRequestIdResponses[keyof GetFalAiImageutilsNsfwRequestsByRequestIdResponses] + +export type GetFalAiMoondreamBatchedRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/moondream/batched/requests/{request_id}/status' +} + +export type GetFalAiMoondreamBatchedRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiMoondreamBatchedRequestsByRequestIdStatusResponse = + GetFalAiMoondreamBatchedRequestsByRequestIdStatusResponses[keyof GetFalAiMoondreamBatchedRequestsByRequestIdStatusResponses] + +export type PutFalAiMoondreamBatchedRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/moondream/batched/requests/{request_id}/cancel' +} + +export type PutFalAiMoondreamBatchedRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. 
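+ *
+ * @example
+ * // Cancel sketch: PUT to the cancel route; `success` reports whether the
+ * // cancellation took effect. Base URL, auth scheme, and `requestId` are
+ * // assumptions, as above.
+ * const res = await fetch(
+ *   `https://queue.fal.run/fal-ai/moondream/batched/requests/${requestId}/cancel`,
+ *   { method: 'PUT', headers: { Authorization: `Key ${process.env.FAL_KEY}` } },
+ * )
+ * const { success } = await res.json()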
+ */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiMoondreamBatchedRequestsByRequestIdCancelResponse = + PutFalAiMoondreamBatchedRequestsByRequestIdCancelResponses[keyof PutFalAiMoondreamBatchedRequestsByRequestIdCancelResponses] + +export type PostFalAiMoondreamBatchedData = { + body: SchemaMoondreamBatchedInput + path?: never + query?: never + url: '/fal-ai/moondream/batched' +} + +export type PostFalAiMoondreamBatchedResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiMoondreamBatchedResponse = + PostFalAiMoondreamBatchedResponses[keyof PostFalAiMoondreamBatchedResponses] + +export type GetFalAiMoondreamBatchedRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/moondream/batched/requests/{request_id}' +} + +export type GetFalAiMoondreamBatchedRequestsByRequestIdResponses = { + /** + * Result of the request. + */ + 200: SchemaMoondreamBatchedOutput +} + +export type GetFalAiMoondreamBatchedRequestsByRequestIdResponse = + GetFalAiMoondreamBatchedRequestsByRequestIdResponses[keyof GetFalAiMoondreamBatchedRequestsByRequestIdResponses] + +export type GetFalAiLlavaNextRequestsByRequestIdStatusData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: { + /** + * Whether to include logs (`1`) in the response or not (`0`). + */ + logs?: number + } + url: '/fal-ai/llava-next/requests/{request_id}/status' +} + +export type GetFalAiLlavaNextRequestsByRequestIdStatusResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type GetFalAiLlavaNextRequestsByRequestIdStatusResponse = + GetFalAiLlavaNextRequestsByRequestIdStatusResponses[keyof GetFalAiLlavaNextRequestsByRequestIdStatusResponses] + +export type PutFalAiLlavaNextRequestsByRequestIdCancelData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/llava-next/requests/{request_id}/cancel' +} + +export type PutFalAiLlavaNextRequestsByRequestIdCancelResponses = { + /** + * The request was cancelled. + */ + 200: { + /** + * Whether the request was cancelled successfully. + */ + success?: boolean + } +} + +export type PutFalAiLlavaNextRequestsByRequestIdCancelResponse = + PutFalAiLlavaNextRequestsByRequestIdCancelResponses[keyof PutFalAiLlavaNextRequestsByRequestIdCancelResponses] + +export type PostFalAiLlavaNextData = { + body: SchemaLlavaNextInput + path?: never + query?: never + url: '/fal-ai/llava-next' +} + +export type PostFalAiLlavaNextResponses = { + /** + * The request status. + */ + 200: SchemaQueueStatus +} + +export type PostFalAiLlavaNextResponse = + PostFalAiLlavaNextResponses[keyof PostFalAiLlavaNextResponses] + +export type GetFalAiLlavaNextRequestsByRequestIdData = { + body?: never + path: { + /** + * Request ID + */ + request_id: string + } + query?: never + url: '/fal-ai/llava-next/requests/{request_id}' +} + +export type GetFalAiLlavaNextRequestsByRequestIdResponses = { + /** + * Result of the request. 
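+ *
+ * The generated text is in `output`; `partial` indicates whether that output
+ * is still incomplete.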
+ */ + 200: SchemaLlavaNextOutput +} + +export type GetFalAiLlavaNextRequestsByRequestIdResponse = + GetFalAiLlavaNextRequestsByRequestIdResponses[keyof GetFalAiLlavaNextRequestsByRequestIdResponses] diff --git a/packages/typescript/ai-fal/src/generated/vision/zod.gen.ts b/packages/typescript/ai-fal/src/generated/vision/zod.gen.ts new file mode 100644 index 00000000..9e0f3013 --- /dev/null +++ b/packages/typescript/ai-fal/src/generated/vision/zod.gen.ts @@ -0,0 +1,3952 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { z } from 'zod' + +/** + * LLavaOutput + */ +export const zSchemaLlavaNextOutput = z.object({ + partial: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the output is partial', + }), + ) + .default(false), + output: z.string().register(z.globalRegistry, { + description: 'Generated output', + }), +}) + +/** + * LLavaInput + */ +export const zSchemaLlavaNextInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Prompt to be used for the image', + }), + top_p: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Top P for sampling', + }), + ) + .default(1), + max_tokens: z + .optional( + z.int().register(z.globalRegistry, { + description: 'Maximum number of tokens to generate', + }), + ) + .default(64), + temperature: z + .optional( + z.number().lte(1).register(z.globalRegistry, { + description: 'Temperature for sampling', + }), + ) + .default(0.2), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to be processed', + }), +}) + +/** + * MoondreamInputParam + */ +export const zSchemaMoondreamInputParam = z.object({ + prompt: z + .optional( + z.string().register(z.globalRegistry, { + description: 'Prompt to be used for the image', + }), + ) + .default('Describe this image.'), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to be processed', + }), +}) + +/** + * BatchedMoondreamOutput + */ +export const zSchemaMoondreamBatchedOutput = z.object({ + filenames: z.optional(z.union([z.array(z.string()), z.null()])), + outputs: z.array(z.string()).register(z.globalRegistry, { + description: 'List of generated outputs', + }), + partial: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the output is partial', + }), + ) + .default(false), + timings: z.record(z.string(), z.number()).register(z.globalRegistry, { + description: 'Timings for different parts of the process', + }), +}) + +/** + * BatchedMoondreamInput + */ +export const zSchemaMoondreamBatchedInput = z.object({ + model_id: z.optional( + z + .enum(['vikhyatk/moondream2', 'fal-ai/moondream2-docci']) + .register(z.globalRegistry, { + description: 'Model ID to use for inference', + }), + ), + repetition_penalty: z + .optional( + z.number().gte(1).lte(2).register(z.globalRegistry, { + description: 'Repetition penalty for sampling', + }), + ) + .default(1), + inputs: z.array(zSchemaMoondreamInputParam).register(z.globalRegistry, { + description: 'List of input prompts and image URLs', + }), + max_tokens: z + .optional( + z.int().gte(32).lte(1024).register(z.globalRegistry, { + description: 'Maximum number of new tokens to generate', + }), + ) + .default(64), + temperature: z + .optional( + z.number().lte(1).register(z.globalRegistry, { + description: 'Temperature for sampling', + }), + ) + .default(0.2), + top_p: z + .optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Top P for 
sampling', + }), + ) + .default(1), +}) + +/** + * NSFWImageDetectionOutput + */ +export const zSchemaImageutilsNsfwOutput = z.object({ + nsfw_probability: z.number().register(z.globalRegistry, { + description: 'The probability of the image being NSFW.', + }), +}) + +/** + * NSFWImageDetectionInput + */ +export const zSchemaImageutilsNsfwInput = z.object({ + image_url: z.string().register(z.globalRegistry, { + description: 'Input image url.', + }), +}) + +/** + * TextOutput + */ +export const zSchemaFlorence2LargeDetailedCaptionOutput = z.object({ + results: z.string().register(z.globalRegistry, { + description: 'Results from the model', + }), +}) + +/** + * ImageInput + */ +export const zSchemaFlorence2LargeDetailedCaptionInput = z.object({ + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to be processed.', + }), +}) + +/** + * TextOutput + */ +export const zSchemaFlorence2LargeCaptionOutput = z.object({ + results: z.string().register(z.globalRegistry, { + description: 'Results from the model', + }), +}) + +/** + * ImageInput + */ +export const zSchemaFlorence2LargeCaptionInput = z.object({ + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to be processed.', + }), +}) + +/** + * TextOutput + */ +export const zSchemaFlorence2LargeRegionToCategoryOutput = z.object({ + results: z.string().register(z.globalRegistry, { + description: 'Results from the model', + }), +}) + +/** + * Region + */ +export const zSchemaRegion = z.object({ + y1: z.int().gte(0).lte(999).register(z.globalRegistry, { + description: 'Y-coordinate of the top-left corner', + }), + x2: z.int().gte(0).lte(999).register(z.globalRegistry, { + description: 'X-coordinate of the bottom-right corner', + }), + x1: z.int().gte(0).lte(999).register(z.globalRegistry, { + description: 'X-coordinate of the top-left corner', + }), + y2: z.int().gte(0).lte(999).register(z.globalRegistry, { + description: 'Y-coordinate of the bottom-right corner', + }), +}) + +/** + * ImageWithUserCoordinatesInput + */ +export const zSchemaFlorence2LargeRegionToCategoryInput = z.object({ + region: zSchemaRegion, + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to be processed.', + }), +}) + +/** + * TextOutput + */ +export const zSchemaFlorence2LargeMoreDetailedCaptionOutput = z.object({ + results: z.string().register(z.globalRegistry, { + description: 'Results from the model', + }), +}) + +/** + * ImageInput + */ +export const zSchemaFlorence2LargeMoreDetailedCaptionInput = z.object({ + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to be processed.', + }), +}) + +/** + * TextOutput + */ +export const zSchemaFlorence2LargeOcrOutput = z.object({ + results: z.string().register(z.globalRegistry, { + description: 'Results from the model', + }), +}) + +/** + * ImageInput + */ +export const zSchemaFlorence2LargeOcrInput = z.object({ + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to be processed.', + }), +}) + +/** + * TextOutput + */ +export const zSchemaFlorence2LargeRegionToDescriptionOutput = z.object({ + results: z.string().register(z.globalRegistry, { + description: 'Results from the model', + }), +}) + +/** + * ImageWithUserCoordinatesInput + */ +export const zSchemaFlorence2LargeRegionToDescriptionInput = z.object({ + region: zSchemaRegion, + image_url: z.string().register(z.globalRegistry, { + description: 'The URL of the image to be processed.', + }), 
+}) + +/** + * MoonDreamOutput + */ +export const zSchemaMoondreamNextOutput = z.object({ + output: z.string().register(z.globalRegistry, { + description: 'Response from the model', + }), +}) + +/** + * QueryInput + */ +export const zSchemaMoondreamNextInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Prompt for query task', + }), + task_type: z.optional( + z.enum(['caption', 'query']).register(z.globalRegistry, { + description: 'Type of task to perform', + }), + ), + max_tokens: z + .optional( + z.int().gte(1).lte(512).register(z.globalRegistry, { + description: 'Maximum number of tokens to generate', + }), + ) + .default(64), + image_url: z.string().register(z.globalRegistry, { + description: 'Image URL to be processed', + }), +}) + +/** + * Image + * + * Represents an image file. + */ +export const zSchemaImage = z + .object({ + height: z.optional( + z.int().register(z.globalRegistry, { + description: 'The height of the image in pixels.', + }), + ), + file_size: z.optional( + z.int().register(z.globalRegistry, { + description: 'The size of the file in bytes.', + }), + ), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), + width: z.optional( + z.int().register(z.globalRegistry, { + description: 'The width of the image in pixels.', + }), + ), + file_name: z.optional( + z.string().register(z.globalRegistry, { + description: + 'The name of the file. It will be auto-generated if not provided.', + }), + ), + content_type: z.optional( + z.string().register(z.globalRegistry, { + description: 'The mime type of the file.', + }), + ), + file_data: z.optional( + z.string().register(z.globalRegistry, { + description: 'File data', + }), + ), + }) + .register(z.globalRegistry, { + description: 'Represents an image file.', + }) + +/** + * ImageChatOutput + */ +export const zSchemaSa2Va8bImageOutput = z.object({ + masks: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Dictionary of label: mask image', + }), + output: z.string().register(z.globalRegistry, { + description: 'Generated output', + }), +}) + +/** + * ImageInput + */ +export const zSchemaSa2Va8bImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Prompt to be used for the chat completion', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'Url for the Input image.', + }), +}) + +/** + * ImageChatOutput + */ +export const zSchemaSa2Va4bImageOutput = z.object({ + masks: z.array(zSchemaImage).register(z.globalRegistry, { + description: 'Dictionary of label: mask image', + }), + output: z.string().register(z.globalRegistry, { + description: 'Generated output', + }), +}) + +/** + * ImageInput + */ +export const zSchemaSa2Va4bImageInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Prompt to be used for the chat completion', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'Url for the Input image.', + }), +}) + +/** + * File + */ +export const zSchemaFile = z.object({ + file_size: z.optional( + z.int().register(z.globalRegistry, { + description: 'The size of the file in bytes.', + }), + ), + file_name: z.optional( + z.string().register(z.globalRegistry, { + description: + 'The name of the file. 
It will be auto-generated if not provided.', + }), + ), + content_type: z.optional( + z.string().register(z.globalRegistry, { + description: 'The mime type of the file.', + }), + ), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), + file_data: z.optional( + z.string().register(z.globalRegistry, { + description: 'File data', + }), + ), +}) + +/** + * VideoChatOutput + */ +export const zSchemaSa2Va8bVideoOutput = z.object({ + masks: z.array(zSchemaFile).register(z.globalRegistry, { + description: 'Dictionary of label: mask video', + }), + output: z.string().register(z.globalRegistry, { + description: 'Generated output', + }), +}) + +/** + * VideoInput + */ +export const zSchemaSa2Va8bVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Prompt to be used for the chat completion', + }), + video_url: z.string().register(z.globalRegistry, { + description: 'The URL of the input video.', + }), + num_frames_to_sample: z.optional( + z.int().gte(1).lte(100).register(z.globalRegistry, { + description: + 'Number of frames to sample from the video. If not provided, all frames are sampled.', + }), + ), +}) + +/** + * VideoChatOutput + */ +export const zSchemaSa2Va4bVideoOutput = z.object({ + masks: z.array(zSchemaFile).register(z.globalRegistry, { + description: 'Dictionary of label: mask video', + }), + output: z.string().register(z.globalRegistry, { + description: 'Generated output', + }), +}) + +/** + * VideoInput + */ +export const zSchemaSa2Va4bVideoInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Prompt to be used for the chat completion', + }), + video_url: z.string().register(z.globalRegistry, { + description: 'The URL of the input video.', + }), + num_frames_to_sample: z.optional( + z.int().gte(1).lte(100).register(z.globalRegistry, { + description: + 'Number of frames to sample from the video. 
If not provided, all frames are sampled.', + }), + ), +}) + +/** + * BatchMoonDreamOutput + */ +export const zSchemaMoondreamNextBatchOutput = z.object({ + outputs: z.array(z.string()).register(z.globalRegistry, { + description: 'List of generated captions', + }), + captions_file: zSchemaFile, +}) + +/** + * BatchQueryInput + */ +export const zSchemaMoondreamNextBatchInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Single prompt to apply to all images', + }), + images_data_url: z.string().register(z.globalRegistry, { + description: 'List of image URLs to be processed (maximum 32 images)', + }), + max_tokens: z + .optional( + z.int().gte(1).lte(512).register(z.globalRegistry, { + description: 'Maximum number of tokens to generate', + }), + ) + .default(64), +}) + +/** + * ImageChatOutput + */ +export const zSchemaGotOcrV2Output = z.object({ + outputs: z.array(z.string()).register(z.globalRegistry, { + description: 'Generated output', + }), +}) + +/** + * ImageInput + */ +export const zSchemaGotOcrV2Input = z.object({ + do_format: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Generate the output in formatted mode.', + }), + ) + .default(false), + multi_page: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Use provided images to generate a single output.', + }), + ) + .default(false), + input_image_urls: z + .optional( + z.array(z.string()).register(z.globalRegistry, { + description: 'URL of images.', + }), + ) + .default([]), +}) + +/** + * MoondreamObjectOutput + */ +export const zSchemaMoondream2ObjectDetectionOutput = z.object({ + image: zSchemaImage, + objects: z + .array(z.record(z.string(), z.unknown())) + .register(z.globalRegistry, { + description: 'Objects detected in the image', + }), +}) + +/** + * MoondreamObjectInput + */ +export const zSchemaMoondream2ObjectDetectionInput = z.object({ + object: z.string().register(z.globalRegistry, { + description: 'Object to be detected in the image', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to be processed', + }), +}) + +/** + * MoondreamObjectOutput + */ +export const zSchemaMoondream2PointObjectDetectionOutput = z.object({ + image: zSchemaImage, + objects: z + .array(z.record(z.string(), z.unknown())) + .register(z.globalRegistry, { + description: 'Objects detected in the image', + }), +}) + +/** + * MoondreamObjectInput + */ +export const zSchemaMoondream2PointObjectDetectionInput = z.object({ + object: z.string().register(z.globalRegistry, { + description: 'Object to be detected in the image', + }), + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to be processed', + }), +}) + +/** + * MoondreamOutput + */ +export const zSchemaMoondream2Output = z.object({ + output: z.string().register(z.globalRegistry, { + description: 'Output for the given query', + }), +}) + +/** + * MoondreamInput + */ +export const zSchemaMoondream2Input = z.object({ + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to be processed', + }), +}) + +/** + * MoondreamOutput + */ +export const zSchemaMoondream2VisualQueryOutput = z.object({ + output: z.string().register(z.globalRegistry, { + description: 'Output for the given query', + }), +}) + +/** + * MoondreamQueryInput + */ +export const zSchemaMoondream2VisualQueryInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Query to be asked in the image', + }), + image_url: 
z.string().register(z.globalRegistry, { + description: 'URL of the image to be processed', + }), +}) + +/** + * VideoUnderstandingOutput + */ +export const zSchemaVideoUnderstandingOutput = z.object({ + output: z.string().register(z.globalRegistry, { + description: 'The analysis of the video content based on the prompt', + }), +}) + +/** + * VideoUnderstandingInput + */ +export const zSchemaVideoUnderstandingInput = z.object({ + detailed_analysis: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to request a more detailed analysis of the video', + }), + ) + .default(false), + video_url: z.string().register(z.globalRegistry, { + description: 'URL of the video to analyze', + }), + prompt: z.string().min(1).max(5000).register(z.globalRegistry, { + description: 'The question or prompt about the video content.', + }), +}) + +/** + * NSFWOutput + */ +export const zSchemaXAilabNsfwOutput = z.object({ + has_nsfw_concepts: z.array(z.boolean()).register(z.globalRegistry, { + description: 'List of booleans indicating if the image has an NSFW concept', + }), +}) + +/** + * NSFWInput + */ +export const zSchemaXAilabNsfwInput = z.object({ + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: + 'List of image URLs to check. If more than 10 images are provided, only the first 10 will be checked.', + }), +}) + +/** + * CompletionUsage + */ +export const zSchemaCompletionUsage = z.object({ + completion_tokens: z.int().register(z.globalRegistry, { + description: 'Number of tokens in the completion', + }), + total_tokens: z.int().register(z.globalRegistry, { + description: 'Total tokens used', + }), + prompt_tokens: z.int().register(z.globalRegistry, { + description: 'Number of tokens in the prompt', + }), +}) + +/** + * ChatOutput + */ +export const zSchemaIsaac01Output = z.object({ + usage: z.optional(z.union([zSchemaCompletionUsage, z.unknown()])), + error: z.optional(z.union([z.string(), z.unknown()])), + partial: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the output is partial', + }), + ) + .default(false), + output: z.string().register(z.globalRegistry, { + description: 'Generated output', + }), +}) + +/** + * VisionInput + */ +export const zSchemaIsaac01Input = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Prompt to be used for the image', + }), + response_style: z.optional( + z.enum(['text', 'box', 'point', 'polygon']).register(z.globalRegistry, { + description: + '\nResponse style to be used for the image.\n\n- text: Model will output text. Good for descriptions and captioning.\n- box: Model will output a combination of text and bounding boxes. Good for\nlocalization.\n- point: Model will output a combination of text and points. Good for counting many\nobjects.\n- polygon: Model will output a combination of text and polygons. 
Good for granular\nsegmentation.\n', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: 'Image URL to be processed', + }), +}) + +/** + * Schema referenced but not defined by fal.ai (missing from source OpenAPI spec) + */ +export const zSchemaIsaac01OpenaiV1ChatCompletionsInput = z + .record(z.string(), z.unknown()) + .register(z.globalRegistry, { + description: + 'Schema referenced but not defined by fal.ai (missing from source OpenAPI spec)', + }) + +export const zSchemaIsaac01OpenaiV1ChatCompletionsOutput = z.unknown() + +/** + * UsageInfo + */ +export const zSchemaUsageInfo = z.object({ + output_tokens: z.int().register(z.globalRegistry, { + description: 'Number of output tokens generated', + }), + decode_time_ms: z.number().register(z.globalRegistry, { + description: 'Time taken for decoding in milliseconds', + }), + input_tokens: z.int().register(z.globalRegistry, { + description: 'Number of input tokens processed', + }), + ttft_ms: z.number().register(z.globalRegistry, { + description: 'Time to first token in milliseconds', + }), + prefill_time_ms: z.number().register(z.globalRegistry, { + description: 'Time taken for prefill in milliseconds', + }), +}) + +/** + * MoondreamCaptionOutput + */ +export const zSchemaMoondream3PreviewCaptionOutput = z.object({ + finish_reason: z.string().register(z.globalRegistry, { + description: 'Reason for finishing the output generation', + }), + output: z.string().register(z.globalRegistry, { + description: 'Generated caption for the image', + }), + usage_info: zSchemaUsageInfo, +}) + +/** + * MoondreamCaptionInput + */ +export const zSchemaMoondream3PreviewCaptionInput = z.object({ + top_p: z.optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Nucleus sampling probability mass to use, between 0 and 1.', + }), + ), + length: z.optional( + z.enum(['short', 'normal', 'long']).register(z.globalRegistry, { + description: 'Length of the caption to generate', + }), + ), + temperature: z.optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Sampling temperature to use, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If not set, defaults to 0.', + }), + ), + image_url: z.string().register(z.globalRegistry, { + description: + 'URL of the image to be processed\n\nMax width: 7000px, Max height: 7000px, Timeout: 20.0s', + }), +}) + +/** + * MoondreamQueryOutput + */ +export const zSchemaMoondream3PreviewQueryOutput = z.object({ + finish_reason: z.string().register(z.globalRegistry, { + description: 'Reason for finishing the output generation', + }), + reasoning: z.optional( + z.string().register(z.globalRegistry, { + description: 'Detailed reasoning behind the answer, if enabled', + }), + ), + output: z.string().register(z.globalRegistry, { + description: 'Answer to the query about the image', + }), + usage_info: zSchemaUsageInfo, +}) + +/** + * MoondreamQueryInput + */ +export const zSchemaMoondream3PreviewQueryInput = z.object({ + prompt: z.string().min(1).register(z.globalRegistry, { + description: 'Query to be asked in the image', + }), + top_p: z.optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: 'Nucleus sampling probability mass to use, between 0 and 1.', + }), + ), + temperature: z.optional( + z.number().gte(0).lte(1).register(z.globalRegistry, { + description: + 'Sampling temperature to use, between 0 and 1. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If not set, defaults to 0.', + }), + ), + reasoning: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to include detailed reasoning behind the answer', + }), + ) + .default(true), + image_url: z.string().register(z.globalRegistry, { + description: + 'URL of the image to be processed\n\nMax width: 7000px, Max height: 7000px, Timeout: 20.0s', + }), +}) + +/** + * Point + */ +export const zSchemaPoint = z.object({ + y: z.number().register(z.globalRegistry, { + description: 'Y coordinate of the point in normalized format (0 to 1)', + }), + x: z.number().register(z.globalRegistry, { + description: 'X coordinate of the point in normalized format (0 to 1)', + }), +}) + +/** + * ImageFile + */ +export const zSchemaImageFile = z.object({ + height: z.optional( + z.int().register(z.globalRegistry, { + description: 'The height of the image', + }), + ), + file_size: z.optional( + z.int().register(z.globalRegistry, { + description: 'The size of the file in bytes.', + }), + ), + url: z.string().register(z.globalRegistry, { + description: 'The URL where the file can be downloaded from.', + }), + width: z.optional( + z.int().register(z.globalRegistry, { + description: 'The width of the image', + }), + ), + file_name: z.optional( + z.string().register(z.globalRegistry, { + description: + 'The name of the file. It will be auto-generated if not provided.', + }), + ), + content_type: z.optional( + z.string().register(z.globalRegistry, { + description: 'The mime type of the file.', + }), + ), + file_data: z.optional( + z.string().register(z.globalRegistry, { + description: 'File data', + }), + ), +}) + +/** + * MoondreamPointOutput + */ +export const zSchemaMoondream3PreviewPointOutput = z.object({ + points: z.array(zSchemaPoint).register(z.globalRegistry, { + description: 'List of points marking the detected objects', + }), + finish_reason: z.string().register(z.globalRegistry, { + description: 'Reason for finishing the output generation', + }), + image: z.optional(zSchemaImageFile), + usage_info: zSchemaUsageInfo, +}) + +/** + * MoondreamPointInput + */ +export const zSchemaMoondream3PreviewPointInput = z.object({ + prompt: z.string().min(1).register(z.globalRegistry, { + description: 'Object to be located in the image', + }), + preview: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to preview the output', + }), + ) + .default(false), + image_url: z.string().register(z.globalRegistry, { + description: + 'URL of the image to be processed\n\nMax width: 7000px, Max height: 7000px, Timeout: 20.0s', + }), +}) + +/** + * Object + */ +export const zSchemaObject = z.object({ + y_min: z.number().register(z.globalRegistry, { + description: 'Top boundary of detection box in normalized format (0 to 1)', + }), + x_max: z.number().register(z.globalRegistry, { + description: + 'Right boundary of detection box in normalized format (0 to 1)', + }), + x_min: z.number().register(z.globalRegistry, { + description: 'Left boundary of detection box in normalized format (0 to 1)', + }), + y_max: z.number().register(z.globalRegistry, { + description: + 'Bottom boundary of detection box in normalized format (0 to 1)', + }), +}) + +/** + * MoondreamDetectOutput + */ +export const zSchemaMoondream3PreviewDetectOutput = z.object({ + finish_reason: z.string().register(z.globalRegistry, { + description: 'Reason for finishing the output 
generation', + }), + image: z.optional(zSchemaImageFile), + objects: z.array(zSchemaObject).register(z.globalRegistry, { + description: 'List of detected objects with their bounding boxes', + }), + usage_info: zSchemaUsageInfo, +}) + +/** + * MoondreamDetectInput + */ +export const zSchemaMoondream3PreviewDetectInput = z.object({ + prompt: z.string().min(1).register(z.globalRegistry, { + description: 'Object to be detected in the image', + }), + preview: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether to preview the output', + }), + ) + .default(false), + image_url: z.string().register(z.globalRegistry, { + description: + 'URL of the image to be processed\n\nMax width: 7000px, Max height: 7000px, Timeout: 20.0s', + }), +}) + +/** + * VisionOutput + */ +export const zSchemaRouterVisionOutput = z.object({ + usage: z.optional(zSchemaUsageInfo), + output: z.string().register(z.globalRegistry, { + description: 'Generated output', + }), +}) + +/** + * VisionInput + */ +export const zSchemaRouterVisionInput = z.object({ + prompt: z.string().register(z.globalRegistry, { + description: 'Prompt to be used for the image', + }), + system_prompt: z.optional( + z.string().register(z.globalRegistry, { + description: + 'System prompt to provide context or instructions to the model', + }), + ), + reasoning: z + .optional( + z.boolean().register(z.globalRegistry, { + description: 'Should reasoning be the part of the final answer.', + }), + ) + .default(false), + model: z.string().register(z.globalRegistry, { + description: + 'Name of the model to use. Charged based on actual token usage.', + }), + max_tokens: z.optional( + z.int().gte(1).register(z.globalRegistry, { + description: + "This sets the upper limit for the number of tokens the model can generate in response. It won't produce more than this limit. The maximum value is the context length minus the prompt length.", + }), + ), + temperature: z + .optional( + z.number().gte(0).lte(2).register(z.globalRegistry, { + description: + "This setting influences the variety in the model's responses. Lower values lead to more predictable and typical responses, while higher values encourage more diverse and less common responses. 
At 0, the model always gives the same response for a given input.", + }), + ) + .default(1), + image_urls: z.array(z.string()).register(z.globalRegistry, { + description: 'List of image URLs to be processed', + }), +}) + +/** + * SAM3EmbeddingOutput + */ +export const zSchemaSam3ImageEmbedOutput = z.object({ + embedding_b64: z.string().register(z.globalRegistry, { + description: 'Embedding of the image', + }), +}) + +/** + * SAM3EmbeddingInput + */ +export const zSchemaSam3ImageEmbedInput = z.object({ + image_url: z.string().register(z.globalRegistry, { + description: 'URL of the image to embed.', + }), +}) + +/** + * AIImageDetectionOutput + */ +export const zSchemaAiDetectorDetectImageOutput = z.object({ + latency: z.number(), + verdict: z.string(), + is_ai_generated: z.boolean(), + confidence: z.number(), +}) + +/** + * ImageDetectionInput + */ +export const zSchemaAiDetectorDetectImageInput = z.object({ + image_url: z.string().register(z.globalRegistry, { + description: + 'URL pointing to an image to analyze for AI generation.(Max: 3000 characters)', + }), +}) + +/** + * ImageInput + */ +export const zSchemaImageInput = z.object({ + hypothesis: z.string().register(z.globalRegistry, { + description: 'The image to use for the measurement.', + }), +}) + +/** + * MultiMeasurementOutput + */ +export const zSchemaArbiterImageOutput = z.object({ + values: z.optional( + z + .array( + z.record( + z.string(), + z.union([z.number(), z.record(z.string(), z.number())]), + ), + ) + .register(z.globalRegistry, { + description: 'The values of the measurements.', + }), + ), +}) + +/** + * ImageMultiMeasurementInput + */ +export const zSchemaArbiterImageInput = z.object({ + measurements: z + .array(z.enum(['arniqa', 'clip_iqa', 'musiq', 'nima', 'lapvar'])) + .register(z.globalRegistry, { + description: 'The measurements to use for the measurement.', + }), + inputs: z.array(zSchemaImageInput).register(z.globalRegistry, { + description: 'The inputs to use for the measurement.', + }), +}) + +/** + * ReferenceImageInput + */ +export const zSchemaReferenceImageInput = z.object({ + hypothesis: z.string().register(z.globalRegistry, { + description: 'The hypothesis image to use for the measurement.', + }), + reference: z.string().register(z.globalRegistry, { + description: 'The image to use for the measurement.', + }), +}) + +/** + * MultiMeasurementOutput + */ +export const zSchemaArbiterImageImageOutput = z.object({ + values: z.optional( + z + .array( + z.record( + z.string(), + z.union([z.number(), z.record(z.string(), z.number())]), + ), + ) + .register(z.globalRegistry, { + description: 'The values of the measurements.', + }), + ), +}) + +/** + * ImageReferenceMeasurementInput + */ +export const zSchemaArbiterImageImageInput = z.object({ + measurements: z + .array(z.enum(['dists', 'mse', 'lpips', 'sdi', 'ssim'])) + .register(z.globalRegistry, { + description: 'The measurements to use for the measurement.', + }), + inputs: z.array(zSchemaReferenceImageInput).register(z.globalRegistry, { + description: 'The inputs to use for the measurement.', + }), +}) + +/** + * SemanticImageInput + */ +export const zSchemaSemanticImageInput = z.object({ + hypothesis: z.string().register(z.globalRegistry, { + description: 'The hypothesis image to use for the measurement.', + }), + reference: z.string().register(z.globalRegistry, { + description: 'The text reference to use for the measurement.', + }), +}) + +/** + * MultiMeasurementOutput + */ +export const zSchemaArbiterImageTextOutput = z.object({ + values: z.optional( + 
z + .array( + z.record( + z.string(), + z.union([z.number(), z.record(z.string(), z.number())]), + ), + ) + .register(z.globalRegistry, { + description: 'The values of the measurements.', + }), + ), +}) + +/** + * SemanticImageMeasurementInput + */ +export const zSchemaArbiterImageTextInput = z.object({ + measurements: z.array(z.enum(['clip_score'])).register(z.globalRegistry, { + description: 'The measurements to use for the measurement.', + }), + inputs: z.array(zSchemaSemanticImageInput).register(z.globalRegistry, { + description: 'The inputs to use for the measurement.', + }), +}) + +export const zSchemaQueueStatus = z.object({ + status: z.enum(['IN_QUEUE', 'IN_PROGRESS', 'COMPLETED']), + request_id: z.string().register(z.globalRegistry, { + description: 'The request id.', + }), + response_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The response url.', + }), + ), + status_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The status url.', + }), + ), + cancel_url: z.optional( + z.string().register(z.globalRegistry, { + description: 'The cancel url.', + }), + ), + logs: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The logs.', + }), + ), + metrics: z.optional( + z.record(z.string(), z.unknown()).register(z.globalRegistry, { + description: 'The metrics.', + }), + ), + queue_position: z.optional( + z.int().register(z.globalRegistry, { + description: 'The queue position.', + }), + ), +}) + +export const zGetFalAiArbiterImageTextRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiArbiterImageTextRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiArbiterImageTextRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiArbiterImageTextRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiArbiterImageTextData = z.object({ + body: zSchemaArbiterImageTextInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiArbiterImageTextResponse = zSchemaQueueStatus + +export const zGetFalAiArbiterImageTextRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
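+ *
+ * @example
+ * // Runtime-validation sketch: unlike the plain generated types, these Zod
+ * // schemas can check a payload at runtime. `json` is a hypothetical response
+ * // body fetched from the result route.
+ * const parsed = zGetFalAiArbiterImageTextRequestsByRequestIdResponse.parse(json)
+ * // `parsed.values`, when present, holds one measurement record per input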
+ */ +export const zGetFalAiArbiterImageTextRequestsByRequestIdResponse = + zSchemaArbiterImageTextOutput + +export const zGetFalAiArbiterImageImageRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiArbiterImageImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiArbiterImageImageRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiArbiterImageImageRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiArbiterImageImageData = z.object({ + body: zSchemaArbiterImageImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiArbiterImageImageResponse = zSchemaQueueStatus + +export const zGetFalAiArbiterImageImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiArbiterImageImageRequestsByRequestIdResponse = + zSchemaArbiterImageImageOutput + +export const zGetFalAiArbiterImageRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiArbiterImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiArbiterImageRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiArbiterImageRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiArbiterImageData = z.object({ + body: zSchemaArbiterImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
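+ *
+ * As with every submit and status route in this module, the response resolves
+ * to the shared `zSchemaQueueStatus` schema.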
+ */ +export const zPostFalAiArbiterImageResponse = zSchemaQueueStatus + +export const zGetFalAiArbiterImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiArbiterImageRequestsByRequestIdResponse = + zSchemaArbiterImageOutput + +export const zGetHalfMoonAiAiDetectorDetectImageRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetHalfMoonAiAiDetectorDetectImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutHalfMoonAiAiDetectorDetectImageRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutHalfMoonAiAiDetectorDetectImageRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostHalfMoonAiAiDetectorDetectImageData = z.object({ + body: zSchemaAiDetectorDetectImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostHalfMoonAiAiDetectorDetectImageResponse = zSchemaQueueStatus + +export const zGetHalfMoonAiAiDetectorDetectImageRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetHalfMoonAiAiDetectorDetectImageRequestsByRequestIdResponse = + zSchemaAiDetectorDetectImageOutput + +export const zGetFalAiSam3ImageEmbedRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiSam3ImageEmbedRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSam3ImageEmbedRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiSam3ImageEmbedRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSam3ImageEmbedData = z.object({ + body: zSchemaSam3ImageEmbedInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiSam3ImageEmbedResponse = zSchemaQueueStatus + +export const zGetFalAiSam3ImageEmbedRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSam3ImageEmbedRequestsByRequestIdResponse = + zSchemaSam3ImageEmbedOutput + +export const zGetOpenrouterRouterVisionRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetOpenrouterRouterVisionRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutOpenrouterRouterVisionRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutOpenrouterRouterVisionRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostOpenrouterRouterVisionData = z.object({ + body: zSchemaRouterVisionInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostOpenrouterRouterVisionResponse = zSchemaQueueStatus + +export const zGetOpenrouterRouterVisionRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetOpenrouterRouterVisionRequestsByRequestIdResponse = + zSchemaRouterVisionOutput + +export const zGetFalAiMoondream3PreviewDetectRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiMoondream3PreviewDetectRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMoondream3PreviewDetectRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiMoondream3PreviewDetectRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMoondream3PreviewDetectData = z.object({ + body: zSchemaMoondream3PreviewDetectInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMoondream3PreviewDetectResponse = zSchemaQueueStatus + +export const zGetFalAiMoondream3PreviewDetectRequestsByRequestIdData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * Result of the request. + */ +export const zGetFalAiMoondream3PreviewDetectRequestsByRequestIdResponse = + zSchemaMoondream3PreviewDetectOutput + +export const zGetFalAiMoondream3PreviewPointRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiMoondream3PreviewPointRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMoondream3PreviewPointRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiMoondream3PreviewPointRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMoondream3PreviewPointData = z.object({ + body: zSchemaMoondream3PreviewPointInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMoondream3PreviewPointResponse = zSchemaQueueStatus + +export const zGetFalAiMoondream3PreviewPointRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiMoondream3PreviewPointRequestsByRequestIdResponse = + zSchemaMoondream3PreviewPointOutput + +export const zGetFalAiMoondream3PreviewQueryRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiMoondream3PreviewQueryRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMoondream3PreviewQueryRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiMoondream3PreviewQueryRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMoondream3PreviewQueryData = z.object({ + body: zSchemaMoondream3PreviewQueryInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMoondream3PreviewQueryResponse = zSchemaQueueStatus + +export const zGetFalAiMoondream3PreviewQueryRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiMoondream3PreviewQueryRequestsByRequestIdResponse = + zSchemaMoondream3PreviewQueryOutput + +export const zGetFalAiMoondream3PreviewCaptionRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiMoondream3PreviewCaptionRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMoondream3PreviewCaptionRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiMoondream3PreviewCaptionRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMoondream3PreviewCaptionData = z.object({ + body: zSchemaMoondream3PreviewCaptionInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiMoondream3PreviewCaptionResponse = zSchemaQueueStatus + +export const zGetFalAiMoondream3PreviewCaptionRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiMoondream3PreviewCaptionRequestsByRequestIdResponse = + zSchemaMoondream3PreviewCaptionOutput + +export const zGetPerceptronIsaac01OpenaiV1ChatCompletionsRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetPerceptronIsaac01OpenaiV1ChatCompletionsRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutPerceptronIsaac01OpenaiV1ChatCompletionsRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutPerceptronIsaac01OpenaiV1ChatCompletionsRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostPerceptronIsaac01OpenaiV1ChatCompletionsData = z.object({ + body: zSchemaIsaac01OpenaiV1ChatCompletionsInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostPerceptronIsaac01OpenaiV1ChatCompletionsResponse = + zSchemaQueueStatus + +export const zGetPerceptronIsaac01OpenaiV1ChatCompletionsRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetPerceptronIsaac01OpenaiV1ChatCompletionsRequestsByRequestIdResponse = + zSchemaIsaac01OpenaiV1ChatCompletionsOutput + +export const zGetPerceptronIsaac01RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetPerceptronIsaac01RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutPerceptronIsaac01RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutPerceptronIsaac01RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostPerceptronIsaac01Data = z.object({ + body: zSchemaIsaac01Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostPerceptronIsaac01Response = zSchemaQueueStatus + +export const zGetPerceptronIsaac01RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetPerceptronIsaac01RequestsByRequestIdResponse = + zSchemaIsaac01Output + +export const zGetFalAiXAilabNsfwRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiXAilabNsfwRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiXAilabNsfwRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiXAilabNsfwRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiXAilabNsfwData = z.object({ + body: zSchemaXAilabNsfwInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiXAilabNsfwResponse = zSchemaQueueStatus + +export const zGetFalAiXAilabNsfwRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiXAilabNsfwRequestsByRequestIdResponse = + zSchemaXAilabNsfwOutput + +export const zGetFalAiVideoUnderstandingRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiVideoUnderstandingRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiVideoUnderstandingRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiVideoUnderstandingRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiVideoUnderstandingData = z.object({ + body: zSchemaVideoUnderstandingInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiVideoUnderstandingResponse = zSchemaQueueStatus + +export const zGetFalAiVideoUnderstandingRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiVideoUnderstandingRequestsByRequestIdResponse = + zSchemaVideoUnderstandingOutput + +export const zGetFalAiMoondream2VisualQueryRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiMoondream2VisualQueryRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMoondream2VisualQueryRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiMoondream2VisualQueryRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMoondream2VisualQueryData = z.object({ + body: zSchemaMoondream2VisualQueryInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMoondream2VisualQueryResponse = zSchemaQueueStatus + +export const zGetFalAiMoondream2VisualQueryRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiMoondream2VisualQueryRequestsByRequestIdResponse = + zSchemaMoondream2VisualQueryOutput + +export const zGetFalAiMoondream2RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiMoondream2RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMoondream2RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiMoondream2RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMoondream2Data = z.object({ + body: zSchemaMoondream2Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMoondream2Response = zSchemaQueueStatus + +export const zGetFalAiMoondream2RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiMoondream2RequestsByRequestIdResponse = + zSchemaMoondream2Output + +export const zGetFalAiMoondream2PointObjectDetectionRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiMoondream2PointObjectDetectionRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMoondream2PointObjectDetectionRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiMoondream2PointObjectDetectionRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMoondream2PointObjectDetectionData = z.object({ + body: zSchemaMoondream2PointObjectDetectionInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiMoondream2PointObjectDetectionResponse = + zSchemaQueueStatus + +export const zGetFalAiMoondream2PointObjectDetectionRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiMoondream2PointObjectDetectionRequestsByRequestIdResponse = + zSchemaMoondream2PointObjectDetectionOutput + +export const zGetFalAiMoondream2ObjectDetectionRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiMoondream2ObjectDetectionRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMoondream2ObjectDetectionRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiMoondream2ObjectDetectionRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMoondream2ObjectDetectionData = z.object({ + body: zSchemaMoondream2ObjectDetectionInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMoondream2ObjectDetectionResponse = zSchemaQueueStatus + +export const zGetFalAiMoondream2ObjectDetectionRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiMoondream2ObjectDetectionRequestsByRequestIdResponse = + zSchemaMoondream2ObjectDetectionOutput + +export const zGetFalAiGotOcrV2RequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiGotOcrV2RequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiGotOcrV2RequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiGotOcrV2RequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiGotOcrV2Data = z.object({ + body: zSchemaGotOcrV2Input, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiGotOcrV2Response = zSchemaQueueStatus + +export const zGetFalAiGotOcrV2RequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiGotOcrV2RequestsByRequestIdResponse = + zSchemaGotOcrV2Output + +export const zGetFalAiMoondreamNextBatchRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiMoondreamNextBatchRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMoondreamNextBatchRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiMoondreamNextBatchRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMoondreamNextBatchData = z.object({ + body: zSchemaMoondreamNextBatchInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMoondreamNextBatchResponse = zSchemaQueueStatus + +export const zGetFalAiMoondreamNextBatchRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiMoondreamNextBatchRequestsByRequestIdResponse = + zSchemaMoondreamNextBatchOutput + +export const zGetFalAiSa2Va4bVideoRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. 
+ */ +export const zGetFalAiSa2Va4bVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSa2Va4bVideoRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiSa2Va4bVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSa2Va4bVideoData = z.object({ + body: zSchemaSa2Va4bVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiSa2Va4bVideoResponse = zSchemaQueueStatus + +export const zGetFalAiSa2Va4bVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSa2Va4bVideoRequestsByRequestIdResponse = + zSchemaSa2Va4bVideoOutput + +export const zGetFalAiSa2Va8bVideoRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiSa2Va8bVideoRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSa2Va8bVideoRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiSa2Va8bVideoRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSa2Va8bVideoData = z.object({ + body: zSchemaSa2Va8bVideoInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiSa2Va8bVideoResponse = zSchemaQueueStatus + +export const zGetFalAiSa2Va8bVideoRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. 
+ */ +export const zGetFalAiSa2Va8bVideoRequestsByRequestIdResponse = + zSchemaSa2Va8bVideoOutput + +export const zGetFalAiSa2Va4bImageRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiSa2Va4bImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSa2Va4bImageRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiSa2Va4bImageRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSa2Va4bImageData = z.object({ + body: zSchemaSa2Va4bImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiSa2Va4bImageResponse = zSchemaQueueStatus + +export const zGetFalAiSa2Va4bImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSa2Va4bImageRequestsByRequestIdResponse = + zSchemaSa2Va4bImageOutput + +export const zGetFalAiSa2Va8bImageRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiSa2Va8bImageRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiSa2Va8bImageRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiSa2Va8bImageRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiSa2Va8bImageData = z.object({ + body: zSchemaSa2Va8bImageInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiSa2Va8bImageResponse = zSchemaQueueStatus + +export const zGetFalAiSa2Va8bImageRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiSa2Va8bImageRequestsByRequestIdResponse = + zSchemaSa2Va8bImageOutput + +export const zGetFalAiMoondreamNextRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiMoondreamNextRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMoondreamNextRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiMoondreamNextRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiMoondreamNextData = z.object({ + body: zSchemaMoondreamNextInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiMoondreamNextResponse = zSchemaQueueStatus + +export const zGetFalAiMoondreamNextRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiMoondreamNextRequestsByRequestIdResponse = + zSchemaMoondreamNextOutput + +export const zGetFalAiFlorence2LargeRegionToDescriptionRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFlorence2LargeRegionToDescriptionRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlorence2LargeRegionToDescriptionRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. 
+ */ +export const zPutFalAiFlorence2LargeRegionToDescriptionRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlorence2LargeRegionToDescriptionData = z.object({ + body: zSchemaFlorence2LargeRegionToDescriptionInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlorence2LargeRegionToDescriptionResponse = + zSchemaQueueStatus + +export const zGetFalAiFlorence2LargeRegionToDescriptionRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiFlorence2LargeRegionToDescriptionRequestsByRequestIdResponse = + zSchemaFlorence2LargeRegionToDescriptionOutput + +export const zGetFalAiFlorence2LargeOcrRequestsByRequestIdStatusData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }, +) + +/** + * The request status. + */ +export const zGetFalAiFlorence2LargeOcrRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlorence2LargeOcrRequestsByRequestIdCancelData = z.object( + { + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }, +) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlorence2LargeOcrRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlorence2LargeOcrData = z.object({ + body: zSchemaFlorence2LargeOcrInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlorence2LargeOcrResponse = zSchemaQueueStatus + +export const zGetFalAiFlorence2LargeOcrRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFlorence2LargeOcrRequestsByRequestIdResponse = + zSchemaFlorence2LargeOcrOutput + +export const zGetFalAiFlorence2LargeMoreDetailedCaptionRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. 
+ */ +export const zGetFalAiFlorence2LargeMoreDetailedCaptionRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlorence2LargeMoreDetailedCaptionRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlorence2LargeMoreDetailedCaptionRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlorence2LargeMoreDetailedCaptionData = z.object({ + body: zSchemaFlorence2LargeMoreDetailedCaptionInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlorence2LargeMoreDetailedCaptionResponse = + zSchemaQueueStatus + +export const zGetFalAiFlorence2LargeMoreDetailedCaptionRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiFlorence2LargeMoreDetailedCaptionRequestsByRequestIdResponse = + zSchemaFlorence2LargeMoreDetailedCaptionOutput + +export const zGetFalAiFlorence2LargeRegionToCategoryRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFlorence2LargeRegionToCategoryRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlorence2LargeRegionToCategoryRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlorence2LargeRegionToCategoryRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlorence2LargeRegionToCategoryData = z.object({ + body: zSchemaFlorence2LargeRegionToCategoryInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlorence2LargeRegionToCategoryResponse = + zSchemaQueueStatus + +export const zGetFalAiFlorence2LargeRegionToCategoryRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. 
+ */ +export const zGetFalAiFlorence2LargeRegionToCategoryRequestsByRequestIdResponse = + zSchemaFlorence2LargeRegionToCategoryOutput + +export const zGetFalAiFlorence2LargeCaptionRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFlorence2LargeCaptionRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlorence2LargeCaptionRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlorence2LargeCaptionRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlorence2LargeCaptionData = z.object({ + body: zSchemaFlorence2LargeCaptionInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiFlorence2LargeCaptionResponse = zSchemaQueueStatus + +export const zGetFalAiFlorence2LargeCaptionRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiFlorence2LargeCaptionRequestsByRequestIdResponse = + zSchemaFlorence2LargeCaptionOutput + +export const zGetFalAiFlorence2LargeDetailedCaptionRequestsByRequestIdStatusData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), + }) + +/** + * The request status. + */ +export const zGetFalAiFlorence2LargeDetailedCaptionRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiFlorence2LargeDetailedCaptionRequestsByRequestIdCancelData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * The request was cancelled. + */ +export const zPutFalAiFlorence2LargeDetailedCaptionRequestsByRequestIdCancelResponse = + z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiFlorence2LargeDetailedCaptionData = z.object({ + body: zSchemaFlorence2LargeDetailedCaptionInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. 
+ */ +export const zPostFalAiFlorence2LargeDetailedCaptionResponse = + zSchemaQueueStatus + +export const zGetFalAiFlorence2LargeDetailedCaptionRequestsByRequestIdData = + z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), + }) + +/** + * Result of the request. + */ +export const zGetFalAiFlorence2LargeDetailedCaptionRequestsByRequestIdResponse = + zSchemaFlorence2LargeDetailedCaptionOutput + +export const zGetFalAiImageutilsNsfwRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiImageutilsNsfwRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiImageutilsNsfwRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. + */ +export const zPutFalAiImageutilsNsfwRequestsByRequestIdCancelResponse = z + .object({ + success: z.optional( + z.boolean().register(z.globalRegistry, { + description: 'Whether the request was cancelled successfully.', + }), + ), + }) + .register(z.globalRegistry, { + description: 'The request was cancelled.', + }) + +export const zPostFalAiImageutilsNsfwData = z.object({ + body: zSchemaImageutilsNsfwInput, + path: z.optional(z.never()), + query: z.optional(z.never()), +}) + +/** + * The request status. + */ +export const zPostFalAiImageutilsNsfwResponse = zSchemaQueueStatus + +export const zGetFalAiImageutilsNsfwRequestsByRequestIdData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * Result of the request. + */ +export const zGetFalAiImageutilsNsfwRequestsByRequestIdResponse = + zSchemaImageutilsNsfwOutput + +export const zGetFalAiMoondreamBatchedRequestsByRequestIdStatusData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional( + z.object({ + logs: z.optional( + z.number().register(z.globalRegistry, { + description: + 'Whether to include logs (`1`) in the response or not (`0`).', + }), + ), + }), + ), +}) + +/** + * The request status. + */ +export const zGetFalAiMoondreamBatchedRequestsByRequestIdStatusResponse = + zSchemaQueueStatus + +export const zPutFalAiMoondreamBatchedRequestsByRequestIdCancelData = z.object({ + body: z.optional(z.never()), + path: z.object({ + request_id: z.string().register(z.globalRegistry, { + description: 'Request ID', + }), + }), + query: z.optional(z.never()), +}) + +/** + * The request was cancelled. 
+ */
+export const zPutFalAiMoondreamBatchedRequestsByRequestIdCancelResponse = z
+  .object({
+    success: z.optional(
+      z.boolean().register(z.globalRegistry, {
+        description: 'Whether the request was cancelled successfully.',
+      }),
+    ),
+  })
+  .register(z.globalRegistry, {
+    description: 'The request was cancelled.',
+  })
+
+export const zPostFalAiMoondreamBatchedData = z.object({
+  body: zSchemaMoondreamBatchedInput,
+  path: z.optional(z.never()),
+  query: z.optional(z.never()),
+})
+
+/**
+ * The request status.
+ */
+export const zPostFalAiMoondreamBatchedResponse = zSchemaQueueStatus
+
+export const zGetFalAiMoondreamBatchedRequestsByRequestIdData = z.object({
+  body: z.optional(z.never()),
+  path: z.object({
+    request_id: z.string().register(z.globalRegistry, {
+      description: 'Request ID',
+    }),
+  }),
+  query: z.optional(z.never()),
+})
+
+/**
+ * Result of the request.
+ */
+export const zGetFalAiMoondreamBatchedRequestsByRequestIdResponse =
+  zSchemaMoondreamBatchedOutput
+
+export const zGetFalAiLlavaNextRequestsByRequestIdStatusData = z.object({
+  body: z.optional(z.never()),
+  path: z.object({
+    request_id: z.string().register(z.globalRegistry, {
+      description: 'Request ID',
+    }),
+  }),
+  query: z.optional(
+    z.object({
+      logs: z.optional(
+        z.number().register(z.globalRegistry, {
+          description:
+            'Whether to include logs (`1`) in the response or not (`0`).',
+        }),
+      ),
+    }),
+  ),
+})
+
+/**
+ * The request status.
+ */
+export const zGetFalAiLlavaNextRequestsByRequestIdStatusResponse =
+  zSchemaQueueStatus
+
+export const zPutFalAiLlavaNextRequestsByRequestIdCancelData = z.object({
+  body: z.optional(z.never()),
+  path: z.object({
+    request_id: z.string().register(z.globalRegistry, {
+      description: 'Request ID',
+    }),
+  }),
+  query: z.optional(z.never()),
+})
+
+/**
+ * The request was cancelled.
+ */
+export const zPutFalAiLlavaNextRequestsByRequestIdCancelResponse = z
+  .object({
+    success: z.optional(
+      z.boolean().register(z.globalRegistry, {
+        description: 'Whether the request was cancelled successfully.',
+      }),
+    ),
+  })
+  .register(z.globalRegistry, {
+    description: 'The request was cancelled.',
+  })
+
+export const zPostFalAiLlavaNextData = z.object({
+  body: zSchemaLlavaNextInput,
+  path: z.optional(z.never()),
+  query: z.optional(z.never()),
+})
+
+/**
+ * The request status.
+ */
+export const zPostFalAiLlavaNextResponse = zSchemaQueueStatus
+
+export const zGetFalAiLlavaNextRequestsByRequestIdData = z.object({
+  body: z.optional(z.never()),
+  path: z.object({
+    request_id: z.string().register(z.globalRegistry, {
+      description: 'Request ID',
+    }),
+  }),
+  query: z.optional(z.never()),
+})
+
+/**
+ * Result of the request.
+ */
+export const zGetFalAiLlavaNextRequestsByRequestIdResponse =
+  zSchemaLlavaNextOutput
diff --git a/packages/typescript/ai-fal/src/image/image-provider-options.ts b/packages/typescript/ai-fal/src/image/image-provider-options.ts
new file mode 100644
index 00000000..528685f5
--- /dev/null
+++ b/packages/typescript/ai-fal/src/image/image-provider-options.ts
@@ -0,0 +1,55 @@
+/**
+ * fal.ai image size presets supported by most models.
+ * These are semantic names that fal.ai accepts directly.
+ */
+export type FalImageSizePreset =
+  | 'square_hd'
+  | 'square'
+  | 'landscape_4_3'
+  | 'landscape_16_9'
+  | 'portrait_4_3'
+  | 'portrait_16_9'
+
+/**
+ * Mapping of standard TanStack AI sizes to fal.ai size presets.
+ */
+const SIZE_TO_FAL_PRESET: Record<string, FalImageSizePreset> = {
+  '1024x1024': 'square_hd',
+  '512x512': 'square',
+  '1024x768': 'landscape_4_3',
+  '768x1024': 'portrait_4_3',
+  '1280x720': 'landscape_16_9',
+  '720x1280': 'portrait_16_9',
+  '1920x1080': 'landscape_16_9',
+  '1080x1920': 'portrait_16_9',
+}
+
+/**
+ * Maps TanStack AI size format (WIDTHxHEIGHT) to fal.ai format.
+ * fal.ai accepts either preset names or { width, height } objects.
+ */
+export function mapSizeToFalFormat(
+  size: string | undefined,
+): FalImageSizePreset | { width: number; height: number } | undefined {
+  if (!size) return undefined
+
+  // Check if it's a known preset mapping
+  const preset = SIZE_TO_FAL_PRESET[size]
+  if (preset) return preset
+
+  // Try to parse as WIDTHxHEIGHT
+  const match = size.match(/^(\d+)x(\d+)$/)
+  if (match && match[1] && match[2]) {
+    return {
+      width: parseInt(match[1], 10),
+      height: parseInt(match[2], 10),
+    }
+  }
+
+  // If it's already a preset name, return as-is
+  if (Object.values(SIZE_TO_FAL_PRESET).includes(size as FalImageSizePreset)) {
+    return size as FalImageSizePreset
+  }
+
+  return undefined
+}
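A quick sketch of how `mapSizeToFalFormat` behaves at each branch above; the return values follow the mapping and fallthrough logic in the function itself, and only the relative import path is an assumption:

import { mapSizeToFalFormat } from './image/image-provider-options'

// Known WIDTHxHEIGHT strings resolve to fal.ai's semantic presets:
mapSizeToFalFormat('1024x1024') // 'square_hd'
mapSizeToFalFormat('1920x1080') // 'landscape_16_9'

// Unlisted dimensions fall through to an explicit { width, height } object:
mapSizeToFalFormat('800x600') // { width: 800, height: 600 }

// Preset names pass through unchanged; anything else yields undefined:
mapSizeToFalFormat('landscape_4_3') // 'landscape_4_3'
mapSizeToFalFormat('not-a-size') // undefined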
diff --git a/packages/typescript/ai-fal/src/index.ts b/packages/typescript/ai-fal/src/index.ts
new file mode 100644
index 00000000..32d850e8
--- /dev/null
+++ b/packages/typescript/ai-fal/src/index.ts
@@ -0,0 +1,56 @@
+// ============================================================================
+// Image Adapter
+// ============================================================================
+
+export { FalImageAdapter, createFalImage, falImage } from './adapters/image'
+
+export {
+  mapSizeToFalFormat,
+  type FalImageSizePreset,
+} from './image/image-provider-options'
+
+// ============================================================================
+// Video Adapter (Experimental)
+// ============================================================================
+
+export { FalVideoAdapter, createFalVideo, falVideo } from './adapters/video'
+
+// ============================================================================
+// Model Types (from fal.ai's type system)
+// ============================================================================
+
+export {
+  type FalImageProviderOptions,
+  type FalVideoProviderOptions,
+} from './model-meta'
+
+export {
+  type FalImageInput,
+  type FalImageOutput,
+  type FalVideoInput,
+  type FalVideoOutput,
+  type FalImageModel,
+  type FalVideoModel,
+  type FalAudioModel,
+  type FalTextModel,
+  type Fal3dModel,
+  type FalJsonModel,
+  type FalAudioInput,
+  type FalAudioOutput,
+  type FalTextInput,
+  type FalTextOutput,
+  type Fal3dInput,
+  type Fal3dOutput,
+  type FalJsonInput,
+  type FalJsonOutput,
+} from './generated'
+// ============================================================================
+// Utils
+// ============================================================================
+
+export {
+  getFalApiKeyFromEnv,
+  configureFalClient,
+  generateId,
+  type FalClientConfig,
+} from './utils'
diff --git a/packages/typescript/ai-fal/src/model-meta.ts b/packages/typescript/ai-fal/src/model-meta.ts
new file mode 100644
index 00000000..96b673cd
--- /dev/null
+++ b/packages/typescript/ai-fal/src/model-meta.ts
@@ -0,0 +1,38 @@
+/**
+ * Re-export our generated comprehensive type system for full fal.ai model support.
+ * Generated from fal.ai's OpenAPI specs with types for 1000+ models across 25 categories.
+ * These types give you full autocomplete and type safety for any model.
+ */
+
+// Import for use in this file
+import type {
+  FalImageInput,
+  FalImageModel,
+  FalVideoInput,
+  FalVideoModel,
+} from './generated'
+
+/**
+ * Provider options for image generation, excluding fields TanStack AI handles.
+ * Use this for the `modelOptions` parameter in image generation.
+ *
+ * @example
+ * type FluxOptions = FalImageProviderOptions<'fal-ai/flux/dev'>
+ */
+export type FalImageProviderOptions<TModel extends FalImageModel> = Omit<
+  FalImageInput<TModel>,
+  'model' | 'prompt' | 'image_size' | 'num_images' | 'aspect_ratio'
+>
+
+/**
+ * Provider options for video generation, excluding fields TanStack AI handles.
+ * Use this for the `modelOptions` parameter in video generation.
+ */
+export type FalVideoProviderOptions<TModel extends FalVideoModel> = Omit<
+  FalVideoInput<TModel>,
+  | 'model'
+  | 'prompt'
+  | 'aspect_ratio'
+  | 'duration'
+  | 'resolution'
+>
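A hedged sketch of how these option types are meant to be consumed. The field names (`num_inference_steps`, `guidance_scale`, `seed`) are taken from the test file later in this diff; whether they type-check depends on the generated `FalImageInput` shape for this model:

import type { FalImageProviderOptions } from './model-meta'

// 'model', 'prompt', 'image_size', etc. are stripped by the Omit, leaving
// only the provider-specific knobs for the chosen model.
type FluxDevOptions = FalImageProviderOptions<'fal-ai/flux/dev'>

const fluxOptions: FluxDevOptions = {
  num_inference_steps: 28,
  guidance_scale: 3.5,
  seed: 12345,
}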
diff --git a/packages/typescript/ai-fal/src/utils/client.ts b/packages/typescript/ai-fal/src/utils/client.ts
new file mode 100644
index 00000000..05a259a4
--- /dev/null
+++ b/packages/typescript/ai-fal/src/utils/client.ts
@@ -0,0 +1,58 @@
+import { fal } from '@fal-ai/client'
+
+export interface FalClientConfig {
+  apiKey: string
+  proxyUrl?: string
+}
+
+interface EnvObject {
+  FAL_KEY?: string
+}
+
+interface WindowWithEnv {
+  env?: EnvObject
+}
+
+// Resolves the environment from window.env (browser builds) or process.env (Node).
+function getEnvironment(): EnvObject | undefined {
+  if (typeof globalThis !== 'undefined') {
+    const win = (globalThis as { window?: WindowWithEnv }).window
+    if (win?.env) {
+      return win.env
+    }
+  }
+  if (typeof process !== 'undefined') {
+    return process.env as EnvObject
+  }
+  return undefined
+}
+
+export function getFalApiKeyFromEnv(): string {
+  const env = getEnvironment()
+  const key = env?.FAL_KEY
+
+  if (!key) {
+    throw new Error(
+      'FAL_KEY is required. Please set it in your environment variables or use the factory function with an explicit API key.',
+    )
+  }
+
+  return key
+}
+
+// A proxyUrl takes precedence; otherwise credentials come from the config or FAL_KEY.
+export function configureFalClient(config?: FalClientConfig): void {
+  if (config?.proxyUrl) {
+    fal.config({
+      proxyUrl: config.proxyUrl,
+    })
+  } else {
+    fal.config({
+      credentials: config?.apiKey ?? getFalApiKeyFromEnv(),
+    })
+  }
+}
+
+export function generateId(prefix: string): string {
+  return `${prefix}-${Date.now()}-${Math.random().toString(36).substring(7)}`
+}
diff --git a/packages/typescript/ai-fal/src/utils/index.ts b/packages/typescript/ai-fal/src/utils/index.ts
new file mode 100644
index 00000000..acd4bfb6
--- /dev/null
+++ b/packages/typescript/ai-fal/src/utils/index.ts
@@ -0,0 +1,6 @@
+export {
+  getFalApiKeyFromEnv,
+  configureFalClient,
+  generateId,
+  type FalClientConfig,
+} from './client'
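A minimal wiring sketch for these utilities, assuming the `'../src'` entry-point import used by the test file below; the key value is a placeholder:

import { configureFalClient, createFalImage } from '../src'

// With no argument, the key is read from FAL_KEY (and a missing key throws):
configureFalClient()

// An explicit key wins over the environment; a proxyUrl would win over both:
configureFalClient({ apiKey: 'fal-xxxx' })

// Image adapters take (model, apiKey), matching the tests below:
const adapter = createFalImage('fal-ai/flux/dev', 'fal-xxxx')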
getFalApiKeyFromEnv(), + }) + } +} + +export function generateId(prefix: string): string { + return `${prefix}-${Date.now()}-${Math.random().toString(36).substring(7)}` +} diff --git a/packages/typescript/ai-fal/src/utils/index.ts b/packages/typescript/ai-fal/src/utils/index.ts new file mode 100644 index 00000000..acd4bfb6 --- /dev/null +++ b/packages/typescript/ai-fal/src/utils/index.ts @@ -0,0 +1,6 @@ +export { + getFalApiKeyFromEnv, + configureFalClient, + generateId, + type FalClientConfig, +} from './client' diff --git a/packages/typescript/ai-fal/tests/image-adapter.test.ts b/packages/typescript/ai-fal/tests/image-adapter.test.ts new file mode 100644 index 00000000..6909af2f --- /dev/null +++ b/packages/typescript/ai-fal/tests/image-adapter.test.ts @@ -0,0 +1,261 @@ +import { beforeEach, describe, expect, it, vi } from 'vitest' +import { createFalImage, mapSizeToFalFormat } from '../src' + +// Declare mocks at module level +let mockSubscribe: any +let mockConfig: any + +// Mock the fal.ai client +vi.mock('@fal-ai/client', () => { + return { + fal: { + subscribe: (...args: Array<any>) => mockSubscribe(...args), + config: (...args: Array<any>) => mockConfig(...args), + }, + } +}) + +const createAdapter = () => createFalImage('fal-ai/flux/dev', 'test-key') + +function createMockImageResponse(images: Array<{ url: string }>) { + return { + data: { + images, + }, + requestId: 'req-123', + } +} + +describe('Fal Image Adapter', () => { + beforeEach(() => { + vi.clearAllMocks() + mockSubscribe = vi.fn() + mockConfig = vi.fn() + }) + + it('generates images with correct API call', async () => { + const mockResponse = createMockImageResponse([ + { url: 'https://fal.media/files/image1.png' }, + ]) + + mockSubscribe.mockResolvedValueOnce(mockResponse) + + const adapter = createAdapter() + + const result = await adapter.generateImages({ + model: 'fal-ai/flux/dev', + prompt: 'A futuristic city at sunset', + }) + + expect(mockSubscribe).toHaveBeenCalledTimes(1) + + const [model, options] = mockSubscribe.mock.calls[0]! + expect(model).toBe('fal-ai/flux/dev') + expect(options).toMatchObject({ + input: { + prompt: 'A futuristic city at sunset', + }, + }) + + expect(result.images).toHaveLength(1) + expect(result.images[0]!.url).toBe('https://fal.media/files/image1.png') + expect(result.model).toBe('fal-ai/flux/dev') + }) + + it('generates multiple images', async () => { + const mockResponse = createMockImageResponse([ + { url: 'https://fal.media/files/image1.png' }, + { url: 'https://fal.media/files/image2.png' }, + ]) + + mockSubscribe.mockResolvedValueOnce(mockResponse) + + const adapter = createAdapter() + + const result = await adapter.generateImages({ + model: 'fal-ai/flux/dev', + prompt: 'A cute robot mascot', + numberOfImages: 2, + }) + + const [, options] = mockSubscribe.mock.calls[0]!
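+ /* numberOfImages is expected to surface as fal.ai's num_images field */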
+ expect(options.input).toMatchObject({ + num_images: 2, + }) + + expect(result.images).toHaveLength(2) + expect(result.images[0]!.url).toBe('https://fal.media/files/image1.png') + expect(result.images[1]!.url).toBe('https://fal.media/files/image2.png') + }) + + it('handles base64 image responses', async () => { + const base64Data = + 'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg==' + const mockResponse = createMockImageResponse([ + { url: `data:image/png;base64,${base64Data}` }, + ]) + + mockSubscribe.mockResolvedValueOnce(mockResponse) + + const adapter = createAdapter() + + const result = await adapter.generateImages({ + model: 'fal-ai/flux/dev', + prompt: 'A simple test image', + }) + + expect(result.images).toHaveLength(1) + expect(result.images[0]!.b64Json).toBe(base64Data) + expect(result.images[0]!.url).toBe(`data:image/png;base64,${base64Data}`) + }) + + it('converts size to fal format preset', async () => { + const mockResponse = createMockImageResponse([ + { url: 'https://fal.media/files/image.png' }, + ]) + + mockSubscribe.mockResolvedValueOnce(mockResponse) + + const adapter = createAdapter() + + await adapter.generateImages({ + model: 'fal-ai/flux/dev', + prompt: 'A wide landscape', + size: '1024x768', // Should map to landscape_4_3 + }) + + const [, options] = mockSubscribe.mock.calls[0]! + expect(options.input).toMatchObject({ + image_size: 'landscape_4_3', + }) + }) + + it('converts custom size to width/height object', async () => { + const mockResponse = createMockImageResponse([ + { url: 'https://fal.media/files/image.png' }, + ]) + + mockSubscribe.mockResolvedValueOnce(mockResponse) + + const adapter = createAdapter() + + await adapter.generateImages({ + model: 'fal-ai/flux/dev', + prompt: 'A custom size image', + size: '800x600', + }) + + const [, options] = mockSubscribe.mock.calls[0]! + expect(options.input).toMatchObject({ + image_size: { width: 800, height: 600 }, + }) + }) + + it('passes model options correctly', async () => { + const mockResponse = createMockImageResponse([ + { url: 'https://fal.media/files/image.png' }, + ]) + + mockSubscribe.mockResolvedValueOnce(mockResponse) + + const adapter = createAdapter() + + await adapter.generateImages({ + model: 'fal-ai/flux/dev', + prompt: 'Test', + modelOptions: { + num_inference_steps: 28, + guidance_scale: 3.5, + seed: 12345, + }, + }) + + const [, options] = mockSubscribe.mock.calls[0]! 
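+ /* provider-specific modelOptions should be forwarded to the request input verbatim */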
+ expect(options.input).toMatchObject({ + num_inference_steps: 28, + guidance_scale: 3.5, + seed: 12345, + }) + }) + + it('handles single image response format', async () => { + const mockResponse = { + data: { + image: { url: 'https://fal.media/files/single.png' }, + }, + requestId: 'req-456', + } + + mockSubscribe.mockResolvedValueOnce(mockResponse) + + const adapter = createAdapter() + + const result = await adapter.generateImages({ + model: 'fal-ai/flux/dev', + prompt: 'Single image test', + }) + + expect(result.images).toHaveLength(1) + expect(result.images[0]!.url).toBe('https://fal.media/files/single.png') + }) + + it('throws error on SDK error', async () => { + mockSubscribe.mockRejectedValueOnce(new Error('Model not found')) + + const adapter = createAdapter() + + await expect( + adapter.generateImages({ + model: 'invalid/model', + prompt: 'Test prompt', + }), + ).rejects.toThrow('Model not found') + }) + + it('configures client with API key', () => { + createFalImage('fal-ai/flux/dev', 'my-api-key') + + expect(mockConfig).toHaveBeenCalledWith({ + credentials: 'my-api-key', + }) + }) + + it('configures client with proxy URL when provided', () => { + createFalImage('fal-ai/flux/dev', 'my-api-key', { + proxyUrl: '/api/fal/proxy', + }) + + expect(mockConfig).toHaveBeenCalledWith({ + proxyUrl: '/api/fal/proxy', + }) + }) +}) + +describe('mapSizeToFalFormat', () => { + it('maps known sizes to presets', () => { + expect(mapSizeToFalFormat('1024x1024')).toBe('square_hd') + expect(mapSizeToFalFormat('512x512')).toBe('square') + expect(mapSizeToFalFormat('1024x768')).toBe('landscape_4_3') + expect(mapSizeToFalFormat('768x1024')).toBe('portrait_4_3') + expect(mapSizeToFalFormat('1280x720')).toBe('landscape_16_9') + expect(mapSizeToFalFormat('720x1280')).toBe('portrait_16_9') + }) + + it('parses custom WIDTHxHEIGHT format', () => { + expect(mapSizeToFalFormat('800x600')).toEqual({ width: 800, height: 600 }) + expect(mapSizeToFalFormat('1920x1200')).toEqual({ + width: 1920, + height: 1200, + }) + }) + + it('returns preset names as-is', () => { + expect(mapSizeToFalFormat('square_hd')).toBe('square_hd') + expect(mapSizeToFalFormat('landscape_4_3')).toBe('landscape_4_3') + }) + + it('returns undefined for invalid input', () => { + expect(mapSizeToFalFormat(undefined)).toBeUndefined() + expect(mapSizeToFalFormat('invalid')).toBeUndefined() + }) +}) diff --git a/packages/typescript/ai-fal/tests/video-adapter.test.ts b/packages/typescript/ai-fal/tests/video-adapter.test.ts new file mode 100644 index 00000000..6691e17d --- /dev/null +++ b/packages/typescript/ai-fal/tests/video-adapter.test.ts @@ -0,0 +1,260 @@ +import { beforeEach, describe, expect, it, vi } from 'vitest' +import { createFalVideo } from '../src' + +// Declare mocks at module level +let mockQueueSubmit: any +let mockQueueStatus: any +let mockQueueResult: any +let mockConfig: any + +// Mock the fal.ai client +vi.mock('@fal-ai/client', () => { + return { + fal: { + queue: { + submit: (...args: Array<any>) => mockQueueSubmit(...args), + status: (...args: Array<any>) => mockQueueStatus(...args), + result: (...args: Array<any>) => mockQueueResult(...args), + }, + config: (...args: Array<any>) => mockConfig(...args), + }, + } +}) + +const createAdapter = () => + createFalVideo('fal-ai/veo3/image-to-video', 'test-key') + +describe('Fal Video Adapter', () => { + beforeEach(() => { + vi.clearAllMocks() + mockQueueSubmit = vi.fn() + mockQueueStatus = vi.fn() + mockQueueResult = vi.fn() + mockConfig = vi.fn() + }) + + describe('createVideoJob', () => {
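+ /* fal.ai video generation is queue-based: submit returns a request_id that later status/result calls poll */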
it('submits video generation job to queue', async () => { + mockQueueSubmit.mockResolvedValueOnce({ + request_id: 'job-123', + }) + + const adapter = createAdapter() + + const result = await adapter.createVideoJob({ + model: 'fal-ai/veo3/image-to-video', + prompt: 'A cat walking in the garden', + }) + + expect(mockQueueSubmit).toHaveBeenCalledTimes(1) + + const [model, options] = mockQueueSubmit.mock.calls[0]! + expect(model).toBe('fal-ai/veo3/image-to-video') + expect(options).toMatchObject({ + input: { + prompt: 'A cat walking in the garden', + }, + }) + + expect(result.jobId).toBe('job-123') + expect(result.model).toBe('fal-ai/veo3/image-to-video') + }) + + it('includes image URL for image-to-video models', async () => { + mockQueueSubmit.mockResolvedValueOnce({ + request_id: 'job-456', + }) + + const adapter = createAdapter() + + await adapter.createVideoJob({ + model: 'fal-ai/veo3/image-to-video', + prompt: 'A stylish woman walks down a Tokyo street', + modelOptions: { + image_url: 'https://example.com/image.jpg', + }, + }) + + const [, options] = mockQueueSubmit.mock.calls[0]! + expect(options.input).toMatchObject({ + image_url: 'https://example.com/image.jpg', + }) + }) + + it('includes duration option', async () => { + mockQueueSubmit.mockResolvedValueOnce({ + request_id: 'job-789', + }) + + const adapter = createAdapter() + + await adapter.createVideoJob({ + model: 'fal-ai/veo3/image-to-video', + prompt: 'A time lapse of a sunset', + duration: 10, + }) + + const [, options] = mockQueueSubmit.mock.calls[0]! + expect(options.input).toMatchObject({ + duration: 10, + }) + }) + + it('converts size to aspect ratio', async () => { + mockQueueSubmit.mockResolvedValueOnce({ + request_id: 'job-ar', + }) + + const adapter = createAdapter() + + await adapter.createVideoJob({ + model: 'fal-ai/veo3/image-to-video', + prompt: 'A wide landscape video', + size: '1920x1080', // 16:9 + }) + + const [, options] = mockQueueSubmit.mock.calls[0]! + expect(options.input).toMatchObject({ + aspect_ratio: '16:9', + }) + }) + + it('passes model-specific options', async () => { + mockQueueSubmit.mockResolvedValueOnce({ + request_id: 'job-opts', + }) + + const adapter = createAdapter() + + await adapter.createVideoJob({ + model: 'fal-ai/veo3/image-to-video', + prompt: 'Test video', + modelOptions: { + image_url: 'https://example.com/image.jpg', + auto_fix: true, + }, + }) + + const [, options] = mockQueueSubmit.mock.calls[0]! + expect(options.input).toMatchObject({ + image_url: 'https://example.com/image.jpg', + auto_fix: true, + }) + }) + }) + + describe('getVideoStatus', () => { + it('returns pending status for queued jobs', async () => { + mockQueueStatus.mockResolvedValueOnce({ + status: 'IN_QUEUE', + queue_position: 5, + }) + + const adapter = createAdapter() + + const result = await adapter.getVideoStatus('job-123') + + expect(mockQueueStatus).toHaveBeenCalledWith( + 'fal-ai/veo3/image-to-video', + { requestId: 'job-123', logs: true }, + ) + + expect(result.jobId).toBe('job-123') + expect(result.status).toBe('pending') + expect(result.progress).toBe(50) // 100 - 5 * 10 = 50 + }) + + it('returns processing status for in-progress jobs', async () => { + mockQueueStatus.mockResolvedValueOnce({ + status: 'IN_PROGRESS', + logs: [{ message: 'Generating frames...' 
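+ /* logs appear here because getVideoStatus requests them with logs: true */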
}], + }) + + const adapter = createAdapter() + + const result = await adapter.getVideoStatus('job-456') + + expect(result.status).toBe('processing') + }) + + it('returns completed status for finished jobs', async () => { + mockQueueStatus.mockResolvedValueOnce({ + status: 'COMPLETED', + }) + + const adapter = createAdapter() + + const result = await adapter.getVideoStatus('job-789') + + expect(result.status).toBe('completed') + }) + }) + + describe('getVideoUrl', () => { + it('returns video URL from video object', async () => { + mockQueueResult.mockResolvedValueOnce({ + data: { + video: { url: 'https://fal.media/files/video.mp4' }, + }, + }) + + const adapter = createAdapter() + + const result = await adapter.getVideoUrl('job-123') + + expect(mockQueueResult).toHaveBeenCalledWith( + 'fal-ai/veo3/image-to-video', + { requestId: 'job-123' }, + ) + + expect(result.jobId).toBe('job-123') + expect(result.url).toBe('https://fal.media/files/video.mp4') + }) + + it('returns video URL from video_url field', async () => { + mockQueueResult.mockResolvedValueOnce({ + data: { + video_url: 'https://fal.media/files/video2.mp4', + }, + }) + + const adapter = createAdapter() + + const result = await adapter.getVideoUrl('job-456') + + expect(result.url).toBe('https://fal.media/files/video2.mp4') + }) + + it('throws error when video URL is not found', async () => { + mockQueueResult.mockResolvedValueOnce({ + data: {}, + }) + + const adapter = createAdapter() + + await expect(adapter.getVideoUrl('job-789')).rejects.toThrow( + 'Video URL not found in response', + ) + }) + }) + + describe('client configuration', () => { + it('configures client with API key', () => { + createFalVideo('fal-ai/veo3/image-to-video', 'my-api-key') + + expect(mockConfig).toHaveBeenCalledWith({ + credentials: 'my-api-key', + }) + }) + + it('configures client with proxy URL when provided', () => { + createFalVideo('fal-ai/veo3/image-to-video', 'my-api-key', { + proxyUrl: '/api/fal/proxy', + }) + + expect(mockConfig).toHaveBeenCalledWith({ + proxyUrl: '/api/fal/proxy', + }) + }) + }) +}) diff --git a/packages/typescript/ai-fal/tsconfig.json b/packages/typescript/ai-fal/tsconfig.json new file mode 100644 index 00000000..20ecbb36 --- /dev/null +++ b/packages/typescript/ai-fal/tsconfig.json @@ -0,0 +1,16 @@ +{ + "extends": "../../../tsconfig.json", + "compilerOptions": { + "outDir": "dist" + }, + "include": [ + "scripts/**/*.ts", + "src/**/*.ts", + "src/**/*.tsx", + "./tests/**/*.ts", + "openapi-ts.config.ts", + "eslint.config.ts", + "vite.config.ts" + ], + "exclude": ["node_modules", "dist"] +} diff --git a/packages/typescript/ai-fal/vite.config.ts b/packages/typescript/ai-fal/vite.config.ts new file mode 100644 index 00000000..77bcc2e6 --- /dev/null +++ b/packages/typescript/ai-fal/vite.config.ts @@ -0,0 +1,36 @@ +import { defineConfig, mergeConfig } from 'vitest/config' +import { tanstackViteConfig } from '@tanstack/vite-config' +import packageJson from './package.json' + +const config = defineConfig({ + test: { + name: packageJson.name, + dir: './', + watch: false, + globals: true, + environment: 'node', + include: ['tests/**/*.test.ts'], + coverage: { + provider: 'v8', + reporter: ['text', 'json', 'html', 'lcov'], + exclude: [ + 'node_modules/', + 'dist/', + 'tests/', + '**/*.test.ts', + '**/*.config.ts', + '**/types.ts', + ], + include: ['src/**/*.ts'], + }, + }, +}) + +export default mergeConfig( + config, + tanstackViteConfig({ + entry: ['./src/index.ts'], + srcDir: './src', + cjs: false, + }), +) diff --git 
a/packages/typescript/ai-openrouter/src/model-meta.ts b/packages/typescript/ai-openrouter/src/model-meta.ts index 84fed2ed..1ba6fd99 100644 --- a/packages/typescript/ai-openrouter/src/model-meta.ts +++ b/packages/typescript/ai-openrouter/src/model-meta.ts @@ -1,17358 +1,8596 @@ -import type { - OpenRouterBaseOptions, - OpenRouterCommonOptions, -} from './text/text-provider-options' -const NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B_FREE = { - id: 'nousresearch/hermes-3-llama-3.1-405b:free', - name: 'Nous: Hermes 3 405B Instruct (free)', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'stop', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 0, - cached: 0, - }, - output: { - normal: 0, - }, - }, - image: 0, - }, -} as const -const META_LLAMA_LLAMA_3_3_70B_INSTRUCT_FREE = { - id: 'meta-llama/llama-3.3-70b-instruct:free', - name: 'Meta: Llama 3.3 70B Instruct (free)', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'repetition_penalty', - 'seed', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 0, - cached: 0, - }, - output: { - normal: 0, - }, - }, - image: 0, - }, -} as const -const QWEN_QWEN3_CODER_FREE = { - id: 'qwen/qwen3-coder:free', - name: 'Qwen: Qwen3 Coder 480B A35B (free)', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'seed', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 1048576, - max_output_tokens: 262000, - pricing: { - text: { - input: { - normal: 0, - cached: 0, - }, - output: { - normal: 0, - }, - }, - image: 0, - }, -} as const -const TNGTECH_DEEPSEEK_R1T2_CHIMERA_FREE = { - id: 'tngtech/deepseek-r1t2-chimera:free', - name: 'TNG: DeepSeek R1T2 Chimera (free)', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'seed', - 'stop', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 163840, - pricing: { - text: { - input: { - normal: 0, - cached: 0, - }, - output: { - normal: 0, - }, - }, - image: 0, - }, -} as const -const QWEN_QWEN3_4B_FREE = { - id: 'qwen/qwen3-4b:free', - name: 'Qwen: Qwen3 4B (free)', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'presence_penalty', - 'reasoning', - 'response_format', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 128000, - pricing: { - text: { - input: { - normal: 0, - cached: 0, - }, - output: { - normal: 0, - }, - }, - image: 0, - }, -} as const -const GOOGLE_GEMMA_3N_E2B_IT_FREE = { - id: 'google/gemma-3n-e2b-it:free', - name: 'Google: Gemma 3n 2B (free)', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'temperature', - 'top_p', - ], - }, - context_window: 8192, - max_output_tokens: 2048, - pricing: { - text: { - input: { - normal: 0, - cached: 0, - }, - output: { - normal: 0, - }, - }, - image: 0, - }, -} as const -const 
META_LLAMA_LLAMA_3_2_3B_INSTRUCT_FREE = { - id: 'meta-llama/llama-3.2-3b-instruct:free', - name: 'Meta: Llama 3.2 3B Instruct (free)', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'stop', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 0, - cached: 0, - }, - output: { - normal: 0, - }, - }, - image: 0, - }, -} as const -const GOOGLE_GEMMA_3_12B_IT_FREE = { - id: 'google/gemma-3-12b-it:free', - name: 'Google: Gemma 3 12B (free)', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: ['max_tokens', 'seed', 'stop', 'temperature', 'top_p'], - }, - context_window: 131072, - max_output_tokens: 8192, - pricing: { - text: { - input: { - normal: 0, - cached: 0, - }, - output: { - normal: 0, - }, - }, - image: 0, - }, -} as const -const COGNITIVECOMPUTATIONS_DOLPHIN_MISTRAL_24B_VENICE_EDITION_FREE = { - id: 'cognitivecomputations/dolphin-mistral-24b-venice-edition:free', - name: 'Venice: Uncensored (free)', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'stop', - 'structured_outputs', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 32768, - pricing: { - text: { - input: { - normal: 0, - cached: 0, - }, - output: { - normal: 0, - }, - }, - image: 0, - }, -} as const -const Z_AI_GLM_4_5_AIR_FREE = { - id: 'z-ai/glm-4.5-air:free', - name: 'Z.AI: GLM 4.5 Air (free)', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 96000, - pricing: { - text: { - input: { - normal: 0, - cached: 0, - }, - output: { - normal: 0, - }, - }, - image: 0, - }, -} as const -const MOONSHOTAI_KIMI_K2_FREE = { - id: 'moonshotai/kimi-k2:free', - name: 'MoonshotAI: Kimi K2 0711 (free)', - supports: { - input: ['text'], - output: ['text'], - supports: ['max_tokens', 'seed', 'stop', 'temperature'], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 0, - cached: 0, - }, - output: { - normal: 0, - }, - }, - image: 0, - }, -} as const -const GOOGLE_GEMMA_3_27B_IT_FREE = { - id: 'google/gemma-3-27b-it:free', - name: 'Google: Gemma 3 27B (free)', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 0, - cached: 0, - }, - output: { - normal: 0, - }, - }, - image: 0, - }, -} as const -const GOOGLE_GEMMA_3_4B_IT_FREE = { - id: 'google/gemma-3-4b-it:free', - name: 'Google: Gemma 3 4B (free)', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'max_tokens', - 'response_format', - 'seed', - 'stop', - 'temperature', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 8192, - pricing: { - text: { - input: { - normal: 0, - cached: 0, - }, - output: { - normal: 0, - }, - }, - image: 0, - }, -} as const -const NVIDIA_NEMOTRON_3_NANO_30B_A3B_FREE = { - id: 'nvidia/nemotron-3-nano-30b-a3b:free', - name: 'NVIDIA: Nemotron 3 Nano 30B A3B (free)', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'include_reasoning', 
- 'max_tokens', - 'reasoning', - 'seed', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 256000, - pricing: { - text: { - input: { - normal: 0, - cached: 0, - }, - output: { - normal: 0, - }, - }, - image: 0, - }, -} as const -const TNGTECH_TNG_R1T_CHIMERA_FREE = { - id: 'tngtech/tng-r1t-chimera:free', - name: 'TNG: R1T Chimera (free)', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 163840, - max_output_tokens: 65536, - pricing: { - text: { - input: { - normal: 0, - cached: 0, - }, - output: { - normal: 0, - }, - }, - image: 0, - }, -} as const -const NVIDIA_NEMOTRON_NANO_12B_V2_VL_FREE = { - id: 'nvidia/nemotron-nano-12b-v2-vl:free', - name: 'NVIDIA: Nemotron Nano 12B 2 VL (free)', - supports: { - input: ['image', 'text', 'video'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'seed', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 128000, - max_output_tokens: 128000, - pricing: { - text: { - input: { - normal: 0, - cached: 0, - }, - output: { - normal: 0, - }, - }, - image: 0, - }, -} as const -const ARCEE_AI_TRINITY_MINI_FREE = { - id: 'arcee-ai/trinity-mini:free', - name: 'Arcee AI: Trinity Mini (free)', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'response_format', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 0, - cached: 0, - }, - output: { - normal: 0, - }, - }, - image: 0, - }, -} as const -const TNGTECH_DEEPSEEK_R1T_CHIMERA_FREE = { - id: 'tngtech/deepseek-r1t-chimera:free', - name: 'TNG: DeepSeek R1T Chimera (free)', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'seed', - 'stop', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 163840, - pricing: { - text: { - input: { - normal: 0, - cached: 0, - }, - output: { - normal: 0, - }, - }, - image: 0, - }, -} as const -const GOOGLE_GEMMA_3N_E4B_IT_FREE = { - id: 'google/gemma-3n-e4b-it:free', - name: 'Google: Gemma 3n 4B (free)', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'temperature', - 'top_p', - ], - }, - context_window: 32000, - max_output_tokens: 2048, - pricing: { - text: { - input: { - normal: 0, - cached: 0, - }, - output: { - normal: 0, - }, - }, - image: 0, - }, -} as const -const GOOGLE_GEMINI_2_0_FLASH_EXP_FREE = { - id: 'google/gemini-2.0-flash-exp:free', - name: 'Google: Gemini 2.0 Flash Experimental (free)', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'max_tokens', - 'response_format', - 'seed', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 1048576, - max_output_tokens: 8192, - pricing: { - text: { - input: { - normal: 0, - cached: 0, - }, - output: { - normal: 0, - }, - }, - image: 0, - }, -} as const -const OPENAI_GPT_OSS_120B_FREE = { - 
id: 'openai/gpt-oss-120b:free', - name: 'OpenAI: gpt-oss-120b (free)', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'seed', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 0, - cached: 0, - }, - output: { - normal: 0, - }, - }, - image: 0, - }, -} as const -const OPENAI_GPT_OSS_20B_FREE = { - id: 'openai/gpt-oss-20b:free', - name: 'OpenAI: gpt-oss-20b (free)', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'seed', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 0, - cached: 0, - }, - output: { - normal: 0, - }, - }, - image: 0, - }, -} as const -const MISTRALAI_MISTRAL_SMALL_3_1_24B_INSTRUCT_FREE = { - id: 'mistralai/mistral-small-3.1-24b-instruct:free', - name: 'Mistral: Mistral Small 3.1 24B (free)', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 128000, - pricing: { - text: { - input: { - normal: 0, - cached: 0, - }, - output: { - normal: 0, - }, - }, - image: 0, - }, -} as const -const MISTRALAI_DEVSTRAL_2512_FREE = { - id: 'mistralai/devstral-2512:free', - name: 'Mistral: Devstral 2 2512 (free)', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 262144, - pricing: { - text: { - input: { - normal: 0, - cached: 0, - }, - output: { - normal: 0, - }, - }, - image: 0, - }, -} as const -const XIAOMI_MIMO_V2_FLASH_FREE = { - id: 'xiaomi/mimo-v2-flash:free', - name: 'Xiaomi: MiMo-V2-Flash (free)', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'presence_penalty', - 'reasoning', - 'response_format', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 262144, - max_output_tokens: 65536, - pricing: { - text: { - input: { - normal: 0, - cached: 0, - }, - output: { - normal: 0, - }, - }, - image: 0, - }, -} as const -const ALLENAI_MOLMO_2_8B_FREE = { - id: 'allenai/molmo-2-8b:free', - name: 'AllenAI: Molmo2 8B (free)', - supports: { - input: ['text', 'image', 'video'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'seed', - 'stop', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 36864, - max_output_tokens: 36864, - pricing: { - text: { - input: { - normal: 0, - cached: 0, - }, - output: { - normal: 0, - }, - }, - image: 0, - }, -} as const -const DEEPSEEK_DEEPSEEK_R1_0528_FREE = { - id: 'deepseek/deepseek-r1-0528:free', - name: 'DeepSeek: R1 0528 (free)', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'temperature', - ], - }, - context_window: 163840, - pricing: { - text: { - input: { - normal: 0, - cached: 0, - }, - output: { - 
normal: 0, - }, - }, - image: 0, - }, -} as const -const NVIDIA_NEMOTRON_NANO_9B_V2_FREE = { - id: 'nvidia/nemotron-nano-9b-v2:free', - name: 'NVIDIA: Nemotron Nano 9B V2 (free)', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'response_format', - 'seed', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 32000, - pricing: { - text: { - input: { - normal: 0, - cached: 0, - }, - output: { - normal: 0, - }, - }, - image: 0, - }, -} as const -const META_LLAMA_LLAMA_3_1_405B_INSTRUCT_FREE = { - id: 'meta-llama/llama-3.1-405b-instruct:free', - name: 'Meta: Llama 3.1 405B Instruct (free)', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'repetition_penalty', - 'temperature', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 0, - cached: 0, - }, - output: { - normal: 0, - }, - }, - image: 0, - }, -} as const -const QWEN_QWEN_2_5_VL_7B_INSTRUCT_FREE = { - id: 'qwen/qwen-2.5-vl-7b-instruct:free', - name: 'Qwen: Qwen2.5-VL 7B Instruct (free)', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'repetition_penalty', - 'temperature', - ], - }, - context_window: 32768, - pricing: { - text: { - input: { - normal: 0, - cached: 0, - }, - output: { - normal: 0, - }, - }, - image: 0, - }, -} as const -const MISTRALAI_MISTRAL_7B_INSTRUCT_FREE = { - id: 'mistralai/mistral-7b-instruct:free', - name: 'Mistral: Mistral 7B Instruct (free)', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 32768, - max_output_tokens: 16384, - pricing: { - text: { - input: { - normal: 0, - cached: 0, - }, - output: { - normal: 0, - }, - }, - image: 0, - }, -} as const -const LIQUID_LFM_2_2_6B = { - id: 'liquid/lfm-2.2-6b', - name: 'LiquidAI/LFM2-2.6B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'seed', - 'stop', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 8192, - pricing: { - text: { - input: { - normal: 0.01, - cached: 0, - }, - output: { - normal: 0.02, - }, - }, - image: 0, - }, -} as const -const LIQUID_LFM2_8B_A1B = { - id: 'liquid/lfm2-8b-a1b', - name: 'LiquidAI/LFM2-8B-A1B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'seed', - 'stop', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 8192, - pricing: { - text: { - input: { - normal: 0.01, - cached: 0, - }, - output: { - normal: 0.02, - }, - }, - image: 0, - }, -} as const -const IBM_GRANITE_GRANITE_4_0_H_MICRO = { - id: 'ibm-granite/granite-4.0-h-micro', - name: 'IBM: Granite 4.0 Micro', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'repetition_penalty', - 'seed', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 8192, - pricing: { - text: { - input: { - normal: 0.017, - cached: 0, - }, - output: { - normal: 0.11, - }, - }, - 
image: 0, - }, -} as const -const GOOGLE_GEMMA_3_4B_IT = { - id: 'google/gemma-3-4b-it', - name: 'Google: Gemma 3 4B', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 0.01703012, - cached: 0, - }, - output: { - normal: 0.0681536, - }, - }, - image: 0, - }, -} as const -const META_LLAMA_LLAMA_3_2_3B_INSTRUCT = { - id: 'meta-llama/llama-3.2-3b-instruct', - name: 'Meta: Llama 3.2 3B Instruct', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 16384, - pricing: { - text: { - input: { - normal: 0.02, - cached: 0, - }, - output: { - normal: 0.02, - }, - }, - image: 0, - }, -} as const -const MISTRALAI_MISTRAL_NEMO = { - id: 'mistralai/mistral-nemo', - name: 'Mistral: Mistral Nemo', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 16384, - pricing: { - text: { - input: { - normal: 0.02, - cached: 0, - }, - output: { - normal: 0.04, - }, - }, - image: 0, - }, -} as const -const GOOGLE_GEMMA_3N_E4B_IT = { - id: 'google/gemma-3n-e4b-it', - name: 'Google: Gemma 3n 4B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'stop', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 32000, - pricing: { - text: { - input: { - normal: 0.02, - cached: 0, - }, - output: { - normal: 0.04, - }, - }, - image: 0, - }, -} as const -const META_LLAMA_LLAMA_3_1_8B_INSTRUCT = { - id: 'meta-llama/llama-3.1-8b-instruct', - name: 'Meta: Llama 3.1 8B Instruct', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 16384, - pricing: { - text: { - input: { - normal: 0.02, - cached: 0, - }, - output: { - normal: 0.05, - }, - }, - image: 0, - }, -} as const -const META_LLAMA_LLAMA_GUARD_3_8B = { - id: 'meta-llama/llama-guard-3-8b', - name: 'Llama Guard 3 8B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'temperature', - 'top_k', - 'top_p', - ], - }, - pricing: { - text: { - input: { - normal: 0.02, - cached: 0, - }, - output: { - normal: 0.06, - }, - }, - image: 0, - }, -} as const -const OPENAI_GPT_OSS_20B = { - id: 'openai/gpt-oss-20b', - name: 'OpenAI: gpt-oss-20b', - supports: { - input: ['text'], - output: ['text'], - 
supports: [ - 'frequency_penalty', - 'include_reasoning', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'reasoning_effort', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 131072, - pricing: { - text: { - input: { - normal: 0.02, - cached: 0, - }, - output: { - normal: 0.1, - }, - }, - image: 0, - }, -} as const -const NOUSRESEARCH_DEEPHERMES_3_MISTRAL_24B_PREVIEW = { - id: 'nousresearch/deephermes-3-mistral-24b-preview', - name: 'Nous: DeepHermes 3 Mistral 24B Preview', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 32768, - max_output_tokens: 32768, - pricing: { - text: { - input: { - normal: 0.02, - cached: 0, - }, - output: { - normal: 0.1, - }, - }, - image: 0, - }, -} as const -const NOUSRESEARCH_HERMES_2_PRO_LLAMA_3_8B = { - id: 'nousresearch/hermes-2-pro-llama-3-8b', - name: 'NousResearch: Hermes 2 Pro - Llama-3 8B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 8192, - max_output_tokens: 2048, - pricing: { - text: { - input: { - normal: 0.025, - cached: 0, - }, - output: { - normal: 0.08, - }, - }, - image: 0, - }, -} as const -const META_LLAMA_LLAMA_3_2_1B_INSTRUCT = { - id: 'meta-llama/llama-3.2-1b-instruct', - name: 'Meta: Llama 3.2 1B Instruct', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'repetition_penalty', - 'seed', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 0.027, - cached: 0, - }, - output: { - normal: 0.2, - }, - }, - image: 0, - }, -} as const -const MISTRALAI_MISTRAL_7B_INSTRUCT = { - id: 'mistralai/mistral-7b-instruct', - name: 'Mistral: Mistral 7B Instruct', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 32768, - max_output_tokens: 16384, - pricing: { - text: { - input: { - normal: 0.028, - cached: 0, - }, - output: { - normal: 0.054, - }, - }, - image: 0, - }, -} as const -const META_LLAMA_LLAMA_3_8B_INSTRUCT = { - id: 'meta-llama/llama-3-8b-instruct', - name: 'Meta: Llama 3 8B Instruct', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 8192, - max_output_tokens: 16384, - pricing: { - text: { - input: { - normal: 0.03, - cached: 0, - }, - output: { - normal: 0.06, - }, - }, - image: 0, - }, -} as const -const GOOGLE_GEMMA_2_9B_IT = { - id: 
'google/gemma-2-9b-it', - name: 'Google: Gemma 2 9B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'repetition_penalty', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 8192, - pricing: { - text: { - input: { - normal: 0.03, - cached: 0, - }, - output: { - normal: 0.09, - }, - }, - image: 0, - }, -} as const -const QWEN_QWEN2_5_CODER_7B_INSTRUCT = { - id: 'qwen/qwen2.5-coder-7b-instruct', - name: 'Qwen: Qwen2.5 Coder 7B Instruct', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'structured_outputs', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 0.03, - cached: 0, - }, - output: { - normal: 0.09, - }, - }, - image: 0, - }, -} as const -const GOOGLE_GEMMA_3_12B_IT = { - id: 'google/gemma-3-12b-it', - name: 'Google: Gemma 3 12B', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 131072, - pricing: { - text: { - input: { - normal: 0.03, - cached: 0, - }, - output: { - normal: 0.1, - }, - }, - image: 0, - }, -} as const -const MISTRALAI_MISTRAL_SMALL_3_1_24B_INSTRUCT = { - id: 'mistralai/mistral-small-3.1-24b-instruct', - name: 'Mistral: Mistral Small 3.1 24B', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 128000, - max_output_tokens: 131072, - pricing: { - text: { - input: { - normal: 0.03, - cached: 0, - }, - output: { - normal: 0.11, - }, - }, - image: 0, - }, -} as const -const DEEPSEEK_DEEPSEEK_R1_DISTILL_LLAMA_70B = { - id: 'deepseek/deepseek-r1-distill-llama-70b', - name: 'DeepSeek: R1 Distill Llama 70B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 128000, - max_output_tokens: 131072, - pricing: { - text: { - input: { - normal: 0.03, - cached: 0, - }, - output: { - normal: 0.11, - }, - }, - image: 0, - }, -} as const -const QWEN_QWEN_2_5_CODER_32B_INSTRUCT = { - id: 'qwen/qwen-2.5-coder-32b-instruct', - name: 'Qwen2.5 Coder 32B Instruct', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 128000, - max_output_tokens: 32768, - pricing: { - text: { - input: { - normal: 0.03, - cached: 0, - }, - output: { - normal: 0.11, - }, - }, - image: 0, - }, -} as const -const MISTRALAI_MISTRAL_SMALL_24B_INSTRUCT_2501 = { - id: 
'mistralai/mistral-small-24b-instruct-2501', - name: 'Mistral: Mistral Small 3', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 32768, - max_output_tokens: 32768, - pricing: { - text: { - input: { - normal: 0.03, - cached: 0, - }, - output: { - normal: 0.11, - }, - }, - image: 0, - }, -} as const -const QWEN_QWEN3_8B = { - id: 'qwen/qwen3-8b', - name: 'Qwen: Qwen3 8B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 20000, - pricing: { - text: { - input: { - normal: 0.035, - cached: 0, - }, - output: { - normal: 0.138, - }, - }, - image: 0, - }, -} as const -const AMAZON_NOVA_MICRO_V1 = { - id: 'amazon/nova-micro-v1', - name: 'Amazon: Nova Micro 1.0', - supports: { - input: ['text'], - output: ['text'], - supports: ['max_tokens', 'stop', 'temperature', 'tools', 'top_k', 'top_p'], - }, - context_window: 128000, - max_output_tokens: 5120, - pricing: { - text: { - input: { - normal: 0.035, - cached: 0, - }, - output: { - normal: 0.14, - }, - }, - image: 0, - }, -} as const -const COHERE_COMMAND_R7B_12_2024 = { - id: 'cohere/command-r7b-12-2024', - name: 'Cohere: Command R7B (12-2024)', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 128000, - max_output_tokens: 4000, - pricing: { - text: { - input: { - normal: 0.0375, - cached: 0, - }, - output: { - normal: 0.15, - }, - }, - image: 0, - }, -} as const -const OPENAI_GPT_OSS_120B = { - id: 'openai/gpt-oss-120b', - name: 'OpenAI: gpt-oss-120b', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'reasoning_effort', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 0.039, - cached: 0, - }, - output: { - normal: 0.19, - }, - }, - image: 0, - }, -} as const -const OPENAI_GPT_OSS_120B_EXACTO = { - id: 'openai/gpt-oss-120b:exacto', - name: 'OpenAI: gpt-oss-120b (exacto)', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 0.039, - cached: 0, - }, - output: { - normal: 0.19, - }, - }, - image: 0, - }, -} as const -const MISTRALAI_MINISTRAL_3B = { - id: 
'mistralai/ministral-3b', - name: 'Mistral: Ministral 3B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 128000, - pricing: { - text: { - input: { - normal: 0.04, - cached: 0, - }, - output: { - normal: 0.04, - }, - }, - image: 0, - }, -} as const -const SAO10K_L3_LUNARIS_8B = { - id: 'sao10k/l3-lunaris-8b', - name: 'Sao10K: Llama 3 8B Lunaris', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 8192, - pricing: { - text: { - input: { - normal: 0.04, - cached: 0, - }, - output: { - normal: 0.05, - }, - }, - image: 0, - }, -} as const -const QWEN_QWEN_2_5_7B_INSTRUCT = { - id: 'qwen/qwen-2.5-7b-instruct', - name: 'Qwen: Qwen2.5 7B Instruct', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'seed', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 0.04, - cached: 0, - }, - output: { - normal: 0.1, - }, - }, - image: 0, - }, -} as const -const GOOGLE_GEMMA_3_27B_IT = { - id: 'google/gemma-3-27b-it', - name: 'Google: Gemma 3 27B', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 96000, - pricing: { - text: { - input: { - normal: 0.04, - cached: 0, - }, - output: { - normal: 0.15, - }, - }, - image: 0, - }, -} as const -const NVIDIA_NEMOTRON_NANO_9B_V2 = { - id: 'nvidia/nemotron-nano-9b-v2', - name: 'NVIDIA: Nemotron Nano 9B V2', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 32000, - pricing: { - text: { - input: { - normal: 0.04, - cached: 0, - }, - output: { - normal: 0.16, - }, - }, - image: 0, - }, -} as const -const ARCEE_AI_TRINITY_MINI = { - id: 'arcee-ai/trinity-mini', - name: 'Arcee AI: Trinity Mini', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 131072, - pricing: { - text: { - input: { - normal: 0.045, - cached: 0, - }, - output: { - normal: 0.15, - }, - }, - image: 0, - }, -} as const -const META_LLAMA_LLAMA_3_2_11B_VISION_INSTRUCT = { - id: 'meta-llama/llama-3.2-11b-vision-instruct', - name: 'Meta: Llama 3.2 11B 
Vision Instruct', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 16384, - pricing: { - text: { - input: { - normal: 0.049, - cached: 0, - }, - output: { - normal: 0.049, - }, - }, - image: 0.00007948, - }, -} as const -const MICROSOFT_PHI_4_MULTIMODAL_INSTRUCT = { - id: 'microsoft/phi-4-multimodal-instruct', - name: 'Microsoft: Phi 4 Multimodal Instruct', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 0.05, - cached: 0, - }, - output: { - normal: 0.1, - }, - }, - image: 0.00017685, - }, -} as const -const QWEN_QWEN_TURBO = { - id: 'qwen/qwen-turbo', - name: 'Qwen: Qwen-Turbo', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 1000000, - max_output_tokens: 8192, - pricing: { - text: { - input: { - normal: 0.05, - cached: 0.02, - }, - output: { - normal: 0.2, - }, - }, - image: 0, - }, -} as const -const Z_AI_GLM_4_5_AIR = { - id: 'z-ai/glm-4.5-air', - name: 'Z.AI: GLM 4.5 Air', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 131072, - pricing: { - text: { - input: { - normal: 0.05, - cached: 0, - }, - output: { - normal: 0.22, - }, - }, - image: 0, - }, -} as const -const QWEN_QWEN2_5_VL_32B_INSTRUCT = { - id: 'qwen/qwen2.5-vl-32b-instruct', - name: 'Qwen: Qwen2.5 VL 32B Instruct', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'top_k', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 32768, - max_output_tokens: 16384, - pricing: { - text: { - input: { - normal: 0.05, - cached: 0, - }, - output: { - normal: 0.22, - }, - }, - image: 0, - }, -} as const -const QWEN_QWEN3_14B = { - id: 'qwen/qwen3-14b', - name: 'Qwen: Qwen3 14B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 131702, - max_output_tokens: 40960, - pricing: { - text: { - input: { - normal: 0.05, - cached: 0, - }, - output: { - normal: 0.22, - }, - }, - image: 0, - }, -} as const -const MISTRALAI_DEVSTRAL_2512 = { - id: 'mistralai/devstral-2512', - name: 'Mistral: Devstral 2 2512', - supports: { - input: ['text'], - output: ['text'], - 
supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 262144, - max_output_tokens: 65536, - pricing: { - text: { - input: { - normal: 0.05, - cached: 0, - }, - output: { - normal: 0.22, - }, - }, - image: 0, - }, -} as const -const OPENAI_GPT_5_NANO = { - id: 'openai/gpt-5-nano', - name: 'OpenAI: GPT-5 Nano', - supports: { - input: ['text', 'image', 'document'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'response_format', - 'seed', - 'structured_outputs', - 'tool_choice', - 'tools', - ], - }, - context_window: 400000, - max_output_tokens: 128000, - pricing: { - text: { - input: { - normal: 0.05, - cached: 0.005, - }, - output: { - normal: 0.4, - }, - }, - image: 0, - }, -} as const -const QWEN_QWEN3_30B_A3B_THINKING_2507 = { - id: 'qwen/qwen3-30b-a3b-thinking-2507', - name: 'Qwen: Qwen3 30B A3B Thinking 2507', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 0.051, - cached: 0, - }, - output: { - normal: 0.34, - }, - }, - image: 0, - }, -} as const -const GRYPHE_MYTHOMAX_L2_13B = { - id: 'gryphe/mythomax-l2-13b', - name: 'MythoMax 13B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'top_a', - 'top_k', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 4096, - pricing: { - text: { - input: { - normal: 0.06, - cached: 0, - }, - output: { - normal: 0.06, - }, - }, - image: 0, - }, -} as const -const DEEPSEEK_DEEPSEEK_R1_0528_QWEN3_8B = { - id: 'deepseek/deepseek-r1-0528-qwen3-8b', - name: 'DeepSeek: DeepSeek R1 0528 Qwen3 8B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'seed', - 'stop', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 32000, - pricing: { - text: { - input: { - normal: 0.06, - cached: 0, - }, - output: { - normal: 0.09, - }, - }, - image: 0, - }, -} as const -const MISTRALAI_DEVSTRAL_SMALL_2505 = { - id: 'mistralai/devstral-small-2505', - name: 'Mistral: Devstral Small 2505', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 0.06, - cached: 0, - }, - output: { - normal: 0.12, - }, - }, - image: 0, - }, -} as const -const MICROSOFT_PHI_4 = { - id: 'microsoft/phi-4', - name: 'Microsoft: Phi 4', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 
'structured_outputs', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 16384, - pricing: { - text: { - input: { - normal: 0.06, - cached: 0, - }, - output: { - normal: 0.14, - }, - }, - image: 0, - }, -} as const -const MISTRALAI_MISTRAL_SMALL_3_2_24B_INSTRUCT = { - id: 'mistralai/mistral-small-3.2-24b-instruct', - name: 'Mistral: Mistral Small 3.2 24B', - supports: { - input: ['image', 'text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 128000, - max_output_tokens: 131072, - pricing: { - text: { - input: { - normal: 0.06, - cached: 0, - }, - output: { - normal: 0.18, - }, - }, - image: 0, - }, -} as const -const QWEN_QWEN3_30B_A3B = { - id: 'qwen/qwen3-30b-a3b', - name: 'Qwen: Qwen3 30B A3B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 40960, - pricing: { - text: { - input: { - normal: 0.06, - cached: 0, - }, - output: { - normal: 0.22, - }, - }, - image: 0, - }, -} as const -const AMAZON_NOVA_LITE_V1 = { - id: 'amazon/nova-lite-v1', - name: 'Amazon: Nova Lite 1.0', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: ['max_tokens', 'stop', 'temperature', 'tools', 'top_k', 'top_p'], - }, - context_window: 300000, - max_output_tokens: 5120, - pricing: { - text: { - input: { - normal: 0.06, - cached: 0, - }, - output: { - normal: 0.24, - }, - }, - image: 0.00009, - }, -} as const -const NVIDIA_NEMOTRON_3_NANO_30B_A3B = { - id: 'nvidia/nemotron-3-nano-30b-a3b', - name: 'NVIDIA: Nemotron 3 Nano 30B A3B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 256000, - max_output_tokens: 262144, - pricing: { - text: { - input: { - normal: 0.06, - cached: 0, - }, - output: { - normal: 0.24, - }, - }, - image: 0, - }, -} as const -const QWEN_QWEN3_CODER_30B_A3B_INSTRUCT = { - id: 'qwen/qwen3-coder-30b-a3b-instruct', - name: 'Qwen: Qwen3 Coder 30B A3B Instruct', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - max_output_tokens: 32768, - pricing: { - text: { - input: { - normal: 0.07, - cached: 0, - }, - output: { - normal: 0.27, - }, - }, - image: 0, - }, -} as const -const MISTRALAI_DEVSTRAL_SMALL = { - id: 'mistralai/devstral-small', - name: 'Mistral: Devstral Small 1.1', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 
'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 0.07, - cached: 0, - }, - output: { - normal: 0.28, - }, - }, - image: 0, - }, -} as const -const BAIDU_ERNIE_4_5_21B_A3B_THINKING = { - id: 'baidu/ernie-4.5-21b-a3b-thinking', - name: 'Baidu: ERNIE 4.5 21B A3B Thinking', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'seed', - 'stop', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 65536, - pricing: { - text: { - input: { - normal: 0.07, - cached: 0, - }, - output: { - normal: 0.28, - }, - }, - image: 0, - }, -} as const -const BAIDU_ERNIE_4_5_21B_A3B = { - id: 'baidu/ernie-4.5-21b-a3b', - name: 'Baidu: ERNIE 4.5 21B A3B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'repetition_penalty', - 'seed', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 8000, - pricing: { - text: { - input: { - normal: 0.07, - cached: 0, - }, - output: { - normal: 0.28, - }, - }, - image: 0, - }, -} as const -const MICROSOFT_PHI_4_REASONING_PLUS = { - id: 'microsoft/phi-4-reasoning-plus', - name: 'Microsoft: Phi 4 Reasoning Plus', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 32768, - pricing: { - text: { - input: { - normal: 0.07, - cached: 0, - }, - output: { - normal: 0.35, - }, - }, - image: 0, - }, -} as const -const QWEN_QWEN3_235B_A22B_2507 = { - id: 'qwen/qwen3-235b-a22b-2507', - name: 'Qwen: Qwen3 235B A22B Instruct 2507', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'reasoning_effort', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 262144, - pricing: { - text: { - input: { - normal: 0.071, - cached: 0, - }, - output: { - normal: 0.463, - }, - }, - image: 0, - }, -} as const -const OPENAI_GPT_OSS_SAFEGUARD_20B = { - id: 'openai/gpt-oss-safeguard-20b', - name: 'OpenAI: gpt-oss-safeguard-20b', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'response_format', - 'seed', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - max_output_tokens: 65536, - pricing: { - text: { - input: { - normal: 0.075, - cached: 0.037, - }, - output: { - normal: 0.3, - }, - }, - image: 0, - }, -} as const -const BYTEDANCE_SEED_SEED_1_6_FLASH = { - id: 'bytedance-seed/seed-1.6-flash', - name: 'ByteDance Seed: Seed 1.6 Flash', - supports: { - input: ['image', 'text', 'video'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'response_format', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 262144, - 
max_output_tokens: 16384, - pricing: { - text: { - input: { - normal: 0.075, - cached: 0, - }, - output: { - normal: 0.3, - }, - }, - image: 0, - }, -} as const -const GOOGLE_GEMINI_2_0_FLASH_LITE_001 = { - id: 'google/gemini-2.0-flash-lite-001', - name: 'Google: Gemini 2.0 Flash Lite', - supports: { - input: ['text', 'image', 'document', 'audio', 'video'], - output: ['text'], - supports: [ - 'max_tokens', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 1048576, - max_output_tokens: 8192, - pricing: { - text: { - input: { - normal: 0.075, - cached: 0, - }, - output: { - normal: 0.3, - }, - }, - image: 0, - }, -} as const -const QWEN_QWEN3_32B = { - id: 'qwen/qwen3-32b', - name: 'Qwen: Qwen3 32B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'logprobs', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 40960, - pricing: { - text: { - input: { - normal: 0.08, - cached: 0, - }, - output: { - normal: 0.24, - }, - }, - image: 0, - }, -} as const -const META_LLAMA_LLAMA_4_SCOUT = { - id: 'meta-llama/llama-4-scout', - name: 'Meta: Llama 4 Scout', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 10000000, - max_output_tokens: 16384, - pricing: { - text: { - input: { - normal: 0.08, - cached: 0, - }, - output: { - normal: 0.3, - }, - }, - image: 0.0003342, - }, -} as const -const QWEN_QWEN3_30B_A3B_INSTRUCT_2507 = { - id: 'qwen/qwen3-30b-a3b-instruct-2507', - name: 'Qwen: Qwen3 30B A3B Instruct 2507', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 262144, - pricing: { - text: { - input: { - normal: 0.08, - cached: 0, - }, - output: { - normal: 0.33, - }, - }, - image: 0, - }, -} as const -const QWEN_QWEN3_VL_8B_INSTRUCT = { - id: 'qwen/qwen3-vl-8b-instruct', - name: 'Qwen: Qwen3 VL 8B Instruct', - supports: { - input: ['image', 'text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 256000, - max_output_tokens: 32768, - pricing: { - text: { - input: { - normal: 0.08, - cached: 0, - }, - output: { - normal: 0.5, - }, - }, - image: 0, - }, -} as const -const ALIBABA_TONGYI_DEEPRESEARCH_30B_A3B = { - id: 'alibaba/tongyi-deepresearch-30b-a3b', - name: 'Tongyi DeepResearch 30B A3B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 
'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 131072, - pricing: { - text: { - input: { - normal: 0.09, - cached: 0, - }, - output: { - normal: 0.4, - }, - }, - image: 0, - }, -} as const -const NEVERSLEEP_LLAMA_3_1_LUMIMAID_8B = { - id: 'neversleep/llama-3.1-lumimaid-8b', - name: 'NeverSleep: Lumimaid v0.2 8B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'stop', - 'structured_outputs', - 'temperature', - 'top_p', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 0.09, - cached: 0, - }, - output: { - normal: 0.6, - }, - }, - image: 0, - }, -} as const -const QWEN_QWEN3_NEXT_80B_A3B_INSTRUCT = { - id: 'qwen/qwen3-next-80b-a3b-instruct', - name: 'Qwen: Qwen3 Next 80B A3B Instruct', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 262144, - pricing: { - text: { - input: { - normal: 0.09, - cached: 0, - }, - output: { - normal: 1.1, - }, - }, - image: 0, - }, -} as const -const Z_AI_GLM_4_32B = { - id: 'z-ai/glm-4-32b', - name: 'Z.AI: GLM 4 32B ', - supports: { - input: ['text'], - output: ['text'], - supports: ['max_tokens', 'temperature', 'tool_choice', 'tools', 'top_p'], - }, - context_window: 128000, - pricing: { - text: { - input: { - normal: 0.1, - cached: 0, - }, - output: { - normal: 0.1, - }, - }, - image: 0, - }, -} as const -const MISTRALAI_PIXTRAL_12B = { - id: 'mistralai/pixtral-12b', - name: 'Mistral: Pixtral 12B', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 4096, - pricing: { - text: { - input: { - normal: 0.1, - cached: 0, - }, - output: { - normal: 0.1, - }, - }, - image: 0.0001445, - }, -} as const -const MISTRALAI_MINISTRAL_3B_2512 = { - id: 'mistralai/ministral-3b-2512', - name: 'Mistral: Ministral 3 3B 2512', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 128000, - pricing: { - text: { - input: { - normal: 0.1, - cached: 0, - }, - output: { - normal: 0.1, - }, - }, - image: 0, - }, -} as const -const MISTRAL_MINISTRAL_8B = { - id: 'mistral/ministral-8b', - name: 'Mistral: Ministral 8B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 0.1, - cached: 0, - }, - output: { - normal: 0.1, - }, - }, - image: 0, - }, -} as const -const MISTRALAI_MINISTRAL_8B = { - id: 
'mistralai/ministral-8b', - name: 'Mistral: Ministral 8B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 128000, - pricing: { - text: { - input: { - normal: 0.1, - cached: 0, - }, - output: { - normal: 0.1, - }, - }, - image: 0, - }, -} as const -const ALLENAI_OLMO_3_7B_INSTRUCT = { - id: 'allenai/olmo-3-7b-instruct', - name: 'AllenAI: Olmo 3 7B Instruct', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 65536, - max_output_tokens: 65536, - pricing: { - text: { - input: { - normal: 0.1, - cached: 0, - }, - output: { - normal: 0.2, - }, - }, - image: 0, - }, -} as const -const BYTEDANCE_UI_TARS_1_5_7B = { - id: 'bytedance/ui-tars-1.5-7b', - name: 'ByteDance: UI-TARS 7B ', - supports: { - input: ['image', 'text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'seed', - 'stop', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 128000, - max_output_tokens: 2048, - pricing: { - text: { - input: { - normal: 0.1, - cached: 0, - }, - output: { - normal: 0.2, - }, - }, - image: 0, - }, -} as const -const MISTRALAI_MISTRAL_SMALL_CREATIVE = { - id: 'mistralai/mistral-small-creative', - name: 'Mistral: Mistral Small Creative', - supports: { - input: ['text'], - output: ['text'], - supports: ['tool_choice', 'tools'], - }, - context_window: 32768, - pricing: { - text: { - input: { - normal: 0.1, - cached: 0, - }, - output: { - normal: 0.3, - }, - }, - image: 0, - }, -} as const -const MISTRALAI_VOXTRAL_SMALL_24B_2507 = { - id: 'mistralai/voxtral-small-24b-2507', - name: 'Mistral: Voxtral Small 24B 2507', - supports: { - input: ['text', 'audio'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 32000, - pricing: { - text: { - input: { - normal: 0.1, - cached: 0, - }, - output: { - normal: 0.3, - }, - }, - image: 0, - }, -} as const -const META_LLAMA_LLAMA_3_3_70B_INSTRUCT = { - id: 'meta-llama/llama-3.3-70b-instruct', - name: 'Meta: Llama 3.3 70B Instruct', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 16384, - pricing: { - text: { - input: { - normal: 0.1, - cached: 0, - }, - output: { - normal: 0.32, - }, - }, - image: 0, - }, -} as const -const OPENGVLAB_INTERNVL3_78B = { - id: 'opengvlab/internvl3-78b', - name: 'OpenGVLab: InternVL3 78B', - supports: { - input: ['image', 'text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 
'stop', - 'structured_outputs', - 'temperature', - 'top_k', - 'top_p', - ], - }, - max_output_tokens: 32768, - pricing: { - text: { - input: { - normal: 0.1, - cached: 0, - }, - output: { - normal: 0.39, - }, - }, - image: 0, - }, -} as const -const OPENAI_GPT_4_1_NANO = { - id: 'openai/gpt-4.1-nano', - name: 'OpenAI: GPT-4.1 Nano', - supports: { - input: ['image', 'text', 'document'], - output: ['text'], - supports: [ - 'max_tokens', - 'response_format', - 'seed', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 1047576, - max_output_tokens: 32768, - pricing: { - text: { - input: { - normal: 0.1, - cached: 0.025, - }, - output: { - normal: 0.4, - }, - }, - image: 0, - }, -} as const -const GOOGLE_GEMINI_2_0_FLASH_001 = { - id: 'google/gemini-2.0-flash-001', - name: 'Google: Gemini 2.0 Flash', - supports: { - input: ['text', 'image', 'document', 'audio', 'video'], - output: ['text'], - supports: [ - 'max_tokens', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 1000000, - max_output_tokens: 8192, - pricing: { - text: { - input: { - normal: 0.1, - cached: 0.20829999999999999, - }, - output: { - normal: 0.4, - }, - }, - image: 0.0000258, - }, -} as const -const GOOGLE_GEMINI_2_5_FLASH_LITE_PREVIEW_09_2025 = { - id: 'google/gemini-2.5-flash-lite-preview-09-2025', - name: 'Google: Gemini 2.5 Flash Lite Preview 09-2025', - supports: { - input: ['text', 'image', 'document', 'audio', 'video'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 1048576, - max_output_tokens: 65536, - pricing: { - text: { - input: { - normal: 0.1, - cached: 0, - }, - output: { - normal: 0.4, - }, - }, - image: 0, - }, -} as const -const GOOGLE_GEMINI_2_5_FLASH_LITE = { - id: 'google/gemini-2.5-flash-lite', - name: 'Google: Gemini 2.5 Flash Lite', - supports: { - input: ['text', 'image', 'document', 'audio', 'video'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 1048576, - max_output_tokens: 65535, - pricing: { - text: { - input: { - normal: 0.1, - cached: 0.1933, - }, - output: { - normal: 0.4, - }, - }, - image: 0, - }, -} as const -const NVIDIA_LLAMA_3_3_NEMOTRON_SUPER_49B_V1_5 = { - id: 'nvidia/llama-3.3-nemotron-super-49b-v1.5', - name: 'NVIDIA: Llama 3.3 Nemotron Super 49B V1.5', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 0.1, - cached: 0, - }, - output: { - normal: 0.4, - }, - }, - image: 0, - }, -} as const -const QWEN_QWEN3_VL_4B_INSTRUCT = { - id: 'qwen/qwen3-vl-4b-instruct', - name: 'Qwen: Qwen3 VL 4B Instruct', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'temperature', - 
'top_k', - 'top_p', - ], - }, - context_window: 256000, - pricing: { - text: { - input: { - normal: 0.1, - cached: 0, - }, - output: { - normal: 0.6, - }, - }, - image: 0, - }, -} as const -const MISTRALAI_MISTRAL_7B_INSTRUCT_V0_1 = { - id: 'mistralai/mistral-7b-instruct-v0.1', - name: 'Mistral: Mistral 7B Instruct v0.1', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'repetition_penalty', - 'seed', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 4096, - pricing: { - text: { - input: { - normal: 0.11, - cached: 0, - }, - output: { - normal: 0.19, - }, - }, - image: 0, - }, -} as const -const NOUSRESEARCH_HERMES_4_70B = { - id: 'nousresearch/hermes-4-70b', - name: 'Nous: Hermes 4 70B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 131072, - pricing: { - text: { - input: { - normal: 0.11, - cached: 0, - }, - output: { - normal: 0.38, - }, - }, - image: 0, - }, -} as const -const QWEN_QWEN3_235B_A22B_THINKING_2507 = { - id: 'qwen/qwen3-235b-a22b-thinking-2507', - name: 'Qwen: Qwen3 235B A22B Thinking 2507', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 262144, - max_output_tokens: 262144, - pricing: { - text: { - input: { - normal: 0.11, - cached: 0, - }, - output: { - normal: 0.6, - }, - }, - image: 0, - }, -} as const -const ALLENAI_OLMO_3_7B_THINK = { - id: 'allenai/olmo-3-7b-think', - name: 'AllenAI: Olmo 3 7B Think', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 65536, - max_output_tokens: 65536, - pricing: { - text: { - input: { - normal: 0.12, - cached: 0, - }, - output: { - normal: 0.2, - }, - }, - image: 0, - }, -} as const -const QWEN_QWEN_2_5_72B_INSTRUCT = { - id: 'qwen/qwen-2.5-72b-instruct', - name: 'Qwen2.5 72B Instruct', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 16384, - pricing: { - text: { - input: { - normal: 0.12, - cached: 0, - }, - output: { - normal: 0.39, - }, - }, - image: 0, - }, -} as const -const BAIDU_ERNIE_4_5_VL_28B_A3B = { - id: 'baidu/ernie-4.5-vl-28b-a3b', - name: 'Baidu: ERNIE 4.5 VL 28B A3B', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 
'seed', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 8000, - pricing: { - text: { - input: { - normal: 0.14, - cached: 0, - }, - output: { - normal: 0.56, - }, - }, - image: 0, - }, -} as const -const TENCENT_HUNYUAN_A13B_INSTRUCT = { - id: 'tencent/hunyuan-a13b-instruct', - name: 'Tencent: Hunyuan A13B Instruct', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'reasoning', - 'response_format', - 'structured_outputs', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 32768, - max_output_tokens: 131072, - pricing: { - text: { - input: { - normal: 0.14, - cached: 0, - }, - output: { - normal: 0.57, - }, - }, - image: 0, - }, -} as const -const DEEPSEEK_DEEPSEEK_R1_DISTILL_QWEN_14B = { - id: 'deepseek/deepseek-r1-distill-qwen-14b', - name: 'DeepSeek: R1 Distill Qwen 14B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 16384, - pricing: { - text: { - input: { - normal: 0.15, - cached: 0, - }, - output: { - normal: 0.15, - }, - }, - image: 0, - }, -} as const -const ESSENTIALAI_RNJ_1_INSTRUCT = { - id: 'essentialai/rnj-1-instruct', - name: 'EssentialAI: Rnj 1 Instruct', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'stop', - 'structured_outputs', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 32768, - pricing: { - text: { - input: { - normal: 0.15, - cached: 0, - }, - output: { - normal: 0.15, - }, - }, - image: 0, - }, -} as const -const MISTRALAI_MINISTRAL_8B_2512 = { - id: 'mistralai/ministral-8b-2512', - name: 'Mistral: Ministral 3 8B 2512', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 128000, - pricing: { - text: { - input: { - normal: 0.15, - cached: 0, - }, - output: { - normal: 0.15, - }, - }, - image: 0, - }, -} as const -const QWEN_QWQ_32B = { - id: 'qwen/qwq-32b', - name: 'Qwen: QwQ 32B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 0.15, - cached: 0, - }, - output: { - normal: 0.4, - }, - }, - image: 0, - }, -} as const -const ALLENAI_OLMO_3_1_32B_THINK = { - id: 'allenai/olmo-3.1-32b-think', - name: 'AllenAI: Olmo 3.1 32B Think', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 
'top_k', - 'top_p', - ], - }, - context_window: 65536, - max_output_tokens: 65536, - pricing: { - text: { - input: { - normal: 0.15, - cached: 0, - }, - output: { - normal: 0.5, - }, - }, - image: 0, - }, -} as const -const ALLENAI_OLMO_3_32B_THINK = { - id: 'allenai/olmo-3-32b-think', - name: 'AllenAI: Olmo 3 32B Think', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 65536, - max_output_tokens: 65536, - pricing: { - text: { - input: { - normal: 0.15, - cached: 0, - }, - output: { - normal: 0.5, - }, - }, - image: 0, - }, -} as const -const QWEN_QWEN3_VL_30B_A3B_INSTRUCT = { - id: 'qwen/qwen3-vl-30b-a3b-instruct', - name: 'Qwen: Qwen3 VL 30B A3B Instruct', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 262144, - pricing: { - text: { - input: { - normal: 0.15, - cached: 0, - }, - output: { - normal: 0.6, - }, - }, - image: 0, - }, -} as const -const META_LLAMA_LLAMA_4_MAVERICK = { - id: 'meta-llama/llama-4-maverick', - name: 'Meta: Llama 4 Maverick', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 1048576, - max_output_tokens: 16384, - pricing: { - text: { - input: { - normal: 0.15, - cached: 0, - }, - output: { - normal: 0.6, - }, - }, - image: 0.0006684, - }, -} as const -const OPENAI_GPT_4O_MINI_SEARCH_PREVIEW = { - id: 'openai/gpt-4o-mini-search-preview', - name: 'OpenAI: GPT-4o-mini Search Preview', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'max_tokens', - 'response_format', - 'structured_outputs', - 'web_search_options', - ], - }, - context_window: 128000, - max_output_tokens: 16384, - pricing: { - text: { - input: { - normal: 0.15, - cached: 0, - }, - output: { - normal: 0.6, - }, - }, - image: 0, - }, -} as const -const OPENAI_GPT_4O_MINI_2024_07_18 = { - id: 'openai/gpt-4o-mini-2024-07-18', - name: 'OpenAI: GPT-4o-mini (2024-07-18)', - supports: { - input: ['text', 'image', 'document'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_logprobs', - 'top_p', - 'web_search_options', - ], - }, - context_window: 128000, - max_output_tokens: 16384, - pricing: { - text: { - input: { - normal: 0.15, - cached: 0.075, - }, - output: { - normal: 0.6, - }, - }, - image: 0, - }, -} as const -const OPENAI_GPT_4O_MINI = { - id: 'openai/gpt-4o-mini', - name: 'OpenAI: GPT-4o-mini', - supports: { - input: ['text', 'image', 'document'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'presence_penalty', - 
'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_logprobs', - 'top_p', - 'web_search_options', - ], - }, - context_window: 128000, - max_output_tokens: 16384, - pricing: { - text: { - input: { - normal: 0.15, - cached: 0.075, - }, - output: { - normal: 0.6, - }, - }, - image: 0, - }, -} as const -const COHERE_COMMAND_R_08_2024 = { - id: 'cohere/command-r-08-2024', - name: 'Cohere: Command R (08-2024)', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 128000, - max_output_tokens: 4000, - pricing: { - text: { - input: { - normal: 0.15, - cached: 0, - }, - output: { - normal: 0.6, - }, - }, - image: 0, - }, -} as const -const QWEN_QWEN2_5_VL_72B_INSTRUCT = { - id: 'qwen/qwen2.5-vl-72b-instruct', - name: 'Qwen: Qwen2.5 VL 72B Instruct', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 32768, - pricing: { - text: { - input: { - normal: 0.15, - cached: 0, - }, - output: { - normal: 0.6, - }, - }, - image: 0, - }, -} as const -const DEEPSEEK_DEEPSEEK_CHAT_V3_1 = { - id: 'deepseek/deepseek-chat-v3.1', - name: 'DeepSeek: DeepSeek V3.1', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 7168, - pricing: { - text: { - input: { - normal: 0.15, - cached: 0, - }, - output: { - normal: 0.75, - }, - }, - image: 0, - }, -} as const -const QWEN_QWEN3_NEXT_80B_A3B_THINKING = { - id: 'qwen/qwen3-next-80b-a3b-thinking', - name: 'Qwen: Qwen3 Next 80B A3B Thinking', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 262144, - max_output_tokens: 262144, - pricing: { - text: { - input: { - normal: 0.15, - cached: 0, - }, - output: { - normal: 1.2, - }, - }, - image: 0, - }, -} as const -const THEDRUMMER_ROCINANTE_12B = { - id: 'thedrummer/rocinante-12b', - name: 'TheDrummer: Rocinante 12B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 32768, - pricing: { - text: { - input: { - normal: 0.17, - cached: 0, - }, - output: { - normal: 0.43, - }, - }, - image: 0, - }, -} as const -const ARCEE_AI_SPOTLIGHT = { - id: 'arcee-ai/spotlight', - 
name: 'Arcee AI: Spotlight', - supports: { - input: ['image', 'text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'stop', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 65537, - pricing: { - text: { - input: { - normal: 0.18, - cached: 0, - }, - output: { - normal: 0.18, - }, - }, - image: 0, - }, -} as const -const META_LLAMA_LLAMA_GUARD_4_12B = { - id: 'meta-llama/llama-guard-4-12b', - name: 'Meta: Llama Guard 4 12B', - supports: { - input: ['image', 'text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 163840, - pricing: { - text: { - input: { - normal: 0.18, - cached: 0, - }, - output: { - normal: 0.18, - }, - }, - image: 0, - }, -} as const -const QWEN_QWEN3_235B_A22B = { - id: 'qwen/qwen3-235b-a22b', - name: 'Qwen: Qwen3 235B A22B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 40960, - pricing: { - text: { - input: { - normal: 0.18, - cached: 0, - }, - output: { - normal: 0.54, - }, - }, - image: 0, - }, -} as const -const DEEPCOGITO_COGITO_V2_PREVIEW_LLAMA_109B_MOE = { - id: 'deepcogito/cogito-v2-preview-llama-109b-moe', - name: 'Cogito V2 Preview Llama 109B', - supports: { - input: ['image', 'text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 0.18, - cached: 0, - }, - output: { - normal: 0.59, - }, - }, - image: 0, - }, -} as const -const QWEN_QWEN3_VL_8B_THINKING = { - id: 'qwen/qwen3-vl-8b-thinking', - name: 'Qwen: Qwen3 VL 8B Thinking', - supports: { - input: ['image', 'text'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'presence_penalty', - 'reasoning', - 'response_format', - 'seed', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 256000, - max_output_tokens: 32768, - pricing: { - text: { - input: { - normal: 0.18, - cached: 0, - }, - output: { - normal: 2.1, - }, - }, - image: 0, - }, -} as const -const DEEPSEEK_DEEPSEEK_CHAT_V3_0324 = { - id: 'deepseek/deepseek-chat-v3-0324', - name: 'DeepSeek: DeepSeek V3 0324', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 65536, - pricing: { - text: { - input: { - normal: 0.19, - cached: 0, - }, - output: { - normal: 0.87, - }, - }, - image: 0, - }, -} 
as const -const META_LLAMA_LLAMA_GUARD_2_8B = { - id: 'meta-llama/llama-guard-2-8b', - name: 'Meta: LlamaGuard 2 8B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'stop', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 8192, - pricing: { - text: { - input: { - normal: 0.2, - cached: 0, - }, - output: { - normal: 0.2, - }, - }, - image: 0, - }, -} as const -const MISTRALAI_MISTRAL_7B_INSTRUCT_V0_3 = { - id: 'mistralai/mistral-7b-instruct-v0.3', - name: 'Mistral: Mistral 7B Instruct v0.3', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'stop', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 32768, - max_output_tokens: 4096, - pricing: { - text: { - input: { - normal: 0.2, - cached: 0, - }, - output: { - normal: 0.2, - }, - }, - image: 0, - }, -} as const -const QWEN_QWEN_2_5_VL_7B_INSTRUCT = { - id: 'qwen/qwen-2.5-vl-7b-instruct', - name: 'Qwen: Qwen2.5-VL 7B Instruct', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'seed', - 'stop', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 32768, - pricing: { - text: { - input: { - normal: 0.2, - cached: 0, - }, - output: { - normal: 0.2, - }, - }, - image: 0.0001445, - }, -} as const -const MISTRALAI_MINISTRAL_14B_2512 = { - id: 'mistralai/ministral-14b-2512', - name: 'Mistral: Ministral 3 14B 2512', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 128000, - pricing: { - text: { - input: { - normal: 0.2, - cached: 0, - }, - output: { - normal: 0.2, - }, - }, - image: 0, - }, -} as const -const MISTRALAI_MISTRAL_7B_INSTRUCT_V0_2 = { - id: 'mistralai/mistral-7b-instruct-v0.2', - name: 'Mistral: Mistral 7B Instruct v0.2', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'stop', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 32768, - pricing: { - text: { - input: { - normal: 0.2, - cached: 0, - }, - output: { - normal: 0.2, - }, - }, - image: 0, - }, -} as const -const AI21_JAMBA_MINI_1_7 = { - id: 'ai21/jamba-mini-1.7', - name: 'AI21: Jamba Mini 1.7', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'max_tokens', - 'response_format', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 256000, - max_output_tokens: 4096, - pricing: { - text: { - input: { - normal: 0.2, - cached: 0, - }, - output: { - normal: 0.4, - }, - }, - image: 0, - }, -} as const -const X_AI_GROK_4_1_FAST = { - id: 'x-ai/grok-4.1-fast', - name: 'xAI: Grok 4.1 Fast', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'include_reasoning', - 'logprobs', - 'max_tokens', - 'reasoning', - 'response_format', - 'seed', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 
'top_logprobs', - 'top_p', - ], - }, - context_window: 2000000, - max_output_tokens: 30000, - pricing: { - text: { - input: { - normal: 0.2, - cached: 0.05, - }, - output: { - normal: 0.5, - }, - }, - image: 0, - }, -} as const -const X_AI_GROK_4_FAST = { - id: 'x-ai/grok-4-fast', - name: 'xAI: Grok 4 Fast', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'include_reasoning', - 'logprobs', - 'max_tokens', - 'reasoning', - 'response_format', - 'seed', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 2000000, - max_output_tokens: 30000, - pricing: { - text: { - input: { - normal: 0.2, - cached: 0.05, - }, - output: { - normal: 0.5, - }, - }, - image: 0, - }, -} as const -const NVIDIA_NEMOTRON_NANO_12B_V2_VL = { - id: 'nvidia/nemotron-nano-12b-v2-vl', - name: 'NVIDIA: Nemotron Nano 12B 2 VL', - supports: { - input: ['image', 'text', 'video'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 128000, - pricing: { - text: { - input: { - normal: 0.2, - cached: 0, - }, - output: { - normal: 0.6, - }, - }, - image: 0, - }, -} as const -const MISTRALAI_MISTRAL_SABA = { - id: 'mistralai/mistral-saba', - name: 'Mistral: Saba', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 32000, - pricing: { - text: { - input: { - normal: 0.2, - cached: 0, - }, - output: { - normal: 0.6, - }, - }, - image: 0, - }, -} as const -const ALLENAI_OLMO_3_1_32B_INSTRUCT = { - id: 'allenai/olmo-3.1-32b-instruct', - name: 'AllenAI: Olmo 3.1 32B Instruct', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 65536, - pricing: { - text: { - input: { - normal: 0.2, - cached: 0, - }, - output: { - normal: 0.6, - }, - }, - image: 0, - }, -} as const -const MEITUAN_LONGCAT_FLASH_CHAT = { - id: 'meituan/longcat-flash-chat', - name: 'Meituan: LongCat Flash Chat', - supports: { - input: ['text'], - output: ['text'], - supports: ['max_tokens', 'temperature', 'top_p'], - }, - context_window: 131072, - max_output_tokens: 131072, - pricing: { - text: { - input: { - normal: 0.2, - cached: 0, - }, - output: { - normal: 0.8, - }, - }, - image: 0, - }, -} as const -const QWEN_QWEN3_VL_30B_A3B_THINKING = { - id: 'qwen/qwen3-vl-30b-a3b-thinking', - name: 'Qwen: Qwen3 VL 30B A3B Thinking', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - max_output_tokens: 32768, - pricing: { - text: { - input: { - normal: 0.2, - cached: 0, - }, - output: { - normal: 1, - }, - }, - image: 0, - }, -} as const -const MINIMAX_MINIMAX_M2 = { - id: 
'minimax/minimax-m2', - name: 'MiniMax: MiniMax M2', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 204800, - max_output_tokens: 65536, - pricing: { - text: { - input: { - normal: 0.2, - cached: 0.03, - }, - output: { - normal: 1, - }, - }, - image: 0, - }, -} as const -const MINIMAX_MINIMAX_01 = { - id: 'minimax/minimax-01', - name: 'MiniMax: MiniMax-01', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: ['max_tokens', 'temperature', 'top_p'], - }, - context_window: 1000000, - max_output_tokens: 1000192, - pricing: { - text: { - input: { - normal: 0.2, - cached: 0, - }, - output: { - normal: 1.1, - }, - }, - image: 0, - }, -} as const -const PRIME_INTELLECT_INTELLECT_3 = { - id: 'prime-intellect/intellect-3', - name: 'Prime Intellect: INTELLECT-3', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 131072, - pricing: { - text: { - input: { - normal: 0.2, - cached: 0, - }, - output: { - normal: 1.1, - }, - }, - image: 0, - }, -} as const -const QWEN_QWEN3_VL_235B_A22B_INSTRUCT = { - id: 'qwen/qwen3-vl-235b-a22b-instruct', - name: 'Qwen: Qwen3 VL 235B A22B Instruct', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 0.2, - cached: 0, - }, - output: { - normal: 1.2, - }, - }, - image: 0, - }, -} as const -const X_AI_GROK_CODE_FAST_1 = { - id: 'x-ai/grok-code-fast-1', - name: 'xAI: Grok Code Fast 1', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'include_reasoning', - 'logprobs', - 'max_tokens', - 'reasoning', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 256000, - max_output_tokens: 10000, - pricing: { - text: { - input: { - normal: 0.2, - cached: 0.02, - }, - output: { - normal: 1.5, - }, - }, - image: 0, - }, -} as const -const KWAIPILOT_KAT_CODER_PRO = { - id: 'kwaipilot/kat-coder-pro', - name: 'Kwaipilot: KAT-Coder-Pro V1', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 262144, - max_output_tokens: 128000, - pricing: { - text: { - input: { - normal: 0.207, - cached: 0.0414, - }, - output: { - normal: 0.828, - }, - }, - image: 0, - }, -} as const -const DEEPSEEK_DEEPSEEK_V3_2_EXP = { - id: 'deepseek/deepseek-v3.2-exp', - name: 
'DeepSeek: DeepSeek V3.2 Exp', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 65536, - pricing: { - text: { - input: { - normal: 0.21, - cached: 0, - }, - output: { - normal: 0.32, - }, - }, - image: 0, - }, -} as const -const QWEN_QWEN_VL_PLUS = { - id: 'qwen/qwen-vl-plus', - name: 'Qwen: Qwen VL Plus', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'temperature', - 'top_p', - ], - }, - context_window: 7500, - max_output_tokens: 1500, - pricing: { - text: { - input: { - normal: 0.21, - cached: 0, - }, - output: { - normal: 0.63, - }, - }, - image: 0.0002688, - }, -} as const -const DEEPSEEK_DEEPSEEK_V3_1_TERMINUS_EXACTO = { - id: 'deepseek/deepseek-v3.1-terminus:exacto', - name: 'DeepSeek: DeepSeek V3.1 Terminus (exacto)', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 0.21, - cached: 0.168, - }, - output: { - normal: 0.79, - }, - }, - image: 0, - }, -} as const -const DEEPSEEK_DEEPSEEK_V3_1_TERMINUS = { - id: 'deepseek/deepseek-v3.1-terminus', - name: 'DeepSeek: DeepSeek V3.1 Terminus', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 0.21, - cached: 0.168, - }, - output: { - normal: 0.79, - }, - }, - image: 0, - }, -} as const -const QWEN_QWEN3_CODER = { - id: 'qwen/qwen3-coder', - name: 'Qwen: Qwen3 Coder 480B A35B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 1048576, - max_output_tokens: 262144, - pricing: { - text: { - input: { - normal: 0.22, - cached: 0, - }, - output: { - normal: 0.95, - }, - }, - image: 0, - }, -} as const -const QWEN_QWEN3_CODER_EXACTO = { - id: 'qwen/qwen3-coder:exacto', - name: 'Qwen: Qwen3 Coder 480B A35B (exacto)', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 1048576, - max_output_tokens: 65536, - pricing: { - text: { - input: { - normal: 0.22, - cached: 0, - }, - output: { - normal: 1.8, - }, - 
}, - image: 0, - }, -} as const -const MISTRALAI_MISTRAL_TINY = { - id: 'mistralai/mistral-tiny', - name: 'Mistral Tiny', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 32000, - pricing: { - text: { - input: { - normal: 0.25, - cached: 0, - }, - output: { - normal: 0.25, - }, - }, - image: 0, - }, -} as const -const DEEPSEEK_DEEPSEEK_V3_2 = { - id: 'deepseek/deepseek-v3.2', - name: 'DeepSeek: DeepSeek V3.2', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 65536, - pricing: { - text: { - input: { - normal: 0.25, - cached: 0, - }, - output: { - normal: 0.38, - }, - }, - image: 0, - }, -} as const -const TNGTECH_DEEPSEEK_R1T2_CHIMERA = { - id: 'tngtech/deepseek-r1t2-chimera', - name: 'TNG: DeepSeek R1T2 Chimera', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 163840, - max_output_tokens: 163840, - pricing: { - text: { - input: { - normal: 0.25, - cached: 0, - }, - output: { - normal: 0.85, - }, - }, - image: 0, - }, -} as const -const TNGTECH_TNG_R1T_CHIMERA = { - id: 'tngtech/tng-r1t-chimera', - name: 'TNG: R1T Chimera', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 163840, - max_output_tokens: 65536, - pricing: { - text: { - input: { - normal: 0.25, - cached: 0, - }, - output: { - normal: 0.85, - }, - }, - image: 0, - }, -} as const -const INCEPTION_MERCURY = { - id: 'inception/mercury', - name: 'Inception: Mercury', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 128000, - max_output_tokens: 16384, - pricing: { - text: { - input: { - normal: 0.25, - cached: 0, - }, - output: { - normal: 1, - }, - }, - image: 0, - }, -} as const -const INCEPTION_MERCURY_CODER = { - id: 'inception/mercury-coder', - name: 'Inception: Mercury Coder', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 128000, - max_output_tokens: 16384, - pricing: { - text: { - input: { - normal: 0.25, - cached: 0, - }, - output: { - normal: 1, - }, - }, - image: 0, - }, -} as 
const -const ANTHROPIC_CLAUDE_3_HAIKU = { - id: 'anthropic/claude-3-haiku', - name: 'Anthropic: Claude 3 Haiku', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'max_tokens', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 200000, - max_output_tokens: 4096, - pricing: { - text: { - input: { - normal: 0.25, - cached: 0.32999999999999996, - }, - output: { - normal: 1.25, - }, - }, - image: 0.0004, - }, -} as const -const OPENAI_GPT_5_MINI = { - id: 'openai/gpt-5-mini', - name: 'OpenAI: GPT-5 Mini', - supports: { - input: ['text', 'image', 'document'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'response_format', - 'seed', - 'structured_outputs', - 'tool_choice', - 'tools', - ], - }, - context_window: 400000, - max_output_tokens: 128000, - pricing: { - text: { - input: { - normal: 0.25, - cached: 0.025, - }, - output: { - normal: 2, - }, - }, - image: 0, - }, -} as const -const OPENAI_GPT_5_1_CODEX_MINI = { - id: 'openai/gpt-5.1-codex-mini', - name: 'OpenAI: GPT-5.1-Codex-Mini', - supports: { - input: ['image', 'text'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'response_format', - 'seed', - 'structured_outputs', - 'tool_choice', - 'tools', - ], - }, - context_window: 400000, - max_output_tokens: 100000, - pricing: { - text: { - input: { - normal: 0.25, - cached: 0.025, - }, - output: { - normal: 2, - }, - }, - image: 0, - }, -} as const -const BYTEDANCE_SEED_SEED_1_6 = { - id: 'bytedance-seed/seed-1.6', - name: 'ByteDance Seed: Seed 1.6', - supports: { - input: ['image', 'text', 'video'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'response_format', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 262144, - max_output_tokens: 32768, - pricing: { - text: { - input: { - normal: 0.25, - cached: 0, - }, - output: { - normal: 2, - }, - }, - image: 0, - }, -} as const -const DEEPSEEK_DEEPSEEK_R1_DISTILL_QWEN_32B = { - id: 'deepseek/deepseek-r1-distill-qwen-32b', - name: 'DeepSeek: R1 Distill Qwen 32B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 128000, - pricing: { - text: { - input: { - normal: 0.27, - cached: 0, - }, - output: { - normal: 0.27, - }, - }, - image: 0, - }, -} as const -const DEEPSEEK_DEEPSEEK_V3_2_SPECIALE = { - id: 'deepseek/deepseek-v3.2-speciale', - name: 'DeepSeek: DeepSeek V3.2 Speciale', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 65536, - pricing: { - text: { - input: { - normal: 0.27, - cached: 0, - }, - output: { - normal: 0.41, - }, - }, - image: 0, - }, -} as const -const NEX_AGI_DEEPSEEK_V3_1_NEX_N1 = { - id: 'nex-agi/deepseek-v3.1-nex-n1', - name: 'Nex AGI: DeepSeek V3.1 Nex N1', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 
'frequency_penalty', - 'response_format', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 8192, - max_output_tokens: 163840, - pricing: { - text: { - input: { - normal: 0.27, - cached: 0, - }, - output: { - normal: 1, - }, - }, - image: 0, - }, -} as const -const MINIMAX_MINIMAX_M2_1 = { - id: 'minimax/minimax-m2.1', - name: 'MiniMax: MiniMax M2.1', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 204800, - max_output_tokens: 65536, - pricing: { - text: { - input: { - normal: 0.27, - cached: 0, - }, - output: { - normal: 1.12, - }, - }, - image: 0, - }, -} as const -const BAIDU_ERNIE_4_5_300B_A47B = { - id: 'baidu/ernie-4.5-300b-a47b', - name: 'Baidu: ERNIE 4.5 300B A47B ', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 12000, - pricing: { - text: { - input: { - normal: 0.28, - cached: 0, - }, - output: { - normal: 1.1, - }, - }, - image: 0, - }, -} as const -const MOONSHOTAI_KIMI_DEV_72B = { - id: 'moonshotai/kimi-dev-72b', - name: 'MoonshotAI: Kimi Dev 72B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'reasoning', - 'response_format', - 'structured_outputs', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 131072, - pricing: { - text: { - input: { - normal: 0.29, - cached: 0, - }, - output: { - normal: 1.15, - }, - }, - image: 0, - }, -} as const -const NOUSRESEARCH_HERMES_3_LLAMA_3_1_70B = { - id: 'nousresearch/hermes-3-llama-3.1-70b', - name: 'Nous: Hermes 3 70B Instruct', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 0.3, - cached: 0, - }, - output: { - normal: 0.3, - }, - }, - image: 0, - }, -} as const -const META_LLAMA_LLAMA_3_70B_INSTRUCT = { - id: 'meta-llama/llama-3-70b-instruct', - name: 'Meta: Llama 3 70B Instruct', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 8192, - max_output_tokens: 16384, - pricing: { - text: { - input: { - normal: 0.3, - cached: 0, - }, - output: { - normal: 0.4, - }, - }, - image: 0, - }, -} as const -const THEDRUMMER_CYDONIA_24B_V4_1 = { - id: 'thedrummer/cydonia-24b-v4.1', - name: 'TheDrummer: Cydonia 24B V4.1', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 
'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 131072, - pricing: { - text: { - input: { - normal: 0.3, - cached: 0, - }, - output: { - normal: 0.5, - }, - }, - image: 0, - }, -} as const -const X_AI_GROK_3_MINI = { - id: 'x-ai/grok-3-mini', - name: 'xAI: Grok 3 Mini', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'include_reasoning', - 'logprobs', - 'max_tokens', - 'reasoning', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 0.3, - cached: 0.075, - }, - output: { - normal: 0.5, - }, - }, - image: 0, - }, -} as const -const X_AI_GROK_3_MINI_BETA = { - id: 'x-ai/grok-3-mini-beta', - name: 'xAI: Grok 3 Mini Beta', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'include_reasoning', - 'logprobs', - 'max_tokens', - 'reasoning', - 'response_format', - 'seed', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 0.3, - cached: 0.075, - }, - output: { - normal: 0.5, - }, - }, - image: 0, - }, -} as const -const Z_AI_GLM_4_6V = { - id: 'z-ai/glm-4.6v', - name: 'Z.AI: GLM 4.6V', - supports: { - input: ['image', 'text', 'video'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 131072, - pricing: { - text: { - input: { - normal: 0.3, - cached: 0, - }, - output: { - normal: 0.9, - }, - }, - image: 0, - }, -} as const -const MISTRALAI_CODESTRAL_2508 = { - id: 'mistralai/codestral-2508', - name: 'Mistral: Codestral 2508', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 256000, - pricing: { - text: { - input: { - normal: 0.3, - cached: 0, - }, - output: { - normal: 0.9, - }, - }, - image: 0, - }, -} as const -const DEEPSEEK_DEEPSEEK_CHAT = { - id: 'deepseek/deepseek-chat', - name: 'DeepSeek: DeepSeek V3', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 163840, - pricing: { - text: { - input: { - normal: 0.3, - cached: 0, - }, - output: { - normal: 1.2, - }, - }, - image: 0, - }, -} as const -const TNGTECH_DEEPSEEK_R1T_CHIMERA = { - id: 'tngtech/deepseek-r1t-chimera', - name: 'TNG: DeepSeek R1T Chimera', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'top_k', - 'top_p', - ], - }, - 
context_window: 163840, - max_output_tokens: 163840, - pricing: { - text: { - input: { - normal: 0.3, - cached: 0, - }, - output: { - normal: 1.2, - }, - }, - image: 0, - }, -} as const -const QWEN_QWEN3_CODER_FLASH = { - id: 'qwen/qwen3-coder-flash', - name: 'Qwen: Qwen3 Coder Flash', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 128000, - max_output_tokens: 65536, - pricing: { - text: { - input: { - normal: 0.3, - cached: 0.08, - }, - output: { - normal: 1.5, - }, - }, - image: 0, - }, -} as const -const GOOGLE_GEMINI_2_5_FLASH_IMAGE_PREVIEW = { - id: 'google/gemini-2.5-flash-image-preview', - name: 'Google: Gemini 2.5 Flash Image Preview (Nano Banana)', - supports: { - input: ['image', 'text'], - output: ['image', 'text'], - supports: [ - 'max_tokens', - 'response_format', - 'seed', - 'structured_outputs', - 'temperature', - 'top_p', - ], - }, - context_window: 32768, - max_output_tokens: 32768, - pricing: { - text: { - input: { - normal: 0.3, - cached: 0, - }, - output: { - normal: 2.5, - }, - }, - image: 0.001238, - }, -} as const -const GOOGLE_GEMINI_2_5_FLASH_PREVIEW_09_2025 = { - id: 'google/gemini-2.5-flash-preview-09-2025', - name: 'Google: Gemini 2.5 Flash Preview 09-2025', - supports: { - input: ['image', 'document', 'text', 'audio', 'video'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 1048576, - max_output_tokens: 65536, - pricing: { - text: { - input: { - normal: 0.3, - cached: 0.4583, - }, - output: { - normal: 2.5, - }, - }, - image: 0.001238, - }, -} as const -const GOOGLE_GEMINI_2_5_FLASH_IMAGE = { - id: 'google/gemini-2.5-flash-image', - name: 'Google: Gemini 2.5 Flash Image (Nano Banana)', - supports: { - input: ['image', 'text'], - output: ['image', 'text'], - supports: [ - 'max_tokens', - 'response_format', - 'seed', - 'structured_outputs', - 'temperature', - 'top_p', - ], - }, - context_window: 32768, - max_output_tokens: 32768, - pricing: { - text: { - input: { - normal: 0.3, - cached: 0, - }, - output: { - normal: 2.5, - }, - }, - image: 0.001238, - }, -} as const -const AMAZON_NOVA_2_LITE_V1 = { - id: 'amazon/nova-2-lite-v1', - name: 'Amazon: Nova 2 Lite', - supports: { - input: ['text', 'image', 'video', 'document'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 1000000, - max_output_tokens: 65535, - pricing: { - text: { - input: { - normal: 0.3, - cached: 0, - }, - output: { - normal: 2.5, - }, - }, - image: 0, - }, -} as const -const GOOGLE_GEMINI_2_5_FLASH = { - id: 'google/gemini-2.5-flash', - name: 'Google: Gemini 2.5 Flash', - supports: { - input: ['document', 'image', 'text', 'audio', 'video'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 1048576, - max_output_tokens: 65535, - pricing: { - text: { - input: { - normal: 0.3, - cached: 0.4133, - }, - output: { - normal: 2.5, - }, - }, - image: 0.001238, - }, -} as const -const Z_AI_GLM_4_6 = { - id: 'z-ai/glm-4.6', 
- name: 'Z.AI: GLM 4.6', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_a', - 'top_k', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 200000, - max_output_tokens: 65536, - pricing: { - text: { - input: { - normal: 0.35, - cached: 0, - }, - output: { - normal: 1.5, - }, - }, - image: 0, - }, -} as const -const Z_AI_GLM_4_5 = { - id: 'z-ai/glm-4.5', - name: 'Z.AI: GLM 4.5', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 65536, - pricing: { - text: { - input: { - normal: 0.35, - cached: 0, - }, - output: { - normal: 1.55, - }, - }, - image: 0, - }, -} as const -const MOONSHOTAI_KIMI_K2_0905 = { - id: 'moonshotai/kimi-k2-0905', - name: 'MoonshotAI: Kimi K2 0905', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 262144, - max_output_tokens: 262144, - pricing: { - text: { - input: { - normal: 0.39, - cached: 0, - }, - output: { - normal: 1.9, - }, - }, - image: 0, - }, -} as const -const META_LLAMA_LLAMA_3_1_70B_INSTRUCT = { - id: 'meta-llama/llama-3.1-70b-instruct', - name: 'Meta: Llama 3.1 70B Instruct', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 0.4, - cached: 0, - }, - output: { - normal: 0.4, - }, - }, - image: 0, - }, -} as const -const THEDRUMMER_UNSLOPNEMO_12B = { - id: 'thedrummer/unslopnemo-12b', - name: 'TheDrummer: UnslopNemo 12B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 32000, - pricing: { - text: { - input: { - normal: 0.4, - cached: 0, - }, - output: { - normal: 0.4, - }, - }, - image: 0, - }, -} as const -const QWEN_QWEN_PLUS_2025_07_28 = { - id: 'qwen/qwen-plus-2025-07-28', - name: 'Qwen: Qwen Plus 0728', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 1000000, - max_output_tokens: 32768, - pricing: { - text: { - input: { - normal: 0.4, - cached: 0, - }, - output: { - normal: 1.2, - }, - }, - image: 0, - }, -} as const -const QWEN_QWEN_PLUS = { - id: 'qwen/qwen-plus', - name: 'Qwen: 
Qwen-Plus', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 8192, - pricing: { - text: { - input: { - normal: 0.4, - cached: 0.16, - }, - output: { - normal: 1.2, - }, - }, - image: 0, - }, -} as const -const Z_AI_GLM_4_7 = { - id: 'z-ai/glm-4.7', - name: 'Z.AI: GLM 4.7', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_a', - 'top_k', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 200000, - max_output_tokens: 65535, - pricing: { - text: { - input: { - normal: 0.4, - cached: 0, - }, - output: { - normal: 1.5, - }, - }, - image: 0, - }, -} as const -const OPENAI_GPT_4_1_MINI = { - id: 'openai/gpt-4.1-mini', - name: 'OpenAI: GPT-4.1 Mini', - supports: { - input: ['image', 'text', 'document'], - output: ['text'], - supports: [ - 'max_tokens', - 'response_format', - 'seed', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 1047576, - max_output_tokens: 32768, - pricing: { - text: { - input: { - normal: 0.4, - cached: 0.1, - }, - output: { - normal: 1.6, - }, - }, - image: 0, - }, -} as const -const MOONSHOTAI_KIMI_K2_THINKING = { - id: 'moonshotai/kimi-k2-thinking', - name: 'MoonshotAI: Kimi K2 Thinking', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 262144, - max_output_tokens: 65535, - pricing: { - text: { - input: { - normal: 0.4, - cached: 0, - }, - output: { - normal: 1.75, - }, - }, - image: 0, - }, -} as const -const MISTRALAI_DEVSTRAL_MEDIUM = { - id: 'mistralai/devstral-medium', - name: 'Mistral: Devstral Medium', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 0.4, - cached: 0, - }, - output: { - normal: 2, - }, - }, - image: 0, - }, -} as const -const MISTRALAI_MISTRAL_MEDIUM_3 = { - id: 'mistralai/mistral-medium-3', - name: 'Mistral: Mistral Medium 3', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 0.4, - cached: 0, - }, - output: { - normal: 2, - }, - }, - image: 0, - }, -} as const -const MISTRALAI_MISTRAL_MEDIUM_3_1 = { - id: 'mistralai/mistral-medium-3.1', - name: 'Mistral: Mistral Medium 3.1', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'frequency_penalty', - 
'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 0.4, - cached: 0, - }, - output: { - normal: 2, - }, - }, - image: 0, - }, -} as const -const MINIMAX_MINIMAX_M1 = { - id: 'minimax/minimax-m1', - name: 'MiniMax: MiniMax M1', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'seed', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 1000000, - max_output_tokens: 40000, - pricing: { - text: { - input: { - normal: 0.4, - cached: 0, - }, - output: { - normal: 2.2, - }, - }, - image: 0, - }, -} as const -const QWEN_QWEN_PLUS_2025_07_28_THINKING = { - id: 'qwen/qwen-plus-2025-07-28:thinking', - name: 'Qwen: Qwen Plus 0728 (thinking)', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'presence_penalty', - 'reasoning', - 'response_format', - 'seed', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 1000000, - max_output_tokens: 32768, - pricing: { - text: { - input: { - normal: 0.4, - cached: 0, - }, - output: { - normal: 4, - }, - }, - image: 0, - }, -} as const -const BAIDU_ERNIE_4_5_VL_424B_A47B = { - id: 'baidu/ernie-4.5-vl-424b-a47b', - name: 'Baidu: ERNIE 4.5 VL 424B A47B ', - supports: { - input: ['image', 'text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'seed', - 'stop', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 16000, - pricing: { - text: { - input: { - normal: 0.42, - cached: 0, - }, - output: { - normal: 1.25, - }, - }, - image: 0, - }, -} as const -const Z_AI_GLM_4_6_EXACTO = { - id: 'z-ai/glm-4.6:exacto', - name: 'Z.AI: GLM 4.6 (exacto)', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 200000, - max_output_tokens: 131072, - pricing: { - text: { - input: { - normal: 0.44, - cached: 0, - }, - output: { - normal: 1.76, - }, - }, - image: 0, - }, -} as const -const UNDI95_REMM_SLERP_L2_13B = { - id: 'undi95/remm-slerp-l2-13b', - name: 'ReMM SLERP 13B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'top_a', - 'top_k', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 4096, - pricing: { - text: { - input: { - normal: 0.45, - cached: 0, - }, - output: { - normal: 0.65, - }, - }, - image: 0, - }, -} as const -const DEEPSEEK_DEEPSEEK_R1_0528 = { - id: 'deepseek/deepseek-r1-0528', - name: 'DeepSeek: R1 0528', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'min_p', - 'presence_penalty', - 
'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 163840, - max_output_tokens: 32768, - pricing: { - text: { - input: { - normal: 0.45, - cached: 0, - }, - output: { - normal: 2.15, - }, - }, - image: 0, - }, -} as const -const QWEN_QWEN3_VL_235B_A22B_THINKING = { - id: 'qwen/qwen3-vl-235b-a22b-thinking', - name: 'Qwen: Qwen3 VL 235B A22B Thinking', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 262144, - pricing: { - text: { - input: { - normal: 0.45, - cached: 0, - }, - output: { - normal: 3.5, - }, - }, - image: 0, - }, -} as const -const MICROSOFT_WIZARDLM_2_8X22B = { - id: 'microsoft/wizardlm-2-8x22b', - name: 'WizardLM-2 8x22B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 65536, - max_output_tokens: 16384, - pricing: { - text: { - input: { - normal: 0.48, - cached: 0, - }, - output: { - normal: 0.48, - }, - }, - image: 0, - }, -} as const -const ARCEE_AI_CODER_LARGE = { - id: 'arcee-ai/coder-large', - name: 'Arcee AI: Coder Large', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'stop', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 32768, - pricing: { - text: { - input: { - normal: 0.5, - cached: 0, - }, - output: { - normal: 0.8, - }, - }, - image: 0, - }, -} as const -const MISTRALAI_MISTRAL_LARGE_2512 = { - id: 'mistralai/mistral-large-2512', - name: 'Mistral: Mistral Large 3 2512', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 256000, - pricing: { - text: { - input: { - normal: 0.5, - cached: 0, - }, - output: { - normal: 1.5, - }, - }, - image: 0, - }, -} as const -const QWEN_QWEN3_VL_32B_INSTRUCT = { - id: 'qwen/qwen3-vl-32b-instruct', - name: 'Qwen: Qwen3 VL 32B Instruct', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'stop', - 'structured_outputs', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 262144, - pricing: { - text: { - input: { - normal: 0.5, - cached: 0, - }, - output: { - normal: 1.5, - }, - }, - image: 0, - }, -} as const -const OPENAI_GPT_3_5_TURBO = { - id: 'openai/gpt-3.5-turbo', - name: 'OpenAI: GPT-3.5 Turbo', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', 
- 'top_logprobs', - 'top_p', - ], - }, - context_window: 16385, - max_output_tokens: 4096, - pricing: { - text: { - input: { - normal: 0.5, - cached: 0, - }, - output: { - normal: 1.5, - }, - }, - image: 0, - }, -} as const -const DEEPSEEK_DEEPSEEK_PROVER_V2 = { - id: 'deepseek/deepseek-prover-v2', - name: 'DeepSeek: DeepSeek Prover V2', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 163840, - pricing: { - text: { - input: { - normal: 0.5, - cached: 0, - }, - output: { - normal: 2.18, - }, - }, - image: 0, - }, -} as const -const MOONSHOTAI_KIMI_K2 = { - id: 'moonshotai/kimi-k2', - name: 'MoonshotAI: Kimi K2 0711', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logprobs', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 0.5, - cached: 0, - }, - output: { - normal: 2.4, - }, - }, - image: 0, - }, -} as const -const GOOGLE_GEMINI_3_FLASH_PREVIEW = { - id: 'google/gemini-3-flash-preview', - name: 'Google: Gemini 3 Flash Preview', - supports: { - input: ['text', 'image', 'document', 'audio', 'video'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 1048576, - max_output_tokens: 65535, - pricing: { - text: { - input: { - normal: 0.5, - cached: 0.05, - }, - output: { - normal: 3, - }, - }, - image: 0, - }, -} as const -const MISTRALAI_MIXTRAL_8X7B_INSTRUCT = { - id: 'mistralai/mixtral-8x7b-instruct', - name: 'Mistral: Mixtral 8x7B Instruct', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 32768, - max_output_tokens: 16384, - pricing: { - text: { - input: { - normal: 0.54, - cached: 0, - }, - output: { - normal: 0.54, - }, - }, - image: 0, - }, -} as const -const THEDRUMMER_SKYFALL_36B_V2 = { - id: 'thedrummer/skyfall-36b-v2', - name: 'TheDrummer: Skyfall 36B V2', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'seed', - 'stop', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 32768, - max_output_tokens: 32768, - pricing: { - text: { - input: { - normal: 0.55, - cached: 0, - }, - output: { - normal: 0.8, - }, - }, - image: 0, - }, -} as const -const MINIMAX_MINIMAX_M1_80K = { - id: 'minimax/minimax-m1-80k', - name: 'MiniMax: MiniMax-M1-80k', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'seed', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 1000000, - max_output_tokens: 40000, - pricing: 
{ - text: { - input: { - normal: 0.55, - cached: 0, - }, - output: { - normal: 2.2, - }, - }, - image: 0, - }, -} as const -const STEPFUN_AI_STEP3 = { - id: 'stepfun-ai/step3', - name: 'StepFun: Step3', - supports: { - input: ['image', 'text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'reasoning', - 'response_format', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 65536, - max_output_tokens: 65536, - pricing: { - text: { - input: { - normal: 0.57, - cached: 0, - }, - output: { - normal: 1.42, - }, - }, - image: 0, - }, -} as const -const NVIDIA_LLAMA_3_1_NEMOTRON_ULTRA_253B_V1 = { - id: 'nvidia/llama-3.1-nemotron-ultra-253b-v1', - name: 'NVIDIA: Llama 3.1 Nemotron Ultra 253B v1', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'structured_outputs', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 0.6, - cached: 0, - }, - output: { - normal: 1.8, - }, - }, - image: 0, - }, -} as const -const Z_AI_GLM_4_5V = { - id: 'z-ai/glm-4.5v', - name: 'Z.AI: GLM 4.5V', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 65536, - max_output_tokens: 16384, - pricing: { - text: { - input: { - normal: 0.6, - cached: 0.11, - }, - output: { - normal: 1.8, - }, - }, - image: 0, - }, -} as const -const MOONSHOTAI_KIMI_K2_0905_EXACTO = { - id: 'moonshotai/kimi-k2-0905:exacto', - name: 'MoonshotAI: Kimi K2 0905 (exacto)', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 262144, - pricing: { - text: { - input: { - normal: 0.6, - cached: 0, - }, - output: { - normal: 2.5, - }, - }, - image: 0, - }, -} as const -const GOOGLE_GEMMA_2_27B_IT = { - id: 'google/gemma-2-27b-it', - name: 'Google: Gemma 2 27B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'stop', - 'structured_outputs', - 'temperature', - 'top_p', - ], - }, - context_window: 8192, - pricing: { - text: { - input: { - normal: 0.65, - cached: 0, - }, - output: { - normal: 0.65, - }, - }, - image: 0, - }, -} as const -const SAO10K_L3_3_EURYALE_70B = { - id: 'sao10k/l3.3-euryale-70b', - name: 'Sao10K: Llama 3.3 Euryale 70B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 8192, - max_output_tokens: 16384, - pricing: { - text: { - input: { - normal: 0.65, - cached: 0, - }, - output: { - normal: 0.75, - }, - }, - image: 0, - }, -} as const -const SAO10K_L3_1_EURYALE_70B = { - id: 'sao10k/l3.1-euryale-70b', - name: 'Sao10K: Llama 3.1 Euryale 70B 
v2.2', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 0.65, - cached: 0, - }, - output: { - normal: 0.75, - }, - }, - image: 0, - }, -} as const -const AION_LABS_AION_1_0_MINI = { - id: 'aion-labs/aion-1.0-mini', - name: 'AionLabs: Aion-1.0-Mini', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'temperature', - 'top_p', - ], - }, - context_window: 16384, - max_output_tokens: 32768, - pricing: { - text: { - input: { - normal: 0.7, - cached: 0, - }, - output: { - normal: 1.4, - }, - }, - image: 0, - }, -} as const -const DEEPSEEK_DEEPSEEK_R1 = { - id: 'deepseek/deepseek-r1', - name: 'DeepSeek: R1', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 163840, - max_output_tokens: 163840, - pricing: { - text: { - input: { - normal: 0.7, - cached: 0, - }, - output: { - normal: 2.4, - }, - }, - image: 0, - }, -} as const -const MANCER_WEAVER = { - id: 'mancer/weaver', - name: 'Mancer: Weaver (alpha)', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'temperature', - 'top_a', - 'top_k', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 8000, - max_output_tokens: 2000, - pricing: { - text: { - input: { - normal: 0.75, - cached: 0, - }, - output: { - normal: 1, - }, - }, - image: 0, - }, -} as const -const ARCEE_AI_VIRTUOSO_LARGE = { - id: 'arcee-ai/virtuoso-large', - name: 'Arcee AI: Virtuoso Large', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 64000, - pricing: { - text: { - input: { - normal: 0.75, - cached: 0, - }, - output: { - normal: 1.2, - }, - }, - image: 0, - }, -} as const -const MORPH_MORPH_V3_FAST = { - id: 'morph/morph-v3-fast', - name: 'Morph: Morph V3 Fast', - supports: { - input: ['text'], - output: ['text'], - supports: ['max_tokens', 'stop', 'temperature'], - }, - context_window: 81920, - max_output_tokens: 38000, - pricing: { - text: { - input: { - normal: 0.8, - cached: 0, - }, - output: { - normal: 1.2, - }, - }, - image: 0, - }, -} as const -const ALFREDPROS_CODELLAMA_7B_INSTRUCT_SOLIDITY = { - id: 'alfredpros/codellama-7b-instruct-solidity', - name: 'AlfredPros: CodeLLaMa 7B Instruct Solidity', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'seed', - 'stop', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 4096, - max_output_tokens: 4096, - pricing: { - text: { - input: { - normal: 0.8, - cached: 0, - }, - output: 
{ - normal: 1.2, - }, - }, - image: 0, - }, -} as const -const ELEUTHERAI_LLEMMA_7B = { - id: 'eleutherai/llemma_7b', - name: 'EleutherAI: Llemma 7b', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'seed', - 'stop', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 4096, - max_output_tokens: 4096, - pricing: { - text: { - input: { - normal: 0.8, - cached: 0, - }, - output: { - normal: 1.2, - }, - }, - image: 0, - }, -} as const -const AION_LABS_AION_RP_LLAMA_3_1_8B = { - id: 'aion-labs/aion-rp-llama-3.1-8b', - name: 'AionLabs: Aion-RP 1.0 (8B)', - supports: { - input: ['text'], - output: ['text'], - supports: ['max_tokens', 'temperature', 'top_p'], - }, - context_window: 32768, - max_output_tokens: 32768, - pricing: { - text: { - input: { - normal: 0.8, - cached: 0, - }, - output: { - normal: 1.6, - }, - }, - image: 0, - }, -} as const -const AMAZON_NOVA_PRO_V1 = { - id: 'amazon/nova-pro-v1', - name: 'Amazon: Nova Pro 1.0', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: ['max_tokens', 'stop', 'temperature', 'tools', 'top_k', 'top_p'], - }, - context_window: 300000, - max_output_tokens: 5120, - pricing: { - text: { - input: { - normal: 0.8, - cached: 0, - }, - output: { - normal: 3.2, - }, - }, - image: 0.0012, - }, -} as const -const QWEN_QWEN_VL_MAX = { - id: 'qwen/qwen-vl-max', - name: 'Qwen: Qwen VL Max', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 8192, - pricing: { - text: { - input: { - normal: 0.8, - cached: 0, - }, - output: { - normal: 3.2, - }, - }, - image: 0.001024, - }, -} as const -const ANTHROPIC_CLAUDE_3_5_HAIKU = { - id: 'anthropic/claude-3.5-haiku', - name: 'Anthropic: Claude 3.5 Haiku', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'max_tokens', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 200000, - max_output_tokens: 8192, - pricing: { - text: { - input: { - normal: 0.8, - cached: 1.08, - }, - output: { - normal: 4, - }, - }, - image: 0, - }, -} as const -const RELACE_RELACE_APPLY_3 = { - id: 'relace/relace-apply-3', - name: 'Relace: Relace Apply 3', - supports: { - input: ['text'], - output: ['text'], - supports: ['max_tokens', 'seed', 'stop'], - }, - context_window: 256000, - max_output_tokens: 128000, - pricing: { - text: { - input: { - normal: 0.85, - cached: 0, - }, - output: { - normal: 1.25, - }, - }, - image: 0, - }, -} as const -const SWITCHPOINT_ROUTER = { - id: 'switchpoint/router', - name: 'Switchpoint Router', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'seed', - 'stop', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 0.85, - cached: 0, - }, - output: { - normal: 3.4, - }, - }, - image: 0, - }, -} as const -const DEEPCOGITO_COGITO_V2_PREVIEW_LLAMA_70B = { - id: 'deepcogito/cogito-v2-preview-llama-70b', - name: 'Deep Cogito: Cogito V2 Preview Llama 70B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 
'repetition_penalty', - 'response_format', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 0.88, - cached: 0, - }, - output: { - normal: 0.88, - }, - }, - image: 0, - }, -} as const -const MORPH_MORPH_V3_LARGE = { - id: 'morph/morph-v3-large', - name: 'Morph: Morph V3 Large', - supports: { - input: ['text'], - output: ['text'], - supports: ['max_tokens', 'stop', 'temperature'], - }, - context_window: 81920, - max_output_tokens: 131072, - pricing: { - text: { - input: { - normal: 0.9, - cached: 0, - }, - output: { - normal: 1.9, - }, - }, - image: 0, - }, -} as const -const ARCEE_AI_MAESTRO_REASONING = { - id: 'arcee-ai/maestro-reasoning', - name: 'Arcee AI: Maestro Reasoning', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'stop', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 32000, - pricing: { - text: { - input: { - normal: 0.9, - cached: 0, - }, - output: { - normal: 3.3, - }, - }, - image: 0, - }, -} as const -const PERPLEXITY_SONAR = { - id: 'perplexity/sonar', - name: 'Perplexity: Sonar', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'temperature', - 'top_k', - 'top_p', - 'web_search_options', - ], - }, - context_window: 127072, - pricing: { - text: { - input: { - normal: 1, - cached: 0, - }, - output: { - normal: 1, - }, - }, - image: 0, - }, -} as const -const NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B = { - id: 'nousresearch/hermes-3-llama-3.1-405b', - name: 'Nous: Hermes 3 405B Instruct', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 16384, - pricing: { - text: { - input: { - normal: 1, - cached: 0, - }, - output: { - normal: 1, - }, - }, - image: 0, - }, -} as const -const NEVERSLEEP_NOROMAID_20B = { - id: 'neversleep/noromaid-20b', - name: 'Noromaid 20B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'top_a', - 'top_k', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 8192, - pricing: { - text: { - input: { - normal: 1, - cached: 0, - }, - output: { - normal: 1.75, - }, - }, - image: 0, - }, -} as const -const OPENAI_GPT_3_5_TURBO_0613 = { - id: 'openai/gpt-3.5-turbo-0613', - name: 'OpenAI: GPT-3.5 Turbo (older v0613)', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 4095, - max_output_tokens: 4096, - pricing: { - text: { - input: { - normal: 1, - cached: 0, - }, - output: { - normal: 2, - }, - }, - image: 0, - }, -} as const -const RELACE_RELACE_SEARCH = { - id: 'relace/relace-search', - name: 'Relace: Relace Search', - 
supports: { - input: ['text'], - output: ['text'], - supports: [ - 'max_tokens', - 'seed', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 256000, - max_output_tokens: 128000, - pricing: { - text: { - input: { - normal: 1, - cached: 0, - }, - output: { - normal: 3, - }, - }, - image: 0, - }, -} as const -const NOUSRESEARCH_HERMES_4_405B = { - id: 'nousresearch/hermes-4-405b', - name: 'Nous: Hermes 4 405B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'temperature', - 'top_k', - 'top_p', - ], - }, - pricing: { - text: { - input: { - normal: 1, - cached: 0, - }, - output: { - normal: 3, - }, - }, - image: 0, - }, -} as const -const QWEN_QWEN3_CODER_PLUS = { - id: 'qwen/qwen3-coder-plus', - name: 'Qwen: Qwen3 Coder Plus', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 128000, - max_output_tokens: 65536, - pricing: { - text: { - input: { - normal: 1, - cached: 0.1, - }, - output: { - normal: 5, - }, - }, - image: 0, - }, -} as const -const ANTHROPIC_CLAUDE_HAIKU_4_5 = { - id: 'anthropic/claude-haiku-4.5', - name: 'Anthropic: Claude Haiku 4.5', - supports: { - input: ['image', 'text'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 200000, - max_output_tokens: 64000, - pricing: { - text: { - input: { - normal: 1, - cached: 1.35, - }, - output: { - normal: 5, - }, - }, - image: 0, - }, -} as const -const OPENAI_O4_MINI_HIGH = { - id: 'openai/o4-mini-high', - name: 'OpenAI: o4 Mini High', - supports: { - input: ['image', 'text', 'document'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'response_format', - 'seed', - 'structured_outputs', - 'tool_choice', - 'tools', - ], - }, - context_window: 200000, - max_output_tokens: 100000, - pricing: { - text: { - input: { - normal: 1.1, - cached: 0.275, - }, - output: { - normal: 4.4, - }, - }, - image: 0, - }, -} as const -const OPENAI_O3_MINI_HIGH = { - id: 'openai/o3-mini-high', - name: 'OpenAI: o3 Mini High', - supports: { - input: ['text', 'document'], - output: ['text'], - supports: [ - 'max_tokens', - 'response_format', - 'seed', - 'structured_outputs', - 'tool_choice', - 'tools', - ], - }, - context_window: 200000, - max_output_tokens: 100000, - pricing: { - text: { - input: { - normal: 1.1, - cached: 0.55, - }, - output: { - normal: 4.4, - }, - }, - image: 0, - }, -} as const -const OPENAI_O3_MINI = { - id: 'openai/o3-mini', - name: 'OpenAI: o3 Mini', - supports: { - input: ['text', 'document'], - output: ['text'], - supports: [ - 'max_tokens', - 'response_format', - 'seed', - 'structured_outputs', - 'tool_choice', - 'tools', - ], - }, - context_window: 200000, - max_output_tokens: 100000, - pricing: { - text: { - input: { - normal: 1.1, - cached: 0.55, - }, - output: { - normal: 4.4, - }, - }, - image: 0, - }, -} as const -const OPENAI_O4_MINI = { - id: 'openai/o4-mini', - name: 'OpenAI: o4 Mini', - supports: { - input: ['image', 'text', 'document'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 
'response_format', - 'seed', - 'structured_outputs', - 'tool_choice', - 'tools', - ], - }, - context_window: 200000, - max_output_tokens: 100000, - pricing: { - text: { - input: { - normal: 1.1, - cached: 0.275, - }, - output: { - normal: 4.4, - }, - }, - image: 0, - }, -} as const -const NVIDIA_LLAMA_3_1_NEMOTRON_70B_INSTRUCT = { - id: 'nvidia/llama-3.1-nemotron-70b-instruct', - name: 'NVIDIA: Llama 3.1 Nemotron 70B Instruct', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 16384, - pricing: { - text: { - input: { - normal: 1.2, - cached: 0, - }, - output: { - normal: 1.2, - }, - }, - image: 0, - }, -} as const -const QWEN_QWEN3_MAX = { - id: 'qwen/qwen3-max', - name: 'Qwen: Qwen3 Max', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 256000, - max_output_tokens: 32768, - pricing: { - text: { - input: { - normal: 1.2, - cached: 0.24, - }, - output: { - normal: 6, - }, - }, - image: 0, - }, -} as const -const DEEPCOGITO_COGITO_V2_1_671B = { - id: 'deepcogito/cogito-v2.1-671b', - name: 'Deep Cogito: Cogito v2.1 671B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'stop', - 'structured_outputs', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 128000, - pricing: { - text: { - input: { - normal: 1.25, - cached: 0, - }, - output: { - normal: 1.25, - }, - }, - image: 0, - }, -} as const -const GOOGLE_GEMINI_2_5_PRO_PREVIEW = { - id: 'google/gemini-2.5-pro-preview', - name: 'Google: Gemini 2.5 Pro Preview 06-05', - supports: { - input: ['document', 'image', 'text', 'audio'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 1048576, - max_output_tokens: 65536, - pricing: { - text: { - input: { - normal: 1.25, - cached: 1.935, - }, - output: { - normal: 10, - }, - }, - image: 0.00516, - }, -} as const -const OPENAI_GPT_5_CHAT = { - id: 'openai/gpt-5-chat', - name: 'OpenAI: GPT-5 Chat', - supports: { - input: ['document', 'image', 'text'], - output: ['text'], - supports: ['max_tokens', 'response_format', 'seed', 'structured_outputs'], - }, - context_window: 128000, - max_output_tokens: 16384, - pricing: { - text: { - input: { - normal: 1.25, - cached: 0.125, - }, - output: { - normal: 10, - }, - }, - image: 0, - }, -} as const -const GOOGLE_GEMINI_2_5_PRO_PREVIEW_05_06 = { - id: 'google/gemini-2.5-pro-preview-05-06', - name: 'Google: Gemini 2.5 Pro Preview 05-06', - supports: { - input: ['text', 'image', 'document', 'audio', 'video'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 1048576, - max_output_tokens: 65535, - pricing: { - text: { - input: { - normal: 1.25, - cached: 
1.935, - }, - output: { - normal: 10, - }, - }, - image: 0.00516, - }, -} as const -const OPENAI_GPT_5 = { - id: 'openai/gpt-5', - name: 'OpenAI: GPT-5', - supports: { - input: ['text', 'image', 'document'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'response_format', - 'seed', - 'structured_outputs', - 'tool_choice', - 'tools', - ], - }, - context_window: 400000, - max_output_tokens: 128000, - pricing: { - text: { - input: { - normal: 1.25, - cached: 0.125, - }, - output: { - normal: 10, - }, - }, - image: 0, - }, -} as const -const OPENAI_GPT_5_CODEX = { - id: 'openai/gpt-5-codex', - name: 'OpenAI: GPT-5 Codex', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'response_format', - 'seed', - 'structured_outputs', - 'tool_choice', - 'tools', - ], - }, - context_window: 400000, - max_output_tokens: 128000, - pricing: { - text: { - input: { - normal: 1.25, - cached: 0.125, - }, - output: { - normal: 10, - }, - }, - image: 0, - }, -} as const -const OPENAI_GPT_5_1_CODEX = { - id: 'openai/gpt-5.1-codex', - name: 'OpenAI: GPT-5.1-Codex', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'response_format', - 'seed', - 'structured_outputs', - 'tool_choice', - 'tools', - ], - }, - context_window: 400000, - max_output_tokens: 128000, - pricing: { - text: { - input: { - normal: 1.25, - cached: 0.125, - }, - output: { - normal: 10, - }, - }, - image: 0, - }, -} as const -const OPENAI_GPT_5_1_CHAT = { - id: 'openai/gpt-5.1-chat', - name: 'OpenAI: GPT-5.1 Chat', - supports: { - input: ['document', 'image', 'text'], - output: ['text'], - supports: [ - 'max_tokens', - 'response_format', - 'seed', - 'structured_outputs', - 'tool_choice', - 'tools', - ], - }, - context_window: 128000, - max_output_tokens: 16384, - pricing: { - text: { - input: { - normal: 1.25, - cached: 0.125, - }, - output: { - normal: 10, - }, - }, - image: 0, - }, -} as const -const GOOGLE_GEMINI_2_5_PRO = { - id: 'google/gemini-2.5-pro', - name: 'Google: Gemini 2.5 Pro', - supports: { - input: ['text', 'image', 'document', 'audio', 'video'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 1048576, - max_output_tokens: 65536, - pricing: { - text: { - input: { - normal: 1.25, - cached: 1.75, - }, - output: { - normal: 10, - }, - }, - image: 0.00516, - }, -} as const -const OPENAI_GPT_5_1_CODEX_MAX = { - id: 'openai/gpt-5.1-codex-max', - name: 'OpenAI: GPT-5.1-Codex-Max', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'response_format', - 'seed', - 'structured_outputs', - 'tool_choice', - 'tools', - ], - }, - context_window: 400000, - max_output_tokens: 128000, - pricing: { - text: { - input: { - normal: 1.25, - cached: 0.125, - }, - output: { - normal: 10, - }, - }, - image: 0, - }, -} as const -const OPENAI_GPT_5_1 = { - id: 'openai/gpt-5.1', - name: 'OpenAI: GPT-5.1', - supports: { - input: ['image', 'text', 'document'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'response_format', - 'seed', - 'structured_outputs', - 'tool_choice', - 'tools', - ], - }, - context_window: 400000, - max_output_tokens: 128000, - 
pricing: { - text: { - input: { - normal: 1.25, - cached: 0.125, - }, - output: { - normal: 10, - }, - }, - image: 0, - }, -} as const -const SAO10K_L3_EURYALE_70B = { - id: 'sao10k/l3-euryale-70b', - name: 'Sao10k: Llama 3 Euryale 70B v2.1', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'repetition_penalty', - 'seed', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 8192, - max_output_tokens: 8192, - pricing: { - text: { - input: { - normal: 1.48, - cached: 0, - }, - output: { - normal: 1.48, - }, - }, - image: 0, - }, -} as const -const OPENAI_GPT_3_5_TURBO_INSTRUCT = { - id: 'openai/gpt-3.5-turbo-instruct', - name: 'OpenAI: GPT-3.5 Turbo Instruct', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 4095, - max_output_tokens: 4096, - pricing: { - text: { - input: { - normal: 1.5, - cached: 0, - }, - output: { - normal: 2, - }, - }, - image: 0, - }, -} as const -const OPENAI_CODEX_MINI = { - id: 'openai/codex-mini', - name: 'OpenAI: Codex Mini', - supports: { - input: ['image', 'text'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'response_format', - 'seed', - 'structured_outputs', - 'tool_choice', - 'tools', - ], - }, - context_window: 200000, - max_output_tokens: 100000, - pricing: { - text: { - input: { - normal: 1.5, - cached: 0.375, - }, - output: { - normal: 6, - }, - }, - image: 0, - }, -} as const -const QWEN_QWEN_MAX = { - id: 'qwen/qwen-max', - name: 'Qwen: Qwen-Max ', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 32768, - max_output_tokens: 8192, - pricing: { - text: { - input: { - normal: 1.6, - cached: 0.64, - }, - output: { - normal: 6.4, - }, - }, - image: 0, - }, -} as const -const OPENAI_GPT_5_2 = { - id: 'openai/gpt-5.2', - name: 'OpenAI: GPT-5.2', - supports: { - input: ['document', 'image', 'text'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'response_format', - 'seed', - 'structured_outputs', - 'tool_choice', - 'tools', - ], - }, - context_window: 400000, - max_output_tokens: 128000, - pricing: { - text: { - input: { - normal: 1.75, - cached: 0.175, - }, - output: { - normal: 14, - }, - }, - image: 0, - }, -} as const -const OPENAI_GPT_5_2_CHAT = { - id: 'openai/gpt-5.2-chat', - name: 'OpenAI: GPT-5.2 Chat', - supports: { - input: ['document', 'image', 'text'], - output: ['text'], - supports: [ - 'max_tokens', - 'response_format', - 'seed', - 'structured_outputs', - 'tool_choice', - 'tools', - ], - }, - context_window: 128000, - max_output_tokens: 16384, - pricing: { - text: { - input: { - normal: 1.75, - cached: 0.175, - }, - output: { - normal: 14, - }, - }, - image: 0, - }, -} as const -const MISTRALAI_MISTRAL_LARGE_2411 = { - id: 'mistralai/mistral-large-2411', - name: 'Mistral Large 2411', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', 
- 'top_p', - ], - }, - context_window: 128000, - pricing: { - text: { - input: { - normal: 2, - cached: 0, - }, - output: { - normal: 6, - }, - }, - image: 0, - }, -} as const -const MISTRALAI_PIXTRAL_LARGE_2411 = { - id: 'mistralai/pixtral-large-2411', - name: 'Mistral: Pixtral Large 2411', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 128000, - pricing: { - text: { - input: { - normal: 2, - cached: 0, - }, - output: { - normal: 6, - }, - }, - image: 0, - }, -} as const -const MISTRALAI_MIXTRAL_8X22B_INSTRUCT = { - id: 'mistralai/mixtral-8x22b-instruct', - name: 'Mistral: Mixtral 8x22B Instruct', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 65536, - pricing: { - text: { - input: { - normal: 2, - cached: 0, - }, - output: { - normal: 6, - }, - }, - image: 0, - }, -} as const -const MISTRALAI_MISTRAL_LARGE = { - id: 'mistralai/mistral-large', - name: 'Mistral Large', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 128000, - pricing: { - text: { - input: { - normal: 2, - cached: 0, - }, - output: { - normal: 6, - }, - }, - image: 0, - }, -} as const -const MISTRALAI_MISTRAL_LARGE_2407 = { - id: 'mistralai/mistral-large-2407', - name: 'Mistral Large 2407', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 128000, - pricing: { - text: { - input: { - normal: 2, - cached: 0, - }, - output: { - normal: 6, - }, - }, - image: 0, - }, -} as const -const PERPLEXITY_SONAR_DEEP_RESEARCH = { - id: 'perplexity/sonar-deep-research', - name: 'Perplexity: Sonar Deep Research', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'presence_penalty', - 'reasoning', - 'temperature', - 'top_k', - 'top_p', - 'web_search_options', - ], - }, - context_window: 128000, - pricing: { - text: { - input: { - normal: 2, - cached: 0, - }, - output: { - normal: 8, - }, - }, - image: 0, - }, -} as const -const PERPLEXITY_SONAR_REASONING_PRO = { - id: 'perplexity/sonar-reasoning-pro', - name: 'Perplexity: Sonar Reasoning Pro', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'presence_penalty', - 'reasoning', - 'temperature', - 'top_k', - 'top_p', - 'web_search_options', - ], - }, - context_window: 128000, - pricing: { - text: { - input: { - normal: 2, - cached: 0, - }, - output: { - normal: 8, - }, - }, - image: 0, - }, -} as const -const OPENAI_GPT_4_1 = { - id: 'openai/gpt-4.1', - name: 'OpenAI: GPT-4.1', - supports: { - input: ['image', 'text', 'document'], - output: ['text'], - supports: [ - 'max_tokens', - 'response_format', - 'seed', - 
'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 1047576, - max_output_tokens: 32768, - pricing: { - text: { - input: { - normal: 2, - cached: 0.5, - }, - output: { - normal: 8, - }, - }, - image: 0, - }, -} as const -const OPENAI_O3 = { - id: 'openai/o3', - name: 'OpenAI: o3', - supports: { - input: ['image', 'text', 'document'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'response_format', - 'seed', - 'structured_outputs', - 'tool_choice', - 'tools', - ], - }, - context_window: 200000, - max_output_tokens: 100000, - pricing: { - text: { - input: { - normal: 2, - cached: 0.5, - }, - output: { - normal: 8, - }, - }, - image: 0, - }, -} as const -const AI21_JAMBA_LARGE_1_7 = { - id: 'ai21/jamba-large-1.7', - name: 'AI21: Jamba Large 1.7', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'max_tokens', - 'response_format', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 256000, - max_output_tokens: 4096, - pricing: { - text: { - input: { - normal: 2, - cached: 0, - }, - output: { - normal: 8, - }, - }, - image: 0, - }, -} as const -const OPENAI_O4_MINI_DEEP_RESEARCH = { - id: 'openai/o4-mini-deep-research', - name: 'OpenAI: o4 Mini Deep Research', - supports: { - input: ['document', 'image', 'text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'presence_penalty', - 'reasoning', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 200000, - max_output_tokens: 100000, - pricing: { - text: { - input: { - normal: 2, - cached: 0.5, - }, - output: { - normal: 8, - }, - }, - image: 0, - }, -} as const -const GOOGLE_GEMINI_3_PRO_IMAGE_PREVIEW = { - id: 'google/gemini-3-pro-image-preview', - name: 'Google: Nano Banana Pro (Gemini 3 Pro Image Preview)', - supports: { - input: ['image', 'text'], - output: ['image', 'text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'top_p', - ], - }, - context_window: 65536, - max_output_tokens: 32768, - pricing: { - text: { - input: { - normal: 2, - cached: 0, - }, - output: { - normal: 12, - }, - }, - image: 0.067, - }, -} as const -const GOOGLE_GEMINI_3_PRO_PREVIEW = { - id: 'google/gemini-3-pro-preview', - name: 'Google: Gemini 3 Pro Preview', - supports: { - input: ['text', 'image', 'document', 'audio', 'video'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 1048576, - max_output_tokens: 65536, - pricing: { - text: { - input: { - normal: 2, - cached: 2.575, - }, - output: { - normal: 12, - }, - }, - image: 0.008256, - }, -} as const -const OPENAI_GPT_5_IMAGE_MINI = { - id: 'openai/gpt-5-image-mini', - name: 'OpenAI: GPT-5 Image Mini', - supports: { - input: ['document', 'image', 'text'], - output: ['image', 'text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'presence_penalty', - 'reasoning', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_logprobs', - 'top_p', - ], - }, - 
context_window: 400000, - max_output_tokens: 128000, - pricing: { - text: { - input: { - normal: 2.5, - cached: 0.25, - }, - output: { - normal: 2, - }, - }, - image: 0, - }, -} as const -const OPENAI_GPT_4O_SEARCH_PREVIEW = { - id: 'openai/gpt-4o-search-preview', - name: 'OpenAI: GPT-4o Search Preview', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'max_tokens', - 'response_format', - 'structured_outputs', - 'web_search_options', - ], - }, - context_window: 128000, - max_output_tokens: 16384, - pricing: { - text: { - input: { - normal: 2.5, - cached: 0, - }, - output: { - normal: 10, - }, - }, - image: 0, - }, -} as const -const OPENAI_GPT_4O_AUDIO_PREVIEW = { - id: 'openai/gpt-4o-audio-preview', - name: 'OpenAI: GPT-4o Audio', - supports: { - input: ['audio', 'text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 128000, - max_output_tokens: 16384, - pricing: { - text: { - input: { - normal: 2.5, - cached: 0, - }, - output: { - normal: 10, - }, - }, - image: 0, - }, -} as const -const COHERE_COMMAND_A = { - id: 'cohere/command-a', - name: 'Cohere: Command A', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 256000, - max_output_tokens: 8192, - pricing: { - text: { - input: { - normal: 2.5, - cached: 0, - }, - output: { - normal: 10, - }, - }, - image: 0, - }, -} as const -const INFLECTION_INFLECTION_3_PI = { - id: 'inflection/inflection-3-pi', - name: 'Inflection: Inflection 3 Pi', - supports: { - input: ['text'], - output: ['text'], - supports: ['max_tokens', 'stop', 'temperature', 'top_p'], - }, - context_window: 8000, - max_output_tokens: 1024, - pricing: { - text: { - input: { - normal: 2.5, - cached: 0, - }, - output: { - normal: 10, - }, - }, - image: 0, - }, -} as const -const INFLECTION_INFLECTION_3_PRODUCTIVITY = { - id: 'inflection/inflection-3-productivity', - name: 'Inflection: Inflection 3 Productivity', - supports: { - input: ['text'], - output: ['text'], - supports: ['max_tokens', 'stop', 'temperature', 'top_p'], - }, - context_window: 8000, - max_output_tokens: 1024, - pricing: { - text: { - input: { - normal: 2.5, - cached: 0, - }, - output: { - normal: 10, - }, - }, - image: 0, - }, -} as const -const OPENAI_GPT_4O_2024_11_20 = { - id: 'openai/gpt-4o-2024-11-20', - name: 'OpenAI: GPT-4o (2024-11-20)', - supports: { - input: ['text', 'image', 'document'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_logprobs', - 'top_p', - 'web_search_options', - ], - }, - context_window: 128000, - max_output_tokens: 16384, - pricing: { - text: { - input: { - normal: 2.5, - cached: 1.25, - }, - output: { - normal: 10, - }, - }, - image: 0, - }, -} as const -const OPENAI_GPT_4O_2024_08_06 = { - id: 'openai/gpt-4o-2024-08-06', - name: 'OpenAI: GPT-4o (2024-08-06)', - supports: { - input: ['text', 'image', 'document'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'logprobs', - 'max_tokens', - 
'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_logprobs', - 'top_p', - 'web_search_options', - ], - }, - context_window: 128000, - max_output_tokens: 16384, - pricing: { - text: { - input: { - normal: 2.5, - cached: 1.25, - }, - output: { - normal: 10, - }, - }, - image: 0.003613, - }, -} as const -const OPENAI_GPT_4O = { - id: 'openai/gpt-4o', - name: 'OpenAI: GPT-4o', - supports: { - input: ['text', 'image', 'document'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_logprobs', - 'top_p', - 'web_search_options', - ], - }, - context_window: 128000, - max_output_tokens: 16384, - pricing: { - text: { - input: { - normal: 2.5, - cached: 1.25, - }, - output: { - normal: 10, - }, - }, - image: 0, - }, -} as const -const COHERE_COMMAND_R_PLUS_08_2024 = { - id: 'cohere/command-r-plus-08-2024', - name: 'Cohere: Command R+ (08-2024)', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 128000, - max_output_tokens: 4000, - pricing: { - text: { - input: { - normal: 2.5, - cached: 0, - }, - output: { - normal: 10, - }, - }, - image: 0, - }, -} as const -const AMAZON_NOVA_PREMIER_V1 = { - id: 'amazon/nova-premier-v1', - name: 'Amazon: Nova Premier 1.0', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: ['max_tokens', 'stop', 'temperature', 'tools', 'top_k', 'top_p'], - }, - context_window: 1000000, - max_output_tokens: 32000, - pricing: { - text: { - input: { - normal: 2.5, - cached: 0.625, - }, - output: { - normal: 12.5, - }, - }, - image: 0, - }, -} as const -const SAO10K_L3_1_70B_HANAMI_X1 = { - id: 'sao10k/l3.1-70b-hanami-x1', - name: 'Sao10K: Llama 3.1 70B Hanami x1', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'seed', - 'stop', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 16000, - pricing: { - text: { - input: { - normal: 3, - cached: 0, - }, - output: { - normal: 3, - }, - }, - image: 0, - }, -} as const -const OPENAI_GPT_3_5_TURBO_16K = { - id: 'openai/gpt-3.5-turbo-16k', - name: 'OpenAI: GPT-3.5 Turbo 16k', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 16385, - max_output_tokens: 4096, - pricing: { - text: { - input: { - normal: 3, - cached: 0, - }, - output: { - normal: 4, - }, - }, - image: 0, - }, -} as const -const ANTHRACITE_ORG_MAGNUM_V4_72B = { - id: 'anthracite-org/magnum-v4-72b', - name: 'Magnum v4 72B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'temperature', - 'top_a', - 'top_k', - 'top_logprobs', - 'top_p', - ], - }, - 
context_window: 32768, - max_output_tokens: 2048, - pricing: { - text: { - input: { - normal: 3, - cached: 0, - }, - output: { - normal: 5, - }, - }, - image: 0, - }, -} as const -const ANTHROPIC_CLAUDE_SONNET_4_5 = { - id: 'anthropic/claude-sonnet-4.5', - name: 'Anthropic: Claude Sonnet 4.5', - supports: { - input: ['text', 'image', 'document'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'response_format', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 1000000, - max_output_tokens: 64000, - pricing: { - text: { - input: { - normal: 3, - cached: 4.05, - }, - output: { - normal: 15, - }, - }, - image: 0, - }, -} as const -const X_AI_GROK_3 = { - id: 'x-ai/grok-3', - name: 'xAI: Grok 3', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logprobs', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 3, - cached: 0.75, - }, - output: { - normal: 15, - }, - }, - image: 0, - }, -} as const -const ANTHROPIC_CLAUDE_3_7_SONNET_THINKING = { - id: 'anthropic/claude-3.7-sonnet:thinking', - name: 'Anthropic: Claude 3.7 Sonnet (thinking)', - supports: { - input: ['text', 'image', 'document'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - 'top_p', - ], - }, - context_window: 200000, - max_output_tokens: 64000, - pricing: { - text: { - input: { - normal: 3, - cached: 4.05, - }, - output: { - normal: 15, - }, - }, - image: 0.0048, - }, -} as const -const X_AI_GROK_4 = { - id: 'x-ai/grok-4', - name: 'xAI: Grok 4', - supports: { - input: ['image', 'text'], - output: ['text'], - supports: [ - 'include_reasoning', - 'logprobs', - 'max_tokens', - 'reasoning', - 'response_format', - 'seed', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 256000, - pricing: { - text: { - input: { - normal: 3, - cached: 0.75, - }, - output: { - normal: 15, - }, - }, - image: 0, - }, -} as const -const PERPLEXITY_SONAR_PRO = { - id: 'perplexity/sonar-pro', - name: 'Perplexity: Sonar Pro', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'presence_penalty', - 'temperature', - 'top_k', - 'top_p', - 'web_search_options', - ], - }, - context_window: 200000, - max_output_tokens: 8000, - pricing: { - text: { - input: { - normal: 3, - cached: 0, - }, - output: { - normal: 15, - }, - }, - image: 0, - }, -} as const -const X_AI_GROK_3_BETA = { - id: 'x-ai/grok-3-beta', - name: 'xAI: Grok 3 Beta', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logprobs', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 3, - cached: 0.75, - }, - output: { - normal: 15, - }, - }, - image: 0, - }, -} as const -const ANTHROPIC_CLAUDE_SONNET_4 = { - id: 'anthropic/claude-sonnet-4', - name: 'Anthropic: Claude Sonnet 4', - supports: { - input: ['image', 'text', 'document'], - output: ['text'], - supports: [ - 'include_reasoning', 
- 'max_tokens', - 'reasoning', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 1000000, - max_output_tokens: 64000, - pricing: { - text: { - input: { - normal: 3, - cached: 4.05, - }, - output: { - normal: 15, - }, - }, - image: 0.0048, - }, -} as const -const ANTHROPIC_CLAUDE_3_7_SONNET = { - id: 'anthropic/claude-3.7-sonnet', - name: 'Anthropic: Claude 3.7 Sonnet', - supports: { - input: ['text', 'image', 'document'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 200000, - max_output_tokens: 64000, - pricing: { - text: { - input: { - normal: 3, - cached: 4.05, - }, - output: { - normal: 15, - }, - }, - image: 0.0048, - }, -} as const -const PERPLEXITY_SONAR_PRO_SEARCH = { - id: 'perplexity/sonar-pro-search', - name: 'Perplexity: Sonar Pro Search', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'max_tokens', - 'presence_penalty', - 'reasoning', - 'structured_outputs', - 'temperature', - 'top_k', - 'top_p', - 'web_search_options', - ], - }, - context_window: 200000, - max_output_tokens: 8000, - pricing: { - text: { - input: { - normal: 3, - cached: 0, - }, - output: { - normal: 15, - }, - }, - image: 0, - }, -} as const -const META_LLAMA_LLAMA_3_1_405B_INSTRUCT = { - id: 'meta-llama/llama-3.1-405b-instruct', - name: 'Meta: Llama 3.1 405B Instruct', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 3.5, - cached: 0, - }, - output: { - normal: 3.5, - }, - }, - image: 0, - }, -} as const -const DEEPCOGITO_COGITO_V2_PREVIEW_LLAMA_405B = { - id: 'deepcogito/cogito-v2-preview-llama-405b', - name: 'Deep Cogito: Cogito V2 Preview Llama 405B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'reasoning', - 'repetition_penalty', - 'response_format', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - pricing: { - text: { - input: { - normal: 3.5, - cached: 0, - }, - output: { - normal: 3.5, - }, - }, - image: 0, - }, -} as const -const META_LLAMA_LLAMA_3_1_405B = { - id: 'meta-llama/llama-3.1-405b', - name: 'Meta: Llama 3.1 405B (base)', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'seed', - 'stop', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 32768, - pricing: { - text: { - input: { - normal: 4, - cached: 0, - }, - output: { - normal: 4, - }, - }, - image: 0, - }, -} as const -const AION_LABS_AION_1_0 = { - id: 'aion-labs/aion-1.0', - name: 'AionLabs: Aion-1.0', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'temperature', - 'top_p', - ], - }, - context_window: 32768, - max_output_tokens: 32768, - pricing: { - text: 
{ - input: { - normal: 4, - cached: 0, - }, - output: { - normal: 8, - }, - }, - image: 0, - }, -} as const -const RAIFLE_SORCERERLM_8X22B = { - id: 'raifle/sorcererlm-8x22b', - name: 'SorcererLM 8x22B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'seed', - 'stop', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 16000, - pricing: { - text: { - input: { - normal: 4.5, - cached: 0, - }, - output: { - normal: 4.5, - }, - }, - image: 0, - }, -} as const -const OPENAI_CHATGPT_4O_LATEST = { - id: 'openai/chatgpt-4o-latest', - name: 'OpenAI: ChatGPT-4o', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 128000, - max_output_tokens: 16384, - pricing: { - text: { - input: { - normal: 5, - cached: 0, - }, - output: { - normal: 15, - }, - }, - image: 0, - }, -} as const -const OPENAI_GPT_4O_2024_05_13 = { - id: 'openai/gpt-4o-2024-05-13', - name: 'OpenAI: GPT-4o (2024-05-13)', - supports: { - input: ['text', 'image', 'document'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_logprobs', - 'top_p', - 'web_search_options', - ], - }, - context_window: 128000, - max_output_tokens: 4096, - pricing: { - text: { - input: { - normal: 5, - cached: 0, - }, - output: { - normal: 15, - }, - }, - image: 0, - }, -} as const -const ANTHROPIC_CLAUDE_OPUS_4_5 = { - id: 'anthropic/claude-opus-4.5', - name: 'Anthropic: Claude Opus 4.5', - supports: { - input: ['document', 'image', 'text'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'response_format', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'verbosity', - ], - }, - context_window: 200000, - max_output_tokens: 64000, - pricing: { - text: { - input: { - normal: 5, - cached: 6.75, - }, - output: { - normal: 25, - }, - }, - image: 0, - }, -} as const -const ALPINDALE_GOLIATH_120B = { - id: 'alpindale/goliath-120b', - name: 'Goliath 120B', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'temperature', - 'top_a', - 'top_k', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 6144, - max_output_tokens: 1024, - pricing: { - text: { - input: { - normal: 6, - cached: 0, - }, - output: { - normal: 8, - }, - }, - image: 0, - }, -} as const -const OPENAI_GPT_4O_EXTENDED = { - id: 'openai/gpt-4o:extended', - name: 'OpenAI: GPT-4o (extended)', - supports: { - input: ['text', 'image', 'document'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_logprobs', - 'top_p', - 'web_search_options', - ], - }, - context_window: 128000, - max_output_tokens: 64000, - pricing: { - text: { - input: { - normal: 6, - cached: 0, - 
}, - output: { - normal: 18, - }, - }, - image: 0, - }, -} as const -const ANTHROPIC_CLAUDE_3_5_SONNET = { - id: 'anthropic/claude-3.5-sonnet', - name: 'Anthropic: Claude 3.5 Sonnet', - supports: { - input: ['text', 'image', 'document'], - output: ['text'], - supports: [ - 'max_tokens', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 200000, - max_output_tokens: 8192, - pricing: { - text: { - input: { - normal: 6, - cached: 0, - }, - output: { - normal: 30, - }, - }, - image: 0, - }, -} as const -const OPENAI_GPT_5_IMAGE = { - id: 'openai/gpt-5-image', - name: 'OpenAI: GPT-5 Image', - supports: { - input: ['image', 'text', 'document'], - output: ['image', 'text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'presence_penalty', - 'reasoning', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 400000, - max_output_tokens: 128000, - pricing: { - text: { - input: { - normal: 10, - cached: 1.25, - }, - output: { - normal: 10, - }, - }, - image: 0, - }, -} as const -const OPENAI_GPT_4_1106_PREVIEW = { - id: 'openai/gpt-4-1106-preview', - name: 'OpenAI: GPT-4 Turbo (older v1106)', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 128000, - max_output_tokens: 4096, - pricing: { - text: { - input: { - normal: 10, - cached: 0, - }, - output: { - normal: 30, - }, - }, - image: 0, - }, -} as const -const OPENAI_GPT_4_TURBO = { - id: 'openai/gpt-4-turbo', - name: 'OpenAI: GPT-4 Turbo', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 128000, - max_output_tokens: 4096, - pricing: { - text: { - input: { - normal: 10, - cached: 0, - }, - output: { - normal: 30, - }, - }, - image: 0, - }, -} as const -const OPENAI_GPT_4_TURBO_PREVIEW = { - id: 'openai/gpt-4-turbo-preview', - name: 'OpenAI: GPT-4 Turbo Preview', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 128000, - max_output_tokens: 4096, - pricing: { - text: { - input: { - normal: 10, - cached: 0, - }, - output: { - normal: 30, - }, - }, - image: 0, - }, -} as const -const OPENAI_O3_DEEP_RESEARCH = { - id: 'openai/o3-deep-research', - name: 'OpenAI: o3 Deep Research', - supports: { - input: ['image', 'text', 'document'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'include_reasoning', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'presence_penalty', - 'reasoning', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 200000, - max_output_tokens: 100000, - pricing: { - 
text: { - input: { - normal: 10, - cached: 2.5, - }, - output: { - normal: 40, - }, - }, - image: 0, - }, -} as const -const OPENAI_O1 = { - id: 'openai/o1', - name: 'OpenAI: o1', - supports: { - input: ['text', 'image', 'document'], - output: ['text'], - supports: [ - 'max_tokens', - 'response_format', - 'seed', - 'structured_outputs', - 'tool_choice', - 'tools', - ], - }, - context_window: 200000, - max_output_tokens: 100000, - pricing: { - text: { - input: { - normal: 15, - cached: 7.5, - }, - output: { - normal: 60, - }, - }, - image: 0, - }, -} as const -const ANTHROPIC_CLAUDE_OPUS_4_1 = { - id: 'anthropic/claude-opus-4.1', - name: 'Anthropic: Claude Opus 4.1', - supports: { - input: ['image', 'text', 'document'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'response_format', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 200000, - max_output_tokens: 32000, - pricing: { - text: { - input: { - normal: 15, - cached: 20.25, - }, - output: { - normal: 75, - }, - }, - image: 0.024, - }, -} as const -const ANTHROPIC_CLAUDE_OPUS_4 = { - id: 'anthropic/claude-opus-4', - name: 'Anthropic: Claude Opus 4', - supports: { - input: ['image', 'text', 'document'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 200000, - max_output_tokens: 32000, - pricing: { - text: { - input: { - normal: 15, - cached: 20.25, - }, - output: { - normal: 75, - }, - }, - image: 0.024, - }, -} as const -const OPENAI_GPT_5_PRO = { - id: 'openai/gpt-5-pro', - name: 'OpenAI: GPT-5 Pro', - supports: { - input: ['image', 'text', 'document'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'response_format', - 'seed', - 'structured_outputs', - 'tool_choice', - 'tools', - ], - }, - context_window: 400000, - max_output_tokens: 128000, - pricing: { - text: { - input: { - normal: 15, - cached: 0, - }, - output: { - normal: 120, - }, - }, - image: 0, - }, -} as const -const OPENAI_O3_PRO = { - id: 'openai/o3-pro', - name: 'OpenAI: o3 Pro', - supports: { - input: ['text', 'document', 'image'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'response_format', - 'seed', - 'structured_outputs', - 'tool_choice', - 'tools', - ], - }, - context_window: 200000, - max_output_tokens: 100000, - pricing: { - text: { - input: { - normal: 20, - cached: 0, - }, - output: { - normal: 80, - }, - }, - image: 0, - }, -} as const -const OPENAI_GPT_5_2_PRO = { - id: 'openai/gpt-5.2-pro', - name: 'OpenAI: GPT-5.2 Pro', - supports: { - input: ['image', 'text', 'document'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'response_format', - 'seed', - 'structured_outputs', - 'tool_choice', - 'tools', - ], - }, - context_window: 400000, - max_output_tokens: 128000, - pricing: { - text: { - input: { - normal: 21, - cached: 0, - }, - output: { - normal: 168, - }, - }, - image: 0, - }, -} as const -const OPENAI_GPT_4 = { - id: 'openai/gpt-4', - name: 'OpenAI: GPT-4', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_logprobs', - 'top_p', - ], - 
}, - context_window: 8191, - max_output_tokens: 4096, - pricing: { - text: { - input: { - normal: 30, - cached: 0, - }, - output: { - normal: 60, - }, - }, - image: 0, - }, -} as const -const OPENAI_GPT_4_0314 = { - id: 'openai/gpt-4-0314', - name: 'OpenAI: GPT-4 (older v0314)', - supports: { - input: ['text'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'logit_bias', - 'logprobs', - 'max_tokens', - 'presence_penalty', - 'response_format', - 'seed', - 'stop', - 'structured_outputs', - 'temperature', - 'tool_choice', - 'tools', - 'top_logprobs', - 'top_p', - ], - }, - context_window: 8191, - max_output_tokens: 4096, - pricing: { - text: { - input: { - normal: 30, - cached: 0, - }, - output: { - normal: 60, - }, - }, - image: 0, - }, -} as const -const OPENAI_O1_PRO = { - id: 'openai/o1-pro', - name: 'OpenAI: o1-pro', - supports: { - input: ['text', 'image', 'document'], - output: ['text'], - supports: [ - 'include_reasoning', - 'max_tokens', - 'reasoning', - 'response_format', - 'seed', - 'structured_outputs', - ], - }, - context_window: 200000, - max_output_tokens: 100000, - pricing: { - text: { - input: { - normal: 150, - cached: 0, - }, - output: { - normal: 600, - }, - }, - image: 0, - }, -} as const -const ALLENAI_OLMO_2_0325_32B_INSTRUCT = { - id: 'allenai/olmo-2-0325-32b-instruct', - name: 'AllenAI: Olmo 2 32B Instruct', - supports: { - input: ['text'], - output: ['text'], - supports: [], - }, - context_window: 128000, - pricing: { - text: { - input: { - normal: 0.05, - cached: 0, - }, - output: { - normal: 0.2, - }, - }, - image: 0, - }, -} as const -const META_LLAMA_LLAMA_3_2_90B_VISION_INSTRUCT = { - id: 'meta-llama/llama-3.2-90b-vision-instruct', - name: 'Meta: Llama 3.2 90B Vision Instruct', - supports: { - input: ['text', 'image'], - output: ['text'], - supports: [ - 'frequency_penalty', - 'max_tokens', - 'min_p', - 'presence_penalty', - 'repetition_penalty', - 'response_format', - 'seed', - 'stop', - 'temperature', - 'top_k', - 'top_p', - ], - }, - context_window: 131072, - max_output_tokens: 16384, - pricing: { - text: { - input: { - normal: 0.35, - cached: 0, - }, - output: { - normal: 0.4, - }, - }, - image: 0.0005058, - }, -} as const -const ANTHROPIC_CLAUDE_3_5_HAIKU_20241022 = { - id: 'anthropic/claude-3.5-haiku-20241022', - name: 'Anthropic: Claude 3.5 Haiku (2024-10-22)', - supports: { - input: ['text', 'image', 'document'], - output: ['text'], - supports: [ - 'max_tokens', - 'stop', - 'temperature', - 'tool_choice', - 'tools', - 'top_k', - 'top_p', - ], - }, - context_window: 200000, - max_output_tokens: 8192, - pricing: { - text: { - input: { - normal: 0.8, - cached: 1.08, - }, - output: { - normal: 4, - }, - }, - image: 0, - }, -} as const +import type { OpenRouterBaseOptions, OpenRouterCommonOptions } from './text/text-provider-options' + +const NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B_FREE = { + id: 'nousresearch/hermes-3-llama-3.1-405b:free', + name: 'Nous: Hermes 3 405B Instruct (free)', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'stop', 'temperature', 'top_k', 'top_p'], + }, + context_window: 131072, + pricing: { + text: { + input: { + normal: 0, + cached: 0, + }, + output: { + normal: 0, + }, + }, + image: 0, + }, + } as const +const META_LLAMA_LLAMA_3_3_70B_INSTRUCT_FREE = { + id: 'meta-llama/llama-3.3-70b-instruct:free', + name: 'Meta: Llama 3.3 70B Instruct (free)', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 
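The replacement hunk that begins here adds a type-only import of OpenRouterBaseOptions and OpenRouterCommonOptions and re-emits the catalog, starting with the `:free` variants. Because each constant is declared `as const`, its fields keep their literal types, which is what lets downstream code derive exact unions rather than plain `string`. A minimal sketch of that pattern, using two constants visible in this hunk (the MODELS array name is an assumption, not something this diff defines):

// Hypothetical registry built from constants this file actually declares.
const MODELS = [
  NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B_FREE,
  META_LLAMA_LLAMA_3_3_70B_INSTRUCT_FREE,
] as const

// Literal union of the ids, not just `string`:
type ModelId = (typeof MODELS)[number]['id']
// => 'nousresearch/hermes-3-llama-3.1-405b:free' | 'meta-llama/llama-3.3-70b-instruct:free'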
'presence_penalty', 'repetition_penalty', 'seed', 'stop', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 131072, + pricing: { + text: { + input: { + normal: 0, + cached: 0, + }, + output: { + normal: 0, + }, + }, + image: 0, + }, + } as const +const QWEN_QWEN3_CODER_FREE = { + id: 'qwen/qwen3-coder:free', + name: 'Qwen: Qwen3 Coder 480B A35B (free)', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'seed', 'stop', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 1048576, + max_output_tokens: 262000, + pricing: { + text: { + input: { + normal: 0, + cached: 0, + }, + output: { + normal: 0, + }, + }, + image: 0, + }, + } as const +const TNGTECH_DEEPSEEK_R1T2_CHIMERA_FREE = { + id: 'tngtech/deepseek-r1t2-chimera:free', + name: 'TNG: DeepSeek R1T2 Chimera (free)', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'presence_penalty', 'reasoning', 'repetition_penalty', 'seed', 'stop', 'temperature', 'top_k', 'top_p'], + }, + context_window: 163840, + pricing: { + text: { + input: { + normal: 0, + cached: 0, + }, + output: { + normal: 0, + }, + }, + image: 0, + }, + } as const +const QWEN_QWEN3_4B_FREE = { + id: 'qwen/qwen3-4b:free', + name: 'Qwen: Qwen3 4B (free)', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'presence_penalty', 'reasoning', 'response_format', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 128000, + pricing: { + text: { + input: { + normal: 0, + cached: 0, + }, + output: { + normal: 0, + }, + }, + image: 0, + }, + } as const +const GOOGLE_GEMMA_3N_E2B_IT_FREE = { + id: 'google/gemma-3n-e2b-it:free', + name: 'Google: Gemma 3n 2B (free)', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'temperature', 'top_p'], + }, + context_window: 8192, + max_output_tokens: 2048, + pricing: { + text: { + input: { + normal: 0, + cached: 0, + }, + output: { + normal: 0, + }, + }, + image: 0, + }, + } as const +const META_LLAMA_LLAMA_3_2_3B_INSTRUCT_FREE = { + id: 'meta-llama/llama-3.2-3b-instruct:free', + name: 'Meta: Llama 3.2 3B Instruct (free)', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'stop', 'temperature', 'top_k', 'top_p'], + }, + context_window: 131072, + pricing: { + text: { + input: { + normal: 0, + cached: 0, + }, + output: { + normal: 0, + }, + }, + image: 0, + }, + } as const +const GOOGLE_GEMMA_3_12B_IT_FREE = { + id: 'google/gemma-3-12b-it:free', + name: 'Google: Gemma 3 12B (free)', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['max_tokens', 'seed', 'stop', 'temperature', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 8192, + pricing: { + text: { + input: { + normal: 0, + cached: 0, + }, + output: { + normal: 0, + }, + }, + image: 0, + }, + } as const +const COGNITIVECOMPUTATIONS_DOLPHIN_MISTRAL_24B_VENICE_EDITION_FREE = { + id: 'cognitivecomputations/dolphin-mistral-24b-venice-edition:free', + name: 'Venice: Uncensored (free)', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'response_format', 'stop', 'structured_outputs', 'temperature', 
'top_k', 'top_p'], + }, + context_window: 32768, + pricing: { + text: { + input: { + normal: 0, + cached: 0, + }, + output: { + normal: 0, + }, + }, + image: 0, + }, + } as const +const Z_AI_GLM_4_5_AIR_FREE = { + id: 'z-ai/glm-4.5-air:free', + name: 'Z.AI: GLM 4.5 Air (free)', + supports: { + input: ['text'], + output: ['text'], + supports: ['include_reasoning', 'max_tokens', 'reasoning', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 96000, + pricing: { + text: { + input: { + normal: 0, + cached: 0, + }, + output: { + normal: 0, + }, + }, + image: 0, + }, + } as const +const MOONSHOTAI_KIMI_K2_FREE = { + id: 'moonshotai/kimi-k2:free', + name: 'MoonshotAI: Kimi K2 0711 (free)', + supports: { + input: ['text'], + output: ['text'], + supports: ['max_tokens', 'seed', 'stop', 'temperature'], + }, + context_window: 131072, + pricing: { + text: { + input: { + normal: 0, + cached: 0, + }, + output: { + normal: 0, + }, + }, + image: 0, + }, + } as const +const GOOGLE_GEMMA_3_27B_IT_FREE = { + id: 'google/gemma-3-27b-it:free', + name: 'Google: Gemma 3 27B (free)', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 131072, + pricing: { + text: { + input: { + normal: 0, + cached: 0, + }, + output: { + normal: 0, + }, + }, + image: 0, + }, + } as const +const GOOGLE_GEMMA_3_4B_IT_FREE = { + id: 'google/gemma-3-4b-it:free', + name: 'Google: Gemma 3 4B (free)', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['max_tokens', 'response_format', 'seed', 'stop', 'temperature', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 8192, + pricing: { + text: { + input: { + normal: 0, + cached: 0, + }, + output: { + normal: 0, + }, + }, + image: 0, + }, + } as const +const NVIDIA_NEMOTRON_3_NANO_30B_A3B_FREE = { + id: 'nvidia/nemotron-3-nano-30b-a3b:free', + name: 'NVIDIA: Nemotron 3 Nano 30B A3B (free)', + supports: { + input: ['text'], + output: ['text'], + supports: ['include_reasoning', 'max_tokens', 'reasoning', 'seed', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 256000, + pricing: { + text: { + input: { + normal: 0, + cached: 0, + }, + output: { + normal: 0, + }, + }, + image: 0, + }, + } as const +const TNGTECH_TNG_R1T_CHIMERA_FREE = { + id: 'tngtech/tng-r1t-chimera:free', + name: 'TNG: R1T Chimera (free)', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 163840, + max_output_tokens: 65536, + pricing: { + text: { + input: { + normal: 0, + cached: 0, + }, + output: { + normal: 0, + }, + }, + image: 0, + }, + } as const +const NVIDIA_NEMOTRON_NANO_12B_V2_VL_FREE = { + id: 'nvidia/nemotron-nano-12b-v2-vl:free', + name: 'NVIDIA: Nemotron Nano 12B 2 VL (free)', + supports: { + input: ['image', 'text', 'video'], + output: ['text'], + supports: ['include_reasoning', 'max_tokens', 'reasoning', 'seed', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 128000, + max_output_tokens: 128000, + pricing: { + text: { + input: { + normal: 0, + cached: 0, + }, + output: { + normal: 0, + }, + }, + image: 0, + }, + } as 
const +const ARCEE_AI_TRINITY_MINI_FREE = { + id: 'arcee-ai/trinity-mini:free', + name: 'Arcee AI: Trinity Mini (free)', + supports: { + input: ['text'], + output: ['text'], + supports: ['include_reasoning', 'max_tokens', 'reasoning', 'response_format', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 131072, + pricing: { + text: { + input: { + normal: 0, + cached: 0, + }, + output: { + normal: 0, + }, + }, + image: 0, + }, + } as const +const TNGTECH_DEEPSEEK_R1T_CHIMERA_FREE = { + id: 'tngtech/deepseek-r1t-chimera:free', + name: 'TNG: DeepSeek R1T Chimera (free)', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'presence_penalty', 'reasoning', 'repetition_penalty', 'seed', 'stop', 'temperature', 'top_k', 'top_p'], + }, + context_window: 163840, + pricing: { + text: { + input: { + normal: 0, + cached: 0, + }, + output: { + normal: 0, + }, + }, + image: 0, + }, + } as const +const GOOGLE_GEMMA_3N_E4B_IT_FREE = { + id: 'google/gemma-3n-e4b-it:free', + name: 'Google: Gemma 3n 4B (free)', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'temperature', 'top_p'], + }, + context_window: 32000, + max_output_tokens: 2048, + pricing: { + text: { + input: { + normal: 0, + cached: 0, + }, + output: { + normal: 0, + }, + }, + image: 0, + }, + } as const +const GOOGLE_GEMINI_2_0_FLASH_EXP_FREE = { + id: 'google/gemini-2.0-flash-exp:free', + name: 'Google: Gemini 2.0 Flash Experimental (free)', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['max_tokens', 'response_format', 'seed', 'stop', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 1048576, + max_output_tokens: 8192, + pricing: { + text: { + input: { + normal: 0, + cached: 0, + }, + output: { + normal: 0, + }, + }, + image: 0, + }, + } as const +const OPENAI_GPT_OSS_120B_FREE = { + id: 'openai/gpt-oss-120b:free', + name: 'OpenAI: gpt-oss-120b (free)', + supports: { + input: ['text'], + output: ['text'], + supports: ['include_reasoning', 'max_tokens', 'reasoning', 'seed', 'stop', 'temperature', 'tool_choice', 'tools'], + }, + context_window: 131072, + pricing: { + text: { + input: { + normal: 0, + cached: 0, + }, + output: { + normal: 0, + }, + }, + image: 0, + }, + } as const +const OPENAI_GPT_OSS_20B_FREE = { + id: 'openai/gpt-oss-20b:free', + name: 'OpenAI: gpt-oss-20b (free)', + supports: { + input: ['text'], + output: ['text'], + supports: ['include_reasoning', 'max_tokens', 'reasoning', 'seed', 'stop', 'temperature', 'tool_choice', 'tools'], + }, + context_window: 131072, + pricing: { + text: { + input: { + normal: 0, + cached: 0, + }, + output: { + normal: 0, + }, + }, + image: 0, + }, + } as const +const MISTRALAI_MISTRAL_SMALL_3_1_24B_INSTRUCT_FREE = { + id: 'mistralai/mistral-small-3.1-24b-instruct:free', + name: 'Mistral: Mistral Small 3.1 24B (free)', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'response_format', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 128000, + pricing: { + text: { + input: { + normal: 0, + cached: 0, + }, + output: { + normal: 0, + }, + }, + image: 0, + }, + } as const +const MISTRALAI_DEVSTRAL_2512_FREE = { + id: 'mistralai/devstral-2512:free', + name: 'Mistral: Devstral 2 2512 
(free)', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 262144, + pricing: { + text: { + input: { + normal: 0, + cached: 0, + }, + output: { + normal: 0, + }, + }, + image: 0, + }, + } as const +const XIAOMI_MIMO_V2_FLASH_FREE = { + id: 'xiaomi/mimo-v2-flash:free', + name: 'Xiaomi: MiMo-V2-Flash (free)', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'presence_penalty', 'reasoning', 'response_format', 'stop', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 262144, + max_output_tokens: 65536, + pricing: { + text: { + input: { + normal: 0, + cached: 0, + }, + output: { + normal: 0, + }, + }, + image: 0, + }, + } as const +const ALLENAI_MOLMO_2_8B_FREE = { + id: 'allenai/molmo-2-8b:free', + name: 'AllenAI: Molmo2 8B (free)', + supports: { + input: ['text', 'image', 'video'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'seed', 'stop', 'temperature', 'top_k', 'top_p'], + }, + context_window: 36864, + max_output_tokens: 36864, + pricing: { + text: { + input: { + normal: 0, + cached: 0, + }, + output: { + normal: 0, + }, + }, + image: 0, + }, + } as const +const DEEPSEEK_DEEPSEEK_R1_0528_FREE = { + id: 'deepseek/deepseek-r1-0528:free', + name: 'DeepSeek: R1 0528 (free)', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'presence_penalty', 'reasoning', 'repetition_penalty', 'temperature'], + }, + context_window: 163840, + pricing: { + text: { + input: { + normal: 0, + cached: 0, + }, + output: { + normal: 0, + }, + }, + image: 0, + }, + } as const +const NVIDIA_NEMOTRON_NANO_9B_V2_FREE = { + id: 'nvidia/nemotron-nano-9b-v2:free', + name: 'NVIDIA: Nemotron Nano 9B V2 (free)', + supports: { + input: ['text'], + output: ['text'], + supports: ['include_reasoning', 'max_tokens', 'reasoning', 'response_format', 'seed', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 32000, + pricing: { + text: { + input: { + normal: 0, + cached: 0, + }, + output: { + normal: 0, + }, + }, + image: 0, + }, + } as const +const META_LLAMA_LLAMA_3_1_405B_INSTRUCT_FREE = { + id: 'meta-llama/llama-3.1-405b-instruct:free', + name: 'Meta: Llama 3.1 405B Instruct (free)', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'repetition_penalty', 'temperature'], + }, + context_window: 131072, + pricing: { + text: { + input: { + normal: 0, + cached: 0, + }, + output: { + normal: 0, + }, + }, + image: 0, + }, + } as const +const QWEN_QWEN_2_5_VL_7B_INSTRUCT_FREE = { + id: 'qwen/qwen-2.5-vl-7b-instruct:free', + name: 'Qwen: Qwen2.5-VL 7B Instruct (free)', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'repetition_penalty', 'temperature'], + }, + context_window: 32768, + pricing: { + text: { + input: { + normal: 0, + cached: 0, + }, + output: { + normal: 0, + }, + }, + image: 0, + }, + } as const +const MISTRALAI_MISTRAL_7B_INSTRUCT_FREE = { + id: 'mistralai/mistral-7b-instruct:free', + name: 'Mistral: Mistral 7B Instruct (free)', + supports: { + input: ['text'], + output: ['text'], + 
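Note that the modality arrays (supports.input, supports.output) are kept separate from the tunable-parameter list (supports.supports), so callers can validate attachments before ever looking at tuning options. A hedged sketch of the kind of check this shape enables; the helper name is invented for illustration:

// Hypothetical helper; relies only on the shape shown in this file.
function acceptsImageInput(model: {
  supports: { input: ReadonlyArray<string> }
}): boolean {
  return model.supports.input.includes('image')
}

// Per the entries above: true for ALLENAI_MOLMO_2_8B_FREE (text, image, video),
// false for text-only entries such as DEEPSEEK_DEEPSEEK_R1_0528_FREE.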
supports: ['frequency_penalty', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 32768, + max_output_tokens: 16384, + pricing: { + text: { + input: { + normal: 0, + cached: 0, + }, + output: { + normal: 0, + }, + }, + image: 0, + }, + } as const +const LIQUID_LFM_2_2_6B = { + id: 'liquid/lfm-2.2-6b', + name: 'LiquidAI/LFM2-2.6B', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'seed', 'stop', 'temperature', 'top_k', 'top_p'], + }, + context_window: 8192, + pricing: { + text: { + input: { + normal: 0.01, + cached: 0, + }, + output: { + normal: 0.02, + }, + }, + image: 0, + }, + } as const +const LIQUID_LFM2_8B_A1B = { + id: 'liquid/lfm2-8b-a1b', + name: 'LiquidAI/LFM2-8B-A1B', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'seed', 'stop', 'temperature', 'top_k', 'top_p'], + }, + context_window: 8192, + pricing: { + text: { + input: { + normal: 0.01, + cached: 0, + }, + output: { + normal: 0.02, + }, + }, + image: 0, + }, + } as const +const IBM_GRANITE_GRANITE_4_0_H_MICRO = { + id: 'ibm-granite/granite-4.0-h-micro', + name: 'IBM: Granite 4.0 Micro', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'repetition_penalty', 'seed', 'temperature', 'top_k', 'top_p'], + }, + context_window: 8192, + pricing: { + text: { + input: { + normal: 0.017, + cached: 0, + }, + output: { + normal: 0.11, + }, + }, + image: 0, + }, + } as const +const GOOGLE_GEMMA_3_4B_IT = { + id: 'google/gemma-3-4b-it', + name: 'Google: Gemma 3 4B', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'temperature', 'top_k', 'top_p'], + }, + context_window: 131072, + pricing: { + text: { + input: { + normal: 0.01703012, + cached: 0, + }, + output: { + normal: 0.0681536, + }, + }, + image: 0, + }, + } as const +const META_LLAMA_LLAMA_3_2_3B_INSTRUCT = { + id: 'meta-llama/llama-3.2-3b-instruct', + name: 'Meta: Llama 3.2 3B Instruct', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 16384, + pricing: { + text: { + input: { + normal: 0.02, + cached: 0, + }, + output: { + normal: 0.02, + }, + }, + image: 0, + }, + } as const +const MISTRALAI_MISTRAL_NEMO = { + id: 'mistralai/mistral-nemo', + name: 'Mistral: Mistral Nemo', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 16384, + pricing: { + text: { + input: { + normal: 0.02, + cached: 0, + }, + output: { + normal: 0.04, + }, + }, + image: 0, + }, + } as const +const GOOGLE_GEMMA_3N_E4B_IT = { + id: 'google/gemma-3n-e4b-it', + name: 'Google: Gemma 3n 4B', + supports: { + input: ['text'], + output: ['text'], + 
supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'stop', 'temperature', 'top_k', 'top_p'], + }, + context_window: 32000, + pricing: { + text: { + input: { + normal: 0.02, + cached: 0, + }, + output: { + normal: 0.04, + }, + }, + image: 0, + }, + } as const +const META_LLAMA_LLAMA_3_1_8B_INSTRUCT = { + id: 'meta-llama/llama-3.1-8b-instruct', + name: 'Meta: Llama 3.1 8B Instruct', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'logprobs', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_logprobs', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 16384, + pricing: { + text: { + input: { + normal: 0.02, + cached: 0, + }, + output: { + normal: 0.05, + }, + }, + image: 0, + }, + } as const +const META_LLAMA_LLAMA_GUARD_3_8B = { + id: 'meta-llama/llama-guard-3-8b', + name: 'Llama Guard 3 8B', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'temperature', 'top_k', 'top_p'], + }, + pricing: { + text: { + input: { + normal: 0.02, + cached: 0, + }, + output: { + normal: 0.06, + }, + }, + image: 0, + }, + } as const +const OPENAI_GPT_OSS_20B = { + id: 'openai/gpt-oss-20b', + name: 'OpenAI: gpt-oss-20b', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'reasoning_effort', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 131072, + pricing: { + text: { + input: { + normal: 0.02, + cached: 0, + }, + output: { + normal: 0.1, + }, + }, + image: 0, + }, + } as const +const NOUSRESEARCH_DEEPHERMES_3_MISTRAL_24B_PREVIEW = { + id: 'nousresearch/deephermes-3-mistral-24b-preview', + name: 'Nous: DeepHermes 3 Mistral 24B Preview', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 32768, + max_output_tokens: 32768, + pricing: { + text: { + input: { + normal: 0.02, + cached: 0, + }, + output: { + normal: 0.1, + }, + }, + image: 0, + }, + } as const +const NOUSRESEARCH_HERMES_2_PRO_LLAMA_3_8B = { + id: 'nousresearch/hermes-2-pro-llama-3-8b', + name: 'NousResearch: Hermes 2 Pro - Llama-3 8B', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'top_k', 'top_p'], + }, + context_window: 8192, + max_output_tokens: 2048, + pricing: { + text: { + input: { + normal: 0.025, + cached: 0, + }, + output: { + normal: 0.08, + }, + }, + image: 0, + }, + } as const +const META_LLAMA_LLAMA_3_2_1B_INSTRUCT = { + id: 'meta-llama/llama-3.2-1b-instruct', + name: 'Meta: Llama 3.2 1B Instruct', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'repetition_penalty', 'seed', 'temperature', 'top_k', 
'top_p'], + }, + context_window: 131072, + pricing: { + text: { + input: { + normal: 0.027, + cached: 0, + }, + output: { + normal: 0.2, + }, + }, + image: 0, + }, + } as const +const MISTRALAI_MISTRAL_7B_INSTRUCT = { + id: 'mistralai/mistral-7b-instruct', + name: 'Mistral: Mistral 7B Instruct', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 32768, + max_output_tokens: 16384, + pricing: { + text: { + input: { + normal: 0.028, + cached: 0, + }, + output: { + normal: 0.054, + }, + }, + image: 0, + }, + } as const +const META_LLAMA_LLAMA_3_8B_INSTRUCT = { + id: 'meta-llama/llama-3-8b-instruct', + name: 'Meta: Llama 3 8B Instruct', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 8192, + max_output_tokens: 16384, + pricing: { + text: { + input: { + normal: 0.03, + cached: 0, + }, + output: { + normal: 0.06, + }, + }, + image: 0, + }, + } as const +const GOOGLE_GEMMA_2_9B_IT = { + id: 'google/gemma-2-9b-it', + name: 'Google: Gemma 2 9B', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'repetition_penalty', 'temperature', 'top_k', 'top_p'], + }, + context_window: 8192, + pricing: { + text: { + input: { + normal: 0.03, + cached: 0, + }, + output: { + normal: 0.09, + }, + }, + image: 0, + }, + } as const +const QWEN_QWEN2_5_CODER_7B_INSTRUCT = { + id: 'qwen/qwen2.5-coder-7b-instruct', + name: 'Qwen: Qwen2.5 Coder 7B Instruct', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'repetition_penalty', 'response_format', 'structured_outputs', 'temperature', 'top_k', 'top_p'], + }, + context_window: 131072, + pricing: { + text: { + input: { + normal: 0.03, + cached: 0, + }, + output: { + normal: 0.09, + }, + }, + image: 0, + }, + } as const +const GOOGLE_GEMMA_3_12B_IT = { + id: 'google/gemma-3-12b-it', + name: 'Google: Gemma 3 12B', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'top_k', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 131072, + pricing: { + text: { + input: { + normal: 0.03, + cached: 0, + }, + output: { + normal: 0.1, + }, + }, + image: 0, + }, + } as const +const MISTRALAI_MISTRAL_SMALL_3_1_24B_INSTRUCT = { + id: 'mistralai/mistral-small-3.1-24b-instruct', + name: 'Mistral: Mistral Small 3.1 24B', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 128000, + max_output_tokens: 131072, + pricing: { + text: { + input: { + normal: 0.03, + cached: 0, + }, + output: { + normal: 0.11, + }, + }, + image: 0, + }, + } as const +const DEEPSEEK_DEEPSEEK_R1_DISTILL_LLAMA_70B = { + id: 'deepseek/deepseek-r1-distill-llama-70b', + name: 'DeepSeek: R1 
Distill Llama 70B', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 128000, + max_output_tokens: 131072, + pricing: { + text: { + input: { + normal: 0.03, + cached: 0, + }, + output: { + normal: 0.11, + }, + }, + image: 0, + }, + } as const +const QWEN_QWEN_2_5_CODER_32B_INSTRUCT = { + id: 'qwen/qwen-2.5-coder-32b-instruct', + name: 'Qwen2.5 Coder 32B Instruct', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'top_k', 'top_p'], + }, + context_window: 128000, + max_output_tokens: 32768, + pricing: { + text: { + input: { + normal: 0.03, + cached: 0, + }, + output: { + normal: 0.11, + }, + }, + image: 0, + }, + } as const +const MISTRALAI_MISTRAL_SMALL_24B_INSTRUCT_2501 = { + id: 'mistralai/mistral-small-24b-instruct-2501', + name: 'Mistral: Mistral Small 3', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 32768, + max_output_tokens: 32768, + pricing: { + text: { + input: { + normal: 0.03, + cached: 0, + }, + output: { + normal: 0.11, + }, + }, + image: 0, + }, + } as const +const QWEN_QWEN3_8B = { + id: 'qwen/qwen3-8b', + name: 'Qwen: Qwen3 8B', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'logit_bias', 'logprobs', 'max_tokens', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_logprobs', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 20000, + pricing: { + text: { + input: { + normal: 0.035, + cached: 0, + }, + output: { + normal: 0.138, + }, + }, + image: 0, + }, + } as const +const AMAZON_NOVA_MICRO_V1 = { + id: 'amazon/nova-micro-v1', + name: 'Amazon: Nova Micro 1.0', + supports: { + input: ['text'], + output: ['text'], + supports: ['max_tokens', 'stop', 'temperature', 'tools', 'top_k', 'top_p'], + }, + context_window: 128000, + max_output_tokens: 5120, + pricing: { + text: { + input: { + normal: 0.035, + cached: 0, + }, + output: { + normal: 0.14, + }, + }, + image: 0, + }, + } as const +const COHERE_COMMAND_R7B_12_2024 = { + id: 'cohere/command-r7b-12-2024', + name: 'Cohere: Command R7B (12-2024)', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'top_k', 'top_p'], + }, + context_window: 128000, + max_output_tokens: 4000, + pricing: { + text: { + input: { + normal: 0.0375, + cached: 0, + }, + output: { + normal: 0.15, + }, + }, + image: 0, + }, + } as const +const OPENAI_GPT_OSS_120B = { + id: 'openai/gpt-oss-120b', + name: 'OpenAI: gpt-oss-120b', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'logit_bias', 'logprobs', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 
'reasoning_effort', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_logprobs', 'top_p'], + }, + context_window: 131072, + pricing: { + text: { + input: { + normal: 0.039, + cached: 0, + }, + output: { + normal: 0.19, + }, + }, + image: 0, + }, + } as const +const OPENAI_GPT_OSS_120B_EXACTO = { + id: 'openai/gpt-oss-120b:exacto', + name: 'OpenAI: gpt-oss-120b (exacto)', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 131072, + pricing: { + text: { + input: { + normal: 0.039, + cached: 0, + }, + output: { + normal: 0.19, + }, + }, + image: 0, + }, + } as const +const MISTRALAI_MINISTRAL_3B = { + id: 'mistralai/ministral-3b', + name: 'Mistral: Ministral 3B', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 128000, + pricing: { + text: { + input: { + normal: 0.04, + cached: 0, + }, + output: { + normal: 0.04, + }, + }, + image: 0, + }, + } as const +const SAO10K_L3_LUNARIS_8B = { + id: 'sao10k/l3-lunaris-8b', + name: 'Sao10K: Llama 3 8B Lunaris', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'top_k', 'top_p'], + }, + context_window: 8192, + pricing: { + text: { + input: { + normal: 0.04, + cached: 0, + }, + output: { + normal: 0.05, + }, + }, + image: 0, + }, + } as const +const QWEN_QWEN_2_5_7B_INSTRUCT = { + id: 'qwen/qwen-2.5-7b-instruct', + name: 'Qwen: Qwen2.5 7B Instruct', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'seed', 'stop', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 131072, + pricing: { + text: { + input: { + normal: 0.04, + cached: 0, + }, + output: { + normal: 0.1, + }, + }, + image: 0, + }, + } as const +const GOOGLE_GEMMA_3_27B_IT = { + id: 'google/gemma-3-27b-it', + name: 'Google: Gemma 3 27B', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 96000, + pricing: { + text: { + input: { + normal: 0.04, + cached: 0, + }, + output: { + normal: 0.15, + }, + }, + image: 0, + }, + } as const +const NVIDIA_NEMOTRON_NANO_9B_V2 = { + id: 'nvidia/nemotron-nano-9b-v2', + name: 'NVIDIA: Nemotron Nano 9B V2', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 32000, + pricing: { + text: { + input: { + normal: 0.04, + cached: 0, + }, + output: { + normal: 0.16, + 
}, + }, + image: 0, + }, + } as const +const ARCEE_AI_TRINITY_MINI = { + id: 'arcee-ai/trinity-mini', + name: 'Arcee AI: Trinity Mini', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 131072, + pricing: { + text: { + input: { + normal: 0.045, + cached: 0, + }, + output: { + normal: 0.15, + }, + }, + image: 0, + }, + } as const +const META_LLAMA_LLAMA_3_2_11B_VISION_INSTRUCT = { + id: 'meta-llama/llama-3.2-11b-vision-instruct', + name: 'Meta: Llama 3.2 11B Vision Instruct', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'temperature', 'top_k', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 16384, + pricing: { + text: { + input: { + normal: 0.049, + cached: 0, + }, + output: { + normal: 0.049, + }, + }, + image: 0.00007948, + }, + } as const +const MICROSOFT_PHI_4_MULTIMODAL_INSTRUCT = { + id: 'microsoft/phi-4-multimodal-instruct', + name: 'Microsoft: Phi 4 Multimodal Instruct', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'temperature', 'top_k', 'top_p'], + }, + context_window: 131072, + pricing: { + text: { + input: { + normal: 0.05, + cached: 0, + }, + output: { + normal: 0.1, + }, + }, + image: 0.00017685, + }, + } as const +const QWEN_QWEN_TURBO = { + id: 'qwen/qwen-turbo', + name: 'Qwen: Qwen-Turbo', + supports: { + input: ['text'], + output: ['text'], + supports: ['max_tokens', 'presence_penalty', 'response_format', 'seed', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 1000000, + max_output_tokens: 8192, + pricing: { + text: { + input: { + normal: 0.05, + cached: 0.02, + }, + output: { + normal: 0.2, + }, + }, + image: 0, + }, + } as const +const Z_AI_GLM_4_5_AIR = { + id: 'z-ai/glm-4.5-air', + name: 'Z.AI: GLM 4.5 Air', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 131072, + pricing: { + text: { + input: { + normal: 0.05, + cached: 0, + }, + output: { + normal: 0.22, + }, + }, + image: 0, + }, + } as const +const QWEN_QWEN2_5_VL_32B_INSTRUCT = { + id: 'qwen/qwen2.5-vl-32b-instruct', + name: 'Qwen: Qwen2.5 VL 32B Instruct', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'logprobs', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'top_k', 'top_logprobs', 'top_p'], + }, + context_window: 32768, + max_output_tokens: 16384, + pricing: { + text: { + input: { + normal: 0.05, + cached: 0, + }, + output: { + normal: 0.22, + }, + }, + image: 0, + }, + } as const +const QWEN_QWEN3_14B = { + id: 'qwen/qwen3-14b', + name: 'Qwen: Qwen3 14B', + supports: { + input: ['text'], + output: ['text'], + 
supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 40960, + pricing: { + text: { + input: { + normal: 0.05, + cached: 0, + }, + output: { + normal: 0.22, + }, + }, + image: 0, + }, + } as const +const MISTRALAI_DEVSTRAL_2512 = { + id: 'mistralai/devstral-2512', + name: 'Mistral: Devstral 2 2512', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 262144, + max_output_tokens: 65536, + pricing: { + text: { + input: { + normal: 0.05, + cached: 0, + }, + output: { + normal: 0.22, + }, + }, + image: 0, + }, + } as const +const OPENAI_GPT_5_NANO = { + id: 'openai/gpt-5-nano', + name: 'OpenAI: GPT-5 Nano', + supports: { + input: ['text', 'image', 'document'], + output: ['text'], + supports: ['include_reasoning', 'max_tokens', 'reasoning', 'response_format', 'seed', 'structured_outputs', 'tool_choice', 'tools'], + }, + context_window: 400000, + max_output_tokens: 128000, + pricing: { + text: { + input: { + normal: 0.05, + cached: 0.005, + }, + output: { + normal: 0.4, + }, + }, + image: 0, + }, + } as const +const QWEN_QWEN3_30B_A3B_THINKING_2507 = { + id: 'qwen/qwen3-30b-a3b-thinking-2507', + name: 'Qwen: Qwen3 30B A3B Thinking 2507', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 131072, + pricing: { + text: { + input: { + normal: 0.051, + cached: 0, + }, + output: { + normal: 0.34, + }, + }, + image: 0, + }, + } as const +const GRYPHE_MYTHOMAX_L2_13B = { + id: 'gryphe/mythomax-l2-13b', + name: 'MythoMax 13B', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'logprobs', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'top_a', 'top_k', 'top_logprobs', 'top_p'], + }, + context_window: 4096, + pricing: { + text: { + input: { + normal: 0.06, + cached: 0, + }, + output: { + normal: 0.06, + }, + }, + image: 0, + }, + } as const +const DEEPSEEK_DEEPSEEK_R1_0528_QWEN3_8B = { + id: 'deepseek/deepseek-r1-0528-qwen3-8b', + name: 'DeepSeek: DeepSeek R1 0528 Qwen3 8B', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'presence_penalty', 'reasoning', 'repetition_penalty', 'seed', 'stop', 'temperature', 'top_k', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 32000, + pricing: { + text: { + input: { + normal: 0.06, + cached: 0, + }, + output: { + normal: 0.09, + }, + }, + image: 0, + }, + } as const +const MISTRALAI_DEVSTRAL_SMALL_2505 = { + id: 'mistralai/devstral-small-2505', + name: 'Mistral: Devstral Small 2505', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'temperature', 'top_k', 'top_p'], + }, + context_window: 131072,
+ pricing: { + text: { + input: { + normal: 0.06, + cached: 0, + }, + output: { + normal: 0.12, + }, + }, + image: 0, + }, + } as const +const MICROSOFT_PHI_4 = { + id: 'microsoft/phi-4', + name: 'Microsoft: Phi 4', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'top_k', 'top_p'], + }, + context_window: 16384, + pricing: { + text: { + input: { + normal: 0.06, + cached: 0, + }, + output: { + normal: 0.14, + }, + }, + image: 0, + }, + } as const +const MISTRALAI_MISTRAL_SMALL_3_2_24B_INSTRUCT = { + id: 'mistralai/mistral-small-3.2-24b-instruct', + name: 'Mistral: Mistral Small 3.2 24B', + supports: { + input: ['image', 'text'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 128000, + max_output_tokens: 131072, + pricing: { + text: { + input: { + normal: 0.06, + cached: 0, + }, + output: { + normal: 0.18, + }, + }, + image: 0, + }, + } as const +const QWEN_QWEN3_30B_A3B = { + id: 'qwen/qwen3-30b-a3b', + name: 'Qwen: Qwen3 30B A3B', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 40960, + pricing: { + text: { + input: { + normal: 0.06, + cached: 0, + }, + output: { + normal: 0.22, + }, + }, + image: 0, + }, + } as const +const AMAZON_NOVA_LITE_V1 = { + id: 'amazon/nova-lite-v1', + name: 'Amazon: Nova Lite 1.0', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['max_tokens', 'stop', 'temperature', 'tools', 'top_k', 'top_p'], + }, + context_window: 300000, + max_output_tokens: 5120, + pricing: { + text: { + input: { + normal: 0.06, + cached: 0, + }, + output: { + normal: 0.24, + }, + }, + image: 0.00009, + }, + } as const +const NVIDIA_NEMOTRON_3_NANO_30B_A3B = { + id: 'nvidia/nemotron-3-nano-30b-a3b', + name: 'NVIDIA: Nemotron 3 Nano 30B A3B', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 256000, + max_output_tokens: 262144, + pricing: { + text: { + input: { + normal: 0.06, + cached: 0, + }, + output: { + normal: 0.24, + }, + }, + image: 0, + }, + } as const +const QWEN_QWEN3_CODER_30B_A3B_INSTRUCT = { + id: 'qwen/qwen3-coder-30b-a3b-instruct', + name: 'Qwen: Qwen3 Coder 30B A3B Instruct', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + max_output_tokens: 32768, + pricing: { + text: { + input: { + normal: 0.07, + cached: 0, + }, + output: { + normal: 0.27, + }, + }, + image: 0, + }, + } as const +const MISTRALAI_DEVSTRAL_SMALL = { + id: 'mistralai/devstral-small', + name: 'Mistral: Devstral 
Small 1.1', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 131072, + pricing: { + text: { + input: { + normal: 0.07, + cached: 0, + }, + output: { + normal: 0.28, + }, + }, + image: 0, + }, + } as const +const BAIDU_ERNIE_4_5_21B_A3B_THINKING = { + id: 'baidu/ernie-4.5-21b-a3b-thinking', + name: 'Baidu: ERNIE 4.5 21B A3B Thinking', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'presence_penalty', 'reasoning', 'repetition_penalty', 'seed', 'stop', 'temperature', 'top_k', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 65536, + pricing: { + text: { + input: { + normal: 0.07, + cached: 0, + }, + output: { + normal: 0.28, + }, + }, + image: 0, + }, + } as const +const BAIDU_ERNIE_4_5_21B_A3B = { + id: 'baidu/ernie-4.5-21b-a3b', + name: 'Baidu: ERNIE 4.5 21B A3B', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'repetition_penalty', 'seed', 'stop', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 8000, + pricing: { + text: { + input: { + normal: 0.07, + cached: 0, + }, + output: { + normal: 0.28, + }, + }, + image: 0, + }, + } as const +const MICROSOFT_PHI_4_REASONING_PLUS = { + id: 'microsoft/phi-4-reasoning-plus', + name: 'Microsoft: Phi 4 Reasoning Plus', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'temperature', 'top_k', 'top_p'], + }, + context_window: 32768, + pricing: { + text: { + input: { + normal: 0.07, + cached: 0, + }, + output: { + normal: 0.35, + }, + }, + image: 0, + }, + } as const +const QWEN_QWEN3_235B_A22B_2507 = { + id: 'qwen/qwen3-235b-a22b-2507', + name: 'Qwen: Qwen3 235B A22B Instruct 2507', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'logit_bias', 'logprobs', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'reasoning_effort', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_logprobs', 'top_p'], + }, + context_window: 262144, + pricing: { + text: { + input: { + normal: 0.071, + cached: 0, + }, + output: { + normal: 0.463, + }, + }, + image: 0, + }, + } as const +const OPENAI_GPT_OSS_SAFEGUARD_20B = { + id: 'openai/gpt-oss-safeguard-20b', + name: 'OpenAI: gpt-oss-safeguard-20b', + supports: { + input: ['text'], + output: ['text'], + supports: ['include_reasoning', 'max_tokens', 'reasoning', 'response_format', 'seed', 'stop', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + max_output_tokens: 65536, + pricing: { + text: { + input: { + normal: 0.075, + cached: 0.037, + }, + output: { + normal: 0.3, + }, + }, + image: 0, + }, + } as const +const BYTEDANCE_SEED_SEED_1_6_FLASH = { + id: 'bytedance-seed/seed-1.6-flash', + name: 'ByteDance Seed: Seed 1.6 Flash', + supports: { + input: ['image', 'text', 'video'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'reasoning', 'response_format', 'structured_outputs', 'temperature', 'tool_choice', 
'tools', 'top_p'], + }, + context_window: 262144, + max_output_tokens: 16384, + pricing: { + text: { + input: { + normal: 0.075, + cached: 0, + }, + output: { + normal: 0.3, + }, + }, + image: 0, + }, + } as const +const GOOGLE_GEMINI_2_0_FLASH_LITE_001 = { + id: 'google/gemini-2.0-flash-lite-001', + name: 'Google: Gemini 2.0 Flash Lite', + supports: { + input: ['text', 'image', 'document', 'audio', 'video'], + output: ['text'], + supports: ['max_tokens', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 1048576, + max_output_tokens: 8192, + pricing: { + text: { + input: { + normal: 0.075, + cached: 0, + }, + output: { + normal: 0.3, + }, + }, + image: 0, + }, + } as const +const QWEN_QWEN3_32B = { + id: 'qwen/qwen3-32b', + name: 'Qwen: Qwen3 32B', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'logprobs', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_logprobs', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 40960, + pricing: { + text: { + input: { + normal: 0.08, + cached: 0, + }, + output: { + normal: 0.24, + }, + }, + image: 0, + }, + } as const +const META_LLAMA_LLAMA_4_SCOUT = { + id: 'meta-llama/llama-4-scout', + name: 'Meta: Llama 4 Scout', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 10000000, + max_output_tokens: 16384, + pricing: { + text: { + input: { + normal: 0.08, + cached: 0, + }, + output: { + normal: 0.3, + }, + }, + image: 0.0003342, + }, + } as const +const QWEN_QWEN3_30B_A3B_INSTRUCT_2507 = { + id: 'qwen/qwen3-30b-a3b-instruct-2507', + name: 'Qwen: Qwen3 30B A3B Instruct 2507', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 262144, + pricing: { + text: { + input: { + normal: 0.08, + cached: 0, + }, + output: { + normal: 0.33, + }, + }, + image: 0, + }, + } as const +const QWEN_QWEN3_VL_8B_INSTRUCT = { + id: 'qwen/qwen3-vl-8b-instruct', + name: 'Qwen: Qwen3 VL 8B Instruct', + supports: { + input: ['image', 'text'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 256000, + max_output_tokens: 32768, + pricing: { + text: { + input: { + normal: 0.08, + cached: 0, + }, + output: { + normal: 0.5, + }, + }, + image: 0, + }, + } as const +const ALIBABA_TONGYI_DEEPRESEARCH_30B_A3B = { + id: 'alibaba/tongyi-deepresearch-30b-a3b', + name: 'Tongyi DeepResearch 30B A3B', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 
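+// NOTE (editor's sketch): constants of this shape are easy to query by modality; a
+// minimal, hypothetical filter over an arbitrary sample of the entries above:
+//   const visionCapable = [GOOGLE_GEMMA_3_27B_IT, META_LLAMA_LLAMA_4_SCOUT]
+//     .filter((m) => (m.supports.input as ReadonlyArray<string>).includes('image'))
+// Any constants from this file could stand in for the sample array.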
'top_k', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 131072, + pricing: { + text: { + input: { + normal: 0.09, + cached: 0, + }, + output: { + normal: 0.4, + }, + }, + image: 0, + }, + } as const +const NEVERSLEEP_LLAMA_3_1_LUMIMAID_8B = { + id: 'neversleep/llama-3.1-lumimaid-8b', + name: 'NeverSleep: Lumimaid v0.2 8B', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'response_format', 'stop', 'structured_outputs', 'temperature', 'top_p'], + }, + context_window: 131072, + pricing: { + text: { + input: { + normal: 0.09, + cached: 0, + }, + output: { + normal: 0.6, + }, + }, + image: 0, + }, + } as const +const QWEN_QWEN3_NEXT_80B_A3B_INSTRUCT = { + id: 'qwen/qwen3-next-80b-a3b-instruct', + name: 'Qwen: Qwen3 Next 80B A3B Instruct', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 262144, + pricing: { + text: { + input: { + normal: 0.09, + cached: 0, + }, + output: { + normal: 1.1, + }, + }, + image: 0, + }, + } as const +const Z_AI_GLM_4_32B = { + id: 'z-ai/glm-4-32b', + name: 'Z.AI: GLM 4 32B ', + supports: { + input: ['text'], + output: ['text'], + supports: ['max_tokens', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 128000, + pricing: { + text: { + input: { + normal: 0.1, + cached: 0, + }, + output: { + normal: 0.1, + }, + }, + image: 0, + }, + } as const +const MISTRALAI_PIXTRAL_12B = { + id: 'mistralai/pixtral-12b', + name: 'Mistral: Pixtral 12B', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 4096, + pricing: { + text: { + input: { + normal: 0.1, + cached: 0, + }, + output: { + normal: 0.1, + }, + }, + image: 0.0001445, + }, + } as const +const MISTRALAI_MINISTRAL_3B_2512 = { + id: 'mistralai/ministral-3b-2512', + name: 'Mistral: Ministral 3 3B 2512', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 128000, + pricing: { + text: { + input: { + normal: 0.1, + cached: 0, + }, + output: { + normal: 0.1, + }, + }, + image: 0, + }, + } as const +const MISTRAL_MINISTRAL_8B = { + id: 'mistral/ministral-8b', + name: 'Mistral: Ministral 8B', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 131072, + pricing: { + text: { + input: { + normal: 0.1, + cached: 0, + }, + output: { + normal: 0.1, + }, + }, + image: 0, + }, + } as const +const MISTRALAI_MINISTRAL_8B = { + id: 'mistralai/ministral-8b', + name: 'Mistral: Ministral 8B', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + 
context_window: 128000, + pricing: { + text: { + input: { + normal: 0.1, + cached: 0, + }, + output: { + normal: 0.1, + }, + }, + image: 0, + }, + } as const +const ALLENAI_OLMO_3_7B_INSTRUCT = { + id: 'allenai/olmo-3-7b-instruct', + name: 'AllenAI: Olmo 3 7B Instruct', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 65536, + max_output_tokens: 65536, + pricing: { + text: { + input: { + normal: 0.1, + cached: 0, + }, + output: { + normal: 0.2, + }, + }, + image: 0, + }, + } as const +const BYTEDANCE_UI_TARS_1_5_7B = { + id: 'bytedance/ui-tars-1.5-7b', + name: 'ByteDance: UI-TARS 7B ', + supports: { + input: ['image', 'text'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'seed', 'stop', 'temperature', 'top_k', 'top_p'], + }, + context_window: 128000, + max_output_tokens: 2048, + pricing: { + text: { + input: { + normal: 0.1, + cached: 0, + }, + output: { + normal: 0.2, + }, + }, + image: 0, + }, + } as const +const MISTRALAI_MISTRAL_SMALL_CREATIVE = { + id: 'mistralai/mistral-small-creative', + name: 'Mistral: Mistral Small Creative', + supports: { + input: ['text'], + output: ['text'], + supports: ['tool_choice', 'tools'], + }, + context_window: 32768, + pricing: { + text: { + input: { + normal: 0.1, + cached: 0, + }, + output: { + normal: 0.3, + }, + }, + image: 0, + }, + } as const +const MISTRALAI_VOXTRAL_SMALL_24B_2507 = { + id: 'mistralai/voxtral-small-24b-2507', + name: 'Mistral: Voxtral Small 24B 2507', + supports: { + input: ['text', 'audio'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 32000, + pricing: { + text: { + input: { + normal: 0.1, + cached: 0, + }, + output: { + normal: 0.3, + }, + }, + image: 0, + }, + } as const +const META_LLAMA_LLAMA_3_3_70B_INSTRUCT = { + id: 'meta-llama/llama-3.3-70b-instruct', + name: 'Meta: Llama 3.3 70B Instruct', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'logprobs', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_logprobs', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 16384, + pricing: { + text: { + input: { + normal: 0.1, + cached: 0, + }, + output: { + normal: 0.32, + }, + }, + image: 0, + }, + } as const +const OPENGVLAB_INTERNVL3_78B = { + id: 'opengvlab/internvl3-78b', + name: 'OpenGVLab: InternVL3 78B', + supports: { + input: ['image', 'text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'top_k', 'top_p'], + }, + max_output_tokens: 32768, + pricing: { + text: { + input: { + normal: 0.1, + cached: 0, + }, + output: { + normal: 0.39, + }, + }, + image: 0, + }, + } as const +const OPENAI_GPT_4_1_NANO = { + id: 'openai/gpt-4.1-nano', + name: 'OpenAI: GPT-4.1 Nano', + supports: { + input: ['image', 'text', 'document'], + output: ['text'], + supports: ['max_tokens', 'response_format', 'seed', 
'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 1047576, + max_output_tokens: 32768, + pricing: { + text: { + input: { + normal: 0.1, + cached: 0.025, + }, + output: { + normal: 0.4, + }, + }, + image: 0, + }, + } as const +const GOOGLE_GEMINI_2_0_FLASH_001 = { + id: 'google/gemini-2.0-flash-001', + name: 'Google: Gemini 2.0 Flash', + supports: { + input: ['text', 'image', 'document', 'audio', 'video'], + output: ['text'], + supports: ['max_tokens', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 1000000, + max_output_tokens: 8192, + pricing: { + text: { + input: { + normal: 0.1, + cached: 0.2083, + }, + output: { + normal: 0.4, + }, + }, + image: 0.0000258, + }, + } as const +const GOOGLE_GEMINI_2_5_FLASH_LITE_PREVIEW_09_2025 = { + id: 'google/gemini-2.5-flash-lite-preview-09-2025', + name: 'Google: Gemini 2.5 Flash Lite Preview 09-2025', + supports: { + input: ['text', 'image', 'document', 'audio', 'video'], + output: ['text'], + supports: ['include_reasoning', 'max_tokens', 'reasoning', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 1048576, + max_output_tokens: 65536, + pricing: { + text: { + input: { + normal: 0.1, + cached: 0, + }, + output: { + normal: 0.4, + }, + }, + image: 0, + }, + } as const +const GOOGLE_GEMINI_2_5_FLASH_LITE = { + id: 'google/gemini-2.5-flash-lite', + name: 'Google: Gemini 2.5 Flash Lite', + supports: { + input: ['text', 'image', 'document', 'audio', 'video'], + output: ['text'], + supports: ['include_reasoning', 'max_tokens', 'reasoning', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 1048576, + max_output_tokens: 65535, + pricing: { + text: { + input: { + normal: 0.1, + cached: 0.1933, + }, + output: { + normal: 0.4, + }, + }, + image: 0, + }, + } as const +const NVIDIA_LLAMA_3_3_NEMOTRON_SUPER_49B_V1_5 = { + id: 'nvidia/llama-3.3-nemotron-super-49b-v1.5', + name: 'NVIDIA: Llama 3.3 Nemotron Super 49B V1.5', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 131072, + pricing: { + text: { + input: { + normal: 0.1, + cached: 0, + }, + output: { + normal: 0.4, + }, + }, + image: 0, + }, + } as const +const QWEN_QWEN3_VL_4B_INSTRUCT = { + id: 'qwen/qwen3-vl-4b-instruct', + name: 'Qwen: Qwen3 VL 4B Instruct', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'temperature', 'top_k', 'top_p'], + }, + context_window: 256000, + pricing: { + text: { + input: { + normal: 0.1, + cached: 0, + }, + output: { + normal: 0.6, + }, + }, + image: 0, + }, + } as const +const MISTRALAI_MISTRAL_7B_INSTRUCT_V0_1 = { + id: 'mistralai/mistral-7b-instruct-v0.1', + name: 'Mistral: Mistral 7B Instruct v0.1', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'repetition_penalty', 'seed', 'temperature', 'top_k', 'top_p'], + }, + context_window: 4096, + pricing: { + text: { + input: { + normal: 0.11, + cached: 0, + }, + output: {
+ normal: 0.19, + }, + }, + image: 0, + }, + } as const +const NOUSRESEARCH_HERMES_4_70B = { + id: 'nousresearch/hermes-4-70b', + name: 'Nous: Hermes 4 70B', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 131072, + pricing: { + text: { + input: { + normal: 0.11, + cached: 0, + }, + output: { + normal: 0.38, + }, + }, + image: 0, + }, + } as const +const QWEN_QWEN3_235B_A22B_THINKING_2507 = { + id: 'qwen/qwen3-235b-a22b-thinking-2507', + name: 'Qwen: Qwen3 235B A22B Thinking 2507', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 262144, + max_output_tokens: 262144, + pricing: { + text: { + input: { + normal: 0.11, + cached: 0, + }, + output: { + normal: 0.6, + }, + }, + image: 0, + }, + } as const +const ALLENAI_OLMO_3_7B_THINK = { + id: 'allenai/olmo-3-7b-think', + name: 'AllenAI: Olmo 3 7B Think', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'top_k', 'top_p'], + }, + context_window: 65536, + max_output_tokens: 65536, + pricing: { + text: { + input: { + normal: 0.12, + cached: 0, + }, + output: { + normal: 0.2, + }, + }, + image: 0, + }, + } as const +const QWEN_QWEN_2_5_72B_INSTRUCT = { + id: 'qwen/qwen-2.5-72b-instruct', + name: 'Qwen2.5 72B Instruct', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 16384, + pricing: { + text: { + input: { + normal: 0.12, + cached: 0, + }, + output: { + normal: 0.39, + }, + }, + image: 0, + }, + } as const +const BAIDU_ERNIE_4_5_VL_28B_A3B = { + id: 'baidu/ernie-4.5-vl-28b-a3b', + name: 'Baidu: ERNIE 4.5 VL 28B A3B', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'presence_penalty', 'reasoning', 'repetition_penalty', 'seed', 'stop', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 8000, + pricing: { + text: { + input: { + normal: 0.14, + cached: 0, + }, + output: { + normal: 0.56, + }, + }, + image: 0, + }, + } as const +const TENCENT_HUNYUAN_A13B_INSTRUCT = { + id: 'tencent/hunyuan-a13b-instruct', + name: 'Tencent: Hunyuan A13B Instruct', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'reasoning', 'response_format', 'structured_outputs', 'temperature', 'top_k', 'top_p'], + }, + context_window: 32768, + max_output_tokens: 131072, + pricing: { + text: { + input: { + normal: 0.14, + cached: 0, + }, + output: { + normal: 0.57, + }, + }, + image: 0, + }, + } as const +const 
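+// NOTE (editor's sketch): the `pricing.text` figures read as USD per 1M tokens, with
+// `input.cached` as the rate for cache-hit input tokens where one is reported. A
+// hypothetical estimator under that assumption (`ModelEntry` as sketched earlier):
+//   const estimateUSD = (m: ModelEntry, inTok: number, cachedTok: number, outTok: number) =>
+//     ((inTok - cachedTok) * m.pricing.text.input.normal +
+//       cachedTok * m.pricing.text.input.cached +
+//       outTok * m.pricing.text.output.normal) / 1e6
+// e.g. OPENAI_GPT_5_NANO at 10k input (2k cached) plus 1k output comes to ~$0.0008.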
DEEPSEEK_DEEPSEEK_R1_DISTILL_QWEN_14B = { + id: 'deepseek/deepseek-r1-distill-qwen-14b', + name: 'DeepSeek: R1 Distill Qwen 14B', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'top_k', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 16384, + pricing: { + text: { + input: { + normal: 0.15, + cached: 0, + }, + output: { + normal: 0.15, + }, + }, + image: 0, + }, + } as const +const ESSENTIALAI_RNJ_1_INSTRUCT = { + id: 'essentialai/rnj-1-instruct', + name: 'EssentialAI: Rnj 1 Instruct', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'stop', 'structured_outputs', 'temperature', 'top_k', 'top_p'], + }, + context_window: 32768, + pricing: { + text: { + input: { + normal: 0.15, + cached: 0, + }, + output: { + normal: 0.15, + }, + }, + image: 0, + }, + } as const +const MISTRALAI_MINISTRAL_8B_2512 = { + id: 'mistralai/ministral-8b-2512', + name: 'Mistral: Ministral 3 8B 2512', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 128000, + pricing: { + text: { + input: { + normal: 0.15, + cached: 0, + }, + output: { + normal: 0.15, + }, + }, + image: 0, + }, + } as const +const QWEN_QWQ_32B = { + id: 'qwen/qwq-32b', + name: 'Qwen: QwQ 32B', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 131072, + pricing: { + text: { + input: { + normal: 0.15, + cached: 0, + }, + output: { + normal: 0.4, + }, + }, + image: 0, + }, + } as const +const ALLENAI_OLMO_3_1_32B_THINK = { + id: 'allenai/olmo-3.1-32b-think', + name: 'AllenAI: Olmo 3.1 32B Think', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'top_k', 'top_p'], + }, + context_window: 65536, + max_output_tokens: 65536, + pricing: { + text: { + input: { + normal: 0.15, + cached: 0, + }, + output: { + normal: 0.5, + }, + }, + image: 0, + }, + } as const +const ALLENAI_OLMO_3_32B_THINK = { + id: 'allenai/olmo-3-32b-think', + name: 'AllenAI: Olmo 3 32B Think', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'top_k', 'top_p'], + }, + context_window: 65536, + max_output_tokens: 65536, + pricing: { + text: { + input: { + normal: 0.15, + cached: 0, + }, + output: { + normal: 0.5, + }, + }, + image: 0, + }, + } as const +const QWEN_QWEN3_VL_30B_A3B_INSTRUCT = { + id: 'qwen/qwen3-vl-30b-a3b-instruct', + name: 'Qwen: Qwen3 VL 30B A3B Instruct', + supports: { + input: ['text', 'image'], + 
output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'logprobs', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_logprobs', 'top_p'], + }, + context_window: 262144, + pricing: { + text: { + input: { + normal: 0.15, + cached: 0, + }, + output: { + normal: 0.6, + }, + }, + image: 0, + }, + } as const +const META_LLAMA_LLAMA_4_MAVERICK = { + id: 'meta-llama/llama-4-maverick', + name: 'Meta: Llama 4 Maverick', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 1048576, + max_output_tokens: 16384, + pricing: { + text: { + input: { + normal: 0.15, + cached: 0, + }, + output: { + normal: 0.6, + }, + }, + image: 0.0006684, + }, + } as const +const OPENAI_GPT_4O_MINI_SEARCH_PREVIEW = { + id: 'openai/gpt-4o-mini-search-preview', + name: 'OpenAI: GPT-4o-mini Search Preview', + supports: { + input: ['text'], + output: ['text'], + supports: ['max_tokens', 'response_format', 'structured_outputs', 'web_search_options'], + }, + context_window: 128000, + max_output_tokens: 16384, + pricing: { + text: { + input: { + normal: 0.15, + cached: 0, + }, + output: { + normal: 0.6, + }, + }, + image: 0, + }, + } as const +const OPENAI_GPT_4O_MINI_2024_07_18 = { + id: 'openai/gpt-4o-mini-2024-07-18', + name: 'OpenAI: GPT-4o-mini (2024-07-18)', + supports: { + input: ['text', 'image', 'document'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'logprobs', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_logprobs', 'top_p', 'web_search_options'], + }, + context_window: 128000, + max_output_tokens: 16384, + pricing: { + text: { + input: { + normal: 0.15, + cached: 0.075, + }, + output: { + normal: 0.6, + }, + }, + image: 0, + }, + } as const +const OPENAI_GPT_4O_MINI = { + id: 'openai/gpt-4o-mini', + name: 'OpenAI: GPT-4o-mini', + supports: { + input: ['text', 'image', 'document'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'logprobs', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_logprobs', 'top_p', 'web_search_options'], + }, + context_window: 128000, + max_output_tokens: 16384, + pricing: { + text: { + input: { + normal: 0.15, + cached: 0.075, + }, + output: { + normal: 0.6, + }, + }, + image: 0, + }, + } as const +const COHERE_COMMAND_R_08_2024 = { + id: 'cohere/command-r-08-2024', + name: 'Cohere: Command R (08-2024)', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 128000, + max_output_tokens: 4000, + pricing: { + text: { + input: { + normal: 0.15, + cached: 0, + }, + output: { + normal: 0.6, + }, + }, + image: 0, + }, + } as const +const QWEN_QWEN2_5_VL_72B_INSTRUCT = { + id: 'qwen/qwen2.5-vl-72b-instruct', + name: 'Qwen: Qwen2.5 VL 72B Instruct', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 
'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'top_k', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 32768, + pricing: { + text: { + input: { + normal: 0.15, + cached: 0, + }, + output: { + normal: 0.6, + }, + }, + image: 0, + }, + } as const +const DEEPSEEK_DEEPSEEK_CHAT_V3_1 = { + id: 'deepseek/deepseek-chat-v3.1', + name: 'DeepSeek: DeepSeek V3.1', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'logit_bias', 'logprobs', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_logprobs', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 7168, + pricing: { + text: { + input: { + normal: 0.15, + cached: 0, + }, + output: { + normal: 0.75, + }, + }, + image: 0, + }, + } as const +const QWEN_QWEN3_NEXT_80B_A3B_THINKING = { + id: 'qwen/qwen3-next-80b-a3b-thinking', + name: 'Qwen: Qwen3 Next 80B A3B Thinking', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 262144, + max_output_tokens: 262144, + pricing: { + text: { + input: { + normal: 0.15, + cached: 0, + }, + output: { + normal: 1.2, + }, + }, + image: 0, + }, + } as const +const THEDRUMMER_ROCINANTE_12B = { + id: 'thedrummer/rocinante-12b', + name: 'TheDrummer: Rocinante 12B', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 32768, + pricing: { + text: { + input: { + normal: 0.17, + cached: 0, + }, + output: { + normal: 0.43, + }, + }, + image: 0, + }, + } as const +const ARCEE_AI_SPOTLIGHT = { + id: 'arcee-ai/spotlight', + name: 'Arcee AI: Spotlight', + supports: { + input: ['image', 'text'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'stop', 'temperature', 'top_k', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 65537, + pricing: { + text: { + input: { + normal: 0.18, + cached: 0, + }, + output: { + normal: 0.18, + }, + }, + image: 0, + }, + } as const +const META_LLAMA_LLAMA_GUARD_4_12B = { + id: 'meta-llama/llama-guard-4-12b', + name: 'Meta: Llama Guard 4 12B', + supports: { + input: ['image', 'text'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'temperature', 'top_k', 'top_p'], + }, + context_window: 163840, + pricing: { + text: { + input: { + normal: 0.18, + cached: 0, + }, + output: { + normal: 0.18, + }, + }, + image: 0, + }, + } as const +const QWEN_QWEN3_235B_A22B = { + id: 'qwen/qwen3-235b-a22b', + name: 'Qwen: Qwen3 235B A22B', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'logit_bias', 'logprobs', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 
'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_logprobs', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 40960, + pricing: { + text: { + input: { + normal: 0.18, + cached: 0, + }, + output: { + normal: 0.54, + }, + }, + image: 0, + }, + } as const +const DEEPCOGITO_COGITO_V2_PREVIEW_LLAMA_109B_MOE = { + id: 'deepcogito/cogito-v2-preview-llama-109b-moe', + name: 'Cogito V2 Preview Llama 109B', + supports: { + input: ['image', 'text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'stop', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 131072, + pricing: { + text: { + input: { + normal: 0.18, + cached: 0, + }, + output: { + normal: 0.59, + }, + }, + image: 0, + }, + } as const +const QWEN_QWEN3_VL_8B_THINKING = { + id: 'qwen/qwen3-vl-8b-thinking', + name: 'Qwen: Qwen3 VL 8B Thinking', + supports: { + input: ['image', 'text'], + output: ['text'], + supports: ['include_reasoning', 'max_tokens', 'presence_penalty', 'reasoning', 'response_format', 'seed', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 256000, + max_output_tokens: 32768, + pricing: { + text: { + input: { + normal: 0.18, + cached: 0, + }, + output: { + normal: 2.1, + }, + }, + image: 0, + }, + } as const +const DEEPSEEK_DEEPSEEK_CHAT_V3_0324 = { + id: 'deepseek/deepseek-chat-v3-0324', + name: 'DeepSeek: DeepSeek V3 0324', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'logprobs', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_logprobs', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 65536, + pricing: { + text: { + input: { + normal: 0.19, + cached: 0, + }, + output: { + normal: 0.87, + }, + }, + image: 0, + }, + } as const +const META_LLAMA_LLAMA_GUARD_2_8B = { + id: 'meta-llama/llama-guard-2-8b', + name: 'Meta: LlamaGuard 2 8B', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'stop', 'temperature', 'top_k', 'top_p'], + }, + context_window: 8192, + pricing: { + text: { + input: { + normal: 0.2, + cached: 0, + }, + output: { + normal: 0.2, + }, + }, + image: 0, + }, + } as const +const MISTRALAI_MISTRAL_7B_INSTRUCT_V0_3 = { + id: 'mistralai/mistral-7b-instruct-v0.3', + name: 'Mistral: Mistral 7B Instruct v0.3', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'stop', 'temperature', 'top_k', 'top_p'], + }, + context_window: 32768, + max_output_tokens: 4096, + pricing: { + text: { + input: { + normal: 0.2, + cached: 0, + }, + output: { + normal: 0.2, + }, + }, + image: 0, + }, + } as const +const QWEN_QWEN_2_5_VL_7B_INSTRUCT = { + id: 'qwen/qwen-2.5-vl-7b-instruct', + name: 'Qwen: Qwen2.5-VL 7B Instruct', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'seed', 'stop', 'temperature', 'top_k', 'top_p'], + }, + context_window: 32768, + pricing: { + text: { + input: { + normal: 0.2, + cached: 0, + }, + output: { + normal: 0.2, + }, + }, 
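+// NOTE (editor's sketch): the sibling `image` field just below appears to be USD per
+// input image (nonzero only on some vision entries, e.g. 0.0001445 here and on
+// MISTRALAI_PIXTRAL_12B); under that reading an image surcharge would simply be:
+//   const imageUSD = (m: ModelEntry, nImages: number) => nImages * m.pricing.image
+// Treat the per-image unit as inferred rather than guaranteed by this generated file.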
+ image: 0.0001445, + }, + } as const +const MISTRALAI_MINISTRAL_14B_2512 = { + id: 'mistralai/ministral-14b-2512', + name: 'Mistral: Ministral 3 14B 2512', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 128000, + pricing: { + text: { + input: { + normal: 0.2, + cached: 0, + }, + output: { + normal: 0.2, + }, + }, + image: 0, + }, + } as const +const MISTRALAI_MISTRAL_7B_INSTRUCT_V0_2 = { + id: 'mistralai/mistral-7b-instruct-v0.2', + name: 'Mistral: Mistral 7B Instruct v0.2', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'stop', 'temperature', 'top_k', 'top_p'], + }, + context_window: 32768, + pricing: { + text: { + input: { + normal: 0.2, + cached: 0, + }, + output: { + normal: 0.2, + }, + }, + image: 0, + }, + } as const +const AI21_JAMBA_MINI_1_7 = { + id: 'ai21/jamba-mini-1.7', + name: 'AI21: Jamba Mini 1.7', + supports: { + input: ['text'], + output: ['text'], + supports: ['max_tokens', 'response_format', 'stop', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 256000, + max_output_tokens: 4096, + pricing: { + text: { + input: { + normal: 0.2, + cached: 0, + }, + output: { + normal: 0.4, + }, + }, + image: 0, + }, + } as const +const X_AI_GROK_4_1_FAST = { + id: 'x-ai/grok-4.1-fast', + name: 'xAI: Grok 4.1 Fast', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['include_reasoning', 'logprobs', 'max_tokens', 'reasoning', 'response_format', 'seed', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_logprobs', 'top_p'], + }, + context_window: 2000000, + max_output_tokens: 30000, + pricing: { + text: { + input: { + normal: 0.2, + cached: 0.05, + }, + output: { + normal: 0.5, + }, + }, + image: 0, + }, + } as const +const X_AI_GROK_4_FAST = { + id: 'x-ai/grok-4-fast', + name: 'xAI: Grok 4 Fast', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['include_reasoning', 'logprobs', 'max_tokens', 'reasoning', 'response_format', 'seed', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_logprobs', 'top_p'], + }, + context_window: 2000000, + max_output_tokens: 30000, + pricing: { + text: { + input: { + normal: 0.2, + cached: 0.05, + }, + output: { + normal: 0.5, + }, + }, + image: 0, + }, + } as const +const NVIDIA_NEMOTRON_NANO_12B_V2_VL = { + id: 'nvidia/nemotron-nano-12b-v2-vl', + name: 'NVIDIA: Nemotron Nano 12B 2 VL', + supports: { + input: ['image', 'text', 'video'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'temperature', 'top_k', 'top_p'], + }, + context_window: 128000, + pricing: { + text: { + input: { + normal: 0.2, + cached: 0, + }, + output: { + normal: 0.6, + }, + }, + image: 0, + }, + } as const +const MISTRALAI_MISTRAL_SABA = { + id: 'mistralai/mistral-saba', + name: 'Mistral: Saba', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 32000, + pricing: { + text: { 
+ input: { + normal: 0.2, + cached: 0, + }, + output: { + normal: 0.6, + }, + }, + image: 0, + }, + } as const +const ALLENAI_OLMO_3_1_32B_INSTRUCT = { + id: 'allenai/olmo-3.1-32b-instruct', + name: 'AllenAI: Olmo 3.1 32B Instruct', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 65536, + pricing: { + text: { + input: { + normal: 0.2, + cached: 0, + }, + output: { + normal: 0.6, + }, + }, + image: 0, + }, + } as const +const MEITUAN_LONGCAT_FLASH_CHAT = { + id: 'meituan/longcat-flash-chat', + name: 'Meituan: LongCat Flash Chat', + supports: { + input: ['text'], + output: ['text'], + supports: ['max_tokens', 'temperature', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 131072, + pricing: { + text: { + input: { + normal: 0.2, + cached: 0, + }, + output: { + normal: 0.8, + }, + }, + image: 0, + }, + } as const +const QWEN_QWEN3_VL_30B_A3B_THINKING = { + id: 'qwen/qwen3-vl-30b-a3b-thinking', + name: 'Qwen: Qwen3 VL 30B A3B Thinking', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + max_output_tokens: 32768, + pricing: { + text: { + input: { + normal: 0.2, + cached: 0, + }, + output: { + normal: 1, + }, + }, + image: 0, + }, + } as const +const MINIMAX_MINIMAX_M2 = { + id: 'minimax/minimax-m2', + name: 'MiniMax: MiniMax M2', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 204800, + max_output_tokens: 65536, + pricing: { + text: { + input: { + normal: 0.2, + cached: 0.03, + }, + output: { + normal: 1, + }, + }, + image: 0, + }, + } as const +const MINIMAX_MINIMAX_01 = { + id: 'minimax/minimax-01', + name: 'MiniMax: MiniMax-01', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['max_tokens', 'temperature', 'top_p'], + }, + context_window: 1000000, + max_output_tokens: 1000192, + pricing: { + text: { + input: { + normal: 0.2, + cached: 0, + }, + output: { + normal: 1.1, + }, + }, + image: 0, + }, + } as const +const PRIME_INTELLECT_INTELLECT_3 = { + id: 'prime-intellect/intellect-3', + name: 'Prime Intellect: INTELLECT-3', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 131072, + pricing: { + text: { + input: { + normal: 0.2, + cached: 0, + }, + output: { + normal: 1.1, + }, + }, + image: 0, + }, + } as const +const QWEN_QWEN3_VL_235B_A22B_INSTRUCT = { + id: 'qwen/qwen3-vl-235b-a22b-instruct', + name: 'Qwen: Qwen3 VL 235B A22B Instruct', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'logprobs', 'max_tokens', 'min_p', 
'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_logprobs', 'top_p'], + }, + context_window: 131072, + pricing: { + text: { + input: { + normal: 0.2, + cached: 0, + }, + output: { + normal: 1.2, + }, + }, + image: 0, + }, + } as const +const X_AI_GROK_CODE_FAST_1 = { + id: 'x-ai/grok-code-fast-1', + name: 'xAI: Grok Code Fast 1', + supports: { + input: ['text'], + output: ['text'], + supports: ['include_reasoning', 'logprobs', 'max_tokens', 'reasoning', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_logprobs', 'top_p'], + }, + context_window: 256000, + max_output_tokens: 10000, + pricing: { + text: { + input: { + normal: 0.2, + cached: 0.02, + }, + output: { + normal: 1.5, + }, + }, + image: 0, + }, + } as const +const KWAIPILOT_KAT_CODER_PRO = { + id: 'kwaipilot/kat-coder-pro', + name: 'Kwaipilot: KAT-Coder-Pro V1', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 262144, + max_output_tokens: 128000, + pricing: { + text: { + input: { + normal: 0.207, + cached: 0.0414, + }, + output: { + normal: 0.828, + }, + }, + image: 0, + }, + } as const +const DEEPSEEK_DEEPSEEK_V3_2_EXP = { + id: 'deepseek/deepseek-v3.2-exp', + name: 'DeepSeek: DeepSeek V3.2 Exp', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 65536, + pricing: { + text: { + input: { + normal: 0.21, + cached: 0, + }, + output: { + normal: 0.32, + }, + }, + image: 0, + }, + } as const +const QWEN_QWEN_VL_PLUS = { + id: 'qwen/qwen-vl-plus', + name: 'Qwen: Qwen VL Plus', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['max_tokens', 'presence_penalty', 'response_format', 'seed', 'temperature', 'top_p'], + }, + context_window: 7500, + max_output_tokens: 1500, + pricing: { + text: { + input: { + normal: 0.21, + cached: 0, + }, + output: { + normal: 0.63, + }, + }, + image: 0.0002688, + }, + } as const +const DEEPSEEK_DEEPSEEK_V3_1_TERMINUS_EXACTO = { + id: 'deepseek/deepseek-v3.1-terminus:exacto', + name: 'DeepSeek: DeepSeek V3.1 Terminus (exacto)', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 131072, + pricing: { + text: { + input: { + normal: 0.21, + cached: 0.168, + }, + output: { + normal: 0.79, + }, + }, + image: 0, + }, + } as const +const DEEPSEEK_DEEPSEEK_V3_1_TERMINUS = { + id: 'deepseek/deepseek-v3.1-terminus', + name: 'DeepSeek: DeepSeek V3.1 Terminus', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], 
+ }, + context_window: 131072, + pricing: { + text: { + input: { + normal: 0.21, + cached: 0.168, + }, + output: { + normal: 0.79, + }, + }, + image: 0, + }, + } as const +const QWEN_QWEN3_CODER = { + id: 'qwen/qwen3-coder', + name: 'Qwen: Qwen3 Coder 480B A35B', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'logprobs', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_logprobs', 'top_p'], + }, + context_window: 1048576, + max_output_tokens: 262144, + pricing: { + text: { + input: { + normal: 0.22, + cached: 0, + }, + output: { + normal: 0.95, + }, + }, + image: 0, + }, + } as const +const QWEN_QWEN3_CODER_EXACTO = { + id: 'qwen/qwen3-coder:exacto', + name: 'Qwen: Qwen3 Coder 480B A35B (exacto)', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 1048576, + max_output_tokens: 65536, + pricing: { + text: { + input: { + normal: 0.22, + cached: 0, + }, + output: { + normal: 1.8, + }, + }, + image: 0, + }, + } as const +const MISTRALAI_MISTRAL_TINY = { + id: 'mistralai/mistral-tiny', + name: 'Mistral Tiny', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 32000, + pricing: { + text: { + input: { + normal: 0.25, + cached: 0, + }, + output: { + normal: 0.25, + }, + }, + image: 0, + }, + } as const +const DEEPSEEK_DEEPSEEK_V3_2 = { + id: 'deepseek/deepseek-v3.2', + name: 'DeepSeek: DeepSeek V3.2', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'logit_bias', 'logprobs', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_logprobs', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 65536, + pricing: { + text: { + input: { + normal: 0.25, + cached: 0, + }, + output: { + normal: 0.38, + }, + }, + image: 0, + }, + } as const +const TNGTECH_DEEPSEEK_R1T2_CHIMERA = { + id: 'tngtech/deepseek-r1t2-chimera', + name: 'TNG: DeepSeek R1T2 Chimera', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 163840, + max_output_tokens: 163840, + pricing: { + text: { + input: { + normal: 0.25, + cached: 0, + }, + output: { + normal: 0.85, + }, + }, + image: 0, + }, + } as const +const TNGTECH_TNG_R1T_CHIMERA = { + id: 'tngtech/tng-r1t-chimera', + name: 'TNG: R1T Chimera', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 163840, + max_output_tokens: 65536, + pricing: { + 
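/* Schema note (an assumption inferred from the surrounding values, not a documented contract): text prices in these entries read as USD per 1M tokens, with "normal" the standard rate, "cached" the cached-input rate, and "image" a per-image USD charge. */ + 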
text: { + input: { + normal: 0.25, + cached: 0, + }, + output: { + normal: 0.85, + }, + }, + image: 0, + }, + } as const +const INCEPTION_MERCURY = { + id: 'inception/mercury', + name: 'Inception: Mercury', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'response_format', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 128000, + max_output_tokens: 16384, + pricing: { + text: { + input: { + normal: 0.25, + cached: 0, + }, + output: { + normal: 1, + }, + }, + image: 0, + }, + } as const +const INCEPTION_MERCURY_CODER = { + id: 'inception/mercury-coder', + name: 'Inception: Mercury Coder', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'response_format', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 128000, + max_output_tokens: 16384, + pricing: { + text: { + input: { + normal: 0.25, + cached: 0, + }, + output: { + normal: 1, + }, + }, + image: 0, + }, + } as const +const ANTHROPIC_CLAUDE_3_HAIKU = { + id: 'anthropic/claude-3-haiku', + name: 'Anthropic: Claude 3 Haiku', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['max_tokens', 'stop', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 200000, + max_output_tokens: 4096, + pricing: { + text: { + input: { + normal: 0.25, + cached: 0.33, + }, + output: { + normal: 1.25, + }, + }, + image: 0.0004, + }, + } as const +const OPENAI_GPT_5_MINI = { + id: 'openai/gpt-5-mini', + name: 'OpenAI: GPT-5 Mini', + supports: { + input: ['text', 'image', 'document'], + output: ['text'], + supports: ['include_reasoning', 'max_tokens', 'reasoning', 'response_format', 'seed', 'structured_outputs', 'tool_choice', 'tools'], + }, + context_window: 400000, + max_output_tokens: 128000, + pricing: { + text: { + input: { + normal: 0.25, + cached: 0.025, + }, + output: { + normal: 2, + }, + }, + image: 0, + }, + } as const +const OPENAI_GPT_5_1_CODEX_MINI = { + id: 'openai/gpt-5.1-codex-mini', + name: 'OpenAI: GPT-5.1-Codex-Mini', + supports: { + input: ['image', 'text'], + output: ['text'], + supports: ['include_reasoning', 'max_tokens', 'reasoning', 'response_format', 'seed', 'structured_outputs', 'tool_choice', 'tools'], + }, + context_window: 400000, + max_output_tokens: 100000, + pricing: { + text: { + input: { + normal: 0.25, + cached: 0.025, + }, + output: { + normal: 2, + }, + }, + image: 0, + }, + } as const +const BYTEDANCE_SEED_SEED_1_6 = { + id: 'bytedance-seed/seed-1.6', + name: 'ByteDance Seed: Seed 1.6', + supports: { + input: ['image', 'text', 'video'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'reasoning', 'response_format', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 262144, + max_output_tokens: 32768, + pricing: { + text: { + input: { + normal: 0.25, + cached: 0, + }, + output: { + normal: 2, + }, + }, + image: 0, + }, + } as const +const DEEPSEEK_DEEPSEEK_R1_DISTILL_QWEN_32B = { + id: 'deepseek/deepseek-r1-distill-qwen-32b', + name: 'DeepSeek: R1 Distill Qwen 32B', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 
'temperature', 'top_k', 'top_p'], + }, + context_window: 128000, + pricing: { + text: { + input: { + normal: 0.27, + cached: 0, + }, + output: { + normal: 0.27, + }, + }, + image: 0, + }, + } as const +const DEEPSEEK_DEEPSEEK_V3_2_SPECIALE = { + id: 'deepseek/deepseek-v3.2-speciale', + name: 'DeepSeek: DeepSeek V3.2 Speciale', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'top_k', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 65536, + pricing: { + text: { + input: { + normal: 0.27, + cached: 0, + }, + output: { + normal: 0.41, + }, + }, + image: 0, + }, + } as const +const NEX_AGI_DEEPSEEK_V3_1_NEX_N1 = { + id: 'nex-agi/deepseek-v3.1-nex-n1', + name: 'Nex AGI: DeepSeek V3.1 Nex N1', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'response_format', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 8192, + max_output_tokens: 163840, + pricing: { + text: { + input: { + normal: 0.27, + cached: 0, + }, + output: { + normal: 1, + }, + }, + image: 0, + }, + } as const +const MINIMAX_MINIMAX_M2_1 = { + id: 'minimax/minimax-m2.1', + name: 'MiniMax: MiniMax M2.1', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'logit_bias', 'logprobs', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_logprobs', 'top_p'], + }, + context_window: 204800, + max_output_tokens: 65536, + pricing: { + text: { + input: { + normal: 0.27, + cached: 0, + }, + output: { + normal: 1.12, + }, + }, + image: 0, + }, + } as const +const BAIDU_ERNIE_4_5_300B_A47B = { + id: 'baidu/ernie-4.5-300b-a47b', + name: 'Baidu: ERNIE 4.5 300B A47B ', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'top_k', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 12000, + pricing: { + text: { + input: { + normal: 0.28, + cached: 0, + }, + output: { + normal: 1.1, + }, + }, + image: 0, + }, + } as const +const MOONSHOTAI_KIMI_DEV_72B = { + id: 'moonshotai/kimi-dev-72b', + name: 'MoonshotAI: Kimi Dev 72B', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'reasoning', 'response_format', 'structured_outputs', 'temperature', 'top_k', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 131072, + pricing: { + text: { + input: { + normal: 0.29, + cached: 0, + }, + output: { + normal: 1.15, + }, + }, + image: 0, + }, + } as const +const NOUSRESEARCH_HERMES_3_LLAMA_3_1_70B = { + id: 'nousresearch/hermes-3-llama-3.1-70b', + name: 'Nous: Hermes 3 70B Instruct', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'top_k', 'top_p'], + }, + context_window: 131072, + pricing: { + text: { + input: { + normal: 0.3, + cached: 0, + }, + output: { + normal: 0.3, + }, + }, + image: 0, + }, + } as const +const 
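/* A hedged sketch, not generator output: assuming the text rates above are USD per 1M tokens, a per-call cost estimate over any of these entries could look like the following (the helper name is hypothetical). + function estimateTextCostUsd(m: { pricing: { text: { input: { normal: number; cached: number }; output: { normal: number } } } }, inputTokens: number, outputTokens: number, cachedInputTokens = 0): number { + const t = m.pricing.text + const freshInput = inputTokens-cachedInputTokens + return (freshInput*t.input.normal+cachedInputTokens*t.input.cached+outputTokens*t.output.normal)/1_000_000 + } + Usage: estimateTextCostUsd(X_AI_GROK_CODE_FAST_1, 12_000, 2_000, 4_000) prices a call at grok-code-fast-1 rates, billing the 4k cached input tokens at the lower cached rate. */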
META_LLAMA_LLAMA_3_70B_INSTRUCT = { + id: 'meta-llama/llama-3-70b-instruct', + name: 'Meta: Llama 3 70B Instruct', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 8192, + max_output_tokens: 16384, + pricing: { + text: { + input: { + normal: 0.3, + cached: 0, + }, + output: { + normal: 0.4, + }, + }, + image: 0, + }, + } as const +const THEDRUMMER_CYDONIA_24B_V4_1 = { + id: 'thedrummer/cydonia-24b-v4.1', + name: 'TheDrummer: Cydonia 24B V4.1', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'top_k', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 131072, + pricing: { + text: { + input: { + normal: 0.3, + cached: 0, + }, + output: { + normal: 0.5, + }, + }, + image: 0, + }, + } as const +const X_AI_GROK_3_MINI = { + id: 'x-ai/grok-3-mini', + name: 'xAI: Grok 3 Mini', + supports: { + input: ['text'], + output: ['text'], + supports: ['include_reasoning', 'logprobs', 'max_tokens', 'reasoning', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_logprobs', 'top_p'], + }, + context_window: 131072, + pricing: { + text: { + input: { + normal: 0.3, + cached: 0.075, + }, + output: { + normal: 0.5, + }, + }, + image: 0, + }, + } as const +const X_AI_GROK_3_MINI_BETA = { + id: 'x-ai/grok-3-mini-beta', + name: 'xAI: Grok 3 Mini Beta', + supports: { + input: ['text'], + output: ['text'], + supports: ['include_reasoning', 'logprobs', 'max_tokens', 'reasoning', 'response_format', 'seed', 'stop', 'temperature', 'tool_choice', 'tools', 'top_logprobs', 'top_p'], + }, + context_window: 131072, + pricing: { + text: { + input: { + normal: 0.3, + cached: 0.075, + }, + output: { + normal: 0.5, + }, + }, + image: 0, + }, + } as const +const Z_AI_GLM_4_6V = { + id: 'z-ai/glm-4.6v', + name: 'Z.AI: GLM 4.6V', + supports: { + input: ['image', 'text', 'video'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 131072, + pricing: { + text: { + input: { + normal: 0.3, + cached: 0, + }, + output: { + normal: 0.9, + }, + }, + image: 0, + }, + } as const +const MISTRALAI_CODESTRAL_2508 = { + id: 'mistralai/codestral-2508', + name: 'Mistral: Codestral 2508', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 256000, + pricing: { + text: { + input: { + normal: 0.3, + cached: 0, + }, + output: { + normal: 0.9, + }, + }, + image: 0, + }, + } as const +const DEEPSEEK_DEEPSEEK_CHAT = { + id: 'deepseek/deepseek-chat', + name: 'DeepSeek: DeepSeek V3', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 
'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 163840, + pricing: { + text: { + input: { + normal: 0.3, + cached: 0, + }, + output: { + normal: 1.2, + }, + }, + image: 0, + }, + } as const +const TNGTECH_DEEPSEEK_R1T_CHIMERA = { + id: 'tngtech/deepseek-r1t-chimera', + name: 'TNG: DeepSeek R1T Chimera', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'top_k', 'top_p'], + }, + context_window: 163840, + max_output_tokens: 163840, + pricing: { + text: { + input: { + normal: 0.3, + cached: 0, + }, + output: { + normal: 1.2, + }, + }, + image: 0, + }, + } as const +const QWEN_QWEN3_CODER_FLASH = { + id: 'qwen/qwen3-coder-flash', + name: 'Qwen: Qwen3 Coder Flash', + supports: { + input: ['text'], + output: ['text'], + supports: ['max_tokens', 'presence_penalty', 'response_format', 'seed', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 128000, + max_output_tokens: 65536, + pricing: { + text: { + input: { + normal: 0.3, + cached: 0.08, + }, + output: { + normal: 1.5, + }, + }, + image: 0, + }, + } as const +const GOOGLE_GEMINI_2_5_FLASH_IMAGE_PREVIEW = { + id: 'google/gemini-2.5-flash-image-preview', + name: 'Google: Gemini 2.5 Flash Image Preview (Nano Banana)', + supports: { + input: ['image', 'text'], + output: ['image', 'text'], + supports: ['max_tokens', 'response_format', 'seed', 'structured_outputs', 'temperature', 'top_p'], + }, + context_window: 32768, + max_output_tokens: 32768, + pricing: { + text: { + input: { + normal: 0.3, + cached: 0, + }, + output: { + normal: 2.5, + }, + }, + image: 0.001238, + }, + } as const +const GOOGLE_GEMINI_2_5_FLASH_PREVIEW_09_2025 = { + id: 'google/gemini-2.5-flash-preview-09-2025', + name: 'Google: Gemini 2.5 Flash Preview 09-2025', + supports: { + input: ['image', 'document', 'text', 'audio', 'video'], + output: ['text'], + supports: ['include_reasoning', 'max_tokens', 'reasoning', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 1048576, + max_output_tokens: 65536, + pricing: { + text: { + input: { + normal: 0.3, + cached: 0.4583, + }, + output: { + normal: 2.5, + }, + }, + image: 0.001238, + }, + } as const +const GOOGLE_GEMINI_2_5_FLASH_IMAGE = { + id: 'google/gemini-2.5-flash-image', + name: 'Google: Gemini 2.5 Flash Image (Nano Banana)', + supports: { + input: ['image', 'text'], + output: ['image', 'text'], + supports: ['max_tokens', 'response_format', 'seed', 'structured_outputs', 'temperature', 'top_p'], + }, + context_window: 32768, + max_output_tokens: 32768, + pricing: { + text: { + input: { + normal: 0.3, + cached: 0, + }, + output: { + normal: 2.5, + }, + }, + image: 0.001238, + }, + } as const +const AMAZON_NOVA_2_LITE_V1 = { + id: 'amazon/nova-2-lite-v1', + name: 'Amazon: Nova 2 Lite', + supports: { + input: ['text', 'image', 'video', 'document'], + output: ['text'], + supports: ['include_reasoning', 'max_tokens', 'reasoning', 'stop', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 1000000, + max_output_tokens: 65535, + pricing: { + text: { + input: { + normal: 0.3, + cached: 0, + }, + output: { + normal: 2.5, + }, + }, + image: 0, + }, + } as const +const GOOGLE_GEMINI_2_5_FLASH = { + id: 'google/gemini-2.5-flash', + name: 'Google: Gemini 2.5 
Flash', + supports: { + input: ['document', 'image', 'text', 'audio', 'video'], + output: ['text'], + supports: ['include_reasoning', 'max_tokens', 'reasoning', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 1048576, + max_output_tokens: 65535, + pricing: { + text: { + input: { + normal: 0.3, + cached: 0.4133, + }, + output: { + normal: 2.5, + }, + }, + image: 0.001238, + }, + } as const +const Z_AI_GLM_4_6 = { + id: 'z-ai/glm-4.6', + name: 'Z.AI: GLM 4.6', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'logit_bias', 'logprobs', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_a', 'top_k', 'top_logprobs', 'top_p'], + }, + context_window: 200000, + max_output_tokens: 65536, + pricing: { + text: { + input: { + normal: 0.35, + cached: 0, + }, + output: { + normal: 1.5, + }, + }, + image: 0, + }, + } as const +const Z_AI_GLM_4_5 = { + id: 'z-ai/glm-4.5', + name: 'Z.AI: GLM 4.5', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 65536, + pricing: { + text: { + input: { + normal: 0.35, + cached: 0, + }, + output: { + normal: 1.55, + }, + }, + image: 0, + }, + } as const +const MOONSHOTAI_KIMI_K2_0905 = { + id: 'moonshotai/kimi-k2-0905', + name: 'MoonshotAI: Kimi K2 0905', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'logprobs', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_logprobs', 'top_p'], + }, + context_window: 262144, + max_output_tokens: 262144, + pricing: { + text: { + input: { + normal: 0.39, + cached: 0, + }, + output: { + normal: 1.9, + }, + }, + image: 0, + }, + } as const +const META_LLAMA_LLAMA_3_1_70B_INSTRUCT = { + id: 'meta-llama/llama-3.1-70b-instruct', + name: 'Meta: Llama 3.1 70B Instruct', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 131072, + pricing: { + text: { + input: { + normal: 0.4, + cached: 0, + }, + output: { + normal: 0.4, + }, + }, + image: 0, + }, + } as const +const THEDRUMMER_UNSLOPNEMO_12B = { + id: 'thedrummer/unslopnemo-12b', + name: 'TheDrummer: UnslopNemo 12B', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'response_format', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 32000, + pricing: { + text: { + input: { + normal: 0.4, + cached: 0, + }, + output: { + normal: 0.4, + }, + }, + image: 0, + }, + } as const +const QWEN_QWEN_PLUS_2025_07_28 = { + id: 'qwen/qwen-plus-2025-07-28', + name: 'Qwen: Qwen Plus 0728', + supports: { + input: ['text'], + output: ['text'], + supports: ['max_tokens', 'presence_penalty', 'response_format', 'seed', 
'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 1000000, + max_output_tokens: 32768, + pricing: { + text: { + input: { + normal: 0.4, + cached: 0, + }, + output: { + normal: 1.2, + }, + }, + image: 0, + }, + } as const +const QWEN_QWEN_PLUS = { + id: 'qwen/qwen-plus', + name: 'Qwen: Qwen-Plus', + supports: { + input: ['text'], + output: ['text'], + supports: ['max_tokens', 'presence_penalty', 'response_format', 'seed', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 8192, + pricing: { + text: { + input: { + normal: 0.4, + cached: 0.16, + }, + output: { + normal: 1.2, + }, + }, + image: 0, + }, + } as const +const Z_AI_GLM_4_7 = { + id: 'z-ai/glm-4.7', + name: 'Z.AI: GLM 4.7', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'logit_bias', 'logprobs', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_a', 'top_k', 'top_logprobs', 'top_p'], + }, + context_window: 200000, + max_output_tokens: 65535, + pricing: { + text: { + input: { + normal: 0.4, + cached: 0, + }, + output: { + normal: 1.5, + }, + }, + image: 0, + }, + } as const +const OPENAI_GPT_4_1_MINI = { + id: 'openai/gpt-4.1-mini', + name: 'OpenAI: GPT-4.1 Mini', + supports: { + input: ['image', 'text', 'document'], + output: ['text'], + supports: ['max_tokens', 'response_format', 'seed', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 1047576, + max_output_tokens: 32768, + pricing: { + text: { + input: { + normal: 0.4, + cached: 0.1, + }, + output: { + normal: 1.6, + }, + }, + image: 0, + }, + } as const +const MOONSHOTAI_KIMI_K2_THINKING = { + id: 'moonshotai/kimi-k2-thinking', + name: 'MoonshotAI: Kimi K2 Thinking', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'logit_bias', 'logprobs', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_logprobs', 'top_p'], + }, + context_window: 262144, + max_output_tokens: 65535, + pricing: { + text: { + input: { + normal: 0.4, + cached: 0, + }, + output: { + normal: 1.75, + }, + }, + image: 0, + }, + } as const +const MISTRALAI_DEVSTRAL_MEDIUM = { + id: 'mistralai/devstral-medium', + name: 'Mistral: Devstral Medium', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 131072, + pricing: { + text: { + input: { + normal: 0.4, + cached: 0, + }, + output: { + normal: 2, + }, + }, + image: 0, + }, + } as const +const MISTRALAI_MISTRAL_MEDIUM_3 = { + id: 'mistralai/mistral-medium-3', + name: 'Mistral: Mistral Medium 3', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 131072, + pricing: { + text: { + input: { + normal: 0.4, + cached: 0, + }, + output: { + normal: 2, + }, + }, + image: 0, + }, + } as const +const MISTRALAI_MISTRAL_MEDIUM_3_1 = { + id: 'mistralai/mistral-medium-3.1', 
+ name: 'Mistral: Mistral Medium 3.1', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 131072, + pricing: { + text: { + input: { + normal: 0.4, + cached: 0, + }, + output: { + normal: 2, + }, + }, + image: 0, + }, + } as const +const MINIMAX_MINIMAX_M1 = { + id: 'minimax/minimax-m1', + name: 'MiniMax: MiniMax M1', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'presence_penalty', 'reasoning', 'repetition_penalty', 'seed', 'stop', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 1000000, + max_output_tokens: 40000, + pricing: { + text: { + input: { + normal: 0.4, + cached: 0, + }, + output: { + normal: 2.2, + }, + }, + image: 0, + }, + } as const +const QWEN_QWEN_PLUS_2025_07_28_THINKING = { + id: 'qwen/qwen-plus-2025-07-28:thinking', + name: 'Qwen: Qwen Plus 0728 (thinking)', + supports: { + input: ['text'], + output: ['text'], + supports: ['include_reasoning', 'max_tokens', 'presence_penalty', 'reasoning', 'response_format', 'seed', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 1000000, + max_output_tokens: 32768, + pricing: { + text: { + input: { + normal: 0.4, + cached: 0, + }, + output: { + normal: 4, + }, + }, + image: 0, + }, + } as const +const BAIDU_ERNIE_4_5_VL_424B_A47B = { + id: 'baidu/ernie-4.5-vl-424b-a47b', + name: 'Baidu: ERNIE 4.5 VL 424B A47B ', + supports: { + input: ['image', 'text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'presence_penalty', 'reasoning', 'repetition_penalty', 'seed', 'stop', 'temperature', 'top_k', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 16000, + pricing: { + text: { + input: { + normal: 0.42, + cached: 0, + }, + output: { + normal: 1.25, + }, + }, + image: 0, + }, + } as const +const Z_AI_GLM_4_6_EXACTO = { + id: 'z-ai/glm-4.6:exacto', + name: 'Z.AI: GLM 4.6 (exacto)', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 200000, + max_output_tokens: 131072, + pricing: { + text: { + input: { + normal: 0.44, + cached: 0, + }, + output: { + normal: 1.76, + }, + }, + image: 0, + }, + } as const +const UNDI95_REMM_SLERP_L2_13B = { + id: 'undi95/remm-slerp-l2-13b', + name: 'ReMM SLERP 13B', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'logprobs', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'top_a', 'top_k', 'top_logprobs', 'top_p'], + }, + context_window: 4096, + pricing: { + text: { + input: { + normal: 0.45, + cached: 0, + }, + output: { + normal: 0.65, + }, + }, + image: 0, + }, + } as const +const DEEPSEEK_DEEPSEEK_R1_0528 = { + id: 'deepseek/deepseek-r1-0528', + name: 'DeepSeek: R1 0528', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'logit_bias', 'logprobs', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 
'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_logprobs', 'top_p'], + }, + context_window: 163840, + max_output_tokens: 32768, + pricing: { + text: { + input: { + normal: 0.45, + cached: 0, + }, + output: { + normal: 2.15, + }, + }, + image: 0, + }, + } as const +const QWEN_QWEN3_VL_235B_A22B_THINKING = { + id: 'qwen/qwen3-vl-235b-a22b-thinking', + name: 'Qwen: Qwen3 VL 235B A22B Thinking', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 262144, + pricing: { + text: { + input: { + normal: 0.45, + cached: 0, + }, + output: { + normal: 3.5, + }, + }, + image: 0, + }, + } as const +const MICROSOFT_WIZARDLM_2_8X22B = { + id: 'microsoft/wizardlm-2-8x22b', + name: 'WizardLM-2 8x22B', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'temperature', 'top_k', 'top_p'], + }, + context_window: 65536, + max_output_tokens: 16384, + pricing: { + text: { + input: { + normal: 0.48, + cached: 0, + }, + output: { + normal: 0.48, + }, + }, + image: 0, + }, + } as const +const ARCEE_AI_CODER_LARGE = { + id: 'arcee-ai/coder-large', + name: 'Arcee AI: Coder Large', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'stop', 'temperature', 'top_k', 'top_p'], + }, + context_window: 32768, + pricing: { + text: { + input: { + normal: 0.5, + cached: 0, + }, + output: { + normal: 0.8, + }, + }, + image: 0, + }, + } as const +const MISTRALAI_MISTRAL_LARGE_2512 = { + id: 'mistralai/mistral-large-2512', + name: 'Mistral: Mistral Large 3 2512', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 256000, + pricing: { + text: { + input: { + normal: 0.5, + cached: 0, + }, + output: { + normal: 1.5, + }, + }, + image: 0, + }, + } as const +const QWEN_QWEN3_VL_32B_INSTRUCT = { + id: 'qwen/qwen3-vl-32b-instruct', + name: 'Qwen: Qwen3 VL 32B Instruct', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'stop', 'structured_outputs', 'temperature', 'top_k', 'top_p'], + }, + context_window: 262144, + pricing: { + text: { + input: { + normal: 0.5, + cached: 0, + }, + output: { + normal: 1.5, + }, + }, + image: 0, + }, + } as const +const OPENAI_GPT_3_5_TURBO = { + id: 'openai/gpt-3.5-turbo', + name: 'OpenAI: GPT-3.5 Turbo', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'logprobs', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_logprobs', 'top_p'], + }, + context_window: 16385, + max_output_tokens: 4096, + pricing: { + text: { + input: { + normal: 0.5, + cached: 0, + }, + output: { + normal: 1.5, + }, + }, + image: 0, + }, + } as const +const 
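/* Because each entry is frozen with "as const", ids, context windows, and supports lists keep their literal types; for example (an illustration, not generated code), type GrokWindow = (typeof X_AI_GROK_CODE_FAST_1)['context_window'] resolves to the literal 256000 rather than number, so callers can validate limits at compile time. */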
DEEPSEEK_DEEPSEEK_PROVER_V2 = { + id: 'deepseek/deepseek-prover-v2', + name: 'DeepSeek: DeepSeek Prover V2', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'temperature', 'top_k', 'top_p'], + }, + context_window: 163840, + pricing: { + text: { + input: { + normal: 0.5, + cached: 0, + }, + output: { + normal: 2.18, + }, + }, + image: 0, + }, + } as const +const MOONSHOTAI_KIMI_K2 = { + id: 'moonshotai/kimi-k2', + name: 'MoonshotAI: Kimi K2 0711', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'logprobs', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_logprobs', 'top_p'], + }, + context_window: 131072, + pricing: { + text: { + input: { + normal: 0.5, + cached: 0, + }, + output: { + normal: 2.4, + }, + }, + image: 0, + }, + } as const +const GOOGLE_GEMINI_3_FLASH_PREVIEW = { + id: 'google/gemini-3-flash-preview', + name: 'Google: Gemini 3 Flash Preview', + supports: { + input: ['text', 'image', 'document', 'audio', 'video'], + output: ['text'], + supports: ['include_reasoning', 'max_tokens', 'reasoning', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 1048576, + max_output_tokens: 65535, + pricing: { + text: { + input: { + normal: 0.5, + cached: 0.05, + }, + output: { + normal: 3, + }, + }, + image: 0, + }, + } as const +const MISTRALAI_MIXTRAL_8X7B_INSTRUCT = { + id: 'mistralai/mixtral-8x7b-instruct', + name: 'Mistral: Mixtral 8x7B Instruct', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 32768, + max_output_tokens: 16384, + pricing: { + text: { + input: { + normal: 0.54, + cached: 0, + }, + output: { + normal: 0.54, + }, + }, + image: 0, + }, + } as const +const THEDRUMMER_SKYFALL_36B_V2 = { + id: 'thedrummer/skyfall-36b-v2', + name: 'TheDrummer: Skyfall 36B V2', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'seed', 'stop', 'temperature', 'top_k', 'top_p'], + }, + context_window: 32768, + max_output_tokens: 32768, + pricing: { + text: { + input: { + normal: 0.55, + cached: 0, + }, + output: { + normal: 0.8, + }, + }, + image: 0, + }, + } as const +const MINIMAX_MINIMAX_M1_80K = { + id: 'minimax/minimax-m1-80k', + name: 'MiniMax: MiniMax-M1-80k', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'presence_penalty', 'reasoning', 'repetition_penalty', 'seed', 'stop', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 1000000, + max_output_tokens: 40000, + pricing: { + text: { + input: { + normal: 0.55, + cached: 0, + }, + output: { + normal: 2.2, + }, + }, + image: 0, + }, + } as const +const STEPFUN_AI_STEP3 = { + id: 'stepfun-ai/step3', + name: 'StepFun: Step3', + supports: { + input: ['image', 'text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'reasoning', 'response_format', 'structured_outputs', 'temperature', 'tool_choice', 
'tools', 'top_k', 'top_p'], + }, + context_window: 65536, + max_output_tokens: 65536, + pricing: { + text: { + input: { + normal: 0.57, + cached: 0, + }, + output: { + normal: 1.42, + }, + }, + image: 0, + }, + } as const +const NVIDIA_LLAMA_3_1_NEMOTRON_ULTRA_253B_V1 = { + id: 'nvidia/llama-3.1-nemotron-ultra-253b-v1', + name: 'NVIDIA: Llama 3.1 Nemotron Ultra 253B v1', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'structured_outputs', 'temperature', 'top_k', 'top_p'], + }, + context_window: 131072, + pricing: { + text: { + input: { + normal: 0.6, + cached: 0, + }, + output: { + normal: 1.8, + }, + }, + image: 0, + }, + } as const +const Z_AI_GLM_4_5V = { + id: 'z-ai/glm-4.5v', + name: 'Z.AI: GLM 4.5V', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 65536, + max_output_tokens: 16384, + pricing: { + text: { + input: { + normal: 0.6, + cached: 0.11, + }, + output: { + normal: 1.8, + }, + }, + image: 0, + }, + } as const +const MOONSHOTAI_KIMI_K2_0905_EXACTO = { + id: 'moonshotai/kimi-k2-0905:exacto', + name: 'MoonshotAI: Kimi K2 0905 (exacto)', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 262144, + pricing: { + text: { + input: { + normal: 0.6, + cached: 0, + }, + output: { + normal: 2.5, + }, + }, + image: 0, + }, + } as const +const GOOGLE_GEMMA_2_27B_IT = { + id: 'google/gemma-2-27b-it', + name: 'Google: Gemma 2 27B', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'response_format', 'stop', 'structured_outputs', 'temperature', 'top_p'], + }, + context_window: 8192, + pricing: { + text: { + input: { + normal: 0.65, + cached: 0, + }, + output: { + normal: 0.65, + }, + }, + image: 0, + }, + } as const +const SAO10K_L3_3_EURYALE_70B = { + id: 'sao10k/l3.3-euryale-70b', + name: 'Sao10K: Llama 3.3 Euryale 70B', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'top_k', 'top_p'], + }, + context_window: 8192, + max_output_tokens: 16384, + pricing: { + text: { + input: { + normal: 0.65, + cached: 0, + }, + output: { + normal: 0.75, + }, + }, + image: 0, + }, + } as const +const SAO10K_L3_1_EURYALE_70B = { + id: 'sao10k/l3.1-euryale-70b', + name: 'Sao10K: Llama 3.1 Euryale 70B v2.2', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 131072, + pricing: { + text: { + input: { + normal: 0.65, + cached: 0, + }, + output: { + normal: 0.75, + }, + }, + image: 0, + }, + } as const +const AION_LABS_AION_1_0_MINI = { + id: 'aion-labs/aion-1.0-mini', + name: 'AionLabs: Aion-1.0-Mini', + supports: { + input: 
['text'], + output: ['text'], + supports: ['include_reasoning', 'max_tokens', 'reasoning', 'temperature', 'top_p'], + }, + context_window: 16384, + max_output_tokens: 32768, + pricing: { + text: { + input: { + normal: 0.7, + cached: 0, + }, + output: { + normal: 1.4, + }, + }, + image: 0, + }, + } as const +const DEEPSEEK_DEEPSEEK_R1 = { + id: 'deepseek/deepseek-r1', + name: 'DeepSeek: R1', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'seed', 'stop', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 163840, + max_output_tokens: 163840, + pricing: { + text: { + input: { + normal: 0.7, + cached: 0, + }, + output: { + normal: 2.4, + }, + }, + image: 0, + }, + } as const +const MANCER_WEAVER = { + id: 'mancer/weaver', + name: 'Mancer: Weaver (alpha)', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'logprobs', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'temperature', 'top_a', 'top_k', 'top_logprobs', 'top_p'], + }, + context_window: 8000, + max_output_tokens: 2000, + pricing: { + text: { + input: { + normal: 0.75, + cached: 0, + }, + output: { + normal: 1, + }, + }, + image: 0, + }, + } as const +const ARCEE_AI_VIRTUOSO_LARGE = { + id: 'arcee-ai/virtuoso-large', + name: 'Arcee AI: Virtuoso Large', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'stop', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 64000, + pricing: { + text: { + input: { + normal: 0.75, + cached: 0, + }, + output: { + normal: 1.2, + }, + }, + image: 0, + }, + } as const +const MORPH_MORPH_V3_FAST = { + id: 'morph/morph-v3-fast', + name: 'Morph: Morph V3 Fast', + supports: { + input: ['text'], + output: ['text'], + supports: ['max_tokens', 'stop', 'temperature'], + }, + context_window: 81920, + max_output_tokens: 38000, + pricing: { + text: { + input: { + normal: 0.8, + cached: 0, + }, + output: { + normal: 1.2, + }, + }, + image: 0, + }, + } as const +const ALFREDPROS_CODELLAMA_7B_INSTRUCT_SOLIDITY = { + id: 'alfredpros/codellama-7b-instruct-solidity', + name: 'AlfredPros: CodeLLaMa 7B Instruct Solidity', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'seed', 'stop', 'temperature', 'top_k', 'top_p'], + }, + context_window: 4096, + max_output_tokens: 4096, + pricing: { + text: { + input: { + normal: 0.8, + cached: 0, + }, + output: { + normal: 1.2, + }, + }, + image: 0, + }, + } as const +const ELEUTHERAI_LLEMMA_7B = { + id: 'eleutherai/llemma_7b', + name: 'EleutherAI: Llemma 7b', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'seed', 'stop', 'temperature', 'top_k', 'top_p'], + }, + context_window: 4096, + max_output_tokens: 4096, + pricing: { + text: { + input: { + normal: 0.8, + cached: 0, + }, + output: { + normal: 1.2, + }, + }, + image: 0, + }, + } as const +const AION_LABS_AION_RP_LLAMA_3_1_8B = { + id: 'aion-labs/aion-rp-llama-3.1-8b', + name: 'AionLabs: Aion-RP 1.0 (8B)', + supports: { + input: ['text'], + output: ['text'], + supports: 
['max_tokens', 'temperature', 'top_p'], + }, + context_window: 32768, + max_output_tokens: 32768, + pricing: { + text: { + input: { + normal: 0.8, + cached: 0, + }, + output: { + normal: 1.6, + }, + }, + image: 0, + }, + } as const +const AMAZON_NOVA_PRO_V1 = { + id: 'amazon/nova-pro-v1', + name: 'Amazon: Nova Pro 1.0', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['max_tokens', 'stop', 'temperature', 'tools', 'top_k', 'top_p'], + }, + context_window: 300000, + max_output_tokens: 5120, + pricing: { + text: { + input: { + normal: 0.8, + cached: 0, + }, + output: { + normal: 3.2, + }, + }, + image: 0.0012, + }, + } as const +const QWEN_QWEN_VL_MAX = { + id: 'qwen/qwen-vl-max', + name: 'Qwen: Qwen VL Max', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['max_tokens', 'presence_penalty', 'response_format', 'seed', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 8192, + pricing: { + text: { + input: { + normal: 0.8, + cached: 0, + }, + output: { + normal: 3.2, + }, + }, + image: 0.001024, + }, + } as const +const ANTHROPIC_CLAUDE_3_5_HAIKU = { + id: 'anthropic/claude-3.5-haiku', + name: 'Anthropic: Claude 3.5 Haiku', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['max_tokens', 'stop', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 200000, + max_output_tokens: 8192, + pricing: { + text: { + input: { + normal: 0.8, + cached: 1.08, + }, + output: { + normal: 4, + }, + }, + image: 0, + }, + } as const +const RELACE_RELACE_APPLY_3 = { + id: 'relace/relace-apply-3', + name: 'Relace: Relace Apply 3', + supports: { + input: ['text'], + output: ['text'], + supports: ['max_tokens', 'seed', 'stop'], + }, + context_window: 256000, + max_output_tokens: 128000, + pricing: { + text: { + input: { + normal: 0.85, + cached: 0, + }, + output: { + normal: 1.25, + }, + }, + image: 0, + }, + } as const +const SWITCHPOINT_ROUTER = { + id: 'switchpoint/router', + name: 'Switchpoint Router', + supports: { + input: ['text'], + output: ['text'], + supports: ['include_reasoning', 'max_tokens', 'reasoning', 'seed', 'stop', 'temperature', 'top_k', 'top_p'], + }, + context_window: 131072, + pricing: { + text: { + input: { + normal: 0.85, + cached: 0, + }, + output: { + normal: 3.4, + }, + }, + image: 0, + }, + } as const +const DEEPCOGITO_COGITO_V2_PREVIEW_LLAMA_70B = { + id: 'deepcogito/cogito-v2-preview-llama-70b', + name: 'Deep Cogito: Cogito V2 Preview Llama 70B', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 131072, + pricing: { + text: { + input: { + normal: 0.88, + cached: 0, + }, + output: { + normal: 0.88, + }, + }, + image: 0, + }, + } as const +const MORPH_MORPH_V3_LARGE = { + id: 'morph/morph-v3-large', + name: 'Morph: Morph V3 Large', + supports: { + input: ['text'], + output: ['text'], + supports: ['max_tokens', 'stop', 'temperature'], + }, + context_window: 81920, + max_output_tokens: 131072, + pricing: { + text: { + input: { + normal: 0.9, + cached: 0, + }, + output: { + normal: 1.9, + }, + }, + image: 0, + }, + } as const +const ARCEE_AI_MAESTRO_REASONING = { + id: 'arcee-ai/maestro-reasoning', + name: 'Arcee AI: Maestro Reasoning', + supports: { + input: 
['text'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'stop', 'temperature', 'top_k', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 32000, + pricing: { + text: { + input: { + normal: 0.9, + cached: 0, + }, + output: { + normal: 3.3, + }, + }, + image: 0, + }, + } as const +const PERPLEXITY_SONAR = { + id: 'perplexity/sonar', + name: 'Perplexity: Sonar', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'temperature', 'top_k', 'top_p', 'web_search_options'], + }, + context_window: 127072, + pricing: { + text: { + input: { + normal: 1, + cached: 0, + }, + output: { + normal: 1, + }, + }, + image: 0, + }, + } as const +const NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B = { + id: 'nousresearch/hermes-3-llama-3.1-405b', + name: 'Nous: Hermes 3 405B Instruct', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'temperature', 'top_k', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 16384, + pricing: { + text: { + input: { + normal: 1, + cached: 0, + }, + output: { + normal: 1, + }, + }, + image: 0, + }, + } as const +const NEVERSLEEP_NOROMAID_20B = { + id: 'neversleep/noromaid-20b', + name: 'Noromaid 20B', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'logprobs', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'top_a', 'top_k', 'top_logprobs', 'top_p'], + }, + context_window: 8192, + pricing: { + text: { + input: { + normal: 1, + cached: 0, + }, + output: { + normal: 1.75, + }, + }, + image: 0, + }, + } as const +const OPENAI_GPT_3_5_TURBO_0613 = { + id: 'openai/gpt-3.5-turbo-0613', + name: 'OpenAI: GPT-3.5 Turbo (older v0613)', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'logit_bias', 'logprobs', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_logprobs', 'top_p'], + }, + context_window: 4095, + max_output_tokens: 4096, + pricing: { + text: { + input: { + normal: 1, + cached: 0, + }, + output: { + normal: 2, + }, + }, + image: 0, + }, + } as const +const RELACE_RELACE_SEARCH = { + id: 'relace/relace-search', + name: 'Relace: Relace Search', + supports: { + input: ['text'], + output: ['text'], + supports: ['max_tokens', 'seed', 'stop', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 256000, + max_output_tokens: 128000, + pricing: { + text: { + input: { + normal: 1, + cached: 0, + }, + output: { + normal: 3, + }, + }, + image: 0, + }, + } as const +const NOUSRESEARCH_HERMES_4_405B = { + id: 'nousresearch/hermes-4-405b', + name: 'Nous: Hermes 4 405B', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'temperature', 'top_k', 'top_p'], + }, + pricing: { + text: { + input: { + normal: 1, + cached: 0, + }, + output: { + normal: 3, + }, + }, + image: 0, + }, + } as const +const QWEN_QWEN3_CODER_PLUS = { + id: 'qwen/qwen3-coder-plus', + name: 'Qwen: Qwen3 Coder Plus', + supports: { + input: ['text'], + output: ['text'], + supports: ['max_tokens', 
'presence_penalty', 'response_format', 'seed', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 128000, + max_output_tokens: 65536, + pricing: { + text: { + input: { + normal: 1, + cached: 0.1, + }, + output: { + normal: 5, + }, + }, + image: 0, + }, + } as const +const ANTHROPIC_CLAUDE_HAIKU_4_5 = { + id: 'anthropic/claude-haiku-4.5', + name: 'Anthropic: Claude Haiku 4.5', + supports: { + input: ['image', 'text'], + output: ['text'], + supports: ['include_reasoning', 'max_tokens', 'reasoning', 'stop', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 200000, + max_output_tokens: 64000, + pricing: { + text: { + input: { + normal: 1, + cached: 1.35, + }, + output: { + normal: 5, + }, + }, + image: 0, + }, + } as const +const OPENAI_O4_MINI_HIGH = { + id: 'openai/o4-mini-high', + name: 'OpenAI: o4 Mini High', + supports: { + input: ['image', 'text', 'document'], + output: ['text'], + supports: ['include_reasoning', 'max_tokens', 'reasoning', 'response_format', 'seed', 'structured_outputs', 'tool_choice', 'tools'], + }, + context_window: 200000, + max_output_tokens: 100000, + pricing: { + text: { + input: { + normal: 1.1, + cached: 0.275, + }, + output: { + normal: 4.4, + }, + }, + image: 0, + }, + } as const +const OPENAI_O3_MINI_HIGH = { + id: 'openai/o3-mini-high', + name: 'OpenAI: o3 Mini High', + supports: { + input: ['text', 'document'], + output: ['text'], + supports: ['max_tokens', 'response_format', 'seed', 'structured_outputs', 'tool_choice', 'tools'], + }, + context_window: 200000, + max_output_tokens: 100000, + pricing: { + text: { + input: { + normal: 1.1, + cached: 0.55, + }, + output: { + normal: 4.4, + }, + }, + image: 0, + }, + } as const +const OPENAI_O3_MINI = { + id: 'openai/o3-mini', + name: 'OpenAI: o3 Mini', + supports: { + input: ['text', 'document'], + output: ['text'], + supports: ['max_tokens', 'response_format', 'seed', 'structured_outputs', 'tool_choice', 'tools'], + }, + context_window: 200000, + max_output_tokens: 100000, + pricing: { + text: { + input: { + normal: 1.1, + cached: 0.55, + }, + output: { + normal: 4.4, + }, + }, + image: 0, + }, + } as const +const OPENAI_O4_MINI = { + id: 'openai/o4-mini', + name: 'OpenAI: o4 Mini', + supports: { + input: ['image', 'text', 'document'], + output: ['text'], + supports: ['include_reasoning', 'max_tokens', 'reasoning', 'response_format', 'seed', 'structured_outputs', 'tool_choice', 'tools'], + }, + context_window: 200000, + max_output_tokens: 100000, + pricing: { + text: { + input: { + normal: 1.1, + cached: 0.275, + }, + output: { + normal: 4.4, + }, + }, + image: 0, + }, + } as const +const NVIDIA_LLAMA_3_1_NEMOTRON_70B_INSTRUCT = { + id: 'nvidia/llama-3.1-nemotron-70b-instruct', + name: 'NVIDIA: Llama 3.1 Nemotron 70B Instruct', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'], + }, + context_window: 131072, + max_output_tokens: 16384, + pricing: { + text: { + input: { + normal: 1.2, + cached: 0, + }, + output: { + normal: 1.2, + }, + }, + image: 0, + }, + } as const +const QWEN_QWEN3_MAX = { + id: 'qwen/qwen3-max', + name: 'Qwen: Qwen3 Max', + supports: { + input: ['text'], + output: ['text'], + supports: ['max_tokens', 'presence_penalty', 'response_format', 'seed', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 256000, + 
max_output_tokens: 32768, + pricing: { + text: { + input: { + normal: 1.2, + cached: 0.24, + }, + output: { + normal: 6, + }, + }, + image: 0, + }, + } as const +const DEEPCOGITO_COGITO_V2_1_671B = { + id: 'deepcogito/cogito-v2.1-671b', + name: 'Deep Cogito: Cogito v2.1 671B', + supports: { + input: ['text'], + output: ['text'], + supports: ['frequency_penalty', 'include_reasoning', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'stop', 'structured_outputs', 'temperature', 'top_k', 'top_p'], + }, + context_window: 128000, + pricing: { + text: { + input: { + normal: 1.25, + cached: 0, + }, + output: { + normal: 1.25, + }, + }, + image: 0, + }, + } as const +const GOOGLE_GEMINI_2_5_PRO_PREVIEW = { + id: 'google/gemini-2.5-pro-preview', + name: 'Google: Gemini 2.5 Pro Preview 06-05', + supports: { + input: ['document', 'image', 'text', 'audio'], + output: ['text'], + supports: ['include_reasoning', 'max_tokens', 'reasoning', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 1048576, + max_output_tokens: 65536, + pricing: { + text: { + input: { + normal: 1.25, + cached: 1.935, + }, + output: { + normal: 10, + }, + }, + image: 0.00516, + }, + } as const +const OPENAI_GPT_5_CHAT = { + id: 'openai/gpt-5-chat', + name: 'OpenAI: GPT-5 Chat', + supports: { + input: ['document', 'image', 'text'], + output: ['text'], + supports: ['max_tokens', 'response_format', 'seed', 'structured_outputs'], + }, + context_window: 128000, + max_output_tokens: 16384, + pricing: { + text: { + input: { + normal: 1.25, + cached: 0.125, + }, + output: { + normal: 10, + }, + }, + image: 0, + }, + } as const +const GOOGLE_GEMINI_2_5_PRO_PREVIEW_05_06 = { + id: 'google/gemini-2.5-pro-preview-05-06', + name: 'Google: Gemini 2.5 Pro Preview 05-06', + supports: { + input: ['text', 'image', 'document', 'audio', 'video'], + output: ['text'], + supports: ['include_reasoning', 'max_tokens', 'reasoning', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'], + }, + context_window: 1048576, + max_output_tokens: 65535, + pricing: { + text: { + input: { + normal: 1.25, + cached: 1.935, + }, + output: { + normal: 10, + }, + }, + image: 0.00516, + }, + } as const +const OPENAI_GPT_5 = { + id: 'openai/gpt-5', + name: 'OpenAI: GPT-5', + supports: { + input: ['text', 'image', 'document'], + output: ['text'], + supports: ['include_reasoning', 'max_tokens', 'reasoning', 'response_format', 'seed', 'structured_outputs', 'tool_choice', 'tools'], + }, + context_window: 400000, + max_output_tokens: 128000, + pricing: { + text: { + input: { + normal: 1.25, + cached: 0.125, + }, + output: { + normal: 10, + }, + }, + image: 0, + }, + } as const +const OPENAI_GPT_5_CODEX = { + id: 'openai/gpt-5-codex', + name: 'OpenAI: GPT-5 Codex', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['include_reasoning', 'max_tokens', 'reasoning', 'response_format', 'seed', 'structured_outputs', 'tool_choice', 'tools'], + }, + context_window: 400000, + max_output_tokens: 128000, + pricing: { + text: { + input: { + normal: 1.25, + cached: 0.125, + }, + output: { + normal: 10, + }, + }, + image: 0, + }, + } as const +const OPENAI_GPT_5_1_CODEX = { + id: 'openai/gpt-5.1-codex', + name: 'OpenAI: GPT-5.1-Codex', + supports: { + input: ['text', 'image'], + output: ['text'], + supports: ['include_reasoning', 'max_tokens', 'reasoning', 'response_format', 'seed', 
'structured_outputs', 'tool_choice', 'tools'],
+  },
+  context_window: 400000,
+  max_output_tokens: 128000,
+  pricing: {
+    text: {
+      input: {
+        normal: 1.25,
+        cached: 0.125,
+      },
+      output: {
+        normal: 10,
+      },
+    },
+    image: 0,
+  },
+} as const
+const OPENAI_GPT_5_1_CHAT = {
+  id: 'openai/gpt-5.1-chat',
+  name: 'OpenAI: GPT-5.1 Chat',
+  supports: {
+    input: ['document', 'image', 'text'],
+    output: ['text'],
+    supports: ['max_tokens', 'response_format', 'seed', 'structured_outputs', 'tool_choice', 'tools'],
+  },
+  context_window: 128000,
+  max_output_tokens: 16384,
+  pricing: {
+    text: {
+      input: {
+        normal: 1.25,
+        cached: 0.125,
+      },
+      output: {
+        normal: 10,
+      },
+    },
+    image: 0,
+  },
+} as const
+const GOOGLE_GEMINI_2_5_PRO = {
+  id: 'google/gemini-2.5-pro',
+  name: 'Google: Gemini 2.5 Pro',
+  supports: {
+    input: ['text', 'image', 'document', 'audio', 'video'],
+    output: ['text'],
+    supports: ['include_reasoning', 'max_tokens', 'reasoning', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'],
+  },
+  context_window: 1048576,
+  max_output_tokens: 65536,
+  pricing: {
+    text: {
+      input: {
+        normal: 1.25,
+        cached: 1.75,
+      },
+      output: {
+        normal: 10,
+      },
+    },
+    image: 0.00516,
+  },
+} as const
+const OPENAI_GPT_5_1_CODEX_MAX = {
+  id: 'openai/gpt-5.1-codex-max',
+  name: 'OpenAI: GPT-5.1-Codex-Max',
+  supports: {
+    input: ['text', 'image'],
+    output: ['text'],
+    supports: ['include_reasoning', 'max_tokens', 'reasoning', 'response_format', 'seed', 'structured_outputs', 'tool_choice', 'tools'],
+  },
+  context_window: 400000,
+  max_output_tokens: 128000,
+  pricing: {
+    text: {
+      input: {
+        normal: 1.25,
+        cached: 0.125,
+      },
+      output: {
+        normal: 10,
+      },
+    },
+    image: 0,
+  },
+} as const
+const OPENAI_GPT_5_1 = {
+  id: 'openai/gpt-5.1',
+  name: 'OpenAI: GPT-5.1',
+  supports: {
+    input: ['image', 'text', 'document'],
+    output: ['text'],
+    supports: ['include_reasoning', 'max_tokens', 'reasoning', 'response_format', 'seed', 'structured_outputs', 'tool_choice', 'tools'],
+  },
+  context_window: 400000,
+  max_output_tokens: 128000,
+  pricing: {
+    text: {
+      input: {
+        normal: 1.25,
+        cached: 0.125,
+      },
+      output: {
+        normal: 10,
+      },
+    },
+    image: 0,
+  },
+} as const
+const SAO10K_L3_EURYALE_70B = {
+  id: 'sao10k/l3-euryale-70b',
+  name: 'Sao10k: Llama 3 Euryale 70B v2.1',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'repetition_penalty', 'seed', 'stop', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'],
+  },
+  context_window: 8192,
+  max_output_tokens: 8192,
+  pricing: {
+    text: {
+      input: {
+        normal: 1.48,
+        cached: 0,
+      },
+      output: {
+        normal: 1.48,
+      },
+    },
+    image: 0,
+  },
+} as const
+const OPENAI_GPT_3_5_TURBO_INSTRUCT = {
+  id: 'openai/gpt-3.5-turbo-instruct',
+  name: 'OpenAI: GPT-3.5 Turbo Instruct',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    supports: ['frequency_penalty', 'logit_bias', 'logprobs', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'top_logprobs', 'top_p'],
+  },
+  context_window: 4095,
+  max_output_tokens: 4096,
+  pricing: {
+    text: {
+      input: {
+        normal: 1.5,
+        cached: 0,
+      },
+      output: {
+        normal: 2,
+      },
+    },
+    image: 0,
+  },
+} as const
+const OPENAI_CODEX_MINI = {
+  id: 'openai/codex-mini',
+  name: 'OpenAI: Codex Mini',
+  supports: {
+    input: ['image', 'text'],
+    output: ['text'],
+    supports: ['include_reasoning', 'max_tokens', 'reasoning', 'response_format', 'seed', 'structured_outputs', 'tool_choice', 'tools'],
+  },
+  context_window: 200000,
+  max_output_tokens: 100000,
+  pricing: {
+    text: {
+      input: {
+        normal: 1.5,
+        cached: 0.375,
+      },
+      output: {
+        normal: 6,
+      },
+    },
+    image: 0,
+  },
+} as const
+const QWEN_QWEN_MAX = {
+  id: 'qwen/qwen-max',
+  name: 'Qwen: Qwen-Max ',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    supports: ['max_tokens', 'presence_penalty', 'response_format', 'seed', 'temperature', 'tool_choice', 'tools', 'top_p'],
+  },
+  context_window: 32768,
+  max_output_tokens: 8192,
+  pricing: {
+    text: {
+      input: {
+        normal: 1.6,
+        cached: 0.64,
+      },
+      output: {
+        normal: 6.4,
+      },
+    },
+    image: 0,
+  },
+} as const
+const OPENAI_GPT_5_2 = {
+  id: 'openai/gpt-5.2',
+  name: 'OpenAI: GPT-5.2',
+  supports: {
+    input: ['document', 'image', 'text'],
+    output: ['text'],
+    supports: ['include_reasoning', 'max_tokens', 'reasoning', 'response_format', 'seed', 'structured_outputs', 'tool_choice', 'tools'],
+  },
+  context_window: 400000,
+  max_output_tokens: 128000,
+  pricing: {
+    text: {
+      input: {
+        normal: 1.75,
+        cached: 0.175,
+      },
+      output: {
+        normal: 14,
+      },
+    },
+    image: 0,
+  },
+} as const
+const OPENAI_GPT_5_2_CHAT = {
+  id: 'openai/gpt-5.2-chat',
+  name: 'OpenAI: GPT-5.2 Chat',
+  supports: {
+    input: ['document', 'image', 'text'],
+    output: ['text'],
+    supports: ['max_tokens', 'response_format', 'seed', 'structured_outputs', 'tool_choice', 'tools'],
+  },
+  context_window: 128000,
+  max_output_tokens: 16384,
+  pricing: {
+    text: {
+      input: {
+        normal: 1.75,
+        cached: 0.175,
+      },
+      output: {
+        normal: 14,
+      },
+    },
+    image: 0,
+  },
+} as const
+const MISTRALAI_MISTRAL_LARGE_2411 = {
+  id: 'mistralai/mistral-large-2411',
+  name: 'Mistral Large 2411',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'],
+  },
+  context_window: 128000,
+  pricing: {
+    text: {
+      input: {
+        normal: 2,
+        cached: 0,
+      },
+      output: {
+        normal: 6,
+      },
+    },
+    image: 0,
+  },
+} as const
+const MISTRALAI_PIXTRAL_LARGE_2411 = {
+  id: 'mistralai/pixtral-large-2411',
+  name: 'Mistral: Pixtral Large 2411',
+  supports: {
+    input: ['text', 'image'],
+    output: ['text'],
+    supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'],
+  },
+  context_window: 128000,
+  pricing: {
+    text: {
+      input: {
+        normal: 2,
+        cached: 0,
+      },
+      output: {
+        normal: 6,
+      },
+    },
+    image: 0,
+  },
+} as const
+const MISTRALAI_MIXTRAL_8X22B_INSTRUCT = {
+  id: 'mistralai/mixtral-8x22b-instruct',
+  name: 'Mistral: Mixtral 8x22B Instruct',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'],
+  },
+  context_window: 65536,
+  pricing: {
+    text: {
+      input: {
+        normal: 2,
+        cached: 0,
+      },
+      output: {
+        normal: 6,
+      },
+    },
+    image: 0,
+  },
+} as const
+const MISTRALAI_MISTRAL_LARGE = {
+  id: 'mistralai/mistral-large',
+  name: 'Mistral Large',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'],
+  },
+  context_window: 128000,
+  pricing: {
+    text: {
+      input: {
+        normal: 2,
+        cached: 0,
+      },
+      output: {
+        normal: 6,
+      },
+    },
+    image: 0,
+  },
+} as const
+const MISTRALAI_MISTRAL_LARGE_2407 = {
+  id: 'mistralai/mistral-large-2407',
+  name: 'Mistral Large 2407',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'],
+  },
+  context_window: 128000,
+  pricing: {
+    text: {
+      input: {
+        normal: 2,
+        cached: 0,
+      },
+      output: {
+        normal: 6,
+      },
+    },
+    image: 0,
+  },
+} as const
+const PERPLEXITY_SONAR_DEEP_RESEARCH = {
+  id: 'perplexity/sonar-deep-research',
+  name: 'Perplexity: Sonar Deep Research',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'presence_penalty', 'reasoning', 'temperature', 'top_k', 'top_p', 'web_search_options'],
+  },
+  context_window: 128000,
+  pricing: {
+    text: {
+      input: {
+        normal: 2,
+        cached: 0,
+      },
+      output: {
+        normal: 8,
+      },
+    },
+    image: 0,
+  },
+} as const
+const PERPLEXITY_SONAR_REASONING_PRO = {
+  id: 'perplexity/sonar-reasoning-pro',
+  name: 'Perplexity: Sonar Reasoning Pro',
+  supports: {
+    input: ['text', 'image'],
+    output: ['text'],
+    supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'presence_penalty', 'reasoning', 'temperature', 'top_k', 'top_p', 'web_search_options'],
+  },
+  context_window: 128000,
+  pricing: {
+    text: {
+      input: {
+        normal: 2,
+        cached: 0,
+      },
+      output: {
+        normal: 8,
+      },
+    },
+    image: 0,
+  },
+} as const
+const OPENAI_GPT_4_1 = {
+  id: 'openai/gpt-4.1',
+  name: 'OpenAI: GPT-4.1',
+  supports: {
+    input: ['image', 'text', 'document'],
+    output: ['text'],
+    supports: ['max_tokens', 'response_format', 'seed', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'],
+  },
+  context_window: 1047576,
+  max_output_tokens: 32768,
+  pricing: {
+    text: {
+      input: {
+        normal: 2,
+        cached: 0.5,
+      },
+      output: {
+        normal: 8,
+      },
+    },
+    image: 0,
+  },
+} as const
+const OPENAI_O3 = {
+  id: 'openai/o3',
+  name: 'OpenAI: o3',
+  supports: {
+    input: ['image', 'text', 'document'],
+    output: ['text'],
+    supports: ['include_reasoning', 'max_tokens', 'reasoning', 'response_format', 'seed', 'structured_outputs', 'tool_choice', 'tools'],
+  },
+  context_window: 200000,
+  max_output_tokens: 100000,
+  pricing: {
+    text: {
+      input: {
+        normal: 2,
+        cached: 0.5,
+      },
+      output: {
+        normal: 8,
+      },
+    },
+    image: 0,
+  },
+} as const
+const AI21_JAMBA_LARGE_1_7 = {
+  id: 'ai21/jamba-large-1.7',
+  name: 'AI21: Jamba Large 1.7',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    supports: ['max_tokens', 'response_format', 'stop', 'temperature', 'tool_choice', 'tools', 'top_p'],
+  },
+  context_window: 256000,
+  max_output_tokens: 4096,
+  pricing: {
+    text: {
+      input: {
+        normal: 2,
+        cached: 0,
+      },
+      output: {
+        normal: 8,
+      },
+    },
+    image: 0,
+  },
+} as const
+const OPENAI_O4_MINI_DEEP_RESEARCH = {
+  id: 'openai/o4-mini-deep-research',
+  name: 'OpenAI: o4 Mini Deep Research',
+  supports: {
+    input: ['document', 'image', 'text'],
+    output: ['text'],
+    supports: ['frequency_penalty', 'include_reasoning', 'logit_bias', 'logprobs', 'max_tokens', 'presence_penalty', 'reasoning', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_logprobs', 'top_p'],
+  },
+  context_window: 200000,
+  max_output_tokens: 100000,
+  pricing: {
+    text: {
+      input: {
+        normal: 2,
+        cached: 0.5,
+      },
+      output: {
+        normal: 8,
+      },
+    },
+    image: 0,
+  },
+} as const
+const GOOGLE_GEMINI_3_PRO_IMAGE_PREVIEW = {
+  id: 'google/gemini-3-pro-image-preview',
+  name: 'Google: Nano Banana Pro (Gemini 3 Pro Image Preview)',
+  supports: {
+    input: ['image', 'text'],
+    output: ['image', 'text'],
+    supports: ['include_reasoning', 'max_tokens', 'reasoning', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'top_p'],
+  },
+  context_window: 65536,
+  max_output_tokens: 32768,
+  pricing: {
+    text: {
+      input: {
+        normal: 2,
+        cached: 0,
+      },
+      output: {
+        normal: 12,
+      },
+    },
+    image: 0.067,
+  },
+} as const
+const GOOGLE_GEMINI_3_PRO_PREVIEW = {
+  id: 'google/gemini-3-pro-preview',
+  name: 'Google: Gemini 3 Pro Preview',
+  supports: {
+    input: ['text', 'image', 'document', 'audio', 'video'],
+    output: ['text'],
+    supports: ['include_reasoning', 'max_tokens', 'reasoning', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_p'],
+  },
+  context_window: 1048576,
+  max_output_tokens: 65536,
+  pricing: {
+    text: {
+      input: {
+        normal: 2,
+        cached: 2.575,
+      },
+      output: {
+        normal: 12,
+      },
+    },
+    image: 0.008256,
+  },
+} as const
+const OPENAI_GPT_5_IMAGE_MINI = {
+  id: 'openai/gpt-5-image-mini',
+  name: 'OpenAI: GPT-5 Image Mini',
+  supports: {
+    input: ['document', 'image', 'text'],
+    output: ['image', 'text'],
+    supports: ['frequency_penalty', 'include_reasoning', 'logit_bias', 'logprobs', 'max_tokens', 'presence_penalty', 'reasoning', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_logprobs', 'top_p'],
+  },
+  context_window: 400000,
+  max_output_tokens: 128000,
+  pricing: {
+    text: {
+      input: {
+        normal: 2.5,
+        cached: 0.25,
+      },
+      output: {
+        normal: 2,
+      },
+    },
+    image: 0,
+  },
+} as const
+const OPENAI_GPT_4O_SEARCH_PREVIEW = {
+  id: 'openai/gpt-4o-search-preview',
+  name: 'OpenAI: GPT-4o Search Preview',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    supports: ['max_tokens', 'response_format', 'structured_outputs', 'web_search_options'],
+  },
+  context_window: 128000,
+  max_output_tokens: 16384,
+  pricing: {
+    text: {
+      input: {
+        normal: 2.5,
+        cached: 0,
+      },
+      output: {
+        normal: 10,
+      },
+    },
+    image: 0,
+  },
+} as const
+const OPENAI_GPT_4O_AUDIO_PREVIEW = {
+  id: 'openai/gpt-4o-audio-preview',
+  name: 'OpenAI: GPT-4o Audio',
+  supports: {
+    input: ['audio', 'text'],
+    output: ['text'],
+    supports: ['frequency_penalty', 'logit_bias', 'logprobs', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_logprobs', 'top_p'],
+  },
+  context_window: 128000,
+  max_output_tokens: 16384,
+  pricing: {
+    text: {
+      input: {
+        normal: 2.5,
+        cached: 0,
+      },
+      output: {
+        normal: 10,
+      },
+    },
+    image: 0,
+  },
+} as const
+const COHERE_COMMAND_A = {
+  id: 'cohere/command-a',
+  name: 'Cohere: Command A',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'top_k', 'top_p'],
+  },
+  context_window: 256000,
+  max_output_tokens: 8192,
+  pricing: {
+    text: {
+      input: {
+        normal: 2.5,
+        cached: 0,
+      },
+      output: {
+        normal: 10,
+      },
+    },
+    image: 0,
+  },
+} as const
+const INFLECTION_INFLECTION_3_PI = {
+  id: 'inflection/inflection-3-pi',
+  name: 'Inflection: Inflection 3 Pi',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    supports: ['max_tokens', 'stop', 'temperature', 'top_p'],
+  },
+  context_window: 8000,
+  max_output_tokens: 1024,
+  pricing: {
+    text: {
+      input: {
+        normal: 2.5,
+        cached: 0,
+      },
+      output: {
+        normal: 10,
+      },
+    },
+    image: 0,
+  },
+} as const
+const INFLECTION_INFLECTION_3_PRODUCTIVITY = {
+  id: 'inflection/inflection-3-productivity',
+  name: 'Inflection: Inflection 3 Productivity',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    supports: ['max_tokens', 'stop', 'temperature', 'top_p'],
+  },
+  context_window: 8000,
+  max_output_tokens: 1024,
+  pricing: {
+    text: {
+      input: {
+        normal: 2.5,
+        cached: 0,
+      },
+      output: {
+        normal: 10,
+      },
+    },
+    image: 0,
+  },
+} as const
+const OPENAI_GPT_4O_2024_11_20 = {
+  id: 'openai/gpt-4o-2024-11-20',
+  name: 'OpenAI: GPT-4o (2024-11-20)',
+  supports: {
+    input: ['text', 'image', 'document'],
+    output: ['text'],
+    supports: ['frequency_penalty', 'logit_bias', 'logprobs', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_logprobs', 'top_p', 'web_search_options'],
+  },
+  context_window: 128000,
+  max_output_tokens: 16384,
+  pricing: {
+    text: {
+      input: {
+        normal: 2.5,
+        cached: 1.25,
+      },
+      output: {
+        normal: 10,
+      },
+    },
+    image: 0,
+  },
+} as const
+const OPENAI_GPT_4O_2024_08_06 = {
+  id: 'openai/gpt-4o-2024-08-06',
+  name: 'OpenAI: GPT-4o (2024-08-06)',
+  supports: {
+    input: ['text', 'image', 'document'],
+    output: ['text'],
+    supports: ['frequency_penalty', 'logit_bias', 'logprobs', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_logprobs', 'top_p', 'web_search_options'],
+  },
+  context_window: 128000,
+  max_output_tokens: 16384,
+  pricing: {
+    text: {
+      input: {
+        normal: 2.5,
+        cached: 1.25,
+      },
+      output: {
+        normal: 10,
+      },
+    },
+    image: 0.003613,
+  },
+} as const
+const OPENAI_GPT_4O = {
+  id: 'openai/gpt-4o',
+  name: 'OpenAI: GPT-4o',
+  supports: {
+    input: ['text', 'image', 'document'],
+    output: ['text'],
+    supports: ['frequency_penalty', 'logit_bias', 'logprobs', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_logprobs', 'top_p', 'web_search_options'],
+  },
+  context_window: 128000,
+  max_output_tokens: 16384,
+  pricing: {
+    text: {
+      input: {
+        normal: 2.5,
+        cached: 1.25,
+      },
+      output: {
+        normal: 10,
+      },
+    },
+    image: 0,
+  },
+} as const
+const COHERE_COMMAND_R_PLUS_08_2024 = {
+  id: 'cohere/command-r-plus-08-2024',
+  name: 'Cohere: Command R+ (08-2024)',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'],
+  },
+  context_window: 128000,
+  max_output_tokens: 4000,
+  pricing: {
+    text: {
+      input: {
+        normal: 2.5,
+        cached: 0,
+      },
+      output: {
+        normal: 10,
+      },
+    },
+    image: 0,
+  },
+} as const
+const AMAZON_NOVA_PREMIER_V1 = {
+  id: 'amazon/nova-premier-v1',
+  name: 'Amazon: Nova Premier 1.0',
+  supports: {
+    input: ['text', 'image'],
+    output: ['text'],
+    supports: ['max_tokens', 'stop', 'temperature', 'tools', 'top_k', 'top_p'],
+  },
+  context_window: 1000000,
+  max_output_tokens: 32000,
+  pricing: {
+    text: {
+      input: {
+        normal: 2.5,
+        cached: 0.625,
+      },
+      output: {
+        normal: 12.5,
+      },
+    },
+    image: 0,
+  },
+} as const
+const SAO10K_L3_1_70B_HANAMI_X1 = {
+  id: 'sao10k/l3.1-70b-hanami-x1',
+  name: 'Sao10K: Llama 3.1 70B Hanami x1',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'seed', 'stop', 'temperature', 'top_k', 'top_p'],
+  },
+  context_window: 16000,
+  pricing: {
+    text: {
+      input: {
+        normal: 3,
+        cached: 0,
+      },
+      output: {
+        normal: 3,
+      },
+    },
+    image: 0,
+  },
+} as const
+const OPENAI_GPT_3_5_TURBO_16K = {
+  id: 'openai/gpt-3.5-turbo-16k',
+  name: 'OpenAI: GPT-3.5 Turbo 16k',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    supports: ['frequency_penalty', 'logit_bias', 'logprobs', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_logprobs', 'top_p'],
+  },
+  context_window: 16385,
+  max_output_tokens: 4096,
+  pricing: {
+    text: {
+      input: {
+        normal: 3,
+        cached: 0,
+      },
+      output: {
+        normal: 4,
+      },
+    },
+    image: 0,
+  },
+} as const
+const ANTHRACITE_ORG_MAGNUM_V4_72B = {
+  id: 'anthracite-org/magnum-v4-72b',
+  name: 'Magnum v4 72B',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    supports: ['frequency_penalty', 'logit_bias', 'logprobs', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'temperature', 'top_a', 'top_k', 'top_logprobs', 'top_p'],
+  },
+  context_window: 32768,
+  max_output_tokens: 2048,
+  pricing: {
+    text: {
+      input: {
+        normal: 3,
+        cached: 0,
+      },
+      output: {
+        normal: 5,
+      },
+    },
+    image: 0,
+  },
+} as const
+const ANTHROPIC_CLAUDE_SONNET_4_5 = {
+  id: 'anthropic/claude-sonnet-4.5',
+  name: 'Anthropic: Claude Sonnet 4.5',
+  supports: {
+    input: ['text', 'image', 'document'],
+    output: ['text'],
+    supports: ['include_reasoning', 'max_tokens', 'reasoning', 'response_format', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'],
+  },
+  context_window: 1000000,
+  max_output_tokens: 64000,
+  pricing: {
+    text: {
+      input: {
+        normal: 3,
+        cached: 4.05,
+      },
+      output: {
+        normal: 15,
+      },
+    },
+    image: 0,
+  },
+} as const
+const X_AI_GROK_3 = {
+  id: 'x-ai/grok-3',
+  name: 'xAI: Grok 3',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    supports: ['frequency_penalty', 'logprobs', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_logprobs', 'top_p'],
+  },
+  context_window: 131072,
+  pricing: {
+    text: {
+      input: {
+        normal: 3,
+        cached: 0.75,
+      },
+      output: {
+        normal: 15,
+      },
+    },
+    image: 0,
+  },
+} as const
+const ANTHROPIC_CLAUDE_3_7_SONNET_THINKING = {
+  id: 'anthropic/claude-3.7-sonnet:thinking',
+  name: 'Anthropic: Claude 3.7 Sonnet (thinking)',
+  supports: {
+    input: ['text', 'image', 'document'],
+    output: ['text'],
+    supports: ['include_reasoning', 'max_tokens', 'reasoning', 'stop', 'temperature', 'tool_choice', 'tools', 'top_p'],
+  },
+  context_window: 200000,
+  max_output_tokens: 64000,
+  pricing: {
+    text: {
+      input: {
+        normal: 3,
+        cached: 4.05,
+      },
+      output: {
+        normal: 15,
+      },
+    },
+    image: 0.0048,
+  },
+} as const
+const X_AI_GROK_4 = {
+  id: 'x-ai/grok-4',
+  name: 'xAI: Grok 4',
+  supports: {
+    input: ['image', 'text'],
+    output: ['text'],
+    supports: ['include_reasoning', 'logprobs', 'max_tokens', 'reasoning', 'response_format', 'seed', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_logprobs', 'top_p'],
+  },
+  context_window: 256000,
+  pricing: {
+    text: {
+      input: {
+        normal: 3,
+        cached: 0.75,
+      },
+      output: {
+        normal: 15,
+      },
+    },
+    image: 0,
+  },
+} as const
+const PERPLEXITY_SONAR_PRO = {
+  id: 'perplexity/sonar-pro',
+  name: 'Perplexity: Sonar Pro',
+  supports: {
+    input: ['text', 'image'],
+    output: ['text'],
+    supports: ['frequency_penalty', 'max_tokens', 'presence_penalty', 'temperature', 'top_k', 'top_p', 'web_search_options'],
+  },
+  context_window: 200000,
+  max_output_tokens: 8000,
+  pricing: {
+    text: {
+      input: {
+        normal: 3,
+        cached: 0,
+      },
+      output: {
+        normal: 15,
+      },
+    },
+    image: 0,
+  },
+} as const
+const X_AI_GROK_3_BETA = {
+  id: 'x-ai/grok-3-beta',
+  name: 'xAI: Grok 3 Beta',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    supports: ['frequency_penalty', 'logprobs', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'temperature', 'tool_choice', 'tools', 'top_logprobs', 'top_p'],
+  },
+  context_window: 131072,
+  pricing: {
+    text: {
+      input: {
+        normal: 3,
+        cached: 0.75,
+      },
+      output: {
+        normal: 15,
+      },
+    },
+    image: 0,
+  },
+} as const
+const ANTHROPIC_CLAUDE_SONNET_4 = {
+  id: 'anthropic/claude-sonnet-4',
+  name: 'Anthropic: Claude Sonnet 4',
+  supports: {
+    input: ['image', 'text', 'document'],
+    output: ['text'],
+    supports: ['include_reasoning', 'max_tokens', 'reasoning', 'stop', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'],
+  },
+  context_window: 1000000,
+  max_output_tokens: 64000,
+  pricing: {
+    text: {
+      input: {
+        normal: 3,
+        cached: 4.05,
+      },
+      output: {
+        normal: 15,
+      },
+    },
+    image: 0.0048,
+  },
+} as const
+const ANTHROPIC_CLAUDE_3_7_SONNET = {
+  id: 'anthropic/claude-3.7-sonnet',
+  name: 'Anthropic: Claude 3.7 Sonnet',
+  supports: {
+    input: ['text', 'image', 'document'],
+    output: ['text'],
+    supports: ['include_reasoning', 'max_tokens', 'reasoning', 'stop', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'],
+  },
+  context_window: 200000,
+  max_output_tokens: 64000,
+  pricing: {
+    text: {
+      input: {
+        normal: 3,
+        cached: 4.05,
+      },
+      output: {
+        normal: 15,
+      },
+    },
+    image: 0.0048,
+  },
+} as const
+const PERPLEXITY_SONAR_PRO_SEARCH = {
+  id: 'perplexity/sonar-pro-search',
+  name: 'Perplexity: Sonar Pro Search',
+  supports: {
+    input: ['text', 'image'],
+    output: ['text'],
+    supports: ['frequency_penalty', 'include_reasoning', 'max_tokens', 'presence_penalty', 'reasoning', 'structured_outputs', 'temperature', 'top_k', 'top_p', 'web_search_options'],
+  },
+  context_window: 200000,
+  max_output_tokens: 8000,
+  pricing: {
+    text: {
+      input: {
+        normal: 3,
+        cached: 0,
+      },
+      output: {
+        normal: 15,
+      },
+    },
+    image: 0,
+  },
+} as const
+const META_LLAMA_LLAMA_3_1_405B_INSTRUCT = {
+  id: 'meta-llama/llama-3.1-405b-instruct',
+  name: 'Meta: Llama 3.1 405B Instruct',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'],
+  },
+  context_window: 131072,
+  pricing: {
+    text: {
+      input: {
+        normal: 3.5,
+        cached: 0,
+      },
+      output: {
+        normal: 3.5,
+      },
+    },
+    image: 0,
+  },
+} as const
+const DEEPCOGITO_COGITO_V2_PREVIEW_LLAMA_405B = {
+  id: 'deepcogito/cogito-v2-preview-llama-405b',
+  name: 'Deep Cogito: Cogito V2 Preview Llama 405B',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    supports: ['frequency_penalty', 'include_reasoning', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'reasoning', 'repetition_penalty', 'response_format', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'],
+  },
+  context_window: 131072,
+  pricing: {
+    text: {
+      input: {
+        normal: 3.5,
+        cached: 0,
+      },
+      output: {
+        normal: 3.5,
+      },
+    },
+    image: 0,
+  },
+} as const
+const META_LLAMA_LLAMA_3_1_405B = {
+  id: 'meta-llama/llama-3.1-405b',
+  name: 'Meta: Llama 3.1 405B (base)',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'seed', 'stop', 'temperature', 'top_k', 'top_p'],
+  },
+  context_window: 131072,
+  max_output_tokens: 32768,
+  pricing: {
+    text: {
+      input: {
+        normal: 4,
+        cached: 0,
+      },
+      output: {
+        normal: 4,
+      },
+    },
+    image: 0,
+  },
+} as const
+const AION_LABS_AION_1_0 = {
+  id: 'aion-labs/aion-1.0',
+  name: 'AionLabs: Aion-1.0',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    supports: ['include_reasoning', 'max_tokens', 'reasoning', 'temperature', 'top_p'],
+  },
+  context_window: 32768,
+  max_output_tokens: 32768,
+  pricing: {
+    text: {
+      input: {
+        normal: 4,
+        cached: 0,
+      },
+      output: {
+        normal: 8,
+      },
+    },
+    image: 0,
+  },
+} as const
+const RAIFLE_SORCERERLM_8X22B = {
+  id: 'raifle/sorcererlm-8x22b',
+  name: 'SorcererLM 8x22B',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    supports: ['frequency_penalty', 'logit_bias', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'seed', 'stop', 'temperature', 'top_k', 'top_p'],
+  },
+  context_window: 16000,
+  pricing: {
+    text: {
+      input: {
+        normal: 4.5,
+        cached: 0,
+      },
+      output: {
+        normal: 4.5,
+      },
+    },
+    image: 0,
+  },
+} as const
+const OPENAI_CHATGPT_4O_LATEST = {
+  id: 'openai/chatgpt-4o-latest',
+  name: 'OpenAI: ChatGPT-4o',
+  supports: {
+    input: ['text', 'image'],
+    output: ['text'],
+    supports: ['frequency_penalty', 'logit_bias', 'logprobs', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'top_logprobs', 'top_p'],
+  },
+  context_window: 128000,
+  max_output_tokens: 16384,
+  pricing: {
+    text: {
+      input: {
+        normal: 5,
+        cached: 0,
+      },
+      output: {
+        normal: 15,
+      },
+    },
+    image: 0,
+  },
+} as const
+const OPENAI_GPT_4O_2024_05_13 = {
+  id: 'openai/gpt-4o-2024-05-13',
+  name: 'OpenAI: GPT-4o (2024-05-13)',
+  supports: {
+    input: ['text', 'image', 'document'],
+    output: ['text'],
+    supports: ['frequency_penalty', 'logit_bias', 'logprobs', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_logprobs', 'top_p', 'web_search_options'],
+  },
+  context_window: 128000,
+  max_output_tokens: 4096,
+  pricing: {
+    text: {
+      input: {
+        normal: 5,
+        cached: 0,
+      },
+      output: {
+        normal: 15,
+      },
+    },
+    image: 0,
+  },
+} as const
+const ANTHROPIC_CLAUDE_OPUS_4_5 = {
+  id: 'anthropic/claude-opus-4.5',
+  name: 'Anthropic: Claude Opus 4.5',
+  supports: {
+    input: ['document', 'image', 'text'],
+    output: ['text'],
+    supports: ['include_reasoning', 'max_tokens', 'reasoning', 'response_format', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'verbosity'],
+  },
+  context_window: 200000,
+  max_output_tokens: 64000,
+  pricing: {
+    text: {
+      input: {
+        normal: 5,
+        cached: 6.75,
+      },
+      output: {
+        normal: 25,
+      },
+    },
+    image: 0,
+  },
+} as const
+const ALPINDALE_GOLIATH_120B = {
+  id: 'alpindale/goliath-120b',
+  name: 'Goliath 120B',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    supports: ['frequency_penalty', 'logit_bias', 'logprobs', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'temperature', 'top_a', 'top_k', 'top_logprobs', 'top_p'],
+  },
+  context_window: 6144,
+  max_output_tokens: 1024,
+  pricing: {
+    text: {
+      input: {
+        normal: 6,
+        cached: 0,
+      },
+      output: {
+        normal: 8,
+      },
+    },
+    image: 0,
+  },
+} as const
+const OPENAI_GPT_4O_EXTENDED = {
+  id: 'openai/gpt-4o:extended',
+  name: 'OpenAI: GPT-4o (extended)',
+  supports: {
+    input: ['text', 'image', 'document'],
+    output: ['text'],
+    supports: ['frequency_penalty', 'logit_bias', 'logprobs', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_logprobs', 'top_p', 'web_search_options'],
+  },
+  context_window: 128000,
+  max_output_tokens: 64000,
+  pricing: {
+    text: {
+      input: {
+        normal: 6,
+        cached: 0,
+      },
+      output: {
+        normal: 18,
+      },
+    },
+    image: 0,
+  },
+} as const
+const ANTHROPIC_CLAUDE_3_5_SONNET = {
+  id: 'anthropic/claude-3.5-sonnet',
+  name: 'Anthropic: Claude 3.5 Sonnet',
+  supports: {
+    input: ['text', 'image', 'document'],
+    output: ['text'],
+    supports: ['max_tokens', 'stop', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'],
+  },
+  context_window: 200000,
+  max_output_tokens: 8192,
+  pricing: {
+    text: {
+      input: {
+        normal: 6,
+        cached: 0,
+      },
+      output: {
+        normal: 30,
+      },
+    },
+    image: 0,
+  },
+} as const
+const OPENAI_GPT_5_IMAGE = {
+  id: 'openai/gpt-5-image',
+  name: 'OpenAI: GPT-5 Image',
+  supports: {
+    input: ['image', 'text', 'document'],
+    output: ['image', 'text'],
+    supports: ['frequency_penalty', 'include_reasoning', 'logit_bias', 'logprobs', 'max_tokens', 'presence_penalty', 'reasoning', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_logprobs', 'top_p'],
+  },
+  context_window: 400000,
+  max_output_tokens: 128000,
+  pricing: {
+    text: {
+      input: {
+        normal: 10,
+        cached: 1.25,
+      },
+      output: {
+        normal: 10,
+      },
+    },
+    image: 0,
+  },
+} as const
+const OPENAI_GPT_4_1106_PREVIEW = {
+  id: 'openai/gpt-4-1106-preview',
+  name: 'OpenAI: GPT-4 Turbo (older v1106)',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    supports: ['frequency_penalty', 'logit_bias', 'logprobs', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_logprobs', 'top_p'],
+  },
+  context_window: 128000,
+  max_output_tokens: 4096,
+  pricing: {
+    text: {
+      input: {
+        normal: 10,
+        cached: 0,
+      },
+      output: {
+        normal: 30,
+      },
+    },
+    image: 0,
+  },
+} as const
+const OPENAI_GPT_4_TURBO = {
+  id: 'openai/gpt-4-turbo',
+  name: 'OpenAI: GPT-4 Turbo',
+  supports: {
+    input: ['text', 'image'],
+    output: ['text'],
+    supports: ['frequency_penalty', 'logit_bias', 'logprobs', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_logprobs', 'top_p'],
+  },
+  context_window: 128000,
+  max_output_tokens: 4096,
+  pricing: {
+    text: {
+      input: {
+        normal: 10,
+        cached: 0,
+      },
+      output: {
+        normal: 30,
+      },
+    },
+    image: 0,
+  },
+} as const
+const OPENAI_GPT_4_TURBO_PREVIEW = {
+  id: 'openai/gpt-4-turbo-preview',
+  name: 'OpenAI: GPT-4 Turbo Preview',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    supports: ['frequency_penalty', 'logit_bias', 'logprobs', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_logprobs', 'top_p'],
+  },
+  context_window: 128000,
+  max_output_tokens: 4096,
+  pricing: {
+    text: {
+      input: {
+        normal: 10,
+        cached: 0,
+      },
+      output: {
+        normal: 30,
+      },
+    },
+    image: 0,
+  },
+} as const
+const OPENAI_O3_DEEP_RESEARCH = {
+  id: 'openai/o3-deep-research',
+  name: 'OpenAI: o3 Deep Research',
+  supports: {
+    input: ['image', 'text', 'document'],
+    output: ['text'],
+    supports: ['frequency_penalty', 'include_reasoning', 'logit_bias', 'logprobs', 'max_tokens', 'presence_penalty', 'reasoning', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_logprobs', 'top_p'],
+  },
+  context_window: 200000,
+  max_output_tokens: 100000,
+  pricing: {
+    text: {
+      input: {
+        normal: 10,
+        cached: 2.5,
+      },
+      output: {
+        normal: 40,
+      },
+    },
+    image: 0,
+  },
+} as const
+const OPENAI_O1 = {
+  id: 'openai/o1',
+  name: 'OpenAI: o1',
+  supports: {
+    input: ['text', 'image', 'document'],
+    output: ['text'],
+    supports: ['max_tokens', 'response_format', 'seed', 'structured_outputs', 'tool_choice', 'tools'],
+  },
+  context_window: 200000,
+  max_output_tokens: 100000,
+  pricing: {
+    text: {
+      input: {
+        normal: 15,
+        cached: 7.5,
+      },
+      output: {
+        normal: 60,
+      },
+    },
+    image: 0,
+  },
+} as const
+const ANTHROPIC_CLAUDE_OPUS_4_1 = {
+  id: 'anthropic/claude-opus-4.1',
+  name: 'Anthropic: Claude Opus 4.1',
+  supports: {
+    input: ['image', 'text', 'document'],
+    output: ['text'],
+    supports: ['include_reasoning', 'max_tokens', 'reasoning', 'response_format', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'],
+  },
+  context_window: 200000,
+  max_output_tokens: 32000,
+  pricing: {
+    text: {
+      input: {
+        normal: 15,
+        cached: 20.25,
+      },
+      output: {
+        normal: 75,
+      },
+    },
+    image: 0.024,
+  },
+} as const
+const ANTHROPIC_CLAUDE_OPUS_4 = {
+  id: 'anthropic/claude-opus-4',
+  name: 'Anthropic: Claude Opus 4',
+  supports: {
+    input: ['image', 'text', 'document'],
+    output: ['text'],
+    supports: ['include_reasoning', 'max_tokens', 'reasoning', 'stop', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'],
+  },
+  context_window: 200000,
+  max_output_tokens: 32000,
+  pricing: {
+    text: {
+      input: {
+        normal: 15,
+        cached: 20.25,
+      },
+      output: {
+        normal: 75,
+      },
+    },
+    image: 0.024,
+  },
+} as const
+const OPENAI_GPT_5_PRO = {
+  id: 'openai/gpt-5-pro',
+  name: 'OpenAI: GPT-5 Pro',
+  supports: {
+    input: ['image', 'text', 'document'],
+    output: ['text'],
+    supports: ['include_reasoning', 'max_tokens', 'reasoning', 'response_format', 'seed', 'structured_outputs', 'tool_choice', 'tools'],
+  },
+  context_window: 400000,
+  max_output_tokens: 128000,
+  pricing: {
+    text: {
+      input: {
+        normal: 15,
+        cached: 0,
+      },
+      output: {
+        normal: 120,
+      },
+    },
+    image: 0,
+  },
+} as const
+const OPENAI_O3_PRO = {
+  id: 'openai/o3-pro',
+  name: 'OpenAI: o3 Pro',
+  supports: {
+    input: ['text', 'document', 'image'],
+    output: ['text'],
+    supports: ['include_reasoning', 'max_tokens', 'reasoning', 'response_format', 'seed', 'structured_outputs', 'tool_choice', 'tools'],
+  },
+  context_window: 200000,
+  max_output_tokens: 100000,
+  pricing: {
+    text: {
+      input: {
+        normal: 20,
+        cached: 0,
+      },
+      output: {
+        normal: 80,
+      },
+    },
+    image: 0,
+  },
+} as const
+const OPENAI_GPT_5_2_PRO = {
+  id: 'openai/gpt-5.2-pro',
+  name: 'OpenAI: GPT-5.2 Pro',
+  supports: {
+    input: ['image', 'text', 'document'],
+    output: ['text'],
+    supports: ['include_reasoning', 'max_tokens', 'reasoning', 'response_format', 'seed', 'structured_outputs', 'tool_choice', 'tools'],
+  },
+  context_window: 400000,
+  max_output_tokens: 128000,
+  pricing: {
+    text: {
+      input: {
+        normal: 21,
+        cached: 0,
+      },
+      output: {
+        normal: 168,
+      },
+    },
+    image: 0,
+  },
+} as const
+const OPENAI_GPT_4 = {
+  id: 'openai/gpt-4',
+  name: 'OpenAI: GPT-4',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    supports: ['frequency_penalty', 'logit_bias', 'logprobs', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_logprobs', 'top_p'],
+  },
+  context_window: 8191,
+  max_output_tokens: 4096,
+  pricing: {
+    text: {
+      input: {
+        normal: 30,
+        cached: 0,
+      },
+      output: {
+        normal: 60,
+      },
+    },
+    image: 0,
+  },
+} as const
+const OPENAI_GPT_4_0314 = {
+  id: 'openai/gpt-4-0314',
+  name: 'OpenAI: GPT-4 (older v0314)',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    supports: ['frequency_penalty', 'logit_bias', 'logprobs', 'max_tokens', 'presence_penalty', 'response_format', 'seed', 'stop', 'structured_outputs', 'temperature', 'tool_choice', 'tools', 'top_logprobs', 'top_p'],
+  },
+  context_window: 8191,
+  max_output_tokens: 4096,
+  pricing: {
+    text: {
+      input: {
+        normal: 30,
+        cached: 0,
+      },
+      output: {
+        normal: 60,
+      },
+    },
+    image: 0,
+  },
+} as const
+const OPENAI_O1_PRO = {
+  id: 'openai/o1-pro',
+  name: 'OpenAI: o1-pro',
+  supports: {
+    input: ['text', 'image', 'document'],
+    output: ['text'],
+    supports: ['include_reasoning', 'max_tokens', 'reasoning', 'response_format', 'seed', 'structured_outputs'],
+  },
+  context_window: 200000,
+  max_output_tokens: 100000,
+  pricing: {
+    text: {
+      input: {
+        normal: 150,
+        cached: 0,
+      },
+      output: {
+        normal: 600,
+      },
+    },
+    image: 0,
+  },
+} as const
+const ALLENAI_OLMO_2_0325_32B_INSTRUCT = {
+  id: 'allenai/olmo-2-0325-32b-instruct',
+  name: 'AllenAI: Olmo 2 32B Instruct',
+  supports: {
+    input: ['text'],
+    output: ['text'],
+    supports: [],
+  },
+  context_window: 128000,
+  pricing: {
+    text: {
+      input: {
+        normal: 0.05,
+        cached: 0,
+      },
+      output: {
+        normal: 0.2,
+      },
+    },
+    image: 0,
+  },
+} as const
+const META_LLAMA_LLAMA_3_2_90B_VISION_INSTRUCT = {
+  id: 'meta-llama/llama-3.2-90b-vision-instruct',
+  name: 'Meta: Llama 3.2 90B Vision Instruct',
+  supports: {
+    input: ['text', 'image'],
+    output: ['text'],
+    supports: ['frequency_penalty', 'max_tokens', 'min_p', 'presence_penalty', 'repetition_penalty', 'response_format', 'seed', 'stop', 'temperature', 'top_k', 'top_p'],
+  },
+  context_window: 131072,
+  max_output_tokens: 16384,
+  pricing: {
+    text: {
+      input: {
+        normal: 0.35,
+        cached: 0,
+      },
+      output: {
+        normal: 0.4,
+      },
+    },
+    image: 0.0005058,
+  },
+} as const
+const ANTHROPIC_CLAUDE_3_5_HAIKU_20241022 = {
+  id: 'anthropic/claude-3.5-haiku-20241022',
+  name: 'Anthropic: Claude 3.5 Haiku (2024-10-22)',
+  supports: {
+    input: ['text', 'image', 'document'],
+    output: ['text'],
+    supports: ['max_tokens', 'stop', 'temperature', 'tool_choice', 'tools', 'top_k', 'top_p'],
+  },
+  context_window: 200000,
+  max_output_tokens: 8192,
+  pricing: {
+    text: {
+      input: {
+        normal: 0.8,
+        cached: 1.08,
+      },
+      output: {
+        normal: 4,
+      },
+    },
+    image: 0,
+  },
+} as const
-export type OpenRouterModelOptionsByName = {
-  [NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B_FREE.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'stop'
-      | 'temperature'
-      | 'top_k'
-      | 'top_p'
-    >
-  [META_LLAMA_LLAMA_3_3_70B_INSTRUCT_FREE.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  [QWEN_QWEN3_CODER_FREE.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  [TNGTECH_DEEPSEEK_R1T2_CHIMERA_FREE.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'include_reasoning'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'reasoning'
-      | 'repetition_penalty'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'top_k'
-      | 'top_p'
-    >
-  [QWEN_QWEN3_4B_FREE.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'include_reasoning'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'reasoning'
-      | 'response_format'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  [GOOGLE_GEMMA_3N_E2B_IT_FREE.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'top_p'
-    >
-  [META_LLAMA_LLAMA_3_2_3B_INSTRUCT_FREE.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'stop'
-      | 'temperature'
-      | 'top_k'
-      | 'top_p'
-    >
-  [GOOGLE_GEMMA_3_12B_IT_FREE.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      'max_completion_tokens' | 'seed' | 'stop' | 'temperature' | 'top_p'
-    >
-  [COGNITIVECOMPUTATIONS_DOLPHIN_MISTRAL_24B_VENICE_EDITION_FREE.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'response_format'
-      | 'stop'
-      | 'temperature'
-      | 'top_k'
-      | 'top_p'
-    >
-  [Z_AI_GLM_4_5_AIR_FREE.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'include_reasoning'
-      | 'max_completion_tokens'
-      | 'reasoning'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_p'
-    >
-  [MOONSHOTAI_KIMI_K2_FREE.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      'max_completion_tokens' | 'seed' | 'stop' | 'temperature'
-    >
-  [GOOGLE_GEMMA_3_27B_IT_FREE.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_p'
-    >
-  [GOOGLE_GEMMA_3_4B_IT_FREE.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'max_completion_tokens'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'top_p'
-    >
-  [NVIDIA_NEMOTRON_3_NANO_30B_A3B_FREE.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'include_reasoning'
-      | 'max_completion_tokens'
-      | 'reasoning'
-      | 'seed'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_p'
-    >
-  [TNGTECH_TNG_R1T_CHIMERA_FREE.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'include_reasoning'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'reasoning'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  [NVIDIA_NEMOTRON_NANO_12B_V2_VL_FREE.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'include_reasoning'
-      | 'max_completion_tokens'
-      | 'reasoning'
-      | 'seed'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_p'
-    >
-  [ARCEE_AI_TRINITY_MINI_FREE.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'include_reasoning'
-      | 'max_completion_tokens'
-      | 'reasoning'
-      | 'response_format'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  [TNGTECH_DEEPSEEK_R1T_CHIMERA_FREE.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'include_reasoning'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'reasoning'
-      | 'repetition_penalty'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'top_k'
-      | 'top_p'
-    >
-  [GOOGLE_GEMMA_3N_E4B_IT_FREE.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'top_p'
-    >
-  [GOOGLE_GEMINI_2_0_FLASH_EXP_FREE.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'max_completion_tokens'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_p'
-    >
-  [OPENAI_GPT_OSS_120B_FREE.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'include_reasoning'
-      | 'max_completion_tokens'
-      | 'reasoning'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-    >
-  [OPENAI_GPT_OSS_20B_FREE.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'include_reasoning'
-      | 'max_completion_tokens'
-      | 'reasoning'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-    >
-  [MISTRALAI_MISTRAL_SMALL_3_1_24B_INSTRUCT_FREE.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'response_format'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  [MISTRALAI_DEVSTRAL_2512_FREE.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_p'
-    >
-  [XIAOMI_MIMO_V2_FLASH_FREE.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'include_reasoning'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'reasoning'
-      | 'response_format'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_p'
-    >
-  [ALLENAI_MOLMO_2_8B_FREE.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'logit_bias'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'top_k'
-      | 'top_p'
-    >
-  [DEEPSEEK_DEEPSEEK_R1_0528_FREE.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'include_reasoning'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'reasoning'
-      | 'repetition_penalty'
-      | 'temperature'
-    >
-  [NVIDIA_NEMOTRON_NANO_9B_V2_FREE.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'include_reasoning'
-      | 'max_completion_tokens'
-      | 'reasoning'
-      | 'response_format'
-      | 'seed'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_p'
-    >
-  [META_LLAMA_LLAMA_3_1_405B_INSTRUCT_FREE.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'temperature'
-    >
-  [QWEN_QWEN_2_5_VL_7B_INSTRUCT_FREE.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'temperature'
-    >
-  [MISTRALAI_MISTRAL_7B_INSTRUCT_FREE.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  [LIQUID_LFM_2_2_6B.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'top_k'
-      | 'top_p'
-    >
-  [LIQUID_LFM2_8B_A1B.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'top_k'
-      | 'top_p'
-    >
-  [IBM_GRANITE_GRANITE_4_0_H_MICRO.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'seed'
-      | 'temperature'
-      | 'top_k'
-      | 'top_p'
-    >
-  [GOOGLE_GEMMA_3_4B_IT.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'top_k'
-      | 'top_p'
-    >
-  [META_LLAMA_LLAMA_3_2_3B_INSTRUCT.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'logit_bias'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  [MISTRALAI_MISTRAL_NEMO.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  [GOOGLE_GEMMA_3N_E4B_IT.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'logit_bias'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'stop'
-      | 'temperature'
-      | 'top_k'
-      | 'top_p'
-    >
-  [META_LLAMA_LLAMA_3_1_8B_INSTRUCT.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'logit_bias'
-      | 'logprobs'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_logprobs'
-      | 'top_p'
-    >
-  [META_LLAMA_LLAMA_GUARD_3_8B.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'top_k'
-      | 'top_p'
-    >
-  [OPENAI_GPT_OSS_20B.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'include_reasoning'
-      | 'logit_bias'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'reasoning'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  [NOUSRESEARCH_DEEPHERMES_3_MISTRAL_24B_PREVIEW.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'include_reasoning'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'reasoning'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  [NOUSRESEARCH_HERMES_2_PRO_LLAMA_3_8B.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'top_k'
-      | 'top_p'
-    >
-  [META_LLAMA_LLAMA_3_2_1B_INSTRUCT.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'seed'
-      | 'temperature'
-      | 'top_k'
-      | 'top_p'
-    >
-  [MISTRALAI_MISTRAL_7B_INSTRUCT.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'logit_bias'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  [META_LLAMA_LLAMA_3_8B_INSTRUCT.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'logit_bias'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  [GOOGLE_GEMMA_2_9B_IT.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'temperature'
-      | 'top_k'
-      | 'top_p'
-    >
-  [QWEN_QWEN2_5_CODER_7B_INSTRUCT.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'temperature'
-      | 'top_k'
-      | 'top_p'
-    >
-  [GOOGLE_GEMMA_3_12B_IT.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'logit_bias'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'top_k'
-      | 'top_p'
-    >
-  [MISTRALAI_MISTRAL_SMALL_3_1_24B_INSTRUCT.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  [DEEPSEEK_DEEPSEEK_R1_DISTILL_LLAMA_70B.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'include_reasoning'
-      | 'logit_bias'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'reasoning'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  [QWEN_QWEN_2_5_CODER_32B_INSTRUCT.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'logit_bias'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'top_k'
-      | 'top_p'
-    >
-  [MISTRALAI_MISTRAL_SMALL_24B_INSTRUCT_2501.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'logit_bias'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  [QWEN_QWEN3_8B.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'include_reasoning'
-      | 'logit_bias'
-      | 'logprobs'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'reasoning'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_logprobs'
-      | 'top_p'
-    >
-  [AMAZON_NOVA_MICRO_V1.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      'max_completion_tokens' | 'stop' | 'temperature' | 'top_k' | 'top_p'
-    >
-  [COHERE_COMMAND_R7B_12_2024.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'top_k'
-      | 'top_p'
-    >
-  [OPENAI_GPT_OSS_120B.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'include_reasoning'
-      | 'logit_bias'
-      | 'logprobs'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'reasoning'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_logprobs'
-      | 'top_p'
-    >
-  [OPENAI_GPT_OSS_120B_EXACTO.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'include_reasoning'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'reasoning'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  [MISTRALAI_MINISTRAL_3B.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_p'
-    >
-  [SAO10K_L3_LUNARIS_8B.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'top_k'
-      | 'top_p'
-    >
-  [QWEN_QWEN_2_5_7B_INSTRUCT.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'logit_bias'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  [GOOGLE_GEMMA_3_27B_IT.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'logit_bias'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  [NVIDIA_NEMOTRON_NANO_9B_V2.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'include_reasoning'
-      | 'logit_bias'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'reasoning'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  [ARCEE_AI_TRINITY_MINI.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'include_reasoning'
-      | 'logit_bias'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'reasoning'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  [META_LLAMA_LLAMA_3_2_11B_VISION_INSTRUCT.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'logit_bias'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'top_k'
-      | 'top_p'
-    >
-  [MICROSOFT_PHI_4_MULTIMODAL_INSTRUCT.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'top_k'
-      | 'top_p'
-    >
-  [QWEN_QWEN_TURBO.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_p'
-    >
-  [Z_AI_GLM_4_5_AIR.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'include_reasoning'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'reasoning'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  [QWEN_QWEN2_5_VL_32B_INSTRUCT.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'logit_bias'
-      | 'logprobs'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'top_k'
-      | 'top_logprobs'
-      | 'top_p'
-    >
-  [QWEN_QWEN3_14B.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'include_reasoning'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'reasoning'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  [MISTRALAI_DEVSTRAL_2512.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  [OPENAI_GPT_5_NANO.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'include_reasoning'
-      | 'max_completion_tokens'
-      | 'reasoning'
-      | 'response_format'
-      | 'seed'
-      | 'tool_choice'
-    >
-  [QWEN_QWEN3_30B_A3B_THINKING_2507.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'include_reasoning'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'reasoning'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  [GRYPHE_MYTHOMAX_L2_13B.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'logit_bias'
-      | 'logprobs'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'top_a'
-      | 'top_k'
-      | 'top_logprobs'
-      | 'top_p'
-    >
-  [DEEPSEEK_DEEPSEEK_R1_0528_QWEN3_8B.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'include_reasoning'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'reasoning'
-      | 'repetition_penalty'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'top_k'
-      | 'top_p'
-    >
-  [MISTRALAI_DEVSTRAL_SMALL_2505.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'top_k'
-      | 'top_p'
-    >
-  [MICROSOFT_PHI_4.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'top_k'
-      | 'top_p'
-    >
-  [MISTRALAI_MISTRAL_SMALL_3_2_24B_INSTRUCT.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'logit_bias'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  [QWEN_QWEN3_30B_A3B.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'include_reasoning'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'reasoning'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  [AMAZON_NOVA_LITE_V1.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      'max_completion_tokens' | 'stop' | 'temperature' | 'top_k' | 'top_p'
-    >
-  [NVIDIA_NEMOTRON_3_NANO_30B_A3B.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'include_reasoning'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'reasoning'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  [QWEN_QWEN3_CODER_30B_A3B_INSTRUCT.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  [MISTRALAI_DEVSTRAL_SMALL.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  [BAIDU_ERNIE_4_5_21B_A3B_THINKING.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'include_reasoning'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'reasoning'
-      | 'repetition_penalty'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'top_k'
-      | 'top_p'
-    >
-  [BAIDU_ERNIE_4_5_21B_A3B.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  [MICROSOFT_PHI_4_REASONING_PLUS.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'include_reasoning'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'reasoning'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'top_k'
-      | 'top_p'
-    >
-  [QWEN_QWEN3_235B_A22B_2507.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'include_reasoning'
-      | 'logit_bias'
-      | 'logprobs'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'reasoning'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
- | 'tool_choice' - | 'top_k' - | 'top_logprobs' - | 'top_p' - > - [OPENAI_GPT_OSS_SAFEGUARD_20B.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'include_reasoning' - | 'max_completion_tokens' - | 'reasoning' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [BYTEDANCE_SEED_SEED_1_6_FLASH.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'max_completion_tokens' - | 'reasoning' - | 'response_format' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [GOOGLE_GEMINI_2_0_FLASH_LITE_001.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'max_completion_tokens' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [QWEN_QWEN3_32B.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'logprobs' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_logprobs' - | 'top_p' - > - [META_LLAMA_LLAMA_4_SCOUT.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [QWEN_QWEN3_30B_A3B_INSTRUCT_2507.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'presence_penalty' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [QWEN_QWEN3_VL_8B_INSTRUCT.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [ALIBABA_TONGYI_DEEPRESEARCH_30B_A3B.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [NEVERSLEEP_LLAMA_3_1_LUMIMAID_8B.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'presence_penalty' - | 'response_format' - | 'stop' - | 'temperature' - | 'top_p' - > - [QWEN_QWEN3_NEXT_80B_A3B_INSTRUCT.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [Z_AI_GLM_4_32B.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - 'max_completion_tokens' | 'temperature' | 'tool_choice' | 'top_p' - > - [MISTRALAI_PIXTRAL_12B.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 
'top_k' - | 'top_p' - > - [MISTRALAI_MINISTRAL_3B_2512.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'presence_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [MISTRAL_MINISTRAL_8B.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'presence_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [MISTRALAI_MINISTRAL_8B.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'presence_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [ALLENAI_OLMO_3_7B_INSTRUCT.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [BYTEDANCE_UI_TARS_1_5_7B.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'seed' - | 'stop' - | 'temperature' - | 'top_k' - | 'top_p' - > - [MISTRALAI_MISTRAL_SMALL_CREATIVE.id]: OpenRouterCommonOptions & - Pick - [MISTRALAI_VOXTRAL_SMALL_24B_2507.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'presence_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [META_LLAMA_LLAMA_3_3_70B_INSTRUCT.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'logprobs' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_logprobs' - | 'top_p' - > - [OPENGVLAB_INTERNVL3_78B.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'presence_penalty' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'top_k' - | 'top_p' - > - [OPENAI_GPT_4_1_NANO.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'max_completion_tokens' - | 'response_format' - | 'seed' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [GOOGLE_GEMINI_2_0_FLASH_001.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'max_completion_tokens' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [GOOGLE_GEMINI_2_5_FLASH_LITE_PREVIEW_09_2025.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'include_reasoning' - | 'max_completion_tokens' - | 'reasoning' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [GOOGLE_GEMINI_2_5_FLASH_LITE.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'include_reasoning' - | 'max_completion_tokens' - | 'reasoning' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [NVIDIA_LLAMA_3_3_NEMOTRON_SUPER_49B_V1_5.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 
'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [QWEN_QWEN3_VL_4B_INSTRUCT.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'top_k' - | 'top_p' - > - [MISTRALAI_MISTRAL_7B_INSTRUCT_V0_1.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'presence_penalty' - | 'repetition_penalty' - | 'seed' - | 'temperature' - | 'top_k' - | 'top_p' - > - [NOUSRESEARCH_HERMES_4_70B.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'max_completion_tokens' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [QWEN_QWEN3_235B_A22B_THINKING_2507.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'logit_bias' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [ALLENAI_OLMO_3_7B_THINK.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'logit_bias' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'top_k' - | 'top_p' - > - [QWEN_QWEN_2_5_72B_INSTRUCT.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [BAIDU_ERNIE_4_5_VL_28B_A3B.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'max_completion_tokens' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [TENCENT_HUNYUAN_A13B_INSTRUCT.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'reasoning' - | 'response_format' - | 'temperature' - | 'top_k' - | 'top_p' - > - [DEEPSEEK_DEEPSEEK_R1_DISTILL_QWEN_14B.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'max_completion_tokens' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'top_k' - | 'top_p' - > - [ESSENTIALAI_RNJ_1_INSTRUCT.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'response_format' - | 'stop' - | 'temperature' - | 'top_k' - | 'top_p' - > - [MISTRALAI_MINISTRAL_8B_2512.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'presence_penalty' - | 'response_format' - | 'seed' - 
| 'stop' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [QWEN_QWQ_32B.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'logit_bias' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [ALLENAI_OLMO_3_1_32B_THINK.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'logit_bias' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'top_k' - | 'top_p' - > - [ALLENAI_OLMO_3_32B_THINK.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'logit_bias' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'top_k' - | 'top_p' - > - [QWEN_QWEN3_VL_30B_A3B_INSTRUCT.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'logprobs' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_logprobs' - | 'top_p' - > - [META_LLAMA_LLAMA_4_MAVERICK.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [OPENAI_GPT_4O_MINI_SEARCH_PREVIEW.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - 'max_completion_tokens' | 'response_format' | 'web_search_options' - > - [OPENAI_GPT_4O_MINI_2024_07_18.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'logprobs' - | 'max_completion_tokens' - | 'presence_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_logprobs' - | 'top_p' - | 'web_search_options' - > - [OPENAI_GPT_4O_MINI.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'logprobs' - | 'max_completion_tokens' - | 'presence_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_logprobs' - | 'top_p' - | 'web_search_options' - > - [COHERE_COMMAND_R_08_2024.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'presence_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [QWEN_QWEN2_5_VL_72B_INSTRUCT.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'top_k' - | 'top_p' - > - [DEEPSEEK_DEEPSEEK_CHAT_V3_1.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'logit_bias' - | 'logprobs' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 
'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_logprobs' - | 'top_p' - > - [QWEN_QWEN3_NEXT_80B_A3B_THINKING.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'logit_bias' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [THEDRUMMER_ROCINANTE_12B.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [ARCEE_AI_SPOTLIGHT.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'stop' - | 'temperature' - | 'top_k' - | 'top_p' - > - [META_LLAMA_LLAMA_GUARD_4_12B.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'top_k' - | 'top_p' - > - [QWEN_QWEN3_235B_A22B.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'logit_bias' - | 'logprobs' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_logprobs' - | 'top_p' - > - [DEEPCOGITO_COGITO_V2_PREVIEW_LLAMA_109B_MOE.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'logit_bias' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [QWEN_QWEN3_VL_8B_THINKING.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'include_reasoning' - | 'max_completion_tokens' - | 'presence_penalty' - | 'reasoning' - | 'response_format' - | 'seed' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [DEEPSEEK_DEEPSEEK_CHAT_V3_0324.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'logprobs' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_logprobs' - | 'top_p' - > - [META_LLAMA_LLAMA_GUARD_2_8B.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'stop' - | 'temperature' - | 'top_k' - | 'top_p' - > - [MISTRALAI_MISTRAL_7B_INSTRUCT_V0_3.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'stop' - | 'temperature' - | 'top_k' - | 'top_p' - > - [QWEN_QWEN_2_5_VL_7B_INSTRUCT.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 
'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'seed' - | 'stop' - | 'temperature' - | 'top_k' - | 'top_p' - > - [MISTRALAI_MINISTRAL_14B_2512.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [MISTRALAI_MISTRAL_7B_INSTRUCT_V0_2.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'stop' - | 'temperature' - | 'top_k' - | 'top_p' - > - [AI21_JAMBA_MINI_1_7.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'max_completion_tokens' - | 'response_format' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [X_AI_GROK_4_1_FAST.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'include_reasoning' - | 'logprobs' - | 'max_completion_tokens' - | 'reasoning' - | 'response_format' - | 'seed' - | 'temperature' - | 'tool_choice' - | 'top_logprobs' - | 'top_p' - > - [X_AI_GROK_4_FAST.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'include_reasoning' - | 'logprobs' - | 'max_completion_tokens' - | 'reasoning' - | 'response_format' - | 'seed' - | 'temperature' - | 'tool_choice' - | 'top_logprobs' - | 'top_p' - > - [NVIDIA_NEMOTRON_NANO_12B_V2_VL.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'top_k' - | 'top_p' - > - [MISTRALAI_MISTRAL_SABA.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'presence_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [ALLENAI_OLMO_3_1_32B_INSTRUCT.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [MEITUAN_LONGCAT_FLASH_CHAT.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - 'max_completion_tokens' | 'temperature' | 'top_p' - > - [QWEN_QWEN3_VL_30B_A3B_THINKING.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'max_completion_tokens' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [MINIMAX_MINIMAX_M2.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [MINIMAX_MINIMAX_01.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - 'max_completion_tokens' | 'temperature' | 'top_p' - > - [PRIME_INTELLECT_INTELLECT_3.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 
'include_reasoning' - | 'logit_bias' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [QWEN_QWEN3_VL_235B_A22B_INSTRUCT.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'logprobs' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_logprobs' - | 'top_p' - > - [X_AI_GROK_CODE_FAST_1.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'include_reasoning' - | 'logprobs' - | 'max_completion_tokens' - | 'reasoning' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_logprobs' - | 'top_p' - > - [KWAIPILOT_KAT_CODER_PRO.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'presence_penalty' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [DEEPSEEK_DEEPSEEK_V3_2_EXP.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [QWEN_QWEN_VL_PLUS.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'max_completion_tokens' - | 'presence_penalty' - | 'response_format' - | 'seed' - | 'temperature' - | 'top_p' - > - [DEEPSEEK_DEEPSEEK_V3_1_TERMINUS_EXACTO.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [DEEPSEEK_DEEPSEEK_V3_1_TERMINUS.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [QWEN_QWEN3_CODER.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'logprobs' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_logprobs' - | 'top_p' - > - [QWEN_QWEN3_CODER_EXACTO.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [MISTRALAI_MISTRAL_TINY.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'presence_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [DEEPSEEK_DEEPSEEK_V3_2.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 
'include_reasoning' - | 'logit_bias' - | 'logprobs' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_logprobs' - | 'top_p' - > - [TNGTECH_DEEPSEEK_R1T2_CHIMERA.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'max_completion_tokens' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [TNGTECH_TNG_R1T_CHIMERA.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'max_completion_tokens' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [INCEPTION_MERCURY.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'presence_penalty' - | 'response_format' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [INCEPTION_MERCURY_CODER.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'presence_penalty' - | 'response_format' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [ANTHROPIC_CLAUDE_3_HAIKU.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'max_completion_tokens' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [OPENAI_GPT_5_MINI.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'include_reasoning' - | 'max_completion_tokens' - | 'reasoning' - | 'response_format' - | 'seed' - | 'tool_choice' - > - [OPENAI_GPT_5_1_CODEX_MINI.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'include_reasoning' - | 'max_completion_tokens' - | 'reasoning' - | 'response_format' - | 'seed' - | 'tool_choice' - > - [BYTEDANCE_SEED_SEED_1_6.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'max_completion_tokens' - | 'reasoning' - | 'response_format' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [DEEPSEEK_DEEPSEEK_R1_DISTILL_QWEN_32B.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'top_k' - | 'top_p' - > - [DEEPSEEK_DEEPSEEK_V3_2_SPECIALE.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'logit_bias' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'top_k' - | 'top_p' - > - [NEX_AGI_DEEPSEEK_V3_1_NEX_N1.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'response_format' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [MINIMAX_MINIMAX_M2_1.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'logit_bias' - | 'logprobs' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 
'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_logprobs' - | 'top_p' - > - [BAIDU_ERNIE_4_5_300B_A47B.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'presence_penalty' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'top_k' - | 'top_p' - > - [MOONSHOTAI_KIMI_DEV_72B.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'reasoning' - | 'response_format' - | 'temperature' - | 'top_k' - | 'top_p' - > - [NOUSRESEARCH_HERMES_3_LLAMA_3_1_70B.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'top_k' - | 'top_p' - > - [META_LLAMA_LLAMA_3_70B_INSTRUCT.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [THEDRUMMER_CYDONIA_24B_V4_1.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'top_k' - | 'top_p' - > - [X_AI_GROK_3_MINI.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'include_reasoning' - | 'logprobs' - | 'max_completion_tokens' - | 'reasoning' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_logprobs' - | 'top_p' - > - [X_AI_GROK_3_MINI_BETA.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'include_reasoning' - | 'logprobs' - | 'max_completion_tokens' - | 'reasoning' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_logprobs' - | 'top_p' - > - [Z_AI_GLM_4_6V.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'logit_bias' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [MISTRALAI_CODESTRAL_2508.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'presence_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [DEEPSEEK_DEEPSEEK_CHAT.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [TNGTECH_DEEPSEEK_R1T_CHIMERA.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'max_completion_tokens' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'top_k' - | 'top_p' - > - [QWEN_QWEN3_CODER_FLASH.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'max_completion_tokens' - | 'presence_penalty' - | 
'response_format' - | 'seed' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [GOOGLE_GEMINI_2_5_FLASH_IMAGE_PREVIEW.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'max_completion_tokens' - | 'response_format' - | 'seed' - | 'temperature' - | 'top_p' - > - [GOOGLE_GEMINI_2_5_FLASH_PREVIEW_09_2025.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'include_reasoning' - | 'max_completion_tokens' - | 'reasoning' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [GOOGLE_GEMINI_2_5_FLASH_IMAGE.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'max_completion_tokens' - | 'response_format' - | 'seed' - | 'temperature' - | 'top_p' - > - [AMAZON_NOVA_2_LITE_V1.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'include_reasoning' - | 'max_completion_tokens' - | 'reasoning' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [GOOGLE_GEMINI_2_5_FLASH.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'include_reasoning' - | 'max_completion_tokens' - | 'reasoning' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [Z_AI_GLM_4_6.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'logit_bias' - | 'logprobs' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_a' - | 'top_k' - | 'top_logprobs' - | 'top_p' - > - [Z_AI_GLM_4_5.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [MOONSHOTAI_KIMI_K2_0905.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'logprobs' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_logprobs' - | 'top_p' - > - [META_LLAMA_LLAMA_3_1_70B_INSTRUCT.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [THEDRUMMER_UNSLOPNEMO_12B.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'presence_penalty' - | 'response_format' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [QWEN_QWEN_PLUS_2025_07_28.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'max_completion_tokens' - | 'presence_penalty' - | 'response_format' - | 'seed' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [QWEN_QWEN_PLUS.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'max_completion_tokens' - | 'presence_penalty' - | 'response_format' - | 'seed' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [Z_AI_GLM_4_7.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'logit_bias' - | 'logprobs' - | 
'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_a' - | 'top_k' - | 'top_logprobs' - | 'top_p' - > - [OPENAI_GPT_4_1_MINI.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'max_completion_tokens' - | 'response_format' - | 'seed' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [MOONSHOTAI_KIMI_K2_THINKING.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'logit_bias' - | 'logprobs' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_logprobs' - | 'top_p' - > - [MISTRALAI_DEVSTRAL_MEDIUM.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'presence_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [MISTRALAI_MISTRAL_MEDIUM_3.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'presence_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [MISTRALAI_MISTRAL_MEDIUM_3_1.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'presence_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [MINIMAX_MINIMAX_M1.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'max_completion_tokens' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [QWEN_QWEN_PLUS_2025_07_28_THINKING.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'include_reasoning' - | 'max_completion_tokens' - | 'presence_penalty' - | 'reasoning' - | 'response_format' - | 'seed' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [BAIDU_ERNIE_4_5_VL_424B_A47B.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'max_completion_tokens' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'seed' - | 'stop' - | 'temperature' - | 'top_k' - | 'top_p' - > - [Z_AI_GLM_4_6_EXACTO.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'max_completion_tokens' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [UNDI95_REMM_SLERP_L2_13B.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'logprobs' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'top_a' - | 'top_k' - | 'top_logprobs' - | 'top_p' - > - [DEEPSEEK_DEEPSEEK_R1_0528.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'logit_bias' - | 'logprobs' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 
'temperature' - | 'tool_choice' - | 'top_k' - | 'top_logprobs' - | 'top_p' - > - [QWEN_QWEN3_VL_235B_A22B_THINKING.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'max_completion_tokens' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [MICROSOFT_WIZARDLM_2_8X22B.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'top_k' - | 'top_p' - > - [ARCEE_AI_CODER_LARGE.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'stop' - | 'temperature' - | 'top_k' - | 'top_p' - > - [MISTRALAI_MISTRAL_LARGE_2512.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'presence_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [QWEN_QWEN3_VL_32B_INSTRUCT.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'response_format' - | 'stop' - | 'temperature' - | 'top_k' - | 'top_p' - > - [OPENAI_GPT_3_5_TURBO.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'logprobs' - | 'max_completion_tokens' - | 'presence_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_logprobs' - | 'top_p' - > - [DEEPSEEK_DEEPSEEK_PROVER_V2.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'top_k' - | 'top_p' - > - [MOONSHOTAI_KIMI_K2.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logprobs' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_logprobs' - | 'top_p' - > - [GOOGLE_GEMINI_3_FLASH_PREVIEW.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'include_reasoning' - | 'max_completion_tokens' - | 'reasoning' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [MISTRALAI_MIXTRAL_8X7B_INSTRUCT.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [THEDRUMMER_SKYFALL_36B_V2.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'seed' - | 'stop' - | 'temperature' - | 'top_k' - | 'top_p' - > - [MINIMAX_MINIMAX_M1_80K.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 
'max_completion_tokens' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [STEPFUN_AI_STEP3.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'reasoning' - | 'response_format' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [NVIDIA_LLAMA_3_1_NEMOTRON_ULTRA_253B_V1.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'max_completion_tokens' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'temperature' - | 'top_k' - | 'top_p' - > - [Z_AI_GLM_4_5V.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'max_completion_tokens' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [MOONSHOTAI_KIMI_K2_0905_EXACTO.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'presence_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [GOOGLE_GEMMA_2_27B_IT.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'presence_penalty' - | 'response_format' - | 'stop' - | 'temperature' - | 'top_p' - > - [SAO10K_L3_3_EURYALE_70B.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'top_k' - | 'top_p' - > - [SAO10K_L3_1_EURYALE_70B.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [AION_LABS_AION_1_0_MINI.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'include_reasoning' - | 'max_completion_tokens' - | 'reasoning' - | 'temperature' - | 'top_p' - > - [DEEPSEEK_DEEPSEEK_R1.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [MANCER_WEAVER.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'logprobs' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'top_a' - | 'top_k' - | 'top_logprobs' - | 'top_p' - > - [ARCEE_AI_VIRTUOSO_LARGE.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [MORPH_MORPH_V3_FAST.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - 'max_completion_tokens' | 'stop' | 'temperature' - > - [ALFREDPROS_CODELLAMA_7B_INSTRUCT_SOLIDITY.id]: OpenRouterCommonOptions 
& - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'seed' - | 'stop' - | 'temperature' - | 'top_k' - | 'top_p' - > - [ELEUTHERAI_LLEMMA_7B.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'seed' - | 'stop' - | 'temperature' - | 'top_k' - | 'top_p' - > - [AION_LABS_AION_RP_LLAMA_3_1_8B.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - 'max_completion_tokens' | 'temperature' | 'top_p' - > - [AMAZON_NOVA_PRO_V1.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - 'max_completion_tokens' | 'stop' | 'temperature' | 'top_k' | 'top_p' - > - [QWEN_QWEN_VL_MAX.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'max_completion_tokens' - | 'presence_penalty' - | 'response_format' - | 'seed' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [ANTHROPIC_CLAUDE_3_5_HAIKU.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'max_completion_tokens' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [RELACE_RELACE_APPLY_3.id]: OpenRouterCommonOptions & - Pick - [SWITCHPOINT_ROUTER.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'include_reasoning' - | 'max_completion_tokens' - | 'reasoning' - | 'seed' - | 'stop' - | 'temperature' - | 'top_k' - | 'top_p' - > - [DEEPCOGITO_COGITO_V2_PREVIEW_LLAMA_70B.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'logit_bias' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [MORPH_MORPH_V3_LARGE.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - 'max_completion_tokens' | 'stop' | 'temperature' - > - [ARCEE_AI_MAESTRO_REASONING.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'stop' - | 'temperature' - | 'top_k' - | 'top_p' - > - [PERPLEXITY_SONAR.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'presence_penalty' - | 'temperature' - | 'top_k' - | 'top_p' - | 'web_search_options' - > - [NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'top_k' - | 'top_p' - > - [NEVERSLEEP_NOROMAID_20B.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'logprobs' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'top_a' - | 'top_k' - | 'top_logprobs' - | 'top_p' - > - [OPENAI_GPT_3_5_TURBO_0613.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'logprobs' - | 'max_completion_tokens' - | 'presence_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_logprobs' - | 'top_p' - > - [RELACE_RELACE_SEARCH.id]: OpenRouterCommonOptions 
& - Pick< - OpenRouterBaseOptions, - | 'max_completion_tokens' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [NOUSRESEARCH_HERMES_4_405B.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'max_completion_tokens' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'temperature' - | 'top_k' - | 'top_p' - > - [QWEN_QWEN3_CODER_PLUS.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'max_completion_tokens' - | 'presence_penalty' - | 'response_format' - | 'seed' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [ANTHROPIC_CLAUDE_HAIKU_4_5.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'include_reasoning' - | 'max_completion_tokens' - | 'reasoning' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [OPENAI_O4_MINI_HIGH.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'include_reasoning' - | 'max_completion_tokens' - | 'reasoning' - | 'response_format' - | 'seed' - | 'tool_choice' - > - [OPENAI_O3_MINI_HIGH.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - 'max_completion_tokens' | 'response_format' | 'seed' | 'tool_choice' - > - [OPENAI_O3_MINI.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - 'max_completion_tokens' | 'response_format' | 'seed' | 'tool_choice' - > - [OPENAI_O4_MINI.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'include_reasoning' - | 'max_completion_tokens' - | 'reasoning' - | 'response_format' - | 'seed' - | 'tool_choice' - > - [NVIDIA_LLAMA_3_1_NEMOTRON_70B_INSTRUCT.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'repetition_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [QWEN_QWEN3_MAX.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'max_completion_tokens' - | 'presence_penalty' - | 'response_format' - | 'seed' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [DEEPCOGITO_COGITO_V2_1_671B.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'logit_bias' - | 'max_completion_tokens' - | 'min_p' - | 'presence_penalty' - | 'reasoning' - | 'repetition_penalty' - | 'response_format' - | 'stop' - | 'temperature' - | 'top_k' - | 'top_p' - > - [GOOGLE_GEMINI_2_5_PRO_PREVIEW.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'include_reasoning' - | 'max_completion_tokens' - | 'reasoning' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [OPENAI_GPT_5_CHAT.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - 'max_completion_tokens' | 'response_format' | 'seed' - > - [GOOGLE_GEMINI_2_5_PRO_PREVIEW_05_06.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'include_reasoning' - | 'max_completion_tokens' - | 'reasoning' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [OPENAI_GPT_5.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'include_reasoning' - | 'max_completion_tokens' - | 'reasoning' - | 'response_format' - | 'seed' - | 'tool_choice' - > - [OPENAI_GPT_5_CODEX.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'include_reasoning' - | 'max_completion_tokens' - | 'reasoning' - | 
'response_format' - | 'seed' - | 'tool_choice' - > - [OPENAI_GPT_5_1_CODEX.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'include_reasoning' - | 'max_completion_tokens' - | 'reasoning' - | 'response_format' - | 'seed' - | 'tool_choice' - > - [OPENAI_GPT_5_1_CHAT.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - 'max_completion_tokens' | 'response_format' | 'seed' | 'tool_choice' - > - [GOOGLE_GEMINI_2_5_PRO.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'include_reasoning' - | 'max_completion_tokens' - | 'reasoning' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [OPENAI_GPT_5_1_CODEX_MAX.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'include_reasoning' - | 'max_completion_tokens' - | 'reasoning' - | 'response_format' - | 'seed' - | 'tool_choice' - > - [OPENAI_GPT_5_1.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'include_reasoning' - | 'max_completion_tokens' - | 'reasoning' - | 'response_format' - | 'seed' - | 'tool_choice' - > - [SAO10K_L3_EURYALE_70B.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'presence_penalty' - | 'repetition_penalty' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_k' - | 'top_p' - > - [OPENAI_GPT_3_5_TURBO_INSTRUCT.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'logprobs' - | 'max_completion_tokens' - | 'presence_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'top_logprobs' - | 'top_p' - > - [OPENAI_CODEX_MINI.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'include_reasoning' - | 'max_completion_tokens' - | 'reasoning' - | 'response_format' - | 'seed' - | 'tool_choice' - > - [QWEN_QWEN_MAX.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'max_completion_tokens' - | 'presence_penalty' - | 'response_format' - | 'seed' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [OPENAI_GPT_5_2.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'include_reasoning' - | 'max_completion_tokens' - | 'reasoning' - | 'response_format' - | 'seed' - | 'tool_choice' - > - [OPENAI_GPT_5_2_CHAT.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - 'max_completion_tokens' | 'response_format' | 'seed' | 'tool_choice' - > - [MISTRALAI_MISTRAL_LARGE_2411.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'presence_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [MISTRALAI_PIXTRAL_LARGE_2411.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'presence_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [MISTRALAI_MIXTRAL_8X22B_INSTRUCT.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'presence_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [MISTRALAI_MISTRAL_LARGE.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'presence_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - 
[MISTRALAI_MISTRAL_LARGE_2407.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'presence_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [PERPLEXITY_SONAR_DEEP_RESEARCH.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'max_completion_tokens' - | 'presence_penalty' - | 'reasoning' - | 'temperature' - | 'top_k' - | 'top_p' - | 'web_search_options' - > - [PERPLEXITY_SONAR_REASONING_PRO.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'max_completion_tokens' - | 'presence_penalty' - | 'reasoning' - | 'temperature' - | 'top_k' - | 'top_p' - | 'web_search_options' - > - [OPENAI_GPT_4_1.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'max_completion_tokens' - | 'response_format' - | 'seed' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [OPENAI_O3.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'include_reasoning' - | 'max_completion_tokens' - | 'reasoning' - | 'response_format' - | 'seed' - | 'tool_choice' - > - [AI21_JAMBA_LARGE_1_7.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'max_completion_tokens' - | 'response_format' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [OPENAI_O4_MINI_DEEP_RESEARCH.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'logit_bias' - | 'logprobs' - | 'max_completion_tokens' - | 'presence_penalty' - | 'reasoning' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_logprobs' - | 'top_p' - > - [GOOGLE_GEMINI_3_PRO_IMAGE_PREVIEW.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'include_reasoning' - | 'max_completion_tokens' - | 'reasoning' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'top_p' - > - [GOOGLE_GEMINI_3_PRO_PREVIEW.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'include_reasoning' - | 'max_completion_tokens' - | 'reasoning' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_p' - > - [OPENAI_GPT_5_IMAGE_MINI.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'include_reasoning' - | 'logit_bias' - | 'logprobs' - | 'max_completion_tokens' - | 'presence_penalty' - | 'reasoning' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_logprobs' - | 'top_p' - > - [OPENAI_GPT_4O_SEARCH_PREVIEW.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - 'max_completion_tokens' | 'response_format' | 'web_search_options' - > - [OPENAI_GPT_4O_AUDIO_PREVIEW.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'logit_bias' - | 'logprobs' - | 'max_completion_tokens' - | 'presence_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'tool_choice' - | 'top_logprobs' - | 'top_p' - > - [COHERE_COMMAND_A.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - | 'frequency_penalty' - | 'max_completion_tokens' - | 'presence_penalty' - | 'response_format' - | 'seed' - | 'stop' - | 'temperature' - | 'top_k' - | 'top_p' - > - [INFLECTION_INFLECTION_3_PI.id]: OpenRouterCommonOptions & - Pick< - OpenRouterBaseOptions, - 'max_completion_tokens' | 'stop' | 'temperature' | 'top_p' - > - 
-  [INFLECTION_INFLECTION_3_PRODUCTIVITY.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      'max_completion_tokens' | 'stop' | 'temperature' | 'top_p'
-    >
-  [OPENAI_GPT_4O_2024_11_20.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'logit_bias'
-      | 'logprobs'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_logprobs'
-      | 'top_p'
-      | 'web_search_options'
-    >
-  [OPENAI_GPT_4O_2024_08_06.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'logit_bias'
-      | 'logprobs'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_logprobs'
-      | 'top_p'
-      | 'web_search_options'
-    >
-  [OPENAI_GPT_4O.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'logit_bias'
-      | 'logprobs'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_logprobs'
-      | 'top_p'
-      | 'web_search_options'
-    >
-  [COHERE_COMMAND_R_PLUS_08_2024.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  [AMAZON_NOVA_PREMIER_V1.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      'max_completion_tokens' | 'stop' | 'temperature' | 'top_k' | 'top_p'
-    >
-  [SAO10K_L3_1_70B_HANAMI_X1.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'logit_bias'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'top_k'
-      | 'top_p'
-    >
-  [OPENAI_GPT_3_5_TURBO_16K.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'logit_bias'
-      | 'logprobs'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_logprobs'
-      | 'top_p'
-    >
-  [ANTHRACITE_ORG_MAGNUM_V4_72B.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'logit_bias'
-      | 'logprobs'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'top_a'
-      | 'top_k'
-      | 'top_logprobs'
-      | 'top_p'
-    >
-  [ANTHROPIC_CLAUDE_SONNET_4_5.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'include_reasoning'
-      | 'max_completion_tokens'
-      | 'reasoning'
-      | 'response_format'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  [X_AI_GROK_3.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'logprobs'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_logprobs'
-      | 'top_p'
-    >
-  [ANTHROPIC_CLAUDE_3_7_SONNET_THINKING.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'include_reasoning'
-      | 'max_completion_tokens'
-      | 'reasoning'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_p'
-    >
-  [X_AI_GROK_4.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'include_reasoning'
-      | 'logprobs'
-      | 'max_completion_tokens'
-      | 'reasoning'
-      | 'response_format'
-      |
'seed'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_logprobs'
-      | 'top_p'
-    >
-  [PERPLEXITY_SONAR_PRO.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'temperature'
-      | 'top_k'
-      | 'top_p'
-      | 'web_search_options'
-    >
-  [X_AI_GROK_3_BETA.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'logprobs'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_logprobs'
-      | 'top_p'
-    >
-  [ANTHROPIC_CLAUDE_SONNET_4.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'include_reasoning'
-      | 'max_completion_tokens'
-      | 'reasoning'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  [ANTHROPIC_CLAUDE_3_7_SONNET.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'include_reasoning'
-      | 'max_completion_tokens'
-      | 'reasoning'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  [PERPLEXITY_SONAR_PRO_SEARCH.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'include_reasoning'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'reasoning'
-      | 'temperature'
-      | 'top_k'
-      | 'top_p'
-      | 'web_search_options'
-    >
-  [META_LLAMA_LLAMA_3_1_405B_INSTRUCT.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'logit_bias'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  [DEEPCOGITO_COGITO_V2_PREVIEW_LLAMA_405B.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'include_reasoning'
-      | 'logit_bias'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'reasoning'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  [META_LLAMA_LLAMA_3_1_405B.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'logit_bias'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'top_k'
-      | 'top_p'
-    >
-  [AION_LABS_AION_1_0.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'include_reasoning'
-      | 'max_completion_tokens'
-      | 'reasoning'
-      | 'temperature'
-      | 'top_p'
-    >
-  [RAIFLE_SORCERERLM_8X22B.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'logit_bias'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'top_k'
-      | 'top_p'
-    >
-  [OPENAI_CHATGPT_4O_LATEST.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'logit_bias'
-      | 'logprobs'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'top_logprobs'
-      | 'top_p'
-    >
-  [OPENAI_GPT_4O_2024_05_13.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'logit_bias'
-      | 'logprobs'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_logprobs'
-      | 'top_p'
-      | 'web_search_options'
-    >
-  [ANTHROPIC_CLAUDE_OPUS_4_5.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'include_reasoning'
-      | 'max_completion_tokens'
-      | 'reasoning'
-      | 'response_format'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'verbosity'
-    >
-  [ALPINDALE_GOLIATH_120B.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'logit_bias'
-      | 'logprobs'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'top_a'
-      | 'top_k'
-      | 'top_logprobs'
-      | 'top_p'
-    >
-  [OPENAI_GPT_4O_EXTENDED.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'logit_bias'
-      | 'logprobs'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_logprobs'
-      | 'top_p'
-      | 'web_search_options'
-    >
-  [ANTHROPIC_CLAUDE_3_5_SONNET.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'max_completion_tokens'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  [OPENAI_GPT_5_IMAGE.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'include_reasoning'
-      | 'logit_bias'
-      | 'logprobs'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'reasoning'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_logprobs'
-      | 'top_p'
-    >
-  [OPENAI_GPT_4_1106_PREVIEW.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'logit_bias'
-      | 'logprobs'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_logprobs'
-      | 'top_p'
-    >
-  [OPENAI_GPT_4_TURBO.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'logit_bias'
-      | 'logprobs'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_logprobs'
-      | 'top_p'
-    >
-  [OPENAI_GPT_4_TURBO_PREVIEW.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'logit_bias'
-      | 'logprobs'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_logprobs'
-      | 'top_p'
-    >
-  [OPENAI_O3_DEEP_RESEARCH.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'include_reasoning'
-      | 'logit_bias'
-      | 'logprobs'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'reasoning'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_logprobs'
-      | 'top_p'
-    >
-  [OPENAI_O1.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      'max_completion_tokens' | 'response_format' | 'seed' | 'tool_choice'
-    >
-  [ANTHROPIC_CLAUDE_OPUS_4_1.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'include_reasoning'
-      | 'max_completion_tokens'
-      | 'reasoning'
-      | 'response_format'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  [ANTHROPIC_CLAUDE_OPUS_4.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'include_reasoning'
-      | 'max_completion_tokens'
-      | 'reasoning'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  [OPENAI_GPT_5_PRO.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'include_reasoning'
-      | 'max_completion_tokens'
-      | 'reasoning'
-      | 'response_format'
-      | 'seed'
-      |
'tool_choice'
-    >
-  [OPENAI_O3_PRO.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'include_reasoning'
-      | 'max_completion_tokens'
-      | 'reasoning'
-      | 'response_format'
-      | 'seed'
-      | 'tool_choice'
-    >
-  [OPENAI_GPT_5_2_PRO.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'include_reasoning'
-      | 'max_completion_tokens'
-      | 'reasoning'
-      | 'response_format'
-      | 'seed'
-      | 'tool_choice'
-    >
-  [OPENAI_GPT_4.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'logit_bias'
-      | 'logprobs'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_logprobs'
-      | 'top_p'
-    >
-  [OPENAI_GPT_4_0314.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'logit_bias'
-      | 'logprobs'
-      | 'max_completion_tokens'
-      | 'presence_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_logprobs'
-      | 'top_p'
-    >
-  [OPENAI_O1_PRO.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'include_reasoning'
-      | 'max_completion_tokens'
-      | 'reasoning'
-      | 'response_format'
-      | 'seed'
-    >
-  [ALLENAI_OLMO_2_0325_32B_INSTRUCT.id]: OpenRouterCommonOptions &
-    OpenRouterBaseOptions
-  [META_LLAMA_LLAMA_3_2_90B_VISION_INSTRUCT.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'frequency_penalty'
-      | 'max_completion_tokens'
-      | 'min_p'
-      | 'presence_penalty'
-      | 'repetition_penalty'
-      | 'response_format'
-      | 'seed'
-      | 'stop'
-      | 'temperature'
-      | 'top_k'
-      | 'top_p'
-    >
-  [ANTHROPIC_CLAUDE_3_5_HAIKU_20241022.id]: OpenRouterCommonOptions &
-    Pick<
-      OpenRouterBaseOptions,
-      | 'max_completion_tokens'
-      | 'stop'
-      | 'temperature'
-      | 'tool_choice'
-      | 'top_k'
-      | 'top_p'
-    >
-  'openrouter/auto': OpenRouterCommonOptions & OpenRouterBaseOptions
+
+export type OpenRouterModelOptionsByName = {
+  [NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B_FREE.id]: OpenRouterCommonOptions & Pick;
+  [META_LLAMA_LLAMA_3_3_70B_INSTRUCT_FREE.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN3_CODER_FREE.id]: OpenRouterCommonOptions & Pick;
+  [TNGTECH_DEEPSEEK_R1T2_CHIMERA_FREE.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN3_4B_FREE.id]: OpenRouterCommonOptions & Pick;
+  [GOOGLE_GEMMA_3N_E2B_IT_FREE.id]: OpenRouterCommonOptions & Pick;
+  [META_LLAMA_LLAMA_3_2_3B_INSTRUCT_FREE.id]: OpenRouterCommonOptions & Pick;
+  [GOOGLE_GEMMA_3_12B_IT_FREE.id]: OpenRouterCommonOptions & Pick;
+  [COGNITIVECOMPUTATIONS_DOLPHIN_MISTRAL_24B_VENICE_EDITION_FREE.id]: OpenRouterCommonOptions & Pick;
+  [Z_AI_GLM_4_5_AIR_FREE.id]: OpenRouterCommonOptions & Pick;
+  [MOONSHOTAI_KIMI_K2_FREE.id]: OpenRouterCommonOptions & Pick;
+  [GOOGLE_GEMMA_3_27B_IT_FREE.id]: OpenRouterCommonOptions & Pick;
+  [GOOGLE_GEMMA_3_4B_IT_FREE.id]: OpenRouterCommonOptions & Pick;
+  [NVIDIA_NEMOTRON_3_NANO_30B_A3B_FREE.id]: OpenRouterCommonOptions & Pick;
+  [TNGTECH_TNG_R1T_CHIMERA_FREE.id]: OpenRouterCommonOptions & Pick;
+  [NVIDIA_NEMOTRON_NANO_12B_V2_VL_FREE.id]: OpenRouterCommonOptions & Pick;
+  [ARCEE_AI_TRINITY_MINI_FREE.id]: OpenRouterCommonOptions & Pick;
+  [TNGTECH_DEEPSEEK_R1T_CHIMERA_FREE.id]: OpenRouterCommonOptions & Pick;
+  [GOOGLE_GEMMA_3N_E4B_IT_FREE.id]: OpenRouterCommonOptions & Pick;
+  [GOOGLE_GEMINI_2_0_FLASH_EXP_FREE.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_OSS_120B_FREE.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_OSS_20B_FREE.id]: OpenRouterCommonOptions & Pick;
+  [MISTRALAI_MISTRAL_SMALL_3_1_24B_INSTRUCT_FREE.id]: OpenRouterCommonOptions & Pick;
+  [MISTRALAI_DEVSTRAL_2512_FREE.id]: OpenRouterCommonOptions & Pick;
+  [XIAOMI_MIMO_V2_FLASH_FREE.id]: OpenRouterCommonOptions & Pick;
+  [ALLENAI_MOLMO_2_8B_FREE.id]: OpenRouterCommonOptions & Pick;
+  [DEEPSEEK_DEEPSEEK_R1_0528_FREE.id]: OpenRouterCommonOptions & Pick;
+  [NVIDIA_NEMOTRON_NANO_9B_V2_FREE.id]: OpenRouterCommonOptions & Pick;
+  [META_LLAMA_LLAMA_3_1_405B_INSTRUCT_FREE.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN_2_5_VL_7B_INSTRUCT_FREE.id]: OpenRouterCommonOptions & Pick;
+  [MISTRALAI_MISTRAL_7B_INSTRUCT_FREE.id]: OpenRouterCommonOptions & Pick;
+  [LIQUID_LFM_2_2_6B.id]: OpenRouterCommonOptions & Pick;
+  [LIQUID_LFM2_8B_A1B.id]: OpenRouterCommonOptions & Pick;
+  [IBM_GRANITE_GRANITE_4_0_H_MICRO.id]: OpenRouterCommonOptions & Pick;
+  [GOOGLE_GEMMA_3_4B_IT.id]: OpenRouterCommonOptions & Pick;
+  [META_LLAMA_LLAMA_3_2_3B_INSTRUCT.id]: OpenRouterCommonOptions & Pick;
+  [MISTRALAI_MISTRAL_NEMO.id]: OpenRouterCommonOptions & Pick;
+  [GOOGLE_GEMMA_3N_E4B_IT.id]: OpenRouterCommonOptions & Pick;
+  [META_LLAMA_LLAMA_3_1_8B_INSTRUCT.id]: OpenRouterCommonOptions & Pick;
+  [META_LLAMA_LLAMA_GUARD_3_8B.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_OSS_20B.id]: OpenRouterCommonOptions & Pick;
+  [NOUSRESEARCH_DEEPHERMES_3_MISTRAL_24B_PREVIEW.id]: OpenRouterCommonOptions & Pick;
+  [NOUSRESEARCH_HERMES_2_PRO_LLAMA_3_8B.id]: OpenRouterCommonOptions & Pick;
+  [META_LLAMA_LLAMA_3_2_1B_INSTRUCT.id]: OpenRouterCommonOptions & Pick;
+  [MISTRALAI_MISTRAL_7B_INSTRUCT.id]: OpenRouterCommonOptions & Pick;
+  [META_LLAMA_LLAMA_3_8B_INSTRUCT.id]: OpenRouterCommonOptions & Pick;
+  [GOOGLE_GEMMA_2_9B_IT.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN2_5_CODER_7B_INSTRUCT.id]: OpenRouterCommonOptions & Pick;
+  [GOOGLE_GEMMA_3_12B_IT.id]: OpenRouterCommonOptions & Pick;
+  [MISTRALAI_MISTRAL_SMALL_3_1_24B_INSTRUCT.id]: OpenRouterCommonOptions & Pick;
+  [DEEPSEEK_DEEPSEEK_R1_DISTILL_LLAMA_70B.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN_2_5_CODER_32B_INSTRUCT.id]: OpenRouterCommonOptions & Pick;
+  [MISTRALAI_MISTRAL_SMALL_24B_INSTRUCT_2501.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN3_8B.id]: OpenRouterCommonOptions & Pick;
+  [AMAZON_NOVA_MICRO_V1.id]: OpenRouterCommonOptions & Pick;
+  [COHERE_COMMAND_R7B_12_2024.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_OSS_120B.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_OSS_120B_EXACTO.id]: OpenRouterCommonOptions & Pick;
+  [MISTRALAI_MINISTRAL_3B.id]: OpenRouterCommonOptions & Pick;
+  [SAO10K_L3_LUNARIS_8B.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN_2_5_7B_INSTRUCT.id]: OpenRouterCommonOptions & Pick;
+  [GOOGLE_GEMMA_3_27B_IT.id]: OpenRouterCommonOptions & Pick;
+  [NVIDIA_NEMOTRON_NANO_9B_V2.id]: OpenRouterCommonOptions & Pick;
+  [ARCEE_AI_TRINITY_MINI.id]: OpenRouterCommonOptions & Pick;
+  [META_LLAMA_LLAMA_3_2_11B_VISION_INSTRUCT.id]: OpenRouterCommonOptions & Pick;
+  [MICROSOFT_PHI_4_MULTIMODAL_INSTRUCT.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN_TURBO.id]: OpenRouterCommonOptions & Pick;
+  [Z_AI_GLM_4_5_AIR.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN2_5_VL_32B_INSTRUCT.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN3_14B.id]: OpenRouterCommonOptions & Pick;
+  [MISTRALAI_DEVSTRAL_2512.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_5_NANO.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN3_30B_A3B_THINKING_2507.id]: OpenRouterCommonOptions & Pick;
+  [GRYPHE_MYTHOMAX_L2_13B.id]: OpenRouterCommonOptions & Pick;
+  [DEEPSEEK_DEEPSEEK_R1_0528_QWEN3_8B.id]: OpenRouterCommonOptions & Pick;
+  [MISTRALAI_DEVSTRAL_SMALL_2505.id]: OpenRouterCommonOptions & Pick;
+  [MICROSOFT_PHI_4.id]: OpenRouterCommonOptions & Pick;
+  [MISTRALAI_MISTRAL_SMALL_3_2_24B_INSTRUCT.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN3_30B_A3B.id]: OpenRouterCommonOptions & Pick;
+  [AMAZON_NOVA_LITE_V1.id]: OpenRouterCommonOptions & Pick;
+  [NVIDIA_NEMOTRON_3_NANO_30B_A3B.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN3_CODER_30B_A3B_INSTRUCT.id]: OpenRouterCommonOptions & Pick;
+  [MISTRALAI_DEVSTRAL_SMALL.id]: OpenRouterCommonOptions & Pick;
+  [BAIDU_ERNIE_4_5_21B_A3B_THINKING.id]: OpenRouterCommonOptions & Pick;
+  [BAIDU_ERNIE_4_5_21B_A3B.id]: OpenRouterCommonOptions & Pick;
+  [MICROSOFT_PHI_4_REASONING_PLUS.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN3_235B_A22B_2507.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_OSS_SAFEGUARD_20B.id]: OpenRouterCommonOptions & Pick;
+  [BYTEDANCE_SEED_SEED_1_6_FLASH.id]: OpenRouterCommonOptions & Pick;
+  [GOOGLE_GEMINI_2_0_FLASH_LITE_001.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN3_32B.id]: OpenRouterCommonOptions & Pick;
+  [META_LLAMA_LLAMA_4_SCOUT.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN3_30B_A3B_INSTRUCT_2507.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN3_VL_8B_INSTRUCT.id]: OpenRouterCommonOptions & Pick;
+  [ALIBABA_TONGYI_DEEPRESEARCH_30B_A3B.id]: OpenRouterCommonOptions & Pick;
+  [NEVERSLEEP_LLAMA_3_1_LUMIMAID_8B.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN3_NEXT_80B_A3B_INSTRUCT.id]: OpenRouterCommonOptions & Pick;
+  [Z_AI_GLM_4_32B.id]: OpenRouterCommonOptions & Pick;
+  [MISTRALAI_PIXTRAL_12B.id]: OpenRouterCommonOptions & Pick;
+  [MISTRALAI_MINISTRAL_3B_2512.id]: OpenRouterCommonOptions & Pick;
+  [MISTRAL_MINISTRAL_8B.id]: OpenRouterCommonOptions & Pick;
+  [MISTRALAI_MINISTRAL_8B.id]: OpenRouterCommonOptions & Pick;
+  [ALLENAI_OLMO_3_7B_INSTRUCT.id]: OpenRouterCommonOptions & Pick;
+  [BYTEDANCE_UI_TARS_1_5_7B.id]: OpenRouterCommonOptions & Pick;
+  [MISTRALAI_MISTRAL_SMALL_CREATIVE.id]: OpenRouterCommonOptions & Pick;
+  [MISTRALAI_VOXTRAL_SMALL_24B_2507.id]: OpenRouterCommonOptions & Pick;
+  [META_LLAMA_LLAMA_3_3_70B_INSTRUCT.id]: OpenRouterCommonOptions & Pick;
+  [OPENGVLAB_INTERNVL3_78B.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_4_1_NANO.id]: OpenRouterCommonOptions & Pick;
+  [GOOGLE_GEMINI_2_0_FLASH_001.id]: OpenRouterCommonOptions & Pick;
+  [GOOGLE_GEMINI_2_5_FLASH_LITE_PREVIEW_09_2025.id]: OpenRouterCommonOptions & Pick;
+  [GOOGLE_GEMINI_2_5_FLASH_LITE.id]: OpenRouterCommonOptions & Pick;
+  [NVIDIA_LLAMA_3_3_NEMOTRON_SUPER_49B_V1_5.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN3_VL_4B_INSTRUCT.id]: OpenRouterCommonOptions & Pick;
+  [MISTRALAI_MISTRAL_7B_INSTRUCT_V0_1.id]: OpenRouterCommonOptions & Pick;
+  [NOUSRESEARCH_HERMES_4_70B.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN3_235B_A22B_THINKING_2507.id]: OpenRouterCommonOptions & Pick;
+  [ALLENAI_OLMO_3_7B_THINK.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN_2_5_72B_INSTRUCT.id]: OpenRouterCommonOptions & Pick;
+  [BAIDU_ERNIE_4_5_VL_28B_A3B.id]: OpenRouterCommonOptions & Pick;
+  [TENCENT_HUNYUAN_A13B_INSTRUCT.id]: OpenRouterCommonOptions & Pick;
+  [DEEPSEEK_DEEPSEEK_R1_DISTILL_QWEN_14B.id]: OpenRouterCommonOptions & Pick;
+  [ESSENTIALAI_RNJ_1_INSTRUCT.id]: OpenRouterCommonOptions & Pick;
+  [MISTRALAI_MINISTRAL_8B_2512.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWQ_32B.id]: OpenRouterCommonOptions & Pick;
+  [ALLENAI_OLMO_3_1_32B_THINK.id]: OpenRouterCommonOptions & Pick;
+  [ALLENAI_OLMO_3_32B_THINK.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN3_VL_30B_A3B_INSTRUCT.id]: OpenRouterCommonOptions & Pick;
+  [META_LLAMA_LLAMA_4_MAVERICK.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_4O_MINI_SEARCH_PREVIEW.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_4O_MINI_2024_07_18.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_4O_MINI.id]: OpenRouterCommonOptions & Pick;
+  [COHERE_COMMAND_R_08_2024.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN2_5_VL_72B_INSTRUCT.id]: OpenRouterCommonOptions & Pick;
+  [DEEPSEEK_DEEPSEEK_CHAT_V3_1.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN3_NEXT_80B_A3B_THINKING.id]: OpenRouterCommonOptions & Pick;
+  [THEDRUMMER_ROCINANTE_12B.id]: OpenRouterCommonOptions & Pick;
+  [ARCEE_AI_SPOTLIGHT.id]: OpenRouterCommonOptions & Pick;
+  [META_LLAMA_LLAMA_GUARD_4_12B.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN3_235B_A22B.id]: OpenRouterCommonOptions & Pick;
+  [DEEPCOGITO_COGITO_V2_PREVIEW_LLAMA_109B_MOE.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN3_VL_8B_THINKING.id]: OpenRouterCommonOptions & Pick;
+  [DEEPSEEK_DEEPSEEK_CHAT_V3_0324.id]: OpenRouterCommonOptions & Pick;
+  [META_LLAMA_LLAMA_GUARD_2_8B.id]: OpenRouterCommonOptions & Pick;
+  [MISTRALAI_MISTRAL_7B_INSTRUCT_V0_3.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN_2_5_VL_7B_INSTRUCT.id]: OpenRouterCommonOptions & Pick;
+  [MISTRALAI_MINISTRAL_14B_2512.id]: OpenRouterCommonOptions & Pick;
+  [MISTRALAI_MISTRAL_7B_INSTRUCT_V0_2.id]: OpenRouterCommonOptions & Pick;
+  [AI21_JAMBA_MINI_1_7.id]: OpenRouterCommonOptions & Pick;
+  [X_AI_GROK_4_1_FAST.id]: OpenRouterCommonOptions & Pick;
+  [X_AI_GROK_4_FAST.id]: OpenRouterCommonOptions & Pick;
+  [NVIDIA_NEMOTRON_NANO_12B_V2_VL.id]: OpenRouterCommonOptions & Pick;
+  [MISTRALAI_MISTRAL_SABA.id]: OpenRouterCommonOptions & Pick;
+  [ALLENAI_OLMO_3_1_32B_INSTRUCT.id]: OpenRouterCommonOptions & Pick;
+  [MEITUAN_LONGCAT_FLASH_CHAT.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN3_VL_30B_A3B_THINKING.id]: OpenRouterCommonOptions & Pick;
+  [MINIMAX_MINIMAX_M2.id]: OpenRouterCommonOptions & Pick;
+  [MINIMAX_MINIMAX_01.id]: OpenRouterCommonOptions & Pick;
+  [PRIME_INTELLECT_INTELLECT_3.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN3_VL_235B_A22B_INSTRUCT.id]: OpenRouterCommonOptions & Pick;
+  [X_AI_GROK_CODE_FAST_1.id]: OpenRouterCommonOptions & Pick;
+  [KWAIPILOT_KAT_CODER_PRO.id]: OpenRouterCommonOptions & Pick;
+  [DEEPSEEK_DEEPSEEK_V3_2_EXP.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN_VL_PLUS.id]: OpenRouterCommonOptions & Pick;
+  [DEEPSEEK_DEEPSEEK_V3_1_TERMINUS_EXACTO.id]: OpenRouterCommonOptions & Pick;
+  [DEEPSEEK_DEEPSEEK_V3_1_TERMINUS.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN3_CODER.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN3_CODER_EXACTO.id]: OpenRouterCommonOptions & Pick;
+  [MISTRALAI_MISTRAL_TINY.id]: OpenRouterCommonOptions & Pick;
+  [DEEPSEEK_DEEPSEEK_V3_2.id]: OpenRouterCommonOptions & Pick;
+  [TNGTECH_DEEPSEEK_R1T2_CHIMERA.id]: OpenRouterCommonOptions & Pick;
+  [TNGTECH_TNG_R1T_CHIMERA.id]: OpenRouterCommonOptions & Pick;
+  [INCEPTION_MERCURY.id]: OpenRouterCommonOptions & Pick;
+  [INCEPTION_MERCURY_CODER.id]: OpenRouterCommonOptions & Pick;
+  [ANTHROPIC_CLAUDE_3_HAIKU.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_5_MINI.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_5_1_CODEX_MINI.id]: OpenRouterCommonOptions & Pick;
+  [BYTEDANCE_SEED_SEED_1_6.id]: OpenRouterCommonOptions & Pick;
+  [DEEPSEEK_DEEPSEEK_R1_DISTILL_QWEN_32B.id]: OpenRouterCommonOptions & Pick;
+  [DEEPSEEK_DEEPSEEK_V3_2_SPECIALE.id]: OpenRouterCommonOptions & Pick;
+  [NEX_AGI_DEEPSEEK_V3_1_NEX_N1.id]: OpenRouterCommonOptions & Pick;
+  [MINIMAX_MINIMAX_M2_1.id]: OpenRouterCommonOptions & Pick;
+  [BAIDU_ERNIE_4_5_300B_A47B.id]: OpenRouterCommonOptions & Pick;
+  [MOONSHOTAI_KIMI_DEV_72B.id]: OpenRouterCommonOptions & Pick;
+  [NOUSRESEARCH_HERMES_3_LLAMA_3_1_70B.id]: OpenRouterCommonOptions & Pick;
+  [META_LLAMA_LLAMA_3_70B_INSTRUCT.id]: OpenRouterCommonOptions & Pick;
+  [THEDRUMMER_CYDONIA_24B_V4_1.id]: OpenRouterCommonOptions & Pick;
+  [X_AI_GROK_3_MINI.id]: OpenRouterCommonOptions & Pick;
+  [X_AI_GROK_3_MINI_BETA.id]: OpenRouterCommonOptions & Pick;
+  [Z_AI_GLM_4_6V.id]: OpenRouterCommonOptions & Pick;
+  [MISTRALAI_CODESTRAL_2508.id]: OpenRouterCommonOptions & Pick;
+  [DEEPSEEK_DEEPSEEK_CHAT.id]: OpenRouterCommonOptions & Pick;
+  [TNGTECH_DEEPSEEK_R1T_CHIMERA.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN3_CODER_FLASH.id]: OpenRouterCommonOptions & Pick;
+  [GOOGLE_GEMINI_2_5_FLASH_IMAGE_PREVIEW.id]: OpenRouterCommonOptions & Pick;
+  [GOOGLE_GEMINI_2_5_FLASH_PREVIEW_09_2025.id]: OpenRouterCommonOptions & Pick;
+  [GOOGLE_GEMINI_2_5_FLASH_IMAGE.id]: OpenRouterCommonOptions & Pick;
+  [AMAZON_NOVA_2_LITE_V1.id]: OpenRouterCommonOptions & Pick;
+  [GOOGLE_GEMINI_2_5_FLASH.id]: OpenRouterCommonOptions & Pick;
+  [Z_AI_GLM_4_6.id]: OpenRouterCommonOptions & Pick;
+  [Z_AI_GLM_4_5.id]: OpenRouterCommonOptions & Pick;
+  [MOONSHOTAI_KIMI_K2_0905.id]: OpenRouterCommonOptions & Pick;
+  [META_LLAMA_LLAMA_3_1_70B_INSTRUCT.id]: OpenRouterCommonOptions & Pick;
+  [THEDRUMMER_UNSLOPNEMO_12B.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN_PLUS_2025_07_28.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN_PLUS.id]: OpenRouterCommonOptions & Pick;
+  [Z_AI_GLM_4_7.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_4_1_MINI.id]: OpenRouterCommonOptions & Pick;
+  [MOONSHOTAI_KIMI_K2_THINKING.id]: OpenRouterCommonOptions & Pick;
+  [MISTRALAI_DEVSTRAL_MEDIUM.id]: OpenRouterCommonOptions & Pick;
+  [MISTRALAI_MISTRAL_MEDIUM_3.id]: OpenRouterCommonOptions & Pick;
+  [MISTRALAI_MISTRAL_MEDIUM_3_1.id]: OpenRouterCommonOptions & Pick;
+  [MINIMAX_MINIMAX_M1.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN_PLUS_2025_07_28_THINKING.id]: OpenRouterCommonOptions & Pick;
+  [BAIDU_ERNIE_4_5_VL_424B_A47B.id]: OpenRouterCommonOptions & Pick;
+  [Z_AI_GLM_4_6_EXACTO.id]: OpenRouterCommonOptions & Pick;
+  [UNDI95_REMM_SLERP_L2_13B.id]: OpenRouterCommonOptions & Pick;
+  [DEEPSEEK_DEEPSEEK_R1_0528.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN3_VL_235B_A22B_THINKING.id]: OpenRouterCommonOptions & Pick;
+  [MICROSOFT_WIZARDLM_2_8X22B.id]: OpenRouterCommonOptions & Pick;
+  [ARCEE_AI_CODER_LARGE.id]: OpenRouterCommonOptions & Pick;
+  [MISTRALAI_MISTRAL_LARGE_2512.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN3_VL_32B_INSTRUCT.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_3_5_TURBO.id]: OpenRouterCommonOptions & Pick;
+  [DEEPSEEK_DEEPSEEK_PROVER_V2.id]: OpenRouterCommonOptions & Pick;
+  [MOONSHOTAI_KIMI_K2.id]: OpenRouterCommonOptions & Pick;
+  [GOOGLE_GEMINI_3_FLASH_PREVIEW.id]: OpenRouterCommonOptions & Pick;
+  [MISTRALAI_MIXTRAL_8X7B_INSTRUCT.id]: OpenRouterCommonOptions & Pick;
+  [THEDRUMMER_SKYFALL_36B_V2.id]: OpenRouterCommonOptions & Pick;
+  [MINIMAX_MINIMAX_M1_80K.id]: OpenRouterCommonOptions & Pick;
+  [STEPFUN_AI_STEP3.id]: OpenRouterCommonOptions & Pick;
+  [NVIDIA_LLAMA_3_1_NEMOTRON_ULTRA_253B_V1.id]: OpenRouterCommonOptions & Pick;
+  [Z_AI_GLM_4_5V.id]: OpenRouterCommonOptions & Pick;
+  [MOONSHOTAI_KIMI_K2_0905_EXACTO.id]: OpenRouterCommonOptions & Pick;
+  [GOOGLE_GEMMA_2_27B_IT.id]: OpenRouterCommonOptions & Pick;
+  [SAO10K_L3_3_EURYALE_70B.id]: OpenRouterCommonOptions & Pick;
+  [SAO10K_L3_1_EURYALE_70B.id]: OpenRouterCommonOptions & Pick;
+  [AION_LABS_AION_1_0_MINI.id]: OpenRouterCommonOptions & Pick;
+  [DEEPSEEK_DEEPSEEK_R1.id]: OpenRouterCommonOptions & Pick;
+  [MANCER_WEAVER.id]: OpenRouterCommonOptions & Pick;
+  [ARCEE_AI_VIRTUOSO_LARGE.id]: OpenRouterCommonOptions & Pick;
+  [MORPH_MORPH_V3_FAST.id]: OpenRouterCommonOptions & Pick;
+  [ALFREDPROS_CODELLAMA_7B_INSTRUCT_SOLIDITY.id]: OpenRouterCommonOptions & Pick;
+  [ELEUTHERAI_LLEMMA_7B.id]: OpenRouterCommonOptions & Pick;
+  [AION_LABS_AION_RP_LLAMA_3_1_8B.id]: OpenRouterCommonOptions & Pick;
+  [AMAZON_NOVA_PRO_V1.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN_VL_MAX.id]: OpenRouterCommonOptions & Pick;
+  [ANTHROPIC_CLAUDE_3_5_HAIKU.id]: OpenRouterCommonOptions & Pick;
+  [RELACE_RELACE_APPLY_3.id]: OpenRouterCommonOptions & Pick;
+  [SWITCHPOINT_ROUTER.id]: OpenRouterCommonOptions & Pick;
+  [DEEPCOGITO_COGITO_V2_PREVIEW_LLAMA_70B.id]: OpenRouterCommonOptions & Pick;
+  [MORPH_MORPH_V3_LARGE.id]: OpenRouterCommonOptions & Pick;
+  [ARCEE_AI_MAESTRO_REASONING.id]: OpenRouterCommonOptions & Pick;
+  [PERPLEXITY_SONAR.id]: OpenRouterCommonOptions & Pick;
+  [NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B.id]: OpenRouterCommonOptions & Pick;
+  [NEVERSLEEP_NOROMAID_20B.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_3_5_TURBO_0613.id]: OpenRouterCommonOptions & Pick;
+  [RELACE_RELACE_SEARCH.id]: OpenRouterCommonOptions & Pick;
+  [NOUSRESEARCH_HERMES_4_405B.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN3_CODER_PLUS.id]: OpenRouterCommonOptions & Pick;
+  [ANTHROPIC_CLAUDE_HAIKU_4_5.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_O4_MINI_HIGH.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_O3_MINI_HIGH.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_O3_MINI.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_O4_MINI.id]: OpenRouterCommonOptions & Pick;
+  [NVIDIA_LLAMA_3_1_NEMOTRON_70B_INSTRUCT.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN3_MAX.id]: OpenRouterCommonOptions & Pick;
+  [DEEPCOGITO_COGITO_V2_1_671B.id]: OpenRouterCommonOptions & Pick;
+  [GOOGLE_GEMINI_2_5_PRO_PREVIEW.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_5_CHAT.id]: OpenRouterCommonOptions & Pick;
+  [GOOGLE_GEMINI_2_5_PRO_PREVIEW_05_06.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_5.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_5_CODEX.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_5_1_CODEX.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_5_1_CHAT.id]: OpenRouterCommonOptions & Pick;
+  [GOOGLE_GEMINI_2_5_PRO.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_5_1_CODEX_MAX.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_5_1.id]: OpenRouterCommonOptions & Pick;
+  [SAO10K_L3_EURYALE_70B.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_3_5_TURBO_INSTRUCT.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_CODEX_MINI.id]: OpenRouterCommonOptions & Pick;
+  [QWEN_QWEN_MAX.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_5_2.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_5_2_CHAT.id]: OpenRouterCommonOptions & Pick;
+  [MISTRALAI_MISTRAL_LARGE_2411.id]: OpenRouterCommonOptions & Pick;
+  [MISTRALAI_PIXTRAL_LARGE_2411.id]: OpenRouterCommonOptions & Pick;
+  [MISTRALAI_MIXTRAL_8X22B_INSTRUCT.id]: OpenRouterCommonOptions & Pick;
+  [MISTRALAI_MISTRAL_LARGE.id]: OpenRouterCommonOptions & Pick;
+  [MISTRALAI_MISTRAL_LARGE_2407.id]: OpenRouterCommonOptions
 & Pick;
+  [PERPLEXITY_SONAR_DEEP_RESEARCH.id]: OpenRouterCommonOptions & Pick;
+  [PERPLEXITY_SONAR_REASONING_PRO.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_4_1.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_O3.id]: OpenRouterCommonOptions & Pick;
+  [AI21_JAMBA_LARGE_1_7.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_O4_MINI_DEEP_RESEARCH.id]: OpenRouterCommonOptions & Pick;
+  [GOOGLE_GEMINI_3_PRO_IMAGE_PREVIEW.id]: OpenRouterCommonOptions & Pick;
+  [GOOGLE_GEMINI_3_PRO_PREVIEW.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_5_IMAGE_MINI.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_4O_SEARCH_PREVIEW.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_4O_AUDIO_PREVIEW.id]: OpenRouterCommonOptions & Pick;
+  [COHERE_COMMAND_A.id]: OpenRouterCommonOptions & Pick;
+  [INFLECTION_INFLECTION_3_PI.id]: OpenRouterCommonOptions & Pick;
+  [INFLECTION_INFLECTION_3_PRODUCTIVITY.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_4O_2024_11_20.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_4O_2024_08_06.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_4O.id]: OpenRouterCommonOptions & Pick;
+  [COHERE_COMMAND_R_PLUS_08_2024.id]: OpenRouterCommonOptions & Pick;
+  [AMAZON_NOVA_PREMIER_V1.id]: OpenRouterCommonOptions & Pick;
+  [SAO10K_L3_1_70B_HANAMI_X1.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_3_5_TURBO_16K.id]: OpenRouterCommonOptions & Pick;
+  [ANTHRACITE_ORG_MAGNUM_V4_72B.id]: OpenRouterCommonOptions & Pick;
+  [ANTHROPIC_CLAUDE_SONNET_4_5.id]: OpenRouterCommonOptions & Pick;
+  [X_AI_GROK_3.id]: OpenRouterCommonOptions & Pick;
+  [ANTHROPIC_CLAUDE_3_7_SONNET_THINKING.id]: OpenRouterCommonOptions & Pick;
+  [X_AI_GROK_4.id]: OpenRouterCommonOptions & Pick;
+  [PERPLEXITY_SONAR_PRO.id]: OpenRouterCommonOptions & Pick;
+  [X_AI_GROK_3_BETA.id]: OpenRouterCommonOptions & Pick;
+  [ANTHROPIC_CLAUDE_SONNET_4.id]: OpenRouterCommonOptions & Pick;
+  [ANTHROPIC_CLAUDE_3_7_SONNET.id]: OpenRouterCommonOptions & Pick;
+  [PERPLEXITY_SONAR_PRO_SEARCH.id]: OpenRouterCommonOptions & Pick;
+  [META_LLAMA_LLAMA_3_1_405B_INSTRUCT.id]: OpenRouterCommonOptions & Pick;
+  [DEEPCOGITO_COGITO_V2_PREVIEW_LLAMA_405B.id]: OpenRouterCommonOptions & Pick;
+  [META_LLAMA_LLAMA_3_1_405B.id]: OpenRouterCommonOptions & Pick;
+  [AION_LABS_AION_1_0.id]: OpenRouterCommonOptions & Pick;
+  [RAIFLE_SORCERERLM_8X22B.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_CHATGPT_4O_LATEST.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_4O_2024_05_13.id]: OpenRouterCommonOptions & Pick;
+  [ANTHROPIC_CLAUDE_OPUS_4_5.id]: OpenRouterCommonOptions & Pick;
+  [ALPINDALE_GOLIATH_120B.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_4O_EXTENDED.id]: OpenRouterCommonOptions & Pick;
+  [ANTHROPIC_CLAUDE_3_5_SONNET.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_5_IMAGE.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_4_1106_PREVIEW.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_4_TURBO.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_4_TURBO_PREVIEW.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_O3_DEEP_RESEARCH.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_O1.id]: OpenRouterCommonOptions & Pick;
+  [ANTHROPIC_CLAUDE_OPUS_4_1.id]: OpenRouterCommonOptions & Pick;
+  [ANTHROPIC_CLAUDE_OPUS_4.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_5_PRO.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_O3_PRO.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_5_2_PRO.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_4.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_GPT_4_0314.id]: OpenRouterCommonOptions & Pick;
+  [OPENAI_O1_PRO.id]:
OpenRouterCommonOptions & Pick;
+  [ALLENAI_OLMO_2_0325_32B_INSTRUCT.id]: OpenRouterCommonOptions & OpenRouterBaseOptions;
+  [META_LLAMA_LLAMA_3_2_90B_VISION_INSTRUCT.id]: OpenRouterCommonOptions & Pick;
+  [ANTHROPIC_CLAUDE_3_5_HAIKU_20241022.id]: OpenRouterCommonOptions & Pick;
+  "openrouter/auto": OpenRouterCommonOptions & OpenRouterBaseOptions;
 }
-export type OpenRouterModelInputModalitiesByName = {
-  [NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B_FREE.id]: ReadonlyArray<'text'>
-  [META_LLAMA_LLAMA_3_3_70B_INSTRUCT_FREE.id]: ReadonlyArray<'text'>
-  [QWEN_QWEN3_CODER_FREE.id]: ReadonlyArray<'text'>
-  [TNGTECH_DEEPSEEK_R1T2_CHIMERA_FREE.id]: ReadonlyArray<'text'>
-  [QWEN_QWEN3_4B_FREE.id]: ReadonlyArray<'text'>
-  [GOOGLE_GEMMA_3N_E2B_IT_FREE.id]: ReadonlyArray<'text'>
-  [META_LLAMA_LLAMA_3_2_3B_INSTRUCT_FREE.id]: ReadonlyArray<'text'>
-  [GOOGLE_GEMMA_3_12B_IT_FREE.id]: ReadonlyArray<'text' | 'image'>
-  [COGNITIVECOMPUTATIONS_DOLPHIN_MISTRAL_24B_VENICE_EDITION_FREE.id]: ReadonlyArray<'text'>
-  [Z_AI_GLM_4_5_AIR_FREE.id]: ReadonlyArray<'text'>
-  [MOONSHOTAI_KIMI_K2_FREE.id]: ReadonlyArray<'text'>
-  [GOOGLE_GEMMA_3_27B_IT_FREE.id]: ReadonlyArray<'text' | 'image'>
-  [GOOGLE_GEMMA_3_4B_IT_FREE.id]: ReadonlyArray<'text' | 'image'>
-  [NVIDIA_NEMOTRON_3_NANO_30B_A3B_FREE.id]: ReadonlyArray<'text'>
-  [TNGTECH_TNG_R1T_CHIMERA_FREE.id]: ReadonlyArray<'text'>
-  [NVIDIA_NEMOTRON_NANO_12B_V2_VL_FREE.id]: ReadonlyArray<
-    'image' | 'text' | 'video'
-  >
-  [ARCEE_AI_TRINITY_MINI_FREE.id]: ReadonlyArray<'text'>
-  [TNGTECH_DEEPSEEK_R1T_CHIMERA_FREE.id]: ReadonlyArray<'text'>
-  [GOOGLE_GEMMA_3N_E4B_IT_FREE.id]: ReadonlyArray<'text'>
-  [GOOGLE_GEMINI_2_0_FLASH_EXP_FREE.id]: ReadonlyArray<'text' | 'image'>
-  [OPENAI_GPT_OSS_120B_FREE.id]: ReadonlyArray<'text'>
-  [OPENAI_GPT_OSS_20B_FREE.id]: ReadonlyArray<'text'>
-  [MISTRALAI_MISTRAL_SMALL_3_1_24B_INSTRUCT_FREE.id]: ReadonlyArray<
-    'text' | 'image'
-  >
-  [MISTRALAI_DEVSTRAL_2512_FREE.id]: ReadonlyArray<'text'>
-  [XIAOMI_MIMO_V2_FLASH_FREE.id]: ReadonlyArray<'text'>
-  [ALLENAI_MOLMO_2_8B_FREE.id]: ReadonlyArray<'text' | 'image' | 'video'>
-  [DEEPSEEK_DEEPSEEK_R1_0528_FREE.id]: ReadonlyArray<'text'>
-  [NVIDIA_NEMOTRON_NANO_9B_V2_FREE.id]: ReadonlyArray<'text'>
-  [META_LLAMA_LLAMA_3_1_405B_INSTRUCT_FREE.id]: ReadonlyArray<'text'>
-  [QWEN_QWEN_2_5_VL_7B_INSTRUCT_FREE.id]: ReadonlyArray<'text' | 'image'>
-  [MISTRALAI_MISTRAL_7B_INSTRUCT_FREE.id]: ReadonlyArray<'text'>
-  [LIQUID_LFM_2_2_6B.id]: ReadonlyArray<'text'>
-  [LIQUID_LFM2_8B_A1B.id]: ReadonlyArray<'text'>
-  [IBM_GRANITE_GRANITE_4_0_H_MICRO.id]: ReadonlyArray<'text'>
-  [GOOGLE_GEMMA_3_4B_IT.id]: ReadonlyArray<'text' | 'image'>
-  [META_LLAMA_LLAMA_3_2_3B_INSTRUCT.id]: ReadonlyArray<'text'>
-  [MISTRALAI_MISTRAL_NEMO.id]: ReadonlyArray<'text'>
-  [GOOGLE_GEMMA_3N_E4B_IT.id]: ReadonlyArray<'text'>
-  [META_LLAMA_LLAMA_3_1_8B_INSTRUCT.id]: ReadonlyArray<'text'>
-  [META_LLAMA_LLAMA_GUARD_3_8B.id]: ReadonlyArray<'text'>
-  [OPENAI_GPT_OSS_20B.id]: ReadonlyArray<'text'>
-  [NOUSRESEARCH_DEEPHERMES_3_MISTRAL_24B_PREVIEW.id]: ReadonlyArray<'text'>
-  [NOUSRESEARCH_HERMES_2_PRO_LLAMA_3_8B.id]: ReadonlyArray<'text'>
-  [META_LLAMA_LLAMA_3_2_1B_INSTRUCT.id]: ReadonlyArray<'text'>
-  [MISTRALAI_MISTRAL_7B_INSTRUCT.id]: ReadonlyArray<'text'>
-  [META_LLAMA_LLAMA_3_8B_INSTRUCT.id]: ReadonlyArray<'text'>
-  [GOOGLE_GEMMA_2_9B_IT.id]: ReadonlyArray<'text'>
-  [QWEN_QWEN2_5_CODER_7B_INSTRUCT.id]: ReadonlyArray<'text'>
-  [GOOGLE_GEMMA_3_12B_IT.id]: ReadonlyArray<'text' | 'image'>
-  [MISTRALAI_MISTRAL_SMALL_3_1_24B_INSTRUCT.id]:
ReadonlyArray<'text' | 'image'>
-  [DEEPSEEK_DEEPSEEK_R1_DISTILL_LLAMA_70B.id]: ReadonlyArray<'text'>
-  [QWEN_QWEN_2_5_CODER_32B_INSTRUCT.id]: ReadonlyArray<'text'>
-  [MISTRALAI_MISTRAL_SMALL_24B_INSTRUCT_2501.id]: ReadonlyArray<'text'>
-  [QWEN_QWEN3_8B.id]: ReadonlyArray<'text'>
-  [AMAZON_NOVA_MICRO_V1.id]: ReadonlyArray<'text'>
-  [COHERE_COMMAND_R7B_12_2024.id]: ReadonlyArray<'text'>
-  [OPENAI_GPT_OSS_120B.id]: ReadonlyArray<'text'>
-  [OPENAI_GPT_OSS_120B_EXACTO.id]: ReadonlyArray<'text'>
-  [MISTRALAI_MINISTRAL_3B.id]: ReadonlyArray<'text'>
-  [SAO10K_L3_LUNARIS_8B.id]: ReadonlyArray<'text'>
-  [QWEN_QWEN_2_5_7B_INSTRUCT.id]: ReadonlyArray<'text'>
-  [GOOGLE_GEMMA_3_27B_IT.id]: ReadonlyArray<'text' | 'image'>
-  [NVIDIA_NEMOTRON_NANO_9B_V2.id]: ReadonlyArray<'text'>
-  [ARCEE_AI_TRINITY_MINI.id]: ReadonlyArray<'text'>
-  [META_LLAMA_LLAMA_3_2_11B_VISION_INSTRUCT.id]: ReadonlyArray<'text' | 'image'>
-  [MICROSOFT_PHI_4_MULTIMODAL_INSTRUCT.id]: ReadonlyArray<'text' | 'image'>
-  [QWEN_QWEN_TURBO.id]: ReadonlyArray<'text'>
-  [Z_AI_GLM_4_5_AIR.id]: ReadonlyArray<'text'>
-  [QWEN_QWEN2_5_VL_32B_INSTRUCT.id]: ReadonlyArray<'text' | 'image'>
-  [QWEN_QWEN3_14B.id]: ReadonlyArray<'text'>
-  [MISTRALAI_DEVSTRAL_2512.id]: ReadonlyArray<'text'>
-  [OPENAI_GPT_5_NANO.id]: ReadonlyArray<'text' | 'image' | 'document'>
-  [QWEN_QWEN3_30B_A3B_THINKING_2507.id]: ReadonlyArray<'text'>
-  [GRYPHE_MYTHOMAX_L2_13B.id]: ReadonlyArray<'text'>
-  [DEEPSEEK_DEEPSEEK_R1_0528_QWEN3_8B.id]: ReadonlyArray<'text'>
-  [MISTRALAI_DEVSTRAL_SMALL_2505.id]: ReadonlyArray<'text'>
-  [MICROSOFT_PHI_4.id]: ReadonlyArray<'text'>
-  [MISTRALAI_MISTRAL_SMALL_3_2_24B_INSTRUCT.id]: ReadonlyArray<'image' | 'text'>
-  [QWEN_QWEN3_30B_A3B.id]: ReadonlyArray<'text'>
-  [AMAZON_NOVA_LITE_V1.id]: ReadonlyArray<'text' | 'image'>
-  [NVIDIA_NEMOTRON_3_NANO_30B_A3B.id]: ReadonlyArray<'text'>
-  [QWEN_QWEN3_CODER_30B_A3B_INSTRUCT.id]: ReadonlyArray<'text'>
-  [MISTRALAI_DEVSTRAL_SMALL.id]: ReadonlyArray<'text'>
-  [BAIDU_ERNIE_4_5_21B_A3B_THINKING.id]: ReadonlyArray<'text'>
-  [BAIDU_ERNIE_4_5_21B_A3B.id]: ReadonlyArray<'text'>
-  [MICROSOFT_PHI_4_REASONING_PLUS.id]: ReadonlyArray<'text'>
-  [QWEN_QWEN3_235B_A22B_2507.id]: ReadonlyArray<'text'>
-  [OPENAI_GPT_OSS_SAFEGUARD_20B.id]: ReadonlyArray<'text'>
-  [BYTEDANCE_SEED_SEED_1_6_FLASH.id]: ReadonlyArray<'image' | 'text' | 'video'>
-  [GOOGLE_GEMINI_2_0_FLASH_LITE_001.id]: ReadonlyArray<
-    'text' | 'image' | 'document' | 'audio' | 'video'
-  >
-  [QWEN_QWEN3_32B.id]: ReadonlyArray<'text'>
-  [META_LLAMA_LLAMA_4_SCOUT.id]: ReadonlyArray<'text' | 'image'>
-  [QWEN_QWEN3_30B_A3B_INSTRUCT_2507.id]: ReadonlyArray<'text'>
-  [QWEN_QWEN3_VL_8B_INSTRUCT.id]: ReadonlyArray<'image' | 'text'>
-  [ALIBABA_TONGYI_DEEPRESEARCH_30B_A3B.id]: ReadonlyArray<'text'>
-  [NEVERSLEEP_LLAMA_3_1_LUMIMAID_8B.id]: ReadonlyArray<'text'>
-  [QWEN_QWEN3_NEXT_80B_A3B_INSTRUCT.id]: ReadonlyArray<'text'>
-  [Z_AI_GLM_4_32B.id]: ReadonlyArray<'text'>
-  [MISTRALAI_PIXTRAL_12B.id]: ReadonlyArray<'text' | 'image'>
-  [MISTRALAI_MINISTRAL_3B_2512.id]: ReadonlyArray<'text' | 'image'>
-  [MISTRAL_MINISTRAL_8B.id]: ReadonlyArray<'text'>
-  [MISTRALAI_MINISTRAL_8B.id]: ReadonlyArray<'text'>
-  [ALLENAI_OLMO_3_7B_INSTRUCT.id]: ReadonlyArray<'text'>
-  [BYTEDANCE_UI_TARS_1_5_7B.id]: ReadonlyArray<'image' | 'text'>
-  [MISTRALAI_MISTRAL_SMALL_CREATIVE.id]: ReadonlyArray<'text'>
-  [MISTRALAI_VOXTRAL_SMALL_24B_2507.id]: ReadonlyArray<'text' | 'audio'>
-  [META_LLAMA_LLAMA_3_3_70B_INSTRUCT.id]: ReadonlyArray<'text'>
-  [OPENGVLAB_INTERNVL3_78B.id]: ReadonlyArray<'image' | 'text'>
-  [OPENAI_GPT_4_1_NANO.id]: ReadonlyArray<'image' | 'text' | 'document'>
-  [GOOGLE_GEMINI_2_0_FLASH_001.id]: ReadonlyArray<
-    'text' | 'image' | 'document' | 'audio' | 'video'
-  >
-  [GOOGLE_GEMINI_2_5_FLASH_LITE_PREVIEW_09_2025.id]: ReadonlyArray<
-    'text' | 'image' | 'document' | 'audio' | 'video'
-  >
-  [GOOGLE_GEMINI_2_5_FLASH_LITE.id]: ReadonlyArray<
-    'text' | 'image' | 'document' | 'audio' | 'video'
-  >
-  [NVIDIA_LLAMA_3_3_NEMOTRON_SUPER_49B_V1_5.id]: ReadonlyArray<'text'>
-  [QWEN_QWEN3_VL_4B_INSTRUCT.id]: ReadonlyArray<'text' | 'image'>
-  [MISTRALAI_MISTRAL_7B_INSTRUCT_V0_1.id]: ReadonlyArray<'text'>
-  [NOUSRESEARCH_HERMES_4_70B.id]: ReadonlyArray<'text'>
-  [QWEN_QWEN3_235B_A22B_THINKING_2507.id]: ReadonlyArray<'text'>
-  [ALLENAI_OLMO_3_7B_THINK.id]: ReadonlyArray<'text'>
-  [QWEN_QWEN_2_5_72B_INSTRUCT.id]: ReadonlyArray<'text'>
-  [BAIDU_ERNIE_4_5_VL_28B_A3B.id]: ReadonlyArray<'text' | 'image'>
-  [TENCENT_HUNYUAN_A13B_INSTRUCT.id]: ReadonlyArray<'text'>
-  [DEEPSEEK_DEEPSEEK_R1_DISTILL_QWEN_14B.id]: ReadonlyArray<'text'>
-  [ESSENTIALAI_RNJ_1_INSTRUCT.id]: ReadonlyArray<'text'>
-  [MISTRALAI_MINISTRAL_8B_2512.id]: ReadonlyArray<'text' | 'image'>
-  [QWEN_QWQ_32B.id]: ReadonlyArray<'text'>
-  [ALLENAI_OLMO_3_1_32B_THINK.id]: ReadonlyArray<'text'>
-  [ALLENAI_OLMO_3_32B_THINK.id]: ReadonlyArray<'text'>
-  [QWEN_QWEN3_VL_30B_A3B_INSTRUCT.id]: ReadonlyArray<'text' | 'image'>
-  [META_LLAMA_LLAMA_4_MAVERICK.id]: ReadonlyArray<'text' | 'image'>
-  [OPENAI_GPT_4O_MINI_SEARCH_PREVIEW.id]: ReadonlyArray<'text'>
-  [OPENAI_GPT_4O_MINI_2024_07_18.id]: ReadonlyArray<
-    'text' | 'image' | 'document'
-  >
-  [OPENAI_GPT_4O_MINI.id]: ReadonlyArray<'text' | 'image' | 'document'>
-  [COHERE_COMMAND_R_08_2024.id]: ReadonlyArray<'text'>
-  [QWEN_QWEN2_5_VL_72B_INSTRUCT.id]: ReadonlyArray<'text' | 'image'>
-  [DEEPSEEK_DEEPSEEK_CHAT_V3_1.id]: ReadonlyArray<'text'>
-  [QWEN_QWEN3_NEXT_80B_A3B_THINKING.id]: ReadonlyArray<'text'>
-  [THEDRUMMER_ROCINANTE_12B.id]: ReadonlyArray<'text'>
-  [ARCEE_AI_SPOTLIGHT.id]: ReadonlyArray<'image' | 'text'>
-  [META_LLAMA_LLAMA_GUARD_4_12B.id]: ReadonlyArray<'image' | 'text'>
-  [QWEN_QWEN3_235B_A22B.id]: ReadonlyArray<'text'>
-  [DEEPCOGITO_COGITO_V2_PREVIEW_LLAMA_109B_MOE.id]: ReadonlyArray<
-    'image' | 'text'
-  >
-  [QWEN_QWEN3_VL_8B_THINKING.id]: ReadonlyArray<'image' | 'text'>
-  [DEEPSEEK_DEEPSEEK_CHAT_V3_0324.id]: ReadonlyArray<'text'>
-  [META_LLAMA_LLAMA_GUARD_2_8B.id]: ReadonlyArray<'text'>
-  [MISTRALAI_MISTRAL_7B_INSTRUCT_V0_3.id]: ReadonlyArray<'text'>
-  [QWEN_QWEN_2_5_VL_7B_INSTRUCT.id]: ReadonlyArray<'text' | 'image'>
-  [MISTRALAI_MINISTRAL_14B_2512.id]: ReadonlyArray<'text' | 'image'>
-  [MISTRALAI_MISTRAL_7B_INSTRUCT_V0_2.id]: ReadonlyArray<'text'>
-  [AI21_JAMBA_MINI_1_7.id]: ReadonlyArray<'text'>
-  [X_AI_GROK_4_1_FAST.id]: ReadonlyArray<'text' | 'image'>
-  [X_AI_GROK_4_FAST.id]: ReadonlyArray<'text' | 'image'>
-  [NVIDIA_NEMOTRON_NANO_12B_V2_VL.id]: ReadonlyArray<'image' | 'text' | 'video'>
-  [MISTRALAI_MISTRAL_SABA.id]: ReadonlyArray<'text'>
-  [ALLENAI_OLMO_3_1_32B_INSTRUCT.id]: ReadonlyArray<'text'>
-  [MEITUAN_LONGCAT_FLASH_CHAT.id]: ReadonlyArray<'text'>
-  [QWEN_QWEN3_VL_30B_A3B_THINKING.id]: ReadonlyArray<'text' | 'image'>
-  [MINIMAX_MINIMAX_M2.id]: ReadonlyArray<'text'>
-  [MINIMAX_MINIMAX_01.id]: ReadonlyArray<'text' | 'image'>
-  [PRIME_INTELLECT_INTELLECT_3.id]: ReadonlyArray<'text'>
-  [QWEN_QWEN3_VL_235B_A22B_INSTRUCT.id]: ReadonlyArray<'text' | 'image'>
-  [X_AI_GROK_CODE_FAST_1.id]: ReadonlyArray<'text'>
-  [KWAIPILOT_KAT_CODER_PRO.id]: ReadonlyArray<'text'>
-  [DEEPSEEK_DEEPSEEK_V3_2_EXP.id]: ReadonlyArray<'text'>
-  [QWEN_QWEN_VL_PLUS.id]: ReadonlyArray<'text' | 'image'>
-  [DEEPSEEK_DEEPSEEK_V3_1_TERMINUS_EXACTO.id]: ReadonlyArray<'text'>
-  [DEEPSEEK_DEEPSEEK_V3_1_TERMINUS.id]: ReadonlyArray<'text'>
-  [QWEN_QWEN3_CODER.id]: ReadonlyArray<'text'>
-  [QWEN_QWEN3_CODER_EXACTO.id]: ReadonlyArray<'text'>
-  [MISTRALAI_MISTRAL_TINY.id]: ReadonlyArray<'text'>
-  [DEEPSEEK_DEEPSEEK_V3_2.id]: ReadonlyArray<'text'>
-  [TNGTECH_DEEPSEEK_R1T2_CHIMERA.id]: ReadonlyArray<'text'>
-  [TNGTECH_TNG_R1T_CHIMERA.id]: ReadonlyArray<'text'>
-  [INCEPTION_MERCURY.id]: ReadonlyArray<'text'>
-  [INCEPTION_MERCURY_CODER.id]: ReadonlyArray<'text'>
-  [ANTHROPIC_CLAUDE_3_HAIKU.id]: ReadonlyArray<'text' | 'image'>
-  [OPENAI_GPT_5_MINI.id]: ReadonlyArray<'text' | 'image' | 'document'>
-  [OPENAI_GPT_5_1_CODEX_MINI.id]: ReadonlyArray<'image' | 'text'>
-  [BYTEDANCE_SEED_SEED_1_6.id]: ReadonlyArray<'image' | 'text' | 'video'>
-  [DEEPSEEK_DEEPSEEK_R1_DISTILL_QWEN_32B.id]: ReadonlyArray<'text'>
-  [DEEPSEEK_DEEPSEEK_V3_2_SPECIALE.id]: ReadonlyArray<'text'>
-  [NEX_AGI_DEEPSEEK_V3_1_NEX_N1.id]: ReadonlyArray<'text'>
-  [MINIMAX_MINIMAX_M2_1.id]: ReadonlyArray<'text'>
-  [BAIDU_ERNIE_4_5_300B_A47B.id]: ReadonlyArray<'text'>
-  [MOONSHOTAI_KIMI_DEV_72B.id]: ReadonlyArray<'text'>
-  [NOUSRESEARCH_HERMES_3_LLAMA_3_1_70B.id]: ReadonlyArray<'text'>
-  [META_LLAMA_LLAMA_3_70B_INSTRUCT.id]: ReadonlyArray<'text'>
-  [THEDRUMMER_CYDONIA_24B_V4_1.id]: ReadonlyArray<'text'>
-  [X_AI_GROK_3_MINI.id]: ReadonlyArray<'text'>
-  [X_AI_GROK_3_MINI_BETA.id]: ReadonlyArray<'text'>
-  [Z_AI_GLM_4_6V.id]: ReadonlyArray<'image' | 'text' | 'video'>
-  [MISTRALAI_CODESTRAL_2508.id]: ReadonlyArray<'text'>
-  [DEEPSEEK_DEEPSEEK_CHAT.id]: ReadonlyArray<'text'>
-  [TNGTECH_DEEPSEEK_R1T_CHIMERA.id]: ReadonlyArray<'text'>
-  [QWEN_QWEN3_CODER_FLASH.id]: ReadonlyArray<'text'>
-  [GOOGLE_GEMINI_2_5_FLASH_IMAGE_PREVIEW.id]: ReadonlyArray<'image' | 'text'>
-  [GOOGLE_GEMINI_2_5_FLASH_PREVIEW_09_2025.id]: ReadonlyArray<
-    'image' | 'document' | 'text' | 'audio' | 'video'
-  >
-  [GOOGLE_GEMINI_2_5_FLASH_IMAGE.id]: ReadonlyArray<'image' | 'text'>
-  [AMAZON_NOVA_2_LITE_V1.id]: ReadonlyArray<
-    'text' | 'image' | 'video' | 'document'
-  >
-  [GOOGLE_GEMINI_2_5_FLASH.id]: ReadonlyArray<
-    'document' | 'image' | 'text' | 'audio' | 'video'
-  >
-  [Z_AI_GLM_4_6.id]: ReadonlyArray<'text'>
-  [Z_AI_GLM_4_5.id]: ReadonlyArray<'text'>
-  [MOONSHOTAI_KIMI_K2_0905.id]: ReadonlyArray<'text'>
-  [META_LLAMA_LLAMA_3_1_70B_INSTRUCT.id]: ReadonlyArray<'text'>
-  [THEDRUMMER_UNSLOPNEMO_12B.id]: ReadonlyArray<'text'>
-  [QWEN_QWEN_PLUS_2025_07_28.id]: ReadonlyArray<'text'>
-  [QWEN_QWEN_PLUS.id]: ReadonlyArray<'text'>
-  [Z_AI_GLM_4_7.id]: ReadonlyArray<'text'>
-  [OPENAI_GPT_4_1_MINI.id]: ReadonlyArray<'image' | 'text' | 'document'>
-  [MOONSHOTAI_KIMI_K2_THINKING.id]: ReadonlyArray<'text'>
-  [MISTRALAI_DEVSTRAL_MEDIUM.id]: ReadonlyArray<'text'>
-  [MISTRALAI_MISTRAL_MEDIUM_3.id]: ReadonlyArray<'text' | 'image'>
-  [MISTRALAI_MISTRAL_MEDIUM_3_1.id]: ReadonlyArray<'text' | 'image'>
-  [MINIMAX_MINIMAX_M1.id]: ReadonlyArray<'text'>
-  [QWEN_QWEN_PLUS_2025_07_28_THINKING.id]: ReadonlyArray<'text'>
-  [BAIDU_ERNIE_4_5_VL_424B_A47B.id]: ReadonlyArray<'image' | 'text'>
-  [Z_AI_GLM_4_6_EXACTO.id]: ReadonlyArray<'text'>
-  [UNDI95_REMM_SLERP_L2_13B.id]: ReadonlyArray<'text'>
-  [DEEPSEEK_DEEPSEEK_R1_0528.id]: ReadonlyArray<'text'>
-  [QWEN_QWEN3_VL_235B_A22B_THINKING.id]: ReadonlyArray<'text' | 'image'>
-  [MICROSOFT_WIZARDLM_2_8X22B.id]: ReadonlyArray<'text'>
-  [ARCEE_AI_CODER_LARGE.id]:
ReadonlyArray<'text'>
-  [MISTRALAI_MISTRAL_LARGE_2512.id]: ReadonlyArray<'text' | 'image'>
-  [QWEN_QWEN3_VL_32B_INSTRUCT.id]: ReadonlyArray<'text' | 'image'>
-  [OPENAI_GPT_3_5_TURBO.id]: ReadonlyArray<'text'>
-  [DEEPSEEK_DEEPSEEK_PROVER_V2.id]: ReadonlyArray<'text'>
-  [MOONSHOTAI_KIMI_K2.id]: ReadonlyArray<'text'>
-  [GOOGLE_GEMINI_3_FLASH_PREVIEW.id]: ReadonlyArray<
-    'text' | 'image' | 'document' | 'audio' | 'video'
-  >
-  [MISTRALAI_MIXTRAL_8X7B_INSTRUCT.id]: ReadonlyArray<'text'>
-  [THEDRUMMER_SKYFALL_36B_V2.id]: ReadonlyArray<'text'>
-  [MINIMAX_MINIMAX_M1_80K.id]: ReadonlyArray<'text'>
-  [STEPFUN_AI_STEP3.id]: ReadonlyArray<'image' | 'text'>
-  [NVIDIA_LLAMA_3_1_NEMOTRON_ULTRA_253B_V1.id]: ReadonlyArray<'text'>
-  [Z_AI_GLM_4_5V.id]: ReadonlyArray<'text' | 'image'>
-  [MOONSHOTAI_KIMI_K2_0905_EXACTO.id]: ReadonlyArray<'text'>
-  [GOOGLE_GEMMA_2_27B_IT.id]: ReadonlyArray<'text'>
-  [SAO10K_L3_3_EURYALE_70B.id]: ReadonlyArray<'text'>
-  [SAO10K_L3_1_EURYALE_70B.id]: ReadonlyArray<'text'>
-  [AION_LABS_AION_1_0_MINI.id]: ReadonlyArray<'text'>
-  [DEEPSEEK_DEEPSEEK_R1.id]: ReadonlyArray<'text'>
-  [MANCER_WEAVER.id]: ReadonlyArray<'text'>
-  [ARCEE_AI_VIRTUOSO_LARGE.id]: ReadonlyArray<'text'>
-  [MORPH_MORPH_V3_FAST.id]: ReadonlyArray<'text'>
-  [ALFREDPROS_CODELLAMA_7B_INSTRUCT_SOLIDITY.id]: ReadonlyArray<'text'>
-  [ELEUTHERAI_LLEMMA_7B.id]: ReadonlyArray<'text'>
-  [AION_LABS_AION_RP_LLAMA_3_1_8B.id]: ReadonlyArray<'text'>
-  [AMAZON_NOVA_PRO_V1.id]: ReadonlyArray<'text' | 'image'>
-  [QWEN_QWEN_VL_MAX.id]: ReadonlyArray<'text' | 'image'>
-  [ANTHROPIC_CLAUDE_3_5_HAIKU.id]: ReadonlyArray<'text' | 'image'>
-  [RELACE_RELACE_APPLY_3.id]: ReadonlyArray<'text'>
-  [SWITCHPOINT_ROUTER.id]: ReadonlyArray<'text'>
-  [DEEPCOGITO_COGITO_V2_PREVIEW_LLAMA_70B.id]: ReadonlyArray<'text'>
-  [MORPH_MORPH_V3_LARGE.id]: ReadonlyArray<'text'>
-  [ARCEE_AI_MAESTRO_REASONING.id]: ReadonlyArray<'text'>
-  [PERPLEXITY_SONAR.id]: ReadonlyArray<'text' | 'image'>
-  [NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B.id]: ReadonlyArray<'text'>
-  [NEVERSLEEP_NOROMAID_20B.id]: ReadonlyArray<'text'>
-  [OPENAI_GPT_3_5_TURBO_0613.id]: ReadonlyArray<'text'>
-  [RELACE_RELACE_SEARCH.id]: ReadonlyArray<'text'>
-  [NOUSRESEARCH_HERMES_4_405B.id]: ReadonlyArray<'text'>
-  [QWEN_QWEN3_CODER_PLUS.id]: ReadonlyArray<'text'>
-  [ANTHROPIC_CLAUDE_HAIKU_4_5.id]: ReadonlyArray<'image' | 'text'>
-  [OPENAI_O4_MINI_HIGH.id]: ReadonlyArray<'image' | 'text' | 'document'>
-  [OPENAI_O3_MINI_HIGH.id]: ReadonlyArray<'text' | 'document'>
-  [OPENAI_O3_MINI.id]: ReadonlyArray<'text' | 'document'>
-  [OPENAI_O4_MINI.id]: ReadonlyArray<'image' | 'text' | 'document'>
-  [NVIDIA_LLAMA_3_1_NEMOTRON_70B_INSTRUCT.id]: ReadonlyArray<'text'>
-  [QWEN_QWEN3_MAX.id]: ReadonlyArray<'text'>
-  [DEEPCOGITO_COGITO_V2_1_671B.id]: ReadonlyArray<'text'>
-  [GOOGLE_GEMINI_2_5_PRO_PREVIEW.id]: ReadonlyArray<
-    'document' | 'image' | 'text' | 'audio'
-  >
-  [OPENAI_GPT_5_CHAT.id]: ReadonlyArray<'document' | 'image' | 'text'>
-  [GOOGLE_GEMINI_2_5_PRO_PREVIEW_05_06.id]: ReadonlyArray<
-    'text' | 'image' | 'document' | 'audio' | 'video'
-  >
-  [OPENAI_GPT_5.id]: ReadonlyArray<'text' | 'image' | 'document'>
-  [OPENAI_GPT_5_CODEX.id]: ReadonlyArray<'text' | 'image'>
-  [OPENAI_GPT_5_1_CODEX.id]: ReadonlyArray<'text' | 'image'>
-  [OPENAI_GPT_5_1_CHAT.id]: ReadonlyArray<'document' | 'image' | 'text'>
-  [GOOGLE_GEMINI_2_5_PRO.id]: ReadonlyArray<
-    'text' | 'image' | 'document' | 'audio' | 'video'
-  >
-  [OPENAI_GPT_5_1_CODEX_MAX.id]: ReadonlyArray<'text' | 'image'>
-  [OPENAI_GPT_5_1.id]: ReadonlyArray<'image' | 'text'
| 'document'>
-  [SAO10K_L3_EURYALE_70B.id]: ReadonlyArray<'text'>
-  [OPENAI_GPT_3_5_TURBO_INSTRUCT.id]: ReadonlyArray<'text'>
-  [OPENAI_CODEX_MINI.id]: ReadonlyArray<'image' | 'text'>
-  [QWEN_QWEN_MAX.id]: ReadonlyArray<'text'>
-  [OPENAI_GPT_5_2.id]: ReadonlyArray<'document' | 'image' | 'text'>
-  [OPENAI_GPT_5_2_CHAT.id]: ReadonlyArray<'document' | 'image' | 'text'>
-  [MISTRALAI_MISTRAL_LARGE_2411.id]: ReadonlyArray<'text'>
-  [MISTRALAI_PIXTRAL_LARGE_2411.id]: ReadonlyArray<'text' | 'image'>
-  [MISTRALAI_MIXTRAL_8X22B_INSTRUCT.id]: ReadonlyArray<'text'>
-  [MISTRALAI_MISTRAL_LARGE.id]: ReadonlyArray<'text'>
-  [MISTRALAI_MISTRAL_LARGE_2407.id]: ReadonlyArray<'text'>
-  [PERPLEXITY_SONAR_DEEP_RESEARCH.id]: ReadonlyArray<'text'>
-  [PERPLEXITY_SONAR_REASONING_PRO.id]: ReadonlyArray<'text' | 'image'>
-  [OPENAI_GPT_4_1.id]: ReadonlyArray<'image' | 'text' | 'document'>
-  [OPENAI_O3.id]: ReadonlyArray<'image' | 'text' | 'document'>
-  [AI21_JAMBA_LARGE_1_7.id]: ReadonlyArray<'text'>
-  [OPENAI_O4_MINI_DEEP_RESEARCH.id]: ReadonlyArray<
-    'document' | 'image' | 'text'
-  >
-  [GOOGLE_GEMINI_3_PRO_IMAGE_PREVIEW.id]: ReadonlyArray<'image' | 'text'>
-  [GOOGLE_GEMINI_3_PRO_PREVIEW.id]: ReadonlyArray<
-    'text' | 'image' | 'document' | 'audio' | 'video'
-  >
-  [OPENAI_GPT_5_IMAGE_MINI.id]: ReadonlyArray<'document' | 'image' | 'text'>
-  [OPENAI_GPT_4O_SEARCH_PREVIEW.id]: ReadonlyArray<'text'>
-  [OPENAI_GPT_4O_AUDIO_PREVIEW.id]: ReadonlyArray<'audio' | 'text'>
-  [COHERE_COMMAND_A.id]: ReadonlyArray<'text'>
-  [INFLECTION_INFLECTION_3_PI.id]: ReadonlyArray<'text'>
-  [INFLECTION_INFLECTION_3_PRODUCTIVITY.id]: ReadonlyArray<'text'>
-  [OPENAI_GPT_4O_2024_11_20.id]: ReadonlyArray<'text' | 'image' | 'document'>
-  [OPENAI_GPT_4O_2024_08_06.id]: ReadonlyArray<'text' | 'image' | 'document'>
-  [OPENAI_GPT_4O.id]: ReadonlyArray<'text' | 'image' | 'document'>
-  [COHERE_COMMAND_R_PLUS_08_2024.id]: ReadonlyArray<'text'>
-  [AMAZON_NOVA_PREMIER_V1.id]: ReadonlyArray<'text' | 'image'>
-  [SAO10K_L3_1_70B_HANAMI_X1.id]: ReadonlyArray<'text'>
-  [OPENAI_GPT_3_5_TURBO_16K.id]: ReadonlyArray<'text'>
-  [ANTHRACITE_ORG_MAGNUM_V4_72B.id]: ReadonlyArray<'text'>
-  [ANTHROPIC_CLAUDE_SONNET_4_5.id]: ReadonlyArray<'text' | 'image' | 'document'>
-  [X_AI_GROK_3.id]: ReadonlyArray<'text'>
-  [ANTHROPIC_CLAUDE_3_7_SONNET_THINKING.id]: ReadonlyArray<
-    'text' | 'image' | 'document'
-  >
-  [X_AI_GROK_4.id]: ReadonlyArray<'image' | 'text'>
-  [PERPLEXITY_SONAR_PRO.id]: ReadonlyArray<'text' | 'image'>
-  [X_AI_GROK_3_BETA.id]: ReadonlyArray<'text'>
-  [ANTHROPIC_CLAUDE_SONNET_4.id]: ReadonlyArray<'image' | 'text' | 'document'>
-  [ANTHROPIC_CLAUDE_3_7_SONNET.id]: ReadonlyArray<'text' | 'image' | 'document'>
-  [PERPLEXITY_SONAR_PRO_SEARCH.id]: ReadonlyArray<'text' | 'image'>
-  [META_LLAMA_LLAMA_3_1_405B_INSTRUCT.id]: ReadonlyArray<'text'>
-  [DEEPCOGITO_COGITO_V2_PREVIEW_LLAMA_405B.id]: ReadonlyArray<'text'>
-  [META_LLAMA_LLAMA_3_1_405B.id]: ReadonlyArray<'text'>
-  [AION_LABS_AION_1_0.id]: ReadonlyArray<'text'>
-  [RAIFLE_SORCERERLM_8X22B.id]: ReadonlyArray<'text'>
-  [OPENAI_CHATGPT_4O_LATEST.id]: ReadonlyArray<'text' | 'image'>
-  [OPENAI_GPT_4O_2024_05_13.id]: ReadonlyArray<'text' | 'image' | 'document'>
-  [ANTHROPIC_CLAUDE_OPUS_4_5.id]: ReadonlyArray<'document' | 'image' | 'text'>
-  [ALPINDALE_GOLIATH_120B.id]: ReadonlyArray<'text'>
-  [OPENAI_GPT_4O_EXTENDED.id]: ReadonlyArray<'text' | 'image' | 'document'>
-  [ANTHROPIC_CLAUDE_3_5_SONNET.id]: ReadonlyArray<'text' | 'image' | 'document'>
-  [OPENAI_GPT_5_IMAGE.id]: ReadonlyArray<'image' | 'text' | 'document'>
-  [OPENAI_GPT_4_1106_PREVIEW.id]: ReadonlyArray<'text'>
-  [OPENAI_GPT_4_TURBO.id]: ReadonlyArray<'text' | 'image'>
-  [OPENAI_GPT_4_TURBO_PREVIEW.id]: ReadonlyArray<'text'>
-  [OPENAI_O3_DEEP_RESEARCH.id]: ReadonlyArray<'image' | 'text' | 'document'>
-  [OPENAI_O1.id]: ReadonlyArray<'text' | 'image' | 'document'>
-  [ANTHROPIC_CLAUDE_OPUS_4_1.id]: ReadonlyArray<'image' | 'text' | 'document'>
-  [ANTHROPIC_CLAUDE_OPUS_4.id]: ReadonlyArray<'image' | 'text' | 'document'>
-  [OPENAI_GPT_5_PRO.id]: ReadonlyArray<'image' | 'text' | 'document'>
-  [OPENAI_O3_PRO.id]: ReadonlyArray<'text' | 'document' | 'image'>
-  [OPENAI_GPT_5_2_PRO.id]: ReadonlyArray<'image' | 'text' | 'document'>
-  [OPENAI_GPT_4.id]: ReadonlyArray<'text'>
-  [OPENAI_GPT_4_0314.id]: ReadonlyArray<'text'>
-  [OPENAI_O1_PRO.id]: ReadonlyArray<'text' | 'image' | 'document'>
-  [ALLENAI_OLMO_2_0325_32B_INSTRUCT.id]: ReadonlyArray<'text'>
-  [META_LLAMA_LLAMA_3_2_90B_VISION_INSTRUCT.id]: ReadonlyArray<'text' | 'image'>
-  [ANTHROPIC_CLAUDE_3_5_HAIKU_20241022.id]: ReadonlyArray<
-    'text' | 'image' | 'document'
-  >
-  'openrouter/auto': ReadonlyArray<
-    'text' | 'image' | 'audio' | 'video' | 'document'
-  >
+
+export type OpenRouterModelInputModalitiesByName = {
+  [NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B_FREE.id]: ReadonlyArray<'text'>;
+  [META_LLAMA_LLAMA_3_3_70B_INSTRUCT_FREE.id]: ReadonlyArray<'text'>;
+  [QWEN_QWEN3_CODER_FREE.id]: ReadonlyArray<'text'>;
+  [TNGTECH_DEEPSEEK_R1T2_CHIMERA_FREE.id]: ReadonlyArray<'text'>;
+  [QWEN_QWEN3_4B_FREE.id]: ReadonlyArray<'text'>;
+  [GOOGLE_GEMMA_3N_E2B_IT_FREE.id]: ReadonlyArray<'text'>;
+  [META_LLAMA_LLAMA_3_2_3B_INSTRUCT_FREE.id]: ReadonlyArray<'text'>;
+  [GOOGLE_GEMMA_3_12B_IT_FREE.id]: ReadonlyArray<'text' | 'image'>;
+  [COGNITIVECOMPUTATIONS_DOLPHIN_MISTRAL_24B_VENICE_EDITION_FREE.id]: ReadonlyArray<'text'>;
+  [Z_AI_GLM_4_5_AIR_FREE.id]: ReadonlyArray<'text'>;
+  [MOONSHOTAI_KIMI_K2_FREE.id]: ReadonlyArray<'text'>;
+  [GOOGLE_GEMMA_3_27B_IT_FREE.id]: ReadonlyArray<'text' | 'image'>;
+  [GOOGLE_GEMMA_3_4B_IT_FREE.id]: ReadonlyArray<'text' | 'image'>;
+  [NVIDIA_NEMOTRON_3_NANO_30B_A3B_FREE.id]: ReadonlyArray<'text'>;
+  [TNGTECH_TNG_R1T_CHIMERA_FREE.id]: ReadonlyArray<'text'>;
+  [NVIDIA_NEMOTRON_NANO_12B_V2_VL_FREE.id]: ReadonlyArray<'image' | 'text' | 'video'>;
+  [ARCEE_AI_TRINITY_MINI_FREE.id]: ReadonlyArray<'text'>;
+  [TNGTECH_DEEPSEEK_R1T_CHIMERA_FREE.id]: ReadonlyArray<'text'>;
+  [GOOGLE_GEMMA_3N_E4B_IT_FREE.id]: ReadonlyArray<'text'>;
+  [GOOGLE_GEMINI_2_0_FLASH_EXP_FREE.id]: ReadonlyArray<'text' | 'image'>;
+  [OPENAI_GPT_OSS_120B_FREE.id]: ReadonlyArray<'text'>;
+  [OPENAI_GPT_OSS_20B_FREE.id]: ReadonlyArray<'text'>;
+  [MISTRALAI_MISTRAL_SMALL_3_1_24B_INSTRUCT_FREE.id]: ReadonlyArray<'text' | 'image'>;
+  [MISTRALAI_DEVSTRAL_2512_FREE.id]: ReadonlyArray<'text'>;
+  [XIAOMI_MIMO_V2_FLASH_FREE.id]: ReadonlyArray<'text'>;
+  [ALLENAI_MOLMO_2_8B_FREE.id]: ReadonlyArray<'text' | 'image' | 'video'>;
+  [DEEPSEEK_DEEPSEEK_R1_0528_FREE.id]: ReadonlyArray<'text'>;
+  [NVIDIA_NEMOTRON_NANO_9B_V2_FREE.id]: ReadonlyArray<'text'>;
+  [META_LLAMA_LLAMA_3_1_405B_INSTRUCT_FREE.id]: ReadonlyArray<'text'>;
+  [QWEN_QWEN_2_5_VL_7B_INSTRUCT_FREE.id]: ReadonlyArray<'text' | 'image'>;
+  [MISTRALAI_MISTRAL_7B_INSTRUCT_FREE.id]: ReadonlyArray<'text'>;
+  [LIQUID_LFM_2_2_6B.id]: ReadonlyArray<'text'>;
+  [LIQUID_LFM2_8B_A1B.id]: ReadonlyArray<'text'>;
+  [IBM_GRANITE_GRANITE_4_0_H_MICRO.id]: ReadonlyArray<'text'>;
+  [GOOGLE_GEMMA_3_4B_IT.id]: ReadonlyArray<'text' | 'image'>;
+  [META_LLAMA_LLAMA_3_2_3B_INSTRUCT.id]: ReadonlyArray<'text'>;
[MISTRALAI_MISTRAL_NEMO.id]: ReadonlyArray<'text'>; + [GOOGLE_GEMMA_3N_E4B_IT.id]: ReadonlyArray<'text'>; + [META_LLAMA_LLAMA_3_1_8B_INSTRUCT.id]: ReadonlyArray<'text'>; + [META_LLAMA_LLAMA_GUARD_3_8B.id]: ReadonlyArray<'text'>; + [OPENAI_GPT_OSS_20B.id]: ReadonlyArray<'text'>; + [NOUSRESEARCH_DEEPHERMES_3_MISTRAL_24B_PREVIEW.id]: ReadonlyArray<'text'>; + [NOUSRESEARCH_HERMES_2_PRO_LLAMA_3_8B.id]: ReadonlyArray<'text'>; + [META_LLAMA_LLAMA_3_2_1B_INSTRUCT.id]: ReadonlyArray<'text'>; + [MISTRALAI_MISTRAL_7B_INSTRUCT.id]: ReadonlyArray<'text'>; + [META_LLAMA_LLAMA_3_8B_INSTRUCT.id]: ReadonlyArray<'text'>; + [GOOGLE_GEMMA_2_9B_IT.id]: ReadonlyArray<'text'>; + [QWEN_QWEN2_5_CODER_7B_INSTRUCT.id]: ReadonlyArray<'text'>; + [GOOGLE_GEMMA_3_12B_IT.id]: ReadonlyArray<'text' | 'image'>; + [MISTRALAI_MISTRAL_SMALL_3_1_24B_INSTRUCT.id]: ReadonlyArray<'text' | 'image'>; + [DEEPSEEK_DEEPSEEK_R1_DISTILL_LLAMA_70B.id]: ReadonlyArray<'text'>; + [QWEN_QWEN_2_5_CODER_32B_INSTRUCT.id]: ReadonlyArray<'text'>; + [MISTRALAI_MISTRAL_SMALL_24B_INSTRUCT_2501.id]: ReadonlyArray<'text'>; + [QWEN_QWEN3_8B.id]: ReadonlyArray<'text'>; + [AMAZON_NOVA_MICRO_V1.id]: ReadonlyArray<'text'>; + [COHERE_COMMAND_R7B_12_2024.id]: ReadonlyArray<'text'>; + [OPENAI_GPT_OSS_120B.id]: ReadonlyArray<'text'>; + [OPENAI_GPT_OSS_120B_EXACTO.id]: ReadonlyArray<'text'>; + [MISTRALAI_MINISTRAL_3B.id]: ReadonlyArray<'text'>; + [SAO10K_L3_LUNARIS_8B.id]: ReadonlyArray<'text'>; + [QWEN_QWEN_2_5_7B_INSTRUCT.id]: ReadonlyArray<'text'>; + [GOOGLE_GEMMA_3_27B_IT.id]: ReadonlyArray<'text' | 'image'>; + [NVIDIA_NEMOTRON_NANO_9B_V2.id]: ReadonlyArray<'text'>; + [ARCEE_AI_TRINITY_MINI.id]: ReadonlyArray<'text'>; + [META_LLAMA_LLAMA_3_2_11B_VISION_INSTRUCT.id]: ReadonlyArray<'text' | 'image'>; + [MICROSOFT_PHI_4_MULTIMODAL_INSTRUCT.id]: ReadonlyArray<'text' | 'image'>; + [QWEN_QWEN_TURBO.id]: ReadonlyArray<'text'>; + [Z_AI_GLM_4_5_AIR.id]: ReadonlyArray<'text'>; + [QWEN_QWEN2_5_VL_32B_INSTRUCT.id]: ReadonlyArray<'text' | 'image'>; + [QWEN_QWEN3_14B.id]: ReadonlyArray<'text'>; + [MISTRALAI_DEVSTRAL_2512.id]: ReadonlyArray<'text'>; + [OPENAI_GPT_5_NANO.id]: ReadonlyArray<'text' | 'image' | 'document'>; + [QWEN_QWEN3_30B_A3B_THINKING_2507.id]: ReadonlyArray<'text'>; + [GRYPHE_MYTHOMAX_L2_13B.id]: ReadonlyArray<'text'>; + [DEEPSEEK_DEEPSEEK_R1_0528_QWEN3_8B.id]: ReadonlyArray<'text'>; + [MISTRALAI_DEVSTRAL_SMALL_2505.id]: ReadonlyArray<'text'>; + [MICROSOFT_PHI_4.id]: ReadonlyArray<'text'>; + [MISTRALAI_MISTRAL_SMALL_3_2_24B_INSTRUCT.id]: ReadonlyArray<'image' | 'text'>; + [QWEN_QWEN3_30B_A3B.id]: ReadonlyArray<'text'>; + [AMAZON_NOVA_LITE_V1.id]: ReadonlyArray<'text' | 'image'>; + [NVIDIA_NEMOTRON_3_NANO_30B_A3B.id]: ReadonlyArray<'text'>; + [QWEN_QWEN3_CODER_30B_A3B_INSTRUCT.id]: ReadonlyArray<'text'>; + [MISTRALAI_DEVSTRAL_SMALL.id]: ReadonlyArray<'text'>; + [BAIDU_ERNIE_4_5_21B_A3B_THINKING.id]: ReadonlyArray<'text'>; + [BAIDU_ERNIE_4_5_21B_A3B.id]: ReadonlyArray<'text'>; + [MICROSOFT_PHI_4_REASONING_PLUS.id]: ReadonlyArray<'text'>; + [QWEN_QWEN3_235B_A22B_2507.id]: ReadonlyArray<'text'>; + [OPENAI_GPT_OSS_SAFEGUARD_20B.id]: ReadonlyArray<'text'>; + [BYTEDANCE_SEED_SEED_1_6_FLASH.id]: ReadonlyArray<'image' | 'text' | 'video'>; + [GOOGLE_GEMINI_2_0_FLASH_LITE_001.id]: ReadonlyArray<'text' | 'image' | 'document' | 'audio' | 'video'>; + [QWEN_QWEN3_32B.id]: ReadonlyArray<'text'>; + [META_LLAMA_LLAMA_4_SCOUT.id]: ReadonlyArray<'text' | 'image'>; + [QWEN_QWEN3_30B_A3B_INSTRUCT_2507.id]: ReadonlyArray<'text'>; + [QWEN_QWEN3_VL_8B_INSTRUCT.id]: 
ReadonlyArray<'image' | 'text'>; + [ALIBABA_TONGYI_DEEPRESEARCH_30B_A3B.id]: ReadonlyArray<'text'>; + [NEVERSLEEP_LLAMA_3_1_LUMIMAID_8B.id]: ReadonlyArray<'text'>; + [QWEN_QWEN3_NEXT_80B_A3B_INSTRUCT.id]: ReadonlyArray<'text'>; + [Z_AI_GLM_4_32B.id]: ReadonlyArray<'text'>; + [MISTRALAI_PIXTRAL_12B.id]: ReadonlyArray<'text' | 'image'>; + [MISTRALAI_MINISTRAL_3B_2512.id]: ReadonlyArray<'text' | 'image'>; + [MISTRAL_MINISTRAL_8B.id]: ReadonlyArray<'text'>; + [MISTRALAI_MINISTRAL_8B.id]: ReadonlyArray<'text'>; + [ALLENAI_OLMO_3_7B_INSTRUCT.id]: ReadonlyArray<'text'>; + [BYTEDANCE_UI_TARS_1_5_7B.id]: ReadonlyArray<'image' | 'text'>; + [MISTRALAI_MISTRAL_SMALL_CREATIVE.id]: ReadonlyArray<'text'>; + [MISTRALAI_VOXTRAL_SMALL_24B_2507.id]: ReadonlyArray<'text' | 'audio'>; + [META_LLAMA_LLAMA_3_3_70B_INSTRUCT.id]: ReadonlyArray<'text'>; + [OPENGVLAB_INTERNVL3_78B.id]: ReadonlyArray<'image' | 'text'>; + [OPENAI_GPT_4_1_NANO.id]: ReadonlyArray<'image' | 'text' | 'document'>; + [GOOGLE_GEMINI_2_0_FLASH_001.id]: ReadonlyArray<'text' | 'image' | 'document' | 'audio' | 'video'>; + [GOOGLE_GEMINI_2_5_FLASH_LITE_PREVIEW_09_2025.id]: ReadonlyArray<'text' | 'image' | 'document' | 'audio' | 'video'>; + [GOOGLE_GEMINI_2_5_FLASH_LITE.id]: ReadonlyArray<'text' | 'image' | 'document' | 'audio' | 'video'>; + [NVIDIA_LLAMA_3_3_NEMOTRON_SUPER_49B_V1_5.id]: ReadonlyArray<'text'>; + [QWEN_QWEN3_VL_4B_INSTRUCT.id]: ReadonlyArray<'text' | 'image'>; + [MISTRALAI_MISTRAL_7B_INSTRUCT_V0_1.id]: ReadonlyArray<'text'>; + [NOUSRESEARCH_HERMES_4_70B.id]: ReadonlyArray<'text'>; + [QWEN_QWEN3_235B_A22B_THINKING_2507.id]: ReadonlyArray<'text'>; + [ALLENAI_OLMO_3_7B_THINK.id]: ReadonlyArray<'text'>; + [QWEN_QWEN_2_5_72B_INSTRUCT.id]: ReadonlyArray<'text'>; + [BAIDU_ERNIE_4_5_VL_28B_A3B.id]: ReadonlyArray<'text' | 'image'>; + [TENCENT_HUNYUAN_A13B_INSTRUCT.id]: ReadonlyArray<'text'>; + [DEEPSEEK_DEEPSEEK_R1_DISTILL_QWEN_14B.id]: ReadonlyArray<'text'>; + [ESSENTIALAI_RNJ_1_INSTRUCT.id]: ReadonlyArray<'text'>; + [MISTRALAI_MINISTRAL_8B_2512.id]: ReadonlyArray<'text' | 'image'>; + [QWEN_QWQ_32B.id]: ReadonlyArray<'text'>; + [ALLENAI_OLMO_3_1_32B_THINK.id]: ReadonlyArray<'text'>; + [ALLENAI_OLMO_3_32B_THINK.id]: ReadonlyArray<'text'>; + [QWEN_QWEN3_VL_30B_A3B_INSTRUCT.id]: ReadonlyArray<'text' | 'image'>; + [META_LLAMA_LLAMA_4_MAVERICK.id]: ReadonlyArray<'text' | 'image'>; + [OPENAI_GPT_4O_MINI_SEARCH_PREVIEW.id]: ReadonlyArray<'text'>; + [OPENAI_GPT_4O_MINI_2024_07_18.id]: ReadonlyArray<'text' | 'image' | 'document'>; + [OPENAI_GPT_4O_MINI.id]: ReadonlyArray<'text' | 'image' | 'document'>; + [COHERE_COMMAND_R_08_2024.id]: ReadonlyArray<'text'>; + [QWEN_QWEN2_5_VL_72B_INSTRUCT.id]: ReadonlyArray<'text' | 'image'>; + [DEEPSEEK_DEEPSEEK_CHAT_V3_1.id]: ReadonlyArray<'text'>; + [QWEN_QWEN3_NEXT_80B_A3B_THINKING.id]: ReadonlyArray<'text'>; + [THEDRUMMER_ROCINANTE_12B.id]: ReadonlyArray<'text'>; + [ARCEE_AI_SPOTLIGHT.id]: ReadonlyArray<'image' | 'text'>; + [META_LLAMA_LLAMA_GUARD_4_12B.id]: ReadonlyArray<'image' | 'text'>; + [QWEN_QWEN3_235B_A22B.id]: ReadonlyArray<'text'>; + [DEEPCOGITO_COGITO_V2_PREVIEW_LLAMA_109B_MOE.id]: ReadonlyArray<'image' | 'text'>; + [QWEN_QWEN3_VL_8B_THINKING.id]: ReadonlyArray<'image' | 'text'>; + [DEEPSEEK_DEEPSEEK_CHAT_V3_0324.id]: ReadonlyArray<'text'>; + [META_LLAMA_LLAMA_GUARD_2_8B.id]: ReadonlyArray<'text'>; + [MISTRALAI_MISTRAL_7B_INSTRUCT_V0_3.id]: ReadonlyArray<'text'>; + [QWEN_QWEN_2_5_VL_7B_INSTRUCT.id]: ReadonlyArray<'text' | 'image'>; + [MISTRALAI_MINISTRAL_14B_2512.id]: ReadonlyArray<'text' | 'image'>; 
+ [MISTRALAI_MISTRAL_7B_INSTRUCT_V0_2.id]: ReadonlyArray<'text'>; + [AI21_JAMBA_MINI_1_7.id]: ReadonlyArray<'text'>; + [X_AI_GROK_4_1_FAST.id]: ReadonlyArray<'text' | 'image'>; + [X_AI_GROK_4_FAST.id]: ReadonlyArray<'text' | 'image'>; + [NVIDIA_NEMOTRON_NANO_12B_V2_VL.id]: ReadonlyArray<'image' | 'text' | 'video'>; + [MISTRALAI_MISTRAL_SABA.id]: ReadonlyArray<'text'>; + [ALLENAI_OLMO_3_1_32B_INSTRUCT.id]: ReadonlyArray<'text'>; + [MEITUAN_LONGCAT_FLASH_CHAT.id]: ReadonlyArray<'text'>; + [QWEN_QWEN3_VL_30B_A3B_THINKING.id]: ReadonlyArray<'text' | 'image'>; + [MINIMAX_MINIMAX_M2.id]: ReadonlyArray<'text'>; + [MINIMAX_MINIMAX_01.id]: ReadonlyArray<'text' | 'image'>; + [PRIME_INTELLECT_INTELLECT_3.id]: ReadonlyArray<'text'>; + [QWEN_QWEN3_VL_235B_A22B_INSTRUCT.id]: ReadonlyArray<'text' | 'image'>; + [X_AI_GROK_CODE_FAST_1.id]: ReadonlyArray<'text'>; + [KWAIPILOT_KAT_CODER_PRO.id]: ReadonlyArray<'text'>; + [DEEPSEEK_DEEPSEEK_V3_2_EXP.id]: ReadonlyArray<'text'>; + [QWEN_QWEN_VL_PLUS.id]: ReadonlyArray<'text' | 'image'>; + [DEEPSEEK_DEEPSEEK_V3_1_TERMINUS_EXACTO.id]: ReadonlyArray<'text'>; + [DEEPSEEK_DEEPSEEK_V3_1_TERMINUS.id]: ReadonlyArray<'text'>; + [QWEN_QWEN3_CODER.id]: ReadonlyArray<'text'>; + [QWEN_QWEN3_CODER_EXACTO.id]: ReadonlyArray<'text'>; + [MISTRALAI_MISTRAL_TINY.id]: ReadonlyArray<'text'>; + [DEEPSEEK_DEEPSEEK_V3_2.id]: ReadonlyArray<'text'>; + [TNGTECH_DEEPSEEK_R1T2_CHIMERA.id]: ReadonlyArray<'text'>; + [TNGTECH_TNG_R1T_CHIMERA.id]: ReadonlyArray<'text'>; + [INCEPTION_MERCURY.id]: ReadonlyArray<'text'>; + [INCEPTION_MERCURY_CODER.id]: ReadonlyArray<'text'>; + [ANTHROPIC_CLAUDE_3_HAIKU.id]: ReadonlyArray<'text' | 'image'>; + [OPENAI_GPT_5_MINI.id]: ReadonlyArray<'text' | 'image' | 'document'>; + [OPENAI_GPT_5_1_CODEX_MINI.id]: ReadonlyArray<'image' | 'text'>; + [BYTEDANCE_SEED_SEED_1_6.id]: ReadonlyArray<'image' | 'text' | 'video'>; + [DEEPSEEK_DEEPSEEK_R1_DISTILL_QWEN_32B.id]: ReadonlyArray<'text'>; + [DEEPSEEK_DEEPSEEK_V3_2_SPECIALE.id]: ReadonlyArray<'text'>; + [NEX_AGI_DEEPSEEK_V3_1_NEX_N1.id]: ReadonlyArray<'text'>; + [MINIMAX_MINIMAX_M2_1.id]: ReadonlyArray<'text'>; + [BAIDU_ERNIE_4_5_300B_A47B.id]: ReadonlyArray<'text'>; + [MOONSHOTAI_KIMI_DEV_72B.id]: ReadonlyArray<'text'>; + [NOUSRESEARCH_HERMES_3_LLAMA_3_1_70B.id]: ReadonlyArray<'text'>; + [META_LLAMA_LLAMA_3_70B_INSTRUCT.id]: ReadonlyArray<'text'>; + [THEDRUMMER_CYDONIA_24B_V4_1.id]: ReadonlyArray<'text'>; + [X_AI_GROK_3_MINI.id]: ReadonlyArray<'text'>; + [X_AI_GROK_3_MINI_BETA.id]: ReadonlyArray<'text'>; + [Z_AI_GLM_4_6V.id]: ReadonlyArray<'image' | 'text' | 'video'>; + [MISTRALAI_CODESTRAL_2508.id]: ReadonlyArray<'text'>; + [DEEPSEEK_DEEPSEEK_CHAT.id]: ReadonlyArray<'text'>; + [TNGTECH_DEEPSEEK_R1T_CHIMERA.id]: ReadonlyArray<'text'>; + [QWEN_QWEN3_CODER_FLASH.id]: ReadonlyArray<'text'>; + [GOOGLE_GEMINI_2_5_FLASH_IMAGE_PREVIEW.id]: ReadonlyArray<'image' | 'text'>; + [GOOGLE_GEMINI_2_5_FLASH_PREVIEW_09_2025.id]: ReadonlyArray<'image' | 'document' | 'text' | 'audio' | 'video'>; + [GOOGLE_GEMINI_2_5_FLASH_IMAGE.id]: ReadonlyArray<'image' | 'text'>; + [AMAZON_NOVA_2_LITE_V1.id]: ReadonlyArray<'text' | 'image' | 'video' | 'document'>; + [GOOGLE_GEMINI_2_5_FLASH.id]: ReadonlyArray<'document' | 'image' | 'text' | 'audio' | 'video'>; + [Z_AI_GLM_4_6.id]: ReadonlyArray<'text'>; + [Z_AI_GLM_4_5.id]: ReadonlyArray<'text'>; + [MOONSHOTAI_KIMI_K2_0905.id]: ReadonlyArray<'text'>; + [META_LLAMA_LLAMA_3_1_70B_INSTRUCT.id]: ReadonlyArray<'text'>; + [THEDRUMMER_UNSLOPNEMO_12B.id]: ReadonlyArray<'text'>; + 
[QWEN_QWEN_PLUS_2025_07_28.id]: ReadonlyArray<'text'>; + [QWEN_QWEN_PLUS.id]: ReadonlyArray<'text'>; + [Z_AI_GLM_4_7.id]: ReadonlyArray<'text'>; + [OPENAI_GPT_4_1_MINI.id]: ReadonlyArray<'image' | 'text' | 'document'>; + [MOONSHOTAI_KIMI_K2_THINKING.id]: ReadonlyArray<'text'>; + [MISTRALAI_DEVSTRAL_MEDIUM.id]: ReadonlyArray<'text'>; + [MISTRALAI_MISTRAL_MEDIUM_3.id]: ReadonlyArray<'text' | 'image'>; + [MISTRALAI_MISTRAL_MEDIUM_3_1.id]: ReadonlyArray<'text' | 'image'>; + [MINIMAX_MINIMAX_M1.id]: ReadonlyArray<'text'>; + [QWEN_QWEN_PLUS_2025_07_28_THINKING.id]: ReadonlyArray<'text'>; + [BAIDU_ERNIE_4_5_VL_424B_A47B.id]: ReadonlyArray<'image' | 'text'>; + [Z_AI_GLM_4_6_EXACTO.id]: ReadonlyArray<'text'>; + [UNDI95_REMM_SLERP_L2_13B.id]: ReadonlyArray<'text'>; + [DEEPSEEK_DEEPSEEK_R1_0528.id]: ReadonlyArray<'text'>; + [QWEN_QWEN3_VL_235B_A22B_THINKING.id]: ReadonlyArray<'text' | 'image'>; + [MICROSOFT_WIZARDLM_2_8X22B.id]: ReadonlyArray<'text'>; + [ARCEE_AI_CODER_LARGE.id]: ReadonlyArray<'text'>; + [MISTRALAI_MISTRAL_LARGE_2512.id]: ReadonlyArray<'text' | 'image'>; + [QWEN_QWEN3_VL_32B_INSTRUCT.id]: ReadonlyArray<'text' | 'image'>; + [OPENAI_GPT_3_5_TURBO.id]: ReadonlyArray<'text'>; + [DEEPSEEK_DEEPSEEK_PROVER_V2.id]: ReadonlyArray<'text'>; + [MOONSHOTAI_KIMI_K2.id]: ReadonlyArray<'text'>; + [GOOGLE_GEMINI_3_FLASH_PREVIEW.id]: ReadonlyArray<'text' | 'image' | 'document' | 'audio' | 'video'>; + [MISTRALAI_MIXTRAL_8X7B_INSTRUCT.id]: ReadonlyArray<'text'>; + [THEDRUMMER_SKYFALL_36B_V2.id]: ReadonlyArray<'text'>; + [MINIMAX_MINIMAX_M1_80K.id]: ReadonlyArray<'text'>; + [STEPFUN_AI_STEP3.id]: ReadonlyArray<'image' | 'text'>; + [NVIDIA_LLAMA_3_1_NEMOTRON_ULTRA_253B_V1.id]: ReadonlyArray<'text'>; + [Z_AI_GLM_4_5V.id]: ReadonlyArray<'text' | 'image'>; + [MOONSHOTAI_KIMI_K2_0905_EXACTO.id]: ReadonlyArray<'text'>; + [GOOGLE_GEMMA_2_27B_IT.id]: ReadonlyArray<'text'>; + [SAO10K_L3_3_EURYALE_70B.id]: ReadonlyArray<'text'>; + [SAO10K_L3_1_EURYALE_70B.id]: ReadonlyArray<'text'>; + [AION_LABS_AION_1_0_MINI.id]: ReadonlyArray<'text'>; + [DEEPSEEK_DEEPSEEK_R1.id]: ReadonlyArray<'text'>; + [MANCER_WEAVER.id]: ReadonlyArray<'text'>; + [ARCEE_AI_VIRTUOSO_LARGE.id]: ReadonlyArray<'text'>; + [MORPH_MORPH_V3_FAST.id]: ReadonlyArray<'text'>; + [ALFREDPROS_CODELLAMA_7B_INSTRUCT_SOLIDITY.id]: ReadonlyArray<'text'>; + [ELEUTHERAI_LLEMMA_7B.id]: ReadonlyArray<'text'>; + [AION_LABS_AION_RP_LLAMA_3_1_8B.id]: ReadonlyArray<'text'>; + [AMAZON_NOVA_PRO_V1.id]: ReadonlyArray<'text' | 'image'>; + [QWEN_QWEN_VL_MAX.id]: ReadonlyArray<'text' | 'image'>; + [ANTHROPIC_CLAUDE_3_5_HAIKU.id]: ReadonlyArray<'text' | 'image'>; + [RELACE_RELACE_APPLY_3.id]: ReadonlyArray<'text'>; + [SWITCHPOINT_ROUTER.id]: ReadonlyArray<'text'>; + [DEEPCOGITO_COGITO_V2_PREVIEW_LLAMA_70B.id]: ReadonlyArray<'text'>; + [MORPH_MORPH_V3_LARGE.id]: ReadonlyArray<'text'>; + [ARCEE_AI_MAESTRO_REASONING.id]: ReadonlyArray<'text'>; + [PERPLEXITY_SONAR.id]: ReadonlyArray<'text' | 'image'>; + [NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B.id]: ReadonlyArray<'text'>; + [NEVERSLEEP_NOROMAID_20B.id]: ReadonlyArray<'text'>; + [OPENAI_GPT_3_5_TURBO_0613.id]: ReadonlyArray<'text'>; + [RELACE_RELACE_SEARCH.id]: ReadonlyArray<'text'>; + [NOUSRESEARCH_HERMES_4_405B.id]: ReadonlyArray<'text'>; + [QWEN_QWEN3_CODER_PLUS.id]: ReadonlyArray<'text'>; + [ANTHROPIC_CLAUDE_HAIKU_4_5.id]: ReadonlyArray<'image' | 'text'>; + [OPENAI_O4_MINI_HIGH.id]: ReadonlyArray<'image' | 'text' | 'document'>; + [OPENAI_O3_MINI_HIGH.id]: ReadonlyArray<'text' | 'document'>; + [OPENAI_O3_MINI.id]: 
ReadonlyArray<'text' | 'document'>; + [OPENAI_O4_MINI.id]: ReadonlyArray<'image' | 'text' | 'document'>; + [NVIDIA_LLAMA_3_1_NEMOTRON_70B_INSTRUCT.id]: ReadonlyArray<'text'>; + [QWEN_QWEN3_MAX.id]: ReadonlyArray<'text'>; + [DEEPCOGITO_COGITO_V2_1_671B.id]: ReadonlyArray<'text'>; + [GOOGLE_GEMINI_2_5_PRO_PREVIEW.id]: ReadonlyArray<'document' | 'image' | 'text' | 'audio'>; + [OPENAI_GPT_5_CHAT.id]: ReadonlyArray<'document' | 'image' | 'text'>; + [GOOGLE_GEMINI_2_5_PRO_PREVIEW_05_06.id]: ReadonlyArray<'text' | 'image' | 'document' | 'audio' | 'video'>; + [OPENAI_GPT_5.id]: ReadonlyArray<'text' | 'image' | 'document'>; + [OPENAI_GPT_5_CODEX.id]: ReadonlyArray<'text' | 'image'>; + [OPENAI_GPT_5_1_CODEX.id]: ReadonlyArray<'text' | 'image'>; + [OPENAI_GPT_5_1_CHAT.id]: ReadonlyArray<'document' | 'image' | 'text'>; + [GOOGLE_GEMINI_2_5_PRO.id]: ReadonlyArray<'text' | 'image' | 'document' | 'audio' | 'video'>; + [OPENAI_GPT_5_1_CODEX_MAX.id]: ReadonlyArray<'text' | 'image'>; + [OPENAI_GPT_5_1.id]: ReadonlyArray<'image' | 'text' | 'document'>; + [SAO10K_L3_EURYALE_70B.id]: ReadonlyArray<'text'>; + [OPENAI_GPT_3_5_TURBO_INSTRUCT.id]: ReadonlyArray<'text'>; + [OPENAI_CODEX_MINI.id]: ReadonlyArray<'image' | 'text'>; + [QWEN_QWEN_MAX.id]: ReadonlyArray<'text'>; + [OPENAI_GPT_5_2.id]: ReadonlyArray<'document' | 'image' | 'text'>; + [OPENAI_GPT_5_2_CHAT.id]: ReadonlyArray<'document' | 'image' | 'text'>; + [MISTRALAI_MISTRAL_LARGE_2411.id]: ReadonlyArray<'text'>; + [MISTRALAI_PIXTRAL_LARGE_2411.id]: ReadonlyArray<'text' | 'image'>; + [MISTRALAI_MIXTRAL_8X22B_INSTRUCT.id]: ReadonlyArray<'text'>; + [MISTRALAI_MISTRAL_LARGE.id]: ReadonlyArray<'text'>; + [MISTRALAI_MISTRAL_LARGE_2407.id]: ReadonlyArray<'text'>; + [PERPLEXITY_SONAR_DEEP_RESEARCH.id]: ReadonlyArray<'text'>; + [PERPLEXITY_SONAR_REASONING_PRO.id]: ReadonlyArray<'text' | 'image'>; + [OPENAI_GPT_4_1.id]: ReadonlyArray<'image' | 'text' | 'document'>; + [OPENAI_O3.id]: ReadonlyArray<'image' | 'text' | 'document'>; + [AI21_JAMBA_LARGE_1_7.id]: ReadonlyArray<'text'>; + [OPENAI_O4_MINI_DEEP_RESEARCH.id]: ReadonlyArray<'document' | 'image' | 'text'>; + [GOOGLE_GEMINI_3_PRO_IMAGE_PREVIEW.id]: ReadonlyArray<'image' | 'text'>; + [GOOGLE_GEMINI_3_PRO_PREVIEW.id]: ReadonlyArray<'text' | 'image' | 'document' | 'audio' | 'video'>; + [OPENAI_GPT_5_IMAGE_MINI.id]: ReadonlyArray<'document' | 'image' | 'text'>; + [OPENAI_GPT_4O_SEARCH_PREVIEW.id]: ReadonlyArray<'text'>; + [OPENAI_GPT_4O_AUDIO_PREVIEW.id]: ReadonlyArray<'audio' | 'text'>; + [COHERE_COMMAND_A.id]: ReadonlyArray<'text'>; + [INFLECTION_INFLECTION_3_PI.id]: ReadonlyArray<'text'>; + [INFLECTION_INFLECTION_3_PRODUCTIVITY.id]: ReadonlyArray<'text'>; + [OPENAI_GPT_4O_2024_11_20.id]: ReadonlyArray<'text' | 'image' | 'document'>; + [OPENAI_GPT_4O_2024_08_06.id]: ReadonlyArray<'text' | 'image' | 'document'>; + [OPENAI_GPT_4O.id]: ReadonlyArray<'text' | 'image' | 'document'>; + [COHERE_COMMAND_R_PLUS_08_2024.id]: ReadonlyArray<'text'>; + [AMAZON_NOVA_PREMIER_V1.id]: ReadonlyArray<'text' | 'image'>; + [SAO10K_L3_1_70B_HANAMI_X1.id]: ReadonlyArray<'text'>; + [OPENAI_GPT_3_5_TURBO_16K.id]: ReadonlyArray<'text'>; + [ANTHRACITE_ORG_MAGNUM_V4_72B.id]: ReadonlyArray<'text'>; + [ANTHROPIC_CLAUDE_SONNET_4_5.id]: ReadonlyArray<'text' | 'image' | 'document'>; + [X_AI_GROK_3.id]: ReadonlyArray<'text'>; + [ANTHROPIC_CLAUDE_3_7_SONNET_THINKING.id]: ReadonlyArray<'text' | 'image' | 'document'>; + [X_AI_GROK_4.id]: ReadonlyArray<'image' | 'text'>; + [PERPLEXITY_SONAR_PRO.id]: ReadonlyArray<'text' | 'image'>; + 
[X_AI_GROK_3_BETA.id]: ReadonlyArray<'text'>; + [ANTHROPIC_CLAUDE_SONNET_4.id]: ReadonlyArray<'image' | 'text' | 'document'>; + [ANTHROPIC_CLAUDE_3_7_SONNET.id]: ReadonlyArray<'text' | 'image' | 'document'>; + [PERPLEXITY_SONAR_PRO_SEARCH.id]: ReadonlyArray<'text' | 'image'>; + [META_LLAMA_LLAMA_3_1_405B_INSTRUCT.id]: ReadonlyArray<'text'>; + [DEEPCOGITO_COGITO_V2_PREVIEW_LLAMA_405B.id]: ReadonlyArray<'text'>; + [META_LLAMA_LLAMA_3_1_405B.id]: ReadonlyArray<'text'>; + [AION_LABS_AION_1_0.id]: ReadonlyArray<'text'>; + [RAIFLE_SORCERERLM_8X22B.id]: ReadonlyArray<'text'>; + [OPENAI_CHATGPT_4O_LATEST.id]: ReadonlyArray<'text' | 'image'>; + [OPENAI_GPT_4O_2024_05_13.id]: ReadonlyArray<'text' | 'image' | 'document'>; + [ANTHROPIC_CLAUDE_OPUS_4_5.id]: ReadonlyArray<'document' | 'image' | 'text'>; + [ALPINDALE_GOLIATH_120B.id]: ReadonlyArray<'text'>; + [OPENAI_GPT_4O_EXTENDED.id]: ReadonlyArray<'text' | 'image' | 'document'>; + [ANTHROPIC_CLAUDE_3_5_SONNET.id]: ReadonlyArray<'text' | 'image' | 'document'>; + [OPENAI_GPT_5_IMAGE.id]: ReadonlyArray<'image' | 'text' | 'document'>; + [OPENAI_GPT_4_1106_PREVIEW.id]: ReadonlyArray<'text'>; + [OPENAI_GPT_4_TURBO.id]: ReadonlyArray<'text' | 'image'>; + [OPENAI_GPT_4_TURBO_PREVIEW.id]: ReadonlyArray<'text'>; + [OPENAI_O3_DEEP_RESEARCH.id]: ReadonlyArray<'image' | 'text' | 'document'>; + [OPENAI_O1.id]: ReadonlyArray<'text' | 'image' | 'document'>; + [ANTHROPIC_CLAUDE_OPUS_4_1.id]: ReadonlyArray<'image' | 'text' | 'document'>; + [ANTHROPIC_CLAUDE_OPUS_4.id]: ReadonlyArray<'image' | 'text' | 'document'>; + [OPENAI_GPT_5_PRO.id]: ReadonlyArray<'image' | 'text' | 'document'>; + [OPENAI_O3_PRO.id]: ReadonlyArray<'text' | 'document' | 'image'>; + [OPENAI_GPT_5_2_PRO.id]: ReadonlyArray<'image' | 'text' | 'document'>; + [OPENAI_GPT_4.id]: ReadonlyArray<'text'>; + [OPENAI_GPT_4_0314.id]: ReadonlyArray<'text'>; + [OPENAI_O1_PRO.id]: ReadonlyArray<'text' | 'image' | 'document'>; + [ALLENAI_OLMO_2_0325_32B_INSTRUCT.id]: ReadonlyArray<'text'>; + [META_LLAMA_LLAMA_3_2_90B_VISION_INSTRUCT.id]: ReadonlyArray<'text' | 'image'>; + [ANTHROPIC_CLAUDE_3_5_HAIKU_20241022.id]: ReadonlyArray<'text' | 'image' | 'document'>; + "openrouter/auto": ReadonlyArray<'text' | 'image' | 'audio' | 'video' | 'document'>; } export const OPENROUTER_CHAT_MODELS = [ @@ -17704,7 +8942,7 @@ export const OPENROUTER_CHAT_MODELS = [ ALLENAI_OLMO_2_0325_32B_INSTRUCT.id, META_LLAMA_LLAMA_3_2_90B_VISION_INSTRUCT.id, ANTHROPIC_CLAUDE_3_5_HAIKU_20241022.id, - 'openrouter/auto', + "openrouter/auto", ] as const export const OPENROUTER_IMAGE_MODELS = [ diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index bac95717..97cefd4a 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -617,7 +617,7 @@ importers: version: 1.1.0 '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) zod: specifier: ^4.2.0 version: 4.2.1 @@ -633,7 +633,7 @@ importers: version: link:../ai '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 
4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) zod: specifier: ^4.2.0 version: 4.2.1 @@ -646,7 +646,7 @@ importers: devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -674,7 +674,7 @@ importers: devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) jsdom: specifier: ^27.2.0 version: 27.3.0(postcss@8.5.6) @@ -685,6 +685,28 @@ importers: specifier: ^2.11.10 version: 2.11.10(solid-js@1.9.10)(vite@7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + packages/typescript/ai-fal: + dependencies: + '@fal-ai/client': + specifier: ^1.8.3 + version: 1.8.3 + '@tanstack/ai': + specifier: workspace:* + version: link:../ai + zod: + specifier: ^4 + version: 4.2.1 + devDependencies: + '@hey-api/openapi-ts': + specifier: ^0.90.10 + version: 0.90.10(magicast@0.5.1)(typescript@5.9.3) + '@vitest/coverage-v8': + specifier: 4.0.14 + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + vite: + specifier: ^7.2.7 + version: 7.3.1(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) + packages/typescript/ai-gemini: dependencies: '@google/genai': @@ -696,7 +718,7 @@ importers: version: link:../ai '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -715,7 +737,7 @@ importers: devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -731,7 +753,7 @@ importers: version: link:../ai '@vitest/coverage-v8': specifier: 4.0.14 - version: 
4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -747,7 +769,7 @@ importers: version: link:../ai '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -766,7 +788,7 @@ importers: devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -785,7 +807,7 @@ importers: version: 3.2.4(preact@10.28.2) '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) jsdom: specifier: ^27.2.0 version: 27.3.0(postcss@8.5.6) @@ -813,7 +835,7 @@ importers: version: 19.2.7 '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) jsdom: specifier: ^27.2.0 version: 27.3.0(postcss@8.5.6) @@ -853,7 +875,7 @@ importers: version: 19.2.7 '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) react: specifier: ^19.2.3 version: 19.2.3 @@ -924,7 +946,7 @@ importers: version: link:../ai-solid '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) solid-js: specifier: ^1.9.10 
version: 1.9.10 @@ -952,7 +974,7 @@ importers: version: 24.10.3 '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.17(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.0.18(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) jsdom: specifier: ^27.2.0 version: 27.3.0(postcss@8.5.6) @@ -1029,7 +1051,7 @@ importers: version: 6.0.3(vite@7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))(vue@3.5.25(typescript@5.9.3)) '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -1054,7 +1076,7 @@ importers: devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) vite: specifier: ^7.2.7 version: 7.2.7(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) @@ -1073,7 +1095,7 @@ importers: version: 19.2.7 '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) react: specifier: ^19.2.3 version: 19.2.3 @@ -1204,7 +1226,7 @@ importers: devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 - version: 4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + version: 4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) solid-js: specifier: ^1.9.10 version: 1.9.10 @@ -2275,6 +2297,10 @@ packages: resolution: {integrity: sha512-C3mrr3b5dRVlKPJdfrAXS8+dq+rq8Qm5SNRazca0JKgw1HQERFmrVb0towvMmw5uu8hHKNiQasMaR/tydf3Zsg==} engines: {node: ^20.19.0 || ^22.13.0 || ^23.5.0 || >=24.0.0, npm: '>=10'} + '@fal-ai/client@1.8.3': + resolution: {integrity: sha512-NL6rrWVJiz6pI5m30qRMKwaXLz1r5mNuSrK3hmGtF0gLwtSQ1elhXPkSI75kbp9eMwJBdzBkOsvbwnoSMG3I5A==} + engines: {node: '>=22.0.0'} + '@gerrit0/mini-shiki@3.19.0': resolution: {integrity: sha512-ZSlWfLvr8Nl0T4iA3FF/8VH8HivYF82xQts2DY0tJxZd4wtXJ8AA0nmdW9lmO4hlrh3f9xNwEPtOgqETPqKwDA==} @@ -2287,6 +2313,26 @@ packages: '@modelcontextprotocol/sdk': optional: true + '@hey-api/codegen-core@0.5.5': + resolution: {integrity: sha512-f2ZHucnA2wBGAY8ipB4wn/mrEYW+WUxU2huJmUvfDO6AE2vfILSHeF3wCO39Pz4wUYPoAWZByaauftLrOfC12Q==} + engines: {node: 
'>=20.19.0'} + peerDependencies: + typescript: '>=5.5.3' + + '@hey-api/json-schema-ref-parser@1.2.2': + resolution: {integrity: sha512-oS+5yAdwnK20lSeFO1d53Ku+yaGCsY8PcrmSq2GtSs3bsBfRnHAbpPKSVzQcaxAOrzj5NB+f34WhZglVrNayBA==} + engines: {node: '>= 16'} + + '@hey-api/openapi-ts@0.90.10': + resolution: {integrity: sha512-o0wlFxuLt1bcyIV/ZH8DQ1wrgODTnUYj/VfCHOOYgXUQlLp9Dm2PjihOz+WYrZLowhqUhSKeJRArOGzvLuOTsg==} + engines: {node: '>=20.19.0'} + hasBin: true + peerDependencies: + typescript: '>=5.5.3' + + '@hey-api/types@0.1.2': + resolution: {integrity: sha512-uNNtiVAWL7XNrV/tFXx7GLY9lwaaDazx1173cGW3+UEaw4RUPsHEmiB4DSpcjNxMIcrctfz2sGKLnVx5PBG2RA==} + '@humanfs/core@0.19.1': resolution: {integrity: sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA==} engines: {node: '>=18.18.0'} @@ -2362,6 +2408,9 @@ packages: '@jridgewell/trace-mapping@0.3.31': resolution: {integrity: sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==} + '@jsdevtools/ono@7.1.3': + resolution: {integrity: sha512-4JQNk+3mVzK3xh2rqd6RB4J46qUR19azEHBneZyTZM+c456qOrbbM/5xcR8huNCCcbVt7+UmizG6GuUvPvKUYg==} + '@manypkg/find-root@1.1.0': resolution: {integrity: sha512-mki5uBvhHzO8kYYix/WRy2WX8S3B5wdVSc9D6KcU5lQNglP2yt58/VfLuAK49glRXChosY8ap2oJ1qgma3GUVA==} @@ -2386,6 +2435,10 @@ packages: '@microsoft/tsdoc@0.15.1': resolution: {integrity: sha512-4aErSrCR/On/e5G2hDP0wjooqDdauzEbIq8hIkIe5pXV0rtWJZvdCEKL0ykZxex+IxIwBp0eGeV48hQN07dXtw==} + '@msgpack/msgpack@3.1.3': + resolution: {integrity: sha512-47XIizs9XZXvuJgoaJUIE2lFoID8ugvc0jzSHP+Ptfk8nTbnR8g788wv48N03Kx0UkAv559HWRQ3yzOgzlRNUA==} + engines: {node: '>= 18'} + '@napi-rs/wasm-runtime@0.2.12': resolution: {integrity: sha512-ZVWUcfwY4E/yPitQJl481FjFo3K22D6qF0DuFH6Y/nbnE11GY5uguDxZMGXPQ8WQ0128MXQD7TnfHyK4oWoIJQ==} @@ -4140,8 +4193,8 @@ packages: '@vitest/expect@4.0.15': resolution: {integrity: sha512-Gfyva9/GxPAWXIWjyGDli9O+waHDC0Q0jaLdFP1qPAUUfo1FEXPXUfUkp3eZA0sSq340vPycSyOlYUeM15Ft1w==} - '@vitest/expect@4.0.17': - resolution: {integrity: sha512-mEoqP3RqhKlbmUmntNDDCJeTDavDR+fVYkSOw8qRwJFaW/0/5zA9zFeTrHqNtcmwh6j26yMmwx2PqUDPzt5ZAQ==} + '@vitest/expect@4.0.18': + resolution: {integrity: sha512-8sCWUyckXXYvx4opfzVY03EOiYVxyNrHS5QxX3DAIi5dpJAAkyJezHCP77VMX4HKA2LDT/Jpfo8i2r5BE3GnQQ==} '@vitest/mocker@4.0.15': resolution: {integrity: sha512-CZ28GLfOEIFkvCFngN8Sfx5h+Se0zN+h4B7yOsPVCcgtiO7t5jt9xQh2E1UkFep+eb9fjyMfuC5gBypwb07fvQ==} @@ -4154,8 +4207,8 @@ packages: vite: optional: true - '@vitest/mocker@4.0.17': - resolution: {integrity: sha512-+ZtQhLA3lDh1tI2wxe3yMsGzbp7uuJSWBM1iTIKCbppWTSBN09PUC+L+fyNlQApQoR+Ps8twt2pbSSXg2fQVEQ==} + '@vitest/mocker@4.0.18': + resolution: {integrity: sha512-HhVd0MDnzzsgevnOWCBj5Otnzobjy5wLBe4EdeeFGv8luMsGcYqDuFRMcttKWZA5vVO8RFjexVovXvAM4JoJDQ==} peerDependencies: msw: ^2.4.9 vite: ^6.0.0 || ^7.0.0-0 @@ -4171,26 +4224,26 @@ packages: '@vitest/pretty-format@4.0.15': resolution: {integrity: sha512-SWdqR8vEv83WtZcrfLNqlqeQXlQLh2iilO1Wk1gv4eiHKjEzvgHb2OVc3mIPyhZE6F+CtfYjNlDJwP5MN6Km7A==} - '@vitest/pretty-format@4.0.17': - resolution: {integrity: sha512-Ah3VAYmjcEdHg6+MwFE17qyLqBHZ+ni2ScKCiW2XrlSBV4H3Z7vYfPfz7CWQ33gyu76oc0Ai36+kgLU3rfF4nw==} + '@vitest/pretty-format@4.0.18': + resolution: {integrity: sha512-P24GK3GulZWC5tz87ux0m8OADrQIUVDPIjjj65vBXYG17ZeU3qD7r+MNZ1RNv4l8CGU2vtTRqixrOi9fYk/yKw==} '@vitest/runner@4.0.15': resolution: {integrity: sha512-+A+yMY8dGixUhHmNdPUxOh0la6uVzun86vAbuMT3hIDxMrAOmn5ILBHm8ajrqHE0t8R9T1dGnde1A5DTnmi3qw==} - '@vitest/runner@4.0.17': - resolution: 
{integrity: sha512-JmuQyf8aMWoo/LmNFppdpkfRVHJcsgzkbCA+/Bk7VfNH7RE6Ut2qxegeyx2j3ojtJtKIbIGy3h+KxGfYfk28YQ==} + '@vitest/runner@4.0.18': + resolution: {integrity: sha512-rpk9y12PGa22Jg6g5M3UVVnTS7+zycIGk9ZNGN+m6tZHKQb7jrP7/77WfZy13Y/EUDd52NDsLRQhYKtv7XfPQw==} '@vitest/snapshot@4.0.15': resolution: {integrity: sha512-A7Ob8EdFZJIBjLjeO0DZF4lqR6U7Ydi5/5LIZ0xcI+23lYlsYJAfGn8PrIWTYdZQRNnSRlzhg0zyGu37mVdy5g==} - '@vitest/snapshot@4.0.17': - resolution: {integrity: sha512-npPelD7oyL+YQM2gbIYvlavlMVWUfNNGZPcu0aEUQXt7FXTuqhmgiYupPnAanhKvyP6Srs2pIbWo30K0RbDtRQ==} + '@vitest/snapshot@4.0.18': + resolution: {integrity: sha512-PCiV0rcl7jKQjbgYqjtakly6T1uwv/5BQ9SwBLekVg/EaYeQFPiXcgrC2Y7vDMA8dM1SUEAEV82kgSQIlXNMvA==} '@vitest/spy@4.0.15': resolution: {integrity: sha512-+EIjOJmnY6mIfdXtE/bnozKEvTC4Uczg19yeZ2vtCz5Yyb0QQ31QWVQ8hswJ3Ysx/K2EqaNsVanjr//2+P3FHw==} - '@vitest/spy@4.0.17': - resolution: {integrity: sha512-I1bQo8QaP6tZlTomQNWKJE6ym4SHf3oLS7ceNjozxxgzavRAgZDc06T7kD8gb9bXKEgcLNt00Z+kZO6KaJ62Ew==} + '@vitest/spy@4.0.18': + resolution: {integrity: sha512-cbQt3PTSD7P2OARdVW3qWER5EGq7PHlvE+QfzSC0lbwO+xnt7+XH06ZzFjFRgzUX//JmpxrCu92VdwvEPlWSNw==} '@vitest/utils@4.0.14': resolution: {integrity: sha512-hLqXZKAWNg8pI+SQXyXxWCTOpA3MvsqcbVeNgSi8x/CSN2wi26dSzn1wrOhmCmFjEvN9p8/kLFRHa6PI8jHazw==} @@ -4198,8 +4251,8 @@ packages: '@vitest/utils@4.0.15': resolution: {integrity: sha512-HXjPW2w5dxhTD0dLwtYHDnelK3j8sR8cWIaLxr22evTyY6q8pRCjZSmhRWVjBaOVXChQd6AwMzi9pucorXCPZA==} - '@vitest/utils@4.0.17': - resolution: {integrity: sha512-RG6iy+IzQpa9SB8HAFHJ9Y+pTzI+h8553MrciN9eC6TFBErqrQaTas4vG+MVj8S4uKk8uTT2p0vgZPnTdxd96w==} + '@vitest/utils@4.0.18': + resolution: {integrity: sha512-msMRKLMVLWygpK3u2Hybgi4MNjcYJvwTb0Ru09+fOyCXIgT5raYP041DRRdiJiI3k/2U6SEbAETB3YtBrUkCFA==} '@volar/language-core@2.4.15': resolution: {integrity: sha512-3VHw+QZU0ZG9IuQmzT68IyN4hZNd9GchGPhbD9+pa8CVv7rnoOZwo7T8weIbrRmihqy3ATpdfXFnqRrfPVK6CA==} @@ -4563,6 +4616,10 @@ packages: buffer@6.0.3: resolution: {integrity: sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==} + bundle-name@4.1.0: + resolution: {integrity: sha512-tjwM5exMg6BGRI+kNmTntNsvdZS1X8BFYS6tnJ2hdH0kVxM6/eVZ2xy+FqStSWvYmtfFMDLIxurorHwDKfDz5Q==} + engines: {node: '>=18'} + bytes@3.1.2: resolution: {integrity: sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==} engines: {node: '>= 0.8'} @@ -4575,6 +4632,14 @@ packages: magicast: optional: true + c12@3.3.3: + resolution: {integrity: sha512-750hTRvgBy5kcMNPdh95Qo+XUBeGo8C7nsKSmedDmaQI+E0r82DwHeM6vBewDe4rGFbnxoa4V9pw+sPh5+Iz8Q==} + peerDependencies: + magicast: '*' + peerDependenciesMeta: + magicast: + optional: true + cac@6.7.14: resolution: {integrity: sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==} engines: {node: '>=8'} @@ -4708,6 +4773,10 @@ packages: color-name@1.1.4: resolution: {integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==} + color-support@1.1.3: + resolution: {integrity: sha512-qiBjkpbMLO/HL68y+lh4q0/O1MZFj2RX6X/KmMa3+gJD3z+WwI1ZzDHysvqHGS3mP6mznPckpXmw1nI9cJjyRg==} + hasBin: true + combined-stream@1.0.8: resolution: {integrity: sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==} engines: {node: '>= 0.8'} @@ -4723,6 +4792,10 @@ packages: resolution: {integrity: sha512-/rFeCpNJQbhSZjGVwO9RFV3xPqbnERS8MmIQzCtD/zl6gpJuV/bMLuN92oG3F7d8oDEHHRrujSXNUr8fpjntKw==} engines: {node: '>=18'} + 
commander@14.0.2: + resolution: {integrity: sha512-TywoWNNRbhoD0BXs1P3ZEScW8W5iKrnbithIl0YH+uCmBd0QpPOA8yc82DS3BIE5Ma6FnBVUsJ7wVUDz4dvOWQ==} + engines: {node: '>=20'} + commander@2.20.3: resolution: {integrity: sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==} @@ -4915,6 +4988,14 @@ packages: resolution: {integrity: sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==} engines: {node: '>=0.10.0'} + default-browser-id@5.0.1: + resolution: {integrity: sha512-x1VCxdX4t+8wVfd1so/9w+vQ4vx7lKd2Qp5tDRutErwmR85OgmfX7RlLRMWafRMY7hbEiXIbudNrjOAPa/hL8Q==} + engines: {node: '>=18'} + + default-browser@5.4.0: + resolution: {integrity: sha512-XDuvSq38Hr1MdN47EDvYtx3U0MTqpCEn+F6ft8z2vYDzMrvQhVp0ui9oQdqW3MvK3vqUETglt1tVGgjLuJ5izg==} + engines: {node: '>=18'} + defaults@1.0.4: resolution: {integrity: sha512-eFuaLoy/Rxalv2kr+lqMlUnrDWV+3j4pljOIJgLIhI058IQfWJ7vXhyEIHu+HtC738klGALYxOKDO0bQP3tg8A==} @@ -4926,6 +5007,10 @@ packages: resolution: {integrity: sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og==} engines: {node: '>=8'} + define-lazy-prop@3.0.0: + resolution: {integrity: sha512-N+MeXYoqr3pOgn8xfyRPREN7gHakLYjhsHhWGT3fWAiL4IkAt0iDw14QiiEm2bE30c5XX5q0FtAA3CK5f9/BUg==} + engines: {node: '>=12'} + define-properties@1.2.1: resolution: {integrity: sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==} engines: {node: '>= 0.4'} @@ -5310,6 +5395,10 @@ packages: resolution: {integrity: sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==} engines: {node: '>=0.8.x'} + eventsource-parser@1.1.2: + resolution: {integrity: sha512-v0eOBUbiaFojBu2s2NPBfYUoRR9GjcDNvCXVaqEf5vVfpIAh9f8RCo4vXTP8c63QRKCFwoLpMpTdPwwhEKVgzA==} + engines: {node: '>=14.18'} + execa@8.0.1: resolution: {integrity: sha512-VyhnebXciFV2DESc+p6B+y0LjSm0krU4OgJN44qFAhBY0TJ+1V61tYD2+wHusZ6F9n5K+vl8k0sTy7PEfV4qpg==} engines: {node: '>=16.17'} @@ -5862,6 +5951,10 @@ packages: resolution: {integrity: sha512-S+OpgB5i7wzIue/YSE5hg0e5ZYfG3hhpNh9KGl6ayJ38p7ED6wxQLd1TV91xHpcTvw90KMJ9EwN3F/iNflHBVg==} engines: {node: '>=8'} + is-in-ssh@1.0.0: + resolution: {integrity: sha512-jYa6Q9rH90kR1vKB6NM7qqd1mge3Fx4Dhw5TVlK1MUBqhEOuCagrEHMevNuCcbECmXZ0ThXkRm+Ymr51HwEPAw==} + engines: {node: '>=20'} + is-inside-container@1.0.0: resolution: {integrity: sha512-KIYLCCJghfHZxqjYBE7rEy0OBuTd5xCHS7tHVgvCLkx7StIoaxwNW3hCALgEUjFfeRk+MG/Qxmp/vtETEF3tRA==} engines: {node: '>=14.16'} @@ -6750,6 +6843,10 @@ packages: resolution: {integrity: sha512-1FlR+gjXK7X+AsAHso35MnyN5KqGwJRi/31ft6x0M194ht7S+rWAvd7PHss9xSKMzE0asv1pyIHaJYq+BbacAQ==} engines: {node: '>=12'} + open@11.0.0: + resolution: {integrity: sha512-smsWv2LzFjP03xmvFoJ331ss6h+jixfA4UUV/Bsiyuu4YJPfN+FIQGOIiv4w9/+MoHkfkJ22UIaQWRVFRfH6Vw==} + engines: {node: '>=20'} + open@8.4.2: resolution: {integrity: sha512-7x81NCL719oNbsq/3mh+hVrAWmFuEYUqrq/Iw3kUzH8ReypT9QQ0BLoJS7/G9k6N81XjW4qHWtjWwe/9eLy1EQ==} engines: {node: '>=12'} @@ -6930,6 +7027,10 @@ packages: resolution: {integrity: sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==} engines: {node: ^10 || ^12 || >=14} + powershell-utils@0.1.0: + resolution: {integrity: sha512-dM0jVuXJPsDN6DvRpea484tCUaMiXWjuCn++HGTqUWzGDjv5tZkEZldAJ/UMlqRYGFrD/etByo4/xOuC/snX2A==} + engines: {node: '>=20'} + preact@10.28.1: resolution: {integrity: 
sha512-u1/ixq/lVQI0CakKNvLDEcW5zfCjUQfZdK9qqWuIJtsezuyG6pk9TWj75GMuI/EzRSZB/VAE43sNWWZfiy8psw==} @@ -7178,6 +7279,9 @@ packages: resolution: {integrity: sha512-l0OE8wL34P4nJH/H2ffoaniAokM2qSmrtXHmlpvYr5AVVX8msAyW0l8NVJFDxlSK4u3Uh/f41cQheDVdnYijwQ==} hasBin: true + robot3@0.4.1: + resolution: {integrity: sha512-hzjy826lrxzx8eRgv80idkf8ua1JAepRc9Efdtj03N3KNJuznQCPlyCJ7gnUmDFwZCLQjxy567mQVKmdv2BsXQ==} + rolldown-plugin-dts@0.18.3: resolution: {integrity: sha512-rd1LZ0Awwfyn89UndUF/HoFF4oH9a5j+2ZeuKSJYM80vmeN/p0gslYMnHTQHBEXPhUlvAlqGA3tVgXB/1qFNDg==} engines: {node: '>=20.19.0'} @@ -7237,6 +7341,10 @@ packages: resolution: {integrity: sha512-nLTrUKm2UyiL7rlhapu/Zl45FwNgkZGaCpZbIHajDYgwlJCOzLSk+cIPAnsEqV955GjILJnKbdQC1nVPz+gAYQ==} engines: {node: '>= 18'} + run-applescript@7.1.0: + resolution: {integrity: sha512-DPe5pVFaAsinSaV6QjQ6gdiedWDcRCbUuiQfQa2wmWV7+xC9bGulGI8+TdRmoFkAPaBXk8CrAbnlY2ISniJ47Q==} + engines: {node: '>=18'} + run-parallel@1.2.0: resolution: {integrity: sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==} @@ -7631,6 +7739,7 @@ packages: tar@7.5.2: resolution: {integrity: sha512-7NyxrTE4Anh8km8iEy7o0QYPs+0JKBTj5ZaqHg6B39erLg0qYXN3BijtShwbsNSvQ+LN75+KV+C4QR/f6Gwnpg==} engines: {node: '>=18'} + deprecated: Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exhorbitant rates) by contacting i@izs.me term-size@2.2.1: resolution: {integrity: sha512-wK0Ri4fOGjv/XPy8SBHZChl8CM7uMc5VML7SqiQ0zG7+J5Vr+RMQDoHa2CNT6KHUnTGIXH34UDMkPzAUyapBZg==} @@ -8231,18 +8340,18 @@ packages: jsdom: optional: true - vitest@4.0.17: - resolution: {integrity: sha512-FQMeF0DJdWY0iOnbv466n/0BudNdKj1l5jYgl5JVTwjSsZSlqyXFt/9+1sEyhR6CLowbZpV7O1sCHrzBhucKKg==} + vitest@4.0.18: + resolution: {integrity: sha512-hOQuK7h0FGKgBAas7v0mSAsnvrIgAvWmRFjmzpJ7SwFHH3g1k2u37JtYwOwmEKhK6ZO3v9ggDBBm0La1LCK4uQ==} engines: {node: ^20.0.0 || ^22.0.0 || >=24.0.0} hasBin: true peerDependencies: '@edge-runtime/vm': '*' '@opentelemetry/api': ^1.9.0 '@types/node': ^20.0.0 || ^22.0.0 || >=24.0.0 - '@vitest/browser-playwright': 4.0.17 - '@vitest/browser-preview': 4.0.17 - '@vitest/browser-webdriverio': 4.0.17 - '@vitest/ui': 4.0.17 + '@vitest/browser-playwright': 4.0.18 + '@vitest/browser-preview': 4.0.18 + '@vitest/browser-webdriverio': 4.0.18 + '@vitest/ui': 4.0.18 happy-dom: '*' jsdom: '*' peerDependenciesMeta: @@ -8330,6 +8439,7 @@ packages: whatwg-encoding@3.1.1: resolution: {integrity: sha512-6qN4hJdMwfYBtE3YBTTHhoeuUrDBPZmbQaxWAqSALV/MeEnR5z1xd8UKud2RAkFoPkmB+hli1TZSnyi84xz1vQ==} engines: {node: '>=18'} + deprecated: Use @exodus/bytes instead for a more spec-conformant and faster implementation whatwg-fetch@3.6.20: resolution: {integrity: sha512-EqhiFU6daOA8kpjOWTL0olhVOF3i7OrFzSYiGsEMB8GcXS+RrzauAERX65xMeNWVqxA6HXH2m69Z9LaKKdisfg==} @@ -8407,6 +8517,10 @@ packages: utf-8-validate: optional: true + wsl-utils@0.3.1: + resolution: {integrity: sha512-g/eziiSUNBSsdDJtCLB8bdYEUMj4jR7AGeUo96p/3dTafgjHhpF4RiCFPiRILwjQoDXx5MqkBr4fwWtR3Ky4Wg==} + engines: {node: '>=20'} + xml-name-validator@5.0.0: resolution: {integrity: sha512-EvGK8EJ3DhaHfbRlETOWAS5pO9MZITeauHKJyb8wyajUfQUenkIg2MvLDTZ4T/TgIcm3HU0TFBgWWboAZ30UHg==} engines: {node: '>=18'} @@ -9291,6 +9405,12 @@ snapshots: '@faker-js/faker@10.1.0': {} + '@fal-ai/client@1.8.3': + dependencies: + '@msgpack/msgpack': 3.1.3 + eventsource-parser: 1.1.2 + robot3: 0.4.1 + '@gerrit0/mini-shiki@3.19.0': 
dependencies: '@shikijs/engine-oniguruma': 3.20.0 @@ -9308,6 +9428,39 @@ snapshots: - supports-color - utf-8-validate + '@hey-api/codegen-core@0.5.5(magicast@0.5.1)(typescript@5.9.3)': + dependencies: + '@hey-api/types': 0.1.2 + ansi-colors: 4.1.3 + c12: 3.3.3(magicast@0.5.1) + color-support: 1.1.3 + typescript: 5.9.3 + transitivePeerDependencies: + - magicast + + '@hey-api/json-schema-ref-parser@1.2.2': + dependencies: + '@jsdevtools/ono': 7.1.3 + '@types/json-schema': 7.0.15 + js-yaml: 4.1.1 + lodash: 4.17.21 + + '@hey-api/openapi-ts@0.90.10(magicast@0.5.1)(typescript@5.9.3)': + dependencies: + '@hey-api/codegen-core': 0.5.5(magicast@0.5.1)(typescript@5.9.3) + '@hey-api/json-schema-ref-parser': 1.2.2 + '@hey-api/types': 0.1.2 + ansi-colors: 4.1.3 + color-support: 1.1.3 + commander: 14.0.2 + open: 11.0.0 + semver: 7.7.3 + typescript: 5.9.3 + transitivePeerDependencies: + - magicast + + '@hey-api/types@0.1.2': {} + '@humanfs/core@0.19.1': {} '@humanfs/node@0.16.7': @@ -9379,6 +9532,8 @@ snapshots: '@jridgewell/resolve-uri': 3.1.2 '@jridgewell/sourcemap-codec': 1.5.5 + '@jsdevtools/ono@7.1.3': {} + '@manypkg/find-root@1.1.0': dependencies: '@babel/runtime': 7.28.4 @@ -9443,6 +9598,8 @@ snapshots: '@microsoft/tsdoc@0.15.1': {} + '@msgpack/msgpack@3.1.3': {} + '@napi-rs/wasm-runtime@0.2.12': dependencies: '@emnapi/core': 1.7.1 @@ -11692,7 +11849,7 @@ snapshots: transitivePeerDependencies: - supports-color - '@vitest/coverage-v8@4.0.14(vitest@4.0.17(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': + '@vitest/coverage-v8@4.0.14(vitest@4.0.18(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: '@bcoe/v8-coverage': 1.0.2 '@vitest/utils': 4.0.14 @@ -11705,11 +11862,11 @@ snapshots: obug: 2.1.1 std-env: 3.10.0 tinyrainbow: 3.0.3 - vitest: 4.0.17(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) + vitest: 4.0.18(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) transitivePeerDependencies: - supports-color - '@vitest/coverage-v8@4.0.14(vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': + '@vitest/coverage-v8@4.0.14(vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: '@bcoe/v8-coverage': 1.0.2 '@vitest/utils': 4.0.14 @@ -11722,7 +11879,7 @@ snapshots: obug: 2.1.1 std-env: 3.10.0 tinyrainbow: 3.0.3 - vitest: 4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) + vitest: 4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) transitivePeerDependencies: - supports-color @@ -11735,12 +11892,12 @@ snapshots: chai: 6.2.1 tinyrainbow: 3.0.3 - '@vitest/expect@4.0.17': + '@vitest/expect@4.0.18': dependencies: '@standard-schema/spec': 1.1.0 '@types/chai': 5.2.3 - '@vitest/spy': 4.0.17 - '@vitest/utils': 4.0.17 + '@vitest/spy': 4.0.18 + '@vitest/utils': 4.0.18 chai: 6.2.2 tinyrainbow: 3.0.3 @@ -11752,17 +11909,17 @@ snapshots: 
optionalDependencies: vite: 7.2.7(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) - '@vitest/mocker@4.0.17(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': + '@vitest/mocker@4.0.18(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: - '@vitest/spy': 4.0.17 + '@vitest/spy': 4.0.18 estree-walker: 3.0.3 magic-string: 0.30.21 optionalDependencies: vite: 7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) - '@vitest/mocker@4.0.17(vite@7.3.1(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': + '@vitest/mocker@4.0.18(vite@7.3.1(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: - '@vitest/spy': 4.0.17 + '@vitest/spy': 4.0.18 estree-walker: 3.0.3 magic-string: 0.30.21 optionalDependencies: @@ -11776,7 +11933,7 @@ snapshots: dependencies: tinyrainbow: 3.0.3 - '@vitest/pretty-format@4.0.17': + '@vitest/pretty-format@4.0.18': dependencies: tinyrainbow: 3.0.3 @@ -11785,9 +11942,9 @@ snapshots: '@vitest/utils': 4.0.15 pathe: 2.0.3 - '@vitest/runner@4.0.17': + '@vitest/runner@4.0.18': dependencies: - '@vitest/utils': 4.0.17 + '@vitest/utils': 4.0.18 pathe: 2.0.3 '@vitest/snapshot@4.0.15': @@ -11796,15 +11953,15 @@ snapshots: magic-string: 0.30.21 pathe: 2.0.3 - '@vitest/snapshot@4.0.17': + '@vitest/snapshot@4.0.18': dependencies: - '@vitest/pretty-format': 4.0.17 + '@vitest/pretty-format': 4.0.18 magic-string: 0.30.21 pathe: 2.0.3 '@vitest/spy@4.0.15': {} - '@vitest/spy@4.0.17': {} + '@vitest/spy@4.0.18': {} '@vitest/utils@4.0.14': dependencies: @@ -11816,9 +11973,9 @@ snapshots: '@vitest/pretty-format': 4.0.15 tinyrainbow: 3.0.3 - '@vitest/utils@4.0.17': + '@vitest/utils@4.0.18': dependencies: - '@vitest/pretty-format': 4.0.17 + '@vitest/pretty-format': 4.0.18 tinyrainbow: 3.0.3 '@volar/language-core@2.4.15': @@ -12245,6 +12402,10 @@ snapshots: base64-js: 1.5.1 ieee754: 1.2.1 + bundle-name@4.1.0: + dependencies: + run-applescript: 7.1.0 + bytes@3.1.2: {} c12@3.3.2(magicast@0.5.1): @@ -12264,6 +12425,23 @@ snapshots: optionalDependencies: magicast: 0.5.1 + c12@3.3.3(magicast@0.5.1): + dependencies: + chokidar: 5.0.0 + confbox: 0.2.2 + defu: 6.1.4 + dotenv: 17.2.3 + exsolve: 1.0.8 + giget: 2.0.0 + jiti: 2.6.1 + ohash: 2.0.11 + pathe: 2.0.3 + perfect-debounce: 2.0.0 + pkg-types: 2.3.0 + rc9: 2.1.2 + optionalDependencies: + magicast: 0.5.1 + cac@6.7.14: {} call-bind-apply-helpers@1.0.2: @@ -12397,6 +12575,8 @@ snapshots: color-name@1.1.4: {} + color-support@1.1.3: {} + combined-stream@1.0.8: dependencies: delayed-stream: 1.0.0 @@ -12407,6 +12587,8 @@ snapshots: commander@13.1.0: {} + commander@14.0.2: {} + commander@2.20.3: {} comment-parser@1.4.1: {} @@ -12570,6 +12752,13 @@ snapshots: deepmerge@4.3.1: {} + default-browser-id@5.0.1: {} + + default-browser@5.4.0: + dependencies: + bundle-name: 4.1.0 + default-browser-id: 5.0.1 + defaults@1.0.4: dependencies: clone: 1.0.4 @@ -12582,6 +12771,8 @@ snapshots: define-lazy-prop@2.0.0: {} + define-lazy-prop@3.0.0: {} + define-properties@1.2.1: dependencies: define-data-property: 1.1.4 @@ -13087,6 +13278,8 @@ snapshots: events@3.3.0: {} + eventsource-parser@1.1.2: {} + execa@8.0.1: dependencies: cross-spawn: 7.0.6 @@ -13758,6 +13951,8 @@ snapshots: dependencies: html-tags: 3.3.1 + is-in-ssh@1.0.0: {} + is-inside-container@1.0.0: dependencies: 
is-docker: 3.0.0 @@ -14934,6 +15129,15 @@ snapshots: dependencies: mimic-fn: 4.0.0 + open@11.0.0: + dependencies: + default-browser: 5.4.0 + define-lazy-prop: 3.0.0 + is-in-ssh: 1.0.0 + is-inside-container: 1.0.0 + powershell-utils: 0.1.0 + wsl-utils: 0.3.1 + open@8.4.2: dependencies: define-lazy-prop: 2.0.0 @@ -15124,6 +15328,8 @@ snapshots: picocolors: 1.1.1 source-map-js: 1.2.1 + powershell-utils@0.1.0: {} + preact@10.28.1: {} preact@10.28.2: {} @@ -15396,6 +15602,8 @@ snapshots: dependencies: glob: 10.5.0 + robot3@0.4.1: {} + rolldown-plugin-dts@0.18.3(oxc-resolver@11.15.0)(rolldown@1.0.0-beta.53)(typescript@5.9.3): dependencies: '@babel/generator': 7.28.5 @@ -15519,6 +15727,8 @@ snapshots: transitivePeerDependencies: - supports-color + run-applescript@7.1.0: {} + run-parallel@1.2.0: dependencies: queue-microtask: 1.2.3 @@ -16506,7 +16716,7 @@ snapshots: fdir: 6.5.0(picomatch@4.0.3) picomatch: 4.0.3 postcss: 8.5.6 - rollup: 4.53.3 + rollup: 4.55.1 tinyglobby: 0.2.15 optionalDependencies: '@types/node': 24.10.3 @@ -16632,15 +16842,15 @@ snapshots: - tsx - yaml - vitest@4.0.17(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2): + vitest@4.0.18(@types/node@24.10.3)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2): dependencies: - '@vitest/expect': 4.0.17 - '@vitest/mocker': 4.0.17(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) - '@vitest/pretty-format': 4.0.17 - '@vitest/runner': 4.0.17 - '@vitest/snapshot': 4.0.17 - '@vitest/spy': 4.0.17 - '@vitest/utils': 4.0.17 + '@vitest/expect': 4.0.18 + '@vitest/mocker': 4.0.18(vite@7.3.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + '@vitest/pretty-format': 4.0.18 + '@vitest/runner': 4.0.18 + '@vitest/snapshot': 4.0.18 + '@vitest/spy': 4.0.18 + '@vitest/utils': 4.0.18 es-module-lexer: 1.7.0 expect-type: 1.3.0 magic-string: 0.30.21 @@ -16671,15 +16881,15 @@ snapshots: - tsx - yaml - vitest@4.0.17(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2): + vitest@4.0.18(@types/node@25.0.1)(happy-dom@20.0.11)(jiti@2.6.1)(jsdom@27.3.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2): dependencies: - '@vitest/expect': 4.0.17 - '@vitest/mocker': 4.0.17(vite@7.3.1(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) - '@vitest/pretty-format': 4.0.17 - '@vitest/runner': 4.0.17 - '@vitest/snapshot': 4.0.17 - '@vitest/spy': 4.0.17 - '@vitest/utils': 4.0.17 + '@vitest/expect': 4.0.18 + '@vitest/mocker': 4.0.18(vite@7.3.1(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2)) + '@vitest/pretty-format': 4.0.18 + '@vitest/runner': 4.0.18 + '@vitest/snapshot': 4.0.18 + '@vitest/spy': 4.0.18 + '@vitest/utils': 4.0.18 es-module-lexer: 1.7.0 expect-type: 1.3.0 magic-string: 0.30.21 @@ -16849,6 +17059,11 @@ snapshots: ws@8.18.3: {} + wsl-utils@0.3.1: + dependencies: + is-wsl: 3.1.0 + powershell-utils: 0.1.0 + xml-name-validator@5.0.0: {} xmlbuilder2@3.1.1: